本文主要是介绍binder 驱动情景分析-获取服务,希望对大家解决编程问题提供一定的参考价值,需要的开发者们随着小编来一起学习吧!
binder进程间通讯 C程序代码示例 binder_c 。本文基于该示例,对binder通讯的获取服务进行分析。关于注册服务binder驱动分析可以参考文章binder 驱动情景分析-注册服务 binder驱动的路径为:kernel\drivers\android\binder.c
test_client 启动时,和test_server进程一样,也是先通过binder_open 打开binder驱动并映射内存,这样,在binder驱动中就会创建一个binder_proc和test_client 对应。
打开驱动后,接着调用svcmgr_lookup来获取服务
handle = svcmgr_lookup(bs, svcmgr, HELLO_SERVER_NAME); //svcmgr:0 ,HELLO_SERVER_NAME:hellouint32_t svcmgr_lookup(struct binder_state *bs, uint32_t target, const char *name)
{uint32_t handle;unsigned iodata[512/4];struct binder_io msg, reply;bio_init(&msg, iodata, sizeof(iodata), 4);bio_put_uint32(&msg, 0); // strict mode headerbio_put_string16_x(&msg, SVC_MGR_NAME);bio_put_string16_x(&msg, name);if (binder_call(bs, &msg, &reply, target, SVC_MGR_CHECK_SERVICE))//1return 0;handle = bio_get_ref(&reply); //2if (handle)binder_acquire(bs, handle);binder_done(bs, &msg, &reply);return handle;
}
可以看出也是先构造binder_io,将数据放入其中,只不过和注册服务不同的是没有放入flat_binder_object 。 注释1处构造好数据后,调用binder_call发起远程调用。注释2处得到服务端回复的数据,取出handle。
和注册服务一样,在binder_call中也是将binder_io转换为binder_write_read,然后调用ioctl将数据写给binder驱动。和注册服务不同的是传入的code为SVC_MGR_CHECK_SERVICE 代表获取服务
整体的流程和test_server注册服务基本一致,都是先构造数据,然后将数据写给binder驱动。不同的是:
- 构造数据时,不会放入flat_binder_object
- 传入的code为SVC_MGR_CHECK_SERVICE ,代表需要获取服务
注意传入的cmd是BC_TRANSACTION,调用ioctl传入的参数为BINDER_WRITE_READ。调用 ioctl导致binder驱动的binder_ioctl被调用,对于BINDER_WRITE_READ,调用binder_ioctl_write_read处理
case BINDER_WRITE_READ:ret = binder_ioctl_write_read(filp, cmd, arg, thread);if (ret)goto err;break;
在binder_ioctl_write_read函数中,对于写操作调用binder_thread_write,读操作,调用binder_thread_read
if (bwr.write_size > 0) {ret = binder_thread_write(proc, thread,bwr.write_buffer,bwr.write_size,&bwr.write_consumed);trace_binder_write_done(ret);}if (bwr.read_size > 0) {ret = binder_thread_read(proc, thread, bwr.read_buffer,bwr.read_size,&bwr.read_consumed,filp->f_flags & O_NONBLOCK);trace_binder_read_done(ret);binder_inner_proc_lock(proc);if (!binder_worklist_empty_ilocked(&proc->todo))binder_wakeup_proc_ilocked(proc);binder_inner_proc_unlock(proc);}
对于test_client,write_size 和read_size 都大于0,表明先写数据,然后读数据,在binder_thread_read函数中,如果没有数据就休眠。
在binder_thread_write中,对于BC_TRANSACTION,调用binder_transaction进一步处理
case BC_TRANSACTION:case BC_REPLY: {struct binder_transaction_data tr;if (copy_from_user(&tr, ptr, sizeof(tr)))return -EFAULT;ptr += sizeof(tr);binder_transaction(proc, thread, &tr,cmd == BC_REPLY, 0);break;}
来看一下binder_transaction的处理
1,找到目的进程
//省略
} else {mutex_lock(&context->context_mgr_node_lock);target_node = context->binder_context_mgr_node;//1if (target_node)target_node = binder_get_node_refs_for_txn(target_node, &target_proc,&return_error);//2elsereturn_error = BR_DEAD_REPLY;
//省略
传入的handle为0,表明发送数据给servicemanager进程。注释1处得到servicemanager进程对应的binder_node。注释2处根据binder_node,找到servicemanager进程
2,创建binder_transaction
//省略
t = kzalloc(sizeof(*t), GFP_KERNEL);//1
if (t == NULL) {return_error = BR_FAILED_REPLY;return_error_param = -ENOMEM;return_error_line = __LINE__;goto err_alloc_t_failed;
}
binder_stats_created(BINDER_STAT_TRANSACTION);
spin_lock_init(&t->lock);tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);//2
if (tcomplete == NULL) {return_error = BR_FAILED_REPLY;return_error_param = -ENOMEM;return_error_line = __LINE__;goto err_alloc_tcomplete_failed;
}if (!reply && !(tr->flags & TF_ONE_WAY))t->from = thread;elset->from = NULL;t->sender_euid = task_euid(proc->tsk);t->to_proc = target_proc;t->to_thread = target_thread;t->code = tr->code;t->flags = tr->flags;if (!(t->flags & TF_ONE_WAY) &&binder_supported_policy(current->policy)) {/* Inherit supported policies for synchronous transactions */t->priority.sched_policy = current->policy;t->priority.prio = current->normal_prio;} else {/* Otherwise, fall back to the default priority */t->priority = target_proc->default_priority;}
注释1处申请binder_transaction内存,注释2处申请binder_work内存。后面就是给binder_transaction赋值。注意以下几个值
t->from = thread; //from指向当前线程,即test_client线程
t->to_proc = target_proc; //to_proc指向servicemanager进程
t->to_thread = target_thread;//to_thread 指向servicemanager的线程
t->code = tr->code; //code为SVC_MGR_CHECK_SERVICE
3,数据拷贝
//省略
t->buffer = binder_alloc_new_buf(&target_proc->alloc, tr->data_size,tr->offsets_size, extra_buffers_size,!reply && (t->flags & TF_ONE_WAY));//1
t->buffer->allow_user_free = 0;t->buffer->debug_id = t->debug_id;t->buffer->transaction = t;t->buffer->target_node = target_node;trace_binder_transaction_alloc_buf(t->buffer);off_start = (binder_size_t *)(t->buffer->data +ALIGN(tr->data_size, sizeof(void *)));offp = off_start;if (copy_from_user(t->buffer->data, (const void __user *)(uintptr_t)tr->data.ptr.buffer, tr->data_size)) {//2binder_user_error("%d:%d got transaction with invalid data ptr\n",proc->pid, thread->pid);return_error = BR_FAILED_REPLY;return_error_param = -EFAULT;return_error_line = __LINE__;goto err_copy_data_failed;}
注释1处申请内存,注意申请的内存是servicemanager映射的那块内存,后续servicemanager就可以直接从这块内存上得到数据,就不用再次拷贝了。注释2处从用户空间拷贝数据到这个内存上。
4,入栈及唤醒目的进程(servicemanager)
//省略
t->work.type = BINDER_WORK_TRANSACTION;//1
if (reply) {//现在不是回复,不走这个分支//省略} else if (!(t->flags & TF_ONE_WAY)) {BUG_ON(t->buffer->async_transaction != 0);binder_inner_proc_lock(proc);t->need_reply = 1;t->from_parent = thread->transaction_stack;thread->transaction_stack = t; //2binder_inner_proc_unlock(proc);if (!binder_proc_transaction(t, target_proc, target_thread)) {//3binder_inner_proc_lock(proc);binder_pop_transaction_ilocked(thread, t);binder_inner_proc_unlock(proc);goto err_dead_proc_or_thread;}} else {BUG_ON(target_node == NULL);BUG_ON(t->buffer->async_transaction != 1);if (!binder_proc_transaction(t, target_proc, NULL))goto err_dead_proc_or_thread;}//省略
注意注释1处 work的type为BINDER_WORK_TRANSACTION。注释2处通过from_parent 入栈:此时当前线程(即test_client的线程)的transaction_stack为空,所以from_parent 指向NULL。结合之前对binder_transaction的赋值操作,经过入栈之后,这个binder_transaction就位于test_client线程传输栈的栈顶。
注释3处调用binder_proc_transaction,将数据放入servicemanager的todo链表,唤醒servicemanager进程
static bool binder_proc_transaction(struct binder_transaction *t,struct binder_proc *proc,struct binder_thread *thread)
{//省略if (!thread && !target_list)thread = binder_select_thread_ilocked(proc);if (thread) {target_list = &thread->todo;binder_transaction_priority(thread->task, t, node_prio,node->inherit_rt);} else if (!target_list) {target_list = &proc->todo;} else {BUG_ON(target_list != &node->async_todo);}binder_enqueue_work_ilocked(&t->work, target_list);//1if (wakeup)binder_wakeup_thread_ilocked(proc, thread, !oneway /* sync */);//2binder_inner_proc_unlock(proc);binder_node_unlock(node);return true;
}
注释1处放入链表,注释2处唤醒servicemanager进程。注意type为BINDER_WORK_TRANSACTION。servicemanager启动的时候,调用binder_thread_read,等待数据,没有数据就休眠,被唤醒后,继续往下执行
//省略
while (1) {uint32_t cmd;struct binder_transaction_data tr;struct binder_work *w = NULL;struct list_head *list = NULL;struct binder_transaction *t = NULL;struct binder_thread *t_from;binder_inner_proc_lock(proc);if (!binder_worklist_empty_ilocked(&thread->todo))list = &thread->todo;//1else if (!binder_worklist_empty_ilocked(&proc->todo) &&wait_for_proc_work)list = &proc->todo;//1else {binder_inner_proc_unlock(proc);w = binder_dequeue_work_head_ilocked(list);//2switch (w->type) {case BINDER_WORK_TRANSACTION: {binder_inner_proc_unlock(proc);t = container_of(w, struct binder_transaction, work);//3} break;//省略
注释1处从todo链表中取出头部,前面test_client已经将数据放入servicemanager的todo链表了。注释2处取出binder_work ,type为前面说的BINDER_WORK_TRANSACTION。注释3处取出binder_transaction,里面包含了各种数据
接下来就是通过取出的binder_transaction给tr变量赋值,赋值之后写给servicemanager用户态了
if (t->buffer->target_node) {struct binder_node *target_node = t->buffer->target_node;struct binder_priority node_prio;tr.target.ptr = target_node->ptr;tr.cookie = target_node->cookie;node_prio.sched_policy = target_node->sched_policy;node_prio.prio = target_node->min_priority;binder_transaction_priority(current, t, node_prio,target_node->inherit_rt);cmd = BR_TRANSACTION;} else {tr.target.ptr = 0;tr.cookie = 0;cmd = BR_REPLY;}tr.code = t->code;tr.flags = t->flags;tr.sender_euid = from_kuid(current_user_ns(), t->sender_euid);t_from = binder_get_txn_from(t);if (t_from) {struct task_struct *sender = t_from->proc->tsk;tr.sender_pid = task_tgid_nr_ns(sender,task_active_pid_ns(current));} else {tr.sender_pid = 0;}tr.data_size = t->buffer->data_size;tr.offsets_size = t->buffer->offsets_size;tr.data.ptr.buffer = (binder_uintptr_t)((uintptr_t)t->buffer->data +binder_alloc_get_user_buffer_offset(&proc->alloc));tr.data.ptr.offsets = tr.data.ptr.buffer +ALIGN(t->buffer->data_size,sizeof(void *));if (put_user(cmd, (uint32_t __user *)ptr)) {if (t_from)binder_thread_dec_tmpref(t_from);binder_cleanup_transaction(t, "put_user failed",BR_FAILED_REPLY);return -EFAULT;}ptr += sizeof(uint32_t);if (copy_to_user(ptr, &tr, sizeof(tr))) {if (t_from)binder_thread_dec_tmpref(t_from);binder_cleanup_transaction(t, "copy_to_user failed",BR_FAILED_REPLY);return -EFAULT;}ptr += sizeof(tr);
这里注意几个数据:1,给用户态的cmd为BR_TRANSACTION 2,code为SVC_MGR_CHECK_SERVICE
数据处理好后,还要进行入栈操作
if (cmd == BR_TRANSACTION && !(t->flags & TF_ONE_WAY)) {binder_inner_proc_lock(thread->proc);t->to_parent = thread->transaction_stack;t->to_thread = thread;thread->transaction_stack = t;binder_inner_proc_unlock(thread->proc);
}
接着来看servicemanager用户空间的处理,对于SVC_MGR_CHECK_SERVICE,表明需要查找服务
case SVC_MGR_CHECK_SERVICE:s = bio_get_string16(msg, &len);//1if (s == NULL) {return -1;}handle = do_find_service(s, len, txn->sender_euid, txn->sender_pid);//2if (!handle)break;bio_put_ref(reply, handle);//3return 0;
注释1处取出name,即hello ,注释2处调用do_find_service查找服务,在test_server注册服务的时候,将name和handle放入了链表,do_find_service就是根据name,得到handle整数。注释3处将handle放入flat_binder_object中,并将flat_binder_object放入reply
void bio_put_ref(struct binder_io *bio, uint32_t handle)
{struct flat_binder_object *obj;if (handle)obj = bio_alloc_obj(bio);elseobj = bio_alloc(bio, sizeof(*obj));if (!obj)return;obj->flags = 0x7f | FLAT_BINDER_FLAG_ACCEPTS_FDS;obj->type = BINDER_TYPE_HANDLE;obj->handle = handle;obj->cookie = 0;
}
servicemanager处理完之后调用 binder_send_reply 回复。同注册服务的回复过程,将回复的数据写给binder驱动,回复时的cmd为BC_REPLY,不过获取服务的回复数据带有flat_binder_object,里面的type是BINDER_TYPE_HANDLE,handle指向注册hello服务时的handle。接着又进入binder驱动,对于BC_REPLY,同样使用binder_transaction处理
case BC_REPLY: {struct binder_transaction_data tr;if (copy_from_user(&tr, ptr, sizeof(tr)))return -EFAULT;ptr += sizeof(tr);binder_transaction(proc, thread, &tr,cmd == BC_REPLY, 0);break;}
接着来看一下回复时binder_transaction处理的过程,要记住现在所处的进程是servicemanager进程
1,找到需要回复的目的进程test_client
和发送数据时一样,这里同样要先找到目的进程,不过查找的方法有所不同:这里是通过传输栈来查找的
if (reply) {binder_inner_proc_lock(proc);in_reply_to = thread->transaction_stack;//1thread->transaction_stack = in_reply_to->to_parent;//2binder_inner_proc_unlock(proc);target_thread = binder_get_txn_from_and_acq_inner(in_reply_to);//3target_proc = target_thread->proc;//4target_proc->tmp_ref++;binder_inner_proc_unlock(target_thread->proc);}
注释1处找到servicemanager的传输栈。注释2处对servicemanager的传输栈进行出栈处理 。注释3处找到需要回复的线程。注释4处根据线程找到回复的进程
static struct binder_thread *binder_get_txn_from_and_acq_inner(struct binder_transaction *t)
{struct binder_thread *from;from = binder_get_txn_from(t); //from指向的是test_client线程if (!from)return NULL;binder_inner_proc_lock(from->proc);if (t->from) {BUG_ON(from != t->from);return from;}binder_inner_proc_unlock(from->proc);binder_thread_dec_tmpref(from);return NULL;
}
可以看出,通过传输栈,找到了需要回复的线程是test_client的线程,接着找到test_client进程
2,创建binder_transaction
和前面创建binder_transaction的流程一样,不同的是各参数的赋值不一样。
3,将应用空间中的数据拷贝到内核空间
和前面拷贝的流程也是一样的,只不过现在拷贝到的位置是test_client进程映射的内存里,并且带有flat_binder_object,flat_binder_object中type为BINDER_TYPE_HANDLE,handle为hello服务对应的引用
4,处理flat_binder_object
对于BINDER_TYPE_HANDLE,用以下方法处理
case BINDER_TYPE_HANDLE:case BINDER_TYPE_WEAK_HANDLE: {struct flat_binder_object *fp;fp = to_flat_binder_object(hdr);ret = binder_translate_handle(fp, t, thread);} break;
static int binder_translate_handle(struct flat_binder_object *fp,struct binder_transaction *t,struct binder_thread *thread)
{//省略node = binder_get_node_from_ref(proc, fp->handle,fp->hdr.type == BINDER_TYPE_HANDLE, &src_rdata); //1if (node->proc == target_proc) {//2//省略} else {struct binder_ref_data dest_rdata;binder_node_unlock(node);ret = binder_inc_ref_for_node(target_proc, node,fp->hdr.type == BINDER_TYPE_HANDLE,NULL, &dest_rdata);//3 if (ret)goto done;fp->binder = 0;fp->handle = dest_rdata.desc;//4fp->cookie = 0;trace_binder_transaction_ref_to_ref(t, node, &src_rdata,&dest_rdata);binder_debug(BINDER_DEBUG_TRANSACTION," ref %d desc %d -> ref %d desc %d (node %d)\n",src_rdata.debug_id, src_rdata.desc,dest_rdata.debug_id, dest_rdata.desc,node->debug_id);}}
注释1处,根据servicemanager用户空间传入的handle找到servicemanager对hello服务的引用binder_ref,然后根据binder_ref找到hello服务对应的binder_node。注释2处的条件不成立:node->proc指向test_server,target_proc为test_client。注释3处为test_client创建binder_ref,用来引用hello服务的binder_node,注释4处将binder_ref的desc赋值给handle ,后面需要传给test_client
5,出栈并唤醒test_client进程
t->work.type = BINDER_WORK_TRANSACTION;
if (reply) {binder_inner_proc_lock(target_proc);if (target_thread->is_dead) {binder_inner_proc_unlock(target_proc);goto err_dead_proc_or_thread;}BUG_ON(t->buffer->async_transaction != 0);binder_pop_transaction_ilocked(target_thread, in_reply_to);//1binder_enqueue_work_ilocked(&t->work, &target_thread->todo);//2binder_inner_proc_unlock(target_proc);wake_up_interruptible_sync(&target_thread->wait);binder_restore_priority(current, in_reply_to->saved_priority);binder_free_transaction(in_reply_to);}
注释1处进行出栈处理(对test_client的栈进行出栈),注释2处将数据放入test_client的todo链表,并唤醒test_client进程。前面提到过,test_client在binder_thread_read函数里休眠,被唤醒后继续往下执行,同样也是取出数据,然后写给test_client的用户空间。test_client收到数据后,取出handle,后面可以通过这个handle,找到test_server进程,进而和其进行通信。整个过程结束
handle = bio_get_ref(&reply);
总结
获取服务的流程:
- test_client 构造数据,调用ioctl,将数据写给binder驱动,数据中包含handle,值为0
- binder驱动根据handle为0,找到servicemanager进程,将数据放入servicemanager映射的内存中
- servicemanager用户空间根据名字hello,找到之前注册hello服务时的handle(注意这个handle和test_client传入的handle不是同一个)
- servicemanager根据找到的handle,构造flat_binder_object,然后将数据写给binder驱动
- binder驱动取出flat_binder_object,得到handle,根据这个handle,找到servicemanager中的binder_ref,进而找到hello服务的实体binder_node
- 为test_client创建binder_ref,引用hello服务的binder_node(就是将test_client的binder_ref中的node指向hello服务)
- 返回binder_ref的desc(也是放在flat_binder_object中),给test_client的用户空间
- test_client取出desc,赋值给handle。后续test_client可以通过这个handle,在binder驱动中找到binder_ref,进而找到test_server进程。
这篇关于binder 驱动情景分析-获取服务的文章就介绍到这儿,希望我们推荐的文章对编程师们有所帮助!