How to intuitively understand Binder (2)?
Continuing from the previous chapter: last time we covered how messages are exchanged at the most fundamental level, but we did not walk through the communication process itself. Most articles online only sketch that process briefly, and in my view they skip a point that is key for telling the two communication modes apart. That point is oneway.
A brief introduction to oneway
oneway translates literally as "single channel", which is easy enough to grasp. To build intuition, let me first give a quick demonstration of this keyword in AIDL.
interface ITest {
    void basicTypes(int anInt);
}

@Override
public void basicTypes(int anInt) throws android.os.RemoteException {
    android.os.Parcel _data = android.os.Parcel.obtain();
    android.os.Parcel _reply = android.os.Parcel.obtain();
    try {
        _data.writeInterfaceToken(DESCRIPTOR);
        _data.writeInt(anInt);
        boolean _status = mRemote.transact(Stub.TRANSACTION_basicTypes, _data, _reply, 0);
        if (!_status && getDefaultImpl() != null) {
            getDefaultImpl().basicTypes(anInt);
            return;
        }
        _reply.readException();
    } finally {
        _reply.recycle();
        _data.recycle();
    }
}
Above is the basicTypes method from the Proxy that AIDL auto-generates for a throwaway default interface I created. Now let's look at the generated method after the function is marked oneway.
interface ITest {
    oneway void basicTypes(int anInt);
}

@Override
public void basicTypes(int anInt) throws android.os.RemoteException {
    android.os.Parcel _data = android.os.Parcel.obtain();
    try {
        _data.writeInterfaceToken(DESCRIPTOR);
        _data.writeInt(anInt);
        boolean _status = mRemote.transact(Stub.TRANSACTION_basicTypes, _data, null, android.os.IBinder.FLAG_ONEWAY);
        if (!_status && getDefaultImpl() != null) {
            getDefaultImpl().basicTypes(anInt);
            return;
        }
    } finally {
        _data.recycle();
    }
}
Have you spotted the difference? The oneway version has no reply at all: it passes null to transact() along with IBinder.FLAG_ONEWAY. The non-oneway version, even for a void method, still creates _reply, and transact() waits for the remote side to fill it before readException() checks it. In other words, oneway and non-oneway are two distinct modes. Keep that question in mind as we read on.
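On the driver side, the IBinder.FLAG_ONEWAY that the proxy passes corresponds to the TF_ONE_WAY transaction flag, which we will meet again in the kernel code below. From the UAPI header include/uapi/linux/android/binder.h (abridged):

enum transaction_flags {
    TF_ONE_WAY     = 0x01, /* this is an async call: no reply expected */
    TF_ROOT_OBJECT = 0x04, /* contents are the component's root object */
    TF_STATUS_CODE = 0x08, /* contents are a 32-bit status code */
    TF_ACCEPT_FDS  = 0x10, /* allow replies carrying file descriptors */
};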
Starting the communication
binder_ioctl
static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
int ret;
struct binder_proc *proc = filp->private_data; // the binder proc; as covered last time, the file's private_data points to it
struct binder_thread *thread; // the binder thread
unsigned int size = _IOC_SIZE(cmd);
//Key point: the data passed in is essentially just an address. Remember this arg; why it has to be cast like this is explained in detail later.
void __user *ubuf = (void __user *)arg;
/*pr_info("binder_ioctl: %d:%d %x %lx\n",
proc->pid, current->pid, cmd, arg);*/
//self-test call
binder_selftest_alloc(&proc->alloc);
//trace
trace_binder_ioctl(cmd, arg);
//kernel macro: returns once the condition is satisfied
ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
if (ret)
goto err_unlocked;
//fetch the calling thread
thread = binder_get_thread(proc);
if (thread == NULL) {
ret = -ENOMEM;
goto err;
}
//dispatch on the command passed in
switch (cmd) {
case BINDER_WRITE_READ: //the read/write command; so far we have only fetched the thread, no data has been converted yet.
ret = binder_ioctl_write_read(filp, cmd, arg, thread);
if (ret)
goto err;
break;
case BINDER_SET_MAX_THREADS: { //set the maximum number of binder threads
int max_threads;
if (copy_from_user(&max_threads, ubuf,
sizeof(max_threads))) {
ret = -EINVAL;
goto err;
}
binder_inner_proc_lock(proc);
proc->max_threads = max_threads;
binder_inner_proc_unlock(proc);
break;
}
case BINDER_SET_CONTEXT_MGR_EXT: { //register as the ServiceManager node
struct flat_binder_object fbo;
if (copy_from_user(&fbo, ubuf, sizeof(fbo))) {
ret = -EINVAL;
goto err;
}
ret = binder_ioctl_set_ctx_mgr(filp, &fbo);
if (ret)
goto err;
break;
}
case BINDER_SET_CONTEXT_MGR: // same as BINDER_SET_CONTEXT_MGR_EXT; this older variant cannot carry a parameter
ret = binder_ioctl_set_ctx_mgr(filp, NULL);
if (ret)
goto err;
break;
case BINDER_THREAD_EXIT: // thread exit: release the thread
binder_debug(BINDER_DEBUG_THREADS, "%d:%d exit\n",
proc->pid, thread->pid);
binder_thread_release(proc, thread);
thread = NULL;
break;
case BINDER_VERSION: { // query the protocol version
struct binder_version __user *ver = ubuf;
if (size != sizeof(struct binder_version)) {
ret = -EINVAL;
goto err;
}
if (put_user(BINDER_CURRENT_PROTOCOL_VERSION,
&ver->protocol_version)) {
ret = -EINVAL;
goto err;
}
break;
}
case BINDER_GET_NODE_INFO_FOR_REF: { // query node info for a binder reference
struct binder_node_info_for_ref info;
if (copy_from_user(&info, ubuf, sizeof(info))) {
ret = -EFAULT;
goto err;
}
ret = binder_ioctl_get_node_info_for_ref(proc, &info);
if (ret < 0)
goto err;
if (copy_to_user(ubuf, &info, sizeof(info))) {
ret = -EFAULT;
goto err;
}
break;
}
case BINDER_GET_NODE_DEBUG_INFO: { //query debug info for a binder node
struct binder_node_debug_info info;
if (copy_from_user(&info, ubuf, sizeof(info))) {
ret = -EFAULT;
goto err;
}
ret = binder_ioctl_get_node_debug_info(proc, &info);
if (ret < 0)
goto err;
if (copy_to_user(ubuf, &info, sizeof(info))) {
ret = -EFAULT;
goto err;
}
break;
}
default:
ret = -EINVAL;
goto err;
}
ret = 0;
err:
if (thread)
thread->looper_need_return = false;
wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
if (ret && ret != -ERESTARTSYS)
pr_info("%d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret);
err_unlocked:
trace_binder_ioctl_done(ret);
return ret;
}
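Two of the simpler commands above are easy to exercise from userspace. This is roughly (a sketch, not verbatim AOSP code) what ProcessState::open_driver() does when a process first opens the driver:

#include <stdint.h>
#include <fcntl.h>
#include <sys/ioctl.h>
#include <linux/android/binder.h>

static int open_driver_sketch(void)
{
    int fd = open("/dev/binder", O_RDWR | O_CLOEXEC);
    struct binder_version vers;
    uint32_t max_threads = 15; /* ProcessState's DEFAULT_MAX_BINDER_THREADS */

    ioctl(fd, BINDER_VERSION, &vers);                /* BINDER_VERSION case: put_user() fills vers */
    ioctl(fd, BINDER_SET_MAX_THREADS, &max_threads); /* copy_from_user() into proc->max_threads */
    return fd;
}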
The binder_ioctl code above is rather long; so what is the point of all those other commands?
Back to our hotel: the communication system does not exist solely for guest-to-guest messaging. It also has to let guests conveniently use the hotel's various services, and, as a side note, the hotel obviously will not give each staff member a private room (that is, a process) the way it does for guests.
This is where the BINDER_SET_CONTEXT_MGR_EXT command comes in: it wraps the hotel's services into a single ServiceManager, so that all of them can be conveniently offered to guests as one whole. The remaining commands are mostly used internally, in scenarios such as bringing the communication system up; what users mainly touch is BINDER_WRITE_READ.
P.S.: ServiceManager is one of the most important concepts in Frameworks; I may devote a separate article to it later.
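Before we descend into the kernel side of BINDER_WRITE_READ, here is a minimal userspace sketch of issuing such a call, modeled on the binder_write() helper in frameworks/native/cmds/servicemanager/binder.c (assume fd came from open("/dev/binder", O_RDWR) plus the mmap from part 1):

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/android/binder.h>

static int binder_write_sketch(int fd, void *data, size_t len)
{
    struct binder_write_read bwr;

    memset(&bwr, 0, sizeof(bwr));
    bwr.write_size = len;               /* bytes for the driver to consume */
    bwr.write_buffer = (uintptr_t)data; /* the BC_* command stream */
    /* read_size stays 0: write-only, so the read branch below is skipped */
    return ioctl(fd, BINDER_WRITE_READ, &bwr); /* &bwr is the "arg" binder_ioctl receives */
}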
Let's turn our attention back to BINDER_WRITE_READ, that is, binder_ioctl_write_read, and continue with the code.
binder_ioctl_write_read
static int binder_ioctl_write_read(struct file *filp,
unsigned int cmd, unsigned long arg,
struct binder_thread *thread)
{
int ret = 0;
// fetch the binder proc
struct binder_proc *proc = filp->private_data;
unsigned int size = _IOC_SIZE(cmd);
//The same line appeared in binder_ioctl; here is why. arg is the address of the incoming data,
//but it is a user-space address: before it can be copied into kernel memory it is cast to a
//__user-annotated pointer. __user, unsurprisingly, is also a macro.
void __user *ubuf = (void __user *)arg;
//Why build this structure here? Because the address arg points to holds exactly this structure.
//The matching user-side code appears again when we cover servicemanager; if you cannot wait, read
//the binder_write function in frameworks/native/cmds/servicemanager/binder.c to digest this stretch faster.
struct binder_write_read bwr;
if (size != sizeof(struct binder_write_read)) {
ret = -EINVAL;
goto out;
}
// after all that setup, a plain, unadorned copy_from_user copies the user data into bwr
if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
ret = -EFAULT;
goto out;
}
binder_debug(BINDER_DEBUG_READ_WRITE,
"%d:%d write %lld at %016llx, read %lld at %016llx\n",
proc->pid, thread->pid,
(u64)bwr.write_size, (u64)bwr.write_buffer,
(u64)bwr.read_size, (u64)bwr.read_buffer);
//the write length in binder_write_read is > 0: handle the write first
if (bwr.write_size > 0) {
ret = binder_thread_write(proc, thread,
bwr.write_buffer,
bwr.write_size,
&bwr.write_consumed);
trace_binder_write_done(ret);
//if the return value is < 0 an error occurred; put the data back at the user address
if (ret < 0) {
bwr.read_consumed = 0;
if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
ret = -EFAULT;
goto out;
}
}
if (bwr.read_size > 0) {
ret = binder_thread_read(proc, thread, bwr.read_buffer,
bwr.read_size,
&bwr.read_consumed,
filp->f_flags & O_NONBLOCK);
trace_binder_read_done(ret);
if (!list_empty(&proc->todo))
wake_up_interruptible(&proc->wait);
if (ret < 0) {
if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
ret = -EFAULT;
goto out;
}
}
binder_debug(BINDER_DEBUG_READ_WRITE,
"%d:%d wrote %lld of %lld, read return %lld of %lld\n",
proc->pid, thread->pid,
(u64)bwr.write_consumed, (u64)bwr.write_size,
(u64)bwr.read_consumed, (u64)bwr.read_size);
if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
ret = -EFAULT;
goto out;
}
out:
return ret;
}
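A quick digression before the recap: despite the casts above, __user is not a runtime conversion. It is an annotation for the sparse static checker marking a pointer as a user-space address, and it compiles away in a normal build. In older kernels the definition reads roughly:

#ifdef __CHECKER__
# define __user __attribute__((noderef, address_space(1)))
#else
# define __user
#endif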
The important logic and knowledge points are in the code comments above; now let's briefly take stock of what we have seen.
1: First, binder_ioctl() is called. Here we meet the first "code": the cmd passed straight in. It is relatively easy to understand and is usually BINDER_WRITE_READ, which takes us to step two, binder_ioctl_write_read.
2: In binder_ioctl_write_read you need to understand the __user macro, one structure, binder_write_read, and two functions, copy_from_user and copy_to_user. These two functions are of the same flavor as the address-mapping functions from part 1. (The binder_write_read structure is pasted below.)
struct binder_write_read {
binder_size_t write_size; /* bytes to write */
binder_size_t write_consumed; /* bytes consumed by driver */
binder_uintptr_t write_buffer;
binder_size_t read_size; /* bytes to read */
binder_size_t read_consumed; /* bytes consumed by driver */
binder_uintptr_t read_buffer;
};
3: Next, focus on write_buffer and read_buffer: each is a pointer to a binder_transaction_data structure, which you can think of as the "protocol" people keep talking about. The main logic of the binder_thread_write function that follows is all about operating on binder_transaction_data. The structure is pasted below, and right after it a sketch of how a sender lays it out.
//here you can see addresses uniformly typedef'd as either 64-bit or 32-bit
#ifdef BINDER_IPC_32BIT
typedef __u32 binder_size_t;
typedef __u32 binder_uintptr_t;
#else
typedef __u64 binder_size_t;
typedef __u64 binder_uintptr_t;
#endif
// the structure binder IPC uses to carry data
struct binder_transaction_data {
/* The first two are only used for bcTRANSACTION and brTRANSACTION,
* identifying the target and contents of the transaction.
*/
union {
/* target descriptor of command transaction */
__u32 handle;
/* target descriptor of return transaction */
binder_uintptr_t ptr;
} target;
binder_uintptr_t cookie; /* target object cookie */
__u32 code; /* transaction command */
/* General information about the transaction. */
__u32 flags;
pid_t sender_pid;
uid_t sender_euid;
binder_size_t data_size; /* number of bytes of data */
binder_size_t offsets_size; /* number of bytes of offsets */
/* If this transaction is inline, the data immediately
* follows here; otherwise, it ends with a pointer to
* the data buffer.
*/
union {
struct {
/* transaction data */
binder_uintptr_t buffer;
/* offsets from buffer to flat_binder_object structs */
binder_uintptr_t offsets;
} ptr;
__u8 buf[8];
} data;
};
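To connect this struct to the get_user()/copy_from_user() calls coming up in binder_thread_write, here is a rough sketch of the buffer a sender assembles; in practice IPCThreadState::writeTransactionData() builds exactly this "u32 command + payload" stream in its mOut parcel. The names handle, method_code and parcel_data are placeholders of mine, not real identifiers:

#include <stdint.h>
#include <string.h>
#include <linux/android/binder.h>

static void fill_transaction_sketch(uint32_t handle, uint32_t method_code,
                                    void *parcel_data, size_t data_size, int oneway)
{
    struct __attribute__((packed)) { /* packed: the wire stream has no padding */
        uint32_t cmd;                       /* pulled off the stream with get_user() */
        struct binder_transaction_data tr;  /* then copied with copy_from_user() */
    } msg;

    msg.cmd = BC_TRANSACTION;
    memset(&msg.tr, 0, sizeof(msg.tr));
    msg.tr.target.handle = handle;          /* which binder_ref to route to */
    msg.tr.code = method_code;              /* the app-level code, e.g. TRANSACTION_basicTypes */
    msg.tr.flags = oneway ? TF_ONE_WAY : 0; /* the flag the AIDL proxy chose */
    msg.tr.data_size = data_size;
    msg.tr.data.ptr.buffer = (uintptr_t)parcel_data;
    /* &msg and sizeof(msg) then serve as write_buffer and write_size in the
     * struct binder_write_read handed to the BINDER_WRITE_READ ioctl */
}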
binder_thread_write
static int binder_thread_write(struct binder_proc *proc,
struct binder_thread *thread,
binder_uintptr_t binder_buffer, size_t size,
binder_size_t *consumed)
{
uint32_t cmd;
struct binder_context *context = proc->context;
void __user *buffer = (void __user *)(uintptr_t)binder_buffer; // the data being passed
void __user *ptr = buffer + *consumed; // start address of the pending data
void __user *end = buffer + size; // end address of the data
// simple bounds check on the addresses
while (ptr < end && thread->return_error.cmd == BR_OK) {
int ret;
//Cross-reference IPCThreadState.cpp or frameworks/native/cmds/servicemanager/binder.c here:
//when the binder_transaction_data structure is packed up, a cmd value is placed right before it,
//and get_user extracts that cmd from this address. get_user, unsurprisingly, is another Linux function; more on it later.
if (get_user(cmd, (uint32_t __user *)ptr))
return -EFAULT;
ptr += sizeof(uint32_t);
trace_binder_command(cmd);
if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) {
atomic_inc(&binder_stats.bc[_IOC_NR(cmd)]);
atomic_inc(&proc->stats.bc[_IOC_NR(cmd)]);
atomic_inc(&thread->stats.bc[_IOC_NR(cmd)]);
}
switch (cmd) {
case BC_INCREFS:
case BC_ACQUIRE:
case BC_RELEASE:
case BC_DECREFS: {
...
break;
}
case BC_INCREFS_DONE:
case BC_ACQUIRE_DONE: {
...
break;
}
case BC_ATTEMPT_ACQUIRE:
pr_err("BC_ATTEMPT_ACQUIRE not supported\n");
return -EINVAL;
case BC_ACQUIRE_RESULT:
pr_err("BC_ACQUIRE_RESULT not supported\n");
return -EINVAL;
case BC_FREE_BUFFER: {
...
break;
}
...
case BC_TRANSACTION_SG:
case BC_REPLY_SG: {
struct binder_transaction_data_sg tr;
if (copy_from_user(&tr, ptr, sizeof(tr)))
return -EFAULT;
ptr += sizeof(tr);
binder_transaction(proc, thread, &tr.transaction_data,
cmd == BC_REPLY_SG, tr.buffers_size);
break;
}
case BC_TRANSACTION:
case BC_REPLY: {
//there is far too much code in this function; most of it is trimmed so we can focus on the normal communication flow
struct binder_transaction_data tr;
//copy the data to be passed into kernel space
if (copy_from_user(&tr, ptr, sizeof(tr)))
return -EFAULT;
ptr += sizeof(tr);
//next, the binder_transaction function; the fourth argument indicates whether this is a reply
binder_transaction(proc, thread, &tr,
cmd == BC_REPLY, 0);
break;
}
case BC_REGISTER_LOOPER:
...
default:
pr_err("%d:%d unknown command %d\n",
proc->pid, thread->pid, cmd);
return -EINVAL;
}
*consumed = ptr - buffer;
}
return 0;
}
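One naming convention worth internalizing before the next function: BC_* ("binder command") codes flow from userspace into the driver through the write buffer and are parsed here in binder_thread_write, while BR_* ("binder return") codes flow back out through the read buffer filled by binder_thread_read. From the UAPI header (abridged):

enum binder_driver_command_protocol {
    BC_TRANSACTION = _IOW('c', 0, struct binder_transaction_data),
    BC_REPLY       = _IOW('c', 1, struct binder_transaction_data),
    /* ... */
};

enum binder_driver_return_protocol {
    BR_TRANSACTION = _IOR('r', 2, struct binder_transaction_data),
    BR_REPLY       = _IOR('r', 3, struct binder_transaction_data),
    /* ... */
};

A synchronous call is therefore BC_TRANSACTION from the sender, surfacing as BR_TRANSACTION in the target, whose BC_REPLY returns to the sender as BR_REPLY.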
binder_transaction
Now we arrive at the final, extremely long function. It takes some patience to work through this code carefully.
static void binder_transaction(struct binder_proc *proc,
struct binder_thread *thread,
struct binder_transaction_data *tr, int reply,
binder_size_t extra_buffers_size)
{
int ret;
struct binder_transaction *t; // the transaction being processed; records the data for the duration of the transaction
struct binder_work *tcomplete; // the current work item and its state
binder_size_t *offp, *off_end, *off_start;
binder_size_t off_min;
u8 *sg_bufp, *sg_buf_end;
struct binder_proc *target_proc = NULL; // target process of the transfer
struct binder_thread *target_thread = NULL; // target thread of the transfer
struct binder_node *target_node = NULL; // target node of the transfer
struct binder_transaction *in_reply_to = NULL; // used on the reply path
struct binder_transaction_log_entry *e; // transaction log entry
uint32_t return_error = 0;
uint32_t return_error_param = 0;
uint32_t return_error_line = 0;
struct binder_buffer_object *last_fixup_obj = NULL;
binder_size_t last_fixup_min_off = 0;
struct binder_context *context = proc->context;
int t_debug_id = atomic_inc_return(&binder_last_id);
char *secctx = NULL;
u32 secctx_sz = 0;
e = binder_transaction_log_add(&binder_transaction_log);
e->debug_id = t_debug_id;
e->call_type = reply ? 2 : !!(tr->flags & TF_ONE_WAY); //TF_ONE_WAY: the oneway flag from the start of this article makes its appearance
e->from_proc = proc->pid;
e->from_thread = thread->pid;
e->target_handle = tr->target.handle;
e->data_size = tr->data_size;
e->offsets_size = tr->offsets_size;
e->context_name = proc->context->name;
// the reply path; this function really is too long, so the reply path is elided for now...
if (reply) {
...
} else {
//handle is non-zero
if (tr->target.handle) {
struct binder_ref *ref;
/*
* There must already be a strong ref
* on this node. If so, do a strong
* increment on the node to ensure it
* stays alive until the transaction is
* done.
*/
//lock
binder_proc_lock(proc);
// look up the binder_ref by handle; binder_ref records node and target-node info via red-black trees
ref = binder_get_ref_olocked(proc, tr->target.handle,
true);
if (ref) {
// got the target node
target_node = binder_get_node_refs_for_txn(
ref->node, &target_proc,
&return_error);
} else {
// error path
binder_user_error("%d:%d got transaction to invalid handle\n",
proc->pid, thread->pid);
return_error = BR_FAILED_REPLY;
}
// unlock
binder_proc_unlock(proc);
} else {
//handle == 0 means the ServiceManager, as the target_node handling below also shows; skipping the details
mutex_lock(&context->context_mgr_node_lock);
target_node = context->binder_context_mgr_node;
if (target_node)
target_node = binder_get_node_refs_for_txn(
target_node, &target_proc,
&return_error);
else
return_error = BR_DEAD_REPLY;
mutex_unlock(&context->context_mgr_node_lock);
if (target_node && target_proc == proc) {
binder_user_error("%d:%d got transaction to context manager from process owning it\n",
proc->pid, thread->pid);
return_error = BR_FAILED_REPLY;
return_error_param = -EINVAL;
return_error_line = __LINE__;
goto err_invalid_target_handle;
}
}
// target node is NULL: take the dead-binder error path
if (!target_node) {
/*
* return_error is set above
*/
return_error_param = -EINVAL;
return_error_line = __LINE__;
goto err_dead_binder;
}
e->to_node = target_node->debug_id;
// permission check
if (security_binder_transaction(proc->tsk,
target_proc->tsk) < 0) {
return_error = BR_FAILED_REPLY;
return_error_param = -EPERM;
return_error_line = __LINE__;
goto err_invalid_target_handle;
}
binder_inner_proc_lock(proc);
// not oneway, and the thread already has a transaction in flight
if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) {
struct binder_transaction *tmp;
tmp = thread->transaction_stack;
if (tmp->to_thread != thread) {
spin_lock(&tmp->lock);
binder_user_error("%d:%d got new transaction with bad transaction stack, transaction %d has target %d:%d\n",
proc->pid, thread->pid, tmp->debug_id,
tmp->to_proc ? tmp->to_proc->pid : 0,
tmp->to_thread ?
tmp->to_thread->pid : 0);
spin_unlock(&tmp->lock);
binder_inner_proc_unlock(proc);
return_error = BR_FAILED_REPLY;
return_error_param = -EPROTO;
return_error_line = __LINE__;
goto err_bad_call_stack;
}
// walk all transactions on the stack
while (tmp) {
struct binder_thread *from;
spin_lock(&tmp->lock);
from = tmp->from;
if (from && from->proc == target_proc) {
atomic_inc(&from->tmp_ref);
// the target thread is settled here
target_thread = from;
spin_unlock(&tmp->lock);
break;
}
spin_unlock(&tmp->lock);
tmp = tmp->from_parent;
}
}
binder_inner_proc_unlock(proc);
}
// target thread is non-NULL
if (target_thread)
e->to_thread = target_thread->pid;
e->to_proc = target_proc->pid;
/* TODO: reuse incoming transaction for reply */
// allocate memory
t = kzalloc(sizeof(*t), GFP_KERNEL);
// allocation failed
if (t == NULL) {
return_error = BR_FAILED_REPLY;
return_error_param = -ENOMEM;
return_error_line = __LINE__;
goto err_alloc_t_failed;
}
binder_stats_created(BINDER_STAT_TRANSACTION);
spin_lock_init(&t->lock);
// allocate memory
tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
// allocation failed
if (tcomplete == NULL) {
return_error = BR_FAILED_REPLY;
return_error_param = -ENOMEM;
return_error_line = __LINE__;
goto err_alloc_tcomplete_failed;
}
binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE);
t->debug_id = t_debug_id;
...
// not a reply and the flags do not include oneway
if (!reply && !(tr->flags & TF_ONE_WAY))
t->from = thread;
else
t->from = NULL;
t->sender_euid = task_euid(proc->tsk);
t->to_proc = target_proc;
t->to_thread = target_thread;
t->code = tr->code;
t->flags = tr->flags;
if (!(t->flags & TF_ONE_WAY) &&
binder_supported_policy(current->policy)) {
/* Inherit supported policies for synchronous transactions */
t->priority.sched_policy = current->policy;
t->priority.prio = current->normal_prio;
} else {
/* Otherwise, fall back to the default priority */
t->priority = target_proc->default_priority;
}
if (target_node && target_node->txn_security_ctx) {
u32 secid;
size_t added_size;
security_task_getsecid(proc->tsk, &secid);
ret = security_secid_to_secctx(secid, &secctx, &secctx_sz);
if (ret) {
return_error = BR_FAILED_REPLY;
return_error_param = ret;
return_error_line = __LINE__;
goto err_get_secctx_failed;
}
added_size = ALIGN(secctx_sz, sizeof(u64));
extra_buffers_size += added_size;
if (extra_buffers_size < added_size) {
/* integer overflow of extra_buffers_size */
return_error = BR_FAILED_REPLY;
return_error_param = EINVAL;
return_error_line = __LINE__;
goto err_bad_extra_size;
}
}
//trace-related
trace_binder_transaction(reply, t, target_node);
t->buffer = binder_alloc_new_buf(&target_proc->alloc, tr->data_size,
tr->offsets_size, extra_buffers_size,
!reply && (t->flags & TF_ONE_WAY));
if (IS_ERR(t->buffer)) {
/*
* -ESRCH indicates VMA cleared. The target is dying.
*/
return_error_param = PTR_ERR(t->buffer);
return_error = return_error_param == -ESRCH ?
BR_DEAD_REPLY : BR_FAILED_REPLY;
return_error_line = __LINE__;
t->buffer = NULL;
goto err_binder_alloc_buf_failed;
}
//the target (typically the ServiceManager) requested the sender's security context: append it to the buffer
if (secctx) {
size_t buf_offset = ALIGN(tr->data_size, sizeof(void *)) +
ALIGN(tr->offsets_size, sizeof(void *)) +
ALIGN(extra_buffers_size, sizeof(void *)) -
ALIGN(secctx_sz, sizeof(u64));
char *kptr = t->buffer->data + buf_offset;
t->security_ctx = (uintptr_t)kptr +
binder_alloc_get_user_buffer_offset(&target_proc->alloc);
memcpy(kptr, secctx, secctx_sz);
security_release_secctx(secctx, secctx_sz);
secctx = NULL;
}
t->buffer->debug_id = t->debug_id;
t->buffer->transaction = t;
t->buffer->target_node = target_node;
trace_binder_transaction_alloc_buf(t->buffer);
off_start = (binder_size_t *)(t->buffer->data +
ALIGN(tr->data_size, sizeof(void *)));
offp = off_start;
// copy the data: important
if (copy_from_user(t->buffer->data, (const void __user *)(uintptr_t)
tr->data.ptr.buffer, tr->data_size)) {
binder_user_error("%d:%d got transaction with invalid data ptr\n",
proc->pid, thread->pid);
return_error = BR_FAILED_REPLY;
return_error_param = -EFAULT;
return_error_line = __LINE__;
goto err_copy_data_failed;
}
if (copy_from_user(offp, (const void __user *)(uintptr_t)
tr->data.ptr.offsets, tr->offsets_size)) {
binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
proc->pid, thread->pid);
return_error = BR_FAILED_REPLY;
return_error_param = -EFAULT;
return_error_line = __LINE__;
goto err_copy_data_failed;
}
if (!IS_ALIGNED(tr->offsets_size, sizeof(binder_size_t))) {
binder_user_error("%d:%d got transaction with invalid offsets size, %lld\n",
proc->pid, thread->pid, (u64)tr->offsets_size);
return_error = BR_FAILED_REPLY;
return_error_param = -EINVAL;
return_error_line = __LINE__;
goto err_bad_offset;
}
if (!IS_ALIGNED(extra_buffers_size, sizeof(u64))) {
binder_user_error("%d:%d got transaction with unaligned buffers size, %lld\n",
proc->pid, thread->pid,
(u64)extra_buffers_size);
return_error = BR_FAILED_REPLY;
return_error_param = -EINVAL;
return_error_line = __LINE__;
goto err_bad_offset;
}
off_end = (void *)off_start + tr->offsets_size;
sg_bufp = (u8 *)(PTR_ALIGN(off_end, sizeof(void *)));
sg_buf_end = sg_bufp + extra_buffers_size -
ALIGN(secctx_sz, sizeof(u64));
off_min = 0;
// the start addresses of the carried data were computed above
for (; offp < off_end; offp++) {
struct binder_object_header *hdr;
size_t object_size = binder_validate_object(t->buffer, *offp);
if (object_size == 0 || *offp < off_min) {
binder_user_error("%d:%d got transaction with invalid offset (%lld, min %lld max %lld) or object.\n",
proc->pid, thread->pid, (u64)*offp,
(u64)off_min,
(u64)t->buffer->data_size);
return_error = BR_FAILED_REPLY;
return_error_param = -EINVAL;
return_error_line = __LINE__;
goto err_bad_offset;
}
// use the offset to get the current binder object's type; not strongly tied to the main communication flow, so skim it
hdr = (struct binder_object_header *)(t->buffer->data + *offp);
off_min = *offp + object_size;
switch (hdr->type) {
case BINDER_TYPE_BINDER:
case BINDER_TYPE_WEAK_BINDER: {
struct flat_binder_object *fp;
fp = to_flat_binder_object(hdr);
ret = binder_translate_binder(fp, t, thread);
if (ret < 0) {
return_error = BR_FAILED_REPLY;
return_error_param = ret;
return_error_line = __LINE__;
goto err_translate_failed;
}
} break;
case BINDER_TYPE_HANDLE:
case BINDER_TYPE_WEAK_HANDLE: {
struct flat_binder_object *fp;
fp = to_flat_binder_object(hdr);
ret = binder_translate_handle(fp, t, thread);
if (ret < 0) {
return_error = BR_FAILED_REPLY;
return_error_param = ret;
return_error_line = __LINE__;
goto err_translate_failed;
}
} break;
case BINDER_TYPE_FD: {
struct binder_fd_object *fp = to_binder_fd_object(hdr);
int target_fd = binder_translate_fd(fp->fd, t, thread,
in_reply_to);
if (target_fd < 0) {
return_error = BR_FAILED_REPLY;
return_error_param = target_fd;
return_error_line = __LINE__;
goto err_translate_failed;
}
fp->pad_binder = 0;
fp->fd = target_fd;
} break;
case BINDER_TYPE_FDA: {
struct binder_fd_array_object *fda =
to_binder_fd_array_object(hdr);
struct binder_buffer_object *parent =
binder_validate_ptr(t->buffer, fda->parent,
off_start,
offp - off_start);
if (!parent) {
binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
proc->pid, thread->pid);
return_error = BR_FAILED_REPLY;
return_error_param = -EINVAL;
return_error_line = __LINE__;
goto err_bad_parent;
}
if (!binder_validate_fixup(t->buffer, off_start,
parent, fda->parent_offset,
last_fixup_obj,
last_fixup_min_off)) {
binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
proc->pid, thread->pid);
return_error = BR_FAILED_REPLY;
return_error_param = -EINVAL;
return_error_line = __LINE__;
goto err_bad_parent;
}
ret = binder_translate_fd_array(fda, parent, t, thread,
in_reply_to);
if (ret < 0) {
return_error = BR_FAILED_REPLY;
return_error_param = ret;
return_error_line = __LINE__;
goto err_translate_failed;
}
last_fixup_obj = parent;
last_fixup_min_off =
fda->parent_offset + sizeof(u32) * fda->num_fds;
} break;
case BINDER_TYPE_PTR: {
struct binder_buffer_object *bp =
to_binder_buffer_object(hdr);
size_t buf_left = sg_buf_end - sg_bufp;
if (bp->length > buf_left) {
binder_user_error("%d:%d got transaction with too large buffer\n",
proc->pid, thread->pid);
return_error = BR_FAILED_REPLY;
return_error_param = -EINVAL;
return_error_line = __LINE__;
goto err_bad_offset;
}
if (copy_from_user(sg_bufp,
(const void __user *)(uintptr_t)
bp->buffer, bp->length)) {
binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
proc->pid, thread->pid);
return_error_param = -EFAULT;
return_error = BR_FAILED_REPLY;
return_error_line = __LINE__;
goto err_copy_data_failed;
}
/* Fixup buffer pointer to target proc address space */
bp->buffer = (uintptr_t)sg_bufp +
binder_alloc_get_user_buffer_offset(
&target_proc->alloc);
sg_bufp += ALIGN(bp->length, sizeof(u64));
ret = binder_fixup_parent(t, thread, bp, off_start,
offp - off_start,
last_fixup_obj,
last_fixup_min_off);
if (ret < 0) {
return_error = BR_FAILED_REPLY;
return_error_param = ret;
return_error_line = __LINE__;
goto err_translate_failed;
}
last_fixup_obj = bp;
last_fixup_min_off = 0;
} break;
default:
binder_user_error("%d:%d got transaction with invalid object type, %x\n",
proc->pid, thread->pid, hdr->type);
return_error = BR_FAILED_REPLY;
return_error_param = -EINVAL;
return_error_line = __LINE__;
goto err_bad_object_type;
}
}
tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
t->work.type = BINDER_WORK_TRANSACTION;
// replies go through here
if (reply) {
binder_enqueue_thread_work(thread, tcomplete);
binder_inner_proc_lock(target_proc);
if (target_thread->is_dead) {
binder_inner_proc_unlock(target_proc);
goto err_dead_proc_or_thread;
}
BUG_ON(t->buffer->async_transaction != 0);
binder_pop_transaction_ilocked(target_thread, in_reply_to);
binder_enqueue_thread_work_ilocked(target_thread, &t->work);
binder_inner_proc_unlock(target_proc);
wake_up_interruptible_sync(&target_thread->wait);
binder_restore_priority(current, in_reply_to->saved_priority);
binder_free_transaction(in_reply_to);
// the normal transfer path, where the flag is not oneWay
} else if (!(t->flags & TF_ONE_WAY)) {
BUG_ON(t->buffer->async_transaction != 0);
binder_inner_proc_lock(proc);
/*
* Defer the TRANSACTION_COMPLETE, so we don't return to
* userspace immediately; this allows the target process to
* immediately start processing this transaction, reducing
* latency. We will then return the TRANSACTION_COMPLETE when
* the target replies (or there is an error).
*/
//queue manipulation
binder_enqueue_deferred_thread_work_ilocked(thread, tcomplete);
t->need_reply = 1;
t->from_parent = thread->transaction_stack;
thread->transaction_stack = t;
binder_inner_proc_unlock(proc);
// call binder_proc_transaction with the target process and the target thread
if (!binder_proc_transaction(t, target_proc, target_thread)) {
binder_inner_proc_lock(proc);
binder_pop_transaction_ilocked(thread, t);
binder_inner_proc_unlock(proc);
goto err_dead_proc_or_thread;
}
} else {
BUG_ON(target_node == NULL);
BUG_ON(t->buffer->async_transaction != 1);
// here is where the queueing differs from the non-oneway case
binder_enqueue_thread_work(thread, tcomplete);
// the oneway-flagged path also goes through binder_proc_transaction, but with a NULL thread
if (!binder_proc_transaction(t, target_proc, NULL))
goto err_dead_proc_or_thread;
}
if (target_thread)
binder_thread_dec_tmpref(target_thread);
binder_proc_dec_tmpref(target_proc);
if (target_node)
binder_dec_node_tmpref(target_node);
/*
* write barrier to synchronize with initialization
* of log entry
*/
smp_wmb();
WRITE_ONCE(e->debug_id_done, t_debug_id);
return;
// error-path labels
err_dead_proc_or_thread:
return_error = BR_DEAD_REPLY;
return_error_line = __LINE__;
binder_dequeue_work(proc, tcomplete);
err_translate_failed:
err_bad_object_type:
err_bad_offset:
err_bad_parent:
err_copy_data_failed:
trace_binder_transaction_failed_buffer_release(t->buffer);
binder_transaction_buffer_release(target_proc, t->buffer, offp);
if (target_node)
binder_dec_node_tmpref(target_node);
target_node = NULL;
t->buffer->transaction = NULL;
binder_alloc_free_buf(&target_proc->alloc, t->buffer);
err_binder_alloc_buf_failed:
err_bad_extra_size:
if (secctx)
security_release_secctx(secctx, secctx_sz);
err_get_secctx_failed:
kfree(tcomplete);
binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
err_alloc_tcomplete_failed:
kfree(t);
binder_stats_deleted(BINDER_STAT_TRANSACTION);
err_alloc_t_failed:
err_bad_call_stack:
err_empty_call_stack:
err_dead_binder:
err_invalid_target_handle:
if (target_thread)
binder_thread_dec_tmpref(target_thread);
if (target_proc)
binder_proc_dec_tmpref(target_proc);
if (target_node) {
binder_dec_node(target_node, 1, 0);
binder_dec_node_tmpref(target_node);
}
binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
"%d:%d transaction failed %d/%d, size %lld-%lld line %d\n",
proc->pid, thread->pid, return_error, return_error_param,
(u64)tr->data_size, (u64)tr->offsets_size,
return_error_line);
{
struct binder_transaction_log_entry *fe;
e->return_error = return_error;
e->return_error_param = return_error_param;
e->return_error_line = return_error_line;
fe = binder_transaction_log_add(&binder_transaction_log_failed);
*fe = *e;
/*
* write barrier to synchronize with initialization
* of log entry
*/
smp_wmb();
WRITE_ONCE(e->debug_id_done, t_debug_id);
WRITE_ONCE(fe->debug_id_done, t_debug_id);
}
BUG_ON(thread->return_error.cmd != BR_OK);
if (in_reply_to) {
binder_restore_priority(current, in_reply_to->saved_priority);
thread->return_error.cmd = BR_TRANSACTION_COMPLETE;
binder_enqueue_thread_work(thread, &thread->return_error.work);
binder_send_failed_reply(in_reply_to, return_error);
} else {
thread->return_error.cmd = return_error;
binder_enqueue_thread_work(thread, &thread->return_error.work);
}
}
The function above is very long, but the genuinely important points are few, and they are annotated in the code. Read it through in one go, then move on to the binder_proc_transaction function.
binder_proc_transaction
static bool binder_proc_transaction(struct binder_transaction *t,
struct binder_proc *proc,
struct binder_thread *thread)
{
struct binder_node *node = t->buffer->target_node;
struct binder_priority node_prio;
bool oneway = !!(t->flags & TF_ONE_WAY);
bool pending_async = false;
BUG_ON(!node);
binder_node_lock(node);
node_prio.prio = node->min_priority;
node_prio.sched_policy = node->sched_policy;
if (oneway) {
BUG_ON(thread);
if (node->has_async_transaction) {
pending_async = true;
} else {
node->has_async_transaction = true;
}
}
binder_inner_proc_lock(proc);
// if the target process (or target thread) is dead, return false
if (proc->is_dead || (thread && thread->is_dead)) {
binder_inner_proc_unlock(proc);
binder_node_unlock(node);
return false;
}
if (!thread && !pending_async)
thread = binder_select_thread_ilocked(proc);
if (thread) {
//adjust the priority
binder_transaction_priority(thread->task, t, node_prio,
node->inherit_rt);
// queue the work for the target thread (the actual wakeup happens below)
binder_enqueue_thread_work_ilocked(thread, &t->work);
} else if (!pending_async) {
binder_enqueue_work_ilocked(&t->work, &proc->todo);
} else {
binder_enqueue_work_ilocked(&t->work, &node->async_todo);
}
if (!pending_async)
binder_wakeup_thread_ilocked(proc, thread, !oneway /* sync */);
binder_inner_proc_unlock(proc);
binder_node_unlock(node);
return true;
}
When the target process is woken up, go back to the read side we skipped at the beginning, the binder_thread_read path inside binder_ioctl_write_read, and the logic we have been describing closes into a loop.
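As a rough sketch of that read side, modeled on binder_loop() in frameworks/native/cmds/servicemanager/binder.c, the woken thread is simply parked in an ioctl like this, blocked inside binder_thread_read until work such as our BINDER_WORK_TRANSACTION shows up:

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/android/binder.h>

static void binder_read_loop_sketch(int fd)
{
    uint32_t readbuf[32];
    struct binder_write_read bwr;

    memset(&bwr, 0, sizeof(bwr));
    for (;;) {
        bwr.read_size = sizeof(readbuf);
        bwr.read_consumed = 0;
        bwr.read_buffer = (uintptr_t)readbuf;
        ioctl(fd, BINDER_WRITE_READ, &bwr); /* blocks in binder_thread_read */
        /* handle the BR_* commands in readbuf[0 .. bwr.read_consumed) */
    }
}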
Flow recap
Let's recap the code above.
- 1: We pass in our process info, a cmd, and arg (the address of the assembled protocol data); the cmd tells the communication system (binder) what we want. Since we focused on the communication flow, we skipped flows such as BINDER_SET_CONTEXT_MGR_EXT, which registers the hotel's internal staff system (the servicemanager).
- 2: You need to know the __user macro: only after the incoming address is cast to a __user pointer can it be operated on in the shared space (kernel space) where the communication system lives.
- 3: Having found the incoming data non-empty, the communication system (binder) uses its protocol (in essence, what data sits at which address offsets) to decide whether the user wants ordinary communication or some other special service (the BC_* protocol).
- 4: For an ordinary communication request, copy_from_user copies the data into the target user's (that is, the target process's, the service's) kernel virtual address. (Recall part 1: the target process's user-space virtual address and this kernel virtual address point at the same physical pages.)
- 5: Find the target process (binder manages the different process nodes in red-black trees), then notify and wake it via the workqueue mechanism briefly described in part 1 (by putting the work on its queue).
- 6: Once woken, the target process discovers in binder_ioctl_write_read that its read_size is non-zero, as if there were mail in its mailbox, and goes down the binder_thread_read path.
Other important points
- 1: There are three "codes" in play above. The first, roughly speaking, distinguishes identity (an ordinary client cannot set itself up as the servicemanager) and also carries simple per-process binder settings.
- 2: The second code distinguishes the driver-level protocol, e.g. communication failure or success: the BC_* protocol. It is read via an address offset and is assembled for us inside IPCThreadState in frameworks.
- 3: The third code is the one we use day to day to define our own protocol; flags such as one_way, the data we pass, and our custom protocol all ride inside binder_transaction_data.
- 4: The functions worth studying carefully throughout are, at bottom, the ones that manipulate actual physical memory (see the note after this list):

Function | Purpose
---|---
kzalloc | allocate kernel virtual memory
alloc_page | allocate a physical page
map_vm_area | map a kernel virtual address onto physical pages
vm_insert_page | map a user virtual address onto a physical page
copy_from_user | copy the data at a user virtual address into a kernel virtual address
copy_to_user | copy data out to a user virtual address

- 5: one_way and non-one_way are two different communication models. There is plenty written about that online; I call it out separately only because I had not seen it explained against the source code.
- 6: Looking back over the study, the difficulty is mostly a handful of Linux macro functions and Linux basics. The overall flow is not especially complex; the complexity lies in the countless structs, which record both parties' information and all the payload for the exchange, and together form the abstract "protocol" we keep referring to.
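As a closing note on item 4, here is my paraphrase of what the buffer handling in binder_transaction showed (not literal driver code): the heart of binder's famous single copy.

/* t->buffer is carved by binder_alloc_new_buf() out of the target's mmap'd
 * region, so the same physical pages are visible at two virtual addresses:
 *   kernel:         t->buffer->data
 *   target process: t->buffer->data + binder_alloc_get_user_buffer_offset(...)
 * One copy_from_user() on the sender's side is therefore enough; the receiver
 * reads the payload in place, and no copy_to_user() of the payload is needed.
 */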
Reposted from: https://juejin.cn/post/7153568951912366110