The Android Binder Subsystem (Part 2)

Published: 2020-07-04 12:17:40    Author: 上帝之子521

        In the previous article we analyzed the overall framework of the binder driver and the whole flow of registering a service, getting a service and using a service. Next, let's look at the binder transaction stack mechanism.

        We mentioned earlier how process A and process B exchange data. The pattern is: process A first sends a BC_TRANSACTION, which the driver delivers to process B as a BR_TRANSACTION, telling B that data has arrived. Process B then sends a BC_REPLY carrying the data it wants to return, and the driver delivers it to process A as a BR_REPLY. All of this goes through the driver, and only these four command codes involve two processes; every other command is just an exchange between an app and the driver, used to change or report state.

        This raises two questions: who should the data be sent to, and who should the reply go back to? To answer them, let's first look at how test_client (A) and test_server (B) work. From the code we analyzed before, we know that svcmgr_publish calls binder_call when registering a service, and binder_call builds its arguments with cmd set to BC_TRANSACTION. So test_client first sends a BC_TRANSACTION to test_server: it builds the write-side data and finally calls ioctl with BINDER_WRITE_READ. Stepping into binder_ioctl and finding the BINDER_WRITE_READ command, the write data we built takes us into binder_thread_write; there, the BC_TRANSACTION case calls binder_transaction.
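        The BINDER_WRITE_READ branch of binder_ioctl itself is not listed in this article. The following is only a minimal sketch of what that branch does on kernels of this era (locking, error handling and the thread bookkeeping are omitted, so treat it as an illustration rather than the exact driver source): write commands are consumed first, then read commands are produced.

static int binder_ioctl_write_read_sketch(struct file *filp,
                      struct binder_proc *proc,
                      struct binder_thread *thread,
                      void __user *ubuf)
{
    struct binder_write_read bwr;

    if (copy_from_user(&bwr, ubuf, sizeof(bwr)))
        return -EFAULT;

    /* 1. Consume the BC_* commands (BC_TRANSACTION, BC_REPLY, ...). */
    if (bwr.write_size > 0) {
        int ret = binder_thread_write(proc, thread,
                (void __user *)bwr.write_buffer,
                bwr.write_size, &bwr.write_consumed);
        if (ret < 0)
            return ret;
    }

    /* 2. Produce the BR_* commands (BR_NOOP, BR_TRANSACTION, BR_REPLY, ...). */
    if (bwr.read_size > 0) {
        int ret = binder_thread_read(proc, thread,
                (void __user *)bwr.read_buffer,
                bwr.read_size, &bwr.read_consumed,
                filp->f_flags & O_NONBLOCK);
        if (ret < 0)
            return ret;
    }

    /* 3. Copy the consumed counts back so user space knows how much was handled. */
    if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
        return -EFAULT;
    return 0;
}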

        Let's step into binder_transaction and see how this is done (the key lines are excerpted right after this list; the full function is listed further below):

            a. At this point there is no nested ("two-way") transaction in progress, so the data will be placed on test_server's binder_proc.todo list rather than on a particular thread's todo list;

            b. Push onto A's stack, i.e. test_client.binder_thread.transaction_stack: .from = test_client, .to_proc = test_server, .to_thread = test_server;

            c. The data is placed on the test_server.binder_proc.todo list, and a thread sleeping on test_server.binder_proc.wait is woken up.
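        These are the lines in binder_transaction that implement steps b and c (annotated here and slightly condensed from the full listing below): remember the sender, push onto the sender's stack through from_parent, queue the work on the target's todo list and wake it up.

/* Excerpt from binder_transaction(), non-reply / non-one-way path: */
if (!reply && !(tr->flags & TF_ONE_WAY))
    t->from = thread;                        /* remember who sent it (test_client) */
/* ... */
if (!(t->flags & TF_ONE_WAY)) {              /* synchronous call */
    t->need_reply = 1;
    t->from_parent = thread->transaction_stack;
    thread->transaction_stack = t;           /* push onto the sender's stack */
}
t->work.type = BINDER_WORK_TRANSACTION;
list_add_tail(&t->work.entry, target_list);  /* here: test_server.binder_proc.todo */
tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
list_add_tail(&tcomplete->entry, &thread->todo);
if (target_wait)
    wake_up_interruptible(target_wait);      /* wake the thread sleeping on binder_proc.wait */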

        For test_server, it receives a BR_TRANSACTION, and the corresponding work is done in binder_thread_read:

            a. Take the data off the test_server.binder_proc.todo list and process it;

            b. Push onto B's stack, i.e. test_server.binder_thread.transaction_stack: .from = test_client, .to_proc = test_server, .to_thread = test_server.

        So the same binder_transaction is linked into the sender's stack through .from_parent and into the receiver's stack through .to_parent, as the excerpt below shows.
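        The receiver-side push happens at the end of binder_thread_read (full listing below); only a synchronous BR_TRANSACTION is stacked:

/* Excerpt from binder_thread_read(): */
if (cmd == BR_TRANSACTION && !(t->flags & TF_ONE_WAY)) {
    t->to_parent = thread->transaction_stack;
    t->to_thread = thread;
    thread->transaction_stack = t;           /* push onto the receiver's stack */
} else {
    t->buffer->transaction = NULL;
    kfree(t);                                /* one-way or reply: nothing to stack */
    binder_stats_deleted(BINDER_STAT_TRANSACTION);
}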

        Then B sends a BC_REPLY to A, which is again handled in binder_transaction (the relevant lines are excerpted after this list):

            a. Take the transaction off the top of test_server.binder_thread.transaction_stack and read its fields (.from, .to_proc, .to_thread); .from tells us the reply should go back to test_client;

            b. Pop test_server's stack, i.e. test_server.binder_thread.transaction_stack = NULL;

            c. copy_from_user the reply data into a buffer that belongs to test_client;

            d. Pop test_client's stack, i.e. test_client.binder_thread.transaction_stack = NULL;

            e. Put the work on the todo list and wake up the target.
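        The reply path in binder_transaction, excerpted from the listing below (slightly condensed) with comments matching the steps above:

/* Excerpt from binder_transaction(), reply path: */
if (reply) {
    in_reply_to = thread->transaction_stack;              /* a. top of test_server's stack */
    /* ... validity checks ... */
    thread->transaction_stack = in_reply_to->to_parent;   /* b. pop test_server's stack */
    target_thread = in_reply_to->from;                     /* a. .from => reply goes to test_client */
    target_proc = target_thread->proc;
}
/* ... */
copy_from_user(t->buffer->data, tr->data.ptr.buffer, tr->data_size);  /* c. data into test_client's buffer */
/* ... */
if (reply)
    binder_pop_transaction(target_thread, in_reply_to);   /* d. pop test_client's stack */
t->work.type = BINDER_WORK_TRANSACTION;
list_add_tail(&t->work.entry, target_list);               /* e. queue on the todo list */
if (target_wait)
    wake_up_interruptible(target_wait);                    /* e. and wake the target */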

        Finally, process A receives the BR_REPLY and the data is returned to user space; no stack operation is involved in this last step.

        At this point the interaction between the two processes is complete.

    The code involved above is listed below:

int svcmgr_publish(struct binder_state *bs, uint32_t target, const char *name, void *ptr)
{
    int status;
    unsigned iodata[512/4];
    struct binder_io msg, reply;

    bio_init(&msg, iodata, sizeof(iodata), 4);
    bio_put_uint32(&msg, 0);  // strict mode header
    bio_put_string16_x(&msg, SVC_MGR_NAME);
    bio_put_string16_x(&msg, name);
    bio_put_obj(&msg, ptr);

    if (binder_call(bs, &msg, &reply, target, SVC_MGR_ADD_SERVICE))    // register the service
        return -1;

    status = bio_get_uint32(&reply);

    binder_done(bs, &msg, &reply);

    return status;
}

The binder_call function:

int binder_call(struct binder_state *bs,
                struct binder_io *msg, struct binder_io *reply,
                uint32_t target, uint32_t code)
{
    int res;
    struct binder_write_read bwr;
    struct {
        uint32_t cmd;
        struct binder_transaction_data txn;
    } __attribute__((packed)) writebuf;
    unsigned readbuf[32];

    if (msg->flags & BIO_F_OVERFLOW) {
        fprintf(stderr,"binder: txn buffer overflow\n");
        goto fail;
    }

    // build the transaction data
    writebuf.cmd = BC_TRANSACTION;
    writebuf.txn.target.handle = target;
    writebuf.txn.code = code;
    writebuf.txn.flags = 0;
    writebuf.txn.data_size = msg->data - msg->data0;
    writebuf.txn.offsets_size = ((char*) msg->offs) - ((char*) msg->offs0);
    writebuf.txn.data.ptr.buffer = (uintptr_t)msg->data0;
    writebuf.txn.data.ptr.offsets = (uintptr_t)msg->offs0;

    bwr.write_size = sizeof(writebuf);
    bwr.write_consumed = 0;
    bwr.write_buffer = (uintptr_t) &writebuf;

    hexdump(msg->data0, msg->data - msg->data0);
    for (;;) {
        bwr.read_size = sizeof(readbuf);
        bwr.read_consumed = 0;
        bwr.read_buffer = (uintptr_t) readbuf;

        res = ioctl(bs->fd, BINDER_WRITE_READ, &bwr);    // send the data to the driver via ioctl

        if (res < 0) {
            fprintf(stderr,"binder: ioctl failed (%s)\n", strerror(errno));
            goto fail;
        }

        res = binder_parse(bs, reply, (uintptr_t) readbuf, bwr.read_consumed, 0);
        if (res == 0) return 0;
        if (res < 0) goto fail;
    }

fail:
    memset(reply, 0, sizeof(*reply));
    reply->flags |= BIO_F_IOERROR;
    return -1;
}
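        binder_call hands the read buffer to binder_parse, which is not listed in this article. Below is a simplified sketch, in the style of the service manager's binder.c, of how the client side consumes the BR_* commands; the BR_TRANSACTION branch and the death-notification handling are deliberately left out, so it is an illustration rather than the complete function.

/* Simplified sketch of binder_parse() as seen from binder_call():
 * walk the commands in the read buffer; keep looping on BR_NOOP and
 * BR_TRANSACTION_COMPLETE; return 0 once BR_REPLY has been decoded
 * into 'bio' (the caller's 'reply'); return <0 on failure. */
int binder_parse(struct binder_state *bs, struct binder_io *bio,
                 uintptr_t ptr, size_t size, binder_handler func)
{
    uintptr_t end = ptr + (uintptr_t)size;

    /* 'func' is only needed for the BR_TRANSACTION branch, omitted here. */
    while (ptr < end) {
        uint32_t cmd = *(uint32_t *)ptr;
        ptr += sizeof(uint32_t);
        switch (cmd) {
        case BR_NOOP:                        /* nothing to do */
        case BR_TRANSACTION_COMPLETE:        /* driver accepted our BC_TRANSACTION */
            break;
        case BR_REPLY: {
            struct binder_transaction_data *txn =
                (struct binder_transaction_data *)ptr;
            if (bio)
                bio_init_from_txn(bio, txn); /* fill the caller's reply */
            ptr += sizeof(*txn);
            return 0;                        /* binder_call() returns success */
        }
        case BR_DEAD_REPLY:
        case BR_FAILED_REPLY:
            return -1;                       /* binder_call() fails */
        default:
            return -1;                       /* unexpected command */
        }
    }
    return 1;                                /* no reply yet: binder_call() loops again */
}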

The binder_transaction function:

static void binder_transaction(struct binder_proc *proc,
                   struct binder_thread *thread,
                   struct binder_transaction_data *tr, int reply)
{
    struct binder_transaction *t;
    struct binder_work *tcomplete;
    size_t *offp, *off_end;
    size_t off_min;
    struct binder_proc *target_proc;
    struct binder_thread *target_thread = NULL;
    struct binder_node *target_node = NULL;
    struct list_head *target_list;
    wait_queue_head_t *target_wait;
    struct binder_transaction *in_reply_to = NULL;
    struct binder_transaction_log_entry *e;
    uint32_t return_error = BR_OK;

    e = binder_transaction_log_add(&binder_transaction_log);
    e->call_type = reply ? 2 : !!(tr->flags & TF_ONE_WAY);
    e->from_proc = proc->pid;
    e->from_thread = thread->pid;
    e->target_handle = tr->target.handle;
    e->data_size = tr->data_size;
    e->offsets_size = tr->offsets_size;

    if (reply) {
        in_reply_to = thread->transaction_stack;
        if (in_reply_to == NULL) {
            binder_user_error("binder: %d:%d got reply transaction "
                      "with no transaction stack\n",
                      proc->pid, thread->pid);
            return_error = BR_FAILED_REPLY;
            goto err_empty_call_stack;
        }
        binder_set_nice(in_reply_to->saved_priority);
        if (in_reply_to->to_thread != thread) {
            binder_user_error("binder: %d:%d got reply transaction "
                "with bad transaction stack,"
                " transaction %d has target %d:%d\n",
                proc->pid, thread->pid, in_reply_to->debug_id,
                in_reply_to->to_proc ?
                in_reply_to->to_proc->pid : 0,
                in_reply_to->to_thread ?
                in_reply_to->to_thread->pid : 0);
            return_error = BR_FAILED_REPLY;
            in_reply_to = NULL;
            goto err_bad_call_stack;
        }
        thread->transaction_stack = in_reply_to->to_parent;
        target_thread = in_reply_to->from;
        if (target_thread == NULL) {
            return_error = BR_DEAD_REPLY;
            goto err_dead_binder;
        }
        if (target_thread->transaction_stack != in_reply_to) {
            binder_user_error("binder: %d:%d got reply transaction "
                "with bad target transaction stack %d, "
                "expected %d\n",
                proc->pid, thread->pid,
                target_thread->transaction_stack ?
                target_thread->transaction_stack->debug_id : 0,
                in_reply_to->debug_id);
            return_error = BR_FAILED_REPLY;
            in_reply_to = NULL;
            target_thread = NULL;
            goto err_dead_binder;
        }
        target_proc = target_thread->proc;
    } else {
        if (tr->target.handle) {
            struct binder_ref *ref;
            ref = binder_get_ref(proc, tr->target.handle);
            if (ref == NULL) {
                binder_user_error("binder: %d:%d got "
                    "transaction to invalid handle\n",
                    proc->pid, thread->pid);
                return_error = BR_FAILED_REPLY;
                goto err_invalid_target_handle;
            }
            target_node = ref->node;
        } else {
            target_node = binder_context_mgr_node;
            if (target_node == NULL) {
                return_error = BR_DEAD_REPLY;
                goto err_no_context_mgr_node;
            }
        }
        e->to_node = target_node->debug_id;
        target_proc = target_node->proc;
        if (target_proc == NULL) {
            return_error = BR_DEAD_REPLY;
            goto err_dead_binder;
        }
        if (security_binder_transaction(proc->tsk, target_proc->tsk) < 0) {
            return_error = BR_FAILED_REPLY;
            goto err_invalid_target_handle;
        }
        if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) {
            struct binder_transaction *tmp;
            tmp = thread->transaction_stack;
            if (tmp->to_thread != thread) {
                binder_user_error("binder: %d:%d got new "
                    "transaction with bad transaction stack"
                    ", transaction %d has target %d:%d\n",
                    proc->pid, thread->pid, tmp->debug_id,
                    tmp->to_proc ? tmp->to_proc->pid : 0,
                    tmp->to_thread ?
                    tmp->to_thread->pid : 0);
                return_error = BR_FAILED_REPLY;
                goto err_bad_call_stack;
            }
            while (tmp) {
                if (tmp->from && tmp->from->proc == target_proc)
                    target_thread = tmp->from;
                tmp = tmp->from_parent;
            }
        }
    }
    if (target_thread) {
        e->to_thread = target_thread->pid;
        target_list = &target_thread->todo;
        target_wait = &target_thread->wait;
    } else {
        target_list = &target_proc->todo;
        target_wait = &target_proc->wait;
    }
    e->to_proc = target_proc->pid;

    /* TODO: reuse incoming transaction for reply */
    t = kzalloc(sizeof(*t), GFP_KERNEL);
    if (t == NULL) {
        return_error = BR_FAILED_REPLY;
        goto err_alloc_t_failed;
    }
    binder_stats_created(BINDER_STAT_TRANSACTION);

    tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
    if (tcomplete == NULL) {
        return_error = BR_FAILED_REPLY;
        goto err_alloc_tcomplete_failed;
    }
    binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE);

    t->debug_id = ++binder_last_id;
    e->debug_id = t->debug_id;

    if (reply)
        binder_debug(BINDER_DEBUG_TRANSACTION,
                 "binder: %d:%d BC_REPLY %d -> %d:%d, "
                 "data %p-%p size %zd-%zd\n",
                 proc->pid, thread->pid, t->debug_id,
                 target_proc->pid, target_thread->pid,
                 tr->data.ptr.buffer, tr->data.ptr.offsets,
                 tr->data_size, tr->offsets_size);
    else
        binder_debug(BINDER_DEBUG_TRANSACTION,
                 "binder: %d:%d BC_TRANSACTION %d -> "
                 "%d - node %d, data %p-%p size %zd-%zd\n",
                 proc->pid, thread->pid, t->debug_id,
                 target_proc->pid, target_node->debug_id,
                 tr->data.ptr.buffer, tr->data.ptr.offsets,
                 tr->data_size, tr->offsets_size);

    if (!reply && !(tr->flags & TF_ONE_WAY))
        t->from = thread;
    else
        t->from = NULL;
#if defined(CONFIG_MACH_P4NOTE) || defined(CONFIG_MACH_SP7160LTE) || defined(CONFIG_MACH_TAB3) || defined(CONFIG_MACH_KONA)
    /* workaround code for invalid binder proc */
    if (!proc->tsk) {
        binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
                 "binder: %d:%d invalid proc\n",
                 proc->pid, thread->pid);
        return_error = BR_FAILED_REPLY;
        goto err_binder_alloc_buf_failed;
    }
#endif
    t->sender_euid = proc->tsk->cred->euid;
    t->to_proc = target_proc;
    t->to_thread = target_thread;
    t->code = tr->code;
    t->flags = tr->flags;
    t->priority = task_nice(current);
    t->buffer = binder_alloc_buf(target_proc, tr->data_size,
        tr->offsets_size, !reply && (t->flags & TF_ONE_WAY));
    if (t->buffer == NULL) {
        return_error = BR_FAILED_REPLY;
        goto err_binder_alloc_buf_failed;
    }
    t->buffer->allow_user_free = 0;
    t->buffer->debug_id = t->debug_id;
    t->buffer->transaction = t;
    t->buffer->target_node = target_node;
    if (target_node)
        binder_inc_node(target_node, 1, 0, NULL);

    offp = (size_t *)(t->buffer->data + ALIGN(tr->data_size, sizeof(void *)));

    if (copy_from_user(t->buffer->data, tr->data.ptr.buffer, tr->data_size)) {
        binder_user_error("binder: %d:%d got transaction with invalid "
            "data ptr\n", proc->pid, thread->pid);
        return_error = BR_FAILED_REPLY;
        goto err_copy_data_failed;
    }
    if (copy_from_user(offp, tr->data.ptr.offsets, tr->offsets_size)) {
        binder_user_error("binder: %d:%d got transaction with invalid "
            "offsets ptr\n", proc->pid, thread->pid);
        return_error = BR_FAILED_REPLY;
        goto err_copy_data_failed;
    }
    if (!IS_ALIGNED(tr->offsets_size, sizeof(size_t))) {
        binder_user_error("binder: %d:%d got transaction with "
            "invalid offsets size, %zd\n",
            proc->pid, thread->pid, tr->offsets_size);
        return_error = BR_FAILED_REPLY;
        goto err_bad_offset;
    }
    off_end = (void *)offp + tr->offsets_size;
    off_min = 0;
    for (; offp < off_end; offp++) {
        struct flat_binder_object *fp;
        if (*offp > t->buffer->data_size - sizeof(*fp) ||
            *offp < off_min ||
            t->buffer->data_size < sizeof(*fp) ||
            !IS_ALIGNED(*offp, sizeof(void *))) {
            binder_user_error("%d:%d got transaction with invalid offset, %zd (min %zd, max %zd)\n",
                               proc->pid, thread->pid, *offp, off_min,
                               (t->buffer->data_size - sizeof(*fp)));
            return_error = BR_FAILED_REPLY;
            goto err_bad_offset;
        }
        fp = (struct flat_binder_object *)(t->buffer->data + *offp);
        off_min = *offp + sizeof(struct flat_binder_object);
        switch (fp->type) {
        case BINDER_TYPE_BINDER:
        case BINDER_TYPE_WEAK_BINDER: {
            struct binder_ref *ref;
            struct binder_node *node = binder_get_node(proc, fp->binder);
            if (node == NULL) {
                node = binder_new_node(proc, fp->binder, fp->cookie);
                if (node == NULL) {
                    return_error = BR_FAILED_REPLY;
                    goto err_binder_new_node_failed;
                }
                node->min_priority = fp->flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
                node->accept_fds = !!(fp->flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
            }
            if (fp->cookie != node->cookie) {
                binder_user_error("binder: %d:%d sending u%p "
                    "node %d, cookie mismatch %p != %p\n",
                    proc->pid, thread->pid,
                    fp->binder, node->debug_id,
                    fp->cookie, node->cookie);
                goto err_binder_get_ref_for_node_failed;
            }
            if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) {
                return_error = BR_FAILED_REPLY;
                goto err_binder_get_ref_for_node_failed;
            }
            ref = binder_get_ref_for_node(target_proc, node);
            if (ref == NULL) {
                return_error = BR_FAILED_REPLY;
                goto err_binder_get_ref_for_node_failed;
            }
            if (fp->type == BINDER_TYPE_BINDER)
                fp->type = BINDER_TYPE_HANDLE;
            else
                fp->type = BINDER_TYPE_WEAK_HANDLE;
            fp->handle = ref->desc;
            binder_inc_ref(ref, fp->type == BINDER_TYPE_HANDLE,
                       &thread->todo);

            binder_debug(BINDER_DEBUG_TRANSACTION,
                     "        node %d u%p -> ref %d desc %d\n",
                     node->debug_id, node->ptr, ref->debug_id,
                     ref->desc);
        } break;
        case BINDER_TYPE_HANDLE:
        case BINDER_TYPE_WEAK_HANDLE: {
            struct binder_ref *ref = binder_get_ref(proc, fp->handle);
            if (ref == NULL) {
                binder_user_error("binder: %d:%d got "
                    "transaction with invalid "
                    "handle, %ld\n", proc->pid,
                    thread->pid, fp->handle);
                return_error = BR_FAILED_REPLY;
                goto err_binder_get_ref_failed;
            }
            if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) {
                return_error = BR_FAILED_REPLY;
                goto err_binder_get_ref_failed;
            }
            if (ref->node->proc == target_proc) {
                if (fp->type == BINDER_TYPE_HANDLE)
                    fp->type = BINDER_TYPE_BINDER;
                else
                    fp->type = BINDER_TYPE_WEAK_BINDER;
                fp->binder = ref->node->ptr;
                fp->cookie = ref->node->cookie;
                binder_inc_node(ref->node, fp->type == BINDER_TYPE_BINDER, 0, NULL);
                binder_debug(BINDER_DEBUG_TRANSACTION,
                         "        ref %d desc %d -> node %d u%p\n",
                         ref->debug_id, ref->desc, ref->node->debug_id,
                         ref->node->ptr);
            } else {
                struct binder_ref *new_ref;
                new_ref = binder_get_ref_for_node(target_proc, ref->node);
                if (new_ref == NULL) {
                    return_error = BR_FAILED_REPLY;
                    goto err_binder_get_ref_for_node_failed;
                }
                fp->handle = new_ref->desc;
                binder_inc_ref(new_ref, fp->type == BINDER_TYPE_HANDLE, NULL);
                binder_debug(BINDER_DEBUG_TRANSACTION,
                         "        ref %d desc %d -> ref %d desc %d (node %d)\n",
                         ref->debug_id, ref->desc, new_ref->debug_id,
                         new_ref->desc, ref->node->debug_id);
            }
        } break;

        case BINDER_TYPE_FD: {
            int target_fd;
            struct file *file;

            if (reply) {
                if (!(in_reply_to->flags & TF_ACCEPT_FDS)) {
                    binder_user_error("binder: %d:%d got reply with fd, %ld, but target does not allow fds\n",
                        proc->pid, thread->pid, fp->handle);
                    return_error = BR_FAILED_REPLY;
                    goto err_fd_not_allowed;
                }
            } else if (!target_node->accept_fds) {
                binder_user_error("binder: %d:%d got transaction with fd, %ld, but target does not allow fds\n",
                    proc->pid, thread->pid, fp->handle);
                return_error = BR_FAILED_REPLY;
                goto err_fd_not_allowed;
            }

            file = fget(fp->handle);
            if (file == NULL) {
                binder_user_error("binder: %d:%d got transaction with invalid fd, %ld\n",
                    proc->pid, thread->pid, fp->handle);
                return_error = BR_FAILED_REPLY;
                goto err_fget_failed;
            }
            if (security_binder_transfer_file(proc->tsk, target_proc->tsk, file) < 0) {
                fput(file);
                return_error = BR_FAILED_REPLY;
                goto err_get_unused_fd_failed;
            }
            target_fd = task_get_unused_fd_flags(target_proc, O_CLOEXEC);
            if (target_fd < 0) {
                fput(file);
                return_error = BR_FAILED_REPLY;
                goto err_get_unused_fd_failed;
            }
            task_fd_install(target_proc, target_fd, file);
            binder_debug(BINDER_DEBUG_TRANSACTION,
                     "        fd %ld -> %d\n", fp->handle, target_fd);
            /* TODO: fput? */
            fp->handle = target_fd;
        } break;

        default:
            binder_user_error("binder: %d:%d got transactio"
                "n with invalid object type, %lx\n",
                proc->pid, thread->pid, fp->type);
            return_error = BR_FAILED_REPLY;
            goto err_bad_object_type;
        }
    }
    if (reply) {
        BUG_ON(t->buffer->async_transaction != 0);
        binder_pop_transaction(target_thread, in_reply_to);
    } else if (!(t->flags & TF_ONE_WAY)) {
        BUG_ON(t->buffer->async_transaction != 0);
        t->need_reply = 1;
        t->from_parent = thread->transaction_stack;
        thread->transaction_stack = t;
    } else {
        BUG_ON(target_node == NULL);
        BUG_ON(t->buffer->async_transaction != 1);
        if (target_node->has_async_transaction) {
            target_list = &target_node->async_todo;
            target_wait = NULL;
        } else
            target_node->has_async_transaction = 1;
    }
    t->work.type = BINDER_WORK_TRANSACTION;
    list_add_tail(&t->work.entry, target_list);
    tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
    list_add_tail(&tcomplete->entry, &thread->todo);
    if (target_wait)
        wake_up_interruptible(target_wait);
    return;

err_get_unused_fd_failed:
err_fget_failed:
err_fd_not_allowed:
err_binder_get_ref_for_node_failed:
err_binder_get_ref_failed:
err_binder_new_node_failed:
err_bad_object_type:
err_bad_offset:
err_copy_data_failed:
    binder_transaction_buffer_release(target_proc, t->buffer, offp);
    t->buffer->transaction = NULL;
    binder_free_buf(target_proc, t->buffer);
err_binder_alloc_buf_failed:
    kfree(tcomplete);
    binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
err_alloc_tcomplete_failed:
    kfree(t);
    binder_stats_deleted(BINDER_STAT_TRANSACTION);
err_alloc_t_failed:
err_bad_call_stack:
err_empty_call_stack:
err_dead_binder:
err_invalid_target_handle:
err_no_context_mgr_node:
    binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
             "binder: %d:%d transaction failed %d, size %zd-%zd\n",
             proc->pid, thread->pid, return_error,
             tr->data_size, tr->offsets_size);

    {
        struct binder_transaction_log_entry *fe;
        fe = binder_transaction_log_add(&binder_transaction_log_failed);
        *fe = *e;
    }

    BUG_ON(thread->return_error != BR_OK);
    if (in_reply_to) {
        thread->return_error = BR_TRANSACTION_COMPLETE;
        binder_send_failed_reply(in_reply_to, return_error);
    } else
        thread->return_error = return_error;
}

The binder_thread_write function:

int binder_thread_write(struct binder_proc *proc, struct binder_thread *thread,
            void __user *buffer, int size, signed long *consumed)
{
    uint32_t cmd;
    void __user *ptr = buffer + *consumed;
    void __user *end = buffer + size;

    while (ptr < end && thread->return_error == BR_OK) {
        if (get_user(cmd, (uint32_t __user *)ptr))
            return -EFAULT;
        ptr += sizeof(uint32_t);
        if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) {
            binder_stats.bc[_IOC_NR(cmd)]++;
            proc->stats.bc[_IOC_NR(cmd)]++;
            thread->stats.bc[_IOC_NR(cmd)]++;
        }
        switch (cmd) {
        /* unrelated code omitted */
        case BC_TRANSACTION:
        case BC_REPLY: {
            struct binder_transaction_data tr;

            if (copy_from_user(&tr, ptr, sizeof(tr)))
                return -EFAULT;
            ptr += sizeof(tr);
            binder_transaction(proc, thread, &tr, cmd == BC_REPLY);
            break;
        }
        /* unrelated code omitted */
        default:
            printk(KERN_ERR "binder: %d:%d unknown command %d\n",
                   proc->pid, thread->pid, cmd);
            return -EINVAL;
        }
        *consumed = ptr - buffer;
    }
    return 0;
}

The binder_thread_read function:

static int binder_thread_read(struct binder_proc *proc,
                  struct binder_thread *thread,
                  void  __user *buffer, int size,
                  signed long *consumed, int non_block)
{
    void __user *ptr = buffer + *consumed;
    void __user *end = buffer + size;

    int ret = 0;
    int wait_for_proc_work;

    if (*consumed == 0) {
        if (put_user(BR_NOOP, (uint32_t __user *)ptr))
            return -EFAULT;
        ptr += sizeof(uint32_t);
    }

retry:
    wait_for_proc_work = thread->transaction_stack == NULL &&
                list_empty(&thread->todo);

    if (thread->return_error != BR_OK && ptr < end) {
        if (thread->return_error2 != BR_OK) {
            if (put_user(thread->return_error2, (uint32_t __user *)ptr))
                return -EFAULT;
            ptr += sizeof(uint32_t);
            if (ptr == end)
                goto done;
            thread->return_error2 = BR_OK;
        }
        if (put_user(thread->return_error, (uint32_t __user *)ptr))
            return -EFAULT;
        ptr += sizeof(uint32_t);
        thread->return_error = BR_OK;
        goto done;
    }


    thread->looper |= BINDER_LOOPER_STATE_WAITING;
    if (wait_for_proc_work)
        proc->ready_threads++;

    binder_unlock(__func__);

    if (wait_for_proc_work) {
        if (!(thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
                    BINDER_LOOPER_STATE_ENTERED))) {
            binder_user_error("binder: %d:%d ERROR: Thread waiting "
                "for process work before calling BC_REGISTER_"
                "LOOPER or BC_ENTER_LOOPER (state %x)\n",
                proc->pid, thread->pid, thread->looper);
            wait_event_interruptible(binder_user_error_wait,
                         binder_stop_on_user_error < 2);
        }
        binder_set_nice(proc->default_priority);
        if (non_block) {
            if (!binder_has_proc_work(proc, thread))
                ret = -EAGAIN;
        } else
            ret = wait_event_interruptible_exclusive(proc->wait, binder_has_proc_work(proc, thread));
    } else {
        if (non_block) {
            if (!binder_has_thread_work(thread))
                ret = -EAGAIN;
        } else
            ret = wait_event_interruptible(thread->wait, binder_has_thread_work(thread));
    }

    binder_lock(__func__);

    if (wait_for_proc_work)
        proc->ready_threads--;
    thread->looper &= ~BINDER_LOOPER_STATE_WAITING;

    if (ret)
        return ret;

    while (1) {
        uint32_t cmd;
        struct binder_transaction_data tr;
        struct binder_work *w;
        struct binder_transaction *t = NULL;

        if (!list_empty(&thread->todo))
            w = list_first_entry(&thread->todo, struct binder_work, entry);
        else if (!list_empty(&proc->todo) && wait_for_proc_work)
            w = list_first_entry(&proc->todo, struct binder_work, entry);
        else {
            if (ptr - buffer == 4 && !(thread->looper & BINDER_LOOPER_STATE_NEED_RETURN)) /* no data added */
                goto retry;
            break;
        }

        if (end - ptr < sizeof(tr) + 4)
            break;

        switch (w->type) {
        case BINDER_WORK_TRANSACTION: {
            t = container_of(w, struct binder_transaction, work);
        } break;
        case BINDER_WORK_TRANSACTION_COMPLETE: {
            cmd = BR_TRANSACTION_COMPLETE;
            if (put_user(cmd, (uint32_t __user *)ptr))
                return -EFAULT;
            ptr += sizeof(uint32_t);

            binder_stat_br(proc, thread, cmd);
            binder_debug(BINDER_DEBUG_TRANSACTION_COMPLETE,
                     "binder: %d:%d BR_TRANSACTION_COMPLETE\n",
                     proc->pid, thread->pid);

            list_del(&w->entry);
            kfree(w);
            binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
        } break;
        case BINDER_WORK_NODE: {
            struct binder_node *node = container_of(w, struct binder_node, work);
            uint32_t cmd = BR_NOOP;
            const char *cmd_name;
            int strong = node->internal_strong_refs || node->local_strong_refs;
            int weak = !hlist_empty(&node->refs) || node->local_weak_refs || strong;
            if (weak && !node->has_weak_ref) {
                cmd = BR_INCREFS;
                cmd_name = "BR_INCREFS";
                node->has_weak_ref = 1;
                node->pending_weak_ref = 1;
                node->local_weak_refs++;
            } else if (strong && !node->has_strong_ref) {
                cmd = BR_ACQUIRE;
                cmd_name = "BR_ACQUIRE";
                node->has_strong_ref = 1;
                node->pending_strong_ref = 1;
                node->local_strong_refs++;
            } else if (!strong && node->has_strong_ref) {
                cmd = BR_RELEASE;
                cmd_name = "BR_RELEASE";
                node->has_strong_ref = 0;
            } else if (!weak && node->has_weak_ref) {
                cmd = BR_DECREFS;
                cmd_name = "BR_DECREFS";
                node->has_weak_ref = 0;
            }
            if (cmd != BR_NOOP) {
                if (put_user(cmd, (uint32_t __user *)ptr))
                    return -EFAULT;
                ptr += sizeof(uint32_t);
                if (put_user(node->ptr, (void * __user *)ptr))
                    return -EFAULT;
                ptr += sizeof(void *);
                if (put_user(node->cookie, (void * __user *)ptr))
                    return -EFAULT;
                ptr += sizeof(void *);

                binder_stat_br(proc, thread, cmd);
                binder_debug(BINDER_DEBUG_USER_REFS,
                         "binder: %d:%d %s %d u%p c%p\n",
                         proc->pid, thread->pid, cmd_name, node->debug_id, node->ptr, node->cookie);
            } else {
                list_del_init(&w->entry);
                if (!weak && !strong) {
                    binder_debug(BINDER_DEBUG_INTERNAL_REFS,
                             "binder: %d:%d node %d u%p c%p deleted\n",
                             proc->pid, thread->pid, node->debug_id,
                             node->ptr, node->cookie);
                    rb_erase(&node->rb_node, &proc->nodes);
                    kfree(node);
                    binder_stats_deleted(BINDER_STAT_NODE);
                } else {
                    binder_debug(BINDER_DEBUG_INTERNAL_REFS,
                             "binder: %d:%d node %d u%p c%p state unchanged\n",
                             proc->pid, thread->pid, node->debug_id, node->ptr,
                             node->cookie);
                }
            }
        } break;
        case BINDER_WORK_DEAD_BINDER:
        case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
        case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
            struct binder_ref_death *death;
            uint32_t cmd;

            death = container_of(w, struct binder_ref_death, work);
            if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION)
                cmd = BR_CLEAR_DEATH_NOTIFICATION_DONE;
            else
                cmd = BR_DEAD_BINDER;
            if (put_user(cmd, (uint32_t __user *)ptr))
                return -EFAULT;
            ptr += sizeof(uint32_t);
            if (put_user(death->cookie, (void * __user *)ptr))
                return -EFAULT;
            ptr += sizeof(void *);
            binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
                     "binder: %d:%d %s %p\n",
                      proc->pid, thread->pid,
                      cmd == BR_DEAD_BINDER ?
                      "BR_DEAD_BINDER" :
                      "BR_CLEAR_DEATH_NOTIFICATION_DONE",
                      death->cookie);

            if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) {
                list_del(&w->entry);
                kfree(death);
                binder_stats_deleted(BINDER_STAT_DEATH);
            } else
                list_move(&w->entry, &proc->delivered_death);
            if (cmd == BR_DEAD_BINDER)
                goto done; /* DEAD_BINDER notifications can cause transactions */
        } break;
        }

        if (!t)
            continue;

        BUG_ON(t->buffer == NULL);
        if (t->buffer->target_node) {
            struct binder_node *target_node = t->buffer->target_node;
            tr.target.ptr = target_node->ptr;
            tr.cookie =  target_node->cookie;
            t->saved_priority = task_nice(current);
            if (t->priority < target_node->min_priority &&
                !(t->flags & TF_ONE_WAY))
                binder_set_nice(t->priority);
            else if (!(t->flags & TF_ONE_WAY) ||
                 t->saved_priority > target_node->min_priority)
                binder_set_nice(target_node->min_priority);
            cmd = BR_TRANSACTION;
        } else {
            tr.target.ptr = NULL;
            tr.cookie = NULL;
            cmd = BR_REPLY;
        }
        tr.code = t->code;
        tr.flags = t->flags;
        tr.sender_euid = t->sender_euid;

        if (t->from) {
            struct task_struct *sender = t->from->proc->tsk;
            tr.sender_pid = task_tgid_nr_ns(sender,
                            current->nsproxy->pid_ns);
        } else {
            tr.sender_pid = 0;
        }

        tr.data_size = t->buffer->data_size;
        tr.offsets_size = t->buffer->offsets_size;
        tr.data.ptr.buffer = (void *)t->buffer->data +
                    proc->user_buffer_offset;
        tr.data.ptr.offsets = tr.data.ptr.buffer +
                    ALIGN(t->buffer->data_size,
                        sizeof(void *));

        if (put_user(cmd, (uint32_t __user *)ptr))
            return -EFAULT;
        ptr += sizeof(uint32_t);
        if (copy_to_user(ptr, &tr, sizeof(tr)))
            return -EFAULT;
        ptr += sizeof(tr);

        binder_stat_br(proc, thread, cmd);
        binder_debug(BINDER_DEBUG_TRANSACTION,
                 "binder: %d:%d %s %d %d:%d, cmd %d"
                 "size %zd-%zd ptr %p-%p\n",
                 proc->pid, thread->pid,
                 (cmd == BR_TRANSACTION) ? "BR_TRANSACTION" :
                 "BR_REPLY",
                 t->debug_id, t->from ? t->from->proc->pid : 0,
                 t->from ? t->from->pid : 0, cmd,
                 t->buffer->data_size, t->buffer->offsets_size,
                 tr.data.ptr.buffer, tr.data.ptr.offsets);

        list_del(&t->work.entry);
        t->buffer->allow_user_free = 1;
        if (cmd == BR_TRANSACTION && !(t->flags & TF_ONE_WAY)) {
            t->to_parent = thread->transaction_stack;
            t->to_thread = thread;
            thread->transaction_stack = t;
        } else {
            t->buffer->transaction = NULL;
            kfree(t);
            binder_stats_deleted(BINDER_STAT_TRANSACTION);
        }
        break;
    }

done:

    *consumed = ptr - buffer;
    if (proc->requested_threads + proc->ready_threads == 0 &&
        proc->requested_threads_started < proc->max_threads &&
        (thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
         BINDER_LOOPER_STATE_ENTERED)) /* the user-space code fails to */
         /*spawn a new thread if we leave this out */) {
        proc->requested_threads++;
        binder_debug(BINDER_DEBUG_THREADS,
                 "binder: %d:%d BR_SPAWN_LOOPER\n",
                 proc->pid, thread->pid);
        if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer))
            return -EFAULT;
    }
    return 0;
}

        What we analyzed above is the basic binder transaction stack mechanism; next let's look at how the transaction stack handles nested, two-way calls between services.

        Suppose there are three processes: P1 provides service S1, with threads t1, t1', ...; P2 provides service S2, with threads t2, t2', ...; P3 provides service S3, with threads t3, t3', .... Assume the call chain is t1 ==> t2 ==> t3, and t3 now has to send data onward, back into P1. Who should t3 send it to? Should the data go onto P1's binder_proc.todo list, so that P1 handles it with a new thread t1', or onto t1's binder_thread.todo list, so that t1 itself handles it?

        From process A to process B:

            1. Process A sends BC_TRANSACTION; the TR is pushed onto A's stack via from_parent;

            2. Process B receives BR_TRANSACTION; the TR is pushed onto B's stack via to_parent;

            3. Process B sends BC_REPLY; the TR is popped from B's stack via to_parent and popped from A's stack via from_parent;

            4. Process A receives BR_REPLY.

        From t1 ==> t2: t1.sp -> TR1.from_parent -> NULL;

            TR1.from = t1, TR1.to_proc = P2;

        From t2 ==> t3: first t2 receives TR1, so t2.sp -> TR1.to_parent -> NULL; then t2 sends TR2, so t2.sp -> TR2.from_parent -> TR1.to_parent -> NULL;

            TR2.from = t2, TR2.to_proc = P3;

        From t3 ==> ?: t3 receives TR2, so t3.sp -> TR2.to_parent -> NULL. Before answering where t3 should send its data, let's analyze the following piece of binder_transaction:

while (tmp) {
    if (tmp->from && tmp->from->proc == target_proc)
        target_thread = tmp->from;
    tmp = tmp->from_parent;
}

            a. tmp = TR2: TR2->from = t2, and t2->proc = P2, which is not the target P1, so keep walking;

            b. tmp = tmp->from_parent = TR1;

            c. TR1->from = t1, and t1->proc = P1 == target_proc, so target_thread = t1.

        From this we know that t3 sends its data to t1 (the loop walked the chain TR2.from_parent -> TR1, TR1.from_parent -> NULL). After t3 sends TR3, its stack is t3.sp -> TR3.from_parent -> TR2.to_parent -> NULL;

        Then t1 receives t3's BR_TRANSACTION, and t1.sp -> TR3.to_parent -> TR1.from_parent -> NULL;

        t1 sends BC_REPLY: TR3 is popped from t1's stack, leaving t1.sp -> TR1.from_parent -> NULL, and popped from t3's stack, leaving t3.sp -> TR2.to_parent -> NULL;

        t3 receives the BR_REPLY, finishes handling TR2 and sends BC_REPLY. Its stack is t3.sp -> TR2.to_parent -> NULL; the top of t3.sp is TR2, and TR2.from = t2, so the reply goes to t2. Popping both stacks leaves t3.sp -> NULL and t2.sp -> TR1.to_parent -> NULL.

        t2 receives the BR_REPLY, finishes handling TR1 and sends BC_REPLY. The top of t2.sp is TR1, and TR1.from = t1, so the reply goes to t1; after popping, t2.sp = NULL and t1.sp = NULL;

        t1 receives the BR_REPLY, and the whole chain is done.

        

        Now, how does the multithreading of a binder server work? The client sends requests and the server provides the service; when the server cannot keep up, new threads are created:

            1. The driver decides that the server is "too busy";

            2. The driver asks the app to create a new thread;

            3. The app creates the new thread.

        The code (from the end of binder_thread_read above) is:

if (proc->requested_threads + proc->ready_threads == 0 &&
    proc->requested_threads_started < proc->max_threads &&
    (thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
    BINDER_LOOPER_STATE_ENTERED)) /* the user-space code fails to */
     /*spawn a new thread if we leave this out */) {
    proc->requested_threads++;
    binder_debug(BINDER_DEBUG_THREADS,
         "binder: %d:%d BR_SPAWN_LOOPER\n",
         proc->pid, thread->pid);
    if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer))
        return -EFAULT;
}

    The driver sends the app a "spawn a new thread" request (BR_SPAWN_LOOPER) only when all of the following hold:

        1. proc->requested_threads == 0: there is no spawn request still pending;

        2. proc->ready_threads == 0: there are no idle threads left;

        3. proc->requested_threads_started < proc->max_threads: fewer looper threads have been started than max_threads allows.

    So how should the app be written? (A sketch follows the list below.)

        1. Set max_threads (with the BINDER_SET_MAX_THREADS ioctl);

        2. On receiving BR_SPAWN_LOOPER, create a new thread;

        3. The new thread issues an ioctl with BC_REGISTER_LOOPER;

        4. Then, like the main thread, it enters a loop: read the driver and handle the work.
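        A minimal sketch of those four steps on the server side, in the style of test_server. Error handling is omitted; binder_write and binder_parse are the service-manager helpers shown or mentioned earlier, and server_handler stands in for whatever handler the server already uses in its main loop, so treat the details as illustrative.

#include <pthread.h>
#include <sys/ioctl.h>

/* 1. Tell the driver how many extra looper threads it may ask us to spawn. */
static void server_set_max_threads(struct binder_state *bs, int max_threads)
{
    ioctl(bs->fd, BINDER_SET_MAX_THREADS, &max_threads);
}

/* 3 + 4. Body of a spawned thread: register as a looper, then keep
 * reading the driver and handling work, just like the main loop. */
static void *spawned_looper(void *arg)
{
    struct binder_state *bs = arg;
    uint32_t cmd = BC_REGISTER_LOOPER;
    uint32_t readbuf[32];
    struct binder_write_read bwr;

    binder_write(bs, &cmd, sizeof(cmd));          /* 3. BC_REGISTER_LOOPER */

    for (;;) {                                    /* 4. read driver, handle work */
        bwr.write_size = 0;
        bwr.write_consumed = 0;
        bwr.write_buffer = 0;
        bwr.read_size = sizeof(readbuf);
        bwr.read_consumed = 0;
        bwr.read_buffer = (uintptr_t)readbuf;
        if (ioctl(bs->fd, BINDER_WRITE_READ, &bwr) < 0)
            break;
        if (binder_parse(bs, 0, (uintptr_t)readbuf, bwr.read_consumed,
                         server_handler) < 0)
            break;
    }
    return NULL;
}

/* 2. Call this when the main loop sees BR_SPAWN_LOOPER in its read buffer. */
static void on_spawn_looper(struct binder_state *bs)
{
    pthread_t tid;

    pthread_create(&tid, NULL, spawned_looper, bs);
}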
