Message-ID: <982e7012-672c-dbde-da97-c3752f9976ae@fb.com>
Date: Tue, 12 Jan 2021 10:43:03 -0800
From: Yonghong Song <yhs@...com>
To: Song Liu <songliubraving@...com>, <bpf@...r.kernel.org>,
<netdev@...r.kernel.org>, <linux-mm@...ck.org>
CC: <ast@...nel.org>, <daniel@...earbox.net>, <andrii@...nel.org>,
<john.fastabend@...il.com>, <kpsingh@...omium.org>,
<kernel-team@...com>, <akpm@...ux-foundation.org>
Subject: Re: [PATCH v3 bpf-next 1/4] bpf: introduce task_vma bpf_iter
On 1/6/21 8:17 PM, Song Liu wrote:
> Introduce task_vma bpf_iter to print memory information of a process. It
> can be used to print customized information similar to /proc/<pid>/maps.
>
> Currently, /proc/<pid>/maps and /proc/<pid>/smaps provide information
> about the vmas of a process. However, this information is not flexible
> enough to cover all use cases. For example, if a vma covers a mix of 2MB
> pages and 4kB pages (x86_64), there is no easy way to tell which address
> ranges are backed by 2MB pages. task_vma solves the problem by enabling
> the user to generate customized information based on the vma (and
> vma->vm_mm, vma->vm_file, etc.).
>
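For readers less familiar with bpf_iter: a minimal program consuming
this iterator might look like the sketch below. This is untested; it
assumes the BPF_SEQ_PRINTF macro from the bpf_iter selftests headers
and a generated vmlinux.h, and dump_vmas is just a made-up name:

	#include "vmlinux.h"
	#include <bpf/bpf_helpers.h>
	#include <bpf/bpf_tracing.h>

	char _license[] SEC("license") = "GPL";

	SEC("iter/task_vma")
	int dump_vmas(struct bpf_iter__task_vma *ctx)
	{
		struct seq_file *seq = ctx->meta->seq;
		struct vm_area_struct *vma = ctx->vma;
		struct task_struct *task = ctx->task;

		/* both task and vma are PTR_TO_BTF_ID_OR_NULL,
		 * so check for NULL before dereferencing
		 */
		if (!task || !vma)
			return 0;

		/* print one line per vma, /proc/<pid>/maps style */
		BPF_SEQ_PRINTF(seq, "%d: %08llx-%08llx\n", task->pid,
			       vma->vm_start, vma->vm_end);
		return 0;
	}
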
> To access the vma safely in the BPF program, task_vma iterator holds
> target mmap_lock while calling the BPF program. If the mmap_lock is
> contended, task_vma unlocks mmap_lock between iterations to unblock the
> writer(s). This lock contention avoidance mechanism is similar to the one
> used in show_smaps_rollup().
>
> Signed-off-by: Song Liu <songliubraving@...com>
> ---
> kernel/bpf/task_iter.c | 211 ++++++++++++++++++++++++++++++++++++++++-
> 1 file changed, 210 insertions(+), 1 deletion(-)
>
> diff --git a/kernel/bpf/task_iter.c b/kernel/bpf/task_iter.c
> index 0458a40edf10a..a1d3c9d230f68 100644
> --- a/kernel/bpf/task_iter.c
> +++ b/kernel/bpf/task_iter.c
> @@ -304,9 +304,192 @@ static const struct seq_operations task_file_seq_ops = {
> .show = task_file_seq_show,
> };
>
> +struct bpf_iter_seq_task_vma_info {
> + /* The first field must be struct bpf_iter_seq_task_common.
> + * this is assumed by {init, fini}_seq_pidns() callback functions.
> + */
> + struct bpf_iter_seq_task_common common;
> + struct task_struct *task;
> + struct vm_area_struct *vma;
> + u32 tid;
> + unsigned long prev_vm_start;
> + unsigned long prev_vm_end;
> +};
> +
> +enum bpf_task_vma_iter_find_op {
> + task_vma_iter_first_vma, /* use mm->mmap */
> + task_vma_iter_next_vma, /* use curr_vma->vm_next */
> + task_vma_iter_find_vma, /* use find_vma() to find next vma */
> +};
> +
> +static struct vm_area_struct *
> +task_vma_seq_get_next(struct bpf_iter_seq_task_vma_info *info)
> +{
> + struct pid_namespace *ns = info->common.ns;
> + enum bpf_task_vma_iter_find_op op;
> + struct vm_area_struct *curr_vma;
> + struct task_struct *curr_task;
> + u32 curr_tid = info->tid;
> +
> + /* If this function returns a non-NULL vma, it holds a reference to
> + * the task_struct, and holds read lock on vma->mm->mmap_lock.
> + * If this function returns NULL, it does not hold any reference or
> + * lock.
> + */
> +again:
We can move the "again" label down, right before "curr_task =
task_seq_get_next()", since the "goto again" path always has
info->task == NULL.
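I.e., something like (untested):

	} else {
again:
		curr_task = task_seq_get_next(ns, &curr_tid, true);
		if (!curr_task) {
			info->tid = curr_tid + 1;
			goto finish;
		}
		...
	}

so the next_task path does not need to re-check info->task, which is
always NULL there.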
> + if (info->task) {
> + curr_task = info->task;
> + curr_vma = info->vma;
> + /* In case of lock contention, drop mmap_lock to unblock
> + * the writer.
> + */
> + if (mmap_lock_is_contended(curr_task->mm)) {
> + info->prev_vm_start = curr_vma->vm_start;
> + info->prev_vm_end = curr_vma->vm_end;
> + op = task_vma_iter_find_vma;
> + mmap_read_unlock(curr_task->mm);
> + if (mmap_read_lock_killable(curr_task->mm))
Could you explain when mmap_read_lock_killable() may fail?
What I am trying to figure out is whether, when
mmap_read_lock_killable() fails, we should abort the iteration or
ask the user to try again (if nothing has been written into the
user buffer yet).
> + goto finish;
> + } else {
> + op = task_vma_iter_next_vma;
> + }
> + } else {
> + curr_task = task_seq_get_next(ns, &curr_tid, true);
> + if (!curr_task) {
> + info->tid = curr_tid + 1;
> + goto finish;
> + }
> +
> + if (curr_tid != info->tid) {
> + info->tid = curr_tid;
> + op = task_vma_iter_first_vma;
> + } else {
> + op = task_vma_iter_find_vma;
> + }
> +
> + if (!curr_task->mm)
> + goto next_task;
> +
> + if (mmap_read_lock_killable(curr_task->mm))
> + goto finish;
> + }
> +
> + switch (op) {
> + case task_vma_iter_first_vma:
> + curr_vma = curr_task->mm->mmap;
> + break;
> + case task_vma_iter_next_vma:
> + curr_vma = curr_vma->vm_next;
> + break;
> + case task_vma_iter_find_vma:
> + /* We dropped mmap_lock so it is necessary to use find_vma
> + * to find the next vma. This is similar to the mechanism
> + * in show_smaps_rollup().
> + */
> + curr_vma = find_vma(curr_task->mm, info->prev_vm_end - 1);
> +
> + if (curr_vma && (curr_vma->vm_start == info->prev_vm_start))
> + curr_vma = curr_vma->vm_next;
> + break;
> + }
> + if (!curr_vma) {
> + mmap_read_unlock(curr_task->mm);
> + goto next_task;
> + }
> + info->task = curr_task;
> + info->vma = curr_vma;
> + return curr_vma;
> +
> +next_task:
> + put_task_struct(curr_task);
> + info->task = NULL;
> + curr_tid++;
> + goto again;
> +
> +finish:
> + info->task = NULL;
> + info->vma = NULL;
> + return NULL;
> +}
> +
> +static void *task_vma_seq_start(struct seq_file *seq, loff_t *pos)
> +{
> + struct bpf_iter_seq_task_vma_info *info = seq->private;
> + struct vm_area_struct *vma;
> +
> + vma = task_vma_seq_get_next(info);
> + if (vma && *pos == 0)
> + ++*pos;
> +
> + return vma;
> +}
> +
> +static void *task_vma_seq_next(struct seq_file *seq, void *v, loff_t *pos)
> +{
> + struct bpf_iter_seq_task_vma_info *info = seq->private;
> +
> + ++*pos;
> + return task_vma_seq_get_next(info);
> +}
> +
> +struct bpf_iter__task_vma {
> + __bpf_md_ptr(struct bpf_iter_meta *, meta);
> + __bpf_md_ptr(struct task_struct *, task);
> + __bpf_md_ptr(struct vm_area_struct *, vma);
> +};
> +
> +DEFINE_BPF_ITER_FUNC(task_vma, struct bpf_iter_meta *meta,
> + struct task_struct *task, struct vm_area_struct *vma)
> +
> +static int __task_vma_seq_show(struct seq_file *seq, bool in_stop)
> +{
> + struct bpf_iter_seq_task_vma_info *info = seq->private;
> + struct bpf_iter__task_vma ctx;
> + struct bpf_iter_meta meta;
> + struct bpf_prog *prog;
> +
> + meta.seq = seq;
> + prog = bpf_iter_get_info(&meta, in_stop);
> + if (!prog)
> + return 0;
> +
> + ctx.meta = &meta;
> + ctx.task = info->task;
> + ctx.vma = info->vma;
> + return bpf_iter_run_prog(prog, &ctx);
> +}
> +
> +static int task_vma_seq_show(struct seq_file *seq, void *v)
> +{
> + return __task_vma_seq_show(seq, false);
> +}
> +
> +static void task_vma_seq_stop(struct seq_file *seq, void *v)
> +{
> + struct bpf_iter_seq_task_vma_info *info = seq->private;
> +
> + if (!v) {
> + (void)__task_vma_seq_show(seq, true);
> + } else {
> + info->prev_vm_start = info->vma->vm_start;
> + info->prev_vm_end = info->vma->vm_end;
This change, together with the task_vma_iter_find_vma action,
is not correct.
What we typically have here is that info->vma->vm_start/vm_end
belongs to the next vma to show, but since there is no room left
in the seq_file buffer, we delay showing it until user space has
processed the buffer and come back. So at this point it is not
really prev_vm_start/end, as that vma has not been displayed yet.
Maybe you can separate task_vma_iter_find_vma into two ops,
task_vma_iter_find_vma and task_vma_iter_find_vma_next, where
task_vma_iter_find_vma_next carries the action you currently have
for task_vma_iter_find_vma.
In the above task_vma_seq_get_next(), if info->task != NULL, you
would use task_vma_iter_find_vma_next (go to the vma after the
previous vm_start/end); if info->task == NULL, which means we are
back from user space, do not skip to the next vma on an exact
match.
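A rough sketch (untested) of what I mean:

	enum bpf_task_vma_iter_find_op {
		task_vma_iter_first_vma,	/* use mm->mmap */
		task_vma_iter_next_vma,		/* use curr_vma->vm_next */
		task_vma_iter_find_vma,		/* find_vma(); do not skip an
						 * exact match, the saved vma
						 * has not been shown yet
						 */
		task_vma_iter_find_vma_next,	/* find_vma(); skip an exact
						 * match, the saved vma was
						 * already shown before
						 * mmap_lock was dropped
						 */
	};

and in the switch statement:

	case task_vma_iter_find_vma:
		curr_vma = find_vma(curr_task->mm, info->prev_vm_end - 1);
		break;
	case task_vma_iter_find_vma_next:
		curr_vma = find_vma(curr_task->mm, info->prev_vm_end - 1);
		if (curr_vma && curr_vma->vm_start == info->prev_vm_start)
			curr_vma = curr_vma->vm_next;
		break;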
> + mmap_read_unlock(info->task->mm);
> + put_task_struct(info->task);
> + info->task = NULL;
> + }
> +}
> +
> +static const struct seq_operations task_vma_seq_ops = {
> + .start = task_vma_seq_start,
> + .next = task_vma_seq_next,
> + .stop = task_vma_seq_stop,
> + .show = task_vma_seq_show,
> +};
> +
> BTF_ID_LIST(btf_task_file_ids)
> BTF_ID(struct, task_struct)
> BTF_ID(struct, file)
> +BTF_ID(struct, vm_area_struct)
>
> static const struct bpf_iter_seq_info task_seq_info = {
> .seq_ops = &task_seq_ops,
> @@ -346,6 +529,26 @@ static struct bpf_iter_reg task_file_reg_info = {
> .seq_info = &task_file_seq_info,
> };
>
> +static const struct bpf_iter_seq_info task_vma_seq_info = {
> + .seq_ops = &task_vma_seq_ops,
> + .init_seq_private = init_seq_pidns,
> + .fini_seq_private = fini_seq_pidns,
> + .seq_priv_size = sizeof(struct bpf_iter_seq_task_vma_info),
> +};
> +
> +static struct bpf_iter_reg task_vma_reg_info = {
> + .target = "task_vma",
> + .feature = BPF_ITER_RESCHED,
> + .ctx_arg_info_size = 2,
> + .ctx_arg_info = {
> + { offsetof(struct bpf_iter__task_vma, task),
> + PTR_TO_BTF_ID_OR_NULL },
> + { offsetof(struct bpf_iter__task_vma, vma),
> + PTR_TO_BTF_ID_OR_NULL },
> + },
> + .seq_info = &task_vma_seq_info,
> +};
> +
> static int __init task_iter_init(void)
> {
> int ret;
> @@ -357,6 +560,12 @@ static int __init task_iter_init(void)
>
> task_file_reg_info.ctx_arg_info[0].btf_id = btf_task_file_ids[0];
> task_file_reg_info.ctx_arg_info[1].btf_id = btf_task_file_ids[1];
> - return bpf_iter_reg_target(&task_file_reg_info);
> + ret = bpf_iter_reg_target(&task_file_reg_info);
> + if (ret)
> + return ret;
> +
> + task_vma_reg_info.ctx_arg_info[0].btf_id = btf_task_file_ids[0];
> + task_vma_reg_info.ctx_arg_info[1].btf_id = btf_task_file_ids[2];
> + return bpf_iter_reg_target(&task_vma_reg_info);
> }
> late_initcall(task_iter_init);
>