Message-ID: <20230118090859.GC1737564@quicinc.com>
Date: Fri, 20 Jan 2023 11:04:07 +0530
From: Srivatsa Vaddagiri <quic_svaddagi@...cinc.com>
To: Elliot Berman <quic_eberman@...cinc.com>
CC: Bjorn Andersson <quic_bjorande@...cinc.com>,
Murali Nalajala <quic_mnalajal@...cinc.com>,
Trilok Soni <quic_tsoni@...cinc.com>,
"Carl van Schaik" <quic_cvanscha@...cinc.com>,
Prakruthi Deepak Heragu <quic_pheragu@...cinc.com>,
Arnd Bergmann <arnd@...db.de>,
Greg Kroah-Hartman <gregkh@...uxfoundation.org>,
Rob Herring <robh+dt@...nel.org>,
"Krzysztof Kozlowski" <krzysztof.kozlowski+dt@...aro.org>,
Jonathan Corbet <corbet@....net>,
Bagas Sanjaya <bagasdotme@...il.com>,
Catalin Marinas <catalin.marinas@....com>,
Will Deacon <will@...nel.org>,
Jassi Brar <jassisinghbrar@...il.com>,
Sudeep Holla <sudeep.holla@....com>,
Mark Rutland <mark.rutland@....com>,
Lorenzo Pieralisi <lpieralisi@...nel.org>,
"Dmitry Baryshkov" <dmitry.baryshkov@...aro.org>,
<linux-arm-msm@...r.kernel.org>, <devicetree@...r.kernel.org>,
<linux-kernel@...r.kernel.org>, <linux-doc@...r.kernel.org>,
<linux-arm-kernel@...ts.infradead.org>,
<linux-acpi@...r.kernel.org>
Subject: Re: [PATCH v8 14/28] gunyah: vm_mgr: Add/remove user memory regions
* Elliot Berman <quic_eberman@...cinc.com> [2022-12-19 14:58:35]:
> static int gh_vm_release(struct inode *inode, struct file *filp)
> {
> struct gunyah_vm *ghvm = filp->private_data;
> + struct gunyah_vm_memory_mapping *mapping, *tmp;
>
> + list_for_each_entry_safe(mapping, tmp, &ghvm->memory_mappings, list) {
> + gh_vm_mem_mapping_reclaim(ghvm, mapping);
kfree(mapping) also? (see the sketch after this hunk)
> + }
> put_gh_rm(ghvm->rm);
> kfree(ghvm);
> return 0;
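
To elaborate on the kfree comment above, something like this is what I had in
mind (untested sketch, assuming gh_vm_mem_mapping_reclaim() unpins the pages
and unlinks the entry but does not free the struct itself):

	list_for_each_entry_safe(mapping, tmp, &ghvm->memory_mappings, list) {
		gh_vm_mem_mapping_reclaim(ghvm, mapping);
		/* reclaim has unlinked the entry; free the tracking struct too */
		kfree(mapping);
	}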
[snip]
> +struct gunyah_vm_memory_mapping {
> + struct list_head list;
> + enum gunyah_vm_mem_share_type share_type;
> + struct gh_rm_mem_parcel parcel;
> +
> + __u64 guest_phys_addr;
> + __u32 mem_size;
'gh_userspace_memory_region' allows a 64-bit memory_size, so perhaps make
mem_size 64-bit here as well?
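
i.e. something like this, so it matches the UAPI type:

	__u64 guest_phys_addr;
	__u64 mem_size;		/* keep in sync with gh_userspace_memory_region.memory_size */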
> +struct gunyah_vm_memory_mapping *gh_vm_mem_mapping_alloc(struct gunyah_vm *ghvm,
> + struct gh_userspace_memory_region *region)
> +{
> + phys_addr_t curr_page, prev_page;
> + struct gunyah_vm_memory_mapping *mapping, *tmp_mapping;
> + struct gh_rm_mem_entry *mem_entries;
> + int i, j, pinned, ret = 0;
> + struct gh_rm_mem_parcel *parcel;
> +
> + if (!region->memory_size || !PAGE_ALIGNED(region->memory_size) ||
> + !PAGE_ALIGNED(region->userspace_addr))
> + return ERR_PTR(-EINVAL);
> +
> + ret = mutex_lock_interruptible(&ghvm->mm_lock);
> + if (ret)
> + return ERR_PTR(ret);
> + mapping = __gh_vm_mem_mapping_find(ghvm, region->label);
> + if (mapping) {
> + ret = -EEXIST;
> + goto unlock;
We should avoid kfree(mapping) in this case? 'mapping' points at the existing
entry here, so the free_mapping path below would free a mapping that is still
on the list.
> + }
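
Since the common 'unlock' exit below jumps to free_mapping whenever ret is
set, hitting -EEXIST here ends up kfree()ing a live mapping. One way around
it (rough, untested sketch):

	mapping = __gh_vm_mem_mapping_find(ghvm, region->label);
	if (mapping) {
		/* 'mapping' is an existing entry; bail out without freeing it */
		mutex_unlock(&ghvm->mm_lock);
		return ERR_PTR(-EEXIST);
	}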
> +
> + mapping = kzalloc(sizeof(*mapping), GFP_KERNEL);
> + if (!mapping) {
> + ret = -ENOMEM;
> + goto unlock;
> + }
> +
> + mapping->parcel.label = region->label;
> + mapping->guest_phys_addr = region->guest_phys_addr;
> + mapping->npages = region->memory_size >> PAGE_SHIFT;
> + parcel = &mapping->parcel;
> + parcel->mem_handle = GH_MEM_HANDLE_INVAL; /* to be filled later by mem_share/mem_lend */
> + parcel->mem_type = GH_RM_MEM_TYPE_NORMAL;
> +
> + /* Check for overlap */
> + list_for_each_entry(tmp_mapping, &ghvm->memory_mappings, list) {
> + if (!((mapping->guest_phys_addr + (mapping->npages << PAGE_SHIFT) <=
> + tmp_mapping->guest_phys_addr) ||
> + (mapping->guest_phys_addr >=
> + tmp_mapping->guest_phys_addr + (tmp_mapping->npages << PAGE_SHIFT)))) {
> + ret = -EEXIST;
> + goto unlock;
> + }
> + }
> +
> + list_add(&mapping->list, &ghvm->memory_mappings);
I think we need to either avoid adding to the list this early (i.e. defer it
until all the steps below have succeeded) or maintain some additional state in
'mapping' to indicate that it is still work in progress. Consider, for
example, the race when multiple threads call the SET_USER_MEM_REGION ioctl on
the same label (one with size > 0 and the other with size = 0), which can lead
to an unpleasant outcome AFAICS. A rough sketch of the second option follows
below.
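
Very rough sketch (the 'state' field and names here are hypothetical, and this
is not a complete fix by itself):

	enum gunyah_vm_mem_mapping_state {
		GH_VM_MEM_INCOMPLETE,	/* on the list, but pages not pinned / parcel not shared yet */
		GH_VM_MEM_READY,
	};

	struct gunyah_vm_memory_mapping {
		struct list_head list;
		enum gunyah_vm_mem_mapping_state state;
		/* ... existing fields ... */
	};

The alloc path would add the entry as GH_VM_MEM_INCOMPLETE, flip it to
GH_VM_MEM_READY (under mm_lock) only once pinning and the mem share/lend have
succeeded, and the lookup/removal paths would skip or fail on entries that are
still incomplete.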
> +unlock:
> + mutex_unlock(&ghvm->mm_lock);
> + if (ret)
> + goto free_mapping;
> +
> + mapping->pages = kcalloc(mapping->npages, sizeof(*mapping->pages), GFP_KERNEL);
> + if (!mapping->pages) {
> + ret = -ENOMEM;
> + goto reclaim;
Can you check this error path? We seem to call unpin_user_page() in this path
even though nothing has been pinned yet, which is not correct (see the sketch
after the next hunk).
> + }
> +
> + pinned = pin_user_pages_fast(region->userspace_addr, mapping->npages,
> + FOLL_WRITE | FOLL_LONGTERM, mapping->pages);
> + if (pinned < 0) {
> + ret = pinned;
> + goto reclaim;
Same comment as above
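
For both of these error paths, separate labels would avoid unpinning pages
that were never pinned. Rough sketch (the label names are made up; 'unlink'
would take mm_lock, list_del() the mapping and kfree() it):

	mapping->pages = kcalloc(mapping->npages, sizeof(*mapping->pages), GFP_KERNEL);
	if (!mapping->pages) {
		ret = -ENOMEM;
		goto unlink;		/* pages[] not allocated, nothing pinned */
	}

	pinned = pin_user_pages_fast(region->userspace_addr, mapping->npages,
				     FOLL_WRITE | FOLL_LONGTERM, mapping->pages);
	if (pinned < 0) {
		ret = pinned;
		goto free_pages;	/* pages[] allocated, but nothing pinned */
	}
	/* ... remaining setup, then the mem share/lend ... */

free_pages:
	kfree(mapping->pages);
unlink:
	/* drop 'mapping' from ghvm->memory_mappings under mm_lock and kfree() it */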
> + parcel->acl_entries[0].vmid = ghvm->vmid;
cpu_to_le() wrapper is missing here and in a few other places in this
function. Please check (example below).
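
i.e. something like (assuming the ACL entry's vmid field is a __le16 on the
wire):

	parcel->acl_entries[0].vmid = cpu_to_le16(ghvm->vmid);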