Message-ID: <d31ac48b-13d3-cb9c-7a59-220b31a89274@quicinc.com>
Date:   Fri, 9 Jun 2023 15:35:31 -0700
From:   Elliot Berman <quic_eberman@...cinc.com>
To:     Alex Elder <elder@...aro.org>,
        Srinivas Kandagatla <srinivas.kandagatla@...aro.org>,
        Prakruthi Deepak Heragu <quic_pheragu@...cinc.com>
CC:     Murali Nalajala <quic_mnalajal@...cinc.com>,
        Trilok Soni <quic_tsoni@...cinc.com>,
        Srivatsa Vaddagiri <quic_svaddagi@...cinc.com>,
        Carl van Schaik <quic_cvanscha@...cinc.com>,
        Dmitry Baryshkov <dmitry.baryshkov@...aro.org>,
        Bjorn Andersson <andersson@...nel.org>,
        "Konrad Dybcio" <konrad.dybcio@...aro.org>,
        Arnd Bergmann <arnd@...db.de>,
        "Greg Kroah-Hartman" <gregkh@...uxfoundation.org>,
        Rob Herring <robh+dt@...nel.org>,
        Krzysztof Kozlowski <krzysztof.kozlowski+dt@...aro.org>,
        Jonathan Corbet <corbet@....net>,
        Bagas Sanjaya <bagasdotme@...il.com>,
        Will Deacon <will@...nel.org>, Andy Gross <agross@...nel.org>,
        Catalin Marinas <catalin.marinas@....com>,
        Jassi Brar <jassisinghbrar@...il.com>,
        <linux-arm-msm@...r.kernel.org>, <devicetree@...r.kernel.org>,
        <linux-kernel@...r.kernel.org>, <linux-doc@...r.kernel.org>,
        <linux-arm-kernel@...ts.infradead.org>
Subject: Re: [PATCH v13 09/24] gunyah: rsc_mgr: Add RPC for sharing memory



On 6/5/2023 12:48 PM, Alex Elder wrote:
> On 5/9/23 3:47 PM, Elliot Berman wrote:
>> +
>> +    req_header->mem_handle = cpu_to_le32(mem_handle);
>> +    if (end_append)
>> +        req_header->flags |= GH_MEM_APPEND_REQ_FLAGS_END;
>> +
>> +    mem_section->n_entries = cpu_to_le16(n_mem_entries);
>> +    memcpy(mem_section->entries, mem_entries, sizeof(*mem_entries) * n_mem_entries);
>> +
>> +    ret = gh_rm_call(rm, GH_RM_RPC_MEM_APPEND, msg, msg_size, NULL, NULL);
>> +    kfree(msg);
>> +
>> +    return ret;
>> +}
>> +
>> +static int gh_rm_mem_append(struct gh_rm *rm, u32 mem_handle,
>> +            struct gh_rm_mem_entry *mem_entries, size_t n_mem_entries)
>> +{
>> +    bool end_append;
>> +    int ret = 0;
>> +    size_t n;
>> +
>> +    while (n_mem_entries) {
>> +        if (n_mem_entries > GH_RM_MAX_MEM_ENTRIES) {
>> +            end_append = false;
>> +            n = GH_RM_MAX_MEM_ENTRIES;
>> +        } else {
>> +            end_append = true;
>> +            n = n_mem_entries;
>> +        }
>> +
>> +        ret = _gh_rm_mem_append(rm, mem_handle, end_append, mem_entries, n);
>> +        if (ret)
>> +            break;
>> +
>> +        mem_entries += n;
>> +        n_mem_entries -= n;
>> +    }
>> +
>> +    return ret;
>> +}
>> +
>> +static int gh_rm_mem_lend_common(struct gh_rm *rm, u32 message_id, struct gh_rm_mem_parcel *p)
>> +{
>> +    size_t msg_size = 0, initial_mem_entries = p->n_mem_entries, resp_size;
>> +    size_t acl_section_size, mem_section_size;
>> +    struct gh_rm_mem_share_req_acl_section *acl_section;
>> +    struct gh_rm_mem_share_req_mem_section *mem_section;
>> +    struct gh_rm_mem_share_req_header *req_header;
>> +    u32 *attr_section;
>> +    __le32 *resp;
>> +    void *msg;
>> +    int ret;
>> +
>> +    if (!p->acl_entries || !p->n_acl_entries || !p->mem_entries || !p->n_mem_entries ||
>> +        p->n_acl_entries > U8_MAX || p->mem_handle != GH_MEM_HANDLE_INVAL)
>> +        return -EINVAL;
>> +
>> +    if (initial_mem_entries > GH_RM_MAX_MEM_ENTRIES)
>> +        initial_mem_entries = GH_RM_MAX_MEM_ENTRIES;
> 
> Is it OK to truncate the number of entries silently?
> 

The initial share/lend accepts up to GH_RM_MAX_MEM_ENTRIES entries; the rest
of the mem entries are appended later, so nothing is silently dropped.
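
For illustration, the batching works out roughly like this (a sketch only,
not part of the patch; gh_rm_mem_share_num_calls() is a hypothetical helper
and DIV_ROUND_UP is the usual kernel macro):

/*
 * Hypothetical helper, for exposition only: how many RPC calls a
 * parcel with n_mem_entries entries ends up taking.
 */
static size_t gh_rm_mem_share_num_calls(size_t n_mem_entries)
{
	size_t remaining;

	if (n_mem_entries <= GH_RM_MAX_MEM_ENTRIES)
		return 1; /* a single MEM_SHARE/MEM_LEND, no appends */

	remaining = n_mem_entries - GH_RM_MAX_MEM_ENTRIES;
	/* one MEM_SHARE/MEM_LEND plus ceil(remaining / max) MEM_APPENDs */
	return 1 + DIV_ROUND_UP(remaining, GH_RM_MAX_MEM_ENTRIES);
}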

>> +
>> +    acl_section_size = struct_size(acl_section, entries, p->n_acl_entries);
> 
> Is there a limit on the number of ACL entries (as there is for
> the number of mem entries)?
> 

There is a limit at the transport level -- messages sent to the resource
manager can only be so long. The maximum number of ACL entries is therefore
dynamic, depending on the size of the rest of the message, e.g. how many mem
entries there are. We could try to compute that limit and even lower the
maximum number of mem_entries, but in practice the number of ACL entries will
be in the single digits, so being "smarter" about the limit seemed like
premature optimization; we let the RPC core do the checking/complaining.
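
If we did want to be smarter, the computation would look something like the
sketch below. Assumptions: GH_RM_MAX_MSG_SIZE stands in for whatever the
transport's real message-size limit is, and gh_rm_mem_acl_entry is assumed to
be the ACL entry type the parcel carries; the other struct names are the ones
from this patch.

static size_t gh_rm_max_acl_entries(size_t n_mem_entries)
{
	/* everything in the message except the ACL entries themselves */
	size_t fixed = sizeof(struct gh_rm_mem_share_req_header) +
		       sizeof(struct gh_rm_mem_share_req_acl_section) +
		       sizeof(struct gh_rm_mem_share_req_mem_section) +
		       n_mem_entries * sizeof(struct gh_rm_mem_entry) +
		       sizeof(u32); /* empty memory attributes section */

	if (fixed >= GH_RM_MAX_MSG_SIZE)
		return 0;

	return (GH_RM_MAX_MSG_SIZE - fixed) / sizeof(struct gh_rm_mem_acl_entry);
}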

>> +    mem_section_size = struct_size(mem_section, entries, initial_mem_entries);
>> +    /* The format of the message goes:
>> +     * request header
>> +     * ACL entries (which VMs get what kind of access to this memory parcel)
>> +     * Memory entries (list of memory regions to share)
>> +     * Memory attributes (currently unused, we'll hard-code the size to 0)
>> +     */
>> +    msg_size += sizeof(struct gh_rm_mem_share_req_header);
>> +    msg_size += acl_section_size;
>> +    msg_size += mem_section_size;
>> +    msg_size += sizeof(u32); /* for memory attributes, currently unused */
>> +
>> +    msg = kzalloc(msg_size, GFP_KERNEL);
>> +    if (!msg)
>> +        return -ENOMEM;
>> +
>> +    req_header = msg;
>> +    acl_section = (void *)req_header + sizeof(*req_header);
>> +    mem_section = (void *)acl_section + acl_section_size;
>> +    attr_section = (void *)mem_section + mem_section_size;
>> +
>> +    req_header->mem_type = p->mem_type;
>> +    if (initial_mem_entries != p->n_mem_entries)
>> +        req_header->flags |= GH_MEM_SHARE_REQ_FLAGS_APPEND;
>> +    req_header->label = cpu_to_le32(p->label);
>> +
>> +    acl_section->n_entries = cpu_to_le32(p->n_acl_entries);
>> +    memcpy(acl_section->entries, p->acl_entries,
>> +        flex_array_size(acl_section, entries, p->n_acl_entries));
>> +
>> +    mem_section->n_entries = cpu_to_le16(initial_mem_entries);
>> +    memcpy(mem_section->entries, p->mem_entries,
>> +        flex_array_size(mem_section, entries, initial_mem_entries));
>> +
>> +    /* Set n_entries for memory attribute section to 0 */
>> +    *attr_section = 0;
>> +
>> +    ret = gh_rm_call(rm, message_id, msg, msg_size, (void **)&resp, &resp_size);
>> +    kfree(msg);
>> +
>> +    if (ret)
>> +        return ret;
>> +
>> +    p->mem_handle = le32_to_cpu(*resp);
>> +    kfree(resp);
>> +
>> +    if (initial_mem_entries != p->n_mem_entries) {
>> +        ret = gh_rm_mem_append(rm, p->mem_handle,
>> +                    &p->mem_entries[initial_mem_entries],
>> +                    p->n_mem_entries - initial_mem_entries);
> 
> Will there always be at most one gh_rm_mem_append() call?
> 

Yes. gh_rm_mem_append() is called at most once; internally it makes as many
RPC calls as necessary to cover all the remaining entries.
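
Concretely (illustrative numbers only; assume for the example that
GH_RM_MAX_MEM_ENTRIES is 512 and the parcel has 1100 entries):

/*
 * MEM_SHARE/MEM_LEND : entries[0..511]     (APPEND flag set in header)
 * MEM_APPEND #1      : entries[512..1023]  (end_append = false)
 * MEM_APPEND #2      : entries[1024..1099] (end_append = true, END flag set)
 */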

>> +        if (ret) {
>> +            gh_rm_mem_reclaim(rm, p);
>> +            p->mem_handle = GH_MEM_HANDLE_INVAL;
>> +        }
>> +    }
>> +
>> +    return ret;
>> +}
> 
> . . .
> 
