Date:   Sun, 21 Jan 2018 11:37:23 -0800
From:   Joe Perches <joe@...ches.com>
To:     Jörg Rödel <joro@...tes.org>
Cc:     SF Markus Elfring <elfring@...rs.sourceforge.net>,
        iommu@...ts.linux-foundation.org,
        David Woodhouse <dwmw2@...radead.org>,
        LKML <linux-kernel@...r.kernel.org>,
        kernel-janitors@...r.kernel.org
Subject: Re: [PATCH] Intel-IOMMU: Delete an error message for a failed
 memory allocation in init_dmars()

On Sun, 2018-01-21 at 08:19 +0100, Jörg Rödel wrote:
> On Sat, Jan 20, 2018 at 05:37:52PM -0800, Joe Perches wrote:
> > While Markus' commit messages are nearly universally terrible,
> > is there really any significant value in knowing when any
> > particular OOM condition occurs other than the subsystem that
> > became OOM?
> > 
> > You're going to be hosed in any case.
> > 
> > Why isn't the generic OOM stack dump good enough?
> 
> Because if we know the exact allocation attempt that failed right away,
> we can more easily check if we can rewrite it so that it is more likely
> to succeed, e.g. rewriting one higher-order allocation to multiple
> order-0 allocations.

Up to you, but I think that's pretty dubious,
as this is an init function and if it fails
the system really is stuffed.
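
As a reference point, here is a minimal sketch of the
kind of rewrite Jörg describes, i.e. replacing one
higher-order allocation with several order-0 ones.
The function and parameter names below are made up
for illustration and are not taken from intel-iommu.

#include <linux/gfp.h>
#include <linux/slab.h>

/* Before: one physically contiguous higher-order allocation. */
static void *alloc_table_contig(unsigned int nr_pages)
{
	return (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
					get_order(nr_pages * PAGE_SIZE));
}

/*
 * After: an array of independent order-0 pages, each of which
 * the page allocator can satisfy without finding physically
 * contiguous memory, so the allocation is more likely to
 * succeed under fragmentation.
 */
static void **alloc_table_pages(unsigned int nr_pages)
{
	void **pages;
	unsigned int i;

	pages = kcalloc(nr_pages, sizeof(*pages), GFP_KERNEL);
	if (!pages)
		return NULL;

	for (i = 0; i < nr_pages; i++) {
		pages[i] = (void *)get_zeroed_page(GFP_KERNEL);
		if (!pages[i])
			goto err_free;
	}
	return pages;

err_free:
	while (i--)
		free_page((unsigned long)pages[i]);
	kfree(pages);
	return NULL;
}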

Unrelated, there are some unnecessary casts
to and from void * that could be removed to
make the code a bit more regular, as some
callers use the cast and some do not.
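
For illustration only (this is not part of the patch):
in C, a void * converts implicitly to and from any
object pointer type, so neither direction of cast is
needed. alloc_buf() and flush() below are hypothetical
stand-ins for the iommu helpers.

#include <stdlib.h>

struct pte { unsigned long long val; };

/* Returns void *, like alloc_pgtable_page(). */
static void *alloc_buf(size_t size)
{
	return calloc(1, size);
}

/* Takes a void * argument, like __iommu_flush_cache(). */
static void flush(void *addr, size_t size)
{
	(void)addr;
	(void)size;
}

int main(void)
{
	/* No cast needed when assigning a void * result. */
	struct pte *pgd = alloc_buf(sizeof(*pgd));

	/* No cast needed when passing a typed pointer to void *. */
	flush(pgd, sizeof(*pgd));

	free(pgd);
	return 0;
}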

---
 drivers/iommu/intel-iommu.c | 18 +++++++++---------
 1 file changed, 9 insertions(+), 9 deletions(-)

diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 4a2de34895ec..8d7ea76345ae 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -845,8 +845,8 @@ static inline struct context_entry *iommu_context_addr(struct intel_iommu *iommu
 		if (!context)
 			return NULL;
 
-		__iommu_flush_cache(iommu, (void *)context, CONTEXT_SIZE);
-		phy_addr = virt_to_phys((void *)context);
+		__iommu_flush_cache(iommu, context, CONTEXT_SIZE);
+		phy_addr = virt_to_phys(context);
 		*entry = phy_addr | 1;
 		__iommu_flush_cache(iommu, entry, sizeof(*entry));
 	}
@@ -1298,7 +1298,7 @@ static int iommu_alloc_root_entry(struct intel_iommu *iommu)
 	struct root_entry *root;
 	unsigned long flags;
 
-	root = (struct root_entry *)alloc_pgtable_page(iommu->node);
+	root = alloc_pgtable_page(iommu->node);
 	if (!root) {
 		pr_err("Allocating root entry for %s failed\n",
 			iommu->name);
@@ -1978,7 +1978,7 @@ static int domain_init(struct dmar_domain *domain, struct intel_iommu *iommu,
 	domain->nid = iommu->node;
 
 	/* always allocate the top pgd */
-	domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
+	domain->pgd = alloc_pgtable_page(domain->nid);
 	if (!domain->pgd)
 		return -ENOMEM;
 	__iommu_flush_cache(iommu, domain->pgd, PAGE_SIZE);
@@ -4168,7 +4168,7 @@ int __init dmar_parse_one_rmrr(struct acpi_dmar_header *header, void *arg)
 	if (!rmrru->resv)
 		goto free_rmrru;
 
-	rmrru->devices = dmar_alloc_dev_scope((void *)(rmrr + 1),
+	rmrru->devices = dmar_alloc_dev_scope(rmrr + 1,
 				((void *)rmrr) + rmrr->header.length,
 				&rmrru->devices_cnt);
 	if (rmrru->devices_cnt && rmrru->devices == NULL)
@@ -4229,7 +4229,7 @@ int dmar_parse_one_atsr(struct acpi_dmar_header *hdr, void *arg)
 	memcpy(atsru->hdr, hdr, hdr->length);
 	atsru->include_all = atsr->flags & 0x1;
 	if (!atsru->include_all) {
-		atsru->devices = dmar_alloc_dev_scope((void *)(atsr + 1),
+		atsru->devices = dmar_alloc_dev_scope(atsr + 1,
 				(void *)atsr + atsr->header.length,
 				&atsru->devices_cnt);
 		if (atsru->devices_cnt && atsru->devices == NULL) {
@@ -4465,7 +4465,7 @@ int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info)
 		rmrr = container_of(rmrru->hdr,
 				    struct acpi_dmar_reserved_memory, header);
 		if (info->event == BUS_NOTIFY_ADD_DEVICE) {
-			ret = dmar_insert_dev_scope(info, (void *)(rmrr + 1),
+			ret = dmar_insert_dev_scope(info, rmrr + 1,
 				((void *)rmrr) + rmrr->header.length,
 				rmrr->segment, rmrru->devices,
 				rmrru->devices_cnt);
@@ -4483,7 +4483,7 @@ int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info)
 
 		atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
 		if (info->event == BUS_NOTIFY_ADD_DEVICE) {
-			ret = dmar_insert_dev_scope(info, (void *)(atsr + 1),
+			ret = dmar_insert_dev_scope(info, atsr + 1,
 					(void *)atsr + atsr->header.length,
 					atsr->segment, atsru->devices,
 					atsru->devices_cnt);
@@ -4920,7 +4920,7 @@ static int md_domain_init(struct dmar_domain *domain, int guest_width)
 	domain->max_addr = 0;
 
 	/* always allocate the top pgd */
-	domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
+	domain->pgd = alloc_pgtable_page(domain->nid);
 	if (!domain->pgd)
 		return -ENOMEM;
 	domain_flush_cache(domain, domain->pgd, PAGE_SIZE);
