lists.openwall.net   lists  /  announce  owl-users  owl-dev  john-users  john-dev  passwdqc-users  yescrypt  popa3d-users  /  oss-security  kernel-hardening  musl  sabotage  tlsify  passwords  /  crypt-dev  xvendor  /  Bugtraq  Full-Disclosure  linux-kernel  linux-netdev  linux-ext4  linux-hardening  linux-cve-announce  PHC 
Open Source and information security mailing list archives
 
Hash Suite: Windows password security audit tool. GUI, reports in PDF.
[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <vouu7wme3guxivbvphafehazewq2gqw5xvazj6wq5pgmsct55a@r5cabs6qy3kj>
Date: Sat, 2 Mar 2024 12:57:47 -0600
From: Bjorn Andersson <andersson@...nel.org>
To: Mukesh Ojha <quic_mojha@...cinc.com>
Cc: konrad.dybcio@...aro.org, linux-arm-msm@...r.kernel.org, 
	linux-kernel@...r.kernel.org, linus.walleij@...aro.org, linux-gpio@...r.kernel.org
Subject: Re: [PATCH v12 8/9] firmware: qcom: scm: Add check to prevent Null
 pointer dereference

On Tue, Feb 27, 2024 at 09:23:07PM +0530, Mukesh Ojha wrote:
> There are multiple place in SCM driver __scm->dev is being
> accessed without checking if it is valid

Why is this a problem?

> or not, and
> not all of the functions need the device, but it is needed
> in some cases when the number of arguments passed is larger
> and the dma_map_single() API is used.
> 

Why can't we just always pass NULL in these cases then?

> Add a NULL check for the cases when it is fine even to pass
> device as NULL and add qcom_scm_is_available() check for
> cases when it is needed for DMA api's.
> 

It could be argued that returning an error for this scenario could be
making things more robust. But I can see no scenario where the calling
driver would be able to react in a suitable way when getting
-EPROBE_DEFER back.

Our current philosophy is that the client will do probe deferral by
invoking qcom_scm_is_available() at probe time. Please provide an
adequate description of the problem that you're solving by introducing
all these checks.

And pick a return value that is appropriate for the context these
functions are being expected to be called in.


PS. Why are you extending the scope of this series? You're just making
sure that your original patches aren't getting merged.

Regards,
Bjorn

> Signed-off-by: Mukesh Ojha <quic_mojha@...cinc.com>
> ---
>  drivers/firmware/qcom/qcom_scm.c | 88 ++++++++++++++++++++++++--------
>  1 file changed, 66 insertions(+), 22 deletions(-)
> 
> diff --git a/drivers/firmware/qcom/qcom_scm.c b/drivers/firmware/qcom/qcom_scm.c
> index 6f14254c0c10..a1dce417e6ec 100644
> --- a/drivers/firmware/qcom/qcom_scm.c
> +++ b/drivers/firmware/qcom/qcom_scm.c
> @@ -465,7 +465,7 @@ int qcom_scm_set_remote_state(u32 state, u32 id)
>  	struct qcom_scm_res res;
>  	int ret;
>  
> -	ret = qcom_scm_call(__scm->dev, &desc, &res);
> +	ret = qcom_scm_call(__scm ? __scm->dev : NULL, &desc, &res);
>  
>  	return ret ? : res.result[0];
>  }
> @@ -591,6 +591,9 @@ int qcom_scm_pas_init_image(u32 peripheral, const void *metadata, size_t size,
>  	};
>  	struct qcom_scm_res res;
>  
> +	if (!qcom_scm_is_available())
> +		return -EPROBE_DEFER;
> +
>  	/*
>  	 * During the scm call memory protection will be enabled for the meta
>  	 * data blob, so make sure it's physically contiguous, 4K aligned and
> @@ -637,6 +640,9 @@ EXPORT_SYMBOL_GPL(qcom_scm_pas_init_image);
>   */
>  void qcom_scm_pas_metadata_release(struct qcom_scm_pas_metadata *ctx)
>  {
> +	if (!qcom_scm_is_available())
> +		return -EPROBE_DEFER;
> +
>  	if (!ctx->ptr)
>  		return;
>  
> @@ -671,6 +677,9 @@ int qcom_scm_pas_mem_setup(u32 peripheral, phys_addr_t addr, phys_addr_t size)
>  	};
>  	struct qcom_scm_res res;
>  
> +	if (!qcom_scm_is_available())
> +		return -EPROBE_DEFER;
> +
>  	ret = qcom_scm_clk_enable();
>  	if (ret)
>  		return ret;
> @@ -706,6 +715,9 @@ int qcom_scm_pas_auth_and_reset(u32 peripheral)
>  	};
>  	struct qcom_scm_res res;
>  
> +	if (!qcom_scm_is_available())
> +		return -EPROBE_DEFER;
> +
>  	ret = qcom_scm_clk_enable();
>  	if (ret)
>  		return ret;
> @@ -740,6 +752,9 @@ int qcom_scm_pas_shutdown(u32 peripheral)
>  	};
>  	struct qcom_scm_res res;
>  
> +	if (!qcom_scm_is_available())
> +		return -EPROBE_DEFER;
> +
>  	ret = qcom_scm_clk_enable();
>  	if (ret)
>  		return ret;
> @@ -776,11 +791,11 @@ bool qcom_scm_pas_supported(u32 peripheral)
>  	};
>  	struct qcom_scm_res res;
>  
> -	if (!__qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_PIL,
> +	if (!__qcom_scm_is_call_available(__scm ? __scm->dev : NULL, QCOM_SCM_SVC_PIL,
>  					  QCOM_SCM_PIL_PAS_IS_SUPPORTED))
>  		return false;
>  
> -	ret = qcom_scm_call(__scm->dev, &desc, &res);
> +	ret = qcom_scm_call(__scm ? __scm->dev : NULL, &desc, &res);
>  
>  	return ret ? false : !!res.result[0];
>  }
> @@ -840,7 +855,7 @@ int qcom_scm_io_readl(phys_addr_t addr, unsigned int *val)
>  	int ret;
>  
>  
> -	ret = qcom_scm_call_atomic(__scm->dev, &desc, &res);
> +	ret = qcom_scm_call_atomic(__scm ? __scm->dev : NULL, &desc, &res);
>  	if (ret >= 0)
>  		*val = res.result[0];
>  
> @@ -859,7 +874,7 @@ int qcom_scm_io_writel(phys_addr_t addr, unsigned int val)
>  		.owner = ARM_SMCCC_OWNER_SIP,
>  	};
>  
> -	return qcom_scm_call_atomic(__scm->dev, &desc, NULL);
> +	return qcom_scm_call_atomic(__scm ? __scm->dev : NULL, &desc, NULL);
>  }
>  EXPORT_SYMBOL_GPL(qcom_scm_io_writel);
>  
> @@ -871,7 +886,8 @@ EXPORT_SYMBOL_GPL(qcom_scm_io_writel);
>   */
>  bool qcom_scm_restore_sec_cfg_available(void)
>  {
> -	return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_MP,
> +	return __qcom_scm_is_call_available(__scm ? __scm->dev : NULL,
> +					    QCOM_SCM_SVC_MP,
>  					    QCOM_SCM_MP_RESTORE_SEC_CFG);
>  }
>  EXPORT_SYMBOL_GPL(qcom_scm_restore_sec_cfg_available);
> @@ -889,7 +905,7 @@ int qcom_scm_restore_sec_cfg(u32 device_id, u32 spare)
>  	struct qcom_scm_res res;
>  	int ret;
>  
> -	ret = qcom_scm_call(__scm->dev, &desc, &res);
> +	ret = qcom_scm_call(__scm ? __scm->dev : NULL, &desc, &res);
>  
>  	return ret ? : res.result[0];
>  }
> @@ -907,7 +923,7 @@ int qcom_scm_iommu_secure_ptbl_size(u32 spare, size_t *size)
>  	struct qcom_scm_res res;
>  	int ret;
>  
> -	ret = qcom_scm_call(__scm->dev, &desc, &res);
> +	ret = qcom_scm_call(__scm ? __scm->dev : NULL, &desc, &res);
>  
>  	if (size)
>  		*size = res.result[0];
> @@ -930,7 +946,7 @@ int qcom_scm_iommu_secure_ptbl_init(u64 addr, u32 size, u32 spare)
>  	};
>  	int ret;
>  
> -	ret = qcom_scm_call(__scm->dev, &desc, NULL);
> +	ret = qcom_scm_call(__scm ? __scm->dev : NULL, &desc, NULL);
>  
>  	/* the pg table has been initialized already, ignore the error */
>  	if (ret == -EPERM)
> @@ -951,7 +967,7 @@ int qcom_scm_iommu_set_cp_pool_size(u32 spare, u32 size)
>  		.owner = ARM_SMCCC_OWNER_SIP,
>  	};
>  
> -	return qcom_scm_call(__scm->dev, &desc, NULL);
> +	return qcom_scm_call(__scm ? __scm->dev : NULL, &desc, NULL);
>  }
>  EXPORT_SYMBOL_GPL(qcom_scm_iommu_set_cp_pool_size);
>  
> @@ -973,7 +989,7 @@ int qcom_scm_mem_protect_video_var(u32 cp_start, u32 cp_size,
>  	};
>  	struct qcom_scm_res res;
>  
> -	ret = qcom_scm_call(__scm->dev, &desc, &res);
> +	ret = qcom_scm_call(__scm ? __scm->dev : NULL, &desc, &res);
>  
>  	return ret ? : res.result[0];
>  }
> @@ -1038,6 +1054,9 @@ int qcom_scm_assign_mem(phys_addr_t mem_addr, size_t mem_sz,
>  	int ret, i, b;
>  	u64 srcvm_bits = *srcvm;
>  
> +	if (!qcom_scm_is_available())
> +		return -EPROBE_DEFER;
> +
>  	src_sz = hweight64(srcvm_bits) * sizeof(*src);
>  	mem_to_map_sz = sizeof(*mem_to_map);
>  	dest_sz = dest_cnt * sizeof(*destvm);
> @@ -1093,7 +1112,8 @@ EXPORT_SYMBOL_GPL(qcom_scm_assign_mem);
>   */
>  bool qcom_scm_ocmem_lock_available(void)
>  {
> -	return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_OCMEM,
> +	return __qcom_scm_is_call_available(__scm ? __scm->dev : NULL,
> +					    QCOM_SCM_SVC_OCMEM,
>  					    QCOM_SCM_OCMEM_LOCK_CMD);
>  }
>  EXPORT_SYMBOL_GPL(qcom_scm_ocmem_lock_available);
> @@ -1120,7 +1140,7 @@ int qcom_scm_ocmem_lock(enum qcom_scm_ocmem_client id, u32 offset, u32 size,
>  		.arginfo = QCOM_SCM_ARGS(4),
>  	};
>  
> -	return qcom_scm_call(__scm->dev, &desc, NULL);
> +	return qcom_scm_call(__scm ? __scm->dev : NULL, &desc, NULL);
>  }
>  EXPORT_SYMBOL_GPL(qcom_scm_ocmem_lock);
>  
> @@ -1143,7 +1163,7 @@ int qcom_scm_ocmem_unlock(enum qcom_scm_ocmem_client id, u32 offset, u32 size)
>  		.arginfo = QCOM_SCM_ARGS(3),
>  	};
>  
> -	return qcom_scm_call(__scm->dev, &desc, NULL);
> +	return qcom_scm_call(__scm ? __scm->dev : NULL, &desc, NULL);
>  }
>  EXPORT_SYMBOL_GPL(qcom_scm_ocmem_unlock);
>  
> @@ -1155,9 +1175,11 @@ EXPORT_SYMBOL_GPL(qcom_scm_ocmem_unlock);
>   */
>  bool qcom_scm_ice_available(void)
>  {
> -	return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_ES,
> +	return __qcom_scm_is_call_available(__scm ? __scm->dev : NULL,
> +					    QCOM_SCM_SVC_ES,
>  					    QCOM_SCM_ES_INVALIDATE_ICE_KEY) &&
> -		__qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_ES,
> +		__qcom_scm_is_call_available(__scm ?__scm->dev : NULL,
> +					     QCOM_SCM_SVC_ES,
>  					     QCOM_SCM_ES_CONFIG_SET_ICE_KEY);
>  }
>  EXPORT_SYMBOL_GPL(qcom_scm_ice_available);
> @@ -1184,7 +1206,7 @@ int qcom_scm_ice_invalidate_key(u32 index)
>  		.owner = ARM_SMCCC_OWNER_SIP,
>  	};
>  
> -	return qcom_scm_call(__scm->dev, &desc, NULL);
> +	return qcom_scm_call(__scm ?__scm->dev : NULL, &desc, NULL);
>  }
>  EXPORT_SYMBOL_GPL(qcom_scm_ice_invalidate_key);
>  
> @@ -1228,6 +1250,9 @@ int qcom_scm_ice_set_key(u32 index, const u8 *key, u32 key_size,
>  	dma_addr_t key_phys;
>  	int ret;
>  
> +	if (!qcom_scm_is_available())
> +		return -EPROBE_DEFER;
> +
>  	/*
>  	 * 'key' may point to vmalloc()'ed memory, but we need to pass a
>  	 * physical address that's been properly flushed.  The sanctioned way to
> @@ -1262,7 +1287,12 @@ EXPORT_SYMBOL_GPL(qcom_scm_ice_set_key);
>  bool qcom_scm_hdcp_available(void)
>  {
>  	bool avail;
> -	int ret = qcom_scm_clk_enable();
> +	int ret;
> +
> +	if (!qcom_scm_is_available())
> +		return -EPROBE_DEFER;
> +
> +	ret = qcom_scm_clk_enable();
>  
>  	if (ret)
>  		return ret;
> @@ -1307,6 +1337,9 @@ int qcom_scm_hdcp_req(struct qcom_scm_hdcp_req *req, u32 req_cnt, u32 *resp)
>  	};
>  	struct qcom_scm_res res;
>  
> +	if (!qcom_scm_is_available())
> +		return -EPROBE_DEFER;
> +
>  	if (req_cnt > QCOM_SCM_HDCP_MAX_REQ_CNT)
>  		return -ERANGE;
>  
> @@ -1335,7 +1368,7 @@ int qcom_scm_iommu_set_pt_format(u32 sec_id, u32 ctx_num, u32 pt_fmt)
>  		.owner = ARM_SMCCC_OWNER_SIP,
>  	};
>  
> -	return qcom_scm_call(__scm->dev, &desc, NULL);
> +	return qcom_scm_call(__scm ? __scm->dev : NULL, &desc, NULL);
>  }
>  EXPORT_SYMBOL_GPL(qcom_scm_iommu_set_pt_format);
>  
> @@ -1351,13 +1384,15 @@ int qcom_scm_qsmmu500_wait_safe_toggle(bool en)
>  	};
>  
>  
> -	return qcom_scm_call_atomic(__scm->dev, &desc, NULL);
> +	return qcom_scm_call_atomic(__scm ? __scm->dev : NULL, &desc, NULL);
>  }
>  EXPORT_SYMBOL_GPL(qcom_scm_qsmmu500_wait_safe_toggle);
>  
>  bool qcom_scm_lmh_dcvsh_available(void)
>  {
> -	return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_LMH, QCOM_SCM_LMH_LIMIT_DCVSH);
> +	return __qcom_scm_is_call_available(__scm ? __scm->dev : NULL,
> +					    QCOM_SCM_SVC_LMH,
> +					    QCOM_SCM_LMH_LIMIT_DCVSH);
>  }
>  EXPORT_SYMBOL_GPL(qcom_scm_lmh_dcvsh_available);
>  
> @@ -1371,7 +1406,7 @@ int qcom_scm_lmh_profile_change(u32 profile_id)
>  		.owner = ARM_SMCCC_OWNER_SIP,
>  	};
>  
> -	return qcom_scm_call(__scm->dev, &desc, NULL);
> +	return qcom_scm_call(__scm ? __scm->dev : NULL, &desc, NULL);
>  }
>  EXPORT_SYMBOL_GPL(qcom_scm_lmh_profile_change);
>  
> @@ -1394,6 +1429,9 @@ int qcom_scm_lmh_dcvsh(u32 payload_fn, u32 payload_reg, u32 payload_val,
>  		.owner = ARM_SMCCC_OWNER_SIP,
>  	};
>  
> +	if (!qcom_scm_is_available())
> +		return -EPROBE_DEFER;
> +
>  	payload_buf = dma_alloc_coherent(__scm->dev, payload_size, &payload_phys, GFP_KERNEL);
>  	if (!payload_buf)
>  		return -ENOMEM;
> @@ -1568,6 +1606,9 @@ int qcom_scm_qseecom_app_get_id(const char *app_name, u32 *app_id)
>  	char *name_buf;
>  	int status;
>  
> +	if (!qcom_scm_is_available())
> +		return -EPROBE_DEFER;
> +
>  	if (app_name_len >= name_buf_size)
>  		return -EINVAL;
>  
> @@ -1638,6 +1679,9 @@ int qcom_scm_qseecom_app_send(u32 app_id, void *req, size_t req_size, void *rsp,
>  	dma_addr_t rsp_phys;
>  	int status;
>  
> +	if (!qcom_scm_is_available())
> +		return -EPROBE_DEFER;
> +
>  	/* Map request buffer */
>  	req_phys = dma_map_single(__scm->dev, req, req_size, DMA_TO_DEVICE);
>  	status = dma_mapping_error(__scm->dev, req_phys);
> -- 
> 2.43.0.254.ga26002b62827
> 

Powered by blists - more mailing lists

Powered by Openwall GNU/*/Linux Powered by OpenVZ