Date:   Wed, 21 Jul 2021 09:05:43 +0300
From:   Julian Wiedmann <jwi@...ux.ibm.com>
To:     Arnd Bergmann <arnd@...nel.org>, netdev@...r.kernel.org
Cc:     Christoph Hellwig <hch@....de>, Arnd Bergmann <arnd@...db.de>,
        Karsten Graul <kgraul@...ux.ibm.com>,
        linux-s390 <linux-s390@...r.kernel.org>
Subject: Re: [PATCH net-next v2 16/31] qeth: use ndo_siocdevprivate

On 20.07.21 17:46, Arnd Bergmann wrote:
> From: Arnd Bergmann <arnd@...db.de>
> 
> qeth has both standard MII ioctls and custom SIOCDEVPRIVATE ones,
> all of which work correctly with compat user space.
> 
> Move the private ones over to the new ndo_siocdevprivate callback.
> 
> Signed-off-by: Arnd Bergmann <arnd@...db.de>
> ---
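
For readers who haven't followed the earlier patches in the series: the callback this converts to was added to struct net_device_ops earlier in the set. As a rough sketch (my recollection of the series, not quoted from this patch), its shape is:

	/* New net_device_ops hook for SIOCDEVPRIVATE ioctls; data points at
	 * the ifr_data payload and is passed separately so that compat
	 * handling can stay in common code rather than in each driver.
	 */
	int	(*ndo_siocdevprivate)(struct net_device *dev, struct ifreq *ifr,
				      void __user *data, int cmd);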

Your get_maintainers scripting seems broken, adding the usual suspects.

>  drivers/s390/net/qeth_core.h      |  5 ++++-
>  drivers/s390/net/qeth_core_main.c | 35 ++++++++++++++++++++++---------
>  drivers/s390/net/qeth_l3_main.c   |  6 +++---
>  3 files changed, 32 insertions(+), 14 deletions(-)
> 
> diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
> index f4d554ea0c93..89fd7432dbec 100644
> --- a/drivers/s390/net/qeth_core.h
> +++ b/drivers/s390/net/qeth_core.h
> @@ -790,7 +790,8 @@ struct qeth_discipline {
>  	void (*remove) (struct ccwgroup_device *);
>  	int (*set_online)(struct qeth_card *card, bool carrier_ok);
>  	void (*set_offline)(struct qeth_card *card);
> -	int (*do_ioctl)(struct net_device *dev, struct ifreq *rq, int cmd);
> +	int (*do_ioctl)(struct net_device *dev, struct ifreq *rq,
> +			void __user *data, int cmd);
>  	int (*control_event_handler)(struct qeth_card *card,
>  					struct qeth_ipa_cmd *cmd);
>  };
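
(Side note on the discipline side: with the extra parameter above, I'd expect the l2/l3 implementations to simply grow the same argument. Roughly, assuming the existing qeth_l3_do_ioctl helper; prototype only, as a sketch:)

	/* The discipline hook gains the user pointer and passes it on,
	 * instead of reading rq->ifr_ifru.ifru_data itself.
	 */
	static int qeth_l3_do_ioctl(struct net_device *dev, struct ifreq *rq,
				    void __user *data, int cmd);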
> @@ -1124,6 +1125,8 @@ int qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue,
>  			unsigned int offset, unsigned int hd_len,
>  			int elements_needed);
>  int qeth_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
> +int qeth_siocdevprivate(struct net_device *dev, struct ifreq *rq,
> +			void __user *data, int cmd);
>  void qeth_dbf_longtext(debug_info_t *id, int level, char *text, ...);
>  int qeth_configure_cq(struct qeth_card *, enum qeth_cq);
>  int qeth_hw_trap(struct qeth_card *, enum qeth_diags_trap_action);
> diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
> index 62f88ccbd03f..be19cfd05136 100644
> --- a/drivers/s390/net/qeth_core_main.c
> +++ b/drivers/s390/net/qeth_core_main.c
> @@ -6672,21 +6672,42 @@ struct qeth_card *qeth_get_card_by_busid(char *bus_id)
>  }
>  EXPORT_SYMBOL_GPL(qeth_get_card_by_busid);
>  
> -int qeth_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
> +int qeth_siocdevprivate(struct net_device *dev, struct ifreq *rq, void __user *data, int cmd)
>  {
>  	struct qeth_card *card = dev->ml_priv;
> -	struct mii_ioctl_data *mii_data;
>  	int rc = 0;
>  
>  	switch (cmd) {
>  	case SIOC_QETH_ADP_SET_SNMP_CONTROL:
> -		rc = qeth_snmp_command(card, rq->ifr_ifru.ifru_data);
> +		rc = qeth_snmp_command(card, data);
>  		break;
>  	case SIOC_QETH_GET_CARD_TYPE:
>  		if ((IS_OSD(card) || IS_OSM(card) || IS_OSX(card)) &&
>  		    !IS_VM_NIC(card))
>  			return 1;
>  		return 0;
> +	case SIOC_QETH_QUERY_OAT:
> +		rc = qeth_query_oat_command(card, data);
> +		break;
> +	default:
> +		if (card->discipline->do_ioctl)
> +			rc = card->discipline->do_ioctl(dev, rq, data, cmd);
> +		else
> +			rc = -EOPNOTSUPP;
> +	}
> +	if (rc)
> +		QETH_CARD_TEXT_(card, 2, "ioce%x", rc);
> +	return rc;
> +}
> +EXPORT_SYMBOL_GPL(qeth_siocdevprivate);
> +

Looks like you missed wiring this up in our netdev_ops structs.
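
Concretely, I'd expect something along these lines (just a sketch; other entries elided, table names as they exist in qeth_l2_main.c / qeth_l3_main.c today):

	static const struct net_device_ops qeth_l2_netdev_ops = {
		/* ... existing entries unchanged ... */
		.ndo_siocdevprivate	= qeth_siocdevprivate,	/* new hook */
	};

and the same for the l3 variants, so that SIOCDEVPRIVATE cmds actually reach the new handler.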
