Message-ID: <a0f9b8e801f99c726f655c2a81b39fe5@mail.gmail.com>
Date:   Mon, 19 Dec 2016 20:51:04 -0500
From:   Sasikumar PC <sasikumar.pc@...adcom.com>
To:     Tomas Henzl <thenzl@...hat.com>, jejb@...nel.org, hch@...radead.org
Cc:     linux-scsi@...r.kernel.org,
        Sathya Prakash Veerichetty <sathya.prakash@...adcom.com>,
        linux-kernel@...r.kernel.org,
        Christopher Owens <christopher.owens@...adcom.com>,
        Kiran Kumar Kasturi <kiran-kumar.kasturi@...adcom.com>
Subject: RE: [PATCH V4 06/11] megaraid_sas: Dynamic Raid Map Changes for
 SAS3.5 Generic Megaraid Controllers

Hi Tomas,

Please see my response inline

Thanks
sasi

-----Original Message-----
From: Sasikumar PC [mailto:sasikumar.pc@...adcom.com]
Sent: Wednesday, December 14, 2016 4:49 PM
To: 'Tomas Henzl'; 'jejb@...nel.org'; 'hch@...radead.org'
Cc: 'linux-scsi@...r.kernel.org'; Sathya Prakash Veerichetty;
'linux-kernel@...r.kernel.org'; Christopher Owens; Kiran Kumar Kasturi
Subject: RE: [PATCH V4 06/11] megaraid_sas: Dynamic Raid Map Changes for
SAS3.5 Generic Megaraid Controllers

Hi Tomas,

Please see my response inline

Thanks
sasi

-----Original Message-----
From: Tomas Henzl [mailto:thenzl@...hat.com]
Sent: Friday, December 09, 2016 7:55 AM
To: Sasikumar Chandrasekaran; jejb@...nel.org; hch@...radead.org
Cc: linux-scsi@...r.kernel.org; Sathya.Prakash@...adcom.com;
linux-kernel@...r.kernel.org; christopher.owens@...adcom.com;
kiran-kumar.kasturi@...adcom.com
Subject: Re: [PATCH V4 06/11] megaraid_sas: Dynamic Raid Map Changes for
SAS3.5 Generic Megaraid Controllers

On 7.12.2016 00:00, Sasikumar Chandrasekaran wrote:
> SAS3.5 Generic Megaraid Controllers' FW supports a new dynamic RaidMap
> with different sizes for different numbers of supported VDs.
>
> This patch depends on patch 5.
>
> Signed-off-by: Sasikumar Chandrasekaran <sasikumar.pc@...adcom.com>
> ---
>  drivers/scsi/megaraid/megaraid_sas.h        |   7 +
>  drivers/scsi/megaraid/megaraid_sas_base.c   |  61 ++++--
>  drivers/scsi/megaraid/megaraid_sas_fp.c     | 303 ++++++++++++++++++++++++----
>  drivers/scsi/megaraid/megaraid_sas_fusion.c | 223 ++++++++++++++++----
>  drivers/scsi/megaraid/megaraid_sas_fusion.h | 240 ++++++++++++++++++----
>  5 files changed, 699 insertions(+), 135 deletions(-)
>
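For readers of the series: the firmware advertises the maximum RAID map
size in scratch pad 3 in 64k units, and the driver simply multiplies it
out. A minimal sketch of the derivation, using only the macros this
patch adds (an illustration, not code from the patch):

	static u32 ventura_raid_map_size(u32 scratch_pad_3)
	{
		u16 max_raid_mapsize;

		/* bits 24:16 of scratch_pad_3 hold the size in 64k units */
		max_raid_mapsize = (scratch_pad_3 >>
			MR_MAX_RAID_MAP_SIZE_OFFSET_SHIFT) &
			MR_MAX_RAID_MAP_SIZE_MASK;

		return max_raid_mapsize * MR_MIN_MAP_SIZE; /* units of 64k */
	}
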
> diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h
> index f4d6a94..3e087ab 100644
> --- a/drivers/scsi/megaraid/megaraid_sas.h
> +++ b/drivers/scsi/megaraid/megaraid_sas.h
> @@ -1434,6 +1434,12 @@ enum FW_BOOT_CONTEXT {
>  #define MR_MAX_REPLY_QUEUES_EXT_OFFSET_SHIFT    14
>  #define MR_MAX_MSIX_REG_ARRAY                   16
>  #define MR_RDPQ_MODE_OFFSET			0X00800000
> +
> +#define MR_MAX_RAID_MAP_SIZE_OFFSET_SHIFT	16
> +#define MR_MAX_RAID_MAP_SIZE_MASK		0x1FF
> +#define MR_MIN_MAP_SIZE				0x10000
> +/* 64k */
> +
>  #define MR_CAN_HANDLE_SYNC_CACHE_OFFSET		0X01000000
>
>  /*
> @@ -2152,6 +2158,7 @@ struct megasas_instance {
>  	bool fw_sync_cache_support;
>  	bool is_ventura;
>  	bool msix_combined;
> +	u16 max_raid_mapsize;
>  };
>  struct MR_LD_VF_MAP {
>  	u32 size;
> diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
> index c52f7be..3f06b57 100644
> --- a/drivers/scsi/megaraid/megaraid_sas_base.c
> +++ b/drivers/scsi/megaraid/megaraid_sas_base.c
> @@ -4424,8 +4424,7 @@ int megasas_alloc_cmds(struct megasas_instance *instance)
>  static void megasas_update_ext_vd_details(struct megasas_instance *instance)
>  {
>  	struct fusion_context *fusion;
> -	u32 old_map_sz;
> -	u32 new_map_sz;
> +	u32 ventura_map_sz = 0;
>
>  	fusion = instance->ctrl_context;
>  	/* For MFI based controllers return dummy success */
> @@ -4455,21 +4454,39 @@ static void megasas_update_ext_vd_details(struct megasas_instance *instance)
>  		instance->supportmax256vd ? "Extended VD(240 VD)firmware" :
>  		"Legacy(64 VD) firmware");
>
> -	old_map_sz = sizeof(struct MR_FW_RAID_MAP) +
> -				(sizeof(struct MR_LD_SPAN_MAP) *
> -				(instance->fw_supported_vd_count - 1));
> -	new_map_sz = sizeof(struct MR_FW_RAID_MAP_EXT);
> -	fusion->drv_map_sz = sizeof(struct MR_DRV_RAID_MAP) +
> -				(sizeof(struct MR_LD_SPAN_MAP) *
> -				(instance->drv_supported_vd_count - 1));
> -
> -	fusion->max_map_sz = max(old_map_sz, new_map_sz);
> +	if (instance->max_raid_mapsize) {
> +		ventura_map_sz = instance->max_raid_mapsize *
> +						MR_MIN_MAP_SIZE; /* 64k */
> +		fusion->current_map_sz = ventura_map_sz;
> +		fusion->max_map_sz = ventura_map_sz;
> +	} else {
> +		fusion->old_map_sz =  sizeof(struct MR_FW_RAID_MAP) +
> +					(sizeof(struct MR_LD_SPAN_MAP) *
> +					(instance->fw_supported_vd_count - 1));
> +		fusion->new_map_sz =  sizeof(struct MR_FW_RAID_MAP_EXT);
>
> +		fusion->max_map_sz =
> +			max(fusion->old_map_sz, fusion->new_map_sz);
>
> -	if (instance->supportmax256vd)
> -		fusion->current_map_sz = new_map_sz;
> -	else
> -		fusion->current_map_sz = old_map_sz;
> +		if (instance->supportmax256vd)
> +			fusion->current_map_sz = fusion->new_map_sz;
> +		else
> +			fusion->current_map_sz = fusion->old_map_sz;
> +	}
> +	/* irrespective of FW raid maps, driver raid map is constant */
> +	fusion->drv_map_sz = sizeof(struct MR_DRV_RAID_MAP_ALL);
> +#if VD_EXT_DEBUG
> +	dev_info(&instance->pdev->dev, "instance->max_raid_mapsize 0x%x \n",
> +			instance->max_raid_mapsize);
> +	dev_info(&instance->pdev->dev,
> +	"new_map_sz = 0x%x, old_map_sz = 0x%x, "
> +	"ventura_map_sz = 0x%x, current_map_sz = 0x%x "
> +	"fusion->drv_map_sz =0x%x, size of driver raid map 0x%lx\n",
> +	fusion->new_map_sz, fusion->old_map_sz,
> +	ventura_map_sz, fusion->current_map_sz,
> +	fusion->drv_map_sz,
> +	sizeof(struct MR_DRV_RAID_MAP_ALL));
> +#endif
>  }
>
>  /**
> @@ -5010,7 +5027,7 @@ static int megasas_init_fw(struct megasas_instance *instance)
>  {
>  	u32 max_sectors_1;
>  	u32 max_sectors_2;
> -	u32 tmp_sectors, msix_enable, scratch_pad_2;
> +	u32 tmp_sectors, msix_enable, scratch_pad_2, scratch_pad_3;
>  	resource_size_t base_addr;
>  	struct megasas_register_set __iomem *reg_set;
>  	struct megasas_ctrl_info *ctrl_info = NULL;
> @@ -5086,7 +5103,17 @@ static int megasas_init_fw(struct megasas_instance *instance)
>  			goto fail_ready_state;
>  	}
>
> -
> +	if (instance->is_ventura) {
> +		scratch_pad_3 =
> +			readl(&instance->reg_set->outbound_scratch_pad_3);
> +#if VD_EXT_DEBUG
> +		dev_info(&instance->pdev->dev, "scratch_pad3 0x%x\n",
> +			scratch_pad_3);
> +#endif
> +		instance->max_raid_mapsize = ((scratch_pad_3 >>
> +			MR_MAX_RAID_MAP_SIZE_OFFSET_SHIFT) &
> +			MR_MAX_RAID_MAP_SIZE_MASK);
> +	}
>
>  	/* Check if MSI-X is supported while in ready state */
>  	msix_enable = (instance->instancet->read_fw_status_reg(reg_set) &
> diff --git a/drivers/scsi/megaraid/megaraid_sas_fp.c b/drivers/scsi/megaraid/megaraid_sas_fp.c
> index eb9ff44..a6957a3 100644
> --- a/drivers/scsi/megaraid/megaraid_sas_fp.c
> +++ b/drivers/scsi/megaraid/megaraid_sas_fp.c
> @@ -179,18 +179,204 @@ void MR_PopulateDrvRaidMap(struct megasas_instance *instance)
>  	struct fusion_context *fusion = instance->ctrl_context;
>  	struct MR_FW_RAID_MAP_ALL     *fw_map_old    = NULL;
>  	struct MR_FW_RAID_MAP         *pFwRaidMap    = NULL;
> -	int i;
> +	int i, j;
>  	u16 ld_count;
> +	struct MR_FW_RAID_MAP_DYNAMIC *fw_map_dyn;
> +	struct MR_FW_RAID_MAP_EXT *fw_map_ext;
> +	struct MR_RAID_MAP_DESC_TABLE *desc_table;
>
>
>  	struct MR_DRV_RAID_MAP_ALL *drv_map =
>  			fusion->ld_drv_map[(instance->map_id & 1)];
>  	struct MR_DRV_RAID_MAP *pDrvRaidMap = &drv_map->raidMap;
> +	void *raid_map_data = NULL;
> +
> +	memset(drv_map, 0, fusion->drv_map_sz);
> +	memset(pDrvRaidMap->ldTgtIdToLd,
> +		0xff, (sizeof(u16) * MAX_LOGICAL_DRIVES_DYN));
> +
> +	if (instance->max_raid_mapsize) {
> +		fw_map_dyn = fusion->ld_map[(instance->map_id & 1)];
> +#if VD_EXT_DEBUG
> +		dev_dbg(&instance->pdev->dev,
> +		" raidMapSize 0x%x fw_map_dyn->descTableOffset 0x%x, "
> +		" descTableSize 0x%x descTableNumElements 0x%x\n",
> +		le32_to_cpu(fw_map_dyn->raid_map_size),
> +		le32_to_cpu(fw_map_dyn->desc_table_offset),
> +		le32_to_cpu(fw_map_dyn->desc_table_size),
> +		le32_to_cpu(fw_map_dyn->desc_table_num_elements));
> +		dev_dbg(&instance->pdev->dev,
> +		"drv map %p ldCount %d\n", drv_map, fw_map_dyn->ld_count);
> +#endif
> +		desc_table =
> +		(struct MR_RAID_MAP_DESC_TABLE *)((void *)fw_map_dyn +
> +		le32_to_cpu(fw_map_dyn->desc_table_offset));
> +		if (desc_table != fw_map_dyn->raid_map_desc_table) {
> +			dev_err(&instance->pdev->dev,
> +			"offsets of desc table are not matching returning "
> +			" FW raid map has been changed: desc %p original %p\n",
> +			desc_table, fw_map_dyn->raid_map_desc_table);
> +		}
> +		ld_count = (u16)le16_to_cpu(fw_map_dyn->ld_count);
> +		pDrvRaidMap->ldCount = (__le16)cpu_to_le16(ld_count);
> +		pDrvRaidMap->fpPdIoTimeoutSec = fw_map_dyn->fp_pd_io_timeout_sec;
> +		pDrvRaidMap->totalSize = sizeof(struct MR_DRV_RAID_MAP_ALL);
> +		/* point to actual data starting point*/
> +		raid_map_data = (void *)fw_map_dyn +
> +			le32_to_cpu(fw_map_dyn->desc_table_offset) +
> +			le32_to_cpu(fw_map_dyn->desc_table_size);
> +
> +		for (i = 0; i < le32_to_cpu(fw_map_dyn->desc_table_num_elements); ++i) {
> +			if (!desc_table) {
> +				dev_err(&instance->pdev->dev,
> +				"desc table is null, coming out %p \n", desc_table);
> +				return;
> +			}

Is it possible for desc_table ever to be NULL? It looks like a
different test was intended here?

It is a defensive check; yes, it can never be true and can be removed.
Sasi
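
If a sanity check is still wanted at this point, a bounds check on the
descriptor table would be meaningful where the NULL test is not; a
hypothetical sketch (not part of the patch):

	/* validate the descriptor table against the advertised map size
	 * instead of a pointer test that can never fire
	 */
	if (le32_to_cpu(fw_map_dyn->desc_table_offset) +
	    le32_to_cpu(fw_map_dyn->desc_table_size) >
	    le32_to_cpu(fw_map_dyn->raid_map_size)) {
		dev_err(&instance->pdev->dev,
			"desc table exceeds raid map bounds\n");
		return;
	}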


Also, all the VD_EXT_DEBUG macros added with bad indentation make the
code hard to read. Please fix it later in a follow-up patch.


Sasi - This shall be fixed in the V5 patch

> +#if VD_EXT_DEBUG
> +			dev_err(&instance->pdev->dev,
> +				"desc table %p \n", desc_table);
> +			dev_err(&instance->pdev->dev,
> +			"raidmap type %d, raidmapOffset 0x%x, "
> +			" raid map number of elements 0%x, raidmapsize 0x%x\n",
> +			desc_table->raid_map_desc_type,
> +			desc_table->raid_map_desc_offset,
> +			desc_table->raid_map_desc_elements,
> +			desc_table->raid_map_desc_buffer_size);
> +#endif
> +			switch (le32_to_cpu(desc_table->raid_map_desc_type)) {
> +			case RAID_MAP_DESC_TYPE_DEVHDL_INFO:
> +				fw_map_dyn->dev_hndl_info = (struct MR_DEV_HANDLE_INFO *)
> +				(raid_map_data +
> +				le32_to_cpu(desc_table->raid_map_desc_offset));
> +#if VD_EXT_DEBUG
> +				dev_err(&instance->pdev->dev,
> +				"devHndlInfo  address %p\n",
> +				 fw_map_dyn->dev_hndl_info);
> +#endif
> +				memcpy(pDrvRaidMap->devHndlInfo, fw_map_dyn->dev_hndl_info,
> +					sizeof(struct MR_DEV_HANDLE_INFO) *
> +					le32_to_cpu(desc_table->raid_map_desc_elements));
> +			break;
> +			case RAID_MAP_DESC_TYPE_TGTID_INFO:
> +					fw_map_dyn->ld_tgt_id_to_ld = (u16 *) (raid_map_data +
> +					le32_to_cpu(desc_table->raid_map_desc_offset));
> +#if VD_EXT_DEBUG
> +			dev_err(&instance->pdev->dev,
> +				"ldTgtIdToLd  address %p\n",
> +					fw_map_dyn->ld_tgt_id_to_ld);
> +#endif
> +			for (j = 0; j < le32_to_cpu(desc_table->raid_map_desc_elements); j++) {
> +				pDrvRaidMap->ldTgtIdToLd[j] =
> +					fw_map_dyn->ld_tgt_id_to_ld[j];
> +#if VD_EXT_DEBUG
> +				dev_err(&instance->pdev->dev,
> +					" %d drv ldTgtIdToLd %d\n",
> +						j, pDrvRaidMap->ldTgtIdToLd[j]);
> +#endif
> +			}
> +			break;
> +			case RAID_MAP_DESC_TYPE_ARRAY_INFO:
> +				fw_map_dyn->ar_map_info = (struct MR_ARRAY_INFO *)
> +				(raid_map_data + le32_to_cpu(desc_table->raid_map_desc_offset));
> +#if VD_EXT_DEBUG
> +				dev_err(&instance->pdev->dev,
> +					"arMapInfo  address %p\n",
> +					 fw_map_dyn->ar_map_info);
> +#endif
> +
> +				memcpy(pDrvRaidMap->arMapInfo,
> +				fw_map_dyn->ar_map_info,
> +				sizeof(struct MR_ARRAY_INFO) *
> +				le32_to_cpu(desc_table->raid_map_desc_elements));
> +			break;
> +			case RAID_MAP_DESC_TYPE_SPAN_INFO:
> +				fw_map_dyn->ld_span_map = (struct MR_LD_SPAN_MAP *)
> +				(raid_map_data + le32_to_cpu(desc_table->raid_map_desc_offset));
> +				memcpy(pDrvRaidMap->ldSpanMap,
> +				fw_map_dyn->ld_span_map,
> +				sizeof(struct MR_LD_SPAN_MAP) *
> +				le32_to_cpu(desc_table->raid_map_desc_elements));
> +#if VD_EXT_DEBUG
> +				dev_err(&instance->pdev->dev,
> +					"ldSpanMap  address %p\n",
> +						fw_map_dyn->ld_span_map);
> +				dev_err(&instance->pdev->dev,
> +				"MR_LD_SPAN_MAP size 0x%lx\n", sizeof(struct MR_LD_SPAN_MAP));
> +				for (j = 0; j < ld_count; j++) {
> +					printk(KERN_DEBUG "megaraid_sas(%d) : fw_map_dyn->ldSpanMap[%d].ldRaid.targetId 0x%x fw_map_dyn->ldSpanMap[%d].ldRaid.seqNum 0x%x size 0x%x\n",
> +					j, j, fw_map_dyn->ld_span_map[j].ldRaid.targetId, j,
> +					fw_map_dyn->ld_span_map[j].ldRaid.seqNum,
> +					(u32)fw_map_dyn->ld_span_map[j].ldRaid.rowSize);
> +					printk(KERN_DEBUG "megaraid_sas(%d) :pDrvRaidMap->ldSpanMap[%d].ldRaid.targetId 0x%x pDrvRaidMap->ldSpanMap[%d].ldRaid.seqNum 0x%x size 0x%x\n",
> +					j, j, pDrvRaidMap->ldSpanMap[j].ldRaid.targetId, j,
> +					pDrvRaidMap->ldSpanMap[j].ldRaid.seqNum,
> +					(u32)pDrvRaidMap->ldSpanMap[j].ldRaid.rowSize);
> +					printk(KERN_DEBUG "megaraid_sas(%d) : drv raid map all %p raid map %p LD RAID MAP %p/%p\n",
> +					instance->unique_id, drv_map, pDrvRaidMap,
> +					&fw_map_dyn->ld_span_map[j].ldRaid,
> +					&pDrvRaidMap->ldSpanMap[j].ldRaid);
> +				}
> +#endif
> +			break;
> +			default:
> +				dev_err(&instance->pdev->dev,
> +					"wrong number of desctableElements %d\n",
> +					fw_map_dyn->desc_table_num_elements);
> +			}
> +			++desc_table;
> +		}
> +
> +	} else if (instance->supportmax256vd) {
> +		fw_map_ext =
> +		(struct MR_FW_RAID_MAP_EXT *) fusion->ld_map[(instance->map_id & 1)];
> +		ld_count = (u16)le16_to_cpu(fw_map_ext->ldCount);
> +		if (ld_count > MAX_LOGICAL_DRIVES_EXT) {
> +			printk(KERN_DEBUG "megaraid_sas: LD count exposed in RAID map in not valid\n");
> +			return;
> +		}
> +#if VD_EXT_DEBUG
> +		for (i = 0; i < ld_count; i++) {
> +			printk(KERN_DEBUG "megaraid_sas(%d) :Index 0x%x Target Id 0x%x Seq Num 0x%x Size 0/%llx\n",
> +				instance->unique_id,
> +				i, fw_map_ext->ldSpanMap[i].ldRaid.targetId,
> +				fw_map_ext->ldSpanMap[i].ldRaid.seqNum,
> +				fw_map_ext->ldSpanMap[i].ldRaid.size);
> +		}
> +#endif
> +
> +		pDrvRaidMap->ldCount = (__le16)cpu_to_le16(ld_count);
> +		pDrvRaidMap->fpPdIoTimeoutSec =
> +					fw_map_ext->fpPdIoTimeoutSec;
> +		for (i = 0; i < (MAX_LOGICAL_DRIVES_EXT); i++)
> +			pDrvRaidMap->ldTgtIdToLd[i] =
> +			(u16)fw_map_ext->ldTgtIdToLd[i];
> +		memcpy(pDrvRaidMap->ldSpanMap,
> +				fw_map_ext->ldSpanMap,
> +				sizeof(struct MR_LD_SPAN_MAP) *
> +				ld_count);
> +#if VD_EXT_DEBUG
> +		for (i = 0; i < ld_count; i++) {
> +			printk(KERN_DEBUG "megaraid_sas(%d) : fw_map_ext->ldSpanMap[%d].ldRaid.targetId 0x%x fw_map_ext->ldSpanMap[%d].ldRaid.seqNum 0x%x size 0x%x\n",
> +			i, i, fw_map_ext->ldSpanMap[i].ldRaid.targetId, i,
> +			fw_map_ext->ldSpanMap[i].ldRaid.seqNum,
> +			(u32)fw_map_ext->ldSpanMap[i].ldRaid.rowSize);
> +			printk(KERN_DEBUG "megaraid_sas(%d) : pDrvRaidMap->ldSpanMap[%d].ldRaid.targetId 0x%x"
> +			"pDrvRaidMap->ldSpanMap[%d].ldRaid.seqNum 0x%x size 0x%x\n",
> +			i, i, pDrvRaidMap->ldSpanMap[i].ldRaid.targetId, i,
> +			pDrvRaidMap->ldSpanMap[i].ldRaid.seqNum,
> +			(u32)pDrvRaidMap->ldSpanMap[i].ldRaid.rowSize);
> +			printk(KERN_DEBUG "megaraid_sas(%d) : drv raid map all %p raid map %p LD RAID MAP %p %p\n",
> +			instance->unique_id, drv_map, pDrvRaidMap,
> +			&fw_map_ext->ldSpanMap[i].ldRaid,
> +				&pDrvRaidMap->ldSpanMap[i].ldRaid);
> +		}
> +#endif
> +		memcpy(pDrvRaidMap->arMapInfo, fw_map_ext->arMapInfo,
> +			sizeof(struct MR_ARRAY_INFO) * MAX_API_ARRAYS_EXT);
> +		memcpy(pDrvRaidMap->devHndlInfo, fw_map_ext->devHndlInfo,
> +			sizeof(struct MR_DEV_HANDLE_INFO) *
> +					MAX_RAIDMAP_PHYSICAL_DEVICES);
>
> -	if (instance->supportmax256vd) {
> -		memcpy(fusion->ld_drv_map[instance->map_id & 1],
> -			fusion->ld_map[instance->map_id & 1],
> -			fusion->current_map_sz);
>  		/* New Raid map will not set totalSize, so keep expected value
>  		 * for legacy code in ValidateMapInfo
>  		 */
> @@ -213,16 +399,12 @@ void MR_PopulateDrvRaidMap(struct megasas_instance *instance)
>  		}
>  #endif
>
> -		memset(drv_map, 0, fusion->drv_map_sz);
>  		pDrvRaidMap->totalSize = pFwRaidMap->totalSize;
>  		pDrvRaidMap->ldCount = (__le16)cpu_to_le16(ld_count);
>  		pDrvRaidMap->fpPdIoTimeoutSec = pFwRaidMap->fpPdIoTimeoutSec;
>  		for (i = 0; i < MAX_RAIDMAP_LOGICAL_DRIVES + MAX_RAIDMAP_VIEWS; i++)
>  			pDrvRaidMap->ldTgtIdToLd[i] =
>  				(u8)pFwRaidMap->ldTgtIdToLd[i];
> -		for (i = (MAX_RAIDMAP_LOGICAL_DRIVES + MAX_RAIDMAP_VIEWS);
> -			i < MAX_LOGICAL_DRIVES_EXT; i++)
> -			pDrvRaidMap->ldTgtIdToLd[i] = 0xff;
>  		for (i = 0; i < ld_count; i++) {
>  			pDrvRaidMap->ldSpanMap[i] = pFwRaidMap->ldSpanMap[i];
>  #if VD_EXT_DEBUG
> @@ -279,7 +461,9 @@ u8 MR_ValidateMapInfo(struct megasas_instance *instance)
>  	lbInfo = fusion->load_balance_info;
>  	ldSpanInfo = fusion->log_to_span;
>
> -	if (instance->supportmax256vd)
> +	if (instance->max_raid_mapsize)
> +		expected_size = sizeof(struct MR_DRV_RAID_MAP_ALL);
> +	else if (instance->supportmax256vd)
>  		expected_size = sizeof(struct MR_FW_RAID_MAP_EXT);
>  	else
>  		expected_size =
> @@ -287,8 +471,10 @@ u8 MR_ValidateMapInfo(struct megasas_instance *instance)
>  			(sizeof(struct MR_LD_SPAN_MAP) * le16_to_cpu(pDrvRaidMap->ldCount)));
>
>  	if (le32_to_cpu(pDrvRaidMap->totalSize) != expected_size) {
> -		dev_err(&instance->pdev->dev, "map info structure size 0x%x is not matching with ld count\n",
> -		       (unsigned int) expected_size);
> +		dev_err(&instance->pdev->dev, "megasas: map info structure"
> +		" size 0x%x is not matching expected size 0x%x\n",
> +			le32_to_cpu(pDrvRaidMap->totalSize),
> +			(unsigned int) expected_size);
>  		dev_err(&instance->pdev->dev, "megasas: span map %x, pDrvRaidMap->totalSize : %x\n",
>  			(unsigned int)sizeof(struct MR_LD_SPAN_MAP),
>  			le32_to_cpu(pDrvRaidMap->totalSize));
> @@ -787,7 +973,7 @@ static u8 mr_spanset_get_phy_params(struct megasas_instance *instance, u32 ld,
>  			((fusion->adapter_type == THUNDERBOLT_SERIES)  ||
>  			((fusion->adapter_type == INVADER_SERIES) &&
>  			(raid->regTypeReqOnRead != REGION_TYPE_UNUSED))))
> -			pRAID_Context->regLockFlags = REGION_TYPE_EXCLUSIVE;
> +			pRAID_Context->reg_lock_flags = REGION_TYPE_EXCLUSIVE;
>  		else if (raid->level == 1) {
>  			physArm = physArm + 1;
>  			pd = MR_ArPdGet(arRef, physArm, map);
> @@ -797,9 +983,16 @@ static u8 mr_spanset_get_phy_params(struct megasas_instance *instance, u32 ld,
>  	}
>
>  	*pdBlock += stripRef + le64_to_cpu(MR_LdSpanPtrGet(ld, span, map)->startBlk);
> -	pRAID_Context->spanArm = (span << RAID_CTX_SPANARM_SPAN_SHIFT) |
> -					physArm;
> -	io_info->span_arm = pRAID_Context->spanArm;
> +	if (instance->is_ventura) {
> +		((struct RAID_CONTEXT_G35 *) pRAID_Context)->span_arm =
> +			(span << RAID_CTX_SPANARM_SPAN_SHIFT) | physArm;
> +		io_info->span_arm =
> +			(span << RAID_CTX_SPANARM_SPAN_SHIFT) | physArm;
> +	} else {
> +		pRAID_Context->span_arm =
> +			(span << RAID_CTX_SPANARM_SPAN_SHIFT) | physArm;
> +		io_info->span_arm = pRAID_Context->span_arm;
> +	}
>  	return retval;
>  }
>
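As a reading aid for the hunks above and below: span_arm packs the span
into bits 7:5 and the arm into bits 4:0 (see the span[7:5], arm[4:0]
comment on RAID_CONTEXT_G35 later in this patch). A hypothetical decode,
assuming the driver's existing shift value:

	u8 span = span_arm >> RAID_CTX_SPANARM_SPAN_SHIFT;	/* bits 7:5 */
	u8 arm = span_arm & 0x1f;				/* bits 4:0 */
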
> @@ -891,7 +1084,7 @@ u8 MR_GetPhyParams(struct megasas_instance *instance, u32 ld, u64 stripRow,
>  			((fusion->adapter_type == THUNDERBOLT_SERIES)  ||
>  			((fusion->adapter_type == INVADER_SERIES) &&
>  			(raid->regTypeReqOnRead != REGION_TYPE_UNUSED))))
> -			pRAID_Context->regLockFlags = REGION_TYPE_EXCLUSIVE;
> +			pRAID_Context->reg_lock_flags = REGION_TYPE_EXCLUSIVE;
>  		else if (raid->level == 1) {
>  			/* Get alternate Pd. */
>  			physArm = physArm + 1;
> @@ -903,9 +1096,16 @@ u8 MR_GetPhyParams(struct megasas_instance *instance, u32 ld, u64 stripRow,
>  	}
>
>  	*pdBlock += stripRef + le64_to_cpu(MR_LdSpanPtrGet(ld, span, map)->startBlk);
> -	pRAID_Context->spanArm = (span << RAID_CTX_SPANARM_SPAN_SHIFT) |
> -		physArm;
> -	io_info->span_arm = pRAID_Context->spanArm;
> +	if (instance->is_ventura) {
> +		((struct RAID_CONTEXT_G35 *) pRAID_Context)->span_arm =
> +				(span << RAID_CTX_SPANARM_SPAN_SHIFT) | physArm;
> +		io_info->span_arm =
> +				(span << RAID_CTX_SPANARM_SPAN_SHIFT) | physArm;
> +	} else {
> +		pRAID_Context->span_arm =
> +			(span << RAID_CTX_SPANARM_SPAN_SHIFT) | physArm;
> +		io_info->span_arm = pRAID_Context->span_arm;
> +	}
>  	return retval;
>  }
>
> @@ -1109,20 +1309,20 @@ u8 MR_GetPhyParams(struct megasas_instance *instance, u32 ld, u64 stripRow,
>  			regSize += stripSize;
>  	}
>
> -	pRAID_Context->timeoutValue =
> +	pRAID_Context->timeout_value =
>  		cpu_to_le16(raid->fpIoTimeoutForLd ?
>  			    raid->fpIoTimeoutForLd :
>  			    map->raidMap.fpPdIoTimeoutSec);
>  	if (fusion->adapter_type == INVADER_SERIES)
> -		pRAID_Context->regLockFlags = (isRead) ?
> +		pRAID_Context->reg_lock_flags = (isRead) ?
>  			raid->regTypeReqOnRead : raid->regTypeReqOnWrite;
> -	else
> -		pRAID_Context->regLockFlags = (isRead) ?
> +	else if (!instance->is_ventura)
> +		pRAID_Context->reg_lock_flags = (isRead) ?
>  			REGION_TYPE_SHARED_READ : raid->regTypeReqOnWrite;
> -	pRAID_Context->VirtualDiskTgtId = raid->targetId;
> -	pRAID_Context->regLockRowLBA    = cpu_to_le64(regStart);
> -	pRAID_Context->regLockLength    = cpu_to_le32(regSize);
> -	pRAID_Context->configSeqNum	= raid->seqNum;
> +	pRAID_Context->virtual_disk_tgt_id = raid->targetId;
> +	pRAID_Context->reg_lock_row_lba    = cpu_to_le64(regStart);
> +	pRAID_Context->reg_lock_length    = cpu_to_le32(regSize);
> +	pRAID_Context->config_seq_num	= raid->seqNum;
>  	/* save pointer to raid->LUN array */
>  	*raidLUN = raid->LUN;
>
> @@ -1140,6 +1340,14 @@ u8 MR_GetPhyParams(struct megasas_instance *instance, u32 ld, u64 stripRow,
>  		/* If IO on an invalid Pd, then FP is not possible.*/
>  		if (io_info->devHandle == cpu_to_le16(MR_PD_INVALID))
>  			io_info->fpOkForIo = FALSE;
> +		/* if FP possible, set the SLUD bit in
> +		*  regLockFlags for ventura
> +		*/
> +		else if ((instance->is_ventura) && !isRead &&
> +				(raid->writeMode == MR_RL_WRITE_BACK_MODE) &&
> +				raid->capability.fp_cache_bypass_capable)
> +				((struct RAID_CONTEXT_G35 *) pRAID_Context)->routing_flags.bits.sld
> +					= 1;
>  		/* set raid 1/10 fast path write capable bit in io_info */
>  		if (io_info->fpOkForIo &&
>  		    (io_info->r1_alt_dev_handle != MR_PD_INVALID) &&
> @@ -1319,6 +1527,7 @@ u8 megasas_get_best_arm_pd(struct megasas_instance *instance,
>  	struct fusion_context *fusion;
>  	struct MR_LD_RAID  *raid;
>  	struct MR_DRV_RAID_MAP_ALL *drv_map;
> +	u16	pd1_dev_handle;
>  	u16     pend0, pend1, ld;
>  	u64     diff0, diff1;
>  	u8      bestArm, pd0, pd1, span, arm;
> @@ -1344,23 +1553,37 @@ u8 megasas_get_best_arm_pd(struct megasas_instance *instance,
>  	pd1 = MR_ArPdGet(arRef, (arm + 1) >= span_row_size ?
>  		(arm + 1 - span_row_size) : arm + 1, drv_map);
>
> -	/* get the pending cmds for the data and mirror arms */
> -	pend0 = atomic_read(&lbInfo->scsi_pending_cmds[pd0]);
> -	pend1 = atomic_read(&lbInfo->scsi_pending_cmds[pd1]);
> +	/* Get PD1 Dev Handle */
>
> -	/* Determine the disk whose head is nearer to the req. block */
> -	diff0 = ABS_DIFF(block, lbInfo->last_accessed_block[pd0]);
> -	diff1 = ABS_DIFF(block, lbInfo->last_accessed_block[pd1]);
> -	bestArm = (diff0 <= diff1 ? arm : arm ^ 1);
> +	pd1_dev_handle = MR_PdDevHandleGet(pd1, drv_map);
>
> -	if ((bestArm == arm && pend0 > pend1 + lb_pending_cmds)  ||
> +	if (pd1_dev_handle == MR_PD_INVALID) {
> +		bestArm = arm;
> +	} else {
> +		/* get the pending cmds for the data and mirror arms */
> +		pend0 = atomic_read(&lbInfo->scsi_pending_cmds[pd0]);
> +		pend1 = atomic_read(&lbInfo->scsi_pending_cmds[pd1]);
> +
> +		/* Determine the disk whose head is nearer to the req. block */
> +		diff0 = ABS_DIFF(block, lbInfo->last_accessed_block[pd0]);
> +		diff1 = ABS_DIFF(block, lbInfo->last_accessed_block[pd1]);
> +		/*bestArm = diff0<=diff1 ? arm : arm ^ 1;*/
> +		bestArm = (diff0 <= diff1 ? arm : arm ^ 1);

The above comment is useless and the parentheses are not needed;
again, please clean it up in a follow-up series.
Sasi - This shall be fixed in the V5 patch

tomash

> +
> +		/* Make balance count from 16 to 4 to
> +		*  keep driver in sync with Firmware
> +		*/
> +		if ((bestArm == arm && pend0 > pend1 + lb_pending_cmds) ||
> +			(bestArm != arm && pend1 > pend0 + lb_pending_cmds))
> -		bestArm ^= 1;
> +			bestArm ^= 1;
> +
> +		/* Update the last accessed block on the correct pd */
> +		io_info->span_arm =
> +			(span << RAID_CTX_SPANARM_SPAN_SHIFT) | bestArm;
> +		io_info->pd_after_lb = (bestArm == arm) ? pd0 : pd1;
> +	}
>
> -	/* Update the last accessed block on the correct pd */
> -	io_info->pd_after_lb = (bestArm == arm) ? pd0 : pd1;
>  	lbInfo->last_accessed_block[io_info->pd_after_lb] = block + count - 1;
> -	io_info->span_arm = (span << RAID_CTX_SPANARM_SPAN_SHIFT) | bestArm;
>  #if SPAN_DEBUG
>  	if (arm != bestArm)
>  		dev_dbg(&instance->pdev->dev, "LSI Debug R1 Load balance "
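
Summarizing the load-balancing change above: when the mirror arm's
device handle is invalid the data arm is used unconditionally;
otherwise the arm whose head is nearest the requested block wins unless
its pending-command count exceeds the other arm's by more than
lb_pending_cmds. Distilled into a sketch (names taken from the patch;
not a drop-in function):

	static u8 pick_best_arm(struct LD_LOAD_BALANCE_INFO *lbInfo,
				u8 arm, u16 pd0, u16 pd1, u64 block,
				u16 lb_pending_cmds)
	{
		u64 diff0 = ABS_DIFF(block, lbInfo->last_accessed_block[pd0]);
		u64 diff1 = ABS_DIFF(block, lbInfo->last_accessed_block[pd1]);
		u16 pend0 = atomic_read(&lbInfo->scsi_pending_cmds[pd0]);
		u16 pend1 = atomic_read(&lbInfo->scsi_pending_cmds[pd1]);
		/* nearest head wins ... */
		u8 bestArm = (diff0 <= diff1) ? arm : arm ^ 1;

		/* ... unless its queue is deeper by more than the threshold */
		if ((bestArm == arm && pend0 > pend1 + lb_pending_cmds) ||
		    (bestArm != arm && pend1 > pend0 + lb_pending_cmds))
			bestArm ^= 1;

		return bestArm;
	}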
> diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c
> index 2fcd5cd..58f86aa 100644
> --- a/drivers/scsi/megaraid/megaraid_sas_fusion.c
> +++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c
> @@ -1834,7 +1834,7 @@ static void megasas_stream_detect(struct megasas_instance *instance,
>  			  struct megasas_cmd_fusion *cmd)
>  {
>  	u8 fp_possible;
> -	u32 start_lba_lo, start_lba_hi, device_id, datalength = 0;
> +	u32 start_lba_lo, start_lba_hi, device_id, datalength = 0, ld;
>  	struct MPI2_RAID_SCSI_IO_REQUEST *io_request;
>  	union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc;
>  	struct IO_REQUEST_INFO io_info;
> @@ -1842,16 +1842,18 @@ static void megasas_stream_detect(struct megasas_instance *instance,
>  	struct MR_DRV_RAID_MAP_ALL *local_map_ptr;
>  	u8 *raidLUN;
>  	unsigned long spinlock_flags;
> +	union RAID_CONTEXT_UNION *praid_context;
> +	struct MR_LD_RAID *raid;
>
>  	device_id = MEGASAS_DEV_INDEX(scp);
>
>  	fusion = instance->ctrl_context;
>
>  	io_request = cmd->io_request;
> -	io_request->RaidContext.raid_context.VirtualDiskTgtId =
> +	io_request->RaidContext.raid_context.virtual_disk_tgt_id =
>  		cpu_to_le16(device_id);
>  	io_request->RaidContext.raid_context.status = 0;
> -	io_request->RaidContext.raid_context.exStatus = 0;
> +	io_request->RaidContext.raid_context.ex_status = 0;
>
>  	req_desc = (union MEGASAS_REQUEST_DESCRIPTOR_UNION *)cmd->request_desc;
>
> @@ -1920,10 +1922,12 @@ static void megasas_stream_detect(struct megasas_instance *instance,
>  		io_info.isRead = 1;
>
>  	local_map_ptr = fusion->ld_drv_map[(instance->map_id & 1)];
> +	ld = MR_TargetIdToLdGet(device_id, local_map_ptr);
> +	raid = MR_LdRaidGet(ld, local_map_ptr);
>
>  	if ((MR_TargetIdToLdGet(device_id, local_map_ptr) >=
>  		instance->fw_supported_vd_count) || (!fusion->fast_path_io)) {
> -		io_request->RaidContext.raid_context.regLockFlags  = 0;
> +		io_request->RaidContext.raid_context.reg_lock_flags  = 0;
>  		fp_possible = 0;
>  	} else {
>  		if (MR_BuildRaidContext(instance, &io_info,
> @@ -1950,6 +1954,8 @@ static void megasas_stream_detect(struct megasas_instance *instance,
>  			fp_possible = false;
>  	}
>
> +	praid_context = &io_request->RaidContext;
> +
>  	if (fp_possible) {
>  		megasas_set_pd_lba(io_request, scp->cmd_len, &io_info, scp,
>  				   local_map_ptr, start_lba_lo);
> @@ -1958,18 +1964,26 @@ static void megasas_stream_detect(struct megasas_instance *instance,
>  			(MPI2_REQ_DESCRIPT_FLAGS_FP_IO
>  			 << MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
>  		if (fusion->adapter_type == INVADER_SERIES) {
> -			if (io_request->RaidContext.raid_context.regLockFlags ==
> +			if (io_request->RaidContext.raid_context.reg_lock_flags ==
>  			    REGION_TYPE_UNUSED)
>  				cmd->request_desc->SCSIIO.RequestFlags =
>  					(MEGASAS_REQ_DESCRIPT_FLAGS_NO_LOCK <<
>  					MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
> -			io_request->RaidContext.raid_context.Type
> +			io_request->RaidContext.raid_context.type
>  				= MPI2_TYPE_CUDA;
>  			io_request->RaidContext.raid_context.nseg = 0x1;
>  			io_request->IoFlags |= cpu_to_le16(MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH);
> -			io_request->RaidContext.raid_context.regLockFlags |=
> +			io_request->RaidContext.raid_context.reg_lock_flags |=
>  			  (MR_RL_FLAGS_GRANT_DESTINATION_CUDA |
>  			   MR_RL_FLAGS_SEQ_NUM_ENABLE);
> +		} else if (instance->is_ventura) {
> +			io_request->RaidContext.raid_context_g35.type
> +				= MPI2_TYPE_CUDA;
> +			io_request->RaidContext.raid_context_g35.nseg = 0x1;
> +			io_request->RaidContext.raid_context_g35.routing_flags.bits.sqn
> +				= 1;
> +			io_request->IoFlags |=
> +				cpu_to_le16(MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH);
>  		}
>  		if ((fusion->load_balance_info[device_id].loadBalanceFlag) &&
>  		    (io_info.isRead)) {
> @@ -1979,6 +1993,13 @@ static void megasas_stream_detect(struct megasas_instance *instance,
>  					&io_info);
>  			scp->SCp.Status |= MEGASAS_LOAD_BALANCE_FLAG;
>  			cmd->pd_r1_lb = io_info.pd_after_lb;
> +			if (instance->is_ventura)
> +				io_request->RaidContext.raid_context_g35.span_arm
> +					= io_info.span_arm;
> +			else
> +				io_request->RaidContext.raid_context.span_arm
> +					= io_info.span_arm;
> +
>  		} else
>  			scp->SCp.Status &= ~MEGASAS_LOAD_BALANCE_FLAG;
>
> @@ -1997,28 +2018,98 @@ static void megasas_stream_detect(struct megasas_instance *instance,
>  		io_request->DevHandle = io_info.devHandle;
>  		/* populate the LUN field */
>  		memcpy(io_request->LUN, raidLUN, 8);
> +		if (instance->is_ventura) {
> +			if (io_info.isRead) {
> +				if ((raid->cpuAffinity.pdRead.cpu0) &&
> +					(raid->cpuAffinity.pdRead.cpu1))
> +					praid_context->raid_context_g35.routing_flags.bits.cpu_sel
> +					= MR_RAID_CTX_CPUSEL_FCFS;
> +				else if (raid->cpuAffinity.pdRead.cpu1)
> +					praid_context->raid_context_g35.routing_flags.bits.cpu_sel
> +					= MR_RAID_CTX_CPUSEL_1;
> +				else
> +					praid_context->raid_context_g35.routing_flags.bits.cpu_sel
> +					= MR_RAID_CTX_CPUSEL_0;
> +			} else {
> +			if ((raid->cpuAffinity.pdWrite.cpu0)
> +			&& (raid->cpuAffinity.pdWrite.cpu1))
> +				praid_context->raid_context_g35.routing_flags.bits.cpu_sel
> +					= MR_RAID_CTX_CPUSEL_FCFS;
> +				else if (raid->cpuAffinity.pdWrite.cpu1)
> +					praid_context->raid_context_g35.routing_flags.bits.cpu_sel
> +					= MR_RAID_CTX_CPUSEL_1;
> +				else
> +					praid_context->raid_context_g35.routing_flags.bits.cpu_sel
> +					= MR_RAID_CTX_CPUSEL_0;
> +				if (praid_context->raid_context_g35.routing_flags.bits.sld) {
> +					praid_context->raid_context_g35.raid_flags
> +					= (MR_RAID_FLAGS_IO_SUB_TYPE_CACHE_BYPASS
> +					<< MR_RAID_CTX_RAID_FLAGS_IO_SUB_TYPE_SHIFT);
> +				}
> +			}
> +		}
>  	} else {
> -		io_request->RaidContext.raid_context.timeoutValue =
> +		io_request->RaidContext.raid_context.timeout_value =
>  			cpu_to_le16(local_map_ptr->raidMap.fpPdIoTimeoutSec);
>  		cmd->request_desc->SCSIIO.RequestFlags =
>  			(MEGASAS_REQ_DESCRIPT_FLAGS_LD_IO
>  			 << MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
>  		if (fusion->adapter_type == INVADER_SERIES) {
>  			if (io_info.do_fp_rlbypass ||
> -			(io_request->RaidContext.raid_context.regLockFlags
> +			(io_request->RaidContext.raid_context.reg_lock_flags
>  					== REGION_TYPE_UNUSED))
>  				cmd->request_desc->SCSIIO.RequestFlags =
>  					(MEGASAS_REQ_DESCRIPT_FLAGS_NO_LOCK <<
>  					MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
> -			io_request->RaidContext.raid_context.Type
> +			io_request->RaidContext.raid_context.type
>  				= MPI2_TYPE_CUDA;
> -			io_request->RaidContext.raid_context.regLockFlags |=
> +			io_request->RaidContext.raid_context.reg_lock_flags |=
>  				(MR_RL_FLAGS_GRANT_DESTINATION_CPU0 |
>  				 MR_RL_FLAGS_SEQ_NUM_ENABLE);
>  			io_request->RaidContext.raid_context.nseg = 0x1;
> +		} else if (instance->is_ventura) {
> +			io_request->RaidContext.raid_context_g35.type
> +			= MPI2_TYPE_CUDA;
> +			io_request->RaidContext.raid_context_g35.routing_flags.bits.sqn
> +			= 1;
> +			io_request->RaidContext.raid_context_g35.nseg = 0x1;
>  		}
>  		io_request->Function = MEGASAS_MPI2_FUNCTION_LD_IO_REQUEST;
>  		io_request->DevHandle = cpu_to_le16(device_id);
> +
> +		if (instance->is_ventura) {
> +			if (io_info.isRead) {
> +				if ((raid->cpuAffinity.ldRead.cpu0)
> +				&& (raid->cpuAffinity.ldRead.cpu1))
> +					praid_context->raid_context_g35.routing_flags.bits.cpu_sel
> +					= MR_RAID_CTX_CPUSEL_FCFS;
> +				else if (raid->cpuAffinity.ldRead.cpu1)
> +					praid_context->raid_context_g35.routing_flags.bits.cpu_sel
> +						= MR_RAID_CTX_CPUSEL_1;
> +				else
> +					praid_context->raid_context_g35.routing_flags.bits.cpu_sel
> +						= MR_RAID_CTX_CPUSEL_0;
> +			} else {
> +				if ((raid->cpuAffinity.ldWrite.cpu0) &&
> +					(raid->cpuAffinity.ldWrite.cpu1))
> +					praid_context->raid_context_g35.routing_flags.bits.cpu_sel
> +							= MR_RAID_CTX_CPUSEL_FCFS;
> +				else if (raid->cpuAffinity.ldWrite.cpu1)
> +					praid_context->raid_context_g35.routing_flags.bits.cpu_sel
> +						= MR_RAID_CTX_CPUSEL_1;
> +				else
> +					praid_context->raid_context_g35.routing_flags.bits.cpu_sel
> +					= MR_RAID_CTX_CPUSEL_0;
> +
> +				if (io_request->RaidContext.raid_context_g35.stream_detected
> +				&& (raid->level == 5)
> +				&& (raid->writeMode == MR_RL_WRITE_THROUGH_MODE)) {
> +					if (praid_context->raid_context_g35.routing_flags.bits.cpu_sel == MR_RAID_CTX_CPUSEL_FCFS)
> +						praid_context->raid_context_g35.routing_flags.bits.cpu_sel
> +							= MR_RAID_CTX_CPUSEL_0;
> +				}
> +			}
> +		}
>  	} /* Not FP */
>  }
>
> @@ -2053,9 +2144,9 @@ static void megasas_build_ld_nonrw_fusion(struct megasas_instance *instance,
>  	/* get RAID_Context pointer */
>  	pRAID_Context = &io_request->RaidContext.raid_context;
>  	/* Check with FW team */
> -	pRAID_Context->VirtualDiskTgtId = cpu_to_le16(device_id);
> -	pRAID_Context->regLockRowLBA    = 0;
> -	pRAID_Context->regLockLength    = 0;
> +	pRAID_Context->virtual_disk_tgt_id = cpu_to_le16(device_id);
> +	pRAID_Context->reg_lock_row_lba    = 0;
> +	pRAID_Context->reg_lock_length    = 0;
>
>  	if (fusion->fast_path_io && (
>  		device_id < instance->fw_supported_vd_count)) {
> @@ -2074,7 +2165,7 @@ static void megasas_build_ld_nonrw_fusion(struct megasas_instance *instance,
>  		io_request->Function  = MEGASAS_MPI2_FUNCTION_LD_IO_REQUEST;
>  		io_request->DevHandle = cpu_to_le16(device_id);
>  		io_request->LUN[1] = scmd->device->lun;
> -		pRAID_Context->timeoutValue =
> +		pRAID_Context->timeout_value =
>  			cpu_to_le16 (scmd->request->timeout / HZ);
>  		cmd->request_desc->SCSIIO.RequestFlags =
>  			(MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO <<
> @@ -2082,9 +2173,10 @@ static void megasas_build_ld_nonrw_fusion(struct megasas_instance *instance,
>  	} else {
>
>  		/* set RAID context values */
> -		pRAID_Context->configSeqNum = raid->seqNum;
> -		pRAID_Context->regLockFlags = REGION_TYPE_SHARED_READ;
> -		pRAID_Context->timeoutValue = cpu_to_le16(raid->fpIoTimeoutForLd);
> +		pRAID_Context->config_seq_num = raid->seqNum;
> +		if (!instance->is_ventura)
> +			pRAID_Context->reg_lock_flags = REGION_TYPE_SHARED_READ;
> +		pRAID_Context->timeout_value = cpu_to_le16(raid->fpIoTimeoutForLd);
>
>  		/* get the DevHandle for the PD (since this is
>  		   fpNonRWCapable, this is a single disk RAID0) */
> @@ -2139,12 +2231,12 @@ static void megasas_build_ld_nonrw_fusion(struct megasas_instance *instance,
>  	io_request = cmd->io_request;
>  	/* get RAID_Context pointer */
>  	pRAID_Context = &io_request->RaidContext.raid_context;
> -	pRAID_Context->regLockFlags = 0;
> -	pRAID_Context->regLockRowLBA = 0;
> -	pRAID_Context->regLockLength = 0;
> +	pRAID_Context->reg_lock_flags = 0;
> +	pRAID_Context->reg_lock_row_lba = 0;
> +	pRAID_Context->reg_lock_length = 0;
>  	io_request->DataLength = cpu_to_le32(scsi_bufflen(scmd));
>  	io_request->LUN[1] = scmd->device->lun;
> -	pRAID_Context->RAIDFlags = MR_RAID_FLAGS_IO_SUB_TYPE_SYSTEM_PD
> +	pRAID_Context->raid_flags = MR_RAID_FLAGS_IO_SUB_TYPE_SYSTEM_PD
>  		<< MR_RAID_CTX_RAID_FLAGS_IO_SUB_TYPE_SHIFT;
>
>  	/* If FW supports PD sequence number */
> @@ -2153,24 +2245,28 @@ static void megasas_build_ld_nonrw_fusion(struct megasas_instance *instance,
>  		/* TgtId must be incremented by 255 as jbod seq number is index
>  		 * below raid map
>  		 */
> -		pRAID_Context->VirtualDiskTgtId =
> +		pRAID_Context->virtual_disk_tgt_id =
>  			cpu_to_le16(device_id + (MAX_PHYSICAL_DEVICES - 1));
> -		pRAID_Context->configSeqNum = pd_sync->seq[pd_index].seqNum;
> +		pRAID_Context->config_seq_num = pd_sync->seq[pd_index].seqNum;
>  		io_request->DevHandle = pd_sync->seq[pd_index].devHandle;
> -		pRAID_Context->regLockFlags |=
> +		if (instance->is_ventura)
> +			io_request->RaidContext.raid_context_g35.routing_flags.bits.sqn
> +			= 1;
> +		else
> +		pRAID_Context->reg_lock_flags |=
>  			(MR_RL_FLAGS_SEQ_NUM_ENABLE|MR_RL_FLAGS_GRANT_DESTINATION_CUDA);
> -		pRAID_Context->Type = MPI2_TYPE_CUDA;
> +		pRAID_Context->type = MPI2_TYPE_CUDA;
>  		pRAID_Context->nseg = 0x1;
>  	} else if (fusion->fast_path_io) {
> -		pRAID_Context->VirtualDiskTgtId = cpu_to_le16(device_id);
> -		pRAID_Context->configSeqNum = 0;
> +		pRAID_Context->virtual_disk_tgt_id = cpu_to_le16(device_id);
> +		pRAID_Context->config_seq_num = 0;
>  		local_map_ptr = fusion->ld_drv_map[(instance->map_id & 1)];
>  		io_request->DevHandle =
>  			local_map_ptr->raidMap.devHndlInfo[device_id].curDevHdl;
>  	} else {
>  		/* Want to send all IO via FW path */
> -		pRAID_Context->VirtualDiskTgtId = cpu_to_le16(device_id);
> -		pRAID_Context->configSeqNum = 0;
> +		pRAID_Context->virtual_disk_tgt_id = cpu_to_le16(device_id);
> +		pRAID_Context->config_seq_num = 0;
>  		io_request->DevHandle = cpu_to_le16(0xFFFF);
>  	}
>
> @@ -2186,14 +2282,14 @@ static void megasas_build_ld_nonrw_fusion(struct megasas_instance *instance,
>  		cmd->request_desc->SCSIIO.RequestFlags =
>  			(MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO <<
>  				MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
> -		pRAID_Context->timeoutValue = cpu_to_le16(os_timeout_value);
> -		pRAID_Context->VirtualDiskTgtId = cpu_to_le16(device_id);
> +		pRAID_Context->timeout_value = cpu_to_le16(os_timeout_value);
> +		pRAID_Context->virtual_disk_tgt_id = cpu_to_le16(device_id);
>  	} else {
>  		/* system pd Fast Path */
>  		io_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
>  		timeout_limit = (scmd->device->type == TYPE_DISK) ?
>  				255 : 0xFFFF;
> -		pRAID_Context->timeoutValue =
> +		pRAID_Context->timeout_value =
>  			cpu_to_le16((os_timeout_value > timeout_limit) ?
>  			timeout_limit : os_timeout_value);
>  		if (fusion->adapter_type == INVADER_SERIES)
> @@ -2232,8 +2328,8 @@ static void megasas_build_ld_nonrw_fusion(struct megasas_instance *instance,
>  	io_request->Control = 0;
>  	io_request->EEDPBlockSize = 0;
>  	io_request->ChainOffset = 0;
> -	io_request->RaidContext.raid_context.RAIDFlags = 0;
> -	io_request->RaidContext.raid_context.Type = 0;
> +	io_request->RaidContext.raid_context.raid_flags = 0;
> +	io_request->RaidContext.raid_context.type = 0;
>  	io_request->RaidContext.raid_context.nseg = 0;
>
>  	memcpy(io_request->CDB.CDB32, scp->cmnd, scp->cmd_len);
> @@ -2278,11 +2374,16 @@ static void megasas_build_ld_nonrw_fusion(struct megasas_instance *instance,
>  		return 1;
>  	}
>
> -	/* numSGE store lower 8 bit of sge_count.
> -	 * numSGEExt store higher 8 bit of sge_count
> -	 */
> -	io_request->RaidContext.raid_context.numSGE = sge_count;
> -	io_request->RaidContext.raid_context.numSGEExt = (u8)(sge_count >>
8);
> +	if (instance->is_ventura)
> +		io_request->RaidContext.raid_context_g35.num_sge = sge_count;
> +	else {
> +		/* numSGE store lower 8 bit of sge_count.
> +		 * numSGEExt store higher 8 bit of sge_count
> +		 */
> +		io_request->RaidContext.raid_context.num_sge = sge_count;
> +		io_request->RaidContext.raid_context.num_sge_ext =
> +			(u8)(sge_count >> 8);
> +	}
>
>  	io_request->SGLFlags = cpu_to_le16(MPI2_SGE_FLAGS_64_BIT_ADDRESSING);
>
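A note on the SGE count handling above: the pre-Ventura RAID context
splits the count across two 8-bit fields, while the Ventura context
carries a full 16-bit num_sge. A hypothetical reassembly on the read
side, where ctx points to a legacy struct RAID_CONTEXT:

	u16 sge_count = ctx->num_sge | ((u16)ctx->num_sge_ext << 8);
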
> @@ -2331,6 +2432,10 @@ void megasas_fpio_to_ldio(struct megasas_instance *instance,
>  	struct megasas_cmd_fusion *cmd, struct scsi_cmnd *scmd)
>  {
>  	struct fusion_context *fusion;
> +	union RAID_CONTEXT_UNION *praid_context;
> +	struct MR_LD_RAID *raid;
> +	struct MR_DRV_RAID_MAP_ALL *local_map_ptr;
> +	u32 device_id, ld;
>  	fusion = instance->ctrl_context;
>
>  	cmd->request_desc->SCSIIO.RequestFlags =
> @@ -2354,6 +2459,35 @@ void megasas_fpio_to_ldio(struct megasas_instance *instance,
>  	cmd->io_request->Control = 0;
>  	cmd->io_request->EEDPBlockSize = 0;
>  	cmd->is_raid_1_fp_write = 0;
> +
> +	device_id = MEGASAS_DEV_INDEX(cmd->scmd);
> +	local_map_ptr = fusion->ld_drv_map[(instance->map_id & 1)];
> +	ld = MR_TargetIdToLdGet(device_id, local_map_ptr);
> +	raid = MR_LdRaidGet(ld, local_map_ptr);
> +	praid_context = &cmd->io_request->RaidContext;
> +	if (cmd->scmd->sc_data_direction == PCI_DMA_FROMDEVICE) {
> +		if ((raid->cpuAffinity.ldRead.cpu0)
> +		&& (raid->cpuAffinity.ldRead.cpu1))
> +			praid_context->raid_context_g35.routing_flags.bits.cpu_sel
> +			= MR_RAID_CTX_CPUSEL_FCFS;
> +		else if (raid->cpuAffinity.ldRead.cpu1)
> +			praid_context->raid_context_g35.routing_flags.bits.cpu_sel
> +			= MR_RAID_CTX_CPUSEL_1;
> +		else
> +			praid_context->raid_context_g35.routing_flags.bits.cpu_sel
> +			= MR_RAID_CTX_CPUSEL_0;
> +	} else {
> +	if ((raid->cpuAffinity.ldWrite.cpu0)
> +		&& (raid->cpuAffinity.ldWrite.cpu1))
> +		praid_context->raid_context_g35.routing_flags.bits.cpu_sel
> +			= MR_RAID_CTX_CPUSEL_FCFS;
> +	else if (raid->cpuAffinity.ldWrite.cpu1)
> +		praid_context->raid_context_g35.routing_flags.bits.cpu_sel
> +			= MR_RAID_CTX_CPUSEL_1;
> +	else
> +		praid_context->raid_context_g35.routing_flags.bits.cpu_sel
> +		= MR_RAID_CTX_CPUSEL_0;
> +	}
>  }
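
The cpu_sel ladder above now appears three times in this file
(fast-path PD I/O, LD I/O and megasas_fpio_to_ldio). A hypothetical
helper, using the MR_CPU_AFFINITY_MASK layout added by this series,
could collapse each ladder to a single call (a sketch, not part of the
patch):

	static u8 megasas_cpu_sel(struct MR_CPU_AFFINITY_MASK mask)
	{
		if (mask.cpu0 && mask.cpu1)
			return MR_RAID_CTX_CPUSEL_FCFS;
		if (mask.cpu1)
			return MR_RAID_CTX_CPUSEL_1;
		return MR_RAID_CTX_CPUSEL_0;
	}

	/* usage, e.g.:
	 * praid_context->raid_context_g35.routing_flags.bits.cpu_sel =
	 *	megasas_cpu_sel(raid->cpuAffinity.ldRead);
	 */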
>  /*megasas_prepate_secondRaid1_IO
>   * It prepares the raid 1 second IO
> @@ -2491,6 +2625,7 @@ void megasas_prepare_secondRaid1_IO(struct megasas_instance *instance,
>  	*	corresponds to single R1/10 LD are always same
>  	*
>  	*/
> +
>  	/*	driver side count always should be less than max_fw_cmds
>  	*	to get new command
>  	*/
> @@ -2588,7 +2723,7 @@ void megasas_prepare_secondRaid1_IO(struct megasas_instance *instance,
>
>  		scmd_local = cmd_fusion->scmd;
>  		status = scsi_io_req->RaidContext.raid_context.status;
> -		extStatus = scsi_io_req->RaidContext.raid_context.exStatus;
> +		extStatus = scsi_io_req->RaidContext.raid_context.ex_status;
>  		sense = cmd_fusion->sense;
>  		data_length = scsi_io_req->DataLength;
>
> @@ -2656,13 +2791,13 @@ void megasas_prepare_secondRaid1_IO(struct megasas_instance *instance,
>  					status =
>  						r1_cmd->io_request->RaidContext.raid_context.status;
>  					extStatus =
> -						r1_cmd->io_request->RaidContext.raid_context.exStatus;
> +						r1_cmd->io_request->RaidContext.raid_context.ex_status;
>  					data_length =
>  						r1_cmd->io_request->DataLength;
>  					sense = r1_cmd->sense;
>  				}
>  				r1_cmd->io_request->RaidContext.raid_context.status = 0;
> -				r1_cmd->io_request->RaidContext.raid_context.exStatus = 0;
> +				r1_cmd->io_request->RaidContext.raid_context.ex_status = 0;
>  				cmd_fusion->is_raid_1_fp_write = 0;
>  				r1_cmd->is_raid_1_fp_write = 0;
>  				r1_cmd->cmd_completed = false;
> @@ -2674,7 +2809,7 @@ void megasas_prepare_secondRaid1_IO(struct megasas_instance *instance,
>  					extStatus, data_length, sense);
>  				scsi_io_req->RaidContext.raid_context.status
>  				= 0;
> -				scsi_io_req->RaidContext.raid_context.exStatus
> +				scsi_io_req->RaidContext.raid_context.ex_status
>  				= 0;
>  				megasas_return_cmd_fusion(instance, cmd_fusion);
>  				scsi_dma_unmap(scmd_local);
> diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.h b/drivers/scsi/megaraid/megaraid_sas_fusion.h
> index 5590c1d..cb42655 100644
> --- a/drivers/scsi/megaraid/megaraid_sas_fusion.h
> +++ b/drivers/scsi/megaraid/megaraid_sas_fusion.h
> @@ -59,6 +59,8 @@
>  #define	MR_RL_FLAGS_GRANT_DESTINATION_CPU1	    0x10
>  #define	MR_RL_FLAGS_GRANT_DESTINATION_CUDA	    0x80
>  #define MR_RL_FLAGS_SEQ_NUM_ENABLE		    0x8
> +#define MR_RL_WRITE_THROUGH_MODE		    0x00
> +#define MR_RL_WRITE_BACK_MODE			    0x01
>
>  /* T10 PI defines */
>  #define MR_PROT_INFO_TYPE_CONTROLLER                0x8
> @@ -81,6 +83,11 @@
>  enum MR_RAID_FLAGS_IO_SUB_TYPE {
>  	MR_RAID_FLAGS_IO_SUB_TYPE_NONE = 0,
>  	MR_RAID_FLAGS_IO_SUB_TYPE_SYSTEM_PD = 1,
> +	MR_RAID_FLAGS_IO_SUB_TYPE_RMW_DATA     = 2,
> +	MR_RAID_FLAGS_IO_SUB_TYPE_RMW_P        = 3,
> +	MR_RAID_FLAGS_IO_SUB_TYPE_RMW_Q        = 4,
> +	MR_RAID_FLAGS_IO_SUB_TYPE_CACHE_BYPASS = 6,
> +	MR_RAID_FLAGS_IO_SUB_TYPE_LDIO_BW_LIMIT = 7
>  };
>
>  /*
> @@ -109,29 +116,29 @@ enum MR_FUSION_ADAPTER_TYPE {
>
>  struct RAID_CONTEXT {
>  #if   defined(__BIG_ENDIAN_BITFIELD)
> -	u8	nseg:4;
> -	u8	Type:4;
> +	u8 nseg:4;
> +	u8 type:4;
>  #else
> -	u8	Type:4;
> -	u8	nseg:4;
> +	u8 type:4;
> +	u8 nseg:4;
>  #endif
> -	u8	resvd0;
> -	__le16	timeoutValue;
> -	u8      regLockFlags;
> -	u8      resvd1;
> -	__le16	VirtualDiskTgtId;
> -	__le64	regLockRowLBA;
> -	__le32	regLockLength;
> -	__le16	nextLMId;
> -	u8      exStatus;
> -	u8      status;
> -	u8      RAIDFlags;
> -	u8      numSGE;
> -	__le16	configSeqNum;
> -	u8      spanArm;
> -	u8      priority;
> -	u8	numSGEExt;
> -	u8      resvd2;
> +	u8 resvd0;
> +	__le16 timeout_value;
> +	u8 reg_lock_flags;
> +	u8 resvd1;
> +	__le16 virtual_disk_tgt_id;
> +	__le64 reg_lock_row_lba;
> +	__le32 reg_lock_length;
> +	__le16 next_lmid;
> +	u8 ex_status;
> +	u8 status;
> +	u8 raid_flags;
> +	u8 num_sge;
> +	__le16 config_seq_num;
> +	u8 span_arm;
> +	u8 priority;
> +	u8 num_sge_ext;
> +	u8 resvd2;
>  };
>
>  /*
> @@ -187,7 +194,7 @@ struct RAID_CONTEXT_G35 {
>  	} smid;
>  	u8 ex_status;       /* 0x16 : OUT */
>  	u8 status;          /* 0x17 status */
> -	u8 RAIDFlags;		/* 0x18 resvd[7:6], ioSubType[5:4],
> +	u8 raid_flags;		/* 0x18 resvd[7:6], ioSubType[5:4],
>  						* resvd[3:1], preferredCpu[0]
>  						*/
>  	u8 span_arm;            /* 0x1C span[7:5], arm[4:0] */
> @@ -672,14 +679,17 @@ struct MPI2_IOC_INIT_REQUEST {
>  #define MAX_RAIDMAP_ROW_SIZE (MAX_ROW_SIZE)
>  #define MAX_LOGICAL_DRIVES 64
>  #define MAX_LOGICAL_DRIVES_EXT 256
> +#define MAX_LOGICAL_DRIVES_DYN 512
>  #define MAX_RAIDMAP_LOGICAL_DRIVES (MAX_LOGICAL_DRIVES)
>  #define MAX_RAIDMAP_VIEWS (MAX_LOGICAL_DRIVES)
>  #define MAX_ARRAYS 128
>  #define MAX_RAIDMAP_ARRAYS (MAX_ARRAYS)
>  #define MAX_ARRAYS_EXT	256
>  #define MAX_API_ARRAYS_EXT (MAX_ARRAYS_EXT)
> +#define MAX_API_ARRAYS_DYN 512
>  #define MAX_PHYSICAL_DEVICES 256
>  #define MAX_RAIDMAP_PHYSICAL_DEVICES (MAX_PHYSICAL_DEVICES)
> +#define MAX_RAIDMAP_PHYSICAL_DEVICES_DYN 512
>  #define MR_DCMD_LD_MAP_GET_INFO             0x0300e101
>  #define MR_DCMD_SYSTEM_PD_MAP_GET_INFO      0x0200e102
>  #define MR_DCMD_CTRL_SHARED_HOST_MEM_ALLOC  0x010e8485   /* SR-IOV HB alloc*/
> @@ -726,12 +736,56 @@ struct MR_SPAN_BLOCK_INFO {
>  	struct MR_SPAN_INFO block_span_info;
>  };
>
> +#define MR_RAID_CTX_CPUSEL_0		0
> +#define MR_RAID_CTX_CPUSEL_1		1
> +#define MR_RAID_CTX_CPUSEL_2		2
> +#define MR_RAID_CTX_CPUSEL_3		3
> +#define MR_RAID_CTX_CPUSEL_FCFS		0xF
> +
> +struct MR_CPU_AFFINITY_MASK {
> +	union {
> +		struct {
> +#ifndef MFI_BIG_ENDIAN
> +		u8 hw_path:1;
> +		u8 cpu0:1;
> +		u8 cpu1:1;
> +		u8 cpu2:1;
> +		u8 cpu3:1;
> +		u8 reserved:3;
> +#else
> +		u8 reserved:3;
> +		u8 cpu3:1;
> +		u8 cpu2:1;
> +		u8 cpu1:1;
> +		u8 cpu0:1;
> +		u8 hw_path:1;
> +#endif
> +		};
> +		u8 core_mask;
> +	};
> +};
> +
> +struct MR_IO_AFFINITY {
> +	union {
> +		struct {
> +			struct MR_CPU_AFFINITY_MASK pdRead;
> +			struct MR_CPU_AFFINITY_MASK pdWrite;
> +			struct MR_CPU_AFFINITY_MASK ldRead;
> +			struct MR_CPU_AFFINITY_MASK ldWrite;
> +			};
> +		u32 word;
> +		};
> +	u8 maxCores;    /* Total cores + HW Path in ROC */
> +	u8 reserved[3];
> +};
> +
>  struct MR_LD_RAID {
>  	struct {
>  #if   defined(__BIG_ENDIAN_BITFIELD)
> -		u32     reserved4:3;
> -		u32     fp_cache_bypass_capable:1;
> -		u32     fp_rmw_capable:1;
> +		u32 reserved4:2;
> +		u32 fp_cache_bypass_capable:1;
> +		u32 fp_rmw_capable:1;
> +		u32 disable_coalescing:1;
>  		u32     fpBypassRegionLock:1;
>  		u32     tmCapable:1;
>  		u32	fpNonRWCapable:1;
> @@ -759,9 +813,10 @@ struct MR_LD_RAID {
>  		u32	fpNonRWCapable:1;
>  		u32     tmCapable:1;
>  		u32     fpBypassRegionLock:1;
> -		u32     fp_rmw_capable:1;
> -		u32     fp_cache_bypass_capable:1;
> -		u32     reserved4:3;
> +		u32 disable_coalescing:1;
> +		u32 fp_rmw_capable:1;
> +		u32 fp_cache_bypass_capable:1;
> +		u32 reserved4:2;
>  #endif
>  	} capability;
>  	__le32     reserved6;
> @@ -788,7 +843,36 @@ struct MR_LD_RAID {
>
>  	u8	LUN[8]; /* 0x24 8 byte LUN field used for SCSI IO's */
>  	u8	fpIoTimeoutForLd;/*0x2C timeout value used by driver in FP IO*/
> -	u8      reserved3[0x80-0x2D]; /* 0x2D */
> +	/* Ox2D This LD accept priority boost of this type */
> +	u8 ld_accept_priority_type;
> +	u8 reserved2[2];	        /* 0x2E - 0x2F */
> +	/* 0x30 - 0x33, Logical block size for the LD */
> +	u32 logical_block_length;
> +	struct {
> +#ifndef MFI_BIG_ENDIAN
> +	/* 0x34, P_I_EXPONENT from READ CAPACITY 16 */
> +	u32 ld_pi_exp:4;
> +	/* 0x34, LOGICAL BLOCKS PER PHYSICAL
> +	*  BLOCK EXPONENT from READ CAPACITY 16
> +	*/
> +	u32 ld_logical_block_exp:4;
> +	u32 reserved1:24;           /* 0x34 */
> +#else
> +	u32 reserved1:24;           /* 0x34 */
> +	/* 0x34, LOGICAL BLOCKS PER PHYSICAL
> +	*  BLOCK EXPONENT from READ CAPACITY 16
> +	*/
> +	u32 ld_logical_block_exp:4;
> +	/* 0x34, P_I_EXPONENT from READ CAPACITY 16 */
> +	u32 ld_pi_exp:4;
> +#endif
> +	};                               /* 0x34 - 0x37 */
> +	 /* 0x38 - 0x3f, This will determine which
> +	 *  core will process LD IO and PD IO.
> +	 */
> +	struct MR_IO_AFFINITY cpuAffinity;
> +     /* Bit definiations are specified by MR_IO_AFFINITY */
> +	u8 reserved3[0x80-0x40];    /* 0x40 - 0x7f */
>  };
>
>  struct MR_LD_SPAN_MAP {
> @@ -846,6 +930,91 @@ struct MR_LD_TARGET_SYNC {
>  	__le16 seqNum;
>  };
>
> +/*
> +* RAID Map descriptor Types.
> +* Each element should uniquely idetify one data structure in the RAID map
> +*/
> +enum MR_RAID_MAP_DESC_TYPE {
> +	/* MR_DEV_HANDLE_INFO data */
> +	RAID_MAP_DESC_TYPE_DEVHDL_INFO    = 0x0,
> +	/* target to Ld num Index map */
> +	RAID_MAP_DESC_TYPE_TGTID_INFO     = 0x1,
> +	/* MR_ARRAY_INFO data */
> +	RAID_MAP_DESC_TYPE_ARRAY_INFO     = 0x2,
> +	/* MR_LD_SPAN_MAP data */
> +	RAID_MAP_DESC_TYPE_SPAN_INFO      = 0x3,
> +	RAID_MAP_DESC_TYPE_COUNT,
> +};
> +
> +/*
> +* This table defines the offset, size and num elements  of each descriptor
> +* type in the RAID Map buffer
> +*/
> +struct MR_RAID_MAP_DESC_TABLE {
> +	/* Raid map descriptor type */
> +	u32 raid_map_desc_type;
> +	/* Offset into the RAID map buffer where
> +	*  descriptor data is saved
> +	*/
> +	u32 raid_map_desc_offset;
> +	/* total size of the
> +	* descriptor buffer
> +	*/
> +	u32 raid_map_desc_buffer_size;
> +	/* Number of elements contained in the
> +	*  descriptor buffer
> +	*/
> +	u32 raid_map_desc_elements;
> +};
> +
> +/*
> +* Dynamic Raid Map Structure.
> +*/
> +struct MR_FW_RAID_MAP_DYNAMIC {
> +	u32 raid_map_size;   /* total size of RAID Map structure */
> +	u32 desc_table_offset;/* Offset of desc table into RAID map*/
> +	u32 desc_table_size;  /* Total Size of desc table */
> +	/* Total Number of elements in the desc table */
> +	u32 desc_table_num_elements;
> +	u64	reserved1;
> +	u32	reserved2[3];	/*future use */
> +	/* timeout value used by driver in FP IOs */
> +	u8 fp_pd_io_timeout_sec;
> +	u8 reserved3[3];
> +	/* when this seqNum increments, driver needs to
> +	*  release RMW buffers asap
> +	*/
> +	u32 rmw_fp_seq_num;
> +	u16 ld_count;	/* count of lds. */
> +	u16 ar_count;   /* count of arrays */
> +	u16 span_count; /* count of spans */
> +	u16 reserved4[3];
> +/*
> +* The below structure of pointers is only to be used by the driver.
> +* This is added in the ,API to reduce the amount of code changes
> +* needed in the driver to support dynamic RAID map Firmware should
> +* not update these pointers while preparing the raid map
> +*/
> +	union {
> +		struct {
> +			struct MR_DEV_HANDLE_INFO  *dev_hndl_info;
> +			u16 *ld_tgt_id_to_ld;
> +			struct MR_ARRAY_INFO *ar_map_info;
> +			struct MR_LD_SPAN_MAP *ld_span_map;
> +			};
> +		u64 ptr_structure_size[RAID_MAP_DESC_TYPE_COUNT];
> +		};
> +/*
> +* RAID Map descriptor table defines the layout of data in the RAID Map.
> +* The size of the descriptor table itself could change.
> +*/
> +	/* Variable Size descriptor Table. */
> +	struct MR_RAID_MAP_DESC_TABLE
> +			raid_map_desc_table[RAID_MAP_DESC_TYPE_COUNT];
> +	/* Variable Size buffer containing all data */
> +	u32 raid_map_desc_data[1];
> +}; /* Dynamicaly sized RAID MAp structure */
> +
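Putting the dynamic map layout together: a consumer finds the
descriptor table at desc_table_offset, and each descriptor's buffer
lives past the table at its raid_map_desc_offset within the data
region. A minimal sketch of the walk, mirroring MR_PopulateDrvRaidMap
above (an illustration, not code from the patch):

	static void walk_dynamic_map(struct MR_FW_RAID_MAP_DYNAMIC *map)
	{
		struct MR_RAID_MAP_DESC_TABLE *desc = (void *)map +
			le32_to_cpu(map->desc_table_offset);
		/* descriptor data starts right after the table itself */
		void *data = (void *)map +
			le32_to_cpu(map->desc_table_offset) +
			le32_to_cpu(map->desc_table_size);
		u32 i;

		for (i = 0; i < le32_to_cpu(map->desc_table_num_elements); i++) {
			void *buf = data +
				le32_to_cpu(desc->raid_map_desc_offset);

			/* dispatch on le32_to_cpu(desc->raid_map_desc_type):
			 * DEVHDL_INFO, TGTID_INFO, ARRAY_INFO or SPAN_INFO;
			 * buf holds raid_map_desc_elements entries
			 */
			++desc;
		}
	}
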
>  #define IEEE_SGE_FLAGS_ADDR_MASK            (0x03)
>  #define IEEE_SGE_FLAGS_SYSTEM_ADDR          (0x00)
>  #define IEEE_SGE_FLAGS_IOCDDR_ADDR          (0x01)
> @@ -955,9 +1124,10 @@ struct MR_DRV_RAID_MAP {
>  	__le16                 spanCount;
>  	__le16                 reserve3;
>
> -	struct MR_DEV_HANDLE_INFO devHndlInfo[MAX_RAIDMAP_PHYSICAL_DEVICES];
> -	u8                  ldTgtIdToLd[MAX_LOGICAL_DRIVES_EXT];
> -	struct MR_ARRAY_INFO       arMapInfo[MAX_API_ARRAYS_EXT];
> +	struct MR_DEV_HANDLE_INFO
> +		devHndlInfo[MAX_RAIDMAP_PHYSICAL_DEVICES_DYN];
> +	u16 ldTgtIdToLd[MAX_LOGICAL_DRIVES_DYN];
> +	struct MR_ARRAY_INFO arMapInfo[MAX_API_ARRAYS_DYN];
>  	struct MR_LD_SPAN_MAP      ldSpanMap[1];
>
>  };
> @@ -969,7 +1139,7 @@ struct MR_DRV_RAID_MAP {
>  struct MR_DRV_RAID_MAP_ALL {
>
>  	struct MR_DRV_RAID_MAP raidMap;
> -	struct MR_LD_SPAN_MAP      ldSpanMap[MAX_LOGICAL_DRIVES_EXT - 1];
> +	struct MR_LD_SPAN_MAP ldSpanMap[MAX_LOGICAL_DRIVES_DYN - 1];
>  } __packed;
>
>
> @@ -1088,7 +1258,7 @@ struct fusion_context {
>  	u8	chain_offset_io_request;
>  	u8	chain_offset_mfi_pthru;
>
> -	struct MR_FW_RAID_MAP_ALL *ld_map[2];
> +	struct MR_FW_RAID_MAP_DYNAMIC *ld_map[2];
>  	dma_addr_t ld_map_phys[2];
>
>  	/*Non dma-able memory. Driver local copy.*/
> @@ -1096,6 +1266,8 @@ struct fusion_context {
>
>  	u32 max_map_sz;
>  	u32 current_map_sz;
> +	u32 old_map_sz;
> +	u32 new_map_sz;
>  	u32 drv_map_sz;
>  	u32 drv_map_pages;
>  	struct MR_PD_CFG_SEQ_NUM_SYNC	*pd_seq_sync[JBOD_MAPS_COUNT];
