author     Linus Torvalds <torvalds@linux-foundation.org>  2022-12-14 08:58:51 -0800
committer  Linus Torvalds <torvalds@linux-foundation.org>  2022-12-14 08:58:51 -0800
commit     aa5ad10f6cca6d42f3fef6cb862e03b220ea19a6 (patch)
tree       80da5833dccec9d952c8458c98c65864fca1186a /drivers/scsi/smartpqi/smartpqi_init.c
parent     e2ca6ba6ba0152361aa4fcbf6067db71b2c7a770 (diff)
parent     4e80eef45ad775a54fb06a66bf8267a154781ce5 (diff)
Merge tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi
Pull SCSI updates from James Bottomley:
 "Updates to the usual drivers (target, ufs, smartpqi, lpfc). There are
  some core changes, mostly around reworking some of our user context
  assumptions in device put and moving some code around. The remaining
  updates are bug fixes and minor changes"

* tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi: (138 commits)
  scsi: sg: Fix get_user() in call sg_scsi_ioctl()
  scsi: megaraid_sas: Fix some spelling mistakes in comment
  scsi: core: Use SCSI_SCAN_INITIAL in do_scsi_scan_host()
  scsi: core: Use SCSI_SCAN_RESCAN in __scsi_add_device()
  scsi: ufs: ufs-mediatek: Remove unnecessary return code
  scsi: ufs: core: Fix the polling implementation
  scsi: libsas: Do not export sas_ata_wait_after_reset()
  scsi: hisi_sas: Fix SATA devices missing issue during I_T nexus reset
  scsi: libsas: Add smp_ata_check_ready_type()
  scsi: Revert "scsi: hisi_sas: Don't send bcast events from HW during nexus HA reset"
  scsi: Revert "scsi: hisi_sas: Drain bcast events in hisi_sas_rescan_topology()"
  scsi: ufs: ufs-mediatek: Modify the return value
  scsi: ufs: ufs-mediatek: Remove unneeded code
  scsi: device_handler: alua: Call scsi_device_put() from non-atomic context
  scsi: device_handler: alua: Revert "Move a scsi_device_put() call out of alua_check_vpd()"
  scsi: snic: Fix possible UAF in snic_tgt_create()
  scsi: qla2xxx: Initialize vha->unknown_atio_[list, work] for NPIV hosts
  scsi: qla2xxx: Remove duplicate of vha->iocb_work initialization
  scsi: fcoe: Fix transport not deattached when fcoe_if_init() fails
  scsi: sd: Use 16-byte SYNCHRONIZE CACHE on ZBC devices
  ...
Diffstat (limited to 'drivers/scsi/smartpqi/smartpqi_init.c')
-rw-r--r--  drivers/scsi/smartpqi/smartpqi_init.c  164
1 file changed, 124 insertions(+), 40 deletions(-)
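
The largest functional change in this file is the rework of pqi_alloc_io_request() to take the I/O slot index directly from the block-layer tag, with pqi_get_hw_queue() simplified to match. As a rough standalone sketch only, not code from the patch: the blk_mq_unique_tag() helpers from <linux/blk-mq.h> split a command's unique tag into a per-set tag and a hardware-queue index. The example_* helpers below are illustrative names and assume a Scsi_Host registered with host_tagset = 1.

/*
 * Illustrative sketch (not part of the patch): how a SCSI command's
 * block-layer unique tag decomposes into a pool index and a hardware
 * queue number. Assumes host_tagset = 1, so tags are unique across
 * all hardware queues.
 */
#include <linux/blk-mq.h>
#include <scsi/scsi_cmnd.h>

/* hypothetical helper: per-set tag, usable as an io_request_pool index */
static u16 example_pool_index(struct scsi_cmnd *scmd)
{
	u32 unique_tag = blk_mq_unique_tag(scsi_cmd_to_rq(scmd));

	return blk_mq_unique_tag_to_tag(unique_tag);
}

/* hypothetical helper: hardware queue carried in the tag's high bits */
static u16 example_hw_queue(struct scsi_cmnd *scmd)
{
	u32 unique_tag = blk_mq_unique_tag(scsi_cmd_to_rq(scmd));

	return blk_mq_unique_tag_to_hwq(unique_tag);
}

With host_tagset enabled, the tag-derived index is already unique per outstanding command, which is why the patch can drop the next_io_request_slot scan for mid-layer requests and keep only a small reserved range (PQI_RESERVED_IO_SLOTS slots above scsi_ml_can_queue) for internal and IOCTL requests.
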
diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c
index b971fbe3b3a1..d0446d4d4465 100644
--- a/drivers/scsi/smartpqi/smartpqi_init.c
+++ b/drivers/scsi/smartpqi/smartpqi_init.c
@@ -33,11 +33,11 @@
#define BUILD_TIMESTAMP
#endif
-#define DRIVER_VERSION "2.1.18-045"
+#define DRIVER_VERSION "2.1.20-035"
#define DRIVER_MAJOR 2
#define DRIVER_MINOR 1
-#define DRIVER_RELEASE 18
-#define DRIVER_REVISION 45
+#define DRIVER_RELEASE 20
+#define DRIVER_REVISION 35
#define DRIVER_NAME "Microchip SmartPQI Driver (v" \
DRIVER_VERSION BUILD_TIMESTAMP ")"
@@ -678,23 +678,36 @@ static inline void pqi_reinit_io_request(struct pqi_io_request *io_request)
io_request->raid_bypass = false;
}
-static struct pqi_io_request *pqi_alloc_io_request(
- struct pqi_ctrl_info *ctrl_info)
+static inline struct pqi_io_request *pqi_alloc_io_request(struct pqi_ctrl_info *ctrl_info, struct scsi_cmnd *scmd)
{
struct pqi_io_request *io_request;
- u16 i = ctrl_info->next_io_request_slot; /* benignly racy */
+ u16 i;
- while (1) {
+ if (scmd) { /* SML I/O request */
+ u32 blk_tag = blk_mq_unique_tag(scsi_cmd_to_rq(scmd));
+
+ i = blk_mq_unique_tag_to_tag(blk_tag);
io_request = &ctrl_info->io_request_pool[i];
- if (atomic_inc_return(&io_request->refcount) == 1)
- break;
- atomic_dec(&io_request->refcount);
- i = (i + 1) % ctrl_info->max_io_slots;
+ if (atomic_inc_return(&io_request->refcount) > 1) {
+ atomic_dec(&io_request->refcount);
+ return NULL;
+ }
+ } else { /* IOCTL or driver internal request */
+ /*
+ * benignly racy - may have to wait for an open slot.
+ * command slot range is scsi_ml_can_queue -
+ * [scsi_ml_can_queue + (PQI_RESERVED_IO_SLOTS - 1)]
+ */
+ i = 0;
+ while (1) {
+ io_request = &ctrl_info->io_request_pool[ctrl_info->scsi_ml_can_queue + i];
+ if (atomic_inc_return(&io_request->refcount) == 1)
+ break;
+ atomic_dec(&io_request->refcount);
+ i = (i + 1) % PQI_RESERVED_IO_SLOTS;
+ }
}
- /* benignly racy */
- ctrl_info->next_io_request_slot = (i + 1) % ctrl_info->max_io_slots;
-
pqi_reinit_io_request(io_request);
return io_request;
@@ -1610,9 +1623,7 @@ static int pqi_get_physical_device_info(struct pqi_ctrl_info *ctrl_info,
&id_phys->alternate_paths_phys_connector,
sizeof(device->phys_connector));
device->bay = id_phys->phys_bay_in_box;
- device->multi_lun_device_lun_count = id_phys->multi_lun_device_lun_count;
- if (!device->multi_lun_device_lun_count)
- device->multi_lun_device_lun_count = 1;
+ device->lun_count = id_phys->multi_lun_device_lun_count;
if ((id_phys->even_more_flags & PQI_DEVICE_PHY_MAP_SUPPORTED) &&
id_phys->phy_count)
device->phy_id =
@@ -1746,7 +1757,7 @@ out:
return offline;
}
-static int pqi_get_device_info(struct pqi_ctrl_info *ctrl_info,
+static int pqi_get_device_info_phys_logical(struct pqi_ctrl_info *ctrl_info,
struct pqi_scsi_dev *device,
struct bmic_identify_physical_device *id_phys)
{
@@ -1763,6 +1774,20 @@ static int pqi_get_device_info(struct pqi_ctrl_info *ctrl_info,
return rc;
}
+static int pqi_get_device_info(struct pqi_ctrl_info *ctrl_info,
+ struct pqi_scsi_dev *device,
+ struct bmic_identify_physical_device *id_phys)
+{
+ int rc;
+
+ rc = pqi_get_device_info_phys_logical(ctrl_info, device, id_phys);
+
+ if (rc == 0 && device->lun_count == 0)
+ device->lun_count = 1;
+
+ return rc;
+}
+
static void pqi_show_volume_status(struct pqi_ctrl_info *ctrl_info,
struct pqi_scsi_dev *device)
{
@@ -1897,7 +1922,7 @@ static inline void pqi_remove_device(struct pqi_ctrl_info *ctrl_info, struct pqi
int rc;
int lun;
- for (lun = 0; lun < device->multi_lun_device_lun_count; lun++) {
+ for (lun = 0; lun < device->lun_count; lun++) {
rc = pqi_device_wait_for_pending_io(ctrl_info, device, lun,
PQI_REMOVE_DEVICE_PENDING_IO_TIMEOUT_MSECS);
if (rc)
@@ -2076,6 +2101,7 @@ static void pqi_scsi_update_device(struct pqi_ctrl_info *ctrl_info,
existing_device->sas_address = new_device->sas_address;
existing_device->queue_depth = new_device->queue_depth;
existing_device->device_offline = false;
+ existing_device->lun_count = new_device->lun_count;
if (pqi_is_logical_device(existing_device)) {
existing_device->is_external_raid_device = new_device->is_external_raid_device;
@@ -2108,10 +2134,6 @@ static void pqi_scsi_update_device(struct pqi_ctrl_info *ctrl_info,
existing_device->phy_connected_dev_type = new_device->phy_connected_dev_type;
memcpy(existing_device->box, new_device->box, sizeof(existing_device->box));
memcpy(existing_device->phys_connector, new_device->phys_connector, sizeof(existing_device->phys_connector));
-
- existing_device->multi_lun_device_lun_count = new_device->multi_lun_device_lun_count;
- if (existing_device->multi_lun_device_lun_count == 0)
- existing_device->multi_lun_device_lun_count = 1;
}
}
@@ -4586,7 +4608,7 @@ static int pqi_submit_raid_request_synchronous(struct pqi_ctrl_info *ctrl_info,
goto out;
}
- io_request = pqi_alloc_io_request(ctrl_info);
+ io_request = pqi_alloc_io_request(ctrl_info, NULL);
put_unaligned_le16(io_request->index,
&(((struct pqi_raid_path_request *)request)->request_id));
@@ -5233,7 +5255,6 @@ static void pqi_calculate_queue_resources(struct pqi_ctrl_info *ctrl_info)
}
ctrl_info->num_queue_groups = num_queue_groups;
- ctrl_info->max_hw_queue_index = num_queue_groups - 1;
/*
* Make sure that the max. inbound IU length is an even multiple
@@ -5567,7 +5588,9 @@ static inline int pqi_raid_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
{
struct pqi_io_request *io_request;
- io_request = pqi_alloc_io_request(ctrl_info);
+ io_request = pqi_alloc_io_request(ctrl_info, scmd);
+ if (!io_request)
+ return SCSI_MLQUEUE_HOST_BUSY;
return pqi_raid_submit_scsi_cmd_with_io_request(ctrl_info, io_request,
device, scmd, queue_group);
@@ -5671,7 +5694,9 @@ static int pqi_aio_submit_io(struct pqi_ctrl_info *ctrl_info,
struct pqi_scsi_dev *device;
device = scmd->device->hostdata;
- io_request = pqi_alloc_io_request(ctrl_info);
+ io_request = pqi_alloc_io_request(ctrl_info, scmd);
+ if (!io_request)
+ return SCSI_MLQUEUE_HOST_BUSY;
io_request->io_complete_callback = pqi_aio_io_complete;
io_request->scmd = scmd;
io_request->raid_bypass = raid_bypass;
@@ -5743,7 +5768,10 @@ static int pqi_aio_submit_r1_write_io(struct pqi_ctrl_info *ctrl_info,
struct pqi_io_request *io_request;
struct pqi_aio_r1_path_request *r1_request;
- io_request = pqi_alloc_io_request(ctrl_info);
+ io_request = pqi_alloc_io_request(ctrl_info, scmd);
+ if (!io_request)
+ return SCSI_MLQUEUE_HOST_BUSY;
+
io_request->io_complete_callback = pqi_aio_io_complete;
io_request->scmd = scmd;
io_request->raid_bypass = true;
@@ -5801,7 +5829,9 @@ static int pqi_aio_submit_r56_write_io(struct pqi_ctrl_info *ctrl_info,
struct pqi_io_request *io_request;
struct pqi_aio_r56_path_request *r56_request;
- io_request = pqi_alloc_io_request(ctrl_info);
+ io_request = pqi_alloc_io_request(ctrl_info, scmd);
+ if (!io_request)
+ return SCSI_MLQUEUE_HOST_BUSY;
io_request->io_complete_callback = pqi_aio_io_complete;
io_request->scmd = scmd;
io_request->raid_bypass = true;
@@ -5860,13 +5890,10 @@ static int pqi_aio_submit_r56_write_io(struct pqi_ctrl_info *ctrl_info,
static inline u16 pqi_get_hw_queue(struct pqi_ctrl_info *ctrl_info,
struct scsi_cmnd *scmd)
{
- u16 hw_queue;
-
- hw_queue = blk_mq_unique_tag_to_hwq(blk_mq_unique_tag(scsi_cmd_to_rq(scmd)));
- if (hw_queue > ctrl_info->max_hw_queue_index)
- hw_queue = 0;
-
- return hw_queue;
+ /*
+ * We are setting host_tagset = 1 during init.
+ */
+ return blk_mq_unique_tag_to_hwq(blk_mq_unique_tag(scsi_cmd_to_rq(scmd)));
}
static inline bool pqi_is_bypass_eligible_request(struct scsi_cmnd *scmd)
@@ -6268,7 +6295,7 @@ static int pqi_lun_reset(struct pqi_ctrl_info *ctrl_info, struct scsi_cmnd *scmd
struct pqi_scsi_dev *device;
device = scmd->device->hostdata;
- io_request = pqi_alloc_io_request(ctrl_info);
+ io_request = pqi_alloc_io_request(ctrl_info, NULL);
io_request->io_complete_callback = pqi_lun_reset_complete;
io_request->context = &wait;
@@ -6484,6 +6511,12 @@ static void pqi_slave_destroy(struct scsi_device *sdev)
return;
}
+ device->lun_count--;
+ if (device->lun_count > 0) {
+ mutex_unlock(&ctrl_info->scan_mutex);
+ return;
+ }
+
spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
list_del(&device->scsi_device_list_entry);
spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
@@ -7237,7 +7270,7 @@ static ssize_t pqi_raid_level_show(struct device *dev,
return -ENODEV;
}
- if (pqi_is_logical_device(device))
+ if (pqi_is_logical_device(device) && device->devtype == TYPE_DISK)
raid_level = pqi_raid_level_to_string(device->raid_level);
else
raid_level = "N/A";
@@ -7405,7 +7438,6 @@ static int pqi_register_scsi(struct pqi_ctrl_info *ctrl_info)
shost->max_channel = PQI_MAX_BUS;
shost->max_cmd_len = MAX_COMMAND_SIZE;
shost->max_lun = PQI_MAX_LUNS_PER_DEVICE;
- shost->max_lun = ~0;
shost->max_id = ~0;
shost->max_sectors = ctrl_info->max_sectors;
shost->can_queue = ctrl_info->scsi_ml_can_queue;
@@ -7972,7 +8004,7 @@ static int pqi_process_config_table(struct pqi_ctrl_info *ctrl_info)
struct pqi_config_table *config_table;
struct pqi_config_table_section_header *section;
struct pqi_config_table_section_info section_info;
- struct pqi_config_table_section_info feature_section_info;
+ struct pqi_config_table_section_info feature_section_info = {0};
table_length = ctrl_info->config_table_length;
if (table_length == 0)
@@ -9008,6 +9040,7 @@ static void pqi_pci_remove(struct pci_dev *pci_dev)
{
struct pqi_ctrl_info *ctrl_info;
u16 vendor_id;
+ int rc;
ctrl_info = pci_get_drvdata(pci_dev);
if (!ctrl_info)
@@ -9019,6 +9052,13 @@ static void pqi_pci_remove(struct pci_dev *pci_dev)
else
ctrl_info->ctrl_removal_state = PQI_CTRL_GRACEFUL_REMOVAL;
+ if (ctrl_info->ctrl_removal_state == PQI_CTRL_GRACEFUL_REMOVAL) {
+ rc = pqi_flush_cache(ctrl_info, RESTART);
+ if (rc)
+ dev_err(&pci_dev->dev,
+ "unable to flush controller cache during remove\n");
+ }
+
pqi_remove_ctrl(ctrl_info);
}
@@ -9304,6 +9344,10 @@ static const struct pci_device_id pqi_pci_id_table[] = {
},
{
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x193d, 0x110b)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
0x193d, 0x8460)
},
{
@@ -9404,6 +9448,22 @@ static const struct pci_device_id pqi_pci_id_table[] = {
},
{
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1bd4, 0x0086)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1bd4, 0x0087)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1bd4, 0x0088)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1bd4, 0x0089)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
0x19e5, 0xd227)
},
{
@@ -9652,6 +9712,10 @@ static const struct pci_device_id pqi_pci_id_table[] = {
},
{
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_ADAPTEC2, 0x1475)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
PCI_VENDOR_ID_ADAPTEC2, 0x1480)
},
{
@@ -9708,6 +9772,14 @@ static const struct pci_device_id pqi_pci_id_table[] = {
},
{
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_ADAPTEC2, 0x14c3)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_ADAPTEC2, 0x14c4)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
PCI_VENDOR_ID_ADAPTEC2, 0x14d0)
},
{
@@ -9944,6 +10016,18 @@ static const struct pci_device_id pqi_pci_id_table[] = {
},
{
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1e93, 0x1000)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1e93, 0x1001)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1e93, 0x1002)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
PCI_ANY_ID, PCI_ANY_ID)
},
{ 0 }