36 files changed, 635 insertions, 950 deletions
diff --git a/Documentation/devicetree/bindings/ufs/qcom,ufs.yaml b/Documentation/devicetree/bindings/ufs/qcom,ufs.yaml index 25a5edeea164..cde334e3206b 100644 --- a/Documentation/devicetree/bindings/ufs/qcom,ufs.yaml +++ b/Documentation/devicetree/bindings/ufs/qcom,ufs.yaml @@ -26,6 +26,7 @@ properties: - qcom,msm8994-ufshc - qcom,msm8996-ufshc - qcom,msm8998-ufshc + - qcom,qcs8300-ufshc - qcom,sa8775p-ufshc - qcom,sc7180-ufshc - qcom,sc7280-ufshc @@ -146,6 +147,7 @@ allOf: contains: enum: - qcom,msm8998-ufshc + - qcom,qcs8300-ufshc - qcom,sa8775p-ufshc - qcom,sc7280-ufshc - qcom,sc8180x-ufshc diff --git a/MAINTAINERS b/MAINTAINERS index c27f3190737f..6b5e9f0ffc92 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -23816,7 +23816,9 @@ F: drivers/ufs/host/*dwc* UNIVERSAL FLASH STORAGE HOST CONTROLLER DRIVER EXYNOS HOOKS M: Alim Akhtar <alim.akhtar@samsung.com> +R: Peter Griffin <peter.griffin@linaro.org> L: linux-scsi@vger.kernel.org +L: linux-samsung-soc@vger.kernel.org S: Maintained F: drivers/ufs/host/ufs-exynos* diff --git a/drivers/message/fusion/mptlan.h b/drivers/message/fusion/mptlan.h index a1ec7e84d6fe..40b34f670065 100644 --- a/drivers/message/fusion/mptlan.h +++ b/drivers/message/fusion/mptlan.h @@ -51,10 +51,7 @@ #define LINUX_MPTLAN_H_INCLUDED /*****************************************************************************/ -#if !defined(__GENKSYMS__) #include <linux/module.h> -#endif - #include <linux/netdevice.h> #include <linux/errno.h> // #include <linux/etherdevice.h> diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h index 1d09d3ac6aa4..8c384c25dca1 100644 --- a/drivers/scsi/aacraid/aacraid.h +++ b/drivers/scsi/aacraid/aacraid.h @@ -2736,7 +2736,6 @@ unsigned int aac_intr_normal(struct aac_dev *dev, u32 Index, int isAif, int isFastResponse, struct hw_fib *aif_fib); int aac_reset_adapter(struct aac_dev *dev, int forced, u8 reset_type); -int aac_check_health(struct aac_dev * dev); int aac_command_thread(void *data); int aac_close_fib_context(struct aac_dev * dev, struct aac_fib_context *fibctx); int aac_fib_adapter_complete(struct fib * fibptr, unsigned short size); diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c index 47287559c768..ffef61c4aa01 100644 --- a/drivers/scsi/aacraid/commsup.c +++ b/drivers/scsi/aacraid/commsup.c @@ -1698,127 +1698,6 @@ int aac_reset_adapter(struct aac_dev *aac, int forced, u8 reset_type) return retval; } -int aac_check_health(struct aac_dev * aac) -{ - int BlinkLED; - unsigned long time_now, flagv = 0; - struct list_head * entry; - - /* Extending the scope of fib_lock slightly to protect aac->in_reset */ - if (spin_trylock_irqsave(&aac->fib_lock, flagv) == 0) - return 0; - - if (aac->in_reset || !(BlinkLED = aac_adapter_check_health(aac))) { - spin_unlock_irqrestore(&aac->fib_lock, flagv); - return 0; /* OK */ - } - - aac->in_reset = 1; - - /* Fake up an AIF: - * aac_aifcmd.command = AifCmdEventNotify = 1 - * aac_aifcmd.seqnum = 0xFFFFFFFF - * aac_aifcmd.data[0] = AifEnExpEvent = 23 - * aac_aifcmd.data[1] = AifExeFirmwarePanic = 3 - * aac.aifcmd.data[2] = AifHighPriority = 3 - * aac.aifcmd.data[3] = BlinkLED - */ - - time_now = jiffies/HZ; - entry = aac->fib_list.next; - - /* - * For each Context that is on the - * fibctxList, make a copy of the - * fib, and then set the event to wake up the - * thread that is waiting for it. 
- */ - while (entry != &aac->fib_list) { - /* - * Extract the fibctx - */ - struct aac_fib_context *fibctx = list_entry(entry, struct aac_fib_context, next); - struct hw_fib * hw_fib; - struct fib * fib; - /* - * Check if the queue is getting - * backlogged - */ - if (fibctx->count > 20) { - /* - * It's *not* jiffies folks, - * but jiffies / HZ, so do not - * panic ... - */ - u32 time_last = fibctx->jiffies; - /* - * Has it been > 2 minutes - * since the last read off - * the queue? - */ - if ((time_now - time_last) > aif_timeout) { - entry = entry->next; - aac_close_fib_context(aac, fibctx); - continue; - } - } - /* - * Warning: no sleep allowed while - * holding spinlock - */ - hw_fib = kzalloc(sizeof(struct hw_fib), GFP_ATOMIC); - fib = kzalloc(sizeof(struct fib), GFP_ATOMIC); - if (fib && hw_fib) { - struct aac_aifcmd * aif; - - fib->hw_fib_va = hw_fib; - fib->dev = aac; - aac_fib_init(fib); - fib->type = FSAFS_NTC_FIB_CONTEXT; - fib->size = sizeof (struct fib); - fib->data = hw_fib->data; - aif = (struct aac_aifcmd *)hw_fib->data; - aif->command = cpu_to_le32(AifCmdEventNotify); - aif->seqnum = cpu_to_le32(0xFFFFFFFF); - ((__le32 *)aif->data)[0] = cpu_to_le32(AifEnExpEvent); - ((__le32 *)aif->data)[1] = cpu_to_le32(AifExeFirmwarePanic); - ((__le32 *)aif->data)[2] = cpu_to_le32(AifHighPriority); - ((__le32 *)aif->data)[3] = cpu_to_le32(BlinkLED); - - /* - * Put the FIB onto the - * fibctx's fibs - */ - list_add_tail(&fib->fiblink, &fibctx->fib_list); - fibctx->count++; - /* - * Set the event to wake up the - * thread that will waiting. - */ - complete(&fibctx->completion); - } else { - printk(KERN_WARNING "aifd: didn't allocate NewFib.\n"); - kfree(fib); - kfree(hw_fib); - } - entry = entry->next; - } - - spin_unlock_irqrestore(&aac->fib_lock, flagv); - - if (BlinkLED < 0) { - printk(KERN_ERR "%s: Host adapter is dead (or got a PCI error) %d\n", - aac->name, BlinkLED); - goto out; - } - - printk(KERN_ERR "%s: Host adapter BLINK LED 0x%x\n", aac->name, BlinkLED); - -out: - aac->in_reset = 0; - return BlinkLED; -} - static inline int is_safw_raid_volume(struct aac_dev *aac, int bus, int target) { return bus == CONTAINER_CHANNEL && target < aac->maximum_num_containers; diff --git a/drivers/scsi/aic7xxx/aic7770.c b/drivers/scsi/aic7xxx/aic7770.c index 176704b24e6a..f1ce02cd569e 100644 --- a/drivers/scsi/aic7xxx/aic7770.c +++ b/drivers/scsi/aic7xxx/aic7770.c @@ -99,21 +99,6 @@ struct aic7770_identity aic7770_ident_table[] = ahc_aic7770_EISA_setup } }; -const int ahc_num_aic7770_devs = ARRAY_SIZE(aic7770_ident_table); - -struct aic7770_identity * -aic7770_find_device(uint32_t id) -{ - struct aic7770_identity *entry; - int i; - - for (i = 0; i < ahc_num_aic7770_devs; i++) { - entry = &aic7770_ident_table[i]; - if (entry->full_id == (id & entry->id_mask)) - return (entry); - } - return (NULL); -} int aic7770_config(struct ahc_softc *ahc, struct aic7770_identity *entry, u_int io) diff --git a/drivers/scsi/aic7xxx/aic7xxx.h b/drivers/scsi/aic7xxx/aic7xxx.h index 9bc755a0a2d3..20857c213c72 100644 --- a/drivers/scsi/aic7xxx/aic7xxx.h +++ b/drivers/scsi/aic7xxx/aic7xxx.h @@ -1119,7 +1119,6 @@ struct aic7770_identity { ahc_device_setup_t *setup; }; extern struct aic7770_identity aic7770_ident_table[]; -extern const int ahc_num_aic7770_devs; #define AHC_EISA_SLOT_OFFSET 0xc00 #define AHC_EISA_IOSIZE 0x100 @@ -1135,7 +1134,6 @@ int ahc_pci_test_register_access(struct ahc_softc *); void __maybe_unused ahc_pci_resume(struct ahc_softc *ahc); /*************************** EISA/VL Front End 
********************************/ -struct aic7770_identity *aic7770_find_device(uint32_t); int aic7770_config(struct ahc_softc *ahc, struct aic7770_identity *, u_int port); diff --git a/drivers/scsi/bfa/bfa.h b/drivers/scsi/bfa/bfa.h index 4cb9249e583c..a6b8c4ddea19 100644 --- a/drivers/scsi/bfa/bfa.h +++ b/drivers/scsi/bfa/bfa.h @@ -138,14 +138,6 @@ bfa_reqq_winit(struct bfa_reqq_wait_s *wqe, void (*qresume) (void *cbarg), } while (0) -/* - * PCI devices supported by the current BFA - */ -struct bfa_pciid_s { - u16 device_id; - u16 vendor_id; -}; - extern char bfa_version[]; struct bfa_iocfc_regs_s { @@ -408,9 +400,7 @@ int bfa_iocfc_get_pbc_vports(struct bfa_s *bfa, (((&(_bfa)->modules.dconf_mod)->min_cfg) \ ? BFA_LUNMASK_MINCFG : ((bfa_get_lun_mask(_bfa))->status)) -void bfa_get_pciids(struct bfa_pciid_s **pciids, int *npciids); void bfa_cfg_get_default(struct bfa_iocfc_cfg_s *cfg); -void bfa_cfg_get_min(struct bfa_iocfc_cfg_s *cfg); void bfa_cfg_get_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *meminfo, struct bfa_s *bfa); diff --git a/drivers/scsi/bfa/bfa_core.c b/drivers/scsi/bfa/bfa_core.c index 3438d0b8ba06..a99a101b95ef 100644 --- a/drivers/scsi/bfa/bfa_core.c +++ b/drivers/scsi/bfa/bfa_core.c @@ -1934,24 +1934,6 @@ bfa_comp_free(struct bfa_s *bfa, struct list_head *comp_q) } /* - * Return the list of PCI vendor/device id lists supported by this - * BFA instance. - */ -void -bfa_get_pciids(struct bfa_pciid_s **pciids, int *npciids) -{ - static struct bfa_pciid_s __pciids[] = { - {BFA_PCI_VENDOR_ID_BROCADE, BFA_PCI_DEVICE_ID_FC_8G2P}, - {BFA_PCI_VENDOR_ID_BROCADE, BFA_PCI_DEVICE_ID_FC_8G1P}, - {BFA_PCI_VENDOR_ID_BROCADE, BFA_PCI_DEVICE_ID_CT}, - {BFA_PCI_VENDOR_ID_BROCADE, BFA_PCI_DEVICE_ID_CT_FC}, - }; - - *npciids = ARRAY_SIZE(__pciids); - *pciids = __pciids; -} - -/* * Use this function query the default struct bfa_iocfc_cfg_s value (compiled * into BFA layer). The OS driver can then turn back and overwrite entries that * have been configured by the user. @@ -1987,20 +1969,3 @@ bfa_cfg_get_default(struct bfa_iocfc_cfg_s *cfg) cfg->drvcfg.delay_comp = BFA_FALSE; } - -void -bfa_cfg_get_min(struct bfa_iocfc_cfg_s *cfg) -{ - bfa_cfg_get_default(cfg); - cfg->fwcfg.num_ioim_reqs = BFA_IOIM_MIN; - cfg->fwcfg.num_tskim_reqs = BFA_TSKIM_MIN; - cfg->fwcfg.num_fcxp_reqs = BFA_FCXP_MIN; - cfg->fwcfg.num_uf_bufs = BFA_UF_MIN; - cfg->fwcfg.num_rports = BFA_RPORT_MIN; - cfg->fwcfg.num_fwtio_reqs = 0; - - cfg->drvcfg.num_sgpgs = BFA_SGPG_MIN; - cfg->drvcfg.num_reqq_elems = BFA_REQQ_NELEMS_MIN; - cfg->drvcfg.num_rspq_elems = BFA_RSPQ_NELEMS_MIN; - cfg->drvcfg.min_cfg = BFA_TRUE; -} diff --git a/drivers/scsi/bfa/bfa_defs_fcs.h b/drivers/scsi/bfa/bfa_defs_fcs.h index 5e36620425ac..760606062c66 100644 --- a/drivers/scsi/bfa/bfa_defs_fcs.h +++ b/drivers/scsi/bfa/bfa_defs_fcs.h @@ -125,28 +125,6 @@ enum bfa_lport_offline_reason { }; /* - * FCS lport info. 
- */ -struct bfa_lport_info_s { - u8 port_type; /* bfa_lport_type_t : physical or - * virtual */ - u8 port_state; /* one of bfa_lport_state values */ - u8 offline_reason; /* one of bfa_lport_offline_reason_t - * values */ - wwn_t port_wwn; - wwn_t node_wwn; - - /* - * following 4 feilds are valid for Physical Ports only - */ - u32 max_vports_supp; /* Max supported vports */ - u32 num_vports_inuse; /* Num of in use vports */ - u32 max_rports_supp; /* Max supported rports */ - u32 num_rports_inuse; /* Num of doscovered rports */ - -}; - -/* * FCS port statistics */ struct bfa_lport_stats_s { diff --git a/drivers/scsi/bfa/bfa_fcpim.c b/drivers/scsi/bfa/bfa_fcpim.c index 28ae4dc14dc9..2518e36d70d3 100644 --- a/drivers/scsi/bfa/bfa_fcpim.c +++ b/drivers/scsi/bfa/bfa_fcpim.c @@ -3413,15 +3413,6 @@ bfa_tskim_iocdisable_ios(struct bfa_tskim_s *tskim) } /* - * Notification on completions from related ioim. - */ -void -bfa_tskim_iodone(struct bfa_tskim_s *tskim) -{ - bfa_wc_down(&tskim->wc); -} - -/* * Handle IOC h/w failure notification from itnim. */ void diff --git a/drivers/scsi/bfa/bfa_fcpim.h b/drivers/scsi/bfa/bfa_fcpim.h index 4499f84c2d81..64e4485b226e 100644 --- a/drivers/scsi/bfa/bfa_fcpim.h +++ b/drivers/scsi/bfa/bfa_fcpim.h @@ -339,7 +339,6 @@ void bfa_ioim_tov(struct bfa_ioim_s *ioim); void bfa_tskim_attach(struct bfa_fcpim_s *fcpim); void bfa_tskim_isr(struct bfa_s *bfa, struct bfi_msg_s *msg); -void bfa_tskim_iodone(struct bfa_tskim_s *tskim); void bfa_tskim_iocdisable(struct bfa_tskim_s *tskim); void bfa_tskim_cleanup(struct bfa_tskim_s *tskim); void bfa_tskim_res_recfg(struct bfa_s *bfa, u16 num_tskim_fw); diff --git a/drivers/scsi/bfa/bfa_fcs.h b/drivers/scsi/bfa/bfa_fcs.h index 9788354b90da..19903539a08d 100644 --- a/drivers/scsi/bfa/bfa_fcs.h +++ b/drivers/scsi/bfa/bfa_fcs.h @@ -373,15 +373,11 @@ bfa_boolean_t bfa_fcs_lport_is_online(struct bfa_fcs_lport_s *port); struct bfa_fcs_lport_s *bfa_fcs_get_base_port(struct bfa_fcs_s *fcs); void bfa_fcs_lport_get_rport_quals(struct bfa_fcs_lport_s *port, struct bfa_rport_qualifier_s rport[], int *nrports); -wwn_t bfa_fcs_lport_get_rport(struct bfa_fcs_lport_s *port, wwn_t wwn, - int index, int nrports, bfa_boolean_t bwwn); struct bfa_fcs_lport_s *bfa_fcs_lookup_port(struct bfa_fcs_s *fcs, u16 vf_id, wwn_t lpwwn); void bfa_fcs_lport_set_symname(struct bfa_fcs_lport_s *port, char *symname); -void bfa_fcs_lport_get_info(struct bfa_fcs_lport_s *port, - struct bfa_lport_info_s *port_info); void bfa_fcs_lport_get_attr(struct bfa_fcs_lport_s *port, struct bfa_lport_attr_s *port_attr); void bfa_fcs_lport_get_stats(struct bfa_fcs_lport_s *fcs_port, @@ -416,8 +412,6 @@ struct bfa_fcs_rport_s *bfa_fcs_lport_get_rport_by_old_pid( struct bfa_fcs_lport_s *port, u32 pid); struct bfa_fcs_rport_s *bfa_fcs_lport_get_rport_by_pwwn( struct bfa_fcs_lport_s *port, wwn_t pwwn); -struct bfa_fcs_rport_s *bfa_fcs_lport_get_rport_by_nwwn( - struct bfa_fcs_lport_s *port, wwn_t nwwn); struct bfa_fcs_rport_s *bfa_fcs_lport_get_rport_by_qualifier( struct bfa_fcs_lport_s *port, wwn_t pwwn, u32 pid); void bfa_fcs_lport_add_rport(struct bfa_fcs_lport_s *port, @@ -486,7 +480,6 @@ bfa_status_t bfa_fcs_pbc_vport_create(struct bfa_fcs_vport_s *vport, struct bfa_fcs_s *fcs, u16 vf_id, struct bfa_lport_cfg_s *port_cfg, struct bfad_vport_s *vport_drv); -bfa_boolean_t bfa_fcs_is_pbc_vport(struct bfa_fcs_vport_s *vport); bfa_status_t bfa_fcs_vport_delete(struct bfa_fcs_vport_s *vport); bfa_status_t bfa_fcs_vport_start(struct bfa_fcs_vport_s *vport); bfa_status_t 
bfa_fcs_vport_stop(struct bfa_fcs_vport_s *vport); @@ -494,7 +487,6 @@ void bfa_fcs_vport_get_attr(struct bfa_fcs_vport_s *vport, struct bfa_vport_attr_s *vport_attr); struct bfa_fcs_vport_s *bfa_fcs_vport_lookup(struct bfa_fcs_s *fcs, u16 vf_id, wwn_t vpwwn); -void bfa_fcs_vport_cleanup(struct bfa_fcs_vport_s *vport); void bfa_fcs_vport_online(struct bfa_fcs_vport_s *vport); void bfa_fcs_vport_offline(struct bfa_fcs_vport_s *vport); void bfa_fcs_vport_delete_comp(struct bfa_fcs_vport_s *vport); @@ -623,8 +615,6 @@ void bfa_fcs_rport_get_attr(struct bfa_fcs_rport_s *rport, struct bfa_rport_attr_s *attr); struct bfa_fcs_rport_s *bfa_fcs_rport_lookup(struct bfa_fcs_lport_s *port, wwn_t rpwwn); -struct bfa_fcs_rport_s *bfa_fcs_rport_lookup_by_nwwn( - struct bfa_fcs_lport_s *port, wwn_t rnwwn); void bfa_fcs_rport_set_del_timeout(u8 rport_tmo); void bfa_fcs_rport_set_max_logins(u32 max_logins); void bfa_fcs_rport_uf_recv(struct bfa_fcs_rport_s *rport, @@ -633,8 +623,6 @@ void bfa_fcs_rport_scn(struct bfa_fcs_rport_s *rport); struct bfa_fcs_rport_s *bfa_fcs_rport_create(struct bfa_fcs_lport_s *port, u32 pid); -void bfa_fcs_rport_start(struct bfa_fcs_lport_s *port, struct fchs_s *rx_fchs, - struct fc_logi_s *plogi_rsp); void bfa_fcs_rport_plogi_create(struct bfa_fcs_lport_s *port, struct fchs_s *rx_fchs, struct fc_logi_s *plogi); diff --git a/drivers/scsi/bfa/bfa_fcs_lport.c b/drivers/scsi/bfa/bfa_fcs_lport.c index 966bf6cc6dd9..9a85f417018f 100644 --- a/drivers/scsi/bfa/bfa_fcs_lport.c +++ b/drivers/scsi/bfa/bfa_fcs_lport.c @@ -938,25 +938,6 @@ bfa_fcs_lport_get_rport_by_pwwn(struct bfa_fcs_lport_s *port, wwn_t pwwn) } /* - * NWWN based Lookup for a R-Port in the Port R-Port Queue - */ -struct bfa_fcs_rport_s * -bfa_fcs_lport_get_rport_by_nwwn(struct bfa_fcs_lport_s *port, wwn_t nwwn) -{ - struct bfa_fcs_rport_s *rport; - struct list_head *qe; - - list_for_each(qe, &port->rport_q) { - rport = (struct bfa_fcs_rport_s *) qe; - if (wwn_is_equal(rport->nwwn, nwwn)) - return rport; - } - - bfa_trc(port->fcs, nwwn); - return NULL; -} - -/* * PWWN & PID based Lookup for a R-Port in the Port R-Port Queue */ struct bfa_fcs_rport_s * @@ -5645,54 +5626,6 @@ bfa_fcs_get_base_port(struct bfa_fcs_s *fcs) return &fcs->fabric.bport; } -wwn_t -bfa_fcs_lport_get_rport(struct bfa_fcs_lport_s *port, wwn_t wwn, int index, - int nrports, bfa_boolean_t bwwn) -{ - struct list_head *qh, *qe; - struct bfa_fcs_rport_s *rport = NULL; - int i; - struct bfa_fcs_s *fcs; - - if (port == NULL || nrports == 0) - return (wwn_t) 0; - - fcs = port->fcs; - bfa_trc(fcs, (u32) nrports); - - i = 0; - qh = &port->rport_q; - qe = bfa_q_first(qh); - - while ((qe != qh) && (i < nrports)) { - rport = (struct bfa_fcs_rport_s *) qe; - if (bfa_ntoh3b(rport->pid) > 0xFFF000) { - qe = bfa_q_next(qe); - bfa_trc(fcs, (u32) rport->pwwn); - bfa_trc(fcs, rport->pid); - bfa_trc(fcs, i); - continue; - } - - if (bwwn) { - if (!memcmp(&wwn, &rport->pwwn, 8)) - break; - } else { - if (i == index) - break; - } - - i++; - qe = bfa_q_next(qe); - } - - bfa_trc(fcs, i); - if (rport) - return rport->pwwn; - else - return (wwn_t) 0; -} - void bfa_fcs_lport_get_rport_quals(struct bfa_fcs_lport_s *port, struct bfa_rport_qualifier_s rports[], int *nrports) @@ -5823,54 +5756,6 @@ bfa_fcs_lookup_port(struct bfa_fcs_s *fcs, u16 vf_id, wwn_t lpwwn) return NULL; } -/* - * API corresponding to NPIV_VPORT_GETINFO. 
- */ -void -bfa_fcs_lport_get_info(struct bfa_fcs_lport_s *port, - struct bfa_lport_info_s *port_info) -{ - - bfa_trc(port->fcs, port->fabric->fabric_name); - - if (port->vport == NULL) { - /* - * This is a Physical port - */ - port_info->port_type = BFA_LPORT_TYPE_PHYSICAL; - - /* - * @todo : need to fix the state & reason - */ - port_info->port_state = 0; - port_info->offline_reason = 0; - - port_info->port_wwn = bfa_fcs_lport_get_pwwn(port); - port_info->node_wwn = bfa_fcs_lport_get_nwwn(port); - - port_info->max_vports_supp = - bfa_lps_get_max_vport(port->fcs->bfa); - port_info->num_vports_inuse = - port->fabric->num_vports; - port_info->max_rports_supp = BFA_FCS_MAX_RPORTS_SUPP; - port_info->num_rports_inuse = port->num_rports; - } else { - /* - * This is a virtual port - */ - port_info->port_type = BFA_LPORT_TYPE_VIRTUAL; - - /* - * @todo : need to fix the state & reason - */ - port_info->port_state = 0; - port_info->offline_reason = 0; - - port_info->port_wwn = bfa_fcs_lport_get_pwwn(port); - port_info->node_wwn = bfa_fcs_lport_get_nwwn(port); - } -} - void bfa_fcs_lport_get_stats(struct bfa_fcs_lport_s *fcs_port, struct bfa_lport_stats_s *port_stats) @@ -6568,15 +6453,6 @@ bfa_fcs_vport_offline(struct bfa_fcs_vport_s *vport) } /* - * Cleanup notification from fabric SM on link timer expiry. - */ -void -bfa_fcs_vport_cleanup(struct bfa_fcs_vport_s *vport) -{ - vport->vport_stats.fab_cleanup++; -} - -/* * Stop notification from fabric SM. To be invoked from within FCS. */ void @@ -6699,24 +6575,6 @@ bfa_fcs_pbc_vport_create(struct bfa_fcs_vport_s *vport, struct bfa_fcs_s *fcs, } /* - * Use this function to findout if this is a pbc vport or not. - * - * @param[in] vport - pointer to bfa_fcs_vport_t. - * - * @returns None - */ -bfa_boolean_t -bfa_fcs_is_pbc_vport(struct bfa_fcs_vport_s *vport) -{ - - if (vport && (vport->lport.port_cfg.preboot_vp == BFA_TRUE)) - return BFA_TRUE; - else - return BFA_FALSE; - -} - -/* * Use this function initialize the vport. * * @param[in] vport - pointer to bfa_fcs_vport_t. diff --git a/drivers/scsi/bfa/bfa_fcs_rport.c b/drivers/scsi/bfa/bfa_fcs_rport.c index ce52a9c88ae6..d4bde9bbe75b 100644 --- a/drivers/scsi/bfa/bfa_fcs_rport.c +++ b/drivers/scsi/bfa/bfa_fcs_rport.c @@ -2646,27 +2646,6 @@ bfa_fcs_rport_create_by_wwn(struct bfa_fcs_lport_s *port, wwn_t rpwwn) bfa_sm_send_event(rport, RPSM_EVENT_ADDRESS_DISC); return rport; } -/* - * Called by bport in private loop topology to indicate that a - * rport has been discovered and plogi has been completed. - * - * @param[in] port - base port or vport - * @param[in] rpid - remote port ID - */ -void -bfa_fcs_rport_start(struct bfa_fcs_lport_s *port, struct fchs_s *fchs, - struct fc_logi_s *plogi) -{ - struct bfa_fcs_rport_s *rport; - - rport = bfa_fcs_rport_alloc(port, WWN_NULL, fchs->s_id); - if (!rport) - return; - - bfa_fcs_rport_update(rport, plogi); - - bfa_sm_send_event(rport, RPSM_EVENT_PLOGI_COMP); -} /* * Called by bport/vport to handle PLOGI received from a new remote port. @@ -3089,21 +3068,6 @@ bfa_fcs_rport_lookup(struct bfa_fcs_lport_s *port, wwn_t rpwwn) return rport; } -struct bfa_fcs_rport_s * -bfa_fcs_rport_lookup_by_nwwn(struct bfa_fcs_lport_s *port, wwn_t rnwwn) -{ - struct bfa_fcs_rport_s *rport; - - rport = bfa_fcs_lport_get_rport_by_nwwn(port, rnwwn); - if (rport == NULL) { - /* - * TBD Error handling - */ - } - - return rport; -} - /* * Remote port features (RPF) implementation. 
*/ diff --git a/drivers/scsi/bfa/bfa_ioc.c b/drivers/scsi/bfa/bfa_ioc.c index ea2f107f564c..aa68d61a2d0d 100644 --- a/drivers/scsi/bfa/bfa_ioc.c +++ b/drivers/scsi/bfa/bfa_ioc.c @@ -2254,17 +2254,6 @@ bfa_ioc_boot(struct bfa_ioc_s *ioc, u32 boot_type, u32 boot_env) return status; } -/* - * Enable/disable IOC failure auto recovery. - */ -void -bfa_ioc_auto_recover(bfa_boolean_t auto_recover) -{ - bfa_auto_recover = auto_recover; -} - - - bfa_boolean_t bfa_ioc_is_operational(struct bfa_ioc_s *ioc) { @@ -2272,16 +2261,6 @@ bfa_ioc_is_operational(struct bfa_ioc_s *ioc) } bfa_boolean_t -bfa_ioc_is_initialized(struct bfa_ioc_s *ioc) -{ - u32 r32 = bfa_ioc_get_cur_ioc_fwstate(ioc); - - return ((r32 != BFI_IOC_UNINIT) && - (r32 != BFI_IOC_INITING) && - (r32 != BFI_IOC_MEMTEST)); -} - -bfa_boolean_t bfa_ioc_msgget(struct bfa_ioc_s *ioc, void *mbmsg) { __be32 *msgp = mbmsg; diff --git a/drivers/scsi/bfa/bfa_ioc.h b/drivers/scsi/bfa/bfa_ioc.h index 3ec10503caff..d70332e9ad6d 100644 --- a/drivers/scsi/bfa/bfa_ioc.h +++ b/drivers/scsi/bfa/bfa_ioc.h @@ -919,7 +919,6 @@ void bfa_ioc_ct2_poweron(struct bfa_ioc_s *ioc); void bfa_ioc_attach(struct bfa_ioc_s *ioc, void *bfa, struct bfa_ioc_cbfn_s *cbfn, struct bfa_timer_mod_s *timer_mod); -void bfa_ioc_auto_recover(bfa_boolean_t auto_recover); void bfa_ioc_detach(struct bfa_ioc_s *ioc); void bfa_ioc_suspend(struct bfa_ioc_s *ioc); void bfa_ioc_pci_init(struct bfa_ioc_s *ioc, struct bfa_pcidev_s *pcidev, @@ -934,7 +933,6 @@ bfa_status_t bfa_ioc_boot(struct bfa_ioc_s *ioc, u32 boot_type, void bfa_ioc_isr(struct bfa_ioc_s *ioc, struct bfi_mbmsg_s *msg); void bfa_ioc_error_isr(struct bfa_ioc_s *ioc); bfa_boolean_t bfa_ioc_is_operational(struct bfa_ioc_s *ioc); -bfa_boolean_t bfa_ioc_is_initialized(struct bfa_ioc_s *ioc); bfa_boolean_t bfa_ioc_is_disabled(struct bfa_ioc_s *ioc); bfa_boolean_t bfa_ioc_is_acq_addr(struct bfa_ioc_s *ioc); bfa_boolean_t bfa_ioc_fw_mismatch(struct bfa_ioc_s *ioc); diff --git a/drivers/scsi/bfa/bfa_modules.h b/drivers/scsi/bfa/bfa_modules.h index 578e7678b056..ed29ebda30da 100644 --- a/drivers/scsi/bfa/bfa_modules.h +++ b/drivers/scsi/bfa/bfa_modules.h @@ -113,7 +113,6 @@ void bfa_sgpg_meminfo(struct bfa_iocfc_cfg_s *, struct bfa_meminfo_s *, struct bfa_s *); void bfa_sgpg_attach(struct bfa_s *, void *bfad, struct bfa_iocfc_cfg_s *, struct bfa_pcidev_s *); -void bfa_uf_iocdisable(struct bfa_s *); void bfa_uf_meminfo(struct bfa_iocfc_cfg_s *, struct bfa_meminfo_s *, struct bfa_s *); void bfa_uf_attach(struct bfa_s *, void *, struct bfa_iocfc_cfg_s *, diff --git a/drivers/scsi/bfa/bfa_svc.c b/drivers/scsi/bfa/bfa_svc.c index 9f33aa303b18..df33afaaa673 100644 --- a/drivers/scsi/bfa/bfa_svc.c +++ b/drivers/scsi/bfa/bfa_svc.c @@ -913,14 +913,6 @@ bfa_fcxp_get_reqbuf(struct bfa_fcxp_s *fcxp) return reqbuf; } -u32 -bfa_fcxp_get_reqbufsz(struct bfa_fcxp_s *fcxp) -{ - struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod; - - return mod->req_pld_sz; -} - /* * Get the internal response buffer pointer * @@ -1023,21 +1015,6 @@ bfa_fcxp_send(struct bfa_fcxp_s *fcxp, struct bfa_rport_s *rport, bfa_fcxp_queue(fcxp, send_req); } -/* - * Abort a BFA FCXP - * - * @param[in] fcxp BFA fcxp pointer - * - * @return void - */ -bfa_status_t -bfa_fcxp_abort(struct bfa_fcxp_s *fcxp) -{ - bfa_trc(fcxp->fcxp_mod->bfa, fcxp->fcxp_tag); - WARN_ON(1); - return BFA_STATUS_OK; -} - void bfa_fcxp_req_rsp_alloc_wait(struct bfa_s *bfa, struct bfa_fcxp_wqe_s *wqe, bfa_fcxp_alloc_cbfn_t alloc_cbfn, void *alloc_cbarg, @@ -3857,15 +3834,6 @@ bfa_fcport_clr_hardalpa(struct bfa_s *bfa) 
return BFA_STATUS_OK; } -bfa_boolean_t -bfa_fcport_get_hardalpa(struct bfa_s *bfa, u8 *alpa) -{ - struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); - - *alpa = fcport->cfg.hardalpa; - return fcport->cfg.cfg_hardalpa; -} - u8 bfa_fcport_get_myalpa(struct bfa_s *bfa) { @@ -3923,17 +3891,6 @@ bfa_fcport_set_tx_bbcredit(struct bfa_s *bfa, u16 tx_bbcredit) /* * Get port attributes. */ - -wwn_t -bfa_fcport_get_wwn(struct bfa_s *bfa, bfa_boolean_t node) -{ - struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); - if (node) - return fcport->nwwn; - else - return fcport->pwwn; -} - void bfa_fcport_get_attr(struct bfa_s *bfa, struct bfa_port_attr_s *attr) { @@ -4106,18 +4063,6 @@ bfa_fcport_is_ratelim(struct bfa_s *bfa) } /* - * Enable/Disable FAA feature in port config - */ -void -bfa_fcport_cfg_faa(struct bfa_s *bfa, u8 state) -{ - struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); - - bfa_trc(bfa, state); - fcport->cfg.faa_state = state; -} - -/* * Get default minimum ratelim speed */ enum bfa_port_speed @@ -5529,23 +5474,6 @@ uf_recv(struct bfa_s *bfa, struct bfi_uf_frm_rcvd_s *m) } void -bfa_uf_iocdisable(struct bfa_s *bfa) -{ - struct bfa_uf_mod_s *ufm = BFA_UF_MOD(bfa); - struct bfa_uf_s *uf; - struct list_head *qe, *qen; - - /* Enqueue unused uf resources to free_q */ - list_splice_tail_init(&ufm->uf_unused_q, &ufm->uf_free_q); - - list_for_each_safe(qe, qen, &ufm->uf_posted_q) { - uf = (struct bfa_uf_s *) qe; - list_del(&uf->qe); - bfa_uf_put(ufm, uf); - } -} - -void bfa_uf_start(struct bfa_s *bfa) { bfa_uf_post_all(BFA_UF_MOD(bfa)); diff --git a/drivers/scsi/bfa/bfa_svc.h b/drivers/scsi/bfa/bfa_svc.h index 26eeee82bedc..99a960a8a2db 100644 --- a/drivers/scsi/bfa/bfa_svc.h +++ b/drivers/scsi/bfa/bfa_svc.h @@ -587,14 +587,12 @@ bfa_status_t bfa_fcport_cfg_topology(struct bfa_s *bfa, enum bfa_port_topology bfa_fcport_get_topology(struct bfa_s *bfa); enum bfa_port_topology bfa_fcport_get_cfg_topology(struct bfa_s *bfa); bfa_status_t bfa_fcport_cfg_hardalpa(struct bfa_s *bfa, u8 alpa); -bfa_boolean_t bfa_fcport_get_hardalpa(struct bfa_s *bfa, u8 *alpa); u8 bfa_fcport_get_myalpa(struct bfa_s *bfa); bfa_status_t bfa_fcport_clr_hardalpa(struct bfa_s *bfa); bfa_status_t bfa_fcport_cfg_maxfrsize(struct bfa_s *bfa, u16 maxsize); u16 bfa_fcport_get_maxfrsize(struct bfa_s *bfa); u8 bfa_fcport_get_rx_bbcredit(struct bfa_s *bfa); void bfa_fcport_get_attr(struct bfa_s *bfa, struct bfa_port_attr_s *attr); -wwn_t bfa_fcport_get_wwn(struct bfa_s *bfa, bfa_boolean_t node); void bfa_fcport_event_register(struct bfa_s *bfa, void (*event_cbfn) (void *cbarg, enum bfa_port_linkstate event), void *event_cbarg); @@ -619,7 +617,6 @@ bfa_boolean_t bfa_fcport_is_trunk_enabled(struct bfa_s *bfa); void bfa_fcport_dportenable(struct bfa_s *bfa); void bfa_fcport_dportdisable(struct bfa_s *bfa); bfa_status_t bfa_fcport_is_pbcdisabled(struct bfa_s *bfa); -void bfa_fcport_cfg_faa(struct bfa_s *bfa, u8 state); bfa_status_t bfa_fcport_cfg_bbcr(struct bfa_s *bfa, bfa_boolean_t on_off, u8 bb_scn); bfa_status_t bfa_fcport_get_bbcr_attr(struct bfa_s *bfa, @@ -687,8 +684,6 @@ void bfa_fcxp_send(struct bfa_fcxp_s *fcxp, struct bfa_rport_s *rport, bfa_cb_fcxp_send_t cbfn, void *cbarg, u32 rsp_maxlen, u8 rsp_timeout); -bfa_status_t bfa_fcxp_abort(struct bfa_fcxp_s *fcxp); -u32 bfa_fcxp_get_reqbufsz(struct bfa_fcxp_s *fcxp); u32 bfa_fcxp_get_maxrsp(struct bfa_s *bfa); void bfa_fcxp_res_recfg(struct bfa_s *bfa, u16 num_fcxp_fw); diff --git a/drivers/scsi/bfa/bfad.c b/drivers/scsi/bfa/bfad.c index 62cb7a864fd5..19675a6e0780 100644 --- 
a/drivers/scsi/bfa/bfad.c +++ b/drivers/scsi/bfa/bfad.c @@ -843,26 +843,6 @@ bfad_drv_init(struct bfad_s *bfad) } void -bfad_drv_uninit(struct bfad_s *bfad) -{ - unsigned long flags; - - spin_lock_irqsave(&bfad->bfad_lock, flags); - init_completion(&bfad->comp); - bfa_iocfc_stop(&bfad->bfa); - spin_unlock_irqrestore(&bfad->bfad_lock, flags); - wait_for_completion(&bfad->comp); - - del_timer_sync(&bfad->hal_tmo); - bfa_isr_disable(&bfad->bfa); - bfa_detach(&bfad->bfa); - bfad_remove_intr(bfad); - bfad_hal_mem_release(bfad); - - bfad->bfad_flags &= ~BFAD_DRV_INIT_DONE; -} - -void bfad_drv_start(struct bfad_s *bfad) { unsigned long flags; diff --git a/drivers/scsi/bfa/bfad_drv.h b/drivers/scsi/bfa/bfad_drv.h index da42e3261237..ce2ec5108947 100644 --- a/drivers/scsi/bfa/bfad_drv.h +++ b/drivers/scsi/bfa/bfad_drv.h @@ -312,7 +312,6 @@ void bfad_bfa_tmo(struct timer_list *t); void bfad_init_timer(struct bfad_s *bfad); int bfad_pci_init(struct pci_dev *pdev, struct bfad_s *bfad); void bfad_pci_uninit(struct pci_dev *pdev, struct bfad_s *bfad); -void bfad_drv_uninit(struct bfad_s *bfad); int bfad_worker(void *ptr); void bfad_debugfs_init(struct bfad_port_s *port); void bfad_debugfs_exit(struct bfad_port_s *port); diff --git a/drivers/scsi/bfa/bfi.h b/drivers/scsi/bfa/bfi.h index 41e6b4dac056..e1e0e967bcc3 100644 --- a/drivers/scsi/bfa/bfi.h +++ b/drivers/scsi/bfa/bfi.h @@ -1148,7 +1148,7 @@ struct bfi_diag_dport_scn_testcomp_s { u16 numbuffer; /* from switch */ u8 subtest_status[DPORT_TEST_MAX]; /* 4 bytes */ u32 latency; /* from switch */ - u32 distance; /* from swtich unit in meters */ + u32 distance; /* from switch unit in meters */ /* Buffers required to saturate the link */ u16 frm_sz; /* from switch for buf_reqd */ u8 rsvd[2]; diff --git a/drivers/scsi/hisi_sas/hisi_sas.h b/drivers/scsi/hisi_sas/hisi_sas.h index d223f482488f..a44768bceb9a 100644 --- a/drivers/scsi/hisi_sas/hisi_sas.h +++ b/drivers/scsi/hisi_sas/hisi_sas.h @@ -307,6 +307,7 @@ enum { struct hisi_sas_hw { int (*hw_init)(struct hisi_hba *hisi_hba); + int (*fw_info_check)(struct hisi_hba *hisi_hba); int (*interrupt_preinit)(struct hisi_hba *hisi_hba); void (*setup_itct)(struct hisi_hba *hisi_hba, struct hisi_sas_device *device); diff --git a/drivers/scsi/hisi_sas/hisi_sas_main.c b/drivers/scsi/hisi_sas/hisi_sas_main.c index 6219807ce3b9..53cb15f6714b 100644 --- a/drivers/scsi/hisi_sas/hisi_sas_main.c +++ b/drivers/scsi/hisi_sas/hisi_sas_main.c @@ -1321,6 +1321,7 @@ static int hisi_sas_softreset_ata_disk(struct domain_device *device) } if (rc == TMF_RESP_FUNC_COMPLETE) { + usleep_range(900, 1000); ata_for_each_link(link, ap, EDGE) { int pmp = sata_srst_pmp(link); @@ -1384,6 +1385,7 @@ static void hisi_sas_refresh_port_id(struct hisi_hba *hisi_hba) static void hisi_sas_rescan_topology(struct hisi_hba *hisi_hba, u32 state) { + u32 new_state = hisi_hba->hw->get_phys_state(hisi_hba); struct asd_sas_port *_sas_port = NULL; int phy_no; @@ -1397,7 +1399,7 @@ static void hisi_sas_rescan_topology(struct hisi_hba *hisi_hba, u32 state) continue; /* Report PHY state change to libsas */ - if (state & BIT(phy_no)) { + if (new_state & BIT(phy_no)) { if (do_port_check && sas_port && sas_port->port_dev) { struct domain_device *dev = sas_port->port_dev; @@ -1410,6 +1412,16 @@ static void hisi_sas_rescan_topology(struct hisi_hba *hisi_hba, u32 state) } } else { hisi_sas_phy_down(hisi_hba, phy_no, 0, GFP_KERNEL); + + /* + * The new_state is not ready but old_state is ready, + * the two possible causes: + * 1. The connected device is removed + * 2. 
Device exists but phyup timed out + */ + if (state & BIT(phy_no)) + hisi_sas_notify_phy_event(phy, + HISI_PHYE_LINK_RESET); } } } @@ -1545,9 +1557,15 @@ void hisi_sas_controller_reset_done(struct hisi_hba *hisi_hba) /* Init and wait for PHYs to come up and all libsas event finished. */ for (phy_no = 0; phy_no < hisi_hba->n_phy; phy_no++) { struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no]; + struct asd_sas_phy *sas_phy = &phy->sas_phy; - if (!(hisi_hba->phy_state & BIT(phy_no))) + if (!sas_phy->phy->enabled) + continue; + + if (!(hisi_hba->phy_state & BIT(phy_no))) { + hisi_sas_phy_enable(hisi_hba, phy_no, 1); continue; + } async_schedule_domain(hisi_sas_async_init_wait_phyup, phy, &async); @@ -2450,6 +2468,11 @@ static struct Scsi_Host *hisi_sas_shost_alloc(struct platform_device *pdev, if (hisi_sas_get_fw_info(hisi_hba) < 0) goto err_out; + if (hisi_hba->hw->fw_info_check) { + if (hisi_hba->hw->fw_info_check(hisi_hba)) + goto err_out; + } + error = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)); if (error) { dev_err(dev, "No usable DMA addressing method\n"); @@ -2630,10 +2653,10 @@ static __init int hisi_sas_init(void) static __exit void hisi_sas_exit(void) { - sas_release_transport(hisi_sas_stt); - if (hisi_sas_debugfs_enable) debugfs_remove(hisi_sas_debugfs_dir); + + sas_release_transport(hisi_sas_stt); } module_init(hisi_sas_init); diff --git a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c index 71b5008c3552..70bba55bc5d0 100644 --- a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c +++ b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c @@ -1734,6 +1734,23 @@ static struct attribute *host_v1_hw_attrs[] = { ATTRIBUTE_GROUPS(host_v1_hw); +static int check_fw_info_v1_hw(struct hisi_hba *hisi_hba) +{ + struct device *dev = hisi_hba->dev; + + if (hisi_hba->n_phy < 0 || hisi_hba->n_phy > 9) { + dev_err(dev, "invalid phy number from FW\n"); + return -EINVAL; + } + + if (hisi_hba->queue_count < 0 || hisi_hba->queue_count > 32) { + dev_err(dev, "invalid queue count from FW\n"); + return -EINVAL; + } + + return 0; +} + static const struct scsi_host_template sht_v1_hw = { LIBSAS_SHT_BASE_NO_SLAVE_INIT .device_configure = hisi_sas_device_configure, @@ -1747,6 +1764,7 @@ static const struct scsi_host_template sht_v1_hw = { static const struct hisi_sas_hw hisi_sas_v1_hw = { .hw_init = hisi_sas_v1_init, + .fw_info_check = check_fw_info_v1_hw, .setup_itct = setup_itct_v1_hw, .sl_notify_ssp = sl_notify_ssp_v1_hw, .clear_itct = clear_itct_v1_hw, diff --git a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c index 342d75f12051..ab6668dc5b77 100644 --- a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c +++ b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c @@ -3566,6 +3566,23 @@ static void map_queues_v2_hw(struct Scsi_Host *shost) } } +static int check_fw_info_v2_hw(struct hisi_hba *hisi_hba) +{ + struct device *dev = hisi_hba->dev; + + if (hisi_hba->n_phy < 0 || hisi_hba->n_phy > 9) { + dev_err(dev, "invalid phy number from FW\n"); + return -EINVAL; + } + + if (hisi_hba->queue_count < 0 || hisi_hba->queue_count > 16) { + dev_err(dev, "invalid queue count from FW\n"); + return -EINVAL; + } + + return 0; +} + static const struct scsi_host_template sht_v2_hw = { LIBSAS_SHT_BASE_NO_SLAVE_INIT .device_configure = hisi_sas_device_configure, @@ -3582,6 +3599,7 @@ static const struct scsi_host_template sht_v2_hw = { static const struct hisi_sas_hw hisi_sas_v2_hw = { .hw_init = hisi_sas_v2_init, + .fw_info_check = check_fw_info_v2_hw, .interrupt_preinit = hisi_sas_v2_interrupt_preinit, 
.setup_itct = setup_itct_v2_hw, .slot_index_alloc = slot_index_alloc_quirk_v2_hw, diff --git a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c index 4cd3a3eab6f1..5db931663ae4 100644 --- a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c +++ b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c @@ -43,6 +43,7 @@ #define CQ_INT_CONVERGE_EN 0xb0 #define CFG_AGING_TIME 0xbc #define HGC_DFX_CFG2 0xc0 +#define CFG_ICT_TIMER_STEP_TRSH 0xc8 #define CFG_ABT_SET_QUERY_IPTT 0xd4 #define CFG_SET_ABORTED_IPTT_OFF 0 #define CFG_SET_ABORTED_IPTT_MSK (0xfff << CFG_SET_ABORTED_IPTT_OFF) @@ -638,9 +639,12 @@ static void init_reg_v3_hw(struct hisi_hba *hisi_hba) hisi_sas_write32(hisi_hba, TRANS_LOCK_ICT_TIME, 0x4A817C80); hisi_sas_write32(hisi_hba, HGC_SAS_TXFAIL_RETRY_CTRL, 0x108); hisi_sas_write32(hisi_hba, CFG_AGING_TIME, 0x1); - hisi_sas_write32(hisi_hba, INT_COAL_EN, 0x1); - hisi_sas_write32(hisi_hba, OQ_INT_COAL_TIME, 0x1); - hisi_sas_write32(hisi_hba, OQ_INT_COAL_CNT, 0x1); + hisi_sas_write32(hisi_hba, CFG_ICT_TIMER_STEP_TRSH, 0xf4240); + hisi_sas_write32(hisi_hba, INT_COAL_EN, 0x3); + /* configure the interrupt coalescing timeout period 10us */ + hisi_sas_write32(hisi_hba, OQ_INT_COAL_TIME, 0xa); + /* configure the count of CQ entries 10 */ + hisi_sas_write32(hisi_hba, OQ_INT_COAL_CNT, 0xa); hisi_sas_write32(hisi_hba, CQ_INT_CONVERGE_EN, hisi_sas_intr_conv); hisi_sas_write32(hisi_hba, OQ_INT_SRC, 0xffff); @@ -682,7 +686,7 @@ static void init_reg_v3_hw(struct hisi_hba *hisi_hba) hisi_sas_phy_write32(hisi_hba, i, PHY_CTRL_RDY_MSK, 0x0); hisi_sas_phy_write32(hisi_hba, i, PHYCTRL_DWS_RESET_MSK, 0x0); hisi_sas_phy_write32(hisi_hba, i, PHYCTRL_OOB_RESTART_MSK, 0x1); - hisi_sas_phy_write32(hisi_hba, i, STP_LINK_TIMER, 0x7f7a120); + hisi_sas_phy_write32(hisi_hba, i, STP_LINK_TIMER, 0x7ffffff); hisi_sas_phy_write32(hisi_hba, i, CON_CFG_DRIVER, 0x2a0a01); hisi_sas_phy_write32(hisi_hba, i, SAS_EC_INT_COAL_TIME, 0x30f4240); @@ -2493,6 +2497,7 @@ static int complete_v3_hw(struct hisi_sas_cq *cq) /* update rd_point */ cq->rd_point = rd_point; hisi_sas_write32(hisi_hba, COMPL_Q_0_RD_PTR + (0x14 * queue), rd_point); + cond_resched(); return completed; } @@ -2796,14 +2801,15 @@ static void config_intr_coal_v3_hw(struct hisi_hba *hisi_hba) { /* config those registers between enable and disable PHYs */ hisi_sas_stop_phys(hisi_hba); + hisi_sas_write32(hisi_hba, INT_COAL_EN, 0x3); if (hisi_hba->intr_coal_ticks == 0 || hisi_hba->intr_coal_count == 0) { - hisi_sas_write32(hisi_hba, INT_COAL_EN, 0x1); - hisi_sas_write32(hisi_hba, OQ_INT_COAL_TIME, 0x1); - hisi_sas_write32(hisi_hba, OQ_INT_COAL_CNT, 0x1); + /* configure the interrupt coalescing timeout period 10us */ + hisi_sas_write32(hisi_hba, OQ_INT_COAL_TIME, 0xa); + /* configure the count of CQ entries 10 */ + hisi_sas_write32(hisi_hba, OQ_INT_COAL_CNT, 0xa); } else { - hisi_sas_write32(hisi_hba, INT_COAL_EN, 0x3); hisi_sas_write32(hisi_hba, OQ_INT_COAL_TIME, hisi_hba->intr_coal_ticks); hisi_sas_write32(hisi_hba, OQ_INT_COAL_CNT, @@ -3371,6 +3377,23 @@ static const struct hisi_sas_hw hisi_sas_v3_hw = { .debugfs_snapshot_regs = debugfs_snapshot_regs_v3_hw, }; +static int check_fw_info_v3_hw(struct hisi_hba *hisi_hba) +{ + struct device *dev = hisi_hba->dev; + + if (hisi_hba->n_phy < 0 || hisi_hba->n_phy > 8) { + dev_err(dev, "invalid phy number from FW\n"); + return -EINVAL; + } + + if (hisi_hba->queue_count < 0 || hisi_hba->queue_count > 16) { + dev_err(dev, "invalid queue count from FW\n"); + return -EINVAL; + } + + return 0; +} + static struct Scsi_Host * 
hisi_sas_shost_alloc_pci(struct pci_dev *pdev) { @@ -3401,6 +3424,9 @@ hisi_sas_shost_alloc_pci(struct pci_dev *pdev) if (hisi_sas_get_fw_info(hisi_hba) < 0) goto err_out; + if (check_fw_info_v3_hw(hisi_hba) < 0) + goto err_out; + if (experimental_iopoll_q_cnt < 0 || experimental_iopoll_q_cnt >= hisi_hba->queue_count) dev_err(dev, "iopoll queue count %d cannot exceed or equal 16, using default 0\n", @@ -3550,6 +3576,11 @@ debugfs_to_reg_name_v3_hw(int off, int base_off, return NULL; } +static bool debugfs_dump_is_generated_v3_hw(void *p) +{ + return p ? true : false; +} + static void debugfs_print_reg_v3_hw(u32 *regs_val, struct seq_file *s, const struct hisi_sas_debugfs_reg *reg) { @@ -3575,6 +3606,9 @@ static int debugfs_global_v3_hw_show(struct seq_file *s, void *p) { struct hisi_sas_debugfs_regs *global = s->private; + if (!debugfs_dump_is_generated_v3_hw(global->data)) + return -EPERM; + debugfs_print_reg_v3_hw(global->data, s, &debugfs_global_reg); @@ -3586,6 +3620,9 @@ static int debugfs_axi_v3_hw_show(struct seq_file *s, void *p) { struct hisi_sas_debugfs_regs *axi = s->private; + if (!debugfs_dump_is_generated_v3_hw(axi->data)) + return -EPERM; + debugfs_print_reg_v3_hw(axi->data, s, &debugfs_axi_reg); @@ -3597,6 +3634,9 @@ static int debugfs_ras_v3_hw_show(struct seq_file *s, void *p) { struct hisi_sas_debugfs_regs *ras = s->private; + if (!debugfs_dump_is_generated_v3_hw(ras->data)) + return -EPERM; + debugfs_print_reg_v3_hw(ras->data, s, &debugfs_ras_reg); @@ -3609,6 +3649,9 @@ static int debugfs_port_v3_hw_show(struct seq_file *s, void *p) struct hisi_sas_debugfs_port *port = s->private; const struct hisi_sas_debugfs_reg *reg_port = &debugfs_port_reg; + if (!debugfs_dump_is_generated_v3_hw(port->data)) + return -EPERM; + debugfs_print_reg_v3_hw(port->data, s, reg_port); return 0; @@ -3664,6 +3707,9 @@ static int debugfs_cq_v3_hw_show(struct seq_file *s, void *p) struct hisi_sas_debugfs_cq *debugfs_cq = s->private; int slot; + if (!debugfs_dump_is_generated_v3_hw(debugfs_cq->complete_hdr)) + return -EPERM; + for (slot = 0; slot < HISI_SAS_QUEUE_SLOTS; slot++) debugfs_cq_show_slot_v3_hw(s, slot, debugfs_cq); @@ -3685,8 +3731,12 @@ static void debugfs_dq_show_slot_v3_hw(struct seq_file *s, int slot, static int debugfs_dq_v3_hw_show(struct seq_file *s, void *p) { + struct hisi_sas_debugfs_dq *debugfs_dq = s->private; int slot; + if (!debugfs_dump_is_generated_v3_hw(debugfs_dq->hdr)) + return -EPERM; + for (slot = 0; slot < HISI_SAS_QUEUE_SLOTS; slot++) debugfs_dq_show_slot_v3_hw(s, slot, s->private); @@ -3700,6 +3750,9 @@ static int debugfs_iost_v3_hw_show(struct seq_file *s, void *p) struct hisi_sas_iost *iost = debugfs_iost->iost; int i, max_command_entries = HISI_SAS_MAX_COMMANDS; + if (!debugfs_dump_is_generated_v3_hw(iost)) + return -EPERM; + for (i = 0; i < max_command_entries; i++, iost++) { __le64 *data = &iost->qw0; @@ -3719,6 +3772,9 @@ static int debugfs_iost_cache_v3_hw_show(struct seq_file *s, void *p) int i, tab_idx; __le64 *iost; + if (!debugfs_dump_is_generated_v3_hw(iost_cache)) + return -EPERM; + for (i = 0; i < HISI_SAS_IOST_ITCT_CACHE_NUM; i++, iost_cache++) { /* * Data struct of IOST cache: @@ -3742,6 +3798,9 @@ static int debugfs_itct_v3_hw_show(struct seq_file *s, void *p) struct hisi_sas_debugfs_itct *debugfs_itct = s->private; struct hisi_sas_itct *itct = debugfs_itct->itct; + if (!debugfs_dump_is_generated_v3_hw(itct)) + return -EPERM; + for (i = 0; i < HISI_SAS_MAX_ITCT_ENTRIES; i++, itct++) { __le64 *data = &itct->qw0; @@ -3761,6 +3820,9 @@ static int 
debugfs_itct_cache_v3_hw_show(struct seq_file *s, void *p) int i, tab_idx; __le64 *itct; + if (!debugfs_dump_is_generated_v3_hw(itct_cache)) + return -EPERM; + for (i = 0; i < HISI_SAS_IOST_ITCT_CACHE_NUM; i++, itct_cache++) { /* * Data struct of ITCT cache: @@ -3778,10 +3840,9 @@ static int debugfs_itct_cache_v3_hw_show(struct seq_file *s, void *p) } DEFINE_SHOW_ATTRIBUTE(debugfs_itct_cache_v3_hw); -static void debugfs_create_files_v3_hw(struct hisi_hba *hisi_hba) +static void debugfs_create_files_v3_hw(struct hisi_hba *hisi_hba, int index) { u64 *debugfs_timestamp; - int dump_index = hisi_hba->debugfs_dump_index; struct dentry *dump_dentry; struct dentry *dentry; char name[256]; @@ -3789,17 +3850,17 @@ static void debugfs_create_files_v3_hw(struct hisi_hba *hisi_hba) int c; int d; - snprintf(name, 256, "%d", dump_index); + snprintf(name, 256, "%d", index); dump_dentry = debugfs_create_dir(name, hisi_hba->debugfs_dump_dentry); - debugfs_timestamp = &hisi_hba->debugfs_timestamp[dump_index]; + debugfs_timestamp = &hisi_hba->debugfs_timestamp[index]; debugfs_create_u64("timestamp", 0400, dump_dentry, debugfs_timestamp); debugfs_create_file("global", 0400, dump_dentry, - &hisi_hba->debugfs_regs[dump_index][DEBUGFS_GLOBAL], + &hisi_hba->debugfs_regs[index][DEBUGFS_GLOBAL], &debugfs_global_v3_hw_fops); /* Create port dir and files */ @@ -3808,7 +3869,7 @@ static void debugfs_create_files_v3_hw(struct hisi_hba *hisi_hba) snprintf(name, 256, "%d", p); debugfs_create_file(name, 0400, dentry, - &hisi_hba->debugfs_port_reg[dump_index][p], + &hisi_hba->debugfs_port_reg[index][p], &debugfs_port_v3_hw_fops); } @@ -3818,7 +3879,7 @@ static void debugfs_create_files_v3_hw(struct hisi_hba *hisi_hba) snprintf(name, 256, "%d", c); debugfs_create_file(name, 0400, dentry, - &hisi_hba->debugfs_cq[dump_index][c], + &hisi_hba->debugfs_cq[index][c], &debugfs_cq_v3_hw_fops); } @@ -3828,32 +3889,32 @@ static void debugfs_create_files_v3_hw(struct hisi_hba *hisi_hba) snprintf(name, 256, "%d", d); debugfs_create_file(name, 0400, dentry, - &hisi_hba->debugfs_dq[dump_index][d], + &hisi_hba->debugfs_dq[index][d], &debugfs_dq_v3_hw_fops); } debugfs_create_file("iost", 0400, dump_dentry, - &hisi_hba->debugfs_iost[dump_index], + &hisi_hba->debugfs_iost[index], &debugfs_iost_v3_hw_fops); debugfs_create_file("iost_cache", 0400, dump_dentry, - &hisi_hba->debugfs_iost_cache[dump_index], + &hisi_hba->debugfs_iost_cache[index], &debugfs_iost_cache_v3_hw_fops); debugfs_create_file("itct", 0400, dump_dentry, - &hisi_hba->debugfs_itct[dump_index], + &hisi_hba->debugfs_itct[index], &debugfs_itct_v3_hw_fops); debugfs_create_file("itct_cache", 0400, dump_dentry, - &hisi_hba->debugfs_itct_cache[dump_index], + &hisi_hba->debugfs_itct_cache[index], &debugfs_itct_cache_v3_hw_fops); debugfs_create_file("axi", 0400, dump_dentry, - &hisi_hba->debugfs_regs[dump_index][DEBUGFS_AXI], + &hisi_hba->debugfs_regs[index][DEBUGFS_AXI], &debugfs_axi_v3_hw_fops); debugfs_create_file("ras", 0400, dump_dentry, - &hisi_hba->debugfs_regs[dump_index][DEBUGFS_RAS], + &hisi_hba->debugfs_regs[index][DEBUGFS_RAS], &debugfs_ras_v3_hw_fops); } @@ -4516,22 +4577,34 @@ static void debugfs_release_v3_hw(struct hisi_hba *hisi_hba, int dump_index) int i; devm_kfree(dev, hisi_hba->debugfs_iost_cache[dump_index].cache); + hisi_hba->debugfs_iost_cache[dump_index].cache = NULL; devm_kfree(dev, hisi_hba->debugfs_itct_cache[dump_index].cache); + hisi_hba->debugfs_itct_cache[dump_index].cache = NULL; devm_kfree(dev, hisi_hba->debugfs_iost[dump_index].iost); + 
hisi_hba->debugfs_iost[dump_index].iost = NULL; devm_kfree(dev, hisi_hba->debugfs_itct[dump_index].itct); + hisi_hba->debugfs_itct[dump_index].itct = NULL; - for (i = 0; i < hisi_hba->queue_count; i++) + for (i = 0; i < hisi_hba->queue_count; i++) { devm_kfree(dev, hisi_hba->debugfs_dq[dump_index][i].hdr); + hisi_hba->debugfs_dq[dump_index][i].hdr = NULL; + } - for (i = 0; i < hisi_hba->queue_count; i++) + for (i = 0; i < hisi_hba->queue_count; i++) { devm_kfree(dev, hisi_hba->debugfs_cq[dump_index][i].complete_hdr); + hisi_hba->debugfs_cq[dump_index][i].complete_hdr = NULL; + } - for (i = 0; i < DEBUGFS_REGS_NUM; i++) + for (i = 0; i < DEBUGFS_REGS_NUM; i++) { devm_kfree(dev, hisi_hba->debugfs_regs[dump_index][i].data); + hisi_hba->debugfs_regs[dump_index][i].data = NULL; + } - for (i = 0; i < hisi_hba->n_phy; i++) + for (i = 0; i < hisi_hba->n_phy; i++) { devm_kfree(dev, hisi_hba->debugfs_port_reg[dump_index][i].data); + hisi_hba->debugfs_port_reg[dump_index][i].data = NULL; + } } static const struct hisi_sas_debugfs_reg *debugfs_reg_array_v3_hw[DEBUGFS_REGS_NUM] = { @@ -4658,8 +4731,6 @@ static int debugfs_snapshot_regs_v3_hw(struct hisi_hba *hisi_hba) debugfs_snapshot_itct_reg_v3_hw(hisi_hba); debugfs_snapshot_iost_reg_v3_hw(hisi_hba); - debugfs_create_files_v3_hw(hisi_hba); - debugfs_snapshot_restore_v3_hw(hisi_hba); hisi_hba->debugfs_dump_index++; @@ -4743,6 +4814,34 @@ static void debugfs_bist_init_v3_hw(struct hisi_hba *hisi_hba) hisi_hba->debugfs_bist_linkrate = SAS_LINK_RATE_1_5_GBPS; } +static int debugfs_dump_index_v3_hw_show(struct seq_file *s, void *p) +{ + int *debugfs_dump_index = s->private; + + if (*debugfs_dump_index > 0) + seq_printf(s, "%d\n", *debugfs_dump_index - 1); + else + seq_puts(s, "dump not triggered\n"); + + return 0; +} +DEFINE_SHOW_ATTRIBUTE(debugfs_dump_index_v3_hw); + +static void debugfs_dump_init_v3_hw(struct hisi_hba *hisi_hba) +{ + int i; + + hisi_hba->debugfs_dump_dentry = + debugfs_create_dir("dump", hisi_hba->debugfs_dir); + + debugfs_create_file("latest_dump", 0400, hisi_hba->debugfs_dump_dentry, + &hisi_hba->debugfs_dump_index, + &debugfs_dump_index_v3_hw_fops); + + for (i = 0; i < hisi_sas_debugfs_dump_count; i++) + debugfs_create_files_v3_hw(hisi_hba, i); +} + static void debugfs_exit_v3_hw(struct hisi_hba *hisi_hba) { debugfs_remove_recursive(hisi_hba->debugfs_dir); @@ -4755,19 +4854,17 @@ static void debugfs_init_v3_hw(struct hisi_hba *hisi_hba) hisi_hba->debugfs_dir = debugfs_create_dir(dev_name(dev), hisi_sas_debugfs_dir); - debugfs_create_file("trigger_dump", 0200, - hisi_hba->debugfs_dir, - hisi_hba, - &debugfs_trigger_dump_v3_hw_fops); - /* create bist structures */ debugfs_bist_init_v3_hw(hisi_hba); - hisi_hba->debugfs_dump_dentry = - debugfs_create_dir("dump", hisi_hba->debugfs_dir); + debugfs_dump_init_v3_hw(hisi_hba); debugfs_phy_down_cnt_init_v3_hw(hisi_hba); debugfs_fifo_init_v3_hw(hisi_hba); + debugfs_create_file("trigger_dump", 0200, + hisi_hba->debugfs_dir, + hisi_hba, + &debugfs_trigger_dump_v3_hw_fops); } static int @@ -4860,16 +4957,13 @@ hisi_sas_v3_probe(struct pci_dev *pdev, const struct pci_device_id *id) SHOST_DIX_GUARD_CRC); } - if (hisi_sas_debugfs_enable) - debugfs_init_v3_hw(hisi_hba); - rc = interrupt_preinit_v3_hw(hisi_hba); if (rc) - goto err_out_undo_debugfs; + goto err_out_free_host; rc = scsi_add_host(shost, dev); if (rc) - goto err_out_undo_debugfs; + goto err_out_free_host; rc = sas_register_ha(sha); if (rc) @@ -4880,6 +4974,8 @@ hisi_sas_v3_probe(struct pci_dev *pdev, const struct pci_device_id *id) goto 
err_out_unregister_ha; scsi_scan_host(shost); + if (hisi_sas_debugfs_enable) + debugfs_init_v3_hw(hisi_hba); pm_runtime_set_autosuspend_delay(dev, 5000); pm_runtime_use_autosuspend(dev); @@ -4900,9 +4996,6 @@ err_out_unregister_ha: sas_unregister_ha(sha); err_out_remove_host: scsi_remove_host(shost); -err_out_undo_debugfs: - if (hisi_sas_debugfs_enable) - debugfs_exit_v3_hw(hisi_hba); err_out_free_host: hisi_sas_free(hisi_hba); scsi_host_put(shost); @@ -4934,6 +5027,8 @@ static void hisi_sas_v3_remove(struct pci_dev *pdev) struct Scsi_Host *shost = sha->shost; pm_runtime_get_noresume(dev); + if (hisi_sas_debugfs_enable) + debugfs_exit_v3_hw(hisi_hba); sas_unregister_ha(sha); flush_workqueue(hisi_hba->wq); @@ -4941,9 +5036,6 @@ static void hisi_sas_v3_remove(struct pci_dev *pdev) hisi_sas_v3_destroy_irqs(pdev, hisi_hba); hisi_sas_free(hisi_hba); - if (hisi_sas_debugfs_enable) - debugfs_exit_v3_hw(hisi_hba); - scsi_host_put(shost); } @@ -5034,7 +5126,8 @@ static int _suspend_v3_hw(struct device *device) interrupt_disable_v3_hw(hisi_hba); #ifdef CONFIG_PM - if (atomic_read(&device->power.usage_count)) { + if ((device->power.runtime_status == RPM_SUSPENDING) && + atomic_read(&device->power.usage_count)) { dev_err(dev, "PM suspend: host status cannot be suspended\n"); rc = -EBUSY; goto err_out; diff --git a/drivers/scsi/qedf/qedf_main.c b/drivers/scsi/qedf/qedf_main.c index cf13148ba281..df756f3eef3e 100644 --- a/drivers/scsi/qedf/qedf_main.c +++ b/drivers/scsi/qedf/qedf_main.c @@ -4018,11 +4018,6 @@ void qedf_stag_change_work(struct work_struct *work) struct qedf_ctx *qedf = container_of(work, struct qedf_ctx, stag_work.work); - if (!qedf) { - QEDF_ERR(&qedf->dbg_ctx, "qedf is NULL"); - return; - } - if (test_bit(QEDF_IN_RECOVERY, &qedf->flags)) { QEDF_ERR(&qedf->dbg_ctx, "Already is in recovery, hence not calling software context reset.\n"); diff --git a/drivers/ufs/core/ufs-mcq.c b/drivers/ufs/core/ufs-mcq.c index 5891cdacd0b3..988400500560 100644 --- a/drivers/ufs/core/ufs-mcq.c +++ b/drivers/ufs/core/ufs-mcq.c @@ -417,13 +417,6 @@ void ufshcd_mcq_make_queues_operational(struct ufs_hba *hba) } EXPORT_SYMBOL_GPL(ufshcd_mcq_make_queues_operational); -void ufshcd_mcq_enable_esi(struct ufs_hba *hba) -{ - ufshcd_writel(hba, ufshcd_readl(hba, REG_UFS_MEM_CFG) | 0x2, - REG_UFS_MEM_CFG); -} -EXPORT_SYMBOL_GPL(ufshcd_mcq_enable_esi); - void ufshcd_mcq_enable(struct ufs_hba *hba) { ufshcd_rmwl(hba, MCQ_MODE_SELECT, MCQ_MODE_SELECT, REG_UFS_MEM_CFG); @@ -437,6 +430,13 @@ void ufshcd_mcq_disable(struct ufs_hba *hba) hba->mcq_enabled = false; } +void ufshcd_mcq_enable_esi(struct ufs_hba *hba) +{ + ufshcd_writel(hba, ufshcd_readl(hba, REG_UFS_MEM_CFG) | 0x2, + REG_UFS_MEM_CFG); +} +EXPORT_SYMBOL_GPL(ufshcd_mcq_enable_esi); + void ufshcd_mcq_config_esi(struct ufs_hba *hba, struct msi_msg *msg) { ufshcd_writel(hba, msg->address_lo, REG_UFS_ESILBA); @@ -572,14 +572,18 @@ int ufshcd_mcq_sq_cleanup(struct ufs_hba *hba, int task_tag) /* SQRTCy.ICU = 1 */ writel(SQ_ICU, opr_sqd_base + REG_SQRTC); - /* Poll SQRTSy.CUS = 1. Return result from SQRTSy.RTC */ + /* Wait until SQRTSy.CUS = 1. Report SQRTSy.RTC. */ reg = opr_sqd_base + REG_SQRTS; err = read_poll_timeout(readl, val, val & SQ_CUS, 20, MCQ_POLL_US, false, reg); if (err) - dev_err(hba->dev, "%s: failed. hwq=%d, tag=%d err=%ld\n", - __func__, id, task_tag, - FIELD_GET(SQ_ICU_ERR_CODE_MASK, readl(reg))); + dev_err(hba->dev, "%s: failed. 
hwq=%d, tag=%d err=%d\n", + __func__, id, task_tag, err); + else + dev_info(hba->dev, + "%s, hwq %d: cleanup return code (RTC) %ld\n", + __func__, id, + FIELD_GET(SQ_ICU_ERR_CODE_MASK, readl(reg))); if (ufshcd_mcq_sq_start(hba, hwq)) err = -ETIMEDOUT; diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c index 24a32e2fd75e..099373a25017 100644 --- a/drivers/ufs/core/ufshcd.c +++ b/drivers/ufs/core/ufshcd.c @@ -298,6 +298,7 @@ static int ufshcd_reset_and_restore(struct ufs_hba *hba); static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd); static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag); static void ufshcd_hba_exit(struct ufs_hba *hba); +static int ufshcd_device_init(struct ufs_hba *hba, bool init_dev_params); static int ufshcd_probe_hba(struct ufs_hba *hba, bool init_dev_params); static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on); static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba); @@ -349,18 +350,6 @@ static void ufshcd_configure_wb(struct ufs_hba *hba) ufshcd_wb_toggle_buf_flush(hba, true); } -static void ufshcd_scsi_unblock_requests(struct ufs_hba *hba) -{ - if (atomic_dec_and_test(&hba->scsi_block_reqs_cnt)) - scsi_unblock_requests(hba->host); -} - -static void ufshcd_scsi_block_requests(struct ufs_hba *hba) -{ - if (atomic_inc_return(&hba->scsi_block_reqs_cnt) == 1) - scsi_block_requests(hba->host); -} - static void ufshcd_add_cmd_upiu_trace(struct ufs_hba *hba, unsigned int tag, enum ufs_trace_str_t str_t) { @@ -739,25 +728,15 @@ EXPORT_SYMBOL_GPL(ufshcd_delay_us); * Return: -ETIMEDOUT on error, zero on success. */ static int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask, - u32 val, unsigned long interval_us, - unsigned long timeout_ms) + u32 val, unsigned long interval_us, + unsigned long timeout_ms) { - int err = 0; - unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms); - - /* ignore bits that we don't intend to wait on */ - val = val & mask; + u32 v; - while ((ufshcd_readl(hba, reg) & mask) != val) { - usleep_range(interval_us, interval_us + 50); - if (time_after(jiffies, timeout)) { - if ((ufshcd_readl(hba, reg) & mask) != val) - err = -ETIMEDOUT; - break; - } - } + val &= mask; /* ignore bits that we don't intend to wait on */ - return err; + return read_poll_timeout(ufshcd_readl, v, (v & mask) == val, + interval_us, timeout_ms * 1000, false, hba, reg); } /** @@ -1255,11 +1234,13 @@ static bool ufshcd_is_devfreq_scaling_required(struct ufs_hba *hba, static u32 ufshcd_pending_cmds(struct ufs_hba *hba) { const struct scsi_device *sdev; + unsigned long flags; u32 pending = 0; - lockdep_assert_held(hba->host->host_lock); + spin_lock_irqsave(hba->host->host_lock, flags); __shost_for_each_device(sdev, hba->host) pending += sbitmap_weight(&sdev->budget_map); + spin_unlock_irqrestore(hba->host->host_lock, flags); return pending; } @@ -1273,7 +1254,6 @@ static u32 ufshcd_pending_cmds(struct ufs_hba *hba) static int ufshcd_wait_for_doorbell_clr(struct ufs_hba *hba, u64 wait_timeout_us) { - unsigned long flags; int ret = 0; u32 tm_doorbell; u32 tr_pending; @@ -1281,7 +1261,6 @@ static int ufshcd_wait_for_doorbell_clr(struct ufs_hba *hba, ktime_t start; ufshcd_hold(hba); - spin_lock_irqsave(hba->host->host_lock, flags); /* * Wait for all the outstanding tasks/transfer requests. * Verify by checking the doorbell registers are clear. 
@@ -1302,7 +1281,6 @@ static int ufshcd_wait_for_doorbell_clr(struct ufs_hba *hba, break; } - spin_unlock_irqrestore(hba->host->host_lock, flags); io_schedule_timeout(msecs_to_jiffies(20)); if (ktime_to_us(ktime_sub(ktime_get(), start)) > wait_timeout_us) { @@ -1314,7 +1292,6 @@ static int ufshcd_wait_for_doorbell_clr(struct ufs_hba *hba, */ do_last_check = true; } - spin_lock_irqsave(hba->host->host_lock, flags); } while (tm_doorbell || tr_pending); if (timeout) { @@ -1324,7 +1301,6 @@ static int ufshcd_wait_for_doorbell_clr(struct ufs_hba *hba, ret = -EBUSY; } out: - spin_unlock_irqrestore(hba->host->host_lock, flags); ufshcd_release(hba); return ret; } @@ -2411,8 +2387,6 @@ static inline int ufshcd_hba_capabilities(struct ufs_hba *hba) int err; hba->capabilities = ufshcd_readl(hba, REG_CONTROLLER_CAPABILITIES); - if (hba->quirks & UFSHCD_QUIRK_BROKEN_64BIT_ADDRESS) - hba->capabilities &= ~MASK_64_ADDRESSING_SUPPORT; /* nutrs and nutmrs are 0 based values */ hba->nutrs = (hba->capabilities & MASK_TRANSFER_REQUESTS_SLOTS_SDB) + 1; @@ -2551,13 +2525,11 @@ ufshcd_wait_for_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd) * __ufshcd_send_uic_cmd - Send UIC commands and retrieve the result * @hba: per adapter instance * @uic_cmd: UIC command - * @completion: initialize the completion only if this is set to true * * Return: 0 only if success. */ static int -__ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd, - bool completion) +__ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd) { lockdep_assert_held(&hba->uic_cmd_mutex); @@ -2567,8 +2539,7 @@ __ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd, return -EIO; } - if (completion) - init_completion(&uic_cmd->done); + init_completion(&uic_cmd->done); uic_cmd->cmd_active = 1; ufshcd_dispatch_uic_cmd(hba, uic_cmd); @@ -2594,7 +2565,7 @@ int ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd) mutex_lock(&hba->uic_cmd_mutex); ufshcd_add_delay_before_dme_cmd(hba); - ret = __ufshcd_send_uic_cmd(hba, uic_cmd, true); + ret = __ufshcd_send_uic_cmd(hba, uic_cmd); if (!ret) ret = ufshcd_wait_for_uic_cmd(hba, uic_cmd); @@ -2775,7 +2746,6 @@ void ufshcd_prepare_utp_scsi_cmd_upiu(struct ufshcd_lrb *lrbp, u8 upiu_flags) ucd_req_ptr->sc.exp_data_transfer_len = cpu_to_be32(cmd->sdb.length); cdb_len = min_t(unsigned short, cmd->cmd_len, UFS_CDB_SIZE); - memset(ucd_req_ptr->sc.cdb, 0, UFS_CDB_SIZE); memcpy(ucd_req_ptr->sc.cdb, cmd->cmnd, cdb_len); memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp)); @@ -2878,6 +2848,26 @@ static void ufshcd_comp_scsi_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp) ufshcd_prepare_utp_scsi_cmd_upiu(lrbp, upiu_flags); } +static void __ufshcd_setup_cmd(struct ufshcd_lrb *lrbp, struct scsi_cmnd *cmd, u8 lun, int tag) +{ + memset(lrbp->ucd_req_ptr, 0, sizeof(*lrbp->ucd_req_ptr)); + + lrbp->cmd = cmd; + lrbp->task_tag = tag; + lrbp->lun = lun; + ufshcd_prepare_lrbp_crypto(cmd ? 
scsi_cmd_to_rq(cmd) : NULL, lrbp); +} + +static void ufshcd_setup_scsi_cmd(struct ufs_hba *hba, struct ufshcd_lrb *lrbp, + struct scsi_cmnd *cmd, u8 lun, int tag) +{ + __ufshcd_setup_cmd(lrbp, cmd, lun, tag); + lrbp->intr_cmd = !ufshcd_is_intr_aggr_allowed(hba); + lrbp->req_abort_skip = false; + + ufshcd_comp_scsi_upiu(hba, lrbp); +} + /** * ufshcd_upiu_wlun_to_scsi_wlun - maps UPIU W-LUN id to SCSI W-LUN ID * @upiu_wlun_id: UPIU W-LUN id @@ -3011,16 +3001,8 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd) ufshcd_hold(hba); lrbp = &hba->lrb[tag]; - lrbp->cmd = cmd; - lrbp->task_tag = tag; - lrbp->lun = ufshcd_scsi_to_upiu_lun(cmd->device->lun); - lrbp->intr_cmd = !ufshcd_is_intr_aggr_allowed(hba); - - ufshcd_prepare_lrbp_crypto(scsi_cmd_to_rq(cmd), lrbp); - - lrbp->req_abort_skip = false; - ufshcd_comp_scsi_upiu(hba, lrbp); + ufshcd_setup_scsi_cmd(hba, lrbp, cmd, ufshcd_scsi_to_upiu_lun(cmd->device->lun), tag); err = ufshcd_map_sg(hba, lrbp); if (err) { @@ -3048,11 +3030,8 @@ out: static void ufshcd_setup_dev_cmd(struct ufs_hba *hba, struct ufshcd_lrb *lrbp, enum dev_cmd_type cmd_type, u8 lun, int tag) { - lrbp->cmd = NULL; - lrbp->task_tag = tag; - lrbp->lun = lun; + __ufshcd_setup_cmd(lrbp, NULL, lun, tag); lrbp->intr_cmd = true; /* No interrupt aggregation */ - ufshcd_prepare_lrbp_crypto(NULL, lrbp); hba->dev_cmd.type = cmd_type; } @@ -3084,7 +3063,6 @@ bool ufshcd_cmd_inflight(struct scsi_cmnd *cmd) static int ufshcd_clear_cmd(struct ufs_hba *hba, u32 task_tag) { u32 mask; - unsigned long flags; int err; if (hba->mcq_enabled) { @@ -3104,9 +3082,7 @@ static int ufshcd_clear_cmd(struct ufs_hba *hba, u32 task_tag) mask = 1U << task_tag; /* clear outstanding transaction before retry */ - spin_lock_irqsave(hba->host->host_lock, flags); ufshcd_utrl_clear(hba, mask); - spin_unlock_irqrestore(hba->host->host_lock, flags); /* * wait for h/w to clear corresponding bit in door-bell. @@ -4289,7 +4265,7 @@ static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd) reenable_intr = true; } spin_unlock_irqrestore(hba->host->host_lock, flags); - ret = __ufshcd_send_uic_cmd(hba, cmd, false); + ret = __ufshcd_send_uic_cmd(hba, cmd); if (ret) { dev_err(hba->dev, "pwr ctrl cmd 0x%x with mode 0x%x uic error %d\n", @@ -4540,6 +4516,14 @@ static int ufshcd_get_max_pwr_mode(struct ufs_hba *hba) return -EINVAL; } + if (pwr_info->lane_rx != pwr_info->lane_tx) { + dev_err(hba->dev, "%s: asymmetric connected lanes. rx=%d, tx=%d\n", + __func__, + pwr_info->lane_rx, + pwr_info->lane_tx); + return -EINVAL; + } + /* * First, get the maximum gears of HS speed. * If a zero value, it means there is no HSGEAR capability. 
@@ -4823,51 +4807,44 @@ EXPORT_SYMBOL_GPL(ufshcd_hba_stop); */ static int ufshcd_hba_execute_hce(struct ufs_hba *hba) { - int retry_outer = 3; - int retry_inner; + int retry; -start: - if (ufshcd_is_hba_active(hba)) - /* change controller state to "reset state" */ - ufshcd_hba_stop(hba); + for (retry = 3; retry > 0; retry--) { + if (ufshcd_is_hba_active(hba)) + /* change controller state to "reset state" */ + ufshcd_hba_stop(hba); - /* UniPro link is disabled at this point */ - ufshcd_set_link_off(hba); + /* UniPro link is disabled at this point */ + ufshcd_set_link_off(hba); - ufshcd_vops_hce_enable_notify(hba, PRE_CHANGE); + ufshcd_vops_hce_enable_notify(hba, PRE_CHANGE); - /* start controller initialization sequence */ - ufshcd_hba_start(hba); + /* start controller initialization sequence */ + ufshcd_hba_start(hba); - /* - * To initialize a UFS host controller HCE bit must be set to 1. - * During initialization the HCE bit value changes from 1->0->1. - * When the host controller completes initialization sequence - * it sets the value of HCE bit to 1. The same HCE bit is read back - * to check if the controller has completed initialization sequence. - * So without this delay the value HCE = 1, set in the previous - * instruction might be read back. - * This delay can be changed based on the controller. - */ - ufshcd_delay_us(hba->vps->hba_enable_delay_us, 100); + /* + * To initialize a UFS host controller HCE bit must be set to 1. + * During initialization the HCE bit value changes from 1->0->1. + * When the host controller completes initialization sequence + * it sets the value of HCE bit to 1. The same HCE bit is read back + * to check if the controller has completed initialization sequence. + * So without this delay the value HCE = 1, set in the previous + * instruction might be read back. + * This delay can be changed based on the controller. 
+ */ + ufshcd_delay_us(hba->vps->hba_enable_delay_us, 100); - /* wait for the host controller to complete initialization */ - retry_inner = 50; - while (!ufshcd_is_hba_active(hba)) { - if (retry_inner) { - retry_inner--; - } else { - dev_err(hba->dev, - "Controller enable failed\n"); - if (retry_outer) { - retry_outer--; - goto start; - } - return -EIO; - } - usleep_range(1000, 1100); + /* wait for the host controller to complete initialization */ + if (!ufshcd_wait_for_register(hba, REG_CONTROLLER_ENABLE, CONTROLLER_ENABLE, + CONTROLLER_ENABLE, 1000, 50)) + break; + + dev_err(hba->dev, "Enabling the controller failed\n"); } + if (!retry) + return -EIO; + /* enable UIC related interrupts */ ufshcd_enable_intr(hba, UFSHCD_UIC_MASK); @@ -5477,32 +5454,37 @@ static bool ufshcd_is_auto_hibern8_error(struct ufs_hba *hba, static irqreturn_t ufshcd_uic_cmd_compl(struct ufs_hba *hba, u32 intr_status) { irqreturn_t retval = IRQ_NONE; + struct uic_command *cmd; spin_lock(hba->host->host_lock); + cmd = hba->active_uic_cmd; + if (WARN_ON_ONCE(!cmd)) + goto unlock; + if (ufshcd_is_auto_hibern8_error(hba, intr_status)) hba->errors |= (UFSHCD_UIC_HIBERN8_MASK & intr_status); - if ((intr_status & UIC_COMMAND_COMPL) && hba->active_uic_cmd) { - hba->active_uic_cmd->argument2 |= - ufshcd_get_uic_cmd_result(hba); - hba->active_uic_cmd->argument3 = - ufshcd_get_dme_attr_val(hba); + if (intr_status & UIC_COMMAND_COMPL) { + cmd->argument2 |= ufshcd_get_uic_cmd_result(hba); + cmd->argument3 = ufshcd_get_dme_attr_val(hba); if (!hba->uic_async_done) - hba->active_uic_cmd->cmd_active = 0; - complete(&hba->active_uic_cmd->done); + cmd->cmd_active = 0; + complete(&cmd->done); retval = IRQ_HANDLED; } - if ((intr_status & UFSHCD_UIC_PWR_MASK) && hba->uic_async_done) { - hba->active_uic_cmd->cmd_active = 0; + if (intr_status & UFSHCD_UIC_PWR_MASK && hba->uic_async_done) { + cmd->cmd_active = 0; complete(hba->uic_async_done); retval = IRQ_HANDLED; } if (retval == IRQ_HANDLED) - ufshcd_add_uic_command_trace(hba, hba->active_uic_cmd, - UFS_CMD_COMP); + ufshcd_add_uic_command_trace(hba, cmd, UFS_CMD_COMP); + +unlock: spin_unlock(hba->host->host_lock); + return retval; } @@ -6195,12 +6177,11 @@ static void ufshcd_exception_event_handler(struct work_struct *work) u32 status = 0; hba = container_of(work, struct ufs_hba, eeh_work); - ufshcd_scsi_block_requests(hba); err = ufshcd_get_ee_status(hba, &status); if (err) { dev_err(hba->dev, "%s: failed to get exception status %d\n", __func__, err); - goto out; + return; } trace_ufshcd_exception_event(dev_name(hba->dev), status); @@ -6212,8 +6193,6 @@ static void ufshcd_exception_event_handler(struct work_struct *work) ufshcd_temp_exception_event_handler(hba, status); ufs_debugfs_exception_event(hba, status); -out: - ufshcd_scsi_unblock_requests(hba); } /* Complete requests that have door-bell cleared */ @@ -6379,15 +6358,14 @@ static void ufshcd_err_handling_prepare(struct ufs_hba *hba) ufshcd_suspend_clkscaling(hba); ufshcd_clk_scaling_allow(hba, false); } - ufshcd_scsi_block_requests(hba); /* Wait for ongoing ufshcd_queuecommand() calls to finish. 
*/ - blk_mq_wait_quiesce_done(&hba->host->tag_set); + blk_mq_quiesce_tagset(&hba->host->tag_set); cancel_work_sync(&hba->eeh_work); } static void ufshcd_err_handling_unprepare(struct ufs_hba *hba) { - ufshcd_scsi_unblock_requests(hba); + blk_mq_unquiesce_tagset(&hba->host->tag_set); ufshcd_release(hba); if (ufshcd_is_clkscaling_supported(hba)) ufshcd_clk_scaling_suspend(hba, false); @@ -7015,14 +6993,11 @@ static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag) { int err = 0; u32 mask = 1 << tag; - unsigned long flags; if (!test_bit(tag, &hba->outstanding_tasks)) goto out; - spin_lock_irqsave(hba->host->host_lock, flags); ufshcd_utmrl_clear(hba, tag); - spin_unlock_irqrestore(hba->host->host_lock, flags); /* poll for max. 1 sec to clear door bell register by h/w */ err = ufshcd_wait_for_register(hba, @@ -7065,12 +7040,13 @@ static int __ufshcd_issue_tm_cmd(struct ufs_hba *hba, memcpy(hba->utmrdl_base_addr + task_tag, treq, sizeof(*treq)); ufshcd_vops_setup_task_mgmt(hba, task_tag, tm_function); - /* send command to the controller */ __set_bit(task_tag, &hba->outstanding_tasks); - ufshcd_writel(hba, 1 << task_tag, REG_UTP_TASK_REQ_DOOR_BELL); spin_unlock_irqrestore(host->host_lock, flags); + /* send command to the controller */ + ufshcd_writel(hba, 1 << task_tag, REG_UTP_TASK_REQ_DOOR_BELL); + ufshcd_add_tm_upiu_trace(hba, task_tag, UFS_TM_SEND); /* wait until the task management command is completed */ @@ -7486,10 +7462,9 @@ static void ufshcd_set_req_abort_skip(struct ufs_hba *hba, unsigned long bitmap) int ufshcd_try_to_abort_task(struct ufs_hba *hba, int tag) { struct ufshcd_lrb *lrbp = &hba->lrb[tag]; - int err = 0; + int err; int poll_cnt; u8 resp = 0xF; - u32 reg; for (poll_cnt = 100; poll_cnt; poll_cnt--) { err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag, @@ -7504,46 +7479,27 @@ int ufshcd_try_to_abort_task(struct ufs_hba *hba, int tag) * cmd not pending in the device, check if it is * in transition. */ - dev_err(hba->dev, "%s: cmd at tag %d not pending in the device.\n", + dev_info( + hba->dev, + "%s: cmd with tag %d not pending in the device.\n", __func__, tag); - if (hba->mcq_enabled) { - /* MCQ mode */ - if (ufshcd_cmd_inflight(lrbp->cmd)) { - /* sleep for max. 200us same delay as in SDB mode */ - usleep_range(100, 200); - continue; - } - /* command completed already */ - dev_err(hba->dev, "%s: cmd at tag=%d is cleared.\n", - __func__, tag); - goto out; + if (!ufshcd_cmd_inflight(lrbp->cmd)) { + dev_info(hba->dev, + "%s: cmd with tag=%d completed.\n", + __func__, tag); + return 0; } - - /* Single Doorbell Mode */ - reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL); - if (reg & (1 << tag)) { - /* sleep for max. 200us to stabilize */ - usleep_range(100, 200); - continue; - } - /* command completed already */ - dev_err(hba->dev, "%s: cmd at tag %d successfully cleared from DB.\n", - __func__, tag); - goto out; + usleep_range(100, 200); } else { dev_err(hba->dev, "%s: no response from device. tag = %d, err %d\n", __func__, tag, err); - if (!err) - err = resp; /* service response error */ - goto out; + return err ? : resp; } } - if (!poll_cnt) { - err = -EBUSY; - goto out; - } + if (!poll_cnt) + return -EBUSY; err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag, UFS_ABORT_TASK, &resp); @@ -7553,7 +7509,7 @@ int ufshcd_try_to_abort_task(struct ufs_hba *hba, int tag) dev_err(hba->dev, "%s: issued. 
tag = %d, err %d\n", __func__, tag, err); } - goto out; + return err; } err = ufshcd_clear_cmd(hba, tag); @@ -7561,7 +7517,6 @@ int ufshcd_try_to_abort_task(struct ufs_hba *hba, int tag) dev_err(hba->dev, "%s: Failed clearing cmd at tag %d, err %d\n", __func__, tag, err); -out: return err; } @@ -7685,6 +7640,29 @@ release: } /** + * ufshcd_process_probe_result - Process the ufshcd_probe_hba() result. + * @hba: UFS host controller instance. + * @probe_start: time when the ufshcd_probe_hba() call started. + * @ret: ufshcd_probe_hba() return value. + */ +static void ufshcd_process_probe_result(struct ufs_hba *hba, + ktime_t probe_start, int ret) +{ + unsigned long flags; + + spin_lock_irqsave(hba->host->host_lock, flags); + if (ret) + hba->ufshcd_state = UFSHCD_STATE_ERROR; + else if (hba->ufshcd_state == UFSHCD_STATE_RESET) + hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL; + spin_unlock_irqrestore(hba->host->host_lock, flags); + + trace_ufshcd_init(dev_name(hba->dev), ret, + ktime_to_us(ktime_sub(ktime_get(), probe_start)), + hba->curr_dev_pwr_mode, hba->uic_link_state); +} + +/** * ufshcd_host_reset_and_restore - reset and restore host controller * @hba: per-adapter instance * @@ -7713,8 +7691,14 @@ static int ufshcd_host_reset_and_restore(struct ufs_hba *hba) err = ufshcd_hba_enable(hba); /* Establish the link again and restore the device */ - if (!err) - err = ufshcd_probe_hba(hba, false); + if (!err) { + ktime_t probe_start = ktime_get(); + + err = ufshcd_device_init(hba, /*init_dev_params=*/false); + if (!err) + err = ufshcd_probe_hba(hba, false); + ufshcd_process_probe_result(hba, probe_start, err); + } if (err) dev_err(hba->dev, "%s: Host init failed %d\n", __func__, err); @@ -8732,10 +8716,45 @@ static void ufshcd_config_mcq(struct ufs_hba *hba) hba->nutrs); } +static int ufshcd_post_device_init(struct ufs_hba *hba) +{ + int ret; + + ufshcd_tune_unipro_params(hba); + + /* UFS device is also active now */ + ufshcd_set_ufs_dev_active(hba); + ufshcd_force_reset_auto_bkops(hba); + + ufshcd_set_timestamp_attr(hba); + schedule_delayed_work(&hba->ufs_rtc_update_work, + msecs_to_jiffies(UFS_RTC_UPDATE_INTERVAL_MS)); + + if (!hba->max_pwr_info.is_valid) + return 0; + + /* + * Set the right value to bRefClkFreq before attempting to + * switch to HS gears. + */ + if (hba->dev_ref_clk_freq != REF_CLK_FREQ_INVAL) + ufshcd_set_dev_ref_clk(hba); + /* Gear up to HS gear. 
*/ + ret = ufshcd_config_pwr_mode(hba, &hba->max_pwr_info.info); + if (ret) { + dev_err(hba->dev, "%s: Failed setting power mode, err = %d\n", + __func__, ret); + return ret; + } + + return 0; +} + static int ufshcd_device_init(struct ufs_hba *hba, bool init_dev_params) { int ret; - struct Scsi_Host *host = hba->host; + + WARN_ON_ONCE(!hba->scsi_host_added); hba->ufshcd_state = UFSHCD_STATE_RESET; @@ -8776,58 +8795,14 @@ static int ufshcd_device_init(struct ufs_hba *hba, bool init_dev_params) ret = ufshcd_device_params_init(hba); if (ret) return ret; - if (is_mcq_supported(hba) && !hba->scsi_host_added) { - ufshcd_mcq_enable(hba); - ret = ufshcd_alloc_mcq(hba); - if (!ret) { - ufshcd_config_mcq(hba); - } else { - /* Continue with SDB mode */ - ufshcd_mcq_disable(hba); - use_mcq_mode = false; - dev_err(hba->dev, "MCQ mode is disabled, err=%d\n", - ret); - } - ret = scsi_add_host(host, hba->dev); - if (ret) { - dev_err(hba->dev, "scsi_add_host failed\n"); - return ret; - } - hba->scsi_host_added = true; - } else if (is_mcq_supported(hba)) { - /* UFSHCD_QUIRK_REINIT_AFTER_MAX_GEAR_SWITCH is set */ + if (is_mcq_supported(hba) && + hba->quirks & UFSHCD_QUIRK_REINIT_AFTER_MAX_GEAR_SWITCH) { ufshcd_config_mcq(hba); ufshcd_mcq_enable(hba); } } - ufshcd_tune_unipro_params(hba); - - /* UFS device is also active now */ - ufshcd_set_ufs_dev_active(hba); - ufshcd_force_reset_auto_bkops(hba); - - ufshcd_set_timestamp_attr(hba); - schedule_delayed_work(&hba->ufs_rtc_update_work, - msecs_to_jiffies(UFS_RTC_UPDATE_INTERVAL_MS)); - - /* Gear up to HS gear if supported */ - if (hba->max_pwr_info.is_valid) { - /* - * Set the right value to bRefClkFreq before attempting to - * switch to HS gears. - */ - if (hba->dev_ref_clk_freq != REF_CLK_FREQ_INVAL) - ufshcd_set_dev_ref_clk(hba); - ret = ufshcd_config_pwr_mode(hba, &hba->max_pwr_info.info); - if (ret) { - dev_err(hba->dev, "%s: Failed setting power mode, err = %d\n", - __func__, ret); - return ret; - } - } - - return 0; + return ufshcd_post_device_init(hba); } /** @@ -8841,14 +8816,8 @@ static int ufshcd_device_init(struct ufs_hba *hba, bool init_dev_params) */ static int ufshcd_probe_hba(struct ufs_hba *hba, bool init_dev_params) { - ktime_t start = ktime_get(); - unsigned long flags; int ret; - ret = ufshcd_device_init(hba, init_dev_params); - if (ret) - goto out; - if (!hba->pm_op_in_progress && (hba->quirks & UFSHCD_QUIRK_REINIT_AFTER_MAX_GEAR_SWITCH)) { /* Reset the device and controller before doing reinit */ @@ -8861,13 +8830,13 @@ static int ufshcd_probe_hba(struct ufs_hba *hba, bool init_dev_params) dev_err(hba->dev, "Host controller enable failed\n"); ufshcd_print_evt_hist(hba); ufshcd_print_host_state(hba); - goto out; + return ret; } /* Reinit the device */ ret = ufshcd_device_init(hba, init_dev_params); if (ret) - goto out; + return ret; } ufshcd_print_pwr_info(hba); @@ -8887,18 +8856,7 @@ static int ufshcd_probe_hba(struct ufs_hba *hba, bool init_dev_params) ufshcd_write_ee_control(hba); ufshcd_configure_auto_hibern8(hba); -out: - spin_lock_irqsave(hba->host->host_lock, flags); - if (ret) - hba->ufshcd_state = UFSHCD_STATE_ERROR; - else if (hba->ufshcd_state == UFSHCD_STATE_RESET) - hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL; - spin_unlock_irqrestore(hba->host->host_lock, flags); - - trace_ufshcd_init(dev_name(hba->dev), ret, - ktime_to_us(ktime_sub(ktime_get(), start)), - hba->curr_dev_pwr_mode, hba->uic_link_state); - return ret; + return 0; } /** @@ -8909,11 +8867,14 @@ out: static void ufshcd_async_scan(void *data, async_cookie_t cookie) 
{ struct ufs_hba *hba = (struct ufs_hba *)data; + ktime_t probe_start; int ret; down(&hba->host_sem); /* Initialize hba, detect and initialize UFS device */ + probe_start = ktime_get(); ret = ufshcd_probe_hba(hba, true); + ufshcd_process_probe_result(hba, probe_start, ret); up(&hba->host_sem); if (ret) goto out; @@ -10314,6 +10275,8 @@ EXPORT_SYMBOL_GPL(ufshcd_dealloc_host); */ static int ufshcd_set_dma_mask(struct ufs_hba *hba) { + if (hba->vops && hba->vops->set_dma_mask) + return hba->vops->set_dma_mask(hba); if (hba->capabilities & MASK_64_ADDRESSING_SUPPORT) { if (!dma_set_mask_and_coherent(hba->dev, DMA_BIT_MASK(64))) return 0; @@ -10377,6 +10340,74 @@ static const struct blk_mq_ops ufshcd_tmf_ops = { .queue_rq = ufshcd_queue_tmf, }; +static int ufshcd_add_scsi_host(struct ufs_hba *hba) +{ + int err; + + if (is_mcq_supported(hba)) { + ufshcd_mcq_enable(hba); + err = ufshcd_alloc_mcq(hba); + if (!err) { + ufshcd_config_mcq(hba); + } else { + /* Continue with SDB mode */ + ufshcd_mcq_disable(hba); + use_mcq_mode = false; + dev_err(hba->dev, "MCQ mode is disabled, err=%d\n", + err); + } + } + if (!is_mcq_supported(hba) && !hba->lsdb_sup) { + dev_err(hba->dev, + "%s: failed to initialize (legacy doorbell mode not supported)\n", + __func__); + return -EINVAL; + } + + err = scsi_add_host(hba->host, hba->dev); + if (err) { + dev_err(hba->dev, "scsi_add_host failed\n"); + return err; + } + hba->scsi_host_added = true; + + hba->tmf_tag_set = (struct blk_mq_tag_set) { + .nr_hw_queues = 1, + .queue_depth = hba->nutmrs, + .ops = &ufshcd_tmf_ops, + .flags = BLK_MQ_F_NO_SCHED, + }; + err = blk_mq_alloc_tag_set(&hba->tmf_tag_set); + if (err < 0) + goto remove_scsi_host; + hba->tmf_queue = blk_mq_alloc_queue(&hba->tmf_tag_set, NULL, NULL); + if (IS_ERR(hba->tmf_queue)) { + err = PTR_ERR(hba->tmf_queue); + goto free_tmf_tag_set; + } + hba->tmf_rqs = devm_kcalloc(hba->dev, hba->nutmrs, + sizeof(*hba->tmf_rqs), GFP_KERNEL); + if (!hba->tmf_rqs) { + err = -ENOMEM; + goto free_tmf_queue; + } + + return 0; + +free_tmf_queue: + blk_mq_destroy_queue(hba->tmf_queue); + blk_put_queue(hba->tmf_queue); + +free_tmf_tag_set: + blk_mq_free_tag_set(&hba->tmf_tag_set); + +remove_scsi_host: + if (hba->scsi_host_added) + scsi_remove_host(hba->host); + + return err; +} + /** * ufshcd_init - Driver initialization routine * @hba: per-adapter instance @@ -10508,42 +10539,6 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq) hba->is_irq_enabled = true; } - if (!is_mcq_supported(hba)) { - if (!hba->lsdb_sup) { - dev_err(hba->dev, "%s: failed to initialize (legacy doorbell mode not supported)\n", - __func__); - err = -EINVAL; - goto out_disable; - } - err = scsi_add_host(host, hba->dev); - if (err) { - dev_err(hba->dev, "scsi_add_host failed\n"); - goto out_disable; - } - hba->scsi_host_added = true; - } - - hba->tmf_tag_set = (struct blk_mq_tag_set) { - .nr_hw_queues = 1, - .queue_depth = hba->nutmrs, - .ops = &ufshcd_tmf_ops, - .flags = BLK_MQ_F_NO_SCHED, - }; - err = blk_mq_alloc_tag_set(&hba->tmf_tag_set); - if (err < 0) - goto out_remove_scsi_host; - hba->tmf_queue = blk_mq_alloc_queue(&hba->tmf_tag_set, NULL, NULL); - if (IS_ERR(hba->tmf_queue)) { - err = PTR_ERR(hba->tmf_queue); - goto free_tmf_tag_set; - } - hba->tmf_rqs = devm_kcalloc(hba->dev, hba->nutmrs, - sizeof(*hba->tmf_rqs), GFP_KERNEL); - if (!hba->tmf_rqs) { - err = -ENOMEM; - goto free_tmf_queue; - } - /* Reset the attached device */ ufshcd_device_reset(hba); @@ -10555,7 +10550,7 @@ int ufshcd_init(struct ufs_hba *hba, void 
__iomem *mmio_base, unsigned int irq) dev_err(hba->dev, "Host controller enable failed\n"); ufshcd_print_evt_hist(hba); ufshcd_print_host_state(hba); - goto free_tmf_queue; + goto out_disable; } /* @@ -10581,7 +10576,7 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq) /* Hold auto suspend until async scan completes */ pm_runtime_get_sync(dev); - atomic_set(&hba->scsi_block_reqs_cnt, 0); + /* * We are assuming that device wasn't put in sleep/power-down * state exclusively during the boot stage before kernel. @@ -10590,6 +10585,49 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq) */ ufshcd_set_ufs_dev_active(hba); + /* Initialize hba, detect and initialize UFS device */ + ktime_t probe_start = ktime_get(); + + hba->ufshcd_state = UFSHCD_STATE_RESET; + + err = ufshcd_link_startup(hba); + if (err) + goto out_disable; + + if (hba->quirks & UFSHCD_QUIRK_SKIP_PH_CONFIGURATION) + goto initialized; + + /* Debug counters initialization */ + ufshcd_clear_dbg_ufs_stats(hba); + + /* UniPro link is active now */ + ufshcd_set_link_active(hba); + + /* Verify device initialization by sending NOP OUT UPIU */ + err = ufshcd_verify_dev_init(hba); + if (err) + goto out_disable; + + /* Initiate UFS initialization, and waiting until completion */ + err = ufshcd_complete_dev_init(hba); + if (err) + goto out_disable; + + err = ufshcd_device_params_init(hba); + if (err) + goto out_disable; + + err = ufshcd_post_device_init(hba); + +initialized: + ufshcd_process_probe_result(hba, probe_start, err); + if (err) + goto out_disable; + + err = ufshcd_add_scsi_host(hba); + if (err) + goto out_disable; + async_schedule(ufshcd_async_scan, hba); ufs_sysfs_add_nodes(hba->dev); @@ -10597,14 +10635,6 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq) ufshcd_pm_qos_init(hba); return 0; -free_tmf_queue: - blk_mq_destroy_queue(hba->tmf_queue); - blk_put_queue(hba->tmf_queue); -free_tmf_tag_set: - blk_mq_free_tag_set(&hba->tmf_tag_set); -out_remove_scsi_host: - if (hba->scsi_host_added) - scsi_remove_host(hba->host); out_disable: hba->is_irq_enabled = false; ufshcd_hba_exit(hba); diff --git a/drivers/ufs/host/ufs-exynos.c b/drivers/ufs/host/ufs-exynos.c index 9ec318ef52bf..91827b3e582b 100644 --- a/drivers/ufs/host/ufs-exynos.c +++ b/drivers/ufs/host/ufs-exynos.c @@ -48,6 +48,8 @@ #define HCI_UNIPRO_APB_CLK_CTRL 0x68 #define UNIPRO_APB_CLK(v, x) (((v) & ~0xF) | ((x) & 0xF)) #define HCI_AXIDMA_RWDATA_BURST_LEN 0x6C +#define WLU_EN BIT(31) +#define WLU_BURST_LEN(x) ((x) << 27 | ((x) & 0xF)) #define HCI_GPIO_OUT 0x70 #define HCI_ERR_EN_PA_LAYER 0x78 #define HCI_ERR_EN_DL_LAYER 0x7C @@ -74,6 +76,10 @@ #define CLK_CTRL_EN_MASK (REFCLK_CTRL_EN |\ UNIPRO_PCLK_CTRL_EN |\ UNIPRO_MCLK_CTRL_EN) + +#define HCI_IOP_ACG_DISABLE 0x100 +#define HCI_IOP_ACG_DISABLE_EN BIT(0) + /* Device fatal error */ #define DFES_ERR_EN BIT(31) #define DFES_DEF_L2_ERRS (UIC_DATA_LINK_LAYER_ERROR_RX_BUF_OF |\ @@ -198,15 +204,8 @@ static inline void exynos_ufs_ungate_clks(struct exynos_ufs *ufs) exynos_ufs_ctrl_clkstop(ufs, false); } -static int exynos7_ufs_drv_init(struct device *dev, struct exynos_ufs *ufs) -{ - return 0; -} - -static int exynosauto_ufs_drv_init(struct device *dev, struct exynos_ufs *ufs) +static int exynos_ufs_shareability(struct exynos_ufs *ufs) { - struct exynos_ufs_uic_attr *attr = ufs->drv_data->uic_attr; - /* IO Coherency setting */ if (ufs->sysreg) { return regmap_update_bits(ufs->sysreg, @@ -214,11 +213,32 @@ static int 
exynosauto_ufs_drv_init(struct device *dev, struct exynos_ufs *ufs) UFS_SHARABLE, UFS_SHARABLE); } - attr->tx_dif_p_nsec = 3200000; - return 0; } +static int gs101_ufs_drv_init(struct exynos_ufs *ufs) +{ + struct ufs_hba *hba = ufs->hba; + u32 reg; + + /* Enable WriteBooster */ + hba->caps |= UFSHCD_CAP_WB_EN; + + /* Enable clock gating and hibern8 */ + hba->caps |= UFSHCD_CAP_CLK_GATING | UFSHCD_CAP_HIBERN8_WITH_CLK_GATING; + + /* set ACG to be controlled by UFS_ACG_DISABLE */ + reg = hci_readl(ufs, HCI_IOP_ACG_DISABLE); + hci_writel(ufs, reg & (~HCI_IOP_ACG_DISABLE_EN), HCI_IOP_ACG_DISABLE); + + return exynos_ufs_shareability(ufs); +} + +static int exynosauto_ufs_drv_init(struct exynos_ufs *ufs) +{ + return exynos_ufs_shareability(ufs); +} + static int exynosauto_ufs_post_hce_enable(struct exynos_ufs *ufs) { struct ufs_hba *hba = ufs->hba; @@ -546,6 +566,9 @@ static void exynos_ufs_specify_phy_time_attr(struct exynos_ufs *ufs) struct exynos_ufs_uic_attr *attr = ufs->drv_data->uic_attr; struct ufs_phy_time_cfg *t_cfg = &ufs->t_cfg; + if (ufs->opts & EXYNOS_UFS_OPT_SKIP_CONFIG_PHY_ATTR) + return; + t_cfg->tx_linereset_p = exynos_ufs_calc_time_cntr(ufs, attr->tx_dif_p_nsec); t_cfg->tx_linereset_n = @@ -724,6 +747,9 @@ static void exynos_ufs_config_smu(struct exynos_ufs *ufs) { u32 reg, val; + if (ufs->opts & EXYNOS_UFS_OPT_UFSPR_SECURE) + return; + exynos_ufs_disable_auto_ctrl_hcc_save(ufs, &val); /* make encryption disabled by default */ @@ -771,6 +797,21 @@ static void exynos_ufs_config_sync_pattern_mask(struct exynos_ufs *ufs, exynos_ufs_disable_ov_tm(hba); } +#define UFS_HW_VER_MAJOR_MASK GENMASK(15, 8) + +static u32 exynos_ufs_get_hs_gear(struct ufs_hba *hba) +{ + u8 major; + + major = FIELD_GET(UFS_HW_VER_MAJOR_MASK, hba->ufs_version); + + if (major >= 3) + return UFS_HS_G4; + + /* Default is HS-G3 */ + return UFS_HS_G3; +} + static int exynos_ufs_pre_pwr_mode(struct ufs_hba *hba, struct ufs_pa_layer_attr *dev_max_params, struct ufs_pa_layer_attr *dev_req_params) @@ -788,6 +829,10 @@ static int exynos_ufs_pre_pwr_mode(struct ufs_hba *hba, ufshcd_init_host_params(&host_params); + /* This driver only support symmetric gear setting e.g. 
hs_tx_gear == hs_rx_gear */ + host_params.hs_tx_gear = exynos_ufs_get_hs_gear(hba); + host_params.hs_rx_gear = exynos_ufs_get_hs_gear(hba); + ret = ufshcd_negotiate_pwr_params(&host_params, dev_max_params, dev_req_params); if (ret) { pr_err("%s: failed to determine capabilities\n", __func__); @@ -1429,7 +1474,7 @@ static int exynos_ufs_init(struct ufs_hba *hba) exynos_ufs_fmp_init(hba, ufs); if (ufs->drv_data->drv_init) { - ret = ufs->drv_data->drv_init(dev, ufs); + ret = ufs->drv_data->drv_init(ufs); if (ret) { dev_err(dev, "failed to init drv-data\n"); goto out; @@ -1440,8 +1485,8 @@ static int exynos_ufs_init(struct ufs_hba *hba) if (ret) goto out; exynos_ufs_specify_phy_time_attr(ufs); - if (!(ufs->opts & EXYNOS_UFS_OPT_UFSPR_SECURE)) - exynos_ufs_config_smu(ufs); + + exynos_ufs_config_smu(ufs); hba->host->dma_alignment = DATA_UNIT_SIZE - 1; return 0; @@ -1484,12 +1529,12 @@ static void exynos_ufs_dev_hw_reset(struct ufs_hba *hba) hci_writel(ufs, 1 << 0, HCI_GPIO_OUT); } -static void exynos_ufs_pre_hibern8(struct ufs_hba *hba, u8 enter) +static void exynos_ufs_pre_hibern8(struct ufs_hba *hba, enum uic_cmd_dme cmd) { struct exynos_ufs *ufs = ufshcd_get_variant(hba); struct exynos_ufs_uic_attr *attr = ufs->drv_data->uic_attr; - if (!enter) { + if (cmd == UIC_CMD_DME_HIBER_EXIT) { if (ufs->opts & EXYNOS_UFS_OPT_BROKEN_AUTO_CLK_CTRL) exynos_ufs_disable_auto_ctrl_hcc(ufs); exynos_ufs_ungate_clks(ufs); @@ -1517,30 +1562,11 @@ static void exynos_ufs_pre_hibern8(struct ufs_hba *hba, u8 enter) } } -static void exynos_ufs_post_hibern8(struct ufs_hba *hba, u8 enter) +static void exynos_ufs_post_hibern8(struct ufs_hba *hba, enum uic_cmd_dme cmd) { struct exynos_ufs *ufs = ufshcd_get_variant(hba); - if (!enter) { - u32 cur_mode = 0; - u32 pwrmode; - - if (ufshcd_is_hs_mode(&ufs->dev_req_params)) - pwrmode = FAST_MODE; - else - pwrmode = SLOW_MODE; - - ufshcd_dme_get(hba, UIC_ARG_MIB(PA_PWRMODE), &cur_mode); - if (cur_mode != (pwrmode << 4 | pwrmode)) { - dev_warn(hba->dev, "%s: power mode change\n", __func__); - hba->pwr_info.pwr_rx = (cur_mode >> 4) & 0xf; - hba->pwr_info.pwr_tx = cur_mode & 0xf; - ufshcd_config_pwr_mode(hba, &hba->max_pwr_info.info); - } - - if (!(ufs->opts & EXYNOS_UFS_OPT_SKIP_CONNECTION_ESTAB)) - exynos_ufs_establish_connt(ufs); - } else { + if (cmd == UIC_CMD_DME_HIBER_ENTER) { ufs->entry_hibern8_t = ktime_get(); exynos_ufs_gate_clks(ufs); if (ufs->opts & EXYNOS_UFS_OPT_BROKEN_AUTO_CLK_CTRL) @@ -1627,15 +1653,15 @@ static int exynos_ufs_pwr_change_notify(struct ufs_hba *hba, } static void exynos_ufs_hibern8_notify(struct ufs_hba *hba, - enum uic_cmd_dme enter, + enum uic_cmd_dme cmd, enum ufs_notify_change_status notify) { switch ((u8)notify) { case PRE_CHANGE: - exynos_ufs_pre_hibern8(hba, enter); + exynos_ufs_pre_hibern8(hba, cmd); break; case POST_CHANGE: - exynos_ufs_post_hibern8(hba, enter); + exynos_ufs_post_hibern8(hba, cmd); break; } } @@ -1891,6 +1917,12 @@ static int gs101_ufs_post_link(struct exynos_ufs *ufs) { struct ufs_hba *hba = ufs->hba; + /* + * Enable Write Line Unique. This field has to be 0x3 + * to support Write Line Unique transaction on gs101. 
+ */ + hci_writel(ufs, WLU_EN | WLU_BURST_LEN(3), HCI_AXIDMA_RWDATA_BURST_LEN); + exynos_ufs_enable_dbg_mode(hba); ufshcd_dme_set(hba, UIC_ARG_MIB(PA_SAVECONFIGTIME), 0x3e8); exynos_ufs_disable_dbg_mode(hba); @@ -2036,7 +2068,6 @@ static const struct exynos_ufs_drv_data exynos_ufs_drvs = { EXYNOS_UFS_OPT_BROKEN_RX_SEL_IDX | EXYNOS_UFS_OPT_SKIP_CONNECTION_ESTAB | EXYNOS_UFS_OPT_USE_SW_HIBERN8_TIMER, - .drv_init = exynos7_ufs_drv_init, .pre_link = exynos7_ufs_pre_link, .post_link = exynos7_ufs_post_link, .pre_pwr_change = exynos7_ufs_pre_pwr_change, @@ -2045,26 +2076,6 @@ static const struct exynos_ufs_drv_data exynos_ufs_drvs = { static struct exynos_ufs_uic_attr gs101_uic_attr = { .tx_trailingclks = 0xff, - .tx_dif_p_nsec = 3000000, /* unit: ns */ - .tx_dif_n_nsec = 1000000, /* unit: ns */ - .tx_high_z_cnt_nsec = 20000, /* unit: ns */ - .tx_base_unit_nsec = 100000, /* unit: ns */ - .tx_gran_unit_nsec = 4000, /* unit: ns */ - .tx_sleep_cnt = 1000, /* unit: ns */ - .tx_min_activatetime = 0xa, - .rx_filler_enable = 0x2, - .rx_dif_p_nsec = 1000000, /* unit: ns */ - .rx_hibern8_wait_nsec = 4000000, /* unit: ns */ - .rx_base_unit_nsec = 100000, /* unit: ns */ - .rx_gran_unit_nsec = 4000, /* unit: ns */ - .rx_sleep_cnt = 1280, /* unit: ns */ - .rx_stall_cnt = 320, /* unit: ns */ - .rx_hs_g1_sync_len_cap = SYNC_LEN_COARSE(0xf), - .rx_hs_g2_sync_len_cap = SYNC_LEN_COARSE(0xf), - .rx_hs_g3_sync_len_cap = SYNC_LEN_COARSE(0xf), - .rx_hs_g1_prep_sync_len_cap = PREP_LEN(0xf), - .rx_hs_g2_prep_sync_len_cap = PREP_LEN(0xf), - .rx_hs_g3_prep_sync_len_cap = PREP_LEN(0xf), .pa_dbg_opt_suite1_val = 0x90913C1C, .pa_dbg_opt_suite1_off = PA_GS101_DBG_OPTION_SUITE1, .pa_dbg_opt_suite2_val = 0xE01C115F, @@ -2122,11 +2133,10 @@ static const struct exynos_ufs_drv_data gs101_ufs_drvs = { UFSHCD_QUIRK_BROKEN_OCS_FATAL_ERROR | UFSHCI_QUIRK_SKIP_MANUAL_WB_FLUSH_CTRL | UFSHCD_QUIRK_SKIP_DEF_UNIPRO_TIMEOUT_SETTING, - .opts = EXYNOS_UFS_OPT_BROKEN_AUTO_CLK_CTRL | - EXYNOS_UFS_OPT_SKIP_CONFIG_PHY_ATTR | + .opts = EXYNOS_UFS_OPT_SKIP_CONFIG_PHY_ATTR | EXYNOS_UFS_OPT_UFSPR_SECURE | EXYNOS_UFS_OPT_TIMER_TICK_SELECT, - .drv_init = exynosauto_ufs_drv_init, + .drv_init = gs101_ufs_drv_init, .pre_link = gs101_ufs_pre_link, .post_link = gs101_ufs_post_link, .pre_pwr_change = gs101_ufs_pre_pwr_change, diff --git a/drivers/ufs/host/ufs-exynos.h b/drivers/ufs/host/ufs-exynos.h index 1646c4a9bb08..9670dc138d1e 100644 --- a/drivers/ufs/host/ufs-exynos.h +++ b/drivers/ufs/host/ufs-exynos.h @@ -182,7 +182,7 @@ struct exynos_ufs_drv_data { unsigned int quirks; unsigned int opts; /* SoC's specific operations */ - int (*drv_init)(struct device *dev, struct exynos_ufs *ufs); + int (*drv_init)(struct exynos_ufs *ufs); int (*pre_link)(struct exynos_ufs *ufs); int (*post_link)(struct exynos_ufs *ufs); int (*pre_pwr_change)(struct exynos_ufs *ufs, diff --git a/drivers/ufs/host/ufs-qcom.c b/drivers/ufs/host/ufs-qcom.c index ecdfff2456e3..a5a0646bb80a 100644 --- a/drivers/ufs/host/ufs-qcom.c +++ b/drivers/ufs/host/ufs-qcom.c @@ -828,12 +828,28 @@ static int ufs_qcom_apply_dev_quirks(struct ufs_hba *hba) if (hba->dev_quirks & UFS_DEVICE_QUIRK_HOST_PA_SAVECONFIGTIME) err = ufs_qcom_quirk_host_pa_saveconfigtime(hba); - if (hba->dev_info.wmanufacturerid == UFS_VENDOR_WDC) - hba->dev_quirks |= UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE; - return err; } +/* UFS device-specific quirks */ +static struct ufs_dev_quirk ufs_qcom_dev_fixups[] = { + { .wmanufacturerid = UFS_VENDOR_SKHYNIX, + .model = UFS_ANY_MODEL, + .quirk = UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM }, + { 
.wmanufacturerid = UFS_VENDOR_TOSHIBA, + .model = UFS_ANY_MODEL, + .quirk = UFS_DEVICE_QUIRK_DELAY_AFTER_LPM }, + { .wmanufacturerid = UFS_VENDOR_WDC, + .model = UFS_ANY_MODEL, + .quirk = UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE }, + {} +}; + +static void ufs_qcom_fixup_dev_quirks(struct ufs_hba *hba) +{ + ufshcd_fixup_dev_quirks(hba, ufs_qcom_dev_fixups); +} + static u32 ufs_qcom_get_ufs_hci_version(struct ufs_hba *hba) { return ufshci_version(2, 0); @@ -1801,6 +1817,7 @@ static const struct ufs_hba_variant_ops ufs_hba_qcom_vops = { .link_startup_notify = ufs_qcom_link_startup_notify, .pwr_change_notify = ufs_qcom_pwr_change_notify, .apply_dev_quirks = ufs_qcom_apply_dev_quirks, + .fixup_dev_quirks = ufs_qcom_fixup_dev_quirks, .suspend = ufs_qcom_suspend, .resume = ufs_qcom_resume, .dbg_register_dump = ufs_qcom_dump_dbg_regs, diff --git a/drivers/ufs/host/ufs-renesas.c b/drivers/ufs/host/ufs-renesas.c index 8711e5cbc968..3ff97112e1f6 100644 --- a/drivers/ufs/host/ufs-renesas.c +++ b/drivers/ufs/host/ufs-renesas.c @@ -7,6 +7,7 @@ #include <linux/clk.h> #include <linux/delay.h> +#include <linux/dma-mapping.h> #include <linux/err.h> #include <linux/iopoll.h> #include <linux/kernel.h> @@ -364,14 +365,20 @@ static int ufs_renesas_init(struct ufs_hba *hba) return -ENOMEM; ufshcd_set_variant(hba, priv); - hba->quirks |= UFSHCD_QUIRK_BROKEN_64BIT_ADDRESS | UFSHCD_QUIRK_HIBERN_FASTAUTO; + hba->quirks |= UFSHCD_QUIRK_HIBERN_FASTAUTO; return 0; } +static int ufs_renesas_set_dma_mask(struct ufs_hba *hba) +{ + return dma_set_mask_and_coherent(hba->dev, DMA_BIT_MASK(32)); +} + static const struct ufs_hba_variant_ops ufs_renesas_vops = { .name = "renesas", .init = ufs_renesas_init, + .set_dma_mask = ufs_renesas_set_dma_mask, .setup_clocks = ufs_renesas_setup_clocks, .hce_enable_notify = ufs_renesas_hce_enable_notify, .dbg_register_dump = ufs_renesas_dbg_register_dump, diff --git a/include/ufs/ufshcd.h b/include/ufs/ufshcd.h index 3f68ae3e4330..9ea2a7411bb5 100644 --- a/include/ufs/ufshcd.h +++ b/include/ufs/ufshcd.h @@ -299,6 +299,8 @@ struct ufs_pwr_mode_info { * @max_num_rtt: maximum RTT supported by the host * @init: called when the driver is initialized * @exit: called to cleanup everything done in init + * @set_dma_mask: For setting another DMA mask than indicated by the 64AS + * capability bit. * @get_ufs_hci_version: called to get UFS HCI version * @clk_scale_notify: notifies that clks are scaled up/down * @setup_clocks: called before touching any of the controller registers @@ -341,6 +343,7 @@ struct ufs_hba_variant_ops { int (*init)(struct ufs_hba *); void (*exit)(struct ufs_hba *); u32 (*get_ufs_hci_version)(struct ufs_hba *); + int (*set_dma_mask)(struct ufs_hba *); int (*clk_scale_notify)(struct ufs_hba *, bool, enum ufs_notify_change_status); int (*setup_clocks)(struct ufs_hba *, bool, @@ -625,12 +628,6 @@ enum ufshcd_quirks { /* * This quirk needs to be enabled if the host controller has - * 64-bit addressing supported capability but it doesn't work. - */ - UFSHCD_QUIRK_BROKEN_64BIT_ADDRESS = 1 << 17, - - /* - * This quirk needs to be enabled if the host controller has * auto-hibernate capability but it's FASTAUTO only. */ UFSHCD_QUIRK_HIBERN_FASTAUTO = 1 << 18, @@ -877,9 +874,10 @@ enum ufshcd_mcq_opr { * @tmf_tag_set: TMF tag set. * @tmf_queue: Used to allocate TMF tags. * @tmf_rqs: array with pointers to TMF requests while these are in progress. 
- * @active_uic_cmd: handle of active UIC command - * @uic_cmd_mutex: mutex for UIC command - * @uic_async_done: completion used during UIC processing + * @active_uic_cmd: pointer to active UIC command. + * @uic_cmd_mutex: mutex used for serializing UIC command processing. + * @uic_async_done: completion used to wait for power mode or hibernation state + * changes. * @ufshcd_state: UFSHCD state * @eh_flags: Error handling flags * @intr_mask: Interrupt Mask Bits @@ -927,7 +925,6 @@ enum ufshcd_mcq_opr { * @wb_mutex: used to serialize devfreq and sysfs write booster toggling * @clk_scaling_lock: used to serialize device commands and clock scaling * @desc_size: descriptor sizes reported by device - * @scsi_block_reqs_cnt: reference counting for scsi block requests * @bsg_dev: struct device associated with the BSG queue * @bsg_queue: BSG queue associated with the UFS controller * @rpm_dev_flush_recheck_work: used to suspend from RPM (runtime power @@ -1088,7 +1085,6 @@ struct ufs_hba { struct mutex wb_mutex; struct rw_semaphore clk_scaling_lock; - atomic_t scsi_block_reqs_cnt; struct device bsg_dev; struct request_queue *bsg_queue; @@ -1320,8 +1316,8 @@ void ufshcd_mcq_write_cqis(struct ufs_hba *hba, u32 val, int i); unsigned long ufshcd_mcq_poll_cqe_lock(struct ufs_hba *hba, struct ufs_hw_queue *hwq); void ufshcd_mcq_make_queues_operational(struct ufs_hba *hba); -void ufshcd_mcq_enable_esi(struct ufs_hba *hba); void ufshcd_mcq_enable(struct ufs_hba *hba); +void ufshcd_mcq_enable_esi(struct ufs_hba *hba); void ufshcd_mcq_config_esi(struct ufs_hba *hba, struct msi_msg *msg); int ufshcd_opp_config_clks(struct device *dev, struct opp_table *opp_table, |