Diffstat (limited to 'drivers/net')
419 files changed, 20072 insertions, 11227 deletions
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 0d87e11e7f1d..ee28ec9e0aba 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -210,6 +210,7 @@ static void bond_get_stats(struct net_device *bond_dev, static void bond_slave_arr_handler(struct work_struct *work); static bool bond_time_in_interval(struct bonding *bond, unsigned long last_act, int mod); +static void bond_netdev_notify_work(struct work_struct *work); /*---------------------------- General routines -----------------------------*/ @@ -1170,9 +1171,27 @@ static rx_handler_result_t bond_handle_frame(struct sk_buff **pskb) } } - /* don't change skb->dev for link-local packets */ - if (is_link_local_ether_addr(eth_hdr(skb)->h_dest)) + /* Link-local multicast packets should be passed to the + * stack on the link they arrive as well as pass them to the + * bond-master device. These packets are mostly usable when + * stack receives it with the link on which they arrive + * (e.g. LLDP) they also must be available on master. Some of + * the use cases include (but are not limited to): LLDP agents + * that must be able to operate both on enslaved interfaces as + * well as on bonds themselves; linux bridges that must be able + * to process/pass BPDUs from attached bonds when any kind of + * STP version is enabled on the network. + */ + if (is_link_local_ether_addr(eth_hdr(skb)->h_dest)) { + struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC); + + if (nskb) { + nskb->dev = bond->dev; + nskb->queue_mapping = 0; + netif_rx(nskb); + } return RX_HANDLER_PASS; + } if (bond_should_deliver_exact_match(skb, slave, bond)) return RX_HANDLER_EXACT; @@ -1269,6 +1288,8 @@ static struct slave *bond_alloc_slave(struct bonding *bond) return NULL; } } + INIT_DELAYED_WORK(&slave->notify_work, bond_netdev_notify_work); + return slave; } @@ -1276,6 +1297,7 @@ static void bond_free_slave(struct slave *slave) { struct bonding *bond = bond_get_bond_by_slave(slave); + cancel_delayed_work_sync(&slave->notify_work); if (BOND_MODE(bond) == BOND_MODE_8023AD) kfree(SLAVE_AD_INFO(slave)); @@ -1297,39 +1319,26 @@ static void bond_fill_ifslave(struct slave *slave, struct ifslave *info) info->link_failure_count = slave->link_failure_count; } -static void bond_netdev_notify(struct net_device *dev, - struct netdev_bonding_info *info) -{ - rtnl_lock(); - netdev_bonding_info_change(dev, info); - rtnl_unlock(); -} - static void bond_netdev_notify_work(struct work_struct *_work) { - struct netdev_notify_work *w = - container_of(_work, struct netdev_notify_work, work.work); + struct slave *slave = container_of(_work, struct slave, + notify_work.work); + + if (rtnl_trylock()) { + struct netdev_bonding_info binfo; - bond_netdev_notify(w->dev, &w->bonding_info); - dev_put(w->dev); - kfree(w); + bond_fill_ifslave(slave, &binfo.slave); + bond_fill_ifbond(slave->bond, &binfo.master); + netdev_bonding_info_change(slave->dev, &binfo); + rtnl_unlock(); + } else { + queue_delayed_work(slave->bond->wq, &slave->notify_work, 1); + } } void bond_queue_slave_event(struct slave *slave) { - struct bonding *bond = slave->bond; - struct netdev_notify_work *nnw = kzalloc(sizeof(*nnw), GFP_ATOMIC); - - if (!nnw) - return; - - dev_hold(slave->dev); - nnw->dev = slave->dev; - bond_fill_ifslave(slave, &nnw->bonding_info.slave); - bond_fill_ifbond(bond, &nnw->bonding_info.master); - INIT_DELAYED_WORK(&nnw->work, bond_netdev_notify_work); - - queue_delayed_work(slave->bond->wq, &nnw->work, 0); + queue_delayed_work(slave->bond->wq, 
&slave->notify_work, 0); } void bond_lower_state_changed(struct slave *slave) diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c index 700d86dd5e13..0e4bbdcc614f 100644 --- a/drivers/net/dsa/b53/b53_common.c +++ b/drivers/net/dsa/b53/b53_common.c @@ -1291,7 +1291,7 @@ void b53_vlan_add(struct dsa_switch *ds, int port, b53_get_vlan_entry(dev, vid, vl); vl->members |= BIT(port); - if (untagged) + if (untagged && !dsa_is_cpu_port(ds, port)) vl->untag |= BIT(port); else vl->untag &= ~BIT(port); @@ -1333,7 +1333,7 @@ int b53_vlan_del(struct dsa_switch *ds, int port, pvid = 0; } - if (untagged) + if (untagged && !dsa_is_cpu_port(ds, port)) vl->untag &= ~(BIT(port)); b53_set_vlan_entry(dev, vid, vl); diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c index 1fc27e149e7f..3017ecf82ca5 100644 --- a/drivers/net/dsa/bcm_sf2.c +++ b/drivers/net/dsa/bcm_sf2.c @@ -702,7 +702,6 @@ static int bcm_sf2_sw_suspend(struct dsa_switch *ds) static int bcm_sf2_sw_resume(struct dsa_switch *ds) { struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds); - unsigned int port; int ret; ret = bcm_sf2_sw_rst(priv); @@ -714,14 +713,7 @@ static int bcm_sf2_sw_resume(struct dsa_switch *ds) if (priv->hw_params.num_gphy == 1) bcm_sf2_gphy_enable_set(ds, true); - for (port = 0; port < DSA_MAX_PORTS; port++) { - if (dsa_is_user_port(ds, port)) - bcm_sf2_port_setup(ds, port, NULL); - else if (dsa_is_cpu_port(ds, port)) - bcm_sf2_imp_setup(ds, port); - } - - bcm_sf2_enable_acb(ds); + ds->ops->setup(ds); return 0; } @@ -1172,10 +1164,10 @@ static int bcm_sf2_sw_remove(struct platform_device *pdev) { struct bcm_sf2_priv *priv = platform_get_drvdata(pdev); - /* Disable all ports and interrupts */ priv->wol_ports_mask = 0; - bcm_sf2_sw_suspend(priv->dev->ds); dsa_unregister_switch(priv->dev->ds); + /* Disable all ports and interrupts */ + bcm_sf2_sw_suspend(priv->dev->ds); bcm_sf2_mdio_unregister(priv); return 0; diff --git a/drivers/net/ethernet/amazon/ena/ena_admin_defs.h b/drivers/net/ethernet/amazon/ena/ena_admin_defs.h index 4532e574ebcd..9f80b73f90b1 100644 --- a/drivers/net/ethernet/amazon/ena/ena_admin_defs.h +++ b/drivers/net/ethernet/amazon/ena/ena_admin_defs.h @@ -32,115 +32,81 @@ #ifndef _ENA_ADMIN_H_ #define _ENA_ADMIN_H_ -enum ena_admin_aq_opcode { - ENA_ADMIN_CREATE_SQ = 1, - - ENA_ADMIN_DESTROY_SQ = 2, - - ENA_ADMIN_CREATE_CQ = 3, - - ENA_ADMIN_DESTROY_CQ = 4, - ENA_ADMIN_GET_FEATURE = 8, - - ENA_ADMIN_SET_FEATURE = 9, - - ENA_ADMIN_GET_STATS = 11, +enum ena_admin_aq_opcode { + ENA_ADMIN_CREATE_SQ = 1, + ENA_ADMIN_DESTROY_SQ = 2, + ENA_ADMIN_CREATE_CQ = 3, + ENA_ADMIN_DESTROY_CQ = 4, + ENA_ADMIN_GET_FEATURE = 8, + ENA_ADMIN_SET_FEATURE = 9, + ENA_ADMIN_GET_STATS = 11, }; enum ena_admin_aq_completion_status { - ENA_ADMIN_SUCCESS = 0, - - ENA_ADMIN_RESOURCE_ALLOCATION_FAILURE = 1, - - ENA_ADMIN_BAD_OPCODE = 2, - - ENA_ADMIN_UNSUPPORTED_OPCODE = 3, - - ENA_ADMIN_MALFORMED_REQUEST = 4, - + ENA_ADMIN_SUCCESS = 0, + ENA_ADMIN_RESOURCE_ALLOCATION_FAILURE = 1, + ENA_ADMIN_BAD_OPCODE = 2, + ENA_ADMIN_UNSUPPORTED_OPCODE = 3, + ENA_ADMIN_MALFORMED_REQUEST = 4, /* Additional status is provided in ACQ entry extended_status */ - ENA_ADMIN_ILLEGAL_PARAMETER = 5, - - ENA_ADMIN_UNKNOWN_ERROR = 6, + ENA_ADMIN_ILLEGAL_PARAMETER = 5, + ENA_ADMIN_UNKNOWN_ERROR = 6, + ENA_ADMIN_RESOURCE_BUSY = 7, }; enum ena_admin_aq_feature_id { - ENA_ADMIN_DEVICE_ATTRIBUTES = 1, - - ENA_ADMIN_MAX_QUEUES_NUM = 2, - - ENA_ADMIN_HW_HINTS = 3, - - ENA_ADMIN_RSS_HASH_FUNCTION = 10, - - ENA_ADMIN_STATELESS_OFFLOAD_CONFIG = 
11, - - ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG = 12, - - ENA_ADMIN_MTU = 14, - - ENA_ADMIN_RSS_HASH_INPUT = 18, - - ENA_ADMIN_INTERRUPT_MODERATION = 20, - - ENA_ADMIN_AENQ_CONFIG = 26, - - ENA_ADMIN_LINK_CONFIG = 27, - - ENA_ADMIN_HOST_ATTR_CONFIG = 28, - - ENA_ADMIN_FEATURES_OPCODE_NUM = 32, + ENA_ADMIN_DEVICE_ATTRIBUTES = 1, + ENA_ADMIN_MAX_QUEUES_NUM = 2, + ENA_ADMIN_HW_HINTS = 3, + ENA_ADMIN_LLQ = 4, + ENA_ADMIN_RSS_HASH_FUNCTION = 10, + ENA_ADMIN_STATELESS_OFFLOAD_CONFIG = 11, + ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG = 12, + ENA_ADMIN_MTU = 14, + ENA_ADMIN_RSS_HASH_INPUT = 18, + ENA_ADMIN_INTERRUPT_MODERATION = 20, + ENA_ADMIN_AENQ_CONFIG = 26, + ENA_ADMIN_LINK_CONFIG = 27, + ENA_ADMIN_HOST_ATTR_CONFIG = 28, + ENA_ADMIN_FEATURES_OPCODE_NUM = 32, }; enum ena_admin_placement_policy_type { /* descriptors and headers are in host memory */ - ENA_ADMIN_PLACEMENT_POLICY_HOST = 1, - + ENA_ADMIN_PLACEMENT_POLICY_HOST = 1, /* descriptors and headers are in device memory (a.k.a Low Latency * Queue) */ - ENA_ADMIN_PLACEMENT_POLICY_DEV = 3, + ENA_ADMIN_PLACEMENT_POLICY_DEV = 3, }; enum ena_admin_link_types { - ENA_ADMIN_LINK_SPEED_1G = 0x1, - - ENA_ADMIN_LINK_SPEED_2_HALF_G = 0x2, - - ENA_ADMIN_LINK_SPEED_5G = 0x4, - - ENA_ADMIN_LINK_SPEED_10G = 0x8, - - ENA_ADMIN_LINK_SPEED_25G = 0x10, - - ENA_ADMIN_LINK_SPEED_40G = 0x20, - - ENA_ADMIN_LINK_SPEED_50G = 0x40, - - ENA_ADMIN_LINK_SPEED_100G = 0x80, - - ENA_ADMIN_LINK_SPEED_200G = 0x100, - - ENA_ADMIN_LINK_SPEED_400G = 0x200, + ENA_ADMIN_LINK_SPEED_1G = 0x1, + ENA_ADMIN_LINK_SPEED_2_HALF_G = 0x2, + ENA_ADMIN_LINK_SPEED_5G = 0x4, + ENA_ADMIN_LINK_SPEED_10G = 0x8, + ENA_ADMIN_LINK_SPEED_25G = 0x10, + ENA_ADMIN_LINK_SPEED_40G = 0x20, + ENA_ADMIN_LINK_SPEED_50G = 0x40, + ENA_ADMIN_LINK_SPEED_100G = 0x80, + ENA_ADMIN_LINK_SPEED_200G = 0x100, + ENA_ADMIN_LINK_SPEED_400G = 0x200, }; enum ena_admin_completion_policy_type { /* completion queue entry for each sq descriptor */ - ENA_ADMIN_COMPLETION_POLICY_DESC = 0, - + ENA_ADMIN_COMPLETION_POLICY_DESC = 0, /* completion queue entry upon request in sq descriptor */ - ENA_ADMIN_COMPLETION_POLICY_DESC_ON_DEMAND = 1, - + ENA_ADMIN_COMPLETION_POLICY_DESC_ON_DEMAND = 1, /* current queue head pointer is updated in OS memory upon sq * descriptor request */ - ENA_ADMIN_COMPLETION_POLICY_HEAD_ON_DEMAND = 2, - + ENA_ADMIN_COMPLETION_POLICY_HEAD_ON_DEMAND = 2, /* current queue head pointer is updated in OS memory for each sq * descriptor */ - ENA_ADMIN_COMPLETION_POLICY_HEAD = 3, + ENA_ADMIN_COMPLETION_POLICY_HEAD = 3, }; /* basic stats return ena_admin_basic_stats while extanded stats return a @@ -148,15 +114,13 @@ enum ena_admin_completion_policy_type { * device id */ enum ena_admin_get_stats_type { - ENA_ADMIN_GET_STATS_TYPE_BASIC = 0, - - ENA_ADMIN_GET_STATS_TYPE_EXTENDED = 1, + ENA_ADMIN_GET_STATS_TYPE_BASIC = 0, + ENA_ADMIN_GET_STATS_TYPE_EXTENDED = 1, }; enum ena_admin_get_stats_scope { - ENA_ADMIN_SPECIFIC_QUEUE = 0, - - ENA_ADMIN_ETH_TRAFFIC = 1, + ENA_ADMIN_SPECIFIC_QUEUE = 0, + ENA_ADMIN_ETH_TRAFFIC = 1, }; struct ena_admin_aq_common_desc { @@ -227,7 +191,9 @@ struct ena_admin_acq_common_desc { u16 extended_status; - /* serves as a hint what AQ entries can be revoked */ + /* indicates to the driver which AQ entry has been consumed by the + * device and could be reused + */ u16 sq_head_indx; }; @@ -296,9 +262,8 @@ struct ena_admin_aq_create_sq_cmd { }; enum ena_admin_sq_direction { - ENA_ADMIN_SQ_DIRECTION_TX = 1, - - ENA_ADMIN_SQ_DIRECTION_RX = 2, + ENA_ADMIN_SQ_DIRECTION_TX = 1, + ENA_ADMIN_SQ_DIRECTION_RX = 
2, }; struct ena_admin_acq_create_sq_resp_desc { @@ -483,8 +448,85 @@ struct ena_admin_device_attr_feature_desc { u32 max_mtu; }; +enum ena_admin_llq_header_location { + /* header is in descriptor list */ + ENA_ADMIN_INLINE_HEADER = 1, + /* header in a separate ring, implies 16B descriptor list entry */ + ENA_ADMIN_HEADER_RING = 2, +}; + +enum ena_admin_llq_ring_entry_size { + ENA_ADMIN_LIST_ENTRY_SIZE_128B = 1, + ENA_ADMIN_LIST_ENTRY_SIZE_192B = 2, + ENA_ADMIN_LIST_ENTRY_SIZE_256B = 4, +}; + +enum ena_admin_llq_num_descs_before_header { + ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_0 = 0, + ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_1 = 1, + ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2 = 2, + ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_4 = 4, + ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_8 = 8, +}; + +/* packet descriptor list entry always starts with one or more descriptors, + * followed by a header. The rest of the descriptors are located in the + * beginning of the subsequent entry. Stride refers to how the rest of the + * descriptors are placed. This field is relevant only for inline header + * mode + */ +enum ena_admin_llq_stride_ctrl { + ENA_ADMIN_SINGLE_DESC_PER_ENTRY = 1, + ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY = 2, +}; + +struct ena_admin_feature_llq_desc { + u32 max_llq_num; + + u32 max_llq_depth; + + /* specify the header locations the device supports. bitfield of + * enum ena_admin_llq_header_location. + */ + u16 header_location_ctrl_supported; + + /* the header location the driver selected to use. */ + u16 header_location_ctrl_enabled; + + /* if inline header is specified - this is the size of descriptor + * list entry. If header in a separate ring is specified - this is + * the size of header ring entry. bitfield of enum + * ena_admin_llq_ring_entry_size. specify the entry sizes the device + * supports + */ + u16 entry_size_ctrl_supported; + + /* the entry size the driver selected to use. */ + u16 entry_size_ctrl_enabled; + + /* valid only if inline header is specified. First entry associated + * with the packet includes descriptors and header. Rest of the + * entries occupied by descriptors. This parameter defines the max + * number of descriptors precedding the header in the first entry. + * The field is bitfield of enum + * ena_admin_llq_num_descs_before_header and specify the values the + * device supports + */ + u16 desc_num_before_header_supported; + + /* the desire field the driver selected to use */ + u16 desc_num_before_header_enabled; + + /* valid only if inline was chosen. 
bitfield of enum + * ena_admin_llq_stride_ctrl + */ + u16 descriptors_stride_ctrl_supported; + + /* the stride control the driver selected to use */ + u16 descriptors_stride_ctrl_enabled; +}; + struct ena_admin_queue_feature_desc { - /* including LLQs */ u32 max_sq_num; u32 max_sq_depth; @@ -493,9 +535,9 @@ struct ena_admin_queue_feature_desc { u32 max_cq_depth; - u32 max_llq_num; + u32 max_legacy_llq_num; - u32 max_llq_depth; + u32 max_legacy_llq_depth; u32 max_header_size; @@ -583,9 +625,8 @@ struct ena_admin_feature_offload_desc { }; enum ena_admin_hash_functions { - ENA_ADMIN_TOEPLITZ = 1, - - ENA_ADMIN_CRC32 = 2, + ENA_ADMIN_TOEPLITZ = 1, + ENA_ADMIN_CRC32 = 2, }; struct ena_admin_feature_rss_flow_hash_control { @@ -611,50 +652,35 @@ struct ena_admin_feature_rss_flow_hash_function { /* RSS flow hash protocols */ enum ena_admin_flow_hash_proto { - ENA_ADMIN_RSS_TCP4 = 0, - - ENA_ADMIN_RSS_UDP4 = 1, - - ENA_ADMIN_RSS_TCP6 = 2, - - ENA_ADMIN_RSS_UDP6 = 3, - - ENA_ADMIN_RSS_IP4 = 4, - - ENA_ADMIN_RSS_IP6 = 5, - - ENA_ADMIN_RSS_IP4_FRAG = 6, - - ENA_ADMIN_RSS_NOT_IP = 7, - + ENA_ADMIN_RSS_TCP4 = 0, + ENA_ADMIN_RSS_UDP4 = 1, + ENA_ADMIN_RSS_TCP6 = 2, + ENA_ADMIN_RSS_UDP6 = 3, + ENA_ADMIN_RSS_IP4 = 4, + ENA_ADMIN_RSS_IP6 = 5, + ENA_ADMIN_RSS_IP4_FRAG = 6, + ENA_ADMIN_RSS_NOT_IP = 7, /* TCPv6 with extension header */ - ENA_ADMIN_RSS_TCP6_EX = 8, - + ENA_ADMIN_RSS_TCP6_EX = 8, /* IPv6 with extension header */ - ENA_ADMIN_RSS_IP6_EX = 9, - - ENA_ADMIN_RSS_PROTO_NUM = 16, + ENA_ADMIN_RSS_IP6_EX = 9, + ENA_ADMIN_RSS_PROTO_NUM = 16, }; /* RSS flow hash fields */ enum ena_admin_flow_hash_fields { /* Ethernet Dest Addr */ - ENA_ADMIN_RSS_L2_DA = BIT(0), - + ENA_ADMIN_RSS_L2_DA = BIT(0), /* Ethernet Src Addr */ - ENA_ADMIN_RSS_L2_SA = BIT(1), - + ENA_ADMIN_RSS_L2_SA = BIT(1), /* ipv4/6 Dest Addr */ - ENA_ADMIN_RSS_L3_DA = BIT(2), - + ENA_ADMIN_RSS_L3_DA = BIT(2), /* ipv4/6 Src Addr */ - ENA_ADMIN_RSS_L3_SA = BIT(3), - + ENA_ADMIN_RSS_L3_SA = BIT(3), /* tcp/udp Dest Port */ - ENA_ADMIN_RSS_L4_DP = BIT(4), - + ENA_ADMIN_RSS_L4_DP = BIT(4), /* tcp/udp Src Port */ - ENA_ADMIN_RSS_L4_SP = BIT(5), + ENA_ADMIN_RSS_L4_SP = BIT(5), }; struct ena_admin_proto_input { @@ -693,15 +719,13 @@ struct ena_admin_feature_rss_flow_hash_input { }; enum ena_admin_os_type { - ENA_ADMIN_OS_LINUX = 1, - - ENA_ADMIN_OS_WIN = 2, - - ENA_ADMIN_OS_DPDK = 3, - - ENA_ADMIN_OS_FREEBSD = 4, - - ENA_ADMIN_OS_IPXE = 5, + ENA_ADMIN_OS_LINUX = 1, + ENA_ADMIN_OS_WIN = 2, + ENA_ADMIN_OS_DPDK = 3, + ENA_ADMIN_OS_FREEBSD = 4, + ENA_ADMIN_OS_IPXE = 5, + ENA_ADMIN_OS_ESXI = 6, + ENA_ADMIN_OS_GROUPS_NUM = 6, }; struct ena_admin_host_info { @@ -723,11 +747,27 @@ struct ena_admin_host_info { /* 7:0 : major * 15:8 : minor * 23:16 : sub_minor + * 31:24 : module_type */ u32 driver_version; /* features bitmap */ - u32 supported_network_features[4]; + u32 supported_network_features[2]; + + /* ENA spec version of driver */ + u16 ena_spec_version; + + /* ENA device's Bus, Device and Function + * 2:0 : function + * 7:3 : device + * 15:8 : bus + */ + u16 bdf; + + /* Number of CPUs */ + u16 num_cpus; + + u16 reserved; }; struct ena_admin_rss_ind_table_entry { @@ -800,6 +840,8 @@ struct ena_admin_get_feat_resp { struct ena_admin_device_attr_feature_desc dev_attr; + struct ena_admin_feature_llq_desc llq; + struct ena_admin_queue_feature_desc max_queue; struct ena_admin_feature_aenq_desc aenq; @@ -847,6 +889,9 @@ struct ena_admin_set_feat_cmd { /* rss indirection table */ struct ena_admin_feature_rss_ind_table ind_table; + + /* LLQ configuration */ + struct 
ena_admin_feature_llq_desc llq; } u; }; @@ -875,25 +920,18 @@ struct ena_admin_aenq_common_desc { /* asynchronous event notification groups */ enum ena_admin_aenq_group { - ENA_ADMIN_LINK_CHANGE = 0, - - ENA_ADMIN_FATAL_ERROR = 1, - - ENA_ADMIN_WARNING = 2, - - ENA_ADMIN_NOTIFICATION = 3, - - ENA_ADMIN_KEEP_ALIVE = 4, - - ENA_ADMIN_AENQ_GROUPS_NUM = 5, + ENA_ADMIN_LINK_CHANGE = 0, + ENA_ADMIN_FATAL_ERROR = 1, + ENA_ADMIN_WARNING = 2, + ENA_ADMIN_NOTIFICATION = 3, + ENA_ADMIN_KEEP_ALIVE = 4, + ENA_ADMIN_AENQ_GROUPS_NUM = 5, }; enum ena_admin_aenq_notification_syndrom { - ENA_ADMIN_SUSPEND = 0, - - ENA_ADMIN_RESUME = 1, - - ENA_ADMIN_UPDATE_HINTS = 2, + ENA_ADMIN_SUSPEND = 0, + ENA_ADMIN_RESUME = 1, + ENA_ADMIN_UPDATE_HINTS = 2, }; struct ena_admin_aenq_entry { @@ -928,27 +966,27 @@ struct ena_admin_ena_mmio_req_read_less_resp { }; /* aq_common_desc */ -#define ENA_ADMIN_AQ_COMMON_DESC_COMMAND_ID_MASK GENMASK(11, 0) -#define ENA_ADMIN_AQ_COMMON_DESC_PHASE_MASK BIT(0) -#define ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_SHIFT 1 -#define ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_MASK BIT(1) -#define ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_SHIFT 2 -#define ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK BIT(2) +#define ENA_ADMIN_AQ_COMMON_DESC_COMMAND_ID_MASK GENMASK(11, 0) +#define ENA_ADMIN_AQ_COMMON_DESC_PHASE_MASK BIT(0) +#define ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_SHIFT 1 +#define ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_MASK BIT(1) +#define ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_SHIFT 2 +#define ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK BIT(2) /* sq */ -#define ENA_ADMIN_SQ_SQ_DIRECTION_SHIFT 5 -#define ENA_ADMIN_SQ_SQ_DIRECTION_MASK GENMASK(7, 5) +#define ENA_ADMIN_SQ_SQ_DIRECTION_SHIFT 5 +#define ENA_ADMIN_SQ_SQ_DIRECTION_MASK GENMASK(7, 5) /* acq_common_desc */ -#define ENA_ADMIN_ACQ_COMMON_DESC_COMMAND_ID_MASK GENMASK(11, 0) -#define ENA_ADMIN_ACQ_COMMON_DESC_PHASE_MASK BIT(0) +#define ENA_ADMIN_ACQ_COMMON_DESC_COMMAND_ID_MASK GENMASK(11, 0) +#define ENA_ADMIN_ACQ_COMMON_DESC_PHASE_MASK BIT(0) /* aq_create_sq_cmd */ -#define ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_SHIFT 5 -#define ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_MASK GENMASK(7, 5) -#define ENA_ADMIN_AQ_CREATE_SQ_CMD_PLACEMENT_POLICY_MASK GENMASK(3, 0) -#define ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_SHIFT 4 -#define ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_MASK GENMASK(6, 4) +#define ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_SHIFT 5 +#define ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_MASK GENMASK(7, 5) +#define ENA_ADMIN_AQ_CREATE_SQ_CMD_PLACEMENT_POLICY_MASK GENMASK(3, 0) +#define ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_SHIFT 4 +#define ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_MASK GENMASK(6, 4) #define ENA_ADMIN_AQ_CREATE_SQ_CMD_IS_PHYSICALLY_CONTIGUOUS_MASK BIT(0) /* aq_create_cq_cmd */ @@ -957,12 +995,12 @@ struct ena_admin_ena_mmio_req_read_less_resp { #define ENA_ADMIN_AQ_CREATE_CQ_CMD_CQ_ENTRY_SIZE_WORDS_MASK GENMASK(4, 0) /* get_set_feature_common_desc */ -#define ENA_ADMIN_GET_SET_FEATURE_COMMON_DESC_SELECT_MASK GENMASK(1, 0) +#define ENA_ADMIN_GET_SET_FEATURE_COMMON_DESC_SELECT_MASK GENMASK(1, 0) /* get_feature_link_desc */ -#define ENA_ADMIN_GET_FEATURE_LINK_DESC_AUTONEG_MASK BIT(0) -#define ENA_ADMIN_GET_FEATURE_LINK_DESC_DUPLEX_SHIFT 1 -#define ENA_ADMIN_GET_FEATURE_LINK_DESC_DUPLEX_MASK BIT(1) +#define ENA_ADMIN_GET_FEATURE_LINK_DESC_AUTONEG_MASK BIT(0) +#define ENA_ADMIN_GET_FEATURE_LINK_DESC_DUPLEX_SHIFT 1 +#define ENA_ADMIN_GET_FEATURE_LINK_DESC_DUPLEX_MASK BIT(1) /* feature_offload_desc */ #define 
ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L3_CSUM_IPV4_MASK BIT(0) @@ -974,19 +1012,19 @@ struct ena_admin_ena_mmio_req_read_less_resp { #define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_PART_MASK BIT(3) #define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_FULL_SHIFT 4 #define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_FULL_MASK BIT(4) -#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_SHIFT 5 -#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_MASK BIT(5) -#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV6_SHIFT 6 -#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV6_MASK BIT(6) -#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_ECN_SHIFT 7 -#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_ECN_MASK BIT(7) +#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_SHIFT 5 +#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_MASK BIT(5) +#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV6_SHIFT 6 +#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV6_MASK BIT(6) +#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_ECN_SHIFT 7 +#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_ECN_MASK BIT(7) #define ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L3_CSUM_IPV4_MASK BIT(0) #define ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV4_CSUM_SHIFT 1 #define ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV4_CSUM_MASK BIT(1) #define ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV6_CSUM_SHIFT 2 #define ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV6_CSUM_MASK BIT(2) -#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_HASH_SHIFT 3 -#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_HASH_MASK BIT(3) +#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_HASH_SHIFT 3 +#define ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_HASH_MASK BIT(3) /* feature_rss_flow_hash_function */ #define ENA_ADMIN_FEATURE_RSS_FLOW_HASH_FUNCTION_FUNCS_MASK GENMASK(7, 0) @@ -994,25 +1032,32 @@ struct ena_admin_ena_mmio_req_read_less_resp { /* feature_rss_flow_hash_input */ #define ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L3_SORT_SHIFT 1 -#define ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L3_SORT_MASK BIT(1) +#define ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L3_SORT_MASK BIT(1) #define ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L4_SORT_SHIFT 2 -#define ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L4_SORT_MASK BIT(2) +#define ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L4_SORT_MASK BIT(2) #define ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_ENABLE_L3_SORT_SHIFT 1 #define ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_ENABLE_L3_SORT_MASK BIT(1) #define ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_ENABLE_L4_SORT_SHIFT 2 #define ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_ENABLE_L4_SORT_MASK BIT(2) /* host_info */ -#define ENA_ADMIN_HOST_INFO_MAJOR_MASK GENMASK(7, 0) -#define ENA_ADMIN_HOST_INFO_MINOR_SHIFT 8 -#define ENA_ADMIN_HOST_INFO_MINOR_MASK GENMASK(15, 8) -#define ENA_ADMIN_HOST_INFO_SUB_MINOR_SHIFT 16 -#define ENA_ADMIN_HOST_INFO_SUB_MINOR_MASK GENMASK(23, 16) +#define ENA_ADMIN_HOST_INFO_MAJOR_MASK GENMASK(7, 0) +#define ENA_ADMIN_HOST_INFO_MINOR_SHIFT 8 +#define ENA_ADMIN_HOST_INFO_MINOR_MASK GENMASK(15, 8) +#define ENA_ADMIN_HOST_INFO_SUB_MINOR_SHIFT 16 +#define ENA_ADMIN_HOST_INFO_SUB_MINOR_MASK GENMASK(23, 16) +#define ENA_ADMIN_HOST_INFO_MODULE_TYPE_SHIFT 24 +#define ENA_ADMIN_HOST_INFO_MODULE_TYPE_MASK GENMASK(31, 24) +#define ENA_ADMIN_HOST_INFO_FUNCTION_MASK GENMASK(2, 0) +#define ENA_ADMIN_HOST_INFO_DEVICE_SHIFT 3 +#define ENA_ADMIN_HOST_INFO_DEVICE_MASK GENMASK(7, 3) +#define ENA_ADMIN_HOST_INFO_BUS_SHIFT 8 +#define ENA_ADMIN_HOST_INFO_BUS_MASK GENMASK(15, 8) /* aenq_common_desc */ -#define ENA_ADMIN_AENQ_COMMON_DESC_PHASE_MASK BIT(0) +#define ENA_ADMIN_AENQ_COMMON_DESC_PHASE_MASK BIT(0) /* aenq_link_change_desc */ 
-#define ENA_ADMIN_AENQ_LINK_CHANGE_DESC_LINK_STATUS_MASK BIT(0) +#define ENA_ADMIN_AENQ_LINK_CHANGE_DESC_LINK_STATUS_MASK BIT(0) #endif /*_ENA_ADMIN_H_ */ diff --git a/drivers/net/ethernet/amazon/ena/ena_com.c b/drivers/net/ethernet/amazon/ena/ena_com.c index 7635c38e77dd..420cede41ca4 100644 --- a/drivers/net/ethernet/amazon/ena/ena_com.c +++ b/drivers/net/ethernet/amazon/ena/ena_com.c @@ -41,9 +41,6 @@ #define ENA_ASYNC_QUEUE_DEPTH 16 #define ENA_ADMIN_QUEUE_DEPTH 32 -#define MIN_ENA_VER (((ENA_COMMON_SPEC_VERSION_MAJOR) << \ - ENA_REGS_VERSION_MAJOR_VERSION_SHIFT) \ - | (ENA_COMMON_SPEC_VERSION_MINOR)) #define ENA_CTRL_MAJOR 0 #define ENA_CTRL_MINOR 0 @@ -61,6 +58,8 @@ #define ENA_MMIO_READ_TIMEOUT 0xFFFFFFFF +#define ENA_COM_BOUNCE_BUFFER_CNTRL_CNT 4 + #define ENA_REGS_ADMIN_INTR_MASK 1 #define ENA_POLL_MS 5 @@ -236,7 +235,7 @@ static struct ena_comp_ctx *__ena_com_submit_admin_cmd(struct ena_com_admin_queu tail_masked = admin_queue->sq.tail & queue_size_mask; /* In case of queue FULL */ - cnt = atomic_read(&admin_queue->outstanding_cmds); + cnt = (u16)atomic_read(&admin_queue->outstanding_cmds); if (cnt >= admin_queue->q_depth) { pr_debug("admin queue is full.\n"); admin_queue->stats.out_of_space++; @@ -305,7 +304,7 @@ static struct ena_comp_ctx *ena_com_submit_admin_cmd(struct ena_com_admin_queue struct ena_admin_acq_entry *comp, size_t comp_size_in_bytes) { - unsigned long flags; + unsigned long flags = 0; struct ena_comp_ctx *comp_ctx; spin_lock_irqsave(&admin_queue->q_lock, flags); @@ -333,7 +332,7 @@ static int ena_com_init_io_sq(struct ena_com_dev *ena_dev, memset(&io_sq->desc_addr, 0x0, sizeof(io_sq->desc_addr)); - io_sq->dma_addr_bits = ena_dev->dma_addr_bits; + io_sq->dma_addr_bits = (u8)ena_dev->dma_addr_bits; io_sq->desc_entry_size = (io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX) ? 
sizeof(struct ena_eth_io_tx_desc) : @@ -355,21 +354,48 @@ static int ena_com_init_io_sq(struct ena_com_dev *ena_dev, &io_sq->desc_addr.phys_addr, GFP_KERNEL); } - } else { + + if (!io_sq->desc_addr.virt_addr) { + pr_err("memory allocation failed"); + return -ENOMEM; + } + } + + if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) { + /* Allocate bounce buffers */ + io_sq->bounce_buf_ctrl.buffer_size = + ena_dev->llq_info.desc_list_entry_size; + io_sq->bounce_buf_ctrl.buffers_num = + ENA_COM_BOUNCE_BUFFER_CNTRL_CNT; + io_sq->bounce_buf_ctrl.next_to_use = 0; + + size = io_sq->bounce_buf_ctrl.buffer_size * + io_sq->bounce_buf_ctrl.buffers_num; + dev_node = dev_to_node(ena_dev->dmadev); set_dev_node(ena_dev->dmadev, ctx->numa_node); - io_sq->desc_addr.virt_addr = + io_sq->bounce_buf_ctrl.base_buffer = devm_kzalloc(ena_dev->dmadev, size, GFP_KERNEL); set_dev_node(ena_dev->dmadev, dev_node); - if (!io_sq->desc_addr.virt_addr) { - io_sq->desc_addr.virt_addr = + if (!io_sq->bounce_buf_ctrl.base_buffer) + io_sq->bounce_buf_ctrl.base_buffer = devm_kzalloc(ena_dev->dmadev, size, GFP_KERNEL); + + if (!io_sq->bounce_buf_ctrl.base_buffer) { + pr_err("bounce buffer memory allocation failed"); + return -ENOMEM; } - } - if (!io_sq->desc_addr.virt_addr) { - pr_err("memory allocation failed"); - return -ENOMEM; + memcpy(&io_sq->llq_info, &ena_dev->llq_info, + sizeof(io_sq->llq_info)); + + /* Initiate the first bounce buffer */ + io_sq->llq_buf_ctrl.curr_bounce_buf = + ena_com_get_next_bounce_buffer(&io_sq->bounce_buf_ctrl); + memset(io_sq->llq_buf_ctrl.curr_bounce_buf, + 0x0, io_sq->llq_info.desc_list_entry_size); + io_sq->llq_buf_ctrl.descs_left_in_line = + io_sq->llq_info.descs_num_before_header; } io_sq->tail = 0; @@ -460,7 +486,7 @@ static void ena_com_handle_admin_completion(struct ena_com_admin_queue *admin_qu /* Go over all the completions */ while ((READ_ONCE(cqe->acq_common_descriptor.flags) & - ENA_ADMIN_ACQ_COMMON_DESC_PHASE_MASK) == phase) { + ENA_ADMIN_ACQ_COMMON_DESC_PHASE_MASK) == phase) { /* Do not read the rest of the completion entry before the * phase bit was validated */ @@ -511,7 +537,8 @@ static int ena_com_comp_status_to_errno(u8 comp_status) static int ena_com_wait_and_process_admin_cq_polling(struct ena_comp_ctx *comp_ctx, struct ena_com_admin_queue *admin_queue) { - unsigned long flags, timeout; + unsigned long flags = 0; + unsigned long timeout; int ret; timeout = jiffies + usecs_to_jiffies(admin_queue->completion_timeout); @@ -557,10 +584,160 @@ err: return ret; } +/** + * Set the LLQ configurations of the firmware + * + * The driver provides only the enabled feature values to the device, + * which in turn, checks if they are supported. 
+ */ +static int ena_com_set_llq(struct ena_com_dev *ena_dev) +{ + struct ena_com_admin_queue *admin_queue; + struct ena_admin_set_feat_cmd cmd; + struct ena_admin_set_feat_resp resp; + struct ena_com_llq_info *llq_info = &ena_dev->llq_info; + int ret; + + memset(&cmd, 0x0, sizeof(cmd)); + admin_queue = &ena_dev->admin_queue; + + cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE; + cmd.feat_common.feature_id = ENA_ADMIN_LLQ; + + cmd.u.llq.header_location_ctrl_enabled = llq_info->header_location_ctrl; + cmd.u.llq.entry_size_ctrl_enabled = llq_info->desc_list_entry_size_ctrl; + cmd.u.llq.desc_num_before_header_enabled = llq_info->descs_num_before_header; + cmd.u.llq.descriptors_stride_ctrl_enabled = llq_info->desc_stride_ctrl; + + ret = ena_com_execute_admin_command(admin_queue, + (struct ena_admin_aq_entry *)&cmd, + sizeof(cmd), + (struct ena_admin_acq_entry *)&resp, + sizeof(resp)); + + if (unlikely(ret)) + pr_err("Failed to set LLQ configurations: %d\n", ret); + + return ret; +} + +static int ena_com_config_llq_info(struct ena_com_dev *ena_dev, + struct ena_admin_feature_llq_desc *llq_features, + struct ena_llq_configurations *llq_default_cfg) +{ + struct ena_com_llq_info *llq_info = &ena_dev->llq_info; + u16 supported_feat; + int rc; + + memset(llq_info, 0, sizeof(*llq_info)); + + supported_feat = llq_features->header_location_ctrl_supported; + + if (likely(supported_feat & llq_default_cfg->llq_header_location)) { + llq_info->header_location_ctrl = + llq_default_cfg->llq_header_location; + } else { + pr_err("Invalid header location control, supported: 0x%x\n", + supported_feat); + return -EINVAL; + } + + if (likely(llq_info->header_location_ctrl == ENA_ADMIN_INLINE_HEADER)) { + supported_feat = llq_features->descriptors_stride_ctrl_supported; + if (likely(supported_feat & llq_default_cfg->llq_stride_ctrl)) { + llq_info->desc_stride_ctrl = llq_default_cfg->llq_stride_ctrl; + } else { + if (supported_feat & ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY) { + llq_info->desc_stride_ctrl = ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY; + } else if (supported_feat & ENA_ADMIN_SINGLE_DESC_PER_ENTRY) { + llq_info->desc_stride_ctrl = ENA_ADMIN_SINGLE_DESC_PER_ENTRY; + } else { + pr_err("Invalid desc_stride_ctrl, supported: 0x%x\n", + supported_feat); + return -EINVAL; + } + + pr_err("Default llq stride ctrl is not supported, performing fallback, default: 0x%x, supported: 0x%x, used: 0x%x\n", + llq_default_cfg->llq_stride_ctrl, supported_feat, + llq_info->desc_stride_ctrl); + } + } else { + llq_info->desc_stride_ctrl = 0; + } + + supported_feat = llq_features->entry_size_ctrl_supported; + if (likely(supported_feat & llq_default_cfg->llq_ring_entry_size)) { + llq_info->desc_list_entry_size_ctrl = llq_default_cfg->llq_ring_entry_size; + llq_info->desc_list_entry_size = llq_default_cfg->llq_ring_entry_size_value; + } else { + if (supported_feat & ENA_ADMIN_LIST_ENTRY_SIZE_128B) { + llq_info->desc_list_entry_size_ctrl = ENA_ADMIN_LIST_ENTRY_SIZE_128B; + llq_info->desc_list_entry_size = 128; + } else if (supported_feat & ENA_ADMIN_LIST_ENTRY_SIZE_192B) { + llq_info->desc_list_entry_size_ctrl = ENA_ADMIN_LIST_ENTRY_SIZE_192B; + llq_info->desc_list_entry_size = 192; + } else if (supported_feat & ENA_ADMIN_LIST_ENTRY_SIZE_256B) { + llq_info->desc_list_entry_size_ctrl = ENA_ADMIN_LIST_ENTRY_SIZE_256B; + llq_info->desc_list_entry_size = 256; + } else { + pr_err("Invalid entry_size_ctrl, supported: 0x%x\n", + supported_feat); + return -EINVAL; + } + + pr_err("Default llq ring entry size is not supported, performing fallback, 
default: 0x%x, supported: 0x%x, used: 0x%x\n", + llq_default_cfg->llq_ring_entry_size, supported_feat, + llq_info->desc_list_entry_size); + } + if (unlikely(llq_info->desc_list_entry_size & 0x7)) { + /* The desc list entry size should be whole multiply of 8 + * This requirement comes from __iowrite64_copy() + */ + pr_err("illegal entry size %d\n", + llq_info->desc_list_entry_size); + return -EINVAL; + } + + if (llq_info->desc_stride_ctrl == ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY) + llq_info->descs_per_entry = llq_info->desc_list_entry_size / + sizeof(struct ena_eth_io_tx_desc); + else + llq_info->descs_per_entry = 1; + + supported_feat = llq_features->desc_num_before_header_supported; + if (likely(supported_feat & llq_default_cfg->llq_num_decs_before_header)) { + llq_info->descs_num_before_header = llq_default_cfg->llq_num_decs_before_header; + } else { + if (supported_feat & ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2) { + llq_info->descs_num_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2; + } else if (supported_feat & ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_1) { + llq_info->descs_num_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_1; + } else if (supported_feat & ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_4) { + llq_info->descs_num_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_4; + } else if (supported_feat & ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_8) { + llq_info->descs_num_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_8; + } else { + pr_err("Invalid descs_num_before_header, supported: 0x%x\n", + supported_feat); + return -EINVAL; + } + + pr_err("Default llq num descs before header is not supported, performing fallback, default: 0x%x, supported: 0x%x, used: 0x%x\n", + llq_default_cfg->llq_num_decs_before_header, + supported_feat, llq_info->descs_num_before_header); + } + + rc = ena_com_set_llq(ena_dev); + if (rc) + pr_err("Cannot set LLQ configuration: %d\n", rc); + + return 0; +} + static int ena_com_wait_and_process_admin_cq_interrupts(struct ena_comp_ctx *comp_ctx, struct ena_com_admin_queue *admin_queue) { - unsigned long flags; + unsigned long flags = 0; int ret; wait_for_completion_timeout(&comp_ctx->wait_event, @@ -606,7 +783,7 @@ static u32 ena_com_reg_bar_read32(struct ena_com_dev *ena_dev, u16 offset) volatile struct ena_admin_ena_mmio_req_read_less_resp *read_resp = mmio_read->read_resp; u32 mmio_read_reg, ret, i; - unsigned long flags; + unsigned long flags = 0; u32 timeout = mmio_read->reg_read_to; might_sleep(); @@ -728,15 +905,17 @@ static void ena_com_io_queue_free(struct ena_com_dev *ena_dev, if (io_sq->desc_addr.virt_addr) { size = io_sq->desc_entry_size * io_sq->q_depth; - if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST) - dma_free_coherent(ena_dev->dmadev, size, - io_sq->desc_addr.virt_addr, - io_sq->desc_addr.phys_addr); - else - devm_kfree(ena_dev->dmadev, io_sq->desc_addr.virt_addr); + dma_free_coherent(ena_dev->dmadev, size, + io_sq->desc_addr.virt_addr, + io_sq->desc_addr.phys_addr); io_sq->desc_addr.virt_addr = NULL; } + + if (io_sq->bounce_buf_ctrl.base_buffer) { + devm_kfree(ena_dev->dmadev, io_sq->bounce_buf_ctrl.base_buffer); + io_sq->bounce_buf_ctrl.base_buffer = NULL; + } } static int wait_for_reset_state(struct ena_com_dev *ena_dev, u32 timeout, @@ -1248,7 +1427,7 @@ void ena_com_abort_admin_commands(struct ena_com_dev *ena_dev) void ena_com_wait_for_abort_completion(struct ena_com_dev *ena_dev) { struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue; - unsigned long flags; + unsigned long flags = 0; 
spin_lock_irqsave(&admin_queue->q_lock, flags); while (atomic_read(&admin_queue->outstanding_cmds) != 0) { @@ -1292,7 +1471,7 @@ bool ena_com_get_admin_running_state(struct ena_com_dev *ena_dev) void ena_com_set_admin_running_state(struct ena_com_dev *ena_dev, bool state) { struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue; - unsigned long flags; + unsigned long flags = 0; spin_lock_irqsave(&admin_queue->q_lock, flags); ena_dev->admin_queue.running_state = state; @@ -1326,7 +1505,7 @@ int ena_com_set_aenq_config(struct ena_com_dev *ena_dev, u32 groups_flag) } if ((get_resp.u.aenq.supported_groups & groups_flag) != groups_flag) { - pr_warn("Trying to set unsupported aenq events. supported flag: %x asked flag: %x\n", + pr_warn("Trying to set unsupported aenq events. supported flag: 0x%x asked flag: 0x%x\n", get_resp.u.aenq.supported_groups, groups_flag); return -EOPNOTSUPP; } @@ -1400,11 +1579,6 @@ int ena_com_validate_version(struct ena_com_dev *ena_dev) ENA_REGS_VERSION_MAJOR_VERSION_SHIFT, ver & ENA_REGS_VERSION_MINOR_VERSION_MASK); - if (ver < MIN_ENA_VER) { - pr_err("ENA version is lower than the minimal version the driver supports\n"); - return -1; - } - pr_info("ena controller version: %d.%d.%d implementation version %d\n", (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_MASK) >> ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_SHIFT, @@ -1479,7 +1653,7 @@ int ena_com_mmio_reg_read_request_init(struct ena_com_dev *ena_dev) sizeof(*mmio_read->read_resp), &mmio_read->read_resp_dma_addr, GFP_KERNEL); if (unlikely(!mmio_read->read_resp)) - return -ENOMEM; + goto err; ena_com_mmio_reg_read_request_write_dev_addr(ena_dev); @@ -1488,6 +1662,10 @@ int ena_com_mmio_reg_read_request_init(struct ena_com_dev *ena_dev) mmio_read->readless_supported = true; return 0; + +err: + + return -ENOMEM; } void ena_com_set_mmio_read_mode(struct ena_com_dev *ena_dev, bool readless_supported) @@ -1523,8 +1701,7 @@ void ena_com_mmio_reg_read_request_write_dev_addr(struct ena_com_dev *ena_dev) } int ena_com_admin_init(struct ena_com_dev *ena_dev, - struct ena_aenq_handlers *aenq_handlers, - bool init_spinlock) + struct ena_aenq_handlers *aenq_handlers) { struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue; u32 aq_caps, acq_caps, dev_sts, addr_low, addr_high; @@ -1550,8 +1727,7 @@ int ena_com_admin_init(struct ena_com_dev *ena_dev, atomic_set(&admin_queue->outstanding_cmds, 0); - if (init_spinlock) - spin_lock_init(&admin_queue->q_lock); + spin_lock_init(&admin_queue->q_lock); ret = ena_com_init_comp_ctxt(admin_queue); if (ret) @@ -1748,6 +1924,15 @@ int ena_com_get_dev_attr_feat(struct ena_com_dev *ena_dev, else return rc; + rc = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_LLQ); + if (!rc) + memcpy(&get_feat_ctx->llq, &get_resp.u.llq, + sizeof(get_resp.u.llq)); + else if (rc == -EOPNOTSUPP) + memset(&get_feat_ctx->llq, 0x0, sizeof(get_feat_ctx->llq)); + else + return rc; + return 0; } @@ -1779,6 +1964,7 @@ void ena_com_aenq_intr_handler(struct ena_com_dev *dev, void *data) struct ena_admin_aenq_entry *aenq_e; struct ena_admin_aenq_common_desc *aenq_common; struct ena_com_aenq *aenq = &dev->aenq; + unsigned long long timestamp; ena_aenq_handler handler_cb; u16 masked_head, processed = 0; u8 phase; @@ -1796,10 +1982,11 @@ void ena_com_aenq_intr_handler(struct ena_com_dev *dev, void *data) */ dma_rmb(); + timestamp = + (unsigned long long)aenq_common->timestamp_low | + ((unsigned long long)aenq_common->timestamp_high << 32); pr_debug("AENQ! 
Group[%x] Syndrom[%x] timestamp: [%llus]\n", - aenq_common->group, aenq_common->syndrom, - (u64)aenq_common->timestamp_low + - ((u64)aenq_common->timestamp_high << 32)); + aenq_common->group, aenq_common->syndrom, timestamp); /* Handle specific event*/ handler_cb = ena_com_get_specific_aenq_cb(dev, @@ -2441,6 +2628,10 @@ int ena_com_allocate_host_info(struct ena_com_dev *ena_dev) if (unlikely(!host_attr->host_info)) return -ENOMEM; + host_attr->host_info->ena_spec_version = ((ENA_COMMON_SPEC_VERSION_MAJOR << + ENA_REGS_VERSION_MAJOR_VERSION_SHIFT) | + (ENA_COMMON_SPEC_VERSION_MINOR)); + return 0; } @@ -2712,3 +2903,34 @@ void ena_com_get_intr_moderation_entry(struct ena_com_dev *ena_dev, intr_moder_tbl[level].pkts_per_interval; entry->bytes_per_interval = intr_moder_tbl[level].bytes_per_interval; } + +int ena_com_config_dev_mode(struct ena_com_dev *ena_dev, + struct ena_admin_feature_llq_desc *llq_features, + struct ena_llq_configurations *llq_default_cfg) +{ + int rc; + int size; + + if (!llq_features->max_llq_num) { + ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST; + return 0; + } + + rc = ena_com_config_llq_info(ena_dev, llq_features, llq_default_cfg); + if (rc) + return rc; + + /* Validate the descriptor is not too big */ + size = ena_dev->tx_max_header_size; + size += ena_dev->llq_info.descs_num_before_header * + sizeof(struct ena_eth_io_tx_desc); + + if (unlikely(ena_dev->llq_info.desc_list_entry_size < size)) { + pr_err("the size of the LLQ entry is smaller than needed\n"); + return -EINVAL; + } + + ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_DEV; + + return 0; +} diff --git a/drivers/net/ethernet/amazon/ena/ena_com.h b/drivers/net/ethernet/amazon/ena/ena_com.h index 7b784f8a06a6..ae8b4857fce3 100644 --- a/drivers/net/ethernet/amazon/ena/ena_com.h +++ b/drivers/net/ethernet/amazon/ena/ena_com.h @@ -37,6 +37,7 @@ #include <linux/delay.h> #include <linux/dma-mapping.h> #include <linux/gfp.h> +#include <linux/io.h> #include <linux/sched.h> #include <linux/sizes.h> #include <linux/spinlock.h> @@ -108,6 +109,14 @@ enum ena_intr_moder_level { ENA_INTR_MAX_NUM_OF_LEVELS, }; +struct ena_llq_configurations { + enum ena_admin_llq_header_location llq_header_location; + enum ena_admin_llq_ring_entry_size llq_ring_entry_size; + enum ena_admin_llq_stride_ctrl llq_stride_ctrl; + enum ena_admin_llq_num_descs_before_header llq_num_decs_before_header; + u16 llq_ring_entry_size_value; +}; + struct ena_intr_moder_entry { unsigned int intr_moder_interval; unsigned int pkts_per_interval; @@ -142,6 +151,15 @@ struct ena_com_tx_meta { u16 l4_hdr_len; /* In words */ }; +struct ena_com_llq_info { + u16 header_location_ctrl; + u16 desc_stride_ctrl; + u16 desc_list_entry_size_ctrl; + u16 desc_list_entry_size; + u16 descs_num_before_header; + u16 descs_per_entry; +}; + struct ena_com_io_cq { struct ena_com_io_desc_addr cdesc_addr; @@ -179,6 +197,20 @@ struct ena_com_io_cq { } ____cacheline_aligned; +struct ena_com_io_bounce_buffer_control { + u8 *base_buffer; + u16 next_to_use; + u16 buffer_size; + u16 buffers_num; /* Must be a power of 2 */ +}; + +/* This struct is to keep tracking the current location of the next llq entry */ +struct ena_com_llq_pkt_ctrl { + u8 *curr_bounce_buf; + u16 idx; + u16 descs_left_in_line; +}; + struct ena_com_io_sq { struct ena_com_io_desc_addr desc_addr; @@ -190,6 +222,9 @@ struct ena_com_io_sq { u32 msix_vector; struct ena_com_tx_meta cached_tx_meta; + struct ena_com_llq_info llq_info; + struct ena_com_llq_pkt_ctrl llq_buf_ctrl; + struct 
ena_com_io_bounce_buffer_control bounce_buf_ctrl; u16 q_depth; u16 qid; @@ -197,6 +232,7 @@ struct ena_com_io_sq { u16 idx; u16 tail; u16 next_to_comp; + u16 llq_last_copy_tail; u32 tx_max_header_size; u8 phase; u8 desc_entry_size; @@ -334,6 +370,8 @@ struct ena_com_dev { u16 intr_delay_resolution; u32 intr_moder_tx_interval; struct ena_intr_moder_entry *intr_moder_tbl; + + struct ena_com_llq_info llq_info; }; struct ena_com_dev_get_features_ctx { @@ -342,6 +380,7 @@ struct ena_com_dev_get_features_ctx { struct ena_admin_feature_aenq_desc aenq; struct ena_admin_feature_offload_desc offload; struct ena_admin_ena_hw_hints hw_hints; + struct ena_admin_feature_llq_desc llq; }; struct ena_com_create_io_ctx { @@ -397,8 +436,6 @@ void ena_com_mmio_reg_read_request_destroy(struct ena_com_dev *ena_dev); /* ena_com_admin_init - Init the admin and the async queues * @ena_dev: ENA communication layer struct * @aenq_handlers: Those handlers to be called upon event. - * @init_spinlock: Indicate if this method should init the admin spinlock or - * the spinlock was init before (for example, in a case of FLR). * * Initialize the admin submission and completion queues. * Initialize the asynchronous events notification queues. @@ -406,8 +443,7 @@ void ena_com_mmio_reg_read_request_destroy(struct ena_com_dev *ena_dev); * @return - 0 on success, negative value on failure. */ int ena_com_admin_init(struct ena_com_dev *ena_dev, - struct ena_aenq_handlers *aenq_handlers, - bool init_spinlock); + struct ena_aenq_handlers *aenq_handlers); /* ena_com_admin_destroy - Destroy the admin and the async events queues. * @ena_dev: ENA communication layer struct @@ -935,6 +971,16 @@ void ena_com_get_intr_moderation_entry(struct ena_com_dev *ena_dev, enum ena_intr_moder_level level, struct ena_intr_moder_entry *entry); +/* ena_com_config_dev_mode - Configure the placement policy of the device. + * @ena_dev: ENA communication layer struct + * @llq_features: LLQ feature descriptor, retrieve via + * ena_com_get_dev_attr_feat. 
+ * @ena_llq_config: The default driver LLQ parameters configurations + */ +int ena_com_config_dev_mode(struct ena_com_dev *ena_dev, + struct ena_admin_feature_llq_desc *llq_features, + struct ena_llq_configurations *llq_default_config); + static inline bool ena_com_get_adaptive_moderation_enabled(struct ena_com_dev *ena_dev) { return ena_dev->adaptive_coalescing; @@ -1044,4 +1090,21 @@ static inline void ena_com_update_intr_reg(struct ena_eth_io_intr_reg *intr_reg, intr_reg->intr_control |= ENA_ETH_IO_INTR_REG_INTR_UNMASK_MASK; } +static inline u8 *ena_com_get_next_bounce_buffer(struct ena_com_io_bounce_buffer_control *bounce_buf_ctrl) +{ + u16 size, buffers_num; + u8 *buf; + + size = bounce_buf_ctrl->buffer_size; + buffers_num = bounce_buf_ctrl->buffers_num; + + buf = bounce_buf_ctrl->base_buffer + + (bounce_buf_ctrl->next_to_use++ & (buffers_num - 1)) * size; + + prefetchw(bounce_buf_ctrl->base_buffer + + (bounce_buf_ctrl->next_to_use & (buffers_num - 1)) * size); + + return buf; +} + #endif /* !(ENA_COM) */ diff --git a/drivers/net/ethernet/amazon/ena/ena_common_defs.h b/drivers/net/ethernet/amazon/ena/ena_common_defs.h index bb8d73676eab..23beb7e7ed7b 100644 --- a/drivers/net/ethernet/amazon/ena/ena_common_defs.h +++ b/drivers/net/ethernet/amazon/ena/ena_common_defs.h @@ -32,8 +32,8 @@ #ifndef _ENA_COMMON_H_ #define _ENA_COMMON_H_ -#define ENA_COMMON_SPEC_VERSION_MAJOR 0 /* */ -#define ENA_COMMON_SPEC_VERSION_MINOR 10 /* */ +#define ENA_COMMON_SPEC_VERSION_MAJOR 2 +#define ENA_COMMON_SPEC_VERSION_MINOR 0 /* ENA operates with 48-bit memory addresses. ena_mem_addr_t */ struct ena_common_mem_addr { diff --git a/drivers/net/ethernet/amazon/ena/ena_eth_com.c b/drivers/net/ethernet/amazon/ena/ena_eth_com.c index 1c682b76190f..f6c2d3855be8 100644 --- a/drivers/net/ethernet/amazon/ena/ena_eth_com.c +++ b/drivers/net/ethernet/amazon/ena/ena_eth_com.c @@ -59,16 +59,7 @@ static inline struct ena_eth_io_rx_cdesc_base *ena_com_get_next_rx_cdesc( return cdesc; } -static inline void ena_com_cq_inc_head(struct ena_com_io_cq *io_cq) -{ - io_cq->head++; - - /* Switch phase bit in case of wrap around */ - if (unlikely((io_cq->head & (io_cq->q_depth - 1)) == 0)) - io_cq->phase ^= 1; -} - -static inline void *get_sq_desc(struct ena_com_io_sq *io_sq) +static inline void *get_sq_desc_regular_queue(struct ena_com_io_sq *io_sq) { u16 tail_masked; u32 offset; @@ -80,45 +71,159 @@ static inline void *get_sq_desc(struct ena_com_io_sq *io_sq) return (void *)((uintptr_t)io_sq->desc_addr.virt_addr + offset); } -static inline void ena_com_copy_curr_sq_desc_to_dev(struct ena_com_io_sq *io_sq) +static inline int ena_com_write_bounce_buffer_to_dev(struct ena_com_io_sq *io_sq, + u8 *bounce_buffer) { - u16 tail_masked = io_sq->tail & (io_sq->q_depth - 1); - u32 offset = tail_masked * io_sq->desc_entry_size; + struct ena_com_llq_info *llq_info = &io_sq->llq_info; - /* In case this queue isn't a LLQ */ - if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST) - return; + u16 dst_tail_mask; + u32 dst_offset; - memcpy_toio(io_sq->desc_addr.pbuf_dev_addr + offset, - io_sq->desc_addr.virt_addr + offset, - io_sq->desc_entry_size); -} + dst_tail_mask = io_sq->tail & (io_sq->q_depth - 1); + dst_offset = dst_tail_mask * llq_info->desc_list_entry_size; + + /* Make sure everything was written into the bounce buffer before + * writing the bounce buffer to the device + */ + wmb(); + + /* The line is completed. 
Copy it to dev */ + __iowrite64_copy(io_sq->desc_addr.pbuf_dev_addr + dst_offset, + bounce_buffer, (llq_info->desc_list_entry_size) / 8); -static inline void ena_com_sq_update_tail(struct ena_com_io_sq *io_sq) -{ io_sq->tail++; /* Switch phase bit in case of wrap around */ if (unlikely((io_sq->tail & (io_sq->q_depth - 1)) == 0)) io_sq->phase ^= 1; + + return 0; } -static inline int ena_com_write_header(struct ena_com_io_sq *io_sq, - u8 *head_src, u16 header_len) +static inline int ena_com_write_header_to_bounce(struct ena_com_io_sq *io_sq, + u8 *header_src, + u16 header_len) { - u16 tail_masked = io_sq->tail & (io_sq->q_depth - 1); - u8 __iomem *dev_head_addr = - io_sq->header_addr + (tail_masked * io_sq->tx_max_header_size); + struct ena_com_llq_pkt_ctrl *pkt_ctrl = &io_sq->llq_buf_ctrl; + struct ena_com_llq_info *llq_info = &io_sq->llq_info; + u8 *bounce_buffer = pkt_ctrl->curr_bounce_buf; + u16 header_offset; - if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST) + if (unlikely(io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST)) return 0; - if (unlikely(!io_sq->header_addr)) { - pr_err("Push buffer header ptr is NULL\n"); - return -EINVAL; + header_offset = + llq_info->descs_num_before_header * io_sq->desc_entry_size; + + if (unlikely((header_offset + header_len) > + llq_info->desc_list_entry_size)) { + pr_err("trying to write header larger than llq entry can accommodate\n"); + return -EFAULT; + } + + if (unlikely(!bounce_buffer)) { + pr_err("bounce buffer is NULL\n"); + return -EFAULT; + } + + memcpy(bounce_buffer + header_offset, header_src, header_len); + + return 0; +} + +static inline void *get_sq_desc_llq(struct ena_com_io_sq *io_sq) +{ + struct ena_com_llq_pkt_ctrl *pkt_ctrl = &io_sq->llq_buf_ctrl; + u8 *bounce_buffer; + void *sq_desc; + + bounce_buffer = pkt_ctrl->curr_bounce_buf; + + if (unlikely(!bounce_buffer)) { + pr_err("bounce buffer is NULL\n"); + return NULL; + } + + sq_desc = bounce_buffer + pkt_ctrl->idx * io_sq->desc_entry_size; + pkt_ctrl->idx++; + pkt_ctrl->descs_left_in_line--; + + return sq_desc; +} + +static inline int ena_com_close_bounce_buffer(struct ena_com_io_sq *io_sq) +{ + struct ena_com_llq_pkt_ctrl *pkt_ctrl = &io_sq->llq_buf_ctrl; + struct ena_com_llq_info *llq_info = &io_sq->llq_info; + int rc; + + if (unlikely(io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST)) + return 0; + + /* bounce buffer was used, so write it and get a new one */ + if (pkt_ctrl->idx) { + rc = ena_com_write_bounce_buffer_to_dev(io_sq, + pkt_ctrl->curr_bounce_buf); + if (unlikely(rc)) + return rc; + + pkt_ctrl->curr_bounce_buf = + ena_com_get_next_bounce_buffer(&io_sq->bounce_buf_ctrl); + memset(io_sq->llq_buf_ctrl.curr_bounce_buf, + 0x0, llq_info->desc_list_entry_size); + } + + pkt_ctrl->idx = 0; + pkt_ctrl->descs_left_in_line = llq_info->descs_num_before_header; + return 0; +} + +static inline void *get_sq_desc(struct ena_com_io_sq *io_sq) +{ + if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) + return get_sq_desc_llq(io_sq); + + return get_sq_desc_regular_queue(io_sq); +} + +static inline int ena_com_sq_update_llq_tail(struct ena_com_io_sq *io_sq) +{ + struct ena_com_llq_pkt_ctrl *pkt_ctrl = &io_sq->llq_buf_ctrl; + struct ena_com_llq_info *llq_info = &io_sq->llq_info; + int rc; + + if (!pkt_ctrl->descs_left_in_line) { + rc = ena_com_write_bounce_buffer_to_dev(io_sq, + pkt_ctrl->curr_bounce_buf); + if (unlikely(rc)) + return rc; + + pkt_ctrl->curr_bounce_buf = + ena_com_get_next_bounce_buffer(&io_sq->bounce_buf_ctrl); + 
memset(io_sq->llq_buf_ctrl.curr_bounce_buf, + 0x0, llq_info->desc_list_entry_size); + + pkt_ctrl->idx = 0; + if (unlikely(llq_info->desc_stride_ctrl == ENA_ADMIN_SINGLE_DESC_PER_ENTRY)) + pkt_ctrl->descs_left_in_line = 1; + else + pkt_ctrl->descs_left_in_line = + llq_info->desc_list_entry_size / io_sq->desc_entry_size; } - memcpy_toio(dev_head_addr, head_src, header_len); + return 0; +} + +static inline int ena_com_sq_update_tail(struct ena_com_io_sq *io_sq) +{ + if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) + return ena_com_sq_update_llq_tail(io_sq); + + io_sq->tail++; + + /* Switch phase bit in case of wrap around */ + if (unlikely((io_sq->tail & (io_sq->q_depth - 1)) == 0)) + io_sq->phase ^= 1; return 0; } @@ -186,8 +291,8 @@ static inline bool ena_com_meta_desc_changed(struct ena_com_io_sq *io_sq, return false; } -static inline void ena_com_create_and_store_tx_meta_desc(struct ena_com_io_sq *io_sq, - struct ena_com_tx_ctx *ena_tx_ctx) +static inline int ena_com_create_and_store_tx_meta_desc(struct ena_com_io_sq *io_sq, + struct ena_com_tx_ctx *ena_tx_ctx) { struct ena_eth_io_tx_meta_desc *meta_desc = NULL; struct ena_com_tx_meta *ena_meta = &ena_tx_ctx->ena_meta; @@ -232,8 +337,7 @@ static inline void ena_com_create_and_store_tx_meta_desc(struct ena_com_io_sq *i memcpy(&io_sq->cached_tx_meta, ena_meta, sizeof(struct ena_com_tx_meta)); - ena_com_copy_curr_sq_desc_to_dev(io_sq); - ena_com_sq_update_tail(io_sq); + return ena_com_sq_update_tail(io_sq); } static inline void ena_com_rx_set_flags(struct ena_com_rx_ctx *ena_rx_ctx, @@ -245,11 +349,14 @@ static inline void ena_com_rx_set_flags(struct ena_com_rx_ctx *ena_rx_ctx, (cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_MASK) >> ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_SHIFT; ena_rx_ctx->l3_csum_err = - (cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_MASK) >> - ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_SHIFT; + !!((cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_MASK) >> + ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_SHIFT); ena_rx_ctx->l4_csum_err = - (cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_MASK) >> - ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_SHIFT; + !!((cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_MASK) >> + ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_SHIFT); + ena_rx_ctx->l4_csum_checked = + !!((cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_CHECKED_MASK) >> + ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_CHECKED_SHIFT); ena_rx_ctx->hash = cdesc->hash; ena_rx_ctx->frag = (cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_MASK) >> @@ -271,18 +378,19 @@ int ena_com_prepare_tx(struct ena_com_io_sq *io_sq, { struct ena_eth_io_tx_desc *desc = NULL; struct ena_com_buf *ena_bufs = ena_tx_ctx->ena_bufs; - void *push_header = ena_tx_ctx->push_header; + void *buffer_to_push = ena_tx_ctx->push_header; u16 header_len = ena_tx_ctx->header_len; u16 num_bufs = ena_tx_ctx->num_bufs; - int total_desc, i, rc; + u16 start_tail = io_sq->tail; + int i, rc; bool have_meta; u64 addr_hi; WARN(io_sq->direction != ENA_COM_IO_QUEUE_DIRECTION_TX, "wrong Q type"); /* num_bufs +1 for potential meta desc */ - if (ena_com_sq_empty_space(io_sq) < (num_bufs + 1)) { - pr_err("Not enough space in the tx queue\n"); + if (unlikely(!ena_com_sq_have_enough_space(io_sq, num_bufs + 1))) { + pr_debug("Not enough space in the tx queue\n"); return -ENOMEM; } @@ -292,23 +400,32 @@ int ena_com_prepare_tx(struct ena_com_io_sq *io_sq, return -EINVAL; } - /* start with pushing the header (if needed) */ - rc = ena_com_write_header(io_sq, push_header, header_len); + if 
(unlikely(io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV && + !buffer_to_push)) + return -EINVAL; + + rc = ena_com_write_header_to_bounce(io_sq, buffer_to_push, header_len); if (unlikely(rc)) return rc; have_meta = ena_tx_ctx->meta_valid && ena_com_meta_desc_changed(io_sq, ena_tx_ctx); - if (have_meta) - ena_com_create_and_store_tx_meta_desc(io_sq, ena_tx_ctx); + if (have_meta) { + rc = ena_com_create_and_store_tx_meta_desc(io_sq, ena_tx_ctx); + if (unlikely(rc)) + return rc; + } - /* If the caller doesn't want send packets */ + /* If the caller doesn't want to send packets */ if (unlikely(!num_bufs && !header_len)) { - *nb_hw_desc = have_meta ? 0 : 1; - return 0; + rc = ena_com_close_bounce_buffer(io_sq); + *nb_hw_desc = io_sq->tail - start_tail; + return rc; } desc = get_sq_desc(io_sq); + if (unlikely(!desc)) + return -EFAULT; memset(desc, 0x0, sizeof(struct ena_eth_io_tx_desc)); /* Set first desc when we don't have meta descriptor */ @@ -360,10 +477,14 @@ int ena_com_prepare_tx(struct ena_com_io_sq *io_sq, for (i = 0; i < num_bufs; i++) { /* The first desc share the same desc as the header */ if (likely(i != 0)) { - ena_com_copy_curr_sq_desc_to_dev(io_sq); - ena_com_sq_update_tail(io_sq); + rc = ena_com_sq_update_tail(io_sq); + if (unlikely(rc)) + return rc; desc = get_sq_desc(io_sq); + if (unlikely(!desc)) + return -EFAULT; + memset(desc, 0x0, sizeof(struct ena_eth_io_tx_desc)); desc->len_ctrl |= (io_sq->phase << @@ -386,15 +507,14 @@ int ena_com_prepare_tx(struct ena_com_io_sq *io_sq, /* set the last desc indicator */ desc->len_ctrl |= ENA_ETH_IO_TX_DESC_LAST_MASK; - ena_com_copy_curr_sq_desc_to_dev(io_sq); - - ena_com_sq_update_tail(io_sq); + rc = ena_com_sq_update_tail(io_sq); + if (unlikely(rc)) + return rc; - total_desc = max_t(u16, num_bufs, 1); - total_desc += have_meta ? 1 : 0; + rc = ena_com_close_bounce_buffer(io_sq); - *nb_hw_desc = total_desc; - return 0; + *nb_hw_desc = io_sq->tail - start_tail; + return rc; } int ena_com_rx_pkt(struct ena_com_io_cq *io_cq, @@ -453,15 +573,18 @@ int ena_com_add_single_rx_desc(struct ena_com_io_sq *io_sq, WARN(io_sq->direction != ENA_COM_IO_QUEUE_DIRECTION_RX, "wrong Q type"); - if (unlikely(ena_com_sq_empty_space(io_sq) == 0)) + if (unlikely(!ena_com_sq_have_enough_space(io_sq, 1))) return -ENOSPC; desc = get_sq_desc(io_sq); + if (unlikely(!desc)) + return -EFAULT; + memset(desc, 0x0, sizeof(struct ena_eth_io_rx_desc)); desc->length = ena_buf->len; - desc->ctrl |= ENA_ETH_IO_RX_DESC_FIRST_MASK; + desc->ctrl = ENA_ETH_IO_RX_DESC_FIRST_MASK; desc->ctrl |= ENA_ETH_IO_RX_DESC_LAST_MASK; desc->ctrl |= io_sq->phase & ENA_ETH_IO_RX_DESC_PHASE_MASK; desc->ctrl |= ENA_ETH_IO_RX_DESC_COMP_REQ_MASK; @@ -472,43 +595,7 @@ int ena_com_add_single_rx_desc(struct ena_com_io_sq *io_sq, desc->buff_addr_hi = ((ena_buf->paddr & GENMASK_ULL(io_sq->dma_addr_bits - 1, 32)) >> 32); - ena_com_sq_update_tail(io_sq); - - return 0; -} - -int ena_com_tx_comp_req_id_get(struct ena_com_io_cq *io_cq, u16 *req_id) -{ - u8 expected_phase, cdesc_phase; - struct ena_eth_io_tx_cdesc *cdesc; - u16 masked_head; - - masked_head = io_cq->head & (io_cq->q_depth - 1); - expected_phase = io_cq->phase; - - cdesc = (struct ena_eth_io_tx_cdesc *) - ((uintptr_t)io_cq->cdesc_addr.virt_addr + - (masked_head * io_cq->cdesc_entry_size_in_bytes)); - - /* When the current completion descriptor phase isn't the same as the - * expected, it mean that the device still didn't update - * this completion. 
- */ - cdesc_phase = READ_ONCE(cdesc->flags) & ENA_ETH_IO_TX_CDESC_PHASE_MASK; - if (cdesc_phase != expected_phase) - return -EAGAIN; - - dma_rmb(); - if (unlikely(cdesc->req_id >= io_cq->q_depth)) { - pr_err("Invalid req id %d\n", cdesc->req_id); - return -EINVAL; - } - - ena_com_cq_inc_head(io_cq); - - *req_id = READ_ONCE(cdesc->req_id); - - return 0; + return ena_com_sq_update_tail(io_sq); } bool ena_com_cq_empty(struct ena_com_io_cq *io_cq) diff --git a/drivers/net/ethernet/amazon/ena/ena_eth_com.h b/drivers/net/ethernet/amazon/ena/ena_eth_com.h index 2f7657227cfe..340d02b64ca6 100644 --- a/drivers/net/ethernet/amazon/ena/ena_eth_com.h +++ b/drivers/net/ethernet/amazon/ena/ena_eth_com.h @@ -67,6 +67,7 @@ struct ena_com_rx_ctx { enum ena_eth_io_l4_proto_index l4_proto; bool l3_csum_err; bool l4_csum_err; + u8 l4_csum_checked; /* fragmented packet */ bool frag; u32 hash; @@ -86,8 +87,6 @@ int ena_com_add_single_rx_desc(struct ena_com_io_sq *io_sq, struct ena_com_buf *ena_buf, u16 req_id); -int ena_com_tx_comp_req_id_get(struct ena_com_io_cq *io_cq, u16 *req_id); - bool ena_com_cq_empty(struct ena_com_io_cq *io_cq); static inline void ena_com_unmask_intr(struct ena_com_io_cq *io_cq, @@ -96,7 +95,7 @@ static inline void ena_com_unmask_intr(struct ena_com_io_cq *io_cq, writel(intr_reg->intr_control, io_cq->unmask_reg); } -static inline int ena_com_sq_empty_space(struct ena_com_io_sq *io_sq) +static inline int ena_com_free_desc(struct ena_com_io_sq *io_sq) { u16 tail, next_to_comp, cnt; @@ -107,11 +106,28 @@ static inline int ena_com_sq_empty_space(struct ena_com_io_sq *io_sq) return io_sq->q_depth - 1 - cnt; } -static inline int ena_com_write_sq_doorbell(struct ena_com_io_sq *io_sq) +/* Check if the submission queue has enough space to hold required_buffers */ +static inline bool ena_com_sq_have_enough_space(struct ena_com_io_sq *io_sq, + u16 required_buffers) { - u16 tail; + int temp; - tail = io_sq->tail; + if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST) + return ena_com_free_desc(io_sq) >= required_buffers; + + /* This calculation doesn't need to be 100% accurate. So to reduce + * the calculation overhead just subtract 2 lines from the free descs + * (one for the header line and one to compensate for the division + * rounding down). + */ + temp = required_buffers / io_sq->llq_info.descs_per_entry + 2; + + return ena_com_free_desc(io_sq) > temp; +} + +static inline int ena_com_write_sq_doorbell(struct ena_com_io_sq *io_sq) +{ + u16 tail = io_sq->tail; pr_debug("write submission queue doorbell for queue: %d tail: %d\n", io_sq->qid, tail); @@ -159,4 +175,48 @@ static inline void ena_com_comp_ack(struct ena_com_io_sq *io_sq, u16 elem) io_sq->next_to_comp += elem; } +static inline void ena_com_cq_inc_head(struct ena_com_io_cq *io_cq) +{ + io_cq->head++; + + /* Switch phase bit in case of wrap around */ + if (unlikely((io_cq->head & (io_cq->q_depth - 1)) == 0)) + io_cq->phase ^= 1; +} + +static inline int ena_com_tx_comp_req_id_get(struct ena_com_io_cq *io_cq, + u16 *req_id) +{ + u8 expected_phase, cdesc_phase; + struct ena_eth_io_tx_cdesc *cdesc; + u16 masked_head; + + masked_head = io_cq->head & (io_cq->q_depth - 1); + expected_phase = io_cq->phase; + + cdesc = (struct ena_eth_io_tx_cdesc *) + ((uintptr_t)io_cq->cdesc_addr.virt_addr + + (masked_head * io_cq->cdesc_entry_size_in_bytes)); + + /* When the current completion descriptor phase isn't the same as the + * expected, it means that the device hasn't updated + * this completion yet. 
+ */ + cdesc_phase = READ_ONCE(cdesc->flags) & ENA_ETH_IO_TX_CDESC_PHASE_MASK; + if (cdesc_phase != expected_phase) + return -EAGAIN; + + dma_rmb(); + + *req_id = READ_ONCE(cdesc->req_id); + if (unlikely(*req_id >= io_cq->q_depth)) { + pr_err("Invalid req id %d\n", cdesc->req_id); + return -EINVAL; + } + + ena_com_cq_inc_head(io_cq); + + return 0; +} + #endif /* ENA_ETH_COM_H_ */ diff --git a/drivers/net/ethernet/amazon/ena/ena_eth_io_defs.h b/drivers/net/ethernet/amazon/ena/ena_eth_io_defs.h index f320c58793a5..00e0f056a741 100644 --- a/drivers/net/ethernet/amazon/ena/ena_eth_io_defs.h +++ b/drivers/net/ethernet/amazon/ena/ena_eth_io_defs.h @@ -33,25 +33,18 @@ #define _ENA_ETH_IO_H_ enum ena_eth_io_l3_proto_index { - ENA_ETH_IO_L3_PROTO_UNKNOWN = 0, - - ENA_ETH_IO_L3_PROTO_IPV4 = 8, - - ENA_ETH_IO_L3_PROTO_IPV6 = 11, - - ENA_ETH_IO_L3_PROTO_FCOE = 21, - - ENA_ETH_IO_L3_PROTO_ROCE = 22, + ENA_ETH_IO_L3_PROTO_UNKNOWN = 0, + ENA_ETH_IO_L3_PROTO_IPV4 = 8, + ENA_ETH_IO_L3_PROTO_IPV6 = 11, + ENA_ETH_IO_L3_PROTO_FCOE = 21, + ENA_ETH_IO_L3_PROTO_ROCE = 22, }; enum ena_eth_io_l4_proto_index { - ENA_ETH_IO_L4_PROTO_UNKNOWN = 0, - - ENA_ETH_IO_L4_PROTO_TCP = 12, - - ENA_ETH_IO_L4_PROTO_UDP = 13, - - ENA_ETH_IO_L4_PROTO_ROUTEABLE_ROCE = 23, + ENA_ETH_IO_L4_PROTO_UNKNOWN = 0, + ENA_ETH_IO_L4_PROTO_TCP = 12, + ENA_ETH_IO_L4_PROTO_UDP = 13, + ENA_ETH_IO_L4_PROTO_ROUTEABLE_ROCE = 23, }; struct ena_eth_io_tx_desc { @@ -242,9 +235,13 @@ struct ena_eth_io_rx_cdesc_base { * checksum error detected, or, the controller didn't * validate the checksum. This bit is valid only when * l4_proto_idx indicates TCP/UDP packet, and, - * ipv4_frag is not set + * ipv4_frag is not set. This bit is valid only when + * l4_csum_checked below is set. * 15 : ipv4_frag - Indicates IPv4 fragmented packet - * 23:16 : reserved16 + * 16 : l4_csum_checked - L4 checksum was verified + * (could be OK or error), when cleared the status of + * checksum is unknown + * 23:17 : reserved17 - MBZ * 24 : phase * 25 : l3_csum2 - second checksum engine result * 26 : first - Indicates first descriptor in @@ -303,114 +300,116 @@ struct ena_eth_io_numa_node_cfg_reg { }; /* tx_desc */ -#define ENA_ETH_IO_TX_DESC_LENGTH_MASK GENMASK(15, 0) -#define ENA_ETH_IO_TX_DESC_REQ_ID_HI_SHIFT 16 -#define ENA_ETH_IO_TX_DESC_REQ_ID_HI_MASK GENMASK(21, 16) -#define ENA_ETH_IO_TX_DESC_META_DESC_SHIFT 23 -#define ENA_ETH_IO_TX_DESC_META_DESC_MASK BIT(23) -#define ENA_ETH_IO_TX_DESC_PHASE_SHIFT 24 -#define ENA_ETH_IO_TX_DESC_PHASE_MASK BIT(24) -#define ENA_ETH_IO_TX_DESC_FIRST_SHIFT 26 -#define ENA_ETH_IO_TX_DESC_FIRST_MASK BIT(26) -#define ENA_ETH_IO_TX_DESC_LAST_SHIFT 27 -#define ENA_ETH_IO_TX_DESC_LAST_MASK BIT(27) -#define ENA_ETH_IO_TX_DESC_COMP_REQ_SHIFT 28 -#define ENA_ETH_IO_TX_DESC_COMP_REQ_MASK BIT(28) -#define ENA_ETH_IO_TX_DESC_L3_PROTO_IDX_MASK GENMASK(3, 0) -#define ENA_ETH_IO_TX_DESC_DF_SHIFT 4 -#define ENA_ETH_IO_TX_DESC_DF_MASK BIT(4) -#define ENA_ETH_IO_TX_DESC_TSO_EN_SHIFT 7 -#define ENA_ETH_IO_TX_DESC_TSO_EN_MASK BIT(7) -#define ENA_ETH_IO_TX_DESC_L4_PROTO_IDX_SHIFT 8 -#define ENA_ETH_IO_TX_DESC_L4_PROTO_IDX_MASK GENMASK(12, 8) -#define ENA_ETH_IO_TX_DESC_L3_CSUM_EN_SHIFT 13 -#define ENA_ETH_IO_TX_DESC_L3_CSUM_EN_MASK BIT(13) -#define ENA_ETH_IO_TX_DESC_L4_CSUM_EN_SHIFT 14 -#define ENA_ETH_IO_TX_DESC_L4_CSUM_EN_MASK BIT(14) -#define ENA_ETH_IO_TX_DESC_ETHERNET_FCS_DIS_SHIFT 15 -#define ENA_ETH_IO_TX_DESC_ETHERNET_FCS_DIS_MASK BIT(15) -#define ENA_ETH_IO_TX_DESC_L4_CSUM_PARTIAL_SHIFT 17 -#define ENA_ETH_IO_TX_DESC_L4_CSUM_PARTIAL_MASK BIT(17) 
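The phase-bit handshake used by ena_com_cq_inc_head() and ena_com_tx_comp_req_id_get() above is easiest to see in isolation. Below is a minimal, self-contained userspace model of the same protocol; the toy_* names are illustrative stand-ins, not driver API. A completion entry becomes valid once its phase bit matches the consumer's expected phase, and the expected phase flips each time the head wraps around the power-of-two ring.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define TOY_Q_DEPTH 4           /* power of two, as the driver assumes */
#define TOY_PHASE_MASK 0x1

struct toy_cdesc {
        uint8_t flags;          /* bit 0: phase, written by the "device" */
        uint16_t req_id;
};

struct toy_cq {
        struct toy_cdesc ring[TOY_Q_DEPTH];
        uint16_t head;          /* free-running; masked on access */
        uint8_t phase;          /* expected phase, starts at 1 */
};

/* Mirrors ena_com_tx_comp_req_id_get(): returns -1 when no new completion */
static int toy_tx_comp_get(struct toy_cq *cq, uint16_t *req_id)
{
        struct toy_cdesc *cdesc = &cq->ring[cq->head & (TOY_Q_DEPTH - 1)];

        if ((cdesc->flags & TOY_PHASE_MASK) != cq->phase)
                return -1;      /* device has not written this entry yet */

        *req_id = cdesc->req_id;

        /* ena_com_cq_inc_head(): advance, flip expected phase on wrap */
        if ((++cq->head & (TOY_Q_DEPTH - 1)) == 0)
                cq->phase ^= 1;

        return 0;
}

int main(void)
{
        struct toy_cq cq = { .phase = 1 };
        uint16_t req_id = 0;

        cq.ring[0] = (struct toy_cdesc){ .flags = 1, .req_id = 7 };
        printf("poll 1: rc=%d req_id=%u\n", toy_tx_comp_get(&cq, &req_id), req_id);
        printf("poll 2: rc=%d (entry 1 still carries the stale phase)\n",
               toy_tx_comp_get(&cq, &req_id));
        return 0;
}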
-#define ENA_ETH_IO_TX_DESC_REQ_ID_LO_SHIFT 22 -#define ENA_ETH_IO_TX_DESC_REQ_ID_LO_MASK GENMASK(31, 22) -#define ENA_ETH_IO_TX_DESC_ADDR_HI_MASK GENMASK(15, 0) -#define ENA_ETH_IO_TX_DESC_HEADER_LENGTH_SHIFT 24 -#define ENA_ETH_IO_TX_DESC_HEADER_LENGTH_MASK GENMASK(31, 24) +#define ENA_ETH_IO_TX_DESC_LENGTH_MASK GENMASK(15, 0) +#define ENA_ETH_IO_TX_DESC_REQ_ID_HI_SHIFT 16 +#define ENA_ETH_IO_TX_DESC_REQ_ID_HI_MASK GENMASK(21, 16) +#define ENA_ETH_IO_TX_DESC_META_DESC_SHIFT 23 +#define ENA_ETH_IO_TX_DESC_META_DESC_MASK BIT(23) +#define ENA_ETH_IO_TX_DESC_PHASE_SHIFT 24 +#define ENA_ETH_IO_TX_DESC_PHASE_MASK BIT(24) +#define ENA_ETH_IO_TX_DESC_FIRST_SHIFT 26 +#define ENA_ETH_IO_TX_DESC_FIRST_MASK BIT(26) +#define ENA_ETH_IO_TX_DESC_LAST_SHIFT 27 +#define ENA_ETH_IO_TX_DESC_LAST_MASK BIT(27) +#define ENA_ETH_IO_TX_DESC_COMP_REQ_SHIFT 28 +#define ENA_ETH_IO_TX_DESC_COMP_REQ_MASK BIT(28) +#define ENA_ETH_IO_TX_DESC_L3_PROTO_IDX_MASK GENMASK(3, 0) +#define ENA_ETH_IO_TX_DESC_DF_SHIFT 4 +#define ENA_ETH_IO_TX_DESC_DF_MASK BIT(4) +#define ENA_ETH_IO_TX_DESC_TSO_EN_SHIFT 7 +#define ENA_ETH_IO_TX_DESC_TSO_EN_MASK BIT(7) +#define ENA_ETH_IO_TX_DESC_L4_PROTO_IDX_SHIFT 8 +#define ENA_ETH_IO_TX_DESC_L4_PROTO_IDX_MASK GENMASK(12, 8) +#define ENA_ETH_IO_TX_DESC_L3_CSUM_EN_SHIFT 13 +#define ENA_ETH_IO_TX_DESC_L3_CSUM_EN_MASK BIT(13) +#define ENA_ETH_IO_TX_DESC_L4_CSUM_EN_SHIFT 14 +#define ENA_ETH_IO_TX_DESC_L4_CSUM_EN_MASK BIT(14) +#define ENA_ETH_IO_TX_DESC_ETHERNET_FCS_DIS_SHIFT 15 +#define ENA_ETH_IO_TX_DESC_ETHERNET_FCS_DIS_MASK BIT(15) +#define ENA_ETH_IO_TX_DESC_L4_CSUM_PARTIAL_SHIFT 17 +#define ENA_ETH_IO_TX_DESC_L4_CSUM_PARTIAL_MASK BIT(17) +#define ENA_ETH_IO_TX_DESC_REQ_ID_LO_SHIFT 22 +#define ENA_ETH_IO_TX_DESC_REQ_ID_LO_MASK GENMASK(31, 22) +#define ENA_ETH_IO_TX_DESC_ADDR_HI_MASK GENMASK(15, 0) +#define ENA_ETH_IO_TX_DESC_HEADER_LENGTH_SHIFT 24 +#define ENA_ETH_IO_TX_DESC_HEADER_LENGTH_MASK GENMASK(31, 24) /* tx_meta_desc */ -#define ENA_ETH_IO_TX_META_DESC_REQ_ID_LO_MASK GENMASK(9, 0) -#define ENA_ETH_IO_TX_META_DESC_EXT_VALID_SHIFT 14 -#define ENA_ETH_IO_TX_META_DESC_EXT_VALID_MASK BIT(14) -#define ENA_ETH_IO_TX_META_DESC_MSS_HI_SHIFT 16 -#define ENA_ETH_IO_TX_META_DESC_MSS_HI_MASK GENMASK(19, 16) -#define ENA_ETH_IO_TX_META_DESC_ETH_META_TYPE_SHIFT 20 -#define ENA_ETH_IO_TX_META_DESC_ETH_META_TYPE_MASK BIT(20) -#define ENA_ETH_IO_TX_META_DESC_META_STORE_SHIFT 21 -#define ENA_ETH_IO_TX_META_DESC_META_STORE_MASK BIT(21) -#define ENA_ETH_IO_TX_META_DESC_META_DESC_SHIFT 23 -#define ENA_ETH_IO_TX_META_DESC_META_DESC_MASK BIT(23) -#define ENA_ETH_IO_TX_META_DESC_PHASE_SHIFT 24 -#define ENA_ETH_IO_TX_META_DESC_PHASE_MASK BIT(24) -#define ENA_ETH_IO_TX_META_DESC_FIRST_SHIFT 26 -#define ENA_ETH_IO_TX_META_DESC_FIRST_MASK BIT(26) -#define ENA_ETH_IO_TX_META_DESC_LAST_SHIFT 27 -#define ENA_ETH_IO_TX_META_DESC_LAST_MASK BIT(27) -#define ENA_ETH_IO_TX_META_DESC_COMP_REQ_SHIFT 28 -#define ENA_ETH_IO_TX_META_DESC_COMP_REQ_MASK BIT(28) -#define ENA_ETH_IO_TX_META_DESC_REQ_ID_HI_MASK GENMASK(5, 0) -#define ENA_ETH_IO_TX_META_DESC_L3_HDR_LEN_MASK GENMASK(7, 0) -#define ENA_ETH_IO_TX_META_DESC_L3_HDR_OFF_SHIFT 8 -#define ENA_ETH_IO_TX_META_DESC_L3_HDR_OFF_MASK GENMASK(15, 8) -#define ENA_ETH_IO_TX_META_DESC_L4_HDR_LEN_IN_WORDS_SHIFT 16 -#define ENA_ETH_IO_TX_META_DESC_L4_HDR_LEN_IN_WORDS_MASK GENMASK(21, 16) -#define ENA_ETH_IO_TX_META_DESC_MSS_LO_SHIFT 22 -#define ENA_ETH_IO_TX_META_DESC_MSS_LO_MASK GENMASK(31, 22) +#define ENA_ETH_IO_TX_META_DESC_REQ_ID_LO_MASK GENMASK(9, 0) +#define 
ENA_ETH_IO_TX_META_DESC_EXT_VALID_SHIFT 14 +#define ENA_ETH_IO_TX_META_DESC_EXT_VALID_MASK BIT(14) +#define ENA_ETH_IO_TX_META_DESC_MSS_HI_SHIFT 16 +#define ENA_ETH_IO_TX_META_DESC_MSS_HI_MASK GENMASK(19, 16) +#define ENA_ETH_IO_TX_META_DESC_ETH_META_TYPE_SHIFT 20 +#define ENA_ETH_IO_TX_META_DESC_ETH_META_TYPE_MASK BIT(20) +#define ENA_ETH_IO_TX_META_DESC_META_STORE_SHIFT 21 +#define ENA_ETH_IO_TX_META_DESC_META_STORE_MASK BIT(21) +#define ENA_ETH_IO_TX_META_DESC_META_DESC_SHIFT 23 +#define ENA_ETH_IO_TX_META_DESC_META_DESC_MASK BIT(23) +#define ENA_ETH_IO_TX_META_DESC_PHASE_SHIFT 24 +#define ENA_ETH_IO_TX_META_DESC_PHASE_MASK BIT(24) +#define ENA_ETH_IO_TX_META_DESC_FIRST_SHIFT 26 +#define ENA_ETH_IO_TX_META_DESC_FIRST_MASK BIT(26) +#define ENA_ETH_IO_TX_META_DESC_LAST_SHIFT 27 +#define ENA_ETH_IO_TX_META_DESC_LAST_MASK BIT(27) +#define ENA_ETH_IO_TX_META_DESC_COMP_REQ_SHIFT 28 +#define ENA_ETH_IO_TX_META_DESC_COMP_REQ_MASK BIT(28) +#define ENA_ETH_IO_TX_META_DESC_REQ_ID_HI_MASK GENMASK(5, 0) +#define ENA_ETH_IO_TX_META_DESC_L3_HDR_LEN_MASK GENMASK(7, 0) +#define ENA_ETH_IO_TX_META_DESC_L3_HDR_OFF_SHIFT 8 +#define ENA_ETH_IO_TX_META_DESC_L3_HDR_OFF_MASK GENMASK(15, 8) +#define ENA_ETH_IO_TX_META_DESC_L4_HDR_LEN_IN_WORDS_SHIFT 16 +#define ENA_ETH_IO_TX_META_DESC_L4_HDR_LEN_IN_WORDS_MASK GENMASK(21, 16) +#define ENA_ETH_IO_TX_META_DESC_MSS_LO_SHIFT 22 +#define ENA_ETH_IO_TX_META_DESC_MSS_LO_MASK GENMASK(31, 22) /* tx_cdesc */ -#define ENA_ETH_IO_TX_CDESC_PHASE_MASK BIT(0) +#define ENA_ETH_IO_TX_CDESC_PHASE_MASK BIT(0) /* rx_desc */ -#define ENA_ETH_IO_RX_DESC_PHASE_MASK BIT(0) -#define ENA_ETH_IO_RX_DESC_FIRST_SHIFT 2 -#define ENA_ETH_IO_RX_DESC_FIRST_MASK BIT(2) -#define ENA_ETH_IO_RX_DESC_LAST_SHIFT 3 -#define ENA_ETH_IO_RX_DESC_LAST_MASK BIT(3) -#define ENA_ETH_IO_RX_DESC_COMP_REQ_SHIFT 4 -#define ENA_ETH_IO_RX_DESC_COMP_REQ_MASK BIT(4) +#define ENA_ETH_IO_RX_DESC_PHASE_MASK BIT(0) +#define ENA_ETH_IO_RX_DESC_FIRST_SHIFT 2 +#define ENA_ETH_IO_RX_DESC_FIRST_MASK BIT(2) +#define ENA_ETH_IO_RX_DESC_LAST_SHIFT 3 +#define ENA_ETH_IO_RX_DESC_LAST_MASK BIT(3) +#define ENA_ETH_IO_RX_DESC_COMP_REQ_SHIFT 4 +#define ENA_ETH_IO_RX_DESC_COMP_REQ_MASK BIT(4) /* rx_cdesc_base */ -#define ENA_ETH_IO_RX_CDESC_BASE_L3_PROTO_IDX_MASK GENMASK(4, 0) -#define ENA_ETH_IO_RX_CDESC_BASE_SRC_VLAN_CNT_SHIFT 5 -#define ENA_ETH_IO_RX_CDESC_BASE_SRC_VLAN_CNT_MASK GENMASK(6, 5) -#define ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_SHIFT 8 -#define ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_MASK GENMASK(12, 8) -#define ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_SHIFT 13 -#define ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_MASK BIT(13) -#define ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_SHIFT 14 -#define ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_MASK BIT(14) -#define ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_SHIFT 15 -#define ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_MASK BIT(15) -#define ENA_ETH_IO_RX_CDESC_BASE_PHASE_SHIFT 24 -#define ENA_ETH_IO_RX_CDESC_BASE_PHASE_MASK BIT(24) -#define ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM2_SHIFT 25 -#define ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM2_MASK BIT(25) -#define ENA_ETH_IO_RX_CDESC_BASE_FIRST_SHIFT 26 -#define ENA_ETH_IO_RX_CDESC_BASE_FIRST_MASK BIT(26) -#define ENA_ETH_IO_RX_CDESC_BASE_LAST_SHIFT 27 -#define ENA_ETH_IO_RX_CDESC_BASE_LAST_MASK BIT(27) -#define ENA_ETH_IO_RX_CDESC_BASE_BUFFER_SHIFT 30 -#define ENA_ETH_IO_RX_CDESC_BASE_BUFFER_MASK BIT(30) +#define ENA_ETH_IO_RX_CDESC_BASE_L3_PROTO_IDX_MASK GENMASK(4, 0) +#define ENA_ETH_IO_RX_CDESC_BASE_SRC_VLAN_CNT_SHIFT 5 +#define ENA_ETH_IO_RX_CDESC_BASE_SRC_VLAN_CNT_MASK GENMASK(6, 5) 
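These rx_cdesc_base MASK/SHIFT pairs are consumed with one fixed pattern, as in ena_com_rx_set_flags() earlier in the patch: mask, shift, and, for single-bit fields such as the new l4_csum_checked, a !!(...) to normalize the result to 0/1. A standalone sketch of that pattern follows; the BIT()/GENMASK() stand-ins and the sample status word are assumptions for illustration only.

#include <stdint.h>
#include <stdio.h>

/* Userspace stand-ins for the kernel's BIT()/GENMASK() (32-bit fields) */
#define BIT(n)          (1u << (n))
#define GENMASK(h, l)   (((~0u) >> (31 - (h))) & ~((1u << (l)) - 1u))

#define L4_PROTO_IDX_SHIFT      8
#define L4_PROTO_IDX_MASK       GENMASK(12, 8)
#define L4_CSUM_ERR_SHIFT       14
#define L4_CSUM_ERR_MASK        BIT(14)
#define L4_CSUM_CHECKED_SHIFT   16
#define L4_CSUM_CHECKED_MASK    BIT(16)

int main(void)
{
        /* Sample status word: TCP (12, per the l4_proto enum above),
         * checksum checked, no checksum error.
         */
        uint32_t status = (12u << L4_PROTO_IDX_SHIFT) | L4_CSUM_CHECKED_MASK;

        unsigned int l4_proto =
                (status & L4_PROTO_IDX_MASK) >> L4_PROTO_IDX_SHIFT;
        /* !! collapses the extracted bit to 0/1 so it fits a bool/u8 flag */
        unsigned int csum_err =
                !!((status & L4_CSUM_ERR_MASK) >> L4_CSUM_ERR_SHIFT);
        unsigned int csum_checked =
                !!((status & L4_CSUM_CHECKED_MASK) >> L4_CSUM_CHECKED_SHIFT);

        printf("l4_proto=%u csum_err=%u csum_checked=%u\n",
               l4_proto, csum_err, csum_checked);
        return 0;
}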
+#define ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_SHIFT 8 +#define ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_MASK GENMASK(12, 8) +#define ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_SHIFT 13 +#define ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_MASK BIT(13) +#define ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_SHIFT 14 +#define ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_MASK BIT(14) +#define ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_SHIFT 15 +#define ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_MASK BIT(15) +#define ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_CHECKED_SHIFT 16 +#define ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_CHECKED_MASK BIT(16) +#define ENA_ETH_IO_RX_CDESC_BASE_PHASE_SHIFT 24 +#define ENA_ETH_IO_RX_CDESC_BASE_PHASE_MASK BIT(24) +#define ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM2_SHIFT 25 +#define ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM2_MASK BIT(25) +#define ENA_ETH_IO_RX_CDESC_BASE_FIRST_SHIFT 26 +#define ENA_ETH_IO_RX_CDESC_BASE_FIRST_MASK BIT(26) +#define ENA_ETH_IO_RX_CDESC_BASE_LAST_SHIFT 27 +#define ENA_ETH_IO_RX_CDESC_BASE_LAST_MASK BIT(27) +#define ENA_ETH_IO_RX_CDESC_BASE_BUFFER_SHIFT 30 +#define ENA_ETH_IO_RX_CDESC_BASE_BUFFER_MASK BIT(30) /* intr_reg */ -#define ENA_ETH_IO_INTR_REG_RX_INTR_DELAY_MASK GENMASK(14, 0) -#define ENA_ETH_IO_INTR_REG_TX_INTR_DELAY_SHIFT 15 -#define ENA_ETH_IO_INTR_REG_TX_INTR_DELAY_MASK GENMASK(29, 15) -#define ENA_ETH_IO_INTR_REG_INTR_UNMASK_SHIFT 30 -#define ENA_ETH_IO_INTR_REG_INTR_UNMASK_MASK BIT(30) +#define ENA_ETH_IO_INTR_REG_RX_INTR_DELAY_MASK GENMASK(14, 0) +#define ENA_ETH_IO_INTR_REG_TX_INTR_DELAY_SHIFT 15 +#define ENA_ETH_IO_INTR_REG_TX_INTR_DELAY_MASK GENMASK(29, 15) +#define ENA_ETH_IO_INTR_REG_INTR_UNMASK_SHIFT 30 +#define ENA_ETH_IO_INTR_REG_INTR_UNMASK_MASK BIT(30) /* numa_node_cfg_reg */ -#define ENA_ETH_IO_NUMA_NODE_CFG_REG_NUMA_MASK GENMASK(7, 0) -#define ENA_ETH_IO_NUMA_NODE_CFG_REG_ENABLED_SHIFT 31 -#define ENA_ETH_IO_NUMA_NODE_CFG_REG_ENABLED_MASK BIT(31) +#define ENA_ETH_IO_NUMA_NODE_CFG_REG_NUMA_MASK GENMASK(7, 0) +#define ENA_ETH_IO_NUMA_NODE_CFG_REG_ENABLED_SHIFT 31 +#define ENA_ETH_IO_NUMA_NODE_CFG_REG_ENABLED_MASK BIT(31) #endif /*_ENA_ETH_IO_H_ */ diff --git a/drivers/net/ethernet/amazon/ena/ena_ethtool.c b/drivers/net/ethernet/amazon/ena/ena_ethtool.c index 521607bc4393..f3a5a384e6e8 100644 --- a/drivers/net/ethernet/amazon/ena/ena_ethtool.c +++ b/drivers/net/ethernet/amazon/ena/ena_ethtool.c @@ -81,6 +81,7 @@ static const struct ena_stats ena_stats_tx_strings[] = { ENA_STAT_TX_ENTRY(doorbells), ENA_STAT_TX_ENTRY(prepare_ctx_err), ENA_STAT_TX_ENTRY(bad_req_id), + ENA_STAT_TX_ENTRY(llq_buffer_copy), ENA_STAT_TX_ENTRY(missed_tx), }; @@ -96,6 +97,7 @@ static const struct ena_stats ena_stats_rx_strings[] = { ENA_STAT_RX_ENTRY(rx_copybreak_pkt), ENA_STAT_RX_ENTRY(bad_req_id), ENA_STAT_RX_ENTRY(empty_rx_ring), + ENA_STAT_RX_ENTRY(csum_unchecked), }; static const struct ena_stats ena_stats_ena_com_strings[] = { diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c index b2522e84f482..284a0a612131 100644 --- a/drivers/net/ethernet/amazon/ena/ena_netdev.c +++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c @@ -237,6 +237,17 @@ static int ena_setup_tx_resources(struct ena_adapter *adapter, int qid) } } + size = tx_ring->tx_max_header_size; + tx_ring->push_buf_intermediate_buf = vzalloc_node(size, node); + if (!tx_ring->push_buf_intermediate_buf) { + tx_ring->push_buf_intermediate_buf = vzalloc(size); + if (!tx_ring->push_buf_intermediate_buf) { + vfree(tx_ring->tx_buffer_info); + vfree(tx_ring->free_tx_ids); + return -ENOMEM; + } + } + /* Req id ring 
for TX out of order completions */ for (i = 0; i < tx_ring->ring_size; i++) tx_ring->free_tx_ids[i] = i; @@ -265,6 +276,9 @@ static void ena_free_tx_resources(struct ena_adapter *adapter, int qid) vfree(tx_ring->free_tx_ids); tx_ring->free_tx_ids = NULL; + + vfree(tx_ring->push_buf_intermediate_buf); + tx_ring->push_buf_intermediate_buf = NULL; } /* ena_setup_all_tx_resources - allocate I/O Tx queues resources for All queues @@ -602,6 +616,36 @@ static void ena_free_all_rx_bufs(struct ena_adapter *adapter) ena_free_rx_bufs(adapter, i); } +static inline void ena_unmap_tx_skb(struct ena_ring *tx_ring, + struct ena_tx_buffer *tx_info) +{ + struct ena_com_buf *ena_buf; + u32 cnt; + int i; + + ena_buf = tx_info->bufs; + cnt = tx_info->num_of_bufs; + + if (unlikely(!cnt)) + return; + + if (tx_info->map_linear_data) { + dma_unmap_single(tx_ring->dev, + dma_unmap_addr(ena_buf, paddr), + dma_unmap_len(ena_buf, len), + DMA_TO_DEVICE); + ena_buf++; + cnt--; + } + + /* unmap remaining mapped pages */ + for (i = 0; i < cnt; i++) { + dma_unmap_page(tx_ring->dev, dma_unmap_addr(ena_buf, paddr), + dma_unmap_len(ena_buf, len), DMA_TO_DEVICE); + ena_buf++; + } +} + /* ena_free_tx_bufs - Free Tx Buffers per Queue * @tx_ring: TX ring for which buffers be freed */ @@ -612,9 +656,6 @@ static void ena_free_tx_bufs(struct ena_ring *tx_ring) for (i = 0; i < tx_ring->ring_size; i++) { struct ena_tx_buffer *tx_info = &tx_ring->tx_buffer_info[i]; - struct ena_com_buf *ena_buf; - int nr_frags; - int j; if (!tx_info->skb) continue; @@ -630,21 +671,7 @@ static void ena_free_tx_bufs(struct ena_ring *tx_ring) tx_ring->qid, i); } - ena_buf = tx_info->bufs; - dma_unmap_single(tx_ring->dev, - ena_buf->paddr, - ena_buf->len, - DMA_TO_DEVICE); - - /* unmap remaining mapped pages */ - nr_frags = tx_info->num_of_bufs - 1; - for (j = 0; j < nr_frags; j++) { - ena_buf++; - dma_unmap_page(tx_ring->dev, - ena_buf->paddr, - ena_buf->len, - DMA_TO_DEVICE); - } + ena_unmap_tx_skb(tx_ring, tx_info); dev_kfree_skb_any(tx_info->skb); } @@ -735,8 +762,6 @@ static int ena_clean_tx_irq(struct ena_ring *tx_ring, u32 budget) while (tx_pkts < budget) { struct ena_tx_buffer *tx_info; struct sk_buff *skb; - struct ena_com_buf *ena_buf; - int i, nr_frags; rc = ena_com_tx_comp_req_id_get(tx_ring->ena_com_io_cq, &req_id); @@ -756,24 +781,7 @@ static int ena_clean_tx_irq(struct ena_ring *tx_ring, u32 budget) tx_info->skb = NULL; tx_info->last_jiffies = 0; - if (likely(tx_info->num_of_bufs != 0)) { - ena_buf = tx_info->bufs; - - dma_unmap_single(tx_ring->dev, - dma_unmap_addr(ena_buf, paddr), - dma_unmap_len(ena_buf, len), - DMA_TO_DEVICE); - - /* unmap remaining mapped pages */ - nr_frags = tx_info->num_of_bufs - 1; - for (i = 0; i < nr_frags; i++) { - ena_buf++; - dma_unmap_page(tx_ring->dev, - dma_unmap_addr(ena_buf, paddr), - dma_unmap_len(ena_buf, len), - DMA_TO_DEVICE); - } - } + ena_unmap_tx_skb(tx_ring, tx_info); netif_dbg(tx_ring->adapter, tx_done, tx_ring->netdev, "tx_poll: q %d skb %p completed\n", tx_ring->qid, @@ -804,12 +812,13 @@ static int ena_clean_tx_irq(struct ena_ring *tx_ring, u32 budget) */ smp_mb(); - above_thresh = ena_com_sq_empty_space(tx_ring->ena_com_io_sq) > - ENA_TX_WAKEUP_THRESH; + above_thresh = ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq, + ENA_TX_WAKEUP_THRESH); if (unlikely(netif_tx_queue_stopped(txq) && above_thresh)) { __netif_tx_lock(txq, smp_processor_id()); - above_thresh = ena_com_sq_empty_space(tx_ring->ena_com_io_sq) > - ENA_TX_WAKEUP_THRESH; + above_thresh = + 
ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq, + ENA_TX_WAKEUP_THRESH); if (netif_tx_queue_stopped(txq) && above_thresh) { netif_tx_wake_queue(txq); u64_stats_update_begin(&tx_ring->syncp); @@ -985,8 +994,19 @@ static inline void ena_rx_checksum(struct ena_ring *rx_ring, return; } - skb->ip_summed = CHECKSUM_UNNECESSARY; + if (likely(ena_rx_ctx->l4_csum_checked)) { + skb->ip_summed = CHECKSUM_UNNECESSARY; + } else { + u64_stats_update_begin(&rx_ring->syncp); + rx_ring->rx_stats.csum_unchecked++; + u64_stats_update_end(&rx_ring->syncp); + skb->ip_summed = CHECKSUM_NONE; + } + } else { + skb->ip_summed = CHECKSUM_NONE; + return; } + } static void ena_set_rx_hash(struct ena_ring *rx_ring, @@ -1101,8 +1121,10 @@ static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi, rx_ring->next_to_clean = next_to_clean; - refill_required = ena_com_sq_empty_space(rx_ring->ena_com_io_sq); - refill_threshold = rx_ring->ring_size / ENA_RX_REFILL_THRESH_DIVIDER; + refill_required = ena_com_free_desc(rx_ring->ena_com_io_sq); + refill_threshold = + min_t(int, rx_ring->ring_size / ENA_RX_REFILL_THRESH_DIVIDER, + ENA_RX_REFILL_THRESH_PACKET); /* Optimization, try to batch new rx buffers */ if (refill_required > refill_threshold) { @@ -1299,7 +1321,6 @@ static int ena_enable_msix(struct ena_adapter *adapter, int num_queues) /* Reserved the max msix vectors we might need */ msix_vecs = ENA_MAX_MSIX_VEC(num_queues); - netif_dbg(adapter, probe, adapter->netdev, "trying to enable MSI-X, vectors %d\n", msix_vecs); @@ -1574,8 +1595,6 @@ static int ena_up_complete(struct ena_adapter *adapter) if (rc) return rc; - ena_init_napi(adapter); - ena_change_mtu(adapter->netdev, adapter->netdev->mtu); ena_refill_all_rx_bufs(adapter); @@ -1592,7 +1611,7 @@ static int ena_up_complete(struct ena_adapter *adapter) static int ena_create_io_tx_queue(struct ena_adapter *adapter, int qid) { - struct ena_com_create_io_ctx ctx = { 0 }; + struct ena_com_create_io_ctx ctx; struct ena_com_dev *ena_dev; struct ena_ring *tx_ring; u32 msix_vector; @@ -1605,6 +1624,8 @@ static int ena_create_io_tx_queue(struct ena_adapter *adapter, int qid) msix_vector = ENA_IO_IRQ_IDX(qid); ena_qid = ENA_IO_TXQ_IDX(qid); + memset(&ctx, 0x0, sizeof(ctx)); + ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_TX; ctx.qid = ena_qid; ctx.mem_queue_type = ena_dev->tx_mem_queue_type; @@ -1658,7 +1679,7 @@ create_err: static int ena_create_io_rx_queue(struct ena_adapter *adapter, int qid) { struct ena_com_dev *ena_dev; - struct ena_com_create_io_ctx ctx = { 0 }; + struct ena_com_create_io_ctx ctx; struct ena_ring *rx_ring; u32 msix_vector; u16 ena_qid; @@ -1670,6 +1691,8 @@ static int ena_create_io_rx_queue(struct ena_adapter *adapter, int qid) msix_vector = ENA_IO_IRQ_IDX(qid); ena_qid = ENA_IO_RXQ_IDX(qid); + memset(&ctx, 0x0, sizeof(ctx)); + ctx.qid = ena_qid; ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_RX; ctx.mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST; @@ -1729,6 +1752,13 @@ static int ena_up(struct ena_adapter *adapter) ena_setup_io_intr(adapter); + /* napi poll functions should be initialized before running + * request_irq(), to handle a rare condition where there is a pending + * interrupt, causing the ISR to fire immediately while the poll + * function wasn't set yet, causing a null dereference + */ + ena_init_napi(adapter); + rc = ena_request_io_irq(adapter); if (rc) goto err_req_irq; @@ -1980,73 +2010,70 @@ static int ena_check_and_linearize_skb(struct ena_ring *tx_ring, return rc; } -/* Called with netif_tx_lock. 
*/ -static netdev_tx_t ena_start_xmit(struct sk_buff *skb, struct net_device *dev) +static int ena_tx_map_skb(struct ena_ring *tx_ring, + struct ena_tx_buffer *tx_info, + struct sk_buff *skb, + void **push_hdr, + u16 *header_len) { - struct ena_adapter *adapter = netdev_priv(dev); - struct ena_tx_buffer *tx_info; - struct ena_com_tx_ctx ena_tx_ctx; - struct ena_ring *tx_ring; - struct netdev_queue *txq; + struct ena_adapter *adapter = tx_ring->adapter; struct ena_com_buf *ena_buf; - void *push_hdr; - u32 len, last_frag; - u16 next_to_use; - u16 req_id; - u16 push_len; - u16 header_len; dma_addr_t dma; - int qid, rc, nb_hw_desc; - int i = -1; - - netif_dbg(adapter, tx_queued, dev, "%s skb %p\n", __func__, skb); - /* Determine which tx ring we will be placed on */ - qid = skb_get_queue_mapping(skb); - tx_ring = &adapter->tx_ring[qid]; - txq = netdev_get_tx_queue(dev, qid); - - rc = ena_check_and_linearize_skb(tx_ring, skb); - if (unlikely(rc)) - goto error_drop_packet; - - skb_tx_timestamp(skb); - len = skb_headlen(skb); + u32 skb_head_len, frag_len, last_frag; + u16 push_len = 0; + u16 delta = 0; + int i = 0; - next_to_use = tx_ring->next_to_use; - req_id = tx_ring->free_tx_ids[next_to_use]; - tx_info = &tx_ring->tx_buffer_info[req_id]; - tx_info->num_of_bufs = 0; - - WARN(tx_info->skb, "SKB isn't NULL req_id %d\n", req_id); - ena_buf = tx_info->bufs; + skb_head_len = skb_headlen(skb); tx_info->skb = skb; + ena_buf = tx_info->bufs; if (tx_ring->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) { - /* prepared the push buffer */ - push_len = min_t(u32, len, tx_ring->tx_max_header_size); - header_len = push_len; - push_hdr = skb->data; + /* When the device is in LLQ mode, the driver will copy + * the header into the device memory space. + * The ena_com layer assumes the header is in a linear + * memory space. + * This assumption might be wrong since part of the header + * can be in the fragmented buffers. + * Use skb_header_pointer to make sure the header is in a + * linear memory space. 
+ */ + + push_len = min_t(u32, skb->len, tx_ring->tx_max_header_size); + *push_hdr = skb_header_pointer(skb, 0, push_len, + tx_ring->push_buf_intermediate_buf); + *header_len = push_len; + if (unlikely(skb->data != *push_hdr)) { + u64_stats_update_begin(&tx_ring->syncp); + tx_ring->tx_stats.llq_buffer_copy++; + u64_stats_update_end(&tx_ring->syncp); + + delta = push_len - skb_head_len; + } } else { - push_len = 0; - header_len = min_t(u32, len, tx_ring->tx_max_header_size); - push_hdr = NULL; + *push_hdr = NULL; + *header_len = min_t(u32, skb_head_len, + tx_ring->tx_max_header_size); } - netif_dbg(adapter, tx_queued, dev, + netif_dbg(adapter, tx_queued, adapter->netdev, "skb: %p header_buf->vaddr: %p push_len: %d\n", skb, - push_hdr, push_len); + *push_hdr, push_len); - if (len > push_len) { + if (skb_head_len > push_len) { dma = dma_map_single(tx_ring->dev, skb->data + push_len, - len - push_len, DMA_TO_DEVICE); - if (dma_mapping_error(tx_ring->dev, dma)) + skb_head_len - push_len, DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(tx_ring->dev, dma))) goto error_report_dma_error; ena_buf->paddr = dma; - ena_buf->len = len - push_len; + ena_buf->len = skb_head_len - push_len; ena_buf++; tx_info->num_of_bufs++; + tx_info->map_linear_data = 1; + } else { + tx_info->map_linear_data = 0; } last_frag = skb_shinfo(skb)->nr_frags; @@ -2054,18 +2081,75 @@ static netdev_tx_t ena_start_xmit(struct sk_buff *skb, struct net_device *dev) for (i = 0; i < last_frag; i++) { const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; - len = skb_frag_size(frag); - dma = skb_frag_dma_map(tx_ring->dev, frag, 0, len, - DMA_TO_DEVICE); - if (dma_mapping_error(tx_ring->dev, dma)) + frag_len = skb_frag_size(frag); + + if (unlikely(delta >= frag_len)) { + delta -= frag_len; + continue; + } + + dma = skb_frag_dma_map(tx_ring->dev, frag, delta, + frag_len - delta, DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(tx_ring->dev, dma))) goto error_report_dma_error; ena_buf->paddr = dma; - ena_buf->len = len; + ena_buf->len = frag_len - delta; ena_buf++; + tx_info->num_of_bufs++; + delta = 0; } - tx_info->num_of_bufs += last_frag; + return 0; + +error_report_dma_error: + u64_stats_update_begin(&tx_ring->syncp); + tx_ring->tx_stats.dma_mapping_err++; + u64_stats_update_end(&tx_ring->syncp); + netdev_warn(adapter->netdev, "failed to map skb\n"); + + tx_info->skb = NULL; + + tx_info->num_of_bufs += i; + ena_unmap_tx_skb(tx_ring, tx_info); + + return -EINVAL; +} + +/* Called with netif_tx_lock. 
*/ +static netdev_tx_t ena_start_xmit(struct sk_buff *skb, struct net_device *dev) +{ + struct ena_adapter *adapter = netdev_priv(dev); + struct ena_tx_buffer *tx_info; + struct ena_com_tx_ctx ena_tx_ctx; + struct ena_ring *tx_ring; + struct netdev_queue *txq; + void *push_hdr; + u16 next_to_use, req_id, header_len; + int qid, rc, nb_hw_desc; + + netif_dbg(adapter, tx_queued, dev, "%s skb %p\n", __func__, skb); + /* Determine which tx ring we will be placed on */ + qid = skb_get_queue_mapping(skb); + tx_ring = &adapter->tx_ring[qid]; + txq = netdev_get_tx_queue(dev, qid); + + rc = ena_check_and_linearize_skb(tx_ring, skb); + if (unlikely(rc)) + goto error_drop_packet; + + skb_tx_timestamp(skb); + + next_to_use = tx_ring->next_to_use; + req_id = tx_ring->free_tx_ids[next_to_use]; + tx_info = &tx_ring->tx_buffer_info[req_id]; + tx_info->num_of_bufs = 0; + + WARN(tx_info->skb, "SKB isn't NULL req_id %d\n", req_id); + + rc = ena_tx_map_skb(tx_ring, tx_info, skb, &push_hdr, &header_len); + if (unlikely(rc)) + goto error_drop_packet; memset(&ena_tx_ctx, 0x0, sizeof(struct ena_com_tx_ctx)); ena_tx_ctx.ena_bufs = tx_info->bufs; @@ -2081,14 +2165,22 @@ static netdev_tx_t ena_start_xmit(struct sk_buff *skb, struct net_device *dev) rc = ena_com_prepare_tx(tx_ring->ena_com_io_sq, &ena_tx_ctx, &nb_hw_desc); + /* ena_com_prepare_tx() can't fail due to overflow of tx queue, + * since the number of free descriptors in the queue is checked + * after sending the previous packet. In case there isn't enough + * space in the queue for the next packet, it is stopped + * until there is again enough available space in the queue. + * All other failure reasons of ena_com_prepare_tx() are fatal + * and therefore require a device reset. + */ if (unlikely(rc)) { netif_err(adapter, tx_queued, dev, "failed to prepare tx bufs\n"); u64_stats_update_begin(&tx_ring->syncp); - tx_ring->tx_stats.queue_stop++; tx_ring->tx_stats.prepare_ctx_err++; u64_stats_update_end(&tx_ring->syncp); - netif_tx_stop_queue(txq); + adapter->reset_reason = ENA_REGS_RESET_DRIVER_INVALID_STATE; + set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags); goto error_unmap_dma; } @@ -2110,8 +2202,8 @@ static netdev_tx_t ena_start_xmit(struct sk_buff *skb, struct net_device *dev) * to sgl_size + 2. one for the meta descriptor and one for header * (if the header is larger than tx_max_header_size). 
*/ - if (unlikely(ena_com_sq_empty_space(tx_ring->ena_com_io_sq) < - (tx_ring->sgl_size + 2))) { + if (unlikely(!ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq, + tx_ring->sgl_size + 2))) { netif_dbg(adapter, tx_queued, dev, "%s stop queue %d\n", __func__, qid); @@ -2130,8 +2222,8 @@ static netdev_tx_t ena_start_xmit(struct sk_buff *skb, struct net_device *dev) */ smp_mb(); - if (ena_com_sq_empty_space(tx_ring->ena_com_io_sq) - > ENA_TX_WAKEUP_THRESH) { + if (ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq, + ENA_TX_WAKEUP_THRESH)) { netif_tx_wake_queue(txq); u64_stats_update_begin(&tx_ring->syncp); tx_ring->tx_stats.queue_wakeup++; @@ -2151,58 +2243,15 @@ static netdev_tx_t ena_start_xmit(struct sk_buff *skb, struct net_device *dev) return NETDEV_TX_OK; -error_report_dma_error: - u64_stats_update_begin(&tx_ring->syncp); - tx_ring->tx_stats.dma_mapping_err++; - u64_stats_update_end(&tx_ring->syncp); - netdev_warn(adapter->netdev, "failed to map skb\n"); - - tx_info->skb = NULL; - error_unmap_dma: - if (i >= 0) { - /* save value of frag that failed */ - last_frag = i; - - /* start back at beginning and unmap skb */ - tx_info->skb = NULL; - ena_buf = tx_info->bufs; - dma_unmap_single(tx_ring->dev, dma_unmap_addr(ena_buf, paddr), - dma_unmap_len(ena_buf, len), DMA_TO_DEVICE); - - /* unmap remaining mapped pages */ - for (i = 0; i < last_frag; i++) { - ena_buf++; - dma_unmap_page(tx_ring->dev, dma_unmap_addr(ena_buf, paddr), - dma_unmap_len(ena_buf, len), DMA_TO_DEVICE); - } - } + ena_unmap_tx_skb(tx_ring, tx_info); + tx_info->skb = NULL; error_drop_packet: - dev_kfree_skb(skb); return NETDEV_TX_OK; } -#ifdef CONFIG_NET_POLL_CONTROLLER -static void ena_netpoll(struct net_device *netdev) -{ - struct ena_adapter *adapter = netdev_priv(netdev); - int i; - - /* Dont schedule NAPI if the driver is in the middle of reset - * or netdev is down. 
- */ - - if (!test_bit(ENA_FLAG_DEV_UP, &adapter->flags) || - test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags)) - return; - - for (i = 0; i < adapter->num_queues; i++) - napi_schedule(&adapter->ena_napi[i].napi); -} -#endif /* CONFIG_NET_POLL_CONTROLLER */ - static u16 ena_select_queue(struct net_device *dev, struct sk_buff *skb, struct net_device *sb_dev, select_queue_fallback_t fallback) @@ -2220,7 +2269,8 @@ static u16 ena_select_queue(struct net_device *dev, struct sk_buff *skb, return qid; } -static void ena_config_host_info(struct ena_com_dev *ena_dev) +static void ena_config_host_info(struct ena_com_dev *ena_dev, + struct pci_dev *pdev) { struct ena_admin_host_info *host_info; int rc; @@ -2234,6 +2284,7 @@ static void ena_config_host_info(struct ena_com_dev *ena_dev) host_info = ena_dev->host_attr.host_info; + host_info->bdf = (pdev->bus->number << 8) | pdev->devfn; host_info->os_type = ENA_ADMIN_OS_LINUX; host_info->kernel_ver = LINUX_VERSION_CODE; strncpy(host_info->kernel_ver_str, utsname()->version, @@ -2244,7 +2295,9 @@ static void ena_config_host_info(struct ena_com_dev *ena_dev) host_info->driver_version = (DRV_MODULE_VER_MAJOR) | (DRV_MODULE_VER_MINOR << ENA_ADMIN_HOST_INFO_MINOR_SHIFT) | - (DRV_MODULE_VER_SUBMINOR << ENA_ADMIN_HOST_INFO_SUB_MINOR_SHIFT); + (DRV_MODULE_VER_SUBMINOR << ENA_ADMIN_HOST_INFO_SUB_MINOR_SHIFT) | + ("K"[0] << ENA_ADMIN_HOST_INFO_MODULE_TYPE_SHIFT); + host_info->num_cpus = num_online_cpus(); rc = ena_com_set_host_attributes(ena_dev); if (rc) { @@ -2368,9 +2421,6 @@ static const struct net_device_ops ena_netdev_ops = { .ndo_change_mtu = ena_change_mtu, .ndo_set_mac_address = NULL, .ndo_validate_addr = eth_validate_addr, -#ifdef CONFIG_NET_POLL_CONTROLLER - .ndo_poll_controller = ena_netpoll, -#endif /* CONFIG_NET_POLL_CONTROLLER */ }; static int ena_device_validate_params(struct ena_adapter *adapter, @@ -2458,7 +2508,7 @@ static int ena_device_init(struct ena_com_dev *ena_dev, struct pci_dev *pdev, } /* ENA admin level init */ - rc = ena_com_admin_init(ena_dev, &aenq_handlers, true); + rc = ena_com_admin_init(ena_dev, &aenq_handlers); if (rc) { dev_err(dev, "Can not initialize ena admin queue with device\n"); @@ -2471,7 +2521,7 @@ static int ena_device_init(struct ena_com_dev *ena_dev, struct pci_dev *pdev, */ ena_com_set_admin_polling_mode(ena_dev, true); - ena_config_host_info(ena_dev); + ena_config_host_info(ena_dev, pdev); /* Get Device Attributes */ rc = ena_com_get_dev_attr_feat(ena_dev, get_feat_ctx); @@ -2556,15 +2606,14 @@ static void ena_destroy_device(struct ena_adapter *adapter, bool graceful) dev_up = test_bit(ENA_FLAG_DEV_UP, &adapter->flags); adapter->dev_up_before_reset = dev_up; - if (!graceful) ena_com_set_admin_running_state(ena_dev, false); if (test_bit(ENA_FLAG_DEV_UP, &adapter->flags)) ena_down(adapter); - /* Before releasing the ENA resources, a device reset is required. - * (to prevent the device from accessing them). + /* Stop the device from sending AENQ events (in case the reset flag is set + * and the device is up, ena_close has already reset the device). * In case the reset flag is set and the device is up, ena_down() * already performs the reset, so it can be skipped. 
*/ @@ -2633,14 +2682,20 @@ static int ena_restore_device(struct ena_adapter *adapter) set_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags); mod_timer(&adapter->timer_service, round_jiffies(jiffies + HZ)); - dev_err(&pdev->dev, "Device reset completed successfully\n"); + dev_err(&pdev->dev, + "Device reset completed successfully, Driver info: %s\n", + version); return rc; err_disable_msix: ena_free_mgmnt_irq(adapter); ena_disable_msix(adapter); err_device_destroy: + ena_com_abort_admin_commands(ena_dev); + ena_com_wait_for_abort_completion(ena_dev); ena_com_admin_destroy(ena_dev); + ena_com_mmio_reg_read_request_destroy(ena_dev); + ena_com_dev_reset(ena_dev, ENA_REGS_RESET_DRIVER_INVALID_STATE); err: clear_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags); clear_bit(ENA_FLAG_ONGOING_RESET, &adapter->flags); @@ -2822,7 +2877,7 @@ static void check_for_empty_rx_ring(struct ena_adapter *adapter) rx_ring = &adapter->rx_ring[i]; refill_required = - ena_com_sq_empty_space(rx_ring->ena_com_io_sq); + ena_com_free_desc(rx_ring->ena_com_io_sq); if (unlikely(refill_required == (rx_ring->ring_size - 1))) { rx_ring->empty_rx_queue++; @@ -2968,7 +3023,7 @@ static int ena_calc_io_queue_num(struct pci_dev *pdev, /* In case of LLQ use the llq number in the get feature cmd */ if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) { - io_sq_num = get_feat_ctx->max_queues.max_llq_num; + io_sq_num = get_feat_ctx->max_queues.max_legacy_llq_num; if (io_sq_num == 0) { dev_err(&pdev->dev, @@ -2996,18 +3051,52 @@ static int ena_calc_io_queue_num(struct pci_dev *pdev, return io_queue_num; } -static void ena_set_push_mode(struct pci_dev *pdev, struct ena_com_dev *ena_dev, - struct ena_com_dev_get_features_ctx *get_feat_ctx) +static int ena_set_queues_placement_policy(struct pci_dev *pdev, + struct ena_com_dev *ena_dev, + struct ena_admin_feature_llq_desc *llq, + struct ena_llq_configurations *llq_default_configurations) { bool has_mem_bar; + int rc; + u32 llq_feature_mask; + + llq_feature_mask = 1 << ENA_ADMIN_LLQ; + if (!(ena_dev->supported_features & llq_feature_mask)) { + dev_err(&pdev->dev, + "LLQ is not supported Fallback to host mode policy.\n"); + ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST; + return 0; + } has_mem_bar = pci_select_bars(pdev, IORESOURCE_MEM) & BIT(ENA_MEM_BAR); - /* Enable push mode if device supports LLQ */ - if (has_mem_bar && (get_feat_ctx->max_queues.max_llq_num > 0)) - ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_DEV; - else + rc = ena_com_config_dev_mode(ena_dev, llq, llq_default_configurations); + if (unlikely(rc)) { + dev_err(&pdev->dev, + "Failed to configure the device mode. Fallback to host mode policy.\n"); + ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST; + return 0; + } + + /* Nothing to config, exit */ + if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST) + return 0; + + if (!has_mem_bar) { + dev_err(&pdev->dev, + "ENA device does not expose LLQ bar. 
Fallback to host mode policy.\n"); ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST; + return 0; + } + + ena_dev->mem_bar = devm_ioremap_wc(&pdev->dev, + pci_resource_start(pdev, ENA_MEM_BAR), + pci_resource_len(pdev, ENA_MEM_BAR)); + + if (!ena_dev->mem_bar) + return -EFAULT; + + return 0; } static void ena_set_dev_offloads(struct ena_com_dev_get_features_ctx *feat, @@ -3120,18 +3209,20 @@ err_rss_init: static void ena_release_bars(struct ena_com_dev *ena_dev, struct pci_dev *pdev) { - int release_bars; + int release_bars = pci_select_bars(pdev, IORESOURCE_MEM) & ENA_BAR_MASK; - if (ena_dev->mem_bar) - devm_iounmap(&pdev->dev, ena_dev->mem_bar); - - if (ena_dev->reg_bar) - devm_iounmap(&pdev->dev, ena_dev->reg_bar); - - release_bars = pci_select_bars(pdev, IORESOURCE_MEM) & ENA_BAR_MASK; pci_release_selected_regions(pdev, release_bars); } +static inline void set_default_llq_configurations(struct ena_llq_configurations *llq_config) +{ + llq_config->llq_header_location = ENA_ADMIN_INLINE_HEADER; + llq_config->llq_ring_entry_size = ENA_ADMIN_LIST_ENTRY_SIZE_128B; + llq_config->llq_stride_ctrl = ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY; + llq_config->llq_num_decs_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2; + llq_config->llq_ring_entry_size_value = 128; +} + static int ena_calc_queue_size(struct pci_dev *pdev, struct ena_com_dev *ena_dev, u16 *max_tx_sgl_size, @@ -3147,7 +3238,7 @@ static int ena_calc_queue_size(struct pci_dev *pdev, if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) queue_size = min_t(u32, queue_size, - get_feat_ctx->max_queues.max_llq_depth); + get_feat_ctx->max_queues.max_legacy_llq_depth); queue_size = rounddown_pow_of_two(queue_size); @@ -3180,7 +3271,9 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent) static int version_printed; struct net_device *netdev; struct ena_adapter *adapter; + struct ena_llq_configurations llq_config; struct ena_com_dev *ena_dev = NULL; + char *queue_type_str; static int adapters_found; int io_queue_num, bars, rc; int queue_size; @@ -3234,16 +3327,13 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent) goto err_free_region; } - ena_set_push_mode(pdev, ena_dev, &get_feat_ctx); + set_default_llq_configurations(&llq_config); - if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) { - ena_dev->mem_bar = devm_ioremap_wc(&pdev->dev, - pci_resource_start(pdev, ENA_MEM_BAR), - pci_resource_len(pdev, ENA_MEM_BAR)); - if (!ena_dev->mem_bar) { - rc = -EFAULT; - goto err_device_destroy; - } + rc = ena_set_queues_placement_policy(pdev, ena_dev, &get_feat_ctx.llq, + &llq_config); + if (rc) { + dev_err(&pdev->dev, "ena device init failed\n"); + goto err_device_destroy; } /* initial Tx interrupt delay, Assumes 1 usec granularity. @@ -3258,8 +3348,10 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent) goto err_device_destroy; } - dev_info(&pdev->dev, "creating %d io queues. queue size: %d\n", - io_queue_num, queue_size); + dev_info(&pdev->dev, "creating %d io queues. queue size: %d. LLQ is %s\n", + io_queue_num, queue_size, + (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) ? 
+ "ENABLED" : "DISABLED"); /* dev zeroed in init_etherdev */ netdev = alloc_etherdev_mq(sizeof(struct ena_adapter), io_queue_num); @@ -3349,9 +3441,15 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent) timer_setup(&adapter->timer_service, ena_timer_service, 0); mod_timer(&adapter->timer_service, round_jiffies(jiffies + HZ)); - dev_info(&pdev->dev, "%s found at mem %lx, mac addr %pM Queues %d\n", + if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST) + queue_type_str = "Regular"; + else + queue_type_str = "Low Latency"; + + dev_info(&pdev->dev, + "%s found at mem %lx, mac addr %pM Queues %d, Placement policy: %s\n", DEVICE_NAME, (long)pci_resource_start(pdev, 0), - netdev->dev_addr, io_queue_num); + netdev->dev_addr, io_queue_num, queue_type_str); set_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags); diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.h b/drivers/net/ethernet/amazon/ena/ena_netdev.h index 7c7ae56c52cf..521873642339 100644 --- a/drivers/net/ethernet/amazon/ena/ena_netdev.h +++ b/drivers/net/ethernet/amazon/ena/ena_netdev.h @@ -43,9 +43,9 @@ #include "ena_com.h" #include "ena_eth_com.h" -#define DRV_MODULE_VER_MAJOR 1 -#define DRV_MODULE_VER_MINOR 5 -#define DRV_MODULE_VER_SUBMINOR 0 +#define DRV_MODULE_VER_MAJOR 2 +#define DRV_MODULE_VER_MINOR 0 +#define DRV_MODULE_VER_SUBMINOR 1 #define DRV_MODULE_NAME "ena" #ifndef DRV_MODULE_VERSION @@ -61,6 +61,17 @@ #define ENA_ADMIN_MSIX_VEC 1 #define ENA_MAX_MSIX_VEC(io_queues) (ENA_ADMIN_MSIX_VEC + (io_queues)) +/* The ENA buffer length fields is 16 bit long. So when PAGE_SIZE == 64kB the + * driver passes 0. + * Since the max packet size the ENA handles is ~9kB limit the buffer length to + * 16kB. + */ +#if PAGE_SIZE > SZ_16K +#define ENA_PAGE_SIZE SZ_16K +#else +#define ENA_PAGE_SIZE PAGE_SIZE +#endif + #define ENA_MIN_MSIX_VEC 2 #define ENA_REG_BAR 0 @@ -70,7 +81,7 @@ #define ENA_DEFAULT_RING_SIZE (1024) #define ENA_TX_WAKEUP_THRESH (MAX_SKB_FRAGS + 2) -#define ENA_DEFAULT_RX_COPYBREAK (128 - NET_IP_ALIGN) +#define ENA_DEFAULT_RX_COPYBREAK (256 - NET_IP_ALIGN) /* limit the buffer size to 600 bytes to handle MTU changes from very * small to very large, in which case the number of buffers per packet @@ -95,10 +106,11 @@ */ #define ENA_TX_POLL_BUDGET_DIVIDER 4 -/* Refill Rx queue when number of available descriptors is below - * QUEUE_SIZE / ENA_RX_REFILL_THRESH_DIVIDER +/* Refill Rx queue when number of required descriptors is above + * QUEUE_SIZE / ENA_RX_REFILL_THRESH_DIVIDER or ENA_RX_REFILL_THRESH_PACKET */ #define ENA_RX_REFILL_THRESH_DIVIDER 8 +#define ENA_RX_REFILL_THRESH_PACKET 256 /* Number of queues to check for missing queues per timer service */ #define ENA_MONITORED_TX_QUEUES 4 @@ -151,6 +163,9 @@ struct ena_tx_buffer { /* num of buffers used by this skb */ u32 num_of_bufs; + /* Indicate if bufs[0] map the linear data of the skb. 
*/ + u8 map_linear_data; + /* Used for detect missing tx packets to limit the number of prints */ u32 print_once; /* Save the last jiffies to detect missing tx packets @@ -186,6 +201,7 @@ struct ena_stats_tx { u64 tx_poll; u64 doorbells; u64 bad_req_id; + u64 llq_buffer_copy; u64 missed_tx; }; @@ -201,6 +217,7 @@ struct ena_stats_rx { u64 rx_copybreak_pkt; u64 bad_req_id; u64 empty_rx_ring; + u64 csum_unchecked; }; struct ena_ring { @@ -257,6 +274,8 @@ struct ena_ring { struct ena_stats_tx tx_stats; struct ena_stats_rx rx_stats; }; + + u8 *push_buf_intermediate_buf; int empty_rx_queue; } ____cacheline_aligned; @@ -355,15 +374,4 @@ void ena_dump_stats_to_buf(struct ena_adapter *adapter, u8 *buf); int ena_get_sset_count(struct net_device *netdev, int sset); -/* The ENA buffer length fields is 16 bit long. So when PAGE_SIZE == 64kB the - * driver passas 0. - * Since the max packet size the ENA handles is ~9kB limit the buffer length to - * 16kB. - */ -#if PAGE_SIZE > SZ_16K -#define ENA_PAGE_SIZE SZ_16K -#else -#define ENA_PAGE_SIZE PAGE_SIZE -#endif - #endif /* !(ENA_H) */ diff --git a/drivers/net/ethernet/amazon/ena/ena_regs_defs.h b/drivers/net/ethernet/amazon/ena/ena_regs_defs.h index 48ca97fbe7bc..04fcafcc059c 100644 --- a/drivers/net/ethernet/amazon/ena/ena_regs_defs.h +++ b/drivers/net/ethernet/amazon/ena/ena_regs_defs.h @@ -33,137 +33,125 @@ #define _ENA_REGS_H_ enum ena_regs_reset_reason_types { - ENA_REGS_RESET_NORMAL = 0, - - ENA_REGS_RESET_KEEP_ALIVE_TO = 1, - - ENA_REGS_RESET_ADMIN_TO = 2, - - ENA_REGS_RESET_MISS_TX_CMPL = 3, - - ENA_REGS_RESET_INV_RX_REQ_ID = 4, - - ENA_REGS_RESET_INV_TX_REQ_ID = 5, - - ENA_REGS_RESET_TOO_MANY_RX_DESCS = 6, - - ENA_REGS_RESET_INIT_ERR = 7, - - ENA_REGS_RESET_DRIVER_INVALID_STATE = 8, - - ENA_REGS_RESET_OS_TRIGGER = 9, - - ENA_REGS_RESET_OS_NETDEV_WD = 10, - - ENA_REGS_RESET_SHUTDOWN = 11, - - ENA_REGS_RESET_USER_TRIGGER = 12, - - ENA_REGS_RESET_GENERIC = 13, - - ENA_REGS_RESET_MISS_INTERRUPT = 14, + ENA_REGS_RESET_NORMAL = 0, + ENA_REGS_RESET_KEEP_ALIVE_TO = 1, + ENA_REGS_RESET_ADMIN_TO = 2, + ENA_REGS_RESET_MISS_TX_CMPL = 3, + ENA_REGS_RESET_INV_RX_REQ_ID = 4, + ENA_REGS_RESET_INV_TX_REQ_ID = 5, + ENA_REGS_RESET_TOO_MANY_RX_DESCS = 6, + ENA_REGS_RESET_INIT_ERR = 7, + ENA_REGS_RESET_DRIVER_INVALID_STATE = 8, + ENA_REGS_RESET_OS_TRIGGER = 9, + ENA_REGS_RESET_OS_NETDEV_WD = 10, + ENA_REGS_RESET_SHUTDOWN = 11, + ENA_REGS_RESET_USER_TRIGGER = 12, + ENA_REGS_RESET_GENERIC = 13, + ENA_REGS_RESET_MISS_INTERRUPT = 14, }; /* ena_registers offsets */ -#define ENA_REGS_VERSION_OFF 0x0 -#define ENA_REGS_CONTROLLER_VERSION_OFF 0x4 -#define ENA_REGS_CAPS_OFF 0x8 -#define ENA_REGS_CAPS_EXT_OFF 0xc -#define ENA_REGS_AQ_BASE_LO_OFF 0x10 -#define ENA_REGS_AQ_BASE_HI_OFF 0x14 -#define ENA_REGS_AQ_CAPS_OFF 0x18 -#define ENA_REGS_ACQ_BASE_LO_OFF 0x20 -#define ENA_REGS_ACQ_BASE_HI_OFF 0x24 -#define ENA_REGS_ACQ_CAPS_OFF 0x28 -#define ENA_REGS_AQ_DB_OFF 0x2c -#define ENA_REGS_ACQ_TAIL_OFF 0x30 -#define ENA_REGS_AENQ_CAPS_OFF 0x34 -#define ENA_REGS_AENQ_BASE_LO_OFF 0x38 -#define ENA_REGS_AENQ_BASE_HI_OFF 0x3c -#define ENA_REGS_AENQ_HEAD_DB_OFF 0x40 -#define ENA_REGS_AENQ_TAIL_OFF 0x44 -#define ENA_REGS_INTR_MASK_OFF 0x4c -#define ENA_REGS_DEV_CTL_OFF 0x54 -#define ENA_REGS_DEV_STS_OFF 0x58 -#define ENA_REGS_MMIO_REG_READ_OFF 0x5c -#define ENA_REGS_MMIO_RESP_LO_OFF 0x60 -#define ENA_REGS_MMIO_RESP_HI_OFF 0x64 -#define ENA_REGS_RSS_IND_ENTRY_UPDATE_OFF 0x68 + +/* 0 base */ +#define ENA_REGS_VERSION_OFF 0x0 +#define ENA_REGS_CONTROLLER_VERSION_OFF 0x4 +#define 
ENA_REGS_CAPS_OFF 0x8 +#define ENA_REGS_CAPS_EXT_OFF 0xc +#define ENA_REGS_AQ_BASE_LO_OFF 0x10 +#define ENA_REGS_AQ_BASE_HI_OFF 0x14 +#define ENA_REGS_AQ_CAPS_OFF 0x18 +#define ENA_REGS_ACQ_BASE_LO_OFF 0x20 +#define ENA_REGS_ACQ_BASE_HI_OFF 0x24 +#define ENA_REGS_ACQ_CAPS_OFF 0x28 +#define ENA_REGS_AQ_DB_OFF 0x2c +#define ENA_REGS_ACQ_TAIL_OFF 0x30 +#define ENA_REGS_AENQ_CAPS_OFF 0x34 +#define ENA_REGS_AENQ_BASE_LO_OFF 0x38 +#define ENA_REGS_AENQ_BASE_HI_OFF 0x3c +#define ENA_REGS_AENQ_HEAD_DB_OFF 0x40 +#define ENA_REGS_AENQ_TAIL_OFF 0x44 +#define ENA_REGS_INTR_MASK_OFF 0x4c +#define ENA_REGS_DEV_CTL_OFF 0x54 +#define ENA_REGS_DEV_STS_OFF 0x58 +#define ENA_REGS_MMIO_REG_READ_OFF 0x5c +#define ENA_REGS_MMIO_RESP_LO_OFF 0x60 +#define ENA_REGS_MMIO_RESP_HI_OFF 0x64 +#define ENA_REGS_RSS_IND_ENTRY_UPDATE_OFF 0x68 /* version register */ -#define ENA_REGS_VERSION_MINOR_VERSION_MASK 0xff -#define ENA_REGS_VERSION_MAJOR_VERSION_SHIFT 8 -#define ENA_REGS_VERSION_MAJOR_VERSION_MASK 0xff00 +#define ENA_REGS_VERSION_MINOR_VERSION_MASK 0xff +#define ENA_REGS_VERSION_MAJOR_VERSION_SHIFT 8 +#define ENA_REGS_VERSION_MAJOR_VERSION_MASK 0xff00 /* controller_version register */ -#define ENA_REGS_CONTROLLER_VERSION_SUBMINOR_VERSION_MASK 0xff -#define ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_SHIFT 8 -#define ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_MASK 0xff00 -#define ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_SHIFT 16 -#define ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_MASK 0xff0000 -#define ENA_REGS_CONTROLLER_VERSION_IMPL_ID_SHIFT 24 -#define ENA_REGS_CONTROLLER_VERSION_IMPL_ID_MASK 0xff000000 +#define ENA_REGS_CONTROLLER_VERSION_SUBMINOR_VERSION_MASK 0xff +#define ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_SHIFT 8 +#define ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_MASK 0xff00 +#define ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_SHIFT 16 +#define ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_MASK 0xff0000 +#define ENA_REGS_CONTROLLER_VERSION_IMPL_ID_SHIFT 24 +#define ENA_REGS_CONTROLLER_VERSION_IMPL_ID_MASK 0xff000000 /* caps register */ -#define ENA_REGS_CAPS_CONTIGUOUS_QUEUE_REQUIRED_MASK 0x1 -#define ENA_REGS_CAPS_RESET_TIMEOUT_SHIFT 1 -#define ENA_REGS_CAPS_RESET_TIMEOUT_MASK 0x3e -#define ENA_REGS_CAPS_DMA_ADDR_WIDTH_SHIFT 8 -#define ENA_REGS_CAPS_DMA_ADDR_WIDTH_MASK 0xff00 -#define ENA_REGS_CAPS_ADMIN_CMD_TO_SHIFT 16 -#define ENA_REGS_CAPS_ADMIN_CMD_TO_MASK 0xf0000 +#define ENA_REGS_CAPS_CONTIGUOUS_QUEUE_REQUIRED_MASK 0x1 +#define ENA_REGS_CAPS_RESET_TIMEOUT_SHIFT 1 +#define ENA_REGS_CAPS_RESET_TIMEOUT_MASK 0x3e +#define ENA_REGS_CAPS_DMA_ADDR_WIDTH_SHIFT 8 +#define ENA_REGS_CAPS_DMA_ADDR_WIDTH_MASK 0xff00 +#define ENA_REGS_CAPS_ADMIN_CMD_TO_SHIFT 16 +#define ENA_REGS_CAPS_ADMIN_CMD_TO_MASK 0xf0000 /* aq_caps register */ -#define ENA_REGS_AQ_CAPS_AQ_DEPTH_MASK 0xffff -#define ENA_REGS_AQ_CAPS_AQ_ENTRY_SIZE_SHIFT 16 -#define ENA_REGS_AQ_CAPS_AQ_ENTRY_SIZE_MASK 0xffff0000 +#define ENA_REGS_AQ_CAPS_AQ_DEPTH_MASK 0xffff +#define ENA_REGS_AQ_CAPS_AQ_ENTRY_SIZE_SHIFT 16 +#define ENA_REGS_AQ_CAPS_AQ_ENTRY_SIZE_MASK 0xffff0000 /* acq_caps register */ -#define ENA_REGS_ACQ_CAPS_ACQ_DEPTH_MASK 0xffff -#define ENA_REGS_ACQ_CAPS_ACQ_ENTRY_SIZE_SHIFT 16 -#define ENA_REGS_ACQ_CAPS_ACQ_ENTRY_SIZE_MASK 0xffff0000 +#define ENA_REGS_ACQ_CAPS_ACQ_DEPTH_MASK 0xffff +#define ENA_REGS_ACQ_CAPS_ACQ_ENTRY_SIZE_SHIFT 16 +#define ENA_REGS_ACQ_CAPS_ACQ_ENTRY_SIZE_MASK 0xffff0000 /* aenq_caps register */ -#define ENA_REGS_AENQ_CAPS_AENQ_DEPTH_MASK 0xffff -#define ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_SHIFT 16 -#define 
ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_MASK 0xffff0000 +#define ENA_REGS_AENQ_CAPS_AENQ_DEPTH_MASK 0xffff +#define ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_SHIFT 16 +#define ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_MASK 0xffff0000 /* dev_ctl register */ -#define ENA_REGS_DEV_CTL_DEV_RESET_MASK 0x1 -#define ENA_REGS_DEV_CTL_AQ_RESTART_SHIFT 1 -#define ENA_REGS_DEV_CTL_AQ_RESTART_MASK 0x2 -#define ENA_REGS_DEV_CTL_QUIESCENT_SHIFT 2 -#define ENA_REGS_DEV_CTL_QUIESCENT_MASK 0x4 -#define ENA_REGS_DEV_CTL_IO_RESUME_SHIFT 3 -#define ENA_REGS_DEV_CTL_IO_RESUME_MASK 0x8 -#define ENA_REGS_DEV_CTL_RESET_REASON_SHIFT 28 -#define ENA_REGS_DEV_CTL_RESET_REASON_MASK 0xf0000000 +#define ENA_REGS_DEV_CTL_DEV_RESET_MASK 0x1 +#define ENA_REGS_DEV_CTL_AQ_RESTART_SHIFT 1 +#define ENA_REGS_DEV_CTL_AQ_RESTART_MASK 0x2 +#define ENA_REGS_DEV_CTL_QUIESCENT_SHIFT 2 +#define ENA_REGS_DEV_CTL_QUIESCENT_MASK 0x4 +#define ENA_REGS_DEV_CTL_IO_RESUME_SHIFT 3 +#define ENA_REGS_DEV_CTL_IO_RESUME_MASK 0x8 +#define ENA_REGS_DEV_CTL_RESET_REASON_SHIFT 28 +#define ENA_REGS_DEV_CTL_RESET_REASON_MASK 0xf0000000 /* dev_sts register */ -#define ENA_REGS_DEV_STS_READY_MASK 0x1 -#define ENA_REGS_DEV_STS_AQ_RESTART_IN_PROGRESS_SHIFT 1 -#define ENA_REGS_DEV_STS_AQ_RESTART_IN_PROGRESS_MASK 0x2 -#define ENA_REGS_DEV_STS_AQ_RESTART_FINISHED_SHIFT 2 -#define ENA_REGS_DEV_STS_AQ_RESTART_FINISHED_MASK 0x4 -#define ENA_REGS_DEV_STS_RESET_IN_PROGRESS_SHIFT 3 -#define ENA_REGS_DEV_STS_RESET_IN_PROGRESS_MASK 0x8 -#define ENA_REGS_DEV_STS_RESET_FINISHED_SHIFT 4 -#define ENA_REGS_DEV_STS_RESET_FINISHED_MASK 0x10 -#define ENA_REGS_DEV_STS_FATAL_ERROR_SHIFT 5 -#define ENA_REGS_DEV_STS_FATAL_ERROR_MASK 0x20 -#define ENA_REGS_DEV_STS_QUIESCENT_STATE_IN_PROGRESS_SHIFT 6 -#define ENA_REGS_DEV_STS_QUIESCENT_STATE_IN_PROGRESS_MASK 0x40 -#define ENA_REGS_DEV_STS_QUIESCENT_STATE_ACHIEVED_SHIFT 7 -#define ENA_REGS_DEV_STS_QUIESCENT_STATE_ACHIEVED_MASK 0x80 +#define ENA_REGS_DEV_STS_READY_MASK 0x1 +#define ENA_REGS_DEV_STS_AQ_RESTART_IN_PROGRESS_SHIFT 1 +#define ENA_REGS_DEV_STS_AQ_RESTART_IN_PROGRESS_MASK 0x2 +#define ENA_REGS_DEV_STS_AQ_RESTART_FINISHED_SHIFT 2 +#define ENA_REGS_DEV_STS_AQ_RESTART_FINISHED_MASK 0x4 +#define ENA_REGS_DEV_STS_RESET_IN_PROGRESS_SHIFT 3 +#define ENA_REGS_DEV_STS_RESET_IN_PROGRESS_MASK 0x8 +#define ENA_REGS_DEV_STS_RESET_FINISHED_SHIFT 4 +#define ENA_REGS_DEV_STS_RESET_FINISHED_MASK 0x10 +#define ENA_REGS_DEV_STS_FATAL_ERROR_SHIFT 5 +#define ENA_REGS_DEV_STS_FATAL_ERROR_MASK 0x20 +#define ENA_REGS_DEV_STS_QUIESCENT_STATE_IN_PROGRESS_SHIFT 6 +#define ENA_REGS_DEV_STS_QUIESCENT_STATE_IN_PROGRESS_MASK 0x40 +#define ENA_REGS_DEV_STS_QUIESCENT_STATE_ACHIEVED_SHIFT 7 +#define ENA_REGS_DEV_STS_QUIESCENT_STATE_ACHIEVED_MASK 0x80 /* mmio_reg_read register */ -#define ENA_REGS_MMIO_REG_READ_REQ_ID_MASK 0xffff -#define ENA_REGS_MMIO_REG_READ_REG_OFF_SHIFT 16 -#define ENA_REGS_MMIO_REG_READ_REG_OFF_MASK 0xffff0000 +#define ENA_REGS_MMIO_REG_READ_REQ_ID_MASK 0xffff +#define ENA_REGS_MMIO_REG_READ_REG_OFF_SHIFT 16 +#define ENA_REGS_MMIO_REG_READ_REG_OFF_MASK 0xffff0000 /* rss_ind_entry_update register */ -#define ENA_REGS_RSS_IND_ENTRY_UPDATE_INDEX_MASK 0xffff -#define ENA_REGS_RSS_IND_ENTRY_UPDATE_CQ_IDX_SHIFT 16 -#define ENA_REGS_RSS_IND_ENTRY_UPDATE_CQ_IDX_MASK 0xffff0000 +#define ENA_REGS_RSS_IND_ENTRY_UPDATE_INDEX_MASK 0xffff +#define ENA_REGS_RSS_IND_ENTRY_UPDATE_CQ_IDX_SHIFT 16 +#define ENA_REGS_RSS_IND_ENTRY_UPDATE_CQ_IDX_MASK 0xffff0000 #endif /*_ENA_REGS_H_ */ diff --git a/drivers/net/ethernet/amd/declance.c b/drivers/net/ethernet/amd/declance.c 
index 29ebbf582010..9f23703dd509 100644 --- a/drivers/net/ethernet/amd/declance.c +++ b/drivers/net/ethernet/amd/declance.c @@ -1031,6 +1031,7 @@ static int dec_lance_probe(struct device *bdev, const int type) int i, ret; unsigned long esar_base; unsigned char *esar; + const char *desc; if (dec_lance_debug && version_printed++ == 0) printk(version); @@ -1216,19 +1217,20 @@ static int dec_lance_probe(struct device *bdev, const int type) */ switch (type) { case ASIC_LANCE: - printk("%s: IOASIC onboard LANCE", name); + desc = "IOASIC onboard LANCE"; break; case PMAD_LANCE: - printk("%s: PMAD-AA", name); + desc = "PMAD-AA"; break; case PMAX_LANCE: - printk("%s: PMAX onboard LANCE", name); + desc = "PMAX onboard LANCE"; break; } for (i = 0; i < 6; i++) dev->dev_addr[i] = esar[i * 4]; - printk(", addr = %pM, irq = %d\n", dev->dev_addr, dev->irq); + printk("%s: %s, addr = %pM, irq = %d\n", + name, desc, dev->dev_addr, dev->irq); dev->netdev_ops = &lance_netdev_ops; dev->watchdog_timeo = 5*HZ; diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c index 750007513f9d..1d5d6b8df855 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c @@ -84,7 +84,7 @@ static int aq_pci_probe_get_hw_by_id(struct pci_dev *pdev, const struct aq_hw_ops **ops, const struct aq_hw_caps_s **caps) { - int i = 0; + int i; if (pdev->vendor != PCI_VENDOR_ID_AQUANTIA) return -EINVAL; @@ -107,7 +107,7 @@ static int aq_pci_probe_get_hw_by_id(struct pci_dev *pdev, int aq_pci_func_init(struct pci_dev *pdev) { - int err = 0; + int err; err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); if (!err) { @@ -141,7 +141,7 @@ int aq_pci_func_alloc_irq(struct aq_nic_s *self, unsigned int i, char *name, void *aq_vec, cpumask_t *affinity_mask) { struct pci_dev *pdev = self->pdev; - int err = 0; + int err; if (pdev->msix_enabled || pdev->msi_enabled) err = request_irq(pci_irq_vector(pdev, i), aq_vec_isr, 0, @@ -164,7 +164,7 @@ int aq_pci_func_alloc_irq(struct aq_nic_s *self, unsigned int i, void aq_pci_func_free_irqs(struct aq_nic_s *self) { struct pci_dev *pdev = self->pdev; - unsigned int i = 0U; + unsigned int i; for (i = 32U; i--;) { if (!((1U << i) & self->msix_entry_mask)) @@ -194,8 +194,8 @@ static void aq_pci_free_irq_vectors(struct aq_nic_s *self) static int aq_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pci_id) { - struct aq_nic_s *self = NULL; - int err = 0; + struct aq_nic_s *self; + int err; struct net_device *ndev; resource_size_t mmio_pa; u32 bar; diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c index faba55fd656a..4122553e224b 100644 --- a/drivers/net/ethernet/broadcom/bcmsysport.c +++ b/drivers/net/ethernet/broadcom/bcmsysport.c @@ -1070,9 +1070,6 @@ static void bcm_sysport_resume_from_wol(struct bcm_sysport_priv *priv) { u32 reg; - /* Stop monitoring MPD interrupt */ - intrl2_0_mask_set(priv, INTRL2_0_MPD | INTRL2_0_BRCM_MATCH_TAG); - /* Disable RXCHK, active filters and Broadcom tag matching */ reg = rxchk_readl(priv, RXCHK_CONTROL); reg &= ~(RXCHK_BRCM_TAG_MATCH_MASK << @@ -1082,6 +1079,17 @@ static void bcm_sysport_resume_from_wol(struct bcm_sysport_priv *priv) /* Clear the MagicPacket detection logic */ mpd_enable_set(priv, false); + reg = intrl2_0_readl(priv, INTRL2_CPU_STATUS); + if (reg & INTRL2_0_MPD) + netdev_info(priv->netdev, "Wake-on-LAN (MPD) interrupt!\n"); + + if (reg & INTRL2_0_BRCM_MATCH_TAG) { + reg = 
rxchk_readl(priv, RXCHK_BRCM_TAG_MATCH_STATUS) & + RXCHK_BRCM_TAG_MATCH_MASK; + netdev_info(priv->netdev, + "Wake-on-LAN (filters 0x%02x) interrupt!\n", reg); + } + netif_dbg(priv, wol, priv->netdev, "resumed from WOL\n"); } @@ -1106,7 +1114,6 @@ static irqreturn_t bcm_sysport_rx_isr(int irq, void *dev_id) struct bcm_sysport_priv *priv = netdev_priv(dev); struct bcm_sysport_tx_ring *txr; unsigned int ring, ring_bit; - u32 reg; priv->irq0_stat = intrl2_0_readl(priv, INTRL2_CPU_STATUS) & ~intrl2_0_readl(priv, INTRL2_CPU_MASK_STATUS); @@ -1132,16 +1139,6 @@ static irqreturn_t bcm_sysport_rx_isr(int irq, void *dev_id) if (priv->irq0_stat & INTRL2_0_TX_RING_FULL) bcm_sysport_tx_reclaim_all(priv); - if (priv->irq0_stat & INTRL2_0_MPD) - netdev_info(priv->netdev, "Wake-on-LAN (MPD) interrupt!\n"); - - if (priv->irq0_stat & INTRL2_0_BRCM_MATCH_TAG) { - reg = rxchk_readl(priv, RXCHK_BRCM_TAG_MATCH_STATUS) & - RXCHK_BRCM_TAG_MATCH_MASK; - netdev_info(priv->netdev, - "Wake-on-LAN (filters 0x%02x) interrupt!\n", reg); - } - if (!priv->is_lite) goto out; @@ -2645,9 +2642,6 @@ static int bcm_sysport_suspend_to_wol(struct bcm_sysport_priv *priv) /* UniMAC receive needs to be turned on */ umac_enable_set(priv, CMD_RX_EN, 1); - /* Enable the interrupt wake-up source */ - intrl2_0_mask_clear(priv, INTRL2_0_MPD | INTRL2_0_BRCM_MATCH_TAG); - netif_dbg(priv, wol, ndev, "entered WOL mode\n"); return 0; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index 61957b0bbd8c..e2d92548226a 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -1884,8 +1884,11 @@ static int bnxt_poll_work(struct bnxt *bp, struct bnxt_napi *bnapi, int budget) if (TX_CMP_TYPE(txcmp) == CMP_TYPE_TX_L2_CMP) { tx_pkts++; /* return full budget so NAPI will complete. */ - if (unlikely(tx_pkts > bp->tx_wake_thresh)) + if (unlikely(tx_pkts > bp->tx_wake_thresh)) { rx_pkts = budget; + raw_cons = NEXT_RAW_CMP(raw_cons); + break; + } } else if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) { if (likely(budget)) rc = bnxt_rx_pkt(bp, bnapi, &raw_cons, &event); @@ -1913,7 +1916,7 @@ static int bnxt_poll_work(struct bnxt *bp, struct bnxt_napi *bnapi, int budget) } raw_cons = NEXT_RAW_CMP(raw_cons); - if (rx_pkts == budget) + if (rx_pkts && rx_pkts == budget) break; } @@ -2027,8 +2030,12 @@ static int bnxt_poll(struct napi_struct *napi, int budget) while (1) { work_done += bnxt_poll_work(bp, bnapi, budget - work_done); - if (work_done >= budget) + if (work_done >= budget) { + if (!budget) + BNXT_CP_DB_REARM(cpr->cp_doorbell, + cpr->cp_raw_cons); break; + } if (!bnxt_has_work(bp, cpr)) { if (napi_complete_done(napi, work_done)) @@ -3010,10 +3017,11 @@ static void bnxt_free_hwrm_resources(struct bnxt *bp) { struct pci_dev *pdev = bp->pdev; - dma_free_coherent(&pdev->dev, PAGE_SIZE, bp->hwrm_cmd_resp_addr, - bp->hwrm_cmd_resp_dma_addr); - - bp->hwrm_cmd_resp_addr = NULL; + if (bp->hwrm_cmd_resp_addr) { + dma_free_coherent(&pdev->dev, PAGE_SIZE, bp->hwrm_cmd_resp_addr, + bp->hwrm_cmd_resp_dma_addr); + bp->hwrm_cmd_resp_addr = NULL; + } } static int bnxt_alloc_hwrm_resources(struct bnxt *bp) @@ -4643,7 +4651,7 @@ __bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, struct hwrm_func_cfg_input *req, FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0; enables |= ring_grps ? FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS : 0; - enables |= vnics ? FUNC_VF_CFG_REQ_ENABLES_NUM_VNICS : 0; + enables |= vnics ? 
FUNC_CFG_REQ_ENABLES_NUM_VNICS : 0; req->num_rx_rings = cpu_to_le16(rx_rings); req->num_hw_ring_grps = cpu_to_le16(ring_grps); @@ -8614,7 +8622,7 @@ static void _bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx, *max_tx = hw_resc->max_tx_rings; *max_rx = hw_resc->max_rx_rings; *max_cp = min_t(int, bnxt_get_max_func_cp_rings_for_en(bp), - hw_resc->max_irqs); + hw_resc->max_irqs - bnxt_get_ulp_msix_num(bp)); *max_cp = min_t(int, *max_cp, hw_resc->max_stat_ctxs); max_ring_grps = hw_resc->max_hw_ring_grps; if (BNXT_CHIP_TYPE_NITRO_A0(bp) && BNXT_PF(bp)) { @@ -9050,6 +9058,7 @@ init_err_cleanup_tc: bnxt_clear_int_mode(bp); init_err_pci_clean: + bnxt_free_hwrm_resources(bp); bnxt_cleanup_pci(bp); init_err_free: diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c index ddc98c359488..a85d2be986af 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c @@ -98,13 +98,13 @@ static int bnxt_hwrm_queue_cos2bw_cfg(struct bnxt *bp, struct ieee_ets *ets, bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_COS2BW_CFG, -1, -1); for (i = 0; i < max_tc; i++) { - u8 qidx; + u8 qidx = bp->tc_to_qidx[i]; req.enables |= cpu_to_le32( - QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID0_VALID << i); + QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID0_VALID << + qidx); memset(&cos2bw, 0, sizeof(cos2bw)); - qidx = bp->tc_to_qidx[i]; cos2bw.queue_id = bp->q_info[qidx].queue_id; if (ets->tc_tsa[i] == IEEE_8021QAZ_TSA_STRICT) { cos2bw.tsa = diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c index 790c684f08ab..140dbd62106d 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c @@ -21,9 +21,22 @@ static const struct devlink_ops bnxt_dl_ops = { #endif /* CONFIG_BNXT_SRIOV */ }; +enum bnxt_dl_param_id { + BNXT_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX, + BNXT_DEVLINK_PARAM_ID_GRE_VER_CHECK, +}; + static const struct bnxt_dl_nvm_param nvm_params[] = { {DEVLINK_PARAM_GENERIC_ID_ENABLE_SRIOV, NVM_OFF_ENABLE_SRIOV, BNXT_NVM_SHARED_CFG, 1}, + {DEVLINK_PARAM_GENERIC_ID_IGNORE_ARI, NVM_OFF_IGNORE_ARI, + BNXT_NVM_SHARED_CFG, 1}, + {DEVLINK_PARAM_GENERIC_ID_MSIX_VEC_PER_PF_MAX, + NVM_OFF_MSIX_VEC_PER_PF_MAX, BNXT_NVM_SHARED_CFG, 10}, + {DEVLINK_PARAM_GENERIC_ID_MSIX_VEC_PER_PF_MIN, + NVM_OFF_MSIX_VEC_PER_PF_MIN, BNXT_NVM_SHARED_CFG, 7}, + {BNXT_DEVLINK_PARAM_ID_GRE_VER_CHECK, NVM_OFF_DIS_GRE_VER_CHECK, + BNXT_NVM_SHARED_CFG, 1}, }; static int bnxt_hwrm_nvm_req(struct bnxt *bp, u32 param_id, void *msg, @@ -55,8 +68,22 @@ static int bnxt_hwrm_nvm_req(struct bnxt *bp, u32 param_id, void *msg, idx = bp->pf.fw_fid - BNXT_FIRST_PF_FID; bytesize = roundup(nvm_param.num_bits, BITS_PER_BYTE) / BITS_PER_BYTE; - if (nvm_param.num_bits == 1) - buf = &val->vbool; + switch (bytesize) { + case 1: + if (nvm_param.num_bits == 1) + buf = &val->vbool; + else + buf = &val->vu8; + break; + case 2: + buf = &val->vu16; + break; + case 4: + buf = &val->vu32; + break; + default: + return -EFAULT; + } data_addr = dma_zalloc_coherent(&bp->pdev->dev, bytesize, &data_dma_addr, GFP_KERNEL); @@ -78,8 +105,12 @@ static int bnxt_hwrm_nvm_req(struct bnxt *bp, u32 param_id, void *msg, memcpy(buf, data_addr, bytesize); dma_free_coherent(&bp->pdev->dev, bytesize, data_addr, data_dma_addr); - if (rc) + if (rc == HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED) { + netdev_err(bp->dev, "PF does not have admin privileges to modify NVM config\n"); + 
return -EACCES; + } else if (rc) { return -EIO; + } return 0; } @@ -88,9 +119,15 @@ static int bnxt_dl_nvm_param_get(struct devlink *dl, u32 id, { struct hwrm_nvm_get_variable_input req = {0}; struct bnxt *bp = bnxt_get_bp_from_dl(dl); + int rc; bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_GET_VARIABLE, -1, -1); - return bnxt_hwrm_nvm_req(bp, id, &req, sizeof(req), &ctx->val); + rc = bnxt_hwrm_nvm_req(bp, id, &req, sizeof(req), &ctx->val); + if (!rc) + if (id == BNXT_DEVLINK_PARAM_ID_GRE_VER_CHECK) + ctx->val.vbool = !ctx->val.vbool; + + return rc; } static int bnxt_dl_nvm_param_set(struct devlink *dl, u32 id, @@ -100,14 +137,55 @@ static int bnxt_dl_nvm_param_set(struct devlink *dl, u32 id, struct bnxt *bp = bnxt_get_bp_from_dl(dl); bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_SET_VARIABLE, -1, -1); + + if (id == BNXT_DEVLINK_PARAM_ID_GRE_VER_CHECK) + ctx->val.vbool = !ctx->val.vbool; + return bnxt_hwrm_nvm_req(bp, id, &req, sizeof(req), &ctx->val); } +static int bnxt_dl_msix_validate(struct devlink *dl, u32 id, + union devlink_param_value val, + struct netlink_ext_ack *extack) +{ + int max_val = -1; + + if (id == DEVLINK_PARAM_GENERIC_ID_MSIX_VEC_PER_PF_MAX) + max_val = BNXT_MSIX_VEC_MAX; + + if (id == DEVLINK_PARAM_GENERIC_ID_MSIX_VEC_PER_PF_MIN) + max_val = BNXT_MSIX_VEC_MIN_MAX; + + if (val.vu32 > max_val) { + NL_SET_ERR_MSG_MOD(extack, "MSIX value is exceeding the range"); + return -EINVAL; + } + + return 0; +} + static const struct devlink_param bnxt_dl_params[] = { DEVLINK_PARAM_GENERIC(ENABLE_SRIOV, BIT(DEVLINK_PARAM_CMODE_PERMANENT), bnxt_dl_nvm_param_get, bnxt_dl_nvm_param_set, NULL), + DEVLINK_PARAM_GENERIC(IGNORE_ARI, + BIT(DEVLINK_PARAM_CMODE_PERMANENT), + bnxt_dl_nvm_param_get, bnxt_dl_nvm_param_set, + NULL), + DEVLINK_PARAM_GENERIC(MSIX_VEC_PER_PF_MAX, + BIT(DEVLINK_PARAM_CMODE_PERMANENT), + bnxt_dl_nvm_param_get, bnxt_dl_nvm_param_set, + bnxt_dl_msix_validate), + DEVLINK_PARAM_GENERIC(MSIX_VEC_PER_PF_MIN, + BIT(DEVLINK_PARAM_CMODE_PERMANENT), + bnxt_dl_nvm_param_get, bnxt_dl_nvm_param_set, + bnxt_dl_msix_validate), + DEVLINK_PARAM_DRIVER(BNXT_DEVLINK_PARAM_ID_GRE_VER_CHECK, + "gre_ver_check", DEVLINK_PARAM_TYPE_BOOL, + BIT(DEVLINK_PARAM_CMODE_PERMANENT), + bnxt_dl_nvm_param_get, bnxt_dl_nvm_param_set, + NULL), }; int bnxt_dl_register(struct bnxt *bp) diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h index 2f68dc048390..5b6b2c7d97cf 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h @@ -33,8 +33,15 @@ static inline void bnxt_link_bp_to_dl(struct bnxt *bp, struct devlink *dl) } } +#define NVM_OFF_MSIX_VEC_PER_PF_MAX 108 +#define NVM_OFF_MSIX_VEC_PER_PF_MIN 114 +#define NVM_OFF_IGNORE_ARI 164 +#define NVM_OFF_DIS_GRE_VER_CHECK 171 #define NVM_OFF_ENABLE_SRIOV 401 +#define BNXT_MSIX_VEC_MAX 1280 +#define BNXT_MSIX_VEC_MIN_MAX 128 + enum bnxt_nvm_dir_type { BNXT_NVM_SHARED_CFG = 40, BNXT_NVM_PORT_CFG, diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c index b574fe8e974e..9a25c05aa571 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c @@ -521,7 +521,8 @@ int bnxt_dl_eswitch_mode_get(struct devlink *devlink, u16 *mode) return 0; } -int bnxt_dl_eswitch_mode_set(struct devlink *devlink, u16 mode) +int bnxt_dl_eswitch_mode_set(struct devlink *devlink, u16 mode, + struct netlink_ext_ack *extack) { struct bnxt *bp = bnxt_get_bp_from_dl(devlink); 
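	/* Editor's note -- not part of the patch: every parameter registered
	 * in bnxt_dl_params above uses DEVLINK_PARAM_CMODE_PERMANENT, i.e.
	 * it is backed by NVM and only takes effect after the next firmware
	 * reset. An illustrative iproute2 session (the PCI address is
	 * hypothetical):
	 *
	 *   devlink dev param show pci/0000:3b:00.0 name ignore_ari
	 *   devlink dev param set pci/0000:3b:00.0 name msix_vec_per_pf_max
	 *           value 512 cmode permanent
	 *
	 * Note also that gre_ver_check is stored inverted in NVM: the field
	 * is a "disable" flag (NVM_OFF_DIS_GRE_VER_CHECK), which is why
	 * bnxt_dl_nvm_param_get() and bnxt_dl_nvm_param_set() negate the
	 * bool on either side of the NVM access.
	 */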
int rc = 0; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.h index 38b9a75ad724..d7287651422f 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.h @@ -30,7 +30,8 @@ static inline u16 bnxt_vf_rep_get_fid(struct net_device *dev) bool bnxt_dev_is_vf_rep(struct net_device *dev); int bnxt_dl_eswitch_mode_get(struct devlink *devlink, u16 *mode); -int bnxt_dl_eswitch_mode_set(struct devlink *devlink, u16 mode); +int bnxt_dl_eswitch_mode_set(struct devlink *devlink, u16 mode, + struct netlink_ext_ack *extack); #else diff --git a/drivers/net/ethernet/cavium/liquidio/lio_core.c b/drivers/net/ethernet/cavium/liquidio/lio_core.c index eb96b0613cf6..825a28e5b544 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_core.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_core.c @@ -1732,7 +1732,7 @@ int liquidio_set_fec(struct lio *lio, int on_off) if (oct->props[lio->ifidx].fec != oct->props[lio->ifidx].fec_boot) { dev_dbg(&oct->pci_dev->dev, - "Reloade driver to chang fec to %s\n", + "Reload driver to change fec to %s\n", oct->props[lio->ifidx].fec ? "on" : "off"); } @@ -1796,7 +1796,7 @@ int liquidio_get_fec(struct lio *lio) if (oct->props[lio->ifidx].fec != oct->props[lio->ifidx].fec_boot) { dev_dbg(&oct->pci_dev->dev, - "Reloade driver to chang fec to %s\n", + "Reload driver to change fec to %s\n", oct->props[lio->ifidx].fec ? "on" : "off"); } diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c index 9d70e5c6157f..3d24133e5e49 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_main.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c @@ -3144,7 +3144,8 @@ liquidio_eswitch_mode_get(struct devlink *devlink, u16 *mode) } static int -liquidio_eswitch_mode_set(struct devlink *devlink, u16 mode) +liquidio_eswitch_mode_set(struct devlink *devlink, u16 mode, + struct netlink_ext_ack *extack) { struct lio_devlink_priv *priv; struct octeon_device *oct; diff --git a/drivers/net/ethernet/chelsio/Kconfig b/drivers/net/ethernet/chelsio/Kconfig index e2cdfa75673f..75c1c5ed2387 100644 --- a/drivers/net/ethernet/chelsio/Kconfig +++ b/drivers/net/ethernet/chelsio/Kconfig @@ -67,6 +67,7 @@ config CHELSIO_T3 config CHELSIO_T4 tristate "Chelsio Communications T4/T5/T6 Ethernet support" depends on PCI && (IPV6 || IPV6=n) + depends on THERMAL || !THERMAL select FW_LOADER select MDIO select ZLIB_DEFLATE diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c index 8b0a253a18d8..1e82b9efe447 100644 --- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c +++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c @@ -2158,6 +2158,8 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr) return -EPERM; if (copy_from_user(&t, useraddr, sizeof(t))) return -EFAULT; + if (t.cmd != CHELSIO_SET_QSET_PARAMS) + return -EINVAL; if (t.qset_idx >= SGE_QSETS) return -EINVAL; if (!in_range(t.intr_lat, 0, M_NEWTIMER) || @@ -2257,6 +2259,9 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr) if (copy_from_user(&t, useraddr, sizeof(t))) return -EFAULT; + if (t.cmd != CHELSIO_GET_QSET_PARAMS) + return -EINVAL; + /* Display qsets for all ports when offload enabled */ if (test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) { q1 = 0; @@ -2302,6 +2307,8 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr) return -EBUSY; if 
(copy_from_user(&edata, useraddr, sizeof(edata))) return -EFAULT; + if (edata.cmd != CHELSIO_SET_QSET_NUM) + return -EINVAL; if (edata.val < 1 || (edata.val > 1 && !(adapter->flags & USING_MSIX))) return -EINVAL; @@ -2342,6 +2349,8 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr) return -EPERM; if (copy_from_user(&t, useraddr, sizeof(t))) return -EFAULT; + if (t.cmd != CHELSIO_LOAD_FW) + return -EINVAL; /* Check t.len sanity ? */ fw_data = memdup_user(useraddr + sizeof(t), t.len); if (IS_ERR(fw_data)) @@ -2365,6 +2374,8 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr) return -EBUSY; if (copy_from_user(&m, useraddr, sizeof(m))) return -EFAULT; + if (m.cmd != CHELSIO_SETMTUTAB) + return -EINVAL; if (m.nmtus != NMTUS) return -EINVAL; if (m.mtus[0] < 81) /* accommodate SACK */ @@ -2406,6 +2417,8 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr) return -EBUSY; if (copy_from_user(&m, useraddr, sizeof(m))) return -EFAULT; + if (m.cmd != CHELSIO_SET_PM) + return -EINVAL; if (!is_power_of_2(m.rx_pg_sz) || !is_power_of_2(m.tx_pg_sz)) return -EINVAL; /* not power of 2 */ @@ -2439,6 +2452,8 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr) return -EIO; /* need the memory controllers */ if (copy_from_user(&t, useraddr, sizeof(t))) return -EFAULT; + if (t.cmd != CHELSIO_GET_MEM) + return -EINVAL; if ((t.addr & 7) || (t.len & 7)) return -EINVAL; if (t.mem_id == MEM_CM) @@ -2491,6 +2506,8 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr) return -EAGAIN; if (copy_from_user(&t, useraddr, sizeof(t))) return -EFAULT; + if (t.cmd != CHELSIO_SET_TRACE_FILTER) + return -EINVAL; tp = (const struct trace_params *)&t.sip; if (t.config_tx) diff --git a/drivers/net/ethernet/chelsio/cxgb4/Makefile b/drivers/net/ethernet/chelsio/cxgb4/Makefile index bea6a059a8f1..78e5d17a1d5f 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/Makefile +++ b/drivers/net/ethernet/chelsio/cxgb4/Makefile @@ -12,3 +12,6 @@ cxgb4-objs := cxgb4_main.o l2t.o smt.o t4_hw.o sge.o clip_tbl.o cxgb4_ethtool.o cxgb4-$(CONFIG_CHELSIO_T4_DCB) += cxgb4_dcb.o cxgb4-$(CONFIG_CHELSIO_T4_FCOE) += cxgb4_fcoe.o cxgb4-$(CONFIG_DEBUG_FS) += cxgb4_debugfs.o +ifdef CONFIG_THERMAL +cxgb4-objs += cxgb4_thermal.o +endif diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h index b5010bd32ea3..b16f4b3ef4c5 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h @@ -52,6 +52,7 @@ #include <linux/ptp_clock_kernel.h> #include <linux/ptp_classify.h> #include <linux/crash_dump.h> +#include <linux/thermal.h> #include <asm/io.h> #include "t4_chip_type.h" #include "cxgb4_uld.h" @@ -890,6 +891,14 @@ struct mps_encap_entry { atomic_t refcnt; }; +#if IS_ENABLED(CONFIG_THERMAL) +struct ch_thermal { + struct thermal_zone_device *tzdev; + int trip_temp; + int trip_type; +}; +#endif + struct adapter { void __iomem *regs; void __iomem *bar2; @@ -1008,6 +1017,9 @@ struct adapter { /* Dump buffer for collecting logs in kdump kernel */ struct vmcoredd_data vmcoredd; +#if IS_ENABLED(CONFIG_THERMAL) + struct ch_thermal ch_thermal; +#endif }; /* Support for "sched-class" command to allow a TX Scheduling Class to be @@ -1862,4 +1874,8 @@ void cxgb4_ring_tx_db(struct adapter *adap, struct sge_txq *q, int n); int t4_set_vlan_acl(struct adapter *adap, unsigned int mbox, unsigned int vf, u16 vlan); int cxgb4_dcb_enabled(const struct 
net_device *dev); + +int cxgb4_thermal_init(struct adapter *adap); +int cxgb4_thermal_remove(struct adapter *adap); + #endif /* __CXGB4_H__ */ diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index 1a93efa60e71..2de0590a62c4 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c @@ -5864,6 +5864,10 @@ fw_attach_fail: if (!is_t4(adapter->params.chip)) cxgb4_ptp_init(adapter); + if (IS_ENABLED(CONFIG_THERMAL) && + !is_t4(adapter->params.chip) && (adapter->flags & FW_OK)) + cxgb4_thermal_init(adapter); + print_adapter_info(adapter); return 0; @@ -5929,6 +5933,8 @@ static void remove_one(struct pci_dev *pdev) if (!is_t4(adapter->params.chip)) cxgb4_ptp_stop(adapter); + if (IS_ENABLED(CONFIG_THERMAL)) + cxgb4_thermal_remove(adapter); /* If we allocated filters, free up state associated with any * valid filters ... diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_thermal.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_thermal.c new file mode 100644 index 000000000000..28052e7504e5 --- /dev/null +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_thermal.c @@ -0,0 +1,114 @@ +/* + * Copyright (C) 2017 Chelsio Communications. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Written by: Ganesh Goudar (ganeshgr@chelsio.com) + */ + +#include "cxgb4.h" + +#define CXGB4_NUM_TRIPS 1 + +static int cxgb4_thermal_get_temp(struct thermal_zone_device *tzdev, + int *temp) +{ + struct adapter *adap = tzdev->devdata; + u32 param, val; + int ret; + + param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) | + FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_DIAG) | + FW_PARAMS_PARAM_Y_V(FW_PARAM_DEV_DIAG_TMP)); + + ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, + ¶m, &val); + if (ret < 0 || val == 0) + return -1; + + *temp = val * 1000; + return 0; +} + +static int cxgb4_thermal_get_trip_type(struct thermal_zone_device *tzdev, + int trip, enum thermal_trip_type *type) +{ + struct adapter *adap = tzdev->devdata; + + if (!adap->ch_thermal.trip_temp) + return -EINVAL; + + *type = adap->ch_thermal.trip_type; + return 0; +} + +static int cxgb4_thermal_get_trip_temp(struct thermal_zone_device *tzdev, + int trip, int *temp) +{ + struct adapter *adap = tzdev->devdata; + + if (!adap->ch_thermal.trip_temp) + return -EINVAL; + + *temp = adap->ch_thermal.trip_temp; + return 0; +} + +static struct thermal_zone_device_ops cxgb4_thermal_ops = { + .get_temp = cxgb4_thermal_get_temp, + .get_trip_type = cxgb4_thermal_get_trip_type, + .get_trip_temp = cxgb4_thermal_get_trip_temp, +}; + +int cxgb4_thermal_init(struct adapter *adap) +{ + struct ch_thermal *ch_thermal = &adap->ch_thermal; + int num_trip = CXGB4_NUM_TRIPS; + u32 param, val; + int ret; + + /* on older firmwares we may not get the trip temperature, + * set the num of trips to 0. 
+ */ + param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) | + FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_DIAG) | + FW_PARAMS_PARAM_Y_V(FW_PARAM_DEV_DIAG_MAXTMPTHRESH)); + + ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, + ¶m, &val); + if (ret < 0) { + num_trip = 0; /* could not get trip temperature */ + } else { + ch_thermal->trip_temp = val * 1000; + ch_thermal->trip_type = THERMAL_TRIP_CRITICAL; + } + + ch_thermal->tzdev = thermal_zone_device_register("cxgb4", num_trip, + 0, adap, + &cxgb4_thermal_ops, + NULL, 0, 0); + if (IS_ERR(ch_thermal->tzdev)) { + ret = PTR_ERR(ch_thermal->tzdev); + dev_err(adap->pdev_dev, "Failed to register thermal zone\n"); + ch_thermal->tzdev = NULL; + return ret; + } + return 0; +} + +int cxgb4_thermal_remove(struct adapter *adap) +{ + if (adap->ch_thermal.tzdev) + thermal_zone_device_unregister(adap->ch_thermal.tzdev); + return 0; +} diff --git a/drivers/net/ethernet/chelsio/cxgb4/sched.c b/drivers/net/ethernet/chelsio/cxgb4/sched.c index 7fc656680299..52edb688942b 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/sched.c +++ b/drivers/net/ethernet/chelsio/cxgb4/sched.c @@ -38,7 +38,6 @@ #include "cxgb4.h" #include "sched.h" -/* Spinlock must be held by caller */ static int t4_sched_class_fw_cmd(struct port_info *pi, struct ch_sched_params *p, enum sched_fw_ops op) @@ -67,7 +66,6 @@ static int t4_sched_class_fw_cmd(struct port_info *pi, return err; } -/* Spinlock must be held by caller */ static int t4_sched_bind_unbind_op(struct port_info *pi, void *arg, enum sched_bind_type type, bool bind) { @@ -163,7 +161,6 @@ static int t4_sched_queue_unbind(struct port_info *pi, struct ch_sched_queue *p) if (e && index >= 0) { int i = 0; - spin_lock(&e->lock); list_for_each_entry(qe, &e->queue_list, list) { if (i == index) break; @@ -171,10 +168,8 @@ static int t4_sched_queue_unbind(struct port_info *pi, struct ch_sched_queue *p) } err = t4_sched_bind_unbind_op(pi, (void *)qe, SCHED_QUEUE, false); - if (err) { - spin_unlock(&e->lock); - goto out; - } + if (err) + return err; list_del(&qe->list); kvfree(qe); @@ -182,9 +177,7 @@ static int t4_sched_queue_unbind(struct port_info *pi, struct ch_sched_queue *p) e->state = SCHED_STATE_UNUSED; memset(&e->info, 0, sizeof(e->info)); } - spin_unlock(&e->lock); } -out: return err; } @@ -210,10 +203,8 @@ static int t4_sched_queue_bind(struct port_info *pi, struct ch_sched_queue *p) /* Unbind queue from any existing class */ err = t4_sched_queue_unbind(pi, p); - if (err) { - kvfree(qe); - goto out; - } + if (err) + goto out_err; /* Bind queue to specified class */ memset(qe, 0, sizeof(*qe)); @@ -221,18 +212,16 @@ static int t4_sched_queue_bind(struct port_info *pi, struct ch_sched_queue *p) memcpy(&qe->param, p, sizeof(qe->param)); e = &s->tab[qe->param.class]; - spin_lock(&e->lock); err = t4_sched_bind_unbind_op(pi, (void *)qe, SCHED_QUEUE, true); - if (err) { - kvfree(qe); - spin_unlock(&e->lock); - goto out; - } + if (err) + goto out_err; list_add_tail(&qe->list, &e->queue_list); atomic_inc(&e->refcnt); - spin_unlock(&e->lock); -out: + return err; + +out_err: + kvfree(qe); return err; } @@ -296,8 +285,6 @@ int cxgb4_sched_class_bind(struct net_device *dev, void *arg, enum sched_bind_type type) { struct port_info *pi = netdev2pinfo(dev); - struct sched_table *s; - int err = 0; u8 class_id; if (!can_sched(dev)) @@ -323,12 +310,8 @@ int cxgb4_sched_class_bind(struct net_device *dev, void *arg, if (class_id == SCHED_CLS_NONE) return -ENOTSUPP; - s = pi->sched_tbl; - write_lock(&s->rw_lock); - err = t4_sched_class_bind_unbind_op(pi, 
arg, type, true); - write_unlock(&s->rw_lock); + return t4_sched_class_bind_unbind_op(pi, arg, type, true); - return err; } /** @@ -343,8 +326,6 @@ int cxgb4_sched_class_unbind(struct net_device *dev, void *arg, enum sched_bind_type type) { struct port_info *pi = netdev2pinfo(dev); - struct sched_table *s; - int err = 0; u8 class_id; if (!can_sched(dev)) @@ -367,12 +348,7 @@ int cxgb4_sched_class_unbind(struct net_device *dev, void *arg, if (!valid_class_id(dev, class_id)) return -EINVAL; - s = pi->sched_tbl; - write_lock(&s->rw_lock); - err = t4_sched_class_bind_unbind_op(pi, arg, type, false); - write_unlock(&s->rw_lock); - - return err; + return t4_sched_class_bind_unbind_op(pi, arg, type, false); } /* If @p is NULL, fetch any available unused class */ @@ -425,7 +401,6 @@ static struct sched_class *t4_sched_class_lookup(struct port_info *pi, static struct sched_class *t4_sched_class_alloc(struct port_info *pi, struct ch_sched_params *p) { - struct sched_table *s = pi->sched_tbl; struct sched_class *e; u8 class_id; int err; @@ -441,7 +416,6 @@ static struct sched_class *t4_sched_class_alloc(struct port_info *pi, if (class_id != SCHED_CLS_NONE) return NULL; - write_lock(&s->rw_lock); /* See if there's an exisiting class with same * requested sched params */ @@ -452,27 +426,19 @@ static struct sched_class *t4_sched_class_alloc(struct port_info *pi, /* Fetch any available unused class */ e = t4_sched_class_lookup(pi, NULL); if (!e) - goto out; + return NULL; memcpy(&np, p, sizeof(np)); np.u.params.class = e->idx; - - spin_lock(&e->lock); /* New class */ err = t4_sched_class_fw_cmd(pi, &np, SCHED_FW_OP_ADD); - if (err) { - spin_unlock(&e->lock); - e = NULL; - goto out; - } + if (err) + return NULL; memcpy(&e->info, &np, sizeof(e->info)); atomic_set(&e->refcnt, 0); e->state = SCHED_STATE_ACTIVE; - spin_unlock(&e->lock); } -out: - write_unlock(&s->rw_lock); return e; } @@ -517,14 +483,12 @@ struct sched_table *t4_init_sched(unsigned int sched_size) return NULL; s->sched_size = sched_size; - rwlock_init(&s->rw_lock); for (i = 0; i < s->sched_size; i++) { memset(&s->tab[i], 0, sizeof(struct sched_class)); s->tab[i].idx = i; s->tab[i].state = SCHED_STATE_UNUSED; INIT_LIST_HEAD(&s->tab[i].queue_list); - spin_lock_init(&s->tab[i].lock); atomic_set(&s->tab[i].refcnt, 0); } return s; @@ -545,11 +509,9 @@ void t4_cleanup_sched(struct adapter *adap) for (i = 0; i < s->sched_size; i++) { struct sched_class *e; - write_lock(&s->rw_lock); e = &s->tab[i]; if (e->state == SCHED_STATE_ACTIVE) t4_sched_class_free(pi, e); - write_unlock(&s->rw_lock); } kvfree(s); } diff --git a/drivers/net/ethernet/chelsio/cxgb4/sched.h b/drivers/net/ethernet/chelsio/cxgb4/sched.h index 3a49e00a38a1..168fb4ce3759 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/sched.h +++ b/drivers/net/ethernet/chelsio/cxgb4/sched.h @@ -69,13 +69,11 @@ struct sched_class { u8 idx; struct ch_sched_params info; struct list_head queue_list; - spinlock_t lock; /* Per class lock */ atomic_t refcnt; }; struct sched_table { /* per port scheduling table */ u8 sched_size; - rwlock_t rw_lock; /* Table lock */ struct sched_class tab[0]; }; diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c index f85eab57e9e1..cb523949c812 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c @@ -4204,6 +4204,7 @@ int t4_link_l1cfg_core(struct adapter *adapter, unsigned int mbox, */ int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port) { + 
unsigned int fw_caps = adap->params.fw_caps_support; struct fw_port_cmd c; memset(&c, 0, sizeof(c)); @@ -4211,9 +4212,14 @@ int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port) FW_CMD_REQUEST_F | FW_CMD_EXEC_F | FW_PORT_CMD_PORTID_V(port)); c.action_to_len16 = - cpu_to_be32(FW_PORT_CMD_ACTION_V(FW_PORT_ACTION_L1_CFG) | + cpu_to_be32(FW_PORT_CMD_ACTION_V(fw_caps == FW_CAPS16 + ? FW_PORT_ACTION_L1_CFG + : FW_PORT_ACTION_L1_CFG32) | FW_LEN16(c)); - c.u.l1cfg.rcap = cpu_to_be32(FW_PORT_CAP32_ANEG); + if (fw_caps == FW_CAPS16) + c.u.l1cfg.rcap = cpu_to_be32(FW_PORT_CAP_ANEG); + else + c.u.l1cfg32.rcap32 = cpu_to_be32(FW_PORT_CAP32_ANEG); return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); } diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h index b8f75a22fb6c..f152da1ce046 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h @@ -753,7 +753,6 @@ struct cpl_abort_req_rss { }; struct cpl_abort_req_rss6 { - WR_HDR; union opcode_tid ot; __be32 srqidx_status; }; diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h index 6d2bc8789223..57584ab32043 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h @@ -1332,6 +1332,7 @@ enum fw_params_param_dev_phyfw { enum fw_params_param_dev_diag { FW_PARAM_DEV_DIAG_TMP = 0x00, FW_PARAM_DEV_DIAG_VDD = 0x01, + FW_PARAM_DEV_DIAG_MAXTMPTHRESH = 0x02, }; enum fw_params_param_dev_fwcache { diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index 74d122616e76..534787291b44 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c @@ -4002,8 +4002,6 @@ static int be_enable_vxlan_offloads(struct be_adapter *adapter) netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_UDP_TUNNEL; - netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL; - netdev->features |= NETIF_F_GSO_UDP_TUNNEL; dev_info(dev, "Enabled VxLAN offloads for UDP port %d\n", be16_to_cpu(port)); @@ -4025,8 +4023,6 @@ static void be_disable_vxlan_offloads(struct be_adapter *adapter) adapter->vxlan_port = 0; netdev->hw_enc_features = 0; - netdev->hw_features &= ~(NETIF_F_GSO_UDP_TUNNEL); - netdev->features &= ~(NETIF_F_GSO_UDP_TUNNEL); } static void be_calculate_vf_res(struct be_adapter *adapter, u16 num_vfs, @@ -5320,6 +5316,7 @@ static void be_netdev_init(struct net_device *netdev) struct be_adapter *adapter = netdev_priv(netdev); netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 | + NETIF_F_GSO_UDP_TUNNEL | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM | NETIF_F_HW_VLAN_CTAG_TX; if ((be_if_cap_flags(adapter) & BE_IF_FLAGS_RSS)) diff --git a/drivers/net/ethernet/freescale/Kconfig b/drivers/net/ethernet/freescale/Kconfig index 7a30276e1ba6..d3a62bc1f1c6 100644 --- a/drivers/net/ethernet/freescale/Kconfig +++ b/drivers/net/ethernet/freescale/Kconfig @@ -96,13 +96,6 @@ config GIANFAR on the 8540. 
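[Editor's note -- illustration, not part of the patch: the be_main.c hunks above stop flipping NETIF_F_GSO_UDP_TUNNEL in netdev->hw_features and netdev->features each time a VxLAN port is added or removed; the flag is now advertised once in be_netdev_init() and only netdev->hw_enc_features changes at runtime. A minimal sketch of the general pattern, with hypothetical foo_* names:

static void foo_netdev_init(struct net_device *netdev)
{
	/* advertise once at probe time; ethtool -K may toggle it later */
	netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
}

static void foo_vxlan_port_add(struct net_device *netdev)
{
	/* runtime tunnel state only drives the encapsulation features */
	netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL;
}

static void foo_vxlan_port_del(struct net_device *netdev)
{
	netdev->hw_enc_features &= ~NETIF_F_GSO_UDP_TUNNEL;
}

End of editor's note.]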
source "drivers/net/ethernet/freescale/dpaa/Kconfig" - -config FSL_DPAA2_ETH - tristate "Freescale DPAA2 Ethernet" - depends on FSL_MC_BUS && FSL_MC_DPIO - depends on NETDEVICES && ETHERNET - ---help--- - Ethernet driver for Freescale DPAA2 SoCs, using the - Freescale MC bus driver +source "drivers/net/ethernet/freescale/dpaa2/Kconfig" endif # NET_VENDOR_FREESCALE diff --git a/drivers/net/ethernet/freescale/dpaa2/Kconfig b/drivers/net/ethernet/freescale/dpaa2/Kconfig new file mode 100644 index 000000000000..a7f365db0eed --- /dev/null +++ b/drivers/net/ethernet/freescale/dpaa2/Kconfig @@ -0,0 +1,17 @@ +config FSL_DPAA2_ETH + tristate "Freescale DPAA2 Ethernet" + depends on FSL_MC_BUS && FSL_MC_DPIO + depends on ARCH_LAYERSCAPE || COMPILE_TEST + help + This is the DPAA2 Ethernet driver supporting Freescale SoCs + with DPAA2 (DataPath Acceleration Architecture v2). + The driver manages network objects discovered on the Freescale + MC bus. + +config FSL_DPAA2_PTP_CLOCK + tristate "Freescale DPAA2 PTP Clock" + depends on FSL_DPAA2_ETH && POSIX_TIMERS + select PTP_1588_CLOCK + help + This driver adds support for using the DPAA2 1588 timer module + as a PTP clock. diff --git a/drivers/net/ethernet/freescale/dpaa2/Makefile b/drivers/net/ethernet/freescale/dpaa2/Makefile index 9315ecdba612..2f424e0a8225 100644 --- a/drivers/net/ethernet/freescale/dpaa2/Makefile +++ b/drivers/net/ethernet/freescale/dpaa2/Makefile @@ -3,9 +3,11 @@ # Makefile for the Freescale DPAA2 Ethernet controller # -obj-$(CONFIG_FSL_DPAA2_ETH) += fsl-dpaa2-eth.o +obj-$(CONFIG_FSL_DPAA2_ETH) += fsl-dpaa2-eth.o +obj-$(CONFIG_FSL_DPAA2_PTP_CLOCK) += fsl-dpaa2-ptp.o -fsl-dpaa2-eth-objs := dpaa2-eth.o dpaa2-ethtool.o dpni.o +fsl-dpaa2-eth-objs := dpaa2-eth.o dpaa2-ethtool.o dpni.o +fsl-dpaa2-ptp-objs := dpaa2-ptp.o dprtc.o # Needed by the tracing framework CFLAGS_dpaa2-eth.o := -I$(src) diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c index 108c137ea593..156080d42a6c 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c @@ -289,10 +289,11 @@ err_frame_format: * * Observance of NAPI budget is not our concern, leaving that to the caller. */ -static int consume_frames(struct dpaa2_eth_channel *ch) +static int consume_frames(struct dpaa2_eth_channel *ch, + enum dpaa2_eth_fq_type *type) { struct dpaa2_eth_priv *priv = ch->priv; - struct dpaa2_eth_fq *fq; + struct dpaa2_eth_fq *fq = NULL; struct dpaa2_dq *dq; const struct dpaa2_fd *fd; int cleaned = 0; @@ -311,12 +312,23 @@ static int consume_frames(struct dpaa2_eth_channel *ch) fd = dpaa2_dq_fd(dq); fq = (struct dpaa2_eth_fq *)(uintptr_t)dpaa2_dq_fqd_ctx(dq); - fq->stats.frames++; fq->consume(priv, ch, fd, &ch->napi, fq->flowid); cleaned++; } while (!is_last); + if (!cleaned) + return 0; + + fq->stats.frames += cleaned; + ch->stats.frames += cleaned; + + /* A dequeue operation only pulls frames from a single queue + * into the store. Return the frame queue type as an out param. 
+ */ + if (type) + *type = fq->type; + return cleaned; } @@ -921,14 +933,16 @@ static int pull_channel(struct dpaa2_eth_channel *ch) static int dpaa2_eth_poll(struct napi_struct *napi, int budget) { struct dpaa2_eth_channel *ch; - int cleaned = 0, store_cleaned; struct dpaa2_eth_priv *priv; + int rx_cleaned = 0, txconf_cleaned = 0; + enum dpaa2_eth_fq_type type; + int store_cleaned; int err; ch = container_of(napi, struct dpaa2_eth_channel, napi); priv = ch->priv; - while (cleaned < budget) { + do { err = pull_channel(ch); if (unlikely(err)) break; @@ -936,30 +950,32 @@ static int dpaa2_eth_poll(struct napi_struct *napi, int budget) /* Refill pool if appropriate */ refill_pool(priv, ch, priv->bpid); - store_cleaned = consume_frames(ch); - cleaned += store_cleaned; + store_cleaned = consume_frames(ch, &type); + if (type == DPAA2_RX_FQ) + rx_cleaned += store_cleaned; + else + txconf_cleaned += store_cleaned; - /* If we have enough budget left for a full store, - * try a new pull dequeue, otherwise we're done here + /* If we either consumed the whole NAPI budget with Rx frames + * or we reached the Tx confirmations threshold, we're done. */ - if (store_cleaned == 0 || - cleaned > budget - DPAA2_ETH_STORE_SIZE) - break; - } - - if (cleaned < budget && napi_complete_done(napi, cleaned)) { - /* Re-enable data available notifications */ - do { - err = dpaa2_io_service_rearm(ch->dpio, &ch->nctx); - cpu_relax(); - } while (err == -EBUSY); - WARN_ONCE(err, "CDAN notifications rearm failed on core %d", - ch->nctx.desired_cpu); - } + if (rx_cleaned >= budget || + txconf_cleaned >= DPAA2_ETH_TXCONF_PER_NAPI) + return budget; + } while (store_cleaned); - ch->stats.frames += cleaned; + /* We didn't consume the entire budget, so finish napi and + * re-enable data availability notifications + */ + napi_complete_done(napi, rx_cleaned); + do { + err = dpaa2_io_service_rearm(ch->dpio, &ch->nctx); + cpu_relax(); + } while (err == -EBUSY); + WARN_ONCE(err, "CDAN notifications rearm failed on core %d", + ch->nctx.desired_cpu); - return cleaned; + return max(rx_cleaned, 1); } static void enable_ch_napi(struct dpaa2_eth_priv *priv) @@ -1076,7 +1092,7 @@ static u32 drain_channel(struct dpaa2_eth_priv *priv, do { pull_channel(ch); - drained = consume_frames(ch); + drained = consume_frames(ch, NULL); total += drained; } while (drained); diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h index 7a7a3e7bcde2..452a8e9c4f0e 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h @@ -40,6 +40,11 @@ */ #define DPAA2_ETH_TAILDROP_THRESH (64 * 1024) +/* Maximum number of Tx confirmation frames to be processed + * in a single NAPI call + */ +#define DPAA2_ETH_TXCONF_PER_NAPI 256 + /* Buffer quota per queue. Must be large enough such that for minimum sized * frames taildrop kicks in before the bpool gets depleted, so we compute * how many 64B frames fit inside the taildrop threshold and add a margin diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.c new file mode 100644 index 000000000000..84b942b1eccc --- /dev/null +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.c @@ -0,0 +1,222 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright 2013-2016 Freescale Semiconductor Inc. 
+ * Copyright 2016-2018 NXP + */ + +#include <linux/module.h> +#include <linux/slab.h> +#include <linux/ptp_clock_kernel.h> +#include <linux/fsl/mc.h> + +#include "dpaa2-ptp.h" + +struct ptp_dpaa2_priv { + struct fsl_mc_device *ptp_mc_dev; + struct ptp_clock *clock; + struct ptp_clock_info caps; + u32 freq_comp; +}; + +/* PTP clock operations */ +static int ptp_dpaa2_adjfreq(struct ptp_clock_info *ptp, s32 ppb) +{ + struct ptp_dpaa2_priv *ptp_dpaa2 = + container_of(ptp, struct ptp_dpaa2_priv, caps); + struct fsl_mc_device *mc_dev = ptp_dpaa2->ptp_mc_dev; + struct device *dev = &mc_dev->dev; + u64 adj; + u32 diff, tmr_add; + int neg_adj = 0; + int err = 0; + + if (ppb < 0) { + neg_adj = 1; + ppb = -ppb; + } + + tmr_add = ptp_dpaa2->freq_comp; + adj = tmr_add; + adj *= ppb; + diff = div_u64(adj, 1000000000ULL); + + tmr_add = neg_adj ? tmr_add - diff : tmr_add + diff; + + err = dprtc_set_freq_compensation(mc_dev->mc_io, 0, + mc_dev->mc_handle, tmr_add); + if (err) + dev_err(dev, "dprtc_set_freq_compensation err %d\n", err); + return err; +} + +static int ptp_dpaa2_adjtime(struct ptp_clock_info *ptp, s64 delta) +{ + struct ptp_dpaa2_priv *ptp_dpaa2 = + container_of(ptp, struct ptp_dpaa2_priv, caps); + struct fsl_mc_device *mc_dev = ptp_dpaa2->ptp_mc_dev; + struct device *dev = &mc_dev->dev; + s64 now; + int err = 0; + + err = dprtc_get_time(mc_dev->mc_io, 0, mc_dev->mc_handle, &now); + if (err) { + dev_err(dev, "dprtc_get_time err %d\n", err); + return err; + } + + now += delta; + + err = dprtc_set_time(mc_dev->mc_io, 0, mc_dev->mc_handle, now); + if (err) + dev_err(dev, "dprtc_set_time err %d\n", err); + return err; +} + +static int ptp_dpaa2_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts) +{ + struct ptp_dpaa2_priv *ptp_dpaa2 = + container_of(ptp, struct ptp_dpaa2_priv, caps); + struct fsl_mc_device *mc_dev = ptp_dpaa2->ptp_mc_dev; + struct device *dev = &mc_dev->dev; + u64 ns; + u32 remainder; + int err = 0; + + err = dprtc_get_time(mc_dev->mc_io, 0, mc_dev->mc_handle, &ns); + if (err) { + dev_err(dev, "dprtc_get_time err %d\n", err); + return err; + } + + ts->tv_sec = div_u64_rem(ns, 1000000000, &remainder); + ts->tv_nsec = remainder; + return err; +} + +static int ptp_dpaa2_settime(struct ptp_clock_info *ptp, + const struct timespec64 *ts) +{ + struct ptp_dpaa2_priv *ptp_dpaa2 = + container_of(ptp, struct ptp_dpaa2_priv, caps); + struct fsl_mc_device *mc_dev = ptp_dpaa2->ptp_mc_dev; + struct device *dev = &mc_dev->dev; + u64 ns; + int err = 0; + + ns = ts->tv_sec * 1000000000ULL; + ns += ts->tv_nsec; + + err = dprtc_set_time(mc_dev->mc_io, 0, mc_dev->mc_handle, ns); + if (err) + dev_err(dev, "dprtc_set_time err %d\n", err); + return err; +} + +static const struct ptp_clock_info ptp_dpaa2_caps = { + .owner = THIS_MODULE, + .name = "DPAA2 PTP Clock", + .max_adj = 512000, + .n_alarm = 2, + .n_ext_ts = 2, + .n_per_out = 3, + .n_pins = 0, + .pps = 1, + .adjfreq = ptp_dpaa2_adjfreq, + .adjtime = ptp_dpaa2_adjtime, + .gettime64 = ptp_dpaa2_gettime, + .settime64 = ptp_dpaa2_settime, +}; + +static int dpaa2_ptp_probe(struct fsl_mc_device *mc_dev) +{ + struct device *dev = &mc_dev->dev; + struct ptp_dpaa2_priv *ptp_dpaa2; + u32 tmr_add = 0; + int err; + + ptp_dpaa2 = devm_kzalloc(dev, sizeof(*ptp_dpaa2), GFP_KERNEL); + if (!ptp_dpaa2) + return -ENOMEM; + + err = fsl_mc_portal_allocate(mc_dev, 0, &mc_dev->mc_io); + if (err) { + dev_err(dev, "fsl_mc_portal_allocate err %d\n", err); + goto err_exit; + } + + err = dprtc_open(mc_dev->mc_io, 0, mc_dev->obj_desc.id, + &mc_dev->mc_handle); + if 
(err) { + dev_err(dev, "dprtc_open err %d\n", err); + goto err_free_mcp; + } + + ptp_dpaa2->ptp_mc_dev = mc_dev; + + err = dprtc_get_freq_compensation(mc_dev->mc_io, 0, + mc_dev->mc_handle, &tmr_add); + if (err) { + dev_err(dev, "dprtc_get_freq_compensation err %d\n", err); + goto err_close; + } + + ptp_dpaa2->freq_comp = tmr_add; + ptp_dpaa2->caps = ptp_dpaa2_caps; + + ptp_dpaa2->clock = ptp_clock_register(&ptp_dpaa2->caps, dev); + if (IS_ERR(ptp_dpaa2->clock)) { + err = PTR_ERR(ptp_dpaa2->clock); + goto err_close; + } + + dpaa2_phc_index = ptp_clock_index(ptp_dpaa2->clock); + + dev_set_drvdata(dev, ptp_dpaa2); + + return 0; + +err_close: + dprtc_close(mc_dev->mc_io, 0, mc_dev->mc_handle); +err_free_mcp: + fsl_mc_portal_free(mc_dev->mc_io); +err_exit: + return err; +} + +static int dpaa2_ptp_remove(struct fsl_mc_device *mc_dev) +{ + struct ptp_dpaa2_priv *ptp_dpaa2; + struct device *dev = &mc_dev->dev; + + ptp_dpaa2 = dev_get_drvdata(dev); + ptp_clock_unregister(ptp_dpaa2->clock); + + dprtc_close(mc_dev->mc_io, 0, mc_dev->mc_handle); + fsl_mc_portal_free(mc_dev->mc_io); + + return 0; +} + +static const struct fsl_mc_device_id dpaa2_ptp_match_id_table[] = { + { + .vendor = FSL_MC_VENDOR_FREESCALE, + .obj_type = "dprtc", + }, + {} +}; +MODULE_DEVICE_TABLE(fslmc, dpaa2_ptp_match_id_table); + +static struct fsl_mc_driver dpaa2_ptp_drv = { + .driver = { + .name = KBUILD_MODNAME, + .owner = THIS_MODULE, + }, + .probe = dpaa2_ptp_probe, + .remove = dpaa2_ptp_remove, + .match_id_table = dpaa2_ptp_match_id_table, +}; + +module_fsl_mc_driver(dpaa2_ptp_drv); + +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("DPAA2 PTP Clock Driver"); diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.h b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.h new file mode 100644 index 000000000000..ff2e177395d4 --- /dev/null +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.h @@ -0,0 +1,14 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright 2018 NXP + */ + +#ifndef __RTC_H +#define __RTC_H + +#include "dprtc.h" +#include "dprtc-cmd.h" + +extern int dpaa2_phc_index; + +#endif diff --git a/drivers/net/ethernet/freescale/dpaa2/dprtc-cmd.h b/drivers/net/ethernet/freescale/dpaa2/dprtc-cmd.h new file mode 100644 index 000000000000..9af4ac71f347 --- /dev/null +++ b/drivers/net/ethernet/freescale/dpaa2/dprtc-cmd.h @@ -0,0 +1,40 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright 2013-2016 Freescale Semiconductor Inc. 
+ * Copyright 2016-2018 NXP + */ + +#ifndef _FSL_DPRTC_CMD_H +#define _FSL_DPRTC_CMD_H + +/* Command versioning */ +#define DPRTC_CMD_BASE_VERSION 1 +#define DPRTC_CMD_ID_OFFSET 4 + +#define DPRTC_CMD(id) (((id) << DPRTC_CMD_ID_OFFSET) | DPRTC_CMD_BASE_VERSION) + +/* Command IDs */ +#define DPRTC_CMDID_CLOSE DPRTC_CMD(0x800) +#define DPRTC_CMDID_OPEN DPRTC_CMD(0x810) + +#define DPRTC_CMDID_SET_FREQ_COMPENSATION DPRTC_CMD(0x1d1) +#define DPRTC_CMDID_GET_FREQ_COMPENSATION DPRTC_CMD(0x1d2) +#define DPRTC_CMDID_GET_TIME DPRTC_CMD(0x1d3) +#define DPRTC_CMDID_SET_TIME DPRTC_CMD(0x1d4) + +#pragma pack(push, 1) +struct dprtc_cmd_open { + __le32 dprtc_id; +}; + +struct dprtc_get_freq_compensation { + __le32 freq_compensation; +}; + +struct dprtc_time { + __le64 time; +}; + +#pragma pack(pop) + +#endif /* _FSL_DPRTC_CMD_H */ diff --git a/drivers/net/ethernet/freescale/dpaa2/dprtc.c b/drivers/net/ethernet/freescale/dpaa2/dprtc.c new file mode 100644 index 000000000000..c13e09bc7b9d --- /dev/null +++ b/drivers/net/ethernet/freescale/dpaa2/dprtc.c @@ -0,0 +1,194 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright 2013-2016 Freescale Semiconductor Inc. + * Copyright 2016-2018 NXP + */ + +#include <linux/fsl/mc.h> + +#include "dprtc.h" +#include "dprtc-cmd.h" + +/** + * dprtc_open() - Open a control session for the specified object. + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @dprtc_id: DPRTC unique ID + * @token: Returned token; use in subsequent API calls + * + * This function can be used to open a control session for an + * already created object; an object may have been declared in + * the DPL or by calling the dprtc_create function. + * This function returns a unique authentication token, + * associated with the specific object ID and the specific MC + * portal; this token must be used in all subsequent commands for + * this specific object + * + * Return: '0' on Success; Error code otherwise. + */ +int dprtc_open(struct fsl_mc_io *mc_io, + u32 cmd_flags, + int dprtc_id, + u16 *token) +{ + struct dprtc_cmd_open *cmd_params; + struct fsl_mc_command cmd = { 0 }; + int err; + + cmd.header = mc_encode_cmd_header(DPRTC_CMDID_OPEN, + cmd_flags, + 0); + cmd_params = (struct dprtc_cmd_open *)cmd.params; + cmd_params->dprtc_id = cpu_to_le32(dprtc_id); + + err = mc_send_command(mc_io, &cmd); + if (err) + return err; + + *token = mc_cmd_hdr_read_token(&cmd); + + return 0; +} + +/** + * dprtc_close() - Close the control session of the object + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPRTC object + * + * After this function is called, no further operations are + * allowed on the object without opening a new control session. + * + * Return: '0' on Success; Error code otherwise. + */ +int dprtc_close(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token) +{ + struct fsl_mc_command cmd = { 0 }; + + cmd.header = mc_encode_cmd_header(DPRTC_CMDID_CLOSE, cmd_flags, + token); + + return mc_send_command(mc_io, &cmd); +} + +/** + * dprtc_set_freq_compensation() - Sets a new frequency compensation value. + * + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPRTC object + * @freq_compensation: The new frequency compensation value to set. + * + * Return: '0' on Success; Error code otherwise. 
+ */ +int dprtc_set_freq_compensation(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + u32 freq_compensation) +{ + struct dprtc_get_freq_compensation *cmd_params; + struct fsl_mc_command cmd = { 0 }; + + cmd.header = mc_encode_cmd_header(DPRTC_CMDID_SET_FREQ_COMPENSATION, + cmd_flags, + token); + cmd_params = (struct dprtc_get_freq_compensation *)cmd.params; + cmd_params->freq_compensation = cpu_to_le32(freq_compensation); + + return mc_send_command(mc_io, &cmd); +} + +/** + * dprtc_get_freq_compensation() - Retrieves the frequency compensation value + * + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPRTC object + * @freq_compensation: Frequency compensation value + * + * Return: '0' on Success; Error code otherwise. + */ +int dprtc_get_freq_compensation(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + u32 *freq_compensation) +{ + struct dprtc_get_freq_compensation *rsp_params; + struct fsl_mc_command cmd = { 0 }; + int err; + + cmd.header = mc_encode_cmd_header(DPRTC_CMDID_GET_FREQ_COMPENSATION, + cmd_flags, + token); + + err = mc_send_command(mc_io, &cmd); + if (err) + return err; + + rsp_params = (struct dprtc_get_freq_compensation *)cmd.params; + *freq_compensation = le32_to_cpu(rsp_params->freq_compensation); + + return 0; +} + +/** + * dprtc_get_time() - Returns the current RTC time. + * + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPRTC object + * @time: Current RTC time. + * + * Return: '0' on Success; Error code otherwise. + */ +int dprtc_get_time(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + uint64_t *time) +{ + struct dprtc_time *rsp_params; + struct fsl_mc_command cmd = { 0 }; + int err; + + cmd.header = mc_encode_cmd_header(DPRTC_CMDID_GET_TIME, + cmd_flags, + token); + + err = mc_send_command(mc_io, &cmd); + if (err) + return err; + + rsp_params = (struct dprtc_time *)cmd.params; + *time = le64_to_cpu(rsp_params->time); + + return 0; +} + +/** + * dprtc_set_time() - Updates current RTC time. + * + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPRTC object + * @time: New RTC time. + * + * Return: '0' on Success; Error code otherwise. + */ +int dprtc_set_time(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + uint64_t time) +{ + struct dprtc_time *cmd_params; + struct fsl_mc_command cmd = { 0 }; + + cmd.header = mc_encode_cmd_header(DPRTC_CMDID_SET_TIME, + cmd_flags, + token); + cmd_params = (struct dprtc_time *)cmd.params; + cmd_params->time = cpu_to_le64(time); + + return mc_send_command(mc_io, &cmd); +} diff --git a/drivers/net/ethernet/freescale/dpaa2/dprtc.h b/drivers/net/ethernet/freescale/dpaa2/dprtc.h new file mode 100644 index 000000000000..fe19618d6cdf --- /dev/null +++ b/drivers/net/ethernet/freescale/dpaa2/dprtc.h @@ -0,0 +1,45 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright 2013-2016 Freescale Semiconductor Inc. 
+ * Copyright 2016-2018 NXP + */ + +#ifndef __FSL_DPRTC_H +#define __FSL_DPRTC_H + +/* Data Path Real Time Counter API + * Contains initialization APIs and runtime control APIs for RTC + */ + +struct fsl_mc_io; + +int dprtc_open(struct fsl_mc_io *mc_io, + u32 cmd_flags, + int dprtc_id, + u16 *token); + +int dprtc_close(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token); + +int dprtc_set_freq_compensation(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + u32 freq_compensation); + +int dprtc_get_freq_compensation(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + u32 *freq_compensation); + +int dprtc_get_time(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + uint64_t *time); + +int dprtc_set_time(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + uint64_t time); + +#endif /* __FSL_DPRTC_H */ diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index ce74b7a46d07..a17cc973d9a3 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -1158,7 +1158,7 @@ static void fec_enet_timeout_work(struct work_struct *work) napi_disable(&fep->napi); netif_tx_lock_bh(ndev); fec_restart(ndev); - netif_wake_queue(ndev); + netif_tx_wake_all_queues(ndev); netif_tx_unlock_bh(ndev); napi_enable(&fep->napi); } @@ -1273,7 +1273,7 @@ skb_done: /* Since we have freed up a buffer, the ring is no longer full */ - if (netif_queue_stopped(ndev)) { + if (netif_tx_queue_stopped(nq)) { entries_free = fec_enet_get_free_txdesc_num(txq); if (entries_free >= txq->tx_wake_threshold) netif_tx_wake_queue(nq); @@ -1746,7 +1746,7 @@ static void fec_enet_adjust_link(struct net_device *ndev) napi_disable(&fep->napi); netif_tx_lock_bh(ndev); fec_restart(ndev); - netif_wake_queue(ndev); + netif_tx_wake_all_queues(ndev); netif_tx_unlock_bh(ndev); napi_enable(&fep->napi); } @@ -2240,7 +2240,7 @@ static int fec_enet_set_pauseparam(struct net_device *ndev, napi_disable(&fep->napi); netif_tx_lock_bh(ndev); fec_restart(ndev); - netif_wake_queue(ndev); + netif_tx_wake_all_queues(ndev); netif_tx_unlock_bh(ndev); napi_enable(&fep->napi); } diff --git a/drivers/net/ethernet/hisilicon/hns/hnae.c b/drivers/net/ethernet/hisilicon/hns/hnae.c index a051e582d541..79d03f8ee7b1 100644 --- a/drivers/net/ethernet/hisilicon/hns/hnae.c +++ b/drivers/net/ethernet/hisilicon/hns/hnae.c @@ -84,7 +84,7 @@ static void hnae_unmap_buffer(struct hnae_ring *ring, struct hnae_desc_cb *cb) if (cb->type == DESC_TYPE_SKB) dma_unmap_single(ring_to_dev(ring), cb->dma, cb->length, ring_to_dma_dir(ring)); - else + else if (cb->length) dma_unmap_page(ring_to_dev(ring), cb->dma, cb->length, ring_to_dma_dir(ring)); } diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c index f56855e63c96..28e907831b0e 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c @@ -40,9 +40,9 @@ #define SKB_TMP_LEN(SKB) \ (((SKB)->transport_header - (SKB)->mac_header) + tcp_hdrlen(SKB)) -static void fill_v2_desc(struct hnae_ring *ring, void *priv, - int size, dma_addr_t dma, int frag_end, - int buf_num, enum hns_desc_type type, int mtu) +static void fill_v2_desc_hw(struct hnae_ring *ring, void *priv, int size, + int send_sz, dma_addr_t dma, int frag_end, + int buf_num, enum hns_desc_type type, int mtu) { struct hnae_desc *desc = &ring->desc[ring->next_to_use]; struct hnae_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use]; @@ -64,7 +64,7 @@ static void 
fill_v2_desc(struct hnae_ring *ring, void *priv, desc_cb->type = type; desc->addr = cpu_to_le64(dma); - desc->tx.send_size = cpu_to_le16((u16)size); + desc->tx.send_size = cpu_to_le16((u16)send_sz); /* config bd buffer end */ hnae_set_bit(rrcfv, HNSV2_TXD_VLD_B, 1); @@ -133,6 +133,14 @@ static void fill_v2_desc(struct hnae_ring *ring, void *priv, ring_ptr_move_fw(ring, next_to_use); } +static void fill_v2_desc(struct hnae_ring *ring, void *priv, + int size, dma_addr_t dma, int frag_end, + int buf_num, enum hns_desc_type type, int mtu) +{ + fill_v2_desc_hw(ring, priv, size, size, dma, frag_end, + buf_num, type, mtu); +} + static const struct acpi_device_id hns_enet_acpi_match[] = { { "HISI00C1", 0 }, { "HISI00C2", 0 }, @@ -289,15 +297,15 @@ static void fill_tso_desc(struct hnae_ring *ring, void *priv, /* when the frag size is bigger than hardware, split this frag */ for (k = 0; k < frag_buf_num; k++) - fill_v2_desc(ring, priv, - (k == frag_buf_num - 1) ? + fill_v2_desc_hw(ring, priv, k == 0 ? size : 0, + (k == frag_buf_num - 1) ? sizeoflast : BD_MAX_SEND_SIZE, - dma + BD_MAX_SEND_SIZE * k, - frag_end && (k == frag_buf_num - 1) ? 1 : 0, - buf_num, - (type == DESC_TYPE_SKB && !k) ? + dma + BD_MAX_SEND_SIZE * k, + frag_end && (k == frag_buf_num - 1) ? 1 : 0, + buf_num, + (type == DESC_TYPE_SKB && !k) ? DESC_TYPE_SKB : DESC_TYPE_PAGE, - mtu); + mtu); } netdev_tx_t hns_nic_net_xmit_hw(struct net_device *ndev, @@ -1495,21 +1503,6 @@ static int hns_nic_do_ioctl(struct net_device *netdev, struct ifreq *ifr, return phy_mii_ioctl(phy_dev, ifr, cmd); } -/* use only for netconsole to poll with the device without interrupt */ -#ifdef CONFIG_NET_POLL_CONTROLLER -static void hns_nic_poll_controller(struct net_device *ndev) -{ - struct hns_nic_priv *priv = netdev_priv(ndev); - unsigned long flags; - int i; - - local_irq_save(flags); - for (i = 0; i < priv->ae_handle->q_num * 2; i++) - napi_schedule(&priv->ring_data[i].napi); - local_irq_restore(flags); -} -#endif - static netdev_tx_t hns_nic_net_xmit(struct sk_buff *skb, struct net_device *ndev) { @@ -1962,9 +1955,6 @@ static const struct net_device_ops hns_nic_netdev_ops = { .ndo_set_features = hns_nic_set_features, .ndo_fix_features = hns_nic_fix_features, .ndo_get_stats64 = hns_nic_get_stats64, -#ifdef CONFIG_NET_POLL_CONTROLLER - .ndo_poll_controller = hns_nic_poll_controller, -#endif .ndo_set_rx_mode = hns_nic_set_rx_mode, .ndo_select_queue = hns_nic_select_queue, }; diff --git a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h index be9dc08ccf67..038326cfda93 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h +++ b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h @@ -46,9 +46,6 @@ enum hclge_mbx_mac_vlan_subcode { HCLGE_MBX_MAC_VLAN_MC_MODIFY, /* modify MC mac addr */ HCLGE_MBX_MAC_VLAN_MC_ADD, /* add new MC mac addr */ HCLGE_MBX_MAC_VLAN_MC_REMOVE, /* remove MC mac addr */ - HCLGE_MBX_MAC_VLAN_MC_FUNC_MTA_ENABLE, /* config func MTA enable */ - HCLGE_MBX_MAC_VLAN_MTA_TYPE_READ, /* read func MTA type */ - HCLGE_MBX_MAC_VLAN_MTA_STATUS_UPDATE, /* update MTA status */ }; /* below are per-VF vlan cfg subcodes */ diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h index 591ee2ee4bf6..c3bd2a10bc7d 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h +++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h @@ -355,8 +355,6 @@ struct hnae3_ae_ops { const unsigned char *addr); int (*rm_mc_addr)(struct hnae3_handle *handle, const unsigned char *addr); - 
int (*update_mta_status)(struct hnae3_handle *handle); - void (*set_tso_stats)(struct hnae3_handle *handle, int enable); void (*update_stats)(struct hnae3_handle *handle, struct net_device_stats *net_stats); @@ -481,6 +479,7 @@ struct hnae3_knic_private_info { const struct hnae3_dcb_ops *dcb_ops; u16 int_rl_setting; + enum pkt_hash_types rss_type; }; struct hnae3_roce_private_info { @@ -504,6 +503,15 @@ struct hnae3_unic_private_info { #define HNAE3_SUPPORT_VF BIT(3) #define HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK BIT(4) +#define HNAE3_USER_UPE BIT(0) /* unicast promisc enabled by user */ +#define HNAE3_USER_MPE BIT(1) /* multicast promisc enabled by user */ +#define HNAE3_BPE BIT(2) /* broadcast promisc enable */ +#define HNAE3_OVERFLOW_UPE BIT(3) /* unicast mac vlan overflow */ +#define HNAE3_OVERFLOW_MPE BIT(4) /* multicast mac vlan overflow */ +#define HNAE3_VLAN_FLTR BIT(5) /* enable vlan filter */ +#define HNAE3_UPE (HNAE3_USER_UPE | HNAE3_OVERFLOW_UPE) +#define HNAE3_MPE (HNAE3_USER_MPE | HNAE3_OVERFLOW_MPE) + struct hnae3_handle { struct hnae3_client *client; struct pci_dev *pdev; @@ -522,6 +530,8 @@ struct hnae3_handle { }; u32 numa_node_mask; /* for multi-chip support */ + + u8 netdev_flags; }; #define hnae3_set_field(origin, mask, shift, val) \ diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c index c2692563a4d9..ce93fcce2732 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c @@ -21,6 +21,7 @@ static void hns3_clear_all_ring(struct hnae3_handle *h); static void hns3_force_clear_all_rx_ring(struct hnae3_handle *h); +static void hns3_remove_hw_addr(struct net_device *netdev); static const char hns3_driver_name[] = "hns3"; const char hns3_driver_version[] = VERMAGIC_STRING; @@ -458,26 +459,81 @@ static int hns3_nic_mc_unsync(struct net_device *netdev, return 0; } +static u8 hns3_get_netdev_flags(struct net_device *netdev) +{ + u8 flags = 0; + + if (netdev->flags & IFF_PROMISC) { + flags = HNAE3_USER_UPE | HNAE3_USER_MPE; + } else { + flags |= HNAE3_VLAN_FLTR; + if (netdev->flags & IFF_ALLMULTI) + flags |= HNAE3_USER_MPE; + } + + return flags; +} + static void hns3_nic_set_rx_mode(struct net_device *netdev) { struct hnae3_handle *h = hns3_get_handle(netdev); + u8 new_flags; + int ret; - if (h->ae_algo->ops->set_promisc_mode) { - if (netdev->flags & IFF_PROMISC) - h->ae_algo->ops->set_promisc_mode(h, true, true); - else if (netdev->flags & IFF_ALLMULTI) - h->ae_algo->ops->set_promisc_mode(h, false, true); - else - h->ae_algo->ops->set_promisc_mode(h, false, false); - } - if (__dev_uc_sync(netdev, hns3_nic_uc_sync, hns3_nic_uc_unsync)) + new_flags = hns3_get_netdev_flags(netdev); + + ret = __dev_uc_sync(netdev, hns3_nic_uc_sync, hns3_nic_uc_unsync); + if (ret) { netdev_err(netdev, "sync uc address fail\n"); + if (ret == -ENOSPC) + new_flags |= HNAE3_OVERFLOW_UPE; + } + if (netdev->flags & IFF_MULTICAST) { - if (__dev_mc_sync(netdev, hns3_nic_mc_sync, hns3_nic_mc_unsync)) + ret = __dev_mc_sync(netdev, hns3_nic_mc_sync, + hns3_nic_mc_unsync); + if (ret) { netdev_err(netdev, "sync mc address fail\n"); + if (ret == -ENOSPC) + new_flags |= HNAE3_OVERFLOW_MPE; + } + } - if (h->ae_algo->ops->update_mta_status) - h->ae_algo->ops->update_mta_status(h); + hns3_update_promisc_mode(netdev, new_flags); + /* When promisc mode is enabled by the user, vlan filtering is + * disabled to let all packets in; when promisc is enabled because + * the MAC-VLAN table overflowed, vlan filtering stays enabled. + */
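/* Editor's sketch (illustrative, not part of this patch): with the HNAE3_*
 * bits defined above, hns3_get_netdev_flags() returns, for example,
 *   IFF_PROMISC set   -> HNAE3_USER_UPE | HNAE3_USER_MPE (0x03)
 *   IFF_ALLMULTI only -> HNAE3_VLAN_FLTR | HNAE3_USER_MPE (0x22)
 *   neither flag      -> HNAE3_VLAN_FLTR (0x20)
 * and an -ENOSPC return from __dev_uc_sync()/__dev_mc_sync() above ORs in
 * HNAE3_OVERFLOW_UPE/HNAE3_OVERFLOW_MPE. Because HNAE3_UPE and HNAE3_MPE
 * test the user bit and the overflow bit together, hns3_update_promisc_mode()
 * falls back to promiscuous reception once the MAC-VLAN table overflows. */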
+ hns3_enable_vlan_filter(netdev, new_flags & HNAE3_VLAN_FLTR); + h->netdev_flags = new_flags; +} + +void hns3_update_promisc_mode(struct net_device *netdev, u8 promisc_flags) +{ + struct hns3_nic_priv *priv = netdev_priv(netdev); + struct hnae3_handle *h = priv->ae_handle; + + if (h->ae_algo->ops->set_promisc_mode) { + h->ae_algo->ops->set_promisc_mode(h, + promisc_flags & HNAE3_UPE, + promisc_flags & HNAE3_MPE); + } +} + +void hns3_enable_vlan_filter(struct net_device *netdev, bool enable) +{ + struct hns3_nic_priv *priv = netdev_priv(netdev); + struct hnae3_handle *h = priv->ae_handle; + bool last_state; + + if (h->pdev->revision >= 0x21 && h->ae_algo->ops->enable_vlan_filter) { + last_state = h->netdev_flags & HNAE3_VLAN_FLTR ? true : false; + if (enable != last_state) { + netdev_info(netdev, + "%s vlan filter\n", + enable ? "enable" : "disable"); + h->ae_algo->ops->enable_vlan_filter(h, enable); + } } } @@ -2202,18 +2258,18 @@ static void hns3_rx_skb(struct hns3_enet_ring *ring, struct sk_buff *skb) napi_gro_receive(&ring->tqp_vector->napi, skb); } -static u16 hns3_parse_vlan_tag(struct hns3_enet_ring *ring, - struct hns3_desc *desc, u32 l234info) +static bool hns3_parse_vlan_tag(struct hns3_enet_ring *ring, + struct hns3_desc *desc, u32 l234info, + u16 *vlan_tag) { struct pci_dev *pdev = ring->tqp->handle->pdev; - u16 vlan_tag; if (pdev->revision == 0x20) { - vlan_tag = le16_to_cpu(desc->rx.ot_vlan_tag); - if (!(vlan_tag & VLAN_VID_MASK)) - vlan_tag = le16_to_cpu(desc->rx.vlan_tag); + *vlan_tag = le16_to_cpu(desc->rx.ot_vlan_tag); + if (!(*vlan_tag & VLAN_VID_MASK)) + *vlan_tag = le16_to_cpu(desc->rx.vlan_tag); - return vlan_tag; + return (*vlan_tag != 0); } #define HNS3_STRP_OUTER_VLAN 0x1 @@ -2222,17 +2278,29 @@ static u16 hns3_parse_vlan_tag(struct hns3_enet_ring *ring, switch (hnae3_get_field(l234info, HNS3_RXD_STRP_TAGP_M, HNS3_RXD_STRP_TAGP_S)) { case HNS3_STRP_OUTER_VLAN: - vlan_tag = le16_to_cpu(desc->rx.ot_vlan_tag); - break; + *vlan_tag = le16_to_cpu(desc->rx.ot_vlan_tag); + return true; case HNS3_STRP_INNER_VLAN: - vlan_tag = le16_to_cpu(desc->rx.vlan_tag); - break; + *vlan_tag = le16_to_cpu(desc->rx.vlan_tag); + return true; default: - vlan_tag = 0; - break; + return false; } +} + +static void hns3_set_rx_skb_rss_type(struct hns3_enet_ring *ring, + struct sk_buff *skb) +{ + struct hns3_desc *desc = &ring->desc[ring->next_to_clean]; + struct hnae3_handle *handle = ring->tqp->handle; + enum pkt_hash_types rss_type; - return vlan_tag; + if (le32_to_cpu(desc->rx.rss_hash)) + rss_type = handle->kinfo.rss_type; + else + rss_type = PKT_HASH_TYPE_NONE; + + skb_set_hash(skb, le32_to_cpu(desc->rx.rss_hash), rss_type); } static int hns3_handle_rx_bd(struct hns3_enet_ring *ring, @@ -2334,8 +2402,7 @@ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring, if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) { u16 vlan_tag; - vlan_tag = hns3_parse_vlan_tag(ring, desc, l234info); - if (vlan_tag & VLAN_VID_MASK) + if (hns3_parse_vlan_tag(ring, desc, l234info, &vlan_tag)) __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag); @@ -2377,6 +2444,8 @@ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring, ring->tqp_vector->rx_group.total_bytes += skb->len; hns3_rx_checksum(ring, skb, desc); + hns3_set_rx_skb_rss_type(ring, skb); + return 0; } @@ -3155,15 +3224,6 @@ static void hns3_init_mac_addr(struct net_device *netdev, bool init) } -static void hns3_uninit_mac_addr(struct net_device *netdev) -{ - struct 
hns3_nic_priv *priv = netdev_priv(netdev); - struct hnae3_handle *h = priv->ae_handle; - - if (h->ae_algo->ops->rm_uc_addr) - h->ae_algo->ops->rm_uc_addr(h, netdev->dev_addr); -} - static int hns3_restore_fd_rules(struct net_device *netdev) { struct hnae3_handle *h = hns3_get_handle(netdev); @@ -3296,6 +3356,8 @@ static void hns3_client_uninit(struct hnae3_handle *handle, bool reset) struct hns3_nic_priv *priv = netdev_priv(netdev); int ret; + hns3_remove_hw_addr(netdev); + if (netdev->reg_state != NETREG_UNINITIALIZED) unregister_netdev(netdev); @@ -3319,8 +3381,6 @@ static void hns3_client_uninit(struct hnae3_handle *handle, bool reset) priv->ring_data = NULL; - hns3_uninit_mac_addr(netdev); - free_netdev(netdev); } @@ -3392,6 +3452,25 @@ static void hns3_recover_hw_addr(struct net_device *ndev) hns3_nic_mc_sync(ndev, ha->addr); } +static void hns3_remove_hw_addr(struct net_device *netdev) +{ + struct netdev_hw_addr_list *list; + struct netdev_hw_addr *ha, *tmp; + + hns3_nic_uc_unsync(netdev, netdev->dev_addr); + + /* go through and unsync uc_addr entries to the device */ + list = &netdev->uc; + list_for_each_entry_safe(ha, tmp, &list->list, list) + hns3_nic_uc_unsync(netdev, ha->addr); + + /* go through and unsync mc_addr entries to the device */ + list = &netdev->mc; + list_for_each_entry_safe(ha, tmp, &list->list, list) + if (ha->refcount > 1) + hns3_nic_mc_unsync(netdev, ha->addr); +} + static void hns3_clear_tx_ring(struct hns3_enet_ring *ring) { while (ring->next_to_clean != ring->next_to_use) { @@ -3586,11 +3665,15 @@ static int hns3_reset_notify_init_enet(struct hnae3_handle *handle) { struct net_device *netdev = handle->kinfo.netdev; struct hns3_nic_priv *priv = netdev_priv(netdev); + bool vlan_filter_enable; int ret; hns3_init_mac_addr(netdev, false); - hns3_nic_set_rx_mode(netdev); hns3_recover_hw_addr(netdev); + hns3_update_promisc_mode(netdev, handle->netdev_flags); + vlan_filter_enable = netdev->flags & IFF_PROMISC ? false : true; + hns3_enable_vlan_filter(netdev, vlan_filter_enable); + /* Hardware table is only clear when pf resets */ if (!(handle->flags & HNAE3_SUPPORT_VF)) @@ -3637,14 +3720,14 @@ static int hns3_reset_notify_uninit_enet(struct hnae3_handle *handle) if (ret) netdev_err(netdev, "uninit ring error\n"); - hns3_uninit_mac_addr(netdev); - - /* it is cumbersome for hardware to pick-and-choose rules for deletion - * from TCAM. Hence, for function reset software intervention is - * required to delete the rules + /* it is cumbersome for hardware to pick-and-choose entries for deletion + * from table space. 
Hence, for function reset software intervention is + * required to delete the entries */ - if (hns3_dev_ongoing_func_reset(ae_dev)) + if (hns3_dev_ongoing_func_reset(ae_dev)) { + hns3_remove_hw_addr(netdev); hns3_del_all_fd_rules(netdev, false); + } return ret; } diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h index ac881e8fc05d..f25b281ff2aa 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h @@ -640,6 +640,9 @@ void hns3_set_vector_coalesce_tx_gl(struct hns3_enet_tqp_vector *tqp_vector, void hns3_set_vector_coalesce_rl(struct hns3_enet_tqp_vector *tqp_vector, u32 rl_value); +void hns3_enable_vlan_filter(struct net_device *netdev, bool enable); +void hns3_update_promisc_mode(struct net_device *netdev, u8 promisc_flags); + #ifdef CONFIG_HNS3_DCB void hns3_dcbnl_setup(struct hnae3_handle *handle); #else diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c index 7d79a074a214..a4762c2b8ba1 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c @@ -71,6 +71,7 @@ struct hns3_link_mode_mapping { static int hns3_lp_setup(struct net_device *ndev, enum hnae3_loop loop, bool en) { struct hnae3_handle *h = hns3_get_handle(ndev); + bool vlan_filter_enable; int ret; if (!h->ae_algo->ops->set_loopback || @@ -91,7 +92,14 @@ static int hns3_lp_setup(struct net_device *ndev, enum hnae3_loop loop, bool en) if (ret) return ret; - h->ae_algo->ops->set_promisc_mode(h, en, en); + if (en) { + h->ae_algo->ops->set_promisc_mode(h, true, true); + } else { + /* recover promisc mode before loopback test */ + hns3_update_promisc_mode(ndev, h->netdev_flags); + vlan_filter_enable = ndev->flags & IFF_PROMISC ? 
false : true; + hns3_enable_vlan_filter(ndev, vlan_filter_enable); + } return ret; } @@ -678,12 +686,13 @@ static int hns3_set_rss(struct net_device *netdev, const u32 *indir, if (!h->ae_algo || !h->ae_algo->ops || !h->ae_algo->ops->set_rss) return -EOPNOTSUPP; - /* currently we only support Toeplitz hash */ - if ((hfunc != ETH_RSS_HASH_NO_CHANGE) && (hfunc != ETH_RSS_HASH_TOP)) { - netdev_err(netdev, - "hash func not supported (only Toeplitz hash)\n"); + if ((h->pdev->revision == 0x20 && + hfunc != ETH_RSS_HASH_TOP) || (hfunc != ETH_RSS_HASH_NO_CHANGE && + hfunc != ETH_RSS_HASH_TOP && hfunc != ETH_RSS_HASH_XOR)) { + netdev_err(netdev, "hash func not supported\n"); return -EOPNOTSUPP; } + if (!indir) { netdev_err(netdev, "set rss failed for indir is empty\n"); @@ -1077,6 +1086,7 @@ static const struct ethtool_ops hns3vf_ethtool_ops = { .get_ethtool_stats = hns3_get_stats, .get_sset_count = hns3_get_sset_count, .get_rxnfc = hns3_get_rxnfc, + .set_rxnfc = hns3_set_rxnfc, .get_rxfh_key_size = hns3_get_rss_key_size, .get_rxfh_indir_size = hns3_get_rss_indir_size, .get_rxfh = hns3_get_rss, diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h index e5e66b27e03e..1ccde67db770 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h @@ -175,15 +175,9 @@ enum hclge_opcode_type { HCLGE_OPC_MAC_VLAN_REMOVE = 0x1001, HCLGE_OPC_MAC_VLAN_TYPE_ID = 0x1002, HCLGE_OPC_MAC_VLAN_INSERT = 0x1003, + HCLGE_OPC_MAC_VLAN_ALLOCATE = 0x1004, HCLGE_OPC_MAC_ETHTYPE_ADD = 0x1010, HCLGE_OPC_MAC_ETHTYPE_REMOVE = 0x1011, - HCLGE_OPC_MAC_VLAN_MASK_SET = 0x1012, - - /* Multicast linear table commands */ - HCLGE_OPC_MTA_MAC_MODE_CFG = 0x1020, - HCLGE_OPC_MTA_MAC_FUNC_CFG = 0x1021, - HCLGE_OPC_MTA_TBL_ITEM_CFG = 0x1022, - HCLGE_OPC_MTA_TBL_ITEM_QUERY = 0x1023, /* VLAN commands */ HCLGE_OPC_VLAN_FILTER_CTRL = 0x1100, @@ -402,6 +396,8 @@ struct hclge_pf_res_cmd { #define HCLGE_CFG_RSS_SIZE_M GENMASK(31, 24) #define HCLGE_CFG_SPEED_ABILITY_S 0 #define HCLGE_CFG_SPEED_ABILITY_M GENMASK(7, 0) +#define HCLGE_CFG_UMV_TBL_SPACE_S 16 +#define HCLGE_CFG_UMV_TBL_SPACE_M GENMASK(31, 16) struct hclge_cfg_param_cmd { __le32 offset; @@ -591,13 +587,12 @@ struct hclge_mac_vlan_tbl_entry_cmd { u8 rsv2[6]; }; -#define HCLGE_VLAN_MASK_EN_B 0 -struct hclge_mac_vlan_mask_entry_cmd { - u8 rsv0[2]; - u8 vlan_mask; - u8 rsv1; - u8 mac_mask[6]; - u8 rsv2[14]; +#define HCLGE_UMV_SPC_ALC_B 0 +struct hclge_umv_spc_alc_cmd { + u8 allocate; + u8 rsv1[3]; + __le32 space_size; + u8 rsv2[16]; }; #define HCLGE_MAC_MGR_MASK_VLAN_B BIT(0) @@ -622,30 +617,6 @@ struct hclge_mac_mgr_tbl_entry_cmd { u8 rsv3[2]; }; -#define HCLGE_CFG_MTA_MAC_SEL_S 0 -#define HCLGE_CFG_MTA_MAC_SEL_M GENMASK(1, 0) -#define HCLGE_CFG_MTA_MAC_EN_B 7 -struct hclge_mta_filter_mode_cmd { - u8 dmac_sel_en; /* Use lowest 2 bit as sel_mode, bit 7 as enable */ - u8 rsv[23]; -}; - -#define HCLGE_CFG_FUNC_MTA_ACCEPT_B 0 -struct hclge_cfg_func_mta_filter_cmd { - u8 accept; /* Only used lowest 1 bit */ - u8 function_id; - u8 rsv[22]; -}; - -#define HCLGE_CFG_MTA_ITEM_ACCEPT_B 0 -#define HCLGE_CFG_MTA_ITEM_IDX_S 0 -#define HCLGE_CFG_MTA_ITEM_IDX_M GENMASK(11, 0) -struct hclge_cfg_func_mta_item_cmd { - __le16 item_idx; /* Only used lowest 12 bit */ - u8 accept; /* Only used lowest 1 bit */ - u8 rsv[21]; -}; - struct hclge_mac_vlan_add_cmd { __le16 flags; __le16 mac_addr_hi16; diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 
b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c index 21ca4af3b37a..1bd83e8268fc 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c @@ -25,12 +25,11 @@ #define HCLGE_STATS_READ(p, offset) (*((u64 *)((u8 *)(p) + (offset)))) #define HCLGE_MAC_STATS_FIELD_OFF(f) (offsetof(struct hclge_mac_stats, f)) -static int hclge_set_mta_filter_mode(struct hclge_dev *hdev, - enum hclge_mta_dmac_sel_type mta_mac_sel, - bool enable); static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu); static int hclge_init_vlan_config(struct hclge_dev *hdev); static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev); +static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size, + u16 *allocated_size, bool is_alloc); static struct hnae3_ae_algo ae_algo; @@ -778,6 +777,11 @@ static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc) cfg->speed_ability = hnae3_get_field(__le32_to_cpu(req->param[1]), HCLGE_CFG_SPEED_ABILITY_M, HCLGE_CFG_SPEED_ABILITY_S); + cfg->umv_space = hnae3_get_field(__le32_to_cpu(req->param[1]), + HCLGE_CFG_UMV_TBL_SPACE_M, + HCLGE_CFG_UMV_TBL_SPACE_S); + if (!cfg->umv_space) + cfg->umv_space = HCLGE_DEFAULT_UMV_SPACE_PER_PF; } /* hclge_get_cfg: query the static parameter from flash @@ -856,6 +860,7 @@ static int hclge_configure(struct hclge_dev *hdev) hdev->tm_info.num_pg = 1; hdev->tc_max = cfg.tc_num; hdev->tm_info.hw_pfc_map = 0; + hdev->wanted_umv_size = cfg.umv_space; ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed); if (ret) { @@ -1939,40 +1944,13 @@ static int hclge_get_autoneg(struct hnae3_handle *handle) return hdev->hw.mac.autoneg; } -static int hclge_set_default_mac_vlan_mask(struct hclge_dev *hdev, - bool mask_vlan, - u8 *mac_mask) -{ - struct hclge_mac_vlan_mask_entry_cmd *req; - struct hclge_desc desc; - int status; - - req = (struct hclge_mac_vlan_mask_entry_cmd *)desc.data; - hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_MASK_SET, false); - - hnae3_set_bit(req->vlan_mask, HCLGE_VLAN_MASK_EN_B, - mask_vlan ? 
1 : 0); - ether_addr_copy(req->mac_mask, mac_mask); - - status = hclge_cmd_send(&hdev->hw, &desc, 1); - if (status) - dev_err(&hdev->pdev->dev, - "Config mac_vlan_mask failed for cmd_send, ret =%d\n", - status); - - return status; -} - static int hclge_mac_init(struct hclge_dev *hdev) { struct hnae3_handle *handle = &hdev->vport[0].nic; struct net_device *netdev = handle->kinfo.netdev; struct hclge_mac *mac = &hdev->hw.mac; - u8 mac_mask[ETH_ALEN] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00}; - struct hclge_vport *vport; int mtu; int ret; - int i; hdev->hw.mac.duplex = HCLGE_MAC_FULL; ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.speed, @@ -1985,39 +1963,6 @@ static int hclge_mac_init(struct hclge_dev *hdev) mac->link = 0; - /* Initialize the MTA table work mode */ - hdev->enable_mta = true; - hdev->mta_mac_sel_type = HCLGE_MAC_ADDR_47_36; - - ret = hclge_set_mta_filter_mode(hdev, - hdev->mta_mac_sel_type, - hdev->enable_mta); - if (ret) { - dev_err(&hdev->pdev->dev, "set mta filter mode failed %d\n", - ret); - return ret; - } - - for (i = 0; i < hdev->num_alloc_vport; i++) { - vport = &hdev->vport[i]; - vport->accept_mta_mc = false; - - memset(vport->mta_shadow, 0, sizeof(vport->mta_shadow)); - ret = hclge_cfg_func_mta_filter(hdev, vport->vport_id, false); - if (ret) { - dev_err(&hdev->pdev->dev, - "set mta filter mode fail ret=%d\n", ret); - return ret; - } - } - - ret = hclge_set_default_mac_vlan_mask(hdev, true, mac_mask); - if (ret) { - dev_err(&hdev->pdev->dev, - "set default mac_vlan_mask fail ret=%d\n", ret); - return ret; - } - if (netdev) mtu = netdev->mtu; else @@ -2828,6 +2773,22 @@ static int hclge_set_rss_tc_mode(struct hclge_dev *hdev, u16 *tc_valid, return ret; } +static void hclge_get_rss_type(struct hclge_vport *vport) +{ + if (vport->rss_tuple_sets.ipv4_tcp_en || + vport->rss_tuple_sets.ipv4_udp_en || + vport->rss_tuple_sets.ipv4_sctp_en || + vport->rss_tuple_sets.ipv6_tcp_en || + vport->rss_tuple_sets.ipv6_udp_en || + vport->rss_tuple_sets.ipv6_sctp_en) + vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L4; + else if (vport->rss_tuple_sets.ipv4_fragment_en || + vport->rss_tuple_sets.ipv6_fragment_en) + vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L3; + else + vport->nic.kinfo.rss_type = PKT_HASH_TYPE_NONE; +} + static int hclge_set_rss_input_tuple(struct hclge_dev *hdev) { struct hclge_rss_input_tuple_cmd *req; @@ -2847,6 +2808,7 @@ static int hclge_set_rss_input_tuple(struct hclge_dev *hdev) req->ipv6_udp_en = hdev->vport[0].rss_tuple_sets.ipv6_udp_en; req->ipv6_sctp_en = hdev->vport[0].rss_tuple_sets.ipv6_sctp_en; req->ipv6_fragment_en = hdev->vport[0].rss_tuple_sets.ipv6_fragment_en; + hclge_get_rss_type(&hdev->vport[0]); ret = hclge_cmd_send(&hdev->hw, &desc, 1); if (ret) dev_err(&hdev->pdev->dev, @@ -2861,8 +2823,19 @@ static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir, int i; /* Get hash algorithm */ - if (hfunc) - *hfunc = vport->rss_algo; + if (hfunc) { + switch (vport->rss_algo) { + case HCLGE_RSS_HASH_ALGO_TOEPLITZ: + *hfunc = ETH_RSS_HASH_TOP; + break; + case HCLGE_RSS_HASH_ALGO_SIMPLE: + *hfunc = ETH_RSS_HASH_XOR; + break; + default: + *hfunc = ETH_RSS_HASH_UNKNOWN; + break; + } + } /* Get the RSS Key required by the user */ if (key) @@ -2886,12 +2859,20 @@ static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir, /* Set the RSS Hash Key if specified by the user */ if (key) { - - if (hfunc == ETH_RSS_HASH_TOP || - hfunc == ETH_RSS_HASH_NO_CHANGE) + switch (hfunc) { + case ETH_RSS_HASH_TOP: hash_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ; - else + break; + case ETH_RSS_HASH_XOR: + hash_algo = HCLGE_RSS_HASH_ALGO_SIMPLE; + break; + case ETH_RSS_HASH_NO_CHANGE: + hash_algo = vport->rss_algo; + break; + default: return -EINVAL; + }
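/* Editor's note (illustrative, not part of this patch): the switch above
 * mirrors the reverse mapping added to hclge_get_rss():
 *   ETH_RSS_HASH_TOP       <-> HCLGE_RSS_HASH_ALGO_TOEPLITZ
 *   ETH_RSS_HASH_XOR       <-> HCLGE_RSS_HASH_ALGO_SIMPLE
 *   ETH_RSS_HASH_NO_CHANGE  -> keep the current vport->rss_algo
 * so a request such as "ethtool -X eth0 hfunc xor" (eth0 being an arbitrary
 * example interface) now selects the simple XOR hardware hash instead of
 * being rejected, matching the relaxed hfunc check added to hns3_set_rss()
 * in hns3_ethtool.c for revision 0x21 hardware. */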
+ ret = hclge_set_rss_algo_key(hdev, hash_algo, key); if (ret) return ret; @@ -3009,6 +2990,7 @@ static int hclge_set_rss_tuple(struct hnae3_handle *handle, vport->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en; vport->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en; vport->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en; + hclge_get_rss_type(vport); return 0; } @@ -4978,174 +4960,6 @@ static void hclge_prepare_mac_addr(struct hclge_mac_vlan_tbl_entry_cmd *new_req, new_req->mac_addr_lo16 = cpu_to_le16(low_val & 0xffff); } -static u16 hclge_get_mac_addr_to_mta_index(struct hclge_vport *vport, - const u8 *addr) -{ - u16 high_val = addr[1] | (addr[0] << 8); - struct hclge_dev *hdev = vport->back; - u32 rsh = 4 - hdev->mta_mac_sel_type; - u16 ret_val = (high_val >> rsh) & 0xfff; - - return ret_val; -} - -static int hclge_set_mta_filter_mode(struct hclge_dev *hdev, - enum hclge_mta_dmac_sel_type mta_mac_sel, - bool enable) -{ - struct hclge_mta_filter_mode_cmd *req; - struct hclge_desc desc; - int ret; - - req = (struct hclge_mta_filter_mode_cmd *)desc.data; - hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MTA_MAC_MODE_CFG, false); - - hnae3_set_bit(req->dmac_sel_en, HCLGE_CFG_MTA_MAC_EN_B, - enable); - hnae3_set_field(req->dmac_sel_en, HCLGE_CFG_MTA_MAC_SEL_M, - HCLGE_CFG_MTA_MAC_SEL_S, mta_mac_sel); - - ret = hclge_cmd_send(&hdev->hw, &desc, 1); - if (ret) - dev_err(&hdev->pdev->dev, - "Config mat filter mode failed for cmd_send, ret =%d.\n", - ret); - - return ret; -} - -int hclge_cfg_func_mta_filter(struct hclge_dev *hdev, - u8 func_id, - bool enable) -{ - struct hclge_cfg_func_mta_filter_cmd *req; - struct hclge_desc desc; - int ret; - - req = (struct hclge_cfg_func_mta_filter_cmd *)desc.data; - hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MTA_MAC_FUNC_CFG, false); - - hnae3_set_bit(req->accept, HCLGE_CFG_FUNC_MTA_ACCEPT_B, - enable); - req->function_id = func_id; - - ret = hclge_cmd_send(&hdev->hw, &desc, 1); - if (ret) - dev_err(&hdev->pdev->dev, - "Config func_id enable failed for cmd_send, ret =%d.\n", - ret); - - return ret; -} - -static int hclge_set_mta_table_item(struct hclge_vport *vport, - u16 idx, - bool enable) -{ - struct hclge_dev *hdev = vport->back; - struct hclge_cfg_func_mta_item_cmd *req; - struct hclge_desc desc; - u16 item_idx = 0; - int ret; - - req = (struct hclge_cfg_func_mta_item_cmd *)desc.data; - hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MTA_TBL_ITEM_CFG, false); - hnae3_set_bit(req->accept, HCLGE_CFG_MTA_ITEM_ACCEPT_B, enable); - - hnae3_set_field(item_idx, HCLGE_CFG_MTA_ITEM_IDX_M, - HCLGE_CFG_MTA_ITEM_IDX_S, idx); - req->item_idx = cpu_to_le16(item_idx); - - ret = hclge_cmd_send(&hdev->hw, &desc, 1); - if (ret) { - dev_err(&hdev->pdev->dev, - "Config mta table item failed for cmd_send, ret =%d.\n", - ret); - return ret; - } - - if (enable) - set_bit(idx, vport->mta_shadow); - else - clear_bit(idx, vport->mta_shadow); - - return 0; -} - -static int hclge_update_mta_status(struct hnae3_handle *handle) -{ - unsigned long mta_status[BITS_TO_LONGS(HCLGE_MTA_TBL_SIZE)]; - struct hclge_vport *vport = hclge_get_vport(handle); - struct net_device *netdev = handle->kinfo.netdev; - struct netdev_hw_addr *ha; - u16 tbl_idx; - - memset(mta_status, 0, sizeof(mta_status)); - - /* update mta_status from mc addr list */ - netdev_for_each_mc_addr(ha, netdev) { - tbl_idx = 
hclge_get_mac_addr_to_mta_index(vport, ha->addr); - set_bit(tbl_idx, mta_status); - } - - return hclge_update_mta_status_common(vport, mta_status, - 0, HCLGE_MTA_TBL_SIZE, true); -} - -int hclge_update_mta_status_common(struct hclge_vport *vport, - unsigned long *status, - u16 idx, - u16 count, - bool update_filter) -{ - struct hclge_dev *hdev = vport->back; - u16 update_max = idx + count; - u16 check_max; - int ret = 0; - bool used; - u16 i; - - /* setup mta check range */ - if (update_filter) { - i = 0; - check_max = HCLGE_MTA_TBL_SIZE; - } else { - i = idx; - check_max = update_max; - } - - used = false; - /* check and update all mta item */ - for (; i < check_max; i++) { - /* ignore unused item */ - if (!test_bit(i, vport->mta_shadow)) - continue; - - /* if i in update range then update it */ - if (i >= idx && i < update_max) - if (!test_bit(i - idx, status)) - hclge_set_mta_table_item(vport, i, false); - - if (!used && test_bit(i, vport->mta_shadow)) - used = true; - } - - /* no longer use mta, disable it */ - if (vport->accept_mta_mc && update_filter && !used) { - ret = hclge_cfg_func_mta_filter(hdev, - vport->vport_id, - false); - if (ret) - dev_err(&hdev->pdev->dev, - "disable func mta filter fail ret=%d\n", - ret); - else - vport->accept_mta_mc = false; - } - - return ret; -} - static int hclge_remove_mac_vlan_tbl(struct hclge_vport *vport, struct hclge_mac_vlan_tbl_entry_cmd *req) { @@ -5269,6 +5083,118 @@ static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport, return cfg_status; } +static int hclge_init_umv_space(struct hclge_dev *hdev) +{ + u16 allocated_size = 0; + int ret; + + ret = hclge_set_umv_space(hdev, hdev->wanted_umv_size, &allocated_size, + true); + if (ret) + return ret; + + if (allocated_size < hdev->wanted_umv_size) + dev_warn(&hdev->pdev->dev, + "Alloc umv space failed, want %d, get %d\n", + hdev->wanted_umv_size, allocated_size); + + mutex_init(&hdev->umv_mutex); + hdev->max_umv_size = allocated_size; + hdev->priv_umv_size = hdev->max_umv_size / (hdev->num_req_vfs + 2); + hdev->share_umv_size = hdev->priv_umv_size + + hdev->max_umv_size % (hdev->num_req_vfs + 2); + + return 0; +} + +static int hclge_uninit_umv_space(struct hclge_dev *hdev) +{ + int ret; + + if (hdev->max_umv_size > 0) { + ret = hclge_set_umv_space(hdev, hdev->max_umv_size, NULL, + false); + if (ret) + return ret; + hdev->max_umv_size = 0; + } + mutex_destroy(&hdev->umv_mutex); + + return 0; +} + +static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size, + u16 *allocated_size, bool is_alloc) +{ + struct hclge_umv_spc_alc_cmd *req; + struct hclge_desc desc; + int ret; + + req = (struct hclge_umv_spc_alc_cmd *)desc.data; + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_ALLOCATE, false); + hnae3_set_bit(req->allocate, HCLGE_UMV_SPC_ALC_B, !is_alloc); + req->space_size = cpu_to_le32(space_size); + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "%s umv space failed for cmd_send, ret =%d\n", + is_alloc ? "allocate" : "free", ret); + return ret; + } + + if (is_alloc && allocated_size) + *allocated_size = le32_to_cpu(desc.data[1]); + + return 0; +}
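/* Editor's sketch (assumed numbers, not part of this patch): suppose the
 * firmware grants max_umv_size = 256 entries to a PF with num_req_vfs = 6.
 * The pool is cut into num_req_vfs + 2 = 8 slices:
 *   priv_umv_size  = 256 / 8      = 32 entries reserved per function;
 *   share_umv_size = 32 + 256 % 8 = 32 entries usable by any function.
 * hclge_is_umv_space_full() below therefore rejects a new unicast entry
 * only once a function has consumed its 32 private entries and the shared
 * pool has been drained by hclge_update_umv_space(). */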
"allocate" : "free", ret); + return ret; + } + + if (is_alloc && allocated_size) + *allocated_size = le32_to_cpu(desc.data[1]); + + return 0; +} + +static void hclge_reset_umv_space(struct hclge_dev *hdev) +{ + struct hclge_vport *vport; + int i; + + for (i = 0; i < hdev->num_alloc_vport; i++) { + vport = &hdev->vport[i]; + vport->used_umv_num = 0; + } + + mutex_lock(&hdev->umv_mutex); + hdev->share_umv_size = hdev->priv_umv_size + + hdev->max_umv_size % (hdev->num_req_vfs + 2); + mutex_unlock(&hdev->umv_mutex); +} + +static bool hclge_is_umv_space_full(struct hclge_vport *vport) +{ + struct hclge_dev *hdev = vport->back; + bool is_full; + + mutex_lock(&hdev->umv_mutex); + is_full = (vport->used_umv_num >= hdev->priv_umv_size && + hdev->share_umv_size == 0); + mutex_unlock(&hdev->umv_mutex); + + return is_full; +} + +static void hclge_update_umv_space(struct hclge_vport *vport, bool is_free) +{ + struct hclge_dev *hdev = vport->back; + + mutex_lock(&hdev->umv_mutex); + if (is_free) { + if (vport->used_umv_num > hdev->priv_umv_size) + hdev->share_umv_size++; + vport->used_umv_num--; + } else { + if (vport->used_umv_num >= hdev->priv_umv_size) + hdev->share_umv_size--; + vport->used_umv_num++; + } + mutex_unlock(&hdev->umv_mutex); +} + static int hclge_add_uc_addr(struct hnae3_handle *handle, const unsigned char *addr) { @@ -5314,8 +5240,19 @@ int hclge_add_uc_addr_common(struct hclge_vport *vport, * is not allowed in the mac vlan table. */ ret = hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false); - if (ret == -ENOENT) - return hclge_add_mac_vlan_tbl(vport, &req, NULL); + if (ret == -ENOENT) { + if (!hclge_is_umv_space_full(vport)) { + ret = hclge_add_mac_vlan_tbl(vport, &req, NULL); + if (!ret) + hclge_update_umv_space(vport, false); + return ret; + } + + dev_err(&hdev->pdev->dev, "UC MAC table full(%u)\n", + hdev->priv_umv_size); + + return -ENOSPC; + } /* check if we just hit the duplicate */ if (!ret) @@ -5358,6 +5295,8 @@ int hclge_rm_uc_addr_common(struct hclge_vport *vport, hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0); hclge_prepare_mac_addr(&req, addr); ret = hclge_remove_mac_vlan_tbl(vport, &req); + if (!ret) + hclge_update_umv_space(vport, true); return ret; } @@ -5376,7 +5315,6 @@ int hclge_add_mc_addr_common(struct hclge_vport *vport, struct hclge_dev *hdev = vport->back; struct hclge_mac_vlan_tbl_entry_cmd req; struct hclge_desc desc[3]; - u16 tbl_idx; int status; /* mac addr check */ @@ -5406,25 +5344,8 @@ int hclge_add_mc_addr_common(struct hclge_vport *vport, status = hclge_add_mac_vlan_tbl(vport, &req, desc); } - /* If mc mac vlan table is full, use MTA table */ - if (status == -ENOSPC) { - if (!vport->accept_mta_mc) { - status = hclge_cfg_func_mta_filter(hdev, - vport->vport_id, - true); - if (status) { - dev_err(&hdev->pdev->dev, - "set mta filter mode fail ret=%d\n", - status); - return status; - } - vport->accept_mta_mc = true; - } - - /* Set MTA table for this MAC address */ - tbl_idx = hclge_get_mac_addr_to_mta_index(vport, addr); - status = hclge_set_mta_table_item(vport, tbl_idx, true); - } + if (status == -ENOSPC) + dev_err(&hdev->pdev->dev, "mc mac vlan table is full\n"); return status; } @@ -5639,7 +5560,7 @@ static int hclge_do_ioctl(struct hnae3_handle *handle, struct ifreq *ifr, } static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type, - bool filter_en) + u8 fe_type, bool filter_en) { struct hclge_vlan_filter_ctrl_cmd *req; struct hclge_desc desc; @@ -5649,7 +5570,7 @@ static int hclge_set_vlan_filter_ctrl(struct hclge_dev 
*hdev, u8 vlan_type, req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data; req->vlan_type = vlan_type; - req->vlan_fe = filter_en; + req->vlan_fe = filter_en ? fe_type : 0; ret = hclge_cmd_send(&hdev->hw, &desc, 1); if (ret) @@ -5661,13 +5582,34 @@ static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type, #define HCLGE_FILTER_TYPE_VF 0 #define HCLGE_FILTER_TYPE_PORT 1 +#define HCLGE_FILTER_FE_EGRESS_V1_B BIT(0) +#define HCLGE_FILTER_FE_NIC_INGRESS_B BIT(0) +#define HCLGE_FILTER_FE_NIC_EGRESS_B BIT(1) +#define HCLGE_FILTER_FE_ROCE_INGRESS_B BIT(2) +#define HCLGE_FILTER_FE_ROCE_EGRESS_B BIT(3) +#define HCLGE_FILTER_FE_EGRESS (HCLGE_FILTER_FE_NIC_EGRESS_B \ + | HCLGE_FILTER_FE_ROCE_EGRESS_B) +#define HCLGE_FILTER_FE_INGRESS (HCLGE_FILTER_FE_NIC_INGRESS_B \ + | HCLGE_FILTER_FE_ROCE_INGRESS_B) static void hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable) { struct hclge_vport *vport = hclge_get_vport(handle); struct hclge_dev *hdev = vport->back; - hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF, enable); + if (hdev->pdev->revision >= 0x21) { + hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF, + HCLGE_FILTER_FE_EGRESS, enable); + hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT, + HCLGE_FILTER_FE_INGRESS, enable); + } else { + hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF, + HCLGE_FILTER_FE_EGRESS_V1_B, enable); + } + if (enable) + handle->netdev_flags |= HNAE3_VLAN_FLTR; + else + handle->netdev_flags &= ~HNAE3_VLAN_FLTR; } static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, int vfid, @@ -5964,18 +5906,30 @@ static int hclge_init_vlan_config(struct hclge_dev *hdev) { #define HCLGE_DEF_VLAN_TYPE 0x8100 - struct hnae3_handle *handle; + struct hnae3_handle *handle = &hdev->vport[0].nic; struct hclge_vport *vport; int ret; int i; - ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF, true); - if (ret) - return ret; + if (hdev->pdev->revision >= 0x21) { + ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF, + HCLGE_FILTER_FE_EGRESS, true); + if (ret) + return ret; - ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT, true); - if (ret) - return ret; + ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT, + HCLGE_FILTER_FE_INGRESS, true); + if (ret) + return ret; + } else { + ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF, + HCLGE_FILTER_FE_EGRESS_V1_B, + true); + if (ret) + return ret; + } + + handle->netdev_flags |= HNAE3_VLAN_FLTR; hdev->vlan_type_cfg.rx_in_fst_vlan_type = HCLGE_DEF_VLAN_TYPE; hdev->vlan_type_cfg.rx_in_sec_vlan_type = HCLGE_DEF_VLAN_TYPE; @@ -6021,7 +5975,6 @@ static int hclge_init_vlan_config(struct hclge_dev *hdev) return ret; } - handle = &hdev->vport[0].nic; return hclge_set_vlan_filter(handle, htons(ETH_P_8021Q), 0, false); } @@ -6746,6 +6699,12 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev) } } + ret = hclge_init_umv_space(hdev); + if (ret) { + dev_err(&pdev->dev, "umv space init error, ret=%d.\n", ret); + goto err_msi_irq_uninit; + } + ret = hclge_mac_init(hdev); if (ret) { dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret); @@ -6866,6 +6825,8 @@ static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev) return ret; } + hclge_reset_umv_space(hdev); + ret = hclge_mac_init(hdev); if (ret) { dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret); @@ -6919,6 +6880,8 @@ static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev) if (mac->phydev) mdiobus_unregister(mac->mdio_bus); + hclge_uninit_umv_space(hdev); + /* Disable MISC vector(vector0) 
*/ hclge_enable_vector(&hdev->misc_vector, false); synchronize_irq(hdev->misc_vector.vector_irq); @@ -7317,7 +7280,6 @@ static const struct hnae3_ae_ops hclge_ops = { .rm_uc_addr = hclge_rm_uc_addr, .add_mc_addr = hclge_add_mc_addr, .rm_mc_addr = hclge_rm_mc_addr, - .update_mta_status = hclge_update_mta_status, .set_autoneg = hclge_set_autoneg, .get_autoneg = hclge_get_autoneg, .get_pauseparam = hclge_get_pauseparam, diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h index 06adbdd27b95..e3dfd654eca9 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h @@ -14,6 +14,8 @@ #define HCLGE_MOD_VERSION "1.0" #define HCLGE_DRIVER_NAME "hclge" +#define HCLGE_MAX_PF_NUM 8 + #define HCLGE_INVALID_VPORT 0xffff #define HCLGE_PF_CFG_BLOCK_SIZE 32 @@ -53,7 +55,9 @@ #define HCLGE_RSS_TC_SIZE_6 64 #define HCLGE_RSS_TC_SIZE_7 128 -#define HCLGE_MTA_TBL_SIZE 4096 +#define HCLGE_UMV_TBL_SIZE 3072 +#define HCLGE_DEFAULT_UMV_SPACE_PER_PF \ + (HCLGE_UMV_TBL_SIZE / HCLGE_MAX_PF_NUM) #define HCLGE_TQP_RESET_TRY_TIMES 10 @@ -162,13 +166,6 @@ enum HCLGE_MAC_DUPLEX { HCLGE_MAC_FULL }; -enum hclge_mta_dmac_sel_type { - HCLGE_MAC_ADDR_47_36, - HCLGE_MAC_ADDR_46_35, - HCLGE_MAC_ADDR_45_34, - HCLGE_MAC_ADDR_44_33, -}; - struct hclge_mac { u8 phy_addr; u8 flag; @@ -251,6 +248,7 @@ struct hclge_cfg { u8 default_speed; u32 numa_node_map; u8 speed_ability; + u16 umv_space; }; struct hclge_tm_info { @@ -670,9 +668,6 @@ struct hclge_dev { u32 pkt_buf_size; /* Total pf buf size for tx/rx */ u32 mps; /* Max packet size */ - enum hclge_mta_dmac_sel_type mta_mac_sel_type; - bool enable_mta; /* Multicast filter enable */ - struct hclge_vlan_type_cfg vlan_type_cfg; unsigned long vlan_table[VLAN_N_VID][BITS_TO_LONGS(HCLGE_VPORT_NUM)]; @@ -680,6 +675,15 @@ struct hclge_dev { struct hclge_fd_cfg fd_cfg; struct hlist_head fd_rule_list; u16 hclge_fd_rule_num; + + u16 wanted_umv_size; + /* max available unicast mac vlan space */ + u16 max_umv_size; + /* private unicast mac vlan space, it's same for PF and its VFs */ + u16 priv_umv_size; + /* unicast mac vlan space shared by PF and its VFs */ + u16 share_umv_size; + struct mutex umv_mutex; /* protect share_umv_size */ }; /* VPort level vlan tag configuration for TX direction */ @@ -732,13 +736,12 @@ struct hclge_vport { struct hclge_tx_vtag_cfg txvlan_cfg; struct hclge_rx_vtag_cfg rxvlan_cfg; + u16 used_umv_num; + int vport_id; struct hclge_dev *back; /* Back reference to associated dev */ struct hnae3_handle nic; struct hnae3_handle roce; - - bool accept_mta_mc; /* whether to accept mta filter multicast */ - unsigned long mta_shadow[BITS_TO_LONGS(HCLGE_MTA_TBL_SIZE)]; }; void hclge_promisc_param_init(struct hclge_promisc_param *param, bool en_uc, @@ -753,15 +756,6 @@ int hclge_add_mc_addr_common(struct hclge_vport *vport, int hclge_rm_mc_addr_common(struct hclge_vport *vport, const unsigned char *addr); -int hclge_cfg_func_mta_filter(struct hclge_dev *hdev, - u8 func_id, - bool enable); -int hclge_update_mta_status_common(struct hclge_vport *vport, - unsigned long *status, - u16 idx, - u16 count, - bool update_filter); - struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle); int hclge_bind_ring_with_vector(struct hclge_vport *vport, int vector_id, bool en, diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c index f34851c91eb3..04462a347a94 100644 --- 
a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c @@ -233,43 +233,6 @@ static int hclge_set_vf_uc_mac_addr(struct hclge_vport *vport, return 0; } -static int hclge_set_vf_mc_mta_status(struct hclge_vport *vport, - u8 *msg, u8 idx, bool is_end) -{ -#define HCLGE_MTA_STATUS_MSG_SIZE 13 -#define HCLGE_MTA_STATUS_MSG_BITS \ - (HCLGE_MTA_STATUS_MSG_SIZE * BITS_PER_BYTE) -#define HCLGE_MTA_STATUS_MSG_END_BITS \ - (HCLGE_MTA_TBL_SIZE % HCLGE_MTA_STATUS_MSG_BITS) - unsigned long status[BITS_TO_LONGS(HCLGE_MTA_STATUS_MSG_BITS)]; - u16 tbl_cnt; - u16 tbl_idx; - u8 msg_ofs; - u8 msg_bit; - - tbl_cnt = is_end ? HCLGE_MTA_STATUS_MSG_END_BITS : - HCLGE_MTA_STATUS_MSG_BITS; - - /* set msg field */ - msg_ofs = 0; - msg_bit = 0; - memset(status, 0, sizeof(status)); - for (tbl_idx = 0; tbl_idx < tbl_cnt; tbl_idx++) { - if (msg[msg_ofs] & BIT(msg_bit)) - set_bit(tbl_idx, status); - - msg_bit++; - if (msg_bit == BITS_PER_BYTE) { - msg_bit = 0; - msg_ofs++; - } - } - - return hclge_update_mta_status_common(vport, - status, idx * HCLGE_MTA_STATUS_MSG_BITS, - tbl_cnt, is_end); -} - static int hclge_set_vf_mc_mac_addr(struct hclge_vport *vport, struct hclge_mbx_vf_to_pf_cmd *mbx_req, bool gen_resp) @@ -284,27 +247,6 @@ static int hclge_set_vf_mc_mac_addr(struct hclge_vport *vport, status = hclge_add_mc_addr_common(vport, mac_addr); } else if (mbx_req->msg[1] == HCLGE_MBX_MAC_VLAN_MC_REMOVE) { status = hclge_rm_mc_addr_common(vport, mac_addr); - } else if (mbx_req->msg[1] == HCLGE_MBX_MAC_VLAN_MC_FUNC_MTA_ENABLE) { - u8 func_id = vport->vport_id; - bool enable = mbx_req->msg[2]; - - status = hclge_cfg_func_mta_filter(hdev, func_id, enable); - } else if (mbx_req->msg[1] == HCLGE_MBX_MAC_VLAN_MTA_TYPE_READ) { - resp_data = hdev->mta_mac_sel_type; - resp_len = sizeof(u8); - gen_resp = true; - status = 0; - } else if (mbx_req->msg[1] == HCLGE_MBX_MAC_VLAN_MTA_STATUS_UPDATE) { - /* mta status update msg format - * msg[2.6 : 2.0] msg index - * msg[2.7] msg is end - * msg[15 : 3] mta status bits[103 : 0] - */ - bool is_end = (mbx_req->msg[2] & 0x80) ? 
true : false; - - status = hclge_set_vf_mc_mta_status(vport, &mbx_req->msg[3], - mbx_req->msg[2] & 0x7F, - is_end); } else { dev_err(&hdev->pdev->dev, "failed to set mcast mac addr, unknown subcode %d\n", diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h index 19b32860309c..bc294b0c8b62 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h @@ -89,6 +89,7 @@ enum hclgevf_opcode_type { HCLGEVF_OPC_CFG_COM_TQP_QUEUE = 0x0B20, /* RSS cmd */ HCLGEVF_OPC_RSS_GENERIC_CONFIG = 0x0D01, + HCLGEVF_OPC_RSS_INPUT_TUPLE = 0x0D02, HCLGEVF_OPC_RSS_INDIR_TABLE = 0x0D07, HCLGEVF_OPC_RSS_TC_MODE = 0x0D08, /* Mailbox cmd */ @@ -148,7 +149,8 @@ struct hclgevf_query_res_cmd { __le16 rsv[7]; }; -#define HCLGEVF_RSS_HASH_KEY_OFFSET 4 +#define HCLGEVF_RSS_DEFAULT_OUTPORT_B 4 +#define HCLGEVF_RSS_HASH_KEY_OFFSET_B 4 #define HCLGEVF_RSS_HASH_KEY_NUM 16 struct hclgevf_rss_config_cmd { u8 hash_config; @@ -159,11 +161,11 @@ struct hclgevf_rss_config_cmd { struct hclgevf_rss_input_tuple_cmd { u8 ipv4_tcp_en; u8 ipv4_udp_en; - u8 ipv4_stcp_en; + u8 ipv4_sctp_en; u8 ipv4_fragment_en; u8 ipv6_tcp_en; u8 ipv6_udp_en; - u8 ipv6_stcp_en; + u8 ipv6_sctp_en; u8 ipv6_fragment_en; u8 rsv[16]; }; diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c index 8f858cb2a67b..ac67fecb9408 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c @@ -386,6 +386,47 @@ static int hclgevf_get_vector_index(struct hclgevf_dev *hdev, int vector) return -EINVAL; } +static int hclgevf_set_rss_algo_key(struct hclgevf_dev *hdev, + const u8 hfunc, const u8 *key) +{ + struct hclgevf_rss_config_cmd *req; + struct hclgevf_desc desc; + int key_offset; + int key_size; + int ret; + + req = (struct hclgevf_rss_config_cmd *)desc.data; + + for (key_offset = 0; key_offset < 3; key_offset++) { + hclgevf_cmd_setup_basic_desc(&desc, + HCLGEVF_OPC_RSS_GENERIC_CONFIG, + false); + + req->hash_config |= (hfunc & HCLGEVF_RSS_HASH_ALGO_MASK); + req->hash_config |= + (key_offset << HCLGEVF_RSS_HASH_KEY_OFFSET_B); + + if (key_offset == 2) + key_size = + HCLGEVF_RSS_KEY_SIZE - HCLGEVF_RSS_HASH_KEY_NUM * 2; + else + key_size = HCLGEVF_RSS_HASH_KEY_NUM; + + memcpy(req->hash_key, + key + key_offset * HCLGEVF_RSS_HASH_KEY_NUM, key_size); + + ret = hclgevf_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "Configure RSS config fail, status = %d\n", + ret); + return ret; + } + } + + return 0; +} + static u32 hclgevf_get_rss_key_size(struct hnae3_handle *handle) { return HCLGEVF_RSS_KEY_SIZE; @@ -466,68 +507,40 @@ static int hclgevf_set_rss_tc_mode(struct hclgevf_dev *hdev, u16 rss_size) return status; } -static int hclgevf_get_rss_hw_cfg(struct hnae3_handle *handle, u8 *hash, - u8 *key) +static int hclgevf_get_rss(struct hnae3_handle *handle, u32 *indir, u8 *key, + u8 *hfunc) { struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); - struct hclgevf_rss_config_cmd *req; - int lkup_times = key ? 3 : 1; - struct hclgevf_desc desc; - int key_offset; - int key_size; - int status; - - req = (struct hclgevf_rss_config_cmd *)desc.data; - lkup_times = (lkup_times == 3) ? 3 : ((hash) ? 
1 : 0); - - for (key_offset = 0; key_offset < lkup_times; key_offset++) { - hclgevf_cmd_setup_basic_desc(&desc, - HCLGEVF_OPC_RSS_GENERIC_CONFIG, - true); - req->hash_config |= (key_offset << HCLGEVF_RSS_HASH_KEY_OFFSET); + struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg; + int i; - status = hclgevf_cmd_send(&hdev->hw, &desc, 1); - if (status) { - dev_err(&hdev->pdev->dev, - "failed to get hardware RSS cfg, status = %d\n", - status); - return status; + if (handle->pdev->revision >= 0x21) { + /* Get hash algorithm */ + if (hfunc) { + switch (rss_cfg->hash_algo) { + case HCLGEVF_RSS_HASH_ALGO_TOEPLITZ: + *hfunc = ETH_RSS_HASH_TOP; + break; + case HCLGEVF_RSS_HASH_ALGO_SIMPLE: + *hfunc = ETH_RSS_HASH_XOR; + break; + default: + *hfunc = ETH_RSS_HASH_UNKNOWN; + break; + } } - if (key_offset == 2) - key_size = - HCLGEVF_RSS_KEY_SIZE - HCLGEVF_RSS_HASH_KEY_NUM * 2; - else - key_size = HCLGEVF_RSS_HASH_KEY_NUM; - + /* Get the RSS Key required by the user */ if (key) - memcpy(key + key_offset * HCLGEVF_RSS_HASH_KEY_NUM, - req->hash_key, - key_size); + memcpy(key, rss_cfg->rss_hash_key, + HCLGEVF_RSS_KEY_SIZE); } - if (hash) { - if ((req->hash_config & 0xf) == HCLGEVF_RSS_HASH_ALGO_TOEPLITZ) - *hash = ETH_RSS_HASH_TOP; - else - *hash = ETH_RSS_HASH_UNKNOWN; - } - - return 0; -} - -static int hclgevf_get_rss(struct hnae3_handle *handle, u32 *indir, u8 *key, - u8 *hfunc) -{ - struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); - struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg; - int i; - if (indir) for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++) indir[i] = rss_cfg->rss_indirection_tbl[i]; - return hclgevf_get_rss_hw_cfg(handle, hfunc, key); + return 0; } static int hclgevf_set_rss(struct hnae3_handle *handle, const u32 *indir, @@ -535,7 +548,36 @@ static int hclgevf_set_rss(struct hnae3_handle *handle, const u32 *indir, { struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg; - int i; + int ret, i; + + if (handle->pdev->revision >= 0x21) { + /* Set the RSS Hash Key if specified by the user */ + if (key) { + switch (hfunc) { + case ETH_RSS_HASH_TOP: + rss_cfg->hash_algo = + HCLGEVF_RSS_HASH_ALGO_TOEPLITZ; + break; + case ETH_RSS_HASH_XOR: + rss_cfg->hash_algo = + HCLGEVF_RSS_HASH_ALGO_SIMPLE; + break; + case ETH_RSS_HASH_NO_CHANGE: + break; + default: + return -EINVAL; + } + + ret = hclgevf_set_rss_algo_key(hdev, rss_cfg->hash_algo, + key); + if (ret) + return ret; + + /* Update the shadow RSS key with the user specified key */ + memcpy(rss_cfg->rss_hash_key, key, + HCLGEVF_RSS_KEY_SIZE); + } + } /* update the shadow RSS table with user specified qids */ for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++) @@ -545,6 +587,193 @@ static int hclgevf_set_rss(struct hnae3_handle *handle, const u32 *indir, return hclgevf_set_rss_indir_table(hdev); } +static u8 hclgevf_get_rss_hash_bits(struct ethtool_rxnfc *nfc) +{ + u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGEVF_S_PORT_BIT : 0; + + if (nfc->data & RXH_L4_B_2_3) + hash_sets |= HCLGEVF_D_PORT_BIT; + else + hash_sets &= ~HCLGEVF_D_PORT_BIT; + + if (nfc->data & RXH_IP_SRC) + hash_sets |= HCLGEVF_S_IP_BIT; + else + hash_sets &= ~HCLGEVF_S_IP_BIT; + + if (nfc->data & RXH_IP_DST) + hash_sets |= HCLGEVF_D_IP_BIT; + else + hash_sets &= ~HCLGEVF_D_IP_BIT; + + if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW) + hash_sets |= HCLGEVF_V_TAG_BIT; + + return hash_sets; +}
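/* Editor's example (illustrative, not part of this patch): a request like
 * "ethtool -N eth0 rx-flow-hash udp4 sdfn" (eth0 as an example interface)
 * arrives with nfc->data = RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 |
 * RXH_L4_B_2_3, so the helper above returns HCLGEVF_S_IP_BIT |
 * HCLGEVF_D_IP_BIT | HCLGEVF_S_PORT_BIT | HCLGEVF_D_PORT_BIT (0xf), which
 * hclgevf_set_rss_tuple() below then writes into the ipv4_udp_en field of
 * the HCLGEVF_OPC_RSS_INPUT_TUPLE command. */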
+ +static int hclgevf_set_rss_tuple(struct hnae3_handle *handle, + struct ethtool_rxnfc *nfc) +{ + struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); + struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg; + struct hclgevf_rss_input_tuple_cmd *req; + struct hclgevf_desc desc; + u8 tuple_sets; + int ret; + + if (handle->pdev->revision == 0x20) + return -EOPNOTSUPP; + + if (nfc->data & + ~(RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3)) + return -EINVAL; + + req = (struct hclgevf_rss_input_tuple_cmd *)desc.data; + hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_INPUT_TUPLE, false); + + req->ipv4_tcp_en = rss_cfg->rss_tuple_sets.ipv4_tcp_en; + req->ipv4_udp_en = rss_cfg->rss_tuple_sets.ipv4_udp_en; + req->ipv4_sctp_en = rss_cfg->rss_tuple_sets.ipv4_sctp_en; + req->ipv4_fragment_en = rss_cfg->rss_tuple_sets.ipv4_fragment_en; + req->ipv6_tcp_en = rss_cfg->rss_tuple_sets.ipv6_tcp_en; + req->ipv6_udp_en = rss_cfg->rss_tuple_sets.ipv6_udp_en; + req->ipv6_sctp_en = rss_cfg->rss_tuple_sets.ipv6_sctp_en; + req->ipv6_fragment_en = rss_cfg->rss_tuple_sets.ipv6_fragment_en; + + tuple_sets = hclgevf_get_rss_hash_bits(nfc); + switch (nfc->flow_type) { + case TCP_V4_FLOW: + req->ipv4_tcp_en = tuple_sets; + break; + case TCP_V6_FLOW: + req->ipv6_tcp_en = tuple_sets; + break; + case UDP_V4_FLOW: + req->ipv4_udp_en = tuple_sets; + break; + case UDP_V6_FLOW: + req->ipv6_udp_en = tuple_sets; + break; + case SCTP_V4_FLOW: + req->ipv4_sctp_en = tuple_sets; + break; + case SCTP_V6_FLOW: + if ((nfc->data & RXH_L4_B_0_1) || + (nfc->data & RXH_L4_B_2_3)) + return -EINVAL; + + req->ipv6_sctp_en = tuple_sets; + break; + case IPV4_FLOW: + req->ipv4_fragment_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER; + break; + case IPV6_FLOW: + req->ipv6_fragment_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER; + break; + default: + return -EINVAL; + } + + ret = hclgevf_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "Set rss tuple fail, status = %d\n", ret); + return ret; + } + + rss_cfg->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en; + rss_cfg->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en; + rss_cfg->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en; + rss_cfg->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en; + rss_cfg->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en; + rss_cfg->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en; + rss_cfg->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en; + rss_cfg->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en; + return 0; +} + +static int hclgevf_get_rss_tuple(struct hnae3_handle *handle, + struct ethtool_rxnfc *nfc) +{ + struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); + struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg; + u8 tuple_sets; + + if (handle->pdev->revision == 0x20) + return -EOPNOTSUPP; + + nfc->data = 0; + + switch (nfc->flow_type) { + case TCP_V4_FLOW: + tuple_sets = rss_cfg->rss_tuple_sets.ipv4_tcp_en; + break; + case UDP_V4_FLOW: + tuple_sets = rss_cfg->rss_tuple_sets.ipv4_udp_en; + break; + case TCP_V6_FLOW: + tuple_sets = rss_cfg->rss_tuple_sets.ipv6_tcp_en; + break; + case 
UDP_V6_FLOW: + tuple_sets = rss_cfg->rss_tuple_sets.ipv6_udp_en; + break; + case SCTP_V4_FLOW: + tuple_sets = rss_cfg->rss_tuple_sets.ipv4_sctp_en; + break; + case SCTP_V6_FLOW: + tuple_sets = rss_cfg->rss_tuple_sets.ipv6_sctp_en; + break; + case IPV4_FLOW: + case IPV6_FLOW: + tuple_sets = HCLGEVF_S_IP_BIT | HCLGEVF_D_IP_BIT; + break; + default: + return -EINVAL; + } + + if (!tuple_sets) + return 0; + + if (tuple_sets & HCLGEVF_D_PORT_BIT) + nfc->data |= RXH_L4_B_2_3; + if (tuple_sets & HCLGEVF_S_PORT_BIT) + nfc->data |= RXH_L4_B_0_1; + if (tuple_sets & HCLGEVF_D_IP_BIT) + nfc->data |= RXH_IP_DST; + if (tuple_sets & HCLGEVF_S_IP_BIT) + nfc->data |= RXH_IP_SRC; + + return 0; +} + +static int hclgevf_set_rss_input_tuple(struct hclgevf_dev *hdev, + struct hclgevf_rss_cfg *rss_cfg) +{ + struct hclgevf_rss_input_tuple_cmd *req; + struct hclgevf_desc desc; + int ret; + + hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_INPUT_TUPLE, false); + + req = (struct hclgevf_rss_input_tuple_cmd *)desc.data; + + req->ipv4_tcp_en = rss_cfg->rss_tuple_sets.ipv4_tcp_en; + req->ipv4_udp_en = rss_cfg->rss_tuple_sets.ipv4_udp_en; + req->ipv4_sctp_en = rss_cfg->rss_tuple_sets.ipv4_sctp_en; + req->ipv4_fragment_en = rss_cfg->rss_tuple_sets.ipv4_fragment_en; + req->ipv6_tcp_en = rss_cfg->rss_tuple_sets.ipv6_tcp_en; + req->ipv6_udp_en = rss_cfg->rss_tuple_sets.ipv6_udp_en; + req->ipv6_sctp_en = rss_cfg->rss_tuple_sets.ipv6_sctp_en; + req->ipv6_fragment_en = rss_cfg->rss_tuple_sets.ipv6_fragment_en; + + ret = hclgevf_cmd_send(&hdev->hw, &desc, 1); + if (ret) + dev_err(&hdev->pdev->dev, + "Configure rss input fail, status = %d\n", ret); + return ret; +} + static int hclgevf_get_tc_size(struct hnae3_handle *handle) { struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); @@ -746,126 +975,6 @@ static void hclgevf_reset_tqp_stats(struct hnae3_handle *handle) } } -static int hclgevf_cfg_func_mta_type(struct hclgevf_dev *hdev) -{ - u8 resp_msg = HCLGEVF_MTA_TYPE_SEL_MAX; - int ret; - - ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_MULTICAST, - HCLGE_MBX_MAC_VLAN_MTA_TYPE_READ, - NULL, 0, true, &resp_msg, sizeof(u8)); - - if (ret) { - dev_err(&hdev->pdev->dev, - "Read mta type fail, ret=%d.\n", ret); - return ret; - } - - if (resp_msg > HCLGEVF_MTA_TYPE_SEL_MAX) { - dev_err(&hdev->pdev->dev, - "Read mta type invalid, resp=%d.\n", resp_msg); - return -EINVAL; - } - - hdev->mta_mac_sel_type = resp_msg; - - return 0; -} - -static u16 hclgevf_get_mac_addr_to_mta_index(struct hclgevf_dev *hdev, - const u8 *addr) -{ - u32 rsh = HCLGEVF_MTA_TYPE_SEL_MAX - hdev->mta_mac_sel_type; - u16 high_val = addr[1] | (addr[0] << 8); - - return (high_val >> rsh) & 0xfff; -} - -static int hclgevf_do_update_mta_status(struct hclgevf_dev *hdev, - unsigned long *status) -{ -#define HCLGEVF_MTA_STATUS_MSG_SIZE 13 -#define HCLGEVF_MTA_STATUS_MSG_BITS \ - (HCLGEVF_MTA_STATUS_MSG_SIZE * BITS_PER_BYTE) -#define HCLGEVF_MTA_STATUS_MSG_END_BITS \ - (HCLGEVF_MTA_TBL_SIZE % HCLGEVF_MTA_STATUS_MSG_BITS) - u16 tbl_cnt; - u16 tbl_idx; - u8 msg_cnt; - u8 msg_idx; - int ret; - - msg_cnt = DIV_ROUND_UP(HCLGEVF_MTA_TBL_SIZE, - HCLGEVF_MTA_STATUS_MSG_BITS); - tbl_idx = 0; - msg_idx = 0; - while (msg_cnt--) { - u8 msg[HCLGEVF_MTA_STATUS_MSG_SIZE + 1]; - u8 *p = &msg[1]; - u8 msg_ofs; - u8 msg_bit; - - memset(msg, 0, sizeof(msg)); - - /* set index field */ - msg[0] = 0x7F & msg_idx; - - /* set end flag field */ - if (msg_cnt == 0) { - msg[0] |= 0x80; - tbl_cnt = HCLGEVF_MTA_STATUS_MSG_END_BITS; - } else { - tbl_cnt = HCLGEVF_MTA_STATUS_MSG_BITS; - } - - /* set 
status field */ - msg_ofs = 0; - msg_bit = 0; - while (tbl_cnt--) { - if (test_bit(tbl_idx, status)) - p[msg_ofs] |= BIT(msg_bit); - - tbl_idx++; - - msg_bit++; - if (msg_bit == BITS_PER_BYTE) { - msg_bit = 0; - msg_ofs++; - } - } - - ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_MULTICAST, - HCLGE_MBX_MAC_VLAN_MTA_STATUS_UPDATE, - msg, sizeof(msg), false, NULL, 0); - if (ret) - break; - - msg_idx++; - } - - return ret; -} - -static int hclgevf_update_mta_status(struct hnae3_handle *handle) -{ - unsigned long mta_status[BITS_TO_LONGS(HCLGEVF_MTA_TBL_SIZE)]; - struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); - struct net_device *netdev = hdev->nic.kinfo.netdev; - struct netdev_hw_addr *ha; - u16 tbl_idx; - - /* clear status */ - memset(mta_status, 0, sizeof(mta_status)); - - /* update status from mc addr list */ - netdev_for_each_mc_addr(ha, netdev) { - tbl_idx = hclgevf_get_mac_addr_to_mta_index(hdev, ha->addr); - set_bit(tbl_idx, mta_status); - } - - return hclgevf_do_update_mta_status(hdev, mta_status); -} - static void hclgevf_get_mac_addr(struct hnae3_handle *handle, u8 *p) { struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); @@ -1396,6 +1505,39 @@ static int hclgevf_rss_init_hw(struct hclgevf_dev *hdev) rss_cfg->rss_size = hdev->rss_size_max; + if (hdev->pdev->revision >= 0x21) { + rss_cfg->hash_algo = HCLGEVF_RSS_HASH_ALGO_TOEPLITZ; + netdev_rss_key_fill(rss_cfg->rss_hash_key, + HCLGEVF_RSS_KEY_SIZE); + + ret = hclgevf_set_rss_algo_key(hdev, rss_cfg->hash_algo, + rss_cfg->rss_hash_key); + if (ret) + return ret; + + rss_cfg->rss_tuple_sets.ipv4_tcp_en = + HCLGEVF_RSS_INPUT_TUPLE_OTHER; + rss_cfg->rss_tuple_sets.ipv4_udp_en = + HCLGEVF_RSS_INPUT_TUPLE_OTHER; + rss_cfg->rss_tuple_sets.ipv4_sctp_en = + HCLGEVF_RSS_INPUT_TUPLE_SCTP; + rss_cfg->rss_tuple_sets.ipv4_fragment_en = + HCLGEVF_RSS_INPUT_TUPLE_OTHER; + rss_cfg->rss_tuple_sets.ipv6_tcp_en = + HCLGEVF_RSS_INPUT_TUPLE_OTHER; + rss_cfg->rss_tuple_sets.ipv6_udp_en = + HCLGEVF_RSS_INPUT_TUPLE_OTHER; + rss_cfg->rss_tuple_sets.ipv6_sctp_en = + HCLGEVF_RSS_INPUT_TUPLE_SCTP; + rss_cfg->rss_tuple_sets.ipv6_fragment_en = + HCLGEVF_RSS_INPUT_TUPLE_OTHER; + + ret = hclgevf_set_rss_input_tuple(hdev, rss_cfg); + if (ret) + return ret; + + } + /* Initialize RSS indirect table for each vport */ for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++) rss_cfg->rss_indirection_tbl[i] = i % hdev->rss_size_max; @@ -1871,14 +2013,6 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev) goto err_config; } - /* Initialize mta type for this VF */ - ret = hclgevf_cfg_func_mta_type(hdev); - if (ret) { - dev_err(&hdev->pdev->dev, - "failed(%d) to initialize MTA type\n", ret); - goto err_config; - } - /* Initialize RSS for this VF */ ret = hclgevf_rss_init_hw(hdev); if (ret) { @@ -2038,7 +2172,6 @@ static const struct hnae3_ae_ops hclgevf_ops = { .rm_uc_addr = hclgevf_rm_uc_addr, .add_mc_addr = hclgevf_add_mc_addr, .rm_mc_addr = hclgevf_rm_mc_addr, - .update_mta_status = hclgevf_update_mta_status, .get_stats = hclgevf_get_stats, .update_stats = hclgevf_update_stats, .get_strings = hclgevf_get_strings, @@ -2047,6 +2180,8 @@ static const struct hnae3_ae_ops hclgevf_ops = { .get_rss_indir_size = hclgevf_get_rss_indir_size, .get_rss = hclgevf_get_rss, .set_rss = hclgevf_set_rss, + .get_rss_tuple = hclgevf_get_rss_tuple, + .set_rss_tuple = hclgevf_set_rss_tuple, .get_tc_size = hclgevf_get_tc_size, .get_fw_version = hclgevf_get_fw_version, .set_vlan_filter = hclgevf_set_vlan_filter, diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h 
b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h index 2af01f107c63..aed241e8ffab 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h @@ -46,9 +46,13 @@ #define HCLGEVF_RSS_HASH_ALGO_MASK 0xf #define HCLGEVF_RSS_CFG_TBL_NUM \ (HCLGEVF_RSS_IND_TBL_SIZE / HCLGEVF_RSS_CFG_TBL_SIZE) - -#define HCLGEVF_MTA_TBL_SIZE 4096 -#define HCLGEVF_MTA_TYPE_SEL_MAX 4 +#define HCLGEVF_RSS_INPUT_TUPLE_OTHER GENMASK(3, 0) +#define HCLGEVF_RSS_INPUT_TUPLE_SCTP GENMASK(4, 0) +#define HCLGEVF_D_PORT_BIT BIT(0) +#define HCLGEVF_S_PORT_BIT BIT(1) +#define HCLGEVF_D_IP_BIT BIT(2) +#define HCLGEVF_S_IP_BIT BIT(3) +#define HCLGEVF_V_TAG_BIT BIT(4) /* states of hclgevf device & tasks */ enum hclgevf_states { @@ -109,12 +113,24 @@ struct hclgevf_cfg { u32 numa_node_map; }; +struct hclgevf_rss_tuple_cfg { + u8 ipv4_tcp_en; + u8 ipv4_udp_en; + u8 ipv4_sctp_en; + u8 ipv4_fragment_en; + u8 ipv6_tcp_en; + u8 ipv6_udp_en; + u8 ipv6_sctp_en; + u8 ipv6_fragment_en; +}; + struct hclgevf_rss_cfg { u8 rss_hash_key[HCLGEVF_RSS_KEY_SIZE]; /* user configured hash keys */ u32 hash_algo; u32 rss_size; u8 hw_tc_map; u8 rss_indirection_tbl[HCLGEVF_RSS_IND_TBL_SIZE]; /* shadow table */ + struct hclgevf_rss_tuple_cfg rss_tuple_sets; }; struct hclgevf_misc_vector { @@ -157,8 +173,6 @@ struct hclgevf_dev { u16 *vector_status; int *vector_irq; - bool accept_mta_mc; /* whether to accept mta filter multicast */ - u8 mta_mac_sel_type; bool mbx_event_pending; struct hclgevf_mbx_resp_status mbx_resp; /* mailbox response */ struct hclgevf_mbx_arq_ring arq; /* mailbox async rx queue */ diff --git a/drivers/net/ethernet/huawei/hinic/hinic_main.c b/drivers/net/ethernet/huawei/hinic/hinic_main.c index 09e9da10b786..4a8f82938ed5 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_main.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_main.c @@ -789,23 +789,6 @@ static void hinic_get_stats64(struct net_device *netdev, stats->tx_errors = nic_tx_stats->tx_dropped; } -#ifdef CONFIG_NET_POLL_CONTROLLER -static void hinic_netpoll(struct net_device *netdev) -{ - struct hinic_dev *nic_dev = netdev_priv(netdev); - int i, num_qps; - - num_qps = hinic_hwdev_num_qps(nic_dev->hwdev); - for (i = 0; i < num_qps; i++) { - struct hinic_txq *txq = &nic_dev->txqs[i]; - struct hinic_rxq *rxq = &nic_dev->rxqs[i]; - - napi_schedule(&txq->napi); - napi_schedule(&rxq->napi); - } -} -#endif - static const struct net_device_ops hinic_netdev_ops = { .ndo_open = hinic_open, .ndo_stop = hinic_close, @@ -818,9 +801,6 @@ static const struct net_device_ops hinic_netdev_ops = { .ndo_start_xmit = hinic_xmit_frame, .ndo_tx_timeout = hinic_tx_timeout, .ndo_get_stats64 = hinic_get_stats64, -#ifdef CONFIG_NET_POLL_CONTROLLER - .ndo_poll_controller = hinic_netpoll, -#endif }; static void netdev_features_init(struct net_device *netdev) diff --git a/drivers/net/ethernet/ibm/ehea/ehea_main.c b/drivers/net/ethernet/ibm/ehea/ehea_main.c index aa0b89777e74..3baabdc89726 100644 --- a/drivers/net/ethernet/ibm/ehea/ehea_main.c +++ b/drivers/net/ethernet/ibm/ehea/ehea_main.c @@ -920,17 +920,6 @@ static int ehea_poll(struct napi_struct *napi, int budget) return rx; } -#ifdef CONFIG_NET_POLL_CONTROLLER -static void ehea_netpoll(struct net_device *dev) -{ - struct ehea_port *port = netdev_priv(dev); - int i; - - for (i = 0; i < port->num_def_qps; i++) - napi_schedule(&port->port_res[i].napi); -} -#endif - static irqreturn_t ehea_recv_irq_handler(int irq, void *param) { struct ehea_port_res *pr = param; @@ 
-2952,9 +2941,6 @@ static const struct net_device_ops ehea_netdev_ops = { .ndo_open = ehea_open, .ndo_stop = ehea_stop, .ndo_start_xmit = ehea_start_xmit, -#ifdef CONFIG_NET_POLL_CONTROLLER - .ndo_poll_controller = ehea_netpoll, -#endif .ndo_get_stats64 = ehea_get_stats64, .ndo_set_mac_address = ehea_set_mac_addr, .ndo_validate_addr = eth_validate_addr, diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c index ad898e8eaca1..7893beffcc71 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.c +++ b/drivers/net/ethernet/ibm/ibmvnic.c @@ -2207,19 +2207,6 @@ restart_poll: return frames_processed; } -#ifdef CONFIG_NET_POLL_CONTROLLER -static void ibmvnic_netpoll_controller(struct net_device *dev) -{ - struct ibmvnic_adapter *adapter = netdev_priv(dev); - int i; - - replenish_pools(netdev_priv(dev)); - for (i = 0; i < adapter->req_rx_queues; i++) - ibmvnic_interrupt_rx(adapter->rx_scrq[i]->irq, - adapter->rx_scrq[i]); -} -#endif - static int wait_for_reset(struct ibmvnic_adapter *adapter) { int rc, ret; @@ -2292,9 +2279,6 @@ static const struct net_device_ops ibmvnic_netdev_ops = { .ndo_set_mac_address = ibmvnic_set_mac, .ndo_validate_addr = eth_validate_addr, .ndo_tx_timeout = ibmvnic_tx_timeout, -#ifdef CONFIG_NET_POLL_CONTROLLER - .ndo_poll_controller = ibmvnic_netpoll_controller, -#endif .ndo_change_mtu = ibmvnic_change_mtu, .ndo_features_check = ibmvnic_features_check, }; diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c index 56b911a5dd8b..a20d1cf058ad 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c +++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c @@ -132,8 +132,6 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid) dev_info(&pf->pdev->dev, " vlan_features = 0x%08lx\n", (unsigned long int)nd->vlan_features); } - dev_info(&pf->pdev->dev, " active_vlans is %s\n", - vsi->active_vlans ? 
"<valid>" : "<null>"); dev_info(&pf->pdev->dev, " flags = 0x%08lx, netdev_registered = %i, current_netdev_flags = 0x%04x\n", vsi->flags, vsi->netdev_registered, vsi->current_netdev_flags); diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c index f4bb2779f03a..81b0e1f8d14b 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c @@ -4256,7 +4256,7 @@ int i40e_ndo_set_vf_link_state(struct net_device *netdev, int vf_id, int link) vf->link_forced = true; vf->link_up = true; pfe.event_data.link_event.link_status = true; - pfe.event_data.link_event.link_speed = I40E_LINK_SPEED_40GB; + pfe.event_data.link_event.link_speed = VIRTCHNL_LINK_SPEED_40GB; break; case IFLA_VF_LINK_STATE_DISABLE: vf->link_forced = true; diff --git a/drivers/net/ethernet/intel/iavf/iavf.h b/drivers/net/ethernet/intel/iavf/iavf.h index a512f7521841..272d76b733aa 100644 --- a/drivers/net/ethernet/intel/iavf/iavf.h +++ b/drivers/net/ethernet/intel/iavf/iavf.h @@ -342,7 +342,7 @@ struct iavf_adapter { struct iavf_channel_config ch_config; u8 num_tc; struct list_head cloud_filter_list; - /* lock to protest access to the cloud filter list */ + /* lock to protect access to the cloud filter list */ spinlock_t cloud_filter_list_lock; u16 num_cloud_filters; }; diff --git a/drivers/net/ethernet/intel/ice/Makefile b/drivers/net/ethernet/intel/ice/Makefile index 45125bd074d9..e5d6f684437e 100644 --- a/drivers/net/ethernet/intel/ice/Makefile +++ b/drivers/net/ethernet/intel/ice/Makefile @@ -16,3 +16,4 @@ ice-y := ice_main.o \ ice_lib.o \ ice_txrx.o \ ice_ethtool.o +ice-$(CONFIG_PCI_IOV) += ice_virtchnl_pf.o ice_sriov.o diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h index 0b269c470343..4c4b5717a627 100644 --- a/drivers/net/ethernet/intel/ice/ice.h +++ b/drivers/net/ethernet/intel/ice/ice.h @@ -28,6 +28,7 @@ #include <linux/ip.h> #include <linux/ipv6.h> #include <linux/if_bridge.h> +#include <linux/avf/virtchnl.h> #include <net/ipv6.h> #include "ice_devids.h" #include "ice_type.h" @@ -35,6 +36,8 @@ #include "ice_switch.h" #include "ice_common.h" #include "ice_sched.h" +#include "ice_virtchnl_pf.h" +#include "ice_sriov.h" extern const char ice_drv_ver[]; #define ICE_BAR0 0 @@ -46,6 +49,7 @@ extern const char ice_drv_ver[]; #define ICE_INT_NAME_STR_LEN (IFNAMSIZ + 16) #define ICE_ETHTOOL_FWVER_LEN 32 #define ICE_AQ_LEN 64 +#define ICE_MBXQ_LEN 64 #define ICE_MIN_MSIX 2 #define ICE_NO_VSI 0xffff #define ICE_MAX_VSI_ALLOC 130 @@ -63,6 +67,14 @@ extern const char ice_drv_ver[]; #define ICE_RES_MISC_VEC_ID (ICE_RES_VALID_BIT - 1) #define ICE_INVAL_Q_INDEX 0xffff #define ICE_INVAL_VFID 256 +#define ICE_MAX_VF_COUNT 256 +#define ICE_MAX_QS_PER_VF 256 +#define ICE_MIN_QS_PER_VF 1 +#define ICE_DFLT_QS_PER_VF 4 +#define ICE_MAX_BASE_QS_PER_VF 16 +#define ICE_MAX_INTR_PER_VF 65 +#define ICE_MIN_INTR_PER_VF (ICE_MIN_QS_PER_VF + 1) +#define ICE_DFLT_INTR_PER_VF (ICE_DFLT_QS_PER_VF + 1) #define ICE_VSIQF_HKEY_ARRAY_SIZE ((VSIQF_HKEY_MAX_INDEX + 1) * 4) @@ -133,9 +145,21 @@ enum ice_state { __ICE_EMPR_RECV, /* set by OICR handler */ __ICE_SUSPENDED, /* set on module remove path */ __ICE_RESET_FAILED, /* set by reset/rebuild */ + /* When checking for the PF to be in a nominal operating state, the + * bits that are grouped at the beginning of the list need to be + * checked. Bits occurring before __ICE_STATE_NOMINAL_CHECK_BITS will + * be checked. 
If you need to add a bit into consideration for nominal + * operating state, it must be added before + * __ICE_STATE_NOMINAL_CHECK_BITS. Do not move this entry's position + * without appropriate consideration. + */ + __ICE_STATE_NOMINAL_CHECK_BITS, __ICE_ADMINQ_EVENT_PENDING, + __ICE_MAILBOXQ_EVENT_PENDING, __ICE_MDD_EVENT_PENDING, + __ICE_VFLR_EVENT_PENDING, __ICE_FLTR_OVERFLOW_PROMISC, + __ICE_VF_DIS, __ICE_CFG_BUSY, __ICE_SERVICE_SCHED, __ICE_SERVICE_DIS, @@ -181,6 +205,8 @@ struct ice_vsi { /* Interrupt thresholds */ u16 work_lmt; + s16 vf_id; /* VF ID for SR-IOV VSIs */ + /* RSS config */ u16 rss_table_size; /* HW RSS table size */ u16 rss_size; /* Allocated RSS queues */ @@ -240,6 +266,8 @@ enum ice_pf_flags { ICE_FLAG_MSIX_ENA, ICE_FLAG_FLTR_SYNC, ICE_FLAG_RSS_ENA, + ICE_FLAG_SRIOV_ENA, + ICE_FLAG_SRIOV_CAPABLE, ICE_PF_FLAGS_NBITS /* must be last */ }; @@ -255,6 +283,12 @@ struct ice_pf { struct ice_vsi **vsi; /* VSIs created by the driver */ struct ice_sw *first_sw; /* first switch created by firmware */ + /* Virtchnl/SR-IOV config info */ + struct ice_vf *vf; + int num_alloc_vfs; /* actual number of VFs allocated */ + u16 num_vfs_supported; /* num VFs supported for this PF */ + u16 num_vf_qps; /* num queue pairs per VF */ + u16 num_vf_msix; /* num vectors per VF */ DECLARE_BITMAP(state, __ICE_STATE_NBITS); DECLARE_BITMAP(avail_txqs, ICE_MAX_TXQS); DECLARE_BITMAP(avail_rxqs, ICE_MAX_RXQS); diff --git a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h index c100b4bda195..6653555f55dd 100644 --- a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h +++ b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h @@ -87,6 +87,8 @@ struct ice_aqc_list_caps { /* Device/Function buffer entry, repeated per reported capability */ struct ice_aqc_list_caps_elem { __le16 cap; +#define ICE_AQC_CAPS_SRIOV 0x0012 +#define ICE_AQC_CAPS_VF 0x0013 #define ICE_AQC_CAPS_VSI 0x0017 #define ICE_AQC_CAPS_RSS 0x0040 #define ICE_AQC_CAPS_RXQS 0x0041 @@ -1075,6 +1077,19 @@ struct ice_aqc_nvm { __le32 addr_low; }; +/** + * Send to PF command (indirect 0x0801) id is only used by PF + * + * Send to VF command (indirect 0x0802) id is only used by PF + * + */ +struct ice_aqc_pf_vf_msg { + __le32 id; + u32 reserved; + __le32 addr_high; + __le32 addr_low; +}; + /* Get/Set RSS key (indirect 0x0B04/0x0B02) */ struct ice_aqc_get_set_rss_key { #define ICE_AQC_GSET_RSS_KEY_VSI_VALID BIT(15) @@ -1332,6 +1347,7 @@ struct ice_aq_desc { struct ice_aqc_query_txsched_res query_sched_res; struct ice_aqc_add_move_delete_elem add_move_delete_elem; struct ice_aqc_nvm nvm; + struct ice_aqc_pf_vf_msg virt; struct ice_aqc_get_set_rss_lut get_set_rss_lut; struct ice_aqc_get_set_rss_key get_set_rss_key; struct ice_aqc_add_txqs add_txqs; @@ -1429,6 +1445,10 @@ enum ice_adminq_opc { /* NVM commands */ ice_aqc_opc_nvm_read = 0x0701, + /* PF/VF mailbox commands */ + ice_mbx_opc_send_msg_to_pf = 0x0801, + ice_mbx_opc_send_msg_to_vf = 0x0802, + /* RSS commands */ ice_aqc_opc_set_rss_key = 0x0B02, ice_aqc_opc_set_rss_lut = 0x0B03, diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c index 68fbbb92d504..c52f450f2c0d 100644 --- a/drivers/net/ethernet/intel/ice/ice_common.c +++ b/drivers/net/ethernet/intel/ice/ice_common.c @@ -1406,6 +1406,28 @@ ice_parse_caps(struct ice_hw *hw, void *buf, u32 cap_count, u16 cap = le16_to_cpu(cap_resp->cap); switch (cap) { + case ICE_AQC_CAPS_SRIOV: + caps->sr_iov_1_1 = (number == 1); + ice_debug(hw, ICE_DBG_INIT, + "HW caps: 
SR-IOV = %d\n", caps->sr_iov_1_1); + break; + case ICE_AQC_CAPS_VF: + if (dev_p) { + dev_p->num_vfs_exposed = number; + ice_debug(hw, ICE_DBG_INIT, + "HW caps: VFs exposed = %d\n", + dev_p->num_vfs_exposed); + } else if (func_p) { + func_p->num_allocd_vfs = number; + func_p->vf_base_id = logical_id; + ice_debug(hw, ICE_DBG_INIT, + "HW caps: VFs allocated = %d\n", + func_p->num_allocd_vfs); + ice_debug(hw, ICE_DBG_INIT, + "HW caps: VF base_id = %d\n", + func_p->vf_base_id); + } + break; case ICE_AQC_CAPS_VSI: if (dev_p) { dev_p->num_vsi_allocd_to_host = number; @@ -2265,6 +2287,8 @@ ice_aq_add_lan_txq(struct ice_hw *hw, u8 num_qgrps, * @num_qgrps: number of groups in the list * @qg_list: the list of groups to disable * @buf_size: the total size of the qg_list buffer in bytes + * @rst_src: if called due to reset, specifies the RST source + * @vmvf_num: the relative VM or VF number that is undergoing the reset * @cd: pointer to command details structure or NULL * * Disable LAN Tx queue (0x0C31) @@ -2272,6 +2296,7 @@ ice_aq_add_lan_txq(struct ice_hw *hw, u8 num_qgrps, static enum ice_status ice_aq_dis_lan_txq(struct ice_hw *hw, u8 num_qgrps, struct ice_aqc_dis_txq_item *qg_list, u16 buf_size, + enum ice_disq_rst_src rst_src, u16 vmvf_num, struct ice_sq_cd *cd) { struct ice_aqc_dis_txqs *cmd; @@ -2281,14 +2306,45 @@ ice_aq_dis_lan_txq(struct ice_hw *hw, u8 num_qgrps, cmd = &desc.params.dis_txqs; ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_dis_txqs); - if (!qg_list) + /* qg_list can be NULL only in VM/VF reset flow */ + if (!qg_list && !rst_src) return ICE_ERR_PARAM; if (num_qgrps > ICE_LAN_TXQ_MAX_QGRPS) return ICE_ERR_PARAM; - desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); + cmd->num_entries = num_qgrps; + cmd->vmvf_and_timeout = cpu_to_le16((5 << ICE_AQC_Q_DIS_TIMEOUT_S) & + ICE_AQC_Q_DIS_TIMEOUT_M); + + switch (rst_src) { + case ICE_VM_RESET: + cmd->cmd_type = ICE_AQC_Q_DIS_CMD_VM_RESET; + cmd->vmvf_and_timeout |= + cpu_to_le16(vmvf_num & ICE_AQC_Q_DIS_VMVF_NUM_M); + break; + case ICE_VF_RESET: + cmd->cmd_type = ICE_AQC_Q_DIS_CMD_VF_RESET; + /* In this case, FW expects vmvf_num to be absolute VF id */ + cmd->vmvf_and_timeout |= + cpu_to_le16((vmvf_num + hw->func_caps.vf_base_id) & + ICE_AQC_Q_DIS_VMVF_NUM_M); + break; + case ICE_NO_RESET: + default: + break; + } + + /* If no queue group info, we are in a reset flow. 
Issue the AQ */ + if (!qg_list) + goto do_aq; + + /* set RD bit to indicate that command buffer is provided by the driver + * and it needs to be read by the firmware + */ + desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); + for (i = 0; i < num_qgrps; ++i) { /* Calculate the size taken up by the queue IDs in this group */ sz += qg_list[i].num_qs * sizeof(qg_list[i].q_id); @@ -2304,6 +2360,7 @@ ice_aq_dis_lan_txq(struct ice_hw *hw, u8 num_qgrps, if (buf_size != sz) return ICE_ERR_PARAM; +do_aq: return ice_aq_send_cmd(hw, &desc, qg_list, buf_size, cd); } @@ -2610,13 +2667,16 @@ ena_txq_exit: * @num_queues: number of queues * @q_ids: pointer to the q_id array * @q_teids: pointer to queue node teids + * @rst_src: if called due to reset, specifies the RST source + * @vmvf_num: the relative VM or VF number that is undergoing the reset * @cd: pointer to command details structure or NULL * * This function removes queues and their corresponding nodes in SW DB */ enum ice_status ice_dis_vsi_txq(struct ice_port_info *pi, u8 num_queues, u16 *q_ids, - u32 *q_teids, struct ice_sq_cd *cd) + u32 *q_teids, enum ice_disq_rst_src rst_src, u16 vmvf_num, + struct ice_sq_cd *cd) { enum ice_status status = ICE_ERR_DOES_NOT_EXIST; struct ice_aqc_dis_txq_item qg_list; @@ -2625,6 +2685,15 @@ ice_dis_vsi_txq(struct ice_port_info *pi, u8 num_queues, u16 *q_ids, if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY) return ICE_ERR_CFG; + /* if queue is disabled already yet the disable queue command has to be + * sent to complete the VF reset, then call ice_aq_dis_lan_txq without + * any queue information + */ + + if (!num_queues && rst_src) + return ice_aq_dis_lan_txq(pi->hw, 0, NULL, 0, rst_src, vmvf_num, + NULL); + mutex_lock(&pi->sched_lock); for (i = 0; i < num_queues; i++) { @@ -2637,7 +2706,8 @@ ice_dis_vsi_txq(struct ice_port_info *pi, u8 num_queues, u16 *q_ids, qg_list.num_qs = 1; qg_list.q_id[0] = cpu_to_le16(q_ids[i]); status = ice_aq_dis_lan_txq(pi->hw, 1, &qg_list, - sizeof(qg_list), cd); + sizeof(qg_list), rst_src, vmvf_num, + cd); if (status) break; diff --git a/drivers/net/ethernet/intel/ice/ice_common.h b/drivers/net/ethernet/intel/ice/ice_common.h index 7b2a5bb2e550..1900681289a4 100644 --- a/drivers/net/ethernet/intel/ice/ice_common.h +++ b/drivers/net/ethernet/intel/ice/ice_common.h @@ -7,6 +7,7 @@ #include "ice.h" #include "ice_type.h" #include "ice_switch.h" +#include <linux/avf/virtchnl.h> void ice_debug_cq(struct ice_hw *hw, u32 mask, void *desc, void *buf, u16 buf_len); @@ -89,7 +90,8 @@ ice_aq_set_event_mask(struct ice_hw *hw, u8 port_num, u16 mask, struct ice_sq_cd *cd); enum ice_status ice_dis_vsi_txq(struct ice_port_info *pi, u8 num_queues, u16 *q_ids, - u32 *q_teids, struct ice_sq_cd *cmd_details); + u32 *q_teids, enum ice_disq_rst_src rst_src, u16 vmvf_num, + struct ice_sq_cd *cmd_details); enum ice_status ice_cfg_vsi_lan(struct ice_port_info *pi, u16 vsi_handle, u8 tc_bitmap, u16 *max_lanqs); diff --git a/drivers/net/ethernet/intel/ice/ice_controlq.c b/drivers/net/ethernet/intel/ice/ice_controlq.c index b25ce4f587f5..84c967294eaf 100644 --- a/drivers/net/ethernet/intel/ice/ice_controlq.c +++ b/drivers/net/ethernet/intel/ice/ice_controlq.c @@ -33,6 +33,36 @@ static void ice_adminq_init_regs(struct ice_hw *hw) } /** + * ice_mailbox_init_regs - Initialize Mailbox registers + * @hw: pointer to the hardware structure + * + * This assumes the alloc_sq and alloc_rq functions have already been called + */ +static void ice_mailbox_init_regs(struct ice_hw *hw) +{ + struct ice_ctl_q_info *cq = 
&hw->mailboxq; + + /* set head and tail registers in our local struct */ + cq->sq.head = PF_MBX_ATQH; + cq->sq.tail = PF_MBX_ATQT; + cq->sq.len = PF_MBX_ATQLEN; + cq->sq.bah = PF_MBX_ATQBAH; + cq->sq.bal = PF_MBX_ATQBAL; + cq->sq.len_mask = PF_MBX_ATQLEN_ATQLEN_M; + cq->sq.len_ena_mask = PF_MBX_ATQLEN_ATQENABLE_M; + cq->sq.head_mask = PF_MBX_ATQH_ATQH_M; + + cq->rq.head = PF_MBX_ARQH; + cq->rq.tail = PF_MBX_ARQT; + cq->rq.len = PF_MBX_ARQLEN; + cq->rq.bah = PF_MBX_ARQBAH; + cq->rq.bal = PF_MBX_ARQBAL; + cq->rq.len_mask = PF_MBX_ARQLEN_ARQLEN_M; + cq->rq.len_ena_mask = PF_MBX_ARQLEN_ARQENABLE_M; + cq->rq.head_mask = PF_MBX_ARQH_ARQH_M; +} + +/** * ice_check_sq_alive * @hw: pointer to the hw struct * @cq: pointer to the specific Control queue @@ -639,6 +669,10 @@ static enum ice_status ice_init_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type) ice_adminq_init_regs(hw); cq = &hw->adminq; break; + case ICE_CTL_Q_MAILBOX: + ice_mailbox_init_regs(hw); + cq = &hw->mailboxq; + break; default: return ICE_ERR_PARAM; } @@ -696,7 +730,12 @@ enum ice_status ice_init_all_ctrlq(struct ice_hw *hw) if (ret_code) return ret_code; - return ice_init_check_adminq(hw); + ret_code = ice_init_check_adminq(hw); + if (ret_code) + return ret_code; + + /* Init Mailbox queue */ + return ice_init_ctrlq(hw, ICE_CTL_Q_MAILBOX); } /** @@ -714,6 +753,9 @@ static void ice_shutdown_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type) if (ice_check_sq_alive(hw, cq)) ice_aq_q_shutdown(hw, true); break; + case ICE_CTL_Q_MAILBOX: + cq = &hw->mailboxq; + break; default: return; } @@ -736,6 +778,8 @@ void ice_shutdown_all_ctrlq(struct ice_hw *hw) { /* Shutdown FW admin queue */ ice_shutdown_ctrlq(hw, ICE_CTL_Q_ADMIN); + /* Shutdown PF-VF Mailbox */ + ice_shutdown_ctrlq(hw, ICE_CTL_Q_MAILBOX); } /** diff --git a/drivers/net/ethernet/intel/ice/ice_controlq.h b/drivers/net/ethernet/intel/ice/ice_controlq.h index ea02b89243e2..437f832fd7c4 100644 --- a/drivers/net/ethernet/intel/ice/ice_controlq.h +++ b/drivers/net/ethernet/intel/ice/ice_controlq.h @@ -8,6 +8,7 @@ /* Maximum buffer lengths for all control queue types */ #define ICE_AQ_MAX_BUF_LEN 4096 +#define ICE_MBXQ_MAX_BUF_LEN 4096 #define ICE_CTL_Q_DESC(R, i) \ (&(((struct ice_aq_desc *)((R).desc_buf.va))[i])) @@ -28,6 +29,7 @@ enum ice_ctl_q { ICE_CTL_Q_UNKNOWN = 0, ICE_CTL_Q_ADMIN, + ICE_CTL_Q_MAILBOX, }; /* Control Queue default settings */ diff --git a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h index 9a78d83eaa3e..a6679a9bfd3a 100644 --- a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h +++ b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h @@ -29,6 +29,22 @@ #define PF_FW_ATQLEN_ATQCRIT_M BIT(30) #define PF_FW_ATQLEN_ATQENABLE_M BIT(31) #define PF_FW_ATQT 0x00080400 +#define PF_MBX_ARQBAH 0x0022E400 +#define PF_MBX_ARQBAL 0x0022E380 +#define PF_MBX_ARQH 0x0022E500 +#define PF_MBX_ARQH_ARQH_M ICE_M(0x3FF, 0) +#define PF_MBX_ARQLEN 0x0022E480 +#define PF_MBX_ARQLEN_ARQLEN_M ICE_M(0x3FF, 0) +#define PF_MBX_ARQLEN_ARQENABLE_M BIT(31) +#define PF_MBX_ARQT 0x0022E580 +#define PF_MBX_ATQBAH 0x0022E180 +#define PF_MBX_ATQBAL 0x0022E100 +#define PF_MBX_ATQH 0x0022E280 +#define PF_MBX_ATQH_ATQH_M ICE_M(0x3FF, 0) +#define PF_MBX_ATQLEN 0x0022E200 +#define PF_MBX_ATQLEN_ATQLEN_M ICE_M(0x3FF, 0) +#define PF_MBX_ATQLEN_ATQENABLE_M BIT(31) +#define PF_MBX_ATQT 0x0022E300 #define GLFLXP_RXDID_FLAGS(_i, _j) (0x0045D000 + ((_i) * 4 + (_j) * 256)) #define GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_S 0 #define GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_M ICE_M(0x3F, 0) 
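An aside on the register map above: ICE_M(m, s) is the driver's mask-at-shift helper (defined in ice_osdep.h, not in this patch, as ((m) << (s))), so PF_MBX_ARQH_ARQH_M resolves to the low 10 bits of the mailbox receive-queue head register. A minimal sketch of how these autogenerated masks are consumed once ice_mailbox_init_regs() has wired them into the control-queue struct; it assumes the rd32() accessor used elsewhere in this driver and is illustrative only, not part of the patch:

    /* Sketch: read the hardware's notion of the mailbox ARQ head and
     * mask off the 10-bit index field. After ice_mailbox_init_regs(),
     * cq->rq.head == PF_MBX_ARQH and cq->rq.head_mask == PF_MBX_ARQH_ARQH_M,
     * so the same clean-queue code services both the admin and mailbox
     * queues without knowing which register block it is touching.
     */
    u16 ntu = (u16)(rd32(hw, cq->rq.head) & cq->rq.head_mask);

The receive-queue clean loop compares this hardware head index against its software next-to-use index to decide how many descriptors the firmware has posted; parameterizing the registers this way is what lets ICE_CTL_Q_MAILBOX reuse the existing admin-queue machinery.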
@@ -74,10 +90,16 @@ #define GLGEN_RTRIG_CORER_M BIT(0) #define GLGEN_RTRIG_GLOBR_M BIT(1) #define GLGEN_STAT 0x000B612C +#define GLGEN_VFLRSTAT(_i) (0x00093A04 + ((_i) * 4)) #define PFGEN_CTRL 0x00091000 #define PFGEN_CTRL_PFSWR_M BIT(0) #define PFGEN_STATE 0x00088000 #define PRTGEN_STATUS 0x000B8100 +#define VFGEN_RSTAT(_VF) (0x00074000 + ((_VF) * 4)) +#define VPGEN_VFRSTAT(_VF) (0x00090800 + ((_VF) * 4)) +#define VPGEN_VFRSTAT_VFRD_M BIT(0) +#define VPGEN_VFRTRIG(_VF) (0x00090000 + ((_VF) * 4)) +#define VPGEN_VFRTRIG_VFSWR_M BIT(0) #define PFHMC_ERRORDATA 0x00520500 #define PFHMC_ERRORINFO 0x00520400 #define GLINT_DYN_CTL(_INT) (0x00160000 + ((_INT) * 4)) @@ -90,11 +112,23 @@ #define GLINT_ITR(_i, _INT) (0x00154000 + ((_i) * 8192 + (_INT) * 4)) #define GLINT_RATE(_INT) (0x0015A000 + ((_INT) * 4)) #define GLINT_RATE_INTRL_ENA_M BIT(6) +#define GLINT_VECT2FUNC(_INT) (0x00162000 + ((_INT) * 4)) +#define GLINT_VECT2FUNC_VF_NUM_S 0 +#define GLINT_VECT2FUNC_VF_NUM_M ICE_M(0xFF, 0) +#define GLINT_VECT2FUNC_PF_NUM_S 12 +#define GLINT_VECT2FUNC_PF_NUM_M ICE_M(0x7, 12) +#define GLINT_VECT2FUNC_IS_PF_S 16 +#define GLINT_VECT2FUNC_IS_PF_M BIT(16) #define PFINT_FW_CTL 0x0016C800 #define PFINT_FW_CTL_MSIX_INDX_M ICE_M(0x7FF, 0) #define PFINT_FW_CTL_ITR_INDX_S 11 #define PFINT_FW_CTL_ITR_INDX_M ICE_M(0x3, 11) #define PFINT_FW_CTL_CAUSE_ENA_M BIT(30) +#define PFINT_MBX_CTL 0x0016B280 +#define PFINT_MBX_CTL_MSIX_INDX_M ICE_M(0x7FF, 0) +#define PFINT_MBX_CTL_ITR_INDX_S 11 +#define PFINT_MBX_CTL_ITR_INDX_M ICE_M(0x3, 11) +#define PFINT_MBX_CTL_CAUSE_ENA_M BIT(30) #define PFINT_OICR 0x0016CA00 #define PFINT_OICR_ECC_ERR_M BIT(16) #define PFINT_OICR_MAL_DETECT_M BIT(19) @@ -102,6 +136,7 @@ #define PFINT_OICR_PCI_EXCEPTION_M BIT(21) #define PFINT_OICR_HMC_ERR_M BIT(26) #define PFINT_OICR_PE_CRITERR_M BIT(28) +#define PFINT_OICR_VFLR_M BIT(29) #define PFINT_OICR_CTL 0x0016CA80 #define PFINT_OICR_CTL_MSIX_INDX_M ICE_M(0x7FF, 0) #define PFINT_OICR_CTL_ITR_INDX_S 11 @@ -116,6 +151,12 @@ #define QINT_TQCTL_MSIX_INDX_S 0 #define QINT_TQCTL_ITR_INDX_S 11 #define QINT_TQCTL_CAUSE_ENA_M BIT(30) +#define VPINT_ALLOC(_VF) (0x001D1000 + ((_VF) * 4)) +#define VPINT_ALLOC_FIRST_S 0 +#define VPINT_ALLOC_FIRST_M ICE_M(0x7FF, 0) +#define VPINT_ALLOC_LAST_S 12 +#define VPINT_ALLOC_LAST_M ICE_M(0x7FF, 12) +#define VPINT_ALLOC_VALID_M BIT(31) #define QRX_CONTEXT(_i, _QRX) (0x00280000 + ((_i) * 8192 + (_QRX) * 4)) #define QRX_CTRL(_QRX) (0x00120000 + ((_QRX) * 4)) #define QRX_CTRL_MAX_INDEX 2047 @@ -128,6 +169,20 @@ #define QRX_TAIL_MAX_INDEX 2047 #define QRX_TAIL_TAIL_S 0 #define QRX_TAIL_TAIL_M ICE_M(0x1FFF, 0) +#define VPLAN_RX_QBASE(_VF) (0x00072000 + ((_VF) * 4)) +#define VPLAN_RX_QBASE_VFFIRSTQ_S 0 +#define VPLAN_RX_QBASE_VFFIRSTQ_M ICE_M(0x7FF, 0) +#define VPLAN_RX_QBASE_VFNUMQ_S 16 +#define VPLAN_RX_QBASE_VFNUMQ_M ICE_M(0xFF, 16) +#define VPLAN_RXQ_MAPENA(_VF) (0x00073000 + ((_VF) * 4)) +#define VPLAN_RXQ_MAPENA_RX_ENA_M BIT(0) +#define VPLAN_TX_QBASE(_VF) (0x001D1800 + ((_VF) * 4)) +#define VPLAN_TX_QBASE_VFFIRSTQ_S 0 +#define VPLAN_TX_QBASE_VFFIRSTQ_M ICE_M(0x3FFF, 0) +#define VPLAN_TX_QBASE_VFNUMQ_S 16 +#define VPLAN_TX_QBASE_VFNUMQ_M ICE_M(0xFF, 16) +#define VPLAN_TXQ_MAPENA(_VF) (0x00073800 + ((_VF) * 4)) +#define VPLAN_TXQ_MAPENA_TX_ENA_M BIT(0) #define GL_MDET_RX 0x00294C00 #define GL_MDET_RX_QNUM_S 0 #define GL_MDET_RX_QNUM_M ICE_M(0x7FFF, 0) @@ -164,6 +219,14 @@ #define PF_MDET_TX_PQM_VALID_M BIT(0) #define PF_MDET_TX_TCLAN 0x000FC000 #define PF_MDET_TX_TCLAN_VALID_M BIT(0) +#define VP_MDET_RX(_VF) (0x00294400 + 
((_VF) * 4)) +#define VP_MDET_RX_VALID_M BIT(0) +#define VP_MDET_TX_PQM(_VF) (0x002D2000 + ((_VF) * 4)) +#define VP_MDET_TX_PQM_VALID_M BIT(0) +#define VP_MDET_TX_TCLAN(_VF) (0x000FB800 + ((_VF) * 4)) +#define VP_MDET_TX_TCLAN_VALID_M BIT(0) +#define VP_MDET_TX_TDPU(_VF) (0x00040000 + ((_VF) * 4)) +#define VP_MDET_TX_TDPU_VALID_M BIT(0) #define GLNVM_FLA 0x000B6108 #define GLNVM_FLA_LOCKED_M BIT(6) #define GLNVM_GENS 0x000B6100 @@ -175,6 +238,9 @@ #define PF_FUNC_RID 0x0009E880 #define PF_FUNC_RID_FUNC_NUM_S 0 #define PF_FUNC_RID_FUNC_NUM_M ICE_M(0x7, 0) +#define PF_PCI_CIAA 0x0009E580 +#define PF_PCI_CIAA_VF_NUM_S 12 +#define PF_PCI_CIAD 0x0009E500 #define GL_PWR_MODE_CTL 0x000B820C #define GL_PWR_MODE_CTL_CAR_MAX_BW_S 30 #define GL_PWR_MODE_CTL_CAR_MAX_BW_M ICE_M(0x3, 30) @@ -255,5 +321,8 @@ #define GLV_UPTCH(_i) (0x0030A004 + ((_i) * 8)) #define GLV_UPTCL(_i) (0x0030A000 + ((_i) * 8)) #define VSIQF_HKEY_MAX_INDEX 12 +#define VSIQF_HLUT_MAX_INDEX 15 +#define VFINT_DYN_CTLN(_i) (0x00003800 + ((_i) * 4)) +#define VFINT_DYN_CTLN_CLEARPBA_M BIT(1) #endif /* _ICE_HW_AUTOGEN_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h index 94504023d86e..7d2a66739e3f 100644 --- a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h +++ b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h @@ -418,6 +418,7 @@ struct ice_tlan_ctx { u8 pf_num; u16 vmvf_num; u8 vmvf_type; +#define ICE_TLAN_CTX_VMVF_TYPE_VF 0 #define ICE_TLAN_CTX_VMVF_TYPE_VMQ 1 #define ICE_TLAN_CTX_VMVF_TYPE_PF 2 u16 src_vsi; @@ -473,4 +474,16 @@ static inline struct ice_rx_ptype_decoded ice_decode_rx_desc_ptype(u16 ptype) { return ice_ptype_lkup[ptype]; } + +#define ICE_LINK_SPEED_UNKNOWN 0 +#define ICE_LINK_SPEED_10MBPS 10 +#define ICE_LINK_SPEED_100MBPS 100 +#define ICE_LINK_SPEED_1000MBPS 1000 +#define ICE_LINK_SPEED_2500MBPS 2500 +#define ICE_LINK_SPEED_5000MBPS 5000 +#define ICE_LINK_SPEED_10000MBPS 10000 +#define ICE_LINK_SPEED_20000MBPS 20000 +#define ICE_LINK_SPEED_25000MBPS 25000 +#define ICE_LINK_SPEED_40000MBPS 40000 + #endif /* _ICE_LAN_TX_RX_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c index acf3478a3f3b..49f1940772ed 100644 --- a/drivers/net/ethernet/intel/ice/ice_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_lib.c @@ -68,18 +68,20 @@ static int ice_setup_rx_ctx(struct ice_ring *ring) /* Enable Flexible Descriptors in the queue context which * allows this driver to select a specific receive descriptor format */ - regval = rd32(hw, QRXFLXP_CNTXT(pf_q)); - regval |= (rxdid << QRXFLXP_CNTXT_RXDID_IDX_S) & - QRXFLXP_CNTXT_RXDID_IDX_M; - - /* increasing context priority to pick up profile id; - * default is 0x01; setting to 0x03 to ensure profile - * is programming if prev context is of same priority - */ - regval |= (0x03 << QRXFLXP_CNTXT_RXDID_PRIO_S) & - QRXFLXP_CNTXT_RXDID_PRIO_M; + if (vsi->type != ICE_VSI_VF) { + regval = rd32(hw, QRXFLXP_CNTXT(pf_q)); + regval |= (rxdid << QRXFLXP_CNTXT_RXDID_IDX_S) & + QRXFLXP_CNTXT_RXDID_IDX_M; + + /* increasing context priority to pick up profile id; + * default is 0x01; setting to 0x03 to ensure profile + * is programming if prev context is of same priority + */ + regval |= (0x03 << QRXFLXP_CNTXT_RXDID_PRIO_S) & + QRXFLXP_CNTXT_RXDID_PRIO_M; - wr32(hw, QRXFLXP_CNTXT(pf_q), regval); + wr32(hw, QRXFLXP_CNTXT(pf_q), regval); + } /* Absolute queue number out of 2K needs to be passed */ err = ice_write_rxq_ctx(hw, &rlan_ctx, pf_q); @@ -90,6 +92,9 @@ static int 
ice_setup_rx_ctx(struct ice_ring *ring) return -EIO; } + if (vsi->type == ICE_VSI_VF) + return 0; + /* init queue specific tail register */ ring->tail = hw->hw_addr + QRX_TAIL(pf_q); writel(0, ring->tail); @@ -132,6 +137,11 @@ ice_setup_tx_ctx(struct ice_ring *ring, struct ice_tlan_ctx *tlan_ctx, u16 pf_q) case ICE_VSI_PF: tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_PF; break; + case ICE_VSI_VF: + /* Firmware expects vmvf_num to be absolute VF id */ + tlan_ctx->vmvf_num = hw->func_caps.vf_base_id + vsi->vf_id; + tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_VF; + break; default: return; } @@ -285,6 +295,16 @@ static void ice_vsi_set_num_qs(struct ice_vsi *vsi) vsi->num_desc = ALIGN(ICE_DFLT_NUM_DESC, ICE_REQ_DESC_MULTIPLE); vsi->num_q_vectors = max_t(int, pf->num_lan_rx, pf->num_lan_tx); break; + case ICE_VSI_VF: + vsi->alloc_txq = pf->num_vf_qps; + vsi->alloc_rxq = pf->num_vf_qps; + /* pf->num_vf_msix includes (VF miscellaneous vector + + * data queue interrupts). Since vsi->num_q_vectors is number + * of queues vectors, subtract 1 from the original vector + * count + */ + vsi->num_q_vectors = pf->num_vf_msix - 1; + break; default: dev_warn(&vsi->back->pdev->dev, "Unknown VSI type %d\n", vsi->type); @@ -331,6 +351,8 @@ void ice_vsi_delete(struct ice_vsi *vsi) struct ice_vsi_ctx ctxt; enum ice_status status; + if (vsi->type == ICE_VSI_VF) + ctxt.vf_num = vsi->vf_id; ctxt.vsi_num = vsi->vsi_num; memcpy(&ctxt.info, &vsi->info, sizeof(struct ice_aqc_vsi_props)); @@ -466,6 +488,10 @@ static struct ice_vsi *ice_vsi_alloc(struct ice_pf *pf, enum ice_vsi_type type) /* Setup default MSIX irq handler for VSI */ vsi->irq_handler = ice_msix_clean_rings; break; + case ICE_VSI_VF: + if (ice_vsi_alloc_arrays(vsi, true)) + goto err_rings; + break; default: dev_warn(&pf->pdev->dev, "Unknown VSI type %d\n", vsi->type); goto unlock_pf; @@ -685,6 +711,15 @@ static void ice_vsi_set_rss_params(struct ice_vsi *vsi) BIT(cap->rss_table_entry_width)); vsi->rss_lut_type = ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF; break; + case ICE_VSI_VF: + /* VF VSI will gets a small RSS table + * For VSI_LUT, LUT size should be set to 64 bytes + */ + vsi->rss_table_size = ICE_VSIQF_HLUT_ARRAY_SIZE; + vsi->rss_size = min_t(int, num_online_cpus(), + BIT(cap->rss_table_entry_width)); + vsi->rss_lut_type = ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_VSI; + break; default: dev_warn(&pf->pdev->dev, "Unknown VSI type %d\n", vsi->type); @@ -773,17 +808,17 @@ static void ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt) * Setup number and offset of Rx queues for all TCs for the VSI */ + qcount = numq_tc; /* qcount will change if RSS is enabled */ if (test_bit(ICE_FLAG_RSS_ENA, vsi->back->flags)) { - if (vsi->type == ICE_VSI_PF) - max_rss = ICE_MAX_LG_RSS_QS; - else - max_rss = ICE_MAX_SMALL_RSS_QS; - - qcount = min_t(int, numq_tc, max_rss); - qcount = min_t(int, qcount, vsi->rss_size); - } else { - qcount = numq_tc; + if (vsi->type == ICE_VSI_PF || vsi->type == ICE_VSI_VF) { + if (vsi->type == ICE_VSI_PF) + max_rss = ICE_MAX_LG_RSS_QS; + else + max_rss = ICE_MAX_SMALL_RSS_QS; + qcount = min_t(int, numq_tc, max_rss); + qcount = min_t(int, qcount, vsi->rss_size); + } } /* find the (rounded up) power-of-2 of qcount */ @@ -813,6 +848,14 @@ static void ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt) vsi->num_txq = qcount_tx; vsi->num_rxq = offset; + if (vsi->type == ICE_VSI_VF && vsi->num_txq != vsi->num_rxq) { + dev_dbg(&vsi->back->pdev->dev, "VF VSI should have same number of Tx and Rx queues. 
Hence making them equal\n"); + /* since there is a chance that num_rxq could have been changed + * in the above for loop, make num_txq equal to num_rxq. + */ + vsi->num_txq = vsi->num_rxq; + } + /* Rx queue mapping */ ctxt->info.mapping_flags |= cpu_to_le16(ICE_AQ_VSI_Q_MAP_CONTIG); /* q_mapping buffer holds the info for the first queue allocated for @@ -838,6 +881,11 @@ static void ice_set_rss_vsi_ctx(struct ice_vsi_ctx *ctxt, struct ice_vsi *vsi) lut_type = ICE_AQ_VSI_Q_OPT_RSS_LUT_PF; hash_type = ICE_AQ_VSI_Q_OPT_RSS_TPLZ; break; + case ICE_VSI_VF: + /* VF VSI will gets a small RSS table which is a VSI LUT type */ + lut_type = ICE_AQ_VSI_Q_OPT_RSS_LUT_VSI; + hash_type = ICE_AQ_VSI_Q_OPT_RSS_TPLZ; + break; default: dev_warn(&vsi->back->pdev->dev, "Unknown VSI type %d\n", vsi->type); @@ -868,6 +916,11 @@ static int ice_vsi_init(struct ice_vsi *vsi) case ICE_VSI_PF: ctxt.flags = ICE_AQ_VSI_TYPE_PF; break; + case ICE_VSI_VF: + ctxt.flags = ICE_AQ_VSI_TYPE_VF; + /* VF number here is the absolute VF number (0-255) */ + ctxt.vf_num = vsi->vf_id + hw->func_caps.vf_base_id; + break; default: return -ENODEV; } @@ -961,6 +1014,8 @@ static int ice_vsi_alloc_q_vector(struct ice_vsi *vsi, int v_idx) q_vector->vsi = vsi; q_vector->v_idx = v_idx; + if (vsi->type == ICE_VSI_VF) + goto out; /* only set affinity_mask if the CPU is online */ if (cpu_online(v_idx)) cpumask_set_cpu(v_idx, &q_vector->affinity_mask); @@ -973,6 +1028,7 @@ static int ice_vsi_alloc_q_vector(struct ice_vsi *vsi, int v_idx) netif_napi_add(vsi->netdev, &q_vector->napi, ice_napi_poll, NAPI_POLL_WEIGHT); +out: /* tie q_vector and VSI together */ vsi->q_vectors[v_idx] = q_vector; @@ -1067,6 +1123,13 @@ static int ice_vsi_setup_vector_base(struct ice_vsi *vsi) vsi->hw_base_vector = ice_get_res(pf, pf->hw_irq_tracker, num_q_vectors, vsi->idx); break; + case ICE_VSI_VF: + /* take VF misc vector and data vectors into account */ + num_q_vectors = pf->num_vf_msix; + /* For VF VSI, reserve slots only from HW interrupts */ + vsi->hw_base_vector = ice_get_res(pf, pf->hw_irq_tracker, + num_q_vectors, vsi->idx); + break; default: dev_warn(&vsi->back->pdev->dev, "Unknown VSI type %d\n", vsi->type); @@ -1077,9 +1140,11 @@ static int ice_vsi_setup_vector_base(struct ice_vsi *vsi) dev_err(&pf->pdev->dev, "Failed to get tracking for %d HW vectors for VSI %d, err=%d\n", num_q_vectors, vsi->vsi_num, vsi->hw_base_vector); - ice_free_res(vsi->back->sw_irq_tracker, vsi->sw_base_vector, - vsi->idx); - pf->num_avail_sw_msix += num_q_vectors; + if (vsi->type != ICE_VSI_VF) { + ice_free_res(vsi->back->sw_irq_tracker, + vsi->sw_base_vector, vsi->idx); + pf->num_avail_sw_msix += num_q_vectors; + } return -ENOENT; } @@ -1139,7 +1204,6 @@ static int ice_vsi_alloc_rings(struct ice_vsi *vsi) ring->vsi = vsi; ring->dev = &pf->pdev->dev; ring->count = vsi->num_desc; - ring->itr_setting = ICE_DFLT_TX_ITR; vsi->tx_rings[i] = ring; } @@ -1159,7 +1223,6 @@ static int ice_vsi_alloc_rings(struct ice_vsi *vsi) ring->netdev = vsi->netdev; ring->dev = &pf->pdev->dev; ring->count = vsi->num_desc; - ring->itr_setting = ICE_DFLT_RX_ITR; vsi->rx_rings[i] = ring; } @@ -1196,6 +1259,7 @@ static void ice_vsi_map_rings_to_vectors(struct ice_vsi *vsi) tx_rings_per_v = DIV_ROUND_UP(tx_rings_rem, q_vectors - v_id); q_vector->num_ring_tx = tx_rings_per_v; q_vector->tx.ring = NULL; + q_vector->tx.itr_idx = ICE_TX_ITR; q_base = vsi->num_txq - tx_rings_rem; for (q_id = q_base; q_id < (q_base + tx_rings_per_v); q_id++) { @@ -1211,6 +1275,7 @@ static void ice_vsi_map_rings_to_vectors(struct 
ice_vsi *vsi) rx_rings_per_v = DIV_ROUND_UP(rx_rings_rem, q_vectors - v_id); q_vector->num_ring_rx = rx_rings_per_v; q_vector->rx.ring = NULL; + q_vector->rx.itr_idx = ICE_RX_ITR; q_base = vsi->num_rxq - rx_rings_rem; for (q_id = q_base; q_id < (q_base + rx_rings_per_v); q_id++) { @@ -1512,6 +1577,9 @@ int ice_vsi_cfg_rxqs(struct ice_vsi *vsi) int err = 0; u16 i; + if (vsi->type == ICE_VSI_VF) + goto setup_rings; + if (vsi->netdev && vsi->netdev->mtu > ETH_DATA_LEN) vsi->max_frame = vsi->netdev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN; @@ -1519,6 +1587,7 @@ int ice_vsi_cfg_rxqs(struct ice_vsi *vsi) vsi->max_frame = ICE_RXBUF_2048; vsi->rx_buf_len = ICE_RXBUF_2048; +setup_rings: /* set up individual rings */ for (i = 0; i < vsi->num_rxq && !err; i++) err = ice_setup_rx_ctx(vsi->rx_rings[i]); @@ -1615,6 +1684,37 @@ static u32 ice_intrl_usec_to_reg(u8 intrl, u8 gran) } /** + * ice_cfg_itr - configure the initial interrupt throttle values + * @hw: pointer to the HW structure + * @q_vector: interrupt vector that's being configured + * @vector: HW vector index to apply the interrupt throttling to + * + * Configure interrupt throttling values for the ring containers that are + * associated with the interrupt vector passed in. + */ +static void +ice_cfg_itr(struct ice_hw *hw, struct ice_q_vector *q_vector, u16 vector) +{ + u8 itr_gran = hw->itr_gran; + + if (q_vector->num_ring_rx) { + struct ice_ring_container *rc = &q_vector->rx; + + rc->itr = ITR_TO_REG(ICE_DFLT_RX_ITR, itr_gran); + rc->latency_range = ICE_LOW_LATENCY; + wr32(hw, GLINT_ITR(rc->itr_idx, vector), rc->itr); + } + + if (q_vector->num_ring_tx) { + struct ice_ring_container *rc = &q_vector->tx; + + rc->itr = ITR_TO_REG(ICE_DFLT_TX_ITR, itr_gran); + rc->latency_range = ICE_LOW_LATENCY; + wr32(hw, GLINT_ITR(rc->itr_idx, vector), rc->itr); + } +} + +/** * ice_vsi_cfg_msix - MSIX mode Interrupt Config in the HW * @vsi: the VSI being configured */ @@ -1624,31 +1724,13 @@ void ice_vsi_cfg_msix(struct ice_vsi *vsi) u16 vector = vsi->hw_base_vector; struct ice_hw *hw = &pf->hw; u32 txq = 0, rxq = 0; - int i, q, itr; - u8 itr_gran; + int i, q; for (i = 0; i < vsi->num_q_vectors; i++, vector++) { struct ice_q_vector *q_vector = vsi->q_vectors[i]; - itr_gran = hw->itr_gran; - - q_vector->intrl = ICE_DFLT_INTRL; - - if (q_vector->num_ring_rx) { - q_vector->rx.itr = - ITR_TO_REG(vsi->rx_rings[rxq]->itr_setting, - itr_gran); - q_vector->rx.latency_range = ICE_LOW_LATENCY; - } + ice_cfg_itr(hw, q_vector, vector); - if (q_vector->num_ring_tx) { - q_vector->tx.itr = - ITR_TO_REG(vsi->tx_rings[txq]->itr_setting, - itr_gran); - q_vector->tx.latency_range = ICE_LOW_LATENCY; - } - wr32(hw, GLINT_ITR(ICE_RX_ITR, vector), q_vector->rx.itr); - wr32(hw, GLINT_ITR(ICE_TX_ITR, vector), q_vector->tx.itr); wr32(hw, GLINT_RATE(vector), ice_intrl_usec_to_reg(q_vector->intrl, hw->intrl_gran)); @@ -1664,23 +1746,33 @@ void ice_vsi_cfg_msix(struct ice_vsi *vsi) * tracked for this PF. 
*/ for (q = 0; q < q_vector->num_ring_tx; q++) { + int itr_idx = q_vector->tx.itr_idx; u32 val; - itr = ICE_ITR_NONE; - val = QINT_TQCTL_CAUSE_ENA_M | - (itr << QINT_TQCTL_ITR_INDX_S) | - (vector << QINT_TQCTL_MSIX_INDX_S); + if (vsi->type == ICE_VSI_VF) + val = QINT_TQCTL_CAUSE_ENA_M | + (itr_idx << QINT_TQCTL_ITR_INDX_S) | + ((i + 1) << QINT_TQCTL_MSIX_INDX_S); + else + val = QINT_TQCTL_CAUSE_ENA_M | + (itr_idx << QINT_TQCTL_ITR_INDX_S) | + (vector << QINT_TQCTL_MSIX_INDX_S); wr32(hw, QINT_TQCTL(vsi->txq_map[txq]), val); txq++; } for (q = 0; q < q_vector->num_ring_rx; q++) { + int itr_idx = q_vector->rx.itr_idx; u32 val; - itr = ICE_ITR_NONE; - val = QINT_RQCTL_CAUSE_ENA_M | - (itr << QINT_RQCTL_ITR_INDX_S) | - (vector << QINT_RQCTL_MSIX_INDX_S); + if (vsi->type == ICE_VSI_VF) + val = QINT_RQCTL_CAUSE_ENA_M | + (itr_idx << QINT_RQCTL_ITR_INDX_S) | + ((i + 1) << QINT_RQCTL_MSIX_INDX_S); + else + val = QINT_RQCTL_CAUSE_ENA_M | + (itr_idx << QINT_RQCTL_ITR_INDX_S) | + (vector << QINT_RQCTL_MSIX_INDX_S); wr32(hw, QINT_RQCTL(vsi->rxq_map[rxq]), val); rxq++; } @@ -1784,8 +1876,11 @@ int ice_vsi_stop_rx_rings(struct ice_vsi *vsi) /** * ice_vsi_stop_tx_rings - Disable Tx rings * @vsi: the VSI being configured + * @rst_src: reset source + * @rel_vmvf_num: Relative id of VF/VM */ -int ice_vsi_stop_tx_rings(struct ice_vsi *vsi) +int ice_vsi_stop_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src, + u16 rel_vmvf_num) { struct ice_pf *pf = vsi->back; struct ice_hw *hw = &pf->hw; @@ -1837,7 +1932,7 @@ int ice_vsi_stop_tx_rings(struct ice_vsi *vsi) GLINT_DYN_CTL_SWINT_TRIG_M | GLINT_DYN_CTL_INTENA_MSK_M); } status = ice_dis_vsi_txq(vsi->port_info, vsi->num_txq, q_ids, q_teids, - NULL); + rst_src, rel_vmvf_num, NULL); /* if the disable queue command was exercised during an active reset * flow, ICE_ERR_RESET_ONGOING is returned. This is not an error as * the reset operation disables queues at the hardware level anyway. @@ -1934,7 +2029,7 @@ err_out: */ struct ice_vsi * ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi, - enum ice_vsi_type type, u16 __always_unused vf_id) + enum ice_vsi_type type, u16 vf_id) { u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 }; struct device *dev = &pf->pdev->dev; @@ -1949,6 +2044,8 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi, vsi->port_info = pi; vsi->vsw = pf->first_sw; + if (vsi->type == ICE_VSI_VF) + vsi->vf_id = vf_id; if (ice_vsi_get_qs(vsi)) { dev_err(dev, "Failed to allocate queues. vsi->idx = %d\n", @@ -1987,6 +2084,34 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi, if (test_bit(ICE_FLAG_RSS_ENA, pf->flags)) ice_vsi_cfg_rss_lut_key(vsi); break; + case ICE_VSI_VF: + /* VF driver will take care of creating netdev for this type and + * map queues to vectors through Virtchnl, PF driver only + * creates a VSI and corresponding structures for bookkeeping + * purpose + */ + ret = ice_vsi_alloc_q_vectors(vsi); + if (ret) + goto unroll_vsi_init; + + ret = ice_vsi_alloc_rings(vsi); + if (ret) + goto unroll_alloc_q_vector; + + /* Setup Vector base only during VF init phase or when VF asks + * for more vectors than assigned number. In all other cases, + * assign hw_base_vector to the value given earlier. 
+ */ + if (test_bit(ICE_VF_STATE_CFG_INTR, pf->vf[vf_id].vf_states)) { + ret = ice_vsi_setup_vector_base(vsi); + if (ret) + goto unroll_vector_base; + } else { + vsi->hw_base_vector = pf->vf[vf_id].first_vector_idx; + } + pf->q_left_tx -= vsi->alloc_txq; + pf->q_left_rx -= vsi->alloc_rxq; + break; default: /* if VSI type is not recognized, clean up the resources and * exit @@ -2045,8 +2170,8 @@ static void ice_vsi_release_msix(struct ice_vsi *vsi) for (i = 0; i < vsi->num_q_vectors; i++, vector++) { struct ice_q_vector *q_vector = vsi->q_vectors[i]; - wr32(hw, GLINT_ITR(ICE_RX_ITR, vector), 0); - wr32(hw, GLINT_ITR(ICE_TX_ITR, vector), 0); + wr32(hw, GLINT_ITR(ICE_IDX_ITR0, vector), 0); + wr32(hw, GLINT_ITR(ICE_IDX_ITR1, vector), 0); for (q = 0; q < q_vector->num_ring_tx; q++) { wr32(hw, QINT_TQCTL(vsi->txq_map[txq]), 0); txq++; @@ -2077,6 +2202,8 @@ void ice_vsi_free_irq(struct ice_vsi *vsi) return; ice_vsi_release_msix(vsi); + if (vsi->type == ICE_VSI_VF) + return; vsi->irqs_ready = false; for (i = 0; i < vsi->num_q_vectors; i++) { @@ -2317,10 +2444,12 @@ void ice_vsi_dis_irq(struct ice_vsi *vsi) int ice_vsi_release(struct ice_vsi *vsi) { struct ice_pf *pf; + struct ice_vf *vf; if (!vsi->back) return -ENODEV; pf = vsi->back; + vf = &pf->vf[vsi->vf_id]; /* do not unregister and free netdevs while driver is in the reset * recovery pending state. Since reset/rebuild happens through PF * service task workqueue, its not a good idea to unregister netdev @@ -2342,10 +2471,23 @@ int ice_vsi_release(struct ice_vsi *vsi) ice_vsi_close(vsi); /* reclaim interrupt vectors back to PF */ - ice_free_res(vsi->back->sw_irq_tracker, vsi->sw_base_vector, vsi->idx); - pf->num_avail_sw_msix += vsi->num_q_vectors; - ice_free_res(vsi->back->hw_irq_tracker, vsi->hw_base_vector, vsi->idx); - pf->num_avail_hw_msix += vsi->num_q_vectors; + if (vsi->type != ICE_VSI_VF) { + /* reclaim SW interrupts back to the common pool */ + ice_free_res(vsi->back->sw_irq_tracker, vsi->sw_base_vector, + vsi->idx); + pf->num_avail_sw_msix += vsi->num_q_vectors; + /* reclaim HW interrupts back to the common pool */ + ice_free_res(vsi->back->hw_irq_tracker, vsi->hw_base_vector, + vsi->idx); + pf->num_avail_hw_msix += vsi->num_q_vectors; + } else if (test_bit(ICE_VF_STATE_CFG_INTR, vf->vf_states)) { + /* Reclaim VF resources back only while freeing all VFs or + * vector reassignment is requested + */ + ice_free_res(vsi->back->hw_irq_tracker, vf->first_vector_idx, + vsi->idx); + pf->num_avail_hw_msix += pf->num_vf_msix; + } ice_remove_vsi_fltr(&pf->hw, vsi->idx); ice_vsi_delete(vsi); @@ -2414,6 +2556,22 @@ int ice_vsi_rebuild(struct ice_vsi *vsi) ice_vsi_map_rings_to_vectors(vsi); break; + case ICE_VSI_VF: + ret = ice_vsi_alloc_q_vectors(vsi); + if (ret) + goto err_rings; + + ret = ice_vsi_setup_vector_base(vsi); + if (ret) + goto err_vectors; + + ret = ice_vsi_alloc_rings(vsi); + if (ret) + goto err_vectors; + + vsi->back->q_left_tx -= vsi->alloc_txq; + vsi->back->q_left_rx -= vsi->alloc_rxq; + break; default: break; } diff --git a/drivers/net/ethernet/intel/ice/ice_lib.h b/drivers/net/ethernet/intel/ice/ice_lib.h index 2617afe01c82..677db40338f5 100644 --- a/drivers/net/ethernet/intel/ice/ice_lib.h +++ b/drivers/net/ethernet/intel/ice/ice_lib.h @@ -31,7 +31,8 @@ int ice_vsi_start_rx_rings(struct ice_vsi *vsi); int ice_vsi_stop_rx_rings(struct ice_vsi *vsi); -int ice_vsi_stop_tx_rings(struct ice_vsi *vsi); +int ice_vsi_stop_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src, + u16 rel_vmvf_num); int ice_cfg_vlan_pruning(struct 
ice_vsi *vsi, bool ena); diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c index 46ccf265c218..8f61b375e768 100644 --- a/drivers/net/ethernet/intel/ice/ice_main.c +++ b/drivers/net/ethernet/intel/ice/ice_main.c @@ -8,7 +8,7 @@ #include "ice.h" #include "ice_lib.h" -#define DRV_VERSION "0.7.1-k" +#define DRV_VERSION "0.7.2-k" #define DRV_SUMMARY "Intel(R) Ethernet Connection E800 Series Linux Driver" const char ice_drv_ver[] = DRV_VERSION; static const char ice_driver_string[] = DRV_SUMMARY; @@ -342,6 +342,10 @@ ice_prepare_for_reset(struct ice_pf *pf) { struct ice_hw *hw = &pf->hw; + /* Notify VFs of impending reset */ + if (ice_check_sq_alive(hw, &hw->mailboxq)) + ice_vc_notify_reset(pf); + /* disable the VSIs and their queues that are not already DOWN */ ice_pf_dis_all_vsi(pf); @@ -661,6 +665,8 @@ ice_link_event(struct ice_pf *pf, struct ice_port_info *pi) } } + ice_vc_notify_link_state(pf); + return 0; } @@ -711,6 +717,10 @@ static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type) cq = &hw->adminq; qtype = "Admin"; break; + case ICE_CTL_Q_MAILBOX: + cq = &hw->mailboxq; + qtype = "Mailbox"; + break; default: dev_warn(&pf->pdev->dev, "Unknown control queue type 0x%x\n", q_type); @@ -792,6 +802,9 @@ static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type) dev_err(&pf->pdev->dev, "Could not handle link event\n"); break; + case ice_mbx_opc_send_msg_to_pf: + ice_vc_process_vf_msg(pf, &event); + break; case ice_aqc_opc_fw_logging: ice_output_fw_log(hw, &event.desc, event.msg_buf); break; @@ -851,6 +864,28 @@ static void ice_clean_adminq_subtask(struct ice_pf *pf) } /** + * ice_clean_mailboxq_subtask - clean the MailboxQ rings + * @pf: board private structure + */ +static void ice_clean_mailboxq_subtask(struct ice_pf *pf) +{ + struct ice_hw *hw = &pf->hw; + + if (!test_bit(__ICE_MAILBOXQ_EVENT_PENDING, pf->state)) + return; + + if (__ice_clean_ctrlq(pf, ICE_CTL_Q_MAILBOX)) + return; + + clear_bit(__ICE_MAILBOXQ_EVENT_PENDING, pf->state); + + if (ice_ctrlq_pending(hw, &hw->mailboxq)) + __ice_clean_ctrlq(pf, ICE_CTL_Q_MAILBOX); + + ice_flush(hw); +} + +/** * ice_service_task_schedule - schedule the service task to wake up * @pf: board private structure * @@ -916,6 +951,7 @@ static void ice_handle_mdd_event(struct ice_pf *pf) struct ice_hw *hw = &pf->hw; bool mdd_detected = false; u32 reg; + int i; if (!test_bit(__ICE_MDD_EVENT_PENDING, pf->state)) return; @@ -1005,6 +1041,51 @@ static void ice_handle_mdd_event(struct ice_pf *pf) } } + /* see if one of the VFs needs to be reset */ + for (i = 0; i < pf->num_alloc_vfs && mdd_detected; i++) { + struct ice_vf *vf = &pf->vf[i]; + + reg = rd32(hw, VP_MDET_TX_PQM(i)); + if (reg & VP_MDET_TX_PQM_VALID_M) { + wr32(hw, VP_MDET_TX_PQM(i), 0xFFFF); + vf->num_mdd_events++; + dev_info(&pf->pdev->dev, "TX driver issue detected on VF %d\n", + i); + } + + reg = rd32(hw, VP_MDET_TX_TCLAN(i)); + if (reg & VP_MDET_TX_TCLAN_VALID_M) { + wr32(hw, VP_MDET_TX_TCLAN(i), 0xFFFF); + vf->num_mdd_events++; + dev_info(&pf->pdev->dev, "TX driver issue detected on VF %d\n", + i); + } + + reg = rd32(hw, VP_MDET_TX_TDPU(i)); + if (reg & VP_MDET_TX_TDPU_VALID_M) { + wr32(hw, VP_MDET_TX_TDPU(i), 0xFFFF); + vf->num_mdd_events++; + dev_info(&pf->pdev->dev, "TX driver issue detected on VF %d\n", + i); + } + + reg = rd32(hw, VP_MDET_RX(i)); + if (reg & VP_MDET_RX_VALID_M) { + wr32(hw, VP_MDET_RX(i), 0xFFFF); + vf->num_mdd_events++; + dev_info(&pf->pdev->dev, "RX driver issue detected on VF %d\n", + i); + } + + 
if (vf->num_mdd_events > ICE_DFLT_NUM_MDD_EVENTS_ALLOWED) { + dev_info(&pf->pdev->dev, + "Too many MDD events on VF %d, disabled\n", i); + dev_info(&pf->pdev->dev, + "Use PF Control I/F to re-enable the VF\n"); + set_bit(ICE_VF_STATE_DIS, vf->vf_states); + } + } + /* re-enable MDD interrupt cause */ clear_bit(__ICE_MDD_EVENT_PENDING, pf->state); reg = rd32(hw, PFINT_OICR_ENA); @@ -1038,8 +1119,10 @@ static void ice_service_task(struct work_struct *work) ice_check_for_hang_subtask(pf); ice_sync_fltr_subtask(pf); ice_handle_mdd_event(pf); + ice_process_vflr_event(pf); ice_watchdog_subtask(pf); ice_clean_adminq_subtask(pf); + ice_clean_mailboxq_subtask(pf); /* Clear __ICE_SERVICE_SCHED flag to allow scheduling next event */ ice_service_task_complete(pf); @@ -1050,6 +1133,8 @@ static void ice_service_task(struct work_struct *work) */ if (time_after(jiffies, (start_time + pf->serv_tmr_period)) || test_bit(__ICE_MDD_EVENT_PENDING, pf->state) || + test_bit(__ICE_VFLR_EVENT_PENDING, pf->state) || + test_bit(__ICE_MAILBOXQ_EVENT_PENDING, pf->state) || test_bit(__ICE_ADMINQ_EVENT_PENDING, pf->state)) mod_timer(&pf->serv_tmr, jiffies); } @@ -1064,6 +1149,10 @@ static void ice_set_ctrlq_len(struct ice_hw *hw) hw->adminq.num_sq_entries = ICE_AQ_LEN; hw->adminq.rq_buf_size = ICE_AQ_MAX_BUF_LEN; hw->adminq.sq_buf_size = ICE_AQ_MAX_BUF_LEN; + hw->mailboxq.num_rq_entries = ICE_MBXQ_LEN; + hw->mailboxq.num_sq_entries = ICE_MBXQ_LEN; + hw->mailboxq.rq_buf_size = ICE_MBXQ_MAX_BUF_LEN; + hw->mailboxq.sq_buf_size = ICE_MBXQ_MAX_BUF_LEN; } /** @@ -1197,6 +1286,7 @@ static void ice_ena_misc_vector(struct ice_pf *pf) PFINT_OICR_MAL_DETECT_M | PFINT_OICR_GRST_M | PFINT_OICR_PCI_EXCEPTION_M | + PFINT_OICR_VFLR_M | PFINT_OICR_HMC_ERR_M | PFINT_OICR_PE_CRITERR_M); @@ -1220,6 +1310,7 @@ static irqreturn_t ice_misc_intr(int __always_unused irq, void *data) u32 oicr, ena_mask; set_bit(__ICE_ADMINQ_EVENT_PENDING, pf->state); + set_bit(__ICE_MAILBOXQ_EVENT_PENDING, pf->state); oicr = rd32(hw, PFINT_OICR); ena_mask = rd32(hw, PFINT_OICR_ENA); @@ -1228,6 +1319,10 @@ static irqreturn_t ice_misc_intr(int __always_unused irq, void *data) ena_mask &= ~PFINT_OICR_MAL_DETECT_M; set_bit(__ICE_MDD_EVENT_PENDING, pf->state); } + if (oicr & PFINT_OICR_VFLR_M) { + ena_mask &= ~PFINT_OICR_VFLR_M; + set_bit(__ICE_VFLR_EVENT_PENDING, pf->state); + } if (oicr & PFINT_OICR_GRST_M) { u32 reset; @@ -1406,6 +1501,11 @@ skip_req_irq: PFINT_FW_CTL_CAUSE_ENA_M); wr32(hw, PFINT_FW_CTL, val); + /* This enables Mailbox queue Interrupt causes */ + val = ((pf->hw_oicr_idx & PFINT_MBX_CTL_MSIX_INDX_M) | + PFINT_MBX_CTL_CAUSE_ENA_M); + wr32(hw, PFINT_MBX_CTL, val); + itr_gran = hw->itr_gran; wr32(hw, GLINT_ITR(ICE_RX_ITR, pf->hw_oicr_idx), @@ -1775,6 +1875,15 @@ static void ice_init_pf(struct ice_pf *pf) { bitmap_zero(pf->flags, ICE_PF_FLAGS_NBITS); set_bit(ICE_FLAG_MSIX_ENA, pf->flags); +#ifdef CONFIG_PCI_IOV + if (pf->hw.func_caps.common_cap.sr_iov_1_1) { + struct ice_hw *hw = &pf->hw; + + set_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags); + pf->num_vfs_supported = min_t(int, hw->func_caps.num_allocd_vfs, + ICE_MAX_VF_COUNT); + } +#endif /* CONFIG_PCI_IOV */ mutex_init(&pf->sw_mutex); mutex_init(&pf->avail_q_mutex); @@ -2138,6 +2247,8 @@ static void ice_remove(struct pci_dev *pdev) set_bit(__ICE_DOWN, pf->state); ice_service_task_stop(pf); + if (test_bit(ICE_FLAG_SRIOV_ENA, pf->flags)) + ice_free_vfs(pf); ice_vsi_release_all(pf); ice_free_irq_msix_misc(pf); ice_for_each_vsi(pf, i) { @@ -2173,6 +2284,7 @@ static struct pci_driver ice_driver = { .id_table = 
ice_pci_tbl, .probe = ice_probe, .remove = ice_remove, + .sriov_configure = ice_sriov_configure, }; /** @@ -2908,7 +3020,7 @@ int ice_down(struct ice_vsi *vsi) } ice_vsi_dis_irq(vsi); - tx_err = ice_vsi_stop_tx_rings(vsi); + tx_err = ice_vsi_stop_tx_rings(vsi, ICE_NO_RESET, 0); if (tx_err) netdev_err(vsi->netdev, "Failed stop Tx rings, VSI %d error %d\n", @@ -3102,13 +3214,14 @@ static void ice_dis_vsi(struct ice_vsi *vsi) set_bit(__ICE_NEEDS_RESTART, vsi->state); - if (vsi->netdev && netif_running(vsi->netdev) && - vsi->type == ICE_VSI_PF) { - rtnl_lock(); - vsi->netdev->netdev_ops->ndo_stop(vsi->netdev); - rtnl_unlock(); - } else { - ice_vsi_close(vsi); + if (vsi->type == ICE_VSI_PF && vsi->netdev) { + if (netif_running(vsi->netdev)) { + rtnl_lock(); + vsi->netdev->netdev_ops->ndo_stop(vsi->netdev); + rtnl_unlock(); + } else { + ice_vsi_close(vsi); + } } } @@ -3120,12 +3233,16 @@ static int ice_ena_vsi(struct ice_vsi *vsi) { int err = 0; - if (test_and_clear_bit(__ICE_NEEDS_RESTART, vsi->state)) - if (vsi->netdev && netif_running(vsi->netdev)) { + if (test_and_clear_bit(__ICE_NEEDS_RESTART, vsi->state) && + vsi->netdev) { + if (netif_running(vsi->netdev)) { rtnl_lock(); err = vsi->netdev->netdev_ops->ndo_open(vsi->netdev); rtnl_unlock(); + } else { + err = ice_vsi_open(vsi); } + } return err; } @@ -3174,6 +3291,10 @@ static int ice_vsi_rebuild_all(struct ice_pf *pf) if (!pf->vsi[i]) continue; + /* VF VSI rebuild isn't supported yet */ + if (pf->vsi[i]->type == ICE_VSI_VF) + continue; + err = ice_vsi_rebuild(pf->vsi[i]); if (err) { dev_err(&pf->pdev->dev, @@ -3310,6 +3431,7 @@ static void ice_rebuild(struct ice_pf *pf) goto err_vsi_rebuild; } + ice_reset_all_vfs(pf, true); /* if we get here, reset flow is successful */ clear_bit(__ICE_RESET_FAILED, pf->state); return; @@ -3818,6 +3940,12 @@ static const struct net_device_ops ice_netdev_ops = { .ndo_validate_addr = eth_validate_addr, .ndo_change_mtu = ice_change_mtu, .ndo_get_stats64 = ice_get_stats64, + .ndo_set_vf_spoofchk = ice_set_vf_spoofchk, + .ndo_set_vf_mac = ice_set_vf_mac, + .ndo_get_vf_config = ice_get_vf_cfg, + .ndo_set_vf_trust = ice_set_vf_trust, + .ndo_set_vf_vlan = ice_set_vf_port_vlan, + .ndo_set_vf_link_state = ice_set_vf_link_state, .ndo_vlan_rx_add_vid = ice_vlan_rx_add_vid, .ndo_vlan_rx_kill_vid = ice_vlan_rx_kill_vid, .ndo_set_features = ice_set_features, diff --git a/drivers/net/ethernet/intel/ice/ice_sriov.c b/drivers/net/ethernet/intel/ice/ice_sriov.c new file mode 100644 index 000000000000..027eba4e13f8 --- /dev/null +++ b/drivers/net/ethernet/intel/ice/ice_sriov.c @@ -0,0 +1,127 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2018, Intel Corporation. 
*/ + +#include "ice_common.h" +#include "ice_adminq_cmd.h" +#include "ice_sriov.h" + +/** + * ice_aq_send_msg_to_vf + * @hw: pointer to the hardware structure + * @vfid: VF ID to send msg + * @v_opcode: opcodes for VF-PF communication + * @v_retval: return error code + * @msg: pointer to the msg buffer + * @msglen: msg length + * @cd: pointer to command details + * + * Send message to VF driver (0x0802) using mailbox + * queue and asynchronously sending message via + * ice_sq_send_cmd() function + */ +enum ice_status +ice_aq_send_msg_to_vf(struct ice_hw *hw, u16 vfid, u32 v_opcode, u32 v_retval, + u8 *msg, u16 msglen, struct ice_sq_cd *cd) +{ + struct ice_aqc_pf_vf_msg *cmd; + struct ice_aq_desc desc; + + ice_fill_dflt_direct_cmd_desc(&desc, ice_mbx_opc_send_msg_to_vf); + + cmd = &desc.params.virt; + cmd->id = cpu_to_le32(vfid); + + desc.cookie_high = cpu_to_le32(v_opcode); + desc.cookie_low = cpu_to_le32(v_retval); + + if (msglen) + desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); + + return ice_sq_send_cmd(hw, &hw->mailboxq, &desc, msg, msglen, cd); +} + +/** + * ice_conv_link_speed_to_virtchnl + * @adv_link_support: determines the format of the returned link speed + * @link_speed: variable containing the link_speed to be converted + * + * Convert link speed supported by HW to link speed supported by virtchnl. + * If adv_link_support is true, then return link speed in Mbps. Else return + * link speed as a VIRTCHNL_LINK_SPEED_* casted to a u32. Note that the caller + * needs to cast back to an enum virtchnl_link_speed in the case where + * adv_link_support is false, but when adv_link_support is true the caller can + * expect the speed in Mbps. + */ +u32 ice_conv_link_speed_to_virtchnl(bool adv_link_support, u16 link_speed) +{ + u32 speed; + + if (adv_link_support) + switch (link_speed) { + case ICE_AQ_LINK_SPEED_10MB: + speed = ICE_LINK_SPEED_10MBPS; + break; + case ICE_AQ_LINK_SPEED_100MB: + speed = ICE_LINK_SPEED_100MBPS; + break; + case ICE_AQ_LINK_SPEED_1000MB: + speed = ICE_LINK_SPEED_1000MBPS; + break; + case ICE_AQ_LINK_SPEED_2500MB: + speed = ICE_LINK_SPEED_2500MBPS; + break; + case ICE_AQ_LINK_SPEED_5GB: + speed = ICE_LINK_SPEED_5000MBPS; + break; + case ICE_AQ_LINK_SPEED_10GB: + speed = ICE_LINK_SPEED_10000MBPS; + break; + case ICE_AQ_LINK_SPEED_20GB: + speed = ICE_LINK_SPEED_20000MBPS; + break; + case ICE_AQ_LINK_SPEED_25GB: + speed = ICE_LINK_SPEED_25000MBPS; + break; + case ICE_AQ_LINK_SPEED_40GB: + speed = ICE_LINK_SPEED_40000MBPS; + break; + default: + speed = ICE_LINK_SPEED_UNKNOWN; + break; + } + else + /* Virtchnl speeds are not defined for every speed supported in + * the hardware. 
To maintain compatibility with older AVF + * drivers, the new speed values are resolved to the closest known + * virtchnl speed when the link speed is reported + */ + switch (link_speed) { + case ICE_AQ_LINK_SPEED_10MB: + case ICE_AQ_LINK_SPEED_100MB: + speed = (u32)VIRTCHNL_LINK_SPEED_100MB; + break; + case ICE_AQ_LINK_SPEED_1000MB: + case ICE_AQ_LINK_SPEED_2500MB: + case ICE_AQ_LINK_SPEED_5GB: + speed = (u32)VIRTCHNL_LINK_SPEED_1GB; + break; + case ICE_AQ_LINK_SPEED_10GB: + speed = (u32)VIRTCHNL_LINK_SPEED_10GB; + break; + case ICE_AQ_LINK_SPEED_20GB: + speed = (u32)VIRTCHNL_LINK_SPEED_20GB; + break; + case ICE_AQ_LINK_SPEED_25GB: + speed = (u32)VIRTCHNL_LINK_SPEED_25GB; + break; + case ICE_AQ_LINK_SPEED_40GB: + speed = (u32)VIRTCHNL_LINK_SPEED_40GB; + break; + default: + speed = (u32)VIRTCHNL_LINK_SPEED_UNKNOWN; + break; + } + + return speed; +} diff --git a/drivers/net/ethernet/intel/ice/ice_sriov.h b/drivers/net/ethernet/intel/ice/ice_sriov.h new file mode 100644 index 000000000000..3d78a0795138 --- /dev/null +++ b/drivers/net/ethernet/intel/ice/ice_sriov.h @@ -0,0 +1,34 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2018, Intel Corporation. */ + +#ifndef _ICE_SRIOV_H_ +#define _ICE_SRIOV_H_ + +#include "ice_common.h" + +#ifdef CONFIG_PCI_IOV +enum ice_status +ice_aq_send_msg_to_vf(struct ice_hw *hw, u16 vfid, u32 v_opcode, u32 v_retval, + u8 *msg, u16 msglen, struct ice_sq_cd *cd); + +u32 ice_conv_link_speed_to_virtchnl(bool adv_link_support, u16 link_speed); +#else /* CONFIG_PCI_IOV */ +static inline enum ice_status +ice_aq_send_msg_to_vf(struct ice_hw __always_unused *hw, + u16 __always_unused vfid, u32 __always_unused v_opcode, + u32 __always_unused v_retval, u8 __always_unused *msg, + u16 __always_unused msglen, + struct ice_sq_cd __always_unused *cd) +{ + return 0; +} + +static inline u32 +ice_conv_link_speed_to_virtchnl(bool __always_unused adv_link_support, + u16 __always_unused link_speed) +{ + return 0; +} + +#endif /* CONFIG_PCI_IOV */ +#endif /* _ICE_SRIOV_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_status.h b/drivers/net/ethernet/intel/ice/ice_status.h index d2dae913d81e..f49f299ddf2c 100644 --- a/drivers/net/ethernet/intel/ice/ice_status.h +++ b/drivers/net/ethernet/intel/ice/ice_status.h @@ -6,6 +6,9 @@ /* Error Codes */ enum ice_status { + ICE_SUCCESS = 0, + + /* Generic codes : Range -1..-49 */ ICE_ERR_PARAM = -1, ICE_ERR_NOT_IMPL = -2, ICE_ERR_NOT_READY = -3, diff --git a/drivers/net/ethernet/intel/ice/ice_switch.c b/drivers/net/ethernet/intel/ice/ice_switch.c index e949224b5282..33403f39f1b3 100644 --- a/drivers/net/ethernet/intel/ice/ice_switch.c +++ b/drivers/net/ethernet/intel/ice/ice_switch.c @@ -187,6 +187,7 @@ ice_aq_add_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx, if (!vsi_ctx->alloc_from_pool) cmd->vsi_num = cpu_to_le16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID); + cmd->vf_id = vsi_ctx->vf_num; cmd->vsi_flags = cpu_to_le16(vsi_ctx->flags); @@ -655,6 +656,7 @@ ice_fill_sw_rule(struct ice_hw *hw, struct ice_fltr_info *f_info, u8 *eth_hdr; u32 act = 0; __be16 *off; + u8 q_rgn; if (opc == ice_aqc_opc_remove_sw_rules) { s_rule->pdata.lkup_tx_rx.act = 0; @@ -693,14 +695,19 @@ ice_fill_sw_rule(struct ice_hw *hw, struct ice_fltr_info *f_info, act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) & ICE_SINGLE_ACT_Q_INDEX_M; break; + case ICE_DROP_PACKET: + act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP | + ICE_SINGLE_ACT_VALID_BIT; + break; + case ICE_FWD_TO_QGRP: + q_rgn = f_info->qgrp_size > 0 ? 
+ (u8)ilog2(f_info->qgrp_size) : 0; act |= ICE_SINGLE_ACT_TO_Q; - act |= (f_info->qgrp_size << ICE_SINGLE_ACT_Q_REGION_S) & + act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) & + ICE_SINGLE_ACT_Q_INDEX_M; + act |= (q_rgn << ICE_SINGLE_ACT_Q_REGION_S) & ICE_SINGLE_ACT_Q_REGION_M; break; - case ICE_DROP_PACKET: - act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP; - break; default: return; } @@ -1415,8 +1422,8 @@ ice_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle, fm_list->vsi_count--; clear_bit(vsi_handle, fm_list->vsi_list_info->vsi_map); - if ((fm_list->vsi_count == 1 && lkup_type != ICE_SW_LKUP_VLAN) || - (fm_list->vsi_count == 0 && lkup_type == ICE_SW_LKUP_VLAN)) { + if (fm_list->vsi_count == 1 && lkup_type != ICE_SW_LKUP_VLAN) { + struct ice_fltr_info tmp_fltr_info = fm_list->fltr_info; struct ice_vsi_list_map_info *vsi_list_info = fm_list->vsi_list_info; u16 rem_vsi_handle; @@ -1425,6 +1432,8 @@ ice_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle, ICE_MAX_VSI); if (!ice_is_vsi_valid(hw, rem_vsi_handle)) return ICE_ERR_OUT_OF_RANGE; + + /* Make sure VSI list is empty before removing it below */ status = ice_update_vsi_list_rule(hw, &rem_vsi_handle, 1, vsi_list_id, true, ice_aqc_opc_update_sw_rules, @@ -1432,16 +1441,34 @@ ice_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle, if (status) return status; + tmp_fltr_info.fltr_act = ICE_FWD_TO_VSI; + tmp_fltr_info.fwd_id.hw_vsi_id = + ice_get_hw_vsi_num(hw, rem_vsi_handle); + tmp_fltr_info.vsi_handle = rem_vsi_handle; + status = ice_update_pkt_fwd_rule(hw, &tmp_fltr_info); + if (status) { + ice_debug(hw, ICE_DBG_SW, + "Failed to update pkt fwd rule to FWD_TO_VSI on HW VSI %d, error %d\n", + tmp_fltr_info.fwd_id.hw_vsi_id, status); + return status; + } + + fm_list->fltr_info = tmp_fltr_info; + } + + if ((fm_list->vsi_count == 1 && lkup_type != ICE_SW_LKUP_VLAN) || + (fm_list->vsi_count == 0 && lkup_type == ICE_SW_LKUP_VLAN)) { + struct ice_vsi_list_map_info *vsi_list_info = + fm_list->vsi_list_info; + /* Remove the VSI list since it is no longer used */ status = ice_remove_vsi_list_rule(hw, vsi_list_id, lkup_type); - if (status) + if (status) { + ice_debug(hw, ICE_DBG_SW, + "Failed to remove VSI list %d, error %d\n", + vsi_list_id, status); return status; - - /* Change the list entry action from VSI_LIST to VSI */ - fm_list->fltr_info.fltr_act = ICE_FWD_TO_VSI; - fm_list->fltr_info.fwd_id.hw_vsi_id = - ice_get_hw_vsi_num(hw, rem_vsi_handle); - fm_list->fltr_info.vsi_handle = rem_vsi_handle; + } list_del(&vsi_list_info->list_entry); devm_kfree(ice_hw_to_dev(hw), vsi_list_info); @@ -1983,12 +2010,12 @@ out: enum ice_status ice_remove_mac(struct ice_hw *hw, struct list_head *m_list) { - struct ice_fltr_list_entry *list_itr; + struct ice_fltr_list_entry *list_itr, *tmp; if (!m_list) return ICE_ERR_PARAM; - list_for_each_entry(list_itr, m_list, list_entry) { + list_for_each_entry_safe(list_itr, tmp, m_list, list_entry) { enum ice_sw_lkup_type l_type = list_itr->fltr_info.lkup_type; if (l_type != ICE_SW_LKUP_MAC) @@ -2010,12 +2037,12 @@ ice_remove_mac(struct ice_hw *hw, struct list_head *m_list) enum ice_status ice_remove_vlan(struct ice_hw *hw, struct list_head *v_list) { - struct ice_fltr_list_entry *v_list_itr; + struct ice_fltr_list_entry *v_list_itr, *tmp; if (!v_list || !hw) return ICE_ERR_PARAM; - list_for_each_entry(v_list_itr, v_list, list_entry) { + list_for_each_entry_safe(v_list_itr, tmp, v_list, list_entry) { enum ice_sw_lkup_type l_type = v_list_itr->fltr_info.lkup_type; if (l_type != 
ICE_SW_LKUP_VLAN) @@ -2115,7 +2142,7 @@ ice_add_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle, struct ice_fltr_info *fi; fi = &fm_entry->fltr_info; - if (!ice_vsi_uses_fltr(fm_entry, vsi_handle)) + if (!fi || !ice_vsi_uses_fltr(fm_entry, vsi_handle)) continue; status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle, @@ -2232,7 +2259,8 @@ ice_replay_vsi_fltr(struct ice_hw *hw, u16 vsi_handle, u8 recp_id, goto end; continue; } - if (!test_bit(vsi_handle, itr->vsi_list_info->vsi_map)) + if (!itr->vsi_list_info || + !test_bit(vsi_handle, itr->vsi_list_info->vsi_map)) continue; /* Clearing it so that the logic can add it back */ clear_bit(vsi_handle, itr->vsi_list_info->vsi_map); diff --git a/drivers/net/ethernet/intel/ice/ice_switch.h b/drivers/net/ethernet/intel/ice/ice_switch.h index 7706e9b6003c..b88d96a1ef69 100644 --- a/drivers/net/ethernet/intel/ice/ice_switch.h +++ b/drivers/net/ethernet/intel/ice/ice_switch.h @@ -19,6 +19,7 @@ struct ice_vsi_ctx { struct ice_aqc_vsi_props info; struct ice_sched_vsi_info sched; u8 alloc_from_pool; + u8 vf_num; }; enum ice_sw_fwd_act_type { diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.h b/drivers/net/ethernet/intel/ice/ice_txrx.h index a9b92974e041..1d0f58bd389b 100644 --- a/drivers/net/ethernet/intel/ice/ice_txrx.h +++ b/drivers/net/ethernet/intel/ice/ice_txrx.h @@ -105,8 +105,9 @@ enum ice_rx_dtype { #define ICE_TX_ITR ICE_IDX_ITR1 #define ICE_ITR_DYNAMIC 0x8000 /* use top bit as a flag */ #define ICE_ITR_8K 125 -#define ICE_DFLT_TX_ITR ICE_ITR_8K -#define ICE_DFLT_RX_ITR ICE_ITR_8K +#define ICE_ITR_20K 50 +#define ICE_DFLT_TX_ITR ICE_ITR_20K +#define ICE_DFLT_RX_ITR ICE_ITR_20K /* apply ITR granularity translation to program the register. itr_gran is either * 2 or 4 usecs so we need to divide by 2 first then shift by that value */ @@ -135,13 +136,6 @@ struct ice_ring { u16 q_index; /* Queue number of ring */ u32 txq_teid; /* Added Tx queue TEID */ - /* high bit set means dynamic, use accessor routines to read/write. - * hardware supports 4us/2us resolution for the ITR registers. - * these values always store the USER setting, and must be converted - * before programming to a register. - */ - u16 itr_setting; - u16 count; /* Number of descriptors */ u16 reg_idx; /* HW register index of the ring */ @@ -178,6 +172,7 @@ struct ice_ring_container { unsigned int total_bytes; /* total bytes processed this int */ unsigned int total_pkts; /* total packets processed this int */ enum ice_latency_range latency_range; + int itr_idx; /* index in the interrupt vector */ u16 itr; }; diff --git a/drivers/net/ethernet/intel/ice/ice_type.h b/drivers/net/ethernet/intel/ice/ice_type.h index f5c8de0ed0eb..12f9432abf11 100644 --- a/drivers/net/ethernet/intel/ice/ice_type.h +++ b/drivers/net/ethernet/intel/ice/ice_type.h @@ -84,6 +84,7 @@ enum ice_media_type { enum ice_vsi_type { ICE_VSI_PF = 0, + ICE_VSI_VF, }; struct ice_link_status { @@ -103,6 +104,15 @@ struct ice_link_status { u8 module_type[ICE_MODULE_TYPE_TOTAL_BYTE]; }; +/* Different reset sources for which a disable queue AQ call has to be made in + * order to clean the TX scheduler as a part of the reset + */ +enum ice_disq_rst_src { + ICE_NO_RESET = 0, + ICE_VM_RESET, + ICE_VF_RESET, +}; + /* PHY info such as phy_type, etc... 
*/ struct ice_phy_info { struct ice_link_status link_info; @@ -127,6 +137,9 @@ struct ice_hw_common_caps { /* Max MTU for function or device */ u16 max_mtu; + /* Virtualization support */ + u8 sr_iov_1_1; /* SR-IOV enabled */ + /* RSS related capabilities */ u16 rss_table_size; /* 512 for PFs and 64 for VFs */ u8 rss_table_entry_width; /* RSS Entry width in bits */ @@ -135,12 +148,15 @@ struct ice_hw_common_caps { /* Function specific capabilities */ struct ice_hw_func_caps { struct ice_hw_common_caps common_cap; + u32 num_allocd_vfs; /* Number of allocated VFs */ + u32 vf_base_id; /* Logical ID of the first VF */ u32 guaranteed_num_vsi; }; /* Device wide capabilities */ struct ice_hw_dev_caps { struct ice_hw_common_caps common_cap; + u32 num_vfs_exposed; /* Total number of VFs exposed */ u32 num_vsi_allocd_to_host; /* Excluding EMP VSI */ }; @@ -321,6 +337,7 @@ struct ice_hw { /* Control Queue info */ struct ice_ctl_q_info adminq; + struct ice_ctl_q_info mailboxq; u8 api_branch; /* API branch version */ u8 api_maj_ver; /* API major version */ @@ -426,4 +443,7 @@ struct ice_hw_port_stats { #define ICE_SR_SECTOR_SIZE_IN_WORDS 0x800 #define ICE_SR_WORDS_IN_1KB 512 +/* Hash redirection LUT for VSI - maximum array size */ +#define ICE_VSIQF_HLUT_ARRAY_SIZE ((VSIQF_HLUT_MAX_INDEX + 1) * 4) + #endif /* _ICE_TYPE_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c new file mode 100644 index 000000000000..c25e486706f3 --- /dev/null +++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c @@ -0,0 +1,2668 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2018, Intel Corporation. */ + +#include "ice.h" +#include "ice_lib.h" + +/** + * ice_vc_vf_broadcast - Broadcast a message to all VFs on PF + * @pf: pointer to the PF structure + * @v_opcode: operation code + * @v_retval: return value + * @msg: pointer to the msg buffer + * @msglen: msg length + */ +static void +ice_vc_vf_broadcast(struct ice_pf *pf, enum virtchnl_ops v_opcode, + enum ice_status v_retval, u8 *msg, u16 msglen) +{ + struct ice_hw *hw = &pf->hw; + struct ice_vf *vf = pf->vf; + int i; + + for (i = 0; i < pf->num_alloc_vfs; i++, vf++) { + /* Not all vfs are enabled so skip the ones that are not */ + if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states) && + !test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) + continue; + + /* Ignore return value on purpose - a given VF may fail, but + * we need to keep going and send to all of them + */ + ice_aq_send_msg_to_vf(hw, vf->vf_id, v_opcode, v_retval, msg, + msglen, NULL); + } +} + +/** + * ice_set_pfe_link - Set the link speed/status of the virtchnl_pf_event + * @vf: pointer to the VF structure + * @pfe: pointer to the virtchnl_pf_event to set link speed/status for + * @ice_link_speed: link speed specified by ICE_AQ_LINK_SPEED_* + * @link_up: whether or not to set the link up/down + */ +static void +ice_set_pfe_link(struct ice_vf *vf, struct virtchnl_pf_event *pfe, + int ice_link_speed, bool link_up) +{ + if (vf->driver_caps & VIRTCHNL_VF_CAP_ADV_LINK_SPEED) { + pfe->event_data.link_event_adv.link_status = link_up; + /* Speed in Mbps */ + pfe->event_data.link_event_adv.link_speed = + ice_conv_link_speed_to_virtchnl(true, ice_link_speed); + } else { + pfe->event_data.link_event.link_status = link_up; + /* Legacy method for virtchnl link speeds */ + pfe->event_data.link_event.link_speed = + (enum virtchnl_link_speed) + ice_conv_link_speed_to_virtchnl(false, ice_link_speed); + } +} + +/** + * ice_set_pfe_link_forced - Force the 
virtchnl_pf_event link speed/status + * @vf: pointer to the VF structure + * @pfe: pointer to the virtchnl_pf_event to set link speed/status for + * @link_up: whether or not to set the link up/down + */ +static void +ice_set_pfe_link_forced(struct ice_vf *vf, struct virtchnl_pf_event *pfe, + bool link_up) +{ + u16 link_speed; + + if (link_up) + link_speed = ICE_AQ_LINK_SPEED_40GB; + else + link_speed = ICE_AQ_LINK_SPEED_UNKNOWN; + + ice_set_pfe_link(vf, pfe, link_speed, link_up); +} + +/** + * ice_vc_notify_vf_link_state - Inform a VF of link status + * @vf: pointer to the VF structure + * + * send a link status message to a single VF + */ +static void ice_vc_notify_vf_link_state(struct ice_vf *vf) +{ + struct virtchnl_pf_event pfe = { 0 }; + struct ice_link_status *ls; + struct ice_pf *pf = vf->pf; + struct ice_hw *hw; + + hw = &pf->hw; + ls = &hw->port_info->phy.link_info; + + pfe.event = VIRTCHNL_EVENT_LINK_CHANGE; + pfe.severity = PF_EVENT_SEVERITY_INFO; + + if (vf->link_forced) + ice_set_pfe_link_forced(vf, &pfe, vf->link_up); + else + ice_set_pfe_link(vf, &pfe, ls->link_speed, ls->link_info & + ICE_AQ_LINK_UP); + + ice_aq_send_msg_to_vf(hw, vf->vf_id, VIRTCHNL_OP_EVENT, 0, (u8 *)&pfe, + sizeof(pfe), NULL); +} + +/** + * ice_get_vf_vector - get VF interrupt vector register offset + * @vf_msix: number of MSIx vector per VF on a PF + * @vf_id: VF identifier + * @i: index of MSIx vector + */ +static u32 ice_get_vf_vector(int vf_msix, int vf_id, int i) +{ + return ((i == 0) ? VFINT_DYN_CTLN(vf_id) : + VFINT_DYN_CTLN(((vf_msix - 1) * (vf_id)) + (i - 1))); +} + +/** + * ice_free_vf_res - Free a VF's resources + * @vf: pointer to the VF info + */ +static void ice_free_vf_res(struct ice_vf *vf) +{ + struct ice_pf *pf = vf->pf; + int i, pf_vf_msix; + + /* First, disable VF's configuration API to prevent OS from + * accessing the VF's VSI after it's freed or invalidated. 
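+	 * A message handler is expected to bail out once this bit is
+	 * cleared; a minimal guard, mirroring the checks the handlers
+	 * later in this file perform, looks like:
+	 *
+	 *	if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states))
+	 *		return ICE_ERR_PARAM;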
+ */ + clear_bit(ICE_VF_STATE_INIT, vf->vf_states); + + /* free vsi & disconnect it from the parent uplink */ + if (vf->lan_vsi_idx) { + ice_vsi_release(pf->vsi[vf->lan_vsi_idx]); + vf->lan_vsi_idx = 0; + vf->lan_vsi_num = 0; + vf->num_mac = 0; + } + + pf_vf_msix = pf->num_vf_msix; + /* Disable interrupts so that VF starts in a known state */ + for (i = 0; i < pf_vf_msix; i++) { + u32 reg_idx; + + reg_idx = ice_get_vf_vector(pf_vf_msix, vf->vf_id, i); + wr32(&pf->hw, reg_idx, VFINT_DYN_CTLN_CLEARPBA_M); + ice_flush(&pf->hw); + } + /* reset some of the state variables keeping track of the resources */ + clear_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states); + clear_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states); +} + +/***********************enable_vf routines*****************************/ + +/** + * ice_dis_vf_mappings + * @vf: pointer to the VF structure + */ +static void ice_dis_vf_mappings(struct ice_vf *vf) +{ + struct ice_pf *pf = vf->pf; + struct ice_vsi *vsi; + int first, last, v; + struct ice_hw *hw; + + hw = &pf->hw; + vsi = pf->vsi[vf->lan_vsi_idx]; + + wr32(hw, VPINT_ALLOC(vf->vf_id), 0); + + first = vf->first_vector_idx; + last = first + pf->num_vf_msix - 1; + for (v = first; v <= last; v++) { + u32 reg; + + reg = (((1 << GLINT_VECT2FUNC_IS_PF_S) & + GLINT_VECT2FUNC_IS_PF_M) | + ((hw->pf_id << GLINT_VECT2FUNC_PF_NUM_S) & + GLINT_VECT2FUNC_PF_NUM_M)); + wr32(hw, GLINT_VECT2FUNC(v), reg); + } + + if (vsi->tx_mapping_mode == ICE_VSI_MAP_CONTIG) + wr32(hw, VPLAN_TX_QBASE(vf->vf_id), 0); + else + dev_err(&pf->pdev->dev, + "Scattered mode for VF Tx queues is not yet implemented\n"); + + if (vsi->rx_mapping_mode == ICE_VSI_MAP_CONTIG) + wr32(hw, VPLAN_RX_QBASE(vf->vf_id), 0); + else + dev_err(&pf->pdev->dev, + "Scattered mode for VF Rx queues is not yet implemented\n"); +} + +/** + * ice_free_vfs - Free all VFs + * @pf: pointer to the PF structure + */ +void ice_free_vfs(struct ice_pf *pf) +{ + struct ice_hw *hw = &pf->hw; + int tmp, i; + + if (!pf->vf) + return; + + while (test_and_set_bit(__ICE_VF_DIS, pf->state)) + usleep_range(1000, 2000); + + /* Avoid wait time by stopping all VFs at the same time */ + for (i = 0; i < pf->num_alloc_vfs; i++) { + if (!test_bit(ICE_VF_STATE_ENA, pf->vf[i].vf_states)) + continue; + + /* stop rings without wait time */ + ice_vsi_stop_tx_rings(pf->vsi[pf->vf[i].lan_vsi_idx], + ICE_NO_RESET, i); + ice_vsi_stop_rx_rings(pf->vsi[pf->vf[i].lan_vsi_idx]); + + clear_bit(ICE_VF_STATE_ENA, pf->vf[i].vf_states); + } + + /* Disable IOV before freeing resources. This lets any VF drivers + * running in the host get themselves cleaned up before we yank + * the carpet out from underneath their feet. + */ + if (!pci_vfs_assigned(pf->pdev)) + pci_disable_sriov(pf->pdev); + else + dev_warn(&pf->pdev->dev, "VFs are assigned - not disabling SR-IOV\n"); + + tmp = pf->num_alloc_vfs; + pf->num_vf_qps = 0; + pf->num_alloc_vfs = 0; + for (i = 0; i < tmp; i++) { + if (test_bit(ICE_VF_STATE_INIT, pf->vf[i].vf_states)) { + /* disable VF qp mappings */ + ice_dis_vf_mappings(&pf->vf[i]); + + /* Set this state so that assigned VF vectors can be + * reclaimed by PF for reuse in ice_vsi_release(). No + * need to clear this bit since pf->vf array is being + * freed anyways after this for loop + */ + set_bit(ICE_VF_STATE_CFG_INTR, pf->vf[i].vf_states); + ice_free_vf_res(&pf->vf[i]); + } + } + + devm_kfree(&pf->pdev->dev, pf->vf); + pf->vf = NULL; + + /* This check is for when the driver is unloaded while VFs are + * assigned. 
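+	 * (The VFLR acknowledge below indexes a 32-bit register array, so
+	 * reg_idx = abs_vf_id / 32 and bit_idx = abs_vf_id % 32; with a
+	 * hypothetical vf_base_id of 16, VF 20 maps to GLGEN_VFLRSTAT(1),
+	 * bit 4.)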
Setting the number of VFs to 0 through sysfs is caught + * before this function ever gets called. + */ + if (!pci_vfs_assigned(pf->pdev)) { + int vf_id; + + /* Acknowledge VFLR for all VFs. Without this, VFs will fail to + * work correctly when SR-IOV gets re-enabled. + */ + for (vf_id = 0; vf_id < tmp; vf_id++) { + u32 reg_idx, bit_idx; + + reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32; + bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32; + wr32(hw, GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx)); + } + } + clear_bit(__ICE_VF_DIS, pf->state); + clear_bit(ICE_FLAG_SRIOV_ENA, pf->flags); +} + +/** + * ice_trigger_vf_reset - Reset a VF on HW + * @vf: pointer to the VF structure + * @is_vflr: true if VFLR was issued, false if not + * + * Trigger hardware to start a reset for a particular VF. Expects the caller + * to wait the proper amount of time to allow hardware to reset the VF before + * it cleans up and restores VF functionality. + */ +static void ice_trigger_vf_reset(struct ice_vf *vf, bool is_vflr) +{ + struct ice_pf *pf = vf->pf; + u32 reg, reg_idx, bit_idx; + struct ice_hw *hw; + int vf_abs_id, i; + + hw = &pf->hw; + vf_abs_id = vf->vf_id + hw->func_caps.vf_base_id; + + /* Inform VF that it is no longer active, as a warning */ + clear_bit(ICE_VF_STATE_ACTIVE, vf->vf_states); + + /* Disable VF's configuration API during reset. The flag is re-enabled + * in ice_alloc_vf_res(), when it's safe again to access VF's VSI. + * It's normally disabled in ice_free_vf_res(), but it's safer + * to do it earlier to give some time to finish to any VF config + * functions that may still be running at this point. + */ + clear_bit(ICE_VF_STATE_INIT, vf->vf_states); + + /* In the case of a VFLR, the HW has already reset the VF and we + * just need to clean up, so don't hit the VFRTRIG register. 
+ */ + if (!is_vflr) { + /* reset VF using VPGEN_VFRTRIG reg */ + reg = rd32(hw, VPGEN_VFRTRIG(vf->vf_id)); + reg |= VPGEN_VFRTRIG_VFSWR_M; + wr32(hw, VPGEN_VFRTRIG(vf->vf_id), reg); + } + /* clear the VFLR bit in GLGEN_VFLRSTAT */ + reg_idx = (vf_abs_id) / 32; + bit_idx = (vf_abs_id) % 32; + wr32(hw, GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx)); + ice_flush(hw); + + wr32(hw, PF_PCI_CIAA, + VF_DEVICE_STATUS | (vf_abs_id << PF_PCI_CIAA_VF_NUM_S)); + for (i = 0; i < 100; i++) { + reg = rd32(hw, PF_PCI_CIAD); + if ((reg & VF_TRANS_PENDING_M) != 0) + dev_err(&pf->pdev->dev, + "VF %d PCI transactions stuck\n", vf->vf_id); + udelay(1); + } +} + +/** + * ice_vsi_set_pvid - Set port VLAN id for the VSI + * @vsi: the VSI being changed + * @vid: the VLAN id to set as a PVID + */ +static int ice_vsi_set_pvid(struct ice_vsi *vsi, u16 vid) +{ + struct device *dev = &vsi->back->pdev->dev; + struct ice_hw *hw = &vsi->back->hw; + struct ice_vsi_ctx ctxt = { 0 }; + enum ice_status status; + + ctxt.info.vlan_flags = ICE_AQ_VSI_VLAN_MODE_TAGGED | + ICE_AQ_VSI_PVLAN_INSERT_PVID | + ICE_AQ_VSI_VLAN_EMOD_STR; + ctxt.info.pvid = cpu_to_le16(vid); + ctxt.info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID); + + status = ice_update_vsi(hw, vsi->idx, &ctxt, NULL); + if (status) { + dev_info(dev, "update VSI for VLAN insert failed, err %d aq_err %d\n", + status, hw->adminq.sq_last_status); + return -EIO; + } + + vsi->info.pvid = ctxt.info.pvid; + vsi->info.vlan_flags = ctxt.info.vlan_flags; + return 0; +} + +/** + * ice_vsi_kill_pvid - Remove port VLAN id from the VSI + * @vsi: the VSI being changed + */ +static int ice_vsi_kill_pvid(struct ice_vsi *vsi) +{ + struct ice_pf *pf = vsi->back; + + if (ice_vsi_manage_vlan_stripping(vsi, false)) { + dev_err(&pf->pdev->dev, "Error removing Port VLAN on VSI %i\n", + vsi->vsi_num); + return -ENODEV; + } + + vsi->info.pvid = 0; + return 0; +} + +/** + * ice_vf_vsi_setup - Set up a VF VSI + * @pf: board private structure + * @pi: pointer to the port_info instance + * @vf_id: defines VF id to which this VSI connects. + * + * Returns pointer to the successfully allocated VSI struct on success, + * otherwise returns NULL on failure. + */ +static struct ice_vsi * +ice_vf_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi, u16 vf_id) +{ + return ice_vsi_setup(pf, pi, ICE_VSI_VF, vf_id); +} + +/** + * ice_alloc_vsi_res - Setup VF VSI and its resources + * @vf: pointer to the VF structure + * + * Returns 0 on success, negative value on failure + */ +static int ice_alloc_vsi_res(struct ice_vf *vf) +{ + struct ice_pf *pf = vf->pf; + LIST_HEAD(tmp_add_list); + u8 broadcast[ETH_ALEN]; + struct ice_vsi *vsi; + int status = 0; + + vsi = ice_vf_vsi_setup(pf, pf->hw.port_info, vf->vf_id); + + if (!vsi) { + dev_err(&pf->pdev->dev, "Failed to create VF VSI\n"); + return -ENOMEM; + } + + vf->lan_vsi_idx = vsi->idx; + vf->lan_vsi_num = vsi->vsi_num; + + /* first vector index is the VFs OICR index */ + vf->first_vector_idx = vsi->hw_base_vector; + /* Since hw_base_vector holds the vector where data queue interrupts + * starts, increment by 1 since VFs allocated vectors include OICR intr + * as well. 
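+	 * (Illustrative layout, numbers hypothetical: with hw_base_vector
+	 * 40 and 5 vectors per VF, vector 40 is the VF's OICR and vectors
+	 * 41-44 service the data queue interrupts.)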
+ */ + vsi->hw_base_vector += 1; + + /* Check if a port VLAN existed before, and restore it accordingly */ + if (vf->port_vlan_id) + ice_vsi_set_pvid(vsi, vf->port_vlan_id); + + eth_broadcast_addr(broadcast); + + status = ice_add_mac_to_list(vsi, &tmp_add_list, broadcast); + if (status) + goto ice_alloc_vsi_res_exit; + + if (is_valid_ether_addr(vf->dflt_lan_addr.addr)) { + status = ice_add_mac_to_list(vsi, &tmp_add_list, + vf->dflt_lan_addr.addr); + if (status) + goto ice_alloc_vsi_res_exit; + } + + status = ice_add_mac(&pf->hw, &tmp_add_list); + if (status) + dev_err(&pf->pdev->dev, "could not add mac filters\n"); + + /* Clear this bit after VF initialization since we shouldn't reclaim + * and reassign interrupts for synchronous or asynchronous VFR events. + * We don't want to reconfigure interrupts since AVF driver doesn't + * expect vector assignment to be changed unless there is a request for + * more vectors. + */ + clear_bit(ICE_VF_STATE_CFG_INTR, vf->vf_states); +ice_alloc_vsi_res_exit: + ice_free_fltr_list(&pf->pdev->dev, &tmp_add_list); + return status; +} + +/** + * ice_alloc_vf_res - Allocate VF resources + * @vf: pointer to the VF structure + */ +static int ice_alloc_vf_res(struct ice_vf *vf) +{ + int status; + + /* setup VF VSI and necessary resources */ + status = ice_alloc_vsi_res(vf); + if (status) + goto ice_alloc_vf_res_exit; + + if (vf->trusted) + set_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps); + else + clear_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps); + + /* VF is now completely initialized */ + set_bit(ICE_VF_STATE_INIT, vf->vf_states); + + return status; + +ice_alloc_vf_res_exit: + ice_free_vf_res(vf); + return status; +} + +/** + * ice_ena_vf_mappings + * @vf: pointer to the VF structure + * + * Enable VF vectors and queues allocation by writing the details into + * respective registers. + */ +static void ice_ena_vf_mappings(struct ice_vf *vf) +{ + struct ice_pf *pf = vf->pf; + struct ice_vsi *vsi; + int first, last, v; + struct ice_hw *hw; + int abs_vf_id; + u32 reg; + + hw = &pf->hw; + vsi = pf->vsi[vf->lan_vsi_idx]; + first = vf->first_vector_idx; + last = (first + pf->num_vf_msix) - 1; + abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id; + + /* VF Vector allocation */ + reg = (((first << VPINT_ALLOC_FIRST_S) & VPINT_ALLOC_FIRST_M) | + ((last << VPINT_ALLOC_LAST_S) & VPINT_ALLOC_LAST_M) | + VPINT_ALLOC_VALID_M); + wr32(hw, VPINT_ALLOC(vf->vf_id), reg); + + /* map the interrupts to their functions */ + for (v = first; v <= last; v++) { + reg = (((abs_vf_id << GLINT_VECT2FUNC_VF_NUM_S) & + GLINT_VECT2FUNC_VF_NUM_M) | + ((hw->pf_id << GLINT_VECT2FUNC_PF_NUM_S) & + GLINT_VECT2FUNC_PF_NUM_M)); + wr32(hw, GLINT_VECT2FUNC(v), reg); + } + + /* VF Tx queues allocation */ + if (vsi->tx_mapping_mode == ICE_VSI_MAP_CONTIG) { + wr32(hw, VPLAN_TXQ_MAPENA(vf->vf_id), + VPLAN_TXQ_MAPENA_TX_ENA_M); + /* set the VF PF Tx queue range + * VFNUMQ value should be set to (number of queues - 1). 
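+	 * (e.g. a VF VSI with 4 Tx queues, an illustrative value, programs
+	 * VFFIRSTQ = txq_map[0] and VFNUMQ = 4 - 1 = 3.)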
A value + * of 0 means 1 queue and a value of 255 means 256 queues + */ + reg = (((vsi->txq_map[0] << VPLAN_TX_QBASE_VFFIRSTQ_S) & + VPLAN_TX_QBASE_VFFIRSTQ_M) | + (((vsi->alloc_txq - 1) << VPLAN_TX_QBASE_VFNUMQ_S) & + VPLAN_TX_QBASE_VFNUMQ_M)); + wr32(hw, VPLAN_TX_QBASE(vf->vf_id), reg); + } else { + dev_err(&pf->pdev->dev, + "Scattered mode for VF Tx queues is not yet implemented\n"); + } + + /* VF Rx queues allocation */ + if (vsi->rx_mapping_mode == ICE_VSI_MAP_CONTIG) { + wr32(hw, VPLAN_RXQ_MAPENA(vf->vf_id), + VPLAN_RXQ_MAPENA_RX_ENA_M); + /* set the VF PF Rx queue range + * VFNUMQ value should be set to (number of queues - 1). A value + * of 0 means 1 queue and a value of 255 means 256 queues + */ + reg = (((vsi->rxq_map[0] << VPLAN_RX_QBASE_VFFIRSTQ_S) & + VPLAN_RX_QBASE_VFFIRSTQ_M) | + (((vsi->alloc_rxq - 1) << VPLAN_RX_QBASE_VFNUMQ_S) & + VPLAN_RX_QBASE_VFNUMQ_M)); + wr32(hw, VPLAN_RX_QBASE(vf->vf_id), reg); + } else { + dev_err(&pf->pdev->dev, + "Scattered mode for VF Rx queues is not yet implemented\n"); + } +} + +/** + * ice_determine_res + * @pf: pointer to the PF structure + * @avail_res: available resources in the PF structure + * @max_res: maximum resources that can be given per VF + * @min_res: minimum resources that can be given per VF + * + * Returns non-zero value if resources (queues/vectors) are available or + * returns zero if the PF cannot accommodate all num_alloc_vfs. + */ +static int +ice_determine_res(struct ice_pf *pf, u16 avail_res, u16 max_res, u16 min_res) +{ + bool checked_min_res = false; + int res; + + /* start by checking if PF can assign max number of resources for + * all num_alloc_vfs. + * if yes, return number per VF + * If no, divide by 2 and round up, check again + * repeat the loop till we reach a point where even minimum resources + * are not available, in that case return 0 + */ + res = max_res; + while ((res >= min_res) && !checked_min_res) { + int num_all_res; + + num_all_res = pf->num_alloc_vfs * res; + if (num_all_res <= avail_res) + return res; + + if (res == min_res) + checked_min_res = true; + + res = DIV_ROUND_UP(res, 2); + } + return 0; +} + +/** + * ice_check_avail_res - check if vectors and queues are available + * @pf: pointer to the PF structure + * + * This function is where we calculate actual number of resources for VF VSIs; + * we don't reserve ahead of time during probe. Returns success if vector and + * queue resources are available, otherwise returns error code + */ +static int ice_check_avail_res(struct ice_pf *pf) +{ + u16 num_msix, num_txq, num_rxq; + + if (!pf->num_alloc_vfs) + return -EINVAL; + + /* Grab from HW interrupts common pool + * Note: By the time the user decides it needs more vectors in a VF + * it's already too late since one must decide this prior to creating the + * VF interface. So the best we can do is take a guess as to what the + * user might want. + * + * We have two policies for vector allocation: + * 1. if num_alloc_vfs is from 1 to 16, then we consider this as a small + * number of NFV VFs used for NFV appliances. Since this is a special + * case, we try to assign maximum vectors per VF (65) as much as + * possible, based on the determine_resources algorithm. + * 2. if num_alloc_vfs is from 17 to 256, then it's a large number of + * regular VFs which are not used for any special purpose. Hence try to + * grab default interrupt vectors (5 as supported by AVF driver). 
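+	 *
+	 * An illustrative walk-through of the ice_determine_res() halving
+	 * loop with hypothetical numbers (8 VFs, 200 vectors available,
+	 * max 65 per VF):
+	 *	8 * 65 = 520 > 200 -> res = DIV_ROUND_UP(65, 2) = 33
+	 *	8 * 33 = 264 > 200 -> res = 17
+	 *	8 * 17 = 136 <= 200 -> each VF gets 17 vectors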
+ */ + if (pf->num_alloc_vfs <= 16) { + num_msix = ice_determine_res(pf, pf->num_avail_hw_msix, + ICE_MAX_INTR_PER_VF, + ICE_MIN_INTR_PER_VF); + } else if (pf->num_alloc_vfs <= ICE_MAX_VF_COUNT) { + num_msix = ice_determine_res(pf, pf->num_avail_hw_msix, + ICE_DFLT_INTR_PER_VF, + ICE_MIN_INTR_PER_VF); + } else { + dev_err(&pf->pdev->dev, + "Number of VFs %d exceeds max VF count %d\n", + pf->num_alloc_vfs, ICE_MAX_VF_COUNT); + return -EIO; + } + + if (!num_msix) + return -EIO; + + /* Grab from the common pool + * start by requesting the default number of queues (4, as supported by + * the AVF driver). Note that the main difference between queues and + * vectors is that the latter can only be reserved at init time, while + * queues can be requested by the VF at runtime through virtchnl; that + * is the reason we start by reserving a few queues. + */ + num_txq = ice_determine_res(pf, pf->q_left_tx, ICE_DFLT_QS_PER_VF, + ICE_MIN_QS_PER_VF); + + num_rxq = ice_determine_res(pf, pf->q_left_rx, ICE_DFLT_QS_PER_VF, + ICE_MIN_QS_PER_VF); + + if (!num_txq || !num_rxq) + return -EIO; + + /* since the AVF driver works only with queue pairs, it expects an + * equal number of Rx and Tx queues, so take the minimum of the + * available Tx or Rx queues + */ + pf->num_vf_qps = min_t(int, num_txq, num_rxq); + pf->num_vf_msix = num_msix; + + return 0; +} + +/** + * ice_cleanup_and_realloc_vf - Clean up VF and reallocate resources after reset + * @vf: pointer to the VF structure + * + * Clean up a VF after the hardware reset is finished. Expects the caller to + * have verified whether the reset is finished properly, and ensure the + * minimum amount of wait time has passed. Reallocate VF resources to make the + * VF state active + */ +static void ice_cleanup_and_realloc_vf(struct ice_vf *vf) +{ + struct ice_pf *pf = vf->pf; + struct ice_hw *hw; + u32 reg; + + hw = &pf->hw; + + /* PF software completes the flow by notifying VF that reset flow is + * completed. This is done by enabling hardware by clearing the reset + * bit in the VPGEN_VFRTRIG reg and setting VFR_STATE in the VFGEN_RSTAT + * register to VFR completed (done at the end of this function) + * By doing this we allow HW to access VF memory at any point. If we + * did it any sooner, HW could access memory while it was being freed + * in ice_free_vf_res(), causing an IOMMU fault. + * + * On the other hand, this needs to be done ASAP, because the VF driver + * is waiting for this to happen and may report a timeout. It's + * harmless, but it gets logged into Guest OS kernel log, so best avoid + * it. + */ + reg = rd32(hw, VPGEN_VFRTRIG(vf->vf_id)); + reg &= ~VPGEN_VFRTRIG_VFSWR_M; + wr32(hw, VPGEN_VFRTRIG(vf->vf_id), reg); + + /* reallocate VF resources to finish resetting the VSI state */ + if (!ice_alloc_vf_res(vf)) { + ice_ena_vf_mappings(vf); + set_bit(ICE_VF_STATE_ACTIVE, vf->vf_states); + clear_bit(ICE_VF_STATE_DIS, vf->vf_states); + vf->num_vlan = 0; + } + + /* Tell the VF driver the reset is done. This needs to be done only + * after VF has been fully initialized, because the VF driver may + * request resources immediately after setting this flag. + */ + wr32(hw, VFGEN_RSTAT(vf->vf_id), VIRTCHNL_VFR_VFACTIVE); +} + +/** + * ice_reset_all_vfs - reset all allocated VFs in one go + * @pf: pointer to the PF structure + * @is_vflr: true if VFLR was issued, false if not + * + * First, tell the hardware to reset each VF, then do all the waiting in one + * chunk, and finally finish restoring each VF after the wait. 
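+	 * (For instance, ice_rebuild() in ice_main.c above invokes
+	 * ice_reset_all_vfs(pf, true) once the PF VSIs have been rebuilt.)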
This is useful + * during PF routines which need to reset all VFs, as otherwise the resets + * would have to be performed in a serialized fashion. + * + * Returns true if any VFs were reset, and false otherwise. + */ +bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr) +{ + struct ice_hw *hw = &pf->hw; + int v, i; + + /* If we don't have any VFs, then there is nothing to reset */ + if (!pf->num_alloc_vfs) + return false; + + /* If VFs have been disabled, there is no need to reset */ + if (test_and_set_bit(__ICE_VF_DIS, pf->state)) + return false; + + /* Begin reset on all VFs at once */ + for (v = 0; v < pf->num_alloc_vfs; v++) + ice_trigger_vf_reset(&pf->vf[v], is_vflr); + + /* Issue the Disable LAN Tx queue AQ call with the VFR bit set and 0 + * queues to inform the firmware about the VF reset. + */ + for (v = 0; v < pf->num_alloc_vfs; v++) + ice_dis_vsi_txq(pf->vsi[0]->port_info, 0, NULL, NULL, + ICE_VF_RESET, v, NULL); + + /* HW requires some time to make sure it can flush the FIFO for a VF + * when it resets it. Poll the VPGEN_VFRSTAT register for each VF in + * sequence to make sure that it has completed. We'll keep track of + * the VFs using a simple iterator that increments once that VF has + * finished resetting. + */ + for (i = 0, v = 0; i < 10 && v < pf->num_alloc_vfs; i++) { + usleep_range(10000, 20000); + + /* Check each VF in sequence */ + while (v < pf->num_alloc_vfs) { + struct ice_vf *vf = &pf->vf[v]; + u32 reg; + + reg = rd32(hw, VPGEN_VFRSTAT(vf->vf_id)); + if (!(reg & VPGEN_VFRSTAT_VFRD_M)) + break; + + /* If the current VF has finished resetting, move on + * to the next VF in sequence. + */ + v++; + } + } + + /* Display a warning if at least one VF didn't manage to reset in + * time, but continue on with the operation. + */ + if (v < pf->num_alloc_vfs) + dev_warn(&pf->pdev->dev, "VF reset check timeout\n"); + usleep_range(10000, 20000); + + /* free VF resources to begin resetting the VSI state */ + for (v = 0; v < pf->num_alloc_vfs; v++) + ice_free_vf_res(&pf->vf[v]); + + if (ice_check_avail_res(pf)) { + dev_err(&pf->pdev->dev, + "Cannot allocate VF resources, try with fewer VFs\n"); + return false; + } + + /* Finish the reset on each VF */ + for (v = 0; v < pf->num_alloc_vfs; v++) + ice_cleanup_and_realloc_vf(&pf->vf[v]); + + ice_flush(hw); + clear_bit(__ICE_VF_DIS, pf->state); + + return true; +} + +/** + * ice_reset_vf - Reset a particular VF + * @vf: pointer to the VF structure + * @is_vflr: true if VFLR was issued, false if not + * + * Returns true if the VF is reset, false otherwise. + */ +static bool ice_reset_vf(struct ice_vf *vf, bool is_vflr) +{ + struct ice_pf *pf = vf->pf; + struct ice_hw *hw = &pf->hw; + bool rsd = false; + u32 reg; + int i; + + /* If the VFs have been disabled, this means something else is + * resetting the VF, so we shouldn't continue. + */ + if (test_and_set_bit(__ICE_VF_DIS, pf->state)) + return false; + + ice_trigger_vf_reset(vf, is_vflr); + + if (test_bit(ICE_VF_STATE_ENA, vf->vf_states)) { + ice_vsi_stop_tx_rings(pf->vsi[vf->lan_vsi_idx], ICE_VF_RESET, + vf->vf_id); + ice_vsi_stop_rx_rings(pf->vsi[vf->lan_vsi_idx]); + clear_bit(ICE_VF_STATE_ENA, vf->vf_states); + } else { + /* Issue the Disable LAN Tx queue AQ call even when queues are + * not enabled. 
This is needed for successful completion of VFR + */ + ice_dis_vsi_txq(pf->vsi[vf->lan_vsi_idx]->port_info, 0, + NULL, NULL, ICE_VF_RESET, vf->vf_id, NULL); + } + + /* poll VPGEN_VFRSTAT reg to make sure + * that reset is complete + */ + for (i = 0; i < 10; i++) { + /* VF reset requires driver to first reset the VF and then + * poll the status register to make sure that the reset + * completed successfully. + */ + usleep_range(10000, 20000); + reg = rd32(hw, VPGEN_VFRSTAT(vf->vf_id)); + if (reg & VPGEN_VFRSTAT_VFRD_M) { + rsd = true; + break; + } + } + + /* Display a warning if the VF didn't manage to reset in time, but we + * need to continue on with the operation. + */ + if (!rsd) + dev_warn(&pf->pdev->dev, "VF reset check timeout on VF %d\n", + vf->vf_id); + + usleep_range(10000, 20000); + + /* free VF resources to begin resetting the VSI state */ + ice_free_vf_res(vf); + + ice_cleanup_and_realloc_vf(vf); + + ice_flush(hw); + clear_bit(__ICE_VF_DIS, pf->state); + + return true; +} + +/** + * ice_vc_notify_link_state - Inform all VFs on a PF of link status + * @pf: pointer to the PF structure + */ +void ice_vc_notify_link_state(struct ice_pf *pf) +{ + int i; + + for (i = 0; i < pf->num_alloc_vfs; i++) + ice_vc_notify_vf_link_state(&pf->vf[i]); +} + +/** + * ice_vc_notify_reset - Send pending reset message to all VFs + * @pf: pointer to the PF structure + * + * indicate a pending reset to all VFs on a given PF + */ +void ice_vc_notify_reset(struct ice_pf *pf) +{ + struct virtchnl_pf_event pfe; + + if (!pf->num_alloc_vfs) + return; + + pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING; + pfe.severity = PF_EVENT_SEVERITY_CERTAIN_DOOM; + ice_vc_vf_broadcast(pf, VIRTCHNL_OP_EVENT, ICE_SUCCESS, + (u8 *)&pfe, sizeof(struct virtchnl_pf_event)); +} + +/** + * ice_vc_notify_vf_reset - Notify VF of a reset event + * @vf: pointer to the VF structure + */ +static void ice_vc_notify_vf_reset(struct ice_vf *vf) +{ + struct virtchnl_pf_event pfe; + + /* validate the request */ + if (!vf || vf->vf_id >= vf->pf->num_alloc_vfs) + return; + + /* verify if the VF is in either init or active before proceeding */ + if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states) && + !test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) + return; + + pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING; + pfe.severity = PF_EVENT_SEVERITY_CERTAIN_DOOM; + ice_aq_send_msg_to_vf(&vf->pf->hw, vf->vf_id, VIRTCHNL_OP_EVENT, 0, + (u8 *)&pfe, sizeof(pfe), NULL); +} + +/** + * ice_alloc_vfs - Allocate and set up VF resources + * @pf: pointer to the PF structure + * @num_alloc_vfs: number of VFs to allocate + */ +static int ice_alloc_vfs(struct ice_pf *pf, u16 num_alloc_vfs) +{ + struct ice_hw *hw = &pf->hw; + struct ice_vf *vfs; + int i, ret; + + /* Disable global interrupt 0 so we don't try to handle the VFLR. 
*/ + wr32(hw, GLINT_DYN_CTL(pf->hw_oicr_idx), + ICE_ITR_NONE << GLINT_DYN_CTL_ITR_INDX_S); + + ice_flush(hw); + + ret = pci_enable_sriov(pf->pdev, num_alloc_vfs); + if (ret) { + pf->num_alloc_vfs = 0; + goto err_unroll_intr; + } + /* allocate memory */ + vfs = devm_kcalloc(&pf->pdev->dev, num_alloc_vfs, sizeof(*vfs), + GFP_KERNEL); + if (!vfs) { + ret = -ENOMEM; + goto err_unroll_sriov; + } + pf->vf = vfs; + + /* apply default profile */ + for (i = 0; i < num_alloc_vfs; i++) { + vfs[i].pf = pf; + vfs[i].vf_sw_id = pf->first_sw; + vfs[i].vf_id = i; + + /* assign default capabilities */ + set_bit(ICE_VIRTCHNL_VF_CAP_L2, &vfs[i].vf_caps); + vfs[i].spoofchk = true; + + /* Set this state so that PF driver does VF vector assignment */ + set_bit(ICE_VF_STATE_CFG_INTR, vfs[i].vf_states); + } + pf->num_alloc_vfs = num_alloc_vfs; + + /* VF resources get allocated during reset */ + if (!ice_reset_all_vfs(pf, false)) + goto err_unroll_sriov; + + goto err_unroll_intr; + +err_unroll_sriov: + pci_disable_sriov(pf->pdev); +err_unroll_intr: + /* rearm interrupts here */ + ice_irq_dynamic_ena(hw, NULL, NULL); + return ret; +} + +/** + * ice_pf_state_is_nominal - checks the pf for nominal state + * @pf: pointer to pf to check + * + * Check the PF's state for a collection of bits that would indicate + * the PF is in a state that would inhibit normal operation for + * driver functionality. + * + * Returns true if PF is in a nominal state. + * Returns false otherwise + */ +static bool ice_pf_state_is_nominal(struct ice_pf *pf) +{ + DECLARE_BITMAP(check_bits, __ICE_STATE_NBITS) = { 0 }; + + if (!pf) + return false; + + bitmap_set(check_bits, 0, __ICE_STATE_NOMINAL_CHECK_BITS); + if (bitmap_intersects(pf->state, check_bits, __ICE_STATE_NBITS)) + return false; + + return true; +} + +/** + * ice_pci_sriov_ena - Enable or change number of VFs + * @pf: pointer to the PF structure + * @num_vfs: number of VFs to allocate + */ +static int ice_pci_sriov_ena(struct ice_pf *pf, int num_vfs) +{ + int pre_existing_vfs = pci_num_vf(pf->pdev); + struct device *dev = &pf->pdev->dev; + int err; + + if (!ice_pf_state_is_nominal(pf)) { + dev_err(dev, "Cannot enable SR-IOV, device not ready\n"); + return -EBUSY; + } + + if (!test_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags)) { + dev_err(dev, "This device is not capable of SR-IOV\n"); + return -ENODEV; + } + + if (pre_existing_vfs && pre_existing_vfs != num_vfs) + ice_free_vfs(pf); + else if (pre_existing_vfs && pre_existing_vfs == num_vfs) + return num_vfs; + + if (num_vfs > pf->num_vfs_supported) { + dev_err(dev, "Can't enable %d VFs, max VFs supported is %d\n", + num_vfs, pf->num_vfs_supported); + return -ENOTSUPP; + } + + dev_info(dev, "Allocating %d VFs\n", num_vfs); + err = ice_alloc_vfs(pf, num_vfs); + if (err) { + dev_err(dev, "Failed to enable SR-IOV: %d\n", err); + return err; + } + + set_bit(ICE_FLAG_SRIOV_ENA, pf->flags); + return num_vfs; +} + +/** + * ice_sriov_configure - Enable or change number of VFs via sysfs + * @pdev: pointer to a pci_dev structure + * @num_vfs: number of VFs to allocate + * + * This function is called when the user updates the number of VFs in sysfs. 
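+ * A typical trigger is the standard PCI sysfs attribute, e.g. (device
+ * address hypothetical):
+ *
+ *	echo 4 > /sys/bus/pci/devices/0000:3b:00.0/sriov_numvfs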
+ */ +int ice_sriov_configure(struct pci_dev *pdev, int num_vfs) +{ + struct ice_pf *pf = pci_get_drvdata(pdev); + + if (num_vfs) + return ice_pci_sriov_ena(pf, num_vfs); + + if (!pci_vfs_assigned(pdev)) { + ice_free_vfs(pf); + } else { + dev_err(&pf->pdev->dev, + "can't free VFs because some are assigned to VMs.\n"); + return -EBUSY; + } + + return 0; +} + +/** + * ice_process_vflr_event - Free VF resources via IRQ calls + * @pf: pointer to the PF structure + * + * called from the VLFR IRQ handler to + * free up VF resources and state variables + */ +void ice_process_vflr_event(struct ice_pf *pf) +{ + struct ice_hw *hw = &pf->hw; + int vf_id; + u32 reg; + + if (!test_bit(__ICE_VFLR_EVENT_PENDING, pf->state) || + !pf->num_alloc_vfs) + return; + + /* Re-enable the VFLR interrupt cause here, before looking for which + * VF got reset. Otherwise, if another VF gets a reset while the + * first one is being processed, that interrupt will be lost, and + * that VF will be stuck in reset forever. + */ + reg = rd32(hw, PFINT_OICR_ENA); + reg |= PFINT_OICR_VFLR_M; + wr32(hw, PFINT_OICR_ENA, reg); + ice_flush(hw); + + clear_bit(__ICE_VFLR_EVENT_PENDING, pf->state); + for (vf_id = 0; vf_id < pf->num_alloc_vfs; vf_id++) { + struct ice_vf *vf = &pf->vf[vf_id]; + u32 reg_idx, bit_idx; + + reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32; + bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32; + /* read GLGEN_VFLRSTAT register to find out the flr VFs */ + reg = rd32(hw, GLGEN_VFLRSTAT(reg_idx)); + if (reg & BIT(bit_idx)) + /* GLGEN_VFLRSTAT bit will be cleared in ice_reset_vf */ + ice_reset_vf(vf, true); + } +} + +/** + * ice_vc_dis_vf - Disable a given VF via SW reset + * @vf: pointer to the VF info + * + * Disable the VF through a SW reset + */ +static void ice_vc_dis_vf(struct ice_vf *vf) +{ + ice_vc_notify_vf_reset(vf); + ice_reset_vf(vf, false); +} + +/** + * ice_vc_send_msg_to_vf - Send message to VF + * @vf: pointer to the VF info + * @v_opcode: virtual channel opcode + * @v_retval: virtual channel return value + * @msg: pointer to the msg buffer + * @msglen: msg length + * + * send msg to VF + */ +static int ice_vc_send_msg_to_vf(struct ice_vf *vf, u32 v_opcode, + enum ice_status v_retval, u8 *msg, u16 msglen) +{ + enum ice_status aq_ret; + struct ice_pf *pf; + + /* validate the request */ + if (!vf || vf->vf_id >= vf->pf->num_alloc_vfs) + return -EINVAL; + + pf = vf->pf; + + /* single place to detect unsuccessful return values */ + if (v_retval) { + vf->num_inval_msgs++; + dev_info(&pf->pdev->dev, "VF %d failed opcode %d, retval: %d\n", + vf->vf_id, v_opcode, v_retval); + if (vf->num_inval_msgs > ICE_DFLT_NUM_INVAL_MSGS_ALLOWED) { + dev_err(&pf->pdev->dev, + "Number of invalid messages exceeded for VF %d\n", + vf->vf_id); + dev_err(&pf->pdev->dev, "Use PF Control I/F to enable the VF\n"); + set_bit(ICE_VF_STATE_DIS, vf->vf_states); + return -EIO; + } + } else { + vf->num_valid_msgs++; + /* reset the invalid counter, if a valid message is received. 
*/ + vf->num_inval_msgs = 0; + } + + aq_ret = ice_aq_send_msg_to_vf(&pf->hw, vf->vf_id, v_opcode, v_retval, + msg, msglen, NULL); + if (aq_ret) { + dev_info(&pf->pdev->dev, + "Unable to send the message to VF %d aq_err %d\n", + vf->vf_id, pf->hw.mailboxq.sq_last_status); + return -EIO; + } + + return 0; +} + +/** + * ice_vc_get_ver_msg + * @vf: pointer to the VF info + * @msg: pointer to the msg buffer + * + * called from the VF to request the API version used by the PF + */ +static int ice_vc_get_ver_msg(struct ice_vf *vf, u8 *msg) +{ + struct virtchnl_version_info info = { + VIRTCHNL_VERSION_MAJOR, VIRTCHNL_VERSION_MINOR + }; + + vf->vf_ver = *(struct virtchnl_version_info *)msg; + /* VFs running the 1.0 API expect to get 1.0 back or they will cry. */ + if (VF_IS_V10(&vf->vf_ver)) + info.minor = VIRTCHNL_VERSION_MINOR_NO_VF_CAPS; + + return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_VERSION, ICE_SUCCESS, + (u8 *)&info, + sizeof(struct virtchnl_version_info)); +} + +/** + * ice_vc_get_vf_res_msg + * @vf: pointer to the VF info + * @msg: pointer to the msg buffer + * + * called from the VF to request its resources + */ +static int ice_vc_get_vf_res_msg(struct ice_vf *vf, u8 *msg) +{ + struct virtchnl_vf_resource *vfres = NULL; + enum ice_status aq_ret = 0; + struct ice_pf *pf = vf->pf; + struct ice_vsi *vsi; + int len = 0; + int ret; + + if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states)) { + aq_ret = ICE_ERR_PARAM; + goto err; + } + + len = sizeof(struct virtchnl_vf_resource); + + vfres = devm_kzalloc(&pf->pdev->dev, len, GFP_KERNEL); + if (!vfres) { + aq_ret = ICE_ERR_NO_MEMORY; + len = 0; + goto err; + } + if (VF_IS_V11(&vf->vf_ver)) + vf->driver_caps = *(u32 *)msg; + else + vf->driver_caps = VIRTCHNL_VF_OFFLOAD_L2 | + VIRTCHNL_VF_OFFLOAD_RSS_REG | + VIRTCHNL_VF_OFFLOAD_VLAN; + + vfres->vf_cap_flags = VIRTCHNL_VF_OFFLOAD_L2; + vsi = pf->vsi[vf->lan_vsi_idx]; + if (!vsi->info.pvid) + vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_VLAN; + + if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PF) { + vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_PF; + } else { + if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_AQ) + vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_AQ; + else + vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_REG; + } + + if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2) + vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2; + + if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ENCAP) + vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ENCAP; + + if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM) + vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM; + + if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RX_POLLING) + vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RX_POLLING; + + if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_WB_ON_ITR) + vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_WB_ON_ITR; + + if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_REQ_QUEUES) + vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_REQ_QUEUES; + + if (vf->driver_caps & VIRTCHNL_VF_CAP_ADV_LINK_SPEED) + vfres->vf_cap_flags |= VIRTCHNL_VF_CAP_ADV_LINK_SPEED; + + vfres->num_vsis = 1; + /* Tx and Rx queue are equal for VF */ + vfres->num_queue_pairs = vsi->num_txq; + vfres->max_vectors = pf->num_vf_msix; + vfres->rss_key_size = ICE_VSIQF_HKEY_ARRAY_SIZE; + vfres->rss_lut_size = ICE_VSIQF_HLUT_ARRAY_SIZE; + + vfres->vsi_res[0].vsi_id = vf->lan_vsi_num; + vfres->vsi_res[0].vsi_type = VIRTCHNL_VSI_SRIOV; + vfres->vsi_res[0].num_queue_pairs = vsi->num_txq; + ether_addr_copy(vfres->vsi_res[0].default_mac_addr, + vf->dflt_lan_addr.addr); + + 
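+	/* The capability negotiation above is effectively an AND of the
+	 * caps the VF advertised in driver_caps with what the PF supports;
+	 * e.g. a hypothetical VF advertising only RSS_PF and REQ_QUEUES on
+	 * a VSI without a port VLAN ends up with
+	 * L2 | VLAN | RSS_PF | REQ_QUEUES.
+	 */
+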
set_bit(ICE_VF_STATE_ACTIVE, vf->vf_states); + +err: + /* send the response back to the VF */ + ret = ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_VF_RESOURCES, aq_ret, + (u8 *)vfres, len); + + devm_kfree(&pf->pdev->dev, vfres); + return ret; +} + +/** + * ice_vc_reset_vf_msg + * @vf: pointer to the VF info + * + * called from the VF to reset itself, + * unlike other virtchnl messages, PF driver + * doesn't send the response back to the VF + */ +static void ice_vc_reset_vf_msg(struct ice_vf *vf) +{ + if (test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) + ice_reset_vf(vf, false); +} + +/** + * ice_find_vsi_from_id + * @pf: the pf structure to search for the VSI + * @id: id of the VSI it is searching for + * + * searches for the VSI with the given id + */ +static struct ice_vsi *ice_find_vsi_from_id(struct ice_pf *pf, u16 id) +{ + int i; + + for (i = 0; i < pf->num_alloc_vsi; i++) + if (pf->vsi[i] && pf->vsi[i]->vsi_num == id) + return pf->vsi[i]; + + return NULL; +} + +/** + * ice_vc_isvalid_vsi_id + * @vf: pointer to the VF info + * @vsi_id: VF relative VSI id + * + * check for the valid VSI id + */ +static bool ice_vc_isvalid_vsi_id(struct ice_vf *vf, u16 vsi_id) +{ + struct ice_pf *pf = vf->pf; + struct ice_vsi *vsi; + + vsi = ice_find_vsi_from_id(pf, vsi_id); + + return (vsi && (vsi->vf_id == vf->vf_id)); +} + +/** + * ice_vc_isvalid_q_id + * @vf: pointer to the VF info + * @vsi_id: VSI id + * @qid: VSI relative queue id + * + * check for the valid queue id + */ +static bool ice_vc_isvalid_q_id(struct ice_vf *vf, u16 vsi_id, u8 qid) +{ + struct ice_vsi *vsi = ice_find_vsi_from_id(vf->pf, vsi_id); + /* allocated Tx and Rx queues should be always equal for VF VSI */ + return (vsi && (qid < vsi->alloc_txq)); +} + +/** + * ice_vc_config_rss_key + * @vf: pointer to the VF info + * @msg: pointer to the msg buffer + * + * Configure the VF's RSS key + */ +static int ice_vc_config_rss_key(struct ice_vf *vf, u8 *msg) +{ + struct virtchnl_rss_key *vrk = + (struct virtchnl_rss_key *)msg; + struct ice_vsi *vsi = NULL; + enum ice_status aq_ret; + int ret; + + if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) { + aq_ret = ICE_ERR_PARAM; + goto error_param; + } + + if (!ice_vc_isvalid_vsi_id(vf, vrk->vsi_id)) { + aq_ret = ICE_ERR_PARAM; + goto error_param; + } + + vsi = ice_find_vsi_from_id(vf->pf, vrk->vsi_id); + if (!vsi) { + aq_ret = ICE_ERR_PARAM; + goto error_param; + } + + if (vrk->key_len != ICE_VSIQF_HKEY_ARRAY_SIZE) { + aq_ret = ICE_ERR_PARAM; + goto error_param; + } + + if (!test_bit(ICE_FLAG_RSS_ENA, vf->pf->flags)) { + aq_ret = ICE_ERR_PARAM; + goto error_param; + } + + ret = ice_set_rss(vsi, vrk->key, NULL, 0); + aq_ret = ret ? 
ICE_ERR_PARAM : ICE_SUCCESS; +error_param: + return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_KEY, aq_ret, + NULL, 0); +} + +/** + * ice_vc_config_rss_lut + * @vf: pointer to the VF info + * @msg: pointer to the msg buffer + * + * Configure the VF's RSS LUT + */ +static int ice_vc_config_rss_lut(struct ice_vf *vf, u8 *msg) +{ + struct virtchnl_rss_lut *vrl = (struct virtchnl_rss_lut *)msg; + struct ice_vsi *vsi = NULL; + enum ice_status aq_ret; + int ret; + + if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) { + aq_ret = ICE_ERR_PARAM; + goto error_param; + } + + if (!ice_vc_isvalid_vsi_id(vf, vrl->vsi_id)) { + aq_ret = ICE_ERR_PARAM; + goto error_param; + } + + vsi = ice_find_vsi_from_id(vf->pf, vrl->vsi_id); + if (!vsi) { + aq_ret = ICE_ERR_PARAM; + goto error_param; + } + + if (vrl->lut_entries != ICE_VSIQF_HLUT_ARRAY_SIZE) { + aq_ret = ICE_ERR_PARAM; + goto error_param; + } + + if (!test_bit(ICE_FLAG_RSS_ENA, vf->pf->flags)) { + aq_ret = ICE_ERR_PARAM; + goto error_param; + } + + ret = ice_set_rss(vsi, NULL, vrl->lut, ICE_VSIQF_HLUT_ARRAY_SIZE); + aq_ret = ret ? ICE_ERR_PARAM : ICE_SUCCESS; +error_param: + return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_LUT, aq_ret, + NULL, 0); +} + +/** + * ice_vc_get_stats_msg + * @vf: pointer to the VF info + * @msg: pointer to the msg buffer + * + * called from the VF to get VSI stats + */ +static int ice_vc_get_stats_msg(struct ice_vf *vf, u8 *msg) +{ + struct virtchnl_queue_select *vqs = + (struct virtchnl_queue_select *)msg; + enum ice_status aq_ret = 0; + struct ice_eth_stats stats; + struct ice_vsi *vsi; + + if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) { + aq_ret = ICE_ERR_PARAM; + goto error_param; + } + + if (!ice_vc_isvalid_vsi_id(vf, vqs->vsi_id)) { + aq_ret = ICE_ERR_PARAM; + goto error_param; + } + + vsi = ice_find_vsi_from_id(vf->pf, vqs->vsi_id); + if (!vsi) { + aq_ret = ICE_ERR_PARAM; + goto error_param; + } + + memset(&stats, 0, sizeof(struct ice_eth_stats)); + ice_update_eth_stats(vsi); + + stats = vsi->eth_stats; + +error_param: + /* send the response to the VF */ + return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_STATS, aq_ret, + (u8 *)&stats, sizeof(stats)); +} + +/** + * ice_vc_ena_qs_msg + * @vf: pointer to the VF info + * @msg: pointer to the msg buffer + * + * called from the VF to enable all or specific queue(s) + */ +static int ice_vc_ena_qs_msg(struct ice_vf *vf, u8 *msg) +{ + struct virtchnl_queue_select *vqs = + (struct virtchnl_queue_select *)msg; + enum ice_status aq_ret = 0; + struct ice_vsi *vsi; + + if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) { + aq_ret = ICE_ERR_PARAM; + goto error_param; + } + + if (!ice_vc_isvalid_vsi_id(vf, vqs->vsi_id)) { + aq_ret = ICE_ERR_PARAM; + goto error_param; + } + + if (!vqs->rx_queues && !vqs->tx_queues) { + aq_ret = ICE_ERR_PARAM; + goto error_param; + } + + vsi = ice_find_vsi_from_id(vf->pf, vqs->vsi_id); + if (!vsi) { + aq_ret = ICE_ERR_PARAM; + goto error_param; + } + + /* Enable only Rx rings, Tx rings were enabled by the FW when the + * Tx queue group list was configured and the context bits were + * programmed using ice_vsi_cfg_txqs + */ + if (ice_vsi_start_rx_rings(vsi)) + aq_ret = ICE_ERR_PARAM; + + /* Set flag to indicate that queues are enabled */ + if (!aq_ret) + set_bit(ICE_VF_STATE_ENA, vf->vf_states); + +error_param: + /* send the response to the VF */ + return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ENABLE_QUEUES, aq_ret, + NULL, 0); +} + +/** + * ice_vc_dis_qs_msg + * @vf: pointer to the VF info + * @msg: pointer to the msg buffer + * + * 
called from the VF to disable all or specific + * queue(s) + */ +static int ice_vc_dis_qs_msg(struct ice_vf *vf, u8 *msg) +{ + struct virtchnl_queue_select *vqs = + (struct virtchnl_queue_select *)msg; + enum ice_status aq_ret = 0; + struct ice_vsi *vsi; + + if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) && + !test_bit(ICE_VF_STATE_ENA, vf->vf_states)) { + aq_ret = ICE_ERR_PARAM; + goto error_param; + } + + if (!ice_vc_isvalid_vsi_id(vf, vqs->vsi_id)) { + aq_ret = ICE_ERR_PARAM; + goto error_param; + } + + if (!vqs->rx_queues && !vqs->tx_queues) { + aq_ret = ICE_ERR_PARAM; + goto error_param; + } + + vsi = ice_find_vsi_from_id(vf->pf, vqs->vsi_id); + if (!vsi) { + aq_ret = ICE_ERR_PARAM; + goto error_param; + } + + if (ice_vsi_stop_tx_rings(vsi, ICE_NO_RESET, vf->vf_id)) { + dev_err(&vsi->back->pdev->dev, + "Failed to stop tx rings on VSI %d\n", + vsi->vsi_num); + aq_ret = ICE_ERR_PARAM; + } + + if (ice_vsi_stop_rx_rings(vsi)) { + dev_err(&vsi->back->pdev->dev, + "Failed to stop rx rings on VSI %d\n", + vsi->vsi_num); + aq_ret = ICE_ERR_PARAM; + } + + /* Clear enabled queues flag */ + if (!aq_ret) + clear_bit(ICE_VF_STATE_ENA, vf->vf_states); + +error_param: + /* send the response to the VF */ + return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DISABLE_QUEUES, aq_ret, + NULL, 0); +} + +/** + * ice_vc_cfg_irq_map_msg + * @vf: pointer to the VF info + * @msg: pointer to the msg buffer + * + * called from the VF to configure the IRQ to queue map + */ +static int ice_vc_cfg_irq_map_msg(struct ice_vf *vf, u8 *msg) +{ + struct virtchnl_irq_map_info *irqmap_info = + (struct virtchnl_irq_map_info *)msg; + u16 vsi_id, vsi_q_id, vector_id; + struct virtchnl_vector_map *map; + struct ice_vsi *vsi = NULL; + struct ice_pf *pf = vf->pf; + enum ice_status aq_ret = 0; + unsigned long qmap; + int i; + + if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) { + aq_ret = ICE_ERR_PARAM; + goto error_param; + } + + for (i = 0; i < irqmap_info->num_vectors; i++) { + map = &irqmap_info->vecmap[i]; + + vector_id = map->vector_id; + vsi_id = map->vsi_id; + /* validate msg params */ + if (!(vector_id < pf->hw.func_caps.common_cap + .num_msix_vectors) || !ice_vc_isvalid_vsi_id(vf, vsi_id)) { + aq_ret = ICE_ERR_PARAM; + goto error_param; + } + + vsi = ice_find_vsi_from_id(vf->pf, vsi_id); + if (!vsi) { + aq_ret = ICE_ERR_PARAM; + goto error_param; + } + + /* look out for invalid queue indices */ + qmap = map->rxq_map; + for_each_set_bit(vsi_q_id, &qmap, ICE_MAX_BASE_QS_PER_VF) { + struct ice_q_vector *q_vector; + + if (!ice_vc_isvalid_q_id(vf, vsi_id, vsi_q_id)) { + aq_ret = ICE_ERR_PARAM; + goto error_param; + } + q_vector = vsi->q_vectors[i]; + q_vector->num_ring_rx++; + q_vector->rx.itr_idx = map->rxitr_idx; + vsi->rx_rings[vsi_q_id]->q_vector = q_vector; + } + + qmap = map->txq_map; + for_each_set_bit(vsi_q_id, &qmap, ICE_MAX_BASE_QS_PER_VF) { + struct ice_q_vector *q_vector; + + if (!ice_vc_isvalid_q_id(vf, vsi_id, vsi_q_id)) { + aq_ret = ICE_ERR_PARAM; + goto error_param; + } + q_vector = vsi->q_vectors[i]; + q_vector->num_ring_tx++; + q_vector->tx.itr_idx = map->txitr_idx; + vsi->tx_rings[vsi_q_id]->q_vector = q_vector; + } + } + + if (vsi) + ice_vsi_cfg_msix(vsi); +error_param: + /* send the response to the VF */ + return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_IRQ_MAP, aq_ret, + NULL, 0); +} + +/** + * ice_vc_cfg_qs_msg + * @vf: pointer to the VF info + * @msg: pointer to the msg buffer + * + * called from the VF to configure the Rx/Tx queues + */ +static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg) +{ 
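+	/* The per-queue-pair validation below enforces, among other checks,
+	 * that the Rx buffer size is at most 16 * 1024 - 128 = 16256 bytes
+	 * and that max_pkt_size lies in [64, 16 * 1024).
+	 */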
+ struct virtchnl_vsi_queue_config_info *qci = + (struct virtchnl_vsi_queue_config_info *)msg; + struct virtchnl_queue_pair_info *qpi; + enum ice_status aq_ret = 0; + struct ice_vsi *vsi; + int i; + + if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) { + aq_ret = ICE_ERR_PARAM; + goto error_param; + } + + if (!ice_vc_isvalid_vsi_id(vf, qci->vsi_id)) { + aq_ret = ICE_ERR_PARAM; + goto error_param; + } + + vsi = ice_find_vsi_from_id(vf->pf, qci->vsi_id); + if (!vsi) { + aq_ret = ICE_ERR_PARAM; + goto error_param; + } + + for (i = 0; i < qci->num_queue_pairs; i++) { + qpi = &qci->qpair[i]; + if (qpi->txq.vsi_id != qci->vsi_id || + qpi->rxq.vsi_id != qci->vsi_id || + qpi->rxq.queue_id != qpi->txq.queue_id || + !ice_vc_isvalid_q_id(vf, qci->vsi_id, qpi->txq.queue_id)) { + aq_ret = ICE_ERR_PARAM; + goto error_param; + } + /* copy Tx queue info from VF into VSI */ + vsi->tx_rings[i]->dma = qpi->txq.dma_ring_addr; + vsi->tx_rings[i]->count = qpi->txq.ring_len; + /* copy Rx queue info from VF into vsi */ + vsi->rx_rings[i]->dma = qpi->rxq.dma_ring_addr; + vsi->rx_rings[i]->count = qpi->rxq.ring_len; + if (qpi->rxq.databuffer_size > ((16 * 1024) - 128)) { + aq_ret = ICE_ERR_PARAM; + goto error_param; + } + vsi->rx_buf_len = qpi->rxq.databuffer_size; + if (qpi->rxq.max_pkt_size >= (16 * 1024) || + qpi->rxq.max_pkt_size < 64) { + aq_ret = ICE_ERR_PARAM; + goto error_param; + } + vsi->max_frame = qpi->rxq.max_pkt_size; + } + + /* VF can request to configure less than allocated queues + * or default allocated queues. So update the VSI with new number + */ + vsi->num_txq = qci->num_queue_pairs; + vsi->num_rxq = qci->num_queue_pairs; + + if (!ice_vsi_cfg_txqs(vsi) && !ice_vsi_cfg_rxqs(vsi)) + aq_ret = 0; + else + aq_ret = ICE_ERR_PARAM; + +error_param: + /* send the response to the VF */ + return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES, aq_ret, + NULL, 0); +} + +/** + * ice_is_vf_trusted + * @vf: pointer to the VF info + */ +static bool ice_is_vf_trusted(struct ice_vf *vf) +{ + return test_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps); +} + +/** + * ice_can_vf_change_mac + * @vf: pointer to the VF info + * + * Return true if the VF is allowed to change its MAC filters, false otherwise + */ +static bool ice_can_vf_change_mac(struct ice_vf *vf) +{ + /* If the VF MAC address has been set administratively (via the + * ndo_set_vf_mac command), then deny permission to the VF to + * add/delete unicast MAC addresses, unless the VF is trusted + */ + if (vf->pf_set_mac && !ice_is_vf_trusted(vf)) + return false; + + return true; +} + +/** + * ice_vc_handle_mac_addr_msg + * @vf: pointer to the VF info + * @msg: pointer to the msg buffer + * @set: true if mac filters are being set, false otherwise + * + * add guest mac address filter + */ +static int +ice_vc_handle_mac_addr_msg(struct ice_vf *vf, u8 *msg, bool set) +{ + struct virtchnl_ether_addr_list *al = + (struct virtchnl_ether_addr_list *)msg; + struct ice_pf *pf = vf->pf; + enum virtchnl_ops vc_op; + enum ice_status ret; + LIST_HEAD(mac_list); + struct ice_vsi *vsi; + int mac_count = 0; + int i; + + if (set) + vc_op = VIRTCHNL_OP_ADD_ETH_ADDR; + else + vc_op = VIRTCHNL_OP_DEL_ETH_ADDR; + + if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) || + !ice_vc_isvalid_vsi_id(vf, al->vsi_id)) { + ret = ICE_ERR_PARAM; + goto handle_mac_exit; + } + + if (set && !ice_is_vf_trusted(vf) && + (vf->num_mac + al->num_elements) > ICE_MAX_MACADDR_PER_VF) { + dev_err(&pf->pdev->dev, + "Can't add more MAC addresses, because VF is not trusted, switch the VF to 
trusted mode in order to add more functionalities\n"); + ret = ICE_ERR_PARAM; + goto handle_mac_exit; + } + + vsi = pf->vsi[vf->lan_vsi_idx]; + + for (i = 0; i < al->num_elements; i++) { + u8 *maddr = al->list[i].addr; + + if (ether_addr_equal(maddr, vf->dflt_lan_addr.addr) || + is_broadcast_ether_addr(maddr)) { + if (set) { + /* VF is trying to add filters that the PF + * already added. Just continue. + */ + dev_info(&pf->pdev->dev, + "mac %pM already set for VF %d\n", + maddr, vf->vf_id); + continue; + } else { + /* VF can't remove dflt_lan_addr/bcast mac */ + dev_err(&pf->pdev->dev, + "can't remove mac %pM for VF %d\n", + maddr, vf->vf_id); + ret = ICE_ERR_PARAM; + goto handle_mac_exit; + } + } + + /* check for the invalid cases and bail if necessary */ + if (is_zero_ether_addr(maddr)) { + dev_err(&pf->pdev->dev, + "invalid mac %pM provided for VF %d\n", + maddr, vf->vf_id); + ret = ICE_ERR_PARAM; + goto handle_mac_exit; + } + + if (is_unicast_ether_addr(maddr) && + !ice_can_vf_change_mac(vf)) { + dev_err(&pf->pdev->dev, + "can't change unicast mac for untrusted VF %d\n", + vf->vf_id); + ret = ICE_ERR_PARAM; + goto handle_mac_exit; + } + + /* get here if maddr is multicast or if VF can change mac */ + if (ice_add_mac_to_list(vsi, &mac_list, al->list[i].addr)) { + ret = ICE_ERR_NO_MEMORY; + goto handle_mac_exit; + } + mac_count++; + } + + /* program the updated filter list */ + if (set) + ret = ice_add_mac(&pf->hw, &mac_list); + else + ret = ice_remove_mac(&pf->hw, &mac_list); + + if (ret) { + dev_err(&pf->pdev->dev, + "can't update mac filters for VF %d, error %d\n", + vf->vf_id, ret); + } else { + if (set) + vf->num_mac += mac_count; + else + vf->num_mac -= mac_count; + } + +handle_mac_exit: + ice_free_fltr_list(&pf->pdev->dev, &mac_list); + /* send the response to the VF */ + return ice_vc_send_msg_to_vf(vf, vc_op, ret, NULL, 0); +} + +/** + * ice_vc_add_mac_addr_msg + * @vf: pointer to the VF info + * @msg: pointer to the msg buffer + * + * add guest MAC address filter + */ +static int ice_vc_add_mac_addr_msg(struct ice_vf *vf, u8 *msg) +{ + return ice_vc_handle_mac_addr_msg(vf, msg, true); +} + +/** + * ice_vc_del_mac_addr_msg + * @vf: pointer to the VF info + * @msg: pointer to the msg buffer + * + * remove guest MAC address filter + */ +static int ice_vc_del_mac_addr_msg(struct ice_vf *vf, u8 *msg) +{ + return ice_vc_handle_mac_addr_msg(vf, msg, false); +} + +/** + * ice_vc_request_qs_msg + * @vf: pointer to the VF info + * @msg: pointer to the msg buffer + * + * VFs get a default number of queues but can use this message to request a + * different number. If the request is successful, PF will reset the VF and + * return 0. If unsuccessful, PF will send message informing VF of number of + * available queue pairs via virtchnl message response to VF. + */ +static int ice_vc_request_qs_msg(struct ice_vf *vf, u8 *msg) +{ + struct virtchnl_vf_res_request *vfres = + (struct virtchnl_vf_res_request *)msg; + int req_queues = vfres->num_queue_pairs; + enum ice_status aq_ret = 0; + struct ice_pf *pf = vf->pf; + int tx_rx_queue_left; + int cur_queues; + + if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) { + aq_ret = ICE_ERR_PARAM; + goto error_param; + } + + cur_queues = pf->num_vf_qps; + tx_rx_queue_left = min_t(int, pf->q_left_tx, pf->q_left_rx); + if (req_queues <= 0) { + dev_err(&pf->pdev->dev, + "VF %d tried to request %d queues. 
Ignoring.\n", + vf->vf_id, req_queues); + } else if (req_queues > ICE_MAX_QS_PER_VF) { + dev_err(&pf->pdev->dev, + "VF %d tried to request more than %d queues.\n", + vf->vf_id, ICE_MAX_QS_PER_VF); + vfres->num_queue_pairs = ICE_MAX_QS_PER_VF; + } else if (req_queues - cur_queues > tx_rx_queue_left) { + dev_warn(&pf->pdev->dev, + "VF %d requested %d more queues, but only %d left.\n", + vf->vf_id, req_queues - cur_queues, tx_rx_queue_left); + vfres->num_queue_pairs = tx_rx_queue_left + cur_queues; + } else { + /* request is successful, then reset VF */ + vf->num_req_qs = req_queues; + ice_vc_dis_vf(vf); + dev_info(&pf->pdev->dev, + "VF %d granted request of %d queues.\n", + vf->vf_id, req_queues); + return 0; + } + +error_param: + /* send the response to the VF */ + return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_REQUEST_QUEUES, + aq_ret, (u8 *)vfres, sizeof(*vfres)); +} + +/** + * ice_set_vf_port_vlan + * @netdev: network interface device structure + * @vf_id: VF identifier + * @vlan_id: VLAN id being set + * @qos: priority setting + * @vlan_proto: VLAN protocol + * + * program VF Port VLAN id and/or qos + */ +int +ice_set_vf_port_vlan(struct net_device *netdev, int vf_id, u16 vlan_id, u8 qos, + __be16 vlan_proto) +{ + u16 vlanprio = vlan_id | (qos << ICE_VLAN_PRIORITY_S); + struct ice_netdev_priv *np = netdev_priv(netdev); + struct ice_pf *pf = np->vsi->back; + struct ice_vsi *vsi; + struct ice_vf *vf; + int ret = 0; + + /* validate the request */ + if (vf_id >= pf->num_alloc_vfs) { + dev_err(&pf->pdev->dev, "invalid VF id: %d\n", vf_id); + return -EINVAL; + } + + if (vlan_id > ICE_MAX_VLANID || qos > 7) { + dev_err(&pf->pdev->dev, "Invalid VF Parameters\n"); + return -EINVAL; + } + + if (vlan_proto != htons(ETH_P_8021Q)) { + dev_err(&pf->pdev->dev, "VF VLAN protocol is not supported\n"); + return -EPROTONOSUPPORT; + } + + vf = &pf->vf[vf_id]; + vsi = pf->vsi[vf->lan_vsi_idx]; + if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states)) { + dev_err(&pf->pdev->dev, "VF %d in reset. Try again.\n", vf_id); + return -EBUSY; + } + + if (le16_to_cpu(vsi->info.pvid) == vlanprio) { + /* duplicate request, so just return success */ + dev_info(&pf->pdev->dev, + "Duplicate pvid %d request\n", vlanprio); + return ret; + } + + /* If pvid, then remove all filters on the old VLAN */ + if (vsi->info.pvid) + ice_vsi_kill_vlan(vsi, (le16_to_cpu(vsi->info.pvid) & + VLAN_VID_MASK)); + + if (vlan_id || qos) { + ret = ice_vsi_set_pvid(vsi, vlanprio); + if (ret) + goto error_set_pvid; + } else { + ice_vsi_kill_pvid(vsi); + } + + if (vlan_id) { + dev_info(&pf->pdev->dev, "Setting VLAN %d, QOS 0x%x on VF %d\n", + vlan_id, qos, vf_id); + + /* add new VLAN filter for each MAC */ + ret = ice_vsi_add_vlan(vsi, vlan_id); + if (ret) + goto error_set_pvid; + } + + /* The Port VLAN needs to be saved across resets the same as the + * default LAN MAC address. 
+ */ + vf->port_vlan_id = le16_to_cpu(vsi->info.pvid); + +error_set_pvid: + return ret; +} + +/** + * ice_vc_process_vlan_msg + * @vf: pointer to the VF info + * @msg: pointer to the msg buffer + * @add_v: Add VLAN if true, otherwise delete VLAN + * + * Process virtchnl op to add or remove programmed guest VLAN id + */ +static int ice_vc_process_vlan_msg(struct ice_vf *vf, u8 *msg, bool add_v) +{ + struct virtchnl_vlan_filter_list *vfl = + (struct virtchnl_vlan_filter_list *)msg; + enum ice_status aq_ret = 0; + struct ice_pf *pf = vf->pf; + struct ice_vsi *vsi; + int i; + + if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) { + aq_ret = ICE_ERR_PARAM; + goto error_param; + } + + if (!ice_vc_isvalid_vsi_id(vf, vfl->vsi_id)) { + aq_ret = ICE_ERR_PARAM; + goto error_param; + } + + if (add_v && !ice_is_vf_trusted(vf) && + vf->num_vlan >= ICE_MAX_VLAN_PER_VF) { + dev_info(&pf->pdev->dev, + "VF is not trusted, switch the VF to trusted mode, in order to add more VLAN addresses\n"); + aq_ret = ICE_ERR_PARAM; + goto error_param; + } + + for (i = 0; i < vfl->num_elements; i++) { + if (vfl->vlan_id[i] > ICE_MAX_VLANID) { + aq_ret = ICE_ERR_PARAM; + dev_err(&pf->pdev->dev, + "invalid VF VLAN id %d\n", vfl->vlan_id[i]); + goto error_param; + } + } + + vsi = ice_find_vsi_from_id(vf->pf, vfl->vsi_id); + if (!vsi) { + aq_ret = ICE_ERR_PARAM; + goto error_param; + } + + if (vsi->info.pvid) { + aq_ret = ICE_ERR_PARAM; + goto error_param; + } + + if (ice_vsi_manage_vlan_stripping(vsi, add_v)) { + dev_err(&pf->pdev->dev, + "%sable VLAN stripping failed for VSI %i\n", + add_v ? "en" : "dis", vsi->vsi_num); + aq_ret = ICE_ERR_PARAM; + goto error_param; + } + + if (add_v) { + for (i = 0; i < vfl->num_elements; i++) { + u16 vid = vfl->vlan_id[i]; + + if (!ice_vsi_add_vlan(vsi, vid)) { + vf->num_vlan++; + set_bit(vid, vsi->active_vlans); + + /* Enable VLAN pruning when VLAN 0 is added */ + if (unlikely(!vid)) + if (ice_cfg_vlan_pruning(vsi, true)) + aq_ret = ICE_ERR_PARAM; + } else { + aq_ret = ICE_ERR_PARAM; + } + } + } else { + for (i = 0; i < vfl->num_elements; i++) { + u16 vid = vfl->vlan_id[i]; + + /* Make sure ice_vsi_kill_vlan is successful before + * updating VLAN information + */ + if (!ice_vsi_kill_vlan(vsi, vid)) { + vf->num_vlan--; + clear_bit(vid, vsi->active_vlans); + + /* Disable VLAN pruning when removing VLAN 0 */ + if (unlikely(!vid)) + ice_cfg_vlan_pruning(vsi, false); + } + } + } + +error_param: + /* send the response to the VF */ + if (add_v) + return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ADD_VLAN, aq_ret, + NULL, 0); + else + return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DEL_VLAN, aq_ret, + NULL, 0); +} + +/** + * ice_vc_add_vlan_msg + * @vf: pointer to the VF info + * @msg: pointer to the msg buffer + * + * Add and program guest VLAN id + */ +static int ice_vc_add_vlan_msg(struct ice_vf *vf, u8 *msg) +{ + return ice_vc_process_vlan_msg(vf, msg, true); +} + +/** + * ice_vc_remove_vlan_msg + * @vf: pointer to the VF info + * @msg: pointer to the msg buffer + * + * remove programmed guest VLAN id + */ +static int ice_vc_remove_vlan_msg(struct ice_vf *vf, u8 *msg) +{ + return ice_vc_process_vlan_msg(vf, msg, false); +} + +/** + * ice_vc_ena_vlan_stripping + * @vf: pointer to the VF info + * + * Enable VLAN header stripping for a given VF + */ +static int ice_vc_ena_vlan_stripping(struct ice_vf *vf) +{ + enum ice_status aq_ret = 0; + struct ice_pf *pf = vf->pf; + struct ice_vsi *vsi; + + if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) { + aq_ret = ICE_ERR_PARAM; + goto error_param; + } + + vsi 
= pf->vsi[vf->lan_vsi_idx]; + if (ice_vsi_manage_vlan_stripping(vsi, true)) + aq_ret = ICE_ERR_AQ_ERROR; + +error_param: + return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ENABLE_VLAN_STRIPPING, + aq_ret, NULL, 0); +} + +/** + * ice_vc_dis_vlan_stripping + * @vf: pointer to the VF info + * + * Disable VLAN header stripping for a given VF + */ +static int ice_vc_dis_vlan_stripping(struct ice_vf *vf) +{ + enum ice_status aq_ret = 0; + struct ice_pf *pf = vf->pf; + struct ice_vsi *vsi; + + if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) { + aq_ret = ICE_ERR_PARAM; + goto error_param; + } + + vsi = pf->vsi[vf->lan_vsi_idx]; + if (ice_vsi_manage_vlan_stripping(vsi, false)) + aq_ret = ICE_ERR_AQ_ERROR; + +error_param: + return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DISABLE_VLAN_STRIPPING, + aq_ret, NULL, 0); +} + +/** + * ice_vc_process_vf_msg - Process request from VF + * @pf: pointer to the PF structure + * @event: pointer to the AQ event + * + * called from the common asq/arq handler to + * process request from VF + */ +void ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event) +{ + u32 v_opcode = le32_to_cpu(event->desc.cookie_high); + s16 vf_id = le16_to_cpu(event->desc.retval); + u16 msglen = event->msg_len; + u8 *msg = event->msg_buf; + struct ice_vf *vf = NULL; + int err = 0; + + if (vf_id >= pf->num_alloc_vfs) { + err = -EINVAL; + goto error_handler; + } + + vf = &pf->vf[vf_id]; + + /* Check if VF is disabled. */ + if (test_bit(ICE_VF_STATE_DIS, vf->vf_states)) { + err = -EPERM; + goto error_handler; + } + + /* Perform basic checks on the msg */ + err = virtchnl_vc_validate_vf_msg(&vf->vf_ver, v_opcode, msg, msglen); + if (err) { + if (err == VIRTCHNL_ERR_PARAM) + err = -EPERM; + else + err = -EINVAL; + goto error_handler; + } + + /* Perform additional checks specific to RSS and Virtchnl */ + if (v_opcode == VIRTCHNL_OP_CONFIG_RSS_KEY) { + struct virtchnl_rss_key *vrk = (struct virtchnl_rss_key *)msg; + + if (vrk->key_len != ICE_VSIQF_HKEY_ARRAY_SIZE) + err = -EINVAL; + } else if (v_opcode == VIRTCHNL_OP_CONFIG_RSS_LUT) { + struct virtchnl_rss_lut *vrl = (struct virtchnl_rss_lut *)msg; + + if (vrl->lut_entries != ICE_VSIQF_HLUT_ARRAY_SIZE) + err = -EINVAL; + } + +error_handler: + if (err) { + ice_vc_send_msg_to_vf(vf, v_opcode, ICE_ERR_PARAM, NULL, 0); + dev_err(&pf->pdev->dev, "Invalid message from VF %d, opcode %d, len %d, error %d\n", + vf_id, v_opcode, msglen, err); + return; + } + + switch (v_opcode) { + case VIRTCHNL_OP_VERSION: + err = ice_vc_get_ver_msg(vf, msg); + break; + case VIRTCHNL_OP_GET_VF_RESOURCES: + err = ice_vc_get_vf_res_msg(vf, msg); + break; + case VIRTCHNL_OP_RESET_VF: + ice_vc_reset_vf_msg(vf); + break; + case VIRTCHNL_OP_ADD_ETH_ADDR: + err = ice_vc_add_mac_addr_msg(vf, msg); + break; + case VIRTCHNL_OP_DEL_ETH_ADDR: + err = ice_vc_del_mac_addr_msg(vf, msg); + break; + case VIRTCHNL_OP_CONFIG_VSI_QUEUES: + err = ice_vc_cfg_qs_msg(vf, msg); + break; + case VIRTCHNL_OP_ENABLE_QUEUES: + err = ice_vc_ena_qs_msg(vf, msg); + ice_vc_notify_vf_link_state(vf); + break; + case VIRTCHNL_OP_DISABLE_QUEUES: + err = ice_vc_dis_qs_msg(vf, msg); + break; + case VIRTCHNL_OP_REQUEST_QUEUES: + err = ice_vc_request_qs_msg(vf, msg); + break; + case VIRTCHNL_OP_CONFIG_IRQ_MAP: + err = ice_vc_cfg_irq_map_msg(vf, msg); + break; + case VIRTCHNL_OP_CONFIG_RSS_KEY: + err = ice_vc_config_rss_key(vf, msg); + break; + case VIRTCHNL_OP_CONFIG_RSS_LUT: + err = ice_vc_config_rss_lut(vf, msg); + break; + case VIRTCHNL_OP_GET_STATS: + err = ice_vc_get_stats_msg(vf, msg); + 
break; + case VIRTCHNL_OP_ADD_VLAN: + err = ice_vc_add_vlan_msg(vf, msg); + break; + case VIRTCHNL_OP_DEL_VLAN: + err = ice_vc_remove_vlan_msg(vf, msg); + break; + case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING: + err = ice_vc_ena_vlan_stripping(vf); + break; + case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING: + err = ice_vc_dis_vlan_stripping(vf); + break; + case VIRTCHNL_OP_UNKNOWN: + default: + dev_err(&pf->pdev->dev, "Unsupported opcode %d from VF %d\n", + v_opcode, vf_id); + err = ice_vc_send_msg_to_vf(vf, v_opcode, ICE_ERR_NOT_IMPL, + NULL, 0); + break; + } + if (err) { + /* Helper function cares less about error return values here + * as it is busy with pending work. + */ + dev_info(&pf->pdev->dev, + "PF failed to honor VF %d, opcode %d, error %d\n", + vf_id, v_opcode, err); + } +} + +/** + * ice_get_vf_cfg + * @netdev: network interface device structure + * @vf_id: VF identifier + * @ivi: VF configuration structure + * + * return VF configuration + */ +int ice_get_vf_cfg(struct net_device *netdev, int vf_id, + struct ifla_vf_info *ivi) +{ + struct ice_netdev_priv *np = netdev_priv(netdev); + struct ice_vsi *vsi = np->vsi; + struct ice_pf *pf = vsi->back; + struct ice_vf *vf; + + /* validate the request */ + if (vf_id >= pf->num_alloc_vfs) { + netdev_err(netdev, "invalid VF id: %d\n", vf_id); + return -EINVAL; + } + + vf = &pf->vf[vf_id]; + vsi = pf->vsi[vf->lan_vsi_idx]; + + if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states)) { + netdev_err(netdev, "VF %d in reset. Try again.\n", vf_id); + return -EBUSY; + } + + ivi->vf = vf_id; + ether_addr_copy(ivi->mac, vf->dflt_lan_addr.addr); + + /* VF configuration for VLAN and applicable QoS */ + ivi->vlan = le16_to_cpu(vsi->info.pvid) & ICE_VLAN_M; + ivi->qos = (le16_to_cpu(vsi->info.pvid) & ICE_PRIORITY_M) >> + ICE_VLAN_PRIORITY_S; + + ivi->trusted = vf->trusted; + ivi->spoofchk = vf->spoofchk; + if (!vf->link_forced) + ivi->linkstate = IFLA_VF_LINK_STATE_AUTO; + else if (vf->link_up) + ivi->linkstate = IFLA_VF_LINK_STATE_ENABLE; + else + ivi->linkstate = IFLA_VF_LINK_STATE_DISABLE; + ivi->max_tx_rate = vf->tx_rate; + ivi->min_tx_rate = 0; + return 0; +}
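The pvid field read back here packs the VLAN ID into bits 0-11 and the 802.1p priority into bits 12-14, which is why ice_get_vf_cfg() masks with ICE_VLAN_M and ICE_PRIORITY_M while ice_set_vf_port_vlan() earlier built vlanprio as vlan_id | (qos << ICE_VLAN_PRIORITY_S). A self-contained sketch of that encode/decode round trip (mask values copied from ice_virtchnl_pf.h below; the pvid_pack() helper is illustrative only, not a driver function):

#include <stdio.h>
#include <stdint.h>

#define ICE_VLAN_PRIORITY_S	12
#define ICE_VLAN_M		0xFFF
#define ICE_PRIORITY_M		0x7000

/* Illustrative helper mirroring how the driver packs vsi->info.pvid. */
static uint16_t pvid_pack(uint16_t vlan_id, uint8_t qos)
{
	return vlan_id | (uint16_t)(qos << ICE_VLAN_PRIORITY_S);
}

int main(void)
{
	uint16_t pvid = pvid_pack(100, 5);	/* VLAN 100, priority 5 */

	printf("pvid=0x%04x vlan=%u qos=%u\n", pvid,
	       pvid & ICE_VLAN_M,
	       (pvid & ICE_PRIORITY_M) >> ICE_VLAN_PRIORITY_S);
	return 0;
}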
"ON" : "OFF"); + return 0; + } + + ctx.info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SECURITY_VALID); + + if (ena) { + ctx.info.sec_flags |= ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF; + ctx.info.sw_flags2 |= ICE_AQ_VSI_SW_FLAG_RX_PRUNE_EN_M; + } + + status = ice_update_vsi(&pf->hw, vsi->idx, &ctx, NULL); + if (status) { + dev_dbg(&pf->pdev->dev, + "Error %d, failed to update VSI* parameters\n", status); + return -EIO; + } + + vf->spoofchk = ena; + vsi->info.sec_flags = ctx.info.sec_flags; + vsi->info.sw_flags2 = ctx.info.sw_flags2; + + return status; +} + +/** + * ice_set_vf_mac + * @netdev: network interface device structure + * @vf_id: VF identifier + * @mac: mac address + * + * program VF mac address + */ +int ice_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac) +{ + struct ice_netdev_priv *np = netdev_priv(netdev); + struct ice_vsi *vsi = np->vsi; + struct ice_pf *pf = vsi->back; + struct ice_vf *vf; + int ret = 0; + + /* validate the request */ + if (vf_id >= pf->num_alloc_vfs) { + netdev_err(netdev, "invalid VF id: %d\n", vf_id); + return -EINVAL; + } + + vf = &pf->vf[vf_id]; + if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states)) { + netdev_err(netdev, "VF %d in reset. Try again.\n", vf_id); + return -EBUSY; + } + + if (is_zero_ether_addr(mac) || is_multicast_ether_addr(mac)) { + netdev_err(netdev, "%pM not a valid unicast address\n", mac); + return -EINVAL; + } + + /* copy mac into dflt_lan_addr and trigger a VF reset. The reset + * flow will use the updated dflt_lan_addr and add a MAC filter + * using ice_add_mac. Also set pf_set_mac to indicate that the PF has + * set the MAC address for this VF. + */ + ether_addr_copy(vf->dflt_lan_addr.addr, mac); + vf->pf_set_mac = true; + netdev_info(netdev, + "mac on VF %d set to %pM\n. VF driver will be reinitialized\n", + vf_id, mac); + + ice_vc_dis_vf(vf); + return ret; +} + +/** + * ice_set_vf_trust + * @netdev: network interface device structure + * @vf_id: VF identifier + * @trusted: Boolean value to enable/disable trusted VF + * + * Enable or disable a given VF as trusted + */ +int ice_set_vf_trust(struct net_device *netdev, int vf_id, bool trusted) +{ + struct ice_netdev_priv *np = netdev_priv(netdev); + struct ice_vsi *vsi = np->vsi; + struct ice_pf *pf = vsi->back; + struct ice_vf *vf; + + /* validate the request */ + if (vf_id >= pf->num_alloc_vfs) { + dev_err(&pf->pdev->dev, "invalid VF id: %d\n", vf_id); + return -EINVAL; + } + + vf = &pf->vf[vf_id]; + if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states)) { + dev_err(&pf->pdev->dev, "VF %d in reset. Try again.\n", vf_id); + return -EBUSY; + } + + /* Check if already trusted */ + if (trusted == vf->trusted) + return 0; + + vf->trusted = trusted; + ice_vc_dis_vf(vf); + dev_info(&pf->pdev->dev, "VF %u is now %strusted\n", + vf_id, trusted ? 
"" : "un"); + + return 0; +} + +/** + * ice_set_vf_link_state + * @netdev: network interface device structure + * @vf_id: VF identifier + * @link_state: required link state + * + * Set VF's link state, irrespective of physical link state status + */ +int ice_set_vf_link_state(struct net_device *netdev, int vf_id, int link_state) +{ + struct ice_netdev_priv *np = netdev_priv(netdev); + struct ice_pf *pf = np->vsi->back; + struct virtchnl_pf_event pfe = { 0 }; + struct ice_link_status *ls; + struct ice_vf *vf; + struct ice_hw *hw; + + if (vf_id >= pf->num_alloc_vfs) { + dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id); + return -EINVAL; + } + + vf = &pf->vf[vf_id]; + hw = &pf->hw; + ls = &pf->hw.port_info->phy.link_info; + + if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states)) { + dev_err(&pf->pdev->dev, "vf %d in reset. Try again.\n", vf_id); + return -EBUSY; + } + + pfe.event = VIRTCHNL_EVENT_LINK_CHANGE; + pfe.severity = PF_EVENT_SEVERITY_INFO; + + switch (link_state) { + case IFLA_VF_LINK_STATE_AUTO: + vf->link_forced = false; + vf->link_up = ls->link_info & ICE_AQ_LINK_UP; + break; + case IFLA_VF_LINK_STATE_ENABLE: + vf->link_forced = true; + vf->link_up = true; + break; + case IFLA_VF_LINK_STATE_DISABLE: + vf->link_forced = true; + vf->link_up = false; + break; + default: + return -EINVAL; + } + + if (vf->link_forced) + ice_set_pfe_link_forced(vf, &pfe, vf->link_up); + else + ice_set_pfe_link(vf, &pfe, ls->link_speed, vf->link_up); + + /* Notify the VF of its new link state */ + ice_aq_send_msg_to_vf(hw, vf->vf_id, VIRTCHNL_OP_EVENT, 0, (u8 *)&pfe, + sizeof(pfe), NULL); + + return 0; +} diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h new file mode 100644 index 000000000000..10131e0180f9 --- /dev/null +++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h @@ -0,0 +1,173 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2018, Intel Corporation. */ + +#ifndef _ICE_VIRTCHNL_PF_H_ +#define _ICE_VIRTCHNL_PF_H_ +#include "ice.h" + +#define ICE_MAX_VLANID 4095 +#define ICE_VLAN_PRIORITY_S 12 +#define ICE_VLAN_M 0xFFF +#define ICE_PRIORITY_M 0x7000 + +/* Restrict number of MAC Addr and VLAN that non-trusted VF can programmed */ +#define ICE_MAX_VLAN_PER_VF 8 +#define ICE_MAX_MACADDR_PER_VF 12 + +/* Malicious Driver Detection */ +#define ICE_DFLT_NUM_MDD_EVENTS_ALLOWED 3 +#define ICE_DFLT_NUM_INVAL_MSGS_ALLOWED 10 + +/* Static VF transaction/status register def */ +#define VF_DEVICE_STATUS 0xAA +#define VF_TRANS_PENDING_M 0x20 + +/* Specific VF states */ +enum ice_vf_states { + ICE_VF_STATE_INIT = 0, + ICE_VF_STATE_ACTIVE, + ICE_VF_STATE_ENA, + ICE_VF_STATE_DIS, + ICE_VF_STATE_MC_PROMISC, + ICE_VF_STATE_UC_PROMISC, + /* state to indicate if PF needs to do vector assignment for VF. + * This needs to be set during first time VF initialization or later + * when VF asks for more Vectors through virtchnl OP. 
+ */ + ICE_VF_STATE_CFG_INTR, + ICE_VF_STATES_NBITS +}; + +/* VF capabilities */ +enum ice_virtchnl_cap { + ICE_VIRTCHNL_VF_CAP_L2 = 0, + ICE_VIRTCHNL_VF_CAP_PRIVILEGE, +}; + +/* VF information structure */ +struct ice_vf { + struct ice_pf *pf; + + s16 vf_id; /* VF id in the PF space */ + u32 driver_caps; /* reported by VF driver */ + int first_vector_idx; /* first vector index of this VF */ + struct ice_sw *vf_sw_id; /* switch id the VF VSIs connect to */ + struct virtchnl_version_info vf_ver; + struct virtchnl_ether_addr dflt_lan_addr; + u16 port_vlan_id; + u8 pf_set_mac; /* VF MAC address set by VMM admin */ + u8 trusted; + u16 lan_vsi_idx; /* index into PF struct */ + u16 lan_vsi_num; /* ID as used by firmware */ + u64 num_mdd_events; /* number of mdd events detected */ + u64 num_inval_msgs; /* number of continuous invalid msgs */ + u64 num_valid_msgs; /* number of valid msgs detected */ + unsigned long vf_caps; /* vf's adv. capabilities */ + DECLARE_BITMAP(vf_states, ICE_VF_STATES_NBITS); /* VF runtime states */ + unsigned int tx_rate; /* Tx bandwidth limit in Mbps */ + u8 link_forced; + u8 link_up; /* only valid if VF link is forced */ + u8 spoofchk; + u16 num_mac; + u16 num_vlan; + u8 num_req_qs; /* num of queue pairs requested by VF */ +}; + +#ifdef CONFIG_PCI_IOV +void ice_process_vflr_event(struct ice_pf *pf); +int ice_sriov_configure(struct pci_dev *pdev, int num_vfs); +int ice_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac); +int ice_get_vf_cfg(struct net_device *netdev, int vf_id, + struct ifla_vf_info *ivi); + +void ice_free_vfs(struct ice_pf *pf); +void ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event); +void ice_vc_notify_link_state(struct ice_pf *pf); +void ice_vc_notify_reset(struct ice_pf *pf); +bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr); + +int ice_set_vf_port_vlan(struct net_device *netdev, int vf_id, + u16 vlan_id, u8 qos, __be16 vlan_proto); + +int ice_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate, + int max_tx_rate); + +int ice_set_vf_trust(struct net_device *netdev, int vf_id, bool trusted); + +int ice_set_vf_link_state(struct net_device *netdev, int vf_id, int link_state); + +int ice_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool ena); +#else /* CONFIG_PCI_IOV */ +#define ice_process_vflr_event(pf) do {} while (0) +#define ice_free_vfs(pf) do {} while (0) +#define ice_vc_process_vf_msg(pf, event) do {} while (0) +#define ice_vc_notify_link_state(pf) do {} while (0) +#define ice_vc_notify_reset(pf) do {} while (0) + +static inline bool +ice_reset_all_vfs(struct ice_pf __always_unused *pf, + bool __always_unused is_vflr) +{ + return true; +} + +static inline int +ice_sriov_configure(struct pci_dev __always_unused *pdev, + int __always_unused num_vfs) +{ + return -EOPNOTSUPP; +} + +static inline int +ice_set_vf_mac(struct net_device __always_unused *netdev, + int __always_unused vf_id, u8 __always_unused *mac) +{ + return -EOPNOTSUPP; +} + +static inline int +ice_get_vf_cfg(struct net_device __always_unused *netdev, + int __always_unused vf_id, + struct ifla_vf_info __always_unused *ivi) +{ + return -EOPNOTSUPP; +} + +static inline int +ice_set_vf_trust(struct net_device __always_unused *netdev, + int __always_unused vf_id, bool __always_unused trusted) +{ + return -EOPNOTSUPP; +} + +static inline int +ice_set_vf_port_vlan(struct net_device __always_unused *netdev, + int __always_unused vf_id, u16 __always_unused vid, + u8 __always_unused qos, __be16 __always_unused v_proto) +{ + return 
-EOPNOTSUPP; +} + +static inline int +ice_set_vf_spoofchk(struct net_device __always_unused *netdev, + int __always_unused vf_id, bool __always_unused ena) +{ + return -EOPNOTSUPP; +} + +static inline int +ice_set_vf_link_state(struct net_device __always_unused *netdev, + int __always_unused vf_id, int __always_unused link_state) +{ + return -EOPNOTSUPP; +} + +static inline int +ice_set_vf_bw(struct net_device __always_unused *netdev, + int __always_unused vf_id, int __always_unused min_tx_rate, + int __always_unused max_tx_rate) +{ + return -EOPNOTSUPP; +} +#endif /* CONFIG_PCI_IOV */ +#endif /* _ICE_VIRTCHNL_PF_H_ */ diff --git a/drivers/net/ethernet/intel/ixgbe/Makefile b/drivers/net/ethernet/intel/ixgbe/Makefile index 5414685189ce..ca6b0c458e4a 100644 --- a/drivers/net/ethernet/intel/ixgbe/Makefile +++ b/drivers/net/ethernet/intel/ixgbe/Makefile @@ -8,7 +8,8 @@ obj-$(CONFIG_IXGBE) += ixgbe.o ixgbe-objs := ixgbe_main.o ixgbe_common.o ixgbe_ethtool.o \ ixgbe_82599.o ixgbe_82598.o ixgbe_phy.o ixgbe_sriov.o \ - ixgbe_mbx.o ixgbe_x540.o ixgbe_x550.o ixgbe_lib.o ixgbe_ptp.o + ixgbe_mbx.o ixgbe_x540.o ixgbe_x550.o ixgbe_lib.o ixgbe_ptp.o \ + ixgbe_xsk.o ixgbe-$(CONFIG_IXGBE_DCB) += ixgbe_dcb.o ixgbe_dcb_82598.o \ ixgbe_dcb_82599.o ixgbe_dcb_nl.o diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h index 5c6fd42e90ed..7a7679e7be84 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h @@ -228,13 +228,17 @@ struct ixgbe_tx_buffer { struct ixgbe_rx_buffer { struct sk_buff *skb; dma_addr_t dma; - struct page *page; -#if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536) - __u32 page_offset; -#else - __u16 page_offset; -#endif - __u16 pagecnt_bias; + union { + struct { + struct page *page; + __u32 page_offset; + __u16 pagecnt_bias; + }; + struct { + void *addr; + u64 handle; + }; + }; }; struct ixgbe_queue_stats { @@ -271,6 +275,7 @@ enum ixgbe_ring_state_t { __IXGBE_TX_DETECT_HANG, __IXGBE_HANG_CHECK_ARMED, __IXGBE_TX_XDP_RING, + __IXGBE_TX_DISABLED, }; #define ring_uses_build_skb(ring) \ @@ -347,6 +352,10 @@ struct ixgbe_ring { struct ixgbe_rx_queue_stats rx_stats; }; struct xdp_rxq_info xdp_rxq; + struct xdp_umem *xsk_umem; + struct zero_copy_allocator zca; /* ZC allocator anchor */ + u16 ring_idx; /* {rx,tx,xdp}_ring back reference idx */ + u16 rx_buf_len; } ____cacheline_internodealigned_in_smp; enum ixgbe_ring_f_enum { @@ -764,6 +773,11 @@ struct ixgbe_adapter { #ifdef CONFIG_XFRM_OFFLOAD struct ixgbe_ipsec *ipsec; #endif /* CONFIG_XFRM_OFFLOAD */ + + /* AF_XDP zero-copy */ + struct xdp_umem **xsk_umems; + u16 num_xsk_umems_used; + u16 num_xsk_umems; }; static inline u8 ixgbe_max_rss_indices(struct ixgbe_adapter *adapter) diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c index 970f71d5da04..0bd1294ba517 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c @@ -3485,17 +3485,6 @@ void ixgbe_set_vlan_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf) } /** - * ixgbe_fw_recovery_mode - Check if in FW NVM recovery mode - * @hw: pointer to hardware structure - */ -bool ixgbe_fw_recovery_mode(struct ixgbe_hw *hw) -{ - if (hw->mac.ops.fw_recovery_mode) - return hw->mac.ops.fw_recovery_mode(hw); - return false; -} - -/** * ixgbe_get_device_caps_generic - Get additional device capabilities * @hw: pointer to hardware structure * @device_caps: the EEPROM word with the extra device capabilities 
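The ixgbe_rx_buffer change above overlays the page-flip fields with the AF_XDP zero-copy fields in an anonymous union, so a single rx_buffer_info array serves both datapaths and stays compact; which half is live is decided per ring by whether ring->xsk_umem is set. A freestanding illustration of the same layout trick (toy types, not the driver's):

#include <stdint.h>
#include <stdio.h>

/* Toy version of the two buffer interpretations sharing one slot. */
struct rx_buffer {
	uint64_t dma;	/* common to both paths */
	union {
		struct {		/* page-flip path */
			void *page;
			uint32_t page_offset;
			uint16_t pagecnt_bias;
		};
		struct {		/* AF_XDP zero-copy path */
			void *addr;
			uint64_t handle;
		};
	};
};

int main(void)
{
	struct rx_buffer b = { 0 };
	int zero_copy = 1;	/* stands in for ring->xsk_umem != NULL */

	if (zero_copy) {
		b.addr = &b;		/* a UMEM frame address would go here */
		b.handle = 0x1000;
	} else {
		b.page = &b;
		b.page_offset = 0;
	}
	printf("one slot, two views: %zu bytes\n", sizeof(struct rx_buffer));
	return 0;
}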
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c index d361f570ca37..62e6499e4146 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c @@ -1055,7 +1055,7 @@ static int ixgbe_alloc_q_vectors(struct ixgbe_adapter *adapter) int txr_remaining = adapter->num_tx_queues; int xdp_remaining = adapter->num_xdp_queues; int rxr_idx = 0, txr_idx = 0, xdp_idx = 0, v_idx = 0; - int err; + int err, i; /* only one q_vector if MSI-X is disabled. */ if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) @@ -1097,6 +1097,21 @@ static int ixgbe_alloc_q_vectors(struct ixgbe_adapter *adapter) xdp_idx += xqpv; } + for (i = 0; i < adapter->num_rx_queues; i++) { + if (adapter->rx_ring[i]) + adapter->rx_ring[i]->ring_idx = i; + } + + for (i = 0; i < adapter->num_tx_queues; i++) { + if (adapter->tx_ring[i]) + adapter->tx_ring[i]->ring_idx = i; + } + + for (i = 0; i < adapter->num_xdp_queues; i++) { + if (adapter->xdp_ring[i]) + adapter->xdp_ring[i]->ring_idx = i; + } + return 0; err_out: diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 140e87a10ff5..51268772a999 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -34,12 +34,14 @@ #include <net/tc_act/tc_mirred.h> #include <net/vxlan.h> #include <net/mpls.h> +#include <net/xdp_sock.h> #include "ixgbe.h" #include "ixgbe_common.h" #include "ixgbe_dcb_82599.h" #include "ixgbe_sriov.h" #include "ixgbe_model.h" +#include "ixgbe_txrx_common.h" char ixgbe_driver_name[] = "ixgbe"; static const char ixgbe_driver_string[] = @@ -893,8 +895,8 @@ static void ixgbe_set_ivar(struct ixgbe_adapter *adapter, s8 direction, } } -static inline void ixgbe_irq_rearm_queues(struct ixgbe_adapter *adapter, - u64 qmask) +void ixgbe_irq_rearm_queues(struct ixgbe_adapter *adapter, + u64 qmask) { u32 mask; @@ -1673,9 +1675,9 @@ static void ixgbe_update_rsc_stats(struct ixgbe_ring *rx_ring, * order to populate the hash, checksum, VLAN, timestamp, protocol, and * other fields within the skb. **/ -static void ixgbe_process_skb_fields(struct ixgbe_ring *rx_ring, - union ixgbe_adv_rx_desc *rx_desc, - struct sk_buff *skb) +void ixgbe_process_skb_fields(struct ixgbe_ring *rx_ring, + union ixgbe_adv_rx_desc *rx_desc, + struct sk_buff *skb) { struct net_device *dev = rx_ring->netdev; u32 flags = rx_ring->q_vector->adapter->flags; @@ -1708,8 +1710,8 @@ static void ixgbe_process_skb_fields(struct ixgbe_ring *rx_ring, skb->protocol = eth_type_trans(skb, dev); } -static void ixgbe_rx_skb(struct ixgbe_q_vector *q_vector, - struct sk_buff *skb) +void ixgbe_rx_skb(struct ixgbe_q_vector *q_vector, + struct sk_buff *skb) { napi_gro_receive(&q_vector->napi, skb); } @@ -1868,9 +1870,9 @@ static void ixgbe_dma_sync_frag(struct ixgbe_ring *rx_ring, * * Returns true if an error was encountered and skb was freed. 
**/ -static bool ixgbe_cleanup_headers(struct ixgbe_ring *rx_ring, - union ixgbe_adv_rx_desc *rx_desc, - struct sk_buff *skb) +bool ixgbe_cleanup_headers(struct ixgbe_ring *rx_ring, + union ixgbe_adv_rx_desc *rx_desc, + struct sk_buff *skb) { struct net_device *netdev = rx_ring->netdev; @@ -2186,14 +2188,6 @@ static struct sk_buff *ixgbe_build_skb(struct ixgbe_ring *rx_ring, return skb; } -#define IXGBE_XDP_PASS 0 -#define IXGBE_XDP_CONSUMED BIT(0) -#define IXGBE_XDP_TX BIT(1) -#define IXGBE_XDP_REDIR BIT(2) - -static int ixgbe_xmit_xdp_ring(struct ixgbe_adapter *adapter, - struct xdp_frame *xdpf); - static struct sk_buff *ixgbe_run_xdp(struct ixgbe_adapter *adapter, struct ixgbe_ring *rx_ring, struct xdp_buff *xdp) @@ -3167,7 +3161,11 @@ int ixgbe_poll(struct napi_struct *napi, int budget) #endif ixgbe_for_each_ring(ring, q_vector->tx) { - if (!ixgbe_clean_tx_irq(q_vector, ring, budget)) + bool wd = ring->xsk_umem ? + ixgbe_clean_xdp_tx_irq(q_vector, ring, budget) : + ixgbe_clean_tx_irq(q_vector, ring, budget); + + if (!wd) clean_complete = false; } @@ -3183,7 +3181,10 @@ int ixgbe_poll(struct napi_struct *napi, int budget) per_ring_budget = budget; ixgbe_for_each_ring(ring, q_vector->rx) { - int cleaned = ixgbe_clean_rx_irq(q_vector, ring, + int cleaned = ring->xsk_umem ? + ixgbe_clean_rx_irq_zc(q_vector, ring, + per_ring_budget) : + ixgbe_clean_rx_irq(q_vector, ring, per_ring_budget); work_done += cleaned; @@ -3196,11 +3197,13 @@ int ixgbe_poll(struct napi_struct *napi, int budget) return budget; /* all work done, exit the polling mode */ - napi_complete_done(napi, work_done); - if (adapter->rx_itr_setting & 1) - ixgbe_set_itr(q_vector); - if (!test_bit(__IXGBE_DOWN, &adapter->state)) - ixgbe_irq_enable_queues(adapter, BIT_ULL(q_vector->v_idx)); + if (likely(napi_complete_done(napi, work_done))) { + if (adapter->rx_itr_setting & 1) + ixgbe_set_itr(q_vector); + if (!test_bit(__IXGBE_DOWN, &adapter->state)) + ixgbe_irq_enable_queues(adapter, + BIT_ULL(q_vector->v_idx)); + } return min(work_done, budget - 1); } @@ -3473,6 +3476,10 @@ void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter, u32 txdctl = IXGBE_TXDCTL_ENABLE; u8 reg_idx = ring->reg_idx; + ring->xsk_umem = NULL; + if (ring_is_xdp(ring)) + ring->xsk_umem = ixgbe_xsk_umem(adapter, ring); + /* disable queue to avoid issues while updating state */ IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), 0); IXGBE_WRITE_FLUSH(hw); @@ -3577,12 +3584,18 @@ static void ixgbe_setup_mtqc(struct ixgbe_adapter *adapter) else mtqc |= IXGBE_MTQC_64VF; } else { - if (tcs > 4) + if (tcs > 4) { mtqc = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ; - else if (tcs > 1) + } else if (tcs > 1) { mtqc = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_4TC_4TQ; - else - mtqc = IXGBE_MTQC_64Q_1PB; + } else { + u8 max_txq = adapter->num_tx_queues + + adapter->num_xdp_queues; + if (max_txq > 63) + mtqc = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_4TC_4TQ; + else + mtqc = IXGBE_MTQC_64Q_1PB; + } } IXGBE_WRITE_REG(hw, IXGBE_MTQC, mtqc); @@ -3705,10 +3718,27 @@ static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter, srrctl = IXGBE_RX_HDR_SIZE << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT; /* configure the packet buffer length */ - if (test_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state)) + if (rx_ring->xsk_umem) { + u32 xsk_buf_len = rx_ring->xsk_umem->chunk_size_nohr - + XDP_PACKET_HEADROOM; + + /* If the MAC support setting RXDCTL.RLPML, the + * SRRCTL[n].BSIZEPKT is set to PAGE_SIZE and + * RXDCTL.RLPML is set to the actual UMEM buffer + * size. 
If not, then we are stuck with a 1k buffer + * size resolution. In this case frames larger than + * the UMEM buffer size viewed in a 1k resolution will + * be dropped. + */ + if (hw->mac.type != ixgbe_mac_82599EB) + srrctl |= PAGE_SIZE >> IXGBE_SRRCTL_BSIZEPKT_SHIFT; + else + srrctl |= xsk_buf_len >> IXGBE_SRRCTL_BSIZEPKT_SHIFT; + } else if (test_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state)) { srrctl |= IXGBE_RXBUFFER_3K >> IXGBE_SRRCTL_BSIZEPKT_SHIFT; - else + } else { srrctl |= IXGBE_RXBUFFER_2K >> IXGBE_SRRCTL_BSIZEPKT_SHIFT; + } /* configure descriptor type */ srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF; @@ -4031,6 +4061,19 @@ void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter, u32 rxdctl; u8 reg_idx = ring->reg_idx; + xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq); + ring->xsk_umem = ixgbe_xsk_umem(adapter, ring); + if (ring->xsk_umem) { + ring->zca.free = ixgbe_zca_free; + WARN_ON(xdp_rxq_info_reg_mem_model(&ring->xdp_rxq, + MEM_TYPE_ZERO_COPY, + &ring->zca)); + + } else { + WARN_ON(xdp_rxq_info_reg_mem_model(&ring->xdp_rxq, + MEM_TYPE_PAGE_SHARED, NULL)); + } + /* disable queue to avoid use of these values while updating state */ rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx)); rxdctl &= ~IXGBE_RXDCTL_ENABLE; @@ -4080,6 +4123,17 @@ void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter, #endif } + if (ring->xsk_umem && hw->mac.type != ixgbe_mac_82599EB) { + u32 xsk_buf_len = ring->xsk_umem->chunk_size_nohr - + XDP_PACKET_HEADROOM; + + rxdctl &= ~(IXGBE_RXDCTL_RLPMLMASK | + IXGBE_RXDCTL_RLPML_EN); + rxdctl |= xsk_buf_len | IXGBE_RXDCTL_RLPML_EN; + + ring->rx_buf_len = xsk_buf_len; + } + /* initialize rx_buffer_info */ memset(ring->rx_buffer_info, 0, sizeof(struct ixgbe_rx_buffer) * ring->count); @@ -4093,7 +4147,10 @@ void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter, IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), rxdctl); ixgbe_rx_desc_queue_enable(adapter, ring); - ixgbe_alloc_rx_buffers(ring, ixgbe_desc_unused(ring)); + if (ring->xsk_umem) + ixgbe_alloc_rx_buffers_zc(ring, ixgbe_desc_unused(ring)); + else + ixgbe_alloc_rx_buffers(ring, ixgbe_desc_unused(ring)); } static void ixgbe_setup_psrtype(struct ixgbe_adapter *adapter) @@ -5173,6 +5230,7 @@ static void ixgbe_fdir_filter_restore(struct ixgbe_adapter *adapter) struct ixgbe_hw *hw = &adapter->hw; struct hlist_node *node2; struct ixgbe_fdir_filter *filter; + u64 action; spin_lock(&adapter->fdir_perfect_lock); @@ -5181,12 +5239,17 @@ static void ixgbe_fdir_filter_restore(struct ixgbe_adapter *adapter) hlist_for_each_entry_safe(filter, node2, &adapter->fdir_filter_list, fdir_node) { + action = filter->action; + if (action != IXGBE_FDIR_DROP_QUEUE && action != 0) + action = + (action >> ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF) - 1; + ixgbe_fdir_write_perfect_filter_82599(hw, &filter->filter, filter->sw_idx, - (filter->action == IXGBE_FDIR_DROP_QUEUE) ? + (action == IXGBE_FDIR_DROP_QUEUE) ? 
IXGBE_FDIR_DROP_QUEUE : - adapter->rx_ring[filter->action]->reg_idx); + adapter->rx_ring[action]->reg_idx); } spin_unlock(&adapter->fdir_perfect_lock); @@ -5201,6 +5264,11 @@ static void ixgbe_clean_rx_ring(struct ixgbe_ring *rx_ring) u16 i = rx_ring->next_to_clean; struct ixgbe_rx_buffer *rx_buffer = &rx_ring->rx_buffer_info[i]; + if (rx_ring->xsk_umem) { + ixgbe_xsk_clean_rx_ring(rx_ring); + goto skip_free; + } + /* Free all the Rx ring sk_buffs */ while (i != rx_ring->next_to_alloc) { if (rx_buffer->skb) { @@ -5239,6 +5307,7 @@ static void ixgbe_clean_rx_ring(struct ixgbe_ring *rx_ring) } } +skip_free: rx_ring->next_to_alloc = 0; rx_ring->next_to_clean = 0; rx_ring->next_to_use = 0; @@ -5883,6 +5952,11 @@ static void ixgbe_clean_tx_ring(struct ixgbe_ring *tx_ring) u16 i = tx_ring->next_to_clean; struct ixgbe_tx_buffer *tx_buffer = &tx_ring->tx_buffer_info[i]; + if (tx_ring->xsk_umem) { + ixgbe_xsk_clean_tx_ring(tx_ring); + goto out; + } + while (i != tx_ring->next_to_use) { union ixgbe_adv_tx_desc *eop_desc, *tx_desc; @@ -5934,6 +6008,7 @@ static void ixgbe_clean_tx_ring(struct ixgbe_ring *tx_ring) if (!ring_is_xdp(tx_ring)) netdev_tx_reset_queue(txring_txq(tx_ring)); +out: /* reset next_to_use and next_to_clean */ tx_ring->next_to_use = 0; tx_ring->next_to_clean = 0; @@ -6434,7 +6509,7 @@ int ixgbe_setup_rx_resources(struct ixgbe_adapter *adapter, struct device *dev = rx_ring->dev; int orig_node = dev_to_node(dev); int ring_node = -1; - int size, err; + int size; size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count; @@ -6471,13 +6546,6 @@ int ixgbe_setup_rx_resources(struct ixgbe_adapter *adapter, rx_ring->queue_index) < 0) goto err; - err = xdp_rxq_info_reg_mem_model(&rx_ring->xdp_rxq, - MEM_TYPE_PAGE_SHARED, NULL); - if (err) { - xdp_rxq_info_unreg(&rx_ring->xdp_rxq); - goto err; - } - rx_ring->xdp_prog = adapter->xdp_prog; return 0; @@ -8102,9 +8170,6 @@ static inline int ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, u16 size) return __ixgbe_maybe_stop_tx(tx_ring, size); } -#define IXGBE_TXD_CMD (IXGBE_TXD_CMD_EOP | \ - IXGBE_TXD_CMD_RS) - static int ixgbe_tx_map(struct ixgbe_ring *tx_ring, struct ixgbe_tx_buffer *first, const u8 hdr_len) @@ -8457,8 +8522,8 @@ static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb, } #endif -static int ixgbe_xmit_xdp_ring(struct ixgbe_adapter *adapter, - struct xdp_frame *xdpf) +int ixgbe_xmit_xdp_ring(struct ixgbe_adapter *adapter, + struct xdp_frame *xdpf) { struct ixgbe_ring *ring = adapter->xdp_ring[smp_processor_id()]; struct ixgbe_tx_buffer *tx_buffer; @@ -8680,6 +8745,8 @@ static netdev_tx_t __ixgbe_xmit_frame(struct sk_buff *skb, return NETDEV_TX_OK; tx_ring = ring ? ring : adapter->tx_ring[skb->queue_mapping]; + if (unlikely(test_bit(__IXGBE_TX_DISABLED, &tx_ring->state))) + return NETDEV_TX_BUSY; return ixgbe_xmit_frame_ring(skb, adapter, tx_ring); } @@ -10191,12 +10258,19 @@ static int ixgbe_xdp(struct net_device *dev, struct netdev_bpf *xdp) xdp->prog_id = adapter->xdp_prog ? adapter->xdp_prog->aux->id : 0; return 0; + case XDP_QUERY_XSK_UMEM: + return ixgbe_xsk_umem_query(adapter, &xdp->xsk.umem, + xdp->xsk.queue_id); + case XDP_SETUP_XSK_UMEM: + return ixgbe_xsk_umem_setup(adapter, xdp->xsk.umem, + xdp->xsk.queue_id); + default: return -EINVAL; } } -static void ixgbe_xdp_ring_update_tail(struct ixgbe_ring *ring) +void ixgbe_xdp_ring_update_tail(struct ixgbe_ring *ring) { /* Force memory writes to complete before letting h/w know there * are new descriptors to fetch. 
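The comment above belongs to ixgbe_xdp_ring_update_tail(), which this patch un-statics and shares via ixgbe_txrx_common.h so the zero-copy path can reuse it: descriptor writes must be published with a write barrier before the doorbell write tells hardware to fetch them. The same pattern in isolation (the ring struct is a stand-in; wmb() and writel() are the kernel primitives the driver uses):

/* Illustrative tail-bump pattern; the struct layout is a stand-in, the
 * barrier-then-doorbell ordering is the point.
 */
#include <linux/io.h>

struct toy_ring {
	unsigned int next_to_use;
	void __iomem *tail;
};

static void toy_ring_update_tail(struct toy_ring *ring)
{
	/* Ensure descriptor writes are visible in memory before the
	 * doorbell write reaches the device and triggers a fetch.
	 */
	wmb();
	writel(ring->next_to_use, ring->tail);
}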
@@ -10226,6 +10300,9 @@ static int ixgbe_xdp_xmit(struct net_device *dev, int n, if (unlikely(!ring)) return -ENXIO; + if (unlikely(test_bit(__IXGBE_TX_DISABLED, &ring->state))) + return -ENXIO; + for (i = 0; i < n; i++) { struct xdp_frame *xdpf = frames[i]; int err; @@ -10287,8 +10364,162 @@ static const struct net_device_ops ixgbe_netdev_ops = { .ndo_features_check = ixgbe_features_check, .ndo_bpf = ixgbe_xdp, .ndo_xdp_xmit = ixgbe_xdp_xmit, + .ndo_xsk_async_xmit = ixgbe_xsk_async_xmit, }; +static void ixgbe_disable_txr_hw(struct ixgbe_adapter *adapter, + struct ixgbe_ring *tx_ring) +{ + unsigned long wait_delay, delay_interval; + struct ixgbe_hw *hw = &adapter->hw; + u8 reg_idx = tx_ring->reg_idx; + int wait_loop; + u32 txdctl; + + IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), IXGBE_TXDCTL_SWFLSH); + + /* delay mechanism from ixgbe_disable_tx */ + delay_interval = ixgbe_get_completion_timeout(adapter) / 100; + + wait_loop = IXGBE_MAX_RX_DESC_POLL; + wait_delay = delay_interval; + + while (wait_loop--) { + usleep_range(wait_delay, wait_delay + 10); + wait_delay += delay_interval * 2; + txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(reg_idx)); + + if (!(txdctl & IXGBE_TXDCTL_ENABLE)) + return; + } + + e_err(drv, "TXDCTL.ENABLE not cleared within the polling period\n"); +} + +static void ixgbe_disable_txr(struct ixgbe_adapter *adapter, + struct ixgbe_ring *tx_ring) +{ + set_bit(__IXGBE_TX_DISABLED, &tx_ring->state); + ixgbe_disable_txr_hw(adapter, tx_ring); +} + +static void ixgbe_disable_rxr_hw(struct ixgbe_adapter *adapter, + struct ixgbe_ring *rx_ring) +{ + unsigned long wait_delay, delay_interval; + struct ixgbe_hw *hw = &adapter->hw; + u8 reg_idx = rx_ring->reg_idx; + int wait_loop; + u32 rxdctl; + + rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx)); + rxdctl &= ~IXGBE_RXDCTL_ENABLE; + rxdctl |= IXGBE_RXDCTL_SWFLSH; + + /* write value back with RXDCTL.ENABLE bit cleared */ + IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), rxdctl); + + /* RXDCTL.EN may not change on 82598 if link is down, so skip it */ + if (hw->mac.type == ixgbe_mac_82598EB && + !(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP)) + return; + + /* delay mechanism from ixgbe_disable_rx */ + delay_interval = ixgbe_get_completion_timeout(adapter) / 100; + + wait_loop = IXGBE_MAX_RX_DESC_POLL; + wait_delay = delay_interval; + + while (wait_loop--) { + usleep_range(wait_delay, wait_delay + 10); + wait_delay += delay_interval * 2; + rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx)); + + if (!(rxdctl & IXGBE_RXDCTL_ENABLE)) + return; + } + + e_err(drv, "RXDCTL.ENABLE not cleared within the polling period\n"); +} + +static void ixgbe_reset_txr_stats(struct ixgbe_ring *tx_ring) +{ + memset(&tx_ring->stats, 0, sizeof(tx_ring->stats)); + memset(&tx_ring->tx_stats, 0, sizeof(tx_ring->tx_stats)); +} + +static void ixgbe_reset_rxr_stats(struct ixgbe_ring *rx_ring) +{ + memset(&rx_ring->stats, 0, sizeof(rx_ring->stats)); + memset(&rx_ring->rx_stats, 0, sizeof(rx_ring->rx_stats)); +} + +/** + * ixgbe_txrx_ring_disable - Disable Rx/Tx/XDP Tx rings + * @adapter: adapter structure + * @ring: ring index + * + * This function disables a certain Rx/Tx/XDP Tx ring. The function + * assumes that the netdev is running. 
+ **/ +void ixgbe_txrx_ring_disable(struct ixgbe_adapter *adapter, int ring) +{ + struct ixgbe_ring *rx_ring, *tx_ring, *xdp_ring; + + rx_ring = adapter->rx_ring[ring]; + tx_ring = adapter->tx_ring[ring]; + xdp_ring = adapter->xdp_ring[ring]; + + ixgbe_disable_txr(adapter, tx_ring); + if (xdp_ring) + ixgbe_disable_txr(adapter, xdp_ring); + ixgbe_disable_rxr_hw(adapter, rx_ring); + + if (xdp_ring) + synchronize_sched(); + + /* Rx/Tx/XDP Tx share the same napi context. */ + napi_disable(&rx_ring->q_vector->napi); + + ixgbe_clean_tx_ring(tx_ring); + if (xdp_ring) + ixgbe_clean_tx_ring(xdp_ring); + ixgbe_clean_rx_ring(rx_ring); + + ixgbe_reset_txr_stats(tx_ring); + if (xdp_ring) + ixgbe_reset_txr_stats(xdp_ring); + ixgbe_reset_rxr_stats(rx_ring); +} + +/** + * ixgbe_txrx_ring_enable - Enable Rx/Tx/XDP Tx rings + * @adapter: adapter structure + * @ring: ring index + * + * This function enables a certain Rx/Tx/XDP Tx ring. The function + * assumes that the netdev is running. + **/ +void ixgbe_txrx_ring_enable(struct ixgbe_adapter *adapter, int ring) +{ + struct ixgbe_ring *rx_ring, *tx_ring, *xdp_ring; + + rx_ring = adapter->rx_ring[ring]; + tx_ring = adapter->tx_ring[ring]; + xdp_ring = adapter->xdp_ring[ring]; + + /* Rx/Tx/XDP Tx share the same napi context. */ + napi_enable(&rx_ring->q_vector->napi); + + ixgbe_configure_tx_ring(adapter, tx_ring); + if (xdp_ring) + ixgbe_configure_tx_ring(adapter, xdp_ring); + ixgbe_configure_rx_ring(adapter, rx_ring); + + clear_bit(__IXGBE_TX_DISABLED, &tx_ring->state); + clear_bit(__IXGBE_TX_DISABLED, &xdp_ring->state); +} + /** * ixgbe_enumerate_functions - Get the number of ports this device has * @adapter: adapter structure diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_txrx_common.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_txrx_common.h new file mode 100644 index 000000000000..53d4089f5644 --- /dev/null +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_txrx_common.h @@ -0,0 +1,50 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2018 Intel Corporation. 
*/ + +#ifndef _IXGBE_TXRX_COMMON_H_ +#define _IXGBE_TXRX_COMMON_H_ + +#define IXGBE_XDP_PASS 0 +#define IXGBE_XDP_CONSUMED BIT(0) +#define IXGBE_XDP_TX BIT(1) +#define IXGBE_XDP_REDIR BIT(2) + +#define IXGBE_TXD_CMD (IXGBE_TXD_CMD_EOP | \ + IXGBE_TXD_CMD_RS) + +int ixgbe_xmit_xdp_ring(struct ixgbe_adapter *adapter, + struct xdp_frame *xdpf); +bool ixgbe_cleanup_headers(struct ixgbe_ring *rx_ring, + union ixgbe_adv_rx_desc *rx_desc, + struct sk_buff *skb); +void ixgbe_process_skb_fields(struct ixgbe_ring *rx_ring, + union ixgbe_adv_rx_desc *rx_desc, + struct sk_buff *skb); +void ixgbe_rx_skb(struct ixgbe_q_vector *q_vector, + struct sk_buff *skb); +void ixgbe_xdp_ring_update_tail(struct ixgbe_ring *ring); +void ixgbe_irq_rearm_queues(struct ixgbe_adapter *adapter, u64 qmask); + +void ixgbe_txrx_ring_disable(struct ixgbe_adapter *adapter, int ring); +void ixgbe_txrx_ring_enable(struct ixgbe_adapter *adapter, int ring); + +struct xdp_umem *ixgbe_xsk_umem(struct ixgbe_adapter *adapter, + struct ixgbe_ring *ring); +int ixgbe_xsk_umem_query(struct ixgbe_adapter *adapter, struct xdp_umem **umem, + u16 qid); +int ixgbe_xsk_umem_setup(struct ixgbe_adapter *adapter, struct xdp_umem *umem, + u16 qid); + +void ixgbe_zca_free(struct zero_copy_allocator *alloc, unsigned long handle); + +void ixgbe_alloc_rx_buffers_zc(struct ixgbe_ring *rx_ring, u16 cleaned_count); +int ixgbe_clean_rx_irq_zc(struct ixgbe_q_vector *q_vector, + struct ixgbe_ring *rx_ring, + const int budget); +void ixgbe_xsk_clean_rx_ring(struct ixgbe_ring *rx_ring); +bool ixgbe_clean_xdp_tx_irq(struct ixgbe_q_vector *q_vector, + struct ixgbe_ring *tx_ring, int napi_budget); +int ixgbe_xsk_async_xmit(struct net_device *dev, u32 queue_id); +void ixgbe_xsk_clean_tx_ring(struct ixgbe_ring *tx_ring); + +#endif /* #define _IXGBE_TXRX_COMMON_H_ */ diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c new file mode 100644 index 000000000000..65c3e2c979d4 --- /dev/null +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c @@ -0,0 +1,801 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2018 Intel Corporation. 
 */ + +#include <linux/bpf_trace.h> +#include <net/xdp_sock.h> +#include <net/xdp.h> + +#include "ixgbe.h" +#include "ixgbe_txrx_common.h" + +struct xdp_umem *ixgbe_xsk_umem(struct ixgbe_adapter *adapter, + struct ixgbe_ring *ring) +{ + bool xdp_on = READ_ONCE(adapter->xdp_prog); + int qid = ring->ring_idx; + + if (!adapter->xsk_umems || qid >= adapter->num_xsk_umems || + !adapter->xsk_umems[qid] || !xdp_on) + return NULL; + + return adapter->xsk_umems[qid]; +} + +static int ixgbe_alloc_xsk_umems(struct ixgbe_adapter *adapter) +{ + if (adapter->xsk_umems) + return 0; + + adapter->num_xsk_umems_used = 0; + adapter->num_xsk_umems = adapter->num_rx_queues; + adapter->xsk_umems = kcalloc(adapter->num_xsk_umems, + sizeof(*adapter->xsk_umems), + GFP_KERNEL); + if (!adapter->xsk_umems) { + adapter->num_xsk_umems = 0; + return -ENOMEM; + } + + return 0; +} + +static int ixgbe_add_xsk_umem(struct ixgbe_adapter *adapter, + struct xdp_umem *umem, + u16 qid) +{ + int err; + + err = ixgbe_alloc_xsk_umems(adapter); + if (err) + return err; + + adapter->xsk_umems[qid] = umem; + adapter->num_xsk_umems_used++; + + return 0; +} + +static void ixgbe_remove_xsk_umem(struct ixgbe_adapter *adapter, u16 qid) +{ + adapter->xsk_umems[qid] = NULL; + adapter->num_xsk_umems_used--; + + if (adapter->num_xsk_umems == 0) { + kfree(adapter->xsk_umems); + adapter->xsk_umems = NULL; + adapter->num_xsk_umems = 0; + } +} + +static int ixgbe_xsk_umem_dma_map(struct ixgbe_adapter *adapter, + struct xdp_umem *umem) +{ + struct device *dev = &adapter->pdev->dev; + unsigned int i, j; + dma_addr_t dma; + + for (i = 0; i < umem->npgs; i++) { + dma = dma_map_page_attrs(dev, umem->pgs[i], 0, PAGE_SIZE, + DMA_BIDIRECTIONAL, IXGBE_RX_DMA_ATTR); + if (dma_mapping_error(dev, dma)) + goto out_unmap; + + umem->pages[i].dma = dma; + } + + return 0; + +out_unmap: + for (j = 0; j < i; j++) { + dma_unmap_page_attrs(dev, umem->pages[j].dma, PAGE_SIZE, + DMA_BIDIRECTIONAL, IXGBE_RX_DMA_ATTR); + umem->pages[j].dma = 0; + } + + return -1; +} + +static void ixgbe_xsk_umem_dma_unmap(struct ixgbe_adapter *adapter, + struct xdp_umem *umem) +{ + struct device *dev = &adapter->pdev->dev; + unsigned int i; + + for (i = 0; i < umem->npgs; i++) { + dma_unmap_page_attrs(dev, umem->pages[i].dma, PAGE_SIZE, + DMA_BIDIRECTIONAL, IXGBE_RX_DMA_ATTR); + + umem->pages[i].dma = 0; + } +} + +static int ixgbe_xsk_umem_enable(struct ixgbe_adapter *adapter, + struct xdp_umem *umem, + u16 qid) +{ + struct xdp_umem_fq_reuse *reuseq; + bool if_running; + int err; + + if (qid >= adapter->num_rx_queues) + return -EINVAL; + + if (adapter->xsk_umems) { + if (qid >= adapter->num_xsk_umems) + return -EINVAL; + if (adapter->xsk_umems[qid]) + return -EBUSY; + } + + reuseq = xsk_reuseq_prepare(adapter->rx_ring[0]->count); + if (!reuseq) + return -ENOMEM; + + xsk_reuseq_free(xsk_reuseq_swap(umem, reuseq)); + + err = ixgbe_xsk_umem_dma_map(adapter, umem); + if (err) + return err; + + if_running = netif_running(adapter->netdev) && + READ_ONCE(adapter->xdp_prog); + + if (if_running) + ixgbe_txrx_ring_disable(adapter, qid); + + err = ixgbe_add_xsk_umem(adapter, umem, qid); + + if (if_running) + ixgbe_txrx_ring_enable(adapter, qid); + + return err; +} + +static int ixgbe_xsk_umem_disable(struct ixgbe_adapter *adapter, u16 qid) +{ + bool if_running; + + if (!adapter->xsk_umems || qid >= adapter->num_xsk_umems || + !adapter->xsk_umems[qid]) + return -EINVAL; + + if_running = netif_running(adapter->netdev) && + READ_ONCE(adapter->xdp_prog); + + if (if_running) + 
ixgbe_txrx_ring_disable(adapter, qid); + + ixgbe_xsk_umem_dma_unmap(adapter, adapter->xsk_umems[qid]); + ixgbe_remove_xsk_umem(adapter, qid); + + if (if_running) + ixgbe_txrx_ring_enable(adapter, qid); + + return 0; +} + +int ixgbe_xsk_umem_query(struct ixgbe_adapter *adapter, struct xdp_umem **umem, + u16 qid) +{ + if (qid >= adapter->num_rx_queues) + return -EINVAL; + + if (adapter->xsk_umems) { + if (qid >= adapter->num_xsk_umems) + return -EINVAL; + *umem = adapter->xsk_umems[qid]; + return 0; + } + + *umem = NULL; + return 0; +} + +int ixgbe_xsk_umem_setup(struct ixgbe_adapter *adapter, struct xdp_umem *umem, + u16 qid) +{ + return umem ? ixgbe_xsk_umem_enable(adapter, umem, qid) : + ixgbe_xsk_umem_disable(adapter, qid); +} + +static int ixgbe_run_xdp_zc(struct ixgbe_adapter *adapter, + struct ixgbe_ring *rx_ring, + struct xdp_buff *xdp) +{ + int err, result = IXGBE_XDP_PASS; + struct bpf_prog *xdp_prog; + struct xdp_frame *xdpf; + u32 act; + + rcu_read_lock(); + xdp_prog = READ_ONCE(rx_ring->xdp_prog); + act = bpf_prog_run_xdp(xdp_prog, xdp); + xdp->handle += xdp->data - xdp->data_hard_start; + switch (act) { + case XDP_PASS: + break; + case XDP_TX: + xdpf = convert_to_xdp_frame(xdp); + if (unlikely(!xdpf)) { + result = IXGBE_XDP_CONSUMED; + break; + } + result = ixgbe_xmit_xdp_ring(adapter, xdpf); + break; + case XDP_REDIRECT: + err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog); + result = !err ? IXGBE_XDP_REDIR : IXGBE_XDP_CONSUMED; + break; + default: + bpf_warn_invalid_xdp_action(act); + /* fallthrough */ + case XDP_ABORTED: + trace_xdp_exception(rx_ring->netdev, xdp_prog, act); + /* fallthrough -- handle aborts by dropping packet */ + case XDP_DROP: + result = IXGBE_XDP_CONSUMED; + break; + } + rcu_read_unlock(); + return result; +} + +static struct +ixgbe_rx_buffer *ixgbe_get_rx_buffer_zc(struct ixgbe_ring *rx_ring, + unsigned int size) +{ + struct ixgbe_rx_buffer *bi; + + bi = &rx_ring->rx_buffer_info[rx_ring->next_to_clean]; + + /* we are reusing so sync this buffer for CPU use */ + dma_sync_single_range_for_cpu(rx_ring->dev, + bi->dma, 0, + size, + DMA_BIDIRECTIONAL); + + return bi; +} + +static void ixgbe_reuse_rx_buffer_zc(struct ixgbe_ring *rx_ring, + struct ixgbe_rx_buffer *obi) +{ + unsigned long mask = (unsigned long)rx_ring->xsk_umem->chunk_mask; + u64 hr = rx_ring->xsk_umem->headroom + XDP_PACKET_HEADROOM; + u16 nta = rx_ring->next_to_alloc; + struct ixgbe_rx_buffer *nbi; + + nbi = &rx_ring->rx_buffer_info[rx_ring->next_to_alloc]; + /* update, and store next to alloc */ + nta++; + rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0; + + /* transfer page from old buffer to new buffer */ + nbi->dma = obi->dma & mask; + nbi->dma += hr; + + nbi->addr = (void *)((unsigned long)obi->addr & mask); + nbi->addr += hr; + + nbi->handle = obi->handle & mask; + nbi->handle += rx_ring->xsk_umem->headroom; + + obi->addr = NULL; + obi->skb = NULL; +} + +void ixgbe_zca_free(struct zero_copy_allocator *alloc, unsigned long handle) +{ + struct ixgbe_rx_buffer *bi; + struct ixgbe_ring *rx_ring; + u64 hr, mask; + u16 nta; + + rx_ring = container_of(alloc, struct ixgbe_ring, zca); + hr = rx_ring->xsk_umem->headroom + XDP_PACKET_HEADROOM; + mask = rx_ring->xsk_umem->chunk_mask; + + nta = rx_ring->next_to_alloc; + bi = rx_ring->rx_buffer_info; + + nta++; + rx_ring->next_to_alloc = (nta < rx_ring->count) ? 
nta : 0; + + handle &= mask; + + bi->dma = xdp_umem_get_dma(rx_ring->xsk_umem, handle); + bi->dma += hr; + + bi->addr = xdp_umem_get_data(rx_ring->xsk_umem, handle); + bi->addr += hr; + + bi->handle = (u64)handle + rx_ring->xsk_umem->headroom; +} + +static bool ixgbe_alloc_buffer_zc(struct ixgbe_ring *rx_ring, + struct ixgbe_rx_buffer *bi) +{ + struct xdp_umem *umem = rx_ring->xsk_umem; + void *addr = bi->addr; + u64 handle, hr; + + if (addr) + return true; + + if (!xsk_umem_peek_addr(umem, &handle)) { + rx_ring->rx_stats.alloc_rx_page_failed++; + return false; + } + + hr = umem->headroom + XDP_PACKET_HEADROOM; + + bi->dma = xdp_umem_get_dma(umem, handle); + bi->dma += hr; + + bi->addr = xdp_umem_get_data(umem, handle); + bi->addr += hr; + + bi->handle = handle + umem->headroom; + + xsk_umem_discard_addr(umem); + return true; +} + +static bool ixgbe_alloc_buffer_slow_zc(struct ixgbe_ring *rx_ring, + struct ixgbe_rx_buffer *bi) +{ + struct xdp_umem *umem = rx_ring->xsk_umem; + u64 handle, hr; + + if (!xsk_umem_peek_addr_rq(umem, &handle)) { + rx_ring->rx_stats.alloc_rx_page_failed++; + return false; + } + + handle &= rx_ring->xsk_umem->chunk_mask; + + hr = umem->headroom + XDP_PACKET_HEADROOM; + + bi->dma = xdp_umem_get_dma(umem, handle); + bi->dma += hr; + + bi->addr = xdp_umem_get_data(umem, handle); + bi->addr += hr; + + bi->handle = handle + umem->headroom; + + xsk_umem_discard_addr_rq(umem); + return true; +} + +static __always_inline bool +__ixgbe_alloc_rx_buffers_zc(struct ixgbe_ring *rx_ring, u16 cleaned_count, + bool alloc(struct ixgbe_ring *rx_ring, + struct ixgbe_rx_buffer *bi)) +{ + union ixgbe_adv_rx_desc *rx_desc; + struct ixgbe_rx_buffer *bi; + u16 i = rx_ring->next_to_use; + bool ok = true; + + /* nothing to do */ + if (!cleaned_count) + return true; + + rx_desc = IXGBE_RX_DESC(rx_ring, i); + bi = &rx_ring->rx_buffer_info[i]; + i -= rx_ring->count; + + do { + if (!alloc(rx_ring, bi)) { + ok = false; + break; + } + + /* sync the buffer for use by the device */ + dma_sync_single_range_for_device(rx_ring->dev, bi->dma, + bi->page_offset, + rx_ring->rx_buf_len, + DMA_BIDIRECTIONAL); + + /* Refresh the desc even if buffer_addrs didn't change + * because each write-back erases this info. + */ + rx_desc->read.pkt_addr = cpu_to_le64(bi->dma); + + rx_desc++; + bi++; + i++; + if (unlikely(!i)) { + rx_desc = IXGBE_RX_DESC(rx_ring, 0); + bi = rx_ring->rx_buffer_info; + i -= rx_ring->count; + } + + /* clear the length for the next_to_use descriptor */ + rx_desc->wb.upper.length = 0; + + cleaned_count--; + } while (cleaned_count); + + i += rx_ring->count; + + if (rx_ring->next_to_use != i) { + rx_ring->next_to_use = i; + + /* update next to alloc since we have filled the ring */ + rx_ring->next_to_alloc = i; + + /* Force memory writes to complete before letting h/w + * know there are new descriptors to fetch. (Only + * applicable for weak-ordered memory model archs, + * such as IA-64). 
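+	 * In other words: the pkt_addr writes above must be visible to
+	 * the device before the tail bump below, otherwise the NIC could
+	 * fetch a descriptor that still holds a stale buffer address.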
+ */ + wmb(); + writel(i, rx_ring->tail); + } + + return ok; +} + +void ixgbe_alloc_rx_buffers_zc(struct ixgbe_ring *rx_ring, u16 count) +{ + __ixgbe_alloc_rx_buffers_zc(rx_ring, count, + ixgbe_alloc_buffer_slow_zc); +} + +static bool ixgbe_alloc_rx_buffers_fast_zc(struct ixgbe_ring *rx_ring, + u16 count) +{ + return __ixgbe_alloc_rx_buffers_zc(rx_ring, count, + ixgbe_alloc_buffer_zc); +} + +static struct sk_buff *ixgbe_construct_skb_zc(struct ixgbe_ring *rx_ring, + struct ixgbe_rx_buffer *bi, + struct xdp_buff *xdp) +{ + unsigned int metasize = xdp->data - xdp->data_meta; + unsigned int datasize = xdp->data_end - xdp->data; + struct sk_buff *skb; + + /* allocate a skb to store the frags */ + skb = __napi_alloc_skb(&rx_ring->q_vector->napi, + xdp->data_end - xdp->data_hard_start, + GFP_ATOMIC | __GFP_NOWARN); + if (unlikely(!skb)) + return NULL; + + skb_reserve(skb, xdp->data - xdp->data_hard_start); + memcpy(__skb_put(skb, datasize), xdp->data, datasize); + if (metasize) + skb_metadata_set(skb, metasize); + + ixgbe_reuse_rx_buffer_zc(rx_ring, bi); + return skb; +} + +static void ixgbe_inc_ntc(struct ixgbe_ring *rx_ring) +{ + u32 ntc = rx_ring->next_to_clean + 1; + + ntc = (ntc < rx_ring->count) ? ntc : 0; + rx_ring->next_to_clean = ntc; + prefetch(IXGBE_RX_DESC(rx_ring, ntc)); +} + +int ixgbe_clean_rx_irq_zc(struct ixgbe_q_vector *q_vector, + struct ixgbe_ring *rx_ring, + const int budget) +{ + unsigned int total_rx_bytes = 0, total_rx_packets = 0; + struct ixgbe_adapter *adapter = q_vector->adapter; + u16 cleaned_count = ixgbe_desc_unused(rx_ring); + unsigned int xdp_res, xdp_xmit = 0; + bool failure = false; + struct sk_buff *skb; + struct xdp_buff xdp; + + xdp.rxq = &rx_ring->xdp_rxq; + + while (likely(total_rx_packets < budget)) { + union ixgbe_adv_rx_desc *rx_desc; + struct ixgbe_rx_buffer *bi; + unsigned int size; + + /* return some buffers to hardware, one at a time is too slow */ + if (cleaned_count >= IXGBE_RX_BUFFER_WRITE) { + failure = failure || + !ixgbe_alloc_rx_buffers_fast_zc(rx_ring, + cleaned_count); + cleaned_count = 0; + } + + rx_desc = IXGBE_RX_DESC(rx_ring, rx_ring->next_to_clean); + size = le16_to_cpu(rx_desc->wb.upper.length); + if (!size) + break; + + /* This memory barrier is needed to keep us from reading + * any other fields out of the rx_desc until we know the + * descriptor has been written back + */ + dma_rmb(); + + bi = ixgbe_get_rx_buffer_zc(rx_ring, size); + + if (unlikely(!ixgbe_test_staterr(rx_desc, + IXGBE_RXD_STAT_EOP))) { + struct ixgbe_rx_buffer *next_bi; + + ixgbe_reuse_rx_buffer_zc(rx_ring, bi); + ixgbe_inc_ntc(rx_ring); + next_bi = + &rx_ring->rx_buffer_info[rx_ring->next_to_clean]; + next_bi->skb = ERR_PTR(-EINVAL); + continue; + } + + if (unlikely(bi->skb)) { + ixgbe_reuse_rx_buffer_zc(rx_ring, bi); + ixgbe_inc_ntc(rx_ring); + continue; + } + + xdp.data = bi->addr; + xdp.data_meta = xdp.data; + xdp.data_hard_start = xdp.data - XDP_PACKET_HEADROOM; + xdp.data_end = xdp.data + size; + xdp.handle = bi->handle; + + xdp_res = ixgbe_run_xdp_zc(adapter, rx_ring, &xdp); + + if (xdp_res) { + if (xdp_res & (IXGBE_XDP_TX | IXGBE_XDP_REDIR)) { + xdp_xmit |= xdp_res; + bi->addr = NULL; + bi->skb = NULL; + } else { + ixgbe_reuse_rx_buffer_zc(rx_ring, bi); + } + total_rx_packets++; + total_rx_bytes += size; + + cleaned_count++; + ixgbe_inc_ntc(rx_ring); + continue; + } + + /* XDP_PASS path */ + skb = ixgbe_construct_skb_zc(rx_ring, bi, &xdp); + if (!skb) { + rx_ring->rx_stats.alloc_rx_buff_failed++; + break; + } + + cleaned_count++; + ixgbe_inc_ntc(rx_ring); + 
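+		/* XDP_PASS: the frame was copied out of the umem into a
+		 * regular skb by ixgbe_construct_skb_zc() above (which also
+		 * recycled the zero-copy buffer), so from here on this is
+		 * the normal stack-delivery path.
+		 */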
+		if (eth_skb_pad(skb))
+			continue;
+
+		total_rx_bytes += skb->len;
+		total_rx_packets++;
+
+		ixgbe_process_skb_fields(rx_ring, rx_desc, skb);
+		ixgbe_rx_skb(q_vector, skb);
+	}
+
+	if (xdp_xmit & IXGBE_XDP_REDIR)
+		xdp_do_flush_map();
+
+	if (xdp_xmit & IXGBE_XDP_TX) {
+		struct ixgbe_ring *ring = adapter->xdp_ring[smp_processor_id()];
+
+		/* Force memory writes to complete before letting h/w
+		 * know there are new descriptors to fetch.
+		 */
+		wmb();
+		writel(ring->next_to_use, ring->tail);
+	}
+
+	u64_stats_update_begin(&rx_ring->syncp);
+	rx_ring->stats.packets += total_rx_packets;
+	rx_ring->stats.bytes += total_rx_bytes;
+	u64_stats_update_end(&rx_ring->syncp);
+	q_vector->rx.total_packets += total_rx_packets;
+	q_vector->rx.total_bytes += total_rx_bytes;
+
+	return failure ? budget : (int)total_rx_packets;
+}
+
+void ixgbe_xsk_clean_rx_ring(struct ixgbe_ring *rx_ring)
+{
+	u16 i = rx_ring->next_to_clean;
+	struct ixgbe_rx_buffer *bi = &rx_ring->rx_buffer_info[i];
+
+	while (i != rx_ring->next_to_alloc) {
+		xsk_umem_fq_reuse(rx_ring->xsk_umem, bi->handle);
+		i++;
+		bi++;
+		if (i == rx_ring->count) {
+			i = 0;
+			bi = rx_ring->rx_buffer_info;
+		}
+	}
+}
+
+static bool ixgbe_xmit_zc(struct ixgbe_ring *xdp_ring, unsigned int budget)
+{
+	union ixgbe_adv_tx_desc *tx_desc = NULL;
+	struct ixgbe_tx_buffer *tx_bi;
+	bool work_done = true;
+	u32 len, cmd_type;
+	dma_addr_t dma;
+
+	while (budget-- > 0) {
+		if (unlikely(!ixgbe_desc_unused(xdp_ring))) {
+			work_done = false;
+			break;
+		}
+
+		if (!xsk_umem_consume_tx(xdp_ring->xsk_umem, &dma, &len))
+			break;
+
+		dma_sync_single_for_device(xdp_ring->dev, dma, len,
+					   DMA_BIDIRECTIONAL);
+
+		tx_bi = &xdp_ring->tx_buffer_info[xdp_ring->next_to_use];
+		tx_bi->bytecount = len;
+		tx_bi->xdpf = NULL;
+
+		tx_desc = IXGBE_TX_DESC(xdp_ring, xdp_ring->next_to_use);
+		tx_desc->read.buffer_addr = cpu_to_le64(dma);
+
+		/* put descriptor type bits */
+		cmd_type = IXGBE_ADVTXD_DTYP_DATA |
+			   IXGBE_ADVTXD_DCMD_DEXT |
+			   IXGBE_ADVTXD_DCMD_IFCS;
+		cmd_type |= len | IXGBE_TXD_CMD;
+		tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type);
+		tx_desc->read.olinfo_status =
+			cpu_to_le32(len << IXGBE_ADVTXD_PAYLEN_SHIFT);
+
+		xdp_ring->next_to_use++;
+		if (xdp_ring->next_to_use == xdp_ring->count)
+			xdp_ring->next_to_use = 0;
+	}
+
+	if (tx_desc) {
+		ixgbe_xdp_ring_update_tail(xdp_ring);
+		xsk_umem_consume_tx_done(xdp_ring->xsk_umem);
+	}
+
+	return !!budget && work_done;
+}
+
+static void ixgbe_clean_xdp_tx_buffer(struct ixgbe_ring *tx_ring,
+				      struct ixgbe_tx_buffer *tx_bi)
+{
+	xdp_return_frame(tx_bi->xdpf);
+	dma_unmap_single(tx_ring->dev,
+			 dma_unmap_addr(tx_bi, dma),
+			 dma_unmap_len(tx_bi, len), DMA_TO_DEVICE);
+	dma_unmap_len_set(tx_bi, len, 0);
+}
+
+bool ixgbe_clean_xdp_tx_irq(struct ixgbe_q_vector *q_vector,
+			    struct ixgbe_ring *tx_ring, int napi_budget)
+{
+	unsigned int total_packets = 0, total_bytes = 0;
+	u32 i = tx_ring->next_to_clean, xsk_frames = 0;
+	unsigned int budget = q_vector->tx.work_limit;
+	struct xdp_umem *umem = tx_ring->xsk_umem;
+	union ixgbe_adv_tx_desc *tx_desc;
+	struct ixgbe_tx_buffer *tx_bi;
+	bool xmit_done;
+
+	tx_bi = &tx_ring->tx_buffer_info[i];
+	tx_desc = IXGBE_TX_DESC(tx_ring, i);
+	i -= tx_ring->count;
+
+	do {
+		if (!(tx_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)))
+			break;
+
+		total_bytes += tx_bi->bytecount;
+		total_packets += tx_bi->gso_segs;
+
+		if (tx_bi->xdpf)
+			ixgbe_clean_xdp_tx_buffer(tx_ring, tx_bi);
+		else
+			xsk_frames++;
+
+		tx_bi->xdpf = NULL;
+
+		tx_bi++;
+		tx_desc++;
+		i++;
+		if
(unlikely(!i)) { + i -= tx_ring->count; + tx_bi = tx_ring->tx_buffer_info; + tx_desc = IXGBE_TX_DESC(tx_ring, 0); + } + + /* issue prefetch for next Tx descriptor */ + prefetch(tx_desc); + + /* update budget accounting */ + budget--; + } while (likely(budget)); + + i += tx_ring->count; + tx_ring->next_to_clean = i; + + u64_stats_update_begin(&tx_ring->syncp); + tx_ring->stats.bytes += total_bytes; + tx_ring->stats.packets += total_packets; + u64_stats_update_end(&tx_ring->syncp); + q_vector->tx.total_bytes += total_bytes; + q_vector->tx.total_packets += total_packets; + + if (xsk_frames) + xsk_umem_complete_tx(umem, xsk_frames); + + xmit_done = ixgbe_xmit_zc(tx_ring, q_vector->tx.work_limit); + return budget > 0 && xmit_done; +} + +int ixgbe_xsk_async_xmit(struct net_device *dev, u32 qid) +{ + struct ixgbe_adapter *adapter = netdev_priv(dev); + struct ixgbe_ring *ring; + + if (test_bit(__IXGBE_DOWN, &adapter->state)) + return -ENETDOWN; + + if (!READ_ONCE(adapter->xdp_prog)) + return -ENXIO; + + if (qid >= adapter->num_xdp_queues) + return -ENXIO; + + if (!adapter->xsk_umems || !adapter->xsk_umems[qid]) + return -ENXIO; + + ring = adapter->xdp_ring[qid]; + if (!napi_if_scheduled_mark_missed(&ring->q_vector->napi)) { + u64 eics = BIT_ULL(ring->q_vector->v_idx); + + ixgbe_irq_rearm_queues(adapter, eics); + } + + return 0; +} + +void ixgbe_xsk_clean_tx_ring(struct ixgbe_ring *tx_ring) +{ + u16 ntc = tx_ring->next_to_clean, ntu = tx_ring->next_to_use; + struct xdp_umem *umem = tx_ring->xsk_umem; + struct ixgbe_tx_buffer *tx_bi; + u32 xsk_frames = 0; + + while (ntc != ntu) { + tx_bi = &tx_ring->tx_buffer_info[ntc]; + + if (tx_bi->xdpf) + ixgbe_clean_xdp_tx_buffer(tx_ring, tx_bi); + else + xsk_frames++; + + tx_bi->xdpf = NULL; + + ntc++; + if (ntc == tx_ring->count) + ntc = 0; + } + + if (xsk_frames) + xsk_umem_complete_tx(umem, xsk_frames); +} diff --git a/drivers/net/ethernet/intel/ixgbevf/ipsec.c b/drivers/net/ethernet/intel/ixgbevf/ipsec.c index 997cea675a37..e8a3231be0bf 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ipsec.c +++ b/drivers/net/ethernet/intel/ixgbevf/ipsec.c @@ -21,7 +21,6 @@ static int ixgbevf_ipsec_set_pf_sa(struct ixgbevf_adapter *adapter, u32 msgbuf[IXGBE_VFMAILBOX_SIZE] = { 0 }; struct ixgbe_hw *hw = &adapter->hw; struct sa_mbx_msg *sam; - u16 msglen; int ret; /* send the important bits to the PF */ @@ -38,16 +37,14 @@ static int ixgbevf_ipsec_set_pf_sa(struct ixgbevf_adapter *adapter, memcpy(sam->key, xs->aead->alg_key, sizeof(sam->key)); msgbuf[0] = IXGBE_VF_IPSEC_ADD; - msglen = sizeof(*sam) + sizeof(msgbuf[0]); spin_lock_bh(&adapter->mbx_lock); - ret = hw->mbx.ops.write_posted(hw, msgbuf, msglen); + ret = hw->mbx.ops.write_posted(hw, msgbuf, IXGBE_VFMAILBOX_SIZE); if (ret) goto out; - msglen = sizeof(msgbuf[0]) * 2; - ret = hw->mbx.ops.read_posted(hw, msgbuf, msglen); + ret = hw->mbx.ops.read_posted(hw, msgbuf, 2); if (ret) goto out; @@ -80,11 +77,11 @@ static int ixgbevf_ipsec_del_pf_sa(struct ixgbevf_adapter *adapter, int pfsa) spin_lock_bh(&adapter->mbx_lock); - err = hw->mbx.ops.write_posted(hw, msgbuf, sizeof(msgbuf)); + err = hw->mbx.ops.write_posted(hw, msgbuf, 2); if (err) goto out; - err = hw->mbx.ops.read_posted(hw, msgbuf, sizeof(msgbuf)); + err = hw->mbx.ops.read_posted(hw, msgbuf, 2); if (err) goto out; @@ -470,7 +467,7 @@ int ixgbevf_ipsec_tx(struct ixgbevf_ring *tx_ring, } sa_idx = xs->xso.offload_handle - IXGBE_IPSEC_BASE_TX_INDEX; - if (unlikely(sa_idx > IXGBE_IPSEC_MAX_SA_COUNT)) { + if (unlikely(sa_idx >= IXGBE_IPSEC_MAX_SA_COUNT)) { 
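/* sa_idx must be a valid table index: assuming the usual
 * IXGBE_IPSEC_MAX_SA_COUNT of 1024, the legal range is 0..1023,
 * which is why '>' is tightened to '>=' above (1024 itself must
 * be rejected).
 */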
netdev_err(tx_ring->netdev, "%s: bad sa_idx=%d handle=%lu\n", __func__, sa_idx, xs->xso.offload_handle); return 0; diff --git a/drivers/net/ethernet/marvell/Kconfig b/drivers/net/ethernet/marvell/Kconfig index f33fd22b351c..3238aa7f5dac 100644 --- a/drivers/net/ethernet/marvell/Kconfig +++ b/drivers/net/ethernet/marvell/Kconfig @@ -167,4 +167,7 @@ config SKY2_DEBUG If unsure, say N. + +source "drivers/net/ethernet/marvell/octeontx2/Kconfig" + endif # NET_VENDOR_MARVELL diff --git a/drivers/net/ethernet/marvell/Makefile b/drivers/net/ethernet/marvell/Makefile index 55d4d10aa7d3..89dea7284d5b 100644 --- a/drivers/net/ethernet/marvell/Makefile +++ b/drivers/net/ethernet/marvell/Makefile @@ -11,3 +11,4 @@ obj-$(CONFIG_MVPP2) += mvpp2/ obj-$(CONFIG_PXA168_ETH) += pxa168_eth.o obj-$(CONFIG_SKGE) += skge.o obj-$(CONFIG_SKY2) += sky2.o +obj-y += octeontx2/ diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c index 2373cd41a625..14f9679c957c 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c @@ -1755,7 +1755,7 @@ static void mvpp2_txq_desc_put(struct mvpp2_tx_queue *txq) } /* Set Tx descriptors fields relevant for CSUM calculation */ -static u32 mvpp2_txq_desc_csum(int l3_offs, int l3_proto, +static u32 mvpp2_txq_desc_csum(int l3_offs, __be16 l3_proto, int ip_hdr_len, int l4_proto) { u32 command; @@ -2645,14 +2645,15 @@ static u32 mvpp2_skb_tx_csum(struct mvpp2_port *port, struct sk_buff *skb) if (skb->ip_summed == CHECKSUM_PARTIAL) { int ip_hdr_len = 0; u8 l4_proto; + __be16 l3_proto = vlan_get_protocol(skb); - if (skb->protocol == htons(ETH_P_IP)) { + if (l3_proto == htons(ETH_P_IP)) { struct iphdr *ip4h = ip_hdr(skb); /* Calculate IPv4 checksum and L4 checksum */ ip_hdr_len = ip4h->ihl; l4_proto = ip4h->protocol; - } else if (skb->protocol == htons(ETH_P_IPV6)) { + } else if (l3_proto == htons(ETH_P_IPV6)) { struct ipv6hdr *ip6h = ipv6_hdr(skb); /* Read l4_protocol from one of IPv6 extra headers */ @@ -2664,7 +2665,7 @@ static u32 mvpp2_skb_tx_csum(struct mvpp2_port *port, struct sk_buff *skb) } return mvpp2_txq_desc_csum(skb_network_offset(skb), - skb->protocol, ip_hdr_len, l4_proto); + l3_proto, ip_hdr_len, l4_proto); } return MVPP2_TXD_L4_CSUM_NOT | MVPP2_TXD_IP_CSUM_DISABLE; diff --git a/drivers/net/ethernet/marvell/octeontx2/Kconfig b/drivers/net/ethernet/marvell/octeontx2/Kconfig new file mode 100644 index 000000000000..35827bdf1878 --- /dev/null +++ b/drivers/net/ethernet/marvell/octeontx2/Kconfig @@ -0,0 +1,17 @@ +# +# Marvell OcteonTX2 drivers configuration +# + +config OCTEONTX2_MBOX + tristate + +config OCTEONTX2_AF + tristate "Marvell OcteonTX2 RVU Admin Function driver" + select OCTEONTX2_MBOX + depends on (64BIT && COMPILE_TEST) || ARM64 + depends on PCI + help + This driver supports Marvell's OcteonTX2 Resource Virtualization + Unit's admin function manager which manages all RVU HW resources + and provides a medium to other PF/VFs to configure HW. Should be + enabled for other RVU device drivers to work. diff --git a/drivers/net/ethernet/marvell/octeontx2/Makefile b/drivers/net/ethernet/marvell/octeontx2/Makefile new file mode 100644 index 000000000000..e579dcd54c97 --- /dev/null +++ b/drivers/net/ethernet/marvell/octeontx2/Makefile @@ -0,0 +1,6 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# Makefile for Marvell OcteonTX2 device drivers. 
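+# Note: the parent Makefile descends into this directory unconditionally
+# (obj-y += octeontx2/); whether anything is actually built here is decided
+# by the CONFIG_OCTEONTX2_* symbols referenced below.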
+# + +obj-$(CONFIG_OCTEONTX2_AF) += af/ diff --git a/drivers/net/ethernet/marvell/octeontx2/af/Makefile b/drivers/net/ethernet/marvell/octeontx2/af/Makefile new file mode 100644 index 000000000000..eaac26424486 --- /dev/null +++ b/drivers/net/ethernet/marvell/octeontx2/af/Makefile @@ -0,0 +1,10 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# Makefile for Marvell's OcteonTX2 RVU Admin Function driver +# + +obj-$(CONFIG_OCTEONTX2_MBOX) += octeontx2_mbox.o +obj-$(CONFIG_OCTEONTX2_AF) += octeontx2_af.o + +octeontx2_mbox-y := mbox.o +octeontx2_af-y := cgx.o rvu.o rvu_cgx.o diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c new file mode 100644 index 000000000000..2cf8e402c31e --- /dev/null +++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c @@ -0,0 +1,502 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Marvell OcteonTx2 CGX driver + * + * Copyright (C) 2018 Marvell International Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include <linux/acpi.h> +#include <linux/module.h> +#include <linux/interrupt.h> +#include <linux/pci.h> +#include <linux/netdevice.h> +#include <linux/etherdevice.h> +#include <linux/phy.h> +#include <linux/of.h> +#include <linux/of_mdio.h> +#include <linux/of_net.h> + +#include "cgx.h" + +#define DRV_NAME "octeontx2-cgx" +#define DRV_STRING "Marvell OcteonTX2 CGX/MAC Driver" + +/** + * struct lmac + * @wq_cmd_cmplt: waitq to keep the process blocked until cmd completion + * @cmd_lock: Lock to serialize the command interface + * @resp: command response + * @event_cb: callback for linkchange events + * @cmd_pend: flag set before new command is started + * flag cleared after command response is received + * @cgx: parent cgx port + * @lmac_id: lmac port id + * @name: lmac port name + */ +struct lmac { + wait_queue_head_t wq_cmd_cmplt; + struct mutex cmd_lock; + u64 resp; + struct cgx_event_cb event_cb; + bool cmd_pend; + struct cgx *cgx; + u8 lmac_id; + char *name; +}; + +struct cgx { + void __iomem *reg_base; + struct pci_dev *pdev; + u8 cgx_id; + u8 lmac_count; + struct lmac *lmac_idmap[MAX_LMAC_PER_CGX]; + struct list_head cgx_list; +}; + +static LIST_HEAD(cgx_list); + +/* CGX PHY management internal APIs */ +static int cgx_fwi_link_change(struct cgx *cgx, int lmac_id, bool en); + +/* Supported devices */ +static const struct pci_device_id cgx_id_table[] = { + { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_CGX) }, + { 0, } /* end of table */ +}; + +MODULE_DEVICE_TABLE(pci, cgx_id_table); + +static void cgx_write(struct cgx *cgx, u64 lmac, u64 offset, u64 val) +{ + writeq(val, cgx->reg_base + (lmac << 18) + offset); +} + +static u64 cgx_read(struct cgx *cgx, u64 lmac, u64 offset) +{ + return readq(cgx->reg_base + (lmac << 18) + offset); +} + +static inline struct lmac *lmac_pdata(u8 lmac_id, struct cgx *cgx) +{ + if (!cgx || lmac_id >= MAX_LMAC_PER_CGX) + return NULL; + + return cgx->lmac_idmap[lmac_id]; +} + +int cgx_get_cgx_cnt(void) +{ + struct cgx *cgx_dev; + int count = 0; + + list_for_each_entry(cgx_dev, &cgx_list, cgx_list) + count++; + + return count; +} +EXPORT_SYMBOL(cgx_get_cgx_cnt); + +int cgx_get_lmac_cnt(void *cgxd) +{ + struct cgx *cgx = cgxd; + + if (!cgx) + return -ENODEV; + + return cgx->lmac_count; +} +EXPORT_SYMBOL(cgx_get_lmac_cnt); + +void *cgx_get_pdata(int cgx_id) +{ + struct cgx *cgx_dev; + + list_for_each_entry(cgx_dev, 
&cgx_list, cgx_list) { + if (cgx_dev->cgx_id == cgx_id) + return cgx_dev; + } + return NULL; +} +EXPORT_SYMBOL(cgx_get_pdata); + +/* CGX Firmware interface low level support */ +static int cgx_fwi_cmd_send(u64 req, u64 *resp, struct lmac *lmac) +{ + struct cgx *cgx = lmac->cgx; + struct device *dev; + int err = 0; + u64 cmd; + + /* Ensure no other command is in progress */ + err = mutex_lock_interruptible(&lmac->cmd_lock); + if (err) + return err; + + /* Ensure command register is free */ + cmd = cgx_read(cgx, lmac->lmac_id, CGX_COMMAND_REG); + if (FIELD_GET(CMDREG_OWN, cmd) != CGX_CMD_OWN_NS) { + err = -EBUSY; + goto unlock; + } + + /* Update ownership in command request */ + req = FIELD_SET(CMDREG_OWN, CGX_CMD_OWN_FIRMWARE, req); + + /* Mark this lmac as pending, before we start */ + lmac->cmd_pend = true; + + /* Start command in hardware */ + cgx_write(cgx, lmac->lmac_id, CGX_COMMAND_REG, req); + + /* Ensure command is completed without errors */ + if (!wait_event_timeout(lmac->wq_cmd_cmplt, !lmac->cmd_pend, + msecs_to_jiffies(CGX_CMD_TIMEOUT))) { + dev = &cgx->pdev->dev; + dev_err(dev, "cgx port %d:%d cmd timeout\n", + cgx->cgx_id, lmac->lmac_id); + err = -EIO; + goto unlock; + } + + /* we have a valid command response */ + smp_rmb(); /* Ensure the latest updates are visible */ + *resp = lmac->resp; + +unlock: + mutex_unlock(&lmac->cmd_lock); + + return err; +} + +static inline int cgx_fwi_cmd_generic(u64 req, u64 *resp, + struct cgx *cgx, int lmac_id) +{ + struct lmac *lmac; + int err; + + lmac = lmac_pdata(lmac_id, cgx); + if (!lmac) + return -ENODEV; + + err = cgx_fwi_cmd_send(req, resp, lmac); + + /* Check for valid response */ + if (!err) { + if (FIELD_GET(EVTREG_STAT, *resp) == CGX_STAT_FAIL) + return -EIO; + else + return 0; + } + + return err; +} + +/* Hardware event handlers */ +static inline void cgx_link_change_handler(u64 lstat, + struct lmac *lmac) +{ + struct cgx *cgx = lmac->cgx; + struct cgx_link_event event; + struct device *dev; + + dev = &cgx->pdev->dev; + + event.lstat.link_up = FIELD_GET(RESP_LINKSTAT_UP, lstat); + event.lstat.full_duplex = FIELD_GET(RESP_LINKSTAT_FDUPLEX, lstat); + event.lstat.speed = FIELD_GET(RESP_LINKSTAT_SPEED, lstat); + event.lstat.err_type = FIELD_GET(RESP_LINKSTAT_ERRTYPE, lstat); + + event.cgx_id = cgx->cgx_id; + event.lmac_id = lmac->lmac_id; + + if (!lmac->event_cb.notify_link_chg) { + dev_dbg(dev, "cgx port %d:%d Link change handler null", + cgx->cgx_id, lmac->lmac_id); + if (event.lstat.err_type != CGX_ERR_NONE) { + dev_err(dev, "cgx port %d:%d Link error %d\n", + cgx->cgx_id, lmac->lmac_id, + event.lstat.err_type); + } + dev_info(dev, "cgx port %d:%d Link status %s, speed %x\n", + cgx->cgx_id, lmac->lmac_id, + event.lstat.link_up ? 
"UP" : "DOWN", + event.lstat.speed); + return; + } + + if (lmac->event_cb.notify_link_chg(&event, lmac->event_cb.data)) + dev_err(dev, "event notification failure\n"); +} + +static inline bool cgx_cmdresp_is_linkevent(u64 event) +{ + u8 id; + + id = FIELD_GET(EVTREG_ID, event); + if (id == CGX_CMD_LINK_BRING_UP || + id == CGX_CMD_LINK_BRING_DOWN) + return true; + else + return false; +} + +static inline bool cgx_event_is_linkevent(u64 event) +{ + if (FIELD_GET(EVTREG_ID, event) == CGX_EVT_LINK_CHANGE) + return true; + else + return false; +} + +static irqreturn_t cgx_fwi_event_handler(int irq, void *data) +{ + struct lmac *lmac = data; + struct cgx *cgx; + u64 event; + + cgx = lmac->cgx; + + event = cgx_read(cgx, lmac->lmac_id, CGX_EVENT_REG); + + if (!FIELD_GET(EVTREG_ACK, event)) + return IRQ_NONE; + + switch (FIELD_GET(EVTREG_EVT_TYPE, event)) { + case CGX_EVT_CMD_RESP: + /* Copy the response. Since only one command is active at a + * time, there is no way a response can get overwritten + */ + lmac->resp = event; + /* Ensure response is updated before thread context starts */ + smp_wmb(); + + /* There wont be separate events for link change initiated from + * software; Hence report the command responses as events + */ + if (cgx_cmdresp_is_linkevent(event)) + cgx_link_change_handler(event, lmac); + + /* Release thread waiting for completion */ + lmac->cmd_pend = false; + wake_up_interruptible(&lmac->wq_cmd_cmplt); + break; + case CGX_EVT_ASYNC: + if (cgx_event_is_linkevent(event)) + cgx_link_change_handler(event, lmac); + break; + } + + /* Any new event or command response will be posted by firmware + * only after the current status is acked. + * Ack the interrupt register as well. + */ + cgx_write(lmac->cgx, lmac->lmac_id, CGX_EVENT_REG, 0); + cgx_write(lmac->cgx, lmac->lmac_id, CGXX_CMRX_INT, FW_CGX_INT); + + return IRQ_HANDLED; +} + +/* APIs for PHY management using CGX firmware interface */ + +/* callback registration for hardware events like link change */ +int cgx_lmac_evh_register(struct cgx_event_cb *cb, void *cgxd, int lmac_id) +{ + struct cgx *cgx = cgxd; + struct lmac *lmac; + + lmac = lmac_pdata(lmac_id, cgx); + if (!lmac) + return -ENODEV; + + lmac->event_cb = *cb; + + return 0; +} +EXPORT_SYMBOL(cgx_lmac_evh_register); + +static int cgx_fwi_link_change(struct cgx *cgx, int lmac_id, bool enable) +{ + u64 req = 0; + u64 resp; + + if (enable) + req = FIELD_SET(CMDREG_ID, CGX_CMD_LINK_BRING_UP, req); + else + req = FIELD_SET(CMDREG_ID, CGX_CMD_LINK_BRING_DOWN, req); + + return cgx_fwi_cmd_generic(req, &resp, cgx, lmac_id); +} +EXPORT_SYMBOL(cgx_fwi_link_change); + +static inline int cgx_fwi_read_version(u64 *resp, struct cgx *cgx) +{ + u64 req = 0; + + req = FIELD_SET(CMDREG_ID, CGX_CMD_GET_FW_VER, req); + return cgx_fwi_cmd_generic(req, resp, cgx, 0); +} + +static int cgx_lmac_verify_fwi_version(struct cgx *cgx) +{ + struct device *dev = &cgx->pdev->dev; + int major_ver, minor_ver; + u64 resp; + int err; + + if (!cgx->lmac_count) + return 0; + + err = cgx_fwi_read_version(&resp, cgx); + if (err) + return err; + + major_ver = FIELD_GET(RESP_MAJOR_VER, resp); + minor_ver = FIELD_GET(RESP_MINOR_VER, resp); + dev_dbg(dev, "Firmware command interface version = %d.%d\n", + major_ver, minor_ver); + if (major_ver != CGX_FIRMWARE_MAJOR_VER || + minor_ver != CGX_FIRMWARE_MINOR_VER) + return -EIO; + else + return 0; +} + +static int cgx_lmac_init(struct cgx *cgx) +{ + struct lmac *lmac; + int i, err; + + cgx->lmac_count = cgx_read(cgx, 0, CGXX_CMRX_RX_LMACS) & 0x7; + if (cgx->lmac_count 
> MAX_LMAC_PER_CGX) + cgx->lmac_count = MAX_LMAC_PER_CGX; + + for (i = 0; i < cgx->lmac_count; i++) { + lmac = kcalloc(1, sizeof(struct lmac), GFP_KERNEL); + if (!lmac) + return -ENOMEM; + lmac->name = kcalloc(1, sizeof("cgx_fwi_xxx_yyy"), GFP_KERNEL); + if (!lmac->name) + return -ENOMEM; + sprintf(lmac->name, "cgx_fwi_%d_%d", cgx->cgx_id, i); + lmac->lmac_id = i; + lmac->cgx = cgx; + init_waitqueue_head(&lmac->wq_cmd_cmplt); + mutex_init(&lmac->cmd_lock); + err = request_irq(pci_irq_vector(cgx->pdev, + CGX_LMAC_FWI + i * 9), + cgx_fwi_event_handler, 0, lmac->name, lmac); + if (err) + return err; + + /* Enable interrupt */ + cgx_write(cgx, lmac->lmac_id, CGXX_CMRX_INT_ENA_W1S, + FW_CGX_INT); + + /* Add reference */ + cgx->lmac_idmap[i] = lmac; + } + + return cgx_lmac_verify_fwi_version(cgx); +} + +static int cgx_lmac_exit(struct cgx *cgx) +{ + struct lmac *lmac; + int i; + + /* Free all lmac related resources */ + for (i = 0; i < cgx->lmac_count; i++) { + lmac = cgx->lmac_idmap[i]; + if (!lmac) + continue; + free_irq(pci_irq_vector(cgx->pdev, CGX_LMAC_FWI + i * 9), lmac); + kfree(lmac->name); + kfree(lmac); + } + + return 0; +} + +static int cgx_probe(struct pci_dev *pdev, const struct pci_device_id *id) +{ + struct device *dev = &pdev->dev; + struct cgx *cgx; + int err, nvec; + + cgx = devm_kzalloc(dev, sizeof(*cgx), GFP_KERNEL); + if (!cgx) + return -ENOMEM; + cgx->pdev = pdev; + + pci_set_drvdata(pdev, cgx); + + err = pci_enable_device(pdev); + if (err) { + dev_err(dev, "Failed to enable PCI device\n"); + pci_set_drvdata(pdev, NULL); + return err; + } + + err = pci_request_regions(pdev, DRV_NAME); + if (err) { + dev_err(dev, "PCI request regions failed 0x%x\n", err); + goto err_disable_device; + } + + /* MAP configuration registers */ + cgx->reg_base = pcim_iomap(pdev, PCI_CFG_REG_BAR_NUM, 0); + if (!cgx->reg_base) { + dev_err(dev, "CGX: Cannot map CSR memory space, aborting\n"); + err = -ENOMEM; + goto err_release_regions; + } + + nvec = CGX_NVEC; + err = pci_alloc_irq_vectors(pdev, nvec, nvec, PCI_IRQ_MSIX); + if (err < 0 || err != nvec) { + dev_err(dev, "Request for %d msix vectors failed, err %d\n", + nvec, err); + goto err_release_regions; + } + + list_add(&cgx->cgx_list, &cgx_list); + cgx->cgx_id = cgx_get_cgx_cnt() - 1; + + err = cgx_lmac_init(cgx); + if (err) + goto err_release_lmac; + + return 0; + +err_release_lmac: + cgx_lmac_exit(cgx); + list_del(&cgx->cgx_list); +err_release_regions: + pci_release_regions(pdev); +err_disable_device: + pci_disable_device(pdev); + pci_set_drvdata(pdev, NULL); + return err; +} + +static void cgx_remove(struct pci_dev *pdev) +{ + struct cgx *cgx = pci_get_drvdata(pdev); + + cgx_lmac_exit(cgx); + list_del(&cgx->cgx_list); + pci_free_irq_vectors(pdev); + pci_release_regions(pdev); + pci_disable_device(pdev); + pci_set_drvdata(pdev, NULL); +} + +struct pci_driver cgx_driver = { + .name = DRV_NAME, + .id_table = cgx_id_table, + .probe = cgx_probe, + .remove = cgx_remove, +}; diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.h b/drivers/net/ethernet/marvell/octeontx2/af/cgx.h new file mode 100644 index 000000000000..a2a7a6d72d32 --- /dev/null +++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.h @@ -0,0 +1,65 @@ +/* SPDX-License-Identifier: GPL-2.0 + * Marvell OcteonTx2 CGX driver + * + * Copyright (C) 2018 Marvell International Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#ifndef CGX_H +#define CGX_H + +#include "cgx_fw_if.h" + + /* PCI device IDs */ +#define PCI_DEVID_OCTEONTX2_CGX 0xA059 + +/* PCI BAR nos */ +#define PCI_CFG_REG_BAR_NUM 0 + +#define MAX_CGX 3 +#define MAX_LMAC_PER_CGX 4 +#define CGX_OFFSET(x) ((x) * MAX_LMAC_PER_CGX) + +/* Registers */ +#define CGXX_CMRX_INT 0x040 +#define FW_CGX_INT BIT_ULL(1) +#define CGXX_CMRX_INT_ENA_W1S 0x058 +#define CGXX_CMRX_RX_ID_MAP 0x060 +#define CGXX_CMRX_RX_LMACS 0x128 +#define CGXX_SCRATCH0_REG 0x1050 +#define CGXX_SCRATCH1_REG 0x1058 +#define CGX_CONST 0x2000 + +#define CGX_COMMAND_REG CGXX_SCRATCH1_REG +#define CGX_EVENT_REG CGXX_SCRATCH0_REG +#define CGX_CMD_TIMEOUT 2200 /* msecs */ + +#define CGX_NVEC 37 +#define CGX_LMAC_FWI 0 + +struct cgx_link_event { + struct cgx_lnk_sts lstat; + u8 cgx_id; + u8 lmac_id; +}; + +/** + * struct cgx_event_cb + * @notify_link_chg: callback for link change notification + * @data: data passed to callback function + */ +struct cgx_event_cb { + int (*notify_link_chg)(struct cgx_link_event *event, void *data); + void *data; +}; + +extern struct pci_driver cgx_driver; + +int cgx_get_cgx_cnt(void); +int cgx_get_lmac_cnt(void *cgxd); +void *cgx_get_pdata(int cgx_id); +int cgx_lmac_evh_register(struct cgx_event_cb *cb, void *cgxd, int lmac_id); +#endif /* CGX_H */ diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx_fw_if.h b/drivers/net/ethernet/marvell/octeontx2/af/cgx_fw_if.h new file mode 100644 index 000000000000..fa17af3f4ba7 --- /dev/null +++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx_fw_if.h @@ -0,0 +1,186 @@ +/* SPDX-License-Identifier: GPL-2.0 + * Marvell OcteonTx2 CGX driver + * + * Copyright (C) 2018 Marvell International Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef __CGX_FW_INTF_H__ +#define __CGX_FW_INTF_H__ + +#include <linux/bitops.h> +#include <linux/bitfield.h> + +#define CGX_FIRMWARE_MAJOR_VER 1 +#define CGX_FIRMWARE_MINOR_VER 0 + +#define CGX_EVENT_ACK 1UL + +/* CGX error types. set for cmd response status as CGX_STAT_FAIL */ +enum cgx_error_type { + CGX_ERR_NONE, + CGX_ERR_LMAC_NOT_ENABLED, + CGX_ERR_LMAC_MODE_INVALID, + CGX_ERR_REQUEST_ID_INVALID, + CGX_ERR_PREV_ACK_NOT_CLEAR, + CGX_ERR_PHY_LINK_DOWN, + CGX_ERR_PCS_RESET_FAIL, + CGX_ERR_AN_CPT_FAIL, + CGX_ERR_TX_NOT_IDLE, + CGX_ERR_RX_NOT_IDLE, + CGX_ERR_SPUX_BR_BLKLOCK_FAIL, + CGX_ERR_SPUX_RX_ALIGN_FAIL, + CGX_ERR_SPUX_TX_FAULT, + CGX_ERR_SPUX_RX_FAULT, + CGX_ERR_SPUX_RESET_FAIL, + CGX_ERR_SPUX_AN_RESET_FAIL, + CGX_ERR_SPUX_USX_AN_RESET_FAIL, + CGX_ERR_SMUX_RX_LINK_NOT_OK, + CGX_ERR_PCS_RECV_LINK_FAIL, + CGX_ERR_TRAINING_FAIL, + CGX_ERR_RX_EQU_FAIL, + CGX_ERR_SPUX_BER_FAIL, + CGX_ERR_SPUX_RSFEC_ALGN_FAIL, /* = 22 */ +}; + +/* LINK speed types */ +enum cgx_link_speed { + CGX_LINK_NONE, + CGX_LINK_10M, + CGX_LINK_100M, + CGX_LINK_1G, + CGX_LINK_2HG, + CGX_LINK_5G, + CGX_LINK_10G, + CGX_LINK_20G, + CGX_LINK_25G, + CGX_LINK_40G, + CGX_LINK_50G, + CGX_LINK_100G, + CGX_LINK_SPEED_MAX, +}; + +/* REQUEST ID types. 
Input to firmware */ +enum cgx_cmd_id { + CGX_CMD_NONE, + CGX_CMD_GET_FW_VER, + CGX_CMD_GET_MAC_ADDR, + CGX_CMD_SET_MTU, + CGX_CMD_GET_LINK_STS, /* optional to user */ + CGX_CMD_LINK_BRING_UP, + CGX_CMD_LINK_BRING_DOWN, + CGX_CMD_INTERNAL_LBK, + CGX_CMD_EXTERNAL_LBK, + CGX_CMD_HIGIG, + CGX_CMD_LINK_STATE_CHANGE, + CGX_CMD_MODE_CHANGE, /* hot plug support */ + CGX_CMD_INTF_SHUTDOWN, + CGX_CMD_IRQ_ENABLE, + CGX_CMD_IRQ_DISABLE, +}; + +/* async event ids */ +enum cgx_evt_id { + CGX_EVT_NONE, + CGX_EVT_LINK_CHANGE, +}; + +/* event types - cause of interrupt */ +enum cgx_evt_type { + CGX_EVT_ASYNC, + CGX_EVT_CMD_RESP +}; + +enum cgx_stat { + CGX_STAT_SUCCESS, + CGX_STAT_FAIL +}; + +enum cgx_cmd_own { + CGX_CMD_OWN_NS, + CGX_CMD_OWN_FIRMWARE, +}; + +/* m - bit mask + * y - value to be written in the bitrange + * x - input value whose bitrange to be modified + */ +#define FIELD_SET(m, y, x) \ + (((x) & ~(m)) | \ + FIELD_PREP((m), (y))) + +/* scratchx(0) CSR used for ATF->non-secure SW communication. + * This acts as the status register + * Provides details on command ack/status, command response, error details + */ +#define EVTREG_ACK BIT_ULL(0) +#define EVTREG_EVT_TYPE BIT_ULL(1) +#define EVTREG_STAT BIT_ULL(2) +#define EVTREG_ID GENMASK_ULL(8, 3) + +/* Response to command IDs with command status as CGX_STAT_FAIL + * + * Not applicable for commands : + * CGX_CMD_LINK_BRING_UP/DOWN/CGX_EVT_LINK_CHANGE + */ +#define EVTREG_ERRTYPE GENMASK_ULL(18, 9) + +/* Response to cmd ID as CGX_CMD_GET_FW_VER with cmd status as + * CGX_STAT_SUCCESS + */ +#define RESP_MAJOR_VER GENMASK_ULL(12, 9) +#define RESP_MINOR_VER GENMASK_ULL(16, 13) + +/* Response to cmd ID as CGX_CMD_GET_MAC_ADDR with cmd status as + * CGX_STAT_SUCCESS + */ +#define RESP_MAC_ADDR GENMASK_ULL(56, 9) + +/* Response to cmd ID - CGX_CMD_LINK_BRING_UP/DOWN, event ID CGX_EVT_LINK_CHANGE + * status can be either CGX_STAT_FAIL or CGX_STAT_SUCCESS + * + * In case of CGX_STAT_FAIL, it indicates CGX configuration failed + * when processing link up/down/change command. + * Both err_type and current link status will be updated + * + * In case of CGX_STAT_SUCCESS, err_type will be CGX_ERR_NONE and current + * link status will be updated + */ +struct cgx_lnk_sts { + uint64_t reserved1:9; + uint64_t link_up:1; + uint64_t full_duplex:1; + uint64_t speed:4; /* cgx_link_speed */ + uint64_t err_type:10; + uint64_t reserved2:39; +}; + +#define RESP_LINKSTAT_UP GENMASK_ULL(9, 9) +#define RESP_LINKSTAT_FDUPLEX GENMASK_ULL(10, 10) +#define RESP_LINKSTAT_SPEED GENMASK_ULL(14, 11) +#define RESP_LINKSTAT_ERRTYPE GENMASK_ULL(24, 15) + +/* scratchx(1) CSR used for non-secure SW->ATF communication + * This CSR acts as a command register + */ +#define CMDREG_OWN BIT_ULL(0) +#define CMDREG_ID GENMASK_ULL(7, 2) + +/* Any command using enable/disable as an argument need + * to set this bitfield. + * Ex: Loopback, HiGig... 
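+ *
+ * For example (illustrative sketch, not firmware documentation):
+ * an internal-loopback enable request could be composed as
+ *   req = FIELD_SET(CMDREG_ID, CGX_CMD_INTERNAL_LBK, req);
+ *   req = FIELD_SET(CMDREG_ENABLE, 1, req);
+ * with ownership then handed to firmware via CMDREG_OWN.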
+ */ +#define CMDREG_ENABLE BIT_ULL(8) + +/* command argument to be passed for cmd ID - CGX_CMD_SET_MTU */ +#define CMDMTU_SIZE GENMASK_ULL(23, 8) + +/* command argument to be passed for cmd ID - CGX_CMD_LINK_CHANGE */ +#define CMDLINKCHANGE_LINKUP BIT_ULL(8) +#define CMDLINKCHANGE_FULLDPLX BIT_ULL(9) +#define CMDLINKCHANGE_SPEED GENMASK_ULL(13, 10) + +#endif /* __CGX_FW_INTF_H__ */ diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.c b/drivers/net/ethernet/marvell/octeontx2/af/mbox.c new file mode 100644 index 000000000000..85ba24a05774 --- /dev/null +++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.c @@ -0,0 +1,303 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Marvell OcteonTx2 RVU Admin Function driver + * + * Copyright (C) 2018 Marvell International Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include <linux/module.h> +#include <linux/interrupt.h> +#include <linux/pci.h> + +#include "rvu_reg.h" +#include "mbox.h" + +static const u16 msgs_offset = ALIGN(sizeof(struct mbox_hdr), MBOX_MSG_ALIGN); + +void otx2_mbox_reset(struct otx2_mbox *mbox, int devid) +{ + struct otx2_mbox_dev *mdev = &mbox->dev[devid]; + struct mbox_hdr *tx_hdr, *rx_hdr; + + tx_hdr = mdev->mbase + mbox->tx_start; + rx_hdr = mdev->mbase + mbox->rx_start; + + spin_lock(&mdev->mbox_lock); + mdev->msg_size = 0; + mdev->rsp_size = 0; + tx_hdr->num_msgs = 0; + rx_hdr->num_msgs = 0; + spin_unlock(&mdev->mbox_lock); +} +EXPORT_SYMBOL(otx2_mbox_reset); + +void otx2_mbox_destroy(struct otx2_mbox *mbox) +{ + mbox->reg_base = NULL; + mbox->hwbase = NULL; + + kfree(mbox->dev); + mbox->dev = NULL; +} +EXPORT_SYMBOL(otx2_mbox_destroy); + +int otx2_mbox_init(struct otx2_mbox *mbox, void *hwbase, struct pci_dev *pdev, + void *reg_base, int direction, int ndevs) +{ + struct otx2_mbox_dev *mdev; + int devid; + + switch (direction) { + case MBOX_DIR_AFPF: + case MBOX_DIR_PFVF: + mbox->tx_start = MBOX_DOWN_TX_START; + mbox->rx_start = MBOX_DOWN_RX_START; + mbox->tx_size = MBOX_DOWN_TX_SIZE; + mbox->rx_size = MBOX_DOWN_RX_SIZE; + break; + case MBOX_DIR_PFAF: + case MBOX_DIR_VFPF: + mbox->tx_start = MBOX_DOWN_RX_START; + mbox->rx_start = MBOX_DOWN_TX_START; + mbox->tx_size = MBOX_DOWN_RX_SIZE; + mbox->rx_size = MBOX_DOWN_TX_SIZE; + break; + case MBOX_DIR_AFPF_UP: + case MBOX_DIR_PFVF_UP: + mbox->tx_start = MBOX_UP_TX_START; + mbox->rx_start = MBOX_UP_RX_START; + mbox->tx_size = MBOX_UP_TX_SIZE; + mbox->rx_size = MBOX_UP_RX_SIZE; + break; + case MBOX_DIR_PFAF_UP: + case MBOX_DIR_VFPF_UP: + mbox->tx_start = MBOX_UP_RX_START; + mbox->rx_start = MBOX_UP_TX_START; + mbox->tx_size = MBOX_UP_RX_SIZE; + mbox->rx_size = MBOX_UP_TX_SIZE; + break; + default: + return -ENODEV; + } + + switch (direction) { + case MBOX_DIR_AFPF: + case MBOX_DIR_AFPF_UP: + mbox->trigger = RVU_AF_AFPF_MBOX0; + mbox->tr_shift = 4; + break; + case MBOX_DIR_PFAF: + case MBOX_DIR_PFAF_UP: + mbox->trigger = RVU_PF_PFAF_MBOX1; + mbox->tr_shift = 0; + break; + case MBOX_DIR_PFVF: + case MBOX_DIR_PFVF_UP: + mbox->trigger = RVU_PF_VFX_PFVF_MBOX0; + mbox->tr_shift = 12; + break; + case MBOX_DIR_VFPF: + case MBOX_DIR_VFPF_UP: + mbox->trigger = RVU_VF_VFPF_MBOX1; + mbox->tr_shift = 0; + break; + default: + return -ENODEV; + } + + mbox->reg_base = reg_base; + mbox->hwbase = hwbase; + mbox->pdev = pdev; + + mbox->dev = kcalloc(ndevs, sizeof(struct otx2_mbox_dev), GFP_KERNEL); + if (!mbox->dev) { + 
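+		/* Nothing else has been set up at this point, so the common
+		 * destroy path is safe here: it only clears the base
+		 * pointers and kfree()s the (failed) dev array.
+		 */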
otx2_mbox_destroy(mbox); + return -ENOMEM; + } + + mbox->ndevs = ndevs; + for (devid = 0; devid < ndevs; devid++) { + mdev = &mbox->dev[devid]; + mdev->mbase = mbox->hwbase + (devid * MBOX_SIZE); + spin_lock_init(&mdev->mbox_lock); + /* Init header to reset value */ + otx2_mbox_reset(mbox, devid); + } + + return 0; +} +EXPORT_SYMBOL(otx2_mbox_init); + +int otx2_mbox_wait_for_rsp(struct otx2_mbox *mbox, int devid) +{ + struct otx2_mbox_dev *mdev = &mbox->dev[devid]; + int timeout = 0, sleep = 1; + + while (mdev->num_msgs != mdev->msgs_acked) { + msleep(sleep); + timeout += sleep; + if (timeout >= MBOX_RSP_TIMEOUT) + return -EIO; + } + return 0; +} +EXPORT_SYMBOL(otx2_mbox_wait_for_rsp); + +int otx2_mbox_busy_poll_for_rsp(struct otx2_mbox *mbox, int devid) +{ + struct otx2_mbox_dev *mdev = &mbox->dev[devid]; + unsigned long timeout = jiffies + 1 * HZ; + + while (!time_after(jiffies, timeout)) { + if (mdev->num_msgs == mdev->msgs_acked) + return 0; + cpu_relax(); + } + return -EIO; +} +EXPORT_SYMBOL(otx2_mbox_busy_poll_for_rsp); + +void otx2_mbox_msg_send(struct otx2_mbox *mbox, int devid) +{ + struct otx2_mbox_dev *mdev = &mbox->dev[devid]; + struct mbox_hdr *tx_hdr, *rx_hdr; + + tx_hdr = mdev->mbase + mbox->tx_start; + rx_hdr = mdev->mbase + mbox->rx_start; + + spin_lock(&mdev->mbox_lock); + /* Reset header for next messages */ + mdev->msg_size = 0; + mdev->rsp_size = 0; + mdev->msgs_acked = 0; + + /* Sync mbox data into memory */ + smp_wmb(); + + /* num_msgs != 0 signals to the peer that the buffer has a number of + * messages. So this should be written after writing all the messages + * to the shared memory. + */ + tx_hdr->num_msgs = mdev->num_msgs; + rx_hdr->num_msgs = 0; + spin_unlock(&mdev->mbox_lock); + + /* The interrupt should be fired after num_msgs is written + * to the shared memory + */ + writeq(1, (void __iomem *)mbox->reg_base + + (mbox->trigger | (devid << mbox->tr_shift))); +} +EXPORT_SYMBOL(otx2_mbox_msg_send); + +struct mbox_msghdr *otx2_mbox_alloc_msg_rsp(struct otx2_mbox *mbox, int devid, + int size, int size_rsp) +{ + struct otx2_mbox_dev *mdev = &mbox->dev[devid]; + struct mbox_msghdr *msghdr = NULL; + + spin_lock(&mdev->mbox_lock); + size = ALIGN(size, MBOX_MSG_ALIGN); + size_rsp = ALIGN(size_rsp, MBOX_MSG_ALIGN); + /* Check if there is space in mailbox */ + if ((mdev->msg_size + size) > mbox->tx_size - msgs_offset) + goto exit; + if ((mdev->rsp_size + size_rsp) > mbox->rx_size - msgs_offset) + goto exit; + + if (mdev->msg_size == 0) + mdev->num_msgs = 0; + mdev->num_msgs++; + + msghdr = mdev->mbase + mbox->tx_start + msgs_offset + mdev->msg_size; + + /* Clear the whole msg region */ + memset(msghdr, 0, sizeof(*msghdr) + size); + /* Init message header with reset values */ + msghdr->ver = OTX2_MBOX_VERSION; + mdev->msg_size += size; + mdev->rsp_size += size_rsp; + msghdr->next_msgoff = mdev->msg_size + msgs_offset; +exit: + spin_unlock(&mdev->mbox_lock); + + return msghdr; +} +EXPORT_SYMBOL(otx2_mbox_alloc_msg_rsp); + +struct mbox_msghdr *otx2_mbox_get_rsp(struct otx2_mbox *mbox, int devid, + struct mbox_msghdr *msg) +{ + unsigned long imsg = mbox->tx_start + msgs_offset; + unsigned long irsp = mbox->rx_start + msgs_offset; + struct otx2_mbox_dev *mdev = &mbox->dev[devid]; + u16 msgs; + + if (mdev->num_msgs != mdev->msgs_acked) + return ERR_PTR(-ENODEV); + + for (msgs = 0; msgs < mdev->msgs_acked; msgs++) { + struct mbox_msghdr *pmsg = mdev->mbase + imsg; + struct mbox_msghdr *prsp = mdev->mbase + irsp; + + if (msg == pmsg) { + if (pmsg->id != prsp->id) + return 
ERR_PTR(-ENODEV); + return prsp; + } + + imsg = pmsg->next_msgoff; + irsp = prsp->next_msgoff; + } + + return ERR_PTR(-ENODEV); +} +EXPORT_SYMBOL(otx2_mbox_get_rsp); + +int +otx2_reply_invalid_msg(struct otx2_mbox *mbox, int devid, u16 pcifunc, u16 id) +{ + struct msg_rsp *rsp; + + rsp = (struct msg_rsp *) + otx2_mbox_alloc_msg(mbox, devid, sizeof(*rsp)); + if (!rsp) + return -ENOMEM; + rsp->hdr.id = id; + rsp->hdr.sig = OTX2_MBOX_RSP_SIG; + rsp->hdr.rc = MBOX_MSG_INVALID; + rsp->hdr.pcifunc = pcifunc; + return 0; +} +EXPORT_SYMBOL(otx2_reply_invalid_msg); + +bool otx2_mbox_nonempty(struct otx2_mbox *mbox, int devid) +{ + struct otx2_mbox_dev *mdev = &mbox->dev[devid]; + bool ret; + + spin_lock(&mdev->mbox_lock); + ret = mdev->num_msgs != 0; + spin_unlock(&mdev->mbox_lock); + + return ret; +} +EXPORT_SYMBOL(otx2_mbox_nonempty); + +const char *otx2_mbox_id2name(u16 id) +{ + switch (id) { +#define M(_name, _id, _1, _2) case _id: return # _name; + MBOX_MESSAGES +#undef M + default: + return "INVALID ID"; + } +} +EXPORT_SYMBOL(otx2_mbox_id2name); + +MODULE_AUTHOR("Marvell International Ltd."); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h new file mode 100644 index 000000000000..bedf0ee62450 --- /dev/null +++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h @@ -0,0 +1,211 @@ +/* SPDX-License-Identifier: GPL-2.0 + * Marvell OcteonTx2 RVU Admin Function driver + * + * Copyright (C) 2018 Marvell International Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef MBOX_H +#define MBOX_H + +#include <linux/etherdevice.h> +#include <linux/sizes.h> + +#include "rvu_struct.h" + +#define MBOX_SIZE SZ_64K + +/* AF/PF: PF initiated, PF/VF VF initiated */ +#define MBOX_DOWN_RX_START 0 +#define MBOX_DOWN_RX_SIZE (46 * SZ_1K) +#define MBOX_DOWN_TX_START (MBOX_DOWN_RX_START + MBOX_DOWN_RX_SIZE) +#define MBOX_DOWN_TX_SIZE (16 * SZ_1K) +/* AF/PF: AF initiated, PF/VF PF initiated */ +#define MBOX_UP_RX_START (MBOX_DOWN_TX_START + MBOX_DOWN_TX_SIZE) +#define MBOX_UP_RX_SIZE SZ_1K +#define MBOX_UP_TX_START (MBOX_UP_RX_START + MBOX_UP_RX_SIZE) +#define MBOX_UP_TX_SIZE SZ_1K + +#if MBOX_UP_TX_SIZE + MBOX_UP_TX_START != MBOX_SIZE +# error "incorrect mailbox area sizes" +#endif + +#define INTR_MASK(pfvfs) ((pfvfs < 64) ? 
(BIT_ULL(pfvfs) - 1) : (~0ull))
+
+#define MBOX_RSP_TIMEOUT	1000 /* in ms, Time to wait for mbox response */
+
+#define MBOX_MSG_ALIGN		16  /* Align mbox msg start to 16bytes */
+
+/* Mailbox directions */
+#define MBOX_DIR_AFPF		0  /* AF replies to PF */
+#define MBOX_DIR_PFAF		1  /* PF sends messages to AF */
+#define MBOX_DIR_PFVF		2  /* PF replies to VF */
+#define MBOX_DIR_VFPF		3  /* VF sends messages to PF */
+#define MBOX_DIR_AFPF_UP	4  /* AF sends messages to PF */
+#define MBOX_DIR_PFAF_UP	5  /* PF replies to AF */
+#define MBOX_DIR_PFVF_UP	6  /* PF sends messages to VF */
+#define MBOX_DIR_VFPF_UP	7  /* VF replies to PF */
+
+struct otx2_mbox_dev {
+	void	    *mbase;   /* This dev's mbox region */
+	spinlock_t  mbox_lock;
+	u16         msg_size; /* Total msg size to be sent */
+	u16         rsp_size; /* Total rsp size to be sure the reply is ok */
+	u16         num_msgs; /* No of msgs sent or waiting for response */
+	u16         msgs_acked; /* No of msgs for which response is received */
+};
+
+struct otx2_mbox {
+	struct pci_dev *pdev;
+	void   *hwbase;  /* Mbox region advertised by HW */
+	void   *reg_base;/* CSR base for this dev */
+	u64    trigger;  /* Trigger mbox notification */
+	u16    tr_shift; /* Mbox trigger shift */
+	u64    rx_start; /* Offset of Rx region in mbox memory */
+	u64    tx_start; /* Offset of Tx region in mbox memory */
+	u16    rx_size;  /* Size of Rx region */
+	u16    tx_size;  /* Size of Tx region */
+	u16    ndevs;    /* The number of peers */
+	struct otx2_mbox_dev *dev;
+};
+
+/* Header which precedes all mbox messages */
+struct mbox_hdr {
+	u16  num_msgs;   /* No of msgs embedded */
+};
+
+/* Header which precedes every msg and is also part of it */
+struct mbox_msghdr {
+	u16 pcifunc;     /* Who's sending this msg */
+	u16 id;          /* Mbox message ID */
+#define OTX2_MBOX_REQ_SIG (0xdead)
+#define OTX2_MBOX_RSP_SIG (0xbeef)
+	u16 sig;         /* Signature, for validating corrupted msgs */
+#define OTX2_MBOX_VERSION (0x0001)
+	u16 ver;         /* Version of msg's structure for this ID */
+	u16 next_msgoff; /* Offset of next msg within mailbox region */
+	int rc;          /* Msg process'ed response code */
+};
+
+void otx2_mbox_reset(struct otx2_mbox *mbox, int devid);
+void otx2_mbox_destroy(struct otx2_mbox *mbox);
+int otx2_mbox_init(struct otx2_mbox *mbox, void __force *hwbase,
+		   struct pci_dev *pdev, void __force *reg_base,
+		   int direction, int ndevs);
+void otx2_mbox_msg_send(struct otx2_mbox *mbox, int devid);
+int otx2_mbox_wait_for_rsp(struct otx2_mbox *mbox, int devid);
+int otx2_mbox_busy_poll_for_rsp(struct otx2_mbox *mbox, int devid);
+struct mbox_msghdr *otx2_mbox_alloc_msg_rsp(struct otx2_mbox *mbox, int devid,
+					    int size, int size_rsp);
+struct mbox_msghdr *otx2_mbox_get_rsp(struct otx2_mbox *mbox, int devid,
+				      struct mbox_msghdr *msg);
+int otx2_reply_invalid_msg(struct otx2_mbox *mbox, int devid,
+			   u16 pcifunc, u16 id);
+bool otx2_mbox_nonempty(struct otx2_mbox *mbox, int devid);
+const char *otx2_mbox_id2name(u16 id);
+static inline struct mbox_msghdr *otx2_mbox_alloc_msg(struct otx2_mbox *mbox,
+						      int devid, int size)
+{
+	return otx2_mbox_alloc_msg_rsp(mbox, devid, size, 0);
+}
+
+/* Mailbox message types */
+#define MBOX_MSG_MASK		0xFFFF
+#define MBOX_MSG_INVALID	0xFFFE
+#define MBOX_MSG_MAX		0xFFFF
+
+#define MBOX_MESSAGES						\
+/* Generic mbox IDs (range 0x000 - 0x1FF) */			\
+M(READY,	0x001, msg_req, ready_msg_rsp)			\
+M(ATTACH_RESOURCES, 0x002, rsrc_attach, msg_rsp)		\
+M(DETACH_RESOURCES, 0x003, rsrc_detach, msg_rsp)		\
+M(MSIX_OFFSET,	0x004, msg_req, msix_offset_rsp)		\
+/* CGX mbox IDs (range 0x200 - 0x3FF) */			\
+/* NPA mbox IDs (range 0x400 - 0x5FF) */			\
+/* SSO/SSOW mbox IDs (range 0x600 - 0x7FF) */			\
+/* TIM mbox IDs (range 0x800 - 0x9FF) */			\
+/* CPT mbox IDs (range 0xA00 - 0xBFF) */			\
+/* NPC mbox IDs (range 0x6000 - 0x7FFF) */			\
+/* NIX mbox IDs (range 0x8000 - 0xFFFF) */			\
+
+enum {
+#define M(_name, _id, _1, _2) MBOX_MSG_ ## _name = _id,
+MBOX_MESSAGES
+#undef M
+};
+
+/* Mailbox message formats */
+
+/* Generic request msg used for those mbox messages which
+ * don't send any data in the request.
+ */
+struct msg_req {
+	struct mbox_msghdr hdr;
+};
+
+/* Generic response msg used as an ack or response for those mbox
+ * messages which don't have a specific rsp msg format.
+ */
+struct msg_rsp {
+	struct mbox_msghdr hdr;
+};
+
+struct ready_msg_rsp {
+	struct mbox_msghdr hdr;
+	u16    sclk_feq;	/* SCLK frequency */
+};
+
+/* Structure for requesting resource provisioning.
+ * 'modify' flag to be used when either requesting more of,
+ * or detaching some of, a certain resource type.
+ * Rest of the fields specify how many of what type to
+ * be attached.
+ */
+struct rsrc_attach {
+	struct mbox_msghdr hdr;
+	u8   modify:1;
+	u8   npalf:1;
+	u8   nixlf:1;
+	u16  sso;
+	u16  ssow;
+	u16  timlfs;
+	u16  cptlfs;
+};
+
+/* Structure for relinquishing resources.
+ * 'partial' flag to be used when relinquishing all resources
+ * but only of a certain type. If not set, all resources of all
+ * types provisioned to the RVU function will be detached.
+ */
+struct rsrc_detach {
+	struct mbox_msghdr hdr;
+	u8 partial:1;
+	u8 npalf:1;
+	u8 nixlf:1;
+	u8 sso:1;
+	u8 ssow:1;
+	u8 timlfs:1;
+	u8 cptlfs:1;
+};
+
+#define MSIX_VECTOR_INVALID	0xFFFF
+#define MAX_RVU_BLKLF_CNT	256
+
+struct msix_offset_rsp {
+	struct mbox_msghdr hdr;
+	u16  npa_msixoff;
+	u16  nix_msixoff;
+	u8   sso;
+	u8   ssow;
+	u8   timlfs;
+	u8   cptlfs;
+	u16  sso_msixoff[MAX_RVU_BLKLF_CNT];
+	u16  ssow_msixoff[MAX_RVU_BLKLF_CNT];
+	u16  timlf_msixoff[MAX_RVU_BLKLF_CNT];
+	u16  cptlf_msixoff[MAX_RVU_BLKLF_CNT];
+};
+
+#endif /* MBOX_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
new file mode 100644
index 000000000000..2033f42c3226
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
@@ -0,0 +1,1637 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell OcteonTx2 RVU Admin Function driver
+ *
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */ + +#include <linux/module.h> +#include <linux/interrupt.h> +#include <linux/delay.h> +#include <linux/irq.h> +#include <linux/pci.h> +#include <linux/sysfs.h> + +#include "cgx.h" +#include "rvu.h" +#include "rvu_reg.h" + +#define DRV_NAME "octeontx2-af" +#define DRV_STRING "Marvell OcteonTX2 RVU Admin Function Driver" +#define DRV_VERSION "1.0" + +static int rvu_get_hwvf(struct rvu *rvu, int pcifunc); + +static void rvu_set_msix_offset(struct rvu *rvu, struct rvu_pfvf *pfvf, + struct rvu_block *block, int lf); +static void rvu_clear_msix_offset(struct rvu *rvu, struct rvu_pfvf *pfvf, + struct rvu_block *block, int lf); + +/* Supported devices */ +static const struct pci_device_id rvu_id_table[] = { + { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_RVU_AF) }, + { 0, } /* end of table */ +}; + +MODULE_AUTHOR("Marvell International Ltd."); +MODULE_DESCRIPTION(DRV_STRING); +MODULE_LICENSE("GPL v2"); +MODULE_VERSION(DRV_VERSION); +MODULE_DEVICE_TABLE(pci, rvu_id_table); + +/* Poll a RVU block's register 'offset', for a 'zero' + * or 'nonzero' at bits specified by 'mask' + */ +int rvu_poll_reg(struct rvu *rvu, u64 block, u64 offset, u64 mask, bool zero) +{ + void __iomem *reg; + int timeout = 100; + u64 reg_val; + + reg = rvu->afreg_base + ((block << 28) | offset); + while (timeout) { + reg_val = readq(reg); + if (zero && !(reg_val & mask)) + return 0; + if (!zero && (reg_val & mask)) + return 0; + usleep_range(1, 2); + timeout--; + } + return -EBUSY; +} + +int rvu_alloc_rsrc(struct rsrc_bmap *rsrc) +{ + int id; + + if (!rsrc->bmap) + return -EINVAL; + + id = find_first_zero_bit(rsrc->bmap, rsrc->max); + if (id >= rsrc->max) + return -ENOSPC; + + __set_bit(id, rsrc->bmap); + + return id; +} + +static int rvu_alloc_rsrc_contig(struct rsrc_bmap *rsrc, int nrsrc) +{ + int start; + + if (!rsrc->bmap) + return -EINVAL; + + start = bitmap_find_next_zero_area(rsrc->bmap, rsrc->max, 0, nrsrc, 0); + if (start >= rsrc->max) + return -ENOSPC; + + bitmap_set(rsrc->bmap, start, nrsrc); + return start; +} + +static void rvu_free_rsrc_contig(struct rsrc_bmap *rsrc, int nrsrc, int start) +{ + if (!rsrc->bmap) + return; + if (start >= rsrc->max) + return; + + bitmap_clear(rsrc->bmap, start, nrsrc); +} + +static bool rvu_rsrc_check_contig(struct rsrc_bmap *rsrc, int nrsrc) +{ + int start; + + if (!rsrc->bmap) + return false; + + start = bitmap_find_next_zero_area(rsrc->bmap, rsrc->max, 0, nrsrc, 0); + if (start >= rsrc->max) + return false; + + return true; +} + +void rvu_free_rsrc(struct rsrc_bmap *rsrc, int id) +{ + if (!rsrc->bmap) + return; + + __clear_bit(id, rsrc->bmap); +} + +int rvu_rsrc_free_count(struct rsrc_bmap *rsrc) +{ + int used; + + if (!rsrc->bmap) + return 0; + + used = bitmap_weight(rsrc->bmap, rsrc->max); + return (rsrc->max - used); +} + +int rvu_alloc_bitmap(struct rsrc_bmap *rsrc) +{ + rsrc->bmap = kcalloc(BITS_TO_LONGS(rsrc->max), + sizeof(long), GFP_KERNEL); + if (!rsrc->bmap) + return -ENOMEM; + return 0; +} + +/* Get block LF's HW index from a PF_FUNC's block slot number */ +int rvu_get_lf(struct rvu *rvu, struct rvu_block *block, u16 pcifunc, u16 slot) +{ + u16 match = 0; + int lf; + + spin_lock(&rvu->rsrc_lock); + for (lf = 0; lf < block->lf.max; lf++) { + if (block->fn_map[lf] == pcifunc) { + if (slot == match) { + spin_unlock(&rvu->rsrc_lock); + return lf; + } + match++; + } + } + spin_unlock(&rvu->rsrc_lock); + return -ENODEV; +} + +/* Convert BLOCK_TYPE_E to a BLOCK_ADDR_E. + * Some silicon variants of OcteonTX2 supports + * multiple blocks of same type. 
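+ * (e.g. more than one NIX or CPT block); for such types the returned
+ * block address depends on which block the given PF/VF actually has
+ * an LF attached to, as checked below.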
+ * + * @pcifunc has to be zero when no LF is yet attached. + */ +int rvu_get_blkaddr(struct rvu *rvu, int blktype, u16 pcifunc) +{ + int devnum, blkaddr = -ENODEV; + u64 cfg, reg; + bool is_pf; + + switch (blktype) { + case BLKTYPE_NPA: + blkaddr = BLKADDR_NPA; + goto exit; + case BLKTYPE_NIX: + /* For now assume NIX0 */ + if (!pcifunc) { + blkaddr = BLKADDR_NIX0; + goto exit; + } + break; + case BLKTYPE_SSO: + blkaddr = BLKADDR_SSO; + goto exit; + case BLKTYPE_SSOW: + blkaddr = BLKADDR_SSOW; + goto exit; + case BLKTYPE_TIM: + blkaddr = BLKADDR_TIM; + goto exit; + case BLKTYPE_CPT: + /* For now assume CPT0 */ + if (!pcifunc) { + blkaddr = BLKADDR_CPT0; + goto exit; + } + break; + } + + /* Check if this is a RVU PF or VF */ + if (pcifunc & RVU_PFVF_FUNC_MASK) { + is_pf = false; + devnum = rvu_get_hwvf(rvu, pcifunc); + } else { + is_pf = true; + devnum = rvu_get_pf(pcifunc); + } + + /* Check if the 'pcifunc' has a NIX LF from 'BLKADDR_NIX0' */ + if (blktype == BLKTYPE_NIX) { + reg = is_pf ? RVU_PRIV_PFX_NIX0_CFG : RVU_PRIV_HWVFX_NIX0_CFG; + cfg = rvu_read64(rvu, BLKADDR_RVUM, reg | (devnum << 16)); + if (cfg) + blkaddr = BLKADDR_NIX0; + } + + /* Check if the 'pcifunc' has a CPT LF from 'BLKADDR_CPT0' */ + if (blktype == BLKTYPE_CPT) { + reg = is_pf ? RVU_PRIV_PFX_CPT0_CFG : RVU_PRIV_HWVFX_CPT0_CFG; + cfg = rvu_read64(rvu, BLKADDR_RVUM, reg | (devnum << 16)); + if (cfg) + blkaddr = BLKADDR_CPT0; + } + +exit: + if (is_block_implemented(rvu->hw, blkaddr)) + return blkaddr; + return -ENODEV; +} + +static void rvu_update_rsrc_map(struct rvu *rvu, struct rvu_pfvf *pfvf, + struct rvu_block *block, u16 pcifunc, + u16 lf, bool attach) +{ + int devnum, num_lfs = 0; + bool is_pf; + u64 reg; + + if (lf >= block->lf.max) { + dev_err(&rvu->pdev->dev, + "%s: FATAL: LF %d is >= %s's max lfs i.e %d\n", + __func__, lf, block->name, block->lf.max); + return; + } + + /* Check if this is for a RVU PF or VF */ + if (pcifunc & RVU_PFVF_FUNC_MASK) { + is_pf = false; + devnum = rvu_get_hwvf(rvu, pcifunc); + } else { + is_pf = true; + devnum = rvu_get_pf(pcifunc); + } + + block->fn_map[lf] = attach ? pcifunc : 0; + + switch (block->type) { + case BLKTYPE_NPA: + pfvf->npalf = attach ? true : false; + num_lfs = pfvf->npalf; + break; + case BLKTYPE_NIX: + pfvf->nixlf = attach ? true : false; + num_lfs = pfvf->nixlf; + break; + case BLKTYPE_SSO: + attach ? pfvf->sso++ : pfvf->sso--; + num_lfs = pfvf->sso; + break; + case BLKTYPE_SSOW: + attach ? pfvf->ssow++ : pfvf->ssow--; + num_lfs = pfvf->ssow; + break; + case BLKTYPE_TIM: + attach ? pfvf->timlfs++ : pfvf->timlfs--; + num_lfs = pfvf->timlfs; + break; + case BLKTYPE_CPT: + attach ? pfvf->cptlfs++ : pfvf->cptlfs--; + num_lfs = pfvf->cptlfs; + break; + } + + reg = is_pf ? 
block->pf_lfcnt_reg : block->vf_lfcnt_reg; + rvu_write64(rvu, BLKADDR_RVUM, reg | (devnum << 16), num_lfs); +} + +inline int rvu_get_pf(u16 pcifunc) +{ + return (pcifunc >> RVU_PFVF_PF_SHIFT) & RVU_PFVF_PF_MASK; +} + +void rvu_get_pf_numvfs(struct rvu *rvu, int pf, int *numvfs, int *hwvf) +{ + u64 cfg; + + /* Get numVFs attached to this PF and first HWVF */ + cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf)); + *numvfs = (cfg >> 12) & 0xFF; + *hwvf = cfg & 0xFFF; +} + +static int rvu_get_hwvf(struct rvu *rvu, int pcifunc) +{ + int pf, func; + u64 cfg; + + pf = rvu_get_pf(pcifunc); + func = pcifunc & RVU_PFVF_FUNC_MASK; + + /* Get first HWVF attached to this PF */ + cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf)); + + return ((cfg & 0xFFF) + func - 1); +} + +struct rvu_pfvf *rvu_get_pfvf(struct rvu *rvu, int pcifunc) +{ + /* Check if it is a PF or VF */ + if (pcifunc & RVU_PFVF_FUNC_MASK) + return &rvu->hwvf[rvu_get_hwvf(rvu, pcifunc)]; + else + return &rvu->pf[rvu_get_pf(pcifunc)]; +} + +bool is_block_implemented(struct rvu_hwinfo *hw, int blkaddr) +{ + struct rvu_block *block; + + if (blkaddr < BLKADDR_RVUM || blkaddr >= BLK_COUNT) + return false; + + block = &hw->block[blkaddr]; + return block->implemented; +} + +static void rvu_check_block_implemented(struct rvu *rvu) +{ + struct rvu_hwinfo *hw = rvu->hw; + struct rvu_block *block; + int blkid; + u64 cfg; + + /* For each block check if 'implemented' bit is set */ + for (blkid = 0; blkid < BLK_COUNT; blkid++) { + block = &hw->block[blkid]; + cfg = rvupf_read64(rvu, RVU_PF_BLOCK_ADDRX_DISC(blkid)); + if (cfg & BIT_ULL(11)) + block->implemented = true; + } +} + +static void rvu_block_reset(struct rvu *rvu, int blkaddr, u64 rst_reg) +{ + struct rvu_block *block = &rvu->hw->block[blkaddr]; + + if (!block->implemented) + return; + + rvu_write64(rvu, blkaddr, rst_reg, BIT_ULL(0)); + rvu_poll_reg(rvu, blkaddr, rst_reg, BIT_ULL(63), true); +} + +static void rvu_reset_all_blocks(struct rvu *rvu) +{ + /* Do a HW reset of all RVU blocks */ + rvu_block_reset(rvu, BLKADDR_NPA, NPA_AF_BLK_RST); + rvu_block_reset(rvu, BLKADDR_NIX0, NIX_AF_BLK_RST); + rvu_block_reset(rvu, BLKADDR_NPC, NPC_AF_BLK_RST); + rvu_block_reset(rvu, BLKADDR_SSO, SSO_AF_BLK_RST); + rvu_block_reset(rvu, BLKADDR_TIM, TIM_AF_BLK_RST); + rvu_block_reset(rvu, BLKADDR_CPT0, CPT_AF_BLK_RST); + rvu_block_reset(rvu, BLKADDR_NDC0, NDC_AF_BLK_RST); + rvu_block_reset(rvu, BLKADDR_NDC1, NDC_AF_BLK_RST); + rvu_block_reset(rvu, BLKADDR_NDC2, NDC_AF_BLK_RST); +} + +static void rvu_scan_block(struct rvu *rvu, struct rvu_block *block) +{ + struct rvu_pfvf *pfvf; + u64 cfg; + int lf; + + for (lf = 0; lf < block->lf.max; lf++) { + cfg = rvu_read64(rvu, block->addr, + block->lfcfg_reg | (lf << block->lfshift)); + if (!(cfg & BIT_ULL(63))) + continue; + + /* Set this resource as being used */ + __set_bit(lf, block->lf.bmap); + + /* Get, to whom this LF is attached */ + pfvf = rvu_get_pfvf(rvu, (cfg >> 8) & 0xFFFF); + rvu_update_rsrc_map(rvu, pfvf, block, + (cfg >> 8) & 0xFFFF, lf, true); + + /* Set start MSIX vector for this LF within this PF/VF */ + rvu_set_msix_offset(rvu, pfvf, block, lf); + } +} + +static void rvu_check_min_msix_vec(struct rvu *rvu, int nvecs, int pf, int vf) +{ + int min_vecs; + + if (!vf) + goto check_pf; + + if (!nvecs) { + dev_warn(rvu->dev, + "PF%d:VF%d is configured with zero msix vectors, %d\n", + pf, vf - 1, nvecs); + } + return; + +check_pf: + if (pf == 0) + min_vecs = RVU_AF_INT_VEC_CNT + RVU_PF_INT_VEC_CNT; + else + min_vecs = RVU_PF_INT_VEC_CNT; + + 
	if (nvecs >= min_vecs)
+		return;
+	dev_warn(rvu->dev,
+		 "PF%d is configured with too few vectors, %d, min is %d\n",
+		 pf, nvecs, min_vecs);
+}
+
+static int rvu_setup_msix_resources(struct rvu *rvu)
+{
+	struct rvu_hwinfo *hw = rvu->hw;
+	int pf, vf, numvfs, hwvf, err;
+	int nvecs, offset, max_msix;
+	struct rvu_pfvf *pfvf;
+	u64 cfg, phy_addr;
+	dma_addr_t iova;
+
+	for (pf = 0; pf < hw->total_pfs; pf++) {
+		cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
+		/* If PF is not enabled, nothing to do */
+		if (!((cfg >> 20) & 0x01))
+			continue;
+
+		rvu_get_pf_numvfs(rvu, pf, &numvfs, &hwvf);
+
+		pfvf = &rvu->pf[pf];
+		/* Get num of MSIX vectors attached to this PF */
+		cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_MSIX_CFG(pf));
+		pfvf->msix.max = ((cfg >> 32) & 0xFFF) + 1;
+		rvu_check_min_msix_vec(rvu, pfvf->msix.max, pf, 0);
+
+		/* Alloc msix bitmap for this PF */
+		err = rvu_alloc_bitmap(&pfvf->msix);
+		if (err)
+			return err;
+
+		/* Allocate memory for MSIX vector to RVU block LF mapping */
+		pfvf->msix_lfmap = devm_kcalloc(rvu->dev, pfvf->msix.max,
+						sizeof(u16), GFP_KERNEL);
+		if (!pfvf->msix_lfmap)
+			return -ENOMEM;
+
+		/* For PF0 (AF) firmware will set msix vector offsets for
+		 * AF, block AF and PF0_INT vectors, so jump to VFs.
+		 */
+		if (!pf)
+			goto setup_vfmsix;
+
+		/* Set MSIX offset for PF's 'RVU_PF_INT_VEC' vectors.
+		 * These are allocated on driver init and never freed,
+		 * so no need to set 'msix_lfmap' for these.
+		 */
+		cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_INT_CFG(pf));
+		nvecs = (cfg >> 12) & 0xFF;
+		cfg &= ~0x7FFULL;
+		offset = rvu_alloc_rsrc_contig(&pfvf->msix, nvecs);
+		rvu_write64(rvu, BLKADDR_RVUM,
+			    RVU_PRIV_PFX_INT_CFG(pf), cfg | offset);
+setup_vfmsix:
+		/* Alloc msix bitmap for VFs */
+		for (vf = 0; vf < numvfs; vf++) {
+			pfvf = &rvu->hwvf[hwvf + vf];
+			/* Get num of MSIX vectors attached to this VF */
+			cfg = rvu_read64(rvu, BLKADDR_RVUM,
+					 RVU_PRIV_PFX_MSIX_CFG(pf));
+			pfvf->msix.max = (cfg & 0xFFF) + 1;
+			rvu_check_min_msix_vec(rvu, pfvf->msix.max, pf, vf + 1);
+
+			/* Alloc msix bitmap for this VF */
+			err = rvu_alloc_bitmap(&pfvf->msix);
+			if (err)
+				return err;
+
+			pfvf->msix_lfmap =
+				devm_kcalloc(rvu->dev, pfvf->msix.max,
+					     sizeof(u16), GFP_KERNEL);
+			if (!pfvf->msix_lfmap)
+				return -ENOMEM;
+
+			/* Set MSIX offset for HWVF's 'RVU_VF_INT_VEC' vectors.
+			 * These are allocated on driver init and never freed,
+			 * so no need to set 'msix_lfmap' for these.
+			 */
+			cfg = rvu_read64(rvu, BLKADDR_RVUM,
+					 RVU_PRIV_HWVFX_INT_CFG(hwvf + vf));
+			nvecs = (cfg >> 12) & 0xFF;
+			cfg &= ~0x7FFULL;
+			offset = rvu_alloc_rsrc_contig(&pfvf->msix, nvecs);
+			rvu_write64(rvu, BLKADDR_RVUM,
+				    RVU_PRIV_HWVFX_INT_CFG(hwvf + vf),
+				    cfg | offset);
+		}
+	}
+
+	/* HW interprets RVU_AF_MSIXTR_BASE address as an IOVA, hence
+	 * create an IOMMU mapping for the physical address configured by
+	 * firmware and reconfigure RVU_AF_MSIXTR_BASE with the IOVA.
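+	 * If this mapping were skipped, the HW-generated accesses to the
+	 * MSI-X table would fault once an IOMMU/SMMU translation domain
+	 * is active, since the firmware-programmed address is physical.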
+ */ + cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_CONST); + max_msix = cfg & 0xFFFFF; + phy_addr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_MSIXTR_BASE); + iova = dma_map_resource(rvu->dev, phy_addr, + max_msix * PCI_MSIX_ENTRY_SIZE, + DMA_BIDIRECTIONAL, 0); + + if (dma_mapping_error(rvu->dev, iova)) + return -ENOMEM; + + rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_MSIXTR_BASE, (u64)iova); + rvu->msix_base_iova = iova; + + return 0; +} + +static void rvu_free_hw_resources(struct rvu *rvu) +{ + struct rvu_hwinfo *hw = rvu->hw; + struct rvu_block *block; + struct rvu_pfvf *pfvf; + int id, max_msix; + u64 cfg; + + /* Free block LF bitmaps */ + for (id = 0; id < BLK_COUNT; id++) { + block = &hw->block[id]; + kfree(block->lf.bmap); + } + + /* Free MSIX bitmaps */ + for (id = 0; id < hw->total_pfs; id++) { + pfvf = &rvu->pf[id]; + kfree(pfvf->msix.bmap); + } + + for (id = 0; id < hw->total_vfs; id++) { + pfvf = &rvu->hwvf[id]; + kfree(pfvf->msix.bmap); + } + + /* Unmap MSIX vector base IOVA mapping */ + if (!rvu->msix_base_iova) + return; + cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_CONST); + max_msix = cfg & 0xFFFFF; + dma_unmap_resource(rvu->dev, rvu->msix_base_iova, + max_msix * PCI_MSIX_ENTRY_SIZE, + DMA_BIDIRECTIONAL, 0); +} + +static int rvu_setup_hw_resources(struct rvu *rvu) +{ + struct rvu_hwinfo *hw = rvu->hw; + struct rvu_block *block; + int blkid, err; + u64 cfg; + + /* Get HW supported max RVU PF & VF count */ + cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_CONST); + hw->total_pfs = (cfg >> 32) & 0xFF; + hw->total_vfs = (cfg >> 20) & 0xFFF; + hw->max_vfs_per_pf = (cfg >> 40) & 0xFF; + + /* Init NPA LF's bitmap */ + block = &hw->block[BLKADDR_NPA]; + if (!block->implemented) + goto nix; + cfg = rvu_read64(rvu, BLKADDR_NPA, NPA_AF_CONST); + block->lf.max = (cfg >> 16) & 0xFFF; + block->addr = BLKADDR_NPA; + block->type = BLKTYPE_NPA; + block->lfshift = 8; + block->lookup_reg = NPA_AF_RVU_LF_CFG_DEBUG; + block->pf_lfcnt_reg = RVU_PRIV_PFX_NPA_CFG; + block->vf_lfcnt_reg = RVU_PRIV_HWVFX_NPA_CFG; + block->lfcfg_reg = NPA_PRIV_LFX_CFG; + block->msixcfg_reg = NPA_PRIV_LFX_INT_CFG; + block->lfreset_reg = NPA_AF_LF_RST; + sprintf(block->name, "NPA"); + err = rvu_alloc_bitmap(&block->lf); + if (err) + return err; + +nix: + /* Init NIX LF's bitmap */ + block = &hw->block[BLKADDR_NIX0]; + if (!block->implemented) + goto sso; + cfg = rvu_read64(rvu, BLKADDR_NIX0, NIX_AF_CONST2); + block->lf.max = cfg & 0xFFF; + block->addr = BLKADDR_NIX0; + block->type = BLKTYPE_NIX; + block->lfshift = 8; + block->lookup_reg = NIX_AF_RVU_LF_CFG_DEBUG; + block->pf_lfcnt_reg = RVU_PRIV_PFX_NIX0_CFG; + block->vf_lfcnt_reg = RVU_PRIV_HWVFX_NIX0_CFG; + block->lfcfg_reg = NIX_PRIV_LFX_CFG; + block->msixcfg_reg = NIX_PRIV_LFX_INT_CFG; + block->lfreset_reg = NIX_AF_LF_RST; + sprintf(block->name, "NIX"); + err = rvu_alloc_bitmap(&block->lf); + if (err) + return err; + +sso: + /* Init SSO group's bitmap */ + block = &hw->block[BLKADDR_SSO]; + if (!block->implemented) + goto ssow; + cfg = rvu_read64(rvu, BLKADDR_SSO, SSO_AF_CONST); + block->lf.max = cfg & 0xFFFF; + block->addr = BLKADDR_SSO; + block->type = BLKTYPE_SSO; + block->multislot = true; + block->lfshift = 3; + block->lookup_reg = SSO_AF_RVU_LF_CFG_DEBUG; + block->pf_lfcnt_reg = RVU_PRIV_PFX_SSO_CFG; + block->vf_lfcnt_reg = RVU_PRIV_HWVFX_SSO_CFG; + block->lfcfg_reg = SSO_PRIV_LFX_HWGRP_CFG; + block->msixcfg_reg = SSO_PRIV_LFX_HWGRP_INT_CFG; + block->lfreset_reg = SSO_AF_LF_HWGRP_RST; + sprintf(block->name, "SSO GROUP"); + err = rvu_alloc_bitmap(&block->lf); + if (err) + 
return err; + +ssow: + /* Init SSO workslot's bitmap */ + block = &hw->block[BLKADDR_SSOW]; + if (!block->implemented) + goto tim; + block->lf.max = (cfg >> 56) & 0xFF; + block->addr = BLKADDR_SSOW; + block->type = BLKTYPE_SSOW; + block->multislot = true; + block->lfshift = 3; + block->lookup_reg = SSOW_AF_RVU_LF_HWS_CFG_DEBUG; + block->pf_lfcnt_reg = RVU_PRIV_PFX_SSOW_CFG; + block->vf_lfcnt_reg = RVU_PRIV_HWVFX_SSOW_CFG; + block->lfcfg_reg = SSOW_PRIV_LFX_HWS_CFG; + block->msixcfg_reg = SSOW_PRIV_LFX_HWS_INT_CFG; + block->lfreset_reg = SSOW_AF_LF_HWS_RST; + sprintf(block->name, "SSOWS"); + err = rvu_alloc_bitmap(&block->lf); + if (err) + return err; + +tim: + /* Init TIM LF's bitmap */ + block = &hw->block[BLKADDR_TIM]; + if (!block->implemented) + goto cpt; + cfg = rvu_read64(rvu, BLKADDR_TIM, TIM_AF_CONST); + block->lf.max = cfg & 0xFFFF; + block->addr = BLKADDR_TIM; + block->type = BLKTYPE_TIM; + block->multislot = true; + block->lfshift = 3; + block->lookup_reg = TIM_AF_RVU_LF_CFG_DEBUG; + block->pf_lfcnt_reg = RVU_PRIV_PFX_TIM_CFG; + block->vf_lfcnt_reg = RVU_PRIV_HWVFX_TIM_CFG; + block->lfcfg_reg = TIM_PRIV_LFX_CFG; + block->msixcfg_reg = TIM_PRIV_LFX_INT_CFG; + block->lfreset_reg = TIM_AF_LF_RST; + sprintf(block->name, "TIM"); + err = rvu_alloc_bitmap(&block->lf); + if (err) + return err; + +cpt: + /* Init CPT LF's bitmap */ + block = &hw->block[BLKADDR_CPT0]; + if (!block->implemented) + goto init; + cfg = rvu_read64(rvu, BLKADDR_CPT0, CPT_AF_CONSTANTS0); + block->lf.max = cfg & 0xFF; + block->addr = BLKADDR_CPT0; + block->type = BLKTYPE_CPT; + block->multislot = true; + block->lfshift = 3; + block->lookup_reg = CPT_AF_RVU_LF_CFG_DEBUG; + block->pf_lfcnt_reg = RVU_PRIV_PFX_CPT0_CFG; + block->vf_lfcnt_reg = RVU_PRIV_HWVFX_CPT0_CFG; + block->lfcfg_reg = CPT_PRIV_LFX_CFG; + block->msixcfg_reg = CPT_PRIV_LFX_INT_CFG; + block->lfreset_reg = CPT_AF_LF_RST; + sprintf(block->name, "CPT"); + err = rvu_alloc_bitmap(&block->lf); + if (err) + return err; + +init: + /* Allocate memory for PFVF data */ + rvu->pf = devm_kcalloc(rvu->dev, hw->total_pfs, + sizeof(struct rvu_pfvf), GFP_KERNEL); + if (!rvu->pf) + return -ENOMEM; + + rvu->hwvf = devm_kcalloc(rvu->dev, hw->total_vfs, + sizeof(struct rvu_pfvf), GFP_KERNEL); + if (!rvu->hwvf) + return -ENOMEM; + + spin_lock_init(&rvu->rsrc_lock); + + err = rvu_setup_msix_resources(rvu); + if (err) + return err; + + for (blkid = 0; blkid < BLK_COUNT; blkid++) { + block = &hw->block[blkid]; + if (!block->lf.bmap) + continue; + + /* Allocate memory for block LF/slot to pcifunc mapping info */ + block->fn_map = devm_kcalloc(rvu->dev, block->lf.max, + sizeof(u16), GFP_KERNEL); + if (!block->fn_map) + return -ENOMEM; + + /* Scan all blocks to check if low level firmware has + * already provisioned any of the resources to a PF/VF. + */ + rvu_scan_block(rvu, block); + } + + return 0; +} + +static int rvu_mbox_handler_READY(struct rvu *rvu, struct msg_req *req, + struct ready_msg_rsp *rsp) +{ + return 0; +} + +/* Get current count of a RVU block's LF/slots + * provisioned to a given RVU func. + */ +static u16 rvu_get_rsrc_mapcount(struct rvu_pfvf *pfvf, int blktype) +{ + switch (blktype) { + case BLKTYPE_NPA: + return pfvf->npalf ? 1 : 0; + case BLKTYPE_NIX: + return pfvf->nixlf ? 
1 : 0; + case BLKTYPE_SSO: + return pfvf->sso; + case BLKTYPE_SSOW: + return pfvf->ssow; + case BLKTYPE_TIM: + return pfvf->timlfs; + case BLKTYPE_CPT: + return pfvf->cptlfs; + } + return 0; +} + +static int rvu_lookup_rsrc(struct rvu *rvu, struct rvu_block *block, + int pcifunc, int slot) +{ + u64 val; + + val = ((u64)pcifunc << 24) | (slot << 16) | (1ULL << 13); + rvu_write64(rvu, block->addr, block->lookup_reg, val); + /* Wait for the lookup to finish */ + /* TODO: put some timeout here */ + while (rvu_read64(rvu, block->addr, block->lookup_reg) & (1ULL << 13)) + ; + + val = rvu_read64(rvu, block->addr, block->lookup_reg); + + /* Check LF valid bit */ + if (!(val & (1ULL << 12))) + return -1; + + return (val & 0xFFF); +} + +static void rvu_detach_block(struct rvu *rvu, int pcifunc, int blktype) +{ + struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc); + struct rvu_hwinfo *hw = rvu->hw; + struct rvu_block *block; + int slot, lf, num_lfs; + int blkaddr; + + blkaddr = rvu_get_blkaddr(rvu, blktype, pcifunc); + if (blkaddr < 0) + return; + + block = &hw->block[blkaddr]; + + num_lfs = rvu_get_rsrc_mapcount(pfvf, block->type); + if (!num_lfs) + return; + + for (slot = 0; slot < num_lfs; slot++) { + lf = rvu_lookup_rsrc(rvu, block, pcifunc, slot); + if (lf < 0) /* This should never happen */ + continue; + + /* Disable the LF */ + rvu_write64(rvu, blkaddr, block->lfcfg_reg | + (lf << block->lfshift), 0x00ULL); + + /* Update SW maintained mapping info as well */ + rvu_update_rsrc_map(rvu, pfvf, block, + pcifunc, lf, false); + + /* Free the resource */ + rvu_free_rsrc(&block->lf, lf); + + /* Clear MSIX vector offset for this LF */ + rvu_clear_msix_offset(rvu, pfvf, block, lf); + } +} + +static int rvu_detach_rsrcs(struct rvu *rvu, struct rsrc_detach *detach, + u16 pcifunc) +{ + struct rvu_hwinfo *hw = rvu->hw; + bool is_pf, detach_all = true; + struct rvu_block *block; + int devnum, blkid; + + /* Check if this is for a RVU PF or VF */ + if (pcifunc & RVU_PFVF_FUNC_MASK) { + is_pf = false; + devnum = rvu_get_hwvf(rvu, pcifunc); + } else { + is_pf = true; + devnum = rvu_get_pf(pcifunc); + } + + spin_lock(&rvu->rsrc_lock); + + /* Check for partial resource detach */ + if (detach && detach->partial) + detach_all = false; + + /* Check for RVU block's LFs attached to this func, + * if so, detach them. 
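+	 * A partial detach walks only the block types flagged in the
+	 * request; a full detach covers every implemented block.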
+ */ + for (blkid = 0; blkid < BLK_COUNT; blkid++) { + block = &hw->block[blkid]; + if (!block->lf.bmap) + continue; + if (!detach_all && detach) { + if (blkid == BLKADDR_NPA && !detach->npalf) + continue; + else if ((blkid == BLKADDR_NIX0) && !detach->nixlf) + continue; + else if ((blkid == BLKADDR_SSO) && !detach->sso) + continue; + else if ((blkid == BLKADDR_SSOW) && !detach->ssow) + continue; + else if ((blkid == BLKADDR_TIM) && !detach->timlfs) + continue; + else if ((blkid == BLKADDR_CPT0) && !detach->cptlfs) + continue; + } + rvu_detach_block(rvu, pcifunc, block->type); + } + + spin_unlock(&rvu->rsrc_lock); + return 0; +} + +static int rvu_mbox_handler_DETACH_RESOURCES(struct rvu *rvu, + struct rsrc_detach *detach, + struct msg_rsp *rsp) +{ + return rvu_detach_rsrcs(rvu, detach, detach->hdr.pcifunc); +} + +static void rvu_attach_block(struct rvu *rvu, int pcifunc, + int blktype, int num_lfs) +{ + struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc); + struct rvu_hwinfo *hw = rvu->hw; + struct rvu_block *block; + int slot, lf; + int blkaddr; + u64 cfg; + + if (!num_lfs) + return; + + blkaddr = rvu_get_blkaddr(rvu, blktype, 0); + if (blkaddr < 0) + return; + + block = &hw->block[blkaddr]; + if (!block->lf.bmap) + return; + + for (slot = 0; slot < num_lfs; slot++) { + /* Allocate the resource */ + lf = rvu_alloc_rsrc(&block->lf); + if (lf < 0) + return; + + cfg = (1ULL << 63) | (pcifunc << 8) | slot; + rvu_write64(rvu, blkaddr, block->lfcfg_reg | + (lf << block->lfshift), cfg); + rvu_update_rsrc_map(rvu, pfvf, block, + pcifunc, lf, true); + + /* Set start MSIX vector for this LF within this PF/VF */ + rvu_set_msix_offset(rvu, pfvf, block, lf); + } +} + +static int rvu_check_rsrc_availability(struct rvu *rvu, + struct rsrc_attach *req, u16 pcifunc) +{ + struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc); + struct rvu_hwinfo *hw = rvu->hw; + struct rvu_block *block; + int free_lfs, mappedlfs; + + /* Only one NPA LF can be attached */ + if (req->npalf && !rvu_get_rsrc_mapcount(pfvf, BLKTYPE_NPA)) { + block = &hw->block[BLKADDR_NPA]; + free_lfs = rvu_rsrc_free_count(&block->lf); + if (!free_lfs) + goto fail; + } else if (req->npalf) { + dev_err(&rvu->pdev->dev, + "Func 0x%x: Invalid req, already has NPA\n", + pcifunc); + return -EINVAL; + } + + /* Only one NIX LF can be attached */ + if (req->nixlf && !rvu_get_rsrc_mapcount(pfvf, BLKTYPE_NIX)) { + block = &hw->block[BLKADDR_NIX0]; + free_lfs = rvu_rsrc_free_count(&block->lf); + if (!free_lfs) + goto fail; + } else if (req->nixlf) { + dev_err(&rvu->pdev->dev, + "Func 0x%x: Invalid req, already has NIX\n", + pcifunc); + return -EINVAL; + } + + if (req->sso) { + block = &hw->block[BLKADDR_SSO]; + /* Is request within limits ? 
*/
+		if (req->sso > block->lf.max) {
+			dev_err(&rvu->pdev->dev,
+				"Func 0x%x: Invalid SSO req, %d > max %d\n",
+				 pcifunc, req->sso, block->lf.max);
+			return -EINVAL;
+		}
+		mappedlfs = rvu_get_rsrc_mapcount(pfvf, block->type);
+		free_lfs = rvu_rsrc_free_count(&block->lf);
+		/* Check if additional resources are available */
+		if (req->sso > mappedlfs &&
+		    ((req->sso - mappedlfs) > free_lfs))
+			goto fail;
+	}
+
+	if (req->ssow) {
+		block = &hw->block[BLKADDR_SSOW];
+		if (req->ssow > block->lf.max) {
+			dev_err(&rvu->pdev->dev,
+				"Func 0x%x: Invalid SSOW req, %d > max %d\n",
+				 pcifunc, req->ssow, block->lf.max);
+			return -EINVAL;
+		}
+		mappedlfs = rvu_get_rsrc_mapcount(pfvf, block->type);
+		free_lfs = rvu_rsrc_free_count(&block->lf);
+		if (req->ssow > mappedlfs &&
+		    ((req->ssow - mappedlfs) > free_lfs))
+			goto fail;
+	}
+
+	if (req->timlfs) {
+		block = &hw->block[BLKADDR_TIM];
+		if (req->timlfs > block->lf.max) {
+			dev_err(&rvu->pdev->dev,
+				"Func 0x%x: Invalid TIMLF req, %d > max %d\n",
+				 pcifunc, req->timlfs, block->lf.max);
+			return -EINVAL;
+		}
+		mappedlfs = rvu_get_rsrc_mapcount(pfvf, block->type);
+		free_lfs = rvu_rsrc_free_count(&block->lf);
+		if (req->timlfs > mappedlfs &&
+		    ((req->timlfs - mappedlfs) > free_lfs))
+			goto fail;
+	}
+
+	if (req->cptlfs) {
+		block = &hw->block[BLKADDR_CPT0];
+		if (req->cptlfs > block->lf.max) {
+			dev_err(&rvu->pdev->dev,
+				"Func 0x%x: Invalid CPTLF req, %d > max %d\n",
+				 pcifunc, req->cptlfs, block->lf.max);
+			return -EINVAL;
+		}
+		mappedlfs = rvu_get_rsrc_mapcount(pfvf, block->type);
+		free_lfs = rvu_rsrc_free_count(&block->lf);
+		if (req->cptlfs > mappedlfs &&
+		    ((req->cptlfs - mappedlfs) > free_lfs))
+			goto fail;
+	}
+
+	return 0;
+
+fail:
+	dev_info(rvu->dev, "Request for %s failed\n", block->name);
+	return -ENOSPC;
+}
+
+static int rvu_mbox_handler_ATTACH_RESOURCES(struct rvu *rvu,
+					     struct rsrc_attach *attach,
+					     struct msg_rsp *rsp)
+{
+	u16 pcifunc = attach->hdr.pcifunc;
+	int devnum, err;
+	bool is_pf;
+
+	/* If first request, detach all existing attached resources */
+	if (!attach->modify)
+		rvu_detach_rsrcs(rvu, NULL, pcifunc);
+
+	/* Check if this is for a RVU PF or VF */
+	if (pcifunc & RVU_PFVF_FUNC_MASK) {
+		is_pf = false;
+		devnum = rvu_get_hwvf(rvu, pcifunc);
+	} else {
+		is_pf = true;
+		devnum = rvu_get_pf(pcifunc);
+	}
+
+	spin_lock(&rvu->rsrc_lock);
+
+	/* Check if the request can be accommodated */
+	err = rvu_check_rsrc_availability(rvu, attach, pcifunc);
+	if (err)
+		goto exit;
+
+	/* Now attach the requested resources */
+	if (attach->npalf)
+		rvu_attach_block(rvu, pcifunc, BLKTYPE_NPA, 1);
+
+	if (attach->nixlf)
+		rvu_attach_block(rvu, pcifunc, BLKTYPE_NIX, 1);
+
+	if (attach->sso) {
+		/* RVU func doesn't know which exact LF or slot is attached
+		 * to it, it always sees them as slots 0,1,2... So for a
+		 * 'modify' request, simply detach all existing attached
+		 * LFs/slots and attach afresh.
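+		 * Re-attaching the full set also keeps the slot numbering
+		 * contiguous from the RVU func's point of view.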
+ */ + if (attach->modify) + rvu_detach_block(rvu, pcifunc, BLKTYPE_SSO); + rvu_attach_block(rvu, pcifunc, BLKTYPE_SSO, attach->sso); + } + + if (attach->ssow) { + if (attach->modify) + rvu_detach_block(rvu, pcifunc, BLKTYPE_SSOW); + rvu_attach_block(rvu, pcifunc, BLKTYPE_SSOW, attach->ssow); + } + + if (attach->timlfs) { + if (attach->modify) + rvu_detach_block(rvu, pcifunc, BLKTYPE_TIM); + rvu_attach_block(rvu, pcifunc, BLKTYPE_TIM, attach->timlfs); + } + + if (attach->cptlfs) { + if (attach->modify) + rvu_detach_block(rvu, pcifunc, BLKTYPE_CPT); + rvu_attach_block(rvu, pcifunc, BLKTYPE_CPT, attach->cptlfs); + } + +exit: + spin_unlock(&rvu->rsrc_lock); + return err; +} + +static u16 rvu_get_msix_offset(struct rvu *rvu, struct rvu_pfvf *pfvf, + int blkaddr, int lf) +{ + u16 vec; + + if (lf < 0) + return MSIX_VECTOR_INVALID; + + for (vec = 0; vec < pfvf->msix.max; vec++) { + if (pfvf->msix_lfmap[vec] == MSIX_BLKLF(blkaddr, lf)) + return vec; + } + return MSIX_VECTOR_INVALID; +} + +static void rvu_set_msix_offset(struct rvu *rvu, struct rvu_pfvf *pfvf, + struct rvu_block *block, int lf) +{ + u16 nvecs, vec, offset; + u64 cfg; + + cfg = rvu_read64(rvu, block->addr, block->msixcfg_reg | + (lf << block->lfshift)); + nvecs = (cfg >> 12) & 0xFF; + + /* Check and alloc MSIX vectors, must be contiguous */ + if (!rvu_rsrc_check_contig(&pfvf->msix, nvecs)) + return; + + offset = rvu_alloc_rsrc_contig(&pfvf->msix, nvecs); + + /* Config MSIX offset in LF */ + rvu_write64(rvu, block->addr, block->msixcfg_reg | + (lf << block->lfshift), (cfg & ~0x7FFULL) | offset); + + /* Update the bitmap as well */ + for (vec = 0; vec < nvecs; vec++) + pfvf->msix_lfmap[offset + vec] = MSIX_BLKLF(block->addr, lf); +} + +static void rvu_clear_msix_offset(struct rvu *rvu, struct rvu_pfvf *pfvf, + struct rvu_block *block, int lf) +{ + u16 nvecs, vec, offset; + u64 cfg; + + cfg = rvu_read64(rvu, block->addr, block->msixcfg_reg | + (lf << block->lfshift)); + nvecs = (cfg >> 12) & 0xFF; + + /* Clear MSIX offset in LF */ + rvu_write64(rvu, block->addr, block->msixcfg_reg | + (lf << block->lfshift), cfg & ~0x7FFULL); + + offset = rvu_get_msix_offset(rvu, pfvf, block->addr, lf); + + /* Update the mapping */ + for (vec = 0; vec < nvecs; vec++) + pfvf->msix_lfmap[offset + vec] = 0; + + /* Free the same in MSIX bitmap */ + rvu_free_rsrc_contig(&pfvf->msix, nvecs, offset); +} + +static int rvu_mbox_handler_MSIX_OFFSET(struct rvu *rvu, struct msg_req *req, + struct msix_offset_rsp *rsp) +{ + struct rvu_hwinfo *hw = rvu->hw; + u16 pcifunc = req->hdr.pcifunc; + struct rvu_pfvf *pfvf; + int lf, slot; + + pfvf = rvu_get_pfvf(rvu, pcifunc); + if (!pfvf->msix.bmap) + return 0; + + /* Set MSIX offsets for each block's LFs attached to this PF/VF */ + lf = rvu_get_lf(rvu, &hw->block[BLKADDR_NPA], pcifunc, 0); + rsp->npa_msixoff = rvu_get_msix_offset(rvu, pfvf, BLKADDR_NPA, lf); + + lf = rvu_get_lf(rvu, &hw->block[BLKADDR_NIX0], pcifunc, 0); + rsp->nix_msixoff = rvu_get_msix_offset(rvu, pfvf, BLKADDR_NIX0, lf); + + rsp->sso = pfvf->sso; + for (slot = 0; slot < rsp->sso; slot++) { + lf = rvu_get_lf(rvu, &hw->block[BLKADDR_SSO], pcifunc, slot); + rsp->sso_msixoff[slot] = + rvu_get_msix_offset(rvu, pfvf, BLKADDR_SSO, lf); + } + + rsp->ssow = pfvf->ssow; + for (slot = 0; slot < rsp->ssow; slot++) { + lf = rvu_get_lf(rvu, &hw->block[BLKADDR_SSOW], pcifunc, slot); + rsp->ssow_msixoff[slot] = + rvu_get_msix_offset(rvu, pfvf, BLKADDR_SSOW, lf); + } + + rsp->timlfs = pfvf->timlfs; + for (slot = 0; slot < rsp->timlfs; slot++) { + lf = rvu_get_lf(rvu, 
&hw->block[BLKADDR_TIM], pcifunc, slot);
+		rsp->timlf_msixoff[slot] =
+			rvu_get_msix_offset(rvu, pfvf, BLKADDR_TIM, lf);
+	}
+
+	rsp->cptlfs = pfvf->cptlfs;
+	for (slot = 0; slot < rsp->cptlfs; slot++) {
+		lf = rvu_get_lf(rvu, &hw->block[BLKADDR_CPT0], pcifunc, slot);
+		rsp->cptlf_msixoff[slot] =
+			rvu_get_msix_offset(rvu, pfvf, BLKADDR_CPT0, lf);
+	}
+	return 0;
+}
+
+static int rvu_process_mbox_msg(struct rvu *rvu, int devid,
+				struct mbox_msghdr *req)
+{
+	/* Check if valid, if not reply with an invalid msg */
+	if (req->sig != OTX2_MBOX_REQ_SIG)
+		goto bad_message;
+
+	switch (req->id) {
+#define M(_name, _id, _req_type, _rsp_type)				\
+	case _id: {							\
+		struct _rsp_type *rsp;					\
+		int err;						\
+									\
+		rsp = (struct _rsp_type *)otx2_mbox_alloc_msg(		\
+			&rvu->mbox, devid,				\
+			sizeof(struct _rsp_type));			\
+		if (rsp) {						\
+			rsp->hdr.id = _id;				\
+			rsp->hdr.sig = OTX2_MBOX_RSP_SIG;		\
+			rsp->hdr.pcifunc = req->pcifunc;		\
+			rsp->hdr.rc = 0;				\
+		}							\
+									\
+		err = rvu_mbox_handler_ ## _name(rvu,			\
+						 (struct _req_type *)req, \
+						 rsp);			\
+		if (rsp && err)						\
+			rsp->hdr.rc = err;				\
+									\
+		return rsp ? err : -ENOMEM;				\
+	}
+MBOX_MESSAGES
+#undef M
+		break;
+bad_message:
+	default:
+		otx2_reply_invalid_msg(&rvu->mbox, devid, req->pcifunc,
+				       req->id);
+		return -ENODEV;
+	}
+}
+
+static void rvu_mbox_handler(struct work_struct *work)
+{
+	struct rvu_work *mwork = container_of(work, struct rvu_work, work);
+	struct rvu *rvu = mwork->rvu;
+	struct otx2_mbox_dev *mdev;
+	struct mbox_hdr *req_hdr;
+	struct mbox_msghdr *msg;
+	struct otx2_mbox *mbox;
+	int offset, id, err;
+	u16 pf;
+
+	mbox = &rvu->mbox;
+	pf = mwork - rvu->mbox_wrk;
+	mdev = &mbox->dev[pf];
+
+	/* Process received mbox messages */
+	req_hdr = mdev->mbase + mbox->rx_start;
+	if (req_hdr->num_msgs == 0)
+		return;
+
+	offset = mbox->rx_start + ALIGN(sizeof(*req_hdr), MBOX_MSG_ALIGN);
+
+	for (id = 0; id < req_hdr->num_msgs; id++) {
+		msg = mdev->mbase + offset;
+
+		/* Set which PF sent this message based on mbox IRQ */
+		msg->pcifunc &= ~(RVU_PFVF_PF_MASK << RVU_PFVF_PF_SHIFT);
+		msg->pcifunc |= (pf << RVU_PFVF_PF_SHIFT);
+		err = rvu_process_mbox_msg(rvu, pf, msg);
+		if (!err) {
+			offset = mbox->rx_start + msg->next_msgoff;
+			continue;
+		}
+
+		if (msg->pcifunc & RVU_PFVF_FUNC_MASK)
+			dev_warn(rvu->dev, "Error %d when processing message %s (0x%x) from PF%d:VF%d\n",
+				 err, otx2_mbox_id2name(msg->id), msg->id, pf,
+				 (msg->pcifunc & RVU_PFVF_FUNC_MASK) - 1);
+		else
+			dev_warn(rvu->dev, "Error %d when processing message %s (0x%x) from PF%d\n",
+				 err, otx2_mbox_id2name(msg->id), msg->id, pf);
+	}
+
+	/* Send mbox responses to PF */
+	otx2_mbox_msg_send(mbox, pf);
+}
+
+static int rvu_mbox_init(struct rvu *rvu)
+{
+	struct rvu_hwinfo *hw = rvu->hw;
+	void __iomem *hwbase = NULL;
+	struct rvu_work *mwork;
+	u64 bar4_addr;
+	int err, pf;
+
+	rvu->mbox_wq = alloc_workqueue("rvu_afpf_mailbox",
+				       WQ_UNBOUND | WQ_HIGHPRI | WQ_MEM_RECLAIM,
+				       hw->total_pfs);
+	if (!rvu->mbox_wq)
+		return -ENOMEM;
+
+	rvu->mbox_wrk = devm_kcalloc(rvu->dev, hw->total_pfs,
+				     sizeof(struct rvu_work), GFP_KERNEL);
+	if (!rvu->mbox_wrk) {
+		err = -ENOMEM;
+		goto exit;
+	}
+
+	/* Map mbox region shared with PFs */
+	bar4_addr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_PF_BAR4_ADDR);
+	/* Mailbox is a reserved memory (in RAM) region shared between
+	 * RVU devices; it shouldn't be mapped as device memory, so that
+	 * unaligned accesses are allowed.
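+	 * (hence the ioremap_wc() below rather than a device-memory
+	 * mapping such as ioremap())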
+ */ + hwbase = ioremap_wc(bar4_addr, MBOX_SIZE * hw->total_pfs); + if (!hwbase) { + dev_err(rvu->dev, "Unable to map mailbox region\n"); + err = -ENOMEM; + goto exit; + } + + err = otx2_mbox_init(&rvu->mbox, hwbase, rvu->pdev, rvu->afreg_base, + MBOX_DIR_AFPF, hw->total_pfs); + if (err) + goto exit; + + for (pf = 0; pf < hw->total_pfs; pf++) { + mwork = &rvu->mbox_wrk[pf]; + mwork->rvu = rvu; + INIT_WORK(&mwork->work, rvu_mbox_handler); + } + + return 0; +exit: + if (hwbase) + iounmap((void __iomem *)hwbase); + destroy_workqueue(rvu->mbox_wq); + return err; +} + +static void rvu_mbox_destroy(struct rvu *rvu) +{ + if (rvu->mbox_wq) { + flush_workqueue(rvu->mbox_wq); + destroy_workqueue(rvu->mbox_wq); + rvu->mbox_wq = NULL; + } + + if (rvu->mbox.hwbase) + iounmap((void __iomem *)rvu->mbox.hwbase); + + otx2_mbox_destroy(&rvu->mbox); +} + +static irqreturn_t rvu_mbox_intr_handler(int irq, void *rvu_irq) +{ + struct rvu *rvu = (struct rvu *)rvu_irq; + struct otx2_mbox_dev *mdev; + struct otx2_mbox *mbox; + struct mbox_hdr *hdr; + u64 intr; + u8 pf; + + intr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_PFAF_MBOX_INT); + /* Clear interrupts */ + rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFAF_MBOX_INT, intr); + + /* Sync with mbox memory region */ + smp_wmb(); + + for (pf = 0; pf < rvu->hw->total_pfs; pf++) { + if (intr & (1ULL << pf)) { + mbox = &rvu->mbox; + mdev = &mbox->dev[pf]; + hdr = mdev->mbase + mbox->rx_start; + if (hdr->num_msgs) + queue_work(rvu->mbox_wq, + &rvu->mbox_wrk[pf].work); + } + } + + return IRQ_HANDLED; +} + +static void rvu_enable_mbox_intr(struct rvu *rvu) +{ + struct rvu_hwinfo *hw = rvu->hw; + + /* Clear spurious irqs, if any */ + rvu_write64(rvu, BLKADDR_RVUM, + RVU_AF_PFAF_MBOX_INT, INTR_MASK(hw->total_pfs)); + + /* Enable mailbox interrupt for all PFs except PF0 i.e AF itself */ + rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFAF_MBOX_INT_ENA_W1S, + INTR_MASK(hw->total_pfs) & ~1ULL); +} + +static void rvu_unregister_interrupts(struct rvu *rvu) +{ + int irq; + + /* Disable the Mbox interrupt */ + rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFAF_MBOX_INT_ENA_W1C, + INTR_MASK(rvu->hw->total_pfs) & ~1ULL); + + for (irq = 0; irq < rvu->num_vec; irq++) { + if (rvu->irq_allocated[irq]) + free_irq(pci_irq_vector(rvu->pdev, irq), rvu); + } + + pci_free_irq_vectors(rvu->pdev); + rvu->num_vec = 0; +} + +static int rvu_register_interrupts(struct rvu *rvu) +{ + int ret; + + rvu->num_vec = pci_msix_vec_count(rvu->pdev); + + rvu->irq_name = devm_kmalloc_array(rvu->dev, rvu->num_vec, + NAME_SIZE, GFP_KERNEL); + if (!rvu->irq_name) + return -ENOMEM; + + rvu->irq_allocated = devm_kcalloc(rvu->dev, rvu->num_vec, + sizeof(bool), GFP_KERNEL); + if (!rvu->irq_allocated) + return -ENOMEM; + + /* Enable MSI-X */ + ret = pci_alloc_irq_vectors(rvu->pdev, rvu->num_vec, + rvu->num_vec, PCI_IRQ_MSIX); + if (ret < 0) { + dev_err(rvu->dev, + "RVUAF: Request for %d msix vectors failed, ret %d\n", + rvu->num_vec, ret); + return ret; + } + + /* Register mailbox interrupt handler */ + sprintf(&rvu->irq_name[RVU_AF_INT_VEC_MBOX * NAME_SIZE], "RVUAF Mbox"); + ret = request_irq(pci_irq_vector(rvu->pdev, RVU_AF_INT_VEC_MBOX), + rvu_mbox_intr_handler, 0, + &rvu->irq_name[RVU_AF_INT_VEC_MBOX * NAME_SIZE], rvu); + if (ret) { + dev_err(rvu->dev, + "RVUAF: IRQ registration failed for mbox irq\n"); + goto fail; + } + + rvu->irq_allocated[RVU_AF_INT_VEC_MBOX] = true; + + /* Enable mailbox interrupts from all PFs */ + rvu_enable_mbox_intr(rvu); + + return 0; + +fail: + pci_free_irq_vectors(rvu->pdev); + return ret; +} + +static int 
rvu_probe(struct pci_dev *pdev, const struct pci_device_id *id) +{ + struct device *dev = &pdev->dev; + struct rvu *rvu; + int err; + + rvu = devm_kzalloc(dev, sizeof(*rvu), GFP_KERNEL); + if (!rvu) + return -ENOMEM; + + rvu->hw = devm_kzalloc(dev, sizeof(struct rvu_hwinfo), GFP_KERNEL); + if (!rvu->hw) { + devm_kfree(dev, rvu); + return -ENOMEM; + } + + pci_set_drvdata(pdev, rvu); + rvu->pdev = pdev; + rvu->dev = &pdev->dev; + + err = pci_enable_device(pdev); + if (err) { + dev_err(dev, "Failed to enable PCI device\n"); + goto err_freemem; + } + + err = pci_request_regions(pdev, DRV_NAME); + if (err) { + dev_err(dev, "PCI request regions failed 0x%x\n", err); + goto err_disable_device; + } + + err = pci_set_dma_mask(pdev, DMA_BIT_MASK(48)); + if (err) { + dev_err(dev, "Unable to set DMA mask\n"); + goto err_release_regions; + } + + err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(48)); + if (err) { + dev_err(dev, "Unable to set consistent DMA mask\n"); + goto err_release_regions; + } + + /* Map Admin function CSRs */ + rvu->afreg_base = pcim_iomap(pdev, PCI_AF_REG_BAR_NUM, 0); + rvu->pfreg_base = pcim_iomap(pdev, PCI_PF_REG_BAR_NUM, 0); + if (!rvu->afreg_base || !rvu->pfreg_base) { + dev_err(dev, "Unable to map admin function CSRs, aborting\n"); + err = -ENOMEM; + goto err_release_regions; + } + + /* Check which blocks the HW supports */ + rvu_check_block_implemented(rvu); + + rvu_reset_all_blocks(rvu); + + err = rvu_setup_hw_resources(rvu); + if (err) + goto err_release_regions; + + err = rvu_mbox_init(rvu); + if (err) + goto err_hwsetup; + + err = rvu_cgx_probe(rvu); + if (err) + goto err_mbox; + + err = rvu_register_interrupts(rvu); + if (err) + goto err_cgx; + + return 0; +err_cgx: + rvu_cgx_wq_destroy(rvu); +err_mbox: + rvu_mbox_destroy(rvu); +err_hwsetup: + rvu_reset_all_blocks(rvu); + rvu_free_hw_resources(rvu); +err_release_regions: + pci_release_regions(pdev); +err_disable_device: + pci_disable_device(pdev); +err_freemem: + pci_set_drvdata(pdev, NULL); + devm_kfree(&pdev->dev, rvu->hw); + devm_kfree(dev, rvu); + return err; +} + +static void rvu_remove(struct pci_dev *pdev) +{ + struct rvu *rvu = pci_get_drvdata(pdev); + + rvu_unregister_interrupts(rvu); + rvu_cgx_wq_destroy(rvu); + rvu_mbox_destroy(rvu); + rvu_reset_all_blocks(rvu); + rvu_free_hw_resources(rvu); + + pci_release_regions(pdev); + pci_disable_device(pdev); + pci_set_drvdata(pdev, NULL); + + devm_kfree(&pdev->dev, rvu->hw); + devm_kfree(&pdev->dev, rvu); +} + +static struct pci_driver rvu_driver = { + .name = DRV_NAME, + .id_table = rvu_id_table, + .probe = rvu_probe, + .remove = rvu_remove, +}; + +static int __init rvu_init_module(void) +{ + int err; + + pr_info("%s: %s\n", DRV_NAME, DRV_STRING); + + err = pci_register_driver(&cgx_driver); + if (err < 0) + return err; + + err = pci_register_driver(&rvu_driver); + if (err < 0) + pci_unregister_driver(&cgx_driver); + + return err; +} + +static void __exit rvu_cleanup_module(void) +{ + pci_unregister_driver(&rvu_driver); + pci_unregister_driver(&cgx_driver); +} + +module_init(rvu_init_module); +module_exit(rvu_cleanup_module); diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h new file mode 100644 index 000000000000..d169fa9eb45e --- /dev/null +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h @@ -0,0 +1,158 @@ +/* SPDX-License-Identifier: GPL-2.0 + * Marvell OcteonTx2 RVU Admin Function driver + * + * Copyright (C) 2018 Marvell International Ltd. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef RVU_H +#define RVU_H + +#include "rvu_struct.h" +#include "mbox.h" + +/* PCI device IDs */ +#define PCI_DEVID_OCTEONTX2_RVU_AF 0xA065 + +/* PCI BAR nos */ +#define PCI_AF_REG_BAR_NUM 0 +#define PCI_PF_REG_BAR_NUM 2 +#define PCI_MBOX_BAR_NUM 4 + +#define NAME_SIZE 32 + +/* PF_FUNC */ +#define RVU_PFVF_PF_SHIFT 10 +#define RVU_PFVF_PF_MASK 0x3F +#define RVU_PFVF_FUNC_SHIFT 0 +#define RVU_PFVF_FUNC_MASK 0x3FF + +struct rvu_work { + struct work_struct work; + struct rvu *rvu; +}; + +struct rsrc_bmap { + unsigned long *bmap; /* Pointer to resource bitmap */ + u16 max; /* Max resource id or count */ +}; + +struct rvu_block { + struct rsrc_bmap lf; + u16 *fn_map; /* LF to pcifunc mapping */ + bool multislot; + bool implemented; + u8 addr; /* RVU_BLOCK_ADDR_E */ + u8 type; /* RVU_BLOCK_TYPE_E */ + u8 lfshift; + u64 lookup_reg; + u64 pf_lfcnt_reg; + u64 vf_lfcnt_reg; + u64 lfcfg_reg; + u64 msixcfg_reg; + u64 lfreset_reg; + unsigned char name[NAME_SIZE]; +}; + +/* Structure for per RVU func info ie PF/VF */ +struct rvu_pfvf { + bool npalf; /* Only one NPALF per RVU_FUNC */ + bool nixlf; /* Only one NIXLF per RVU_FUNC */ + u16 sso; + u16 ssow; + u16 cptlfs; + u16 timlfs; + + /* Block LF's MSIX vector info */ + struct rsrc_bmap msix; /* Bitmap for MSIX vector alloc */ +#define MSIX_BLKLF(blkaddr, lf) (((blkaddr) << 8) | ((lf) & 0xFF)) + u16 *msix_lfmap; /* Vector to block LF mapping */ +}; + +struct rvu_hwinfo { + u8 total_pfs; /* MAX RVU PFs HW supports */ + u16 total_vfs; /* Max RVU VFs HW supports */ + u16 max_vfs_per_pf; /* Max VFs that can be attached to a PF */ + + struct rvu_block block[BLK_COUNT]; /* Block info */ +}; + +struct rvu { + void __iomem *afreg_base; + void __iomem *pfreg_base; + struct pci_dev *pdev; + struct device *dev; + struct rvu_hwinfo *hw; + struct rvu_pfvf *pf; + struct rvu_pfvf *hwvf; + spinlock_t rsrc_lock; /* Serialize resource alloc/free */ + + /* Mbox */ + struct otx2_mbox mbox; + struct rvu_work *mbox_wrk; + struct workqueue_struct *mbox_wq; + + /* MSI-X */ + u16 num_vec; + char *irq_name; + bool *irq_allocated; + dma_addr_t msix_base_iova; + + /* CGX */ +#define PF_CGXMAP_BASE 1 /* PF 0 is reserved for RVU PF */ + u8 cgx_mapped_pfs; + u8 cgx_cnt; /* available cgx ports */ + u8 *pf2cgxlmac_map; /* pf to cgx_lmac map */ + u16 *cgxlmac2pf_map; /* bitmap of mapped pfs for + * every cgx lmac port + */ + void **cgx_idmap; /* cgx id to cgx data map table */ + struct work_struct cgx_evh_work; + struct workqueue_struct *cgx_evh_wq; + spinlock_t cgx_evq_lock; /* cgx event queue lock */ + struct list_head cgx_evq_head; /* cgx event queue head */ +}; + +static inline void rvu_write64(struct rvu *rvu, u64 block, u64 offset, u64 val) +{ + writeq(val, rvu->afreg_base + ((block << 28) | offset)); +} + +static inline u64 rvu_read64(struct rvu *rvu, u64 block, u64 offset) +{ + return readq(rvu->afreg_base + ((block << 28) | offset)); +} + +static inline void rvupf_write64(struct rvu *rvu, u64 offset, u64 val) +{ + writeq(val, rvu->pfreg_base + offset); +} + +static inline u64 rvupf_read64(struct rvu *rvu, u64 offset) +{ + return readq(rvu->pfreg_base + offset); +} + +/* Function Prototypes + * RVU + */ + +int rvu_alloc_bitmap(struct rsrc_bmap *rsrc); +int rvu_alloc_rsrc(struct rsrc_bmap *rsrc); +void rvu_free_rsrc(struct rsrc_bmap *rsrc, int id); +int rvu_rsrc_free_count(struct 
rsrc_bmap *rsrc); +int rvu_get_pf(u16 pcifunc); +struct rvu_pfvf *rvu_get_pfvf(struct rvu *rvu, int pcifunc); +void rvu_get_pf_numvfs(struct rvu *rvu, int pf, int *numvfs, int *hwvf); +bool is_block_implemented(struct rvu_hwinfo *hw, int blkaddr); +int rvu_get_lf(struct rvu *rvu, struct rvu_block *block, u16 pcifunc, u16 slot); +int rvu_get_blkaddr(struct rvu *rvu, int blktype, u16 pcifunc); +int rvu_poll_reg(struct rvu *rvu, u64 block, u64 offset, u64 mask, bool zero); + +/* CGX APIs */ +int rvu_cgx_probe(struct rvu *rvu); +void rvu_cgx_wq_destroy(struct rvu *rvu); +#endif /* RVU_H */ diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c new file mode 100644 index 000000000000..5ecc22308b22 --- /dev/null +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c @@ -0,0 +1,194 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Marvell OcteonTx2 RVU Admin Function driver + * + * Copyright (C) 2018 Marvell International Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include <linux/types.h> +#include <linux/module.h> +#include <linux/pci.h> + +#include "rvu.h" +#include "cgx.h" + +struct cgx_evq_entry { + struct list_head evq_node; + struct cgx_link_event link_event; +}; + +static inline u8 cgxlmac_id_to_bmap(u8 cgx_id, u8 lmac_id) +{ + return ((cgx_id & 0xF) << 4) | (lmac_id & 0xF); +} + +static void *rvu_cgx_pdata(u8 cgx_id, struct rvu *rvu) +{ + if (cgx_id >= rvu->cgx_cnt) + return NULL; + + return rvu->cgx_idmap[cgx_id]; +} + +static int rvu_map_cgx_lmac_pf(struct rvu *rvu) +{ + int cgx_cnt = rvu->cgx_cnt; + int cgx, lmac_cnt, lmac; + int pf = PF_CGXMAP_BASE; + int size; + + if (!cgx_cnt) + return 0; + + if (cgx_cnt > 0xF || MAX_LMAC_PER_CGX > 0xF) + return -EINVAL; + + /* Alloc map table + * An additional entry is required since PF id starts from 1 and + * hence entry at offset 0 is invalid. 
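+	 * (PF0 is reserved for the AF itself; see PF_CGXMAP_BASE)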
+ */ + size = (cgx_cnt * MAX_LMAC_PER_CGX + 1) * sizeof(u8); + rvu->pf2cgxlmac_map = devm_kzalloc(rvu->dev, size, GFP_KERNEL); + if (!rvu->pf2cgxlmac_map) + return -ENOMEM; + + /* Initialize offset 0 with an invalid cgx and lmac id */ + rvu->pf2cgxlmac_map[0] = 0xFF; + + /* Reverse map table */ + rvu->cgxlmac2pf_map = devm_kzalloc(rvu->dev, + cgx_cnt * MAX_LMAC_PER_CGX * sizeof(u16), + GFP_KERNEL); + if (!rvu->cgxlmac2pf_map) + return -ENOMEM; + + rvu->cgx_mapped_pfs = 0; + for (cgx = 0; cgx < cgx_cnt; cgx++) { + lmac_cnt = cgx_get_lmac_cnt(rvu_cgx_pdata(cgx, rvu)); + for (lmac = 0; lmac < lmac_cnt; lmac++, pf++) { + rvu->pf2cgxlmac_map[pf] = cgxlmac_id_to_bmap(cgx, lmac); + rvu->cgxlmac2pf_map[CGX_OFFSET(cgx) + lmac] = 1 << pf; + rvu->cgx_mapped_pfs++; + } + } + return 0; +} + +/* This is called from interrupt context and is expected to be atomic */ +static int cgx_lmac_postevent(struct cgx_link_event *event, void *data) +{ + struct cgx_evq_entry *qentry; + struct rvu *rvu = data; + + /* post event to the event queue */ + qentry = kmalloc(sizeof(*qentry), GFP_ATOMIC); + if (!qentry) + return -ENOMEM; + qentry->link_event = *event; + spin_lock(&rvu->cgx_evq_lock); + list_add_tail(&qentry->evq_node, &rvu->cgx_evq_head); + spin_unlock(&rvu->cgx_evq_lock); + + /* start worker to process the events */ + queue_work(rvu->cgx_evh_wq, &rvu->cgx_evh_work); + + return 0; +} + +static void cgx_evhandler_task(struct work_struct *work) +{ + struct rvu *rvu = container_of(work, struct rvu, cgx_evh_work); + struct cgx_evq_entry *qentry; + struct cgx_link_event *event; + unsigned long flags; + + do { + /* Dequeue an event */ + spin_lock_irqsave(&rvu->cgx_evq_lock, flags); + qentry = list_first_entry_or_null(&rvu->cgx_evq_head, + struct cgx_evq_entry, + evq_node); + if (qentry) + list_del(&qentry->evq_node); + spin_unlock_irqrestore(&rvu->cgx_evq_lock, flags); + if (!qentry) + break; /* nothing more to process */ + + event = &qentry->link_event; + + /* Do nothing for now */ + kfree(qentry); + } while (1); +} + +static void cgx_lmac_event_handler_init(struct rvu *rvu) +{ + struct cgx_event_cb cb; + int cgx, lmac, err; + void *cgxd; + + spin_lock_init(&rvu->cgx_evq_lock); + INIT_LIST_HEAD(&rvu->cgx_evq_head); + INIT_WORK(&rvu->cgx_evh_work, cgx_evhandler_task); + rvu->cgx_evh_wq = alloc_workqueue("rvu_evh_wq", 0, 0); + if (!rvu->cgx_evh_wq) { + dev_err(rvu->dev, "alloc workqueue failed"); + return; + } + + cb.notify_link_chg = cgx_lmac_postevent; /* link change call back */ + cb.data = rvu; + + for (cgx = 0; cgx < rvu->cgx_cnt; cgx++) { + cgxd = rvu_cgx_pdata(cgx, rvu); + for (lmac = 0; lmac < cgx_get_lmac_cnt(cgxd); lmac++) { + err = cgx_lmac_evh_register(&cb, cgxd, lmac); + if (err) + dev_err(rvu->dev, + "%d:%d handler register failed\n", + cgx, lmac); + } + } +} + +void rvu_cgx_wq_destroy(struct rvu *rvu) +{ + if (rvu->cgx_evh_wq) { + flush_workqueue(rvu->cgx_evh_wq); + destroy_workqueue(rvu->cgx_evh_wq); + rvu->cgx_evh_wq = NULL; + } +} + +int rvu_cgx_probe(struct rvu *rvu) +{ + int i, err; + + /* find available cgx ports */ + rvu->cgx_cnt = cgx_get_cgx_cnt(); + if (!rvu->cgx_cnt) { + dev_info(rvu->dev, "No CGX devices found!\n"); + return -ENODEV; + } + + rvu->cgx_idmap = devm_kzalloc(rvu->dev, rvu->cgx_cnt * sizeof(void *), + GFP_KERNEL); + if (!rvu->cgx_idmap) + return -ENOMEM; + + /* Initialize the cgxdata table */ + for (i = 0; i < rvu->cgx_cnt; i++) + rvu->cgx_idmap[i] = cgx_get_pdata(i); + + /* Map CGX LMAC interfaces to RVU PFs */ + err = rvu_map_cgx_lmac_pf(rvu); + if (err) + return err; + + /* 
Register for CGX events */ + cgx_lmac_event_handler_init(rvu); + return 0; +} diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h new file mode 100644 index 000000000000..d871a394e72b --- /dev/null +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h @@ -0,0 +1,441 @@ +/* SPDX-License-Identifier: GPL-2.0 + * Marvell OcteonTx2 RVU Admin Function driver + * + * Copyright (C) 2018 Marvell International Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef RVU_REG_H +#define RVU_REG_H + +/* Admin function registers */ +#define RVU_AF_MSIXTR_BASE (0x10) +#define RVU_AF_ECO (0x20) +#define RVU_AF_BLK_RST (0x30) +#define RVU_AF_PF_BAR4_ADDR (0x40) +#define RVU_AF_RAS (0x100) +#define RVU_AF_RAS_W1S (0x108) +#define RVU_AF_RAS_ENA_W1S (0x110) +#define RVU_AF_RAS_ENA_W1C (0x118) +#define RVU_AF_GEN_INT (0x120) +#define RVU_AF_GEN_INT_W1S (0x128) +#define RVU_AF_GEN_INT_ENA_W1S (0x130) +#define RVU_AF_GEN_INT_ENA_W1C (0x138) +#define RVU_AF_AFPF_MBOX0 (0x02000) +#define RVU_AF_AFPF_MBOX1 (0x02008) +#define RVU_AF_AFPFX_MBOXX(a, b) (0x2000 | (a) << 4 | (b) << 3) +#define RVU_AF_PFME_STATUS (0x2800) +#define RVU_AF_PFTRPEND (0x2810) +#define RVU_AF_PFTRPEND_W1S (0x2820) +#define RVU_AF_PF_RST (0x2840) +#define RVU_AF_HWVF_RST (0x2850) +#define RVU_AF_PFAF_MBOX_INT (0x2880) +#define RVU_AF_PFAF_MBOX_INT_W1S (0x2888) +#define RVU_AF_PFAF_MBOX_INT_ENA_W1S (0x2890) +#define RVU_AF_PFAF_MBOX_INT_ENA_W1C (0x2898) +#define RVU_AF_PFFLR_INT (0x28a0) +#define RVU_AF_PFFLR_INT_W1S (0x28a8) +#define RVU_AF_PFFLR_INT_ENA_W1S (0x28b0) +#define RVU_AF_PFFLR_INT_ENA_W1C (0x28b8) +#define RVU_AF_PFME_INT (0x28c0) +#define RVU_AF_PFME_INT_W1S (0x28c8) +#define RVU_AF_PFME_INT_ENA_W1S (0x28d0) +#define RVU_AF_PFME_INT_ENA_W1C (0x28d8) + +/* Admin function's privileged PF/VF registers */ +#define RVU_PRIV_CONST (0x8000000) +#define RVU_PRIV_GEN_CFG (0x8000010) +#define RVU_PRIV_CLK_CFG (0x8000020) +#define RVU_PRIV_ACTIVE_PC (0x8000030) +#define RVU_PRIV_PFX_CFG(a) (0x8000100 | (a) << 16) +#define RVU_PRIV_PFX_MSIX_CFG(a) (0x8000110 | (a) << 16) +#define RVU_PRIV_PFX_ID_CFG(a) (0x8000120 | (a) << 16) +#define RVU_PRIV_PFX_INT_CFG(a) (0x8000200 | (a) << 16) +#define RVU_PRIV_PFX_NIX0_CFG (0x8000300) +#define RVU_PRIV_PFX_NPA_CFG (0x8000310) +#define RVU_PRIV_PFX_SSO_CFG (0x8000320) +#define RVU_PRIV_PFX_SSOW_CFG (0x8000330) +#define RVU_PRIV_PFX_TIM_CFG (0x8000340) +#define RVU_PRIV_PFX_CPT0_CFG (0x8000350) +#define RVU_PRIV_BLOCK_TYPEX_REV(a) (0x8000400 | (a) << 3) +#define RVU_PRIV_HWVFX_INT_CFG(a) (0x8001280 | (a) << 16) +#define RVU_PRIV_HWVFX_NIX0_CFG (0x8001300) +#define RVU_PRIV_HWVFX_NPA_CFG (0x8001310) +#define RVU_PRIV_HWVFX_SSO_CFG (0x8001320) +#define RVU_PRIV_HWVFX_SSOW_CFG (0x8001330) +#define RVU_PRIV_HWVFX_TIM_CFG (0x8001340) +#define RVU_PRIV_HWVFX_CPT0_CFG (0x8001350) + +/* RVU PF registers */ +#define RVU_PF_VFX_PFVF_MBOX0 (0x00000) +#define RVU_PF_VFX_PFVF_MBOX1 (0x00008) +#define RVU_PF_VFX_PFVF_MBOXX(a, b) (0x0 | (a) << 12 | (b) << 3) +#define RVU_PF_VF_BAR4_ADDR (0x10) +#define RVU_PF_BLOCK_ADDRX_DISC(a) (0x200 | (a) << 3) +#define RVU_PF_VFME_STATUSX(a) (0x800 | (a) << 3) +#define RVU_PF_VFTRPENDX(a) (0x820 | (a) << 3) +#define RVU_PF_VFTRPEND_W1SX(a) (0x840 | (a) << 3) +#define RVU_PF_VFPF_MBOX_INTX(a) (0x880 | (a) << 3) +#define RVU_PF_VFPF_MBOX_INT_W1SX(a) (0x8A0 | 
(a) << 3) +#define RVU_PF_VFPF_MBOX_INT_ENA_W1SX(a) (0x8C0 | (a) << 3) +#define RVU_PF_VFPF_MBOX_INT_ENA_W1CX(a) (0x8E0 | (a) << 3) +#define RVU_PF_VFFLR_INTX(a) (0x900 | (a) << 3) +#define RVU_PF_VFFLR_INT_W1SX(a) (0x920 | (a) << 3) +#define RVU_PF_VFFLR_INT_ENA_W1SX(a) (0x940 | (a) << 3) +#define RVU_PF_VFFLR_INT_ENA_W1CX(a) (0x960 | (a) << 3) +#define RVU_PF_VFME_INTX(a) (0x980 | (a) << 3) +#define RVU_PF_VFME_INT_W1SX(a) (0x9A0 | (a) << 3) +#define RVU_PF_VFME_INT_ENA_W1SX(a) (0x9C0 | (a) << 3) +#define RVU_PF_VFME_INT_ENA_W1CX(a) (0x9E0 | (a) << 3) +#define RVU_PF_PFAF_MBOX0 (0xC00) +#define RVU_PF_PFAF_MBOX1 (0xC08) +#define RVU_PF_PFAF_MBOXX(a) (0xC00 | (a) << 3) +#define RVU_PF_INT (0xc20) +#define RVU_PF_INT_W1S (0xc28) +#define RVU_PF_INT_ENA_W1S (0xc30) +#define RVU_PF_INT_ENA_W1C (0xc38) +#define RVU_PF_MSIX_VECX_ADDR(a) (0x000 | (a) << 4) +#define RVU_PF_MSIX_VECX_CTL(a) (0x008 | (a) << 4) +#define RVU_PF_MSIX_PBAX(a) (0xF0000 | (a) << 3) + +/* RVU VF registers */ +#define RVU_VF_VFPF_MBOX0 (0x00000) +#define RVU_VF_VFPF_MBOX1 (0x00008) + +/* NPA block's admin function registers */ +#define NPA_AF_BLK_RST (0x0000) +#define NPA_AF_CONST (0x0010) +#define NPA_AF_CONST1 (0x0018) +#define NPA_AF_LF_RST (0x0020) +#define NPA_AF_GEN_CFG (0x0030) +#define NPA_AF_NDC_CFG (0x0040) +#define NPA_AF_INP_CTL (0x00D0) +#define NPA_AF_ACTIVE_CYCLES_PC (0x00F0) +#define NPA_AF_AVG_DELAY (0x0100) +#define NPA_AF_GEN_INT (0x0140) +#define NPA_AF_GEN_INT_W1S (0x0148) +#define NPA_AF_GEN_INT_ENA_W1S (0x0150) +#define NPA_AF_GEN_INT_ENA_W1C (0x0158) +#define NPA_AF_RVU_INT (0x0160) +#define NPA_AF_RVU_INT_W1S (0x0168) +#define NPA_AF_RVU_INT_ENA_W1S (0x0170) +#define NPA_AF_RVU_INT_ENA_W1C (0x0178) +#define NPA_AF_ERR_INT (0x0180) +#define NPA_AF_ERR_INT_W1S (0x0188) +#define NPA_AF_ERR_INT_ENA_W1S (0x0190) +#define NPA_AF_ERR_INT_ENA_W1C (0x0198) +#define NPA_AF_RAS (0x01A0) +#define NPA_AF_RAS_W1S (0x01A8) +#define NPA_AF_RAS_ENA_W1S (0x01B0) +#define NPA_AF_RAS_ENA_W1C (0x01B8) +#define NPA_AF_BP_TEST (0x0200) +#define NPA_AF_ECO (0x0300) +#define NPA_AF_AQ_CFG (0x0600) +#define NPA_AF_AQ_BASE (0x0610) +#define NPA_AF_AQ_STATUS (0x0620) +#define NPA_AF_AQ_DOOR (0x0630) +#define NPA_AF_AQ_DONE_WAIT (0x0640) +#define NPA_AF_AQ_DONE (0x0650) +#define NPA_AF_AQ_DONE_ACK (0x0660) +#define NPA_AF_AQ_DONE_INT (0x0680) +#define NPA_AF_AQ_DONE_INT_W1S (0x0688) +#define NPA_AF_AQ_DONE_ENA_W1S (0x0690) +#define NPA_AF_AQ_DONE_ENA_W1C (0x0698) +#define NPA_AF_LFX_AURAS_CFG(a) (0x4000 | (a) << 18) +#define NPA_AF_LFX_LOC_AURAS_BASE(a) (0x4010 | (a) << 18) +#define NPA_AF_LFX_QINTS_CFG(a) (0x4100 | (a) << 18) +#define NPA_AF_LFX_QINTS_BASE(a) (0x4110 | (a) << 18) +#define NPA_PRIV_AF_INT_CFG (0x10000) +#define NPA_PRIV_LFX_CFG (0x10010) +#define NPA_PRIV_LFX_INT_CFG (0x10020) +#define NPA_AF_RVU_LF_CFG_DEBUG (0x10030) + +/* NIX block's admin function registers */ +#define NIX_AF_CFG (0x0000) +#define NIX_AF_STATUS (0x0010) +#define NIX_AF_NDC_CFG (0x0018) +#define NIX_AF_CONST (0x0020) +#define NIX_AF_CONST1 (0x0028) +#define NIX_AF_CONST2 (0x0030) +#define NIX_AF_CONST3 (0x0038) +#define NIX_AF_SQ_CONST (0x0040) +#define NIX_AF_CQ_CONST (0x0048) +#define NIX_AF_RQ_CONST (0x0050) +#define NIX_AF_PSE_CONST (0x0060) +#define NIX_AF_TL1_CONST (0x0070) +#define NIX_AF_TL2_CONST (0x0078) +#define NIX_AF_TL3_CONST (0x0080) +#define NIX_AF_TL4_CONST (0x0088) +#define NIX_AF_MDQ_CONST (0x0090) +#define NIX_AF_MC_MIRROR_CONST (0x0098) +#define NIX_AF_LSO_CFG (0x00A8) +#define NIX_AF_BLK_RST (0x00B0) +#define 
NIX_AF_TX_TSTMP_CFG (0x00C0) +#define NIX_AF_RX_CFG (0x00D0) +#define NIX_AF_AVG_DELAY (0x00E0) +#define NIX_AF_CINT_DELAY (0x00F0) +#define NIX_AF_RX_MCAST_BASE (0x0100) +#define NIX_AF_RX_MCAST_CFG (0x0110) +#define NIX_AF_RX_MCAST_BUF_BASE (0x0120) +#define NIX_AF_RX_MCAST_BUF_CFG (0x0130) +#define NIX_AF_RX_MIRROR_BUF_BASE (0x0140) +#define NIX_AF_RX_MIRROR_BUF_CFG (0x0148) +#define NIX_AF_LF_RST (0x0150) +#define NIX_AF_GEN_INT (0x0160) +#define NIX_AF_GEN_INT_W1S (0x0168) +#define NIX_AF_GEN_INT_ENA_W1S (0x0170) +#define NIX_AF_GEN_INT_ENA_W1C (0x0178) +#define NIX_AF_ERR_INT (0x0180) +#define NIX_AF_ERR_INT_W1S (0x0188) +#define NIX_AF_ERR_INT_ENA_W1S (0x0190) +#define NIX_AF_ERR_INT_ENA_W1C (0x0198) +#define NIX_AF_RAS (0x01A0) +#define NIX_AF_RAS_W1S (0x01A8) +#define NIX_AF_RAS_ENA_W1S (0x01B0) +#define NIX_AF_RAS_ENA_W1C (0x01B8) +#define NIX_AF_RVU_INT (0x01C0) +#define NIX_AF_RVU_INT_W1S (0x01C8) +#define NIX_AF_RVU_INT_ENA_W1S (0x01D0) +#define NIX_AF_RVU_INT_ENA_W1C (0x01D8) +#define NIX_AF_TCP_TIMER (0x01E0) +#define NIX_AF_RX_WQE_TAG_CTL (0x01F0) +#define NIX_AF_RX_DEF_OL2 (0x0200) +#define NIX_AF_RX_DEF_OIP4 (0x0210) +#define NIX_AF_RX_DEF_IIP4 (0x0220) +#define NIX_AF_RX_DEF_OIP6 (0x0230) +#define NIX_AF_RX_DEF_IIP6 (0x0240) +#define NIX_AF_RX_DEF_OTCP (0x0250) +#define NIX_AF_RX_DEF_ITCP (0x0260) +#define NIX_AF_RX_DEF_OUDP (0x0270) +#define NIX_AF_RX_DEF_IUDP (0x0280) +#define NIX_AF_RX_DEF_OSCTP (0x0290) +#define NIX_AF_RX_DEF_ISCTP (0x02A0) +#define NIX_AF_RX_DEF_IPSECX (0x02B0) +#define NIX_AF_RX_IPSEC_GEN_CFG (0x0300) +#define NIX_AF_RX_CPTX_INST_ADDR (0x0310) +#define NIX_AF_NDC_TX_SYNC (0x03F0) +#define NIX_AF_AQ_CFG (0x0400) +#define NIX_AF_AQ_BASE (0x0410) +#define NIX_AF_AQ_STATUS (0x0420) +#define NIX_AF_AQ_DOOR (0x0430) +#define NIX_AF_AQ_DONE_WAIT (0x0440) +#define NIX_AF_AQ_DONE (0x0450) +#define NIX_AF_AQ_DONE_ACK (0x0460) +#define NIX_AF_AQ_DONE_TIMER (0x0470) +#define NIX_AF_AQ_DONE_INT (0x0480) +#define NIX_AF_AQ_DONE_INT_W1S (0x0488) +#define NIX_AF_AQ_DONE_ENA_W1S (0x0490) +#define NIX_AF_AQ_DONE_ENA_W1C (0x0498) +#define NIX_AF_RX_LINKX_SLX_SPKT_CNT (0x0500) +#define NIX_AF_RX_LINKX_SLX_SXQE_CNT (0x0510) +#define NIX_AF_RX_MCAST_JOBSX_SW_CNT (0x0520) +#define NIX_AF_RX_MIRROR_JOBSX_SW_CNT (0x0530) +#define NIX_AF_RX_LINKX_CFG(a) (0x0540 | (a) << 16) +#define NIX_AF_RX_SW_SYNC (0x0550) +#define NIX_AF_RX_SW_SYNC_DONE (0x0560) +#define NIX_AF_SEB_ECO (0x0600) +#define NIX_AF_SEB_TEST_BP (0x0610) +#define NIX_AF_NORM_TX_FIFO_STATUS (0x0620) +#define NIX_AF_EXPR_TX_FIFO_STATUS (0x0630) +#define NIX_AF_SDP_TX_FIFO_STATUS (0x0640) +#define NIX_AF_TX_NPC_CAPTURE_CONFIG (0x0660) +#define NIX_AF_TX_NPC_CAPTURE_INFO (0x0670) + +#define NIX_AF_DEBUG_NPC_RESP_DATAX(a) (0x680 | (a) << 3) +#define NIX_AF_SMQX_CFG(a) (0x700 | (a) << 16) +#define NIX_AF_PSE_CHANNEL_LEVEL (0x800) +#define NIX_AF_PSE_SHAPER_CFG (0x810) +#define NIX_AF_TX_EXPR_CREDIT (0x830) +#define NIX_AF_MARK_FORMATX_CTL(a) (0x900 | (a) << 18) +#define NIX_AF_TX_LINKX_NORM_CREDIT(a) (0xA00 | (a) << 16) +#define NIX_AF_TX_LINKX_EXPR_CREDIT(a) (0xA10 | (a) << 16) +#define NIX_AF_TX_LINKX_SW_XOFF(a) (0xA20 | (a) << 16) +#define NIX_AF_TX_LINKX_HW_XOFF(a) (0xA30 | (a) << 16) +#define NIX_AF_SDP_LINK_CREDIT (0xa40) +#define NIX_AF_SDP_SW_XOFFX(a) (0xA60 | (a) << 3) +#define NIX_AF_SDP_HW_XOFFX(a) (0xAC0 | (a) << 3) +#define NIX_AF_TL4X_BP_STATUS(a) (0xB00 | (a) << 16) +#define NIX_AF_TL4X_SDP_LINK_CFG(a) (0xB10 | (a) << 16) +#define NIX_AF_TL1X_SCHEDULE(a) (0xC00 | (a) << 16) +#define 
NIX_AF_TL1X_SHAPE(a) (0xC10 | (a) << 16) +#define NIX_AF_TL1X_CIR(a) (0xC20 | (a) << 16) +#define NIX_AF_TL1X_SHAPE_STATE(a) (0xC50 | (a) << 16) +#define NIX_AF_TL1X_SW_XOFF(a) (0xC70 | (a) << 16) +#define NIX_AF_TL1X_TOPOLOGY(a) (0xC80 | (a) << 16) +#define NIX_AF_TL1X_GREEN(a) (0xC90 | (a) << 16) +#define NIX_AF_TL1X_YELLOW(a) (0xCA0 | (a) << 16) +#define NIX_AF_TL1X_RED(a) (0xCB0 | (a) << 16) +#define NIX_AF_TL1X_MD_DEBUG0(a) (0xCC0 | (a) << 16) +#define NIX_AF_TL1X_MD_DEBUG1(a) (0xCC8 | (a) << 16) +#define NIX_AF_TL1X_MD_DEBUG2(a) (0xCD0 | (a) << 16) +#define NIX_AF_TL1X_MD_DEBUG3(a) (0xCD8 | (a) << 16) +#define NIX_AF_TL1A_DEBUG (0xce0) +#define NIX_AF_TL1B_DEBUG (0xcf0) +#define NIX_AF_TL1_DEBUG_GREEN (0xd00) +#define NIX_AF_TL1_DEBUG_NODE (0xd10) +#define NIX_AF_TL1X_DROPPED_PACKETS(a) (0xD20 | (a) << 16) +#define NIX_AF_TL1X_DROPPED_BYTES(a) (0xD30 | (a) << 16) +#define NIX_AF_TL1X_RED_PACKETS(a) (0xD40 | (a) << 16) +#define NIX_AF_TL1X_RED_BYTES(a) (0xD50 | (a) << 16) +#define NIX_AF_TL1X_YELLOW_PACKETS(a) (0xD60 | (a) << 16) +#define NIX_AF_TL1X_YELLOW_BYTES(a) (0xD70 | (a) << 16) +#define NIX_AF_TL1X_GREEN_PACKETS(a) (0xD80 | (a) << 16) +#define NIX_AF_TL1X_GREEN_BYTES(a) (0xD90 | (a) << 16) +#define NIX_AF_TL2X_SCHEDULE(a) (0xE00 | (a) << 16) +#define NIX_AF_TL2X_SHAPE(a) (0xE10 | (a) << 16) +#define NIX_AF_TL2X_CIR(a) (0xE20 | (a) << 16) +#define NIX_AF_TL2X_PIR(a) (0xE30 | (a) << 16) +#define NIX_AF_TL2X_SCHED_STATE(a) (0xE40 | (a) << 16) +#define NIX_AF_TL2X_SHAPE_STATE(a) (0xE50 | (a) << 16) +#define NIX_AF_TL2X_POINTERS(a) (0xE60 | (a) << 16) +#define NIX_AF_TL2X_SW_XOFF(a) (0xE70 | (a) << 16) +#define NIX_AF_TL2X_TOPOLOGY(a) (0xE80 | (a) << 16) +#define NIX_AF_TL2X_PARENT(a) (0xE88 | (a) << 16) +#define NIX_AF_TL2X_GREEN(a) (0xE90 | (a) << 16) +#define NIX_AF_TL2X_YELLOW(a) (0xEA0 | (a) << 16) +#define NIX_AF_TL2X_RED(a) (0xEB0 | (a) << 16) +#define NIX_AF_TL2X_MD_DEBUG0(a) (0xEC0 | (a) << 16) +#define NIX_AF_TL2X_MD_DEBUG1(a) (0xEC8 | (a) << 16) +#define NIX_AF_TL2X_MD_DEBUG2(a) (0xED0 | (a) << 16) +#define NIX_AF_TL2X_MD_DEBUG3(a) (0xED8 | (a) << 16) +#define NIX_AF_TL2A_DEBUG (0xee0) +#define NIX_AF_TL2B_DEBUG (0xef0) +#define NIX_AF_TL3X_SCHEDULE(a) (0x1000 | (a) << 16) +#define NIX_AF_TL3X_SHAPE(a) (0x1010 | (a) << 16) +#define NIX_AF_TL3X_CIR(a) (0x1020 | (a) << 16) +#define NIX_AF_TL3X_PIR(a) (0x1030 | (a) << 16) +#define NIX_AF_TL3X_SCHED_STATE(a) (0x1040 | (a) << 16) +#define NIX_AF_TL3X_SHAPE_STATE(a) (0x1050 | (a) << 16) +#define NIX_AF_TL3X_POINTERS(a) (0x1060 | (a) << 16) +#define NIX_AF_TL3X_SW_XOFF(a) (0x1070 | (a) << 16) +#define NIX_AF_TL3X_TOPOLOGY(a) (0x1080 | (a) << 16) +#define NIX_AF_TL3X_PARENT(a) (0x1088 | (a) << 16) +#define NIX_AF_TL3X_GREEN(a) (0x1090 | (a) << 16) +#define NIX_AF_TL3X_YELLOW(a) (0x10A0 | (a) << 16) +#define NIX_AF_TL3X_RED(a) (0x10B0 | (a) << 16) +#define NIX_AF_TL3X_MD_DEBUG0(a) (0x10C0 | (a) << 16) +#define NIX_AF_TL3X_MD_DEBUG1(a) (0x10C8 | (a) << 16) +#define NIX_AF_TL3X_MD_DEBUG2(a) (0x10D0 | (a) << 16) +#define NIX_AF_TL3X_MD_DEBUG3(a) (0x10D8 | (a) << 16) +#define NIX_AF_TL3A_DEBUG (0x10e0) +#define NIX_AF_TL3B_DEBUG (0x10f0) +#define NIX_AF_TL4X_SCHEDULE(a) (0x1200 | (a) << 16) +#define NIX_AF_TL4X_SHAPE(a) (0x1210 | (a) << 16) +#define NIX_AF_TL4X_CIR(a) (0x1220 | (a) << 16) +#define NIX_AF_TL4X_PIR(a) (0x1230 | (a) << 16) +#define NIX_AF_TL4X_SCHED_STATE(a) (0x1240 | (a) << 16) +#define NIX_AF_TL4X_SHAPE_STATE(a) (0x1250 | (a) << 16) +#define NIX_AF_TL4X_POINTERS(a) (0x1260 | (a) << 16) +#define NIX_AF_TL4X_SW_XOFF(a) 
(0x1270 | (a) << 16) +#define NIX_AF_TL4X_TOPOLOGY(a) (0x1280 | (a) << 16) +#define NIX_AF_TL4X_PARENT(a) (0x1288 | (a) << 16) +#define NIX_AF_TL4X_GREEN(a) (0x1290 | (a) << 16) +#define NIX_AF_TL4X_YELLOW(a) (0x12A0 | (a) << 16) +#define NIX_AF_TL4X_RED(a) (0x12B0 | (a) << 16) +#define NIX_AF_TL4X_MD_DEBUG0(a) (0x12C0 | (a) << 16) +#define NIX_AF_TL4X_MD_DEBUG1(a) (0x12C8 | (a) << 16) +#define NIX_AF_TL4X_MD_DEBUG2(a) (0x12D0 | (a) << 16) +#define NIX_AF_TL4X_MD_DEBUG3(a) (0x12D8 | (a) << 16) +#define NIX_AF_TL4A_DEBUG (0x12e0) +#define NIX_AF_TL4B_DEBUG (0x12f0) +#define NIX_AF_MDQX_SCHEDULE(a) (0x1400 | (a) << 16) +#define NIX_AF_MDQX_SHAPE(a) (0x1410 | (a) << 16) +#define NIX_AF_MDQX_CIR(a) (0x1420 | (a) << 16) +#define NIX_AF_MDQX_PIR(a) (0x1430 | (a) << 16) +#define NIX_AF_MDQX_SCHED_STATE(a) (0x1440 | (a) << 16) +#define NIX_AF_MDQX_SHAPE_STATE(a) (0x1450 | (a) << 16) +#define NIX_AF_MDQX_POINTERS(a) (0x1460 | (a) << 16) +#define NIX_AF_MDQX_SW_XOFF(a) (0x1470 | (a) << 16) +#define NIX_AF_MDQX_PARENT(a) (0x1480 | (a) << 16) +#define NIX_AF_MDQX_MD_DEBUG(a) (0x14C0 | (a) << 16) +#define NIX_AF_MDQX_PTR_FIFO(a) (0x14D0 | (a) << 16) +#define NIX_AF_MDQA_DEBUG (0x14e0) +#define NIX_AF_MDQB_DEBUG (0x14f0) +#define NIX_AF_TL3_TL2X_CFG(a) (0x1600 | (a) << 18) +#define NIX_AF_TL3_TL2X_BP_STATUS(a) (0x1610 | (a) << 16) +#define NIX_AF_TL3_TL2X_LINKX_CFG(a, b) (0x1700 | (a) << 16 | (b) << 3) +#define NIX_AF_RX_FLOW_KEY_ALGX_FIELDX(a, b) (0x1800 | (a) << 18 | (b) << 3) +#define NIX_AF_TX_MCASTX(a) (0x1900 | (a) << 15) +#define NIX_AF_TX_VTAG_DEFX_CTL(a) (0x1A00 | (a) << 16) +#define NIX_AF_TX_VTAG_DEFX_DATA(a) (0x1A10 | (a) << 16) +#define NIX_AF_RX_BPIDX_STATUS(a) (0x1A20 | (a) << 17) +#define NIX_AF_RX_CHANX_CFG(a) (0x1A30 | (a) << 15) +#define NIX_AF_CINT_TIMERX(a) (0x1A40 | (a) << 18) +#define NIX_AF_LSO_FORMATX_FIELDX(a, b) (0x1B00 | (a) << 16 | (b) << 3) +#define NIX_AF_LFX_CFG(a) (0x4000 | (a) << 17) +#define NIX_AF_LFX_SQS_CFG(a) (0x4020 | (a) << 17) +#define NIX_AF_LFX_TX_CFG2(a) (0x4028 | (a) << 17) +#define NIX_AF_LFX_SQS_BASE(a) (0x4030 | (a) << 17) +#define NIX_AF_LFX_RQS_CFG(a) (0x4040 | (a) << 17) +#define NIX_AF_LFX_RQS_BASE(a) (0x4050 | (a) << 17) +#define NIX_AF_LFX_CQS_CFG(a) (0x4060 | (a) << 17) +#define NIX_AF_LFX_CQS_BASE(a) (0x4070 | (a) << 17) +#define NIX_AF_LFX_TX_CFG(a) (0x4080 | (a) << 17) +#define NIX_AF_LFX_TX_PARSE_CFG(a) (0x4090 | (a) << 17) +#define NIX_AF_LFX_RX_CFG(a) (0x40A0 | (a) << 17) +#define NIX_AF_LFX_RSS_CFG(a) (0x40C0 | (a) << 17) +#define NIX_AF_LFX_RSS_BASE(a) (0x40D0 | (a) << 17) +#define NIX_AF_LFX_QINTS_CFG(a) (0x4100 | (a) << 17) +#define NIX_AF_LFX_QINTS_BASE(a) (0x4110 | (a) << 17) +#define NIX_AF_LFX_CINTS_CFG(a) (0x4120 | (a) << 17) +#define NIX_AF_LFX_CINTS_BASE(a) (0x4130 | (a) << 17) +#define NIX_AF_LFX_RX_IPSEC_CFG0(a) (0x4140 | (a) << 17) +#define NIX_AF_LFX_RX_IPSEC_CFG1(a) (0x4148 | (a) << 17) +#define NIX_AF_LFX_RX_IPSEC_DYNO_CFG(a) (0x4150 | (a) << 17) +#define NIX_AF_LFX_RX_IPSEC_DYNO_BASE(a) (0x4158 | (a) << 17) +#define NIX_AF_LFX_RX_IPSEC_SA_BASE(a) (0x4170 | (a) << 17) +#define NIX_AF_LFX_TX_STATUS(a) (0x4180 | (a) << 17) +#define NIX_AF_LFX_RX_VTAG_TYPEX(a, b) (0x4200 | (a) << 17 | (b) << 3) +#define NIX_AF_LFX_LOCKX(a, b) (0x4300 | (a) << 17 | (b) << 3) +#define NIX_AF_LFX_TX_STATX(a, b) (0x4400 | (a) << 17 | (b) << 3) +#define NIX_AF_LFX_RX_STATX(a, b) (0x4500 | (a) << 17 | (b) << 3) +#define NIX_AF_LFX_RSS_GRPX(a, b) (0x4600 | (a) << 17 | (b) << 3) +#define NIX_AF_RX_NPC_MC_RCV (0x4700) +#define NIX_AF_RX_NPC_MC_DROP 
(0x4710) +#define NIX_AF_RX_NPC_MIRROR_RCV (0x4720) +#define NIX_AF_RX_NPC_MIRROR_DROP (0x4730) +#define NIX_AF_RX_ACTIVE_CYCLES_PCX(a) (0x4800 | (a) << 16) + +#define NIX_PRIV_AF_INT_CFG (0x8000000) +#define NIX_PRIV_LFX_CFG (0x8000010) +#define NIX_PRIV_LFX_INT_CFG (0x8000020) +#define NIX_AF_RVU_LF_CFG_DEBUG (0x8000030) + +/* SSO */ +#define SSO_AF_CONST (0x1000) +#define SSO_AF_CONST1 (0x1008) +#define SSO_AF_BLK_RST (0x10f8) +#define SSO_AF_LF_HWGRP_RST (0x10e0) +#define SSO_AF_RVU_LF_CFG_DEBUG (0x3800) +#define SSO_PRIV_LFX_HWGRP_CFG (0x10000) +#define SSO_PRIV_LFX_HWGRP_INT_CFG (0x20000) + +/* SSOW */ +#define SSOW_AF_RVU_LF_HWS_CFG_DEBUG (0x0010) +#define SSOW_AF_LF_HWS_RST (0x0030) +#define SSOW_PRIV_LFX_HWS_CFG (0x1000) +#define SSOW_PRIV_LFX_HWS_INT_CFG (0x2000) + +/* TIM */ +#define TIM_AF_CONST (0x90) +#define TIM_PRIV_LFX_CFG (0x20000) +#define TIM_PRIV_LFX_INT_CFG (0x24000) +#define TIM_AF_RVU_LF_CFG_DEBUG (0x30000) +#define TIM_AF_BLK_RST (0x10) +#define TIM_AF_LF_RST (0x20) + +/* CPT */ +#define CPT_AF_CONSTANTS0 (0x0000) +#define CPT_PRIV_LFX_CFG (0x41000) +#define CPT_PRIV_LFX_INT_CFG (0x43000) +#define CPT_AF_RVU_LF_CFG_DEBUG (0x45000) +#define CPT_AF_LF_RST (0x44000) +#define CPT_AF_BLK_RST (0x46000) + +#define NDC_AF_BLK_RST (0x002F0) +#define NPC_AF_BLK_RST (0x00040) + +#endif /* RVU_REG_H */ diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h new file mode 100644 index 000000000000..92e05817ffe7 --- /dev/null +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h @@ -0,0 +1,74 @@ +/* SPDX-License-Identifier: GPL-2.0 + * Marvell OcteonTx2 RVU Admin Function driver + * + * Copyright (C) 2018 Marvell International Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
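The NIX_AF_TL*X_* and NIX_AF_LFX_* macros above all use the same encoding: a fixed register offset OR'ed with the instance index shifted into the high bits, so every scheduler queue or LF gets its own aligned window. A minimal stand-alone sketch of that arithmetic (the macro is copied from the rvu_reg.h hunk above; main() and the printout are mine, and the real driver accesses these offsets through its own MMIO wrappers rather than printing them):

#include <stdio.h>

/* Copied from the rvu_reg.h hunk above: the low bits pick the register,
 * bits 16 and up pick the TL2 scheduler queue instance. */
#define NIX_AF_TL2X_SCHEDULE(a)	(0xE00 | (a) << 16)

int main(void)
{
	/* Queue 3 lands at 0x30E00 from the NIX AF block base. */
	printf("TL2[3].SCHEDULE offset = 0x%x\n", NIX_AF_TL2X_SCHEDULE(3));
	return 0;
}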
+ */ + +#ifndef RVU_STRUCT_H +#define RVU_STRUCT_H + +/* RVU Block Address Enumeration */ +enum rvu_block_addr_e { + BLKADDR_RVUM = 0x0ULL, + BLKADDR_LMT = 0x1ULL, + BLKADDR_MSIX = 0x2ULL, + BLKADDR_NPA = 0x3ULL, + BLKADDR_NIX0 = 0x4ULL, + BLKADDR_NIX1 = 0x5ULL, + BLKADDR_NPC = 0x6ULL, + BLKADDR_SSO = 0x7ULL, + BLKADDR_SSOW = 0x8ULL, + BLKADDR_TIM = 0x9ULL, + BLKADDR_CPT0 = 0xaULL, + BLKADDR_CPT1 = 0xbULL, + BLKADDR_NDC0 = 0xcULL, + BLKADDR_NDC1 = 0xdULL, + BLKADDR_NDC2 = 0xeULL, + BLK_COUNT = 0xfULL, +}; + +/* RVU Block Type Enumeration */ +enum rvu_block_type_e { + BLKTYPE_RVUM = 0x0, + BLKTYPE_MSIX = 0x1, + BLKTYPE_LMT = 0x2, + BLKTYPE_NIX = 0x3, + BLKTYPE_NPA = 0x4, + BLKTYPE_NPC = 0x5, + BLKTYPE_SSO = 0x6, + BLKTYPE_SSOW = 0x7, + BLKTYPE_TIM = 0x8, + BLKTYPE_CPT = 0x9, + BLKTYPE_NDC = 0xa, + BLKTYPE_MAX = 0xa, +}; + +/* RVU Admin function Interrupt Vector Enumeration */ +enum rvu_af_int_vec_e { + RVU_AF_INT_VEC_POISON = 0x0, + RVU_AF_INT_VEC_PFFLR = 0x1, + RVU_AF_INT_VEC_PFME = 0x2, + RVU_AF_INT_VEC_GEN = 0x3, + RVU_AF_INT_VEC_MBOX = 0x4, + RVU_AF_INT_VEC_CNT = 0x5, +}; + +/** + * RVU PF Interrupt Vector Enumeration + */ +enum rvu_pf_int_vec_e { + RVU_PF_INT_VEC_VFFLR0 = 0x0, + RVU_PF_INT_VEC_VFFLR1 = 0x1, + RVU_PF_INT_VEC_VFME0 = 0x2, + RVU_PF_INT_VEC_VFME1 = 0x3, + RVU_PF_INT_VEC_VFPF_MBOX0 = 0x4, + RVU_PF_INT_VEC_VFPF_MBOX1 = 0x5, + RVU_PF_INT_VEC_AFPF_MBOX = 0x6, + RVU_PF_INT_VEC_CNT = 0x7, +}; + +#endif /* RVU_STRUCT_H */ diff --git a/drivers/net/ethernet/mellanox/mlx4/en_main.c b/drivers/net/ethernet/mellanox/mlx4/en_main.c index d25e16d2c319..109472d6b61f 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_main.c @@ -167,8 +167,13 @@ static void mlx4_en_get_profile(struct mlx4_en_dev *mdev) params->prof[i].rx_ppp = pfcrx; params->prof[i].tx_pause = !(pfcrx || pfctx); params->prof[i].tx_ppp = pfctx; - params->prof[i].tx_ring_size = MLX4_EN_DEF_TX_RING_SIZE; - params->prof[i].rx_ring_size = MLX4_EN_DEF_RX_RING_SIZE; + if (mlx4_low_memory_profile()) { + params->prof[i].tx_ring_size = MLX4_EN_MIN_TX_SIZE; + params->prof[i].rx_ring_size = MLX4_EN_MIN_RX_SIZE; + } else { + params->prof[i].tx_ring_size = MLX4_EN_DEF_TX_RING_SIZE; + params->prof[i].rx_ring_size = MLX4_EN_DEF_RX_RING_SIZE; + } params->prof[i].num_up = MLX4_EN_NUM_UP_LOW; params->prof[i].num_tx_rings_p_up = params->max_num_tx_rings_p_up; params->prof[i].tx_ring_num[TX] = params->max_num_tx_rings_p_up * diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c index d2d59444f562..6a046030e873 100644 --- a/drivers/net/ethernet/mellanox/mlx4/main.c +++ b/drivers/net/ethernet/mellanox/mlx4/main.c @@ -260,47 +260,34 @@ static const struct devlink_param mlx4_devlink_params[] = { NULL, NULL, NULL), }; -static void mlx4_devlink_set_init_value(struct devlink *devlink, u32 param_id, - union devlink_param_value init_val) -{ - struct mlx4_priv *priv = devlink_priv(devlink); - struct mlx4_dev *dev = &priv->dev; - int err; - - err = devlink_param_driverinit_value_set(devlink, param_id, init_val); - if (err) - mlx4_warn(dev, - "devlink set parameter %u value failed (err = %d)", - param_id, err); -} - static void mlx4_devlink_set_params_init_values(struct devlink *devlink) { union devlink_param_value value; value.vbool = !!mlx4_internal_err_reset; - mlx4_devlink_set_init_value(devlink, - DEVLINK_PARAM_GENERIC_ID_INT_ERR_RESET, - value); + devlink_param_driverinit_value_set(devlink, + DEVLINK_PARAM_GENERIC_ID_INT_ERR_RESET, + value); value.vu32 = 
1UL << log_num_mac; - mlx4_devlink_set_init_value(devlink, - DEVLINK_PARAM_GENERIC_ID_MAX_MACS, value); + devlink_param_driverinit_value_set(devlink, + DEVLINK_PARAM_GENERIC_ID_MAX_MACS, + value); value.vbool = enable_64b_cqe_eqe; - mlx4_devlink_set_init_value(devlink, - MLX4_DEVLINK_PARAM_ID_ENABLE_64B_CQE_EQE, - value); + devlink_param_driverinit_value_set(devlink, + MLX4_DEVLINK_PARAM_ID_ENABLE_64B_CQE_EQE, + value); value.vbool = enable_4k_uar; - mlx4_devlink_set_init_value(devlink, - MLX4_DEVLINK_PARAM_ID_ENABLE_4K_UAR, - value); + devlink_param_driverinit_value_set(devlink, + MLX4_DEVLINK_PARAM_ID_ENABLE_4K_UAR, + value); value.vbool = false; - mlx4_devlink_set_init_value(devlink, - DEVLINK_PARAM_GENERIC_ID_REGION_SNAPSHOT, - value); + devlink_param_driverinit_value_set(devlink, + DEVLINK_PARAM_GENERIC_ID_REGION_SNAPSHOT, + value); } static inline void mlx4_set_num_reserved_uars(struct mlx4_dev *dev, diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h index c3228b89df46..485d856546c6 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h @@ -72,7 +72,7 @@ #define MLX4_EN_PAGE_SIZE (1 << MLX4_EN_PAGE_SHIFT) #define DEF_RX_RINGS 16 #define MAX_RX_RINGS 128 -#define MIN_RX_RINGS 4 +#define MIN_RX_RINGS 1 #define LOG_TXBB_SIZE 6 #define TXBB_SIZE BIT(LOG_TXBB_SIZE) #define HEADROOM (2048 / TXBB_SIZE + 1) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index ef7a44eb9adb..1d743bd5d212 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -54,6 +54,7 @@ #include "en_stats.h" #include "en/fs.h" +extern const struct net_device_ops mlx5e_netdev_ops; struct page_pool; #define MLX5E_METADATA_ETHER_TYPE (0x8CE4) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h b/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h index bbf69e859b78..1431232c9a09 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h @@ -16,6 +16,8 @@ struct mlx5e_tc_table { DECLARE_HASHTABLE(mod_hdr_tbl, 8); DECLARE_HASHTABLE(hairpin_tbl, 8); + + struct notifier_block netdevice_nb; }; struct mlx5e_flow_table { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c index 45cdde694d20..8657e0f26995 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c @@ -543,8 +543,11 @@ static struct mlx5_flow_handle *arfs_add_rule(struct mlx5e_priv *priv, rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1); if (IS_ERR(rule)) { err = PTR_ERR(rule); - netdev_err(priv->netdev, "%s: add rule(filter id=%d, rq idx=%d) failed, err=%d\n", - __func__, arfs_rule->filter_id, arfs_rule->rxq, err); + priv->channel_stats[arfs_rule->rxq].rq.arfs_err++; + mlx5e_dbg(HW, priv, + "%s: add rule(filter id=%d, rq idx=%d, ip proto=0x%x) failed,err=%d\n", + __func__, arfs_rule->filter_id, arfs_rule->rxq, + tuple->ip_proto, err); } out: diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 35aca9a8e3d6..bc034958c846 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -4318,7 +4318,7 @@ static int mlx5e_xdp(struct net_device *dev, struct netdev_bpf *xdp) } } -static const struct net_device_ops mlx5e_netdev_ops = { +const 
struct net_device_ops mlx5e_netdev_ops = { .ndo_open = mlx5e_open, .ndo_stop = mlx5e_close, .ndo_start_xmit = mlx5e_xmit, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c index 90c7607b1f44..b7d4896c7c7b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c @@ -93,6 +93,7 @@ static const struct counter_desc sw_stats_desc[] = { { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_busy) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_waive) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_congst_umr) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_arfs_err) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_events) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_poll) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_arm) }, @@ -170,6 +171,7 @@ void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv) s->rx_cache_busy += rq_stats->cache_busy; s->rx_cache_waive += rq_stats->cache_waive; s->rx_congst_umr += rq_stats->congst_umr; + s->rx_arfs_err += rq_stats->arfs_err; s->ch_events += ch_stats->events; s->ch_poll += ch_stats->poll; s->ch_arm += ch_stats->arm; @@ -1161,6 +1163,7 @@ static const struct counter_desc rq_stats_desc[] = { { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_busy) }, { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_waive) }, { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, congst_umr) }, + { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, arfs_err) }, }; static const struct counter_desc sq_stats_desc[] = { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h index a5fb3dc27f50..77f74ce11280 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h @@ -106,6 +106,7 @@ struct mlx5e_sw_stats { u64 rx_cache_busy; u64 rx_cache_waive; u64 rx_congst_umr; + u64 rx_arfs_err; u64 ch_events; u64 ch_poll; u64 ch_arm; @@ -202,6 +203,7 @@ struct mlx5e_rq_stats { u64 cache_busy; u64 cache_waive; u64 congst_umr; + u64 arfs_err; }; struct mlx5e_sq_stats { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c index 82723a0e509a..6de21d9f4fad 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c @@ -532,7 +532,8 @@ static struct mlx5e_hairpin_entry *mlx5e_hairpin_get(struct mlx5e_priv *priv, #define UNKNOWN_MATCH_PRIO 8 static int mlx5e_hairpin_get_prio(struct mlx5e_priv *priv, - struct mlx5_flow_spec *spec, u8 *match_prio) + struct mlx5_flow_spec *spec, u8 *match_prio, + struct netlink_ext_ack *extack) { void *headers_c, *headers_v; u8 prio_val, prio_mask = 0; @@ -540,8 +541,8 @@ static int mlx5e_hairpin_get_prio(struct mlx5e_priv *priv, #ifdef CONFIG_MLX5_CORE_EN_DCB if (priv->dcbx_dp.trust_state != MLX5_QPTS_TRUST_PCP) { - netdev_warn(priv->netdev, - "only PCP trust state supported for hairpin\n"); + NL_SET_ERR_MSG_MOD(extack, + "only PCP trust state supported for hairpin"); return -EOPNOTSUPP; } #endif @@ -557,8 +558,8 @@ static int mlx5e_hairpin_get_prio(struct mlx5e_priv *priv, if (!vlan_present || !prio_mask) { prio_val = UNKNOWN_MATCH_PRIO; } else if (prio_mask != 0x7) { - netdev_warn(priv->netdev, - "masked priority match not supported for hairpin\n"); + NL_SET_ERR_MSG_MOD(extack, + "masked priority match not supported for hairpin"); return -EOPNOTSUPP; } @@ -568,7 +569,8 @@ static int 
mlx5e_hairpin_get_prio(struct mlx5e_priv *priv, static int mlx5e_hairpin_flow_add(struct mlx5e_priv *priv, struct mlx5e_tc_flow *flow, - struct mlx5e_tc_flow_parse_attr *parse_attr) + struct mlx5e_tc_flow_parse_attr *parse_attr, + struct netlink_ext_ack *extack) { int peer_ifindex = parse_attr->mirred_ifindex; struct mlx5_hairpin_params params; @@ -583,12 +585,13 @@ static int mlx5e_hairpin_flow_add(struct mlx5e_priv *priv, peer_mdev = mlx5e_hairpin_get_mdev(dev_net(priv->netdev), peer_ifindex); if (!MLX5_CAP_GEN(priv->mdev, hairpin) || !MLX5_CAP_GEN(peer_mdev, hairpin)) { - netdev_warn(priv->netdev, "hairpin is not supported\n"); + NL_SET_ERR_MSG_MOD(extack, "hairpin is not supported"); return -EOPNOTSUPP; } peer_id = MLX5_CAP_GEN(peer_mdev, vhca_id); - err = mlx5e_hairpin_get_prio(priv, &parse_attr->spec, &match_prio); + err = mlx5e_hairpin_get_prio(priv, &parse_attr->spec, &match_prio, + extack); if (err) return err; hpe = mlx5e_hairpin_get(priv, peer_id, match_prio); @@ -677,7 +680,8 @@ static void mlx5e_hairpin_flow_del(struct mlx5e_priv *priv, static struct mlx5_flow_handle * mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv, struct mlx5e_tc_flow_parse_attr *parse_attr, - struct mlx5e_tc_flow *flow) + struct mlx5e_tc_flow *flow, + struct netlink_ext_ack *extack) { struct mlx5_nic_flow_attr *attr = flow->nic_attr; struct mlx5_core_dev *dev = priv->mdev; @@ -694,7 +698,7 @@ mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv, int err, dest_ix = 0; if (flow->flags & MLX5E_TC_FLOW_HAIRPIN) { - err = mlx5e_hairpin_flow_add(priv, flow, parse_attr); + err = mlx5e_hairpin_flow_add(priv, flow, parse_attr, extack); if (err) { rule = ERR_PTR(err); goto err_add_hairpin_flow; @@ -753,6 +757,8 @@ mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv, MLX5E_TC_TABLE_NUM_GROUPS, MLX5E_TC_FT_LEVEL, 0); if (IS_ERR(priv->fs.tc.t)) { + NL_SET_ERR_MSG_MOD(extack, + "Failed to create tc offload table\n"); netdev_err(priv->netdev, "Failed to create tc offload table\n"); rule = ERR_CAST(priv->fs.tc.t); @@ -819,12 +825,14 @@ static int mlx5e_attach_encap(struct mlx5e_priv *priv, struct ip_tunnel_info *tun_info, struct net_device *mirred_dev, struct net_device **encap_dev, - struct mlx5e_tc_flow *flow); + struct mlx5e_tc_flow *flow, + struct netlink_ext_ack *extack); static struct mlx5_flow_handle * mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv, struct mlx5e_tc_flow_parse_attr *parse_attr, - struct mlx5e_tc_flow *flow) + struct mlx5e_tc_flow *flow, + struct netlink_ext_ack *extack) { struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; struct mlx5_esw_flow_attr *attr = flow->esw_attr; @@ -838,7 +846,7 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv, out_dev = __dev_get_by_index(dev_net(priv->netdev), attr->parse_attr->mirred_ifindex); err = mlx5e_attach_encap(priv, &parse_attr->tun_info, - out_dev, &encap_dev, flow); + out_dev, &encap_dev, flow, extack); if (err) { rule = ERR_PTR(err); if (err != -EAGAIN) @@ -1105,6 +1113,7 @@ static int parse_tunnel_attr(struct mlx5e_priv *priv, struct mlx5_flow_spec *spec, struct tc_cls_flower_offload *f) { + struct netlink_ext_ack *extack = f->common.extack; void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, outer_headers); void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, @@ -1133,6 +1142,8 @@ static int parse_tunnel_attr(struct mlx5e_priv *priv, MLX5_CAP_ESW(priv->mdev, vxlan_encap_decap)) parse_vxlan_attr(spec, f); else { + NL_SET_ERR_MSG_MOD(extack, + "port isn't an offloaded vxlan udp dport"); netdev_warn(priv->netdev, "%d isn't an offloaded vxlan 
udp dport\n", be16_to_cpu(key->dst)); return -EOPNOTSUPP; @@ -1149,6 +1160,8 @@ static int parse_tunnel_attr(struct mlx5e_priv *priv, udp_sport, ntohs(key->src)); } else { /* udp dst port must be given */ vxlan_match_offload_err: + NL_SET_ERR_MSG_MOD(extack, + "IP tunnel decap offload supported only for vxlan, must set UDP dport"); netdev_warn(priv->netdev, "IP tunnel decap offload supported only for vxlan, must set UDP dport\n"); return -EOPNOTSUPP; @@ -1225,6 +1238,16 @@ vxlan_match_offload_err: MLX5_SET(fte_match_set_lyr_2_4, headers_c, ttl_hoplimit, mask->ttl); MLX5_SET(fte_match_set_lyr_2_4, headers_v, ttl_hoplimit, key->ttl); + + if (mask->ttl && + !MLX5_CAP_ESW_FLOWTABLE_FDB + (priv->mdev, + ft_field_support.outer_ipv4_ttl)) { + NL_SET_ERR_MSG_MOD(extack, + "Matching on TTL is not supported"); + return -EOPNOTSUPP; + } + } /* Enforce DMAC when offloading incoming tunneled flows. @@ -1247,6 +1270,7 @@ static int __parse_cls_flower(struct mlx5e_priv *priv, struct tc_cls_flower_offload *f, u8 *match_level) { + struct netlink_ext_ack *extack = f->common.extack; void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, outer_headers); void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, @@ -1277,6 +1301,7 @@ static int __parse_cls_flower(struct mlx5e_priv *priv, BIT(FLOW_DISSECTOR_KEY_TCP) | BIT(FLOW_DISSECTOR_KEY_IP) | BIT(FLOW_DISSECTOR_KEY_ENC_IP))) { + NL_SET_ERR_MSG_MOD(extack, "Unsupported key"); netdev_warn(priv->netdev, "Unsupported key used: 0x%x\n", f->dissector->used_keys); return -EOPNOTSUPP; @@ -1368,6 +1393,9 @@ static int __parse_cls_flower(struct mlx5e_priv *priv, *match_level = MLX5_MATCH_L2; } + } else { + MLX5_SET(fte_match_set_lyr_2_4, headers_c, svlan_tag, 1); + MLX5_SET(fte_match_set_lyr_2_4, headers_c, cvlan_tag, 1); } if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_CVLAN)) { @@ -1550,8 +1578,11 @@ static int __parse_cls_flower(struct mlx5e_priv *priv, if (mask->ttl && !MLX5_CAP_ESW_FLOWTABLE_FDB(priv->mdev, - ft_field_support.outer_ipv4_ttl)) + ft_field_support.outer_ipv4_ttl)) { + NL_SET_ERR_MSG_MOD(extack, + "Matching on TTL is not supported"); return -EOPNOTSUPP; + } if (mask->tos || mask->ttl) *match_level = MLX5_MATCH_L3; @@ -1593,6 +1624,8 @@ static int __parse_cls_flower(struct mlx5e_priv *priv, udp_dport, ntohs(key->dst)); break; default: + NL_SET_ERR_MSG_MOD(extack, + "Only UDP and TCP transports are supported for L4 matching"); netdev_err(priv->netdev, "Only UDP and TCP transport are supported\n"); return -EINVAL; @@ -1629,6 +1662,7 @@ static int parse_cls_flower(struct mlx5e_priv *priv, struct mlx5_flow_spec *spec, struct tc_cls_flower_offload *f) { + struct netlink_ext_ack *extack = f->common.extack; struct mlx5_core_dev *dev = priv->mdev; struct mlx5_eswitch *esw = dev->priv.eswitch; struct mlx5e_rep_priv *rpriv = priv->ppriv; @@ -1643,6 +1677,8 @@ static int parse_cls_flower(struct mlx5e_priv *priv, if (rep->vport != FDB_UPLINK_VPORT && (esw->offloads.inline_mode != MLX5_INLINE_MODE_NONE && esw->offloads.inline_mode < match_level)) { + NL_SET_ERR_MSG_MOD(extack, + "Flow is not offloaded due to min inline setting"); netdev_warn(priv->netdev, "Flow is not offloaded due to min inline setting, required %d actual %d\n", match_level, esw->offloads.inline_mode); @@ -1744,7 +1780,8 @@ static struct mlx5_fields fields[] = { */ static int offload_pedit_fields(struct pedit_headers *masks, struct pedit_headers *vals, - struct mlx5e_tc_flow_parse_attr *parse_attr) + struct mlx5e_tc_flow_parse_attr *parse_attr, + struct 
netlink_ext_ack *extack) { struct pedit_headers *set_masks, *add_masks, *set_vals, *add_vals; int i, action_size, nactions, max_actions, first, last, next_z; @@ -1783,11 +1820,15 @@ static int offload_pedit_fields(struct pedit_headers *masks, continue; if (s_mask && a_mask) { + NL_SET_ERR_MSG_MOD(extack, + "can't set and add to the same HW field"); printk(KERN_WARNING "mlx5: can't set and add to the same HW field (%x)\n", f->field); return -EOPNOTSUPP; } if (nactions == max_actions) { + NL_SET_ERR_MSG_MOD(extack, + "too many pedit actions, can't offload"); printk(KERN_WARNING "mlx5: parsed %d pedit actions, can't do more\n", nactions); return -EOPNOTSUPP; } @@ -1820,6 +1861,8 @@ static int offload_pedit_fields(struct pedit_headers *masks, next_z = find_next_zero_bit(&mask, field_bsize, first); last = find_last_bit(&mask, field_bsize); if (first < next_z && next_z < last) { + NL_SET_ERR_MSG_MOD(extack, + "rewrite of few sub-fields isn't supported"); printk(KERN_WARNING "mlx5: rewrite of few sub-fields (mask %lx) isn't offloaded\n", mask); return -EOPNOTSUPP; @@ -1878,7 +1921,8 @@ static const struct pedit_headers zero_masks = {}; static int parse_tc_pedit_action(struct mlx5e_priv *priv, const struct tc_action *a, int namespace, - struct mlx5e_tc_flow_parse_attr *parse_attr) + struct mlx5e_tc_flow_parse_attr *parse_attr, + struct netlink_ext_ack *extack) { struct pedit_headers masks[__PEDIT_CMD_MAX], vals[__PEDIT_CMD_MAX], *cmd_masks; int nkeys, i, err = -EOPNOTSUPP; @@ -1896,12 +1940,13 @@ static int parse_tc_pedit_action(struct mlx5e_priv *priv, err = -EOPNOTSUPP; /* can't be all optimistic */ if (htype == TCA_PEDIT_KEY_EX_HDR_TYPE_NETWORK) { - netdev_warn(priv->netdev, "legacy pedit isn't offloaded\n"); + NL_SET_ERR_MSG_MOD(extack, + "legacy pedit isn't offloaded"); goto out_err; } if (cmd != TCA_PEDIT_KEY_EX_CMD_SET && cmd != TCA_PEDIT_KEY_EX_CMD_ADD) { - netdev_warn(priv->netdev, "pedit cmd %d isn't offloaded\n", cmd); + NL_SET_ERR_MSG_MOD(extack, "pedit cmd isn't offloaded"); goto out_err; } @@ -1918,13 +1963,15 @@ static int parse_tc_pedit_action(struct mlx5e_priv *priv, if (err) goto out_err; - err = offload_pedit_fields(masks, vals, parse_attr); + err = offload_pedit_fields(masks, vals, parse_attr, extack); if (err < 0) goto out_dealloc_parsed_actions; for (cmd = 0; cmd < __PEDIT_CMD_MAX; cmd++) { cmd_masks = &masks[cmd]; if (memcmp(cmd_masks, &zero_masks, sizeof(zero_masks))) { + NL_SET_ERR_MSG_MOD(extack, + "attempt to offload an unsupported field"); netdev_warn(priv->netdev, "attempt to offload an unsupported field (cmd %d)\n", cmd); print_hex_dump(KERN_WARNING, "mask: ", DUMP_PREFIX_ADDRESS, 16, 1, cmd_masks, sizeof(zero_masks), true); @@ -1941,19 +1988,26 @@ out_err: return err; } -static bool csum_offload_supported(struct mlx5e_priv *priv, u32 action, u32 update_flags) +static bool csum_offload_supported(struct mlx5e_priv *priv, + u32 action, + u32 update_flags, + struct netlink_ext_ack *extack) { u32 prot_flags = TCA_CSUM_UPDATE_FLAG_IPV4HDR | TCA_CSUM_UPDATE_FLAG_TCP | TCA_CSUM_UPDATE_FLAG_UDP; /* The HW recalcs checksums only if re-writing headers */ if (!(action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)) { + NL_SET_ERR_MSG_MOD(extack, + "TC csum action is only offloaded with pedit"); netdev_warn(priv->netdev, "TC csum action is only offloaded with pedit\n"); return false; } if (update_flags & ~prot_flags) { + NL_SET_ERR_MSG_MOD(extack, + "can't offload TC csum action for some header/s"); netdev_warn(priv->netdev, "can't offload TC csum action for some header/s - flags %#x\n", 
update_flags); @@ -1964,7 +2018,8 @@ static bool csum_offload_supported(struct mlx5e_priv *priv, u32 action, u32 upda } static bool modify_header_match_supported(struct mlx5_flow_spec *spec, - struct tcf_exts *exts) + struct tcf_exts *exts, + struct netlink_ext_ack *extack) { const struct tc_action *a; bool modify_ip_header; @@ -2002,6 +2057,8 @@ static bool modify_header_match_supported(struct mlx5_flow_spec *spec, ip_proto = MLX5_GET(fte_match_set_lyr_2_4, headers_v, ip_protocol); if (modify_ip_header && ip_proto != IPPROTO_TCP && ip_proto != IPPROTO_UDP && ip_proto != IPPROTO_ICMP) { + NL_SET_ERR_MSG_MOD(extack, + "can't offload re-write of non TCP/UDP"); pr_info("can't offload re-write of ip proto %d\n", ip_proto); return false; } @@ -2013,7 +2070,8 @@ out_ok: static bool actions_match_supported(struct mlx5e_priv *priv, struct tcf_exts *exts, struct mlx5e_tc_flow_parse_attr *parse_attr, - struct mlx5e_tc_flow *flow) + struct mlx5e_tc_flow *flow, + struct netlink_ext_ack *extack) { u32 actions; @@ -2027,7 +2085,8 @@ static bool actions_match_supported(struct mlx5e_priv *priv, return false; if (actions & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) - return modify_header_match_supported(&parse_attr->spec, exts); + return modify_header_match_supported(&parse_attr->spec, exts, + extack); return true; } @@ -2048,7 +2107,8 @@ static bool same_hw_devs(struct mlx5e_priv *priv, struct mlx5e_priv *peer_priv) static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts, struct mlx5e_tc_flow_parse_attr *parse_attr, - struct mlx5e_tc_flow *flow) + struct mlx5e_tc_flow *flow, + struct netlink_ext_ack *extack) { struct mlx5_nic_flow_attr *attr = flow->nic_attr; const struct tc_action *a; @@ -2072,7 +2132,7 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts, if (is_tcf_pedit(a)) { err = parse_tc_pedit_action(priv, a, MLX5_FLOW_NAMESPACE_KERNEL, - parse_attr); + parse_attr, extack); if (err) return err; @@ -2083,7 +2143,8 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts, if (is_tcf_csum(a)) { if (csum_offload_supported(priv, action, - tcf_csum_update_flags(a))) + tcf_csum_update_flags(a), + extack)) continue; return -EOPNOTSUPP; @@ -2099,6 +2160,8 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts, action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | MLX5_FLOW_CONTEXT_ACTION_COUNT; } else { + NL_SET_ERR_MSG_MOD(extack, + "device is not on same HW, can't offload"); netdev_warn(priv->netdev, "device %s not on same HW, can't offload\n", peer_dev->name); return -EINVAL; @@ -2110,8 +2173,8 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts, u32 mark = tcf_skbedit_mark(a); if (mark & ~MLX5E_TC_FLOW_ID_MASK) { - netdev_warn(priv->netdev, "Bad flow mark - only 16 bit is supported: 0x%x\n", - mark); + NL_SET_ERR_MSG_MOD(extack, + "Bad flow mark - only 16 bit is supported"); return -EINVAL; } @@ -2124,7 +2187,7 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts, } attr->action = action; - if (!actions_match_supported(priv, exts, parse_attr, flow)) + if (!actions_match_supported(priv, exts, parse_attr, flow, extack)) return -EOPNOTSUPP; return 0; @@ -2526,7 +2589,8 @@ static int mlx5e_attach_encap(struct mlx5e_priv *priv, struct ip_tunnel_info *tun_info, struct net_device *mirred_dev, struct net_device **encap_dev, - struct mlx5e_tc_flow *flow) + struct mlx5e_tc_flow *flow, + struct netlink_ext_ack *extack) { struct mlx5_eswitch *esw = 
priv->mdev->priv.eswitch; unsigned short family = ip_tunnel_info_af(tun_info); @@ -2544,6 +2608,8 @@ static int mlx5e_attach_encap(struct mlx5e_priv *priv, /* setting udp src port isn't supported */ if (memchr_inv(&key->tp_src, 0, sizeof(key->tp_src))) { vxlan_encap_offload_err: + NL_SET_ERR_MSG_MOD(extack, + "must set udp dst port and not set udp src port"); netdev_warn(priv->netdev, "must set udp dst port and not set udp src port\n"); return -EOPNOTSUPP; @@ -2553,6 +2619,8 @@ vxlan_encap_offload_err: MLX5_CAP_ESW(priv->mdev, vxlan_encap_decap)) { tunnel_type = MLX5_HEADER_TYPE_VXLAN; } else { + NL_SET_ERR_MSG_MOD(extack, + "port isn't an offloaded vxlan udp dport"); netdev_warn(priv->netdev, "%d isn't an offloaded vxlan udp dport\n", be16_to_cpu(key->tp_dst)); return -EOPNOTSUPP; @@ -2657,7 +2725,8 @@ static int parse_tc_vlan_action(struct mlx5e_priv *priv, static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts, struct mlx5e_tc_flow_parse_attr *parse_attr, - struct mlx5e_tc_flow *flow) + struct mlx5e_tc_flow *flow, + struct netlink_ext_ack *extack) { struct mlx5_esw_flow_attr *attr = flow->esw_attr; struct mlx5e_rep_priv *rpriv = priv->ppriv; @@ -2683,7 +2752,7 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts, if (is_tcf_pedit(a)) { err = parse_tc_pedit_action(priv, a, MLX5_FLOW_NAMESPACE_FDB, - parse_attr); + parse_attr, extack); if (err) return err; @@ -2694,7 +2763,8 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts, if (is_tcf_csum(a)) { if (csum_offload_supported(priv, action, - tcf_csum_update_flags(a))) + tcf_csum_update_flags(a), + extack)) continue; return -EOPNOTSUPP; @@ -2707,6 +2777,8 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts, out_dev = tcf_mirred_dev(a); if (attr->out_count >= MLX5_MAX_FLOW_FWD_VPORTS) { + NL_SET_ERR_MSG_MOD(extack, + "can't support more output ports, can't offload forwarding"); pr_err("can't support more than %d output ports, can't offload forwarding\n", attr->out_count); return -EOPNOTSUPP; @@ -2730,6 +2802,8 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts, MLX5_FLOW_CONTEXT_ACTION_COUNT; /* attr->out_rep is resolved when we handle encap */ } else { + NL_SET_ERR_MSG_MOD(extack, + "devices are not on same switch HW, can't offload forwarding"); pr_err("devices %s %s not on same switch HW, can't offload forwarding\n", priv->netdev->name, out_dev->name); return -EINVAL; @@ -2766,10 +2840,12 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts, } attr->action = action; - if (!actions_match_supported(priv, exts, parse_attr, flow)) + if (!actions_match_supported(priv, exts, parse_attr, flow, extack)) return -EOPNOTSUPP; if (attr->out_count > 1 && !mlx5_esw_has_fwd_fdb(priv->mdev)) { + NL_SET_ERR_MSG_MOD(extack, + "current firmware doesn't support split rule for port mirroring"); netdev_warn_once(priv->netdev, "current firmware doesn't support split rule for port mirroring\n"); return -EOPNOTSUPP; } @@ -2811,6 +2887,7 @@ static struct rhashtable *get_tc_ht(struct mlx5e_priv *priv) int mlx5e_configure_flower(struct mlx5e_priv *priv, struct tc_cls_flower_offload *f, int flags) { + struct netlink_ext_ack *extack = f->common.extack; struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; struct mlx5e_tc_flow_parse_attr *parse_attr; struct rhashtable *tc_ht = get_tc_ht(priv); @@ -2822,6 +2899,8 @@ int mlx5e_configure_flower(struct mlx5e_priv *priv, flow = 
rhashtable_lookup_fast(tc_ht, &f->cookie, tc_ht_params); if (flow) { + NL_SET_ERR_MSG_MOD(extack, + "flow cookie already exists, ignoring"); netdev_warn_once(priv->netdev, "flow cookie %lx already exists, ignoring\n", f->cookie); return 0; } @@ -2850,15 +2929,19 @@ int mlx5e_configure_flower(struct mlx5e_priv *priv, goto err_free; if (flow->flags & MLX5E_TC_FLOW_ESWITCH) { - err = parse_tc_fdb_actions(priv, f->exts, parse_attr, flow); + err = parse_tc_fdb_actions(priv, f->exts, parse_attr, flow, + extack); if (err < 0) goto err_free; - flow->rule[0] = mlx5e_tc_add_fdb_flow(priv, parse_attr, flow); + flow->rule[0] = mlx5e_tc_add_fdb_flow(priv, parse_attr, flow, + extack); } else { - err = parse_tc_nic_actions(priv, f->exts, parse_attr, flow); + err = parse_tc_nic_actions(priv, f->exts, parse_attr, flow, + extack); if (err < 0) goto err_free; - flow->rule[0] = mlx5e_tc_add_nic_flow(priv, parse_attr, flow); + flow->rule[0] = mlx5e_tc_add_nic_flow(priv, parse_attr, flow, + extack); } if (IS_ERR(flow->rule[0])) { @@ -2946,14 +3029,71 @@ int mlx5e_stats_flower(struct mlx5e_priv *priv, return 0; } +static void mlx5e_tc_hairpin_update_dead_peer(struct mlx5e_priv *priv, + struct mlx5e_priv *peer_priv) +{ + struct mlx5_core_dev *peer_mdev = peer_priv->mdev; + struct mlx5e_hairpin_entry *hpe; + u16 peer_vhca_id; + int bkt; + + if (!same_hw_devs(priv, peer_priv)) + return; + + peer_vhca_id = MLX5_CAP_GEN(peer_mdev, vhca_id); + + hash_for_each(priv->fs.tc.hairpin_tbl, bkt, hpe, hairpin_hlist) { + if (hpe->peer_vhca_id == peer_vhca_id) + hpe->hp->pair->peer_gone = true; + } +} + +static int mlx5e_tc_netdev_event(struct notifier_block *this, + unsigned long event, void *ptr) +{ + struct net_device *ndev = netdev_notifier_info_to_dev(ptr); + struct mlx5e_flow_steering *fs; + struct mlx5e_priv *peer_priv; + struct mlx5e_tc_table *tc; + struct mlx5e_priv *priv; + + if (ndev->netdev_ops != &mlx5e_netdev_ops || + event != NETDEV_UNREGISTER || + ndev->reg_state == NETREG_REGISTERED) + return NOTIFY_DONE; + + tc = container_of(this, struct mlx5e_tc_table, netdevice_nb); + fs = container_of(tc, struct mlx5e_flow_steering, tc); + priv = container_of(fs, struct mlx5e_priv, fs); + peer_priv = netdev_priv(ndev); + if (priv == peer_priv || + !(priv->netdev->features & NETIF_F_HW_TC)) + return NOTIFY_DONE; + + mlx5e_tc_hairpin_update_dead_peer(priv, peer_priv); + + return NOTIFY_DONE; +} + int mlx5e_tc_nic_init(struct mlx5e_priv *priv) { struct mlx5e_tc_table *tc = &priv->fs.tc; + int err; hash_init(tc->mod_hdr_tbl); hash_init(tc->hairpin_tbl); - return rhashtable_init(&tc->ht, &tc_ht_params); + err = rhashtable_init(&tc->ht, &tc_ht_params); + if (err) + return err; + + tc->netdevice_nb.notifier_call = mlx5e_tc_netdev_event; + if (register_netdevice_notifier(&tc->netdevice_nb)) { + tc->netdevice_nb.notifier_call = NULL; + mlx5_core_warn(priv->mdev, "Failed to register netdev notifier\n"); + } + + return err; } static void _mlx5e_tc_del_flow(void *ptr, void *arg) @@ -2969,6 +3109,9 @@ void mlx5e_tc_nic_cleanup(struct mlx5e_priv *priv) { struct mlx5e_tc_table *tc = &priv->fs.tc; + if (tc->netdevice_nb.notifier_call) + unregister_netdevice_notifier(&tc->netdevice_nb); + rhashtable_free_and_destroy(&tc->ht, _mlx5e_tc_del_flow, NULL); if (!IS_ERR_OR_NULL(tc->t)) { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c index 2b252cde5cc2..ea7dedc2d5ad 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c +++ 
b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c @@ -2000,7 +2000,7 @@ static u32 calculate_vports_min_rate_divider(struct mlx5_eswitch *esw) u32 max_guarantee = 0; int i; - for (i = 0; i <= esw->total_vports; i++) { + for (i = 0; i < esw->total_vports; i++) { evport = &esw->vports[i]; if (!evport->enabled || evport->info.min_rate < max_guarantee) continue; @@ -2020,7 +2020,7 @@ static int normalize_vports_min_rate(struct mlx5_eswitch *esw, u32 divider) int err; int i; - for (i = 0; i <= esw->total_vports; i++) { + for (i = 0; i < esw->total_vports; i++) { evport = &esw->vports[i]; if (!evport->enabled) continue; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h index 0b05bf2b91f6..dfc642de4e6d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h @@ -269,12 +269,15 @@ struct mlx5_esw_flow_attr { struct mlx5e_tc_flow_parse_attr *parse_attr; }; -int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode); +int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode, + struct netlink_ext_ack *extack); int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode); -int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode); +int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode, + struct netlink_ext_ack *extack); int mlx5_devlink_eswitch_inline_mode_get(struct devlink *devlink, u8 *mode); int mlx5_eswitch_inline_mode_get(struct mlx5_eswitch *esw, int nvfs, u8 *mode); -int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink, u8 encap); +int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink, u8 encap, + struct netlink_ext_ack *extack); int mlx5_devlink_eswitch_encap_mode_get(struct devlink *devlink, u8 *encap); void *mlx5_eswitch_get_uplink_priv(struct mlx5_eswitch *esw, u8 rep_type); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c index 21e957083f65..a35a2310f871 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c @@ -810,29 +810,35 @@ out: return flow_rule; } -static int esw_offloads_start(struct mlx5_eswitch *esw) +static int esw_offloads_start(struct mlx5_eswitch *esw, + struct netlink_ext_ack *extack) { int err, err1, num_vfs = esw->dev->priv.sriov.num_vfs; if (esw->mode != SRIOV_LEGACY) { - esw_warn(esw->dev, "Can't set offloads mode, SRIOV legacy not enabled\n"); + NL_SET_ERR_MSG_MOD(extack, + "Can't set offloads mode, SRIOV legacy not enabled"); return -EINVAL; } mlx5_eswitch_disable_sriov(esw); err = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_OFFLOADS); if (err) { - esw_warn(esw->dev, "Failed setting eswitch to offloads, err %d\n", err); + NL_SET_ERR_MSG_MOD(extack, + "Failed setting eswitch to offloads"); err1 = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_LEGACY); - if (err1) - esw_warn(esw->dev, "Failed setting eswitch back to legacy, err %d\n", err1); + if (err1) { + NL_SET_ERR_MSG_MOD(extack, + "Failed setting eswitch back to legacy"); + } } if (esw->offloads.inline_mode == MLX5_INLINE_MODE_NONE) { if (mlx5_eswitch_inline_mode_get(esw, num_vfs, &esw->offloads.inline_mode)) { esw->offloads.inline_mode = MLX5_INLINE_MODE_L2; - esw_warn(esw->dev, "Inline mode is different between vports\n"); + NL_SET_ERR_MSG_MOD(extack, + "Inline mode is different between vports"); } } return err; @@ -973,17 +979,20 @@ 
create_ft_err: return err; } -static int esw_offloads_stop(struct mlx5_eswitch *esw) +static int esw_offloads_stop(struct mlx5_eswitch *esw, + struct netlink_ext_ack *extack) { int err, err1, num_vfs = esw->dev->priv.sriov.num_vfs; mlx5_eswitch_disable_sriov(esw); err = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_LEGACY); if (err) { - esw_warn(esw->dev, "Failed setting eswitch to legacy, err %d\n", err); + NL_SET_ERR_MSG_MOD(extack, "Failed setting eswitch to legacy"); err1 = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_OFFLOADS); - if (err1) - esw_warn(esw->dev, "Failed setting eswitch back to offloads, err %d\n", err); + if (err1) { + NL_SET_ERR_MSG_MOD(extack, + "Failed setting eswitch back to offloads"); + } } /* enable back PF RoCE */ @@ -1092,7 +1101,8 @@ static int mlx5_devlink_eswitch_check(struct devlink *devlink) return 0; } -int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode) +int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode, + struct netlink_ext_ack *extack) { struct mlx5_core_dev *dev = devlink_priv(devlink); u16 cur_mlx5_mode, mlx5_mode = 0; @@ -1111,9 +1121,9 @@ int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode) return 0; if (mode == DEVLINK_ESWITCH_MODE_SWITCHDEV) - return esw_offloads_start(dev->priv.eswitch); + return esw_offloads_start(dev->priv.eswitch, extack); else if (mode == DEVLINK_ESWITCH_MODE_LEGACY) - return esw_offloads_stop(dev->priv.eswitch); + return esw_offloads_stop(dev->priv.eswitch, extack); else return -EINVAL; } @@ -1130,7 +1140,8 @@ int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode) return esw_mode_to_devlink(dev->priv.eswitch->mode, mode); } -int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode) +int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode, + struct netlink_ext_ack *extack) { struct mlx5_core_dev *dev = devlink_priv(devlink); struct mlx5_eswitch *esw = dev->priv.eswitch; @@ -1147,14 +1158,15 @@ int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode) return 0; /* fall through */ case MLX5_CAP_INLINE_MODE_L2: - esw_warn(dev, "Inline mode can't be set\n"); + NL_SET_ERR_MSG_MOD(extack, "Inline mode can't be set"); return -EOPNOTSUPP; case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT: break; } if (esw->offloads.num_flows > 0) { - esw_warn(dev, "Can't set inline mode when flows are configured\n"); + NL_SET_ERR_MSG_MOD(extack, + "Can't set inline mode when flows are configured"); return -EOPNOTSUPP; } @@ -1165,8 +1177,8 @@ int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode) for (vport = 1; vport < esw->enabled_vports; vport++) { err = mlx5_modify_nic_vport_min_inline(dev, vport, mlx5_mode); if (err) { - esw_warn(dev, "Failed to set min inline on vport %d\n", - vport); + NL_SET_ERR_MSG_MOD(extack, + "Failed to set min inline on vport"); goto revert_inline_mode; } } @@ -1232,7 +1244,8 @@ out: return 0; } -int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink, u8 encap) +int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink, u8 encap, + struct netlink_ext_ack *extack) { struct mlx5_core_dev *dev = devlink_priv(devlink); struct mlx5_eswitch *esw = dev->priv.eswitch; @@ -1259,7 +1272,8 @@ int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink, u8 encap) return 0; if (esw->offloads.num_flows > 0) { - esw_warn(dev, "Can't set encapsulation when flows are configured\n"); + NL_SET_ERR_MSG_MOD(extack, + "Can't set encapsulation when flows are configured"); return -EOPNOTSUPP; } @@ 
-1268,7 +1282,8 @@ int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink, u8 encap) esw->offloads.encap = encap; err = esw_create_offloads_fast_fdb_table(esw); if (err) { - esw_warn(esw->dev, "Failed re-creating fast FDB table, err %d\n", err); + NL_SET_ERR_MSG_MOD(extack, + "Failed re-creating fast FDB table"); esw->offloads.encap = !encap; (void)esw_create_offloads_fast_fdb_table(esw); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw.c b/drivers/net/ethernet/mellanox/mlx5/core/fw.c index 41ad24f0de2c..1ab6f7e3bec6 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fw.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fw.c @@ -250,7 +250,7 @@ int mlx5_cmd_force_teardown_hca(struct mlx5_core_dev *dev) if (ret) return ret; - force_state = MLX5_GET(teardown_hca_out, out, force_state); + force_state = MLX5_GET(teardown_hca_out, out, state); if (force_state == MLX5_TEARDOWN_HCA_OUT_FORCE_STATE_FAIL) { mlx5_core_warn(dev, "teardown with force mode failed, doing normal teardown\n"); return -EIO; @@ -259,6 +259,54 @@ int mlx5_cmd_force_teardown_hca(struct mlx5_core_dev *dev) return 0; } +#define MLX5_FAST_TEARDOWN_WAIT_MS 3000 +int mlx5_cmd_fast_teardown_hca(struct mlx5_core_dev *dev) +{ + unsigned long end, delay_ms = MLX5_FAST_TEARDOWN_WAIT_MS; + u32 out[MLX5_ST_SZ_DW(teardown_hca_out)] = {0}; + u32 in[MLX5_ST_SZ_DW(teardown_hca_in)] = {0}; + int state; + int ret; + + if (!MLX5_CAP_GEN(dev, fast_teardown)) { + mlx5_core_dbg(dev, "fast teardown is not supported in the firmware\n"); + return -EOPNOTSUPP; + } + + MLX5_SET(teardown_hca_in, in, opcode, MLX5_CMD_OP_TEARDOWN_HCA); + MLX5_SET(teardown_hca_in, in, profile, + MLX5_TEARDOWN_HCA_IN_PROFILE_PREPARE_FAST_TEARDOWN); + + ret = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); + if (ret) + return ret; + + state = MLX5_GET(teardown_hca_out, out, state); + if (state == MLX5_TEARDOWN_HCA_OUT_FORCE_STATE_FAIL) { + mlx5_core_warn(dev, "teardown with fast mode failed\n"); + return -EIO; + } + + mlx5_set_nic_state(dev, MLX5_NIC_IFC_DISABLED); + + /* Loop until device state turns to disable */ + end = jiffies + msecs_to_jiffies(delay_ms); + do { + if (mlx5_get_nic_state(dev) == MLX5_NIC_IFC_DISABLED) + break; + + cond_resched(); + } while (!time_after(jiffies, end)); + + if (mlx5_get_nic_state(dev) != MLX5_NIC_IFC_DISABLED) { + dev_err(&dev->pdev->dev, "NIC IFC still %d after %lums.\n", + mlx5_get_nic_state(dev), delay_ms); + return -EIO; + } + + return 0; +} + enum mlxsw_reg_mcc_instruction { MLX5_REG_MCC_INSTRUCTION_LOCK_UPDATE_HANDLE = 0x01, MLX5_REG_MCC_INSTRUCTION_RELEASE_UPDATE_HANDLE = 0x02, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c index 9f39aeca863f..43118de8ee99 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/health.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c @@ -59,22 +59,25 @@ enum { }; enum { - MLX5_NIC_IFC_FULL = 0, - MLX5_NIC_IFC_DISABLED = 1, - MLX5_NIC_IFC_NO_DRAM_NIC = 2, - MLX5_NIC_IFC_INVALID = 3 -}; - -enum { MLX5_DROP_NEW_HEALTH_WORK, MLX5_DROP_NEW_RECOVERY_WORK, }; -static u8 get_nic_state(struct mlx5_core_dev *dev) +u8 mlx5_get_nic_state(struct mlx5_core_dev *dev) { return (ioread32be(&dev->iseg->cmdq_addr_l_sz) >> 8) & 3; } +void mlx5_set_nic_state(struct mlx5_core_dev *dev, u8 state) +{ + u32 cur_cmdq_addr_l_sz; + + cur_cmdq_addr_l_sz = ioread32be(&dev->iseg->cmdq_addr_l_sz); + iowrite32be((cur_cmdq_addr_l_sz & 0xFFFFF000) | + state << MLX5_NIC_IFC_OFFSET, + &dev->iseg->cmdq_addr_l_sz); +} + static void 
trigger_cmd_completions(struct mlx5_core_dev *dev) { unsigned long flags; @@ -103,7 +106,7 @@ static int in_fatal(struct mlx5_core_dev *dev) struct mlx5_core_health *health = &dev->priv.health; struct health_buffer __iomem *h = health->health; - if (get_nic_state(dev) == MLX5_NIC_IFC_DISABLED) + if (mlx5_get_nic_state(dev) == MLX5_NIC_IFC_DISABLED) return 1; if (ioread32be(&h->fw_ver) == 0xffffffff) @@ -133,7 +136,7 @@ unlock: static void mlx5_handle_bad_state(struct mlx5_core_dev *dev) { - u8 nic_interface = get_nic_state(dev); + u8 nic_interface = mlx5_get_nic_state(dev); switch (nic_interface) { case MLX5_NIC_IFC_FULL: @@ -168,7 +171,7 @@ static void health_recover(struct work_struct *work) priv = container_of(health, struct mlx5_priv, health); dev = container_of(priv, struct mlx5_core_dev, priv); - nic_state = get_nic_state(dev); + nic_state = mlx5_get_nic_state(dev); if (nic_state == MLX5_NIC_IFC_INVALID) { dev_err(&dev->pdev->dev, "health recovery flow aborted since the nic state is invalid\n"); return; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c index b5e9f664fc66..28132c7dc05f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c @@ -1594,12 +1594,17 @@ static const struct pci_error_handlers mlx5_err_handler = { static int mlx5_try_fast_unload(struct mlx5_core_dev *dev) { - int ret; + bool fast_teardown = false, force_teardown = false; + int ret = 1; + + fast_teardown = MLX5_CAP_GEN(dev, fast_teardown); + force_teardown = MLX5_CAP_GEN(dev, force_teardown); + + mlx5_core_dbg(dev, "force teardown firmware support=%d\n", force_teardown); + mlx5_core_dbg(dev, "fast teardown firmware support=%d\n", fast_teardown); - if (!MLX5_CAP_GEN(dev, force_teardown)) { - mlx5_core_dbg(dev, "force teardown is not supported in the firmware\n"); + if (!fast_teardown && !force_teardown) return -EOPNOTSUPP; - } if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) { mlx5_core_dbg(dev, "Device in internal error state, giving up\n"); @@ -1612,13 +1617,19 @@ static int mlx5_try_fast_unload(struct mlx5_core_dev *dev) mlx5_drain_health_wq(dev); mlx5_stop_health_poll(dev, false); + ret = mlx5_cmd_fast_teardown_hca(dev); + if (!ret) + goto succeed; + ret = mlx5_cmd_force_teardown_hca(dev); - if (ret) { - mlx5_core_dbg(dev, "Firmware couldn't do fast unload error: %d\n", ret); - mlx5_start_health_poll(dev); - return ret; - } + if (!ret) + goto succeed; + + mlx5_core_dbg(dev, "Firmware couldn't do fast unload error: %d\n", ret); + mlx5_start_health_poll(dev); + return ret; +succeed: mlx5_enter_error_state(dev, true); /* Some platforms requiring freeing the IRQ's in the shutdown diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h index b4134fa0bba3..cc298527baf1 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h @@ -95,6 +95,8 @@ int mlx5_query_board_id(struct mlx5_core_dev *dev); int mlx5_cmd_init_hca(struct mlx5_core_dev *dev, uint32_t *sw_owner_id); int mlx5_cmd_teardown_hca(struct mlx5_core_dev *dev); int mlx5_cmd_force_teardown_hca(struct mlx5_core_dev *dev); +int mlx5_cmd_fast_teardown_hca(struct mlx5_core_dev *dev); + void mlx5_core_event(struct mlx5_core_dev *dev, enum mlx5_dev_event event, unsigned long param); void mlx5_core_page_fault(struct mlx5_core_dev *dev, @@ -214,4 +216,14 @@ int mlx5_lag_allow(struct mlx5_core_dev *dev); int 
mlx5_lag_forbid(struct mlx5_core_dev *dev); void mlx5_reload_interface(struct mlx5_core_dev *mdev, int protocol); + +enum { + MLX5_NIC_IFC_FULL = 0, + MLX5_NIC_IFC_DISABLED = 1, + MLX5_NIC_IFC_NO_DRAM_NIC = 2, + MLX5_NIC_IFC_INVALID = 3 +}; + +u8 mlx5_get_nic_state(struct mlx5_core_dev *dev); +void mlx5_set_nic_state(struct mlx5_core_dev *dev, u8 state); #endif /* __MLX5_CORE_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/transobj.c b/drivers/net/ethernet/mellanox/mlx5/core/transobj.c index d2f76070ea7c..a1ee9a8a769e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/transobj.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/transobj.c @@ -475,7 +475,8 @@ static void mlx5_hairpin_destroy_queues(struct mlx5_hairpin *hp) for (i = 0; i < hp->num_channels; i++) { mlx5_core_destroy_rq(hp->func_mdev, hp->rqn[i]); - mlx5_core_destroy_sq(hp->peer_mdev, hp->sqn[i]); + if (!hp->peer_gone) + mlx5_core_destroy_sq(hp->peer_mdev, hp->sqn[i]); } } @@ -567,6 +568,8 @@ static void mlx5_hairpin_unpair_queues(struct mlx5_hairpin *hp) MLX5_RQC_STATE_RST, 0, 0); /* unset peer SQs */ + if (hp->peer_gone) + return; for (i = 0; i < hp->num_channels; i++) mlx5_hairpin_modify_sq(hp->peer_mdev, hp->sqn[i], MLX5_SQC_STATE_RDY, MLX5_SQC_STATE_RST, 0, 0); diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.c b/drivers/net/ethernet/mellanox/mlxsw/pci.c index 4d271fb3de3d..5890fdfd62c3 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/pci.c +++ b/drivers/net/ethernet/mellanox/mlxsw/pci.c @@ -718,14 +718,17 @@ static void mlxsw_pci_eq_tasklet(unsigned long data) memset(&active_cqns, 0, sizeof(active_cqns)); while ((eqe = mlxsw_pci_eq_sw_eqe_get(q))) { - u8 event_type = mlxsw_pci_eqe_event_type_get(eqe); - switch (event_type) { - case MLXSW_PCI_EQE_EVENT_TYPE_CMD: + /* Command interface completion events are always received on + * queue MLXSW_PCI_EQ_ASYNC_NUM (EQ0) and completion events + * are mapped to queue MLXSW_PCI_EQ_COMP_NUM (EQ1). + */ + switch (q->num) { + case MLXSW_PCI_EQ_ASYNC_NUM: mlxsw_pci_eq_cmd_event(mlxsw_pci, eqe); q->u.eq.ev_cmd_count++; break; - case MLXSW_PCI_EQE_EVENT_TYPE_COMP: + case MLXSW_PCI_EQ_COMP_NUM: cqn = mlxsw_pci_eqe_cqn_get(eqe); set_bit(cqn, active_cqns); cq_handle = true; diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h b/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h index 83f452b7ccbb..bb99f6d41fe0 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h +++ b/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h @@ -221,7 +221,7 @@ MLXSW_ITEM32(pci, eqe, event_type, 0x0C, 24, 8); MLXSW_ITEM32(pci, eqe, event_sub_type, 0x0C, 16, 8); /* pci_eqe_cqn - * Completion Queue that triggeret this EQE. + * Completion Queue that triggered this EQE. */ MLXSW_ITEM32(pci, eqe, cqn, 0x0C, 8, 7); diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h index df81e0a1eb64..32cb6718bb17 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/reg.h +++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h @@ -295,6 +295,7 @@ enum mlxsw_reg_sfd_rec_type { MLXSW_REG_SFD_REC_TYPE_UNICAST = 0x0, MLXSW_REG_SFD_REC_TYPE_UNICAST_LAG = 0x1, MLXSW_REG_SFD_REC_TYPE_MULTICAST = 0x2, + MLXSW_REG_SFD_REC_TYPE_UNICAST_TUNNEL = 0xC, }; /* reg_sfd_rec_type @@ -525,6 +526,61 @@ mlxsw_reg_sfd_mc_pack(char *payload, int rec_index, mlxsw_reg_sfd_mc_mid_set(payload, rec_index, mid); } +/* reg_sfd_uc_tunnel_uip_msb + * When protocol is IPv4, the most significant byte of the underlay IPv4 + * destination IP. + * When protocol is IPv6, reserved. 
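The MLX5_NIC_IFC_* values declared just above live in a small field of the iseg cmdq_addr_l_sz word: mlx5_get_nic_state() in the health.c hunk reads bits 9:8, and mlx5_set_nic_state() rewrites them with a read-modify-write. A hedged user-space model of that packing, assuming MLX5_NIC_IFC_OFFSET is 8 (consistent with the ">> 8" in the getter) and standing in for the ioread32be()/iowrite32be() pair with a plain variable:

#include <stdint.h>
#include <stdio.h>

#define NIC_IFC_OFFSET	8	/* assumed; matches ">> 8" in mlx5_get_nic_state() */

static uint8_t get_nic_state(uint32_t cmdq_addr_l_sz)
{
	return (cmdq_addr_l_sz >> NIC_IFC_OFFSET) & 3;
}

static uint32_t set_nic_state(uint32_t cmdq_addr_l_sz, uint8_t state)
{
	/* As in mlx5_set_nic_state(): keep bits 31:12, rewrite the low
	 * word that holds the state field. */
	return (cmdq_addr_l_sz & 0xFFFFF000) |
	       ((uint32_t)state << NIC_IFC_OFFSET);
}

int main(void)
{
	uint32_t reg = 0x12345678;

	reg = set_nic_state(reg, 1);	/* MLX5_NIC_IFC_DISABLED */
	printf("state=%u\n", get_nic_state(reg));	/* prints 1 */
	return 0;
}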
+ * Access: RW + */ +MLXSW_ITEM32_INDEXED(reg, sfd, uc_tunnel_uip_msb, MLXSW_REG_SFD_BASE_LEN, 24, + 8, MLXSW_REG_SFD_REC_LEN, 0x08, false); + +/* reg_sfd_uc_tunnel_fid + * Filtering ID. + * Access: Index + */ +MLXSW_ITEM32_INDEXED(reg, sfd, uc_tunnel_fid, MLXSW_REG_SFD_BASE_LEN, 0, 16, + MLXSW_REG_SFD_REC_LEN, 0x08, false); + +enum mlxsw_reg_sfd_uc_tunnel_protocol { + MLXSW_REG_SFD_UC_TUNNEL_PROTOCOL_IPV4, + MLXSW_REG_SFD_UC_TUNNEL_PROTOCOL_IPV6, +}; + +/* reg_sfd_uc_tunnel_protocol + * IP protocol. + * Access: RW + */ +MLXSW_ITEM32_INDEXED(reg, sfd, uc_tunnel_protocol, MLXSW_REG_SFD_BASE_LEN, 27, + 1, MLXSW_REG_SFD_REC_LEN, 0x0C, false); + +/* reg_sfd_uc_tunnel_uip_lsb + * When protocol is IPv4, the least significant bytes of the underlay + * IPv4 destination IP. + * When protocol is IPv6, pointer to the underlay IPv6 destination IP + * which is configured by RIPS. + * Access: RW + */ +MLXSW_ITEM32_INDEXED(reg, sfd, uc_tunnel_uip_lsb, MLXSW_REG_SFD_BASE_LEN, 0, + 24, MLXSW_REG_SFD_REC_LEN, 0x0C, false); + +static inline void +mlxsw_reg_sfd_uc_tunnel_pack(char *payload, int rec_index, + enum mlxsw_reg_sfd_rec_policy policy, + const char *mac, u16 fid, + enum mlxsw_reg_sfd_rec_action action, u32 uip, + enum mlxsw_reg_sfd_uc_tunnel_protocol proto) +{ + mlxsw_reg_sfd_rec_pack(payload, rec_index, + MLXSW_REG_SFD_REC_TYPE_UNICAST_TUNNEL, mac, + action); + mlxsw_reg_sfd_rec_policy_set(payload, rec_index, policy); + mlxsw_reg_sfd_uc_tunnel_uip_msb_set(payload, rec_index, uip >> 24); + mlxsw_reg_sfd_uc_tunnel_uip_lsb_set(payload, rec_index, uip); + mlxsw_reg_sfd_uc_tunnel_fid_set(payload, rec_index, fid); + mlxsw_reg_sfd_uc_tunnel_protocol_set(payload, rec_index, proto); +} + /* SFN - Switch FDB Notification Register * ------------------------------------------- * The switch provides notifications on newly learned FDB entries and @@ -1069,6 +1125,8 @@ enum mlxsw_reg_sfdf_flush_type { MLXSW_REG_SFDF_FLUSH_PER_PORT_AND_FID, MLXSW_REG_SFDF_FLUSH_PER_LAG, MLXSW_REG_SFDF_FLUSH_PER_LAG_AND_FID, + MLXSW_REG_SFDF_FLUSH_PER_NVE, + MLXSW_REG_SFDF_FLUSH_PER_NVE_AND_FID, }; /* reg_sfdf_flush_type @@ -1079,6 +1137,10 @@ enum mlxsw_reg_sfdf_flush_type { * 3 - All FID dynamic entries pointing to port are flushed. * 4 - All dynamic entries pointing to LAG are flushed. * 5 - All FID dynamic entries pointing to LAG are flushed. + * 6 - All entries of type "Unicast Tunnel" or "Multicast Tunnel" are + * flushed. + * 7 - All entries of type "Unicast Tunnel" or "Multicast Tunnel" are + * flushed, per FID. * Access: RW */ MLXSW_ITEM32(reg, sfdf, flush_type, 0x04, 28, 4); @@ -1315,12 +1377,19 @@ MLXSW_ITEM32(reg, slcr, type, 0x00, 0, 4); */ MLXSW_ITEM32(reg, slcr, lag_hash, 0x04, 0, 20); -static inline void mlxsw_reg_slcr_pack(char *payload, u16 lag_hash) +/* reg_slcr_seed + * LAG seed value. The seed is the same for all ports. 
+ * Access: RW + */ +MLXSW_ITEM32(reg, slcr, seed, 0x08, 0, 32); + +static inline void mlxsw_reg_slcr_pack(char *payload, u16 lag_hash, u32 seed) { MLXSW_REG_ZERO(slcr, payload); mlxsw_reg_slcr_pp_set(payload, MLXSW_REG_SLCR_PP_GLOBAL); mlxsw_reg_slcr_type_set(payload, MLXSW_REG_SLCR_TYPE_CRC); mlxsw_reg_slcr_lag_hash_set(payload, lag_hash); + mlxsw_reg_slcr_seed_set(payload, seed); } /* SLCOR - Switch LAG Collector Register @@ -8279,6 +8348,508 @@ static inline void mlxsw_reg_mgpc_pack(char *payload, u32 counter_index, mlxsw_reg_mgpc_opcode_set(payload, opcode); } +/* MPRS - Monitoring Parsing State Register + * ---------------------------------------- + * The MPRS register is used for setting up the parsing for hash, + * policy-engine and routing. + */ +#define MLXSW_REG_MPRS_ID 0x9083 +#define MLXSW_REG_MPRS_LEN 0x14 + +MLXSW_REG_DEFINE(mprs, MLXSW_REG_MPRS_ID, MLXSW_REG_MPRS_LEN); + +/* reg_mprs_parsing_depth + * Minimum parsing depth. + * Need to enlarge parsing depth according to L3, MPLS, tunnels, ACL + * rules, traps, hash, etc. Default is 96 bytes. Reserved when SwitchX-2. + * Access: RW + */ +MLXSW_ITEM32(reg, mprs, parsing_depth, 0x00, 0, 16); + +/* reg_mprs_parsing_en + * Parsing enable. + * Bit 0 - Enable parsing of NVE of types VxLAN, VxLAN-GPE, GENEVE and + * NVGRE. Default is enabled. Reserved when SwitchX-2. + * Access: RW + */ +MLXSW_ITEM32(reg, mprs, parsing_en, 0x04, 0, 16); + +/* reg_mprs_vxlan_udp_dport + * VxLAN UDP destination port. + * Used for identifying VxLAN packets and for dport field in + * encapsulation. Default is 4789. + * Access: RW + */ +MLXSW_ITEM32(reg, mprs, vxlan_udp_dport, 0x10, 0, 16); + +static inline void mlxsw_reg_mprs_pack(char *payload, u16 parsing_depth, + u16 vxlan_udp_dport) +{ + MLXSW_REG_ZERO(mprs, payload); + mlxsw_reg_mprs_parsing_depth_set(payload, parsing_depth); + mlxsw_reg_mprs_parsing_en_set(payload, true); + mlxsw_reg_mprs_vxlan_udp_dport_set(payload, vxlan_udp_dport); +} + +/* TNGCR - Tunneling NVE General Configuration Register + * ---------------------------------------------------- + * The TNGCR register is used for setting up the NVE Tunneling configuration. + */ +#define MLXSW_REG_TNGCR_ID 0xA001 +#define MLXSW_REG_TNGCR_LEN 0x44 + +MLXSW_REG_DEFINE(tngcr, MLXSW_REG_TNGCR_ID, MLXSW_REG_TNGCR_LEN); + +enum mlxsw_reg_tngcr_type { + MLXSW_REG_TNGCR_TYPE_VXLAN, + MLXSW_REG_TNGCR_TYPE_VXLAN_GPE, + MLXSW_REG_TNGCR_TYPE_GENEVE, + MLXSW_REG_TNGCR_TYPE_NVGRE, +}; + +/* reg_tngcr_type + * Tunnel type for encapsulation and decapsulation. The types are mutually + * exclusive. + * Note: For Spectrum the NVE parsing must be enabled in MPRS. + * Access: RW + */ +MLXSW_ITEM32(reg, tngcr, type, 0x00, 0, 4); + +/* reg_tngcr_nve_valid + * The VTEP is valid. Allows adding FDB entries for tunnel encapsulation. + * Access: RW + */ +MLXSW_ITEM32(reg, tngcr, nve_valid, 0x04, 31, 1); + +/* reg_tngcr_nve_ttl_uc + * The TTL for NVE tunnel encapsulation underlay unicast packets. + * Access: RW + */ +MLXSW_ITEM32(reg, tngcr, nve_ttl_uc, 0x04, 0, 8); + +/* reg_tngcr_nve_ttl_mc + * The TTL for NVE tunnel encapsulation underlay multicast packets. + * Access: RW + */ +MLXSW_ITEM32(reg, tngcr, nve_ttl_mc, 0x08, 0, 8); + +enum { + /* Do not copy flow label. Calculate flow label using nve_flh. */ + MLXSW_REG_TNGCR_FL_NO_COPY, + /* Copy flow label from inner packet if packet is IPv6 and + * encapsulation is by IPv6. Otherwise, calculate flow label using + * nve_flh. 
+ */ + MLXSW_REG_TNGCR_FL_COPY, +}; + +/* reg_tngcr_nve_flc + * For NVE tunnel encapsulation: Flow label copy from inner packet. + * Access: RW + */ +MLXSW_ITEM32(reg, tngcr, nve_flc, 0x0C, 25, 1); + +enum { + /* Flow label is static. In Spectrum this means '0'. Spectrum-2 + * uses {nve_fl_prefix, nve_fl_suffix}. + */ + MLXSW_REG_TNGCR_FL_NO_HASH, + /* 8 LSBs of the flow label are calculated from ECMP hash of the + * inner packet. 12 MSBs are configured by nve_fl_prefix. + */ + MLXSW_REG_TNGCR_FL_HASH, +}; + +/* reg_tngcr_nve_flh + * NVE flow label hash. + * Access: RW + */ +MLXSW_ITEM32(reg, tngcr, nve_flh, 0x0C, 24, 1); + +/* reg_tngcr_nve_fl_prefix + * NVE flow label prefix. Constant 12 MSBs of the flow label. + * Access: RW + */ +MLXSW_ITEM32(reg, tngcr, nve_fl_prefix, 0x0C, 8, 12); + +/* reg_tngcr_nve_fl_suffix + * NVE flow label suffix. Constant 8 LSBs of the flow label. + * Reserved when nve_flh=1 and for Spectrum. + * Access: RW + */ +MLXSW_ITEM32(reg, tngcr, nve_fl_suffix, 0x0C, 0, 8); + +enum { + /* Source UDP port is fixed (default '0') */ + MLXSW_REG_TNGCR_UDP_SPORT_NO_HASH, + /* Source UDP port is calculated based on hash */ + MLXSW_REG_TNGCR_UDP_SPORT_HASH, +}; + +/* reg_tngcr_nve_udp_sport_type + * NVE UDP source port type. + * Spectrum uses LAG hash (SLCRv2). Spectrum-2 uses ECMP hash (RECRv2). + * When the source UDP port is calculated based on hash, then the 8 LSBs + * are calculated from the hash and the 8 MSBs are configured by + * nve_udp_sport_prefix. + * Access: RW + */ +MLXSW_ITEM32(reg, tngcr, nve_udp_sport_type, 0x10, 24, 1); + +/* reg_tngcr_nve_udp_sport_prefix + * NVE UDP source port prefix. Constant 8 MSBs of the UDP source port. + * Reserved when NVE type is NVGRE. + * Access: RW + */ +MLXSW_ITEM32(reg, tngcr, nve_udp_sport_prefix, 0x10, 8, 8); + +/* reg_tngcr_nve_group_size_mc + * The number of sequential linked lists of MC entries. The first linked + * list is configured by SFD.underlay_mc_ptr. + * Valid values: 1, 2, 4, 8, 16, 32, 64 + * The linked lists are configured by TNUMT. + * The hash is set by LAG hash. + * Access: RW + */ +MLXSW_ITEM32(reg, tngcr, nve_group_size_mc, 0x18, 0, 8); + +/* reg_tngcr_nve_group_size_flood + * The number of sequential linked lists of flooding entries. The first + * linked list is configured by SFMR.nve_tunnel_flood_ptr. + * Valid values: 1, 2, 4, 8, 16, 32, 64 + * The linked lists are configured by TNUMT. + * The hash is set by LAG hash. + * Access: RW + */ +MLXSW_ITEM32(reg, tngcr, nve_group_size_flood, 0x1C, 0, 8); + +/* reg_tngcr_learn_enable + * During decapsulation, whether to learn from NVE port. + * Reserved when Spectrum-2. See TNPC. + * Access: RW + */ +MLXSW_ITEM32(reg, tngcr, learn_enable, 0x20, 31, 1); + +/* reg_tngcr_underlay_virtual_router + * Underlay virtual router. + * Reserved when Spectrum-2. + * Access: RW + */ +MLXSW_ITEM32(reg, tngcr, underlay_virtual_router, 0x20, 0, 16); + +/* reg_tngcr_underlay_rif + * Underlay ingress router interface. RIF type should be loopback generic. + * Reserved when Spectrum. + * Access: RW + */ +MLXSW_ITEM32(reg, tngcr, underlay_rif, 0x24, 0, 16); + +/* reg_tngcr_usipv4 + * Underlay source IPv4 address of the NVE. + * Access: RW + */ +MLXSW_ITEM32(reg, tngcr, usipv4, 0x28, 0, 32); + +/* reg_tngcr_usipv6 + * Underlay source IPv6 address of the NVE. For Spectrum, must not be + * modified under traffic of NVE tunneling encapsulation. 
+ * Access: RW + */ +MLXSW_ITEM_BUF(reg, tngcr, usipv6, 0x30, 16); + +static inline void mlxsw_reg_tngcr_pack(char *payload, + enum mlxsw_reg_tngcr_type type, + bool valid, u8 ttl) +{ + MLXSW_REG_ZERO(tngcr, payload); + mlxsw_reg_tngcr_type_set(payload, type); + mlxsw_reg_tngcr_nve_valid_set(payload, valid); + mlxsw_reg_tngcr_nve_ttl_uc_set(payload, ttl); + mlxsw_reg_tngcr_nve_ttl_mc_set(payload, ttl); + mlxsw_reg_tngcr_nve_flc_set(payload, MLXSW_REG_TNGCR_FL_NO_COPY); + mlxsw_reg_tngcr_nve_flh_set(payload, 0); + mlxsw_reg_tngcr_nve_udp_sport_type_set(payload, + MLXSW_REG_TNGCR_UDP_SPORT_HASH); + mlxsw_reg_tngcr_nve_udp_sport_prefix_set(payload, 0); + mlxsw_reg_tngcr_nve_group_size_mc_set(payload, 1); + mlxsw_reg_tngcr_nve_group_size_flood_set(payload, 1); +} + +/* TNUMT - Tunneling NVE Underlay Multicast Table Register + * ------------------------------------------------------- + * The TNUMT register is for building the underlay MC table. It is used + * for MC, flooding and BC traffic into the NVE tunnel. + */ +#define MLXSW_REG_TNUMT_ID 0xA003 +#define MLXSW_REG_TNUMT_LEN 0x20 + +MLXSW_REG_DEFINE(tnumt, MLXSW_REG_TNUMT_ID, MLXSW_REG_TNUMT_LEN); + +enum mlxsw_reg_tnumt_record_type { + MLXSW_REG_TNUMT_RECORD_TYPE_IPV4, + MLXSW_REG_TNUMT_RECORD_TYPE_IPV6, + MLXSW_REG_TNUMT_RECORD_TYPE_LABEL, +}; + +/* reg_tnumt_record_type + * Record type. + * Access: RW + */ +MLXSW_ITEM32(reg, tnumt, record_type, 0x00, 28, 4); + +enum mlxsw_reg_tnumt_tunnel_port { + MLXSW_REG_TNUMT_TUNNEL_PORT_NVE, + MLXSW_REG_TNUMT_TUNNEL_PORT_VPLS, + MLXSW_REG_TNUMT_TUNNEL_FLEX_TUNNEL0, + MLXSW_REG_TNUMT_TUNNEL_FLEX_TUNNEL1, +}; + +/* reg_tnumt_tunnel_port + * Tunnel port. + * Access: RW + */ +MLXSW_ITEM32(reg, tnumt, tunnel_port, 0x00, 24, 4); + +/* reg_tnumt_underlay_mc_ptr + * Index to the underlay multicast table. + * For Spectrum the index is to the KVD linear. + * Access: Index + */ +MLXSW_ITEM32(reg, tnumt, underlay_mc_ptr, 0x00, 0, 24); + +/* reg_tnumt_vnext + * The next_underlay_mc_ptr is valid. + * Access: RW + */ +MLXSW_ITEM32(reg, tnumt, vnext, 0x04, 31, 1); + +/* reg_tnumt_next_underlay_mc_ptr + * The next index to the underlay multicast table. + * Access: RW + */ +MLXSW_ITEM32(reg, tnumt, next_underlay_mc_ptr, 0x04, 0, 24); + +/* reg_tnumt_record_size + * Number of IP addresses in the record. + * Range is 1..cap_max_nve_mc_entries_ipv{4,6} + * Access: RW + */ +MLXSW_ITEM32(reg, tnumt, record_size, 0x08, 0, 3); + +/* reg_tnumt_udip + * The underlay IPv4 addresses. udip[i] is reserved if i >= size + * Access: RW + */ +MLXSW_ITEM32_INDEXED(reg, tnumt, udip, 0x0C, 0, 32, 0x04, 0x00, false); + +/* reg_tnumt_udip_ptr + * The pointer to the underlay IPv6 addresses. udip_ptr[i] is reserved if + * i >= size. The IPv6 addresses are configured by RIPS. 
+ * Access: RW + */ +MLXSW_ITEM32_INDEXED(reg, tnumt, udip_ptr, 0x0C, 0, 24, 0x04, 0x00, false); + +static inline void mlxsw_reg_tnumt_pack(char *payload, + enum mlxsw_reg_tnumt_record_type type, + enum mlxsw_reg_tnumt_tunnel_port tport, + u32 underlay_mc_ptr, bool vnext, + u32 next_underlay_mc_ptr, + u8 record_size) +{ + MLXSW_REG_ZERO(tnumt, payload); + mlxsw_reg_tnumt_record_type_set(payload, type); + mlxsw_reg_tnumt_tunnel_port_set(payload, tport); + mlxsw_reg_tnumt_underlay_mc_ptr_set(payload, underlay_mc_ptr); + mlxsw_reg_tnumt_vnext_set(payload, vnext); + mlxsw_reg_tnumt_next_underlay_mc_ptr_set(payload, next_underlay_mc_ptr); + mlxsw_reg_tnumt_record_size_set(payload, record_size); +} + +/* TNQCR - Tunneling NVE QoS Configuration Register + * ------------------------------------------------ + * The TNQCR register configures how QoS is set in encapsulation into the + * underlay network. + */ +#define MLXSW_REG_TNQCR_ID 0xA010 +#define MLXSW_REG_TNQCR_LEN 0x0C + +MLXSW_REG_DEFINE(tnqcr, MLXSW_REG_TNQCR_ID, MLXSW_REG_TNQCR_LEN); + +/* reg_tnqcr_enc_set_dscp + * For encapsulation: how to set the DSCP field: + * 0 - Copy the DSCP from the overlay (inner) IP header to the underlay + * (outer) IP header. If there is no IP header, use TNQDR.dscp. + * 1 - Set the DSCP field as TNQDR.dscp + * Access: RW + */ +MLXSW_ITEM32(reg, tnqcr, enc_set_dscp, 0x04, 28, 1); + +static inline void mlxsw_reg_tnqcr_pack(char *payload) +{ + MLXSW_REG_ZERO(tnqcr, payload); + mlxsw_reg_tnqcr_enc_set_dscp_set(payload, 0); +} + +/* TNQDR - Tunneling NVE QoS Default Register + * ------------------------------------------ + * The TNQDR register configures the default QoS settings for NVE + * encapsulation. + */ +#define MLXSW_REG_TNQDR_ID 0xA011 +#define MLXSW_REG_TNQDR_LEN 0x08 + +MLXSW_REG_DEFINE(tnqdr, MLXSW_REG_TNQDR_ID, MLXSW_REG_TNQDR_LEN); + +/* reg_tnqdr_local_port + * Local port number (receive port). CPU port is supported. + * Access: Index + */ +MLXSW_ITEM32(reg, tnqdr, local_port, 0x00, 16, 8); + +/* reg_tnqdr_dscp + * For encapsulation, the default DSCP. + * Access: RW + */ +MLXSW_ITEM32(reg, tnqdr, dscp, 0x04, 0, 6); + +static inline void mlxsw_reg_tnqdr_pack(char *payload, u8 local_port) +{ + MLXSW_REG_ZERO(tnqdr, payload); + mlxsw_reg_tnqdr_local_port_set(payload, local_port); + mlxsw_reg_tnqdr_dscp_set(payload, 0); +} + +/* TNEEM - Tunneling NVE Encapsulation ECN Mapping Register + * -------------------------------------------------------- + * The TNEEM register maps the ECN of the overlay IP header, as seen at + * the ingress to the encapsulation, to the ECN of the underlay network. + */ +#define MLXSW_REG_TNEEM_ID 0xA012 +#define MLXSW_REG_TNEEM_LEN 0x0C + +MLXSW_REG_DEFINE(tneem, MLXSW_REG_TNEEM_ID, MLXSW_REG_TNEEM_LEN); + +/* reg_tneem_overlay_ecn + * ECN of the IP header in the overlay network. + * Access: Index + */ +MLXSW_ITEM32(reg, tneem, overlay_ecn, 0x04, 24, 2); + +/* reg_tneem_underlay_ecn + * ECN of the IP header in the underlay network. + * Access: RW + */ +MLXSW_ITEM32(reg, tneem, underlay_ecn, 0x04, 16, 2); + +static inline void mlxsw_reg_tneem_pack(char *payload, u8 overlay_ecn, + u8 underlay_ecn) +{ + MLXSW_REG_ZERO(tneem, payload); + mlxsw_reg_tneem_overlay_ecn_set(payload, overlay_ecn); + mlxsw_reg_tneem_underlay_ecn_set(payload, underlay_ecn); +} + +/* TNDEM - Tunneling NVE Decapsulation ECN Mapping Register + * -------------------------------------------------------- + * The TNDEM register configures the actions taken during + * decapsulation. 
+ */ +#define MLXSW_REG_TNDEM_ID 0xA013 +#define MLXSW_REG_TNDEM_LEN 0x0C + +MLXSW_REG_DEFINE(tndem, MLXSW_REG_TNDEM_ID, MLXSW_REG_TNDEM_LEN); + +/* reg_tndem_underlay_ecn + * ECN field of the IP header in the underlay network. + * Access: Index + */ +MLXSW_ITEM32(reg, tndem, underlay_ecn, 0x04, 24, 2); + +/* reg_tndem_overlay_ecn + * ECN field of the IP header in the overlay network. + * Access: Index + */ +MLXSW_ITEM32(reg, tndem, overlay_ecn, 0x04, 16, 2); + +/* reg_tndem_eip_ecn + * Egress IP ECN. ECN field of the IP header of the packet which goes out + * from the decapsulation. + * Access: RW + */ +MLXSW_ITEM32(reg, tndem, eip_ecn, 0x04, 8, 2); + +/* reg_tndem_trap_en + * Trap enable: + * 0 - No trap due to decap ECN + * 1 - Trap enable with trap_id + * Access: RW + */ +MLXSW_ITEM32(reg, tndem, trap_en, 0x08, 28, 4); + +/* reg_tndem_trap_id + * Trap ID. Either DECAP_ECN0 or DECAP_ECN1. + * Reserved when trap_en is '0'. + * Access: RW + */ +MLXSW_ITEM32(reg, tndem, trap_id, 0x08, 0, 9); + +static inline void mlxsw_reg_tndem_pack(char *payload, u8 underlay_ecn, + u8 overlay_ecn, u8 ecn, bool trap_en, + u16 trap_id) +{ + MLXSW_REG_ZERO(tndem, payload); + mlxsw_reg_tndem_underlay_ecn_set(payload, underlay_ecn); + mlxsw_reg_tndem_overlay_ecn_set(payload, overlay_ecn); + mlxsw_reg_tndem_eip_ecn_set(payload, ecn); + mlxsw_reg_tndem_trap_en_set(payload, trap_en); + mlxsw_reg_tndem_trap_id_set(payload, trap_id); +} + +/* TNPC - Tunnel Port Configuration Register + * ----------------------------------------- + * The TNPC register is used for tunnel port configuration. + * Reserved when Spectrum. + */ +#define MLXSW_REG_TNPC_ID 0xA020 +#define MLXSW_REG_TNPC_LEN 0x18 + +MLXSW_REG_DEFINE(tnpc, MLXSW_REG_TNPC_ID, MLXSW_REG_TNPC_LEN); + +enum mlxsw_reg_tnpc_tunnel_port { + MLXSW_REG_TNPC_TUNNEL_PORT_NVE, + MLXSW_REG_TNPC_TUNNEL_PORT_VPLS, + MLXSW_REG_TNPC_TUNNEL_FLEX_TUNNEL0, + MLXSW_REG_TNPC_TUNNEL_FLEX_TUNNEL1, +}; + +/* reg_tnpc_tunnel_port + * Tunnel port. + * Access: Index + */ +MLXSW_ITEM32(reg, tnpc, tunnel_port, 0x00, 0, 4); + +/* reg_tnpc_learn_enable_v6 + * During IPv6 underlay decapsulation, whether to learn from tunnel port. + * Access: RW + */ +MLXSW_ITEM32(reg, tnpc, learn_enable_v6, 0x04, 1, 1); + +/* reg_tnpc_learn_enable_v4 + * During IPv4 underlay decapsulation, whether to learn from tunnel port. + * Access: RW + */ +MLXSW_ITEM32(reg, tnpc, learn_enable_v4, 0x04, 0, 1); + +static inline void mlxsw_reg_tnpc_pack(char *payload, + enum mlxsw_reg_tnpc_tunnel_port tport, + bool learn_enable) +{ + MLXSW_REG_ZERO(tnpc, payload); + mlxsw_reg_tnpc_tunnel_port_set(payload, tport); + mlxsw_reg_tnpc_learn_enable_v4_set(payload, learn_enable); + mlxsw_reg_tnpc_learn_enable_v6_set(payload, learn_enable); +} + /* TIGCR - Tunneling IPinIP General Configuration Register * ------------------------------------------------------- * The TIGCR register is used for setting up the IPinIP Tunnel configuration. 
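[Editor's note, not part of the patch] The TN* helpers added above only pack a payload buffer; the configuration is committed with mlxsw_reg_write(), the same idiom the SLCR hunk in spectrum.c further below uses. A minimal sketch of a VxLAN VTEP bring-up built from the helpers defined in this patch follows; the function name, the 128-byte parsing depth and the mc_ptr/remote_ipv4 parameters are illustrative assumptions, not part of the patch:

static int mlxsw_nve_vxlan_sketch(struct mlxsw_core *core, u32 local_ipv4,
				  u32 remote_ipv4, u32 mc_ptr, u8 ttl)
{
	char mprs_pl[MLXSW_REG_MPRS_LEN];
	char tnumt_pl[MLXSW_REG_TNUMT_LEN];
	char tngcr_pl[MLXSW_REG_TNGCR_LEN];
	int err;

	/* Enable NVE parsing and match VxLAN on the default UDP dport;
	 * the 128-byte parsing depth is an assumed example value.
	 */
	mlxsw_reg_mprs_pack(mprs_pl, 128, 4789);
	err = mlxsw_reg_write(core, MLXSW_REG(mprs), mprs_pl);
	if (err)
		return err;

	/* Single-entry IPv4 flood record; mc_ptr stands for a KVD linear
	 * index, e.g. from the TNUMT KVDL part added further below.
	 */
	mlxsw_reg_tnumt_pack(tnumt_pl, MLXSW_REG_TNUMT_RECORD_TYPE_IPV4,
			     MLXSW_REG_TNUMT_TUNNEL_PORT_NVE, mc_ptr,
			     false, 0, 1);
	mlxsw_reg_tnumt_udip_set(tnumt_pl, 0, remote_ipv4);
	err = mlxsw_reg_write(core, MLXSW_REG(tnumt), tnumt_pl);
	if (err)
		return err;

	/* Declare the VTEP valid; the pack helper defaults to one flood/MC
	 * linked list and a hashed UDP source port.
	 */
	mlxsw_reg_tngcr_pack(tngcr_pl, MLXSW_REG_TNGCR_TYPE_VXLAN, true, ttl);
	mlxsw_reg_tngcr_usipv4_set(tngcr_pl, local_ipv4);
	return mlxsw_reg_write(core, MLXSW_REG(tngcr), tngcr_pl);
}

Overlay MACs are then bound to the tunnel with mlxsw_reg_sfd_uc_tunnel_pack() from the SFD hunk above, and can be flushed per NVE via the new SFDF flush types.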
@@ -8828,6 +9399,14 @@ static const struct mlxsw_reg_info *mlxsw_reg_infos[] = { MLXSW_REG(mcc), MLXSW_REG(mcda), MLXSW_REG(mgpc), + MLXSW_REG(mprs), + MLXSW_REG(tngcr), + MLXSW_REG(tnumt), + MLXSW_REG(tnqcr), + MLXSW_REG(tnqdr), + MLXSW_REG(tneem), + MLXSW_REG(tndem), + MLXSW_REG(tnpc), MLXSW_REG(tigcr), MLXSW_REG(sbpr), MLXSW_REG(sbcm), diff --git a/drivers/net/ethernet/mellanox/mlxsw/resources.h b/drivers/net/ethernet/mellanox/mlxsw/resources.h index 79a31de7c825..99b341539870 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/resources.h +++ b/drivers/net/ethernet/mellanox/mlxsw/resources.h @@ -46,6 +46,8 @@ enum mlxsw_res_id { MLXSW_RES_ID_MAX_RIFS, MLXSW_RES_ID_MC_ERIF_LIST_ENTRIES, MLXSW_RES_ID_MAX_LPM_TREES, + MLXSW_RES_ID_MAX_NVE_MC_ENTRIES_IPV4, + MLXSW_RES_ID_MAX_NVE_MC_ENTRIES_IPV6, /* Internal resources. * Determined by the SW, not queried from the HW. @@ -96,6 +98,8 @@ static u16 mlxsw_res_ids[] = { [MLXSW_RES_ID_MAX_RIFS] = 0x2C02, [MLXSW_RES_ID_MC_ERIF_LIST_ENTRIES] = 0x2C10, [MLXSW_RES_ID_MAX_LPM_TREES] = 0x2C30, + [MLXSW_RES_ID_MAX_NVE_MC_ENTRIES_IPV4] = 0x2E02, + [MLXSW_RES_ID_MAX_NVE_MC_ENTRIES_IPV6] = 0x2E03, }; struct mlxsw_res { diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index 88c33a8474eb..ed7e4c4e7403 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@ -21,6 +21,7 @@ #include <linux/dcbnl.h> #include <linux/inetdevice.h> #include <linux/netlink.h> +#include <linux/random.h> #include <net/switchdev.h> #include <net/pkt_cls.h> #include <net/tc_act/tc_mirred.h> @@ -3469,6 +3470,7 @@ static const struct mlxsw_listener mlxsw_sp_listener[] = { MLXSW_SP_RXL_MARK(ROUTER_ALERT_IPV4, TRAP_TO_CPU, ROUTER_EXP, false), MLXSW_SP_RXL_MARK(ROUTER_ALERT_IPV6, TRAP_TO_CPU, ROUTER_EXP, false), MLXSW_SP_RXL_MARK(IPIP_DECAP_ERROR, TRAP_TO_CPU, ROUTER_EXP, false), + MLXSW_SP_RXL_MARK(DECAP_ECN0, TRAP_TO_CPU, ROUTER_EXP, false), MLXSW_SP_RXL_MARK(IPV4_VRRP, TRAP_TO_CPU, ROUTER_EXP, false), MLXSW_SP_RXL_MARK(IPV6_VRRP, TRAP_TO_CPU, ROUTER_EXP, false), /* PKT Sample trap */ @@ -3482,6 +3484,8 @@ static const struct mlxsw_listener mlxsw_sp_listener[] = { MLXSW_SP_RXL_MARK(RPF, TRAP_TO_CPU, RPF, false), MLXSW_SP_RXL_MARK(ACL1, TRAP_TO_CPU, MULTICAST, false), MLXSW_SP_RXL_MR_MARK(ACL2, TRAP_TO_CPU, MULTICAST, false), + /* NVE traps */ + MLXSW_SP_RXL_MARK(NVE_ENCAP_ARP, TRAP_TO_CPU, ARP, false), }; static int mlxsw_sp_cpu_policers_set(struct mlxsw_core *mlxsw_core) @@ -3666,8 +3670,10 @@ static void mlxsw_sp_traps_fini(struct mlxsw_sp *mlxsw_sp) static int mlxsw_sp_lag_init(struct mlxsw_sp *mlxsw_sp) { char slcr_pl[MLXSW_REG_SLCR_LEN]; + u32 seed; int err; + get_random_bytes(&seed, sizeof(seed)); mlxsw_reg_slcr_pack(slcr_pl, MLXSW_REG_SLCR_LAG_HASH_SMAC | MLXSW_REG_SLCR_LAG_HASH_DMAC | MLXSW_REG_SLCR_LAG_HASH_ETHERTYPE | @@ -3676,7 +3682,7 @@ static int mlxsw_sp_lag_init(struct mlxsw_sp *mlxsw_sp) MLXSW_REG_SLCR_LAG_HASH_DIP | MLXSW_REG_SLCR_LAG_HASH_SPORT | MLXSW_REG_SLCR_LAG_HASH_DPORT | - MLXSW_REG_SLCR_LAG_HASH_IPPROTO); + MLXSW_REG_SLCR_LAG_HASH_IPPROTO, seed); err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcr), slcr_pl); if (err) return err; @@ -4855,6 +4861,8 @@ static int mlxsw_sp_netdevice_bridge_event(struct net_device *br_dev, upper_dev = info->upper_dev; if (info->linking) break; + if (is_vlan_dev(upper_dev)) + mlxsw_sp_rif_destroy_by_dev(mlxsw_sp, upper_dev); if (netif_is_macvlan(upper_dev)) mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev); break; diff 
--git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h index 3cdb7aca90b7..1f68ac2a20f4 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h @@ -383,6 +383,17 @@ static inline void mlxsw_sp_port_dcb_fini(struct mlxsw_sp_port *mlxsw_sp_port) #endif /* spectrum_router.c */ +enum mlxsw_sp_l3proto { + MLXSW_SP_L3_PROTO_IPV4, + MLXSW_SP_L3_PROTO_IPV6, +#define MLXSW_SP_L3_PROTO_MAX (MLXSW_SP_L3_PROTO_IPV6 + 1) +}; + +union mlxsw_sp_l3addr { + __be32 addr4; + struct in6_addr addr6; +}; + int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp); void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp); int mlxsw_sp_netdevice_router_port_event(struct net_device *dev); @@ -416,6 +427,10 @@ mlxsw_sp_port_vlan_router_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan); void mlxsw_sp_rif_destroy(struct mlxsw_sp_rif *rif); void mlxsw_sp_rif_destroy_by_dev(struct mlxsw_sp *mlxsw_sp, struct net_device *dev); +struct mlxsw_sp_rif *mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp, + const struct net_device *dev); +u8 mlxsw_sp_router_port(const struct mlxsw_sp *mlxsw_sp); +struct mlxsw_sp_fid *mlxsw_sp_rif_fid(const struct mlxsw_sp_rif *rif); /* spectrum_kvdl.c */ enum mlxsw_sp_kvdl_entry_type { @@ -423,6 +438,7 @@ enum mlxsw_sp_kvdl_entry_type { MLXSW_SP_KVDL_ENTRY_TYPE_ACTSET, MLXSW_SP_KVDL_ENTRY_TYPE_PBS, MLXSW_SP_KVDL_ENTRY_TYPE_MCRIGR, + MLXSW_SP_KVDL_ENTRY_TYPE_TNUMT, }; static inline unsigned int @@ -433,6 +449,7 @@ mlxsw_sp_kvdl_entry_size(enum mlxsw_sp_kvdl_entry_type type) case MLXSW_SP_KVDL_ENTRY_TYPE_ACTSET: /* fall through */ case MLXSW_SP_KVDL_ENTRY_TYPE_PBS: /* fall through */ case MLXSW_SP_KVDL_ENTRY_TYPE_MCRIGR: /* fall through */ + case MLXSW_SP_KVDL_ENTRY_TYPE_TNUMT: /* fall through */ default: return 1; } diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum2_kvdl.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum2_kvdl.c index 68c8b148bef2..8d14770766b4 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum2_kvdl.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum2_kvdl.c @@ -35,6 +35,7 @@ static const struct mlxsw_sp2_kvdl_part_info mlxsw_sp2_kvdl_parts_info[] = { MAX_KVD_ACTION_SETS), MLXSW_SP2_KVDL_PART_INFO(PBS, 0x24, KVD_SIZE, KVD_SIZE), MLXSW_SP2_KVDL_PART_INFO(MCRIGR, 0x26, KVD_SIZE, KVD_SIZE), + MLXSW_SP2_KVDL_PART_INFO(TNUMT, 0x29, KVD_SIZE, KVD_SIZE), }; #define MLXSW_SP2_KVDL_PARTS_INFO_LEN ARRAY_SIZE(mlxsw_sp2_kvdl_parts_info) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h index 1a60391daafa..3dbafdeaab2b 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h @@ -7,17 +7,6 @@ #include "spectrum.h" #include "reg.h" -enum mlxsw_sp_l3proto { - MLXSW_SP_L3_PROTO_IPV4, - MLXSW_SP_L3_PROTO_IPV6, -#define MLXSW_SP_L3_PROTO_MAX (MLXSW_SP_L3_PROTO_IPV6 + 1) -}; - -union mlxsw_sp_l3addr { - __be32 addr4; - struct in6_addr addr6; -}; - struct mlxsw_sp_rif_ipip_lb; struct mlxsw_sp_rif_ipip_lb_config { enum mlxsw_reg_ritr_loopback_ipip_type lb_ipipt; @@ -35,8 +24,6 @@ struct mlxsw_sp_neigh_entry; struct mlxsw_sp_nexthop; struct mlxsw_sp_ipip_entry; -struct mlxsw_sp_rif *mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp, - const struct net_device *dev); struct mlxsw_sp_rif *mlxsw_sp_rif_by_index(const struct mlxsw_sp *mlxsw_sp, u16 rif_index); u16 mlxsw_sp_rif_index(const struct mlxsw_sp_rif *rif); @@ -44,9 +31,7 @@ u16 
mlxsw_sp_ipip_lb_rif_index(const struct mlxsw_sp_rif_ipip_lb *rif); u16 mlxsw_sp_ipip_lb_ul_vr_id(const struct mlxsw_sp_rif_ipip_lb *rif); u32 mlxsw_sp_ipip_dev_ul_tb_id(const struct net_device *ol_dev); int mlxsw_sp_rif_dev_ifindex(const struct mlxsw_sp_rif *rif); -u8 mlxsw_sp_router_port(const struct mlxsw_sp *mlxsw_sp); const struct net_device *mlxsw_sp_rif_dev(const struct mlxsw_sp_rif *rif); -struct mlxsw_sp_fid *mlxsw_sp_rif_fid(const struct mlxsw_sp_rif *rif); int mlxsw_sp_rif_counter_value_get(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_rif *rif, enum mlxsw_sp_rif_counter_dir dir, diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c index db715da7bab7..fa16ad2c6a50 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c @@ -17,7 +17,6 @@ #include <net/switchdev.h> #include "spectrum_span.h" -#include "spectrum_router.h" #include "spectrum_switchdev.h" #include "spectrum.h" #include "core.h" @@ -2289,7 +2288,7 @@ struct mlxsw_sp_switchdev_event_work { unsigned long event; }; -static void mlxsw_sp_switchdev_event_work(struct work_struct *work) +static void mlxsw_sp_switchdev_bridge_fdb_event_work(struct work_struct *work) { struct mlxsw_sp_switchdev_event_work *switchdev_work = container_of(work, struct mlxsw_sp_switchdev_event_work, work); @@ -2344,16 +2343,23 @@ static int mlxsw_sp_switchdev_event(struct notifier_block *unused, { struct net_device *dev = switchdev_notifier_info_to_dev(ptr); struct mlxsw_sp_switchdev_event_work *switchdev_work; - struct switchdev_notifier_fdb_info *fdb_info = ptr; + struct switchdev_notifier_fdb_info *fdb_info; + struct switchdev_notifier_info *info = ptr; + struct net_device *br_dev; - if (!mlxsw_sp_port_dev_lower_find_rcu(dev)) + /* Tunnel devices are not our uppers, so check their master instead */ + br_dev = netdev_master_upper_dev_get_rcu(dev); + if (!br_dev) + return NOTIFY_DONE; + if (!netif_is_bridge_master(br_dev)) + return NOTIFY_DONE; + if (!mlxsw_sp_port_dev_lower_find_rcu(br_dev)) return NOTIFY_DONE; switchdev_work = kzalloc(sizeof(*switchdev_work), GFP_ATOMIC); if (!switchdev_work) return NOTIFY_BAD; - INIT_WORK(&switchdev_work->work, mlxsw_sp_switchdev_event_work); switchdev_work->dev = dev; switchdev_work->event = event; @@ -2362,6 +2368,11 @@ static int mlxsw_sp_switchdev_event(struct notifier_block *unused, case SWITCHDEV_FDB_DEL_TO_DEVICE: /* fall through */ case SWITCHDEV_FDB_ADD_TO_BRIDGE: /* fall through */ case SWITCHDEV_FDB_DEL_TO_BRIDGE: + fdb_info = container_of(info, + struct switchdev_notifier_fdb_info, + info); + INIT_WORK(&switchdev_work->work, + mlxsw_sp_switchdev_bridge_fdb_event_work); memcpy(&switchdev_work->fdb_info, ptr, sizeof(switchdev_work->fdb_info)); switchdev_work->fdb_info.addr = kzalloc(ETH_ALEN, GFP_ATOMIC); diff --git a/drivers/net/ethernet/mellanox/mlxsw/trap.h b/drivers/net/ethernet/mellanox/mlxsw/trap.h index 53020724c2f6..6f18f4d3322a 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/trap.h +++ b/drivers/net/ethernet/mellanox/mlxsw/trap.h @@ -24,6 +24,7 @@ enum { MLXSW_TRAP_ID_IGMP_V3_REPORT = 0x34, MLXSW_TRAP_ID_PKT_SAMPLE = 0x38, MLXSW_TRAP_ID_FID_MISS = 0x3D, + MLXSW_TRAP_ID_DECAP_ECN0 = 0x40, MLXSW_TRAP_ID_ARPBC = 0x50, MLXSW_TRAP_ID_ARPUC = 0x51, MLXSW_TRAP_ID_MTUERROR = 0x52, @@ -59,6 +60,7 @@ enum { MLXSW_TRAP_ID_IPV6_MC_LINK_LOCAL_DEST = 0x91, MLXSW_TRAP_ID_HOST_MISS_IPV6 = 0x92, MLXSW_TRAP_ID_IPIP_DECAP_ERROR = 0xB1, + 
MLXSW_TRAP_ID_NVE_ENCAP_ARP = 0xBD, MLXSW_TRAP_ID_ROUTER_ALERT_IPV4 = 0xD6, MLXSW_TRAP_ID_ROUTER_ALERT_IPV6 = 0xD7, MLXSW_TRAP_ID_ACL0 = 0x1C0, diff --git a/drivers/net/ethernet/mscc/Kconfig b/drivers/net/ethernet/mscc/Kconfig index 36c84625d54e..bcec0587cf61 100644 --- a/drivers/net/ethernet/mscc/Kconfig +++ b/drivers/net/ethernet/mscc/Kconfig @@ -23,6 +23,8 @@ config MSCC_OCELOT_SWITCH config MSCC_OCELOT_SWITCH_OCELOT tristate "Ocelot switch driver on Ocelot" depends on MSCC_OCELOT_SWITCH + depends on GENERIC_PHY + depends on OF_NET help This driver supports the Ocelot network switch device as present on the Ocelot SoCs. diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c index 1a4f2bb48ead..9b8a17ee3cb3 100644 --- a/drivers/net/ethernet/mscc/ocelot.c +++ b/drivers/net/ethernet/mscc/ocelot.c @@ -472,6 +472,7 @@ static int ocelot_port_open(struct net_device *dev) { struct ocelot_port *port = netdev_priv(dev); struct ocelot *ocelot = port->ocelot; + enum phy_mode phy_mode; int err; /* Enable receiving frames on the port, and activate auto-learning of @@ -482,8 +483,21 @@ static int ocelot_port_open(struct net_device *dev) ANA_PORT_PORT_CFG_PORTID_VAL(port->chip_port), ANA_PORT_PORT_CFG, port->chip_port); + if (port->serdes) { + if (port->phy_mode == PHY_INTERFACE_MODE_SGMII) + phy_mode = PHY_MODE_SGMII; + else + phy_mode = PHY_MODE_QSGMII; + + err = phy_set_mode(port->serdes, phy_mode); + if (err) { + netdev_err(dev, "Could not set mode of SerDes\n"); + return err; + } + } + err = phy_connect_direct(dev, port->phy, &ocelot_port_adjust_link, - PHY_INTERFACE_MODE_NA); + port->phy_mode); if (err) { netdev_err(dev, "Could not attach to PHY\n"); return err; @@ -1606,7 +1620,7 @@ int ocelot_probe_port(struct ocelot *ocelot, u8 port, dev->ethtool_ops = &ocelot_ethtool_ops; dev->switchdev_ops = &ocelot_port_switchdev_ops; - dev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER; + dev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_RXFCS; dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; memcpy(dev->dev_addr, ocelot->base_mac, ETH_ALEN); diff --git a/drivers/net/ethernet/mscc/ocelot.h b/drivers/net/ethernet/mscc/ocelot.h index 616bec30dfa3..62c7c8eb00d9 100644 --- a/drivers/net/ethernet/mscc/ocelot.h +++ b/drivers/net/ethernet/mscc/ocelot.h @@ -11,12 +11,13 @@ #include <linux/bitops.h> #include <linux/etherdevice.h> #include <linux/if_vlan.h> +#include <linux/phy.h> +#include <linux/phy/phy.h> #include <linux/platform_device.h> #include <linux/regmap.h> #include "ocelot_ana.h" #include "ocelot_dev.h" -#include "ocelot_hsio.h" #include "ocelot_qsys.h" #include "ocelot_rew.h" #include "ocelot_sys.h" @@ -333,79 +334,6 @@ enum ocelot_reg { SYS_CM_DATA_RD, SYS_CM_OP, SYS_CM_DATA, - HSIO_PLL5G_CFG0 = HSIO << TARGET_OFFSET, - HSIO_PLL5G_CFG1, - HSIO_PLL5G_CFG2, - HSIO_PLL5G_CFG3, - HSIO_PLL5G_CFG4, - HSIO_PLL5G_CFG5, - HSIO_PLL5G_CFG6, - HSIO_PLL5G_STATUS0, - HSIO_PLL5G_STATUS1, - HSIO_PLL5G_BIST_CFG0, - HSIO_PLL5G_BIST_CFG1, - HSIO_PLL5G_BIST_CFG2, - HSIO_PLL5G_BIST_STAT0, - HSIO_PLL5G_BIST_STAT1, - HSIO_RCOMP_CFG0, - HSIO_RCOMP_STATUS, - HSIO_SYNC_ETH_CFG, - HSIO_SYNC_ETH_PLL_CFG, - HSIO_S1G_DES_CFG, - HSIO_S1G_IB_CFG, - HSIO_S1G_OB_CFG, - HSIO_S1G_SER_CFG, - HSIO_S1G_COMMON_CFG, - HSIO_S1G_PLL_CFG, - HSIO_S1G_PLL_STATUS, - HSIO_S1G_DFT_CFG0, - HSIO_S1G_DFT_CFG1, - HSIO_S1G_DFT_CFG2, - HSIO_S1G_TP_CFG, - HSIO_S1G_RC_PLL_BIST_CFG, - HSIO_S1G_MISC_CFG, - HSIO_S1G_DFT_STATUS, - HSIO_S1G_MISC_STATUS, - HSIO_MCB_S1G_ADDR_CFG, - HSIO_S6G_DIG_CFG, - HSIO_S6G_DFT_CFG0, - 
HSIO_S6G_DFT_CFG1, - HSIO_S6G_DFT_CFG2, - HSIO_S6G_TP_CFG0, - HSIO_S6G_TP_CFG1, - HSIO_S6G_RC_PLL_BIST_CFG, - HSIO_S6G_MISC_CFG, - HSIO_S6G_OB_ANEG_CFG, - HSIO_S6G_DFT_STATUS, - HSIO_S6G_ERR_CNT, - HSIO_S6G_MISC_STATUS, - HSIO_S6G_DES_CFG, - HSIO_S6G_IB_CFG, - HSIO_S6G_IB_CFG1, - HSIO_S6G_IB_CFG2, - HSIO_S6G_IB_CFG3, - HSIO_S6G_IB_CFG4, - HSIO_S6G_IB_CFG5, - HSIO_S6G_OB_CFG, - HSIO_S6G_OB_CFG1, - HSIO_S6G_SER_CFG, - HSIO_S6G_COMMON_CFG, - HSIO_S6G_PLL_CFG, - HSIO_S6G_ACJTAG_CFG, - HSIO_S6G_GP_CFG, - HSIO_S6G_IB_STATUS0, - HSIO_S6G_IB_STATUS1, - HSIO_S6G_ACJTAG_STATUS, - HSIO_S6G_PLL_STATUS, - HSIO_S6G_REVID, - HSIO_MCB_S6G_ADDR_CFG, - HSIO_HW_CFG, - HSIO_HW_QSGMII_CFG, - HSIO_HW_QSGMII_STAT, - HSIO_CLK_CFG, - HSIO_TEMP_SENSOR_CTRL, - HSIO_TEMP_SENSOR_CFG, - HSIO_TEMP_SENSOR_STAT, }; enum ocelot_regfield { @@ -527,6 +455,9 @@ struct ocelot_port { u8 vlan_aware; u64 *stats; + + phy_interface_t phy_mode; + struct phy *serdes; }; u32 __ocelot_read_ix(struct ocelot *ocelot, u32 reg, u32 offset); diff --git a/drivers/net/ethernet/mscc/ocelot_board.c b/drivers/net/ethernet/mscc/ocelot_board.c index 3cdf63e35b53..4c23d18bbf44 100644 --- a/drivers/net/ethernet/mscc/ocelot_board.c +++ b/drivers/net/ethernet/mscc/ocelot_board.c @@ -6,9 +6,11 @@ */ #include <linux/interrupt.h> #include <linux/module.h> +#include <linux/of_net.h> #include <linux/netdevice.h> #include <linux/of_mdio.h> #include <linux/of_platform.h> +#include <linux/mfd/syscon.h> #include <linux/skbuff.h> #include "ocelot.h" @@ -126,11 +128,16 @@ static irqreturn_t ocelot_xtr_irq_handler(int irq, void *arg) len += sz; } while (len < buf_len); - /* Read the FCS and discard it */ + /* Read the FCS */ sz = ocelot_rx_frame_word(ocelot, grp, false, &val); /* Update the statistics if part of the FCS was read before */ len -= ETH_FCS_LEN - sz; + if (unlikely(dev->features & NETIF_F_RXFCS)) { + buf = (u32 *)skb_put(skb, ETH_FCS_LEN); + *buf = val; + } + if (sz < 0) { err = sz; break; @@ -168,6 +175,7 @@ static int mscc_ocelot_probe(struct platform_device *pdev) struct device_node *np = pdev->dev.of_node; struct device_node *ports, *portnp; struct ocelot *ocelot; + struct regmap *hsio; u32 val; struct { @@ -179,7 +187,6 @@ static int mscc_ocelot_probe(struct platform_device *pdev) { QSYS, "qsys" }, { ANA, "ana" }, { QS, "qs" }, - { HSIO, "hsio" }, }; if (!np && !pdev->dev.platform_data) @@ -202,6 +209,14 @@ static int mscc_ocelot_probe(struct platform_device *pdev) ocelot->targets[res[i].id] = target; } + hsio = syscon_regmap_lookup_by_compatible("mscc,ocelot-hsio"); + if (IS_ERR(hsio)) { + dev_err(&pdev->dev, "missing hsio syscon\n"); + return PTR_ERR(hsio); + } + + ocelot->targets[HSIO] = hsio; + err = ocelot_chip_init(ocelot); if (err) return err; @@ -244,18 +259,11 @@ static int mscc_ocelot_probe(struct platform_device *pdev) INIT_LIST_HEAD(&ocelot->multicast); ocelot_init(ocelot); - ocelot_rmw(ocelot, HSIO_HW_CFG_DEV1G_4_MODE | - HSIO_HW_CFG_DEV1G_6_MODE | - HSIO_HW_CFG_DEV1G_9_MODE, - HSIO_HW_CFG_DEV1G_4_MODE | - HSIO_HW_CFG_DEV1G_6_MODE | - HSIO_HW_CFG_DEV1G_9_MODE, - HSIO_HW_CFG); - for_each_available_child_of_node(ports, portnp) { struct device_node *phy_node; struct phy_device *phy; struct resource *res; + struct phy *serdes; void __iomem *regs; char res_name[8]; u32 port; @@ -280,10 +288,43 @@ static int mscc_ocelot_probe(struct platform_device *pdev) continue; err = ocelot_probe_port(ocelot, port, regs, phy); - if (err) { - dev_err(&pdev->dev, "failed to probe ports\n"); + if (err) + return err; + + err = of_get_phy_mode(portnp); + 
if (err < 0) + ocelot->ports[port]->phy_mode = PHY_INTERFACE_MODE_NA; + else + ocelot->ports[port]->phy_mode = err; + + switch (ocelot->ports[port]->phy_mode) { + case PHY_INTERFACE_MODE_NA: + continue; + case PHY_INTERFACE_MODE_SGMII: + break; + case PHY_INTERFACE_MODE_QSGMII: + break; + default: + dev_err(ocelot->dev, + "invalid phy mode for port%d, (Q)SGMII only\n", + port); + return -EINVAL; + } + + serdes = devm_of_phy_get(ocelot->dev, portnp, NULL); + if (IS_ERR(serdes)) { + err = PTR_ERR(serdes); + if (err == -EPROBE_DEFER) + dev_dbg(ocelot->dev, "deferring probe\n"); + else + dev_err(ocelot->dev, + "missing SerDes phys for port%d\n", + port); + goto err_probe_ports; } + + ocelot->ports[port]->serdes = serdes; } register_netdevice_notifier(&ocelot_netdevice_nb); diff --git a/drivers/net/ethernet/mscc/ocelot_hsio.h b/drivers/net/ethernet/mscc/ocelot_hsio.h deleted file mode 100644 index d93ddec3931b..000000000000 --- a/drivers/net/ethernet/mscc/ocelot_hsio.h +++ /dev/null @@ -1,785 +0,0 @@ -/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */ -/* - * Microsemi Ocelot Switch driver - * - * Copyright (c) 2017 Microsemi Corporation - */ - -#ifndef _MSCC_OCELOT_HSIO_H_ -#define _MSCC_OCELOT_HSIO_H_ - -#define HSIO_PLL5G_CFG0_ENA_ROT BIT(31) -#define HSIO_PLL5G_CFG0_ENA_LANE BIT(30) -#define HSIO_PLL5G_CFG0_ENA_CLKTREE BIT(29) -#define HSIO_PLL5G_CFG0_DIV4 BIT(28) -#define HSIO_PLL5G_CFG0_ENA_LOCK_FINE BIT(27) -#define HSIO_PLL5G_CFG0_SELBGV820(x) (((x) << 23) & GENMASK(26, 23)) -#define HSIO_PLL5G_CFG0_SELBGV820_M GENMASK(26, 23) -#define HSIO_PLL5G_CFG0_SELBGV820_X(x) (((x) & GENMASK(26, 23)) >> 23) -#define HSIO_PLL5G_CFG0_LOOP_BW_RES(x) (((x) << 18) & GENMASK(22, 18)) -#define HSIO_PLL5G_CFG0_LOOP_BW_RES_M GENMASK(22, 18) -#define HSIO_PLL5G_CFG0_LOOP_BW_RES_X(x) (((x) & GENMASK(22, 18)) >> 18) -#define HSIO_PLL5G_CFG0_SELCPI(x) (((x) << 16) & GENMASK(17, 16)) -#define HSIO_PLL5G_CFG0_SELCPI_M GENMASK(17, 16) -#define HSIO_PLL5G_CFG0_SELCPI_X(x) (((x) & GENMASK(17, 16)) >> 16) -#define HSIO_PLL5G_CFG0_ENA_VCO_CONTRH BIT(15) -#define HSIO_PLL5G_CFG0_ENA_CP1 BIT(14) -#define HSIO_PLL5G_CFG0_ENA_VCO_BUF BIT(13) -#define HSIO_PLL5G_CFG0_ENA_BIAS BIT(12) -#define HSIO_PLL5G_CFG0_CPU_CLK_DIV(x) (((x) << 6) & GENMASK(11, 6)) -#define HSIO_PLL5G_CFG0_CPU_CLK_DIV_M GENMASK(11, 6) -#define HSIO_PLL5G_CFG0_CPU_CLK_DIV_X(x) (((x) & GENMASK(11, 6)) >> 6) -#define HSIO_PLL5G_CFG0_CORE_CLK_DIV(x) ((x) & GENMASK(5, 0)) -#define HSIO_PLL5G_CFG0_CORE_CLK_DIV_M GENMASK(5, 0) - -#define HSIO_PLL5G_CFG1_ENA_DIRECT BIT(18) -#define HSIO_PLL5G_CFG1_ROT_SPEED BIT(17) -#define HSIO_PLL5G_CFG1_ROT_DIR BIT(16) -#define HSIO_PLL5G_CFG1_READBACK_DATA_SEL BIT(15) -#define HSIO_PLL5G_CFG1_RC_ENABLE BIT(14) -#define HSIO_PLL5G_CFG1_RC_CTRL_DATA(x) (((x) << 6) & GENMASK(13, 6)) -#define HSIO_PLL5G_CFG1_RC_CTRL_DATA_M GENMASK(13, 6) -#define HSIO_PLL5G_CFG1_RC_CTRL_DATA_X(x) (((x) & GENMASK(13, 6)) >> 6) -#define HSIO_PLL5G_CFG1_QUARTER_RATE BIT(5) -#define HSIO_PLL5G_CFG1_PWD_TX BIT(4) -#define HSIO_PLL5G_CFG1_PWD_RX BIT(3) -#define HSIO_PLL5G_CFG1_OUT_OF_RANGE_RECAL_ENA BIT(2) -#define HSIO_PLL5G_CFG1_HALF_RATE BIT(1) -#define HSIO_PLL5G_CFG1_FORCE_SET_ENA BIT(0) - -#define HSIO_PLL5G_CFG2_ENA_TEST_MODE BIT(30) -#define HSIO_PLL5G_CFG2_ENA_PFD_IN_FLIP BIT(29) -#define HSIO_PLL5G_CFG2_ENA_VCO_NREF_TESTOUT BIT(28) -#define HSIO_PLL5G_CFG2_ENA_FBTESTOUT BIT(27) -#define HSIO_PLL5G_CFG2_ENA_RCPLL BIT(26) -#define HSIO_PLL5G_CFG2_ENA_CP2 BIT(25) -#define HSIO_PLL5G_CFG2_ENA_CLK_BYPASS1 BIT(24) -#define 
HSIO_PLL5G_CFG2_AMPC_SEL(x) (((x) << 16) & GENMASK(23, 16)) -#define HSIO_PLL5G_CFG2_AMPC_SEL_M GENMASK(23, 16) -#define HSIO_PLL5G_CFG2_AMPC_SEL_X(x) (((x) & GENMASK(23, 16)) >> 16) -#define HSIO_PLL5G_CFG2_ENA_CLK_BYPASS BIT(15) -#define HSIO_PLL5G_CFG2_PWD_AMPCTRL_N BIT(14) -#define HSIO_PLL5G_CFG2_ENA_AMPCTRL BIT(13) -#define HSIO_PLL5G_CFG2_ENA_AMP_CTRL_FORCE BIT(12) -#define HSIO_PLL5G_CFG2_FRC_FSM_POR BIT(11) -#define HSIO_PLL5G_CFG2_DISABLE_FSM_POR BIT(10) -#define HSIO_PLL5G_CFG2_GAIN_TEST(x) (((x) << 5) & GENMASK(9, 5)) -#define HSIO_PLL5G_CFG2_GAIN_TEST_M GENMASK(9, 5) -#define HSIO_PLL5G_CFG2_GAIN_TEST_X(x) (((x) & GENMASK(9, 5)) >> 5) -#define HSIO_PLL5G_CFG2_EN_RESET_OVERRUN BIT(4) -#define HSIO_PLL5G_CFG2_EN_RESET_LIM_DET BIT(3) -#define HSIO_PLL5G_CFG2_EN_RESET_FRQ_DET BIT(2) -#define HSIO_PLL5G_CFG2_DISABLE_FSM BIT(1) -#define HSIO_PLL5G_CFG2_ENA_GAIN_TEST BIT(0) - -#define HSIO_PLL5G_CFG3_TEST_ANA_OUT_SEL(x) (((x) << 22) & GENMASK(23, 22)) -#define HSIO_PLL5G_CFG3_TEST_ANA_OUT_SEL_M GENMASK(23, 22) -#define HSIO_PLL5G_CFG3_TEST_ANA_OUT_SEL_X(x) (((x) & GENMASK(23, 22)) >> 22) -#define HSIO_PLL5G_CFG3_TESTOUT_SEL(x) (((x) << 19) & GENMASK(21, 19)) -#define HSIO_PLL5G_CFG3_TESTOUT_SEL_M GENMASK(21, 19) -#define HSIO_PLL5G_CFG3_TESTOUT_SEL_X(x) (((x) & GENMASK(21, 19)) >> 19) -#define HSIO_PLL5G_CFG3_ENA_ANA_TEST_OUT BIT(18) -#define HSIO_PLL5G_CFG3_ENA_TEST_OUT BIT(17) -#define HSIO_PLL5G_CFG3_SEL_FBDCLK BIT(16) -#define HSIO_PLL5G_CFG3_SEL_CML_CMOS_PFD BIT(15) -#define HSIO_PLL5G_CFG3_RST_FB_N BIT(14) -#define HSIO_PLL5G_CFG3_FORCE_VCO_CONTRH BIT(13) -#define HSIO_PLL5G_CFG3_FORCE_LO BIT(12) -#define HSIO_PLL5G_CFG3_FORCE_HI BIT(11) -#define HSIO_PLL5G_CFG3_FORCE_ENA BIT(10) -#define HSIO_PLL5G_CFG3_FORCE_CP BIT(9) -#define HSIO_PLL5G_CFG3_FBDIVSEL_TST_ENA BIT(8) -#define HSIO_PLL5G_CFG3_FBDIVSEL(x) ((x) & GENMASK(7, 0)) -#define HSIO_PLL5G_CFG3_FBDIVSEL_M GENMASK(7, 0) - -#define HSIO_PLL5G_CFG4_IB_BIAS_CTRL(x) (((x) << 16) & GENMASK(23, 16)) -#define HSIO_PLL5G_CFG4_IB_BIAS_CTRL_M GENMASK(23, 16) -#define HSIO_PLL5G_CFG4_IB_BIAS_CTRL_X(x) (((x) & GENMASK(23, 16)) >> 16) -#define HSIO_PLL5G_CFG4_IB_CTRL(x) ((x) & GENMASK(15, 0)) -#define HSIO_PLL5G_CFG4_IB_CTRL_M GENMASK(15, 0) - -#define HSIO_PLL5G_CFG5_OB_BIAS_CTRL(x) (((x) << 16) & GENMASK(23, 16)) -#define HSIO_PLL5G_CFG5_OB_BIAS_CTRL_M GENMASK(23, 16) -#define HSIO_PLL5G_CFG5_OB_BIAS_CTRL_X(x) (((x) & GENMASK(23, 16)) >> 16) -#define HSIO_PLL5G_CFG5_OB_CTRL(x) ((x) & GENMASK(15, 0)) -#define HSIO_PLL5G_CFG5_OB_CTRL_M GENMASK(15, 0) - -#define HSIO_PLL5G_CFG6_REFCLK_SEL_SRC BIT(23) -#define HSIO_PLL5G_CFG6_REFCLK_SEL(x) (((x) << 20) & GENMASK(22, 20)) -#define HSIO_PLL5G_CFG6_REFCLK_SEL_M GENMASK(22, 20) -#define HSIO_PLL5G_CFG6_REFCLK_SEL_X(x) (((x) & GENMASK(22, 20)) >> 20) -#define HSIO_PLL5G_CFG6_REFCLK_SRC BIT(19) -#define HSIO_PLL5G_CFG6_POR_DEL_SEL(x) (((x) << 16) & GENMASK(17, 16)) -#define HSIO_PLL5G_CFG6_POR_DEL_SEL_M GENMASK(17, 16) -#define HSIO_PLL5G_CFG6_POR_DEL_SEL_X(x) (((x) & GENMASK(17, 16)) >> 16) -#define HSIO_PLL5G_CFG6_DIV125REF_SEL(x) (((x) << 8) & GENMASK(15, 8)) -#define HSIO_PLL5G_CFG6_DIV125REF_SEL_M GENMASK(15, 8) -#define HSIO_PLL5G_CFG6_DIV125REF_SEL_X(x) (((x) & GENMASK(15, 8)) >> 8) -#define HSIO_PLL5G_CFG6_ENA_REFCLKC2 BIT(7) -#define HSIO_PLL5G_CFG6_ENA_FBCLKC2 BIT(6) -#define HSIO_PLL5G_CFG6_DDR_CLK_DIV(x) ((x) & GENMASK(5, 0)) -#define HSIO_PLL5G_CFG6_DDR_CLK_DIV_M GENMASK(5, 0) - -#define HSIO_PLL5G_STATUS0_RANGE_LIM BIT(12) -#define HSIO_PLL5G_STATUS0_OUT_OF_RANGE_ERR BIT(11) 
-#define HSIO_PLL5G_STATUS0_CALIBRATION_ERR BIT(10) -#define HSIO_PLL5G_STATUS0_CALIBRATION_DONE BIT(9) -#define HSIO_PLL5G_STATUS0_READBACK_DATA(x) (((x) << 1) & GENMASK(8, 1)) -#define HSIO_PLL5G_STATUS0_READBACK_DATA_M GENMASK(8, 1) -#define HSIO_PLL5G_STATUS0_READBACK_DATA_X(x) (((x) & GENMASK(8, 1)) >> 1) -#define HSIO_PLL5G_STATUS0_LOCK_STATUS BIT(0) - -#define HSIO_PLL5G_STATUS1_SIG_DEL(x) (((x) << 21) & GENMASK(28, 21)) -#define HSIO_PLL5G_STATUS1_SIG_DEL_M GENMASK(28, 21) -#define HSIO_PLL5G_STATUS1_SIG_DEL_X(x) (((x) & GENMASK(28, 21)) >> 21) -#define HSIO_PLL5G_STATUS1_GAIN_STAT(x) (((x) << 16) & GENMASK(20, 16)) -#define HSIO_PLL5G_STATUS1_GAIN_STAT_M GENMASK(20, 16) -#define HSIO_PLL5G_STATUS1_GAIN_STAT_X(x) (((x) & GENMASK(20, 16)) >> 16) -#define HSIO_PLL5G_STATUS1_FBCNT_DIF(x) (((x) << 4) & GENMASK(13, 4)) -#define HSIO_PLL5G_STATUS1_FBCNT_DIF_M GENMASK(13, 4) -#define HSIO_PLL5G_STATUS1_FBCNT_DIF_X(x) (((x) & GENMASK(13, 4)) >> 4) -#define HSIO_PLL5G_STATUS1_FSM_STAT(x) (((x) << 1) & GENMASK(3, 1)) -#define HSIO_PLL5G_STATUS1_FSM_STAT_M GENMASK(3, 1) -#define HSIO_PLL5G_STATUS1_FSM_STAT_X(x) (((x) & GENMASK(3, 1)) >> 1) -#define HSIO_PLL5G_STATUS1_FSM_LOCK BIT(0) - -#define HSIO_PLL5G_BIST_CFG0_PLLB_START_BIST BIT(31) -#define HSIO_PLL5G_BIST_CFG0_PLLB_MEAS_MODE BIT(30) -#define HSIO_PLL5G_BIST_CFG0_PLLB_LOCK_REPEAT(x) (((x) << 20) & GENMASK(23, 20)) -#define HSIO_PLL5G_BIST_CFG0_PLLB_LOCK_REPEAT_M GENMASK(23, 20) -#define HSIO_PLL5G_BIST_CFG0_PLLB_LOCK_REPEAT_X(x) (((x) & GENMASK(23, 20)) >> 20) -#define HSIO_PLL5G_BIST_CFG0_PLLB_LOCK_UNCERT(x) (((x) << 16) & GENMASK(19, 16)) -#define HSIO_PLL5G_BIST_CFG0_PLLB_LOCK_UNCERT_M GENMASK(19, 16) -#define HSIO_PLL5G_BIST_CFG0_PLLB_LOCK_UNCERT_X(x) (((x) & GENMASK(19, 16)) >> 16) -#define HSIO_PLL5G_BIST_CFG0_PLLB_DIV_FACTOR_PRE(x) ((x) & GENMASK(15, 0)) -#define HSIO_PLL5G_BIST_CFG0_PLLB_DIV_FACTOR_PRE_M GENMASK(15, 0) - -#define HSIO_PLL5G_BIST_STAT0_PLLB_FSM_STAT(x) (((x) << 4) & GENMASK(7, 4)) -#define HSIO_PLL5G_BIST_STAT0_PLLB_FSM_STAT_M GENMASK(7, 4) -#define HSIO_PLL5G_BIST_STAT0_PLLB_FSM_STAT_X(x) (((x) & GENMASK(7, 4)) >> 4) -#define HSIO_PLL5G_BIST_STAT0_PLLB_BUSY BIT(2) -#define HSIO_PLL5G_BIST_STAT0_PLLB_DONE_N BIT(1) -#define HSIO_PLL5G_BIST_STAT0_PLLB_FAIL BIT(0) - -#define HSIO_PLL5G_BIST_STAT1_PLLB_CNT_OUT(x) (((x) << 16) & GENMASK(31, 16)) -#define HSIO_PLL5G_BIST_STAT1_PLLB_CNT_OUT_M GENMASK(31, 16) -#define HSIO_PLL5G_BIST_STAT1_PLLB_CNT_OUT_X(x) (((x) & GENMASK(31, 16)) >> 16) -#define HSIO_PLL5G_BIST_STAT1_PLLB_CNT_REF_DIFF(x) ((x) & GENMASK(15, 0)) -#define HSIO_PLL5G_BIST_STAT1_PLLB_CNT_REF_DIFF_M GENMASK(15, 0) - -#define HSIO_RCOMP_CFG0_PWD_ENA BIT(13) -#define HSIO_RCOMP_CFG0_RUN_CAL BIT(12) -#define HSIO_RCOMP_CFG0_SPEED_SEL(x) (((x) << 10) & GENMASK(11, 10)) -#define HSIO_RCOMP_CFG0_SPEED_SEL_M GENMASK(11, 10) -#define HSIO_RCOMP_CFG0_SPEED_SEL_X(x) (((x) & GENMASK(11, 10)) >> 10) -#define HSIO_RCOMP_CFG0_MODE_SEL(x) (((x) << 8) & GENMASK(9, 8)) -#define HSIO_RCOMP_CFG0_MODE_SEL_M GENMASK(9, 8) -#define HSIO_RCOMP_CFG0_MODE_SEL_X(x) (((x) & GENMASK(9, 8)) >> 8) -#define HSIO_RCOMP_CFG0_FORCE_ENA BIT(4) -#define HSIO_RCOMP_CFG0_RCOMP_VAL(x) ((x) & GENMASK(3, 0)) -#define HSIO_RCOMP_CFG0_RCOMP_VAL_M GENMASK(3, 0) - -#define HSIO_RCOMP_STATUS_BUSY BIT(12) -#define HSIO_RCOMP_STATUS_DELTA_ALERT BIT(7) -#define HSIO_RCOMP_STATUS_RCOMP(x) ((x) & GENMASK(3, 0)) -#define HSIO_RCOMP_STATUS_RCOMP_M GENMASK(3, 0) - -#define HSIO_SYNC_ETH_CFG_RSZ 0x4 - -#define HSIO_SYNC_ETH_CFG_SEL_RECO_CLK_SRC(x) (((x) << 4) & 
GENMASK(7, 4)) -#define HSIO_SYNC_ETH_CFG_SEL_RECO_CLK_SRC_M GENMASK(7, 4) -#define HSIO_SYNC_ETH_CFG_SEL_RECO_CLK_SRC_X(x) (((x) & GENMASK(7, 4)) >> 4) -#define HSIO_SYNC_ETH_CFG_SEL_RECO_CLK_DIV(x) (((x) << 1) & GENMASK(3, 1)) -#define HSIO_SYNC_ETH_CFG_SEL_RECO_CLK_DIV_M GENMASK(3, 1) -#define HSIO_SYNC_ETH_CFG_SEL_RECO_CLK_DIV_X(x) (((x) & GENMASK(3, 1)) >> 1) -#define HSIO_SYNC_ETH_CFG_RECO_CLK_ENA BIT(0) - -#define HSIO_SYNC_ETH_PLL_CFG_PLL_AUTO_SQUELCH_ENA BIT(0) - -#define HSIO_S1G_DES_CFG_DES_PHS_CTRL(x) (((x) << 13) & GENMASK(16, 13)) -#define HSIO_S1G_DES_CFG_DES_PHS_CTRL_M GENMASK(16, 13) -#define HSIO_S1G_DES_CFG_DES_PHS_CTRL_X(x) (((x) & GENMASK(16, 13)) >> 13) -#define HSIO_S1G_DES_CFG_DES_CPMD_SEL(x) (((x) << 11) & GENMASK(12, 11)) -#define HSIO_S1G_DES_CFG_DES_CPMD_SEL_M GENMASK(12, 11) -#define HSIO_S1G_DES_CFG_DES_CPMD_SEL_X(x) (((x) & GENMASK(12, 11)) >> 11) -#define HSIO_S1G_DES_CFG_DES_MBTR_CTRL(x) (((x) << 8) & GENMASK(10, 8)) -#define HSIO_S1G_DES_CFG_DES_MBTR_CTRL_M GENMASK(10, 8) -#define HSIO_S1G_DES_CFG_DES_MBTR_CTRL_X(x) (((x) & GENMASK(10, 8)) >> 8) -#define HSIO_S1G_DES_CFG_DES_BW_ANA(x) (((x) << 5) & GENMASK(7, 5)) -#define HSIO_S1G_DES_CFG_DES_BW_ANA_M GENMASK(7, 5) -#define HSIO_S1G_DES_CFG_DES_BW_ANA_X(x) (((x) & GENMASK(7, 5)) >> 5) -#define HSIO_S1G_DES_CFG_DES_SWAP_ANA BIT(4) -#define HSIO_S1G_DES_CFG_DES_BW_HYST(x) (((x) << 1) & GENMASK(3, 1)) -#define HSIO_S1G_DES_CFG_DES_BW_HYST_M GENMASK(3, 1) -#define HSIO_S1G_DES_CFG_DES_BW_HYST_X(x) (((x) & GENMASK(3, 1)) >> 1) -#define HSIO_S1G_DES_CFG_DES_SWAP_HYST BIT(0) - -#define HSIO_S1G_IB_CFG_IB_FX100_ENA BIT(27) -#define HSIO_S1G_IB_CFG_ACJTAG_HYST(x) (((x) << 24) & GENMASK(26, 24)) -#define HSIO_S1G_IB_CFG_ACJTAG_HYST_M GENMASK(26, 24) -#define HSIO_S1G_IB_CFG_ACJTAG_HYST_X(x) (((x) & GENMASK(26, 24)) >> 24) -#define HSIO_S1G_IB_CFG_IB_DET_LEV(x) (((x) << 19) & GENMASK(21, 19)) -#define HSIO_S1G_IB_CFG_IB_DET_LEV_M GENMASK(21, 19) -#define HSIO_S1G_IB_CFG_IB_DET_LEV_X(x) (((x) & GENMASK(21, 19)) >> 19) -#define HSIO_S1G_IB_CFG_IB_HYST_LEV BIT(14) -#define HSIO_S1G_IB_CFG_IB_ENA_CMV_TERM BIT(13) -#define HSIO_S1G_IB_CFG_IB_ENA_DC_COUPLING BIT(12) -#define HSIO_S1G_IB_CFG_IB_ENA_DETLEV BIT(11) -#define HSIO_S1G_IB_CFG_IB_ENA_HYST BIT(10) -#define HSIO_S1G_IB_CFG_IB_ENA_OFFSET_COMP BIT(9) -#define HSIO_S1G_IB_CFG_IB_EQ_GAIN(x) (((x) << 6) & GENMASK(8, 6)) -#define HSIO_S1G_IB_CFG_IB_EQ_GAIN_M GENMASK(8, 6) -#define HSIO_S1G_IB_CFG_IB_EQ_GAIN_X(x) (((x) & GENMASK(8, 6)) >> 6) -#define HSIO_S1G_IB_CFG_IB_SEL_CORNER_FREQ(x) (((x) << 4) & GENMASK(5, 4)) -#define HSIO_S1G_IB_CFG_IB_SEL_CORNER_FREQ_M GENMASK(5, 4) -#define HSIO_S1G_IB_CFG_IB_SEL_CORNER_FREQ_X(x) (((x) & GENMASK(5, 4)) >> 4) -#define HSIO_S1G_IB_CFG_IB_RESISTOR_CTRL(x) ((x) & GENMASK(3, 0)) -#define HSIO_S1G_IB_CFG_IB_RESISTOR_CTRL_M GENMASK(3, 0) - -#define HSIO_S1G_OB_CFG_OB_SLP(x) (((x) << 17) & GENMASK(18, 17)) -#define HSIO_S1G_OB_CFG_OB_SLP_M GENMASK(18, 17) -#define HSIO_S1G_OB_CFG_OB_SLP_X(x) (((x) & GENMASK(18, 17)) >> 17) -#define HSIO_S1G_OB_CFG_OB_AMP_CTRL(x) (((x) << 13) & GENMASK(16, 13)) -#define HSIO_S1G_OB_CFG_OB_AMP_CTRL_M GENMASK(16, 13) -#define HSIO_S1G_OB_CFG_OB_AMP_CTRL_X(x) (((x) & GENMASK(16, 13)) >> 13) -#define HSIO_S1G_OB_CFG_OB_CMM_BIAS_CTRL(x) (((x) << 10) & GENMASK(12, 10)) -#define HSIO_S1G_OB_CFG_OB_CMM_BIAS_CTRL_M GENMASK(12, 10) -#define HSIO_S1G_OB_CFG_OB_CMM_BIAS_CTRL_X(x) (((x) & GENMASK(12, 10)) >> 10) -#define HSIO_S1G_OB_CFG_OB_DIS_VCM_CTRL BIT(9) -#define HSIO_S1G_OB_CFG_OB_EN_MEAS_VREG BIT(8) -#define 
HSIO_S1G_OB_CFG_OB_VCM_CTRL(x) (((x) << 4) & GENMASK(7, 4)) -#define HSIO_S1G_OB_CFG_OB_VCM_CTRL_M GENMASK(7, 4) -#define HSIO_S1G_OB_CFG_OB_VCM_CTRL_X(x) (((x) & GENMASK(7, 4)) >> 4) -#define HSIO_S1G_OB_CFG_OB_RESISTOR_CTRL(x) ((x) & GENMASK(3, 0)) -#define HSIO_S1G_OB_CFG_OB_RESISTOR_CTRL_M GENMASK(3, 0) - -#define HSIO_S1G_SER_CFG_SER_IDLE BIT(9) -#define HSIO_S1G_SER_CFG_SER_DEEMPH BIT(8) -#define HSIO_S1G_SER_CFG_SER_CPMD_SEL BIT(7) -#define HSIO_S1G_SER_CFG_SER_SWAP_CPMD BIT(6) -#define HSIO_S1G_SER_CFG_SER_ALISEL(x) (((x) << 4) & GENMASK(5, 4)) -#define HSIO_S1G_SER_CFG_SER_ALISEL_M GENMASK(5, 4) -#define HSIO_S1G_SER_CFG_SER_ALISEL_X(x) (((x) & GENMASK(5, 4)) >> 4) -#define HSIO_S1G_SER_CFG_SER_ENHYS BIT(3) -#define HSIO_S1G_SER_CFG_SER_BIG_WIN BIT(2) -#define HSIO_S1G_SER_CFG_SER_EN_WIN BIT(1) -#define HSIO_S1G_SER_CFG_SER_ENALI BIT(0) - -#define HSIO_S1G_COMMON_CFG_SYS_RST BIT(31) -#define HSIO_S1G_COMMON_CFG_SE_AUTO_SQUELCH_ENA BIT(21) -#define HSIO_S1G_COMMON_CFG_ENA_LANE BIT(18) -#define HSIO_S1G_COMMON_CFG_PWD_RX BIT(17) -#define HSIO_S1G_COMMON_CFG_PWD_TX BIT(16) -#define HSIO_S1G_COMMON_CFG_LANE_CTRL(x) (((x) << 13) & GENMASK(15, 13)) -#define HSIO_S1G_COMMON_CFG_LANE_CTRL_M GENMASK(15, 13) -#define HSIO_S1G_COMMON_CFG_LANE_CTRL_X(x) (((x) & GENMASK(15, 13)) >> 13) -#define HSIO_S1G_COMMON_CFG_ENA_DIRECT BIT(12) -#define HSIO_S1G_COMMON_CFG_ENA_ELOOP BIT(11) -#define HSIO_S1G_COMMON_CFG_ENA_FLOOP BIT(10) -#define HSIO_S1G_COMMON_CFG_ENA_ILOOP BIT(9) -#define HSIO_S1G_COMMON_CFG_ENA_PLOOP BIT(8) -#define HSIO_S1G_COMMON_CFG_HRATE BIT(7) -#define HSIO_S1G_COMMON_CFG_IF_MODE BIT(0) - -#define HSIO_S1G_PLL_CFG_PLL_ENA_FB_DIV2 BIT(22) -#define HSIO_S1G_PLL_CFG_PLL_ENA_RC_DIV2 BIT(21) -#define HSIO_S1G_PLL_CFG_PLL_FSM_CTRL_DATA(x) (((x) << 8) & GENMASK(15, 8)) -#define HSIO_S1G_PLL_CFG_PLL_FSM_CTRL_DATA_M GENMASK(15, 8) -#define HSIO_S1G_PLL_CFG_PLL_FSM_CTRL_DATA_X(x) (((x) & GENMASK(15, 8)) >> 8) -#define HSIO_S1G_PLL_CFG_PLL_FSM_ENA BIT(7) -#define HSIO_S1G_PLL_CFG_PLL_FSM_FORCE_SET_ENA BIT(6) -#define HSIO_S1G_PLL_CFG_PLL_FSM_OOR_RECAL_ENA BIT(5) -#define HSIO_S1G_PLL_CFG_PLL_RB_DATA_SEL BIT(3) - -#define HSIO_S1G_PLL_STATUS_PLL_CAL_NOT_DONE BIT(12) -#define HSIO_S1G_PLL_STATUS_PLL_CAL_ERR BIT(11) -#define HSIO_S1G_PLL_STATUS_PLL_OUT_OF_RANGE_ERR BIT(10) -#define HSIO_S1G_PLL_STATUS_PLL_RB_DATA(x) ((x) & GENMASK(7, 0)) -#define HSIO_S1G_PLL_STATUS_PLL_RB_DATA_M GENMASK(7, 0) - -#define HSIO_S1G_DFT_CFG0_LAZYBIT BIT(31) -#define HSIO_S1G_DFT_CFG0_INV_DIS BIT(23) -#define HSIO_S1G_DFT_CFG0_PRBS_SEL(x) (((x) << 20) & GENMASK(21, 20)) -#define HSIO_S1G_DFT_CFG0_PRBS_SEL_M GENMASK(21, 20) -#define HSIO_S1G_DFT_CFG0_PRBS_SEL_X(x) (((x) & GENMASK(21, 20)) >> 20) -#define HSIO_S1G_DFT_CFG0_TEST_MODE(x) (((x) << 16) & GENMASK(18, 16)) -#define HSIO_S1G_DFT_CFG0_TEST_MODE_M GENMASK(18, 16) -#define HSIO_S1G_DFT_CFG0_TEST_MODE_X(x) (((x) & GENMASK(18, 16)) >> 16) -#define HSIO_S1G_DFT_CFG0_RX_PHS_CORR_DIS BIT(4) -#define HSIO_S1G_DFT_CFG0_RX_PDSENS_ENA BIT(3) -#define HSIO_S1G_DFT_CFG0_RX_DFT_ENA BIT(2) -#define HSIO_S1G_DFT_CFG0_TX_DFT_ENA BIT(0) - -#define HSIO_S1G_DFT_CFG1_TX_JITTER_AMPL(x) (((x) << 8) & GENMASK(17, 8)) -#define HSIO_S1G_DFT_CFG1_TX_JITTER_AMPL_M GENMASK(17, 8) -#define HSIO_S1G_DFT_CFG1_TX_JITTER_AMPL_X(x) (((x) & GENMASK(17, 8)) >> 8) -#define HSIO_S1G_DFT_CFG1_TX_STEP_FREQ(x) (((x) << 4) & GENMASK(7, 4)) -#define HSIO_S1G_DFT_CFG1_TX_STEP_FREQ_M GENMASK(7, 4) -#define HSIO_S1G_DFT_CFG1_TX_STEP_FREQ_X(x) (((x) & GENMASK(7, 4)) >> 4) -#define 
HSIO_S1G_DFT_CFG1_TX_JI_ENA BIT(3) -#define HSIO_S1G_DFT_CFG1_TX_WAVEFORM_SEL BIT(2) -#define HSIO_S1G_DFT_CFG1_TX_FREQOFF_DIR BIT(1) -#define HSIO_S1G_DFT_CFG1_TX_FREQOFF_ENA BIT(0) - -#define HSIO_S1G_DFT_CFG2_RX_JITTER_AMPL(x) (((x) << 8) & GENMASK(17, 8)) -#define HSIO_S1G_DFT_CFG2_RX_JITTER_AMPL_M GENMASK(17, 8) -#define HSIO_S1G_DFT_CFG2_RX_JITTER_AMPL_X(x) (((x) & GENMASK(17, 8)) >> 8) -#define HSIO_S1G_DFT_CFG2_RX_STEP_FREQ(x) (((x) << 4) & GENMASK(7, 4)) -#define HSIO_S1G_DFT_CFG2_RX_STEP_FREQ_M GENMASK(7, 4) -#define HSIO_S1G_DFT_CFG2_RX_STEP_FREQ_X(x) (((x) & GENMASK(7, 4)) >> 4) -#define HSIO_S1G_DFT_CFG2_RX_JI_ENA BIT(3) -#define HSIO_S1G_DFT_CFG2_RX_WAVEFORM_SEL BIT(2) -#define HSIO_S1G_DFT_CFG2_RX_FREQOFF_DIR BIT(1) -#define HSIO_S1G_DFT_CFG2_RX_FREQOFF_ENA BIT(0) - -#define HSIO_S1G_RC_PLL_BIST_CFG_PLL_BIST_ENA BIT(20) -#define HSIO_S1G_RC_PLL_BIST_CFG_PLL_BIST_FBS_HIGH(x) (((x) << 16) & GENMASK(17, 16)) -#define HSIO_S1G_RC_PLL_BIST_CFG_PLL_BIST_FBS_HIGH_M GENMASK(17, 16) -#define HSIO_S1G_RC_PLL_BIST_CFG_PLL_BIST_FBS_HIGH_X(x) (((x) & GENMASK(17, 16)) >> 16) -#define HSIO_S1G_RC_PLL_BIST_CFG_PLL_BIST_HIGH(x) (((x) << 8) & GENMASK(15, 8)) -#define HSIO_S1G_RC_PLL_BIST_CFG_PLL_BIST_HIGH_M GENMASK(15, 8) -#define HSIO_S1G_RC_PLL_BIST_CFG_PLL_BIST_HIGH_X(x) (((x) & GENMASK(15, 8)) >> 8) -#define HSIO_S1G_RC_PLL_BIST_CFG_PLL_BIST_LOW(x) ((x) & GENMASK(7, 0)) -#define HSIO_S1G_RC_PLL_BIST_CFG_PLL_BIST_LOW_M GENMASK(7, 0) - -#define HSIO_S1G_MISC_CFG_DES_100FX_KICK_MODE(x) (((x) << 11) & GENMASK(12, 11)) -#define HSIO_S1G_MISC_CFG_DES_100FX_KICK_MODE_M GENMASK(12, 11) -#define HSIO_S1G_MISC_CFG_DES_100FX_KICK_MODE_X(x) (((x) & GENMASK(12, 11)) >> 11) -#define HSIO_S1G_MISC_CFG_DES_100FX_CPMD_SWAP BIT(10) -#define HSIO_S1G_MISC_CFG_DES_100FX_CPMD_MODE BIT(9) -#define HSIO_S1G_MISC_CFG_DES_100FX_CPMD_ENA BIT(8) -#define HSIO_S1G_MISC_CFG_RX_LPI_MODE_ENA BIT(5) -#define HSIO_S1G_MISC_CFG_TX_LPI_MODE_ENA BIT(4) -#define HSIO_S1G_MISC_CFG_RX_DATA_INV_ENA BIT(3) -#define HSIO_S1G_MISC_CFG_TX_DATA_INV_ENA BIT(2) -#define HSIO_S1G_MISC_CFG_LANE_RST BIT(0) - -#define HSIO_S1G_DFT_STATUS_PLL_BIST_NOT_DONE BIT(7) -#define HSIO_S1G_DFT_STATUS_PLL_BIST_FAILED BIT(6) -#define HSIO_S1G_DFT_STATUS_PLL_BIST_TIMEOUT_ERR BIT(5) -#define HSIO_S1G_DFT_STATUS_BIST_ACTIVE BIT(3) -#define HSIO_S1G_DFT_STATUS_BIST_NOSYNC BIT(2) -#define HSIO_S1G_DFT_STATUS_BIST_COMPLETE_N BIT(1) -#define HSIO_S1G_DFT_STATUS_BIST_ERROR BIT(0) - -#define HSIO_S1G_MISC_STATUS_DES_100FX_PHASE_SEL BIT(0) - -#define HSIO_MCB_S1G_ADDR_CFG_SERDES1G_WR_ONE_SHOT BIT(31) -#define HSIO_MCB_S1G_ADDR_CFG_SERDES1G_RD_ONE_SHOT BIT(30) -#define HSIO_MCB_S1G_ADDR_CFG_SERDES1G_ADDR(x) ((x) & GENMASK(8, 0)) -#define HSIO_MCB_S1G_ADDR_CFG_SERDES1G_ADDR_M GENMASK(8, 0) - -#define HSIO_S6G_DIG_CFG_GP(x) (((x) << 16) & GENMASK(18, 16)) -#define HSIO_S6G_DIG_CFG_GP_M GENMASK(18, 16) -#define HSIO_S6G_DIG_CFG_GP_X(x) (((x) & GENMASK(18, 16)) >> 16) -#define HSIO_S6G_DIG_CFG_TX_BIT_DOUBLING_MODE_ENA BIT(7) -#define HSIO_S6G_DIG_CFG_SIGDET_TESTMODE BIT(6) -#define HSIO_S6G_DIG_CFG_SIGDET_AST(x) (((x) << 3) & GENMASK(5, 3)) -#define HSIO_S6G_DIG_CFG_SIGDET_AST_M GENMASK(5, 3) -#define HSIO_S6G_DIG_CFG_SIGDET_AST_X(x) (((x) & GENMASK(5, 3)) >> 3) -#define HSIO_S6G_DIG_CFG_SIGDET_DST(x) ((x) & GENMASK(2, 0)) -#define HSIO_S6G_DIG_CFG_SIGDET_DST_M GENMASK(2, 0) - -#define HSIO_S6G_DFT_CFG0_LAZYBIT BIT(31) -#define HSIO_S6G_DFT_CFG0_INV_DIS BIT(23) -#define HSIO_S6G_DFT_CFG0_PRBS_SEL(x) (((x) << 20) & GENMASK(21, 20)) -#define 
HSIO_S6G_DFT_CFG0_PRBS_SEL_M GENMASK(21, 20)
-#define HSIO_S6G_DFT_CFG0_PRBS_SEL_X(x) (((x) & GENMASK(21, 20)) >> 20)
-#define HSIO_S6G_DFT_CFG0_TEST_MODE(x) (((x) << 16) & GENMASK(18, 16))
-#define HSIO_S6G_DFT_CFG0_TEST_MODE_M GENMASK(18, 16)
-#define HSIO_S6G_DFT_CFG0_TEST_MODE_X(x) (((x) & GENMASK(18, 16)) >> 16)
-#define HSIO_S6G_DFT_CFG0_RX_PHS_CORR_DIS BIT(4)
-#define HSIO_S6G_DFT_CFG0_RX_PDSENS_ENA BIT(3)
-#define HSIO_S6G_DFT_CFG0_RX_DFT_ENA BIT(2)
-#define HSIO_S6G_DFT_CFG0_TX_DFT_ENA BIT(0)
-
-#define HSIO_S6G_DFT_CFG1_TX_JITTER_AMPL(x) (((x) << 8) & GENMASK(17, 8))
-#define HSIO_S6G_DFT_CFG1_TX_JITTER_AMPL_M GENMASK(17, 8)
-#define HSIO_S6G_DFT_CFG1_TX_JITTER_AMPL_X(x) (((x) & GENMASK(17, 8)) >> 8)
-#define HSIO_S6G_DFT_CFG1_TX_STEP_FREQ(x) (((x) << 4) & GENMASK(7, 4))
-#define HSIO_S6G_DFT_CFG1_TX_STEP_FREQ_M GENMASK(7, 4)
-#define HSIO_S6G_DFT_CFG1_TX_STEP_FREQ_X(x) (((x) & GENMASK(7, 4)) >> 4)
-#define HSIO_S6G_DFT_CFG1_TX_JI_ENA BIT(3)
-#define HSIO_S6G_DFT_CFG1_TX_WAVEFORM_SEL BIT(2)
-#define HSIO_S6G_DFT_CFG1_TX_FREQOFF_DIR BIT(1)
-#define HSIO_S6G_DFT_CFG1_TX_FREQOFF_ENA BIT(0)
-
-#define HSIO_S6G_DFT_CFG2_RX_JITTER_AMPL(x) (((x) << 8) & GENMASK(17, 8))
-#define HSIO_S6G_DFT_CFG2_RX_JITTER_AMPL_M GENMASK(17, 8)
-#define HSIO_S6G_DFT_CFG2_RX_JITTER_AMPL_X(x) (((x) & GENMASK(17, 8)) >> 8)
-#define HSIO_S6G_DFT_CFG2_RX_STEP_FREQ(x) (((x) << 4) & GENMASK(7, 4))
-#define HSIO_S6G_DFT_CFG2_RX_STEP_FREQ_M GENMASK(7, 4)
-#define HSIO_S6G_DFT_CFG2_RX_STEP_FREQ_X(x) (((x) & GENMASK(7, 4)) >> 4)
-#define HSIO_S6G_DFT_CFG2_RX_JI_ENA BIT(3)
-#define HSIO_S6G_DFT_CFG2_RX_WAVEFORM_SEL BIT(2)
-#define HSIO_S6G_DFT_CFG2_RX_FREQOFF_DIR BIT(1)
-#define HSIO_S6G_DFT_CFG2_RX_FREQOFF_ENA BIT(0)
-
-#define HSIO_S6G_RC_PLL_BIST_CFG_PLL_BIST_ENA BIT(20)
-#define HSIO_S6G_RC_PLL_BIST_CFG_PLL_BIST_FBS_HIGH(x) (((x) << 16) & GENMASK(19, 16))
-#define HSIO_S6G_RC_PLL_BIST_CFG_PLL_BIST_FBS_HIGH_M GENMASK(19, 16)
-#define HSIO_S6G_RC_PLL_BIST_CFG_PLL_BIST_FBS_HIGH_X(x) (((x) & GENMASK(19, 16)) >> 16)
-#define HSIO_S6G_RC_PLL_BIST_CFG_PLL_BIST_HIGH(x) (((x) << 8) & GENMASK(15, 8))
-#define HSIO_S6G_RC_PLL_BIST_CFG_PLL_BIST_HIGH_M GENMASK(15, 8)
-#define HSIO_S6G_RC_PLL_BIST_CFG_PLL_BIST_HIGH_X(x) (((x) & GENMASK(15, 8)) >> 8)
-#define HSIO_S6G_RC_PLL_BIST_CFG_PLL_BIST_LOW(x) ((x) & GENMASK(7, 0))
-#define HSIO_S6G_RC_PLL_BIST_CFG_PLL_BIST_LOW_M GENMASK(7, 0)
-
-#define HSIO_S6G_MISC_CFG_SEL_RECO_CLK(x) (((x) << 13) & GENMASK(14, 13))
-#define HSIO_S6G_MISC_CFG_SEL_RECO_CLK_M GENMASK(14, 13)
-#define HSIO_S6G_MISC_CFG_SEL_RECO_CLK_X(x) (((x) & GENMASK(14, 13)) >> 13)
-#define HSIO_S6G_MISC_CFG_DES_100FX_KICK_MODE(x) (((x) << 11) & GENMASK(12, 11))
-#define HSIO_S6G_MISC_CFG_DES_100FX_KICK_MODE_M GENMASK(12, 11)
-#define HSIO_S6G_MISC_CFG_DES_100FX_KICK_MODE_X(x) (((x) & GENMASK(12, 11)) >> 11)
-#define HSIO_S6G_MISC_CFG_DES_100FX_CPMD_SWAP BIT(10)
-#define HSIO_S6G_MISC_CFG_DES_100FX_CPMD_MODE BIT(9)
-#define HSIO_S6G_MISC_CFG_DES_100FX_CPMD_ENA BIT(8)
-#define HSIO_S6G_MISC_CFG_RX_BUS_FLIP_ENA BIT(7)
-#define HSIO_S6G_MISC_CFG_TX_BUS_FLIP_ENA BIT(6)
-#define HSIO_S6G_MISC_CFG_RX_LPI_MODE_ENA BIT(5)
-#define HSIO_S6G_MISC_CFG_TX_LPI_MODE_ENA BIT(4)
-#define HSIO_S6G_MISC_CFG_RX_DATA_INV_ENA BIT(3)
-#define HSIO_S6G_MISC_CFG_TX_DATA_INV_ENA BIT(2)
-#define HSIO_S6G_MISC_CFG_LANE_RST BIT(0)
-
-#define HSIO_S6G_OB_ANEG_CFG_AN_OB_POST0(x) (((x) << 23) & GENMASK(28, 23))
-#define HSIO_S6G_OB_ANEG_CFG_AN_OB_POST0_M GENMASK(28, 23)
-#define HSIO_S6G_OB_ANEG_CFG_AN_OB_POST0_X(x) (((x) & GENMASK(28, 23)) >> 23)
-#define HSIO_S6G_OB_ANEG_CFG_AN_OB_POST1(x) (((x) << 18) & GENMASK(22, 18))
-#define HSIO_S6G_OB_ANEG_CFG_AN_OB_POST1_M GENMASK(22, 18)
-#define HSIO_S6G_OB_ANEG_CFG_AN_OB_POST1_X(x) (((x) & GENMASK(22, 18)) >> 18)
-#define HSIO_S6G_OB_ANEG_CFG_AN_OB_PREC(x) (((x) << 13) & GENMASK(17, 13))
-#define HSIO_S6G_OB_ANEG_CFG_AN_OB_PREC_M GENMASK(17, 13)
-#define HSIO_S6G_OB_ANEG_CFG_AN_OB_PREC_X(x) (((x) & GENMASK(17, 13)) >> 13)
-#define HSIO_S6G_OB_ANEG_CFG_AN_OB_ENA_CAS(x) (((x) << 6) & GENMASK(8, 6))
-#define HSIO_S6G_OB_ANEG_CFG_AN_OB_ENA_CAS_M GENMASK(8, 6)
-#define HSIO_S6G_OB_ANEG_CFG_AN_OB_ENA_CAS_X(x) (((x) & GENMASK(8, 6)) >> 6)
-#define HSIO_S6G_OB_ANEG_CFG_AN_OB_LEV(x) ((x) & GENMASK(5, 0))
-#define HSIO_S6G_OB_ANEG_CFG_AN_OB_LEV_M GENMASK(5, 0)
-
-#define HSIO_S6G_DFT_STATUS_PRBS_SYNC_STAT BIT(8)
-#define HSIO_S6G_DFT_STATUS_PLL_BIST_NOT_DONE BIT(7)
-#define HSIO_S6G_DFT_STATUS_PLL_BIST_FAILED BIT(6)
-#define HSIO_S6G_DFT_STATUS_PLL_BIST_TIMEOUT_ERR BIT(5)
-#define HSIO_S6G_DFT_STATUS_BIST_ACTIVE BIT(3)
-#define HSIO_S6G_DFT_STATUS_BIST_NOSYNC BIT(2)
-#define HSIO_S6G_DFT_STATUS_BIST_COMPLETE_N BIT(1)
-#define HSIO_S6G_DFT_STATUS_BIST_ERROR BIT(0)
-
-#define HSIO_S6G_MISC_STATUS_DES_100FX_PHASE_SEL BIT(0)
-
-#define HSIO_S6G_DES_CFG_DES_PHS_CTRL(x) (((x) << 13) & GENMASK(16, 13))
-#define HSIO_S6G_DES_CFG_DES_PHS_CTRL_M GENMASK(16, 13)
-#define HSIO_S6G_DES_CFG_DES_PHS_CTRL_X(x) (((x) & GENMASK(16, 13)) >> 13)
-#define HSIO_S6G_DES_CFG_DES_MBTR_CTRL(x) (((x) << 10) & GENMASK(12, 10))
-#define HSIO_S6G_DES_CFG_DES_MBTR_CTRL_M GENMASK(12, 10)
-#define HSIO_S6G_DES_CFG_DES_MBTR_CTRL_X(x) (((x) & GENMASK(12, 10)) >> 10)
-#define HSIO_S6G_DES_CFG_DES_CPMD_SEL(x) (((x) << 8) & GENMASK(9, 8))
-#define HSIO_S6G_DES_CFG_DES_CPMD_SEL_M GENMASK(9, 8)
-#define HSIO_S6G_DES_CFG_DES_CPMD_SEL_X(x) (((x) & GENMASK(9, 8)) >> 8)
-#define HSIO_S6G_DES_CFG_DES_BW_HYST(x) (((x) << 5) & GENMASK(7, 5))
-#define HSIO_S6G_DES_CFG_DES_BW_HYST_M GENMASK(7, 5)
-#define HSIO_S6G_DES_CFG_DES_BW_HYST_X(x) (((x) & GENMASK(7, 5)) >> 5)
-#define HSIO_S6G_DES_CFG_DES_SWAP_HYST BIT(4)
-#define HSIO_S6G_DES_CFG_DES_BW_ANA(x) (((x) << 1) & GENMASK(3, 1))
-#define HSIO_S6G_DES_CFG_DES_BW_ANA_M GENMASK(3, 1)
-#define HSIO_S6G_DES_CFG_DES_BW_ANA_X(x) (((x) & GENMASK(3, 1)) >> 1)
-#define HSIO_S6G_DES_CFG_DES_SWAP_ANA BIT(0)
-
-#define HSIO_S6G_IB_CFG_IB_SOFSI(x) (((x) << 29) & GENMASK(30, 29))
-#define HSIO_S6G_IB_CFG_IB_SOFSI_M GENMASK(30, 29)
-#define HSIO_S6G_IB_CFG_IB_SOFSI_X(x) (((x) & GENMASK(30, 29)) >> 29)
-#define HSIO_S6G_IB_CFG_IB_VBULK_SEL BIT(28)
-#define HSIO_S6G_IB_CFG_IB_RTRM_ADJ(x) (((x) << 24) & GENMASK(27, 24))
-#define HSIO_S6G_IB_CFG_IB_RTRM_ADJ_M GENMASK(27, 24)
-#define HSIO_S6G_IB_CFG_IB_RTRM_ADJ_X(x) (((x) & GENMASK(27, 24)) >> 24)
-#define HSIO_S6G_IB_CFG_IB_ICML_ADJ(x) (((x) << 20) & GENMASK(23, 20))
-#define HSIO_S6G_IB_CFG_IB_ICML_ADJ_M GENMASK(23, 20)
-#define HSIO_S6G_IB_CFG_IB_ICML_ADJ_X(x) (((x) & GENMASK(23, 20)) >> 20)
-#define HSIO_S6G_IB_CFG_IB_TERM_MODE_SEL(x) (((x) << 18) & GENMASK(19, 18))
-#define HSIO_S6G_IB_CFG_IB_TERM_MODE_SEL_M GENMASK(19, 18)
-#define HSIO_S6G_IB_CFG_IB_TERM_MODE_SEL_X(x) (((x) & GENMASK(19, 18)) >> 18)
-#define HSIO_S6G_IB_CFG_IB_SIG_DET_CLK_SEL(x) (((x) << 15) & GENMASK(17, 15))
-#define HSIO_S6G_IB_CFG_IB_SIG_DET_CLK_SEL_M GENMASK(17, 15)
-#define HSIO_S6G_IB_CFG_IB_SIG_DET_CLK_SEL_X(x) (((x) & GENMASK(17, 15)) >> 15)
-#define HSIO_S6G_IB_CFG_IB_REG_PAT_SEL_HP(x) (((x) << 13) & GENMASK(14, 13))
-#define HSIO_S6G_IB_CFG_IB_REG_PAT_SEL_HP_M GENMASK(14, 13)
-#define HSIO_S6G_IB_CFG_IB_REG_PAT_SEL_HP_X(x) (((x) & GENMASK(14, 13)) >> 13)
-#define HSIO_S6G_IB_CFG_IB_REG_PAT_SEL_MID(x) (((x) << 11) & GENMASK(12, 11))
-#define HSIO_S6G_IB_CFG_IB_REG_PAT_SEL_MID_M GENMASK(12, 11)
-#define HSIO_S6G_IB_CFG_IB_REG_PAT_SEL_MID_X(x) (((x) & GENMASK(12, 11)) >> 11)
-#define HSIO_S6G_IB_CFG_IB_REG_PAT_SEL_LP(x) (((x) << 9) & GENMASK(10, 9))
-#define HSIO_S6G_IB_CFG_IB_REG_PAT_SEL_LP_M GENMASK(10, 9)
-#define HSIO_S6G_IB_CFG_IB_REG_PAT_SEL_LP_X(x) (((x) & GENMASK(10, 9)) >> 9)
-#define HSIO_S6G_IB_CFG_IB_REG_PAT_SEL_OFFSET(x) (((x) << 7) & GENMASK(8, 7))
-#define HSIO_S6G_IB_CFG_IB_REG_PAT_SEL_OFFSET_M GENMASK(8, 7)
-#define HSIO_S6G_IB_CFG_IB_REG_PAT_SEL_OFFSET_X(x) (((x) & GENMASK(8, 7)) >> 7)
-#define HSIO_S6G_IB_CFG_IB_ANA_TEST_ENA BIT(6)
-#define HSIO_S6G_IB_CFG_IB_SIG_DET_ENA BIT(5)
-#define HSIO_S6G_IB_CFG_IB_CONCUR BIT(4)
-#define HSIO_S6G_IB_CFG_IB_CAL_ENA BIT(3)
-#define HSIO_S6G_IB_CFG_IB_SAM_ENA BIT(2)
-#define HSIO_S6G_IB_CFG_IB_EQZ_ENA BIT(1)
-#define HSIO_S6G_IB_CFG_IB_REG_ENA BIT(0)
-
-#define HSIO_S6G_IB_CFG1_IB_TJTAG(x) (((x) << 17) & GENMASK(21, 17))
-#define HSIO_S6G_IB_CFG1_IB_TJTAG_M GENMASK(21, 17)
-#define HSIO_S6G_IB_CFG1_IB_TJTAG_X(x) (((x) & GENMASK(21, 17)) >> 17)
-#define HSIO_S6G_IB_CFG1_IB_TSDET(x) (((x) << 12) & GENMASK(16, 12))
-#define HSIO_S6G_IB_CFG1_IB_TSDET_M GENMASK(16, 12)
-#define HSIO_S6G_IB_CFG1_IB_TSDET_X(x) (((x) & GENMASK(16, 12)) >> 12)
-#define HSIO_S6G_IB_CFG1_IB_SCALY(x) (((x) << 8) & GENMASK(11, 8))
-#define HSIO_S6G_IB_CFG1_IB_SCALY_M GENMASK(11, 8)
-#define HSIO_S6G_IB_CFG1_IB_SCALY_X(x) (((x) & GENMASK(11, 8)) >> 8)
-#define HSIO_S6G_IB_CFG1_IB_FILT_HP BIT(7)
-#define HSIO_S6G_IB_CFG1_IB_FILT_MID BIT(6)
-#define HSIO_S6G_IB_CFG1_IB_FILT_LP BIT(5)
-#define HSIO_S6G_IB_CFG1_IB_FILT_OFFSET BIT(4)
-#define HSIO_S6G_IB_CFG1_IB_FRC_HP BIT(3)
-#define HSIO_S6G_IB_CFG1_IB_FRC_MID BIT(2)
-#define HSIO_S6G_IB_CFG1_IB_FRC_LP BIT(1)
-#define HSIO_S6G_IB_CFG1_IB_FRC_OFFSET BIT(0)
-
-#define HSIO_S6G_IB_CFG2_IB_TINFV(x) (((x) << 27) & GENMASK(29, 27))
-#define HSIO_S6G_IB_CFG2_IB_TINFV_M GENMASK(29, 27)
-#define HSIO_S6G_IB_CFG2_IB_TINFV_X(x) (((x) & GENMASK(29, 27)) >> 27)
-#define HSIO_S6G_IB_CFG2_IB_OINFI(x) (((x) << 22) & GENMASK(26, 22))
-#define HSIO_S6G_IB_CFG2_IB_OINFI_M GENMASK(26, 22)
-#define HSIO_S6G_IB_CFG2_IB_OINFI_X(x) (((x) & GENMASK(26, 22)) >> 22)
-#define HSIO_S6G_IB_CFG2_IB_TAUX(x) (((x) << 19) & GENMASK(21, 19))
-#define HSIO_S6G_IB_CFG2_IB_TAUX_M GENMASK(21, 19)
-#define HSIO_S6G_IB_CFG2_IB_TAUX_X(x) (((x) & GENMASK(21, 19)) >> 19)
-#define HSIO_S6G_IB_CFG2_IB_OINFS(x) (((x) << 16) & GENMASK(18, 16))
-#define HSIO_S6G_IB_CFG2_IB_OINFS_M GENMASK(18, 16)
-#define HSIO_S6G_IB_CFG2_IB_OINFS_X(x) (((x) & GENMASK(18, 16)) >> 16)
-#define HSIO_S6G_IB_CFG2_IB_OCALS(x) (((x) << 10) & GENMASK(15, 10))
-#define HSIO_S6G_IB_CFG2_IB_OCALS_M GENMASK(15, 10)
-#define HSIO_S6G_IB_CFG2_IB_OCALS_X(x) (((x) & GENMASK(15, 10)) >> 10)
-#define HSIO_S6G_IB_CFG2_IB_TCALV(x) (((x) << 5) & GENMASK(9, 5))
-#define HSIO_S6G_IB_CFG2_IB_TCALV_M GENMASK(9, 5)
-#define HSIO_S6G_IB_CFG2_IB_TCALV_X(x) (((x) & GENMASK(9, 5)) >> 5)
-#define HSIO_S6G_IB_CFG2_IB_UMAX(x) (((x) << 3) & GENMASK(4, 3))
-#define HSIO_S6G_IB_CFG2_IB_UMAX_M GENMASK(4, 3)
-#define HSIO_S6G_IB_CFG2_IB_UMAX_X(x) (((x) & GENMASK(4, 3)) >> 3)
-#define HSIO_S6G_IB_CFG2_IB_UREG(x) ((x) & GENMASK(2, 0))
-#define HSIO_S6G_IB_CFG2_IB_UREG_M GENMASK(2, 0)
-
-#define HSIO_S6G_IB_CFG3_IB_INI_HP(x) (((x) << 18) & GENMASK(23, 18))
-#define HSIO_S6G_IB_CFG3_IB_INI_HP_M GENMASK(23, 18)
-#define HSIO_S6G_IB_CFG3_IB_INI_HP_X(x) (((x) & GENMASK(23, 18)) >> 18)
-#define HSIO_S6G_IB_CFG3_IB_INI_MID(x) (((x) << 12) & GENMASK(17, 12))
-#define HSIO_S6G_IB_CFG3_IB_INI_MID_M GENMASK(17, 12)
-#define HSIO_S6G_IB_CFG3_IB_INI_MID_X(x) (((x) & GENMASK(17, 12)) >> 12)
-#define HSIO_S6G_IB_CFG3_IB_INI_LP(x) (((x) << 6) & GENMASK(11, 6))
-#define HSIO_S6G_IB_CFG3_IB_INI_LP_M GENMASK(11, 6)
-#define HSIO_S6G_IB_CFG3_IB_INI_LP_X(x) (((x) & GENMASK(11, 6)) >> 6)
-#define HSIO_S6G_IB_CFG3_IB_INI_OFFSET(x) ((x) & GENMASK(5, 0))
-#define HSIO_S6G_IB_CFG3_IB_INI_OFFSET_M GENMASK(5, 0)
-
-#define HSIO_S6G_IB_CFG4_IB_MAX_HP(x) (((x) << 18) & GENMASK(23, 18))
-#define HSIO_S6G_IB_CFG4_IB_MAX_HP_M GENMASK(23, 18)
-#define HSIO_S6G_IB_CFG4_IB_MAX_HP_X(x) (((x) & GENMASK(23, 18)) >> 18)
-#define HSIO_S6G_IB_CFG4_IB_MAX_MID(x) (((x) << 12) & GENMASK(17, 12))
-#define HSIO_S6G_IB_CFG4_IB_MAX_MID_M GENMASK(17, 12)
-#define HSIO_S6G_IB_CFG4_IB_MAX_MID_X(x) (((x) & GENMASK(17, 12)) >> 12)
-#define HSIO_S6G_IB_CFG4_IB_MAX_LP(x) (((x) << 6) & GENMASK(11, 6))
-#define HSIO_S6G_IB_CFG4_IB_MAX_LP_M GENMASK(11, 6)
-#define HSIO_S6G_IB_CFG4_IB_MAX_LP_X(x) (((x) & GENMASK(11, 6)) >> 6)
-#define HSIO_S6G_IB_CFG4_IB_MAX_OFFSET(x) ((x) & GENMASK(5, 0))
-#define HSIO_S6G_IB_CFG4_IB_MAX_OFFSET_M GENMASK(5, 0)
-
-#define HSIO_S6G_IB_CFG5_IB_MIN_HP(x) (((x) << 18) & GENMASK(23, 18))
-#define HSIO_S6G_IB_CFG5_IB_MIN_HP_M GENMASK(23, 18)
-#define HSIO_S6G_IB_CFG5_IB_MIN_HP_X(x) (((x) & GENMASK(23, 18)) >> 18)
-#define HSIO_S6G_IB_CFG5_IB_MIN_MID(x) (((x) << 12) & GENMASK(17, 12))
-#define HSIO_S6G_IB_CFG5_IB_MIN_MID_M GENMASK(17, 12)
-#define HSIO_S6G_IB_CFG5_IB_MIN_MID_X(x) (((x) & GENMASK(17, 12)) >> 12)
-#define HSIO_S6G_IB_CFG5_IB_MIN_LP(x) (((x) << 6) & GENMASK(11, 6))
-#define HSIO_S6G_IB_CFG5_IB_MIN_LP_M GENMASK(11, 6)
-#define HSIO_S6G_IB_CFG5_IB_MIN_LP_X(x) (((x) & GENMASK(11, 6)) >> 6)
-#define HSIO_S6G_IB_CFG5_IB_MIN_OFFSET(x) ((x) & GENMASK(5, 0))
-#define HSIO_S6G_IB_CFG5_IB_MIN_OFFSET_M GENMASK(5, 0)
-
-#define HSIO_S6G_OB_CFG_OB_IDLE BIT(31)
-#define HSIO_S6G_OB_CFG_OB_ENA1V_MODE BIT(30)
-#define HSIO_S6G_OB_CFG_OB_POL BIT(29)
-#define HSIO_S6G_OB_CFG_OB_POST0(x) (((x) << 23) & GENMASK(28, 23))
-#define HSIO_S6G_OB_CFG_OB_POST0_M GENMASK(28, 23)
-#define HSIO_S6G_OB_CFG_OB_POST0_X(x) (((x) & GENMASK(28, 23)) >> 23)
-#define HSIO_S6G_OB_CFG_OB_PREC(x) (((x) << 18) & GENMASK(22, 18))
-#define HSIO_S6G_OB_CFG_OB_PREC_M GENMASK(22, 18)
-#define HSIO_S6G_OB_CFG_OB_PREC_X(x) (((x) & GENMASK(22, 18)) >> 18)
-#define HSIO_S6G_OB_CFG_OB_R_ADJ_MUX BIT(17)
-#define HSIO_S6G_OB_CFG_OB_R_ADJ_PDR BIT(16)
-#define HSIO_S6G_OB_CFG_OB_POST1(x) (((x) << 11) & GENMASK(15, 11))
-#define HSIO_S6G_OB_CFG_OB_POST1_M GENMASK(15, 11)
-#define HSIO_S6G_OB_CFG_OB_POST1_X(x) (((x) & GENMASK(15, 11)) >> 11)
-#define HSIO_S6G_OB_CFG_OB_R_COR BIT(10)
-#define HSIO_S6G_OB_CFG_OB_SEL_RCTRL BIT(9)
-#define HSIO_S6G_OB_CFG_OB_SR_H BIT(8)
-#define HSIO_S6G_OB_CFG_OB_SR(x) (((x) << 4) & GENMASK(7, 4))
-#define HSIO_S6G_OB_CFG_OB_SR_M GENMASK(7, 4)
-#define HSIO_S6G_OB_CFG_OB_SR_X(x) (((x) & GENMASK(7, 4)) >> 4)
-#define HSIO_S6G_OB_CFG_OB_RESISTOR_CTRL(x) ((x) & GENMASK(3, 0))
-#define HSIO_S6G_OB_CFG_OB_RESISTOR_CTRL_M GENMASK(3, 0)
-
-#define HSIO_S6G_OB_CFG1_OB_ENA_CAS(x) (((x) << 6) & GENMASK(8, 6))
-#define HSIO_S6G_OB_CFG1_OB_ENA_CAS_M GENMASK(8, 6)
-#define HSIO_S6G_OB_CFG1_OB_ENA_CAS_X(x) (((x) & GENMASK(8, 6)) >> 6)
-#define HSIO_S6G_OB_CFG1_OB_LEV(x) ((x) & GENMASK(5, 0))
-#define HSIO_S6G_OB_CFG1_OB_LEV_M GENMASK(5, 0)
-
-#define HSIO_S6G_SER_CFG_SER_4TAP_ENA BIT(8)
-#define HSIO_S6G_SER_CFG_SER_CPMD_SEL BIT(7)
-#define HSIO_S6G_SER_CFG_SER_SWAP_CPMD BIT(6)
-#define HSIO_S6G_SER_CFG_SER_ALISEL(x) (((x) << 4) & GENMASK(5, 4))
-#define HSIO_S6G_SER_CFG_SER_ALISEL_M GENMASK(5, 4)
-#define HSIO_S6G_SER_CFG_SER_ALISEL_X(x) (((x) & GENMASK(5, 4)) >> 4)
-#define HSIO_S6G_SER_CFG_SER_ENHYS BIT(3)
-#define HSIO_S6G_SER_CFG_SER_BIG_WIN BIT(2)
-#define HSIO_S6G_SER_CFG_SER_EN_WIN BIT(1)
-#define HSIO_S6G_SER_CFG_SER_ENALI BIT(0)
-
-#define HSIO_S6G_COMMON_CFG_SYS_RST BIT(17)
-#define HSIO_S6G_COMMON_CFG_SE_DIV2_ENA BIT(16)
-#define HSIO_S6G_COMMON_CFG_SE_AUTO_SQUELCH_ENA BIT(15)
-#define HSIO_S6G_COMMON_CFG_ENA_LANE BIT(14)
-#define HSIO_S6G_COMMON_CFG_PWD_RX BIT(13)
-#define HSIO_S6G_COMMON_CFG_PWD_TX BIT(12)
-#define HSIO_S6G_COMMON_CFG_LANE_CTRL(x) (((x) << 9) & GENMASK(11, 9))
-#define HSIO_S6G_COMMON_CFG_LANE_CTRL_M GENMASK(11, 9)
-#define HSIO_S6G_COMMON_CFG_LANE_CTRL_X(x) (((x) & GENMASK(11, 9)) >> 9)
-#define HSIO_S6G_COMMON_CFG_ENA_DIRECT BIT(8)
-#define HSIO_S6G_COMMON_CFG_ENA_ELOOP BIT(7)
-#define HSIO_S6G_COMMON_CFG_ENA_FLOOP BIT(6)
-#define HSIO_S6G_COMMON_CFG_ENA_ILOOP BIT(5)
-#define HSIO_S6G_COMMON_CFG_ENA_PLOOP BIT(4)
-#define HSIO_S6G_COMMON_CFG_HRATE BIT(3)
-#define HSIO_S6G_COMMON_CFG_QRATE BIT(2)
-#define HSIO_S6G_COMMON_CFG_IF_MODE(x) ((x) & GENMASK(1, 0))
-#define HSIO_S6G_COMMON_CFG_IF_MODE_M GENMASK(1, 0)
-
-#define HSIO_S6G_PLL_CFG_PLL_ENA_OFFS(x) (((x) << 16) & GENMASK(17, 16))
-#define HSIO_S6G_PLL_CFG_PLL_ENA_OFFS_M GENMASK(17, 16)
-#define HSIO_S6G_PLL_CFG_PLL_ENA_OFFS_X(x) (((x) & GENMASK(17, 16)) >> 16)
-#define HSIO_S6G_PLL_CFG_PLL_DIV4 BIT(15)
-#define HSIO_S6G_PLL_CFG_PLL_ENA_ROT BIT(14)
-#define HSIO_S6G_PLL_CFG_PLL_FSM_CTRL_DATA(x) (((x) << 6) & GENMASK(13, 6))
-#define HSIO_S6G_PLL_CFG_PLL_FSM_CTRL_DATA_M GENMASK(13, 6)
-#define HSIO_S6G_PLL_CFG_PLL_FSM_CTRL_DATA_X(x) (((x) & GENMASK(13, 6)) >> 6)
-#define HSIO_S6G_PLL_CFG_PLL_FSM_ENA BIT(5)
-#define HSIO_S6G_PLL_CFG_PLL_FSM_FORCE_SET_ENA BIT(4)
-#define HSIO_S6G_PLL_CFG_PLL_FSM_OOR_RECAL_ENA BIT(3)
-#define HSIO_S6G_PLL_CFG_PLL_RB_DATA_SEL BIT(2)
-#define HSIO_S6G_PLL_CFG_PLL_ROT_DIR BIT(1)
-#define HSIO_S6G_PLL_CFG_PLL_ROT_FRQ BIT(0)
-
-#define HSIO_S6G_ACJTAG_CFG_ACJTAG_INIT_DATA_N BIT(5)
-#define HSIO_S6G_ACJTAG_CFG_ACJTAG_INIT_DATA_P BIT(4)
-#define HSIO_S6G_ACJTAG_CFG_ACJTAG_INIT_CLK BIT(3)
-#define HSIO_S6G_ACJTAG_CFG_OB_DIRECT BIT(2)
-#define HSIO_S6G_ACJTAG_CFG_ACJTAG_ENA BIT(1)
-#define HSIO_S6G_ACJTAG_CFG_JTAG_CTRL_ENA BIT(0)
-
-#define HSIO_S6G_GP_CFG_GP_MSB(x) (((x) << 16) & GENMASK(31, 16))
-#define HSIO_S6G_GP_CFG_GP_MSB_M GENMASK(31, 16)
-#define HSIO_S6G_GP_CFG_GP_MSB_X(x) (((x) & GENMASK(31, 16)) >> 16)
-#define HSIO_S6G_GP_CFG_GP_LSB(x) ((x) & GENMASK(15, 0))
-#define HSIO_S6G_GP_CFG_GP_LSB_M GENMASK(15, 0)
-
-#define HSIO_S6G_IB_STATUS0_IB_CAL_DONE BIT(8)
-#define HSIO_S6G_IB_STATUS0_IB_HP_GAIN_ACT BIT(7)
-#define HSIO_S6G_IB_STATUS0_IB_MID_GAIN_ACT BIT(6)
-#define HSIO_S6G_IB_STATUS0_IB_LP_GAIN_ACT BIT(5)
-#define HSIO_S6G_IB_STATUS0_IB_OFFSET_ACT BIT(4)
-#define HSIO_S6G_IB_STATUS0_IB_OFFSET_VLD BIT(3)
-#define HSIO_S6G_IB_STATUS0_IB_OFFSET_ERR BIT(2)
-#define HSIO_S6G_IB_STATUS0_IB_OFFSDIR BIT(1)
-#define HSIO_S6G_IB_STATUS0_IB_SIG_DET BIT(0)
-
-#define HSIO_S6G_IB_STATUS1_IB_HP_GAIN_STAT(x) (((x) << 18) & GENMASK(23, 18))
-#define HSIO_S6G_IB_STATUS1_IB_HP_GAIN_STAT_M GENMASK(23, 18)
-#define HSIO_S6G_IB_STATUS1_IB_HP_GAIN_STAT_X(x) (((x) & GENMASK(23, 18)) >> 18)
-#define HSIO_S6G_IB_STATUS1_IB_MID_GAIN_STAT(x) (((x) << 12) & GENMASK(17, 12))
-#define HSIO_S6G_IB_STATUS1_IB_MID_GAIN_STAT_M GENMASK(17, 12)
-#define HSIO_S6G_IB_STATUS1_IB_MID_GAIN_STAT_X(x) (((x) & GENMASK(17, 12)) >> 12)
-#define HSIO_S6G_IB_STATUS1_IB_LP_GAIN_STAT(x) (((x) << 6) & GENMASK(11, 6))
-#define HSIO_S6G_IB_STATUS1_IB_LP_GAIN_STAT_M GENMASK(11, 6)
-#define HSIO_S6G_IB_STATUS1_IB_LP_GAIN_STAT_X(x) (((x) & GENMASK(11, 6)) >> 6)
-#define HSIO_S6G_IB_STATUS1_IB_OFFSET_STAT(x) ((x) & GENMASK(5, 0))
-#define HSIO_S6G_IB_STATUS1_IB_OFFSET_STAT_M GENMASK(5, 0)
-
-#define HSIO_S6G_ACJTAG_STATUS_ACJTAG_CAPT_DATA_N BIT(2)
-#define HSIO_S6G_ACJTAG_STATUS_ACJTAG_CAPT_DATA_P BIT(1)
-#define HSIO_S6G_ACJTAG_STATUS_IB_DIRECT BIT(0)
-
-#define HSIO_S6G_PLL_STATUS_PLL_CAL_NOT_DONE BIT(10)
-#define HSIO_S6G_PLL_STATUS_PLL_CAL_ERR BIT(9)
-#define HSIO_S6G_PLL_STATUS_PLL_OUT_OF_RANGE_ERR BIT(8)
-#define HSIO_S6G_PLL_STATUS_PLL_RB_DATA(x) ((x) & GENMASK(7, 0))
-#define HSIO_S6G_PLL_STATUS_PLL_RB_DATA_M GENMASK(7, 0)
-
-#define HSIO_S6G_REVID_SERDES_REV(x) (((x) << 26) & GENMASK(31, 26))
-#define HSIO_S6G_REVID_SERDES_REV_M GENMASK(31, 26)
-#define HSIO_S6G_REVID_SERDES_REV_X(x) (((x) & GENMASK(31, 26)) >> 26)
-#define HSIO_S6G_REVID_RCPLL_REV(x) (((x) << 21) & GENMASK(25, 21))
-#define HSIO_S6G_REVID_RCPLL_REV_M GENMASK(25, 21)
-#define HSIO_S6G_REVID_RCPLL_REV_X(x) (((x) & GENMASK(25, 21)) >> 21)
-#define HSIO_S6G_REVID_SER_REV(x) (((x) << 16) & GENMASK(20, 16))
-#define HSIO_S6G_REVID_SER_REV_M GENMASK(20, 16)
-#define HSIO_S6G_REVID_SER_REV_X(x) (((x) & GENMASK(20, 16)) >> 16)
-#define HSIO_S6G_REVID_DES_REV(x) (((x) << 10) & GENMASK(15, 10))
-#define HSIO_S6G_REVID_DES_REV_M GENMASK(15, 10)
-#define HSIO_S6G_REVID_DES_REV_X(x) (((x) & GENMASK(15, 10)) >> 10)
-#define HSIO_S6G_REVID_OB_REV(x) (((x) << 5) & GENMASK(9, 5))
-#define HSIO_S6G_REVID_OB_REV_M GENMASK(9, 5)
-#define HSIO_S6G_REVID_OB_REV_X(x) (((x) & GENMASK(9, 5)) >> 5)
-#define HSIO_S6G_REVID_IB_REV(x) ((x) & GENMASK(4, 0))
-#define HSIO_S6G_REVID_IB_REV_M GENMASK(4, 0)
-
-#define HSIO_MCB_S6G_ADDR_CFG_SERDES6G_WR_ONE_SHOT BIT(31)
-#define HSIO_MCB_S6G_ADDR_CFG_SERDES6G_RD_ONE_SHOT BIT(30)
-#define HSIO_MCB_S6G_ADDR_CFG_SERDES6G_ADDR(x) ((x) & GENMASK(24, 0))
-#define HSIO_MCB_S6G_ADDR_CFG_SERDES6G_ADDR_M GENMASK(24, 0)
-
-#define HSIO_HW_CFG_DEV2G5_10_MODE BIT(6)
-#define HSIO_HW_CFG_DEV1G_9_MODE BIT(5)
-#define HSIO_HW_CFG_DEV1G_6_MODE BIT(4)
-#define HSIO_HW_CFG_DEV1G_5_MODE BIT(3)
-#define HSIO_HW_CFG_DEV1G_4_MODE BIT(2)
-#define HSIO_HW_CFG_PCIE_ENA BIT(1)
-#define HSIO_HW_CFG_QSGMII_ENA BIT(0)
-
-#define HSIO_HW_QSGMII_CFG_SHYST_DIS BIT(3)
-#define HSIO_HW_QSGMII_CFG_E_DET_ENA BIT(2)
-#define HSIO_HW_QSGMII_CFG_USE_I1_ENA BIT(1)
-#define HSIO_HW_QSGMII_CFG_FLIP_LANES BIT(0)
-
-#define HSIO_HW_QSGMII_STAT_DELAY_VAR_X200PS(x) (((x) << 1) & GENMASK(6, 1))
-#define HSIO_HW_QSGMII_STAT_DELAY_VAR_X200PS_M GENMASK(6, 1)
-#define HSIO_HW_QSGMII_STAT_DELAY_VAR_X200PS_X(x) (((x) & GENMASK(6, 1)) >> 1)
-#define HSIO_HW_QSGMII_STAT_SYNC BIT(0)
-
-#define HSIO_CLK_CFG_CLKDIV_PHY(x) (((x) << 1) & GENMASK(8, 1))
-#define HSIO_CLK_CFG_CLKDIV_PHY_M GENMASK(8, 1)
-#define HSIO_CLK_CFG_CLKDIV_PHY_X(x) (((x) & GENMASK(8, 1)) >> 1)
-#define HSIO_CLK_CFG_CLKDIV_PHY_DIS BIT(0)
-
-#define HSIO_TEMP_SENSOR_CTRL_FORCE_TEMP_RD BIT(5)
-#define HSIO_TEMP_SENSOR_CTRL_FORCE_RUN BIT(4)
-#define HSIO_TEMP_SENSOR_CTRL_FORCE_NO_RST BIT(3)
-#define HSIO_TEMP_SENSOR_CTRL_FORCE_POWER_UP BIT(2)
-#define HSIO_TEMP_SENSOR_CTRL_FORCE_CLK BIT(1)
-#define HSIO_TEMP_SENSOR_CTRL_SAMPLE_ENA BIT(0)
-
-#define HSIO_TEMP_SENSOR_CFG_RUN_WID(x) (((x) << 8) & GENMASK(15, 8))
-#define HSIO_TEMP_SENSOR_CFG_RUN_WID_M GENMASK(15, 8)
-#define HSIO_TEMP_SENSOR_CFG_RUN_WID_X(x) (((x) & GENMASK(15, 8)) >> 8)
-#define HSIO_TEMP_SENSOR_CFG_SAMPLE_PER(x) ((x) & GENMASK(7, 0))
-#define HSIO_TEMP_SENSOR_CFG_SAMPLE_PER_M GENMASK(7, 0)
-
-#define HSIO_TEMP_SENSOR_STAT_TEMP_VALID BIT(8)
-#define HSIO_TEMP_SENSOR_STAT_TEMP(x) ((x) & GENMASK(7, 0))
-#define HSIO_TEMP_SENSOR_STAT_TEMP_M GENMASK(7, 0)
-
-#endif
diff --git a/drivers/net/ethernet/mscc/ocelot_regs.c b/drivers/net/ethernet/mscc/ocelot_regs.c
index e334b406c40c..9271af18b93b 100644
--- a/drivers/net/ethernet/mscc/ocelot_regs.c
+++ b/drivers/net/ethernet/mscc/ocelot_regs.c
@@ -5,6 +5,7 @@
  * Copyright (c) 2017 Microsemi Corporation
  */
 #include "ocelot.h"
+#include <soc/mscc/ocelot_hsio.h>
 
 static const u32 ocelot_ana_regmap[] = {
 	REG(ANA_ADVLEARN, 0x009000),
@@ -102,82 +103,6 @@ static const u32 ocelot_qs_regmap[] = {
 	REG(QS_INH_DBG, 0x000048),
 };
 
-static const u32 ocelot_hsio_regmap[] = {
-	REG(HSIO_PLL5G_CFG0, 0x000000),
-	REG(HSIO_PLL5G_CFG1, 0x000004),
-	REG(HSIO_PLL5G_CFG2, 0x000008),
-	REG(HSIO_PLL5G_CFG3, 0x00000c),
-	REG(HSIO_PLL5G_CFG4, 0x000010),
-	REG(HSIO_PLL5G_CFG5, 0x000014),
-	REG(HSIO_PLL5G_CFG6, 0x000018),
-	REG(HSIO_PLL5G_STATUS0, 0x00001c),
-	REG(HSIO_PLL5G_STATUS1, 0x000020),
-	REG(HSIO_PLL5G_BIST_CFG0, 0x000024),
-	REG(HSIO_PLL5G_BIST_CFG1, 0x000028),
-	REG(HSIO_PLL5G_BIST_CFG2, 0x00002c),
-	REG(HSIO_PLL5G_BIST_STAT0, 0x000030),
-	REG(HSIO_PLL5G_BIST_STAT1, 0x000034),
-	REG(HSIO_RCOMP_CFG0, 0x000038),
-	REG(HSIO_RCOMP_STATUS, 0x00003c),
-	REG(HSIO_SYNC_ETH_CFG, 0x000040),
-	REG(HSIO_SYNC_ETH_PLL_CFG, 0x000048),
-	REG(HSIO_S1G_DES_CFG, 0x00004c),
-	REG(HSIO_S1G_IB_CFG, 0x000050),
-	REG(HSIO_S1G_OB_CFG, 0x000054),
-	REG(HSIO_S1G_SER_CFG, 0x000058),
-	REG(HSIO_S1G_COMMON_CFG, 0x00005c),
-	REG(HSIO_S1G_PLL_CFG, 0x000060),
-	REG(HSIO_S1G_PLL_STATUS, 0x000064),
-	REG(HSIO_S1G_DFT_CFG0, 0x000068),
-	REG(HSIO_S1G_DFT_CFG1, 0x00006c),
-	REG(HSIO_S1G_DFT_CFG2, 0x000070),
-	REG(HSIO_S1G_TP_CFG, 0x000074),
-	REG(HSIO_S1G_RC_PLL_BIST_CFG, 0x000078),
-	REG(HSIO_S1G_MISC_CFG, 0x00007c),
-	REG(HSIO_S1G_DFT_STATUS, 0x000080),
-	REG(HSIO_S1G_MISC_STATUS, 0x000084),
-	REG(HSIO_MCB_S1G_ADDR_CFG, 0x000088),
-	REG(HSIO_S6G_DIG_CFG, 0x00008c),
-	REG(HSIO_S6G_DFT_CFG0, 0x000090),
-	REG(HSIO_S6G_DFT_CFG1, 0x000094),
-	REG(HSIO_S6G_DFT_CFG2, 0x000098),
-	REG(HSIO_S6G_TP_CFG0, 0x00009c),
-	REG(HSIO_S6G_TP_CFG1, 0x0000a0),
-	REG(HSIO_S6G_RC_PLL_BIST_CFG, 0x0000a4),
-	REG(HSIO_S6G_MISC_CFG, 0x0000a8),
-	REG(HSIO_S6G_OB_ANEG_CFG, 0x0000ac),
-	REG(HSIO_S6G_DFT_STATUS, 0x0000b0),
-	REG(HSIO_S6G_ERR_CNT, 0x0000b4),
-	REG(HSIO_S6G_MISC_STATUS, 0x0000b8),
-	REG(HSIO_S6G_DES_CFG, 0x0000bc),
-	REG(HSIO_S6G_IB_CFG, 0x0000c0),
-	REG(HSIO_S6G_IB_CFG1, 0x0000c4),
-	REG(HSIO_S6G_IB_CFG2, 0x0000c8),
-	REG(HSIO_S6G_IB_CFG3, 0x0000cc),
-	REG(HSIO_S6G_IB_CFG4, 0x0000d0),
-	REG(HSIO_S6G_IB_CFG5, 0x0000d4),
-	REG(HSIO_S6G_OB_CFG, 0x0000d8),
-	REG(HSIO_S6G_OB_CFG1, 0x0000dc),
-	REG(HSIO_S6G_SER_CFG, 0x0000e0),
-	REG(HSIO_S6G_COMMON_CFG, 0x0000e4),
-	REG(HSIO_S6G_PLL_CFG, 0x0000e8),
-	REG(HSIO_S6G_ACJTAG_CFG, 0x0000ec),
-	REG(HSIO_S6G_GP_CFG, 0x0000f0),
-	REG(HSIO_S6G_IB_STATUS0, 0x0000f4),
-	REG(HSIO_S6G_IB_STATUS1, 0x0000f8),
-	REG(HSIO_S6G_ACJTAG_STATUS, 0x0000fc),
-	REG(HSIO_S6G_PLL_STATUS, 0x000100),
-	REG(HSIO_S6G_REVID, 0x000104),
-	REG(HSIO_MCB_S6G_ADDR_CFG, 0x000108),
-	REG(HSIO_HW_CFG, 0x00010c),
-	REG(HSIO_HW_QSGMII_CFG, 0x000110),
-	REG(HSIO_HW_QSGMII_STAT, 0x000114),
-	REG(HSIO_CLK_CFG, 0x000118),
-	REG(HSIO_TEMP_SENSOR_CTRL, 0x00011c),
-	REG(HSIO_TEMP_SENSOR_CFG, 0x000120),
-	REG(HSIO_TEMP_SENSOR_STAT, 0x000124),
-};
-
 static const u32 ocelot_qsys_regmap[] = {
 	REG(QSYS_PORT_MODE, 0x011200),
 	REG(QSYS_SWITCH_PORT_MODE, 0x011234),
@@ -302,7 +227,6 @@ static const u32 ocelot_sys_regmap[] = {
 static const u32 *ocelot_regmap[] = {
 	[ANA] = ocelot_ana_regmap,
 	[QS] = ocelot_qs_regmap,
-	[HSIO] = ocelot_hsio_regmap,
 	[QSYS] = ocelot_qsys_regmap,
 	[REW] = ocelot_rew_regmap,
 	[SYS] = ocelot_sys_regmap,
@@ -453,9 +377,11 @@ static void ocelot_pll5_init(struct ocelot *ocelot)
 	/* Configure PLL5. This will need a proper CCF driver
 	 * The values are coming from the VTSS API for Ocelot
 	 */
-	ocelot_write(ocelot, HSIO_PLL5G_CFG4_IB_CTRL(0x7600) |
-		     HSIO_PLL5G_CFG4_IB_BIAS_CTRL(0x8), HSIO_PLL5G_CFG4);
-	ocelot_write(ocelot, HSIO_PLL5G_CFG0_CORE_CLK_DIV(0x11) |
+	regmap_write(ocelot->targets[HSIO], HSIO_PLL5G_CFG4,
+		     HSIO_PLL5G_CFG4_IB_CTRL(0x7600) |
+		     HSIO_PLL5G_CFG4_IB_BIAS_CTRL(0x8));
+	regmap_write(ocelot->targets[HSIO], HSIO_PLL5G_CFG0,
+		     HSIO_PLL5G_CFG0_CORE_CLK_DIV(0x11) |
 		     HSIO_PLL5G_CFG0_CPU_CLK_DIV(2) |
 		     HSIO_PLL5G_CFG0_ENA_BIAS |
 		     HSIO_PLL5G_CFG0_ENA_VCO_BUF |
@@ -465,13 +391,14 @@ static void ocelot_pll5_init(struct ocelot *ocelot)
 		     HSIO_PLL5G_CFG0_SELBGV820(4) |
 		     HSIO_PLL5G_CFG0_DIV4 |
 		     HSIO_PLL5G_CFG0_ENA_CLKTREE |
-		     HSIO_PLL5G_CFG0_ENA_LANE, HSIO_PLL5G_CFG0);
+		     HSIO_PLL5G_CFG0_ENA_LANE);
+	regmap_write(ocelot->targets[HSIO], HSIO_PLL5G_CFG2,
+		     HSIO_PLL5G_CFG2_EN_RESET_FRQ_DET |
 		     HSIO_PLL5G_CFG2_EN_RESET_OVERRUN |
 		     HSIO_PLL5G_CFG2_GAIN_TEST(0x8) |
 		     HSIO_PLL5G_CFG2_ENA_AMPCTRL |
 		     HSIO_PLL5G_CFG2_PWD_AMPCTRL_N |
-		     HSIO_PLL5G_CFG2_AMPC_SEL(0x10), HSIO_PLL5G_CFG2);
+		     HSIO_PLL5G_CFG2_AMPC_SEL(0x10));
 }
 
 int ocelot_chip_init(struct ocelot *ocelot)
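The hunks above stop routing HSIO accesses through the driver-private regmap and instead write through the regmap kept in ocelot->targets[HSIO], with the register macros now exported under soc/mscc so other drivers can share the same block. As a minimal sketch of the resulting access pattern (not code from this patch; the helper name is invented, the macros are the ones defined above), a read-modify-write via the shared regmap could look like:

#include <linux/regmap.h>

/* Hypothetical helper: set the QSGMII enable bit in HSIO_HW_CFG while
 * leaving the remaining HW_CFG bits untouched, via the shared regmap.
 */
static int ocelot_hsio_enable_qsgmii(struct ocelot *ocelot)
{
	return regmap_update_bits(ocelot->targets[HSIO], HSIO_HW_CFG,
				  HSIO_HW_CFG_QSGMII_ENA,
				  HSIO_HW_CFG_QSGMII_ENA);
}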
diff --git a/drivers/net/ethernet/netronome/nfp/abm/ctrl.c b/drivers/net/ethernet/netronome/nfp/abm/ctrl.c
index 5b06f07c78cd..3c661f422688 100644
--- a/drivers/net/ethernet/netronome/nfp/abm/ctrl.c
+++ b/drivers/net/ethernet/netronome/nfp/abm/ctrl.c
@@ -1,36 +1,5 @@
-// SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
-/*
- * Copyright (C) 2018 Netronome Systems, Inc.
- *
- * This software is dual licensed under the GNU General License Version 2,
- * June 1991 as shown in the file COPYING in the top-level directory of this
- * source tree or the BSD 2-Clause License provided below. You have the
- * option to license this software under the complete terms of either license.
- *
- * The BSD 2-Clause License:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * 1. Redistributions of source code must retain the above
- *    copyright notice, this list of conditions and the following
- *    disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above
- *    copyright notice, this list of conditions and the following
- *    disclaimer in the documentation and/or other materials
- *    provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2018 Netronome Systems, Inc. */
 
 #include <linux/kernel.h>
diff --git a/drivers/net/ethernet/netronome/nfp/abm/main.c b/drivers/net/ethernet/netronome/nfp/abm/main.c
index 305ac07dc1e7..c0830c0c2c3f 100644
--- a/drivers/net/ethernet/netronome/nfp/abm/main.c
+++ b/drivers/net/ethernet/netronome/nfp/abm/main.c
@@ -1,36 +1,5 @@
-// SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
-/*
- * Copyright (C) 2018 Netronome Systems, Inc.
- *
- * This software is dual licensed under the GNU General License Version 2,
- * June 1991 as shown in the file COPYING in the top-level directory of this
- * source tree or the BSD 2-Clause License provided below. You have the
- * option to license this software under the complete terms of either license.
- *
- * The BSD 2-Clause License:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * 1. Redistributions of source code must retain the above
- *    copyright notice, this list of conditions and the following
- *    disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above
- *    copyright notice, this list of conditions and the following
- *    disclaimer in the documentation and/or other materials
- *    provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2018 Netronome Systems, Inc. */
 
 #include <linux/bitfield.h>
 #include <linux/etherdevice.h>
diff --git a/drivers/net/ethernet/netronome/nfp/abm/main.h b/drivers/net/ethernet/netronome/nfp/abm/main.h
index 934a70835473..f907b7d98917 100644
--- a/drivers/net/ethernet/netronome/nfp/abm/main.h
+++ b/drivers/net/ethernet/netronome/nfp/abm/main.h
@@ -1,36 +1,5 @@
-/* SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) */
-/*
- * Copyright (C) 2018 Netronome Systems, Inc.
- *
- * This software is dual licensed under the GNU General License Version 2,
- * June 1991 as shown in the file COPYING in the top-level directory of this
- * source tree or the BSD 2-Clause License provided below. You have the
- * option to license this software under the complete terms of either license.
- *
- * The BSD 2-Clause License:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * 1. Redistributions of source code must retain the above
- *    copyright notice, this list of conditions and the following
- *    disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above
- *    copyright notice, this list of conditions and the following
- *    disclaimer in the documentation and/or other materials
- *    provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/* Copyright (C) 2018 Netronome Systems, Inc. */
 
 #ifndef __NFP_ABM_H__
 #define __NFP_ABM_H__ 1
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/cmsg.c b/drivers/net/ethernet/netronome/nfp/bpf/cmsg.c
index 2572a4b91c7c..9b6cfa697879 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/cmsg.c
+++ b/drivers/net/ethernet/netronome/nfp/bpf/cmsg.c
@@ -1,35 +1,5 @@
-/*
- * Copyright (C) 2017-2018 Netronome Systems, Inc.
- *
- * This software is dual licensed under the GNU General License Version 2,
- * June 1991 as shown in the file COPYING in the top-level directory of this
- * source tree or the BSD 2-Clause License provided below. You have the
- * option to license this software under the complete terms of either license.
- *
- * The BSD 2-Clause License:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * 1. Redistributions of source code must retain the above
- *    copyright notice, this list of conditions and the following
- *    disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above
- *    copyright notice, this list of conditions and the following
- *    disclaimer in the documentation and/or other materials
- *    provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2017-2018 Netronome Systems, Inc. */
 
 #include <linux/bpf.h>
 #include <linux/bitops.h>
@@ -89,15 +59,32 @@ nfp_bpf_cmsg_alloc(struct nfp_app_bpf *bpf, unsigned int size)
 	return skb;
 }
 
+static unsigned int
+nfp_bpf_cmsg_map_req_size(struct nfp_app_bpf *bpf, unsigned int n)
+{
+	unsigned int size;
+
+	size = sizeof(struct cmsg_req_map_op);
+	size += (bpf->cmsg_key_sz + bpf->cmsg_val_sz) * n;
+
+	return size;
+}
+
 static struct sk_buff *
 nfp_bpf_cmsg_map_req_alloc(struct nfp_app_bpf *bpf, unsigned int n)
 {
+	return nfp_bpf_cmsg_alloc(bpf, nfp_bpf_cmsg_map_req_size(bpf, n));
+}
+
+static unsigned int
+nfp_bpf_cmsg_map_reply_size(struct nfp_app_bpf *bpf, unsigned int n)
+{
 	unsigned int size;
 
-	size = sizeof(struct cmsg_req_map_op);
-	size += sizeof(struct cmsg_key_value_pair) * n;
+	size = sizeof(struct cmsg_reply_map_op);
+	size += (bpf->cmsg_key_sz + bpf->cmsg_val_sz) * n;
 
-	return nfp_bpf_cmsg_alloc(bpf, size);
+	return size;
 }
 
 static u8 nfp_bpf_cmsg_get_type(struct sk_buff *skb)
@@ -338,6 +325,34 @@ void nfp_bpf_ctrl_free_map(struct nfp_app_bpf *bpf, struct nfp_bpf_map *nfp_map)
 	dev_consume_skb_any(skb);
 }
 
+static void *
+nfp_bpf_ctrl_req_key(struct nfp_app_bpf *bpf, struct cmsg_req_map_op *req,
+		     unsigned int n)
+{
+	return &req->data[bpf->cmsg_key_sz * n + bpf->cmsg_val_sz * n];
+}
+
+static void *
+nfp_bpf_ctrl_req_val(struct nfp_app_bpf *bpf, struct cmsg_req_map_op *req,
+		     unsigned int n)
+{
+	return &req->data[bpf->cmsg_key_sz * (n + 1) + bpf->cmsg_val_sz * n];
+}
+
+static void *
+nfp_bpf_ctrl_reply_key(struct nfp_app_bpf *bpf, struct cmsg_reply_map_op *reply,
+		       unsigned int n)
+{
+	return &reply->data[bpf->cmsg_key_sz * n + bpf->cmsg_val_sz * n];
+}
+
+static void *
+nfp_bpf_ctrl_reply_val(struct nfp_app_bpf *bpf, struct cmsg_reply_map_op *reply,
+		       unsigned int n)
+{
+	return &reply->data[bpf->cmsg_key_sz * (n + 1) + bpf->cmsg_val_sz * n];
+}
+
 static int
 nfp_bpf_ctrl_entry_op(struct bpf_offloaded_map *offmap,
 		      enum nfp_bpf_cmsg_type op,
@@ -366,12 +381,13 @@ nfp_bpf_ctrl_entry_op(struct bpf_offloaded_map *offmap,
 	/* Copy inputs */
 	if (key)
-		memcpy(&req->elem[0].key, key, map->key_size);
+		memcpy(nfp_bpf_ctrl_req_key(bpf, req, 0), key, map->key_size);
 	if (value)
-		memcpy(&req->elem[0].value, value, map->value_size);
+		memcpy(nfp_bpf_ctrl_req_val(bpf, req, 0), value,
+		       map->value_size);
 
 	skb = nfp_bpf_cmsg_communicate(bpf, skb, op,
-				       sizeof(*reply) + sizeof(*reply->elem));
+				       nfp_bpf_cmsg_map_reply_size(bpf, 1));
 	if (IS_ERR(skb))
 		return PTR_ERR(skb);
 
@@ -382,9 +398,11 @@ nfp_bpf_ctrl_entry_op(struct bpf_offloaded_map *offmap,
 	/* Copy outputs */
 	if (out_key)
-		memcpy(out_key, &reply->elem[0].key, map->key_size);
+		memcpy(out_key, nfp_bpf_ctrl_reply_key(bpf, reply, 0),
+		       map->key_size);
 	if (out_value)
-		memcpy(out_value, &reply->elem[0].value, map->value_size);
+		memcpy(out_value, nfp_bpf_ctrl_reply_val(bpf, reply, 0),
+		       map->value_size);
 
 	dev_consume_skb_any(skb);
 
@@ -428,6 +446,13 @@ int nfp_bpf_ctrl_getnext_entry(struct bpf_offloaded_map *offmap,
 				key, NULL, 0, next_key, NULL);
 }
 
+unsigned int nfp_bpf_ctrl_cmsg_mtu(struct nfp_app_bpf *bpf)
+{
+	return max3((unsigned int)NFP_NET_DEFAULT_MTU,
+		    nfp_bpf_cmsg_map_req_size(bpf, 1),
+		    nfp_bpf_cmsg_map_reply_size(bpf, 1));
+}
+
 void nfp_bpf_ctrl_msg_rx(struct nfp_app *app, struct sk_buff *skb)
 {
 	struct nfp_app_bpf *bpf = app->priv;
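The new accessors encode one layout decision: element n of a map request or reply is a key immediately followed by its value, with both sizes taken from the negotiated bpf->cmsg_key_sz and bpf->cmsg_val_sz rather than from compile-time constants. A sketch of the arithmetic they implement (helper names are illustrative, not from the patch):

/* Offset of key n: n complete (key, value) elements precede it. */
static unsigned int cmsg_key_off(unsigned int key_sz, unsigned int val_sz,
				 unsigned int n)
{
	return (key_sz + val_sz) * n;
}

/* Offset of value n: its own key sits directly in front of it, so this
 * equals key_sz * (n + 1) + val_sz * n, matching nfp_bpf_ctrl_req_val().
 */
static unsigned int cmsg_val_off(unsigned int key_sz, unsigned int val_sz,
				 unsigned int n)
{
	return cmsg_key_off(key_sz, val_sz, n) + key_sz;
}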
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/fw.h b/drivers/net/ethernet/netronome/nfp/bpf/fw.h
index e4f9b7ec8528..721921bcf120 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/fw.h
+++ b/drivers/net/ethernet/netronome/nfp/bpf/fw.h
@@ -1,35 +1,5 @@
-/*
- * Copyright (C) 2017-2018 Netronome Systems, Inc.
- *
- * This software is dual licensed under the GNU General License Version 2,
- * June 1991 as shown in the file COPYING in the top-level directory of this
- * source tree or the BSD 2-Clause License provided below. You have the
- * option to license this software under the complete terms of either license.
- *
- * The BSD 2-Clause License:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * 1. Redistributions of source code must retain the above
- *    copyright notice, this list of conditions and the following
- *    disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above
- *    copyright notice, this list of conditions and the following
- *    disclaimer in the documentation and/or other materials
- *    provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/* Copyright (C) 2017-2018 Netronome Systems, Inc. */
 
 #ifndef NFP_BPF_FW_H
 #define NFP_BPF_FW_H 1
@@ -52,6 +22,7 @@ enum bpf_cap_tlv_type {
 	NFP_BPF_CAP_TYPE_RANDOM		= 4,
 	NFP_BPF_CAP_TYPE_QUEUE_SELECT	= 5,
 	NFP_BPF_CAP_TYPE_ADJUST_TAIL	= 6,
+	NFP_BPF_CAP_TYPE_ABI_VERSION	= 7,
 };
 
 struct nfp_bpf_cap_tlv_func {
@@ -98,6 +69,7 @@ enum nfp_bpf_cmsg_type {
 #define CMSG_TYPE_MAP_REPLY_BIT		7
 #define __CMSG_REPLY(req)		(BIT(CMSG_TYPE_MAP_REPLY_BIT) | (req))
 
+/* BPF ABIv2 fixed-length control message fields */
 #define CMSG_MAP_KEY_LW			16
 #define CMSG_MAP_VALUE_LW		16
 
@@ -147,24 +119,19 @@ struct cmsg_reply_map_free_tbl {
 	__be32 count;
 };
 
-struct cmsg_key_value_pair {
-	__be32 key[CMSG_MAP_KEY_LW];
-	__be32 value[CMSG_MAP_VALUE_LW];
-};
-
 struct cmsg_req_map_op {
 	struct cmsg_hdr hdr;
 	__be32 tid;
 	__be32 count;
 	__be32 flags;
-	struct cmsg_key_value_pair elem[0];
+	u8 data[0];
 };
 
 struct cmsg_reply_map_op {
 	struct cmsg_reply_map_simple reply_hdr;
 	__be32 count;
 	__be32 resv;
-	struct cmsg_key_value_pair elem[0];
+	u8 data[0];
 };
 
 struct cmsg_bpf_event {
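Replacing the fixed struct cmsg_key_value_pair elem[0] with u8 data[0] removes the compile-time geometry: the message body becomes raw bytes carved up at runtime, so total message size is a function of the negotiated sizes. This is also why nfp_bpf_ctrl_cmsg_mtu() above must ensure the control channel can carry at least a one-element request and reply. A hedged restatement of the sizing rule (illustrative helper, not from the patch):

/* Bytes needed for a message carrying n (key, value) elements after a
 * header of hdr_sz bytes; with ABI 3 key_sz/val_sz come from firmware.
 */
static unsigned int demo_map_msg_size(unsigned int hdr_sz, unsigned int key_sz,
				      unsigned int val_sz, unsigned int n)
{
	return hdr_sz + n * (key_sz + val_sz);
}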
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/jit.c b/drivers/net/ethernet/netronome/nfp/bpf/jit.c
index eff57f7d056a..97d33bb4d84d 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/jit.c
+++ b/drivers/net/ethernet/netronome/nfp/bpf/jit.c
@@ -1,35 +1,5 @@
-/*
- * Copyright (C) 2016-2018 Netronome Systems, Inc.
- *
- * This software is dual licensed under the GNU General License Version 2,
- * June 1991 as shown in the file COPYING in the top-level directory of this
- * source tree or the BSD 2-Clause License provided below. You have the
- * option to license this software under the complete terms of either license.
- *
- * The BSD 2-Clause License:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * 1. Redistributions of source code must retain the above
- *    copyright notice, this list of conditions and the following
- *    disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above
- *    copyright notice, this list of conditions and the following
- *    disclaimer in the documentation and/or other materials
- *    provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2016-2018 Netronome Systems, Inc. */
 
 #define pr_fmt(fmt)	"NFP net bpf: " fmt
 
@@ -267,6 +237,38 @@ emit_br_bset(struct nfp_prog *nfp_prog, swreg src, u8 bit, u16 addr, u8 defer)
 }
 
 static void
+__emit_br_alu(struct nfp_prog *nfp_prog, u16 areg, u16 breg, u16 imm_hi,
+	      u8 defer, bool dst_lmextn, bool src_lmextn)
+{
+	u64 insn;
+
+	insn = OP_BR_ALU_BASE |
+		FIELD_PREP(OP_BR_ALU_A_SRC, areg) |
+		FIELD_PREP(OP_BR_ALU_B_SRC, breg) |
+		FIELD_PREP(OP_BR_ALU_DEFBR, defer) |
+		FIELD_PREP(OP_BR_ALU_IMM_HI, imm_hi) |
+		FIELD_PREP(OP_BR_ALU_SRC_LMEXTN, src_lmextn) |
+		FIELD_PREP(OP_BR_ALU_DST_LMEXTN, dst_lmextn);
+
+	nfp_prog_push(nfp_prog, insn);
+}
+
+static void emit_rtn(struct nfp_prog *nfp_prog, swreg base, u8 defer)
+{
+	struct nfp_insn_ur_regs reg;
+	int err;
+
+	err = swreg_to_unrestricted(reg_none(), base, reg_imm(0), &reg);
+	if (err) {
+		nfp_prog->error = err;
+		return;
+	}
+
+	__emit_br_alu(nfp_prog, reg.areg, reg.breg, 0, defer, reg.dst_lmextn,
+		      reg.src_lmextn);
+}
+
+static void
 __emit_immed(struct nfp_prog *nfp_prog, u16 areg, u16 breg, u16 imm_hi,
 	     enum immed_width width, bool invert,
 	     enum immed_shift shift, bool wr_both,
@@ -1137,7 +1139,7 @@ mem_op_stack(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
 	     unsigned int size, unsigned int ptr_off, u8 gpr, u8 ptr_gpr,
 	     bool clr_gpr, lmem_step step)
 {
-	s32 off = nfp_prog->stack_depth + meta->insn.off + ptr_off;
+	s32 off = nfp_prog->stack_frame_depth + meta->insn.off + ptr_off;
 	bool first = true, last;
 	bool needs_inc = false;
 	swreg stack_off_reg;
@@ -1146,7 +1148,8 @@ mem_op_stack(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
 	bool lm3 = true;
 	int ret;
 
-	if (meta->ptr_not_const) {
+	if (meta->ptr_not_const ||
+	    meta->flags & FLAG_INSN_PTR_CALLER_STACK_FRAME) {
 		/* Use of the last encountered ptr_off is OK, they all have
 		 * the same alignment.  Depend on low bits of value being
 		 * discarded when written to LMaddr register.
@@ -1695,7 +1698,7 @@ map_call_stack_common(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
 	s64 lm_off;
 
 	/* We only have to reload LM0 if the key is not at start of stack */
-	lm_off = nfp_prog->stack_depth;
+	lm_off = nfp_prog->stack_frame_depth;
 	lm_off += meta->arg2.reg.var_off.value + meta->arg2.reg.off;
 	load_lm_ptr = meta->arg2.var_off || lm_off;
 
@@ -1808,10 +1811,10 @@ static int mov_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
 		swreg stack_depth_reg;
 
 		stack_depth_reg = ur_load_imm_any(nfp_prog,
-						  nfp_prog->stack_depth,
+						  nfp_prog->stack_frame_depth,
 						  stack_imm(nfp_prog));
-		emit_alu(nfp_prog, reg_both(dst),
-			 stack_reg(nfp_prog), ALU_OP_ADD, stack_depth_reg);
+		emit_alu(nfp_prog, reg_both(dst), stack_reg(nfp_prog),
+			 ALU_OP_ADD, stack_depth_reg);
 		wrp_immed(nfp_prog, reg_both(dst + 1), 0);
 	} else {
 		wrp_reg_mov(nfp_prog, dst, src);
@@ -3081,7 +3084,93 @@ static int jne_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
 	return wrp_test_reg(nfp_prog, meta, ALU_OP_XOR, BR_BNE);
 }
 
-static int call(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+static int
+bpf_to_bpf_call(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+	u32 ret_tgt, stack_depth, offset_br;
+	swreg tmp_reg;
+
+	stack_depth = round_up(nfp_prog->stack_frame_depth, STACK_FRAME_ALIGN);
+	/* Space for saving the return address is accounted for by the callee,
+	 * so stack_depth can be zero for the main function.
+	 */
+	if (stack_depth) {
+		tmp_reg = ur_load_imm_any(nfp_prog, stack_depth,
+					  stack_imm(nfp_prog));
+		emit_alu(nfp_prog, stack_reg(nfp_prog),
+			 stack_reg(nfp_prog), ALU_OP_ADD, tmp_reg);
+		emit_csr_wr(nfp_prog, stack_reg(nfp_prog),
+			    NFP_CSR_ACT_LM_ADDR0);
+	}
+
+	/* Two cases for jumping to the callee:
+	 *
+	 * - If callee uses and needs to save R6~R9 then:
+	 *   1. Put the start offset of the callee into imm_b(). This will
+	 *      require a fixup step, as we do not necessarily know this
+	 *      address yet.
+	 *   2. Put the return address from the callee to the caller into
+	 *      register ret_reg().
+	 *   3. (After defer slots are consumed) Jump to the subroutine that
+	 *      pushes the registers to the stack.
+	 *   The subroutine acts as a trampoline, and returns to the address in
+	 *   imm_b(), i.e. jumps to the callee.
+	 *
+	 * - If callee does not need to save R6~R9 then just load return
+	 *   address to the caller in ret_reg(), and jump to the callee
+	 *   directly.
+	 *
+	 * Using ret_reg() to pass the return address to the callee is set here
+	 * as a convention. The callee can then push this address onto its
+	 * stack frame in its prologue. The advantages of passing the return
+	 * address through ret_reg(), instead of pushing it to the stack right
+	 * here, are the following:
+	 * - It looks cleaner.
+	 * - If the called function is called multiple times, we get a lower
+	 *   program size.
+	 * - We save the two no-op instructions that would otherwise have to be
+	 *   added just before the emit_br() when stack depth is not null.
+	 * - If we ever find a register to hold the return address during the
+	 *   whole execution of the callee, we will not have to push the return
+	 *   address to the stack for leaf functions.
+	 */
+	if (!meta->jmp_dst) {
+		pr_err("BUG: BPF-to-BPF call has no destination recorded\n");
+		return -ELOOP;
+	}
+	if (nfp_prog->subprog[meta->jmp_dst->subprog_idx].needs_reg_push) {
+		ret_tgt = nfp_prog_current_offset(nfp_prog) + 3;
+		emit_br_relo(nfp_prog, BR_UNC, BR_OFF_RELO, 2,
+			     RELO_BR_GO_CALL_PUSH_REGS);
+		offset_br = nfp_prog_current_offset(nfp_prog);
+		wrp_immed_relo(nfp_prog, imm_b(nfp_prog), 0, RELO_IMMED_REL);
+	} else {
+		ret_tgt = nfp_prog_current_offset(nfp_prog) + 2;
+		emit_br(nfp_prog, BR_UNC, meta->n + 1 + meta->insn.imm, 1);
+		offset_br = nfp_prog_current_offset(nfp_prog);
+	}
+	wrp_immed_relo(nfp_prog, ret_reg(nfp_prog), ret_tgt, RELO_IMMED_REL);
+
+	if (!nfp_prog_confirm_current_offset(nfp_prog, ret_tgt))
+		return -EINVAL;
+
+	if (stack_depth) {
+		tmp_reg = ur_load_imm_any(nfp_prog, stack_depth,
+					  stack_imm(nfp_prog));
+		emit_alu(nfp_prog, stack_reg(nfp_prog),
+			 stack_reg(nfp_prog), ALU_OP_SUB, tmp_reg);
+		emit_csr_wr(nfp_prog, stack_reg(nfp_prog),
+			    NFP_CSR_ACT_LM_ADDR0);
+		wrp_nops(nfp_prog, 3);
+	}
+
+	meta->num_insns_after_br = nfp_prog_current_offset(nfp_prog);
+	meta->num_insns_after_br -= offset_br;
+
+	return 0;
+}
+
+static int helper_call(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
 {
 	switch (meta->insn.imm) {
 	case BPF_FUNC_xdp_adjust_head:
@@ -3102,6 +3191,19 @@ static int call(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
 	}
 }
 
+static int call(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+	if (is_mbpf_pseudo_call(meta))
+		return bpf_to_bpf_call(nfp_prog, meta);
+	else
+		return helper_call(nfp_prog, meta);
+}
+
+static bool nfp_is_main_function(struct nfp_insn_meta *meta)
+{
+	return meta->subprog_idx == 0;
+}
+
 static int goto_out(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
 {
 	emit_br_relo(nfp_prog, BR_UNC, BR_OFF_RELO, 0, RELO_BR_GO_OUT);
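The call() dispatch relies on the verifier-level distinction between helper calls and BPF-to-BPF calls. In the kernel's instruction encoding both use BPF_JMP | BPF_CALL; a pseudo (BPF-to-BPF) call is marked by BPF_PSEUDO_CALL in src_reg and carries the callee's instruction-offset delta in imm, while a helper call keeps src_reg zero and a helper ID in imm. A sketch of the check is_mbpf_pseudo_call() is expected to perform (illustrative, not copied from the driver):

#include <linux/bpf.h>

static bool insn_is_pseudo_call(const struct bpf_insn *insn)
{
	return insn->code == (BPF_JMP | BPF_CALL) &&
	       insn->src_reg == BPF_PSEUDO_CALL;
}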
@@ -3109,6 +3211,39 @@ static int goto_out(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
 	return 0;
 }
 
+static int
+nfp_subprog_epilogue(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+	if (nfp_prog->subprog[meta->subprog_idx].needs_reg_push) {
+		/* Pop R6~R9 from the stack via the related subroutine.
+		 * We loaded the return address to the caller into ret_reg().
+		 * This means that the subroutine does not come back here; we
+		 * make it jump back to the subprogram caller directly!
+		 */
+		emit_br_relo(nfp_prog, BR_UNC, BR_OFF_RELO, 1,
+			     RELO_BR_GO_CALL_POP_REGS);
+		/* Pop return address from the stack. */
+		wrp_mov(nfp_prog, ret_reg(nfp_prog), reg_lm(0, 0));
+	} else {
+		/* Pop return address from the stack. */
+		wrp_mov(nfp_prog, ret_reg(nfp_prog), reg_lm(0, 0));
+		/* Jump back to caller if no callee-saved registers were used
+		 * by the subprogram.
+		 */
+		emit_rtn(nfp_prog, ret_reg(nfp_prog), 0);
+	}
+
+	return 0;
+}
+
+static int jmp_exit(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+	if (nfp_is_main_function(meta))
+		return goto_out(nfp_prog, meta);
+	else
+		return nfp_subprog_epilogue(nfp_prog, meta);
+}
+
 static const instr_cb_t instr_cb[256] = {
 	[BPF_ALU64 | BPF_MOV | BPF_X] =	mov_reg64,
 	[BPF_ALU64 | BPF_MOV | BPF_K] =	mov_imm64,
@@ -3197,36 +3332,66 @@ static const instr_cb_t instr_cb[256] = {
 	[BPF_JMP | BPF_JSET | BPF_X] =	jset_reg,
 	[BPF_JMP | BPF_JNE | BPF_X] =	jne_reg,
 	[BPF_JMP | BPF_CALL] =		call,
-	[BPF_JMP | BPF_EXIT] =		goto_out,
+	[BPF_JMP | BPF_EXIT] =		jmp_exit,
 };
 
 /* --- Assembler logic --- */
+static int
+nfp_fixup_immed_relo(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
+		     struct nfp_insn_meta *jmp_dst, u32 br_idx)
+{
+	if (immed_get_value(nfp_prog->prog[br_idx + 1])) {
+		pr_err("BUG: failed to fix up callee register saving\n");
+		return -EINVAL;
+	}
+
+	immed_set_value(&nfp_prog->prog[br_idx + 1], jmp_dst->off);
+
+	return 0;
+}
+
 static int nfp_fixup_branches(struct nfp_prog *nfp_prog)
 {
 	struct nfp_insn_meta *meta, *jmp_dst;
 	u32 idx, br_idx;
+	int err;
 
 	list_for_each_entry(meta, &nfp_prog->insns, l) {
 		if (meta->skip)
 			continue;
-		if (meta->insn.code == (BPF_JMP | BPF_CALL))
-			continue;
 		if (BPF_CLASS(meta->insn.code) != BPF_JMP)
 			continue;
+		if (meta->insn.code == (BPF_JMP | BPF_EXIT) &&
+		    !nfp_is_main_function(meta))
+			continue;
+		if (is_mbpf_helper_call(meta))
+			continue;
 
 		if (list_is_last(&meta->l, &nfp_prog->insns))
 			br_idx = nfp_prog->last_bpf_off;
 		else
 			br_idx = list_next_entry(meta, l)->off - 1;
 
+		/* For BPF-to-BPF function calls, a stack adjustment sequence
+		 * is generated after the return instruction. Therefore, we
+		 * must subtract the length of this sequence to have br_idx
+		 * pointing to where the "branch" NFP instruction is expected
+		 * to be.
+		 */
+		if (is_mbpf_pseudo_call(meta))
+			br_idx -= meta->num_insns_after_br;
+
 		if (!nfp_is_br(nfp_prog->prog[br_idx])) {
 			pr_err("Fixup found block not ending in branch %d %02x %016llx!!\n",
 			       br_idx, meta->insn.code, nfp_prog->prog[br_idx]);
 			return -ELOOP;
 		}
+
+		if (meta->insn.code == (BPF_JMP | BPF_EXIT))
+			continue;
+
 		/* Leave special branches for later */
 		if (FIELD_GET(OP_RELO_TYPE, nfp_prog->prog[br_idx]) !=
-		    RELO_BR_REL)
+		    RELO_BR_REL && !is_mbpf_pseudo_call(meta))
 			continue;
 
 		if (!meta->jmp_dst) {
@@ -3241,6 +3406,18 @@ static int nfp_fixup_branches(struct nfp_prog *nfp_prog)
 			return -ELOOP;
 		}
 
+		if (is_mbpf_pseudo_call(meta) &&
+		    nfp_prog->subprog[jmp_dst->subprog_idx].needs_reg_push) {
+			err = nfp_fixup_immed_relo(nfp_prog, meta,
+						   jmp_dst, br_idx);
+			if (err)
+				return err;
+		}
+
+		if (FIELD_GET(OP_RELO_TYPE, nfp_prog->prog[br_idx]) !=
+		    RELO_BR_REL)
+			continue;
+
 		for (idx = meta->off; idx <= br_idx; idx++) {
 			if (!nfp_is_br(nfp_prog->prog[idx]))
 				continue;
@@ -3258,6 +3435,27 @@ static void nfp_intro(struct nfp_prog *nfp_prog)
 		 plen_reg(nfp_prog), ALU_OP_AND, pv_len(nfp_prog));
 }
 
+static void
+nfp_subprog_prologue(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+	/* Save return address into the stack. */
+	wrp_mov(nfp_prog, reg_lm(0, 0), ret_reg(nfp_prog));
+}
+
+static void
+nfp_start_subprog(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+	unsigned int depth = nfp_prog->subprog[meta->subprog_idx].stack_depth;
+
+	nfp_prog->stack_frame_depth = round_up(depth, 4);
+	nfp_subprog_prologue(nfp_prog, meta);
+}
+
+bool nfp_is_subprog_start(struct nfp_insn_meta *meta)
+{
+	return meta->flags & FLAG_INSN_IS_SUBPROG_START;
+}
+
 static void nfp_outro_tc_da(struct nfp_prog *nfp_prog)
 {
 	/* TC direct-action mode:
@@ -3348,6 +3546,67 @@ static void nfp_outro_xdp(struct nfp_prog *nfp_prog)
 	emit_ld_field(nfp_prog, reg_a(0), 0xc, reg_b(2), SHF_SC_L_SHF, 16);
 }
 
+static bool nfp_prog_needs_callee_reg_save(struct nfp_prog *nfp_prog)
+{
+	unsigned int idx;
+
+	for (idx = 1; idx < nfp_prog->subprog_cnt; idx++)
+		if (nfp_prog->subprog[idx].needs_reg_push)
+			return true;
+
+	return false;
+}
+
+static void nfp_push_callee_registers(struct nfp_prog *nfp_prog)
+{
+	u8 reg;
+
+	/* Subroutine: Save all callee saved registers (R6 ~ R9).
+	 * imm_b() holds the return address.
+	 */
+	nfp_prog->tgt_call_push_regs = nfp_prog_current_offset(nfp_prog);
+	for (reg = BPF_REG_6; reg <= BPF_REG_9; reg++) {
+		u8 adj = (reg - BPF_REG_0) * 2;
+		u8 idx = (reg - BPF_REG_6) * 2;
+
+		/* The first slot in the stack frame is used to push the return
+		 * address in bpf_to_bpf_call(), start just after.
+		 */
+		wrp_mov(nfp_prog, reg_lm(0, 1 + idx), reg_b(adj));
+
+		if (reg == BPF_REG_8)
+			/* Prepare to jump back, last 3 insns use defer slots */
+			emit_rtn(nfp_prog, imm_b(nfp_prog), 3);
+
+		wrp_mov(nfp_prog, reg_lm(0, 1 + idx + 1), reg_b(adj + 1));
+	}
+}
+
+static void nfp_pop_callee_registers(struct nfp_prog *nfp_prog)
+{
+	u8 reg;
+
+	/* Subroutine: Restore all callee saved registers (R6 ~ R9).
+	 * ret_reg() holds the return address.
+	 */
+	nfp_prog->tgt_call_pop_regs = nfp_prog_current_offset(nfp_prog);
+	for (reg = BPF_REG_6; reg <= BPF_REG_9; reg++) {
+		u8 adj = (reg - BPF_REG_0) * 2;
+		u8 idx = (reg - BPF_REG_6) * 2;
+
+		/* The first slot in the stack frame holds the return address,
+		 * start popping just after that.
+		 */
+		wrp_mov(nfp_prog, reg_both(adj), reg_lm(0, 1 + idx));
+
+		if (reg == BPF_REG_8)
+			/* Prepare to jump back, last 3 insns use defer slots */
+			emit_rtn(nfp_prog, ret_reg(nfp_prog), 3);
+
+		wrp_mov(nfp_prog, reg_both(adj + 1), reg_lm(0, 1 + idx + 1));
+	}
+}
+
 static void nfp_outro(struct nfp_prog *nfp_prog)
 {
 	switch (nfp_prog->type) {
@@ -3360,13 +3619,23 @@ static void nfp_outro(struct nfp_prog *nfp_prog)
 	default:
 		WARN_ON(1);
 	}
+
+	if (!nfp_prog_needs_callee_reg_save(nfp_prog))
+		return;
+
+	nfp_push_callee_registers(nfp_prog);
+	nfp_pop_callee_registers(nfp_prog);
 }
 
 static int nfp_translate(struct nfp_prog *nfp_prog)
 {
 	struct nfp_insn_meta *meta;
+	unsigned int depth;
 	int err;
 
+	depth = nfp_prog->subprog[0].stack_depth;
+	nfp_prog->stack_frame_depth = round_up(depth, 4);
+
 	nfp_intro(nfp_prog);
 	if (nfp_prog->error)
 		return nfp_prog->error;
@@ -3376,6 +3645,12 @@ static int nfp_translate(struct nfp_prog *nfp_prog)
 
 		meta->off = nfp_prog_current_offset(nfp_prog);
 
+		if (nfp_is_subprog_start(meta)) {
+			nfp_start_subprog(nfp_prog, meta);
+			if (nfp_prog->error)
+				return nfp_prog->error;
+		}
+
 		if (meta->skip) {
 			nfp_prog->n_translated++;
 			continue;
 		}
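The push/pop subroutines above fix the local-memory frame layout: word 0 of the frame holds the return address saved by the subprogram prologue, and R6~R9 follow as four 64-bit register pairs, two 32-bit LM words each. Their slot arithmetic can be restated as (illustrative helper, not from the patch):

/* LM word index of one 32-bit half of a callee-saved register R6..R9;
 * bpf_reg is the BPF register number (6..9).
 */
static unsigned int lm_word_for_reg_half(unsigned int bpf_reg, bool high)
{
	unsigned int idx = (bpf_reg - 6) * 2;	/* two LM words per register */

	return 1 + idx + (high ? 1 : 0);	/* word 0 = return address */
}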
@@ -4018,20 +4293,35 @@ void nfp_bpf_jit_prepare(struct nfp_prog *nfp_prog, unsigned int cnt)
 
 	/* Another pass to record jump information. */
 	list_for_each_entry(meta, &nfp_prog->insns, l) {
+		struct nfp_insn_meta *dst_meta;
 		u64 code = meta->insn.code;
+		unsigned int dst_idx;
+		bool pseudo_call;
+
+		if (BPF_CLASS(code) != BPF_JMP)
+			continue;
+		if (BPF_OP(code) == BPF_EXIT)
+			continue;
+		if (is_mbpf_helper_call(meta))
+			continue;
 
-		if (BPF_CLASS(code) == BPF_JMP && BPF_OP(code) != BPF_EXIT &&
-		    BPF_OP(code) != BPF_CALL) {
-			struct nfp_insn_meta *dst_meta;
-			unsigned short dst_indx;
+		/* If opcode is BPF_CALL at this point, this can only be a
+		 * BPF-to-BPF call (a.k.a. pseudo call).
+		 */
+		pseudo_call = BPF_OP(code) == BPF_CALL;
 
-			dst_indx = meta->n + 1 + meta->insn.off;
-			dst_meta = nfp_bpf_goto_meta(nfp_prog, meta, dst_indx,
-						     cnt);
+		if (pseudo_call)
+			dst_idx = meta->n + 1 + meta->insn.imm;
+		else
+			dst_idx = meta->n + 1 + meta->insn.off;
 
-			meta->jmp_dst = dst_meta;
-			dst_meta->flags |= FLAG_INSN_IS_JUMP_DST;
-		}
+		dst_meta = nfp_bpf_goto_meta(nfp_prog, meta, dst_idx, cnt);
+
+		if (pseudo_call)
+			dst_meta->flags |= FLAG_INSN_IS_SUBPROG_START;
+
+		dst_meta->flags |= FLAG_INSN_IS_JUMP_DST;
+		meta->jmp_dst = dst_meta;
 	}
 }
 
@@ -4054,6 +4344,7 @@ void *nfp_bpf_relo_for_vnic(struct nfp_prog *nfp_prog, struct nfp_bpf_vnic *bv)
 	for (i = 0; i < nfp_prog->prog_len; i++) {
 		enum nfp_relo_type special;
 		u32 val;
+		u16 off;
 
 		special = FIELD_GET(OP_RELO_TYPE, prog[i]);
 		switch (special) {
@@ -4070,6 +4361,24 @@ void *nfp_bpf_relo_for_vnic(struct nfp_prog *nfp_prog, struct nfp_bpf_vnic *bv)
 			br_set_offset(&prog[i],
 				      nfp_prog->tgt_abort + bv->start_off);
 			break;
+		case RELO_BR_GO_CALL_PUSH_REGS:
+			if (!nfp_prog->tgt_call_push_regs) {
+				pr_err("BUG: failed to detect subprogram registers needs\n");
+				err = -EINVAL;
+				goto err_free_prog;
+			}
+			off = nfp_prog->tgt_call_push_regs + bv->start_off;
+			br_set_offset(&prog[i], off);
+			break;
+		case RELO_BR_GO_CALL_POP_REGS:
+			if (!nfp_prog->tgt_call_pop_regs) {
+				pr_err("BUG: failed to detect subprogram registers needs\n");
+				err = -EINVAL;
+				goto err_free_prog;
+			}
+			off = nfp_prog->tgt_call_pop_regs + bv->start_off;
+			br_set_offset(&prog[i], off);
+			break;
 		case RELO_BR_NEXT_PKT:
 			br_set_offset(&prog[i], bv->tgt_done);
 			break;
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/main.c b/drivers/net/ethernet/netronome/nfp/bpf/main.c
index 970af07f4656..6243af0ab025 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/main.c
+++ b/drivers/net/ethernet/netronome/nfp/bpf/main.c
@@ -1,35 +1,5 @@
-/*
- * Copyright (C) 2017-2018 Netronome Systems, Inc.
- *
- * This software is dual licensed under the GNU General License Version 2,
- * June 1991 as shown in the file COPYING in the top-level directory of this
- * source tree or the BSD 2-Clause License provided below. You have the
- * option to license this software under the complete terms of either license.
- *
- * The BSD 2-Clause License:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * 1. Redistributions of source code must retain the above
- *    copyright notice, this list of conditions and the following
- *    disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above
- *    copyright notice, this list of conditions and the following
- *    disclaimer in the documentation and/or other materials
- *    provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2017-2018 Netronome Systems, Inc. */
 
 #include <net/pkt_cls.h>
 
@@ -54,11 +24,14 @@ const struct rhashtable_params nfp_bpf_maps_neutral_params = {
 static bool nfp_net_ebpf_capable(struct nfp_net *nn)
 {
 #ifdef __LITTLE_ENDIAN
-	if (nn->cap & NFP_NET_CFG_CTRL_BPF &&
-	    nn_readb(nn, NFP_NET_CFG_BPF_ABI) == NFP_NET_BPF_ABI)
-		return true;
-#endif
+	struct nfp_app_bpf *bpf = nn->app->priv;
+
+	return nn->cap & NFP_NET_CFG_CTRL_BPF &&
+	       bpf->abi_version &&
+	       nn_readb(nn, NFP_NET_CFG_BPF_ABI) == bpf->abi_version;
+#else
 	return false;
+#endif
 }
 
 static int
@@ -342,6 +315,26 @@ nfp_bpf_parse_cap_adjust_tail(struct nfp_app_bpf *bpf, void __iomem *value,
 	return 0;
 }
 
+static int
+nfp_bpf_parse_cap_abi_version(struct nfp_app_bpf *bpf, void __iomem *value,
+			      u32 length)
+{
+	if (length < 4) {
+		nfp_err(bpf->app->cpp, "truncated ABI version TLV: %d\n",
+			length);
+		return -EINVAL;
+	}
+
+	bpf->abi_version = readl(value);
+	if (bpf->abi_version < 2 || bpf->abi_version > 3) {
+		nfp_warn(bpf->app->cpp, "unsupported BPF ABI version: %d\n",
+			 bpf->abi_version);
+		bpf->abi_version = 0;
+	}
+
+	return 0;
+}
+
 static int nfp_bpf_parse_capabilities(struct nfp_app *app)
 {
 	struct nfp_cpp *cpp = app->pf->cpp;
@@ -393,6 +386,11 @@ static int nfp_bpf_parse_capabilities(struct nfp_app *app)
 					     length))
 				goto err_release_free;
 			break;
+		case NFP_BPF_CAP_TYPE_ABI_VERSION:
+			if (nfp_bpf_parse_cap_abi_version(app->priv, value,
+							  length))
+				goto err_release_free;
+			break;
 		default:
 			nfp_dbg(cpp, "unknown BPF capability: %d\n", type);
 			break;
@@ -414,6 +412,11 @@ err_release_free:
 	return -EINVAL;
 }
 
+static void nfp_bpf_init_capabilities(struct nfp_app_bpf *bpf)
+{
+	bpf->abi_version = 2; /* Original BPF ABI version */
+}
+
 static int nfp_bpf_ndo_init(struct nfp_app *app, struct net_device *netdev)
 {
 	struct nfp_app_bpf *bpf = app->priv;
@@ -447,10 +450,21 @@ static int nfp_bpf_init(struct nfp_app *app)
 	if (err)
 		goto err_free_bpf;
 
+	nfp_bpf_init_capabilities(bpf);
+
 	err = nfp_bpf_parse_capabilities(app);
 	if (err)
 		goto err_free_neutral_maps;
 
+	if (bpf->abi_version < 3) {
+		bpf->cmsg_key_sz = CMSG_MAP_KEY_LW * 4;
+		bpf->cmsg_val_sz = CMSG_MAP_VALUE_LW * 4;
+	} else {
+		bpf->cmsg_key_sz = bpf->maps.max_key_sz;
+		bpf->cmsg_val_sz = bpf->maps.max_val_sz;
+		app->ctrl_mtu = nfp_bpf_ctrl_cmsg_mtu(bpf);
+	}
+
 	bpf->bpf_dev = bpf_offload_dev_create();
 	err = PTR_ERR_OR_ZERO(bpf->bpf_dev);
 	if (err)
@@ -465,11 +479,6 @@ err_free_bpf:
 	return err;
 }
 
-static void nfp_check_rhashtable_empty(void *ptr, void *arg)
-{
-	WARN_ON_ONCE(1);
-}
-
 static void nfp_bpf_clean(struct nfp_app *app)
 {
 	struct nfp_app_bpf *bpf = app->priv;
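With the ABI version TLV in place, offload gating becomes a three-way check: the BPF capability bit must be set, capability parsing must have left a non-zero (i.e. supported) abi_version, and the per-vNIC BAR must report the same version. Restated in isolation (illustrative sketch mirroring nfp_net_ebpf_capable() above, not additional driver code):

static bool demo_bpf_capable(struct nfp_net *nn, struct nfp_app_bpf *bpf)
{
	return (nn->cap & NFP_NET_CFG_CTRL_BPF) &&
	       bpf->abi_version &&
	       nn_readb(nn, NFP_NET_CFG_BPF_ABI) == bpf->abi_version;
}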
- * - * This software is dual licensed under the GNU General License Version 2, - * June 1991 as shown in the file COPYING in the top-level directory of this - * source tree or the BSD 2-Clause License provided below. You have the - * option to license this software under the complete terms of either license. - * - * The BSD 2-Clause License: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * 1. Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * 2. Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */ +/* Copyright (C) 2016-2018 Netronome Systems, Inc. */ #ifndef __NFP_BPF_H__ #define __NFP_BPF_H__ 1 @@ -61,6 +31,8 @@ enum nfp_relo_type { /* internal jumps to parts of the outro */ RELO_BR_GO_OUT, RELO_BR_GO_ABORT, + RELO_BR_GO_CALL_PUSH_REGS, + RELO_BR_GO_CALL_POP_REGS, /* external jumps to fixed addresses */ RELO_BR_NEXT_PKT, RELO_BR_HELPER, @@ -104,6 +76,7 @@ enum pkt_vec { #define imma_a(np) reg_a(STATIC_REG_IMMA) #define imma_b(np) reg_b(STATIC_REG_IMMA) #define imm_both(np) reg_both(STATIC_REG_IMM) +#define ret_reg(np) imm_a(np) #define NFP_BPF_ABI_FLAGS reg_imm(0) #define NFP_BPF_ABI_FLAG_MARK 1 @@ -121,12 +94,17 @@ enum pkt_vec { * @cmsg_replies: received cmsg replies waiting to be consumed * @cmsg_wq: work queue for waiting for cmsg replies * + * @cmsg_key_sz: size of key in cmsg element array + * @cmsg_val_sz: size of value in cmsg element array + * * @map_list: list of offloaded maps * @maps_in_use: number of currently offloaded maps * @map_elems_in_use: number of elements allocated to offloaded maps * * @maps_neutral: hash table of offload-neutral maps (on pointer) * + * @abi_version: global BPF ABI version + * * @adjust_head: adjust head capability * @adjust_head.flags: extra flags for adjust head * @adjust_head.off_min: minimal packet offset within buffer required @@ -164,12 +142,17 @@ struct nfp_app_bpf { struct sk_buff_head cmsg_replies; struct wait_queue_head cmsg_wq; + unsigned int cmsg_key_sz; + unsigned int cmsg_val_sz; + struct list_head map_list; unsigned int maps_in_use; unsigned int map_elems_in_use; struct rhashtable maps_neutral; + u32 abi_version; + struct nfp_bpf_cap_adjust_head { u32 flags; int off_min; @@ -252,7 +235,9 @@ struct nfp_bpf_reg_state { bool var_off; }; -#define FLAG_INSN_IS_JUMP_DST BIT(0) +#define FLAG_INSN_IS_JUMP_DST BIT(0) +#define FLAG_INSN_IS_SUBPROG_START BIT(1) +#define FLAG_INSN_PTR_CALLER_STACK_FRAME BIT(2) /** * struct nfp_insn_meta - BPF instruction wrapper @@ -269,6 +254,7 @@ struct nfp_bpf_reg_state { * @xadd_maybe_16bit: 16bit immediate is possible * @jmp_dst: destination info for jump instructions * @jump_neg_op: jump 
instruction has inverted immediate, use ADD instead of SUB + * @num_insns_after_br: number of insns following a branch jump, used for fixup * @func_id: function id for call instructions * @arg1: arg1 for call instructions * @arg2: arg2 for call instructions @@ -279,6 +265,7 @@ struct nfp_bpf_reg_state { * @off: index of first generated machine instruction (in nfp_prog.prog) * @n: eBPF instruction number * @flags: eBPF instruction extra optimization flags + * @subprog_idx: index of subprogram to which the instruction belongs * @skip: skip this instruction (optimized out) * @double_cb: callback for second part of the instruction * @l: link on nfp_prog->insns list @@ -304,6 +291,7 @@ struct nfp_insn_meta { struct { struct nfp_insn_meta *jmp_dst; bool jump_neg_op; + u32 num_insns_after_br; /* only for BPF-to-BPF calls */ }; /* function calls */ struct { @@ -325,6 +313,7 @@ struct nfp_insn_meta { unsigned int off; unsigned short n; unsigned short flags; + unsigned short subprog_idx; bool skip; instr_cb_t double_cb; @@ -413,6 +402,34 @@ static inline bool is_mbpf_div(const struct nfp_insn_meta *meta) return is_mbpf_alu(meta) && mbpf_op(meta) == BPF_DIV; } +static inline bool is_mbpf_helper_call(const struct nfp_insn_meta *meta) +{ + struct bpf_insn insn = meta->insn; + + return insn.code == (BPF_JMP | BPF_CALL) && + insn.src_reg != BPF_PSEUDO_CALL; +} + +static inline bool is_mbpf_pseudo_call(const struct nfp_insn_meta *meta) +{ + struct bpf_insn insn = meta->insn; + + return insn.code == (BPF_JMP | BPF_CALL) && + insn.src_reg == BPF_PSEUDO_CALL; +} + +#define STACK_FRAME_ALIGN 64 + +/** + * struct nfp_bpf_subprog_info - nfp BPF sub-program (a.k.a. function) info + * @stack_depth: maximum stack depth used by this sub-program + * @needs_reg_push: whether sub-program uses callee-saved registers + */ +struct nfp_bpf_subprog_info { + u16 stack_depth; + u8 needs_reg_push : 1; +}; + /** * struct nfp_prog - nfp BPF program * @bpf: backpointer to the bpf app priv structure @@ -424,12 +441,16 @@ static inline bool is_mbpf_div(const struct nfp_insn_meta *meta) * @last_bpf_off: address of the last instruction translated from BPF * @tgt_out: jump target for normal exit * @tgt_abort: jump target for abort (e.g. access outside of packet buffer) + * @tgt_call_push_regs: jump target for subroutine for saving R6~R9 to stack + * @tgt_call_pop_regs: jump target for subroutine used for restoring R6~R9 * @n_translated: number of successfully translated instructions (for errors) * @error: error code if something went wrong - * @stack_depth: max stack depth from the verifier + * @stack_frame_depth: max stack depth for current frame * @adjust_head_location: if program has single adjust head call - the insn no. 
* @map_records_cnt: the number of map pointers recorded for this prog + * @subprog_cnt: number of sub-programs, including main function * @map_records: the map record pointers from bpf->maps_neutral + * @subprog: pointer to an array of objects holding info about sub-programs * @insns: list of BPF instruction wrappers (struct nfp_insn_meta) */ struct nfp_prog { @@ -446,15 +467,19 @@ struct nfp_prog { unsigned int last_bpf_off; unsigned int tgt_out; unsigned int tgt_abort; + unsigned int tgt_call_push_regs; + unsigned int tgt_call_pop_regs; unsigned int n_translated; int error; - unsigned int stack_depth; + unsigned int stack_frame_depth; unsigned int adjust_head_location; unsigned int map_records_cnt; + unsigned int subprog_cnt; struct nfp_bpf_neutral_map **map_records; + struct nfp_bpf_subprog_info *subprog; struct list_head insns; }; @@ -471,6 +496,7 @@ struct nfp_bpf_vnic { unsigned int tgt_done; }; +bool nfp_is_subprog_start(struct nfp_insn_meta *meta); void nfp_bpf_jit_prepare(struct nfp_prog *nfp_prog, unsigned int cnt); int nfp_bpf_jit(struct nfp_prog *prog); bool nfp_bpf_supported_opcode(u8 code); @@ -492,6 +518,7 @@ nfp_bpf_goto_meta(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, void *nfp_bpf_relo_for_vnic(struct nfp_prog *nfp_prog, struct nfp_bpf_vnic *bv); +unsigned int nfp_bpf_ctrl_cmsg_mtu(struct nfp_app_bpf *bpf); long long int nfp_bpf_ctrl_alloc_map(struct nfp_app_bpf *bpf, struct bpf_map *map); void diff --git a/drivers/net/ethernet/netronome/nfp/bpf/offload.c b/drivers/net/ethernet/netronome/nfp/bpf/offload.c index 1ccd6371a15b..927e038d9f77 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/offload.c +++ b/drivers/net/ethernet/netronome/nfp/bpf/offload.c @@ -1,35 +1,5 @@ -/* - * Copyright (C) 2016-2018 Netronome Systems, Inc. - * - * This software is dual licensed under the GNU General License Version 2, - * June 1991 as shown in the file COPYING in the top-level directory of this - * source tree or the BSD 2-Clause License provided below. You have the - * option to license this software under the complete terms of either license. - * - * The BSD 2-Clause License: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * 1. Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * 2. Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +/* Copyright (C) 2016-2018 Netronome Systems, Inc. 
*/ /* * nfp_net_offload.c @@ -208,6 +178,8 @@ static void nfp_prog_free(struct nfp_prog *nfp_prog) { struct nfp_insn_meta *meta, *tmp; + kfree(nfp_prog->subprog); + list_for_each_entry_safe(meta, tmp, &nfp_prog->insns, l) { list_del(&meta->l); kfree(meta); @@ -250,18 +222,9 @@ err_free: static int nfp_bpf_translate(struct nfp_net *nn, struct bpf_prog *prog) { struct nfp_prog *nfp_prog = prog->aux->offload->dev_priv; - unsigned int stack_size; unsigned int max_instr; int err; - stack_size = nn_readb(nn, NFP_NET_CFG_BPF_STACK_SZ) * 64; - if (prog->aux->stack_depth > stack_size) { - nn_info(nn, "stack too large: program %dB > FW stack %dB\n", - prog->aux->stack_depth, stack_size); - return -EOPNOTSUPP; - } - nfp_prog->stack_depth = round_up(prog->aux->stack_depth, 4); - max_instr = nn_readw(nn, NFP_NET_CFG_BPF_MAX_LEN); nfp_prog->__prog_alloc_len = max_instr * sizeof(u64); diff --git a/drivers/net/ethernet/netronome/nfp/bpf/verifier.c b/drivers/net/ethernet/netronome/nfp/bpf/verifier.c index a6e9248669e1..193dd685b365 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/verifier.c +++ b/drivers/net/ethernet/netronome/nfp/bpf/verifier.c @@ -1,43 +1,15 @@ -/* - * Copyright (C) 2016-2018 Netronome Systems, Inc. - * - * This software is dual licensed under the GNU General License Version 2, - * June 1991 as shown in the file COPYING in the top-level directory of this - * source tree or the BSD 2-Clause License provided below. You have the - * option to license this software under the complete terms of either license. - * - * The BSD 2-Clause License: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * 1. Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * 2. Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +/* Copyright (C) 2016-2018 Netronome Systems, Inc. 
*/ #include <linux/bpf.h> #include <linux/bpf_verifier.h> #include <linux/kernel.h> +#include <linux/netdevice.h> #include <linux/pkt_cls.h> #include "../nfp_app.h" #include "../nfp_main.h" +#include "../nfp_net.h" #include "fw.h" #include "main.h" @@ -155,8 +127,9 @@ nfp_bpf_map_call_ok(const char *fname, struct bpf_verifier_env *env, } static int -nfp_bpf_check_call(struct nfp_prog *nfp_prog, struct bpf_verifier_env *env, - struct nfp_insn_meta *meta) +nfp_bpf_check_helper_call(struct nfp_prog *nfp_prog, + struct bpf_verifier_env *env, + struct nfp_insn_meta *meta) { const struct bpf_reg_state *reg1 = cur_regs(env) + BPF_REG_1; const struct bpf_reg_state *reg2 = cur_regs(env) + BPF_REG_2; @@ -333,6 +306,9 @@ nfp_bpf_check_stack_access(struct nfp_prog *nfp_prog, { s32 old_off, new_off; + if (reg->frameno != env->cur_state->curframe) + meta->flags |= FLAG_INSN_PTR_CALLER_STACK_FRAME; + if (!tnum_is_const(reg->var_off)) { pr_vlog(env, "variable ptr stack access\n"); return -EINVAL; @@ -620,8 +596,8 @@ nfp_verify_insn(struct bpf_verifier_env *env, int insn_idx, int prev_insn_idx) return -EINVAL; } - if (meta->insn.code == (BPF_JMP | BPF_CALL)) - return nfp_bpf_check_call(nfp_prog, env, meta); + if (is_mbpf_helper_call(meta)) + return nfp_bpf_check_helper_call(nfp_prog, env, meta); if (meta->insn.code == (BPF_JMP | BPF_EXIT)) return nfp_bpf_check_exit(nfp_prog, env); @@ -640,6 +616,131 @@ nfp_verify_insn(struct bpf_verifier_env *env, int insn_idx, int prev_insn_idx) return 0; } +static int +nfp_assign_subprog_idx_and_regs(struct bpf_verifier_env *env, + struct nfp_prog *nfp_prog) +{ + struct nfp_insn_meta *meta; + int index = 0; + + list_for_each_entry(meta, &nfp_prog->insns, l) { + if (nfp_is_subprog_start(meta)) + index++; + meta->subprog_idx = index; + + if (meta->insn.dst_reg >= BPF_REG_6 && + meta->insn.dst_reg <= BPF_REG_9) + nfp_prog->subprog[index].needs_reg_push = 1; + } + + if (index + 1 != nfp_prog->subprog_cnt) { + pr_vlog(env, "BUG: number of processed BPF functions is not consistent (processed %d, expected %d)\n", + index + 1, nfp_prog->subprog_cnt); + return -EFAULT; + } + + return 0; +} + +static unsigned int +nfp_bpf_get_stack_usage(struct nfp_prog *nfp_prog, unsigned int cnt) +{ + struct nfp_insn_meta *meta = nfp_prog_first_meta(nfp_prog); + unsigned int max_depth = 0, depth = 0, frame = 0; + struct nfp_insn_meta *ret_insn[MAX_CALL_FRAMES]; + unsigned short frame_depths[MAX_CALL_FRAMES]; + unsigned short ret_prog[MAX_CALL_FRAMES]; + unsigned short idx = meta->subprog_idx; + + /* Inspired from check_max_stack_depth() from kernel verifier. + * Starting from main subprogram, walk all instructions and recursively + * walk all callees that given subprogram can call. Since recursion is + * prevented by the kernel verifier, this algorithm only needs a local + * stack of MAX_CALL_FRAMES to remember callsites. + */ +process_subprog: + frame_depths[frame] = nfp_prog->subprog[idx].stack_depth; + frame_depths[frame] = round_up(frame_depths[frame], STACK_FRAME_ALIGN); + depth += frame_depths[frame]; + max_depth = max(max_depth, depth); + +continue_subprog: + for (; meta != nfp_prog_last_meta(nfp_prog) && meta->subprog_idx == idx; + meta = nfp_meta_next(meta)) { + if (!is_mbpf_pseudo_call(meta)) + continue; + + /* We found a call to a subprogram. Remember instruction to + * return to and subprog id. + */ + ret_insn[frame] = nfp_meta_next(meta); + ret_prog[frame] = idx; + + /* Find the callee and start processing it. 
*/ + meta = nfp_bpf_goto_meta(nfp_prog, meta, + meta->n + 1 + meta->insn.imm, cnt); + idx = meta->subprog_idx; + frame++; + goto process_subprog; + } + /* End of for() loop means the last instruction of the subprog was + * reached. If we popped all stack frames, return; otherwise, go on + * processing remaining instructions from the caller. + */ + if (frame == 0) + return max_depth; + + depth -= frame_depths[frame]; + frame--; + meta = ret_insn[frame]; + idx = ret_prog[frame]; + goto continue_subprog; +} + +static int nfp_bpf_finalize(struct bpf_verifier_env *env) +{ + unsigned int stack_size, stack_needed; + struct bpf_subprog_info *info; + struct nfp_prog *nfp_prog; + struct nfp_net *nn; + int i; + + nfp_prog = env->prog->aux->offload->dev_priv; + nfp_prog->subprog_cnt = env->subprog_cnt; + nfp_prog->subprog = kcalloc(nfp_prog->subprog_cnt, + sizeof(nfp_prog->subprog[0]), GFP_KERNEL); + if (!nfp_prog->subprog) + return -ENOMEM; + + nfp_assign_subprog_idx_and_regs(env, nfp_prog); + + info = env->subprog_info; + for (i = 0; i < nfp_prog->subprog_cnt; i++) { + nfp_prog->subprog[i].stack_depth = info[i].stack_depth; + + if (i == 0) + continue; + + /* Account for size of return address. */ + nfp_prog->subprog[i].stack_depth += REG_WIDTH; + /* Account for size of saved registers, if necessary. */ + if (nfp_prog->subprog[i].needs_reg_push) + nfp_prog->subprog[i].stack_depth += BPF_REG_SIZE * 4; + } + + nn = netdev_priv(env->prog->aux->offload->netdev); + stack_size = nn_readb(nn, NFP_NET_CFG_BPF_STACK_SZ) * 64; + stack_needed = nfp_bpf_get_stack_usage(nfp_prog, env->prog->len); + if (stack_needed > stack_size) { + pr_vlog(env, "stack too large: program %dB > FW stack %dB\n", + stack_needed, stack_size); + return -EOPNOTSUPP; + } + + return 0; +} + const struct bpf_prog_offload_ops nfp_bpf_analyzer_ops = { - .insn_hook = nfp_verify_insn, + .insn_hook = nfp_verify_insn, + .finalize = nfp_bpf_finalize, }; diff --git a/drivers/net/ethernet/netronome/nfp/flower/action.c b/drivers/net/ethernet/netronome/nfp/flower/action.c index 46ba0cf257c6..f2b1938236fe 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/action.c +++ b/drivers/net/ethernet/netronome/nfp/flower/action.c @@ -1,35 +1,5 @@ -/* - * Copyright (C) 2017 Netronome Systems, Inc. - * - * This software is dual licensed under the GNU General License Version 2, - * June 1991 as shown in the file COPYING in the top-level directory of this - * source tree or the BSD 2-Clause License provided below. You have the - * option to license this software under the complete terms of either license. - * - * The BSD 2-Clause License: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * 1. Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * 2. Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. 
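
The stack walk in nfp_bpf_get_stack_usage() above avoids recursion by keeping explicit ret_insn/ret_prog arrays, but the quantity it computes is easier to see recursively: round each frame up to STACK_FRAME_ALIGN and take the maximum over all call chains. A simplified model with a hypothetical adjacency-matrix call graph (recursing is safe here for the same reason the driver's walk terminates: the kernel verifier has already rejected recursive programs):

#include <stdio.h>

#define STACK_FRAME_ALIGN	64
#define MAX_SUBPROGS		4

static unsigned int round_up64(unsigned int x)
{
	return (x + STACK_FRAME_ALIGN - 1) & ~(STACK_FRAME_ALIGN - 1);
}

/* depth[i]: stack bytes used by subprog i;
 * calls[i][j]: nonzero if subprog i calls subprog j
 */
static unsigned int max_stack(const unsigned int *depth,
			      const int calls[][MAX_SUBPROGS], int idx)
{
	unsigned int max = round_up64(depth[idx]);
	int j;

	for (j = 0; j < MAX_SUBPROGS; j++) {
		if (calls[idx][j]) {
			unsigned int d = round_up64(depth[idx]) +
					 max_stack(depth, calls, j);
			if (d > max)
				max = d;
		}
	}
	return max;
}

int main(void)
{
	unsigned int depth[MAX_SUBPROGS] = { 40, 100, 8, 0 };
	int calls[MAX_SUBPROGS][MAX_SUBPROGS] = {
		[0] = { [1] = 1 },	/* main calls subprog 1 */
		[1] = { [2] = 1 },	/* subprog 1 calls subprog 2 */
	};

	/* 64 + 128 + 64 = 256 */
	printf("%u\n", max_stack(depth, calls, 0));
	return 0;
}
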
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +/* Copyright (C) 2017-2018 Netronome Systems, Inc. */ #include <linux/bitfield.h> #include <net/geneve.h> diff --git a/drivers/net/ethernet/netronome/nfp/flower/cmsg.c b/drivers/net/ethernet/netronome/nfp/flower/cmsg.c index cb8565222621..4c5eaf36d5bb 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/cmsg.c +++ b/drivers/net/ethernet/netronome/nfp/flower/cmsg.c @@ -1,35 +1,5 @@ -/* - * Copyright (C) 2015-2017 Netronome Systems, Inc. - * - * This software is dual licensed under the GNU General License Version 2, - * June 1991 as shown in the file COPYING in the top-level directory of this - * source tree or the BSD 2-Clause License provided below. You have the - * option to license this software under the complete terms of either license. - * - * The BSD 2-Clause License: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * 1. Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * 2. Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +/* Copyright (C) 2015-2018 Netronome Systems, Inc. */ #include <linux/bitfield.h> #include <linux/netdevice.h> diff --git a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h index 325954b829c8..29d673aa5277 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h +++ b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h @@ -1,35 +1,5 @@ -/* - * Copyright (C) 2017 Netronome Systems, Inc. - * - * This software is dual licensed under the GNU General License Version 2, - * June 1991 as shown in the file COPYING in the top-level directory of this - * source tree or the BSD 2-Clause License provided below. You have the - * option to license this software under the complete terms of either license. - * - * The BSD 2-Clause License: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * 1. Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * 2. Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. 
- * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */ +/* Copyright (C) 2017-2018 Netronome Systems, Inc. */ #ifndef NFP_FLOWER_CMSG_H #define NFP_FLOWER_CMSG_H diff --git a/drivers/net/ethernet/netronome/nfp/flower/lag_conf.c b/drivers/net/ethernet/netronome/nfp/flower/lag_conf.c index bf10598f66ae..81dcf5b318ba 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/lag_conf.c +++ b/drivers/net/ethernet/netronome/nfp/flower/lag_conf.c @@ -1,35 +1,5 @@ -/* - * Copyright (C) 2018 Netronome Systems, Inc. - * - * This software is dual licensed under the GNU General License Version 2, - * June 1991 as shown in the file COPYING in the top-level directory of this - * source tree or the BSD 2-Clause License provided below. You have the - * option to license this software under the complete terms of either license. - * - * The BSD 2-Clause License: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * 1. Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * 2. Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +/* Copyright (C) 2018 Netronome Systems, Inc. */ #include "main.h" diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.c b/drivers/net/ethernet/netronome/nfp/flower/main.c index e57d23746585..3a54728d2ea6 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/main.c +++ b/drivers/net/ethernet/netronome/nfp/flower/main.c @@ -1,35 +1,5 @@ -/* - * Copyright (C) 2017 Netronome Systems, Inc. - * - * This software is dual licensed under the GNU General License Version 2, - * June 1991 as shown in the file COPYING in the top-level directory of this - * source tree or the BSD 2-Clause License provided below. You have the - * option to license this software under the complete terms of either license. - * - * The BSD 2-Clause License: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * 1. Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * 2. 
Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +/* Copyright (C) 2017-2018 Netronome Systems, Inc. */ #include <linux/etherdevice.h> #include <linux/lockdep.h> @@ -518,8 +488,8 @@ err_clear_nn: static int nfp_flower_init(struct nfp_app *app) { const struct nfp_pf *pf = app->pf; + u64 version, features, ctx_count; struct nfp_flower_priv *app_priv; - u64 version, features; int err; if (!pf->eth_tbl) { @@ -543,6 +513,16 @@ static int nfp_flower_init(struct nfp_app *app) return err; } + ctx_count = nfp_rtsym_read_le(app->pf->rtbl, "CONFIG_FC_HOST_CTX_COUNT", + &err); + if (err) { + nfp_warn(app->cpp, + "FlowerNIC: unsupported host context count: %d\n", + err); + err = 0; + ctx_count = BIT(17); + } + /* We need to ensure hardware has enough flower capabilities. */ if (version != NFP_FLOWER_ALLOWED_VER) { nfp_warn(app->cpp, "FlowerNIC: unsupported firmware version\n"); @@ -553,6 +533,7 @@ static int nfp_flower_init(struct nfp_app *app) if (!app_priv) return -ENOMEM; + app_priv->stats_ring_size = roundup_pow_of_two(ctx_count); app->priv = app_priv; app_priv->app = app; skb_queue_head_init(&app_priv->cmsg_skbs_high); @@ -563,7 +544,7 @@ static int nfp_flower_init(struct nfp_app *app) init_waitqueue_head(&app_priv->mtu_conf.wait_q); spin_lock_init(&app_priv->mtu_conf.lock); - err = nfp_flower_metadata_init(app); + err = nfp_flower_metadata_init(app, ctx_count); if (err) goto err_free_app_priv; diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.h b/drivers/net/ethernet/netronome/nfp/flower/main.h index 81d941ab895c..90045bab95bf 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/main.h +++ b/drivers/net/ethernet/netronome/nfp/flower/main.h @@ -1,35 +1,5 @@ -/* - * Copyright (C) 2017 Netronome Systems, Inc. - * - * This software is dual licensed under the GNU General License Version 2, - * June 1991 as shown in the file COPYING in the top-level directory of this - * source tree or the BSD 2-Clause License provided below. You have the - * option to license this software under the complete terms of either license. - * - * The BSD 2-Clause License: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * 1. Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * 2. Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. 
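
The context-count probe above degrades gracefully: if the CONFIG_FC_HOST_CTX_COUNT symbol is missing, the driver logs a warning, assumes BIT(17) contexts as before, and sizes the stats ring to the next power of two. A user-space sketch of that fallback (read_ctx_count() stands in for nfp_rtsym_read_le() and is not a real API):

#include <stdint.h>
#include <stdio.h>

static uint64_t roundup_pow_of_two(uint64_t n)
{
	uint64_t p = 1;

	while (p < n)
		p <<= 1;
	return p;
}

static uint64_t read_ctx_count(int *err)
{
	*err = -1;	/* pretend the firmware symbol is missing */
	return 0;
}

int main(void)
{
	uint64_t ctx_count;
	int err;

	ctx_count = read_ctx_count(&err);
	if (err) {
		fprintf(stderr, "unsupported host context count: %d\n", err);
		ctx_count = 1ULL << 17;		/* BIT(17) fallback */
	}

	printf("stats ring size: %llu\n",
	       (unsigned long long)roundup_pow_of_two(ctx_count));
	return 0;
}
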
- * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */ +/* Copyright (C) 2017-2018 Netronome Systems, Inc. */ #ifndef __NFP_FLOWER_H__ #define __NFP_FLOWER_H__ 1 @@ -38,6 +8,7 @@ #include <linux/circ_buf.h> #include <linux/hashtable.h> +#include <linux/rhashtable.h> #include <linux/time64.h> #include <linux/types.h> #include <net/pkt_cls.h> @@ -50,10 +21,8 @@ struct net_device; struct nfp_app; #define NFP_FL_STATS_CTX_DONT_CARE cpu_to_be32(0xffffffff) -#define NFP_FL_STATS_ENTRY_RS BIT(20) -#define NFP_FL_STATS_ELEM_RS 4 -#define NFP_FL_REPEATED_HASH_MAX BIT(17) -#define NFP_FLOWER_HASH_BITS 19 +#define NFP_FL_STATS_ELEM_RS FIELD_SIZEOF(struct nfp_fl_stats_id, \ + init_unalloc) #define NFP_FLOWER_MASK_ENTRY_RS 256 #define NFP_FLOWER_MASK_ELEMENT_RS 1 #define NFP_FLOWER_MASK_HASH_BITS 10 @@ -138,7 +107,10 @@ struct nfp_fl_lag { * @stats_ids: List of free stats ids * @mask_ids: List of free mask ids * @mask_table: Hash table used to store masks + * @stats_ring_size: Maximum number of allowed stats ids * @flow_table: Hash table used to store flower rules + * @stats: Stored stats updates for flower rules + * @stats_lock: Lock for flower rule stats updates * @cmsg_work: Workqueue for control messages processing * @cmsg_skbs_high: List of higher priority skbs for control message * processing @@ -171,7 +143,10 @@ struct nfp_flower_priv { struct nfp_fl_stats_id stats_ids; struct nfp_fl_mask_id mask_ids; DECLARE_HASHTABLE(mask_table, NFP_FLOWER_MASK_HASH_BITS); - DECLARE_HASHTABLE(flow_table, NFP_FLOWER_HASH_BITS); + u32 stats_ring_size; + struct rhashtable flow_table; + struct nfp_fl_stats *stats; + spinlock_t stats_lock; /* lock stats */ struct work_struct cmsg_work; struct sk_buff_head cmsg_skbs_high; struct sk_buff_head cmsg_skbs_low; @@ -227,10 +202,8 @@ struct nfp_fl_stats { struct nfp_fl_payload { struct nfp_fl_rule_metadata meta; unsigned long tc_flower_cookie; - struct hlist_node link; + struct rhash_head fl_node; struct rcu_head rcu; - spinlock_t lock; /* lock stats */ - struct nfp_fl_stats stats; __be32 nfp_tun_ipv4_addr; struct net_device *ingress_dev; char *unmasked_data; @@ -239,6 +212,8 @@ struct nfp_fl_payload { bool ingress_offload; }; +extern const struct rhashtable_params nfp_flower_table_params; + struct nfp_fl_stats_frame { __be32 stats_con_id; __be32 pkt_count; @@ -246,7 +221,7 @@ struct nfp_fl_stats_frame { __be64 stats_cookie; }; -int nfp_flower_metadata_init(struct nfp_app *app); +int nfp_flower_metadata_init(struct nfp_app *app, u64 host_ctx_count); void nfp_flower_metadata_cleanup(struct nfp_app *app); int nfp_flower_setup_tc(struct nfp_app *app, struct net_device *netdev, diff --git a/drivers/net/ethernet/netronome/nfp/flower/match.c b/drivers/net/ethernet/netronome/nfp/flower/match.c index 17acb8cc6044..e54fb6034326 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/match.c +++ b/drivers/net/ethernet/netronome/nfp/flower/match.c @@ -1,35 +1,5 @@ -/* - * Copyright (C) 2017 Netronome Systems, Inc. 
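
The struct changes above move flow statistics out of each nfp_fl_payload, which loses its per-flow lock and stats fields, into a single array in nfp_flower_priv indexed by the firmware's host context id and guarded by one stats_lock. A sketch of that layout (a pthread mutex stands in for the kernel spinlock; the modulo bound is illustrative, the driver indexes the array directly):

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

struct fl_stats {
	uint64_t pkts;
	uint64_t bytes;
};

#define RING_SIZE 8

static struct fl_stats stats[RING_SIZE];
static pthread_mutex_t stats_lock = PTHREAD_MUTEX_INITIALIZER;

/* One lock covers all contexts; updates from the firmware's stats
 * messages and reads from the stats callback both take it.
 */
static void rx_flow_stats(uint32_t ctx_id, uint32_t pkts, uint64_t bytes)
{
	pthread_mutex_lock(&stats_lock);
	stats[ctx_id % RING_SIZE].pkts += pkts;
	stats[ctx_id % RING_SIZE].bytes += bytes;
	pthread_mutex_unlock(&stats_lock);
}

int main(void)
{
	rx_flow_stats(3, 10, 1500);
	printf("ctx 3: %llu pkts, %llu bytes\n",
	       (unsigned long long)stats[3].pkts,
	       (unsigned long long)stats[3].bytes);
	return 0;
}
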
- * - * This software is dual licensed under the GNU General License Version 2, - * June 1991 as shown in the file COPYING in the top-level directory of this - * source tree or the BSD 2-Clause License provided below. You have the - * option to license this software under the complete terms of either license. - * - * The BSD 2-Clause License: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * 1. Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * 2. Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +/* Copyright (C) 2017-2018 Netronome Systems, Inc. */ #include <linux/bitfield.h> #include <net/pkt_cls.h> diff --git a/drivers/net/ethernet/netronome/nfp/flower/metadata.c b/drivers/net/ethernet/netronome/nfp/flower/metadata.c index c098730544b7..48729bf171e0 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/metadata.c +++ b/drivers/net/ethernet/netronome/nfp/flower/metadata.c @@ -1,35 +1,5 @@ -/* - * Copyright (C) 2017 Netronome Systems, Inc. - * - * This software is dual licensed under the GNU General License Version 2, - * June 1991 as shown in the file COPYING in the top-level directory of this - * source tree or the BSD 2-Clause License provided below. You have the - * option to license this software under the complete terms of either license. - * - * The BSD 2-Clause License: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * 1. Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * 2. Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +/* Copyright (C) 2017-2018 Netronome Systems, Inc. 
*/ #include <linux/hash.h> #include <linux/hashtable.h> @@ -48,6 +18,12 @@ struct nfp_mask_id_table { u8 mask_id; }; +struct nfp_fl_flow_table_cmp_arg { + struct net_device *netdev; + unsigned long cookie; + __be32 host_ctx; +}; + static int nfp_release_stats_entry(struct nfp_app *app, u32 stats_context_id) { struct nfp_flower_priv *priv = app->priv; @@ -55,14 +31,14 @@ static int nfp_release_stats_entry(struct nfp_app *app, u32 stats_context_id) ring = &priv->stats_ids.free_list; /* Check if buffer is full. */ - if (!CIRC_SPACE(ring->head, ring->tail, NFP_FL_STATS_ENTRY_RS * - NFP_FL_STATS_ELEM_RS - + if (!CIRC_SPACE(ring->head, ring->tail, + priv->stats_ring_size * NFP_FL_STATS_ELEM_RS - NFP_FL_STATS_ELEM_RS + 1)) return -ENOBUFS; memcpy(&ring->buf[ring->head], &stats_context_id, NFP_FL_STATS_ELEM_RS); ring->head = (ring->head + NFP_FL_STATS_ELEM_RS) % - (NFP_FL_STATS_ENTRY_RS * NFP_FL_STATS_ELEM_RS); + (priv->stats_ring_size * NFP_FL_STATS_ELEM_RS); return 0; } @@ -74,7 +50,7 @@ static int nfp_get_stats_entry(struct nfp_app *app, u32 *stats_context_id) struct circ_buf *ring; ring = &priv->stats_ids.free_list; - freed_stats_id = NFP_FL_STATS_ENTRY_RS; + freed_stats_id = priv->stats_ring_size; /* Check for unallocated entries first. */ if (priv->stats_ids.init_unalloc > 0) { *stats_context_id = priv->stats_ids.init_unalloc - 1; @@ -92,7 +68,7 @@ static int nfp_get_stats_entry(struct nfp_app *app, u32 *stats_context_id) *stats_context_id = temp_stats_id; memcpy(&ring->buf[ring->tail], &freed_stats_id, NFP_FL_STATS_ELEM_RS); ring->tail = (ring->tail + NFP_FL_STATS_ELEM_RS) % - (NFP_FL_STATS_ENTRY_RS * NFP_FL_STATS_ELEM_RS); + (priv->stats_ring_size * NFP_FL_STATS_ELEM_RS); return 0; } @@ -102,56 +78,37 @@ struct nfp_fl_payload * nfp_flower_search_fl_table(struct nfp_app *app, unsigned long tc_flower_cookie, struct net_device *netdev, __be32 host_ctx) { + struct nfp_fl_flow_table_cmp_arg flower_cmp_arg; struct nfp_flower_priv *priv = app->priv; - struct nfp_fl_payload *flower_entry; - hash_for_each_possible_rcu(priv->flow_table, flower_entry, link, - tc_flower_cookie) - if (flower_entry->tc_flower_cookie == tc_flower_cookie && - (!netdev || flower_entry->ingress_dev == netdev) && - (host_ctx == NFP_FL_STATS_CTX_DONT_CARE || - flower_entry->meta.host_ctx_id == host_ctx)) - return flower_entry; + flower_cmp_arg.netdev = netdev; + flower_cmp_arg.cookie = tc_flower_cookie; + flower_cmp_arg.host_ctx = host_ctx; - return NULL; -} - -static void -nfp_flower_update_stats(struct nfp_app *app, struct nfp_fl_stats_frame *stats) -{ - struct nfp_fl_payload *nfp_flow; - unsigned long flower_cookie; - - flower_cookie = be64_to_cpu(stats->stats_cookie); - - rcu_read_lock(); - nfp_flow = nfp_flower_search_fl_table(app, flower_cookie, NULL, - stats->stats_con_id); - if (!nfp_flow) - goto exit_rcu_unlock; - - spin_lock(&nfp_flow->lock); - nfp_flow->stats.pkts += be32_to_cpu(stats->pkt_count); - nfp_flow->stats.bytes += be64_to_cpu(stats->byte_count); - nfp_flow->stats.used = jiffies; - spin_unlock(&nfp_flow->lock); - -exit_rcu_unlock: - rcu_read_unlock(); + return rhashtable_lookup_fast(&priv->flow_table, &flower_cmp_arg, + nfp_flower_table_params); } void nfp_flower_rx_flow_stats(struct nfp_app *app, struct sk_buff *skb) { unsigned int msg_len = nfp_flower_cmsg_get_data_len(skb); - struct nfp_fl_stats_frame *stats_frame; + struct nfp_flower_priv *priv = app->priv; + struct nfp_fl_stats_frame *stats; unsigned char *msg; + u32 ctx_id; int i; msg = nfp_flower_cmsg_get_data(skb); - stats_frame = (struct 
nfp_fl_stats_frame *)msg; - for (i = 0; i < msg_len / sizeof(*stats_frame); i++) - nfp_flower_update_stats(app, stats_frame + i); + spin_lock(&priv->stats_lock); + for (i = 0; i < msg_len / sizeof(*stats); i++) { + stats = (struct nfp_fl_stats_frame *)msg + i; + ctx_id = be32_to_cpu(stats->stats_con_id); + priv->stats[ctx_id].pkts += be32_to_cpu(stats->pkt_count); + priv->stats[ctx_id].bytes += be64_to_cpu(stats->byte_count); + priv->stats[ctx_id].used = jiffies; + } + spin_unlock(&priv->stats_lock); } static int nfp_release_mask_id(struct nfp_app *app, u8 mask_id) @@ -345,9 +302,9 @@ int nfp_compile_flow_metadata(struct nfp_app *app, /* Update flow payload with mask ids. */ nfp_flow->unmasked_data[NFP_FL_MASK_ID_LOCATION] = new_mask_id; - nfp_flow->stats.pkts = 0; - nfp_flow->stats.bytes = 0; - nfp_flow->stats.used = jiffies; + priv->stats[stats_cxt].pkts = 0; + priv->stats[stats_cxt].bytes = 0; + priv->stats[stats_cxt].used = jiffies; check_entry = nfp_flower_search_fl_table(app, flow->cookie, netdev, NFP_FL_STATS_CTX_DONT_CARE); @@ -389,12 +346,56 @@ int nfp_modify_flow_metadata(struct nfp_app *app, return nfp_release_stats_entry(app, temp_ctx_id); } -int nfp_flower_metadata_init(struct nfp_app *app) +static int nfp_fl_obj_cmpfn(struct rhashtable_compare_arg *arg, + const void *obj) +{ + const struct nfp_fl_flow_table_cmp_arg *cmp_arg = arg->key; + const struct nfp_fl_payload *flow_entry = obj; + + if ((!cmp_arg->netdev || flow_entry->ingress_dev == cmp_arg->netdev) && + (cmp_arg->host_ctx == NFP_FL_STATS_CTX_DONT_CARE || + flow_entry->meta.host_ctx_id == cmp_arg->host_ctx)) + return flow_entry->tc_flower_cookie != cmp_arg->cookie; + + return 1; +} + +static u32 nfp_fl_obj_hashfn(const void *data, u32 len, u32 seed) +{ + const struct nfp_fl_payload *flower_entry = data; + + return jhash2((u32 *)&flower_entry->tc_flower_cookie, + sizeof(flower_entry->tc_flower_cookie) / sizeof(u32), + seed); +} + +static u32 nfp_fl_key_hashfn(const void *data, u32 len, u32 seed) +{ + const struct nfp_fl_flow_table_cmp_arg *cmp_arg = data; + + return jhash2((u32 *)&cmp_arg->cookie, + sizeof(cmp_arg->cookie) / sizeof(u32), seed); +} + +const struct rhashtable_params nfp_flower_table_params = { + .head_offset = offsetof(struct nfp_fl_payload, fl_node), + .hashfn = nfp_fl_key_hashfn, + .obj_cmpfn = nfp_fl_obj_cmpfn, + .obj_hashfn = nfp_fl_obj_hashfn, + .automatic_shrinking = true, +}; + +int nfp_flower_metadata_init(struct nfp_app *app, u64 host_ctx_count) { struct nfp_flower_priv *priv = app->priv; + int err; hash_init(priv->mask_table); - hash_init(priv->flow_table); + + err = rhashtable_init(&priv->flow_table, &nfp_flower_table_params); + if (err) + return err; + get_random_bytes(&priv->mask_id_seed, sizeof(priv->mask_id_seed)); /* Init ring buffer and unallocated mask_ids. */ @@ -402,7 +403,7 @@ int nfp_flower_metadata_init(struct nfp_app *app) kmalloc_array(NFP_FLOWER_MASK_ENTRY_RS, NFP_FLOWER_MASK_ELEMENT_RS, GFP_KERNEL); if (!priv->mask_ids.mask_id_free_list.buf) - return -ENOMEM; + goto err_free_flow_table; priv->mask_ids.init_unallocated = NFP_FLOWER_MASK_ENTRY_RS - 1; @@ -416,18 +417,29 @@ int nfp_flower_metadata_init(struct nfp_app *app) /* Init ring buffer and unallocated stats_ids. 
*/ priv->stats_ids.free_list.buf = vmalloc(array_size(NFP_FL_STATS_ELEM_RS, - NFP_FL_STATS_ENTRY_RS)); + priv->stats_ring_size)); if (!priv->stats_ids.free_list.buf) goto err_free_last_used; - priv->stats_ids.init_unalloc = NFP_FL_REPEATED_HASH_MAX; + priv->stats_ids.init_unalloc = host_ctx_count; + + priv->stats = kvmalloc_array(priv->stats_ring_size, + sizeof(struct nfp_fl_stats), GFP_KERNEL); + if (!priv->stats) + goto err_free_ring_buf; + + spin_lock_init(&priv->stats_lock); return 0; +err_free_ring_buf: + vfree(priv->stats_ids.free_list.buf); err_free_last_used: kfree(priv->mask_ids.last_used); err_free_mask_id: kfree(priv->mask_ids.mask_id_free_list.buf); +err_free_flow_table: + rhashtable_destroy(&priv->flow_table); return -ENOMEM; } @@ -438,6 +450,9 @@ void nfp_flower_metadata_cleanup(struct nfp_app *app) if (!priv) return; + rhashtable_free_and_destroy(&priv->flow_table, + nfp_check_rhashtable_empty, NULL); + kvfree(priv->stats); kfree(priv->mask_ids.mask_id_free_list.buf); kfree(priv->mask_ids.last_used); vfree(priv->stats_ids.free_list.buf); diff --git a/drivers/net/ethernet/netronome/nfp/flower/offload.c b/drivers/net/ethernet/netronome/nfp/flower/offload.c index bd19624f10cf..29c95423ab64 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/offload.c +++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c @@ -1,35 +1,5 @@ -/* - * Copyright (C) 2017 Netronome Systems, Inc. - * - * This software is dual licensed under the GNU General License Version 2, - * June 1991 as shown in the file COPYING in the top-level directory of this - * source tree or the BSD 2-Clause License provided below. You have the - * option to license this software under the complete terms of either license. - * - * The BSD 2-Clause License: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * 1. Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * 2. Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +/* Copyright (C) 2017-2018 Netronome Systems, Inc. 
*/ #include <linux/skbuff.h> #include <net/devlink.h> @@ -428,8 +398,6 @@ nfp_flower_allocate_new(struct nfp_fl_key_ls *key_layer, bool egress) flow_pay->nfp_tun_ipv4_addr = 0; flow_pay->meta.flags = 0; - spin_lock_init(&flow_pay->lock); - flow_pay->ingress_offload = !egress; return flow_pay; @@ -513,9 +481,12 @@ nfp_flower_add_offload(struct nfp_app *app, struct net_device *netdev, if (err) goto err_destroy_flow; - INIT_HLIST_NODE(&flow_pay->link); flow_pay->tc_flower_cookie = flow->cookie; - hash_add_rcu(priv->flow_table, &flow_pay->link, flow->cookie); + err = rhashtable_insert_fast(&priv->flow_table, &flow_pay->fl_node, + nfp_flower_table_params); + if (err) + goto err_destroy_flow; + port->tc_offload_cnt++; /* Deallocate flow payload when flower rule has been destroyed. */ @@ -550,6 +521,7 @@ nfp_flower_del_offload(struct nfp_app *app, struct net_device *netdev, struct tc_cls_flower_offload *flow, bool egress) { struct nfp_port *port = nfp_port_from_netdev(netdev); + struct nfp_flower_priv *priv = app->priv; struct nfp_fl_payload *nfp_flow; struct net_device *ingr_dev; int err; @@ -573,11 +545,13 @@ nfp_flower_del_offload(struct nfp_app *app, struct net_device *netdev, goto err_free_flow; err_free_flow: - hash_del_rcu(&nfp_flow->link); port->tc_offload_cnt--; kfree(nfp_flow->action_data); kfree(nfp_flow->mask_data); kfree(nfp_flow->unmasked_data); + WARN_ON_ONCE(rhashtable_remove_fast(&priv->flow_table, + &nfp_flow->fl_node, + nfp_flower_table_params)); kfree_rcu(nfp_flow, rcu); return err; } @@ -598,8 +572,10 @@ static int nfp_flower_get_stats(struct nfp_app *app, struct net_device *netdev, struct tc_cls_flower_offload *flow, bool egress) { + struct nfp_flower_priv *priv = app->priv; struct nfp_fl_payload *nfp_flow; struct net_device *ingr_dev; + u32 ctx_id; ingr_dev = egress ? NULL : netdev; nfp_flow = nfp_flower_search_fl_table(app, flow->cookie, ingr_dev, @@ -610,13 +586,16 @@ nfp_flower_get_stats(struct nfp_app *app, struct net_device *netdev, if (nfp_flow->ingress_offload && egress) return 0; - spin_lock_bh(&nfp_flow->lock); - tcf_exts_stats_update(flow->exts, nfp_flow->stats.bytes, - nfp_flow->stats.pkts, nfp_flow->stats.used); + ctx_id = be32_to_cpu(nfp_flow->meta.host_ctx_id); + + spin_lock_bh(&priv->stats_lock); + tcf_exts_stats_update(flow->exts, priv->stats[ctx_id].bytes, + priv->stats[ctx_id].pkts, + priv->stats[ctx_id].used); - nfp_flow->stats.pkts = 0; - nfp_flow->stats.bytes = 0; - spin_unlock_bh(&nfp_flow->lock); + priv->stats[ctx_id].pkts = 0; + priv->stats[ctx_id].bytes = 0; + spin_unlock_bh(&priv->stats_lock); return 0; } diff --git a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c index 382bb93cb090..30c926c4bc47 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c +++ b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c @@ -1,35 +1,5 @@ -/* - * Copyright (C) 2017 Netronome Systems, Inc. - * - * This software is dual licensed under the GNU General License Version 2, - * June 1991 as shown in the file COPYING in the top-level directory of this - * source tree or the BSD 2-Clause License provided below. You have the - * option to license this software under the complete terms of either license. - * - * The BSD 2-Clause License: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * 1. 
Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * 2. Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +/* Copyright (C) 2017-2018 Netronome Systems, Inc. */ #include <linux/etherdevice.h> #include <linux/inetdevice.h> diff --git a/drivers/net/ethernet/netronome/nfp/nfp_abi.h b/drivers/net/ethernet/netronome/nfp/nfp_abi.h index 8b56c27931bf..dd359a44adfb 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_abi.h +++ b/drivers/net/ethernet/netronome/nfp/nfp_abi.h @@ -1,36 +1,5 @@ -/* SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) */ -/* - * Copyright (C) 2018 Netronome Systems, Inc. - * - * This software is dual licensed under the GNU General License Version 2, - * June 1991 as shown in the file COPYING in the top-level directory of this - * source tree or the BSD 2-Clause License provided below. You have the - * option to license this software under the complete terms of either license. - * - * The BSD 2-Clause License: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * 1. Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * 2. Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */ +/* Copyright (C) 2018 Netronome Systems, Inc. */ #ifndef __NFP_ABI__ #define __NFP_ABI__ 1 diff --git a/drivers/net/ethernet/netronome/nfp/nfp_app.c b/drivers/net/ethernet/netronome/nfp/nfp_app.c index 8607d09ab732..68a0991aac22 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_app.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_app.c @@ -1,35 +1,5 @@ -/* - * Copyright (C) 2017-2018 Netronome Systems, Inc. - * - * This software is dual licensed under the GNU General License Version 2, - * June 1991 as shown in the file COPYING in the top-level directory of this - * source tree or the BSD 2-Clause License provided below. 
You have the - * option to license this software under the complete terms of either license. - * - * The BSD 2-Clause License: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * 1. Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * 2. Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +/* Copyright (C) 2017-2018 Netronome Systems, Inc. */ #include <linux/bug.h> #include <linux/lockdep.h> @@ -60,6 +30,11 @@ static const struct nfp_app_type *apps[] = { #endif }; +void nfp_check_rhashtable_empty(void *ptr, void *arg) +{ + WARN_ON_ONCE(1); +} + struct nfp_app *nfp_app_from_netdev(struct net_device *netdev) { if (nfp_netdev_is_nfp_net(netdev)) { diff --git a/drivers/net/ethernet/netronome/nfp/nfp_app.h b/drivers/net/ethernet/netronome/nfp/nfp_app.h index 4e1eb3395648..4d6ecf99b1cc 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_app.h +++ b/drivers/net/ethernet/netronome/nfp/nfp_app.h @@ -1,35 +1,5 @@ -/* - * Copyright (C) 2017 Netronome Systems, Inc. - * - * This software is dual licensed under the GNU General License Version 2, - * June 1991 as shown in the file COPYING in the top-level directory of this - * source tree or the BSD 2-Clause License provided below. You have the - * option to license this software under the complete terms of either license. - * - * The BSD 2-Clause License: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * 1. Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * 2. Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */ +/* Copyright (C) 2017-2018 Netronome Systems, Inc. 
*/ #ifndef _NFP_APP_H #define _NFP_APP_H 1 @@ -40,6 +10,8 @@ #include "nfp_net_repr.h" +#define NFP_APP_CTRL_MTU_MAX U32_MAX + struct bpf_prog; struct net_device; struct netdev_bpf; @@ -178,6 +150,7 @@ struct nfp_app_type { * @ctrl: pointer to ctrl vNIC struct * @reprs: array of pointers to representors * @type: pointer to const application ops and info + * @ctrl_mtu: MTU to set on the control vNIC (set in .init()) * @priv: app-specific priv data */ struct nfp_app { @@ -189,9 +162,11 @@ struct nfp_app { struct nfp_reprs __rcu *reprs[NFP_REPR_TYPE_MAX + 1]; const struct nfp_app_type *type; + unsigned int ctrl_mtu; void *priv; }; +void nfp_check_rhashtable_empty(void *ptr, void *arg); bool __nfp_ctrl_tx(struct nfp_net *nn, struct sk_buff *skb); bool nfp_ctrl_tx(struct nfp_net *nn, struct sk_buff *skb); diff --git a/drivers/net/ethernet/netronome/nfp/nfp_app_nic.c b/drivers/net/ethernet/netronome/nfp/nfp_app_nic.c index e2dfe4f168bb..f119277fd66c 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_app_nic.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_app_nic.c @@ -1,35 +1,5 @@ -/* - * Copyright (C) 2017 Netronome Systems, Inc. - * - * This software is dual licensed under the GNU General License Version 2, - * June 1991 as shown in the file COPYING in the top-level directory of this - * source tree or the BSD 2-Clause License provided below. You have the - * option to license this software under the complete terms of either license. - * - * The BSD 2-Clause License: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * 1. Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * 2. Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +/* Copyright (C) 2017-2018 Netronome Systems, Inc. */ #include "nfpcore/nfp_cpp.h" #include "nfpcore/nfp_nsp.h" diff --git a/drivers/net/ethernet/netronome/nfp/nfp_asm.c b/drivers/net/ethernet/netronome/nfp/nfp_asm.c index cc6ace2be8a9..b04b83687fe2 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_asm.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_asm.c @@ -1,35 +1,5 @@ -/* - * Copyright (C) 2016-2017 Netronome Systems, Inc. - * - * This software is dual licensed under the GNU General License Version 2, - * June 1991 as shown in the file COPYING in the top-level directory of this - * source tree or the BSD 2-Clause License provided below. You have the - * option to license this software under the complete terms of either license. - * - * The BSD 2-Clause License: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * 1. 
Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * 2. Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +/* Copyright (C) 2016-2018 Netronome Systems, Inc. */ #include <linux/bitops.h> #include <linux/errno.h> diff --git a/drivers/net/ethernet/netronome/nfp/nfp_asm.h b/drivers/net/ethernet/netronome/nfp/nfp_asm.h index fad0e62a910c..648c2810e5ba 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_asm.h +++ b/drivers/net/ethernet/netronome/nfp/nfp_asm.h @@ -1,35 +1,5 @@ -/* - * Copyright (C) 2016-2017 Netronome Systems, Inc. - * - * This software is dual licensed under the GNU General License Version 2, - * June 1991 as shown in the file COPYING in the top-level directory of this - * source tree or the BSD 2-Clause License provided below. You have the - * option to license this software under the complete terms of either license. - * - * The BSD 2-Clause License: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * 1. Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * 2. Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */ +/* Copyright (C) 2016-2018 Netronome Systems, Inc. 
*/ #ifndef __NFP_ASM_H__ #define __NFP_ASM_H__ 1 @@ -82,6 +52,15 @@ #define OP_BR_BIT_ADDR_LO OP_BR_ADDR_LO #define OP_BR_BIT_ADDR_HI OP_BR_ADDR_HI +#define OP_BR_ALU_BASE 0x0e800000000ULL +#define OP_BR_ALU_BASE_MASK 0x0ff80000000ULL +#define OP_BR_ALU_A_SRC 0x000000003ffULL +#define OP_BR_ALU_B_SRC 0x000000ffc00ULL +#define OP_BR_ALU_DEFBR 0x00000300000ULL +#define OP_BR_ALU_IMM_HI 0x0007fc00000ULL +#define OP_BR_ALU_SRC_LMEXTN 0x40000000000ULL +#define OP_BR_ALU_DST_LMEXTN 0x80000000000ULL + static inline bool nfp_is_br(u64 insn) { return (insn & OP_BR_BASE_MASK) == OP_BR_BASE || diff --git a/drivers/net/ethernet/netronome/nfp/nfp_devlink.c b/drivers/net/ethernet/netronome/nfp/nfp_devlink.c index db463e20a876..107b048b33b4 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_devlink.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_devlink.c @@ -1,35 +1,5 @@ -/* - * Copyright (C) 2017 Netronome Systems, Inc. - * - * This software is dual licensed under the GNU General License Version 2, - * June 1991 as shown in the file COPYING in the top-level directory of this - * source tree or the BSD 2-Clause License provided below. You have the - * option to license this software under the complete terms of either license. - * - * The BSD 2-Clause License: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * 1. Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * 2. Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +/* Copyright (C) 2017-2018 Netronome Systems, Inc. */ #include <linux/rtnetlink.h> #include <net/devlink.h> @@ -177,7 +147,8 @@ static int nfp_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode) return nfp_app_eswitch_mode_get(pf->app, mode); } -static int nfp_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode) +static int nfp_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode, + struct netlink_ext_ack *extack) { struct nfp_pf *pf = devlink_priv(devlink); int ret; diff --git a/drivers/net/ethernet/netronome/nfp/nfp_hwmon.c b/drivers/net/ethernet/netronome/nfp/nfp_hwmon.c index f0dcf45aeec1..5cabb1aa9c0c 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_hwmon.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_hwmon.c @@ -1,35 +1,5 @@ -/* - * Copyright (C) 2017 Netronome Systems, Inc. - * - * This software is dual licensed under the GNU General License Version 2, - * June 1991 as shown in the file COPYING in the top-level directory of this - * source tree or the BSD 2-Clause License provided below. You have the - * option to license this software under the complete terms of either license. 
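The OP_BR_ALU_* constants above are mask/base pairs describing the bit layout of the NFP branch-on-ALU instruction word. As with the other OP_* masks in nfp_asm, they are meant to be combined with the kernel's FIELD_PREP() helper. A sketch of an encoder in that style; the function name and choice of operands are illustrative, only the masks come from the hunk:

	#include <linux/bitfield.h>
	#include <linux/types.h>

	/* Pack A/B source registers and the default-branch field into a
	 * 64-bit branch-ALU instruction.  FIELD_PREP() shifts each value
	 * into the position its mask describes.
	 */
	static u64 nfp_example_br_alu(u16 areg, u16 breg, u8 defbr)
	{
		return OP_BR_ALU_BASE |
		       FIELD_PREP(OP_BR_ALU_A_SRC, areg) |
		       FIELD_PREP(OP_BR_ALU_B_SRC, breg) |
		       FIELD_PREP(OP_BR_ALU_DEFBR, defbr);
	}
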
- * - * The BSD 2-Clause License: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * 1. Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * 2. Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +/* Copyright (C) 2017 Netronome Systems, Inc. */ #include <linux/kernel.h> #include <linux/bitops.h> diff --git a/drivers/net/ethernet/netronome/nfp/nfp_main.c b/drivers/net/ethernet/netronome/nfp/nfp_main.c index 9474a4eed8ce..6c10e8d119e4 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_main.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_main.c @@ -1,35 +1,5 @@ -/* - * Copyright (C) 2015-2017 Netronome Systems, Inc. - * - * This software is dual licensed under the GNU General License Version 2, - * June 1991 as shown in the file COPYING in the top-level directory of this - * source tree or the BSD 2-Clause License provided below. You have the - * option to license this software under the complete terms of either license. - * - * The BSD 2-Clause License: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * 1. Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * 2. Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +/* Copyright (C) 2015-2018 Netronome Systems, Inc. */ /* * nfp_main.c diff --git a/drivers/net/ethernet/netronome/nfp/nfp_main.h b/drivers/net/ethernet/netronome/nfp/nfp_main.h index 595b3dc280e3..a3613a2e0aa5 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_main.h +++ b/drivers/net/ethernet/netronome/nfp/nfp_main.h @@ -1,35 +1,5 @@ -/* - * Copyright (C) 2015-2017 Netronome Systems, Inc. 
- * - * This software is dual licensed under the GNU General License Version 2, - * June 1991 as shown in the file COPYING in the top-level directory of this - * source tree or the BSD 2-Clause License provided below. You have the - * option to license this software under the complete terms of either license. - * - * The BSD 2-Clause License: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * 1. Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * 2. Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */ +/* Copyright (C) 2015-2018 Netronome Systems, Inc. */ /* * nfp_main.h diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net.h b/drivers/net/ethernet/netronome/nfp/nfp_net.h index 439e6ffe2f05..6f0c37d09256 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net.h +++ b/drivers/net/ethernet/netronome/nfp/nfp_net.h @@ -1,35 +1,5 @@ -/* - * Copyright (C) 2015-2017 Netronome Systems, Inc. - * - * This software is dual licensed under the GNU General License Version 2, - * June 1991 as shown in the file COPYING in the top-level directory of this - * source tree or the BSD 2-Clause License provided below. You have the - * option to license this software under the complete terms of either license. - * - * The BSD 2-Clause License: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * 1. Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * 2. Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */ +/* Copyright (C) 2015-2018 Netronome Systems, Inc. 
*/ /* * nfp_net.h diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c index d05e37fcc1b2..6bddfcfdec34 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c @@ -1,35 +1,5 @@ -/* - * Copyright (C) 2015-2017 Netronome Systems, Inc. - * - * This software is dual licensed under the GNU General License Version 2, - * June 1991 as shown in the file COPYING in the top-level directory of this - * source tree or the BSD 2-Clause License provided below. You have the - * option to license this software under the complete terms of either license. - * - * The BSD 2-Clause License: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * 1. Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * 2. Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +/* Copyright (C) 2015-2018 Netronome Systems, Inc. 
*/ /* * nfp_net_common.c @@ -2077,14 +2047,17 @@ nfp_ctrl_rx_one(struct nfp_net *nn, struct nfp_net_dp *dp, return true; } -static void nfp_ctrl_rx(struct nfp_net_r_vector *r_vec) +static bool nfp_ctrl_rx(struct nfp_net_r_vector *r_vec) { struct nfp_net_rx_ring *rx_ring = r_vec->rx_ring; struct nfp_net *nn = r_vec->nfp_net; struct nfp_net_dp *dp = &nn->dp; + unsigned int budget = 512; - while (nfp_ctrl_rx_one(nn, dp, r_vec, rx_ring)) + while (nfp_ctrl_rx_one(nn, dp, r_vec, rx_ring) && budget--) continue; + + return budget; } static void nfp_ctrl_poll(unsigned long arg) @@ -2096,9 +2069,13 @@ static void nfp_ctrl_poll(unsigned long arg) __nfp_ctrl_tx_queued(r_vec); spin_unlock(&r_vec->lock); - nfp_ctrl_rx(r_vec); - - nfp_net_irq_unmask(r_vec->nfp_net, r_vec->irq_entry); + if (nfp_ctrl_rx(r_vec)) { + nfp_net_irq_unmask(r_vec->nfp_net, r_vec->irq_entry); + } else { + tasklet_schedule(&r_vec->tasklet); + nn_dp_warn(&r_vec->nfp_net->dp, + "control message budget exceeded!\n"); + } } /* Setup and Configuration @@ -3877,10 +3854,20 @@ int nfp_net_init(struct nfp_net *nn) return err; /* Set default MTU and Freelist buffer size */ - if (nn->max_mtu < NFP_NET_DEFAULT_MTU) + if (!nfp_net_is_data_vnic(nn) && nn->app->ctrl_mtu) { + if (nn->app->ctrl_mtu <= nn->max_mtu) { + nn->dp.mtu = nn->app->ctrl_mtu; + } else { + if (nn->app->ctrl_mtu != NFP_APP_CTRL_MTU_MAX) + nn_warn(nn, "app requested MTU above max supported %u > %u\n", + nn->app->ctrl_mtu, nn->max_mtu); + nn->dp.mtu = nn->max_mtu; + } + } else if (nn->max_mtu < NFP_NET_DEFAULT_MTU) { nn->dp.mtu = nn->max_mtu; - else + } else { nn->dp.mtu = NFP_NET_DEFAULT_MTU; + } nn->dp.fl_bufsz = nfp_net_calc_fl_bufsz(&nn->dp); if (nfp_app_ctrl_uses_data_vnics(nn->app)) diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.c b/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.c index 2190836eaa1d..f2aaef976c7d 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.c @@ -1,35 +1,5 @@ -/* - * Copyright (C) 2018 Netronome Systems, Inc. - * - * This software is dual licensed under the GNU General License Version 2, - * June 1991 as shown in the file COPYING in the top-level directory of this - * source tree or the BSD 2-Clause License provided below. You have the - * option to license this software under the complete terms of either license. - * - * The BSD 2-Clause License: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * 1. Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * 2. Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +/* Copyright (C) 2018 Netronome Systems, Inc. 
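Two behavioural changes ride along with the relicensing in this file. First, nfp_ctrl_rx() now drains at most 512 control messages per tasklet run, and nfp_ctrl_poll() only re-arms the interrupt when the ring was emptied, rescheduling itself (with a warning) otherwise, so a flood of control messages cannot monopolize the CPU. Second, nfp_net_init() lets an app request a control-vNIC MTU via ctrl_mtu, clamping it to max_mtu and treating NFP_APP_CTRL_MTU_MAX as "largest supported". A slightly more explicit sketch of the budgeted-poll contract, with process_one() as a stand-in for nfp_ctrl_rx_one():

	/* Returns true while the ring still had a message to process. */
	static bool process_one(void);		/* stand-in */

	static bool example_ctrl_poll(void)
	{
		unsigned int budget = 512;

		/* Stop when the ring is empty or the budget is spent. */
		while (budget && process_one())
			budget--;

		/* Non-zero: drained with budget to spare, the caller may
		 * unmask the IRQ.  Zero: budget exhausted, the caller
		 * reschedules the tasklet so other softirq work gets a turn.
		 */
		return budget;
	}
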
*/ #include <linux/bitfield.h> #include <linux/device.h> diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h b/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h index a51490747689..d7c8518ac952 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h @@ -1,35 +1,5 @@ -/* - * Copyright (C) 2015-2018 Netronome Systems, Inc. - * - * This software is dual licensed under the GNU General License Version 2, - * June 1991 as shown in the file COPYING in the top-level directory of this - * source tree or the BSD 2-Clause License provided below. You have the - * option to license this software under the complete terms of either license. - * - * The BSD 2-Clause License: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * 1. Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * 2. Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */ +/* Copyright (C) 2015-2018 Netronome Systems, Inc. */ /* * nfp_net_ctrl.h @@ -264,7 +234,6 @@ * %NFP_NET_CFG_BPF_ADDR: DMA address of the buffer with JITed BPF code */ #define NFP_NET_CFG_BPF_ABI 0x0080 -#define NFP_NET_BPF_ABI 2 #define NFP_NET_CFG_BPF_CAP 0x0081 #define NFP_NET_BPF_CAP_RELO (1 << 0) /* seamless reload */ #define NFP_NET_CFG_BPF_MAX_LEN 0x0082 diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_debugdump.c b/drivers/net/ethernet/netronome/nfp/nfp_net_debugdump.c index b6b897840ac5..769ceef09756 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_debugdump.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_debugdump.c @@ -1,35 +1,5 @@ -/* - * Copyright (C) 2017 Netronome Systems, Inc. - * - * This software is dual licensed under the GNU General License Version 2, - * June 1991 as shown in the file COPYING in the top-level directory of this - * source tree or the BSD 2-Clause License provided below. You have the - * option to license this software under the complete terms of either license. - * - * The BSD 2-Clause License: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * 1. Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * 2. Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. 
- * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +/* Copyright (C) 2017-2018 Netronome Systems, Inc. */ #include <linux/ethtool.h> #include <linux/vmalloc.h> diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c b/drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c index 099b63d67451..69b1c9b62e3d 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c @@ -1,35 +1,5 @@ -/* - * Copyright (C) 2015-2017 Netronome Systems, Inc. - * - * This software is dual licensed under the GNU General License Version 2, - * June 1991 as shown in the file COPYING in the top-level directory of this - * source tree or the BSD 2-Clause License provided below. You have the - * option to license this software under the complete terms of either license. - * - * The BSD 2-Clause License: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * 1. Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * 2. Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +/* Copyright (C) 2015-2018 Netronome Systems, Inc. */ #include <linux/debugfs.h> #include <linux/module.h> #include <linux/rtnetlink.h> diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c index 6a79c8e4a7a4..cb9c512abc76 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c @@ -1,35 +1,5 @@ -/* - * Copyright (C) 2015-2017 Netronome Systems, Inc. - * - * This software is dual licensed under the GNU General License Version 2, - * June 1991 as shown in the file COPYING in the top-level directory of this - * source tree or the BSD 2-Clause License provided below. You have the - * option to license this software under the complete terms of either license. - * - * The BSD 2-Clause License: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * 1. 
Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * 2. Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +/* Copyright (C) 2015-2018 Netronome Systems, Inc. */ /* * nfp_net_ethtool.c diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_main.c b/drivers/net/ethernet/netronome/nfp/nfp_net_main.c index 0b1ac9c234d1..1e7d20468a34 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_main.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_main.c @@ -1,35 +1,5 @@ -/* - * Copyright (C) 2015-2017 Netronome Systems, Inc. - * - * This software is dual licensed under the GNU General License Version 2, - * June 1991 as shown in the file COPYING in the top-level directory of this - * source tree or the BSD 2-Clause License provided below. You have the - * option to license this software under the complete terms of either license. - * - * The BSD 2-Clause License: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * 1. Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * 2. Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +/* Copyright (C) 2015-2018 Netronome Systems, Inc. */ /* * nfp_net_main.c diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c index 18a09cdcd9c6..c09b893c30dd 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c @@ -1,35 +1,5 @@ -/* - * Copyright (C) 2017 Netronome Systems, Inc. - * - * This software is dual licensed under the GNU General License Version 2, - * June 1991 as shown in the file COPYING in the top-level directory of this - * source tree or the BSD 2-Clause License provided below. You have the - * option to license this software under the complete terms of either license. 
- * - * The BSD 2-Clause License: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * 1. Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * 2. Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +/* Copyright (C) 2017-2018 Netronome Systems, Inc. */ #include <linux/etherdevice.h> #include <linux/io-64-nonatomic-hi-lo.h> diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.h b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.h index 1bf2b18109ab..c412b94bfb97 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.h +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.h @@ -1,35 +1,5 @@ -/* - * Copyright (C) 2017 Netronome Systems, Inc. - * - * This software is dual licensed under the GNU General License Version 2, - * June 1991 as shown in the file COPYING in the top-level directory of this - * source tree or the BSD 2-Clause License provided below. You have the - * option to license this software under the complete terms of either license. - * - * The BSD 2-Clause License: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * 1. Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * 2. Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */ +/* Copyright (C) 2017-2018 Netronome Systems, Inc. */ #ifndef NFP_NET_REPR_H #define NFP_NET_REPR_H diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_sriov.c b/drivers/net/ethernet/netronome/nfp/nfp_net_sriov.c index 8b1b962cf1d1..b6ec46ed0540 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_sriov.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_sriov.c @@ -1,35 +1,5 @@ -/* - * Copyright (C) 2017 Netronome Systems, Inc. 
- * - * This software is dual licensed under the GNU General License Version 2, - * June 1991 as shown in the file COPYING in the top-level directory of this - * source tree or the BSD 2-Clause License provided below. You have the - * option to license this software under the complete terms of either license. - * - * The BSD 2-Clause License: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * 1. Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * 2. Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +/* Copyright (C) 2017 Netronome Systems, Inc. */ #include <linux/bitfield.h> #include <linux/errno.h> diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_sriov.h b/drivers/net/ethernet/netronome/nfp/nfp_net_sriov.h index e9df9d1eab8e..c9f09c5bb5ee 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_sriov.h +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_sriov.h @@ -1,35 +1,5 @@ -/* - * Copyright (C) 2017 Netronome Systems, Inc. - * - * This software is dual licensed under the GNU General License Version 2, - * June 1991 as shown in the file COPYING in the top-level directory of this - * source tree or the BSD 2-Clause License provided below. You have the - * option to license this software under the complete terms of either license. - * - * The BSD 2-Clause License: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * 1. Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * 2. Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */ +/* Copyright (C) 2017 Netronome Systems, Inc. 
*/ #ifndef _NFP_NET_SRIOV_H_ #define _NFP_NET_SRIOV_H_ diff --git a/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c b/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c index 68928c86b698..d2c1e9ea5668 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c @@ -1,35 +1,5 @@ -/* - * Copyright (C) 2015-2017 Netronome Systems, Inc. - * - * This software is dual licensed under the GNU General License Version 2, - * June 1991 as shown in the file COPYING in the top-level directory of this - * source tree or the BSD 2-Clause License provided below. You have the - * option to license this software under the complete terms of either license. - * - * The BSD 2-Clause License: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * 1. Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * 2. Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +/* Copyright (C) 2015-2018 Netronome Systems, Inc. */ /* * nfp_netvf_main.c diff --git a/drivers/net/ethernet/netronome/nfp/nfp_port.c b/drivers/net/ethernet/netronome/nfp/nfp_port.c index 9c1298114c70..86bc149ca231 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_port.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_port.c @@ -1,35 +1,5 @@ -/* - * Copyright (C) 2017 Netronome Systems, Inc. - * - * This software is dual licensed under the GNU General License Version 2, - * June 1991 as shown in the file COPYING in the top-level directory of this - * source tree or the BSD 2-Clause License provided below. You have the - * option to license this software under the complete terms of either license. - * - * The BSD 2-Clause License: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * 1. Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * 2. Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +/* Copyright (C) 2017-2018 Netronome Systems, Inc. */ #include <linux/lockdep.h> #include <linux/netdevice.h> diff --git a/drivers/net/ethernet/netronome/nfp/nfp_port.h b/drivers/net/ethernet/netronome/nfp/nfp_port.h index 51f10ae2d53e..b2479a2a49e5 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_port.h +++ b/drivers/net/ethernet/netronome/nfp/nfp_port.h @@ -1,35 +1,5 @@ -/* - * Copyright (C) 2017 Netronome Systems, Inc. - * - * This software is dual licensed under the GNU General License Version 2, - * June 1991 as shown in the file COPYING in the top-level directory of this - * source tree or the BSD 2-Clause License provided below. You have the - * option to license this software under the complete terms of either license. - * - * The BSD 2-Clause License: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * 1. Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * 2. Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */ +/* Copyright (C) 2017-2018 Netronome Systems, Inc. */ #ifndef _NFP_PORT_H_ #define _NFP_PORT_H_ diff --git a/drivers/net/ethernet/netronome/nfp/nfp_shared_buf.c b/drivers/net/ethernet/netronome/nfp/nfp_shared_buf.c index 0ecd83705368..814360ed3a20 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_shared_buf.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_shared_buf.c @@ -1,36 +1,5 @@ -// SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) -/* - * Copyright (C) 2018 Netronome Systems, Inc. - * - * This software is dual licensed under the GNU General License Version 2, - * June 1991 as shown in the file COPYING in the top-level directory of this - * source tree or the BSD 2-Clause License provided below. You have the - * option to license this software under the complete terms of either license. - * - * The BSD 2-Clause License: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * 1. Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * 2. 
Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +/* Copyright (C) 2018 Netronome Systems, Inc. */ #include <linux/kernel.h> #include <net/devlink.h> diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/crc32.h b/drivers/net/ethernet/netronome/nfp/nfpcore/crc32.h index 6cee6382deb4..afab6f0fc564 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/crc32.h +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/crc32.h @@ -1,35 +1,5 @@ -/* - * Copyright (C) 2015-2017 Netronome Systems, Inc. - * - * This software is dual licensed under the GNU General License Version 2, - * June 1991 as shown in the file COPYING in the top-level directory of this - * source tree or the BSD 2-Clause License provided below. You have the - * option to license this software under the complete terms of either license. - * - * The BSD 2-Clause License: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * 1. Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * 2. Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */ +/* Copyright (C) 2015-2017 Netronome Systems, Inc. */ #ifndef NFP_CRC32_H #define NFP_CRC32_H diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp.h b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp.h index f44d0a857314..db94b0bddc92 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp.h +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp.h @@ -1,35 +1,5 @@ -/* - * Copyright (C) 2015-2017 Netronome Systems, Inc. - * - * This software is dual licensed under the GNU General License Version 2, - * June 1991 as shown in the file COPYING in the top-level directory of this - * source tree or the BSD 2-Clause License provided below. You have the - * option to license this software under the complete terms of either license. 
- * - * The BSD 2-Clause License: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * 1. Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * 2. Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */ +/* Copyright (C) 2015-2018 Netronome Systems, Inc. */ /* * nfp.h diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000/nfp6000.h b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000/nfp6000.h index 0e497a6154db..4a12133850f5 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000/nfp6000.h +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000/nfp6000.h @@ -1,35 +1,5 @@ -/* - * Copyright (C) 2015-2017 Netronome Systems, Inc. - * - * This software is dual licensed under the GNU General License Version 2, - * June 1991 as shown in the file COPYING in the top-level directory of this - * source tree or the BSD 2-Clause License provided below. You have the - * option to license this software under the complete terms of either license. - * - * The BSD 2-Clause License: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * 1. Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * 2. Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */ +/* Copyright (C) 2015-2017 Netronome Systems, Inc. */ #ifndef NFP6000_NFP6000_H #define NFP6000_NFP6000_H diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000/nfp_xpb.h b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000/nfp_xpb.h index 40fb19939505..9a86ec11c5ba 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000/nfp_xpb.h +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000/nfp_xpb.h @@ -1,35 +1,5 @@ -/* - * Copyright (C) 2015-2017 Netronome Systems, Inc. 
- * - * This software is dual licensed under the GNU General License Version 2, - * June 1991 as shown in the file COPYING in the top-level directory of this - * source tree or the BSD 2-Clause License provided below. You have the - * option to license this software under the complete terms of either license. - * - * The BSD 2-Clause License: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * 1. Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * 2. Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */ +/* Copyright (C) 2015-2017 Netronome Systems, Inc. */ /* * nfp_xpb.h diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c index fd63d83bdea5..85d46f206b3c 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c @@ -1,35 +1,5 @@ -/* - * Copyright (C) 2015-2017 Netronome Systems, Inc. - * - * This software is dual licensed under the GNU General License Version 2, - * June 1991 as shown in the file COPYING in the top-level directory of this - * source tree or the BSD 2-Clause License provided below. You have the - * option to license this software under the complete terms of either license. - * - * The BSD 2-Clause License: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * 1. Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * 2. Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +/* Copyright (C) 2015-2018 Netronome Systems, Inc. 
*/ /* * nfp6000_pcie.c diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.h b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.h index 245d8aaaa97d..6d1bffa6eac6 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.h +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.h @@ -1,35 +1,5 @@ -/* - * Copyright (C) 2015-2017 Netronome Systems, Inc. - * - * This software is dual licensed under the GNU General License Version 2, - * June 1991 as shown in the file COPYING in the top-level directory of this - * source tree or the BSD 2-Clause License provided below. You have the - * option to license this software under the complete terms of either license. - * - * The BSD 2-Clause License: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * 1. Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * 2. Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */ +/* Copyright (C) 2015-2017 Netronome Systems, Inc. */ /* * nfp6000_pcie.h diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_arm.h b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_arm.h index 31fe92247f51..3d172e255693 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_arm.h +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_arm.h @@ -1,35 +1,5 @@ -/* - * Copyright (C) 2015-2017 Netronome Systems, Inc. - * - * This software is dual licensed under the GNU General License Version 2, - * June 1991 as shown in the file COPYING in the top-level directory of this - * source tree or the BSD 2-Clause License provided below. You have the - * option to license this software under the complete terms of either license. - * - * The BSD 2-Clause License: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * 1. Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * 2. Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */ +/* Copyright (C) 2015-2017 Netronome Systems, Inc. */ /* * nfp_arm.h diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpp.h b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpp.h index 123e29cba6d1..2dd0f5842873 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpp.h +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpp.h @@ -1,35 +1,5 @@ -/* - * Copyright (C) 2015-2017 Netronome Systems, Inc. - * - * This software is dual licensed under the GNU General License Version 2, - * June 1991 as shown in the file COPYING in the top-level directory of this - * source tree or the BSD 2-Clause License provided below. You have the - * option to license this software under the complete terms of either license. - * - * The BSD 2-Clause License: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * 1. Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * 2. Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */ +/* Copyright (C) 2015-2018 Netronome Systems, Inc. */ /* * nfp_cpp.h diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c index f7e1d79e735f..94994a939277 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c @@ -1,35 +1,5 @@ -/* - * Copyright (C) 2015-2017 Netronome Systems, Inc. - * - * This software is dual licensed under the GNU General License Version 2, - * June 1991 as shown in the file COPYING in the top-level directory of this - * source tree or the BSD 2-Clause License provided below. You have the - * option to license this software under the complete terms of either license. - * - * The BSD 2-Clause License: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * 1. Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * 2. Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. 
- * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +/* Copyright (C) 2015-2018 Netronome Systems, Inc. */ /* * nfp_cppcore.c diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpplib.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpplib.c index 03fcde5fa137..3cfecf105bde 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpplib.c +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpplib.c @@ -1,35 +1,5 @@ -/* - * Copyright (C) 2015-2017 Netronome Systems, Inc. - * - * This software is dual licensed under the GNU General License Version 2, - * June 1991 as shown in the file COPYING in the top-level directory of this - * source tree or the BSD 2-Clause License provided below. You have the - * option to license this software under the complete terms of either license. - * - * The BSD 2-Clause License: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * 1. Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * 2. Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +/* Copyright (C) 2015-2018 Netronome Systems, Inc. */ /* * nfp_cpplib.c diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_hwinfo.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_hwinfo.c index 063a9a6243d6..f05dd34ab89f 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_hwinfo.c +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_hwinfo.c @@ -1,35 +1,5 @@ -/* - * Copyright (C) 2015-2017 Netronome Systems, Inc. - * - * This software is dual licensed under the GNU General License Version 2, - * June 1991 as shown in the file COPYING in the top-level directory of this - * source tree or the BSD 2-Clause License provided below. You have the - * option to license this software under the complete terms of either license. - * - * The BSD 2-Clause License: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * 1. Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * 2. 
Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +/* Copyright (C) 2015-2017 Netronome Systems, Inc. */ /* Parse the hwinfo table that the ARM firmware builds in the ARM scratch SRAM * after chip reset. diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_mip.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_mip.c index 5f193fe2d69e..79e17943519e 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_mip.c +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_mip.c @@ -1,35 +1,5 @@ -/* - * Copyright (C) 2015-2017 Netronome Systems, Inc. - * - * This software is dual licensed under the GNU General License Version 2, - * June 1991 as shown in the file COPYING in the top-level directory of this - * source tree or the BSD 2-Clause License provided below. You have the - * option to license this software under the complete terms of either license. - * - * The BSD 2-Clause License: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * 1. Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * 2. Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +/* Copyright (C) 2015-2017 Netronome Systems, Inc. */ /* * nfp_mip.c diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_mutex.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_mutex.c index c88bf673cb76..7bc17b94ac60 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_mutex.c +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_mutex.c @@ -1,35 +1,5 @@ -/* - * Copyright (C) 2015-2017 Netronome Systems, Inc. - * - * This software is dual licensed under the GNU General License Version 2, - * June 1991 as shown in the file COPYING in the top-level directory of this - * source tree or the BSD 2-Clause License provided below. You have the - * option to license this software under the complete terms of either license. 
- * - * The BSD 2-Clause License: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * 1. Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * 2. Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +/* Copyright (C) 2015-2018 Netronome Systems, Inc. */ #include <linux/delay.h> #include <linux/device.h> diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nffw.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nffw.c index a164fbc85cd3..d4e02542e2e9 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nffw.c +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nffw.c @@ -1,35 +1,5 @@ -/* - * Copyright (C) 2015-2017 Netronome Systems, Inc. - * - * This software is dual licensed under the GNU General License Version 2, - * June 1991 as shown in the file COPYING in the top-level directory of this - * source tree or the BSD 2-Clause License provided below. You have the - * option to license this software under the complete terms of either license. - * - * The BSD 2-Clause License: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * 1. Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * 2. Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +/* Copyright (C) 2015-2018 Netronome Systems, Inc. */ /* * nfp_nffw.c diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nffw.h b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nffw.h index 8d2cbdf4d517..49a4d3f56b56 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nffw.h +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nffw.h @@ -1,35 +1,5 @@ -/* - * Copyright (C) 2015-2017 Netronome Systems, Inc. 
- * - * This software is dual licensed under the GNU General License Version 2, - * June 1991 as shown in the file COPYING in the top-level directory of this - * source tree or the BSD 2-Clause License provided below. You have the - * option to license this software under the complete terms of either license. - * - * The BSD 2-Clause License: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * 1. Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * 2. Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */ +/* Copyright (C) 2015-2018 Netronome Systems, Inc. */ /* * nfp_nffw.h diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c index bf593a6b26a1..ce1577bbbd2a 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c @@ -1,35 +1,5 @@ -/* - * Copyright (C) 2015-2017 Netronome Systems, Inc. - * - * This software is dual licensed under the GNU General License Version 2, - * June 1991 as shown in the file COPYING in the top-level directory of this - * source tree or the BSD 2-Clause License provided below. You have the - * option to license this software under the complete terms of either license. - * - * The BSD 2-Clause License: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * 1. Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * 2. Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +/* Copyright (C) 2015-2018 Netronome Systems, Inc. 
*/ /* * nfp_nsp.c diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h index bd6c9071c8e9..ff33ac54097a 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h @@ -1,35 +1,5 @@ -/* - * Copyright (C) 2015-2017 Netronome Systems, Inc. - * - * This software is dual licensed under the GNU General License Version 2, - * June 1991 as shown in the file COPYING in the top-level directory of this - * source tree or the BSD 2-Clause License provided below. You have the - * option to license this software under the complete terms of either license. - * - * The BSD 2-Clause License: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * 1. Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * 2. Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */ +/* Copyright (C) 2015-2018 Netronome Systems, Inc. */ #ifndef NSP_NSP_H #define NSP_NSP_H 1 diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_cmds.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_cmds.c index 5d362f87af08..0997d127144f 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_cmds.c +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_cmds.c @@ -1,35 +1,5 @@ -/* - * Copyright (C) 2017 Netronome Systems, Inc. - * - * This software is dual licensed under the GNU General License Version 2, - * June 1991 as shown in the file COPYING in the top-level directory of this - * source tree or the BSD 2-Clause License provided below. You have the - * option to license this software under the complete terms of either license. - * - * The BSD 2-Clause License: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * 1. Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * 2. Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. 
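The parenthesised OR expression preserves the dual licensing that the long-form header spelled out: recipients may take the code under GPL-2.0-only or under BSD-2-Clause, at their option. For a loadable module the matching license tag is the dual form; a two-line sketch (module body elided):

    // SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
    #include <linux/module.h>

    MODULE_LICENSE("Dual BSD/GPL");    /* module.h's string for the GPLv2-or-BSD choice */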
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +/* Copyright (C) 2017 Netronome Systems, Inc. */ #include <linux/kernel.h> #include <linux/slab.h> diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c index 7ca589660e4d..802c9224bb32 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c @@ -1,35 +1,5 @@ -/* - * Copyright (C) 2015-2017 Netronome Systems, Inc. - * - * This software is dual licensed under the GNU General License Version 2, - * June 1991 as shown in the file COPYING in the top-level directory of this - * source tree or the BSD 2-Clause License provided below. You have the - * option to license this software under the complete terms of either license. - * - * The BSD 2-Clause License: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * 1. Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * 2. Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +/* Copyright (C) 2015-2017 Netronome Systems, Inc. */ /* Authors: David Brunecz <david.brunecz@netronome.com> * Jakub Kicinski <jakub.kicinski@netronome.com> diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_resource.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_resource.c index d32af598da90..ce7492a6a98f 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_resource.c +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_resource.c @@ -1,35 +1,5 @@ -/* - * Copyright (C) 2015-2017 Netronome Systems, Inc. - * - * This software is dual licensed under the GNU General License Version 2, - * June 1991 as shown in the file COPYING in the top-level directory of this - * source tree or the BSD 2-Clause License provided below. You have the - * option to license this software under the complete terms of either license. - * - * The BSD 2-Clause License: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * 1. Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * 2. 
Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +/* Copyright (C) 2015-2018 Netronome Systems, Inc. */ /* * nfp_resource.c diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_rtsym.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_rtsym.c index 1ad0a015572e..75f012444796 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_rtsym.c +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_rtsym.c @@ -1,35 +1,5 @@ -/* - * Copyright (C) 2015-2017 Netronome Systems, Inc. - * - * This software is dual licensed under the GNU General License Version 2, - * June 1991 as shown in the file COPYING in the top-level directory of this - * source tree or the BSD 2-Clause License provided below. You have the - * option to license this software under the complete terms of either license. - * - * The BSD 2-Clause License: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * 1. Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * 2. Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +/* Copyright (C) 2015-2018 Netronome Systems, Inc. */ /* * nfp_rtsym.c diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_target.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_target.c index f691c6587c76..79470f198a62 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_target.c +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_target.c @@ -1,35 +1,5 @@ -/* - * Copyright (C) 2015-2017 Netronome Systems, Inc. - * - * This software is dual licensed under the GNU General License Version 2, - * June 1991 as shown in the file COPYING in the top-level directory of this - * source tree or the BSD 2-Clause License provided below. You have the - * option to license this software under the complete terms of either license. 
- * - * The BSD 2-Clause License: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * 1. Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * 2. Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +/* Copyright (C) 2015-2018 Netronome Systems, Inc. */ /* * nfp_target.c diff --git a/drivers/net/ethernet/netronome/nfp/nic/main.c b/drivers/net/ethernet/netronome/nfp/nic/main.c index d5b587fccaa3..aea8579206ee 100644 --- a/drivers/net/ethernet/netronome/nfp/nic/main.c +++ b/drivers/net/ethernet/netronome/nfp/nic/main.c @@ -1,35 +1,5 @@ -/* - * Copyright (C) 2017 Netronome Systems, Inc. - * - * This software is dual licensed under the GNU General License Version 2, - * June 1991 as shown in the file COPYING in the top-level directory of this - * source tree or the BSD 2-Clause License provided below. You have the - * option to license this software under the complete terms of either license. - * - * The BSD 2-Clause License: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * 1. Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * 2. Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +/* Copyright (C) 2017 Netronome Systems, Inc. 
*/ #include "../nfpcore/nfp_cpp.h" #include "../nfpcore/nfp_nsp.h" diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c index 69aa7fc392c5..59c70be22a84 100644 --- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c +++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c @@ -72,9 +72,6 @@ static void netxen_schedule_work(struct netxen_adapter *adapter, work_func_t func, int delay); static void netxen_cancel_fw_work(struct netxen_adapter *adapter); static int netxen_nic_poll(struct napi_struct *napi, int budget); -#ifdef CONFIG_NET_POLL_CONTROLLER -static void netxen_nic_poll_controller(struct net_device *netdev); -#endif static void netxen_create_sysfs_entries(struct netxen_adapter *adapter); static void netxen_remove_sysfs_entries(struct netxen_adapter *adapter); @@ -581,9 +578,6 @@ static const struct net_device_ops netxen_netdev_ops = { .ndo_tx_timeout = netxen_tx_timeout, .ndo_fix_features = netxen_fix_features, .ndo_set_features = netxen_set_features, -#ifdef CONFIG_NET_POLL_CONTROLLER - .ndo_poll_controller = netxen_nic_poll_controller, -#endif }; static inline bool netxen_function_zero(struct pci_dev *pdev) @@ -2402,23 +2396,6 @@ static int netxen_nic_poll(struct napi_struct *napi, int budget) return work_done; } -#ifdef CONFIG_NET_POLL_CONTROLLER -static void netxen_nic_poll_controller(struct net_device *netdev) -{ - int ring; - struct nx_host_sds_ring *sds_ring; - struct netxen_adapter *adapter = netdev_priv(netdev); - struct netxen_recv_context *recv_ctx = &adapter->recv_ctx; - - disable_irq(adapter->irq); - for (ring = 0; ring < adapter->max_sds_rings; ring++) { - sds_ring = &recv_ctx->sds_rings[ring]; - netxen_intr(adapter->irq, sds_ring); - } - enable_irq(adapter->irq); -} -#endif - static int nx_incr_dev_ref_cnt(struct netxen_adapter *adapter) { diff --git a/drivers/net/ethernet/qlogic/qed/qed_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_hsi.h index 56578f888b70..f2dfc7a976c0 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_hsi.h +++ b/drivers/net/ethernet/qlogic/qed/qed_hsi.h @@ -12102,6 +12102,7 @@ struct public_global { u32 running_bundle_id; s32 external_temperature; u32 mdump_reason; + u64 reserved; u32 data_ptr; u32 data_size; }; @@ -12275,7 +12276,7 @@ struct public_func { #define FUNC_MF_CFG_MAX_BW_DEFAULT 0x00640000 u32 status; -#define FUNC_STATUS_VLINK_DOWN 0x00000001 +#define FUNC_STATUS_VIRTUAL_LINK_UP 0x00000001 u32 mac_upper; #define FUNC_MF_CFG_UPPERMAC_MASK 0x0000ffff @@ -12697,6 +12698,7 @@ struct public_drv_mb { #define DRV_MB_PARAM_FEATURE_SUPPORT_PORT_MASK 0x0000FFFF #define DRV_MB_PARAM_FEATURE_SUPPORT_PORT_OFFSET 0 #define DRV_MB_PARAM_FEATURE_SUPPORT_PORT_EEE 0x00000002 +#define DRV_MB_PARAM_FEATURE_SUPPORT_FUNC_VLINK 0x00010000 u32 fw_mb_header; #define FW_MSG_CODE_MASK 0xffff0000 @@ -12749,6 +12751,7 @@ struct public_drv_mb { /* get MFW feature support response */ #define FW_MB_PARAM_FEATURE_SUPPORT_EEE 0x00000002 +#define FW_MB_PARAM_FEATURE_SUPPORT_VLINK 0x00010000 #define FW_MB_PARAM_LOAD_DONE_DID_EFUSE_ERROR (1 << 0) diff --git a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c index f99797a149a4..beb8e5d6401a 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c +++ b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c @@ -1709,7 +1709,7 @@ qed_iwarp_parse_rx_pkt(struct qed_hwfn *p_hwfn, cm_info->local_ip[0] = ntohl(iph->daddr); cm_info->remote_ip[0] = ntohl(iph->saddr); - cm_info->ip_version = TCP_IPV4; + cm_info->ip_version = 
QED_TCP_IPV4; ip_hlen = (iph->ihl) * sizeof(u32); *payload_len = ntohs(iph->tot_len) - ip_hlen; @@ -1729,7 +1729,7 @@ qed_iwarp_parse_rx_pkt(struct qed_hwfn *p_hwfn, cm_info->remote_ip[i] = ntohl(ip6h->saddr.in6_u.u6_addr32[i]); } - cm_info->ip_version = TCP_IPV6; + cm_info->ip_version = QED_TCP_IPV6; ip_hlen = sizeof(*ip6h); *payload_len = ntohs(ip6h->payload_len); diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.c b/drivers/net/ethernet/qlogic/qed/qed_ll2.c index da13117a604a..aa633381aa47 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_ll2.c +++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.c @@ -796,7 +796,18 @@ qed_ooo_submit_tx_buffers(struct qed_hwfn *p_hwfn, tx_pkt.vlan = p_buffer->vlan; tx_pkt.bd_flags = bd_flags; tx_pkt.l4_hdr_offset_w = l4_hdr_offset_w; - tx_pkt.tx_dest = p_ll2_conn->tx_dest; + switch (p_ll2_conn->tx_dest) { + case CORE_TX_DEST_NW: + tx_pkt.tx_dest = QED_LL2_TX_DEST_NW; + break; + case CORE_TX_DEST_LB: + tx_pkt.tx_dest = QED_LL2_TX_DEST_LB; + break; + case CORE_TX_DEST_DROP: + default: + tx_pkt.tx_dest = QED_LL2_TX_DEST_DROP; + break; + } tx_pkt.first_frag = first_frag; tx_pkt.first_frag_len = p_buffer->packet_length; tx_pkt.cookie = p_buffer; diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c index 58c7eb9d8e1b..b06e4cb72c6c 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c +++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c @@ -1247,6 +1247,52 @@ static void qed_mcp_read_eee_config(struct qed_hwfn *p_hwfn, p_link->eee_lp_adv_caps |= QED_EEE_10G_ADV; } +static u32 qed_mcp_get_shmem_func(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct public_func *p_data, int pfid) +{ + u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base, + PUBLIC_FUNC); + u32 mfw_path_offsize = qed_rd(p_hwfn, p_ptt, addr); + u32 func_addr; + u32 i, size; + + func_addr = SECTION_ADDR(mfw_path_offsize, pfid); + memset(p_data, 0, sizeof(*p_data)); + + size = min_t(u32, sizeof(*p_data), QED_SECTION_SIZE(mfw_path_offsize)); + for (i = 0; i < size / sizeof(u32); i++) + ((u32 *)p_data)[i] = qed_rd(p_hwfn, p_ptt, + func_addr + (i << 2)); + return size; +} + +static void qed_read_pf_bandwidth(struct qed_hwfn *p_hwfn, + struct public_func *p_shmem_info) +{ + struct qed_mcp_function_info *p_info; + + p_info = &p_hwfn->mcp_info->func_info; + + p_info->bandwidth_min = QED_MFW_GET_FIELD(p_shmem_info->config, + FUNC_MF_CFG_MIN_BW); + if (p_info->bandwidth_min < 1 || p_info->bandwidth_min > 100) { + DP_INFO(p_hwfn, + "bandwidth minimum out of bounds [%02x]. Set to 1\n", + p_info->bandwidth_min); + p_info->bandwidth_min = 1; + } + + p_info->bandwidth_max = QED_MFW_GET_FIELD(p_shmem_info->config, + FUNC_MF_CFG_MAX_BW); + if (p_info->bandwidth_max < 1 || p_info->bandwidth_max > 100) { + DP_INFO(p_hwfn, + "bandwidth maximum out of bounds [%02x]. Set to 100\n", + p_info->bandwidth_max); + p_info->bandwidth_max = 100; + } +} + static void qed_mcp_handle_link_change(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, bool b_reset) { @@ -1274,10 +1320,29 @@ static void qed_mcp_handle_link_change(struct qed_hwfn *p_hwfn, goto out; } - if (p_hwfn->b_drv_link_init) - p_link->link_up = !!(status & LINK_STATUS_LINK_UP); - else + if (p_hwfn->b_drv_link_init) { + /* Link indication with modern MFW arrives as per-PF + * indication. 
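The relocated qed_mcp_get_shmem_func() above copies a per-PF section out of management-firmware shared memory one 32-bit word at a time, bounded by the section size, and qed_read_pf_bandwidth() then forces the advertised bandwidth limits into a sane range. A condensed sketch of that pattern; shmem_rd32(), struct pf_shmem and the clamp_t() shorthand are illustrative stand-ins, not the driver's real accessors or layout:

    struct pf_shmem {                  /* invented layout, not public_func */
            u32 bw_min;
            u32 bw_max;
    };

    static void read_pf_shmem(struct pf_shmem *p, u32 base, u32 size)
    {
            u32 i, n = min_t(u32, sizeof(*p), size) / sizeof(u32);

            memset(p, 0, sizeof(*p));
            for (i = 0; i < n; i++)
                    ((u32 *)p)[i] = shmem_rd32(base + (i << 2));

            /* The patch resets out-of-range values to 1 and 100; clamp_t()
             * is a compact approximation of those bounds checks. */
            p->bw_min = clamp_t(u32, p->bw_min, 1, 100);
            p->bw_max = clamp_t(u32, p->bw_max, 1, 100);
    }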
+ */ + if (p_hwfn->mcp_info->capabilities & + FW_MB_PARAM_FEATURE_SUPPORT_VLINK) { + struct public_func shmem_info; + + qed_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, + MCP_PF_ID(p_hwfn)); + p_link->link_up = !!(shmem_info.status & + FUNC_STATUS_VIRTUAL_LINK_UP); + qed_read_pf_bandwidth(p_hwfn, &shmem_info); + DP_VERBOSE(p_hwfn, NETIF_MSG_LINK, + "Virtual link_up = %d\n", p_link->link_up); + } else { + p_link->link_up = !!(status & LINK_STATUS_LINK_UP); + DP_VERBOSE(p_hwfn, NETIF_MSG_LINK, + "Physical link_up = %d\n", p_link->link_up); + } + } else { p_link->link_up = false; + } p_link->full_duplex = true; switch ((status & LINK_STATUS_SPEED_AND_DUPLEX_MASK)) { @@ -1504,53 +1569,6 @@ static void qed_mcp_send_protocol_stats(struct qed_hwfn *p_hwfn, qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params); } -static void qed_read_pf_bandwidth(struct qed_hwfn *p_hwfn, - struct public_func *p_shmem_info) -{ - struct qed_mcp_function_info *p_info; - - p_info = &p_hwfn->mcp_info->func_info; - - p_info->bandwidth_min = (p_shmem_info->config & - FUNC_MF_CFG_MIN_BW_MASK) >> - FUNC_MF_CFG_MIN_BW_SHIFT; - if (p_info->bandwidth_min < 1 || p_info->bandwidth_min > 100) { - DP_INFO(p_hwfn, - "bandwidth minimum out of bounds [%02x]. Set to 1\n", - p_info->bandwidth_min); - p_info->bandwidth_min = 1; - } - - p_info->bandwidth_max = (p_shmem_info->config & - FUNC_MF_CFG_MAX_BW_MASK) >> - FUNC_MF_CFG_MAX_BW_SHIFT; - if (p_info->bandwidth_max < 1 || p_info->bandwidth_max > 100) { - DP_INFO(p_hwfn, - "bandwidth maximum out of bounds [%02x]. Set to 100\n", - p_info->bandwidth_max); - p_info->bandwidth_max = 100; - } -} - -static u32 qed_mcp_get_shmem_func(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, - struct public_func *p_data, int pfid) -{ - u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base, - PUBLIC_FUNC); - u32 mfw_path_offsize = qed_rd(p_hwfn, p_ptt, addr); - u32 func_addr = SECTION_ADDR(mfw_path_offsize, pfid); - u32 i, size; - - memset(p_data, 0, sizeof(*p_data)); - - size = min_t(u32, sizeof(*p_data), QED_SECTION_SIZE(mfw_path_offsize)); - for (i = 0; i < size / sizeof(u32); i++) - ((u32 *)p_data)[i] = qed_rd(p_hwfn, p_ptt, - func_addr + (i << 2)); - return size; -} - static void qed_mcp_update_bw(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) { struct qed_mcp_function_info *p_info; @@ -3351,7 +3369,8 @@ int qed_mcp_set_capabilities(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) { u32 mcp_resp, mcp_param, features; - features = DRV_MB_PARAM_FEATURE_SUPPORT_PORT_EEE; + features = DRV_MB_PARAM_FEATURE_SUPPORT_PORT_EEE | + DRV_MB_PARAM_FEATURE_SUPPORT_FUNC_VLINK; return qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_FEATURE_SUPPORT, features, &mcp_resp, &mcp_param); diff --git a/drivers/net/ethernet/qlogic/qed/qed_rdma.c b/drivers/net/ethernet/qlogic/qed/qed_rdma.c index be941cfaa2d4..c71391b9c757 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_rdma.c +++ b/drivers/net/ethernet/qlogic/qed/qed_rdma.c @@ -228,7 +228,7 @@ static int qed_rdma_alloc(struct qed_hwfn *p_hwfn, num_cons, "Toggle"); if (rc) { DP_VERBOSE(p_hwfn, QED_MSG_RDMA, - "Failed to allocate toogle bits, rc = %d\n", rc); + "Failed to allocate toggle bits, rc = %d\n", rc); goto free_cq_map; } diff --git a/drivers/net/ethernet/qlogic/qed/qed_roce.c b/drivers/net/ethernet/qlogic/qed/qed_roce.c index 7d7a64c55ff1..f9167d1354bb 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_roce.c +++ b/drivers/net/ethernet/qlogic/qed/qed_roce.c @@ -140,23 +140,16 @@ static void qed_rdma_copy_gids(struct qed_rdma_qp *qp, __le32 *src_gid, static enum 
roce_flavor qed_roce_mode_to_flavor(enum roce_mode roce_mode) { - enum roce_flavor flavor; - switch (roce_mode) { case ROCE_V1: - flavor = PLAIN_ROCE; - break; + return PLAIN_ROCE; case ROCE_V2_IPV4: - flavor = RROCE_IPV4; - break; + return RROCE_IPV4; case ROCE_V2_IPV6: - flavor = ROCE_V2_IPV6; - break; + return RROCE_IPV6; default: - flavor = MAX_ROCE_MODE; - break; + return MAX_ROCE_FLAVOR; } - return flavor; } static void qed_roce_free_cid_pair(struct qed_hwfn *p_hwfn, u16 cid) diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c index 8de644b4721e..77b6248ad3b9 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c +++ b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c @@ -154,7 +154,7 @@ qed_set_pf_update_tunn_mode(struct qed_tunnel_info *p_tun, static void qed_set_tunn_cls_info(struct qed_tunnel_info *p_tun, struct qed_tunnel_info *p_src) { - enum tunnel_clss type; + int type; p_tun->b_update_rx_cls = p_src->b_update_rx_cls; p_tun->b_update_tx_cls = p_src->b_update_tx_cls; diff --git a/drivers/net/ethernet/qlogic/qed/qed_vf.c b/drivers/net/ethernet/qlogic/qed/qed_vf.c index 3d4269659820..be118d057b92 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_vf.c +++ b/drivers/net/ethernet/qlogic/qed/qed_vf.c @@ -413,7 +413,6 @@ static int qed_vf_pf_acquire(struct qed_hwfn *p_hwfn) } if (!p_iov->b_pre_fp_hsi && - ETH_HSI_VER_MINOR && (resp->pfdev_info.minor_fp_hsi < ETH_HSI_VER_MINOR)) { DP_INFO(p_hwfn, "PF is using older fastpath HSI; %02x.%02x is configured\n", @@ -572,7 +571,7 @@ free_p_iov: static void __qed_vf_prep_tunn_req_tlv(struct vfpf_update_tunn_param_tlv *p_req, struct qed_tunn_update_type *p_src, - enum qed_tunn_clss mask, u8 *p_cls) + enum qed_tunn_mode mask, u8 *p_cls) { if (p_src->b_update_mode) { p_req->tun_mode_update_mask |= BIT(mask); @@ -587,7 +586,7 @@ __qed_vf_prep_tunn_req_tlv(struct vfpf_update_tunn_param_tlv *p_req, static void qed_vf_prep_tunn_req_tlv(struct vfpf_update_tunn_param_tlv *p_req, struct qed_tunn_update_type *p_src, - enum qed_tunn_clss mask, + enum qed_tunn_mode mask, u8 *p_cls, struct qed_tunn_update_udp_port *p_port, u8 *p_update_port, u16 *p_udp_port) { diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h index 81312924df14..0c443ea98479 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h @@ -1800,7 +1800,8 @@ struct qlcnic_hardware_ops { int (*config_loopback) (struct qlcnic_adapter *, u8); int (*clear_loopback) (struct qlcnic_adapter *, u8); int (*config_promisc_mode) (struct qlcnic_adapter *, u32); - void (*change_l2_filter) (struct qlcnic_adapter *, u64 *, u16); + void (*change_l2_filter)(struct qlcnic_adapter *adapter, u64 *addr, + u16 vlan, struct qlcnic_host_tx_ring *tx_ring); int (*get_board_info) (struct qlcnic_adapter *); void (*set_mac_filter_count) (struct qlcnic_adapter *); void (*free_mac_list) (struct qlcnic_adapter *); @@ -2064,9 +2065,10 @@ static inline int qlcnic_nic_set_promisc(struct qlcnic_adapter *adapter, } static inline void qlcnic_change_filter(struct qlcnic_adapter *adapter, - u64 *addr, u16 id) + u64 *addr, u16 vlan, + struct qlcnic_host_tx_ring *tx_ring) { - adapter->ahw->hw_ops->change_l2_filter(adapter, addr, id); + adapter->ahw->hw_ops->change_l2_filter(adapter, addr, vlan, tx_ring); } static inline int qlcnic_get_board_info(struct qlcnic_adapter *adapter) diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c 
b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c index 569d54ededec..a79d84f99102 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c @@ -2135,7 +2135,8 @@ out: } void qlcnic_83xx_change_l2_filter(struct qlcnic_adapter *adapter, u64 *addr, - u16 vlan_id) + u16 vlan_id, + struct qlcnic_host_tx_ring *tx_ring) { u8 mac[ETH_ALEN]; memcpy(&mac, addr, ETH_ALEN); diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h index b75a81246856..73fe2f64491d 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h @@ -550,7 +550,8 @@ int qlcnic_83xx_wrt_reg_indirect(struct qlcnic_adapter *, ulong, u32); int qlcnic_83xx_nic_set_promisc(struct qlcnic_adapter *, u32); int qlcnic_83xx_config_hw_lro(struct qlcnic_adapter *, int); int qlcnic_83xx_config_rss(struct qlcnic_adapter *, int); -void qlcnic_83xx_change_l2_filter(struct qlcnic_adapter *, u64 *, u16); +void qlcnic_83xx_change_l2_filter(struct qlcnic_adapter *adapter, u64 *addr, + u16 vlan, struct qlcnic_host_tx_ring *ring); int qlcnic_83xx_get_pci_info(struct qlcnic_adapter *, struct qlcnic_pci_info *); int qlcnic_83xx_set_nic_info(struct qlcnic_adapter *, struct qlcnic_info *); void qlcnic_83xx_initialize_nic(struct qlcnic_adapter *, int); diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h index 4bb33af8e2b3..56a3bd9e37dc 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h @@ -173,7 +173,8 @@ int qlcnic_82xx_napi_add(struct qlcnic_adapter *adapter, struct net_device *netdev); void qlcnic_82xx_get_beacon_state(struct qlcnic_adapter *); void qlcnic_82xx_change_filter(struct qlcnic_adapter *adapter, - u64 *uaddr, u16 vlan_id); + u64 *uaddr, u16 vlan_id, + struct qlcnic_host_tx_ring *tx_ring); int qlcnic_82xx_config_intr_coalesce(struct qlcnic_adapter *, struct ethtool_coalesce *); int qlcnic_82xx_set_rx_coalesce(struct qlcnic_adapter *); diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c index 84dd83031a1b..9647578cbe6a 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c @@ -268,13 +268,12 @@ static void qlcnic_add_lb_filter(struct qlcnic_adapter *adapter, } void qlcnic_82xx_change_filter(struct qlcnic_adapter *adapter, u64 *uaddr, - u16 vlan_id) + u16 vlan_id, struct qlcnic_host_tx_ring *tx_ring) { struct cmd_desc_type0 *hwdesc; struct qlcnic_nic_req *req; struct qlcnic_mac_req *mac_req; struct qlcnic_vlan_req *vlan_req; - struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring; u32 producer; u64 word; @@ -301,7 +300,8 @@ void qlcnic_82xx_change_filter(struct qlcnic_adapter *adapter, u64 *uaddr, static void qlcnic_send_filter(struct qlcnic_adapter *adapter, struct cmd_desc_type0 *first_desc, - struct sk_buff *skb) + struct sk_buff *skb, + struct qlcnic_host_tx_ring *tx_ring) { struct vlan_ethhdr *vh = (struct vlan_ethhdr *)(skb->data); struct ethhdr *phdr = (struct ethhdr *)(skb->data); @@ -335,7 +335,7 @@ static void qlcnic_send_filter(struct qlcnic_adapter *adapter, tmp_fil->vlan_id == vlan_id) { if (jiffies > (QLCNIC_READD_AGE * HZ + tmp_fil->ftime)) qlcnic_change_filter(adapter, &src_addr, - vlan_id); + vlan_id, tx_ring); tmp_fil->ftime = jiffies; return; } @@ -350,7 +350,7 @@ static void qlcnic_send_filter(struct 
qlcnic_adapter *adapter, if (!fil) return; - qlcnic_change_filter(adapter, &src_addr, vlan_id); + qlcnic_change_filter(adapter, &src_addr, vlan_id, tx_ring); fil->ftime = jiffies; fil->vlan_id = vlan_id; memcpy(fil->faddr, &src_addr, ETH_ALEN); @@ -766,7 +766,7 @@ netdev_tx_t qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev) } if (adapter->drv_mac_learn) - qlcnic_send_filter(adapter, first_desc, skb); + qlcnic_send_filter(adapter, first_desc, skb, tx_ring); tx_ring->tx_stats.tx_bytes += skb->len; tx_ring->tx_stats.xmit_called++; diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c index 2d38d1ac2aae..dbd48012224f 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c @@ -59,9 +59,6 @@ static int qlcnic_close(struct net_device *netdev); static void qlcnic_tx_timeout(struct net_device *netdev); static void qlcnic_attach_work(struct work_struct *work); static void qlcnic_fwinit_work(struct work_struct *work); -#ifdef CONFIG_NET_POLL_CONTROLLER -static void qlcnic_poll_controller(struct net_device *netdev); -#endif static void qlcnic_idc_debug_info(struct qlcnic_adapter *adapter, u8 encoding); static int qlcnic_can_start_firmware(struct qlcnic_adapter *adapter); @@ -545,9 +542,6 @@ static const struct net_device_ops qlcnic_netdev_ops = { .ndo_udp_tunnel_add = qlcnic_add_vxlan_port, .ndo_udp_tunnel_del = qlcnic_del_vxlan_port, .ndo_features_check = qlcnic_features_check, -#ifdef CONFIG_NET_POLL_CONTROLLER - .ndo_poll_controller = qlcnic_poll_controller, -#endif #ifdef CONFIG_QLCNIC_SRIOV .ndo_set_vf_mac = qlcnic_sriov_set_vf_mac, .ndo_set_vf_rate = qlcnic_sriov_set_vf_tx_rate, @@ -3200,45 +3194,6 @@ static irqreturn_t qlcnic_msix_tx_intr(int irq, void *data) return IRQ_HANDLED; } -#ifdef CONFIG_NET_POLL_CONTROLLER -static void qlcnic_poll_controller(struct net_device *netdev) -{ - struct qlcnic_adapter *adapter = netdev_priv(netdev); - struct qlcnic_host_sds_ring *sds_ring; - struct qlcnic_recv_context *recv_ctx; - struct qlcnic_host_tx_ring *tx_ring; - int ring; - - if (!test_bit(__QLCNIC_DEV_UP, &adapter->state)) - return; - - recv_ctx = adapter->recv_ctx; - - for (ring = 0; ring < adapter->drv_sds_rings; ring++) { - sds_ring = &recv_ctx->sds_rings[ring]; - qlcnic_disable_sds_intr(adapter, sds_ring); - napi_schedule(&sds_ring->napi); - } - - if (adapter->flags & QLCNIC_MSIX_ENABLED) { - /* Only Multi-Tx queue capable devices need to - * schedule NAPI for TX rings - */ - if ((qlcnic_83xx_check(adapter) && - (adapter->flags & QLCNIC_TX_INTR_SHARED)) || - (qlcnic_82xx_check(adapter) && - !qlcnic_check_multi_tx(adapter))) - return; - - for (ring = 0; ring < adapter->drv_tx_rings; ring++) { - tx_ring = &adapter->tx_ring[ring]; - qlcnic_disable_tx_intr(adapter, tx_ring); - napi_schedule(&tx_ring->napi); - } - } -} -#endif - static void qlcnic_idc_debug_info(struct qlcnic_adapter *adapter, u8 encoding) { diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c index 7fd86d40a337..11167abe5934 100644 --- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c +++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c @@ -113,7 +113,7 @@ rmnet_map_ingress_handler(struct sk_buff *skb, struct sk_buff *skbn; if (skb->dev->type == ARPHRD_ETHER) { - if (pskb_expand_head(skb, ETH_HLEN, 0, GFP_KERNEL)) { + if (pskb_expand_head(skb, ETH_HLEN, 0, GFP_ATOMIC)) { kfree_skb(skb); return; } @@ -147,7 +147,7 @@ static 
int rmnet_map_egress_handler(struct sk_buff *skb, } if (skb_headroom(skb) < required_headroom) { - if (pskb_expand_head(skb, required_headroom, 0, GFP_KERNEL)) + if (pskb_expand_head(skb, required_headroom, 0, GFP_ATOMIC)) return -ENOMEM; } @@ -189,6 +189,9 @@ rx_handler_result_t rmnet_rx_handler(struct sk_buff **pskb) if (!skb) goto done; + if (skb->pkt_type == PACKET_LOOPBACK) + return RX_HANDLER_PASS; + dev = skb->dev; port = rmnet_get_port(dev); diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index ed8ffd498c88..b68e32186d67 100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c @@ -4059,13 +4059,12 @@ static void rtl8169_init_phy(struct net_device *dev, struct rtl8169_private *tp) genphy_soft_reset(dev->phydev); - /* It was reported that chip version 33 ends up with 10MBit/Half on a + /* It was reported that several chips end up with 10MBit/Half on a * 1GBit link after resuming from S3. For whatever reason the PHY on - * this chip doesn't properly start a renegotiation when soft-reset. + * these chips doesn't properly start a renegotiation when soft-reset. * Explicitly requesting a renegotiation fixes this. */ - if (tp->mac_version == RTL_GIGA_MAC_VER_33 && - dev->phydev->autoneg == AUTONEG_ENABLE) + if (dev->phydev->autoneg == AUTONEG_ENABLE) phy_restart_aneg(dev->phydev); } @@ -4270,8 +4269,8 @@ static void rtl_init_rxcfg(struct rtl8169_private *tp) RTL_W32(tp, RxConfig, RX_FIFO_THRESH | RX_DMA_BURST); break; case RTL_GIGA_MAC_VER_18 ... RTL_GIGA_MAC_VER_24: - case RTL_GIGA_MAC_VER_34: - case RTL_GIGA_MAC_VER_35: + case RTL_GIGA_MAC_VER_34 ... RTL_GIGA_MAC_VER_36: + case RTL_GIGA_MAC_VER_38: RTL_W32(tp, RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST); break; case RTL_GIGA_MAC_VER_40 ... 
RTL_GIGA_MAC_VER_51: @@ -4523,9 +4522,14 @@ static void rtl8169_hw_reset(struct rtl8169_private *tp) static void rtl_set_tx_config_registers(struct rtl8169_private *tp) { - /* Set DMA burst size and Interframe Gap Time */ - RTL_W32(tp, TxConfig, (TX_DMA_BURST << TxDMAShift) | - (InterFrameGap << TxInterFrameGapShift)); + u32 val = TX_DMA_BURST << TxDMAShift | + InterFrameGap << TxInterFrameGapShift; + + if (tp->mac_version >= RTL_GIGA_MAC_VER_34 && + tp->mac_version != RTL_GIGA_MAC_VER_39) + val |= TXCFG_AUTO_FIFO; + + RTL_W32(tp, TxConfig, val); } static void rtl_set_rx_max_size(struct rtl8169_private *tp) @@ -5020,7 +5024,6 @@ static void rtl_hw_start_8168e_2(struct rtl8169_private *tp) rtl_disable_clock_request(tp); - RTL_W32(tp, TxConfig, RTL_R32(tp, TxConfig) | TXCFG_AUTO_FIFO); RTL_W8(tp, MCU, RTL_R8(tp, MCU) & ~NOW_IS_OOB); /* Adjust EEE LED frequency */ @@ -5054,7 +5057,6 @@ static void rtl_hw_start_8168f(struct rtl8169_private *tp) rtl_disable_clock_request(tp); - RTL_W32(tp, TxConfig, RTL_R32(tp, TxConfig) | TXCFG_AUTO_FIFO); RTL_W8(tp, MCU, RTL_R8(tp, MCU) & ~NOW_IS_OOB); RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) | PFM_EN); RTL_W32(tp, MISC, RTL_R32(tp, MISC) | PWM_EN); @@ -5099,8 +5101,6 @@ static void rtl_hw_start_8411(struct rtl8169_private *tp) static void rtl_hw_start_8168g(struct rtl8169_private *tp) { - RTL_W32(tp, TxConfig, RTL_R32(tp, TxConfig) | TXCFG_AUTO_FIFO); - rtl_eri_write(tp, 0xc8, ERIAR_MASK_0101, 0x080002, ERIAR_EXGMAC); rtl_eri_write(tp, 0xcc, ERIAR_MASK_0001, 0x38, ERIAR_EXGMAC); rtl_eri_write(tp, 0xd0, ERIAR_MASK_0001, 0x48, ERIAR_EXGMAC); @@ -5198,8 +5198,6 @@ static void rtl_hw_start_8168h_1(struct rtl8169_private *tp) rtl_hw_aspm_clkreq_enable(tp, false); rtl_ephy_init(tp, e_info_8168h_1, ARRAY_SIZE(e_info_8168h_1)); - RTL_W32(tp, TxConfig, RTL_R32(tp, TxConfig) | TXCFG_AUTO_FIFO); - rtl_eri_write(tp, 0xc8, ERIAR_MASK_0101, 0x00080002, ERIAR_EXGMAC); rtl_eri_write(tp, 0xcc, ERIAR_MASK_0001, 0x38, ERIAR_EXGMAC); rtl_eri_write(tp, 0xd0, ERIAR_MASK_0001, 0x48, ERIAR_EXGMAC); @@ -5282,8 +5280,6 @@ static void rtl_hw_start_8168ep(struct rtl8169_private *tp) { rtl8168ep_stop_cmac(tp); - RTL_W32(tp, TxConfig, RTL_R32(tp, TxConfig) | TXCFG_AUTO_FIFO); - rtl_eri_write(tp, 0xc8, ERIAR_MASK_0101, 0x00080002, ERIAR_EXGMAC); rtl_eri_write(tp, 0xcc, ERIAR_MASK_0001, 0x2f, ERIAR_EXGMAC); rtl_eri_write(tp, 0xd0, ERIAR_MASK_0001, 0x5f, ERIAR_EXGMAC); @@ -5605,7 +5601,6 @@ static void rtl_hw_start_8402(struct rtl8169_private *tp) /* Force LAN exit from ASPM if Rx/Tx are not idle */ RTL_W32(tp, FuncEvent, RTL_R32(tp, FuncEvent) | 0x002800); - RTL_W32(tp, TxConfig, RTL_R32(tp, TxConfig) | TXCFG_AUTO_FIFO); RTL_W8(tp, MCU, RTL_R8(tp, MCU) & ~NOW_IS_OOB); rtl_ephy_init(tp, e_info_8402, ARRAY_SIZE(e_info_8402)); @@ -6856,8 +6851,10 @@ static int rtl8169_suspend(struct device *device) { struct pci_dev *pdev = to_pci_dev(device); struct net_device *dev = pci_get_drvdata(pdev); + struct rtl8169_private *tp = netdev_priv(dev); rtl8169_net_suspend(dev); + clk_disable_unprepare(tp->clk); return 0; } @@ -6885,6 +6882,9 @@ static int rtl8169_resume(struct device *device) { struct pci_dev *pdev = to_pci_dev(device); struct net_device *dev = pci_get_drvdata(pdev); + struct rtl8169_private *tp = netdev_priv(dev); + + clk_prepare_enable(tp->clk); if (netif_running(dev)) __rtl8169_resume(dev); diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c index 330233286e78..3d0dd39c289e 100644 --- a/drivers/net/ethernet/sfc/efx.c +++ b/drivers/net/ethernet/sfc/efx.c @@ -2208,29 
+2208,6 @@ static void efx_fini_napi(struct efx_nic *efx) /************************************************************************** * - * Kernel netpoll interface - * - *************************************************************************/ - -#ifdef CONFIG_NET_POLL_CONTROLLER - -/* Although in the common case interrupts will be disabled, this is not - * guaranteed. However, all our work happens inside the NAPI callback, - * so no locking is required. - */ -static void efx_netpoll(struct net_device *net_dev) -{ - struct efx_nic *efx = netdev_priv(net_dev); - struct efx_channel *channel; - - efx_for_each_channel(channel, efx) - efx_schedule_channel(channel); -} - -#endif - -/************************************************************************** - * * Kernel net device interface * *************************************************************************/ @@ -2509,9 +2486,6 @@ static const struct net_device_ops efx_netdev_ops = { #endif .ndo_get_phys_port_id = efx_get_phys_port_id, .ndo_get_phys_port_name = efx_get_phys_port_name, -#ifdef CONFIG_NET_POLL_CONTROLLER - .ndo_poll_controller = efx_netpoll, -#endif .ndo_setup_tc = efx_setup_tc, #ifdef CONFIG_RFS_ACCEL .ndo_rx_flow_steer = efx_filter_rfs, diff --git a/drivers/net/ethernet/sfc/falcon/efx.c b/drivers/net/ethernet/sfc/falcon/efx.c index dd5530a4f8c8..03e2455c502e 100644 --- a/drivers/net/ethernet/sfc/falcon/efx.c +++ b/drivers/net/ethernet/sfc/falcon/efx.c @@ -2054,29 +2054,6 @@ static void ef4_fini_napi(struct ef4_nic *efx) /************************************************************************** * - * Kernel netpoll interface - * - *************************************************************************/ - -#ifdef CONFIG_NET_POLL_CONTROLLER - -/* Although in the common case interrupts will be disabled, this is not - * guaranteed. However, all our work happens inside the NAPI callback, - * so no locking is required. 
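The netxen, qlcnic, sfc and falcon hunks in this series all drop their ndo_poll_controller implementations. The accompanying netpoll core rework made that hook optional: the core can schedule each NAPI context itself, so driver-level controllers, which typically disabled the device IRQ and replayed the handler and could monopolise NAPI contexts on busy hosts, add risk without adding value. What a driver still needs is only an ordinary NAPI poll callback; a minimal sketch with hypothetical foo_* helpers:

    static int foo_poll(struct napi_struct *napi, int budget)
    {
            struct foo_ring *ring = container_of(napi, struct foo_ring, napi);
            int work_done = foo_process_rx(ring, budget);   /* hypothetical */

            if (work_done < budget) {
                    napi_complete_done(napi, work_done);
                    foo_enable_irq(ring);                   /* hypothetical */
            }
            return work_done;
    }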
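In the same vein of context correctness, the rmnet hunks earlier switch pskb_expand_head() from GFP_KERNEL to GFP_ATOMIC: the rx handler and the egress path both run in atomic context, where a sleeping allocation is a bug. The shape of the fix, with required_headroom standing in for the driver's computed value:

    if (skb_headroom(skb) < required_headroom) {
            /* Atomic context: must not sleep waiting for memory. */
            if (pskb_expand_head(skb, required_headroom, 0, GFP_ATOMIC))
                    return -ENOMEM;     /* caller owns skb cleanup */
    }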
- */ -static void ef4_netpoll(struct net_device *net_dev) -{ - struct ef4_nic *efx = netdev_priv(net_dev); - struct ef4_channel *channel; - - ef4_for_each_channel(channel, efx) - ef4_schedule_channel(channel); -} - -#endif - -/************************************************************************** - * * Kernel net device interface * *************************************************************************/ @@ -2250,9 +2227,6 @@ static const struct net_device_ops ef4_netdev_ops = { .ndo_set_mac_address = ef4_set_mac_address, .ndo_set_rx_mode = ef4_set_rx_mode, .ndo_set_features = ef4_set_features, -#ifdef CONFIG_NET_POLL_CONTROLLER - .ndo_poll_controller = ef4_netpoll, -#endif .ndo_setup_tc = ef4_setup_tc, #ifdef CONFIG_RFS_ACCEL .ndo_rx_flow_steer = ef4_filter_rfs, diff --git a/drivers/net/ethernet/socionext/netsec.c b/drivers/net/ethernet/socionext/netsec.c index 7aa5ebb6766c..4289ccb26e4e 100644 --- a/drivers/net/ethernet/socionext/netsec.c +++ b/drivers/net/ethernet/socionext/netsec.c @@ -735,8 +735,11 @@ static int netsec_process_rx(struct netsec_priv *priv, int budget) u16 idx = dring->tail; struct netsec_de *de = dring->vaddr + (DESC_SZ * idx); - if (de->attr & (1U << NETSEC_RX_PKT_OWN_FIELD)) + if (de->attr & (1U << NETSEC_RX_PKT_OWN_FIELD)) { + /* reading the register clears the irq */ + netsec_read(priv, NETSEC_REG_NRM_RX_PKTCNT); break; + } /* This barrier is needed to keep us from reading * any other fields out of the netsec_de until we have diff --git a/drivers/net/fddi/skfp/h/cmtdef.h b/drivers/net/fddi/skfp/h/cmtdef.h index a12f464941ed..448d66c2e372 100644 --- a/drivers/net/fddi/skfp/h/cmtdef.h +++ b/drivers/net/fddi/skfp/h/cmtdef.h @@ -655,14 +655,6 @@ void dump_hex(char *p, int len); #ifndef PNMI_INIT #define PNMI_INIT(smc) /* Nothing */ #endif -#ifndef PNMI_GET_ID -#define PNMI_GET_ID( smc, ndis_oid, buf, len, BytesWritten, BytesNeeded ) \ - ( 1 ? (-1) : (-1) ) -#endif -#ifndef PNMI_SET_ID -#define PNMI_SET_ID( smc, ndis_oid, buf, len, BytesRead, BytesNeeded, \ - set_type) ( 1 ? 
(-1) : (-1) ) -#endif /* * SMT_PANIC defines diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c index 6625fabe2c88..82eccc930c5c 100644 --- a/drivers/net/geneve.c +++ b/drivers/net/geneve.c @@ -1325,11 +1325,15 @@ static int geneve_nl2info(struct nlattr *tb[], struct nlattr *data[], info->key.tun_id = tunid; } - if (data[IFLA_GENEVE_TTL]) + if (data[IFLA_GENEVE_TTL_INHERIT]) { + if (nla_get_u8(data[IFLA_GENEVE_TTL_INHERIT])) + *ttl_inherit = true; + else + *ttl_inherit = false; + } else if (data[IFLA_GENEVE_TTL]) { info->key.ttl = nla_get_u8(data[IFLA_GENEVE_TTL]); - - if (data[IFLA_GENEVE_TTL_INHERIT]) - *ttl_inherit = true; + *ttl_inherit = false; + } if (data[IFLA_GENEVE_TOS]) info->key.tos = nla_get_u8(data[IFLA_GENEVE_TOS]); diff --git a/drivers/net/hamradio/yam.c b/drivers/net/hamradio/yam.c index 16ec7af6ab7b..ba9df430fca6 100644 --- a/drivers/net/hamradio/yam.c +++ b/drivers/net/hamradio/yam.c @@ -966,6 +966,8 @@ static int yam_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) sizeof(struct yamdrv_ioctl_mcs)); if (IS_ERR(ym)) return PTR_ERR(ym); + if (ym->cmd != SIOCYAMSMCS) + return -EINVAL; if (ym->bitrate > YAM_MAXBITRATE) { kfree(ym); return -EINVAL; @@ -981,6 +983,8 @@ static int yam_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) if (copy_from_user(&yi, ifr->ifr_data, sizeof(struct yamdrv_ioctl_cfg))) return -EFAULT; + if (yi.cmd != SIOCYAMSCFG) + return -EINVAL; if ((yi.cfg.mask & YAM_IOBASE) && netif_running(dev)) return -EINVAL; /* Cannot change this parameter when up */ if ((yi.cfg.mask & YAM_IRQ) && netif_running(dev)) diff --git a/drivers/net/ieee802154/adf7242.c b/drivers/net/ieee802154/adf7242.c index 23a52b9293f3..cd1d8faccca5 100644 --- a/drivers/net/ieee802154/adf7242.c +++ b/drivers/net/ieee802154/adf7242.c @@ -1308,8 +1308,7 @@ static int adf7242_remove(struct spi_device *spi) { struct adf7242_local *lp = spi_get_drvdata(spi); - if (!IS_ERR_OR_NULL(lp->debugfs_root)) - debugfs_remove_recursive(lp->debugfs_root); + debugfs_remove_recursive(lp->debugfs_root); cancel_delayed_work_sync(&lp->work); destroy_workqueue(lp->wqueue); diff --git a/drivers/net/ieee802154/ca8210.c b/drivers/net/ieee802154/ca8210.c index 58299fb666ed..0ff5a403a8dc 100644 --- a/drivers/net/ieee802154/ca8210.c +++ b/drivers/net/ieee802154/ca8210.c @@ -634,10 +634,9 @@ static int ca8210_test_int_driver_write( for (i = 0; i < len; i++) dev_dbg(&priv->spi->dev, "%#03x\n", buf[i]); - fifo_buffer = kmalloc(len, GFP_KERNEL); + fifo_buffer = kmemdup(buf, len, GFP_KERNEL); if (!fifo_buffer) return -ENOMEM; - memcpy(fifo_buffer, buf, len); kfifo_in(&test->up_fifo, &fifo_buffer, 4); wake_up_interruptible(&priv->test.readq); @@ -3044,8 +3043,7 @@ static void ca8210_test_interface_clear(struct ca8210_priv *priv) { struct ca8210_test *test = &priv->test; - if (!IS_ERR(test->ca8210_dfs_spi_int)) - debugfs_remove(test->ca8210_dfs_spi_int); + debugfs_remove(test->ca8210_dfs_spi_int); kfifo_free(&test->up_fifo); dev_info(&priv->spi->dev, "Test interface removed\n"); } diff --git a/drivers/net/ieee802154/mac802154_hwsim.c b/drivers/net/ieee802154/mac802154_hwsim.c index bf70ab892e69..51b5198d5943 100644 --- a/drivers/net/ieee802154/mac802154_hwsim.c +++ b/drivers/net/ieee802154/mac802154_hwsim.c @@ -37,8 +37,6 @@ MODULE_LICENSE("GPL"); static LIST_HEAD(hwsim_phys); static DEFINE_MUTEX(hwsim_phys_lock); -static LIST_HEAD(hwsim_ifup_phys); - static struct platform_device *mac802154hwsim_dev; /* MAC802154_HWSIM netlink family */ @@ -85,7 +83,6 @@ struct hwsim_phy { struct list_head edges; 
struct list_head list; - struct list_head list_ifup; }; static int hwsim_add_one(struct genl_info *info, struct device *dev, @@ -159,9 +156,6 @@ static int hwsim_hw_start(struct ieee802154_hw *hw) struct hwsim_phy *phy = hw->priv; phy->suspended = false; - list_add_rcu(&phy->list_ifup, &hwsim_ifup_phys); - synchronize_rcu(); - return 0; } @@ -170,8 +164,6 @@ static void hwsim_hw_stop(struct ieee802154_hw *hw) struct hwsim_phy *phy = hw->priv; phy->suspended = true; - list_del_rcu(&phy->list_ifup); - synchronize_rcu(); } static int diff --git a/drivers/net/ieee802154/mcr20a.c b/drivers/net/ieee802154/mcr20a.c index e428277781ac..44de81e5f140 100644 --- a/drivers/net/ieee802154/mcr20a.c +++ b/drivers/net/ieee802154/mcr20a.c @@ -132,11 +132,6 @@ static const struct reg_sequence mar20a_iar_overwrites[] = { }; #define MCR20A_VALID_CHANNELS (0x07FFF800) - -struct mcr20a_platform_data { - int rst_gpio; -}; - #define MCR20A_MAX_BUF (127) #define printdev(X) (&X->spi->dev) @@ -412,7 +407,6 @@ struct mcr20a_local { struct spi_device *spi; struct ieee802154_hw *hw; - struct mcr20a_platform_data *pdata; struct regmap *regmap_dar; struct regmap *regmap_iar; @@ -903,19 +897,19 @@ mcr20a_irq_clean_complete(void *context) switch (seq_state) { /* TX IRQ, RX IRQ and SEQ IRQ */ - case (0x03): + case (DAR_IRQSTS1_TXIRQ | DAR_IRQSTS1_SEQIRQ): if (lp->is_tx) { lp->is_tx = 0; dev_dbg(printdev(lp), "TX is done. No ACK\n"); mcr20a_handle_tx_complete(lp); } break; - case (0x05): + case (DAR_IRQSTS1_RXIRQ | DAR_IRQSTS1_SEQIRQ): /* rx is starting */ dev_dbg(printdev(lp), "RX is starting\n"); mcr20a_handle_rx(lp); break; - case (0x07): + case (DAR_IRQSTS1_RXIRQ | DAR_IRQSTS1_TXIRQ | DAR_IRQSTS1_SEQIRQ): if (lp->is_tx) { /* tx is done */ lp->is_tx = 0; @@ -927,7 +921,7 @@ mcr20a_irq_clean_complete(void *context) mcr20a_handle_rx(lp); } break; - case (0x01): + case (DAR_IRQSTS1_SEQIRQ): if (lp->is_tx) { dev_dbg(printdev(lp), "TX is starting\n"); mcr20a_handle_tx(lp); @@ -976,20 +970,6 @@ static irqreturn_t mcr20a_irq_isr(int irq, void *data) return IRQ_HANDLED; } -static int mcr20a_get_platform_data(struct spi_device *spi, - struct mcr20a_platform_data *pdata) -{ - int ret = 0; - - if (!spi->dev.of_node) - return -EINVAL; - - pdata->rst_gpio = of_get_named_gpio(spi->dev.of_node, "rst_b-gpio", 0); - dev_dbg(&spi->dev, "rst_b-gpio: %d\n", pdata->rst_gpio); - - return ret; -} - static void mcr20a_hw_setup(struct mcr20a_local *lp) { u8 i; @@ -1249,7 +1229,7 @@ mcr20a_probe(struct spi_device *spi) { struct ieee802154_hw *hw; struct mcr20a_local *lp; - struct mcr20a_platform_data *pdata; + struct gpio_desc *rst_b; int irq_type; int ret = -ENOMEM; @@ -1260,48 +1240,32 @@ mcr20a_probe(struct spi_device *spi) return -EINVAL; } - pdata = kmalloc(sizeof(*pdata), GFP_KERNEL); - if (!pdata) - return -ENOMEM; - - /* set mcr20a platform data */ - ret = mcr20a_get_platform_data(spi, pdata); - if (ret < 0) { - dev_crit(&spi->dev, "mcr20a_get_platform_data failed.\n"); - goto free_pdata; - } - - /* init reset gpio */ - if (gpio_is_valid(pdata->rst_gpio)) { - ret = devm_gpio_request_one(&spi->dev, pdata->rst_gpio, - GPIOF_OUT_INIT_HIGH, "reset"); - if (ret) - goto free_pdata; + rst_b = devm_gpiod_get(&spi->dev, "rst_b", GPIOD_OUT_HIGH); + if (IS_ERR(rst_b)) { + ret = PTR_ERR(rst_b); + if (ret != -EPROBE_DEFER) + dev_err(&spi->dev, "Failed to get 'rst_b' gpio: %d", ret); + return ret; } /* reset mcr20a */ - if (gpio_is_valid(pdata->rst_gpio)) { - usleep_range(10, 20); - gpio_set_value_cansleep(pdata->rst_gpio, 0); - usleep_range(10, 20); - 
gpio_set_value_cansleep(pdata->rst_gpio, 1); - usleep_range(120, 240); - } + usleep_range(10, 20); + gpiod_set_value_cansleep(rst_b, 1); + usleep_range(10, 20); + gpiod_set_value_cansleep(rst_b, 0); + usleep_range(120, 240); /* allocate ieee802154_hw and private data */ hw = ieee802154_alloc_hw(sizeof(*lp), &mcr20a_hw_ops); if (!hw) { dev_crit(&spi->dev, "ieee802154_alloc_hw failed\n"); - ret = -ENOMEM; - goto free_pdata; + return ret; } /* init mcr20a local data */ lp = hw->priv; lp->hw = hw; lp->spi = spi; - lp->spi->dev.platform_data = pdata; - lp->pdata = pdata; /* init ieee802154_hw */ hw->parent = &spi->dev; @@ -1370,8 +1334,6 @@ mcr20a_probe(struct spi_device *spi) free_dev: ieee802154_free_hw(lp->hw); -free_pdata: - kfree(pdata); return ret; } diff --git a/drivers/net/netdevsim/bpf.c b/drivers/net/netdevsim/bpf.c index 81444208b216..cb3518474f0e 100644 --- a/drivers/net/netdevsim/bpf.c +++ b/drivers/net/netdevsim/bpf.c @@ -86,8 +86,14 @@ nsim_bpf_verify_insn(struct bpf_verifier_env *env, int insn_idx, int prev_insn) return 0; } +static int nsim_bpf_finalize(struct bpf_verifier_env *env) +{ + return 0; +} + static const struct bpf_prog_offload_ops nsim_bpf_analyzer_ops = { - .insn_hook = nsim_bpf_verify_insn, + .insn_hook = nsim_bpf_verify_insn, + .finalize = nsim_bpf_finalize, }; static bool nsim_xdp_offload_active(struct netdevsim *ns) diff --git a/drivers/net/phy/mscc.c b/drivers/net/phy/mscc.c index 7d0384e26c99..bffe077dc75f 100644 --- a/drivers/net/phy/mscc.c +++ b/drivers/net/phy/mscc.c @@ -6,6 +6,8 @@ * Copyright (c) 2016 Microsemi Corporation */ +#include <linux/firmware.h> +#include <linux/jiffies.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/mdio.h> @@ -32,6 +34,15 @@ enum rgmii_rx_clock_delay { #define DISABLE_HP_AUTO_MDIX_MASK 0x0080 #define DISABLE_PAIR_SWAP_CORR_MASK 0x0020 #define DISABLE_POLARITY_CORR_MASK 0x0010 +#define PARALLEL_DET_IGNORE_ADVERTISED 0x0008 + +#define MSCC_PHY_EXT_CNTL_STATUS 22 +#define SMI_BROADCAST_WR_EN 0x0001 + +#define MSCC_PHY_ERR_RX_CNT 19 +#define MSCC_PHY_ERR_FALSE_CARRIER_CNT 20 +#define MSCC_PHY_ERR_LINK_DISCONNECT_CNT 21 +#define ERR_CNT_MASK GENMASK(7, 0) #define MSCC_PHY_EXT_PHY_CNTL_1 23 #define MAC_IF_SELECTION_MASK 0x1800 @@ -39,7 +50,22 @@ enum rgmii_rx_clock_delay { #define MAC_IF_SELECTION_RMII 1 #define MAC_IF_SELECTION_RGMII 2 #define MAC_IF_SELECTION_POS 11 +#define VSC8584_MAC_IF_SELECTION_MASK 0x1000 +#define VSC8584_MAC_IF_SELECTION_SGMII 0 +#define VSC8584_MAC_IF_SELECTION_1000BASEX 1 +#define VSC8584_MAC_IF_SELECTION_POS 12 #define FAR_END_LOOPBACK_MODE_MASK 0x0008 +#define MEDIA_OP_MODE_MASK 0x0700 +#define MEDIA_OP_MODE_COPPER 0 +#define MEDIA_OP_MODE_SERDES 1 +#define MEDIA_OP_MODE_1000BASEX 2 +#define MEDIA_OP_MODE_100BASEFX 3 +#define MEDIA_OP_MODE_AMS_COPPER_SERDES 5 +#define MEDIA_OP_MODE_AMS_COPPER_1000BASEX 6 +#define MEDIA_OP_MODE_AMS_COPPER_100BASEFX 7 +#define MEDIA_OP_MODE_POS 8 + +#define MSCC_PHY_EXT_PHY_CNTL_2 24 #define MII_VSC85XX_INT_MASK 25 #define MII_VSC85XX_INT_MASK_MASK 0xa000 @@ -62,19 +88,40 @@ enum rgmii_rx_clock_delay { #define MSCC_PHY_PAGE_STANDARD 0x0000 /* Standard registers */ #define MSCC_PHY_PAGE_EXTENDED 0x0001 /* Extended registers */ #define MSCC_PHY_PAGE_EXTENDED_2 0x0002 /* Extended reg - page 2 */ +#define MSCC_PHY_PAGE_EXTENDED_3 0x0003 /* Extended reg - page 3 */ +#define MSCC_PHY_PAGE_EXTENDED_4 0x0004 /* Extended reg - page 4 */ +/* Extended reg - GPIO; this is a bank of registers that are shared for all PHYs + * in the same package. 
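As a sketch of what "shared" means in practice (the phy_base_* helpers and register names are the ones this patch introduces further below), any write to this page affects the whole package, so access is funnelled through the package's base PHY under the MDIO bus lock:

	/* select the package-wide GPIO page on the base PHY */
	phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS,
		       MSCC_PHY_PAGE_EXTENDED_GPIO);
	/* this write is seen by every PHY in the package; a sketch,
	 * the patch itself does a read-modify-write here
	 */
	phy_base_write(phydev, MSCC_PHY_MAC_CFG_FASTLINK, MAC_CFG_QSGMII);
	/* always restore the standard page before returning */
	phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_STANDARD);
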
+ */ +#define MSCC_PHY_PAGE_EXTENDED_GPIO 0x0010 /* Extended reg - GPIO */ +#define MSCC_PHY_PAGE_TEST 0x2a30 /* Test reg */ +#define MSCC_PHY_PAGE_TR 0x52b5 /* Token ring registers */ /* Extended Page 1 Registers */ +#define MSCC_PHY_CU_MEDIA_CRC_VALID_CNT 18 +#define VALID_CRC_CNT_CRC_MASK GENMASK(13, 0) + #define MSCC_PHY_EXT_MODE_CNTL 19 #define FORCE_MDI_CROSSOVER_MASK 0x000C #define FORCE_MDI_CROSSOVER_MDIX 0x000C #define FORCE_MDI_CROSSOVER_MDI 0x0008 #define MSCC_PHY_ACTIPHY_CNTL 20 +#define PHY_ADDR_REVERSED 0x0200 #define DOWNSHIFT_CNTL_MASK 0x001C #define DOWNSHIFT_EN 0x0010 #define DOWNSHIFT_CNTL_POS 2 +#define MSCC_PHY_EXT_PHY_CNTL_4 23 +#define PHY_CNTL_4_ADDR_POS 11 + +#define MSCC_PHY_VERIPHY_CNTL_2 25 + +#define MSCC_PHY_VERIPHY_CNTL_3 26 + /* Extended Page 2 Registers */ +#define MSCC_PHY_CU_PMD_TX_CNTL 16 + #define MSCC_PHY_RGMII_CNTL 20 #define RGMII_RX_CLK_DELAY_MASK 0x0070 #define RGMII_RX_CLK_DELAY_POS 4 @@ -90,11 +137,90 @@ enum rgmii_rx_clock_delay { #define SECURE_ON_ENABLE 0x8000 #define SECURE_ON_PASSWD_LEN_4 0x4000 +/* Extended Page 3 Registers */ +#define MSCC_PHY_SERDES_TX_VALID_CNT 21 +#define MSCC_PHY_SERDES_TX_CRC_ERR_CNT 22 +#define MSCC_PHY_SERDES_RX_VALID_CNT 28 +#define MSCC_PHY_SERDES_RX_CRC_ERR_CNT 29 + +/* Extended page GPIO Registers */ +#define MSCC_DW8051_CNTL_STATUS 0 +#define MICRO_NSOFT_RESET 0x8000 +#define RUN_FROM_INT_ROM 0x4000 +#define AUTOINC_ADDR 0x2000 +#define PATCH_RAM_CLK 0x1000 +#define MICRO_PATCH_EN 0x0080 +#define DW8051_CLK_EN 0x0010 +#define MICRO_CLK_EN 0x0008 +#define MICRO_CLK_DIVIDE(x) ((x) >> 1) +#define MSCC_DW8051_VLD_MASK 0xf1ff + +/* x Address in range 1-4 */ +#define MSCC_TRAP_ROM_ADDR(x) ((x) * 2 + 1) +#define MSCC_PATCH_RAM_ADDR(x) (((x) + 1) * 2) +#define MSCC_INT_MEM_ADDR 11 + +#define MSCC_INT_MEM_CNTL 12 +#define READ_SFR 0x6000 +#define READ_PRAM 0x4000 +#define READ_ROM 0x2000 +#define READ_RAM 0x0000 +#define INT_MEM_WRITE_EN 0x1000 +#define EN_PATCH_RAM_TRAP_ADDR(x) (0x0100 << ((x) - 1)) +#define INT_MEM_DATA_M 0x00ff +#define INT_MEM_DATA(x) (INT_MEM_DATA_M & (x)) + +#define MSCC_PHY_PROC_CMD 18 +#define PROC_CMD_NCOMPLETED 0x8000 +#define PROC_CMD_FAILED 0x4000 +#define PROC_CMD_SGMII_PORT(x) ((x) << 8) +#define PROC_CMD_FIBER_PORT(x) (0x0100 << (x) % 4) +#define PROC_CMD_QSGMII_PORT 0x0c00 +#define PROC_CMD_RST_CONF_PORT 0x0080 +#define PROC_CMD_RECONF_PORT 0x0000 +#define PROC_CMD_READ_MOD_WRITE_PORT 0x0040 +#define PROC_CMD_WRITE 0x0040 +#define PROC_CMD_READ 0x0000 +#define PROC_CMD_FIBER_DISABLE 0x0020 +#define PROC_CMD_FIBER_100BASE_FX 0x0010 +#define PROC_CMD_FIBER_1000BASE_X 0x0000 +#define PROC_CMD_SGMII_MAC 0x0030 +#define PROC_CMD_QSGMII_MAC 0x0020 +#define PROC_CMD_NO_MAC_CONF 0x0000 +#define PROC_CMD_1588_DEFAULT_INIT 0x0010 +#define PROC_CMD_NOP 0x000f +#define PROC_CMD_PHY_INIT 0x000a +#define PROC_CMD_CRC16 0x0008 +#define PROC_CMD_FIBER_MEDIA_CONF 0x0001 +#define PROC_CMD_MCB_ACCESS_MAC_CONF 0x0000 +#define PROC_CMD_NCOMPLETED_TIMEOUT_MS 500 + +#define MSCC_PHY_MAC_CFG_FASTLINK 19 +#define MAC_CFG_MASK 0xc000 +#define MAC_CFG_SGMII 0x0000 +#define MAC_CFG_QSGMII 0x4000 + +/* Test page Registers */ +#define MSCC_PHY_TEST_PAGE_5 5 +#define MSCC_PHY_TEST_PAGE_8 8 +#define MSCC_PHY_TEST_PAGE_9 9 +#define MSCC_PHY_TEST_PAGE_20 20 +#define MSCC_PHY_TEST_PAGE_24 24 + +/* Token ring page Registers */ +#define MSCC_PHY_TR_CNTL 16 +#define TR_WRITE 0x8000 +#define TR_ADDR(x) (0x7fff & (x)) +#define MSCC_PHY_TR_LSB 17 +#define MSCC_PHY_TR_MSB 18 + /* Microsemi PHY ID's */ #define PHY_ID_VSC8530 
0x00070560 #define PHY_ID_VSC8531 0x00070570 #define PHY_ID_VSC8540 0x00070760 #define PHY_ID_VSC8541 0x00070770 +#define PHY_ID_VSC8574 0x000704a0 +#define PHY_ID_VSC8584 0x000707c0 #define MSCC_VDDMAC_1500 1500 #define MSCC_VDDMAC_1800 1800 @@ -104,6 +230,24 @@ enum rgmii_rx_clock_delay { #define DOWNSHIFT_COUNT_MAX 5 #define MAX_LEDS 4 + +#define VSC8584_SUPP_LED_MODES (BIT(VSC8531_LINK_ACTIVITY) | \ + BIT(VSC8531_LINK_1000_ACTIVITY) | \ + BIT(VSC8531_LINK_100_ACTIVITY) | \ + BIT(VSC8531_LINK_10_ACTIVITY) | \ + BIT(VSC8531_LINK_100_1000_ACTIVITY) | \ + BIT(VSC8531_LINK_10_1000_ACTIVITY) | \ + BIT(VSC8531_LINK_10_100_ACTIVITY) | \ + BIT(VSC8584_LINK_100FX_1000X_ACTIVITY) | \ + BIT(VSC8531_DUPLEX_COLLISION) | \ + BIT(VSC8531_COLLISION) | \ + BIT(VSC8531_ACTIVITY) | \ + BIT(VSC8584_100FX_1000X_ACTIVITY) | \ + BIT(VSC8531_AUTONEG_FAULT) | \ + BIT(VSC8531_SERIAL_MODE) | \ + BIT(VSC8531_FORCE_LED_OFF) | \ + BIT(VSC8531_FORCE_LED_ON)) + #define VSC85XX_SUPP_LED_MODES (BIT(VSC8531_LINK_ACTIVITY) | \ BIT(VSC8531_LINK_1000_ACTIVITY) | \ BIT(VSC8531_LINK_100_ACTIVITY) | \ @@ -119,11 +263,120 @@ enum rgmii_rx_clock_delay { BIT(VSC8531_FORCE_LED_OFF) | \ BIT(VSC8531_FORCE_LED_ON)) +#define MSCC_VSC8584_REVB_INT8051_FW "mscc_vsc8584_revb_int8051_fb48.bin" +#define MSCC_VSC8584_REVB_INT8051_FW_START_ADDR 0xe800 +#define MSCC_VSC8584_REVB_INT8051_FW_CRC 0xfb48 + +#define MSCC_VSC8574_REVB_INT8051_FW "mscc_vsc8574_revb_int8051_29e8.bin" +#define MSCC_VSC8574_REVB_INT8051_FW_START_ADDR 0x4000 +#define MSCC_VSC8574_REVB_INT8051_FW_CRC 0x29e8 + +#define VSC8584_REVB 0x0001 +#define MSCC_DEV_REV_MASK GENMASK(3, 0) + +struct reg_val { + u16 reg; + u32 val; +}; + +struct vsc85xx_hw_stat { + const char *string; + u8 reg; + u16 page; + u16 mask; +}; + +static const struct vsc85xx_hw_stat vsc85xx_hw_stats[] = { + { + .string = "phy_receive_errors", + .reg = MSCC_PHY_ERR_RX_CNT, + .page = MSCC_PHY_PAGE_STANDARD, + .mask = ERR_CNT_MASK, + }, { + .string = "phy_false_carrier", + .reg = MSCC_PHY_ERR_FALSE_CARRIER_CNT, + .page = MSCC_PHY_PAGE_STANDARD, + .mask = ERR_CNT_MASK, + }, { + .string = "phy_cu_media_link_disconnect", + .reg = MSCC_PHY_ERR_LINK_DISCONNECT_CNT, + .page = MSCC_PHY_PAGE_STANDARD, + .mask = ERR_CNT_MASK, + }, { + .string = "phy_cu_media_crc_good_count", + .reg = MSCC_PHY_CU_MEDIA_CRC_VALID_CNT, + .page = MSCC_PHY_PAGE_EXTENDED, + .mask = VALID_CRC_CNT_CRC_MASK, + }, { + .string = "phy_cu_media_crc_error_count", + .reg = MSCC_PHY_EXT_PHY_CNTL_4, + .page = MSCC_PHY_PAGE_EXTENDED, + .mask = ERR_CNT_MASK, + }, +}; + +static const struct vsc85xx_hw_stat vsc8584_hw_stats[] = { + { + .string = "phy_receive_errors", + .reg = MSCC_PHY_ERR_RX_CNT, + .page = MSCC_PHY_PAGE_STANDARD, + .mask = ERR_CNT_MASK, + }, { + .string = "phy_false_carrier", + .reg = MSCC_PHY_ERR_FALSE_CARRIER_CNT, + .page = MSCC_PHY_PAGE_STANDARD, + .mask = ERR_CNT_MASK, + }, { + .string = "phy_cu_media_link_disconnect", + .reg = MSCC_PHY_ERR_LINK_DISCONNECT_CNT, + .page = MSCC_PHY_PAGE_STANDARD, + .mask = ERR_CNT_MASK, + }, { + .string = "phy_cu_media_crc_good_count", + .reg = MSCC_PHY_CU_MEDIA_CRC_VALID_CNT, + .page = MSCC_PHY_PAGE_EXTENDED, + .mask = VALID_CRC_CNT_CRC_MASK, + }, { + .string = "phy_cu_media_crc_error_count", + .reg = MSCC_PHY_EXT_PHY_CNTL_4, + .page = MSCC_PHY_PAGE_EXTENDED, + .mask = ERR_CNT_MASK, + }, { + .string = "phy_serdes_tx_good_pkt_count", + .reg = MSCC_PHY_SERDES_TX_VALID_CNT, + .page = MSCC_PHY_PAGE_EXTENDED_3, + .mask = VALID_CRC_CNT_CRC_MASK, + }, { + .string = "phy_serdes_tx_bad_crc_count", + .reg = 
MSCC_PHY_SERDES_TX_CRC_ERR_CNT, + .page = MSCC_PHY_PAGE_EXTENDED_3, + .mask = ERR_CNT_MASK, + }, { + .string = "phy_serdes_rx_good_pkt_count", + .reg = MSCC_PHY_SERDES_RX_VALID_CNT, + .page = MSCC_PHY_PAGE_EXTENDED_3, + .mask = VALID_CRC_CNT_CRC_MASK, + }, { + .string = "phy_serdes_rx_bad_crc_count", + .reg = MSCC_PHY_SERDES_RX_CRC_ERR_CNT, + .page = MSCC_PHY_PAGE_EXTENDED_3, + .mask = ERR_CNT_MASK, + }, +}; + struct vsc8531_private { int rate_magic; u16 supp_led_modes; u32 leds_mode[MAX_LEDS]; u8 nleds; + const struct vsc85xx_hw_stat *hw_stats; + u64 *stats; + int nstats; + bool pkg_init; + /* For multiple port PHYs; the MDIO address of the base PHY in the + * package. + */ + unsigned int base_addr; }; #ifdef CONFIG_OF_MDIO @@ -140,12 +393,66 @@ static const struct vsc8531_edge_rate_table edge_table[] = { }; #endif /* CONFIG_OF_MDIO */ -static int vsc85xx_phy_page_set(struct phy_device *phydev, u16 page) +static int vsc85xx_phy_read_page(struct phy_device *phydev) { - int rc; + return __phy_read(phydev, MSCC_EXT_PAGE_ACCESS); +} - rc = phy_write(phydev, MSCC_EXT_PAGE_ACCESS, page); - return rc; +static int vsc85xx_phy_write_page(struct phy_device *phydev, int page) +{ + return __phy_write(phydev, MSCC_EXT_PAGE_ACCESS, page); +} + +static int vsc85xx_get_sset_count(struct phy_device *phydev) +{ + struct vsc8531_private *priv = phydev->priv; + + if (!priv) + return 0; + + return priv->nstats; +} + +static void vsc85xx_get_strings(struct phy_device *phydev, u8 *data) +{ + struct vsc8531_private *priv = phydev->priv; + int i; + + if (!priv) + return; + + for (i = 0; i < priv->nstats; i++) + strlcpy(data + i * ETH_GSTRING_LEN, priv->hw_stats[i].string, + ETH_GSTRING_LEN); +} + +static u64 vsc85xx_get_stat(struct phy_device *phydev, int i) +{ + struct vsc8531_private *priv = phydev->priv; + int val; + + val = phy_read_paged(phydev, priv->hw_stats[i].page, + priv->hw_stats[i].reg); + if (val < 0) + return U64_MAX; + + val = val & priv->hw_stats[i].mask; + priv->stats[i] += val; + + return priv->stats[i]; +} + +static void vsc85xx_get_stats(struct phy_device *phydev, + struct ethtool_stats *stats, u64 *data) +{ + struct vsc8531_private *priv = phydev->priv; + int i; + + if (!priv) + return; + + for (i = 0; i < priv->nstats; i++) + data[i] = vsc85xx_get_stat(phydev, i); } static int vsc85xx_led_cntl_set(struct phy_device *phydev, @@ -184,7 +491,7 @@ static int vsc85xx_mdix_set(struct phy_device *phydev, u8 mdix) u16 reg_val; reg_val = phy_read(phydev, MSCC_PHY_BYPASS_CONTROL); - if ((mdix == ETH_TP_MDI) || (mdix == ETH_TP_MDI_X)) { + if (mdix == ETH_TP_MDI || mdix == ETH_TP_MDI_X) { reg_val |= (DISABLE_PAIR_SWAP_CORR_MASK | DISABLE_POLARITY_CORR_MASK | DISABLE_HP_AUTO_MDIX_MASK); @@ -194,25 +501,20 @@ static int vsc85xx_mdix_set(struct phy_device *phydev, u8 mdix) DISABLE_HP_AUTO_MDIX_MASK); } rc = phy_write(phydev, MSCC_PHY_BYPASS_CONTROL, reg_val); - if (rc != 0) + if (rc) return rc; - rc = vsc85xx_phy_page_set(phydev, MSCC_PHY_PAGE_EXTENDED); - if (rc != 0) - return rc; + reg_val = 0; - reg_val = phy_read(phydev, MSCC_PHY_EXT_MODE_CNTL); - reg_val &= ~(FORCE_MDI_CROSSOVER_MASK); if (mdix == ETH_TP_MDI) - reg_val |= FORCE_MDI_CROSSOVER_MDI; + reg_val = FORCE_MDI_CROSSOVER_MDI; else if (mdix == ETH_TP_MDI_X) - reg_val |= FORCE_MDI_CROSSOVER_MDIX; - rc = phy_write(phydev, MSCC_PHY_EXT_MODE_CNTL, reg_val); - if (rc != 0) - return rc; + reg_val = FORCE_MDI_CROSSOVER_MDIX; - rc = vsc85xx_phy_page_set(phydev, MSCC_PHY_PAGE_STANDARD); - if (rc != 0) + rc = phy_modify_paged(phydev, 
MSCC_PHY_PAGE_EXTENDED, + MSCC_PHY_EXT_MODE_CNTL, FORCE_MDI_CROSSOVER_MASK, + reg_val); + if (rc < 0) return rc; return genphy_restart_aneg(phydev); @@ -220,30 +522,24 @@ static int vsc85xx_mdix_set(struct phy_device *phydev, u8 mdix) static int vsc85xx_downshift_get(struct phy_device *phydev, u8 *count) { - int rc; u16 reg_val; - rc = vsc85xx_phy_page_set(phydev, MSCC_PHY_PAGE_EXTENDED); - if (rc != 0) - goto out; + reg_val = phy_read_paged(phydev, MSCC_PHY_PAGE_EXTENDED, + MSCC_PHY_ACTIPHY_CNTL); + if (reg_val < 0) + return reg_val; - reg_val = phy_read(phydev, MSCC_PHY_ACTIPHY_CNTL); reg_val &= DOWNSHIFT_CNTL_MASK; if (!(reg_val & DOWNSHIFT_EN)) *count = DOWNSHIFT_DEV_DISABLE; else *count = ((reg_val & ~DOWNSHIFT_EN) >> DOWNSHIFT_CNTL_POS) + 2; - rc = vsc85xx_phy_page_set(phydev, MSCC_PHY_PAGE_STANDARD); -out: - return rc; + return 0; } static int vsc85xx_downshift_set(struct phy_device *phydev, u8 count) { - int rc; - u16 reg_val; - if (count == DOWNSHIFT_DEV_DEFAULT_COUNT) { /* Default downshift count 3 (i.e. Bit3:2 = 0b01) */ count = ((1 << DOWNSHIFT_CNTL_POS) | DOWNSHIFT_EN); @@ -255,21 +551,9 @@ static int vsc85xx_downshift_set(struct phy_device *phydev, u8 count) count = (((count - 2) << DOWNSHIFT_CNTL_POS) | DOWNSHIFT_EN); } - rc = vsc85xx_phy_page_set(phydev, MSCC_PHY_PAGE_EXTENDED); - if (rc != 0) - goto out; - - reg_val = phy_read(phydev, MSCC_PHY_ACTIPHY_CNTL); - reg_val &= ~(DOWNSHIFT_CNTL_MASK); - reg_val |= count; - rc = phy_write(phydev, MSCC_PHY_ACTIPHY_CNTL, reg_val); - if (rc != 0) - goto out; - - rc = vsc85xx_phy_page_set(phydev, MSCC_PHY_PAGE_STANDARD); - -out: - return rc; + return phy_modify_paged(phydev, MSCC_PHY_PAGE_EXTENDED, + MSCC_PHY_ACTIPHY_CNTL, DOWNSHIFT_CNTL_MASK, + count); } static int vsc85xx_wol_set(struct phy_device *phydev, @@ -283,46 +567,48 @@ static int vsc85xx_wol_set(struct phy_device *phydev, u8 *mac_addr = phydev->attached_dev->dev_addr; mutex_lock(&phydev->lock); - rc = vsc85xx_phy_page_set(phydev, MSCC_PHY_PAGE_EXTENDED_2); - if (rc != 0) + rc = phy_select_page(phydev, MSCC_PHY_PAGE_EXTENDED_2); + if (rc < 0) { + rc = phy_restore_page(phydev, rc, rc); goto out_unlock; + } if (wol->wolopts & WAKE_MAGIC) { /* Store the device address for the magic packet */ for (i = 0; i < ARRAY_SIZE(pwd); i++) pwd[i] = mac_addr[5 - (i * 2 + 1)] << 8 | mac_addr[5 - i * 2]; - phy_write(phydev, MSCC_PHY_WOL_LOWER_MAC_ADDR, pwd[0]); - phy_write(phydev, MSCC_PHY_WOL_MID_MAC_ADDR, pwd[1]); - phy_write(phydev, MSCC_PHY_WOL_UPPER_MAC_ADDR, pwd[2]); + __phy_write(phydev, MSCC_PHY_WOL_LOWER_MAC_ADDR, pwd[0]); + __phy_write(phydev, MSCC_PHY_WOL_MID_MAC_ADDR, pwd[1]); + __phy_write(phydev, MSCC_PHY_WOL_UPPER_MAC_ADDR, pwd[2]); } else { - phy_write(phydev, MSCC_PHY_WOL_LOWER_MAC_ADDR, 0); - phy_write(phydev, MSCC_PHY_WOL_MID_MAC_ADDR, 0); - phy_write(phydev, MSCC_PHY_WOL_UPPER_MAC_ADDR, 0); + __phy_write(phydev, MSCC_PHY_WOL_LOWER_MAC_ADDR, 0); + __phy_write(phydev, MSCC_PHY_WOL_MID_MAC_ADDR, 0); + __phy_write(phydev, MSCC_PHY_WOL_UPPER_MAC_ADDR, 0); } if (wol_conf->wolopts & WAKE_MAGICSECURE) { for (i = 0; i < ARRAY_SIZE(pwd); i++) pwd[i] = wol_conf->sopass[5 - (i * 2 + 1)] << 8 | wol_conf->sopass[5 - i * 2]; - phy_write(phydev, MSCC_PHY_WOL_LOWER_PASSWD, pwd[0]); - phy_write(phydev, MSCC_PHY_WOL_MID_PASSWD, pwd[1]); - phy_write(phydev, MSCC_PHY_WOL_UPPER_PASSWD, pwd[2]); + __phy_write(phydev, MSCC_PHY_WOL_LOWER_PASSWD, pwd[0]); + __phy_write(phydev, MSCC_PHY_WOL_MID_PASSWD, pwd[1]); + __phy_write(phydev, MSCC_PHY_WOL_UPPER_PASSWD, pwd[2]); } else { - phy_write(phydev, 
MSCC_PHY_WOL_LOWER_PASSWD, 0); - phy_write(phydev, MSCC_PHY_WOL_MID_PASSWD, 0); - phy_write(phydev, MSCC_PHY_WOL_UPPER_PASSWD, 0); + __phy_write(phydev, MSCC_PHY_WOL_LOWER_PASSWD, 0); + __phy_write(phydev, MSCC_PHY_WOL_MID_PASSWD, 0); + __phy_write(phydev, MSCC_PHY_WOL_UPPER_PASSWD, 0); } - reg_val = phy_read(phydev, MSCC_PHY_WOL_MAC_CONTROL); + reg_val = __phy_read(phydev, MSCC_PHY_WOL_MAC_CONTROL); if (wol_conf->wolopts & WAKE_MAGICSECURE) reg_val |= SECURE_ON_ENABLE; else reg_val &= ~SECURE_ON_ENABLE; - phy_write(phydev, MSCC_PHY_WOL_MAC_CONTROL, reg_val); + __phy_write(phydev, MSCC_PHY_WOL_MAC_CONTROL, reg_val); - rc = vsc85xx_phy_page_set(phydev, MSCC_PHY_PAGE_STANDARD); - if (rc != 0) + rc = phy_restore_page(phydev, rc, rc > 0 ? 0 : rc); + if (rc < 0) goto out_unlock; if (wol->wolopts & WAKE_MAGIC) { @@ -330,14 +616,14 @@ static int vsc85xx_wol_set(struct phy_device *phydev, reg_val = phy_read(phydev, MII_VSC85XX_INT_MASK); reg_val |= MII_VSC85XX_INT_MASK_WOL; rc = phy_write(phydev, MII_VSC85XX_INT_MASK, reg_val); - if (rc != 0) + if (rc) goto out_unlock; } else { /* Disable the WOL interrupt */ reg_val = phy_read(phydev, MII_VSC85XX_INT_MASK); reg_val &= (~MII_VSC85XX_INT_MASK_WOL); rc = phy_write(phydev, MII_VSC85XX_INT_MASK, reg_val); - if (rc != 0) + if (rc) goto out_unlock; } /* Clear WOL iterrupt status */ @@ -359,17 +645,17 @@ static void vsc85xx_wol_get(struct phy_device *phydev, struct ethtool_wolinfo *wol_conf = wol; mutex_lock(&phydev->lock); - rc = vsc85xx_phy_page_set(phydev, MSCC_PHY_PAGE_EXTENDED_2); - if (rc != 0) + rc = phy_select_page(phydev, MSCC_PHY_PAGE_EXTENDED_2); + if (rc < 0) goto out_unlock; - reg_val = phy_read(phydev, MSCC_PHY_WOL_MAC_CONTROL); + reg_val = __phy_read(phydev, MSCC_PHY_WOL_MAC_CONTROL); if (reg_val & SECURE_ON_ENABLE) wol_conf->wolopts |= WAKE_MAGICSECURE; if (wol_conf->wolopts & WAKE_MAGICSECURE) { - pwd[0] = phy_read(phydev, MSCC_PHY_WOL_LOWER_PASSWD); - pwd[1] = phy_read(phydev, MSCC_PHY_WOL_MID_PASSWD); - pwd[2] = phy_read(phydev, MSCC_PHY_WOL_UPPER_PASSWD); + pwd[0] = __phy_read(phydev, MSCC_PHY_WOL_LOWER_PASSWD); + pwd[1] = __phy_read(phydev, MSCC_PHY_WOL_MID_PASSWD); + pwd[2] = __phy_read(phydev, MSCC_PHY_WOL_UPPER_PASSWD); for (i = 0; i < ARRAY_SIZE(pwd); i++) { wol_conf->sopass[5 - i * 2] = pwd[i] & 0x00ff; wol_conf->sopass[5 - (i * 2 + 1)] = (pwd[i] & 0xff00) @@ -377,9 +663,8 @@ static void vsc85xx_wol_get(struct phy_device *phydev, } } - rc = vsc85xx_phy_page_set(phydev, MSCC_PHY_PAGE_STANDARD); - out_unlock: + phy_restore_page(phydev, rc, rc > 0 ? 
0 : rc); mutex_unlock(&phydev->lock); } @@ -387,7 +672,7 @@ out_unlock: static int vsc85xx_edge_rate_magic_get(struct phy_device *phydev) { u32 vdd, sd; - int rc, i, j; + int i, j; struct device *dev = &phydev->mdio.dev; struct device_node *of_node = dev->of_node; u8 sd_array_size = ARRAY_SIZE(edge_table[0].slowdown); @@ -395,12 +680,10 @@ static int vsc85xx_edge_rate_magic_get(struct phy_device *phydev) if (!of_node) return -ENODEV; - rc = of_property_read_u32(of_node, "vsc8531,vddmac", &vdd); - if (rc != 0) + if (of_property_read_u32(of_node, "vsc8531,vddmac", &vdd)) vdd = MSCC_VDDMAC_3300; - rc = of_property_read_u32(of_node, "vsc8531,edge-slowdown", &sd); - if (rc != 0) + if (of_property_read_u32(of_node, "vsc8531,edge-slowdown", &sd)) sd = 0; for (i = 0; i < ARRAY_SIZE(edge_table); i++) @@ -474,21 +757,11 @@ static int vsc85xx_dt_led_modes_get(struct phy_device *phydev, static int vsc85xx_edge_rate_cntl_set(struct phy_device *phydev, u8 edge_rate) { int rc; - u16 reg_val; mutex_lock(&phydev->lock); - rc = vsc85xx_phy_page_set(phydev, MSCC_PHY_PAGE_EXTENDED_2); - if (rc != 0) - goto out_unlock; - reg_val = phy_read(phydev, MSCC_PHY_WOL_MAC_CONTROL); - reg_val &= ~(EDGE_RATE_CNTL_MASK); - reg_val |= (edge_rate << EDGE_RATE_CNTL_POS); - rc = phy_write(phydev, MSCC_PHY_WOL_MAC_CONTROL, reg_val); - if (rc != 0) - goto out_unlock; - rc = vsc85xx_phy_page_set(phydev, MSCC_PHY_PAGE_STANDARD); - -out_unlock: + rc = phy_modify_paged(phydev, MSCC_PHY_PAGE_EXTENDED_2, + MSCC_PHY_WOL_MAC_CONTROL, EDGE_RATE_CNTL_MASK, + edge_rate << EDGE_RATE_CNTL_POS); mutex_unlock(&phydev->lock); return rc; @@ -519,7 +792,7 @@ static int vsc85xx_mac_if_set(struct phy_device *phydev, goto out_unlock; } rc = phy_write(phydev, MSCC_PHY_EXT_PHY_CNTL_1, reg_val); - if (rc != 0) + if (rc) goto out_unlock; rc = genphy_soft_reset(phydev); @@ -537,17 +810,17 @@ static int vsc85xx_default_config(struct phy_device *phydev) phydev->mdix_ctrl = ETH_TP_MDI_AUTO; mutex_lock(&phydev->lock); - rc = vsc85xx_phy_page_set(phydev, MSCC_PHY_PAGE_EXTENDED_2); - if (rc != 0) + rc = phy_select_page(phydev, MSCC_PHY_PAGE_EXTENDED_2); + if (rc < 0) goto out_unlock; reg_val = phy_read(phydev, MSCC_PHY_RGMII_CNTL); reg_val &= ~(RGMII_RX_CLK_DELAY_MASK); reg_val |= (RGMII_RX_CLK_DELAY_1_1_NS << RGMII_RX_CLK_DELAY_POS); phy_write(phydev, MSCC_PHY_RGMII_CNTL, reg_val); - rc = vsc85xx_phy_page_set(phydev, MSCC_PHY_PAGE_STANDARD); out_unlock: + rc = phy_restore_page(phydev, rc, rc > 0 ? 
0 : rc); mutex_unlock(&phydev->lock); return rc; @@ -576,6 +849,809 @@ static int vsc85xx_set_tunable(struct phy_device *phydev, } } +/* mdiobus lock should be locked when using this function */ +static void vsc85xx_tr_write(struct phy_device *phydev, u16 addr, u32 val) +{ + __phy_write(phydev, MSCC_PHY_TR_MSB, val >> 16); + __phy_write(phydev, MSCC_PHY_TR_LSB, val & GENMASK(15, 0)); + __phy_write(phydev, MSCC_PHY_TR_CNTL, TR_WRITE | TR_ADDR(addr)); +} + +static int vsc85xx_eee_init_seq_set(struct phy_device *phydev) +{ + const struct reg_val init_eee[] = { + {0x0f82, 0x0012b00a}, + {0x1686, 0x00000004}, + {0x168c, 0x00d2c46f}, + {0x17a2, 0x00000620}, + {0x16a0, 0x00eeffdd}, + {0x16a6, 0x00071448}, + {0x16a4, 0x0013132f}, + {0x16a8, 0x00000000}, + {0x0ffc, 0x00c0a028}, + {0x0fe8, 0x0091b06c}, + {0x0fea, 0x00041600}, + {0x0f80, 0x00000af4}, + {0x0fec, 0x00901809}, + {0x0fee, 0x0000a6a1}, + {0x0ffe, 0x00b01007}, + {0x16b0, 0x00eeff00}, + {0x16b2, 0x00007000}, + {0x16b4, 0x00000814}, + }; + unsigned int i; + int oldpage; + + mutex_lock(&phydev->lock); + oldpage = phy_select_page(phydev, MSCC_PHY_PAGE_TR); + if (oldpage < 0) + goto out_unlock; + + for (i = 0; i < ARRAY_SIZE(init_eee); i++) + vsc85xx_tr_write(phydev, init_eee[i].reg, init_eee[i].val); + +out_unlock: + oldpage = phy_restore_page(phydev, oldpage, oldpage); + mutex_unlock(&phydev->lock); + + return oldpage; +} + +/* phydev->bus->mdio_lock should be locked when using this function */ +static int phy_base_write(struct phy_device *phydev, u32 regnum, u16 val) +{ + struct vsc8531_private *priv = phydev->priv; + + if (unlikely(!mutex_is_locked(&phydev->mdio.bus->mdio_lock))) { + dev_err(&phydev->mdio.dev, "MDIO bus lock not held!\n"); + dump_stack(); + } + + return __mdiobus_write(phydev->mdio.bus, priv->base_addr, regnum, val); +} + +/* phydev->bus->mdio_lock should be locked when using this function */ +static int phy_base_read(struct phy_device *phydev, u32 regnum) +{ + struct vsc8531_private *priv = phydev->priv; + + if (unlikely(!mutex_is_locked(&phydev->mdio.bus->mdio_lock))) { + dev_err(&phydev->mdio.dev, "MDIO bus lock not held!\n"); + dump_stack(); + } + + return __mdiobus_read(phydev->mdio.bus, priv->base_addr, regnum); +} + +/* bus->mdio_lock should be locked when using this function */ +static void vsc8584_csr_write(struct phy_device *phydev, u16 addr, u32 val) +{ + phy_base_write(phydev, MSCC_PHY_TR_MSB, val >> 16); + phy_base_write(phydev, MSCC_PHY_TR_LSB, val & GENMASK(15, 0)); + phy_base_write(phydev, MSCC_PHY_TR_CNTL, TR_WRITE | TR_ADDR(addr)); +} + +/* bus->mdio_lock should be locked when using this function */ +static int vsc8584_cmd(struct phy_device *phydev, u16 val) +{ + unsigned long deadline; + u16 reg_val; + + phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, + MSCC_PHY_PAGE_EXTENDED_GPIO); + + phy_base_write(phydev, MSCC_PHY_PROC_CMD, PROC_CMD_NCOMPLETED | val); + + deadline = jiffies + msecs_to_jiffies(PROC_CMD_NCOMPLETED_TIMEOUT_MS); + do { + reg_val = phy_base_read(phydev, MSCC_PHY_PROC_CMD); + } while (time_before(jiffies, deadline) && + (reg_val & PROC_CMD_NCOMPLETED) && + !(reg_val & PROC_CMD_FAILED)); + + phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_STANDARD); + + if (reg_val & PROC_CMD_FAILED) + return -EIO; + + if (reg_val & PROC_CMD_NCOMPLETED) + return -ETIMEDOUT; + + return 0; +} + +/* bus->mdio_lock should be locked when using this function */ +static int vsc8584_micro_deassert_reset(struct phy_device *phydev, + bool patch_en) +{ + u32 enable, release; + + phy_base_write(phydev, 
MSCC_EXT_PAGE_ACCESS, + MSCC_PHY_PAGE_EXTENDED_GPIO); + + enable = RUN_FROM_INT_ROM | MICRO_CLK_EN | DW8051_CLK_EN; + release = MICRO_NSOFT_RESET | RUN_FROM_INT_ROM | DW8051_CLK_EN | + MICRO_CLK_EN; + + if (patch_en) { + enable |= MICRO_PATCH_EN; + release |= MICRO_PATCH_EN; + + /* Clear all patches */ + phy_base_write(phydev, MSCC_INT_MEM_CNTL, READ_RAM); + } + + /* Enable 8051 Micro clock; CLEAR/SET patch present; disable PRAM clock + * override and addr. auto-incr; operate at 125 MHz + */ + phy_base_write(phydev, MSCC_DW8051_CNTL_STATUS, enable); + /* Release 8051 Micro SW reset */ + phy_base_write(phydev, MSCC_DW8051_CNTL_STATUS, release); + + phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_STANDARD); + + return 0; +} + +/* bus->mdio_lock should be locked when using this function */ +static int vsc8584_micro_assert_reset(struct phy_device *phydev) +{ + int ret; + u16 reg; + + ret = vsc8584_cmd(phydev, PROC_CMD_NOP); + if (ret) + return ret; + + phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, + MSCC_PHY_PAGE_EXTENDED_GPIO); + + reg = phy_base_read(phydev, MSCC_INT_MEM_CNTL); + reg &= ~EN_PATCH_RAM_TRAP_ADDR(4); + phy_base_write(phydev, MSCC_INT_MEM_CNTL, reg); + + phy_base_write(phydev, MSCC_TRAP_ROM_ADDR(4), 0x005b); + phy_base_write(phydev, MSCC_PATCH_RAM_ADDR(4), 0x005b); + + reg = phy_base_read(phydev, MSCC_INT_MEM_CNTL); + reg |= EN_PATCH_RAM_TRAP_ADDR(4); + phy_base_write(phydev, MSCC_INT_MEM_CNTL, reg); + + phy_base_write(phydev, MSCC_PHY_PROC_CMD, PROC_CMD_NOP); + + reg = phy_base_read(phydev, MSCC_DW8051_CNTL_STATUS); + reg &= ~MICRO_NSOFT_RESET; + phy_base_write(phydev, MSCC_DW8051_CNTL_STATUS, reg); + + phy_base_write(phydev, MSCC_PHY_PROC_CMD, PROC_CMD_MCB_ACCESS_MAC_CONF | + PROC_CMD_SGMII_PORT(0) | PROC_CMD_NO_MAC_CONF | + PROC_CMD_READ); + + reg = phy_base_read(phydev, MSCC_INT_MEM_CNTL); + reg &= ~EN_PATCH_RAM_TRAP_ADDR(4); + phy_base_write(phydev, MSCC_INT_MEM_CNTL, reg); + + phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_STANDARD); + + return 0; +} + +/* bus->mdio_lock should be locked when using this function */ +static int vsc8584_get_fw_crc(struct phy_device *phydev, u16 start, u16 size, + u16 *crc) +{ + int ret; + + phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_EXTENDED); + + phy_base_write(phydev, MSCC_PHY_VERIPHY_CNTL_2, start); + phy_base_write(phydev, MSCC_PHY_VERIPHY_CNTL_3, size); + + /* Start Micro command */ + ret = vsc8584_cmd(phydev, PROC_CMD_CRC16); + if (ret) + goto out; + + phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_EXTENDED); + + *crc = phy_base_read(phydev, MSCC_PHY_VERIPHY_CNTL_2); + +out: + phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_STANDARD); + + return ret; +} + +/* bus->mdio_lock should be locked when using this function */ +static int vsc8584_patch_fw(struct phy_device *phydev, + const struct firmware *fw) +{ + int i, ret; + + ret = vsc8584_micro_assert_reset(phydev); + if (ret) { + dev_err(&phydev->mdio.dev, + "%s: failed to assert reset of micro\n", __func__); + return ret; + } + + phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, + MSCC_PHY_PAGE_EXTENDED_GPIO); + + /* Hold 8051 Micro in SW Reset, Enable auto incr address and patch clock + * Disable the 8051 Micro clock + */ + phy_base_write(phydev, MSCC_DW8051_CNTL_STATUS, RUN_FROM_INT_ROM | + AUTOINC_ADDR | PATCH_RAM_CLK | MICRO_CLK_EN | + MICRO_CLK_DIVIDE(2)); + phy_base_write(phydev, MSCC_INT_MEM_CNTL, READ_PRAM | INT_MEM_WRITE_EN | + INT_MEM_DATA(2)); + phy_base_write(phydev, MSCC_INT_MEM_ADDR, 0x0000); + + for (i = 0; i < 
fw->size; i++) + phy_base_write(phydev, MSCC_INT_MEM_CNTL, READ_PRAM | + INT_MEM_WRITE_EN | fw->data[i]); + + /* Clear internal memory access */ + phy_base_write(phydev, MSCC_INT_MEM_CNTL, READ_RAM); + + phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_STANDARD); + + return 0; +} + +/* bus->mdio_lock should be locked when using this function */ +static bool vsc8574_is_serdes_init(struct phy_device *phydev) +{ + u16 reg; + bool ret; + + phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, + MSCC_PHY_PAGE_EXTENDED_GPIO); + + reg = phy_base_read(phydev, MSCC_TRAP_ROM_ADDR(1)); + if (reg != 0x3eb7) { + ret = false; + goto out; + } + + reg = phy_base_read(phydev, MSCC_PATCH_RAM_ADDR(1)); + if (reg != 0x4012) { + ret = false; + goto out; + } + + reg = phy_base_read(phydev, MSCC_INT_MEM_CNTL); + if (reg != EN_PATCH_RAM_TRAP_ADDR(1)) { + ret = false; + goto out; + } + + reg = phy_base_read(phydev, MSCC_DW8051_CNTL_STATUS); + if ((MICRO_NSOFT_RESET | RUN_FROM_INT_ROM | DW8051_CLK_EN | + MICRO_CLK_EN) != (reg & MSCC_DW8051_VLD_MASK)) { + ret = false; + goto out; + } + + ret = true; +out: + phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_STANDARD); + + return ret; +} + +/* bus->mdio_lock should be locked when using this function */ +static int vsc8574_config_pre_init(struct phy_device *phydev) +{ + const struct reg_val pre_init1[] = { + {0x0fae, 0x000401bd}, + {0x0fac, 0x000f000f}, + {0x17a0, 0x00a0f147}, + {0x0fe4, 0x00052f54}, + {0x1792, 0x0027303d}, + {0x07fe, 0x00000704}, + {0x0fe0, 0x00060150}, + {0x0f82, 0x0012b00a}, + {0x0f80, 0x00000d74}, + {0x02e0, 0x00000012}, + {0x03a2, 0x00050208}, + {0x03b2, 0x00009186}, + {0x0fb0, 0x000e3700}, + {0x1688, 0x00049f81}, + {0x0fd2, 0x0000ffff}, + {0x168a, 0x00039fa2}, + {0x1690, 0x0020640b}, + {0x0258, 0x00002220}, + {0x025a, 0x00002a20}, + {0x025c, 0x00003060}, + {0x025e, 0x00003fa0}, + {0x03a6, 0x0000e0f0}, + {0x0f92, 0x00001489}, + {0x16a2, 0x00007000}, + {0x16a6, 0x00071448}, + {0x16a0, 0x00eeffdd}, + {0x0fe8, 0x0091b06c}, + {0x0fea, 0x00041600}, + {0x16b0, 0x00eeff00}, + {0x16b2, 0x00007000}, + {0x16b4, 0x00000814}, + {0x0f90, 0x00688980}, + {0x03a4, 0x0000d8f0}, + {0x0fc0, 0x00000400}, + {0x07fa, 0x0050100f}, + {0x0796, 0x00000003}, + {0x07f8, 0x00c3ff98}, + {0x0fa4, 0x0018292a}, + {0x168c, 0x00d2c46f}, + {0x17a2, 0x00000620}, + {0x16a4, 0x0013132f}, + {0x16a8, 0x00000000}, + {0x0ffc, 0x00c0a028}, + {0x0fec, 0x00901c09}, + {0x0fee, 0x0004a6a1}, + {0x0ffe, 0x00b01807}, + }; + const struct reg_val pre_init2[] = { + {0x0486, 0x0008a518}, + {0x0488, 0x006dc696}, + {0x048a, 0x00000912}, + {0x048e, 0x00000db6}, + {0x049c, 0x00596596}, + {0x049e, 0x00000514}, + {0x04a2, 0x00410280}, + {0x04a4, 0x00000000}, + {0x04a6, 0x00000000}, + {0x04a8, 0x00000000}, + {0x04aa, 0x00000000}, + {0x04ae, 0x007df7dd}, + {0x04b0, 0x006d95d4}, + {0x04b2, 0x00492410}, + }; + struct device *dev = &phydev->mdio.dev; + const struct firmware *fw; + unsigned int i; + u16 crc, reg; + bool serdes_init; + int ret; + + phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_STANDARD); + + /* all writes below are broadcasted to all PHYs in the same package */ + reg = phy_base_read(phydev, MSCC_PHY_EXT_CNTL_STATUS); + reg |= SMI_BROADCAST_WR_EN; + phy_base_write(phydev, MSCC_PHY_EXT_CNTL_STATUS, reg); + + phy_base_write(phydev, MII_VSC85XX_INT_MASK, 0); + + /* The below register writes are tweaking analog and electrical + * configuration that were determined through characterization by PHY + * engineers. These don't mean anything more than "these are the best + * values". 
+ */ + phy_base_write(phydev, MSCC_PHY_EXT_PHY_CNTL_2, 0x0040); + + phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_TEST); + + phy_base_write(phydev, MSCC_PHY_TEST_PAGE_20, 0x4320); + phy_base_write(phydev, MSCC_PHY_TEST_PAGE_24, 0x0c00); + phy_base_write(phydev, MSCC_PHY_TEST_PAGE_9, 0x18ca); + phy_base_write(phydev, MSCC_PHY_TEST_PAGE_5, 0x1b20); + + reg = phy_base_read(phydev, MSCC_PHY_TEST_PAGE_8); + reg |= 0x8000; + phy_base_write(phydev, MSCC_PHY_TEST_PAGE_8, reg); + + phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_TR); + + for (i = 0; i < ARRAY_SIZE(pre_init1); i++) + vsc8584_csr_write(phydev, pre_init1[i].reg, pre_init1[i].val); + + phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_EXTENDED_2); + + phy_base_write(phydev, MSCC_PHY_CU_PMD_TX_CNTL, 0x028e); + + phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_TR); + + for (i = 0; i < ARRAY_SIZE(pre_init2); i++) + vsc8584_csr_write(phydev, pre_init2[i].reg, pre_init2[i].val); + + phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_TEST); + + reg = phy_base_read(phydev, MSCC_PHY_TEST_PAGE_8); + reg &= ~0x8000; + phy_base_write(phydev, MSCC_PHY_TEST_PAGE_8, reg); + + phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_STANDARD); + + /* end of write broadcasting */ + reg = phy_base_read(phydev, MSCC_PHY_EXT_CNTL_STATUS); + reg &= ~SMI_BROADCAST_WR_EN; + phy_base_write(phydev, MSCC_PHY_EXT_CNTL_STATUS, reg); + + ret = request_firmware(&fw, MSCC_VSC8574_REVB_INT8051_FW, dev); + if (ret) { + dev_err(dev, "failed to load firmware %s, ret: %d\n", + MSCC_VSC8574_REVB_INT8051_FW, ret); + return ret; + } + + /* Add one byte to size for the one added by the patch_fw function */ + ret = vsc8584_get_fw_crc(phydev, + MSCC_VSC8574_REVB_INT8051_FW_START_ADDR, + fw->size + 1, &crc); + if (ret) + goto out; + + if (crc == MSCC_VSC8574_REVB_INT8051_FW_CRC) { + serdes_init = vsc8574_is_serdes_init(phydev); + + if (!serdes_init) { + ret = vsc8584_micro_assert_reset(phydev); + if (ret) { + dev_err(dev, + "%s: failed to assert reset of micro\n", + __func__); + return ret; + } + } + } else { + dev_dbg(dev, "FW CRC is not the expected one, patching FW\n"); + + serdes_init = false; + + if (vsc8584_patch_fw(phydev, fw)) + dev_warn(dev, + "failed to patch FW, expect non-optimal device\n"); + } + + if (!serdes_init) { + phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, + MSCC_PHY_PAGE_EXTENDED_GPIO); + + phy_base_write(phydev, MSCC_TRAP_ROM_ADDR(1), 0x3eb7); + phy_base_write(phydev, MSCC_PATCH_RAM_ADDR(1), 0x4012); + phy_base_write(phydev, MSCC_INT_MEM_CNTL, + EN_PATCH_RAM_TRAP_ADDR(1)); + + vsc8584_micro_deassert_reset(phydev, false); + + /* Add one byte to size for the one added by the patch_fw + * function + */ + ret = vsc8584_get_fw_crc(phydev, + MSCC_VSC8574_REVB_INT8051_FW_START_ADDR, + fw->size + 1, &crc); + if (ret) + goto out; + + if (crc != MSCC_VSC8574_REVB_INT8051_FW_CRC) + dev_warn(dev, + "FW CRC after patching is not the expected one, expect non-optimal device\n"); + } + + phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, + MSCC_PHY_PAGE_EXTENDED_GPIO); + + ret = vsc8584_cmd(phydev, PROC_CMD_1588_DEFAULT_INIT | + PROC_CMD_PHY_INIT); + +out: + phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_STANDARD); + + release_firmware(fw); + + return ret; +} + +/* bus->mdio_lock should be locked when using this function */ +static int vsc8584_config_pre_init(struct phy_device *phydev) +{ + const struct reg_val pre_init1[] = { + {0x07fa, 0x0050100f}, + {0x1688, 0x00049f81}, + {0x0f90, 0x00688980}, + {0x03a4, 
0x0000d8f0}, + {0x0fc0, 0x00000400}, + {0x0f82, 0x0012b002}, + {0x1686, 0x00000004}, + {0x168c, 0x00d2c46f}, + {0x17a2, 0x00000620}, + {0x16a0, 0x00eeffdd}, + {0x16a6, 0x00071448}, + {0x16a4, 0x0013132f}, + {0x16a8, 0x00000000}, + {0x0ffc, 0x00c0a028}, + {0x0fe8, 0x0091b06c}, + {0x0fea, 0x00041600}, + {0x0f80, 0x00fffaff}, + {0x0fec, 0x00901809}, + {0x0ffe, 0x00b01007}, + {0x16b0, 0x00eeff00}, + {0x16b2, 0x00007000}, + {0x16b4, 0x00000814}, + }; + const struct reg_val pre_init2[] = { + {0x0486, 0x0008a518}, + {0x0488, 0x006dc696}, + {0x048a, 0x00000912}, + }; + const struct firmware *fw; + struct device *dev = &phydev->mdio.dev; + unsigned int i; + u16 crc, reg; + int ret; + + phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_STANDARD); + + /* all writes below are broadcasted to all PHYs in the same package */ + reg = phy_base_read(phydev, MSCC_PHY_EXT_CNTL_STATUS); + reg |= SMI_BROADCAST_WR_EN; + phy_base_write(phydev, MSCC_PHY_EXT_CNTL_STATUS, reg); + + phy_base_write(phydev, MII_VSC85XX_INT_MASK, 0); + + reg = phy_base_read(phydev, MSCC_PHY_BYPASS_CONTROL); + reg |= PARALLEL_DET_IGNORE_ADVERTISED; + phy_base_write(phydev, MSCC_PHY_BYPASS_CONTROL, reg); + + /* The below register writes are tweaking analog and electrical + * configuration that were determined through characterization by PHY + * engineers. These don't mean anything more than "these are the best + * values". + */ + phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_EXTENDED_3); + + phy_base_write(phydev, MSCC_PHY_SERDES_TX_CRC_ERR_CNT, 0x2000); + + phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_TEST); + + phy_base_write(phydev, MSCC_PHY_TEST_PAGE_5, 0x1f20); + + reg = phy_base_read(phydev, MSCC_PHY_TEST_PAGE_8); + reg |= 0x8000; + phy_base_write(phydev, MSCC_PHY_TEST_PAGE_8, reg); + + phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_TR); + + phy_base_write(phydev, MSCC_PHY_TR_CNTL, TR_WRITE | TR_ADDR(0x2fa4)); + + reg = phy_base_read(phydev, MSCC_PHY_TR_MSB); + reg &= ~0x007f; + reg |= 0x0019; + phy_base_write(phydev, MSCC_PHY_TR_MSB, reg); + + phy_base_write(phydev, MSCC_PHY_TR_CNTL, TR_WRITE | TR_ADDR(0x0fa4)); + + for (i = 0; i < ARRAY_SIZE(pre_init1); i++) + vsc8584_csr_write(phydev, pre_init1[i].reg, pre_init1[i].val); + + phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_EXTENDED_2); + + phy_base_write(phydev, MSCC_PHY_CU_PMD_TX_CNTL, 0x028e); + + phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_TR); + + for (i = 0; i < ARRAY_SIZE(pre_init2); i++) + vsc8584_csr_write(phydev, pre_init2[i].reg, pre_init2[i].val); + + phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_TEST); + + reg = phy_base_read(phydev, MSCC_PHY_TEST_PAGE_8); + reg &= ~0x8000; + phy_base_write(phydev, MSCC_PHY_TEST_PAGE_8, reg); + + phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_STANDARD); + + /* end of write broadcasting */ + reg = phy_base_read(phydev, MSCC_PHY_EXT_CNTL_STATUS); + reg &= ~SMI_BROADCAST_WR_EN; + phy_base_write(phydev, MSCC_PHY_EXT_CNTL_STATUS, reg); + + ret = request_firmware(&fw, MSCC_VSC8584_REVB_INT8051_FW, dev); + if (ret) { + dev_err(dev, "failed to load firmware %s, ret: %d\n", + MSCC_VSC8584_REVB_INT8051_FW, ret); + return ret; + } + + /* Add one byte to size for the one added by the patch_fw function */ + ret = vsc8584_get_fw_crc(phydev, + MSCC_VSC8584_REVB_INT8051_FW_START_ADDR, + fw->size + 1, &crc); + if (ret) + goto out; + + if (crc != MSCC_VSC8584_REVB_INT8051_FW_CRC) { + dev_dbg(dev, "FW CRC is not the expected one, patching FW\n"); + if 
(vsc8584_patch_fw(phydev, fw)) + dev_warn(dev, + "failed to patch FW, expect non-optimal device\n"); + } + + vsc8584_micro_deassert_reset(phydev, false); + + /* Add one byte to size for the one added by the patch_fw function */ + ret = vsc8584_get_fw_crc(phydev, + MSCC_VSC8584_REVB_INT8051_FW_START_ADDR, + fw->size + 1, &crc); + if (ret) + goto out; + + if (crc != MSCC_VSC8584_REVB_INT8051_FW_CRC) + dev_warn(dev, + "FW CRC after patching is not the expected one, expect non-optimal device\n"); + + ret = vsc8584_micro_assert_reset(phydev); + if (ret) + goto out; + + vsc8584_micro_deassert_reset(phydev, true); + +out: + phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_STANDARD); + + release_firmware(fw); + + return ret; +} + +/* Check if one PHY has already done the init of the parts common to all PHYs + * in the Quad PHY package. + */ +static bool vsc8584_is_pkg_init(struct phy_device *phydev, bool reversed) +{ + struct mdio_device **map = phydev->mdio.bus->mdio_map; + struct vsc8531_private *vsc8531; + struct phy_device *phy; + int i, addr; + + /* VSC8584 is a Quad PHY */ + for (i = 0; i < 4; i++) { + vsc8531 = phydev->priv; + + if (reversed) + addr = vsc8531->base_addr - i; + else + addr = vsc8531->base_addr + i; + + phy = container_of(map[addr], struct phy_device, mdio); + + if ((phy->phy_id & phydev->drv->phy_id_mask) != + (phydev->drv->phy_id & phydev->drv->phy_id_mask)) + continue; + + vsc8531 = phy->priv; + + if (vsc8531 && vsc8531->pkg_init) + return true; + } + + return false; +} + +static int vsc8584_config_init(struct phy_device *phydev) +{ + struct vsc8531_private *vsc8531 = phydev->priv; + u16 addr, val; + int ret, i; + + phydev->mdix_ctrl = ETH_TP_MDI_AUTO; + + mutex_lock(&phydev->mdio.bus->mdio_lock); + + __mdiobus_write(phydev->mdio.bus, phydev->mdio.addr, + MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_EXTENDED); + addr = __mdiobus_read(phydev->mdio.bus, phydev->mdio.addr, + MSCC_PHY_EXT_PHY_CNTL_4); + addr >>= PHY_CNTL_4_ADDR_POS; + + val = __mdiobus_read(phydev->mdio.bus, phydev->mdio.addr, + MSCC_PHY_ACTIPHY_CNTL); + if (val & PHY_ADDR_REVERSED) + vsc8531->base_addr = phydev->mdio.addr + addr; + else + vsc8531->base_addr = phydev->mdio.addr - addr; + + /* Some parts of the init sequence are identical for every PHY in the + * package. Some parts are modifying the GPIO register bank which is a + * set of registers that are affecting all PHYs, a few resetting the + * microprocessor common to all PHYs. The CRC check responsible of the + * checking the firmware within the 8051 microprocessor can only be + * accessed via the PHY whose internal address in the package is 0. + * All PHYs' interrupts mask register has to be zeroed before enabling + * any PHY's interrupt in this register. + * For all these reasons, we need to do the init sequence once and only + * once whatever is the first PHY in the package that is initialized and + * do the correct init sequence for all PHYs that are package-critical + * in this pre-init function. + */ + if (!vsc8584_is_pkg_init(phydev, val & PHY_ADDR_REVERSED ? 
1 : 0)) { + if ((phydev->phy_id & phydev->drv->phy_id_mask) == + (PHY_ID_VSC8574 & phydev->drv->phy_id_mask)) + ret = vsc8574_config_pre_init(phydev); + else if ((phydev->phy_id & phydev->drv->phy_id_mask) == + (PHY_ID_VSC8584 & phydev->drv->phy_id_mask)) + ret = vsc8584_config_pre_init(phydev); + else + ret = -EINVAL; + + if (ret) + goto err; + } + + vsc8531->pkg_init = true; + + phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, + MSCC_PHY_PAGE_EXTENDED_GPIO); + + val = phy_base_read(phydev, MSCC_PHY_MAC_CFG_FASTLINK); + val &= ~MAC_CFG_MASK; + if (phydev->interface == PHY_INTERFACE_MODE_QSGMII) + val |= MAC_CFG_QSGMII; + else + val |= MAC_CFG_SGMII; + + ret = phy_base_write(phydev, MSCC_PHY_MAC_CFG_FASTLINK, val); + if (ret) + goto err; + + val = PROC_CMD_MCB_ACCESS_MAC_CONF | PROC_CMD_RST_CONF_PORT | + PROC_CMD_READ_MOD_WRITE_PORT; + if (phydev->interface == PHY_INTERFACE_MODE_QSGMII) + val |= PROC_CMD_QSGMII_MAC; + else + val |= PROC_CMD_SGMII_MAC; + + ret = vsc8584_cmd(phydev, val); + if (ret) + goto err; + + usleep_range(10000, 20000); + + /* Disable SerDes for 100Base-FX */ + ret = vsc8584_cmd(phydev, PROC_CMD_FIBER_MEDIA_CONF | + PROC_CMD_FIBER_PORT(addr) | PROC_CMD_FIBER_DISABLE | + PROC_CMD_READ_MOD_WRITE_PORT | + PROC_CMD_RST_CONF_PORT | PROC_CMD_FIBER_100BASE_FX); + if (ret) + goto err; + + /* Disable SerDes for 1000Base-X */ + ret = vsc8584_cmd(phydev, PROC_CMD_FIBER_MEDIA_CONF | + PROC_CMD_FIBER_PORT(addr) | PROC_CMD_FIBER_DISABLE | + PROC_CMD_READ_MOD_WRITE_PORT | + PROC_CMD_RST_CONF_PORT | PROC_CMD_FIBER_1000BASE_X); + if (ret) + goto err; + + mutex_unlock(&phydev->mdio.bus->mdio_lock); + + phy_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_STANDARD); + + val = phy_read(phydev, MSCC_PHY_EXT_PHY_CNTL_1); + val &= ~(MEDIA_OP_MODE_MASK | VSC8584_MAC_IF_SELECTION_MASK); + val |= MEDIA_OP_MODE_COPPER | (VSC8584_MAC_IF_SELECTION_SGMII << + VSC8584_MAC_IF_SELECTION_POS); + ret = phy_write(phydev, MSCC_PHY_EXT_PHY_CNTL_1, val); + + ret = genphy_soft_reset(phydev); + if (ret) + return ret; + + for (i = 0; i < vsc8531->nleds; i++) { + ret = vsc85xx_led_cntl_set(phydev, i, vsc8531->leds_mode[i]); + if (ret) + return ret; + } + + return genphy_config_init(phydev); + +err: + mutex_unlock(&phydev->mdio.bus->mdio_lock); + return ret; +} + static int vsc85xx_config_init(struct phy_device *phydev) { int rc, i; @@ -593,15 +1669,27 @@ static int vsc85xx_config_init(struct phy_device *phydev) if (rc) return rc; + rc = vsc85xx_eee_init_seq_set(phydev); + if (rc) + return rc; + for (i = 0; i < vsc8531->nleds; i++) { rc = vsc85xx_led_cntl_set(phydev, i, vsc8531->leds_mode[i]); if (rc) return rc; } - rc = genphy_config_init(phydev); + return genphy_config_init(phydev); +} - return rc; +static int vsc8584_did_interrupt(struct phy_device *phydev) +{ + int rc = 0; + + if (phydev->interrupts == PHY_INTERRUPT_ENABLED) + rc = phy_read(phydev, MII_VSC85XX_INT_STATUS); + + return (rc < 0) ? 
0 : rc & MII_VSC85XX_INT_MASK_MASK; } static int vsc85xx_ack_interrupt(struct phy_device *phydev) @@ -653,6 +1741,61 @@ static int vsc85xx_read_status(struct phy_device *phydev) return genphy_read_status(phydev); } +static int vsc8574_probe(struct phy_device *phydev) +{ + struct vsc8531_private *vsc8531; + u32 default_mode[4] = {VSC8531_LINK_1000_ACTIVITY, + VSC8531_LINK_100_ACTIVITY, VSC8531_LINK_ACTIVITY, + VSC8531_DUPLEX_COLLISION}; + + vsc8531 = devm_kzalloc(&phydev->mdio.dev, sizeof(*vsc8531), GFP_KERNEL); + if (!vsc8531) + return -ENOMEM; + + phydev->priv = vsc8531; + + vsc8531->nleds = 4; + vsc8531->supp_led_modes = VSC8584_SUPP_LED_MODES; + vsc8531->hw_stats = vsc8584_hw_stats; + vsc8531->nstats = ARRAY_SIZE(vsc8584_hw_stats); + vsc8531->stats = devm_kmalloc_array(&phydev->mdio.dev, vsc8531->nstats, + sizeof(u64), GFP_KERNEL); + if (!vsc8531->stats) + return -ENOMEM; + + return vsc85xx_dt_led_modes_get(phydev, default_mode); +} + +static int vsc8584_probe(struct phy_device *phydev) +{ + struct vsc8531_private *vsc8531; + u32 default_mode[4] = {VSC8531_LINK_1000_ACTIVITY, + VSC8531_LINK_100_ACTIVITY, VSC8531_LINK_ACTIVITY, + VSC8531_DUPLEX_COLLISION}; + + if ((phydev->phy_id & MSCC_DEV_REV_MASK) != VSC8584_REVB) { + dev_err(&phydev->mdio.dev, "Only VSC8584 revB is supported.\n"); + return -ENOTSUPP; + } + + vsc8531 = devm_kzalloc(&phydev->mdio.dev, sizeof(*vsc8531), GFP_KERNEL); + if (!vsc8531) + return -ENOMEM; + + phydev->priv = vsc8531; + + vsc8531->nleds = 4; + vsc8531->supp_led_modes = VSC8584_SUPP_LED_MODES; + vsc8531->hw_stats = vsc8584_hw_stats; + vsc8531->nstats = ARRAY_SIZE(vsc8584_hw_stats); + vsc8531->stats = devm_kmalloc_array(&phydev->mdio.dev, vsc8531->nstats, + sizeof(u64), GFP_KERNEL); + if (!vsc8531->stats) + return -ENOMEM; + + return vsc85xx_dt_led_modes_get(phydev, default_mode); +} + static int vsc85xx_probe(struct phy_device *phydev) { struct vsc8531_private *vsc8531; @@ -673,6 +1816,12 @@ static int vsc85xx_probe(struct phy_device *phydev) vsc8531->rate_magic = rate_magic; vsc8531->nleds = 2; vsc8531->supp_led_modes = VSC85XX_SUPP_LED_MODES; + vsc8531->hw_stats = vsc85xx_hw_stats; + vsc8531->nstats = ARRAY_SIZE(vsc85xx_hw_stats); + vsc8531->stats = devm_kmalloc_array(&phydev->mdio.dev, vsc8531->nstats, + sizeof(u64), GFP_KERNEL); + if (!vsc8531->stats) + return -ENOMEM; return vsc85xx_dt_led_modes_get(phydev, default_mode); } @@ -699,6 +1848,11 @@ static struct phy_driver vsc85xx_driver[] = { .get_wol = &vsc85xx_wol_get, .get_tunable = &vsc85xx_get_tunable, .set_tunable = &vsc85xx_set_tunable, + .read_page = &vsc85xx_phy_read_page, + .write_page = &vsc85xx_phy_write_page, + .get_sset_count = &vsc85xx_get_sset_count, + .get_strings = &vsc85xx_get_strings, + .get_stats = &vsc85xx_get_stats, }, { .phy_id = PHY_ID_VSC8531, @@ -720,6 +1874,11 @@ static struct phy_driver vsc85xx_driver[] = { .get_wol = &vsc85xx_wol_get, .get_tunable = &vsc85xx_get_tunable, .set_tunable = &vsc85xx_set_tunable, + .read_page = &vsc85xx_phy_read_page, + .write_page = &vsc85xx_phy_write_page, + .get_sset_count = &vsc85xx_get_sset_count, + .get_strings = &vsc85xx_get_strings, + .get_stats = &vsc85xx_get_stats, }, { .phy_id = PHY_ID_VSC8540, @@ -741,6 +1900,11 @@ static struct phy_driver vsc85xx_driver[] = { .get_wol = &vsc85xx_wol_get, .get_tunable = &vsc85xx_get_tunable, .set_tunable = &vsc85xx_set_tunable, + .read_page = &vsc85xx_phy_read_page, + .write_page = &vsc85xx_phy_write_page, + .get_sset_count = &vsc85xx_get_sset_count, + .get_strings = &vsc85xx_get_strings, + .get_stats = 
&vsc85xx_get_stats, }, { .phy_id = PHY_ID_VSC8541, @@ -762,6 +1926,63 @@ static struct phy_driver vsc85xx_driver[] = { .get_wol = &vsc85xx_wol_get, .get_tunable = &vsc85xx_get_tunable, .set_tunable = &vsc85xx_set_tunable, + .read_page = &vsc85xx_phy_read_page, + .write_page = &vsc85xx_phy_write_page, + .get_sset_count = &vsc85xx_get_sset_count, + .get_strings = &vsc85xx_get_strings, + .get_stats = &vsc85xx_get_stats, +}, +{ + .phy_id = PHY_ID_VSC8574, + .name = "Microsemi GE VSC8574 SyncE", + .phy_id_mask = 0xfffffff0, + .features = PHY_GBIT_FEATURES, + .flags = PHY_HAS_INTERRUPT, + .soft_reset = &genphy_soft_reset, + .config_init = &vsc8584_config_init, + .config_aneg = &vsc85xx_config_aneg, + .aneg_done = &genphy_aneg_done, + .read_status = &vsc85xx_read_status, + .ack_interrupt = &vsc85xx_ack_interrupt, + .config_intr = &vsc85xx_config_intr, + .did_interrupt = &vsc8584_did_interrupt, + .suspend = &genphy_suspend, + .resume = &genphy_resume, + .probe = &vsc8574_probe, + .set_wol = &vsc85xx_wol_set, + .get_wol = &vsc85xx_wol_get, + .get_tunable = &vsc85xx_get_tunable, + .set_tunable = &vsc85xx_set_tunable, + .read_page = &vsc85xx_phy_read_page, + .write_page = &vsc85xx_phy_write_page, + .get_sset_count = &vsc85xx_get_sset_count, + .get_strings = &vsc85xx_get_strings, + .get_stats = &vsc85xx_get_stats, +}, +{ + .phy_id = PHY_ID_VSC8584, + .name = "Microsemi GE VSC8584 SyncE", + .phy_id_mask = 0xfffffff0, + .features = PHY_GBIT_FEATURES, + .flags = PHY_HAS_INTERRUPT, + .soft_reset = &genphy_soft_reset, + .config_init = &vsc8584_config_init, + .config_aneg = &vsc85xx_config_aneg, + .aneg_done = &genphy_aneg_done, + .read_status = &vsc85xx_read_status, + .ack_interrupt = &vsc85xx_ack_interrupt, + .config_intr = &vsc85xx_config_intr, + .did_interrupt = &vsc8584_did_interrupt, + .suspend = &genphy_suspend, + .resume = &genphy_resume, + .probe = &vsc8584_probe, + .get_tunable = &vsc85xx_get_tunable, + .set_tunable = &vsc85xx_set_tunable, + .read_page = &vsc85xx_phy_read_page, + .write_page = &vsc85xx_phy_write_page, + .get_sset_count = &vsc85xx_get_sset_count, + .get_strings = &vsc85xx_get_strings, + .get_stats = &vsc85xx_get_stats, } }; @@ -773,6 +1994,8 @@ static struct mdio_device_id __maybe_unused vsc85xx_tbl[] = { { PHY_ID_VSC8531, 0xfffffff0, }, { PHY_ID_VSC8540, 0xfffffff0, }, { PHY_ID_VSC8541, 0xfffffff0, }, + { PHY_ID_VSC8574, 0xfffffff0, }, + { PHY_ID_VSC8584, 0xfffffff0, }, { } }; diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c index f53ce65f45c5..43cb08dcce81 100644 --- a/drivers/net/phy/phy_device.c +++ b/drivers/net/phy/phy_device.c @@ -237,7 +237,12 @@ static bool mdio_bus_phy_may_suspend(struct phy_device *phydev) if (!netdev) return !phydev->suspended; - /* Don't suspend PHY if the attached netdev parent may wakeup. + if (netdev->wol_enabled) + return false; + + /* As long as not all affected network drivers support the + * wol_enabled flag, let's check for hints that WoL is enabled. + * Don't suspend PHY if the attached netdev parent may wake up. * The parent may point to a PCI device, as in tg3 driver. 
*/ if (netdev->dev.parent && device_may_wakeup(netdev->dev.parent)) @@ -1274,9 +1279,9 @@ void phy_detach(struct phy_device *phydev) sysfs_remove_link(&dev->dev.kobj, "phydev"); sysfs_remove_link(&phydev->mdio.dev.kobj, "attached_dev"); } + phy_suspend(phydev); phydev->attached_dev->phydev = NULL; phydev->attached_dev = NULL; - phy_suspend(phydev); phydev->phylink = NULL; phy_led_triggers_unregister(phydev); @@ -1310,12 +1315,13 @@ EXPORT_SYMBOL(phy_detach); int phy_suspend(struct phy_device *phydev) { struct phy_driver *phydrv = to_phy_driver(phydev->mdio.dev.driver); + struct net_device *netdev = phydev->attached_dev; struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL }; int ret = 0; /* If the device has WOL enabled, we cannot suspend the PHY */ phy_ethtool_get_wol(phydev, &wol); - if (wol.wolopts) + if (wol.wolopts || (netdev && netdev->wol_enabled)) return -EBUSY; if (phydev->drv && phydrv->suspend) diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c index b6993af5c9e4..9b8dd0d0ee42 100644 --- a/drivers/net/phy/phylink.c +++ b/drivers/net/phy/phylink.c @@ -690,6 +690,30 @@ static int phylink_bringup_phy(struct phylink *pl, struct phy_device *phy) return 0; } +static int __phylink_connect_phy(struct phylink *pl, struct phy_device *phy, + phy_interface_t interface) +{ + int ret; + + if (WARN_ON(pl->link_an_mode == MLO_AN_FIXED || + (pl->link_an_mode == MLO_AN_INBAND && + phy_interface_mode_is_8023z(interface)))) + return -EINVAL; + + if (pl->phydev) + return -EBUSY; + + ret = phy_attach_direct(pl->netdev, phy, 0, interface); + if (ret) + return ret; + + ret = phylink_bringup_phy(pl, phy); + if (ret) + phy_detach(phy); + + return ret; +} + /** * phylink_connect_phy() - connect a PHY to the phylink instance * @pl: a pointer to a &struct phylink returned from phylink_create() @@ -707,31 +731,13 @@ static int phylink_bringup_phy(struct phylink *pl, struct phy_device *phy) */ int phylink_connect_phy(struct phylink *pl, struct phy_device *phy) { - int ret; - - if (WARN_ON(pl->link_an_mode == MLO_AN_FIXED || - (pl->link_an_mode == MLO_AN_INBAND && - phy_interface_mode_is_8023z(pl->link_interface)))) - return -EINVAL; - - if (pl->phydev) - return -EBUSY; - /* Use PHY device/driver interface */ if (pl->link_interface == PHY_INTERFACE_MODE_NA) { pl->link_interface = phy->interface; pl->link_config.interface = pl->link_interface; } - ret = phy_attach_direct(pl->netdev, phy, 0, pl->link_interface); - if (ret) - return ret; - - ret = phylink_bringup_phy(pl, phy); - if (ret) - phy_detach(phy); - - return ret; + return __phylink_connect_phy(pl, phy, pl->link_interface); } EXPORT_SYMBOL_GPL(phylink_connect_phy); @@ -1648,7 +1654,9 @@ static void phylink_sfp_link_up(void *upstream) static int phylink_sfp_connect_phy(void *upstream, struct phy_device *phy) { - return phylink_connect_phy(upstream, phy); + struct phylink *pl = upstream; + + return __phylink_connect_phy(upstream, phy, pl->link_config.interface); } static void phylink_sfp_disconnect_phy(void *upstream) diff --git a/drivers/net/phy/sfp.c b/drivers/net/phy/sfp.c index 52fffb98fde9..fd8bb998ae52 100644 --- a/drivers/net/phy/sfp.c +++ b/drivers/net/phy/sfp.c @@ -163,8 +163,6 @@ static const enum gpiod_flags gpio_flags[] = { /* Give this long for the PHY to reset. 
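With this change, phy_suspend() refuses to power the PHY down when either the PHY's own WoL options or the netdev-level wol_enabled flag is set, since wake packets would otherwise be lost. A compact sketch of that gating, assuming a simplified device state (field names are illustrative):

        #include <errno.h>
        #include <stdbool.h>

        struct dev_state {
                unsigned int wolopts;   /* PHY-level Wake-on-LAN modes */
                bool wol_enabled;       /* MAC-level flag, as on the netdev */
        };

        /* Either WoL source must block the suspend; the caller treats
         * -EBUSY as "leave the PHY powered". */
        static int phy_try_suspend(const struct dev_state *d)
        {
                if (d->wolopts || d->wol_enabled)
                        return -EBUSY;
                /* ... invoke the driver's suspend callback here ... */
                return 0;
        }

Moving phy_suspend() before the attached_dev pointers are cleared in phy_detach() matters for the same reason: the suspend decision needs to see the netdev while it is still attached.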
*/ #define T_PHY_RESET_MS 50 -static DEFINE_MUTEX(sfp_mutex); - struct sff_data { unsigned int gpios; bool (*module_supported)(const struct sfp_eeprom_id *id); @@ -1098,8 +1096,11 @@ static int sfp_hwmon_insert(struct sfp *sfp) static void sfp_hwmon_remove(struct sfp *sfp) { - hwmon_device_unregister(sfp->hwmon_dev); - kfree(sfp->hwmon_name); + if (!IS_ERR_OR_NULL(sfp->hwmon_dev)) { + hwmon_device_unregister(sfp->hwmon_dev); + sfp->hwmon_dev = NULL; + kfree(sfp->hwmon_name); + } } #else static int sfp_hwmon_insert(struct sfp *sfp) diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c index 6a047d30e8c6..d887016e54b6 100644 --- a/drivers/net/team/team.c +++ b/drivers/net/team/team.c @@ -1167,6 +1167,12 @@ static int team_port_add(struct team *team, struct net_device *port_dev, return -EBUSY; } + if (dev == port_dev) { + NL_SET_ERR_MSG(extack, "Cannot enslave team device to itself"); + netdev_err(dev, "Cannot enslave team device to itself\n"); + return -EINVAL; + } + if (port_dev->features & NETIF_F_VLAN_CHALLENGED && vlan_uses_dev(dev)) { NL_SET_ERR_MSG(extack, "Device is VLAN challenged and team device has VLAN set up"); diff --git a/drivers/net/tun.c b/drivers/net/tun.c index 3eb88b7147f0..92fa35abba92 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -180,6 +180,7 @@ struct tun_file { }; struct napi_struct napi; bool napi_enabled; + bool napi_frags_enabled; struct mutex napi_mutex; /* Protects access to the above napi */ struct list_head next; struct tun_struct *detached; @@ -312,32 +313,32 @@ static int tun_napi_poll(struct napi_struct *napi, int budget) } static void tun_napi_init(struct tun_struct *tun, struct tun_file *tfile, - bool napi_en) + bool napi_en, bool napi_frags) { tfile->napi_enabled = napi_en; + tfile->napi_frags_enabled = napi_en && napi_frags; if (napi_en) { netif_napi_add(tun->dev, &tfile->napi, tun_napi_poll, NAPI_POLL_WEIGHT); napi_enable(&tfile->napi); - mutex_init(&tfile->napi_mutex); } } -static void tun_napi_disable(struct tun_struct *tun, struct tun_file *tfile) +static void tun_napi_disable(struct tun_file *tfile) { if (tfile->napi_enabled) napi_disable(&tfile->napi); } -static void tun_napi_del(struct tun_struct *tun, struct tun_file *tfile) +static void tun_napi_del(struct tun_file *tfile) { if (tfile->napi_enabled) netif_napi_del(&tfile->napi); } -static bool tun_napi_frags_enabled(const struct tun_struct *tun) +static bool tun_napi_frags_enabled(const struct tun_file *tfile) { - return READ_ONCE(tun->flags) & IFF_NAPI_FRAGS; + return tfile->napi_frags_enabled; } #ifdef CONFIG_TUN_VNET_CROSS_LE @@ -561,12 +562,11 @@ static inline void tun_flow_save_rps_rxhash(struct tun_flow_entry *e, u32 hash) e->rps_rxhash = hash; } -/* We try to identify a flow through its rxhash first. The reason that +/* We try to identify a flow through its rxhash. The reason that * we do not check rxq no. is because some cards(e.g 82599), chooses * the rxq based on the txq where the last packet of the flow comes. As * the userspace application move between processors, we may get a - * different rxq no. here. If we could not get rxhash, then we would - * hope the rxq no. may help here. + * different rxq no. here. 
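The sfp_hwmon_remove() change above is a common idempotent-teardown pattern: guard on the handle, then clear it so a second call is harmless. A self-contained C sketch of the idea, with free() standing in for the unregister call:

        #include <stdlib.h>

        struct hwmon_state {
                void *dev;      /* stands in for sfp->hwmon_dev */
                char *name;
        };

        /* Guard plus pointer reset makes teardown safe to call twice,
         * and safe when setup never ran or returned an error pointer. */
        static void hwmon_teardown(struct hwmon_state *s)
        {
                if (!s->dev)    /* stands in for !IS_ERR_OR_NULL() */
                        return;
                free(s->dev);
                s->dev = NULL;  /* a second call becomes a no-op */
                free(s->name);
                s->name = NULL;
        }

In the kernel version the check must be IS_ERR_OR_NULL() rather than a plain NULL test, because a failed hwmon registration leaves an ERR_PTR value behind.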
*/ static u16 tun_automq_select_queue(struct tun_struct *tun, struct sk_buff *skb) { @@ -577,18 +577,13 @@ static u16 tun_automq_select_queue(struct tun_struct *tun, struct sk_buff *skb) numqueues = READ_ONCE(tun->numqueues); txq = __skb_get_hash_symmetric(skb); - if (txq) { - e = tun_flow_find(&tun->flows[tun_hashfn(txq)], txq); - if (e) { - tun_flow_save_rps_rxhash(e, txq); - txq = e->queue_index; - } else - /* use multiply and shift instead of expensive divide */ - txq = ((u64)txq * numqueues) >> 32; - } else if (likely(skb_rx_queue_recorded(skb))) { - txq = skb_get_rx_queue(skb); - while (unlikely(txq >= numqueues)) - txq -= numqueues; + e = tun_flow_find(&tun->flows[tun_hashfn(txq)], txq); + if (e) { + tun_flow_save_rps_rxhash(e, txq); + txq = e->queue_index; + } else { + /* use multiply and shift instead of expensive divide */ + txq = ((u64)txq * numqueues) >> 32; } return txq; @@ -689,8 +684,8 @@ static void __tun_detach(struct tun_file *tfile, bool clean) tun = rtnl_dereference(tfile->tun); if (tun && clean) { - tun_napi_disable(tun, tfile); - tun_napi_del(tun, tfile); + tun_napi_disable(tfile); + tun_napi_del(tfile); } if (tun && !tfile->detached) { @@ -757,7 +752,7 @@ static void tun_detach_all(struct net_device *dev) for (i = 0; i < n; i++) { tfile = rtnl_dereference(tun->tfiles[i]); BUG_ON(!tfile); - tun_napi_disable(tun, tfile); + tun_napi_disable(tfile); tfile->socket.sk->sk_shutdown = RCV_SHUTDOWN; tfile->socket.sk->sk_data_ready(tfile->socket.sk); RCU_INIT_POINTER(tfile->tun, NULL); @@ -773,7 +768,7 @@ static void tun_detach_all(struct net_device *dev) synchronize_net(); for (i = 0; i < n; i++) { tfile = rtnl_dereference(tun->tfiles[i]); - tun_napi_del(tun, tfile); + tun_napi_del(tfile); /* Drop read queue */ tun_queue_purge(tfile); xdp_rxq_info_unreg(&tfile->xdp_rxq); @@ -792,7 +787,7 @@ static void tun_detach_all(struct net_device *dev) } static int tun_attach(struct tun_struct *tun, struct file *file, - bool skip_filter, bool napi) + bool skip_filter, bool napi, bool napi_frags) { struct tun_file *tfile = file->private_data; struct net_device *dev = tun->dev; @@ -865,7 +860,7 @@ static int tun_attach(struct tun_struct *tun, struct file *file, tun_enable_queue(tfile); } else { sock_hold(&tfile->sk); - tun_napi_init(tun, tfile, napi); + tun_napi_init(tun, tfile, napi, napi_frags); } if (rtnl_dereference(tun->xdp_prog)) @@ -1046,16 +1041,13 @@ static void tun_automq_xmit(struct tun_struct *tun, struct sk_buff *skb) /* Select queue was not called for the skbuff, so we extract the * RPS hash and save it into the flow_table here. 
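tun_automq_select_queue() now keeps only the multiply-and-shift mapping, which scales a 32-bit hash into the queue range without a divide. A small sketch of that computation:

        #include <stdint.h>
        #include <stdio.h>

        /* Map a 32-bit hash onto [0, nqueues) as (hash * n) / 2^32,
         * computed in 64 bits: cheaper than a modulo and just as
         * uniform for well-mixed hashes. */
        static uint16_t pick_queue(uint32_t hash, uint32_t nqueues)
        {
                return (uint16_t)(((uint64_t)hash * nqueues) >> 32);
        }

        int main(void)
        {
                printf("%u\n", pick_queue(0x9e3779b9u, 8));     /* prints 4 */
                return 0;
        }

The simplification is possible because __skb_get_hash_symmetric() no longer returns zero for the flows of interest, so the old rx-queue fallback branch had become dead weight.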
*/ + struct tun_flow_entry *e; __u32 rxhash; rxhash = __skb_get_hash_symmetric(skb); - if (rxhash) { - struct tun_flow_entry *e; - e = tun_flow_find(&tun->flows[tun_hashfn(rxhash)], - rxhash); - if (e) - tun_flow_save_rps_rxhash(e, rxhash); - } + e = tun_flow_find(&tun->flows[tun_hashfn(rxhash)], rxhash); + if (e) + tun_flow_save_rps_rxhash(e, rxhash); } #endif } @@ -1743,7 +1735,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile, int err; u32 rxhash = 0; int skb_xdp = 1; - bool frags = tun_napi_frags_enabled(tun); + bool frags = tun_napi_frags_enabled(tfile); if (!(tun->dev->flags & IFF_UP)) return -EIO; @@ -2683,7 +2675,8 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr) return err; err = tun_attach(tun, file, ifr->ifr_flags & IFF_NOFILTER, - ifr->ifr_flags & IFF_NAPI); + ifr->ifr_flags & IFF_NAPI, + ifr->ifr_flags & IFF_NAPI_FRAGS); if (err < 0) return err; @@ -2781,7 +2774,8 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr) (ifr->ifr_flags & TUN_FEATURES); INIT_LIST_HEAD(&tun->disabled); - err = tun_attach(tun, file, false, ifr->ifr_flags & IFF_NAPI); + err = tun_attach(tun, file, false, ifr->ifr_flags & IFF_NAPI, + ifr->ifr_flags & IFF_NAPI_FRAGS); if (err < 0) goto err_free_flow; @@ -2930,7 +2924,8 @@ static int tun_set_queue(struct file *file, struct ifreq *ifr) ret = security_tun_dev_attach_queue(tun->security); if (ret < 0) goto unlock; - ret = tun_attach(tun, file, false, tun->flags & IFF_NAPI); + ret = tun_attach(tun, file, false, tun->flags & IFF_NAPI, + tun->flags & IFF_NAPI_FRAGS); } else if (ifr->ifr_flags & IFF_DETACH_QUEUE) { tun = rtnl_dereference(tfile->tun); if (!tun || !(tun->flags & IFF_MULTI_QUEUE) || tfile->detached) @@ -3348,6 +3343,7 @@ static int tun_chr_open(struct inode *inode, struct file * file) return -ENOMEM; } + mutex_init(&tfile->napi_mutex); RCU_INIT_POINTER(tfile->tun, NULL); tfile->flags = 0; tfile->ifindex = 0; diff --git a/drivers/net/usb/asix_common.c b/drivers/net/usb/asix_common.c index e95dd12edec4..023b8d0bf175 100644 --- a/drivers/net/usb/asix_common.c +++ b/drivers/net/usb/asix_common.c @@ -607,6 +607,9 @@ int asix_set_wol(struct net_device *net, struct ethtool_wolinfo *wolinfo) struct usbnet *dev = netdev_priv(net); u8 opt = 0; + if (wolinfo->wolopts & ~(WAKE_PHY | WAKE_MAGIC)) + return -EINVAL; + if (wolinfo->wolopts & WAKE_PHY) opt |= AX_MONITOR_LINK; if (wolinfo->wolopts & WAKE_MAGIC) diff --git a/drivers/net/usb/ax88179_178a.c b/drivers/net/usb/ax88179_178a.c index 9e8ad372f419..2207f7a7d1ff 100644 --- a/drivers/net/usb/ax88179_178a.c +++ b/drivers/net/usb/ax88179_178a.c @@ -566,6 +566,9 @@ ax88179_set_wol(struct net_device *net, struct ethtool_wolinfo *wolinfo) struct usbnet *dev = netdev_priv(net); u8 opt = 0; + if (wolinfo->wolopts & ~(WAKE_PHY | WAKE_MAGIC)) + return -EINVAL; + if (wolinfo->wolopts & WAKE_PHY) opt |= AX_MONITOR_MODE_RWLC; if (wolinfo->wolopts & WAKE_MAGIC) diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c index 1eaec648bd1f..50c05d0f44cb 100644 --- a/drivers/net/usb/cdc_ncm.c +++ b/drivers/net/usb/cdc_ncm.c @@ -779,8 +779,7 @@ int cdc_ncm_bind_common(struct usbnet *dev, struct usb_interface *intf, u8 data_ hrtimer_init(&ctx->tx_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); ctx->tx_timer.function = &cdc_ncm_tx_timer_cb; - ctx->bh.data = (unsigned long)dev; - ctx->bh.func = cdc_ncm_txpath_bh; + tasklet_init(&ctx->bh, cdc_ncm_txpath_bh, (unsigned long)dev); atomic_set(&ctx->stop, 0); spin_lock_init(&ctx->mtx); @@ 
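The asix hunk below is the first of several identical Wake-on-LAN fixes in this series (ax88179, lan78xx, r8152, smsc75xx, smsc95xx, sr9800): validate the requested wake options against what the hardware supports before programming anything. A sketch of the pattern, with WAKE_* bit values as in linux/ethtool.h:

        #include <errno.h>

        #define WAKE_PHY        (1 << 0)        /* values mirror linux/ethtool.h */
        #define WAKE_MAGIC      (1 << 5)

        /* Reject requests carrying any bit the hardware cannot honor
         * instead of silently dropping them; userspace sees -EINVAL. */
        static int set_wol(unsigned int wolopts)
        {
                if (wolopts & ~(WAKE_PHY | WAKE_MAGIC))
                        return -EINVAL;
                /* ... program link-change and magic-packet monitors ... */
                return 0;
        }

Before these fixes, ethtool could report success for wake modes the device would never actually honor.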
-1601,11 +1600,8 @@ cdc_ncm_speed_change(struct usbnet *dev, static void cdc_ncm_status(struct usbnet *dev, struct urb *urb) { - struct cdc_ncm_ctx *ctx; struct usb_cdc_notification *event; - ctx = (struct cdc_ncm_ctx *)dev->data[0]; - if (urb->actual_length < sizeof(*event)) return; diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c index 656441d9a955..be1917be28f2 100644 --- a/drivers/net/usb/lan78xx.c +++ b/drivers/net/usb/lan78xx.c @@ -1387,19 +1387,10 @@ static int lan78xx_set_wol(struct net_device *netdev, if (ret < 0) return ret; - pdata->wol = 0; - if (wol->wolopts & WAKE_UCAST) - pdata->wol |= WAKE_UCAST; - if (wol->wolopts & WAKE_MCAST) - pdata->wol |= WAKE_MCAST; - if (wol->wolopts & WAKE_BCAST) - pdata->wol |= WAKE_BCAST; - if (wol->wolopts & WAKE_MAGIC) - pdata->wol |= WAKE_MAGIC; - if (wol->wolopts & WAKE_PHY) - pdata->wol |= WAKE_PHY; - if (wol->wolopts & WAKE_ARP) - pdata->wol |= WAKE_ARP; + if (wol->wolopts & ~WAKE_ALL) + return -EINVAL; + + pdata->wol = wol->wolopts; device_set_wakeup_enable(&dev->udev->dev, (bool)wol->wolopts); diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c index 533b6fb8d923..72a55b6b4211 100644 --- a/drivers/net/usb/qmi_wwan.c +++ b/drivers/net/usb/qmi_wwan.c @@ -1241,6 +1241,7 @@ static const struct usb_device_id products[] = { {QMI_FIXED_INTF(0x0b3c, 0xc00b, 4)}, /* Olivetti Olicard 500 */ {QMI_FIXED_INTF(0x1e2d, 0x0060, 4)}, /* Cinterion PLxx */ {QMI_FIXED_INTF(0x1e2d, 0x0053, 4)}, /* Cinterion PHxx,PXxx */ + {QMI_FIXED_INTF(0x1e2d, 0x0063, 10)}, /* Cinterion ALASxx (1 RmNet) */ {QMI_FIXED_INTF(0x1e2d, 0x0082, 4)}, /* Cinterion PHxx,PXxx (2 RmNet) */ {QMI_FIXED_INTF(0x1e2d, 0x0082, 5)}, /* Cinterion PHxx,PXxx (2 RmNet) */ {QMI_FIXED_INTF(0x1e2d, 0x0083, 4)}, /* Cinterion PHxx,PXxx (1 RmNet + USB Audio)*/ diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c index 2cd71bdb6484..f1b5201cc320 100644 --- a/drivers/net/usb/r8152.c +++ b/drivers/net/usb/r8152.c @@ -4506,6 +4506,9 @@ static int rtl8152_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) if (!rtl_can_wakeup(tp)) return -EOPNOTSUPP; + if (wol->wolopts & ~WAKE_ANY) + return -EINVAL; + ret = usb_autopm_get_interface(tp->intf); if (ret < 0) goto out_set_wol; diff --git a/drivers/net/usb/smsc75xx.c b/drivers/net/usb/smsc75xx.c index 05553d252446..ec287c9741e8 100644 --- a/drivers/net/usb/smsc75xx.c +++ b/drivers/net/usb/smsc75xx.c @@ -731,6 +731,9 @@ static int smsc75xx_ethtool_set_wol(struct net_device *net, struct smsc75xx_priv *pdata = (struct smsc75xx_priv *)(dev->data[0]); int ret; + if (wolinfo->wolopts & ~SUPPORTED_WAKE) + return -EINVAL; + pdata->wolopts = wolinfo->wolopts & SUPPORTED_WAKE; ret = device_set_wakeup_enable(&dev->udev->dev, pdata->wolopts); @@ -1517,6 +1520,7 @@ static void smsc75xx_unbind(struct usbnet *dev, struct usb_interface *intf) { struct smsc75xx_priv *pdata = (struct smsc75xx_priv *)(dev->data[0]); if (pdata) { + cancel_work_sync(&pdata->set_multicast); netif_dbg(dev, ifdown, dev->net, "free pdata\n"); kfree(pdata); pdata = NULL; diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c index 06b4d290784d..262e7a3c23cb 100644 --- a/drivers/net/usb/smsc95xx.c +++ b/drivers/net/usb/smsc95xx.c @@ -774,6 +774,9 @@ static int smsc95xx_ethtool_set_wol(struct net_device *net, struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]); int ret; + if (wolinfo->wolopts & ~SUPPORTED_WAKE) + return -EINVAL; + pdata->wolopts = wolinfo->wolopts & SUPPORTED_WAKE; ret = 
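The lan78xx hunk shows the compact form of the same idea: one mask test plus one assignment in place of copying each supported bit by hand. Roughly, and assuming the supported mask covers every mode the device implements (WAKE_ALL in the lan78xx case):

        #include <errno.h>

        /* One range check plus one assignment replaces six per-bit
         * if-blocks; "supported" is the device's full WoL capability. */
        static int store_wol(unsigned int req, unsigned int supported,
                             unsigned int *stored)
        {
                if (req & ~supported)
                        return -EINVAL;
                *stored = req;
                return 0;
        }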
device_set_wakeup_enable(&dev->udev->dev, pdata->wolopts); diff --git a/drivers/net/usb/sr9800.c b/drivers/net/usb/sr9800.c index 9277a0f228df..35f39f23d881 100644 --- a/drivers/net/usb/sr9800.c +++ b/drivers/net/usb/sr9800.c @@ -421,6 +421,9 @@ sr_set_wol(struct net_device *net, struct ethtool_wolinfo *wolinfo) struct usbnet *dev = netdev_priv(net); u8 opt = 0; + if (wolinfo->wolopts & ~(WAKE_PHY | WAKE_MAGIC)) + return -EINVAL; + if (wolinfo->wolopts & WAKE_PHY) opt |= SR_MONITOR_LINK; if (wolinfo->wolopts & WAKE_MAGIC) diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index 765920905226..3f5aa59c37b7 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -1699,17 +1699,6 @@ static void virtnet_stats(struct net_device *dev, tot->rx_frame_errors = dev->stats.rx_frame_errors; } -#ifdef CONFIG_NET_POLL_CONTROLLER -static void virtnet_netpoll(struct net_device *dev) -{ - struct virtnet_info *vi = netdev_priv(dev); - int i; - - for (i = 0; i < vi->curr_queue_pairs; i++) - napi_schedule(&vi->rq[i].napi); -} -#endif - static void virtnet_ack_link_announce(struct virtnet_info *vi) { rtnl_lock(); @@ -2181,6 +2170,53 @@ static int virtnet_get_link_ksettings(struct net_device *dev, return 0; } +static int virtnet_set_coalesce(struct net_device *dev, + struct ethtool_coalesce *ec) +{ + struct ethtool_coalesce ec_default = { + .cmd = ETHTOOL_SCOALESCE, + .rx_max_coalesced_frames = 1, + }; + struct virtnet_info *vi = netdev_priv(dev); + int i, napi_weight; + + if (ec->tx_max_coalesced_frames > 1) + return -EINVAL; + + ec_default.tx_max_coalesced_frames = ec->tx_max_coalesced_frames; + napi_weight = ec->tx_max_coalesced_frames ? NAPI_POLL_WEIGHT : 0; + + /* disallow changes to fields not explicitly tested above */ + if (memcmp(ec, &ec_default, sizeof(ec_default))) + return -EINVAL; + + if (napi_weight ^ vi->sq[0].napi.weight) { + if (dev->flags & IFF_UP) + return -EBUSY; + for (i = 0; i < vi->max_queue_pairs; i++) + vi->sq[i].napi.weight = napi_weight; + } + + return 0; +} + +static int virtnet_get_coalesce(struct net_device *dev, + struct ethtool_coalesce *ec) +{ + struct ethtool_coalesce ec_default = { + .cmd = ETHTOOL_GCOALESCE, + .rx_max_coalesced_frames = 1, + }; + struct virtnet_info *vi = netdev_priv(dev); + + memcpy(ec, &ec_default, sizeof(ec_default)); + + if (vi->sq[0].napi.weight) + ec->tx_max_coalesced_frames = 1; + + return 0; +} + static void virtnet_init_settings(struct net_device *dev) { struct virtnet_info *vi = netdev_priv(dev); @@ -2219,6 +2255,8 @@ static const struct ethtool_ops virtnet_ethtool_ops = { .get_ts_info = ethtool_op_get_ts_info, .get_link_ksettings = virtnet_get_link_ksettings, .set_link_ksettings = virtnet_set_link_ksettings, + .set_coalesce = virtnet_set_coalesce, + .get_coalesce = virtnet_get_coalesce, }; static void virtnet_freeze_down(struct virtio_device *vdev) @@ -2447,9 +2485,6 @@ static const struct net_device_ops virtnet_netdev = { .ndo_get_stats64 = virtnet_stats, .ndo_vlan_rx_add_vid = virtnet_vlan_rx_add_vid, .ndo_vlan_rx_kill_vid = virtnet_vlan_rx_kill_vid, -#ifdef CONFIG_NET_POLL_CONTROLLER - .ndo_poll_controller = virtnet_netpoll, -#endif .ndo_bpf = virtnet_xdp, .ndo_xdp_xmit = virtnet_xdp_xmit, .ndo_features_check = passthru_features_check, diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index e5d236595206..fb0cdbba8d76 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@ -3539,6 +3539,7 @@ static size_t vxlan_get_size(const struct net_device *dev) nla_total_size(sizeof(__u32)) + /* IFLA_VXLAN_LINK */ 
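virtnet_set_coalesce() validates by comparison: build the only acceptable request from defaults, copy in the explicitly checked fields, then memcmp so any untested field that deviates rejects the whole command. A sketch of the technique on a cut-down struct (the field set and the cmd value are illustrative):

        #include <errno.h>
        #include <string.h>

        /* Stand-in for struct ethtool_coalesce: many fields, few implemented. */
        struct coalesce {
                unsigned int cmd;
                unsigned int rx_max_frames;
                unsigned int tx_max_frames;
                unsigned int rx_usecs;  /* not implemented by this driver */
        };

        static int set_coalesce(const struct coalesce *req)
        {
                struct coalesce allowed = { .cmd = 2, .rx_max_frames = 1 };

                if (req->tx_max_frames > 1)
                        return -EINVAL;
                allowed.tx_max_frames = req->tx_max_frames;

                /* any field not explicitly tested above must still hold
                 * its default value, or the request is refused */
                if (memcmp(req, &allowed, sizeof(allowed)))
                        return -EINVAL;
                return 0;
        }

The memcmp keeps the driver honest as ethtool grows new coalesce fields: anything it does not implement is rejected rather than silently ignored.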
nla_total_size(sizeof(struct in6_addr)) + /* IFLA_VXLAN_LOCAL{6} */ nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_TTL */ + nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_TTL_INHERIT */ nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_TOS */ nla_total_size(sizeof(__be32)) + /* IFLA_VXLAN_LABEL */ nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_LEARNING */ @@ -3603,6 +3604,8 @@ static int vxlan_fill_info(struct sk_buff *skb, const struct net_device *dev) } if (nla_put_u8(skb, IFLA_VXLAN_TTL, vxlan->cfg.ttl) || + nla_put_u8(skb, IFLA_VXLAN_TTL_INHERIT, + !!(vxlan->cfg.flags & VXLAN_F_TTL_INHERIT)) || nla_put_u8(skb, IFLA_VXLAN_TOS, vxlan->cfg.tos) || nla_put_be32(skb, IFLA_VXLAN_LABEL, vxlan->cfg.label) || nla_put_u8(skb, IFLA_VXLAN_LEARNING, diff --git a/drivers/net/wimax/i2400m/control.c b/drivers/net/wimax/i2400m/control.c index 094cea775d0c..ef298d8525c5 100644 --- a/drivers/net/wimax/i2400m/control.c +++ b/drivers/net/wimax/i2400m/control.c @@ -257,7 +257,7 @@ static const struct [I2400M_MS_ACCESSIBILITY_ERROR] = { "accesibility error", -EIO }, [I2400M_MS_BUSY] = { "busy", -EBUSY }, [I2400M_MS_CORRUPTED_TLV] = { "corrupted TLV", -EILSEQ }, - [I2400M_MS_UNINITIALIZED] = { "not unitialized", -EILSEQ }, + [I2400M_MS_UNINITIALIZED] = { "uninitialized", -EILSEQ }, [I2400M_MS_UNKNOWN_ERROR] = { "unknown error", -EIO }, [I2400M_MS_PRODUCTION_ERROR] = { "production error", -EIO }, [I2400M_MS_NO_RF] = { "no RF", -EIO }, diff --git a/drivers/net/wireless/broadcom/b43/dma.c b/drivers/net/wireless/broadcom/b43/dma.c index d113bd997f4b..dfc4c34298d4 100644 --- a/drivers/net/wireless/broadcom/b43/dma.c +++ b/drivers/net/wireless/broadcom/b43/dma.c @@ -1518,13 +1518,15 @@ void b43_dma_handle_txstatus(struct b43_wldev *dev, } } else { /* More than a single header/data pair were missed. - * Report this error, and reset the controller to + * Report this error. If running with open-source + * firmware, then reset the controller to * revive operation. */ b43dbg(dev->wl, "Out of order TX status report on DMA ring %d. Expected %d, but got %d\n", ring->index, firstused, slot); - b43_controller_restart(dev, "Out of order TX"); + if (dev->fw.opensource) + b43_controller_restart(dev, "Out of order TX"); return; } } diff --git a/drivers/net/wireless/broadcom/b43/main.c b/drivers/net/wireless/broadcom/b43/main.c index 4daa1ce8cba3..74be3c809225 100644 --- a/drivers/net/wireless/broadcom/b43/main.c +++ b/drivers/net/wireless/broadcom/b43/main.c @@ -5493,13 +5493,11 @@ err_powerdown: static void b43_one_core_detach(struct b43_bus_dev *dev) { struct b43_wldev *wldev; - struct b43_wl *wl; /* Do not cancel ieee80211-workqueue based work here. * See comment in b43_remove(). 
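The vxlan hunk pairs a new nla_put_u8() in the fill path with a matching nla_total_size() entry in the size path; the two must move together or the netlink message can be undersized. A toy model of the size bookkeeping, assuming the standard netlink layout of a 4-byte attribute header plus a 4-byte-aligned payload:

        #include <stddef.h>
        #include <stdint.h>
        #include <stdio.h>

        /* Toy stand-in for nla_total_size(): header + aligned payload. */
        static size_t attr_size(size_t payload)
        {
                return 4 + ((payload + 3) & ~(size_t)3);
        }

        int main(void)
        {
                size_t len = attr_size(sizeof(uint8_t))   /* IFLA_VXLAN_TTL */
                           + attr_size(sizeof(uint8_t))   /* IFLA_VXLAN_TTL_INHERIT */
                           + attr_size(sizeof(uint8_t));  /* IFLA_VXLAN_TOS */

                printf("%zu\n", len);   /* 24: room for all three u8 attributes */
                return 0;
        }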
*/ wldev = b43_bus_get_wldev(dev); - wl = wldev->wl; b43_debugfs_remove_device(wldev); b43_wireless_core_detach(wldev); list_del(&wldev->list); diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c index 3e9c4f2f5dd1..456a1bf008b3 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c @@ -74,7 +74,7 @@ #define P2P_AF_MAX_WAIT_TIME msecs_to_jiffies(2000) #define P2P_INVALID_CHANNEL -1 #define P2P_CHANNEL_SYNC_RETRY 5 -#define P2P_AF_FRM_SCAN_MAX_WAIT msecs_to_jiffies(1500) +#define P2P_AF_FRM_SCAN_MAX_WAIT msecs_to_jiffies(450) #define P2P_DEFAULT_SLEEP_TIME_VSDB 200 /* WiFi P2P Public Action Frame OUI Subtypes */ @@ -1134,7 +1134,6 @@ static s32 brcmf_p2p_af_searching_channel(struct brcmf_p2p_info *p2p) { struct afx_hdl *afx_hdl = &p2p->afx_hdl; struct brcmf_cfg80211_vif *pri_vif; - unsigned long duration; s32 retry; brcmf_dbg(TRACE, "Enter\n"); @@ -1150,7 +1149,6 @@ static s32 brcmf_p2p_af_searching_channel(struct brcmf_p2p_info *p2p) * pending action frame tx is cancelled. */ retry = 0; - duration = msecs_to_jiffies(P2P_AF_FRM_SCAN_MAX_WAIT); while ((retry < P2P_CHANNEL_SYNC_RETRY) && (afx_hdl->peer_chan == P2P_INVALID_CHANNEL)) { afx_hdl->is_listen = false; @@ -1158,7 +1156,8 @@ static s32 brcmf_p2p_af_searching_channel(struct brcmf_p2p_info *p2p) retry); /* search peer on peer's listen channel */ schedule_work(&afx_hdl->afx_work); - wait_for_completion_timeout(&afx_hdl->act_frm_scan, duration); + wait_for_completion_timeout(&afx_hdl->act_frm_scan, + P2P_AF_FRM_SCAN_MAX_WAIT); if ((afx_hdl->peer_chan != P2P_INVALID_CHANNEL) || (!test_bit(BRCMF_P2P_STATUS_FINDING_COMMON_CHANNEL, &p2p->status))) @@ -1171,7 +1170,7 @@ static s32 brcmf_p2p_af_searching_channel(struct brcmf_p2p_info *p2p) afx_hdl->is_listen = true; schedule_work(&afx_hdl->afx_work); wait_for_completion_timeout(&afx_hdl->act_frm_scan, - duration); + P2P_AF_FRM_SCAN_MAX_WAIT); } if ((afx_hdl->peer_chan != P2P_INVALID_CHANNEL) || (!test_bit(BRCMF_P2P_STATUS_FINDING_COMMON_CHANNEL, @@ -1458,10 +1457,12 @@ int brcmf_p2p_notify_action_tx_complete(struct brcmf_if *ifp, return 0; if (e->event_code == BRCMF_E_ACTION_FRAME_COMPLETE) { - if (e->status == BRCMF_E_STATUS_SUCCESS) + if (e->status == BRCMF_E_STATUS_SUCCESS) { set_bit(BRCMF_P2P_STATUS_ACTION_TX_COMPLETED, &p2p->status); - else { + if (!p2p->wait_for_offchan_complete) + complete(&p2p->send_af_done); + } else { set_bit(BRCMF_P2P_STATUS_ACTION_TX_NOACK, &p2p->status); /* If there is no ack, we don't need to wait for * WLC_E_ACTION_FRAME_OFFCHAN_COMPLETE event @@ -1512,6 +1513,17 @@ static s32 brcmf_p2p_tx_action_frame(struct brcmf_p2p_info *p2p, p2p->af_sent_channel = le32_to_cpu(af_params->channel); p2p->af_tx_sent_jiffies = jiffies; + if (test_bit(BRCMF_P2P_STATUS_DISCOVER_LISTEN, &p2p->status) && + p2p->af_sent_channel == + ieee80211_frequency_to_channel(p2p->remain_on_channel.center_freq)) + p2p->wait_for_offchan_complete = false; + else + p2p->wait_for_offchan_complete = true; + + brcmf_dbg(TRACE, "Waiting for %s tx completion event\n", + (p2p->wait_for_offchan_complete) ? 
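brcmf_p2p_af_searching_channel() now waits with the shorter constant directly inside a bounded retry loop rather than converting it on every entry. A sketch of that search-until-found-or-budget-spent shape, where probe_once() is a hypothetical stand-in for the schedule_work() plus wait_for_completion_timeout() pair:

        #include <stdbool.h>

        #define CHANNEL_SYNC_RETRY 5

        /* Alternate probe attempts until the peer's channel is learned
         * or the retry budget runs out; probe_once() is assumed to
         * block internally with its own (now shorter) timeout. */
        static int find_peer_channel(bool (*probe_once)(void))
        {
                int retry;

                for (retry = 0; retry < CHANNEL_SYNC_RETRY; retry++)
                        if (probe_once())
                                return 0;
                return -1;
        }

Cutting the per-attempt wait from 1500 ms to 450 ms bounds the worst-case search time while the retry count keeps the overall coverage.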
+ "off-channel" : "on-channel"); + timeout = wait_for_completion_timeout(&p2p->send_af_done, P2P_AF_MAX_WAIT_TIME); diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.h index 0e8b34d2d85c..39f0d0218088 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.h @@ -124,6 +124,7 @@ struct afx_hdl { * @gon_req_action: about to send go negotiation requets frame. * @block_gon_req_tx: drop tx go negotiation requets frame. * @p2pdev_dynamically: is p2p device if created by module param or supplicant. + * @wait_for_offchan_complete: wait for off-channel tx completion event. */ struct brcmf_p2p_info { struct brcmf_cfg80211_info *cfg; @@ -144,6 +145,7 @@ struct brcmf_p2p_info { bool gon_req_action; bool block_gon_req_tx; bool p2pdev_dynamically; + bool wait_for_offchan_complete; }; s32 brcmf_p2p_attach(struct brcmf_cfg80211_info *cfg, bool p2pdev_forced); diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c index 6255fb6d97a7..81ff558046a8 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c @@ -502,6 +502,7 @@ brcms_ops_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif) } spin_lock_bh(&wl->lock); + wl->wlc->vif = vif; wl->mute_tx = false; brcms_c_mute(wl->wlc, false); if (vif->type == NL80211_IFTYPE_STATION) @@ -519,6 +520,11 @@ brcms_ops_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif) static void brcms_ops_remove_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif) { + struct brcms_info *wl = hw->priv; + + spin_lock_bh(&wl->lock); + wl->wlc->vif = NULL; + spin_unlock_bh(&wl->lock); } static int brcms_ops_config(struct ieee80211_hw *hw, u32 changed) @@ -937,6 +943,25 @@ static void brcms_ops_set_tsf(struct ieee80211_hw *hw, spin_unlock_bh(&wl->lock); } +static int brcms_ops_beacon_set_tim(struct ieee80211_hw *hw, + struct ieee80211_sta *sta, bool set) +{ + struct brcms_info *wl = hw->priv; + struct sk_buff *beacon = NULL; + u16 tim_offset = 0; + + spin_lock_bh(&wl->lock); + if (wl->wlc->vif) + beacon = ieee80211_beacon_get_tim(hw, wl->wlc->vif, + &tim_offset, NULL); + if (beacon) + brcms_c_set_new_beacon(wl->wlc, beacon, tim_offset, + wl->wlc->vif->bss_conf.dtim_period); + spin_unlock_bh(&wl->lock); + + return 0; +} + static const struct ieee80211_ops brcms_ops = { .tx = brcms_ops_tx, .start = brcms_ops_start, @@ -955,6 +980,7 @@ static const struct ieee80211_ops brcms_ops = { .flush = brcms_ops_flush, .get_tsf = brcms_ops_get_tsf, .set_tsf = brcms_ops_set_tsf, + .set_tim = brcms_ops_beacon_set_tim, }; void brcms_dpc(unsigned long data) diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.h b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.h index c4d135cff04a..9f76b880814e 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.h @@ -563,6 +563,7 @@ struct brcms_c_info { struct wiphy *wiphy; struct scb pri_scb; + struct ieee80211_vif *vif; struct sk_buff *beacon; u16 beacon_tim_offset; diff --git a/drivers/net/wireless/intel/iwlegacy/4965.c b/drivers/net/wireless/intel/iwlegacy/4965.c index c3c638ed0ed7..ce4144a89217 100644 --- a/drivers/net/wireless/intel/iwlegacy/4965.c +++ b/drivers/net/wireless/intel/iwlegacy/4965.c @@ -1297,6 +1297,8 @@ 
il4965_send_rxon_assoc(struct il_priv *il) const struct il_rxon_cmd *rxon1 = &il->staging; const struct il_rxon_cmd *rxon2 = &il->active; + lockdep_assert_held(&il->mutex); + if (rxon1->flags == rxon2->flags && rxon1->filter_flags == rxon2->filter_flags && rxon1->cck_basic_rates == rxon2->cck_basic_rates && diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/1000.c b/drivers/net/wireless/intel/iwlwifi/cfg/1000.c index 46686ee88ff4..76b5ddb20248 100644 --- a/drivers/net/wireless/intel/iwlwifi/cfg/1000.c +++ b/drivers/net/wireless/intel/iwlwifi/cfg/1000.c @@ -47,6 +47,7 @@ static const struct iwl_base_params iwl1000_base_params = { .num_of_queues = IWLAGN_NUM_QUEUES, + .max_tfd_queue_size = 256, .eeprom_size = OTP_LOW_IMAGE_SIZE, .pll_cfg = true, .max_ll_items = OTP_MAX_LL_ITEMS_1000, diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/22000.c b/drivers/net/wireless/intel/iwlwifi/cfg/22000.c index a8acc755a02c..da5d5f9b2573 100644 --- a/drivers/net/wireless/intel/iwlwifi/cfg/22000.c +++ b/drivers/net/wireless/intel/iwlwifi/cfg/22000.c @@ -56,7 +56,7 @@ #include "iwl-config.h" /* Highest firmware API version supported */ -#define IWL_22000_UCODE_API_MAX 38 +#define IWL_22000_UCODE_API_MAX 41 /* Lowest firmware API version supported */ #define IWL_22000_UCODE_API_MIN 39 diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/9000.c b/drivers/net/wireless/intel/iwlwifi/cfg/9000.c index 37deaf4fd7b3..d55fd23cafe6 100644 --- a/drivers/net/wireless/intel/iwlwifi/cfg/9000.c +++ b/drivers/net/wireless/intel/iwlwifi/cfg/9000.c @@ -57,7 +57,7 @@ #include "fw/file.h" /* Highest firmware API version supported */ -#define IWL9000_UCODE_API_MAX 38 +#define IWL9000_UCODE_API_MAX 41 /* Lowest firmware API version supported */ #define IWL9000_UCODE_API_MIN 30 diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h b/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h index 6c5338364794..93b392f0c6a4 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h @@ -165,7 +165,7 @@ struct iwl_nvm_access_resp { */ struct iwl_nvm_get_info { __le32 reserved; -} __packed; /* GRP_REGULATORY_NVM_GET_INFO_CMD_S_VER_1 */ +} __packed; /* REGULATORY_NVM_GET_INFO_CMD_API_S_VER_1 */ /** * enum iwl_nvm_info_general_flags - flags in NVM_GET_INFO resp @@ -180,14 +180,14 @@ enum iwl_nvm_info_general_flags { * @flags: bit 0: 1 - empty, 0 - non-empty * @nvm_version: nvm version * @board_type: board type - * @reserved: reserved + * @n_hw_addrs: number of reserved MAC addresses */ struct iwl_nvm_get_info_general { __le32 flags; __le16 nvm_version; u8 board_type; - u8 reserved; -} __packed; /* GRP_REGULATORY_NVM_GET_INFO_GENERAL_S_VER_1 */ + u8 n_hw_addrs; +} __packed; /* REGULATORY_NVM_GET_INFO_GENERAL_S_VER_2 */ /** * enum iwl_nvm_mac_sku_flags - flags in &iwl_nvm_get_info_sku @@ -231,7 +231,7 @@ struct iwl_nvm_get_info_sku { struct iwl_nvm_get_info_phy { __le32 tx_chains; __le32 rx_chains; -} __packed; /* GRP_REGULATORY_NVM_GET_INFO_PHY_SKU_SECTION_S_VER_1 */ +} __packed; /* REGULATORY_NVM_GET_INFO_PHY_SKU_SECTION_S_VER_1 */ #define IWL_NUM_CHANNELS (51) @@ -245,7 +245,7 @@ struct iwl_nvm_get_info_regulatory { __le32 lar_enabled; __le16 channel_profile[IWL_NUM_CHANNELS]; __le16 reserved; -} __packed; /* GRP_REGULATORY_NVM_GET_INFO_REGULATORY_S_VER_1 */ +} __packed; /* REGULATORY_NVM_GET_INFO_REGULATORY_S_VER_1 */ /** * struct iwl_nvm_get_info_rsp - response to get NVM data @@ -259,7 +259,7 @@ struct iwl_nvm_get_info_rsp { struct iwl_nvm_get_info_sku mac_sku; 
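The iwlegacy hunk adds lockdep_assert_held() so the RXON staging/active comparison is documented, and in debug builds enforced, to run under il->mutex. A userspace approximation of the same contract using a plain flag and assert():

        #include <assert.h>
        #include <stdbool.h>

        static bool cfg_lock_held;      /* set by the lock/unlock wrappers */

        /* Poor man's lockdep_assert_held(): states the locking contract
         * in code, and debug builds abort if a caller forgets the lock. */
        static void rxon_update_locked(int *staging, int v)
        {
                assert(cfg_lock_held);
                *staging = v;
        }

The assertion costs nothing in production builds but turns a subtle race into an immediate, attributable failure during development.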
struct iwl_nvm_get_info_phy phy_sku; struct iwl_nvm_get_info_regulatory regulatory; -} __packed; /* GRP_REGULATORY_NVM_GET_INFO_CMD_RSP_S_VER_2 */ +} __packed; /* REGULATORY_NVM_GET_INFO_RSP_API_S_VER_3 */ /** * struct iwl_nvm_access_complete_cmd - NVM_ACCESS commands are completed @@ -270,22 +270,6 @@ struct iwl_nvm_access_complete_cmd { } __packed; /* NVM_ACCESS_COMPLETE_CMD_API_S_VER_1 */ /** - * struct iwl_mcc_update_cmd_v1 - Request the device to update geographic - * regulatory profile according to the given MCC (Mobile Country Code). - * The MCC is two letter-code, ascii upper case[A-Z] or '00' for world domain. - * 'ZZ' MCC will be used to switch to NVM default profile; in this case, the - * MCC in the cmd response will be the relevant MCC in the NVM. - * @mcc: given mobile country code - * @source_id: the source from where we got the MCC, see iwl_mcc_source - * @reserved: reserved for alignment - */ -struct iwl_mcc_update_cmd_v1 { - __le16 mcc; - u8 source_id; - u8 reserved; -} __packed; /* LAR_UPDATE_MCC_CMD_API_S_VER_1 */ - -/** * struct iwl_mcc_update_cmd - Request the device to update geographic * regulatory profile according to the given MCC (Mobile Country Code). * The MCC is two letter-code, ascii upper case[A-Z] or '00' for world domain. @@ -306,7 +290,18 @@ struct iwl_mcc_update_cmd { } __packed; /* LAR_UPDATE_MCC_CMD_API_S_VER_2 */ /** - * struct iwl_mcc_update_resp_v1 - response to MCC_UPDATE_CMD. + * enum iwl_geo_information - geographic information. + * @GEO_NO_INFO: no special info for this geo profile. + * @GEO_WMM_ETSI_5GHZ_INFO: this geo profile limits the WMM params + * for the 5 GHz band. + */ +enum iwl_geo_information { + GEO_NO_INFO = 0, + GEO_WMM_ETSI_5GHZ_INFO = BIT(0), +}; + +/** + * struct iwl_mcc_update_resp_v3 - response to MCC_UPDATE_CMD. * Contains the new channel control profile map, if changed, and the new MCC * (mobile country code). * The new MCC may be different than what was requested in MCC_UPDATE_CMD. @@ -314,30 +309,23 @@ struct iwl_mcc_update_cmd { * @mcc: the new applied MCC * @cap: capabilities for all channels which matches the MCC * @source_id: the MCC source, see iwl_mcc_source - * @n_channels: number of channels in @channels_data (may be 14, 39, 50 or 51 - * channels, depending on platform) + * @time: time elapsed from the MCC test start (in units of 30 seconds) + * @geo_info: geographic specific profile information + * see &enum iwl_geo_information. + * @n_channels: number of channels in @channels_data. * @channels: channel control data map, DWORD for each channel. Only the first * 16bits are used. */ -struct iwl_mcc_update_resp_v1 { +struct iwl_mcc_update_resp_v3 { __le32 status; __le16 mcc; u8 cap; u8 source_id; + __le16 time; + __le16 geo_info; __le32 n_channels; __le32 channels[0]; -} __packed; /* LAR_UPDATE_MCC_CMD_RESP_S_VER_1 */ - -/** - * enum iwl_geo_information - geographic information. - * @GEO_NO_INFO: no special info for this geo profile. - * @GEO_WMM_ETSI_5GHZ_INFO: this geo profile limits the WMM params - * for the 5 GHz band. - */ -enum iwl_geo_information { - GEO_NO_INFO = 0, - GEO_WMM_ETSI_5GHZ_INFO = BIT(0), -}; +} __packed; /* LAR_UPDATE_MCC_CMD_RESP_S_VER_3 */ /** * struct iwl_mcc_update_resp - response to MCC_UPDATE_CMD. 
@@ -347,25 +335,26 @@ enum iwl_geo_information { * @status: see &enum iwl_mcc_update_status * @mcc: the new applied MCC * @cap: capabilities for all channels which matches the MCC - * @source_id: the MCC source, see iwl_mcc_source - * @time: time elapsed from the MCC test start (in 30 seconds TU) + * @time: time elapsed from the MCC test start (in units of 30 seconds) * @geo_info: geographic specific profile information * see &enum iwl_geo_information. - * @n_channels: number of channels in @channels_data (may be 14, 39, 50 or 51 - * channels, depending on platform) + * @source_id: the MCC source, see iwl_mcc_source + * @reserved: for four bytes alignment. + * @n_channels: number of channels in @channels_data. * @channels: channel control data map, DWORD for each channel. Only the first * 16bits are used. */ struct iwl_mcc_update_resp { __le32 status; __le16 mcc; - u8 cap; - u8 source_id; + __le16 cap; __le16 time; __le16 geo_info; + u8 source_id; + u8 reserved[3]; __le32 n_channels; __le32 channels[0]; -} __packed; /* LAR_UPDATE_MCC_CMD_RESP_S_VER_3 */ +} __packed; /* LAR_UPDATE_MCC_CMD_RESP_S_VER_4 */ /** * struct iwl_mcc_chub_notif - chub notifies of mcc change diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h b/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h index 415b8842b426..0537496b6eb1 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h @@ -368,10 +368,10 @@ enum iwl_rx_he_phy { /* trigger encoded */ IWL_RX_HE_PHY_RU_ALLOC_MASK = 0xfe0000000000ULL, IWL_RX_HE_PHY_INFO_TYPE_MASK = 0xf000000000000000ULL, - IWL_RX_HE_PHY_INFO_TYPE_SU = 0x0, - IWL_RX_HE_PHY_INFO_TYPE_MU = 0x1, - IWL_RX_HE_PHY_INFO_TYPE_MU_EXT_INFO = 0x2, - IWL_RX_HE_PHY_INFO_TYPE_TB_EXT_INFO = 0x3, + IWL_RX_HE_PHY_INFO_TYPE_SU = 0x0, /* TSF low valid (first DW) */ + IWL_RX_HE_PHY_INFO_TYPE_MU = 0x1, /* TSF low/high valid (both DWs) */ + IWL_RX_HE_PHY_INFO_TYPE_MU_EXT_INFO = 0x2, /* same + SIGB-common0/1/2 valid */ + IWL_RX_HE_PHY_INFO_TYPE_TB = 0x3, /* TSF low/high valid (both DWs) */ /* second dword - MU data */ IWL_RX_HE_PHY_MU_SIGB_COMPRESSION = BIT_ULL(32 + 0), diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h b/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h index 310b01e3cce1..18741889ec30 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h @@ -596,9 +596,12 @@ enum iwl_umac_scan_general_flags { * enum iwl_umac_scan_general_flags2 - UMAC scan general flags #2 * @IWL_UMAC_SCAN_GEN_FLAGS2_NOTIF_PER_CHNL: Whether to send a complete * notification per channel or not. + * @IWL_UMAC_SCAN_GEN_FLAGS2_ALLOW_CHNL_REORDER: Whether to allow channel + * reorder optimization or not. 
*/ enum iwl_umac_scan_general_flags2 { - IWL_UMAC_SCAN_GEN_FLAGS2_NOTIF_PER_CHNL = BIT(0), + IWL_UMAC_SCAN_GEN_FLAGS2_NOTIF_PER_CHNL = BIT(0), + IWL_UMAC_SCAN_GEN_FLAGS2_ALLOW_CHNL_REORDER = BIT(1), }; /** diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c index 0dcf1a673478..c16757051f16 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c +++ b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c @@ -240,7 +240,7 @@ static void iwl_fw_dump_fifos(struct iwl_fw_runtime *fwrt, if (!iwl_trans_grab_nic_access(fwrt->trans, &flags)) return; - if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_RXF)) { + if (fwrt->fw->dbg.dump_mask & BIT(IWL_FW_ERROR_DUMP_RXF)) { /* Pull RXF1 */ iwl_fwrt_dump_rxf(fwrt, dump_data, cfg->lmac[0].rxfifo1_size, 0, 0); @@ -254,7 +254,7 @@ static void iwl_fw_dump_fifos(struct iwl_fw_runtime *fwrt, LMAC2_PRPH_OFFSET, 2); } - if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_TXF)) { + if (fwrt->fw->dbg.dump_mask & BIT(IWL_FW_ERROR_DUMP_TXF)) { /* Pull TXF data from LMAC1 */ for (i = 0; i < fwrt->smem_cfg.num_txfifo_entries; i++) { /* Mark the number of TXF we're pulling now */ @@ -279,7 +279,7 @@ static void iwl_fw_dump_fifos(struct iwl_fw_runtime *fwrt, } } - if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_INTERNAL_TXF) && + if (fwrt->fw->dbg.dump_mask & BIT(IWL_FW_ERROR_DUMP_INTERNAL_TXF) && fw_has_capa(&fwrt->fw->ucode_capa, IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG)) { /* Pull UMAC internal TXF data from all TXFs */ @@ -573,103 +573,95 @@ static int iwl_fw_get_prph_len(struct iwl_fw_runtime *fwrt) static void iwl_fw_dump_mem(struct iwl_fw_runtime *fwrt, struct iwl_fw_error_dump_data **dump_data, - u32 sram_len, u32 sram_ofs, u32 smem_len, - u32 sram2_len) + u32 len, u32 ofs, u32 type) { - const struct iwl_fw_dbg_mem_seg_tlv *fw_dbg_mem = fwrt->fw->dbg_mem_tlv; struct iwl_fw_error_dump_mem *dump_mem; + + if (!len) + return; + + (*dump_data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM); + (*dump_data)->len = cpu_to_le32(len + sizeof(*dump_mem)); + dump_mem = (void *)(*dump_data)->data; + dump_mem->type = cpu_to_le32(type); + dump_mem->offset = cpu_to_le32(ofs); + iwl_trans_read_mem_bytes(fwrt->trans, ofs, dump_mem->data, len); + *dump_data = iwl_fw_error_next_data(*dump_data); + + IWL_DEBUG_INFO(fwrt, "WRT memory dump. 
Type=%u\n", dump_mem->type); +} + +#define ADD_LEN(len, item_len, const_len) \ + do {size_t item = item_len; len += (!!item) * const_len + item; } \ + while (0) + +static int iwl_fw_fifo_len(struct iwl_fw_runtime *fwrt, + struct iwl_fwrt_shared_mem_cfg *mem_cfg) +{ + size_t hdr_len = sizeof(struct iwl_fw_error_dump_data) + + sizeof(struct iwl_fw_error_dump_fifo); + u32 fifo_len = 0; int i; - if (!fwrt->fw->n_dbg_mem_tlv) { - (*dump_data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM); - (*dump_data)->len = cpu_to_le32(sram_len + sizeof(*dump_mem)); - dump_mem = (void *)(*dump_data)->data; - dump_mem->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM_SRAM); - dump_mem->offset = cpu_to_le32(sram_ofs); - iwl_trans_read_mem_bytes(fwrt->trans, sram_ofs, dump_mem->data, - sram_len); - *dump_data = iwl_fw_error_next_data(*dump_data); - } + if (!(fwrt->fw->dbg.dump_mask & BIT(IWL_FW_ERROR_DUMP_RXF))) + goto dump_txf; - for (i = 0; i < fwrt->fw->n_dbg_mem_tlv; i++) { - u32 len = le32_to_cpu(fw_dbg_mem[i].len); - u32 ofs = le32_to_cpu(fw_dbg_mem[i].ofs); + /* Count RXF2 size */ + ADD_LEN(fifo_len, mem_cfg->rxfifo2_size, hdr_len); - (*dump_data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM); - (*dump_data)->len = cpu_to_le32(len + sizeof(*dump_mem)); - dump_mem = (void *)(*dump_data)->data; - dump_mem->type = fw_dbg_mem[i].data_type; - dump_mem->offset = cpu_to_le32(ofs); + /* Count RXF1 sizes */ + for (i = 0; i < mem_cfg->num_lmacs; i++) + ADD_LEN(fifo_len, mem_cfg->lmac[i].rxfifo1_size, hdr_len); - IWL_DEBUG_INFO(fwrt, "WRT memory dump. Type=%u\n", - dump_mem->type); +dump_txf: + if (!(fwrt->fw->dbg.dump_mask & BIT(IWL_FW_ERROR_DUMP_TXF))) + goto dump_internal_txf; - iwl_trans_read_mem_bytes(fwrt->trans, ofs, dump_mem->data, len); - *dump_data = iwl_fw_error_next_data(*dump_data); - } + /* Count TXF sizes */ + for (i = 0; i < mem_cfg->num_lmacs; i++) { + int j; - if (smem_len) { - IWL_DEBUG_INFO(fwrt, "WRT SMEM dump\n"); - (*dump_data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM); - (*dump_data)->len = cpu_to_le32(smem_len + sizeof(*dump_mem)); - dump_mem = (void *)(*dump_data)->data; - dump_mem->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM_SMEM); - dump_mem->offset = cpu_to_le32(fwrt->trans->cfg->smem_offset); - iwl_trans_read_mem_bytes(fwrt->trans, - fwrt->trans->cfg->smem_offset, - dump_mem->data, smem_len); - *dump_data = iwl_fw_error_next_data(*dump_data); + for (j = 0; j < mem_cfg->num_txfifo_entries; j++) + ADD_LEN(fifo_len, mem_cfg->lmac[i].txfifo_size[j], + hdr_len); } - if (sram2_len) { - IWL_DEBUG_INFO(fwrt, "WRT SRAM dump\n"); - (*dump_data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM); - (*dump_data)->len = cpu_to_le32(sram2_len + sizeof(*dump_mem)); - dump_mem = (void *)(*dump_data)->data; - dump_mem->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM_SRAM); - dump_mem->offset = cpu_to_le32(fwrt->trans->cfg->dccm2_offset); - iwl_trans_read_mem_bytes(fwrt->trans, - fwrt->trans->cfg->dccm2_offset, - dump_mem->data, sram2_len); - *dump_data = iwl_fw_error_next_data(*dump_data); - } +dump_internal_txf: + if (!((fwrt->fw->dbg.dump_mask & BIT(IWL_FW_ERROR_DUMP_INTERNAL_TXF)) && + fw_has_capa(&fwrt->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG))) + goto out; + + for (i = 0; i < ARRAY_SIZE(mem_cfg->internal_txfifo_size); i++) + ADD_LEN(fifo_len, mem_cfg->internal_txfifo_size[i], hdr_len); + +out: + return fifo_len; } -void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt) +static struct iwl_fw_error_dump_file * +_iwl_fw_error_dump(struct iwl_fw_runtime *fwrt, + struct iwl_fw_dump_ptrs *fw_error_dump) { struct 
iwl_fw_error_dump_file *dump_file; struct iwl_fw_error_dump_data *dump_data; struct iwl_fw_error_dump_info *dump_info; struct iwl_fw_error_dump_smem_cfg *dump_smem_cfg; struct iwl_fw_error_dump_trigger_desc *dump_trig; - struct iwl_fw_dump_ptrs *fw_error_dump; - struct scatterlist *sg_dump_data; u32 sram_len, sram_ofs; - const struct iwl_fw_dbg_mem_seg_tlv *fw_dbg_mem = fwrt->fw->dbg_mem_tlv; + const struct iwl_fw_dbg_mem_seg_tlv *fw_mem = fwrt->fw->dbg.mem_tlv; struct iwl_fwrt_shared_mem_cfg *mem_cfg = &fwrt->smem_cfg; - u32 file_len, fifo_data_len = 0, prph_len = 0, radio_len = 0; - u32 smem_len = fwrt->fw->n_dbg_mem_tlv ? 0 : fwrt->trans->cfg->smem_len; - u32 sram2_len = fwrt->fw->n_dbg_mem_tlv ? + u32 file_len, fifo_len = 0, prph_len = 0, radio_len = 0; + u32 smem_len = fwrt->fw->dbg.n_mem_tlv ? 0 : fwrt->trans->cfg->smem_len; + u32 sram2_len = fwrt->fw->dbg.n_mem_tlv ? 0 : fwrt->trans->cfg->dccm2_len; bool monitor_dump_only = false; int i; - IWL_DEBUG_INFO(fwrt, "WRT dump start\n"); - - /* there's no point in fw dump if the bus is dead */ - if (test_bit(STATUS_TRANS_DEAD, &fwrt->trans->status)) { - IWL_ERR(fwrt, "Skip fw error dump since bus is dead\n"); - goto out; - } - if (fwrt->dump.trig && fwrt->dump.trig->mode & IWL_FW_DBG_TRIGGER_MONITOR_ONLY) monitor_dump_only = true; - fw_error_dump = kzalloc(sizeof(*fw_error_dump), GFP_KERNEL); - if (!fw_error_dump) - goto out; - /* SRAM - include stack CCM if driver knows the values for it */ if (!fwrt->trans->cfg->dccm_offset || !fwrt->trans->cfg->dccm_len) { const struct fw_img *img; @@ -684,112 +676,43 @@ void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt) /* reading RXF/TXF sizes */ if (test_bit(STATUS_FW_ERROR, &fwrt->trans->status)) { - fifo_data_len = 0; - - if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_RXF)) { - - /* Count RXF2 size */ - if (mem_cfg->rxfifo2_size) { - /* Add header info */ - fifo_data_len += - mem_cfg->rxfifo2_size + - sizeof(*dump_data) + - sizeof(struct iwl_fw_error_dump_fifo); - } - - /* Count RXF1 sizes */ - for (i = 0; i < mem_cfg->num_lmacs; i++) { - if (!mem_cfg->lmac[i].rxfifo1_size) - continue; - - /* Add header info */ - fifo_data_len += - mem_cfg->lmac[i].rxfifo1_size + - sizeof(*dump_data) + - sizeof(struct iwl_fw_error_dump_fifo); - } - } - - if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_TXF)) { - size_t fifo_const_len = sizeof(*dump_data) + - sizeof(struct iwl_fw_error_dump_fifo); - - /* Count TXF sizes */ - for (i = 0; i < mem_cfg->num_lmacs; i++) { - int j; - - for (j = 0; j < mem_cfg->num_txfifo_entries; - j++) { - if (!mem_cfg->lmac[i].txfifo_size[j]) - continue; - - /* Add header info */ - fifo_data_len += - fifo_const_len + - mem_cfg->lmac[i].txfifo_size[j]; - } - } - } - - if ((fwrt->fw->dbg_dump_mask & - BIT(IWL_FW_ERROR_DUMP_INTERNAL_TXF)) && - fw_has_capa(&fwrt->fw->ucode_capa, - IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG)) { - for (i = 0; - i < ARRAY_SIZE(mem_cfg->internal_txfifo_size); - i++) { - if (!mem_cfg->internal_txfifo_size[i]) - continue; - - /* Add header info */ - fifo_data_len += - mem_cfg->internal_txfifo_size[i] + - sizeof(*dump_data) + - sizeof(struct iwl_fw_error_dump_fifo); - } - } + fifo_len = iwl_fw_fifo_len(fwrt, mem_cfg); /* Make room for PRPH registers */ if (!fwrt->trans->cfg->gen2 && - fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_PRPH)) + fwrt->fw->dbg.dump_mask & BIT(IWL_FW_ERROR_DUMP_PRPH)) prph_len += iwl_fw_get_prph_len(fwrt); if (fwrt->trans->cfg->device_family == IWL_DEVICE_FAMILY_7000 && - fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_RADIO_REG)) 
+ fwrt->fw->dbg.dump_mask & BIT(IWL_FW_ERROR_DUMP_RADIO_REG)) radio_len = sizeof(*dump_data) + RADIO_REG_MAX_READ; } - file_len = sizeof(*dump_file) + - fifo_data_len + - prph_len + - radio_len; + file_len = sizeof(*dump_file) + fifo_len + prph_len + radio_len; - if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_DEV_FW_INFO)) + if (fwrt->fw->dbg.dump_mask & BIT(IWL_FW_ERROR_DUMP_DEV_FW_INFO)) file_len += sizeof(*dump_data) + sizeof(*dump_info); - if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_MEM_CFG)) + if (fwrt->fw->dbg.dump_mask & BIT(IWL_FW_ERROR_DUMP_MEM_CFG)) file_len += sizeof(*dump_data) + sizeof(*dump_smem_cfg); - if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_MEM)) { - /* Make room for the SMEM, if it exists */ - if (smem_len) - file_len += sizeof(*dump_data) + smem_len + - sizeof(struct iwl_fw_error_dump_mem); - - /* Make room for the secondary SRAM, if it exists */ - if (sram2_len) - file_len += sizeof(*dump_data) + sram2_len + - sizeof(struct iwl_fw_error_dump_mem); - - /* Make room for MEM segments */ - for (i = 0; i < fwrt->fw->n_dbg_mem_tlv; i++) { - file_len += sizeof(*dump_data) + - le32_to_cpu(fw_dbg_mem[i].len) + - sizeof(struct iwl_fw_error_dump_mem); - } + if (fwrt->fw->dbg.dump_mask & BIT(IWL_FW_ERROR_DUMP_MEM)) { + size_t hdr_len = sizeof(*dump_data) + + sizeof(struct iwl_fw_error_dump_mem); + + /* Dump SRAM only if no mem_tlvs */ + if (!fwrt->fw->dbg.n_mem_tlv) + ADD_LEN(file_len, sram_len, hdr_len); + + /* Make room for all mem types that exist */ + ADD_LEN(file_len, smem_len, hdr_len); + ADD_LEN(file_len, sram2_len, hdr_len); + + for (i = 0; i < fwrt->fw->dbg.n_mem_tlv; i++) + ADD_LEN(file_len, le32_to_cpu(fw_mem[i].len), hdr_len); } /* Make room for fw's virtual image pages, if it exists */ - if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_PAGING) && + if (fwrt->fw->dbg.dump_mask & BIT(IWL_FW_ERROR_DUMP_PAGING) && !fwrt->trans->cfg->gen2 && fwrt->fw->img[fwrt->cur_fw_img].paging_mem_size && fwrt->fw_paging_db[0].fw_paging_block) @@ -809,28 +732,21 @@ void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt) sizeof(*dump_info) + sizeof(*dump_smem_cfg); } - if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_ERROR_INFO) && + if (fwrt->fw->dbg.dump_mask & BIT(IWL_FW_ERROR_DUMP_ERROR_INFO) && fwrt->dump.desc) file_len += sizeof(*dump_data) + sizeof(*dump_trig) + fwrt->dump.desc->len; - if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_MEM) && - !fwrt->fw->n_dbg_mem_tlv) - file_len += sizeof(*dump_data) + sram_len + - sizeof(struct iwl_fw_error_dump_mem); - dump_file = vzalloc(file_len); - if (!dump_file) { - kfree(fw_error_dump); - goto out; - } + if (!dump_file) + return NULL; fw_error_dump->fwrt_ptr = dump_file; dump_file->barker = cpu_to_le32(IWL_FW_ERROR_DUMP_BARKER); dump_data = (void *)dump_file->data; - if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_DEV_FW_INFO)) { + if (fwrt->fw->dbg.dump_mask & BIT(IWL_FW_ERROR_DUMP_DEV_FW_INFO)) { dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_DEV_FW_INFO); dump_data->len = cpu_to_le32(sizeof(*dump_info)); dump_info = (void *)dump_data->data; @@ -851,7 +767,7 @@ void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt) dump_data = iwl_fw_error_next_data(dump_data); } - if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_MEM_CFG)) { + if (fwrt->fw->dbg.dump_mask & BIT(IWL_FW_ERROR_DUMP_MEM_CFG)) { /* Dump shared memory configuration */ dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM_CFG); dump_data->len = cpu_to_le32(sizeof(*dump_smem_cfg)); @@ -882,13 +798,13 @@ void iwl_fw_error_dump(struct iwl_fw_runtime 
*fwrt) } /* We only dump the FIFOs if the FW is in error state */ - if (fifo_data_len) { + if (fifo_len) { iwl_fw_dump_fifos(fwrt, &dump_data); if (radio_len) iwl_read_radio_regs(fwrt, &dump_data); } - if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_ERROR_INFO) && + if (fwrt->fw->dbg.dump_mask & BIT(IWL_FW_ERROR_DUMP_ERROR_INFO) && fwrt->dump.desc) { dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_ERROR_INFO); dump_data->len = cpu_to_le32(sizeof(*dump_trig) + @@ -902,12 +818,32 @@ void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt) /* In case we only want monitor dump, skip to dump trasport data */ if (monitor_dump_only) - goto dump_trans_data; + goto out; - if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_MEM)) - iwl_fw_dump_mem(fwrt, &dump_data, sram_len, sram_ofs, smem_len, - sram2_len); + if (fwrt->fw->dbg.dump_mask & BIT(IWL_FW_ERROR_DUMP_MEM)) { + const struct iwl_fw_dbg_mem_seg_tlv *fw_dbg_mem = + fwrt->fw->dbg.mem_tlv; + if (!fwrt->fw->dbg.n_mem_tlv) + iwl_fw_dump_mem(fwrt, &dump_data, sram_len, sram_ofs, + IWL_FW_ERROR_DUMP_MEM_SRAM); + + for (i = 0; i < fwrt->fw->dbg.n_mem_tlv; i++) { + u32 len = le32_to_cpu(fw_dbg_mem[i].len); + u32 ofs = le32_to_cpu(fw_dbg_mem[i].ofs); + + iwl_fw_dump_mem(fwrt, &dump_data, len, ofs, + le32_to_cpu(fw_dbg_mem[i].data_type)); + } + + iwl_fw_dump_mem(fwrt, &dump_data, smem_len, + fwrt->trans->cfg->smem_offset, + IWL_FW_ERROR_DUMP_MEM_SMEM); + + iwl_fw_dump_mem(fwrt, &dump_data, sram2_len, + fwrt->trans->cfg->dccm2_offset, + IWL_FW_ERROR_DUMP_MEM_SRAM); + } if (iwl_fw_dbg_is_d3_debug_enabled(fwrt) && fwrt->dump.d3_debug_data) { u32 addr = fwrt->trans->cfg->d3_debug_data_base_addr; @@ -929,7 +865,7 @@ void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt) } /* Dump fw's virtual image */ - if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_PAGING) && + if (fwrt->fw->dbg.dump_mask & BIT(IWL_FW_ERROR_DUMP_PAGING) && !fwrt->trans->cfg->gen2 && fwrt->fw->img[fwrt->cur_fw_img].paging_mem_size && fwrt->fw_paging_db[0].fw_paging_block) { @@ -965,13 +901,44 @@ void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt) ARRAY_SIZE(iwl_prph_dump_addr_9000)); } -dump_trans_data: +out: + dump_file->file_len = cpu_to_le32(file_len); + return dump_file; +} + +void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt) +{ + struct iwl_fw_dump_ptrs *fw_error_dump; + struct iwl_fw_error_dump_file *dump_file; + struct scatterlist *sg_dump_data; + u32 file_len; + + IWL_DEBUG_INFO(fwrt, "WRT dump start\n"); + + /* there's no point in fw dump if the bus is dead */ + if (test_bit(STATUS_TRANS_DEAD, &fwrt->trans->status)) { + IWL_ERR(fwrt, "Skip fw error dump since bus is dead\n"); + goto out; + } + + fw_error_dump = kzalloc(sizeof(*fw_error_dump), GFP_KERNEL); + if (!fw_error_dump) + goto out; + + dump_file = _iwl_fw_error_dump(fwrt, fw_error_dump); + if (!dump_file) { + kfree(fw_error_dump); + goto out; + } + fw_error_dump->trans_ptr = iwl_trans_dump_data(fwrt->trans, fwrt->dump.trig); + file_len = le32_to_cpu(dump_file->file_len); fw_error_dump->fwrt_len = file_len; - if (fw_error_dump->trans_ptr) + if (fw_error_dump->trans_ptr) { file_len += fw_error_dump->trans_ptr->len; - dump_file->file_len = cpu_to_le32(file_len); + dump_file->file_len = cpu_to_le32(file_len); + } sg_dump_data = alloc_sgtable(file_len); if (sg_dump_data) { @@ -1006,15 +973,34 @@ const struct iwl_fw_dump_desc iwl_dump_desc_assert = { }; IWL_EXPORT_SYMBOL(iwl_dump_desc_assert); -int iwl_fw_dbg_collect_desc(struct iwl_fw_runtime *fwrt, - const struct iwl_fw_dump_desc *desc, - const struct iwl_fw_dbg_trigger_tlv 
*trigger) +void iwl_fw_alive_error_dump(struct iwl_fw_runtime *fwrt) { - unsigned int delay = 0; + struct iwl_fw_dump_desc *iwl_dump_desc_no_alive = + kmalloc(sizeof(*iwl_dump_desc_no_alive), GFP_KERNEL); - if (trigger) - delay = msecs_to_jiffies(le32_to_cpu(trigger->stop_delay)); + if (!iwl_dump_desc_no_alive) + return; + + iwl_dump_desc_no_alive->trig_desc.type = + cpu_to_le32(FW_DBG_TRIGGER_NO_ALIVE); + iwl_dump_desc_no_alive->len = 0; + + if (WARN_ON(fwrt->dump.desc)) + iwl_fw_free_dump_desc(fwrt); + + IWL_WARN(fwrt, "Collecting data: trigger %d fired.\n", + FW_DBG_TRIGGER_NO_ALIVE); + fwrt->dump.desc = iwl_dump_desc_no_alive; + iwl_fw_error_dump(fwrt); + clear_bit(IWL_FWRT_STATUS_WAIT_ALIVE, &fwrt->status); +} +IWL_EXPORT_SYMBOL(iwl_fw_alive_error_dump); + +int iwl_fw_dbg_collect_desc(struct iwl_fw_runtime *fwrt, + const struct iwl_fw_dump_desc *desc, void *trigger, + unsigned int delay) +{ /* * If the loading of the FW completed successfully, the next step is to * get the SMEM config data. Thus, if fwrt->smem_cfg.num_lmacs is non @@ -1031,7 +1017,8 @@ int iwl_fw_dbg_collect_desc(struct iwl_fw_runtime *fwrt, fwrt->smem_cfg.num_lmacs) return -EIO; - if (test_and_set_bit(IWL_FWRT_STATUS_DUMPING, &fwrt->status)) + if (test_and_set_bit(IWL_FWRT_STATUS_DUMPING, &fwrt->status) || + test_bit(IWL_FWRT_STATUS_WAIT_ALIVE, &fwrt->status)) return -EBUSY; if (WARN_ON(fwrt->dump.desc)) @@ -1052,25 +1039,38 @@ IWL_EXPORT_SYMBOL(iwl_fw_dbg_collect_desc); int iwl_fw_dbg_collect(struct iwl_fw_runtime *fwrt, enum iwl_fw_dbg_trigger trig, const char *str, size_t len, - const struct iwl_fw_dbg_trigger_tlv *trigger) + struct iwl_fw_dbg_trigger_tlv *trigger) { struct iwl_fw_dump_desc *desc; + unsigned int delay = 0; - if (trigger && trigger->flags & IWL_FW_DBG_FORCE_RESTART) { - IWL_WARN(fwrt, "Force restart: trigger %d fired.\n", trig); - iwl_force_nmi(fwrt->trans); - return 0; + if (trigger) { + u16 occurrences = le16_to_cpu(trigger->occurrences) - 1; + + if (!le16_to_cpu(trigger->occurrences)) + return 0; + + if (trigger->flags & IWL_FW_DBG_FORCE_RESTART) { + IWL_WARN(fwrt, "Force restart: trigger %d fired.\n", + trig); + iwl_force_nmi(fwrt->trans); + return 0; + } + + trigger->occurrences = cpu_to_le16(occurrences); + delay = le16_to_cpu(trigger->trig_dis_ms); } desc = kzalloc(sizeof(*desc) + len, GFP_ATOMIC); if (!desc) return -ENOMEM; + desc->len = len; desc->trig_desc.type = cpu_to_le32(trig); memcpy(desc->trig_desc.data, str, len); - return iwl_fw_dbg_collect_desc(fwrt, desc, trigger); + return iwl_fw_dbg_collect_desc(fwrt, desc, trigger, delay); } IWL_EXPORT_SYMBOL(iwl_fw_dbg_collect); @@ -1078,13 +1078,9 @@ int iwl_fw_dbg_collect_trig(struct iwl_fw_runtime *fwrt, struct iwl_fw_dbg_trigger_tlv *trigger, const char *fmt, ...) 
{ - u16 occurrences = le16_to_cpu(trigger->occurrences); int ret, len = 0; char buf[64]; - if (!occurrences) - return 0; - if (fmt) { va_list ap; @@ -1107,7 +1103,6 @@ int iwl_fw_dbg_collect_trig(struct iwl_fw_runtime *fwrt, if (ret) return ret; - trigger->occurrences = cpu_to_le16(occurrences - 1); return 0; } IWL_EXPORT_SYMBOL(iwl_fw_dbg_collect_trig); @@ -1118,17 +1113,17 @@ int iwl_fw_start_dbg_conf(struct iwl_fw_runtime *fwrt, u8 conf_id) int ret; int i; - if (WARN_ONCE(conf_id >= ARRAY_SIZE(fwrt->fw->dbg_conf_tlv), + if (WARN_ONCE(conf_id >= ARRAY_SIZE(fwrt->fw->dbg.conf_tlv), "Invalid configuration %d\n", conf_id)) return -EINVAL; /* EARLY START - firmware's configuration is hard coded */ - if ((!fwrt->fw->dbg_conf_tlv[conf_id] || - !fwrt->fw->dbg_conf_tlv[conf_id]->num_of_hcmds) && + if ((!fwrt->fw->dbg.conf_tlv[conf_id] || + !fwrt->fw->dbg.conf_tlv[conf_id]->num_of_hcmds) && conf_id == FW_DBG_START_FROM_ALIVE) return 0; - if (!fwrt->fw->dbg_conf_tlv[conf_id]) + if (!fwrt->fw->dbg.conf_tlv[conf_id]) return -EINVAL; if (fwrt->dump.conf != FW_DBG_INVALID) @@ -1136,8 +1131,8 @@ int iwl_fw_start_dbg_conf(struct iwl_fw_runtime *fwrt, u8 conf_id) fwrt->dump.conf); /* Send all HCMDs for configuring the FW debug */ - ptr = (void *)&fwrt->fw->dbg_conf_tlv[conf_id]->hcmd; - for (i = 0; i < fwrt->fw->dbg_conf_tlv[conf_id]->num_of_hcmds; i++) { + ptr = (void *)&fwrt->fw->dbg.conf_tlv[conf_id]->hcmd; + for (i = 0; i < fwrt->fw->dbg.conf_tlv[conf_id]->num_of_hcmds; i++) { struct iwl_fw_dbg_conf_hcmd *cmd = (void *)ptr; struct iwl_host_cmd hcmd = { .id = cmd->id, @@ -1159,14 +1154,14 @@ int iwl_fw_start_dbg_conf(struct iwl_fw_runtime *fwrt, u8 conf_id) } IWL_EXPORT_SYMBOL(iwl_fw_start_dbg_conf); -void iwl_fw_error_dump_wk(struct work_struct *work) +/* this function assumes dump_start was called beforehand and dump_end will be + * called afterwards + */ +void iwl_fw_dbg_collect_sync(struct iwl_fw_runtime *fwrt) { - struct iwl_fw_runtime *fwrt = - container_of(work, struct iwl_fw_runtime, dump.wk.work); struct iwl_fw_dbg_params params = {0}; - if (fwrt->ops && fwrt->ops->dump_start && - fwrt->ops->dump_start(fwrt->ops_ctx)) + if (!test_bit(IWL_FWRT_STATUS_DUMPING, &fwrt->status)) return; if (fwrt->ops && fwrt->ops->fw_running && @@ -1174,7 +1169,7 @@ void iwl_fw_error_dump_wk(struct work_struct *work) IWL_ERR(fwrt, "Firmware not running - cannot dump error\n"); iwl_fw_free_dump_desc(fwrt); clear_bit(IWL_FWRT_STATUS_DUMPING, &fwrt->status); - goto out; + return; } iwl_fw_dbg_stop_recording(fwrt, ¶ms); @@ -1183,12 +1178,25 @@ void iwl_fw_error_dump_wk(struct work_struct *work) /* start recording again if the firmware is not crashed */ if (!test_bit(STATUS_FW_ERROR, &fwrt->trans->status) && - fwrt->fw->dbg_dest_tlv) { + fwrt->fw->dbg.dest_tlv) { /* wait before we collect the data till the DBGC stop */ udelay(500); iwl_fw_dbg_restart_recording(fwrt, ¶ms); } -out: +} +IWL_EXPORT_SYMBOL(iwl_fw_dbg_collect_sync); + +void iwl_fw_error_dump_wk(struct work_struct *work) +{ + struct iwl_fw_runtime *fwrt = + container_of(work, struct iwl_fw_runtime, dump.wk.work); + + if (fwrt->ops && fwrt->ops->dump_start && + fwrt->ops->dump_start(fwrt->ops_ctx)) + return; + + iwl_fw_dbg_collect_sync(fwrt); + if (fwrt->ops && fwrt->ops->dump_end) fwrt->ops->dump_end(fwrt->ops_ctx); } diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dbg.h b/drivers/net/wireless/intel/iwlwifi/fw/dbg.h index 3c89230fae6a..6f8d3256f7b0 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/dbg.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/dbg.h @@ 
-107,25 +107,25 @@ static inline void iwl_fw_free_dump_desc(struct iwl_fw_runtime *fwrt) void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt); int iwl_fw_dbg_collect_desc(struct iwl_fw_runtime *fwrt, const struct iwl_fw_dump_desc *desc, - const struct iwl_fw_dbg_trigger_tlv *trigger); + void *trigger, unsigned int delay); int iwl_fw_dbg_collect(struct iwl_fw_runtime *fwrt, enum iwl_fw_dbg_trigger trig, const char *str, size_t len, - const struct iwl_fw_dbg_trigger_tlv *trigger); + struct iwl_fw_dbg_trigger_tlv *trigger); int iwl_fw_dbg_collect_trig(struct iwl_fw_runtime *fwrt, struct iwl_fw_dbg_trigger_tlv *trigger, const char *fmt, ...) __printf(3, 4); int iwl_fw_start_dbg_conf(struct iwl_fw_runtime *fwrt, u8 id); #define iwl_fw_dbg_trigger_enabled(fw, id) ({ \ - void *__dbg_trigger = (fw)->dbg_trigger_tlv[(id)]; \ + void *__dbg_trigger = (fw)->dbg.trigger_tlv[(id)]; \ unlikely(__dbg_trigger); \ }) static inline struct iwl_fw_dbg_trigger_tlv* _iwl_fw_dbg_get_trigger(const struct iwl_fw *fw, enum iwl_fw_dbg_trigger id) { - return fw->dbg_trigger_tlv[id]; + return fw->dbg.trigger_tlv[id]; } #define iwl_fw_dbg_get_trigger(fw, id) ({ \ @@ -154,12 +154,9 @@ iwl_fw_dbg_trigger_stop_conf_match(struct iwl_fw_runtime *fwrt, } static inline bool -iwl_fw_dbg_no_trig_window(struct iwl_fw_runtime *fwrt, - struct iwl_fw_dbg_trigger_tlv *trig) +iwl_fw_dbg_no_trig_window(struct iwl_fw_runtime *fwrt, u32 id, u32 dis_ms) { - unsigned long wind_jiff = - msecs_to_jiffies(le16_to_cpu(trig->trig_dis_ms)); - u32 id = le32_to_cpu(trig->id); + unsigned long wind_jiff = msecs_to_jiffies(dis_ms); /* If this is the first event checked, jump to update start ts */ if (fwrt->dump.non_collect_ts_start[id] && @@ -179,7 +176,8 @@ iwl_fw_dbg_trigger_check_stop(struct iwl_fw_runtime *fwrt, if (wdev && !iwl_fw_dbg_trigger_vif_match(trig, wdev)) return false; - if (iwl_fw_dbg_no_trig_window(fwrt, trig)) { + if (iwl_fw_dbg_no_trig_window(fwrt, le32_to_cpu(trig->id), + le16_to_cpu(trig->trig_dis_ms))) { IWL_WARN(fwrt, "Trigger %d occurred while no-collect window.\n", trig->id); return false; @@ -188,6 +186,30 @@ iwl_fw_dbg_trigger_check_stop(struct iwl_fw_runtime *fwrt, return iwl_fw_dbg_trigger_stop_conf_match(fwrt, trig); } +static inline struct iwl_fw_dbg_trigger_tlv* +_iwl_fw_dbg_trigger_on(struct iwl_fw_runtime *fwrt, + struct wireless_dev *wdev, + const enum iwl_fw_dbg_trigger id) +{ + struct iwl_fw_dbg_trigger_tlv *trig; + + if (!iwl_fw_dbg_trigger_enabled(fwrt->fw, id)) + return NULL; + + trig = _iwl_fw_dbg_get_trigger(fwrt->fw, id); + + if (!iwl_fw_dbg_trigger_check_stop(fwrt, wdev, trig)) + return NULL; + + return trig; +} + +#define iwl_fw_dbg_trigger_on(fwrt, wdev, id) ({ \ + BUILD_BUG_ON(!__builtin_constant_p(id)); \ + BUILD_BUG_ON((id) >= FW_DBG_TRIGGER_MAX); \ + _iwl_fw_dbg_trigger_on((fwrt), (wdev), (id)); \ +}) + static inline void _iwl_fw_dbg_trigger_simple_stop(struct iwl_fw_runtime *fwrt, struct wireless_dev *wdev, @@ -293,7 +315,7 @@ static inline bool iwl_fw_dbg_is_d3_debug_enabled(struct iwl_fw_runtime *fwrt) return fw_has_capa(&fwrt->fw->ucode_capa, IWL_UCODE_TLV_CAPA_D3_DEBUG) && fwrt->trans->cfg->d3_debug_data_length && - fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_D3_DEBUG_DATA); + fwrt->fw->dbg.dump_mask & BIT(IWL_FW_ERROR_DUMP_D3_DEBUG_DATA); } void iwl_fw_dbg_read_d3_debug_data(struct iwl_fw_runtime *fwrt); @@ -344,4 +366,6 @@ static inline void iwl_fw_resume_timestamp(struct iwl_fw_runtime *fwrt) {} #endif /* CONFIG_IWLWIFI_DEBUGFS */ +void iwl_fw_alive_error_dump(struct iwl_fw_runtime *fwrt); 
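For illustration, the new iwl_fw_dbg_trigger_on() above folds the three-step pattern used throughout mvm (iwl_fw_dbg_trigger_enabled(), iwl_fw_dbg_get_trigger(), iwl_fw_dbg_trigger_check_stop()) into one call that returns the trigger TLV or NULL, while the BUILD_BUG_ONs force a compile-time-constant, in-range id. A minimal sketch of a converted call site (a hypothetical function, modeled on the BA conversions in the mvm hunks further down):

	static void example_bar_rx_trigger(struct iwl_mvm *mvm,
					   struct ieee80211_vif *vif, u8 tid)
	{
		struct iwl_fw_dbg_trigger_tlv *trig;
		struct iwl_fw_dbg_trigger_ba *ba_trig;

		/* NULL covers all of the old bail-outs at once: trigger TLV
		 * absent, vif mismatch, or still inside the no-collect window.
		 */
		trig = iwl_fw_dbg_trigger_on(&mvm->fwrt,
					     ieee80211_vif_to_wdev(vif),
					     FW_DBG_TRIGGER_BA);
		if (!trig)
			return;

		ba_trig = (void *)trig->data;
		if (!(le16_to_cpu(ba_trig->rx_bar) & BIT(tid)))
			return;

		iwl_fw_dbg_collect_trig(&mvm->fwrt, trig,
					"BAR received, tid %d", tid);
	}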
+void iwl_fw_dbg_collect_sync(struct iwl_fw_runtime *fwrt); #endif /* __iwl_fw_dbg_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/debugfs.c b/drivers/net/wireless/intel/iwlwifi/fw/debugfs.c index 1049bdfe1e69..3e120dd47305 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/debugfs.c +++ b/drivers/net/wireless/intel/iwlwifi/fw/debugfs.c @@ -258,11 +258,75 @@ static ssize_t iwl_dbgfs_timestamp_marker_read(struct iwl_fw_runtime *fwrt, FWRT_DEBUGFS_READ_WRITE_FILE_OPS(timestamp_marker, 16); +struct hcmd_write_data { + __be32 cmd_id; + __be32 flags; + __be16 length; + u8 data[0]; +} __packed; + +static ssize_t iwl_dbgfs_send_hcmd_write(struct iwl_fw_runtime *fwrt, char *buf, + size_t count) +{ + size_t header_size = (sizeof(u32) * 2 + sizeof(u16)) * 2; + size_t data_size = (count - 1) / 2; + int ret; + struct hcmd_write_data *data; + struct iwl_host_cmd hcmd = { + .len = { 0, }, + .data = { NULL, }, + }; + + if (fwrt->ops && fwrt->ops->fw_running && + !fwrt->ops->fw_running(fwrt->ops_ctx)) + return -EIO; + + if (count < header_size + 1 || count > 1024 * 4) + return -EINVAL; + + data = kmalloc(data_size, GFP_KERNEL); + if (!data) + return -ENOMEM; + + ret = hex2bin((u8 *)data, buf, data_size); + if (ret) + goto out; + + hcmd.id = be32_to_cpu(data->cmd_id); + hcmd.flags = be32_to_cpu(data->flags); + hcmd.len[0] = be16_to_cpu(data->length); + hcmd.data[0] = data->data; + + if (count != header_size + hcmd.len[0] * 2 + 1) { + IWL_ERR(fwrt, + "host command data size does not match header length\n"); + ret = -EINVAL; + goto out; + } + + if (fwrt->ops && fwrt->ops->send_hcmd) + ret = fwrt->ops->send_hcmd(fwrt->ops_ctx, &hcmd); + else + ret = -EPERM; + + if (ret < 0) + goto out; + + if (hcmd.flags & CMD_WANT_SKB) + iwl_free_resp(&hcmd); +out: + kfree(data); + return ret ?: count; +} + +FWRT_DEBUGFS_WRITE_FILE_OPS(send_hcmd, 512); + int iwl_fwrt_dbgfs_register(struct iwl_fw_runtime *fwrt, struct dentry *dbgfs_dir) { INIT_DELAYED_WORK(&fwrt->timestamp.wk, iwl_fw_timestamp_marker_wk); FWRT_DEBUGFS_ADD_FILE(timestamp_marker, dbgfs_dir, 0200); + FWRT_DEBUGFS_ADD_FILE(send_hcmd, dbgfs_dir, 0200); return 0; err: IWL_ERR(fwrt, "Can't create the fwrt debugfs directory\n"); diff --git a/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h b/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h index 6d3ef331b7d5..6fede174c664 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h @@ -328,6 +328,7 @@ iwl_fw_error_next_data(struct iwl_fw_error_dump_data *data) * @FW_DBG_TDLS: trigger log collection upon TDLS related events. * @FW_DBG_TRIGGER_TX_STATUS: trigger log collection upon tx status when * the firmware sends a tx reply. + * @FW_DBG_TRIGGER_NO_ALIVE: trigger log collection if alive flow fails */ enum iwl_fw_dbg_trigger { FW_DBG_TRIGGER_INVALID = 0, @@ -345,6 +346,7 @@ enum iwl_fw_dbg_trigger { FW_DBG_TRIGGER_TX_LATENCY, FW_DBG_TRIGGER_TDLS, FW_DBG_TRIGGER_TX_STATUS, + FW_DBG_TRIGGER_NO_ALIVE, /* must be last */ FW_DBG_TRIGGER_MAX, diff --git a/drivers/net/wireless/intel/iwlwifi/fw/file.h b/drivers/net/wireless/intel/iwlwifi/fw/file.h index 63e277b07b8a..6005a41c53d1 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/file.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/file.h @@ -337,7 +337,7 @@ typedef unsigned int __bitwise iwl_ucode_tlv_capa_t; * antenna the beacon should be transmitted * @IWL_UCODE_TLV_CAPA_BEACON_STORING: firmware will store the latest beacon * from AP and will send it upon d0i3 exit. 
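The send_hcmd write handler above consumes a single hex string per write: eight hex digits of command id, eight of flags, four of payload length (the packed __be32/__be32/__be16 header, 20 hex characters in total), then the payload bytes in hex, plus one trailing byte such as a newline that the size checks account for. A sketch of a user-space sender follows; the debugfs path is a placeholder, and the 512-byte bound is inferred from the FWRT_DEBUGFS_WRITE_FILE_OPS(send_hcmd, 512) declaration:

	#include <fcntl.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <unistd.h>

	/* placeholder, the real path depends on the debugfs layout */
	#define SEND_HCMD_PATH "/sys/kernel/debug/iwlwifi/<dev>/iwlmvm/send_hcmd"

	static int send_hcmd(uint32_t cmd_id, uint32_t flags,
			     const uint8_t *data, uint16_t len)
	{
		char buf[512];
		int fd, n, i, ret;

		/* 20 header hex chars + 2 per payload byte + '\n' */
		if (20 + 2 * len + 1 > (int)sizeof(buf))
			return -1;

		n = sprintf(buf, "%08x%08x%04x", cmd_id, flags, len);
		for (i = 0; i < len; i++)
			n += sprintf(buf + n, "%02x", data[i]);
		buf[n++] = '\n';	/* counted by the kernel's size check */

		fd = open(SEND_HCMD_PATH, O_WRONLY);
		if (fd < 0)
			return -1;
		/* one write(): the handler parses each write in full */
		ret = write(fd, buf, n) == n ? 0 : -1;
		close(fd);
		return ret;
	}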
- * @IWL_UCODE_TLV_CAPA_LAR_SUPPORT_V2: support LAR API V2 + * @IWL_UCODE_TLV_CAPA_LAR_SUPPORT_V3: support LAR API V3 * @IWL_UCODE_TLV_CAPA_CT_KILL_BY_FW: firmware responsible for CT-kill * @IWL_UCODE_TLV_CAPA_TEMP_THS_REPORT_SUPPORT: supports temperature * thresholds reporting @@ -352,6 +352,8 @@ typedef unsigned int __bitwise iwl_ucode_tlv_capa_t; * power reduction. * @IWL_UCODE_TLV_CAPA_MLME_OFFLOAD: supports MLME offload * @IWL_UCODE_TLV_CAPA_D3_DEBUG: supports debug recording during D3 + * @IWL_UCODE_TLV_CAPA_MCC_UPDATE_11AX_SUPPORT: MCC response support 11ax + * capability. * * @NUM_IWL_UCODE_TLV_CAPA: number of bits used */ @@ -392,7 +394,7 @@ enum iwl_ucode_tlv_capa { IWL_UCODE_TLV_CAPA_CSA_AND_TBTT_OFFLOAD = (__force iwl_ucode_tlv_capa_t)70, IWL_UCODE_TLV_CAPA_BEACON_ANT_SELECTION = (__force iwl_ucode_tlv_capa_t)71, IWL_UCODE_TLV_CAPA_BEACON_STORING = (__force iwl_ucode_tlv_capa_t)72, - IWL_UCODE_TLV_CAPA_LAR_SUPPORT_V2 = (__force iwl_ucode_tlv_capa_t)73, + IWL_UCODE_TLV_CAPA_LAR_SUPPORT_V3 = (__force iwl_ucode_tlv_capa_t)73, IWL_UCODE_TLV_CAPA_CT_KILL_BY_FW = (__force iwl_ucode_tlv_capa_t)74, IWL_UCODE_TLV_CAPA_TEMP_THS_REPORT_SUPPORT = (__force iwl_ucode_tlv_capa_t)75, IWL_UCODE_TLV_CAPA_CTDP_SUPPORT = (__force iwl_ucode_tlv_capa_t)76, @@ -402,6 +404,7 @@ enum iwl_ucode_tlv_capa { IWL_UCODE_TLV_CAPA_TX_POWER_ACK = (__force iwl_ucode_tlv_capa_t)84, IWL_UCODE_TLV_CAPA_D3_DEBUG = (__force iwl_ucode_tlv_capa_t)87, IWL_UCODE_TLV_CAPA_LED_CMD_SUPPORT = (__force iwl_ucode_tlv_capa_t)88, + IWL_UCODE_TLV_CAPA_MCC_UPDATE_11AX_SUPPORT = (__force iwl_ucode_tlv_capa_t)89, IWL_UCODE_TLV_CAPA_MLME_OFFLOAD = (__force iwl_ucode_tlv_capa_t)96, NUM_IWL_UCODE_TLV_CAPA diff --git a/drivers/net/wireless/intel/iwlwifi/fw/img.h b/drivers/net/wireless/intel/iwlwifi/fw/img.h index 9cc8fe8908ac..54dbbd998abf 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/img.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/img.h @@ -198,6 +198,29 @@ enum iwl_fw_type { }; /** + * struct iwl_fw_dbg - debug data + * + * @dest_tlv: points to debug destination TLV (typically SRAM or DRAM) + * @n_dest_reg: num of reg_ops in dest_tlv + * @conf_tlv: array of pointers to configuration HCMDs + * @trigger_tlv: array of pointers to triggers TLVs + * @trigger_tlv_len: lengths of the @dbg_trigger_tlv entries + * @mem_tlv: Runtime addresses to dump + * @n_mem_tlv: number of runtime addresses + * @dump_mask: bitmask of dump regions +*/ +struct iwl_fw_dbg { + struct iwl_fw_dbg_dest_tlv_v1 *dest_tlv; + u8 n_dest_reg; + struct iwl_fw_dbg_conf_tlv *conf_tlv[FW_DBG_CONF_MAX]; + struct iwl_fw_dbg_trigger_tlv *trigger_tlv[FW_DBG_TRIGGER_MAX]; + size_t trigger_tlv_len[FW_DBG_TRIGGER_MAX]; + struct iwl_fw_dbg_mem_seg_tlv *mem_tlv; + size_t n_mem_tlv; + u32 dump_mask; +}; + +/** * struct iwl_fw - variables associated with the firmware * * @ucode_ver: ucode version from the ucode file @@ -217,12 +240,6 @@ enum iwl_fw_type { * @cipher_scheme: optional external cipher scheme. 
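With the debug TLV pointers grouped into struct iwl_fw_dbg above, the length accounting and dump loops read off a single struct (fw->dbg.dump_mask, fw->dbg.mem_tlv, fw->dbg.n_mem_tlv). A small sketch of the access pattern, a hypothetical helper mirroring the ADD_LEN accounting in the _iwl_fw_error_dump() hunk earlier:

	static size_t example_mem_dump_len(const struct iwl_fw *fw, size_t hdr_len)
	{
		size_t len = 0;
		size_t i;

		if (!(fw->dbg.dump_mask & BIT(IWL_FW_ERROR_DUMP_MEM)))
			return 0;

		/* one dump header per TLV-described memory region */
		for (i = 0; i < fw->dbg.n_mem_tlv; i++)
			len += hdr_len + le32_to_cpu(fw->dbg.mem_tlv[i].len);

		return len;
	}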
* @human_readable: human readable version * we get the ALIVE from the uCode - * @dbg_dest_tlv: points to the destination TLV for debug - * @dbg_conf_tlv: array of pointers to configuration TLVs for debug - * @dbg_conf_tlv_len: lengths of the @dbg_conf_tlv entries - * @dbg_trigger_tlv: array of pointers to triggers TLVs - * @dbg_trigger_tlv_len: lengths of the @dbg_trigger_tlv entries - * @dbg_dest_reg_num: num of reg_ops in %dbg_dest_tlv */ struct iwl_fw { u32 ucode_ver; @@ -250,15 +267,7 @@ struct iwl_fw { struct iwl_fw_cipher_scheme cs[IWL_UCODE_MAX_CS]; u8 human_readable[FW_VER_HUMAN_READABLE_SZ]; - struct iwl_fw_dbg_dest_tlv_v1 *dbg_dest_tlv; - struct iwl_fw_dbg_conf_tlv *dbg_conf_tlv[FW_DBG_CONF_MAX]; - size_t dbg_conf_tlv_len[FW_DBG_CONF_MAX]; - struct iwl_fw_dbg_trigger_tlv *dbg_trigger_tlv[FW_DBG_TRIGGER_MAX]; - struct iwl_fw_dbg_mem_seg_tlv *dbg_mem_tlv; - size_t n_dbg_mem_tlv; - size_t dbg_trigger_tlv_len[FW_DBG_TRIGGER_MAX]; - u8 dbg_dest_reg_num; - u32 dbg_dump_mask; + struct iwl_fw_dbg dbg; }; static inline const char *get_fw_dbg_mode_string(int mode) @@ -280,7 +289,7 @@ static inline const char *get_fw_dbg_mode_string(int mode) static inline bool iwl_fw_dbg_conf_usniffer(const struct iwl_fw *fw, u8 id) { - const struct iwl_fw_dbg_conf_tlv *conf_tlv = fw->dbg_conf_tlv[id]; + const struct iwl_fw_dbg_conf_tlv *conf_tlv = fw->dbg.conf_tlv[id]; if (!conf_tlv) return false; diff --git a/drivers/net/wireless/intel/iwlwifi/fw/runtime.h b/drivers/net/wireless/intel/iwlwifi/fw/runtime.h index 9ed5819defaf..6b95d0e75889 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/runtime.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/runtime.h @@ -71,6 +71,7 @@ struct iwl_fw_runtime_ops { int (*dump_start)(void *ctx); void (*dump_end)(void *ctx); bool (*fw_running)(void *ctx); + int (*send_hcmd)(void *ctx, struct iwl_host_cmd *host_cmd); }; #define MAX_NUM_LMAC 2 @@ -88,6 +89,7 @@ struct iwl_fwrt_shared_mem_cfg { enum iwl_fw_runtime_status { IWL_FWRT_STATUS_DUMPING = 0, + IWL_FWRT_STATUS_WAIT_ALIVE, }; /** diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-data.h b/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-data.h index 2cc6c019d0e1..420e6d745f77 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-data.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-data.h @@ -30,38 +30,20 @@ #undef TRACE_SYSTEM #define TRACE_SYSTEM iwlwifi_data -TRACE_EVENT(iwlwifi_dev_tx_data, - TP_PROTO(const struct device *dev, - struct sk_buff *skb, u8 hdr_len), - TP_ARGS(dev, skb, hdr_len), +TRACE_EVENT(iwlwifi_dev_tx_tb, + TP_PROTO(const struct device *dev, struct sk_buff *skb, + u8 *data_src, size_t data_len), + TP_ARGS(dev, skb, data_src, data_len), TP_STRUCT__entry( DEV_ENTRY __dynamic_array(u8, data, - iwl_trace_data(skb) ? skb->len - hdr_len : 0) + iwl_trace_data(skb) ? 
data_len : 0) ), TP_fast_assign( DEV_ASSIGN; if (iwl_trace_data(skb)) - skb_copy_bits(skb, hdr_len, - __get_dynamic_array(data), - skb->len - hdr_len); - ), - TP_printk("[%s] TX frame data", __get_str(dev)) -); - -TRACE_EVENT(iwlwifi_dev_tx_tso_chunk, - TP_PROTO(const struct device *dev, - u8 *data_src, size_t data_len), - TP_ARGS(dev, data_src, data_len), - TP_STRUCT__entry( - DEV_ENTRY - - __dynamic_array(u8, data, data_len) - ), - TP_fast_assign( - DEV_ASSIGN; - memcpy(__get_dynamic_array(data), data_src, data_len); + memcpy(__get_dynamic_array(data), data_src, data_len); ), TP_printk("[%s] TX frame data", __get_str(dev)) ); diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c index d3a60d1aacb5..ba41d23b4211 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c @@ -168,12 +168,12 @@ static void iwl_dealloc_ucode(struct iwl_drv *drv) { int i; - kfree(drv->fw.dbg_dest_tlv); - for (i = 0; i < ARRAY_SIZE(drv->fw.dbg_conf_tlv); i++) - kfree(drv->fw.dbg_conf_tlv[i]); - for (i = 0; i < ARRAY_SIZE(drv->fw.dbg_trigger_tlv); i++) - kfree(drv->fw.dbg_trigger_tlv[i]); - kfree(drv->fw.dbg_mem_tlv); + kfree(drv->fw.dbg.dest_tlv); + for (i = 0; i < ARRAY_SIZE(drv->fw.dbg.conf_tlv); i++) + kfree(drv->fw.dbg.conf_tlv[i]); + for (i = 0; i < ARRAY_SIZE(drv->fw.dbg.trigger_tlv); i++) + kfree(drv->fw.dbg.trigger_tlv[i]); + kfree(drv->fw.dbg.mem_tlv); kfree(drv->fw.iml); for (i = 0; i < IWL_UCODE_TYPE_MAX; i++) @@ -303,7 +303,7 @@ struct iwl_firmware_pieces { struct iwl_fw_dbg_trigger_tlv *dbg_trigger_tlv[FW_DBG_TRIGGER_MAX]; size_t dbg_trigger_tlv_len[FW_DBG_TRIGGER_MAX]; struct iwl_fw_dbg_mem_seg_tlv *dbg_mem_tlv; - size_t n_dbg_mem_tlv; + size_t n_mem_tlv; }; /* @@ -936,7 +936,7 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv, IWL_INFO(drv, "Found debug destination: %s\n", get_fw_dbg_mode_string(mon_mode)); - drv->fw.dbg_dest_reg_num = (dest_v1) ? + drv->fw.dbg.n_dest_reg = (dest_v1) ? 
tlv_len - offsetof(struct iwl_fw_dbg_dest_tlv_v1, reg_ops) : @@ -944,8 +944,8 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv, offsetof(struct iwl_fw_dbg_dest_tlv, reg_ops); - drv->fw.dbg_dest_reg_num /= - sizeof(drv->fw.dbg_dest_tlv->reg_ops[0]); + drv->fw.dbg.n_dest_reg /= + sizeof(drv->fw.dbg.dest_tlv->reg_ops[0]); break; } @@ -959,7 +959,7 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv, break; } - if (conf->id >= ARRAY_SIZE(drv->fw.dbg_conf_tlv)) { + if (conf->id >= ARRAY_SIZE(drv->fw.dbg.conf_tlv)) { IWL_ERR(drv, "Skip unknown configuration: %d\n", conf->id); @@ -988,7 +988,7 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv, (void *)tlv_data; u32 trigger_id = le32_to_cpu(trigger->id); - if (trigger_id >= ARRAY_SIZE(drv->fw.dbg_trigger_tlv)) { + if (trigger_id >= ARRAY_SIZE(drv->fw.dbg.trigger_tlv)) { IWL_ERR(drv, "Skip unknown trigger: %u\n", trigger->id); @@ -1015,7 +1015,7 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv, break; } - drv->fw.dbg_dump_mask = + drv->fw.dbg.dump_mask = le32_to_cpup((__le32 *)tlv_data); break; } @@ -1070,13 +1070,13 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv, dbg_mem->data_type); size = sizeof(*pieces->dbg_mem_tlv) * - (pieces->n_dbg_mem_tlv + 1); + (pieces->n_mem_tlv + 1); n = krealloc(pieces->dbg_mem_tlv, size, GFP_KERNEL); if (!n) return -ENOMEM; pieces->dbg_mem_tlv = n; - pieces->dbg_mem_tlv[pieces->n_dbg_mem_tlv] = *dbg_mem; - pieces->n_dbg_mem_tlv++; + pieces->dbg_mem_tlv[pieces->n_mem_tlv] = *dbg_mem; + pieces->n_mem_tlv++; break; } case IWL_UCODE_TLV_IML: { @@ -1256,7 +1256,7 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context) IWL_DEFAULT_STANDARD_PHY_CALIBRATE_TBL_SIZE; fw->ucode_capa.n_scan_channels = IWL_DEFAULT_SCAN_CHANNELS; /* dump all fw memory areas by default except d3 debug data */ - fw->dbg_dump_mask = 0xfffdffff; + fw->dbg.dump_mask = 0xfffdffff; pieces = kzalloc(sizeof(*pieces), GFP_KERNEL); if (!pieces) @@ -1323,21 +1323,21 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context) goto out_free_fw; if (pieces->dbg_dest_tlv_init) { - size_t dbg_dest_size = sizeof(*drv->fw.dbg_dest_tlv) + - sizeof(drv->fw.dbg_dest_tlv->reg_ops[0]) * - drv->fw.dbg_dest_reg_num; + size_t dbg_dest_size = sizeof(*drv->fw.dbg.dest_tlv) + + sizeof(drv->fw.dbg.dest_tlv->reg_ops[0]) * + drv->fw.dbg.n_dest_reg; - drv->fw.dbg_dest_tlv = kmalloc(dbg_dest_size, GFP_KERNEL); + drv->fw.dbg.dest_tlv = kmalloc(dbg_dest_size, GFP_KERNEL); - if (!drv->fw.dbg_dest_tlv) + if (!drv->fw.dbg.dest_tlv) goto out_free_fw; if (*pieces->dbg_dest_ver == 0) { - memcpy(drv->fw.dbg_dest_tlv, pieces->dbg_dest_tlv_v1, + memcpy(drv->fw.dbg.dest_tlv, pieces->dbg_dest_tlv_v1, dbg_dest_size); } else { struct iwl_fw_dbg_dest_tlv_v1 *dest_tlv = - drv->fw.dbg_dest_tlv; + drv->fw.dbg.dest_tlv; dest_tlv->version = pieces->dbg_dest_tlv->version; dest_tlv->monitor_mode = @@ -1352,8 +1352,8 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context) pieces->dbg_dest_tlv->base_shift; memcpy(dest_tlv->reg_ops, pieces->dbg_dest_tlv->reg_ops, - sizeof(drv->fw.dbg_dest_tlv->reg_ops[0]) * - drv->fw.dbg_dest_reg_num); + sizeof(drv->fw.dbg.dest_tlv->reg_ops[0]) * + drv->fw.dbg.n_dest_reg); /* In version 1 of the destination tlv, which is * relevant for internal buffer exclusively, @@ -1369,15 +1369,13 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context) } } - for (i = 0; i < ARRAY_SIZE(drv->fw.dbg_conf_tlv); i++) { + for (i = 0; i < 
ARRAY_SIZE(drv->fw.dbg.conf_tlv); i++) { if (pieces->dbg_conf_tlv[i]) { - drv->fw.dbg_conf_tlv_len[i] = - pieces->dbg_conf_tlv_len[i]; - drv->fw.dbg_conf_tlv[i] = + drv->fw.dbg.conf_tlv[i] = kmemdup(pieces->dbg_conf_tlv[i], - drv->fw.dbg_conf_tlv_len[i], + pieces->dbg_conf_tlv_len[i], GFP_KERNEL); - if (!drv->fw.dbg_conf_tlv[i]) + if (!drv->fw.dbg.conf_tlv[i]) goto out_free_fw; } } @@ -1404,7 +1402,7 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context) trigger_tlv_sz[FW_DBG_TRIGGER_TDLS] = sizeof(struct iwl_fw_dbg_trigger_tdls); - for (i = 0; i < ARRAY_SIZE(drv->fw.dbg_trigger_tlv); i++) { + for (i = 0; i < ARRAY_SIZE(drv->fw.dbg.trigger_tlv); i++) { if (pieces->dbg_trigger_tlv[i]) { /* * If the trigger isn't long enough, WARN and exit. @@ -1417,22 +1415,22 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context) (trigger_tlv_sz[i] + sizeof(struct iwl_fw_dbg_trigger_tlv)))) goto out_free_fw; - drv->fw.dbg_trigger_tlv_len[i] = + drv->fw.dbg.trigger_tlv_len[i] = pieces->dbg_trigger_tlv_len[i]; - drv->fw.dbg_trigger_tlv[i] = + drv->fw.dbg.trigger_tlv[i] = kmemdup(pieces->dbg_trigger_tlv[i], - drv->fw.dbg_trigger_tlv_len[i], + drv->fw.dbg.trigger_tlv_len[i], GFP_KERNEL); - if (!drv->fw.dbg_trigger_tlv[i]) + if (!drv->fw.dbg.trigger_tlv[i]) goto out_free_fw; } } /* Now that we can no longer fail, copy information */ - drv->fw.dbg_mem_tlv = pieces->dbg_mem_tlv; + drv->fw.dbg.mem_tlv = pieces->dbg_mem_tlv; pieces->dbg_mem_tlv = NULL; - drv->fw.n_dbg_mem_tlv = pieces->n_dbg_mem_tlv; + drv->fw.dbg.n_mem_tlv = pieces->n_mem_tlv; /* * The (size - 16) / 12 formula is based on the information recorded @@ -1473,6 +1471,7 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context) break; default: WARN(1, "Invalid fw type %d\n", fw->type); + /* fall through */ case IWL_FW_MVM: op = &iwlwifi_opmode_table[MVM_OP_MODE]; break; diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c index ec300d388694..96e101d79662 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c @@ -1335,6 +1335,7 @@ struct iwl_nvm_data *iwl_get_nvm(struct iwl_trans *trans, bool lar_fw_supported = !iwlwifi_mod_params.lar_disable && fw_has_capa(&fw->ucode_capa, IWL_UCODE_TLV_CAPA_LAR_SUPPORT); + bool empty_otp; u32 mac_flags; u32 sbands_flags = 0; @@ -1350,7 +1351,9 @@ struct iwl_nvm_data *iwl_get_nvm(struct iwl_trans *trans, } rsp = (void *)hcmd.resp_pkt->data; - if (le32_to_cpu(rsp->general.flags) & NVM_GENERAL_FLAGS_EMPTY_OTP) + empty_otp = !!(le32_to_cpu(rsp->general.flags) & + NVM_GENERAL_FLAGS_EMPTY_OTP); + if (empty_otp) IWL_INFO(trans, "OTP is empty\n"); nvm = kzalloc(sizeof(*nvm) + @@ -1374,6 +1377,11 @@ struct iwl_nvm_data *iwl_get_nvm(struct iwl_trans *trans, /* Initialize general data */ nvm->nvm_version = le16_to_cpu(rsp->general.nvm_version); + nvm->n_hw_addrs = rsp->general.n_hw_addrs; + if (nvm->n_hw_addrs == 0) + IWL_WARN(trans, + "Firmware declares no reserved mac addresses.
OTP is empty: %d\n", + empty_otp); /* Initialize MAC sku data */ mac_flags = le32_to_cpu(rsp->mac_sku.mac_sku_flags); diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h index 6c636b2a6b43..26b3c73051ca 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h @@ -725,7 +725,7 @@ struct iwl_dram_data { * @dbg_dest_tlv: points to the destination TLV for debug * @dbg_conf_tlv: array of pointers to configuration TLVs for debug * @dbg_trigger_tlv: array of pointers to triggers TLVs for debug - * @dbg_dest_reg_num: num of reg_ops in %dbg_dest_tlv + * @dbg_n_dest_reg: num of reg_ops in %dbg_dest_tlv * @num_blocks: number of blocks in fw_mon * @fw_mon: address of the buffers for firmware monitor * @system_pm_mode: the system-wide power management mode in use. @@ -778,7 +778,7 @@ struct iwl_trans { const struct iwl_fw_dbg_conf_tlv *dbg_conf_tlv[FW_DBG_CONF_MAX]; struct iwl_fw_dbg_trigger_tlv * const *dbg_trigger_tlv; u32 dbg_dump_mask; - u8 dbg_dest_reg_num; + u8 dbg_n_dest_reg; int num_blocks; struct iwl_dram_data fw_mon[IWL_MAX_DEBUG_ALLOCATIONS]; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c index 210be26aadaa..843f3b41b72e 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c @@ -722,8 +722,10 @@ int iwl_mvm_wowlan_config_key_params(struct iwl_mvm *mvm, { struct iwl_wowlan_kek_kck_material_cmd kek_kck_cmd = {}; struct iwl_wowlan_tkip_params_cmd tkip_cmd = {}; + bool unified = fw_has_capa(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG); struct wowlan_key_data key_data = { - .configure_keys = !d0i3, + .configure_keys = !d0i3 && !unified, .use_rsc_tsc = false, .tkip = &tkip_cmd, .use_tkip = false, @@ -1636,32 +1638,10 @@ out_free_resp: } static struct iwl_wowlan_status * -iwl_mvm_get_wakeup_status(struct iwl_mvm *mvm, struct ieee80211_vif *vif) +iwl_mvm_get_wakeup_status(struct iwl_mvm *mvm) { - u32 base = mvm->error_event_table[0]; - struct error_table_start { - /* cf. 
struct iwl_error_event_table */ - u32 valid; - u32 error_id; - } err_info; int ret; - iwl_trans_read_mem_bytes(mvm->trans, base, - &err_info, sizeof(err_info)); - - if (err_info.valid) { - IWL_INFO(mvm, "error table is valid (%d) with error (%d)\n", - err_info.valid, err_info.error_id); - if (err_info.error_id == RF_KILL_INDICATOR_FOR_WOWLAN) { - struct cfg80211_wowlan_wakeup wakeup = { - .rfkill_release = true, - }; - ieee80211_report_wowlan_wakeup(vif, &wakeup, - GFP_KERNEL); - } - return ERR_PTR(-EIO); - } - /* only for tracing for now */ ret = iwl_mvm_send_cmd_pdu(mvm, OFFLOADS_QUERY_CMD, 0, 0, NULL); if (ret) @@ -1680,7 +1660,7 @@ static bool iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm, bool keep; struct iwl_mvm_sta *mvm_ap_sta; - fw_status = iwl_mvm_get_wakeup_status(mvm, vif); + fw_status = iwl_mvm_get_wakeup_status(mvm); if (IS_ERR_OR_NULL(fw_status)) goto out_unlock; @@ -1805,7 +1785,7 @@ static void iwl_mvm_query_netdetect_reasons(struct iwl_mvm *mvm, u32 reasons = 0; int i, j, n_matches, ret; - fw_status = iwl_mvm_get_wakeup_status(mvm, vif); + fw_status = iwl_mvm_get_wakeup_status(mvm); if (!IS_ERR_OR_NULL(fw_status)) { reasons = le32_to_cpu(fw_status->wakeup_reasons); kfree(fw_status); @@ -1918,6 +1898,29 @@ static void iwl_mvm_d3_disconnect_iter(void *data, u8 *mac, ieee80211_resume_disconnect(vif); } +static int iwl_mvm_check_rt_status(struct iwl_mvm *mvm, + struct ieee80211_vif *vif) +{ + u32 base = mvm->error_event_table[0]; + struct error_table_start { + /* cf. struct iwl_error_event_table */ + u32 valid; + u32 error_id; + } err_info; + + iwl_trans_read_mem_bytes(mvm->trans, base, + &err_info, sizeof(err_info)); + + if (err_info.valid && + err_info.error_id == RF_KILL_INDICATOR_FOR_WOWLAN) { + struct cfg80211_wowlan_wakeup wakeup = { + .rfkill_release = true, + }; + ieee80211_report_wowlan_wakeup(vif, &wakeup, GFP_KERNEL); + } + return err_info.valid; +} + static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test) { struct ieee80211_vif *vif = NULL; @@ -1949,6 +1952,15 @@ static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test) /* query SRAM first in case we want event logging */ iwl_mvm_read_d3_sram(mvm); + if (iwl_mvm_check_rt_status(mvm, vif)) { + set_bit(STATUS_FW_ERROR, &mvm->trans->status); + iwl_mvm_dump_nic_error_log(mvm); + iwl_fw_dbg_collect_desc(&mvm->fwrt, &iwl_dump_desc_assert, + NULL, 0); + ret = 1; + goto err; + } + if (d0i3_first) { ret = iwl_mvm_send_cmd_pdu(mvm, D0I3_END_CMD, 0, 0, NULL); if (ret < 0) { diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c index de40752aa67e..3b6b3d8fb961 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c @@ -666,16 +666,11 @@ iwl_dbgfs_bt_force_ant_write(struct iwl_mvm *mvm, char *buf, }; int ret, bt_force_ant_mode; - for (bt_force_ant_mode = 0; - bt_force_ant_mode < ARRAY_SIZE(modes_str); - bt_force_ant_mode++) { - if (!strcmp(buf, modes_str[bt_force_ant_mode])) - break; - } - - if (bt_force_ant_mode >= ARRAY_SIZE(modes_str)) - return -EINVAL; + ret = match_string(modes_str, ARRAY_SIZE(modes_str), buf); + if (ret < 0) + return ret; + bt_force_ant_mode = ret; ret = 0; mutex_lock(&mvm->mutex); if (mvm->bt_force_ant_mode == bt_force_ant_mode) diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c index 96d26b749952..dade206d5511 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c @@ -299,6 
+299,7 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm, enum iwl_ucode_type old_type = mvm->fwrt.cur_fw_img; static const u16 alive_cmd[] = { MVM_ALIVE }; + set_bit(IWL_FWRT_STATUS_WAIT_ALIVE, &mvm->fwrt.status); if (ucode_type == IWL_UCODE_REGULAR && iwl_fw_dbg_conf_usniffer(mvm->fw, FW_DBG_START_FROM_ALIVE) && !(fw_has_capa(&mvm->fw->ucode_capa, @@ -363,12 +364,20 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm, */ memset(&mvm->queue_info, 0, sizeof(mvm->queue_info)); - mvm->queue_info[IWL_MVM_DQA_CMD_QUEUE].hw_queue_refcount = 1; + /* + * Set a 'fake' TID for the command queue, since we use the + * hweight() of the tid_bitmap as a refcount now. Not that + * we ever even consider the command queue as one we might + * want to reuse, but be safe nevertheless. + */ + mvm->queue_info[IWL_MVM_DQA_CMD_QUEUE].tid_bitmap = + BIT(IWL_MAX_TID_COUNT + 2); for (i = 0; i < IEEE80211_MAX_QUEUES; i++) atomic_set(&mvm->mac80211_queue_stop_count[i], 0); set_bit(IWL_MVM_STATUS_FIRMWARE_RUNNING, &mvm->status); + clear_bit(IWL_FWRT_STATUS_WAIT_ALIVE, &mvm->fwrt.status); return 0; } @@ -699,8 +708,12 @@ static int iwl_mvm_sar_get_ewrd_table(struct iwl_mvm *mvm) enabled = !!(wifi_pkg->package.elements[1].integer.value); n_profiles = wifi_pkg->package.elements[2].integer.value; - /* in case of BIOS bug */ - if (n_profiles <= 0) { + /* + * Check the validity of n_profiles. The EWRD profiles start + * from index 1, so the maximum value allowed here is + * ACPI_SAR_PROFILES_NUM - 1. + */ + if (n_profiles <= 0 || n_profiles >= ACPI_SAR_PROFILE_NUM) { ret = -EINVAL; goto out_free; } @@ -1022,7 +1035,7 @@ int iwl_mvm_up(struct iwl_mvm *mvm) mvm->fwrt.dump.conf = FW_DBG_INVALID; /* if we have a destination, assume EARLY START */ - if (mvm->fw->dbg_dest_tlv) + if (mvm->fw->dbg.dest_tlv) mvm->fwrt.dump.conf = FW_DBG_START_FROM_ALIVE; iwl_fw_start_dbg_conf(&mvm->fwrt, FW_DBG_START_FROM_ALIVE); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c index 781f30356720..6486cfb33f40 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c @@ -1487,12 +1487,11 @@ static void iwl_mvm_beacon_loss_iterator(void *_data, u8 *mac, IWL_MVM_MISSED_BEACONS_THRESHOLD) ieee80211_beacon_loss(vif); - if (!iwl_fw_dbg_trigger_enabled(mvm->fw, - FW_DBG_TRIGGER_MISSED_BEACONS)) + trigger = iwl_fw_dbg_trigger_on(&mvm->fwrt, ieee80211_vif_to_wdev(vif), + FW_DBG_TRIGGER_MISSED_BEACONS); + if (!trigger) return; - trigger = iwl_fw_dbg_get_trigger(mvm->fw, - FW_DBG_TRIGGER_MISSED_BEACONS); bcon_trig = (void *)trigger->data; stop_trig_missed_bcon = le32_to_cpu(bcon_trig->stop_consec_missed_bcon); stop_trig_missed_bcon_since_rx = @@ -1500,11 +1499,6 @@ static void iwl_mvm_beacon_loss_iterator(void *_data, u8 *mac, /* TODO: implement start trigger */ - if (!iwl_fw_dbg_trigger_check_stop(&mvm->fwrt, - ieee80211_vif_to_wdev(vif), - trigger)) - return; - if (rx_missed_bcon_since_rx >= stop_trig_missed_bcon_since_rx || rx_missed_bcon >= stop_trig_missed_bcon) iwl_fw_dbg_collect_trig(&mvm->fwrt, trigger, NULL); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c index c78d017749d3..505b0385d800 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c @@ -857,16 +857,13 @@ iwl_mvm_ampdu_check_trigger(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct iwl_fw_dbg_trigger_tlv 
*trig; struct iwl_fw_dbg_trigger_ba *ba_trig; - if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_BA)) + trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, ieee80211_vif_to_wdev(vif), + FW_DBG_TRIGGER_BA); + if (!trig) return; - trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_BA); ba_trig = (void *)trig->data; - if (!iwl_fw_dbg_trigger_check_stop(&mvm->fwrt, - ieee80211_vif_to_wdev(vif), trig)) - return; - switch (action) { case IEEE80211_AMPDU_TX_OPERATIONAL: { struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); @@ -1231,12 +1228,15 @@ void __iwl_mvm_mac_stop(struct iwl_mvm *mvm) iwl_mvm_del_aux_sta(mvm); /* - * Clear IN_HW_RESTART flag when stopping the hw (as restart_complete() - * won't be called in this case). + * Clear IN_HW_RESTART and HW_RESTART_REQUESTED flag when stopping the + * hw (as restart_complete() won't be called in this case) and mac80211 + * won't execute the restart. * But make sure to cleanup interfaces that have gone down before/during * HW restart was requested. */ - if (test_and_clear_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) + if (test_and_clear_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) || + test_and_clear_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, + &mvm->status)) ieee80211_iterate_interfaces(mvm->hw, 0, iwl_mvm_cleanup_iterator, mvm); @@ -2802,14 +2802,12 @@ iwl_mvm_tdls_check_trigger(struct iwl_mvm *mvm, struct iwl_fw_dbg_trigger_tlv *trig; struct iwl_fw_dbg_trigger_tdls *tdls_trig; - if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_TDLS)) + trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, ieee80211_vif_to_wdev(vif), + FW_DBG_TRIGGER_TDLS); + if (!trig) return; - trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_TDLS); tdls_trig = (void *)trig->data; - if (!iwl_fw_dbg_trigger_check_stop(&mvm->fwrt, - ieee80211_vif_to_wdev(vif), trig)) - return; if (!(tdls_trig->action_bitmap & BIT(action))) return; @@ -4491,14 +4489,12 @@ static void iwl_mvm_event_mlme_callback(struct iwl_mvm *mvm, struct iwl_fw_dbg_trigger_tlv *trig; struct iwl_fw_dbg_trigger_mlme *trig_mlme; - if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_MLME)) + trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, ieee80211_vif_to_wdev(vif), + FW_DBG_TRIGGER_MLME); + if (!trig) return; - trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_MLME); trig_mlme = (void *)trig->data; - if (!iwl_fw_dbg_trigger_check_stop(&mvm->fwrt, - ieee80211_vif_to_wdev(vif), trig)) - return; if (event->u.mlme.data == ASSOC_EVENT) { if (event->u.mlme.status == MLME_DENIED) @@ -4533,14 +4529,12 @@ static void iwl_mvm_event_bar_rx_callback(struct iwl_mvm *mvm, struct iwl_fw_dbg_trigger_tlv *trig; struct iwl_fw_dbg_trigger_ba *ba_trig; - if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_BA)) + trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, ieee80211_vif_to_wdev(vif), + FW_DBG_TRIGGER_BA); + if (!trig) return; - trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_BA); ba_trig = (void *)trig->data; - if (!iwl_fw_dbg_trigger_check_stop(&mvm->fwrt, - ieee80211_vif_to_wdev(vif), trig)) - return; if (!(le16_to_cpu(ba_trig->rx_bar) & BIT(event->u.ba.tid))) return; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h index 8f71eeed50d9..7ba5bc2ed1c4 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h @@ -512,6 +512,7 @@ enum iwl_mvm_scan_type { IWL_SCAN_TYPE_WILD, IWL_SCAN_TYPE_MILD, IWL_SCAN_TYPE_FRAGMENTED, + IWL_SCAN_TYPE_FAST_BALANCE, }; enum iwl_mvm_sched_scan_pass_all_states { @@ -753,24 +754,12 @@ 
iwl_mvm_baid_data_from_reorder_buf(struct iwl_mvm_reorder_buffer *buf) * This is a state in which a single queue serves more than one TID, all of * which are not aggregated. Note that the queue is only associated to one * RA. - * @IWL_MVM_QUEUE_INACTIVE: queue is allocated but no traffic on it - * This is a state of a queue that has had traffic on it, but during the - * last %IWL_MVM_DQA_QUEUE_TIMEOUT time period there has been no traffic on - * it. In this state, when a new queue is needed to be allocated but no - * such free queue exists, an inactive queue might be freed and given to - * the new RA/TID. - * @IWL_MVM_QUEUE_RECONFIGURING: queue is being reconfigured - * This is the state of a queue that has had traffic pass through it, but - * needs to be reconfigured for some reason, e.g. the queue needs to - * become unshared and aggregations re-enabled on. */ enum iwl_mvm_queue_status { IWL_MVM_QUEUE_FREE, IWL_MVM_QUEUE_RESERVED, IWL_MVM_QUEUE_READY, IWL_MVM_QUEUE_SHARED, - IWL_MVM_QUEUE_INACTIVE, - IWL_MVM_QUEUE_RECONFIGURING, }; #define IWL_MVM_DQA_QUEUE_TIMEOUT (5 * HZ) @@ -787,6 +776,17 @@ struct iwl_mvm_geo_profile { u8 values[ACPI_GEO_TABLE_SIZE]; }; +struct iwl_mvm_dqa_txq_info { + u8 ra_sta_id; /* The RA this queue is mapped to, if exists */ + bool reserved; /* Is this the TXQ reserved for a STA */ + u8 mac80211_ac; /* The mac80211 AC this queue is mapped to */ + u8 txq_tid; /* The TID "owner" of this queue*/ + u16 tid_bitmap; /* Bitmap of the TIDs mapped to this queue */ + /* Timestamp for inactivation per TID of this queue */ + unsigned long last_frame_time[IWL_MAX_TID_COUNT + 1]; + enum iwl_mvm_queue_status status; +}; + struct iwl_mvm { /* for logger access */ struct device *dev; @@ -843,17 +843,7 @@ struct iwl_mvm { u16 hw_queue_to_mac80211[IWL_MAX_TVQM_QUEUES]; - struct { - u8 hw_queue_refcount; - u8 ra_sta_id; /* The RA this queue is mapped to, if exists */ - bool reserved; /* Is this the TXQ reserved for a STA */ - u8 mac80211_ac; /* The mac80211 AC this queue is mapped to */ - u8 txq_tid; /* The TID "owner" of this queue*/ - u16 tid_bitmap; /* Bitmap of the TIDs mapped to this queue */ - /* Timestamp for inactivation per TID of this queue */ - unsigned long last_frame_time[IWL_MAX_TID_COUNT + 1]; - enum iwl_mvm_queue_status status; - } queue_info[IWL_MAX_HW_QUEUES]; + struct iwl_mvm_dqa_txq_info queue_info[IWL_MAX_HW_QUEUES]; spinlock_t queue_info_lock; /* For syncing queue mgmt operations */ struct work_struct add_stream_wk; /* To add streams to queues */ @@ -1883,17 +1873,6 @@ void iwl_mvm_vif_set_low_latency(struct iwl_mvm_vif *mvmvif, bool set, mvmvif->low_latency &= ~cause; } -/* hw scheduler queue config */ -bool iwl_mvm_enable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue, - u16 ssn, const struct iwl_trans_txq_scd_cfg *cfg, - unsigned int wdg_timeout); -int iwl_mvm_tvqm_enable_txq(struct iwl_mvm *mvm, int mac80211_queue, - u8 sta_id, u8 tid, unsigned int timeout); - -int iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue, - u8 tid, u8 flags); -int iwl_mvm_find_free_queue(struct iwl_mvm *mvm, u8 sta_id, u8 minq, u8 maxq); - /* Return a bitmask with all the hw supported queues, except for the * command queue, which can't be flushed. 
*/ @@ -1905,6 +1884,11 @@ static inline u32 iwl_mvm_flushable_queues(struct iwl_mvm *mvm) static inline void iwl_mvm_stop_device(struct iwl_mvm *mvm) { + lockdep_assert_held(&mvm->mutex); + /* calling this function without using dump_start/end since at this + * point we already hold the op mode mutex + */ + iwl_fw_dbg_collect_sync(&mvm->fwrt); iwl_fw_cancel_timestamp(&mvm->fwrt); iwl_free_fw_paging(&mvm->fwrt); clear_bit(IWL_MVM_STATUS_FIRMWARE_RUNNING, &mvm->status); @@ -1990,8 +1974,6 @@ void iwl_mvm_reorder_timer_expired(struct timer_list *t); struct ieee80211_vif *iwl_mvm_get_bss_vif(struct iwl_mvm *mvm); bool iwl_mvm_is_vif_assoc(struct iwl_mvm *mvm); -void iwl_mvm_inactivity_check(struct iwl_mvm *mvm); - #define MVM_TCM_PERIOD_MSEC 500 #define MVM_TCM_PERIOD (HZ * MVM_TCM_PERIOD_MSEC / 1000) #define MVM_LL_PERIOD (10 * HZ) diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c b/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c index fff98fed35ed..3633f27d048a 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c @@ -477,15 +477,11 @@ iwl_mvm_update_mcc(struct iwl_mvm *mvm, const char *alpha2, u32 status; int resp_len, n_channels; u16 mcc; - bool resp_v2 = fw_has_capa(&mvm->fw->ucode_capa, - IWL_UCODE_TLV_CAPA_LAR_SUPPORT_V2); if (WARN_ON_ONCE(!iwl_mvm_is_lar_supported(mvm))) return ERR_PTR(-EOPNOTSUPP); cmd.len[0] = sizeof(struct iwl_mcc_update_cmd); - if (!resp_v2) - cmd.len[0] = sizeof(struct iwl_mcc_update_cmd_v1); IWL_DEBUG_LAR(mvm, "send MCC update to FW with '%c%c' src = %d\n", alpha2[0], alpha2[1], src_id); @@ -497,7 +493,8 @@ iwl_mvm_update_mcc(struct iwl_mvm *mvm, const char *alpha2, pkt = cmd.resp_pkt; /* Extract MCC response */ - if (resp_v2) { + if (fw_has_capa(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_MCC_UPDATE_11AX_SUPPORT)) { struct iwl_mcc_update_resp *mcc_resp = (void *)pkt->data; n_channels = __le32_to_cpu(mcc_resp->n_channels); @@ -509,9 +506,9 @@ iwl_mvm_update_mcc(struct iwl_mvm *mvm, const char *alpha2, goto exit; } } else { - struct iwl_mcc_update_resp_v1 *mcc_resp_v1 = (void *)pkt->data; + struct iwl_mcc_update_resp_v3 *mcc_resp_v3 = (void *)pkt->data; - n_channels = __le32_to_cpu(mcc_resp_v1->n_channels); + n_channels = __le32_to_cpu(mcc_resp_v3->n_channels); resp_len = sizeof(struct iwl_mcc_update_resp) + n_channels * sizeof(__le32); resp_cp = kzalloc(resp_len, GFP_KERNEL); @@ -520,12 +517,14 @@ iwl_mvm_update_mcc(struct iwl_mvm *mvm, const char *alpha2, goto exit; } - resp_cp->status = mcc_resp_v1->status; - resp_cp->mcc = mcc_resp_v1->mcc; - resp_cp->cap = mcc_resp_v1->cap; - resp_cp->source_id = mcc_resp_v1->source_id; - resp_cp->n_channels = mcc_resp_v1->n_channels; - memcpy(resp_cp->channels, mcc_resp_v1->channels, + resp_cp->status = mcc_resp_v3->status; + resp_cp->mcc = mcc_resp_v3->mcc; + resp_cp->cap = cpu_to_le16(mcc_resp_v3->cap); + resp_cp->source_id = mcc_resp_v3->source_id; + resp_cp->time = mcc_resp_v3->time; + resp_cp->geo_info = mcc_resp_v3->geo_info; + resp_cp->n_channels = mcc_resp_v3->n_channels; + memcpy(resp_cp->channels, mcc_resp_v3->channels, n_channels * sizeof(__le32)); } diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c index 0599d323cbeb..0e2092526fae 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c @@ -565,10 +565,23 @@ static bool iwl_mvm_fwrt_fw_running(void *ctx) return iwl_mvm_firmware_running(ctx); } +static int iwl_mvm_fwrt_send_hcmd(void *ctx, struct 
iwl_host_cmd *host_cmd) +{ + struct iwl_mvm *mvm = (struct iwl_mvm *)ctx; + int ret; + + mutex_lock(&mvm->mutex); + ret = iwl_mvm_send_cmd(mvm, host_cmd); + mutex_unlock(&mvm->mutex); + + return ret; +} + static const struct iwl_fw_runtime_ops iwl_mvm_fwrt_ops = { .dump_start = iwl_mvm_fwrt_dump_start, .dump_end = iwl_mvm_fwrt_dump_end, .fw_running = iwl_mvm_fwrt_fw_running, + .send_hcmd = iwl_mvm_fwrt_send_hcmd, }; static struct iwl_op_mode * @@ -604,9 +617,13 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, if (cfg->max_rx_agg_size) hw->max_rx_aggregation_subframes = cfg->max_rx_agg_size; + else + hw->max_rx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF; if (cfg->max_tx_agg_size) hw->max_tx_aggregation_subframes = cfg->max_tx_agg_size; + else + hw->max_tx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF; op_mode = hw->priv; @@ -748,12 +765,12 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, iwl_trans_configure(mvm->trans, &trans_cfg); trans->rx_mpdu_cmd = REPLY_RX_MPDU_CMD; - trans->dbg_dest_tlv = mvm->fw->dbg_dest_tlv; - trans->dbg_dest_reg_num = mvm->fw->dbg_dest_reg_num; - memcpy(trans->dbg_conf_tlv, mvm->fw->dbg_conf_tlv, + trans->dbg_dest_tlv = mvm->fw->dbg.dest_tlv; + trans->dbg_n_dest_reg = mvm->fw->dbg.n_dest_reg; + memcpy(trans->dbg_conf_tlv, mvm->fw->dbg.conf_tlv, sizeof(trans->dbg_conf_tlv)); - trans->dbg_trigger_tlv = mvm->fw->dbg_trigger_tlv; - trans->dbg_dump_mask = mvm->fw->dbg_dump_mask; + trans->dbg_trigger_tlv = mvm->fw->dbg.trigger_tlv; + trans->dbg_dump_mask = mvm->fw->dbg.dump_mask; trans->iml = mvm->fw->iml; trans->iml_len = mvm->fw->iml_len; @@ -784,6 +801,8 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, mutex_lock(&mvm->mutex); iwl_mvm_ref(mvm, IWL_MVM_REF_INIT_UCODE); err = iwl_run_init_mvm_ucode(mvm, true); + if (test_bit(IWL_FWRT_STATUS_WAIT_ALIVE, &mvm->fwrt.status)) + iwl_fw_alive_error_dump(&mvm->fwrt); if (!iwlmvm_mod_params.init_dbg || !err) iwl_mvm_stop_device(mvm); iwl_mvm_unref(mvm, IWL_MVM_REF_INIT_UCODE); @@ -953,15 +972,13 @@ static inline void iwl_mvm_rx_check_trigger(struct iwl_mvm *mvm, struct iwl_fw_dbg_trigger_cmd *cmds_trig; int i; - if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_FW_NOTIF)) + trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, NULL, + FW_DBG_TRIGGER_FW_NOTIF); + if (!trig) return; - trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_FW_NOTIF); cmds_trig = (void *)trig->data; - if (!iwl_fw_dbg_trigger_check_stop(&mvm->fwrt, NULL, trig)) - return; - for (i = 0; i < ARRAY_SIZE(cmds_trig->cmds); i++) { /* don't collect on CMD 0 */ if (!cmds_trig->cmds[i].cmd_id) @@ -1223,7 +1240,7 @@ void iwl_mvm_nic_restart(struct iwl_mvm *mvm, bool fw_error) */ if (!mvm->fw_restart && fw_error) { iwl_fw_dbg_collect_desc(&mvm->fwrt, &iwl_dump_desc_assert, - NULL); + NULL, 0); } else if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) { struct iwl_mvm_reprobe *reprobe; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c index 2c75f51a04e4..089972280daa 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c @@ -1239,7 +1239,11 @@ void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta, !(info->flags & IEEE80211_TX_STAT_AMPDU)) return; - rs_rate_from_ucode_rate(tx_resp_hwrate, info->band, &tx_resp_rate); + if (rs_rate_from_ucode_rate(tx_resp_hwrate, info->band, + &tx_resp_rate)) { + WARN_ON_ONCE(1); + return; + } #ifdef 
CONFIG_MAC80211_DEBUGFS /* Disable last tx check if we are debugging with fixed rate but @@ -1290,7 +1294,10 @@ void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta, */ table = &lq_sta->lq; lq_hwrate = le32_to_cpu(table->rs_table[0]); - rs_rate_from_ucode_rate(lq_hwrate, info->band, &lq_rate); + if (rs_rate_from_ucode_rate(lq_hwrate, info->band, &lq_rate)) { + WARN_ON_ONCE(1); + return; + } /* Here we actually compare this rate to the latest LQ command */ if (lq_color != LQ_FLAG_COLOR_GET(table->flags)) { @@ -1392,8 +1399,12 @@ void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta, /* Collect data for each rate used during failed TX attempts */ for (i = 0; i <= retries; ++i) { lq_hwrate = le32_to_cpu(table->rs_table[i]); - rs_rate_from_ucode_rate(lq_hwrate, info->band, - &lq_rate); + if (rs_rate_from_ucode_rate(lq_hwrate, info->band, + &lq_rate)) { + WARN_ON_ONCE(1); + return; + } + /* * Only collect stats if retried rate is in the same RS * table as active/search. @@ -3260,7 +3271,10 @@ static void rs_build_rates_table_from_fixed(struct iwl_mvm *mvm, for (i = 0; i < num_rates; i++) lq_cmd->rs_table[i] = ucode_rate_le32; - rs_rate_from_ucode_rate(ucode_rate, band, &rate); + if (rs_rate_from_ucode_rate(ucode_rate, band, &rate)) { + WARN_ON_ONCE(1); + return; + } if (is_mimo(&rate)) lq_cmd->mimo_delim = num_rates - 1; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rx.c b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c index a050220da678..ef624833cf1b 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rx.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c @@ -433,13 +433,14 @@ void iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct napi_struct *napi, struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); struct ieee80211_vif *tx_blocked_vif = rcu_dereference(mvm->csa_tx_blocked_vif); + struct iwl_fw_dbg_trigger_tlv *trig; + struct ieee80211_vif *vif = mvmsta->vif; /* We have tx blocked stations (with CS bit). If we heard * frames from a blocked station on a new channel we can * TX to it again. 
*/ - if (unlikely(tx_blocked_vif) && - mvmsta->vif == tx_blocked_vif) { + if (unlikely(tx_blocked_vif) && vif == tx_blocked_vif) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(tx_blocked_vif); @@ -450,23 +451,18 @@ void iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct napi_struct *napi, rs_update_last_rssi(mvm, mvmsta, rx_status); - if (iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_RSSI) && - ieee80211_is_beacon(hdr->frame_control)) { - struct iwl_fw_dbg_trigger_tlv *trig; + trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, + ieee80211_vif_to_wdev(vif), + FW_DBG_TRIGGER_RSSI); + + if (trig && ieee80211_is_beacon(hdr->frame_control)) { struct iwl_fw_dbg_trigger_low_rssi *rssi_trig; - bool trig_check; s32 rssi; - trig = iwl_fw_dbg_get_trigger(mvm->fw, - FW_DBG_TRIGGER_RSSI); rssi_trig = (void *)trig->data; rssi = le32_to_cpu(rssi_trig->rssi); - trig_check = - iwl_fw_dbg_trigger_check_stop(&mvm->fwrt, - ieee80211_vif_to_wdev(mvmsta->vif), - trig); - if (trig_check && rx_status->signal < rssi) + if (rx_status->signal < rssi) iwl_fw_dbg_collect_trig(&mvm->fwrt, trig, NULL); } @@ -693,15 +689,12 @@ iwl_mvm_rx_stats_check_trigger(struct iwl_mvm *mvm, struct iwl_rx_packet *pkt) struct iwl_fw_dbg_trigger_stats *trig_stats; u32 trig_offset, trig_thold; - if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_STATS)) + trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, NULL, FW_DBG_TRIGGER_STATS); + if (!trig) return; - trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_STATS); trig_stats = (void *)trig->data; - if (!iwl_fw_dbg_trigger_check_stop(&mvm->fwrt, NULL, trig)) - return; - trig_offset = le32_to_cpu(trig_stats->stop_offset); trig_thold = le32_to_cpu(trig_stats->stop_threshold); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c index 894dd6379b9a..26ac9402568d 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c @@ -923,6 +923,185 @@ static void iwl_mvm_decode_he_sigb(struct iwl_mvm *mvm, } } +static void +iwl_mvm_decode_he_phy_ru_alloc(u64 he_phy_data, u32 rate_n_flags, + struct ieee80211_radiotap_he *he, + struct ieee80211_radiotap_he_mu *he_mu, + struct ieee80211_rx_status *rx_status) +{ + /* + * Unfortunately, we have to leave the mac80211 data + * incorrect for the case that we receive an HE-MU + * transmission and *don't* have the HE phy data (due + * to the bits being used for TSF). This shouldn't + * happen though as management frames where we need + * the TSF/timers are not transmitted in HE-MU. + */ + u8 ru = FIELD_GET(IWL_RX_HE_PHY_RU_ALLOC_MASK, he_phy_data); + u8 offs = 0; + + rx_status->bw = RATE_INFO_BW_HE_RU; + + he->data1 |= cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_BW_RU_ALLOC_KNOWN); + + switch (ru) { + case 0 ... 36: + rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_26; + offs = ru; + break; + case 37 ... 52: + rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_52; + offs = ru - 37; + break; + case 53 ... 60: + rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_106; + offs = ru - 53; + break; + case 61 ... 64: + rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_242; + offs = ru - 61; + break; + case 65 ...
66: + rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_484; + offs = ru - 65; + break; + case 67: + rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_996; + break; + case 68: + rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_2x996; + break; + } + he->data2 |= le16_encode_bits(offs, + IEEE80211_RADIOTAP_HE_DATA2_RU_OFFSET); + he->data2 |= cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_PRISEC_80_KNOWN | + IEEE80211_RADIOTAP_HE_DATA2_RU_OFFSET_KNOWN); + if (he_phy_data & IWL_RX_HE_PHY_RU_ALLOC_SEC80) + he->data2 |= + cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_PRISEC_80_SEC); + + if (he_mu) { +#define CHECK_BW(bw) \ + BUILD_BUG_ON(IEEE80211_RADIOTAP_HE_MU_FLAGS2_BW_FROM_SIG_A_BW_ ## bw ## MHZ != \ + RATE_MCS_CHAN_WIDTH_##bw >> RATE_MCS_CHAN_WIDTH_POS) + CHECK_BW(20); + CHECK_BW(40); + CHECK_BW(80); + CHECK_BW(160); + he_mu->flags2 |= + le16_encode_bits(FIELD_GET(RATE_MCS_CHAN_WIDTH_MSK, + rate_n_flags), + IEEE80211_RADIOTAP_HE_MU_FLAGS2_BW_FROM_SIG_A_BW); + } +} + +static void iwl_mvm_decode_he_phy_data(struct iwl_mvm *mvm, + struct iwl_rx_mpdu_desc *desc, + struct ieee80211_radiotap_he *he, + struct ieee80211_radiotap_he_mu *he_mu, + struct ieee80211_rx_status *rx_status, + u64 he_phy_data, u32 rate_n_flags, + int queue) +{ + u32 he_type = rate_n_flags & RATE_MCS_HE_TYPE_MSK; + bool sigb_data; + u16 d1known = IEEE80211_RADIOTAP_HE_DATA1_LDPC_XSYMSEG_KNOWN | + IEEE80211_RADIOTAP_HE_DATA1_UL_DL_KNOWN | + IEEE80211_RADIOTAP_HE_DATA1_SPTL_REUSE_KNOWN | + IEEE80211_RADIOTAP_HE_DATA1_DOPPLER_KNOWN | + IEEE80211_RADIOTAP_HE_DATA1_BSS_COLOR_KNOWN; + u16 d2known = IEEE80211_RADIOTAP_HE_DATA2_PRE_FEC_PAD_KNOWN | + IEEE80211_RADIOTAP_HE_DATA2_PE_DISAMBIG_KNOWN | + IEEE80211_RADIOTAP_HE_DATA2_TXOP_KNOWN; + + he->data1 |= cpu_to_le16(d1known); + he->data2 |= cpu_to_le16(d2known); + he->data3 |= le16_encode_bits(FIELD_GET(IWL_RX_HE_PHY_BSS_COLOR_MASK, + he_phy_data), + IEEE80211_RADIOTAP_HE_DATA3_BSS_COLOR); + he->data3 |= le16_encode_bits(FIELD_GET(IWL_RX_HE_PHY_UPLINK, + he_phy_data), + IEEE80211_RADIOTAP_HE_DATA3_UL_DL); + he->data3 |= le16_encode_bits(FIELD_GET(IWL_RX_HE_PHY_LDPC_EXT_SYM, + he_phy_data), + IEEE80211_RADIOTAP_HE_DATA3_LDPC_XSYMSEG); + he->data4 |= le16_encode_bits(FIELD_GET(IWL_RX_HE_PHY_SPATIAL_REUSE_MASK, + he_phy_data), + IEEE80211_RADIOTAP_HE_DATA4_SU_MU_SPTL_REUSE); + he->data5 |= le16_encode_bits(FIELD_GET(IWL_RX_HE_PHY_PRE_FEC_PAD_MASK, + he_phy_data), + IEEE80211_RADIOTAP_HE_DATA5_PRE_FEC_PAD); + he->data5 |= le16_encode_bits(FIELD_GET(IWL_RX_HE_PHY_PE_DISAMBIG, + he_phy_data), + IEEE80211_RADIOTAP_HE_DATA5_PE_DISAMBIG); + he->data6 |= le16_encode_bits(FIELD_GET(IWL_RX_HE_PHY_TXOP_DUR_MASK, + he_phy_data), + IEEE80211_RADIOTAP_HE_DATA6_TXOP); + he->data6 |= le16_encode_bits(FIELD_GET(IWL_RX_HE_PHY_DOPPLER, + he_phy_data), + IEEE80211_RADIOTAP_HE_DATA6_DOPPLER); + + switch (he_type) { + case RATE_MCS_HE_TYPE_MU: + he_mu->flags1 |= + le16_encode_bits(FIELD_GET(IWL_RX_HE_PHY_MU_SIGB_DCM, + he_phy_data), + IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_DCM); + he_mu->flags1 |= + le16_encode_bits(FIELD_GET(IWL_RX_HE_PHY_MU_SIGB_MCS_MASK, + he_phy_data), + IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_MCS); + he_mu->flags2 |= + le16_encode_bits(FIELD_GET(IWL_RX_HE_PHY_MU_SIBG_SYM_OR_USER_NUM_MASK, + he_phy_data), + IEEE80211_RADIOTAP_HE_MU_FLAGS2_SIG_B_SYMS_USERS); + he_mu->flags2 |= + le16_encode_bits(FIELD_GET(IWL_RX_HE_PHY_MU_SIGB_COMPRESSION, + he_phy_data), + IEEE80211_RADIOTAP_HE_MU_FLAGS2_SIG_B_COMP); + he_mu->flags2 |= + le16_encode_bits(FIELD_GET(IWL_RX_HE_PHY_MU_PREAMBLE_PUNC_TYPE_MASK, + 
he_phy_data), + IEEE80211_RADIOTAP_HE_MU_FLAGS2_PUNC_FROM_SIG_A_BW); + + sigb_data = FIELD_GET(IWL_RX_HE_PHY_INFO_TYPE_MASK, + he_phy_data) == + IWL_RX_HE_PHY_INFO_TYPE_MU_EXT_INFO; + if (sigb_data) + iwl_mvm_decode_he_sigb(mvm, desc, rate_n_flags, he_mu); + /* fall through */ + case RATE_MCS_HE_TYPE_TRIG: + he->data2 |= + cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_NUM_LTF_SYMS_KNOWN); + he->data5 |= + le16_encode_bits(FIELD_GET(IWL_RX_HE_PHY_HE_LTF_NUM_MASK, + he_phy_data), + IEEE80211_RADIOTAP_HE_DATA5_NUM_LTF_SYMS); + break; + case RATE_MCS_HE_TYPE_SU: + case RATE_MCS_HE_TYPE_EXT_SU: + he->data1 |= + cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_BEAM_CHANGE_KNOWN); + he->data3 |= + le16_encode_bits(FIELD_GET(IWL_RX_HE_PHY_BEAM_CHNG, + he_phy_data), + IEEE80211_RADIOTAP_HE_DATA3_BEAM_CHANGE); + break; + } + + switch (FIELD_GET(IWL_RX_HE_PHY_INFO_TYPE_MASK, he_phy_data)) { + case IWL_RX_HE_PHY_INFO_TYPE_MU: + case IWL_RX_HE_PHY_INFO_TYPE_MU_EXT_INFO: + case IWL_RX_HE_PHY_INFO_TYPE_TB: + iwl_mvm_decode_he_phy_ru_alloc(he_phy_data, rate_n_flags, + he, he_mu, rx_status); + break; + default: + /* nothing */ + break; + } +} + static void iwl_mvm_rx_he(struct iwl_mvm *mvm, struct sk_buff *skb, struct iwl_rx_mpdu_desc *desc, u32 rate_n_flags, u16 phy_info, int queue) @@ -933,9 +1112,8 @@ static void iwl_mvm_rx_he(struct iwl_mvm *mvm, struct sk_buff *skb, u64 he_phy_data = HE_PHY_DATA_INVAL; struct ieee80211_radiotap_he *he = NULL; struct ieee80211_radiotap_he_mu *he_mu = NULL; - u32 he_type = 0xffffffff; + u32 he_type = rate_n_flags & RATE_MCS_HE_TYPE_MSK; u8 stbc, ltf; - static const struct ieee80211_radiotap_he known = { .data1 = cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_DATA_MCS_KNOWN | IEEE80211_RADIOTAP_HE_DATA1_DATA_DCM_KNOWN | @@ -953,25 +1131,19 @@ static void iwl_mvm_rx_he(struct iwl_mvm *mvm, struct sk_buff *skb, IEEE80211_RADIOTAP_HE_MU_FLAGS2_BW_FROM_SIG_A_BW_KNOWN), }; unsigned int radiotap_len = 0; - bool overload = phy_info & IWL_RX_MPDU_PHY_TSF_OVERLOAD; - bool sigb_data = false; he = skb_put_data(skb, &known, sizeof(known)); radiotap_len += sizeof(known); rx_status->flag |= RX_FLAG_RADIOTAP_HE; - he_type = rate_n_flags & RATE_MCS_HE_TYPE_MSK; - if (phy_info & IWL_RX_MPDU_PHY_TSF_OVERLOAD) { - if (mvm->trans->cfg->device_family >= - IWL_DEVICE_FAMILY_22560) + if (mvm->trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) he_phy_data = le64_to_cpu(desc->v3.he_phy_data); else he_phy_data = le64_to_cpu(desc->v1.he_phy_data); if (he_type == RATE_MCS_HE_TYPE_MU) { - he_mu = skb_put_data(skb, &mu_known, - sizeof(mu_known)); + he_mu = skb_put_data(skb, &mu_known, sizeof(mu_known)); radiotap_len += sizeof(mu_known); rx_status->flag |= RX_FLAG_RADIOTAP_HE_MU; } @@ -980,60 +1152,21 @@ static void iwl_mvm_rx_he(struct iwl_mvm *mvm, struct sk_buff *skb, /* temporarily hide the radiotap data */ __skb_pull(skb, radiotap_len); - if (overload && he_type == RATE_MCS_HE_TYPE_SU) { - he->data1 |= - cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_UL_DL_KNOWN); - if (FIELD_GET(IWL_RX_HE_PHY_UPLINK, he_phy_data)) - he->data3 |= - cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA3_UL_DL); - + if (he_phy_data != HE_PHY_DATA_INVAL && + he_type == RATE_MCS_HE_TYPE_SU) { + /* report the AMPDU-EOF bit on single frames */ if (!queue && !(phy_info & IWL_RX_MPDU_PHY_AMPDU)) { rx_status->flag |= RX_FLAG_AMPDU_DETAILS; rx_status->flag |= RX_FLAG_AMPDU_EOF_BIT_KNOWN; if (FIELD_GET(IWL_RX_HE_PHY_DELIM_EOF, he_phy_data)) rx_status->flag |= RX_FLAG_AMPDU_EOF_BIT; } - } else if (overload && he_mu && he_phy_data != HE_PHY_DATA_INVAL) { - 
he_mu->flags1 |= - le16_encode_bits(FIELD_GET(IWL_RX_HE_PHY_MU_SIBG_SYM_OR_USER_NUM_MASK, - he_phy_data), - IEEE80211_RADIOTAP_HE_MU_FLAGS2_SIG_B_SYMS_USERS); - he_mu->flags1 |= - le16_encode_bits(FIELD_GET(IWL_RX_HE_PHY_MU_SIGB_DCM, - he_phy_data), - IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_DCM); - he_mu->flags1 |= - le16_encode_bits(FIELD_GET(IWL_RX_HE_PHY_MU_SIGB_MCS_MASK, - he_phy_data), - IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_MCS); - he_mu->flags2 |= - le16_encode_bits(FIELD_GET(IWL_RX_HE_PHY_MU_SIGB_COMPRESSION, - he_phy_data), - IEEE80211_RADIOTAP_HE_MU_FLAGS2_SIG_B_COMP); - he_mu->flags2 |= - le16_encode_bits(FIELD_GET(IWL_RX_HE_PHY_MU_PREAMBLE_PUNC_TYPE_MASK, - he_phy_data), - IEEE80211_RADIOTAP_HE_MU_FLAGS2_PUNC_FROM_SIG_A_BW); - - sigb_data = FIELD_GET(IWL_RX_HE_PHY_INFO_TYPE_MASK, - he_phy_data) == - IWL_RX_HE_PHY_INFO_TYPE_MU_EXT_INFO; - if (sigb_data) - iwl_mvm_decode_he_sigb(mvm, desc, rate_n_flags, he_mu); - } - if (he_phy_data != HE_PHY_DATA_INVAL && - (he_type == RATE_MCS_HE_TYPE_SU || - he_type == RATE_MCS_HE_TYPE_MU)) { - u8 bss_color = FIELD_GET(IWL_RX_HE_PHY_BSS_COLOR_MASK, - he_phy_data); - - if (bss_color) { - he->data1 |= - cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_BSS_COLOR_KNOWN); - he->data3 |= cpu_to_le16(bss_color); - } } + if (he_phy_data != HE_PHY_DATA_INVAL) + iwl_mvm_decode_he_phy_data(mvm, desc, he, he_mu, rx_status, + he_phy_data, rate_n_flags, queue); + /* update aggregation data for monitor sake on default queue */ if (!queue && (phy_info & IWL_RX_MPDU_PHY_AMPDU)) { bool toggle_bit = phy_info & IWL_RX_MPDU_PHY_AMPDU_TOGGLE; @@ -1056,84 +1189,12 @@ static void iwl_mvm_rx_he(struct iwl_mvm *mvm, struct sk_buff *skb, rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_106; } - if (he_phy_data != HE_PHY_DATA_INVAL && - (FIELD_GET(IWL_RX_HE_PHY_INFO_TYPE_MASK, he_phy_data) == - IWL_RX_HE_PHY_INFO_TYPE_MU_EXT_INFO || - FIELD_GET(IWL_RX_HE_PHY_INFO_TYPE_MASK, he_phy_data) == - IWL_RX_HE_PHY_INFO_TYPE_TB_EXT_INFO)) { - /* - * Unfortunately, we have to leave the mac80211 data - * incorrect for the case that we receive an HE-MU - * transmission and *don't* have the HE phy data (due - * to the bits being used for TSF). This shouldn't - * happen though as management frames where we need - * the TSF/timers are not be transmitted in HE-MU. - */ - u8 ru = FIELD_GET(IWL_RX_HE_PHY_RU_ALLOC_MASK, he_phy_data); - u8 offs = 0; - - rx_status->bw = RATE_INFO_BW_HE_RU; - + /* actually data is filled in mac80211 */ + if (he_type == RATE_MCS_HE_TYPE_SU || + he_type == RATE_MCS_HE_TYPE_EXT_SU) he->data1 |= cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_BW_RU_ALLOC_KNOWN); - switch (ru) { - case 0 ... 36: - rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_26; - offs = ru; - break; - case 37 ... 52: - rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_52; - offs = ru - 37; - break; - case 53 ... 60: - rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_106; - offs = ru - 53; - break; - case 61 ... 64: - rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_242; - offs = ru - 61; - break; - case 65 ... 
66: - rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_484; - offs = ru - 65; - break; - case 67: - rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_996; - break; - case 68: - rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_2x996; - break; - } - he->data2 |= - le16_encode_bits(offs, - IEEE80211_RADIOTAP_HE_DATA2_RU_OFFSET); - he->data2 |= - cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_PRISEC_80_KNOWN | - IEEE80211_RADIOTAP_HE_DATA2_RU_OFFSET_KNOWN); - if (he_phy_data & IWL_RX_HE_PHY_RU_ALLOC_SEC80) - he->data2 |= - cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_PRISEC_80_SEC); - - if (he_mu) { -#define CHECK_BW(bw) \ - BUILD_BUG_ON(IEEE80211_RADIOTAP_HE_MU_FLAGS2_BW_FROM_SIG_A_BW_ ## bw ## MHZ != \ - RATE_MCS_CHAN_WIDTH_##bw >> RATE_MCS_CHAN_WIDTH_POS) - CHECK_BW(20); - CHECK_BW(40); - CHECK_BW(80); - CHECK_BW(160); - he->data2 |= - le16_encode_bits(FIELD_GET(RATE_MCS_CHAN_WIDTH_MSK, - rate_n_flags), - IEEE80211_RADIOTAP_HE_MU_FLAGS2_BW_FROM_SIG_A_BW); - } - } else if (he_type == RATE_MCS_HE_TYPE_SU || - he_type == RATE_MCS_HE_TYPE_EXT_SU) { - he->data1 |= - cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_BW_RU_ALLOC_KNOWN); - } - stbc = (rate_n_flags & RATE_MCS_STBC_MSK) >> RATE_MCS_STBC_POS; rx_status->nss = ((rate_n_flags & RATE_VHT_MCS_NSS_MSK) >> @@ -1202,9 +1263,8 @@ static void iwl_mvm_rx_he(struct iwl_mvm *mvm, struct sk_buff *skb, he->data5 |= le16_encode_bits(ltf, IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE); - switch (he_type) { - case RATE_MCS_HE_TYPE_SU: - case RATE_MCS_HE_TYPE_EXT_SU: { + if (he_type == RATE_MCS_HE_TYPE_SU || + he_type == RATE_MCS_HE_TYPE_EXT_SU) { u16 val; /* LTF syms correspond to streams */ @@ -1234,31 +1294,10 @@ static void iwl_mvm_rx_he(struct iwl_mvm *mvm, struct sk_buff *skb, rx_status->nss); val = 0; } + he->data5 |= le16_encode_bits(val, IEEE80211_RADIOTAP_HE_DATA5_NUM_LTF_SYMS); - } - break; - case RATE_MCS_HE_TYPE_MU: { - u16 val; - - if (he_phy_data == HE_PHY_DATA_INVAL) - break; - - val = FIELD_GET(IWL_RX_HE_PHY_HE_LTF_NUM_MASK, - he_phy_data); - - he->data2 |= - cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_NUM_LTF_SYMS_KNOWN); - he->data5 |= - cpu_to_le16(FIELD_PREP( - IEEE80211_RADIOTAP_HE_DATA5_NUM_LTF_SYMS, - val)); - } - break; - case RATE_MCS_HE_TYPE_TRIG: - /* not supported */ - break; } } @@ -1424,6 +1463,8 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi, u8 baid = (u8)((le32_to_cpu(desc->reorder_data) & IWL_RX_MPDU_REORDER_BAID_MASK) >> IWL_RX_MPDU_REORDER_BAID_SHIFT); + struct iwl_fw_dbg_trigger_tlv *trig; + struct ieee80211_vif *vif = mvmsta->vif; if (!mvm->tcm.paused && len >= sizeof(*hdr) && !is_multicast_ether_addr(hdr->addr1) && @@ -1436,8 +1477,7 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi, * frames from a blocked station on a new channel we can * TX to it again. 
*/ - if (unlikely(tx_blocked_vif) && - tx_blocked_vif == mvmsta->vif) { + if (unlikely(tx_blocked_vif) && tx_blocked_vif == vif) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(tx_blocked_vif); @@ -1448,23 +1488,18 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi, rs_update_last_rssi(mvm, mvmsta, rx_status); - if (iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_RSSI) && - ieee80211_is_beacon(hdr->frame_control)) { - struct iwl_fw_dbg_trigger_tlv *trig; + trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, + ieee80211_vif_to_wdev(vif), + FW_DBG_TRIGGER_RSSI); + + if (trig && ieee80211_is_beacon(hdr->frame_control)) { struct iwl_fw_dbg_trigger_low_rssi *rssi_trig; - bool trig_check; s32 rssi; - trig = iwl_fw_dbg_get_trigger(mvm->fw, - FW_DBG_TRIGGER_RSSI); rssi_trig = (void *)trig->data; rssi = le32_to_cpu(rssi_trig->rssi); - trig_check = - iwl_fw_dbg_trigger_check_stop(&mvm->fwrt, - ieee80211_vif_to_wdev(mvmsta->vif), - trig); - if (trig_check && rx_status->signal < rssi) + if (rx_status->signal < rssi) iwl_fw_dbg_collect_trig(&mvm->fwrt, trig, NULL); } diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c index e9048a98e793..cfb784fea77b 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c @@ -110,6 +110,10 @@ static struct iwl_mvm_scan_timing_params scan_timing[] = { .suspend_time = 95, .max_out_time = 44, }, + [IWL_SCAN_TYPE_FAST_BALANCE] = { + .suspend_time = 30, + .max_out_time = 37, + }, }; struct iwl_mvm_scan_params { @@ -235,8 +239,32 @@ iwl_mvm_get_traffic_load_band(struct iwl_mvm *mvm, enum nl80211_band band) return mvm->tcm.result.band_load[band]; } +struct iwl_is_dcm_with_go_iterator_data { + struct ieee80211_vif *current_vif; + bool is_dcm_with_p2p_go; +}; + +static void iwl_mvm_is_dcm_with_go_iterator(void *_data, u8 *mac, + struct ieee80211_vif *vif) +{ + struct iwl_is_dcm_with_go_iterator_data *data = _data; + struct iwl_mvm_vif *other_mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct iwl_mvm_vif *curr_mvmvif = + iwl_mvm_vif_from_mac80211(data->current_vif); + + /* exclude the given vif */ + if (vif == data->current_vif) + return; + + if (vif->type == NL80211_IFTYPE_AP && vif->p2p && + other_mvmvif->phy_ctxt && curr_mvmvif->phy_ctxt && + other_mvmvif->phy_ctxt->id != curr_mvmvif->phy_ctxt->id) + data->is_dcm_with_p2p_go = true; +} + static enum -iwl_mvm_scan_type _iwl_mvm_get_scan_type(struct iwl_mvm *mvm, bool p2p_device, +iwl_mvm_scan_type _iwl_mvm_get_scan_type(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, enum iwl_mvm_traffic_load load, bool low_latency) { @@ -249,9 +277,30 @@ iwl_mvm_scan_type _iwl_mvm_get_scan_type(struct iwl_mvm *mvm, bool p2p_device, if (!global_cnt) return IWL_SCAN_TYPE_UNASSOC; - if ((load == IWL_MVM_TRAFFIC_HIGH || low_latency) && !p2p_device && - fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_FRAGMENTED_SCAN)) - return IWL_SCAN_TYPE_FRAGMENTED; + if (fw_has_api(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_API_FRAGMENTED_SCAN)) { + if ((load == IWL_MVM_TRAFFIC_HIGH || low_latency) && + (!vif || vif->type != NL80211_IFTYPE_P2P_DEVICE)) + return IWL_SCAN_TYPE_FRAGMENTED; + + /* in case of DCM with GO where BSS DTIM interval < 220msec + * set all scan requests as fast-balance scan + * */ + if (vif && vif->type == NL80211_IFTYPE_STATION && + vif->bss_conf.dtim_period < 220) { + struct iwl_is_dcm_with_go_iterator_data data = { + .current_vif = vif, + .is_dcm_with_p2p_go = false, + }; + + 
ieee80211_iterate_active_interfaces_atomic(mvm->hw, + IEEE80211_IFACE_ITER_NORMAL, + iwl_mvm_is_dcm_with_go_iterator, + &data); + if (data.is_dcm_with_p2p_go) + return IWL_SCAN_TYPE_FAST_BALANCE; + } + } if (load >= IWL_MVM_TRAFFIC_MEDIUM || low_latency) return IWL_SCAN_TYPE_MILD; @@ -260,7 +309,8 @@ iwl_mvm_scan_type _iwl_mvm_get_scan_type(struct iwl_mvm *mvm, bool p2p_device, } static enum -iwl_mvm_scan_type iwl_mvm_get_scan_type(struct iwl_mvm *mvm, bool p2p_device) +iwl_mvm_scan_type iwl_mvm_get_scan_type(struct iwl_mvm *mvm, + struct ieee80211_vif *vif) { enum iwl_mvm_traffic_load load; bool low_latency; @@ -268,12 +318,12 @@ iwl_mvm_scan_type iwl_mvm_get_scan_type(struct iwl_mvm *mvm, bool p2p_device) load = iwl_mvm_get_traffic_load(mvm); low_latency = iwl_mvm_low_latency(mvm); - return _iwl_mvm_get_scan_type(mvm, p2p_device, load, low_latency); + return _iwl_mvm_get_scan_type(mvm, vif, load, low_latency); } static enum iwl_mvm_scan_type iwl_mvm_get_scan_type_band(struct iwl_mvm *mvm, - bool p2p_device, + struct ieee80211_vif *vif, enum nl80211_band band) { enum iwl_mvm_traffic_load load; @@ -282,7 +332,7 @@ iwl_mvm_scan_type iwl_mvm_get_scan_type_band(struct iwl_mvm *mvm, load = iwl_mvm_get_traffic_load_band(mvm, band); low_latency = iwl_mvm_low_latency_band(mvm, band); - return _iwl_mvm_get_scan_type(mvm, p2p_device, load, low_latency); + return _iwl_mvm_get_scan_type(mvm, vif, load, low_latency); } static int @@ -860,6 +910,12 @@ static inline bool iwl_mvm_is_regular_scan(struct iwl_mvm_scan_params *params) params->scan_plans[0].iterations == 1; } +static bool iwl_mvm_is_scan_fragmented(enum iwl_mvm_scan_type type) +{ + return (type == IWL_SCAN_TYPE_FRAGMENTED || + type == IWL_SCAN_TYPE_FAST_BALANCE); +} + static int iwl_mvm_scan_lmac_flags(struct iwl_mvm *mvm, struct iwl_mvm_scan_params *params, struct ieee80211_vif *vif) @@ -872,7 +928,7 @@ static int iwl_mvm_scan_lmac_flags(struct iwl_mvm *mvm, if (params->n_ssids == 1 && params->ssids[0].ssid_len != 0) flags |= IWL_MVM_LMAC_SCAN_FLAG_PRE_CONNECTION; - if (params->type == IWL_SCAN_TYPE_FRAGMENTED) + if (iwl_mvm_is_scan_fragmented(params->type)) flags |= IWL_MVM_LMAC_SCAN_FLAG_FRAGMENTED; if (iwl_mvm_rrm_scan_needed(mvm) && @@ -895,7 +951,7 @@ static int iwl_mvm_scan_lmac_flags(struct iwl_mvm *mvm, if (iwl_mvm_is_regular_scan(params) && vif->type != NL80211_IFTYPE_P2P_DEVICE && - params->type != IWL_SCAN_TYPE_FRAGMENTED) + !iwl_mvm_is_scan_fragmented(params->type)) flags |= IWL_MVM_LMAC_SCAN_FLAG_EXTENDED_DWELL; return flags; @@ -1044,7 +1100,7 @@ static void iwl_mvm_fill_channels(struct iwl_mvm *mvm, u8 *channels) static void iwl_mvm_fill_scan_config_v1(struct iwl_mvm *mvm, void *config, u32 flags, u8 channel_flags) { - enum iwl_mvm_scan_type type = iwl_mvm_get_scan_type(mvm, false); + enum iwl_mvm_scan_type type = iwl_mvm_get_scan_type(mvm, NULL); struct iwl_scan_config_v1 *cfg = config; cfg->flags = cpu_to_le32(flags); @@ -1077,9 +1133,9 @@ static void iwl_mvm_fill_scan_config(struct iwl_mvm *mvm, void *config, if (iwl_mvm_is_cdb_supported(mvm)) { enum iwl_mvm_scan_type lb_type, hb_type; - lb_type = iwl_mvm_get_scan_type_band(mvm, false, + lb_type = iwl_mvm_get_scan_type_band(mvm, NULL, NL80211_BAND_2GHZ); - hb_type = iwl_mvm_get_scan_type_band(mvm, false, + hb_type = iwl_mvm_get_scan_type_band(mvm, NULL, NL80211_BAND_5GHZ); cfg->out_of_channel_time[SCAN_LB_LMAC_IDX] = @@ -1093,7 +1149,7 @@ static void iwl_mvm_fill_scan_config(struct iwl_mvm *mvm, void *config, cpu_to_le32(scan_timing[hb_type].suspend_time); } else { enum 
iwl_mvm_scan_type type = - iwl_mvm_get_scan_type(mvm, false); + iwl_mvm_get_scan_type(mvm, NULL); cfg->out_of_channel_time[SCAN_LB_LMAC_IDX] = cpu_to_le32(scan_timing[type].max_out_time); @@ -1130,14 +1186,14 @@ int iwl_mvm_config_scan(struct iwl_mvm *mvm) return -ENOBUFS; if (iwl_mvm_is_cdb_supported(mvm)) { - type = iwl_mvm_get_scan_type_band(mvm, false, + type = iwl_mvm_get_scan_type_band(mvm, NULL, NL80211_BAND_2GHZ); - hb_type = iwl_mvm_get_scan_type_band(mvm, false, + hb_type = iwl_mvm_get_scan_type_band(mvm, NULL, NL80211_BAND_5GHZ); if (type == mvm->scan_type && hb_type == mvm->hb_scan_type) return 0; } else { - type = iwl_mvm_get_scan_type(mvm, false); + type = iwl_mvm_get_scan_type(mvm, NULL); if (type == mvm->scan_type) return 0; } @@ -1162,7 +1218,7 @@ int iwl_mvm_config_scan(struct iwl_mvm *mvm) SCAN_CONFIG_FLAG_SET_MAC_ADDR | SCAN_CONFIG_FLAG_SET_CHANNEL_FLAGS | SCAN_CONFIG_N_CHANNELS(num_channels) | - (type == IWL_SCAN_TYPE_FRAGMENTED ? + (iwl_mvm_is_scan_fragmented(type) ? SCAN_CONFIG_FLAG_SET_FRAGMENTED : SCAN_CONFIG_FLAG_CLEAR_FRAGMENTED); @@ -1177,7 +1233,7 @@ int iwl_mvm_config_scan(struct iwl_mvm *mvm) */ if (iwl_mvm_cdb_scan_api(mvm)) { if (iwl_mvm_is_cdb_supported(mvm)) - flags |= (hb_type == IWL_SCAN_TYPE_FRAGMENTED) ? + flags |= (iwl_mvm_is_scan_fragmented(hb_type)) ? SCAN_CONFIG_FLAG_SET_LMAC2_FRAGMENTED : SCAN_CONFIG_FLAG_CLEAR_LMAC2_FRAGMENTED; iwl_mvm_fill_scan_config(mvm, cfg, flags, channel_flags); @@ -1338,11 +1394,11 @@ static u16 iwl_mvm_scan_umac_flags(struct iwl_mvm *mvm, if (params->n_ssids == 1 && params->ssids[0].ssid_len != 0) flags |= IWL_UMAC_SCAN_GEN_FLAGS_PRE_CONNECT; - if (params->type == IWL_SCAN_TYPE_FRAGMENTED) + if (iwl_mvm_is_scan_fragmented(params->type)) flags |= IWL_UMAC_SCAN_GEN_FLAGS_FRAGMENTED; if (iwl_mvm_is_cdb_supported(mvm) && - params->hb_type == IWL_SCAN_TYPE_FRAGMENTED) + iwl_mvm_is_scan_fragmented(params->hb_type)) flags |= IWL_UMAC_SCAN_GEN_FLAGS_LMAC2_FRAGMENTED; if (iwl_mvm_rrm_scan_needed(mvm) && @@ -1380,7 +1436,7 @@ static u16 iwl_mvm_scan_umac_flags(struct iwl_mvm *mvm, */ if (iwl_mvm_is_regular_scan(params) && vif->type != NL80211_IFTYPE_P2P_DEVICE && - params->type != IWL_SCAN_TYPE_FRAGMENTED && + !iwl_mvm_is_scan_fragmented(params->type) && !iwl_mvm_is_adaptive_dwell_supported(mvm) && !iwl_mvm_is_oce_supported(mvm)) flags |= IWL_UMAC_SCAN_GEN_FLAGS_EXTENDED_DWELL; @@ -1448,6 +1504,9 @@ static int iwl_mvm_scan_umac(struct iwl_mvm *mvm, struct ieee80211_vif *vif, if (gen_flags & IWL_UMAC_SCAN_GEN_FLAGS_LMAC2_FRAGMENTED) cmd->v8.num_of_fragments[SCAN_HB_LMAC_IDX] = IWL_SCAN_NUM_OF_FRAGS; + + cmd->v8.general_flags2 = + IWL_UMAC_SCAN_GEN_FLAGS2_ALLOW_CHNL_REORDER; } cmd->scan_start_mac_id = scan_vif->id; @@ -1586,19 +1645,20 @@ void iwl_mvm_scan_timeout_wk(struct work_struct *work) static void iwl_mvm_fill_scan_type(struct iwl_mvm *mvm, struct iwl_mvm_scan_params *params, - bool p2p) + struct ieee80211_vif *vif) { if (iwl_mvm_is_cdb_supported(mvm)) { params->type = - iwl_mvm_get_scan_type_band(mvm, p2p, + iwl_mvm_get_scan_type_band(mvm, vif, NL80211_BAND_2GHZ); params->hb_type = - iwl_mvm_get_scan_type_band(mvm, p2p, + iwl_mvm_get_scan_type_band(mvm, vif, NL80211_BAND_5GHZ); } else { - params->type = iwl_mvm_get_scan_type(mvm, p2p); + params->type = iwl_mvm_get_scan_type(mvm, vif); } } + int iwl_mvm_reg_scan_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct cfg80211_scan_request *req, struct ieee80211_scan_ies *ies) @@ -1646,8 +1706,7 @@ int iwl_mvm_reg_scan_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif, 
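/*
 * Illustrative aside -- not part of the patch. The scan.c changes above
 * add IWL_SCAN_TYPE_FAST_BALANCE (short suspend_time 30 / max_out_time 37)
 * and select it when a station vif with a short DTIM period shares the
 * device with a P2P GO on a different PHY context (DCM). A standalone
 * model of the selection order; the enum values, load levels and the
 * dcm_with_p2p_go flag (standing in for the interface iteration) are
 * illustrative, and "associated" stands in for the global bound-vif count.
 */
#include <stdbool.h>

enum scan_type {
	SCAN_TYPE_UNASSOC,
	SCAN_TYPE_FRAGMENTED,
	SCAN_TYPE_FAST_BALANCE,
	SCAN_TYPE_MILD,
	SCAN_TYPE_WILD,
};

enum load_level { LOAD_LOW, LOAD_MEDIUM, LOAD_HIGH };

static enum scan_type pick_scan_type(bool associated, enum load_level load,
				     bool low_latency, bool p2p_device,
				     bool sta_vif, unsigned int dtim_period,
				     bool dcm_with_p2p_go, bool fw_frag_scan)
{
	if (!associated)
		return SCAN_TYPE_UNASSOC;

	if (fw_frag_scan) {
		if ((load == LOAD_HIGH || low_latency) && !p2p_device)
			return SCAN_TYPE_FRAGMENTED;

		/*
		 * DCM with a GO: fast-balance keeps out-of-channel time
		 * short so the GO's beacons and the BSS DTIM are both
		 * still served while scanning.
		 */
		if (sta_vif && dtim_period < 220 && dcm_with_p2p_go)
			return SCAN_TYPE_FAST_BALANCE;
	}

	if (load >= LOAD_MEDIUM || low_latency)
		return SCAN_TYPE_MILD;

	return SCAN_TYPE_WILD;
}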
params.scan_plans = &scan_plan; params.n_scan_plans = 1; - iwl_mvm_fill_scan_type(mvm, ¶ms, - vif->type == NL80211_IFTYPE_P2P_DEVICE); + iwl_mvm_fill_scan_type(mvm, ¶ms, vif); ret = iwl_mvm_get_measurement_dwell(mvm, req, ¶ms); if (ret < 0) @@ -1742,8 +1801,7 @@ int iwl_mvm_sched_scan_start(struct iwl_mvm *mvm, params.n_scan_plans = req->n_scan_plans; params.scan_plans = req->scan_plans; - iwl_mvm_fill_scan_type(mvm, ¶ms, - vif->type == NL80211_IFTYPE_P2P_DEVICE); + iwl_mvm_fill_scan_type(mvm, ¶ms, vif); /* In theory, LMAC scans can handle a 32-bit delay, but since * waiting for over 18 hours to start the scan is a bit silly diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c index 8f929c774e70..1887d2b9f185 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c @@ -358,6 +358,108 @@ static int iwl_mvm_invalidate_sta_queue(struct iwl_mvm *mvm, int queue, return ret; } +static int iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue, + int mac80211_queue, u8 tid, u8 flags) +{ + struct iwl_scd_txq_cfg_cmd cmd = { + .scd_queue = queue, + .action = SCD_CFG_DISABLE_QUEUE, + }; + bool remove_mac_queue = mac80211_queue != IEEE80211_INVAL_HW_QUEUE; + int ret; + + if (WARN_ON(remove_mac_queue && mac80211_queue >= IEEE80211_MAX_QUEUES)) + return -EINVAL; + + if (iwl_mvm_has_new_tx_api(mvm)) { + spin_lock_bh(&mvm->queue_info_lock); + + if (remove_mac_queue) + mvm->hw_queue_to_mac80211[queue] &= + ~BIT(mac80211_queue); + + spin_unlock_bh(&mvm->queue_info_lock); + + iwl_trans_txq_free(mvm->trans, queue); + + return 0; + } + + spin_lock_bh(&mvm->queue_info_lock); + + if (WARN_ON(mvm->queue_info[queue].tid_bitmap == 0)) { + spin_unlock_bh(&mvm->queue_info_lock); + return 0; + } + + mvm->queue_info[queue].tid_bitmap &= ~BIT(tid); + + /* + * If there is another TID with the same AC - don't remove the MAC queue + * from the mapping + */ + if (tid < IWL_MAX_TID_COUNT) { + unsigned long tid_bitmap = + mvm->queue_info[queue].tid_bitmap; + int ac = tid_to_mac80211_ac[tid]; + int i; + + for_each_set_bit(i, &tid_bitmap, IWL_MAX_TID_COUNT) { + if (tid_to_mac80211_ac[i] == ac) + remove_mac_queue = false; + } + } + + if (remove_mac_queue) + mvm->hw_queue_to_mac80211[queue] &= + ~BIT(mac80211_queue); + + cmd.action = mvm->queue_info[queue].tid_bitmap ? 
+ SCD_CFG_ENABLE_QUEUE : SCD_CFG_DISABLE_QUEUE; + if (cmd.action == SCD_CFG_DISABLE_QUEUE) + mvm->queue_info[queue].status = IWL_MVM_QUEUE_FREE; + + IWL_DEBUG_TX_QUEUES(mvm, + "Disabling TXQ #%d tids=0x%x (mac80211 map:0x%x)\n", + queue, + mvm->queue_info[queue].tid_bitmap, + mvm->hw_queue_to_mac80211[queue]); + + /* If the queue is still enabled - nothing left to do in this func */ + if (cmd.action == SCD_CFG_ENABLE_QUEUE) { + spin_unlock_bh(&mvm->queue_info_lock); + return 0; + } + + cmd.sta_id = mvm->queue_info[queue].ra_sta_id; + cmd.tid = mvm->queue_info[queue].txq_tid; + + /* Make sure queue info is correct even though we overwrite it */ + WARN(mvm->queue_info[queue].tid_bitmap || + mvm->hw_queue_to_mac80211[queue], + "TXQ #%d info out-of-sync - mac map=0x%x, tids=0x%x\n", + queue, mvm->hw_queue_to_mac80211[queue], + mvm->queue_info[queue].tid_bitmap); + + /* If we are here - the queue is freed and we can zero out these vals */ + mvm->queue_info[queue].tid_bitmap = 0; + mvm->hw_queue_to_mac80211[queue] = 0; + + /* Regardless if this is a reserved TXQ for a STA - mark it as false */ + mvm->queue_info[queue].reserved = false; + + spin_unlock_bh(&mvm->queue_info_lock); + + iwl_trans_txq_disable(mvm->trans, queue, false); + ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, flags, + sizeof(struct iwl_scd_txq_cfg_cmd), &cmd); + + if (ret) + IWL_ERR(mvm, "Failed to disable queue %d (ret=%d)\n", + queue, ret); + return ret; +} + static int iwl_mvm_get_queue_agg_tids(struct iwl_mvm *mvm, int queue) { struct ieee80211_sta *sta; @@ -447,11 +549,12 @@ static int iwl_mvm_remove_sta_queue_marking(struct iwl_mvm *mvm, int queue) } static int iwl_mvm_free_inactive_queue(struct iwl_mvm *mvm, int queue, - bool same_sta) + u8 new_sta_id) { struct iwl_mvm_sta *mvmsta; u8 txq_curr_ac, sta_id, tid; unsigned long disable_agg_tids = 0; + bool same_sta; int ret; lockdep_assert_held(&mvm->mutex); @@ -465,6 +568,8 @@ static int iwl_mvm_free_inactive_queue(struct iwl_mvm *mvm, int queue, tid = mvm->queue_info[queue].txq_tid; spin_unlock_bh(&mvm->queue_info_lock); + same_sta = sta_id == new_sta_id; + mvmsta = iwl_mvm_sta_from_staid_protected(mvm, sta_id); if (WARN_ON(!mvmsta)) return -EINVAL; @@ -479,10 +584,6 @@ static int iwl_mvm_free_inactive_queue(struct iwl_mvm *mvm, int queue, mvmsta->vif->hw_queue[txq_curr_ac], tid, 0); if (ret) { - /* Re-mark the inactive queue as inactive */ - spin_lock_bh(&mvm->queue_info_lock); - mvm->queue_info[queue].status = IWL_MVM_QUEUE_INACTIVE; - spin_unlock_bh(&mvm->queue_info_lock); IWL_ERR(mvm, "Failed to free inactive queue %d (ret=%d)\n", queue, ret); @@ -504,7 +605,13 @@ static int iwl_mvm_get_shared_queue(struct iwl_mvm *mvm, u8 ac_to_queue[IEEE80211_NUM_ACS]; int i; + /* + * This protects us against grabbing a queue that's being reconfigured + * by the inactivity checker. 
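/*
 * Illustrative aside -- not part of the patch. A recurring theme of the
 * sta.c/utils.c changes is dropping the separate hw_queue_refcount and
 * treating the queue's TID bitmap as the single source of truth: an
 * empty bitmap means the queue can be disabled, more than one bit set
 * means it is shared. A minimal model of that bookkeeping; the helper
 * names are illustrative, not driver symbols.
 */
#include <stdbool.h>
#include <stdint.h>

/* true once no TID is mapped any more -> send SCD_CFG_DISABLE_QUEUE */
static inline bool txq_now_free(uint16_t tid_bitmap)
{
	return tid_bitmap == 0;
}

/* hweight16(tid_bitmap) > 1 without counting: clear the lowest set bit */
static inline bool txq_shared(uint16_t tid_bitmap)
{
	return (tid_bitmap & (tid_bitmap - 1)) != 0;
}

/* returns true when this was the first TID, i.e. the queue gets enabled */
static inline bool txq_map_tid(uint16_t *tid_bitmap, unsigned int tid)
{
	bool first = (*tid_bitmap == 0);

	*tid_bitmap |= (uint16_t)(1u << tid);
	return first;
}

/* returns true when the last TID is gone, i.e. the queue gets disabled */
static inline bool txq_unmap_tid(uint16_t *tid_bitmap, unsigned int tid)
{
	*tid_bitmap &= (uint16_t)~(1u << tid);
	return *tid_bitmap == 0;
}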
+ */ + lockdep_assert_held(&mvm->mutex); lockdep_assert_held(&mvm->queue_info_lock); + if (WARN_ON(iwl_mvm_has_new_tx_api(mvm))) return -EINVAL; @@ -517,11 +624,6 @@ static int iwl_mvm_get_shared_queue(struct iwl_mvm *mvm, i != IWL_MVM_DQA_BSS_CLIENT_QUEUE) continue; - /* Don't try and take queues being reconfigured */ - if (mvm->queue_info[queue].status == - IWL_MVM_QUEUE_RECONFIGURING) - continue; - ac_to_queue[mvm->queue_info[i].mac80211_ac] = i; } @@ -562,14 +664,6 @@ static int iwl_mvm_get_shared_queue(struct iwl_mvm *mvm, return -ENOSPC; } - /* Make sure the queue isn't in the middle of being reconfigured */ - if (mvm->queue_info[queue].status == IWL_MVM_QUEUE_RECONFIGURING) { - IWL_ERR(mvm, - "TXQ %d is in the middle of re-config - try again\n", - queue); - return -EBUSY; - } - return queue; } @@ -579,9 +673,9 @@ static int iwl_mvm_get_shared_queue(struct iwl_mvm *mvm, * in such a case, otherwise - if no redirection required - it does nothing, * unless the %force param is true. */ -int iwl_mvm_scd_queue_redirect(struct iwl_mvm *mvm, int queue, int tid, - int ac, int ssn, unsigned int wdg_timeout, - bool force) +static int iwl_mvm_scd_queue_redirect(struct iwl_mvm *mvm, int queue, int tid, + int ac, int ssn, unsigned int wdg_timeout, + bool force) { struct iwl_scd_txq_cfg_cmd cmd = { .scd_queue = queue, @@ -616,7 +710,7 @@ int iwl_mvm_scd_queue_redirect(struct iwl_mvm *mvm, int queue, int tid, cmd.tx_fifo = iwl_mvm_ac_to_tx_fifo[mvm->queue_info[queue].mac80211_ac]; cmd.tid = mvm->queue_info[queue].txq_tid; mq = mvm->hw_queue_to_mac80211[queue]; - shared_queue = (mvm->queue_info[queue].hw_queue_refcount > 1); + shared_queue = hweight16(mvm->queue_info[queue].tid_bitmap) > 1; spin_unlock_bh(&mvm->queue_info_lock); IWL_DEBUG_TX_QUEUES(mvm, "Redirecting TXQ #%d to FIFO #%d\n", @@ -674,6 +768,57 @@ out: return ret; } +static int iwl_mvm_find_free_queue(struct iwl_mvm *mvm, u8 sta_id, + u8 minq, u8 maxq) +{ + int i; + + lockdep_assert_held(&mvm->queue_info_lock); + + /* This should not be hit with new TX path */ + if (WARN_ON(iwl_mvm_has_new_tx_api(mvm))) + return -ENOSPC; + + /* Start by looking for a free queue */ + for (i = minq; i <= maxq; i++) + if (mvm->queue_info[i].tid_bitmap == 0 && + mvm->queue_info[i].status == IWL_MVM_QUEUE_FREE) + return i; + + return -ENOSPC; +} + +static int iwl_mvm_tvqm_enable_txq(struct iwl_mvm *mvm, int mac80211_queue, + u8 sta_id, u8 tid, unsigned int timeout) +{ + int queue, size = IWL_DEFAULT_QUEUE_SIZE; + + if (tid == IWL_MAX_TID_COUNT) { + tid = IWL_MGMT_TID; + size = IWL_MGMT_QUEUE_SIZE; + } + queue = iwl_trans_txq_alloc(mvm->trans, + cpu_to_le16(TX_QUEUE_CFG_ENABLE_QUEUE), + sta_id, tid, SCD_QUEUE_CFG, size, timeout); + + if (queue < 0) { + IWL_DEBUG_TX_QUEUES(mvm, + "Failed allocating TXQ for sta %d tid %d, ret: %d\n", + sta_id, tid, queue); + return queue; + } + + IWL_DEBUG_TX_QUEUES(mvm, "Enabling TXQ #%d for sta %d tid %d\n", + queue, sta_id, tid); + + mvm->hw_queue_to_mac80211[queue] |= BIT(mac80211_queue); + IWL_DEBUG_TX_QUEUES(mvm, + "Enabling TXQ #%d (mac80211 map:0x%x)\n", + queue, mvm->hw_queue_to_mac80211[queue]); + + return queue; +} + static int iwl_mvm_sta_alloc_queue_tvqm(struct iwl_mvm *mvm, struct ieee80211_sta *sta, u8 ac, int tid) @@ -698,12 +843,428 @@ static int iwl_mvm_sta_alloc_queue_tvqm(struct iwl_mvm *mvm, spin_lock_bh(&mvmsta->lock); mvmsta->tid_data[tid].txq_id = queue; - mvmsta->tid_data[tid].is_tid_active = true; spin_unlock_bh(&mvmsta->lock); return 0; } +static bool iwl_mvm_update_txq_mapping(struct iwl_mvm *mvm, int 
queue, + int mac80211_queue, u8 sta_id, u8 tid) +{ + bool enable_queue = true; + + spin_lock_bh(&mvm->queue_info_lock); + + /* Make sure this TID isn't already enabled */ + if (mvm->queue_info[queue].tid_bitmap & BIT(tid)) { + spin_unlock_bh(&mvm->queue_info_lock); + IWL_ERR(mvm, "Trying to enable TXQ %d with existing TID %d\n", + queue, tid); + return false; + } + + /* Update mappings and refcounts */ + if (mvm->queue_info[queue].tid_bitmap) + enable_queue = false; + + if (mac80211_queue != IEEE80211_INVAL_HW_QUEUE) { + WARN(mac80211_queue >= + BITS_PER_BYTE * sizeof(mvm->hw_queue_to_mac80211[0]), + "cannot track mac80211 queue %d (queue %d, sta %d, tid %d)\n", + mac80211_queue, queue, sta_id, tid); + mvm->hw_queue_to_mac80211[queue] |= BIT(mac80211_queue); + } + + mvm->queue_info[queue].tid_bitmap |= BIT(tid); + mvm->queue_info[queue].ra_sta_id = sta_id; + + if (enable_queue) { + if (tid != IWL_MAX_TID_COUNT) + mvm->queue_info[queue].mac80211_ac = + tid_to_mac80211_ac[tid]; + else + mvm->queue_info[queue].mac80211_ac = IEEE80211_AC_VO; + + mvm->queue_info[queue].txq_tid = tid; + } + + IWL_DEBUG_TX_QUEUES(mvm, + "Enabling TXQ #%d tids=0x%x (mac80211 map:0x%x)\n", + queue, mvm->queue_info[queue].tid_bitmap, + mvm->hw_queue_to_mac80211[queue]); + + spin_unlock_bh(&mvm->queue_info_lock); + + return enable_queue; +} + +static bool iwl_mvm_enable_txq(struct iwl_mvm *mvm, int queue, + int mac80211_queue, u16 ssn, + const struct iwl_trans_txq_scd_cfg *cfg, + unsigned int wdg_timeout) +{ + struct iwl_scd_txq_cfg_cmd cmd = { + .scd_queue = queue, + .action = SCD_CFG_ENABLE_QUEUE, + .window = cfg->frame_limit, + .sta_id = cfg->sta_id, + .ssn = cpu_to_le16(ssn), + .tx_fifo = cfg->fifo, + .aggregate = cfg->aggregate, + .tid = cfg->tid, + }; + bool inc_ssn; + + if (WARN_ON(iwl_mvm_has_new_tx_api(mvm))) + return false; + + /* Send the enabling command if we need to */ + if (!iwl_mvm_update_txq_mapping(mvm, queue, mac80211_queue, + cfg->sta_id, cfg->tid)) + return false; + + inc_ssn = iwl_trans_txq_enable_cfg(mvm->trans, queue, ssn, + NULL, wdg_timeout); + if (inc_ssn) + le16_add_cpu(&cmd.ssn, 1); + + WARN(iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd), + "Failed to configure queue %d on FIFO %d\n", queue, cfg->fifo); + + return inc_ssn; +} + +static void iwl_mvm_change_queue_tid(struct iwl_mvm *mvm, int queue) +{ + struct iwl_scd_txq_cfg_cmd cmd = { + .scd_queue = queue, + .action = SCD_CFG_UPDATE_QUEUE_TID, + }; + int tid; + unsigned long tid_bitmap; + int ret; + + lockdep_assert_held(&mvm->mutex); + + if (WARN_ON(iwl_mvm_has_new_tx_api(mvm))) + return; + + spin_lock_bh(&mvm->queue_info_lock); + tid_bitmap = mvm->queue_info[queue].tid_bitmap; + spin_unlock_bh(&mvm->queue_info_lock); + + if (WARN(!tid_bitmap, "TXQ %d has no tids assigned to it\n", queue)) + return; + + /* Find any TID for queue */ + tid = find_first_bit(&tid_bitmap, IWL_MAX_TID_COUNT + 1); + cmd.tid = tid; + cmd.tx_fifo = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]]; + + ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd); + if (ret) { + IWL_ERR(mvm, "Failed to update owner of TXQ %d (ret=%d)\n", + queue, ret); + return; + } + + spin_lock_bh(&mvm->queue_info_lock); + mvm->queue_info[queue].txq_tid = tid; + spin_unlock_bh(&mvm->queue_info_lock); + IWL_DEBUG_TX_QUEUES(mvm, "Changed TXQ %d ownership to tid %d\n", + queue, tid); +} + +static void iwl_mvm_unshare_queue(struct iwl_mvm *mvm, int queue) +{ + struct ieee80211_sta *sta; + struct iwl_mvm_sta *mvmsta; + u8 sta_id; + int tid = -1; + unsigned long 
tid_bitmap; + unsigned int wdg_timeout; + int ssn; + int ret = true; + + /* queue sharing is disabled on new TX path */ + if (WARN_ON(iwl_mvm_has_new_tx_api(mvm))) + return; + + lockdep_assert_held(&mvm->mutex); + + spin_lock_bh(&mvm->queue_info_lock); + sta_id = mvm->queue_info[queue].ra_sta_id; + tid_bitmap = mvm->queue_info[queue].tid_bitmap; + spin_unlock_bh(&mvm->queue_info_lock); + + /* Find TID for queue, and make sure it is the only one on the queue */ + tid = find_first_bit(&tid_bitmap, IWL_MAX_TID_COUNT + 1); + if (tid_bitmap != BIT(tid)) { + IWL_ERR(mvm, "Failed to unshare q %d, active tids=0x%lx\n", + queue, tid_bitmap); + return; + } + + IWL_DEBUG_TX_QUEUES(mvm, "Unsharing TXQ %d, keeping tid %d\n", queue, + tid); + + sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id], + lockdep_is_held(&mvm->mutex)); + + if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) + return; + + mvmsta = iwl_mvm_sta_from_mac80211(sta); + wdg_timeout = iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false); + + ssn = IEEE80211_SEQ_TO_SN(mvmsta->tid_data[tid].seq_number); + + ret = iwl_mvm_scd_queue_redirect(mvm, queue, tid, + tid_to_mac80211_ac[tid], ssn, + wdg_timeout, true); + if (ret) { + IWL_ERR(mvm, "Failed to redirect TXQ %d\n", queue); + return; + } + + /* If aggs should be turned back on - do it */ + if (mvmsta->tid_data[tid].state == IWL_AGG_ON) { + struct iwl_mvm_add_sta_cmd cmd = {0}; + + mvmsta->tid_disable_agg &= ~BIT(tid); + + cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color); + cmd.sta_id = mvmsta->sta_id; + cmd.add_modify = STA_MODE_MODIFY; + cmd.modify_mask = STA_MODIFY_TID_DISABLE_TX; + cmd.tfd_queue_msk = cpu_to_le32(mvmsta->tfd_queue_msk); + cmd.tid_disable_tx = cpu_to_le16(mvmsta->tid_disable_agg); + + ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC, + iwl_mvm_add_sta_cmd_size(mvm), &cmd); + if (!ret) { + IWL_DEBUG_TX_QUEUES(mvm, + "TXQ #%d is now aggregated again\n", + queue); + + /* Mark queue internally as aggregating again */ + iwl_trans_txq_set_shared_mode(mvm->trans, queue, false); + } + } + + spin_lock_bh(&mvm->queue_info_lock); + mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY; + spin_unlock_bh(&mvm->queue_info_lock); +} + +/* + * Remove inactive TIDs of a given queue. + * If all queue TIDs are inactive - mark the queue as inactive + * If only some of the queue TIDs are inactive - unmap them from the queue + * + * Returns %true if all TIDs were removed and the queue could be reused. + */ +static bool iwl_mvm_remove_inactive_tids(struct iwl_mvm *mvm, + struct iwl_mvm_sta *mvmsta, int queue, + unsigned long tid_bitmap, + unsigned long *unshare_queues, + unsigned long *changetid_queues) +{ + int tid; + + lockdep_assert_held(&mvmsta->lock); + lockdep_assert_held(&mvm->queue_info_lock); + + if (WARN_ON(iwl_mvm_has_new_tx_api(mvm))) + return false; + + /* Go over all non-active TIDs, incl. IWL_MAX_TID_COUNT (for mgmt) */ + for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) { + /* If some TFDs are still queued - don't mark TID as inactive */ + if (iwl_mvm_tid_queued(mvm, &mvmsta->tid_data[tid])) + tid_bitmap &= ~BIT(tid); + + /* Don't mark as inactive any TID that has an active BA */ + if (mvmsta->tid_data[tid].state != IWL_AGG_OFF) + tid_bitmap &= ~BIT(tid); + } + + /* If all TIDs in the queue are inactive - return it can be reused */ + if (tid_bitmap == mvm->queue_info[queue].tid_bitmap) { + IWL_DEBUG_TX_QUEUES(mvm, "Queue %d is inactive\n", queue); + return true; + } + + /* + * If we are here, this is a shared queue and not all TIDs timed-out.
+ * Remove the ones that did. + */ + for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) { + int mac_queue = mvmsta->vif->hw_queue[tid_to_mac80211_ac[tid]]; + u16 tid_bitmap; + + mvmsta->tid_data[tid].txq_id = IWL_MVM_INVALID_QUEUE; + mvm->hw_queue_to_mac80211[queue] &= ~BIT(mac_queue); + mvm->queue_info[queue].tid_bitmap &= ~BIT(tid); + + tid_bitmap = mvm->queue_info[queue].tid_bitmap; + + /* + * We need to take into account a situation in which a TXQ was + * allocated to TID x, and then turned shared by adding TIDs y + * and z. If TID x becomes inactive and is removed from the TXQ, + * ownership must be given to one of the remaining TIDs. + * This is mainly because if TID x continues - a new queue can't + * be allocated for it as long as it is an owner of another TXQ. + * + * Mark this queue in the right bitmap, we'll send the command + * to the firmware later. + */ + if (!(tid_bitmap & BIT(mvm->queue_info[queue].txq_tid))) + set_bit(queue, changetid_queues); + + IWL_DEBUG_TX_QUEUES(mvm, + "Removing inactive TID %d from shared Q:%d\n", + tid, queue); + } + + IWL_DEBUG_TX_QUEUES(mvm, + "TXQ #%d left with tid bitmap 0x%x\n", queue, + mvm->queue_info[queue].tid_bitmap); + + /* + * There may be different TIDs with the same mac queues, so make + * sure all TIDs have existing corresponding mac queues enabled + */ + tid_bitmap = mvm->queue_info[queue].tid_bitmap; + for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) { + mvm->hw_queue_to_mac80211[queue] |= + BIT(mvmsta->vif->hw_queue[tid_to_mac80211_ac[tid]]); + } + + /* If the queue is marked as shared - "unshare" it */ + if (hweight16(mvm->queue_info[queue].tid_bitmap) == 1 && + mvm->queue_info[queue].status == IWL_MVM_QUEUE_SHARED) { + IWL_DEBUG_TX_QUEUES(mvm, "Marking Q:%d for reconfig\n", + queue); + set_bit(queue, unshare_queues); + } + + return false; +} + +/* + * Check for inactivity - this includes checking if any queue + * can be unshared and finding one (and only one) that can be + * reused. + * This function is also invoked as a sort of clean-up task, + * in which case @alloc_for_sta is IWL_MVM_INVALID_STA. + * + * Returns the queue number, or -ENOSPC. + */ +static int iwl_mvm_inactivity_check(struct iwl_mvm *mvm, u8 alloc_for_sta) +{ + unsigned long now = jiffies; + unsigned long unshare_queues = 0; + unsigned long changetid_queues = 0; + int i, ret, free_queue = -ENOSPC; + + lockdep_assert_held(&mvm->mutex); + + if (iwl_mvm_has_new_tx_api(mvm)) + return -ENOSPC; + + spin_lock_bh(&mvm->queue_info_lock); + + rcu_read_lock(); + + /* we skip the CMD queue below by starting at 1 */ + BUILD_BUG_ON(IWL_MVM_DQA_CMD_QUEUE != 0); + + for (i = 1; i < IWL_MAX_HW_QUEUES; i++) { + struct ieee80211_sta *sta; + struct iwl_mvm_sta *mvmsta; + u8 sta_id; + int tid; + unsigned long inactive_tid_bitmap = 0; + unsigned long queue_tid_bitmap; + + queue_tid_bitmap = mvm->queue_info[i].tid_bitmap; + if (!queue_tid_bitmap) + continue; + + /* If TXQ isn't in active use anyway - nothing to do here... 
*/ + if (mvm->queue_info[i].status != IWL_MVM_QUEUE_READY && + mvm->queue_info[i].status != IWL_MVM_QUEUE_SHARED) + continue; + + /* Check to see if there are inactive TIDs on this queue */ + for_each_set_bit(tid, &queue_tid_bitmap, + IWL_MAX_TID_COUNT + 1) { + if (time_after(mvm->queue_info[i].last_frame_time[tid] + + IWL_MVM_DQA_QUEUE_TIMEOUT, now)) + continue; + + inactive_tid_bitmap |= BIT(tid); + } + + /* If all TIDs are active - finish check on this queue */ + if (!inactive_tid_bitmap) + continue; + + /* + * If we are here - the queue hadn't been served recently and is + * in use + */ + + sta_id = mvm->queue_info[i].ra_sta_id; + sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]); + + /* + * If the STA doesn't exist anymore, it isn't an error. It could + * be that it was removed since getting the queues, and in this + * case it should've inactivated its queues anyway. + */ + if (IS_ERR_OR_NULL(sta)) + continue; + + mvmsta = iwl_mvm_sta_from_mac80211(sta); + + /* this isn't so nice, but works OK due to the way we loop */ + spin_unlock(&mvm->queue_info_lock); + + /* and we need this locking order */ + spin_lock(&mvmsta->lock); + spin_lock(&mvm->queue_info_lock); + ret = iwl_mvm_remove_inactive_tids(mvm, mvmsta, i, + inactive_tid_bitmap, + &unshare_queues, + &changetid_queues); + if (ret >= 0 && free_queue < 0) + free_queue = ret; + /* only unlock sta lock - we still need the queue info lock */ + spin_unlock(&mvmsta->lock); + } + + rcu_read_unlock(); + spin_unlock_bh(&mvm->queue_info_lock); + + /* Reconfigure queues requiring reconfiguration */ + for_each_set_bit(i, &unshare_queues, IWL_MAX_HW_QUEUES) + iwl_mvm_unshare_queue(mvm, i); + for_each_set_bit(i, &changetid_queues, IWL_MAX_HW_QUEUES) + iwl_mvm_change_queue_tid(mvm, i); + + if (free_queue >= 0 && alloc_for_sta != IWL_MVM_INVALID_STA) { + ret = iwl_mvm_free_inactive_queue(mvm, free_queue, + alloc_for_sta); + if (ret) + return ret; + } + + return free_queue; +} + static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm, struct ieee80211_sta *sta, u8 ac, int tid, struct ieee80211_hdr *hdr) @@ -719,7 +1280,6 @@ static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm, iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false); u8 mac_queue = mvmsta->vif->hw_queue[ac]; int queue = -1; - bool using_inactive_queue = false, same_sta = false; unsigned long disable_agg_tids = 0; enum iwl_mvm_agg_state queue_state; bool shared_queue = false, inc_ssn; @@ -756,9 +1316,7 @@ static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm, if ((queue < 0 && mvmsta->reserved_queue != IEEE80211_INVAL_HW_QUEUE) && (mvm->queue_info[mvmsta->reserved_queue].status == - IWL_MVM_QUEUE_RESERVED || - mvm->queue_info[mvmsta->reserved_queue].status == - IWL_MVM_QUEUE_INACTIVE)) { + IWL_MVM_QUEUE_RESERVED)) { queue = mvmsta->reserved_queue; mvm->queue_info[queue].reserved = true; IWL_DEBUG_TX_QUEUES(mvm, "Using reserved queue #%d\n", queue); @@ -768,21 +1326,13 @@ static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm, queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id, IWL_MVM_DQA_MIN_DATA_QUEUE, IWL_MVM_DQA_MAX_DATA_QUEUE); + if (queue < 0) { + spin_unlock_bh(&mvm->queue_info_lock); - /* - * Check if this queue is already allocated but inactive.
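/*
 * Illustrative aside -- not part of the patch. The inactivity loop above
 * decides that a TID went quiet with time_after(last_frame_time +
 * IWL_MVM_DQA_QUEUE_TIMEOUT, now), which stays correct across jiffies
 * wraparound because it compares via signed subtraction. Standalone
 * equivalent, with a 32-bit tick counter assumed for illustration:
 */
#include <stdbool.h>
#include <stdint.h>

/* true if a is later than b, even if the counter wrapped in between */
static inline bool ticks_after(uint32_t a, uint32_t b)
{
	return (int32_t)(b - a) < 0;
}

/* mirrors: !time_after(last_frame + timeout, now) -> TID is inactive */
static inline bool tid_inactive(uint32_t now, uint32_t last_frame,
				uint32_t timeout)
{
	return !ticks_after(last_frame + timeout, now);
}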
- * In such a case, we'll need to first free this queue before enabling - * it again, so we'll mark it as reserved to make sure no new traffic - * arrives on it - */ - if (queue > 0 && - mvm->queue_info[queue].status == IWL_MVM_QUEUE_INACTIVE) { - mvm->queue_info[queue].status = IWL_MVM_QUEUE_RESERVED; - using_inactive_queue = true; - same_sta = mvm->queue_info[queue].ra_sta_id == mvmsta->sta_id; - IWL_DEBUG_TX_QUEUES(mvm, - "Re-assigning TXQ %d: sta_id=%d, tid=%d\n", - queue, mvmsta->sta_id, tid); + /* try harder - perhaps kill an inactive queue */ + queue = iwl_mvm_inactivity_check(mvm, mvmsta->sta_id); + + spin_lock_bh(&mvm->queue_info_lock); } /* No free queue - we'll have to share */ @@ -800,7 +1350,7 @@ static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm, * This will allow avoiding re-acquiring the lock at the end of the * configuration. On error we'll mark it back as free. */ - if ((queue > 0) && !shared_queue) + if (queue > 0 && !shared_queue) mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY; spin_unlock_bh(&mvm->queue_info_lock); @@ -821,16 +1371,6 @@ static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm, cfg.aggregate = (queue >= IWL_MVM_DQA_MIN_DATA_QUEUE || queue == IWL_MVM_DQA_BSS_CLIENT_QUEUE); - /* - * If this queue was previously inactive (idle) - we need to free it - * first - */ - if (using_inactive_queue) { - ret = iwl_mvm_free_inactive_queue(mvm, queue, same_sta); - if (ret) - return ret; - } - IWL_DEBUG_TX_QUEUES(mvm, "Allocating %squeue #%d to sta %d on tid %d\n", shared_queue ? "shared " : "", queue, @@ -874,7 +1414,6 @@ static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm, if (inc_ssn) mvmsta->tid_data[tid].seq_number += 0x10; mvmsta->tid_data[tid].txq_id = queue; - mvmsta->tid_data[tid].is_tid_active = true; mvmsta->tfd_queue_msk |= BIT(queue); queue_state = mvmsta->tid_data[tid].state; @@ -909,129 +1448,6 @@ out_err: return ret; } -static void iwl_mvm_change_queue_owner(struct iwl_mvm *mvm, int queue) -{ - struct iwl_scd_txq_cfg_cmd cmd = { - .scd_queue = queue, - .action = SCD_CFG_UPDATE_QUEUE_TID, - }; - int tid; - unsigned long tid_bitmap; - int ret; - - lockdep_assert_held(&mvm->mutex); - - if (WARN_ON(iwl_mvm_has_new_tx_api(mvm))) - return; - - spin_lock_bh(&mvm->queue_info_lock); - tid_bitmap = mvm->queue_info[queue].tid_bitmap; - spin_unlock_bh(&mvm->queue_info_lock); - - if (WARN(!tid_bitmap, "TXQ %d has no tids assigned to it\n", queue)) - return; - - /* Find any TID for queue */ - tid = find_first_bit(&tid_bitmap, IWL_MAX_TID_COUNT + 1); - cmd.tid = tid; - cmd.tx_fifo = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]]; - - ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd); - if (ret) { - IWL_ERR(mvm, "Failed to update owner of TXQ %d (ret=%d)\n", - queue, ret); - return; - } - - spin_lock_bh(&mvm->queue_info_lock); - mvm->queue_info[queue].txq_tid = tid; - spin_unlock_bh(&mvm->queue_info_lock); - IWL_DEBUG_TX_QUEUES(mvm, "Changed TXQ %d ownership to tid %d\n", - queue, tid); -} - -static void iwl_mvm_unshare_queue(struct iwl_mvm *mvm, int queue) -{ - struct ieee80211_sta *sta; - struct iwl_mvm_sta *mvmsta; - u8 sta_id; - int tid = -1; - unsigned long tid_bitmap; - unsigned int wdg_timeout; - int ssn; - int ret = true; - - /* queue sharing is disabled on new TX path */ - if (WARN_ON(iwl_mvm_has_new_tx_api(mvm))) - return; - - lockdep_assert_held(&mvm->mutex); - - spin_lock_bh(&mvm->queue_info_lock); - sta_id = mvm->queue_info[queue].ra_sta_id; - tid_bitmap = mvm->queue_info[queue].tid_bitmap; - 
spin_unlock_bh(&mvm->queue_info_lock); - - /* Find TID for queue, and make sure it is the only one on the queue */ - tid = find_first_bit(&tid_bitmap, IWL_MAX_TID_COUNT + 1); - if (tid_bitmap != BIT(tid)) { - IWL_ERR(mvm, "Failed to unshare q %d, active tids=0x%lx\n", - queue, tid_bitmap); - return; - } - - IWL_DEBUG_TX_QUEUES(mvm, "Unsharing TXQ %d, keeping tid %d\n", queue, - tid); - - sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id], - lockdep_is_held(&mvm->mutex)); - - if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) - return; - - mvmsta = iwl_mvm_sta_from_mac80211(sta); - wdg_timeout = iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false); - - ssn = IEEE80211_SEQ_TO_SN(mvmsta->tid_data[tid].seq_number); - - ret = iwl_mvm_scd_queue_redirect(mvm, queue, tid, - tid_to_mac80211_ac[tid], ssn, - wdg_timeout, true); - if (ret) { - IWL_ERR(mvm, "Failed to redirect TXQ %d\n", queue); - return; - } - - /* If aggs should be turned back on - do it */ - if (mvmsta->tid_data[tid].state == IWL_AGG_ON) { - struct iwl_mvm_add_sta_cmd cmd = {0}; - - mvmsta->tid_disable_agg &= ~BIT(tid); - - cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color); - cmd.sta_id = mvmsta->sta_id; - cmd.add_modify = STA_MODE_MODIFY; - cmd.modify_mask = STA_MODIFY_TID_DISABLE_TX; - cmd.tfd_queue_msk = cpu_to_le32(mvmsta->tfd_queue_msk); - cmd.tid_disable_tx = cpu_to_le16(mvmsta->tid_disable_agg); - - ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC, - iwl_mvm_add_sta_cmd_size(mvm), &cmd); - if (!ret) { - IWL_DEBUG_TX_QUEUES(mvm, - "TXQ #%d is now aggregated again\n", - queue); - - /* Mark queue intenally as aggregating again */ - iwl_trans_txq_set_shared_mode(mvm->trans, queue, false); - } - } - - spin_lock_bh(&mvm->queue_info_lock); - mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY; - spin_unlock_bh(&mvm->queue_info_lock); -} - static inline u8 iwl_mvm_tid_to_ac_queue(int tid) { if (tid == IWL_MAX_TID_COUNT) @@ -1100,47 +1516,12 @@ void iwl_mvm_add_new_dqa_stream_wk(struct work_struct *wk) struct ieee80211_sta *sta; struct iwl_mvm_sta *mvmsta; unsigned long deferred_tid_traffic; - int queue, sta_id, tid; - - /* Check inactivity of queues */ - iwl_mvm_inactivity_check(mvm); + int sta_id, tid; mutex_lock(&mvm->mutex); - /* No queue reconfiguration in TVQM mode */ - if (iwl_mvm_has_new_tx_api(mvm)) - goto alloc_queues; - - /* Reconfigure queues requiring reconfiguation */ - for (queue = 0; queue < ARRAY_SIZE(mvm->queue_info); queue++) { - bool reconfig; - bool change_owner; - - spin_lock_bh(&mvm->queue_info_lock); - reconfig = (mvm->queue_info[queue].status == - IWL_MVM_QUEUE_RECONFIGURING); + iwl_mvm_inactivity_check(mvm, IWL_MVM_INVALID_STA); - /* - * We need to take into account a situation in which a TXQ was - * allocated to TID x, and then turned shared by adding TIDs y - * and z. If TID x becomes inactive and is removed from the TXQ, - * ownership must be given to one of the remaining TIDs. - * This is mainly because if TID x continues - a new queue can't - * be allocated for it as long as it is an owner of another TXQ. 
- */ - change_owner = !(mvm->queue_info[queue].tid_bitmap & - BIT(mvm->queue_info[queue].txq_tid)) && - (mvm->queue_info[queue].status == - IWL_MVM_QUEUE_SHARED); - spin_unlock_bh(&mvm->queue_info_lock); - - if (reconfig) - iwl_mvm_unshare_queue(mvm, queue); - else if (change_owner) - iwl_mvm_change_queue_owner(mvm, queue); - } - -alloc_queues: /* Go over all stations with deferred traffic */ for_each_set_bit(sta_id, mvm->sta_deferred_frames, IWL_MVM_STATION_COUNT) { @@ -1167,23 +1548,19 @@ static int iwl_mvm_reserve_sta_stream(struct iwl_mvm *mvm, { struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); int queue; - bool using_inactive_queue = false, same_sta = false; /* queue reserving is disabled on new TX path */ if (WARN_ON(iwl_mvm_has_new_tx_api(mvm))) return 0; - /* - * Check for inactive queues, so we don't reach a situation where we - * can't add a STA due to a shortage in queues that doesn't really exist - */ - iwl_mvm_inactivity_check(mvm); + /* run the general cleanup/unsharing of queues */ + iwl_mvm_inactivity_check(mvm, IWL_MVM_INVALID_STA); spin_lock_bh(&mvm->queue_info_lock); /* Make sure we have free resources for this STA */ if (vif_type == NL80211_IFTYPE_STATION && !sta->tdls && - !mvm->queue_info[IWL_MVM_DQA_BSS_CLIENT_QUEUE].hw_queue_refcount && + !mvm->queue_info[IWL_MVM_DQA_BSS_CLIENT_QUEUE].tid_bitmap && (mvm->queue_info[IWL_MVM_DQA_BSS_CLIENT_QUEUE].status == IWL_MVM_QUEUE_FREE)) queue = IWL_MVM_DQA_BSS_CLIENT_QUEUE; @@ -1193,16 +1570,13 @@ static int iwl_mvm_reserve_sta_stream(struct iwl_mvm *mvm, IWL_MVM_DQA_MAX_DATA_QUEUE); if (queue < 0) { spin_unlock_bh(&mvm->queue_info_lock); - IWL_ERR(mvm, "No available queues for new station\n"); - return -ENOSPC; - } else if (mvm->queue_info[queue].status == IWL_MVM_QUEUE_INACTIVE) { - /* - * If this queue is already allocated but inactive we'll need to - * first free this queue before enabling it again, we'll mark - * it as reserved to make sure no new traffic arrives on it - */ - using_inactive_queue = true; - same_sta = mvm->queue_info[queue].ra_sta_id == mvmsta->sta_id; + /* try again - this time kick out a queue if needed */ + queue = iwl_mvm_inactivity_check(mvm, mvmsta->sta_id); + if (queue < 0) { + IWL_ERR(mvm, "No available queues for new station\n"); + return -ENOSPC; + } + spin_lock_bh(&mvm->queue_info_lock); } mvm->queue_info[queue].status = IWL_MVM_QUEUE_RESERVED; @@ -1210,9 +1584,6 @@ static int iwl_mvm_reserve_sta_stream(struct iwl_mvm *mvm, mvmsta->reserved_queue = queue; - if (using_inactive_queue) - iwl_mvm_free_inactive_queue(mvm, queue, same_sta); - IWL_DEBUG_TX_QUEUES(mvm, "Reserving data queue #%d for sta_id %d\n", queue, mvmsta->sta_id); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h index 0fc211108149..de1a0a2d8723 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h @@ -312,9 +312,6 @@ enum iwl_mvm_agg_state { * Basically when next_reclaimed reaches ssn, we can tell mac80211 that * we are ready to finish the Tx AGG stop / start flow. * @tx_time: medium time consumed by this A-MPDU - * @is_tid_active: has this TID sent traffic in the last - * %IWL_MVM_DQA_QUEUE_TIMEOUT time period. If %txq_id is invalid, this - * field should be ignored. 
* @tpt_meas_start: time of the throughput measurements start, is reset every HZ * @tx_count_last: number of frames transmitted during the last second * @tx_count: counts the number of frames transmitted since the last reset of @@ -332,7 +329,6 @@ struct iwl_mvm_tid_data { u16 txq_id; u16 ssn; u16 tx_time; - bool is_tid_active; unsigned long tpt_meas_start; u32 tx_count_last; u32 tx_count; @@ -572,8 +568,4 @@ void iwl_mvm_modify_all_sta_disable_tx(struct iwl_mvm *mvm, void iwl_mvm_csa_client_absent(struct iwl_mvm *mvm, struct ieee80211_vif *vif); void iwl_mvm_add_new_dqa_stream_wk(struct work_struct *wk); -int iwl_mvm_scd_queue_redirect(struct iwl_mvm *mvm, int queue, int tid, - int ac, int ssn, unsigned int wdg_timeout, - bool force); - #endif /* __sta_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c index cd91bc44259c..e1a6f4e22253 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c @@ -254,17 +254,14 @@ static void iwl_mvm_te_check_trigger(struct iwl_mvm *mvm, struct iwl_fw_dbg_trigger_time_event *te_trig; int i; - if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_TIME_EVENT)) + trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, + ieee80211_vif_to_wdev(te_data->vif), + FW_DBG_TRIGGER_TIME_EVENT); + if (!trig) return; - trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_TIME_EVENT); te_trig = (void *)trig->data; - if (!iwl_fw_dbg_trigger_check_stop(&mvm->fwrt, - ieee80211_vif_to_wdev(te_data->vif), - trig)) - return; - for (i = 0; i < ARRAY_SIZE(te_trig->time_events); i++) { u32 trig_te_id = le32_to_cpu(te_trig->time_events[i].id); u32 trig_action_bitmap = diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c index a6877b3f8037..ec57682efe54 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c @@ -79,15 +79,12 @@ iwl_mvm_bar_check_trigger(struct iwl_mvm *mvm, const u8 *addr, struct iwl_fw_dbg_trigger_tlv *trig; struct iwl_fw_dbg_trigger_ba *ba_trig; - if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_BA)) + trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, NULL, FW_DBG_TRIGGER_BA); + if (!trig) return; - trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_BA); ba_trig = (void *)trig->data; - if (!iwl_fw_dbg_trigger_check_stop(&mvm->fwrt, NULL, trig)) - return; - if (!(le16_to_cpu(ba_trig->tx_bar) & BIT(tid))) return; @@ -1143,32 +1140,16 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb, WARN_ON_ONCE(info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM); /* Check if TXQ needs to be allocated or re-activated */ - if (unlikely(txq_id == IWL_MVM_INVALID_QUEUE || - !mvmsta->tid_data[tid].is_tid_active)) { - /* If TXQ needs to be allocated... */ - if (txq_id == IWL_MVM_INVALID_QUEUE) { - iwl_mvm_tx_add_stream(mvm, mvmsta, tid, skb); - - /* - * The frame is now deferred, and the worker scheduled - * will re-allocate it, so we can free it for now. 
- */ - iwl_trans_free_tx_cmd(mvm->trans, dev_cmd); - spin_unlock(&mvmsta->lock); - return 0; - } - - /* queue should always be active in new TX path */ - WARN_ON(iwl_mvm_has_new_tx_api(mvm)); + if (unlikely(txq_id == IWL_MVM_INVALID_QUEUE)) { + iwl_mvm_tx_add_stream(mvm, mvmsta, tid, skb); - /* If we are here - TXQ exists and needs to be re-activated */ - spin_lock(&mvm->queue_info_lock); - mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_READY; - mvmsta->tid_data[tid].is_tid_active = true; - spin_unlock(&mvm->queue_info_lock); - - IWL_DEBUG_TX_QUEUES(mvm, "Re-activating queue %d for TX\n", - txq_id); + /* + * The frame is now deferred, and the worker scheduled + * will re-allocate it, so we can free it for now. + */ + iwl_trans_free_tx_cmd(mvm->trans, dev_cmd); + spin_unlock(&mvmsta->lock); + return 0; } if (!iwl_mvm_has_new_tx_api(mvm)) { @@ -1414,15 +1395,13 @@ static void iwl_mvm_tx_status_check_trigger(struct iwl_mvm *mvm, struct iwl_fw_dbg_trigger_tx_status *status_trig; int i; - if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_TX_STATUS)) + trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, NULL, + FW_DBG_TRIGGER_TX_STATUS); + if (!trig) return; - trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_TX_STATUS); status_trig = (void *)trig->data; - if (!iwl_fw_dbg_trigger_check_stop(&mvm->fwrt, NULL, trig)) - return; - for (i = 0; i < ARRAY_SIZE(status_trig->statuses); i++) { /* don't collect on status 0 */ if (!status_trig->statuses[i].status) diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c index dcacc4d11abc..818e1180bbdd 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c @@ -599,36 +599,6 @@ void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm) iwl_mvm_dump_umac_error_log(mvm); } -int iwl_mvm_find_free_queue(struct iwl_mvm *mvm, u8 sta_id, u8 minq, u8 maxq) -{ - int i; - - lockdep_assert_held(&mvm->queue_info_lock); - - /* This should not be hit with new TX path */ - if (WARN_ON(iwl_mvm_has_new_tx_api(mvm))) - return -ENOSPC; - - /* Start by looking for a free queue */ - for (i = minq; i <= maxq; i++) - if (mvm->queue_info[i].hw_queue_refcount == 0 && - mvm->queue_info[i].status == IWL_MVM_QUEUE_FREE) - return i; - - /* - * If no free queue found - settle for an inactive one to reconfigure - * Make sure that the inactive queue either already belongs to this STA, - * or that if it belongs to another one - it isn't the reserved queue - */ - for (i = minq; i <= maxq; i++) - if (mvm->queue_info[i].status == IWL_MVM_QUEUE_INACTIVE && - (sta_id == mvm->queue_info[i].ra_sta_id || - !mvm->queue_info[i].reserved)) - return i; - - return -ENOSPC; -} - int iwl_mvm_reconfig_scd(struct iwl_mvm *mvm, int queue, int fifo, int sta_id, int tid, int frame_limit, u16 ssn) { @@ -649,7 +619,7 @@ int iwl_mvm_reconfig_scd(struct iwl_mvm *mvm, int queue, int fifo, int sta_id, return -EINVAL; spin_lock_bh(&mvm->queue_info_lock); - if (WARN(mvm->queue_info[queue].hw_queue_refcount == 0, + if (WARN(mvm->queue_info[queue].tid_bitmap == 0, "Trying to reconfig unallocated queue %d\n", queue)) { spin_unlock_bh(&mvm->queue_info_lock); return -ENXIO; @@ -665,229 +635,6 @@ int iwl_mvm_reconfig_scd(struct iwl_mvm *mvm, int queue, int fifo, int sta_id, return ret; } -static bool iwl_mvm_update_txq_mapping(struct iwl_mvm *mvm, int queue, - int mac80211_queue, u8 sta_id, u8 tid) -{ - bool enable_queue = true; - - spin_lock_bh(&mvm->queue_info_lock); - - /* Make sure this TID isn't already enabled */ - 
if (mvm->queue_info[queue].tid_bitmap & BIT(tid)) { - spin_unlock_bh(&mvm->queue_info_lock); - IWL_ERR(mvm, "Trying to enable TXQ %d with existing TID %d\n", - queue, tid); - return false; - } - - /* Update mappings and refcounts */ - if (mvm->queue_info[queue].hw_queue_refcount > 0) - enable_queue = false; - - if (mac80211_queue != IEEE80211_INVAL_HW_QUEUE) { - WARN(mac80211_queue >= - BITS_PER_BYTE * sizeof(mvm->hw_queue_to_mac80211[0]), - "cannot track mac80211 queue %d (queue %d, sta %d, tid %d)\n", - mac80211_queue, queue, sta_id, tid); - mvm->hw_queue_to_mac80211[queue] |= BIT(mac80211_queue); - } - - mvm->queue_info[queue].hw_queue_refcount++; - mvm->queue_info[queue].tid_bitmap |= BIT(tid); - mvm->queue_info[queue].ra_sta_id = sta_id; - - if (enable_queue) { - if (tid != IWL_MAX_TID_COUNT) - mvm->queue_info[queue].mac80211_ac = - tid_to_mac80211_ac[tid]; - else - mvm->queue_info[queue].mac80211_ac = IEEE80211_AC_VO; - - mvm->queue_info[queue].txq_tid = tid; - } - - IWL_DEBUG_TX_QUEUES(mvm, - "Enabling TXQ #%d refcount=%d (mac80211 map:0x%x)\n", - queue, mvm->queue_info[queue].hw_queue_refcount, - mvm->hw_queue_to_mac80211[queue]); - - spin_unlock_bh(&mvm->queue_info_lock); - - return enable_queue; -} - -int iwl_mvm_tvqm_enable_txq(struct iwl_mvm *mvm, int mac80211_queue, - u8 sta_id, u8 tid, unsigned int timeout) -{ - int queue, size = IWL_DEFAULT_QUEUE_SIZE; - - if (tid == IWL_MAX_TID_COUNT) { - tid = IWL_MGMT_TID; - size = IWL_MGMT_QUEUE_SIZE; - } - queue = iwl_trans_txq_alloc(mvm->trans, - cpu_to_le16(TX_QUEUE_CFG_ENABLE_QUEUE), - sta_id, tid, SCD_QUEUE_CFG, size, timeout); - - if (queue < 0) { - IWL_DEBUG_TX_QUEUES(mvm, - "Failed allocating TXQ for sta %d tid %d, ret: %d\n", - sta_id, tid, queue); - return queue; - } - - IWL_DEBUG_TX_QUEUES(mvm, "Enabling TXQ #%d for sta %d tid %d\n", - queue, sta_id, tid); - - mvm->hw_queue_to_mac80211[queue] |= BIT(mac80211_queue); - IWL_DEBUG_TX_QUEUES(mvm, - "Enabling TXQ #%d (mac80211 map:0x%x)\n", - queue, mvm->hw_queue_to_mac80211[queue]); - - return queue; -} - -bool iwl_mvm_enable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue, - u16 ssn, const struct iwl_trans_txq_scd_cfg *cfg, - unsigned int wdg_timeout) -{ - struct iwl_scd_txq_cfg_cmd cmd = { - .scd_queue = queue, - .action = SCD_CFG_ENABLE_QUEUE, - .window = cfg->frame_limit, - .sta_id = cfg->sta_id, - .ssn = cpu_to_le16(ssn), - .tx_fifo = cfg->fifo, - .aggregate = cfg->aggregate, - .tid = cfg->tid, - }; - bool inc_ssn; - - if (WARN_ON(iwl_mvm_has_new_tx_api(mvm))) - return false; - - /* Send the enabling command if we need to */ - if (!iwl_mvm_update_txq_mapping(mvm, queue, mac80211_queue, - cfg->sta_id, cfg->tid)) - return false; - - inc_ssn = iwl_trans_txq_enable_cfg(mvm->trans, queue, ssn, - NULL, wdg_timeout); - if (inc_ssn) - le16_add_cpu(&cmd.ssn, 1); - - WARN(iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd), - "Failed to configure queue %d on FIFO %d\n", queue, cfg->fifo); - - return inc_ssn; -} - -int iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue, - u8 tid, u8 flags) -{ - struct iwl_scd_txq_cfg_cmd cmd = { - .scd_queue = queue, - .action = SCD_CFG_DISABLE_QUEUE, - }; - bool remove_mac_queue = mac80211_queue != IEEE80211_INVAL_HW_QUEUE; - int ret; - - if (WARN_ON(remove_mac_queue && mac80211_queue >= IEEE80211_MAX_QUEUES)) - return -EINVAL; - - if (iwl_mvm_has_new_tx_api(mvm)) { - spin_lock_bh(&mvm->queue_info_lock); - - if (remove_mac_queue) - mvm->hw_queue_to_mac80211[queue] &= - ~BIT(mac80211_queue); - - 
spin_unlock_bh(&mvm->queue_info_lock); - - iwl_trans_txq_free(mvm->trans, queue); - - return 0; - } - - spin_lock_bh(&mvm->queue_info_lock); - - if (WARN_ON(mvm->queue_info[queue].hw_queue_refcount == 0)) { - spin_unlock_bh(&mvm->queue_info_lock); - return 0; - } - - mvm->queue_info[queue].tid_bitmap &= ~BIT(tid); - - /* - * If there is another TID with the same AC - don't remove the MAC queue - * from the mapping - */ - if (tid < IWL_MAX_TID_COUNT) { - unsigned long tid_bitmap = - mvm->queue_info[queue].tid_bitmap; - int ac = tid_to_mac80211_ac[tid]; - int i; - - for_each_set_bit(i, &tid_bitmap, IWL_MAX_TID_COUNT) { - if (tid_to_mac80211_ac[i] == ac) - remove_mac_queue = false; - } - } - - if (remove_mac_queue) - mvm->hw_queue_to_mac80211[queue] &= - ~BIT(mac80211_queue); - mvm->queue_info[queue].hw_queue_refcount--; - - cmd.action = mvm->queue_info[queue].hw_queue_refcount ? - SCD_CFG_ENABLE_QUEUE : SCD_CFG_DISABLE_QUEUE; - if (cmd.action == SCD_CFG_DISABLE_QUEUE) - mvm->queue_info[queue].status = IWL_MVM_QUEUE_FREE; - - IWL_DEBUG_TX_QUEUES(mvm, - "Disabling TXQ #%d refcount=%d (mac80211 map:0x%x)\n", - queue, - mvm->queue_info[queue].hw_queue_refcount, - mvm->hw_queue_to_mac80211[queue]); - - /* If the queue is still enabled - nothing left to do in this func */ - if (cmd.action == SCD_CFG_ENABLE_QUEUE) { - spin_unlock_bh(&mvm->queue_info_lock); - return 0; - } - - cmd.sta_id = mvm->queue_info[queue].ra_sta_id; - cmd.tid = mvm->queue_info[queue].txq_tid; - - /* Make sure queue info is correct even though we overwrite it */ - WARN(mvm->queue_info[queue].hw_queue_refcount || - mvm->queue_info[queue].tid_bitmap || - mvm->hw_queue_to_mac80211[queue], - "TXQ #%d info out-of-sync - refcount=%d, mac map=0x%x, tid=0x%x\n", - queue, mvm->queue_info[queue].hw_queue_refcount, - mvm->hw_queue_to_mac80211[queue], - mvm->queue_info[queue].tid_bitmap); - - /* If we are here - the queue is freed and we can zero out these vals */ - mvm->queue_info[queue].hw_queue_refcount = 0; - mvm->queue_info[queue].tid_bitmap = 0; - mvm->hw_queue_to_mac80211[queue] = 0; - - /* Regardless if this is a reserved TXQ for a STA - mark it as false */ - mvm->queue_info[queue].reserved = false; - - spin_unlock_bh(&mvm->queue_info_lock); - - iwl_trans_txq_disable(mvm->trans, queue, false); - ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, flags, - sizeof(struct iwl_scd_txq_cfg_cmd), &cmd); - - if (ret) - IWL_ERR(mvm, "Failed to disable queue %d (ret=%d)\n", - queue, ret); - return ret; -} - /** * iwl_mvm_send_lq_cmd() - Send link quality command * @sync: This command can be sent synchronously. @@ -1238,14 +985,12 @@ void iwl_mvm_connection_loss(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct iwl_fw_dbg_trigger_tlv *trig; struct iwl_fw_dbg_trigger_mlme *trig_mlme; - if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_MLME)) + trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, ieee80211_vif_to_wdev(vif), + FW_DBG_TRIGGER_MLME); + if (!trig) goto out; - trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_MLME); trig_mlme = (void *)trig->data; - if (!iwl_fw_dbg_trigger_check_stop(&mvm->fwrt, - ieee80211_vif_to_wdev(vif), trig)) - goto out; if (trig_mlme->stop_connection_loss && --trig_mlme->stop_connection_loss) @@ -1257,171 +1002,6 @@ out: ieee80211_connection_loss(vif); } -/* - * Remove inactive TIDs of a given queue. 
- * If all queue TIDs are inactive - mark the queue as inactive - * If only some the queue TIDs are inactive - unmap them from the queue - */ -static void iwl_mvm_remove_inactive_tids(struct iwl_mvm *mvm, - struct iwl_mvm_sta *mvmsta, int queue, - unsigned long tid_bitmap) -{ - int tid; - - lockdep_assert_held(&mvmsta->lock); - lockdep_assert_held(&mvm->queue_info_lock); - - if (WARN_ON(iwl_mvm_has_new_tx_api(mvm))) - return; - - /* Go over all non-active TIDs, incl. IWL_MAX_TID_COUNT (for mgmt) */ - for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) { - /* If some TFDs are still queued - don't mark TID as inactive */ - if (iwl_mvm_tid_queued(mvm, &mvmsta->tid_data[tid])) - tid_bitmap &= ~BIT(tid); - - /* Don't mark as inactive any TID that has an active BA */ - if (mvmsta->tid_data[tid].state != IWL_AGG_OFF) - tid_bitmap &= ~BIT(tid); - } - - /* If all TIDs in the queue are inactive - mark queue as inactive. */ - if (tid_bitmap == mvm->queue_info[queue].tid_bitmap) { - mvm->queue_info[queue].status = IWL_MVM_QUEUE_INACTIVE; - - for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) - mvmsta->tid_data[tid].is_tid_active = false; - - IWL_DEBUG_TX_QUEUES(mvm, "Queue %d marked as inactive\n", - queue); - return; - } - - /* - * If we are here, this is a shared queue and not all TIDs timed-out. - * Remove the ones that did. - */ - for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) { - int mac_queue = mvmsta->vif->hw_queue[tid_to_mac80211_ac[tid]]; - - mvmsta->tid_data[tid].txq_id = IWL_MVM_INVALID_QUEUE; - mvm->hw_queue_to_mac80211[queue] &= ~BIT(mac_queue); - mvm->queue_info[queue].hw_queue_refcount--; - mvm->queue_info[queue].tid_bitmap &= ~BIT(tid); - mvmsta->tid_data[tid].is_tid_active = false; - - IWL_DEBUG_TX_QUEUES(mvm, - "Removing inactive TID %d from shared Q:%d\n", - tid, queue); - } - - IWL_DEBUG_TX_QUEUES(mvm, - "TXQ #%d left with tid bitmap 0x%x\n", queue, - mvm->queue_info[queue].tid_bitmap); - - /* - * There may be different TIDs with the same mac queues, so make - * sure all TIDs have existing corresponding mac queues enabled - */ - tid_bitmap = mvm->queue_info[queue].tid_bitmap; - for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) { - mvm->hw_queue_to_mac80211[queue] |= - BIT(mvmsta->vif->hw_queue[tid_to_mac80211_ac[tid]]); - } - - /* If the queue is marked as shared - "unshare" it */ - if (mvm->queue_info[queue].hw_queue_refcount == 1 && - mvm->queue_info[queue].status == IWL_MVM_QUEUE_SHARED) { - mvm->queue_info[queue].status = IWL_MVM_QUEUE_RECONFIGURING; - IWL_DEBUG_TX_QUEUES(mvm, "Marking Q:%d for reconfig\n", - queue); - } -} - -void iwl_mvm_inactivity_check(struct iwl_mvm *mvm) -{ - unsigned long timeout_queues_map = 0; - unsigned long now = jiffies; - int i; - - if (iwl_mvm_has_new_tx_api(mvm)) - return; - - spin_lock_bh(&mvm->queue_info_lock); - for (i = 0; i < IWL_MAX_HW_QUEUES; i++) - if (mvm->queue_info[i].hw_queue_refcount > 0) - timeout_queues_map |= BIT(i); - spin_unlock_bh(&mvm->queue_info_lock); - - rcu_read_lock(); - - /* - * If a queue time outs - mark it as INACTIVE (don't remove right away - * if we don't have to.) 
This is an optimization in case traffic comes - * later, and we don't HAVE to use a currently-inactive queue - */ - for_each_set_bit(i, &timeout_queues_map, IWL_MAX_HW_QUEUES) { - struct ieee80211_sta *sta; - struct iwl_mvm_sta *mvmsta; - u8 sta_id; - int tid; - unsigned long inactive_tid_bitmap = 0; - unsigned long queue_tid_bitmap; - - spin_lock_bh(&mvm->queue_info_lock); - queue_tid_bitmap = mvm->queue_info[i].tid_bitmap; - - /* If TXQ isn't in active use anyway - nothing to do here... */ - if (mvm->queue_info[i].status != IWL_MVM_QUEUE_READY && - mvm->queue_info[i].status != IWL_MVM_QUEUE_SHARED) { - spin_unlock_bh(&mvm->queue_info_lock); - continue; - } - - /* Check to see if there are inactive TIDs on this queue */ - for_each_set_bit(tid, &queue_tid_bitmap, - IWL_MAX_TID_COUNT + 1) { - if (time_after(mvm->queue_info[i].last_frame_time[tid] + - IWL_MVM_DQA_QUEUE_TIMEOUT, now)) - continue; - - inactive_tid_bitmap |= BIT(tid); - } - spin_unlock_bh(&mvm->queue_info_lock); - - /* If all TIDs are active - finish check on this queue */ - if (!inactive_tid_bitmap) - continue; - - /* - * If we are here - the queue hadn't been served recently and is - * in use - */ - - sta_id = mvm->queue_info[i].ra_sta_id; - sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]); - - /* - * If the STA doesn't exist anymore, it isn't an error. It could - * be that it was removed since getting the queues, and in this - * case it should've inactivated its queues anyway. - */ - if (IS_ERR_OR_NULL(sta)) - continue; - - mvmsta = iwl_mvm_sta_from_mac80211(sta); - - spin_lock_bh(&mvmsta->lock); - spin_lock(&mvm->queue_info_lock); - iwl_mvm_remove_inactive_tids(mvm, mvmsta, i, - inactive_tid_bitmap); - spin_unlock(&mvm->queue_info_lock); - spin_unlock_bh(&mvmsta->lock); - } - - rcu_read_unlock(); -} - void iwl_mvm_event_frame_timeout_callback(struct iwl_mvm *mvm, struct ieee80211_vif *vif, const struct ieee80211_sta *sta, @@ -1430,14 +1010,12 @@ void iwl_mvm_event_frame_timeout_callback(struct iwl_mvm *mvm, struct iwl_fw_dbg_trigger_tlv *trig; struct iwl_fw_dbg_trigger_ba *ba_trig; - if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_BA)) + trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, ieee80211_vif_to_wdev(vif), + FW_DBG_TRIGGER_BA); + if (!trig) return; - trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_BA); ba_trig = (void *)trig->data; - if (!iwl_fw_dbg_trigger_check_stop(&mvm->fwrt, - ieee80211_vif_to_wdev(vif), trig)) - return; if (!(le16_to_cpu(ba_trig->frame_timeout) & BIT(tid))) return; diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c index d519e7ebdbe8..e965cc588850 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c @@ -1144,6 +1144,14 @@ void iwl_pcie_rx_free(struct iwl_trans *trans) kfree(trans_pcie->rxq); } +static void iwl_pcie_rx_move_to_allocator(struct iwl_rxq *rxq, + struct iwl_rb_allocator *rba) +{ + spin_lock(&rba->lock); + list_splice_tail_init(&rxq->rx_used, &rba->rbd_empty); + spin_unlock(&rba->lock); +} + /* * iwl_pcie_rx_reuse_rbd - Recycle used RBDs * @@ -1175,9 +1183,7 @@ static void iwl_pcie_rx_reuse_rbd(struct iwl_trans *trans, if ((rxq->used_count % RX_CLAIM_REQ_ALLOC) == RX_POST_REQ_ALLOC) { /* Move the 2 RBDs to the allocator ownership. 
Allocator has another 6 from pool for the request completion*/ - spin_lock(&rba->lock); - list_splice_tail_init(&rxq->rx_used, &rba->rbd_empty); - spin_unlock(&rba->lock); + iwl_pcie_rx_move_to_allocator(rxq, rba); atomic_inc(&rba->req_pending); queue_work(rba->alloc_wq, &rba->rx_alloc); @@ -1400,10 +1406,18 @@ restart: IWL_DEBUG_RX(trans, "Q %d: HW = SW = %d\n", rxq->id, r); while (i != r) { + struct iwl_rb_allocator *rba = &trans_pcie->rba; struct iwl_rx_mem_buffer *rxb; - - if (unlikely(rxq->used_count == rxq->queue_size / 2)) + /* number of RBDs still waiting for page allocation */ + u32 rb_pending_alloc = + atomic_read(&trans_pcie->rba.req_pending) * + RX_CLAIM_REQ_ALLOC; + + if (unlikely(rb_pending_alloc >= rxq->queue_size / 2 && + !emergency)) { + iwl_pcie_rx_move_to_allocator(rxq, rba); emergency = true; + } rxb = iwl_pcie_get_rxb(trans, rxq, i); if (!rxb) @@ -1425,17 +1439,13 @@ restart: iwl_pcie_rx_allocator_get(trans, rxq); if (rxq->used_count % RX_CLAIM_REQ_ALLOC == 0 && !emergency) { - struct iwl_rb_allocator *rba = &trans_pcie->rba; - /* Add the remaining empty RBDs for allocator use */ - spin_lock(&rba->lock); - list_splice_tail_init(&rxq->rx_used, &rba->rbd_empty); - spin_unlock(&rba->lock); + iwl_pcie_rx_move_to_allocator(rxq, rba); } else if (emergency) { count++; if (count == 8) { count = 0; - if (rxq->used_count < rxq->queue_size / 3) + if (rb_pending_alloc < rxq->queue_size / 3) emergency = false; rxq->read = i; diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c index bc6682a11fa4..5bafb3f46eb8 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c @@ -931,7 +931,7 @@ void iwl_pcie_apply_destination(struct iwl_trans *trans) else IWL_WARN(trans, "PCI should have external buffer debug\n"); - for (i = 0; i < trans->dbg_dest_reg_num; i++) { + for (i = 0; i < trans->dbg_n_dest_reg; i++) { u32 addr = le32_to_cpu(dest->reg_ops[i].addr); u32 val = le32_to_cpu(dest->reg_ops[i].val); diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c index ba9d37bed4c2..e880f69eac26 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c @@ -330,7 +330,7 @@ static int iwl_pcie_gen2_build_amsdu(struct iwl_trans *trans, goto out_err; } iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, tb_len); - trace_iwlwifi_dev_tx_tso_chunk(trans->dev, start_hdr, tb_len); + trace_iwlwifi_dev_tx_tb(trans->dev, skb, start_hdr, tb_len); /* add this subframe's headers' length to the tx_cmd */ le16_add_cpu(&tx_cmd->len, hdr_page->pos - subf_hdrs_start); @@ -347,8 +347,8 @@ static int iwl_pcie_gen2_build_amsdu(struct iwl_trans *trans, goto out_err; } iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, tb_len); - trace_iwlwifi_dev_tx_tso_chunk(trans->dev, tso.data, - tb_len); + trace_iwlwifi_dev_tx_tb(trans->dev, skb, tso.data, + tb_len); data_left -= tb_len; tso_build_data(skb, &tso, tb_len); @@ -438,6 +438,11 @@ static int iwl_pcie_gen2_tx_add_frags(struct iwl_trans *trans, return -ENOMEM; tb_idx = iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, skb_frag_size(frag)); + trace_iwlwifi_dev_tx_tb(trans->dev, skb, + skb_frag_address(frag), + skb_frag_size(frag)); + if (tb_idx < 0) + return tb_idx; out_meta->tbs |= BIT(tb_idx); } @@ -452,7 +457,8 @@ iwl_tfh_tfd *iwl_pcie_gen2_build_tx(struct iwl_trans *trans, struct sk_buff *skb, struct iwl_cmd_meta *out_meta, int hdr_len, - int tx_cmd_len) + int 
tx_cmd_len, + bool pad) { int idx = iwl_pcie_get_cmd_index(txq, txq->write_ptr); struct iwl_tfh_tfd *tfd = iwl_pcie_get_tfd(trans, txq, idx); @@ -476,7 +482,10 @@ iwl_tfh_tfd *iwl_pcie_gen2_build_tx(struct iwl_trans *trans, len = tx_cmd_len + sizeof(struct iwl_cmd_header) + hdr_len - IWL_FIRST_TB_SIZE; - tb1_len = ALIGN(len, 4); + if (pad) + tb1_len = ALIGN(len, 4); + else + tb1_len = len; /* map the data for TB1 */ tb1_addr = ((u8 *)&dev_cmd->hdr) + IWL_FIRST_TB_SIZE; @@ -484,6 +493,8 @@ iwl_tfh_tfd *iwl_pcie_gen2_build_tx(struct iwl_trans *trans, if (unlikely(dma_mapping_error(trans->dev, tb_phys))) goto out_err; iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, tb1_len); + trace_iwlwifi_dev_tx(trans->dev, skb, tfd, sizeof(*tfd), &dev_cmd->hdr, + IWL_FIRST_TB_SIZE + tb1_len, hdr_len); /* set up TFD's third entry to point to remainder of skb's head */ tb2_len = skb_headlen(skb) - hdr_len; @@ -494,15 +505,14 @@ iwl_tfh_tfd *iwl_pcie_gen2_build_tx(struct iwl_trans *trans, if (unlikely(dma_mapping_error(trans->dev, tb_phys))) goto out_err; iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, tb2_len); + trace_iwlwifi_dev_tx_tb(trans->dev, skb, + skb->data + hdr_len, + tb2_len); } if (iwl_pcie_gen2_tx_add_frags(trans, skb, tfd, out_meta)) goto out_err; - trace_iwlwifi_dev_tx(trans->dev, skb, tfd, sizeof(*tfd), &dev_cmd->hdr, - IWL_FIRST_TB_SIZE + tb1_len, hdr_len); - trace_iwlwifi_dev_tx_data(trans->dev, skb, hdr_len); - return tfd; out_err: @@ -549,7 +559,7 @@ struct iwl_tfh_tfd *iwl_pcie_gen2_build_tfd(struct iwl_trans *trans, out_meta, hdr_len, len); return iwl_pcie_gen2_build_tx(trans, txq, dev_cmd, skb, out_meta, - hdr_len, len); + hdr_len, len, !amsdu); } int iwl_trans_pcie_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb, diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c index 67820bfaba64..87b7225fe289 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c @@ -1994,6 +1994,9 @@ static int iwl_fill_data_tbs(struct iwl_trans *trans, struct sk_buff *skb, head_tb_len, DMA_TO_DEVICE); if (unlikely(dma_mapping_error(trans->dev, tb_phys))) return -EINVAL; + trace_iwlwifi_dev_tx_tb(trans->dev, skb, + skb->data + hdr_len, + head_tb_len); iwl_pcie_txq_build_tfd(trans, txq, tb_phys, head_tb_len, false); } @@ -2011,8 +2014,13 @@ static int iwl_fill_data_tbs(struct iwl_trans *trans, struct sk_buff *skb, if (unlikely(dma_mapping_error(trans->dev, tb_phys))) return -EINVAL; + trace_iwlwifi_dev_tx_tb(trans->dev, skb, + skb_frag_address(frag), + skb_frag_size(frag)); tb_idx = iwl_pcie_txq_build_tfd(trans, txq, tb_phys, skb_frag_size(frag), false); + if (tb_idx < 0) + return tb_idx; out_meta->tbs |= BIT(tb_idx); } @@ -2188,8 +2196,8 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb, } iwl_pcie_txq_build_tfd(trans, txq, hdr_tb_phys, hdr_tb_len, false); - trace_iwlwifi_dev_tx_tso_chunk(trans->dev, start_hdr, - hdr_tb_len); + trace_iwlwifi_dev_tx_tb(trans->dev, skb, start_hdr, + hdr_tb_len); /* add this subframe's headers' length to the tx_cmd */ le16_add_cpu(&tx_cmd->len, hdr_page->pos - subf_hdrs_start); @@ -2214,8 +2222,8 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb, iwl_pcie_txq_build_tfd(trans, txq, tb_phys, size, false); - trace_iwlwifi_dev_tx_tso_chunk(trans->dev, tso.data, - size); + trace_iwlwifi_dev_tx_tb(trans->dev, skb, tso.data, + size); data_left -= size; tso_build_data(skb, &tso, size); @@ -2396,6 +2404,13 @@ int 
iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb, goto out_err; iwl_pcie_txq_build_tfd(trans, txq, tb1_phys, tb1_len, false); + trace_iwlwifi_dev_tx(trans->dev, skb, + iwl_pcie_get_tfd(trans, txq, + txq->write_ptr), + trans_pcie->tfd_size, + &dev_cmd->hdr, IWL_FIRST_TB_SIZE + tb1_len, + hdr_len); + /* * If gso_size wasn't set, don't give the frame "amsdu treatment" * (adding subframes, etc.). @@ -2419,14 +2434,6 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb, out_meta))) goto out_err; } - - trace_iwlwifi_dev_tx(trans->dev, skb, - iwl_pcie_get_tfd(trans, txq, - txq->write_ptr), - trans_pcie->tfd_size, - &dev_cmd->hdr, IWL_FIRST_TB_SIZE + tb1_len, - hdr_len); - trace_iwlwifi_dev_tx_data(trans->dev, skb, hdr_len); } /* building the A-MSDU might have changed this data, so memcpy it now */ diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c index f3863101af78..aa8058264d5b 100644 --- a/drivers/net/wireless/mac80211_hwsim.c +++ b/drivers/net/wireless/mac80211_hwsim.c @@ -495,7 +495,6 @@ static const struct ieee80211_iface_combination hwsim_if_comb_p2p_dev[] = { static spinlock_t hwsim_radio_lock; static LIST_HEAD(hwsim_radios); -static struct workqueue_struct *hwsim_wq; static struct rhashtable hwsim_radios_rht; static int hwsim_radio_idx; static int hwsim_radios_generation = 1; @@ -521,7 +520,6 @@ struct mac80211_hwsim_data { int channels, idx; bool use_chanctx; bool destroy_on_close; - struct work_struct destroy_work; u32 portid; char alpha2[2]; const struct ieee80211_regdomain *regd; @@ -2931,8 +2929,7 @@ static int mac80211_hwsim_new_radio(struct genl_info *info, hwsim_radios_generation++; spin_unlock_bh(&hwsim_radio_lock); - if (idx > 0) - hwsim_mcast_new_radio(idx, info, param); + hwsim_mcast_new_radio(idx, info, param); return idx; @@ -3561,30 +3558,27 @@ static struct genl_family hwsim_genl_family __ro_after_init = { .n_mcgrps = ARRAY_SIZE(hwsim_mcgrps), }; -static void destroy_radio(struct work_struct *work) -{ - struct mac80211_hwsim_data *data = - container_of(work, struct mac80211_hwsim_data, destroy_work); - - hwsim_radios_generation++; - mac80211_hwsim_del_radio(data, wiphy_name(data->hw->wiphy), NULL); -} - static void remove_user_radios(u32 portid) { struct mac80211_hwsim_data *entry, *tmp; + LIST_HEAD(list); spin_lock_bh(&hwsim_radio_lock); list_for_each_entry_safe(entry, tmp, &hwsim_radios, list) { if (entry->destroy_on_close && entry->portid == portid) { - list_del(&entry->list); + list_move(&entry->list, &list); rhashtable_remove_fast(&hwsim_radios_rht, &entry->rht, hwsim_rht_params); - INIT_WORK(&entry->destroy_work, destroy_radio); - queue_work(hwsim_wq, &entry->destroy_work); + hwsim_radios_generation++; } } spin_unlock_bh(&hwsim_radio_lock); + + list_for_each_entry_safe(entry, tmp, &list, list) { + list_del(&entry->list); + mac80211_hwsim_del_radio(entry, wiphy_name(entry->hw->wiphy), + NULL); + } } static int mac80211_hwsim_netlink_notify(struct notifier_block *nb, @@ -3642,6 +3636,7 @@ static __net_init int hwsim_init_net(struct net *net) static void __net_exit hwsim_exit_net(struct net *net) { struct mac80211_hwsim_data *data, *tmp; + LIST_HEAD(list); spin_lock_bh(&hwsim_radio_lock); list_for_each_entry_safe(data, tmp, &hwsim_radios, list) { @@ -3652,17 +3647,19 @@ static void __net_exit hwsim_exit_net(struct net *net) if (data->netgroup == hwsim_net_get_netgroup(&init_net)) continue; - list_del(&data->list); + list_move(&data->list, &list); rhashtable_remove_fast(&hwsim_radios_rht, &data->rht, 
hwsim_rht_params); hwsim_radios_generation++; - spin_unlock_bh(&hwsim_radio_lock); + } + spin_unlock_bh(&hwsim_radio_lock); + + list_for_each_entry_safe(data, tmp, &list, list) { + list_del(&data->list); mac80211_hwsim_del_radio(data, wiphy_name(data->hw->wiphy), NULL); - spin_lock_bh(&hwsim_radio_lock); } - spin_unlock_bh(&hwsim_radio_lock); ida_simple_remove(&hwsim_netgroup_ida, hwsim_net_get_netgroup(net)); } @@ -3694,13 +3691,9 @@ static int __init init_mac80211_hwsim(void) spin_lock_init(&hwsim_radio_lock); - hwsim_wq = alloc_workqueue("hwsim_wq", 0, 0); - if (!hwsim_wq) - return -ENOMEM; - err = rhashtable_init(&hwsim_radios_rht, &hwsim_rht_params); if (err) - goto out_free_wq; + return err; err = register_pernet_device(&hwsim_net_ops); if (err) @@ -3831,8 +3824,6 @@ out_unregister_pernet: unregister_pernet_device(&hwsim_net_ops); out_free_rht: rhashtable_destroy(&hwsim_radios_rht); -out_free_wq: - destroy_workqueue(hwsim_wq); return err; } module_init(init_mac80211_hwsim); @@ -3844,12 +3835,10 @@ static void __exit exit_mac80211_hwsim(void) hwsim_exit_netlink(); mac80211_hwsim_free(); - flush_workqueue(hwsim_wq); rhashtable_destroy(&hwsim_radios_rht); unregister_netdev(hwsim_mon); platform_driver_unregister(&mac80211_hwsim_driver); unregister_pernet_device(&hwsim_net_ops); - destroy_workqueue(hwsim_wq); } module_exit(exit_mac80211_hwsim); diff --git a/drivers/net/wireless/marvell/libertas/if_cs.c b/drivers/net/wireless/marvell/libertas/if_cs.c index 7d88223f890b..cebf03c6a622 100644 --- a/drivers/net/wireless/marvell/libertas/if_cs.c +++ b/drivers/net/wireless/marvell/libertas/if_cs.c @@ -900,8 +900,8 @@ static int if_cs_probe(struct pcmcia_device *p_dev) /* Make this card known to the libertas driver */ priv = lbs_add_card(card, &p_dev->dev); - if (!priv) { - ret = -ENOMEM; + if (IS_ERR(priv)) { + ret = PTR_ERR(priv); goto out2; } diff --git a/drivers/net/wireless/marvell/libertas/if_sdio.c b/drivers/net/wireless/marvell/libertas/if_sdio.c index 43743c26c071..8d98e7fdd27c 100644 --- a/drivers/net/wireless/marvell/libertas/if_sdio.c +++ b/drivers/net/wireless/marvell/libertas/if_sdio.c @@ -1206,8 +1206,8 @@ static int if_sdio_probe(struct sdio_func *func, priv = lbs_add_card(card, &func->dev); - if (!priv) { - ret = -ENOMEM; + if (IS_ERR(priv)) { + ret = PTR_ERR(priv); goto free; } @@ -1317,6 +1317,10 @@ static int if_sdio_suspend(struct device *dev) if (priv->wol_criteria == EHS_REMOVE_WAKEUP) { dev_info(dev, "Suspend without wake params -- powering down card\n"); if (priv->fw_ready) { + ret = lbs_suspend(priv); + if (ret) + return ret; + priv->power_up_on_resume = true; if_sdio_power_off(card); } diff --git a/drivers/net/wireless/marvell/libertas/if_spi.c b/drivers/net/wireless/marvell/libertas/if_spi.c index e9aec6cb1105..504d6e096476 100644 --- a/drivers/net/wireless/marvell/libertas/if_spi.c +++ b/drivers/net/wireless/marvell/libertas/if_spi.c @@ -1146,8 +1146,8 @@ static int if_spi_probe(struct spi_device *spi) * This will call alloc_etherdev. 
*/ priv = lbs_add_card(card, &spi->dev); - if (!priv) { - err = -ENOMEM; + if (IS_ERR(priv)) { + err = PTR_ERR(priv); goto free_card; } card->priv = priv; diff --git a/drivers/net/wireless/marvell/libertas/if_usb.c b/drivers/net/wireless/marvell/libertas/if_usb.c index c67a8e7be310..220dcdee8d2b 100644 --- a/drivers/net/wireless/marvell/libertas/if_usb.c +++ b/drivers/net/wireless/marvell/libertas/if_usb.c @@ -254,8 +254,11 @@ static int if_usb_probe(struct usb_interface *intf, goto dealloc; } - if (!(priv = lbs_add_card(cardp, &intf->dev))) + priv = lbs_add_card(cardp, &intf->dev); + if (IS_ERR(priv)) { + r = PTR_ERR(priv); goto err_add_card; + } cardp->priv = priv; @@ -456,8 +459,6 @@ static int __if_usb_submit_rx_urb(struct if_usb_card *cardp, MRVDRV_ETH_RX_PACKET_BUFFER_SIZE, callbackfn, cardp); - cardp->rx_urb->transfer_flags |= URB_ZERO_PACKET; - lbs_deb_usb2(&cardp->udev->dev, "Pointer for rx_urb %p\n", cardp->rx_urb); if ((ret = usb_submit_urb(cardp->rx_urb, GFP_ATOMIC))) { lbs_deb_usbd(&cardp->udev->dev, "Submit Rx URB failed: %d\n", ret); diff --git a/drivers/net/wireless/marvell/libertas/main.c b/drivers/net/wireless/marvell/libertas/main.c index f22e1c220cba..f7db60bc7c7f 100644 --- a/drivers/net/wireless/marvell/libertas/main.c +++ b/drivers/net/wireless/marvell/libertas/main.c @@ -907,25 +907,29 @@ struct lbs_private *lbs_add_card(void *card, struct device *dmdev) struct net_device *dev; struct wireless_dev *wdev; struct lbs_private *priv = NULL; + int err; /* Allocate an Ethernet device and register it */ wdev = lbs_cfg_alloc(dmdev); if (IS_ERR(wdev)) { + err = PTR_ERR(wdev); pr_err("cfg80211 init failed\n"); - goto done; + goto err_cfg; } wdev->iftype = NL80211_IFTYPE_STATION; priv = wdev_priv(wdev); priv->wdev = wdev; - if (lbs_init_adapter(priv)) { + err = lbs_init_adapter(priv); + if (err) { pr_err("failed to initialize adapter structure\n"); goto err_wdev; } dev = alloc_netdev(0, "wlan%d", NET_NAME_UNKNOWN, ether_setup); if (!dev) { + err = -ENOMEM; dev_err(dmdev, "no memory for network device instance\n"); goto err_adapter; } @@ -949,6 +953,7 @@ struct lbs_private *lbs_add_card(void *card, struct device *dmdev) init_waitqueue_head(&priv->waitq); priv->main_thread = kthread_run(lbs_thread, dev, "lbs_main"); if (IS_ERR(priv->main_thread)) { + err = PTR_ERR(priv->main_thread); lbs_deb_thread("Error creating main thread.\n"); goto err_ndev; } @@ -961,7 +966,7 @@ struct lbs_private *lbs_add_card(void *card, struct device *dmdev) priv->wol_gap = 20; priv->ehs_remove_supported = true; - goto done; + return priv; err_ndev: free_netdev(dev); @@ -972,10 +977,8 @@ struct lbs_private *lbs_add_card(void *card, struct device *dmdev) err_wdev: lbs_cfg_free(priv); - priv = NULL; - -done: - return priv; + err_cfg: + return ERR_PTR(err); } EXPORT_SYMBOL_GPL(lbs_add_card); diff --git a/drivers/net/wireless/mediatek/mt76/Kconfig b/drivers/net/wireless/mediatek/mt76/Kconfig index 7f24aad94efd..0ccbcd7e887d 100644 --- a/drivers/net/wireless/mediatek/mt76/Kconfig +++ b/drivers/net/wireless/mediatek/mt76/Kconfig @@ -13,44 +13,5 @@ config MT76x02_USB tristate select MT76_USB -config MT76x0_COMMON - tristate - select MT76x02_LIB - -config MT76x2_COMMON - tristate - select MT76x02_LIB - -config MT76x0U - tristate "MediaTek MT76x0U (USB) support" - select MT76x0_COMMON - select MT76x02_USB - depends on MAC80211 - depends on USB - help - This adds support for MT7610U-based wireless USB dongles. 
- -config MT76x0E - tristate "MediaTek MT76x0E (PCIe) support" - select MT76x0_COMMON - depends on MAC80211 - depends on PCI - help - This adds support for MT7610/MT7630-based wireless PCIe devices. - -config MT76x2E - tristate "MediaTek MT76x2E (PCIe) support" - select MT76x2_COMMON - depends on MAC80211 - depends on PCI - ---help--- - This adds support for MT7612/MT7602/MT7662-based wireless PCIe devices. - -config MT76x2U - tristate "MediaTek MT76x2U (USB) support" - select MT76x2_COMMON - select MT76x02_USB - depends on MAC80211 - depends on USB - help - This adds support for MT7612U-based wireless USB dongles. +source "drivers/net/wireless/mediatek/mt76/mt76x0/Kconfig" +source "drivers/net/wireless/mediatek/mt76/mt76x2/Kconfig" diff --git a/drivers/net/wireless/mediatek/mt76/Makefile b/drivers/net/wireless/mediatek/mt76/Makefile index 2346a1b768bc..9b8d7488c545 100644 --- a/drivers/net/wireless/mediatek/mt76/Makefile +++ b/drivers/net/wireless/mediatek/mt76/Makefile @@ -1,11 +1,7 @@ obj-$(CONFIG_MT76_CORE) += mt76.o obj-$(CONFIG_MT76_USB) += mt76-usb.o -obj-$(CONFIG_MT76x0_COMMON) += mt76x0/ obj-$(CONFIG_MT76x02_LIB) += mt76x02-lib.o obj-$(CONFIG_MT76x02_USB) += mt76x02-usb.o -obj-$(CONFIG_MT76x2_COMMON) += mt76x2-common.o -obj-$(CONFIG_MT76x2E) += mt76x2e.o -obj-$(CONFIG_MT76x2U) += mt76x2u.o mt76-y := \ mmio.o util.o trace.o dma.o mac80211.o debugfs.o eeprom.o tx.o agg-rx.o @@ -14,25 +10,13 @@ mt76-usb-y := usb.o usb_trace.o usb_mcu.o CFLAGS_trace.o := -I$(src) CFLAGS_usb_trace.o := -I$(src) +CFLAGS_mt76x02_trace.o := -I$(src) mt76x02-lib-y := mt76x02_util.o mt76x02_mac.o mt76x02_mcu.o \ - mt76x02_eeprom.o mt76x02_phy.o mt76x02_mmio.o + mt76x02_eeprom.o mt76x02_phy.o mt76x02_mmio.o \ + mt76x02_txrx.o mt76x02_trace.o mt76x02-usb-y := mt76x02_usb_mcu.o mt76x02_usb_core.o -mt76x2-common-y := \ - mt76x2_eeprom.o mt76x2_tx_common.o mt76x2_mac_common.o \ - mt76x2_init_common.o mt76x2_common.o mt76x2_phy_common.o \ - mt76x2_debugfs.o mt76x2_mcu_common.o - -mt76x2e-y := \ - mt76x2_pci.o mt76x2_dma.o \ - mt76x2_main.o mt76x2_init.o mt76x2_tx.o \ - mt76x2_core.o mt76x2_mac.o mt76x2_mcu.o mt76x2_phy.o \ - mt76x2_dfs.o mt76x2_trace.o - -mt76x2u-y := \ - mt76x2_usb.o mt76x2u_init.o mt76x2u_main.o mt76x2u_mac.o \ - mt76x2u_mcu.o mt76x2u_phy.o mt76x2u_core.o - -CFLAGS_mt76x2_trace.o := -I$(src) +obj-$(CONFIG_MT76x0_COMMON) += mt76x0/ +obj-$(CONFIG_MT76x2_COMMON) += mt76x2/ diff --git a/drivers/net/wireless/mediatek/mt76/mmio.c b/drivers/net/wireless/mediatek/mt76/mmio.c index 30a5d928e655..1d6bbce76041 100644 --- a/drivers/net/wireless/mediatek/mt76/mmio.c +++ b/drivers/net/wireless/mediatek/mt76/mmio.c @@ -79,6 +79,7 @@ void mt76_mmio_init(struct mt76_dev *dev, void __iomem *regs) .copy = mt76_mmio_copy, .wr_rp = mt76_mmio_wr_rp, .rd_rp = mt76_mmio_rd_rp, + .type = MT76_BUS_MMIO, }; dev->bus = &mt76_mmio_ops; diff --git a/drivers/net/wireless/mediatek/mt76/mt76.h b/drivers/net/wireless/mediatek/mt76/mt76.h index f2dd4d87e355..3bfa7f5e3513 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76.h +++ b/drivers/net/wireless/mediatek/mt76/mt76.h @@ -38,6 +38,11 @@ struct mt76_reg_pair { u32 value; }; +enum mt76_bus_type { + MT76_BUS_MMIO, + MT76_BUS_USB, +}; + struct mt76_bus_ops { u32 (*rr)(struct mt76_dev *dev, u32 offset); void (*wr)(struct mt76_dev *dev, u32 offset, u32 val); @@ -48,8 +53,12 @@ struct mt76_bus_ops { const struct mt76_reg_pair *rp, int len); int (*rd_rp)(struct mt76_dev *dev, u32 base, struct mt76_reg_pair *rp, int len); + enum mt76_bus_type type; }; +#define mt76_is_usb(dev) 
((dev)->mt76.bus->type == MT76_BUS_USB) +#define mt76_is_mmio(dev) ((dev)->mt76.bus->type == MT76_BUS_MMIO) + enum mt76_txq_id { MT_TXQ_VO = IEEE80211_AC_VO, MT_TXQ_VI = IEEE80211_AC_VI, @@ -262,8 +271,6 @@ struct mt76_driver_ops { void (*sta_ps)(struct mt76_dev *dev, struct ieee80211_sta *sta, bool ps); - s8 (*get_max_txpwr_adj)(struct mt76_dev *dev, - const struct ieee80211_tx_rate *rate); }; struct mt76_channel_state { @@ -519,8 +526,8 @@ static inline u16 mt76_rev(struct mt76_dev *dev) #define mt76xx_chip(dev) mt76_chip(&((dev)->mt76)) #define mt76xx_rev(dev) mt76_rev(&((dev)->mt76)) -#define __mt76_init_queues(dev) (dev)->queue_ops->init((dev)) -#define __mt76_queue_alloc(dev, ...) (dev)->queue_ops->alloc((dev), __VA_ARGS__) +#define mt76_init_queues(dev) (dev)->mt76.queue_ops->init(&((dev)->mt76)) +#define mt76_queue_alloc(dev, ...) (dev)->mt76.queue_ops->alloc(&((dev)->mt76), __VA_ARGS__) #define mt76_queue_add_buf(dev, ...) (dev)->mt76.queue_ops->add_buf(&((dev)->mt76), __VA_ARGS__) #define mt76_queue_rx_reset(dev, ...) (dev)->mt76.queue_ops->rx_reset(&((dev)->mt76), __VA_ARGS__) #define mt76_queue_tx_cleanup(dev, ...) (dev)->mt76.queue_ops->tx_cleanup(&((dev)->mt76), __VA_ARGS__) diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/Kconfig b/drivers/net/wireless/mediatek/mt76/mt76x0/Kconfig new file mode 100644 index 000000000000..9a6157db3893 --- /dev/null +++ b/drivers/net/wireless/mediatek/mt76/mt76x0/Kconfig @@ -0,0 +1,20 @@ +config MT76x0_COMMON + tristate + select MT76x02_LIB + +config MT76x0U + tristate "MediaTek MT76x0U (USB) support" + select MT76x0_COMMON + select MT76x02_USB + depends on MAC80211 + depends on USB + help + This adds support for MT7610U-based wireless USB dongles. + +config MT76x0E + tristate "MediaTek MT76x0E (PCIe) support" + select MT76x0_COMMON + depends on MAC80211 + depends on PCI + help + This adds support for MT7610/MT7630-based wireless PCIe devices. 
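A note on the iwlwifi hunks earlier in this diff: the recurring change in time-event.c, tx.c and utils.c replaces a three-step dance (is the trigger enabled? fetch it; should collection stop?) with a single iwl_fw_dbg_trigger_on() lookup that returns the trigger only when it should actually fire, so every caller collapses to one NULL test. Below is a minimal sketch of that consolidation pattern, not the driver's API; the demo_trigger/demo_trigger_on names are stand-ins.

#include <stdio.h>

struct demo_trigger {
	int id;
	int enabled;	/* trigger configured at all */
	int stopped;	/* collection already suppressed */
};

static struct demo_trigger triggers[] = {
	{ .id = 0, .enabled = 1, .stopped = 0 },
};

/* Combined accessor: returns NULL when there is nothing to do, so
 * callers need a single check instead of enabled/get/check_stop. */
static struct demo_trigger *demo_trigger_on(int id)
{
	struct demo_trigger *t = &triggers[id];

	if (!t->enabled || t->stopped)
		return NULL;
	return t;
}

int main(void)
{
	struct demo_trigger *trig = demo_trigger_on(0);

	if (!trig)
		return 0;
	printf("collect debug data for trigger %d\n", trig->id);
	return 0;
}

Folding the checks into one accessor keeps the "should this fire" policy in a single place, which is presumably why the same substitution appears at five separate call sites in this series.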
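The libertas hunks above (if_cs.c, if_sdio.c, if_spi.c, if_usb.c, main.c) all follow from one interface change: lbs_add_card() now reports failures as an encoded error pointer instead of NULL, so each probe path can propagate the real errno rather than assuming -ENOMEM. A userspace sketch of the ERR_PTR convention follows; the three helpers mimic <linux/err.h>, and everything prefixed demo_ is illustrative only.

#include <stdio.h>
#include <errno.h>
#include <stdint.h>

#define MAX_ERRNO 4095

/* Error codes live in the top MAX_ERRNO bytes of the address space,
 * which no valid allocation can occupy. */
static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
	return (uintptr_t)ptr >= (uintptr_t)-MAX_ERRNO;
}

struct demo_priv { int id; };

/* Never returns NULL: on failure the caller learns the actual reason. */
static struct demo_priv *demo_add_card(int fail)
{
	static struct demo_priv priv = { .id = 1 };

	return fail ? ERR_PTR(-EIO) : &priv;
}

int main(void)
{
	struct demo_priv *p = demo_add_card(1);

	if (IS_ERR(p))
		printf("add_card failed: %ld\n", PTR_ERR(p));
	return 0;
}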
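And on the mt76 side just above: mt76.h gains an mt76_bus_type tag in struct mt76_bus_ops plus the mt76_is_usb()/mt76_is_mmio() predicates, letting code shared between the PCIe and USB front ends branch on the transport at runtime (init.c uses exactly this to add the extra USB txwi/DMA headroom). A compressed sketch of the pattern, with demo_ names standing in for the driver structures and 4 as a placeholder for the real header sizes:

#include <stdio.h>

enum demo_bus_type { DEMO_BUS_MMIO, DEMO_BUS_USB };

struct demo_bus_ops {
	/* the real mt76_bus_ops also carries rr/wr/copy callbacks */
	enum demo_bus_type type;
};

struct demo_dev {
	const struct demo_bus_ops *bus;
};

#define demo_is_usb(dev)	((dev)->bus->type == DEMO_BUS_USB)
#define demo_is_mmio(dev)	((dev)->bus->type == DEMO_BUS_MMIO)

/* Shared register path: only the USB transport needs room for a
 * DMA header in front of the TX descriptor. */
static int demo_tx_headroom(const struct demo_dev *dev)
{
	int headroom = 2;

	if (demo_is_usb(dev))
		headroom += 4;	/* stand-in for txwi + MT_DMA_HDR_LEN */
	return headroom;
}

int main(void)
{
	static const struct demo_bus_ops usb = { .type = DEMO_BUS_USB };
	struct demo_dev dev = { .bus = &usb };

	printf("headroom=%d\n", demo_tx_headroom(&dev));
	return 0;
}

Tagging the ops structure rather than the device keeps the per-bus code paths data-driven: one shared init path, two static ops tables, no #ifdefs.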
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/Makefile b/drivers/net/wireless/mediatek/mt76/mt76x0/Makefile index 254d94efd24d..20672978dceb 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x0/Makefile +++ b/drivers/net/wireless/mediatek/mt76/mt76x0/Makefile @@ -4,7 +4,7 @@ obj-$(CONFIG_MT76x0_COMMON) += mt76x0-common.o mt76x0-common-y := \ init.o main.o trace.o eeprom.o phy.o \ - mac.o debugfs.o tx.o + mac.o debugfs.o mt76x0u-y := usb.o usb_mcu.o mt76x0e-y := pci.o pci_mcu.o diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/debugfs.c b/drivers/net/wireless/mediatek/mt76/mt76x0/debugfs.c index ddc1af626b3b..3224e5b1a1e5 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x0/debugfs.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x0/debugfs.c @@ -21,7 +21,7 @@ static int mt76x0_ampdu_stat_read(struct seq_file *file, void *data) { - struct mt76x0_dev *dev = file->private; + struct mt76x02_dev *dev = file->private; int i, j; #define stat_printf(grp, off, name) \ @@ -75,7 +75,7 @@ static const struct file_operations fops_ampdu_stat = { .release = single_release, }; -void mt76x0_init_debugfs(struct mt76x0_dev *dev) +void mt76x0_init_debugfs(struct mt76x02_dev *dev) { struct dentry *dir; diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/dma.h b/drivers/net/wireless/mediatek/mt76/mt76x0/dma.h deleted file mode 100644 index 891ce1c3461f..000000000000 --- a/drivers/net/wireless/mediatek/mt76/mt76x0/dma.h +++ /dev/null @@ -1,126 +0,0 @@ -/* - * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org> - * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 - * as published by the Free Software Foundation - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - */ - -#ifndef __MT76X0U_DMA_H -#define __MT76X0U_DMA_H - -#include <asm/unaligned.h> -#include <linux/skbuff.h> - -#define MT_DMA_HDR_LEN 4 -#define MT_RX_INFO_LEN 4 -#define MT_FCE_INFO_LEN 4 -#define MT_DMA_HDRS (MT_DMA_HDR_LEN + MT_RX_INFO_LEN) - -/* Common Tx DMA descriptor fields */ -#define MT_TXD_INFO_LEN GENMASK(15, 0) -#define MT_TXD_INFO_D_PORT GENMASK(29, 27) -#define MT_TXD_INFO_TYPE GENMASK(31, 30) - -/* Tx DMA MCU command specific flags */ -#define MT_TXD_CMD_SEQ GENMASK(19, 16) -#define MT_TXD_CMD_TYPE GENMASK(26, 20) - -enum mt76_msg_port { - WLAN_PORT, - CPU_RX_PORT, - CPU_TX_PORT, - HOST_PORT, - VIRTUAL_CPU_RX_PORT, - VIRTUAL_CPU_TX_PORT, - DISCARD, -}; - -enum mt76_info_type { - DMA_PACKET, - DMA_COMMAND, -}; - -/* Tx DMA packet specific flags */ -#define MT_TXD_PKT_INFO_NEXT_VLD BIT(16) -#define MT_TXD_PKT_INFO_TX_BURST BIT(17) -#define MT_TXD_PKT_INFO_80211 BIT(19) -#define MT_TXD_PKT_INFO_TSO BIT(20) -#define MT_TXD_PKT_INFO_CSO BIT(21) -#define MT_TXD_PKT_INFO_WIV BIT(24) -#define MT_TXD_PKT_INFO_QSEL GENMASK(26, 25) - -enum mt76_qsel { - MT_QSEL_MGMT, - MT_QSEL_HCCA, - MT_QSEL_EDCA, - MT_QSEL_EDCA_2, -}; - - -static inline int mt76x0_dma_skb_wrap(struct sk_buff *skb, - enum mt76_msg_port d_port, - enum mt76_info_type type, u32 flags) -{ - u32 info; - - /* Buffer layout: - * | 4B | xfer len | pad | 4B | - * | TXINFO | pkt/cmd | zero pad to 4B | zero | - * - * length field of TXINFO should be set to 'xfer len'. 
- */ - - info = flags | - FIELD_PREP(MT_TXD_INFO_LEN, round_up(skb->len, 4)) | - FIELD_PREP(MT_TXD_INFO_D_PORT, d_port) | - FIELD_PREP(MT_TXD_INFO_TYPE, type); - - put_unaligned_le32(info, skb_push(skb, sizeof(info))); - return skb_put_padto(skb, round_up(skb->len, 4) + 4); -} - -static inline int -mt76x0_dma_skb_wrap_pkt(struct sk_buff *skb, enum mt76_qsel qsel, u32 flags) -{ - flags |= FIELD_PREP(MT_TXD_PKT_INFO_QSEL, qsel); - return mt76x0_dma_skb_wrap(skb, WLAN_PORT, DMA_PACKET, flags); -} - -/* Common Rx DMA descriptor fields */ -#define MT_RXD_INFO_LEN GENMASK(13, 0) -#define MT_RXD_INFO_PCIE_INTR BIT(24) -#define MT_RXD_INFO_QSEL GENMASK(26, 25) -#define MT_RXD_INFO_PORT GENMASK(29, 27) -#define MT_RXD_INFO_TYPE GENMASK(31, 30) - -/* Rx DMA packet specific flags */ -#define MT_RXD_PKT_INFO_UDP_ERR BIT(16) -#define MT_RXD_PKT_INFO_TCP_ERR BIT(17) -#define MT_RXD_PKT_INFO_IP_ERR BIT(18) -#define MT_RXD_PKT_INFO_PKT_80211 BIT(19) -#define MT_RXD_PKT_INFO_L3L4_DONE BIT(20) -#define MT_RXD_PKT_INFO_MAC_LEN GENMASK(23, 21) - -/* Rx DMA MCU command specific flags */ -#define MT_RXD_CMD_INFO_SELF_GEN BIT(15) -#define MT_RXD_CMD_INFO_CMD_SEQ GENMASK(19, 16) -#define MT_RXD_CMD_INFO_EVT_TYPE GENMASK(23, 20) - -enum mt76_evt_type { - CMD_DONE, - CMD_ERROR, - CMD_RETRY, - EVENT_PWR_RSP, - EVENT_WOW_RSP, - EVENT_CARRIER_DETECT_RSP, - EVENT_DFS_DETECT_RSP, -}; - -#endif diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c b/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c index 166a1fd8644e..ab4fd6e0f23a 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c @@ -25,14 +25,14 @@ #define MT_MAP_READS DIV_ROUND_UP(MT_EFUSE_USAGE_MAP_SIZE, 16) static int -mt76x0_efuse_physical_size_check(struct mt76x0_dev *dev) +mt76x0_efuse_physical_size_check(struct mt76x02_dev *dev) { u8 data[MT_MAP_READS * 16]; int ret, i; u32 start = 0, end = 0, cnt_free; - ret = mt76x02_get_efuse_data(&dev->mt76, MT_EE_USAGE_MAP_START, - data, sizeof(data), MT_EE_PHYSICAL_READ); + ret = mt76x02_get_efuse_data(dev, MT_EE_USAGE_MAP_START, data, + sizeof(data), MT_EE_PHYSICAL_READ); if (ret) return ret; @@ -53,12 +53,12 @@ mt76x0_efuse_physical_size_check(struct mt76x0_dev *dev) return 0; } -static void mt76x0_set_chip_cap(struct mt76x0_dev *dev) +static void mt76x0_set_chip_cap(struct mt76x02_dev *dev) { - u16 nic_conf0 = mt76x02_eeprom_get(&dev->mt76, MT_EE_NIC_CONF_0); - u16 nic_conf1 = mt76x02_eeprom_get(&dev->mt76, MT_EE_NIC_CONF_1); + u16 nic_conf0 = mt76x02_eeprom_get(dev, MT_EE_NIC_CONF_0); + u16 nic_conf1 = mt76x02_eeprom_get(dev, MT_EE_NIC_CONF_1); - mt76x02_eeprom_parse_hw_cap(&dev->mt76); + mt76x02_eeprom_parse_hw_cap(dev); dev_dbg(dev->mt76.dev, "2GHz %d 5GHz %d\n", dev->mt76.cap.has_2ghz, dev->mt76.cap.has_5ghz); @@ -82,46 +82,44 @@ static void mt76x0_set_chip_cap(struct mt76x0_dev *dev) dev_err(dev->mt76.dev, "invalid tx-rx stream\n"); } -static void mt76x0_set_temp_offset(struct mt76x0_dev *dev) +static void mt76x0_set_temp_offset(struct mt76x02_dev *dev) { u8 val; - val = mt76x02_eeprom_get(&dev->mt76, MT_EE_2G_TARGET_POWER) >> 8; + val = mt76x02_eeprom_get(dev, MT_EE_2G_TARGET_POWER) >> 8; if (mt76x02_field_valid(val)) - dev->caldata.temp_offset = mt76x02_sign_extend(val, 8); + dev->cal.rx.temp_offset = mt76x02_sign_extend(val, 8); else - dev->caldata.temp_offset = -10; + dev->cal.rx.temp_offset = -10; } -static void mt76x0_set_freq_offset(struct mt76x0_dev *dev) +static void mt76x0_set_freq_offset(struct mt76x02_dev *dev) { - struct 
mt76x0_caldata *caldata = &dev->caldata; + struct mt76x02_rx_freq_cal *caldata = &dev->cal.rx; u8 val; - val = mt76x02_eeprom_get(&dev->mt76, MT_EE_FREQ_OFFSET); + val = mt76x02_eeprom_get(dev, MT_EE_FREQ_OFFSET); if (!mt76x02_field_valid(val)) val = 0; caldata->freq_offset = val; - val = mt76x02_eeprom_get(&dev->mt76, MT_EE_TSSI_BOUND4) >> 8; + val = mt76x02_eeprom_get(dev, MT_EE_TSSI_BOUND4) >> 8; if (!mt76x02_field_valid(val)) val = 0; caldata->freq_offset -= mt76x02_sign_extend(val, 8); } -void mt76x0_read_rx_gain(struct mt76x0_dev *dev) +void mt76x0_read_rx_gain(struct mt76x02_dev *dev) { struct ieee80211_channel *chan = dev->mt76.chandef.chan; - struct mt76x0_caldata *caldata = &dev->caldata; + struct mt76x02_rx_freq_cal *caldata = &dev->cal.rx; s8 val, lna_5g[3], lna_2g; u16 rssi_offset; int i; - mt76x02_get_rx_gain(&dev->mt76, chan->band, &rssi_offset, - &lna_2g, lna_5g); - caldata->lna_gain = mt76x02_get_lna_gain(&dev->mt76, &lna_2g, - lna_5g, chan); + mt76x02_get_rx_gain(dev, chan->band, &rssi_offset, &lna_2g, lna_5g); + caldata->lna_gain = mt76x02_get_lna_gain(dev, &lna_2g, lna_5g, chan); for (i = 0; i < ARRAY_SIZE(caldata->rssi_offset); i++) { val = rssi_offset >> (8 * i); @@ -132,12 +130,12 @@ void mt76x0_read_rx_gain(struct mt76x0_dev *dev) } } -static s8 mt76x0_get_delta(struct mt76_dev *dev) +static s8 mt76x0_get_delta(struct mt76x02_dev *dev) { - struct cfg80211_chan_def *chandef = &dev->chandef; + struct cfg80211_chan_def *chandef = &dev->mt76.chandef; u8 val; - if (mt76x02_tssi_enabled(dev)) + if (mt76x0_tssi_enabled(dev)) return 0; if (chandef->width == NL80211_CHAN_WIDTH_80) { @@ -157,66 +155,66 @@ static s8 mt76x0_get_delta(struct mt76_dev *dev) return mt76x02_rate_power_val(val); } -void mt76x0_get_tx_power_per_rate(struct mt76x0_dev *dev) +void mt76x0_get_tx_power_per_rate(struct mt76x02_dev *dev) { struct ieee80211_channel *chan = dev->mt76.chandef.chan; bool is_2ghz = chan->band == NL80211_BAND_2GHZ; struct mt76_rate_power *t = &dev->mt76.rate_power; - s8 delta = mt76x0_get_delta(&dev->mt76); + s8 delta = mt76x0_get_delta(dev); u16 val, addr; memset(t, 0, sizeof(*t)); /* cck 1M, 2M, 5.5M, 11M */ - val = mt76x02_eeprom_get(&dev->mt76, MT_EE_TX_POWER_BYRATE_BASE); + val = mt76x02_eeprom_get(dev, MT_EE_TX_POWER_BYRATE_BASE); t->cck[0] = t->cck[1] = s6_to_s8(val); t->cck[2] = t->cck[3] = s6_to_s8(val >> 8); /* ofdm 6M, 9M, 12M, 18M */ addr = is_2ghz ? MT_EE_TX_POWER_BYRATE_BASE + 2 : 0x120; - val = mt76x02_eeprom_get(&dev->mt76, addr); + val = mt76x02_eeprom_get(dev, addr); t->ofdm[0] = t->ofdm[1] = s6_to_s8(val); t->ofdm[2] = t->ofdm[3] = s6_to_s8(val >> 8); /* ofdm 24M, 36M, 48M, 54M */ addr = is_2ghz ? MT_EE_TX_POWER_BYRATE_BASE + 4 : 0x122; - val = mt76x02_eeprom_get(&dev->mt76, addr); + val = mt76x02_eeprom_get(dev, addr); t->ofdm[4] = t->ofdm[5] = s6_to_s8(val); t->ofdm[6] = t->ofdm[7] = s6_to_s8(val >> 8); /* ht-vht mcs 1ss 0, 1, 2, 3 */ addr = is_2ghz ? MT_EE_TX_POWER_BYRATE_BASE + 6 : 0x124; - val = mt76x02_eeprom_get(&dev->mt76, addr); + val = mt76x02_eeprom_get(dev, addr); t->ht[0] = t->ht[1] = t->vht[0] = t->vht[1] = s6_to_s8(val); t->ht[2] = t->ht[3] = t->vht[2] = t->vht[3] = s6_to_s8(val >> 8); /* ht-vht mcs 1ss 4, 5, 6 */ addr = is_2ghz ? MT_EE_TX_POWER_BYRATE_BASE + 8 : 0x126; - val = mt76x02_eeprom_get(&dev->mt76, addr); + val = mt76x02_eeprom_get(dev, addr); t->ht[4] = t->ht[5] = t->vht[4] = t->vht[5] = s6_to_s8(val); t->ht[6] = t->vht[6] = s6_to_s8(val >> 8); /* ht-vht mcs 1ss 0, 1, 2, 3 stbc */ addr = is_2ghz ? 
MT_EE_TX_POWER_BYRATE_BASE + 14 : 0xec; - val = mt76x02_eeprom_get(&dev->mt76, addr); + val = mt76x02_eeprom_get(dev, addr); t->stbc[0] = t->stbc[1] = s6_to_s8(val); t->stbc[2] = t->stbc[3] = s6_to_s8(val >> 8); /* ht-vht mcs 1ss 4, 5, 6 stbc */ addr = is_2ghz ? MT_EE_TX_POWER_BYRATE_BASE + 16 : 0xee; - val = mt76x02_eeprom_get(&dev->mt76, addr); + val = mt76x02_eeprom_get(dev, addr); t->stbc[4] = t->stbc[5] = s6_to_s8(val); t->stbc[6] = t->stbc[7] = s6_to_s8(val >> 8); /* vht mcs 8, 9 5GHz */ - val = mt76x02_eeprom_get(&dev->mt76, 0x132); + val = mt76x02_eeprom_get(dev, 0x132); t->vht[7] = s6_to_s8(val); t->vht[8] = s6_to_s8(val >> 8); mt76x02_add_rate_power_offset(t, delta); } -void mt76x0_get_power_info(struct mt76x0_dev *dev, u8 *info) +void mt76x0_get_power_info(struct mt76x02_dev *dev, u8 *info) { struct mt76x0_chan_map { u8 chan; @@ -266,7 +264,7 @@ void mt76x0_get_power_info(struct mt76x0_dev *dev, u8 *info) addr = MT_EE_TX_POWER_0_GRP4_TSSI_SLOPE + 2 + offset; } - data = mt76x02_eeprom_get(&dev->mt76, addr); + data = mt76x02_eeprom_get(dev, addr); info[0] = data; if (!info[0] || info[0] > 0x3f) @@ -277,7 +275,7 @@ void mt76x0_get_power_info(struct mt76x0_dev *dev, u8 *info) info[1] = 5; } -static int mt76x0_check_eeprom(struct mt76x0_dev *dev) +static int mt76x0_check_eeprom(struct mt76x02_dev *dev) { u16 val; @@ -297,7 +295,7 @@ static int mt76x0_check_eeprom(struct mt76x0_dev *dev) } } -static int mt76x0_load_eeprom(struct mt76x0_dev *dev) +static int mt76x0_load_eeprom(struct mt76x02_dev *dev) { int found; @@ -312,11 +310,11 @@ static int mt76x0_load_eeprom(struct mt76x0_dev *dev) if (found < 0) return found; - return mt76x02_get_efuse_data(&dev->mt76, 0, dev->mt76.eeprom.data, + return mt76x02_get_efuse_data(dev, 0, dev->mt76.eeprom.data, MT76X0_EEPROM_SIZE, MT_EE_READ); } -int mt76x0_eeprom_init(struct mt76x0_dev *dev) +int mt76x0_eeprom_init(struct mt76x02_dev *dev) { u8 version, fae; u16 data; @@ -326,7 +324,7 @@ int mt76x0_eeprom_init(struct mt76x0_dev *dev) if (err < 0) return err; - data = mt76x02_eeprom_get(&dev->mt76, MT_EE_VERSION); + data = mt76x02_eeprom_get(dev, MT_EE_VERSION); version = data >> 8; fae = data; @@ -337,8 +335,7 @@ int mt76x0_eeprom_init(struct mt76x0_dev *dev) dev_info(dev->mt76.dev, "EEPROM ver:%02hhx fae:%02hhx\n", version, fae); - mt76x02_mac_setaddr(&dev->mt76, - dev->mt76.eeprom.data + MT_EE_MAC_ADDR); + mt76x02_mac_setaddr(dev, dev->mt76.eeprom.data + MT_EE_MAC_ADDR); mt76x0_set_chip_cap(dev); mt76x0_set_freq_offset(dev); mt76x0_set_temp_offset(dev); diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.h b/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.h index 4e1fafa5b8c3..ee9ade9f3c8b 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.h +++ b/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.h @@ -18,23 +18,15 @@ #include "../mt76x02_eeprom.h" -struct mt76x0_dev; +struct mt76x02_dev; #define MT76X0U_EE_MAX_VER 0x0c #define MT76X0_EEPROM_SIZE 512 -struct mt76x0_caldata { - s8 rssi_offset[2]; - s8 lna_gain; - - s16 temp_offset; - u8 freq_offset; -}; - -int mt76x0_eeprom_init(struct mt76x0_dev *dev); -void mt76x0_read_rx_gain(struct mt76x0_dev *dev); -void mt76x0_get_tx_power_per_rate(struct mt76x0_dev *dev); -void mt76x0_get_power_info(struct mt76x0_dev *dev, u8 *info); +int mt76x0_eeprom_init(struct mt76x02_dev *dev); +void mt76x0_read_rx_gain(struct mt76x02_dev *dev); +void mt76x0_get_tx_power_per_rate(struct mt76x02_dev *dev); +void mt76x0_get_power_info(struct mt76x02_dev *dev, u8 *info); static inline s8 s6_to_s8(u32 
val) { @@ -45,4 +37,10 @@ static inline s8 s6_to_s8(u32 val) return ret; } +static inline bool mt76x0_tssi_enabled(struct mt76x02_dev *dev) +{ + return (mt76x02_eeprom_get(dev, MT_EE_NIC_CONF_1) & + MT_EE_NIC_CONF_1_TX_ALC_EN); +} + #endif diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/init.c b/drivers/net/wireless/mediatek/mt76/mt76x0/init.c index edfd5d94d197..4a9408801260 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x0/init.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x0/init.c @@ -18,9 +18,6 @@ #include "eeprom.h" #include "trace.h" #include "mcu.h" -#include "../mt76x02_util.h" -#include "../mt76x02_dma.h" - #include "initvals.h" static void mt76x0_vht_cap_mask(struct ieee80211_supported_band *sband) @@ -42,7 +39,7 @@ static void mt76x0_vht_cap_mask(struct ieee80211_supported_band *sband) } static void -mt76x0_set_wlan_state(struct mt76x0_dev *dev, u32 val, bool enable) +mt76x0_set_wlan_state(struct mt76x02_dev *dev, u32 val, bool enable) { u32 mask = MT_CMB_CTRL_XTAL_RDY | MT_CMB_CTRL_PLL_LD; @@ -69,12 +66,10 @@ mt76x0_set_wlan_state(struct mt76x0_dev *dev, u32 val, bool enable) dev_err(dev->mt76.dev, "PLL and XTAL check failed\n"); } -void mt76x0_chip_onoff(struct mt76x0_dev *dev, bool enable, bool reset) +void mt76x0_chip_onoff(struct mt76x02_dev *dev, bool enable, bool reset) { u32 val; - mutex_lock(&dev->hw_atomic_mutex); - val = mt76_rr(dev, MT_WLAN_FUN_CTRL); if (reset) { @@ -96,12 +91,10 @@ void mt76x0_chip_onoff(struct mt76x0_dev *dev, bool enable, bool reset) udelay(20); mt76x0_set_wlan_state(dev, val, enable); - - mutex_unlock(&dev->hw_atomic_mutex); } EXPORT_SYMBOL_GPL(mt76x0_chip_onoff); -static void mt76x0_reset_csr_bbp(struct mt76x0_dev *dev) +static void mt76x0_reset_csr_bbp(struct mt76x02_dev *dev) { mt76_wr(dev, MT_MAC_SYS_CTRL, MT_MAC_SYS_CTRL_RESET_CSR | @@ -116,7 +109,7 @@ static void mt76x0_reset_csr_bbp(struct mt76x0_dev *dev) mt76_wr_rp(dev, MT_MCU_MEMMAP_WLAN, \ tab, ARRAY_SIZE(tab)) -static int mt76x0_init_bbp(struct mt76x0_dev *dev) +static int mt76x0_init_bbp(struct mt76x02_dev *dev) { int ret, i; @@ -139,13 +132,13 @@ static int mt76x0_init_bbp(struct mt76x0_dev *dev) return 0; } -static void mt76x0_init_mac_registers(struct mt76x0_dev *dev) +static void mt76x0_init_mac_registers(struct mt76x02_dev *dev) { u32 reg; RANDOM_WRITE(dev, common_mac_reg_table); - mt76x02_set_beacon_offsets(&dev->mt76); + mt76x02_set_beacon_offsets(dev); /* Enable PBF and MAC clock SYS_CTRL[11:10] = 0x3 */ RANDOM_WRITE(dev, mt76x0_mac_reg_table); @@ -172,15 +165,9 @@ static void mt76x0_init_mac_registers(struct mt76x0_dev *dev) reg &= ~0x000003FF; reg |= 0x00000201; mt76_wr(dev, MT_WMM_CTRL, reg); - - /* TODO: Probably not needed */ - mt76_wr(dev, 0x7028, 0); - mt76_wr(dev, 0x7010, 0); - mt76_wr(dev, 0x7024, 0); - msleep(10); } -static int mt76x0_init_wcid_mem(struct mt76x0_dev *dev) +static int mt76x0_init_wcid_mem(struct mt76x02_dev *dev) { u32 *vals; int i; @@ -199,14 +186,14 @@ static int mt76x0_init_wcid_mem(struct mt76x0_dev *dev) return 0; } -static void mt76x0_init_key_mem(struct mt76x0_dev *dev) +static void mt76x0_init_key_mem(struct mt76x02_dev *dev) { u32 vals[4] = {}; mt76_wr_copy(dev, MT_SKEY_MODE_BASE_0, vals, ARRAY_SIZE(vals)); } -static int mt76x0_init_wcid_attr_mem(struct mt76x0_dev *dev) +static int mt76x0_init_wcid_attr_mem(struct mt76x02_dev *dev) { u32 *vals; int i; @@ -223,7 +210,7 @@ static int mt76x0_init_wcid_attr_mem(struct mt76x0_dev *dev) return 0; } -static void mt76x0_reset_counters(struct mt76x0_dev *dev) +static void 
mt76x0_reset_counters(struct mt76x02_dev *dev) { mt76_rr(dev, MT_RX_STAT_0); mt76_rr(dev, MT_RX_STAT_1); @@ -233,7 +220,7 @@ static void mt76x0_reset_counters(struct mt76x0_dev *dev) mt76_rr(dev, MT_TX_STA_2); } -int mt76x0_mac_start(struct mt76x0_dev *dev) +int mt76x0_mac_start(struct mt76x02_dev *dev) { mt76_wr(dev, MT_MAC_SYS_CTRL, MT_MAC_SYS_CTRL_ENABLE_TX); @@ -248,7 +235,7 @@ int mt76x0_mac_start(struct mt76x0_dev *dev) } EXPORT_SYMBOL_GPL(mt76x0_mac_start); -void mt76x0_mac_stop(struct mt76x0_dev *dev) +void mt76x0_mac_stop(struct mt76x02_dev *dev) { int i = 200, ok = 0; @@ -281,7 +268,7 @@ void mt76x0_mac_stop(struct mt76x0_dev *dev) } EXPORT_SYMBOL_GPL(mt76x0_mac_stop); -int mt76x0_init_hardware(struct mt76x0_dev *dev) +int mt76x0_init_hardware(struct mt76x02_dev *dev) { int ret; @@ -293,7 +280,7 @@ int mt76x0_init_hardware(struct mt76x0_dev *dev) return -ETIMEDOUT; mt76x0_reset_csr_bbp(dev); - ret = mt76x02_mcu_function_select(&dev->mt76, Q_SELECT, 1, false); + ret = mt76x02_mcu_function_select(dev, Q_SELECT, 1, false); if (ret) return ret; @@ -335,12 +322,12 @@ int mt76x0_init_hardware(struct mt76x0_dev *dev) } EXPORT_SYMBOL_GPL(mt76x0_init_hardware); -struct mt76x0_dev * +struct mt76x02_dev * mt76x0_alloc_device(struct device *pdev, const struct mt76_driver_ops *drv_ops, const struct ieee80211_ops *ops) { - struct mt76x0_dev *dev; + struct mt76x02_dev *dev; struct mt76_dev *mdev; mdev = mt76_alloc_device(sizeof(*dev), ops); @@ -350,18 +337,15 @@ mt76x0_alloc_device(struct device *pdev, mdev->dev = pdev; mdev->drv = drv_ops; - dev = container_of(mdev, struct mt76x0_dev, mt76); - mutex_init(&dev->reg_atomic_mutex); - mutex_init(&dev->hw_atomic_mutex); - spin_lock_init(&dev->mac_lock); - spin_lock_init(&dev->con_mon_lock); + dev = container_of(mdev, struct mt76x02_dev, mt76); + mutex_init(&dev->phy_mutex); atomic_set(&dev->avg_ampdu_len, 1); return dev; } EXPORT_SYMBOL_GPL(mt76x0_alloc_device); -int mt76x0_register_device(struct mt76x0_dev *dev) +int mt76x0_register_device(struct mt76x02_dev *dev) { struct mt76_dev *mdev = &dev->mt76; struct ieee80211_hw *hw = mdev->hw; @@ -384,7 +368,10 @@ int mt76x0_register_device(struct mt76x0_dev *dev) hw->max_rates = 1; hw->max_report_rates = 7; hw->max_rate_tries = 1; - hw->extra_tx_headroom = sizeof(struct mt76x02_txwi) + 4 + 2; + hw->extra_tx_headroom = 2; + if (mt76_is_usb(dev)) + hw->extra_tx_headroom += sizeof(struct mt76x02_txwi) + + MT_DMA_HDR_LEN; hw->sta_data_size = sizeof(struct mt76x02_sta); hw->vif_data_size = sizeof(struct mt76x02_vif); diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/initvals.h b/drivers/net/wireless/mediatek/mt76/mt76x0/initvals.h index 6f26dc6dabde..236dce6860b4 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x0/initvals.h +++ b/drivers/net/wireless/mediatek/mt76/mt76x0/initvals.h @@ -83,7 +83,8 @@ static const struct mt76_reg_pair mt76x0_mac_reg_table[] = { { MT_LDO_CTRL_1, 0x6B006464 }, { MT_HT_BASIC_RATE, 0x00004003 }, { MT_HT_CTRL_CFG, 0x000001FF }, - { MT_TXOP_HLDR_ET, 0x00000000 } + { MT_TXOP_HLDR_ET, 0x00000000 }, + { MT_PN_PAD_MODE, 0x00000003 }, }; static const struct mt76_reg_pair mt76x0_bbp_init_tab[] = { diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/mac.c b/drivers/net/wireless/mediatek/mt76/mt76x0/mac.c index f55734a922aa..7a422c590211 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x0/mac.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x0/mac.c @@ -13,13 +13,13 @@ * GNU General Public License for more details. 
*/ +#include <linux/etherdevice.h> + #include "mt76x0.h" #include "trace.h" -#include "../mt76x02_util.h" -#include <linux/etherdevice.h> -void mt76x0_mac_set_protection(struct mt76x0_dev *dev, bool legacy_prot, - int ht_mode) +void mt76x0_mac_set_protection(struct mt76x02_dev *dev, bool legacy_prot, + int ht_mode) { int mode = ht_mode & IEEE80211_HT_OP_MODE_PROTECTION; bool non_gf = !!(ht_mode & IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT); @@ -77,7 +77,7 @@ void mt76x0_mac_set_protection(struct mt76x0_dev *dev, bool legacy_prot, mt76_wr(dev, MT_CCK_PROT_CFG + i * 4, prot[i]); } -void mt76x0_mac_set_short_preamble(struct mt76x0_dev *dev, bool short_preamb) +void mt76x0_mac_set_short_preamble(struct mt76x02_dev *dev, bool short_preamb) { if (short_preamb) mt76_set(dev, MT_AUTO_RSP_CFG, MT_AUTO_RSP_PREAMB_SHORT); @@ -85,7 +85,7 @@ void mt76x0_mac_set_short_preamble(struct mt76x0_dev *dev, bool short_preamb) mt76_clear(dev, MT_AUTO_RSP_CFG, MT_AUTO_RSP_PREAMB_SHORT); } -void mt76x0_mac_config_tsf(struct mt76x0_dev *dev, bool enable, int interval) +void mt76x0_mac_config_tsf(struct mt76x02_dev *dev, bool enable, int interval) { u32 val = mt76_rr(dev, MT_BEACON_TIME_CFG); @@ -105,7 +105,7 @@ void mt76x0_mac_config_tsf(struct mt76x0_dev *dev, bool enable, int interval) MT_BEACON_TIME_CFG_TBTT_EN; } -static void mt76x0_check_mac_err(struct mt76x0_dev *dev) +static void mt76x0_check_mac_err(struct mt76x02_dev *dev) { u32 val = mt76_rr(dev, 0x10f4); @@ -120,7 +120,7 @@ static void mt76x0_check_mac_err(struct mt76x0_dev *dev) } void mt76x0_mac_work(struct work_struct *work) { - struct mt76x0_dev *dev = container_of(work, struct mt76x0_dev, + struct mt76x02_dev *dev = container_of(work, struct mt76x02_dev, mac_work.work); struct { u32 addr_base; @@ -171,7 +171,7 @@ void mt76x0_mac_work(struct work_struct *work) ieee80211_queue_delayed_work(dev->mt76.hw, &dev->mac_work, 10 * HZ); } -void mt76x0_mac_set_ampdu_factor(struct mt76x0_dev *dev) +void mt76x0_mac_set_ampdu_factor(struct mt76x02_dev *dev) { struct ieee80211_sta *sta; struct mt76_wcid *wcid; @@ -195,67 +195,3 @@ void mt76x0_mac_set_ampdu_factor(struct mt76x0_dev *dev) mt76_wr(dev, MT_MAX_LEN_CFG, 0xa0fff | FIELD_PREP(MT_MAX_LEN_CFG_AMPDU, min_factor)); } - -static void -mt76x0_rx_monitor_beacon(struct mt76x0_dev *dev, struct mt76x02_rxwi *rxwi, - u16 rate, int rssi) -{ - dev->bcn_phy_mode = FIELD_GET(MT_RXWI_RATE_PHY, rate); - dev->avg_rssi = ((dev->avg_rssi * 15) / 16 + (rssi << 8)) / 256; -} - -static int -mt76x0_rx_is_our_beacon(struct mt76x0_dev *dev, u8 *data) -{ - struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)data; - - return ieee80211_is_beacon(hdr->frame_control) && - ether_addr_equal(hdr->addr2, dev->ap_bssid); -} - -u32 mt76x0_mac_process_rx(struct mt76x0_dev *dev, struct sk_buff *skb, - void *rxi) -{ - struct mt76_rx_status *status = (struct mt76_rx_status *) skb->cb; - struct mt76x02_rxwi *rxwi = rxi; - u32 len, ctl = le32_to_cpu(rxwi->ctl); - u16 rate = le16_to_cpu(rxwi->rate); - int rssi, pad_len = 0; - - len = FIELD_GET(MT_RXWI_CTL_MPDU_LEN, ctl); - if (WARN_ON(len < 10)) - return 0; - - if (rxwi->rxinfo & cpu_to_le32(MT_RXINFO_DECRYPT)) { - status->flag |= RX_FLAG_DECRYPTED; - status->flag |= RX_FLAG_IV_STRIPPED | RX_FLAG_MMIC_STRIPPED; - } - - if (rxwi->rxinfo & MT_RXINFO_L2PAD) - pad_len += 2; - - mt76x02_remove_hdr_pad(skb, pad_len); - - pskb_trim(skb, len); - status->chains = BIT(0); - rssi = mt76x0_phy_get_rssi(dev, rxwi); - status->chain_signal[0] = status->signal = rssi; - status->freq = 
dev->mt76.chandef.chan->center_freq; - status->band = dev->mt76.chandef.chan->band; - - mt76x02_mac_process_rate(status, rate); - - spin_lock_bh(&dev->con_mon_lock); - if (mt76x0_rx_is_our_beacon(dev, skb->data)) { - mt76x0_rx_monitor_beacon(dev, rxwi, rate, rssi); - } else if (rxwi->rxinfo & cpu_to_le32(MT_RXINFO_UNICAST)) { - if (dev->avg_rssi == 0) - dev->avg_rssi = rssi; - else - dev->avg_rssi = (dev->avg_rssi * 15) / 16 + rssi / 16; - - } - spin_unlock_bh(&dev->con_mon_lock); - - return len; -} diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/mac.h b/drivers/net/wireless/mediatek/mt76/mt76x0/mac.h deleted file mode 100644 index b887693a56b6..000000000000 --- a/drivers/net/wireless/mediatek/mt76/mt76x0/mac.h +++ /dev/null @@ -1,20 +0,0 @@ -/* - * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org> - * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 - * as published by the Free Software Foundation - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - */ - -#ifndef __MT76_MAC_H -#define __MT76_MAC_H - -u32 mt76x0_mac_process_rx(struct mt76x0_dev *dev, struct sk_buff *skb, - void *rxi); -#endif diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/main.c b/drivers/net/wireless/mediatek/mt76/mt76x0/main.c index c3cea52ec0dc..9273d2d2764a 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x0/main.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x0/main.c @@ -13,21 +13,33 @@ * GNU General Public License for more details. 
*/ -#include "mt76x0.h" -#include "mac.h" -#include "../mt76x02_util.h" #include <linux/etherdevice.h> +#include "mt76x0.h" + +static int +mt76x0_set_channel(struct mt76x02_dev *dev, struct cfg80211_chan_def *chandef) +{ + int ret; + + cancel_delayed_work_sync(&dev->cal_work); + + mt76_set_channel(&dev->mt76); + ret = mt76x0_phy_set_channel(dev, chandef); + mt76_txq_schedule_all(&dev->mt76); + + return ret; +} int mt76x0_config(struct ieee80211_hw *hw, u32 changed) { - struct mt76x0_dev *dev = hw->priv; + struct mt76x02_dev *dev = hw->priv; int ret = 0; mutex_lock(&dev->mt76.mutex); if (changed & IEEE80211_CONF_CHANGE_CHANNEL) { ieee80211_stop_queues(hw); - ret = mt76x0_phy_set_channel(dev, &hw->conf.chandef); + ret = mt76x0_set_channel(dev, &hw->conf.chandef); ieee80211_wake_queues(hw); } @@ -54,7 +66,7 @@ int mt76x0_config(struct ieee80211_hw *hw, u32 changed) EXPORT_SYMBOL_GPL(mt76x0_config); static void -mt76x0_addr_wr(struct mt76x0_dev *dev, const u32 offset, const u8 *addr) +mt76x0_addr_wr(struct mt76x02_dev *dev, const u32 offset, const u8 *addr) { mt76_wr(dev, offset, get_unaligned_le32(addr)); mt76_wr(dev, offset + 4, addr[4] | addr[5] << 8); @@ -64,13 +76,10 @@ void mt76x0_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_bss_conf *info, u32 changed) { - struct mt76x0_dev *dev = hw->priv; + struct mt76x02_dev *dev = hw->priv; mutex_lock(&dev->mt76.mutex); - if (changed & BSS_CHANGED_ASSOC) - mt76x0_phy_con_cal_onoff(dev, info); - if (changed & BSS_CHANGED_BSSID) { mt76x0_addr_wr(dev, MT_MAC_BSSID_DW0, info->bssid); @@ -117,10 +126,8 @@ EXPORT_SYMBOL_GPL(mt76x0_bss_info_changed); void mt76x0_sw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif, const u8 *mac_addr) { - struct mt76x0_dev *dev = hw->priv; + struct mt76x02_dev *dev = hw->priv; - cancel_delayed_work_sync(&dev->cal_work); - mt76x0_agc_save(dev); set_bit(MT76_SCANNING, &dev->mt76.state); } EXPORT_SYMBOL_GPL(mt76x0_sw_scan); @@ -128,19 +135,15 @@ EXPORT_SYMBOL_GPL(mt76x0_sw_scan); void mt76x0_sw_scan_complete(struct ieee80211_hw *hw, struct ieee80211_vif *vif) { - struct mt76x0_dev *dev = hw->priv; + struct mt76x02_dev *dev = hw->priv; - mt76x0_agc_restore(dev); clear_bit(MT76_SCANNING, &dev->mt76.state); - - ieee80211_queue_delayed_work(dev->mt76.hw, &dev->cal_work, - MT_CALIBRATE_INTERVAL); } EXPORT_SYMBOL_GPL(mt76x0_sw_scan_complete); int mt76x0_set_rts_threshold(struct ieee80211_hw *hw, u32 value) { - struct mt76x0_dev *dev = hw->priv; + struct mt76x02_dev *dev = hw->priv; mt76_rmw_field(dev, MT_TX_RTS_CFG, MT_TX_RTS_CFG_THRESH, value); diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/mcu.h b/drivers/net/wireless/mediatek/mt76/mt76x0/mcu.h index 297bf6b94d8c..3b34e1d2769f 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x0/mcu.h +++ b/drivers/net/wireless/mediatek/mt76/mt76x0/mcu.h @@ -17,7 +17,7 @@ #include "../mt76x02_mcu.h" -struct mt76x0_dev; +struct mt76x02_dev; #define MT_MCU_IVB_SIZE 0x40 #define MT_MCU_DLM_OFFSET 0x80000 @@ -39,11 +39,14 @@ enum mcu_calibrate { MCU_CAL_TXDCOC, MCU_CAL_RX_GROUP_DELAY, MCU_CAL_TX_GROUP_DELAY, + MCU_CAL_VCO, + MCU_CAL_NO_SIGNAL = 0xfe, + MCU_CAL_FULL = 0xff, }; -int mt76x0e_mcu_init(struct mt76x0_dev *dev); -int mt76x0u_mcu_init(struct mt76x0_dev *dev); -static inline int mt76x0_firmware_running(struct mt76x0_dev *dev) +int mt76x0e_mcu_init(struct mt76x02_dev *dev); +int mt76x0u_mcu_init(struct mt76x02_dev *dev); +static inline int mt76x0_firmware_running(struct mt76x02_dev *dev) { return mt76_rr(dev, MT_MCU_COM_REG0) == 1; } 
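The calibration IDs added to enum mcu_calibrate above (MCU_CAL_VCO, MCU_CAL_FULL) are driven through mt76x02_mcu_calibrate(), whose (dev, type, value, wait) calling convention can be seen in the phy.c hunks further down. A minimal sketch of the power-on sequence; example_power_on_cal() is a hypothetical caller for illustration, not part of this patch:

/* hypothetical caller; mirrors the power-on branch of
 * mt76x0_phy_calibrate() introduced later in this patch
 * (needs linux/delay.h for usleep_range)
 */
static void example_power_on_cal(struct mt76x02_dev *dev, u8 channel)
{
	/* resistor calibration first, then retune the VCO for the
	 * currently configured channel; neither call waits for a
	 * completion event (last argument false)
	 */
	mt76x02_mcu_calibrate(dev, MCU_CAL_R, 0, false);
	mt76x02_mcu_calibrate(dev, MCU_CAL_VCO, channel, false);
	usleep_range(10, 20);
}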
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/mt76x0.h b/drivers/net/wireless/mediatek/mt76/mt76x0/mt76x0.h index a37dbf944b15..2187bafaf2e9 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x0/mt76x0.h +++ b/drivers/net/wireless/mediatek/mt76/mt76x0/mt76x0.h @@ -25,114 +25,33 @@ #include <net/mac80211.h> #include <linux/debugfs.h> -#include "../mt76.h" -#include "../mt76x02_regs.h" -#include "../mt76x02_mac.h" +#include "../mt76x02.h" #include "eeprom.h" #define MT_CALIBRATE_INTERVAL (4 * HZ) -#define MT_FREQ_CAL_INIT_DELAY (30 * HZ) -#define MT_FREQ_CAL_CHECK_INTERVAL (10 * HZ) -#define MT_FREQ_CAL_ADJ_INTERVAL (HZ / 2) - -#define MT_BBP_REG_VERSION 0x00 - #define MT_USB_AGGR_SIZE_LIMIT 21 /* * 1024B */ #define MT_USB_AGGR_TIMEOUT 0x80 /* * 33ns */ -struct mac_stats { - u64 rx_stat[6]; - u64 tx_stat[6]; - u64 aggr_stat[2]; - u64 aggr_n[32]; - u64 zero_len_del[2]; -}; - -struct mt76x0_eeprom_params; - -#define MT_EE_TEMPERATURE_SLOPE 39 -#define MT_FREQ_OFFSET_INVALID -128 - -/* addr req mask */ -#define MT_VEND_TYPE_EEPROM BIT(31) -#define MT_VEND_TYPE_CFG BIT(30) -#define MT_VEND_TYPE_MASK (MT_VEND_TYPE_EEPROM | MT_VEND_TYPE_CFG) - -#define MT_VEND_ADDR(type, n) (MT_VEND_TYPE_##type | (n)) - -enum mt_bw { - MT_BW_20, - MT_BW_40, -}; - -/** - * struct mt76x0_dev - adapter structure - * @lock: protects @wcid->tx_rate. - * @mac_lock: locks out mac80211's tx status and rx paths. - * @con_mon_lock: protects @ap_bssid, @bcn_*, @avg_rssi. - * @mutex: ensures exclusive access from mac80211 callbacks. - * @reg_atomic_mutex: ensures atomicity of indirect register accesses - * (accesses to RF and BBP). - * @hw_atomic_mutex: ensures exclusive access to HW during critical - * operations (power management, channel switch). - */ -struct mt76x0_dev { - struct mt76_dev mt76; /* must be first */ - - u8 data[32]; - - struct delayed_work cal_work; - struct delayed_work mac_work; - - spinlock_t mac_lock; - - struct mt76x0_caldata caldata; - - struct mutex reg_atomic_mutex; - struct mutex hw_atomic_mutex; - - atomic_t avg_ampdu_len; - - /* Connection monitoring things */ - spinlock_t con_mon_lock; - u8 ap_bssid[ETH_ALEN]; - - s8 bcn_freq_off; - u8 bcn_phy_mode; - - int avg_rssi; /* starts at 0 and converges */ - - u8 agc_save; - - bool no_2ghz; - - struct mac_stats stats; -}; - -static inline bool is_mt7610e(struct mt76x0_dev *dev) +static inline bool is_mt7610e(struct mt76x02_dev *dev) { /* TODO */ return false; } -void mt76x0_init_debugfs(struct mt76x0_dev *dev); - -/* Compatibility with mt76 */ -#define mt76_rmw_field(_dev, _reg, _field, _val) \ - mt76_rmw(_dev, _reg, _field, FIELD_PREP(_field, _val)) +void mt76x0_init_debugfs(struct mt76x02_dev *dev); /* Init */ -struct mt76x0_dev * +struct mt76x02_dev * mt76x0_alloc_device(struct device *pdev, const struct mt76_driver_ops *drv_ops, const struct ieee80211_ops *ops); -int mt76x0_init_hardware(struct mt76x0_dev *dev); -int mt76x0_register_device(struct mt76x0_dev *dev); -void mt76x0_chip_onoff(struct mt76x0_dev *dev, bool enable, bool reset); +int mt76x0_init_hardware(struct mt76x02_dev *dev); +int mt76x0_register_device(struct mt76x02_dev *dev); +void mt76x0_chip_onoff(struct mt76x02_dev *dev, bool enable, bool reset); -int mt76x0_mac_start(struct mt76x0_dev *dev); -void mt76x0_mac_stop(struct mt76x0_dev *dev); +int mt76x0_mac_start(struct mt76x02_dev *dev); +void mt76x0_mac_stop(struct mt76x02_dev *dev); int mt76x0_config(struct ieee80211_hw *hw, u32 changed); void mt76x0_bss_info_changed(struct ieee80211_hw *hw, @@ -145,35 +64,20 @@ void 
mt76x0_sw_scan_complete(struct ieee80211_hw *hw, int mt76x0_set_rts_threshold(struct ieee80211_hw *hw, u32 value); /* PHY */ -void mt76x0_phy_init(struct mt76x0_dev *dev); -int mt76x0_wait_bbp_ready(struct mt76x0_dev *dev); -void mt76x0_agc_save(struct mt76x0_dev *dev); -void mt76x0_agc_restore(struct mt76x0_dev *dev); -int mt76x0_phy_set_channel(struct mt76x0_dev *dev, +void mt76x0_phy_init(struct mt76x02_dev *dev); +int mt76x0_wait_bbp_ready(struct mt76x02_dev *dev); +int mt76x0_phy_set_channel(struct mt76x02_dev *dev, struct cfg80211_chan_def *chandef); -void mt76x0_phy_recalibrate_after_assoc(struct mt76x0_dev *dev); -int mt76x0_phy_get_rssi(struct mt76x0_dev *dev, struct mt76x02_rxwi *rxwi); -void mt76x0_phy_con_cal_onoff(struct mt76x0_dev *dev, - struct ieee80211_bss_conf *info); -void mt76x0_phy_set_txpower(struct mt76x0_dev *dev); +void mt76x0_phy_recalibrate_after_assoc(struct mt76x02_dev *dev); +void mt76x0_phy_set_txpower(struct mt76x02_dev *dev); +void mt76x0_phy_calibrate(struct mt76x02_dev *dev, bool power_on); /* MAC */ void mt76x0_mac_work(struct work_struct *work); -void mt76x0_mac_set_protection(struct mt76x0_dev *dev, bool legacy_prot, +void mt76x0_mac_set_protection(struct mt76x02_dev *dev, bool legacy_prot, int ht_mode); -void mt76x0_mac_set_short_preamble(struct mt76x0_dev *dev, bool short_preamb); -void mt76x0_mac_config_tsf(struct mt76x0_dev *dev, bool enable, int interval); -void mt76x0_mac_set_ampdu_factor(struct mt76x0_dev *dev); - -/* TX */ -void mt76x0_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control, - struct sk_buff *skb); -struct mt76x02_txwi * -mt76x0_push_txwi(struct mt76x0_dev *dev, struct sk_buff *skb, - struct ieee80211_sta *sta, struct mt76_wcid *wcid, - int pkt_len); - -void mt76x0_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q, - struct sk_buff *skb); +void mt76x0_mac_set_short_preamble(struct mt76x02_dev *dev, bool short_preamb); +void mt76x0_mac_config_tsf(struct mt76x02_dev *dev, bool enable, int interval); +void mt76x0_mac_set_ampdu_factor(struct mt76x02_dev *dev); #endif diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c b/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c index 876291dd3c1e..522c86059bcb 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c @@ -20,16 +20,15 @@ #include "mt76x0.h" #include "mcu.h" -#include "../mt76x02_dma.h" -#include "../mt76x02_util.h" static int mt76x0e_start(struct ieee80211_hw *hw) { - struct mt76x0_dev *dev = hw->priv; + struct mt76x02_dev *dev = hw->priv; mutex_lock(&dev->mt76.mutex); - mt76x02_mac_start(&dev->mt76); + mt76x02_mac_start(dev); + mt76x0_phy_calibrate(dev, true); ieee80211_queue_delayed_work(dev->mt76.hw, &dev->mac_work, MT_CALIBRATE_INTERVAL); ieee80211_queue_delayed_work(dev->mt76.hw, &dev->cal_work, @@ -41,13 +40,8 @@ static int mt76x0e_start(struct ieee80211_hw *hw) return 0; } -static void mt76x0e_stop(struct ieee80211_hw *hw) +static void mt76x0e_stop_hw(struct mt76x02_dev *dev) { - struct mt76x0_dev *dev = hw->priv; - - mutex_lock(&dev->mt76.mutex); - - clear_bit(MT76_STATE_RUNNING, &dev->mt76.state); cancel_delayed_work_sync(&dev->cal_work); cancel_delayed_work_sync(&dev->mac_work); @@ -62,21 +56,38 @@ static void mt76x0e_stop(struct ieee80211_hw *hw) 0, 1000)) dev_warn(dev->mt76.dev, "TX DMA did not stop\n"); mt76_clear(dev, MT_WPDMA_GLO_CFG, MT_WPDMA_GLO_CFG_RX_DMA_EN); +} +static void mt76x0e_stop(struct ieee80211_hw *hw) +{ + struct mt76x02_dev *dev = hw->priv; + + 
mutex_lock(&dev->mt76.mutex); + clear_bit(MT76_STATE_RUNNING, &dev->mt76.state); + mt76x0e_stop_hw(dev); mutex_unlock(&dev->mt76.mutex); } static const struct ieee80211_ops mt76x0e_ops = { - .tx = mt76x0_tx, + .tx = mt76x02_tx, .start = mt76x0e_start, .stop = mt76x0e_stop, - .config = mt76x0_config, .add_interface = mt76x02_add_interface, .remove_interface = mt76x02_remove_interface, + .config = mt76x0_config, .configure_filter = mt76x02_configure_filter, + .sta_add = mt76x02_sta_add, + .sta_remove = mt76x02_sta_remove, + .set_key = mt76x02_set_key, + .conf_tx = mt76x02_conf_tx, + .sw_scan_start = mt76x0_sw_scan, + .sw_scan_complete = mt76x0_sw_scan_complete, + .ampdu_action = mt76x02_ampdu_action, + .sta_rate_tbl_update = mt76x02_sta_rate_tbl_update, + .wake_tx_queue = mt76_wake_tx_queue, }; -static int mt76x0e_register_device(struct mt76x0_dev *dev) +static int mt76x0e_register_device(struct mt76x02_dev *dev) { int err; @@ -84,12 +95,12 @@ static int mt76x0e_register_device(struct mt76x0_dev *dev) if (!mt76x02_wait_for_mac(&dev->mt76)) return -ETIMEDOUT; - mt76x02_dma_disable(&dev->mt76); + mt76x02_dma_disable(dev); err = mt76x0e_mcu_init(dev); if (err < 0) return err; - err = mt76x02_dma_init(&dev->mt76); + err = mt76x02_dma_init(dev); if (err < 0) return err; @@ -101,30 +112,36 @@ static int mt76x0e_register_device(struct mt76x0_dev *dev) u16 val; mt76_clear(dev, MT_COEXCFG0, BIT(0)); - val = mt76x02_eeprom_get(&dev->mt76, MT_EE_NIC_CONF_0); - if (val & MT_EE_NIC_CONF_0_PA_IO_CURRENT) { - u32 data; - - /* set external external PA I/O - * current to 16mA - */ - data = mt76_rr(dev, 0x11c); - val |= 0xc03; - mt76_wr(dev, 0x11c, val); - } + + val = mt76x02_eeprom_get(dev, MT_EE_NIC_CONF_0); + if (!(val & MT_EE_NIC_CONF_0_PA_IO_CURRENT)) + mt76_set(dev, MT_XO_CTRL7, 0xc03); } mt76_clear(dev, 0x110, BIT(9)); mt76_set(dev, MT_MAX_LEN_CFG, BIT(13)); + err = mt76x0_register_device(dev); + if (err < 0) + return err; + + set_bit(MT76_STATE_INITIALIZED, &dev->mt76.state); + return 0; } static int mt76x0e_probe(struct pci_dev *pdev, const struct pci_device_id *id) { - struct mt76x0_dev *dev; - int ret = -ENODEV; + static const struct mt76_driver_ops drv_ops = { + .txwi_size = sizeof(struct mt76x02_txwi), + .tx_prepare_skb = mt76x02_tx_prepare_skb, + .tx_complete_skb = mt76x02_tx_complete_skb, + .rx_skb = mt76x02_queue_rx_skb, + .rx_poll_complete = mt76x02_rx_poll_complete, + }; + struct mt76x02_dev *dev; + int ret; ret = pcim_enable_device(pdev); if (ret) @@ -140,7 +157,7 @@ mt76x0e_probe(struct pci_dev *pdev, const struct pci_device_id *id) if (ret) return ret; - dev = mt76x0_alloc_device(&pdev->dev, NULL, &mt76x0e_ops); + dev = mt76x0_alloc_device(&pdev->dev, &drv_ops, &mt76x0e_ops); if (!dev) return -ENOMEM; @@ -149,6 +166,11 @@ mt76x0e_probe(struct pci_dev *pdev, const struct pci_device_id *id) dev->mt76.rev = mt76_rr(dev, MT_ASIC_VERSION); dev_info(dev->mt76.dev, "ASIC revision: %08x\n", dev->mt76.rev); + ret = devm_request_irq(dev->mt76.dev, pdev->irq, mt76x02_irq_handler, + IRQF_SHARED, KBUILD_MODNAME, dev); + if (ret) + goto error; + ret = mt76x0e_register_device(dev); if (ret < 0) goto error; @@ -160,12 +182,23 @@ error: return ret; } +static void mt76x0e_cleanup(struct mt76x02_dev *dev) +{ + clear_bit(MT76_STATE_INITIALIZED, &dev->mt76.state); + mt76x0_chip_onoff(dev, false, false); + mt76x0e_stop_hw(dev); + mt76x02_dma_cleanup(dev); + mt76x02_mcu_cleanup(dev); +} + static void mt76x0e_remove(struct pci_dev *pdev) { struct mt76_dev *mdev = pci_get_drvdata(pdev); + struct mt76x02_dev *dev 
= container_of(mdev, struct mt76x02_dev, mt76); mt76_unregister_device(mdev); + mt76x0e_cleanup(dev); ieee80211_free_hw(mdev->hw); } diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/pci_mcu.c b/drivers/net/wireless/mediatek/mt76/mt76x0/pci_mcu.c index e3cf049314bb..569861289aa5 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x0/pci_mcu.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x0/pci_mcu.c @@ -24,7 +24,7 @@ #define MT_MCU_IVB_ADDR (MT_MCU_ILM_ADDR + 0x54000 - MT_MCU_IVB_SIZE) -static int mt76x0e_load_firmware(struct mt76x0_dev *dev) +static int mt76x0e_load_firmware(struct mt76x02_dev *dev) { bool is_combo_chip = mt76_chip(&dev->mt76) != 0x7610; u32 val, ilm_len, dlm_len, offset = 0; @@ -116,6 +116,7 @@ static int mt76x0e_load_firmware(struct mt76x0_dev *dev) goto out; } + mt76x02_set_ethtool_fwver(dev, hdr); dev_dbg(dev->mt76.dev, "Firmware running!\n"); out: @@ -126,7 +127,7 @@ out: return err; } -int mt76x0e_mcu_init(struct mt76x0_dev *dev) +int mt76x0e_mcu_init(struct mt76x02_dev *dev) { static const struct mt76_mcu_ops mt76x0e_mcu_ops = { .mcu_msg_alloc = mt76x02_mcu_msg_alloc, diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/phy.c b/drivers/net/wireless/mediatek/mt76/mt76x0/phy.c index 4fd2c65e196a..cf024950e0ed 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x0/phy.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x0/phy.c @@ -14,6 +14,9 @@ * GNU General Public License for more details. */ +#include <linux/kernel.h> +#include <linux/etherdevice.h> + #include "mt76x0.h" #include "mcu.h" #include "eeprom.h" @@ -23,10 +26,8 @@ #include "initvals_phy.h" #include "../mt76x02_phy.h" -#include <linux/etherdevice.h> - static int -mt76x0_rf_csr_wr(struct mt76x0_dev *dev, u32 offset, u8 value) +mt76x0_rf_csr_wr(struct mt76x02_dev *dev, u32 offset, u8 value) { int ret = 0; u8 bank, reg; @@ -37,10 +38,10 @@ mt76x0_rf_csr_wr(struct mt76x0_dev *dev, u32 offset, u8 value) bank = MT_RF_BANK(offset); reg = MT_RF_REG(offset); - if (WARN_ON_ONCE(reg > 64) || WARN_ON_ONCE(bank) > 8) + if (WARN_ON_ONCE(reg > 127) || WARN_ON_ONCE(bank > 8)) return -EINVAL; - mutex_lock(&dev->reg_atomic_mutex); + mutex_lock(&dev->phy_mutex); if (!mt76_poll(dev, MT_RF_CSR_CFG, MT_RF_CSR_CFG_KICK, 0, 100)) { ret = -ETIMEDOUT; @@ -55,7 +56,7 @@ mt76x0_rf_csr_wr(struct mt76x0_dev *dev, u32 offset, u8 value) MT_RF_CSR_CFG_KICK); trace_mt76x0_rf_write(&dev->mt76, bank, offset, value); out: - mutex_unlock(&dev->reg_atomic_mutex); + mutex_unlock(&dev->phy_mutex); if (ret < 0) dev_err(dev->mt76.dev, "Error: RF write %d:%d failed:%d!!\n", @@ -64,8 +65,7 @@ out: return ret; } -static int -mt76x0_rf_csr_rr(struct mt76x0_dev *dev, u32 offset) +static int mt76x0_rf_csr_rr(struct mt76x02_dev *dev, u32 offset) { int ret = -ETIMEDOUT; u32 val; @@ -77,10 +77,10 @@ mt76x0_rf_csr_rr(struct mt76x0_dev *dev, u32 offset) bank = MT_RF_BANK(offset); reg = MT_RF_REG(offset); - if (WARN_ON_ONCE(reg > 64) || WARN_ON_ONCE(bank) > 8) + if (WARN_ON_ONCE(reg > 127) || WARN_ON_ONCE(bank > 8)) return -EINVAL; - mutex_lock(&dev->reg_atomic_mutex); + mutex_lock(&dev->phy_mutex); if (!mt76_poll(dev, MT_RF_CSR_CFG, MT_RF_CSR_CFG_KICK, 0, 100)) goto out; @@ -100,7 +100,7 @@ mt76x0_rf_csr_rr(struct mt76x0_dev *dev, u32 offset) trace_mt76x0_rf_read(&dev->mt76, bank, offset, ret); } out: - mutex_unlock(&dev->reg_atomic_mutex); + mutex_unlock(&dev->phy_mutex); if (ret < 0) dev_err(dev->mt76.dev, "Error: RF read %d:%d failed:%d!!\n", @@ -110,36 +110,38 @@ out: } static int -rf_wr(struct mt76x0_dev *dev, u32 offset, u8 val) +rf_wr(struct 
mt76x02_dev *dev, u32 offset, u8 val) { - if (test_bit(MT76_STATE_MCU_RUNNING, &dev->mt76.state)) { + if (mt76_is_usb(dev)) { struct mt76_reg_pair pair = { .reg = offset, .value = val, }; + WARN_ON_ONCE(!test_bit(MT76_STATE_MCU_RUNNING, + &dev->mt76.state)); return mt76_wr_rp(dev, MT_MCU_MEMMAP_RF, &pair, 1); } else { - WARN_ON_ONCE(1); return mt76x0_rf_csr_wr(dev, offset, val); } } static int -rf_rr(struct mt76x0_dev *dev, u32 offset) +rf_rr(struct mt76x02_dev *dev, u32 offset) { int ret; u32 val; - if (test_bit(MT76_STATE_MCU_RUNNING, &dev->mt76.state)) { + if (mt76_is_usb(dev)) { struct mt76_reg_pair pair = { .reg = offset, }; + WARN_ON_ONCE(!test_bit(MT76_STATE_MCU_RUNNING, + &dev->mt76.state)); ret = mt76_rd_rp(dev, MT_MCU_MEMMAP_RF, &pair, 1); val = pair.value; } else { - WARN_ON_ONCE(1); ret = val = mt76x0_rf_csr_rr(dev, offset); } @@ -147,7 +149,7 @@ rf_rr(struct mt76x0_dev *dev, u32 offset) } static int -rf_rmw(struct mt76x0_dev *dev, u32 offset, u8 mask, u8 val) +rf_rmw(struct mt76x02_dev *dev, u32 offset, u8 mask, u8 val) { int ret; @@ -163,31 +165,43 @@ rf_rmw(struct mt76x0_dev *dev, u32 offset, u8 mask, u8 val) } static int -rf_set(struct mt76x0_dev *dev, u32 offset, u8 val) +rf_set(struct mt76x02_dev *dev, u32 offset, u8 val) { return rf_rmw(dev, offset, 0, val); } #if 0 static int -rf_clear(struct mt76x0_dev *dev, u32 offset, u8 mask) +rf_clear(struct mt76x02_dev *dev, u32 offset, u8 mask) { return rf_rmw(dev, offset, mask, 0); } #endif -#define RF_RANDOM_WRITE(dev, tab) \ - mt76_wr_rp(dev, MT_MCU_MEMMAP_RF, \ - tab, ARRAY_SIZE(tab)) +static void +mt76x0_rf_csr_wr_rp(struct mt76x02_dev *dev, const struct mt76_reg_pair *data, + int n) +{ + while (n-- > 0) { + mt76x0_rf_csr_wr(dev, data->reg, data->value); + data++; + } +} + +#define RF_RANDOM_WRITE(dev, tab) do { \ + if (mt76_is_mmio(dev)) \ + mt76x0_rf_csr_wr_rp(dev, tab, ARRAY_SIZE(tab)); \ + else \ + mt76_wr_rp(dev, MT_MCU_MEMMAP_RF, tab, ARRAY_SIZE(tab));\ +} while (0) -int mt76x0_wait_bbp_ready(struct mt76x0_dev *dev) +int mt76x0_wait_bbp_ready(struct mt76x02_dev *dev) { int i = 20; u32 val; do { val = mt76_rr(dev, MT_BBP(CORE, 0)); - printk("BBP version %08x\n", val); if (val && ~val) break; } while (--i); @@ -197,44 +211,11 @@ int mt76x0_wait_bbp_ready(struct mt76x0_dev *dev) return -EIO; } + dev_dbg(dev->mt76.dev, "BBP version %08x\n", val); return 0; } -static void -mt76x0_bbp_set_ctrlch(struct mt76x0_dev *dev, enum nl80211_chan_width width, - u8 ctrl) -{ - int core_val, agc_val; - - switch (width) { - case NL80211_CHAN_WIDTH_80: - core_val = 3; - agc_val = 7; - break; - case NL80211_CHAN_WIDTH_40: - core_val = 2; - agc_val = 3; - break; - default: - core_val = 0; - agc_val = 1; - break; - } - - mt76_rmw_field(dev, MT_BBP(CORE, 1), MT_BBP_CORE_R1_BW, core_val); - mt76_rmw_field(dev, MT_BBP(AGC, 0), MT_BBP_AGC_R0_BW, agc_val); - mt76_rmw_field(dev, MT_BBP(AGC, 0), MT_BBP_AGC_R0_CTRL_CHAN, ctrl); - mt76_rmw_field(dev, MT_BBP(TXBE, 0), MT_BBP_TXBE_R0_CTRL_CHAN, ctrl); -} - -int mt76x0_phy_get_rssi(struct mt76x0_dev *dev, struct mt76x02_rxwi *rxwi) -{ - struct mt76x0_caldata *caldata = &dev->caldata; - - return rxwi->rssi[0] + caldata->rssi_offset[0] - caldata->lna_gain; -} - -static void mt76x0_vco_cal(struct mt76x0_dev *dev, u8 channel) +static void mt76x0_vco_cal(struct mt76x02_dev *dev, u8 channel) { u8 val; @@ -291,14 +272,7 @@ static void mt76x0_vco_cal(struct mt76x0_dev *dev, u8 channel) } static void -mt76x0_mac_set_ctrlch(struct mt76x0_dev *dev, bool primary_upper) -{ - mt76_rmw_field(dev, MT_TX_BAND_CFG, 
MT_TX_BAND_CFG_UPPER_40M, - primary_upper); -} - -static void -mt76x0_phy_set_band(struct mt76x0_dev *dev, enum nl80211_band band) +mt76x0_phy_set_band(struct mt76x02_dev *dev, enum nl80211_band band) { switch (band) { case NL80211_BAND_2GHZ: @@ -307,9 +281,6 @@ mt76x0_phy_set_band(struct mt76x0_dev *dev, enum nl80211_band band) rf_wr(dev, MT_RF(5, 0), 0x45); rf_wr(dev, MT_RF(6, 0), 0x44); - mt76_set(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_2G); - mt76_clear(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_5G); - mt76_wr(dev, MT_TX_ALC_VGA3, 0x00050007); mt76_wr(dev, MT_TX0_RF_GAIN_CORR, 0x003E0002); break; @@ -319,9 +290,6 @@ mt76x0_phy_set_band(struct mt76x0_dev *dev, enum nl80211_band band) rf_wr(dev, MT_RF(5, 0), 0x44); rf_wr(dev, MT_RF(6, 0), 0x45); - mt76_clear(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_2G); - mt76_set(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_5G); - mt76_wr(dev, MT_TX_ALC_VGA3, 0x00000005); mt76_wr(dev, MT_TX0_RF_GAIN_CORR, 0x01010102); break; @@ -331,7 +299,7 @@ mt76x0_phy_set_band(struct mt76x0_dev *dev, enum nl80211_band band) } static void -mt76x0_phy_set_chan_rf_params(struct mt76x0_dev *dev, u8 channel, u16 rf_bw_band) +mt76x0_phy_set_chan_rf_params(struct mt76x02_dev *dev, u8 channel, u16 rf_bw_band) { u16 rf_band = rf_bw_band & 0xff00; u16 rf_bw = rf_bw_band & 0x00ff; @@ -483,7 +451,7 @@ mt76x0_phy_set_chan_rf_params(struct mt76x0_dev *dev, u8 channel, u16 rf_bw_band mt76_wr(dev, MT_RF_MISC, mac_reg); band = (rf_band & RF_G_BAND) ? NL80211_BAND_2GHZ : NL80211_BAND_5GHZ; - if (mt76x02_ext_pa_enabled(&dev->mt76, band)) { + if (mt76x02_ext_pa_enabled(dev, band)) { /* MT_RF_MISC (offset: 0x0518) [2]1'b1: enable external A band PA, 1'b0: disable external A band PA @@ -522,7 +490,7 @@ mt76x0_phy_set_chan_rf_params(struct mt76x0_dev *dev, u8 channel, u16 rf_bw_band } static void -mt76x0_phy_set_chan_bbp_params(struct mt76x0_dev *dev, u8 channel, u16 rf_bw_band) +mt76x0_phy_set_chan_bbp_params(struct mt76x02_dev *dev, u16 rf_bw_band) { int i; @@ -538,7 +506,7 @@ mt76x0_phy_set_chan_bbp_params(struct mt76x0_dev *dev, u8 channel, u16 rf_bw_ban u8 gain; gain = FIELD_GET(MT_BBP_AGC_GAIN, val); - gain -= dev->caldata.lna_gain * 2; + gain -= dev->cal.rx.lna_gain * 2; val &= ~MT_BBP_AGC_GAIN; val |= FIELD_PREP(MT_BBP_AGC_GAIN, gain); mt76_wr(dev, pair->reg, val); @@ -548,7 +516,7 @@ mt76x0_phy_set_chan_bbp_params(struct mt76x0_dev *dev, u8 channel, u16 rf_bw_ban } } -static void mt76x0_ant_select(struct mt76x0_dev *dev) +static void mt76x0_ant_select(struct mt76x02_dev *dev) { struct ieee80211_channel *chan = dev->mt76.chandef.chan; @@ -568,7 +536,7 @@ static void mt76x0_ant_select(struct mt76x0_dev *dev) } static void -mt76x0_bbp_set_bw(struct mt76x0_dev *dev, enum nl80211_chan_width width) +mt76x0_bbp_set_bw(struct mt76x02_dev *dev, enum nl80211_chan_width width) { enum { BW_20 = 0, BW_40 = 1, BW_80 = 2, BW_10 = 4}; int bw; @@ -595,10 +563,10 @@ mt76x0_bbp_set_bw(struct mt76x0_dev *dev, enum nl80211_chan_width width) return ; } - mt76x02_mcu_function_select(&dev->mt76, BW_SETTING, bw, false); + mt76x02_mcu_function_select(dev, BW_SETTING, bw, false); } -void mt76x0_phy_set_txpower(struct mt76x0_dev *dev) +void mt76x0_phy_set_txpower(struct mt76x02_dev *dev) { struct mt76_rate_power *t = &dev->mt76.rate_power; u8 info[2]; @@ -611,12 +579,53 @@ void mt76x0_phy_set_txpower(struct mt76x0_dev *dev) dev->mt76.txpower_cur = mt76x02_get_max_rate_power(t); mt76x02_add_rate_power_offset(t, -info[0]); - mt76x02_phy_set_txpower(&dev->mt76, info[0], info[1]); + mt76x02_phy_set_txpower(dev, info[0], info[1]); } 
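mt76x0_phy_set_txpower() above records the maximum per-rate value as txpower_cur and then rebases the whole rate table by -info[0] before handing the base/offset pair to mt76x02_phy_set_txpower(). A toy sketch of that rebasing step, assuming the mt76 rate_power union exposes a flat s8 all[] view (as it does elsewhere in this driver family); example_rebase_rate_power() is illustrative, not part of the patch:

/* shift every per-rate power entry by a signed delta; clamping is
 * left to the register writer. Assumes struct mt76_rate_power has
 * an s8 all[] member covering the whole table.
 */
static void example_rebase_rate_power(struct mt76_rate_power *t, s8 delta)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(t->all); i++)
		t->all[i] += delta;
}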
-static int -__mt76x0_phy_set_channel(struct mt76x0_dev *dev, - struct cfg80211_chan_def *chandef) +void mt76x0_phy_calibrate(struct mt76x02_dev *dev, bool power_on) +{ + struct ieee80211_channel *chan = dev->mt76.chandef.chan; + u32 val, tx_alc, reg_val; + + if (power_on) { + mt76x02_mcu_calibrate(dev, MCU_CAL_R, 0, false); + mt76x02_mcu_calibrate(dev, MCU_CAL_VCO, chan->hw_value, + false); + usleep_range(10, 20); + /* XXX: tssi */ + } + + tx_alc = mt76_rr(dev, MT_TX_ALC_CFG_0); + mt76_wr(dev, MT_TX_ALC_CFG_0, 0); + usleep_range(500, 700); + + reg_val = mt76_rr(dev, MT_BBP(IBI, 9)); + mt76_wr(dev, MT_BBP(IBI, 9), 0xffffff7e); + + if (chan->band == NL80211_BAND_5GHZ) { + if (chan->hw_value < 100) + val = 0x701; + else if (chan->hw_value < 140) + val = 0x801; + else + val = 0x901; + } else { + val = 0x600; + } + + mt76x02_mcu_calibrate(dev, MCU_CAL_FULL, val, false); + msleep(350); + mt76x02_mcu_calibrate(dev, MCU_CAL_LC, 1, false); + usleep_range(15000, 20000); + + mt76_wr(dev, MT_BBP(IBI, 9), reg_val); + mt76_wr(dev, MT_TX_ALC_CFG_0, tx_alc); + mt76x02_mcu_calibrate(dev, MCU_CAL_RXDCOC, 1, false); +} +EXPORT_SYMBOL_GPL(mt76x0_phy_calibrate); + +int mt76x0_phy_set_channel(struct mt76x02_dev *dev, + struct cfg80211_chan_def *chandef) { u32 ext_cca_chan[4] = { [0] = FIELD_PREP(MT_EXT_CCA_CFG_CCA0, 0) | @@ -674,9 +683,19 @@ __mt76x0_phy_set_channel(struct mt76x0_dev *dev, break; } - mt76x0_bbp_set_bw(dev, chandef->width); - mt76x0_bbp_set_ctrlch(dev, chandef->width, ch_group_index); - mt76x0_mac_set_ctrlch(dev, ch_group_index & 1); + if (mt76_is_usb(dev)) { + mt76x0_bbp_set_bw(dev, chandef->width); + } else { + if (chandef->width == NL80211_CHAN_WIDTH_80 || + chandef->width == NL80211_CHAN_WIDTH_40) + val = 0x201; + else + val = 0x601; + mt76_wr(dev, MT_TX_SW_CFG0, val); + } + mt76x02_phy_set_bw(dev, chandef->width, ch_group_index); + mt76x02_phy_set_band(dev, chandef->chan->band, + ch_group_index & 1); mt76x0_ant_select(dev); mt76_rmw(dev, MT_EXT_CCA_CFG, @@ -689,7 +708,6 @@ __mt76x0_phy_set_channel(struct mt76x0_dev *dev, mt76x0_phy_set_band(dev, chandef->chan->band); mt76x0_phy_set_chan_rf_params(dev, channel, rf_bw_band); - mt76x0_read_rx_gain(dev); /* set Japan Tx filter at channel 14 */ val = mt76_rr(dev, MT_BBP(CORE, 1)); @@ -699,39 +717,37 @@ __mt76x0_phy_set_channel(struct mt76x0_dev *dev, val &= ~0x20; mt76_wr(dev, MT_BBP(CORE, 1), val); - mt76x0_phy_set_chan_bbp_params(dev, channel, rf_bw_band); + mt76x0_read_rx_gain(dev); + mt76x0_phy_set_chan_bbp_params(dev, rf_bw_band); + mt76x02_init_agc_gain(dev); - /* Vendor driver don't do it */ - /* mt76x0_phy_set_tx_power(dev, channel, rf_bw_band); */ + if (mt76_is_usb(dev)) { + mt76x0_vco_cal(dev, channel); + } else { + /* enable vco */ + rf_set(dev, MT_RF(0, 4), BIT(7)); + } - mt76x0_vco_cal(dev, channel); if (scan) - mt76x02_mcu_calibrate(&dev->mt76, MCU_CAL_RXDCOC, 1, false); + return 0; + if (mt76_is_mmio(dev)) + mt76x0_phy_calibrate(dev, false); mt76x0_phy_set_txpower(dev); - return 0; -} - -int mt76x0_phy_set_channel(struct mt76x0_dev *dev, - struct cfg80211_chan_def *chandef) -{ - int ret; - - mutex_lock(&dev->hw_atomic_mutex); - ret = __mt76x0_phy_set_channel(dev, chandef); - mutex_unlock(&dev->hw_atomic_mutex); + ieee80211_queue_delayed_work(dev->mt76.hw, &dev->cal_work, + MT_CALIBRATE_INTERVAL); - return ret; + return 0; } -void mt76x0_phy_recalibrate_after_assoc(struct mt76x0_dev *dev) +void mt76x0_phy_recalibrate_after_assoc(struct mt76x02_dev *dev) { u32 tx_alc, reg_val; u8 channel = dev->mt76.chandef.chan->hw_value; int 
is_5ghz = (dev->mt76.chandef.chan->band == NL80211_BAND_5GHZ) ? 1 : 0; - mt76x02_mcu_calibrate(&dev->mt76, MCU_CAL_R, 0, false); + mt76x02_mcu_calibrate(dev, MCU_CAL_R, 0, false); mt76x0_vco_cal(dev, channel); @@ -739,124 +755,119 @@ void mt76x0_phy_recalibrate_after_assoc(struct mt76x0_dev *dev) mt76_wr(dev, MT_TX_ALC_CFG_0, 0); usleep_range(500, 700); - reg_val = mt76_rr(dev, 0x2124); - reg_val &= 0xffffff7e; - mt76_wr(dev, 0x2124, reg_val); + reg_val = mt76_rr(dev, MT_BBP(IBI, 9)); + mt76_wr(dev, MT_BBP(IBI, 9), 0xffffff7e); - mt76x02_mcu_calibrate(&dev->mt76, MCU_CAL_RXDCOC, 0, false); + mt76x02_mcu_calibrate(dev, MCU_CAL_RXDCOC, 0, false); - mt76x02_mcu_calibrate(&dev->mt76, MCU_CAL_LC, is_5ghz, false); - mt76x02_mcu_calibrate(&dev->mt76, MCU_CAL_LOFT, is_5ghz, false); - mt76x02_mcu_calibrate(&dev->mt76, MCU_CAL_TXIQ, is_5ghz, false); - mt76x02_mcu_calibrate(&dev->mt76, MCU_CAL_TX_GROUP_DELAY, - is_5ghz, false); - mt76x02_mcu_calibrate(&dev->mt76, MCU_CAL_RXIQ, is_5ghz, false); - mt76x02_mcu_calibrate(&dev->mt76, MCU_CAL_RX_GROUP_DELAY, - is_5ghz, false); + mt76x02_mcu_calibrate(dev, MCU_CAL_LC, is_5ghz, false); + mt76x02_mcu_calibrate(dev, MCU_CAL_LOFT, is_5ghz, false); + mt76x02_mcu_calibrate(dev, MCU_CAL_TXIQ, is_5ghz, false); + mt76x02_mcu_calibrate(dev, MCU_CAL_TX_GROUP_DELAY, is_5ghz, false); + mt76x02_mcu_calibrate(dev, MCU_CAL_RXIQ, is_5ghz, false); + mt76x02_mcu_calibrate(dev, MCU_CAL_RX_GROUP_DELAY, is_5ghz, false); - mt76_wr(dev, 0x2124, reg_val); + mt76_wr(dev, MT_BBP(IBI, 9), reg_val); mt76_wr(dev, MT_TX_ALC_CFG_0, tx_alc); msleep(100); - mt76x02_mcu_calibrate(&dev->mt76, MCU_CAL_RXDCOC, 1, false); -} - -void mt76x0_agc_save(struct mt76x0_dev *dev) -{ - /* Only one RX path */ - dev->agc_save = FIELD_GET(MT_BBP_AGC_GAIN, mt76_rr(dev, MT_BBP(AGC, 8))); -} - -void mt76x0_agc_restore(struct mt76x0_dev *dev) -{ - mt76_rmw_field(dev, MT_BBP(AGC, 8), MT_BBP_AGC_GAIN, dev->agc_save); + mt76x02_mcu_calibrate(dev, MCU_CAL_RXDCOC, 1, false); } -static void mt76x0_temp_sensor(struct mt76x0_dev *dev) +static void mt76x0_temp_sensor(struct mt76x02_dev *dev) { u8 rf_b7_73, rf_b0_66, rf_b0_67; - int cycle, temp; - u32 val; - s32 sval; + s8 val; rf_b7_73 = rf_rr(dev, MT_RF(7, 73)); rf_b0_66 = rf_rr(dev, MT_RF(0, 66)); - rf_b0_67 = rf_rr(dev, MT_RF(0, 73)); + rf_b0_67 = rf_rr(dev, MT_RF(0, 67)); rf_wr(dev, MT_RF(7, 73), 0x02); rf_wr(dev, MT_RF(0, 66), 0x23); - rf_wr(dev, MT_RF(0, 73), 0x01); + rf_wr(dev, MT_RF(0, 67), 0x01); mt76_wr(dev, MT_BBP(CORE, 34), 0x00080055); - for (cycle = 0; cycle < 2000; cycle++) { - val = mt76_rr(dev, MT_BBP(CORE, 34)); - if (!(val & 0x10)) - break; - udelay(3); - } - - if (cycle >= 2000) { - val &= 0x10; - mt76_wr(dev, MT_BBP(CORE, 34), val); + if (!mt76_poll(dev, MT_BBP(CORE, 34), BIT(4), 0, 2000)) { + mt76_clear(dev, MT_BBP(CORE, 34), BIT(4)); goto done; } - sval = mt76_rr(dev, MT_BBP(CORE, 35)) & 0xff; - if (!(sval & 0x80)) - sval &= 0x7f; /* Positive */ - else - sval |= 0xffffff00; /* Negative */ + val = mt76_rr(dev, MT_BBP(CORE, 35)); + val = (35 * (val - dev->cal.rx.temp_offset)) / 10 + 25; - temp = (35 * (sval - dev->caldata.temp_offset)) / 10 + 25; + if (abs(val - dev->cal.temp_vco) > 20) { + mt76x02_mcu_calibrate(dev, MCU_CAL_VCO, + dev->mt76.chandef.chan->hw_value, + false); + dev->cal.temp_vco = val; + } + if (abs(val - dev->cal.temp) > 30) { + mt76x0_phy_calibrate(dev, false); + dev->cal.temp = val; + } done: rf_wr(dev, MT_RF(7, 73), rf_b7_73); rf_wr(dev, MT_RF(0, 66), rf_b0_66); - rf_wr(dev, MT_RF(0, 73), rf_b0_67); + rf_wr(dev, MT_RF(0, 67), 
rf_b0_67); } -static void mt76x0_dynamic_vga_tuning(struct mt76x0_dev *dev) +static void mt76x0_phy_set_gain_val(struct mt76x02_dev *dev) { - u32 val, init_vga; - - init_vga = (dev->mt76.chandef.chan->band == NL80211_BAND_5GHZ) ? 0x54 : 0x4E; - if (dev->avg_rssi > -60) - init_vga -= 0x20; - else if (dev->avg_rssi > -70) - init_vga -= 0x10; - - val = mt76_rr(dev, MT_BBP(AGC, 8)); - val &= 0xFFFF80FF; - val |= init_vga << 8; - mt76_wr(dev, MT_BBP(AGC,8), val); + u8 gain = dev->cal.agc_gain_cur[0] - dev->cal.agc_gain_adjust; + u32 val = 0x122c << 16 | 0xf2; + + mt76_wr(dev, MT_BBP(AGC, 8), + val | FIELD_PREP(MT_BBP_AGC_GAIN, gain)); } -static void mt76x0_phy_calibrate(struct work_struct *work) +static void +mt76x0_phy_update_channel_gain(struct mt76x02_dev *dev) { - struct mt76x0_dev *dev = container_of(work, struct mt76x0_dev, - cal_work.work); + bool gain_change; + u8 gain_delta; + int low_gain; - mt76x0_dynamic_vga_tuning(dev); - mt76x0_temp_sensor(dev); + dev->cal.avg_rssi_all = mt76x02_phy_get_min_avg_rssi(dev); - ieee80211_queue_delayed_work(dev->mt76.hw, &dev->cal_work, - MT_CALIBRATE_INTERVAL); + low_gain = (dev->cal.avg_rssi_all > mt76x02_get_rssi_gain_thresh(dev)) + + (dev->cal.avg_rssi_all > mt76x02_get_low_rssi_gain_thresh(dev)); + + gain_change = (dev->cal.low_gain & 2) ^ (low_gain & 2); + dev->cal.low_gain = low_gain; + + if (!gain_change) { + if (mt76x02_phy_adjust_vga_gain(dev)) + mt76x0_phy_set_gain_val(dev); + return; + } + + dev->cal.agc_gain_adjust = (low_gain == 2) ? 0 : 10; + gain_delta = (low_gain == 2) ? 10 : 0; + + dev->cal.agc_gain_cur[0] = dev->cal.agc_gain_init[0] - gain_delta; + mt76x0_phy_set_gain_val(dev); + + /* clear false CCA counters */ + mt76_rr(dev, MT_RX_STAT_1); } -void mt76x0_phy_con_cal_onoff(struct mt76x0_dev *dev, - struct ieee80211_bss_conf *info) +static void mt76x0_phy_calibration_work(struct work_struct *work) { - /* Start/stop collecting beacon data */ - spin_lock_bh(&dev->con_mon_lock); - ether_addr_copy(dev->ap_bssid, info->bssid); - dev->avg_rssi = 0; - dev->bcn_freq_off = MT_FREQ_OFFSET_INVALID; - spin_unlock_bh(&dev->con_mon_lock); + struct mt76x02_dev *dev = container_of(work, struct mt76x02_dev, + cal_work.work); + + mt76x0_phy_update_channel_gain(dev); + if (!mt76x0_tssi_enabled(dev)) + mt76x0_temp_sensor(dev); + + ieee80211_queue_delayed_work(dev->mt76.hw, &dev->cal_work, + MT_CALIBRATE_INTERVAL); } -static void -mt76x0_rf_init(struct mt76x0_dev *dev) +static void mt76x0_rf_init(struct mt76x02_dev *dev) { int i; u8 val; @@ -889,7 +900,7 @@ mt76x0_rf_init(struct mt76x0_dev *dev) E2: B0.R21<0>: xo_cxo<0>, B0.R22<7:0>: xo_cxo<8:1> */ rf_wr(dev, MT_RF(0, 22), - min_t(u8, dev->caldata.freq_offset, 0xbf)); + min_t(u8, dev->cal.rx.freq_offset, 0xbf)); val = rf_rr(dev, MT_RF(0, 22)); /* @@ -909,11 +920,11 @@ mt76x0_rf_init(struct mt76x0_dev *dev) rf_set(dev, MT_RF(0, 4), 0x80); } -void mt76x0_phy_init(struct mt76x0_dev *dev) +void mt76x0_phy_init(struct mt76x02_dev *dev) { - INIT_DELAYED_WORK(&dev->cal_work, mt76x0_phy_calibrate); + INIT_DELAYED_WORK(&dev->cal_work, mt76x0_phy_calibration_work); mt76x0_rf_init(dev); - mt76x02_phy_set_rxpath(&dev->mt76); - mt76x02_phy_set_txdac(&dev->mt76); + mt76x02_phy_set_rxpath(dev); + mt76x02_phy_set_txdac(dev); } diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/trace.h b/drivers/net/wireless/mediatek/mt76/mt76x0/trace.h index 36bbdd585163..75d1d6738c34 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x0/trace.h +++ b/drivers/net/wireless/mediatek/mt76/mt76x0/trace.h @@ -17,7 +17,6 @@ #include 
<linux/tracepoint.h> #include "mt76x0.h" -#include "mac.h" #undef TRACE_SYSTEM #define TRACE_SYSTEM mt76x0 diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/tx.c b/drivers/net/wireless/mediatek/mt76/mt76x0/tx.c deleted file mode 100644 index b3c5dc2ffeb1..000000000000 --- a/drivers/net/wireless/mediatek/mt76/mt76x0/tx.c +++ /dev/null @@ -1,101 +0,0 @@ -/* - * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org> - * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 - * as published by the Free Software Foundation - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - */ - -#include "mt76x0.h" -#include "trace.h" -#include "../mt76x02_util.h" -#include "../mt76x02_usb.h" - -struct mt76x02_txwi * -mt76x0_push_txwi(struct mt76x0_dev *dev, struct sk_buff *skb, - struct ieee80211_sta *sta, struct mt76_wcid *wcid, - int pkt_len) -{ - struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); - struct ieee80211_tx_rate *rate = &info->control.rates[0]; - struct mt76x02_txwi *txwi; - unsigned long flags; - u16 rate_ctl; - u8 nss; - - txwi = (struct mt76x02_txwi *)skb_push(skb, sizeof(struct mt76x02_txwi)); - memset(txwi, 0, sizeof(*txwi)); - - if (!wcid->tx_rate_set) - ieee80211_get_tx_rates(info->control.vif, sta, skb, - info->control.rates, 1); - - spin_lock_irqsave(&dev->mt76.lock, flags); - if (rate->idx < 0 || !rate->count) { - rate_ctl = wcid->tx_rate; - nss = wcid->tx_rate_nss; - } else { - rate_ctl = mt76x02_mac_tx_rate_val(&dev->mt76, rate, &nss); - } - spin_unlock_irqrestore(&dev->mt76.lock, flags); - - txwi->wcid = wcid->idx; - txwi->rate = cpu_to_le16(rate_ctl); - txwi->pktid = (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) ? 
1 : 0; - - mt76x02_mac_fill_txwi(txwi, skb, sta, pkt_len, nss); - - return txwi; -} -EXPORT_SYMBOL_GPL(mt76x0_push_txwi); - -void mt76x0_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control, - struct sk_buff *skb) -{ - struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); - struct mt76x0_dev *dev = hw->priv; - struct ieee80211_vif *vif = info->control.vif; - struct mt76_wcid *wcid = &dev->mt76.global_wcid; - - if (control->sta) { - struct mt76x02_sta *msta; - - msta = (struct mt76x02_sta *)control->sta->drv_priv; - wcid = &msta->wcid; - /* sw encrypted frames */ - if (!info->control.hw_key && wcid->hw_key_idx != 0xff) - control->sta = NULL; - } - - if (vif && !control->sta) { - struct mt76x02_vif *mvif; - - mvif = (struct mt76x02_vif *)vif->drv_priv; - wcid = &mvif->group_wcid; - } - - mt76_tx(&dev->mt76, control->sta, wcid, skb); -} -EXPORT_SYMBOL_GPL(mt76x0_tx); - -void mt76x0_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q, - struct sk_buff *skb) -{ - struct mt76x0_dev *dev = container_of(mdev, struct mt76x0_dev, mt76); - void *rxwi = skb->data; - - skb_pull(skb, sizeof(struct mt76x02_rxwi)); - if (!mt76x0_mac_process_rx(dev, skb, rxwi)) { - dev_kfree_skb(skb); - return; - } - - mt76_rx(&dev->mt76, q, skb); -} -EXPORT_SYMBOL_GPL(mt76x0_queue_rx_skb); diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c b/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c index a76043213f55..a7fd36c2f633 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c @@ -18,7 +18,6 @@ #include "mt76x0.h" #include "mcu.h" #include "trace.h" -#include "../mt76x02_util.h" #include "../mt76x02_usb.h" static struct usb_device_id mt76x0_device_table[] = { @@ -49,7 +48,7 @@ static struct usb_device_id mt76x0_device_table[] = { { 0, } }; -static void mt76x0_init_usb_dma(struct mt76x0_dev *dev) +static void mt76x0_init_usb_dma(struct mt76x02_dev *dev) { u32 val; @@ -76,7 +75,7 @@ static void mt76x0_init_usb_dma(struct mt76x0_dev *dev) mt76_wr(dev, MT_USB_DMA_CFG, val); } -static void mt76x0u_cleanup(struct mt76x0_dev *dev) +static void mt76x0u_cleanup(struct mt76x02_dev *dev) { clear_bit(MT76_STATE_INITIALIZED, &dev->mt76.state); mt76x0_chip_onoff(dev, false, false); @@ -84,16 +83,16 @@ static void mt76x0u_cleanup(struct mt76x0_dev *dev) mt76u_mcu_deinit(&dev->mt76); } -static void mt76x0u_mac_stop(struct mt76x0_dev *dev) +static void mt76x0u_mac_stop(struct mt76x02_dev *dev) { - if (test_bit(MT76_REMOVED, &dev->mt76.state)) - return; - clear_bit(MT76_STATE_RUNNING, &dev->mt76.state); cancel_delayed_work_sync(&dev->cal_work); cancel_delayed_work_sync(&dev->mac_work); mt76u_stop_stat_wk(&dev->mt76); + if (test_bit(MT76_REMOVED, &dev->mt76.state)) + return; + mt76_clear(dev, MT_BEACON_TIME_CFG, MT_BEACON_TIME_CFG_TIMER_EN | MT_BEACON_TIME_CFG_SYNC_MODE | MT_BEACON_TIME_CFG_TBTT_EN | MT_BEACON_TIME_CFG_BEACON_TX); @@ -109,7 +108,7 @@ static void mt76x0u_mac_stop(struct mt76x0_dev *dev) static int mt76x0u_start(struct ieee80211_hw *hw) { - struct mt76x0_dev *dev = hw->priv; + struct mt76x02_dev *dev = hw->priv; int ret; mutex_lock(&dev->mt76.mutex); @@ -131,7 +130,7 @@ out: static void mt76x0u_stop(struct ieee80211_hw *hw) { - struct mt76x0_dev *dev = hw->priv; + struct mt76x02_dev *dev = hw->priv; mutex_lock(&dev->mt76.mutex); mt76x0u_mac_stop(dev); @@ -139,7 +138,7 @@ static void mt76x0u_stop(struct ieee80211_hw *hw) } static const struct ieee80211_ops mt76x0u_ops = { - .tx = mt76x0_tx, + .tx = mt76x02_tx, .start = mt76x0u_start, .stop = 
mt76x0u_stop, .add_interface = mt76x02_add_interface, @@ -159,48 +158,33 @@ static const struct ieee80211_ops mt76x0u_ops = { .wake_tx_queue = mt76_wake_tx_queue, }; -static int mt76x0u_tx_prepare_skb(struct mt76_dev *mdev, void *data, - struct sk_buff *skb, struct mt76_queue *q, - struct mt76_wcid *wcid, struct ieee80211_sta *sta, - u32 *tx_info) -{ - struct mt76x0_dev *dev = container_of(mdev, struct mt76x0_dev, mt76); - struct mt76x02_txwi *txwi; - int len = skb->len; - - mt76x02_insert_hdr_pad(skb); - txwi = mt76x0_push_txwi(dev, skb, sta, wcid, len); - - return mt76x02u_set_txinfo(skb, wcid, q2ep(q->hw_idx)); -} - -static int mt76x0u_register_device(struct mt76x0_dev *dev) +static int mt76x0u_register_device(struct mt76x02_dev *dev) { struct ieee80211_hw *hw = dev->mt76.hw; int err; - err = mt76u_mcu_init_rx(&dev->mt76); + err = mt76u_alloc_queues(&dev->mt76); if (err < 0) - return err; + goto out_err; - err = mt76u_alloc_queues(&dev->mt76); + err = mt76u_mcu_init_rx(&dev->mt76); if (err < 0) - return err; + goto out_err; mt76x0_chip_onoff(dev, true, true); if (!mt76x02_wait_for_mac(&dev->mt76)) { err = -ETIMEDOUT; - goto err; + goto out_err; } err = mt76x0u_mcu_init(dev); if (err < 0) - goto err; + goto out_err; mt76x0_init_usb_dma(dev); err = mt76x0_init_hardware(dev); if (err < 0) - goto err; + goto out_err; mt76_rmw(dev, MT_US_CYC_CFG, MT_US_CYC_CNT, 0x1e); mt76_wr(dev, MT_TXOP_CTRL_CFG, @@ -209,7 +193,7 @@ static int mt76x0u_register_device(struct mt76x0_dev *dev) err = mt76x0_register_device(dev); if (err < 0) - goto err; + goto out_err; /* check hw sg support in order to enable AMSDU */ if (mt76u_check_sg(&dev->mt76)) @@ -221,7 +205,7 @@ static int mt76x0u_register_device(struct mt76x0_dev *dev) return 0; -err: +out_err: mt76x0u_cleanup(dev); return err; } @@ -230,13 +214,13 @@ static int mt76x0u_probe(struct usb_interface *usb_intf, const struct usb_device_id *id) { static const struct mt76_driver_ops drv_ops = { - .tx_prepare_skb = mt76x0u_tx_prepare_skb, - .tx_complete_skb = mt76x02_tx_complete_skb, + .tx_prepare_skb = mt76x02u_tx_prepare_skb, + .tx_complete_skb = mt76x02u_tx_complete_skb, .tx_status_data = mt76x02_tx_status_data, - .rx_skb = mt76x0_queue_rx_skb, + .rx_skb = mt76x02_queue_rx_skb, }; struct usb_device *usb_dev = interface_to_usbdev(usb_intf); - struct mt76x0_dev *dev; + struct mt76x02_dev *dev; u32 asic_rev, mac_rev; int ret; @@ -292,7 +276,7 @@ err: static void mt76x0_disconnect(struct usb_interface *usb_intf) { - struct mt76x0_dev *dev = usb_get_intfdata(usb_intf); + struct mt76x02_dev *dev = usb_get_intfdata(usb_intf); bool initalized = test_bit(MT76_STATE_INITIALIZED, &dev->mt76.state); if (!initalized) @@ -310,7 +294,7 @@ static void mt76x0_disconnect(struct usb_interface *usb_intf) static int __maybe_unused mt76x0_suspend(struct usb_interface *usb_intf, pm_message_t state) { - struct mt76x0_dev *dev = usb_get_intfdata(usb_intf); + struct mt76x02_dev *dev = usb_get_intfdata(usb_intf); struct mt76_usb *usb = &dev->mt76.usb; mt76u_stop_queues(&dev->mt76); @@ -322,7 +306,7 @@ static int __maybe_unused mt76x0_suspend(struct usb_interface *usb_intf, static int __maybe_unused mt76x0_resume(struct usb_interface *usb_intf) { - struct mt76x0_dev *dev = usb_get_intfdata(usb_intf); + struct mt76x02_dev *dev = usb_get_intfdata(usb_intf); struct mt76_usb *usb = &dev->mt76.usb; int ret; diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/usb_mcu.c b/drivers/net/wireless/mediatek/mt76/mt76x0/usb_mcu.c index 4c5b7a6f15ce..a9f14d5149d1 100644 --- 
a/drivers/net/wireless/mediatek/mt76/mt76x0/usb_mcu.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x0/usb_mcu.c @@ -25,7 +25,7 @@ #define MT7610U_FIRMWARE "mediatek/mt7610u.bin" static int -mt76x0u_upload_firmware(struct mt76x0_dev *dev, +mt76x0u_upload_firmware(struct mt76x02_dev *dev, const struct mt76x02_fw_header *hdr) { u8 *fw_payload = (u8 *)(hdr + 1); @@ -40,8 +40,7 @@ mt76x0u_upload_firmware(struct mt76x0_dev *dev, ilm_len = le32_to_cpu(hdr->ilm_len) - MT_MCU_IVB_SIZE; dev_dbg(dev->mt76.dev, "loading FW - ILM %u + IVB %u\n", ilm_len, MT_MCU_IVB_SIZE); - err = mt76x02u_mcu_fw_send_data(&dev->mt76, - fw_payload + MT_MCU_IVB_SIZE, + err = mt76x02u_mcu_fw_send_data(dev, fw_payload + MT_MCU_IVB_SIZE, ilm_len, MCU_FW_URB_MAX_PAYLOAD, MT_MCU_IVB_SIZE); if (err) @@ -49,7 +48,7 @@ mt76x0u_upload_firmware(struct mt76x0_dev *dev, dlm_len = le32_to_cpu(hdr->dlm_len); dev_dbg(dev->mt76.dev, "loading FW - DLM %u\n", dlm_len); - err = mt76x02u_mcu_fw_send_data(&dev->mt76, + err = mt76x02u_mcu_fw_send_data(dev, fw_payload + le32_to_cpu(hdr->ilm_len), dlm_len, MCU_FW_URB_MAX_PAYLOAD, MT_MCU_DLM_OFFSET); @@ -76,7 +75,7 @@ out: return err; } -static int mt76x0u_load_firmware(struct mt76x0_dev *dev) +static int mt76x0u_load_firmware(struct mt76x02_dev *dev) { const struct firmware *fw; const struct mt76x02_fw_header *hdr; @@ -121,7 +120,7 @@ static int mt76x0u_load_firmware(struct mt76x0_dev *dev) mt76_set(dev, MT_USB_DMA_CFG, (MT_USB_DMA_CFG_RX_BULK_EN | MT_USB_DMA_CFG_TX_BULK_EN) | FIELD_PREP(MT_USB_DMA_CFG_RX_BULK_AGG_TOUT, 0x20)); - mt76x02u_mcu_fw_reset(&dev->mt76); + mt76x02u_mcu_fw_reset(dev); usleep_range(5000, 6000); /* mt76x0_rmw(dev, MT_PBF_CFG, 0, (MT_PBF_CFG_TX0Q_EN | @@ -160,7 +159,7 @@ err_inv_fw: return -ENOENT; } -int mt76x0u_mcu_init(struct mt76x0_dev *dev) +int mt76x0u_mcu_init(struct mt76x02_dev *dev) { int ret; diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02.h b/drivers/net/wireless/mediatek/mt76/mt76x02.h new file mode 100644 index 000000000000..47c42c607964 --- /dev/null +++ b/drivers/net/wireless/mediatek/mt76/mt76x02.h @@ -0,0 +1,215 @@ +/* + * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name> + * Copyright (C) 2018 Stanislaw Gruszka <stf_xl@wp.pl> + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ */ + +#ifndef __MT76X02_UTIL_H +#define __MT76X02_UTIL_H + +#include <linux/kfifo.h> + +#include "mt76.h" +#include "mt76x02_regs.h" +#include "mt76x02_mac.h" +#include "mt76x02_dfs.h" +#include "mt76x02_dma.h" + +struct mt76x02_mac_stats { + u64 rx_stat[6]; + u64 tx_stat[6]; + u64 aggr_stat[2]; + u64 aggr_n[32]; + u64 zero_len_del[2]; +}; + +#define MT_MAX_CHAINS 2 +struct mt76x02_rx_freq_cal { + s8 high_gain[MT_MAX_CHAINS]; + s8 rssi_offset[MT_MAX_CHAINS]; + s8 lna_gain; + u32 mcu_gain; + s16 temp_offset; + u8 freq_offset; +}; + +struct mt76x02_calibration { + struct mt76x02_rx_freq_cal rx; + + u8 agc_gain_init[MT_MAX_CHAINS]; + u8 agc_gain_cur[MT_MAX_CHAINS]; + + u16 false_cca; + s8 avg_rssi_all; + s8 agc_gain_adjust; + s8 low_gain; + + s8 temp_vco; + s8 temp; + + bool init_cal_done; + bool tssi_cal_done; + bool tssi_comp_pending; + bool dpd_cal_done; + bool channel_cal_done; +}; + +struct mt76x02_dev { + struct mt76_dev mt76; /* must be first */ + + struct mac_address macaddr_list[8]; + + struct mutex phy_mutex; + struct mutex mutex; + + u8 txdone_seq; + DECLARE_KFIFO_PTR(txstatus_fifo, struct mt76x02_tx_status); + + struct sk_buff *rx_head; + + struct tasklet_struct tx_tasklet; + struct tasklet_struct pre_tbtt_tasklet; + struct delayed_work cal_work; + struct delayed_work mac_work; + + struct mt76x02_mac_stats stats; + atomic_t avg_ampdu_len; + u32 aggr_stats[32]; + + struct sk_buff *beacons[8]; + u8 beacon_mask; + u8 beacon_data_mask; + + u8 tbtt_count; + u16 beacon_int; + + struct mt76x02_calibration cal; + + s8 target_power; + s8 target_power_delta[2]; + bool enable_tpc; + + bool no_2ghz; + + u8 coverage_class; + u8 slottime; + + struct mt76x02_dfs_pattern_detector dfs_pd; +}; + +extern struct ieee80211_rate mt76x02_rates[12]; + +void mt76x02_configure_filter(struct ieee80211_hw *hw, + unsigned int changed_flags, + unsigned int *total_flags, u64 multicast); +int mt76x02_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + struct ieee80211_sta *sta); +int mt76x02_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + struct ieee80211_sta *sta); + +void mt76x02_vif_init(struct mt76x02_dev *dev, struct ieee80211_vif *vif, + unsigned int idx); +int mt76x02_add_interface(struct ieee80211_hw *hw, + struct ieee80211_vif *vif); +void mt76x02_remove_interface(struct ieee80211_hw *hw, + struct ieee80211_vif *vif); + +int mt76x02_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + struct ieee80211_ampdu_params *params); +int mt76x02_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, + struct ieee80211_vif *vif, struct ieee80211_sta *sta, + struct ieee80211_key_conf *key); +int mt76x02_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + u16 queue, const struct ieee80211_tx_queue_params *params); +void mt76x02_sta_rate_tbl_update(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta); +s8 mt76x02_tx_get_max_txpwr_adj(struct mt76x02_dev *dev, + const struct ieee80211_tx_rate *rate); +s8 mt76x02_tx_get_txpwr_adj(struct mt76x02_dev *dev, s8 txpwr, + s8 max_txpwr_adj); +void mt76x02_tx_set_txpwr_auto(struct mt76x02_dev *dev, s8 txpwr); +int mt76x02_insert_hdr_pad(struct sk_buff *skb); +void mt76x02_remove_hdr_pad(struct sk_buff *skb, int len); +void mt76x02_tx_complete(struct mt76_dev *dev, struct sk_buff *skb); +bool mt76x02_tx_status_data(struct mt76_dev *mdev, u8 *update); +void mt76x02_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q, + struct sk_buff *skb); +void mt76x02_rx_poll_complete(struct mt76_dev *mdev, 
enum mt76_rxq_id q); +irqreturn_t mt76x02_irq_handler(int irq, void *dev_instance); +void mt76x02_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control, + struct sk_buff *skb); +int mt76x02_tx_prepare_skb(struct mt76_dev *mdev, void *txwi, + struct sk_buff *skb, struct mt76_queue *q, + struct mt76_wcid *wcid, struct ieee80211_sta *sta, + u32 *tx_info); + +extern const u16 mt76x02_beacon_offsets[16]; +void mt76x02_set_beacon_offsets(struct mt76x02_dev *dev); +void mt76x02_set_irq_mask(struct mt76x02_dev *dev, u32 clear, u32 set); +void mt76x02_mac_start(struct mt76x02_dev *dev); + +static inline bool is_mt76x2(struct mt76x02_dev *dev) +{ + return mt76_chip(&dev->mt76) == 0x7612 || + mt76_chip(&dev->mt76) == 0x7662 || + mt76_chip(&dev->mt76) == 0x7602; +} + +static inline void mt76x02_irq_enable(struct mt76x02_dev *dev, u32 mask) +{ + mt76x02_set_irq_mask(dev, 0, mask); +} + +static inline void mt76x02_irq_disable(struct mt76x02_dev *dev, u32 mask) +{ + mt76x02_set_irq_mask(dev, mask, 0); +} + +static inline bool +mt76x02_wait_for_txrx_idle(struct mt76_dev *dev) +{ + return __mt76_poll_msec(dev, MT_MAC_STATUS, + MT_MAC_STATUS_TX | MT_MAC_STATUS_RX, + 0, 100); +} + +static inline struct mt76x02_sta * +mt76x02_rx_get_sta(struct mt76_dev *dev, u8 idx) +{ + struct mt76_wcid *wcid; + + if (idx >= ARRAY_SIZE(dev->wcid)) + return NULL; + + wcid = rcu_dereference(dev->wcid[idx]); + if (!wcid) + return NULL; + + return container_of(wcid, struct mt76x02_sta, wcid); +} + +static inline struct mt76_wcid * +mt76x02_rx_get_sta_wcid(struct mt76x02_sta *sta, bool unicast) +{ + if (!sta) + return NULL; + + if (unicast) + return &sta->wcid; + else + return &sta->vif->group_wcid; +} + +#endif diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_dfs.h b/drivers/net/wireless/mediatek/mt76/mt76x02_dfs.h index 693f421bf096..7e177c934592 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2_dfs.h +++ b/drivers/net/wireless/mediatek/mt76/mt76x02_dfs.h @@ -14,8 +14,8 @@ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ -#ifndef __MT76x2_DFS_H -#define __MT76x2_DFS_H +#ifndef __MT76x02_DFS_H +#define __MT76x02_DFS_H #include <linux/types.h> #include <linux/nl80211.h> @@ -49,7 +49,7 @@ #define MT_DFS_ETSI_MAX_PRI (133333 + 125000 + 117647 + 1000) #define MT_DFS_ETSI_MIN_PRI (4500 - 20) -struct mt76x2_radar_specs { +struct mt76x02_radar_specs { u8 mode; u16 avg_len; u16 e_low; @@ -70,7 +70,7 @@ struct mt76x2_radar_specs { #define MT_DFS_EVENT_ENGINE(x) (((x) & BIT(31)) ? 
2 : 0) #define MT_DFS_EVENT_TIMESTAMP(x) ((x) & GENMASK(21, 0)) #define MT_DFS_EVENT_WIDTH(x) ((x) & GENMASK(11, 0)) -struct mt76x2_dfs_event { +struct mt76x02_dfs_event { unsigned long fetch_ts; u32 ts; u16 width; @@ -78,12 +78,12 @@ struct mt76x2_dfs_event { }; #define MT_DFS_EVENT_BUFLEN 256 -struct mt76x2_dfs_event_rb { - struct mt76x2_dfs_event data[MT_DFS_EVENT_BUFLEN]; +struct mt76x02_dfs_event_rb { + struct mt76x02_dfs_event data[MT_DFS_EVENT_BUFLEN]; int h_rb, t_rb; }; -struct mt76x2_dfs_sequence { +struct mt76x02_dfs_sequence { struct list_head head; u32 first_ts; u32 last_ts; @@ -92,7 +92,7 @@ struct mt76x2_dfs_sequence { u8 engine; }; -struct mt76x2_dfs_hw_pulse { +struct mt76x02_dfs_hw_pulse { u8 engine; u32 period; u32 w1; @@ -100,47 +100,41 @@ struct mt76x2_dfs_hw_pulse { u32 burst; }; -struct mt76x2_dfs_sw_detector_params { +struct mt76x02_dfs_sw_detector_params { u32 min_pri; u32 max_pri; u32 pri_margin; }; -struct mt76x2_dfs_engine_stats { +struct mt76x02_dfs_engine_stats { u32 hw_pattern; u32 hw_pulse_discarded; u32 sw_pattern; }; -struct mt76x2_dfs_seq_stats { +struct mt76x02_dfs_seq_stats { u32 seq_pool_len; u32 seq_len; }; -struct mt76x2_dfs_pattern_detector { +struct mt76x02_dfs_pattern_detector { enum nl80211_dfs_regions region; u8 chirp_pulse_cnt; u32 chirp_pulse_ts; - struct mt76x2_dfs_sw_detector_params sw_dpd_params; - struct mt76x2_dfs_event_rb event_rb[2]; + struct mt76x02_dfs_sw_detector_params sw_dpd_params; + struct mt76x02_dfs_event_rb event_rb[2]; struct list_head sequences; struct list_head seq_pool; - struct mt76x2_dfs_seq_stats seq_stats; + struct mt76x02_dfs_seq_stats seq_stats; unsigned long last_sw_check; u32 last_event_ts; - struct mt76x2_dfs_engine_stats stats[MT_DFS_NUM_ENGINES]; + struct mt76x02_dfs_engine_stats stats[MT_DFS_NUM_ENGINES]; struct tasklet_struct dfs_tasklet; }; -void mt76x2_dfs_init_params(struct mt76x2_dev *dev); -void mt76x2_dfs_init_detector(struct mt76x2_dev *dev); -void mt76x2_dfs_adjust_agc(struct mt76x2_dev *dev); -void mt76x2_dfs_set_domain(struct mt76x2_dev *dev, - enum nl80211_dfs_regions region); - -#endif /* __MT76x2_DFS_H */ +#endif /* __MT76x02_DFS_H */ diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_dma.h b/drivers/net/wireless/mediatek/mt76/mt76x02_dma.h index 65b97f5713d3..6394010a565f 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x02_dma.h +++ b/drivers/net/wireless/mediatek/mt76/mt76x02_dma.h @@ -17,8 +17,8 @@ #ifndef __MT76x02_DMA_H #define __MT76x02_DMA_H +#include "mt76x02.h" #include "dma.h" -#include "mt76x02_regs.h" #define MT_TXD_INFO_LEN GENMASK(15, 0) #define MT_TXD_INFO_NEXT_VLD BIT(16) @@ -70,8 +70,8 @@ mt76x02_wait_for_wpdma(struct mt76_dev *dev, int timeout) 0, timeout); } -int mt76x02_dma_init(struct mt76_dev *dev); -void mt76x02_dma_enable(struct mt76_dev *dev); -void mt76x02_dma_disable(struct mt76_dev *dev); +int mt76x02_dma_init(struct mt76x02_dev *dev); +void mt76x02_dma_disable(struct mt76x02_dev *dev); +void mt76x02_dma_cleanup(struct mt76x02_dev *dev); #endif /* __MT76x02_DMA_H */ diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_eeprom.c b/drivers/net/wireless/mediatek/mt76/mt76x02_eeprom.c index d3efeb8a72b7..9390de2a323e 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x02_eeprom.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x02_eeprom.c @@ -17,46 +17,43 @@ #include <asm/unaligned.h> -#include "mt76.h" #include "mt76x02_eeprom.h" -#include "mt76x02_regs.h" static int -mt76x02_efuse_read(struct mt76_dev *dev, u16 addr, u8 *data, +mt76x02_efuse_read(struct 
mt76x02_dev *dev, u16 addr, u8 *data, enum mt76x02_eeprom_modes mode) { u32 val; int i; - val = __mt76_rr(dev, MT_EFUSE_CTRL); + val = mt76_rr(dev, MT_EFUSE_CTRL); val &= ~(MT_EFUSE_CTRL_AIN | MT_EFUSE_CTRL_MODE); val |= FIELD_PREP(MT_EFUSE_CTRL_AIN, addr & ~0xf); val |= FIELD_PREP(MT_EFUSE_CTRL_MODE, mode); val |= MT_EFUSE_CTRL_KICK; - __mt76_wr(dev, MT_EFUSE_CTRL, val); + mt76_wr(dev, MT_EFUSE_CTRL, val); - if (!__mt76_poll_msec(dev, MT_EFUSE_CTRL, MT_EFUSE_CTRL_KICK, - 0, 1000)) + if (!mt76_poll_msec(dev, MT_EFUSE_CTRL, MT_EFUSE_CTRL_KICK, 0, 1000)) return -ETIMEDOUT; udelay(2); - val = __mt76_rr(dev, MT_EFUSE_CTRL); + val = mt76_rr(dev, MT_EFUSE_CTRL); if ((val & MT_EFUSE_CTRL_AOUT) == MT_EFUSE_CTRL_AOUT) { memset(data, 0xff, 16); return 0; } for (i = 0; i < 4; i++) { - val = __mt76_rr(dev, MT_EFUSE_DATA(i)); + val = mt76_rr(dev, MT_EFUSE_DATA(i)); put_unaligned_le32(val, data + 4 * i); } return 0; } -int mt76x02_get_efuse_data(struct mt76_dev *dev, u16 base, void *buf, +int mt76x02_get_efuse_data(struct mt76x02_dev *dev, u16 base, void *buf, int len, enum mt76x02_eeprom_modes mode) { int ret, i; @@ -71,26 +68,26 @@ int mt76x02_get_efuse_data(struct mt76_dev *dev, u16 base, void *buf, } EXPORT_SYMBOL_GPL(mt76x02_get_efuse_data); -void mt76x02_eeprom_parse_hw_cap(struct mt76_dev *dev) +void mt76x02_eeprom_parse_hw_cap(struct mt76x02_dev *dev) { u16 val = mt76x02_eeprom_get(dev, MT_EE_NIC_CONF_0); switch (FIELD_GET(MT_EE_NIC_CONF_0_BOARD_TYPE, val)) { case BOARD_TYPE_5GHZ: - dev->cap.has_5ghz = true; + dev->mt76.cap.has_5ghz = true; break; case BOARD_TYPE_2GHZ: - dev->cap.has_2ghz = true; + dev->mt76.cap.has_2ghz = true; break; default: - dev->cap.has_2ghz = true; - dev->cap.has_5ghz = true; + dev->mt76.cap.has_2ghz = true; + dev->mt76.cap.has_5ghz = true; break; } } EXPORT_SYMBOL_GPL(mt76x02_eeprom_parse_hw_cap); -bool mt76x02_ext_pa_enabled(struct mt76_dev *dev, enum nl80211_band band) +bool mt76x02_ext_pa_enabled(struct mt76x02_dev *dev, enum nl80211_band band) { u16 conf0 = mt76x02_eeprom_get(dev, MT_EE_NIC_CONF_0); @@ -101,7 +98,7 @@ bool mt76x02_ext_pa_enabled(struct mt76_dev *dev, enum nl80211_band band) } EXPORT_SYMBOL_GPL(mt76x02_ext_pa_enabled); -void mt76x02_get_rx_gain(struct mt76_dev *dev, enum nl80211_band band, +void mt76x02_get_rx_gain(struct mt76x02_dev *dev, enum nl80211_band band, u16 *rssi_offset, s8 *lna_2g, s8 *lna_5g) { u16 val; @@ -129,7 +126,7 @@ void mt76x02_get_rx_gain(struct mt76_dev *dev, enum nl80211_band band, } EXPORT_SYMBOL_GPL(mt76x02_get_rx_gain); -u8 mt76x02_get_lna_gain(struct mt76_dev *dev, +u8 mt76x02_get_lna_gain(struct mt76x02_dev *dev, s8 *lna_2g, s8 *lna_5g, struct ieee80211_channel *chan) { diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_eeprom.h b/drivers/net/wireless/mediatek/mt76/mt76x02_eeprom.h index bcd05f7c5f45..b3ec74835d10 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x02_eeprom.h +++ b/drivers/net/wireless/mediatek/mt76/mt76x02_eeprom.h @@ -18,6 +18,8 @@ #ifndef __MT76x02_EEPROM_H #define __MT76x02_EEPROM_H +#include "mt76x02.h" + enum mt76x02_eeprom_field { MT_EE_CHIP_ID = 0x000, MT_EE_VERSION = 0x002, @@ -168,44 +170,23 @@ static inline s8 mt76x02_rate_power_val(u8 val) } static inline int -mt76x02_eeprom_get(struct mt76_dev *dev, +mt76x02_eeprom_get(struct mt76x02_dev *dev, enum mt76x02_eeprom_field field) { if ((field & 1) || field >= __MT_EE_MAX) return -1; - return get_unaligned_le16(dev->eeprom.data + field); -} - -static inline bool -mt76x02_temp_tx_alc_enabled(struct mt76_dev *dev) -{ - u16 val; - - val = 
mt76x02_eeprom_get(dev, MT_EE_TX_POWER_EXT_PA_5G); - if (!(val & BIT(15))) - return false; - - return mt76x02_eeprom_get(dev, MT_EE_NIC_CONF_1) & - MT_EE_NIC_CONF_1_TEMP_TX_ALC; -} - -static inline bool -mt76x02_tssi_enabled(struct mt76_dev *dev) -{ - return !mt76x02_temp_tx_alc_enabled(dev) && - (mt76x02_eeprom_get(dev, MT_EE_NIC_CONF_1) & - MT_EE_NIC_CONF_1_TX_ALC_EN); + return get_unaligned_le16(dev->mt76.eeprom.data + field); } -bool mt76x02_ext_pa_enabled(struct mt76_dev *dev, enum nl80211_band band); -int mt76x02_get_efuse_data(struct mt76_dev *dev, u16 base, void *buf, +bool mt76x02_ext_pa_enabled(struct mt76x02_dev *dev, enum nl80211_band band); +int mt76x02_get_efuse_data(struct mt76x02_dev *dev, u16 base, void *buf, int len, enum mt76x02_eeprom_modes mode); -void mt76x02_get_rx_gain(struct mt76_dev *dev, enum nl80211_band band, +void mt76x02_get_rx_gain(struct mt76x02_dev *dev, enum nl80211_band band, u16 *rssi_offset, s8 *lna_2g, s8 *lna_5g); -u8 mt76x02_get_lna_gain(struct mt76_dev *dev, +u8 mt76x02_get_lna_gain(struct mt76x02_dev *dev, s8 *lna_2g, s8 *lna_5g, struct ieee80211_channel *chan); -void mt76x02_eeprom_parse_hw_cap(struct mt76_dev *dev); +void mt76x02_eeprom_parse_hw_cap(struct mt76x02_dev *dev); #endif /* __MT76x02_EEPROM_H */ diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c b/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c index df4366a702c9..10578e4cb269 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c @@ -15,9 +15,8 @@ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ -#include "mt76.h" -#include "mt76x02_regs.h" -#include "mt76x02_mac.h" +#include "mt76x02.h" +#include "mt76x02_trace.h" enum mt76x02_cipher_type mt76x02_mac_get_key_info(struct ieee80211_key_conf *key, u8 *key_data) @@ -46,8 +45,8 @@ mt76x02_mac_get_key_info(struct ieee80211_key_conf *key, u8 *key_data) } EXPORT_SYMBOL_GPL(mt76x02_mac_get_key_info); -int mt76x02_mac_shared_key_setup(struct mt76_dev *dev, u8 vif_idx, u8 key_idx, - struct ieee80211_key_conf *key) +int mt76x02_mac_shared_key_setup(struct mt76x02_dev *dev, u8 vif_idx, + u8 key_idx, struct ieee80211_key_conf *key) { enum mt76x02_cipher_type cipher; u8 key_data[32]; @@ -57,20 +56,20 @@ int mt76x02_mac_shared_key_setup(struct mt76_dev *dev, u8 vif_idx, u8 key_idx, if (cipher == MT_CIPHER_NONE && key) return -EOPNOTSUPP; - val = __mt76_rr(dev, MT_SKEY_MODE(vif_idx)); + val = mt76_rr(dev, MT_SKEY_MODE(vif_idx)); val &= ~(MT_SKEY_MODE_MASK << MT_SKEY_MODE_SHIFT(vif_idx, key_idx)); val |= cipher << MT_SKEY_MODE_SHIFT(vif_idx, key_idx); - __mt76_wr(dev, MT_SKEY_MODE(vif_idx), val); + mt76_wr(dev, MT_SKEY_MODE(vif_idx), val); - __mt76_wr_copy(dev, MT_SKEY(vif_idx, key_idx), key_data, - sizeof(key_data)); + mt76_wr_copy(dev, MT_SKEY(vif_idx, key_idx), key_data, + sizeof(key_data)); return 0; } EXPORT_SYMBOL_GPL(mt76x02_mac_shared_key_setup); -int mt76x02_mac_wcid_set_key(struct mt76_dev *dev, u8 idx, - struct ieee80211_key_conf *key) +int mt76x02_mac_wcid_set_key(struct mt76x02_dev *dev, u8 idx, + struct ieee80211_key_conf *key) { enum mt76x02_cipher_type cipher; u8 key_data[32]; @@ -80,25 +79,26 @@ int mt76x02_mac_wcid_set_key(struct mt76_dev *dev, u8 idx, if (cipher == MT_CIPHER_NONE && key) return -EOPNOTSUPP; - __mt76_wr_copy(dev, MT_WCID_KEY(idx), key_data, sizeof(key_data)); - __mt76_rmw_field(dev, MT_WCID_ATTR(idx), MT_WCID_ATTR_PKEY_MODE, cipher); + mt76_wr_copy(dev, MT_WCID_KEY(idx), key_data, sizeof(key_data)); + mt76_rmw_field(dev, 
MT_WCID_ATTR(idx), MT_WCID_ATTR_PKEY_MODE, cipher); memset(iv_data, 0, sizeof(iv_data)); if (key) { - __mt76_rmw_field(dev, MT_WCID_ATTR(idx), MT_WCID_ATTR_PAIRWISE, - !!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE)); + mt76_rmw_field(dev, MT_WCID_ATTR(idx), MT_WCID_ATTR_PAIRWISE, + !!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE)); iv_data[3] = key->keyidx << 6; if (cipher >= MT_CIPHER_TKIP) iv_data[3] |= 0x20; } - __mt76_wr_copy(dev, MT_WCID_IV(idx), iv_data, sizeof(iv_data)); + mt76_wr_copy(dev, MT_WCID_IV(idx), iv_data, sizeof(iv_data)); return 0; } EXPORT_SYMBOL_GPL(mt76x02_mac_wcid_set_key); -void mt76x02_mac_wcid_setup(struct mt76_dev *dev, u8 idx, u8 vif_idx, u8 *mac) +void mt76x02_mac_wcid_setup(struct mt76x02_dev *dev, u8 idx, + u8 vif_idx, u8 *mac) { struct mt76_wcid_addr addr = {}; u32 attr; @@ -106,10 +106,10 @@ void mt76x02_mac_wcid_setup(struct mt76_dev *dev, u8 idx, u8 vif_idx, u8 *mac) attr = FIELD_PREP(MT_WCID_ATTR_BSS_IDX, vif_idx & 7) | FIELD_PREP(MT_WCID_ATTR_BSS_IDX_EXT, !!(vif_idx & 8)); - __mt76_wr(dev, MT_WCID_ATTR(idx), attr); + mt76_wr(dev, MT_WCID_ATTR(idx), attr); - __mt76_wr(dev, MT_WCID_TX_RATE(idx), 0); - __mt76_wr(dev, MT_WCID_TX_RATE(idx) + 4, 0); + mt76_wr(dev, MT_WCID_TX_RATE(idx), 0); + mt76_wr(dev, MT_WCID_TX_RATE(idx) + 4, 0); if (idx >= 128) return; @@ -117,22 +117,22 @@ void mt76x02_mac_wcid_setup(struct mt76_dev *dev, u8 idx, u8 vif_idx, u8 *mac) if (mac) memcpy(addr.macaddr, mac, ETH_ALEN); - __mt76_wr_copy(dev, MT_WCID_ADDR(idx), &addr, sizeof(addr)); + mt76_wr_copy(dev, MT_WCID_ADDR(idx), &addr, sizeof(addr)); } EXPORT_SYMBOL_GPL(mt76x02_mac_wcid_setup); -void mt76x02_mac_wcid_set_drop(struct mt76_dev *dev, u8 idx, bool drop) +void mt76x02_mac_wcid_set_drop(struct mt76x02_dev *dev, u8 idx, bool drop) { - u32 val = __mt76_rr(dev, MT_WCID_DROP(idx)); + u32 val = mt76_rr(dev, MT_WCID_DROP(idx)); u32 bit = MT_WCID_DROP_MASK(idx); /* prevent unnecessary writes */ if ((val & bit) != (bit * drop)) - __mt76_wr(dev, MT_WCID_DROP(idx), (val & ~bit) | (bit * drop)); + mt76_wr(dev, MT_WCID_DROP(idx), (val & ~bit) | (bit * drop)); } EXPORT_SYMBOL_GPL(mt76x02_mac_wcid_set_drop); -void mt76x02_txq_init(struct mt76_dev *dev, struct ieee80211_txq *txq) +void mt76x02_txq_init(struct mt76x02_dev *dev, struct ieee80211_txq *txq) { struct mt76_txq *mtxq; @@ -152,55 +152,13 @@ void mt76x02_txq_init(struct mt76_dev *dev, struct ieee80211_txq *txq) mtxq->wcid = &mvif->group_wcid; } - mt76_txq_init(dev, txq); + mt76_txq_init(&dev->mt76, txq); } EXPORT_SYMBOL_GPL(mt76x02_txq_init); -void mt76x02_mac_fill_txwi(struct mt76x02_txwi *txwi, struct sk_buff *skb, - struct ieee80211_sta *sta, int len, u8 nss) -{ - struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); - struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; - u16 txwi_flags = 0; - - if (info->flags & IEEE80211_TX_CTL_LDPC) - txwi->rate |= cpu_to_le16(MT_RXWI_RATE_LDPC); - if ((info->flags & IEEE80211_TX_CTL_STBC) && nss == 1) - txwi->rate |= cpu_to_le16(MT_RXWI_RATE_STBC); - if (nss > 1 && sta && sta->smps_mode == IEEE80211_SMPS_DYNAMIC) - txwi_flags |= MT_TXWI_FLAGS_MMPS; - if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) - txwi->ack_ctl |= MT_TXWI_ACK_CTL_REQ; - if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) - txwi->ack_ctl |= MT_TXWI_ACK_CTL_NSEQ; - if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) - txwi->pktid |= MT_TXWI_PKTID_PROBE; - if ((info->flags & IEEE80211_TX_CTL_AMPDU) && sta) { - u8 ba_size = IEEE80211_MIN_AMPDU_BUF; - - ba_size <<= sta->ht_cap.ampdu_factor; - ba_size = min_t(int, 63, ba_size - 
1); - if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) - ba_size = 0; - txwi->ack_ctl |= FIELD_PREP(MT_TXWI_ACK_CTL_BA_WINDOW, ba_size); - - txwi_flags |= MT_TXWI_FLAGS_AMPDU | - FIELD_PREP(MT_TXWI_FLAGS_MPDU_DENSITY, - sta->ht_cap.ampdu_density); - } - - if (ieee80211_is_probe_resp(hdr->frame_control) || - ieee80211_is_beacon(hdr->frame_control)) - txwi_flags |= MT_TXWI_FLAGS_TS; - - txwi->flags |= cpu_to_le16(txwi_flags); - txwi->len_ctl = cpu_to_le16(len); -} -EXPORT_SYMBOL_GPL(mt76x02_mac_fill_txwi); - -__le16 -mt76x02_mac_tx_rate_val(struct mt76_dev *dev, - const struct ieee80211_tx_rate *rate, u8 *nss_val) +static __le16 +mt76x02_mac_tx_rate_val(struct mt76x02_dev *dev, + const struct ieee80211_tx_rate *rate, u8 *nss_val) { u16 rateval; u8 phy, rate_idx; @@ -225,10 +183,10 @@ mt76x02_mac_tx_rate_val(struct mt76_dev *dev, bw = 1; } else { const struct ieee80211_rate *r; - int band = dev->chandef.chan->band; + int band = dev->mt76.chandef.chan->band; u16 val; - r = &dev->hw->wiphy->bands[band]->bitrates[rate->idx]; + r = &dev->mt76.hw->wiphy->bands[band]->bitrates[rate->idx]; if (rate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE) val = r->hw_value_short; else @@ -248,24 +206,23 @@ mt76x02_mac_tx_rate_val(struct mt76_dev *dev, *nss_val = nss; return cpu_to_le16(rateval); } -EXPORT_SYMBOL_GPL(mt76x02_mac_tx_rate_val); -void mt76x02_mac_wcid_set_rate(struct mt76_dev *dev, struct mt76_wcid *wcid, - const struct ieee80211_tx_rate *rate) +void mt76x02_mac_wcid_set_rate(struct mt76x02_dev *dev, struct mt76_wcid *wcid, + const struct ieee80211_tx_rate *rate) { - spin_lock_bh(&dev->lock); + spin_lock_bh(&dev->mt76.lock); wcid->tx_rate = mt76x02_mac_tx_rate_val(dev, rate, &wcid->tx_rate_nss); wcid->tx_rate_set = true; - spin_unlock_bh(&dev->lock); + spin_unlock_bh(&dev->mt76.lock); } -bool mt76x02_mac_load_tx_status(struct mt76_dev *dev, - struct mt76x02_tx_status *stat) +bool mt76x02_mac_load_tx_status(struct mt76x02_dev *dev, + struct mt76x02_tx_status *stat) { u32 stat1, stat2; - stat2 = __mt76_rr(dev, MT_TX_STAT_FIFO_EXT); - stat1 = __mt76_rr(dev, MT_TX_STAT_FIFO); + stat2 = mt76_rr(dev, MT_TX_STAT_FIFO_EXT); + stat1 = mt76_rr(dev, MT_TX_STAT_FIFO); stat->valid = !!(stat1 & MT_TX_STAT_FIFO_VALID); if (!stat->valid) @@ -341,10 +298,103 @@ mt76x02_mac_process_tx_rate(struct ieee80211_tx_rate *txrate, u16 rate, return 0; } +void mt76x02_mac_write_txwi(struct mt76x02_dev *dev, struct mt76x02_txwi *txwi, + struct sk_buff *skb, struct mt76_wcid *wcid, + struct ieee80211_sta *sta, int len) +{ + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + struct ieee80211_tx_rate *rate = &info->control.rates[0]; + struct ieee80211_key_conf *key = info->control.hw_key; + u16 rate_ht_mask = FIELD_PREP(MT_RXWI_RATE_PHY, BIT(1) | BIT(2)); + u16 txwi_flags = 0; + u8 nss; + s8 txpwr_adj, max_txpwr_adj; + u8 ccmp_pn[8], nstreams = dev->mt76.chainmask & 0xf; + + memset(txwi, 0, sizeof(*txwi)); + + if (wcid) + txwi->wcid = wcid->idx; + else + txwi->wcid = 0xff; + + txwi->pktid = 1; + + if (wcid && wcid->sw_iv && key) { + u64 pn = atomic64_inc_return(&key->tx_pn); + ccmp_pn[0] = pn; + ccmp_pn[1] = pn >> 8; + ccmp_pn[2] = 0; + ccmp_pn[3] = 0x20 | (key->keyidx << 6); + ccmp_pn[4] = pn >> 16; + ccmp_pn[5] = pn >> 24; + ccmp_pn[6] = pn >> 32; + ccmp_pn[7] = pn >> 40; + txwi->iv = *((__le32 *)&ccmp_pn[0]); + txwi->eiv = *((__le32 *)&ccmp_pn[1]); + } + + spin_lock_bh(&dev->mt76.lock); + if (wcid && (rate->idx < 0 || !rate->count)) { + txwi->rate = 
wcid->tx_rate; + max_txpwr_adj = wcid->max_txpwr_adj; + nss = wcid->tx_rate_nss; + } else { + txwi->rate = mt76x02_mac_tx_rate_val(dev, rate, &nss); + max_txpwr_adj = mt76x02_tx_get_max_txpwr_adj(dev, rate); + } + spin_unlock_bh(&dev->mt76.lock); + + txpwr_adj = mt76x02_tx_get_txpwr_adj(dev, dev->mt76.txpower_conf, + max_txpwr_adj); + txwi->ctl2 = FIELD_PREP(MT_TX_PWR_ADJ, txpwr_adj); + + if (nstreams > 1 && mt76_rev(&dev->mt76) >= MT76XX_REV_E4) + txwi->txstream = 0x13; + else if (nstreams > 1 && mt76_rev(&dev->mt76) >= MT76XX_REV_E3 && + !(txwi->rate & cpu_to_le16(rate_ht_mask))) + txwi->txstream = 0x93; + + if (is_mt76x2(dev) && (info->flags & IEEE80211_TX_CTL_LDPC)) + txwi->rate |= cpu_to_le16(MT_RXWI_RATE_LDPC); + if ((info->flags & IEEE80211_TX_CTL_STBC) && nss == 1) + txwi->rate |= cpu_to_le16(MT_RXWI_RATE_STBC); + if (nss > 1 && sta && sta->smps_mode == IEEE80211_SMPS_DYNAMIC) + txwi_flags |= MT_TXWI_FLAGS_MMPS; + if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) + txwi->ack_ctl |= MT_TXWI_ACK_CTL_REQ; + if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) + txwi->ack_ctl |= MT_TXWI_ACK_CTL_NSEQ; + if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) + txwi->pktid |= MT_TXWI_PKTID_PROBE; + if ((info->flags & IEEE80211_TX_CTL_AMPDU) && sta) { + u8 ba_size = IEEE80211_MIN_AMPDU_BUF; + + ba_size <<= sta->ht_cap.ampdu_factor; + ba_size = min_t(int, 63, ba_size - 1); + if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) + ba_size = 0; + txwi->ack_ctl |= FIELD_PREP(MT_TXWI_ACK_CTL_BA_WINDOW, ba_size); + + txwi_flags |= MT_TXWI_FLAGS_AMPDU | + FIELD_PREP(MT_TXWI_FLAGS_MPDU_DENSITY, + sta->ht_cap.ampdu_density); + } + + if (ieee80211_is_probe_resp(hdr->frame_control) || + ieee80211_is_beacon(hdr->frame_control)) + txwi_flags |= MT_TXWI_FLAGS_TS; + + txwi->flags |= cpu_to_le16(txwi_flags); + txwi->len_ctl = cpu_to_le16(len); +} +EXPORT_SYMBOL_GPL(mt76x02_mac_write_txwi); + static void -mt76x02_mac_fill_tx_status(struct mt76_dev *dev, - struct ieee80211_tx_info *info, - struct mt76x02_tx_status *st, int n_frames) +mt76x02_mac_fill_tx_status(struct mt76x02_dev *dev, + struct ieee80211_tx_info *info, + struct mt76x02_tx_status *st, int n_frames) { struct ieee80211_tx_rate *rate = info->status.rates; int cur_idx, last_rate; @@ -355,7 +405,7 @@ mt76x02_mac_fill_tx_status(struct mt76_dev *dev, last_rate = min_t(int, st->retry, IEEE80211_TX_MAX_RATES - 1); mt76x02_mac_process_tx_rate(&rate[last_rate], st->rate, - dev->chandef.chan->band); + dev->mt76.chandef.chan->band); if (last_rate < IEEE80211_TX_MAX_RATES - 1) rate[last_rate + 1].idx = -1; @@ -383,8 +433,8 @@ mt76x02_mac_fill_tx_status(struct mt76_dev *dev, info->flags |= IEEE80211_TX_STAT_ACK; } -void mt76x02_send_tx_status(struct mt76_dev *dev, - struct mt76x02_tx_status *stat, u8 *update) +void mt76x02_send_tx_status(struct mt76x02_dev *dev, + struct mt76x02_tx_status *stat, u8 *update) { struct ieee80211_tx_info info = {}; struct ieee80211_sta *sta = NULL; @@ -392,8 +442,8 @@ void mt76x02_send_tx_status(struct mt76_dev *dev, struct mt76x02_sta *msta = NULL; rcu_read_lock(); - if (stat->wcid < ARRAY_SIZE(dev->wcid)) - wcid = rcu_dereference(dev->wcid[stat->wcid]); + if (stat->wcid < ARRAY_SIZE(dev->mt76.wcid)) + wcid = rcu_dereference(dev->mt76.wcid[stat->wcid]); if (wcid) { void *priv; @@ -418,7 +468,7 @@ void mt76x02_send_tx_status(struct mt76_dev *dev, } mt76x02_mac_fill_tx_status(dev, &info, &msta->status, - msta->n_frames); + msta->n_frames); msta->status = *stat; msta->n_frames = 1; @@ -428,7 +478,7 @@ void mt76x02_send_tx_status(struct mt76_dev 
*dev, *update = 1; } - ieee80211_tx_status_noskb(dev->hw, sta, &info); + ieee80211_tx_status_noskb(dev->mt76.hw, sta, &info); out: rcu_read_unlock(); @@ -503,20 +553,185 @@ mt76x02_mac_process_rate(struct mt76_rx_status *status, u16 rate) } EXPORT_SYMBOL_GPL(mt76x02_mac_process_rate); -void mt76x02_mac_setaddr(struct mt76_dev *dev, u8 *addr) +void mt76x02_mac_setaddr(struct mt76x02_dev *dev, u8 *addr) { - ether_addr_copy(dev->macaddr, addr); + ether_addr_copy(dev->mt76.macaddr, addr); - if (!is_valid_ether_addr(dev->macaddr)) { - eth_random_addr(dev->macaddr); - dev_info(dev->dev, + if (!is_valid_ether_addr(dev->mt76.macaddr)) { + eth_random_addr(dev->mt76.macaddr); + dev_info(dev->mt76.dev, "Invalid MAC address, using random address %pM\n", - dev->macaddr); + dev->mt76.macaddr); } - __mt76_wr(dev, MT_MAC_ADDR_DW0, get_unaligned_le32(dev->macaddr)); - __mt76_wr(dev, MT_MAC_ADDR_DW1, - get_unaligned_le16(dev->macaddr + 4) | - FIELD_PREP(MT_MAC_ADDR_DW1_U2ME_MASK, 0xff)); + mt76_wr(dev, MT_MAC_ADDR_DW0, get_unaligned_le32(dev->mt76.macaddr)); + mt76_wr(dev, MT_MAC_ADDR_DW1, + get_unaligned_le16(dev->mt76.macaddr + 4) | + FIELD_PREP(MT_MAC_ADDR_DW1_U2ME_MASK, 0xff)); } EXPORT_SYMBOL_GPL(mt76x02_mac_setaddr); + +static int +mt76x02_mac_get_rssi(struct mt76x02_dev *dev, s8 rssi, int chain) +{ + struct mt76x02_rx_freq_cal *cal = &dev->cal.rx; + + rssi += cal->rssi_offset[chain]; + rssi -= cal->lna_gain; + + return rssi; +} + +int mt76x02_mac_process_rx(struct mt76x02_dev *dev, struct sk_buff *skb, + void *rxi) +{ + struct mt76_rx_status *status = (struct mt76_rx_status *) skb->cb; + struct mt76x02_rxwi *rxwi = rxi; + struct mt76x02_sta *sta; + u32 rxinfo = le32_to_cpu(rxwi->rxinfo); + u32 ctl = le32_to_cpu(rxwi->ctl); + u16 rate = le16_to_cpu(rxwi->rate); + u16 tid_sn = le16_to_cpu(rxwi->tid_sn); + bool unicast = rxwi->rxinfo & cpu_to_le32(MT_RXINFO_UNICAST); + int i, pad_len = 0, nstreams = dev->mt76.chainmask & 0xf; + s8 signal; + u8 pn_len; + u8 wcid; + int len; + + if (!test_bit(MT76_STATE_RUNNING, &dev->mt76.state)) + return -EINVAL; + + if (rxinfo & MT_RXINFO_L2PAD) + pad_len += 2; + + if (rxinfo & MT_RXINFO_DECRYPT) { + status->flag |= RX_FLAG_DECRYPTED; + status->flag |= RX_FLAG_MMIC_STRIPPED; + status->flag |= RX_FLAG_MIC_STRIPPED; + status->flag |= RX_FLAG_IV_STRIPPED; + } + + wcid = FIELD_GET(MT_RXWI_CTL_WCID, ctl); + sta = mt76x02_rx_get_sta(&dev->mt76, wcid); + status->wcid = mt76x02_rx_get_sta_wcid(sta, unicast); + + len = FIELD_GET(MT_RXWI_CTL_MPDU_LEN, ctl); + pn_len = FIELD_GET(MT_RXINFO_PN_LEN, rxinfo); + if (pn_len) { + int offset = ieee80211_get_hdrlen_from_skb(skb) + pad_len; + u8 *data = skb->data + offset; + + status->iv[0] = data[7]; + status->iv[1] = data[6]; + status->iv[2] = data[5]; + status->iv[3] = data[4]; + status->iv[4] = data[1]; + status->iv[5] = data[0]; + + /* + * Driver CCMP validation can't deal with fragments. + * Let mac80211 take care of it. 
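The six status->iv bytes copied above are the 48-bit CCMP packet number taken from the on-air header (PN0/PN1 in the IV, PN2..PN5 in the extended IV; byte 2 is reserved and byte 3 carries the key index and ExtIV bit), stored most-significant byte first — the same layout the tx path builds into ccmp_pn[] in mt76x02_mac_write_txwi() above. A standalone sketch of that extraction, with an illustrative header value:

#include <stdint.h>
#include <stdio.h>

/* On-air CCMP header layout, as indexed by the rx path above:
 * data[0]=PN0 data[1]=PN1 data[2]=rsvd data[3]=keyid/ExtIV
 * data[4]=PN2 data[5]=PN3 data[6]=PN4 data[7]=PN5 */
static void ccmp_pn_from_hdr(const uint8_t *data, uint8_t iv[6])
{
	iv[0] = data[7];	/* PN5 (most significant) */
	iv[1] = data[6];	/* PN4 */
	iv[2] = data[5];	/* PN3 */
	iv[3] = data[4];	/* PN2 */
	iv[4] = data[1];	/* PN1 */
	iv[5] = data[0];	/* PN0 (least significant) */
}

int main(void)
{
	/* illustrative header: PN = 0x060504030201, key index 0, ExtIV set */
	const uint8_t hdr[8] = { 0x01, 0x02, 0x00, 0x20, 0x03, 0x04, 0x05, 0x06 };
	uint8_t iv[6];
	int i;

	ccmp_pn_from_hdr(hdr, iv);
	for (i = 0; i < 6; i++)
		printf("%02x", iv[i]);
	printf("\n");	/* prints 060504030201 */
	return 0;
}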
+ */ + if (rxinfo & MT_RXINFO_FRAG) { + status->flag &= ~RX_FLAG_IV_STRIPPED; + } else { + pad_len += pn_len << 2; + len -= pn_len << 2; + } + } + + mt76x02_remove_hdr_pad(skb, pad_len); + + if ((rxinfo & MT_RXINFO_BA) && !(rxinfo & MT_RXINFO_NULL)) + status->aggr = true; + + if (WARN_ON_ONCE(len > skb->len)) + return -EINVAL; + + pskb_trim(skb, len); + + status->chains = BIT(0); + signal = mt76x02_mac_get_rssi(dev, rxwi->rssi[0], 0); + for (i = 1; i < nstreams; i++) { + status->chains |= BIT(i); + status->chain_signal[i] = mt76x02_mac_get_rssi(dev, + rxwi->rssi[i], + i); + signal = max_t(s8, signal, status->chain_signal[i]); + } + status->signal = signal; + status->freq = dev->mt76.chandef.chan->center_freq; + status->band = dev->mt76.chandef.chan->band; + + status->tid = FIELD_GET(MT_RXWI_TID, tid_sn); + status->seqno = FIELD_GET(MT_RXWI_SN, tid_sn); + + if (sta) { + ewma_signal_add(&sta->rssi, status->signal); + sta->inactive_count = 0; + } + + return mt76x02_mac_process_rate(status, rate); +} + +void mt76x02_mac_poll_tx_status(struct mt76x02_dev *dev, bool irq) +{ + struct mt76x02_tx_status stat = {}; + unsigned long flags; + u8 update = 1; + bool ret; + + if (!test_bit(MT76_STATE_RUNNING, &dev->mt76.state)) + return; + + trace_mac_txstat_poll(dev); + + while (!irq || !kfifo_is_full(&dev->txstatus_fifo)) { + spin_lock_irqsave(&dev->mt76.mmio.irq_lock, flags); + ret = mt76x02_mac_load_tx_status(dev, &stat); + spin_unlock_irqrestore(&dev->mt76.mmio.irq_lock, flags); + + if (!ret) + break; + + trace_mac_txstat_fetch(dev, &stat); + + if (!irq) { + mt76x02_send_tx_status(dev, &stat, &update); + continue; + } + + kfifo_put(&dev->txstatus_fifo, stat); + } +} +EXPORT_SYMBOL_GPL(mt76x02_mac_poll_tx_status); + +static void +mt76x02_mac_queue_txdone(struct mt76x02_dev *dev, struct sk_buff *skb, + void *txwi_ptr) +{ + struct mt76x02_tx_info *txi = mt76x02_skb_tx_info(skb); + struct mt76x02_txwi *txwi = txwi_ptr; + + mt76x02_mac_poll_tx_status(dev, false); + + txi->tries = 0; + txi->jiffies = jiffies; + txi->wcid = txwi->wcid; + txi->pktid = txwi->pktid; + trace_mac_txdone_add(dev, txwi->wcid, txwi->pktid); + mt76x02_tx_complete(&dev->mt76, skb); +} + +void mt76x02_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue *q, + struct mt76_queue_entry *e, bool flush) +{ + struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76); + + if (e->txwi) + mt76x02_mac_queue_txdone(dev, e->skb, &e->txwi->txwi); + else + dev_kfree_skb_any(e->skb); +} +EXPORT_SYMBOL_GPL(mt76x02_tx_complete_skb); diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_mac.h b/drivers/net/wireless/mediatek/mt76/mt76x02_mac.h index 62072291e416..d99c18743969 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x02_mac.h +++ b/drivers/net/wireless/mediatek/mt76/mt76x02_mac.h @@ -20,6 +20,8 @@ #include <linux/average.h> +struct mt76x02_dev; + struct mt76x02_tx_status { u8 valid:1; u8 success:1; @@ -40,6 +42,15 @@ struct mt76x02_vif { struct mt76_wcid group_wcid; }; +struct mt76x02_tx_info { + unsigned long jiffies; + u8 tries; + + u8 wcid; + u8 pktid; + u8 retry; +}; + DECLARE_EWMA(signal, 10, 8); struct mt76x02_sta { @@ -179,28 +190,40 @@ static inline bool mt76x02_wait_for_mac(struct mt76_dev *dev) return false; } -void mt76x02_txq_init(struct mt76_dev *dev, struct ieee80211_txq *txq); -void mt76x02_mac_fill_txwi(struct mt76x02_txwi *txwi, struct sk_buff *skb, - struct ieee80211_sta *sta, int len, u8 nss); +static inline struct mt76x02_tx_info * +mt76x02_skb_tx_info(struct sk_buff *skb) +{ + struct ieee80211_tx_info 
*info = IEEE80211_SKB_CB(skb); + + return (void *)info->status.status_driver_data; +} + +void mt76x02_txq_init(struct mt76x02_dev *dev, struct ieee80211_txq *txq); enum mt76x02_cipher_type mt76x02_mac_get_key_info(struct ieee80211_key_conf *key, u8 *key_data); -int mt76x02_mac_shared_key_setup(struct mt76_dev *dev, u8 vif_idx, u8 key_idx, - struct ieee80211_key_conf *key); -int mt76x02_mac_wcid_set_key(struct mt76_dev *dev, u8 idx, - struct ieee80211_key_conf *key); -void mt76x02_mac_wcid_setup(struct mt76_dev *dev, u8 idx, u8 vif_idx, u8 *mac); -void mt76x02_mac_wcid_set_drop(struct mt76_dev *dev, u8 idx, bool drop); -void mt76x02_mac_wcid_set_rate(struct mt76_dev *dev, struct mt76_wcid *wcid, - const struct ieee80211_tx_rate *rate); -__le16 -mt76x02_mac_tx_rate_val(struct mt76_dev *dev, - const struct ieee80211_tx_rate *rate, u8 *nss_val); -bool mt76x02_mac_load_tx_status(struct mt76_dev *dev, - struct mt76x02_tx_status *stat); -void mt76x02_send_tx_status(struct mt76_dev *dev, - struct mt76x02_tx_status *stat, u8 *update); +int mt76x02_mac_shared_key_setup(struct mt76x02_dev *dev, u8 vif_idx, + u8 key_idx, struct ieee80211_key_conf *key); +int mt76x02_mac_wcid_set_key(struct mt76x02_dev *dev, u8 idx, + struct ieee80211_key_conf *key); +void mt76x02_mac_wcid_setup(struct mt76x02_dev *dev, u8 idx, u8 vif_idx, + u8 *mac); +void mt76x02_mac_wcid_set_drop(struct mt76x02_dev *dev, u8 idx, bool drop); +void mt76x02_mac_wcid_set_rate(struct mt76x02_dev *dev, struct mt76_wcid *wcid, + const struct ieee80211_tx_rate *rate); +bool mt76x02_mac_load_tx_status(struct mt76x02_dev *dev, + struct mt76x02_tx_status *stat); +void mt76x02_send_tx_status(struct mt76x02_dev *dev, + struct mt76x02_tx_status *stat, u8 *update); +int mt76x02_mac_process_rx(struct mt76x02_dev *dev, struct sk_buff *skb, + void *rxi); int mt76x02_mac_process_rate(struct mt76_rx_status *status, u16 rate); -void mt76x02_mac_setaddr(struct mt76_dev *dev, u8 *addr); +void mt76x02_mac_setaddr(struct mt76x02_dev *dev, u8 *addr); +void mt76x02_mac_write_txwi(struct mt76x02_dev *dev, struct mt76x02_txwi *txwi, + struct sk_buff *skb, struct mt76_wcid *wcid, + struct ieee80211_sta *sta, int len); +void mt76x02_mac_poll_tx_status(struct mt76x02_dev *dev, bool irq); +void mt76x02_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue *q, + struct mt76_queue_entry *e, bool flush); #endif diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_mcu.c b/drivers/net/wireless/mediatek/mt76/mt76x02_mcu.c index 6d565133b7af..1b853bb723fb 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x02_mcu.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x02_mcu.c @@ -19,9 +19,7 @@ #include <linux/firmware.h> #include <linux/delay.h> -#include "mt76.h" #include "mt76x02_mcu.h" -#include "mt76x02_dma.h" struct sk_buff *mt76x02_mcu_msg_alloc(const void *data, int len) { @@ -37,7 +35,7 @@ struct sk_buff *mt76x02_mcu_msg_alloc(const void *data, int len) EXPORT_SYMBOL_GPL(mt76x02_mcu_msg_alloc); static struct sk_buff * -mt76x02_mcu_get_response(struct mt76_dev *dev, unsigned long expires) +mt76x02_mcu_get_response(struct mt76x02_dev *dev, unsigned long expires) { unsigned long timeout; @@ -45,17 +43,17 @@ mt76x02_mcu_get_response(struct mt76_dev *dev, unsigned long expires) return NULL; timeout = expires - jiffies; - wait_event_timeout(dev->mmio.mcu.wait, - !skb_queue_empty(&dev->mmio.mcu.res_q), + wait_event_timeout(dev->mt76.mmio.mcu.wait, + !skb_queue_empty(&dev->mt76.mmio.mcu.res_q), timeout); - return skb_dequeue(&dev->mmio.mcu.res_q); + return 
skb_dequeue(&dev->mt76.mmio.mcu.res_q); } static int -mt76x02_tx_queue_mcu(struct mt76_dev *dev, enum mt76_txq_id qid, +mt76x02_tx_queue_mcu(struct mt76x02_dev *dev, enum mt76_txq_id qid, struct sk_buff *skb, int cmd, int seq) { - struct mt76_queue *q = &dev->q_tx[qid]; + struct mt76_queue *q = &dev->mt76.q_tx[qid]; struct mt76_queue_buf buf; dma_addr_t addr; u32 tx_info; @@ -66,24 +64,26 @@ mt76x02_tx_queue_mcu(struct mt76_dev *dev, enum mt76_txq_id qid, FIELD_PREP(MT_MCU_MSG_PORT, CPU_TX_PORT) | FIELD_PREP(MT_MCU_MSG_LEN, skb->len); - addr = dma_map_single(dev->dev, skb->data, skb->len, + addr = dma_map_single(dev->mt76.dev, skb->data, skb->len, DMA_TO_DEVICE); - if (dma_mapping_error(dev->dev, addr)) + if (dma_mapping_error(dev->mt76.dev, addr)) return -ENOMEM; buf.addr = addr; buf.len = skb->len; + spin_lock_bh(&q->lock); - dev->queue_ops->add_buf(dev, q, &buf, 1, tx_info, skb, NULL); - dev->queue_ops->kick(dev, q); + mt76_queue_add_buf(dev, q, &buf, 1, tx_info, skb, NULL); + mt76_queue_kick(dev, q); spin_unlock_bh(&q->lock); return 0; } -int mt76x02_mcu_msg_send(struct mt76_dev *dev, struct sk_buff *skb, +int mt76x02_mcu_msg_send(struct mt76_dev *mdev, struct sk_buff *skb, int cmd, bool wait_resp) { + struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76); unsigned long expires = jiffies + HZ; int ret; u8 seq; @@ -91,11 +91,11 @@ int mt76x02_mcu_msg_send(struct mt76_dev *dev, struct sk_buff *skb, if (!skb) return -EINVAL; - mutex_lock(&dev->mmio.mcu.mutex); + mutex_lock(&mdev->mmio.mcu.mutex); - seq = ++dev->mmio.mcu.msg_seq & 0xf; + seq = ++mdev->mmio.mcu.msg_seq & 0xf; if (!seq) - seq = ++dev->mmio.mcu.msg_seq & 0xf; + seq = ++mdev->mmio.mcu.msg_seq & 0xf; ret = mt76x02_tx_queue_mcu(dev, MT_TXQ_MCU, skb, cmd, seq); if (ret) @@ -107,7 +107,7 @@ int mt76x02_mcu_msg_send(struct mt76_dev *dev, struct sk_buff *skb, skb = mt76x02_mcu_get_response(dev, expires); if (!skb) { - dev_err(dev->dev, + dev_err(mdev->dev, "MCU message %d (seq %d) timed out\n", cmd, seq); ret = -ETIMEDOUT; @@ -125,13 +125,13 @@ int mt76x02_mcu_msg_send(struct mt76_dev *dev, struct sk_buff *skb, } out: - mutex_unlock(&dev->mmio.mcu.mutex); + mutex_unlock(&mdev->mmio.mcu.mutex); return ret; } EXPORT_SYMBOL_GPL(mt76x02_mcu_msg_send); -int mt76x02_mcu_function_select(struct mt76_dev *dev, +int mt76x02_mcu_function_select(struct mt76x02_dev *dev, enum mcu_function func, u32 val, bool wait_resp) { @@ -144,13 +144,12 @@ int mt76x02_mcu_function_select(struct mt76_dev *dev, .value = cpu_to_le32(val), }; - skb = dev->mcu_ops->mcu_msg_alloc(&msg, sizeof(msg)); - return dev->mcu_ops->mcu_send_msg(dev, skb, CMD_FUN_SET_OP, - wait_resp); + skb = mt76_mcu_msg_alloc(dev, &msg, sizeof(msg)); + return mt76_mcu_send_msg(dev, skb, CMD_FUN_SET_OP, wait_resp); } EXPORT_SYMBOL_GPL(mt76x02_mcu_function_select); -int mt76x02_mcu_set_radio_state(struct mt76_dev *dev, bool on, +int mt76x02_mcu_set_radio_state(struct mt76x02_dev *dev, bool on, bool wait_resp) { struct sk_buff *skb; @@ -162,13 +161,12 @@ int mt76x02_mcu_set_radio_state(struct mt76_dev *dev, bool on, .level = cpu_to_le32(0), }; - skb = dev->mcu_ops->mcu_msg_alloc(&msg, sizeof(msg)); - return dev->mcu_ops->mcu_send_msg(dev, skb, CMD_POWER_SAVING_OP, - wait_resp); + skb = mt76_mcu_msg_alloc(dev, &msg, sizeof(msg)); + return mt76_mcu_send_msg(dev, skb, CMD_POWER_SAVING_OP, wait_resp); } EXPORT_SYMBOL_GPL(mt76x02_mcu_set_radio_state); -int mt76x02_mcu_calibrate(struct mt76_dev *dev, int type, +int mt76x02_mcu_calibrate(struct mt76x02_dev *dev, int type, u32 param, 
bool wait) { struct sk_buff *skb; @@ -182,44 +180,44 @@ int mt76x02_mcu_calibrate(struct mt76_dev *dev, int type, int ret; if (wait) - dev->bus->rmw(dev, MT_MCU_COM_REG0, BIT(31), 0); + mt76_rmw(dev, MT_MCU_COM_REG0, BIT(31), 0); - skb = dev->mcu_ops->mcu_msg_alloc(&msg, sizeof(msg)); - ret = dev->mcu_ops->mcu_send_msg(dev, skb, CMD_CALIBRATION_OP, true); + skb = mt76_mcu_msg_alloc(dev, &msg, sizeof(msg)); + ret = mt76_mcu_send_msg(dev, skb, CMD_CALIBRATION_OP, true); if (ret) return ret; if (wait && - WARN_ON(!__mt76_poll_msec(dev, MT_MCU_COM_REG0, - BIT(31), BIT(31), 100))) + WARN_ON(!mt76_poll_msec(dev, MT_MCU_COM_REG0, + BIT(31), BIT(31), 100))) return -ETIMEDOUT; return 0; } EXPORT_SYMBOL_GPL(mt76x02_mcu_calibrate); -int mt76x02_mcu_cleanup(struct mt76_dev *dev) +int mt76x02_mcu_cleanup(struct mt76x02_dev *dev) { struct sk_buff *skb; - dev->bus->wr(dev, MT_MCU_INT_LEVEL, 1); + mt76_wr(dev, MT_MCU_INT_LEVEL, 1); usleep_range(20000, 30000); - while ((skb = skb_dequeue(&dev->mmio.mcu.res_q)) != NULL) + while ((skb = skb_dequeue(&dev->mt76.mmio.mcu.res_q)) != NULL) dev_kfree_skb(skb); return 0; } EXPORT_SYMBOL_GPL(mt76x02_mcu_cleanup); -void mt76x02_set_ethtool_fwver(struct mt76_dev *dev, +void mt76x02_set_ethtool_fwver(struct mt76x02_dev *dev, const struct mt76x02_fw_header *h) { u16 bld = le16_to_cpu(h->build_ver); u16 ver = le16_to_cpu(h->fw_ver); - snprintf(dev->hw->wiphy->fw_version, - sizeof(dev->hw->wiphy->fw_version), + snprintf(dev->mt76.hw->wiphy->fw_version, + sizeof(dev->mt76.hw->wiphy->fw_version), "%d.%d.%02d-b%x", (ver >> 12) & 0xf, (ver >> 8) & 0xf, ver & 0xf, bld); } diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_mcu.h b/drivers/net/wireless/mediatek/mt76/mt76x02_mcu.h index d30a58b5df29..2d8fd2514570 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x02_mcu.h +++ b/drivers/net/wireless/mediatek/mt76/mt76x02_mcu.h @@ -15,7 +15,9 @@ */ #ifndef __MT76x02_MCU_H -#define __MT76x0x_MCU_H +#define __MT76x02_MCU_H + +#include "mt76x02.h" #define MT_MCU_RESET_CTL 0x070C #define MT_MCU_INT_LEVEL 0x0718 @@ -94,18 +96,18 @@ struct mt76x02_patch_header { u8 pad[2]; }; -int mt76x02_mcu_cleanup(struct mt76_dev *dev); -int mt76x02_mcu_calibrate(struct mt76_dev *dev, int type, +int mt76x02_mcu_cleanup(struct mt76x02_dev *dev); +int mt76x02_mcu_calibrate(struct mt76x02_dev *dev, int type, u32 param, bool wait); struct sk_buff *mt76x02_mcu_msg_alloc(const void *data, int len); -int mt76x02_mcu_msg_send(struct mt76_dev *dev, struct sk_buff *skb, +int mt76x02_mcu_msg_send(struct mt76_dev *mdev, struct sk_buff *skb, int cmd, bool wait_resp); -int mt76x02_mcu_function_select(struct mt76_dev *dev, +int mt76x02_mcu_function_select(struct mt76x02_dev *dev, enum mcu_function func, u32 val, bool wait_resp); -int mt76x02_mcu_set_radio_state(struct mt76_dev *dev, bool on, +int mt76x02_mcu_set_radio_state(struct mt76x02_dev *dev, bool on, bool wait_resp); -void mt76x02_set_ethtool_fwver(struct mt76_dev *dev, +void mt76x02_set_ethtool_fwver(struct mt76x02_dev *dev, const struct mt76x02_fw_header *h); #endif /* __MT76x02_MCU_H */ diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c b/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c index 1146fbfd8df5..39f092034240 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c @@ -16,23 +16,22 @@ */ #include <linux/kernel.h> +#include <linux/irq.h> -#include "mt76.h" -#include "mt76x02_dma.h" -#include "mt76x02_util.h" -#include "mt76x02_mac.h" +#include "mt76x02.h" +#include 
"mt76x02_trace.h" static int -mt76x02_init_tx_queue(struct mt76_dev *dev, struct mt76_queue *q, +mt76x02_init_tx_queue(struct mt76x02_dev *dev, struct mt76_queue *q, int idx, int n_desc) { int ret; - q->regs = dev->mmio.regs + MT_TX_RING_BASE + idx * MT_RING_SIZE; + q->regs = dev->mt76.mmio.regs + MT_TX_RING_BASE + idx * MT_RING_SIZE; q->ndesc = n_desc; q->hw_idx = idx; - ret = __mt76_queue_alloc(dev, q); + ret = mt76_queue_alloc(dev, q); if (ret) return ret; @@ -42,16 +41,16 @@ mt76x02_init_tx_queue(struct mt76_dev *dev, struct mt76_queue *q, } static int -mt76x02_init_rx_queue(struct mt76_dev *dev, struct mt76_queue *q, +mt76x02_init_rx_queue(struct mt76x02_dev *dev, struct mt76_queue *q, int idx, int n_desc, int bufsize) { int ret; - q->regs = dev->mmio.regs + MT_RX_RING_BASE + idx * MT_RING_SIZE; + q->regs = dev->mt76.mmio.regs + MT_RX_RING_BASE + idx * MT_RING_SIZE; q->ndesc = n_desc; q->buf_size = bufsize; - ret = __mt76_queue_alloc(dev, q); + ret = mt76_queue_alloc(dev, q); if (ret) return ret; @@ -60,100 +59,200 @@ mt76x02_init_rx_queue(struct mt76_dev *dev, struct mt76_queue *q, return 0; } -int mt76x02_dma_init(struct mt76_dev *dev) +static void mt76x02_process_tx_status_fifo(struct mt76x02_dev *dev) +{ + struct mt76x02_tx_status stat; + u8 update = 1; + + while (kfifo_get(&dev->txstatus_fifo, &stat)) + mt76x02_send_tx_status(dev, &stat, &update); +} + +static void mt76x02_tx_tasklet(unsigned long data) +{ + struct mt76x02_dev *dev = (struct mt76x02_dev *)data; + int i; + + mt76x02_process_tx_status_fifo(dev); + + for (i = MT_TXQ_MCU; i >= 0; i--) + mt76_queue_tx_cleanup(dev, i, false); + + mt76x02_mac_poll_tx_status(dev, false); + mt76x02_irq_enable(dev, MT_INT_TX_DONE_ALL); +} + +int mt76x02_dma_init(struct mt76x02_dev *dev) { struct mt76_txwi_cache __maybe_unused *t; + int i, ret, fifo_size; struct mt76_queue *q; - int i, ret; + void *status_fifo; BUILD_BUG_ON(sizeof(t->txwi) < sizeof(struct mt76x02_txwi)); BUILD_BUG_ON(sizeof(struct mt76x02_rxwi) > MT_RX_HEADROOM); - mt76_dma_attach(dev); - __mt76_wr(dev, MT_WPDMA_RST_IDX, ~0); + fifo_size = roundup_pow_of_two(32 * sizeof(struct mt76x02_tx_status)); + status_fifo = devm_kzalloc(dev->mt76.dev, fifo_size, GFP_KERNEL); + if (!status_fifo) + return -ENOMEM; + + tasklet_init(&dev->tx_tasklet, mt76x02_tx_tasklet, (unsigned long) dev); + kfifo_init(&dev->txstatus_fifo, status_fifo, fifo_size); + + mt76_dma_attach(&dev->mt76); + + mt76_wr(dev, MT_WPDMA_RST_IDX, ~0); for (i = 0; i < IEEE80211_NUM_ACS; i++) { - ret = mt76x02_init_tx_queue(dev, &dev->q_tx[i], + ret = mt76x02_init_tx_queue(dev, &dev->mt76.q_tx[i], mt76_ac_to_hwq(i), MT_TX_RING_SIZE); if (ret) return ret; } - ret = mt76x02_init_tx_queue(dev, &dev->q_tx[MT_TXQ_PSD], + ret = mt76x02_init_tx_queue(dev, &dev->mt76.q_tx[MT_TXQ_PSD], MT_TX_HW_QUEUE_MGMT, MT_TX_RING_SIZE); if (ret) return ret; - ret = mt76x02_init_tx_queue(dev, &dev->q_tx[MT_TXQ_MCU], + ret = mt76x02_init_tx_queue(dev, &dev->mt76.q_tx[MT_TXQ_MCU], MT_TX_HW_QUEUE_MCU, MT_MCU_RING_SIZE); if (ret) return ret; - ret = mt76x02_init_rx_queue(dev, &dev->q_rx[MT_RXQ_MCU], 1, + ret = mt76x02_init_rx_queue(dev, &dev->mt76.q_rx[MT_RXQ_MCU], 1, MT_MCU_RING_SIZE, MT_RX_BUF_SIZE); if (ret) return ret; - q = &dev->q_rx[MT_RXQ_MAIN]; + q = &dev->mt76.q_rx[MT_RXQ_MAIN]; q->buf_offset = MT_RX_HEADROOM - sizeof(struct mt76x02_rxwi); ret = mt76x02_init_rx_queue(dev, q, 0, MT76X02_RX_RING_SIZE, MT_RX_BUF_SIZE); if (ret) return ret; - return __mt76_init_queues(dev); + return mt76_init_queues(dev); } 
EXPORT_SYMBOL_GPL(mt76x02_dma_init); -void mt76x02_set_irq_mask(struct mt76_dev *dev, u32 clear, u32 set) +void mt76x02_rx_poll_complete(struct mt76_dev *mdev, enum mt76_rxq_id q) +{ + struct mt76x02_dev *dev; + + dev = container_of(mdev, struct mt76x02_dev, mt76); + mt76x02_irq_enable(dev, MT_INT_RX_DONE(q)); +} +EXPORT_SYMBOL_GPL(mt76x02_rx_poll_complete); + +irqreturn_t mt76x02_irq_handler(int irq, void *dev_instance) +{ + struct mt76x02_dev *dev = dev_instance; + u32 intr; + + intr = mt76_rr(dev, MT_INT_SOURCE_CSR); + mt76_wr(dev, MT_INT_SOURCE_CSR, intr); + + if (!test_bit(MT76_STATE_INITIALIZED, &dev->mt76.state)) + return IRQ_NONE; + + trace_dev_irq(dev, intr, dev->mt76.mmio.irqmask); + + intr &= dev->mt76.mmio.irqmask; + + if (intr & MT_INT_TX_DONE_ALL) { + mt76x02_irq_disable(dev, MT_INT_TX_DONE_ALL); + tasklet_schedule(&dev->tx_tasklet); + } + + if (intr & MT_INT_RX_DONE(0)) { + mt76x02_irq_disable(dev, MT_INT_RX_DONE(0)); + napi_schedule(&dev->mt76.napi[0]); + } + + if (intr & MT_INT_RX_DONE(1)) { + mt76x02_irq_disable(dev, MT_INT_RX_DONE(1)); + napi_schedule(&dev->mt76.napi[1]); + } + + if (intr & MT_INT_PRE_TBTT) + tasklet_schedule(&dev->pre_tbtt_tasklet); + + /* send buffered multicast frames now */ + if (intr & MT_INT_TBTT) + mt76_queue_kick(dev, &dev->mt76.q_tx[MT_TXQ_PSD]); + + if (intr & MT_INT_TX_STAT) { + mt76x02_mac_poll_tx_status(dev, true); + tasklet_schedule(&dev->tx_tasklet); + } + + if (intr & MT_INT_GPTIMER) { + mt76x02_irq_disable(dev, MT_INT_GPTIMER); + tasklet_schedule(&dev->dfs_pd.dfs_tasklet); + } + + return IRQ_HANDLED; +} +EXPORT_SYMBOL_GPL(mt76x02_irq_handler); + +void mt76x02_set_irq_mask(struct mt76x02_dev *dev, u32 clear, u32 set) { unsigned long flags; - spin_lock_irqsave(&dev->mmio.irq_lock, flags); - dev->mmio.irqmask &= ~clear; - dev->mmio.irqmask |= set; - __mt76_wr(dev, MT_INT_MASK_CSR, dev->mmio.irqmask); - spin_unlock_irqrestore(&dev->mmio.irq_lock, flags); + spin_lock_irqsave(&dev->mt76.mmio.irq_lock, flags); + dev->mt76.mmio.irqmask &= ~clear; + dev->mt76.mmio.irqmask |= set; + mt76_wr(dev, MT_INT_MASK_CSR, dev->mt76.mmio.irqmask); + spin_unlock_irqrestore(&dev->mt76.mmio.irq_lock, flags); } EXPORT_SYMBOL_GPL(mt76x02_set_irq_mask); -void mt76x02_dma_enable(struct mt76_dev *dev) +static void mt76x02_dma_enable(struct mt76x02_dev *dev) { u32 val; - __mt76_wr(dev, MT_MAC_SYS_CTRL, MT_MAC_SYS_CTRL_ENABLE_TX); - mt76x02_wait_for_wpdma(dev, 1000); + mt76_wr(dev, MT_MAC_SYS_CTRL, MT_MAC_SYS_CTRL_ENABLE_TX); + mt76x02_wait_for_wpdma(&dev->mt76, 1000); usleep_range(50, 100); val = FIELD_PREP(MT_WPDMA_GLO_CFG_DMA_BURST_SIZE, 3) | MT_WPDMA_GLO_CFG_TX_DMA_EN | MT_WPDMA_GLO_CFG_RX_DMA_EN; - __mt76_set(dev, MT_WPDMA_GLO_CFG, val); - __mt76_clear(dev, MT_WPDMA_GLO_CFG, - MT_WPDMA_GLO_CFG_TX_WRITEBACK_DONE); + mt76_set(dev, MT_WPDMA_GLO_CFG, val); + mt76_clear(dev, MT_WPDMA_GLO_CFG, + MT_WPDMA_GLO_CFG_TX_WRITEBACK_DONE); } EXPORT_SYMBOL_GPL(mt76x02_dma_enable); -void mt76x02_dma_disable(struct mt76_dev *dev) +void mt76x02_dma_cleanup(struct mt76x02_dev *dev) +{ + tasklet_kill(&dev->tx_tasklet); + mt76_dma_cleanup(&dev->mt76); +} +EXPORT_SYMBOL_GPL(mt76x02_dma_cleanup); + +void mt76x02_dma_disable(struct mt76x02_dev *dev) { - u32 val = __mt76_rr(dev, MT_WPDMA_GLO_CFG); + u32 val = mt76_rr(dev, MT_WPDMA_GLO_CFG); val &= MT_WPDMA_GLO_CFG_DMA_BURST_SIZE | MT_WPDMA_GLO_CFG_BIG_ENDIAN | MT_WPDMA_GLO_CFG_HDR_SEG_LEN; val |= MT_WPDMA_GLO_CFG_TX_WRITEBACK_DONE; - __mt76_wr(dev, MT_WPDMA_GLO_CFG, val); + mt76_wr(dev, MT_WPDMA_GLO_CFG, val); } 
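mt76x02_irq_handler() above follows the usual ack/mask/defer shape: it acknowledges every pending source by writing MT_INT_SOURCE_CSR back, filters the status through the software irqmask, then masks the noisy sources and hands the real work to a tasklet or NAPI poll, which re-enables them afterwards (see mt76x02_rx_poll_complete() above). A compressed standalone sketch of that control flow, with the registers simulated and all names purely illustrative:

#include <stdint.h>
#include <stdio.h>

#define INT_TX_DONE	(1u << 0)
#define INT_RX_DONE	(1u << 1)

static uint32_t hw_int_source = INT_TX_DONE | INT_RX_DONE;	/* fake CSR */
static uint32_t irqmask = INT_TX_DONE | INT_RX_DONE;

static void irq_handler(void)
{
	uint32_t intr = hw_int_source;

	hw_int_source = 0;	/* ack everything we saw */
	intr &= irqmask;	/* honour the software mask */

	if (intr & INT_TX_DONE) {
		irqmask &= ~INT_TX_DONE;	/* mask until the bottom half runs */
		printf("schedule tx tasklet\n");
	}
	if (intr & INT_RX_DONE) {
		irqmask &= ~INT_RX_DONE;
		printf("schedule rx NAPI poll\n");
	}
}

static void bottom_half(uint32_t source)
{
	printf("process source %#x, re-enable\n", source);
	irqmask |= source;	/* mirrors mt76x02_irq_enable() */
}

int main(void)
{
	irq_handler();
	bottom_half(INT_TX_DONE);
	bottom_half(INT_RX_DONE);
	return 0;
}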
EXPORT_SYMBOL_GPL(mt76x02_dma_disable); -void mt76x02_mac_start(struct mt76_dev *dev) +void mt76x02_mac_start(struct mt76x02_dev *dev) { mt76x02_dma_enable(dev); - __mt76_wr(dev, MT_RX_FILTR_CFG, dev->rxfilter); - __mt76_wr(dev, MT_MAC_SYS_CTRL, - MT_MAC_SYS_CTRL_ENABLE_TX | - MT_MAC_SYS_CTRL_ENABLE_RX); + mt76_wr(dev, MT_RX_FILTR_CFG, dev->mt76.rxfilter); + mt76_wr(dev, MT_MAC_SYS_CTRL, + MT_MAC_SYS_CTRL_ENABLE_TX | + MT_MAC_SYS_CTRL_ENABLE_RX); mt76x02_irq_enable(dev, MT_INT_RX_DONE_ALL | MT_INT_TX_DONE_ALL | MT_INT_TX_STAT); diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_phy.c b/drivers/net/wireless/mediatek/mt76/mt76x02_phy.c index e29914d78b72..0f1d7b5c9f68 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x02_phy.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x02_phy.c @@ -17,17 +17,17 @@ #include <linux/kernel.h> -#include "mt76.h" +#include "mt76x02.h" #include "mt76x02_phy.h" -void mt76x02_phy_set_rxpath(struct mt76_dev *dev) +void mt76x02_phy_set_rxpath(struct mt76x02_dev *dev) { u32 val; - val = __mt76_rr(dev, MT_BBP(AGC, 0)); + val = mt76_rr(dev, MT_BBP(AGC, 0)); val &= ~BIT(4); - switch (dev->chainmask & 0xf) { + switch (dev->mt76.chainmask & 0xf) { case 2: val |= BIT(3); break; @@ -36,23 +36,23 @@ void mt76x02_phy_set_rxpath(struct mt76_dev *dev) break; } - __mt76_wr(dev, MT_BBP(AGC, 0), val); + mt76_wr(dev, MT_BBP(AGC, 0), val); mb(); - val = __mt76_rr(dev, MT_BBP(AGC, 0)); + val = mt76_rr(dev, MT_BBP(AGC, 0)); } EXPORT_SYMBOL_GPL(mt76x02_phy_set_rxpath); -void mt76x02_phy_set_txdac(struct mt76_dev *dev) +void mt76x02_phy_set_txdac(struct mt76x02_dev *dev) { int txpath; - txpath = (dev->chainmask >> 8) & 0xf; + txpath = (dev->mt76.chainmask >> 8) & 0xf; switch (txpath) { case 2: - __mt76_set(dev, MT_BBP(TXBE, 5), 0x3); + mt76_set(dev, MT_BBP(TXBE, 5), 0x3); break; default: - __mt76_clear(dev, MT_BBP(TXBE, 5), 0x3); + mt76_clear(dev, MT_BBP(TXBE, 5), 0x3); break; } } @@ -101,35 +101,158 @@ void mt76x02_add_rate_power_offset(struct mt76_rate_power *r, int offset) } EXPORT_SYMBOL_GPL(mt76x02_add_rate_power_offset); -void mt76x02_phy_set_txpower(struct mt76_dev *dev, int txp_0, int txp_1) +void mt76x02_phy_set_txpower(struct mt76x02_dev *dev, int txp_0, int txp_1) { - struct mt76_rate_power *t = &dev->rate_power; - - __mt76_rmw_field(dev, MT_TX_ALC_CFG_0, MT_TX_ALC_CFG_0_CH_INIT_0, - txp_0); - __mt76_rmw_field(dev, MT_TX_ALC_CFG_0, MT_TX_ALC_CFG_0_CH_INIT_1, - txp_1); - - __mt76_wr(dev, MT_TX_PWR_CFG_0, - mt76x02_tx_power_mask(t->cck[0], t->cck[2], t->ofdm[0], - t->ofdm[2])); - __mt76_wr(dev, MT_TX_PWR_CFG_1, - mt76x02_tx_power_mask(t->ofdm[4], t->ofdm[6], t->ht[0], - t->ht[2])); - __mt76_wr(dev, MT_TX_PWR_CFG_2, - mt76x02_tx_power_mask(t->ht[4], t->ht[6], t->ht[8], - t->ht[10])); - __mt76_wr(dev, MT_TX_PWR_CFG_3, - mt76x02_tx_power_mask(t->ht[12], t->ht[14], t->stbc[0], - t->stbc[2])); - __mt76_wr(dev, MT_TX_PWR_CFG_4, - mt76x02_tx_power_mask(t->stbc[4], t->stbc[6], 0, 0)); - __mt76_wr(dev, MT_TX_PWR_CFG_7, - mt76x02_tx_power_mask(t->ofdm[7], t->vht[8], t->ht[7], - t->vht[9])); - __mt76_wr(dev, MT_TX_PWR_CFG_8, - mt76x02_tx_power_mask(t->ht[14], 0, t->vht[8], t->vht[9])); - __mt76_wr(dev, MT_TX_PWR_CFG_9, - mt76x02_tx_power_mask(t->ht[7], 0, t->stbc[8], t->stbc[9])); + struct mt76_rate_power *t = &dev->mt76.rate_power; + + mt76_rmw_field(dev, MT_TX_ALC_CFG_0, MT_TX_ALC_CFG_0_CH_INIT_0, txp_0); + mt76_rmw_field(dev, MT_TX_ALC_CFG_0, MT_TX_ALC_CFG_0_CH_INIT_1, txp_1); + + mt76_wr(dev, MT_TX_PWR_CFG_0, + mt76x02_tx_power_mask(t->cck[0], t->cck[2], t->ofdm[0], + 
t->ofdm[2])); + mt76_wr(dev, MT_TX_PWR_CFG_1, + mt76x02_tx_power_mask(t->ofdm[4], t->ofdm[6], t->ht[0], + t->ht[2])); + mt76_wr(dev, MT_TX_PWR_CFG_2, + mt76x02_tx_power_mask(t->ht[4], t->ht[6], t->ht[8], + t->ht[10])); + mt76_wr(dev, MT_TX_PWR_CFG_3, + mt76x02_tx_power_mask(t->ht[12], t->ht[14], t->stbc[0], + t->stbc[2])); + mt76_wr(dev, MT_TX_PWR_CFG_4, + mt76x02_tx_power_mask(t->stbc[4], t->stbc[6], 0, 0)); + mt76_wr(dev, MT_TX_PWR_CFG_7, + mt76x02_tx_power_mask(t->ofdm[7], t->vht[8], t->ht[7], + t->vht[9])); + mt76_wr(dev, MT_TX_PWR_CFG_8, + mt76x02_tx_power_mask(t->ht[14], 0, t->vht[8], t->vht[9])); + mt76_wr(dev, MT_TX_PWR_CFG_9, + mt76x02_tx_power_mask(t->ht[7], 0, t->stbc[8], t->stbc[9])); } EXPORT_SYMBOL_GPL(mt76x02_phy_set_txpower); + +int mt76x02_phy_get_min_avg_rssi(struct mt76x02_dev *dev) +{ + struct mt76x02_sta *sta; + struct mt76_wcid *wcid; + int i, j, min_rssi = 0; + s8 cur_rssi; + + local_bh_disable(); + rcu_read_lock(); + + for (i = 0; i < ARRAY_SIZE(dev->mt76.wcid_mask); i++) { + unsigned long mask = dev->mt76.wcid_mask[i]; + + if (!mask) + continue; + + for (j = i * BITS_PER_LONG; mask; j++, mask >>= 1) { + if (!(mask & 1)) + continue; + + wcid = rcu_dereference(dev->mt76.wcid[j]); + if (!wcid) + continue; + + sta = container_of(wcid, struct mt76x02_sta, wcid); + spin_lock(&dev->mt76.rx_lock); + if (sta->inactive_count++ < 5) + cur_rssi = ewma_signal_read(&sta->rssi); + else + cur_rssi = 0; + spin_unlock(&dev->mt76.rx_lock); + + if (cur_rssi < min_rssi) + min_rssi = cur_rssi; + } + } + + rcu_read_unlock(); + local_bh_enable(); + + if (!min_rssi) + return -75; + + return min_rssi; +} +EXPORT_SYMBOL_GPL(mt76x02_phy_get_min_avg_rssi); + +void mt76x02_phy_set_bw(struct mt76x02_dev *dev, int width, u8 ctrl) +{ + int core_val, agc_val; + + switch (width) { + case NL80211_CHAN_WIDTH_80: + core_val = 3; + agc_val = 7; + break; + case NL80211_CHAN_WIDTH_40: + core_val = 2; + agc_val = 3; + break; + default: + core_val = 0; + agc_val = 1; + break; + } + + mt76_rmw_field(dev, MT_BBP(CORE, 1), MT_BBP_CORE_R1_BW, core_val); + mt76_rmw_field(dev, MT_BBP(AGC, 0), MT_BBP_AGC_R0_BW, agc_val); + mt76_rmw_field(dev, MT_BBP(AGC, 0), MT_BBP_AGC_R0_CTRL_CHAN, ctrl); + mt76_rmw_field(dev, MT_BBP(TXBE, 0), MT_BBP_TXBE_R0_CTRL_CHAN, ctrl); +} +EXPORT_SYMBOL_GPL(mt76x02_phy_set_bw); + +void mt76x02_phy_set_band(struct mt76x02_dev *dev, int band, + bool primary_upper) +{ + switch (band) { + case NL80211_BAND_2GHZ: + mt76_set(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_2G); + mt76_clear(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_5G); + break; + case NL80211_BAND_5GHZ: + mt76_clear(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_2G); + mt76_set(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_5G); + break; + } + + mt76_rmw_field(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_UPPER_40M, + primary_upper); +} +EXPORT_SYMBOL_GPL(mt76x02_phy_set_band); + +bool mt76x02_phy_adjust_vga_gain(struct mt76x02_dev *dev) +{ + u8 limit = dev->cal.low_gain > 0 ? 
16 : 4; + bool ret = false; + u32 false_cca; + + false_cca = FIELD_GET(MT_RX_STAT_1_CCA_ERRORS, mt76_rr(dev, MT_RX_STAT_1)); + dev->cal.false_cca = false_cca; + if (false_cca > 800 && dev->cal.agc_gain_adjust < limit) { + dev->cal.agc_gain_adjust += 2; + ret = true; + } else if ((false_cca < 10 && dev->cal.agc_gain_adjust > 0) || + (dev->cal.agc_gain_adjust >= limit && false_cca < 500)) { + dev->cal.agc_gain_adjust -= 2; + ret = true; + } + + return ret; +} +EXPORT_SYMBOL_GPL(mt76x02_phy_adjust_vga_gain); + +void mt76x02_init_agc_gain(struct mt76x02_dev *dev) +{ + dev->cal.agc_gain_init[0] = mt76_get_field(dev, MT_BBP(AGC, 8), + MT_BBP_AGC_GAIN); + dev->cal.agc_gain_init[1] = mt76_get_field(dev, MT_BBP(AGC, 9), + MT_BBP_AGC_GAIN); + memcpy(dev->cal.agc_gain_cur, dev->cal.agc_gain_init, + sizeof(dev->cal.agc_gain_cur)); + dev->cal.low_gain = -1; +} +EXPORT_SYMBOL_GPL(mt76x02_init_agc_gain); diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_phy.h b/drivers/net/wireless/mediatek/mt76/mt76x02_phy.h index df69f8fade75..2b316cf7c70c 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x02_phy.h +++ b/drivers/net/wireless/mediatek/mt76/mt76x02_phy.h @@ -19,11 +19,43 @@ #include "mt76x02_regs.h" +static inline int +mt76x02_get_rssi_gain_thresh(struct mt76x02_dev *dev) +{ + switch (dev->mt76.chandef.width) { + case NL80211_CHAN_WIDTH_80: + return -62; + case NL80211_CHAN_WIDTH_40: + return -65; + default: + return -68; + } +} + +static inline int +mt76x02_get_low_rssi_gain_thresh(struct mt76x02_dev *dev) +{ + switch (dev->mt76.chandef.width) { + case NL80211_CHAN_WIDTH_80: + return -76; + case NL80211_CHAN_WIDTH_40: + return -79; + default: + return -82; + } +} + void mt76x02_add_rate_power_offset(struct mt76_rate_power *r, int offset); -void mt76x02_phy_set_txpower(struct mt76_dev *dev, int txp_0, int txp_2); +void mt76x02_phy_set_txpower(struct mt76x02_dev *dev, int txp_0, int txp_2); void mt76x02_limit_rate_power(struct mt76_rate_power *r, int limit); int mt76x02_get_max_rate_power(struct mt76_rate_power *r); -void mt76x02_phy_set_rxpath(struct mt76_dev *dev); -void mt76x02_phy_set_txdac(struct mt76_dev *dev); +void mt76x02_phy_set_rxpath(struct mt76x02_dev *dev); +void mt76x02_phy_set_txdac(struct mt76x02_dev *dev); +int mt76x02_phy_get_min_avg_rssi(struct mt76x02_dev *dev); +void mt76x02_phy_set_bw(struct mt76x02_dev *dev, int width, u8 ctrl); +void mt76x02_phy_set_band(struct mt76x02_dev *dev, int band, + bool primary_upper); +bool mt76x02_phy_adjust_vga_gain(struct mt76x02_dev *dev); +void mt76x02_init_agc_gain(struct mt76x02_dev *dev); #endif /* __MT76x02_PHY_H */ diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_regs.h b/drivers/net/wireless/mediatek/mt76/mt76x02_regs.h index 24d1e6d747dd..f7de77d09d28 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x02_regs.h +++ b/drivers/net/wireless/mediatek/mt76/mt76x02_regs.h @@ -205,8 +205,8 @@ #define MT_TXQ_STA 0x0434 #define MT_RF_CSR_CFG 0x0500 #define MT_RF_CSR_CFG_DATA GENMASK(7, 0) -#define MT_RF_CSR_CFG_REG_ID GENMASK(13, 8) -#define MT_RF_CSR_CFG_REG_BANK GENMASK(17, 14) +#define MT_RF_CSR_CFG_REG_ID GENMASK(14, 8) +#define MT_RF_CSR_CFG_REG_BANK GENMASK(17, 15) #define MT_RF_CSR_CFG_WR BIT(30) #define MT_RF_CSR_CFG_KICK BIT(31) diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_trace.c b/drivers/net/wireless/mediatek/mt76/mt76x02_trace.c index a09f117848d6..5b42d2c87937 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2_trace.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x02_trace.c @@ -18,6 +18,6 @@ #ifndef 
__CHECKER__ #define CREATE_TRACE_POINTS -#include "mt76x2_trace.h" +#include "mt76x02_trace.h" #endif diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_trace.h b/drivers/net/wireless/mediatek/mt76/mt76x02_trace.h index eb5afeaefa44..713f12d3c8de 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2_trace.h +++ b/drivers/net/wireless/mediatek/mt76/mt76x02_trace.h @@ -14,14 +14,14 @@ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ -#if !defined(__MT76x2_TRACE_H) || defined(TRACE_HEADER_MULTI_READ) -#define __MT76x2_TRACE_H +#if !defined(__MT76x02_TRACE_H) || defined(TRACE_HEADER_MULTI_READ) +#define __MT76x02_TRACE_H #include <linux/tracepoint.h> -#include "mt76x2.h" +#include "mt76x02.h" #undef TRACE_SYSTEM -#define TRACE_SYSTEM mt76x2 +#define TRACE_SYSTEM mt76x02 #define MAXNAME 32 #define DEV_ENTRY __array(char, wiphy_name, 32) @@ -35,7 +35,7 @@ #define TXID_PR_ARG __entry->wcid, __entry->pktid DECLARE_EVENT_CLASS(dev_evt, - TP_PROTO(struct mt76x2_dev *dev), + TP_PROTO(struct mt76x02_dev *dev), TP_ARGS(dev), TP_STRUCT__entry( DEV_ENTRY @@ -47,7 +47,7 @@ DECLARE_EVENT_CLASS(dev_evt, ); DECLARE_EVENT_CLASS(dev_txid_evt, - TP_PROTO(struct mt76x2_dev *dev, u8 wcid, u8 pktid), + TP_PROTO(struct mt76x02_dev *dev, u8 wcid, u8 pktid), TP_ARGS(dev, wcid, pktid), TP_STRUCT__entry( DEV_ENTRY @@ -63,18 +63,18 @@ DECLARE_EVENT_CLASS(dev_txid_evt, ) ); -DEFINE_EVENT(dev_evt, mac_txstat_poll, - TP_PROTO(struct mt76x2_dev *dev), - TP_ARGS(dev) -); - DEFINE_EVENT(dev_txid_evt, mac_txdone_add, - TP_PROTO(struct mt76x2_dev *dev, u8 wcid, u8 pktid), + TP_PROTO(struct mt76x02_dev *dev, u8 wcid, u8 pktid), TP_ARGS(dev, wcid, pktid) ); +DEFINE_EVENT(dev_evt, mac_txstat_poll, + TP_PROTO(struct mt76x02_dev *dev), + TP_ARGS(dev) +); + TRACE_EVENT(mac_txstat_fetch, - TP_PROTO(struct mt76x2_dev *dev, + TP_PROTO(struct mt76x02_dev *dev, struct mt76x02_tx_status *stat), TP_ARGS(dev, stat), @@ -110,9 +110,8 @@ TRACE_EVENT(mac_txstat_fetch, ) ); - TRACE_EVENT(dev_irq, - TP_PROTO(struct mt76x2_dev *dev, u32 val, u32 mask), + TP_PROTO(struct mt76x02_dev *dev, u32 val, u32 mask), TP_ARGS(dev, val, mask), @@ -139,6 +138,6 @@ TRACE_EVENT(dev_irq, #undef TRACE_INCLUDE_PATH #define TRACE_INCLUDE_PATH . #undef TRACE_INCLUDE_FILE -#define TRACE_INCLUDE_FILE mt76x2_trace +#define TRACE_INCLUDE_FILE mt76x02_trace #include <trace/define_trace.h> diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_txrx.c b/drivers/net/wireless/mediatek/mt76/mt76x02_txrx.c new file mode 100644 index 000000000000..d3de08872d6e --- /dev/null +++ b/drivers/net/wireless/mediatek/mt76/mt76x02_txrx.c @@ -0,0 +1,202 @@ +/* + * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name> + * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com> + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ */ + +#include <linux/kernel.h> + +#include "mt76x02.h" + +void mt76x02_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control, + struct sk_buff *skb) +{ + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + struct mt76x02_dev *dev = hw->priv; + struct ieee80211_vif *vif = info->control.vif; + struct mt76_wcid *wcid = &dev->mt76.global_wcid; + + if (control->sta) { + struct mt76x02_sta *msta; + + msta = (struct mt76x02_sta *)control->sta->drv_priv; + wcid = &msta->wcid; + /* sw encrypted frames */ + if (!info->control.hw_key && wcid->hw_key_idx != 0xff) + control->sta = NULL; + } + + if (vif && !control->sta) { + struct mt76x02_vif *mvif; + + mvif = (struct mt76x02_vif *)vif->drv_priv; + wcid = &mvif->group_wcid; + } + + mt76_tx(&dev->mt76, control->sta, wcid, skb); +} +EXPORT_SYMBOL_GPL(mt76x02_tx); + +void mt76x02_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q, + struct sk_buff *skb) +{ + struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76); + void *rxwi = skb->data; + + if (q == MT_RXQ_MCU) { + /* this is used just by mmio code */ + skb_queue_tail(&mdev->mmio.mcu.res_q, skb); + wake_up(&mdev->mmio.mcu.wait); + return; + } + + skb_pull(skb, sizeof(struct mt76x02_rxwi)); + if (mt76x02_mac_process_rx(dev, skb, rxwi)) { + dev_kfree_skb(skb); + return; + } + + mt76_rx(mdev, q, skb); +} +EXPORT_SYMBOL_GPL(mt76x02_queue_rx_skb); + +s8 mt76x02_tx_get_max_txpwr_adj(struct mt76x02_dev *dev, + const struct ieee80211_tx_rate *rate) +{ + s8 max_txpwr; + + if (rate->flags & IEEE80211_TX_RC_VHT_MCS) { + u8 mcs = ieee80211_rate_get_vht_mcs(rate); + + if (mcs == 8 || mcs == 9) { + max_txpwr = dev->mt76.rate_power.vht[8]; + } else { + u8 nss, idx; + + nss = ieee80211_rate_get_vht_nss(rate); + idx = ((nss - 1) << 3) + mcs; + max_txpwr = dev->mt76.rate_power.ht[idx & 0xf]; + } + } else if (rate->flags & IEEE80211_TX_RC_MCS) { + max_txpwr = dev->mt76.rate_power.ht[rate->idx & 0xf]; + } else { + enum nl80211_band band = dev->mt76.chandef.chan->band; + + if (band == NL80211_BAND_2GHZ) { + const struct ieee80211_rate *r; + struct wiphy *wiphy = dev->mt76.hw->wiphy; + struct mt76_rate_power *rp = &dev->mt76.rate_power; + + r = &wiphy->bands[band]->bitrates[rate->idx]; + if (r->flags & IEEE80211_RATE_SHORT_PREAMBLE) + max_txpwr = rp->cck[r->hw_value & 0x3]; + else + max_txpwr = rp->ofdm[r->hw_value & 0x7]; + } else { + max_txpwr = dev->mt76.rate_power.ofdm[rate->idx & 0x7]; + } + } + + return max_txpwr; +} +EXPORT_SYMBOL_GPL(mt76x02_tx_get_max_txpwr_adj); + +s8 mt76x02_tx_get_txpwr_adj(struct mt76x02_dev *dev, s8 txpwr, s8 max_txpwr_adj) +{ + txpwr = min_t(s8, txpwr, dev->mt76.txpower_conf); + txpwr -= (dev->target_power + dev->target_power_delta[0]); + txpwr = min_t(s8, txpwr, max_txpwr_adj); + + if (!dev->enable_tpc) + return 0; + else if (txpwr >= 0) + return min_t(s8, txpwr, 7); + else + return (txpwr < -16) ? 
8 : (txpwr + 32) / 2; +} +EXPORT_SYMBOL_GPL(mt76x02_tx_get_txpwr_adj); + +void mt76x02_tx_set_txpwr_auto(struct mt76x02_dev *dev, s8 txpwr) +{ + s8 txpwr_adj; + + txpwr_adj = mt76x02_tx_get_txpwr_adj(dev, txpwr, + dev->mt76.rate_power.ofdm[4]); + mt76_rmw_field(dev, MT_PROT_AUTO_TX_CFG, + MT_PROT_AUTO_TX_CFG_PROT_PADJ, txpwr_adj); + mt76_rmw_field(dev, MT_PROT_AUTO_TX_CFG, + MT_PROT_AUTO_TX_CFG_AUTO_PADJ, txpwr_adj); +} +EXPORT_SYMBOL_GPL(mt76x02_tx_set_txpwr_auto); + +void mt76x02_tx_complete(struct mt76_dev *dev, struct sk_buff *skb) +{ + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + + if (info->flags & IEEE80211_TX_CTL_AMPDU) { + ieee80211_free_txskb(dev->hw, skb); + } else { + ieee80211_tx_info_clear_status(info); + info->status.rates[0].idx = -1; + info->flags |= IEEE80211_TX_STAT_ACK; + ieee80211_tx_status(dev->hw, skb); + } +} +EXPORT_SYMBOL_GPL(mt76x02_tx_complete); + +bool mt76x02_tx_status_data(struct mt76_dev *mdev, u8 *update) +{ + struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76); + struct mt76x02_tx_status stat; + + if (!mt76x02_mac_load_tx_status(dev, &stat)) + return false; + + mt76x02_send_tx_status(dev, &stat, update); + + return true; +} +EXPORT_SYMBOL_GPL(mt76x02_tx_status_data); + +int mt76x02_tx_prepare_skb(struct mt76_dev *mdev, void *txwi, + struct sk_buff *skb, struct mt76_queue *q, + struct mt76_wcid *wcid, struct ieee80211_sta *sta, + u32 *tx_info) +{ + struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76); + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + int qsel = MT_QSEL_EDCA; + int ret; + + if (q == &dev->mt76.q_tx[MT_TXQ_PSD] && wcid && wcid->idx < 128) + mt76x02_mac_wcid_set_drop(dev, wcid->idx, false); + + mt76x02_mac_write_txwi(dev, txwi, skb, wcid, sta, skb->len); + + ret = mt76x02_insert_hdr_pad(skb); + if (ret < 0) + return ret; + + if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) + qsel = MT_QSEL_MGMT; + + *tx_info = FIELD_PREP(MT_TXD_INFO_QSEL, qsel) | + MT_TXD_INFO_80211; + + if (!wcid || wcid->hw_key_idx == 0xff || wcid->sw_iv) + *tx_info |= MT_TXD_INFO_WIV; + + return 0; +} +EXPORT_SYMBOL_GPL(mt76x02_tx_prepare_skb); diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_usb.h b/drivers/net/wireless/mediatek/mt76/mt76x02_usb.h index 2482f9761fcd..0126e51d77ed 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x02_usb.h +++ b/drivers/net/wireless/mediatek/mt76/mt76x02_usb.h @@ -15,15 +15,20 @@ */ #ifndef __MT76x02_USB_H -#define __MT76x0x_USB_H +#define __MT76x02_USB_H -#include "mt76.h" +#include "mt76x02.h" void mt76x02u_init_mcu(struct mt76_dev *dev); -void mt76x02u_mcu_fw_reset(struct mt76_dev *dev); -int mt76x02u_mcu_fw_send_data(struct mt76_dev *dev, const void *data, +void mt76x02u_mcu_fw_reset(struct mt76x02_dev *dev); +int mt76x02u_mcu_fw_send_data(struct mt76x02_dev *dev, const void *data, int data_len, u32 max_payload, u32 offset); int mt76x02u_skb_dma_info(struct sk_buff *skb, int port, u32 flags); -int mt76x02u_set_txinfo(struct sk_buff *skb, struct mt76_wcid *wcid, u8 ep); +int mt76x02u_tx_prepare_skb(struct mt76_dev *mdev, void *data, + struct sk_buff *skb, struct mt76_queue *q, + struct mt76_wcid *wcid, struct ieee80211_sta *sta, + u32 *tx_info); +void mt76x02u_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue *q, + struct mt76_queue_entry *e, bool flush); #endif /* __MT76x02_USB_H */ diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c b/drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c index aecbe0c429ea..dc2226c722dd 100644 --- 
a/drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c @@ -14,8 +14,25 @@ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ -#include "mt76.h" -#include "mt76x02_dma.h" +#include "mt76x02.h" + +static void mt76x02u_remove_dma_hdr(struct sk_buff *skb) +{ + int hdr_len; + + skb_pull(skb, sizeof(struct mt76x02_txwi) + MT_DMA_HDR_LEN); + hdr_len = ieee80211_get_hdrlen_from_skb(skb); + if (hdr_len % 4) + mt76x02_remove_hdr_pad(skb, 2); +} + +void mt76x02u_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue *q, + struct mt76_queue_entry *e, bool flush) +{ + mt76x02u_remove_dma_hdr(e->skb); + mt76x02_tx_complete(mdev, e->skb); +} +EXPORT_SYMBOL_GPL(mt76x02u_tx_complete_skb); int mt76x02u_skb_dma_info(struct sk_buff *skb, int port, u32 flags) { @@ -50,7 +67,8 @@ int mt76x02u_skb_dma_info(struct sk_buff *skb, int port, u32 flags) return 0; } -int mt76x02u_set_txinfo(struct sk_buff *skb, struct mt76_wcid *wcid, u8 ep) +static int +mt76x02u_set_txinfo(struct sk_buff *skb, struct mt76_wcid *wcid, u8 ep) { struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); enum mt76_qsel qsel; @@ -69,4 +87,21 @@ int mt76x02u_set_txinfo(struct sk_buff *skb, struct mt76_wcid *wcid, u8 ep) return mt76x02u_skb_dma_info(skb, WLAN_PORT, flags); } -EXPORT_SYMBOL_GPL(mt76x02u_set_txinfo); + +int mt76x02u_tx_prepare_skb(struct mt76_dev *mdev, void *data, + struct sk_buff *skb, struct mt76_queue *q, + struct mt76_wcid *wcid, struct ieee80211_sta *sta, + u32 *tx_info) +{ + struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76); + struct mt76x02_txwi *txwi; + int len = skb->len; + + mt76x02_insert_hdr_pad(skb); + + txwi = skb_push(skb, sizeof(struct mt76x02_txwi)); + mt76x02_mac_write_txwi(dev, txwi, skb, wcid, sta, len); + + return mt76x02u_set_txinfo(skb, wcid, q2ep(q->hw_idx)); +} +EXPORT_SYMBOL_GPL(mt76x02u_tx_prepare_skb); diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_usb_mcu.c b/drivers/net/wireless/mediatek/mt76/mt76x02_usb_mcu.c index cb5f073f08af..da299b8a1334 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x02_usb_mcu.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x02_usb_mcu.c @@ -17,8 +17,7 @@ #include <linux/module.h> #include <linux/firmware.h> -#include "mt76.h" -#include "mt76x02_dma.h" +#include "mt76x02.h" #include "mt76x02_mcu.h" #include "mt76x02_usb.h" @@ -255,16 +254,16 @@ mt76x02u_mcu_rd_rp(struct mt76_dev *dev, u32 base, return ret; } -void mt76x02u_mcu_fw_reset(struct mt76_dev *dev) +void mt76x02u_mcu_fw_reset(struct mt76x02_dev *dev) { - mt76u_vendor_request(dev, MT_VEND_DEV_MODE, + mt76u_vendor_request(&dev->mt76, MT_VEND_DEV_MODE, USB_DIR_OUT | USB_TYPE_VENDOR, 0x1, 0, NULL, 0); } EXPORT_SYMBOL_GPL(mt76x02u_mcu_fw_reset); static int -__mt76x02u_mcu_fw_send_data(struct mt76_dev *dev, struct mt76u_buf *buf, +__mt76x02u_mcu_fw_send_data(struct mt76x02_dev *dev, struct mt76u_buf *buf, const void *fw_data, int len, u32 dst_addr) { u8 *data = sg_virt(&buf->urb->sg[0]); @@ -281,14 +280,14 @@ __mt76x02u_mcu_fw_send_data(struct mt76_dev *dev, struct mt76u_buf *buf, memcpy(data + sizeof(info), fw_data, len); memset(data + sizeof(info) + len, 0, 4); - mt76u_single_wr(dev, MT_VEND_WRITE_FCE, + mt76u_single_wr(&dev->mt76, MT_VEND_WRITE_FCE, MT_FCE_DMA_ADDR, dst_addr); len = roundup(len, 4); - mt76u_single_wr(dev, MT_VEND_WRITE_FCE, + mt76u_single_wr(&dev->mt76, MT_VEND_WRITE_FCE, MT_FCE_DMA_LEN, len << 16); buf->len = MT_CMD_HDR_LEN + len + sizeof(info); - err = mt76u_submit_buf(dev, USB_DIR_OUT, + 
err = mt76u_submit_buf(&dev->mt76, USB_DIR_OUT, MT_EP_OUT_INBAND_CMD, buf, GFP_KERNEL, mt76u_mcu_complete_urb, &cmpl); @@ -297,31 +296,31 @@ __mt76x02u_mcu_fw_send_data(struct mt76_dev *dev, struct mt76u_buf *buf, if (!wait_for_completion_timeout(&cmpl, msecs_to_jiffies(1000))) { - dev_err(dev->dev, "firmware upload timed out\n"); + dev_err(dev->mt76.dev, "firmware upload timed out\n"); usb_kill_urb(buf->urb); return -ETIMEDOUT; } if (mt76u_urb_error(buf->urb)) { - dev_err(dev->dev, "firmware upload failed: %d\n", + dev_err(dev->mt76.dev, "firmware upload failed: %d\n", buf->urb->status); return buf->urb->status; } - val = mt76u_rr(dev, MT_TX_CPU_FROM_FCE_CPU_DESC_IDX); + val = mt76_rr(dev, MT_TX_CPU_FROM_FCE_CPU_DESC_IDX); val++; - mt76u_wr(dev, MT_TX_CPU_FROM_FCE_CPU_DESC_IDX, val); + mt76_wr(dev, MT_TX_CPU_FROM_FCE_CPU_DESC_IDX, val); return 0; } -int mt76x02u_mcu_fw_send_data(struct mt76_dev *dev, const void *data, +int mt76x02u_mcu_fw_send_data(struct mt76x02_dev *dev, const void *data, int data_len, u32 max_payload, u32 offset) { int err, len, pos = 0, max_len = max_payload - 8; struct mt76u_buf buf; - err = mt76u_buf_alloc(dev, &buf, 1, max_payload, max_payload, + err = mt76u_buf_alloc(&dev->mt76, &buf, 1, max_payload, max_payload, GFP_KERNEL); if (err < 0) return err; diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_util.c b/drivers/net/wireless/mediatek/mt76/mt76x02_util.c index ec422c3980e8..ca05332f81fc 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x02_util.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x02_util.c @@ -16,10 +16,7 @@ */ #include <linux/module.h> -#include "mt76.h" -#include "mt76x02_dma.h" -#include "mt76x02_regs.h" -#include "mt76x02_mac.h" +#include "mt76x02.h" #define CCK_RATE(_idx, _rate) { \ .bitrate = _rate, \ @@ -51,21 +48,21 @@ struct ieee80211_rate mt76x02_rates[] = { EXPORT_SYMBOL_GPL(mt76x02_rates); void mt76x02_configure_filter(struct ieee80211_hw *hw, - unsigned int changed_flags, - unsigned int *total_flags, u64 multicast) + unsigned int changed_flags, + unsigned int *total_flags, u64 multicast) { - struct mt76_dev *dev = hw->priv; + struct mt76x02_dev *dev = hw->priv; u32 flags = 0; #define MT76_FILTER(_flag, _hw) do { \ flags |= *total_flags & FIF_##_flag; \ - dev->rxfilter &= ~(_hw); \ - dev->rxfilter |= !(flags & FIF_##_flag) * (_hw); \ + dev->mt76.rxfilter &= ~(_hw); \ + dev->mt76.rxfilter |= !(flags & FIF_##_flag) * (_hw); \ } while (0) - mutex_lock(&dev->mutex); + mutex_lock(&dev->mt76.mutex); - dev->rxfilter &= ~MT_RX_FILTR_CFG_OTHER_BSS; + dev->mt76.rxfilter &= ~MT_RX_FILTR_CFG_OTHER_BSS; MT76_FILTER(FCSFAIL, MT_RX_FILTR_CFG_CRC_ERR); MT76_FILTER(PLCPFAIL, MT_RX_FILTR_CFG_PHY_ERR); @@ -78,25 +75,25 @@ void mt76x02_configure_filter(struct ieee80211_hw *hw, MT76_FILTER(PSPOLL, MT_RX_FILTR_CFG_PSPOLL); *total_flags = flags; - dev->bus->wr(dev, MT_RX_FILTR_CFG, dev->rxfilter); + mt76_wr(dev, MT_RX_FILTR_CFG, dev->mt76.rxfilter); - mutex_unlock(&dev->mutex); + mutex_unlock(&dev->mt76.mutex); } EXPORT_SYMBOL_GPL(mt76x02_configure_filter); int mt76x02_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif, - struct ieee80211_sta *sta) + struct ieee80211_sta *sta) { - struct mt76_dev *dev = hw->priv; - struct mt76x02_sta *msta = (struct mt76x02_sta *) sta->drv_priv; - struct mt76x02_vif *mvif = (struct mt76x02_vif *) vif->drv_priv; + struct mt76x02_dev *dev = hw->priv; + struct mt76x02_sta *msta = (struct mt76x02_sta *)sta->drv_priv; + struct mt76x02_vif *mvif = (struct mt76x02_vif *)vif->drv_priv; int ret = 0; int idx = 0; int i; 
- mutex_lock(&dev->mutex); + mutex_lock(&dev->mt76.mutex); - idx = mt76_wcid_alloc(dev->wcid_mask, ARRAY_SIZE(dev->wcid)); + idx = mt76_wcid_alloc(dev->mt76.wcid_mask, ARRAY_SIZE(dev->mt76.wcid)); if (idx < 0) { ret = -ENOSPC; goto out; @@ -116,40 +113,40 @@ int mt76x02_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif, ewma_signal_init(&msta->rssi); - rcu_assign_pointer(dev->wcid[idx], &msta->wcid); + rcu_assign_pointer(dev->mt76.wcid[idx], &msta->wcid); out: - mutex_unlock(&dev->mutex); + mutex_unlock(&dev->mt76.mutex); return ret; } EXPORT_SYMBOL_GPL(mt76x02_sta_add); int mt76x02_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif, - struct ieee80211_sta *sta) + struct ieee80211_sta *sta) { - struct mt76_dev *dev = hw->priv; - struct mt76x02_sta *msta = (struct mt76x02_sta *) sta->drv_priv; + struct mt76x02_dev *dev = hw->priv; + struct mt76x02_sta *msta = (struct mt76x02_sta *)sta->drv_priv; int idx = msta->wcid.idx; int i; - mutex_lock(&dev->mutex); - rcu_assign_pointer(dev->wcid[idx], NULL); + mutex_lock(&dev->mt76.mutex); + rcu_assign_pointer(dev->mt76.wcid[idx], NULL); for (i = 0; i < ARRAY_SIZE(sta->txq); i++) - mt76_txq_remove(dev, sta->txq[i]); + mt76_txq_remove(&dev->mt76, sta->txq[i]); mt76x02_mac_wcid_set_drop(dev, idx, true); - mt76_wcid_free(dev->wcid_mask, idx); + mt76_wcid_free(dev->mt76.wcid_mask, idx); mt76x02_mac_wcid_setup(dev, idx, 0, NULL); - mutex_unlock(&dev->mutex); + mutex_unlock(&dev->mt76.mutex); return 0; } EXPORT_SYMBOL_GPL(mt76x02_sta_remove); -void mt76x02_vif_init(struct mt76_dev *dev, struct ieee80211_vif *vif, - unsigned int idx) +void mt76x02_vif_init(struct mt76x02_dev *dev, struct ieee80211_vif *vif, + unsigned int idx) { - struct mt76x02_vif *mvif = (struct mt76x02_vif *) vif->drv_priv; + struct mt76x02_vif *mvif = (struct mt76x02_vif *)vif->drv_priv; mvif->idx = idx; mvif->group_wcid.idx = MT_VIF_WCID(idx); @@ -161,11 +158,11 @@ EXPORT_SYMBOL_GPL(mt76x02_vif_init); int mt76x02_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif) { - struct mt76_dev *dev = hw->priv; + struct mt76x02_dev *dev = hw->priv; unsigned int idx = 0; if (vif->addr[0] & BIT(1)) - idx = 1 + (((dev->macaddr[0] ^ vif->addr[0]) >> 2) & 7); + idx = 1 + (((dev->mt76.macaddr[0] ^ vif->addr[0]) >> 2) & 7); /* * Client mode typically only has one configurable BSSID register, @@ -189,20 +186,20 @@ mt76x02_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif) EXPORT_SYMBOL_GPL(mt76x02_add_interface); void mt76x02_remove_interface(struct ieee80211_hw *hw, - struct ieee80211_vif *vif) + struct ieee80211_vif *vif) { - struct mt76_dev *dev = hw->priv; + struct mt76x02_dev *dev = hw->priv; - mt76_txq_remove(dev, vif->txq); + mt76_txq_remove(&dev->mt76, vif->txq); } EXPORT_SYMBOL_GPL(mt76x02_remove_interface); int mt76x02_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif, - struct ieee80211_ampdu_params *params) + struct ieee80211_ampdu_params *params) { enum ieee80211_ampdu_mlme_action action = params->action; struct ieee80211_sta *sta = params->sta; - struct mt76_dev *dev = hw->priv; + struct mt76x02_dev *dev = hw->priv; struct mt76x02_sta *msta = (struct mt76x02_sta *) sta->drv_priv; struct ieee80211_txq *txq = sta->txq[params->tid]; u16 tid = params->tid; @@ -216,12 +213,14 @@ int mt76x02_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif, switch (action) { case IEEE80211_AMPDU_RX_START: - mt76_rx_aggr_start(dev, &msta->wcid, tid, *ssn, params->buf_size); - __mt76_set(dev, MT_WCID_ADDR(msta->wcid.idx) + 4, BIT(16 + 
tid)); + mt76_rx_aggr_start(&dev->mt76, &msta->wcid, tid, + *ssn, params->buf_size); + mt76_set(dev, MT_WCID_ADDR(msta->wcid.idx) + 4, BIT(16 + tid)); break; case IEEE80211_AMPDU_RX_STOP: - mt76_rx_aggr_stop(dev, &msta->wcid, tid); - __mt76_clear(dev, MT_WCID_ADDR(msta->wcid.idx) + 4, BIT(16 + tid)); + mt76_rx_aggr_stop(&dev->mt76, &msta->wcid, tid); + mt76_clear(dev, MT_WCID_ADDR(msta->wcid.idx) + 4, + BIT(16 + tid)); break; case IEEE80211_AMPDU_TX_OPERATIONAL: mtxq->aggr = true; @@ -248,11 +247,11 @@ int mt76x02_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif, EXPORT_SYMBOL_GPL(mt76x02_ampdu_action); int mt76x02_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, - struct ieee80211_vif *vif, struct ieee80211_sta *sta, - struct ieee80211_key_conf *key) + struct ieee80211_vif *vif, struct ieee80211_sta *sta, + struct ieee80211_key_conf *key) { - struct mt76_dev *dev = hw->priv; - struct mt76x02_vif *mvif = (struct mt76x02_vif *) vif->drv_priv; + struct mt76x02_dev *dev = hw->priv; + struct mt76x02_vif *mvif = (struct mt76x02_vif *)vif->drv_priv; struct mt76x02_sta *msta; struct mt76_wcid *wcid; int idx = key->keyidx; @@ -298,7 +297,7 @@ int mt76x02_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, key = NULL; } - mt76_wcid_key_setup(dev, wcid, key); + mt76_wcid_key_setup(&dev->mt76, wcid, key); if (!msta) { if (key || wcid->hw_key_idx == idx) { @@ -315,13 +314,13 @@ int mt76x02_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, EXPORT_SYMBOL_GPL(mt76x02_set_key); int mt76x02_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif, - u16 queue, const struct ieee80211_tx_queue_params *params) + u16 queue, const struct ieee80211_tx_queue_params *params) { - struct mt76_dev *dev = hw->priv; + struct mt76x02_dev *dev = hw->priv; u8 cw_min = 5, cw_max = 10, qid; u32 val; - qid = dev->q_tx[queue].hw_idx; + qid = dev->mt76.q_tx[queue].hw_idx; if (params->cw_min) cw_min = fls(params->cw_min); @@ -332,27 +331,27 @@ int mt76x02_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif, FIELD_PREP(MT_EDCA_CFG_AIFSN, params->aifs) | FIELD_PREP(MT_EDCA_CFG_CWMIN, cw_min) | FIELD_PREP(MT_EDCA_CFG_CWMAX, cw_max); - __mt76_wr(dev, MT_EDCA_CFG_AC(qid), val); + mt76_wr(dev, MT_EDCA_CFG_AC(qid), val); - val = __mt76_rr(dev, MT_WMM_TXOP(qid)); + val = mt76_rr(dev, MT_WMM_TXOP(qid)); val &= ~(MT_WMM_TXOP_MASK << MT_WMM_TXOP_SHIFT(qid)); val |= params->txop << MT_WMM_TXOP_SHIFT(qid); - __mt76_wr(dev, MT_WMM_TXOP(qid), val); + mt76_wr(dev, MT_WMM_TXOP(qid), val); - val = __mt76_rr(dev, MT_WMM_AIFSN); + val = mt76_rr(dev, MT_WMM_AIFSN); val &= ~(MT_WMM_AIFSN_MASK << MT_WMM_AIFSN_SHIFT(qid)); val |= params->aifs << MT_WMM_AIFSN_SHIFT(qid); - __mt76_wr(dev, MT_WMM_AIFSN, val); + mt76_wr(dev, MT_WMM_AIFSN, val); - val = __mt76_rr(dev, MT_WMM_CWMIN); + val = mt76_rr(dev, MT_WMM_CWMIN); val &= ~(MT_WMM_CWMIN_MASK << MT_WMM_CWMIN_SHIFT(qid)); val |= cw_min << MT_WMM_CWMIN_SHIFT(qid); - __mt76_wr(dev, MT_WMM_CWMIN, val); + mt76_wr(dev, MT_WMM_CWMIN, val); - val = __mt76_rr(dev, MT_WMM_CWMAX); + val = mt76_rr(dev, MT_WMM_CWMAX); val &= ~(MT_WMM_CWMAX_MASK << MT_WMM_CWMAX_SHIFT(qid)); val |= cw_max << MT_WMM_CWMAX_SHIFT(qid); - __mt76_wr(dev, MT_WMM_CWMAX, val); + mt76_wr(dev, MT_WMM_CWMAX, val); return 0; } @@ -362,7 +361,7 @@ void mt76x02_sta_rate_tbl_update(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_sta *sta) { - struct mt76_dev *dev = hw->priv; + struct mt76x02_dev *dev = hw->priv; struct mt76x02_sta *msta = (struct mt76x02_sta *) sta->drv_priv; struct 
ieee80211_sta_rates *rates = rcu_dereference(sta->rates); struct ieee80211_tx_rate rate = {}; @@ -373,9 +372,7 @@ void mt76x02_sta_rate_tbl_update(struct ieee80211_hw *hw, rate.idx = rates->rate[0].idx; rate.flags = rates->rate[0].flags; mt76x02_mac_wcid_set_rate(dev, &msta->wcid, &rate); - - if (dev->drv && dev->drv->get_max_txpwr_adj) - msta->wcid.max_txpwr_adj = dev->drv->get_max_txpwr_adj(dev, &rate); + msta->wcid.max_txpwr_adj = mt76x02_tx_get_max_txpwr_adj(dev, &rate); } EXPORT_SYMBOL_GPL(mt76x02_sta_rate_tbl_update); @@ -408,52 +405,6 @@ void mt76x02_remove_hdr_pad(struct sk_buff *skb, int len) } EXPORT_SYMBOL_GPL(mt76x02_remove_hdr_pad); -static void mt76x02_remove_dma_hdr(struct sk_buff *skb) -{ - int hdr_len; - - skb_pull(skb, sizeof(struct mt76x02_txwi) + MT_DMA_HDR_LEN); - hdr_len = ieee80211_get_hdrlen_from_skb(skb); - if (hdr_len % 4) - mt76x02_remove_hdr_pad(skb, 2); -} - -void mt76x02_tx_complete(struct mt76_dev *dev, struct sk_buff *skb) -{ - struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); - - if (info->flags & IEEE80211_TX_CTL_AMPDU) { - ieee80211_free_txskb(dev->hw, skb); - } else { - ieee80211_tx_info_clear_status(info); - info->status.rates[0].idx = -1; - info->flags |= IEEE80211_TX_STAT_ACK; - ieee80211_tx_status(dev->hw, skb); - } -} -EXPORT_SYMBOL_GPL(mt76x02_tx_complete); - -void mt76x02_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue *q, - struct mt76_queue_entry *e, bool flush) -{ - mt76x02_remove_dma_hdr(e->skb); - mt76x02_tx_complete(mdev, e->skb); -} -EXPORT_SYMBOL_GPL(mt76x02_tx_complete_skb); - -bool mt76x02_tx_status_data(struct mt76_dev *dev, u8 *update) -{ - struct mt76x02_tx_status stat; - - if (!mt76x02_mac_load_tx_status(dev, &stat)) - return false; - - mt76x02_send_tx_status(dev, &stat, update); - - return true; -} -EXPORT_SYMBOL_GPL(mt76x02_tx_status_data); - const u16 mt76x02_beacon_offsets[16] = { /* 1024 byte per beacon */ 0xc000, @@ -476,7 +427,7 @@ const u16 mt76x02_beacon_offsets[16] = { }; EXPORT_SYMBOL_GPL(mt76x02_beacon_offsets); -void mt76x02_set_beacon_offsets(struct mt76_dev *dev) +void mt76x02_set_beacon_offsets(struct mt76x02_dev *dev) { u16 val, base = MT_BEACON_BASE; u32 regs[4] = {}; @@ -488,7 +439,7 @@ void mt76x02_set_beacon_offsets(struct mt76_dev *dev) } for (i = 0; i < 4; i++) - __mt76_wr(dev, MT_BCN_OFFSET(i), regs[i]); + mt76_wr(dev, MT_BCN_OFFSET(i), regs[i]); } EXPORT_SYMBOL_GPL(mt76x02_set_beacon_offsets); diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_util.h b/drivers/net/wireless/mediatek/mt76/mt76x02_util.h deleted file mode 100644 index ff4cab5ca038..000000000000 --- a/drivers/net/wireless/mediatek/mt76/mt76x02_util.h +++ /dev/null @@ -1,78 +0,0 @@ -/* - * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name> - * Copyright (C) 2018 Stanislaw Gruszka <stf_xl@wp.pl> - * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
- */ - -#ifndef __MT76X02_UTIL_H -#define __MT76X02_UTIL_H - -extern struct ieee80211_rate mt76x02_rates[12]; - -void mt76x02_configure_filter(struct ieee80211_hw *hw, - unsigned int changed_flags, - unsigned int *total_flags, u64 multicast); -int mt76x02_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif, - struct ieee80211_sta *sta); -int mt76x02_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif, - struct ieee80211_sta *sta); - -void mt76x02_vif_init(struct mt76_dev *dev, struct ieee80211_vif *vif, - unsigned int idx); -int mt76x02_add_interface(struct ieee80211_hw *hw, - struct ieee80211_vif *vif); -void mt76x02_remove_interface(struct ieee80211_hw *hw, - struct ieee80211_vif *vif); - -int mt76x02_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif, - struct ieee80211_ampdu_params *params); -int mt76x02_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, - struct ieee80211_vif *vif, struct ieee80211_sta *sta, - struct ieee80211_key_conf *key); -int mt76x02_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif, - u16 queue, const struct ieee80211_tx_queue_params *params); -void mt76x02_sta_rate_tbl_update(struct ieee80211_hw *hw, - struct ieee80211_vif *vif, - struct ieee80211_sta *sta); -int mt76x02_insert_hdr_pad(struct sk_buff *skb); -void mt76x02_remove_hdr_pad(struct sk_buff *skb, int len); -void mt76x02_tx_complete(struct mt76_dev *dev, struct sk_buff *skb); -void mt76x02_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue *q, - struct mt76_queue_entry *e, bool flush); -bool mt76x02_tx_status_data(struct mt76_dev *dev, u8 *update); - -extern const u16 mt76x02_beacon_offsets[16]; -void mt76x02_set_beacon_offsets(struct mt76_dev *dev); -void mt76x02_set_irq_mask(struct mt76_dev *dev, u32 clear, u32 set); -void mt76x02_mac_start(struct mt76_dev *dev); - -static inline void mt76x02_irq_enable(struct mt76_dev *dev, u32 mask) -{ - mt76x02_set_irq_mask(dev, 0, mask); -} - -static inline void mt76x02_irq_disable(struct mt76_dev *dev, u32 mask) -{ - mt76x02_set_irq_mask(dev, mask, 0); -} - -static inline bool -mt76x02_wait_for_txrx_idle(struct mt76_dev *dev) -{ - return __mt76_poll_msec(dev, MT_MAC_STATUS, - MT_MAC_STATUS_TX | MT_MAC_STATUS_RX, - 0, 100); -} - -#endif diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2.h b/drivers/net/wireless/mediatek/mt76/mt76x2.h deleted file mode 100644 index d6ccab06a594..000000000000 --- a/drivers/net/wireless/mediatek/mt76/mt76x2.h +++ /dev/null @@ -1,210 +0,0 @@ -/* - * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name> - * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
- */ - -#ifndef __MT76x2_H -#define __MT76x2_H - -#include <linux/device.h> -#include <linux/dma-mapping.h> -#include <linux/spinlock.h> -#include <linux/skbuff.h> -#include <linux/netdevice.h> -#include <linux/irq.h> -#include <linux/interrupt.h> -#include <linux/mutex.h> -#include <linux/bitops.h> -#include <linux/kfifo.h> - -#define MT7662_FIRMWARE "mt7662.bin" -#define MT7662_ROM_PATCH "mt7662_rom_patch.bin" -#define MT7662_EEPROM_SIZE 512 - -#define MT7662U_FIRMWARE "mediatek/mt7662u.bin" -#define MT7662U_ROM_PATCH "mediatek/mt7662u_rom_patch.bin" - -#define MT_MAX_CHAINS 2 - -#define MT_CALIBRATE_INTERVAL HZ - -#include "mt76.h" -#include "mt76x02_regs.h" -#include "mt76x2_mac.h" -#include "mt76x2_dfs.h" - -struct mt76x2_rx_freq_cal { - s8 high_gain[MT_MAX_CHAINS]; - s8 rssi_offset[MT_MAX_CHAINS]; - s8 lna_gain; - u32 mcu_gain; -}; - -struct mt76x2_calibration { - struct mt76x2_rx_freq_cal rx; - - u8 agc_gain_init[MT_MAX_CHAINS]; - u8 agc_gain_cur[MT_MAX_CHAINS]; - - u16 false_cca; - s8 avg_rssi_all; - s8 agc_gain_adjust; - s8 low_gain; - - u8 temp; - - bool init_cal_done; - bool tssi_cal_done; - bool tssi_comp_pending; - bool dpd_cal_done; - bool channel_cal_done; -}; - -struct mt76x2_dev { - struct mt76_dev mt76; /* must be first */ - - struct mac_address macaddr_list[8]; - - struct mutex mutex; - - u8 txdone_seq; - DECLARE_KFIFO_PTR(txstatus_fifo, struct mt76x02_tx_status); - - struct sk_buff *rx_head; - - struct tasklet_struct tx_tasklet; - struct tasklet_struct pre_tbtt_tasklet; - struct delayed_work cal_work; - struct delayed_work mac_work; - - u32 aggr_stats[32]; - - struct sk_buff *beacons[8]; - u8 beacon_mask; - u8 beacon_data_mask; - - u8 tbtt_count; - u16 beacon_int; - - struct mt76x2_calibration cal; - - s8 target_power; - s8 target_power_delta[2]; - bool enable_tpc; - - u8 coverage_class; - u8 slottime; - - struct mt76x2_dfs_pattern_detector dfs_pd; -}; - -static inline bool is_mt7612(struct mt76x2_dev *dev) -{ - return mt76_chip(&dev->mt76) == 0x7612; -} - -static inline bool mt76x2_channel_silent(struct mt76x2_dev *dev) -{ - struct ieee80211_channel *chan = dev->mt76.chandef.chan; - - return ((chan->flags & IEEE80211_CHAN_RADAR) && - chan->dfs_state != NL80211_DFS_AVAILABLE); -} - -extern const struct ieee80211_ops mt76x2_ops; - -struct mt76x2_dev *mt76x2_alloc_device(struct device *pdev); -int mt76x2_register_device(struct mt76x2_dev *dev); -void mt76x2_init_debugfs(struct mt76x2_dev *dev); -void mt76x2_init_device(struct mt76x2_dev *dev); - -irqreturn_t mt76x2_irq_handler(int irq, void *dev_instance); -void mt76x2_phy_power_on(struct mt76x2_dev *dev); -int mt76x2_init_hardware(struct mt76x2_dev *dev); -void mt76x2_stop_hardware(struct mt76x2_dev *dev); -int mt76x2_eeprom_init(struct mt76x2_dev *dev); -int mt76x2_apply_calibration_data(struct mt76x2_dev *dev, int channel); -void mt76x2_set_tx_ackto(struct mt76x2_dev *dev); - -void mt76x2_phy_set_antenna(struct mt76x2_dev *dev); -int mt76x2_phy_start(struct mt76x2_dev *dev); -int mt76x2_phy_set_channel(struct mt76x2_dev *dev, - struct cfg80211_chan_def *chandef); -int mt76x2_mac_get_rssi(struct mt76x2_dev *dev, s8 rssi, int chain); -void mt76x2_phy_calibrate(struct work_struct *work); -void mt76x2_phy_set_txpower(struct mt76x2_dev *dev); - -int mt76x2_mcu_init(struct mt76x2_dev *dev); -int mt76x2_mcu_set_channel(struct mt76x2_dev *dev, u8 channel, u8 bw, - u8 bw_index, bool scan); -int mt76x2_mcu_load_cr(struct mt76x2_dev *dev, u8 type, u8 temp_level, - u8 channel); - -void mt76x2_tx_tasklet(unsigned long data); -void 
mt76x2_dma_cleanup(struct mt76x2_dev *dev); - -void mt76x2_cleanup(struct mt76x2_dev *dev); - -void mt76x2_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control, - struct sk_buff *skb); -int mt76x2_tx_prepare_skb(struct mt76_dev *mdev, void *txwi, - struct sk_buff *skb, struct mt76_queue *q, - struct mt76_wcid *wcid, struct ieee80211_sta *sta, - u32 *tx_info); -void mt76x2_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue *q, - struct mt76_queue_entry *e, bool flush); -void mt76x2_mac_set_tx_protection(struct mt76x2_dev *dev, u32 val); - -void mt76x2_pre_tbtt_tasklet(unsigned long arg); - -void mt76x2_rx_poll_complete(struct mt76_dev *mdev, enum mt76_rxq_id q); -void mt76x2_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q, - struct sk_buff *skb); - -void mt76x2_sta_ps(struct mt76_dev *dev, struct ieee80211_sta *sta, bool ps); - -void mt76x2_update_channel(struct mt76_dev *mdev); - -s8 mt76x2_tx_get_max_txpwr_adj(struct mt76_dev *dev, - const struct ieee80211_tx_rate *rate); -s8 mt76x2_tx_get_txpwr_adj(struct mt76x2_dev *dev, s8 txpwr, s8 max_txpwr_adj); -void mt76x2_tx_set_txpwr_auto(struct mt76x2_dev *dev, s8 txpwr); - - -void mt76x2_reset_wlan(struct mt76x2_dev *dev, bool enable); -void mt76x2_init_txpower(struct mt76x2_dev *dev, - struct ieee80211_supported_band *sband); -void mt76_write_mac_initvals(struct mt76x2_dev *dev); - -int mt76x2_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif, - struct ieee80211_sta *sta); -int mt76x2_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif, - struct ieee80211_sta *sta); -void mt76x2_remove_interface(struct ieee80211_hw *hw, - struct ieee80211_vif *vif); -int mt76x2_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif, - u16 queue, const struct ieee80211_tx_queue_params *params); -void mt76x2_txq_init(struct mt76x2_dev *dev, struct ieee80211_txq *txq); - -void mt76x2_phy_tssi_compensate(struct mt76x2_dev *dev, bool wait); -void mt76x2_phy_set_txpower_regs(struct mt76x2_dev *dev, - enum nl80211_band band); -void mt76x2_configure_tx_delay(struct mt76x2_dev *dev, - enum nl80211_band band, u8 bw); -void mt76x2_phy_set_bw(struct mt76x2_dev *dev, int width, u8 ctrl); -void mt76x2_phy_set_band(struct mt76x2_dev *dev, int band, bool primary_upper); -int mt76x2_phy_get_min_avg_rssi(struct mt76x2_dev *dev); -void mt76x2_apply_gain_adj(struct mt76x2_dev *dev); - -#endif diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/Kconfig b/drivers/net/wireless/mediatek/mt76/mt76x2/Kconfig new file mode 100644 index 000000000000..2b414a0e9088 --- /dev/null +++ b/drivers/net/wireless/mediatek/mt76/mt76x2/Kconfig @@ -0,0 +1,20 @@ +config MT76x2_COMMON + tristate + select MT76x02_LIB + +config MT76x2E + tristate "MediaTek MT76x2E (PCIe) support" + select MT76x2_COMMON + depends on MAC80211 + depends on PCI + ---help--- + This adds support for MT7612/MT7602/MT7662-based wireless PCIe devices. + +config MT76x2U + tristate "MediaTek MT76x2U (USB) support" + select MT76x2_COMMON + select MT76x02_USB + depends on MAC80211 + depends on USB + help + This adds support for MT7612U-based wireless USB dongles. 
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/Makefile b/drivers/net/wireless/mediatek/mt76/mt76x2/Makefile new file mode 100644 index 000000000000..b71bb1049170 --- /dev/null +++ b/drivers/net/wireless/mediatek/mt76/mt76x2/Makefile @@ -0,0 +1,16 @@ +obj-$(CONFIG_MT76x2_COMMON) += mt76x2-common.o +obj-$(CONFIG_MT76x2E) += mt76x2e.o +obj-$(CONFIG_MT76x2U) += mt76x2u.o + +mt76x2-common-y := \ + eeprom.o mac.o init.o phy.o debugfs.o mcu.o + +mt76x2e-y := \ + pci.o pci_main.o pci_init.o pci_tx.o \ + pci_mac.o pci_mcu.o pci_phy.o pci_dfs.o + +mt76x2u-y := \ + usb.o usb_init.o usb_main.o usb_mac.o usb_mcu.o \ + usb_phy.o + +CFLAGS_pci_trace.o := -I$(src) diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_debugfs.c b/drivers/net/wireless/mediatek/mt76/mt76x2/debugfs.c index ea373bae1522..e8f8ccc0a5ed 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2_debugfs.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x2/debugfs.c @@ -20,7 +20,7 @@ static int mt76x2_ampdu_stat_read(struct seq_file *file, void *data) { - struct mt76x2_dev *dev = file->private; + struct mt76x02_dev *dev = file->private; int i, j; for (i = 0; i < 4; i++) { @@ -49,7 +49,7 @@ mt76x2_ampdu_stat_open(struct inode *inode, struct file *f) static int read_txpower(struct seq_file *file, void *data) { - struct mt76x2_dev *dev = dev_get_drvdata(file->private); + struct mt76x02_dev *dev = dev_get_drvdata(file->private); seq_printf(file, "Target power: %d\n", dev->target_power); @@ -68,9 +68,9 @@ static const struct file_operations fops_ampdu_stat = { static int mt76x2_dfs_stat_read(struct seq_file *file, void *data) { + struct mt76x02_dev *dev = file->private; + struct mt76x02_dfs_pattern_detector *dfs_pd = &dev->dfs_pd; int i; - struct mt76x2_dev *dev = file->private; - struct mt76x2_dfs_pattern_detector *dfs_pd = &dev->dfs_pd; seq_printf(file, "allocated sequences:\t%d\n", dfs_pd->seq_stats.seq_pool_len); @@ -106,7 +106,7 @@ static const struct file_operations fops_dfs_stat = { static int read_agc(struct seq_file *file, void *data) { - struct mt76x2_dev *dev = dev_get_drvdata(file->private); + struct mt76x02_dev *dev = dev_get_drvdata(file->private); seq_printf(file, "avg_rssi: %d\n", dev->cal.avg_rssi_all); seq_printf(file, "low_gain: %d\n", dev->cal.low_gain); @@ -116,7 +116,7 @@ static int read_agc(struct seq_file *file, void *data) return 0; } -void mt76x2_init_debugfs(struct mt76x2_dev *dev) +void mt76x2_init_debugfs(struct mt76x02_dev *dev) { struct dentry *dir; diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/dfs.h b/drivers/net/wireless/mediatek/mt76/mt76x2/dfs.h new file mode 100644 index 000000000000..3cb9d1864286 --- /dev/null +++ b/drivers/net/wireless/mediatek/mt76/mt76x2/dfs.h @@ -0,0 +1,26 @@ +/* + * Copyright (C) 2016 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com> + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ */ + +#ifndef __DFS_H +#define __DFS_H + +void mt76x2_dfs_init_params(struct mt76x02_dev *dev); +void mt76x2_dfs_init_detector(struct mt76x02_dev *dev); +void mt76x2_dfs_adjust_agc(struct mt76x02_dev *dev); +void mt76x2_dfs_set_domain(struct mt76x02_dev *dev, + enum nl80211_dfs_regions region); + +#endif /* __DFS_H */ diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_eeprom.c b/drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.c index 136faa4066a5..f39b622d03f4 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2_eeprom.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.c @@ -17,12 +17,12 @@ #include <linux/module.h> #include <asm/unaligned.h> #include "mt76x2.h" -#include "mt76x2_eeprom.h" +#include "eeprom.h" #define EE_FIELD(_name, _value) [MT_EE_##_name] = (_value) | 1 static int -mt76x2_eeprom_copy(struct mt76x2_dev *dev, enum mt76x02_eeprom_field field, +mt76x2_eeprom_copy(struct mt76x02_dev *dev, enum mt76x02_eeprom_field field, void *dest, int len) { if (field + len > dev->mt76.eeprom.size) @@ -33,7 +33,7 @@ mt76x2_eeprom_copy(struct mt76x2_dev *dev, enum mt76x02_eeprom_field field, } static int -mt76x2_eeprom_get_macaddr(struct mt76x2_dev *dev) +mt76x2_eeprom_get_macaddr(struct mt76x02_dev *dev) { void *src = dev->mt76.eeprom.data + MT_EE_MAC_ADDR; @@ -42,7 +42,7 @@ mt76x2_eeprom_get_macaddr(struct mt76x2_dev *dev) } static bool -mt76x2_has_cal_free_data(struct mt76x2_dev *dev, u8 *efuse) +mt76x2_has_cal_free_data(struct mt76x02_dev *dev, u8 *efuse) { u16 *efuse_w = (u16 *) efuse; @@ -68,7 +68,7 @@ mt76x2_has_cal_free_data(struct mt76x2_dev *dev, u8 *efuse) } static void -mt76x2_apply_cal_free_data(struct mt76x2_dev *dev, u8 *efuse) +mt76x2_apply_cal_free_data(struct mt76x02_dev *dev, u8 *efuse) { #define GROUP_5G(_id) \ MT_EE_TX_POWER_0_START_5G + MT_TX_POWER_GROUP_SIZE_5G * (_id), \ @@ -137,7 +137,7 @@ mt76x2_apply_cal_free_data(struct mt76x2_dev *dev, u8 *efuse) eeprom[MT_EE_BT_PMUCFG] = val & 0xff; } -static int mt76x2_check_eeprom(struct mt76x2_dev *dev) +static int mt76x2_check_eeprom(struct mt76x02_dev *dev) { u16 val = get_unaligned_le16(dev->mt76.eeprom.data); @@ -155,7 +155,7 @@ static int mt76x2_check_eeprom(struct mt76x2_dev *dev) } static int -mt76x2_eeprom_load(struct mt76x2_dev *dev) +mt76x2_eeprom_load(struct mt76x02_dev *dev) { void *efuse; bool found; @@ -177,8 +177,8 @@ mt76x2_eeprom_load(struct mt76x2_dev *dev) efuse = dev->mt76.otp.data; - if (mt76x02_get_efuse_data(&dev->mt76, 0, efuse, - MT7662_EEPROM_SIZE, MT_EE_READ)) + if (mt76x02_get_efuse_data(dev, 0, efuse, MT7662_EEPROM_SIZE, + MT_EE_READ)) goto out; if (found) { @@ -197,7 +197,7 @@ out: } static void -mt76x2_set_rx_gain_group(struct mt76x2_dev *dev, u8 val) +mt76x2_set_rx_gain_group(struct mt76x02_dev *dev, u8 val) { s8 *dest = dev->cal.rx.high_gain; @@ -212,7 +212,7 @@ mt76x2_set_rx_gain_group(struct mt76x2_dev *dev, u8 val) } static void -mt76x2_set_rssi_offset(struct mt76x2_dev *dev, int chain, u8 val) +mt76x2_set_rssi_offset(struct mt76x02_dev *dev, int chain, u8 val) { s8 *dest = dev->cal.rx.rssi_offset; @@ -241,34 +241,34 @@ mt76x2_get_cal_channel_group(int channel) } static u8 -mt76x2_get_5g_rx_gain(struct mt76x2_dev *dev, u8 channel) +mt76x2_get_5g_rx_gain(struct mt76x02_dev *dev, u8 channel) { enum mt76x2_cal_channel_group group; group = mt76x2_get_cal_channel_group(channel); switch (group) { case MT_CH_5G_JAPAN: - return mt76x02_eeprom_get(&dev->mt76, + return mt76x02_eeprom_get(dev, MT_EE_RF_5G_GRP0_1_RX_HIGH_GAIN); case MT_CH_5G_UNII_1: - return 
mt76x02_eeprom_get(&dev->mt76, + return mt76x02_eeprom_get(dev, MT_EE_RF_5G_GRP0_1_RX_HIGH_GAIN) >> 8; case MT_CH_5G_UNII_2: - return mt76x02_eeprom_get(&dev->mt76, + return mt76x02_eeprom_get(dev, MT_EE_RF_5G_GRP2_3_RX_HIGH_GAIN); case MT_CH_5G_UNII_2E_1: - return mt76x02_eeprom_get(&dev->mt76, + return mt76x02_eeprom_get(dev, MT_EE_RF_5G_GRP2_3_RX_HIGH_GAIN) >> 8; case MT_CH_5G_UNII_2E_2: - return mt76x02_eeprom_get(&dev->mt76, + return mt76x02_eeprom_get(dev, MT_EE_RF_5G_GRP4_5_RX_HIGH_GAIN); default: - return mt76x02_eeprom_get(&dev->mt76, + return mt76x02_eeprom_get(dev, MT_EE_RF_5G_GRP4_5_RX_HIGH_GAIN) >> 8; } } -void mt76x2_read_rx_gain(struct mt76x2_dev *dev) +void mt76x2_read_rx_gain(struct mt76x02_dev *dev) { struct ieee80211_channel *chan = dev->mt76.chandef.chan; int channel = chan->hw_value; @@ -277,14 +277,13 @@ void mt76x2_read_rx_gain(struct mt76x2_dev *dev) u16 val; if (chan->band == NL80211_BAND_2GHZ) - val = mt76x02_eeprom_get(&dev->mt76, - MT_EE_RF_2G_RX_HIGH_GAIN) >> 8; + val = mt76x02_eeprom_get(dev, MT_EE_RF_2G_RX_HIGH_GAIN) >> 8; else val = mt76x2_get_5g_rx_gain(dev, channel); mt76x2_set_rx_gain_group(dev, val); - mt76x02_get_rx_gain(&dev->mt76, chan->band, &val, &lna_2g, lna_5g); + mt76x02_get_rx_gain(dev, chan->band, &val, &lna_2g, lna_5g); mt76x2_set_rssi_offset(dev, 0, val); mt76x2_set_rssi_offset(dev, 1, val >> 8); @@ -293,12 +292,12 @@ void mt76x2_read_rx_gain(struct mt76x2_dev *dev) dev->cal.rx.mcu_gain |= (lna_5g[1] & 0xff) << 16; dev->cal.rx.mcu_gain |= (lna_5g[2] & 0xff) << 24; - lna = mt76x02_get_lna_gain(&dev->mt76, &lna_2g, lna_5g, chan); + lna = mt76x02_get_lna_gain(dev, &lna_2g, lna_5g, chan); dev->cal.rx.lna_gain = mt76x02_sign_extend(lna, 8); } EXPORT_SYMBOL_GPL(mt76x2_read_rx_gain); -void mt76x2_get_rate_power(struct mt76x2_dev *dev, struct mt76_rate_power *t, +void mt76x2_get_rate_power(struct mt76x02_dev *dev, struct mt76_rate_power *t, struct ieee80211_channel *chan) { bool is_5ghz; @@ -308,53 +307,49 @@ void mt76x2_get_rate_power(struct mt76x2_dev *dev, struct mt76_rate_power *t, memset(t, 0, sizeof(*t)); - val = mt76x02_eeprom_get(&dev->mt76, MT_EE_TX_POWER_CCK); + val = mt76x02_eeprom_get(dev, MT_EE_TX_POWER_CCK); t->cck[0] = t->cck[1] = mt76x02_rate_power_val(val); t->cck[2] = t->cck[3] = mt76x02_rate_power_val(val >> 8); if (is_5ghz) - val = mt76x02_eeprom_get(&dev->mt76, - MT_EE_TX_POWER_OFDM_5G_6M); + val = mt76x02_eeprom_get(dev, MT_EE_TX_POWER_OFDM_5G_6M); else - val = mt76x02_eeprom_get(&dev->mt76, - MT_EE_TX_POWER_OFDM_2G_6M); + val = mt76x02_eeprom_get(dev, MT_EE_TX_POWER_OFDM_2G_6M); t->ofdm[0] = t->ofdm[1] = mt76x02_rate_power_val(val); t->ofdm[2] = t->ofdm[3] = mt76x02_rate_power_val(val >> 8); if (is_5ghz) - val = mt76x02_eeprom_get(&dev->mt76, - MT_EE_TX_POWER_OFDM_5G_24M); + val = mt76x02_eeprom_get(dev, MT_EE_TX_POWER_OFDM_5G_24M); else - val = mt76x02_eeprom_get(&dev->mt76, - MT_EE_TX_POWER_OFDM_2G_24M); + val = mt76x02_eeprom_get(dev, MT_EE_TX_POWER_OFDM_2G_24M); t->ofdm[4] = t->ofdm[5] = mt76x02_rate_power_val(val); t->ofdm[6] = t->ofdm[7] = mt76x02_rate_power_val(val >> 8); - val = mt76x02_eeprom_get(&dev->mt76, MT_EE_TX_POWER_HT_MCS0); + val = mt76x02_eeprom_get(dev, MT_EE_TX_POWER_HT_MCS0); t->ht[0] = t->ht[1] = mt76x02_rate_power_val(val); t->ht[2] = t->ht[3] = mt76x02_rate_power_val(val >> 8); - val = mt76x02_eeprom_get(&dev->mt76, MT_EE_TX_POWER_HT_MCS4); + val = mt76x02_eeprom_get(dev, MT_EE_TX_POWER_HT_MCS4); t->ht[4] = t->ht[5] = mt76x02_rate_power_val(val); t->ht[6] = t->ht[7] = mt76x02_rate_power_val(val >> 8); - 
val = mt76x02_eeprom_get(&dev->mt76, MT_EE_TX_POWER_HT_MCS8); + val = mt76x02_eeprom_get(dev, MT_EE_TX_POWER_HT_MCS8); t->ht[8] = t->ht[9] = mt76x02_rate_power_val(val); t->ht[10] = t->ht[11] = mt76x02_rate_power_val(val >> 8); - val = mt76x02_eeprom_get(&dev->mt76, MT_EE_TX_POWER_HT_MCS12); + val = mt76x02_eeprom_get(dev, MT_EE_TX_POWER_HT_MCS12); t->ht[12] = t->ht[13] = mt76x02_rate_power_val(val); t->ht[14] = t->ht[15] = mt76x02_rate_power_val(val >> 8); - val = mt76x02_eeprom_get(&dev->mt76, MT_EE_TX_POWER_VHT_MCS0); + val = mt76x02_eeprom_get(dev, MT_EE_TX_POWER_VHT_MCS0); t->vht[0] = t->vht[1] = mt76x02_rate_power_val(val); t->vht[2] = t->vht[3] = mt76x02_rate_power_val(val >> 8); - val = mt76x02_eeprom_get(&dev->mt76, MT_EE_TX_POWER_VHT_MCS4); + val = mt76x02_eeprom_get(dev, MT_EE_TX_POWER_VHT_MCS4); t->vht[4] = t->vht[5] = mt76x02_rate_power_val(val); t->vht[6] = t->vht[7] = mt76x02_rate_power_val(val >> 8); - val = mt76x02_eeprom_get(&dev->mt76, MT_EE_TX_POWER_VHT_MCS8); + val = mt76x02_eeprom_get(dev, MT_EE_TX_POWER_VHT_MCS8); if (!is_5ghz) val >>= 8; t->vht[8] = t->vht[9] = mt76x02_rate_power_val(val >> 8); @@ -366,8 +361,10 @@ void mt76x2_get_rate_power(struct mt76x2_dev *dev, struct mt76_rate_power *t, EXPORT_SYMBOL_GPL(mt76x2_get_rate_power); static void -mt76x2_get_power_info_2g(struct mt76x2_dev *dev, struct mt76x2_tx_power_info *t, - struct ieee80211_channel *chan, int chain, int offset) +mt76x2_get_power_info_2g(struct mt76x02_dev *dev, + struct mt76x2_tx_power_info *t, + struct ieee80211_channel *chan, + int chain, int offset) { int channel = chan->hw_value; int delta_idx; @@ -388,13 +385,15 @@ mt76x2_get_power_info_2g(struct mt76x2_dev *dev, struct mt76x2_tx_power_info *t, t->chain[chain].target_power = data[2]; t->chain[chain].delta = mt76x02_sign_extend_optional(data[delta_idx], 7); - val = mt76x02_eeprom_get(&dev->mt76, MT_EE_RF_2G_TSSI_OFF_TXPOWER); + val = mt76x02_eeprom_get(dev, MT_EE_RF_2G_TSSI_OFF_TXPOWER); t->target_power = val >> 8; } static void -mt76x2_get_power_info_5g(struct mt76x2_dev *dev, struct mt76x2_tx_power_info *t, - struct ieee80211_channel *chan, int chain, int offset) +mt76x2_get_power_info_5g(struct mt76x02_dev *dev, + struct mt76x2_tx_power_info *t, + struct ieee80211_channel *chan, + int chain, int offset) { int channel = chan->hw_value; enum mt76x2_cal_channel_group group; @@ -437,11 +436,11 @@ mt76x2_get_power_info_5g(struct mt76x2_dev *dev, struct mt76x2_tx_power_info *t, t->chain[chain].target_power = data[2]; t->chain[chain].delta = mt76x02_sign_extend_optional(data[delta_idx], 7); - val = mt76x02_eeprom_get(&dev->mt76, MT_EE_RF_2G_RX_HIGH_GAIN); + val = mt76x02_eeprom_get(dev, MT_EE_RF_2G_RX_HIGH_GAIN); t->target_power = val & 0xff; } -void mt76x2_get_power_info(struct mt76x2_dev *dev, +void mt76x2_get_power_info(struct mt76x02_dev *dev, struct mt76x2_tx_power_info *t, struct ieee80211_channel *chan) { @@ -449,8 +448,8 @@ void mt76x2_get_power_info(struct mt76x2_dev *dev, memset(t, 0, sizeof(*t)); - bw40 = mt76x02_eeprom_get(&dev->mt76, MT_EE_TX_POWER_DELTA_BW40); - bw80 = mt76x02_eeprom_get(&dev->mt76, MT_EE_TX_POWER_DELTA_BW80); + bw40 = mt76x02_eeprom_get(dev, MT_EE_TX_POWER_DELTA_BW40); + bw80 = mt76x02_eeprom_get(dev, MT_EE_TX_POWER_DELTA_BW80); if (chan->band == NL80211_BAND_5GHZ) { bw40 >>= 8; @@ -465,7 +464,7 @@ void mt76x2_get_power_info(struct mt76x2_dev *dev, MT_EE_TX_POWER_1_START_2G); } - if (mt76x02_tssi_enabled(&dev->mt76) || + if (mt76x2_tssi_enabled(dev) || !mt76x02_field_valid(t->target_power)) t->target_power = 
t->chain[0].target_power; @@ -474,7 +473,7 @@ void mt76x2_get_power_info(struct mt76x2_dev *dev, } EXPORT_SYMBOL_GPL(mt76x2_get_power_info); -int mt76x2_get_temp_comp(struct mt76x2_dev *dev, struct mt76x2_temp_comp *t) +int mt76x2_get_temp_comp(struct mt76x02_dev *dev, struct mt76x2_temp_comp *t) { enum nl80211_band band = dev->mt76.chandef.chan->band; u16 val, slope; @@ -482,23 +481,20 @@ int mt76x2_get_temp_comp(struct mt76x2_dev *dev, struct mt76x2_temp_comp *t) memset(t, 0, sizeof(*t)); - if (!mt76x02_temp_tx_alc_enabled(&dev->mt76)) + if (!mt76x2_temp_tx_alc_enabled(dev)) return -EINVAL; - if (!mt76x02_ext_pa_enabled(&dev->mt76, band)) + if (!mt76x02_ext_pa_enabled(dev, band)) return -EINVAL; - val = mt76x02_eeprom_get(&dev->mt76, MT_EE_TX_POWER_EXT_PA_5G) >> 8; + val = mt76x02_eeprom_get(dev, MT_EE_TX_POWER_EXT_PA_5G) >> 8; t->temp_25_ref = val & 0x7f; if (band == NL80211_BAND_5GHZ) { - slope = mt76x02_eeprom_get(&dev->mt76, - MT_EE_RF_TEMP_COMP_SLOPE_5G); - bounds = mt76x02_eeprom_get(&dev->mt76, - MT_EE_TX_POWER_EXT_PA_5G); + slope = mt76x02_eeprom_get(dev, MT_EE_RF_TEMP_COMP_SLOPE_5G); + bounds = mt76x02_eeprom_get(dev, MT_EE_TX_POWER_EXT_PA_5G); } else { - slope = mt76x02_eeprom_get(&dev->mt76, - MT_EE_RF_TEMP_COMP_SLOPE_2G); - bounds = mt76x02_eeprom_get(&dev->mt76, + slope = mt76x02_eeprom_get(dev, MT_EE_RF_TEMP_COMP_SLOPE_2G); + bounds = mt76x02_eeprom_get(dev, MT_EE_TX_POWER_DELTA_BW80) >> 8; } @@ -511,7 +507,7 @@ int mt76x2_get_temp_comp(struct mt76x2_dev *dev, struct mt76x2_temp_comp *t) } EXPORT_SYMBOL_GPL(mt76x2_get_temp_comp); -int mt76x2_eeprom_init(struct mt76x2_dev *dev) +int mt76x2_eeprom_init(struct mt76x02_dev *dev) { int ret; @@ -519,7 +515,7 @@ int mt76x2_eeprom_init(struct mt76x2_dev *dev) if (ret) return ret; - mt76x02_eeprom_parse_hw_cap(&dev->mt76); + mt76x02_eeprom_parse_hw_cap(dev); mt76x2_eeprom_get_macaddr(dev); mt76_eeprom_override(&dev->mt76); dev->mt76.macaddr[0] &= ~BIT(1); diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_eeprom.h b/drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.h index c2e99bbeac3b..9e735524d367 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2_eeprom.h +++ b/drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.h @@ -17,7 +17,7 @@ #ifndef __MT76x2_EEPROM_H #define __MT76x2_EEPROM_H -#include "mt76x02_eeprom.h" +#include "../mt76x02_eeprom.h" enum mt76x2_cal_channel_group { MT_CH_5G_JAPAN, @@ -51,18 +51,18 @@ struct mt76x2_temp_comp { unsigned int low_slope; /* J / dB */ }; -void mt76x2_get_rate_power(struct mt76x2_dev *dev, struct mt76_rate_power *t, +void mt76x2_get_rate_power(struct mt76x02_dev *dev, struct mt76_rate_power *t, struct ieee80211_channel *chan); -void mt76x2_get_power_info(struct mt76x2_dev *dev, +void mt76x2_get_power_info(struct mt76x02_dev *dev, struct mt76x2_tx_power_info *t, struct ieee80211_channel *chan); -int mt76x2_get_temp_comp(struct mt76x2_dev *dev, struct mt76x2_temp_comp *t); -void mt76x2_read_rx_gain(struct mt76x2_dev *dev); +int mt76x2_get_temp_comp(struct mt76x02_dev *dev, struct mt76x2_temp_comp *t); +void mt76x2_read_rx_gain(struct mt76x02_dev *dev); static inline bool -mt76x2_has_ext_lna(struct mt76x2_dev *dev) +mt76x2_has_ext_lna(struct mt76x02_dev *dev) { - u32 val = mt76x02_eeprom_get(&dev->mt76, MT_EE_NIC_CONF_1); + u32 val = mt76x02_eeprom_get(dev, MT_EE_NIC_CONF_1); if (dev->mt76.chandef.chan->band == NL80211_BAND_2GHZ) return val & MT_EE_NIC_CONF_1_LNA_EXT_2G; @@ -70,4 +70,25 @@ mt76x2_has_ext_lna(struct mt76x2_dev *dev) return val & MT_EE_NIC_CONF_1_LNA_EXT_5G; } +static inline 
bool +mt76x2_temp_tx_alc_enabled(struct mt76x02_dev *dev) +{ + u16 val; + + val = mt76x02_eeprom_get(dev, MT_EE_TX_POWER_EXT_PA_5G); + if (!(val & BIT(15))) + return false; + + return mt76x02_eeprom_get(dev, MT_EE_NIC_CONF_1) & + MT_EE_NIC_CONF_1_TEMP_TX_ALC; +} + +static inline bool +mt76x2_tssi_enabled(struct mt76x02_dev *dev) +{ + return !mt76x2_temp_tx_alc_enabled(dev) && + (mt76x02_eeprom_get(dev, MT_EE_NIC_CONF_1) & + MT_EE_NIC_CONF_1_TX_ALC_EN); +} + #endif diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_init_common.c b/drivers/net/wireless/mediatek/mt76/mt76x2/init.c index f4c4cde9301e..3c73fdeaf30f 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2_init_common.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x2/init.c @@ -16,11 +16,11 @@ */ #include "mt76x2.h" -#include "mt76x2_eeprom.h" -#include "mt76x02_phy.h" +#include "eeprom.h" +#include "../mt76x02_phy.h" static void -mt76x2_set_wlan_state(struct mt76x2_dev *dev, bool enable) +mt76x2_set_wlan_state(struct mt76x02_dev *dev, bool enable) { u32 val = mt76_rr(dev, MT_WLAN_FUN_CTRL); @@ -35,7 +35,7 @@ mt76x2_set_wlan_state(struct mt76x2_dev *dev, bool enable) udelay(20); } -void mt76x2_reset_wlan(struct mt76x2_dev *dev, bool enable) +void mt76x2_reset_wlan(struct mt76x02_dev *dev, bool enable) { u32 val; @@ -62,7 +62,7 @@ out: } EXPORT_SYMBOL_GPL(mt76x2_reset_wlan); -void mt76_write_mac_initvals(struct mt76x2_dev *dev) +void mt76_write_mac_initvals(struct mt76x02_dev *dev) { #define DEFAULT_PROT_CFG_CCK \ (FIELD_PREP(MT_PROT_CFG_RATE, 0x3) | \ @@ -158,7 +158,7 @@ void mt76_write_mac_initvals(struct mt76x2_dev *dev) } EXPORT_SYMBOL_GPL(mt76_write_mac_initvals); -void mt76x2_init_device(struct mt76x2_dev *dev) +void mt76x2_init_device(struct mt76x02_dev *dev) { struct ieee80211_hw *hw = mt76_hw(dev); @@ -167,6 +167,9 @@ void mt76x2_init_device(struct mt76x2_dev *dev) hw->max_report_rates = 7; hw->max_rate_tries = 1; hw->extra_tx_headroom = 2; + if (mt76_is_usb(dev)) + hw->extra_tx_headroom += sizeof(struct mt76x02_txwi) + + MT_DMA_HDR_LEN; hw->sta_data_size = sizeof(struct mt76x02_sta); hw->vif_data_size = sizeof(struct mt76x02_vif); @@ -187,7 +190,7 @@ void mt76x2_init_device(struct mt76x2_dev *dev) } EXPORT_SYMBOL_GPL(mt76x2_init_device); -void mt76x2_init_txpower(struct mt76x2_dev *dev, +void mt76x2_init_txpower(struct mt76x02_dev *dev, struct ieee80211_supported_band *sband) { struct ieee80211_channel *chan; diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_common.c b/drivers/net/wireless/mediatek/mt76/mt76x2/mac.c index 3e667d8c0ee7..e25905c91ee2 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2_common.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x2/mac.c @@ -16,27 +16,39 @@ */ #include "mt76x2.h" -#include "mt76x02_mac.h" -void mt76x2_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q, - struct sk_buff *skb) +void mt76x2_mac_stop(struct mt76x02_dev *dev, bool force) { - struct mt76x2_dev *dev = container_of(mdev, struct mt76x2_dev, mt76); - void *rxwi = skb->data; - - if (q == MT_RXQ_MCU) { - /* this is used just by mmio code */ - skb_queue_tail(&mdev->mmio.mcu.res_q, skb); - wake_up(&mdev->mmio.mcu.wait); - return; + bool stopped = false; + u32 rts_cfg; + int i; + + mt76_wr(dev, MT_MAC_SYS_CTRL, 0); + + rts_cfg = mt76_rr(dev, MT_TX_RTS_CFG); + mt76_wr(dev, MT_TX_RTS_CFG, rts_cfg & ~MT_TX_RTS_CFG_RETRY_LIMIT); + + /* Wait for MAC to become idle */ + for (i = 0; i < 300; i++) { + if ((mt76_rr(dev, MT_MAC_STATUS) & + (MT_MAC_STATUS_RX | MT_MAC_STATUS_TX)) || + mt76_rr(dev, MT_BBP(IBI, 12))) { + 
 			udelay(1);
+			continue;
+		}
+
+		stopped = true;
+		break;
 	}
 
-	skb_pull(skb, sizeof(struct mt76x02_rxwi));
-	if (mt76x2_mac_process_rx(dev, skb, rxwi)) {
-		dev_kfree_skb(skb);
-		return;
+	if (force && !stopped) {
+		mt76_set(dev, MT_BBP(CORE, 4), BIT(1));
+		mt76_clear(dev, MT_BBP(CORE, 4), BIT(1));
+
+		mt76_set(dev, MT_BBP(CORE, 4), BIT(0));
+		mt76_clear(dev, MT_BBP(CORE, 4), BIT(0));
 	}
 
-	mt76_rx(&dev->mt76, q, skb);
+	mt76_wr(dev, MT_TX_RTS_CFG, rts_cfg);
 }
-EXPORT_SYMBOL_GPL(mt76x2_queue_rx_skb);
+EXPORT_SYMBOL_GPL(mt76x2_mac_stop);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_dma.c b/drivers/net/wireless/mediatek/mt76/mt76x2/mac.h
index 7e5eccda47f8..a31bd49ae6cb 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2_dma.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/mac.h
@@ -14,26 +14,24 @@
  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
  */
 
+#ifndef __MT76x2_MAC_H
+#define __MT76x2_MAC_H
+
 #include "mt76x2.h"
-#include "mt76x02_dma.h"
-#include "mt76x02_util.h"
 
-void mt76x2_tx_tasklet(unsigned long data)
-{
-	struct mt76x2_dev *dev = (struct mt76x2_dev *) data;
-	int i;
+struct mt76x02_dev;
+struct mt76x2_sta;
+struct mt76x02_vif;
 
-	mt76x2_mac_process_tx_status_fifo(dev);
+int mt76x2_mac_start(struct mt76x02_dev *dev);
+void mt76x2_mac_stop(struct mt76x02_dev *dev, bool force);
+void mt76x2_mac_resume(struct mt76x02_dev *dev);
+void mt76x2_mac_set_bssid(struct mt76x02_dev *dev, u8 idx, const u8 *addr);
 
-	for (i = MT_TXQ_MCU; i >= 0; i--)
-		mt76_queue_tx_cleanup(dev, i, false);
+int mt76x2_mac_set_beacon(struct mt76x02_dev *dev, u8 vif_idx,
+			  struct sk_buff *skb);
+void mt76x2_mac_set_beacon_enable(struct mt76x02_dev *dev, u8 vif_idx, bool val);
 
-	mt76x2_mac_poll_tx_status(dev, false);
-	mt76x02_irq_enable(&dev->mt76, MT_INT_TX_DONE_ALL);
-}
+void mt76x2_mac_work(struct work_struct *work);
 
-void mt76x2_dma_cleanup(struct mt76x2_dev *dev)
-{
-	tasklet_kill(&dev->tx_tasklet);
-	mt76_dma_cleanup(&dev->mt76);
-}
+#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_mcu_common.c b/drivers/net/wireless/mediatek/mt76/mt76x2/mcu.c
index eff483333183..88bd62cfbdf9 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2_mcu_common.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/mcu.c
@@ -20,11 +20,10 @@
 #include <linux/delay.h>
 
 #include "mt76x2.h"
-#include "mt76x2_mcu.h"
-#include "mt76x2_eeprom.h"
-#include "mt76x02_dma.h"
+#include "mcu.h"
+#include "eeprom.h"
 
-int mt76x2_mcu_set_channel(struct mt76x2_dev *dev, u8 channel, u8 bw,
+int mt76x2_mcu_set_channel(struct mt76x02_dev *dev, u8 channel, u8 bw,
 			   u8 bw_index, bool scan)
 {
 	struct sk_buff *skb;
@@ -57,10 +56,9 @@ int mt76x2_mcu_set_channel(struct mt76x2_dev *dev, u8 channel, u8 bw,
 }
 EXPORT_SYMBOL_GPL(mt76x2_mcu_set_channel);
 
-int mt76x2_mcu_load_cr(struct mt76x2_dev *dev, u8 type, u8 temp_level,
+int mt76x2_mcu_load_cr(struct mt76x02_dev *dev, u8 type, u8 temp_level,
 		       u8 channel)
 {
-	struct mt76_dev *mdev = &dev->mt76;
 	struct sk_buff *skb;
 	struct {
 		u8 cr_mode;
@@ -77,8 +75,8 @@ int mt76x2_mcu_load_cr(struct mt76x2_dev *dev, u8 type, u8 temp_level,
 	u32 val;
 
 	val = BIT(31);
-	val |= (mt76x02_eeprom_get(mdev, MT_EE_NIC_CONF_0) >> 8) & 0x00ff;
-	val |= (mt76x02_eeprom_get(mdev, MT_EE_NIC_CONF_1) << 8) & 0xff00;
+	val |= (mt76x02_eeprom_get(dev, MT_EE_NIC_CONF_0) >> 8) & 0x00ff;
+	val |= (mt76x02_eeprom_get(dev, MT_EE_NIC_CONF_1) << 8) & 0xff00;
 	msg.cfg = cpu_to_le32(val);
 
 	/* first set the channel without the extension channel info */
@@ -87,7 +85,7 @@ int mt76x2_mcu_load_cr(struct mt76x2_dev *dev, u8 type, u8 temp_level,
 }
 EXPORT_SYMBOL_GPL(mt76x2_mcu_load_cr);
 
-int mt76x2_mcu_init_gain(struct mt76x2_dev *dev, u8 channel, u32 gain,
+int mt76x2_mcu_init_gain(struct mt76x02_dev *dev, u8 channel, u32 gain,
 			 bool force)
 {
 	struct sk_buff *skb;
@@ -107,7 +105,7 @@ int mt76x2_mcu_init_gain(struct mt76x2_dev *dev, u8 channel, u32 gain,
 }
 EXPORT_SYMBOL_GPL(mt76x2_mcu_init_gain);
 
-int mt76x2_mcu_tssi_comp(struct mt76x2_dev *dev,
+int mt76x2_mcu_tssi_comp(struct mt76x02_dev *dev,
 			 struct mt76x2_tssi_comp *tssi_data)
 {
 	struct sk_buff *skb;
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_mcu.h b/drivers/net/wireless/mediatek/mt76/mt76x2/mcu.h
index fa72d5a5ecad..acfa2b570c7c 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2_mcu.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/mcu.h
@@ -17,7 +17,7 @@
 #ifndef __MT76x2_MCU_H
 #define __MT76x2_MCU_H
 
-#include "mt76x02_mcu.h"
+#include "../mt76x02_mcu.h"
 
 /* Register definitions */
 #define MT_MCU_CPU_CTL			0x0704
@@ -94,8 +94,8 @@ struct mt76x2_tssi_comp {
 	u8 offset1;
 } __packed __aligned(4);
 
-int mt76x2_mcu_tssi_comp(struct mt76x2_dev *dev, struct mt76x2_tssi_comp *tssi_data);
-int mt76x2_mcu_init_gain(struct mt76x2_dev *dev, u8 channel, u32 gain,
+int mt76x2_mcu_tssi_comp(struct mt76x02_dev *dev, struct mt76x2_tssi_comp *tssi_data);
+int mt76x2_mcu_init_gain(struct mt76x02_dev *dev, u8 channel, u32 gain,
 			 bool force);
 
 #endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/mt76x2.h b/drivers/net/wireless/mediatek/mt76/mt76x2/mt76x2.h
new file mode 100644
index 000000000000..ab93125f46de
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/mt76x2.h
@@ -0,0 +1,105 @@
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */ + +#ifndef __MT76x2_H +#define __MT76x2_H + +#include <linux/device.h> +#include <linux/dma-mapping.h> +#include <linux/spinlock.h> +#include <linux/skbuff.h> +#include <linux/netdevice.h> +#include <linux/irq.h> +#include <linux/interrupt.h> +#include <linux/mutex.h> +#include <linux/bitops.h> + +#define MT7662_FIRMWARE "mt7662.bin" +#define MT7662_ROM_PATCH "mt7662_rom_patch.bin" +#define MT7662_EEPROM_SIZE 512 + +#define MT7662U_FIRMWARE "mediatek/mt7662u.bin" +#define MT7662U_ROM_PATCH "mediatek/mt7662u_rom_patch.bin" + +#define MT_CALIBRATE_INTERVAL HZ + +#include "../mt76x02.h" +#include "mac.h" +#include "dfs.h" + +static inline bool is_mt7612(struct mt76x02_dev *dev) +{ + return mt76_chip(&dev->mt76) == 0x7612; +} + +static inline bool mt76x2_channel_silent(struct mt76x02_dev *dev) +{ + struct ieee80211_channel *chan = dev->mt76.chandef.chan; + + return ((chan->flags & IEEE80211_CHAN_RADAR) && + chan->dfs_state != NL80211_DFS_AVAILABLE); +} + +extern const struct ieee80211_ops mt76x2_ops; + +struct mt76x02_dev *mt76x2_alloc_device(struct device *pdev); +int mt76x2_register_device(struct mt76x02_dev *dev); +void mt76x2_init_debugfs(struct mt76x02_dev *dev); +void mt76x2_init_device(struct mt76x02_dev *dev); + +void mt76x2_phy_power_on(struct mt76x02_dev *dev); +int mt76x2_init_hardware(struct mt76x02_dev *dev); +void mt76x2_stop_hardware(struct mt76x02_dev *dev); +int mt76x2_eeprom_init(struct mt76x02_dev *dev); +int mt76x2_apply_calibration_data(struct mt76x02_dev *dev, int channel); +void mt76x2_set_tx_ackto(struct mt76x02_dev *dev); + +void mt76x2_phy_set_antenna(struct mt76x02_dev *dev); +int mt76x2_phy_start(struct mt76x02_dev *dev); +int mt76x2_phy_set_channel(struct mt76x02_dev *dev, + struct cfg80211_chan_def *chandef); +void mt76x2_phy_calibrate(struct work_struct *work); +void mt76x2_phy_set_txpower(struct mt76x02_dev *dev); + +int mt76x2_mcu_init(struct mt76x02_dev *dev); +int mt76x2_mcu_set_channel(struct mt76x02_dev *dev, u8 channel, u8 bw, + u8 bw_index, bool scan); +int mt76x2_mcu_load_cr(struct mt76x02_dev *dev, u8 type, u8 temp_level, + u8 channel); + +void mt76x2_cleanup(struct mt76x02_dev *dev); + +void mt76x2_mac_set_tx_protection(struct mt76x02_dev *dev, u32 val); + +void mt76x2_pre_tbtt_tasklet(unsigned long arg); + +void mt76x2_sta_ps(struct mt76_dev *dev, struct ieee80211_sta *sta, bool ps); + +void mt76x2_update_channel(struct mt76_dev *mdev); + +void mt76x2_reset_wlan(struct mt76x02_dev *dev, bool enable); +void mt76x2_init_txpower(struct mt76x02_dev *dev, + struct ieee80211_supported_band *sband); +void mt76_write_mac_initvals(struct mt76x02_dev *dev); + +void mt76x2_phy_tssi_compensate(struct mt76x02_dev *dev, bool wait); +void mt76x2_phy_set_txpower_regs(struct mt76x02_dev *dev, + enum nl80211_band band); +void mt76x2_configure_tx_delay(struct mt76x02_dev *dev, + enum nl80211_band band, u8 bw); +void mt76x2_apply_gain_adj(struct mt76x02_dev *dev); + +#endif diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2u.h b/drivers/net/wireless/mediatek/mt76/mt76x2/mt76x2u.h index 5d2ebdf42c63..6e932b5010ef 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2u.h +++ b/drivers/net/wireless/mediatek/mt76/mt76x2/mt76x2u.h @@ -20,8 +20,7 @@ #include <linux/device.h> #include "mt76x2.h" -#include "mt76x2_mcu.h" -#include "mt76x02_dma.h" +#include "mcu.h" #define MT7612U_EEPROM_SIZE 512 @@ -30,35 +29,31 @@ extern const struct ieee80211_ops mt76x2u_ops; -struct mt76x2_dev *mt76x2u_alloc_device(struct device *pdev); -int mt76x2u_register_device(struct mt76x2_dev 
*dev); -int mt76x2u_init_hardware(struct mt76x2_dev *dev); -void mt76x2u_cleanup(struct mt76x2_dev *dev); -void mt76x2u_stop_hw(struct mt76x2_dev *dev); +struct mt76x02_dev *mt76x2u_alloc_device(struct device *pdev); +int mt76x2u_register_device(struct mt76x02_dev *dev); +int mt76x2u_init_hardware(struct mt76x02_dev *dev); +void mt76x2u_cleanup(struct mt76x02_dev *dev); +void mt76x2u_stop_hw(struct mt76x02_dev *dev); -int mt76x2u_mac_reset(struct mt76x2_dev *dev); -void mt76x2u_mac_resume(struct mt76x2_dev *dev); -int mt76x2u_mac_start(struct mt76x2_dev *dev); -int mt76x2u_mac_stop(struct mt76x2_dev *dev); +int mt76x2u_mac_reset(struct mt76x02_dev *dev); +void mt76x2u_mac_resume(struct mt76x02_dev *dev); +int mt76x2u_mac_start(struct mt76x02_dev *dev); +int mt76x2u_mac_stop(struct mt76x02_dev *dev); -int mt76x2u_phy_set_channel(struct mt76x2_dev *dev, +int mt76x2u_phy_set_channel(struct mt76x02_dev *dev, struct cfg80211_chan_def *chandef); void mt76x2u_phy_calibrate(struct work_struct *work); -void mt76x2u_phy_channel_calibrate(struct mt76x2_dev *dev); +void mt76x2u_phy_channel_calibrate(struct mt76x02_dev *dev); void mt76x2u_mcu_complete_urb(struct urb *urb); -int mt76x2u_mcu_set_dynamic_vga(struct mt76x2_dev *dev, u8 channel, bool ap, +int mt76x2u_mcu_set_dynamic_vga(struct mt76x02_dev *dev, u8 channel, bool ap, bool ext, int rssi, u32 false_cca); -int mt76x2u_mcu_init(struct mt76x2_dev *dev); -int mt76x2u_mcu_fw_init(struct mt76x2_dev *dev); +int mt76x2u_mcu_init(struct mt76x02_dev *dev); +int mt76x2u_mcu_fw_init(struct mt76x02_dev *dev); -int mt76x2u_alloc_queues(struct mt76x2_dev *dev); -void mt76x2u_queues_deinit(struct mt76x2_dev *dev); -void mt76x2u_stop_queues(struct mt76x2_dev *dev); -int mt76x2u_tx_prepare_skb(struct mt76_dev *mdev, void *data, - struct sk_buff *skb, struct mt76_queue *q, - struct mt76_wcid *wcid, struct ieee80211_sta *sta, - u32 *tx_info); +int mt76x2u_alloc_queues(struct mt76x02_dev *dev); +void mt76x2u_queues_deinit(struct mt76x02_dev *dev); +void mt76x2u_stop_queues(struct mt76x02_dev *dev); int mt76x2u_skb_dma_info(struct sk_buff *skb, enum dma_msg_port port, u32 flags); diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_pci.c b/drivers/net/wireless/mediatek/mt76/mt76x2/pci.c index 26cfda24ce08..92432fe97312 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2_pci.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x2/pci.c @@ -19,7 +19,6 @@ #include <linux/pci.h> #include "mt76x2.h" -#include "mt76x2_trace.h" static const struct pci_device_id mt76pci_device_table[] = { { PCI_DEVICE(0x14c3, 0x7662) }, @@ -31,7 +30,7 @@ static const struct pci_device_id mt76pci_device_table[] = { static int mt76pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) { - struct mt76x2_dev *dev; + struct mt76x02_dev *dev; int ret; ret = pcim_enable_device(pdev); @@ -58,7 +57,7 @@ mt76pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) dev->mt76.rev = mt76_rr(dev, MT_ASIC_VERSION); dev_info(dev->mt76.dev, "ASIC revision: %08x\n", dev->mt76.rev); - ret = devm_request_irq(dev->mt76.dev, pdev->irq, mt76x2_irq_handler, + ret = devm_request_irq(dev->mt76.dev, pdev->irq, mt76x02_irq_handler, IRQF_SHARED, KBUILD_MODNAME, dev); if (ret) goto error; @@ -89,7 +88,7 @@ static void mt76pci_remove(struct pci_dev *pdev) { struct mt76_dev *mdev = pci_get_drvdata(pdev); - struct mt76x2_dev *dev = container_of(mdev, struct mt76x2_dev, mt76); + struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76); mt76_unregister_device(mdev); mt76x2_cleanup(dev); diff 
--git a/drivers/net/wireless/mediatek/mt76/mt76x2_dfs.c b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_dfs.c index 8cfa3a063bda..b56febae8945 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2_dfs.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_dfs.c @@ -15,7 +15,6 @@ */ #include "mt76x2.h" -#include "mt76x02_util.h" #define RADAR_SPEC(m, len, el, eh, wl, wh, \ w_tolerance, tl, th, t_tolerance, \ @@ -37,7 +36,7 @@ .pwr_jmp = power_jmp \ } -static const struct mt76x2_radar_specs etsi_radar_specs[] = { +static const struct mt76x02_radar_specs etsi_radar_specs[] = { /* 20MHz */ RADAR_SPEC(0, 8, 2, 15, 106, 150, 10, 4900, 100096, 10, 0, 0x7fffffff, 0x155cc0, 0x19cc), @@ -67,7 +66,7 @@ static const struct mt76x2_radar_specs etsi_radar_specs[] = { 0x7fffffff, 0x2191c0, 0x15cc) }; -static const struct mt76x2_radar_specs fcc_radar_specs[] = { +static const struct mt76x02_radar_specs fcc_radar_specs[] = { /* 20MHz */ RADAR_SPEC(0, 8, 2, 12, 106, 150, 5, 2900, 80100, 5, 0, 0x7fffffff, 0xfe808, 0x13dc), @@ -97,7 +96,7 @@ static const struct mt76x2_radar_specs fcc_radar_specs[] = { 0x3938700, 0x57bcf00, 0x1289) }; -static const struct mt76x2_radar_specs jp_w56_radar_specs[] = { +static const struct mt76x02_radar_specs jp_w56_radar_specs[] = { /* 20MHz */ RADAR_SPEC(0, 8, 2, 7, 106, 150, 5, 2900, 80100, 5, 0, 0x7fffffff, 0x14c080, 0x13dc), @@ -127,7 +126,7 @@ static const struct mt76x2_radar_specs jp_w56_radar_specs[] = { 0x3938700, 0X57bcf00, 0x1289) }; -static const struct mt76x2_radar_specs jp_w53_radar_specs[] = { +static const struct mt76x02_radar_specs jp_w53_radar_specs[] = { /* 20MHz */ RADAR_SPEC(0, 8, 2, 9, 106, 150, 20, 28400, 77000, 20, 0, 0x7fffffff, 0x14c080, 0x16cc), @@ -151,8 +150,9 @@ static const struct mt76x2_radar_specs jp_w53_radar_specs[] = { { 0 } }; -static void mt76x2_dfs_set_capture_mode_ctrl(struct mt76x2_dev *dev, - u8 enable) +static void +mt76x2_dfs_set_capture_mode_ctrl(struct mt76x02_dev *dev, + u8 enable) { u32 data; @@ -160,10 +160,10 @@ static void mt76x2_dfs_set_capture_mode_ctrl(struct mt76x2_dev *dev, mt76_wr(dev, MT_BBP(DFS, 36), data); } -static void mt76x2_dfs_seq_pool_put(struct mt76x2_dev *dev, - struct mt76x2_dfs_sequence *seq) +static void mt76x2_dfs_seq_pool_put(struct mt76x02_dev *dev, + struct mt76x02_dfs_sequence *seq) { - struct mt76x2_dfs_pattern_detector *dfs_pd = &dev->dfs_pd; + struct mt76x02_dfs_pattern_detector *dfs_pd = &dev->dfs_pd; list_add(&seq->head, &dfs_pd->seq_pool); @@ -171,17 +171,17 @@ static void mt76x2_dfs_seq_pool_put(struct mt76x2_dev *dev, dfs_pd->seq_stats.seq_len--; } -static -struct mt76x2_dfs_sequence *mt76x2_dfs_seq_pool_get(struct mt76x2_dev *dev) +static struct mt76x02_dfs_sequence * +mt76x2_dfs_seq_pool_get(struct mt76x02_dev *dev) { - struct mt76x2_dfs_pattern_detector *dfs_pd = &dev->dfs_pd; - struct mt76x2_dfs_sequence *seq; + struct mt76x02_dfs_pattern_detector *dfs_pd = &dev->dfs_pd; + struct mt76x02_dfs_sequence *seq; if (list_empty(&dfs_pd->seq_pool)) { seq = devm_kzalloc(dev->mt76.dev, sizeof(*seq), GFP_ATOMIC); } else { seq = list_first_entry(&dfs_pd->seq_pool, - struct mt76x2_dfs_sequence, + struct mt76x02_dfs_sequence, head); list_del(&seq->head); dfs_pd->seq_stats.seq_pool_len--; @@ -214,10 +214,10 @@ static int mt76x2_dfs_get_multiple(int val, int frac, int margin) return factor; } -static void mt76x2_dfs_detector_reset(struct mt76x2_dev *dev) +static void mt76x2_dfs_detector_reset(struct mt76x02_dev *dev) { - struct mt76x2_dfs_pattern_detector *dfs_pd = &dev->dfs_pd; - struct mt76x2_dfs_sequence 
*seq, *tmp_seq; + struct mt76x02_dfs_pattern_detector *dfs_pd = &dev->dfs_pd; + struct mt76x02_dfs_sequence *seq, *tmp_seq; int i; /* reset hw detector */ @@ -235,11 +235,11 @@ static void mt76x2_dfs_detector_reset(struct mt76x2_dev *dev) } } -static bool mt76x2_dfs_check_chirp(struct mt76x2_dev *dev) +static bool mt76x2_dfs_check_chirp(struct mt76x02_dev *dev) { bool ret = false; u32 current_ts, delta_ts; - struct mt76x2_dfs_pattern_detector *dfs_pd = &dev->dfs_pd; + struct mt76x02_dfs_pattern_detector *dfs_pd = &dev->dfs_pd; current_ts = mt76_rr(dev, MT_PBF_LIFE_TIMER); delta_ts = current_ts - dfs_pd->chirp_pulse_ts; @@ -256,8 +256,8 @@ static bool mt76x2_dfs_check_chirp(struct mt76x2_dev *dev) return ret; } -static void mt76x2_dfs_get_hw_pulse(struct mt76x2_dev *dev, - struct mt76x2_dfs_hw_pulse *pulse) +static void mt76x2_dfs_get_hw_pulse(struct mt76x02_dev *dev, + struct mt76x02_dfs_hw_pulse *pulse) { u32 data; @@ -276,8 +276,8 @@ static void mt76x2_dfs_get_hw_pulse(struct mt76x2_dev *dev, pulse->burst = mt76_rr(dev, MT_BBP(DFS, 22)); } -static bool mt76x2_dfs_check_hw_pulse(struct mt76x2_dev *dev, - struct mt76x2_dfs_hw_pulse *pulse) +static bool mt76x2_dfs_check_hw_pulse(struct mt76x02_dev *dev, + struct mt76x02_dfs_hw_pulse *pulse) { bool ret = false; @@ -371,8 +371,8 @@ static bool mt76x2_dfs_check_hw_pulse(struct mt76x2_dev *dev, return ret; } -static bool mt76x2_dfs_fetch_event(struct mt76x2_dev *dev, - struct mt76x2_dfs_event *event) +static bool mt76x2_dfs_fetch_event(struct mt76x02_dev *dev, + struct mt76x02_dfs_event *event) { u32 data; @@ -398,12 +398,12 @@ static bool mt76x2_dfs_fetch_event(struct mt76x2_dev *dev, return true; } -static bool mt76x2_dfs_check_event(struct mt76x2_dev *dev, - struct mt76x2_dfs_event *event) +static bool mt76x2_dfs_check_event(struct mt76x02_dev *dev, + struct mt76x02_dfs_event *event) { if (event->engine == 2) { - struct mt76x2_dfs_pattern_detector *dfs_pd = &dev->dfs_pd; - struct mt76x2_dfs_event_rb *event_buff = &dfs_pd->event_rb[1]; + struct mt76x02_dfs_pattern_detector *dfs_pd = &dev->dfs_pd; + struct mt76x02_dfs_event_rb *event_buff = &dfs_pd->event_rb[1]; u16 last_event_idx; u32 delta_ts; @@ -417,11 +417,11 @@ static bool mt76x2_dfs_check_event(struct mt76x2_dev *dev, return true; } -static void mt76x2_dfs_queue_event(struct mt76x2_dev *dev, - struct mt76x2_dfs_event *event) +static void mt76x2_dfs_queue_event(struct mt76x02_dev *dev, + struct mt76x02_dfs_event *event) { - struct mt76x2_dfs_pattern_detector *dfs_pd = &dev->dfs_pd; - struct mt76x2_dfs_event_rb *event_buff; + struct mt76x02_dfs_pattern_detector *dfs_pd = &dev->dfs_pd; + struct mt76x02_dfs_event_rb *event_buff; /* add radar event to ring buffer */ event_buff = event->engine == 2 ? 
&dfs_pd->event_rb[1] @@ -435,16 +435,16 @@ static void mt76x2_dfs_queue_event(struct mt76x2_dev *dev, MT_DFS_EVENT_BUFLEN); } -static int mt76x2_dfs_create_sequence(struct mt76x2_dev *dev, - struct mt76x2_dfs_event *event, +static int mt76x2_dfs_create_sequence(struct mt76x02_dev *dev, + struct mt76x02_dfs_event *event, u16 cur_len) { - struct mt76x2_dfs_pattern_detector *dfs_pd = &dev->dfs_pd; - struct mt76x2_dfs_sw_detector_params *sw_params; + struct mt76x02_dfs_pattern_detector *dfs_pd = &dev->dfs_pd; + struct mt76x02_dfs_sw_detector_params *sw_params; u32 width_delta, with_sum, factor, cur_pri; - struct mt76x2_dfs_sequence seq, *seq_p; - struct mt76x2_dfs_event_rb *event_rb; - struct mt76x2_dfs_event *cur_event; + struct mt76x02_dfs_sequence seq, *seq_p; + struct mt76x02_dfs_event_rb *event_rb; + struct mt76x02_dfs_event *cur_event; int i, j, end, pri; event_rb = event->engine == 2 ? &dfs_pd->event_rb[1] @@ -522,12 +522,12 @@ next: return 0; } -static u16 mt76x2_dfs_add_event_to_sequence(struct mt76x2_dev *dev, - struct mt76x2_dfs_event *event) +static u16 mt76x2_dfs_add_event_to_sequence(struct mt76x02_dev *dev, + struct mt76x02_dfs_event *event) { - struct mt76x2_dfs_pattern_detector *dfs_pd = &dev->dfs_pd; - struct mt76x2_dfs_sw_detector_params *sw_params; - struct mt76x2_dfs_sequence *seq, *tmp_seq; + struct mt76x02_dfs_pattern_detector *dfs_pd = &dev->dfs_pd; + struct mt76x02_dfs_sw_detector_params *sw_params; + struct mt76x02_dfs_sequence *seq, *tmp_seq; u16 max_seq_len = 0; u32 factor, pri; @@ -554,10 +554,10 @@ static u16 mt76x2_dfs_add_event_to_sequence(struct mt76x2_dev *dev, return max_seq_len; } -static bool mt76x2_dfs_check_detection(struct mt76x2_dev *dev) +static bool mt76x2_dfs_check_detection(struct mt76x02_dev *dev) { - struct mt76x2_dfs_pattern_detector *dfs_pd = &dev->dfs_pd; - struct mt76x2_dfs_sequence *seq; + struct mt76x02_dfs_pattern_detector *dfs_pd = &dev->dfs_pd; + struct mt76x02_dfs_sequence *seq; if (list_empty(&dfs_pd->sequences)) return false; @@ -571,10 +571,10 @@ static bool mt76x2_dfs_check_detection(struct mt76x2_dev *dev) return false; } -static void mt76x2_dfs_add_events(struct mt76x2_dev *dev) +static void mt76x2_dfs_add_events(struct mt76x02_dev *dev) { - struct mt76x2_dfs_pattern_detector *dfs_pd = &dev->dfs_pd; - struct mt76x2_dfs_event event; + struct mt76x02_dfs_pattern_detector *dfs_pd = &dev->dfs_pd; + struct mt76x02_dfs_event event; int i, seq_len; /* disable debug mode */ @@ -598,11 +598,11 @@ static void mt76x2_dfs_add_events(struct mt76x2_dev *dev) mt76x2_dfs_set_capture_mode_ctrl(dev, true); } -static void mt76x2_dfs_check_event_window(struct mt76x2_dev *dev) +static void mt76x2_dfs_check_event_window(struct mt76x02_dev *dev) { - struct mt76x2_dfs_pattern_detector *dfs_pd = &dev->dfs_pd; - struct mt76x2_dfs_event_rb *event_buff; - struct mt76x2_dfs_event *event; + struct mt76x02_dfs_pattern_detector *dfs_pd = &dev->dfs_pd; + struct mt76x02_dfs_event_rb *event_buff; + struct mt76x02_dfs_event *event; int i; for (i = 0; i < ARRAY_SIZE(dfs_pd->event_rb); i++) { @@ -623,8 +623,8 @@ static void mt76x2_dfs_check_event_window(struct mt76x2_dev *dev) static void mt76x2_dfs_tasklet(unsigned long arg) { - struct mt76x2_dev *dev = (struct mt76x2_dev *)arg; - struct mt76x2_dfs_pattern_detector *dfs_pd = &dev->dfs_pd; + struct mt76x02_dev *dev = (struct mt76x02_dev *)arg; + struct mt76x02_dfs_pattern_detector *dfs_pd = &dev->dfs_pd; u32 engine_mask; int i; @@ -654,7 +654,7 @@ static void mt76x2_dfs_tasklet(unsigned long arg) goto out; for (i = 0; 
i < MT_DFS_NUM_ENGINES; i++) { - struct mt76x2_dfs_hw_pulse pulse; + struct mt76x02_dfs_hw_pulse pulse; if (!(engine_mask & (1 << i))) continue; @@ -679,12 +679,12 @@ static void mt76x2_dfs_tasklet(unsigned long arg) mt76_wr(dev, MT_BBP(DFS, 1), 0xf); out: - mt76x02_irq_enable(&dev->mt76, MT_INT_GPTIMER); + mt76x02_irq_enable(dev, MT_INT_GPTIMER); } -static void mt76x2_dfs_init_sw_detector(struct mt76x2_dev *dev) +static void mt76x2_dfs_init_sw_detector(struct mt76x02_dev *dev) { - struct mt76x2_dfs_pattern_detector *dfs_pd = &dev->dfs_pd; + struct mt76x02_dfs_pattern_detector *dfs_pd = &dev->dfs_pd; switch (dev->dfs_pd.region) { case NL80211_DFS_FCC: @@ -708,11 +708,11 @@ static void mt76x2_dfs_init_sw_detector(struct mt76x2_dev *dev) } } -static void mt76x2_dfs_set_bbp_params(struct mt76x2_dev *dev) +static void mt76x2_dfs_set_bbp_params(struct mt76x02_dev *dev) { - u32 data; + const struct mt76x02_radar_specs *radar_specs; u8 i, shift; - const struct mt76x2_radar_specs *radar_specs; + u32 data; switch (dev->mt76.chandef.width) { case NL80211_CHAN_WIDTH_40: @@ -803,7 +803,7 @@ static void mt76x2_dfs_set_bbp_params(struct mt76x2_dev *dev) mt76_wr(dev, 0x212c, 0x0c350001); } -void mt76x2_dfs_adjust_agc(struct mt76x2_dev *dev) +void mt76x2_dfs_adjust_agc(struct mt76x02_dev *dev) { u32 agc_r8, agc_r4, val_r8, val_r4, dfs_r31; @@ -824,7 +824,7 @@ void mt76x2_dfs_adjust_agc(struct mt76x2_dev *dev) mt76_wr(dev, MT_BBP(DFS, 32), 0x00040071); } -void mt76x2_dfs_init_params(struct mt76x2_dev *dev) +void mt76x2_dfs_init_params(struct mt76x02_dev *dev) { struct cfg80211_chan_def *chandef = &dev->mt76.chandef; @@ -835,7 +835,7 @@ void mt76x2_dfs_init_params(struct mt76x2_dev *dev) /* enable debug mode */ mt76x2_dfs_set_capture_mode_ctrl(dev, true); - mt76x02_irq_enable(&dev->mt76, MT_INT_GPTIMER); + mt76x02_irq_enable(dev, MT_INT_GPTIMER); mt76_rmw_field(dev, MT_INT_TIMER_EN, MT_INT_TIMER_EN_GP_TIMER_EN, 1); } else { @@ -845,15 +845,15 @@ void mt76x2_dfs_init_params(struct mt76x2_dev *dev) mt76_wr(dev, MT_BBP(DFS, 1), 0xf); mt76_wr(dev, 0x212c, 0); - mt76x02_irq_disable(&dev->mt76, MT_INT_GPTIMER); + mt76x02_irq_disable(dev, MT_INT_GPTIMER); mt76_rmw_field(dev, MT_INT_TIMER_EN, MT_INT_TIMER_EN_GP_TIMER_EN, 0); } } -void mt76x2_dfs_init_detector(struct mt76x2_dev *dev) +void mt76x2_dfs_init_detector(struct mt76x02_dev *dev) { - struct mt76x2_dfs_pattern_detector *dfs_pd = &dev->dfs_pd; + struct mt76x02_dfs_pattern_detector *dfs_pd = &dev->dfs_pd; INIT_LIST_HEAD(&dfs_pd->sequences); INIT_LIST_HEAD(&dfs_pd->seq_pool); @@ -863,10 +863,10 @@ void mt76x2_dfs_init_detector(struct mt76x2_dev *dev) (unsigned long)dev); } -void mt76x2_dfs_set_domain(struct mt76x2_dev *dev, +void mt76x2_dfs_set_domain(struct mt76x02_dev *dev, enum nl80211_dfs_regions region) { - struct mt76x2_dfs_pattern_detector *dfs_pd = &dev->dfs_pd; + struct mt76x02_dfs_pattern_detector *dfs_pd = &dev->dfs_pd; if (dfs_pd->region != region) { tasklet_disable(&dfs_pd->dfs_tasklet); diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_init.c b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_init.c index 3f77c13a6d54..3824290b219d 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2_init.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_init.c @@ -16,13 +16,11 @@ #include <linux/delay.h> #include "mt76x2.h" -#include "mt76x2_eeprom.h" -#include "mt76x2_mcu.h" -#include "mt76x02_util.h" -#include "mt76x02_dma.h" +#include "eeprom.h" +#include "mcu.h" static void -mt76x2_mac_pbf_init(struct mt76x2_dev *dev) +mt76x2_mac_pbf_init(struct 
mt76x02_dev *dev) { u32 val; @@ -40,12 +38,12 @@ mt76x2_mac_pbf_init(struct mt76x2_dev *dev) } static void -mt76x2_fixup_xtal(struct mt76x2_dev *dev) +mt76x2_fixup_xtal(struct mt76x02_dev *dev) { u16 eep_val; s8 offset = 0; - eep_val = mt76x02_eeprom_get(&dev->mt76, MT_EE_XTAL_TRIM_2); + eep_val = mt76x02_eeprom_get(dev, MT_EE_XTAL_TRIM_2); offset = eep_val & 0x7f; if ((eep_val & 0xff) == 0xff) @@ -55,7 +53,7 @@ mt76x2_fixup_xtal(struct mt76x2_dev *dev) eep_val >>= 8; if (eep_val == 0x00 || eep_val == 0xff) { - eep_val = mt76x02_eeprom_get(&dev->mt76, MT_EE_XTAL_TRIM_1); + eep_val = mt76x02_eeprom_get(dev, MT_EE_XTAL_TRIM_1); eep_val &= 0xff; if (eep_val == 0x00 || eep_val == 0xff) @@ -66,7 +64,7 @@ mt76x2_fixup_xtal(struct mt76x2_dev *dev) mt76_rmw_field(dev, MT_XO_CTRL5, MT_XO_CTRL5_C2_VAL, eep_val + offset); mt76_set(dev, MT_XO_CTRL6, MT_XO_CTRL6_C2_CTRL); - eep_val = mt76x02_eeprom_get(&dev->mt76, MT_EE_NIC_CONF_2); + eep_val = mt76x02_eeprom_get(dev, MT_EE_NIC_CONF_2); switch (FIELD_GET(MT_EE_NIC_CONF_2_XTAL_OPTION, eep_val)) { case 0: mt76_wr(dev, MT_XO_CTRL7, 0x5c1fee80); @@ -79,7 +77,7 @@ mt76x2_fixup_xtal(struct mt76x2_dev *dev) } } -static int mt76x2_mac_reset(struct mt76x2_dev *dev, bool hard) +static int mt76x2_mac_reset(struct mt76x02_dev *dev, bool hard) { static const u8 null_addr[ETH_ALEN] = {}; const u8 *macaddr = dev->mt76.macaddr; @@ -145,14 +143,14 @@ static int mt76x2_mac_reset(struct mt76x2_dev *dev, bool hard) mt76_wr(dev, MT_WCID_DROP_BASE + i * 4, 0); for (i = 0; i < 256; i++) - mt76x02_mac_wcid_setup(&dev->mt76, i, 0, NULL); + mt76x02_mac_wcid_setup(dev, i, 0, NULL); for (i = 0; i < MT_MAX_VIFS; i++) - mt76x02_mac_wcid_setup(&dev->mt76, MT_VIF_WCID(i), i, NULL); + mt76x02_mac_wcid_setup(dev, MT_VIF_WCID(i), i, NULL); for (i = 0; i < 16; i++) for (k = 0; k < 4; k++) - mt76x02_mac_shared_key_setup(&dev->mt76, i, k, NULL); + mt76x02_mac_shared_key_setup(dev, i, k, NULL); for (i = 0; i < 8; i++) { mt76x2_mac_set_bssid(dev, i, null_addr); @@ -170,14 +168,14 @@ static int mt76x2_mac_reset(struct mt76x2_dev *dev, bool hard) MT_CH_TIME_CFG_EIFS_AS_BUSY | FIELD_PREP(MT_CH_TIME_CFG_CH_TIMER_CLR, 1)); - mt76x02_set_beacon_offsets(&dev->mt76); + mt76x02_set_beacon_offsets(dev); mt76x2_set_tx_ackto(dev); return 0; } -int mt76x2_mac_start(struct mt76x2_dev *dev) +int mt76x2_mac_start(struct mt76x02_dev *dev) { int i; @@ -188,12 +186,12 @@ int mt76x2_mac_start(struct mt76x2_dev *dev) mt76_rr(dev, MT_TX_STAT_FIFO); memset(dev->aggr_stats, 0, sizeof(dev->aggr_stats)); - mt76x02_mac_start(&dev->mt76); + mt76x02_mac_start(dev); return 0; } -void mt76x2_mac_resume(struct mt76x2_dev *dev) +void mt76x2_mac_resume(struct mt76x02_dev *dev) { mt76_wr(dev, MT_MAC_SYS_CTRL, MT_MAC_SYS_CTRL_ENABLE_TX | @@ -201,7 +199,7 @@ void mt76x2_mac_resume(struct mt76x2_dev *dev) } static void -mt76x2_power_on_rf_patch(struct mt76x2_dev *dev) +mt76x2_power_on_rf_patch(struct mt76x02_dev *dev) { mt76_set(dev, 0x10130, BIT(0) | BIT(16)); udelay(1); @@ -222,7 +220,7 @@ mt76x2_power_on_rf_patch(struct mt76x2_dev *dev) } static void -mt76x2_power_on_rf(struct mt76x2_dev *dev, int unit) +mt76x2_power_on_rf(struct mt76x02_dev *dev, int unit) { int shift = unit ? 
8 : 0; @@ -244,7 +242,7 @@ mt76x2_power_on_rf(struct mt76x2_dev *dev, int unit) } static void -mt76x2_power_on(struct mt76x2_dev *dev) +mt76x2_power_on(struct mt76x02_dev *dev) { u32 val; @@ -279,7 +277,7 @@ mt76x2_power_on(struct mt76x2_dev *dev) mt76x2_power_on_rf(dev, 1); } -void mt76x2_set_tx_ackto(struct mt76x2_dev *dev) +void mt76x2_set_tx_ackto(struct mt76x02_dev *dev) { u8 ackto, sifs, slottime = dev->slottime; @@ -296,14 +294,14 @@ void mt76x2_set_tx_ackto(struct mt76x2_dev *dev) MT_TX_TIMEOUT_CFG_ACKTO, ackto); } -int mt76x2_init_hardware(struct mt76x2_dev *dev) +int mt76x2_init_hardware(struct mt76x02_dev *dev) { int ret; tasklet_init(&dev->pre_tbtt_tasklet, mt76x2_pre_tbtt_tasklet, (unsigned long) dev); - mt76x02_dma_disable(&dev->mt76); + mt76x02_dma_disable(dev); mt76x2_reset_wlan(dev, true); mt76x2_power_on(dev); @@ -317,7 +315,7 @@ int mt76x2_init_hardware(struct mt76x2_dev *dev) dev->mt76.rxfilter = mt76_rr(dev, MT_RX_FILTR_CFG); - ret = mt76x02_dma_init(&dev->mt76); + ret = mt76x02_dma_init(dev); if (ret) return ret; @@ -335,43 +333,42 @@ int mt76x2_init_hardware(struct mt76x2_dev *dev) return 0; } -void mt76x2_stop_hardware(struct mt76x2_dev *dev) +void mt76x2_stop_hardware(struct mt76x02_dev *dev) { cancel_delayed_work_sync(&dev->cal_work); cancel_delayed_work_sync(&dev->mac_work); - mt76x02_mcu_set_radio_state(&dev->mt76, false, true); + mt76x02_mcu_set_radio_state(dev, false, true); mt76x2_mac_stop(dev, false); } -void mt76x2_cleanup(struct mt76x2_dev *dev) +void mt76x2_cleanup(struct mt76x02_dev *dev) { tasklet_disable(&dev->dfs_pd.dfs_tasklet); tasklet_disable(&dev->pre_tbtt_tasklet); mt76x2_stop_hardware(dev); - mt76x2_dma_cleanup(dev); - mt76x02_mcu_cleanup(&dev->mt76); + mt76x02_dma_cleanup(dev); + mt76x02_mcu_cleanup(dev); } -struct mt76x2_dev *mt76x2_alloc_device(struct device *pdev) +struct mt76x02_dev *mt76x2_alloc_device(struct device *pdev) { static const struct mt76_driver_ops drv_ops = { .txwi_size = sizeof(struct mt76x02_txwi), .update_survey = mt76x2_update_channel, - .tx_prepare_skb = mt76x2_tx_prepare_skb, - .tx_complete_skb = mt76x2_tx_complete_skb, - .rx_skb = mt76x2_queue_rx_skb, - .rx_poll_complete = mt76x2_rx_poll_complete, + .tx_prepare_skb = mt76x02_tx_prepare_skb, + .tx_complete_skb = mt76x02_tx_complete_skb, + .rx_skb = mt76x02_queue_rx_skb, + .rx_poll_complete = mt76x02_rx_poll_complete, .sta_ps = mt76x2_sta_ps, - .get_max_txpwr_adj = mt76x2_tx_get_max_txpwr_adj, }; - struct mt76x2_dev *dev; + struct mt76x02_dev *dev; struct mt76_dev *mdev; mdev = mt76_alloc_device(sizeof(*dev), &mt76x2_ops); if (!mdev) return NULL; - dev = container_of(mdev, struct mt76x2_dev, mt76); + dev = container_of(mdev, struct mt76x02_dev, mt76); mdev->dev = pdev; mdev->drv = &drv_ops; @@ -382,7 +379,7 @@ static void mt76x2_regd_notifier(struct wiphy *wiphy, struct regulatory_request *request) { struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy); - struct mt76x2_dev *dev = hw->priv; + struct mt76x02_dev *dev = hw->priv; mt76x2_dfs_set_domain(dev, request->dfs_region); } @@ -418,8 +415,8 @@ static const struct ieee80211_iface_combination if_comb[] = { static void mt76x2_led_set_config(struct mt76_dev *mt76, u8 delay_on, u8 delay_off) { - struct mt76x2_dev *dev = container_of(mt76, struct mt76x2_dev, - mt76); + struct mt76x02_dev *dev = container_of(mt76, struct mt76x02_dev, + mt76); u32 val; val = MT_LED_STATUS_DURATION(0xff) | @@ -463,21 +460,12 @@ static void mt76x2_led_set_brightness(struct led_classdev *led_cdev, mt76x2_led_set_config(mt76, 0xff, 0); } -int 
mt76x2_register_device(struct mt76x2_dev *dev) +int mt76x2_register_device(struct mt76x02_dev *dev) { struct ieee80211_hw *hw = mt76_hw(dev); struct wiphy *wiphy = hw->wiphy; - void *status_fifo; - int fifo_size; int i, ret; - fifo_size = roundup_pow_of_two(32 * sizeof(struct mt76x02_tx_status)); - status_fifo = devm_kzalloc(dev->mt76.dev, fifo_size, GFP_KERNEL); - if (!status_fifo) - return -ENOMEM; - - tasklet_init(&dev->tx_tasklet, mt76x2_tx_tasklet, (unsigned long)dev); - kfifo_init(&dev->txstatus_fifo, status_fifo, fifo_size); INIT_DELAYED_WORK(&dev->cal_work, mt76x2_phy_calibrate); INIT_DELAYED_WORK(&dev->mac_work, mt76x2_mac_work); diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_mac.c b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_mac.c index bb9c0a059a6e..4b331ed14bb2 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2_mac.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_mac.c @@ -16,12 +16,10 @@ #include <linux/delay.h> #include "mt76x2.h" -#include "mt76x2_mcu.h" -#include "mt76x2_eeprom.h" -#include "mt76x2_trace.h" -#include "mt76x02_util.h" +#include "mcu.h" +#include "eeprom.h" -void mt76x2_mac_set_bssid(struct mt76x2_dev *dev, u8 idx, const u8 *addr) +void mt76x2_mac_set_bssid(struct mt76x02_dev *dev, u8 idx, const u8 *addr) { idx &= 7; mt76_wr(dev, MT_MAC_APC_BSSID_L(idx), get_unaligned_le32(addr)); @@ -29,76 +27,8 @@ void mt76x2_mac_set_bssid(struct mt76x2_dev *dev, u8 idx, const u8 *addr) get_unaligned_le16(addr + 4)); } -void mt76x2_mac_poll_tx_status(struct mt76x2_dev *dev, bool irq) -{ - struct mt76x02_tx_status stat = {}; - unsigned long flags; - u8 update = 1; - bool ret; - - if (!test_bit(MT76_STATE_RUNNING, &dev->mt76.state)) - return; - - trace_mac_txstat_poll(dev); - - while (!irq || !kfifo_is_full(&dev->txstatus_fifo)) { - spin_lock_irqsave(&dev->mt76.mmio.irq_lock, flags); - ret = mt76x02_mac_load_tx_status(&dev->mt76, &stat); - spin_unlock_irqrestore(&dev->mt76.mmio.irq_lock, flags); - - if (!ret) - break; - - trace_mac_txstat_fetch(dev, &stat); - - if (!irq) { - mt76x02_send_tx_status(&dev->mt76, &stat, &update); - continue; - } - - kfifo_put(&dev->txstatus_fifo, stat); - } -} - -static void -mt76x2_mac_queue_txdone(struct mt76x2_dev *dev, struct sk_buff *skb, - void *txwi_ptr) -{ - struct mt76x2_tx_info *txi = mt76x2_skb_tx_info(skb); - struct mt76x02_txwi *txwi = txwi_ptr; - - mt76x2_mac_poll_tx_status(dev, false); - - txi->tries = 0; - txi->jiffies = jiffies; - txi->wcid = txwi->wcid; - txi->pktid = txwi->pktid; - trace_mac_txdone_add(dev, txwi->wcid, txwi->pktid); - mt76x02_tx_complete(&dev->mt76, skb); -} - -void mt76x2_mac_process_tx_status_fifo(struct mt76x2_dev *dev) -{ - struct mt76x02_tx_status stat; - u8 update = 1; - - while (kfifo_get(&dev->txstatus_fifo, &stat)) - mt76x02_send_tx_status(&dev->mt76, &stat, &update); -} - -void mt76x2_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue *q, - struct mt76_queue_entry *e, bool flush) -{ - struct mt76x2_dev *dev = container_of(mdev, struct mt76x2_dev, mt76); - - if (e->txwi) - mt76x2_mac_queue_txdone(dev, e->skb, &e->txwi->txwi); - else - dev_kfree_skb_any(e->skb); -} - static int -mt76_write_beacon(struct mt76x2_dev *dev, int offset, struct sk_buff *skb) +mt76_write_beacon(struct mt76x02_dev *dev, int offset, struct sk_buff *skb) { int beacon_len = mt76x02_beacon_offsets[1] - mt76x02_beacon_offsets[0]; struct mt76x02_txwi txwi; @@ -106,7 +36,7 @@ mt76_write_beacon(struct mt76x2_dev *dev, int offset, struct sk_buff *skb) if (WARN_ON_ONCE(beacon_len < skb->len + sizeof(struct 
mt76x02_txwi))) return -ENOSPC; - mt76x2_mac_write_txwi(dev, &txwi, skb, NULL, NULL, skb->len); + mt76x02_mac_write_txwi(dev, &txwi, skb, NULL, NULL, skb->len); mt76_wr_copy(dev, offset, &txwi, sizeof(txwi)); offset += sizeof(txwi); @@ -116,7 +46,7 @@ mt76_write_beacon(struct mt76x2_dev *dev, int offset, struct sk_buff *skb) } static int -__mt76x2_mac_set_beacon(struct mt76x2_dev *dev, u8 bcn_idx, struct sk_buff *skb) +__mt76x2_mac_set_beacon(struct mt76x02_dev *dev, u8 bcn_idx, struct sk_buff *skb) { int beacon_len = mt76x02_beacon_offsets[1] - mt76x02_beacon_offsets[0]; int beacon_addr = mt76x02_beacon_offsets[bcn_idx]; @@ -141,7 +71,7 @@ __mt76x2_mac_set_beacon(struct mt76x2_dev *dev, u8 bcn_idx, struct sk_buff *skb) return ret; } -int mt76x2_mac_set_beacon(struct mt76x2_dev *dev, u8 vif_idx, +int mt76x2_mac_set_beacon(struct mt76x02_dev *dev, u8 vif_idx, struct sk_buff *skb) { bool force_update = false; @@ -176,7 +106,8 @@ int mt76x2_mac_set_beacon(struct mt76x2_dev *dev, u8 vif_idx, return 0; } -void mt76x2_mac_set_beacon_enable(struct mt76x2_dev *dev, u8 vif_idx, bool val) +void mt76x2_mac_set_beacon_enable(struct mt76x02_dev *dev, + u8 vif_idx, bool val) { u8 old_mask = dev->beacon_mask; bool en; @@ -201,14 +132,14 @@ void mt76x2_mac_set_beacon_enable(struct mt76x2_dev *dev, u8 vif_idx, bool val) mt76_rmw(dev, MT_BEACON_TIME_CFG, reg, reg * en); if (en) - mt76x02_irq_enable(&dev->mt76, MT_INT_PRE_TBTT | MT_INT_TBTT); + mt76x02_irq_enable(dev, MT_INT_PRE_TBTT | MT_INT_TBTT); else - mt76x02_irq_disable(&dev->mt76, MT_INT_PRE_TBTT | MT_INT_TBTT); + mt76x02_irq_disable(dev, MT_INT_PRE_TBTT | MT_INT_TBTT); } void mt76x2_update_channel(struct mt76_dev *mdev) { - struct mt76x2_dev *dev = container_of(mdev, struct mt76x2_dev, mt76); + struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76); struct mt76_channel_state *state; u32 active, busy; @@ -225,8 +156,8 @@ void mt76x2_update_channel(struct mt76_dev *mdev) void mt76x2_mac_work(struct work_struct *work) { - struct mt76x2_dev *dev = container_of(work, struct mt76x2_dev, - mac_work.work); + struct mt76x02_dev *dev = container_of(work, struct mt76x02_dev, + mac_work.work); int i, idx; mt76x2_update_channel(&dev->mt76); @@ -241,7 +172,7 @@ void mt76x2_mac_work(struct work_struct *work) MT_CALIBRATE_INTERVAL); } -void mt76x2_mac_set_tx_protection(struct mt76x2_dev *dev, u32 val) +void mt76x2_mac_set_tx_protection(struct mt76x02_dev *dev, u32 val) { u32 data = 0; diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_main.c b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_main.c index 63691b68a436..034a06295668 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2_main.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_main.c @@ -15,12 +15,11 @@ */ #include "mt76x2.h" -#include "mt76x02_util.h" static int mt76x2_start(struct ieee80211_hw *hw) { - struct mt76x2_dev *dev = hw->priv; + struct mt76x02_dev *dev = hw->priv; int ret; mutex_lock(&dev->mt76.mutex); @@ -46,7 +45,7 @@ out: static void mt76x2_stop(struct ieee80211_hw *hw) { - struct mt76x2_dev *dev = hw->priv; + struct mt76x02_dev *dev = hw->priv; mutex_lock(&dev->mt76.mutex); clear_bit(MT76_STATE_RUNNING, &dev->mt76.state); @@ -55,7 +54,7 @@ mt76x2_stop(struct ieee80211_hw *hw) } static int -mt76x2_set_channel(struct mt76x2_dev *dev, struct cfg80211_chan_def *chandef) +mt76x2_set_channel(struct mt76x02_dev *dev, struct cfg80211_chan_def *chandef) { int ret; @@ -91,7 +90,7 @@ mt76x2_set_channel(struct mt76x2_dev *dev, struct cfg80211_chan_def *chandef) static int 
mt76x2_config(struct ieee80211_hw *hw, u32 changed) { - struct mt76x2_dev *dev = hw->priv; + struct mt76x02_dev *dev = hw->priv; int ret = 0; mutex_lock(&dev->mt76.mutex); @@ -113,7 +112,7 @@ mt76x2_config(struct ieee80211_hw *hw, u32 changed) if (test_bit(MT76_STATE_RUNNING, &dev->mt76.state)) { mt76x2_phy_set_txpower(dev); - mt76x2_tx_set_txpwr_auto(dev, dev->mt76.txpower_conf); + mt76x02_tx_set_txpwr_auto(dev, dev->mt76.txpower_conf); } } @@ -132,7 +131,7 @@ static void mt76x2_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_bss_conf *info, u32 changed) { - struct mt76x2_dev *dev = hw->priv; + struct mt76x02_dev *dev = hw->priv; struct mt76x02_vif *mvif = (struct mt76x02_vif *) vif->drv_priv; mutex_lock(&dev->mt76.mutex); @@ -169,18 +168,18 @@ void mt76x2_sta_ps(struct mt76_dev *mdev, struct ieee80211_sta *sta, bool ps) { struct mt76x02_sta *msta = (struct mt76x02_sta *) sta->drv_priv; - struct mt76x2_dev *dev = container_of(mdev, struct mt76x2_dev, mt76); + struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76); int idx = msta->wcid.idx; mt76_stop_tx_queues(&dev->mt76, sta, true); - mt76x02_mac_wcid_set_drop(&dev->mt76, idx, ps); + mt76x02_mac_wcid_set_drop(dev, idx, ps); } static void mt76x2_sw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif, const u8 *mac) { - struct mt76x2_dev *dev = hw->priv; + struct mt76x02_dev *dev = hw->priv; tasklet_disable(&dev->pre_tbtt_tasklet); set_bit(MT76_SCANNING, &dev->mt76.state); @@ -189,7 +188,7 @@ mt76x2_sw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif, static void mt76x2_sw_scan_complete(struct ieee80211_hw *hw, struct ieee80211_vif *vif) { - struct mt76x2_dev *dev = hw->priv; + struct mt76x02_dev *dev = hw->priv; clear_bit(MT76_SCANNING, &dev->mt76.state); tasklet_enable(&dev->pre_tbtt_tasklet); @@ -204,7 +203,7 @@ mt76x2_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif, static int mt76x2_get_txpower(struct ieee80211_hw *hw, struct ieee80211_vif *vif, int *dbm) { - struct mt76x2_dev *dev = hw->priv; + struct mt76x02_dev *dev = hw->priv; *dbm = dev->mt76.txpower_cur / 2; @@ -217,7 +216,7 @@ mt76x2_get_txpower(struct ieee80211_hw *hw, struct ieee80211_vif *vif, int *dbm) static void mt76x2_set_coverage_class(struct ieee80211_hw *hw, s16 coverage_class) { - struct mt76x2_dev *dev = hw->priv; + struct mt76x02_dev *dev = hw->priv; mutex_lock(&dev->mt76.mutex); dev->coverage_class = coverage_class; @@ -234,7 +233,7 @@ mt76x2_set_tim(struct ieee80211_hw *hw, struct ieee80211_sta *sta, bool set) static int mt76x2_set_antenna(struct ieee80211_hw *hw, u32 tx_ant, u32 rx_ant) { - struct mt76x2_dev *dev = hw->priv; + struct mt76x02_dev *dev = hw->priv; if (!tx_ant || tx_ant > 3 || tx_ant != rx_ant) return -EINVAL; @@ -255,7 +254,7 @@ static int mt76x2_set_antenna(struct ieee80211_hw *hw, u32 tx_ant, static int mt76x2_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant) { - struct mt76x2_dev *dev = hw->priv; + struct mt76x02_dev *dev = hw->priv; mutex_lock(&dev->mt76.mutex); *tx_ant = dev->mt76.antenna_mask; @@ -268,7 +267,7 @@ static int mt76x2_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, static int mt76x2_set_rts_threshold(struct ieee80211_hw *hw, u32 val) { - struct mt76x2_dev *dev = hw->priv; + struct mt76x02_dev *dev = hw->priv; if (val != ~0 && val > 0xffff) return -EINVAL; @@ -281,7 +280,7 @@ mt76x2_set_rts_threshold(struct ieee80211_hw *hw, u32 val) } const struct ieee80211_ops mt76x2_ops = { - .tx = mt76x2_tx, + .tx = mt76x02_tx, .start = mt76x2_start, .stop 
= mt76x2_stop, .add_interface = mt76x02_add_interface, diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_mcu.c b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_mcu.c index 55716fd7e01d..d8fa9ba56437 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2_mcu.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_mcu.c @@ -19,12 +19,11 @@ #include <linux/delay.h> #include "mt76x2.h" -#include "mt76x2_mcu.h" -#include "mt76x2_eeprom.h" -#include "mt76x02_dma.h" +#include "mcu.h" +#include "eeprom.h" static int -mt76pci_load_rom_patch(struct mt76x2_dev *dev) +mt76pci_load_rom_patch(struct mt76x02_dev *dev) { const struct firmware *fw = NULL; struct mt76x02_patch_header *hdr; @@ -90,7 +89,7 @@ out: } static int -mt76pci_load_firmware(struct mt76x2_dev *dev) +mt76pci_load_firmware(struct mt76x02_dev *dev) { const struct firmware *fw; const struct mt76x02_fw_header *hdr; @@ -141,7 +140,7 @@ mt76pci_load_firmware(struct mt76x2_dev *dev) mt76_wr(dev, MT_MCU_PCIE_REMAP_BASE4, 0); - val = mt76x02_eeprom_get(&dev->mt76, MT_EE_NIC_CONF_2); + val = mt76x02_eeprom_get(dev, MT_EE_NIC_CONF_2); if (FIELD_GET(MT_EE_NIC_CONF_2_XTAL_OPTION, val) == 1) mt76_set(dev, MT_MCU_COM_REG0, BIT(30)); @@ -153,8 +152,8 @@ mt76pci_load_firmware(struct mt76x2_dev *dev) return -ETIMEDOUT; } + mt76x02_set_ethtool_fwver(dev, hdr); dev_info(dev->mt76.dev, "Firmware running!\n"); - mt76x02_set_ethtool_fwver(&dev->mt76, hdr); release_firmware(fw); @@ -166,7 +165,7 @@ error: return -ENOENT; } -int mt76x2_mcu_init(struct mt76x2_dev *dev) +int mt76x2_mcu_init(struct mt76x02_dev *dev) { static const struct mt76_mcu_ops mt76x2_mcu_ops = { .mcu_msg_alloc = mt76x02_mcu_msg_alloc, @@ -184,6 +183,6 @@ int mt76x2_mcu_init(struct mt76x2_dev *dev) if (ret) return ret; - mt76x02_mcu_function_select(&dev->mt76, Q_SELECT, 1, true); + mt76x02_mcu_function_select(dev, Q_SELECT, 1, true); return 0; } diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_phy.c b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_phy.c index 22e66006a5f8..5bda44540225 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2_phy.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_phy.c @@ -16,16 +16,17 @@ #include <linux/delay.h> #include "mt76x2.h" -#include "mt76x2_mcu.h" -#include "mt76x2_eeprom.h" +#include "mcu.h" +#include "eeprom.h" +#include "../mt76x02_phy.h" static bool -mt76x2_phy_tssi_init_cal(struct mt76x2_dev *dev) +mt76x2_phy_tssi_init_cal(struct mt76x02_dev *dev) { struct ieee80211_channel *chan = dev->mt76.chandef.chan; u32 flag = 0; - if (!mt76x02_tssi_enabled(&dev->mt76)) + if (!mt76x2_tssi_enabled(dev)) return false; if (mt76x2_channel_silent(dev)) @@ -34,16 +35,16 @@ mt76x2_phy_tssi_init_cal(struct mt76x2_dev *dev) if (chan->band == NL80211_BAND_5GHZ) flag |= BIT(0); - if (mt76x02_ext_pa_enabled(&dev->mt76, chan->band)) + if (mt76x02_ext_pa_enabled(dev, chan->band)) flag |= BIT(8); - mt76x02_mcu_calibrate(&dev->mt76, MCU_CAL_TSSI, flag, true); + mt76x02_mcu_calibrate(dev, MCU_CAL_TSSI, flag, true); dev->cal.tssi_cal_done = true; return true; } static void -mt76x2_phy_channel_calibrate(struct mt76x2_dev *dev, bool mac_stopped) +mt76x2_phy_channel_calibrate(struct mt76x02_dev *dev, bool mac_stopped) { struct ieee80211_channel *chan = dev->mt76.chandef.chan; bool is_5ghz = chan->band == NL80211_BAND_5GHZ; @@ -61,13 +62,13 @@ mt76x2_phy_channel_calibrate(struct mt76x2_dev *dev, bool mac_stopped) mt76x2_mac_stop(dev, false); if (is_5ghz) - mt76x02_mcu_calibrate(&dev->mt76, MCU_CAL_LC, 0, true); + mt76x02_mcu_calibrate(dev, MCU_CAL_LC, 0, true); - 
mt76x02_mcu_calibrate(&dev->mt76, MCU_CAL_TX_LOFT, is_5ghz, true); - mt76x02_mcu_calibrate(&dev->mt76, MCU_CAL_TXIQ, is_5ghz, true); - mt76x02_mcu_calibrate(&dev->mt76, MCU_CAL_RXIQC_FI, is_5ghz, true); - mt76x02_mcu_calibrate(&dev->mt76, MCU_CAL_TEMP_SENSOR, 0, true); - mt76x02_mcu_calibrate(&dev->mt76, MCU_CAL_TX_SHAPING, 0, true); + mt76x02_mcu_calibrate(dev, MCU_CAL_TX_LOFT, is_5ghz, true); + mt76x02_mcu_calibrate(dev, MCU_CAL_TXIQ, is_5ghz, true); + mt76x02_mcu_calibrate(dev, MCU_CAL_RXIQC_FI, is_5ghz, true); + mt76x02_mcu_calibrate(dev, MCU_CAL_TEMP_SENSOR, 0, true); + mt76x02_mcu_calibrate(dev, MCU_CAL_TX_SHAPING, 0, true); if (!mac_stopped) mt76x2_mac_resume(dev); @@ -77,7 +78,7 @@ mt76x2_phy_channel_calibrate(struct mt76x2_dev *dev, bool mac_stopped) dev->cal.channel_cal_done = true; } -void mt76x2_phy_set_antenna(struct mt76x2_dev *dev) +void mt76x2_phy_set_antenna(struct mt76x02_dev *dev) { u32 val; @@ -124,40 +125,7 @@ void mt76x2_phy_set_antenna(struct mt76x2_dev *dev) } static void -mt76x2_get_agc_gain(struct mt76x2_dev *dev, u8 *dest) -{ - dest[0] = mt76_get_field(dev, MT_BBP(AGC, 8), MT_BBP_AGC_GAIN); - dest[1] = mt76_get_field(dev, MT_BBP(AGC, 9), MT_BBP_AGC_GAIN); -} - -static int -mt76x2_get_rssi_gain_thresh(struct mt76x2_dev *dev) -{ - switch (dev->mt76.chandef.width) { - case NL80211_CHAN_WIDTH_80: - return -62; - case NL80211_CHAN_WIDTH_40: - return -65; - default: - return -68; - } -} - -static int -mt76x2_get_low_rssi_gain_thresh(struct mt76x2_dev *dev) -{ - switch (dev->mt76.chandef.width) { - case NL80211_CHAN_WIDTH_80: - return -76; - case NL80211_CHAN_WIDTH_40: - return -79; - default: - return -82; - } -} - -static void -mt76x2_phy_set_gain_val(struct mt76x2_dev *dev) +mt76x2_phy_set_gain_val(struct mt76x02_dev *dev) { u32 val; u8 gain_val[2]; @@ -182,26 +150,7 @@ mt76x2_phy_set_gain_val(struct mt76x2_dev *dev) } static void -mt76x2_phy_adjust_vga_gain(struct mt76x2_dev *dev) -{ - u32 false_cca; - u8 limit = dev->cal.low_gain > 0 ? 
16 : 4; - - false_cca = FIELD_GET(MT_RX_STAT_1_CCA_ERRORS, mt76_rr(dev, MT_RX_STAT_1)); - dev->cal.false_cca = false_cca; - if (false_cca > 800 && dev->cal.agc_gain_adjust < limit) - dev->cal.agc_gain_adjust += 2; - else if ((false_cca < 10 && dev->cal.agc_gain_adjust > 0) || - (dev->cal.agc_gain_adjust >= limit && false_cca < 500)) - dev->cal.agc_gain_adjust -= 2; - else - return; - - mt76x2_phy_set_gain_val(dev); -} - -static void -mt76x2_phy_update_channel_gain(struct mt76x2_dev *dev) +mt76x2_phy_update_channel_gain(struct mt76x02_dev *dev) { u8 *gain = dev->cal.agc_gain_init; u8 low_gain_delta, gain_delta; @@ -209,16 +158,17 @@ mt76x2_phy_update_channel_gain(struct mt76x2_dev *dev) int low_gain; u32 val; - dev->cal.avg_rssi_all = mt76x2_phy_get_min_avg_rssi(dev); + dev->cal.avg_rssi_all = mt76x02_phy_get_min_avg_rssi(dev); - low_gain = (dev->cal.avg_rssi_all > mt76x2_get_rssi_gain_thresh(dev)) + - (dev->cal.avg_rssi_all > mt76x2_get_low_rssi_gain_thresh(dev)); + low_gain = (dev->cal.avg_rssi_all > mt76x02_get_rssi_gain_thresh(dev)) + + (dev->cal.avg_rssi_all > mt76x02_get_low_rssi_gain_thresh(dev)); gain_change = (dev->cal.low_gain & 2) ^ (low_gain & 2); dev->cal.low_gain = low_gain; if (!gain_change) { - mt76x2_phy_adjust_vga_gain(dev); + if (mt76x02_phy_adjust_vga_gain(dev)) + mt76x2_phy_set_gain_val(dev); return; } @@ -264,7 +214,7 @@ mt76x2_phy_update_channel_gain(struct mt76x2_dev *dev) mt76_rr(dev, MT_RX_STAT_1); } -int mt76x2_phy_set_channel(struct mt76x2_dev *dev, +int mt76x2_phy_set_channel(struct mt76x02_dev *dev, struct cfg80211_chan_def *chandef) { struct ieee80211_channel *chan = chandef->chan; @@ -336,8 +286,8 @@ int mt76x2_phy_set_channel(struct mt76x2_dev *dev, mt76x2_configure_tx_delay(dev, band, bw); mt76x2_phy_set_txpower(dev); - mt76x2_phy_set_band(dev, chan->band, ch_group_index & 1); - mt76x2_phy_set_bw(dev, chandef->width, ch_group_index); + mt76x02_phy_set_band(dev, chan->band, ch_group_index & 1); + mt76x02_phy_set_bw(dev, chandef->width, ch_group_index); mt76_rmw(dev, MT_EXT_CCA_CFG, (MT_EXT_CCA_CFG_CCA0 | @@ -360,17 +310,17 @@ int mt76x2_phy_set_channel(struct mt76x2_dev *dev, mt76_set(dev, MT_BBP(RXO, 13), BIT(10)); if (!dev->cal.init_cal_done) { - u8 val = mt76x02_eeprom_get(&dev->mt76, MT_EE_BT_RCAL_RESULT); + u8 val = mt76x02_eeprom_get(dev, MT_EE_BT_RCAL_RESULT); if (val != 0xff) - mt76x02_mcu_calibrate(&dev->mt76, MCU_CAL_R, 0, true); + mt76x02_mcu_calibrate(dev, MCU_CAL_R, 0, true); } - mt76x02_mcu_calibrate(&dev->mt76, MCU_CAL_RXDCOC, channel, true); + mt76x02_mcu_calibrate(dev, MCU_CAL_RXDCOC, channel, true); /* Rx LPF calibration */ if (!dev->cal.init_cal_done) - mt76x02_mcu_calibrate(&dev->mt76, MCU_CAL_RC, 0, true); + mt76x02_mcu_calibrate(dev, MCU_CAL_RC, 0, true); dev->cal.init_cal_done = true; @@ -383,14 +333,11 @@ int mt76x2_phy_set_channel(struct mt76x2_dev *dev, if (scan) return 0; - dev->cal.low_gain = -1; mt76x2_phy_channel_calibrate(dev, true); - mt76x2_get_agc_gain(dev, dev->cal.agc_gain_init); - memcpy(dev->cal.agc_gain_cur, dev->cal.agc_gain_init, - sizeof(dev->cal.agc_gain_cur)); + mt76x02_init_agc_gain(dev); /* init default values for temp compensation */ - if (mt76x02_tssi_enabled(&dev->mt76)) { + if (mt76x2_tssi_enabled(dev)) { mt76_rmw_field(dev, MT_TX_ALC_CFG_1, MT_TX_ALC_CFG_1_TEMP_COMP, 0x38); mt76_rmw_field(dev, MT_TX_ALC_CFG_2, MT_TX_ALC_CFG_2_TEMP_COMP, @@ -404,7 +351,7 @@ int mt76x2_phy_set_channel(struct mt76x2_dev *dev, } static void -mt76x2_phy_temp_compensate(struct mt76x2_dev *dev) +mt76x2_phy_temp_compensate(struct 
mt76x02_dev *dev) { struct mt76x2_temp_comp t; int temp, db_diff; @@ -433,9 +380,9 @@ mt76x2_phy_temp_compensate(struct mt76x2_dev *dev) void mt76x2_phy_calibrate(struct work_struct *work) { - struct mt76x2_dev *dev; + struct mt76x02_dev *dev; - dev = container_of(work, struct mt76x2_dev, cal_work.work); + dev = container_of(work, struct mt76x02_dev, cal_work.work); mt76x2_phy_channel_calibrate(dev, false); mt76x2_phy_tssi_compensate(dev, true); mt76x2_phy_temp_compensate(dev); @@ -444,11 +391,11 @@ void mt76x2_phy_calibrate(struct work_struct *work) MT_CALIBRATE_INTERVAL); } -int mt76x2_phy_start(struct mt76x2_dev *dev) +int mt76x2_phy_start(struct mt76x02_dev *dev) { int ret; - ret = mt76x02_mcu_set_radio_state(&dev->mt76, true, true); + ret = mt76x02_mcu_set_radio_state(dev, true, true); if (ret) return ret; diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_tx.c b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_tx.c index fcdf1879162e..3a2ec86d3e88 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2_tx.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_tx.c @@ -15,50 +15,17 @@ */ #include "mt76x2.h" -#include "mt76x02_util.h" -#include "mt76x02_dma.h" struct beacon_bc_data { - struct mt76x2_dev *dev; + struct mt76x02_dev *dev; struct sk_buff_head q; struct sk_buff *tail[8]; }; -int mt76x2_tx_prepare_skb(struct mt76_dev *mdev, void *txwi, - struct sk_buff *skb, struct mt76_queue *q, - struct mt76_wcid *wcid, struct ieee80211_sta *sta, - u32 *tx_info) -{ - struct mt76x2_dev *dev = container_of(mdev, struct mt76x2_dev, mt76); - struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); - int qsel = MT_QSEL_EDCA; - int ret; - - if (q == &dev->mt76.q_tx[MT_TXQ_PSD] && wcid && wcid->idx < 128) - mt76x02_mac_wcid_set_drop(&dev->mt76, wcid->idx, false); - - mt76x2_mac_write_txwi(dev, txwi, skb, wcid, sta, skb->len); - - ret = mt76x02_insert_hdr_pad(skb); - if (ret < 0) - return ret; - - if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) - qsel = MT_QSEL_MGMT; - - *tx_info = FIELD_PREP(MT_TXD_INFO_QSEL, qsel) | - MT_TXD_INFO_80211; - - if (!wcid || wcid->hw_key_idx == 0xff || wcid->sw_iv) - *tx_info |= MT_TXD_INFO_WIV; - - return 0; -} - static void mt76x2_update_beacon_iter(void *priv, u8 *mac, struct ieee80211_vif *vif) { - struct mt76x2_dev *dev = (struct mt76x2_dev *) priv; + struct mt76x02_dev *dev = (struct mt76x02_dev *) priv; struct mt76x02_vif *mvif = (struct mt76x02_vif *) vif->drv_priv; struct sk_buff *skb = NULL; @@ -76,7 +43,7 @@ static void mt76x2_add_buffered_bc(void *priv, u8 *mac, struct ieee80211_vif *vif) { struct beacon_bc_data *data = priv; - struct mt76x2_dev *dev = data->dev; + struct mt76x02_dev *dev = data->dev; struct mt76x02_vif *mvif = (struct mt76x02_vif *) vif->drv_priv; struct ieee80211_tx_info *info; struct sk_buff *skb; @@ -97,7 +64,7 @@ mt76x2_add_buffered_bc(void *priv, u8 *mac, struct ieee80211_vif *vif) } static void -mt76x2_resync_beacon_timer(struct mt76x2_dev *dev) +mt76x2_resync_beacon_timer(struct mt76x02_dev *dev) { u32 timer_val = dev->beacon_int << 4; @@ -129,7 +96,7 @@ mt76x2_resync_beacon_timer(struct mt76x2_dev *dev) void mt76x2_pre_tbtt_tasklet(unsigned long arg) { - struct mt76x2_dev *dev = (struct mt76x2_dev *) arg; + struct mt76x02_dev *dev = (struct mt76x02_dev *) arg; struct mt76_queue *q = &dev->mt76.q_tx[MT_TXQ_PSD]; struct beacon_bc_data data = {}; struct sk_buff *skb; diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_phy_common.c b/drivers/net/wireless/mediatek/mt76/mt76x2/phy.c index dd32e756d8b7..e9fff5b7f125 100644 --- 
a/drivers/net/wireless/mediatek/mt76/mt76x2_phy_common.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x2/phy.c @@ -16,12 +16,12 @@ */ #include "mt76x2.h" -#include "mt76x2_eeprom.h" -#include "mt76x2_mcu.h" -#include "mt76x02_phy.h" +#include "eeprom.h" +#include "mcu.h" +#include "../mt76x02_phy.h" static void -mt76x2_adjust_high_lna_gain(struct mt76x2_dev *dev, int reg, s8 offset) +mt76x2_adjust_high_lna_gain(struct mt76x02_dev *dev, int reg, s8 offset) { s8 gain; @@ -31,7 +31,7 @@ mt76x2_adjust_high_lna_gain(struct mt76x2_dev *dev, int reg, s8 offset) } static void -mt76x2_adjust_agc_gain(struct mt76x2_dev *dev, int reg, s8 offset) +mt76x2_adjust_agc_gain(struct mt76x02_dev *dev, int reg, s8 offset) { s8 gain; @@ -40,7 +40,7 @@ mt76x2_adjust_agc_gain(struct mt76x2_dev *dev, int reg, s8 offset) mt76_rmw_field(dev, MT_BBP(AGC, reg), MT_BBP_AGC_GAIN, gain); } -void mt76x2_apply_gain_adj(struct mt76x2_dev *dev) +void mt76x2_apply_gain_adj(struct mt76x02_dev *dev) { s8 *gain_adj = dev->cal.rx.high_gain; @@ -52,7 +52,7 @@ void mt76x2_apply_gain_adj(struct mt76x2_dev *dev) } EXPORT_SYMBOL_GPL(mt76x2_apply_gain_adj); -void mt76x2_phy_set_txpower_regs(struct mt76x2_dev *dev, +void mt76x2_phy_set_txpower_regs(struct mt76x02_dev *dev, enum nl80211_band band) { u32 pa_mode[2]; @@ -65,7 +65,7 @@ void mt76x2_phy_set_txpower_regs(struct mt76x2_dev *dev, mt76_wr(dev, MT_TX_ALC_CFG_2, 0x35160a00); mt76_wr(dev, MT_TX_ALC_CFG_3, 0x35160a06); - if (mt76x02_ext_pa_enabled(&dev->mt76, band)) { + if (mt76x02_ext_pa_enabled(dev, band)) { mt76_wr(dev, MT_RF_PA_MODE_ADJ0, 0x0000ec00); mt76_wr(dev, MT_RF_PA_MODE_ADJ1, 0x0000ec00); } else { @@ -76,7 +76,7 @@ void mt76x2_phy_set_txpower_regs(struct mt76x2_dev *dev, pa_mode[0] = 0x0000ffff; pa_mode[1] = 0x00ff00ff; - if (mt76x02_ext_pa_enabled(&dev->mt76, band)) { + if (mt76x02_ext_pa_enabled(dev, band)) { mt76_wr(dev, MT_TX_ALC_CFG_2, 0x2f0f0400); mt76_wr(dev, MT_TX_ALC_CFG_3, 0x2f0f0476); } else { @@ -84,7 +84,7 @@ void mt76x2_phy_set_txpower_regs(struct mt76x2_dev *dev, mt76_wr(dev, MT_TX_ALC_CFG_3, 0x1b0f0476); } - if (mt76x02_ext_pa_enabled(&dev->mt76, band)) + if (mt76x02_ext_pa_enabled(dev, band)) pa_mode_adj = 0x04000000; else pa_mode_adj = 0; @@ -98,7 +98,7 @@ void mt76x2_phy_set_txpower_regs(struct mt76x2_dev *dev, mt76_wr(dev, MT_RF_PA_MODE_CFG0, pa_mode[0]); mt76_wr(dev, MT_RF_PA_MODE_CFG1, pa_mode[1]); - if (mt76x02_ext_pa_enabled(&dev->mt76, band)) { + if (mt76x02_ext_pa_enabled(dev, band)) { u32 val; if (band == NL80211_BAND_2GHZ) @@ -144,7 +144,7 @@ mt76x2_get_min_rate_power(struct mt76_rate_power *r) return ret; } -void mt76x2_phy_set_txpower(struct mt76x2_dev *dev) +void mt76x2_phy_set_txpower(struct mt76x02_dev *dev) { enum nl80211_chan_width width = dev->mt76.chandef.width; struct ieee80211_channel *chan = dev->mt76.chandef.chan; @@ -187,16 +187,16 @@ void mt76x2_phy_set_txpower(struct mt76x2_dev *dev) dev->target_power_delta[1] = txp_1 - txp.chain[0].target_power; dev->mt76.rate_power = t; - mt76x02_phy_set_txpower(&dev->mt76, txp_0, txp_1); + mt76x02_phy_set_txpower(dev, txp_0, txp_1); } EXPORT_SYMBOL_GPL(mt76x2_phy_set_txpower); -void mt76x2_configure_tx_delay(struct mt76x2_dev *dev, +void mt76x2_configure_tx_delay(struct mt76x02_dev *dev, enum nl80211_band band, u8 bw) { u32 cfg0, cfg1; - if (mt76x02_ext_pa_enabled(&dev->mt76, band)) { + if (mt76x02_ext_pa_enabled(dev, band)) { cfg0 = bw ? 
0x000b0c01 : 0x00101101; cfg1 = 0x00011414; } else { @@ -210,98 +210,7 @@ void mt76x2_configure_tx_delay(struct mt76x2_dev *dev, } EXPORT_SYMBOL_GPL(mt76x2_configure_tx_delay); -void mt76x2_phy_set_bw(struct mt76x2_dev *dev, int width, u8 ctrl) -{ - int core_val, agc_val; - - switch (width) { - case NL80211_CHAN_WIDTH_80: - core_val = 3; - agc_val = 7; - break; - case NL80211_CHAN_WIDTH_40: - core_val = 2; - agc_val = 3; - break; - default: - core_val = 0; - agc_val = 1; - break; - } - - mt76_rmw_field(dev, MT_BBP(CORE, 1), MT_BBP_CORE_R1_BW, core_val); - mt76_rmw_field(dev, MT_BBP(AGC, 0), MT_BBP_AGC_R0_BW, agc_val); - mt76_rmw_field(dev, MT_BBP(AGC, 0), MT_BBP_AGC_R0_CTRL_CHAN, ctrl); - mt76_rmw_field(dev, MT_BBP(TXBE, 0), MT_BBP_TXBE_R0_CTRL_CHAN, ctrl); -} -EXPORT_SYMBOL_GPL(mt76x2_phy_set_bw); - -void mt76x2_phy_set_band(struct mt76x2_dev *dev, int band, bool primary_upper) -{ - switch (band) { - case NL80211_BAND_2GHZ: - mt76_set(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_2G); - mt76_clear(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_5G); - break; - case NL80211_BAND_5GHZ: - mt76_clear(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_2G); - mt76_set(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_5G); - break; - } - - mt76_rmw_field(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_UPPER_40M, - primary_upper); -} -EXPORT_SYMBOL_GPL(mt76x2_phy_set_band); - -int mt76x2_phy_get_min_avg_rssi(struct mt76x2_dev *dev) -{ - struct mt76x02_sta *sta; - struct mt76_wcid *wcid; - int i, j, min_rssi = 0; - s8 cur_rssi; - - local_bh_disable(); - rcu_read_lock(); - - for (i = 0; i < ARRAY_SIZE(dev->mt76.wcid_mask); i++) { - unsigned long mask = dev->mt76.wcid_mask[i]; - - if (!mask) - continue; - - for (j = i * BITS_PER_LONG; mask; j++, mask >>= 1) { - if (!(mask & 1)) - continue; - - wcid = rcu_dereference(dev->mt76.wcid[j]); - if (!wcid) - continue; - - sta = container_of(wcid, struct mt76x02_sta, wcid); - spin_lock(&dev->mt76.rx_lock); - if (sta->inactive_count++ < 5) - cur_rssi = ewma_signal_read(&sta->rssi); - else - cur_rssi = 0; - spin_unlock(&dev->mt76.rx_lock); - - if (cur_rssi < min_rssi) - min_rssi = cur_rssi; - } - } - - rcu_read_unlock(); - local_bh_enable(); - - if (!min_rssi) - return -75; - - return min_rssi; -} -EXPORT_SYMBOL_GPL(mt76x2_phy_get_min_avg_rssi); - -void mt76x2_phy_tssi_compensate(struct mt76x2_dev *dev, bool wait) +void mt76x2_phy_tssi_compensate(struct mt76x02_dev *dev, bool wait) { struct ieee80211_channel *chan = dev->mt76.chandef.chan; struct mt76x2_tx_power_info txp; @@ -322,7 +231,7 @@ void mt76x2_phy_tssi_compensate(struct mt76x2_dev *dev, bool wait) dev->cal.tssi_comp_pending = false; mt76x2_get_power_info(dev, &txp, chan); - if (mt76x02_ext_pa_enabled(&dev->mt76, chan->band)) + if (mt76x02_ext_pa_enabled(dev, chan->band)) t.pa_mode = 1; t.cal_mode = BIT(1); @@ -336,8 +245,7 @@ void mt76x2_phy_tssi_compensate(struct mt76x2_dev *dev, bool wait) return; usleep_range(10000, 20000); - mt76x02_mcu_calibrate(&dev->mt76, MCU_CAL_DPD, - chan->hw_value, wait); + mt76x02_mcu_calibrate(dev, MCU_CAL_DPD, chan->hw_value, wait); dev->cal.dpd_cal_done = true; } } diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_usb.c b/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c index feb5cec66c67..57baf8d1c830 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2_usb.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c @@ -17,7 +17,7 @@ #include <linux/kernel.h> #include <linux/module.h> -#include "mt76x02_usb.h" +#include "../mt76x02_usb.h" #include "mt76x2u.h" static const struct usb_device_id mt76x2u_device_table[] = { @@ -37,7 
+37,7 @@ static int mt76x2u_probe(struct usb_interface *intf, const struct usb_device_id *id) { struct usb_device *udev = interface_to_usbdev(intf); - struct mt76x2_dev *dev; + struct mt76x02_dev *dev; int err; dev = mt76x2u_alloc_device(&intf->dev); @@ -72,7 +72,7 @@ err: static void mt76x2u_disconnect(struct usb_interface *intf) { struct usb_device *udev = interface_to_usbdev(intf); - struct mt76x2_dev *dev = usb_get_intfdata(intf); + struct mt76x02_dev *dev = usb_get_intfdata(intf); struct ieee80211_hw *hw = mt76_hw(dev); set_bit(MT76_REMOVED, &dev->mt76.state); @@ -87,7 +87,7 @@ static void mt76x2u_disconnect(struct usb_interface *intf) static int __maybe_unused mt76x2u_suspend(struct usb_interface *intf, pm_message_t state) { - struct mt76x2_dev *dev = usb_get_intfdata(intf); + struct mt76x02_dev *dev = usb_get_intfdata(intf); struct mt76_usb *usb = &dev->mt76.usb; mt76u_stop_queues(&dev->mt76); @@ -99,7 +99,7 @@ static int __maybe_unused mt76x2u_suspend(struct usb_interface *intf, static int __maybe_unused mt76x2u_resume(struct usb_interface *intf) { - struct mt76x2_dev *dev = usb_get_intfdata(intf); + struct mt76x02_dev *dev = usb_get_intfdata(intf); struct mt76_usb *usb = &dev->mt76.usb; int err; diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2u_init.c b/drivers/net/wireless/mediatek/mt76/mt76x2/usb_init.c index 5759a72d7ef6..13cce2937573 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2u_init.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x2/usb_init.c @@ -17,11 +17,11 @@ #include <linux/delay.h> #include "mt76x2u.h" -#include "mt76x02_util.h" -#include "mt76x02_phy.h" -#include "mt76x2_eeprom.h" +#include "eeprom.h" +#include "../mt76x02_phy.h" +#include "../mt76x02_usb.h" -static void mt76x2u_init_dma(struct mt76x2_dev *dev) +static void mt76x2u_init_dma(struct mt76x02_dev *dev) { u32 val = mt76_rr(dev, MT_VEND_ADDR(CFG, MT_USB_U3DMA_CFG)); @@ -36,7 +36,7 @@ static void mt76x2u_init_dma(struct mt76x2_dev *dev) mt76_wr(dev, MT_VEND_ADDR(CFG, MT_USB_U3DMA_CFG), val); } -static void mt76x2u_power_on_rf_patch(struct mt76x2_dev *dev) +static void mt76x2u_power_on_rf_patch(struct mt76x02_dev *dev) { mt76_set(dev, MT_VEND_ADDR(CFG, 0x130), BIT(0) | BIT(16)); udelay(1); @@ -56,7 +56,7 @@ static void mt76x2u_power_on_rf_patch(struct mt76x2_dev *dev) mt76_set(dev, MT_VEND_ADDR(CFG, 0x14c), BIT(19) | BIT(20)); } -static void mt76x2u_power_on_rf(struct mt76x2_dev *dev, int unit) +static void mt76x2u_power_on_rf(struct mt76x02_dev *dev, int unit) { int shift = unit ? 
8 : 0; u32 val = (BIT(1) | BIT(3) | BIT(4) | BIT(5)) << shift; @@ -78,7 +78,7 @@ static void mt76x2u_power_on_rf(struct mt76x2_dev *dev, int unit) mt76_set(dev, 0x530, 0xf); } -static void mt76x2u_power_on(struct mt76x2_dev *dev) +static void mt76x2u_power_on(struct mt76x02_dev *dev) { u32 val; @@ -114,7 +114,7 @@ static void mt76x2u_power_on(struct mt76x2_dev *dev) mt76x2u_power_on_rf(dev, 1); } -static int mt76x2u_init_eeprom(struct mt76x2_dev *dev) +static int mt76x2u_init_eeprom(struct mt76x02_dev *dev) { u32 val, i; @@ -130,33 +130,33 @@ static int mt76x2u_init_eeprom(struct mt76x2_dev *dev) put_unaligned_le32(val, dev->mt76.eeprom.data + i); } - mt76x02_eeprom_parse_hw_cap(&dev->mt76); + mt76x02_eeprom_parse_hw_cap(dev); return 0; } -struct mt76x2_dev *mt76x2u_alloc_device(struct device *pdev) +struct mt76x02_dev *mt76x2u_alloc_device(struct device *pdev) { static const struct mt76_driver_ops drv_ops = { - .tx_prepare_skb = mt76x2u_tx_prepare_skb, - .tx_complete_skb = mt76x02_tx_complete_skb, + .tx_prepare_skb = mt76x02u_tx_prepare_skb, + .tx_complete_skb = mt76x02u_tx_complete_skb, .tx_status_data = mt76x02_tx_status_data, - .rx_skb = mt76x2_queue_rx_skb, + .rx_skb = mt76x02_queue_rx_skb, }; - struct mt76x2_dev *dev; + struct mt76x02_dev *dev; struct mt76_dev *mdev; mdev = mt76_alloc_device(sizeof(*dev), &mt76x2u_ops); if (!mdev) return NULL; - dev = container_of(mdev, struct mt76x2_dev, mt76); + dev = container_of(mdev, struct mt76x02_dev, mt76); mdev->dev = pdev; mdev->drv = &drv_ops; return dev; } -static void mt76x2u_init_beacon_offsets(struct mt76x2_dev *dev) +static void mt76x2u_init_beacon_offsets(struct mt76x02_dev *dev) { mt76_wr(dev, MT_BCN_OFFSET(0), 0x18100800); mt76_wr(dev, MT_BCN_OFFSET(1), 0x38302820); @@ -164,7 +164,7 @@ static void mt76x2u_init_beacon_offsets(struct mt76x2_dev *dev) mt76_wr(dev, MT_BCN_OFFSET(3), 0x78706860); } -int mt76x2u_init_hardware(struct mt76x2_dev *dev) +int mt76x2u_init_hardware(struct mt76x02_dev *dev) { const struct mt76_wcid_addr addr = { .macaddr = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}, @@ -204,8 +204,7 @@ int mt76x2u_init_hardware(struct mt76x2_dev *dev) if (err < 0) return err; - mt76x02_mac_setaddr(&dev->mt76, - dev->mt76.eeprom.data + MT_EE_MAC_ADDR); + mt76x02_mac_setaddr(dev, dev->mt76.eeprom.data + MT_EE_MAC_ADDR); dev->mt76.rxfilter = mt76_rr(dev, MT_RX_FILTR_CFG); mt76x2u_init_beacon_offsets(dev); @@ -237,13 +236,13 @@ int mt76x2u_init_hardware(struct mt76x2_dev *dev) if (err < 0) return err; - mt76x02_phy_set_rxpath(&dev->mt76); - mt76x02_phy_set_txdac(&dev->mt76); + mt76x02_phy_set_rxpath(dev); + mt76x02_phy_set_txdac(dev); return mt76x2u_mac_stop(dev); } -int mt76x2u_register_device(struct mt76x2_dev *dev) +int mt76x2u_register_device(struct mt76x02_dev *dev) { struct ieee80211_hw *hw = mt76_hw(dev); struct wiphy *wiphy = hw->wiphy; @@ -262,7 +261,7 @@ int mt76x2u_register_device(struct mt76x2_dev *dev) err = mt76u_mcu_init_rx(&dev->mt76); if (err < 0) - return err; + goto fail; err = mt76x2u_init_hardware(dev); if (err < 0) @@ -294,16 +293,16 @@ fail: return err; } -void mt76x2u_stop_hw(struct mt76x2_dev *dev) +void mt76x2u_stop_hw(struct mt76x02_dev *dev) { mt76u_stop_stat_wk(&dev->mt76); cancel_delayed_work_sync(&dev->cal_work); mt76x2u_mac_stop(dev); } -void mt76x2u_cleanup(struct mt76x2_dev *dev) +void mt76x2u_cleanup(struct mt76x02_dev *dev) { - mt76x02_mcu_set_radio_state(&dev->mt76, false, false); + mt76x02_mcu_set_radio_state(dev, false, false); mt76x2u_stop_hw(dev); mt76u_queues_deinit(&dev->mt76); 
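/* The hunks above fold struct mt76x2_dev into the shared struct
 * mt76x02_dev and let the mt76x02_* helpers take the specific device
 * directly instead of &dev->mt76. A minimal sketch of the embedding
 * pattern this rename relies on -- the types and names below are
 * simplified, illustrative stand-ins, not the real driver structures:
 */
#include <stdbool.h>
#include <stddef.h>

struct core_dev {			/* stands in for struct mt76_dev */
	unsigned long state;
};

struct chip_dev {			/* stands in for struct mt76x02_dev */
	struct core_dev mt76;		/* embedded bus-generic core */
	bool init_cal_done;
};

#ifndef container_of
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))
#endif

/* Shared helpers can recover the chip-specific device from the core
 * pointer handed out by the allocator, mirroring what the patch does in
 * mt76x2u_alloc_device() with container_of(mdev, struct mt76x02_dev, mt76). */
static inline struct chip_dev *to_chip_dev(struct core_dev *mdev)
{
	return container_of(mdev, struct chip_dev, mt76);
}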
mt76u_mcu_deinit(&dev->mt76); diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2u_mac.c b/drivers/net/wireless/mediatek/mt76/mt76x2/usb_mac.c index f28c6fbcc305..db2194a92e67 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2u_mac.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x2/usb_mac.c @@ -15,9 +15,9 @@ */ #include "mt76x2u.h" -#include "mt76x2_eeprom.h" +#include "eeprom.h" -static void mt76x2u_mac_reset_counters(struct mt76x2_dev *dev) +static void mt76x2u_mac_reset_counters(struct mt76x02_dev *dev) { mt76_rr(dev, MT_RX_STAT_0); mt76_rr(dev, MT_RX_STAT_1); @@ -27,12 +27,12 @@ static void mt76x2u_mac_reset_counters(struct mt76x2_dev *dev) mt76_rr(dev, MT_TX_STA_2); } -static void mt76x2u_mac_fixup_xtal(struct mt76x2_dev *dev) +static void mt76x2u_mac_fixup_xtal(struct mt76x02_dev *dev) { s8 offset = 0; u16 eep_val; - eep_val = mt76x02_eeprom_get(&dev->mt76, MT_EE_XTAL_TRIM_2); + eep_val = mt76x02_eeprom_get(dev, MT_EE_XTAL_TRIM_2); offset = eep_val & 0x7f; if ((eep_val & 0xff) == 0xff) @@ -42,7 +42,7 @@ static void mt76x2u_mac_fixup_xtal(struct mt76x2_dev *dev) eep_val >>= 8; if (eep_val == 0x00 || eep_val == 0xff) { - eep_val = mt76x02_eeprom_get(&dev->mt76, MT_EE_XTAL_TRIM_1); + eep_val = mt76x02_eeprom_get(dev, MT_EE_XTAL_TRIM_1); eep_val &= 0xff; if (eep_val == 0x00 || eep_val == 0xff) @@ -67,7 +67,7 @@ static void mt76x2u_mac_fixup_xtal(struct mt76x2_dev *dev) /* init fce */ mt76_clear(dev, MT_FCE_L2_STUFF, MT_FCE_L2_STUFF_WR_MPDU_LEN_EN); - eep_val = mt76x02_eeprom_get(&dev->mt76, MT_EE_NIC_CONF_2); + eep_val = mt76x02_eeprom_get(dev, MT_EE_NIC_CONF_2); switch (FIELD_GET(MT_EE_NIC_CONF_2_XTAL_OPTION, eep_val)) { case 0: mt76_wr(dev, MT_XO_CTRL7, 0x5c1fee80); @@ -80,7 +80,7 @@ static void mt76x2u_mac_fixup_xtal(struct mt76x2_dev *dev) } } -int mt76x2u_mac_reset(struct mt76x2_dev *dev) +int mt76x2u_mac_reset(struct mt76x02_dev *dev) { mt76_wr(dev, MT_WPDMA_GLO_CFG, BIT(4) | BIT(5)); @@ -114,7 +114,7 @@ int mt76x2u_mac_reset(struct mt76x2_dev *dev) return 0; } -int mt76x2u_mac_start(struct mt76x2_dev *dev) +int mt76x2u_mac_start(struct mt76x02_dev *dev) { mt76x2u_mac_reset_counters(dev); @@ -131,7 +131,7 @@ int mt76x2u_mac_start(struct mt76x2_dev *dev) return 0; } -int mt76x2u_mac_stop(struct mt76x2_dev *dev) +int mt76x2u_mac_stop(struct mt76x02_dev *dev) { int i, count = 0, val; bool stopped = false; @@ -212,7 +212,7 @@ int mt76x2u_mac_stop(struct mt76x2_dev *dev) return 0; } -void mt76x2u_mac_resume(struct mt76x2_dev *dev) +void mt76x2u_mac_resume(struct mt76x02_dev *dev) { mt76_wr(dev, MT_MAC_SYS_CTRL, MT_MAC_SYS_CTRL_ENABLE_TX | diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2u_main.c b/drivers/net/wireless/mediatek/mt76/mt76x2/usb_main.c index a80704568780..1971a1b00038 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2u_main.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x2/usb_main.c @@ -15,11 +15,10 @@ */ #include "mt76x2u.h" -#include "mt76x02_util.h" static int mt76x2u_start(struct ieee80211_hw *hw) { - struct mt76x2_dev *dev = hw->priv; + struct mt76x02_dev *dev = hw->priv; int ret; mutex_lock(&dev->mt76.mutex); @@ -37,7 +36,7 @@ out: static void mt76x2u_stop(struct ieee80211_hw *hw) { - struct mt76x2_dev *dev = hw->priv; + struct mt76x02_dev *dev = hw->priv; mutex_lock(&dev->mt76.mutex); clear_bit(MT76_STATE_RUNNING, &dev->mt76.state); @@ -48,17 +47,17 @@ static void mt76x2u_stop(struct ieee80211_hw *hw) static int mt76x2u_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif) { - struct mt76x2_dev *dev = hw->priv; + struct mt76x02_dev *dev 
= hw->priv; if (!ether_addr_equal(dev->mt76.macaddr, vif->addr)) - mt76x02_mac_setaddr(&dev->mt76, vif->addr); + mt76x02_mac_setaddr(dev, vif->addr); - mt76x02_vif_init(&dev->mt76, vif, 0); + mt76x02_vif_init(dev, vif, 0); return 0; } static int -mt76x2u_set_channel(struct mt76x2_dev *dev, +mt76x2u_set_channel(struct mt76x02_dev *dev, struct cfg80211_chan_def *chandef) { int err; @@ -86,7 +85,7 @@ static void mt76x2u_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_bss_conf *info, u32 changed) { - struct mt76x2_dev *dev = hw->priv; + struct mt76x02_dev *dev = hw->priv; mutex_lock(&dev->mt76.mutex); @@ -108,7 +107,7 @@ mt76x2u_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif, static int mt76x2u_config(struct ieee80211_hw *hw, u32 changed) { - struct mt76x2_dev *dev = hw->priv; + struct mt76x02_dev *dev = hw->priv; int err = 0; mutex_lock(&dev->mt76.mutex); @@ -146,7 +145,7 @@ static void mt76x2u_sw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif, const u8 *mac) { - struct mt76x2_dev *dev = hw->priv; + struct mt76x02_dev *dev = hw->priv; set_bit(MT76_SCANNING, &dev->mt76.state); } @@ -154,13 +153,13 @@ mt76x2u_sw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif, static void mt76x2u_sw_scan_complete(struct ieee80211_hw *hw, struct ieee80211_vif *vif) { - struct mt76x2_dev *dev = hw->priv; + struct mt76x02_dev *dev = hw->priv; clear_bit(MT76_SCANNING, &dev->mt76.state); } const struct ieee80211_ops mt76x2u_ops = { - .tx = mt76x2_tx, + .tx = mt76x02_tx, .start = mt76x2u_start, .stop = mt76x2u_stop, .add_interface = mt76x2u_add_interface, diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2u_mcu.c b/drivers/net/wireless/mediatek/mt76/mt76x2/usb_mcu.c index fdd94cad7b66..3f1e558e5e6d 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2u_mcu.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x2/usb_mcu.c @@ -17,8 +17,8 @@ #include <linux/firmware.h> #include "mt76x2u.h" -#include "mt76x2_eeprom.h" -#include "mt76x02_usb.h" +#include "eeprom.h" +#include "../mt76x02_usb.h" #define MT_CMD_HDR_LEN 4 @@ -29,7 +29,7 @@ #define MT76U_MCU_DLM_OFFSET 0x110000 #define MT76U_MCU_ROM_PATCH_OFFSET 0x90000 -int mt76x2u_mcu_set_dynamic_vga(struct mt76x2_dev *dev, u8 channel, bool ap, +int mt76x2u_mcu_set_dynamic_vga(struct mt76x02_dev *dev, u8 channel, bool ap, bool ext, int rssi, u32 false_cca) { struct { @@ -53,14 +53,14 @@ int mt76x2u_mcu_set_dynamic_vga(struct mt76x2_dev *dev, u8 channel, bool ap, return mt76_mcu_send_msg(dev, skb, CMD_DYNC_VGA_OP, true); } -static void mt76x2u_mcu_load_ivb(struct mt76x2_dev *dev) +static void mt76x2u_mcu_load_ivb(struct mt76x02_dev *dev) { mt76u_vendor_request(&dev->mt76, MT_VEND_DEV_MODE, USB_DIR_OUT | USB_TYPE_VENDOR, 0x12, 0, NULL, 0); } -static void mt76x2u_mcu_enable_patch(struct mt76x2_dev *dev) +static void mt76x2u_mcu_enable_patch(struct mt76x02_dev *dev) { struct mt76_usb *usb = &dev->mt76.usb; const u8 data[] = { @@ -75,7 +75,7 @@ static void mt76x2u_mcu_enable_patch(struct mt76x2_dev *dev) 0x12, 0, usb->data, sizeof(data)); } -static void mt76x2u_mcu_reset_wmt(struct mt76x2_dev *dev) +static void mt76x2u_mcu_reset_wmt(struct mt76x02_dev *dev) { struct mt76_usb *usb = &dev->mt76.usb; u8 data[] = { @@ -89,7 +89,7 @@ static void mt76x2u_mcu_reset_wmt(struct mt76x2_dev *dev) 0x12, 0, usb->data, sizeof(data)); } -static int mt76x2u_mcu_load_rom_patch(struct mt76x2_dev *dev) +static int mt76x2u_mcu_load_rom_patch(struct mt76x02_dev *dev) { bool rom_protect = !is_mt7612(dev); struct 
mt76x02_patch_header *hdr; @@ -137,7 +137,7 @@ static int mt76x2u_mcu_load_rom_patch(struct mt76x2_dev *dev) mt76_wr(dev, MT_VEND_ADDR(CFG, MT_USB_U3DMA_CFG), val); /* vendor reset */ - mt76x02u_mcu_fw_reset(&dev->mt76); + mt76x02u_mcu_fw_reset(dev); usleep_range(5000, 10000); /* enable FCE to send in-band cmd */ @@ -151,7 +151,7 @@ static int mt76x2u_mcu_load_rom_patch(struct mt76x2_dev *dev) /* FCE skip_fs_en */ mt76_wr(dev, MT_FCE_SKIP_FS, 0x3); - err = mt76x02u_mcu_fw_send_data(&dev->mt76, fw->data + sizeof(*hdr), + err = mt76x02u_mcu_fw_send_data(dev, fw->data + sizeof(*hdr), fw->size - sizeof(*hdr), MCU_ROM_PATCH_MAX_PAYLOAD, MT76U_MCU_ROM_PATCH_OFFSET); @@ -176,7 +176,7 @@ out: return err; } -static int mt76x2u_mcu_load_firmware(struct mt76x2_dev *dev) +static int mt76x2u_mcu_load_firmware(struct mt76x02_dev *dev) { u32 val, dlm_offset = MT76U_MCU_DLM_OFFSET; const struct mt76x02_fw_header *hdr; @@ -210,7 +210,7 @@ static int mt76x2u_mcu_load_firmware(struct mt76x2_dev *dev) dev_info(dev->mt76.dev, "Build Time: %.16s\n", hdr->build_time); /* vendor reset */ - mt76x02u_mcu_fw_reset(&dev->mt76); + mt76x02u_mcu_fw_reset(dev); usleep_range(5000, 10000); /* enable USB_DMA_CFG */ @@ -230,7 +230,7 @@ static int mt76x2u_mcu_load_firmware(struct mt76x2_dev *dev) mt76_wr(dev, MT_FCE_SKIP_FS, 0x3); /* load ILM */ - err = mt76x02u_mcu_fw_send_data(&dev->mt76, fw->data + sizeof(*hdr), + err = mt76x02u_mcu_fw_send_data(dev, fw->data + sizeof(*hdr), ilm_len, MCU_FW_URB_MAX_PAYLOAD, MT76U_MCU_ILM_OFFSET); if (err < 0) { @@ -241,8 +241,7 @@ static int mt76x2u_mcu_load_firmware(struct mt76x2_dev *dev) /* load DLM */ if (mt76xx_rev(dev) >= MT76XX_REV_E3) dlm_offset += 0x800; - err = mt76x02u_mcu_fw_send_data(&dev->mt76, - fw->data + sizeof(*hdr) + ilm_len, + err = mt76x02u_mcu_fw_send_data(dev, fw->data + sizeof(*hdr) + ilm_len, dlm_len, MCU_FW_URB_MAX_PAYLOAD, dlm_offset); if (err < 0) { @@ -260,15 +259,15 @@ static int mt76x2u_mcu_load_firmware(struct mt76x2_dev *dev) mt76_set(dev, MT_MCU_COM_REG0, BIT(1)); /* enable FCE to send in-band cmd */ mt76_wr(dev, MT_FCE_PSE_CTRL, 0x1); + mt76x02_set_ethtool_fwver(dev, hdr); dev_dbg(dev->mt76.dev, "firmware running\n"); - mt76x02_set_ethtool_fwver(&dev->mt76, hdr); out: release_firmware(fw); return err; } -int mt76x2u_mcu_fw_init(struct mt76x2_dev *dev) +int mt76x2u_mcu_fw_init(struct mt76x02_dev *dev) { int err; @@ -279,14 +278,13 @@ int mt76x2u_mcu_fw_init(struct mt76x2_dev *dev) return mt76x2u_mcu_load_firmware(dev); } -int mt76x2u_mcu_init(struct mt76x2_dev *dev) +int mt76x2u_mcu_init(struct mt76x02_dev *dev) { int err; - err = mt76x02_mcu_function_select(&dev->mt76, Q_SELECT, - 1, false); + err = mt76x02_mcu_function_select(dev, Q_SELECT, 1, false); if (err < 0) return err; - return mt76x02_mcu_set_radio_state(&dev->mt76, true, false); + return mt76x02_mcu_set_radio_state(dev, true, false); } diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2u_phy.c b/drivers/net/wireless/mediatek/mt76/mt76x2/usb_phy.c index 06362d3487be..ca96ba60510e 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2u_phy.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x2/usb_phy.c @@ -15,9 +15,10 @@ */ #include "mt76x2u.h" -#include "mt76x2_eeprom.h" +#include "eeprom.h" +#include "../mt76x02_phy.h" -void mt76x2u_phy_channel_calibrate(struct mt76x2_dev *dev) +void mt76x2u_phy_channel_calibrate(struct mt76x02_dev *dev) { struct ieee80211_channel *chan = dev->mt76.chandef.chan; bool is_5ghz = chan->band == NL80211_BAND_5GHZ; @@ -28,18 +29,18 @@ void 
mt76x2u_phy_channel_calibrate(struct mt76x2_dev *dev) mt76x2u_mac_stop(dev); if (is_5ghz) - mt76x02_mcu_calibrate(&dev->mt76, MCU_CAL_LC, 0, false); + mt76x02_mcu_calibrate(dev, MCU_CAL_LC, 0, false); - mt76x02_mcu_calibrate(&dev->mt76, MCU_CAL_TX_LOFT, is_5ghz, false); - mt76x02_mcu_calibrate(&dev->mt76, MCU_CAL_TXIQ, is_5ghz, false); - mt76x02_mcu_calibrate(&dev->mt76, MCU_CAL_RXIQC_FI, is_5ghz, false); - mt76x02_mcu_calibrate(&dev->mt76, MCU_CAL_TEMP_SENSOR, 0, false); + mt76x02_mcu_calibrate(dev, MCU_CAL_TX_LOFT, is_5ghz, false); + mt76x02_mcu_calibrate(dev, MCU_CAL_TXIQ, is_5ghz, false); + mt76x02_mcu_calibrate(dev, MCU_CAL_RXIQC_FI, is_5ghz, false); + mt76x02_mcu_calibrate(dev, MCU_CAL_TEMP_SENSOR, 0, false); mt76x2u_mac_resume(dev); } static void -mt76x2u_phy_update_channel_gain(struct mt76x2_dev *dev) +mt76x2u_phy_update_channel_gain(struct mt76x02_dev *dev) { u8 channel = dev->mt76.chandef.chan->hw_value; int freq, freq1; @@ -68,7 +69,7 @@ mt76x2u_phy_update_channel_gain(struct mt76x2_dev *dev) break; } - dev->cal.avg_rssi_all = mt76x2_phy_get_min_avg_rssi(dev); + dev->cal.avg_rssi_all = mt76x02_phy_get_min_avg_rssi(dev); false_cca = FIELD_GET(MT_RX_STAT_1_CCA_ERRORS, mt76_rr(dev, MT_RX_STAT_1)); @@ -78,9 +79,9 @@ mt76x2u_phy_update_channel_gain(struct mt76x2_dev *dev) void mt76x2u_phy_calibrate(struct work_struct *work) { - struct mt76x2_dev *dev; + struct mt76x02_dev *dev; - dev = container_of(work, struct mt76x2_dev, cal_work.work); + dev = container_of(work, struct mt76x02_dev, cal_work.work); mt76x2_phy_tssi_compensate(dev, false); mt76x2u_phy_update_channel_gain(dev); @@ -88,7 +89,7 @@ void mt76x2u_phy_calibrate(struct work_struct *work) MT_CALIBRATE_INTERVAL); } -int mt76x2u_phy_set_channel(struct mt76x2_dev *dev, +int mt76x2u_phy_set_channel(struct mt76x02_dev *dev, struct cfg80211_chan_def *chandef) { u32 ext_cca_chan[4] = { @@ -154,8 +155,8 @@ int mt76x2u_phy_set_channel(struct mt76x2_dev *dev, mt76x2_configure_tx_delay(dev, chan->band, bw); mt76x2_phy_set_txpower(dev); - mt76x2_phy_set_band(dev, chan->band, ch_group_index & 1); - mt76x2_phy_set_bw(dev, chandef->width, ch_group_index); + mt76x02_phy_set_band(dev, chan->band, ch_group_index & 1); + mt76x02_phy_set_bw(dev, chandef->width, ch_group_index); mt76_rmw(dev, MT_EXT_CCA_CFG, (MT_EXT_CCA_CFG_CCA0 | @@ -176,18 +177,17 @@ int mt76x2u_phy_set_channel(struct mt76x2_dev *dev, mt76_set(dev, MT_BBP(RXO, 13), BIT(10)); if (!dev->cal.init_cal_done) { - u8 val = mt76x02_eeprom_get(&dev->mt76, MT_EE_BT_RCAL_RESULT); + u8 val = mt76x02_eeprom_get(dev, MT_EE_BT_RCAL_RESULT); if (val != 0xff) - mt76x02_mcu_calibrate(&dev->mt76, MCU_CAL_R, - 0, false); + mt76x02_mcu_calibrate(dev, MCU_CAL_R, 0, false); } - mt76x02_mcu_calibrate(&dev->mt76, MCU_CAL_RXDCOC, channel, false); + mt76x02_mcu_calibrate(dev, MCU_CAL_RXDCOC, channel, false); /* Rx LPF calibration */ if (!dev->cal.init_cal_done) - mt76x02_mcu_calibrate(&dev->mt76, MCU_CAL_RC, 0, false); + mt76x02_mcu_calibrate(dev, MCU_CAL_RC, 0, false); dev->cal.init_cal_done = true; mt76_wr(dev, MT_BBP(AGC, 61), 0xff64a4e2); @@ -202,7 +202,7 @@ int mt76x2u_phy_set_channel(struct mt76x2_dev *dev, if (scan) return 0; - if (mt76x02_tssi_enabled(&dev->mt76)) { + if (mt76x2_tssi_enabled(dev)) { /* init default values for temp compensation */ mt76_rmw_field(dev, MT_TX_ALC_CFG_1, MT_TX_ALC_CFG_1_TEMP_COMP, 0x38); @@ -217,10 +217,9 @@ int mt76x2u_phy_set_channel(struct mt76x2_dev *dev, chan = dev->mt76.chandef.chan; if (chan->band == NL80211_BAND_5GHZ) flag |= BIT(0); - if 
(mt76x02_ext_pa_enabled(&dev->mt76, chan->band)) + if (mt76x02_ext_pa_enabled(dev, chan->band)) flag |= BIT(8); - mt76x02_mcu_calibrate(&dev->mt76, MCU_CAL_TSSI, - flag, false); + mt76x02_mcu_calibrate(dev, MCU_CAL_TSSI, flag, false); dev->cal.tssi_cal_done = true; } } diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_core.c b/drivers/net/wireless/mediatek/mt76/mt76x2_core.c deleted file mode 100644 index 06e47f960f9a..000000000000 --- a/drivers/net/wireless/mediatek/mt76/mt76x2_core.c +++ /dev/null @@ -1,76 +0,0 @@ -/* - * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name> - * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - */ - -#include <linux/delay.h> -#include "mt76x2.h" -#include "mt76x2_trace.h" -#include "mt76x02_util.h" - -void mt76x2_rx_poll_complete(struct mt76_dev *mdev, enum mt76_rxq_id q) -{ - mt76x02_irq_enable(mdev, MT_INT_RX_DONE(q)); -} - -irqreturn_t mt76x2_irq_handler(int irq, void *dev_instance) -{ - struct mt76x2_dev *dev = dev_instance; - u32 intr; - - intr = mt76_rr(dev, MT_INT_SOURCE_CSR); - mt76_wr(dev, MT_INT_SOURCE_CSR, intr); - - if (!test_bit(MT76_STATE_INITIALIZED, &dev->mt76.state)) - return IRQ_NONE; - - trace_dev_irq(dev, intr, dev->mt76.mmio.irqmask); - - intr &= dev->mt76.mmio.irqmask; - - if (intr & MT_INT_TX_DONE_ALL) { - mt76x02_irq_disable(&dev->mt76, MT_INT_TX_DONE_ALL); - tasklet_schedule(&dev->tx_tasklet); - } - - if (intr & MT_INT_RX_DONE(0)) { - mt76x02_irq_disable(&dev->mt76, MT_INT_RX_DONE(0)); - napi_schedule(&dev->mt76.napi[0]); - } - - if (intr & MT_INT_RX_DONE(1)) { - mt76x02_irq_disable(&dev->mt76, MT_INT_RX_DONE(1)); - napi_schedule(&dev->mt76.napi[1]); - } - - if (intr & MT_INT_PRE_TBTT) - tasklet_schedule(&dev->pre_tbtt_tasklet); - - /* send buffered multicast frames now */ - if (intr & MT_INT_TBTT) - mt76_queue_kick(dev, &dev->mt76.q_tx[MT_TXQ_PSD]); - - if (intr & MT_INT_TX_STAT) { - mt76x2_mac_poll_tx_status(dev, true); - tasklet_schedule(&dev->tx_tasklet); - } - - if (intr & MT_INT_GPTIMER) { - mt76x02_irq_disable(&dev->mt76, MT_INT_GPTIMER); - tasklet_schedule(&dev->dfs_pd.dfs_tasklet); - } - - return IRQ_HANDLED; -} - diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_mac.h b/drivers/net/wireless/mediatek/mt76/mt76x2_mac.h deleted file mode 100644 index 66a57294fcfc..000000000000 --- a/drivers/net/wireless/mediatek/mt76/mt76x2_mac.h +++ /dev/null @@ -1,64 +0,0 @@ -/* - * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name> - * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - */ - -#ifndef __MT76x2_MAC_H -#define __MT76x2_MAC_H - -#include "mt76.h" -#include "mt76x02_mac.h" - -struct mt76x2_dev; -struct mt76x2_sta; -struct mt76x02_vif; - -struct mt76x2_tx_info { - unsigned long jiffies; - u8 tries; - - u8 wcid; - u8 pktid; - u8 retry; -}; - -static inline struct mt76x2_tx_info * -mt76x2_skb_tx_info(struct sk_buff *skb) -{ - struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); - - return (void *) info->status.status_driver_data; -} - -int mt76x2_mac_start(struct mt76x2_dev *dev); -void mt76x2_mac_stop(struct mt76x2_dev *dev, bool force); -void mt76x2_mac_resume(struct mt76x2_dev *dev); -void mt76x2_mac_set_bssid(struct mt76x2_dev *dev, u8 idx, const u8 *addr); - -int mt76x2_mac_process_rx(struct mt76x2_dev *dev, struct sk_buff *skb, - void *rxi); -void mt76x2_mac_write_txwi(struct mt76x2_dev *dev, struct mt76x02_txwi *txwi, - struct sk_buff *skb, struct mt76_wcid *wcid, - struct ieee80211_sta *sta, int len); - -int mt76x2_mac_set_beacon(struct mt76x2_dev *dev, u8 vif_idx, - struct sk_buff *skb); -void mt76x2_mac_set_beacon_enable(struct mt76x2_dev *dev, u8 vif_idx, bool val); - -void mt76x2_mac_poll_tx_status(struct mt76x2_dev *dev, bool irq); -void mt76x2_mac_process_tx_status_fifo(struct mt76x2_dev *dev); - -void mt76x2_mac_work(struct work_struct *work); - -#endif diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_mac_common.c b/drivers/net/wireless/mediatek/mt76/mt76x2_mac_common.c deleted file mode 100644 index ed4f56a3aae9..000000000000 --- a/drivers/net/wireless/mediatek/mt76/mt76x2_mac_common.c +++ /dev/null @@ -1,239 +0,0 @@ -/* - * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name> - * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com> - * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
- */ - -#include "mt76x2.h" -#include "mt76x02_util.h" - -void mt76x2_mac_stop(struct mt76x2_dev *dev, bool force) -{ - bool stopped = false; - u32 rts_cfg; - int i; - - mt76_wr(dev, MT_MAC_SYS_CTRL, 0); - - rts_cfg = mt76_rr(dev, MT_TX_RTS_CFG); - mt76_wr(dev, MT_TX_RTS_CFG, rts_cfg & ~MT_TX_RTS_CFG_RETRY_LIMIT); - - /* Wait for MAC to become idle */ - for (i = 0; i < 300; i++) { - if ((mt76_rr(dev, MT_MAC_STATUS) & - (MT_MAC_STATUS_RX | MT_MAC_STATUS_TX)) || - mt76_rr(dev, MT_BBP(IBI, 12))) { - udelay(1); - continue; - } - - stopped = true; - break; - } - - if (force && !stopped) { - mt76_set(dev, MT_BBP(CORE, 4), BIT(1)); - mt76_clear(dev, MT_BBP(CORE, 4), BIT(1)); - - mt76_set(dev, MT_BBP(CORE, 4), BIT(0)); - mt76_clear(dev, MT_BBP(CORE, 4), BIT(0)); - } - - mt76_wr(dev, MT_TX_RTS_CFG, rts_cfg); -} -EXPORT_SYMBOL_GPL(mt76x2_mac_stop); - -void mt76x2_mac_write_txwi(struct mt76x2_dev *dev, struct mt76x02_txwi *txwi, - struct sk_buff *skb, struct mt76_wcid *wcid, - struct ieee80211_sta *sta, int len) -{ - struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); - struct ieee80211_tx_rate *rate = &info->control.rates[0]; - struct ieee80211_key_conf *key = info->control.hw_key; - u16 rate_ht_mask = FIELD_PREP(MT_RXWI_RATE_PHY, BIT(1) | BIT(2)); - u8 nss; - s8 txpwr_adj, max_txpwr_adj; - u8 ccmp_pn[8]; - - memset(txwi, 0, sizeof(*txwi)); - - if (wcid) - txwi->wcid = wcid->idx; - else - txwi->wcid = 0xff; - - txwi->pktid = 1; - - if (wcid && wcid->sw_iv && key) { - u64 pn = atomic64_inc_return(&key->tx_pn); - ccmp_pn[0] = pn; - ccmp_pn[1] = pn >> 8; - ccmp_pn[2] = 0; - ccmp_pn[3] = 0x20 | (key->keyidx << 6); - ccmp_pn[4] = pn >> 16; - ccmp_pn[5] = pn >> 24; - ccmp_pn[6] = pn >> 32; - ccmp_pn[7] = pn >> 40; - txwi->iv = *((__le32 *)&ccmp_pn[0]); - txwi->eiv = *((__le32 *)&ccmp_pn[1]); - } - - spin_lock_bh(&dev->mt76.lock); - if (wcid && (rate->idx < 0 || !rate->count)) { - txwi->rate = wcid->tx_rate; - max_txpwr_adj = wcid->max_txpwr_adj; - nss = wcid->tx_rate_nss; - } else { - txwi->rate = mt76x02_mac_tx_rate_val(&dev->mt76, rate, &nss); - max_txpwr_adj = mt76x2_tx_get_max_txpwr_adj(&dev->mt76, rate); - } - spin_unlock_bh(&dev->mt76.lock); - - txpwr_adj = mt76x2_tx_get_txpwr_adj(dev, dev->mt76.txpower_conf, - max_txpwr_adj); - txwi->ctl2 = FIELD_PREP(MT_TX_PWR_ADJ, txpwr_adj); - - if (mt76xx_rev(dev) >= MT76XX_REV_E4) - txwi->txstream = 0x13; - else if (mt76xx_rev(dev) >= MT76XX_REV_E3 && - !(txwi->rate & cpu_to_le16(rate_ht_mask))) - txwi->txstream = 0x93; - - mt76x02_mac_fill_txwi(txwi, skb, sta, len, nss); -} -EXPORT_SYMBOL_GPL(mt76x2_mac_write_txwi); - -int mt76x2_mac_get_rssi(struct mt76x2_dev *dev, s8 rssi, int chain) -{ - struct mt76x2_rx_freq_cal *cal = &dev->cal.rx; - - rssi += cal->rssi_offset[chain]; - rssi -= cal->lna_gain; - - return rssi; -} - -static struct mt76x02_sta * -mt76x2_rx_get_sta(struct mt76x2_dev *dev, u8 idx) -{ - struct mt76_wcid *wcid; - - if (idx >= ARRAY_SIZE(dev->mt76.wcid)) - return NULL; - - wcid = rcu_dereference(dev->mt76.wcid[idx]); - if (!wcid) - return NULL; - - return container_of(wcid, struct mt76x02_sta, wcid); -} - -static struct mt76_wcid * -mt76x2_rx_get_sta_wcid(struct mt76x2_dev *dev, struct mt76x02_sta *sta, - bool unicast) -{ - if (!sta) - return NULL; - - if (unicast) - return &sta->wcid; - else - return &sta->vif->group_wcid; -} - -int mt76x2_mac_process_rx(struct mt76x2_dev *dev, struct sk_buff *skb, - void *rxi) -{ - struct mt76_rx_status *status = (struct mt76_rx_status *) skb->cb; - struct mt76x02_rxwi *rxwi = rxi; - struct mt76x02_sta 
*sta; - u32 rxinfo = le32_to_cpu(rxwi->rxinfo); - u32 ctl = le32_to_cpu(rxwi->ctl); - u16 rate = le16_to_cpu(rxwi->rate); - u16 tid_sn = le16_to_cpu(rxwi->tid_sn); - bool unicast = rxwi->rxinfo & cpu_to_le32(MT_RXINFO_UNICAST); - int pad_len = 0; - u8 pn_len; - u8 wcid; - int len; - - if (!test_bit(MT76_STATE_RUNNING, &dev->mt76.state)) - return -EINVAL; - - if (rxinfo & MT_RXINFO_L2PAD) - pad_len += 2; - - if (rxinfo & MT_RXINFO_DECRYPT) { - status->flag |= RX_FLAG_DECRYPTED; - status->flag |= RX_FLAG_MMIC_STRIPPED; - status->flag |= RX_FLAG_MIC_STRIPPED; - status->flag |= RX_FLAG_IV_STRIPPED; - } - - wcid = FIELD_GET(MT_RXWI_CTL_WCID, ctl); - sta = mt76x2_rx_get_sta(dev, wcid); - status->wcid = mt76x2_rx_get_sta_wcid(dev, sta, unicast); - - len = FIELD_GET(MT_RXWI_CTL_MPDU_LEN, ctl); - pn_len = FIELD_GET(MT_RXINFO_PN_LEN, rxinfo); - if (pn_len) { - int offset = ieee80211_get_hdrlen_from_skb(skb) + pad_len; - u8 *data = skb->data + offset; - - status->iv[0] = data[7]; - status->iv[1] = data[6]; - status->iv[2] = data[5]; - status->iv[3] = data[4]; - status->iv[4] = data[1]; - status->iv[5] = data[0]; - - /* - * Driver CCMP validation can't deal with fragments. - * Let mac80211 take care of it. - */ - if (rxinfo & MT_RXINFO_FRAG) { - status->flag &= ~RX_FLAG_IV_STRIPPED; - } else { - pad_len += pn_len << 2; - len -= pn_len << 2; - } - } - - mt76x02_remove_hdr_pad(skb, pad_len); - - if ((rxinfo & MT_RXINFO_BA) && !(rxinfo & MT_RXINFO_NULL)) - status->aggr = true; - - if (WARN_ON_ONCE(len > skb->len)) - return -EINVAL; - - pskb_trim(skb, len); - status->chains = BIT(0) | BIT(1); - status->chain_signal[0] = mt76x2_mac_get_rssi(dev, rxwi->rssi[0], 0); - status->chain_signal[1] = mt76x2_mac_get_rssi(dev, rxwi->rssi[1], 1); - status->signal = max(status->chain_signal[0], status->chain_signal[1]); - status->freq = dev->mt76.chandef.chan->center_freq; - status->band = dev->mt76.chandef.chan->band; - - status->tid = FIELD_GET(MT_RXWI_TID, tid_sn); - status->seqno = FIELD_GET(MT_RXWI_SN, tid_sn); - - if (sta) { - ewma_signal_add(&sta->rssi, status->signal); - sta->inactive_count = 0; - } - - return mt76x02_mac_process_rate(status, rate); -} -EXPORT_SYMBOL_GPL(mt76x2_mac_process_rx); diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_tx_common.c b/drivers/net/wireless/mediatek/mt76/mt76x2_tx_common.c deleted file mode 100644 index 1ec3c293e2c4..000000000000 --- a/drivers/net/wireless/mediatek/mt76/mt76x2_tx_common.c +++ /dev/null @@ -1,118 +0,0 @@ -/* - * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name> - * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com> - * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
- */ - -#include "mt76x2.h" -#include "dma.h" - -void mt76x2_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control, - struct sk_buff *skb) -{ - struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); - struct mt76x2_dev *dev = hw->priv; - struct ieee80211_vif *vif = info->control.vif; - struct mt76_wcid *wcid = &dev->mt76.global_wcid; - - if (control->sta) { - struct mt76x02_sta *msta; - - msta = (struct mt76x02_sta *)control->sta->drv_priv; - wcid = &msta->wcid; - /* sw encrypted frames */ - if (!info->control.hw_key && wcid->hw_key_idx != 0xff) - control->sta = NULL; - } - - if (vif && !control->sta) { - struct mt76x02_vif *mvif; - - mvif = (struct mt76x02_vif *)vif->drv_priv; - wcid = &mvif->group_wcid; - } - - mt76_tx(&dev->mt76, control->sta, wcid, skb); -} -EXPORT_SYMBOL_GPL(mt76x2_tx); - -s8 mt76x2_tx_get_max_txpwr_adj(struct mt76_dev *mdev, - const struct ieee80211_tx_rate *rate) -{ - struct mt76x2_dev *dev = (struct mt76x2_dev *) mdev; - s8 max_txpwr; - - if (rate->flags & IEEE80211_TX_RC_VHT_MCS) { - u8 mcs = ieee80211_rate_get_vht_mcs(rate); - - if (mcs == 8 || mcs == 9) { - max_txpwr = mdev->rate_power.vht[8]; - } else { - u8 nss, idx; - - nss = ieee80211_rate_get_vht_nss(rate); - idx = ((nss - 1) << 3) + mcs; - max_txpwr = mdev->rate_power.ht[idx & 0xf]; - } - } else if (rate->flags & IEEE80211_TX_RC_MCS) { - max_txpwr = mdev->rate_power.ht[rate->idx & 0xf]; - } else { - enum nl80211_band band = dev->mt76.chandef.chan->band; - - if (band == NL80211_BAND_2GHZ) { - const struct ieee80211_rate *r; - struct wiphy *wiphy = mt76_hw(dev)->wiphy; - struct mt76_rate_power *rp = &mdev->rate_power; - - r = &wiphy->bands[band]->bitrates[rate->idx]; - if (r->flags & IEEE80211_RATE_SHORT_PREAMBLE) - max_txpwr = rp->cck[r->hw_value & 0x3]; - else - max_txpwr = rp->ofdm[r->hw_value & 0x7]; - } else { - max_txpwr = mdev->rate_power.ofdm[rate->idx & 0x7]; - } - } - - return max_txpwr; -} -EXPORT_SYMBOL_GPL(mt76x2_tx_get_max_txpwr_adj); - -s8 mt76x2_tx_get_txpwr_adj(struct mt76x2_dev *dev, s8 txpwr, s8 max_txpwr_adj) -{ - txpwr = min_t(s8, txpwr, dev->mt76.txpower_conf); - txpwr -= (dev->target_power + dev->target_power_delta[0]); - txpwr = min_t(s8, txpwr, max_txpwr_adj); - - if (!dev->enable_tpc) - return 0; - else if (txpwr >= 0) - return min_t(s8, txpwr, 7); - else - return (txpwr < -16) ? 8 : (txpwr + 32) / 2; -} -EXPORT_SYMBOL_GPL(mt76x2_tx_get_txpwr_adj); - -void mt76x2_tx_set_txpwr_auto(struct mt76x2_dev *dev, s8 txpwr) -{ - s8 txpwr_adj; - - txpwr_adj = mt76x2_tx_get_txpwr_adj(dev, txpwr, - dev->mt76.rate_power.ofdm[4]); - mt76_rmw_field(dev, MT_PROT_AUTO_TX_CFG, - MT_PROT_AUTO_TX_CFG_PROT_PADJ, txpwr_adj); - mt76_rmw_field(dev, MT_PROT_AUTO_TX_CFG, - MT_PROT_AUTO_TX_CFG_AUTO_PADJ, txpwr_adj); -} -EXPORT_SYMBOL_GPL(mt76x2_tx_set_txpwr_auto); diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2u_core.c b/drivers/net/wireless/mediatek/mt76/mt76x2u_core.c deleted file mode 100644 index c2ccdebca470..000000000000 --- a/drivers/net/wireless/mediatek/mt76/mt76x2u_core.c +++ /dev/null @@ -1,53 +0,0 @@ -/* - * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com> - * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - */ - -#include "mt76x2u.h" -#include "dma.h" -#include "mt76x02_util.h" -#include "mt76x02_usb.h" - -static int -mt76x2u_check_skb_rooms(struct sk_buff *skb) -{ - int hdr_len = ieee80211_get_hdrlen_from_skb(skb); - u32 need_head; - - need_head = sizeof(struct mt76x02_txwi) + MT_DMA_HDR_LEN; - if (hdr_len % 4) - need_head += 2; - return skb_cow(skb, need_head); -} - -int mt76x2u_tx_prepare_skb(struct mt76_dev *mdev, void *data, - struct sk_buff *skb, struct mt76_queue *q, - struct mt76_wcid *wcid, struct ieee80211_sta *sta, - u32 *tx_info) -{ - struct mt76x2_dev *dev = container_of(mdev, struct mt76x2_dev, mt76); - struct mt76x02_txwi *txwi; - int err, len = skb->len; - - err = mt76x2u_check_skb_rooms(skb); - if (err < 0) - return -ENOMEM; - - mt76x02_insert_hdr_pad(skb); - - txwi = skb_push(skb, sizeof(struct mt76x02_txwi)); - mt76x2_mac_write_txwi(dev, txwi, skb, wcid, sta, len); - - return mt76x02u_set_txinfo(skb, wcid, q2ep(q->hw_idx)); -} diff --git a/drivers/net/wireless/mediatek/mt76/tx.c b/drivers/net/wireless/mediatek/mt76/tx.c index bf0e9e666bc4..7cbce03aa65b 100644 --- a/drivers/net/wireless/mediatek/mt76/tx.c +++ b/drivers/net/wireless/mediatek/mt76/tx.c @@ -96,7 +96,8 @@ mt76_check_agg_ssn(struct mt76_txq *mtxq, struct sk_buff *skb) { struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; - if (!ieee80211_is_data_qos(hdr->frame_control)) + if (!ieee80211_is_data_qos(hdr->frame_control) || + !ieee80211_is_data_present(hdr->frame_control)) return; mtxq->agg_ssn = le16_to_cpu(hdr->seq_ctrl) + 0x10; diff --git a/drivers/net/wireless/mediatek/mt76/usb.c b/drivers/net/wireless/mediatek/mt76/usb.c index de7785c4f6af..5f0faf07c346 100644 --- a/drivers/net/wireless/mediatek/mt76/usb.c +++ b/drivers/net/wireless/mediatek/mt76/usb.c @@ -286,7 +286,7 @@ mt76u_fill_rx_sg(struct mt76_dev *dev, struct mt76u_buf *buf, void *data; int offset; - data = page_frag_alloc(&q->rx_page, q->buf_size, GFP_ATOMIC); + data = page_frag_alloc(&q->rx_page, len, GFP_ATOMIC); if (!data) break; @@ -318,7 +318,7 @@ int mt76u_buf_alloc(struct mt76_dev *dev, struct mt76u_buf *buf, if (!buf->urb) return -ENOMEM; - buf->urb->sg = devm_kzalloc(dev->dev, nsgs * sizeof(*buf->urb->sg), + buf->urb->sg = devm_kcalloc(dev->dev, nsgs, sizeof(*buf->urb->sg), gfp); if (!buf->urb->sg) return -ENOMEM; @@ -525,8 +525,8 @@ static int mt76u_alloc_rx(struct mt76_dev *dev) spin_lock_init(&q->rx_page_lock); spin_lock_init(&q->lock); - q->entry = devm_kzalloc(dev->dev, - MT_NUM_RX_ENTRIES * sizeof(*q->entry), + q->entry = devm_kcalloc(dev->dev, + MT_NUM_RX_ENTRIES, sizeof(*q->entry), GFP_KERNEL); if (!q->entry) return -ENOMEM; @@ -755,8 +755,8 @@ static int mt76u_alloc_tx(struct mt76_dev *dev) INIT_LIST_HEAD(&q->swq); q->hw_idx = mt76_ac_to_hwq(i); - q->entry = devm_kzalloc(dev->dev, - MT_NUM_TX_ENTRIES * sizeof(*q->entry), + q->entry = devm_kcalloc(dev->dev, + MT_NUM_TX_ENTRIES, sizeof(*q->entry), GFP_KERNEL); if (!q->entry) return -ENOMEM; @@ -862,6 +862,7 @@ int mt76u_init(struct mt76_dev *dev, .copy = mt76u_copy, .wr_rp = mt76u_wr_rp, .rd_rp = mt76u_rd_rp, + .type = MT76_BUS_USB, }; struct mt76_usb *usb = &dev->usb; diff --git a/drivers/net/wireless/quantenna/Kconfig 
b/drivers/net/wireless/quantenna/Kconfig index de84ce125c26..7628d9c1ea6a 100644 --- a/drivers/net/wireless/quantenna/Kconfig +++ b/drivers/net/wireless/quantenna/Kconfig @@ -1,7 +1,7 @@ config WLAN_VENDOR_QUANTENNA bool "Quantenna wireless cards support" default y - ---help--- + help If you have a wireless card belonging to this class, say Y. Note that the answer to this question doesn't directly affect the diff --git a/drivers/net/wireless/quantenna/qtnfmac/Kconfig b/drivers/net/wireless/quantenna/qtnfmac/Kconfig index 8d1492a90bd1..b8c12a5f16b4 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/Kconfig +++ b/drivers/net/wireless/quantenna/qtnfmac/Kconfig @@ -11,7 +11,7 @@ config QTNFMAC_PEARL_PCIE select QTNFMAC select FW_LOADER select CRC32 - ---help--- + help This option adds support for wireless adapters based on Quantenna 802.11ac QSR10g (aka Pearl) FullMAC chipset running over PCIe. diff --git a/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c b/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c index 452d4b7c832d..51b33ec78fac 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c +++ b/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c @@ -141,8 +141,8 @@ qtnf_change_virtual_intf(struct wiphy *wiphy, ret = qtnf_cmd_send_change_intf_type(vif, type, mac_addr); if (ret) { - pr_err("VIF%u.%u: failed to change VIF type: %d\n", - vif->mac->macid, vif->vifid, ret); + pr_err("VIF%u.%u: failed to change type to %d\n", + vif->mac->macid, vif->vifid, type); return ret; } @@ -216,7 +216,6 @@ static struct wireless_dev *qtnf_add_virtual_intf(struct wiphy *wiphy, eth_zero_addr(vif->mac_addr); eth_zero_addr(vif->bssid); vif->bss_priority = QTNF_DEF_BSS_PRIORITY; - vif->sta_state = QTNF_STA_DISCONNECTED; memset(&vif->wdev, 0, sizeof(vif->wdev)); vif->wdev.wiphy = wiphy; vif->wdev.iftype = type; @@ -229,18 +228,22 @@ static struct wireless_dev *qtnf_add_virtual_intf(struct wiphy *wiphy, if (params) mac_addr = params->macaddr; - if (qtnf_cmd_send_add_intf(vif, type, mac_addr)) { - pr_err("VIF%u.%u: failed to add VIF\n", mac->macid, vif->vifid); + ret = qtnf_cmd_send_add_intf(vif, type, mac_addr); + if (ret) { + pr_err("VIF%u.%u: failed to add VIF %pM\n", + mac->macid, vif->vifid, mac_addr); goto err_cmd; } if (!is_valid_ether_addr(vif->mac_addr)) { pr_err("VIF%u.%u: FW reported bad MAC: %pM\n", mac->macid, vif->vifid, vif->mac_addr); + ret = -EINVAL; goto err_mac; } - if (qtnf_core_net_attach(mac, vif, name, name_assign_t)) { + ret = qtnf_core_net_attach(mac, vif, name, name_assign_t); + if (ret) { pr_err("VIF%u.%u: failed to attach netdev\n", mac->macid, vif->vifid); goto err_net; @@ -256,7 +259,7 @@ err_mac: err_cmd: vif->wdev.iftype = NL80211_IFTYPE_UNSPECIFIED; - return ERR_PTR(-EFAULT); + return ERR_PTR(ret); } static int qtnf_mgmt_set_appie(struct qtnf_vif *vif, @@ -335,12 +338,11 @@ static int qtnf_stop_ap(struct wiphy *wiphy, struct net_device *dev) qtnf_scan_done(vif->mac, true); ret = qtnf_cmd_send_stop_ap(vif); - if (ret) { + if (ret) pr_err("VIF%u.%u: failed to stop AP operation in FW\n", vif->mac->macid, vif->vifid); - netif_carrier_off(vif->netdev); - } + netif_carrier_off(vif->netdev); return ret; } @@ -478,19 +480,31 @@ qtnf_dump_station(struct wiphy *wiphy, struct net_device *dev, const struct qtnf_sta_node *sta_node; int ret; - sta_node = qtnf_sta_list_lookup_index(&vif->sta_list, idx); + switch (vif->wdev.iftype) { + case NL80211_IFTYPE_STATION: + if (idx != 0 || !vif->wdev.current_bss) + return -ENOENT; - if (unlikely(!sta_node)) - return -ENOENT; + ether_addr_copy(mac, 
vif->bssid); + break; + case NL80211_IFTYPE_AP: + sta_node = qtnf_sta_list_lookup_index(&vif->sta_list, idx); + if (unlikely(!sta_node)) + return -ENOENT; - ether_addr_copy(mac, sta_node->mac_addr); + ether_addr_copy(mac, sta_node->mac_addr); + break; + default: + return -ENOTSUPP; + } - ret = qtnf_cmd_get_sta_info(vif, sta_node->mac_addr, sinfo); + ret = qtnf_cmd_get_sta_info(vif, mac, sinfo); - if (unlikely(ret == -ENOENT)) { - qtnf_sta_list_del(vif, mac); - cfg80211_del_sta(vif->netdev, mac, GFP_KERNEL); - sinfo->filled = 0; + if (vif->wdev.iftype == NL80211_IFTYPE_AP) { + if (ret == -ENOENT) { + cfg80211_del_sta(vif->netdev, mac, GFP_KERNEL); + sinfo->filled = 0; + } } sinfo->generation = vif->generation; @@ -521,9 +535,16 @@ static int qtnf_del_key(struct wiphy *wiphy, struct net_device *dev, int ret; ret = qtnf_cmd_send_del_key(vif, key_index, pairwise, mac_addr); - if (ret) - pr_err("VIF%u.%u: failed to delete key: idx=%u pw=%u\n", - vif->mac->macid, vif->vifid, key_index, pairwise); + if (ret) { + if (ret == -ENOENT) { + pr_debug("VIF%u.%u: key index %d out of bounds\n", + vif->mac->macid, vif->vifid, key_index); + } else { + pr_err("VIF%u.%u: failed to delete key: idx=%u pw=%u\n", + vif->mac->macid, vif->vifid, + key_index, pairwise); + } + } return ret; } @@ -590,6 +611,7 @@ qtnf_del_station(struct wiphy *wiphy, struct net_device *dev, if (ret) pr_err("VIF%u.%u: failed to delete STA %pM\n", vif->mac->macid, vif->vifid, params->mac); + return ret; } @@ -597,21 +619,25 @@ static int qtnf_scan(struct wiphy *wiphy, struct cfg80211_scan_request *request) { struct qtnf_wmac *mac = wiphy_priv(wiphy); + int ret; cancel_delayed_work_sync(&mac->scan_timeout); mac->scan_req = request; - if (qtnf_cmd_send_scan(mac)) { + ret = qtnf_cmd_send_scan(mac); + if (ret) { pr_err("MAC%u: failed to start scan\n", mac->macid); mac->scan_req = NULL; - return -EFAULT; + goto out; } + pr_debug("MAC%u: scan started\n", mac->macid); queue_delayed_work(mac->bus->workqueue, &mac->scan_timeout, QTNF_SCAN_TIMEOUT_SEC * HZ); - return 0; +out: + return ret; } static int @@ -624,9 +650,6 @@ qtnf_connect(struct wiphy *wiphy, struct net_device *dev, if (vif->wdev.iftype != NL80211_IFTYPE_STATION) return -EOPNOTSUPP; - if (vif->sta_state != QTNF_STA_DISCONNECTED) - return -EBUSY; - if (sme->bssid) ether_addr_copy(vif->bssid, sme->bssid); else @@ -634,13 +657,13 @@ qtnf_connect(struct wiphy *wiphy, struct net_device *dev, ret = qtnf_cmd_send_connect(vif, sme); if (ret) { - pr_err("VIF%u.%u: failed to connect\n", vif->mac->macid, - vif->vifid); - return ret; + pr_err("VIF%u.%u: failed to connect\n", + vif->mac->macid, vif->vifid); + goto out; } - vif->sta_state = QTNF_STA_CONNECTING; - return 0; +out: + return ret; } static int @@ -662,22 +685,18 @@ qtnf_disconnect(struct wiphy *wiphy, struct net_device *dev, goto out; } - qtnf_scan_done(mac, true); - - if (vif->sta_state == QTNF_STA_DISCONNECTED) - goto out; - ret = qtnf_cmd_send_disconnect(vif, reason_code); - if (ret) { - pr_err("VIF%u.%u: failed to disconnect\n", mac->macid, - vif->vifid); - goto out; + if (ret) + pr_err("VIF%u.%u: failed to disconnect\n", + mac->macid, vif->vifid); + + if (vif->wdev.current_bss) { + netif_carrier_off(vif->netdev); + cfg80211_disconnected(vif->netdev, reason_code, + NULL, 0, true, GFP_KERNEL); } out: - if (vif->sta_state == QTNF_STA_CONNECTING) - vif->sta_state = QTNF_STA_DISCONNECTED; - return ret; } @@ -747,7 +766,6 @@ qtnf_dump_survey(struct wiphy *wiphy, struct net_device *dev, default: pr_debug("failed to get chan(%d) stats 
from card\n", chan->hw_value); - ret = -EINVAL; break; } @@ -770,6 +788,7 @@ qtnf_get_channel(struct wiphy *wiphy, struct wireless_dev *wdev, ret = qtnf_cmd_get_channel(vif, chandef); if (ret) { pr_err("%s: failed to get channel: %d\n", ndev->name, ret); + ret = -ENODATA; goto out; } @@ -779,6 +798,7 @@ qtnf_get_channel(struct wiphy *wiphy, struct wireless_dev *wdev, chandef->center_freq1, chandef->center_freq2, chandef->width); ret = -ENODATA; + goto out; } out: @@ -848,10 +868,8 @@ static int qtnf_set_power_mgmt(struct wiphy *wiphy, struct net_device *dev, ret = qtnf_cmd_send_pm_set(vif, enabled ? QLINK_PM_AUTO_STANDBY : QLINK_PM_OFF, timeout); - if (ret) { + if (ret) pr_err("%s: failed to set PM mode ret=%d\n", dev->name, ret); - return ret; - } return ret; } @@ -971,9 +989,16 @@ static void qtnf_cfg80211_reg_notifier(struct wiphy *wiphy_in, ret = qtnf_cmd_reg_notify(bus, req); if (ret) { - if (ret != -EOPNOTSUPP && ret != -EALREADY) + if (ret == -EOPNOTSUPP) { + pr_warn("reg update not supported\n"); + } else if (ret == -EALREADY) { + pr_info("regulatory domain is already set to %c%c", + req->alpha2[0], req->alpha2[1]); + } else { pr_err("failed to update reg domain to %c%c\n", req->alpha2[0], req->alpha2[1]); + } + return; } @@ -1088,6 +1113,10 @@ int qtnf_wiphy_register(struct qtnf_hw_info *hw_info, struct qtnf_wmac *mac) if (hw_info->hw_capab & QLINK_HW_CAPAB_DFS_OFFLOAD) wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_DFS_OFFLOAD); + if (hw_info->hw_capab & QLINK_HW_CAPAB_SCAN_DWELL) + wiphy_ext_feature_set(wiphy, + NL80211_EXT_FEATURE_SET_SCAN_DWELL); + wiphy->probe_resp_offload = NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS | NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2; @@ -1106,6 +1135,9 @@ int qtnf_wiphy_register(struct qtnf_hw_info *hw_info, struct qtnf_wmac *mac) if (hw_info->hw_capab & QLINK_HW_CAPAB_SCAN_RANDOM_MAC_ADDR) wiphy->features |= NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR; + if (!(hw_info->hw_capab & QLINK_HW_CAPAB_OBSS_SCAN)) + wiphy->features |= NL80211_FEATURE_NEED_OBSS_SCAN; + #ifdef CONFIG_PM if (macinfo->wowlan) wiphy->wowlan = macinfo->wowlan; @@ -1120,6 +1152,15 @@ int qtnf_wiphy_register(struct qtnf_hw_info *hw_info, struct qtnf_wmac *mac) wiphy->regulatory_flags |= REGULATORY_WIPHY_SELF_MANAGED; } + if (mac->macinfo.extended_capabilities_len) { + wiphy->extended_capabilities = + mac->macinfo.extended_capabilities; + wiphy->extended_capabilities_mask = + mac->macinfo.extended_capabilities_mask; + wiphy->extended_capabilities_len = + mac->macinfo.extended_capabilities_len; + } + strlcpy(wiphy->fw_version, hw_info->fw_version, sizeof(wiphy->fw_version)); wiphy->hw_version = hw_info->hw_version; @@ -1143,7 +1184,8 @@ void qtnf_netdev_updown(struct net_device *ndev, bool up) struct qtnf_vif *vif = qtnf_netdev_get_priv(ndev); if (qtnf_cmd_send_updown_intf(vif, up)) - pr_err("failed to send up/down command to FW\n"); + pr_err("failed to send %s command to VIF%u.%u\n", + up ? 
"UP" : "DOWN", vif->mac->macid, vif->vifid); } void qtnf_virtual_intf_cleanup(struct net_device *ndev) @@ -1151,57 +1193,20 @@ void qtnf_virtual_intf_cleanup(struct net_device *ndev) struct qtnf_vif *vif = qtnf_netdev_get_priv(ndev); struct qtnf_wmac *mac = wiphy_priv(vif->wdev.wiphy); - if (vif->wdev.iftype == NL80211_IFTYPE_STATION) { - switch (vif->sta_state) { - case QTNF_STA_DISCONNECTED: - break; - case QTNF_STA_CONNECTING: - cfg80211_connect_result(vif->netdev, - vif->bssid, NULL, 0, - NULL, 0, - WLAN_STATUS_UNSPECIFIED_FAILURE, - GFP_KERNEL); - qtnf_disconnect(vif->wdev.wiphy, ndev, - WLAN_REASON_DEAUTH_LEAVING); - break; - case QTNF_STA_CONNECTED: - cfg80211_disconnected(vif->netdev, - WLAN_REASON_DEAUTH_LEAVING, - NULL, 0, 1, GFP_KERNEL); - qtnf_disconnect(vif->wdev.wiphy, ndev, - WLAN_REASON_DEAUTH_LEAVING); - break; - } - - vif->sta_state = QTNF_STA_DISCONNECTED; - } + if (vif->wdev.iftype == NL80211_IFTYPE_STATION) + qtnf_disconnect(vif->wdev.wiphy, ndev, + WLAN_REASON_DEAUTH_LEAVING); qtnf_scan_done(mac, true); } void qtnf_cfg80211_vif_reset(struct qtnf_vif *vif) { - if (vif->wdev.iftype == NL80211_IFTYPE_STATION) { - switch (vif->sta_state) { - case QTNF_STA_CONNECTING: - cfg80211_connect_result(vif->netdev, - vif->bssid, NULL, 0, - NULL, 0, - WLAN_STATUS_UNSPECIFIED_FAILURE, - GFP_KERNEL); - break; - case QTNF_STA_CONNECTED: - cfg80211_disconnected(vif->netdev, - WLAN_REASON_DEAUTH_LEAVING, - NULL, 0, 1, GFP_KERNEL); - break; - case QTNF_STA_DISCONNECTED: - break; - } - } + if (vif->wdev.iftype == NL80211_IFTYPE_STATION) + cfg80211_disconnected(vif->netdev, WLAN_REASON_DEAUTH_LEAVING, + NULL, 0, 1, GFP_KERNEL); cfg80211_shutdown_all_interfaces(vif->wdev.wiphy); - vif->sta_state = QTNF_STA_DISCONNECTED; } void qtnf_band_init_rates(struct ieee80211_supported_band *band) diff --git a/drivers/net/wireless/quantenna/qtnfmac/commands.c b/drivers/net/wireless/quantenna/qtnfmac/commands.c index ae9e77300533..bfdc1ad30c13 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/commands.c +++ b/drivers/net/wireless/quantenna/qtnfmac/commands.c @@ -80,7 +80,6 @@ static int qtnf_cmd_resp_result_decode(enum qlink_cmd_result qcode) static int qtnf_cmd_send_with_reply(struct qtnf_bus *bus, struct sk_buff *cmd_skb, struct sk_buff **response_skb, - u16 *result_code, size_t const_resp_size, size_t *var_resp_size) { @@ -88,7 +87,8 @@ static int qtnf_cmd_send_with_reply(struct qtnf_bus *bus, const struct qlink_resp *resp; struct sk_buff *resp_skb = NULL; u16 cmd_id; - u8 mac_id, vif_id; + u8 mac_id; + u8 vif_id; int ret; cmd = (struct qlink_cmd *)cmd_skb->data; @@ -97,8 +97,11 @@ static int qtnf_cmd_send_with_reply(struct qtnf_bus *bus, vif_id = cmd->vifid; cmd->mhdr.len = cpu_to_le16(cmd_skb->len); - if (unlikely(bus->fw_state != QTNF_FW_STATE_ACTIVE && - le16_to_cpu(cmd->cmd_id) != QLINK_CMD_FW_INIT)) { + pr_debug("VIF%u.%u cmd=0x%.4X\n", mac_id, vif_id, + le16_to_cpu(cmd->cmd_id)); + + if (bus->fw_state != QTNF_FW_STATE_ACTIVE && + le16_to_cpu(cmd->cmd_id) != QLINK_CMD_FW_INIT) { pr_warn("VIF%u.%u: drop cmd 0x%.4X in fw state %d\n", mac_id, vif_id, le16_to_cpu(cmd->cmd_id), bus->fw_state); @@ -106,24 +109,16 @@ static int qtnf_cmd_send_with_reply(struct qtnf_bus *bus, return -ENODEV; } - pr_debug("VIF%u.%u cmd=0x%.4X\n", mac_id, vif_id, - le16_to_cpu(cmd->cmd_id)); - ret = qtnf_trans_send_cmd_with_resp(bus, cmd_skb, &resp_skb); - - if (unlikely(ret)) + if (ret) goto out; resp = (const struct qlink_resp *)resp_skb->data; ret = qtnf_cmd_check_reply_header(resp, cmd_id, mac_id, vif_id, 
const_resp_size); - - if (unlikely(ret)) + if (ret) goto out; - if (likely(result_code)) - *result_code = le16_to_cpu(resp->result); - /* Return length of variable part of response */ if (response_skb && var_resp_size) *var_resp_size = le16_to_cpu(resp->mhdr.len) - const_resp_size; @@ -134,14 +129,18 @@ out: else consume_skb(resp_skb); + if (!ret && resp) + return qtnf_cmd_resp_result_decode(le16_to_cpu(resp->result)); + + pr_warn("VIF%u.%u: cmd 0x%.4X failed: %d\n", + mac_id, vif_id, le16_to_cpu(cmd->cmd_id), ret); + return ret; } -static inline int qtnf_cmd_send(struct qtnf_bus *bus, - struct sk_buff *cmd_skb, - u16 *result_code) +static inline int qtnf_cmd_send(struct qtnf_bus *bus, struct sk_buff *cmd_skb) { - return qtnf_cmd_send_with_reply(bus, cmd_skb, NULL, result_code, + return qtnf_cmd_send_with_reply(bus, cmd_skb, NULL, sizeof(struct qlink_resp), NULL); } @@ -228,7 +227,6 @@ int qtnf_cmd_send_start_ap(struct qtnf_vif *vif, struct sk_buff *cmd_skb; struct qlink_cmd_start_ap *cmd; struct qlink_auth_encr *aen; - u16 res_code = QLINK_CMD_RESULT_OK; int ret; int i; @@ -329,30 +327,21 @@ int qtnf_cmd_send_start_ap(struct qtnf_vif *vif, } qtnf_bus_lock(vif->mac->bus); - - ret = qtnf_cmd_send(vif->mac->bus, cmd_skb, &res_code); - - if (unlikely(ret)) - goto out; - - if (unlikely(res_code != QLINK_CMD_RESULT_OK)) { - pr_err("VIF%u.%u: CMD failed: %u\n", vif->mac->macid, - vif->vifid, res_code); - ret = -EFAULT; + ret = qtnf_cmd_send(vif->mac->bus, cmd_skb); + if (ret) goto out; - } netif_carrier_on(vif->netdev); out: qtnf_bus_unlock(vif->mac->bus); + return ret; } int qtnf_cmd_send_stop_ap(struct qtnf_vif *vif) { struct sk_buff *cmd_skb; - u16 res_code = QLINK_CMD_RESULT_OK; int ret; cmd_skb = qtnf_cmd_alloc_new_cmdskb(vif->mac->macid, vif->vifid, @@ -362,23 +351,13 @@ int qtnf_cmd_send_stop_ap(struct qtnf_vif *vif) return -ENOMEM; qtnf_bus_lock(vif->mac->bus); - - ret = qtnf_cmd_send(vif->mac->bus, cmd_skb, &res_code); - - if (unlikely(ret)) - goto out; - - if (unlikely(res_code != QLINK_CMD_RESULT_OK)) { - pr_err("VIF%u.%u: CMD failed: %u\n", vif->mac->macid, - vif->vifid, res_code); - ret = -EFAULT; + ret = qtnf_cmd_send(vif->mac->bus, cmd_skb); + if (ret) goto out; - } - - netif_carrier_off(vif->netdev); out: qtnf_bus_unlock(vif->mac->bus); + return ret; } @@ -386,7 +365,6 @@ int qtnf_cmd_send_register_mgmt(struct qtnf_vif *vif, u16 frame_type, bool reg) { struct sk_buff *cmd_skb; struct qlink_cmd_mgmt_frame_register *cmd; - u16 res_code = QLINK_CMD_RESULT_OK; int ret; cmd_skb = qtnf_cmd_alloc_new_cmdskb(vif->mac->macid, vif->vifid, @@ -401,20 +379,13 @@ int qtnf_cmd_send_register_mgmt(struct qtnf_vif *vif, u16 frame_type, bool reg) cmd->frame_type = cpu_to_le16(frame_type); cmd->do_register = reg; - ret = qtnf_cmd_send(vif->mac->bus, cmd_skb, &res_code); - - if (unlikely(ret)) - goto out; - - if (unlikely(res_code != QLINK_CMD_RESULT_OK)) { - pr_err("VIF%u.%u: CMD failed: %u\n", vif->mac->macid, - vif->vifid, res_code); - ret = -EFAULT; + ret = qtnf_cmd_send(vif->mac->bus, cmd_skb); + if (ret) goto out; - } out: qtnf_bus_unlock(vif->mac->bus); + return ret; } @@ -423,7 +394,6 @@ int qtnf_cmd_send_mgmt_frame(struct qtnf_vif *vif, u32 cookie, u16 flags, { struct sk_buff *cmd_skb; struct qlink_cmd_mgmt_frame_tx *cmd; - u16 res_code = QLINK_CMD_RESULT_OK; int ret; if (sizeof(*cmd) + len > QTNF_MAX_CMD_BUF_SIZE) { @@ -448,20 +418,13 @@ int qtnf_cmd_send_mgmt_frame(struct qtnf_vif *vif, u32 cookie, u16 flags, if (len && buf) qtnf_cmd_skb_put_buffer(cmd_skb, buf, len); - ret = 
qtnf_cmd_send(vif->mac->bus, cmd_skb, &res_code); - - if (unlikely(ret)) + ret = qtnf_cmd_send(vif->mac->bus, cmd_skb); + if (ret) goto out; - if (unlikely(res_code != QLINK_CMD_RESULT_OK)) { - pr_err("VIF%u.%u: CMD failed: %u\n", vif->mac->macid, - vif->vifid, res_code); - ret = -EFAULT; - goto out; - } - out: qtnf_bus_unlock(vif->mac->bus); + return ret; } @@ -469,7 +432,6 @@ int qtnf_cmd_send_mgmt_set_appie(struct qtnf_vif *vif, u8 frame_type, const u8 *buf, size_t len) { struct sk_buff *cmd_skb; - u16 res_code = QLINK_CMD_RESULT_OK; int ret; if (len > QTNF_MAX_CMD_BUF_SIZE) { @@ -487,21 +449,13 @@ int qtnf_cmd_send_mgmt_set_appie(struct qtnf_vif *vif, u8 frame_type, qtnf_cmd_tlv_ie_set_add(cmd_skb, frame_type, buf, len); qtnf_bus_lock(vif->mac->bus); - - ret = qtnf_cmd_send(vif->mac->bus, cmd_skb, &res_code); - - if (unlikely(ret)) - goto out; - - if (unlikely(res_code != QLINK_CMD_RESULT_OK)) { - pr_err("VIF%u.%u frame %u: CMD failed: %u\n", vif->mac->macid, - vif->vifid, frame_type, res_code); - ret = -EFAULT; + ret = qtnf_cmd_send(vif->mac->bus, cmd_skb); + if (ret) goto out; - } out: qtnf_bus_unlock(vif->mac->bus); + return ret; } @@ -544,6 +498,9 @@ qtnf_sta_info_parse_rate(struct rate_info *rate_dst, rate_dst->flags |= RATE_INFO_FLAGS_MCS; else if (rate_src->flags & QLINK_STA_INFO_RATE_FLAG_VHT_MCS) rate_dst->flags |= RATE_INFO_FLAGS_VHT_MCS; + + if (rate_src->flags & QLINK_STA_INFO_RATE_FLAG_SHORT_GI) + rate_dst->flags |= RATE_INFO_FLAGS_SHORT_GI; } static void @@ -730,7 +687,6 @@ int qtnf_cmd_get_sta_info(struct qtnf_vif *vif, const u8 *sta_mac, struct qlink_cmd_get_sta_info *cmd; const struct qlink_resp_get_sta_info *resp; size_t var_resp_len; - u16 res_code = QLINK_CMD_RESULT_OK; int ret = 0; cmd_skb = qtnf_cmd_alloc_new_cmdskb(vif->mac->macid, vif->vifid, @@ -745,31 +701,13 @@ int qtnf_cmd_get_sta_info(struct qtnf_vif *vif, const u8 *sta_mac, ether_addr_copy(cmd->sta_addr, sta_mac); ret = qtnf_cmd_send_with_reply(vif->mac->bus, cmd_skb, &resp_skb, - &res_code, sizeof(*resp), - &var_resp_len); - - if (unlikely(ret)) - goto out; - - if (unlikely(res_code != QLINK_CMD_RESULT_OK)) { - switch (res_code) { - case QLINK_CMD_RESULT_ENOTFOUND: - pr_warn("VIF%u.%u: %pM STA not found\n", - vif->mac->macid, vif->vifid, sta_mac); - ret = -ENOENT; - break; - default: - pr_err("VIF%u.%u: can't get info for %pM: %u\n", - vif->mac->macid, vif->vifid, sta_mac, res_code); - ret = -EFAULT; - break; - } + sizeof(*resp), &var_resp_len); + if (ret) goto out; - } resp = (const struct qlink_resp_get_sta_info *)resp_skb->data; - if (unlikely(!ether_addr_equal(sta_mac, resp->sta_addr))) { + if (!ether_addr_equal(sta_mac, resp->sta_addr)) { pr_err("VIF%u.%u: wrong mac in reply: %pM != %pM\n", vif->mac->macid, vif->vifid, resp->sta_addr, sta_mac); ret = -EINVAL; @@ -795,7 +733,6 @@ static int qtnf_cmd_send_add_change_intf(struct qtnf_vif *vif, struct sk_buff *cmd_skb, *resp_skb = NULL; struct qlink_cmd_manage_intf *cmd; const struct qlink_resp_manage_intf *resp; - u16 res_code = QLINK_CMD_RESULT_OK; int ret = 0; cmd_skb = qtnf_cmd_alloc_new_cmdskb(vif->mac->macid, vif->vifid, @@ -828,17 +765,9 @@ static int qtnf_cmd_send_add_change_intf(struct qtnf_vif *vif, eth_zero_addr(cmd->intf_info.mac_addr); ret = qtnf_cmd_send_with_reply(vif->mac->bus, cmd_skb, &resp_skb, - &res_code, sizeof(*resp), NULL); - - if (unlikely(ret)) - goto out; - - ret = qtnf_cmd_resp_result_decode(res_code); - if (ret) { - pr_err("VIF%u.%u: CMD %d failed: %u\n", vif->mac->macid, - vif->vifid, cmd_type, res_code); + sizeof(*resp), 
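
Aside from the mechanical result-code conversions, the qtnf_sta_info_parse_rate() hunk above fixes a real reporting gap: the firmware's short-guard-interval flag was silently dropped. Isolated, the added propagation is:

    /* SGI shortens the OFDM guard interval from 800 ns to 400 ns, raising
     * the HT/VHT PHY rate by a factor of 10/9, so losing this flag made
     * cfg80211 understate station bitrates by roughly 10%. */
    if (rate_src->flags & QLINK_STA_INFO_RATE_FLAG_SHORT_GI)
    	rate_dst->flags |= RATE_INFO_FLAGS_SHORT_GI;
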
NULL); + if (ret) goto out; - } resp = (const struct qlink_resp_manage_intf *)resp_skb->data; ether_addr_copy(vif->mac_addr, resp->intf_info.mac_addr); @@ -868,7 +797,6 @@ int qtnf_cmd_send_del_intf(struct qtnf_vif *vif) { struct sk_buff *cmd_skb; struct qlink_cmd_manage_intf *cmd; - u16 res_code = QLINK_CMD_RESULT_OK; int ret = 0; cmd_skb = qtnf_cmd_alloc_new_cmdskb(vif->mac->macid, vif->vifid, @@ -897,17 +825,9 @@ int qtnf_cmd_send_del_intf(struct qtnf_vif *vif) eth_zero_addr(cmd->intf_info.mac_addr); - ret = qtnf_cmd_send(vif->mac->bus, cmd_skb, &res_code); - - if (unlikely(ret)) - goto out; - - if (unlikely(res_code != QLINK_CMD_RESULT_OK)) { - pr_err("VIF%u.%u: CMD failed: %u\n", vif->mac->macid, - vif->vifid, res_code); - ret = -EFAULT; + ret = qtnf_cmd_send(vif->mac->bus, cmd_skb); + if (ret) goto out; - } out: qtnf_bus_unlock(vif->mac->bus); @@ -1353,8 +1273,7 @@ static int qtnf_parse_variable_mac_info(struct qtnf_wmac *mac, ext_capa_mask = NULL; } - kfree(mac->macinfo.extended_capabilities); - kfree(mac->macinfo.extended_capabilities_mask); + qtnf_mac_ext_caps_free(mac); mac->macinfo.extended_capabilities = ext_capa; mac->macinfo.extended_capabilities_mask = ext_capa_mask; mac->macinfo.extended_capabilities_len = ext_capa_len; @@ -1732,7 +1651,6 @@ int qtnf_cmd_get_mac_info(struct qtnf_wmac *mac) struct sk_buff *cmd_skb, *resp_skb = NULL; const struct qlink_resp_get_mac_info *resp; size_t var_data_len; - u16 res_code = QLINK_CMD_RESULT_OK; int ret = 0; cmd_skb = qtnf_cmd_alloc_new_cmdskb(mac->macid, QLINK_VIFID_RSVD, @@ -1742,18 +1660,11 @@ int qtnf_cmd_get_mac_info(struct qtnf_wmac *mac) return -ENOMEM; qtnf_bus_lock(mac->bus); - - ret = qtnf_cmd_send_with_reply(mac->bus, cmd_skb, &resp_skb, &res_code, + ret = qtnf_cmd_send_with_reply(mac->bus, cmd_skb, &resp_skb, sizeof(*resp), &var_data_len); - if (unlikely(ret)) + if (ret) goto out; - if (unlikely(res_code != QLINK_CMD_RESULT_OK)) { - pr_err("MAC%u: CMD failed: %u\n", mac->macid, res_code); - ret = -EFAULT; - goto out; - } - resp = (const struct qlink_resp_get_mac_info *)resp_skb->data; qtnf_cmd_resp_proc_mac_info(mac, resp); ret = qtnf_parse_variable_mac_info(mac, resp->var_info, var_data_len); @@ -1769,7 +1680,6 @@ int qtnf_cmd_get_hw_info(struct qtnf_bus *bus) { struct sk_buff *cmd_skb, *resp_skb = NULL; const struct qlink_resp_get_hw_info *resp; - u16 res_code = QLINK_CMD_RESULT_OK; int ret = 0; size_t info_len; @@ -1780,18 +1690,10 @@ int qtnf_cmd_get_hw_info(struct qtnf_bus *bus) return -ENOMEM; qtnf_bus_lock(bus); - - ret = qtnf_cmd_send_with_reply(bus, cmd_skb, &resp_skb, &res_code, + ret = qtnf_cmd_send_with_reply(bus, cmd_skb, &resp_skb, sizeof(*resp), &info_len); - - if (unlikely(ret)) - goto out; - - if (unlikely(res_code != QLINK_CMD_RESULT_OK)) { - pr_err("cmd exec failed: 0x%.4X\n", res_code); - ret = -EFAULT; + if (ret) goto out; - } resp = (const struct qlink_resp_get_hw_info *)resp_skb->data; ret = qtnf_cmd_resp_proc_hw_info(bus, resp, info_len); @@ -1810,7 +1712,6 @@ int qtnf_cmd_band_info_get(struct qtnf_wmac *mac, size_t info_len; struct qlink_cmd_band_info_get *cmd; struct qlink_resp_band_info_get *resp; - u16 res_code = QLINK_CMD_RESULT_OK; int ret = 0; u8 qband; @@ -1838,18 +1739,10 @@ int qtnf_cmd_band_info_get(struct qtnf_wmac *mac, cmd->band = qband; qtnf_bus_lock(mac->bus); - - ret = qtnf_cmd_send_with_reply(mac->bus, cmd_skb, &resp_skb, &res_code, + ret = qtnf_cmd_send_with_reply(mac->bus, cmd_skb, &resp_skb, sizeof(*resp), &info_len); - - if (unlikely(ret)) - goto out; - - if (unlikely(res_code != 
QLINK_CMD_RESULT_OK)) { - pr_err("MAC%u: CMD failed: %u\n", mac->macid, res_code); - ret = -EFAULT; + if (ret) goto out; - } resp = (struct qlink_resp_band_info_get *)resp_skb->data; if (resp->band != qband) { @@ -1873,7 +1766,6 @@ int qtnf_cmd_send_get_phy_params(struct qtnf_wmac *mac) struct sk_buff *cmd_skb, *resp_skb = NULL; size_t response_size; struct qlink_resp_phy_params *resp; - u16 res_code = QLINK_CMD_RESULT_OK; int ret = 0; cmd_skb = qtnf_cmd_alloc_new_cmdskb(mac->macid, 0, @@ -1883,19 +1775,11 @@ int qtnf_cmd_send_get_phy_params(struct qtnf_wmac *mac) return -ENOMEM; qtnf_bus_lock(mac->bus); - - ret = qtnf_cmd_send_with_reply(mac->bus, cmd_skb, &resp_skb, &res_code, + ret = qtnf_cmd_send_with_reply(mac->bus, cmd_skb, &resp_skb, sizeof(*resp), &response_size); - - if (unlikely(ret)) + if (ret) goto out; - if (unlikely(res_code != QLINK_CMD_RESULT_OK)) { - pr_err("MAC%u: CMD failed: %u\n", mac->macid, res_code); - ret = -EFAULT; - goto out; - } - resp = (struct qlink_resp_phy_params *)resp_skb->data; ret = qtnf_cmd_resp_proc_phy_params(mac, resp->info, response_size); @@ -1910,7 +1794,6 @@ int qtnf_cmd_send_update_phy_params(struct qtnf_wmac *mac, u32 changed) { struct wiphy *wiphy = priv_to_wiphy(mac); struct sk_buff *cmd_skb; - u16 res_code = QLINK_CMD_RESULT_OK; int ret = 0; cmd_skb = qtnf_cmd_alloc_new_cmdskb(mac->macid, 0, @@ -1931,26 +1814,19 @@ int qtnf_cmd_send_update_phy_params(struct qtnf_wmac *mac, u32 changed) qtnf_cmd_skb_put_tlv_u8(cmd_skb, QTN_TLV_ID_COVERAGE_CLASS, wiphy->coverage_class); - ret = qtnf_cmd_send(mac->bus, cmd_skb, &res_code); - - if (unlikely(ret)) - goto out; - - if (unlikely(res_code != QLINK_CMD_RESULT_OK)) { - pr_err("MAC%u: CMD failed: %u\n", mac->macid, res_code); - ret = -EFAULT; + ret = qtnf_cmd_send(mac->bus, cmd_skb); + if (ret) goto out; - } out: qtnf_bus_unlock(mac->bus); + return ret; } int qtnf_cmd_send_init_fw(struct qtnf_bus *bus) { struct sk_buff *cmd_skb; - u16 res_code = QLINK_CMD_RESULT_OK; int ret = 0; cmd_skb = qtnf_cmd_alloc_new_cmdskb(QLINK_MACID_RSVD, QLINK_VIFID_RSVD, @@ -1960,20 +1836,13 @@ int qtnf_cmd_send_init_fw(struct qtnf_bus *bus) return -ENOMEM; qtnf_bus_lock(bus); - - ret = qtnf_cmd_send(bus, cmd_skb, &res_code); - - if (unlikely(ret)) + ret = qtnf_cmd_send(bus, cmd_skb); + if (ret) goto out; - if (unlikely(res_code != QLINK_CMD_RESULT_OK)) { - pr_err("cmd exec failed: 0x%.4X\n", res_code); - ret = -EFAULT; - goto out; - } - out: qtnf_bus_unlock(bus); + return ret; } @@ -1988,9 +1857,7 @@ void qtnf_cmd_send_deinit_fw(struct qtnf_bus *bus) return; qtnf_bus_lock(bus); - - qtnf_cmd_send(bus, cmd_skb, NULL); - + qtnf_cmd_send(bus, cmd_skb); qtnf_bus_unlock(bus); } @@ -1999,7 +1866,6 @@ int qtnf_cmd_send_add_key(struct qtnf_vif *vif, u8 key_index, bool pairwise, { struct sk_buff *cmd_skb; struct qlink_cmd_add_key *cmd; - u16 res_code = QLINK_CMD_RESULT_OK; int ret = 0; cmd_skb = qtnf_cmd_alloc_new_cmdskb(vif->mac->macid, vif->vifid, @@ -2031,19 +1897,13 @@ int qtnf_cmd_send_add_key(struct qtnf_vif *vif, u8 key_index, bool pairwise, params->seq, params->seq_len); - ret = qtnf_cmd_send(vif->mac->bus, cmd_skb, &res_code); - if (unlikely(ret)) + ret = qtnf_cmd_send(vif->mac->bus, cmd_skb); + if (ret) goto out; - if (unlikely(res_code != QLINK_CMD_RESULT_OK)) { - pr_err("VIF%u.%u: CMD failed: %u\n", - vif->mac->macid, vif->vifid, res_code); - ret = -EFAULT; - goto out; - } - out: qtnf_bus_unlock(vif->mac->bus); + return ret; } @@ -2052,7 +1912,6 @@ int qtnf_cmd_send_del_key(struct qtnf_vif *vif, u8 key_index, bool 
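
After the conversion, every reply-less command in this file collapses to one shape: allocate, fill, lock, send, unlock, return. Reduced to a skeleton (QLINK_CMD_FOO and the field fill-in are placeholders, not driver code; the four-argument alloc signature matches the calls visible in these hunks):

    int qtnf_cmd_send_foo(struct qtnf_vif *vif)
    {
    	struct sk_buff *cmd_skb;
    	int ret;

    	cmd_skb = qtnf_cmd_alloc_new_cmdskb(vif->mac->macid, vif->vifid,
    					    QLINK_CMD_FOO,
    					    sizeof(struct qlink_cmd));
    	if (!cmd_skb)
    		return -ENOMEM;

    	/* ... fill command-specific fields, append TLVs ... */

    	qtnf_bus_lock(vif->mac->bus);
    	ret = qtnf_cmd_send(vif->mac->bus, cmd_skb);
    	qtnf_bus_unlock(vif->mac->bus);

    	return ret;
    }

The surviving "if (ret) goto out;" directly above an "out:" label in several of the converted functions is an artifact of the mechanical rewrite; harmless, just redundant.
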
pairwise, { struct sk_buff *cmd_skb; struct qlink_cmd_del_key *cmd; - u16 res_code = QLINK_CMD_RESULT_OK; int ret = 0; cmd_skb = qtnf_cmd_alloc_new_cmdskb(vif->mac->macid, vif->vifid, @@ -2072,19 +1931,14 @@ int qtnf_cmd_send_del_key(struct qtnf_vif *vif, u8 key_index, bool pairwise, cmd->key_index = key_index; cmd->pairwise = pairwise; - ret = qtnf_cmd_send(vif->mac->bus, cmd_skb, &res_code); - if (unlikely(ret)) - goto out; - if (unlikely(res_code != QLINK_CMD_RESULT_OK)) { - pr_err("VIF%u.%u: CMD failed: %u\n", - vif->mac->macid, vif->vifid, res_code); - ret = -EFAULT; + ret = qtnf_cmd_send(vif->mac->bus, cmd_skb); + if (ret) goto out; - } out: qtnf_bus_unlock(vif->mac->bus); + return ret; } @@ -2093,7 +1947,6 @@ int qtnf_cmd_send_set_default_key(struct qtnf_vif *vif, u8 key_index, { struct sk_buff *cmd_skb; struct qlink_cmd_set_def_key *cmd; - u16 res_code = QLINK_CMD_RESULT_OK; int ret = 0; cmd_skb = qtnf_cmd_alloc_new_cmdskb(vif->mac->macid, vif->vifid, @@ -2108,19 +1961,14 @@ int qtnf_cmd_send_set_default_key(struct qtnf_vif *vif, u8 key_index, cmd->key_index = key_index; cmd->unicast = unicast; cmd->multicast = multicast; - ret = qtnf_cmd_send(vif->mac->bus, cmd_skb, &res_code); - if (unlikely(ret)) - goto out; - if (unlikely(res_code != QLINK_CMD_RESULT_OK)) { - pr_err("VIF%u.%u: CMD failed: %u\n", vif->mac->macid, - vif->vifid, res_code); - ret = -EFAULT; + ret = qtnf_cmd_send(vif->mac->bus, cmd_skb); + if (ret) goto out; - } out: qtnf_bus_unlock(vif->mac->bus); + return ret; } @@ -2128,7 +1976,6 @@ int qtnf_cmd_send_set_default_mgmt_key(struct qtnf_vif *vif, u8 key_index) { struct sk_buff *cmd_skb; struct qlink_cmd_set_def_mgmt_key *cmd; - u16 res_code = QLINK_CMD_RESULT_OK; int ret = 0; cmd_skb = qtnf_cmd_alloc_new_cmdskb(vif->mac->macid, vif->vifid, @@ -2141,19 +1988,14 @@ int qtnf_cmd_send_set_default_mgmt_key(struct qtnf_vif *vif, u8 key_index) cmd = (struct qlink_cmd_set_def_mgmt_key *)cmd_skb->data; cmd->key_index = key_index; - ret = qtnf_cmd_send(vif->mac->bus, cmd_skb, &res_code); - if (unlikely(ret)) - goto out; - if (unlikely(res_code != QLINK_CMD_RESULT_OK)) { - pr_err("VIF%u.%u: CMD failed: %u\n", vif->mac->macid, - vif->vifid, res_code); - ret = -EFAULT; + ret = qtnf_cmd_send(vif->mac->bus, cmd_skb); + if (ret) goto out; - } out: qtnf_bus_unlock(vif->mac->bus); + return ret; } @@ -2183,7 +2025,6 @@ int qtnf_cmd_send_change_sta(struct qtnf_vif *vif, const u8 *mac, { struct sk_buff *cmd_skb; struct qlink_cmd_change_sta *cmd; - u16 res_code = QLINK_CMD_RESULT_OK; int ret = 0; cmd_skb = qtnf_cmd_alloc_new_cmdskb(vif->mac->macid, vif->vifid, @@ -2214,19 +2055,13 @@ int qtnf_cmd_send_change_sta(struct qtnf_vif *vif, const u8 *mac, goto out; } - ret = qtnf_cmd_send(vif->mac->bus, cmd_skb, &res_code); - if (unlikely(ret)) - goto out; - - if (unlikely(res_code != QLINK_CMD_RESULT_OK)) { - pr_err("VIF%u.%u: CMD failed: %u\n", vif->mac->macid, - vif->vifid, res_code); - ret = -EFAULT; + ret = qtnf_cmd_send(vif->mac->bus, cmd_skb); + if (ret) goto out; - } out: qtnf_bus_unlock(vif->mac->bus); + return ret; } @@ -2235,7 +2070,6 @@ int qtnf_cmd_send_del_sta(struct qtnf_vif *vif, { struct sk_buff *cmd_skb; struct qlink_cmd_del_sta *cmd; - u16 res_code = QLINK_CMD_RESULT_OK; int ret = 0; cmd_skb = qtnf_cmd_alloc_new_cmdskb(vif->mac->macid, vif->vifid, @@ -2256,19 +2090,13 @@ int qtnf_cmd_send_del_sta(struct qtnf_vif *vif, cmd->subtype = params->subtype; cmd->reason_code = cpu_to_le16(params->reason_code); - ret = qtnf_cmd_send(vif->mac->bus, cmd_skb, &res_code); - if 
(unlikely(ret)) + ret = qtnf_cmd_send(vif->mac->bus, cmd_skb); + if (ret) goto out; - if (unlikely(res_code != QLINK_CMD_RESULT_OK)) { - pr_err("VIF%u.%u: CMD failed: %u\n", vif->mac->macid, - vif->vifid, res_code); - ret = -EFAULT; - goto out; - } - out: qtnf_bus_unlock(vif->mac->bus); + return ret; } @@ -2312,7 +2140,6 @@ static void qtnf_cmd_randmac_tlv_add(struct sk_buff *cmd_skb, int qtnf_cmd_send_scan(struct qtnf_wmac *mac) { struct sk_buff *cmd_skb; - u16 res_code = QLINK_CMD_RESULT_OK; struct ieee80211_channel *sc; struct cfg80211_scan_request *scan_req = mac->scan_req; int n_channels; @@ -2370,20 +2197,28 @@ int qtnf_cmd_send_scan(struct qtnf_wmac *mac) scan_req->mac_addr_mask); } - ret = qtnf_cmd_send(mac->bus, cmd_skb, &res_code); + if (scan_req->flags & NL80211_SCAN_FLAG_FLUSH) { + pr_debug("MAC%u: flush cache before scan\n", mac->macid); - if (unlikely(ret)) - goto out; + qtnf_cmd_skb_put_tlv_tag(cmd_skb, QTN_TLV_ID_SCAN_FLUSH); + } - pr_debug("MAC%u: scan started\n", mac->macid); + if (scan_req->duration) { + pr_debug("MAC%u: %s scan duration %u\n", mac->macid, + scan_req->duration_mandatory ? "mandatory" : "max", + scan_req->duration); - if (unlikely(res_code != QLINK_CMD_RESULT_OK)) { - pr_err("MAC%u: CMD failed: %u\n", mac->macid, res_code); - ret = -EFAULT; - goto out; + qtnf_cmd_skb_put_tlv_u16(cmd_skb, QTN_TLV_ID_SCAN_DWELL, + scan_req->duration); } + + ret = qtnf_cmd_send(mac->bus, cmd_skb); + if (ret) + goto out; + out: qtnf_bus_unlock(mac->bus); + return ret; } @@ -2393,7 +2228,6 @@ int qtnf_cmd_send_connect(struct qtnf_vif *vif, struct sk_buff *cmd_skb; struct qlink_cmd_connect *cmd; struct qlink_auth_encr *aen; - u16 res_code = QLINK_CMD_RESULT_OK; int ret; int i; u32 connect_flags = 0; @@ -2474,20 +2308,13 @@ int qtnf_cmd_send_connect(struct qtnf_vif *vif, qtnf_cmd_channel_tlv_add(cmd_skb, sme->channel); qtnf_bus_lock(vif->mac->bus); - - ret = qtnf_cmd_send(vif->mac->bus, cmd_skb, &res_code); - - if (unlikely(ret)) + ret = qtnf_cmd_send(vif->mac->bus, cmd_skb); + if (ret) goto out; - if (unlikely(res_code != QLINK_CMD_RESULT_OK)) { - pr_err("VIF%u.%u: CMD failed: %u\n", vif->mac->macid, - vif->vifid, res_code); - ret = -EFAULT; - goto out; - } out: qtnf_bus_unlock(vif->mac->bus); + return ret; } @@ -2495,7 +2322,6 @@ int qtnf_cmd_send_disconnect(struct qtnf_vif *vif, u16 reason_code) { struct sk_buff *cmd_skb; struct qlink_cmd_disconnect *cmd; - u16 res_code = QLINK_CMD_RESULT_OK; int ret; cmd_skb = qtnf_cmd_alloc_new_cmdskb(vif->mac->macid, vif->vifid, @@ -2509,19 +2335,13 @@ int qtnf_cmd_send_disconnect(struct qtnf_vif *vif, u16 reason_code) cmd = (struct qlink_cmd_disconnect *)cmd_skb->data; cmd->reason = cpu_to_le16(reason_code); - ret = qtnf_cmd_send(vif->mac->bus, cmd_skb, &res_code); - - if (unlikely(ret)) + ret = qtnf_cmd_send(vif->mac->bus, cmd_skb); + if (ret) goto out; - if (unlikely(res_code != QLINK_CMD_RESULT_OK)) { - pr_err("VIF%u.%u: CMD failed: %u\n", vif->mac->macid, - vif->vifid, res_code); - ret = -EFAULT; - goto out; - } out: qtnf_bus_unlock(vif->mac->bus); + return ret; } @@ -2529,7 +2349,6 @@ int qtnf_cmd_send_updown_intf(struct qtnf_vif *vif, bool up) { struct sk_buff *cmd_skb; struct qlink_cmd_updown *cmd; - u16 res_code = QLINK_CMD_RESULT_OK; int ret; cmd_skb = qtnf_cmd_alloc_new_cmdskb(vif->mac->macid, vif->vifid, @@ -2542,20 +2361,13 @@ int qtnf_cmd_send_updown_intf(struct qtnf_vif *vif, bool up) cmd->if_up = !!up; qtnf_bus_lock(vif->mac->bus); - - ret = qtnf_cmd_send(vif->mac->bus, cmd_skb, &res_code); - - if (unlikely(ret)) + ret = 
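
The scan hunk above does more than delete dead post-send checks; it adds two new knobs, each expressed as a qlink TLV appended to the scan command: NL80211_SCAN_FLAG_FLUSH becomes a zero-length QTN_TLV_ID_SCAN_FLUSH tag, and a requested scan duration rides in a u16 QTN_TLV_ID_SCAN_DWELL TLV. The qlink.h hunk further down also grows QLINK_HW_CAPAB_SCAN_DWELL (and an unrelated QLINK_HW_CAPAB_OBSS_SCAN bit); presumably the host is meant to gate the dwell TLV on the former, though this hunk adds it unconditionally whenever scan_req->duration is set. On the wire the flush tag is just a bare TLV header, per the qtnf_cmd_skb_put_tlv_tag() helper introduced in qlink_util.h below:

    /* Zero-length TLV: presence alone carries the meaning. */
    struct qlink_tlv_hdr hdr = {
    	.type = cpu_to_le16(QTN_TLV_ID_SCAN_FLUSH),
    	.len  = cpu_to_le16(0),
    };
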
qtnf_cmd_send(vif->mac->bus, cmd_skb); + if (ret) goto out; - if (unlikely(res_code != QLINK_CMD_RESULT_OK)) { - pr_err("VIF%u.%u: CMD failed: %u\n", vif->mac->macid, - vif->vifid, res_code); - ret = -EFAULT; - goto out; - } out: qtnf_bus_unlock(vif->mac->bus); + return ret; } @@ -2563,7 +2375,6 @@ int qtnf_cmd_reg_notify(struct qtnf_bus *bus, struct regulatory_request *req) { struct sk_buff *cmd_skb; int ret; - u16 res_code; struct qlink_cmd_reg_notify *cmd; cmd_skb = qtnf_cmd_alloc_new_cmdskb(QLINK_MACID_RSVD, QLINK_VIFID_RSVD, @@ -2604,29 +2415,10 @@ int qtnf_cmd_reg_notify(struct qtnf_bus *bus, struct regulatory_request *req) } qtnf_bus_lock(bus); - - ret = qtnf_cmd_send(bus, cmd_skb, &res_code); + ret = qtnf_cmd_send(bus, cmd_skb); if (ret) goto out; - switch (res_code) { - case QLINK_CMD_RESULT_ENOTSUPP: - pr_warn("reg update not supported\n"); - ret = -EOPNOTSUPP; - break; - case QLINK_CMD_RESULT_EALREADY: - pr_info("regulatory domain is already set to %c%c", - req->alpha2[0], req->alpha2[1]); - ret = -EALREADY; - break; - case QLINK_CMD_RESULT_OK: - ret = 0; - break; - default: - ret = -EFAULT; - break; - } - out: qtnf_bus_unlock(bus); @@ -2640,7 +2432,6 @@ int qtnf_cmd_get_chan_stats(struct qtnf_wmac *mac, u16 channel, struct qlink_cmd_get_chan_stats *cmd; struct qlink_resp_get_chan_stats *resp; size_t var_data_len; - u16 res_code = QLINK_CMD_RESULT_OK; int ret = 0; cmd_skb = qtnf_cmd_alloc_new_cmdskb(mac->macid, QLINK_VIFID_RSVD, @@ -2654,25 +2445,10 @@ int qtnf_cmd_get_chan_stats(struct qtnf_wmac *mac, u16 channel, cmd = (struct qlink_cmd_get_chan_stats *)cmd_skb->data; cmd->channel = cpu_to_le16(channel); - ret = qtnf_cmd_send_with_reply(mac->bus, cmd_skb, &resp_skb, &res_code, + ret = qtnf_cmd_send_with_reply(mac->bus, cmd_skb, &resp_skb, sizeof(*resp), &var_data_len); - if (unlikely(ret)) { - qtnf_bus_unlock(mac->bus); - return ret; - } - - if (unlikely(res_code != QLINK_CMD_RESULT_OK)) { - switch (res_code) { - case QLINK_CMD_RESULT_ENOTFOUND: - ret = -ENOENT; - break; - default: - pr_err("cmd exec failed: 0x%.4X\n", res_code); - ret = -EFAULT; - break; - } + if (ret) goto out; - } resp = (struct qlink_resp_get_chan_stats *)resp_skb->data; ret = qtnf_cmd_resp_proc_chan_stat_info(stats, resp->info, @@ -2681,6 +2457,7 @@ int qtnf_cmd_get_chan_stats(struct qtnf_wmac *mac, u16 channel, out: qtnf_bus_unlock(mac->bus); consume_skb(resp_skb); + return ret; } @@ -2690,7 +2467,6 @@ int qtnf_cmd_send_chan_switch(struct qtnf_vif *vif, struct qtnf_wmac *mac = vif->mac; struct qlink_cmd_chan_switch *cmd; struct sk_buff *cmd_skb; - u16 res_code = QLINK_CMD_RESULT_OK; int ret; cmd_skb = qtnf_cmd_alloc_new_cmdskb(mac->macid, vif->vifid, @@ -2707,32 +2483,13 @@ int qtnf_cmd_send_chan_switch(struct qtnf_vif *vif, cmd->block_tx = params->block_tx; cmd->beacon_count = params->count; - ret = qtnf_cmd_send(mac->bus, cmd_skb, &res_code); - - if (unlikely(ret)) + ret = qtnf_cmd_send(mac->bus, cmd_skb); + if (ret) goto out; - switch (res_code) { - case QLINK_CMD_RESULT_OK: - ret = 0; - break; - case QLINK_CMD_RESULT_ENOTFOUND: - ret = -ENOENT; - break; - case QLINK_CMD_RESULT_ENOTSUPP: - ret = -EOPNOTSUPP; - break; - case QLINK_CMD_RESULT_EALREADY: - ret = -EALREADY; - break; - case QLINK_CMD_RESULT_INVALID: - default: - ret = -EFAULT; - break; - } - out: qtnf_bus_unlock(mac->bus); + return ret; } @@ -2742,7 +2499,6 @@ int qtnf_cmd_get_channel(struct qtnf_vif *vif, struct cfg80211_chan_def *chdef) const struct qlink_resp_channel_get *resp; struct sk_buff *cmd_skb; struct sk_buff *resp_skb = NULL; - 
u16 res_code = QLINK_CMD_RESULT_OK; int ret; cmd_skb = qtnf_cmd_alloc_new_cmdskb(vif->mac->macid, vif->vifid, @@ -2752,25 +2508,18 @@ int qtnf_cmd_get_channel(struct qtnf_vif *vif, struct cfg80211_chan_def *chdef) return -ENOMEM; qtnf_bus_lock(bus); - - ret = qtnf_cmd_send_with_reply(bus, cmd_skb, &resp_skb, &res_code, + ret = qtnf_cmd_send_with_reply(bus, cmd_skb, &resp_skb, sizeof(*resp), NULL); - - qtnf_bus_unlock(bus); - - if (unlikely(ret)) + if (ret) goto out; - if (unlikely(res_code != QLINK_CMD_RESULT_OK)) { - ret = -ENODATA; - goto out; - } - resp = (const struct qlink_resp_channel_get *)resp_skb->data; qlink_chandef_q2cfg(priv_to_wiphy(vif->mac), &resp->chan, chdef); out: + qtnf_bus_unlock(bus); consume_skb(resp_skb); + return ret; } @@ -2782,7 +2531,6 @@ int qtnf_cmd_start_cac(const struct qtnf_vif *vif, struct sk_buff *cmd_skb; struct qlink_cmd_start_cac *cmd; int ret; - u16 res_code; cmd_skb = qtnf_cmd_alloc_new_cmdskb(vif->mac->macid, vif->vifid, QLINK_CMD_START_CAC, @@ -2795,19 +2543,12 @@ int qtnf_cmd_start_cac(const struct qtnf_vif *vif, qlink_chandef_cfg2q(chdef, &cmd->chan); qtnf_bus_lock(bus); - ret = qtnf_cmd_send(bus, cmd_skb, &res_code); - qtnf_bus_unlock(bus); - + ret = qtnf_cmd_send(bus, cmd_skb); if (ret) - return ret; + goto out; - switch (res_code) { - case QLINK_CMD_RESULT_OK: - break; - default: - ret = -EOPNOTSUPP; - break; - } +out: + qtnf_bus_unlock(bus); return ret; } @@ -2819,7 +2560,6 @@ int qtnf_cmd_set_mac_acl(const struct qtnf_vif *vif, struct sk_buff *cmd_skb; struct qlink_tlv_hdr *tlv; size_t acl_size = qtnf_cmd_acl_data_size(params); - u16 res_code; int ret; cmd_skb = qtnf_cmd_alloc_new_cmdskb(vif->mac->macid, vif->vifid, @@ -2834,22 +2574,12 @@ int qtnf_cmd_set_mac_acl(const struct qtnf_vif *vif, qlink_acl_data_cfg2q(params, (struct qlink_acl_data *)tlv->val); qtnf_bus_lock(bus); - ret = qtnf_cmd_send(bus, cmd_skb, &res_code); - qtnf_bus_unlock(bus); - - if (unlikely(ret)) - return ret; + ret = qtnf_cmd_send(bus, cmd_skb); + if (ret) + goto out; - switch (res_code) { - case QLINK_CMD_RESULT_OK: - break; - case QLINK_CMD_RESULT_INVALID: - ret = -EINVAL; - break; - default: - ret = -EOPNOTSUPP; - break; - } +out: + qtnf_bus_unlock(bus); return ret; } @@ -2858,7 +2588,6 @@ int qtnf_cmd_send_pm_set(const struct qtnf_vif *vif, u8 pm_mode, int timeout) { struct qtnf_bus *bus = vif->mac->bus; struct sk_buff *cmd_skb; - u16 res_code = QLINK_CMD_RESULT_OK; struct qlink_cmd_pm_set *cmd; int ret = 0; @@ -2873,18 +2602,13 @@ int qtnf_cmd_send_pm_set(const struct qtnf_vif *vif, u8 pm_mode, int timeout) qtnf_bus_lock(bus); - ret = qtnf_cmd_send(bus, cmd_skb, &res_code); - - if (unlikely(ret)) + ret = qtnf_cmd_send(bus, cmd_skb); + if (ret) goto out; - if (unlikely(res_code != QLINK_CMD_RESULT_OK)) { - pr_err("cmd exec failed: 0x%.4X\n", res_code); - ret = -EFAULT; - } - out: qtnf_bus_unlock(bus); + return ret; } @@ -2893,7 +2617,6 @@ int qtnf_cmd_send_wowlan_set(const struct qtnf_vif *vif, { struct qtnf_bus *bus = vif->mac->bus; struct sk_buff *cmd_skb; - u16 res_code = QLINK_CMD_RESULT_OK; struct qlink_cmd_wowlan_set *cmd; u32 triggers = 0; int count = 0; @@ -2929,16 +2652,10 @@ int qtnf_cmd_send_wowlan_set(const struct qtnf_vif *vif, cmd->triggers = cpu_to_le32(triggers); - ret = qtnf_cmd_send(bus, cmd_skb, &res_code); - - if (unlikely(ret)) + ret = qtnf_cmd_send(bus, cmd_skb); + if (ret) goto out; - if (unlikely(res_code != QLINK_CMD_RESULT_OK)) { - pr_err("cmd exec failed: 0x%.4X\n", res_code); - ret = -EFAULT; - } - out: qtnf_bus_unlock(bus); return ret; 
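
One behavioral nuance in qtnf_cmd_get_channel() above: the bus unlock used to happen immediately after the send, before the response was parsed; it now lives in the shared out: label, so every branch funnels through one exit that owns both the unlock and the skb release. This leans on consume_skb() being NULL-safe for the early-failure case, where resp_skb was never assigned:

    out:
    	qtnf_bus_unlock(bus);
    	consume_skb(resp_skb);	/* no-op when resp_skb is still NULL */
    	return ret;

qtnf_cmd_start_cac() and qtnf_cmd_set_mac_acl() get the same single-exit treatment, and as a side effect now propagate the decoded errno instead of collapsing most failures to -EOPNOTSUPP.
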
diff --git a/drivers/net/wireless/quantenna/qtnfmac/core.c b/drivers/net/wireless/quantenna/qtnfmac/core.c index 19abbc4e23e0..5d18a4a917c9 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/core.c +++ b/drivers/net/wireless/quantenna/qtnfmac/core.c @@ -304,6 +304,19 @@ void qtnf_mac_iface_comb_free(struct qtnf_wmac *mac) } } +void qtnf_mac_ext_caps_free(struct qtnf_wmac *mac) +{ + if (mac->macinfo.extended_capabilities_len) { + kfree(mac->macinfo.extended_capabilities); + mac->macinfo.extended_capabilities = NULL; + + kfree(mac->macinfo.extended_capabilities_mask); + mac->macinfo.extended_capabilities_mask = NULL; + + mac->macinfo.extended_capabilities_len = 0; + } +} + static void qtnf_vif_reset_handler(struct work_struct *work) { struct qtnf_vif *vif = container_of(work, struct qtnf_vif, reset_work); @@ -370,6 +383,7 @@ static void qtnf_mac_scan_timeout(struct work_struct *work) static struct qtnf_wmac *qtnf_core_mac_alloc(struct qtnf_bus *bus, unsigned int macid) { + struct qtnf_vif *vif; struct wiphy *wiphy; struct qtnf_wmac *mac; unsigned int i; @@ -382,18 +396,20 @@ static struct qtnf_wmac *qtnf_core_mac_alloc(struct qtnf_bus *bus, mac->macid = macid; mac->bus = bus; + mutex_init(&mac->mac_lock); + INIT_DELAYED_WORK(&mac->scan_timeout, qtnf_mac_scan_timeout); for (i = 0; i < QTNF_MAX_INTF; i++) { - memset(&mac->iflist[i], 0, sizeof(struct qtnf_vif)); - mac->iflist[i].wdev.iftype = NL80211_IFTYPE_UNSPECIFIED; - mac->iflist[i].mac = mac; - mac->iflist[i].vifid = i; - qtnf_sta_list_init(&mac->iflist[i].sta_list); - mutex_init(&mac->mac_lock); - INIT_DELAYED_WORK(&mac->scan_timeout, qtnf_mac_scan_timeout); - mac->iflist[i].stats64 = - netdev_alloc_pcpu_stats(struct pcpu_sw_netstats); - if (!mac->iflist[i].stats64) + vif = &mac->iflist[i]; + + memset(vif, 0, sizeof(*vif)); + vif->wdev.iftype = NL80211_IFTYPE_UNSPECIFIED; + vif->mac = mac; + vif->vifid = i; + qtnf_sta_list_init(&vif->sta_list); + + vif->stats64 = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats); + if (!vif->stats64) pr_warn("VIF%u.%u: per cpu stats allocation failed\n", macid, i); } @@ -493,8 +509,7 @@ static void qtnf_core_mac_detach(struct qtnf_bus *bus, unsigned int macid) } qtnf_mac_iface_comb_free(mac); - kfree(mac->macinfo.extended_capabilities); - kfree(mac->macinfo.extended_capabilities_mask); + qtnf_mac_ext_caps_free(mac); kfree(mac->macinfo.wowlan); wiphy_free(wiphy); bus->mac[macid] = NULL; diff --git a/drivers/net/wireless/quantenna/qtnfmac/core.h b/drivers/net/wireless/quantenna/qtnfmac/core.h index a1e338a1f055..293055049caa 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/core.h +++ b/drivers/net/wireless/quantenna/qtnfmac/core.h @@ -64,12 +64,6 @@ struct qtnf_sta_list { atomic_t size; }; -enum qtnf_sta_state { - QTNF_STA_DISCONNECTED, - QTNF_STA_CONNECTING, - QTNF_STA_CONNECTED -}; - struct qtnf_vif { struct wireless_dev wdev; u8 bssid[ETH_ALEN]; @@ -77,7 +71,6 @@ struct qtnf_vif { u8 vifid; u8 bss_priority; u8 bss_status; - enum qtnf_sta_state sta_state; u16 mgmt_frames_bitmask; struct net_device *netdev; struct qtnf_wmac *mac; @@ -151,6 +144,7 @@ struct qtnf_hw_info { struct qtnf_vif *qtnf_mac_get_free_vif(struct qtnf_wmac *mac); struct qtnf_vif *qtnf_mac_get_base_vif(struct qtnf_wmac *mac); void qtnf_mac_iface_comb_free(struct qtnf_wmac *mac); +void qtnf_mac_ext_caps_free(struct qtnf_wmac *mac); struct wiphy *qtnf_wiphy_allocate(struct qtnf_bus *bus); int qtnf_core_net_attach(struct qtnf_wmac *mac, struct qtnf_vif *priv, const char *name, unsigned char name_assign_type); diff --git 
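
Two separate improvements hide in the core.c hunks above. First, qtnf_mac_ext_caps_free() centralizes releasing the extended-capability buffers and, unlike the open-coded kfree() pairs it replaces, also NULLs the pointers and zeroes the stored length, keeping both call sites (the MAC-info re-parse earlier in this diff and MAC detach) double-free safe. Second, qtnf_core_mac_alloc() stops re-running MAC-wide initialization inside the per-VIF loop: mutex_init() and INIT_DELAYED_WORK() were needlessly re-executed once per VIF instead of once per MAC, which is wasteful and, for the mutex, wrong in principle (lockdep re-registers the lock class on every init). The corrected shape, reduced:

    mutex_init(&mac->mac_lock);		/* once per MAC ... */
    INIT_DELAYED_WORK(&mac->scan_timeout, qtnf_mac_scan_timeout);

    for (i = 0; i < QTNF_MAX_INTF; i++) {
    	struct qtnf_vif *vif = &mac->iflist[i];	/* ... per-VIF from here */

    	memset(vif, 0, sizeof(*vif));
    	vif->wdev.iftype = NL80211_IFTYPE_UNSPECIFIED;
    	vif->mac = mac;
    	vif->vifid = i;
    }

The core.h hunk also finishes the STA-state cleanup begun in cfg80211.c: the driver-private QTNF_STA_* enum and the sta_state field are gone for good.
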
a/drivers/net/wireless/quantenna/qtnfmac/event.c b/drivers/net/wireless/quantenna/qtnfmac/event.c index 68da81bec4e9..8b542b431b75 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/event.c +++ b/drivers/net/wireless/quantenna/qtnfmac/event.c @@ -171,24 +171,14 @@ qtnf_event_handle_bss_join(struct qtnf_vif *vif, return -EPROTO; } - if (vif->sta_state != QTNF_STA_CONNECTING) { - pr_err("VIF%u.%u: BSS_JOIN event when STA is not connecting\n", - vif->mac->macid, vif->vifid); - return -EPROTO; - } - pr_debug("VIF%u.%u: BSSID:%pM\n", vif->mac->macid, vif->vifid, join_info->bssid); cfg80211_connect_result(vif->netdev, join_info->bssid, NULL, 0, NULL, 0, le16_to_cpu(join_info->status), GFP_KERNEL); - if (le16_to_cpu(join_info->status) == WLAN_STATUS_SUCCESS) { - vif->sta_state = QTNF_STA_CONNECTED; + if (le16_to_cpu(join_info->status) == WLAN_STATUS_SUCCESS) netif_carrier_on(vif->netdev); - } else { - vif->sta_state = QTNF_STA_DISCONNECTED; - } return 0; } @@ -211,16 +201,10 @@ qtnf_event_handle_bss_leave(struct qtnf_vif *vif, return -EPROTO; } - if (vif->sta_state != QTNF_STA_CONNECTED) - pr_warn("VIF%u.%u: BSS_LEAVE event when STA is not connected\n", - vif->mac->macid, vif->vifid); - pr_debug("VIF%u.%u: disconnected\n", vif->mac->macid, vif->vifid); cfg80211_disconnected(vif->netdev, le16_to_cpu(leave_info->reason), NULL, 0, 0, GFP_KERNEL); - - vif->sta_state = QTNF_STA_DISCONNECTED; netif_carrier_off(vif->netdev); return 0; diff --git a/drivers/net/wireless/quantenna/qtnfmac/pcie/pcie.c b/drivers/net/wireless/quantenna/qtnfmac/pcie/pcie.c index d1637f2354a6..16795dbe475b 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/pcie/pcie.c +++ b/drivers/net/wireless/quantenna/qtnfmac/pcie/pcie.c @@ -242,7 +242,8 @@ static int qtnf_pcie_init_memory(struct qtnf_pcie_bus_priv *priv) return 0; } -static void qtnf_pcie_control_rx_callback(void *arg, const u8 *buf, size_t len) +static void qtnf_pcie_control_rx_callback(void *arg, const u8 __iomem *buf, + size_t len) { struct qtnf_pcie_bus_priv *priv = arg; struct qtnf_bus *bus = pci_get_drvdata(priv->pdev); @@ -260,7 +261,7 @@ static void qtnf_pcie_control_rx_callback(void *arg, const u8 *buf, size_t len) return; } - skb_put_data(skb, buf, len); + memcpy_fromio(skb_put(skb, len), buf, len); qtnf_trans_handle_rx_ctl_packet(bus, skb); } diff --git a/drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie.c b/drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie.c index 5aca12a51fe3..95c7b95c6f8a 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie.c +++ b/drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie.c @@ -1,18 +1,5 @@ -/* - * Copyright (c) 2015-2016 Quantenna Communications, Inc. - * All rights reserved. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version 2 - * of the License, or (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
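
The event.c hunks above complete that sta_state removal: the BSS_JOIN and BSS_LEAVE handlers no longer cross-check a driver-private connection state and simply forward the event to cfg80211, which already tracks connection state authoritatively. The pcie.c hunk is the consumer side of the shm_ipc rework further down: the control-path RX callback now receives a pointer straight into PCIe shared memory, properly annotated __iomem, instead of a pre-copied kernel buffer. That rules out skb_put_data(), which performs a plain memcpy() (not valid for device memory as far as sparse and portability are concerned), in favor of the MMIO-safe accessor:

    /* Reserve len bytes in the skb, then copy out of device memory. */
    memcpy_fromio(skb_put(skb, len), buf, len);
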
- * - */ +// SPDX-License-Identifier: GPL-2.0+ +/* Copyright (c) 2018 Quantenna Communications */ #include <linux/kernel.h> #include <linux/module.h> diff --git a/drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie_ipc.h b/drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie_ipc.h index f21e97ede090..634480fe6a64 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie_ipc.h +++ b/drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie_ipc.h @@ -1,18 +1,5 @@ -/* - * Copyright (c) 2015-2016 Quantenna Communications, Inc. - * All rights reserved. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version 2 - * of the License, or (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - */ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* Copyright (c) 2015-2016 Quantenna Communications */ #ifndef _QTN_FMAC_PCIE_IPC_H_ #define _QTN_FMAC_PCIE_IPC_H_ @@ -85,11 +72,6 @@ #define QTN_EP_LHOST_TQE_PORT 4 -enum qtnf_pcie_bda_ipc_flags { - QTN_PCIE_IPC_FLAG_HBM_MAGIC = BIT(0), - QTN_PCIE_IPC_FLAG_SHM_PIO = BIT(1), -}; - enum qtnf_fw_loadtype { QTN_FW_DBEGIN, QTN_FW_DSUB, diff --git a/drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie_regs.h b/drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie_regs.h index 0bfe285b6b48..6e9a5c61d46f 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie_regs.h +++ b/drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie_regs.h @@ -1,28 +1,10 @@ -/* - * Copyright (c) 2015 Quantenna Communications, Inc. - * All rights reserved. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version 2 - * of the License, or (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- * - */ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* Copyright (c) 2015 Quantenna Communications */ #ifndef __PEARL_PCIE_H #define __PEARL_PCIE_H -#define PCIE_GEN2_BASE (0xe9000000) -#define PCIE_GEN3_BASE (0xe7000000) - -#define PEARL_CUR_PCIE_BASE (PCIE_GEN2_BASE) -#define PCIE_HDP_OFFSET (0x2000) - +/* Pearl PCIe HDP registers */ #define PCIE_HDP_CTRL(base) ((base) + 0x2c00) #define PCIE_HDP_AXI_CTRL(base) ((base) + 0x2c04) #define PCIE_HDP_HOST_WR_DESC0(base) ((base) + 0x2c10) @@ -86,7 +68,7 @@ #define PCIE_HDP_TX_HOST_Q_RD_PTR(base) ((base) + 0x2d3c) #define PCIE_HDP_TX_HOST_Q_STS(base) ((base) + 0x2d40) -/* Host HBM pool registers */ +/* Pearl PCIe HBM pool registers */ #define PCIE_HHBM_CSR_REG(base) ((base) + 0x2e00) #define PCIE_HHBM_Q_BASE_REG(base) ((base) + 0x2e04) #define PCIE_HHBM_Q_LIMIT_REG(base) ((base) + 0x2e08) @@ -104,230 +86,13 @@ #define HBM_INT_STATUS(base) ((base) + 0x2f9c) #define PCIE_HHBM_POOL_CNFIG(base) ((base) + 0x2f9c) -/* host HBM bit field definition */ +/* Pearl PCIe HBM bit field definitions */ #define HHBM_CONFIG_SOFT_RESET (BIT(8)) #define HHBM_WR_REQ (BIT(0)) #define HHBM_RD_REQ (BIT(1)) #define HHBM_DONE (BIT(31)) #define HHBM_64BIT (BIT(10)) -/* offsets for dual PCIE */ -#define PCIE_PORT_LINK_CTL(base) ((base) + 0x0710) -#define PCIE_GEN2_CTL(base) ((base) + 0x080C) -#define PCIE_GEN3_OFF(base) ((base) + 0x0890) -#define PCIE_ATU_CTRL1(base) ((base) + 0x0904) -#define PCIE_ATU_CTRL2(base) ((base) + 0x0908) -#define PCIE_ATU_BASE_LOW(base) ((base) + 0x090C) -#define PCIE_ATU_BASE_HIGH(base) ((base) + 0x0910) -#define PCIE_ATU_BASE_LIMIT(base) ((base) + 0x0914) -#define PCIE_ATU_TGT_LOW(base) ((base) + 0x0918) -#define PCIE_ATU_TGT_HIGH(base) ((base) + 0x091C) -#define PCIE_DMA_WR_ENABLE(base) ((base) + 0x097C) -#define PCIE_DMA_WR_CHWTLOW(base) ((base) + 0x0988) -#define PCIE_DMA_WR_CHWTHIG(base) ((base) + 0x098C) -#define PCIE_DMA_WR_INTSTS(base) ((base) + 0x09BC) -#define PCIE_DMA_WR_INTMASK(base) ((base) + 0x09C4) -#define PCIE_DMA_WR_INTCLER(base) ((base) + 0x09C8) -#define PCIE_DMA_WR_DONE_IMWR_ADDR_L(base) ((base) + 0x09D0) -#define PCIE_DMA_WR_DONE_IMWR_ADDR_H(base) ((base) + 0x09D4) -#define PCIE_DMA_WR_ABORT_IMWR_ADDR_L(base) ((base) + 0x09D8) -#define PCIE_DMA_WR_ABORT_IMWR_ADDR_H(base) ((base) + 0x09DC) -#define PCIE_DMA_WR_IMWR_DATA(base) ((base) + 0x09E0) -#define PCIE_DMA_WR_LL_ERR_EN(base) ((base) + 0x0A00) -#define PCIE_DMA_WR_DOORBELL(base) ((base) + 0x0980) -#define PCIE_DMA_RD_ENABLE(base) ((base) + 0x099C) -#define PCIE_DMA_RD_DOORBELL(base) ((base) + 0x09A0) -#define PCIE_DMA_RD_CHWTLOW(base) ((base) + 0x09A8) -#define PCIE_DMA_RD_CHWTHIG(base) ((base) + 0x09AC) -#define PCIE_DMA_RD_INTSTS(base) ((base) + 0x0A10) -#define PCIE_DMA_RD_INTMASK(base) ((base) + 0x0A18) -#define PCIE_DMA_RD_INTCLER(base) ((base) + 0x0A1C) -#define PCIE_DMA_RD_ERR_STS_L(base) ((base) + 0x0A24) -#define PCIE_DMA_RD_ERR_STS_H(base) ((base) + 0x0A28) -#define PCIE_DMA_RD_LL_ERR_EN(base) ((base) + 0x0A34) -#define PCIE_DMA_RD_DONE_IMWR_ADDR_L(base) ((base) + 0x0A3C) -#define PCIE_DMA_RD_DONE_IMWR_ADDR_H(base) ((base) + 0x0A40) -#define PCIE_DMA_RD_ABORT_IMWR_ADDR_L(base) ((base) + 0x0A44) -#define PCIE_DMA_RD_ABORT_IMWR_ADDR_H(base) ((base) + 0x0A48) -#define PCIE_DMA_RD_IMWR_DATA(base) ((base) + 0x0A4C) -#define PCIE_DMA_CHNL_CONTEXT(base) ((base) + 0x0A6C) -#define PCIE_DMA_CHNL_CNTRL(base) ((base) + 0x0A70) -#define PCIE_DMA_XFR_SIZE(base) ((base) + 0x0A78) -#define PCIE_DMA_SAR_LOW(base) ((base) + 0x0A7C) -#define PCIE_DMA_SAR_HIGH(base) ((base) + 
0x0A80) -#define PCIE_DMA_DAR_LOW(base) ((base) + 0x0A84) -#define PCIE_DMA_DAR_HIGH(base) ((base) + 0x0A88) -#define PCIE_DMA_LLPTR_LOW(base) ((base) + 0x0A8C) -#define PCIE_DMA_LLPTR_HIGH(base) ((base) + 0x0A90) -#define PCIE_DMA_WRLL_ERR_ENB(base) ((base) + 0x0A00) -#define PCIE_DMA_RDLL_ERR_ENB(base) ((base) + 0x0A34) -#define PCIE_DMABD_CHNL_CNTRL(base) ((base) + 0x8000) -#define PCIE_DMABD_XFR_SIZE(base) ((base) + 0x8004) -#define PCIE_DMABD_SAR_LOW(base) ((base) + 0x8008) -#define PCIE_DMABD_SAR_HIGH(base) ((base) + 0x800c) -#define PCIE_DMABD_DAR_LOW(base) ((base) + 0x8010) -#define PCIE_DMABD_DAR_HIGH(base) ((base) + 0x8014) -#define PCIE_DMABD_LLPTR_LOW(base) ((base) + 0x8018) -#define PCIE_DMABD_LLPTR_HIGH(base) ((base) + 0x801c) -#define PCIE_WRDMA0_CHNL_CNTRL(base) ((base) + 0x8000) -#define PCIE_WRDMA0_XFR_SIZE(base) ((base) + 0x8004) -#define PCIE_WRDMA0_SAR_LOW(base) ((base) + 0x8008) -#define PCIE_WRDMA0_SAR_HIGH(base) ((base) + 0x800c) -#define PCIE_WRDMA0_DAR_LOW(base) ((base) + 0x8010) -#define PCIE_WRDMA0_DAR_HIGH(base) ((base) + 0x8014) -#define PCIE_WRDMA0_LLPTR_LOW(base) ((base) + 0x8018) -#define PCIE_WRDMA0_LLPTR_HIGH(base) ((base) + 0x801c) -#define PCIE_WRDMA1_CHNL_CNTRL(base) ((base) + 0x8020) -#define PCIE_WRDMA1_XFR_SIZE(base) ((base) + 0x8024) -#define PCIE_WRDMA1_SAR_LOW(base) ((base) + 0x8028) -#define PCIE_WRDMA1_SAR_HIGH(base) ((base) + 0x802c) -#define PCIE_WRDMA1_DAR_LOW(base) ((base) + 0x8030) -#define PCIE_WRDMA1_DAR_HIGH(base) ((base) + 0x8034) -#define PCIE_WRDMA1_LLPTR_LOW(base) ((base) + 0x8038) -#define PCIE_WRDMA1_LLPTR_HIGH(base) ((base) + 0x803c) -#define PCIE_RDDMA0_CHNL_CNTRL(base) ((base) + 0x8040) -#define PCIE_RDDMA0_XFR_SIZE(base) ((base) + 0x8044) -#define PCIE_RDDMA0_SAR_LOW(base) ((base) + 0x8048) -#define PCIE_RDDMA0_SAR_HIGH(base) ((base) + 0x804c) -#define PCIE_RDDMA0_DAR_LOW(base) ((base) + 0x8050) -#define PCIE_RDDMA0_DAR_HIGH(base) ((base) + 0x8054) -#define PCIE_RDDMA0_LLPTR_LOW(base) ((base) + 0x8058) -#define PCIE_RDDMA0_LLPTR_HIGH(base) ((base) + 0x805c) -#define PCIE_RDDMA1_CHNL_CNTRL(base) ((base) + 0x8060) -#define PCIE_RDDMA1_XFR_SIZE(base) ((base) + 0x8064) -#define PCIE_RDDMA1_SAR_LOW(base) ((base) + 0x8068) -#define PCIE_RDDMA1_SAR_HIGH(base) ((base) + 0x806c) -#define PCIE_RDDMA1_DAR_LOW(base) ((base) + 0x8070) -#define PCIE_RDDMA1_DAR_HIGH(base) ((base) + 0x8074) -#define PCIE_RDDMA1_LLPTR_LOW(base) ((base) + 0x8078) -#define PCIE_RDDMA1_LLPTR_HIGH(base) ((base) + 0x807c) - -#define PCIE_ID(base) ((base) + 0x0000) -#define PCIE_CMD(base) ((base) + 0x0004) -#define PCIE_BAR(base, n) ((base) + 0x0010 + ((n) << 2)) -#define PCIE_CAP_PTR(base) ((base) + 0x0034) -#define PCIE_MSI_LBAR(base) ((base) + 0x0054) -#define PCIE_MSI_CTRL(base) ((base) + 0x0050) -#define PCIE_MSI_ADDR_L(base) ((base) + 0x0054) -#define PCIE_MSI_ADDR_H(base) ((base) + 0x0058) -#define PCIE_MSI_DATA(base) ((base) + 0x005C) -#define PCIE_MSI_MASK_BIT(base) ((base) + 0x0060) -#define PCIE_MSI_PEND_BIT(base) ((base) + 0x0064) -#define PCIE_DEVCAP(base) ((base) + 0x0074) -#define PCIE_DEVCTLSTS(base) ((base) + 0x0078) - -#define PCIE_CMDSTS(base) ((base) + 0x0004) -#define PCIE_LINK_STAT(base) ((base) + 0x80) -#define PCIE_LINK_CTL2(base) ((base) + 0xa0) -#define PCIE_ASPM_L1_CTRL(base) ((base) + 0x70c) -#define PCIE_ASPM_LINK_CTRL(base) (PCIE_LINK_STAT) -#define PCIE_ASPM_L1_SUBSTATE_TIMING(base) ((base) + 0xB44) -#define PCIE_L1SUB_CTRL1(base) ((base) + 0x150) -#define PCIE_PMCSR(base) ((base) + 0x44) -#define PCIE_CFG_SPACE_LIMIT(base) ((base) + 
0x100) - -/* PCIe link defines */ -#define PEARL_PCIE_LINKUP (0x7) -#define PEARL_PCIE_DATA_LINK (BIT(0)) -#define PEARL_PCIE_PHY_LINK (BIT(1)) -#define PEARL_PCIE_LINK_RST (BIT(3)) -#define PEARL_PCIE_FATAL_ERR (BIT(5)) -#define PEARL_PCIE_NONFATAL_ERR (BIT(6)) - -/* PCIe Lane defines */ -#define PCIE_G2_LANE_X1 ((BIT(0)) << 16) -#define PCIE_G2_LANE_X2 ((BIT(0) | BIT(1)) << 16) - -/* PCIe DLL link enable */ -#define PCIE_DLL_LINK_EN ((BIT(0)) << 5) - -#define PCIE_LINK_GEN1 (BIT(0)) -#define PCIE_LINK_GEN2 (BIT(1)) -#define PCIE_LINK_GEN3 (BIT(2)) -#define PCIE_LINK_MODE(x) (((x) >> 16) & 0x7) - -#define MSI_EN (BIT(0)) -#define MSI_64_EN (BIT(7)) -#define PCIE_MSI_ADDR_OFFSET(a) ((a) & 0xFFFF) -#define PCIE_MSI_ADDR_ALIGN(a) ((a) & (~0xFFFF)) - -#define PCIE_BAR_MASK(base, n) ((base) + 0x1010 + ((n) << 2)) -#define PCIE_MAX_BAR (6) - -#define PCIE_ATU_VIEW(base) ((base) + 0x0900) -#define PCIE_ATU_CTL1(base) ((base) + 0x0904) -#define PCIE_ATU_CTL2(base) ((base) + 0x0908) -#define PCIE_ATU_LBAR(base) ((base) + 0x090c) -#define PCIE_ATU_UBAR(base) ((base) + 0x0910) -#define PCIE_ATU_LAR(base) ((base) + 0x0914) -#define PCIE_ATU_LTAR(base) ((base) + 0x0918) -#define PCIE_ATU_UTAR(base) ((base) + 0x091c) - -#define PCIE_MSI_ADDR_LOWER(base) ((base) + 0x0820) -#define PCIE_MSI_ADDR_UPPER(base) ((base) + 0x0824) -#define PCIE_MSI_ENABLE(base) ((base) + 0x0828) -#define PCIE_MSI_MASK_RC(base) ((base) + 0x082c) -#define PCIE_MSI_STATUS(base) ((base) + 0x0830) -#define PEARL_PCIE_MSI_REGION (0xce000000) -#define PEARL_PCIE_MSI_DATA (0) -#define PCIE_MSI_GPIO(base) ((base) + 0x0888) - -#define PCIE_HDP_HOST_QUEUE_FULL (BIT(17)) -#define USE_BAR_MATCH_MODE -#define PCIE_ATU_OB_REGION (BIT(0)) -#define PCIE_ATU_EN_REGION (BIT(31)) -#define PCIE_ATU_EN_MATCH (BIT(30)) -#define PCIE_BASE_REGION (0xb0000000) -#define PCIE_MEM_MAP_SIZE (512 * 1024) - -#define PCIE_OB_REG_REGION (0xcf000000) -#define PCIE_CONFIG_REGION (0xcf000000) -#define PCIE_CONFIG_SIZE (4096) -#define PCIE_CONFIG_CH (1) - -/* inbound mapping */ -#define PCIE_IB_BAR0 (0x00000000) /* ddr */ -#define PCIE_IB_BAR0_CH (0) -#define PCIE_IB_BAR3 (0xe0000000) /* sys_reg */ -#define PCIE_IB_BAR3_CH (1) - -/* outbound mapping */ -#define PCIE_MEM_CH (0) -#define PCIE_REG_CH (1) -#define PCIE_MEM_REGION (0xc0000000) -#define PCIE_MEM_SIZE (0x000fffff) -#define PCIE_MEM_TAR (0x80000000) - -#define PCIE_MSI_REGION (0xce000000) -#define PCIE_MSI_SIZE (KBYTE(4) - 1) -#define PCIE_MSI_CH (1) - -/* size of config region */ -#define PCIE_CFG_SIZE (0x0000ffff) - -#define PCIE_ATU_DIR_IB (BIT(31)) -#define PCIE_ATU_DIR_OB (0) -#define PCIE_ATU_DIR_CFG (2) -#define PCIE_ATU_DIR_MATCH_IB (BIT(31) | BIT(30)) - -#define PCIE_DMA_WR_0 (0) -#define PCIE_DMA_WR_1 (1) -#define PCIE_DMA_RD_0 (2) -#define PCIE_DMA_RD_1 (3) - -#define PCIE_DMA_CHNL_CNTRL_CB (BIT(0)) -#define PCIE_DMA_CHNL_CNTRL_TCB (BIT(1)) -#define PCIE_DMA_CHNL_CNTRL_LLP (BIT(2)) -#define PCIE_DMA_CHNL_CNTRL_LIE (BIT(3)) -#define PCIE_DMA_CHNL_CNTRL_RIE (BIT(4)) -#define PCIE_DMA_CHNL_CNTRL_CSS (BIT(8)) -#define PCIE_DMA_CHNL_CNTRL_LLE (BIT(9)) -#define PCIE_DMA_CHNL_CNTRL_TLP (BIT(26)) - -#define PCIE_DMA_CHNL_CONTEXT_RD (BIT(31)) -#define PCIE_DMA_CHNL_CONTEXT_WR (0) -#define PCIE_MAX_BAR (6) - /* PCIe HDP interrupt status definition */ #define PCIE_HDP_INT_EP_RXDMA (BIT(0)) #define PCIE_HDP_INT_HBM_UF (BIT(1)) diff --git a/drivers/net/wireless/quantenna/qtnfmac/qlink.h b/drivers/net/wireless/quantenna/qtnfmac/qlink.h index 99d37e3efba6..8d62addea895 100644 --- 
a/drivers/net/wireless/quantenna/qtnfmac/qlink.h +++ b/drivers/net/wireless/quantenna/qtnfmac/qlink.h @@ -71,6 +71,7 @@ struct qlink_msg_header { * @QLINK_HW_CAPAB_DFS_OFFLOAD: device implements DFS offload functionality * @QLINK_HW_CAPAB_SCAN_RANDOM_MAC_ADDR: device supports MAC Address * Randomization in probe requests. + * @QLINK_HW_CAPAB_OBSS_SCAN: device can perform OBSS scanning. */ enum qlink_hw_capab { QLINK_HW_CAPAB_REG_UPDATE = BIT(0), @@ -78,6 +79,8 @@ enum qlink_hw_capab { QLINK_HW_CAPAB_DFS_OFFLOAD = BIT(2), QLINK_HW_CAPAB_SCAN_RANDOM_MAC_ADDR = BIT(3), QLINK_HW_CAPAB_PWR_MGMT = BIT(4), + QLINK_HW_CAPAB_OBSS_SCAN = BIT(5), + QLINK_HW_CAPAB_SCAN_DWELL = BIT(6), }; enum qlink_iface_type { @@ -1149,6 +1152,8 @@ enum qlink_tlv_id { QTN_TLV_ID_MAX_SCAN_SSIDS = 0x0409, QTN_TLV_ID_WOWLAN_CAPAB = 0x0410, QTN_TLV_ID_WOWLAN_PATTERN = 0x0411, + QTN_TLV_ID_SCAN_FLUSH = 0x0412, + QTN_TLV_ID_SCAN_DWELL = 0x0413, }; struct qlink_tlv_hdr { diff --git a/drivers/net/wireless/quantenna/qtnfmac/qlink_util.h b/drivers/net/wireless/quantenna/qtnfmac/qlink_util.h index 54caeb38917c..960d5d97492f 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/qlink_util.h +++ b/drivers/net/wireless/quantenna/qtnfmac/qlink_util.h @@ -40,6 +40,14 @@ static inline void qtnf_cmd_skb_put_tlv_arr(struct sk_buff *skb, memcpy(hdr->val, arr, arr_len); } +static inline void qtnf_cmd_skb_put_tlv_tag(struct sk_buff *skb, u16 tlv_id) +{ + struct qlink_tlv_hdr *hdr = skb_put(skb, sizeof(*hdr)); + + hdr->type = cpu_to_le16(tlv_id); + hdr->len = cpu_to_le16(0); +} + static inline void qtnf_cmd_skb_put_tlv_u8(struct sk_buff *skb, u16 tlv_id, u8 value) { diff --git a/drivers/net/wireless/quantenna/qtnfmac/shm_ipc.c b/drivers/net/wireless/quantenna/qtnfmac/shm_ipc.c index aa106dd0a14b..2ec334199c2b 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/shm_ipc.c +++ b/drivers/net/wireless/quantenna/qtnfmac/shm_ipc.c @@ -42,19 +42,18 @@ static void qtnf_shm_handle_new_data(struct qtnf_shm_ipc *ipc) if (unlikely(size == 0 || size > QTN_IPC_MAX_DATA_SZ)) { pr_err("wrong rx packet size: %zu\n", size); rx_buff_ok = false; - } else { - memcpy_fromio(ipc->rx_data, ipc->shm_region->data, size); + } + + if (likely(rx_buff_ok)) { + ipc->rx_packet_count++; + ipc->rx_callback.fn(ipc->rx_callback.arg, + ipc->shm_region->data, size); } writel(QTNF_SHM_IPC_ACK, &shm_reg_hdr->flags); readl(&shm_reg_hdr->flags); /* flush PCIe write */ ipc->interrupt.fn(ipc->interrupt.arg); - - if (likely(rx_buff_ok)) { - ipc->rx_packet_count++; - ipc->rx_callback.fn(ipc->rx_callback.arg, ipc->rx_data, size); - } } static void qtnf_shm_ipc_irq_work(struct work_struct *work) diff --git a/drivers/net/wireless/quantenna/qtnfmac/shm_ipc.h b/drivers/net/wireless/quantenna/qtnfmac/shm_ipc.h index 453dd6477b12..c2a3702a9ee7 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/shm_ipc.h +++ b/drivers/net/wireless/quantenna/qtnfmac/shm_ipc.h @@ -32,7 +32,7 @@ struct qtnf_shm_ipc_int { }; struct qtnf_shm_ipc_rx_callback { - void (*fn)(void *arg, const u8 *buf, size_t len); + void (*fn)(void *arg, const u8 __iomem *buf, size_t len); void *arg; }; @@ -51,8 +51,6 @@ struct qtnf_shm_ipc { u8 waiting_for_ack; - u8 rx_data[QTN_IPC_MAX_DATA_SZ] __aligned(sizeof(u32)); - struct qtnf_shm_ipc_int interrupt; struct qtnf_shm_ipc_rx_callback rx_callback; diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c index 73f6fc0d4a01..56040b181cf5 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c +++ 
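
The shm_ipc.c rework above is where the bounce buffer actually disappears: the QTN_IPC_MAX_DATA_SZ rx_data array leaves struct qtnf_shm_ipc entirely (shm_ipc.h hunk). With the callback now reading the shared region in place, ordering becomes load-bearing: the region is only safe to read until QTNF_SHM_IPC_ACK is written back, since the ACK is what licenses the peer to reuse it. Hence the handler runs the callback before acknowledging:

    if (likely(rx_buff_ok)) {
    	ipc->rx_packet_count++;
    	ipc->rx_callback.fn(ipc->rx_callback.arg,
    			    ipc->shm_region->data, size);
    }

    writel(QTNF_SHM_IPC_ACK, &shm_reg_hdr->flags);
    readl(&shm_reg_hdr->flags);	/* read back to flush the posted write */

The old code copied first, ACKed, and only then dispatched the local copy; that was safe but cost a memcpy_fromio() of every packet plus the static buffer.
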
b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c @@ -4918,11 +4918,10 @@ static void rtl8xxxu_tx(struct ieee80211_hw *hw, struct device *dev = &priv->udev->dev; u32 queue, rts_rate; u16 pktlen = skb->len; - u16 seq_number; u16 rate_flag = tx_info->control.rates[0].flags; int tx_desc_size = priv->fops->tx_desc_size; int ret; - bool usedesc40, ampdu_enable, sgi = false, short_preamble = false; + bool ampdu_enable, sgi = false, short_preamble = false; if (skb_headroom(skb) < tx_desc_size) { dev_warn(dev, @@ -4946,7 +4945,6 @@ static void rtl8xxxu_tx(struct ieee80211_hw *hw, if (ieee80211_is_action(hdr->frame_control)) rtl8xxxu_dump_action(dev, hdr); - usedesc40 = (tx_desc_size == 40); tx_info->rate_driver_data[0] = hw; if (control && control->sta) @@ -5013,7 +5011,6 @@ static void rtl8xxxu_tx(struct ieee80211_hw *hw, else rts_rate = 0; - seq_number = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl)); priv->fops->fill_txdesc(hw, hdr, tx_info, tx_desc, sgi, short_preamble, ampdu_enable, rts_rate); diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c index 317c1b3101da..ba258318ee9f 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c @@ -3404,75 +3404,6 @@ static void rtl8821ae_update_hal_rate_table(struct ieee80211_hw *hw, "%x\n", rtl_read_dword(rtlpriv, REG_ARFR0)); } -static u8 _rtl8821ae_mrate_idx_to_arfr_id( - struct ieee80211_hw *hw, u8 rate_index, - enum wireless_mode wirelessmode) -{ - struct rtl_priv *rtlpriv = rtl_priv(hw); - struct rtl_phy *rtlphy = &rtlpriv->phy; - u8 ret = 0; - switch (rate_index) { - case RATR_INX_WIRELESS_NGB: - if (rtlphy->rf_type == RF_1T1R) - ret = 1; - else - ret = 0; - ; break; - case RATR_INX_WIRELESS_N: - case RATR_INX_WIRELESS_NG: - if (rtlphy->rf_type == RF_1T1R) - ret = 5; - else - ret = 4; - ; break; - case RATR_INX_WIRELESS_NB: - if (rtlphy->rf_type == RF_1T1R) - ret = 3; - else - ret = 2; - ; break; - case RATR_INX_WIRELESS_GB: - ret = 6; - break; - case RATR_INX_WIRELESS_G: - ret = 7; - break; - case RATR_INX_WIRELESS_B: - ret = 8; - break; - case RATR_INX_WIRELESS_MC: - if ((wirelessmode == WIRELESS_MODE_B) - || (wirelessmode == WIRELESS_MODE_G) - || (wirelessmode == WIRELESS_MODE_N_24G) - || (wirelessmode == WIRELESS_MODE_AC_24G)) - ret = 6; - else - ret = 7; - case RATR_INX_WIRELESS_AC_5N: - if (rtlphy->rf_type == RF_1T1R) - ret = 10; - else - ret = 9; - break; - case RATR_INX_WIRELESS_AC_24N: - if (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_80) { - if (rtlphy->rf_type == RF_1T1R) - ret = 10; - else - ret = 9; - } else { - if (rtlphy->rf_type == RF_1T1R) - ret = 11; - else - ret = 12; - } - break; - default: - ret = 0; break; - } - return ret; -} - static u32 _rtl8821ae_rate_to_bitmap_2ssvht(__le16 vht_rate) { u8 i, j, tmp_rate; @@ -3761,7 +3692,7 @@ static void rtl8821ae_update_hal_rate_mask(struct ieee80211_hw *hw, break; } - ratr_index = _rtl8821ae_mrate_idx_to_arfr_id(hw, ratr_index, wirelessmode); + ratr_index = rtl_mrate_idx_to_arfr_id(hw, ratr_index, wirelessmode); sta_entry->ratr_index = ratr_index; ratr_bitmap = _rtl8821ae_set_ra_vht_ratr_bitmap(hw, wirelessmode, ratr_bitmap); diff --git a/drivers/net/wireless/realtek/rtlwifi/wifi.h b/drivers/net/wireless/realtek/rtlwifi/wifi.h index 0f3b98c5227f..87bc21bb5e8b 100644 --- a/drivers/net/wireless/realtek/rtlwifi/wifi.h +++ b/drivers/net/wireless/realtek/rtlwifi/wifi.h @@ -1905,10 +1905,6 @@ struct rtl_efuse { u8 efuse_map[2][EFUSE_MAX_LOGICAL_SIZE]; 
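
The large deletion above is deduplication: rtl8821ae's private _rtl8821ae_mrate_idx_to_arfr_id() gives way to the shared rtl_mrate_idx_to_arfr_id(), presumably the common rtlwifi copy of the same table. Worth noting that the removed duplicate carried a latent switch fallthrough, retired along with its stray "; break" statements by the consolidation. Reduced illustration of the bug, not the driver's code (on_24ghz and rf_type stand in for the removed code's checks):

    switch (rate_index) {
    case RATR_INX_WIRELESS_MC:
    	ret = on_24ghz ? 6 : 7;		/* no break here, so control ... */
    case RATR_INX_WIRELESS_AC_5N:
    	ret = (rf_type == RF_1T1R) ? 10 : 9;	/* ... fell through and clobbered ret */
    	break;
    }

The rtl8xxxu hunk above it is simpler: seq_number and usedesc40 were computed but no longer consumed after an earlier refactor, so they go.
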
u16 efuse_usedbytes; u8 efuse_usedpercentage; -#ifdef EFUSE_REPG_WORKAROUND - bool efuse_re_pg_sec1flag; - u8 efuse_re_pg_data[8]; -#endif u8 autoload_failflag; u8 autoload_status; diff --git a/drivers/net/wireless/ti/wlcore/main.c b/drivers/net/wireless/ti/wlcore/main.c index 89b0d0fade9f..26b187336875 100644 --- a/drivers/net/wireless/ti/wlcore/main.c +++ b/drivers/net/wireless/ti/wlcore/main.c @@ -27,6 +27,7 @@ #include <linux/interrupt.h> #include <linux/irq.h> #include <linux/pm_runtime.h> +#include <linux/pm_wakeirq.h> #include "wlcore.h" #include "debug.h" @@ -957,6 +958,8 @@ static void wl1271_recovery_work(struct work_struct *work) BUG_ON(wl->conf.recovery.bug_on_recovery && !test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags)); + clear_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags); + if (wl->conf.recovery.no_recovery) { wl1271_info("No recovery (chosen on module load). Fw will remain stuck."); goto out_unlock; @@ -6625,13 +6628,25 @@ static void wlcore_nvs_cb(const struct firmware *fw, void *context) } #ifdef CONFIG_PM + device_init_wakeup(wl->dev, true); + ret = enable_irq_wake(wl->irq); if (!ret) { wl->irq_wake_enabled = true; - device_init_wakeup(wl->dev, 1); if (pdev_data->pwr_in_suspend) wl->hw->wiphy->wowlan = &wlcore_wowlan_support; } + + res = platform_get_resource(pdev, IORESOURCE_IRQ, 1); + if (res) { + wl->wakeirq = res->start; + wl->wakeirq_flags = res->flags & IRQF_TRIGGER_MASK; + ret = dev_pm_set_dedicated_wake_irq(wl->dev, wl->wakeirq); + if (ret) + wl->wakeirq = -ENODEV; + } else { + wl->wakeirq = -ENODEV; + } #endif disable_irq(wl->irq); wl1271_power_off(wl); @@ -6659,6 +6674,9 @@ out_unreg: wl1271_unregister_hw(wl); out_irq: + if (wl->wakeirq >= 0) + dev_pm_clear_wake_irq(wl->dev); + device_init_wakeup(wl->dev, false); free_irq(wl->irq, wl); out_free_nvs: @@ -6710,6 +6728,7 @@ static int __maybe_unused wlcore_runtime_resume(struct device *dev) int ret; unsigned long start_time = jiffies; bool pending = false; + bool recovery = false; /* Nothing to do if no ELP mode requested */ if (!test_bit(WL1271_FLAG_IN_ELP, &wl->flags)) @@ -6726,7 +6745,7 @@ static int __maybe_unused wlcore_runtime_resume(struct device *dev) ret = wlcore_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG, ELPCTRL_WAKE_UP); if (ret < 0) { - wl12xx_queue_recovery_work(wl); + recovery = true; goto err; } @@ -6734,11 +6753,12 @@ static int __maybe_unused wlcore_runtime_resume(struct device *dev) ret = wait_for_completion_timeout(&compl, msecs_to_jiffies(WL1271_WAKEUP_TIMEOUT)); if (ret == 0) { - wl1271_error("ELP wakeup timeout!"); - wl12xx_queue_recovery_work(wl); + wl1271_warning("ELP wakeup timeout!"); /* Return no error for runtime PM for recovery */ - return 0; + ret = 0; + recovery = true; + goto err; } } @@ -6753,6 +6773,12 @@ err: spin_lock_irqsave(&wl->wl_lock, flags); wl->elp_compl = NULL; spin_unlock_irqrestore(&wl->wl_lock, flags); + + if (recovery) { + set_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags); + wl12xx_queue_recovery_work(wl); + } + return ret; } @@ -6815,10 +6841,16 @@ int wlcore_remove(struct platform_device *pdev) if (!wl->initialized) return 0; - if (wl->irq_wake_enabled) { - device_init_wakeup(wl->dev, 0); - disable_irq_wake(wl->irq); + if (wl->wakeirq >= 0) { + dev_pm_clear_wake_irq(wl->dev); + wl->wakeirq = -ENODEV; } + + device_init_wakeup(wl->dev, false); + + if (wl->irq_wake_enabled) + disable_irq_wake(wl->irq); + wl1271_unregister_hw(wl); pm_runtime_put_sync(wl->dev); diff --git a/drivers/net/wireless/ti/wlcore/sdio.c b/drivers/net/wireless/ti/wlcore/sdio.c index 
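
Two threads run through the wlcore changes above. First, firmware recovery triggered from the runtime-resume path is now marked intentional: the failure sites set a local flag, the common err path sets WL1271_FLAG_INTENDED_FW_RECOVERY before queueing the work, and wl1271_recovery_work() clears the bit again after its BUG_ON check, so the bug_on_recovery debug option no longer panics the box for an expected ELP wakeup timeout (itself downgraded from error to warning and still reported as success to the PM core). Second, probe gains an optional dedicated wake IRQ taken from a second IRQ resource; its absence is normal, not an error. A sketch of the probe-side logic, simplified from the hunk (the real code also records wakeirq_flags):

    res = platform_get_resource(pdev, IORESOURCE_IRQ, 1);
    if (res) {
    	wl->wakeirq = res->start;
    	if (dev_pm_set_dedicated_wake_irq(wl->dev, wl->wakeirq))
    		wl->wakeirq = -ENODEV;
    } else {
    	wl->wakeirq = -ENODEV;
    }

The sdio.c hunk just below supplies that second resource, mapping it from the device tree's second interrupt specifier.
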
750bea3574ee..4c2154b9e6a3 100644 --- a/drivers/net/wireless/ti/wlcore/sdio.c +++ b/drivers/net/wireless/ti/wlcore/sdio.c @@ -241,7 +241,7 @@ static const struct of_device_id wlcore_sdio_of_match_table[] = { { } }; -static int wlcore_probe_of(struct device *dev, int *irq, +static int wlcore_probe_of(struct device *dev, int *irq, int *wakeirq, struct wlcore_platdev_data *pdev_data) { struct device_node *np = dev->of_node; @@ -259,6 +259,8 @@ static int wlcore_probe_of(struct device *dev, int *irq, return -EINVAL; } + *wakeirq = irq_of_parse_and_map(np, 1); + /* optional clock frequency params */ of_property_read_u32(np, "ref-clock-frequency", &pdev_data->ref_clock_freq); @@ -268,7 +270,7 @@ static int wlcore_probe_of(struct device *dev, int *irq, return 0; } #else -static int wlcore_probe_of(struct device *dev, int *irq, +static int wlcore_probe_of(struct device *dev, int *irq, int *wakeirq, struct wlcore_platdev_data *pdev_data) { return -ENODATA; @@ -280,10 +282,10 @@ static int wl1271_probe(struct sdio_func *func, { struct wlcore_platdev_data *pdev_data; struct wl12xx_sdio_glue *glue; - struct resource res[1]; + struct resource res[2]; mmc_pm_flag_t mmcflags; int ret = -ENOMEM; - int irq; + int irq, wakeirq; const char *chip_family; /* We are only able to handle the wlan function */ @@ -308,7 +310,7 @@ static int wl1271_probe(struct sdio_func *func, /* Use block mode for transferring over one block size of data */ func->card->quirks |= MMC_QUIRK_BLKSZ_FOR_BYTE_MODE; - ret = wlcore_probe_of(&func->dev, &irq, pdev_data); + ret = wlcore_probe_of(&func->dev, &irq, &wakeirq, pdev_data); if (ret) goto out; @@ -351,6 +353,11 @@ static int wl1271_probe(struct sdio_func *func, irqd_get_trigger_type(irq_get_irq_data(irq)); res[0].name = "irq"; + res[1].start = wakeirq; + res[1].flags = IORESOURCE_IRQ | + irqd_get_trigger_type(irq_get_irq_data(wakeirq)); + res[1].name = "wakeirq"; + ret = platform_device_add_resources(glue->core, res, ARRAY_SIZE(res)); if (ret) { dev_err(glue->dev, "can't add resources\n"); diff --git a/drivers/net/wireless/ti/wlcore/wlcore.h b/drivers/net/wireless/ti/wlcore/wlcore.h index d4b1f66ef457..dd14850b0603 100644 --- a/drivers/net/wireless/ti/wlcore/wlcore.h +++ b/drivers/net/wireless/ti/wlcore/wlcore.h @@ -199,8 +199,10 @@ struct wl1271 { struct wl1271_if_operations *if_ops; int irq; + int wakeirq; int irq_flags; + int wakeirq_flags; spinlock_t wl_lock; diff --git a/drivers/net/wireless/zydas/zd1211rw/zd_mac.c b/drivers/net/wireless/zydas/zd1211rw/zd_mac.c index 1f6d9f357e57..9ccd780695f0 100644 --- a/drivers/net/wireless/zydas/zd1211rw/zd_mac.c +++ b/drivers/net/wireless/zydas/zd1211rw/zd_mac.c @@ -235,7 +235,7 @@ void zd_mac_clear(struct zd_mac *mac) { flush_workqueue(zd_workqueue); zd_chip_clear(&mac->chip); - ZD_ASSERT(!spin_is_locked(&mac->lock)); + lockdep_assert_held(&mac->lock); ZD_MEMCLEAR(mac, sizeof(struct zd_mac)); } diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h index a46a1e94505d..936c0b3e0ba2 100644 --- a/drivers/net/xen-netback/common.h +++ b/drivers/net/xen-netback/common.h @@ -241,8 +241,9 @@ struct xenvif_hash_cache { struct xenvif_hash { unsigned int alg; u32 flags; + bool mapping_sel; u8 key[XEN_NETBK_MAX_HASH_KEY_SIZE]; - u32 mapping[XEN_NETBK_MAX_HASH_MAPPING_SIZE]; + u32 mapping[2][XEN_NETBK_MAX_HASH_MAPPING_SIZE]; unsigned int size; struct xenvif_hash_cache cache; }; diff --git a/drivers/net/xen-netback/hash.c b/drivers/net/xen-netback/hash.c index 3c4c58b9fe76..0ccb021f1e78 100644 --- 
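
A small but semantically loaded change sits in the zd_mac.c hunk above: the spin_is_locked()-based assertion becomes lockdep_assert_held(). Note the polarity flips: ZD_ASSERT(!spin_is_locked(&mac->lock)) claimed the lock was free, while the new line claims the caller holds it; which expectation is actually right is not decidable from this diff alone. What is clear is why spin_is_locked() had to go: on uniprocessor builds spinlocks compile away and the test is meaningless, whereas the lockdep form is exact when enabled and free otherwise:

    lockdep_assert_held(&mac->lock);	/* WARNs under CONFIG_LOCKDEP if not held */
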
a/drivers/net/xen-netback/hash.c +++ b/drivers/net/xen-netback/hash.c @@ -324,7 +324,8 @@ u32 xenvif_set_hash_mapping_size(struct xenvif *vif, u32 size) return XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER; vif->hash.size = size; - memset(vif->hash.mapping, 0, sizeof(u32) * size); + memset(vif->hash.mapping[vif->hash.mapping_sel], 0, + sizeof(u32) * size); return XEN_NETIF_CTRL_STATUS_SUCCESS; } @@ -332,31 +333,49 @@ u32 xenvif_set_hash_mapping_size(struct xenvif *vif, u32 size) u32 xenvif_set_hash_mapping(struct xenvif *vif, u32 gref, u32 len, u32 off) { - u32 *mapping = &vif->hash.mapping[off]; - struct gnttab_copy copy_op = { + u32 *mapping = vif->hash.mapping[!vif->hash.mapping_sel]; + unsigned int nr = 1; + struct gnttab_copy copy_op[2] = {{ .source.u.ref = gref, .source.domid = vif->domid, - .dest.u.gmfn = virt_to_gfn(mapping), .dest.domid = DOMID_SELF, - .dest.offset = xen_offset_in_page(mapping), - .len = len * sizeof(u32), + .len = len * sizeof(*mapping), .flags = GNTCOPY_source_gref - }; + }}; - if ((off + len > vif->hash.size) || copy_op.len > XEN_PAGE_SIZE) + if ((off + len < off) || (off + len > vif->hash.size) || + len > XEN_PAGE_SIZE / sizeof(*mapping)) return XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER; - while (len-- != 0) - if (mapping[off++] >= vif->num_queues) - return XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER; + copy_op[0].dest.u.gmfn = virt_to_gfn(mapping + off); + copy_op[0].dest.offset = xen_offset_in_page(mapping + off); + if (copy_op[0].dest.offset + copy_op[0].len > XEN_PAGE_SIZE) { + copy_op[1] = copy_op[0]; + copy_op[1].source.offset = XEN_PAGE_SIZE - copy_op[0].dest.offset; + copy_op[1].dest.u.gmfn = virt_to_gfn(mapping + off + len); + copy_op[1].dest.offset = 0; + copy_op[1].len = copy_op[0].len - copy_op[1].source.offset; + copy_op[0].len = copy_op[1].source.offset; + nr = 2; + } - if (copy_op.len != 0) { - gnttab_batch_copy(©_op, 1); + memcpy(mapping, vif->hash.mapping[vif->hash.mapping_sel], + vif->hash.size * sizeof(*mapping)); - if (copy_op.status != GNTST_okay) + if (copy_op[0].len != 0) { + gnttab_batch_copy(copy_op, nr); + + if (copy_op[0].status != GNTST_okay || + copy_op[nr - 1].status != GNTST_okay) return XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER; } + while (len-- != 0) + if (mapping[off++] >= vif->num_queues) + return XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER; + + vif->hash.mapping_sel = !vif->hash.mapping_sel; + return XEN_NETIF_CTRL_STATUS_SUCCESS; } @@ -408,6 +427,8 @@ void xenvif_dump_hash_info(struct xenvif *vif, struct seq_file *m) } if (vif->hash.size != 0) { + const u32 *mapping = vif->hash.mapping[vif->hash.mapping_sel]; + seq_puts(m, "\nHash Mapping:\n"); for (i = 0; i < vif->hash.size; ) { @@ -420,7 +441,7 @@ void xenvif_dump_hash_info(struct xenvif *vif, struct seq_file *m) seq_printf(m, "[%4u - %4u]: ", i, i + n - 1); for (j = 0; j < n; j++, i++) - seq_printf(m, "%4u ", vif->hash.mapping[i]); + seq_printf(m, "%4u ", mapping[i]); seq_puts(m, "\n"); } diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c index 7e3ea39a1b39..182d6770f102 100644 --- a/drivers/net/xen-netback/interface.c +++ b/drivers/net/xen-netback/interface.c @@ -162,7 +162,8 @@ static u16 xenvif_select_queue(struct net_device *dev, struct sk_buff *skb, if (size == 0) return skb_get_hash_raw(skb) % dev->real_num_tx_queues; - return vif->hash.mapping[skb_get_hash_raw(skb) % size]; + return vif->hash.mapping[vif->hash.mapping_sel] + [skb_get_hash_raw(skb) % size]; } static netdev_tx_t |
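
Closing note on the xen-netback hash rework that ends this section: the old xenvif_set_hash_mapping() grant-copied frontend data directly into the live table that xenvif_select_queue() reads locklessly, and its validation loop ran before the copy, so the values that actually landed were never checked; a misbehaving frontend could plant out-of-range queue indices (the flaw addressed by XSA-270). The new code is a classic double buffer: build the update in the inactive copy, validate it there, and only then flip the selector readers index by. It also guards the u32 arithmetic against wraparound (the off + len < off test) and splits the grant copy into two operations when the destination range straddles a XEN_PAGE_SIZE boundary, which a single grant-copy operation cannot express. The publication pattern, reduced:

    /* Writers fill the shadow copy; readers index the live one. */
    u32 *shadow = vif->hash.mapping[!vif->hash.mapping_sel];

    memcpy(shadow, vif->hash.mapping[vif->hash.mapping_sel],
           vif->hash.size * sizeof(*shadow));
    /* ... grant-copy the new slice into shadow + off, validate it ... */
    vif->hash.mapping_sel = !vif->hash.mapping_sel;	/* publish */
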