| /**************************************************************************** |
| * Driver for Solarflare network controllers and boards |
| * Copyright 2012-2013 Solarflare Communications Inc. |
| * |
| * This program is free software; you can redistribute it and/or modify it |
| * under the terms of the GNU General Public License version 2 as published |
| * by the Free Software Foundation, incorporated herein by reference. |
| */ |
| |
| #include "net_driver.h" |
| #include "ef10_regs.h" |
| #include "io.h" |
| #include "mcdi.h" |
| #include "mcdi_pcol.h" |
| #include "nic.h" |
| #include "workarounds.h" |
| #include "selftest.h" |
| #include "ef10_sriov.h" |
| #include <linux/in.h> |
| #include <linux/jhash.h> |
| #include <linux/wait.h> |
| #include <linux/workqueue.h> |
| |
| /* Hardware control for EF10 architecture including 'Huntington'. */ |
| |
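| /* Driver-generated event code, with subcodes distinguishing event |
| * self-test events from RX queue refill requests. |
| */ |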
| #define EFX_EF10_DRVGEN_EV 7 |
| enum { |
| EFX_EF10_TEST = 1, |
| EFX_EF10_REFILL, |
| }; |
| |
| /* The reserved RSS context value */ |
| #define EFX_EF10_RSS_CONTEXT_INVALID 0xffffffff |
| /* The maximum size of a shared RSS context */ |
| /* TODO: this should really be from the mcdi protocol export */ |
| #define EFX_EF10_MAX_SHARED_RSS_CONTEXT_SIZE 64UL |
| |
| /* The filter table(s) are managed by firmware and we have write-only |
| * access. When removing filters we must identify them to the |
| * firmware by a 64-bit handle, but this is too wide for Linux kernel |
| * interfaces (32-bit for RX NFC, 16-bit for RFS). Also, we need to |
| * be able to tell in advance whether a requested insertion will |
| * replace an existing filter. Therefore we maintain a software hash |
| * table, which should be at least as large as the hardware hash |
| * table. |
| * |
| * Huntington has a single 8K filter table shared between all filter |
| * types and both ports. |
| */ |
| #define HUNT_FILTER_TBL_ROWS 8192 |
| |
| #define EFX_EF10_FILTER_ID_INVALID 0xffff |
| |
| #define EFX_EF10_FILTER_DEV_UC_MAX 32 |
| #define EFX_EF10_FILTER_DEV_MC_MAX 256 |
| |
| /* VLAN list entry */ |
| struct efx_ef10_vlan { |
| struct list_head list; |
| u16 vid; |
| }; |
| |
| enum efx_ef10_default_filters { |
| EFX_EF10_BCAST, |
| EFX_EF10_UCDEF, |
| EFX_EF10_MCDEF, |
| EFX_EF10_VXLAN4_UCDEF, |
| EFX_EF10_VXLAN4_MCDEF, |
| EFX_EF10_VXLAN6_UCDEF, |
| EFX_EF10_VXLAN6_MCDEF, |
| EFX_EF10_NVGRE4_UCDEF, |
| EFX_EF10_NVGRE4_MCDEF, |
| EFX_EF10_NVGRE6_UCDEF, |
| EFX_EF10_NVGRE6_MCDEF, |
| EFX_EF10_GENEVE4_UCDEF, |
| EFX_EF10_GENEVE4_MCDEF, |
| EFX_EF10_GENEVE6_UCDEF, |
| EFX_EF10_GENEVE6_MCDEF, |
| |
| EFX_EF10_NUM_DEFAULT_FILTERS |
| }; |
| |
| /* Per-VLAN filters information */ |
| struct efx_ef10_filter_vlan { |
| struct list_head list; |
| u16 vid; |
| u16 uc[EFX_EF10_FILTER_DEV_UC_MAX]; |
| u16 mc[EFX_EF10_FILTER_DEV_MC_MAX]; |
| u16 default_filters[EFX_EF10_NUM_DEFAULT_FILTERS]; |
| }; |
| |
| struct efx_ef10_dev_addr { |
| u8 addr[ETH_ALEN]; |
| }; |
| |
| struct efx_ef10_filter_table { |
| /* The MCDI match masks supported by this fw & hw, in order of priority */ |
| u32 rx_match_mcdi_flags[ |
| MC_CMD_GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES_MAXNUM * 2]; |
| unsigned int rx_match_count; |
| |
| struct { |
| unsigned long spec; /* pointer to spec plus flag bits */ |
| /* BUSY flag indicates that an update is in progress. AUTO_OLD is |
| * used to mark and sweep MAC filters for the device address lists. |
| */ |
| #define EFX_EF10_FILTER_FLAG_BUSY 1UL |
| #define EFX_EF10_FILTER_FLAG_AUTO_OLD 2UL |
| #define EFX_EF10_FILTER_FLAGS 3UL |
| u64 handle; /* firmware handle */ |
| } *entry; |
| wait_queue_head_t waitq; |
| /* Shadow of net_device address lists, guarded by mac_lock */ |
| struct efx_ef10_dev_addr dev_uc_list[EFX_EF10_FILTER_DEV_UC_MAX]; |
| struct efx_ef10_dev_addr dev_mc_list[EFX_EF10_FILTER_DEV_MC_MAX]; |
| int dev_uc_count; |
| int dev_mc_count; |
| bool uc_promisc; |
| bool mc_promisc; |
| /* Whether in multicast promiscuous mode when last changed */ |
| bool mc_promisc_last; |
| bool mc_overflow; /* Too many MC addrs; should always imply mc_promisc */ |
| bool vlan_filter; |
| struct list_head vlan_list; |
| }; |
| |
| /* An arbitrary search limit for the software hash table */ |
| #define EFX_EF10_FILTER_SEARCH_LIMIT 200 |
| |
| static void efx_ef10_rx_free_indir_table(struct efx_nic *efx); |
| static void efx_ef10_filter_table_remove(struct efx_nic *efx); |
| static int efx_ef10_filter_add_vlan(struct efx_nic *efx, u16 vid); |
| static void efx_ef10_filter_del_vlan_internal(struct efx_nic *efx, |
| struct efx_ef10_filter_vlan *vlan); |
| static void efx_ef10_filter_del_vlan(struct efx_nic *efx, u16 vid); |
| static int efx_ef10_set_udp_tnl_ports(struct efx_nic *efx, bool unloading); |
| |
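| /* A filter ID handed out to the stack encodes both the priority band |
| * and the table row: id = pri * HUNT_FILTER_TBL_ROWS * 2 + idx. |
| * Since HUNT_FILTER_TBL_ROWS is a power of two, the row is recovered |
| * by masking and the priority by division; for example, row 0x123 at |
| * priority 1 gives id 1 * 8192 * 2 + 0x123 = 0x4123. |
| */ |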
| static u32 efx_ef10_filter_get_unsafe_id(u32 filter_id) |
| { |
| WARN_ON_ONCE(filter_id == EFX_EF10_FILTER_ID_INVALID); |
| return filter_id & (HUNT_FILTER_TBL_ROWS - 1); |
| } |
| |
| static unsigned int efx_ef10_filter_get_unsafe_pri(u32 filter_id) |
| { |
| return filter_id / (HUNT_FILTER_TBL_ROWS * 2); |
| } |
| |
| static u32 efx_ef10_make_filter_id(unsigned int pri, u16 idx) |
| { |
| return pri * HUNT_FILTER_TBL_ROWS * 2 + idx; |
| } |
| |
| static int efx_ef10_get_warm_boot_count(struct efx_nic *efx) |
| { |
| efx_dword_t reg; |
| |
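| /* While the MC is running, WORD_1 of this register reads the magic |
| * value 0xb007 and WORD_0 holds the warm boot count. Any other |
| * value means the MC is (re)booting, so return -EIO and let the |
| * caller retry. |
| */ |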
| efx_readd(efx, ®, ER_DZ_BIU_MC_SFT_STATUS); |
| return EFX_DWORD_FIELD(reg, EFX_WORD_1) == 0xb007 ? |
| EFX_DWORD_FIELD(reg, EFX_WORD_0) : -EIO; |
| } |
| |
| static unsigned int efx_ef10_mem_map_size(struct efx_nic *efx) |
| { |
| int bar; |
| |
| bar = efx->type->mem_bar; |
| return resource_size(&efx->pci_dev->resource[bar]); |
| } |
| |
| static bool efx_ef10_is_vf(struct efx_nic *efx) |
| { |
| return efx->type->is_vf; |
| } |
| |
| static int efx_ef10_get_pf_index(struct efx_nic *efx) |
| { |
| MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_FUNCTION_INFO_OUT_LEN); |
| struct efx_ef10_nic_data *nic_data = efx->nic_data; |
| size_t outlen; |
| int rc; |
| |
| rc = efx_mcdi_rpc(efx, MC_CMD_GET_FUNCTION_INFO, NULL, 0, outbuf, |
| sizeof(outbuf), &outlen); |
| if (rc) |
| return rc; |
| if (outlen < sizeof(outbuf)) |
| return -EIO; |
| |
| nic_data->pf_index = MCDI_DWORD(outbuf, GET_FUNCTION_INFO_OUT_PF); |
| return 0; |
| } |
| |
| #ifdef CONFIG_SFC_SRIOV |
| static int efx_ef10_get_vf_index(struct efx_nic *efx) |
| { |
| MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_FUNCTION_INFO_OUT_LEN); |
| struct efx_ef10_nic_data *nic_data = efx->nic_data; |
| size_t outlen; |
| int rc; |
| |
| rc = efx_mcdi_rpc(efx, MC_CMD_GET_FUNCTION_INFO, NULL, 0, outbuf, |
| sizeof(outbuf), &outlen); |
| if (rc) |
| return rc; |
| if (outlen < sizeof(outbuf)) |
| return -EIO; |
| |
| nic_data->vf_index = MCDI_DWORD(outbuf, GET_FUNCTION_INFO_OUT_VF); |
| return 0; |
| } |
| #endif |
| |
| static int efx_ef10_init_datapath_caps(struct efx_nic *efx) |
| { |
| MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_CAPABILITIES_V2_OUT_LEN); |
| struct efx_ef10_nic_data *nic_data = efx->nic_data; |
| size_t outlen; |
| int rc; |
| |
| BUILD_BUG_ON(MC_CMD_GET_CAPABILITIES_IN_LEN != 0); |
| |
| rc = efx_mcdi_rpc(efx, MC_CMD_GET_CAPABILITIES, NULL, 0, |
| outbuf, sizeof(outbuf), &outlen); |
| if (rc) |
| return rc; |
| if (outlen < MC_CMD_GET_CAPABILITIES_OUT_LEN) { |
| netif_err(efx, drv, efx->net_dev, |
| "unable to read datapath firmware capabilities\n"); |
| return -EIO; |
| } |
| |
| nic_data->datapath_caps = |
| MCDI_DWORD(outbuf, GET_CAPABILITIES_OUT_FLAGS1); |
| |
| if (outlen >= MC_CMD_GET_CAPABILITIES_V2_OUT_LEN) { |
| nic_data->datapath_caps2 = MCDI_DWORD(outbuf, |
| GET_CAPABILITIES_V2_OUT_FLAGS2); |
| nic_data->piobuf_size = MCDI_WORD(outbuf, |
| GET_CAPABILITIES_V2_OUT_SIZE_PIO_BUFF); |
| } else { |
| nic_data->datapath_caps2 = 0; |
| nic_data->piobuf_size = ER_DZ_TX_PIOBUF_SIZE; |
| } |
| |
| /* Record the DPCPU firmware IDs to determine VEB vswitching support. */ |
| nic_data->rx_dpcpu_fw_id = |
| MCDI_WORD(outbuf, GET_CAPABILITIES_OUT_RX_DPCPU_FW_ID); |
| nic_data->tx_dpcpu_fw_id = |
| MCDI_WORD(outbuf, GET_CAPABILITIES_OUT_TX_DPCPU_FW_ID); |
| |
| if (!(nic_data->datapath_caps & |
| (1 << MC_CMD_GET_CAPABILITIES_OUT_RX_PREFIX_LEN_14_LBN))) { |
| netif_err(efx, probe, efx->net_dev, |
| "current firmware does not support an RX prefix\n"); |
| return -ENODEV; |
| } |
| |
| return 0; |
| } |
| |
| static int efx_ef10_get_sysclk_freq(struct efx_nic *efx) |
| { |
| MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_CLOCK_OUT_LEN); |
| int rc; |
| |
| rc = efx_mcdi_rpc(efx, MC_CMD_GET_CLOCK, NULL, 0, |
| outbuf, sizeof(outbuf), NULL); |
| if (rc) |
| return rc; |
| rc = MCDI_DWORD(outbuf, GET_CLOCK_OUT_SYS_FREQ); |
| return rc > 0 ? rc : -ERANGE; |
| } |
| |
| static int efx_ef10_get_timer_workarounds(struct efx_nic *efx) |
| { |
| struct efx_ef10_nic_data *nic_data = efx->nic_data; |
| unsigned int implemented; |
| unsigned int enabled; |
| int rc; |
| |
| nic_data->workaround_35388 = false; |
| nic_data->workaround_61265 = false; |
| |
| rc = efx_mcdi_get_workarounds(efx, &implemented, &enabled); |
| |
| if (rc == -ENOSYS) { |
| /* Firmware without GET_WORKAROUNDS - not a problem. */ |
| rc = 0; |
| } else if (rc == 0) { |
| /* Bug61265 workaround is always enabled if implemented. */ |
| if (enabled & MC_CMD_GET_WORKAROUNDS_OUT_BUG61265) |
| nic_data->workaround_61265 = true; |
| |
| if (enabled & MC_CMD_GET_WORKAROUNDS_OUT_BUG35388) { |
| nic_data->workaround_35388 = true; |
| } else if (implemented & MC_CMD_GET_WORKAROUNDS_OUT_BUG35388) { |
| /* Workaround is implemented but not enabled. |
| * Try to enable it. |
| */ |
| rc = efx_mcdi_set_workaround(efx, |
| MC_CMD_WORKAROUND_BUG35388, |
| true, NULL); |
| if (rc == 0) |
| nic_data->workaround_35388 = true; |
| /* If we failed to set the workaround just carry on. */ |
| rc = 0; |
| } |
| } |
| |
| netif_dbg(efx, probe, efx->net_dev, |
| "workaround for bug 35388 is %sabled\n", |
| nic_data->workaround_35388 ? "en" : "dis"); |
| netif_dbg(efx, probe, efx->net_dev, |
| "workaround for bug 61265 is %sabled\n", |
| nic_data->workaround_61265 ? "en" : "dis"); |
| |
| return rc; |
| } |
| |
| static void efx_ef10_process_timer_config(struct efx_nic *efx, |
| const efx_dword_t *data) |
| { |
| unsigned int max_count; |
| |
| if (EFX_EF10_WORKAROUND_61265(efx)) { |
| efx->timer_quantum_ns = MCDI_DWORD(data, |
| GET_EVQ_TMR_PROPERTIES_OUT_MCDI_TMR_STEP_NS); |
| efx->timer_max_ns = MCDI_DWORD(data, |
| GET_EVQ_TMR_PROPERTIES_OUT_MCDI_TMR_MAX_NS); |
| } else if (EFX_EF10_WORKAROUND_35388(efx)) { |
| efx->timer_quantum_ns = MCDI_DWORD(data, |
| GET_EVQ_TMR_PROPERTIES_OUT_BUG35388_TMR_NS_PER_COUNT); |
| max_count = MCDI_DWORD(data, |
| GET_EVQ_TMR_PROPERTIES_OUT_BUG35388_TMR_MAX_COUNT); |
| efx->timer_max_ns = max_count * efx->timer_quantum_ns; |
| } else { |
| efx->timer_quantum_ns = MCDI_DWORD(data, |
| GET_EVQ_TMR_PROPERTIES_OUT_TMR_REG_NS_PER_COUNT); |
| max_count = MCDI_DWORD(data, |
| GET_EVQ_TMR_PROPERTIES_OUT_TMR_REG_MAX_COUNT); |
| efx->timer_max_ns = max_count * efx->timer_quantum_ns; |
| } |
| |
| netif_dbg(efx, probe, efx->net_dev, |
| "got timer properties from MC: quantum %u ns; max %u ns\n", |
| efx->timer_quantum_ns, efx->timer_max_ns); |
| } |
| |
| static int efx_ef10_get_timer_config(struct efx_nic *efx) |
| { |
| MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_LEN); |
| int rc; |
| |
| rc = efx_ef10_get_timer_workarounds(efx); |
| if (rc) |
| return rc; |
| |
| rc = efx_mcdi_rpc_quiet(efx, MC_CMD_GET_EVQ_TMR_PROPERTIES, NULL, 0, |
| outbuf, sizeof(outbuf), NULL); |
| |
| if (rc == 0) { |
| efx_ef10_process_timer_config(efx, outbuf); |
| } else if (rc == -ENOSYS || rc == -EPERM) { |
| /* Not available - fall back to Huntington defaults. */ |
| unsigned int quantum; |
| |
| rc = efx_ef10_get_sysclk_freq(efx); |
| if (rc < 0) |
| return rc; |
| |
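| /* GET_CLOCK reports the system clock in MHz, so 1536 cycles |
| * take 1536000 / freq nanoseconds. |
| */ |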
| quantum = 1536000 / rc; /* 1536 cycles */ |
| efx->timer_quantum_ns = quantum; |
| efx->timer_max_ns = efx->type->timer_period_max * quantum; |
| rc = 0; |
| } else { |
| efx_mcdi_display_error(efx, MC_CMD_GET_EVQ_TMR_PROPERTIES, |
| MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_LEN, |
| NULL, 0, rc); |
| } |
| |
| return rc; |
| } |
| |
| static int efx_ef10_get_mac_address_pf(struct efx_nic *efx, u8 *mac_address) |
| { |
| MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_MAC_ADDRESSES_OUT_LEN); |
| size_t outlen; |
| int rc; |
| |
| BUILD_BUG_ON(MC_CMD_GET_MAC_ADDRESSES_IN_LEN != 0); |
| |
| rc = efx_mcdi_rpc(efx, MC_CMD_GET_MAC_ADDRESSES, NULL, 0, |
| outbuf, sizeof(outbuf), &outlen); |
| if (rc) |
| return rc; |
| if (outlen < MC_CMD_GET_MAC_ADDRESSES_OUT_LEN) |
| return -EIO; |
| |
| ether_addr_copy(mac_address, |
| MCDI_PTR(outbuf, GET_MAC_ADDRESSES_OUT_MAC_ADDR_BASE)); |
| return 0; |
| } |
| |
| static int efx_ef10_get_mac_address_vf(struct efx_nic *efx, u8 *mac_address) |
| { |
| MCDI_DECLARE_BUF(inbuf, MC_CMD_VPORT_GET_MAC_ADDRESSES_IN_LEN); |
| MCDI_DECLARE_BUF(outbuf, MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LENMAX); |
| size_t outlen; |
| int num_addrs, rc; |
| |
| MCDI_SET_DWORD(inbuf, VPORT_GET_MAC_ADDRESSES_IN_VPORT_ID, |
| EVB_PORT_ID_ASSIGNED); |
| rc = efx_mcdi_rpc(efx, MC_CMD_VPORT_GET_MAC_ADDRESSES, inbuf, |
| sizeof(inbuf), outbuf, sizeof(outbuf), &outlen); |
| |
| if (rc) |
| return rc; |
| if (outlen < MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LENMIN) |
| return -EIO; |
| |
| num_addrs = MCDI_DWORD(outbuf, |
| VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_COUNT); |
| |
| WARN_ON(num_addrs != 1); |
| |
| ether_addr_copy(mac_address, |
| MCDI_PTR(outbuf, VPORT_GET_MAC_ADDRESSES_OUT_MACADDR)); |
| |
| return 0; |
| } |
| |
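| /* sysfs accessors for the link-control and primary flags reported in |
| * this function's MC_CMD_DRV_ATTACH response. |
| */ |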
| static ssize_t efx_ef10_show_link_control_flag(struct device *dev, |
| struct device_attribute *attr, |
| char *buf) |
| { |
| struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev)); |
| |
| return sprintf(buf, "%d\n", |
| ((efx->mcdi->fn_flags) & |
| (1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_LINKCTRL)) |
| ? 1 : 0); |
| } |
| |
| static ssize_t efx_ef10_show_primary_flag(struct device *dev, |
| struct device_attribute *attr, |
| char *buf) |
| { |
| struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev)); |
| |
| return sprintf(buf, "%d\n", |
| ((efx->mcdi->fn_flags) & |
| (1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_PRIMARY)) |
| ? 1 : 0); |
| } |
| |
| static struct efx_ef10_vlan *efx_ef10_find_vlan(struct efx_nic *efx, u16 vid) |
| { |
| struct efx_ef10_nic_data *nic_data = efx->nic_data; |
| struct efx_ef10_vlan *vlan; |
| |
| WARN_ON(!mutex_is_locked(&nic_data->vlan_lock)); |
| |
| list_for_each_entry(vlan, &nic_data->vlan_list, list) { |
| if (vlan->vid == vid) |
| return vlan; |
| } |
| |
| return NULL; |
| } |
| |
| static int efx_ef10_add_vlan(struct efx_nic *efx, u16 vid) |
| { |
| struct efx_ef10_nic_data *nic_data = efx->nic_data; |
| struct efx_ef10_vlan *vlan; |
| int rc; |
| |
| mutex_lock(&nic_data->vlan_lock); |
| |
| vlan = efx_ef10_find_vlan(efx, vid); |
| if (vlan) { |
| /* We add VID 0 on init. The 8021q module adds it on module init |
| * for all interfaces with the VLAN filtering feature. |
| */ |
| if (vid == 0) |
| goto done_unlock; |
| netif_warn(efx, drv, efx->net_dev, |
| "VLAN %u already added\n", vid); |
| rc = -EALREADY; |
| goto fail_exist; |
| } |
| |
| rc = -ENOMEM; |
| vlan = kzalloc(sizeof(*vlan), GFP_KERNEL); |
| if (!vlan) |
| goto fail_alloc; |
| |
| vlan->vid = vid; |
| |
| list_add_tail(&vlan->list, &nic_data->vlan_list); |
| |
| if (efx->filter_state) { |
| mutex_lock(&efx->mac_lock); |
| down_write(&efx->filter_sem); |
| rc = efx_ef10_filter_add_vlan(efx, vlan->vid); |
| up_write(&efx->filter_sem); |
| mutex_unlock(&efx->mac_lock); |
| if (rc) |
| goto fail_filter_add_vlan; |
| } |
| |
| done_unlock: |
| mutex_unlock(&nic_data->vlan_lock); |
| return 0; |
| |
| fail_filter_add_vlan: |
| list_del(&vlan->list); |
| kfree(vlan); |
| fail_alloc: |
| fail_exist: |
| mutex_unlock(&nic_data->vlan_lock); |
| return rc; |
| } |
| |
| static void efx_ef10_del_vlan_internal(struct efx_nic *efx, |
| struct efx_ef10_vlan *vlan) |
| { |
| struct efx_ef10_nic_data *nic_data = efx->nic_data; |
| |
| WARN_ON(!mutex_is_locked(&nic_data->vlan_lock)); |
| |
| if (efx->filter_state) { |
| down_write(&efx->filter_sem); |
| efx_ef10_filter_del_vlan(efx, vlan->vid); |
| up_write(&efx->filter_sem); |
| } |
| |
| list_del(&vlan->list); |
| kfree(vlan); |
| } |
| |
| static int efx_ef10_del_vlan(struct efx_nic *efx, u16 vid) |
| { |
| struct efx_ef10_nic_data *nic_data = efx->nic_data; |
| struct efx_ef10_vlan *vlan; |
| int rc = 0; |
| |
| /* The 8021q module removes VID 0 on unload for all interfaces |
| * with the VLAN filtering feature. We need to keep it to receive |
| * untagged traffic. |
| */ |
| if (vid == 0) |
| return 0; |
| |
| mutex_lock(&nic_data->vlan_lock); |
| |
| vlan = efx_ef10_find_vlan(efx, vid); |
| if (!vlan) { |
| netif_err(efx, drv, efx->net_dev, |
| "VLAN %u to be deleted not found\n", vid); |
| rc = -ENOENT; |
| } else { |
| efx_ef10_del_vlan_internal(efx, vlan); |
| } |
| |
| mutex_unlock(&nic_data->vlan_lock); |
| |
| return rc; |
| } |
| |
| static void efx_ef10_cleanup_vlans(struct efx_nic *efx) |
| { |
| struct efx_ef10_nic_data *nic_data = efx->nic_data; |
| struct efx_ef10_vlan *vlan, *next_vlan; |
| |
| mutex_lock(&nic_data->vlan_lock); |
| list_for_each_entry_safe(vlan, next_vlan, &nic_data->vlan_list, list) |
| efx_ef10_del_vlan_internal(efx, vlan); |
| mutex_unlock(&nic_data->vlan_lock); |
| } |
| |
| static DEVICE_ATTR(link_control_flag, 0444, efx_ef10_show_link_control_flag, |
| NULL); |
| static DEVICE_ATTR(primary_flag, 0444, efx_ef10_show_primary_flag, NULL); |
| |
| static int efx_ef10_probe(struct efx_nic *efx) |
| { |
| struct efx_ef10_nic_data *nic_data; |
| int i, rc; |
| |
| /* We can have one VI for each 8K region. However, until we |
| * use TX option descriptors we need two TX queues per channel. |
| */ |
| efx->max_channels = min_t(unsigned int, |
| EFX_MAX_CHANNELS, |
| efx_ef10_mem_map_size(efx) / |
| (EFX_VI_PAGE_SIZE * EFX_TXQ_TYPES)); |
| efx->max_tx_channels = efx->max_channels; |
| if (WARN_ON(efx->max_channels == 0)) |
| return -EIO; |
| |
| nic_data = kzalloc(sizeof(*nic_data), GFP_KERNEL); |
| if (!nic_data) |
| return -ENOMEM; |
| efx->nic_data = nic_data; |
| |
| /* we assume later that we can copy from this buffer in dwords */ |
| BUILD_BUG_ON(MCDI_CTL_SDU_LEN_MAX_V2 % 4); |
| |
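| /* The extra 8 bytes leave room for the two dwords of MCDI v2 header |
| * ahead of the SDU. |
| */ |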
| rc = efx_nic_alloc_buffer(efx, &nic_data->mcdi_buf, |
| 8 + MCDI_CTL_SDU_LEN_MAX_V2, GFP_KERNEL); |
| if (rc) |
| goto fail1; |
| |
| /* Get the MC's warm boot count. In case it's rebooting right |
| * now, be prepared to retry. |
| */ |
| i = 0; |
| for (;;) { |
| rc = efx_ef10_get_warm_boot_count(efx); |
| if (rc >= 0) |
| break; |
| if (++i == 5) |
| goto fail2; |
| ssleep(1); |
| } |
| nic_data->warm_boot_count = rc; |
| |
| nic_data->rx_rss_context = EFX_EF10_RSS_CONTEXT_INVALID; |
| |
| nic_data->vport_id = EVB_PORT_ID_ASSIGNED; |
| |
| /* In case we're recovering from a crash (kexec), we want to |
| * cancel any outstanding request by the previous user of this |
| * function. We send a special message using the least |
| * significant bits of the 'high' (doorbell) register. |
| */ |
| _efx_writed(efx, cpu_to_le32(1), ER_DZ_MC_DB_HWRD); |
| |
| rc = efx_mcdi_init(efx); |
| if (rc) |
| goto fail2; |
| |
| mutex_init(&nic_data->udp_tunnels_lock); |
| |
| /* Reset (most) configuration for this function */ |
| rc = efx_mcdi_reset(efx, RESET_TYPE_ALL); |
| if (rc) |
| goto fail3; |
| |
| /* Enable event logging */ |
| rc = efx_mcdi_log_ctrl(efx, true, false, 0); |
| if (rc) |
| goto fail3; |
| |
| rc = device_create_file(&efx->pci_dev->dev, |
| &dev_attr_link_control_flag); |
| if (rc) |
| goto fail3; |
| |
| rc = device_create_file(&efx->pci_dev->dev, &dev_attr_primary_flag); |
| if (rc) |
| goto fail4; |
| |
| rc = efx_ef10_get_pf_index(efx); |
| if (rc) |
| goto fail5; |
| |
| rc = efx_ef10_init_datapath_caps(efx); |
| if (rc < 0) |
| goto fail5; |
| |
| efx->rx_packet_len_offset = |
| ES_DZ_RX_PREFIX_PKTLEN_OFST - ES_DZ_RX_PREFIX_SIZE; |
| |
| rc = efx_mcdi_port_get_number(efx); |
| if (rc < 0) |
| goto fail5; |
| efx->port_num = rc; |
| |
| rc = efx->type->get_mac_address(efx, efx->net_dev->perm_addr); |
| if (rc) |
| goto fail5; |
| |
| rc = efx_ef10_get_timer_config(efx); |
| if (rc < 0) |
| goto fail5; |
| |
| rc = efx_mcdi_mon_probe(efx); |
| if (rc && rc != -EPERM) |
| goto fail5; |
| |
| efx_ptp_probe(efx, NULL); |
| |
| #ifdef CONFIG_SFC_SRIOV |
| if ((efx->pci_dev->physfn) && (!efx->pci_dev->is_physfn)) { |
| struct pci_dev *pci_dev_pf = efx->pci_dev->physfn; |
| struct efx_nic *efx_pf = pci_get_drvdata(pci_dev_pf); |
| |
| efx_pf->type->get_mac_address(efx_pf, nic_data->port_id); |
| } else |
| #endif |
| ether_addr_copy(nic_data->port_id, efx->net_dev->perm_addr); |
| |
| INIT_LIST_HEAD(&nic_data->vlan_list); |
| mutex_init(&nic_data->vlan_lock); |
| |
| /* Add the unspecified VID to support operation with VLAN filtering disabled */ |
| rc = efx_ef10_add_vlan(efx, EFX_FILTER_VID_UNSPEC); |
| if (rc) |
| goto fail_add_vid_unspec; |
| |
| /* If VLAN filtering is enabled, we need VID 0 to get untagged |
| * traffic. It is added automatically if the 8021q module is loaded, |
| * but we can't rely on that since the module may not be loaded. |
| */ |
| rc = efx_ef10_add_vlan(efx, 0); |
| if (rc) |
| goto fail_add_vid_0; |
| |
| return 0; |
| |
| fail_add_vid_0: |
| efx_ef10_cleanup_vlans(efx); |
| fail_add_vid_unspec: |
| mutex_destroy(&nic_data->vlan_lock); |
| efx_ptp_remove(efx); |
| efx_mcdi_mon_remove(efx); |
| fail5: |
| device_remove_file(&efx->pci_dev->dev, &dev_attr_primary_flag); |
| fail4: |
| device_remove_file(&efx->pci_dev->dev, &dev_attr_link_control_flag); |
| fail3: |
| efx_mcdi_detach(efx); |
| |
| mutex_lock(&nic_data->udp_tunnels_lock); |
| memset(nic_data->udp_tunnels, 0, sizeof(nic_data->udp_tunnels)); |
| (void)efx_ef10_set_udp_tnl_ports(efx, true); |
| mutex_unlock(&nic_data->udp_tunnels_lock); |
| mutex_destroy(&nic_data->udp_tunnels_lock); |
| |
| efx_mcdi_fini(efx); |
| fail2: |
| efx_nic_free_buffer(efx, &nic_data->mcdi_buf); |
| fail1: |
| kfree(nic_data); |
| efx->nic_data = NULL; |
| return rc; |
| } |
| |
| static int efx_ef10_free_vis(struct efx_nic *efx) |
| { |
| MCDI_DECLARE_BUF_ERR(outbuf); |
| size_t outlen; |
| int rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FREE_VIS, NULL, 0, |
| outbuf, sizeof(outbuf), &outlen); |
| |
| /* -EALREADY means nothing to free, so ignore */ |
| if (rc == -EALREADY) |
| rc = 0; |
| if (rc) |
| efx_mcdi_display_error(efx, MC_CMD_FREE_VIS, 0, outbuf, outlen, |
| rc); |
| return rc; |
| } |
| |
| #ifdef EFX_USE_PIO |
| |
| static void efx_ef10_free_piobufs(struct efx_nic *efx) |
| { |
| struct efx_ef10_nic_data *nic_data = efx->nic_data; |
| MCDI_DECLARE_BUF(inbuf, MC_CMD_FREE_PIOBUF_IN_LEN); |
| unsigned int i; |
| int rc; |
| |
| BUILD_BUG_ON(MC_CMD_FREE_PIOBUF_OUT_LEN != 0); |
| |
| for (i = 0; i < nic_data->n_piobufs; i++) { |
| MCDI_SET_DWORD(inbuf, FREE_PIOBUF_IN_PIOBUF_HANDLE, |
| nic_data->piobuf_handle[i]); |
| rc = efx_mcdi_rpc(efx, MC_CMD_FREE_PIOBUF, inbuf, sizeof(inbuf), |
| NULL, 0, NULL); |
| WARN_ON(rc); |
| } |
| |
| nic_data->n_piobufs = 0; |
| } |
| |
| static int efx_ef10_alloc_piobufs(struct efx_nic *efx, unsigned int n) |
| { |
| struct efx_ef10_nic_data *nic_data = efx->nic_data; |
| MCDI_DECLARE_BUF(outbuf, MC_CMD_ALLOC_PIOBUF_OUT_LEN); |
| unsigned int i; |
| size_t outlen; |
| int rc = 0; |
| |
| BUILD_BUG_ON(MC_CMD_ALLOC_PIOBUF_IN_LEN != 0); |
| |
| for (i = 0; i < n; i++) { |
| rc = efx_mcdi_rpc_quiet(efx, MC_CMD_ALLOC_PIOBUF, NULL, 0, |
| outbuf, sizeof(outbuf), &outlen); |
| if (rc) { |
| /* Don't display the MC error if we didn't have space |
| * for a VF. |
| */ |
| if (!(efx_ef10_is_vf(efx) && rc == -ENOSPC)) |
| efx_mcdi_display_error(efx, MC_CMD_ALLOC_PIOBUF, |
| 0, outbuf, outlen, rc); |
| break; |
| } |
| if (outlen < MC_CMD_ALLOC_PIOBUF_OUT_LEN) { |
| rc = -EIO; |
| break; |
| } |
| nic_data->piobuf_handle[i] = |
| MCDI_DWORD(outbuf, ALLOC_PIOBUF_OUT_PIOBUF_HANDLE); |
| netif_dbg(efx, probe, efx->net_dev, |
| "allocated PIO buffer %u handle %x\n", i, |
| nic_data->piobuf_handle[i]); |
| } |
| |
| nic_data->n_piobufs = i; |
| if (rc) |
| efx_ef10_free_piobufs(efx); |
| return rc; |
| } |
| |
| static int efx_ef10_link_piobufs(struct efx_nic *efx) |
| { |
| struct efx_ef10_nic_data *nic_data = efx->nic_data; |
| MCDI_DECLARE_BUF(inbuf, MC_CMD_LINK_PIOBUF_IN_LEN); |
| struct efx_channel *channel; |
| struct efx_tx_queue *tx_queue; |
| unsigned int offset, index; |
| int rc; |
| |
| BUILD_BUG_ON(MC_CMD_LINK_PIOBUF_OUT_LEN != 0); |
| BUILD_BUG_ON(MC_CMD_UNLINK_PIOBUF_OUT_LEN != 0); |
| |
| /* Link a buffer to each VI in the write-combining mapping */ |
| for (index = 0; index < nic_data->n_piobufs; ++index) { |
| MCDI_SET_DWORD(inbuf, LINK_PIOBUF_IN_PIOBUF_HANDLE, |
| nic_data->piobuf_handle[index]); |
| MCDI_SET_DWORD(inbuf, LINK_PIOBUF_IN_TXQ_INSTANCE, |
| nic_data->pio_write_vi_base + index); |
| rc = efx_mcdi_rpc(efx, MC_CMD_LINK_PIOBUF, |
| inbuf, MC_CMD_LINK_PIOBUF_IN_LEN, |
| NULL, 0, NULL); |
| if (rc) { |
| netif_err(efx, drv, efx->net_dev, |
| "failed to link VI %u to PIO buffer %u (%d)\n", |
| nic_data->pio_write_vi_base + index, index, |
| rc); |
| goto fail; |
| } |
| netif_dbg(efx, probe, efx->net_dev, |
| "linked VI %u to PIO buffer %u\n", |
| nic_data->pio_write_vi_base + index, index); |
| } |
| |
| /* Link a buffer to each TX queue */ |
| efx_for_each_channel(channel, efx) { |
| efx_for_each_channel_tx_queue(tx_queue, channel) { |
| /* We assign the PIO buffers to queues in |
| * reverse order to allow for the following |
| * special case. |
| */ |
| offset = ((efx->tx_channel_offset + efx->n_tx_channels - |
| tx_queue->channel->channel - 1) * |
| efx_piobuf_size); |
| index = offset / nic_data->piobuf_size; |
| offset = offset % nic_data->piobuf_size; |
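| /* For example, with a (hypothetical) 2048-byte PIO buffer |
| * and 256-byte efx_piobuf_size, eight queues share each |
| * buffer; offset 0x1200 maps to buffer 2 at offset 0x200. |
| */ |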
| |
| /* When the host page size is 4K, the first |
| * host page in the WC mapping may be within |
| * the same VI page as the last TX queue. We |
| * can only link one buffer to each VI. |
| */ |
| if (tx_queue->queue == nic_data->pio_write_vi_base) { |
| BUG_ON(index != 0); |
| rc = 0; |
| } else { |
| MCDI_SET_DWORD(inbuf, |
| LINK_PIOBUF_IN_PIOBUF_HANDLE, |
| nic_data->piobuf_handle[index]); |
| MCDI_SET_DWORD(inbuf, |
| LINK_PIOBUF_IN_TXQ_INSTANCE, |
| tx_queue->queue); |
| rc = efx_mcdi_rpc(efx, MC_CMD_LINK_PIOBUF, |
| inbuf, MC_CMD_LINK_PIOBUF_IN_LEN, |
| NULL, 0, NULL); |
| } |
| |
| if (rc) { |
| /* This is non-fatal; the TX path just |
| * won't use PIO for this queue. |
| */ |
| netif_err(efx, drv, efx->net_dev, |
| "failed to link VI %u to PIO buffer %u (%d)\n", |
| tx_queue->queue, index, rc); |
| tx_queue->piobuf = NULL; |
| } else { |
| tx_queue->piobuf = |
| nic_data->pio_write_base + |
| index * EFX_VI_PAGE_SIZE + offset; |
| tx_queue->piobuf_offset = offset; |
| netif_dbg(efx, probe, efx->net_dev, |
| "linked VI %u to PIO buffer %u offset %x addr %p\n", |
| tx_queue->queue, index, |
| tx_queue->piobuf_offset, |
| tx_queue->piobuf); |
| } |
| } |
| } |
| |
| return 0; |
| |
| fail: |
| /* inbuf was defined for MC_CMD_LINK_PIOBUF. We can use the same |
| * buffer for MC_CMD_UNLINK_PIOBUF because it's shorter. |
| */ |
| BUILD_BUG_ON(MC_CMD_LINK_PIOBUF_IN_LEN < MC_CMD_UNLINK_PIOBUF_IN_LEN); |
| while (index--) { |
| MCDI_SET_DWORD(inbuf, UNLINK_PIOBUF_IN_TXQ_INSTANCE, |
| nic_data->pio_write_vi_base + index); |
| efx_mcdi_rpc(efx, MC_CMD_UNLINK_PIOBUF, |
| inbuf, MC_CMD_UNLINK_PIOBUF_IN_LEN, |
| NULL, 0, NULL); |
| } |
| return rc; |
| } |
| |
| static void efx_ef10_forget_old_piobufs(struct efx_nic *efx) |
| { |
| struct efx_channel *channel; |
| struct efx_tx_queue *tx_queue; |
| |
| /* All our existing PIO buffers went away */ |
| efx_for_each_channel(channel, efx) |
| efx_for_each_channel_tx_queue(tx_queue, channel) |
| tx_queue->piobuf = NULL; |
| } |
| |
| #else /* !EFX_USE_PIO */ |
| |
| static int efx_ef10_alloc_piobufs(struct efx_nic *efx, unsigned int n) |
| { |
| return n == 0 ? 0 : -ENOBUFS; |
| } |
| |
| static int efx_ef10_link_piobufs(struct efx_nic *efx) |
| { |
| return 0; |
| } |
| |
| static void efx_ef10_free_piobufs(struct efx_nic *efx) |
| { |
| } |
| |
| static void efx_ef10_forget_old_piobufs(struct efx_nic *efx) |
| { |
| } |
| |
| #endif /* EFX_USE_PIO */ |
| |
| static void efx_ef10_remove(struct efx_nic *efx) |
| { |
| struct efx_ef10_nic_data *nic_data = efx->nic_data; |
| int rc; |
| |
| #ifdef CONFIG_SFC_SRIOV |
| struct efx_ef10_nic_data *nic_data_pf; |
| struct pci_dev *pci_dev_pf; |
| struct efx_nic *efx_pf; |
| struct ef10_vf *vf; |
| |
| if (efx->pci_dev->is_virtfn) { |
| pci_dev_pf = efx->pci_dev->physfn; |
| if (pci_dev_pf) { |
| efx_pf = pci_get_drvdata(pci_dev_pf); |
| nic_data_pf = efx_pf->nic_data; |
| vf = nic_data_pf->vf + nic_data->vf_index; |
| vf->efx = NULL; |
| } else |
| netif_info(efx, drv, efx->net_dev, |
| "Could not get the PF id from VF\n"); |
| } |
| #endif |
| |
| efx_ef10_cleanup_vlans(efx); |
| mutex_destroy(&nic_data->vlan_lock); |
| |
| efx_ptp_remove(efx); |
| |
| efx_mcdi_mon_remove(efx); |
| |
| efx_ef10_rx_free_indir_table(efx); |
| |
| if (nic_data->wc_membase) |
| iounmap(nic_data->wc_membase); |
| |
| rc = efx_ef10_free_vis(efx); |
| WARN_ON(rc != 0); |
| |
| if (!nic_data->must_restore_piobufs) |
| efx_ef10_free_piobufs(efx); |
| |
| device_remove_file(&efx->pci_dev->dev, &dev_attr_primary_flag); |
| device_remove_file(&efx->pci_dev->dev, &dev_attr_link_control_flag); |
| |
| efx_mcdi_detach(efx); |
| |
| memset(nic_data->udp_tunnels, 0, sizeof(nic_data->udp_tunnels)); |
| mutex_lock(&nic_data->udp_tunnels_lock); |
| (void)efx_ef10_set_udp_tnl_ports(efx, true); |
| mutex_unlock(&nic_data->udp_tunnels_lock); |
| |
| mutex_destroy(&nic_data->udp_tunnels_lock); |
| |
| efx_mcdi_fini(efx); |
| efx_nic_free_buffer(efx, &nic_data->mcdi_buf); |
| kfree(nic_data); |
| } |
| |
| static int efx_ef10_probe_pf(struct efx_nic *efx) |
| { |
| return efx_ef10_probe(efx); |
| } |
| |
| int efx_ef10_vadaptor_query(struct efx_nic *efx, unsigned int port_id, |
| u32 *port_flags, u32 *vadaptor_flags, |
| unsigned int *vlan_tags) |
| { |
| struct efx_ef10_nic_data *nic_data = efx->nic_data; |
| MCDI_DECLARE_BUF(inbuf, MC_CMD_VADAPTOR_QUERY_IN_LEN); |
| MCDI_DECLARE_BUF(outbuf, MC_CMD_VADAPTOR_QUERY_OUT_LEN); |
| size_t outlen; |
| int rc; |
| |
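| /* Without the VADAPTOR_QUERY capability the RPC is skipped and the |
| * flags below are read from the untouched (zero-initialised) outbuf. |
| */ |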
| if (nic_data->datapath_caps & |
| (1 << MC_CMD_GET_CAPABILITIES_OUT_VADAPTOR_QUERY_LBN)) { |
| MCDI_SET_DWORD(inbuf, VADAPTOR_QUERY_IN_UPSTREAM_PORT_ID, |
| port_id); |
| |
| rc = efx_mcdi_rpc(efx, MC_CMD_VADAPTOR_QUERY, inbuf, sizeof(inbuf), |
| outbuf, sizeof(outbuf), &outlen); |
| if (rc) |
| return rc; |
| |
| if (outlen < sizeof(outbuf)) { |
| rc = -EIO; |
| return rc; |
| } |
| } |
| |
| if (port_flags) |
| *port_flags = MCDI_DWORD(outbuf, VADAPTOR_QUERY_OUT_PORT_FLAGS); |
| if (vadaptor_flags) |
| *vadaptor_flags = |
| MCDI_DWORD(outbuf, VADAPTOR_QUERY_OUT_VADAPTOR_FLAGS); |
| if (vlan_tags) |
| *vlan_tags = |
| MCDI_DWORD(outbuf, |
| VADAPTOR_QUERY_OUT_NUM_AVAILABLE_VLAN_TAGS); |
| |
| return 0; |
| } |
| |
| int efx_ef10_vadaptor_alloc(struct efx_nic *efx, unsigned int port_id) |
| { |
| MCDI_DECLARE_BUF(inbuf, MC_CMD_VADAPTOR_ALLOC_IN_LEN); |
| |
| MCDI_SET_DWORD(inbuf, VADAPTOR_ALLOC_IN_UPSTREAM_PORT_ID, port_id); |
| return efx_mcdi_rpc(efx, MC_CMD_VADAPTOR_ALLOC, inbuf, sizeof(inbuf), |
| NULL, 0, NULL); |
| } |
| |
| int efx_ef10_vadaptor_free(struct efx_nic *efx, unsigned int port_id) |
| { |
| MCDI_DECLARE_BUF(inbuf, MC_CMD_VADAPTOR_FREE_IN_LEN); |
| |
| MCDI_SET_DWORD(inbuf, VADAPTOR_FREE_IN_UPSTREAM_PORT_ID, port_id); |
| return efx_mcdi_rpc(efx, MC_CMD_VADAPTOR_FREE, inbuf, sizeof(inbuf), |
| NULL, 0, NULL); |
| } |
| |
| int efx_ef10_vport_add_mac(struct efx_nic *efx, |
| unsigned int port_id, u8 *mac) |
| { |
| MCDI_DECLARE_BUF(inbuf, MC_CMD_VPORT_ADD_MAC_ADDRESS_IN_LEN); |
| |
| MCDI_SET_DWORD(inbuf, VPORT_ADD_MAC_ADDRESS_IN_VPORT_ID, port_id); |
| ether_addr_copy(MCDI_PTR(inbuf, VPORT_ADD_MAC_ADDRESS_IN_MACADDR), mac); |
| |
| return efx_mcdi_rpc(efx, MC_CMD_VPORT_ADD_MAC_ADDRESS, inbuf, |
| sizeof(inbuf), NULL, 0, NULL); |
| } |
| |
| int efx_ef10_vport_del_mac(struct efx_nic *efx, |
| unsigned int port_id, u8 *mac) |
| { |
| MCDI_DECLARE_BUF(inbuf, MC_CMD_VPORT_DEL_MAC_ADDRESS_IN_LEN); |
| |
| MCDI_SET_DWORD(inbuf, VPORT_DEL_MAC_ADDRESS_IN_VPORT_ID, port_id); |
| ether_addr_copy(MCDI_PTR(inbuf, VPORT_DEL_MAC_ADDRESS_IN_MACADDR), mac); |
| |
| return efx_mcdi_rpc(efx, MC_CMD_VPORT_DEL_MAC_ADDRESS, inbuf, |
| sizeof(inbuf), NULL, 0, NULL); |
| } |
| |
| #ifdef CONFIG_SFC_SRIOV |
| static int efx_ef10_probe_vf(struct efx_nic *efx) |
| { |
| int rc; |
| struct pci_dev *pci_dev_pf; |
| |
| /* If the parent PF has no VF data structure, it doesn't know about this |
| * VF, so fail the probe; the VF needs to be re-created. This can happen |
| * if the PF driver is unloaded while the VF is assigned to a guest. |
| */ |
| pci_dev_pf = efx->pci_dev->physfn; |
| if (pci_dev_pf) { |
| struct efx_nic *efx_pf = pci_get_drvdata(pci_dev_pf); |
| struct efx_ef10_nic_data *nic_data_pf = efx_pf->nic_data; |
| |
| if (!nic_data_pf->vf) { |
| netif_info(efx, drv, efx->net_dev, |
| "The VF cannot link to its parent PF; " |
| "please destroy and re-create the VF\n"); |
| return -EBUSY; |
| } |
| } |
| |
| rc = efx_ef10_probe(efx); |
| if (rc) |
| return rc; |
| |
| rc = efx_ef10_get_vf_index(efx); |
| if (rc) |
| goto fail; |
| |
| if (efx->pci_dev->is_virtfn) { |
| if (efx->pci_dev->physfn) { |
| struct efx_nic *efx_pf = |
| pci_get_drvdata(efx->pci_dev->physfn); |
| struct efx_ef10_nic_data *nic_data_p = efx_pf->nic_data; |
| struct efx_ef10_nic_data *nic_data = efx->nic_data; |
| |
| nic_data_p->vf[nic_data->vf_index].efx = efx; |
| nic_data_p->vf[nic_data->vf_index].pci_dev = |
| efx->pci_dev; |
| } else |
| netif_info(efx, drv, efx->net_dev, |
| "Could not get the PF id from VF\n"); |
| } |
| |
| return 0; |
| |
| fail: |
| efx_ef10_remove(efx); |
| return rc; |
| } |
| #else |
| static int efx_ef10_probe_vf(struct efx_nic *efx __attribute__ ((unused))) |
| { |
| return 0; |
| } |
| #endif |
| |
| static int efx_ef10_alloc_vis(struct efx_nic *efx, |
| unsigned int min_vis, unsigned int max_vis) |
| { |
| MCDI_DECLARE_BUF(inbuf, MC_CMD_ALLOC_VIS_IN_LEN); |
| MCDI_DECLARE_BUF(outbuf, MC_CMD_ALLOC_VIS_OUT_LEN); |
| struct efx_ef10_nic_data *nic_data = efx->nic_data; |
| size_t outlen; |
| int rc; |
| |
| MCDI_SET_DWORD(inbuf, ALLOC_VIS_IN_MIN_VI_COUNT, min_vis); |
| MCDI_SET_DWORD(inbuf, ALLOC_VIS_IN_MAX_VI_COUNT, max_vis); |
| rc = efx_mcdi_rpc(efx, MC_CMD_ALLOC_VIS, inbuf, sizeof(inbuf), |
| outbuf, sizeof(outbuf), &outlen); |
| if (rc != 0) |
| return rc; |
| |
| if (outlen < MC_CMD_ALLOC_VIS_OUT_LEN) |
| return -EIO; |
| |
| netif_dbg(efx, drv, efx->net_dev, "base VI is A0x%03x\n", |
| MCDI_DWORD(outbuf, ALLOC_VIS_OUT_VI_BASE)); |
| |
| nic_data->vi_base = MCDI_DWORD(outbuf, ALLOC_VIS_OUT_VI_BASE); |
| nic_data->n_allocated_vis = MCDI_DWORD(outbuf, ALLOC_VIS_OUT_VI_COUNT); |
| return 0; |
| } |
| |
| /* Note that the failure path of this function does not free |
| * resources, as this will be done by efx_ef10_remove(). |
| */ |
| static int efx_ef10_dimension_resources(struct efx_nic *efx) |
| { |
| struct efx_ef10_nic_data *nic_data = efx->nic_data; |
| unsigned int uc_mem_map_size, wc_mem_map_size; |
| unsigned int min_vis = max(EFX_TXQ_TYPES, |
| efx_separate_tx_channels ? 2 : 1); |
| unsigned int channel_vis, pio_write_vi_base, max_vis; |
| void __iomem *membase; |
| int rc; |
| |
| channel_vis = max(efx->n_channels, efx->n_tx_channels * EFX_TXQ_TYPES); |
| |
| #ifdef EFX_USE_PIO |
| /* Try to allocate PIO buffers if wanted and if the full |
| * number of PIO buffers would be sufficient to allocate one |
| * copy-buffer per TX channel. Failure is non-fatal, as there |
| * are only a small number of PIO buffers shared between all |
| * functions of the controller. |
| */ |
| if (efx_piobuf_size != 0 && |
| nic_data->piobuf_size / efx_piobuf_size * EF10_TX_PIOBUF_COUNT >= |
| efx->n_tx_channels) { |
| unsigned int n_piobufs = |
| DIV_ROUND_UP(efx->n_tx_channels, |
| nic_data->piobuf_size / efx_piobuf_size); |
| |
| rc = efx_ef10_alloc_piobufs(efx, n_piobufs); |
| if (rc == -ENOSPC) |
| netif_dbg(efx, probe, efx->net_dev, |
| "out of PIO buffers; cannot allocate more\n"); |
| else if (rc == -EPERM) |
| netif_dbg(efx, probe, efx->net_dev, |
| "not permitted to allocate PIO buffers\n"); |
| else if (rc) |
| netif_err(efx, probe, efx->net_dev, |
| "failed to allocate PIO buffers (%d)\n", rc); |
| else |
| netif_dbg(efx, probe, efx->net_dev, |
| "allocated %u PIO buffers\n", n_piobufs); |
| } |
| #else |
| nic_data->n_piobufs = 0; |
| #endif |
| |
| /* PIO buffers should be mapped with write-combining enabled, |
| * and we want to make single UC and WC mappings rather than |
| * several of each (in fact that's the only option if host |
| * page size is >4K). So we may allocate some extra VIs just |
| * for writing PIO buffers through. |
| * |
| * The UC mapping contains (channel_vis - 1) complete VIs and the |
| * first half of the next VI. Then the WC mapping begins with |
| * the second half of this last VI. |
| */ |
| uc_mem_map_size = PAGE_ALIGN((channel_vis - 1) * EFX_VI_PAGE_SIZE + |
| ER_DZ_TX_PIOBUF); |
| if (nic_data->n_piobufs) { |
| /* pio_write_vi_base rounds down to give the number of complete |
| * VIs inside the UC mapping. |
| */ |
| pio_write_vi_base = uc_mem_map_size / EFX_VI_PAGE_SIZE; |
| wc_mem_map_size = (PAGE_ALIGN((pio_write_vi_base + |
| nic_data->n_piobufs) * |
| EFX_VI_PAGE_SIZE) - |
| uc_mem_map_size); |
| max_vis = pio_write_vi_base + nic_data->n_piobufs; |
| } else { |
| pio_write_vi_base = 0; |
| wc_mem_map_size = 0; |
| max_vis = channel_vis; |
| } |
| |
| /* In case the last attached driver failed to free VIs, do it now */ |
| rc = efx_ef10_free_vis(efx); |
| if (rc != 0) |
| return rc; |
| |
| rc = efx_ef10_alloc_vis(efx, min_vis, max_vis); |
| if (rc != 0) |
| return rc; |
| |
| if (nic_data->n_allocated_vis < channel_vis) { |
| netif_info(efx, drv, efx->net_dev, |
| "Could not allocate enough VIs to satisfy RSS" |
| " requirements. Performance may not be optimal.\n"); |
| /* We didn't get the VIs to populate our channels. |
| * We could keep what we got, but then we'd have more |
| * interrupts than we need. |
| * Instead, calculate a new max_channels and restart. |
| */ |
| efx->max_channels = nic_data->n_allocated_vis; |
| efx->max_tx_channels = |
| nic_data->n_allocated_vis / EFX_TXQ_TYPES; |
| |
| efx_ef10_free_vis(efx); |
| return -EAGAIN; |
| } |
| |
| /* If we didn't get enough VIs to map all the PIO buffers, free |
| * them now. |
| */ |
| if (nic_data->n_piobufs && |
| nic_data->n_allocated_vis < |
| pio_write_vi_base + nic_data->n_piobufs) { |
| netif_dbg(efx, probe, efx->net_dev, |
| "%u VIs are not sufficient to map %u PIO buffers\n", |
| nic_data->n_allocated_vis, nic_data->n_piobufs); |
| efx_ef10_free_piobufs(efx); |
| } |
| |
| /* Shrink the original UC mapping of the memory BAR */ |
| membase = ioremap_nocache(efx->membase_phys, uc_mem_map_size); |
| if (!membase) { |
| netif_err(efx, probe, efx->net_dev, |
| "could not shrink memory BAR to %x\n", |
| uc_mem_map_size); |
| return -ENOMEM; |
| } |
| iounmap(efx->membase); |
| efx->membase = membase; |
| |
| /* Set up the WC mapping if needed */ |
| if (wc_mem_map_size) { |
| nic_data->wc_membase = ioremap_wc(efx->membase_phys + |
| uc_mem_map_size, |
| wc_mem_map_size); |
| if (!nic_data->wc_membase) { |
| netif_err(efx, probe, efx->net_dev, |
| "could not allocate WC mapping of size %x\n", |
| wc_mem_map_size); |
| return -ENOMEM; |
| } |
| nic_data->pio_write_vi_base = pio_write_vi_base; |
| nic_data->pio_write_base = |
| nic_data->wc_membase + |
| (pio_write_vi_base * EFX_VI_PAGE_SIZE + ER_DZ_TX_PIOBUF - |
| uc_mem_map_size); |
| |
| rc = efx_ef10_link_piobufs(efx); |
| if (rc) |
| efx_ef10_free_piobufs(efx); |
| } |
| |
| netif_dbg(efx, probe, efx->net_dev, |
| "memory BAR at %pa (virtual %p+%x UC, %p+%x WC)\n", |
| &efx->membase_phys, efx->membase, uc_mem_map_size, |
| nic_data->wc_membase, wc_mem_map_size); |
| |
| return 0; |
| } |
| |
| static int efx_ef10_init_nic(struct efx_nic *efx) |
| { |
| struct efx_ef10_nic_data *nic_data = efx->nic_data; |
| int rc; |
| |
| if (nic_data->must_check_datapath_caps) { |
| rc = efx_ef10_init_datapath_caps(efx); |
| if (rc) |
| return rc; |
| nic_data->must_check_datapath_caps = false; |
| } |
| |
| if (nic_data->must_realloc_vis) { |
| /* We cannot let the number of VIs change now */ |
| rc = efx_ef10_alloc_vis(efx, nic_data->n_allocated_vis, |
| nic_data->n_allocated_vis); |
| if (rc) |
| return rc; |
| nic_data->must_realloc_vis = false; |
| } |
| |
| if (nic_data->must_restore_piobufs && nic_data->n_piobufs) { |
| rc = efx_ef10_alloc_piobufs(efx, nic_data->n_piobufs); |
| if (rc == 0) { |
| rc = efx_ef10_link_piobufs(efx); |
| if (rc) |
| efx_ef10_free_piobufs(efx); |
| } |
| |
| /* Log an error on failure, but this is non-fatal. |
| * Permission errors are less important - we've presumably |
| * had the PIO buffer licence removed. |
| */ |
| if (rc == -EPERM) |
| netif_dbg(efx, drv, efx->net_dev, |
| "not permitted to restore PIO buffers\n"); |
| else if (rc) |
| netif_err(efx, drv, efx->net_dev, |
| "failed to restore PIO buffers (%d)\n", rc); |
| nic_data->must_restore_piobufs = false; |
| } |
| |
| /* don't fail init if RSS setup doesn't work */ |
| rc = efx->type->rx_push_rss_config(efx, false, efx->rx_indir_table, NULL); |
| efx->rss_active = (rc == 0); |
| |
| return 0; |
| } |
| |
| static void efx_ef10_reset_mc_allocations(struct efx_nic *efx) |
| { |
| struct efx_ef10_nic_data *nic_data = efx->nic_data; |
| #ifdef CONFIG_SFC_SRIOV |
| unsigned int i; |
| #endif |
| |
| /* All our allocations have been reset */ |
| nic_data->must_realloc_vis = true; |
| nic_data->must_restore_filters = true; |
| nic_data->must_restore_piobufs = true; |
| efx_ef10_forget_old_piobufs(efx); |
| nic_data->rx_rss_context = EFX_EF10_RSS_CONTEXT_INVALID; |
| |
| /* Driver-created vswitches and vports must be re-created */ |
| nic_data->must_probe_vswitching = true; |
| nic_data->vport_id = EVB_PORT_ID_ASSIGNED; |
| #ifdef CONFIG_SFC_SRIOV |
| if (nic_data->vf) |
| for (i = 0; i < efx->vf_count; i++) |
| nic_data->vf[i].vport_id = 0; |
| #endif |
| } |
| |
| static enum reset_type efx_ef10_map_reset_reason(enum reset_type reason) |
| { |
| if (reason == RESET_TYPE_MC_FAILURE) |
| return RESET_TYPE_DATAPATH; |
| |
| return efx_mcdi_map_reset_reason(reason); |
| } |
| |
| static int efx_ef10_map_reset_flags(u32 *flags) |
| { |
| enum { |
| EF10_RESET_PORT = ((ETH_RESET_MAC | ETH_RESET_PHY) << |
| ETH_RESET_SHARED_SHIFT), |
| EF10_RESET_MC = ((ETH_RESET_DMA | ETH_RESET_FILTER | |
| ETH_RESET_OFFLOAD | ETH_RESET_MAC | |
| ETH_RESET_PHY | ETH_RESET_MGMT) << |
| ETH_RESET_SHARED_SHIFT) |
| }; |
| |
| /* We assume for now that our PCI function is permitted to |
| * reset everything. |
| */ |
| |
| if ((*flags & EF10_RESET_MC) == EF10_RESET_MC) { |
| *flags &= ~EF10_RESET_MC; |
| return RESET_TYPE_WORLD; |
| } |
| |
| if ((*flags & EF10_RESET_PORT) == EF10_RESET_PORT) { |
| *flags &= ~EF10_RESET_PORT; |
| return RESET_TYPE_ALL; |
| } |
| |
| /* no invisible reset implemented */ |
| |
| return -EINVAL; |
| } |
| |
| static int efx_ef10_reset(struct efx_nic *efx, enum reset_type reset_type) |
| { |
| int rc = efx_mcdi_reset(efx, reset_type); |
| |
| /* Unprivileged functions return -EPERM, but need to return success |
| * here so that the datapath is brought back up. |
| */ |
| if (reset_type == RESET_TYPE_WORLD && rc == -EPERM) |
| rc = 0; |
| |
| /* If it was a port reset, trigger reallocation of MC resources. |
| * Note that on an MC reset nothing needs to be done now because we'll |
| * detect the MC reset later and handle it then. |
| * For an FLR, we never get an MC reset event, but the MC has reset all |
| * resources assigned to us, so we have to trigger reallocation now. |
| */ |
| if ((reset_type == RESET_TYPE_ALL || |
| reset_type == RESET_TYPE_MCDI_TIMEOUT) && !rc) |
| efx_ef10_reset_mc_allocations(efx); |
| return rc; |
| } |
| |
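| /* Each descriptor below is { ethtool name (NULL if not exposed), |
| * field width in bits, byte offset into the stats DMA buffer }; the |
| * MC_CMD_MAC_ indices count 64-bit words, hence the factor of 8. |
| */ |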
| #define EF10_DMA_STAT(ext_name, mcdi_name) \ |
| [EF10_STAT_ ## ext_name] = \ |
| { #ext_name, 64, 8 * MC_CMD_MAC_ ## mcdi_name } |
| #define EF10_DMA_INVIS_STAT(int_name, mcdi_name) \ |
| [EF10_STAT_ ## int_name] = \ |
| { NULL, 64, 8 * MC_CMD_MAC_ ## mcdi_name } |
| #define EF10_OTHER_STAT(ext_name) \ |
| [EF10_STAT_ ## ext_name] = { #ext_name, 0, 0 } |
| #define GENERIC_SW_STAT(ext_name) \ |
| [GENERIC_STAT_ ## ext_name] = { #ext_name, 0, 0 } |
| |
| static const struct efx_hw_stat_desc efx_ef10_stat_desc[EF10_STAT_COUNT] = { |
| EF10_DMA_STAT(port_tx_bytes, TX_BYTES), |
| EF10_DMA_STAT(port_tx_packets, TX_PKTS), |
| EF10_DMA_STAT(port_tx_pause, TX_PAUSE_PKTS), |
| EF10_DMA_STAT(port_tx_control, TX_CONTROL_PKTS), |
| EF10_DMA_STAT(port_tx_unicast, TX_UNICAST_PKTS), |
| EF10_DMA_STAT(port_tx_multicast, TX_MULTICAST_PKTS), |
| EF10_DMA_STAT(port_tx_broadcast, TX_BROADCAST_PKTS), |
| EF10_DMA_STAT(port_tx_lt64, TX_LT64_PKTS), |
| EF10_DMA_STAT(port_tx_64, TX_64_PKTS), |
| EF10_DMA_STAT(port_tx_65_to_127, TX_65_TO_127_PKTS), |
| EF10_DMA_STAT(port_tx_128_to_255, TX_128_TO_255_PKTS), |
| EF10_DMA_STAT(port_tx_256_to_511, TX_256_TO_511_PKTS), |
| EF10_DMA_STAT(port_tx_512_to_1023, TX_512_TO_1023_PKTS), |
| EF10_DMA_STAT(port_tx_1024_to_15xx, TX_1024_TO_15XX_PKTS), |
| EF10_DMA_STAT(port_tx_15xx_to_jumbo, TX_15XX_TO_JUMBO_PKTS), |
| EF10_DMA_STAT(port_rx_bytes, RX_BYTES), |
| EF10_DMA_INVIS_STAT(port_rx_bytes_minus_good_bytes, RX_BAD_BYTES), |
| EF10_OTHER_STAT(port_rx_good_bytes), |
| EF10_OTHER_STAT(port_rx_bad_bytes), |
| EF10_DMA_STAT(port_rx_packets, RX_PKTS), |
| EF10_DMA_STAT(port_rx_good, RX_GOOD_PKTS), |
| EF10_DMA_STAT(port_rx_bad, RX_BAD_FCS_PKTS), |
| EF10_DMA_STAT(port_rx_pause, RX_PAUSE_PKTS), |
| EF10_DMA_STAT(port_rx_control, RX_CONTROL_PKTS), |
| EF10_DMA_STAT(port_rx_unicast, RX_UNICAST_PKTS), |
| EF10_DMA_STAT(port_rx_multicast, RX_MULTICAST_PKTS), |
| EF10_DMA_STAT(port_rx_broadcast, RX_BROADCAST_PKTS), |
| EF10_DMA_STAT(port_rx_lt64, RX_UNDERSIZE_PKTS), |
| EF10_DMA_STAT(port_rx_64, RX_64_PKTS), |
| EF10_DMA_STAT(port_rx_65_to_127, RX_65_TO_127_PKTS), |
| EF10_DMA_STAT(port_rx_128_to_255, RX_128_TO_255_PKTS), |
| EF10_DMA_STAT(port_rx_256_to_511, RX_256_TO_511_PKTS), |
| EF10_DMA_STAT(port_rx_512_to_1023, RX_512_TO_1023_PKTS), |
| EF10_DMA_STAT(port_rx_1024_to_15xx, RX_1024_TO_15XX_PKTS), |
| EF10_DMA_STAT(port_rx_15xx_to_jumbo, RX_15XX_TO_JUMBO_PKTS), |
| EF10_DMA_STAT(port_rx_gtjumbo, RX_GTJUMBO_PKTS), |
| EF10_DMA_STAT(port_rx_bad_gtjumbo, RX_JABBER_PKTS), |
| EF10_DMA_STAT(port_rx_overflow, RX_OVERFLOW_PKTS), |
| EF10_DMA_STAT(port_rx_align_error, RX_ALIGN_ERROR_PKTS), |
| EF10_DMA_STAT(port_rx_length_error, RX_LENGTH_ERROR_PKTS), |
| EF10_DMA_STAT(port_rx_nodesc_drops, RX_NODESC_DROPS), |
| GENERIC_SW_STAT(rx_nodesc_trunc), |
| GENERIC_SW_STAT(rx_noskb_drops), |
| EF10_DMA_STAT(port_rx_pm_trunc_bb_overflow, PM_TRUNC_BB_OVERFLOW), |
| EF10_DMA_STAT(port_rx_pm_discard_bb_overflow, PM_DISCARD_BB_OVERFLOW), |
| EF10_DMA_STAT(port_rx_pm_trunc_vfifo_full, PM_TRUNC_VFIFO_FULL), |
| EF10_DMA_STAT(port_rx_pm_discard_vfifo_full, PM_DISCARD_VFIFO_FULL), |
| EF10_DMA_STAT(port_rx_pm_trunc_qbb, PM_TRUNC_QBB), |
| EF10_DMA_STAT(port_rx_pm_discard_qbb, PM_DISCARD_QBB), |
| EF10_DMA_STAT(port_rx_pm_discard_mapping, PM_DISCARD_MAPPING), |
| EF10_DMA_STAT(port_rx_dp_q_disabled_packets, RXDP_Q_DISABLED_PKTS), |
| EF10_DMA_STAT(port_rx_dp_di_dropped_packets, RXDP_DI_DROPPED_PKTS), |
| EF10_DMA_STAT(port_rx_dp_streaming_packets, RXDP_STREAMING_PKTS), |
| EF10_DMA_STAT(port_rx_dp_hlb_fetch, RXDP_HLB_FETCH_CONDITIONS), |
| EF10_DMA_STAT(port_rx_dp_hlb_wait, RXDP_HLB_WAIT_CONDITIONS), |
| EF10_DMA_STAT(rx_unicast, VADAPTER_RX_UNICAST_PACKETS), |
| EF10_DMA_STAT(rx_unicast_bytes, VADAPTER_RX_UNICAST_BYTES), |
| EF10_DMA_STAT(rx_multicast, VADAPTER_RX_MULTICAST_PACKETS), |
| EF10_DMA_STAT(rx_multicast_bytes, VADAPTER_RX_MULTICAST_BYTES), |
| EF10_DMA_STAT(rx_broadcast, VADAPTER_RX_BROADCAST_PACKETS), |
| EF10_DMA_STAT(rx_broadcast_bytes, VADAPTER_RX_BROADCAST_BYTES), |
| EF10_DMA_STAT(rx_bad, VADAPTER_RX_BAD_PACKETS), |
| EF10_DMA_STAT(rx_bad_bytes, VADAPTER_RX_BAD_BYTES), |
| EF10_DMA_STAT(rx_overflow, VADAPTER_RX_OVERFLOW), |
| EF10_DMA_STAT(tx_unicast, VADAPTER_TX_UNICAST_PACKETS), |
| EF10_DMA_STAT(tx_unicast_bytes, VADAPTER_TX_UNICAST_BYTES), |
| EF10_DMA_STAT(tx_multicast, VADAPTER_TX_MULTICAST_PACKETS), |
| EF10_DMA_STAT(tx_multicast_bytes, VADAPTER_TX_MULTICAST_BYTES), |
| EF10_DMA_STAT(tx_broadcast, VADAPTER_TX_BROADCAST_PACKETS), |
| EF10_DMA_STAT(tx_broadcast_bytes, VADAPTER_TX_BROADCAST_BYTES), |
| EF10_DMA_STAT(tx_bad, VADAPTER_TX_BAD_PACKETS), |
| EF10_DMA_STAT(tx_bad_bytes, VADAPTER_TX_BAD_BYTES), |
| EF10_DMA_STAT(tx_overflow, VADAPTER_TX_OVERFLOW), |
| }; |
| |
| #define HUNT_COMMON_STAT_MASK ((1ULL << EF10_STAT_port_tx_bytes) | \ |
| (1ULL << EF10_STAT_port_tx_packets) | \ |
| (1ULL << EF10_STAT_port_tx_pause) | \ |
| (1ULL << EF10_STAT_port_tx_unicast) | \ |
| (1ULL << EF10_STAT_port_tx_multicast) | \ |
| (1ULL << EF10_STAT_port_tx_broadcast) | \ |
| (1ULL << EF10_STAT_port_rx_bytes) | \ |
| (1ULL << \ |
| EF10_STAT_port_rx_bytes_minus_good_bytes) | \ |
| (1ULL << EF10_STAT_port_rx_good_bytes) | \ |
| (1ULL << EF10_STAT_port_rx_bad_bytes) | \ |
| (1ULL << EF10_STAT_port_rx_packets) | \ |
| (1ULL << EF10_STAT_port_rx_good) | \ |
| (1ULL << EF10_STAT_port_rx_bad) | \ |
| (1ULL << EF10_STAT_port_rx_pause) | \ |
| (1ULL << EF10_STAT_port_rx_control) | \ |
| (1ULL << EF10_STAT_port_rx_unicast) | \ |
| (1ULL << EF10_STAT_port_rx_multicast) | \ |
| (1ULL << EF10_STAT_port_rx_broadcast) | \ |
| (1ULL << EF10_STAT_port_rx_lt64) | \ |
| (1ULL << EF10_STAT_port_rx_64) | \ |
| (1ULL << EF10_STAT_port_rx_65_to_127) | \ |
| (1ULL << EF10_STAT_port_rx_128_to_255) | \ |
| (1ULL << EF10_STAT_port_rx_256_to_511) | \ |
| (1ULL << EF10_STAT_port_rx_512_to_1023) |\ |
| (1ULL << EF10_STAT_port_rx_1024_to_15xx) |\ |
| (1ULL << EF10_STAT_port_rx_15xx_to_jumbo) |\ |
| (1ULL << EF10_STAT_port_rx_gtjumbo) | \ |
| (1ULL << EF10_STAT_port_rx_bad_gtjumbo) |\ |
| (1ULL << EF10_STAT_port_rx_overflow) | \ |
| (1ULL << EF10_STAT_port_rx_nodesc_drops) |\ |
| (1ULL << GENERIC_STAT_rx_nodesc_trunc) | \ |
| (1ULL << GENERIC_STAT_rx_noskb_drops)) |
| |
| /* On 7000 series NICs, these statistics are only provided by the 10G MAC. |
| * For a 10G/40G switchable port we do not expose these because they might |
| * not include all the packets they should. |
| * On 8000 series NICs these statistics are always provided. |
| */ |
| #define HUNT_10G_ONLY_STAT_MASK ((1ULL << EF10_STAT_port_tx_control) | \ |
| (1ULL << EF10_STAT_port_tx_lt64) | \ |
| (1ULL << EF10_STAT_port_tx_64) | \ |
| (1ULL << EF10_STAT_port_tx_65_to_127) |\ |
| (1ULL << EF10_STAT_port_tx_128_to_255) |\ |
| (1ULL << EF10_STAT_port_tx_256_to_511) |\ |
| (1ULL << EF10_STAT_port_tx_512_to_1023) |\ |
| (1ULL << EF10_STAT_port_tx_1024_to_15xx) |\ |
| (1ULL << EF10_STAT_port_tx_15xx_to_jumbo)) |
| |
| /* These statistics are only provided by the 40G MAC. For a 10G/40G |
| * switchable port we do expose these because the errors will otherwise |
| * be silent. |
| */ |
| #define HUNT_40G_EXTRA_STAT_MASK ((1ULL << EF10_STAT_port_rx_align_error) |\ |
| (1ULL << EF10_STAT_port_rx_length_error)) |
| |
| /* These statistics are only provided if the firmware supports the |
| * capability PM_AND_RXDP_COUNTERS. |
| */ |
| #define HUNT_PM_AND_RXDP_STAT_MASK ( \ |
| (1ULL << EF10_STAT_port_rx_pm_trunc_bb_overflow) | \ |
| (1ULL << EF10_STAT_port_rx_pm_discard_bb_overflow) | \ |
| (1ULL << EF10_STAT_port_rx_pm_trunc_vfifo_full) | \ |
| (1ULL << EF10_STAT_port_rx_pm_discard_vfifo_full) | \ |
| (1ULL << EF10_STAT_port_rx_pm_trunc_qbb) | \ |
| (1ULL << EF10_STAT_port_rx_pm_discard_qbb) | \ |
| (1ULL << EF10_STAT_port_rx_pm_discard_mapping) | \ |
| (1ULL << EF10_STAT_port_rx_dp_q_disabled_packets) | \ |
| (1ULL << EF10_STAT_port_rx_dp_di_dropped_packets) | \ |
| (1ULL << EF10_STAT_port_rx_dp_streaming_packets) | \ |
| (1ULL << EF10_STAT_port_rx_dp_hlb_fetch) | \ |
| (1ULL << EF10_STAT_port_rx_dp_hlb_wait)) |
| |
| static u64 efx_ef10_raw_stat_mask(struct efx_nic *efx) |
| { |
| u64 raw_mask = HUNT_COMMON_STAT_MASK; |
| u32 port_caps = efx_mcdi_phy_get_caps(efx); |
| struct efx_ef10_nic_data *nic_data = efx->nic_data; |
| |
| if (!(efx->mcdi->fn_flags & |
| 1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_LINKCTRL)) |
| return 0; |
| |
| if (port_caps & (1 << MC_CMD_PHY_CAP_40000FDX_LBN)) { |
| raw_mask |= HUNT_40G_EXTRA_STAT_MASK; |
| /* 8000 series have everything even at 40G */ |
| if (nic_data->datapath_caps2 & |
| (1 << MC_CMD_GET_CAPABILITIES_V2_OUT_MAC_STATS_40G_TX_SIZE_BINS_LBN)) |
| raw_mask |= HUNT_10G_ONLY_STAT_MASK; |
| } else { |
| raw_mask |= HUNT_10G_ONLY_STAT_MASK; |
| } |
| |
| if (nic_data->datapath_caps & |
| (1 << MC_CMD_GET_CAPABILITIES_OUT_PM_AND_RXDP_COUNTERS_LBN)) |
| raw_mask |= HUNT_PM_AND_RXDP_STAT_MASK; |
| |
| return raw_mask; |
| } |
| |
| static void efx_ef10_get_stat_mask(struct efx_nic *efx, unsigned long *mask) |
| { |
| struct efx_ef10_nic_data *nic_data = efx->nic_data; |
| u64 raw_mask[2]; |
| |
| raw_mask[0] = efx_ef10_raw_stat_mask(efx); |
| |
| /* Only show vadaptor stats when EVB capability is present */ |
| if (nic_data->datapath_caps & |
| (1 << MC_CMD_GET_CAPABILITIES_OUT_EVB_LBN)) { |
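| /* Mark everything from EF10_STAT_rx_unicast to the end of the |
| * table: the top of raw_mask[0] plus the low bits of raw_mask[1]. |
| */ |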
| raw_mask[0] |= ~((1ULL << EF10_STAT_rx_unicast) - 1); |
| raw_mask[1] = (1ULL << (EF10_STAT_COUNT - 63)) - 1; |
| } else { |
| raw_mask[1] = 0; |
| } |
| |
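| /* Fold the two 64-bit mask words into the unsigned-long bitmap |
| * layout that the bitmap API expects. |
| */ |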
| #if BITS_PER_LONG == 64 |
| BUILD_BUG_ON(BITS_TO_LONGS(EF10_STAT_COUNT) != 2); |
| mask[0] = raw_mask[0]; |
| mask[1] = raw_mask[1]; |
| #else |
| BUILD_BUG_ON(BITS_TO_LONGS(EF10_STAT_COUNT) != 3); |
| mask[0] = raw_mask[0] & 0xffffffff; |
| mask[1] = raw_mask[0] >> 32; |
| mask[2] = raw_mask[1] & 0xffffffff; |
| #endif |
| } |
| |
| static size_t efx_ef10_describe_stats(struct efx_nic *efx, u8 *names) |
| { |
| DECLARE_BITMAP(mask, EF10_STAT_COUNT); |
| |
| efx_ef10_get_stat_mask(efx, mask); |
| return efx_nic_describe_stats(efx_ef10_stat_desc, EF10_STAT_COUNT, |
| mask, names); |
| } |
| |
| static size_t efx_ef10_update_stats_common(struct efx_nic *efx, u64 *full_stats, |
| struct rtnl_link_stats64 *core_stats) |
| { |
| DECLARE_BITMAP(mask, EF10_STAT_COUNT); |
| struct efx_ef10_nic_data *nic_data = efx->nic_data; |
| u64 *stats = nic_data->stats; |
| size_t stats_count = 0, index; |
| |
| efx_ef10_get_stat_mask(efx, mask); |
| |
| if (full_stats) { |
| for_each_set_bit(index, mask, EF10_STAT_COUNT) { |
| if (efx_ef10_stat_desc[index].name) { |
| *full_stats++ = stats[index]; |
| ++stats_count; |
| } |
| } |
| } |
| |
| if (!core_stats) |
| return stats_count; |
| |
| if (nic_data->datapath_caps & |
| 1 << MC_CMD_GET_CAPABILITIES_OUT_EVB_LBN) { |
| /* Use vadaptor stats. */ |
| core_stats->rx_packets = stats[EF10_STAT_rx_unicast] + |
| stats[EF10_STAT_rx_multicast] + |
| stats[EF10_STAT_rx_broadcast]; |
| core_stats->tx_packets = stats[EF10_STAT_tx_unicast] + |
| stats[EF10_STAT_tx_multicast] + |
| stats[EF10_STAT_tx_broadcast]; |
| core_stats->rx_bytes = stats[EF10_STAT_rx_unicast_bytes] + |
| stats[EF10_STAT_rx_multicast_bytes] + |
| stats[EF10_STAT_rx_broadcast_bytes]; |
| core_stats->tx_bytes = stats[EF10_STAT_tx_unicast_bytes] + |
| stats[EF10_STAT_tx_multicast_bytes] + |
| stats[EF10_STAT_tx_broadcast_bytes]; |
| core_stats->rx_dropped = stats[GENERIC_STAT_rx_nodesc_trunc] + |
| stats[GENERIC_STAT_rx_noskb_drops]; |
| core_stats->multicast = stats[EF10_STAT_rx_multicast]; |
| core_stats->rx_crc_errors = stats[EF10_STAT_rx_bad]; |
| core_stats->rx_fifo_errors = stats[EF10_STAT_rx_overflow]; |
| core_stats->rx_errors = core_stats->rx_crc_errors; |
| core_stats->tx_errors = stats[EF10_STAT_tx_bad]; |
| } else { |
| /* Use port stats. */ |
| core_stats->rx_packets = stats[EF10_STAT_port_rx_packets]; |
| core_stats->tx_packets = stats[EF10_STAT_port_tx_packets]; |
| core_stats->rx_bytes = stats[EF10_STAT_port_rx_bytes]; |
| core_stats->tx_bytes = stats[EF10_STAT_port_tx_bytes]; |
| core_stats->rx_dropped = stats[EF10_STAT_port_rx_nodesc_drops] + |
| stats[GENERIC_STAT_rx_nodesc_trunc] + |
| stats[GENERIC_STAT_rx_noskb_drops]; |
| core_stats->multicast = stats[EF10_STAT_port_rx_multicast]; |
| core_stats->rx_length_errors = |
| stats[EF10_STAT_port_rx_gtjumbo] + |
| stats[EF10_STAT_port_rx_length_error]; |
| core_stats->rx_crc_errors = stats[EF10_STAT_port_rx_bad]; |
| core_stats->rx_frame_errors = |
| stats[EF10_STAT_port_rx_align_error]; |
| core_stats->rx_fifo_errors = stats[EF10_STAT_port_rx_overflow]; |
| core_stats->rx_errors = (core_stats->rx_length_errors + |
| core_stats->rx_crc_errors + |
| core_stats->rx_frame_errors); |
| } |
| |
| return stats_count; |
| } |
| |
| static int efx_ef10_try_update_nic_stats_pf(struct efx_nic *efx) |
| { |
| struct efx_ef10_nic_data *nic_data = efx->nic_data; |
| DECLARE_BITMAP(mask, EF10_STAT_COUNT); |
| __le64 generation_start, generation_end; |
| u64 *stats = nic_data->stats; |
| __le64 *dma_stats; |
| |
| efx_ef10_get_stat_mask(efx, mask); |
| |
| dma_stats = efx->stats_buffer.addr; |
| |
| generation_end = dma_stats[MC_CMD_MAC_GENERATION_END]; |
| if (generation_end == EFX_MC_STATS_GENERATION_INVALID) |
| return 0; |
| rmb(); |
| efx_nic_update_stats(efx_ef10_stat_desc, EF10_STAT_COUNT, mask, |
| stats, efx->stats_buffer.addr, false); |
| rmb(); |
| generation_start = dma_stats[MC_CMD_MAC_GENERATION_START]; |
| if (generation_end != generation_start) |
| return -EAGAIN; |
| |
| /* Update derived statistics */ |
| efx_nic_fix_nodesc_drop_stat(efx, |
| &stats[EF10_STAT_port_rx_nodesc_drops]); |
| stats[EF10_STAT_port_rx_good_bytes] = |
| stats[EF10_STAT_port_rx_bytes] - |
| stats[EF10_STAT_port_rx_bytes_minus_good_bytes]; |
| efx_update_diff_stat(&stats[EF10_STAT_port_rx_bad_bytes], |
| stats[EF10_STAT_port_rx_bytes_minus_good_bytes]); |
| efx_update_sw_stats(efx, stats); |
| return 0; |
| } |
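| |
| /* The generation words give a seqlock-style protocol with the MC: the MC |
| * appears to write GENERATION_START at the head of the DMA buffer first, |
| * then the stats, then GENERATION_END last, so a reader sketch looks like |
| * this (illustrative only; copy_stats() is a placeholder): |
| * |
| *   do { |
| *           end = dma_stats[MC_CMD_MAC_GENERATION_END]; |
| *           rmb(); |
| *           copy_stats(); |
| *           rmb(); |
| *           start = dma_stats[MC_CMD_MAC_GENERATION_START]; |
| *   } while (start != end); |
| * |
| * efx_ef10_try_update_nic_stats_pf() performs a single pass and returns |
| * -EAGAIN on mismatch; efx_ef10_update_stats_pf() below supplies the |
| * bounded retry loop. |
| */ |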
| |
| static size_t efx_ef10_update_stats_pf(struct efx_nic *efx, u64 *full_stats, |
| struct rtnl_link_stats64 *core_stats) |
| { |
| int retry; |
| |
| /* If we're unlucky enough to read statistics while the MC is DMAing |
| * them, retry for up to 10ms (100 polls at 100us intervals); the DMA |
| * typically completes in under 500us. |
| */ |
| for (retry = 0; retry < 100; ++retry) { |
| if (efx_ef10_try_update_nic_stats_pf(efx) == 0) |
| break; |
| udelay(100); |
| } |
| |
| return efx_ef10_update_stats_common(efx, full_stats, core_stats); |
| } |
| |
| static int efx_ef10_try_update_nic_stats_vf(struct efx_nic *efx) |
| { |
| MCDI_DECLARE_BUF(inbuf, MC_CMD_MAC_STATS_IN_LEN); |
| struct efx_ef10_nic_data *nic_data = efx->nic_data; |
| DECLARE_BITMAP(mask, EF10_STAT_COUNT); |
| __le64 generation_start, generation_end; |
| u64 *stats = nic_data->stats; |
| u32 dma_len = MC_CMD_MAC_NSTATS * sizeof(u64); |
| struct efx_buffer stats_buf; |
| __le64 *dma_stats; |
| int rc; |
| |
| spin_unlock_bh(&efx->stats_lock); |
| |
| if (in_interrupt()) { |
| /* If in atomic context, cannot update stats. Just update the |
| * software stats and return so the caller can continue. |
| */ |
| spin_lock_bh(&efx->stats_lock); |
| efx_update_sw_stats(efx, stats); |
| return 0; |
| } |
| |
| efx_ef10_get_stat_mask(efx, mask); |
| |
| rc = efx_nic_alloc_buffer(efx, &stats_buf, dma_len, GFP_ATOMIC); |
| if (rc) { |
| spin_lock_bh(&efx->stats_lock); |
| return rc; |
| } |
| |
| dma_stats = stats_buf.addr; |
| dma_stats[MC_CMD_MAC_GENERATION_END] = EFX_MC_STATS_GENERATION_INVALID; |
| |
| MCDI_SET_QWORD(inbuf, MAC_STATS_IN_DMA_ADDR, stats_buf.dma_addr); |
| MCDI_POPULATE_DWORD_1(inbuf, MAC_STATS_IN_CMD, |
| MAC_STATS_IN_DMA, 1); |
| MCDI_SET_DWORD(inbuf, MAC_STATS_IN_DMA_LEN, dma_len); |
| MCDI_SET_DWORD(inbuf, MAC_STATS_IN_PORT_ID, EVB_PORT_ID_ASSIGNED); |
| |
| rc = efx_mcdi_rpc_quiet(efx, MC_CMD_MAC_STATS, inbuf, sizeof(inbuf), |
| NULL, 0, NULL); |
| spin_lock_bh(&efx->stats_lock); |
| if (rc) { |
| /* Expect ENOENT if DMA queues have not been set up */ |
| if (rc != -ENOENT || atomic_read(&efx->active_queues)) |
| efx_mcdi_display_error(efx, MC_CMD_MAC_STATS, |
| sizeof(inbuf), NULL, 0, rc); |
| goto out; |
| } |
| |
| generation_end = dma_stats[MC_CMD_MAC_GENERATION_END]; |
| if (generation_end == EFX_MC_STATS_GENERATION_INVALID) { |
| WARN_ON_ONCE(1); |
| goto out; |
| } |
| rmb(); |
| efx_nic_update_stats(efx_ef10_stat_desc, EF10_STAT_COUNT, mask, |
| stats, stats_buf.addr, false); |
| rmb(); |
| generation_start = dma_stats[MC_CMD_MAC_GENERATION_START]; |
| if (generation_end != generation_start) { |
| rc = -EAGAIN; |
| goto out; |
| } |
| |
| efx_update_sw_stats(efx, stats); |
| out: |
| efx_nic_free_buffer(efx, &stats_buf); |
| return rc; |
| } |
| |
| static size_t efx_ef10_update_stats_vf(struct efx_nic *efx, u64 *full_stats, |
| struct rtnl_link_stats64 *core_stats) |
| { |
| if (efx_ef10_try_update_nic_stats_vf(efx)) |
| return 0; |
| |
| return efx_ef10_update_stats_common(efx, full_stats, core_stats); |
| } |
| |
| static void efx_ef10_push_irq_moderation(struct efx_channel *channel) |
| { |
| struct efx_nic *efx = channel->efx; |
| unsigned int mode, usecs; |
| efx_dword_t timer_cmd; |
| |
| if (channel->irq_moderation_us) { |
| mode = 3; |
| usecs = channel->irq_moderation_us; |
| } else { |
| mode = 0; |
| usecs = 0; |
| } |
| |
| if (EFX_EF10_WORKAROUND_61265(efx)) { |
| MCDI_DECLARE_BUF(inbuf, MC_CMD_SET_EVQ_TMR_IN_LEN); |
| unsigned int ns = usecs * 1000; |
| |
| MCDI_SET_DWORD(inbuf, SET_EVQ_TMR_IN_INSTANCE, |
| channel->channel); |
| MCDI_SET_DWORD(inbuf, SET_EVQ_TMR_IN_TMR_LOAD_REQ_NS, ns); |
| MCDI_SET_DWORD(inbuf, SET_EVQ_TMR_IN_TMR_RELOAD_REQ_NS, ns); |
| MCDI_SET_DWORD(inbuf, SET_EVQ_TMR_IN_TMR_MODE, mode); |
| |
| efx_mcdi_rpc_async(efx, MC_CMD_SET_EVQ_TMR, |
| inbuf, sizeof(inbuf), 0, NULL, 0); |
| } else if (EFX_EF10_WORKAROUND_35388(efx)) { |
| unsigned int ticks = efx_usecs_to_ticks(efx, usecs); |
| |
| EFX_POPULATE_DWORD_3(timer_cmd, ERF_DD_EVQ_IND_TIMER_FLAGS, |
| EFE_DD_EVQ_IND_TIMER_FLAGS, |
| ERF_DD_EVQ_IND_TIMER_MODE, mode, |
| ERF_DD_EVQ_IND_TIMER_VAL, ticks); |
| efx_writed_page(efx, &timer_cmd, ER_DD_EVQ_INDIRECT, |
| channel->channel); |
| } else { |
| unsigned int ticks = efx_usecs_to_ticks(efx, usecs); |
| |
| EFX_POPULATE_DWORD_2(timer_cmd, ERF_DZ_TC_TIMER_MODE, mode, |
| ERF_DZ_TC_TIMER_VAL, ticks); |
| efx_writed_page(efx, &timer_cmd, ER_DZ_EVQ_TMR, |
| channel->channel); |
| } |
| } |
| |
| static void efx_ef10_get_wol_vf(struct efx_nic *efx, |
| struct ethtool_wolinfo *wol) {} |
| |
| static int efx_ef10_set_wol_vf(struct efx_nic *efx, u32 type) |
| { |
| return -EOPNOTSUPP; |
| } |
| |
| static void efx_ef10_get_wol(struct efx_nic *efx, struct ethtool_wolinfo *wol) |
| { |
| wol->supported = 0; |
| wol->wolopts = 0; |
| memset(&wol->sopass, 0, sizeof(wol->sopass)); |
| } |
| |
| static int efx_ef10_set_wol(struct efx_nic *efx, u32 type) |
| { |
| if (type != 0) |
| return -EINVAL; |
| return 0; |
| } |
| |
| static void efx_ef10_mcdi_request(struct efx_nic *efx, |
| const efx_dword_t *hdr, size_t hdr_len, |
| const efx_dword_t *sdu, size_t sdu_len) |
| { |
| struct efx_ef10_nic_data *nic_data = efx->nic_data; |
| u8 *pdu = nic_data->mcdi_buf.addr; |
| |
| memcpy(pdu, hdr, hdr_len); |
| memcpy(pdu + hdr_len, sdu, sdu_len); |
| wmb(); |
| |
| /* The hardware provides 'low' and 'high' (doorbell) registers |
| * for passing the 64-bit address of an MCDI request to |
| * firmware. However the dwords are swapped by firmware. The |
| * least significant bits of the doorbell are then 0 for all |
| * MCDI requests due to alignment. |
| */ |
| _efx_writed(efx, cpu_to_le32((u64)nic_data->mcdi_buf.dma_addr >> 32), |
| ER_DZ_MC_DB_LWRD); |
| _efx_writed(efx, cpu_to_le32((u32)nic_data->mcdi_buf.dma_addr), |
| ER_DZ_MC_DB_HWRD); |
| } |
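| |
| /* Worked example of the swapped doorbell write above (address |
| * illustrative): for an MCDI buffer at DMA address 0x0000001234560000, |
| * the high dword 0x00000012 is written to ER_DZ_MC_DB_LWRD and the low |
| * dword 0x34560000 to ER_DZ_MC_DB_HWRD. |
| */ |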
| |
| static bool efx_ef10_mcdi_poll_response(struct efx_nic *efx) |
| { |
| struct efx_ef10_nic_data *nic_data = efx->nic_data; |
| const efx_dword_t hdr = *(const efx_dword_t *)nic_data->mcdi_buf.addr; |
| |
| rmb(); |
| return EFX_DWORD_FIELD(hdr, MCDI_HEADER_RESPONSE); |
| } |
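| |
| /* The rmb() above keeps reads of the response payload (via |
| * efx_ef10_mcdi_read_response() below) from being reordered before the |
| * read of the header's RESPONSE flag, so the payload is only consumed |
| * once the flag shows it is valid. |
| */ |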
| |
| static void |
| efx_ef10_mcdi_read_response(struct efx_nic *efx, efx_dword_t *outbuf, |
| size_t offset, size_t outlen) |
| { |
| struct efx_ef10_nic_data *nic_data = efx->nic_data; |
| const u8 *pdu = nic_data->mcdi_buf.addr; |
| |
| memcpy(outbuf, pdu + offset, outlen); |
| } |
| |
| static void efx_ef10_mcdi_reboot_detected(struct efx_nic *efx) |
| { |
| struct efx_ef10_nic_data *nic_data = efx->nic_data; |
| |
| /* All our allocations have been reset */ |
| efx_ef10_reset_mc_allocations(efx); |
| |
| /* The datapath firmware might have been changed */ |
| nic_data->must_check_datapath_caps = true; |
| |
| /* MAC statistics have been cleared on the NIC; clear the local |
| * statistic that we update with efx_update_diff_stat(). |
| */ |
| nic_data->stats[EF10_STAT_port_rx_bad_bytes] = 0; |
| } |
| |
| static int efx_ef10_mcdi_poll_reboot(struct efx_nic *efx) |
| { |
| struct efx_ef10_nic_data *nic_data = efx->nic_data; |
| int rc; |
| |
| rc = efx_ef10_get_warm_boot_count(efx); |
| if (rc < 0) { |
| /* The firmware is presumably in the process of |
| * rebooting. However, we are supposed to report each |
| * reboot just once, so we must only do that once we |
| * can read and store the updated warm boot count. |
| */ |
| return 0; |
| } |
| |
| if (rc == nic_data->warm_boot_count) |
| return 0; |
| |
| nic_data->warm_boot_count = rc; |
| efx_ef10_mcdi_reboot_detected(efx); |
| |
| return -EIO; |
| } |
| |
| /* Handle an MSI interrupt |
| * |
| * Handle an MSI hardware interrupt. This routine schedules event |
| * queue processing. No interrupt acknowledgement cycle is necessary. |
| * Also, we never need to check that the interrupt is for us, since |
| * MSI interrupts cannot be shared. |
| */ |
| static irqreturn_t efx_ef10_msi_interrupt(int irq, void *dev_id) |
| { |
| struct efx_msi_context *context = dev_id; |
| struct efx_nic *efx = context->efx; |
| |
| netif_vdbg(efx, intr, efx->net_dev, |
| "IRQ %d on CPU %d\n", irq, raw_smp_processor_id()); |
| |
| if (likely(ACCESS_ONCE(efx->irq_soft_enabled))) { |
| /* Note test interrupts */ |
| if (context->index == efx->irq_level) |
| efx->last_irq_cpu = raw_smp_processor_id(); |
| |
| /* Schedule processing of the channel */ |
| efx_schedule_channel_irq(efx->channel[context->index]); |
| } |
| |
| return IRQ_HANDLED; |
| } |
| |
| static irqreturn_t efx_ef10_legacy_interrupt(int irq, void *dev_id) |
| { |
| struct efx_nic *efx = dev_id; |
| bool soft_enabled = ACCESS_ONCE(efx->irq_soft_enabled); |
| struct efx_channel *channel; |
| efx_dword_t reg; |
| u32 queues; |
| |
| /* Read the ISR which also ACKs the interrupts */ |
| efx_readd(efx, ®, ER_DZ_BIU_INT_ISR); |
| queues = EFX_DWORD_FIELD(reg, ERF_DZ_ISR_REG); |
| |
| if (queues == 0) |
| return IRQ_NONE; |
| |
| if (likely(soft_enabled)) { |
| /* Note test interrupts */ |
| if (queues & (1U << efx->irq_level)) |
| efx->last_irq_cpu = raw_smp_processor_id(); |
| |
| efx_for_each_channel(channel, efx) { |
| if (queues & 1) |
| efx_schedule_channel_irq(channel); |
| queues >>= 1; |
| } |
| } |
| |
| netif_vdbg(efx, intr, efx->net_dev, |
| "IRQ %d on CPU %d status " EFX_DWORD_FMT "\n", |
| irq, raw_smp_processor_id(), EFX_DWORD_VAL(reg)); |
| |
| return IRQ_HANDLED; |
| } |
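| |
| /* For example, an ISR value with queues == 0b0101 schedules channels 0 |
| * and 2: each iteration tests bit 0 and shifts the mask right, so |
| * channel n is scheduled iff bit n was set in the ISR. |
| */ |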
| |
| static int efx_ef10_irq_test_generate(struct efx_nic *efx) |
| { |
| MCDI_DECLARE_BUF(inbuf, MC_CMD_TRIGGER_INTERRUPT_IN_LEN); |
| |
| if (efx_mcdi_set_workaround(efx, MC_CMD_WORKAROUND_BUG41750, true, |
| NULL) == 0) |
| return -ENOTSUPP; |
| |
| BUILD_BUG_ON(MC_CMD_TRIGGER_INTERRUPT_OUT_LEN != 0); |
| |
| MCDI_SET_DWORD(inbuf, TRIGGER_INTERRUPT_IN_INTR_LEVEL, efx->irq_level); |
| return efx_mcdi_rpc(efx, MC_CMD_TRIGGER_INTERRUPT, |
| inbuf, sizeof(inbuf), NULL, 0, NULL); |
| } |
| |
| static int efx_ef10_tx_probe(struct efx_tx_queue *tx_queue) |
| { |
| return efx_nic_alloc_buffer(tx_queue->efx, &tx_queue->txd.buf, |
| (tx_queue->ptr_mask + 1) * |
| sizeof(efx_qword_t), |
| GFP_KERNEL); |
| } |
| |
| /* This writes to the TX_DESC_WPTR and also pushes data */ |
| static inline void efx_ef10_push_tx_desc(struct efx_tx_queue *tx_queue, |
| const efx_qword_t *txd) |
| { |
| unsigned int write_ptr; |
| efx_oword_t reg; |
| |
| write_ptr = tx_queue->write_count & tx_queue->ptr_mask; |
| EFX_POPULATE_OWORD_1(reg, ERF_DZ_TX_DESC_WPTR, write_ptr); |
| reg.qword[0] = *txd; |
| efx_writeo_page(tx_queue->efx, ®, |
| ER_DZ_TX_DESC_UPD, tx_queue->queue); |
| } |
| |
| /* Add Firmware-Assisted TSO v2 option descriptors to a queue. |
| */ |
| static int efx_ef10_tx_tso_desc(struct efx_tx_queue *tx_queue, |
| struct sk_buff *skb, |
| bool *data_mapped) |
| { |
| struct efx_tx_buffer *buffer; |
| struct tcphdr *tcp; |
| struct iphdr *ip; |
| |
| u16 ipv4_id; |
| u32 seqnum; |
| u32 mss; |
| |
| EFX_WARN_ON_ONCE_PARANOID(tx_queue->tso_version != 2); |
| |
| mss = skb_shinfo(skb)->gso_size; |
| |
| if (unlikely(mss < 4)) { |
| WARN_ONCE(1, "MSS of %u is too small for TSO v2\n", mss); |
| return -EINVAL; |
| } |
| |
| ip = ip_hdr(skb); |
| if (ip->version == 4) { |
| /* Modify IPv4 header if needed. */ |
| ip->tot_len = 0; |
| ip->check = 0; |
| ipv4_id = ntohs(ip->id); |
| } else { |
| /* Modify IPv6 header if needed. */ |
| struct ipv6hdr *ipv6 = ipv6_hdr(skb); |
| |
| ipv6->payload_len = 0; |
| ipv4_id = 0; |
| } |
| |
| tcp = tcp_hdr(skb); |
| seqnum = ntohl(tcp->seq); |
| |
| buffer = efx_tx_queue_get_insert_buffer(tx_queue); |
| |
| buffer->flags = EFX_TX_BUF_OPTION; |
| buffer->len = 0; |
| buffer->unmap_len = 0; |
| EFX_POPULATE_QWORD_5(buffer->option, |
| ESF_DZ_TX_DESC_IS_OPT, 1, |
| ESF_DZ_TX_OPTION_TYPE, ESE_DZ_TX_OPTION_DESC_TSO, |
| ESF_DZ_TX_TSO_OPTION_TYPE, |
| ESE_DZ_TX_TSO_OPTION_DESC_FATSO2A, |
| ESF_DZ_TX_TSO_IP_ID, ipv4_id, |
| ESF_DZ_TX_TSO_TCP_SEQNO, seqnum |
| ); |
| ++tx_queue->insert_count; |
| |
| buffer = efx_tx_queue_get_insert_buffer(tx_queue); |
| |
| buffer->flags = EFX_TX_BUF_OPTION; |
| buffer->len = 0; |
| buffer->unmap_len = 0; |
| EFX_POPULATE_QWORD_4(buffer->option, |
| ESF_DZ_TX_DESC_IS_OPT, 1, |
| ESF_DZ_TX_OPTION_TYPE, ESE_DZ_TX_OPTION_DESC_TSO, |
| ESF_DZ_TX_TSO_OPTION_TYPE, |
| ESE_DZ_TX_TSO_OPTION_DESC_FATSO2B, |
| ESF_DZ_TX_TSO_TCP_MSS, mss |
| ); |
| ++tx_queue->insert_count; |
| |
| return 0; |
| } |
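| |
| /* The TSO state is split across the two option descriptors: FATSO2A |
| * carries the initial IPv4 ID and TCP sequence number, FATSO2B carries |
| * the MSS. As an illustrative example, a 4380-byte payload with |
| * mss == 1460 yields three segments, the hardware advancing the TCP |
| * sequence number by 1460 (and, for IPv4, the IP ID by 1) per segment. |
| */ |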
| |
| static u32 efx_ef10_tso_versions(struct efx_nic *efx) |
| { |
| struct efx_ef10_nic_data *nic_data = efx->nic_data; |
| u32 tso_versions = 0; |
| |
| if (nic_data->datapath_caps & |
| (1 << MC_CMD_GET_CAPABILITIES_OUT_TX_TSO_LBN)) |
| tso_versions |= BIT(1); |
| if (nic_data->datapath_caps2 & |
| (1 << MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_LBN)) |
| tso_versions |= BIT(2); |
| return tso_versions; |
| } |
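| |
| /* For example, a datapath reporting both the TSO and TSO_V2 capability |
| * bits yields 0x6 here (BIT(1) | BIT(2)); one with neither yields 0. |
| */ |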
| |
| static void efx_ef10_tx_init(struct efx_tx_queue *tx_queue) |
| { |
| MCDI_DECLARE_BUF(inbuf, MC_CMD_INIT_TXQ_IN_LEN(EFX_MAX_DMAQ_SIZE * 8 / |
| EFX_BUF_SIZE)); |
| bool csum_offload = tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD; |
| size_t entries = tx_queue->txd.buf.len / EFX_BUF_SIZE; |
| struct efx_channel *channel = tx_queue->channel; |
| struct efx_nic *efx = tx_queue->efx; |
| struct efx_ef10_nic_data *nic_data = efx->nic_data; |
| bool tso_v2 = false; |
| size_t inlen; |
| dma_addr_t dma_addr; |
| efx_qword_t *txd; |
| int rc; |
| int i; |
| BUILD_BUG_ON(MC_CMD_INIT_TXQ_OUT_LEN != 0); |
| |
| /* TSOv2 is a limited resource that can only be configured on a limited |
| * number of queues. TSO without checksum offload is not really a thing, |
| * so we only enable TSOv2 on queues that also use checksum offload. |
| */ |
| if (csum_offload && (nic_data->datapath_caps2 & |
| (1 << MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_LBN))) { |
| tso_v2 = true; |
| netif_dbg(efx, hw, efx->net_dev, "Using TSOv2 for channel %u\n", |
| channel->channel); |
| } |
| |
| MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_SIZE, tx_queue->ptr_mask + 1); |
| MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_TARGET_EVQ, channel->channel); |
| MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_LABEL, tx_queue->queue); |
| MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_INSTANCE, tx_queue->queue); |
| MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_OWNER_ID, 0); |
| MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_PORT_ID, nic_data->vport_id); |
| |
| dma_addr = tx_queue->txd.buf.dma_addr; |
| |
| netif_dbg(efx, hw, efx->net_dev, "pushing TXQ %d. %zu entries (%llx)\n", |
| tx_queue->queue, entries, (u64)dma_addr); |
| |
| for (i = 0; i < entries; ++i) { |
| MCDI_SET_ARRAY_QWORD(inbuf, INIT_TXQ_IN_DMA_ADDR, i, dma_addr); |
| dma_addr += EFX_BUF_SIZE; |
| } |
| |
| inlen = MC_CMD_INIT_TXQ_IN_LEN(entries); |
| |
| do { |
| MCDI_POPULATE_DWORD_3(inbuf, INIT_TXQ_IN_FLAGS, |
| /* This flag was removed from mcdi_pcol.h for |
| * the non-_EXT version of INIT_TXQ. However, |
| * firmware still honours it. |
| */ |
| INIT_TXQ_EXT_IN_FLAG_TSOV2_EN, tso_v2, |
| INIT_TXQ_IN_FLAG_IP_CSUM_DIS, !csum_offload, |
| INIT_TXQ_IN_FLAG_TCP_CSUM_DIS, !csum_offload); |
| |
| rc = efx_mcdi_rpc_quiet(efx, MC_CMD_INIT_TXQ, inbuf, inlen, |
| NULL, 0, NULL); |
| if (rc == -ENOSPC && tso_v2) { |
| /* Retry without TSOv2 if we're short on contexts. */ |
| tso_v2 = false; |
| netif_warn(efx, probe, efx->net_dev, |
| "TSOv2 context not available to segment in hardware. TCP performance may be reduced.\n"); |
| } else if (rc) { |
| efx_mcdi_display_error(efx, MC_CMD_INIT_TXQ, |
| MC_CMD_INIT_TXQ_EXT_IN_LEN, |
| NULL, 0, rc); |
| goto fail; |
| } |
| } while (rc); |
| |
| /* A previous user of this TX queue might have set us up the |
| * bomb by writing a descriptor to the TX push collector but |
| * not the doorbell. (Each collector belongs to a port, not a |
| * queue or function, so cannot easily be reset.) We must |
| * attempt to push a no-op descriptor in its place. |
| */ |
| tx_queue->buffer[0].flags = EFX_TX_BUF_OPTION; |
| tx_queue->insert_count = 1; |
| txd = efx_tx_desc(tx_queue, 0); |
| EFX_POPULATE_QWORD_4(*txd, |
| ESF_DZ_TX_DESC_IS_OPT, true, |
| ESF_DZ_TX_OPTION_TYPE, |
| ESE_DZ_TX_OPTION_DESC_CRC_CSUM, |
| ESF_DZ_TX_OPTION_UDP_TCP_CSUM, csum_offload, |
| ESF_DZ_TX_OPTION_IP_CSUM, csum_offload); |
| tx_queue->write_count = 1; |
| |
| if (tso_v2) { |
| tx_queue->handle_tso = efx_ef10_tx_tso_desc; |
| tx_queue->tso_version = 2; |
| } else if (nic_data->datapath_caps & |
| (1 << MC_CMD_GET_CAPABILITIES_OUT_TX_TSO_LBN)) { |
| tx_queue->tso_version = 1; |
| } |
| |
| wmb(); |
| efx_ef10_push_tx_desc(tx_queue, txd); |
| |
| return; |
| |
| fail: |
| netdev_WARN(efx->net_dev, "failed to initialise TXQ %d\n", |
| tx_queue->queue); |
| } |
| |
| static void efx_ef10_tx_fini(struct efx_tx_queue *tx_queue) |
| { |
| MCDI_DECLARE_BUF(inbuf, MC_CMD_FINI_TXQ_IN_LEN); |
| MCDI_DECLARE_BUF_ERR(outbuf); |
| struct efx_nic *efx = tx_queue->efx; |
| size_t outlen; |
| int rc; |
| |
| MCDI_SET_DWORD(inbuf, FINI_TXQ_IN_INSTANCE, |
| tx_queue->queue); |
| |
| rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FINI_TXQ, inbuf, sizeof(inbuf), |
| outbuf, sizeof(outbuf), &outlen); |
| |
| if (rc && rc != -EALREADY) |
| goto fail; |
| |
| return; |
| |
| fail: |
| efx_mcdi_display_error(efx, MC_CMD_FINI_TXQ, MC_CMD_FINI_TXQ_IN_LEN, |
| outbuf, outlen, rc); |
| } |
| |
| static void efx_ef10_tx_remove(struct efx_tx_queue *tx_queue) |
| { |
| efx_nic_free_buffer(tx_queue->efx, &tx_queue->txd.buf); |
| } |
| |
| /* This writes to the TX_DESC_WPTR; write pointer for TX descriptor ring */ |
| static inline void efx_ef10_notify_tx_desc(struct efx_tx_queue *tx_queue) |
| { |
| unsigned int write_ptr; |
| efx_dword_t reg; |
| |
| write_ptr = tx_queue->write_count & tx_queue->ptr_mask; |
| EFX_POPULATE_DWORD_1(reg, ERF_DZ_TX_DESC_WPTR_DWORD, write_ptr); |
| efx_writed_page(tx_queue->efx, ®, |
| ER_DZ_TX_DESC_UPD_DWORD, tx_queue->queue); |
| } |
| |
| #define EFX_EF10_MAX_TX_DESCRIPTOR_LEN 0x3fff |
| |
| static unsigned int efx_ef10_tx_limit_len(struct efx_tx_queue *tx_queue, |
| dma_addr_t dma_addr, unsigned int len) |
| { |
| if (len > EFX_EF10_MAX_TX_DESCRIPTOR_LEN) { |
| /* If we need to break across multiple descriptors we should |
| * stop at a page boundary. This assumes the length limit is |
| * greater than the page size. |
| */ |
| dma_addr_t end = dma_addr + EFX_EF10_MAX_TX_DESCRIPTOR_LEN; |
| |
| BUILD_BUG_ON(EFX_EF10_MAX_TX_DESCRIPTOR_LEN < EFX_PAGE_SIZE); |
| len = (end & (~(EFX_PAGE_SIZE - 1))) - dma_addr; |
| } |
| |
| return len; |
| } |
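| |
| /* Worked example (assuming EFX_PAGE_SIZE == 4096; addresses |
| * illustrative): for dma_addr == 0x10000ffe and len == 0x6000, end == |
| * 0x10004ffd, which rounds down to the page boundary 0x10004000, so this |
| * descriptor covers 0x3002 bytes and the next one starts page-aligned. |
| */ |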
| |
| static void efx_ef10_tx_write(struct efx_tx_queue *tx_queue) |
| { |
| unsigned int old_write_count = tx_queue->write_count; |
| struct efx_tx_buffer *buffer; |
| unsigned int write_ptr; |
| efx_qword_t *txd; |
| |
| tx_queue->xmit_more_available = false; |
| if (unlikely(tx_queue->write_count == tx_queue->insert_count)) |
| return; |
| |
| do { |
| write_ptr = tx_queue->write_count & tx_queue->ptr_mask; |
| buffer = &tx_queue->buffer[write_ptr]; |
| txd = efx_tx_desc(tx_queue, write_ptr); |
| ++tx_queue->write_count; |
| |
| /* Create TX descriptor ring entry */ |
| if (buffer->flags & EFX_TX_BUF_OPTION) { |
| *txd = buffer->option; |
| if (EFX_QWORD_FIELD(*txd, ESF_DZ_TX_OPTION_TYPE) == 1) |
| /* PIO descriptor */ |
| tx_queue->packet_write_count = tx_queue->write_count; |
| } else { |
| tx_queue->packet_write_count = tx_queue->write_count; |
| BUILD_BUG_ON(EFX_TX_BUF_CONT != 1); |
| EFX_POPULATE_QWORD_3( |
| *txd, |
| ESF_DZ_TX_KER_CONT, |
| buffer->flags & EFX_TX_BUF_CONT, |
| ESF_DZ_TX_KER_BYTE_CNT, buffer->len, |
| ESF_DZ_TX_KER_BUF_ADDR, buffer->dma_addr); |
| } |
| } while (tx_queue->write_count != tx_queue->insert_count); |
| |
| wmb(); /* Ensure descriptors are written before they are fetched */ |
| |
| if (efx_nic_may_push_tx_desc(tx_queue, old_write_count)) { |
| txd = efx_tx_desc(tx_queue, |
| old_write_count & tx_queue->ptr_mask); |
| efx_ef10_push_tx_desc(tx_queue, txd); |
| ++tx_queue->pushes; |
| } else { |
| efx_ef10_notify_tx_desc(tx_queue); |
| } |
| } |
| |
| #define RSS_MODE_HASH_ADDRS (1 << RSS_MODE_HASH_SRC_ADDR_LBN |\ |
| 1 << RSS_MODE_HASH_DST_ADDR_LBN) |
| #define RSS_MODE_HASH_PORTS (1 << RSS_MODE_HASH_SRC_PORT_LBN |\ |
| 1 << RSS_MODE_HASH_DST_PORT_LBN) |
| #define RSS_CONTEXT_FLAGS_DEFAULT (1 << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_IPV4_EN_LBN |\ |
| 1 << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_TCPV4_EN_LBN |\ |
| 1 << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_IPV6_EN_LBN |\ |
| 1 << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_TCPV6_EN_LBN |\ |
| (RSS_MODE_HASH_ADDRS | RSS_MODE_HASH_PORTS) << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TCP_IPV4_RSS_MODE_LBN |\ |
| RSS_MODE_HASH_ADDRS << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_UDP_IPV4_RSS_MODE_LBN |\ |
| RSS_MODE_HASH_ADDRS << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_OTHER_IPV4_RSS_MODE_LBN |\ |
| (RSS_MODE_HASH_ADDRS | RSS_MODE_HASH_PORTS) << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TCP_IPV6_RSS_MODE_LBN |\ |
| RSS_MODE_HASH_ADDRS << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_UDP_IPV6_RSS_MODE_LBN |\ |
| RSS_MODE_HASH_ADDRS << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_OTHER_IPV6_RSS_MODE_LBN) |
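| |
| /* RSS_MODE_HASH_ADDRS | RSS_MODE_HASH_PORTS selects 4-tuple hashing for |
| * a traffic class, so the default flags above hash TCP flows on the full |
| * 4-tuple and UDP/other-IP flows on addresses only; see |
| * efx_ef10_set_rss_flags() below, which upgrades UDP to the 4-tuple. |
| */ |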
| |
| static int efx_ef10_get_rss_flags(struct efx_nic *efx, u32 context, u32 *flags) |
| { |
| /* Firmware had a bug (sfc bug 61952) where it would not actually |
| * fill in the flags field in the response to MC_CMD_RSS_CONTEXT_GET_FLAGS. |
| * This meant that it would always contain whatever was previously |
| * in the MCDI buffer. Fortunately, all firmware versions with |
| * this bug have the same default flags value for a newly-allocated |
| * RSS context, and the only time we want to get the flags is just |
| * after allocating. Moreover, the response has a 32-bit hole |
| * where the context ID would be in the request, so we can use an |
| * overlength buffer in the request and pre-fill the flags field |
| * with what we believe the default to be. Thus if the firmware |
| * has the bug, it will leave our pre-filled value in the flags |
| * field of the response, and we will get the right answer. |
| * |
| * However, this does mean that this function should NOT be used if |
| * the RSS context flags might not be their defaults - it is ONLY |
| * reliably correct for a newly-allocated RSS context. |
| */ |
| MCDI_DECLARE_BUF(inbuf, MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_LEN); |
| MCDI_DECLARE_BUF(outbuf, MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_LEN); |
| size_t outlen; |
| int rc; |
| |
| /* Check we have a hole for the context ID */ |
| BUILD_BUG_ON(MC_CMD_RSS_CONTEXT_GET_FLAGS_IN_LEN != MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_FLAGS_OFST); |
| MCDI_SET_DWORD(inbuf, RSS_CONTEXT_GET_FLAGS_IN_RSS_CONTEXT_ID, context); |
| MCDI_SET_DWORD(inbuf, RSS_CONTEXT_GET_FLAGS_OUT_FLAGS, |
| RSS_CONTEXT_FLAGS_DEFAULT); |
| rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_GET_FLAGS, inbuf, |
| sizeof(inbuf), outbuf, sizeof(outbuf), &outlen); |
| if (rc == 0) { |
| if (outlen < MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_LEN) |
| rc = -EIO; |
| else |
| *flags = MCDI_DWORD(outbuf, RSS_CONTEXT_GET_FLAGS_OUT_FLAGS); |
| } |
| return rc; |
| } |
| |
| /* Attempt to enable 4-tuple UDP hashing on the specified RSS context. |
| * If we fail, we just leave the RSS context at its default hash settings, |
| * which is safe but may slightly reduce performance. |
| * Defaults are 4-tuple for TCP and 2-tuple for UDP and other-IP, so we |
| * just need to set the UDP ports flags (for both IP versions). |
| */ |
| static void efx_ef10_set_rss_flags(struct efx_nic *efx, u32 context) |
| { |
| MCDI_DECLARE_BUF(inbuf, MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_LEN); |
| u32 flags; |
| |
| BUILD_BUG_ON(MC_CMD_RSS_CONTEXT_SET_FLAGS_OUT_LEN != 0); |
| |
| if (efx_ef10_get_rss_flags(efx, context, &flags) != 0) |
| return; |
| MCDI_SET_DWORD(inbuf, RSS_CONTEXT_SET_FLAGS_IN_RSS_CONTEXT_ID, context); |
| flags |= RSS_MODE_HASH_PORTS << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_UDP_IPV4_RSS_MODE_LBN; |
| flags |= RSS_MODE_HASH_PORTS << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_UDP_IPV6_RSS_MODE_LBN; |
| MCDI_SET_DWORD(inbuf, RSS_CONTEXT_SET_FLAGS_IN_FLAGS, flags); |
| if (!efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_SET_FLAGS, inbuf, sizeof(inbuf), |
| NULL, 0, NULL)) |
| /* Succeeded, so UDP 4-tuple is now enabled */ |
| efx->rx_hash_udp_4tuple = true; |
| } |
| |
| static int efx_ef10_alloc_rss_context(struct efx_nic *efx, u32 *context, |
| bool exclusive, unsigned *context_size) |
| { |
| MCDI_DECLARE_BUF(inbuf, MC_CMD_RSS_CONTEXT_ALLOC_IN_LEN); |
| MCDI_DECLARE_BUF(outbuf, MC_CMD_RSS_CONTEXT_ALLOC_OUT_LEN); |
| struct efx_ef10_nic_data *nic_data = efx->nic_data; |
| size_t outlen; |
| int rc; |
| u32 alloc_type = exclusive ? |
| MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_EXCLUSIVE : |
| MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_SHARED; |
| unsigned rss_spread = exclusive ? |
| efx->rss_spread : |
| min(rounddown_pow_of_two(efx->rss_spread), |
| EFX_EF10_MAX_SHARED_RSS_CONTEXT_SIZE); |
| |
| if (!exclusive && rss_spread == 1) { |
| *context = EFX_EF10_RSS_CONTEXT_INVALID; |
| if (context_size) |
| *context_size = 1; |
| return 0; |
| } |
| |
| if (nic_data->datapath_caps & |
| 1 << MC_CMD_GET_CAPABILITIES_OUT_RX_RSS_LIMITED_LBN) |
| return -EOPNOTSUPP; |
| |
| MCDI_SET_DWORD(inbuf, RSS_CONTEXT_ALLOC_IN_UPSTREAM_PORT_ID, |
| nic_data->vport_id); |
| MCDI_SET_DWORD(inbuf, RSS_CONTEXT_ALLOC_IN_TYPE, alloc_type); |
| MCDI_SET_DWORD(inbuf, RSS_CONTEXT_ALLOC_IN_NUM_QUEUES, rss_spread); |
| |
| rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_ALLOC, inbuf, sizeof(inbuf), |
| outbuf, sizeof(outbuf), &outlen); |
| if (rc != 0) |
| return rc; |
| |
| if (outlen < MC_CMD_RSS_CONTEXT_ALLOC_OUT_LEN) |
| return -EIO; |
| |
| *context = MCDI_DWORD(outbuf, RSS_CONTEXT_ALLOC_OUT_RSS_CONTEXT_ID); |
| |
| if (context_size) |
| *context_size = rss_spread; |
| |
| if (nic_data->datapath_caps & |
| 1 << MC_CMD_GET_CAPABILITIES_OUT_ADDITIONAL_RSS_MODES_LBN) |
| efx_ef10_set_rss_flags(efx, *context); |
| |
| return 0; |
| } |
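| |
| /* For a shared context the spread is rounded down to a power of two and |
| * capped at EFX_EF10_MAX_SHARED_RSS_CONTEXT_SIZE: e.g. an efx->rss_spread |
| * of 6 yields a 4-queue shared context. |
| */ |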
| |
| static void efx_ef10_free_rss_context(struct efx_nic *efx, u32 context) |
| { |
| MCDI_DECLARE_BUF(inbuf, MC_CMD_RSS_CONTEXT_FREE_IN_LEN); |
| int rc; |
| |
| MCDI_SET_DWORD(inbuf, RSS_CONTEXT_FREE_IN_RSS_CONTEXT_ID, |
| context); |
| |
| rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_FREE, inbuf, sizeof(inbuf), |
| NULL, 0, NULL); |
| WARN_ON(rc != 0); |
| } |
| |
| static int efx_ef10_populate_rss_table(struct efx_nic *efx, u32 context, |
| const u32 *rx_indir_table, const u8 *key) |
| { |
| MCDI_DECLARE_BUF(tablebuf, MC_CMD_RSS_CONTEXT_SET_TABLE_IN_LEN); |
| MCDI_DECLARE_BUF(keybuf, MC_CMD_RSS_CONTEXT_SET_KEY_IN_LEN); |
| int i, rc; |
| |
| MCDI_SET_DWORD(tablebuf, RSS_CONTEXT_SET_TABLE_IN_RSS_CONTEXT_ID, |
| context); |
| BUILD_BUG_ON(ARRAY_SIZE(efx->rx_indir_table) != |
| MC_CMD_RSS_CONTEXT_SET_TABLE_IN_INDIRECTION_TABLE_LEN); |
| |
| /* This iterates over the length of efx->rx_indir_table, but copies |
| * bytes from rx_indir_table. That's because the latter is a pointer |
| * rather than an array, but should have the same length. |
| * The efx->rx_hash_key loop below is similar. |
| */ |
| for (i = 0; i < ARRAY_SIZE(efx->rx_indir_table); ++i) |
| MCDI_PTR(tablebuf, |
| RSS_CONTEXT_SET_TABLE_IN_INDIRECTION_TABLE)[i] = |
| (u8) rx_indir_table[i]; |
| |
| rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_SET_TABLE, tablebuf, |
| sizeof(tablebuf), NULL, 0, NULL); |
| if (rc != 0) |
| return rc; |
| |
| MCDI_SET_DWORD(keybuf, RSS_CONTEXT_SET_KEY_IN_RSS_CONTEXT_ID, |
| context); |
| BUILD_BUG_ON(ARRAY_SIZE(efx->rx_hash_key) != |
| MC_CMD_RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY_LEN); |
| for (i = 0; i < ARRAY_SIZE(efx->rx_hash_key); ++i) |
| MCDI_PTR(keybuf, RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY)[i] = key[i]; |
| |
| return efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_SET_KEY, keybuf, |
| sizeof(keybuf), NULL, 0, NULL); |
| } |
| |
| static void efx_ef10_rx_free_indir_table(struct efx_nic *efx) |
| { |
| struct efx_ef10_nic_data *nic_data = efx->nic_data; |
| |
| if (nic_data->rx_rss_context != EFX_EF10_RSS_CONTEXT_INVALID) |
| efx_ef10_free_rss_context(efx, nic_data->rx_rss_context); |
| nic_data->rx_rss_context = EFX_EF10_RSS_CONTEXT_INVALID; |
| } |
| |
| static int efx_ef10_rx_push_shared_rss_config(struct efx_nic *efx, |
| unsigned *context_size) |
| { |
| u32 new_rx_rss_context; |
| struct efx_ef10_nic_data *nic_data = efx->nic_data; |
| int rc = efx_ef10_alloc_rss_context(efx, &new_rx_rss_context, |
| false, context_size); |
| |
| if (rc != 0) |
| return rc; |
| |
| nic_data->rx_rss_context = new_rx_rss_context; |
| nic_data->rx_rss_context_exclusive = false; |
| efx_set_default_rx_indir_table(efx); |
| return 0; |
| } |
| |
| static int efx_ef10_rx_push_exclusive_rss_config(struct efx_nic *efx, |
| const u32 *rx_indir_table, |
| const u8 *key) |
| { |
| struct efx_ef10_nic_data *nic_data = efx->nic_data; |
| int rc; |
| u32 new_rx_rss_context; |
| |
| if (nic_data->rx_rss_context == EFX_EF10_RSS_CONTEXT_INVALID || |
| !nic_data->rx_rss_context_exclusive) { |
| rc = efx_ef10_alloc_rss_context(efx, &new_rx_rss_context, |
| true, NULL); |
| if (rc == -EOPNOTSUPP) |
| return rc; |
| else if (rc != 0) |
| goto fail1; |
| } else { |
| new_rx_rss_context = nic_data->rx_rss_context; |
| } |
| |
| rc = efx_ef10_populate_rss_table(efx, new_rx_rss_context, |
| rx_indir_table, key); |
| if (rc != 0) |
| goto fail2; |
| |
| if (nic_data->rx_rss_context != new_rx_rss_context) |
| efx_ef10_rx_free_indir_table(efx); |
| nic_data->rx_rss_context = new_rx_rss_context; |
| nic_data->rx_rss_context_exclusive = true; |
| if (rx_indir_table != efx->rx_indir_table) |
| memcpy(efx->rx_indir_table, rx_indir_table, |
| sizeof(efx->rx_indir_table)); |
| if (key != efx->rx_hash_key) |
| memcpy(efx->rx_hash_key, key, efx->type->rx_hash_key_size); |
| |
| return 0; |
| |
| fail2: |
| if (new_rx_rss_context != nic_data->rx_rss_context) |
| efx_ef10_free_rss_context(efx, new_rx_rss_context); |
| fail1: |
| netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); |
| return rc; |
| } |
| |
| static int efx_ef10_rx_pull_rss_config(struct efx_nic *efx) |
| { |
| struct efx_ef10_nic_data *nic_data = efx->nic_data; |
| MCDI_DECLARE_BUF(inbuf, MC_CMD_RSS_CONTEXT_GET_TABLE_IN_LEN); |
| MCDI_DECLARE_BUF(tablebuf, MC_CMD_RSS_CONTEXT_GET_TABLE_OUT_LEN); |
| MCDI_DECLARE_BUF(keybuf, MC_CMD_RSS_CONTEXT_GET_KEY_OUT_LEN); |
| size_t outlen; |
| int rc, i; |
| |
| BUILD_BUG_ON(MC_CMD_RSS_CONTEXT_GET_TABLE_IN_LEN != |
| MC_CMD_RSS_CONTEXT_GET_KEY_IN_LEN); |
| |
| if (nic_data->rx_rss_context == EFX_EF10_RSS_CONTEXT_INVALID) |
| return -ENOENT; |
| |
| MCDI_SET_DWORD(inbuf, RSS_CONTEXT_GET_TABLE_IN_RSS_CONTEXT_ID, |
| nic_data->rx_rss_context); |
| BUILD_BUG_ON(ARRAY_SIZE(efx->rx_indir_table) != |
| MC_CMD_RSS_CONTEXT_GET_TABLE_OUT_INDIRECTION_TABLE_LEN); |
| rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_GET_TABLE, inbuf, sizeof(inbuf), |
| tablebuf, sizeof(tablebuf), &outlen); |
| if (rc != 0) |
| return rc; |
| |
| if (WARN_ON(outlen != MC_CMD_RSS_CONTEXT_GET_TABLE_OUT_LEN)) |
| return -EIO; |
| |
| for (i = 0; i < ARRAY_SIZE(efx->rx_indir_table); i++) |
| efx->rx_indir_table[i] = MCDI_PTR(tablebuf, |
| RSS_CONTEXT_GET_TABLE_OUT_INDIRECTION_TABLE)[i]; |
| |
| MCDI_SET_DWORD(inbuf, RSS_CONTEXT_GET_KEY_IN_RSS_CONTEXT_ID, |
| nic_data->rx_rss_context); |
| BUILD_BUG_ON(ARRAY_SIZE(efx->rx_hash_key) != |
| MC_CMD_RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY_LEN); |
| rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_GET_KEY, inbuf, sizeof(inbuf), |
| keybuf, sizeof(keybuf), &outlen); |
| if (rc != 0) |
| return rc; |
| |
| if (WARN_ON(outlen != MC_CMD_RSS_CONTEXT_GET_KEY_OUT_LEN)) |
| return -EIO; |
| |
| for (i = 0; i < ARRAY_SIZE(efx->rx_hash_key); ++i) |
| efx->rx_hash_key[i] = MCDI_PTR( |
| keybuf, RSS_CONTEXT_GET_KEY_OUT_TOEPLITZ_KEY)[i]; |
| |
| return 0; |
| } |
| |
| static int efx_ef10_pf_rx_push_rss_config(struct efx_nic *efx, bool user, |
| const u32 *rx_indir_table, |
| const u8 *key) |
| { |
| int rc; |
| |
| if (efx->rss_spread == 1) |
| return 0; |
| |
| if (!key) |
| key = efx->rx_hash_key; |
| |
| rc = efx_ef10_rx_push_exclusive_rss_config(efx, rx_indir_table, key); |
| |
| if (rc == -ENOBUFS && !user) { |
| unsigned context_size; |
| bool mismatch = false; |
| size_t i; |
| |
| for (i = 0; i < ARRAY_SIZE(efx->rx_indir_table) && !mismatch; |
| i++) |
| mismatch = rx_indir_table[i] != |
| ethtool_rxfh_indir_default(i, efx->rss_spread); |
| |
| rc = efx_ef10_rx_push_shared_rss_config(efx, &context_size); |
| if (rc == 0) { |
| if (context_size != efx->rss_spread) |
| netif_warn(efx, probe, efx->net_dev, |
| "Could not allocate an exclusive RSS" |
| " context; allocated a shared one of" |
| " different size." |
| " Wanted %u, got %u.\n", |
| efx->rss_spread, context_size); |
| else if (mismatch) |
| netif_warn(efx, probe, efx->net_dev, |
| "Could not allocate an exclusive RSS" |
| " context; allocated a shared one but" |
| " could not apply custom" |
| " indirection.\n"); |
| else |
| netif_info(efx, probe, efx->net_dev, |
| "Could not allocate an exclusive RSS" |
| " context; allocated a shared one.\n"); |
| } |
| } |
| return rc; |
| } |
| |
| static int efx_ef10_vf_rx_push_rss_config(struct efx_nic *efx, bool user, |
| const u32 *rx_indir_table |
| __attribute__ ((unused)), |
| const u8 *key |
| __attribute__ ((unused))) |
| { |
| struct efx_ef10_nic_data *nic_data = efx->nic_data; |
| |
| if (user) |
| return -EOPNOTSUPP; |
| if (nic_data->rx_rss_context != EFX_EF10_RSS_CONTEXT_INVALID) |
| return 0; |
| return efx_ef10_rx_push_shared_rss_config(efx, NULL); |
| } |
| |
| static int efx_ef10_rx_probe(struct efx_rx_queue *rx_queue) |
| { |
| return efx_nic_alloc_buffer(rx_queue->efx, &rx_queue->rxd.buf, |
| (rx_queue->ptr_mask + 1) * |
| sizeof(efx_qword_t), |
| GFP_KERNEL); |
| } |
| |
| static void efx_ef10_rx_init(struct efx_rx_queue *rx_queue) |
| { |
| MCDI_DECLARE_BUF(inbuf, |
| MC_CMD_INIT_RXQ_IN_LEN(EFX_MAX_DMAQ_SIZE * 8 / |
| EFX_BUF_SIZE)); |
| struct efx_channel *channel = efx_rx_queue_channel(rx_queue); |
| size_t entries = rx_queue->rxd.buf.len / EFX_BUF_SIZE; |
| struct efx_nic *efx = rx_queue->efx; |
| struct efx_ef10_nic_data *nic_data = efx->nic_data; |
| size_t inlen; |
| dma_addr_t dma_addr; |
| int rc; |
| int i; |
| BUILD_BUG_ON(MC_CMD_INIT_RXQ_OUT_LEN != 0); |
| |
| rx_queue->scatter_n = 0; |
| rx_queue->scatter_len = 0; |
| |
| MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_SIZE, rx_queue->ptr_mask + 1); |
| MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_TARGET_EVQ, channel->channel); |
| MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_LABEL, efx_rx_queue_index(rx_queue)); |
| MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_INSTANCE, |
| efx_rx_queue_index(rx_queue)); |
| MCDI_POPULATE_DWORD_2(inbuf, INIT_RXQ_IN_FLAGS, |
| INIT_RXQ_IN_FLAG_PREFIX, 1, |
| INIT_RXQ_IN_FLAG_TIMESTAMP, 1); |
| MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_OWNER_ID, 0); |
| MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_PORT_ID, nic_data->vport_id); |
| |
| dma_addr = rx_queue->rxd.buf.dma_addr; |
| |
| netif_dbg(efx, hw, efx->net_dev, "pushing RXQ %d. %zu entries (%llx)\n", |
| efx_rx_queue_index(rx_queue), entries, (u64)dma_addr); |
| |
| for (i = 0; i < entries; ++i) { |
| MCDI_SET_ARRAY_QWORD(inbuf, INIT_RXQ_IN_DMA_ADDR, i, dma_addr); |
| dma_addr += EFX_BUF_SIZE; |
| } |
| |
| inlen = MC_CMD_INIT_RXQ_IN_LEN(entries); |
| |
| rc = efx_mcdi_rpc(efx, MC_CMD_INIT_RXQ, inbuf, inlen, |
| NULL, 0, NULL); |
| if (rc) |
| netdev_WARN(efx->net_dev, "failed to initialise RXQ %d\n", |
| efx_rx_queue_index(rx_queue)); |
| } |
| |
| static void efx_ef10_rx_fini(struct efx_rx_queue *rx_queue) |
| { |
| MCDI_DECLARE_BUF(inbuf, MC_CMD_FINI_RXQ_IN_LEN); |
| MCDI_DECLARE_BUF_ERR(outbuf); |
| struct efx_nic *efx = rx_queue->efx; |
| size_t outlen; |
| int rc; |
| |
| MCDI_SET_DWORD(inbuf, FINI_RXQ_IN_INSTANCE, |
| efx_rx_queue_index(rx_queue)); |
| |
| rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FINI_RXQ, inbuf, sizeof(inbuf), |
| outbuf, sizeof(outbuf), &outlen); |
| |
| if (rc && rc != -EALREADY) |
| goto fail; |
| |
| return; |
| |
| fail: |
| efx_mcdi_display_error(efx, MC_CMD_FINI_RXQ, MC_CMD_FINI_RXQ_IN_LEN, |
| outbuf, outlen, rc); |
| } |
| |
| static void efx_ef10_rx_remove(struct efx_rx_queue *rx_queue) |
| { |
| efx_nic_free_buffer(rx_queue->efx, &rx_queue->rxd.buf); |
| } |
| |
| /* This creates an entry in the RX descriptor queue */ |
| static inline void |
| efx_ef10_build_rx_desc(struct efx_rx_queue *rx_queue, unsigned int index) |
| { |
| struct efx_rx_buffer *rx_buf; |
| efx_qword_t *rxd; |
| |
| rxd = efx_rx_desc(rx_queue, index); |
| rx_buf = efx_rx_buffer(rx_queue, index); |
| EFX_POPULATE_QWORD_2(*rxd, |
| ESF_DZ_RX_KER_BYTE_CNT, rx_buf->len, |
| ESF_DZ_RX_KER_BUF_ADDR, rx_buf->dma_addr); |
| } |
| |
| static void efx_ef10_rx_write(struct efx_rx_queue *rx_queue) |
| { |
| struct efx_nic *efx = rx_queue->efx; |
| unsigned int write_count; |
| efx_dword_t reg; |
| |
| /* Firmware requires that RX_DESC_WPTR be a multiple of 8 */ |
| write_count = rx_queue->added_count & ~7; |
| if (rx_queue->notified_count == write_count) |
| return; |
| |
| do |
| efx_ef10_build_rx_desc( |
| rx_queue, |
| rx_queue->notified_count & rx_queue->ptr_mask); |
| while (++rx_queue->notified_count != write_count); |
| |
| wmb(); |
| EFX_POPULATE_DWORD_1(reg, ERF_DZ_RX_DESC_WPTR, |
| write_count & rx_queue->ptr_mask); |
| efx_writed_page(efx, ®, ER_DZ_RX_DESC_UPD, |
| efx_rx_queue_index(rx_queue)); |
| } |
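| |
| /* For example, with added_count == 29 and notified_count == 24, the |
| * rounded write_count is 24 (29 & ~7) and no doorbell is rung; the |
| * remaining 5 descriptors are pushed once 3 more buffers have been |
| * added. |
| */ |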
| |
| static efx_mcdi_async_completer efx_ef10_rx_defer_refill_complete; |
| |
| static void efx_ef10_rx_defer_refill(struct efx_rx_queue *rx_queue) |
| { |
| struct efx_channel *channel = efx_rx_queue_channel(rx_queue); |
| MCDI_DECLARE_BUF(inbuf, MC_CMD_DRIVER_EVENT_IN_LEN); |
| efx_qword_t event; |
| |
| EFX_POPULATE_QWORD_2(event, |
| ESF_DZ_EV_CODE, EFX_EF10_DRVGEN_EV, |
| ESF_DZ_EV_DATA, EFX_EF10_REFILL); |
| |
| MCDI_SET_DWORD(inbuf, DRIVER_EVENT_IN_EVQ, channel->channel); |
| |
| /* MCDI_SET_QWORD is not appropriate here since EFX_POPULATE_* has |
| * already swapped the data to little-endian order. |
| */ |
| memcpy(MCDI_PTR(inbuf, DRIVER_EVENT_IN_DATA), &event.u64[0], |
| sizeof(efx_qword_t)); |
| |
| efx_mcdi_rpc_async(channel->efx, MC_CMD_DRIVER_EVENT, |
| inbuf, sizeof(inbuf), 0, |
| efx_ef10_rx_defer_refill_complete, 0); |
| } |
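| |
| /* The event comes back on the channel's own event queue with |
| * ESF_DZ_EV_CODE == EFX_EF10_DRVGEN_EV and data EFX_EF10_REFILL, so the |
| * refill itself then runs from the channel's event-processing (NAPI) |
| * context rather than from this caller. |
| */ |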
| |
| static void |
| efx_ef10_rx_defer_refill_complete(struct efx_nic *efx, unsigned long cookie, |
| int rc, efx_dword_t *outbuf, |
| size_t outlen_actual) |
| { |
| /* nothing to do */ |
| } |
| |
| static int efx_ef10_ev_probe(struct efx_channel *channel) |
| { |
| return efx_nic_alloc_buffer(channel->efx, &channel->eventq.buf, |
| (channel->eventq_mask + 1) * |
| sizeof(efx_qword_t), |
| GFP_KERNEL); |
| } |
| |
| static void efx_ef10_ev_fini(struct efx_channel *channel) |
| { |
| MCDI_DECLARE_BUF(inbuf, MC_CMD_FINI_EVQ_IN_LEN); |
| MCDI_DECLARE_BUF_ERR(outbuf); |
| struct efx_nic *efx = channel->efx; |
| size_t outlen; |
| int rc; |
| |
| MCDI_SET_DWORD(inbuf, FINI_EVQ_IN_INSTANCE, channel->channel); |
| |
| rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FINI_EVQ, inbuf, sizeof(inbuf), |
| outbuf, sizeof(outbuf), &outlen); |
| |
| if (rc && rc != -EALREADY) |
| goto fail; |
| |
| return; |
| |
| fail: |
| efx_mcdi_display_error(efx, MC_CMD_FINI_EVQ, MC_CMD_FINI_EVQ_IN_LEN, |
| outbuf, outlen, rc); |
| } |
| |
| static int efx_ef10_ev_init(struct efx_channel *channel) |
| { |
| MCDI_DECLARE_BUF(inbuf, |
| MC_CMD_INIT_EVQ_V2_IN_LEN(EFX_MAX_EVQ_SIZE * 8 / |
| EFX_BUF_SIZE)); |
| MCDI_DECLARE_BUF(outbuf, MC_CMD_INIT_EVQ_V2_OUT_LEN); |
| size_t entries = channel->eventq.buf.len / EFX_BUF_SIZE; |
| struct efx_nic *efx = channel->efx; |
| struct efx_ef10_nic_data *nic_data; |
| size_t inlen, outlen; |
| unsigned int enabled, implemented; |
| dma_addr_t dma_addr; |
| int rc; |
| int i; |
| |
| nic_data = efx->nic_data; |
| |
| /* Fill event queue with all ones (i.e. empty events) */ |
| memset(channel->eventq.buf.addr, 0xff, channel->eventq.buf.len); |
| |
| MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_SIZE, channel->eventq_mask + 1); |
| MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_INSTANCE, channel->channel); |
| /* INIT_EVQ expects index in vector table, not absolute */ |
| MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_IRQ_NUM, channel->channel); |
| MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_TMR_MODE, |
| MC_CMD_INIT_EVQ_IN_TMR_MODE_DIS); |
| MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_TMR_LOAD, 0); |
| MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_TMR_RELOAD, 0); |
| MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_COUNT_MODE, |
| MC_CMD_INIT_EVQ_IN_COUNT_MODE_DIS); |
| MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_COUNT_THRSHLD, 0); |
| |
| if (nic_data->datapath_caps2 & |
|