/*
2 * Copyright (c) 2013 Johannes Berg <johannes@sipsolutions.net>
3 *
4 * This file is free software: you may copy, redistribute and/or modify it
5 * under the terms of the GNU General Public License as published by the
6 * Free Software Foundation, either version 2 of the License, or (at your
7 * option) any later version.
8 *
9 * This file is distributed in the hope that it will be useful, but
10 * WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12 * General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program. If not, see <http://www.gnu.org/licenses/>.
16 *
17 * This file incorporates work covered by the following copyright and
18 * permission notice:
19 *
20 * Copyright (c) 2012 Qualcomm Atheros, Inc.
21 *
22 * Permission to use, copy, modify, and/or distribute this software for any
23 * purpose with or without fee is hereby granted, provided that the above
24 * copyright notice and this permission notice appear in all copies.
25 *
26 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
27 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
28 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
29 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
30 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
31 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
32 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
33 */
34
35#include <linux/module.h>
36#include <linux/pci.h>
37#include <linux/interrupt.h>
38#include <linux/ip.h>
39#include <linux/ipv6.h>
40#include <linux/if_vlan.h>
41#include <linux/mdio.h>
42#include <linux/aer.h>
43#include <linux/bitops.h>
44#include <linux/netdevice.h>
45#include <linux/etherdevice.h>
46#include <net/ip6_checksum.h>
47#include <linux/crc32.h>
48#include "alx.h"
49#include "hw.h"
50#include "reg.h"
51
52const char alx_drv_name[] = "alx";
53
Tobias Regnery702e8412016-11-15 12:43:11 +010054static void alx_free_txbuf(struct alx_tx_queue *txq, int entry)
Johannes Bergab69bde2013-06-17 22:44:02 +020055{
Tobias Regnery702e8412016-11-15 12:43:11 +010056 struct alx_buffer *txb = &txq->bufs[entry];
Johannes Bergab69bde2013-06-17 22:44:02 +020057
58 if (dma_unmap_len(txb, size)) {
Tobias Regnery702e8412016-11-15 12:43:11 +010059 dma_unmap_single(txq->dev,
Johannes Bergab69bde2013-06-17 22:44:02 +020060 dma_unmap_addr(txb, dma),
61 dma_unmap_len(txb, size),
62 DMA_TO_DEVICE);
63 dma_unmap_len_set(txb, size, 0);
64 }
65
66 if (txb->skb) {
67 dev_kfree_skb_any(txb->skb);
68 txb->skb = NULL;
69 }
70}
71
72static int alx_refill_rx_ring(struct alx_priv *alx, gfp_t gfp)
73{
Tobias Regnery702e8412016-11-15 12:43:11 +010074 struct alx_rx_queue *rxq = alx->qnapi[0]->rxq;
Johannes Bergab69bde2013-06-17 22:44:02 +020075 struct sk_buff *skb;
76 struct alx_buffer *cur_buf;
77 dma_addr_t dma;
78 u16 cur, next, count = 0;
79
80 next = cur = rxq->write_idx;
81 if (++next == alx->rx_ringsz)
82 next = 0;
83 cur_buf = &rxq->bufs[cur];
84
85 while (!cur_buf->skb && next != rxq->read_idx) {
86 struct alx_rfd *rfd = &rxq->rfd[cur];
87
Feng Tang881d0322016-06-12 17:36:37 +080088 /*
89 * When DMA RX address is set to something like
90 * 0x....fc0, it will be very likely to cause DMA
91 * RFD overflow issue.
92 *
93 * To work around it, we apply rx skb with 64 bytes
94 * longer space, and offset the address whenever
95 * 0x....fc0 is detected.
96 */
97 skb = __netdev_alloc_skb(alx->dev, alx->rxbuf_size + 64, gfp);
Johannes Bergab69bde2013-06-17 22:44:02 +020098 if (!skb)
99 break;
Feng Tang881d0322016-06-12 17:36:37 +0800100
101 if (((unsigned long)skb->data & 0xfff) == 0xfc0)
102 skb_reserve(skb, 64);
103
Johannes Bergab69bde2013-06-17 22:44:02 +0200104 dma = dma_map_single(&alx->hw.pdev->dev,
105 skb->data, alx->rxbuf_size,
106 DMA_FROM_DEVICE);
107 if (dma_mapping_error(&alx->hw.pdev->dev, dma)) {
108 dev_kfree_skb(skb);
109 break;
110 }
111
112 /* Unfortunately, RX descriptor buffers must be 4-byte
113 * aligned, so we can't use IP alignment.
114 */
115 if (WARN_ON(dma & 3)) {
116 dev_kfree_skb(skb);
117 break;
118 }
119
120 cur_buf->skb = skb;
121 dma_unmap_len_set(cur_buf, size, alx->rxbuf_size);
122 dma_unmap_addr_set(cur_buf, dma, dma);
123 rfd->addr = cpu_to_le64(dma);
124
125 cur = next;
126 if (++next == alx->rx_ringsz)
127 next = 0;
128 cur_buf = &rxq->bufs[cur];
129 count++;
130 }
131
132 if (count) {
133 /* flush all updates before updating hardware */
134 wmb();
135 rxq->write_idx = cur;
136 alx_write_mem16(&alx->hw, ALX_RFD_PIDX, cur);
137 }
138
139 return count;
140}
141
Tobias Regnery2e068262016-11-15 12:43:14 +0100142static struct alx_tx_queue *alx_tx_queue_mapping(struct alx_priv *alx,
143 struct sk_buff *skb)
144{
145 unsigned int r_idx = skb->queue_mapping;
146
147 if (r_idx >= alx->num_txq)
148 r_idx = r_idx % alx->num_txq;
149
150 return alx->qnapi[r_idx]->txq;
151}
152
153static struct netdev_queue *alx_get_tx_queue(const struct alx_tx_queue *txq)
154{
155 return netdev_get_tx_queue(txq->netdev, txq->queue_idx);
156}
157
Tobias Regnery702e8412016-11-15 12:43:11 +0100158static inline int alx_tpd_avail(struct alx_tx_queue *txq)
Johannes Bergab69bde2013-06-17 22:44:02 +0200159{
Johannes Bergab69bde2013-06-17 22:44:02 +0200160 if (txq->write_idx >= txq->read_idx)
Tobias Regnery702e8412016-11-15 12:43:11 +0100161 return txq->count + txq->read_idx - txq->write_idx - 1;
Johannes Bergab69bde2013-06-17 22:44:02 +0200162 return txq->read_idx - txq->write_idx - 1;
163}
164
Tobias Regnery702e8412016-11-15 12:43:11 +0100165static bool alx_clean_tx_irq(struct alx_tx_queue *txq)
Johannes Bergab69bde2013-06-17 22:44:02 +0200166{
Tobias Regnery702e8412016-11-15 12:43:11 +0100167 struct alx_priv *alx;
Tobias Regnery2e068262016-11-15 12:43:14 +0100168 struct netdev_queue *tx_queue;
Johannes Bergab69bde2013-06-17 22:44:02 +0200169 u16 hw_read_idx, sw_read_idx;
170 unsigned int total_bytes = 0, total_packets = 0;
171 int budget = ALX_DEFAULT_TX_WORK;
172
Tobias Regnery702e8412016-11-15 12:43:11 +0100173 alx = netdev_priv(txq->netdev);
Tobias Regnery2e068262016-11-15 12:43:14 +0100174 tx_queue = alx_get_tx_queue(txq);
Tobias Regnery702e8412016-11-15 12:43:11 +0100175
Johannes Bergab69bde2013-06-17 22:44:02 +0200176 sw_read_idx = txq->read_idx;
Tobias Regnery2e068262016-11-15 12:43:14 +0100177 hw_read_idx = alx_read_mem16(&alx->hw, txq->c_reg);
Johannes Bergab69bde2013-06-17 22:44:02 +0200178
179 if (sw_read_idx != hw_read_idx) {
180 while (sw_read_idx != hw_read_idx && budget > 0) {
181 struct sk_buff *skb;
182
183 skb = txq->bufs[sw_read_idx].skb;
184 if (skb) {
185 total_bytes += skb->len;
186 total_packets++;
187 budget--;
188 }
189
Tobias Regnery702e8412016-11-15 12:43:11 +0100190 alx_free_txbuf(txq, sw_read_idx);
Johannes Bergab69bde2013-06-17 22:44:02 +0200191
Tobias Regnery702e8412016-11-15 12:43:11 +0100192 if (++sw_read_idx == txq->count)
Johannes Bergab69bde2013-06-17 22:44:02 +0200193 sw_read_idx = 0;
194 }
195 txq->read_idx = sw_read_idx;
196
Tobias Regnery2e068262016-11-15 12:43:14 +0100197 netdev_tx_completed_queue(tx_queue, total_packets, total_bytes);
Johannes Bergab69bde2013-06-17 22:44:02 +0200198 }
199
Tobias Regnery2e068262016-11-15 12:43:14 +0100200 if (netif_tx_queue_stopped(tx_queue) && netif_carrier_ok(alx->dev) &&
Tobias Regnery702e8412016-11-15 12:43:11 +0100201 alx_tpd_avail(txq) > txq->count / 4)
Tobias Regnery2e068262016-11-15 12:43:14 +0100202 netif_tx_wake_queue(tx_queue);
Johannes Bergab69bde2013-06-17 22:44:02 +0200203
204 return sw_read_idx == hw_read_idx;
205}
206
207static void alx_schedule_link_check(struct alx_priv *alx)
208{
209 schedule_work(&alx->link_check_wk);
210}
211
212static void alx_schedule_reset(struct alx_priv *alx)
213{
214 schedule_work(&alx->reset_wk);
215}
216
Tobias Regnery702e8412016-11-15 12:43:11 +0100217static int alx_clean_rx_irq(struct alx_rx_queue *rxq, int budget)
Johannes Bergab69bde2013-06-17 22:44:02 +0200218{
Tobias Regnery702e8412016-11-15 12:43:11 +0100219 struct alx_priv *alx;
Johannes Bergab69bde2013-06-17 22:44:02 +0200220 struct alx_rrd *rrd;
221 struct alx_buffer *rxb;
222 struct sk_buff *skb;
223 u16 length, rfd_cleaned = 0;
Eric Dumazet7a05dc62015-01-11 10:32:18 -0800224 int work = 0;
Johannes Bergab69bde2013-06-17 22:44:02 +0200225
Tobias Regnery702e8412016-11-15 12:43:11 +0100226 alx = netdev_priv(rxq->netdev);
227
Eric Dumazet7a05dc62015-01-11 10:32:18 -0800228 while (work < budget) {
Johannes Bergab69bde2013-06-17 22:44:02 +0200229 rrd = &rxq->rrd[rxq->rrd_read_idx];
230 if (!(rrd->word3 & cpu_to_le32(1 << RRD_UPDATED_SHIFT)))
231 break;
232 rrd->word3 &= ~cpu_to_le32(1 << RRD_UPDATED_SHIFT);
233
234 if (ALX_GET_FIELD(le32_to_cpu(rrd->word0),
235 RRD_SI) != rxq->read_idx ||
236 ALX_GET_FIELD(le32_to_cpu(rrd->word0),
237 RRD_NOR) != 1) {
238 alx_schedule_reset(alx);
Eric Dumazet7a05dc62015-01-11 10:32:18 -0800239 return work;
Johannes Bergab69bde2013-06-17 22:44:02 +0200240 }
241
242 rxb = &rxq->bufs[rxq->read_idx];
Tobias Regnery702e8412016-11-15 12:43:11 +0100243 dma_unmap_single(rxq->dev,
Johannes Bergab69bde2013-06-17 22:44:02 +0200244 dma_unmap_addr(rxb, dma),
245 dma_unmap_len(rxb, size),
246 DMA_FROM_DEVICE);
247 dma_unmap_len_set(rxb, size, 0);
248 skb = rxb->skb;
249 rxb->skb = NULL;
250
251 if (rrd->word3 & cpu_to_le32(1 << RRD_ERR_RES_SHIFT) ||
252 rrd->word3 & cpu_to_le32(1 << RRD_ERR_LEN_SHIFT)) {
253 rrd->word3 = 0;
254 dev_kfree_skb_any(skb);
255 goto next_pkt;
256 }
257
258 length = ALX_GET_FIELD(le32_to_cpu(rrd->word3),
259 RRD_PKTLEN) - ETH_FCS_LEN;
260 skb_put(skb, length);
Tobias Regnery702e8412016-11-15 12:43:11 +0100261 skb->protocol = eth_type_trans(skb, rxq->netdev);
Johannes Bergab69bde2013-06-17 22:44:02 +0200262
263 skb_checksum_none_assert(skb);
264 if (alx->dev->features & NETIF_F_RXCSUM &&
265 !(rrd->word3 & (cpu_to_le32(1 << RRD_ERR_L4_SHIFT) |
266 cpu_to_le32(1 << RRD_ERR_IPV4_SHIFT)))) {
267 switch (ALX_GET_FIELD(le32_to_cpu(rrd->word2),
268 RRD_PID)) {
269 case RRD_PID_IPV6UDP:
270 case RRD_PID_IPV4UDP:
271 case RRD_PID_IPV4TCP:
272 case RRD_PID_IPV6TCP:
273 skb->ip_summed = CHECKSUM_UNNECESSARY;
274 break;
275 }
276 }
277
Tobias Regnery702e8412016-11-15 12:43:11 +0100278 napi_gro_receive(&rxq->np->napi, skb);
Eric Dumazet7a05dc62015-01-11 10:32:18 -0800279 work++;
Johannes Bergab69bde2013-06-17 22:44:02 +0200280
281next_pkt:
Tobias Regnery702e8412016-11-15 12:43:11 +0100282 if (++rxq->read_idx == rxq->count)
Johannes Bergab69bde2013-06-17 22:44:02 +0200283 rxq->read_idx = 0;
Tobias Regnery702e8412016-11-15 12:43:11 +0100284 if (++rxq->rrd_read_idx == rxq->count)
Johannes Bergab69bde2013-06-17 22:44:02 +0200285 rxq->rrd_read_idx = 0;
286
287 if (++rfd_cleaned > ALX_RX_ALLOC_THRESH)
288 rfd_cleaned -= alx_refill_rx_ring(alx, GFP_ATOMIC);
289 }
290
291 if (rfd_cleaned)
292 alx_refill_rx_ring(alx, GFP_ATOMIC);
293
Eric Dumazet7a05dc62015-01-11 10:32:18 -0800294 return work;
Johannes Bergab69bde2013-06-17 22:44:02 +0200295}
296
297static int alx_poll(struct napi_struct *napi, int budget)
298{
Tobias Regnery702e8412016-11-15 12:43:11 +0100299 struct alx_napi *np = container_of(napi, struct alx_napi, napi);
300 struct alx_priv *alx = np->alx;
Johannes Bergab69bde2013-06-17 22:44:02 +0200301 struct alx_hw *hw = &alx->hw;
Johannes Bergab69bde2013-06-17 22:44:02 +0200302 unsigned long flags;
Tobias Regnerye0eac252016-11-15 12:43:12 +0100303 bool tx_complete = true;
304 int work = 0;
Johannes Bergab69bde2013-06-17 22:44:02 +0200305
Tobias Regnerye0eac252016-11-15 12:43:12 +0100306 if (np->txq)
307 tx_complete = alx_clean_tx_irq(np->txq);
308 if (np->rxq)
309 work = alx_clean_rx_irq(np->rxq, budget);
Johannes Bergab69bde2013-06-17 22:44:02 +0200310
Eric Dumazet7a05dc62015-01-11 10:32:18 -0800311 if (!tx_complete || work == budget)
312 return budget;
Johannes Bergab69bde2013-06-17 22:44:02 +0200313
Tobias Regnery702e8412016-11-15 12:43:11 +0100314 napi_complete(&np->napi);
Johannes Bergab69bde2013-06-17 22:44:02 +0200315
316 /* enable interrupt */
Tobias Regnerydc39a782016-09-09 12:19:54 +0200317 if (alx->flags & ALX_FLAG_USING_MSIX) {
Tobias Regnerye0eac252016-11-15 12:43:12 +0100318 alx_mask_msix(hw, np->vec_idx, false);
Tobias Regnerydc39a782016-09-09 12:19:54 +0200319 } else {
320 spin_lock_irqsave(&alx->irq_lock, flags);
321 alx->int_mask |= ALX_ISR_TX_Q0 | ALX_ISR_RX_Q0;
322 alx_write_mem32(hw, ALX_IMR, alx->int_mask);
323 spin_unlock_irqrestore(&alx->irq_lock, flags);
324 }
Johannes Bergab69bde2013-06-17 22:44:02 +0200325
326 alx_post_write(hw);
327
Eric Dumazet7a05dc62015-01-11 10:32:18 -0800328 return work;
Johannes Bergab69bde2013-06-17 22:44:02 +0200329}
330
Tobias Regnerya0373ae2016-09-09 12:19:53 +0200331static bool alx_intr_handle_misc(struct alx_priv *alx, u32 intr)
Johannes Bergab69bde2013-06-17 22:44:02 +0200332{
333 struct alx_hw *hw = &alx->hw;
Johannes Bergab69bde2013-06-17 22:44:02 +0200334
335 if (intr & ALX_ISR_FATAL) {
336 netif_warn(alx, hw, alx->dev,
337 "fatal interrupt 0x%x, resetting\n", intr);
338 alx_schedule_reset(alx);
Tobias Regnerya0373ae2016-09-09 12:19:53 +0200339 return true;
Johannes Bergab69bde2013-06-17 22:44:02 +0200340 }
341
342 if (intr & ALX_ISR_ALERT)
343 netdev_warn(alx->dev, "alert interrupt: 0x%x\n", intr);
344
345 if (intr & ALX_ISR_PHY) {
346 /* suppress PHY interrupt, because the source
347 * is from PHY internal. only the internal status
348 * is cleared, the interrupt status could be cleared.
349 */
350 alx->int_mask &= ~ALX_ISR_PHY;
Tobias Regnerya0373ae2016-09-09 12:19:53 +0200351 alx_write_mem32(hw, ALX_IMR, alx->int_mask);
Johannes Bergab69bde2013-06-17 22:44:02 +0200352 alx_schedule_link_check(alx);
353 }
354
Tobias Regnerya0373ae2016-09-09 12:19:53 +0200355 return false;
356}
357
358static irqreturn_t alx_intr_handle(struct alx_priv *alx, u32 intr)
359{
360 struct alx_hw *hw = &alx->hw;
361
362 spin_lock(&alx->irq_lock);
363
364 /* ACK interrupt */
365 alx_write_mem32(hw, ALX_ISR, intr | ALX_ISR_DIS);
366 intr &= alx->int_mask;
367
368 if (alx_intr_handle_misc(alx, intr))
369 goto out;
370
Johannes Bergab69bde2013-06-17 22:44:02 +0200371 if (intr & (ALX_ISR_TX_Q0 | ALX_ISR_RX_Q0)) {
Tobias Regnery702e8412016-11-15 12:43:11 +0100372 napi_schedule(&alx->qnapi[0]->napi);
Johannes Bergab69bde2013-06-17 22:44:02 +0200373 /* mask rx/tx interrupt, enable them when napi complete */
374 alx->int_mask &= ~ALX_ISR_ALL_QUEUES;
Johannes Bergab69bde2013-06-17 22:44:02 +0200375 alx_write_mem32(hw, ALX_IMR, alx->int_mask);
Tobias Regnerya0373ae2016-09-09 12:19:53 +0200376 }
Johannes Bergab69bde2013-06-17 22:44:02 +0200377
378 alx_write_mem32(hw, ALX_ISR, 0);
379
380 out:
381 spin_unlock(&alx->irq_lock);
382 return IRQ_HANDLED;
383}
384
Tobias Regnerydc39a782016-09-09 12:19:54 +0200385static irqreturn_t alx_intr_msix_ring(int irq, void *data)
386{
Tobias Regnery702e8412016-11-15 12:43:11 +0100387 struct alx_napi *np = data;
388 struct alx_hw *hw = &np->alx->hw;
Tobias Regnerydc39a782016-09-09 12:19:54 +0200389
390 /* mask interrupt to ACK chip */
Tobias Regnerye0eac252016-11-15 12:43:12 +0100391 alx_mask_msix(hw, np->vec_idx, true);
Tobias Regnerydc39a782016-09-09 12:19:54 +0200392 /* clear interrupt status */
Tobias Regnerye0eac252016-11-15 12:43:12 +0100393 alx_write_mem32(hw, ALX_ISR, np->vec_mask);
Tobias Regnerydc39a782016-09-09 12:19:54 +0200394
Tobias Regnery702e8412016-11-15 12:43:11 +0100395 napi_schedule(&np->napi);
Tobias Regnerydc39a782016-09-09 12:19:54 +0200396
397 return IRQ_HANDLED;
398}
399
400static irqreturn_t alx_intr_msix_misc(int irq, void *data)
401{
402 struct alx_priv *alx = data;
403 struct alx_hw *hw = &alx->hw;
404 u32 intr;
405
406 /* mask interrupt to ACK chip */
407 alx_mask_msix(hw, 0, true);
408
409 /* read interrupt status */
410 intr = alx_read_mem32(hw, ALX_ISR);
411 intr &= (alx->int_mask & ~ALX_ISR_ALL_QUEUES);
412
413 if (alx_intr_handle_misc(alx, intr))
414 return IRQ_HANDLED;
415
416 /* clear interrupt status */
417 alx_write_mem32(hw, ALX_ISR, intr);
418
419 /* enable interrupt again */
420 alx_mask_msix(hw, 0, false);
421
422 return IRQ_HANDLED;
423}
424
Johannes Bergab69bde2013-06-17 22:44:02 +0200425static irqreturn_t alx_intr_msi(int irq, void *data)
426{
427 struct alx_priv *alx = data;
428
429 return alx_intr_handle(alx, alx_read_mem32(&alx->hw, ALX_ISR));
430}
431
432static irqreturn_t alx_intr_legacy(int irq, void *data)
433{
434 struct alx_priv *alx = data;
435 struct alx_hw *hw = &alx->hw;
436 u32 intr;
437
438 intr = alx_read_mem32(hw, ALX_ISR);
439
440 if (intr & ALX_ISR_DIS || !(intr & alx->int_mask))
441 return IRQ_NONE;
442
443 return alx_intr_handle(alx, intr);
444}
445
Tobias Regnerya4076d32016-11-15 12:43:13 +0100446static const u16 txring_header_reg[] = {ALX_TPD_PRI0_ADDR_LO,
447 ALX_TPD_PRI1_ADDR_LO,
448 ALX_TPD_PRI2_ADDR_LO,
449 ALX_TPD_PRI3_ADDR_LO};
450
Johannes Bergab69bde2013-06-17 22:44:02 +0200451static void alx_init_ring_ptrs(struct alx_priv *alx)
452{
453 struct alx_hw *hw = &alx->hw;
454 u32 addr_hi = ((u64)alx->descmem.dma) >> 32;
Tobias Regnerya4076d32016-11-15 12:43:13 +0100455 struct alx_napi *np;
456 int i;
Johannes Bergab69bde2013-06-17 22:44:02 +0200457
Tobias Regnerya4076d32016-11-15 12:43:13 +0100458 for (i = 0; i < alx->num_napi; i++) {
459 np = alx->qnapi[i];
460 if (np->txq) {
461 np->txq->read_idx = 0;
462 np->txq->write_idx = 0;
463 alx_write_mem32(hw,
464 txring_header_reg[np->txq->queue_idx],
465 np->txq->tpd_dma);
466 }
467
468 if (np->rxq) {
469 np->rxq->read_idx = 0;
470 np->rxq->write_idx = 0;
471 np->rxq->rrd_read_idx = 0;
472 alx_write_mem32(hw, ALX_RRD_ADDR_LO, np->rxq->rrd_dma);
473 alx_write_mem32(hw, ALX_RFD_ADDR_LO, np->rxq->rfd_dma);
474 }
475 }
476
477 alx_write_mem32(hw, ALX_TX_BASE_ADDR_HI, addr_hi);
478 alx_write_mem32(hw, ALX_TPD_RING_SZ, alx->tx_ringsz);
479
Johannes Bergab69bde2013-06-17 22:44:02 +0200480 alx_write_mem32(hw, ALX_RX_BASE_ADDR_HI, addr_hi);
Johannes Bergab69bde2013-06-17 22:44:02 +0200481 alx_write_mem32(hw, ALX_RRD_RING_SZ, alx->rx_ringsz);
Johannes Bergab69bde2013-06-17 22:44:02 +0200482 alx_write_mem32(hw, ALX_RFD_RING_SZ, alx->rx_ringsz);
483 alx_write_mem32(hw, ALX_RFD_BUF_SZ, alx->rxbuf_size);
484
Johannes Bergab69bde2013-06-17 22:44:02 +0200485 /* load these pointers into the chip */
486 alx_write_mem32(hw, ALX_SRAM9, ALX_SRAM_LOAD_PTR);
487}
488
Tobias Regnery702e8412016-11-15 12:43:11 +0100489static void alx_free_txring_buf(struct alx_tx_queue *txq)
Johannes Bergab69bde2013-06-17 22:44:02 +0200490{
Johannes Bergab69bde2013-06-17 22:44:02 +0200491 int i;
492
493 if (!txq->bufs)
494 return;
495
Tobias Regnery702e8412016-11-15 12:43:11 +0100496 for (i = 0; i < txq->count; i++)
497 alx_free_txbuf(txq, i);
Johannes Bergab69bde2013-06-17 22:44:02 +0200498
Tobias Regnery702e8412016-11-15 12:43:11 +0100499 memset(txq->bufs, 0, txq->count * sizeof(struct alx_buffer));
500 memset(txq->tpd, 0, txq->count * sizeof(struct alx_txd));
Johannes Bergab69bde2013-06-17 22:44:02 +0200501 txq->write_idx = 0;
502 txq->read_idx = 0;
503
Tobias Regnery2e068262016-11-15 12:43:14 +0100504 netdev_tx_reset_queue(alx_get_tx_queue(txq));
Johannes Bergab69bde2013-06-17 22:44:02 +0200505}
506
Tobias Regnery702e8412016-11-15 12:43:11 +0100507static void alx_free_rxring_buf(struct alx_rx_queue *rxq)
Johannes Bergab69bde2013-06-17 22:44:02 +0200508{
Johannes Bergab69bde2013-06-17 22:44:02 +0200509 struct alx_buffer *cur_buf;
510 u16 i;
511
Tobias Regnerya4076d32016-11-15 12:43:13 +0100512 if (!rxq->bufs)
Johannes Bergab69bde2013-06-17 22:44:02 +0200513 return;
514
Tobias Regnery702e8412016-11-15 12:43:11 +0100515 for (i = 0; i < rxq->count; i++) {
Johannes Bergab69bde2013-06-17 22:44:02 +0200516 cur_buf = rxq->bufs + i;
517 if (cur_buf->skb) {
Tobias Regnery702e8412016-11-15 12:43:11 +0100518 dma_unmap_single(rxq->dev,
Johannes Bergab69bde2013-06-17 22:44:02 +0200519 dma_unmap_addr(cur_buf, dma),
520 dma_unmap_len(cur_buf, size),
521 DMA_FROM_DEVICE);
522 dev_kfree_skb(cur_buf->skb);
523 cur_buf->skb = NULL;
524 dma_unmap_len_set(cur_buf, size, 0);
525 dma_unmap_addr_set(cur_buf, dma, 0);
526 }
527 }
528
529 rxq->write_idx = 0;
530 rxq->read_idx = 0;
531 rxq->rrd_read_idx = 0;
532}
533
534static void alx_free_buffers(struct alx_priv *alx)
535{
Tobias Regnerya4076d32016-11-15 12:43:13 +0100536 int i;
537
538 for (i = 0; i < alx->num_txq; i++)
539 if (alx->qnapi[i] && alx->qnapi[i]->txq)
540 alx_free_txring_buf(alx->qnapi[i]->txq);
541
542 if (alx->qnapi[0] && alx->qnapi[0]->rxq)
543 alx_free_rxring_buf(alx->qnapi[0]->rxq);
Johannes Bergab69bde2013-06-17 22:44:02 +0200544}
545
546static int alx_reinit_rings(struct alx_priv *alx)
547{
548 alx_free_buffers(alx);
549
550 alx_init_ring_ptrs(alx);
551
552 if (!alx_refill_rx_ring(alx, GFP_KERNEL))
553 return -ENOMEM;
554
555 return 0;
556}
557
558static void alx_add_mc_addr(struct alx_hw *hw, const u8 *addr, u32 *mc_hash)
559{
560 u32 crc32, bit, reg;
561
562 crc32 = ether_crc(ETH_ALEN, addr);
563 reg = (crc32 >> 31) & 0x1;
564 bit = (crc32 >> 26) & 0x1F;
565
566 mc_hash[reg] |= BIT(bit);
567}
568
569static void __alx_set_rx_mode(struct net_device *netdev)
570{
571 struct alx_priv *alx = netdev_priv(netdev);
572 struct alx_hw *hw = &alx->hw;
573 struct netdev_hw_addr *ha;
574 u32 mc_hash[2] = {};
575
576 if (!(netdev->flags & IFF_ALLMULTI)) {
577 netdev_for_each_mc_addr(ha, netdev)
578 alx_add_mc_addr(hw, ha->addr, mc_hash);
579
580 alx_write_mem32(hw, ALX_HASH_TBL0, mc_hash[0]);
581 alx_write_mem32(hw, ALX_HASH_TBL1, mc_hash[1]);
582 }
583
584 hw->rx_ctrl &= ~(ALX_MAC_CTRL_MULTIALL_EN | ALX_MAC_CTRL_PROMISC_EN);
585 if (netdev->flags & IFF_PROMISC)
586 hw->rx_ctrl |= ALX_MAC_CTRL_PROMISC_EN;
587 if (netdev->flags & IFF_ALLMULTI)
588 hw->rx_ctrl |= ALX_MAC_CTRL_MULTIALL_EN;
589
590 alx_write_mem32(hw, ALX_MAC_CTRL, hw->rx_ctrl);
591}
592
/* ndo_set_rx_mode callback; thin wrapper around __alx_set_rx_mode(). */
static void alx_set_rx_mode(struct net_device *netdev)
{
	__alx_set_rx_mode(netdev);
}
597
598static int alx_set_mac_address(struct net_device *netdev, void *data)
599{
600 struct alx_priv *alx = netdev_priv(netdev);
601 struct alx_hw *hw = &alx->hw;
602 struct sockaddr *addr = data;
603
604 if (!is_valid_ether_addr(addr->sa_data))
605 return -EADDRNOTAVAIL;
606
607 if (netdev->addr_assign_type & NET_ADDR_RANDOM)
608 netdev->addr_assign_type ^= NET_ADDR_RANDOM;
609
610 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
611 memcpy(hw->mac_addr, addr->sa_data, netdev->addr_len);
612 alx_set_macaddr(hw, hw->mac_addr);
613
614 return 0;
615}
616
Tobias Regnery8c2a4c82016-11-15 12:43:08 +0100617static int alx_alloc_tx_ring(struct alx_priv *alx, struct alx_tx_queue *txq,
618 int offset)
Johannes Bergab69bde2013-06-17 22:44:02 +0200619{
Tobias Regnery702e8412016-11-15 12:43:11 +0100620 txq->bufs = kcalloc(txq->count, sizeof(struct alx_buffer), GFP_KERNEL);
Tobias Regnery8c2a4c82016-11-15 12:43:08 +0100621 if (!txq->bufs)
Johannes Bergab69bde2013-06-17 22:44:02 +0200622 return -ENOMEM;
623
Tobias Regnery8c2a4c82016-11-15 12:43:08 +0100624 txq->tpd = alx->descmem.virt + offset;
625 txq->tpd_dma = alx->descmem.dma + offset;
Tobias Regnery702e8412016-11-15 12:43:11 +0100626 offset += sizeof(struct alx_txd) * txq->count;
Tobias Regnery8c2a4c82016-11-15 12:43:08 +0100627
628 return offset;
629}
630
631static int alx_alloc_rx_ring(struct alx_priv *alx, struct alx_rx_queue *rxq,
632 int offset)
633{
Tobias Regnery702e8412016-11-15 12:43:11 +0100634 rxq->bufs = kcalloc(rxq->count, sizeof(struct alx_buffer), GFP_KERNEL);
Tobias Regnery8c2a4c82016-11-15 12:43:08 +0100635 if (!rxq->bufs)
636 return -ENOMEM;
637
638 rxq->rrd = alx->descmem.virt + offset;
639 rxq->rrd_dma = alx->descmem.dma + offset;
Tobias Regnery702e8412016-11-15 12:43:11 +0100640 offset += sizeof(struct alx_rrd) * rxq->count;
Tobias Regnery8c2a4c82016-11-15 12:43:08 +0100641
642 rxq->rfd = alx->descmem.virt + offset;
643 rxq->rfd_dma = alx->descmem.dma + offset;
Tobias Regnery702e8412016-11-15 12:43:11 +0100644 offset += sizeof(struct alx_rfd) * rxq->count;
Tobias Regnery8c2a4c82016-11-15 12:43:08 +0100645
646 return offset;
647}
648
649static int alx_alloc_rings(struct alx_priv *alx)
650{
Tobias Regnerya4076d32016-11-15 12:43:13 +0100651 int i, offset = 0;
Johannes Bergab69bde2013-06-17 22:44:02 +0200652
653 /* physical tx/rx ring descriptors
654 *
655 * Allocate them as a single chunk because they must not cross a
656 * 4G boundary (hardware has a single register for high 32 bits
657 * of addresses only)
658 */
Tobias Regnerya4076d32016-11-15 12:43:13 +0100659 alx->descmem.size = sizeof(struct alx_txd) * alx->tx_ringsz *
660 alx->num_txq +
Johannes Bergab69bde2013-06-17 22:44:02 +0200661 sizeof(struct alx_rrd) * alx->rx_ringsz +
662 sizeof(struct alx_rfd) * alx->rx_ringsz;
663 alx->descmem.virt = dma_zalloc_coherent(&alx->hw.pdev->dev,
664 alx->descmem.size,
665 &alx->descmem.dma,
666 GFP_KERNEL);
667 if (!alx->descmem.virt)
Tobias Regnery8c2a4c82016-11-15 12:43:08 +0100668 return -ENOMEM;
Johannes Bergab69bde2013-06-17 22:44:02 +0200669
Tobias Regnery8c2a4c82016-11-15 12:43:08 +0100670 /* alignment requirements */
Johannes Bergab69bde2013-06-17 22:44:02 +0200671 BUILD_BUG_ON(sizeof(struct alx_txd) % 8);
Johannes Bergab69bde2013-06-17 22:44:02 +0200672 BUILD_BUG_ON(sizeof(struct alx_rrd) % 8);
673
Tobias Regnerya4076d32016-11-15 12:43:13 +0100674 for (i = 0; i < alx->num_txq; i++) {
675 offset = alx_alloc_tx_ring(alx, alx->qnapi[i]->txq, offset);
676 if (offset < 0) {
677 netdev_err(alx->dev, "Allocation of tx buffer failed!\n");
678 return -ENOMEM;
679 }
Tobias Regnery8c2a4c82016-11-15 12:43:08 +0100680 }
Johannes Bergab69bde2013-06-17 22:44:02 +0200681
Tobias Regnery702e8412016-11-15 12:43:11 +0100682 offset = alx_alloc_rx_ring(alx, alx->qnapi[0]->rxq, offset);
Tobias Regnery8c2a4c82016-11-15 12:43:08 +0100683 if (offset < 0) {
684 netdev_err(alx->dev, "Allocation of rx buffer failed!\n");
Tobias Regneryb0999222016-11-15 12:43:10 +0100685 return -ENOMEM;
Tobias Regnery8c2a4c82016-11-15 12:43:08 +0100686 }
Johannes Bergab69bde2013-06-17 22:44:02 +0200687
Johannes Bergab69bde2013-06-17 22:44:02 +0200688 return 0;
689}
690
691static void alx_free_rings(struct alx_priv *alx)
692{
Tobias Regnerya4076d32016-11-15 12:43:13 +0100693 int i;
Tobias Regnery702e8412016-11-15 12:43:11 +0100694
Johannes Bergab69bde2013-06-17 22:44:02 +0200695 alx_free_buffers(alx);
696
Tobias Regnerya4076d32016-11-15 12:43:13 +0100697 for (i = 0; i < alx->num_txq; i++)
698 if (alx->qnapi[i] && alx->qnapi[i]->txq)
699 kfree(alx->qnapi[i]->txq->bufs);
700
701 if (alx->qnapi[0] && alx->qnapi[0]->rxq)
702 kfree(alx->qnapi[0]->rxq->bufs);
Johannes Bergab69bde2013-06-17 22:44:02 +0200703
Tobias Regneryf1db5c12017-01-24 14:34:22 +0100704 if (alx->descmem.virt)
Tobias Regneryb0999222016-11-15 12:43:10 +0100705 dma_free_coherent(&alx->hw.pdev->dev,
706 alx->descmem.size,
707 alx->descmem.virt,
708 alx->descmem.dma);
709}
710
711static void alx_free_napis(struct alx_priv *alx)
712{
713 struct alx_napi *np;
Tobias Regnerya4076d32016-11-15 12:43:13 +0100714 int i;
Tobias Regneryb0999222016-11-15 12:43:10 +0100715
Tobias Regnerya4076d32016-11-15 12:43:13 +0100716 for (i = 0; i < alx->num_napi; i++) {
717 np = alx->qnapi[i];
718 if (!np)
719 continue;
Tobias Regneryb0999222016-11-15 12:43:10 +0100720
Tobias Regnerya4076d32016-11-15 12:43:13 +0100721 netif_napi_del(&np->napi);
722 kfree(np->txq);
723 kfree(np->rxq);
724 kfree(np);
725 alx->qnapi[i] = NULL;
726 }
Tobias Regneryb0999222016-11-15 12:43:10 +0100727}
728
Tobias Regnery2e068262016-11-15 12:43:14 +0100729static const u16 tx_pidx_reg[] = {ALX_TPD_PRI0_PIDX, ALX_TPD_PRI1_PIDX,
730 ALX_TPD_PRI2_PIDX, ALX_TPD_PRI3_PIDX};
731static const u16 tx_cidx_reg[] = {ALX_TPD_PRI0_CIDX, ALX_TPD_PRI1_CIDX,
732 ALX_TPD_PRI2_CIDX, ALX_TPD_PRI3_CIDX};
Tobias Regnerye0eac252016-11-15 12:43:12 +0100733static const u32 tx_vect_mask[] = {ALX_ISR_TX_Q0, ALX_ISR_TX_Q1,
734 ALX_ISR_TX_Q2, ALX_ISR_TX_Q3};
735static const u32 rx_vect_mask[] = {ALX_ISR_RX_Q0, ALX_ISR_RX_Q1,
736 ALX_ISR_RX_Q2, ALX_ISR_RX_Q3,
737 ALX_ISR_RX_Q4, ALX_ISR_RX_Q5,
738 ALX_ISR_RX_Q6, ALX_ISR_RX_Q7};
739
Tobias Regneryb0999222016-11-15 12:43:10 +0100740static int alx_alloc_napis(struct alx_priv *alx)
741{
742 struct alx_napi *np;
743 struct alx_rx_queue *rxq;
744 struct alx_tx_queue *txq;
Tobias Regnerya4076d32016-11-15 12:43:13 +0100745 int i;
Tobias Regneryb0999222016-11-15 12:43:10 +0100746
747 alx->int_mask &= ~ALX_ISR_ALL_QUEUES;
Tobias Regneryb0999222016-11-15 12:43:10 +0100748
749 /* allocate alx_napi structures */
Tobias Regnerya4076d32016-11-15 12:43:13 +0100750 for (i = 0; i < alx->num_napi; i++) {
751 np = kzalloc(sizeof(struct alx_napi), GFP_KERNEL);
752 if (!np)
753 goto err_out;
Tobias Regneryb0999222016-11-15 12:43:10 +0100754
Tobias Regnerya4076d32016-11-15 12:43:13 +0100755 np->alx = alx;
756 netif_napi_add(alx->dev, &np->napi, alx_poll, 64);
757 alx->qnapi[i] = np;
758 }
Tobias Regneryb0999222016-11-15 12:43:10 +0100759
760 /* allocate tx queues */
Tobias Regnerya4076d32016-11-15 12:43:13 +0100761 for (i = 0; i < alx->num_txq; i++) {
762 np = alx->qnapi[i];
763 txq = kzalloc(sizeof(*txq), GFP_KERNEL);
764 if (!txq)
765 goto err_out;
Tobias Regneryb0999222016-11-15 12:43:10 +0100766
Tobias Regnerya4076d32016-11-15 12:43:13 +0100767 np->txq = txq;
Tobias Regnery2e068262016-11-15 12:43:14 +0100768 txq->p_reg = tx_pidx_reg[i];
769 txq->c_reg = tx_cidx_reg[i];
Tobias Regnerya4076d32016-11-15 12:43:13 +0100770 txq->queue_idx = i;
771 txq->count = alx->tx_ringsz;
772 txq->netdev = alx->dev;
773 txq->dev = &alx->hw.pdev->dev;
774 np->vec_mask |= tx_vect_mask[i];
775 alx->int_mask |= tx_vect_mask[i];
776 }
Tobias Regneryb0999222016-11-15 12:43:10 +0100777
778 /* allocate rx queues */
779 np = alx->qnapi[0];
780 rxq = kzalloc(sizeof(*rxq), GFP_KERNEL);
781 if (!rxq)
782 goto err_out;
783
784 np->rxq = rxq;
785 rxq->np = alx->qnapi[0];
Tobias Regnerye0eac252016-11-15 12:43:12 +0100786 rxq->queue_idx = 0;
Tobias Regneryb0999222016-11-15 12:43:10 +0100787 rxq->count = alx->rx_ringsz;
788 rxq->netdev = alx->dev;
789 rxq->dev = &alx->hw.pdev->dev;
Tobias Regnerye0eac252016-11-15 12:43:12 +0100790 np->vec_mask |= rx_vect_mask[0];
791 alx->int_mask |= rx_vect_mask[0];
Tobias Regneryb0999222016-11-15 12:43:10 +0100792
793 return 0;
794
795err_out:
796 netdev_err(alx->dev, "error allocating internal structures\n");
797 alx_free_napis(alx);
798 return -ENOMEM;
Johannes Bergab69bde2013-06-17 22:44:02 +0200799}
800
Tobias Regnerye0eac252016-11-15 12:43:12 +0100801static const int txq_vec_mapping_shift[] = {
802 0, ALX_MSI_MAP_TBL1_TXQ0_SHIFT,
803 0, ALX_MSI_MAP_TBL1_TXQ1_SHIFT,
804 1, ALX_MSI_MAP_TBL2_TXQ2_SHIFT,
805 1, ALX_MSI_MAP_TBL2_TXQ3_SHIFT,
806};
807
Johannes Bergab69bde2013-06-17 22:44:02 +0200808static void alx_config_vector_mapping(struct alx_priv *alx)
809{
810 struct alx_hw *hw = &alx->hw;
Tobias Regnerye0eac252016-11-15 12:43:12 +0100811 u32 tbl[2] = {0, 0};
812 int i, vector, idx, shift;
Johannes Bergab69bde2013-06-17 22:44:02 +0200813
Tobias Regnerydc39a782016-09-09 12:19:54 +0200814 if (alx->flags & ALX_FLAG_USING_MSIX) {
Tobias Regnerye0eac252016-11-15 12:43:12 +0100815 /* tx mappings */
816 for (i = 0, vector = 1; i < alx->num_txq; i++, vector++) {
817 idx = txq_vec_mapping_shift[i * 2];
818 shift = txq_vec_mapping_shift[i * 2 + 1];
819 tbl[idx] |= vector << shift;
820 }
821
822 /* rx mapping */
823 tbl[0] |= 1 << ALX_MSI_MAP_TBL1_RXQ0_SHIFT;
Tobias Regnerydc39a782016-09-09 12:19:54 +0200824 }
825
Tobias Regnerye0eac252016-11-15 12:43:12 +0100826 alx_write_mem32(hw, ALX_MSI_MAP_TBL1, tbl[0]);
827 alx_write_mem32(hw, ALX_MSI_MAP_TBL2, tbl[1]);
Johannes Bergab69bde2013-06-17 22:44:02 +0200828 alx_write_mem32(hw, ALX_MSI_ID_MAP, 0);
829}
830
/* Try to enable MSI-X: one vector for misc (link/PHY/error) interrupts
 * plus one vector per ring.  On success the vector and queue counts in
 * *alx are updated and true is returned; on failure all allocations are
 * undone and false is returned so the caller can fall back to MSI/legacy.
 */
static bool alx_enable_msix(struct alx_priv *alx)
{
	int i, err, num_vec, num_txq, num_rxq;

	num_txq = min_t(int, num_online_cpus(), ALX_MAX_TX_QUEUES);
	num_rxq = 1;
	/* +1: vector 0 is reserved for the misc interrupt */
	num_vec = max_t(int, num_txq, num_rxq) + 1;

	alx->msix_entries = kcalloc(num_vec, sizeof(struct msix_entry),
				    GFP_KERNEL);
	if (!alx->msix_entries) {
		netdev_warn(alx->dev, "Allocation of msix entries failed!\n");
		return false;
	}

	for (i = 0; i < num_vec; i++)
		alx->msix_entries[i].entry = i;

	err = pci_enable_msix(alx->hw.pdev, alx->msix_entries, num_vec);
	if (err) {
		kfree(alx->msix_entries);
		netdev_warn(alx->dev, "Enabling MSI-X interrupts failed!\n");
		return false;
	}

	alx->num_vec = num_vec;
	/* every vector except the misc one gets a napi context */
	alx->num_napi = num_vec - 1;
	alx->num_txq = num_txq;
	alx->num_rxq = num_rxq;

	return true;
}
863
/* Request all MSI-X interrupts: vector 0 for misc events, then one vector
 * per napi context.  On failure every vector acquired so far is released
 * and the request_irq() error is returned.
 */
static int alx_request_msix(struct alx_priv *alx)
{
	struct net_device *netdev = alx->dev;
	int i, err, vector = 0, free_vector = 0;

	/* vector 0: misc (link/PHY/error) interrupts */
	err = request_irq(alx->msix_entries[0].vector, alx_intr_msix_misc,
			  0, netdev->name, alx);
	if (err)
		goto out_err;

	for (i = 0; i < alx->num_napi; i++) {
		struct alx_napi *np = alx->qnapi[i];

		vector++;

		/* name the IRQ after the ring(s) this napi serves */
		if (np->txq && np->rxq)
			sprintf(np->irq_lbl, "%s-TxRx-%u", netdev->name,
				np->txq->queue_idx);
		else if (np->txq)
			sprintf(np->irq_lbl, "%s-tx-%u", netdev->name,
				np->txq->queue_idx);
		else if (np->rxq)
			sprintf(np->irq_lbl, "%s-rx-%u", netdev->name,
				np->rxq->queue_idx);
		else
			sprintf(np->irq_lbl, "%s-unused", netdev->name);

		np->vec_idx = vector;
		err = request_irq(alx->msix_entries[vector].vector,
				  alx_intr_msix_ring, 0, np->irq_lbl, np);
		if (err)
			goto out_free;
	}
	return 0;

out_free:
	/* release the misc vector first ... */
	free_irq(alx->msix_entries[free_vector++].vector, alx);

	/* ... then each ring vector that was successfully requested
	 * (vector currently indexes the one that failed)
	 */
	vector--;
	for (i = 0; i < vector; i++)
		free_irq(alx->msix_entries[free_vector++].vector,
			 alx->qnapi[i]);

out_err:
	return err;
}
910
/* Choose the interrupt scheme.  When @msix is set, try MSI-X first; if
 * MSI-X is unavailable (or not requested), fall back to a single vector,
 * preferring MSI over legacy INTx.  The single-vector path also forces
 * all vector/queue counts back to 1.
 */
static void alx_init_intr(struct alx_priv *alx, bool msix)
{
	if (msix) {
		if (alx_enable_msix(alx))
			alx->flags |= ALX_FLAG_USING_MSIX;
	}

	if (!(alx->flags & ALX_FLAG_USING_MSIX)) {
		alx->num_vec = 1;
		alx->num_napi = 1;
		alx->num_txq = 1;
		alx->num_rxq = 1;

		if (!pci_enable_msi(alx->hw.pdev))
			alx->flags |= ALX_FLAG_USING_MSI;
	}
}
928
/* Undo alx_init_intr(): release MSI-X entries and disable MSI-X/MSI so
 * the device is back on legacy interrupts.  Safe to call when neither
 * mode is active (the flag checks make it a no-op).
 */
static void alx_disable_advanced_intr(struct alx_priv *alx)
{
	if (alx->flags & ALX_FLAG_USING_MSIX) {
		kfree(alx->msix_entries);
		pci_disable_msix(alx->hw.pdev);
		alx->flags &= ~ALX_FLAG_USING_MSIX;
	}

	if (alx->flags & ALX_FLAG_USING_MSI) {
		pci_disable_msi(alx->hw.pdev);
		alx->flags &= ~ALX_FLAG_USING_MSI;
	}
}
942
/* Unmask interrupts at the chip level: clear pending status, program the
 * interrupt mask, and (in MSI-X mode) unmask every per-vector mask bit.
 */
static void alx_irq_enable(struct alx_priv *alx)
{
	struct alx_hw *hw = &alx->hw;
	int i;

	/* level-1 interrupt switch */
	alx_write_mem32(hw, ALX_ISR, 0);
	alx_write_mem32(hw, ALX_IMR, alx->int_mask);
	alx_post_write(hw);

	if (alx->flags & ALX_FLAG_USING_MSIX)
		/* enable all msix irqs */
		for (i = 0; i < alx->num_vec; i++)
			alx_mask_msix(hw, i, false);
}
958
/* Mask all interrupts at the chip level and wait for any in-flight
 * handlers to complete before returning.
 */
static void alx_irq_disable(struct alx_priv *alx)
{
	struct alx_hw *hw = &alx->hw;
	int i;

	alx_write_mem32(hw, ALX_ISR, ALX_ISR_DIS);
	alx_write_mem32(hw, ALX_IMR, 0);
	alx_post_write(hw);

	if (alx->flags & ALX_FLAG_USING_MSIX) {
		for (i = 0; i < alx->num_vec; i++) {
			alx_mask_msix(hw, i, true);
			/* ensure the vector's handler is not running */
			synchronize_irq(alx->msix_entries[i].vector);
		}
	} else {
		synchronize_irq(alx->hw.pdev->irq);
	}
}
977
Tobias Regnerye0eac252016-11-15 12:43:12 +0100978static int alx_realloc_resources(struct alx_priv *alx)
979{
980 int err;
981
982 alx_free_rings(alx);
983 alx_free_napis(alx);
984 alx_disable_advanced_intr(alx);
Tobias Regnery37187a02017-01-24 14:34:23 +0100985 alx_init_intr(alx, false);
Tobias Regnerye0eac252016-11-15 12:43:12 +0100986
987 err = alx_alloc_napis(alx);
988 if (err)
989 return err;
990
991 err = alx_alloc_rings(alx);
992 if (err)
993 return err;
994
995 return 0;
996}
997
/* Install interrupt handler(s), cascading through the available modes:
 * MSI-X first (reallocating resources for single-vector mode if its IRQ
 * request fails), then MSI, then shared legacy INTx.  On success the
 * MSI-X vector mapping tables are programmed.
 * Returns 0 on success or the last request_irq()/realloc error.
 */
static int alx_request_irq(struct alx_priv *alx)
{
	struct pci_dev *pdev = alx->hw.pdev;
	struct alx_hw *hw = &alx->hw;
	int err;
	u32 msi_ctrl;

	msi_ctrl = (hw->imt >> 1) << ALX_MSI_RETRANS_TM_SHIFT;

	if (alx->flags & ALX_FLAG_USING_MSIX) {
		alx_write_mem32(hw, ALX_MSI_RETRANS_TIMER, msi_ctrl);
		err = alx_request_msix(alx);
		if (!err)
			goto out;

		/* msix request failed, realloc resources; this clears the
		 * MSI-X flag and may set the MSI flag, so we fall through
		 * to the branches below
		 */
		err = alx_realloc_resources(alx);
		if (err)
			goto out;
	}

	if (alx->flags & ALX_FLAG_USING_MSI) {
		alx_write_mem32(hw, ALX_MSI_RETRANS_TIMER,
				msi_ctrl | ALX_MSI_MASK_SEL_LINE);
		err = request_irq(pdev->irq, alx_intr_msi, 0,
				  alx->dev->name, alx);
		if (!err)
			goto out;
		/* fall back to legacy interrupt */
		alx->flags &= ~ALX_FLAG_USING_MSI;
		pci_disable_msi(alx->hw.pdev);
	}

	alx_write_mem32(hw, ALX_MSI_RETRANS_TIMER, 0);
	err = request_irq(pdev->irq, alx_intr_legacy, IRQF_SHARED,
			  alx->dev->name, alx);
out:
	if (!err)
		alx_config_vector_mapping(alx);
	else
		netdev_err(alx->dev, "IRQ registration failed!\n");
	return err;
}
1041
/* Release every requested IRQ (misc + per-ring in MSI-X mode, the single
 * PCI IRQ otherwise) and drop back to legacy interrupt mode.
 */
static void alx_free_irq(struct alx_priv *alx)
{
	struct pci_dev *pdev = alx->hw.pdev;
	int i, vector = 0;

	if (alx->flags & ALX_FLAG_USING_MSIX) {
		/* vector 0 (misc) was requested with 'alx' as cookie */
		free_irq(alx->msix_entries[vector++].vector, alx);
		for (i = 0; i < alx->num_napi; i++)
			free_irq(alx->msix_entries[vector++].vector,
				 alx->qnapi[i]);
	} else {
		free_irq(pdev->irq, alx);
	}

	alx_disable_advanced_intr(alx);
}
1058
1059static int alx_identify_hw(struct alx_priv *alx)
1060{
1061 struct alx_hw *hw = &alx->hw;
1062 int rev = alx_hw_revision(hw);
1063
1064 if (rev > ALX_REV_C0)
1065 return -EINVAL;
1066
1067 hw->max_dma_chnl = rev >= ALX_REV_B0 ? 4 : 2;
1068
1069 return 0;
1070}
1071
/* One-time software initialization: identify the chip, then fill in the
 * default MTU limits, ring sizes, interrupt moderation, link advertising
 * and MAC RX-control settings.  Returns 0 or the alx_identify_hw() error.
 */
static int alx_init_sw(struct alx_priv *alx)
{
	struct pci_dev *pdev = alx->hw.pdev;
	struct alx_hw *hw = &alx->hw;
	int err;

	err = alx_identify_hw(alx);
	if (err) {
		dev_err(&pdev->dev, "unrecognized chip, aborting\n");
		return err;
	}

	/* flag the one known AR8161 variant that needs the link patch */
	alx->hw.lnk_patch =
		pdev->device == ALX_DEV_ID_AR8161 &&
		pdev->subsystem_vendor == PCI_VENDOR_ID_ATTANSIC &&
		pdev->subsystem_device == 0x0091 &&
		pdev->revision == 0;

	hw->smb_timer = 400;
	hw->mtu = alx->dev->mtu;
	alx->rxbuf_size = ALX_MAX_FRAME_LEN(hw->mtu);
	/* MTU range: 34 - 9256 */
	alx->dev->min_mtu = 34;
	alx->dev->max_mtu = ALX_MAX_FRAME_LEN(ALX_MAX_FRAME_SIZE);
	alx->tx_ringsz = 256;
	alx->rx_ringsz = 512;
	hw->imt = 200;
	alx->int_mask = ALX_ISR_MISC;
	hw->dma_chnl = hw->max_dma_chnl;
	/* TX interrupt threshold: every third of the ring */
	hw->ith_tpd = alx->tx_ringsz / 3;
	hw->link_speed = SPEED_UNKNOWN;
	hw->duplex = DUPLEX_UNKNOWN;
	/* advertise autoneg with every supported speed/duplex combo */
	hw->adv_cfg = ADVERTISED_Autoneg |
		      ADVERTISED_10baseT_Half |
		      ADVERTISED_10baseT_Full |
		      ADVERTISED_100baseT_Full |
		      ADVERTISED_100baseT_Half |
		      ADVERTISED_1000baseT_Full;
	hw->flowctrl = ALX_FC_ANEG | ALX_FC_RX | ALX_FC_TX;

	hw->rx_ctrl = ALX_MAC_CTRL_WOLSPED_SWEN |
		      ALX_MAC_CTRL_MHASH_ALG_HI5B |
		      ALX_MAC_CTRL_BRD_EN |
		      ALX_MAC_CTRL_PCRCE |
		      ALX_MAC_CTRL_CRCE |
		      ALX_MAC_CTRL_RXFC_EN |
		      ALX_MAC_CTRL_TXFC_EN |
		      7 << ALX_MAC_CTRL_PRMBLEN_SHIFT;

	return err;
}
1123
1124
1125static netdev_features_t alx_fix_features(struct net_device *netdev,
1126 netdev_features_t features)
1127{
1128 if (netdev->mtu > ALX_MAX_TSO_PKT_SIZE)
1129 features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
1130
1131 return features;
1132}
1133
/* Quiesce the data path: stop all TX queues and napi polling.  napi is
 * only disabled when the carrier was up, mirroring alx_netif_start().
 */
static void alx_netif_stop(struct alx_priv *alx)
{
	int i;

	netif_trans_update(alx->dev);
	if (netif_carrier_ok(alx->dev)) {
		netif_carrier_off(alx->dev);
		netif_tx_disable(alx->dev);
		for (i = 0; i < alx->num_napi; i++)
			napi_disable(&alx->qnapi[i]->napi);
	}
}
1146
/* Fully stop the device: quiesce the data path, forget the link state,
 * reset the MAC, disable power savings and interrupts, and free all
 * posted buffers.
 */
static void alx_halt(struct alx_priv *alx)
{
	struct alx_hw *hw = &alx->hw;

	alx_netif_stop(alx);
	hw->link_speed = SPEED_UNKNOWN;
	hw->duplex = DUPLEX_UNKNOWN;

	alx_reset_mac(hw);

	/* disable l0s/l1 */
	alx_enable_aspm(hw, false, false);
	alx_irq_disable(alx);
	alx_free_buffers(alx);
}
1162
/* Program basic MAC settings, disable RSS, apply the current RX filter
 * mode and write the RX control register.
 */
static void alx_configure(struct alx_priv *alx)
{
	struct alx_hw *hw = &alx->hw;

	alx_configure_basic(hw);
	alx_disable_rss(hw);
	__alx_set_rx_mode(alx->dev);

	alx_write_mem32(hw, ALX_MAC_CTRL, hw->rx_ctrl);
}
1173
/* Bring the device back up after a reset: restore ring state and HW
 * configuration, clear stale interrupt status, re-enable interrupts and
 * kick off a link check.
 */
static void alx_activate(struct alx_priv *alx)
{
	/* hardware setting lost, restore it */
	alx_reinit_rings(alx);
	alx_configure(alx);

	/* clear old interrupts */
	alx_write_mem32(&alx->hw, ALX_ISR, ~(u32)ALX_ISR_DIS);

	alx_irq_enable(alx);

	alx_schedule_link_check(alx);
}
1187
/* Full restart (halt then activate).  Caller must hold RTNL. */
static void alx_reinit(struct alx_priv *alx)
{
	ASSERT_RTNL();

	alx_halt(alx);
	alx_activate(alx);
}
1195
/* ndo_change_mtu: record the new MTU, resize the RX buffer accordingly
 * and restart the device if it is up.  Range validation is handled by the
 * core via the min_mtu/max_mtu limits set in alx_init_sw().
 */
static int alx_change_mtu(struct net_device *netdev, int mtu)
{
	struct alx_priv *alx = netdev_priv(netdev);
	int max_frame = ALX_MAX_FRAME_LEN(mtu);

	netdev->mtu = mtu;
	alx->hw.mtu = mtu;
	/* never shrink below the default RX buffer size */
	alx->rxbuf_size = max(max_frame, ALX_DEF_RXBUF_SIZE);
	netdev_update_features(netdev);
	if (netif_running(netdev))
		alx_reinit(alx);
	return 0;
}
1209
1210static void alx_netif_start(struct alx_priv *alx)
1211{
Tobias Regnerya4076d32016-11-15 12:43:13 +01001212 int i;
1213
Johannes Bergab69bde2013-06-17 22:44:02 +02001214 netif_tx_wake_all_queues(alx->dev);
Tobias Regnerya4076d32016-11-15 12:43:13 +01001215 for (i = 0; i < alx->num_napi; i++)
1216 napi_enable(&alx->qnapi[i]->napi);
Johannes Bergab69bde2013-06-17 22:44:02 +02001217 netif_carrier_on(alx->dev);
1218}
1219
/* Common open path for ndo_open and resume: set up interrupts, napi
 * contexts, rings and IRQs, then start the device.  @resume skips the
 * carrier/queue state changes that only apply to a fresh open.
 * Returns 0 or a negative errno with everything unwound.
 */
static int __alx_open(struct alx_priv *alx, bool resume)
{
	int err;

	/* prefer MSI-X; may fall back internally */
	alx_init_intr(alx, true);

	if (!resume)
		netif_carrier_off(alx->dev);

	err = alx_alloc_napis(alx);
	if (err)
		goto out_disable_adv_intr;

	err = alx_alloc_rings(alx);
	if (err)
		goto out_free_rings;

	alx_configure(alx);

	err = alx_request_irq(alx);
	if (err)
		goto out_free_rings;

	/* must be called after alx_request_irq because the chip stops working
	 * if we copy the dma addresses in alx_init_ring_ptrs twice when
	 * requesting msi-x interrupts failed
	 */
	alx_reinit_rings(alx);

	netif_set_real_num_tx_queues(alx->dev, alx->num_txq);
	netif_set_real_num_rx_queues(alx->dev, alx->num_rxq);

	/* clear old interrupts */
	alx_write_mem32(&alx->hw, ALX_ISR, ~(u32)ALX_ISR_DIS);

	alx_irq_enable(alx);

	if (!resume)
		netif_tx_start_all_queues(alx->dev);

	alx_schedule_link_check(alx);
	return 0;

out_free_rings:
	alx_free_rings(alx);
	alx_free_napis(alx);
out_disable_adv_intr:
	alx_disable_advanced_intr(alx);
	return err;
}
1270
/* Common stop path for ndo_stop and suspend: halt the device, then
 * release IRQs, rings and napi contexts (inverse of __alx_open()).
 */
static void __alx_stop(struct alx_priv *alx)
{
	alx_halt(alx);
	alx_free_irq(alx);
	alx_free_rings(alx);
	alx_free_napis(alx);
}
1278
Johannes Berga5b87cc2013-06-29 19:23:17 +02001279static const char *alx_speed_desc(struct alx_hw *hw)
Johannes Bergab69bde2013-06-17 22:44:02 +02001280{
Johannes Berga5b87cc2013-06-29 19:23:17 +02001281 switch (alx_speed_to_ethadv(hw->link_speed, hw->duplex)) {
1282 case ADVERTISED_1000baseT_Full:
Johannes Bergab69bde2013-06-17 22:44:02 +02001283 return "1 Gbps Full";
Johannes Berga5b87cc2013-06-29 19:23:17 +02001284 case ADVERTISED_100baseT_Full:
Johannes Bergab69bde2013-06-17 22:44:02 +02001285 return "100 Mbps Full";
Johannes Berga5b87cc2013-06-29 19:23:17 +02001286 case ADVERTISED_100baseT_Half:
Johannes Bergab69bde2013-06-17 22:44:02 +02001287 return "100 Mbps Half";
Johannes Berga5b87cc2013-06-29 19:23:17 +02001288 case ADVERTISED_10baseT_Full:
Johannes Bergab69bde2013-06-17 22:44:02 +02001289 return "10 Mbps Full";
Johannes Berga5b87cc2013-06-29 19:23:17 +02001290 case ADVERTISED_10baseT_Half:
Johannes Bergab69bde2013-06-17 22:44:02 +02001291 return "10 Mbps Half";
1292 default:
1293 return "Unknown speed";
1294 }
1295}
1296
/* Handle a PHY link event: re-read the link state and, on a change,
 * either start the MAC and data path (link up) or reset the MAC and
 * restore its configuration (link down).  Any hard failure schedules a
 * full device reset.
 */
static void alx_check_link(struct alx_priv *alx)
{
	struct alx_hw *hw = &alx->hw;
	unsigned long flags;
	int old_speed;
	u8 old_duplex;
	int err;

	/* clear PHY internal interrupt status, otherwise the main
	 * interrupt status will be asserted forever
	 */
	alx_clear_phy_intr(hw);

	old_speed = hw->link_speed;
	old_duplex = hw->duplex;
	err = alx_read_phy_link(hw);
	if (err < 0)
		goto reset;

	/* re-arm the PHY interrupt */
	spin_lock_irqsave(&alx->irq_lock, flags);
	alx->int_mask |= ALX_ISR_PHY;
	alx_write_mem32(hw, ALX_IMR, alx->int_mask);
	spin_unlock_irqrestore(&alx->irq_lock, flags);

	/* nothing to do when the speed is unchanged */
	if (old_speed == hw->link_speed)
		return;

	if (hw->link_speed != SPEED_UNKNOWN) {
		netif_info(alx, link, alx->dev,
			   "NIC Up: %s\n", alx_speed_desc(hw));
		alx_post_phy_link(hw);
		alx_enable_aspm(hw, true, true);
		alx_start_mac(hw);

		/* only (re)start the data path on an unknown->up change */
		if (old_speed == SPEED_UNKNOWN)
			alx_netif_start(alx);
	} else {
		/* link is now down */
		alx_netif_stop(alx);
		netif_info(alx, link, alx->dev, "Link Down\n");
		err = alx_reset_mac(hw);
		if (err)
			goto reset;
		alx_irq_disable(alx);

		/* MAC reset causes all HW settings to be lost, restore all */
		err = alx_reinit_rings(alx);
		if (err)
			goto reset;
		alx_configure(alx);
		alx_enable_aspm(hw, false, true);
		alx_post_phy_link(hw);
		alx_irq_enable(alx);
	}

	return;

reset:
	alx_schedule_reset(alx);
}
1357
1358static int alx_open(struct net_device *netdev)
1359{
1360 return __alx_open(netdev_priv(netdev), false);
1361}
1362
/* ndo_stop: tear the device down; always succeeds. */
static int alx_stop(struct net_device *netdev)
{
	struct alx_priv *alx = netdev_priv(netdev);

	__alx_stop(alx);

	return 0;
}
1368
Johannes Bergab69bde2013-06-17 22:44:02 +02001369static void alx_link_check(struct work_struct *work)
1370{
1371 struct alx_priv *alx;
1372
1373 alx = container_of(work, struct alx_priv, link_check_wk);
1374
1375 rtnl_lock();
1376 alx_check_link(alx);
1377 rtnl_unlock();
1378}
1379
1380static void alx_reset(struct work_struct *work)
1381{
1382 struct alx_priv *alx = container_of(work, struct alx_priv, reset_wk);
1383
1384 rtnl_lock();
1385 alx_reinit(alx);
1386 rtnl_unlock();
1387}
1388
Tobias Regneryab725982016-08-25 20:09:53 +02001389static int alx_tpd_req(struct sk_buff *skb)
1390{
1391 int num;
1392
1393 num = skb_shinfo(skb)->nr_frags + 1;
1394 /* we need one extra descriptor for LSOv2 */
1395 if (skb_is_gso(skb) && skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
1396 num++;
1397
1398 return num;
1399}
1400
Johannes Bergab69bde2013-06-17 22:44:02 +02001401static int alx_tx_csum(struct sk_buff *skb, struct alx_txd *first)
1402{
1403 u8 cso, css;
1404
1405 if (skb->ip_summed != CHECKSUM_PARTIAL)
1406 return 0;
1407
1408 cso = skb_checksum_start_offset(skb);
1409 if (cso & 1)
1410 return -EINVAL;
1411
1412 css = cso + skb->csum_offset;
1413 first->word1 |= cpu_to_le32((cso >> 1) << TPD_CXSUMSTART_SHIFT);
1414 first->word1 |= cpu_to_le32((css >> 1) << TPD_CXSUMOFFSET_SHIFT);
1415 first->word1 |= cpu_to_le32(1 << TPD_CXSUM_EN_SHIFT);
1416
1417 return 0;
1418}
1419
/* Configure TSO fields in the first TPD for a GSO skb, patching the
 * IP/TCP headers with the pseudo-header checksum as the hardware expects.
 * Returns 1 when TSO was set up, 0 when not applicable, or a negative
 * errno from skb_cow_head().
 */
static int alx_tso(struct sk_buff *skb, struct alx_txd *first)
{
	int err;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;

	if (!skb_is_gso(skb))
		return 0;

	/* headers are modified below, so they must be writable */
	err = skb_cow_head(skb, 0);
	if (err < 0)
		return err;

	if (skb->protocol == htons(ETH_P_IP)) {
		struct iphdr *iph = ip_hdr(skb);

		iph->check = 0;
		tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
							 0, IPPROTO_TCP, 0);
		first->word1 |= 1 << TPD_IPV4_SHIFT;
	} else if (skb_is_gso_v6(skb)) {
		ipv6_hdr(skb)->payload_len = 0;
		tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
						       &ipv6_hdr(skb)->daddr,
						       0, IPPROTO_TCP, 0);
		/* LSOv2: the first TPD only provides the packet length */
		first->adrl.l.pkt_len = skb->len;
		first->word1 |= 1 << TPD_LSO_V2_SHIFT;
	}

	first->word1 |= 1 << TPD_LSO_EN_SHIFT;
	first->word1 |= (skb_transport_offset(skb) &
			 TPD_L4HDROFFSET_MASK) << TPD_L4HDROFFSET_SHIFT;
	first->word1 |= (skb_shinfo(skb)->gso_size &
			 TPD_MSS_MASK) << TPD_MSS_SHIFT;
	return 1;
}
1458
/* DMA-map the skb head and all fragments into consecutive TPDs starting
 * at txq->write_idx.  The caller has already initialized the first TPD
 * (checksum/TSO fields).  Sets EOP on the last descriptor, stores the skb
 * for TX completion and advances write_idx past the used descriptors.
 * On a mapping failure every buffer mapped so far is released and
 * -ENOMEM is returned, leaving write_idx where the caller can retry.
 */
static int alx_map_tx_skb(struct alx_tx_queue *txq, struct sk_buff *skb)
{
	struct alx_txd *tpd, *first_tpd;
	dma_addr_t dma;
	int maplen, f, first_idx = txq->write_idx;

	first_tpd = &txq->tpd[txq->write_idx];
	tpd = first_tpd;

	/* LSOv2: the first TPD holds only the packet length (set by
	 * alx_tso()); the payload descriptors start at the next TPD
	 */
	if (tpd->word1 & (1 << TPD_LSO_V2_SHIFT)) {
		if (++txq->write_idx == txq->count)
			txq->write_idx = 0;

		tpd = &txq->tpd[txq->write_idx];
		tpd->len = first_tpd->len;
		tpd->vlan_tag = first_tpd->vlan_tag;
		tpd->word1 = first_tpd->word1;
	}

	maplen = skb_headlen(skb);
	dma = dma_map_single(txq->dev, skb->data, maplen,
			     DMA_TO_DEVICE);
	if (dma_mapping_error(txq->dev, dma))
		goto err_dma;

	dma_unmap_len_set(&txq->bufs[txq->write_idx], size, maplen);
	dma_unmap_addr_set(&txq->bufs[txq->write_idx], dma, dma);

	tpd->adrl.addr = cpu_to_le64(dma);
	tpd->len = cpu_to_le16(maplen);

	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) {
		struct skb_frag_struct *frag;

		frag = &skb_shinfo(skb)->frags[f];

		if (++txq->write_idx == txq->count)
			txq->write_idx = 0;
		tpd = &txq->tpd[txq->write_idx];

		tpd->word1 = first_tpd->word1;

		maplen = skb_frag_size(frag);
		dma = skb_frag_dma_map(txq->dev, frag, 0,
				       maplen, DMA_TO_DEVICE);
		if (dma_mapping_error(txq->dev, dma))
			goto err_dma;
		dma_unmap_len_set(&txq->bufs[txq->write_idx], size, maplen);
		dma_unmap_addr_set(&txq->bufs[txq->write_idx], dma, dma);

		tpd->adrl.addr = cpu_to_le64(dma);
		tpd->len = cpu_to_le16(maplen);
	}

	/* last TPD, set EOP flag and store skb */
	tpd->word1 |= cpu_to_le32(1 << TPD_EOP_SHIFT);
	txq->bufs[txq->write_idx].skb = skb;

	if (++txq->write_idx == txq->count)
		txq->write_idx = 0;

	return 0;

err_dma:
	/* unwind: release every buffer mapped since first_idx */
	f = first_idx;
	while (f != txq->write_idx) {
		alx_free_txbuf(txq, f);
		if (++f == txq->count)
			f = 0;
	}
	return -ENOMEM;
}
1531
/* Transmit one skb on the given TX ring: check descriptor space, apply
 * TSO/checksum offload, DMA-map the data and kick the hardware producer
 * pointer.  The queue is stopped when descriptors run low.  On any
 * failure the packet is dropped, still returning NETDEV_TX_OK as the
 * stack expects.
 */
static netdev_tx_t alx_start_xmit_ring(struct sk_buff *skb,
				       struct alx_tx_queue *txq)
{
	struct alx_priv *alx;
	struct alx_txd *first;
	int tso;

	alx = netdev_priv(txq->netdev);

	if (alx_tpd_avail(txq) < alx_tpd_req(skb)) {
		netif_tx_stop_queue(alx_get_tx_queue(txq));
		goto drop;
	}

	first = &txq->tpd[txq->write_idx];
	memset(first, 0, sizeof(*first));

	/* TSO and plain checksum offload are mutually exclusive */
	tso = alx_tso(skb, first);
	if (tso < 0)
		goto drop;
	else if (!tso && alx_tx_csum(skb, first))
		goto drop;

	if (alx_map_tx_skb(txq, skb) < 0)
		goto drop;

	netdev_tx_sent_queue(alx_get_tx_queue(txq), skb->len);

	/* flush updates before updating hardware */
	wmb();
	alx_write_mem16(&alx->hw, txq->p_reg, txq->write_idx);

	/* stop early when fewer than 1/8 of the ring remains free */
	if (alx_tpd_avail(txq) < txq->count / 8)
		netif_tx_stop_queue(alx_get_tx_queue(txq));

	return NETDEV_TX_OK;

drop:
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}
1573
Tobias Regnery2e068262016-11-15 12:43:14 +01001574static netdev_tx_t alx_start_xmit(struct sk_buff *skb,
1575 struct net_device *netdev)
1576{
1577 struct alx_priv *alx = netdev_priv(netdev);
1578 return alx_start_xmit_ring(skb, alx_tx_queue_mapping(alx, skb));
1579}
1580
/* ndo_tx_timeout: recover from a TX hang by scheduling a full reset in
 * process context.
 */
static void alx_tx_timeout(struct net_device *dev)
{
	alx_schedule_reset(netdev_priv(dev));
}
1587
1588static int alx_mdio_read(struct net_device *netdev,
1589 int prtad, int devad, u16 addr)
1590{
1591 struct alx_priv *alx = netdev_priv(netdev);
1592 struct alx_hw *hw = &alx->hw;
1593 u16 val;
1594 int err;
1595
1596 if (prtad != hw->mdio.prtad)
1597 return -EINVAL;
1598
1599 if (devad == MDIO_DEVAD_NONE)
1600 err = alx_read_phy_reg(hw, addr, &val);
1601 else
1602 err = alx_read_phy_ext(hw, devad, addr, &val);
1603
1604 if (err)
1605 return err;
1606 return val;
1607}
1608
1609static int alx_mdio_write(struct net_device *netdev,
1610 int prtad, int devad, u16 addr, u16 val)
1611{
1612 struct alx_priv *alx = netdev_priv(netdev);
1613 struct alx_hw *hw = &alx->hw;
1614
1615 if (prtad != hw->mdio.prtad)
1616 return -EINVAL;
1617
1618 if (devad == MDIO_DEVAD_NONE)
1619 return alx_write_phy_reg(hw, addr, val);
1620
1621 return alx_write_phy_ext(hw, devad, addr, val);
1622}
1623
1624static int alx_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
1625{
1626 struct alx_priv *alx = netdev_priv(netdev);
1627
1628 if (!netif_running(netdev))
1629 return -EAGAIN;
1630
1631 return mdio_mii_ioctl(&alx->hw.mdio, if_mii(ifr), cmd);
1632}
1633
#ifdef CONFIG_NET_POLL_CONTROLLER
/* Netpoll hook: invoke the interrupt handler(s) for the active interrupt
 * mode directly, with interrupts otherwise unavailable.
 */
static void alx_poll_controller(struct net_device *netdev)
{
	struct alx_priv *alx = netdev_priv(netdev);
	int i;

	if (alx->flags & ALX_FLAG_USING_MSIX) {
		alx_intr_msix_misc(0, alx);
		for (i = 0; i < alx->num_txq; i++)
			alx_intr_msix_ring(0, alx->qnapi[i]);
	} else if (alx->flags & ALX_FLAG_USING_MSI)
		alx_intr_msi(0, alx);
	else
		alx_intr_legacy(0, alx);
}
#endif
1650
/* ndo_get_stats64: refresh the hardware counters and fold them into the
 * standard rtnl stats structure.  stats_lock serializes against other
 * readers of the hardware statistics.
 */
static struct rtnl_link_stats64 *alx_get_stats64(struct net_device *dev,
					struct rtnl_link_stats64 *net_stats)
{
	struct alx_priv *alx = netdev_priv(dev);
	struct alx_hw_stats *hw_stats = &alx->hw.stats;

	spin_lock(&alx->stats_lock);

	alx_update_hw_stats(&alx->hw);

	net_stats->tx_bytes = hw_stats->tx_byte_cnt;
	net_stats->rx_bytes = hw_stats->rx_byte_cnt;
	net_stats->multicast = hw_stats->rx_mcast;
	net_stats->collisions = hw_stats->tx_single_col +
				hw_stats->tx_multi_col +
				hw_stats->tx_late_col +
				hw_stats->tx_abort_col;

	/* aggregate every RX error class the chip counts separately */
	net_stats->rx_errors = hw_stats->rx_frag +
			       hw_stats->rx_fcs_err +
			       hw_stats->rx_len_err +
			       hw_stats->rx_ov_sz +
			       hw_stats->rx_ov_rrd +
			       hw_stats->rx_align_err +
			       hw_stats->rx_ov_rxf;

	net_stats->rx_fifo_errors = hw_stats->rx_ov_rxf;
	net_stats->rx_length_errors = hw_stats->rx_len_err;
	net_stats->rx_crc_errors = hw_stats->rx_fcs_err;
	net_stats->rx_frame_errors = hw_stats->rx_align_err;
	net_stats->rx_dropped = hw_stats->rx_ov_rrd;

	net_stats->tx_errors = hw_stats->tx_late_col +
			       hw_stats->tx_abort_col +
			       hw_stats->tx_underrun +
			       hw_stats->tx_trunc;

	net_stats->tx_aborted_errors = hw_stats->tx_abort_col;
	net_stats->tx_fifo_errors = hw_stats->tx_underrun;
	net_stats->tx_window_errors = hw_stats->tx_late_col;

	/* totals include both good and errored frames */
	net_stats->tx_packets = hw_stats->tx_ok + net_stats->tx_errors;
	net_stats->rx_packets = hw_stats->rx_ok + net_stats->rx_errors;

	spin_unlock(&alx->stats_lock);

	return net_stats;
}
1699
/* net_device callbacks for the alx driver */
static const struct net_device_ops alx_netdev_ops = {
	.ndo_open               = alx_open,
	.ndo_stop               = alx_stop,
	.ndo_start_xmit         = alx_start_xmit,
	.ndo_get_stats64        = alx_get_stats64,
	.ndo_set_rx_mode        = alx_set_rx_mode,
	.ndo_validate_addr      = eth_validate_addr,
	.ndo_set_mac_address    = alx_set_mac_address,
	.ndo_change_mtu         = alx_change_mtu,
	.ndo_do_ioctl           = alx_ioctl,
	.ndo_tx_timeout         = alx_tx_timeout,
	.ndo_fix_features	= alx_fix_features,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller    = alx_poll_controller,
#endif
};
1716
/* alx_probe() - PCI probe callback for AR816x/AR817x devices.
 *
 * Enables the PCI device, configures DMA masks, claims and maps BAR 0,
 * allocates the net_device/alx_priv pair, resets the PCIe link, PHY and
 * MAC, programs the MAC address, sets up MDIO access and registers the
 * netdev.  Returns 0 on success or a negative errno; on failure every
 * resource acquired so far is released via the goto cleanup chain at
 * the bottom.
 */
static int alx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct net_device *netdev;
	struct alx_priv *alx;
	struct alx_hw *hw;
	bool phy_configured;
	int err;

	err = pci_enable_device_mem(pdev);
	if (err)
		return err;

	/* The alx chip can DMA to 64-bit addresses, but it uses a single
	 * shared register for the high 32 bits, so only a single, aligned,
	 * 4 GB physical address range can be used for descriptors.
	 */
	if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
		dev_dbg(&pdev->dev, "DMA to 64-BIT addresses\n");
	} else {
		/* 64-bit mask rejected: fall back to 32-bit DMA */
		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev, "No usable DMA config, aborting\n");
			goto out_pci_disable;
		}
	}

	/* Claim the device's memory BARs for this driver */
	err = pci_request_mem_regions(pdev, alx_drv_name);
	if (err) {
		dev_err(&pdev->dev,
			"pci_request_mem_regions failed\n");
		goto out_pci_disable;
	}

	pci_enable_pcie_error_reporting(pdev);
	pci_set_master(pdev);

	/* The driver requires the PCI power-management capability */
	if (!pdev->pm_cap) {
		dev_err(&pdev->dev,
			"Can't find power management capability, aborting\n");
		err = -EIO;
		goto out_pci_release;
	}

	/* Multiple TX queues, a single RX queue; alx_priv lives in the
	 * netdev's private area.
	 */
	netdev = alloc_etherdev_mqs(sizeof(*alx),
				    ALX_MAX_TX_QUEUES, 1);
	if (!netdev) {
		err = -ENOMEM;
		goto out_pci_release;
	}

	SET_NETDEV_DEV(netdev, &pdev->dev);
	alx = netdev_priv(netdev);
	spin_lock_init(&alx->hw.mdio_lock);
	spin_lock_init(&alx->irq_lock);
	spin_lock_init(&alx->stats_lock);
	alx->dev = netdev;
	alx->hw.pdev = pdev;
	alx->msg_enable = NETIF_MSG_LINK | NETIF_MSG_HW | NETIF_MSG_IFUP |
			  NETIF_MSG_TX_ERR | NETIF_MSG_RX_ERR | NETIF_MSG_WOL;
	hw = &alx->hw;
	pci_set_drvdata(pdev, alx);

	/* Map BAR 0; all chip registers are accessed through this window */
	hw->hw_addr = pci_ioremap_bar(pdev, 0);
	if (!hw->hw_addr) {
		dev_err(&pdev->dev, "cannot map device registers\n");
		err = -EIO;
		goto out_free_netdev;
	}

	netdev->netdev_ops = &alx_netdev_ops;
	netdev->ethtool_ops = &alx_ethtool_ops;
	netdev->irq = pdev->irq;
	netdev->watchdog_timeo = ALX_WATCHDOG_TIME;

	/* Some chips cannot mask INTx while MSI is enabled; flag them so
	 * the PCI core works around it.
	 */
	if (ent->driver_data & ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG)
		pdev->dev_flags |= PCI_DEV_FLAGS_MSI_INTX_DISABLE_BUG;

	err = alx_init_sw(alx);
	if (err) {
		dev_err(&pdev->dev, "net device private data init failed\n");
		goto out_unmap;
	}

	alx_reset_pcie(hw);

	/* Only reset the PHY if boot firmware left it unconfigured */
	phy_configured = alx_phy_configured(hw);

	if (!phy_configured)
		alx_reset_phy(hw);

	err = alx_reset_mac(hw);
	if (err) {
		dev_err(&pdev->dev, "MAC Reset failed, error = %d\n", err);
		goto out_unmap;
	}

	/* setup link to put it in a known good starting state */
	if (!phy_configured) {
		err = alx_setup_speed_duplex(hw, hw->adv_cfg, hw->flowctrl);
		if (err) {
			dev_err(&pdev->dev,
				"failed to configure PHY speed/duplex (err=%d)\n",
				err);
			goto out_unmap;
		}
	}

	/* Offloads the hardware supports; user-toggleable via ethtool */
	netdev->hw_features = NETIF_F_SG |
			      NETIF_F_HW_CSUM |
			      NETIF_F_TSO |
			      NETIF_F_TSO6;

	if (alx_get_perm_macaddr(hw, hw->perm_addr)) {
		dev_warn(&pdev->dev,
			 "Invalid permanent address programmed, using random one\n");
		eth_hw_addr_random(netdev);
		memcpy(hw->perm_addr, netdev->dev_addr, netdev->addr_len);
	}

	memcpy(hw->mac_addr, hw->perm_addr, ETH_ALEN);
	memcpy(netdev->dev_addr, hw->mac_addr, ETH_ALEN);
	memcpy(netdev->perm_addr, hw->perm_addr, ETH_ALEN);

	/* MDIO setup: clause-22 PHY driven through the C45 emulation layer */
	hw->mdio.prtad = 0;
	hw->mdio.mmds = 0;
	hw->mdio.dev = netdev;
	hw->mdio.mode_support = MDIO_SUPPORTS_C45 |
				MDIO_SUPPORTS_C22 |
				MDIO_EMULATE_C22;
	hw->mdio.mdio_read = alx_mdio_read;
	hw->mdio.mdio_write = alx_mdio_write;

	if (!alx_get_phy_info(hw)) {
		dev_err(&pdev->dev, "failed to identify PHY\n");
		err = -EIO;
		goto out_unmap;
	}

	INIT_WORK(&alx->link_check_wk, alx_link_check);
	INIT_WORK(&alx->reset_wk, alx_reset);
	/* Start with carrier off until a link is actually detected */
	netif_carrier_off(netdev);

	err = register_netdev(netdev);
	if (err) {
		dev_err(&pdev->dev, "register netdevice failed\n");
		goto out_unmap;
	}

	netdev_info(netdev,
		    "Qualcomm Atheros AR816x/AR817x Ethernet [%pM]\n",
		    netdev->dev_addr);

	return 0;

out_unmap:
	iounmap(hw->hw_addr);
out_free_netdev:
	free_netdev(netdev);
out_pci_release:
	pci_release_mem_regions(pdev);
out_pci_disable:
	pci_disable_device(pdev);
	return err;
}
1881
/* alx_remove() - PCI remove callback; undoes what alx_probe() set up.
 * Stops deferred work, restores the permanent MAC address to the
 * hardware, unregisters the netdev and releases MMIO/PCI resources.
 */
static void alx_remove(struct pci_dev *pdev)
{
	struct alx_priv *alx = pci_get_drvdata(pdev);
	struct alx_hw *hw = &alx->hw;

	/* NOTE(review): work items are cancelled while the netdev is still
	 * registered, so an interrupt could in principle requeue them before
	 * unregister_netdev() below — confirm IRQs are quiesced by this
	 * point.
	 */
	cancel_work_sync(&alx->link_check_wk);
	cancel_work_sync(&alx->reset_wk);

	/* restore permanent mac address */
	alx_set_macaddr(hw, hw->perm_addr);

	unregister_netdev(alx->dev);
	iounmap(hw->hw_addr);
	pci_release_mem_regions(pdev);

	pci_disable_pcie_error_reporting(pdev);
	pci_disable_device(pdev);

	/* Frees alx too — it lives in the netdev's private area */
	free_netdev(alx->dev);
}
1902
1903#ifdef CONFIG_PM_SLEEP
1904static int alx_suspend(struct device *dev)
1905{
1906 struct pci_dev *pdev = to_pci_dev(dev);
Johannes Bergbc2bebe2013-07-03 21:48:11 +02001907 struct alx_priv *alx = pci_get_drvdata(pdev);
Johannes Bergab69bde2013-06-17 22:44:02 +02001908
Johannes Bergbc2bebe2013-07-03 21:48:11 +02001909 if (!netif_running(alx->dev))
1910 return 0;
1911 netif_device_detach(alx->dev);
1912 __alx_stop(alx);
Johannes Bergab69bde2013-06-17 22:44:02 +02001913 return 0;
1914}
1915
1916static int alx_resume(struct device *dev)
1917{
1918 struct pci_dev *pdev = to_pci_dev(dev);
1919 struct alx_priv *alx = pci_get_drvdata(pdev);
hahnjob54629e2013-11-12 18:19:24 +01001920 struct alx_hw *hw = &alx->hw;
1921
1922 alx_reset_phy(hw);
Johannes Bergab69bde2013-06-17 22:44:02 +02001923
Johannes Bergbc2bebe2013-07-03 21:48:11 +02001924 if (!netif_running(alx->dev))
1925 return 0;
1926 netif_device_attach(alx->dev);
1927 return __alx_open(alx, true);
Johannes Bergab69bde2013-06-17 22:44:02 +02001928}
Johannes Bergbc2bebe2013-07-03 21:48:11 +02001929
/* Build the dev_pm_ops for system sleep; compiled out (NULL) when
 * CONFIG_PM_SLEEP is disabled so no PM callbacks are registered.
 */
static SIMPLE_DEV_PM_OPS(alx_pm_ops, alx_suspend, alx_resume);
#define ALX_PM_OPS      (&alx_pm_ops)
#else
#define ALX_PM_OPS      NULL
#endif
1935
Johannes Bergbc2bebe2013-07-03 21:48:11 +02001936
Johannes Bergab69bde2013-06-17 22:44:02 +02001937static pci_ers_result_t alx_pci_error_detected(struct pci_dev *pdev,
1938 pci_channel_state_t state)
1939{
1940 struct alx_priv *alx = pci_get_drvdata(pdev);
1941 struct net_device *netdev = alx->dev;
1942 pci_ers_result_t rc = PCI_ERS_RESULT_NEED_RESET;
1943
1944 dev_info(&pdev->dev, "pci error detected\n");
1945
1946 rtnl_lock();
1947
1948 if (netif_running(netdev)) {
1949 netif_device_detach(netdev);
1950 alx_halt(alx);
1951 }
1952
1953 if (state == pci_channel_io_perm_failure)
1954 rc = PCI_ERS_RESULT_DISCONNECT;
1955 else
1956 pci_disable_device(pdev);
1957
1958 rtnl_unlock();
1959
1960 return rc;
1961}
1962
1963static pci_ers_result_t alx_pci_error_slot_reset(struct pci_dev *pdev)
1964{
1965 struct alx_priv *alx = pci_get_drvdata(pdev);
1966 struct alx_hw *hw = &alx->hw;
1967 pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
1968
1969 dev_info(&pdev->dev, "pci error slot reset\n");
1970
1971 rtnl_lock();
1972
1973 if (pci_enable_device(pdev)) {
1974 dev_err(&pdev->dev, "Failed to re-enable PCI device after reset\n");
1975 goto out;
1976 }
1977
1978 pci_set_master(pdev);
Johannes Bergab69bde2013-06-17 22:44:02 +02001979
1980 alx_reset_pcie(hw);
1981 if (!alx_reset_mac(hw))
1982 rc = PCI_ERS_RESULT_RECOVERED;
1983out:
1984 pci_cleanup_aer_uncorrect_error_status(pdev);
1985
1986 rtnl_unlock();
1987
1988 return rc;
1989}
1990
1991static void alx_pci_error_resume(struct pci_dev *pdev)
1992{
1993 struct alx_priv *alx = pci_get_drvdata(pdev);
1994 struct net_device *netdev = alx->dev;
1995
1996 dev_info(&pdev->dev, "pci error resume\n");
1997
1998 rtnl_lock();
1999
2000 if (netif_running(netdev)) {
2001 alx_activate(alx);
2002 netif_device_attach(netdev);
2003 }
2004
2005 rtnl_unlock();
2006}
2007
/* PCIe AER (Advanced Error Reporting) recovery callbacks */
static const struct pci_error_handlers alx_err_handlers = {
	.error_detected = alx_pci_error_detected,
	.slot_reset     = alx_pci_error_slot_reset,
	.resume         = alx_pci_error_resume,
};
2013
/* Supported PCI device IDs.  Entries carrying the quirk flag mark chips
 * that cannot mask INTx while MSI is enabled (consumed in alx_probe()).
 */
static const struct pci_device_id alx_pci_tbl[] = {
	{ PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_AR8161),
	  .driver_data = ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG },
	{ PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_E2200),
	  .driver_data = ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG },
	{ PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_E2400),
	  .driver_data = ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG },
	{ PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_E2500),
	  .driver_data = ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG },
	{ PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_AR8162),
	  .driver_data = ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG },
	{ PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_AR8171) },
	{ PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_AR8172) },
	{}	/* sentinel */
};
2029
/* PCI driver glue: ties the ID table, probe/remove, AER error handlers
 * and (optional) power-management ops together.
 */
static struct pci_driver alx_driver = {
	.name        = alx_drv_name,
	.id_table    = alx_pci_tbl,
	.probe       = alx_probe,
	.remove      = alx_remove,
	.err_handler = &alx_err_handlers,
	.driver.pm   = ALX_PM_OPS,
};
2038
/* module_pci_driver() expands to the module init/exit boilerplate that
 * registers and unregisters alx_driver with the PCI core.
 */
module_pci_driver(alx_driver);
MODULE_DEVICE_TABLE(pci, alx_pci_tbl);
MODULE_AUTHOR("Johannes Berg <johannes@sipsolutions.net>");
MODULE_AUTHOR("Qualcomm Corporation, <nic-devel@qualcomm.com>");
MODULE_DESCRIPTION(
	"Qualcomm Atheros(R) AR816x/AR817x PCI-E Ethernet Network Driver");
MODULE_LICENSE("GPL");