root/drivers/net/ethernet/aquantia/atlantic/aq_ring.c

DEFINITIONS

This source file includes the following definitions; a brief, illustrative usage sketch follows the list.
  1. aq_free_rxpage
  2. aq_get_rxpage
  3. aq_get_rxpages
  4. aq_ring_alloc
  5. aq_ring_tx_alloc
  6. aq_ring_rx_alloc
  7. aq_ring_init
  8. aq_ring_dx_in_range
  9. aq_ring_update_queue_state
  10. aq_ring_queue_wake
  11. aq_ring_queue_stop
  12. aq_ring_tx_clean
  13. aq_rx_checksum
  14. aq_ring_rx_clean
  15. aq_ring_rx_fill
  16. aq_ring_rx_deinit
  17. aq_ring_free

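For orientation, the sketch below shows one way the Rx entry points listed above
might be driven by the rest of the driver. It is illustrative only: ring_mem,
aq_nic, cfg, idx, napi and budget stand in for context provided by callers
outside this file, and the call ordering is inferred from the function
definitions below rather than copied from those callers.

  /* Hypothetical caller-side sketch; not taken verbatim from the driver. */
  struct aq_ring_s *ring;
  int work_done = 0;
  int err;

  ring = aq_ring_rx_alloc(&ring_mem, aq_nic, idx, cfg); /* buff_ring + dx_ring */
  if (!ring)
          return -ENOMEM;

  aq_ring_init(ring);                      /* reset head/tail indices */
  err = aq_ring_rx_fill(ring);             /* pre-post receive pages */

  /* In the NAPI poll handler: */
  err = aq_ring_rx_clean(ring, napi, &work_done, budget);
  aq_ring_rx_fill(ring);                   /* replenish consumed buffers */

  /* On teardown: */
  aq_ring_rx_deinit(ring);                 /* release posted receive pages */
  aq_ring_free(ring);                      /* free buff_ring and dx_ring */
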
   1 // SPDX-License-Identifier: GPL-2.0-only
   2 /*
   3  * aQuantia Corporation Network Driver
   4  * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
   5  */
   6 
   7 /* File aq_ring.c: Definition of functions for Rx/Tx rings. */
   8 
   9 #include "aq_ring.h"
  10 #include "aq_nic.h"
  11 #include "aq_hw.h"
  12 #include "aq_hw_utils.h"
  13 
  14 #include <linux/netdevice.h>
  15 #include <linux/etherdevice.h>
  16 
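      /* Unmap the receive page's streaming DMA mapping and release the
       * reference the ring holds on it.
       */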
  17 static inline void aq_free_rxpage(struct aq_rxpage *rxpage, struct device *dev)
  18 {
  19         unsigned int len = PAGE_SIZE << rxpage->order;
  20 
  21         dma_unmap_page(dev, rxpage->daddr, len, DMA_FROM_DEVICE);
  22 
  23         /* Drop the ref for being in the ring. */
  24         __free_pages(rxpage->page, rxpage->order);
  25         rxpage->page = NULL;
  26 }
  27 
  28 static int aq_get_rxpage(struct aq_rxpage *rxpage, unsigned int order,
  29                          struct device *dev)
  30 {
  31         struct page *page;
  32         dma_addr_t daddr;
  33         int ret = -ENOMEM;
  34 
  35         page = dev_alloc_pages(order);
  36         if (unlikely(!page))
  37                 goto err_exit;
  38 
  39         daddr = dma_map_page(dev, page, 0, PAGE_SIZE << order,
  40                              DMA_FROM_DEVICE);
  41 
  42         if (unlikely(dma_mapping_error(dev, daddr)))
  43                 goto free_page;
  44 
  45         rxpage->page = page;
  46         rxpage->daddr = daddr;
  47         rxpage->order = order;
  48         rxpage->pg_off = 0;
  49 
  50         return 0;
  51 
  52 free_page:
  53         __free_pages(page, order);
  54 
  55 err_exit:
  56         return ret;
  57 }
  58 
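      /* Ensure the Rx buffer has a usable page slice: if the ring is the only
       * remaining user, the current page is reused from offset zero
       * (pg_reuses); otherwise the offset is advanced by one frame (pg_flips),
       * and once the page is exhausted it is released (pg_losts) and a fresh
       * page is allocated below.
       */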
  59 static int aq_get_rxpages(struct aq_ring_s *self, struct aq_ring_buff_s *rxbuf,
  60                           int order)
  61 {
  62         int ret;
  63 
  64         if (rxbuf->rxdata.page) {
   65                 /* A page refcount of one means the ring is the only user and can reuse it */
  66                 if (page_ref_count(rxbuf->rxdata.page) > 1) {
  67                         /* Try reuse buffer */
  68                         rxbuf->rxdata.pg_off += AQ_CFG_RX_FRAME_MAX;
  69                         if (rxbuf->rxdata.pg_off + AQ_CFG_RX_FRAME_MAX <=
  70                                 (PAGE_SIZE << order)) {
  71                                 self->stats.rx.pg_flips++;
  72                         } else {
  73                                 /* Buffer exhausted. We have other users and
  74                                  * should release this page and realloc
  75                                  */
  76                                 aq_free_rxpage(&rxbuf->rxdata,
  77                                                aq_nic_get_dev(self->aq_nic));
  78                                 self->stats.rx.pg_losts++;
  79                         }
  80                 } else {
  81                         rxbuf->rxdata.pg_off = 0;
  82                         self->stats.rx.pg_reuses++;
  83                 }
  84         }
  85 
  86         if (!rxbuf->rxdata.page) {
  87                 ret = aq_get_rxpage(&rxbuf->rxdata, order,
  88                                     aq_nic_get_dev(self->aq_nic));
  89                 return ret;
  90         }
  91 
  92         return 0;
  93 }
  94 
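      /* Common allocation path: the software buffer array (buff_ring) plus the
       * DMA-coherent hardware descriptor area (dx_ring).  On failure the ring
       * is freed and NULL is returned.
       */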
  95 static struct aq_ring_s *aq_ring_alloc(struct aq_ring_s *self,
  96                                        struct aq_nic_s *aq_nic)
  97 {
  98         int err = 0;
  99 
 100         self->buff_ring =
 101                 kcalloc(self->size, sizeof(struct aq_ring_buff_s), GFP_KERNEL);
 102 
 103         if (!self->buff_ring) {
 104                 err = -ENOMEM;
 105                 goto err_exit;
 106         }
 107         self->dx_ring = dma_alloc_coherent(aq_nic_get_dev(aq_nic),
 108                                            self->size * self->dx_size,
 109                                            &self->dx_ring_pa, GFP_KERNEL);
 110         if (!self->dx_ring) {
 111                 err = -ENOMEM;
 112                 goto err_exit;
 113         }
 114 
 115 err_exit:
 116         if (err < 0) {
 117                 aq_ring_free(self);
 118                 self = NULL;
 119         }
 120         return self;
 121 }
 122 
 123 struct aq_ring_s *aq_ring_tx_alloc(struct aq_ring_s *self,
 124                                    struct aq_nic_s *aq_nic,
 125                                    unsigned int idx,
 126                                    struct aq_nic_cfg_s *aq_nic_cfg)
 127 {
 128         int err = 0;
 129 
 130         self->aq_nic = aq_nic;
 131         self->idx = idx;
 132         self->size = aq_nic_cfg->txds;
 133         self->dx_size = aq_nic_cfg->aq_hw_caps->txd_size;
 134 
 135         self = aq_ring_alloc(self, aq_nic);
 136         if (!self) {
 137                 err = -ENOMEM;
 138                 goto err_exit;
 139         }
 140 
 141 err_exit:
 142         if (err < 0) {
 143                 aq_ring_free(self);
 144                 self = NULL;
 145         }
 146         return self;
 147 }
 148 
 149 struct aq_ring_s *aq_ring_rx_alloc(struct aq_ring_s *self,
 150                                    struct aq_nic_s *aq_nic,
 151                                    unsigned int idx,
 152                                    struct aq_nic_cfg_s *aq_nic_cfg)
 153 {
 154         int err = 0;
 155 
 156         self->aq_nic = aq_nic;
 157         self->idx = idx;
 158         self->size = aq_nic_cfg->rxds;
 159         self->dx_size = aq_nic_cfg->aq_hw_caps->rxd_size;
 160         self->page_order = fls(AQ_CFG_RX_FRAME_MAX / PAGE_SIZE +
 161                                (AQ_CFG_RX_FRAME_MAX % PAGE_SIZE ? 1 : 0)) - 1;
 162 
 163         if (aq_nic_cfg->rxpageorder > self->page_order)
 164                 self->page_order = aq_nic_cfg->rxpageorder;
 165 
 166         self = aq_ring_alloc(self, aq_nic);
 167         if (!self) {
 168                 err = -ENOMEM;
 169                 goto err_exit;
 170         }
 171 
 172 err_exit:
 173         if (err < 0) {
 174                 aq_ring_free(self);
 175                 self = NULL;
 176         }
 177         return self;
 178 }
 179 
 180 int aq_ring_init(struct aq_ring_s *self)
 181 {
 182         self->hw_head = 0;
 183         self->sw_head = 0;
 184         self->sw_tail = 0;
 185         return 0;
 186 }
 187 
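      /* Return true if descriptor index i lies strictly between h and t on the
       * circular ring, taking wrap-around into account.  For example, with
       * h == 62 and t == 2 on a 64-descriptor ring, indices 63, 0 and 1 are in
       * range, while 62 and 2 are not.
       */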
 188 static inline bool aq_ring_dx_in_range(unsigned int h, unsigned int i,
 189                                        unsigned int t)
 190 {
 191         return (h < t) ? ((h < i) && (i < t)) : ((h < i) || (i < t));
 192 }
 193 
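      /* Stop the Tx subqueue when too few descriptors remain for a maximally
       * fragmented skb, and wake it again once enough have been reclaimed.
       */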
 194 void aq_ring_update_queue_state(struct aq_ring_s *ring)
 195 {
 196         if (aq_ring_avail_dx(ring) <= AQ_CFG_SKB_FRAGS_MAX)
 197                 aq_ring_queue_stop(ring);
 198         else if (aq_ring_avail_dx(ring) > AQ_CFG_RESTART_DESC_THRES)
 199                 aq_ring_queue_wake(ring);
 200 }
 201 
 202 void aq_ring_queue_wake(struct aq_ring_s *ring)
 203 {
 204         struct net_device *ndev = aq_nic_get_ndev(ring->aq_nic);
 205 
 206         if (__netif_subqueue_stopped(ndev, ring->idx)) {
 207                 netif_wake_subqueue(ndev, ring->idx);
 208                 ring->stats.tx.queue_restarts++;
 209         }
 210 }
 211 
 212 void aq_ring_queue_stop(struct aq_ring_s *ring)
 213 {
 214         struct net_device *ndev = aq_nic_get_ndev(ring->aq_nic);
 215 
 216         if (!__netif_subqueue_stopped(ndev, ring->idx))
 217                 netif_stop_subqueue(ndev, ring->idx);
 218 }
 219 
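      /* Reclaim completed Tx descriptors up to hw_head: unmap buffers, free
       * skbs on end-of-packet and account Tx stats, bounded by
       * AQ_CFG_TX_CLEAN_BUDGET.  Returns true if the budget was not exhausted.
       */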
 220 bool aq_ring_tx_clean(struct aq_ring_s *self)
 221 {
 222         struct device *dev = aq_nic_get_dev(self->aq_nic);
 223         unsigned int budget;
 224 
 225         for (budget = AQ_CFG_TX_CLEAN_BUDGET;
 226              budget && self->sw_head != self->hw_head; budget--) {
 227                 struct aq_ring_buff_s *buff = &self->buff_ring[self->sw_head];
 228 
 229                 if (likely(buff->is_mapped)) {
 230                         if (unlikely(buff->is_sop)) {
 231                                 if (!buff->is_eop &&
 232                                     buff->eop_index != 0xffffU &&
 233                                     (!aq_ring_dx_in_range(self->sw_head,
 234                                                 buff->eop_index,
 235                                                 self->hw_head)))
 236                                         break;
 237 
 238                                 dma_unmap_single(dev, buff->pa, buff->len,
 239                                                  DMA_TO_DEVICE);
 240                         } else {
 241                                 dma_unmap_page(dev, buff->pa, buff->len,
 242                                                DMA_TO_DEVICE);
 243                         }
 244                 }
 245 
 246                 if (unlikely(buff->is_eop)) {
  247                         ++self->stats.tx.packets;
 248                         self->stats.tx.bytes += buff->skb->len;
 249 
 250                         dev_kfree_skb_any(buff->skb);
 251                 }
 252                 buff->pa = 0U;
 253                 buff->eop_index = 0xffffU;
 254                 self->sw_head = aq_ring_next_dx(self, self->sw_head);
 255         }
 256 
 257         return !!budget;
 258 }
 259 
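      /* Propagate the hardware checksum offload result into the skb: count an
       * error when the offload failed, otherwise mark the IP and TCP/UDP
       * checksums as already verified.
       */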
 260 static void aq_rx_checksum(struct aq_ring_s *self,
 261                            struct aq_ring_buff_s *buff,
 262                            struct sk_buff *skb)
 263 {
 264         if (!(self->aq_nic->ndev->features & NETIF_F_RXCSUM))
 265                 return;
 266 
 267         if (unlikely(buff->is_cso_err)) {
 268                 ++self->stats.rx.errors;
 269                 skb->ip_summed = CHECKSUM_NONE;
 270                 return;
 271         }
 272         if (buff->is_ip_cso) {
 273                 __skb_incr_checksum_unnecessary(skb);
 274         } else {
 275                 skb->ip_summed = CHECKSUM_NONE;
 276         }
 277 
 278         if (buff->is_udp_cso || buff->is_tcp_cso)
 279                 __skb_incr_checksum_unnecessary(skb);
 280 }
 281 
 282 #define AQ_SKB_ALIGN SKB_DATA_ALIGN(sizeof(struct skb_shared_info))
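      /* NAPI receive path: walk completed Rx descriptors, aggregate RSC
       * multi-descriptor packets, build skbs (build_skb() for small single
       * fragment packets, header copy plus page fragments otherwise) and hand
       * them to the stack via napi_gro_receive().
       */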
 283 int aq_ring_rx_clean(struct aq_ring_s *self,
 284                      struct napi_struct *napi,
 285                      int *work_done,
 286                      int budget)
 287 {
 288         struct net_device *ndev = aq_nic_get_ndev(self->aq_nic);
 289         bool is_rsc_completed = true;
 290         int err = 0;
 291 
 292         for (; (self->sw_head != self->hw_head) && budget;
 293                 self->sw_head = aq_ring_next_dx(self, self->sw_head),
 294                 --budget, ++(*work_done)) {
 295                 struct aq_ring_buff_s *buff = &self->buff_ring[self->sw_head];
 296                 struct aq_ring_buff_s *buff_ = NULL;
 297                 struct sk_buff *skb = NULL;
 298                 unsigned int next_ = 0U;
 299                 unsigned int i = 0U;
 300                 u16 hdr_len;
 301 
 302                 if (buff->is_cleaned)
 303                         continue;
 304 
 305                 if (!buff->is_eop) {
 306                         buff_ = buff;
 307                         do {
  308                                 next_ = buff_->next;
 309                                 buff_ = &self->buff_ring[next_];
 310                                 is_rsc_completed =
 311                                         aq_ring_dx_in_range(self->sw_head,
 312                                                             next_,
 313                                                             self->hw_head);
 314 
 315                                 if (unlikely(!is_rsc_completed))
 316                                         break;
 317 
 318                                 buff->is_error |= buff_->is_error;
 319                                 buff->is_cso_err |= buff_->is_cso_err;
 320 
 321                         } while (!buff_->is_eop);
 322 
 323                         if (!is_rsc_completed) {
 324                                 err = 0;
 325                                 goto err_exit;
 326                         }
 327                         if (buff->is_error || buff->is_cso_err) {
 328                                 buff_ = buff;
 329                                 do {
  330                                         next_ = buff_->next;
 331                                         buff_ = &self->buff_ring[next_];
 332 
 333                                         buff_->is_cleaned = true;
 334                                 } while (!buff_->is_eop);
 335 
 336                                 ++self->stats.rx.errors;
 337                                 continue;
 338                         }
 339                 }
 340 
 341                 if (buff->is_error) {
 342                         ++self->stats.rx.errors;
 343                         continue;
 344                 }
 345 
 346                 dma_sync_single_range_for_cpu(aq_nic_get_dev(self->aq_nic),
 347                                               buff->rxdata.daddr,
 348                                               buff->rxdata.pg_off,
 349                                               buff->len, DMA_FROM_DEVICE);
 350 
 351                 /* for single fragment packets use build_skb() */
 352                 if (buff->is_eop &&
 353                     buff->len <= AQ_CFG_RX_FRAME_MAX - AQ_SKB_ALIGN) {
 354                         skb = build_skb(aq_buf_vaddr(&buff->rxdata),
 355                                         AQ_CFG_RX_FRAME_MAX);
 356                         if (unlikely(!skb)) {
 357                                 err = -ENOMEM;
 358                                 goto err_exit;
 359                         }
 360                         skb_put(skb, buff->len);
 361                         page_ref_inc(buff->rxdata.page);
 362                 } else {
 363                         skb = napi_alloc_skb(napi, AQ_CFG_RX_HDR_SIZE);
 364                         if (unlikely(!skb)) {
 365                                 err = -ENOMEM;
 366                                 goto err_exit;
 367                         }
 368 
 369                         hdr_len = buff->len;
 370                         if (hdr_len > AQ_CFG_RX_HDR_SIZE)
 371                                 hdr_len = eth_get_headlen(skb->dev,
 372                                                           aq_buf_vaddr(&buff->rxdata),
 373                                                           AQ_CFG_RX_HDR_SIZE);
 374 
 375                         memcpy(__skb_put(skb, hdr_len), aq_buf_vaddr(&buff->rxdata),
 376                                ALIGN(hdr_len, sizeof(long)));
 377 
 378                         if (buff->len - hdr_len > 0) {
 379                                 skb_add_rx_frag(skb, 0, buff->rxdata.page,
 380                                                 buff->rxdata.pg_off + hdr_len,
 381                                                 buff->len - hdr_len,
 382                                                 AQ_CFG_RX_FRAME_MAX);
 383                                 page_ref_inc(buff->rxdata.page);
 384                         }
 385 
 386                         if (!buff->is_eop) {
 387                                 buff_ = buff;
 388                                 i = 1U;
 389                                 do {
  390                                         next_ = buff_->next;
 391                                         buff_ = &self->buff_ring[next_];
 392 
 393                                         dma_sync_single_range_for_cpu(
 394                                                         aq_nic_get_dev(self->aq_nic),
 395                                                         buff_->rxdata.daddr,
 396                                                         buff_->rxdata.pg_off,
 397                                                         buff_->len,
 398                                                         DMA_FROM_DEVICE);
 399                                         skb_add_rx_frag(skb, i++,
 400                                                         buff_->rxdata.page,
 401                                                         buff_->rxdata.pg_off,
 402                                                         buff_->len,
 403                                                         AQ_CFG_RX_FRAME_MAX);
 404                                         page_ref_inc(buff_->rxdata.page);
 405                                         buff_->is_cleaned = 1;
 406 
 407                                         buff->is_ip_cso &= buff_->is_ip_cso;
 408                                         buff->is_udp_cso &= buff_->is_udp_cso;
 409                                         buff->is_tcp_cso &= buff_->is_tcp_cso;
 410                                         buff->is_cso_err |= buff_->is_cso_err;
 411 
 412                                 } while (!buff_->is_eop);
 413                         }
 414                 }
 415 
 416                 if (buff->is_vlan)
 417                         __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
 418                                                buff->vlan_rx_tag);
 419 
 420                 skb->protocol = eth_type_trans(skb, ndev);
 421 
 422                 aq_rx_checksum(self, buff, skb);
 423 
 424                 skb_set_hash(skb, buff->rss_hash,
 425                              buff->is_hash_l4 ? PKT_HASH_TYPE_L4 :
 426                              PKT_HASH_TYPE_NONE);
 427 
 428                 skb_record_rx_queue(skb, self->idx);
 429 
 430                 ++self->stats.rx.packets;
 431                 self->stats.rx.bytes += skb->len;
 432 
 433                 napi_gro_receive(napi, skb);
 434         }
 435 
 436 err_exit:
 437         return err;
 438 }
 439 
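      /* Post receive pages for all free descriptors; the refill is skipped
       * while fewer than AQ_CFG_RX_REFILL_THRES (or half the ring) descriptors
       * are available.
       */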
 440 int aq_ring_rx_fill(struct aq_ring_s *self)
 441 {
 442         unsigned int page_order = self->page_order;
 443         struct aq_ring_buff_s *buff = NULL;
 444         int err = 0;
 445         int i = 0;
 446 
 447         if (aq_ring_avail_dx(self) < min_t(unsigned int, AQ_CFG_RX_REFILL_THRES,
 448                                            self->size / 2))
 449                 return err;
 450 
 451         for (i = aq_ring_avail_dx(self); i--;
 452                 self->sw_tail = aq_ring_next_dx(self, self->sw_tail)) {
 453                 buff = &self->buff_ring[self->sw_tail];
 454 
 455                 buff->flags = 0U;
 456                 buff->len = AQ_CFG_RX_FRAME_MAX;
 457 
 458                 err = aq_get_rxpages(self, buff, page_order);
 459                 if (err)
 460                         goto err_exit;
 461 
 462                 buff->pa = aq_buf_daddr(&buff->rxdata);
 463                 buff = NULL;
 464         }
 465 
 466 err_exit:
 467         return err;
 468 }
 469 
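      /* Release every receive page still posted between sw_head and sw_tail. */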
 470 void aq_ring_rx_deinit(struct aq_ring_s *self)
 471 {
 472         if (!self)
 473                 goto err_exit;
 474 
 475         for (; self->sw_head != self->sw_tail;
 476                 self->sw_head = aq_ring_next_dx(self, self->sw_head)) {
 477                 struct aq_ring_buff_s *buff = &self->buff_ring[self->sw_head];
 478 
 479                 aq_free_rxpage(&buff->rxdata, aq_nic_get_dev(self->aq_nic));
 480         }
 481 
 482 err_exit:;
 483 }
 484 
 485 void aq_ring_free(struct aq_ring_s *self)
 486 {
 487         if (!self)
 488                 goto err_exit;
 489 
 490         kfree(self->buff_ring);
 491 
 492         if (self->dx_ring)
 493                 dma_free_coherent(aq_nic_get_dev(self->aq_nic),
 494                                   self->size * self->dx_size, self->dx_ring,
 495                                   self->dx_ring_pa);
 496 
 497 err_exit:;
 498 }
