root/drivers/net/wireless/quantenna/qtnfmac/shm_ipc.c


DEFINITIONS

This source file includes the following definitions:
  1. qtnf_shm_ipc_has_new_data
  2. qtnf_shm_handle_new_data
  3. qtnf_shm_ipc_irq_work
  4. qtnf_shm_ipc_irq_inbound_handler
  5. qtnf_shm_ipc_irq_outbound_handler
  6. qtnf_shm_ipc_init
  7. qtnf_shm_ipc_free
  8. qtnf_shm_ipc_send

// SPDX-License-Identifier: GPL-2.0+
/* Copyright (c) 2015-2016 Quantenna Communications. All rights reserved. */

#include <linux/types.h>
#include <linux/io.h>

#include "shm_ipc.h"

#undef pr_fmt
#define pr_fmt(fmt)     "qtnfmac shm_ipc: %s: " fmt, __func__

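/* Check whether the peer has posted new data in the shared memory region. */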
static bool qtnf_shm_ipc_has_new_data(struct qtnf_shm_ipc *ipc)
{
	const u32 flags = readl(&ipc->shm_region->headroom.hdr.flags);

	return (flags & QTNF_SHM_IPC_NEW_DATA);
}

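/* Consume one inbound message: validate its length, pass the payload to the
 * registered RX callback, then ACK the peer and raise its interrupt.
 */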
static void qtnf_shm_handle_new_data(struct qtnf_shm_ipc *ipc)
{
	size_t size;
	bool rx_buff_ok = true;
	struct qtnf_shm_ipc_region_header __iomem *shm_reg_hdr;

	shm_reg_hdr = &ipc->shm_region->headroom.hdr;

	size = readw(&shm_reg_hdr->data_len);

	if (unlikely(size == 0 || size > QTN_IPC_MAX_DATA_SZ)) {
		pr_err("wrong rx packet size: %zu\n", size);
		rx_buff_ok = false;
	}

	if (likely(rx_buff_ok)) {
		ipc->rx_packet_count++;
		ipc->rx_callback.fn(ipc->rx_callback.arg,
				    ipc->shm_region->data, size);
	}

	writel(QTNF_SHM_IPC_ACK, &shm_reg_hdr->flags);
	readl(&shm_reg_hdr->flags); /* flush PCIe write */

	ipc->interrupt.fn(ipc->interrupt.arg);
}

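/* Deferred IRQ work: drain all pending inbound messages outside of
 * interrupt context.
 */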
static void qtnf_shm_ipc_irq_work(struct work_struct *work)
{
	struct qtnf_shm_ipc *ipc = container_of(work, struct qtnf_shm_ipc,
						irq_work);

	while (qtnf_shm_ipc_has_new_data(ipc))
		qtnf_shm_handle_new_data(ipc);
}

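/* Inbound-direction IRQ handler: defer RX processing to the workqueue. */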
static void qtnf_shm_ipc_irq_inbound_handler(struct qtnf_shm_ipc *ipc)
{
	u32 flags;

	flags = readl(&ipc->shm_region->headroom.hdr.flags);

	if (flags & QTNF_SHM_IPC_NEW_DATA)
		queue_work(ipc->workqueue, &ipc->irq_work);
}

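/* Outbound-direction IRQ handler: complete a pending TX once the peer
 * has ACKed it.
 */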
static void qtnf_shm_ipc_irq_outbound_handler(struct qtnf_shm_ipc *ipc)
{
	u32 flags;

	if (!READ_ONCE(ipc->waiting_for_ack))
		return;

	flags = readl(&ipc->shm_region->headroom.hdr.flags);

	if (flags & QTNF_SHM_IPC_ACK) {
		WRITE_ONCE(ipc->waiting_for_ack, 0);
		complete(&ipc->tx_completion);
	}
}

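/* Initialize an IPC channel over the given shared memory region; the
 * direction selects which IRQ handler is installed.
 */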
int qtnf_shm_ipc_init(struct qtnf_shm_ipc *ipc,
		      enum qtnf_shm_ipc_direction direction,
		      struct qtnf_shm_ipc_region __iomem *shm_region,
		      struct workqueue_struct *workqueue,
		      const struct qtnf_shm_ipc_int *interrupt,
		      const struct qtnf_shm_ipc_rx_callback *rx_callback)
{
	BUILD_BUG_ON(offsetof(struct qtnf_shm_ipc_region, data) !=
		     QTN_IPC_REG_HDR_SZ);
	BUILD_BUG_ON(sizeof(struct qtnf_shm_ipc_region) > QTN_IPC_REG_SZ);

	ipc->shm_region = shm_region;
	ipc->direction = direction;
	ipc->interrupt = *interrupt;
	ipc->rx_callback = *rx_callback;
	ipc->tx_packet_count = 0;
	ipc->rx_packet_count = 0;
	ipc->workqueue = workqueue;
	ipc->waiting_for_ack = 0;
	ipc->tx_timeout_count = 0;

	switch (direction) {
	case QTNF_SHM_IPC_OUTBOUND:
		ipc->irq_handler = qtnf_shm_ipc_irq_outbound_handler;
		break;
	case QTNF_SHM_IPC_INBOUND:
		ipc->irq_handler = qtnf_shm_ipc_irq_inbound_handler;
		break;
	default:
		return -EINVAL;
	}

	INIT_WORK(&ipc->irq_work, qtnf_shm_ipc_irq_work);
	init_completion(&ipc->tx_completion);

	return 0;
}

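/* Tear down the channel: wake up any sender still waiting for an ACK. */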
void qtnf_shm_ipc_free(struct qtnf_shm_ipc *ipc)
{
	complete_all(&ipc->tx_completion);
}

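/* Copy one message into shared memory, signal the peer, and wait for its
 * ACK (or time out).
 */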
int qtnf_shm_ipc_send(struct qtnf_shm_ipc *ipc, const u8 *buf, size_t size)
{
	int ret = 0;
	struct qtnf_shm_ipc_region_header __iomem *shm_reg_hdr;

	shm_reg_hdr = &ipc->shm_region->headroom.hdr;

	if (unlikely(size > QTN_IPC_MAX_DATA_SZ))
		return -E2BIG;

	ipc->tx_packet_count++;

	writew(size, &shm_reg_hdr->data_len);
	memcpy_toio(ipc->shm_region->data, buf, size);

	/* sync previous writes before proceeding */
	dma_wmb();

	WRITE_ONCE(ipc->waiting_for_ack, 1);

	/* sync previous memory write before announcing new data ready */
	wmb();

	writel(QTNF_SHM_IPC_NEW_DATA, &shm_reg_hdr->flags);
	readl(&shm_reg_hdr->flags); /* flush PCIe write */

	ipc->interrupt.fn(ipc->interrupt.arg);

	if (!wait_for_completion_timeout(&ipc->tx_completion,
					 QTN_SHM_IPC_ACK_TIMEOUT)) {
		ret = -ETIMEDOUT;
		ipc->tx_timeout_count++;
		pr_err("TX ACK timeout\n");
	}

	/* we are no longer waiting for an ACK, even in the timeout case */
	WRITE_ONCE(ipc->waiting_for_ack, 0);

	return ret;
}
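
For context, a minimal usage sketch of this API as a hypothetical bus driver
might wire it up. Everything named my_bus_* (the private struct, its fields,
and the RX/doorbell helpers) is invented for illustration; only the
qtnf_shm_ipc_* types, calls, and direction constants come from this file.
The driver's real PCIe glue follows the same pattern with its own names.

/* Usage sketch (hypothetical caller, not part of this file). */
struct my_bus_priv {
	struct qtnf_shm_ipc ipc_tx;			/* host -> card channel */
	struct qtnf_shm_ipc ipc_rx;			/* card -> host channel */
	struct qtnf_shm_ipc_region __iomem *shm_tx;	/* mapped TX region */
	struct qtnf_shm_ipc_region __iomem *shm_rx;	/* mapped RX region */
	struct workqueue_struct *workqueue;
};

/* Invoked (via rx_callback) with each control message from the card. */
static void my_bus_rx(void *arg, const u8 *buf, size_t len)
{
	struct my_bus_priv *priv = arg;

	/* hand the message to the control path */
}

/* Invoked (via interrupt.fn) so the card notices new data/ACK flags. */
static void my_bus_kick_peer(void *arg)
{
	struct my_bus_priv *priv = arg;

	/* write the bus-specific doorbell/interrupt register here */
}

static int my_bus_ipc_setup(struct my_bus_priv *priv)
{
	const struct qtnf_shm_ipc_int ipc_int = {
		.fn = my_bus_kick_peer,
		.arg = priv,
	};
	const struct qtnf_shm_ipc_rx_callback rx_cb = {
		.fn = my_bus_rx,
		.arg = priv,
	};
	int ret;

	ret = qtnf_shm_ipc_init(&priv->ipc_tx, QTNF_SHM_IPC_OUTBOUND,
				priv->shm_tx, priv->workqueue,
				&ipc_int, &rx_cb);
	if (ret)
		return ret;

	return qtnf_shm_ipc_init(&priv->ipc_rx, QTNF_SHM_IPC_INBOUND,
				 priv->shm_rx, priv->workqueue,
				 &ipc_int, &rx_cb);
}

A send is then qtnf_shm_ipc_send(&priv->ipc_tx, buf, len), which blocks until
the card ACKs or QTN_SHM_IPC_ACK_TIMEOUT expires. The bus interrupt service
routine is expected to dispatch into each channel's irq_handler hook installed
by qtnf_shm_ipc_init.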
