This source file includes the following definitions:
- xgene_enet_ring_init
- xgene_enet_ring_set_type
- xgene_enet_ring_set_recombbuf
- xgene_enet_ring_wr32
- xgene_enet_write_ring_state
- xgene_enet_clr_ring_state
- xgene_enet_set_ring_state
- xgene_enet_set_ring_id
- xgene_enet_clr_desc_ring_id
- xgene_enet_setup_ring
- xgene_enet_clear_ring
- xgene_enet_wr_cmd
- xgene_enet_ring_len
- xgene_enet_setup_coalescing
#include "xgene_enet_main.h"
#include "xgene_enet_hw.h"
#include "xgene_enet_ring2.h"

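/* Fill in the ring state words: CPU-owned rings get an interrupt line and a
 * dequeue-interrupt enable; all rings get the coherent ring base address
 * (split into low/high fields), the ring size and related control bits.
 */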
static void xgene_enet_ring_init(struct xgene_enet_desc_ring *ring)
{
	u32 *ring_cfg = ring->state;
	u64 addr = ring->dma;

	if (xgene_enet_ring_owner(ring->id) == RING_OWNER_CPU) {
		ring_cfg[0] |= SET_VAL(X2_INTLINE, ring->id & RING_BUFNUM_MASK);
		ring_cfg[3] |= SET_BIT(X2_DEQINTEN);
	}
	ring_cfg[0] |= SET_VAL(X2_CFGCRID, 2);

	addr >>= 8;
	ring_cfg[2] |= QCOHERENT | SET_VAL(RINGADDRL, addr);

	addr >>= 27;
	ring_cfg[3] |= SET_VAL(RINGSIZE, ring->cfgsize)
		    | ACCEPTLERR
		    | SET_VAL(RINGADDRH, addr);
	ring_cfg[4] |= SET_VAL(X2_SELTHRSH, 1);
	ring_cfg[5] |= SET_BIT(X2_QBASE_AM) | SET_BIT(X2_MSG_AM);
}

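/* Mark the ring as a buffer pool or a regular ring in the state words. */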
static void xgene_enet_ring_set_type(struct xgene_enet_desc_ring *ring)
{
	u32 *ring_cfg = ring->state;
	bool is_bufpool;
	u32 val;

	is_bufpool = xgene_enet_is_bufpool(ring->id);
	val = (is_bufpool) ? RING_BUFPOOL : RING_REGULAR;
	ring_cfg[4] |= SET_VAL(X2_RINGTYPE, val);
	if (is_bufpool)
		ring_cfg[3] |= SET_VAL(RINGMODE, BUFPOOL_MODE);
}

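/* Enable the recombination buffer and set its timeout in the ring state. */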
static void xgene_enet_ring_set_recombbuf(struct xgene_enet_desc_ring *ring)
{
	u32 *ring_cfg = ring->state;

	ring_cfg[3] |= RECOMBBUF;
	ring_cfg[4] |= SET_VAL(X2_RECOMTIMEOUT, 0x7);
}

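/* Write a 32-bit value to a ring-manager CSR at the given offset. */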
static void xgene_enet_ring_wr32(struct xgene_enet_desc_ring *ring,
				 u32 offset, u32 data)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ring->ndev);

	iowrite32(data, pdata->ring_csr_addr + offset);
}

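/* Select the ring via CSR_RING_CONFIG and program all of its state words. */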
static void xgene_enet_write_ring_state(struct xgene_enet_desc_ring *ring)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ring->ndev);
	int i;

	xgene_enet_ring_wr32(ring, CSR_RING_CONFIG, ring->num);
	for (i = 0; i < pdata->ring_ops->num_ring_config; i++) {
		xgene_enet_ring_wr32(ring, CSR_RING_WR_BASE + (i * 4),
				     ring->state[i]);
	}
}

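/* Zero the cached ring state and push it to hardware. */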
static void xgene_enet_clr_ring_state(struct xgene_enet_desc_ring *ring)
{
	memset(ring->state, 0, sizeof(ring->state));
	xgene_enet_write_ring_state(ring);
}

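/* Build the full ring state (ring type, recombination buffer for ETH-owned
 * rings, base configuration) and write it to hardware.
 */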
static void xgene_enet_set_ring_state(struct xgene_enet_desc_ring *ring)
{
	enum xgene_ring_owner owner;

	xgene_enet_ring_set_type(ring);

	owner = xgene_enet_ring_owner(ring->id);
	if (owner == RING_OWNER_ETH0 || owner == RING_OWNER_ETH1)
		xgene_enet_ring_set_recombbuf(ring);

	xgene_enet_ring_init(ring);
	xgene_enet_write_ring_state(ring);
}

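/* Associate the ring number with its ring id and enable the prefetch buffer;
 * CPU-owned rings are left untouched.
 */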
static void xgene_enet_set_ring_id(struct xgene_enet_desc_ring *ring)
{
	u32 ring_id_val, ring_id_buf;
	bool is_bufpool;

	if (xgene_enet_ring_owner(ring->id) == RING_OWNER_CPU)
		return;

	is_bufpool = xgene_enet_is_bufpool(ring->id);

	ring_id_val = ring->id & GENMASK(9, 0);
	ring_id_val |= OVERWRITE;

	ring_id_buf = (ring->num << 9) & GENMASK(18, 9);
	ring_id_buf |= PREFETCH_BUF_EN;

	if (is_bufpool)
		ring_id_buf |= IS_BUFFER_POOL;

	xgene_enet_ring_wr32(ring, CSR_RING_ID, ring_id_val);
	xgene_enet_ring_wr32(ring, CSR_RING_ID_BUF, ring_id_buf);
}

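/* Clear the ring id to ring number association in hardware. */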
static void xgene_enet_clr_desc_ring_id(struct xgene_enet_desc_ring *ring)
{
	u32 ring_id;

	ring_id = ring->id | OVERWRITE;
	xgene_enet_ring_wr32(ring, CSR_RING_ID, ring_id);
	xgene_enet_ring_wr32(ring, CSR_RING_ID_BUF, 0);
}

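/* Program ring state and id; for CPU-owned rings, also set up the interrupt
 * mailbox and mark every descriptor slot empty.
 */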
static struct xgene_enet_desc_ring *xgene_enet_setup_ring(
				    struct xgene_enet_desc_ring *ring)
{
	bool is_bufpool;
	u32 addr, i;

	xgene_enet_clr_ring_state(ring);
	xgene_enet_set_ring_state(ring);
	xgene_enet_set_ring_id(ring);

	ring->slots = xgene_enet_get_numslots(ring->id, ring->size);

	is_bufpool = xgene_enet_is_bufpool(ring->id);
	if (is_bufpool || xgene_enet_ring_owner(ring->id) != RING_OWNER_CPU)
		return ring;

	addr = CSR_VMID0_INTR_MBOX + (4 * (ring->id & RING_BUFNUM_MASK));
	xgene_enet_ring_wr32(ring, addr, ring->irq_mbox_dma >> 10);

	for (i = 0; i < ring->slots; i++)
		xgene_enet_mark_desc_slot_empty(&ring->raw_desc[i]);

	return ring;
}

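/* Undo xgene_enet_setup_ring(): clear the ring id and the ring state. */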
static void xgene_enet_clear_ring(struct xgene_enet_desc_ring *ring)
{
	xgene_enet_clr_desc_ring_id(ring);
	xgene_enet_clr_ring_state(ring);
}

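/* Write a descriptor count update to the ring's command register; CPU-owned
 * rings also request that their pending interrupt be cleared.
 */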
static void xgene_enet_wr_cmd(struct xgene_enet_desc_ring *ring, int count)
{
	u32 data = 0;

	if (xgene_enet_ring_owner(ring->id) == RING_OWNER_CPU) {
		data = SET_VAL(X2_INTLINE, ring->id & RING_BUFNUM_MASK) |
		       INTR_CLEAR;
	}
	data |= (count & GENMASK(16, 0));

	iowrite32(data, ring->cmd);
}

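/* Return the number of messages currently pending in the ring. */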
static u32 xgene_enet_ring_len(struct xgene_enet_desc_ring *ring)
{
	u32 __iomem *cmd_base = ring->cmd_base;
	u32 ring_state, num_msgs;

	ring_state = ioread32(&cmd_base[1]);
	num_msgs = GET_VAL(X2_NUMMSGSINQ, ring_state);

	return num_msgs;
}

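/* Program fixed interrupt-coalescing clock ticks and thresholds. */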
static void xgene_enet_setup_coalescing(struct xgene_enet_desc_ring *ring)
{
	u32 data = 0x77777777;

	xgene_enet_ring_wr32(ring, CSR_PBM_COAL, 0x8e);
	xgene_enet_ring_wr32(ring, CSR_PBM_CTICK0, data);
	xgene_enet_ring_wr32(ring, CSR_PBM_CTICK1, data);
	xgene_enet_ring_wr32(ring, CSR_PBM_CTICK2, data);
	xgene_enet_ring_wr32(ring, CSR_PBM_CTICK3, data);
	xgene_enet_ring_wr32(ring, CSR_THRESHOLD0_SET1, 0x08);
	xgene_enet_ring_wr32(ring, CSR_THRESHOLD1_SET1, 0x10);
}

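/* Ring operations for the second-generation (X2) ring manager. */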
struct xgene_ring_ops xgene_ring2_ops = {
	.num_ring_config = X2_NUM_RING_CONFIG,
	.num_ring_id_shift = 13,
	.setup = xgene_enet_setup_ring,
	.clear = xgene_enet_clear_ring,
	.wr_cmd = xgene_enet_wr_cmd,
	.len = xgene_enet_ring_len,
	.coalesce = xgene_enet_setup_coalescing,
};