Lines matching refs:pool in drivers/net/ethernet/ibm/ibmveth.c

151 static void ibmveth_init_buffer_pool(struct ibmveth_buff_pool *pool,  in ibmveth_init_buffer_pool()  argument
155 pool->size = pool_size; in ibmveth_init_buffer_pool()
156 pool->index = pool_index; in ibmveth_init_buffer_pool()
157 pool->buff_size = buff_size; in ibmveth_init_buffer_pool()
158 pool->threshold = pool_size * 7 / 8; in ibmveth_init_buffer_pool()
159 pool->active = pool_active; in ibmveth_init_buffer_pool()
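
The initializer above only records the pool geometry. A minimal user-space model of those fields makes the later listings easier to follow; the names mirror the driver, but the real struct ibmveth_buff_pool in ibmveth.h carries more state (the free ring, DMA and skb arrays, producer/consumer indices, an atomic available counter):

    #include <stdint.h>

    struct pool {
        uint32_t size;        /* number of buffers */
        uint32_t index;       /* pool number, packed into correlators later */
        uint32_t buff_size;   /* bytes per buffer */
        uint32_t threshold;   /* refill trigger */
        int      active;
    };

    static void pool_init(struct pool *p, uint32_t size, uint32_t index,
                          uint32_t buff_size, int active)
    {
        p->size = size;
        p->index = index;
        p->buff_size = buff_size;
        p->threshold = size * 7 / 8;  /* replenish once fewer than 7/8 are posted */
        p->active = active;
    }
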
163 static int ibmveth_alloc_buffer_pool(struct ibmveth_buff_pool *pool) in ibmveth_alloc_buffer_pool() argument
167 pool->free_map = kmalloc(sizeof(u16) * pool->size, GFP_KERNEL); in ibmveth_alloc_buffer_pool()
169 if (!pool->free_map) in ibmveth_alloc_buffer_pool()
172 pool->dma_addr = kmalloc(sizeof(dma_addr_t) * pool->size, GFP_KERNEL); in ibmveth_alloc_buffer_pool()
173 if (!pool->dma_addr) { in ibmveth_alloc_buffer_pool()
174 kfree(pool->free_map); in ibmveth_alloc_buffer_pool()
175 pool->free_map = NULL; in ibmveth_alloc_buffer_pool()
179 pool->skbuff = kcalloc(pool->size, sizeof(void *), GFP_KERNEL); in ibmveth_alloc_buffer_pool()
181 if (!pool->skbuff) { in ibmveth_alloc_buffer_pool()
182 kfree(pool->dma_addr); in ibmveth_alloc_buffer_pool()
183 pool->dma_addr = NULL; in ibmveth_alloc_buffer_pool()
185 kfree(pool->free_map); in ibmveth_alloc_buffer_pool()
186 pool->free_map = NULL; in ibmveth_alloc_buffer_pool()
190 memset(pool->dma_addr, 0, sizeof(dma_addr_t) * pool->size); in ibmveth_alloc_buffer_pool()
192 for (i = 0; i < pool->size; ++i) in ibmveth_alloc_buffer_pool()
193 pool->free_map[i] = i; in ibmveth_alloc_buffer_pool()
195 atomic_set(&pool->available, 0); in ibmveth_alloc_buffer_pool()
196 pool->producer_index = 0; in ibmveth_alloc_buffer_pool()
197 pool->consumer_index = 0; in ibmveth_alloc_buffer_pool()
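
The allocator above builds three parallel arrays (free ring, DMA addresses, skb pointers) and unwinds each earlier allocation by hand on failure. A compilable user-space sketch of the same staged allocation, written with the goto-unwind idiom the kernel usually prefers; malloc/calloc stand in for kmalloc/kcalloc, and calloc matches the driver's memset of dma_addr:

    #include <stdint.h>
    #include <stdlib.h>

    struct pool {
        uint32_t size;
        uint16_t *free_map;    /* ring of free slot numbers */
        uintptr_t *dma_addr;   /* stand-in for dma_addr_t */
        void **skbuff;         /* stand-in for struct sk_buff * */
        uint32_t producer_index, consumer_index;
    };

    static int pool_alloc(struct pool *p)
    {
        p->free_map = malloc(sizeof(*p->free_map) * p->size);
        if (!p->free_map)
            return -1;

        p->dma_addr = calloc(p->size, sizeof(*p->dma_addr));  /* zeroed */
        if (!p->dma_addr)
            goto err_dma;

        p->skbuff = calloc(p->size, sizeof(*p->skbuff));
        if (!p->skbuff)
            goto err_skb;

        for (uint32_t i = 0; i < p->size; i++)   /* every slot starts free */
            p->free_map[i] = (uint16_t)i;

        p->producer_index = p->consumer_index = 0;
        return 0;

    err_skb:
        free(p->dma_addr);
        p->dma_addr = NULL;
    err_dma:
        free(p->free_map);
        p->free_map = NULL;
        return -1;
    }
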
214 struct ibmveth_buff_pool *pool) in ibmveth_replenish_buffer_pool() argument
217 u32 count = pool->size - atomic_read(&pool->available); in ibmveth_replenish_buffer_pool()
230 skb = netdev_alloc_skb(adapter->netdev, pool->buff_size); in ibmveth_replenish_buffer_pool()
239 free_index = pool->consumer_index; in ibmveth_replenish_buffer_pool()
240 pool->consumer_index++; in ibmveth_replenish_buffer_pool()
241 if (pool->consumer_index >= pool->size) in ibmveth_replenish_buffer_pool()
242 pool->consumer_index = 0; in ibmveth_replenish_buffer_pool()
243 index = pool->free_map[free_index]; in ibmveth_replenish_buffer_pool()
246 BUG_ON(pool->skbuff[index] != NULL); in ibmveth_replenish_buffer_pool()
249 pool->buff_size, DMA_FROM_DEVICE); in ibmveth_replenish_buffer_pool()
254 pool->free_map[free_index] = IBM_VETH_INVALID_MAP; in ibmveth_replenish_buffer_pool()
255 pool->dma_addr[index] = dma_addr; in ibmveth_replenish_buffer_pool()
256 pool->skbuff[index] = skb; in ibmveth_replenish_buffer_pool()
258 correlator = ((u64)pool->index << 32) | index; in ibmveth_replenish_buffer_pool()
261 desc.fields.flags_len = IBMVETH_BUF_VALID | pool->buff_size; in ibmveth_replenish_buffer_pool()
265 unsigned int len = min(pool->buff_size, in ibmveth_replenish_buffer_pool()
282 atomic_add(buffers_added, &(pool->available)); in ibmveth_replenish_buffer_pool()
286 pool->free_map[free_index] = index; in ibmveth_replenish_buffer_pool()
287 pool->skbuff[index] = NULL; in ibmveth_replenish_buffer_pool()
288 if (pool->consumer_index == 0) in ibmveth_replenish_buffer_pool()
289 pool->consumer_index = pool->size - 1; in ibmveth_replenish_buffer_pool()
291 pool->consumer_index--; in ibmveth_replenish_buffer_pool()
294 pool->dma_addr[index], pool->buff_size, in ibmveth_replenish_buffer_pool()
300 atomic_add(buffers_added, &(pool->available)); in ibmveth_replenish_buffer_pool()
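
The replenish path above treats free_map[] as a ring of free slot numbers: it consumes an entry at consumer_index (wrapping at size), marks the entry IBM_VETH_INVALID_MAP while the buffer is in flight, and packs pool number and slot into a 64-bit correlator for the hypervisor; on an h_add_logical_lan_buffer failure it puts the entry back and steps consumer_index backwards. A standalone sketch of just that arithmetic, on a reduced struct with illustrative names:

    #include <stdint.h>

    #define INVALID_MAP 0xffff   /* mirrors IBM_VETH_INVALID_MAP */

    struct pool {
        uint32_t size;
        uint32_t index;          /* pool number */
        uint16_t *free_map;
        uint32_t consumer_index;
    };

    /* claim one free slot; returns the correlator identifying it */
    static uint64_t ring_take(struct pool *p, uint32_t *free_index_out)
    {
        uint32_t free_index = p->consumer_index;
        uint16_t index = p->free_map[free_index];

        if (++p->consumer_index >= p->size)     /* wrap */
            p->consumer_index = 0;
        p->free_map[free_index] = INVALID_MAP;  /* slot now in flight */

        *free_index_out = free_index;
        return ((uint64_t)p->index << 32) | index;
    }

    /* failure path: put the slot back and step the ring pointer backwards */
    static void ring_untake(struct pool *p, uint32_t free_index, uint16_t index)
    {
        p->free_map[free_index] = index;
        p->consumer_index = p->consumer_index ? p->consumer_index - 1
                                              : p->size - 1;
    }
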
323 struct ibmveth_buff_pool *pool = &adapter->rx_buff_pool[i]; in ibmveth_replenish_task() local
325 if (pool->active && in ibmveth_replenish_task()
326 (atomic_read(&pool->available) < pool->threshold)) in ibmveth_replenish_task()
327 ibmveth_replenish_buffer_pool(adapter, pool); in ibmveth_replenish_task()
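
The trigger in the task above keeps each active pool at least 7/8 full, so at most 1/8 of a pool can drain between replenish passes. Modeled in user space with C11 atomics; NUM_POOLS and replenish() are stand-ins (the driver's constant is IBMVETH_NUM_BUFF_POOLS):

    #include <stdatomic.h>

    #define NUM_POOLS 5   /* stand-in for IBMVETH_NUM_BUFF_POOLS */

    struct pool { int active; atomic_int available; int threshold; };

    static void replenish(struct pool *p) { (void)p; /* refill toward size */ }

    static void replenish_task(struct pool *pools)
    {
        for (int i = 0; i < NUM_POOLS; i++) {
            struct pool *p = &pools[i];
            if (p->active && atomic_load(&p->available) < p->threshold)
                replenish(p);
        }
    }
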
335 struct ibmveth_buff_pool *pool) in ibmveth_free_buffer_pool() argument
339 kfree(pool->free_map); in ibmveth_free_buffer_pool()
340 pool->free_map = NULL; in ibmveth_free_buffer_pool()
342 if (pool->skbuff && pool->dma_addr) { in ibmveth_free_buffer_pool()
343 for (i = 0; i < pool->size; ++i) { in ibmveth_free_buffer_pool()
344 struct sk_buff *skb = pool->skbuff[i]; in ibmveth_free_buffer_pool()
347 pool->dma_addr[i], in ibmveth_free_buffer_pool()
348 pool->buff_size, in ibmveth_free_buffer_pool()
351 pool->skbuff[i] = NULL; in ibmveth_free_buffer_pool()
356 if (pool->dma_addr) { in ibmveth_free_buffer_pool()
357 kfree(pool->dma_addr); in ibmveth_free_buffer_pool()
358 pool->dma_addr = NULL; in ibmveth_free_buffer_pool()
361 if (pool->skbuff) { in ibmveth_free_buffer_pool()
362 kfree(pool->skbuff); in ibmveth_free_buffer_pool()
363 pool->skbuff = NULL; in ibmveth_free_buffer_pool()
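
Teardown above runs in a fixed order: the free ring goes first, then any skbs still held are unmapped and released, then the two bookkeeping arrays, with every pointer NULLed so a repeat call is harmless. A user-space sketch with stubbed DMA/skb calls standing in for dma_unmap_single() and dev_kfree_skb():

    #include <stdint.h>
    #include <stdlib.h>

    struct pool {
        uint32_t size;
        uint16_t *free_map;
        uintptr_t *dma_addr;
        void **skbuff;
    };

    static void dma_unmap_stub(uintptr_t addr) { (void)addr; }
    static void free_skb_stub(void *skb) { free(skb); }

    static void pool_free(struct pool *p)
    {
        free(p->free_map);
        p->free_map = NULL;

        if (p->skbuff && p->dma_addr) {
            for (uint32_t i = 0; i < p->size; i++) {
                if (p->skbuff[i]) {          /* only in-flight slots hold an skb */
                    dma_unmap_stub(p->dma_addr[i]);
                    free_skb_stub(p->skbuff[i]);
                    p->skbuff[i] = NULL;
                }
            }
        }

        free(p->dma_addr);
        p->dma_addr = NULL;
        free(p->skbuff);
        p->skbuff = NULL;
    }

(free(NULL) is defined as a no-op, so the NULLed pointers make double-free impossible here, matching the driver's defensive kfree pattern.)
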
371 unsigned int pool = correlator >> 32; in ibmveth_remove_buffer_from_pool() local
376 BUG_ON(pool >= IBMVETH_NUM_BUFF_POOLS); in ibmveth_remove_buffer_from_pool()
377 BUG_ON(index >= adapter->rx_buff_pool[pool].size); in ibmveth_remove_buffer_from_pool()
379 skb = adapter->rx_buff_pool[pool].skbuff[index]; in ibmveth_remove_buffer_from_pool()
383 adapter->rx_buff_pool[pool].skbuff[index] = NULL; in ibmveth_remove_buffer_from_pool()
386 adapter->rx_buff_pool[pool].dma_addr[index], in ibmveth_remove_buffer_from_pool()
387 adapter->rx_buff_pool[pool].buff_size, in ibmveth_remove_buffer_from_pool()
390 free_index = adapter->rx_buff_pool[pool].producer_index; in ibmveth_remove_buffer_from_pool()
391 adapter->rx_buff_pool[pool].producer_index++; in ibmveth_remove_buffer_from_pool()
392 if (adapter->rx_buff_pool[pool].producer_index >= in ibmveth_remove_buffer_from_pool()
393 adapter->rx_buff_pool[pool].size) in ibmveth_remove_buffer_from_pool()
394 adapter->rx_buff_pool[pool].producer_index = 0; in ibmveth_remove_buffer_from_pool()
395 adapter->rx_buff_pool[pool].free_map[free_index] = index; in ibmveth_remove_buffer_from_pool()
399 atomic_dec(&(adapter->rx_buff_pool[pool].available)); in ibmveth_remove_buffer_from_pool()
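
The completion path above is the producer side of the same ring: the 64-bit correlator echoed back by the hypervisor is split into pool number (upper 32 bits) and slot index (lower 32), and the slot is pushed back onto the free ring at producer_index. ibmveth_rxq_get_buffer, next in the listing, performs only the decoding step. A standalone sketch:

    #include <stdint.h>

    struct pool {
        uint32_t size;
        uint16_t *free_map;
        uint32_t producer_index;
    };

    static void ring_give_back(struct pool *pools, uint64_t correlator)
    {
        unsigned int pool  = correlator >> 32;          /* which pool */
        unsigned int index = correlator & 0xffffffffu;  /* which slot */
        struct pool *p = &pools[pool];

        uint32_t free_index = p->producer_index;
        if (++p->producer_index >= p->size)             /* wrap */
            p->producer_index = 0;
        p->free_map[free_index] = (uint16_t)index;      /* slot is free again */
    }
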
406 unsigned int pool = correlator >> 32; in ibmveth_rxq_get_buffer() local
409 BUG_ON(pool >= IBMVETH_NUM_BUFF_POOLS); in ibmveth_rxq_get_buffer()
410 BUG_ON(index >= adapter->rx_buff_pool[pool].size); in ibmveth_rxq_get_buffer()
412 return adapter->rx_buff_pool[pool].skbuff[index]; in ibmveth_rxq_get_buffer()
420 unsigned int pool = correlator >> 32; in ibmveth_rxq_recycle_buffer() local
426 BUG_ON(pool >= IBMVETH_NUM_BUFF_POOLS); in ibmveth_rxq_recycle_buffer()
427 BUG_ON(index >= adapter->rx_buff_pool[pool].size); in ibmveth_rxq_recycle_buffer()
429 if (!adapter->rx_buff_pool[pool].active) { in ibmveth_rxq_recycle_buffer()
431 ibmveth_free_buffer_pool(adapter, &adapter->rx_buff_pool[pool]); in ibmveth_rxq_recycle_buffer()
436 adapter->rx_buff_pool[pool].buff_size; in ibmveth_rxq_recycle_buffer()
437 desc.fields.address = adapter->rx_buff_pool[pool].dma_addr[index]; in ibmveth_rxq_recycle_buffer()
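
Recycling rebuilds the buffer descriptor in place: IBMVETH_BUF_VALID is OR-ed into the length word and the stored DMA address is reused, so no new allocation or mapping is needed. A hedged sketch of that packing; the real union ibmveth_buf_desc lives in ibmveth.h and is big-endian, so the flag value and field order below are illustrative only:

    #include <stdint.h>
    #include <stdio.h>

    #define BUF_VALID 0x80000000u   /* illustrative valid-flag bit */

    union buf_desc {
        uint64_t desc;              /* the raw 64-bit word handed to firmware */
        struct {
            uint32_t flags_len;     /* valid bit | buffer length */
            uint32_t address;       /* DMA address of the buffer */
        } fields;
    };

    int main(void)
    {
        union buf_desc d;
        d.fields.flags_len = BUF_VALID | 2048;
        d.fields.address = 0x12340000u;
        printf("raw descriptor: %#llx\n", (unsigned long long)d.desc);
        return 0;
    }
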
1617 struct ibmveth_buff_pool *pool = container_of(kobj, in veth_pool_show() local
1622 return sprintf(buf, "%d\n", pool->active); in veth_pool_show()
1624 return sprintf(buf, "%d\n", pool->size); in veth_pool_show()
1626 return sprintf(buf, "%d\n", pool->buff_size); in veth_pool_show()
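
veth_pool_show receives only the embedded kobject from sysfs and recovers the enclosing pool with container_of, i.e. by subtracting the member offset. A standalone demonstration of that pattern; struct names here are illustrative, not the driver's (the driver embeds a struct kobject in struct ibmveth_buff_pool):

    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct kobj_model { int refcount; };

    struct pool_model {
        int active, size, buff_size;
        struct kobj_model kobj;     /* embedded, as in the driver */
    };

    int main(void)
    {
        struct pool_model pool = { .active = 1, .size = 512, .buff_size = 2048 };
        struct kobj_model *kobj = &pool.kobj;   /* what sysfs passes in */

        struct pool_model *p = container_of(kobj, struct pool_model, kobj);
        printf("%d\n", p->active);              /* prints 1 */
        return 0;
    }
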
1633 struct ibmveth_buff_pool *pool = container_of(kobj, in veth_pool_store() local
1643 if (value && !pool->active) { in veth_pool_store()
1645 if (ibmveth_alloc_buffer_pool(pool)) { in veth_pool_store()
1650 pool->active = 1; in veth_pool_store()
1657 pool->active = 1; in veth_pool_store()
1659 } else if (!value && pool->active) { in veth_pool_store()
1665 if (pool == &adapter->rx_buff_pool[i]) in veth_pool_store()
1681 pool->active = 0; in veth_pool_store()
1686 pool->active = 0; in veth_pool_store()
1696 pool->size = value; in veth_pool_store()
1700 pool->size = value; in veth_pool_store()
1711 pool->buff_size = value; in veth_pool_store()
1715 pool->buff_size = value; in veth_pool_store()
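
The store path above has two assignment sites per attribute because a running device must be closed, updated, and reopened, while a stopped one is updated directly; activating a pool allocates its arrays first, and deactivation is refused if it would leave no pool large enough for the current MTU (the loop over rx_buff_pool[i] at source line 1665). A hedged sketch of the active-toggle logic only; netdev_running and the injected pool_alloc are stand-ins:

    struct pool { int active; /* ... */ };

    static int pool_set_active(struct pool *p, int value, int netdev_running,
                               int (*pool_alloc)(struct pool *))
    {
        if (value && !p->active) {
            if (netdev_running && pool_alloc(p))
                return -1;      /* allocation failed: pool stays inactive */
            p->active = 1;      /* running: device is reopened around this;
                                   stopped: flag is simply set */
        } else if (!value && p->active) {
            /* the driver first verifies some other active pool still
               fits the current MTU before allowing this */
            p->active = 0;
        }
        return 0;
    }
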