Lines Matching refs:pool (ibmveth driver buffer-pool code; each entry shows the source line number, the matched line, and the enclosing function)
143 static void ibmveth_init_buffer_pool(struct ibmveth_buff_pool *pool, in ibmveth_init_buffer_pool() argument
147 pool->size = pool_size; in ibmveth_init_buffer_pool()
148 pool->index = pool_index; in ibmveth_init_buffer_pool()
149 pool->buff_size = buff_size; in ibmveth_init_buffer_pool()
150 pool->threshold = pool_size * 7 / 8; in ibmveth_init_buffer_pool()
151 pool->active = pool_active; in ibmveth_init_buffer_pool()
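Taken together, the fields touched throughout this listing imply roughly the following pool layout. This is a hedged reconstruction assembled from the matches themselves, not the declaration as it appears in the driver header:

struct ibmveth_buff_pool {
	u32 size;		/* number of buffers in the pool */
	u32 index;		/* pool number, packed into the buffer correlator */
	u32 buff_size;		/* byte size of each receive buffer */
	u32 threshold;		/* refill when available drops below 7/8 of size */
	atomic_t available;	/* buffers currently posted to the hypervisor */
	u32 consumer_index;	/* next free_map slot to take an index from */
	u32 producer_index;	/* next free_map slot to return an index to */
	u16 *free_map;		/* ring of free buffer indices */
	dma_addr_t *dma_addr;	/* per-buffer DMA handles */
	struct sk_buff **skbuff;	/* per-buffer skbs */
	int active;		/* pool enabled (toggled via sysfs, see below) */
	struct kobject kobj;	/* implied by container_of() in veth_pool_show() */
};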
155 static int ibmveth_alloc_buffer_pool(struct ibmveth_buff_pool *pool) in ibmveth_alloc_buffer_pool() argument
159 pool->free_map = kmalloc(sizeof(u16) * pool->size, GFP_KERNEL); in ibmveth_alloc_buffer_pool()
161 if (!pool->free_map) in ibmveth_alloc_buffer_pool()
164 pool->dma_addr = kmalloc(sizeof(dma_addr_t) * pool->size, GFP_KERNEL); in ibmveth_alloc_buffer_pool()
165 if (!pool->dma_addr) { in ibmveth_alloc_buffer_pool()
166 kfree(pool->free_map); in ibmveth_alloc_buffer_pool()
167 pool->free_map = NULL; in ibmveth_alloc_buffer_pool()
171 pool->skbuff = kcalloc(pool->size, sizeof(void *), GFP_KERNEL); in ibmveth_alloc_buffer_pool()
173 if (!pool->skbuff) { in ibmveth_alloc_buffer_pool()
174 kfree(pool->dma_addr); in ibmveth_alloc_buffer_pool()
175 pool->dma_addr = NULL; in ibmveth_alloc_buffer_pool()
177 kfree(pool->free_map); in ibmveth_alloc_buffer_pool()
178 pool->free_map = NULL; in ibmveth_alloc_buffer_pool()
182 memset(pool->dma_addr, 0, sizeof(dma_addr_t) * pool->size); in ibmveth_alloc_buffer_pool()
184 for (i = 0; i < pool->size; ++i) in ibmveth_alloc_buffer_pool()
185 pool->free_map[i] = i; in ibmveth_alloc_buffer_pool()
187 atomic_set(&pool->available, 0); in ibmveth_alloc_buffer_pool()
188 pool->producer_index = 0; in ibmveth_alloc_buffer_pool()
189 pool->consumer_index = 0; in ibmveth_alloc_buffer_pool()
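Pieced together, the allocation path reads roughly as below. The lines elided from the listing (return statements, blank lines) are filled in with the obvious values and are assumptions:

static int ibmveth_alloc_buffer_pool(struct ibmveth_buff_pool *pool)
{
	int i;

	pool->free_map = kmalloc(sizeof(u16) * pool->size, GFP_KERNEL);
	if (!pool->free_map)
		return -1;

	pool->dma_addr = kmalloc(sizeof(dma_addr_t) * pool->size, GFP_KERNEL);
	if (!pool->dma_addr) {
		kfree(pool->free_map);
		pool->free_map = NULL;
		return -1;
	}

	pool->skbuff = kcalloc(pool->size, sizeof(void *), GFP_KERNEL);
	if (!pool->skbuff) {
		kfree(pool->dma_addr);
		pool->dma_addr = NULL;
		kfree(pool->free_map);
		pool->free_map = NULL;
		return -1;
	}

	memset(pool->dma_addr, 0, sizeof(dma_addr_t) * pool->size);

	/* initially every slot of the free map holds its own index */
	for (i = 0; i < pool->size; ++i)
		pool->free_map[i] = i;

	atomic_set(&pool->available, 0);
	pool->producer_index = 0;
	pool->consumer_index = 0;

	return 0;
}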
206 struct ibmveth_buff_pool *pool) in ibmveth_replenish_buffer_pool() argument
209 u32 count = pool->size - atomic_read(&pool->available); in ibmveth_replenish_buffer_pool()
222 skb = netdev_alloc_skb(adapter->netdev, pool->buff_size); in ibmveth_replenish_buffer_pool()
231 free_index = pool->consumer_index; in ibmveth_replenish_buffer_pool()
232 pool->consumer_index++; in ibmveth_replenish_buffer_pool()
233 if (pool->consumer_index >= pool->size) in ibmveth_replenish_buffer_pool()
234 pool->consumer_index = 0; in ibmveth_replenish_buffer_pool()
235 index = pool->free_map[free_index]; in ibmveth_replenish_buffer_pool()
238 BUG_ON(pool->skbuff[index] != NULL); in ibmveth_replenish_buffer_pool()
241 pool->buff_size, DMA_FROM_DEVICE); in ibmveth_replenish_buffer_pool()
246 pool->free_map[free_index] = IBM_VETH_INVALID_MAP; in ibmveth_replenish_buffer_pool()
247 pool->dma_addr[index] = dma_addr; in ibmveth_replenish_buffer_pool()
248 pool->skbuff[index] = skb; in ibmveth_replenish_buffer_pool()
250 correlator = ((u64)pool->index << 32) | index; in ibmveth_replenish_buffer_pool()
253 desc.fields.flags_len = IBMVETH_BUF_VALID | pool->buff_size; in ibmveth_replenish_buffer_pool()
257 unsigned int len = min(pool->buff_size, in ibmveth_replenish_buffer_pool()
274 atomic_add(buffers_added, &(pool->available)); in ibmveth_replenish_buffer_pool()
278 pool->free_map[free_index] = index; in ibmveth_replenish_buffer_pool()
279 pool->skbuff[index] = NULL; in ibmveth_replenish_buffer_pool()
280 if (pool->consumer_index == 0) in ibmveth_replenish_buffer_pool()
281 pool->consumer_index = pool->size - 1; in ibmveth_replenish_buffer_pool()
283 pool->consumer_index--; in ibmveth_replenish_buffer_pool()
286 pool->dma_addr[index], pool->buff_size, in ibmveth_replenish_buffer_pool()
292 atomic_add(buffers_added, &(pool->available)); in ibmveth_replenish_buffer_pool()
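The replenish loop runs a ring over free_map: consumer_index claims a slot moving forward (lines 231-235) and the failure path at lines 278-283 walks it back. Each buffer posted to the hypervisor is tagged with a 64-bit correlator that packs the pool number into the high word and the buffer index into the low word; the remove/get/recycle helpers below unpack it the same way. A minimal sketch of the encoding and decoding (the mask constant on the decode side is an assumption):

/* encode: pool number in the high 32 bits, buffer index in the low 32 */
u64 correlator = ((u64)pool->index << 32) | index;

/* decode, as done by the rxq helpers below */
unsigned int pool_nr = correlator >> 32;
unsigned int buf_idx = correlator & 0xffffffffUL;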
315 struct ibmveth_buff_pool *pool = &adapter->rx_buff_pool[i]; in ibmveth_replenish_task() local
317 if (pool->active && in ibmveth_replenish_task()
318 (atomic_read(&pool->available) < pool->threshold)) in ibmveth_replenish_task()
319 ibmveth_replenish_buffer_pool(adapter, pool); in ibmveth_replenish_task()
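The task-level entry point simply walks the pool array and refills any active pool that has dropped below its 7/8 threshold. A minimal sketch; the iteration order (largest pool first) and the absence of statistics bookkeeping are assumptions, since only the condition and the call are visible in the matches:

static void ibmveth_replenish_task(struct ibmveth_adapter *adapter)
{
	int i;

	for (i = IBMVETH_NUM_BUFF_POOLS - 1; i >= 0; i--) {
		struct ibmveth_buff_pool *pool = &adapter->rx_buff_pool[i];

		/* refill only active pools that fell below threshold */
		if (pool->active &&
		    (atomic_read(&pool->available) < pool->threshold))
			ibmveth_replenish_buffer_pool(adapter, pool);
	}
}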
327 struct ibmveth_buff_pool *pool) in ibmveth_free_buffer_pool() argument
331 kfree(pool->free_map); in ibmveth_free_buffer_pool()
332 pool->free_map = NULL; in ibmveth_free_buffer_pool()
334 if (pool->skbuff && pool->dma_addr) { in ibmveth_free_buffer_pool()
335 for (i = 0; i < pool->size; ++i) { in ibmveth_free_buffer_pool()
336 struct sk_buff *skb = pool->skbuff[i]; in ibmveth_free_buffer_pool()
339 pool->dma_addr[i], in ibmveth_free_buffer_pool()
340 pool->buff_size, in ibmveth_free_buffer_pool()
343 pool->skbuff[i] = NULL; in ibmveth_free_buffer_pool()
348 if (pool->dma_addr) { in ibmveth_free_buffer_pool()
349 kfree(pool->dma_addr); in ibmveth_free_buffer_pool()
350 pool->dma_addr = NULL; in ibmveth_free_buffer_pool()
353 if (pool->skbuff) { in ibmveth_free_buffer_pool()
354 kfree(pool->skbuff); in ibmveth_free_buffer_pool()
355 pool->skbuff = NULL; in ibmveth_free_buffer_pool()
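Teardown frees the free map first, then unmaps and releases any skbs the pool still holds, then drops the two parallel arrays. A sketch assembled from the matches; the unmap/free calls in the loop body and the device pointer are assumptions inferred from the surrounding fragments:

static void ibmveth_free_buffer_pool(struct ibmveth_adapter *adapter,
				     struct ibmveth_buff_pool *pool)
{
	int i;

	kfree(pool->free_map);
	pool->free_map = NULL;

	/* unmap and free any skbs still parked in the pool */
	if (pool->skbuff && pool->dma_addr) {
		for (i = 0; i < pool->size; ++i) {
			struct sk_buff *skb = pool->skbuff[i];
			if (skb) {
				dma_unmap_single(&adapter->vdev->dev,
						 pool->dma_addr[i],
						 pool->buff_size,
						 DMA_FROM_DEVICE);
				dev_kfree_skb_any(skb);
				pool->skbuff[i] = NULL;
			}
		}
	}

	if (pool->dma_addr) {
		kfree(pool->dma_addr);
		pool->dma_addr = NULL;
	}

	if (pool->skbuff) {
		kfree(pool->skbuff);
		pool->skbuff = NULL;
	}
}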
363 unsigned int pool = correlator >> 32; in ibmveth_remove_buffer_from_pool() local
368 BUG_ON(pool >= IBMVETH_NUM_BUFF_POOLS); in ibmveth_remove_buffer_from_pool()
369 BUG_ON(index >= adapter->rx_buff_pool[pool].size); in ibmveth_remove_buffer_from_pool()
371 skb = adapter->rx_buff_pool[pool].skbuff[index]; in ibmveth_remove_buffer_from_pool()
375 adapter->rx_buff_pool[pool].skbuff[index] = NULL; in ibmveth_remove_buffer_from_pool()
378 adapter->rx_buff_pool[pool].dma_addr[index], in ibmveth_remove_buffer_from_pool()
379 adapter->rx_buff_pool[pool].buff_size, in ibmveth_remove_buffer_from_pool()
382 free_index = adapter->rx_buff_pool[pool].producer_index; in ibmveth_remove_buffer_from_pool()
383 adapter->rx_buff_pool[pool].producer_index++; in ibmveth_remove_buffer_from_pool()
384 if (adapter->rx_buff_pool[pool].producer_index >= in ibmveth_remove_buffer_from_pool()
385 adapter->rx_buff_pool[pool].size) in ibmveth_remove_buffer_from_pool()
386 adapter->rx_buff_pool[pool].producer_index = 0; in ibmveth_remove_buffer_from_pool()
387 adapter->rx_buff_pool[pool].free_map[free_index] = index; in ibmveth_remove_buffer_from_pool()
391 atomic_dec(&(adapter->rx_buff_pool[pool].available)); in ibmveth_remove_buffer_from_pool()
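Returning a buffer is the mirror image of claiming one: the freed index goes back into free_map at producer_index, which wraps forward, while the consumer side (lines 231-234 above) wraps the other way. A condensed sketch with a local pool pointer; the p alias and the memory barrier before the count is dropped are introduced here for readability and are not taken verbatim from the listing:

struct ibmveth_buff_pool *p = &adapter->rx_buff_pool[pool];
u64 free_index;

p->skbuff[index] = NULL;
dma_unmap_single(&adapter->vdev->dev, p->dma_addr[index],
		 p->buff_size, DMA_FROM_DEVICE);

/* hand the index back to the ring at the producer side */
free_index = p->producer_index;
p->producer_index++;
if (p->producer_index >= p->size)
	p->producer_index = 0;
p->free_map[free_index] = index;

mb();			/* publish the slot before dropping the count (assumed) */
atomic_dec(&p->available);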
398 unsigned int pool = correlator >> 32; in ibmveth_rxq_get_buffer() local
401 BUG_ON(pool >= IBMVETH_NUM_BUFF_POOLS); in ibmveth_rxq_get_buffer()
402 BUG_ON(index >= adapter->rx_buff_pool[pool].size); in ibmveth_rxq_get_buffer()
404 return adapter->rx_buff_pool[pool].skbuff[index]; in ibmveth_rxq_get_buffer()
412 unsigned int pool = correlator >> 32; in ibmveth_rxq_recycle_buffer() local
418 BUG_ON(pool >= IBMVETH_NUM_BUFF_POOLS); in ibmveth_rxq_recycle_buffer()
419 BUG_ON(index >= adapter->rx_buff_pool[pool].size); in ibmveth_rxq_recycle_buffer()
421 if (!adapter->rx_buff_pool[pool].active) { in ibmveth_rxq_recycle_buffer()
423 ibmveth_free_buffer_pool(adapter, &adapter->rx_buff_pool[pool]); in ibmveth_rxq_recycle_buffer()
428 adapter->rx_buff_pool[pool].buff_size; in ibmveth_rxq_recycle_buffer()
429 desc.fields.address = adapter->rx_buff_pool[pool].dma_addr[index]; in ibmveth_rxq_recycle_buffer()
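Recycling allocates nothing: it rebuilds a descriptor from the stored DMA mapping and hands the very same buffer back to the hypervisor, unless the pool was deactivated in the meantime, in which case the buffer is removed and the now-idle pool freed (line 423). A sketch of the descriptor rebuild; the union ibmveth_buf_desc layout is inferred from the flags_len/address fields used at lines 253 and 428-429:

union ibmveth_buf_desc desc;

/* same valid-flag/length encoding as the replenish path at line 253 */
desc.fields.flags_len = IBMVETH_BUF_VALID |
			adapter->rx_buff_pool[pool].buff_size;
desc.fields.address = adapter->rx_buff_pool[pool].dma_addr[index];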
1487 struct ibmveth_buff_pool *pool = container_of(kobj, in veth_pool_show() local
1492 return sprintf(buf, "%d\n", pool->active); in veth_pool_show()
1494 return sprintf(buf, "%d\n", pool->size); in veth_pool_show()
1496 return sprintf(buf, "%d\n", pool->buff_size); in veth_pool_show()
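Each pool is exposed as a kobject with three attributes, and show just prints the matching field. A sketch of the dispatch; the attribute identifiers (veth_active_attr and friends) are assumptions about the lines elided from the listing:

static ssize_t veth_pool_show(struct kobject *kobj,
			      struct attribute *attr, char *buf)
{
	struct ibmveth_buff_pool *pool = container_of(kobj,
						      struct ibmveth_buff_pool,
						      kobj);

	if (attr == &veth_active_attr)
		return sprintf(buf, "%d\n", pool->active);
	else if (attr == &veth_num_attr)
		return sprintf(buf, "%d\n", pool->size);
	else if (attr == &veth_size_attr)
		return sprintf(buf, "%d\n", pool->buff_size);
	return 0;
}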
1503 struct ibmveth_buff_pool *pool = container_of(kobj, in veth_pool_store() local
1513 if (value && !pool->active) { in veth_pool_store()
1515 if (ibmveth_alloc_buffer_pool(pool)) { in veth_pool_store()
1520 pool->active = 1; in veth_pool_store()
1527 pool->active = 1; in veth_pool_store()
1529 } else if (!value && pool->active) { in veth_pool_store()
1535 if (pool == &adapter->rx_buff_pool[i]) in veth_pool_store()
1551 pool->active = 0; in veth_pool_store()
1556 pool->active = 0; in veth_pool_store()
1566 pool->size = value; in veth_pool_store()
1570 pool->size = value; in veth_pool_store()
1581 pool->buff_size = value; in veth_pool_store()
1585 pool->buff_size = value; in veth_pool_store()
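All three writable attributes follow one pattern, which explains the paired assignments above (e.g. lines 1566/1570 and 1581/1585): if the interface is up, the device is quiesced, the field updated, and the device reopened so the change takes effect; otherwise the field is written directly. A sketch of that pattern for the pool size, assuming ibmveth_open()/ibmveth_close() as the quiesce helpers and eliding input validation:

long rc;

if (netif_running(netdev)) {
	/* restart the device so the new pool size takes effect */
	ibmveth_close(netdev);
	pool->size = value;
	if ((rc = ibmveth_open(netdev)))
		return rc;
} else {
	pool->size = value;
}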