
Searched refs:kiblnd_tunables (Results 1 – 4 of 4) sorted by relevance

/linux-4.4.14/drivers/staging/lustre/lnet/klnds/o2iblnd/
o2iblnd_modparams.c
146 kib_tunables_t kiblnd_tunables = { variable
175 if (kiblnd_translate_mtu(*kiblnd_tunables.kib_ib_mtu) < 0) { in kiblnd_tunables_init()
177 *kiblnd_tunables.kib_ib_mtu); in kiblnd_tunables_init()
181 if (*kiblnd_tunables.kib_peertxcredits < IBLND_CREDITS_DEFAULT) in kiblnd_tunables_init()
182 *kiblnd_tunables.kib_peertxcredits = IBLND_CREDITS_DEFAULT; in kiblnd_tunables_init()
184 if (*kiblnd_tunables.kib_peertxcredits > IBLND_CREDITS_MAX) in kiblnd_tunables_init()
185 *kiblnd_tunables.kib_peertxcredits = IBLND_CREDITS_MAX; in kiblnd_tunables_init()
187 if (*kiblnd_tunables.kib_peertxcredits > *kiblnd_tunables.kib_credits) in kiblnd_tunables_init()
188 *kiblnd_tunables.kib_peertxcredits = *kiblnd_tunables.kib_credits; in kiblnd_tunables_init()
190 if (*kiblnd_tunables.kib_peercredits_hiw < *kiblnd_tunables.kib_peertxcredits / 2) in kiblnd_tunables_init()
[all …]
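
The o2iblnd_modparams.c hits above show the credit clamping done in kiblnd_tunables_init(): kib_peertxcredits is forced into [IBLND_CREDITS_DEFAULT, IBLND_CREDITS_MAX], capped by the global kib_credits pool, and kib_peercredits_hiw is kept at or above half of it. Below is a minimal user-space sketch of that pattern; the numeric values, variable names, and the body of the truncated hiw branch are placeholders/assumptions, not the module's actual defaults.

/*
 * Standalone sketch of the clamping pattern visible in
 * kiblnd_tunables_init() above.  All constants and starting values
 * are illustrative placeholders.
 */
#include <stdio.h>

#define IBLND_CREDITS_DEFAULT	8	/* placeholder */
#define IBLND_CREDITS_MAX	4096	/* placeholder */

static int credits          = 256;	/* stands in for *kib_credits */
static int peer_credits     = 100000;	/* stands in for *kib_peertxcredits */
static int peer_credits_hiw = 0;	/* stands in for *kib_peercredits_hiw */

static void clamp_credit_tunables(void)
{
	if (peer_credits < IBLND_CREDITS_DEFAULT)
		peer_credits = IBLND_CREDITS_DEFAULT;
	if (peer_credits > IBLND_CREDITS_MAX)
		peer_credits = IBLND_CREDITS_MAX;
	if (peer_credits > credits)
		peer_credits = credits;		/* never exceed the global pool */
	if (peer_credits_hiw < peer_credits / 2)
		peer_credits_hiw = peer_credits / 2; /* assumed body of the truncated branch */
}

int main(void)
{
	clamp_credit_tunables();
	printf("peer_credits=%d hiw=%d\n", peer_credits, peer_credits_hiw);
	return 0;
}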
o2iblnd.h
111 extern kib_tunables_t kiblnd_tunables;
121 *kiblnd_tunables.kib_peertxcredits) /* # messages/RDMAs in-flight */
124 *kiblnd_tunables.kib_peercredits_hiw) /* when eagerly to return credits */
133 if (*kiblnd_tunables.kib_concurrent_sends > IBLND_MSG_QUEUE_SIZE_V1 * 2) in kiblnd_concurrent_sends_v1()
136 if (*kiblnd_tunables.kib_concurrent_sends < IBLND_MSG_QUEUE_SIZE_V1 / 2) in kiblnd_concurrent_sends_v1()
139 return *kiblnd_tunables.kib_concurrent_sends; in kiblnd_concurrent_sends_v1()
144 *kiblnd_tunables.kib_concurrent_sends)
151 #define IBLND_CFG_RDMA_FRAGS (*kiblnd_tunables.kib_map_on_demand != 0 ? \
152 *kiblnd_tunables.kib_map_on_demand : \
166 #define IBLND_TX_MSGS() (*kiblnd_tunables.kib_ntx)
[all …]
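
The o2iblnd.h hits above show kiblnd_concurrent_sends_v1() clamping the kib_concurrent_sends tunable around IBLND_MSG_QUEUE_SIZE_V1. A standalone sketch of that clamp follows; the return values of the truncated branches and the queue-size constant are assumptions here, not the driver's exact figures.

/*
 * Sketch of the clamp in kiblnd_concurrent_sends_v1() above.
 * IBLND_MSG_QUEUE_SIZE_V1 is a placeholder value.
 */
#include <stdio.h>

#define IBLND_MSG_QUEUE_SIZE_V1	8	/* placeholder */

static int concurrent_sends = 64;	/* stands in for *kib_concurrent_sends */

static int concurrent_sends_v1(void)
{
	if (concurrent_sends > IBLND_MSG_QUEUE_SIZE_V1 * 2)
		return IBLND_MSG_QUEUE_SIZE_V1 * 2;	/* assumed upper clamp */
	if (concurrent_sends < IBLND_MSG_QUEUE_SIZE_V1 / 2)
		return IBLND_MSG_QUEUE_SIZE_V1 / 2;	/* assumed lower clamp */
	return concurrent_sends;
}

int main(void)
{
	printf("effective concurrent sends: %d\n", concurrent_sends_v1());
	return 0;
}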
o2iblnd.c
205 if (*kiblnd_tunables.kib_cksum) { in kiblnd_pack_msg()
603 mtu = kiblnd_translate_mtu(*kiblnd_tunables.kib_ib_mtu); in kiblnd_setup_mtu_locked()
1320 if (*kiblnd_tunables.kib_map_on_demand > 0 && in kiblnd_find_rd_dma_mr()
1321 *kiblnd_tunables.kib_map_on_demand <= rd->rd_nfrags) in kiblnd_find_rd_dma_mr()
1371 int size = *kiblnd_tunables.kib_fmr_pool_size / ncpts; in kiblnd_fmr_pool_size()
1378 int size = *kiblnd_tunables.kib_fmr_flush_trigger / ncpts; in kiblnd_fmr_flush_trigger()
1398 .cache = !!*kiblnd_tunables.kib_fmr_cache}; in kiblnd_create_fmr_pool()
1842 int ntx = *kiblnd_tunables.kib_ntx / ncpts; in kiblnd_tx_pool_size()
1978 if (*kiblnd_tunables.kib_map_on_demand == 0 && in kiblnd_net_init_pools()
1986 if (*kiblnd_tunables.kib_fmr_pool_size < in kiblnd_net_init_pools()
[all …]
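
The o2iblnd.c hits above show how pool-sizing helpers such as kiblnd_fmr_pool_size(), kiblnd_fmr_flush_trigger() and kiblnd_tx_pool_size() divide a global tunable across the number of CPU partitions (ncpts). The sketch below illustrates that per-CPT division; the tunable values and the floor-to-one guard are assumptions, not the driver's actual behaviour.

/*
 * Sketch of the per-CPT sizing pattern shown above: a global tunable
 * split evenly across ncpts partitions.
 */
#include <stdio.h>

static int fmr_pool_size = 512;	/* stands in for *kib_fmr_pool_size */
static int ntx           = 512;	/* stands in for *kib_ntx */

static int per_cpt(int total, int ncpts)
{
	int size = total / ncpts;

	return size > 0 ? size : 1;	/* assumed floor; the real code may differ */
}

int main(void)
{
	int ncpts = 4;			/* example partition count */

	printf("fmr pool per cpt: %d\n", per_cpt(fmr_pool_size, ncpts));
	printf("tx per cpt:       %d\n", per_cpt(ntx, ncpts));
	return 0;
}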
o2iblnd_cb.c
1140 tx->tx_deadline = jiffies + (*kiblnd_tunables.kib_timeout * HZ); in kiblnd_queue_tx_locked()
1260 dstaddr.sin_port = htons(*kiblnd_tunables.kib_service); in kiblnd_connect_peer()
1265 if (*kiblnd_tunables.kib_use_priv_port) { in kiblnd_connect_peer()
1267 *kiblnd_tunables.kib_timeout * 1000); in kiblnd_connect_peer()
1272 *kiblnd_tunables.kib_timeout * 1000); in kiblnd_connect_peer()
2170 if (*kiblnd_tunables.kib_require_priv_port && in kiblnd_passive_connect()
2382 cp.retry_count = *kiblnd_tunables.kib_retry_count; in kiblnd_passive_connect()
2383 cp.rnr_retry_count = *kiblnd_tunables.kib_rnr_retry_count; in kiblnd_passive_connect()
2493 *kiblnd_tunables.kib_service); in kiblnd_rejected()
2760 cp.retry_count = *kiblnd_tunables.kib_retry_count; in kiblnd_active_connect()
[all …]
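
The o2iblnd_cb.c hits above show the kib_timeout tunable (in seconds) used two ways: as a tx deadline of jiffies + timeout * HZ in kiblnd_queue_tx_locked(), and as a millisecond value (timeout * 1000) on the connect paths. A user-space sketch of both conversions follows; jiffies, HZ and the timeout default are faked so it builds outside the kernel.

/*
 * Sketch of the two timeout conversions visible above.
 * HZ, jiffies and kib_timeout are placeholders here.
 */
#include <stdio.h>

#define HZ 250				/* placeholder tick rate */

static unsigned long jiffies = 100000UL;	/* fake current tick count */
static int kib_timeout = 50;			/* seconds, placeholder default */

int main(void)
{
	unsigned long tx_deadline = jiffies + kib_timeout * HZ;	/* jiffies */
	int cm_timeout_ms = kib_timeout * 1000;				/* milliseconds */

	printf("deadline=%lu jiffies, cm timeout=%d ms\n", tx_deadline, cm_timeout_ms);
	return 0;
}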