mru 118 drivers/net/caif/caif_virtio.c u32 mru;
mru 225 drivers/net/caif/caif_virtio.c if (frm_len > cfv->mru || frm_len <= cfv->rx_hr + cfv->rx_tr) {
mru 228 drivers/net/caif/caif_virtio.c frm_len, cfv->mru, cfv->rx_hr,
mru 695 drivers/net/caif/caif_virtio.c &cfv->mru);
mru 702 drivers/net/caif/caif_virtio.c cfv->mru = CFV_DEF_MTU_SIZE;
mru 5044 drivers/net/ethernet/broadcom/bnxt/bnxt.c req.mru = cpu_to_le16(bp->dev->mtu + ETH_HLEN + ETH_FCS_LEN +
mru 1181 drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h __le16 mru;
mru 1318 drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h __le16 mru;
mru 2733 drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h __le16 mru;
mru 4794 drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h __le16 mru;
mru 4828 drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h __le16 mru;
mru 647 drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c req.mru = cpu_to_le16(mtu);
mru 145 drivers/net/ppp/bsd_comp.c unsigned int mru; /* size of receive (decompress) bufr */
mru 178 drivers/net/ppp/bsd_comp.c int opt_len, int unit, int opthdr, int mru,
mru 495 drivers/net/ppp/bsd_comp.c db->mru = 0;
mru 513 drivers/net/ppp/bsd_comp.c int opt_len, int unit, int opthdr, int mru,
mru 46 drivers/net/ppp/ppp_async.c int mru;
mru 171 drivers/net/ppp/ppp_async.c ap->mru = PPP_MRU;
mru 466 drivers/net/ppp/ppp_async.c if (put_user(ap->mru, p))
mru 475 drivers/net/ppp/ppp_async.c ap->mru = val;
mru 874 drivers/net/ppp/ppp_async.c skb = dev_alloc_skb(ap->mru + PPP_HDRLEN + 2);
mru 999 drivers/net/ppp/ppp_async.c ap->mru = val;
mru 28 drivers/net/ppp/ppp_deflate.c int mru;
mru 45 drivers/net/ppp/ppp_deflate.c int unit, int hdrlen, int mru, int debug);
mru 353 drivers/net/ppp/ppp_deflate.c int unit, int hdrlen, int mru, int debug)
mru 368 drivers/net/ppp/ppp_deflate.c state->mru = mru;
mru 121 drivers/net/ppp/ppp_generic.c int mru; /* max receive unit 60 */
mru 654 drivers/net/ppp/ppp_generic.c ppp->mru = val;
mru 1016 drivers/net/ppp/ppp_generic.c ppp->mru = PPP_MRU;
mru 2257 drivers/net/ppp/ppp_generic.c obuff_size = ppp->mru + PPP_HDRLEN + 1;
mru 2260 drivers/net/ppp/ppp_generic.c obuff_size = ppp->mru + PPP_HDRLEN;
mru 2862 drivers/net/ppp/ppp_generic.c ppp->file.index, 0, ppp->mru, ppp->debug)) {
mru 415 drivers/net/ppp/ppp_mppe.c int hdrlen, int mru, int debug)
mru 56 drivers/net/ppp/ppp_synctty.c int mru;
mru 172 drivers/net/ppp/ppp_synctty.c ap->mru = PPP_MRU;
mru 459 drivers/net/ppp/ppp_synctty.c if (put_user(ap->mru, (int __user *) argp))
mru 468 drivers/net/ppp/ppp_synctty.c ap->mru = val;
mru 680 drivers/net/ppp/ppp_synctty.c skb = dev_alloc_skb(ap->mru + PPP_HDRLEN + 2);
mru 58 drivers/tty/ipwireless/network.c int mru;
mru 222 drivers/tty/ipwireless/network.c if (put_user(network->mru, user_arg))
mru 232 drivers/tty/ipwireless/network.c network->mru = val;
mru 273 drivers/tty/ipwireless/network.c network->mru = PPP_MRU;
mru 516 drivers/tty/ipwireless/network.c return network->mru;
mru 224 drivers/tty/n_gsm.c unsigned int mru;
mru 1881 drivers/tty/n_gsm.c if (gsm->len > gsm->mru) {
mru 1899 drivers/tty/n_gsm.c if (gsm->len > gsm->mru) {
mru 1992 drivers/tty/n_gsm.c if (gsm->count > gsm->mru) { /* Allow one for the FCS */
mru 2217 drivers/tty/n_gsm.c gsm->mru = 64; /* Default to encoding 1 so these should be 64 */
mru 2240 drivers/tty/n_gsm.c c->mru = gsm->mru;
mru 2254 drivers/tty/n_gsm.c if (c->mru > MAX_MRU || c->mtu > MAX_MTU || c->mru < 8 || c->mtu < 8)
mru 2280 drivers/tty/n_gsm.c if (c->mru != gsm->mru)
mru 2302 drivers/tty/n_gsm.c gsm->mru = c->mru;
mru 23 fs/xfs/xfs_filestream.c struct xfs_mru_cache_elem mru;
mru 112 fs/xfs/xfs_filestream.c struct xfs_mru_cache_elem *mru)
mru 116 fs/xfs/xfs_filestream.c container_of(mru, struct xfs_fstrm_item, mru);
mru 119 fs/xfs/xfs_filestream.c trace_xfs_filestream_free(mp, mru->key, item->ag);
mru 256 fs/xfs/xfs_filestream.c err = xfs_mru_cache_insert(mp->m_filestream, ip->i_ino, &item->mru);
mru 309 fs/xfs/xfs_filestream.c struct xfs_mru_cache_elem *mru;
mru 317 fs/xfs/xfs_filestream.c mru = xfs_mru_cache_lookup(mp->m_filestream, pip->i_ino);
mru 318 fs/xfs/xfs_filestream.c if (mru) {
mru 319 fs/xfs/xfs_filestream.c ag = container_of(mru, struct xfs_fstrm_item, mru)->ag;
mru 362 fs/xfs/xfs_filestream.c struct xfs_mru_cache_elem *mru;
mru 370 fs/xfs/xfs_filestream.c mru = xfs_mru_cache_remove(mp->m_filestream, pip->i_ino);
mru 371 fs/xfs/xfs_filestream.c if (mru) {
mru 373 fs/xfs/xfs_filestream.c container_of(mru, struct xfs_fstrm_item, mru);
mru 387 fs/xfs/xfs_filestream.c if (mru)
mru 388 fs/xfs/xfs_filestream.c xfs_fstrm_free_func(mp, mru);
mru 127 fs/xfs/xfs_mru_cache.c struct xfs_mru_cache *mru,
mru 135 fs/xfs/xfs_mru_cache.c if (!mru->time_zero)
mru 139 fs/xfs/xfs_mru_cache.c while (mru->time_zero <= now - mru->grp_count * mru->grp_time) {
mru 145 fs/xfs/xfs_mru_cache.c lru_list = mru->lists + mru->lru_grp;
mru 147 fs/xfs/xfs_mru_cache.c list_splice_init(lru_list, mru->reap_list.prev);
mru 153 fs/xfs/xfs_mru_cache.c mru->lru_grp = (mru->lru_grp + 1) % mru->grp_count;
mru 154 fs/xfs/xfs_mru_cache.c mru->time_zero += mru->grp_time;
mru 160 fs/xfs/xfs_mru_cache.c if (++migrated == mru->grp_count) {
mru 161 fs/xfs/xfs_mru_cache.c mru->lru_grp = 0;
mru 162 fs/xfs/xfs_mru_cache.c mru->time_zero = 0;
mru 168 fs/xfs/xfs_mru_cache.c for (grp = 0; grp < mru->grp_count; grp++) {
mru 171 fs/xfs/xfs_mru_cache.c lru_list = mru->lists + ((mru->lru_grp + grp) % mru->grp_count);
mru 173 fs/xfs/xfs_mru_cache.c return mru->time_zero +
mru 174 fs/xfs/xfs_mru_cache.c (mru->grp_count + grp) * mru->grp_time;
mru 178 fs/xfs/xfs_mru_cache.c mru->lru_grp = 0;
mru 179 fs/xfs/xfs_mru_cache.c mru->time_zero = 0;
mru 191 fs/xfs/xfs_mru_cache.c struct xfs_mru_cache *mru,
mru 202 fs/xfs/xfs_mru_cache.c if (!_xfs_mru_cache_migrate(mru, now)) {
mru 203 fs/xfs/xfs_mru_cache.c mru->time_zero = now;
mru 204 fs/xfs/xfs_mru_cache.c if (!mru->queued) {
mru 205 fs/xfs/xfs_mru_cache.c mru->queued = 1;
mru 206 fs/xfs/xfs_mru_cache.c queue_delayed_work(xfs_mru_reap_wq, &mru->work,
mru 207 fs/xfs/xfs_mru_cache.c mru->grp_count * mru->grp_time);
mru 210 fs/xfs/xfs_mru_cache.c grp = (now - mru->time_zero) / mru->grp_time;
mru 211 fs/xfs/xfs_mru_cache.c grp = (mru->lru_grp + grp) % mru->grp_count;
mru 215 fs/xfs/xfs_mru_cache.c list_add_tail(&elem->list_node, mru->lists + grp);
mru 229 fs/xfs/xfs_mru_cache.c struct xfs_mru_cache *mru)
mru 230 fs/xfs/xfs_mru_cache.c __releases(mru->lock) __acquires(mru->lock)
mru 236 fs/xfs/xfs_mru_cache.c list_for_each_entry_safe(elem, next, &mru->reap_list, list_node) {
mru 239 fs/xfs/xfs_mru_cache.c radix_tree_delete(&mru->store, elem->key);
mru 247 fs/xfs/xfs_mru_cache.c spin_unlock(&mru->lock);
mru 251 fs/xfs/xfs_mru_cache.c mru->free_func(mru->data, elem);
mru 254 fs/xfs/xfs_mru_cache.c spin_lock(&mru->lock);
mru 268 fs/xfs/xfs_mru_cache.c struct xfs_mru_cache *mru =
mru 272 fs/xfs/xfs_mru_cache.c ASSERT(mru && mru->lists);
mru 273 fs/xfs/xfs_mru_cache.c if (!mru || !mru->lists)
mru 276 fs/xfs/xfs_mru_cache.c spin_lock(&mru->lock);
mru 277 fs/xfs/xfs_mru_cache.c next = _xfs_mru_cache_migrate(mru, jiffies);
mru 278 fs/xfs/xfs_mru_cache.c _xfs_mru_cache_clear_reap_list(mru);
mru 280 fs/xfs/xfs_mru_cache.c mru->queued = next;
mru 281 fs/xfs/xfs_mru_cache.c if ((mru->queued > 0)) {
mru 287 fs/xfs/xfs_mru_cache.c queue_delayed_work(xfs_mru_reap_wq, &mru->work, next);
mru 290 fs/xfs/xfs_mru_cache.c spin_unlock(&mru->lock);
mru 323 fs/xfs/xfs_mru_cache.c struct xfs_mru_cache *mru = NULL;
mru 336 fs/xfs/xfs_mru_cache.c if (!(mru = kmem_zalloc(sizeof(*mru), 0)))
mru 340 fs/xfs/xfs_mru_cache.c mru->grp_count = grp_count + 1;
mru 341 fs/xfs/xfs_mru_cache.c mru->lists = kmem_zalloc(mru->grp_count * sizeof(*mru->lists), 0);
mru 343 fs/xfs/xfs_mru_cache.c if (!mru->lists) {
mru 348 fs/xfs/xfs_mru_cache.c for (grp = 0; grp < mru->grp_count; grp++)
mru 349 fs/xfs/xfs_mru_cache.c INIT_LIST_HEAD(mru->lists + grp);
mru 355 fs/xfs/xfs_mru_cache.c INIT_RADIX_TREE(&mru->store, GFP_ATOMIC);
mru 356 fs/xfs/xfs_mru_cache.c INIT_LIST_HEAD(&mru->reap_list);
mru 357 fs/xfs/xfs_mru_cache.c spin_lock_init(&mru->lock);
mru 358 fs/xfs/xfs_mru_cache.c INIT_DELAYED_WORK(&mru->work, _xfs_mru_cache_reap);
mru 360 fs/xfs/xfs_mru_cache.c mru->grp_time = grp_time;
mru 361 fs/xfs/xfs_mru_cache.c mru->free_func = free_func;
mru 362 fs/xfs/xfs_mru_cache.c mru->data = data;
mru 363 fs/xfs/xfs_mru_cache.c *mrup = mru;
mru 366 fs/xfs/xfs_mru_cache.c if (err && mru && mru->lists)
mru 367 fs/xfs/xfs_mru_cache.c kmem_free(mru->lists);
mru 368 fs/xfs/xfs_mru_cache.c if (err && mru)
mru 369 fs/xfs/xfs_mru_cache.c kmem_free(mru);
mru 382 fs/xfs/xfs_mru_cache.c struct xfs_mru_cache *mru)
mru 384 fs/xfs/xfs_mru_cache.c if (!mru || !mru->lists)
mru 387 fs/xfs/xfs_mru_cache.c spin_lock(&mru->lock);
mru 388 fs/xfs/xfs_mru_cache.c if (mru->queued) {
mru 389 fs/xfs/xfs_mru_cache.c spin_unlock(&mru->lock);
mru 390 fs/xfs/xfs_mru_cache.c cancel_delayed_work_sync(&mru->work);
mru 391 fs/xfs/xfs_mru_cache.c spin_lock(&mru->lock);
mru 394 fs/xfs/xfs_mru_cache.c _xfs_mru_cache_migrate(mru, jiffies + mru->grp_count * mru->grp_time);
mru 395 fs/xfs/xfs_mru_cache.c _xfs_mru_cache_clear_reap_list(mru);
mru 397 fs/xfs/xfs_mru_cache.c spin_unlock(&mru->lock);
mru 402 fs/xfs/xfs_mru_cache.c struct xfs_mru_cache *mru)
mru 404 fs/xfs/xfs_mru_cache.c if (!mru || !mru->lists)
mru 407 fs/xfs/xfs_mru_cache.c xfs_mru_cache_flush(mru);
mru 409 fs/xfs/xfs_mru_cache.c kmem_free(mru->lists);
mru 410 fs/xfs/xfs_mru_cache.c kmem_free(mru);
mru 420 fs/xfs/xfs_mru_cache.c struct xfs_mru_cache *mru,
mru 426 fs/xfs/xfs_mru_cache.c ASSERT(mru && mru->lists);
mru 427 fs/xfs/xfs_mru_cache.c if (!mru || !mru->lists)
mru 436 fs/xfs/xfs_mru_cache.c spin_lock(&mru->lock);
mru 437 fs/xfs/xfs_mru_cache.c error = radix_tree_insert(&mru->store, key, elem);
mru 440 fs/xfs/xfs_mru_cache.c _xfs_mru_cache_list_insert(mru, elem);
mru 441 fs/xfs/xfs_mru_cache.c spin_unlock(&mru->lock);
mru 454 fs/xfs/xfs_mru_cache.c struct xfs_mru_cache *mru,
mru 459 fs/xfs/xfs_mru_cache.c ASSERT(mru && mru->lists);
mru 460 fs/xfs/xfs_mru_cache.c if (!mru || !mru->lists)
mru 463 fs/xfs/xfs_mru_cache.c spin_lock(&mru->lock);
mru 464 fs/xfs/xfs_mru_cache.c elem = radix_tree_delete(&mru->store, key);
mru 467 fs/xfs/xfs_mru_cache.c spin_unlock(&mru->lock);
mru 478 fs/xfs/xfs_mru_cache.c struct xfs_mru_cache *mru,
mru 483 fs/xfs/xfs_mru_cache.c elem = xfs_mru_cache_remove(mru, key);
mru 485 fs/xfs/xfs_mru_cache.c mru->free_func(mru->data, elem);
mru 510 fs/xfs/xfs_mru_cache.c struct xfs_mru_cache *mru,
mru 515 fs/xfs/xfs_mru_cache.c ASSERT(mru && mru->lists);
mru 516 fs/xfs/xfs_mru_cache.c if (!mru || !mru->lists)
mru 519 fs/xfs/xfs_mru_cache.c spin_lock(&mru->lock);
mru 520 fs/xfs/xfs_mru_cache.c elem = radix_tree_lookup(&mru->store, key);
mru 523 fs/xfs/xfs_mru_cache.c _xfs_mru_cache_list_insert(mru, elem);
mru 526 fs/xfs/xfs_mru_cache.c spin_unlock(&mru->lock);
mru 538 fs/xfs/xfs_mru_cache.c struct xfs_mru_cache *mru)
mru 539 fs/xfs/xfs_mru_cache.c __releases(mru->lock)
mru 541 fs/xfs/xfs_mru_cache.c spin_unlock(&mru->lock);
mru 24 fs/xfs/xfs_mru_cache.h void xfs_mru_cache_destroy(struct xfs_mru_cache *mru);
mru 25 fs/xfs/xfs_mru_cache.h int xfs_mru_cache_insert(struct xfs_mru_cache *mru, unsigned long key,
mru 28 fs/xfs/xfs_mru_cache.h xfs_mru_cache_remove(struct xfs_mru_cache *mru, unsigned long key);
mru 29 fs/xfs/xfs_mru_cache.h void xfs_mru_cache_delete(struct xfs_mru_cache *mru, unsigned long key);
mru 31 fs/xfs/xfs_mru_cache.h xfs_mru_cache_lookup(struct xfs_mru_cache *mru, unsigned long key);
mru 32 fs/xfs/xfs_mru_cache.h void xfs_mru_cache_done(struct xfs_mru_cache *mru);
mru 64 include/linux/ppp-comp.h int opt_len, int unit, int opthdr, int mru,
mru 18 include/uapi/linux/gsmmux.h unsigned int mru;
mru 815 net/openvswitch/actions.c struct sk_buff *skb, u16 mru,
mru 842 net/openvswitch/actions.c IPCB(skb)->frag_max_size = mru;
mru 863 net/openvswitch/actions.c IP6CB(skb)->frag_max_size = mru;
mru 869 net/openvswitch/actions.c ovs_vport_name(vport), ntohs(key->eth.type), mru,
mru 885 net/openvswitch/actions.c u16 mru = OVS_CB(skb)->mru;
mru 895 net/openvswitch/actions.c if (likely(!mru ||
mru 896 net/openvswitch/actions.c (skb->len <= mru + vport->dev->hard_header_len))) {
mru 898 net/openvswitch/actions.c } else if (mru <= vport->dev->mtu) {
mru 901 net/openvswitch/actions.c ovs_fragment(net, vport, skb, mru, key);
mru 921 net/openvswitch/actions.c upcall.mru = OVS_CB(skb)->mru;
mru 507 net/openvswitch/conntrack.c ovs_cb.mru = IPCB(skb)->frag_max_size;
mru 521 net/openvswitch/conntrack.c ovs_cb.mru = IP6CB(skb)->frag_max_size;
mru 237 net/openvswitch/datapath.c upcall.mru = OVS_CB(skb)->mru;
mru 367 net/openvswitch/datapath.c if (upcall_info->mru)
mru 368 net/openvswitch/datapath.c size += nla_total_size(sizeof(upcall_info->mru));
mru 487 net/openvswitch/datapath.c if (upcall_info->mru) {
mru 489 net/openvswitch/datapath.c upcall_info->mru)) {
mru 544 net/openvswitch/datapath.c u16 mru = 0;
mru 565 net/openvswitch/datapath.c mru = nla_get_u16(a[OVS_PACKET_ATTR_MRU]);
mru 568 net/openvswitch/datapath.c OVS_CB(packet)->mru = mru;
mru 99 net/openvswitch/datapath.h u16 mru;
mru 123 net/openvswitch/datapath.h u16 mru;
mru 431 net/openvswitch/vport.c OVS_CB(skb)->mru = 0;
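
The fs/xfs/xfs_mru_cache.h hits above outline the MRU cache API that fs/xfs/xfs_filestream.c consumes: the caller embeds a struct xfs_mru_cache_elem in its own item, inserts it under a key, and gets the element back from lookup or remove. Below is a minimal usage sketch of that pattern, assuming a cache created elsewhere; struct my_item, my_item_add() and my_item_peek() are illustrative names, not kernel symbols, and kzalloc stands in for whatever allocator the real client uses (xfs_filestream.c embeds the element in struct xfs_fstrm_item and keys the cache by inode number).

#include <linux/slab.h>
#include "xfs_mru_cache.h"

/* Illustrative client type: embed the cache element, as xfs_fstrm_item does. */
struct my_item {
	struct xfs_mru_cache_elem	mru;	/* linkage owned by the cache */
	int				value;	/* per-entry payload */
};

/* Insert a new entry under "key"; the cache takes over the embedded element. */
static int
my_item_add(struct xfs_mru_cache *cache, unsigned long key, int value)
{
	struct my_item	*item;

	item = kzalloc(sizeof(*item), GFP_KERNEL);
	if (!item)
		return -ENOMEM;
	item->value = value;
	return xfs_mru_cache_insert(cache, key, &item->mru);
}

/*
 * Look up "key" and read the payload.  On a hit the lookup appears to return
 * with the cache lock held (note the __releases(mru->lock) annotation on
 * xfs_mru_cache_done() above), so the access is bracketed by
 * xfs_mru_cache_done().
 */
static int
my_item_peek(struct xfs_mru_cache *cache, unsigned long key)
{
	struct xfs_mru_cache_elem	*elem;
	int				value = -1;

	elem = xfs_mru_cache_lookup(cache, key);
	if (elem) {
		value = container_of(elem, struct my_item, mru)->value;
		xfs_mru_cache_done(cache);
	}
	return value;
}

Embedding the element keeps the cache intrusive: no separate per-entry allocation is needed, and when an entry ages out the cache's free_func can recover the owning item with container_of(), which is exactly what the xfs_fstrm_free_func() hits at xfs_filestream.c lines 112-119 show.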