nl 44 arch/arm/vfp/vfp.h static inline void add128(u64 *resh, u64 *resl, u64 nh, u64 nl, u64 mh, u64 ml)
nl 50 arch/arm/vfp/vfp.h : "=r" (nl), "=r" (nh)
nl 51 arch/arm/vfp/vfp.h : "0" (nl), "1" (nh), "r" (ml), "r" (mh)
nl 54 arch/arm/vfp/vfp.h *resl = nl;
nl 57 arch/arm/vfp/vfp.h static inline void sub128(u64 *resh, u64 *resl, u64 nh, u64 nl, u64 mh, u64 ml)
nl 63 arch/arm/vfp/vfp.h : "=r" (nl), "=r" (nh)
nl 64 arch/arm/vfp/vfp.h : "0" (nl), "1" (nh), "r" (ml), "r" (mh)
nl 67 arch/arm/vfp/vfp.h *resl = nl;
nl 72 arch/arm/vfp/vfp.h u32 nh, nl, mh, ml;
nl 75 arch/arm/vfp/vfp.h nl = n;
nl 77 arch/arm/vfp/vfp.h rl = (u64)nl * ml;
nl 83 arch/arm/vfp/vfp.h rmb = (u64)nl * mh;
nl 110 arch/arm/vfp/vfp.h static inline u64 vfp_estimate_div128to64(u64 nh, u64 nl, u64 m)
nl 125 arch/arm/vfp/vfp.h sub128(&remh, &reml, nh, nl, termh, terml);
nl 470 arch/powerpc/include/asm/eeh.h int nl)
nl 472 arch/powerpc/include/asm/eeh.h _insl(addr, buf, nl);
nl 473 arch/powerpc/include/asm/eeh.h if (EEH_POSSIBLE_ERROR((*(((u32*)buf)+nl-1)), u32))
nl 199 arch/powerpc/perf/hv-24x7.c unsigned nl = be16_to_cpu(ev->event_name_len);
nl 200 arch/powerpc/perf/hv-24x7.c __be16 *desc_len = (__be16 *)(ev->remainder + nl - 2);
nl 203 arch/powerpc/perf/hv-24x7.c return (char *)ev->remainder + nl;
nl 208 arch/powerpc/perf/hv-24x7.c unsigned nl = be16_to_cpu(ev->event_name_len);
nl 209 arch/powerpc/perf/hv-24x7.c __be16 *desc_len_ = (__be16 *)(ev->remainder + nl - 2);
nl 211 arch/powerpc/perf/hv-24x7.c __be16 *long_desc_len = (__be16 *)(ev->remainder + nl + desc_len - 2);
nl 214 arch/powerpc/perf/hv-24x7.c return (char *)ev->remainder + nl + desc_len;
nl 238 arch/powerpc/perf/hv-24x7.c unsigned nl = be16_to_cpu(ev->event_name_len);
nl 240 arch/powerpc/perf/hv-24x7.c if (nl < 2) {
nl 241 arch/powerpc/perf/hv-24x7.c pr_debug("%s: name length too short: %d", __func__, nl);
nl 245 arch/powerpc/perf/hv-24x7.c if (start + nl > end) {
nl 247 arch/powerpc/perf/hv-24x7.c __func__, start, nl, end);
nl 251 arch/powerpc/perf/hv-24x7.c dl_ = (__be16 *)(ev->remainder + nl - 2);
nl 260 arch/powerpc/perf/hv-24x7.c if (start + nl + dl > end) {
nl 262 arch/powerpc/perf/hv-24x7.c __func__, start, nl, dl, start + nl + dl, end);
nl 266 arch/powerpc/perf/hv-24x7.c ldl_ = (__be16 *)(ev->remainder + nl + dl - 2);
nl 276 arch/powerpc/perf/hv-24x7.c if (start + nl + dl + ldl > end) {
nl 278 arch/powerpc/perf/hv-24x7.c __func__, start, nl, dl, ldl, end);
nl 282 arch/powerpc/perf/hv-24x7.c return start + nl + dl + ldl;
nl 491 arch/powerpc/perf/hv-24x7.c int nl, dl;
nl 492 arch/powerpc/perf/hv-24x7.c char *name = event_name(event, &nl);
nl 499 arch/powerpc/perf/hv-24x7.c return device_str_attr_create(name, nl, nonce, desc, dl);
nl 505 arch/powerpc/perf/hv-24x7.c int nl, dl;
nl 506 arch/powerpc/perf/hv-24x7.c char *name = event_name(event, &nl);
nl 513 arch/powerpc/perf/hv-24x7.c return device_str_attr_create(name, nl, nonce, desc, dl);
nl 530 arch/powerpc/perf/hv-24x7.c int nl;
nl 559 arch/powerpc/perf/hv-24x7.c static int event_uniq_add(struct rb_root *root, const char *name, int nl,
nl 571 arch/powerpc/perf/hv-24x7.c result = ev_uniq_ord(name, nl, domain, it->name, it->nl,
nl 581 arch/powerpc/perf/hv-24x7.c pr_info("found a duplicate event %.*s, ct=%u\n", nl,
nl 593 arch/powerpc/perf/hv-24x7.c .nl = nl,
nl 800 arch/powerpc/perf/hv-24x7.c int nl;
nl 809 arch/powerpc/perf/hv-24x7.c name = event_name(event, &nl);
nl 813 arch/powerpc/perf/hv-24x7.c event_idx, nl, name);
nl 820 arch/powerpc/perf/hv-24x7.c event_idx, nl, name, event->domain);
nl 860 arch/powerpc/perf/hv-24x7.c int nl;
nl 871 arch/powerpc/perf/hv-24x7.c name = event_name(event, &nl);
nl 872 arch/powerpc/perf/hv-24x7.c nonce = event_uniq_add(&ev_uniq, name, nl, event->domain);
nl 877 arch/powerpc/perf/hv-24x7.c event_idx, nl, name);
nl 155 arch/s390/include/asm/sysinfo.h unsigned char nl;
nl 166 arch/s390/include/asm/sysinfo.h unsigned char nl;
nl 172 arch/s390/include/asm/sysinfo.h unsigned char nl;
nl 170 arch/s390/kernel/topology.c if (!tle->nl)
nl 186 arch/s390/kernel/topology.c switch (tle->nl) {
nl 164 arch/sh/kernel/cpu/sh2a/fpu.c unsigned long long mh, ml, nh, nl;
nl 177 arch/sh/kernel/cpu/sh2a/fpu.c nl = ml;
nl 183 arch/sh/kernel/cpu/sh2a/fpu.c while (nl) { nl >>= 1; w++;}
nl 65 drivers/firmware/efi/libstub/efi-stub-helper.c efi_char16_t nl[2] = { '\r', 0 };
nl 66 drivers/firmware/efi/libstub/efi-stub-helper.c efi_char16_printk(sys_table_arg, nl);
nl 512 drivers/md/dm-ioctl.c struct dm_name_list *orig_nl, *nl, *old_nl = NULL;
nl 531 drivers/md/dm-ioctl.c nl = orig_nl = get_result_buffer(param, param_size, &len);
nl 538 drivers/md/dm-ioctl.c nl->dev = 0; /* Flags no data */
nl 546 drivers/md/dm-ioctl.c old_nl->next = (uint32_t) ((void *) nl -
nl 549 drivers/md/dm-ioctl.c nl->dev = huge_encode_dev(disk_devt(disk));
nl 550 drivers/md/dm-ioctl.c nl->next = 0;
nl 551 drivers/md/dm-ioctl.c strcpy(nl->name, hc->name);
nl 553 drivers/md/dm-ioctl.c old_nl = nl;
nl 554 drivers/md/dm-ioctl.c event_nr = align_ptr(nl->name + strlen(hc->name) + 1);
nl 556 drivers/md/dm-ioctl.c nl = align_ptr(event_nr + 1);
nl 563 drivers/md/dm-ioctl.c BUG_ON((char *)nl - (char *)orig_nl != needed);
nl 285 drivers/net/plip/plip.c struct net_local *nl = netdev_priv(dev);
nl 296 drivers/net/plip/plip.c nl->port_owner = 0;
nl 299 drivers/net/plip/plip.c nl->trigger = PLIP_TRIGGER_WAIT;
nl 300 drivers/net/plip/plip.c nl->nibble = PLIP_NIBBLE_WAIT;
nl 303 drivers/net/plip/plip.c INIT_WORK(&nl->immediate, plip_bh);
nl 304 drivers/net/plip/plip.c INIT_DELAYED_WORK(&nl->deferred, plip_kick_bh);
nl 307 drivers/net/plip/plip.c INIT_DELAYED_WORK(&nl->timer, plip_timer_bh);
nl 309 drivers/net/plip/plip.c spin_lock_init(&nl->lock);
nl 318 drivers/net/plip/plip.c struct net_local *nl =
nl 321 drivers/net/plip/plip.c if (nl->is_deferred)
nl 322 drivers/net/plip/plip.c schedule_work(&nl->immediate);
nl 336 drivers/net/plip/plip.c static int plip_bh_timeout_error(struct net_device *dev, struct net_local *nl,
nl 346 drivers/net/plip/plip.c typedef int (*plip_func)(struct net_device *dev, struct net_local *nl,
nl 362 drivers/net/plip/plip.c struct net_local *nl = container_of(work, struct net_local, immediate);
nl 363 drivers/net/plip/plip.c struct plip_local *snd = &nl->snd_data;
nl 364 drivers/net/plip/plip.c struct plip_local *rcv = &nl->rcv_data;
nl 368 drivers/net/plip/plip.c nl->is_deferred = 0;
nl 369 drivers/net/plip/plip.c f = connection_state_table[nl->connection];
nl 370 drivers/net/plip/plip.c if ((r = (*f)(nl->dev, nl, snd, rcv)) != OK &&
nl 371 drivers/net/plip/plip.c (r = plip_bh_timeout_error(nl->dev, nl, snd, rcv, r)) != OK) {
nl 372 drivers/net/plip/plip.c nl->is_deferred = 1;
nl 373 drivers/net/plip/plip.c schedule_delayed_work(&nl->deferred, 1);
nl 380 drivers/net/plip/plip.c struct net_local *nl =
nl 383 drivers/net/plip/plip.c if (!(atomic_read (&nl->kill_timer))) {
nl 384 drivers/net/plip/plip.c plip_interrupt (nl->dev);
nl 386 drivers/net/plip/plip.c schedule_delayed_work(&nl->timer, 1);
nl 389 drivers/net/plip/plip.c complete(&nl->killed_timer_cmp);
nl 394 drivers/net/plip/plip.c plip_bh_timeout_error(struct net_device *dev, struct net_local *nl,
nl 409 drivers/net/plip/plip.c spin_lock_irq(&nl->lock);
nl 410 drivers/net/plip/plip.c if (nl->connection == PLIP_CN_SEND) {
nl 413 drivers/net/plip/plip.c nl->timeout_count++;
nl 414 drivers/net/plip/plip.c if ((error == HS_TIMEOUT && nl->timeout_count <= 10) ||
nl 415 drivers/net/plip/plip.c nl->timeout_count <= 3) {
nl 416 drivers/net/plip/plip.c spin_unlock_irq(&nl->lock);
nl 427 drivers/net/plip/plip.c } else if (nl->connection == PLIP_CN_RECEIVE) {
nl 430 drivers/net/plip/plip.c spin_unlock_irq(&nl->lock);
nl 434 drivers/net/plip/plip.c if (++nl->timeout_count <= 3) {
nl 435 drivers/net/plip/plip.c spin_unlock_irq(&nl->lock);
nl 455 drivers/net/plip/plip.c spin_unlock_irq(&nl->lock);
nl 462 drivers/net/plip/plip.c nl->connection = PLIP_CN_ERROR;
nl 469 drivers/net/plip/plip.c plip_none(struct net_device *dev, struct net_local *nl,
nl 581 drivers/net/plip/plip.c plip_receive_packet(struct net_device *dev, struct net_local *nl,
nl 584 drivers/net/plip/plip.c unsigned short nibble_timeout = nl->nibble;
nl 601 drivers/net/plip/plip.c if (plip_receive(nl->trigger, dev,
nl 605 drivers/net/plip/plip.c nl->is_deferred = 1;
nl 606 drivers/net/plip/plip.c nl->connection = PLIP_CN_SEND;
nl 607 drivers/net/plip/plip.c schedule_delayed_work(&nl->deferred, 1);
nl 681 drivers/net/plip/plip.c spin_lock_irq(&nl->lock);
nl 683 drivers/net/plip/plip.c nl->connection = PLIP_CN_SEND;
nl 684 drivers/net/plip/plip.c spin_unlock_irq(&nl->lock);
nl 685 drivers/net/plip/plip.c schedule_work(&nl->immediate);
nl 690 drivers/net/plip/plip.c nl->connection = PLIP_CN_NONE;
nl 691 drivers/net/plip/plip.c spin_unlock_irq(&nl->lock);
nl 749 drivers/net/plip/plip.c plip_send_packet(struct net_device *dev, struct net_local *nl,
nl 752 drivers/net/plip/plip.c unsigned short nibble_timeout = nl->nibble;
nl 771 drivers/net/plip/plip.c cx = nl->trigger;
nl 774 drivers/net/plip/plip.c spin_lock_irq(&nl->lock);
nl 775 drivers/net/plip/plip.c if (nl->connection == PLIP_CN_RECEIVE) {
nl 776 drivers/net/plip/plip.c spin_unlock_irq(&nl->lock);
nl 783 drivers/net/plip/plip.c spin_unlock_irq(&nl->lock);
nl 786 drivers/net/plip/plip.c if (nl->connection == PLIP_CN_RECEIVE) {
nl 802 drivers/net/plip/plip.c nl->timeout_count = 0;
nl 805 drivers/net/plip/plip.c spin_unlock_irq(&nl->lock);
nl 857 drivers/net/plip/plip.c nl->connection = PLIP_CN_CLOSING;
nl 858 drivers/net/plip/plip.c nl->is_deferred = 1;
nl 859 drivers/net/plip/plip.c schedule_delayed_work(&nl->deferred, 1);
nl 868 drivers/net/plip/plip.c plip_connection_close(struct net_device *dev, struct net_local *nl,
nl 871 drivers/net/plip/plip.c spin_lock_irq(&nl->lock);
nl 872 drivers/net/plip/plip.c if (nl->connection == PLIP_CN_CLOSING) {
nl 873 drivers/net/plip/plip.c nl->connection = PLIP_CN_NONE;
nl 876 drivers/net/plip/plip.c spin_unlock_irq(&nl->lock);
nl 877 drivers/net/plip/plip.c if (nl->should_relinquish) {
nl 878 drivers/net/plip/plip.c nl->should_relinquish = nl->port_owner = 0;
nl 879 drivers/net/plip/plip.c parport_release(nl->pardev);
nl 886 drivers/net/plip/plip.c plip_error(struct net_device *dev, struct net_local *nl,
nl 895 drivers/net/plip/plip.c nl->connection = PLIP_CN_NONE;
nl 896 drivers/net/plip/plip.c nl->should_relinquish = 0;
nl 902 drivers/net/plip/plip.c nl->is_deferred = 1;
nl 903 drivers/net/plip/plip.c schedule_delayed_work(&nl->deferred, 1);
nl 914 drivers/net/plip/plip.c struct net_local *nl;
nl 919 drivers/net/plip/plip.c nl = netdev_priv(dev);
nl 920 drivers/net/plip/plip.c rcv = &nl->rcv_data;
nl 922 drivers/net/plip/plip.c spin_lock_irqsave (&nl->lock, flags);
nl 928 drivers/net/plip/plip.c spin_unlock_irqrestore (&nl->lock, flags);
nl 935 drivers/net/plip/plip.c switch (nl->connection) {
nl 942 drivers/net/plip/plip.c nl->connection = PLIP_CN_RECEIVE;
nl 943 drivers/net/plip/plip.c nl->timeout_count = 0;
nl 944 drivers/net/plip/plip.c schedule_work(&nl->immediate);
nl 958 drivers/net/plip/plip.c spin_unlock_irqrestore(&nl->lock, flags);
nl 964 drivers/net/plip/plip.c struct net_local *nl = netdev_priv(dev);
nl 965 drivers/net/plip/plip.c struct plip_local *snd = &nl->snd_data;
nl 971 drivers/net/plip/plip.c if (!nl->port_owner) {
nl 972 drivers/net/plip/plip.c if (parport_claim(nl->pardev))
nl 974 drivers/net/plip/plip.c nl->port_owner = 1;
nl 988 drivers/net/plip/plip.c spin_lock_irq(&nl->lock);
nl 992 drivers/net/plip/plip.c if (nl->connection == PLIP_CN_NONE) {
nl 993 drivers/net/plip/plip.c nl->connection = PLIP_CN_SEND;
nl 994 drivers/net/plip/plip.c nl->timeout_count = 0;
nl 996 drivers/net/plip/plip.c schedule_work(&nl->immediate);
nl 997 drivers/net/plip/plip.c spin_unlock_irq(&nl->lock);
nl 1061 drivers/net/plip/plip.c struct net_local *nl = netdev_priv(dev);
nl 1065 drivers/net/plip/plip.c if (!nl->port_owner) {
nl 1066 drivers/net/plip/plip.c if (parport_claim(nl->pardev)) return -EAGAIN;
nl 1067 drivers/net/plip/plip.c nl->port_owner = 1;
nl 1070 drivers/net/plip/plip.c nl->should_relinquish = 0;
nl 1079 drivers/net/plip/plip.c atomic_set (&nl->kill_timer, 0);
nl 1080 drivers/net/plip/plip.c schedule_delayed_work(&nl->timer, 1);
nl 1084 drivers/net/plip/plip.c nl->rcv_data.state = nl->snd_data.state = PLIP_PK_DONE;
nl 1085 drivers/net/plip/plip.c nl->rcv_data.skb = nl->snd_data.skb = NULL;
nl 1086 drivers/net/plip/plip.c nl->connection = PLIP_CN_NONE;
nl 1087 drivers/net/plip/plip.c nl->is_deferred = 0;
nl 1121 drivers/net/plip/plip.c struct net_local *nl = netdev_priv(dev);
nl 1122 drivers/net/plip/plip.c struct plip_local *snd = &nl->snd_data;
nl 1123 drivers/net/plip/plip.c struct plip_local *rcv = &nl->rcv_data;
nl 1131 drivers/net/plip/plip.c init_completion(&nl->killed_timer_cmp);
nl 1132 drivers/net/plip/plip.c atomic_set (&nl->kill_timer, 1);
nl 1133 drivers/net/plip/plip.c wait_for_completion(&nl->killed_timer_cmp);
nl 1139 drivers/net/plip/plip.c nl->is_deferred = 0;
nl 1140 drivers/net/plip/plip.c nl->connection = PLIP_CN_NONE;
nl 1141 drivers/net/plip/plip.c if (nl->port_owner) {
nl 1142 drivers/net/plip/plip.c parport_release(nl->pardev);
nl 1143 drivers/net/plip/plip.c nl->port_owner = 0;
nl 1168 drivers/net/plip/plip.c struct net_local *nl = netdev_priv(dev);
nl 1171 drivers/net/plip/plip.c if (nl->connection != PLIP_CN_NONE) {
nl 1172 drivers/net/plip/plip.c nl->should_relinquish = 1;
nl 1176 drivers/net/plip/plip.c nl->port_owner = 0; /* Remember that we released the bus */
nl 1184 drivers/net/plip/plip.c struct net_local *nl = netdev_priv(dev);
nl 1186 drivers/net/plip/plip.c if (nl->port_owner) {
nl 1189 drivers/net/plip/plip.c if (!parport_claim(nl->pardev))
nl 1200 drivers/net/plip/plip.c if (!parport_claim(nl->pardev)) {
nl 1201 drivers/net/plip/plip.c nl->port_owner = 1;
nl 1210 drivers/net/plip/plip.c struct net_local *nl = netdev_priv(dev);
nl 1218 drivers/net/plip/plip.c pc->trigger = nl->trigger;
nl 1219 drivers/net/plip/plip.c pc->nibble = nl->nibble;
nl 1224 drivers/net/plip/plip.c nl->trigger = pc->trigger;
nl 1225 drivers/net/plip/plip.c nl->nibble = pc->nibble;
nl 1258 drivers/net/plip/plip.c struct net_local *nl;
nl 1283 drivers/net/plip/plip.c nl = netdev_priv(dev);
nl 1284 drivers/net/plip/plip.c nl->dev = dev;
nl 1292 drivers/net/plip/plip.c nl->pardev = parport_register_dev_model(port, dev->name,
nl 1295 drivers/net/plip/plip.c if (!nl->pardev) {
nl 1321 drivers/net/plip/plip.c parport_unregister_device(nl->pardev);
nl 1359 drivers/net/plip/plip.c struct net_local *nl = netdev_priv(dev);
nl 1361 drivers/net/plip/plip.c if (nl->port_owner)
nl 1362 drivers/net/plip/plip.c parport_release(nl->pardev);
nl 1363 drivers/net/plip/plip.c parport_unregister_device(nl->pardev);
nl 350 drivers/net/wan/sbni.c struct net_local *nl;
nl 381 drivers/net/wan/sbni.c nl = netdev_priv(dev);
nl 382 drivers/net/wan/sbni.c if( !nl ) {
nl 388 drivers/net/wan/sbni.c memset( nl, 0, sizeof(struct net_local) );
nl 389 drivers/net/wan/sbni.c spin_lock_init( &nl->lock );
nl 399 drivers/net/wan/sbni.c nl->maxframe = DEFAULT_FRAME_LEN;
nl 400 drivers/net/wan/sbni.c nl->csr1.rate = baud[ num ];
nl 402 drivers/net/wan/sbni.c if( (nl->cur_rxl_index = rxl[ num ]) == -1 )
nl 404 drivers/net/wan/sbni.c nl->cur_rxl_index = DEF_RXL,
nl 405 drivers/net/wan/sbni.c nl->delta_rxl = DEF_RXL_DELTA;
nl 407 drivers/net/wan/sbni.c nl->delta_rxl = 0;
nl 408 drivers/net/wan/sbni.c nl->csr1.rxl = rxl_tab[ nl->cur_rxl_index ];
nl 410 drivers/net/wan/sbni.c nl->state |= FL_SLOW_MODE;
nl 420 drivers/net/wan/sbni.c ((nl->state & FL_SLOW_MODE) ? 500000 : 2000000)
nl 421 drivers/net/wan/sbni.c / (1 << nl->csr1.rate));
nl 423 drivers/net/wan/sbni.c if( nl->delta_rxl == 0 )
nl 424 drivers/net/wan/sbni.c pr_cont(", receive level 0x%x (fixed)\n", nl->cur_rxl_index);
nl 429 drivers/net/wan/sbni.c nl->master = dev;
nl 430 drivers/net/wan/sbni.c nl->link = NULL;
nl 450 drivers/net/wan/sbni.c struct net_local *nl = netdev_priv(p);
nl 451 drivers/net/wan/sbni.c spin_lock( &nl->lock );
nl 452 drivers/net/wan/sbni.c if( nl->tx_buf_p || (nl->state & FL_LINE_DOWN) ) {
nl 453 drivers/net/wan/sbni.c p = nl->link;
nl 454 drivers/net/wan/sbni.c spin_unlock( &nl->lock );
nl 458 drivers/net/wan/sbni.c spin_unlock( &nl->lock );
nl 472 drivers/net/wan/sbni.c struct net_local *nl = netdev_priv(dev);
nl 475 drivers/net/wan/sbni.c spin_lock( &nl->lock );
nl 479 drivers/net/wan/sbni.c spin_unlock( &nl->lock );
nl 506 drivers/net/wan/sbni.c struct net_local *nl = netdev_priv(dev);
nl 509 drivers/net/wan/sbni.c spin_lock( &nl->lock );
nl 510 drivers/net/wan/sbni.c if( nl->second )
nl 511 drivers/net/wan/sbni.c spin_lock(&NET_LOCAL_LOCK(nl->second));
nl 518 drivers/net/wan/sbni.c if( nl->second && /* second channel present */
nl 519 drivers/net/wan/sbni.c (inb( nl->second->base_addr+CSR0 ) & (RC_RDY | TR_RDY)) )
nl 520 drivers/net/wan/sbni.c handle_channel( nl->second ),
nl 524 drivers/net/wan/sbni.c if( nl->second )
nl 525 drivers/net/wan/sbni.c spin_unlock(&NET_LOCAL_LOCK(nl->second));
nl 526 drivers/net/wan/sbni.c spin_unlock( &nl->lock );
nl 534 drivers/net/wan/sbni.c struct net_local *nl = netdev_priv(dev);
nl 542 drivers/net/wan/sbni.c if( nl->state & FL_SLAVE )
nl 543 drivers/net/wan/sbni.c spin_lock(&NET_LOCAL_LOCK(nl->master));
nl 548 drivers/net/wan/sbni.c nl->timer_ticks = CHANGE_LEVEL_START_TICKS;
nl 554 drivers/net/wan/sbni.c req_ans = !(nl->state & FL_PREV_OK);
nl 568 drivers/net/wan/sbni.c if( req_ans || nl->tx_frameno != 0 )
nl 578 drivers/net/wan/sbni.c if( nl->state & FL_SLAVE )
nl 579 drivers/net/wan/sbni.c spin_unlock(&NET_LOCAL_LOCK(nl->master));
nl 592 drivers/net/wan/sbni.c struct net_local *nl = netdev_priv(dev);
nl 610 drivers/net/wan/sbni.c nl->state |= FL_PREV_OK;
nl 612 drivers/net/wan/sbni.c nl->in_stats.all_rx_number++;
nl 614 drivers/net/wan/sbni.c nl->state &= ~FL_PREV_OK,
nl 616 drivers/net/wan/sbni.c nl->in_stats.all_rx_number++,
nl 617 drivers/net/wan/sbni.c nl->in_stats.bad_rx_number++;
nl 626 drivers/net/wan/sbni.c struct net_local *nl = netdev_priv(dev);
nl 630 drivers/net/wan/sbni.c if( nl->state & FL_NEED_RESEND ) {
nl 633 drivers/net/wan/sbni.c if( nl->trans_errors ) {
nl 634 drivers/net/wan/sbni.c --nl->trans_errors;
nl 635 drivers/net/wan/sbni.c if( nl->framelen != 0 )
nl 636 drivers/net/wan/sbni.c nl->in_stats.resend_tx_number++;
nl 640 drivers/net/wan/sbni.c if( (nl->state & FL_SLAVE) || nl->link )
nl 642 drivers/net/wan/sbni.c nl->state |= FL_LINE_DOWN;
nl 647 drivers/net/wan/sbni.c nl->trans_errors = TR_ERROR_COUNT;
nl 650 drivers/net/wan/sbni.c nl->state |= FL_NEED_RESEND;
nl 657 drivers/net/wan/sbni.c if( nl->framelen ) {
nl 659 drivers/net/wan/sbni.c nl->in_stats.all_tx_number++;
nl 660 drivers/net/wan/sbni.c nl->state |= FL_WAIT_ACK;
nl 668 drivers/net/wan/sbni.c if( nl->tx_frameno )
nl 683 drivers/net/wan/sbni.c struct net_local *nl = netdev_priv(dev);
nl 684 drivers/net/wan/sbni.c struct sk_buff *skb = nl->tx_buf_p;
nl 686 drivers/net/wan/sbni.c unsigned len = min_t(unsigned int, skb->len - nl->outpos, nl->framelen);
nl 688 drivers/net/wan/sbni.c outsb( dev->base_addr + DAT, skb->data + nl->outpos, len );
nl 689 drivers/net/wan/sbni.c *crc_p = calc_crc32( *crc_p, skb->data + nl->outpos, len );
nl 692 drivers/net/wan/sbni.c for( len = nl->framelen - len; len--; )
nl 702 drivers/net/wan/sbni.c struct net_local *nl = netdev_priv(dev);
nl 707 drivers/net/wan/sbni.c nl->wait_frameno = frameno,
nl 708 drivers/net/wan/sbni.c nl->inppos = 0;
nl 710 drivers/net/wan/sbni.c if( nl->wait_frameno == frameno ) {
nl 712 drivers/net/wan/sbni.c if( nl->inppos + framelen <= ETHER_MAX_LEN )
nl 721 drivers/net/wan/sbni.c nl->wait_frameno = 0,
nl 722 drivers/net/wan/sbni.c nl->inppos = 0,
nl 724 drivers/net/wan/sbni.c nl->master->stats.rx_errors++,
nl 725 drivers/net/wan/sbni.c nl->master->stats.rx_missed_errors++;
nl 739 drivers/net/wan/sbni.c nl->wait_frameno = 0,
nl 741 drivers/net/wan/sbni.c nl->master->stats.rx_errors++,
nl 742 drivers/net/wan/sbni.c nl->master->stats.rx_crc_errors++;
nl 755 drivers/net/wan/sbni.c struct net_local *nl = netdev_priv(dev);
nl 758 drivers/net/wan/sbni.c nl->master->stats.tx_packets++;
nl 759 drivers/net/wan/sbni.c nl->master->stats.tx_bytes += nl->tx_buf_p->len;
nl 762 drivers/net/wan/sbni.c dev->stats.tx_bytes += nl->tx_buf_p->len;
nl 764 drivers/net/wan/sbni.c dev_consume_skb_irq(nl->tx_buf_p);
nl 766 drivers/net/wan/sbni.c nl->tx_buf_p = NULL;
nl 768 drivers/net/wan/sbni.c nl->outpos = 0;
nl 769 drivers/net/wan/sbni.c nl->state &= ~(FL_WAIT_ACK | FL_NEED_RESEND);
nl 770 drivers/net/wan/sbni.c nl->framelen = 0;
nl 777 drivers/net/wan/sbni.c struct net_local *nl = netdev_priv(dev);
nl 780 drivers/net/wan/sbni.c nl->state &= ~FL_NEED_RESEND;
nl 782 drivers/net/wan/sbni.c if( nl->state & FL_WAIT_ACK ) {
nl 783 drivers/net/wan/sbni.c nl->outpos += nl->framelen;
nl 785 drivers/net/wan/sbni.c if( --nl->tx_frameno )
nl 786 drivers/net/wan/sbni.c nl->framelen = min_t(unsigned int,
nl 787 drivers/net/wan/sbni.c nl->maxframe,
nl 788 drivers/net/wan/sbni.c nl->tx_buf_p->len - nl->outpos);
nl 792 drivers/net/wan/sbni.c netif_wake_queue( nl->master );
nl 799 drivers/net/wan/sbni.c nl->state &= ~FL_WAIT_ACK;
nl 811 drivers/net/wan/sbni.c struct net_local *nl = netdev_priv(dev);
nl 815 drivers/net/wan/sbni.c if( nl->inppos + framelen > ETHER_MAX_LEN )
nl 818 drivers/net/wan/sbni.c if( !nl->rx_buf_p && !(nl->rx_buf_p = get_rx_buf( dev )) )
nl 821 drivers/net/wan/sbni.c p = nl->rx_buf_p->data + nl->inppos;
nl 826 drivers/net/wan/sbni.c nl->inppos += framelen - 4;
nl 827 drivers/net/wan/sbni.c if( --nl->wait_frameno == 0 ) /* last frame received */
nl 842 drivers/net/wan/sbni.c struct net_local *nl = netdev_priv(dev);
nl 847 drivers/net/wan/sbni.c if( nl->tx_buf_p )
nl 850 drivers/net/wan/sbni.c nl->outpos = 0;
nl 851 drivers/net/wan/sbni.c nl->state &= ~(FL_WAIT_ACK | FL_NEED_RESEND);
nl 857 drivers/net/wan/sbni.c nl->tx_buf_p = skb;
nl 858 drivers/net/wan/sbni.c nl->tx_frameno = DIV_ROUND_UP(len, nl->maxframe);
nl 859 drivers/net/wan/sbni.c nl->framelen = len < nl->maxframe ? len : nl->maxframe;
nl 863 drivers/net/wan/sbni.c netif_trans_update(nl->master);
nl 873 drivers/net/wan/sbni.c struct net_local *nl = netdev_priv(dev);
nl 875 drivers/net/wan/sbni.c if( nl->tx_buf_p )
nl 876 drivers/net/wan/sbni.c dev_kfree_skb_any( nl->tx_buf_p ),
nl 877 drivers/net/wan/sbni.c nl->tx_buf_p = NULL,
nl 879 drivers/net/wan/sbni.c nl->master->stats.tx_errors++,
nl 880 drivers/net/wan/sbni.c nl->master->stats.tx_carrier_errors++;
nl 886 drivers/net/wan/sbni.c nl->tx_frameno = 0;
nl 887 drivers/net/wan/sbni.c nl->framelen = 0;
nl 888 drivers/net/wan/sbni.c nl->outpos = 0;
nl 889 drivers/net/wan/sbni.c nl->state &= ~(FL_WAIT_ACK | FL_NEED_RESEND);
nl 891 drivers/net/wan/sbni.c netif_start_queue( nl->master );
nl 892 drivers/net/wan/sbni.c netif_trans_update(nl->master);
nl 903 drivers/net/wan/sbni.c struct net_local *nl = netdev_priv(dev);
nl 906 drivers/net/wan/sbni.c u32 len_field = nl->framelen + 6; /* CRC + frameno + reserved */
nl 909 drivers/net/wan/sbni.c if( nl->state & FL_NEED_RESEND )
nl 912 drivers/net/wan/sbni.c if( nl->outpos == 0 )
nl 915 drivers/net/wan/sbni.c len_field |= (nl->state & FL_PREV_OK) ? FRAME_SENT_OK : FRAME_SENT_BAD;
nl 925 drivers/net/wan/sbni.c outb( nl->tx_frameno, dev->base_addr + DAT );
nl 926 drivers/net/wan/sbni.c crc = CRC32( nl->tx_frameno, crc );
nl 1005 drivers/net/wan/sbni.c struct net_local *nl = netdev_priv(dev);
nl 1006 drivers/net/wan/sbni.c struct sk_buff *skb = nl->rx_buf_p;
nl 1008 drivers/net/wan/sbni.c skb_put( skb, nl->inppos );
nl 1011 drivers/net/wan/sbni.c skb->protocol = eth_type_trans( skb, nl->master );
nl 1013 drivers/net/wan/sbni.c ++nl->master->stats.rx_packets;
nl 1014 drivers/net/wan/sbni.c nl->master->stats.rx_bytes += nl->inppos;
nl 1019 drivers/net/wan/sbni.c dev->stats.rx_bytes += nl->inppos;
nl 1021 drivers/net/wan/sbni.c nl->rx_buf_p = NULL; /* protocol driver will clear this sk_buff */
nl 1035 drivers/net/wan/sbni.c struct net_local *nl = from_timer(nl, t, watchdog);
nl 1036 drivers/net/wan/sbni.c struct net_device *dev = nl->watchdog_dev;
nl 1040 drivers/net/wan/sbni.c spin_lock_irqsave( &nl->lock, flags );
nl 1045 drivers/net/wan/sbni.c if( nl->timer_ticks ) {
nl 1048 drivers/net/wan/sbni.c nl->timer_ticks--;
nl 1050 drivers/net/wan/sbni.c nl->in_stats.timeout_number++;
nl 1051 drivers/net/wan/sbni.c if( nl->delta_rxl )
nl 1054 drivers/net/wan/sbni.c outb( *(u_char *)&nl->csr1 | PR_RES,
nl 1059 drivers/net/wan/sbni.c nl->state &= ~FL_LINE_DOWN;
nl 1065 drivers/net/wan/sbni.c spin_unlock_irqrestore( &nl->lock, flags );
nl 1084 drivers/net/wan/sbni.c struct net_local *nl = netdev_priv(dev);
nl 1086 drivers/net/wan/sbni.c nl->timer_ticks = CHANGE_LEVEL_START_TICKS;
nl 1087 drivers/net/wan/sbni.c nl->state &= ~(FL_WAIT_ACK | FL_NEED_RESEND);
nl 1088 drivers/net/wan/sbni.c nl->state |= FL_PREV_OK;
nl 1090 drivers/net/wan/sbni.c nl->inppos = nl->outpos = 0;
nl 1091 drivers/net/wan/sbni.c nl->wait_frameno = 0;
nl 1092 drivers/net/wan/sbni.c nl->tx_frameno = 0;
nl 1093 drivers/net/wan/sbni.c nl->framelen = 0;
nl 1095 drivers/net/wan/sbni.c outb( *(u_char *)&nl->csr1 | PR_RES, dev->base_addr + CSR1 );
nl 1106 drivers/net/wan/sbni.c struct net_local *nl = netdev_priv(dev);
nl 1108 drivers/net/wan/sbni.c if( nl->delta_rxl == 0 ) /* do not auto-negotiate RxL */
nl 1111 drivers/net/wan/sbni.c if( nl->cur_rxl_index == 0 )
nl 1112 drivers/net/wan/sbni.c nl->delta_rxl = 1;
nl 1113 drivers/net/wan/sbni.c else if( nl->cur_rxl_index == 15 )
nl 1114 drivers/net/wan/sbni.c nl->delta_rxl = -1;
nl 1115 drivers/net/wan/sbni.c else if( nl->cur_rxl_rcvd < nl->prev_rxl_rcvd )
nl 1116 drivers/net/wan/sbni.c nl->delta_rxl = -nl->delta_rxl;
nl 1118 drivers/net/wan/sbni.c nl->csr1.rxl = rxl_tab[ nl->cur_rxl_index += nl->delta_rxl ];
nl 1120 drivers/net/wan/sbni.c outb( *(u8 *)&nl->csr1, dev->base_addr + CSR1 );
nl 1122 drivers/net/wan/sbni.c nl->prev_rxl_rcvd = nl->cur_rxl_rcvd;
nl 1123 drivers/net/wan/sbni.c nl->cur_rxl_rcvd = 0;
nl 1130 drivers/net/wan/sbni.c struct net_local *nl = netdev_priv(dev);
nl 1132 drivers/net/wan/sbni.c nl->cur_rxl_index = timeout_rxl_tab[ nl->timeout_rxl ];
nl 1133 drivers/net/wan/sbni.c if( ++nl->timeout_rxl >= 4 )
nl 1134 drivers/net/wan/sbni.c nl->timeout_rxl = 0;
nl 1136 drivers/net/wan/sbni.c nl->csr1.rxl = rxl_tab[ nl->cur_rxl_index ];
nl 1138 drivers/net/wan/sbni.c outb( *(unsigned char *)&nl->csr1, dev->base_addr + CSR1 );
nl 1140 drivers/net/wan/sbni.c nl->prev_rxl_rcvd = nl->cur_rxl_rcvd;
nl 1141 drivers/net/wan/sbni.c nl->cur_rxl_rcvd = 0;
nl 1153 drivers/net/wan/sbni.c struct net_local *nl = netdev_priv(dev);
nl 1154 drivers/net/wan/sbni.c struct timer_list *w = &nl->watchdog;
nl 1173 drivers/net/wan/sbni.c nl->state |= FL_SECONDARY;
nl 1185 drivers/net/wan/sbni.c spin_lock( &nl->lock );
nl 1187 drivers/net/wan/sbni.c memset( &nl->in_stats, 0, sizeof(struct sbni_in_stats) );
nl 1194 drivers/net/wan/sbni.c nl->watchdog_dev = dev;
nl 1199 drivers/net/wan/sbni.c spin_unlock( &nl->lock );
nl 1207 drivers/net/wan/sbni.c struct net_local *nl = netdev_priv(dev);
nl 1209 drivers/net/wan/sbni.c if( nl->second && nl->second->flags & IFF_UP ) {
nl 1211 drivers/net/wan/sbni.c nl->second->name);
nl 1216 drivers/net/wan/sbni.c if( nl->state & FL_SLAVE )
nl 1219 drivers/net/wan/sbni.c while( nl->link ) /* it's master device! */
nl 1220 drivers/net/wan/sbni.c emancipate( nl->link );
nl 1223 drivers/net/wan/sbni.c spin_lock( &nl->lock );
nl 1225 drivers/net/wan/sbni.c nl->second = NULL;
nl 1229 drivers/net/wan/sbni.c del_timer( &nl->watchdog );
nl 1233 drivers/net/wan/sbni.c if( !(nl->state & FL_SECONDARY) )
nl 1235 drivers/net/wan/sbni.c nl->state &= FL_SECONDARY;
nl 1237 drivers/net/wan/sbni.c spin_unlock( &nl->lock );
nl 1291 drivers/net/wan/sbni.c struct net_local *nl = netdev_priv(dev);
nl 1302 drivers/net/wan/sbni.c if (copy_to_user( ifr->ifr_data, &nl->in_stats,
nl 1310 drivers/net/wan/sbni.c memset( &nl->in_stats, 0, sizeof(struct sbni_in_stats) );
nl 1315 drivers/net/wan/sbni.c flags.rate = nl->csr1.rate;
nl 1316 drivers/net/wan/sbni.c flags.slow_mode = (nl->state & FL_SLOW_MODE) != 0;
nl 1317 drivers/net/wan/sbni.c flags.rxl = nl->cur_rxl_index;
nl 1318 drivers/net/wan/sbni.c flags.fixed_rxl = nl->delta_rxl == 0;
nl 1328 drivers/net/wan/sbni.c spin_lock( &nl->lock );
nl 1331 drivers/net/wan/sbni.c nl->delta_rxl = 0,
nl 1332 drivers/net/wan/sbni.c nl->cur_rxl_index = flags.rxl;
nl 1334 drivers/net/wan/sbni.c nl->delta_rxl = DEF_RXL_DELTA,
nl 1335 drivers/net/wan/sbni.c nl->cur_rxl_index = DEF_RXL;
nl 1337 drivers/net/wan/sbni.c nl->csr1.rxl = rxl_tab[ nl->cur_rxl_index ];
nl 1338 drivers/net/wan/sbni.c nl->csr1.rate = flags.rate;
nl 1339 drivers/net/wan/sbni.c outb( *(u8 *)&nl->csr1 | PR_RES, dev->base_addr + CSR1 );
nl 1340 drivers/net/wan/sbni.c spin_unlock( &nl->lock );
nl 1383 drivers/net/wan/sbni.c struct net_local *nl = netdev_priv(dev);
nl 1386 drivers/net/wan/sbni.c if( nl->state & FL_SLAVE ) /* This isn't master or free device */
nl 1392 drivers/net/wan/sbni.c spin_lock( &nl->lock );
nl 1396 drivers/net/wan/sbni.c snl->link = nl->link;
nl 1397 drivers/net/wan/sbni.c nl->link = slave_dev;
nl 1408 drivers/net/wan/sbni.c spin_unlock( &nl->lock );
nl 1419 drivers/net/wan/sbni.c struct net_local *nl = netdev_priv(p);
nl 1424 drivers/net/wan/sbni.c spin_lock( &nl->lock );
nl 1445 drivers/net/wan/sbni.c spin_unlock( &nl->lock );
nl 341 drivers/net/wireless/ath/wil6210/cfg80211.c enum nl80211_iftype nl;
nl 354 drivers/net/wireless/ath/wil6210/cfg80211.c if (__nl2wmi[i].nl == type)
nl 92 drivers/staging/isdn/hysdn/hysdn_net.c flush_tx_buffers(struct net_local *nl)
nl 95 drivers/staging/isdn/hysdn/hysdn_net.c while (nl->sk_count) {
nl 96 drivers/staging/isdn/hysdn/hysdn_net.c dev_kfree_skb(nl->skbs[nl->out_idx++]); /* free skb */
nl 97 drivers/staging/isdn/hysdn/hysdn_net.c if (nl->out_idx >= MAX_SKB_BUFFERS)
nl 98 drivers/staging/isdn/hysdn/hysdn_net.c nl->out_idx = 0; /* wrap around */
nl 99 drivers/staging/isdn/hysdn/hysdn_net.c nl->sk_count--;
nl 23 drivers/staging/rtl8192e/rtllib_softmac.c static void rtllib_sta_wakeup(struct rtllib_device *ieee, short nl);
nl 2101 drivers/staging/rtl8192e/rtllib_softmac.c static void rtllib_sta_wakeup(struct rtllib_device *ieee, short nl)
nl 2104 drivers/staging/rtl8192e/rtllib_softmac.c if (nl) {
nl 2120 drivers/staging/rtl8192e/rtllib_softmac.c if (nl) {
nl 210 drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c void ieee80211_sta_wakeup(struct ieee80211_device *ieee, short nl);
nl 1742 drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c void ieee80211_sta_wakeup(struct ieee80211_device *ieee, short nl)
nl 1745 drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c if (nl) {
nl 1758 drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c if (nl) {
nl 142 drivers/video/fbdev/matrox/matroxfb_crtc2.c unsigned int nl;
nl 145 drivers/video/fbdev/matrox/matroxfb_crtc2.c while ((nl = mga_inl(0x3C48) & 0xFFF) >= lastl) {
nl 146 drivers/video/fbdev/matrox/matroxfb_crtc2.c lastl = nl;
nl 71 fs/ceph/ioctl.c struct ceph_ioctl_layout nl;
nl 82 fs/ceph/ioctl.c memset(&nl, 0, sizeof(nl));
nl 84 fs/ceph/ioctl.c nl.stripe_count = l.stripe_count;
nl 86 fs/ceph/ioctl.c nl.stripe_count = ci->i_layout.stripe_count;
nl 88 fs/ceph/ioctl.c nl.stripe_unit = l.stripe_unit;
nl 90 fs/ceph/ioctl.c nl.stripe_unit = ci->i_layout.stripe_unit;
nl 92 fs/ceph/ioctl.c nl.object_size = l.object_size;
nl 94 fs/ceph/ioctl.c nl.object_size = ci->i_layout.object_size;
nl 96 fs/ceph/ioctl.c nl.data_pool = l.data_pool;
nl 98 fs/ceph/ioctl.c nl.data_pool = ci->i_layout.pool_id;
nl 101 fs/ceph/ioctl.c nl.preferred_osd = -1;
nl 103 fs/ceph/ioctl.c err = __validate_layout(mdsc, &nl);
nl 54 fs/nfsd/fault_inject.c char *nl;
nl 61 fs/nfsd/fault_inject.c nl = strchr(write_buf, '\n');
nl 62 fs/nfsd/fault_inject.c if (nl) {
nl 63 fs/nfsd/fault_inject.c size = nl - write_buf;
nl 64 fs/nfsd/fault_inject.c *nl = '\0';
nl 52 fs/xfs/libxfs/xfs_trans_space.h #define XFS_DIRENTER_MAX_SPLIT(mp,nl) 1
nl 53 fs/xfs/libxfs/xfs_trans_space.h #define XFS_DIRENTER_SPACE_RES(mp,nl) \
nl 55 fs/xfs/libxfs/xfs_trans_space.h XFS_DIRENTER_MAX_SPLIT(mp,nl))
nl 73 fs/xfs/libxfs/xfs_trans_space.h #define XFS_CREATE_SPACE_RES(mp,nl) \
nl 74 fs/xfs/libxfs/xfs_trans_space.h (XFS_IALLOC_SPACE_RES(mp) + XFS_DIRENTER_SPACE_RES(mp,nl))
nl 81 fs/xfs/libxfs/xfs_trans_space.h #define XFS_LINK_SPACE_RES(mp,nl) \
nl 82 fs/xfs/libxfs/xfs_trans_space.h XFS_DIRENTER_SPACE_RES(mp,nl)
nl 83 fs/xfs/libxfs/xfs_trans_space.h #define XFS_MKDIR_SPACE_RES(mp,nl) \
nl 84 fs/xfs/libxfs/xfs_trans_space.h (XFS_IALLOC_SPACE_RES(mp) + XFS_DIRENTER_SPACE_RES(mp,nl))
nl 92 fs/xfs/libxfs/xfs_trans_space.h #define XFS_RENAME_SPACE_RES(mp,nl) \
nl 93 fs/xfs/libxfs/xfs_trans_space.h (XFS_DIRREMOVE_SPACE_RES(mp) + XFS_DIRENTER_SPACE_RES(mp,nl))
nl 94 fs/xfs/libxfs/xfs_trans_space.h #define XFS_SYMLINK_SPACE_RES(mp,nl,b) \
nl 95 fs/xfs/libxfs/xfs_trans_space.h (XFS_IALLOC_SPACE_RES(mp) + XFS_DIRENTER_SPACE_RES(mp,nl) + (b))
nl 11 include/linux/netfilter/nfnetlink.h int (*call)(struct net *net, struct sock *nl, struct sk_buff *skb,
nl 15 include/linux/netfilter/nfnetlink.h int (*call_rcu)(struct net *net, struct sock *nl, struct sk_buff *skb,
nl 19 include/linux/netfilter/nfnetlink.h int (*call_batch)(struct net *net, struct sock *nl, struct sk_buff *skb,
nl 360 kernel/locking/lockdep_proc.c unsigned long nl, nr;
nl 362 kernel/locking/lockdep_proc.c nl = dl->stats.read_waittime.nr + dl->stats.write_waittime.nr;
nl 365 kernel/locking/lockdep_proc.c return nr - nl;
nl 22 kernel/notifier.c static int notifier_chain_register(struct notifier_block **nl,
nl 25 kernel/notifier.c while ((*nl) != NULL) {
nl 26 kernel/notifier.c WARN_ONCE(((*nl) == n), "double register detected");
nl 27 kernel/notifier.c if (n->priority > (*nl)->priority)
nl 29 kernel/notifier.c nl = &((*nl)->next);
nl 31 kernel/notifier.c n->next = *nl;
nl 32 kernel/notifier.c rcu_assign_pointer(*nl, n);
nl 36 kernel/notifier.c static int notifier_chain_cond_register(struct notifier_block **nl,
nl 39 kernel/notifier.c while ((*nl) != NULL) {
nl 40 kernel/notifier.c if ((*nl) == n)
nl 42 kernel/notifier.c if (n->priority > (*nl)->priority)
nl 44 kernel/notifier.c nl = &((*nl)->next);
nl 46 kernel/notifier.c n->next = *nl;
nl 47 kernel/notifier.c rcu_assign_pointer(*nl, n);
nl 51 kernel/notifier.c static int notifier_chain_unregister(struct notifier_block **nl,
nl 54 kernel/notifier.c while ((*nl) != NULL) {
nl 55 kernel/notifier.c if ((*nl) == n) {
nl 56 kernel/notifier.c rcu_assign_pointer(*nl, n->next);
nl 59 kernel/notifier.c nl = &((*nl)->next);
nl 76 kernel/notifier.c static int notifier_call_chain(struct notifier_block **nl,
nl 83 kernel/notifier.c nb = rcu_dereference_raw(*nl);
nl 2328 kernel/rcu/tree_plugin.h int nl = 0; /* Next GP kthread. */
nl 2347 kernel/rcu/tree_plugin.h if (rdp->cpu >= nl) {
nl 2350 kernel/rcu/tree_plugin.h nl = DIV_ROUND_UP(rdp->cpu + 1, ls) * ls;
nl 842 lib/inflate.c unsigned nl; /* number of literal/length codes */
nl 867 lib/inflate.c nl = 257 + ((unsigned)b & 0x1f); /* number of literal/length codes */
nl 876 lib/inflate.c if (nl > 288 || nd > 32)
nl 878 lib/inflate.c if (nl > 286 || nd > 30)
nl 912 lib/inflate.c n = nl + nd;
nl 978 lib/inflate.c if ((i = huft_build(ll, nl, 257, cplens, cplext, &tl, &bl)) != 0)
nl 990 lib/inflate.c if ((i = huft_build(ll + nl, nd, 0, cpdist, cpdext, &td, &bd)) != 0)
nl 287 lib/mpi/longlong.h #define udiv_qrnnd(q, r, nh, nl, d) \
nl 292 lib/mpi/longlong.h "0" ((USItype)(nl)), \
nl 486 lib/mpi/longlong.h #define udiv_qrnnd(q, r, nh, nl, d) \
nl 491 lib/mpi/longlong.h __nn.__i.__h = (nh); __nn.__i.__l = (nl); \
nl 827 lib/mpi/longlong.h #define sdiv_qrnnd(q, r, nh, nl, d) \
nl 830 lib/mpi/longlong.h : "r" ((SItype)(nh)), "1" ((SItype)(nl)), "r" ((SItype)(d)))
nl 1345 lib/mpi/longlong.h #define udiv_qrnnd(q, r, nh, nl, d) \
nl 1348 lib/mpi/longlong.h (q) = __MPN(udiv_w_sdiv) (&__r, nh, nl, d); \
nl 207 net/bridge/br_sysfs_if.c char *nl = strchr(buf, '\n');
nl 209 net/bridge/br_sysfs_if.c if (nl)
nl 210 net/bridge/br_sysfs_if.c *nl = '\0';
nl 718 net/tipc/bcast.c void tipc_nlist_init(struct tipc_nlist *nl, u32 self)
nl 720 net/tipc/bcast.c memset(nl, 0, sizeof(*nl));
nl 721 net/tipc/bcast.c INIT_LIST_HEAD(&nl->list);
nl 722 net/tipc/bcast.c nl->self = self;
nl 725 net/tipc/bcast.c void tipc_nlist_add(struct tipc_nlist *nl, u32 node)
nl 727 net/tipc/bcast.c if (node == nl->self)
nl 728 net/tipc/bcast.c nl->local = true;
nl 729 net/tipc/bcast.c else if (tipc_dest_push(&nl->list, node, 0))
nl 730 net/tipc/bcast.c nl->remote++;
nl 733 net/tipc/bcast.c void tipc_nlist_del(struct tipc_nlist *nl, u32 node)
nl 735 net/tipc/bcast.c if (node == nl->self)
nl 736 net/tipc/bcast.c nl->local = false;
nl 737 net/tipc/bcast.c else if (tipc_dest_del(&nl->list, node, 0))
nl 738 net/tipc/bcast.c nl->remote--;
nl 741 net/tipc/bcast.c void tipc_nlist_purge(struct tipc_nlist *nl)
nl 743 net/tipc/bcast.c tipc_dest_list_purge(&nl->list);
nl 744 net/tipc/bcast.c nl->remote = 0;
nl 745 net/tipc/bcast.c nl->local = false;
nl 62 net/tipc/bcast.h void tipc_nlist_init(struct tipc_nlist *nl, u32 self);
nl 63 net/tipc/bcast.h void tipc_nlist_purge(struct tipc_nlist *nl);
nl 64 net/tipc/bcast.h void tipc_nlist_add(struct tipc_nlist *nl, u32 node);
nl 65 net/tipc/bcast.h void tipc_nlist_del(struct tipc_nlist *nl, u32 node);
nl 686 net/tipc/node.c struct tipc_link *nl = n->links[bearer_id].link;
nl 688 net/tipc/node.c if (!nl || tipc_link_is_up(nl))
nl 691 net/tipc/node.c tipc_link_fsm_evt(nl, LINK_ESTABLISH_EVT);
nl 692 net/tipc/node.c if (!tipc_link_is_up(nl))
nl 697 net/tipc/node.c n->link_id = tipc_link_id(nl);
nl 700 net/tipc/node.c n->links[bearer_id].mtu = tipc_link_mtu(nl) - INT_H_SIZE;
nl 706 net/tipc/node.c tipc_link_name(nl), tipc_link_plane(nl));
nl 710 net/tipc/node.c tipc_link_build_state_msg(nl, xmitq);
nl 718 net/tipc/node.c tipc_link_set_active(nl, true);
nl 719 net/tipc/node.c tipc_bcast_add_peer(n->net, nl, xmitq);
nl 724 net/tipc/node.c if (tipc_link_prio(nl) > tipc_link_prio(ol)) {
nl 728 net/tipc/node.c tipc_link_set_active(nl, true);
nl 730 net/tipc/node.c } else if (tipc_link_prio(nl) == tipc_link_prio(ol)) {
nl 731 net/tipc/node.c tipc_link_set_active(nl, true);
nl 734 net/tipc/node.c pr_debug("New link <%s> is standby\n", tipc_link_name(nl));
nl 738 net/tipc/node.c tipc_link_tnl_prepare(ol, nl, SYNCH_MSG, xmitq);
nl 323 samples/bpf/xdp_router_ipv4_user.c struct nlmsghdr nl;
nl 341 samples/bpf/xdp_router_ipv4_user.c req.nl.nlmsg_len = NLMSG_LENGTH(sizeof(struct rtmsg));
nl 342 samples/bpf/xdp_router_ipv4_user.c req.nl.nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP;
nl 343 samples/bpf/xdp_router_ipv4_user.c req.nl.nlmsg_type = RTM_GETROUTE;
nl 347 samples/bpf/xdp_router_ipv4_user.c req.nl.nlmsg_pid = 0;
nl 348 samples/bpf/xdp_router_ipv4_user.c req.nl.nlmsg_seq = ++seq;
nl 350 samples/bpf/xdp_router_ipv4_user.c iov.iov_base = (void *)&req.nl;
nl 351 samples/bpf/xdp_router_ipv4_user.c iov.iov_len = req.nl.nlmsg_len;
nl 462 samples/bpf/xdp_router_ipv4_user.c struct nlmsghdr nl;
nl 480 samples/bpf/xdp_router_ipv4_user.c req.nl.nlmsg_len = NLMSG_LENGTH(sizeof(struct rtmsg));
nl 481 samples/bpf/xdp_router_ipv4_user.c req.nl.nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP;
nl 482 samples/bpf/xdp_router_ipv4_user.c req.nl.nlmsg_type = RTM_GETNEIGH;
nl 485 samples/bpf/xdp_router_ipv4_user.c req.nl.nlmsg_pid = 0;
nl 486 samples/bpf/xdp_router_ipv4_user.c req.nl.nlmsg_seq = ++seq;
nl 488 samples/bpf/xdp_router_ipv4_user.c iov.iov_base = (void *)&req.nl;
nl 489 samples/bpf/xdp_router_ipv4_user.c iov.iov_len = req.nl.nlmsg_len;
nl 349 scripts/asn1_compiler.c char *line, *nl, *start, *p, *q;
nl 367 scripts/asn1_compiler.c nl = memchr(line, '\n', end - buffer);
nl 368 scripts/asn1_compiler.c if (!nl) {
nl 369 scripts/asn1_compiler.c buffer = nl = end;
nl 371 scripts/asn1_compiler.c buffer = nl + 1;
nl 372 scripts/asn1_compiler.c *nl = '\0';
nl 378 scripts/asn1_compiler.c while ((p = memchr(p, '-', nl - p))) {
nl 382 scripts/asn1_compiler.c while ((q = memchr(q, '-', nl - q))) {
nl 386 scripts/asn1_compiler.c memmove(p, q, nl - q);
nl 392 scripts/asn1_compiler.c nl = p;
nl 400 scripts/asn1_compiler.c while (p < nl) {
nl 402 scripts/asn1_compiler.c while (p < nl && isspace(*p))
nl 404 scripts/asn1_compiler.c if (p >= nl)
nl 418 scripts/asn1_compiler.c while (q < nl && (isalnum(*q) || *q == '-' || *q == '_'))
nl 459 scripts/asn1_compiler.c while (q < nl && (isdigit(*q)))
nl 474 scripts/asn1_compiler.c if (nl - p >= 3) {
nl 484 scripts/asn1_compiler.c if (nl - p >= 2) {
nl 501 scripts/asn1_compiler.c if (nl - p >= 1) {
nl 405 tools/perf/util/stat-display.c new_line_t nl;
nl 408 tools/perf/util/stat-display.c nl = new_line_metric;
nl 414 tools/perf/util/stat-display.c nl = new_line_std;
nl 427 tools/perf/util/stat-display.c nl = new_line_csv;
nl 474 tools/perf/util/stat-display.c out.new_line = nl;
nl 105 tools/perf/util/synthetic-events.c char *nl;
nl 108 tools/perf/util/synthetic-events.c nl = strchr(name, '\n');
nl 109 tools/perf/util/synthetic-events.c if (nl)
nl 110 tools/perf/util/synthetic-events.c *nl = '\0';
nl 197 tools/thermal/tmon/sysfs.c static int find_tzone_cdev(struct dirent *nl, char *tz_name,
nl 206 tools/thermal/tmon/sysfs.c if (nl->d_type == DT_LNK) {
nl 207 tools/thermal/tmon/sysfs.c syslog(LOG_DEBUG, "TZ%d: cdev: %s cid %d\n", tz_id, nl->d_name,
nl 216 tools/thermal/tmon/sysfs.c snprintf(cdev_name, 256, "%s/%s", tz_name, nl->d_name);
nl 229 tools/thermal/tmon/sysfs.c snprintf(cdev_trip_name, 256, "%s%s", nl->d_name,