nc 375 arch/arm/include/asm/assembler.h .if \inc == 1
nc 377 arch/arm/include/asm/assembler.h .elseif \inc == 4
nc 403 arch/arm/include/asm/assembler.h usraccoff \instr, \reg, \ptr, \inc, 0, \cond, \abort
nc 405 arch/arm/include/asm/assembler.h usraccoff \instr, \reg, \ptr, \inc, \inc, \cond, \abort
nc 408 arch/arm/include/asm/assembler.h add\cond \ptr, #\rept * \inc
nc 416 arch/arm/include/asm/assembler.h .if \inc == 1
nc 417 arch/arm/include/asm/assembler.h \instr\()b\t\cond \reg, [\ptr], #\inc
nc 418 arch/arm/include/asm/assembler.h .elseif \inc == 4
nc 419 arch/arm/include/asm/assembler.h \instr\t\cond \reg, [\ptr], #\inc
nc 434 arch/arm/include/asm/assembler.h usracc str, \reg, \ptr, \inc, \cond, \rept, \abort
nc 438 arch/arm/include/asm/assembler.h usracc ldr, \reg, \ptr, \inc, \cond, \rept, \abort
nc 31 arch/mips/include/asm/asmmacro.h .hword ((\enc) >> 16)
nc 32 arch/mips/include/asm/asmmacro.h .hword ((\enc) & 0xffff)
nc 43 arch/mips/include/asm/asmmacro.h .word (\enc)
nc 2081 arch/sparc/kernel/ldc.c u32 nc;
nc 2105 arch/sparc/kernel/ldc.c sp->cookies[sp->nc - 1].cookie_size += tlen;
nc 2107 arch/sparc/kernel/ldc.c sp->cookies[sp->nc].cookie_addr = this_cookie;
nc 2108 arch/sparc/kernel/ldc.c sp->cookies[sp->nc].cookie_size = tlen;
nc 2109 arch/sparc/kernel/ldc.c sp->nc++;
nc 2182 arch/sparc/kernel/ldc.c state.nc = 0;
nc 2189 arch/sparc/kernel/ldc.c return state.nc;
nc 2224 arch/sparc/kernel/ldc.c state.nc = 0;
nc 2226 arch/sparc/kernel/ldc.c BUG_ON(state.nc > ncookies);
nc 2228 arch/sparc/kernel/ldc.c return state.nc;
nc 101 arch/unicore32/include/asm/assembler.h .if \inc == 1
nc 102 arch/unicore32/include/asm/assembler.h \instr\()b.u \reg, [\ptr], #\inc
nc 103 arch/unicore32/include/asm/assembler.h .elseif \inc == 4
nc 104 arch/unicore32/include/asm/assembler.h \instr\()w.u \reg, [\ptr], #\inc
nc 117 arch/unicore32/include/asm/assembler.h usracc st, \reg, \ptr, \inc, \cond, \rept, \abort
nc 121 arch/unicore32/include/asm/assembler.h usracc ld, \reg, \ptr, \inc, \cond, \rept, \abort
nc 3422 arch/x86/kvm/svm.c struct vmcb_control_area *nc = &nested_vmcb->control;
nc 3424 arch/x86/kvm/svm.c nc->exit_int_info = vmcb->control.event_inj;
nc 3425 arch/x86/kvm/svm.c nc->exit_int_info_err = vmcb->control.event_inj_err;
nc 2168 drivers/block/drbd/drbd_int.h struct net_conf *nc;
nc 2172 drivers/block/drbd/drbd_int.h nc = rcu_dereference(first_peer_device(device)->connection->net_conf);
nc 2173 drivers/block/drbd/drbd_int.h mxb = nc ? nc->max_buffers : 1000000; /* arbitrary limit on open requests */
nc 725 drivers/block/drbd/drbd_main.c struct net_conf *nc;
nc 734 drivers/block/drbd/drbd_main.c nc = rcu_dereference(peer_device->connection->net_conf);
nc 738 drivers/block/drbd/drbd_main.c + strlen(nc->verify_alg) + 1
nc 764 drivers/block/drbd/drbd_main.c strcpy(p->verify_alg, nc->verify_alg);
nc 766 drivers/block/drbd/drbd_main.c strcpy(p->csums_alg, nc->csums_alg);
nc 776 drivers/block/drbd/drbd_main.c struct net_conf *nc;
nc 785 drivers/block/drbd/drbd_main.c nc = rcu_dereference(connection->net_conf);
nc 787 drivers/block/drbd/drbd_main.c if (nc->tentative && connection->agreed_pro_version < 92) {
nc 795 drivers/block/drbd/drbd_main.c size += strlen(nc->integrity_alg) + 1;
nc 797 drivers/block/drbd/drbd_main.c p->protocol = cpu_to_be32(nc->wire_protocol);
nc 798 drivers/block/drbd/drbd_main.c p->after_sb_0p = cpu_to_be32(nc->after_sb_0p);
nc 799 drivers/block/drbd/drbd_main.c p->after_sb_1p = cpu_to_be32(nc->after_sb_1p);
nc 800 drivers/block/drbd/drbd_main.c p->after_sb_2p = cpu_to_be32(nc->after_sb_2p);
nc 801 drivers/block/drbd/drbd_main.c p->two_primaries = cpu_to_be32(nc->two_primaries);
nc 803 drivers/block/drbd/drbd_main.c if (nc->discard_my_data)
nc 805 drivers/block/drbd/drbd_main.c if (nc->tentative)
nc 810 drivers/block/drbd/drbd_main.c strcpy(p->integrity_alg, nc->integrity_alg);
nc 3785 drivers/block/drbd/drbd_main.c struct net_conf *nc;
nc 3790 drivers/block/drbd/drbd_main.c nc = rcu_dereference(first_peer_device(device)->connection->net_conf);
nc 3791 drivers/block/drbd/drbd_main.c if (!nc) {
nc 3795 drivers/block/drbd/drbd_main.c timeout = nc->ko_count ? nc->timeout * HZ / 10 * nc->ko_count : MAX_SCHEDULE_TIMEOUT;
nc 622 drivers/block/drbd/drbd_nl.c struct net_conf *nc;
nc 692 drivers/block/drbd/drbd_nl.c nc = rcu_dereference(connection->net_conf);
nc 693 drivers/block/drbd/drbd_nl.c timeo = nc ? (nc->ping_timeo + 1) * HZ / 10 : 1;
nc 726 drivers/block/drbd/drbd_nl.c nc = connection->net_conf;
nc 727 drivers/block/drbd/drbd_nl.c if (nc)
nc 728 drivers/block/drbd/drbd_nl.c nc->discard_my_data = 0; /* without copy; single bit op is atomic */
nc 1807 drivers/block/drbd/drbd_nl.c struct net_conf *nc;
nc 1880 drivers/block/drbd/drbd_nl.c nc = rcu_dereference(connection->net_conf);
nc 1881 drivers/block/drbd/drbd_nl.c if (nc) {
nc 1882 drivers/block/drbd/drbd_nl.c if (new_disk_conf->fencing == FP_STONITH && nc->wire_protocol == DRBD_PROT_A) {
nc 3818 drivers/block/drbd/drbd_nl.c struct net_conf *nc;
nc 3820 drivers/block/drbd/drbd_nl.c nc = rcu_dereference(first_peer_device(device)->connection->net_conf);
nc 3821 drivers/block/drbd/drbd_nl.c if (nc)
nc 3822 drivers/block/drbd/drbd_nl.c err = net_conf_to_skb(skb, nc, exclude_sensitive);
nc 4009 drivers/block/drbd/drbd_nl.c struct net_conf *nc;
nc 4011 drivers/block/drbd/drbd_nl.c nc = rcu_dereference(connection->net_conf);
nc 4012 drivers/block/drbd/drbd_nl.c if (nc && net_conf_to_skb(skb, nc, 1) != 0)
nc 220 drivers/block/drbd/drbd_proc.c struct net_conf *nc;
nc 270 drivers/block/drbd/drbd_proc.c nc = rcu_dereference(first_peer_device(device)->connection->net_conf);
nc 271 drivers/block/drbd/drbd_proc.c wp = nc ? nc->wire_protocol - DRBD_PROT_A + 'A' : ' ';
nc 265 drivers/block/drbd/drbd_receiver.c struct net_conf *nc;
nc 270 drivers/block/drbd/drbd_receiver.c nc = rcu_dereference(peer_device->connection->net_conf);
nc 271 drivers/block/drbd/drbd_receiver.c mxb = nc ? nc->max_buffers : 1000000;
nc 592 drivers/block/drbd/drbd_receiver.c struct net_conf *nc;
nc 598 drivers/block/drbd/drbd_receiver.c nc = rcu_dereference(connection->net_conf);
nc 599 drivers/block/drbd/drbd_receiver.c if (!nc) {
nc 603 drivers/block/drbd/drbd_receiver.c sndbuf_size = nc->sndbuf_size;
nc 604 drivers/block/drbd/drbd_receiver.c rcvbuf_size = nc->rcvbuf_size;
nc 605 drivers/block/drbd/drbd_receiver.c connect_int = nc->connect_int;
nc 698 drivers/block/drbd/drbd_receiver.c struct net_conf *nc;
nc 702 drivers/block/drbd/drbd_receiver.c nc = rcu_dereference(connection->net_conf);
nc 703 drivers/block/drbd/drbd_receiver.c if (!nc) {
nc 707 drivers/block/drbd/drbd_receiver.c sndbuf_size = nc->sndbuf_size;
nc 708 drivers/block/drbd/drbd_receiver.c rcvbuf_size = nc->rcvbuf_size;
nc 768 drivers/block/drbd/drbd_receiver.c struct net_conf *nc;
nc 771 drivers/block/drbd/drbd_receiver.c nc = rcu_dereference(connection->net_conf);
nc 772 drivers/block/drbd/drbd_receiver.c if (!nc) {
nc 776 drivers/block/drbd/drbd_receiver.c connect_int = nc->connect_int;
nc 815 drivers/block/drbd/drbd_receiver.c struct net_conf *nc;
nc 819 drivers/block/drbd/drbd_receiver.c nc = rcu_dereference(connection->net_conf);
nc 820 drivers/block/drbd/drbd_receiver.c if (!nc) {
nc 824 drivers/block/drbd/drbd_receiver.c sock->sk->sk_rcvtimeo = nc->ping_timeo * 4 * HZ / 10;
nc 866 drivers/block/drbd/drbd_receiver.c struct net_conf *nc;
nc 874 drivers/block/drbd/drbd_receiver.c nc = rcu_dereference(connection->net_conf);
nc 875 drivers/block/drbd/drbd_receiver.c timeout = (nc->sock_check_timeo ?: nc->ping_timeo) * HZ / 10;
nc 925 drivers/block/drbd/drbd_receiver.c struct net_conf *nc;
nc 1039 drivers/block/drbd/drbd_receiver.c nc = rcu_dereference(connection->net_conf);
nc 1042 drivers/block/drbd/drbd_receiver.c sock.socket->sk->sk_rcvtimeo = nc->ping_timeo*4*HZ/10;
nc 1044 drivers/block/drbd/drbd_receiver.c msock.socket->sk->sk_rcvtimeo = nc->ping_int*HZ;
nc 1045 drivers/block/drbd/drbd_receiver.c timeout = nc->timeout * HZ / 10;
nc 1046 drivers/block/drbd/drbd_receiver.c discard_my_data = nc->discard_my_data;
nc 2589 drivers/block/drbd/drbd_receiver.c struct net_conf *nc;
nc 2665 drivers/block/drbd/drbd_receiver.c nc = rcu_dereference(peer_device->connection->net_conf);
nc 2666 drivers/block/drbd/drbd_receiver.c tp = nc->two_primaries;
nc 2668 drivers/block/drbd/drbd_receiver.c switch (nc->wire_protocol) {
nc 3493 drivers/block/drbd/drbd_receiver.c struct net_conf *nc;
nc 3544 drivers/block/drbd/drbd_receiver.c nc = rcu_dereference(peer_device->connection->net_conf);
nc 3545 drivers/block/drbd/drbd_receiver.c always_asbp = nc->always_asbp;
nc 3546 drivers/block/drbd/drbd_receiver.c rr_conflict = nc->rr_conflict;
nc 3547 drivers/block/drbd/drbd_receiver.c tentative = nc->tentative;
nc 3671 drivers/block/drbd/drbd_receiver.c struct net_conf *nc, *old_net_conf, *new_net_conf = NULL;
nc 3702 drivers/block/drbd/drbd_receiver.c nc = rcu_dereference(connection->net_conf);
nc 3704 drivers/block/drbd/drbd_receiver.c if (p_proto != nc->wire_protocol) {
nc 3709 drivers/block/drbd/drbd_receiver.c if (convert_after_sb(p_after_sb_0p) != nc->after_sb_0p) {
nc 3714 drivers/block/drbd/drbd_receiver.c if (convert_after_sb(p_after_sb_1p) != nc->after_sb_1p) {
nc 3719 drivers/block/drbd/drbd_receiver.c if (convert_after_sb(p_after_sb_2p) != nc->after_sb_2p) {
nc 3724 drivers/block/drbd/drbd_receiver.c if (p_discard_my_data && nc->discard_my_data) {
nc 3729 drivers/block/drbd/drbd_receiver.c if (p_two_primaries != nc->two_primaries) {
nc 3734 drivers/block/drbd/drbd_receiver.c if (strcmp(integrity_alg, nc->integrity_alg)) {
nc 5422 drivers/block/drbd/drbd_receiver.c struct net_conf *nc;
nc 5428 drivers/block/drbd/drbd_receiver.c nc = rcu_dereference(connection->net_conf);
nc 5429 drivers/block/drbd/drbd_receiver.c key_len = strlen(nc->shared_secret);
nc 5430 drivers/block/drbd/drbd_receiver.c memcpy(secret, nc->shared_secret, key_len);
nc 5966 drivers/block/drbd/drbd_receiver.c struct net_conf *nc;
nc 5969 drivers/block/drbd/drbd_receiver.c nc = rcu_dereference(connection->net_conf);
nc 5970 drivers/block/drbd/drbd_receiver.c t = ping_timeout ? nc->ping_timeo : nc->ping_int;
nc 6155 drivers/block/drbd/drbd_receiver.c struct net_conf *nc;
nc 6159 drivers/block/drbd/drbd_receiver.c nc = rcu_dereference(connection->net_conf);
nc 6160 drivers/block/drbd/drbd_receiver.c tcp_cork = nc->tcp_cork;
nc 571 drivers/block/drbd/drbd_req.c struct net_conf *nc;
nc 593 drivers/block/drbd/drbd_req.c nc = rcu_dereference(connection->net_conf);
nc 594 drivers/block/drbd/drbd_req.c p = nc->wire_protocol;
nc 706 drivers/block/drbd/drbd_req.c nc = rcu_dereference(connection->net_conf);
nc 707 drivers/block/drbd/drbd_req.c p = nc->max_epoch_size;
nc 993 drivers/block/drbd/drbd_req.c struct net_conf *nc;
nc 998 drivers/block/drbd/drbd_req.c nc = rcu_dereference(connection->net_conf);
nc 999 drivers/block/drbd/drbd_req.c on_congestion = nc ? nc->on_congestion : OC_BLOCK;
nc 1015 drivers/block/drbd/drbd_req.c if (nc->cong_fill &&
nc 1016 drivers/block/drbd/drbd_req.c atomic_read(&device->ap_in_flight) >= nc->cong_fill) {
nc 1021 drivers/block/drbd/drbd_req.c if (device->act_log->used >= nc->cong_extents) {
nc 1711 drivers/block/drbd/drbd_req.c struct net_conf *nc;
nc 1718 drivers/block/drbd/drbd_req.c nc = rcu_dereference(connection->net_conf);
nc 1719 drivers/block/drbd/drbd_req.c if (nc && device->state.conn >= C_WF_REPORT_PARAMS) {
nc 1720 drivers/block/drbd/drbd_req.c ko_count = nc->ko_count;
nc 1721 drivers/block/drbd/drbd_req.c timeout = nc->timeout;
nc 829 drivers/block/drbd/drbd_state.c struct net_conf *nc;
nc 838 drivers/block/drbd/drbd_state.c nc = rcu_dereference(first_peer_device(device)->connection->net_conf);
nc 839 drivers/block/drbd/drbd_state.c if (nc) {
nc 840 drivers/block/drbd/drbd_state.c if (!nc->two_primaries && ns.role == R_PRIMARY) {
nc 880 drivers/block/drbd/drbd_state.c (nc->verify_alg[0] == 0))
nc 974 drivers/block/drbd/drbd_state.c is_valid_conn_transition(enum drbd_conns oc, enum drbd_conns nc)
nc 977 drivers/block/drbd/drbd_state.c if (oc == nc)
nc 981 drivers/block/drbd/drbd_state.c if (oc == C_STANDALONE && nc == C_DISCONNECTING)
nc 985 drivers/block/drbd/drbd_state.c if (oc == C_STANDALONE && nc != C_UNCONNECTED)
nc 990 drivers/block/drbd/drbd_state.c if (oc < C_WF_REPORT_PARAMS && nc >= C_CONNECTED)
nc 994 drivers/block/drbd/drbd_state.c if (oc >= C_TIMEOUT && oc <= C_TEAR_DOWN && nc != C_UNCONNECTED && nc != C_DISCONNECTING)
nc 998 drivers/block/drbd/drbd_state.c if (oc == C_DISCONNECTING && nc != C_STANDALONE)
nc 1885 drivers/block/drbd/drbd_worker.c struct net_conf *nc;
nc 1889 drivers/block/drbd/drbd_worker.c nc = rcu_dereference(connection->net_conf);
nc 1890 drivers/block/drbd/drbd_worker.c timeo = nc->ping_int * HZ + nc->ping_timeo * HZ / 9;
nc 2080 drivers/block/drbd/drbd_worker.c struct net_conf *nc;
nc 2094 drivers/block/drbd/drbd_worker.c nc = rcu_dereference(connection->net_conf);
nc 2095 drivers/block/drbd/drbd_worker.c uncork = nc ? nc->tcp_cork : 0;
nc 2149 drivers/block/drbd/drbd_worker.c nc = rcu_dereference(connection->net_conf);
nc 2150 drivers/block/drbd/drbd_worker.c cork = nc ? nc->tcp_cork : 0;
nc 205 drivers/bus/moxtet.c of_register_moxtet_device(struct moxtet *moxtet, struct device_node *nc)
nc 214 drivers/bus/moxtet.c "Moxtet device alloc error for %pOF\n", nc);
nc 218 drivers/bus/moxtet.c ret = of_property_read_u32(nc, "reg", &val);
nc 221 drivers/bus/moxtet.c nc, ret);
nc 229 drivers/bus/moxtet.c nc, dev->idx);
nc 237 drivers/bus/moxtet.c dev_err(moxtet->dev, "%pOF Moxtet address 0x%x is empty\n", nc,
nc 243 drivers/bus/moxtet.c of_node_get(nc);
nc 244 drivers/bus/moxtet.c dev->dev.of_node = nc;
nc 249 drivers/bus/moxtet.c "Moxtet device register error for %pOF\n", nc);
nc 250 drivers/bus/moxtet.c of_node_put(nc);
nc 264 drivers/bus/moxtet.c struct device_node *nc;
nc 269 drivers/bus/moxtet.c for_each_available_child_of_node(moxtet->dev->of_node, nc) {
nc 270 drivers/bus/moxtet.c if (of_node_test_and_set_flag(nc, OF_POPULATED))
nc 272 drivers/bus/moxtet.c dev = of_register_moxtet_device(moxtet, nc);
nc 276 drivers/bus/moxtet.c nc);
nc 277 drivers/bus/moxtet.c of_node_clear_flag(nc, OF_POPULATED);
nc 155 drivers/crypto/cavium/zip/zip_regs.h u64 nc : 1;
nc 159 drivers/crypto/cavium/zip/zip_regs.h u64 nc : 1;
nc 308 drivers/crypto/cavium/zip/zip_regs.h u64 nc : 1;
nc 312 drivers/crypto/cavium/zip/zip_regs.h u64 nc : 1;
nc 119 drivers/gpio/gpio-moxtet.c struct device_node *nc = dev->of_node;
nc 126 drivers/gpio/gpio-moxtet.c nc, id);
nc 1682 drivers/gpu/drm/i915/gt/selftest_lrc.c unsigned long n, prime, nc;
nc 1723 drivers/gpu/drm/i915/gt/selftest_lrc.c for (nc = 0; nc < nctx; nc++) {
nc 1725 drivers/gpu/drm/i915/gt/selftest_lrc.c request[nc] =
nc 1726 drivers/gpu/drm/i915/gt/selftest_lrc.c i915_request_create(ve[nc]);
nc 1727 drivers/gpu/drm/i915/gt/selftest_lrc.c if (IS_ERR(request[nc])) {
nc 1728 drivers/gpu/drm/i915/gt/selftest_lrc.c err = PTR_ERR(request[nc]);
nc 1732 drivers/gpu/drm/i915/gt/selftest_lrc.c i915_request_add(request[nc]);
nc 1737 drivers/gpu/drm/i915/gt/selftest_lrc.c for (nc = 0; nc < nctx; nc++) {
nc 1738 drivers/gpu/drm/i915/gt/selftest_lrc.c request[nc] =
nc 1739 drivers/gpu/drm/i915/gt/selftest_lrc.c i915_request_create(ve[nc]);
nc 1740 drivers/gpu/drm/i915/gt/selftest_lrc.c if (IS_ERR(request[nc])) {
nc 1741 drivers/gpu/drm/i915/gt/selftest_lrc.c err = PTR_ERR(request[nc]);
nc 1745 drivers/gpu/drm/i915/gt/selftest_lrc.c i915_request_add(request[nc]);
nc 1750 drivers/gpu/drm/i915/gt/selftest_lrc.c for (nc = 0; nc < nctx; nc++) {
nc 1751 drivers/gpu/drm/i915/gt/selftest_lrc.c if (i915_request_wait(request[nc], 0, HZ / 10) < 0) {
nc 1754 drivers/gpu/drm/i915/gt/selftest_lrc.c request[nc]->fence.context,
nc 1755 drivers/gpu/drm/i915/gt/selftest_lrc.c request[nc]->fence.seqno);
nc 1759 drivers/gpu/drm/i915/gt/selftest_lrc.c request[nc]->fence.context,
nc 1760 drivers/gpu/drm/i915/gt/selftest_lrc.c request[nc]->fence.seqno);
nc 1787 drivers/gpu/drm/i915/gt/selftest_lrc.c for (nc = 0; nc < nctx; nc++) {
nc 1788 drivers/gpu/drm/i915/gt/selftest_lrc.c intel_context_unpin(ve[nc]);
nc 1789 drivers/gpu/drm/i915/gt/selftest_lrc.c intel_context_put(ve[nc]);
nc 1790 drivers/gpu/drm/i915/gt/selftest_lrc.c kernel_context_close(ctx[nc]);
nc 152 drivers/gpu/drm/i915/selftests/i915_vma.c unsigned long no, nc;
nc 172 drivers/gpu/drm/i915/selftests/i915_vma.c nc = 0;
nc 174 drivers/gpu/drm/i915/selftests/i915_vma.c for (; nc < num_ctx; nc++) {
nc 188 drivers/gpu/drm/i915/selftests/i915_vma.c __func__, no, nc))
nc 345 drivers/input/input-mt.c static void find_reduced_matrix(int *w, int nr, int nc, int nrc, int mu)
nc 351 drivers/input/input-mt.c adjust_dual(w + i, nr, w + i + nrc, nr <= nc, mu);
nc 354 drivers/input/input-mt.c sum += adjust_dual(w + i, 1, w + i + nr, nc <= nr, mu);
nc 204 drivers/leds/leds-bcm6328.c static int bcm6328_hwled(struct device *dev, struct device_node *nc, u32 reg,
nc 220 drivers/leds/leds-bcm6328.c cnt = of_property_count_elems_of_size(nc, "brcm,link-signal-sources",
nc 231 drivers/leds/leds-bcm6328.c of_property_read_u32_index(nc, "brcm,link-signal-sources", i,
nc 246 drivers/leds/leds-bcm6328.c cnt = of_property_count_elems_of_size(nc,
nc 258 drivers/leds/leds-bcm6328.c of_property_read_u32_index(nc, "brcm,activity-signal-sources",
nc 276 drivers/leds/leds-bcm6328.c static int bcm6328_led(struct device *dev, struct device_node *nc, u32 reg,
nc 294 drivers/leds/leds-bcm6328.c if (of_property_read_bool(nc, "active-low"))
nc 297 drivers/leds/leds-bcm6328.c led->cdev.name = of_get_property(nc, "label", NULL) ? : nc->name;
nc 298 drivers/leds/leds-bcm6328.c led->cdev.default_trigger = of_get_property(nc,
nc 302 drivers/leds/leds-bcm6328.c if (!of_property_read_string(nc, "default-state", &state)) {
nc 94 drivers/leds/leds-bcm6358.c static int bcm6358_led(struct device *dev, struct device_node *nc, u32 reg,
nc 109 drivers/leds/leds-bcm6358.c if (of_property_read_bool(nc, "active-low"))
nc 112 drivers/leds/leds-bcm6358.c led->cdev.name = of_get_property(nc, "label", NULL) ? : nc->name;
nc 113 drivers/leds/leds-bcm6358.c led->cdev.default_trigger = of_get_property(nc,
nc 117 drivers/leds/leds-bcm6358.c if (!of_property_read_string(nc, "default-state", &state)) {
nc 634 drivers/mailbox/zynqmp-ipi-mailbox.c struct device_node *nc, *np = pdev->dev.of_node;
nc 658 drivers/mailbox/zynqmp-ipi-mailbox.c for_each_available_child_of_node(np, nc) {
nc 660 drivers/mailbox/zynqmp-ipi-mailbox.c ret = zynqmp_ipi_mbox_probe(mbox, nc);
nc 3577 drivers/md/raid10.c int nc, fc, fo;
nc 3603 drivers/md/raid10.c nc = layout & 255;
nc 3607 drivers/md/raid10.c geo->near_copies = nc;
nc 3621 drivers/md/raid10.c geo->far_set_size = fc * nc;
nc 3628 drivers/md/raid10.c return nc*fc;
nc 199 drivers/mtd/nand/raw/atmel/nand-controller.c int (*remove)(struct atmel_nand_controller *nc);
nc 200 drivers/mtd/nand/raw/atmel/nand-controller.c void (*nand_init)(struct atmel_nand_controller *nc,
nc 285 drivers/mtd/nand/raw/atmel/nand-controller.c struct atmel_hsmc_nand_controller *nc = data;
nc 289 drivers/mtd/nand/raw/atmel/nand-controller.c regmap_read(nc->base.smc, ATMEL_HSMC_NFC_SR, &sr);
nc 291 drivers/mtd/nand/raw/atmel/nand-controller.c rcvd = sr & (nc->op.wait | ATMEL_HSMC_NFC_SR_ERRORS);
nc 292 drivers/mtd/nand/raw/atmel/nand-controller.c done = atmel_nfc_op_done(&nc->op, sr);
nc 295 drivers/mtd/nand/raw/atmel/nand-controller.c regmap_write(nc->base.smc, ATMEL_HSMC_NFC_IDR, rcvd);
nc 298 drivers/mtd/nand/raw/atmel/nand-controller.c complete(&nc->complete);
nc 303 drivers/mtd/nand/raw/atmel/nand-controller.c static int atmel_nfc_wait(struct atmel_hsmc_nand_controller *nc, bool poll,
nc 314 drivers/mtd/nand/raw/atmel/nand-controller.c ret = regmap_read_poll_timeout(nc->base.smc,
nc 316 drivers/mtd/nand/raw/atmel/nand-controller.c atmel_nfc_op_done(&nc->op,
nc 320 drivers/mtd/nand/raw/atmel/nand-controller.c init_completion(&nc->complete);
nc 321 drivers/mtd/nand/raw/atmel/nand-controller.c regmap_write(nc->base.smc, ATMEL_HSMC_NFC_IER,
nc 322 drivers/mtd/nand/raw/atmel/nand-controller.c nc->op.wait | ATMEL_HSMC_NFC_SR_ERRORS);
nc 323 drivers/mtd/nand/raw/atmel/nand-controller.c ret = wait_for_completion_timeout(&nc->complete,
nc 330 drivers/mtd/nand/raw/atmel/nand-controller.c regmap_write(nc->base.smc, ATMEL_HSMC_NFC_IDR, 0xffffffff);
nc 333 drivers/mtd/nand/raw/atmel/nand-controller.c if (nc->op.errors & ATMEL_HSMC_NFC_SR_DTOE) {
nc 334 drivers/mtd/nand/raw/atmel/nand-controller.c dev_err(nc->base.dev, "Waiting NAND R/B Timeout\n");
nc 338 drivers/mtd/nand/raw/atmel/nand-controller.c if (nc->op.errors & ATMEL_HSMC_NFC_SR_UNDEF) {
nc 339 drivers/mtd/nand/raw/atmel/nand-controller.c dev_err(nc->base.dev, "Access to an undefined area\n");
nc 343 drivers/mtd/nand/raw/atmel/nand-controller.c if (nc->op.errors & ATMEL_HSMC_NFC_SR_AWB) {
nc 344 drivers/mtd/nand/raw/atmel/nand-controller.c dev_err(nc->base.dev, "Access while busy\n");
nc 348 drivers/mtd/nand/raw/atmel/nand-controller.c if (nc->op.errors & ATMEL_HSMC_NFC_SR_NFCASE) {
nc 349 drivers/mtd/nand/raw/atmel/nand-controller.c dev_err(nc->base.dev, "Wrong access size\n");
nc 363 drivers/mtd/nand/raw/atmel/nand-controller.c static int atmel_nand_dma_transfer(struct atmel_nand_controller *nc,
nc 372 drivers/mtd/nand/raw/atmel/nand-controller.c buf_dma = dma_map_single(nc->dev, buf, len, dir);
nc 373 drivers/mtd/nand/raw/atmel/nand-controller.c if (dma_mapping_error(nc->dev, dev_dma)) {
nc 374 drivers/mtd/nand/raw/atmel/nand-controller.c dev_err(nc->dev,
nc 387 drivers/mtd/nand/raw/atmel/nand-controller.c tx = dmaengine_prep_dma_memcpy(nc->dmac, dst_dma, src_dma, len,
nc 390 drivers/mtd/nand/raw/atmel/nand-controller.c dev_err(nc->dev, "Failed to prepare DMA memcpy\n");
nc 399 drivers/mtd/nand/raw/atmel/nand-controller.c dev_err(nc->dev, "Failed to do DMA tx_submit\n");
nc 403 drivers/mtd/nand/raw/atmel/nand-controller.c dma_async_issue_pending(nc->dmac);
nc 409 drivers/mtd/nand/raw/atmel/nand-controller.c dma_unmap_single(nc->dev, buf_dma, len, dir);
nc 412 drivers/mtd/nand/raw/atmel/nand-controller.c dev_dbg(nc->dev, "Fall back to CPU I/O\n");
nc 437 drivers/mtd/nand/raw/atmel/nand-controller.c struct atmel_nand_controller *nc;
nc 439 drivers/mtd/nand/raw/atmel/nand-controller.c nc = to_nand_controller(chip->controller);
nc 446 drivers/mtd/nand/raw/atmel/nand-controller.c if (nc->dmac && virt_addr_valid(buf) &&
nc 448 drivers/mtd/nand/raw/atmel/nand-controller.c !atmel_nand_dma_transfer(nc, buf, nand->activecs->io.dma, len,
nc 461 drivers/mtd/nand/raw/atmel/nand-controller.c struct atmel_nand_controller *nc;
nc 463 drivers/mtd/nand/raw/atmel/nand-controller.c nc = to_nand_controller(chip->controller);
nc 470 drivers/mtd/nand/raw/atmel/nand-controller.c if (nc->dmac && virt_addr_valid(buf) &&
nc 472 drivers/mtd/nand/raw/atmel/nand-controller.c !atmel_nand_dma_transfer(nc, (void *)buf, nand->activecs->io.dma,
nc 508 drivers/mtd/nand/raw/atmel/nand-controller.c struct atmel_hsmc_nand_controller *nc;
nc 511 drivers/mtd/nand/raw/atmel/nand-controller.c nc = to_hsmc_nand_controller(chip->controller);
nc 513 drivers/mtd/nand/raw/atmel/nand-controller.c regmap_read(nc->base.smc, ATMEL_HSMC_NFC_SR, &status);
nc 522 drivers/mtd/nand/raw/atmel/nand-controller.c struct atmel_hsmc_nand_controller *nc;
nc 524 drivers/mtd/nand/raw/atmel/nand-controller.c nc = to_hsmc_nand_controller(chip->controller);
nc 529 drivers/mtd/nand/raw/atmel/nand-controller.c regmap_write(nc->base.smc, ATMEL_HSMC_NFC_CTRL,
nc 537 drivers/mtd/nand/raw/atmel/nand-controller.c regmap_update_bits(nc->base.smc, ATMEL_HSMC_NFC_CFG,
nc 545 drivers/mtd/nand/raw/atmel/nand-controller.c regmap_write(nc->base.smc, ATMEL_HSMC_NFC_CTRL,
nc 549 drivers/mtd/nand/raw/atmel/nand-controller.c static int atmel_nfc_exec_op(struct atmel_hsmc_nand_controller *nc, bool poll)
nc 551 drivers/mtd/nand/raw/atmel/nand-controller.c u8 *addrs = nc->op.addrs;
nc 556 drivers/mtd/nand/raw/atmel/nand-controller.c nc->op.wait = ATMEL_HSMC_NFC_SR_CMDDONE;
nc 558 drivers/mtd/nand/raw/atmel/nand-controller.c for (i = 0; i < nc->op.ncmds; i++)
nc 559 drivers/mtd/nand/raw/atmel/nand-controller.c op |= ATMEL_NFC_CMD(i, nc->op.cmds[i]);
nc 561 drivers/mtd/nand/raw/atmel/nand-controller.c if (nc->op.naddrs == ATMEL_NFC_MAX_ADDR_CYCLES)
nc 562 drivers/mtd/nand/raw/atmel/nand-controller.c regmap_write(nc->base.smc, ATMEL_HSMC_NFC_ADDR, *addrs++);
nc 564 drivers/mtd/nand/raw/atmel/nand-controller.c op |= ATMEL_NFC_CSID(nc->op.cs) |
nc 565 drivers/mtd/nand/raw/atmel/nand-controller.c ATMEL_NFC_ACYCLE(nc->op.naddrs);
nc 567 drivers/mtd/nand/raw/atmel/nand-controller.c if (nc->op.ncmds > 1)
nc 573 drivers/mtd/nand/raw/atmel/nand-controller.c if (nc->op.data != ATMEL_NFC_NO_DATA) {
nc 575 drivers/mtd/nand/raw/atmel/nand-controller.c nc->op.wait |= ATMEL_HSMC_NFC_SR_XFRDONE;
nc 577 drivers/mtd/nand/raw/atmel/nand-controller.c if (nc->op.data == ATMEL_NFC_WRITE_DATA)
nc 582 drivers/mtd/nand/raw/atmel/nand-controller.c regmap_read(nc->base.smc, ATMEL_HSMC_NFC_SR, &val);
nc 585 drivers/mtd/nand/raw/atmel/nand-controller.c regmap_write(nc->io, op, addr);
nc 587 drivers/mtd/nand/raw/atmel/nand-controller.c ret = atmel_nfc_wait(nc, poll, 0);
nc 589 drivers/mtd/nand/raw/atmel/nand-controller.c dev_err(nc->base.dev,
nc 594 drivers/mtd/nand/raw/atmel/nand-controller.c memset(&nc->op, 0, sizeof(nc->op));
nc 603 drivers/mtd/nand/raw/atmel/nand-controller.c struct atmel_hsmc_nand_controller *nc;
nc 605 drivers/mtd/nand/raw/atmel/nand-controller.c nc = to_hsmc_nand_controller(chip->controller);
nc 608 drivers/mtd/nand/raw/atmel/nand-controller.c if (nc->op.naddrs == ATMEL_NFC_MAX_ADDR_CYCLES)
nc 611 drivers/mtd/nand/raw/atmel/nand-controller.c nc->op.addrs[nc->op.naddrs++] = dat;
nc 613 drivers/mtd/nand/raw/atmel/nand-controller.c if (nc->op.ncmds > 1)
nc 616 drivers/mtd/nand/raw/atmel/nand-controller.c nc->op.cmds[nc->op.ncmds++] = dat;
nc 620 drivers/mtd/nand/raw/atmel/nand-controller.c nc->op.cs = nand->activecs->id;
nc 621 drivers/mtd/nand/raw/atmel/nand-controller.c atmel_nfc_exec_op(nc, true);
nc 629 drivers/mtd/nand/raw/atmel/nand-controller.c struct atmel_nand_controller *nc;
nc 631 drivers/mtd/nand/raw/atmel/nand-controller.c nc = to_nand_controller(chip->controller);
nc 641 drivers/mtd/nand/raw/atmel/nand-controller.c writeb(cmd, nand->activecs->io.virt + nc->caps->ale_offs);
nc 643 drivers/mtd/nand/raw/atmel/nand-controller.c writeb(cmd, nand->activecs->io.virt + nc->caps->cle_offs);
nc 650 drivers/mtd/nand/raw/atmel/nand-controller.c struct atmel_hsmc_nand_controller *nc;
nc 653 drivers/mtd/nand/raw/atmel/nand-controller.c nc = to_hsmc_nand_controller(chip->controller);
nc 655 drivers/mtd/nand/raw/atmel/nand-controller.c if (nc->base.dmac)
nc 656 drivers/mtd/nand/raw/atmel/nand-controller.c ret = atmel_nand_dma_transfer(&nc->base, (void *)buf,
nc 657 drivers/mtd/nand/raw/atmel/nand-controller.c nc->sram.dma, mtd->writesize,
nc 662 drivers/mtd/nand/raw/atmel/nand-controller.c memcpy_toio(nc->sram.virt, buf, mtd->writesize);
nc 665 drivers/mtd/nand/raw/atmel/nand-controller.c memcpy_toio(nc->sram.virt + mtd->writesize, chip->oob_poi,
nc 673 drivers/mtd/nand/raw/atmel/nand-controller.c struct atmel_hsmc_nand_controller *nc;
nc 676 drivers/mtd/nand/raw/atmel/nand-controller.c nc = to_hsmc_nand_controller(chip->controller);
nc 678 drivers/mtd/nand/raw/atmel/nand-controller.c if (nc->base.dmac)
nc 679 drivers/mtd/nand/raw/atmel/nand-controller.c ret = atmel_nand_dma_transfer(&nc->base, buf, nc->sram.dma,
nc 684 drivers/mtd/nand/raw/atmel/nand-controller.c memcpy_fromio(buf, nc->sram.virt, mtd->writesize);
nc 687 drivers/mtd/nand/raw/atmel/nand-controller.c memcpy_fromio(chip->oob_poi, nc->sram.virt + mtd->writesize,
nc 694 drivers/mtd/nand/raw/atmel/nand-controller.c struct atmel_hsmc_nand_controller *nc;
nc 696 drivers/mtd/nand/raw/atmel/nand-controller.c nc = to_hsmc_nand_controller(chip->controller);
nc 699 drivers/mtd/nand/raw/atmel/nand-controller.c nc->op.addrs[nc->op.naddrs++] = column;
nc 705 drivers/mtd/nand/raw/atmel/nand-controller.c nc->op.addrs[nc->op.naddrs++] = column >> 8;
nc 709 drivers/mtd/nand/raw/atmel/nand-controller.c nc->op.addrs[nc->op.naddrs++] = page;
nc 710 drivers/mtd/nand/raw/atmel/nand-controller.c nc->op.addrs[nc->op.naddrs++] = page >> 8;
nc 713 drivers/mtd/nand/raw/atmel/nand-controller.c nc->op.addrs[nc->op.naddrs++] = page >> 16;
nc 720 drivers/mtd/nand/raw/atmel/nand-controller.c struct atmel_nand_controller *nc;
nc 723 drivers/mtd/nand/raw/atmel/nand-controller.c nc = to_nand_controller(chip->controller);
nc 730 drivers/mtd/nand/raw/atmel/nand-controller.c dev_err(nc->dev,
nc 748 drivers/mtd/nand/raw/atmel/nand-controller.c struct atmel_nand_controller *nc;
nc 753 drivers/mtd/nand/raw/atmel/nand-controller.c nc = to_nand_controller(chip->controller);
nc 760 drivers/mtd/nand/raw/atmel/nand-controller.c dev_err(nc->dev,
nc 783 drivers/mtd/nand/raw/atmel/nand-controller.c struct atmel_nand_controller *nc;
nc 788 drivers/mtd/nand/raw/atmel/nand-controller.c nc = to_nand_controller(chip->controller);
nc 795 drivers/mtd/nand/raw/atmel/nand-controller.c dev_err(nc->dev,
nc 909 drivers/mtd/nand/raw/atmel/nand-controller.c struct atmel_hsmc_nand_controller *nc;
nc 912 drivers/mtd/nand/raw/atmel/nand-controller.c nc = to_hsmc_nand_controller(chip->controller);
nc 916 drivers/mtd/nand/raw/atmel/nand-controller.c nc->op.cmds[0] = NAND_CMD_SEQIN;
nc 917 drivers/mtd/nand/raw/atmel/nand-controller.c nc->op.ncmds = 1;
nc 919 drivers/mtd/nand/raw/atmel/nand-controller.c nc->op.cs = nand->activecs->id;
nc 920 drivers/mtd/nand/raw/atmel/nand-controller.c nc->op.data = ATMEL_NFC_WRITE_DATA;
nc 926 drivers/mtd/nand/raw/atmel/nand-controller.c ret = atmel_nfc_exec_op(nc, false);
nc 929 drivers/mtd/nand/raw/atmel/nand-controller.c dev_err(nc->base.dev,
nc 944 drivers/mtd/nand/raw/atmel/nand-controller.c nc->op.cmds[0] = NAND_CMD_PAGEPROG;
nc 945 drivers/mtd/nand/raw/atmel/nand-controller.c nc->op.ncmds = 1;
nc 946 drivers/mtd/nand/raw/atmel/nand-controller.c nc->op.cs = nand->activecs->id;
nc 947 drivers/mtd/nand/raw/atmel/nand-controller.c ret = atmel_nfc_exec_op(nc, false);
nc 949 drivers/mtd/nand/raw/atmel/nand-controller.c dev_err(nc->base.dev, "Failed to program NAND page (err = %d)\n",
nc 981 drivers/mtd/nand/raw/atmel/nand-controller.c struct atmel_hsmc_nand_controller *nc;
nc 984 drivers/mtd/nand/raw/atmel/nand-controller.c nc = to_hsmc_nand_controller(chip->controller);
nc 998 drivers/mtd/nand/raw/atmel/nand-controller.c nc->op.cmds[nc->op.ncmds++] = NAND_CMD_READ0;
nc 1001 drivers/mtd/nand/raw/atmel/nand-controller.c nc->op.cmds[nc->op.ncmds++] = NAND_CMD_READSTART;
nc 1004 drivers/mtd/nand/raw/atmel/nand-controller.c nc->op.cs = nand->activecs->id;
nc 1005 drivers/mtd/nand/raw/atmel/nand-controller.c nc->op.data = ATMEL_NFC_READ_DATA;
nc 1011 drivers/mtd/nand/raw/atmel/nand-controller.c ret = atmel_nfc_exec_op(nc, false);
nc 1014 drivers/mtd/nand/raw/atmel/nand-controller.c dev_err(nc->base.dev,
nc 1048 drivers/mtd/nand/raw/atmel/nand-controller.c struct atmel_nand_controller *nc;
nc 1051 drivers/mtd/nand/raw/atmel/nand-controller.c nc = to_nand_controller(chip->controller);
nc 1053 drivers/mtd/nand/raw/atmel/nand-controller.c if (!nc->pmecc) {
nc 1054 drivers/mtd/nand/raw/atmel/nand-controller.c dev_err(nc->dev, "HW ECC not supported\n");
nc 1058 drivers/mtd/nand/raw/atmel/nand-controller.c if (nc->caps->legacy_of_bindings) {
nc 1061 drivers/mtd/nand/raw/atmel/nand-controller.c if (!of_property_read_u32(nc->dev->of_node, "atmel,pmecc-cap",
nc 1065 drivers/mtd/nand/raw/atmel/nand-controller.c if (!of_property_read_u32(nc->dev->of_node,
nc 1098 drivers/mtd/nand/raw/atmel/nand-controller.c nand->pmecc = atmel_pmecc_create_user(nc->pmecc, &req);
nc 1116 drivers/mtd/nand/raw/atmel/nand-controller.c struct atmel_nand_controller *nc;
nc 1119 drivers/mtd/nand/raw/atmel/nand-controller.c nc = to_nand_controller(chip->controller);
nc 1142 drivers/mtd/nand/raw/atmel/nand-controller.c dev_err(nc->dev, "Unsupported ECC mode: %d\n",
nc 1175 drivers/mtd/nand/raw/atmel/nand-controller.c struct atmel_nand_controller *nc;
nc 1178 drivers/mtd/nand/raw/atmel/nand-controller.c nc = to_nand_controller(nand->base.controller);
nc 1193 drivers/mtd/nand/raw/atmel/nand-controller.c mckperiodps = NSEC_PER_SEC / clk_get_rate(nc->mck);
nc 1404 drivers/mtd/nand/raw/atmel/nand-controller.c struct atmel_nand_controller *nc;
nc 1409 drivers/mtd/nand/raw/atmel/nand-controller.c nc = to_nand_controller(nand->base.controller);
nc 1420 drivers/mtd/nand/raw/atmel/nand-controller.c atmel_smc_cs_conf_apply(nc->smc, cs->id, &cs->smcconf);
nc 1429 drivers/mtd/nand/raw/atmel/nand-controller.c struct atmel_hsmc_nand_controller *nc;
nc 1434 drivers/mtd/nand/raw/atmel/nand-controller.c nc = to_hsmc_nand_controller(nand->base.controller);
nc 1449 drivers/mtd/nand/raw/atmel/nand-controller.c atmel_hsmc_cs_conf_apply(nc->base.smc, nc->hsmc_layout, cs->id,
nc 1459 drivers/mtd/nand/raw/atmel/nand-controller.c struct atmel_nand_controller *nc;
nc 1461 drivers/mtd/nand/raw/atmel/nand-controller.c nc = to_nand_controller(nand->base.controller);
nc 1467 drivers/mtd/nand/raw/atmel/nand-controller.c return nc->caps->ops->setup_data_interface(nand, csline, conf);
nc 1470 drivers/mtd/nand/raw/atmel/nand-controller.c static void atmel_nand_init(struct atmel_nand_controller *nc,
nc 1476 drivers/mtd/nand/raw/atmel/nand-controller.c mtd->dev.parent = nc->dev;
nc 1477 drivers/mtd/nand/raw/atmel/nand-controller.c nand->base.controller = &nc->base;
nc 1486 drivers/mtd/nand/raw/atmel/nand-controller.c if (!nc->mck || !nc->caps->ops->setup_data_interface)
nc 1496 drivers/mtd/nand/raw/atmel/nand-controller.c if (nc->dmac)
nc 1500 drivers/mtd/nand/raw/atmel/nand-controller.c if (nc->pmecc)
nc 1504 drivers/mtd/nand/raw/atmel/nand-controller.c static void atmel_smc_nand_init(struct atmel_nand_controller *nc,
nc 1511 drivers/mtd/nand/raw/atmel/nand-controller.c atmel_nand_init(nc, nand);
nc 1530 drivers/mtd/nand/raw/atmel/nand-controller.c static void atmel_hsmc_nand_init(struct atmel_nand_controller *nc,
nc 1535 drivers/mtd/nand/raw/atmel/nand-controller.c atmel_nand_init(nc, nand);
nc 1558 drivers/mtd/nand/raw/atmel/nand-controller.c static struct atmel_nand *atmel_nand_create(struct atmel_nand_controller *nc,
nc 1569 drivers/mtd/nand/raw/atmel/nand-controller.c dev_err(nc->dev, "Missing or invalid reg property\n");
nc 1573 drivers/mtd/nand/raw/atmel/nand-controller.c nand = devm_kzalloc(nc->dev, struct_size(nand, cs, numcs), GFP_KERNEL);
nc 1575 drivers/mtd/nand/raw/atmel/nand-controller.c dev_err(nc->dev, "Failed to allocate NAND object\n");
nc 1581 drivers/mtd/nand/raw/atmel/nand-controller.c gpio = devm_fwnode_get_index_gpiod_from_child(nc->dev, "det", 0,
nc 1585 drivers/mtd/nand/raw/atmel/nand-controller.c dev_err(nc->dev,
nc 1600 drivers/mtd/nand/raw/atmel/nand-controller.c dev_err(nc->dev, "Invalid reg property (err = %d)\n",
nc 1608 drivers/mtd/nand/raw/atmel/nand-controller.c dev_err(nc->dev, "Invalid reg property (err = %d)\n",
nc 1616 drivers/mtd/nand/raw/atmel/nand-controller.c nand->cs[i].io.virt = devm_ioremap_resource(nc->dev, &res);
nc 1627 drivers/mtd/nand/raw/atmel/nand-controller.c gpio = devm_fwnode_get_index_gpiod_from_child(nc->dev,
nc 1631 drivers/mtd/nand/raw/atmel/nand-controller.c dev_err(nc->dev,
nc 1643 drivers/mtd/nand/raw/atmel/nand-controller.c gpio = devm_fwnode_get_index_gpiod_from_child(nc->dev, "cs",
nc 1648 drivers/mtd/nand/raw/atmel/nand-controller.c dev_err(nc->dev,
nc 1664 drivers/mtd/nand/raw/atmel/nand-controller.c atmel_nand_controller_add_nand(struct atmel_nand_controller *nc,
nc 1673 drivers/mtd/nand/raw/atmel/nand-controller.c dev_info(nc->dev, "No SmartMedia card inserted.\n");
nc 1677 drivers/mtd/nand/raw/atmel/nand-controller.c nc->caps->ops->nand_init(nc, nand);
nc 1681 drivers/mtd/nand/raw/atmel/nand-controller.c dev_err(nc->dev, "NAND scan failed: %d\n", ret);
nc 1687 drivers/mtd/nand/raw/atmel/nand-controller.c dev_err(nc->dev, "Failed to register mtd device: %d\n", ret);
nc 1692 drivers/mtd/nand/raw/atmel/nand-controller.c list_add_tail(&nand->node, &nc->chips);
nc 1698 drivers/mtd/nand/raw/atmel/nand-controller.c atmel_nand_controller_remove_nands(struct atmel_nand_controller *nc)
nc 1703 drivers/mtd/nand/raw/atmel/nand-controller.c list_for_each_entry_safe(nand, tmp, &nc->chips, node) {
nc 1713 drivers/mtd/nand/raw/atmel/nand-controller.c atmel_nand_controller_legacy_add_nands(struct atmel_nand_controller *nc)
nc 1715 drivers/mtd/nand/raw/atmel/nand-controller.c struct device *dev = nc->dev;
nc 1725 drivers/mtd/nand/raw/atmel/nand-controller.c nand = devm_kzalloc(nc->dev, sizeof(*nand) + sizeof(*nand->cs),
nc 1773 drivers/mtd/nand/raw/atmel/nand-controller.c gpio = devm_gpiod_get_index_optional(nc->dev, NULL, 2, GPIOD_IN);
nc 1783 drivers/mtd/nand/raw/atmel/nand-controller.c nand_set_flash_node(&nand->base, nc->dev->of_node);
nc 1785 drivers/mtd/nand/raw/atmel/nand-controller.c return atmel_nand_controller_add_nand(nc, nand);
nc 1788 drivers/mtd/nand/raw/atmel/nand-controller.c static int atmel_nand_controller_add_nands(struct atmel_nand_controller *nc)
nc 1791 drivers/mtd/nand/raw/atmel/nand-controller.c struct device *dev = nc->dev;
nc 1796 drivers/mtd/nand/raw/atmel/nand-controller.c if (nc->caps->legacy_of_bindings)
nc 1797 drivers/mtd/nand/raw/atmel/nand-controller.c return atmel_nand_controller_legacy_add_nands(nc);
nc 1820 drivers/mtd/nand/raw/atmel/nand-controller.c nand = atmel_nand_create(nc, nand_np, reg_cells);
nc 1826 drivers/mtd/nand/raw/atmel/nand-controller.c ret = atmel_nand_controller_add_nand(nc, nand);
nc 1834 drivers/mtd/nand/raw/atmel/nand-controller.c atmel_nand_controller_remove_nands(nc);
nc 1839 drivers/mtd/nand/raw/atmel/nand-controller.c static void atmel_nand_controller_cleanup(struct atmel_nand_controller *nc)
nc 1841 drivers/mtd/nand/raw/atmel/nand-controller.c if (nc->dmac)
nc 1842 drivers/mtd/nand/raw/atmel/nand-controller.c dma_release_channel(nc->dmac);
nc 1844 drivers/mtd/nand/raw/atmel/nand-controller.c clk_put(nc->mck);
nc 1918 drivers/mtd/nand/raw/atmel/nand-controller.c struct atmel_nand_controller *nc = to_nand_controller(chip->controller);
nc 1923 drivers/mtd/nand/raw/atmel/nand-controller.c ret = nc->caps->ops->ecc_init(chip);
nc 1927 drivers/mtd/nand/raw/atmel/nand-controller.c if (nc->caps->legacy_of_bindings || !nc->dev->of_node) {
nc 1945 drivers/mtd/nand/raw/atmel/nand-controller.c mtd->name = devm_kasprintf(nc->dev, GFP_KERNEL,
nc 1946 drivers/mtd/nand/raw/atmel/nand-controller.c "%s:nand.%d", dev_name(nc->dev),
nc 1949 drivers/mtd/nand/raw/atmel/nand-controller.c dev_err(nc->dev, "Failed to allocate mtd->name\n");
nc 1962 drivers/mtd/nand/raw/atmel/nand-controller.c static int atmel_nand_controller_init(struct atmel_nand_controller *nc,
nc 1970 drivers/mtd/nand/raw/atmel/nand-controller.c nand_controller_init(&nc->base);
nc 1971 drivers/mtd/nand/raw/atmel/nand-controller.c nc->base.ops = &atmel_nand_controller_ops;
nc 1972 drivers/mtd/nand/raw/atmel/nand-controller.c INIT_LIST_HEAD(&nc->chips);
nc 1973 drivers/mtd/nand/raw/atmel/nand-controller.c nc->dev = dev;
nc 1974 drivers/mtd/nand/raw/atmel/nand-controller.c nc->caps = caps;
nc 1976 drivers/mtd/nand/raw/atmel/nand-controller.c platform_set_drvdata(pdev, nc);
nc 1978 drivers/mtd/nand/raw/atmel/nand-controller.c nc->pmecc = devm_atmel_pmecc_get(dev);
nc 1979 drivers/mtd/nand/raw/atmel/nand-controller.c if (IS_ERR(nc->pmecc)) {
nc 1980 drivers/mtd/nand/raw/atmel/nand-controller.c ret = PTR_ERR(nc->pmecc);
nc 1987 drivers/mtd/nand/raw/atmel/nand-controller.c if (nc->caps->has_dma && !atmel_nand_avoid_dma) {
nc 1993 drivers/mtd/nand/raw/atmel/nand-controller.c nc->dmac = dma_request_channel(mask, NULL, NULL);
nc 1994 drivers/mtd/nand/raw/atmel/nand-controller.c if (!nc->dmac)
nc 1995 drivers/mtd/nand/raw/atmel/nand-controller.c dev_err(nc->dev, "Failed to request DMA channel\n");
nc 1999 drivers/mtd/nand/raw/atmel/nand-controller.c if (nc->caps->legacy_of_bindings)
nc 2002 drivers/mtd/nand/raw/atmel/nand-controller.c nc->mck = of_clk_get(dev->parent->of_node, 0);
nc 2003 drivers/mtd/nand/raw/atmel/nand-controller.c if (IS_ERR(nc->mck)) {
nc 2005 drivers/mtd/nand/raw/atmel/nand-controller.c return PTR_ERR(nc->mck);
nc 2014 drivers/mtd/nand/raw/atmel/nand-controller.c nc->smc = syscon_node_to_regmap(np);
nc 2016 drivers/mtd/nand/raw/atmel/nand-controller.c if (IS_ERR(nc->smc)) {
nc 2017 drivers/mtd/nand/raw/atmel/nand-controller.c ret = PTR_ERR(nc->smc);
nc 2026 drivers/mtd/nand/raw/atmel/nand-controller.c atmel_smc_nand_controller_init(struct atmel_smc_nand_controller *nc)
nc 2028 drivers/mtd/nand/raw/atmel/nand-controller.c struct device *dev = nc->base.dev;
nc 2034 drivers/mtd/nand/raw/atmel/nand-controller.c if (nc->base.caps->legacy_of_bindings)
nc 2038 drivers/mtd/nand/raw/atmel/nand-controller.c nc->base.caps->ebi_csa_regmap_name, 0);
nc 2048 drivers/mtd/nand/raw/atmel/nand-controller.c nc->ebi_csa_regmap = syscon_node_to_regmap(np);
nc 2050 drivers/mtd/nand/raw/atmel/nand-controller.c if (IS_ERR(nc->ebi_csa_regmap)) {
nc 2051 drivers/mtd/nand/raw/atmel/nand-controller.c ret = PTR_ERR(nc->ebi_csa_regmap);
nc 2056 drivers/mtd/nand/raw/atmel/nand-controller.c nc->ebi_csa = (struct atmel_smc_nand_ebi_csa_cfg *)match->data;
nc 2064 drivers/mtd/nand/raw/atmel/nand-controller.c nc->ebi_csa->offs += 4;
nc 2070 drivers/mtd/nand/raw/atmel/nand-controller.c atmel_hsmc_nand_controller_legacy_init(struct atmel_hsmc_nand_controller *nc)
nc 2078 drivers/mtd/nand/raw/atmel/nand-controller.c struct device *dev = nc->base.dev;
nc 2091 drivers/mtd/nand/raw/atmel/nand-controller.c nc->clk = of_clk_get(nfc_np, 0);
nc 2092 drivers/mtd/nand/raw/atmel/nand-controller.c if (IS_ERR(nc->clk)) {
nc 2093 drivers/mtd/nand/raw/atmel/nand-controller.c ret = PTR_ERR(nc->clk);
nc 2099 drivers/mtd/nand/raw/atmel/nand-controller.c ret = clk_prepare_enable(nc->clk);
nc 2106 drivers/mtd/nand/raw/atmel/nand-controller.c nc->irq = of_irq_get(nand_np, 0);
nc 2107 drivers/mtd/nand/raw/atmel/nand-controller.c if (nc->irq <= 0) {
nc 2108 drivers/mtd/nand/raw/atmel/nand-controller.c ret = nc->irq ?: -ENXIO;
nc 2130 drivers/mtd/nand/raw/atmel/nand-controller.c nc->io = devm_regmap_init_mmio(dev, iomem, &regmap_conf);
nc 2131 drivers/mtd/nand/raw/atmel/nand-controller.c if (IS_ERR(nc->io)) {
nc 2132 drivers/mtd/nand/raw/atmel/nand-controller.c ret = PTR_ERR(nc->io);
nc 2153 drivers/mtd/nand/raw/atmel/nand-controller.c nc->base.smc = devm_regmap_init_mmio(dev, iomem, &regmap_conf);
nc 2154 drivers/mtd/nand/raw/atmel/nand-controller.c if (IS_ERR(nc->base.smc)) {
nc 2155 drivers/mtd/nand/raw/atmel/nand-controller.c ret = PTR_ERR(nc->base.smc);
nc 2168 drivers/mtd/nand/raw/atmel/nand-controller.c nc->sram.virt = devm_ioremap_resource(dev, &res);
nc 2169 drivers/mtd/nand/raw/atmel/nand-controller.c if (IS_ERR(nc->sram.virt)) {
nc 2170 drivers/mtd/nand/raw/atmel/nand-controller.c ret = PTR_ERR(nc->sram.virt);
nc 2174 drivers/mtd/nand/raw/atmel/nand-controller.c nc->sram.dma = res.start;
nc 2183 drivers/mtd/nand/raw/atmel/nand-controller.c atmel_hsmc_nand_controller_init(struct atmel_hsmc_nand_controller *nc)
nc 2185 drivers/mtd/nand/raw/atmel/nand-controller.c struct device *dev = nc->base.dev;
nc 2195 drivers/mtd/nand/raw/atmel/nand-controller.c nc->hsmc_layout = atmel_hsmc_get_reg_layout(np);
nc 2197 drivers/mtd/nand/raw/atmel/nand-controller.c nc->irq = of_irq_get(np, 0);
nc 2199 drivers/mtd/nand/raw/atmel/nand-controller.c if (nc->irq <= 0) {
nc 2200 drivers/mtd/nand/raw/atmel/nand-controller.c ret = nc->irq ?: -ENXIO;
nc 2213 drivers/mtd/nand/raw/atmel/nand-controller.c nc->io = syscon_node_to_regmap(np);
nc 2215 drivers/mtd/nand/raw/atmel/nand-controller.c if (IS_ERR(nc->io)) {
nc 2216 drivers/mtd/nand/raw/atmel/nand-controller.c ret = PTR_ERR(nc->io);
nc 2221 drivers/mtd/nand/raw/atmel/nand-controller.c nc->sram.pool = of_gen_pool_get(nc->base.dev->of_node,
nc 2223 drivers/mtd/nand/raw/atmel/nand-controller.c if (!nc->sram.pool) {
nc 2224 drivers/mtd/nand/raw/atmel/nand-controller.c dev_err(nc->base.dev, "Missing SRAM\n");
nc 2228 drivers/mtd/nand/raw/atmel/nand-controller.c nc->sram.virt = (void __iomem *)gen_pool_dma_alloc(nc->sram.pool,
nc 2230 drivers/mtd/nand/raw/atmel/nand-controller.c &nc->sram.dma);
nc 2231 drivers/mtd/nand/raw/atmel/nand-controller.c if (!nc->sram.virt) {
nc 2232 drivers/mtd/nand/raw/atmel/nand-controller.c dev_err(nc->base.dev,
nc 2241 drivers/mtd/nand/raw/atmel/nand-controller.c atmel_hsmc_nand_controller_remove(struct atmel_nand_controller *nc)
nc 2246 drivers/mtd/nand/raw/atmel/nand-controller.c ret = atmel_nand_controller_remove_nands(nc);
nc 2250 drivers/mtd/nand/raw/atmel/nand-controller.c hsmc_nc = container_of(nc, struct atmel_hsmc_nand_controller, base);
nc 2261 drivers/mtd/nand/raw/atmel/nand-controller.c atmel_nand_controller_cleanup(nc);
nc 2270 drivers/mtd/nand/raw/atmel/nand-controller.c struct atmel_hsmc_nand_controller *nc;
nc 2273 drivers/mtd/nand/raw/atmel/nand-controller.c nc = devm_kzalloc(dev, sizeof(*nc), GFP_KERNEL);
nc 2274 drivers/mtd/nand/raw/atmel/nand-controller.c if (!nc)
nc 2277 drivers/mtd/nand/raw/atmel/nand-controller.c ret = atmel_nand_controller_init(&nc->base, pdev, caps);
nc 2282 drivers/mtd/nand/raw/atmel/nand-controller.c ret = atmel_hsmc_nand_controller_legacy_init(nc);
nc 2284 drivers/mtd/nand/raw/atmel/nand-controller.c ret = atmel_hsmc_nand_controller_init(nc);
nc 2290 drivers/mtd/nand/raw/atmel/nand-controller.c regmap_write(nc->base.smc, ATMEL_HSMC_NFC_IDR, 0xffffffff);
nc 2291 drivers/mtd/nand/raw/atmel/nand-controller.c ret = devm_request_irq(dev, nc->irq, atmel_nfc_interrupt,
nc 2292 drivers/mtd/nand/raw/atmel/nand-controller.c IRQF_SHARED, "nfc", nc);
nc 2301 drivers/mtd/nand/raw/atmel/nand-controller.c regmap_write(nc->base.smc, ATMEL_HSMC_NFC_CFG,
nc 2304 drivers/mtd/nand/raw/atmel/nand-controller.c ret = atmel_nand_controller_add_nands(&nc->base);
nc 2311 drivers/mtd/nand/raw/atmel/nand-controller.c atmel_hsmc_nand_controller_remove(&nc->base);
nc 2344 drivers/mtd/nand/raw/atmel/nand-controller.c struct atmel_smc_nand_controller *nc;
nc 2347 drivers/mtd/nand/raw/atmel/nand-controller.c nc = devm_kzalloc(dev, sizeof(*nc), GFP_KERNEL);
nc 2348 drivers/mtd/nand/raw/atmel/nand-controller.c if (!nc)
nc 2351 drivers/mtd/nand/raw/atmel/nand-controller.c ret = atmel_nand_controller_init(&nc->base, pdev, caps);
nc 2355 drivers/mtd/nand/raw/atmel/nand-controller.c ret = atmel_smc_nand_controller_init(nc);
nc 2359 drivers/mtd/nand/raw/atmel/nand-controller.c return atmel_nand_controller_add_nands(&nc->base);
nc 2363 drivers/mtd/nand/raw/atmel/nand-controller.c atmel_smc_nand_controller_remove(struct atmel_nand_controller *nc)
nc 2367 drivers/mtd/nand/raw/atmel/nand-controller.c ret = atmel_nand_controller_remove_nands(nc);
nc 2371 drivers/mtd/nand/raw/atmel/nand-controller.c atmel_nand_controller_cleanup(nc);
nc 2556 drivers/mtd/nand/raw/atmel/nand-controller.c struct atmel_nand_controller *nc = platform_get_drvdata(pdev);
nc 2558 drivers/mtd/nand/raw/atmel/nand-controller.c return nc->caps->ops->remove(nc);
nc 2563 drivers/mtd/nand/raw/atmel/nand-controller.c struct atmel_nand_controller *nc = dev_get_drvdata(dev);
nc 2566 drivers/mtd/nand/raw/atmel/nand-controller.c if (nc->pmecc)
nc 2567 drivers/mtd/nand/raw/atmel/nand-controller.c atmel_pmecc_reset(nc->pmecc);
nc 2569 drivers/mtd/nand/raw/atmel/nand-controller.c list_for_each_entry(nand, &nc->chips, node) {
nc 267 drivers/mtd/nand/raw/marvell_nand.c #define MARVELL_LAYOUT(ws, dc, ds, nc, fcc, db, sb, eb, ldb, lsb, leb) \
nc 272 drivers/mtd/nand/raw/marvell_nand.c .nchunks = nc, \
nc 29 drivers/mtd/nand/raw/orion_nand.c static void orion_nand_cmd_ctrl(struct nand_chip *nc, int cmd,
nc 32 drivers/mtd/nand/raw/orion_nand.c struct orion_nand_data *board = nand_get_controller_data(nc);
nc 45 drivers/mtd/nand/raw/orion_nand.c if (nc->options & NAND_BUSWIDTH_16)
nc 48 drivers/mtd/nand/raw/orion_nand.c writeb(cmd, nc->legacy.IO_ADDR_W + offs);
nc 89 drivers/mtd/nand/raw/orion_nand.c struct nand_chip *nc;
nc 101 drivers/mtd/nand/raw/orion_nand.c nc = &info->chip;
nc 102 drivers/mtd/nand/raw/orion_nand.c mtd = nand_to_mtd(nc);
nc 137 drivers/mtd/nand/raw/orion_nand.c nand_set_controller_data(nc, board);
nc 138 drivers/mtd/nand/raw/orion_nand.c nand_set_flash_node(nc, pdev->dev.of_node);
nc 139 drivers/mtd/nand/raw/orion_nand.c nc->legacy.IO_ADDR_R = nc->legacy.IO_ADDR_W = io_base;
nc 140 drivers/mtd/nand/raw/orion_nand.c nc->legacy.cmd_ctrl = orion_nand_cmd_ctrl;
nc 141 drivers/mtd/nand/raw/orion_nand.c nc->legacy.read_buf = orion_nand_read_buf;
nc 142 drivers/mtd/nand/raw/orion_nand.c nc->ecc.mode = NAND_ECC_SOFT;
nc 143 drivers/mtd/nand/raw/orion_nand.c nc->ecc.algo = NAND_ECC_HAMMING;
nc 146 drivers/mtd/nand/raw/orion_nand.c nc->legacy.chip_delay = board->chip_delay;
nc 153 drivers/mtd/nand/raw/orion_nand.c nc->options |= NAND_BUSWIDTH_16;
nc 176 drivers/mtd/nand/raw/orion_nand.c ret = nand_scan(nc, 1);
nc 183 drivers/mtd/nand/raw/orion_nand.c nand_release(nc);
nc 1172 drivers/net/ethernet/socionext/sni_ave.c int nc, nr, ret;
nc 1175 drivers/net/ethernet/socionext/sni_ave.c for (nc = 0; nc < priv->nclks; nc++) {
nc 1176 drivers/net/ethernet/socionext/sni_ave.c ret = clk_prepare_enable(priv->clk[nc]);
nc 1242 drivers/net/ethernet/socionext/sni_ave.c while (--nc >= 0)
nc 1243 drivers/net/ethernet/socionext/sni_ave.c clk_disable_unprepare(priv->clk[nc]);
nc 449 drivers/net/ethernet/stmicro/stmmac/dwmac5.c entry->val.nc = 1;
nc 1015 drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c entry->val.nc = 1;
nc 109 drivers/net/ethernet/stmicro/stmmac/stmmac.h u8 nc:1;
nc 1068 drivers/net/ethernet/sun/sunvnet_common.c int i, nc, err, blen;
nc 1081 drivers/net/ethernet/sun/sunvnet_common.c nc = err;
nc 1087 drivers/net/ethernet/sun/sunvnet_common.c if (nc < ncookies) {
nc 1092 drivers/net/ethernet/sun/sunvnet_common.c blen, cookies + nc, ncookies - nc,
nc 1100 drivers/net/ethernet/sun/sunvnet_common.c ldc_unmap(lp, cookies, nc);
nc 1103 drivers/net/ethernet/sun/sunvnet_common.c nc += err;
nc 1105 drivers/net/ethernet/sun/sunvnet_common.c return nc;
nc 210 drivers/platform/x86/intel_scu_ipc.c int nc;
nc 225 drivers/platform/x86/intel_scu_ipc.c for (nc = 0; nc < count; nc++, offset += 2) {
nc 226 drivers/platform/x86/intel_scu_ipc.c cbuf[offset] = addr[nc];
nc 227 drivers/platform/x86/intel_scu_ipc.c cbuf[offset + 1] = addr[nc] >> 8;
nc 231 drivers/platform/x86/intel_scu_ipc.c for (nc = 0, offset = 0; nc < count; nc++, offset += 4)
nc 232 drivers/platform/x86/intel_scu_ipc.c ipc_data_writel(scu, wbuf[nc], offset);
nc 235 drivers/platform/x86/intel_scu_ipc.c for (nc = 0; nc < count; nc++, offset += 1)
nc 236 drivers/platform/x86/intel_scu_ipc.c cbuf[offset] = data[nc];
nc 237 drivers/platform/x86/intel_scu_ipc.c for (nc = 0, offset = 0; nc < count; nc++, offset += 4)
nc 238 drivers/platform/x86/intel_scu_ipc.c ipc_data_writel(scu, wbuf[nc], offset);
nc 251 drivers/platform/x86/intel_scu_ipc.c for (nc = 0; nc < count; nc++)
nc 252 drivers/platform/x86/intel_scu_ipc.c data[nc] = ipc_data_readb(scu, nc);
nc 618 drivers/spi/spi-fsl-espi.c struct device_node *nc;
nc 629 drivers/spi/spi-fsl-espi.c for_each_available_child_of_node(master->dev.of_node, nc) {
nc 631 drivers/spi/spi-fsl-espi.c ret = of_property_read_u32(nc, "reg", &cs);
nc 638 drivers/spi/spi-fsl-espi.c ret = of_property_read_u32(nc, "fsl,csbef", &prop);
nc 645 drivers/spi/spi-fsl-espi.c ret = of_property_read_u32(nc, "fsl,csaft", &prop);
nc 1700 drivers/spi/spi.c struct device_node *nc)
nc 1706 drivers/spi/spi.c if (of_property_read_bool(nc, "spi-cpha"))
nc 1708 drivers/spi/spi.c if (of_property_read_bool(nc, "spi-cpol"))
nc 1710 drivers/spi/spi.c if (of_property_read_bool(nc, "spi-3wire"))
nc 1712 drivers/spi/spi.c if (of_property_read_bool(nc, "spi-lsb-first"))
nc 1714 drivers/spi/spi.c if (of_property_read_bool(nc, "spi-cs-high"))
nc 1718 drivers/spi/spi.c if (!of_property_read_u32(nc, "spi-tx-bus-width", &value)) {
nc 1739 drivers/spi/spi.c if (!of_property_read_u32(nc, "spi-rx-bus-width", &value)) {
nc 1761 drivers/spi/spi.c if (!of_node_name_eq(nc, "slave")) {
nc 1763 drivers/spi/spi.c nc);
nc 1770 drivers/spi/spi.c rc = of_property_read_u32(nc, "reg", &value);
nc 1773 drivers/spi/spi.c nc, rc);
nc 1788 drivers/spi/spi.c rc = of_property_read_u32(nc, "spi-max-frequency", &value);
nc 1791 drivers/spi/spi.c "%pOF has no valid 'spi-max-frequency' property (%d)\n", nc, rc);
nc 1800 drivers/spi/spi.c of_register_spi_device(struct spi_controller *ctlr, struct device_node *nc)
nc 1808 drivers/spi/spi.c dev_err(&ctlr->dev, "spi_device alloc error for %pOF\n", nc);
nc 1814 drivers/spi/spi.c rc = of_modalias_node(nc, spi->modalias,
nc 1817 drivers/spi/spi.c dev_err(&ctlr->dev, "cannot find modalias for %pOF\n", nc);
nc 1821 drivers/spi/spi.c rc = of_spi_parse_dt(ctlr, spi, nc);
nc 1826 drivers/spi/spi.c of_node_get(nc);
nc 1827 drivers/spi/spi.c spi->dev.of_node = nc;
nc 1832 drivers/spi/spi.c dev_err(&ctlr->dev, "spi_device register error %pOF\n", nc);
nc 1839 drivers/spi/spi.c of_node_put(nc);
nc 1855 drivers/spi/spi.c struct device_node *nc;
nc 1860 drivers/spi/spi.c for_each_available_child_of_node(ctlr->dev.of_node, nc) {
nc 1861 drivers/spi/spi.c if (of_node_test_and_set_flag(nc, OF_POPULATED))
nc 1863 drivers/spi/spi.c spi = of_register_spi_device(ctlr, nc);
nc 1866 drivers/spi/spi.c "Failed to create SPI device for %pOF\n", nc);
nc 1867 drivers/spi/spi.c of_node_clear_flag(nc, OF_POPULATED);
nc 2781 drivers/tty/n_gsm.c static int gsm_create_network(struct gsm_dlci *dlci, struct gsm_netconfig *nc)
nc 2795 drivers/tty/n_gsm.c if (nc->protocol != htons(ETH_P_IP))
nc 2798 drivers/tty/n_gsm.c if (nc->adaption != 3 && nc->adaption != 4)
nc 2804 drivers/tty/n_gsm.c if (nc->if_name[0] != '\0')
nc 2805 drivers/tty/n_gsm.c netname = nc->if_name;
nc 2818 drivers/tty/n_gsm.c strncpy(nc->if_name, net->name, IFNAMSIZ); /* return net name */
nc 2823 drivers/tty/n_gsm.c dlci->adaption = nc->adaption;
nc 3106 drivers/tty/n_gsm.c struct gsm_netconfig nc;
nc 3113 drivers/tty/n_gsm.c if (copy_from_user(&nc, (void __user *)arg, sizeof(nc)))
nc 3115 drivers/tty/n_gsm.c nc.if_name[IFNAMSIZ-1] = '\0';
nc 3118 drivers/tty/n_gsm.c index = gsm_create_network(dlci, &nc);
nc 3120 drivers/tty/n_gsm.c if (copy_to_user((void __user *)arg, &nc, sizeof(nc)))
nc 217 fs/fat/dir.c unsigned char nc = t->charset2lower[*c];
nc 219 fs/fat/dir.c if (!nc)
nc 220 fs/fat/dir.c nc = *c;
nc 222 fs/fat/dir.c charlen = t->char2uni(&nc, 1, uni);
nc 2262 fs/nfsd/nfs4state.c struct nfsdfs_client *nc;
nc 2263 fs/nfsd/nfs4state.c nc = get_nfsdfs_client(inode);
nc 2264 fs/nfsd/nfs4state.c if (!nc)
nc 2266 fs/nfsd/nfs4state.c return container_of(nc, struct nfs4_client, cl_nfsdfs);
nc 1223 fs/nfsd/nfsctl.c struct nfsdfs_client *nc = inode->i_private;
nc 1225 fs/nfsd/nfsctl.c if (nc)
nc 1226 fs/nfsd/nfsctl.c kref_get(&nc->cl_ref);
nc 1227 fs/nfsd/nfsctl.c return nc;
nc 1232 fs/nfsd/nfsctl.c struct nfsdfs_client *nc;
nc 1235 fs/nfsd/nfsctl.c nc = __get_nfsdfs_client(inode);
nc 1237 fs/nfsd/nfsctl.c return nc;
nc 429 fs/nilfs2/page.c unsigned int nc = 0;
nc 436 fs/nilfs2/page.c nc++;
nc 438 fs/nilfs2/page.c return nc;
nc 652 fs/nilfs2/sufile.c ssize_t n, nc;
nc 696 fs/nilfs2/sufile.c nc = 0;
nc 700 fs/nilfs2/sufile.c nc++;
nc 704 fs/nilfs2/sufile.c if (nc > 0) {
nc 706 fs/nilfs2/sufile.c ncleaned += nc;
nc 58 fs/nls/nls_base.c int c0, c, nc;
nc 61 fs/nls/nls_base.c nc = 0;
nc 65 fs/nls/nls_base.c nc++;
nc 72 fs/nls/nls_base.c return nc;
nc 74 fs/nls/nls_base.c if (inlen <= nc)
nc 89 fs/nls/nls_base.c int c, nc;
nc 99 fs/nls/nls_base.c nc = 0;
nc 101 fs/nls/nls_base.c nc++;
nc 110 fs/nls/nls_base.c return nc;
nc 2436 fs/unicode/mkutf8data.c unsigned int nc = (vc * tc);
nc 2450 fs/unicode/mkutf8data.c unsigned int li = si / nc;
nc 2451 fs/unicode/mkutf8data.c unsigned int vi = (si % nc) / tc;
nc 573 include/linux/gfp.h extern void *page_frag_alloc(struct page_frag_cache *nc,
nc 64 include/linux/nls.h unsigned char nc = t->charset2lower[c];
nc 66 include/linux/nls.h return nc ? nc : c;
nc 71 include/linux/nls.h unsigned char nc = t->charset2upper[c];
nc 73 include/linux/nls.h return nc ? nc : c;
nc 1240 ipc/mqueue.c struct sk_buff *nc;
nc 1244 ipc/mqueue.c nc = NULL;
nc 1259 ipc/mqueue.c nc = alloc_skb(NOTIFY_COOKIE_LEN, GFP_KERNEL);
nc 1260 ipc/mqueue.c if (!nc)
nc 1263 ipc/mqueue.c if (copy_from_user(nc->data,
nc 1271 ipc/mqueue.c skb_put(nc, NOTIFY_COOKIE_LEN);
nc 1287 ipc/mqueue.c ret = netlink_attachskb(sock, nc, &timeo, NULL);
nc 1326 ipc/mqueue.c info->notify_cookie = nc;
nc 1328 ipc/mqueue.c nc = NULL;
nc 1348 ipc/mqueue.c netlink_detachskb(sock, nc);
nc 1351 ipc/mqueue.c dev_kfree_skb(nc);
nc 4847 mm/page_alloc.c static struct page *__page_frag_cache_refill(struct page_frag_cache *nc,
nc 4858 mm/page_alloc.c nc->size = page ? PAGE_FRAG_CACHE_MAX_SIZE : PAGE_SIZE;
nc 4863 mm/page_alloc.c nc->va = page ? page_address(page) : NULL;
nc 4877 mm/page_alloc.c void *page_frag_alloc(struct page_frag_cache *nc,
nc 4884 mm/page_alloc.c if (unlikely(!nc->va)) {
nc 4886 mm/page_alloc.c page = __page_frag_cache_refill(nc, gfp_mask);
nc 4892 mm/page_alloc.c size = nc->size;
nc 4900 mm/page_alloc.c nc->pfmemalloc = page_is_pfmemalloc(page);
nc 4901 mm/page_alloc.c nc->pagecnt_bias = PAGE_FRAG_CACHE_MAX_SIZE + 1;
nc 4902 mm/page_alloc.c nc->offset = size;
nc 4905 mm/page_alloc.c offset = nc->offset - fragsz;
nc 4907 mm/page_alloc.c page = virt_to_page(nc->va);
nc 4909 mm/page_alloc.c if (!page_ref_sub_and_test(page, nc->pagecnt_bias))
nc 4914 mm/page_alloc.c size = nc->size;
nc 4920 mm/page_alloc.c nc->pagecnt_bias = PAGE_FRAG_CACHE_MAX_SIZE + 1;
nc 4924 mm/page_alloc.c nc->pagecnt_bias--;
nc 4925 mm/page_alloc.c nc->offset = offset;
nc 4927 mm/page_alloc.c return nc->va + offset;
nc 948 mm/slab.c struct array_cache *nc;
nc 963 mm/slab.c nc = per_cpu_ptr(cachep->cpu_cache, cpu);
nc 964 mm/slab.c free_block(cachep, nc->entry, nc->avail, node, &list);
nc 965 mm/slab.c nc->avail = 0;
nc 79 net/batman-adv/network-coding.c queue_delayed_work(batadv_event_workqueue, &bat_priv->nc.work,
nc 144 net/batman-adv/network-coding.c bat_priv->nc.timestamp_fwd_flush = jiffies;
nc 145 net/batman-adv/network-coding.c bat_priv->nc.timestamp_sniffed_purge = jiffies;
nc 147 net/batman-adv/network-coding.c if (bat_priv->nc.coding_hash || bat_priv->nc.decoding_hash)
nc 150 net/batman-adv/network-coding.c bat_priv->nc.coding_hash = batadv_hash_new(128);
nc 151 net/batman-adv/network-coding.c if (!bat_priv->nc.coding_hash)
nc 154 net/batman-adv/network-coding.c batadv_hash_set_lock_class(bat_priv->nc.coding_hash,
nc 157 net/batman-adv/network-coding.c bat_priv->nc.decoding_hash = batadv_hash_new(128);
nc 158 net/batman-adv/network-coding.c if (!bat_priv->nc.decoding_hash)
nc 161 net/batman-adv/network-coding.c batadv_hash_set_lock_class(bat_priv->nc.decoding_hash,
nc 164 net/batman-adv/network-coding.c INIT_DELAYED_WORK(&bat_priv->nc.work, batadv_nc_worker);
nc 184 net/batman-adv/network-coding.c bat_priv->nc.min_tq = 200;
nc 185 net/batman-adv/network-coding.c bat_priv->nc.max_fwd_delay = 10;
nc 186 net/batman-adv/network-coding.c bat_priv->nc.max_buffer_time = 200;
nc 300 net/batman-adv/network-coding.c bat_priv->nc.max_fwd_delay * 10);
nc 321 net/batman-adv/network-coding.c bat_priv->nc.max_buffer_time * 10);
nc 596 net/batman-adv/network-coding.c unsigned long timeout = bat_priv->nc.max_buffer_time;
nc 635 net/batman-adv/network-coding.c unsigned long timeout = bat_priv->nc.max_fwd_delay;
nc 716 net/batman-adv/network-coding.c bat_priv = container_of(priv_nc, struct batadv_priv, nc);
nc 719 net/batman-adv/network-coding.c batadv_nc_purge_paths(bat_priv, bat_priv->nc.coding_hash,
nc 721 net/batman-adv/network-coding.c batadv_nc_purge_paths(bat_priv, bat_priv->nc.decoding_hash,
nc 724 net/batman-adv/network-coding.c timeout = bat_priv->nc.max_fwd_delay;
nc 726 net/batman-adv/network-coding.c if (batadv_has_timed_out(bat_priv->nc.timestamp_fwd_flush, timeout)) {
nc 727 net/batman-adv/network-coding.c batadv_nc_process_nc_paths(bat_priv, bat_priv->nc.coding_hash,
nc 729 net/batman-adv/network-coding.c bat_priv->nc.timestamp_fwd_flush = jiffies;
nc 732 net/batman-adv/network-coding.c if (batadv_has_timed_out(bat_priv->nc.timestamp_sniffed_purge,
nc 733 net/batman-adv/network-coding.c bat_priv->nc.max_buffer_time)) {
nc 734 net/batman-adv/network-coding.c batadv_nc_process_nc_paths(bat_priv, bat_priv->nc.decoding_hash,
nc 736 net/batman-adv/network-coding.c bat_priv->nc.timestamp_sniffed_purge = jiffies;
nc 778 net/batman-adv/network-coding.c if (ogm_packet->tq < bat_priv->nc.min_tq)
nc 1271 net/batman-adv/network-coding.c struct batadv_hashtable *hash = bat_priv->nc.coding_hash;
nc 1527 net/batman-adv/network-coding.c bat_priv->nc.coding_hash,
nc 1576 net/batman-adv/network-coding.c bat_priv->nc.decoding_hash,
nc 1738 net/batman-adv/network-coding.c struct batadv_hashtable *hash = bat_priv->nc.decoding_hash;
nc 1872 net/batman-adv/network-coding.c cancel_delayed_work_sync(&bat_priv->nc.work);
nc 1874 net/batman-adv/network-coding.c batadv_nc_purge_paths(bat_priv, bat_priv->nc.coding_hash, NULL);
nc 1875 net/batman-adv/network-coding.c batadv_hash_destroy(bat_priv->nc.coding_hash);
nc 1876 net/batman-adv/network-coding.c batadv_nc_purge_paths(bat_priv, bat_priv->nc.decoding_hash, NULL);
nc 1877 net/batman-adv/network-coding.c batadv_hash_destroy(bat_priv->nc.decoding_hash);
nc 1954 net/batman-adv/network-coding.c debugfs_create_u8("min_tq", 0644, nc_dir, &bat_priv->nc.min_tq);
nc 1957 net/batman-adv/network-coding.c &bat_priv->nc.max_fwd_delay);
nc 1960 net/batman-adv/network-coding.c &bat_priv->nc.max_buffer_time);
nc 1759 net/batman-adv/types.h struct batadv_priv_nc nc;
nc 373 net/core/skbuff.c struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);
nc 375 net/core/skbuff.c return page_frag_alloc(&nc->page, fragsz, gfp_mask);
nc 395 net/core/skbuff.c struct page_frag_cache *nc;
nc 400 net/core/skbuff.c nc = this_cpu_ptr(&netdev_alloc_cache);
nc 401 net/core/skbuff.c data = page_frag_alloc(nc, fragsz, GFP_ATOMIC);
nc 427 net/core/skbuff.c struct page_frag_cache *nc;
nc 449 net/core/skbuff.c nc = this_cpu_ptr(&netdev_alloc_cache);
nc 450 net/core/skbuff.c data = page_frag_alloc(nc, len, gfp_mask);
nc 451 net/core/skbuff.c pfmemalloc = nc->pfmemalloc;
nc 454 net/core/skbuff.c nc = this_cpu_ptr(&napi_alloc_cache.page);
nc 455 net/core/skbuff.c data = page_frag_alloc(nc, len, gfp_mask);
nc 456 net/core/skbuff.c pfmemalloc
= nc->pfmemalloc; nc 499 net/core/skbuff.c struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache); nc 519 net/core/skbuff.c data = page_frag_alloc(&nc->page, len, gfp_mask); nc 530 net/core/skbuff.c if (nc->page.pfmemalloc) nc 858 net/core/skbuff.c struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache); nc 861 net/core/skbuff.c if (nc->skb_count) { nc 862 net/core/skbuff.c kmem_cache_free_bulk(skbuff_head_cache, nc->skb_count, nc 863 net/core/skbuff.c nc->skb_cache); nc 864 net/core/skbuff.c nc->skb_count = 0; nc 870 net/core/skbuff.c struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache); nc 876 net/core/skbuff.c nc->skb_cache[nc->skb_count++] = skb; nc 884 net/core/skbuff.c if (unlikely(nc->skb_count == NAPI_SKB_CACHE_SIZE)) { nc 886 net/core/skbuff.c nc->skb_cache); nc 887 net/core/skbuff.c nc->skb_count = 0; nc 343 net/ncsi/internal.h #define NCSI_FOR_EACH_CHANNEL(np, nc) \ nc 344 net/ncsi/internal.h list_for_each_entry_rcu(nc, &np->channels, node) nc 348 net/ncsi/internal.h void ncsi_start_channel_monitor(struct ncsi_channel *nc); nc 349 net/ncsi/internal.h void ncsi_stop_channel_monitor(struct ncsi_channel *nc); nc 362 net/ncsi/internal.h struct ncsi_channel **nc); nc 49 net/ncsi/ncsi-aen.c struct ncsi_channel *nc, *tmp; nc 60 net/ncsi/ncsi-aen.c ncsi_find_package_and_channel(ndp, h->common.channel, NULL, &nc); nc 61 net/ncsi/ncsi-aen.c if (!nc) nc 67 net/ncsi/ncsi-aen.c spin_lock_irqsave(&nc->lock, flags); nc 68 net/ncsi/ncsi-aen.c ncm = &nc->modes[NCSI_MODE_LINK]; nc 78 net/ncsi/ncsi-aen.c nc->id, data & 0x1 ? "up" : "down"); nc 80 net/ncsi/ncsi-aen.c chained = !list_empty(&nc->link); nc 81 net/ncsi/ncsi-aen.c state = nc->state; nc 82 net/ncsi/ncsi-aen.c spin_unlock_irqrestore(&nc->lock, flags); nc 87 net/ncsi/ncsi-aen.c nc->id); nc 92 net/ncsi/ncsi-aen.c if (!ndp->multi_package && !nc->package->multi_channel) { nc 95 net/ncsi/ncsi-aen.c ncsi_stop_channel_monitor(nc); nc 97 net/ncsi/ncsi-aen.c list_add_tail_rcu(&nc->link, &ndp->channel_queue); nc 106 net/ncsi/ncsi-aen.c ncm = &nc->modes[NCSI_MODE_TX_ENABLE]; nc 107 net/ncsi/ncsi-aen.c if (ncsi_channel_is_last(ndp, nc)) { nc 112 net/ncsi/ncsi-aen.c ncsi_update_tx_channel(ndp, nc->package, nc, NULL); nc 114 net/ncsi/ncsi-aen.c } else if (has_link && nc->package->preferred_channel == nc) { nc 116 net/ncsi/ncsi-aen.c ncsi_update_tx_channel(ndp, nc->package, NULL, nc); nc 126 net/ncsi/ncsi-aen.c ncsi_update_tx_channel(ndp, nc->package, nc 127 net/ncsi/ncsi-aen.c tmp, nc); nc 143 net/ncsi/ncsi-aen.c struct ncsi_channel *nc; nc 147 net/ncsi/ncsi-aen.c ncsi_find_package_and_channel(ndp, h->common.channel, NULL, &nc); nc 148 net/ncsi/ncsi-aen.c if (!nc) nc 151 net/ncsi/ncsi-aen.c spin_lock_irqsave(&nc->lock, flags); nc 152 net/ncsi/ncsi-aen.c if (!list_empty(&nc->link) || nc 153 net/ncsi/ncsi-aen.c nc->state != NCSI_CHANNEL_ACTIVE) { nc 154 net/ncsi/ncsi-aen.c spin_unlock_irqrestore(&nc->lock, flags); nc 157 net/ncsi/ncsi-aen.c spin_unlock_irqrestore(&nc->lock, flags); nc 159 net/ncsi/ncsi-aen.c ncsi_stop_channel_monitor(nc); nc 160 net/ncsi/ncsi-aen.c spin_lock_irqsave(&nc->lock, flags); nc 161 net/ncsi/ncsi-aen.c nc->state = NCSI_CHANNEL_INVISIBLE; nc 162 net/ncsi/ncsi-aen.c spin_unlock_irqrestore(&nc->lock, flags); nc 165 net/ncsi/ncsi-aen.c nc->state = NCSI_CHANNEL_INACTIVE; nc 166 net/ncsi/ncsi-aen.c list_add_tail_rcu(&nc->link, &ndp->channel_queue); nc 175 net/ncsi/ncsi-aen.c struct ncsi_channel *nc; nc 181 net/ncsi/ncsi-aen.c ncsi_find_package_and_channel(ndp, h->common.channel, NULL, &nc); nc 182 
net/ncsi/ncsi-aen.c if (!nc) nc 185 net/ncsi/ncsi-aen.c spin_lock_irqsave(&nc->lock, flags); nc 186 net/ncsi/ncsi-aen.c ncm = &nc->modes[NCSI_MODE_LINK]; nc 189 net/ncsi/ncsi-aen.c spin_unlock_irqrestore(&nc->lock, flags); nc 192 net/ncsi/ncsi-aen.c ncm->data[3] & 0x1 ? "" : "not ", nc->id); nc 35 net/ncsi/ncsi-manage.c struct ncsi_channel *nc; nc 38 net/ncsi/ncsi-manage.c NCSI_FOR_EACH_CHANNEL(np, nc) { nc 39 net/ncsi/ncsi-manage.c if (nc == channel) nc 41 net/ncsi/ncsi-manage.c if (nc->state == NCSI_CHANNEL_ACTIVE && nc 42 net/ncsi/ncsi-manage.c ncsi_channel_has_link(nc)) nc 53 net/ncsi/ncsi-manage.c struct ncsi_channel *nc; nc 64 net/ncsi/ncsi-manage.c NCSI_FOR_EACH_CHANNEL(np, nc) { nc 65 net/ncsi/ncsi-manage.c spin_lock_irqsave(&nc->lock, flags); nc 67 net/ncsi/ncsi-manage.c if (!list_empty(&nc->link) || nc 68 net/ncsi/ncsi-manage.c nc->state != NCSI_CHANNEL_ACTIVE) { nc 69 net/ncsi/ncsi-manage.c spin_unlock_irqrestore(&nc->lock, flags); nc 73 net/ncsi/ncsi-manage.c if (ncsi_channel_has_link(nc)) { nc 74 net/ncsi/ncsi-manage.c spin_unlock_irqrestore(&nc->lock, flags); nc 79 net/ncsi/ncsi-manage.c spin_unlock_irqrestore(&nc->lock, flags); nc 89 net/ncsi/ncsi-manage.c struct ncsi_channel *nc = from_timer(nc, t, monitor.timer); nc 90 net/ncsi/ncsi-manage.c struct ncsi_package *np = nc->package; nc 99 net/ncsi/ncsi-manage.c spin_lock_irqsave(&nc->lock, flags); nc 100 net/ncsi/ncsi-manage.c state = nc->state; nc 101 net/ncsi/ncsi-manage.c chained = !list_empty(&nc->link); nc 102 net/ncsi/ncsi-manage.c enabled = nc->monitor.enabled; nc 103 net/ncsi/ncsi-manage.c monitor_state = nc->monitor.state; nc 104 net/ncsi/ncsi-manage.c spin_unlock_irqrestore(&nc->lock, flags); nc 107 net/ncsi/ncsi-manage.c ncsi_stop_channel_monitor(nc); nc 112 net/ncsi/ncsi-manage.c ncsi_stop_channel_monitor(nc); nc 121 net/ncsi/ncsi-manage.c nca.channel = nc->id; nc 133 net/ncsi/ncsi-manage.c nc->id); nc 137 net/ncsi/ncsi-manage.c ncsi_stop_channel_monitor(nc); nc 139 net/ncsi/ncsi-manage.c ncm = &nc->modes[NCSI_MODE_LINK]; nc 140 net/ncsi/ncsi-manage.c spin_lock_irqsave(&nc->lock, flags); nc 141 net/ncsi/ncsi-manage.c nc->state = NCSI_CHANNEL_INVISIBLE; nc 143 net/ncsi/ncsi-manage.c spin_unlock_irqrestore(&nc->lock, flags); nc 146 net/ncsi/ncsi-manage.c nc->state = NCSI_CHANNEL_ACTIVE; nc 147 net/ncsi/ncsi-manage.c list_add_tail_rcu(&nc->link, &ndp->channel_queue); nc 153 net/ncsi/ncsi-manage.c spin_lock_irqsave(&nc->lock, flags); nc 154 net/ncsi/ncsi-manage.c nc->monitor.state++; nc 155 net/ncsi/ncsi-manage.c spin_unlock_irqrestore(&nc->lock, flags); nc 156 net/ncsi/ncsi-manage.c mod_timer(&nc->monitor.timer, jiffies + HZ); nc 159 net/ncsi/ncsi-manage.c void ncsi_start_channel_monitor(struct ncsi_channel *nc) nc 163 net/ncsi/ncsi-manage.c spin_lock_irqsave(&nc->lock, flags); nc 164 net/ncsi/ncsi-manage.c WARN_ON_ONCE(nc->monitor.enabled); nc 165 net/ncsi/ncsi-manage.c nc->monitor.enabled = true; nc 166 net/ncsi/ncsi-manage.c nc->monitor.state = NCSI_CHANNEL_MONITOR_START; nc 167 net/ncsi/ncsi-manage.c spin_unlock_irqrestore(&nc->lock, flags); nc 169 net/ncsi/ncsi-manage.c mod_timer(&nc->monitor.timer, jiffies + HZ); nc 172 net/ncsi/ncsi-manage.c void ncsi_stop_channel_monitor(struct ncsi_channel *nc) nc 176 net/ncsi/ncsi-manage.c spin_lock_irqsave(&nc->lock, flags); nc 177 net/ncsi/ncsi-manage.c if (!nc->monitor.enabled) { nc 178 net/ncsi/ncsi-manage.c spin_unlock_irqrestore(&nc->lock, flags); nc 181 net/ncsi/ncsi-manage.c nc->monitor.enabled = false; nc 182 net/ncsi/ncsi-manage.c spin_unlock_irqrestore(&nc->lock, 
flags); nc 184 net/ncsi/ncsi-manage.c del_timer_sync(&nc->monitor.timer); nc 190 net/ncsi/ncsi-manage.c struct ncsi_channel *nc; nc 192 net/ncsi/ncsi-manage.c NCSI_FOR_EACH_CHANNEL(np, nc) { nc 193 net/ncsi/ncsi-manage.c if (nc->id == id) nc 194 net/ncsi/ncsi-manage.c return nc; nc 202 net/ncsi/ncsi-manage.c struct ncsi_channel *nc, *tmp; nc 206 net/ncsi/ncsi-manage.c nc = kzalloc(sizeof(*nc), GFP_ATOMIC); nc 207 net/ncsi/ncsi-manage.c if (!nc) nc 210 net/ncsi/ncsi-manage.c nc->id = id; nc 211 net/ncsi/ncsi-manage.c nc->package = np; nc 212 net/ncsi/ncsi-manage.c nc->state = NCSI_CHANNEL_INACTIVE; nc 213 net/ncsi/ncsi-manage.c nc->monitor.enabled = false; nc 214 net/ncsi/ncsi-manage.c timer_setup(&nc->monitor.timer, ncsi_channel_monitor, 0); nc 215 net/ncsi/ncsi-manage.c spin_lock_init(&nc->lock); nc 216 net/ncsi/ncsi-manage.c INIT_LIST_HEAD(&nc->link); nc 218 net/ncsi/ncsi-manage.c nc->caps[index].index = index; nc 220 net/ncsi/ncsi-manage.c nc->modes[index].index = index; nc 226 net/ncsi/ncsi-manage.c kfree(nc); nc 230 net/ncsi/ncsi-manage.c list_add_tail_rcu(&nc->node, &np->channels); nc 234 net/ncsi/ncsi-manage.c return nc; nc 237 net/ncsi/ncsi-manage.c static void ncsi_remove_channel(struct ncsi_channel *nc) nc 239 net/ncsi/ncsi-manage.c struct ncsi_package *np = nc->package; nc 242 net/ncsi/ncsi-manage.c spin_lock_irqsave(&nc->lock, flags); nc 245 net/ncsi/ncsi-manage.c kfree(nc->mac_filter.addrs); nc 246 net/ncsi/ncsi-manage.c kfree(nc->vlan_filter.vids); nc 248 net/ncsi/ncsi-manage.c nc->state = NCSI_CHANNEL_INACTIVE; nc 249 net/ncsi/ncsi-manage.c spin_unlock_irqrestore(&nc->lock, flags); nc 250 net/ncsi/ncsi-manage.c ncsi_stop_channel_monitor(nc); nc 254 net/ncsi/ncsi-manage.c list_del_rcu(&nc->node); nc 258 net/ncsi/ncsi-manage.c kfree(nc); nc 308 net/ncsi/ncsi-manage.c struct ncsi_channel *nc, *tmp; nc 312 net/ncsi/ncsi-manage.c list_for_each_entry_safe(nc, tmp, &np->channels, node) nc 313 net/ncsi/ncsi-manage.c ncsi_remove_channel(nc); nc 327 net/ncsi/ncsi-manage.c struct ncsi_channel **nc) nc 337 net/ncsi/ncsi-manage.c if (nc) nc 338 net/ncsi/ncsi-manage.c *nc = c; nc 429 net/ncsi/ncsi-manage.c struct ncsi_channel *nc; nc 450 net/ncsi/ncsi-manage.c &np, &nc); nc 451 net/ncsi/ncsi-manage.c ncsi_send_netlink_timeout(nr, np, nc); nc 463 net/ncsi/ncsi-manage.c struct ncsi_channel *nc, *tmp; nc 469 net/ncsi/ncsi-manage.c nc = ndp->active_channel; nc 511 net/ncsi/ncsi-manage.c NCSI_FOR_EACH_CHANNEL(np, nc) { nc 512 net/ncsi/ncsi-manage.c nca.channel = nc->id; nc 524 net/ncsi/ncsi-manage.c nca.channel = nc->id; nc 537 net/ncsi/ncsi-manage.c nca.channel = nc->id; nc 549 net/ncsi/ncsi-manage.c if (tmp != nc && tmp->state == NCSI_CHANNEL_ACTIVE) { nc 569 net/ncsi/ncsi-manage.c spin_lock_irqsave(&nc->lock, flags); nc 570 net/ncsi/ncsi-manage.c nc->state = NCSI_CHANNEL_INACTIVE; nc 571 net/ncsi/ncsi-manage.c spin_unlock_irqrestore(&nc->lock, flags); nc 590 net/ncsi/ncsi-manage.c static int clear_one_vid(struct ncsi_dev_priv *ndp, struct ncsi_channel *nc, nc 599 net/ncsi/ncsi-manage.c ncf = &nc->vlan_filter; nc 602 net/ncsi/ncsi-manage.c spin_lock_irqsave(&nc->lock, flags); nc 605 net/ncsi/ncsi-manage.c spin_unlock_irqrestore(&nc->lock, flags); nc 612 net/ncsi/ncsi-manage.c spin_unlock_irqrestore(&nc->lock, flags); nc 625 net/ncsi/ncsi-manage.c static int set_one_vid(struct ncsi_dev_priv *ndp, struct ncsi_channel *nc, nc 638 net/ncsi/ncsi-manage.c ncf = &nc->vlan_filter; nc 641 net/ncsi/ncsi-manage.c spin_lock_irqsave(&nc->lock, flags); nc 658 net/ncsi/ncsi-manage.c 
spin_unlock_irqrestore(&nc->lock, flags); nc 666 net/ncsi/ncsi-manage.c nc->id); nc 667 net/ncsi/ncsi-manage.c spin_unlock_irqrestore(&nc->lock, flags); nc 673 net/ncsi/ncsi-manage.c spin_unlock_irqrestore(&nc->lock, flags); nc 778 net/ncsi/ncsi-manage.c struct ncsi_channel *nc) nc 788 net/ncsi/ncsi-manage.c if (!ndp->multi_package && np != nc->package) nc 802 net/ncsi/ncsi-manage.c return np->preferred_channel == nc; nc 807 net/ncsi/ncsi-manage.c if (ncsi_channel_has_link(nc)) nc 825 net/ncsi/ncsi-manage.c struct ncsi_channel *nc; nc 842 net/ncsi/ncsi-manage.c NCSI_FOR_EACH_CHANNEL(np, nc) nc 843 net/ncsi/ncsi-manage.c if (nc->modes[NCSI_MODE_TX_ENABLE].enable) { nc 844 net/ncsi/ncsi-manage.c disable = nc; nc 864 net/ncsi/ncsi-manage.c NCSI_FOR_EACH_CHANNEL(np, nc) { nc 865 net/ncsi/ncsi-manage.c if (!(np->channel_whitelist & 0x1 << nc->id)) nc 867 net/ncsi/ncsi-manage.c if (nc->state != NCSI_CHANNEL_ACTIVE) nc 869 net/ncsi/ncsi-manage.c if (ncsi_channel_has_link(nc)) { nc 870 net/ncsi/ncsi-manage.c enable = nc; nc 910 net/ncsi/ncsi-manage.c struct ncsi_channel *nc = ndp->active_channel; nc 949 net/ncsi/ncsi-manage.c nca.channel = nc->id; nc 966 net/ncsi/ncsi-manage.c nca.channel = nc->id; nc 968 net/ncsi/ncsi-manage.c ret = ncsi_gma_handler(&nca, nc->version.mf_id); nc 988 net/ncsi/ncsi-manage.c nca.channel = nc->id; nc 992 net/ncsi/ncsi-manage.c ret = clear_one_vid(ndp, nc, &nca); nc 1002 net/ncsi/ncsi-manage.c ret = set_one_vid(ndp, nc, &nca); nc 1032 net/ncsi/ncsi-manage.c nca.dwords[0] = nc->caps[NCSI_CAP_BC].cap; nc 1037 net/ncsi/ncsi-manage.c if (nc->caps[NCSI_CAP_GENERIC].cap & nc 1040 net/ncsi/ncsi-manage.c else if (ncsi_channel_is_tx(ndp, nc)) nc 1046 net/ncsi/ncsi-manage.c if (ncsi_channel_is_tx(ndp, nc)) nc 1052 net/ncsi/ncsi-manage.c nc != np->preferred_channel) nc 1055 net/ncsi/ncsi-manage.c nc->id); nc 1062 net/ncsi/ncsi-manage.c if (!(nc->caps[NCSI_CAP_AEN].cap & NCSI_CAP_AEN_MASK)) nc 1067 net/ncsi/ncsi-manage.c nca.dwords[1] = nc->caps[NCSI_CAP_AEN].cap; nc 1084 net/ncsi/ncsi-manage.c nc->id); nc 1085 net/ncsi/ncsi-manage.c spin_lock_irqsave(&nc->lock, flags); nc 1086 net/ncsi/ncsi-manage.c nc->state = NCSI_CHANNEL_ACTIVE; nc 1090 net/ncsi/ncsi-manage.c nc->reconfigure_needed = false; nc 1091 net/ncsi/ncsi-manage.c spin_unlock_irqrestore(&nc->lock, flags); nc 1096 net/ncsi/ncsi-manage.c if (nc->reconfigure_needed) { nc 1101 net/ncsi/ncsi-manage.c nc->reconfigure_needed = false; nc 1102 net/ncsi/ncsi-manage.c nc->state = NCSI_CHANNEL_INACTIVE; nc 1103 net/ncsi/ncsi-manage.c spin_unlock_irqrestore(&nc->lock, flags); nc 1106 net/ncsi/ncsi-manage.c list_add_tail_rcu(&nc->link, &ndp->channel_queue); nc 1114 net/ncsi/ncsi-manage.c if (nc->modes[NCSI_MODE_LINK].data[2] & 0x1) { nc 1115 net/ncsi/ncsi-manage.c hot_nc = nc; nc 1120 net/ncsi/ncsi-manage.c nc->id); nc 1122 net/ncsi/ncsi-manage.c spin_unlock_irqrestore(&nc->lock, flags); nc 1129 net/ncsi/ncsi-manage.c ncsi_start_channel_monitor(nc); nc 1145 net/ncsi/ncsi-manage.c struct ncsi_channel *nc, *found, *hot_nc; nc 1165 net/ncsi/ncsi-manage.c NCSI_FOR_EACH_CHANNEL(np, nc) { nc 1166 net/ncsi/ncsi-manage.c if (!(np->channel_whitelist & (0x1 << nc->id))) nc 1169 net/ncsi/ncsi-manage.c spin_lock_irqsave(&nc->lock, cflags); nc 1171 net/ncsi/ncsi-manage.c if (!list_empty(&nc->link) || nc 1172 net/ncsi/ncsi-manage.c nc->state != NCSI_CHANNEL_INACTIVE) { nc 1173 net/ncsi/ncsi-manage.c spin_unlock_irqrestore(&nc->lock, cflags); nc 1178 net/ncsi/ncsi-manage.c found = nc; nc 1180 net/ncsi/ncsi-manage.c if (nc == hot_nc) nc 1181 
net/ncsi/ncsi-manage.c found = nc; nc 1183 net/ncsi/ncsi-manage.c ncm = &nc->modes[NCSI_MODE_LINK]; nc 1185 net/ncsi/ncsi-manage.c found = nc; nc 1195 net/ncsi/ncsi-manage.c list_add_tail_rcu(&nc->link, nc 1201 net/ncsi/ncsi-manage.c nc->id, nc 1205 net/ncsi/ncsi-manage.c spin_unlock_irqrestore(&nc->lock, cflags); nc 1234 net/ncsi/ncsi-manage.c struct ncsi_channel *nc; nc 1242 net/ncsi/ncsi-manage.c NCSI_FOR_EACH_CHANNEL(np, nc) { nc 1245 net/ncsi/ncsi-manage.c cap = nc->caps[NCSI_CAP_GENERIC].cap; nc 1268 net/ncsi/ncsi-manage.c struct ncsi_channel *nc; nc 1348 net/ncsi/ncsi-manage.c NCSI_FOR_EACH_CHANNEL(np, nc) { nc 1349 net/ncsi/ncsi-manage.c nca.channel = nc->id; nc 1426 net/ncsi/ncsi-manage.c struct ncsi_channel *nc; nc 1431 net/ncsi/ncsi-manage.c nc = list_first_or_null_rcu(&ndp->channel_queue, nc 1433 net/ncsi/ncsi-manage.c if (!nc) { nc 1438 net/ncsi/ncsi-manage.c list_del_init(&nc->link); nc 1441 net/ncsi/ncsi-manage.c spin_lock_irqsave(&nc->lock, flags); nc 1442 net/ncsi/ncsi-manage.c old_state = nc->state; nc 1443 net/ncsi/ncsi-manage.c nc->state = NCSI_CHANNEL_INVISIBLE; nc 1444 net/ncsi/ncsi-manage.c spin_unlock_irqrestore(&nc->lock, flags); nc 1446 net/ncsi/ncsi-manage.c ndp->active_channel = nc; nc 1447 net/ncsi/ncsi-manage.c ndp->active_package = nc->package; nc 1453 net/ncsi/ncsi-manage.c nc->id); nc 1459 net/ncsi/ncsi-manage.c nc->id); nc 1464 net/ncsi/ncsi-manage.c old_state, nc->package->id, nc->id); nc 1486 net/ncsi/ncsi-manage.c struct ncsi_channel *nc; nc 1492 net/ncsi/ncsi-manage.c NCSI_FOR_EACH_CHANNEL(np, nc) { nc 1493 net/ncsi/ncsi-manage.c spin_lock_irqsave(&nc->lock, flags); nc 1501 net/ncsi/ncsi-manage.c if (nc->state != NCSI_CHANNEL_ACTIVE) { nc 1504 net/ncsi/ncsi-manage.c !list_empty(&nc->link)) { nc 1507 net/ncsi/ncsi-manage.c nc); nc 1508 net/ncsi/ncsi-manage.c nc->reconfigure_needed = true; nc 1510 net/ncsi/ncsi-manage.c spin_unlock_irqrestore(&nc->lock, flags); nc 1514 net/ncsi/ncsi-manage.c spin_unlock_irqrestore(&nc->lock, flags); nc 1516 net/ncsi/ncsi-manage.c ncsi_stop_channel_monitor(nc); nc 1517 net/ncsi/ncsi-manage.c spin_lock_irqsave(&nc->lock, flags); nc 1518 net/ncsi/ncsi-manage.c nc->state = NCSI_CHANNEL_INACTIVE; nc 1519 net/ncsi/ncsi-manage.c spin_unlock_irqrestore(&nc->lock, flags); nc 1522 net/ncsi/ncsi-manage.c list_add_tail_rcu(&nc->link, &ndp->channel_queue); nc 1525 net/ncsi/ncsi-manage.c netdev_dbg(nd->dev, "NCSI: kicked channel %p\n", nc); nc 1700 net/ncsi/ncsi-manage.c struct ncsi_channel *nc; nc 1710 net/ncsi/ncsi-manage.c NCSI_FOR_EACH_CHANNEL(np, nc) { nc 1711 net/ncsi/ncsi-manage.c ncsi_stop_channel_monitor(nc); nc 1713 net/ncsi/ncsi-manage.c spin_lock_irqsave(&nc->lock, flags); nc 1714 net/ncsi/ncsi-manage.c chained = !list_empty(&nc->link); nc 1715 net/ncsi/ncsi-manage.c old_state = nc->state; nc 1716 net/ncsi/ncsi-manage.c spin_unlock_irqrestore(&nc->lock, flags); nc 1731 net/ncsi/ncsi-manage.c struct ncsi_channel *nc, *active, *tmp; nc 1771 net/ncsi/ncsi-manage.c list_for_each_entry_safe(nc, tmp, &ndp->channel_queue, link) nc 1772 net/ncsi/ncsi-manage.c list_del_init(&nc->link); nc 1778 net/ncsi/ncsi-manage.c NCSI_FOR_EACH_CHANNEL(np, nc) { nc 1779 net/ncsi/ncsi-manage.c spin_lock_irqsave(&nc->lock, flags); nc 1781 net/ncsi/ncsi-manage.c if (nc->state == NCSI_CHANNEL_ACTIVE) { nc 1782 net/ncsi/ncsi-manage.c active = nc; nc 1783 net/ncsi/ncsi-manage.c nc->state = NCSI_CHANNEL_INVISIBLE; nc 1784 net/ncsi/ncsi-manage.c spin_unlock_irqrestore(&nc->lock, flags); nc 1785 net/ncsi/ncsi-manage.c ncsi_stop_channel_monitor(nc); nc 
1789 net/ncsi/ncsi-manage.c spin_unlock_irqrestore(&nc->lock, flags); nc 59 net/ncsi/ncsi-netlink.c struct ncsi_channel *nc) nc 66 net/ncsi/ncsi-netlink.c nla_put_u32(skb, NCSI_CHANNEL_ATTR_ID, nc->id); nc 67 net/ncsi/ncsi-netlink.c m = &nc->modes[NCSI_MODE_LINK]; nc 69 net/ncsi/ncsi-netlink.c if (nc->state == NCSI_CHANNEL_ACTIVE) nc 71 net/ncsi/ncsi-netlink.c if (nc == nc->package->preferred_channel) nc 74 net/ncsi/ncsi-netlink.c nla_put_u32(skb, NCSI_CHANNEL_ATTR_VERSION_MAJOR, nc->version.version); nc 75 net/ncsi/ncsi-netlink.c nla_put_u32(skb, NCSI_CHANNEL_ATTR_VERSION_MINOR, nc->version.alpha2); nc 76 net/ncsi/ncsi-netlink.c nla_put_string(skb, NCSI_CHANNEL_ATTR_VERSION_STR, nc->version.fw_name); nc 81 net/ncsi/ncsi-netlink.c ncf = &nc->vlan_filter; nc 99 net/ncsi/ncsi-netlink.c struct ncsi_channel *nc; nc 123 net/ncsi/ncsi-netlink.c NCSI_FOR_EACH_CHANNEL(np, nc) { nc 130 net/ncsi/ncsi-netlink.c rc = ncsi_write_channel_info(skb, ndp, nc); nc 274 net/ncsi/ncsi-netlink.c struct ncsi_channel *nc, *channel; nc 307 net/ncsi/ncsi-netlink.c NCSI_FOR_EACH_CHANNEL(package, nc) nc 308 net/ncsi/ncsi-netlink.c if (nc->id == channel_id) { nc 309 net/ncsi/ncsi-netlink.c channel = nc; nc 480 net/ncsi/ncsi-netlink.c struct ncsi_channel *nc) nc 503 net/ncsi/ncsi-netlink.c if (nc) nc 504 net/ncsi/ncsi-netlink.c nla_put_u32(skb, NCSI_ATTR_CHANNEL_ID, nc->id); nc 522 net/ncsi/ncsi-netlink.c struct ncsi_channel *nc) nc 550 net/ncsi/ncsi-netlink.c if (nc) nc 551 net/ncsi/ncsi-netlink.c nla_put_u32(skb, NCSI_ATTR_CHANNEL_ID, nc->id); nc 641 net/ncsi/ncsi-netlink.c struct ncsi_channel *nc, *channel; nc 678 net/ncsi/ncsi-netlink.c NCSI_FOR_EACH_CHANNEL(np, nc) nc 679 net/ncsi/ncsi-netlink.c if (nc->id == channel_id) { nc 680 net/ncsi/ncsi-netlink.c channel = nc; nc 15 net/ncsi/ncsi-netlink.h struct ncsi_channel *nc); nc 18 net/ncsi/ncsi-netlink.h struct ncsi_channel *nc); nc 81 net/ncsi/ncsi-rsp.c struct ncsi_channel *nc; nc 85 net/ncsi/ncsi-rsp.c ncsi_find_package_and_channel(ndp, rsp->rsp.common.channel, &np, &nc); nc 86 net/ncsi/ncsi-rsp.c if (!nc) { nc 91 net/ncsi/ncsi-rsp.c nc = ncsi_add_channel(np, id); nc 94 net/ncsi/ncsi-rsp.c return nc ? 
0 : -ENODEV; nc 128 net/ncsi/ncsi-rsp.c struct ncsi_channel *nc; nc 139 net/ncsi/ncsi-rsp.c NCSI_FOR_EACH_CHANNEL(np, nc) { nc 140 net/ncsi/ncsi-rsp.c spin_lock_irqsave(&nc->lock, flags); nc 141 net/ncsi/ncsi-rsp.c nc->state = NCSI_CHANNEL_INACTIVE; nc 142 net/ncsi/ncsi-rsp.c spin_unlock_irqrestore(&nc->lock, flags); nc 152 net/ncsi/ncsi-rsp.c struct ncsi_channel *nc; nc 158 net/ncsi/ncsi-rsp.c NULL, &nc); nc 159 net/ncsi/ncsi-rsp.c if (!nc) nc 162 net/ncsi/ncsi-rsp.c ncm = &nc->modes[NCSI_MODE_ENABLE]; nc 174 net/ncsi/ncsi-rsp.c struct ncsi_channel *nc; nc 185 net/ncsi/ncsi-rsp.c NULL, &nc); nc 186 net/ncsi/ncsi-rsp.c if (!nc) nc 189 net/ncsi/ncsi-rsp.c ncm = &nc->modes[NCSI_MODE_ENABLE]; nc 201 net/ncsi/ncsi-rsp.c struct ncsi_channel *nc; nc 207 net/ncsi/ncsi-rsp.c NULL, &nc); nc 208 net/ncsi/ncsi-rsp.c if (!nc) nc 212 net/ncsi/ncsi-rsp.c spin_lock_irqsave(&nc->lock, flags); nc 213 net/ncsi/ncsi-rsp.c nc->state = NCSI_CHANNEL_INACTIVE; nc 214 net/ncsi/ncsi-rsp.c spin_unlock_irqrestore(&nc->lock, flags); nc 223 net/ncsi/ncsi-rsp.c struct ncsi_channel *nc; nc 229 net/ncsi/ncsi-rsp.c NULL, &nc); nc 230 net/ncsi/ncsi-rsp.c if (!nc) nc 233 net/ncsi/ncsi-rsp.c ncm = &nc->modes[NCSI_MODE_TX_ENABLE]; nc 245 net/ncsi/ncsi-rsp.c struct ncsi_channel *nc; nc 251 net/ncsi/ncsi-rsp.c NULL, &nc); nc 252 net/ncsi/ncsi-rsp.c if (!nc) nc 255 net/ncsi/ncsi-rsp.c ncm = &nc->modes[NCSI_MODE_TX_ENABLE]; nc 268 net/ncsi/ncsi-rsp.c struct ncsi_channel *nc; nc 274 net/ncsi/ncsi-rsp.c NULL, &nc); nc 275 net/ncsi/ncsi-rsp.c if (!nc) nc 279 net/ncsi/ncsi-rsp.c ncm = &nc->modes[NCSI_MODE_AEN]; nc 297 net/ncsi/ncsi-rsp.c struct ncsi_channel *nc; nc 303 net/ncsi/ncsi-rsp.c NULL, &nc); nc 304 net/ncsi/ncsi-rsp.c if (!nc) nc 308 net/ncsi/ncsi-rsp.c ncm = &nc->modes[NCSI_MODE_LINK]; nc 319 net/ncsi/ncsi-rsp.c struct ncsi_channel *nc; nc 326 net/ncsi/ncsi-rsp.c NULL, &nc); nc 327 net/ncsi/ncsi-rsp.c if (!nc) nc 330 net/ncsi/ncsi-rsp.c ncm = &nc->modes[NCSI_MODE_LINK]; nc 339 net/ncsi/ncsi-rsp.c spin_lock_irqsave(&nc->lock, flags); nc 340 net/ncsi/ncsi-rsp.c nc->monitor.state = NCSI_CHANNEL_MONITOR_START; nc 341 net/ncsi/ncsi-rsp.c spin_unlock_irqrestore(&nc->lock, flags); nc 351 net/ncsi/ncsi-rsp.c struct ncsi_channel *nc; nc 359 net/ncsi/ncsi-rsp.c NULL, &nc); nc 360 net/ncsi/ncsi-rsp.c if (!nc) nc 364 net/ncsi/ncsi-rsp.c ncf = &nc->vlan_filter; nc 369 net/ncsi/ncsi-rsp.c spin_lock_irqsave(&nc->lock, flags); nc 378 net/ncsi/ncsi-rsp.c spin_unlock_irqrestore(&nc->lock, flags); nc 388 net/ncsi/ncsi-rsp.c struct ncsi_channel *nc; nc 394 net/ncsi/ncsi-rsp.c NULL, &nc); nc 395 net/ncsi/ncsi-rsp.c if (!nc) nc 399 net/ncsi/ncsi-rsp.c ncm = &nc->modes[NCSI_MODE_VLAN]; nc 415 net/ncsi/ncsi-rsp.c struct ncsi_channel *nc; nc 421 net/ncsi/ncsi-rsp.c NULL, &nc); nc 422 net/ncsi/ncsi-rsp.c if (!nc) nc 426 net/ncsi/ncsi-rsp.c ncm = &nc->modes[NCSI_MODE_VLAN]; nc 440 net/ncsi/ncsi-rsp.c struct ncsi_channel *nc; nc 451 net/ncsi/ncsi-rsp.c NULL, &nc); nc 452 net/ncsi/ncsi-rsp.c if (!nc) nc 460 net/ncsi/ncsi-rsp.c ncf = &nc->mac_filter; nc 468 net/ncsi/ncsi-rsp.c spin_lock_irqsave(&nc->lock, flags); nc 476 net/ncsi/ncsi-rsp.c spin_unlock_irqrestore(&nc->lock, flags); nc 486 net/ncsi/ncsi-rsp.c struct ncsi_channel *nc; nc 491 net/ncsi/ncsi-rsp.c ncsi_find_package_and_channel(ndp, rsp->rsp.common.channel, NULL, &nc); nc 492 net/ncsi/ncsi-rsp.c if (!nc) nc 496 net/ncsi/ncsi-rsp.c ncm = &nc->modes[NCSI_MODE_BC]; nc 512 net/ncsi/ncsi-rsp.c struct ncsi_channel *nc; nc 517 net/ncsi/ncsi-rsp.c NULL, &nc); nc 518 net/ncsi/ncsi-rsp.c if (!nc) nc 522 
net/ncsi/ncsi-rsp.c ncm = &nc->modes[NCSI_MODE_BC]; nc 538 net/ncsi/ncsi-rsp.c struct ncsi_channel *nc; nc 544 net/ncsi/ncsi-rsp.c NULL, &nc); nc 545 net/ncsi/ncsi-rsp.c if (!nc) nc 549 net/ncsi/ncsi-rsp.c ncm = &nc->modes[NCSI_MODE_MC]; nc 565 net/ncsi/ncsi-rsp.c struct ncsi_channel *nc; nc 570 net/ncsi/ncsi-rsp.c NULL, &nc); nc 571 net/ncsi/ncsi-rsp.c if (!nc) nc 575 net/ncsi/ncsi-rsp.c ncm = &nc->modes[NCSI_MODE_MC]; nc 591 net/ncsi/ncsi-rsp.c struct ncsi_channel *nc; nc 597 net/ncsi/ncsi-rsp.c NULL, &nc); nc 598 net/ncsi/ncsi-rsp.c if (!nc) nc 602 net/ncsi/ncsi-rsp.c ncm = &nc->modes[NCSI_MODE_FC]; nc 741 net/ncsi/ncsi-rsp.c struct ncsi_channel *nc; nc 748 net/ncsi/ncsi-rsp.c NULL, &nc); nc 749 net/ncsi/ncsi-rsp.c if (!nc) nc 753 net/ncsi/ncsi-rsp.c ncv = &nc->version; nc 769 net/ncsi/ncsi-rsp.c struct ncsi_channel *nc; nc 775 net/ncsi/ncsi-rsp.c NULL, &nc); nc 776 net/ncsi/ncsi-rsp.c if (!nc) nc 780 net/ncsi/ncsi-rsp.c nc->caps[NCSI_CAP_GENERIC].cap = ntohl(rsp->cap) & nc 782 net/ncsi/ncsi-rsp.c nc->caps[NCSI_CAP_BC].cap = ntohl(rsp->bc_cap) & nc 784 net/ncsi/ncsi-rsp.c nc->caps[NCSI_CAP_MC].cap = ntohl(rsp->mc_cap) & nc 786 net/ncsi/ncsi-rsp.c nc->caps[NCSI_CAP_BUFFER].cap = ntohl(rsp->buf_cap); nc 787 net/ncsi/ncsi-rsp.c nc->caps[NCSI_CAP_AEN].cap = ntohl(rsp->aen_cap) & nc 789 net/ncsi/ncsi-rsp.c nc->caps[NCSI_CAP_VLAN].cap = rsp->vlan_mode & nc 793 net/ncsi/ncsi-rsp.c nc->mac_filter.addrs = kzalloc(size, GFP_ATOMIC); nc 794 net/ncsi/ncsi-rsp.c if (!nc->mac_filter.addrs) nc 796 net/ncsi/ncsi-rsp.c nc->mac_filter.n_uc = rsp->uc_cnt; nc 797 net/ncsi/ncsi-rsp.c nc->mac_filter.n_mc = rsp->mc_cnt; nc 798 net/ncsi/ncsi-rsp.c nc->mac_filter.n_mixed = rsp->mixed_cnt; nc 800 net/ncsi/ncsi-rsp.c nc->vlan_filter.vids = kcalloc(rsp->vlan_cnt, nc 801 net/ncsi/ncsi-rsp.c sizeof(*nc->vlan_filter.vids), nc 803 net/ncsi/ncsi-rsp.c if (!nc->vlan_filter.vids) nc 808 net/ncsi/ncsi-rsp.c nc->vlan_filter.bitmap = U64_MAX; nc 809 net/ncsi/ncsi-rsp.c nc->vlan_filter.n_vids = rsp->vlan_cnt; nc 820 net/ncsi/ncsi-rsp.c struct ncsi_channel *nc; nc 830 net/ncsi/ncsi-rsp.c NULL, &nc); nc 831 net/ncsi/ncsi-rsp.c if (!nc) nc 836 net/ncsi/ncsi-rsp.c nc->modes[NCSI_MODE_BC].enable = 1; nc 837 net/ncsi/ncsi-rsp.c nc->modes[NCSI_MODE_BC].data[0] = ntohl(rsp->bc_mode); nc 840 net/ncsi/ncsi-rsp.c nc->modes[NCSI_MODE_ENABLE].enable = 1; nc 842 net/ncsi/ncsi-rsp.c nc->modes[NCSI_MODE_TX_ENABLE].enable = 1; nc 844 net/ncsi/ncsi-rsp.c nc->modes[NCSI_MODE_MC].enable = 1; nc 847 net/ncsi/ncsi-rsp.c nc->modes[NCSI_MODE_LINK].enable = 1; nc 848 net/ncsi/ncsi-rsp.c nc->modes[NCSI_MODE_LINK].data[0] = ntohl(rsp->link_mode); nc 849 net/ncsi/ncsi-rsp.c nc->modes[NCSI_MODE_VLAN].enable = 1; nc 850 net/ncsi/ncsi-rsp.c nc->modes[NCSI_MODE_VLAN].data[0] = rsp->vlan_mode; nc 851 net/ncsi/ncsi-rsp.c nc->modes[NCSI_MODE_FC].enable = 1; nc 852 net/ncsi/ncsi-rsp.c nc->modes[NCSI_MODE_FC].data[0] = rsp->fc_mode; nc 853 net/ncsi/ncsi-rsp.c nc->modes[NCSI_MODE_AEN].enable = 1; nc 854 net/ncsi/ncsi-rsp.c nc->modes[NCSI_MODE_AEN].data[0] = ntohl(rsp->aen_mode); nc 859 net/ncsi/ncsi-rsp.c ncmf = &nc->mac_filter; nc 860 net/ncsi/ncsi-rsp.c spin_lock_irqsave(&nc->lock, flags); nc 870 net/ncsi/ncsi-rsp.c spin_unlock_irqrestore(&nc->lock, flags); nc 874 net/ncsi/ncsi-rsp.c ncvf = &nc->vlan_filter; nc 876 net/ncsi/ncsi-rsp.c spin_lock_irqsave(&nc->lock, flags); nc 885 net/ncsi/ncsi-rsp.c spin_unlock_irqrestore(&nc->lock, flags); nc 894 net/ncsi/ncsi-rsp.c struct ncsi_channel *nc; nc 900 net/ncsi/ncsi-rsp.c NULL, &nc); nc 901 net/ncsi/ncsi-rsp.c if 
(!nc) nc 905 net/ncsi/ncsi-rsp.c ncs = &nc->stats; nc 955 net/ncsi/ncsi-rsp.c struct ncsi_channel *nc; nc 961 net/ncsi/ncsi-rsp.c NULL, &nc); nc 962 net/ncsi/ncsi-rsp.c if (!nc) nc 966 net/ncsi/ncsi-rsp.c ncs = &nc->stats; nc 982 net/ncsi/ncsi-rsp.c struct ncsi_channel *nc; nc 988 net/ncsi/ncsi-rsp.c NULL, &nc); nc 989 net/ncsi/ncsi-rsp.c if (!nc) nc 993 net/ncsi/ncsi-rsp.c ncs = &nc->stats; nc 1051 net/ncsi/ncsi-rsp.c struct ncsi_channel *nc; nc 1057 net/ncsi/ncsi-rsp.c &np, &nc); nc 1061 net/ncsi/ncsi-rsp.c ret = ncsi_send_netlink_rsp(nr, np, nc); nc 976 net/netfilter/nf_tables_api.c struct nft_chain *chain, *nc; nc 1023 net/netfilter/nf_tables_api.c list_for_each_entry_safe(chain, nc, &ctx->table->chains, list) { nc 7692 net/netfilter/nf_tables_api.c struct nft_chain *chain, *nc; nc 7731 net/netfilter/nf_tables_api.c list_for_each_entry_safe(chain, nc, &table->chains, list) { nc 163 net/netfilter/nfnetlink.c const struct nfnl_callback *nc; nc 189 net/netfilter/nfnetlink.c nc = nfnetlink_find_client(type, ss); nc 190 net/netfilter/nfnetlink.c if (!nc) { nc 217 net/netfilter/nfnetlink.c if (nc->call_rcu) { nc 218 net/netfilter/nfnetlink.c err = nc->call_rcu(net, net->nfnl, skb, nlh, nc 226 net/netfilter/nfnetlink.c nfnetlink_find_client(type, ss) != nc) nc 228 net/netfilter/nfnetlink.c else if (nc->call) nc 229 net/netfilter/nfnetlink.c err = nc->call(net, net->nfnl, skb, nlh, nc 303 net/netfilter/nfnetlink.c const struct nfnl_callback *nc; nc 406 net/netfilter/nfnetlink.c nc = nfnetlink_find_client(type, ss); nc 407 net/netfilter/nfnetlink.c if (!nc) { nc 432 net/netfilter/nfnetlink.c if (nc->call_batch) { nc 433 net/netfilter/nfnetlink.c err = nc->call_batch(net, net->nfnl, skb, nlh, nc 90 sound/soc/intel/skylake/skl-sst-dsp.h #define SKL_DSP_CORES_MASK(nc) GENMASK((nc - 1), 0) nc 298 sound/soc/sof/intel/hda.h #define SOF_DSP_CORES_MASK(nc) GENMASK(((nc) - 1), 0) nc 202 tools/perf/util/namespaces.c struct nscookie *nc) nc 209 tools/perf/util/namespaces.c if (nc == NULL) nc 212 tools/perf/util/namespaces.c nc->oldns = -1; nc 213 tools/perf/util/namespaces.c nc->newns = -1; nc 236 tools/perf/util/namespaces.c nc->oldcwd = oldcwd; nc 237 tools/perf/util/namespaces.c nc->oldns = oldns; nc 238 tools/perf/util/namespaces.c nc->newns = newns; nc 249 tools/perf/util/namespaces.c void nsinfo__mountns_exit(struct nscookie *nc) nc 251 tools/perf/util/namespaces.c if (nc == NULL || nc->oldns == -1 || nc->newns == -1 || !nc->oldcwd) nc 254 tools/perf/util/namespaces.c setns(nc->oldns, CLONE_NEWNS); nc 256 tools/perf/util/namespaces.c if (nc->oldcwd) { nc 257 tools/perf/util/namespaces.c WARN_ON_ONCE(chdir(nc->oldcwd)); nc 258 tools/perf/util/namespaces.c zfree(&nc->oldcwd); nc 261 tools/perf/util/namespaces.c if (nc->oldns > -1) { nc 262 tools/perf/util/namespaces.c close(nc->oldns); nc 263 tools/perf/util/namespaces.c nc->oldns = -1; nc 266 tools/perf/util/namespaces.c if (nc->newns > -1) { nc 267 tools/perf/util/namespaces.c close(nc->newns); nc 268 tools/perf/util/namespaces.c nc->newns = -1; nc 54 tools/perf/util/namespaces.h void nsinfo__mountns_enter(struct nsinfo *nsi, struct nscookie *nc); nc 55 tools/perf/util/namespaces.h void nsinfo__mountns_exit(struct nscookie *nc); nc 1342 tools/perf/util/probe-event.c char c, nc = 0; nc 1418 tools/perf/util/probe-event.c nc = *ptr; nc 1456 tools/perf/util/probe-event.c c = nc; nc 1465 tools/perf/util/probe-event.c nc = *ptr;
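
The tools/perf/util/namespaces.c entries near the end of this listing (nsinfo__mountns_enter / nsinfo__mountns_exit operating on a struct nscookie with oldns, newns and oldcwd fields) show the usual save/enter/restore pattern for switching a thread into another task's mount namespace. Below is a minimal standalone sketch of that pattern, given here only as an illustration: the names mntns_enter, mntns_exit and struct mntns_cookie are hypothetical and are not the perf helpers themselves, and entering another process's mount namespace typically requires CAP_SYS_ADMIN.

/*
 * Sketch of the save/enter/restore pattern used by the nscookie code
 * listed above: remember the current mount-ns fd and cwd, setns() into
 * the target's mount namespace, then later setns() back and chdir()
 * to the saved directory.  Helper names here are illustrative only.
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <limits.h>
#include <sched.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/types.h>
#include <unistd.h>

struct mntns_cookie {
	int oldns;              /* fd on our original mount namespace */
	int newns;              /* fd on the target task's mount namespace */
	char oldcwd[PATH_MAX];  /* cwd to restore after switching back */
};

static int mntns_enter(pid_t pid, struct mntns_cookie *c)
{
	char path[64];

	c->oldns = c->newns = -1;
	if (!getcwd(c->oldcwd, sizeof(c->oldcwd)))
		return -1;

	/* Keep a handle on where we are so we can come back. */
	c->oldns = open("/proc/self/ns/mnt", O_RDONLY);
	snprintf(path, sizeof(path), "/proc/%d/ns/mnt", (int)pid);
	c->newns = open(path, O_RDONLY);
	if (c->oldns < 0 || c->newns < 0)
		goto err;

	/* Switch this thread into the target mount namespace. */
	if (setns(c->newns, CLONE_NEWNS))
		goto err;
	return 0;
err:
	if (c->oldns >= 0)
		close(c->oldns);
	if (c->newns >= 0)
		close(c->newns);
	c->oldns = c->newns = -1;
	return -1;
}

static void mntns_exit(struct mntns_cookie *c)
{
	if (c->oldns < 0 || c->newns < 0)
		return;
	/* Return to the original namespace and working directory. */
	setns(c->oldns, CLONE_NEWNS);
	if (chdir(c->oldcwd))
		perror("chdir");
	close(c->oldns);
	close(c->newns);
	c->oldns = c->newns = -1;
}

int main(int argc, char **argv)
{
	struct mntns_cookie c;
	pid_t pid = argc > 1 ? (pid_t)atoi(argv[1]) : getpid();

	if (mntns_enter(pid, &c) == 0) {
		/* Paths resolved here follow the target's mount table. */
		printf("entered mount namespace of pid %d\n", (int)pid);
		mntns_exit(&c);
	}
	return 0;
}

The cookie-style struct mirrors what the listed perf code keeps in struct nscookie: because setns() changes the calling thread's filesystem view, both the old namespace fd and the old working directory must be saved up front, or the exit path has nothing to restore.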