cn                 88 arch/arm/kernel/topology.c 	struct device_node *cn = NULL;
cn                102 arch/arm/kernel/topology.c 		cn = of_get_cpu_node(cpu, NULL);
cn                103 arch/arm/kernel/topology.c 		if (!cn) {
cn                108 arch/arm/kernel/topology.c 		if (topology_parse_cpu_capacity(cn, cpu)) {
cn                109 arch/arm/kernel/topology.c 			of_node_put(cn);
cn                116 arch/arm/kernel/topology.c 			if (of_device_is_compatible(cn, cpu_eff->compatible))
cn                122 arch/arm/kernel/topology.c 		rate = of_get_property(cn, "clock-frequency", &len);
cn                124 arch/arm/kernel/topology.c 			pr_err("%pOF missing clock-frequency property\n", cn);
cn                388 drivers/base/arch_topology.c 	struct device_node *cn, *map;
cn                392 drivers/base/arch_topology.c 	cn = of_find_node_by_path("/cpus");
cn                393 drivers/base/arch_topology.c 	if (!cn) {
cn                402 drivers/base/arch_topology.c 	map = of_get_child_by_name(cn, "cpu-map");
cn                423 drivers/base/arch_topology.c 	of_node_put(cn);
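
The two topology groups above follow the usual device-tree discipline: of_get_cpu_node()/of_find_node_by_path() and of_get_child_by_name() return nodes with an elevated refcount that must be dropped with of_node_put(). A minimal sketch of that lookup/put pattern (example_parse_cpu_map is an invented name, not in-tree code):

/* Sketch only: look up /cpus, take its "cpu-map" child, and release
 * every device_node reference that was acquired. */
#include <linux/of.h>
#include <linux/errno.h>

static int example_parse_cpu_map(void)
{
	struct device_node *cn, *map;

	cn = of_find_node_by_path("/cpus");
	if (!cn)
		return -ENOENT;

	map = of_get_child_by_name(cn, "cpu-map");
	if (map) {
		/* ... walk the cluster/core sub-nodes here ... */
		of_node_put(map);	/* drop the child reference */
	}

	of_node_put(cn);		/* drop the /cpus reference */
	return 0;
}
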
cn               1475 drivers/clk/clk.c 	struct clk_notifier *cn;
cn               1482 drivers/clk/clk.c 	list_for_each_entry(cn, &clk_notifier_list, node) {
cn               1483 drivers/clk/clk.c 		if (cn->clk->core == core) {
cn               1484 drivers/clk/clk.c 			cnd.clk = cn->clk;
cn               1485 drivers/clk/clk.c 			ret = srcu_notifier_call_chain(&cn->notifier_head, msg,
cn               4143 drivers/clk/clk.c 	struct clk_notifier *cn;
cn               4152 drivers/clk/clk.c 	list_for_each_entry(cn, &clk_notifier_list, node)
cn               4153 drivers/clk/clk.c 		if (cn->clk == clk)
cn               4157 drivers/clk/clk.c 	if (cn->clk != clk) {
cn               4158 drivers/clk/clk.c 		cn = kzalloc(sizeof(*cn), GFP_KERNEL);
cn               4159 drivers/clk/clk.c 		if (!cn)
cn               4162 drivers/clk/clk.c 		cn->clk = clk;
cn               4163 drivers/clk/clk.c 		srcu_init_notifier_head(&cn->notifier_head);
cn               4165 drivers/clk/clk.c 		list_add(&cn->node, &clk_notifier_list);
cn               4168 drivers/clk/clk.c 	ret = srcu_notifier_chain_register(&cn->notifier_head, nb);
cn               4192 drivers/clk/clk.c 	struct clk_notifier *cn = NULL;
cn               4200 drivers/clk/clk.c 	list_for_each_entry(cn, &clk_notifier_list, node)
cn               4201 drivers/clk/clk.c 		if (cn->clk == clk)
cn               4204 drivers/clk/clk.c 	if (cn->clk == clk) {
cn               4205 drivers/clk/clk.c 		ret = srcu_notifier_chain_unregister(&cn->notifier_head, nb);
cn               4210 drivers/clk/clk.c 		if (!cn->notifier_head.head) {
cn               4211 drivers/clk/clk.c 			srcu_cleanup_notifier_head(&cn->notifier_head);
cn               4212 drivers/clk/clk.c 			list_del(&cn->node);
cn               4213 drivers/clk/clk.c 			kfree(cn);
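
The clk.c hits show one struct clk_notifier per clock kept on the global clk_notifier_list, each carrying an SRCU notifier head: registration reuses or allocates the node, unregistration frees it once its chain is empty, and __clk_notify() walks the list to fire the chain. A hedged consumer-side sketch using the public notifier API (callback body and names are illustrative only):

/* Sketch of a clk rate-change listener; example_clk_cb/example_nb are mine. */
#include <linux/clk.h>
#include <linux/notifier.h>
#include <linux/printk.h>

static int example_clk_cb(struct notifier_block *nb, unsigned long event,
			  void *data)
{
	struct clk_notifier_data *cnd = data;

	if (event == POST_RATE_CHANGE)
		pr_info("clk rate changed: %lu -> %lu\n",
			cnd->old_rate, cnd->new_rate);
	return NOTIFY_OK;
}

static struct notifier_block example_nb = { .notifier_call = example_clk_cb };

/* clk_notifier_register(clk, &example_nb) allocates (or reuses) the
 * struct clk_notifier node seen above and chains example_nb onto its SRCU
 * notifier head; clk_notifier_unregister() undoes this and frees the node
 * once its notifier chain is empty. */
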
cn                304 drivers/dma/k3dma.c 	struct k3_dma_chan *c, *cn;
cn                308 drivers/dma/k3dma.c 	list_for_each_entry_safe(c, cn, &d->slave.channels, vc.chan.device_node) {
cn                985 drivers/dma/k3dma.c 	struct k3_dma_chan *c, *cn;
cn                993 drivers/dma/k3dma.c 	list_for_each_entry_safe(c, cn, &d->slave.channels, vc.chan.device_node) {
cn               1224 drivers/dma/pxa_dma.c 	struct pxad_chan *c, *cn;
cn               1226 drivers/dma/pxa_dma.c 	list_for_each_entry_safe(c, cn, &dmadev->channels,
cn                892 drivers/dma/sa11x0-dma.c 	struct sa11x0_dma_chan *c, *cn;
cn                894 drivers/dma/sa11x0-dma.c 	list_for_each_entry_safe(c, cn, &dmadev->channels, vc.chan.device_node) {
cn               1203 drivers/dma/sprd-dma.c 	struct sprd_dma_chn *c, *cn;
cn               1214 drivers/dma/sprd-dma.c 	list_for_each_entry_safe(c, cn, &sdev->dma_dev.channels,
cn                220 drivers/dma/zx_dma.c 	struct zx_dma_chan *c, *cn;
cn                225 drivers/dma/zx_dma.c 	list_for_each_entry_safe(c, cn, &d->slave.channels,
cn                883 drivers/dma/zx_dma.c 	struct zx_dma_chan *c, *cn;
cn                892 drivers/dma/zx_dma.c 	list_for_each_entry_safe(c, cn, &d->slave.channels,
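
In the dma driver hits above, cn is the spare cursor of list_for_each_entry_safe(): it pre-loads the next entry so the current one can be unlinked and freed inside the loop body. A minimal self-contained sketch of that idiom (example_chan/example_free_all are invented names):

/* Sketch: why a second cursor is needed when deleting while iterating. */
#include <linux/list.h>
#include <linux/slab.h>

struct example_chan {
	struct list_head node;
};

static void example_free_all(struct list_head *channels)
{
	struct example_chan *c, *cn;

	/*
	 * "cn" is fetched before the body runs, so list_del()/kfree()
	 * on "c" cannot break the traversal.
	 */
	list_for_each_entry_safe(c, cn, channels, node) {
		list_del(&c->node);
		kfree(c);
	}
}
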
cn                194 drivers/gpu/drm/i915/gem/i915_gem_context.c 	struct i915_gem_context *ctx, *cn;
cn                200 drivers/gpu/drm/i915/gem/i915_gem_context.c 	list_for_each_entry_safe(ctx, cn,
cn                339 drivers/gpu/drm/i915/gem/i915_gem_context.c 	struct i915_gem_context *ctx, *cn;
cn                343 drivers/gpu/drm/i915/gem/i915_gem_context.c 	llist_for_each_entry_safe(ctx, cn, freed, free_link)
cn                135 drivers/gpu/drm/i915/gt/intel_breadcrumbs.c 	struct intel_context *ce, *cn;
cn                144 drivers/gpu/drm/i915/gt/intel_breadcrumbs.c 	list_for_each_entry_safe(ce, cn, &b->signalers, signal_link) {
cn                150 drivers/gpu/drm/i915/selftests/i915_vma.c 	struct i915_gem_context *ctx, *cn;
cn                192 drivers/gpu/drm/i915/selftests/i915_vma.c 		list_for_each_entry_safe(ctx, cn, &contexts, link) {
cn                204 drivers/gpu/drm/i915/selftests/i915_vma.c 	list_for_each_entry_safe(ctx, cn, &contexts, link) {
cn                477 drivers/gpu/drm/nouveau/nouveau_connector.c 	struct device_node *cn, *dn = pci_device_to_OF_node(dev->pdev);
cn                484 drivers/gpu/drm/nouveau/nouveau_connector.c 	for_each_child_of_node(dn, cn) {
cn                485 drivers/gpu/drm/nouveau/nouveau_connector.c 		const char *name = of_get_property(cn, "name", NULL);
cn                486 drivers/gpu/drm/nouveau/nouveau_connector.c 		const void *edid = of_get_property(cn, "EDID", NULL);
cn                492 drivers/gpu/drm/nouveau/nouveau_connector.c 			of_node_put(cn);
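
The nouveau hit illustrates the child-iteration refcount rule: for_each_child_of_node() releases the previous child on each step, so only an early break needs an explicit of_node_put(), as the call at nouveau_connector.c:492 above does. A short sketch under that assumption (example_find_edid is an invented name):

/* Sketch only: stop scanning children once a property is found; the
 * iterator holds a reference on the current child, so an early break
 * must of_node_put() it.  The returned pointer refers to property data
 * owned by the device tree, which outlives the node reference. */
#include <linux/of.h>

static const void *example_find_edid(struct device_node *dn)
{
	struct device_node *cn;
	const void *edid = NULL;

	for_each_child_of_node(dn, cn) {
		edid = of_get_property(cn, "EDID", NULL);
		if (edid) {
			of_node_put(cn);	/* drop the loop's reference */
			break;
		}
	}
	return edid;
}
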
cn                426 drivers/gpu/drm/omapdrm/dss/hdmi5_core.c 	unsigned int yq, cn, pr;
cn                452 drivers/gpu/drm/omapdrm/dss/hdmi5_core.c 	cn = (ptr[4] >> 4) & 0x3;
cn                467 drivers/gpu/drm/omapdrm/dss/hdmi5_core.c 		(yq << 2) | (cn << 0));
cn               1755 drivers/media/dvb-frontends/mb86a16.c 	u8  cn;
cn               1758 drivers/media/dvb-frontends/mb86a16.c 	if (mb86a16_read(state, 0x26, &cn) != 2) {
cn               1764 drivers/media/dvb-frontends/mb86a16.c 		if (cn < cnr_tab[i].cn_reg) {
cn                259 drivers/media/dvb-frontends/tc90522.c 		s64 cn;
cn                271 drivers/media/dvb-frontends/tc90522.c 		cn = div64_s64(-16346LL * p4 * p, 10) >> 35;
cn                272 drivers/media/dvb-frontends/tc90522.c 		cn += (14341LL * p4) >> 21;
cn                273 drivers/media/dvb-frontends/tc90522.c 		cn -= (50259LL * cndat * p) >> 23;
cn                274 drivers/media/dvb-frontends/tc90522.c 		cn += (88977LL * cndat) >> 9;
cn                275 drivers/media/dvb-frontends/tc90522.c 		cn -= (89565LL * p) >> 11;
cn                276 drivers/media/dvb-frontends/tc90522.c 		cn += 58857  << 3;
cn                277 drivers/media/dvb-frontends/tc90522.c 		stats->stat[0].svalue = cn >> 3;
cn                417 drivers/media/dvb-frontends/tc90522.c 		s64 cn;
cn                428 drivers/media/dvb-frontends/tc90522.c 		cn = 24772;
cn                429 drivers/media/dvb-frontends/tc90522.c 		cn += div64_s64(43827LL * p, 10) >> 24;
cn                431 drivers/media/dvb-frontends/tc90522.c 		cn += div64_s64(3184LL * tmp * tmp, 10) >> 32;
cn                433 drivers/media/dvb-frontends/tc90522.c 		cn -= div64_s64(128LL * tmp * tmp * tmp, 10) >> 33;
cn                435 drivers/media/dvb-frontends/tc90522.c 		cn += div64_s64(192LL * tmp * tmp * tmp * tmp, 1000) >> 24;
cn                437 drivers/media/dvb-frontends/tc90522.c 		stats->stat[0].svalue = cn >> 3;
cn                430 drivers/media/i2c/adv7511-v4l2.c 		u8 itc, cn;
cn                434 drivers/media/i2c/adv7511-v4l2.c 		cn = itc ? state->content_type : V4L2_DV_IT_CONTENT_TYPE_GRAPHICS;
cn                436 drivers/media/i2c/adv7511-v4l2.c 		adv7511_wr_and_or(sd, 0x59, 0xcf, cn << 4);
cn               1313 drivers/media/i2c/adv7511-v4l2.c 	u8 cn = itc ? state->content_type : V4L2_DV_IT_CONTENT_TYPE_GRAPHICS;
cn               1428 drivers/media/i2c/adv7511-v4l2.c 	adv7511_wr_and_or(sd, 0x59, 0x0f, (yq << 6) | (cn << 4));
cn                149 drivers/mmc/host/cavium-octeon.c 	struct device_node *cn, *node = pdev->dev.of_node;
cn                280 drivers/mmc/host/cavium-octeon.c 	for_each_child_of_node(node, cn) {
cn                282 drivers/mmc/host/cavium-octeon.c 			of_platform_device_create(cn, NULL, &pdev->dev);
cn               2067 drivers/net/wireless/marvell/libertas/cfg.c 		const char *cn;
cn               2084 drivers/net/wireless/marvell/libertas/cfg.c 			regulatory_hint(priv->wdev->wiphy, regmap[i].cn);
cn                 83 drivers/phy/freescale/phy-fsl-imx8-mipi-dphy.c 	u32 cn;
cn                191 drivers/phy/freescale/phy-fsl-imx8-mipi-dphy.c 	cfg->cn = denominator >> i;
cn                196 drivers/phy/freescale/phy-fsl-imx8-mipi-dphy.c 	    cfg->cn < 1 || cfg->cn > 32 ||
cn                199 drivers/phy/freescale/phy-fsl-imx8-mipi-dphy.c 			cfg->cm, cfg->cn, cfg->co);
cn                306 drivers/phy/freescale/phy-fsl-imx8-mipi-dphy.c 	    priv->cfg.cn < 1 || priv->cfg.cn > 32 ||
cn                309 drivers/phy/freescale/phy-fsl-imx8-mipi-dphy.c 			priv->cfg.cm, priv->cfg.cn, priv->cfg.co);
cn                313 drivers/phy/freescale/phy-fsl-imx8-mipi-dphy.c 		priv->cfg.cm, priv->cfg.cn, priv->cfg.co);
cn                315 drivers/phy/freescale/phy-fsl-imx8-mipi-dphy.c 	phy_write(phy, CN(priv->cfg.cn), DPHY_CN);
cn                616 drivers/usb/gadget/configfs.c 	struct gadget_config_name *cn = to_gadget_config_name(item);
cn                618 drivers/usb/gadget/configfs.c 	kfree(cn->configuration);
cn                620 drivers/usb/gadget/configfs.c 	list_del(&cn->list);
cn                621 drivers/usb/gadget/configfs.c 	kfree(cn);
cn               1332 drivers/usb/gadget/configfs.c 		struct gadget_config_name *cn;
cn               1340 drivers/usb/gadget/configfs.c 			list_for_each_entry(cn, &cfg->string_list, list) {
cn               1341 drivers/usb/gadget/configfs.c 				cfg->gstrings[i] = &cn->stringtab_dev;
cn               1342 drivers/usb/gadget/configfs.c 				cn->stringtab_dev.strings = &cn->strings;
cn               1343 drivers/usb/gadget/configfs.c 				cn->strings.s = cn->configuration;
cn                843 drivers/video/fbdev/mb862xx/mb862xxfbdrv.c 	int cn, ver;
cn                862 drivers/video/fbdev/mb862xx/mb862xxfbdrv.c 	cn = (ver & GC_CID_CNAME_MSK) >> 8;
cn                864 drivers/video/fbdev/mb862xx/mb862xxfbdrv.c 	if (cn == 3) {
cn                422 drivers/video/fbdev/mmp/hw/mmp_ctrl.h #define	 CFG_CBSH_ENA(cn)			((cn)<<29)
cn                432 drivers/video/fbdev/mmp/hw/mmp_ctrl.h #define	 CFG_HWC_ENA(cn)			((cn)<<24)
cn                418 drivers/video/fbdev/omap2/omapfb/dss/hdmi5_core.c 	unsigned yq, cn, pr;
cn                444 drivers/video/fbdev/omap2/omapfb/dss/hdmi5_core.c 	cn = (ptr[4] >> 4) & 0x3;
cn                459 drivers/video/fbdev/omap2/omapfb/dss/hdmi5_core.c 		(yq << 2) | (cn << 0));
cn                213 drivers/video/fbdev/pxa168fb.h #define     CFG_CBSH_ENA(cn)			((cn) << 29)
cn                223 drivers/video/fbdev/pxa168fb.h #define     CFG_HWC_ENA(cn)		        ((cn) << 24)
cn                 26 drivers/w1/w1_netlink.c 	struct cn_msg *cn; /* advances as cn_msg is appended */
cn                 57 drivers/w1/w1_netlink.c 	if (!block->cn)
cn                 59 drivers/w1/w1_netlink.c 	return (u8 *)block->cn - (u8 *)block->first_cn + block->cn->len;
cn                 88 drivers/w1/w1_netlink.c 		block->cn = NULL;
cn                 97 drivers/w1/w1_netlink.c 	if (!(block->request_cn.flags & W1_CN_BUNDLE) && block->cn)
cn                112 drivers/w1/w1_netlink.c 	if (block->cn && block->cn->ack == ack) {
cn                113 drivers/w1/w1_netlink.c 		block->msg = (struct w1_netlink_msg *)(block->cn->data + block->cn->len);
cn                116 drivers/w1/w1_netlink.c 		if (block->cn)
cn                117 drivers/w1/w1_netlink.c 			block->cn = (struct cn_msg *)(block->cn->data +
cn                118 drivers/w1/w1_netlink.c 				block->cn->len);
cn                120 drivers/w1/w1_netlink.c 			block->cn = block->first_cn;
cn                122 drivers/w1/w1_netlink.c 		memcpy(block->cn, &block->request_cn, sizeof(*block->cn));
cn                123 drivers/w1/w1_netlink.c 		block->cn->len = 0;
cn                124 drivers/w1/w1_netlink.c 		block->cn->ack = ack;
cn                125 drivers/w1/w1_netlink.c 		block->msg = (struct w1_netlink_msg *)block->cn->data;
cn                146 drivers/w1/w1_netlink.c 	block->cn->len += sizeof(*block->msg);
cn                153 drivers/w1/w1_netlink.c 	block->cn->len += space;
cn                169 drivers/w1/w1_netlink.c 	block->cn->len += sizeof(*req_msg);
cn                175 drivers/w1/w1_netlink.c 		block->cn->len += sizeof(*cmd);
cn                192 drivers/w1/w1_netlink.c static void w1_netlink_send_error(struct cn_msg *cn, struct w1_netlink_msg *msg,
cn                196 drivers/w1/w1_netlink.c 		struct cn_msg cn;
cn                199 drivers/w1/w1_netlink.c 	memcpy(&packet.cn, cn, sizeof(packet.cn));
cn                201 drivers/w1/w1_netlink.c 	packet.cn.len = sizeof(packet.msg);
cn                204 drivers/w1/w1_netlink.c 	cn_netlink_send(&packet.cn, portid, 0, GFP_KERNEL);
cn                217 drivers/w1/w1_netlink.c 		struct cn_msg cn;
cn                222 drivers/w1/w1_netlink.c 	packet.cn.id.idx = CN_W1_IDX;
cn                223 drivers/w1/w1_netlink.c 	packet.cn.id.val = CN_W1_VAL;
cn                225 drivers/w1/w1_netlink.c 	packet.cn.seq = dev->seq++;
cn                226 drivers/w1/w1_netlink.c 	packet.cn.len = sizeof(*msg);
cn                231 drivers/w1/w1_netlink.c 	cn_netlink_send(&packet.cn, 0, 0, GFP_KERNEL);
cn                251 drivers/w1/w1_netlink.c 	block->cn->len += sizeof(*data);
cn                400 drivers/w1/w1_netlink.c 	struct cn_msg *cn;
cn                404 drivers/w1/w1_netlink.c 	cn = kmalloc(PAGE_SIZE, GFP_KERNEL);
cn                405 drivers/w1/w1_netlink.c 	if (!cn)
cn                408 drivers/w1/w1_netlink.c 	cn->id.idx = CN_W1_IDX;
cn                409 drivers/w1/w1_netlink.c 	cn->id.val = CN_W1_VAL;
cn                411 drivers/w1/w1_netlink.c 	cn->seq = req_cn->seq;
cn                412 drivers/w1/w1_netlink.c 	cn->ack = req_cn->seq + 1;
cn                413 drivers/w1/w1_netlink.c 	cn->len = sizeof(struct w1_netlink_msg);
cn                414 drivers/w1/w1_netlink.c 	msg = (struct w1_netlink_msg *)cn->data;
cn                423 drivers/w1/w1_netlink.c 		if (cn->len + sizeof(*id) > PAGE_SIZE - sizeof(struct cn_msg)) {
cn                424 drivers/w1/w1_netlink.c 			cn_netlink_send(cn, portid, 0, GFP_KERNEL);
cn                425 drivers/w1/w1_netlink.c 			cn->len = sizeof(struct w1_netlink_msg);
cn                432 drivers/w1/w1_netlink.c 		cn->len += sizeof(*id);
cn                435 drivers/w1/w1_netlink.c 	cn_netlink_send(cn, portid, 0, GFP_KERNEL);
cn                438 drivers/w1/w1_netlink.c 	kfree(cn);
cn                535 drivers/w1/w1_netlink.c static void w1_cn_callback(struct cn_msg *cn, struct netlink_skb_parms *nsp)
cn                537 drivers/w1/w1_netlink.c 	struct w1_netlink_msg *msg = (struct w1_netlink_msg *)(cn + 1);
cn                552 drivers/w1/w1_netlink.c 	if (cn->flags & ~(W1_CN_BUNDLE)) {
cn                553 drivers/w1/w1_netlink.c 		w1_netlink_send_error(cn, msg, nsp->portid, -EINVAL);
cn                560 drivers/w1/w1_netlink.c 	msg_len = cn->len;
cn                579 drivers/w1/w1_netlink.c 	msg = (struct w1_netlink_msg *)(cn + 1);
cn                582 drivers/w1/w1_netlink.c 		int reply_size = sizeof(*cn) + cn->len + slave_len;
cn                583 drivers/w1/w1_netlink.c 		if (cn->flags & W1_CN_BUNDLE) {
cn                598 drivers/w1/w1_netlink.c 			sizeof(struct w1_cb_block) + sizeof(*cn) + cn->len +
cn                609 drivers/w1/w1_netlink.c 			w1_netlink_send_error(cn, msg, nsp->portid, -ENOMEM);
cn                614 drivers/w1/w1_netlink.c 		memcpy(&block->request_cn, cn, sizeof(*cn) + cn->len);
cn                615 drivers/w1/w1_netlink.c 		node = (struct w1_cb_node *)(block->request_cn.data + cn->len);
cn                631 drivers/w1/w1_netlink.c 	msg_len = cn->len;
cn                644 drivers/w1/w1_netlink.c 			err = w1_process_command_root(cn, nsp->portid);
cn                665 drivers/w1/w1_netlink.c 				__func__, cn->id.idx, cn->id.val,
cn                682 drivers/w1/w1_netlink.c 			(size_t)((u8 *)msg - (u8 *)cn));
cn                698 drivers/w1/w1_netlink.c 			w1_netlink_send_error(cn, msg, nsp->portid, err);
cn                727 drivers/w1/w1_netlink.c void w1_netlink_send(struct w1_master *dev, struct w1_netlink_msg *cn)
cn                 67 fs/coredump.c  static int expand_corename(struct core_name *cn, int size)
cn                 69 fs/coredump.c  	char *corename = krealloc(cn->corename, size, GFP_KERNEL);
cn                 77 fs/coredump.c  	cn->size = ksize(corename);
cn                 78 fs/coredump.c  	cn->corename = corename;
cn                 82 fs/coredump.c  static __printf(2, 0) int cn_vprintf(struct core_name *cn, const char *fmt,
cn                 89 fs/coredump.c  	free = cn->size - cn->used;
cn                 92 fs/coredump.c  	need = vsnprintf(cn->corename + cn->used, free, fmt, arg_copy);
cn                 96 fs/coredump.c  		cn->used += need;
cn                100 fs/coredump.c  	if (!expand_corename(cn, cn->size + need - free + 1))
cn                106 fs/coredump.c  static __printf(2, 3) int cn_printf(struct core_name *cn, const char *fmt, ...)
cn                112 fs/coredump.c  	ret = cn_vprintf(cn, fmt, arg);
cn                119 fs/coredump.c  int cn_esc_printf(struct core_name *cn, const char *fmt, ...)
cn                121 fs/coredump.c  	int cur = cn->used;
cn                126 fs/coredump.c  	ret = cn_vprintf(cn, fmt, arg);
cn                134 fs/coredump.c  		if ((cn->used - cur == 1 && cn->corename[cur] == '.') ||
cn                135 fs/coredump.c  				(cn->used - cur == 2 && cn->corename[cur] == '.'
cn                136 fs/coredump.c  				&& cn->corename[cur+1] == '.'))
cn                137 fs/coredump.c  			cn->corename[cur] = '!';
cn                145 fs/coredump.c  		if (cn->used == cur)
cn                146 fs/coredump.c  			ret = cn_printf(cn, "!");
cn                149 fs/coredump.c  	for (; cur < cn->used; ++cur) {
cn                150 fs/coredump.c  		if (cn->corename[cur] == '/')
cn                151 fs/coredump.c  			cn->corename[cur] = '!';
cn                156 fs/coredump.c  static int cn_print_exe_file(struct core_name *cn)
cn                164 fs/coredump.c  		return cn_esc_printf(cn, "%s (path unknown)", current->comm);
cn                178 fs/coredump.c  	ret = cn_esc_printf(cn, "%s", path);
cn                191 fs/coredump.c  static int format_corename(struct core_name *cn, struct coredump_params *cprm,
cn                201 fs/coredump.c  	cn->used = 0;
cn                202 fs/coredump.c  	cn->corename = NULL;
cn                203 fs/coredump.c  	if (expand_corename(cn, core_name_size))
cn                205 fs/coredump.c  	cn->corename[0] = '\0';
cn                232 fs/coredump.c  				err = cn_printf(cn, "%c", '\0');
cn                235 fs/coredump.c  				(*argv)[(*argc)++] = cn->used;
cn                239 fs/coredump.c  			err = cn_printf(cn, "%c", *pat_ptr++);
cn                247 fs/coredump.c  				err = cn_printf(cn, "%c", '%');
cn                252 fs/coredump.c  				err = cn_printf(cn, "%d",
cn                257 fs/coredump.c  				err = cn_printf(cn, "%d",
cn                261 fs/coredump.c  				err = cn_printf(cn, "%d",
cn                265 fs/coredump.c  				err = cn_printf(cn, "%d",
cn                270 fs/coredump.c  				err = cn_printf(cn, "%u",
cn                276 fs/coredump.c  				err = cn_printf(cn, "%u",
cn                281 fs/coredump.c  				err = cn_printf(cn, "%d",
cn                286 fs/coredump.c  				err = cn_printf(cn, "%d",
cn                294 fs/coredump.c  				err = cn_printf(cn, "%lld", time);
cn                300 fs/coredump.c  				err = cn_esc_printf(cn, "%s",
cn                306 fs/coredump.c  				err = cn_esc_printf(cn, "%s", current->comm);
cn                309 fs/coredump.c  				err = cn_print_exe_file(cn);
cn                313 fs/coredump.c  				err = cn_printf(cn, "%lu",
cn                333 fs/coredump.c  		err = cn_printf(cn, ".%d", task_tgid_vnr(current));
cn                570 fs/coredump.c  	struct core_name cn;
cn                625 fs/coredump.c  	ispipe = format_corename(&cn, &cprm, &argv, &argc);
cn                679 fs/coredump.c  			helper_argv[argi] = cn.corename + argv[argi];
cn                693 fs/coredump.c  			       cn.corename);
cn                704 fs/coredump.c  		if (need_suid_safe && cn.corename[0] != '/') {
cn                722 fs/coredump.c  			do_unlinkat(AT_FDCWD, getname_kernel(cn.corename));
cn                749 fs/coredump.c  				cn.corename, open_flags, 0600);
cn                752 fs/coredump.c  			cprm.file = filp_open(cn.corename, open_flags, 0600);
cn                796 fs/coredump.c  			pr_info("Core dump to |%s disabled\n", cn.corename);
cn                813 fs/coredump.c  	kfree(cn.corename);
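
The fs/coredump.c hits center on a growable corename buffer: cn_vprintf() formats into the remaining space and, when vsnprintf() reports the output did not fit, expand_corename() krealloc()s the buffer and the write is retried. A userspace sketch of the same grow-and-retry pattern (struct name_buf and buf_printf are invented names, not the kernel's):

/* Userspace illustration of grow-and-retry formatting; b->s must already
 * point at a heap buffer of b->size bytes (cf. expand_corename()). */
#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>

struct name_buf {
	char   *s;
	size_t  size;
	size_t  used;
};

static int buf_printf(struct name_buf *b, const char *fmt, ...)
{
	va_list ap;
	size_t free_space = b->size - b->used;
	int need;
	char *bigger;

	va_start(ap, fmt);
	need = vsnprintf(b->s + b->used, free_space, fmt, ap);
	va_end(ap);
	if (need < 0)
		return -1;

	if ((size_t)need < free_space) {	/* it fit, including the NUL */
		b->used += need;
		return 0;
	}

	/* did not fit: grow to exactly what is required and format again */
	bigger = realloc(b->s, b->used + need + 1);
	if (!bigger)
		return -1;
	b->s = bigger;
	b->size = b->used + need + 1;

	va_start(ap, fmt);
	vsnprintf(b->s + b->used, b->size - b->used, fmt, ap);
	va_end(ap);
	b->used += need;
	return 0;
}
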
cn                806 fs/nfsd/nfs4recover.c 	struct cld_net *cn = nn->cld_net;
cn                847 fs/nfsd/nfs4recover.c 			cn->cn_has_legacy = true;
cn                868 fs/nfsd/nfs4recover.c 	struct cld_net *cn = nn->cld_net;
cn                895 fs/nfsd/nfs4recover.c 	spin_lock(&cn->cn_lock);
cn                896 fs/nfsd/nfs4recover.c 	list_for_each_entry(tmp, &cn->cn_list, cu_list) {
cn                904 fs/nfsd/nfs4recover.c 	spin_unlock(&cn->cn_lock);
cn                995 fs/nfsd/nfs4recover.c 	struct cld_net *cn;
cn               1000 fs/nfsd/nfs4recover.c 	cn = kzalloc(sizeof(*cn), GFP_KERNEL);
cn               1001 fs/nfsd/nfs4recover.c 	if (!cn) {
cn               1006 fs/nfsd/nfs4recover.c 	cn->cn_pipe = rpc_mkpipe_data(&cld_upcall_ops, RPC_PIPE_WAIT_FOR_OPEN);
cn               1007 fs/nfsd/nfs4recover.c 	if (IS_ERR(cn->cn_pipe)) {
cn               1008 fs/nfsd/nfs4recover.c 		ret = PTR_ERR(cn->cn_pipe);
cn               1011 fs/nfsd/nfs4recover.c 	spin_lock_init(&cn->cn_lock);
cn               1012 fs/nfsd/nfs4recover.c 	INIT_LIST_HEAD(&cn->cn_list);
cn               1014 fs/nfsd/nfs4recover.c 	dentry = nfsd4_cld_register_net(net, cn->cn_pipe);
cn               1020 fs/nfsd/nfs4recover.c 	cn->cn_pipe->dentry = dentry;
cn               1021 fs/nfsd/nfs4recover.c 	cn->cn_has_legacy = false;
cn               1022 fs/nfsd/nfs4recover.c 	nn->cld_net = cn;
cn               1026 fs/nfsd/nfs4recover.c 	rpc_destroy_pipe_data(cn->cn_pipe);
cn               1028 fs/nfsd/nfs4recover.c 	kfree(cn);
cn               1049 fs/nfsd/nfs4recover.c 	struct cld_net *cn = nn->cld_net;
cn               1051 fs/nfsd/nfs4recover.c 	nfsd4_cld_unregister_net(net, cn->cn_pipe);
cn               1052 fs/nfsd/nfs4recover.c 	rpc_destroy_pipe_data(cn->cn_pipe);
cn               1053 fs/nfsd/nfs4recover.c 	if (cn->cn_tfm)
cn               1054 fs/nfsd/nfs4recover.c 		crypto_free_shash(cn->cn_tfm);
cn               1063 fs/nfsd/nfs4recover.c 	struct cld_net *cn = nn->cld_net;
cn               1071 fs/nfsd/nfs4recover.c 	spin_lock(&cn->cn_lock);
cn               1072 fs/nfsd/nfs4recover.c 	list_for_each_entry(tmp, &cn->cn_list, cu_list) {
cn               1073 fs/nfsd/nfs4recover.c 		if (tmp->cu_u.cu_msg.cm_xid == cn->cn_xid) {
cn               1074 fs/nfsd/nfs4recover.c 			cn->cn_xid++;
cn               1075 fs/nfsd/nfs4recover.c 			spin_unlock(&cn->cn_lock);
cn               1081 fs/nfsd/nfs4recover.c 	put_unaligned(cn->cn_xid++, &new->cu_u.cu_msg.cm_xid);
cn               1082 fs/nfsd/nfs4recover.c 	new->cu_net = cn;
cn               1083 fs/nfsd/nfs4recover.c 	list_add(&new->cu_list, &cn->cn_list);
cn               1084 fs/nfsd/nfs4recover.c 	spin_unlock(&cn->cn_lock);
cn               1094 fs/nfsd/nfs4recover.c 	struct cld_net *cn = victim->cu_net;
cn               1096 fs/nfsd/nfs4recover.c 	spin_lock(&cn->cn_lock);
cn               1098 fs/nfsd/nfs4recover.c 	spin_unlock(&cn->cn_lock);
cn               1109 fs/nfsd/nfs4recover.c 	struct cld_net *cn = nn->cld_net;
cn               1126 fs/nfsd/nfs4recover.c 	ret = cld_pipe_upcall(cn->cn_pipe, &cup->cu_u.cu_msg);
cn               1146 fs/nfsd/nfs4recover.c 	struct cld_net *cn = nn->cld_net;
cn               1148 fs/nfsd/nfs4recover.c 	struct crypto_shash *tfm = cn->cn_tfm;
cn               1194 fs/nfsd/nfs4recover.c 	ret = cld_pipe_upcall(cn->cn_pipe, cmsg);
cn               1215 fs/nfsd/nfs4recover.c 	struct cld_net *cn = nn->cld_net;
cn               1232 fs/nfsd/nfs4recover.c 	ret = cld_pipe_upcall(cn->cn_pipe, &cup->cu_u.cu_msg);
cn               1257 fs/nfsd/nfs4recover.c 	struct cld_net *cn = nn->cld_net;
cn               1275 fs/nfsd/nfs4recover.c 	ret = cld_pipe_upcall(cn->cn_pipe, &cup->cu_u.cu_msg);
cn               1296 fs/nfsd/nfs4recover.c 	struct cld_net *cn = nn->cld_net;
cn               1310 fs/nfsd/nfs4recover.c 	if (cn->cn_has_legacy) {
cn               1339 fs/nfsd/nfs4recover.c 	struct cld_net *cn = nn->cld_net;
cn               1343 fs/nfsd/nfs4recover.c 	struct crypto_shash *tfm = cn->cn_tfm;
cn               1357 fs/nfsd/nfs4recover.c 	if (cn->cn_has_legacy) {
cn               1412 fs/nfsd/nfs4recover.c 	struct cld_net *cn = nn->cld_net;
cn               1421 fs/nfsd/nfs4recover.c 	ret = cld_pipe_upcall(cn->cn_pipe, &cup->cu_u.cu_msg);
cn               1439 fs/nfsd/nfs4recover.c 	struct cld_net *cn = nn->cld_net;
cn               1449 fs/nfsd/nfs4recover.c 	ret = cld_pipe_upcall(cn->cn_pipe, &cup->cu_u.cu_msg);
cn               1468 fs/nfsd/nfs4recover.c 	struct cld_net *cn = nn->cld_net;
cn               1477 fs/nfsd/nfs4recover.c 	ret = cld_pipe_upcall(cn->cn_pipe, &cup->cu_u.cu_msg);
cn               1521 fs/nfsd/nfs4recover.c 	struct cld_net *cn = nn->cld_net;
cn               1522 fs/nfsd/nfs4recover.c 	struct rpc_pipe *pipe = cn->cn_pipe;
cn               1532 fs/nfsd/nfs4recover.c 	struct cld_net *cn = nn->cld_net;
cn               1541 fs/nfsd/nfs4recover.c 	ret = cld_pipe_upcall(cn->cn_pipe, &cup->cu_u.cu_msg);
cn               2140 fs/nfsd/nfs4recover.c 	struct cld_net *cn = nn->cld_net;
cn               2147 fs/nfsd/nfs4recover.c 	if (!cn) {
cn               2154 fs/nfsd/nfs4recover.c 		dentry = nfsd4_cld_register_sb(sb, cn->cn_pipe);
cn               2159 fs/nfsd/nfs4recover.c 		cn->cn_pipe->dentry = dentry;
cn               2162 fs/nfsd/nfs4recover.c 		if (cn->cn_pipe->dentry)
cn               2163 fs/nfsd/nfs4recover.c 			nfsd4_cld_unregister_sb(cn->cn_pipe);
cn                 92 fs/reiserfs/journal.c static int can_dirty(struct reiserfs_journal_cnode *cn);
cn                371 fs/reiserfs/journal.c 	struct reiserfs_journal_cnode *cn;
cn                381 fs/reiserfs/journal.c 	cn = journal->j_cnode_free_list;
cn                382 fs/reiserfs/journal.c 	if (!cn) {
cn                383 fs/reiserfs/journal.c 		return cn;
cn                385 fs/reiserfs/journal.c 	if (cn->next) {
cn                386 fs/reiserfs/journal.c 		cn->next->prev = NULL;
cn                388 fs/reiserfs/journal.c 	journal->j_cnode_free_list = cn->next;
cn                389 fs/reiserfs/journal.c 	memset(cn, 0, sizeof(struct reiserfs_journal_cnode));
cn                390 fs/reiserfs/journal.c 	return cn;
cn                397 fs/reiserfs/journal.c 		       struct reiserfs_journal_cnode *cn)
cn                406 fs/reiserfs/journal.c 	cn->next = journal->j_cnode_free_list;
cn                408 fs/reiserfs/journal.c 		journal->j_cnode_free_list->prev = cn;
cn                410 fs/reiserfs/journal.c 	cn->prev = NULL;	/* not needed with the memset, but I might kill the memset, and forget to do this */
cn                411 fs/reiserfs/journal.c 	journal->j_cnode_free_list = cn;
cn                432 fs/reiserfs/journal.c 	struct reiserfs_journal_cnode *cn;
cn                433 fs/reiserfs/journal.c 	cn = journal_hash(table, sb, bl);
cn                434 fs/reiserfs/journal.c 	while (cn) {
cn                435 fs/reiserfs/journal.c 		if (cn->blocknr == bl && cn->sb == sb)
cn                436 fs/reiserfs/journal.c 			return cn;
cn                437 fs/reiserfs/journal.c 		cn = cn->hnext;
cn                466 fs/reiserfs/journal.c 	struct reiserfs_journal_cnode *cn;
cn                502 fs/reiserfs/journal.c 	    && (cn =
cn                508 fs/reiserfs/journal.c 	if ((cn = get_journal_hash_dev(sb, journal->j_hash_table, bl))) {
cn                520 fs/reiserfs/journal.c 				       struct reiserfs_journal_cnode *cn)
cn                524 fs/reiserfs/journal.c 	cn_orig = journal_hash(table, cn->sb, cn->blocknr);
cn                525 fs/reiserfs/journal.c 	cn->hnext = cn_orig;
cn                526 fs/reiserfs/journal.c 	cn->hprev = NULL;
cn                528 fs/reiserfs/journal.c 		cn_orig->hprev = cn;
cn                530 fs/reiserfs/journal.c 	journal_hash(table, cn->sb, cn->blocknr) = cn;
cn               1171 fs/reiserfs/journal.c 							  *cn)
cn               1173 fs/reiserfs/journal.c 	struct super_block *sb = cn->sb;
cn               1174 fs/reiserfs/journal.c 	b_blocknr_t blocknr = cn->blocknr;
cn               1176 fs/reiserfs/journal.c 	cn = cn->hprev;
cn               1177 fs/reiserfs/journal.c 	while (cn) {
cn               1178 fs/reiserfs/journal.c 		if (cn->sb == sb && cn->blocknr == blocknr && cn->jlist) {
cn               1179 fs/reiserfs/journal.c 			return cn->jlist;
cn               1181 fs/reiserfs/journal.c 		cn = cn->hprev;
cn               1202 fs/reiserfs/journal.c 	struct reiserfs_journal_cnode *cn, *last;
cn               1203 fs/reiserfs/journal.c 	cn = jl->j_realblock;
cn               1209 fs/reiserfs/journal.c 	while (cn) {
cn               1210 fs/reiserfs/journal.c 		if (cn->blocknr != 0) {
cn               1214 fs/reiserfs/journal.c 						 cn->blocknr, cn->bh ? 1 : 0,
cn               1215 fs/reiserfs/journal.c 						 cn->state);
cn               1217 fs/reiserfs/journal.c 			cn->state = 0;
cn               1219 fs/reiserfs/journal.c 					    jl, cn->blocknr, 1);
cn               1221 fs/reiserfs/journal.c 		last = cn;
cn               1222 fs/reiserfs/journal.c 		cn = cn->next;
cn               1351 fs/reiserfs/journal.c 	struct reiserfs_journal_cnode *cn;
cn               1412 fs/reiserfs/journal.c 	cn = jl->j_realblock;
cn               1413 fs/reiserfs/journal.c 	while (cn) {
cn               1418 fs/reiserfs/journal.c 		if (cn->blocknr == 0) {
cn               1429 fs/reiserfs/journal.c 		pjl = find_newer_jl_for_cn(cn);
cn               1435 fs/reiserfs/journal.c 		if (!pjl && cn->bh) {
cn               1436 fs/reiserfs/journal.c 			saved_bh = cn->bh;
cn               1445 fs/reiserfs/journal.c 				BUG_ON(!can_dirty(cn));
cn               1448 fs/reiserfs/journal.c 			} else if (can_dirty(cn)) {
cn               1494 fs/reiserfs/journal.c 			set_bit(BLOCK_NEEDS_FLUSH, &cn->state);
cn               1496 fs/reiserfs/journal.c 			BUG_ON(cn->blocknr != saved_bh->b_blocknr);
cn               1509 fs/reiserfs/journal.c 		cn = cn->next;
cn               1523 fs/reiserfs/journal.c 		cn = jl->j_realblock;
cn               1524 fs/reiserfs/journal.c 		while (cn) {
cn               1525 fs/reiserfs/journal.c 			if (test_bit(BLOCK_NEEDS_FLUSH, &cn->state)) {
cn               1526 fs/reiserfs/journal.c 				if (!cn->bh) {
cn               1532 fs/reiserfs/journal.c 				__wait_on_buffer(cn->bh);
cn               1535 fs/reiserfs/journal.c 				if (!cn->bh) {
cn               1539 fs/reiserfs/journal.c 				if (unlikely(!buffer_uptodate(cn->bh))) {
cn               1552 fs/reiserfs/journal.c 				       (cn->bh));
cn               1555 fs/reiserfs/journal.c 				put_bh(cn->bh);
cn               1557 fs/reiserfs/journal.c 				release_buffer_page(cn->bh);
cn               1559 fs/reiserfs/journal.c 			cn = cn->next;
cn               1633 fs/reiserfs/journal.c 	struct reiserfs_journal_cnode *cn;
cn               1642 fs/reiserfs/journal.c 	cn = jl->j_realblock;
cn               1643 fs/reiserfs/journal.c 	while (cn) {
cn               1648 fs/reiserfs/journal.c 		if (cn->blocknr == 0) {
cn               1651 fs/reiserfs/journal.c 		if (cn->bh && can_dirty(cn) && buffer_dirty(cn->bh)) {
cn               1658 fs/reiserfs/journal.c 			tmp_bh = cn->bh;
cn               1661 fs/reiserfs/journal.c 			if (cn->bh && can_dirty(cn) && buffer_dirty(tmp_bh)) {
cn               1674 fs/reiserfs/journal.c 		cn = cn->next;
cn               1684 fs/reiserfs/journal.c 	struct reiserfs_journal_cnode *cn;
cn               1688 fs/reiserfs/journal.c 	cn = jl->j_realblock;
cn               1689 fs/reiserfs/journal.c 	while (cn) {
cn               1695 fs/reiserfs/journal.c 		pjl = find_newer_jl_for_cn(cn);
cn               1696 fs/reiserfs/journal.c 		if (!pjl && cn->blocknr && cn->bh
cn               1697 fs/reiserfs/journal.c 		    && buffer_journal_dirty(cn->bh)) {
cn               1698 fs/reiserfs/journal.c 			BUG_ON(!can_dirty(cn));
cn               1704 fs/reiserfs/journal.c 			clear_buffer_journal_new(cn->bh);
cn               1705 fs/reiserfs/journal.c 			if (buffer_journal_prepared(cn->bh)) {
cn               1706 fs/reiserfs/journal.c 				set_buffer_journal_restore_dirty(cn->bh);
cn               1708 fs/reiserfs/journal.c 				set_buffer_journal_test(cn->bh);
cn               1709 fs/reiserfs/journal.c 				mark_buffer_dirty(cn->bh);
cn               1712 fs/reiserfs/journal.c 		cn = cn->next;
cn               3272 fs/reiserfs/journal.c 	struct reiserfs_journal_cnode *cn = NULL;
cn               3338 fs/reiserfs/journal.c 	if (!cn) {
cn               3339 fs/reiserfs/journal.c 		cn = get_cnode(sb);
cn               3340 fs/reiserfs/journal.c 		if (!cn) {
cn               3351 fs/reiserfs/journal.c 		cn->bh = bh;
cn               3352 fs/reiserfs/journal.c 		cn->blocknr = bh->b_blocknr;
cn               3353 fs/reiserfs/journal.c 		cn->sb = sb;
cn               3354 fs/reiserfs/journal.c 		cn->jlist = NULL;
cn               3355 fs/reiserfs/journal.c 		insert_journal_hash(journal->j_hash_table, cn);
cn               3360 fs/reiserfs/journal.c 	cn->next = NULL;
cn               3361 fs/reiserfs/journal.c 	cn->prev = journal->j_last;
cn               3362 fs/reiserfs/journal.c 	cn->bh = bh;
cn               3364 fs/reiserfs/journal.c 		journal->j_last->next = cn;
cn               3365 fs/reiserfs/journal.c 		journal->j_last = cn;
cn               3367 fs/reiserfs/journal.c 		journal->j_first = cn;
cn               3368 fs/reiserfs/journal.c 		journal->j_last = cn;
cn               3419 fs/reiserfs/journal.c 	struct reiserfs_journal_cnode *cn;
cn               3423 fs/reiserfs/journal.c 	cn = get_journal_hash_dev(sb, journal->j_hash_table, blocknr);
cn               3424 fs/reiserfs/journal.c 	if (!cn || !cn->bh) {
cn               3427 fs/reiserfs/journal.c 	bh = cn->bh;
cn               3428 fs/reiserfs/journal.c 	if (cn->prev) {
cn               3429 fs/reiserfs/journal.c 		cn->prev->next = cn->next;
cn               3431 fs/reiserfs/journal.c 	if (cn->next) {
cn               3432 fs/reiserfs/journal.c 		cn->next->prev = cn->prev;
cn               3434 fs/reiserfs/journal.c 	if (cn == journal->j_first) {
cn               3435 fs/reiserfs/journal.c 		journal->j_first = cn->next;
cn               3437 fs/reiserfs/journal.c 	if (cn == journal->j_last) {
cn               3438 fs/reiserfs/journal.c 		journal->j_last = cn->prev;
cn               3457 fs/reiserfs/journal.c 	free_cnode(sb, cn);
cn               3472 fs/reiserfs/journal.c static int can_dirty(struct reiserfs_journal_cnode *cn)
cn               3474 fs/reiserfs/journal.c 	struct super_block *sb = cn->sb;
cn               3475 fs/reiserfs/journal.c 	b_blocknr_t blocknr = cn->blocknr;
cn               3476 fs/reiserfs/journal.c 	struct reiserfs_journal_cnode *cur = cn->hprev;
cn               3495 fs/reiserfs/journal.c 	cur = cn->hnext;
cn               3732 fs/reiserfs/journal.c 	struct reiserfs_journal_cnode *cn = NULL;
cn               3738 fs/reiserfs/journal.c 	cn = get_journal_hash_dev(sb, journal->j_hash_table, blocknr);
cn               3739 fs/reiserfs/journal.c 	if (cn && cn->bh) {
cn               3740 fs/reiserfs/journal.c 		bh = cn->bh;
cn               3773 fs/reiserfs/journal.c 		cn = get_journal_hash_dev(sb, journal->j_list_hash_table,
cn               3775 fs/reiserfs/journal.c 		while (cn) {
cn               3776 fs/reiserfs/journal.c 			if (sb == cn->sb && blocknr == cn->blocknr) {
cn               3777 fs/reiserfs/journal.c 				set_bit(BLOCK_FREED, &cn->state);
cn               3778 fs/reiserfs/journal.c 				if (cn->bh) {
cn               3785 fs/reiserfs/journal.c 						clear_buffer_journal_dirty(cn->
cn               3787 fs/reiserfs/journal.c 						clear_buffer_dirty(cn->bh);
cn               3788 fs/reiserfs/journal.c 						clear_buffer_journal_test(cn->
cn               3791 fs/reiserfs/journal.c 						put_bh(cn->bh);
cn               3793 fs/reiserfs/journal.c 						    (&cn->bh->b_count) < 0) {
cn               3803 fs/reiserfs/journal.c 					if (cn->jlist) {
cn               3804 fs/reiserfs/journal.c 						atomic_dec(&cn->jlist->
cn               3807 fs/reiserfs/journal.c 					cn->bh = NULL;
cn               3810 fs/reiserfs/journal.c 			cn = cn->hnext;
cn               3922 fs/reiserfs/journal.c 		struct reiserfs_journal_cnode *cn;
cn               3924 fs/reiserfs/journal.c 		cn = get_journal_hash_dev(sb,
cn               3927 fs/reiserfs/journal.c 		if (cn && can_dirty(cn)) {
cn               3976 fs/reiserfs/journal.c 	struct reiserfs_journal_cnode *cn, *next, *jl_cn;
cn               4116 fs/reiserfs/journal.c 	for (i = 0, cn = journal->j_first; cn; cn = cn->next, i++) {
cn               4117 fs/reiserfs/journal.c 		if (buffer_journaled(cn->bh)) {
cn               4137 fs/reiserfs/journal.c 			    (sb, cn->bh->b_blocknr)) {
cn               4141 fs/reiserfs/journal.c 					       cn->bh->b_blocknr);
cn               4143 fs/reiserfs/journal.c 			jl_cn->blocknr = cn->bh->b_blocknr;
cn               4146 fs/reiserfs/journal.c 			jl_cn->bh = cn->bh;
cn               4151 fs/reiserfs/journal.c 				    cpu_to_le32(cn->bh->b_blocknr);
cn               4154 fs/reiserfs/journal.c 				    cpu_to_le32(cn->bh->b_blocknr);
cn               4183 fs/reiserfs/journal.c 	cn = journal->j_first;
cn               4185 fs/reiserfs/journal.c 	while (cn) {
cn               4186 fs/reiserfs/journal.c 		clear_buffer_journal_new(cn->bh);
cn               4188 fs/reiserfs/journal.c 		if (buffer_journaled(cn->bh)) {
cn               4199 fs/reiserfs/journal.c 			page = cn->bh->b_page;
cn               4202 fs/reiserfs/journal.c 			       addr + offset_in_page(cn->bh->b_data),
cn               4203 fs/reiserfs/journal.c 			       cn->bh->b_size);
cn               4207 fs/reiserfs/journal.c 			set_buffer_journal_dirty(cn->bh);
cn               4208 fs/reiserfs/journal.c 			clear_buffer_journaled(cn->bh);
cn               4217 fs/reiserfs/journal.c 			brelse(cn->bh);
cn               4219 fs/reiserfs/journal.c 		next = cn->next;
cn               4220 fs/reiserfs/journal.c 		free_cnode(sb, cn);
cn               4221 fs/reiserfs/journal.c 		cn = next;
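
The reiserfs journal hits chain cnodes two ways: a per-transaction next/prev list and a hash table linked through hnext/hprev, which get_journal_hash_dev() walks keyed by (sb, blocknr). A sketch of that bucket walk with simplified, invented types:

/* Sketch only: walk one hash bucket keyed by (sb, block). */
struct super_block;			/* only used as an opaque key here */

struct jcnode {
	unsigned long		blocknr;
	struct super_block     *sb;
	struct jcnode	       *hnext;	/* hash-chain link */
};

static struct jcnode *example_hash_lookup(struct jcnode *bucket,
					  struct super_block *sb,
					  unsigned long bl)
{
	struct jcnode *cn;

	for (cn = bucket; cn; cn = cn->hnext)
		if (cn->blocknr == bl && cn->sb == sb)
			return cn;
	return NULL;
}
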
cn               2384 fs/ubifs/lpt.c 	struct ubifs_cnode *cn;
cn               2404 fs/ubifs/lpt.c 				cn = nn->nbranch[iip].cnode;
cn               2405 fs/ubifs/lpt.c 				if (cn) {
cn               2411 fs/ubifs/lpt.c 					cnode = cn;
cn               1210 include/linux/of.h #define of_for_each_phandle(it, err, np, ln, cn, cc)			\
cn               1211 include/linux/of.h 	for (of_phandle_iterator_init((it), (np), (ln), (cn), (cc)),	\
cn                115 net/ipv4/netfilter/ipt_CLUSTERIP.c 	struct clusterip_net *cn = clusterip_pernet(c->net);
cn                118 net/ipv4/netfilter/ipt_CLUSTERIP.c 	if (refcount_dec_and_lock(&c->entries, &cn->lock)) {
cn                120 net/ipv4/netfilter/ipt_CLUSTERIP.c 		spin_unlock(&cn->lock);
cn                126 net/ipv4/netfilter/ipt_CLUSTERIP.c 		mutex_lock(&cn->mutex);
cn                127 net/ipv4/netfilter/ipt_CLUSTERIP.c 		if (cn->procdir)
cn                129 net/ipv4/netfilter/ipt_CLUSTERIP.c 		mutex_unlock(&cn->mutex);
cn                140 net/ipv4/netfilter/ipt_CLUSTERIP.c 	struct clusterip_net *cn = clusterip_pernet(net);
cn                142 net/ipv4/netfilter/ipt_CLUSTERIP.c 	list_for_each_entry_rcu(c, &cn->configs, list) {
cn                193 net/ipv4/netfilter/ipt_CLUSTERIP.c 	struct clusterip_net *cn = clusterip_pernet(net);
cn                196 net/ipv4/netfilter/ipt_CLUSTERIP.c 	spin_lock_bh(&cn->lock);
cn                197 net/ipv4/netfilter/ipt_CLUSTERIP.c 	list_for_each_entry_rcu(c, &cn->configs, list) {
cn                222 net/ipv4/netfilter/ipt_CLUSTERIP.c 	spin_unlock_bh(&cn->lock);
cn                231 net/ipv4/netfilter/ipt_CLUSTERIP.c 	struct clusterip_net *cn = clusterip_pernet(net);
cn                265 net/ipv4/netfilter/ipt_CLUSTERIP.c 	spin_lock_bh(&cn->lock);
cn                271 net/ipv4/netfilter/ipt_CLUSTERIP.c 	list_add_rcu(&c->list, &cn->configs);
cn                272 net/ipv4/netfilter/ipt_CLUSTERIP.c 	spin_unlock_bh(&cn->lock);
cn                280 net/ipv4/netfilter/ipt_CLUSTERIP.c 		mutex_lock(&cn->mutex);
cn                282 net/ipv4/netfilter/ipt_CLUSTERIP.c 					  cn->procdir,
cn                284 net/ipv4/netfilter/ipt_CLUSTERIP.c 		mutex_unlock(&cn->mutex);
cn                298 net/ipv4/netfilter/ipt_CLUSTERIP.c 	spin_lock_bh(&cn->lock);
cn                301 net/ipv4/netfilter/ipt_CLUSTERIP.c 	spin_unlock_bh(&cn->lock);
cn                819 net/ipv4/netfilter/ipt_CLUSTERIP.c 	struct clusterip_net *cn = clusterip_pernet(net);
cn                822 net/ipv4/netfilter/ipt_CLUSTERIP.c 	INIT_LIST_HEAD(&cn->configs);
cn                824 net/ipv4/netfilter/ipt_CLUSTERIP.c 	spin_lock_init(&cn->lock);
cn                831 net/ipv4/netfilter/ipt_CLUSTERIP.c 	cn->procdir = proc_mkdir("ipt_CLUSTERIP", net->proc_net);
cn                832 net/ipv4/netfilter/ipt_CLUSTERIP.c 	if (!cn->procdir) {
cn                837 net/ipv4/netfilter/ipt_CLUSTERIP.c 	mutex_init(&cn->mutex);
cn                846 net/ipv4/netfilter/ipt_CLUSTERIP.c 	struct clusterip_net *cn = clusterip_pernet(net);
cn                848 net/ipv4/netfilter/ipt_CLUSTERIP.c 	mutex_lock(&cn->mutex);
cn                849 net/ipv4/netfilter/ipt_CLUSTERIP.c 	proc_remove(cn->procdir);
cn                850 net/ipv4/netfilter/ipt_CLUSTERIP.c 	cn->procdir = NULL;
cn                851 net/ipv4/netfilter/ipt_CLUSTERIP.c 	mutex_unlock(&cn->mutex);
cn                401 net/lapb/lapb_in.c 			int cn;
cn                402 net/lapb/lapb_in.c 			cn = lapb_data_indication(lapb, skb);
cn                411 net/lapb/lapb_in.c 			if (cn == NET_RX_DROP) {
cn               1782 security/smack/smackfs.c 				size_t cn, loff_t *ppos)
cn               1797 security/smack/smackfs.c 	if (cn >= asize)
cn               1798 security/smack/smackfs.c 		rc = simple_read_from_buffer(buf, cn, ppos,
cn               2048 security/smack/smackfs.c 					size_t cn, loff_t *ppos)
cn               2062 security/smack/smackfs.c 	if (cn >= asize)
cn               2063 security/smack/smackfs.c 		rc = simple_read_from_buffer(buf, cn, ppos, smack, asize);
cn               2600 security/smack/smackfs.c 				size_t cn, loff_t *ppos)
cn               2616 security/smack/smackfs.c 	if (cn >= asize)
cn               2617 security/smack/smackfs.c 		rc = simple_read_from_buffer(buf, cn, ppos, skp->smk_known,