
Searched refs:ct (Results 1 – 200 of 266) sorted by relevance
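Most of the net/netfilter hits below share one access pattern: the conntrack entry (struct nf_conn *ct) is fetched from the skb with nf_ct_get(), checked for NULL and for the untracked state, and only then are its tuples and status bits read. A minimal sketch of that pattern, using only calls that appear in these excerpts; the function name and the IPv4 check are illustrative, not taken from any one file:

    #include <net/netfilter/nf_conntrack.h>

    static bool example_ct_is_tracked_ipv4(const struct sk_buff *skb)
    {
            enum ip_conntrack_info ctinfo;
            const struct nf_conn *ct;

            /* conntrack entry attached to this skb, if any */
            ct = nf_ct_get(skb, &ctinfo);
            if (ct == NULL || nf_ct_is_untracked(ct))
                    return false;

            /* l3num of the original-direction tuple (see the nf_conntrack.h excerpt) */
            return nf_ct_l3num(ct) == NFPROTO_IPV4;
    }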

/linux-4.1.27/arch/powerpc/include/asm/
Dcputime.h51 static inline unsigned long cputime_to_jiffies(const cputime_t ct) in cputime_to_jiffies() argument
53 return mulhdu((__force u64) ct, __cputime_jiffies_factor); in cputime_to_jiffies()
58 static inline cputime_t cputime_to_scaled(const cputime_t ct) in cputime_to_scaled() argument
62 return (__force u64) ct * in cputime_to_scaled()
65 return ct; in cputime_to_scaled()
70 u64 ct; in jiffies_to_cputime() local
74 ct = jif % HZ; in jiffies_to_cputime()
76 if (ct) { in jiffies_to_cputime()
77 ct *= tb_ticks_per_sec; in jiffies_to_cputime()
78 do_div(ct, HZ); in jiffies_to_cputime()
[all …]
/linux-4.1.27/net/netfilter/
Dnf_conntrack_core.c58 int (*nfnetlink_parse_nat_setup_hook)(struct nf_conn *ct,
94 if (read_seqcount_retry(&net->ct.generation, sequence)) { in nf_conntrack_double_lock()
150 return __hash_bucket(hash, net->ct.htable_size); in hash_bucket()
162 return __hash_conntrack(tuple, zone, net->ct.htable_size); in hash_conntrack()
236 clean_from_lists(struct nf_conn *ct) in clean_from_lists() argument
238 pr_debug("clean_from_lists(%p)\n", ct); in clean_from_lists()
239 hlist_nulls_del_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode); in clean_from_lists()
240 hlist_nulls_del_rcu(&ct->tuplehash[IP_CT_DIR_REPLY].hnnode); in clean_from_lists()
243 nf_ct_remove_expectations(ct); in clean_from_lists()
247 static void nf_ct_add_to_dying_list(struct nf_conn *ct) in nf_ct_add_to_dying_list() argument
[all …]
Dnf_conntrack_h323_main.c64 struct nf_conn *ct,
69 struct nf_conn *ct,
74 struct nf_conn *ct,
83 struct nf_conn *ct,
90 struct nf_conn *ct,
97 struct nf_conn *ct,
104 struct nf_conn *ct,
120 struct nf_conn *ct, enum ip_conntrack_info ctinfo, in get_tpkt_data() argument
123 struct nf_ct_h323_master *info = nfct_help_data(ct); in get_tpkt_data()
223 static int get_h245_addr(struct nf_conn *ct, const unsigned char *data, in get_h245_addr() argument
[all …]
Dnf_nat_core.c59 const struct nf_conn *ct; in __nf_nat_decode_session() local
65 ct = nf_ct_get(skb, &ctinfo); in __nf_nat_decode_session()
66 if (ct == NULL) in __nf_nat_decode_session()
69 family = ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.l3num; in __nf_nat_decode_session()
81 l3proto->decode_session(skb, ct, dir, statusbit, fl); in __nf_nat_decode_session()
130 return reciprocal_scale(hash, net->ct.nat_htable_size); in hash_by_src()
175 same_src(const struct nf_conn *ct, in same_src() argument
180 t = &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple; in same_src()
197 const struct nf_conn *ct; in find_appropriate_src() local
199 hlist_for_each_entry_rcu(nat, &net->ct.nat_bysource[h], bysource) { in find_appropriate_src()
[all …]
Dnfnetlink_queue_ct.c21 struct nf_conn *ct; in nfqnl_ct_get() local
28 ct = nf_ct_get(entskb, ctinfo); in nfqnl_ct_get()
29 if (ct) { in nfqnl_ct_get()
30 if (!nf_ct_is_untracked(ct)) in nfqnl_ct_get()
31 *size += nfq_ct->build_size(ct); in nfqnl_ct_get()
33 ct = NULL; in nfqnl_ct_get()
35 return ct; in nfqnl_ct_get()
43 struct nf_conn *ct; in nfqnl_ct_parse() local
50 ct = nf_ct_get(skb, ctinfo); in nfqnl_ct_parse()
51 if (ct && !nf_ct_is_untracked(ct)) in nfqnl_ct_parse()
[all …]
Dnf_nat_sip.c39 struct nf_conn *ct = nf_ct_get(skb, &ctinfo); in mangle_packet() local
43 if (nf_ct_protonum(ct) == IPPROTO_TCP) { in mangle_packet()
48 if (!__nf_nat_mangle_tcp_packet(skb, ct, ctinfo, in mangle_packet()
56 if (!nf_nat_mangle_udp_packet(skb, ct, ctinfo, in mangle_packet()
68 static int sip_sprintf_addr(const struct nf_conn *ct, char *buffer, in sip_sprintf_addr() argument
71 if (nf_ct_l3num(ct) == NFPROTO_IPV4) in sip_sprintf_addr()
81 static int sip_sprintf_addr_port(const struct nf_conn *ct, char *buffer, in sip_sprintf_addr_port() argument
84 if (nf_ct_l3num(ct) == NFPROTO_IPV4) in sip_sprintf_addr_port()
97 struct nf_conn *ct = nf_ct_get(skb, &ctinfo); in map_addr() local
99 struct nf_ct_sip_master *ct_sip_info = nfct_help_data(ct); in map_addr()
[all …]
Dnf_conntrack_ecache.c51 struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(h); in ecache_work_evict_list() local
53 if (nf_ct_is_dying(ct)) in ecache_work_evict_list()
56 if (nf_conntrack_event(IPCT_DESTROY, ct)) { in ecache_work_evict_list()
62 set_bit(IPS_DYING_BIT, &ct->status); in ecache_work_evict_list()
63 refs[evicted] = ct; in ecache_work_evict_list()
118 void nf_ct_deliver_cached_events(struct nf_conn *ct) in nf_ct_deliver_cached_events() argument
120 struct net *net = nf_ct_net(ct); in nf_ct_deliver_cached_events()
128 notify = rcu_dereference(net->ct.nf_conntrack_event_cb); in nf_ct_deliver_cached_events()
132 e = nf_ct_ecache_find(ct); in nf_ct_deliver_cached_events()
138 if (!nf_ct_is_confirmed(ct) || nf_ct_is_dying(ct) || !events) in nf_ct_deliver_cached_events()
[all …]
Dnf_conntrack_netlink.c131 ctnetlink_dump_status(struct sk_buff *skb, const struct nf_conn *ct) in ctnetlink_dump_status() argument
133 if (nla_put_be32(skb, CTA_STATUS, htonl(ct->status))) in ctnetlink_dump_status()
142 ctnetlink_dump_timeout(struct sk_buff *skb, const struct nf_conn *ct) in ctnetlink_dump_timeout() argument
144 long timeout = ((long)ct->timeout.expires - (long)jiffies) / HZ; in ctnetlink_dump_timeout()
158 ctnetlink_dump_protoinfo(struct sk_buff *skb, struct nf_conn *ct) in ctnetlink_dump_protoinfo() argument
164 l4proto = __nf_ct_l4proto_find(nf_ct_l3num(ct), nf_ct_protonum(ct)); in ctnetlink_dump_protoinfo()
172 ret = l4proto->to_nlattr(skb, nest_proto, ct); in ctnetlink_dump_protoinfo()
183 ctnetlink_dump_helpinfo(struct sk_buff *skb, const struct nf_conn *ct) in ctnetlink_dump_helpinfo() argument
186 const struct nf_conn_help *help = nfct_help(ct); in ctnetlink_dump_helpinfo()
203 helper->to_nlattr(skb, ct); in ctnetlink_dump_helpinfo()
[all …]
Dnf_conntrack_standalone.c62 st->bucket < net->ct.htable_size; in ct_get_first()
64 n = rcu_dereference(hlist_nulls_first_rcu(&net->ct.hash[st->bucket])); in ct_get_first()
80 if (++st->bucket >= net->ct.htable_size) in ct_get_next()
85 &net->ct.hash[st->bucket])); in ct_get_next()
123 static void ct_show_secctx(struct seq_file *s, const struct nf_conn *ct) in ct_show_secctx() argument
129 ret = security_secid_to_secctx(ct->secmark, &secctx, &len); in ct_show_secctx()
138 static inline void ct_show_secctx(struct seq_file *s, const struct nf_conn *ct) in ct_show_secctx() argument
144 static void ct_show_delta_time(struct seq_file *s, const struct nf_conn *ct) in ct_show_delta_time() argument
150 tstamp = nf_conn_tstamp_find(ct); in ct_show_delta_time()
165 ct_show_delta_time(struct seq_file *s, const struct nf_conn *ct) in ct_show_delta_time() argument
[all …]
Dxt_CT.c23 static inline int xt_ct_target(struct sk_buff *skb, struct nf_conn *ct) in xt_ct_target() argument
30 if (!ct) in xt_ct_target()
31 ct = nf_ct_untracked_get(); in xt_ct_target()
32 atomic_inc(&ct->ct_general.use); in xt_ct_target()
33 skb->nfct = &ct->ct_general; in xt_ct_target()
43 struct nf_conn *ct = info->ct; in xt_ct_target_v0() local
45 return xt_ct_target(skb, ct); in xt_ct_target_v0()
52 struct nf_conn *ct = info->ct; in xt_ct_target_v1() local
54 return xt_ct_target(skb, ct); in xt_ct_target_v1()
76 xt_ct_set_helper(struct nf_conn *ct, const char *helper_name, in xt_ct_set_helper() argument
[all …]
Dnf_conntrack_sip.c58 static int string_len(const struct nf_conn *ct, const char *dptr, in string_len() argument
70 static int digits_len(const struct nf_conn *ct, const char *dptr, in digits_len() argument
101 static int callid_len(const struct nf_conn *ct, const char *dptr, in callid_len() argument
120 static int media_len(const struct nf_conn *ct, const char *dptr, in media_len() argument
123 int len = string_len(ct, dptr, limit, shift); in media_len()
131 return len + digits_len(ct, dptr, limit, shift); in media_len()
134 static int sip_parse_addr(const struct nf_conn *ct, const char *cp, in sip_parse_addr() argument
141 if (!ct) in sip_parse_addr()
145 switch (nf_ct_l3num(ct)) { in sip_parse_addr()
176 static int epaddr_len(const struct nf_conn *ct, const char *dptr, in epaddr_len() argument
[all …]
Dnf_conntrack_proto_tcp.c276 return &net->ct.nf_ct_proto.tcp; in tcp_pernet()
314 static void tcp_print_conntrack(struct seq_file *s, struct nf_conn *ct) in tcp_print_conntrack() argument
318 spin_lock_bh(&ct->lock); in tcp_print_conntrack()
319 state = ct->proto.tcp.state; in tcp_print_conntrack()
320 spin_unlock_bh(&ct->lock); in tcp_print_conntrack()
500 static bool tcp_in_window(const struct nf_conn *ct, in tcp_in_window() argument
509 struct net *net = nf_ct_net(ct); in tcp_in_window()
513 const struct nf_conntrack_tuple *tuple = &ct->tuplehash[dir].tuple; in tcp_in_window()
530 receiver_offset = nf_ct_seq_offset(ct, !dir, ack - 1); in tcp_in_window()
786 if (net->ct.sysctl_checksum && hooknum == NF_INET_PRE_ROUTING && in tcp_error()
[all …]
Dxt_conntrack.c43 conntrack_mt_origsrc(const struct nf_conn *ct, in conntrack_mt_origsrc() argument
47 return conntrack_addrcmp(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u3, in conntrack_mt_origsrc()
52 conntrack_mt_origdst(const struct nf_conn *ct, in conntrack_mt_origdst() argument
56 return conntrack_addrcmp(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.u3, in conntrack_mt_origdst()
61 conntrack_mt_replsrc(const struct nf_conn *ct, in conntrack_mt_replsrc() argument
65 return conntrack_addrcmp(&ct->tuplehash[IP_CT_DIR_REPLY].tuple.src.u3, in conntrack_mt_replsrc()
70 conntrack_mt_repldst(const struct nf_conn *ct, in conntrack_mt_repldst() argument
74 return conntrack_addrcmp(&ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.u3, in conntrack_mt_repldst()
80 const struct nf_conn *ct) in ct_proto_port_check() argument
84 tuple = &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple; in ct_proto_port_check()
[all …]
Dnf_conntrack_pptp.c49 struct nf_conn *ct, enum ip_conntrack_info ctinfo,
56 struct nf_conn *ct, enum ip_conntrack_info ctinfo,
68 (*nf_nat_pptp_hook_expectfn)(struct nf_conn *ct,
102 static void pptp_expectfn(struct nf_conn *ct, in pptp_expectfn() argument
105 struct net *net = nf_ct_net(ct); in pptp_expectfn()
110 ct->proto.gre.timeout = PPTP_GRE_TIMEOUT; in pptp_expectfn()
111 ct->proto.gre.stream_timeout = PPTP_GRE_STREAM_TIMEOUT; in pptp_expectfn()
118 if (nf_nat_pptp_expectfn && ct->master->status & IPS_NAT_MASK) in pptp_expectfn()
119 nf_nat_pptp_expectfn(ct, exp); in pptp_expectfn()
129 exp_other = nf_ct_expect_find_get(net, nf_ct_zone(ct), &inv_t); in pptp_expectfn()
[all …]
Dxt_connmark.c43 struct nf_conn *ct; in connmark_tg() local
46 ct = nf_ct_get(skb, &ctinfo); in connmark_tg()
47 if (ct == NULL) in connmark_tg()
52 newmark = (ct->mark & ~info->ctmask) ^ info->ctmark; in connmark_tg()
53 if (ct->mark != newmark) { in connmark_tg()
54 ct->mark = newmark; in connmark_tg()
55 nf_conntrack_event_cache(IPCT_MARK, ct); in connmark_tg()
59 newmark = (ct->mark & ~info->ctmask) ^ in connmark_tg()
61 if (ct->mark != newmark) { in connmark_tg()
62 ct->mark = newmark; in connmark_tg()
[all …]
Dnf_conntrack_helper.c50 .data = &init_net.ct.sysctl_auto_assign_helper,
67 table[0].data = &net->ct.sysctl_auto_assign_helper; in nf_conntrack_helper_init_sysctl()
73 net->ct.helper_sysctl_header = in nf_conntrack_helper_init_sysctl()
76 if (!net->ct.helper_sysctl_header) { in nf_conntrack_helper_init_sysctl()
92 table = net->ct.helper_sysctl_header->ctl_table_arg; in nf_conntrack_helper_fini_sysctl()
93 unregister_net_sysctl_table(net->ct.helper_sysctl_header); in nf_conntrack_helper_fini_sysctl()
171 nf_ct_helper_ext_add(struct nf_conn *ct, in nf_ct_helper_ext_add() argument
176 help = nf_ct_ext_add_length(ct, NF_CT_EXT_HELPER, in nf_ct_helper_ext_add()
186 int __nf_ct_try_assign_helper(struct nf_conn *ct, struct nf_conn *tmpl, in __nf_ct_try_assign_helper() argument
191 struct net *net = nf_ct_net(ct); in __nf_ct_try_assign_helper()
[all …]
Dxt_cluster.c19 static inline u32 nf_ct_orig_ipv4_src(const struct nf_conn *ct) in nf_ct_orig_ipv4_src() argument
21 return (__force u32)ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u3.ip; in nf_ct_orig_ipv4_src()
24 static inline const u32 *nf_ct_orig_ipv6_src(const struct nf_conn *ct) in nf_ct_orig_ipv6_src() argument
26 return (__force u32 *)ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u3.ip6; in nf_ct_orig_ipv6_src()
42 xt_cluster_hash(const struct nf_conn *ct, in xt_cluster_hash() argument
47 switch(nf_ct_l3num(ct)) { in xt_cluster_hash()
49 hash = xt_cluster_hash_ipv4(nf_ct_orig_ipv4_src(ct), info); in xt_cluster_hash()
52 hash = xt_cluster_hash_ipv6(nf_ct_orig_ipv6_src(ct), info); in xt_cluster_hash()
94 const struct nf_conn *ct; in xt_cluster_mt() local
120 ct = nf_ct_get(skb, &ctinfo); in xt_cluster_mt()
[all …]
Dnf_conntrack_proto_dccp.c422 static bool dccp_new(struct nf_conn *ct, const struct sk_buff *skb, in dccp_new() argument
425 struct net *net = nf_ct_net(ct); in dccp_new()
449 ct->proto.dccp.role[IP_CT_DIR_ORIGINAL] = CT_DCCP_ROLE_CLIENT; in dccp_new()
450 ct->proto.dccp.role[IP_CT_DIR_REPLY] = CT_DCCP_ROLE_SERVER; in dccp_new()
451 ct->proto.dccp.state = CT_DCCP_NONE; in dccp_new()
452 ct->proto.dccp.last_pkt = DCCP_PKT_REQUEST; in dccp_new()
453 ct->proto.dccp.last_dir = IP_CT_DIR_ORIGINAL; in dccp_new()
454 ct->proto.dccp.handshake_seq = 0; in dccp_new()
459 nf_log_packet(net, nf_ct_l3num(ct), 0, skb, NULL, NULL, in dccp_new()
478 static int dccp_packet(struct nf_conn *ct, const struct sk_buff *skb, in dccp_packet() argument
[all …]
Dxt_connlabel.c26 struct nf_conn *ct; in connlabel_mt() local
29 ct = nf_ct_get(skb, &ctinfo); in connlabel_mt()
30 if (ct == NULL || nf_ct_is_untracked(ct)) in connlabel_mt()
34 return (nf_connlabel_set(ct, info->bit) == 0) ^ invert; in connlabel_mt()
36 return nf_connlabel_match(ct, info->bit) ^ invert; in connlabel_mt()
62 par->net->ct.labels_used++; in connlabel_mt_check()
64 if (words > par->net->ct.label_words) in connlabel_mt_check()
65 par->net->ct.label_words = words; in connlabel_mt_check()
72 par->net->ct.labels_used--; in connlabel_mt_destroy()
73 if (par->net->ct.labels_used == 0) in connlabel_mt_destroy()
[all …]
Dxt_nat.c48 struct nf_conn *ct; in xt_snat_target_v0() local
50 ct = nf_ct_get(skb, &ctinfo); in xt_snat_target_v0()
51 NF_CT_ASSERT(ct != NULL && in xt_snat_target_v0()
56 return nf_nat_setup_info(ct, &range, NF_NAT_MANIP_SRC); in xt_snat_target_v0()
65 struct nf_conn *ct; in xt_dnat_target_v0() local
67 ct = nf_ct_get(skb, &ctinfo); in xt_dnat_target_v0()
68 NF_CT_ASSERT(ct != NULL && in xt_dnat_target_v0()
72 return nf_nat_setup_info(ct, &range, NF_NAT_MANIP_DST); in xt_dnat_target_v0()
80 struct nf_conn *ct; in xt_snat_target_v1() local
82 ct = nf_ct_get(skb, &ctinfo); in xt_snat_target_v1()
[all …]
Dxt_CONNSECMARK.c39 struct nf_conn *ct; in secmark_save() local
42 ct = nf_ct_get(skb, &ctinfo); in secmark_save()
43 if (ct && !ct->secmark) { in secmark_save()
44 ct->secmark = skb->secmark; in secmark_save()
45 nf_conntrack_event_cache(IPCT_SECMARK, ct); in secmark_save()
57 const struct nf_conn *ct; in secmark_restore() local
60 ct = nf_ct_get(skb, &ctinfo); in secmark_restore()
61 if (ct && ct->secmark) in secmark_restore()
62 skb->secmark = ct->secmark; in secmark_restore()
Dnf_conntrack_seqadj.c9 int nf_ct_seqadj_init(struct nf_conn *ct, enum ip_conntrack_info ctinfo, in nf_ct_seqadj_init() argument
19 set_bit(IPS_SEQ_ADJUST_BIT, &ct->status); in nf_ct_seqadj_init()
21 seqadj = nfct_seqadj(ct); in nf_ct_seqadj_init()
29 int nf_ct_seqadj_set(struct nf_conn *ct, enum ip_conntrack_info ctinfo, in nf_ct_seqadj_set() argument
32 struct nf_conn_seqadj *seqadj = nfct_seqadj(ct); in nf_ct_seqadj_set()
44 set_bit(IPS_SEQ_ADJUST_BIT, &ct->status); in nf_ct_seqadj_set()
46 spin_lock_bh(&ct->lock); in nf_ct_seqadj_set()
54 spin_unlock_bh(&ct->lock); in nf_ct_seqadj_set()
60 struct nf_conn *ct, enum ip_conntrack_info ctinfo, in nf_ct_tcp_seqadj_set() argument
65 if (nf_ct_protonum(ct) != IPPROTO_TCP) in nf_ct_tcp_seqadj_set()
[all …]
Dnf_conntrack_proto_gre.c115 int nf_ct_gre_keymap_add(struct nf_conn *ct, enum ip_conntrack_dir dir, in nf_ct_gre_keymap_add() argument
118 struct net *net = nf_ct_net(ct); in nf_ct_gre_keymap_add()
120 struct nf_ct_pptp_master *ct_pptp_info = nfct_help_data(ct); in nf_ct_gre_keymap_add()
135 dir == IP_CT_DIR_REPLY ? "reply" : "orig", ct); in nf_ct_gre_keymap_add()
157 void nf_ct_gre_keymap_destroy(struct nf_conn *ct) in nf_ct_gre_keymap_destroy() argument
159 struct net *net = nf_ct_net(ct); in nf_ct_gre_keymap_destroy()
161 struct nf_ct_pptp_master *ct_pptp_info = nfct_help_data(ct); in nf_ct_gre_keymap_destroy()
164 pr_debug("entering for ct %p\n", ct); in nf_ct_gre_keymap_destroy()
238 static void gre_print_conntrack(struct seq_file *s, struct nf_conn *ct) in gre_print_conntrack() argument
241 (ct->proto.gre.timeout / HZ), in gre_print_conntrack()
[all …]
Dnft_ct.c40 const struct nf_conn *ct; in nft_ct_get_eval() local
47 ct = nf_ct_get(pkt->skb, &ctinfo); in nft_ct_get_eval()
51 if (ct == NULL) in nft_ct_get_eval()
53 else if (nf_ct_is_untracked(ct)) in nft_ct_get_eval()
63 if (ct == NULL) in nft_ct_get_eval()
71 *dest = ct->status; in nft_ct_get_eval()
75 *dest = ct->mark; in nft_ct_get_eval()
80 *dest = ct->secmark; in nft_ct_get_eval()
84 diff = (long)jiffies - (long)ct->timeout.expires; in nft_ct_get_eval()
90 if (ct->master == NULL) in nft_ct_get_eval()
[all …]
Dxt_u32.c19 const struct xt_u32_test *ct; in u32_match_it() local
34 ct = &data->tests[testind]; in u32_match_it()
36 pos = ct->location[0].number; in u32_match_it()
44 nnums = ct->nnums; in u32_match_it()
48 u_int32_t number = ct->location[i].number; in u32_match_it()
49 switch (ct->location[i].nextop) { in u32_match_it()
77 nvals = ct->nvalues; in u32_match_it()
79 if (ct->value[i].min <= val && val <= ct->value[i].max) in u32_match_it()
82 if (i >= ct->nvalues) in u32_match_it()
Dnf_conntrack_proto_sctp.c178 static void sctp_print_conntrack(struct seq_file *s, struct nf_conn *ct) in sctp_print_conntrack() argument
182 spin_lock_bh(&ct->lock); in sctp_print_conntrack()
183 state = ct->proto.sctp.state; in sctp_print_conntrack()
184 spin_unlock_bh(&ct->lock); in sctp_print_conntrack()
196 static int do_basic_checks(struct nf_conn *ct, in do_basic_checks() argument
302 static int sctp_packet(struct nf_conn *ct, in sctp_packet() argument
323 if (do_basic_checks(ct, skb, dataoff, map) != 0) in sctp_packet()
332 sh->vtag != ct->proto.sctp.vtag[dir]) { in sctp_packet()
338 spin_lock_bh(&ct->lock); in sctp_packet()
347 if (sh->vtag != ct->proto.sctp.vtag[dir] && in sctp_packet()
[all …]
Dnf_conntrack_acct.c30 .data = &init_net.ct.sysctl_acct,
40 seq_print_acct(struct seq_file *s, const struct nf_conn *ct, int dir) in seq_print_acct() argument
45 acct = nf_conn_acct_find(ct); in seq_print_acct()
74 table[0].data = &net->ct.sysctl_acct; in nf_conntrack_acct_init_sysctl()
80 net->ct.acct_sysctl_header = register_net_sysctl(net, "net/netfilter", in nf_conntrack_acct_init_sysctl()
82 if (!net->ct.acct_sysctl_header) { in nf_conntrack_acct_init_sysctl()
98 table = net->ct.acct_sysctl_header->ctl_table_arg; in nf_conntrack_acct_fini_sysctl()
99 unregister_net_sysctl_table(net->ct.acct_sysctl_header); in nf_conntrack_acct_fini_sysctl()
115 net->ct.sysctl_acct = nf_ct_acct; in nf_conntrack_acct_pernet_init()
Dnf_nat_helper.c93 struct nf_conn *ct, in __nf_nat_mangle_tcp_packet() argument
123 l3proto = __nf_nat_l3proto_find(nf_ct_l3num(ct)); in __nf_nat_mangle_tcp_packet()
128 nf_ct_seqadj_set(ct, ctinfo, tcph->seq, in __nf_nat_mangle_tcp_packet()
147 struct nf_conn *ct, in nf_nat_mangle_udp_packet() argument
181 l3proto = __nf_nat_l3proto_find(nf_ct_l3num(ct)); in nf_nat_mangle_udp_packet()
191 void nf_nat_follow_master(struct nf_conn *ct, in nf_nat_follow_master() argument
197 BUG_ON(ct->status & IPS_NAT_DONE_MASK); in nf_nat_follow_master()
202 = ct->master->tuplehash[!exp->dir].tuple.dst.u3; in nf_nat_follow_master()
203 nf_nat_setup_info(ct, &range, NF_NAT_MANIP_SRC); in nf_nat_follow_master()
209 = ct->master->tuplehash[!exp->dir].tuple.src.u3; in nf_nat_follow_master()
[all …]
Dnf_conntrack_labels.c23 bool nf_connlabel_match(const struct nf_conn *ct, u16 bit) in nf_connlabel_match() argument
25 struct nf_conn_labels *labels = nf_ct_labels_find(ct); in nf_connlabel_match()
34 int nf_connlabel_set(struct nf_conn *ct, u16 bit) in nf_connlabel_set() argument
36 struct nf_conn_labels *labels = nf_ct_labels_find(ct); in nf_connlabel_set()
45 nf_conntrack_event_cache(IPCT_LABEL, ct); in nf_connlabel_set()
62 int nf_connlabels_replace(struct nf_conn *ct, in nf_connlabels_replace() argument
70 labels = nf_ct_labels_find(ct); in nf_connlabels_replace()
88 nf_conntrack_event_cache(IPCT_LABEL, ct); in nf_connlabels_replace()
Dnf_conntrack_timestamp.c27 .data = &init_net.ct.sysctl_tstamp,
52 table[0].data = &net->ct.sysctl_tstamp; in nf_conntrack_tstamp_init_sysctl()
58 net->ct.tstamp_sysctl_header = register_net_sysctl(net, "net/netfilter", in nf_conntrack_tstamp_init_sysctl()
60 if (!net->ct.tstamp_sysctl_header) { in nf_conntrack_tstamp_init_sysctl()
76 table = net->ct.tstamp_sysctl_header->ctl_table_arg; in nf_conntrack_tstamp_fini_sysctl()
77 unregister_net_sysctl_table(net->ct.tstamp_sysctl_header); in nf_conntrack_tstamp_fini_sysctl()
93 net->ct.sysctl_tstamp = nf_ct_tstamp; in nf_conntrack_tstamp_pernet_init()
Dnf_nat_ftp.c29 static int nf_nat_ftp_fmt_cmd(struct nf_conn *ct, enum nf_ct_ftp_type type, in nf_nat_ftp_fmt_cmd() argument
44 if (nf_ct_l3num(ct) == NFPROTO_IPV4) in nf_nat_ftp_fmt_cmd()
70 struct nf_conn *ct = exp->master; in nf_nat_ftp() local
77 newaddr = ct->tuplehash[!dir].tuple.dst.u3; in nf_nat_ftp()
100 nf_ct_helper_log(skb, ct, "all ports in use"); in nf_nat_ftp()
104 buflen = nf_nat_ftp_fmt_cmd(ct, type, buffer, sizeof(buffer), in nf_nat_ftp()
111 if (!nf_nat_mangle_tcp_packet(skb, ct, ctinfo, protoff, matchoff, in nf_nat_ftp()
118 nf_ct_helper_log(skb, ct, "cannot mangle packet"); in nf_nat_ftp()
Dnf_conntrack_tftp.c40 struct nf_conn *ct, in tftp_help() argument
59 nf_ct_dump_tuple(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple); in tftp_help()
60 nf_ct_dump_tuple(&ct->tuplehash[IP_CT_DIR_REPLY].tuple); in tftp_help()
62 exp = nf_ct_expect_alloc(ct); in tftp_help()
64 nf_ct_helper_log(skb, ct, "cannot alloc expectation"); in tftp_help()
67 tuple = &ct->tuplehash[IP_CT_DIR_REPLY].tuple; in tftp_help()
69 nf_ct_l3num(ct), in tftp_help()
77 if (nf_nat_tftp && ct->status & IPS_NAT_MASK) in tftp_help()
80 nf_ct_helper_log(skb, ct, "cannot add expectation"); in tftp_help()
Dnf_nat_redirect.c36 struct nf_conn *ct; in nf_nat_redirect_ipv4() local
44 ct = nf_ct_get(skb, &ctinfo); in nf_nat_redirect_ipv4()
45 NF_CT_ASSERT(ct && (ctinfo == IP_CT_NEW || ctinfo == IP_CT_RELATED)); in nf_nat_redirect_ipv4()
78 return nf_nat_setup_info(ct, &newrange, NF_NAT_MANIP_DST); in nf_nat_redirect_ipv4()
91 struct nf_conn *ct; in nf_nat_redirect_ipv6() local
93 ct = nf_ct_get(skb, &ctinfo); in nf_nat_redirect_ipv6()
122 return nf_nat_setup_info(ct, &newrange, NF_NAT_MANIP_DST); in nf_nat_redirect_ipv6()
Dnf_conntrack_snmp.c34 struct nf_conn *ct,
39 struct nf_conn *ct, enum ip_conntrack_info ctinfo) in snmp_conntrack_help() argument
43 nf_conntrack_broadcast_help(skb, protoff, ct, ctinfo, timeout); in snmp_conntrack_help()
46 if (nf_nat_snmp && ct->status & IPS_NAT_MASK) in snmp_conntrack_help()
47 return nf_nat_snmp(skb, protoff, ct, ctinfo); in snmp_conntrack_help()
Dxt_helper.c30 const struct nf_conn *ct; in helper_mt() local
36 ct = nf_ct_get(skb, &ctinfo); in helper_mt()
37 if (!ct || !ct->master) in helper_mt()
40 master_help = nfct_help(ct->master); in helper_mt()
Dnf_conntrack_ftp.c360 static void update_nl_seq(struct nf_conn *ct, u32 nl_seq, in update_nl_seq() argument
387 struct nf_conn *ct, in help() argument
398 struct nf_ct_ftp_master *ct_ftp_info = nfct_help_data(ct); in help()
454 cmd.l3num = nf_ct_l3num(ct); in help()
455 memcpy(cmd.u3.all, &ct->tuplehash[dir].tuple.src.u3.all, in help()
474 nf_ct_helper_log(skb, ct, "partial matching of `%s'", in help()
487 exp = nf_ct_expect_alloc(ct); in help()
489 nf_ct_helper_log(skb, ct, "cannot alloc expectation"); in help()
497 daddr = &ct->tuplehash[!dir].tuple.dst.u3; in help()
500 if ((cmd.l3num == nf_ct_l3num(ct)) && in help()
[all …]
Dnf_conntrack_broadcast.c24 struct nf_conn *ct, in nf_conntrack_broadcast_help() argument
32 struct nf_conn_help *help = nfct_help(ct); in nf_conntrack_broadcast_help()
58 exp = nf_ct_expect_alloc(ct); in nf_conntrack_broadcast_help()
62 exp->tuple = ct->tuplehash[IP_CT_DIR_REPLY].tuple; in nf_conntrack_broadcast_help()
76 nf_ct_refresh(ct, skb, timeout * HZ); in nf_conntrack_broadcast_help()
Dnf_conntrack_extend.c22 void __nf_ct_ext_destroy(struct nf_conn *ct) in __nf_ct_ext_destroy() argument
26 struct nf_ct_ext *ext = ct->ext; in __nf_ct_ext_destroy()
40 t->destroy(ct); in __nf_ct_ext_destroy()
72 void *__nf_ct_ext_add_length(struct nf_conn *ct, enum nf_ct_ext_id id, in __nf_ct_ext_add_length() argument
80 NF_CT_ASSERT(!nf_ct_is_confirmed(ct)); in __nf_ct_ext_add_length()
82 old = ct->ext; in __nf_ct_ext_add_length()
84 return nf_ct_ext_create(&ct->ext, id, var_alloc_len, gfp); in __nf_ct_ext_add_length()
114 ct->ext = new; in __nf_ct_ext_add_length()
Dnf_nat_irc.c37 struct nf_conn *ct = exp->master; in help() local
43 newaddr = ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.u3; in help()
64 nf_ct_helper_log(skb, ct, "all ports in use"); in help()
86 ret = nf_nat_mangle_tcp_packet(skb, ct, ctinfo, protoff, matchoff, in help()
89 nf_ct_helper_log(skb, ct, "cannot mangle packet"); in help()
Dnf_conntrack_amanda.c88 struct nf_conn *ct, in amanda_help() argument
106 nf_ct_refresh(ct, skb, master_timeout * HZ); in amanda_help()
143 exp = nf_ct_expect_alloc(ct); in amanda_help()
145 nf_ct_helper_log(skb, ct, "cannot alloc expectation"); in amanda_help()
149 tuple = &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple; in amanda_help()
151 nf_ct_l3num(ct), in amanda_help()
156 if (nf_nat_amanda && ct->status & IPS_NAT_MASK) in amanda_help()
160 nf_ct_helper_log(skb, ct, "cannot add expectation"); in amanda_help()
Dnf_conntrack_expect.c53 net->ct.expect_count--; in nf_ct_unlink_expect_report()
97 if (!net->ct.expect_count) in __nf_ct_expect_find()
101 hlist_for_each_entry_rcu(i, &net->ct.expect_hash[h], hnode) { in __nf_ct_expect_find()
136 if (!net->ct.expect_count) in nf_ct_find_expectation()
140 hlist_for_each_entry(i, &net->ct.expect_hash[h], hnode) { in nf_ct_find_expectation()
185 void nf_ct_remove_expectations(struct nf_conn *ct) in nf_ct_remove_expectations() argument
187 struct nf_conn_help *help = nfct_help(ct); in nf_ct_remove_expectations()
350 hlist_add_head_rcu(&exp->hnode, &net->ct.expect_hash[h]); in nf_ct_expect_insert()
351 net->ct.expect_count++; in nf_ct_expect_insert()
402 hlist_for_each_entry_safe(i, next, &net->ct.expect_hash[h], hnode) { in __nf_ct_expect_check()
[all …]
Dnf_conntrack_sane.c63 struct nf_conn *ct, in help() argument
72 struct nf_ct_sane_master *ct_sane_info = nfct_help_data(ct); in help()
139 exp = nf_ct_expect_alloc(ct); in help()
141 nf_ct_helper_log(skb, ct, "cannot alloc expectation"); in help()
146 tuple = &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple; in help()
147 nf_ct_expect_init(exp, NF_CT_EXPECT_CLASS_DEFAULT, nf_ct_l3num(ct), in help()
156 nf_ct_helper_log(skb, ct, "cannot add expectation"); in help()
Dnf_conntrack_proto_udp.c36 return &net->ct.nf_ct_proto.udp; in udp_pernet()
80 static int udp_packet(struct nf_conn *ct, in udp_packet() argument
90 if (test_bit(IPS_SEEN_REPLY_BIT, &ct->status)) { in udp_packet()
91 nf_ct_refresh_acct(ct, ctinfo, skb, in udp_packet()
94 if (!test_and_set_bit(IPS_ASSURED_BIT, &ct->status)) in udp_packet()
95 nf_conntrack_event_cache(IPCT_ASSURED, ct); in udp_packet()
97 nf_ct_refresh_acct(ct, ctinfo, skb, in udp_packet()
104 static bool udp_new(struct nf_conn *ct, const struct sk_buff *skb, in udp_new() argument
144 if (net->ct.sysctl_checksum && hooknum == NF_INET_PRE_ROUTING && in udp_error()
303 return &net->ct.nf_ct_proto.udp.pn; in udp_get_net_proto()
Dnf_synproxy_core.c183 struct nf_conn *ct, in synproxy_tstamp_adjust() argument
352 struct nf_conn *ct; in synproxy_net_init() local
356 ct = nf_conntrack_alloc(net, 0, &t, &t, GFP_KERNEL); in synproxy_net_init()
357 if (IS_ERR(ct)) { in synproxy_net_init()
358 err = PTR_ERR(ct); in synproxy_net_init()
362 if (!nfct_seqadj_ext_add(ct)) in synproxy_net_init()
364 if (!nfct_synproxy_ext_add(ct)) in synproxy_net_init()
367 nf_conntrack_tmpl_insert(net, ct); in synproxy_net_init()
368 snet->tmpl = ct; in synproxy_net_init()
383 nf_conntrack_free(ct); in synproxy_net_init()
Dnf_conntrack_proto_generic.c43 return &net->ct.nf_ct_proto.generic; in generic_pernet()
77 static int generic_packet(struct nf_conn *ct, in generic_packet() argument
85 nf_ct_refresh_acct(ct, ctinfo, skb, *timeout); in generic_packet()
90 static bool generic_new(struct nf_conn *ct, const struct sk_buff *skb, in generic_new() argument
93 return nf_generic_should_process(nf_ct_protonum(ct)); in generic_new()
214 return &net->ct.nf_ct_proto.generic.pn; in generic_get_net_proto()
Dxt_NETMAP.c26 struct nf_conn *ct; in netmap_tg6() local
31 ct = nf_ct_get(skb, &ctinfo); in netmap_tg6()
54 return nf_nat_setup_info(ct, &newrange, HOOK2MANIP(par->hooknum)); in netmap_tg6()
69 struct nf_conn *ct; in netmap_tg4() local
79 ct = nf_ct_get(skb, &ctinfo); in netmap_tg4()
99 return nf_nat_setup_info(ct, &newrange, HOOK2MANIP(par->hooknum)); in netmap_tg4()
Dnf_conntrack_irc.c107 struct nf_conn *ct, enum ip_conntrack_info ctinfo) in help() argument
187 tuple = &ct->tuplehash[dir].tuple; in help()
196 exp = nf_ct_expect_alloc(ct); in help()
198 nf_ct_helper_log(skb, ct, in help()
203 tuple = &ct->tuplehash[!dir].tuple; in help()
211 if (nf_nat_irc && ct->status & IPS_NAT_MASK) in help()
217 nf_ct_helper_log(skb, ct, in help()
Dnf_conntrack_proto_udplite.c88 static int udplite_packet(struct nf_conn *ct, in udplite_packet() argument
98 if (test_bit(IPS_SEEN_REPLY_BIT, &ct->status)) { in udplite_packet()
99 nf_ct_refresh_acct(ct, ctinfo, skb, in udplite_packet()
102 if (!test_and_set_bit(IPS_ASSURED_BIT, &ct->status)) in udplite_packet()
103 nf_conntrack_event_cache(IPCT_ASSURED, ct); in udplite_packet()
105 nf_ct_refresh_acct(ct, ctinfo, skb, in udplite_packet()
112 static bool udplite_new(struct nf_conn *ct, const struct sk_buff *skb, in udplite_new() argument
158 if (net->ct.sysctl_checksum && hooknum == NF_INET_PRE_ROUTING && in udplite_error()
Dxt_state.c29 struct nf_conn *ct = nf_ct_get(skb, &ctinfo); in state_mt() local
31 if (!ct) in state_mt()
34 if (nf_ct_is_untracked(ct)) in state_mt()
Dxt_connbytes.c24 const struct nf_conn *ct; in connbytes_mt() local
32 ct = nf_ct_get(skb, &ctinfo); in connbytes_mt()
33 if (!ct) in connbytes_mt()
36 acct = nf_conn_acct_find(ct); in connbytes_mt()
Dxt_socket.c154 struct nf_conn const *ct; in xt_socket_lookup_slow_v4() local
185 ct = nf_ct_get(skb, &ctinfo); in xt_socket_lookup_slow_v4()
186 if (ct && !nf_ct_is_untracked(ct) && in xt_socket_lookup_slow_v4()
191 (ct->status & IPS_SRC_NAT_DONE)) { in xt_socket_lookup_slow_v4()
193 daddr = ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u3.ip; in xt_socket_lookup_slow_v4()
195 ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u.tcp.port : in xt_socket_lookup_slow_v4()
196 ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u.udp.port; in xt_socket_lookup_slow_v4()
Dnf_nat_tftp.c25 const struct nf_conn *ct = exp->master; in help() local
28 = ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u.udp.port; in help()
Dnf_conntrack_netbios_ns.c44 struct nf_conn *ct, enum ip_conntrack_info ctinfo) in netbios_ns_help() argument
46 return nf_conntrack_broadcast_help(skb, protoff, ct, ctinfo, timeout); in netbios_ns_help()
Dxt_HMARK.c83 struct nf_conn *ct = nf_ct_get(skb, &ctinfo); in hmark_ct_set_htuple() local
87 if (ct == NULL || nf_ct_is_untracked(ct)) in hmark_ct_set_htuple()
90 otuple = &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple; in hmark_ct_set_htuple()
91 rtuple = &ct->tuplehash[IP_CT_DIR_REPLY].tuple; in hmark_ct_set_htuple()
101 t->proto = nf_ct_protonum(ct); in hmark_ct_set_htuple()
Dxt_ipvs.c116 struct nf_conn *ct = nf_ct_get(skb, &ctinfo); in ipvs_mt() local
118 if (ct == NULL || nf_ct_is_untracked(ct)) { in ipvs_mt()
Dnf_nat_proto_udp.c27 const struct nf_conn *ct) in udp_unique_tuple() argument
29 nf_nat_l4proto_unique_tuple(l3proto, tuple, range, maniptype, ct, in udp_unique_tuple()
Dnf_nat_proto_tcp.c28 const struct nf_conn *ct) in tcp_unique_tuple() argument
30 nf_nat_l4proto_unique_tuple(l3proto, tuple, range, maniptype, ct, in tcp_unique_tuple()
Dnf_nat_proto_sctp.c24 const struct nf_conn *ct) in sctp_unique_tuple() argument
26 nf_nat_l4proto_unique_tuple(l3proto, tuple, range, maniptype, ct, in sctp_unique_tuple()
Dnf_nat_proto_dccp.c30 const struct nf_conn *ct) in dccp_unique_tuple() argument
32 nf_nat_l4proto_unique_tuple(l3proto, tuple, range, maniptype, ct, in dccp_unique_tuple()
Dnf_nat_proto_udplite.c27 const struct nf_conn *ct) in udplite_unique_tuple() argument
29 nf_nat_l4proto_unique_tuple(l3proto, tuple, range, maniptype, ct, in udplite_unique_tuple()
Dx_tables.c608 struct compat_xt_entry_target *ct = (struct compat_xt_entry_target *)t; in xt_compat_target_from_user() local
610 u_int16_t tsize = ct->u.user.target_size; in xt_compat_target_from_user()
613 memcpy(t, ct, sizeof(*ct)); in xt_compat_target_from_user()
615 target->compat_from_user(t->data, ct->data); in xt_compat_target_from_user()
617 memcpy(t->data, ct->data, tsize - sizeof(*ct)); in xt_compat_target_from_user()
634 struct compat_xt_entry_target __user *ct = *dstptr; in xt_compat_target_to_user() local
638 if (copy_to_user(ct, t, sizeof(*ct)) || in xt_compat_target_to_user()
639 put_user(tsize, &ct->u.user.target_size) || in xt_compat_target_to_user()
640 copy_to_user(ct->u.user.name, t->u.kernel.target->name, in xt_compat_target_to_user()
645 if (target->compat_to_user((void __user *)ct->data, t->data)) in xt_compat_target_to_user()
[all …]
Dnf_nat_proto_common.c41 const struct nf_conn *ct, in nf_nat_l4proto_unique_tuple() argument
89 if (++i != range_size && nf_nat_used_tuple(tuple, ct)) in nf_nat_l4proto_unique_tuple()
Dxt_connlimit.c325 const struct nf_conn *ct; in connlimit_mt() local
329 ct = nf_ct_get(skb, &ctinfo); in connlimit_mt()
330 if (ct != NULL) { in connlimit_mt()
331 tuple_ptr = &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple; in connlimit_mt()
332 zone = nf_ct_zone(ct); in connlimit_mt()
Dnfnetlink_cthelper.c37 struct nf_conn *ct, enum ip_conntrack_info ctinfo) in nfnl_userspace_cthelper() argument
42 help = nfct_help(ct); in nfnl_userspace_cthelper()
90 nfnl_cthelper_from_nlattr(struct nlattr *attr, struct nf_conn *ct) in nfnl_cthelper_from_nlattr() argument
92 struct nf_conn_help *help = nfct_help(ct); in nfnl_cthelper_from_nlattr()
105 nfnl_cthelper_to_nlattr(struct sk_buff *skb, const struct nf_conn *ct) in nfnl_cthelper_to_nlattr() argument
107 const struct nf_conn_help *help = nfct_help(ct); in nfnl_cthelper_to_nlattr()
Dnfnetlink_queue_core.c297 struct nf_conn *ct = NULL; in nfqnl_build_packet_message() local
348 ct = nfqnl_ct_get(entskb, &size, &ctinfo); in nfqnl_build_packet_message()
482 if (ct && nfqnl_ct_put(skb, ct, ctinfo) < 0) in nfqnl_build_packet_message()
991 struct nf_conn *ct = NULL; in nfqnl_recv_verdict() local
1014 ct = nfqnl_ct_parse(entry->skb, nfqa[NFQA_CT], &ctinfo); in nfqnl_recv_verdict()
1015 if (ct && nfqa[NFQA_EXP]) { in nfqnl_recv_verdict()
1016 nfqnl_attach_expect(ct, nfqa[NFQA_EXP], in nfqnl_recv_verdict()
1030 if (ct) in nfqnl_recv_verdict()
1031 nfqnl_ct_seq_adjust(entry->skb, ct, ctinfo, diff); in nfqnl_recv_verdict()
Dnf_nat_proto_unknown.c32 const struct nf_conn *ct) in unknown_unique_tuple() argument
/linux-4.1.27/drivers/macintosh/
Dwindfarm.h29 int (*set_value)(struct wf_control *ct, s32 val);
30 int (*get_value)(struct wf_control *ct, s32 *val);
31 s32 (*get_min)(struct wf_control *ct);
32 s32 (*get_max)(struct wf_control *ct);
33 void (*release)(struct wf_control *ct);
58 extern int wf_register_control(struct wf_control *ct);
59 extern void wf_unregister_control(struct wf_control *ct);
61 extern int wf_get_control(struct wf_control *ct);
62 extern void wf_put_control(struct wf_control *ct);
64 static inline int wf_control_set_max(struct wf_control *ct) in wf_control_set_max() argument
[all …]
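The windfarm.h excerpt above defines the small control interface used by the driver excerpts that follow: a struct wf_control carries a name plus set/get/min/max callbacks and is exposed to the fan-control loops with wf_register_control(). A rough sketch of a do-nothing control against those signatures; the ops-container name (struct wf_control_ops), its .owner member, and the field wiring are assumptions inferred from the excerpt, not copied from the tree:

    #include <linux/module.h>
    #include "windfarm.h"            /* local header, drivers/macintosh */

    static s32 demo_level;

    static int demo_set(struct wf_control *ct, s32 val)  { demo_level = val; return 0; }
    static int demo_get(struct wf_control *ct, s32 *val) { *val = demo_level; return 0; }
    static s32 demo_min(struct wf_control *ct)            { return 0; }
    static s32 demo_max(struct wf_control *ct)            { return 100; }

    /* assumed ops container; the excerpt only shows the callback signatures */
    static const struct wf_control_ops demo_ops = {
            .set_value = demo_set,
            .get_value = demo_get,
            .get_min   = demo_min,
            .get_max   = demo_max,
            .owner     = THIS_MODULE,
    };

    static struct wf_control demo_control = {
            .name = "demo-fan",
            .ops  = &demo_ops,
    };

    /* registration/removal are the calls shown at windfarm.h:58-59:
     *   wf_register_control(&demo_control);
     *   wf_unregister_control(&demo_control);
     */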
Dwindfarm_core.c153 struct wf_control *ct = container_of(kref, struct wf_control, ref); in wf_control_release() local
155 DBG("wf: Deleting control %s\n", ct->name); in wf_control_release()
157 if (ct->ops && ct->ops->release) in wf_control_release()
158 ct->ops->release(ct); in wf_control_release()
160 kfree(ct); in wf_control_release()
213 struct wf_control *ct; in wf_register_control() local
216 list_for_each_entry(ct, &wf_controls, link) { in wf_register_control()
217 if (!strcmp(ct->name, new_ct->name)) { in wf_register_control()
219 " duplicate control %s\n", ct->name); in wf_register_control()
246 void wf_unregister_control(struct wf_control *ct) in wf_unregister_control() argument
[all …]
Dwindfarm_pm112.c269 struct wf_control *ct; in cpu_fans_tick() local
327 ct = cpu_fans[i]; in cpu_fans_tick()
328 if (ct == NULL) in cpu_fans_tick()
330 err = ct->ops->set_value(ct, target * cpu_fan_scale[i] / 100); in cpu_fans_tick()
333 "error %d\n", ct->name, err); in cpu_fans_tick()
552 static void pm112_new_control(struct wf_control *ct) in pm112_new_control() argument
556 if (cpufreq_clamp == NULL && !strcmp(ct->name, "cpufreq-clamp")) { in pm112_new_control()
557 if (wf_get_control(ct) == 0) in pm112_new_control()
558 cpufreq_clamp = ct; in pm112_new_control()
562 if (!strcmp(ct->name, cpu_fan_names[i])) { in pm112_new_control()
[all …]
Dwindfarm_pm91.c537 static void wf_smu_new_control(struct wf_control *ct) in wf_smu_new_control() argument
542 if (fan_cpu_main == NULL && !strcmp(ct->name, "cpu-rear-fan-0")) { in wf_smu_new_control()
543 if (wf_get_control(ct) == 0) in wf_smu_new_control()
544 fan_cpu_main = ct; in wf_smu_new_control()
547 if (fan_cpu_second == NULL && !strcmp(ct->name, "cpu-rear-fan-1")) { in wf_smu_new_control()
548 if (wf_get_control(ct) == 0) in wf_smu_new_control()
549 fan_cpu_second = ct; in wf_smu_new_control()
552 if (fan_cpu_third == NULL && !strcmp(ct->name, "cpu-front-fan-0")) { in wf_smu_new_control()
553 if (wf_get_control(ct) == 0) in wf_smu_new_control()
554 fan_cpu_third = ct; in wf_smu_new_control()
[all …]
Dwindfarm_rm31.c568 static void rm31_new_control(struct wf_control *ct) in rm31_new_control() argument
572 if (!strcmp(ct->name, "cpu-fan-a-0")) in rm31_new_control()
573 cpu_fans[0][0] = ct; in rm31_new_control()
574 else if (!strcmp(ct->name, "cpu-fan-b-0")) in rm31_new_control()
575 cpu_fans[0][1] = ct; in rm31_new_control()
576 else if (!strcmp(ct->name, "cpu-fan-c-0")) in rm31_new_control()
577 cpu_fans[0][2] = ct; in rm31_new_control()
578 else if (!strcmp(ct->name, "cpu-fan-a-1")) in rm31_new_control()
579 cpu_fans[1][0] = ct; in rm31_new_control()
580 else if (!strcmp(ct->name, "cpu-fan-b-1")) in rm31_new_control()
[all …]
Dwindfarm_pm72.c670 static void pm72_new_control(struct wf_control *ct) in pm72_new_control() argument
675 if (!strcmp(ct->name, "cpu-front-fan-0")) in pm72_new_control()
676 cpu_front_fans[0] = ct; in pm72_new_control()
677 else if (!strcmp(ct->name, "cpu-front-fan-1")) in pm72_new_control()
678 cpu_front_fans[1] = ct; in pm72_new_control()
679 else if (!strcmp(ct->name, "cpu-rear-fan-0")) in pm72_new_control()
680 cpu_rear_fans[0] = ct; in pm72_new_control()
681 else if (!strcmp(ct->name, "cpu-rear-fan-1")) in pm72_new_control()
682 cpu_rear_fans[1] = ct; in pm72_new_control()
683 else if (!strcmp(ct->name, "cpu-pump-0")) in pm72_new_control()
[all …]
Dwindfarm_smu_controls.c109 static void smu_fan_release(struct wf_control *ct) in smu_fan_release() argument
111 struct smu_fan_control *fct = to_smu_fan(ct); in smu_fan_release()
116 static int smu_fan_set(struct wf_control *ct, s32 value) in smu_fan_set() argument
118 struct smu_fan_control *fct = to_smu_fan(ct); in smu_fan_set()
129 static int smu_fan_get(struct wf_control *ct, s32 *value) in smu_fan_get() argument
131 struct smu_fan_control *fct = to_smu_fan(ct); in smu_fan_get()
136 static s32 smu_fan_min(struct wf_control *ct) in smu_fan_min() argument
138 struct smu_fan_control *fct = to_smu_fan(ct); in smu_fan_min()
142 static s32 smu_fan_max(struct wf_control *ct) in smu_fan_max() argument
144 struct smu_fan_control *fct = to_smu_fan(ct); in smu_fan_max()
Dwindfarm_fcu_controls.c79 static void wf_fcu_fan_release(struct wf_control *ct) in wf_fcu_fan_release() argument
81 struct wf_fcu_fan *fan = ct->priv; in wf_fcu_fan_release()
145 static int wf_fcu_fan_set_rpm(struct wf_control *ct, s32 value) in wf_fcu_fan_set_rpm() argument
147 struct wf_fcu_fan *fan = ct->priv; in wf_fcu_fan_set_rpm()
167 static int wf_fcu_fan_get_rpm(struct wf_control *ct, s32 *value) in wf_fcu_fan_get_rpm() argument
169 struct wf_fcu_fan *fan = ct->priv; in wf_fcu_fan_get_rpm()
202 static int wf_fcu_fan_set_pwm(struct wf_control *ct, s32 value) in wf_fcu_fan_set_pwm() argument
204 struct wf_fcu_fan *fan = ct->priv; in wf_fcu_fan_set_pwm()
224 static int wf_fcu_fan_get_pwm(struct wf_control *ct, s32 *value) in wf_fcu_fan_get_pwm() argument
226 struct wf_fcu_fan *fan = ct->priv; in wf_fcu_fan_get_pwm()
[all …]
Dwindfarm_cpufreq_clamp.c38 static int clamp_set(struct wf_control *ct, s32 value) in clamp_set() argument
50 static int clamp_get(struct wf_control *ct, s32 *value) in clamp_get() argument
56 static s32 clamp_min(struct wf_control *ct) in clamp_min() argument
61 static s32 clamp_max(struct wf_control *ct) in clamp_max() argument
Dwindfarm_pm81.c612 static void wf_smu_new_control(struct wf_control *ct) in wf_smu_new_control() argument
617 if (fan_cpu_main == NULL && !strcmp(ct->name, "cpu-fan")) { in wf_smu_new_control()
618 if (wf_get_control(ct) == 0) in wf_smu_new_control()
619 fan_cpu_main = ct; in wf_smu_new_control()
622 if (fan_system == NULL && !strcmp(ct->name, "system-fan")) { in wf_smu_new_control()
623 if (wf_get_control(ct) == 0) in wf_smu_new_control()
624 fan_system = ct; in wf_smu_new_control()
627 if (cpufreq_clamp == NULL && !strcmp(ct->name, "cpufreq-clamp")) { in wf_smu_new_control()
628 if (wf_get_control(ct) == 0) in wf_smu_new_control()
629 cpufreq_clamp = ct; in wf_smu_new_control()
[all …]
Dwindfarm_pm121.c877 static struct wf_control* pm121_register_control(struct wf_control *ct, in pm121_register_control() argument
881 if (controls[id] == NULL && !strcmp(ct->name, match)) { in pm121_register_control()
882 if (wf_get_control(ct) == 0) in pm121_register_control()
883 controls[id] = ct; in pm121_register_control()
888 static void pm121_new_control(struct wf_control *ct) in pm121_new_control() argument
895 all = pm121_register_control(ct, "optical-drive-fan", FAN_OD) && all; in pm121_new_control()
896 all = pm121_register_control(ct, "hard-drive-fan", FAN_HD) && all; in pm121_new_control()
897 all = pm121_register_control(ct, "cpu-fan", FAN_CPU) && all; in pm121_new_control()
898 all = pm121_register_control(ct, "cpufreq-clamp", CPUFREQ) && all; in pm121_new_control()
/linux-4.1.27/drivers/video/fbdev/aty/
Dmach64_ct.c255 if ((err = aty_valid_pll_ct(info, vclk_per, &pll->ct))) in aty_var_to_pll_ct()
257 if (M64_HAS(GTB_DSP) && (err = aty_dsp_gt(info, bpp, &pll->ct))) in aty_var_to_pll_ct()
267 …ret = par->ref_clk_per * pll->ct.pll_ref_div * pll->ct.vclk_post_div_real / pll->ct.vclk_fb_div / … in aty_pll_to_var_ct()
269 if(pll->ct.xres > 0) { in aty_pll_to_var_ct()
271 ret /= pll->ct.xres; in aty_pll_to_var_ct()
291 pll->ct.pll_ext_cntl, pll->ct.pll_gen_cntl, pll->ct.pll_vclk_cntl); in aty_set_pll_ct()
295 par->clk_wr_offset, pll->ct.vclk_fb_div, in aty_set_pll_ct()
296 pll->ct.pll_ref_div, pll->ct.vclk_post_div, pll->ct.vclk_post_div_real); in aty_set_pll_ct()
313 aty_st_pll_ct(PLL_VCLK_CNTL, pll->ct.pll_vclk_cntl, par); in aty_set_pll_ct()
319 tmp |= ((pll->ct.vclk_post_div & 0x03U) << tmp2); in aty_set_pll_ct()
[all …]
/linux-4.1.27/include/net/netfilter/
Dnf_conntrack.h128 static inline u_int16_t nf_ct_l3num(const struct nf_conn *ct) in nf_ct_l3num() argument
130 return ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.l3num; in nf_ct_l3num()
133 static inline u_int8_t nf_ct_protonum(const struct nf_conn *ct) in nf_ct_protonum() argument
135 return ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.protonum; in nf_ct_protonum()
138 #define nf_ct_tuple(ct, dir) (&(ct)->tuplehash[dir].tuple) argument
145 static inline struct net *nf_ct_net(const struct nf_conn *ct) in nf_ct_net() argument
147 return read_pnet(&ct->ct_net); in nf_ct_net()
151 void nf_conntrack_alter_reply(struct nf_conn *ct,
168 static inline void nf_ct_put(struct nf_conn *ct) in nf_ct_put() argument
170 NF_CT_ASSERT(ct); in nf_ct_put()
[all …]
Dnf_conntrack_ecache.h24 nf_ct_ecache_find(const struct nf_conn *ct) in nf_ct_ecache_find() argument
27 return nf_ct_ext_find(ct, NF_CT_EXT_ECACHE); in nf_ct_ecache_find()
34 nf_ct_ecache_ext_add(struct nf_conn *ct, u16 ctmask, u16 expmask, gfp_t gfp) in nf_ct_ecache_ext_add() argument
37 struct net *net = nf_ct_net(ct); in nf_ct_ecache_ext_add()
40 if (!ctmask && !expmask && net->ct.sysctl_events) { in nf_ct_ecache_ext_add()
47 e = nf_ct_ext_add(ct, NF_CT_EXT_ECACHE, gfp); in nf_ct_ecache_ext_add()
61 struct nf_conn *ct; member
75 void nf_ct_deliver_cached_events(struct nf_conn *ct);
78 nf_conntrack_event_cache(enum ip_conntrack_events event, struct nf_conn *ct) in nf_conntrack_event_cache() argument
80 struct net *net = nf_ct_net(ct); in nf_conntrack_event_cache()
[all …]
Dnf_conntrack_extend.h60 static inline bool nf_ct_ext_exist(const struct nf_conn *ct, u8 id) in nf_ct_ext_exist() argument
62 return (ct->ext && __nf_ct_ext_exist(ct->ext, id)); in nf_ct_ext_exist()
65 static inline void *__nf_ct_ext_find(const struct nf_conn *ct, u8 id) in __nf_ct_ext_find() argument
67 if (!nf_ct_ext_exist(ct, id)) in __nf_ct_ext_find()
70 return (void *)ct->ext + ct->ext->offset[id]; in __nf_ct_ext_find()
76 void __nf_ct_ext_destroy(struct nf_conn *ct);
77 static inline void nf_ct_ext_destroy(struct nf_conn *ct) in nf_ct_ext_destroy() argument
79 if (ct->ext) in nf_ct_ext_destroy()
80 __nf_ct_ext_destroy(ct); in nf_ct_ext_destroy()
86 static inline void nf_ct_ext_free(struct nf_conn *ct) in nf_ct_ext_free() argument
[all …]
Dnf_conntrack_helper.h42 struct nf_conn *ct,
45 void (*destroy)(struct nf_conn *ct);
47 int (*from_nlattr)(struct nlattr *attr, struct nf_conn *ct);
48 int (*to_nlattr)(struct sk_buff *skb, const struct nf_conn *ct);
65 struct nf_conn_help *nf_ct_helper_ext_add(struct nf_conn *ct,
69 int __nf_ct_try_assign_helper(struct nf_conn *ct, struct nf_conn *tmpl,
72 void nf_ct_helper_destroy(struct nf_conn *ct);
74 static inline struct nf_conn_help *nfct_help(const struct nf_conn *ct) in nfct_help() argument
76 return nf_ct_ext_find(ct, NF_CT_EXT_HELPER); in nfct_help()
79 static inline void *nfct_help_data(const struct nf_conn *ct) in nfct_help_data() argument
[all …]
Dnf_conntrack_seqadj.h23 static inline struct nf_conn_seqadj *nfct_seqadj(const struct nf_conn *ct) in nfct_seqadj() argument
25 return nf_ct_ext_find(ct, NF_CT_EXT_SEQADJ); in nfct_seqadj()
28 static inline struct nf_conn_seqadj *nfct_seqadj_ext_add(struct nf_conn *ct) in nfct_seqadj_ext_add() argument
30 return nf_ct_ext_add(ct, NF_CT_EXT_SEQADJ, GFP_ATOMIC); in nfct_seqadj_ext_add()
33 int nf_ct_seqadj_init(struct nf_conn *ct, enum ip_conntrack_info ctinfo,
35 int nf_ct_seqadj_set(struct nf_conn *ct, enum ip_conntrack_info ctinfo,
37 void nf_ct_tcp_seqadj_set(struct sk_buff *skb, struct nf_conn *ct,
40 int nf_ct_seq_adjust(struct sk_buff *skb, struct nf_conn *ct,
42 s32 nf_ct_seq_offset(const struct nf_conn *ct, enum ip_conntrack_dir, u32 seq);
Dnf_conntrack_acct.h27 struct nf_conn_acct *nf_conn_acct_find(const struct nf_conn *ct) in nf_conn_acct_find() argument
29 return nf_ct_ext_find(ct, NF_CT_EXT_ACCT); in nf_conn_acct_find()
33 struct nf_conn_acct *nf_ct_acct_ext_add(struct nf_conn *ct, gfp_t gfp) in nf_ct_acct_ext_add() argument
35 struct net *net = nf_ct_net(ct); in nf_ct_acct_ext_add()
38 if (!net->ct.sysctl_acct) in nf_ct_acct_ext_add()
41 acct = nf_ct_ext_add(ct, NF_CT_EXT_ACCT, gfp); in nf_ct_acct_ext_add()
49 unsigned int seq_print_acct(struct seq_file *s, const struct nf_conn *ct,
55 return net->ct.sysctl_acct != 0; in nf_ct_acct_enabled()
61 net->ct.sysctl_acct = enable; in nf_ct_set_acct()
Dnf_conntrack_labels.h17 static inline struct nf_conn_labels *nf_ct_labels_find(const struct nf_conn *ct) in nf_ct_labels_find() argument
20 return nf_ct_ext_find(ct, NF_CT_EXT_LABELS); in nf_ct_labels_find()
26 static inline struct nf_conn_labels *nf_ct_labels_ext_add(struct nf_conn *ct) in nf_ct_labels_ext_add() argument
30 struct net *net = nf_ct_net(ct); in nf_ct_labels_ext_add()
33 words = ACCESS_ONCE(net->ct.label_words); in nf_ct_labels_ext_add()
37 cl_ext = nf_ct_ext_add_length(ct, NF_CT_EXT_LABELS, in nf_ct_labels_ext_add()
48 bool nf_connlabel_match(const struct nf_conn *ct, u16 bit);
49 int nf_connlabel_set(struct nf_conn *ct, u16 bit);
51 int nf_connlabels_replace(struct nf_conn *ct,
Dnf_conntrack_timestamp.h16 struct nf_conn_tstamp *nf_conn_tstamp_find(const struct nf_conn *ct) in nf_conn_tstamp_find() argument
19 return nf_ct_ext_find(ct, NF_CT_EXT_TSTAMP); in nf_conn_tstamp_find()
26 struct nf_conn_tstamp *nf_ct_tstamp_ext_add(struct nf_conn *ct, gfp_t gfp) in nf_ct_tstamp_ext_add() argument
29 struct net *net = nf_ct_net(ct); in nf_ct_tstamp_ext_add()
31 if (!net->ct.sysctl_tstamp) in nf_ct_tstamp_ext_add()
34 return nf_ct_ext_add(ct, NF_CT_EXT_TSTAMP, gfp); in nf_ct_tstamp_ext_add()
42 return net->ct.sysctl_tstamp != 0; in nf_ct_tstamp_enabled()
47 net->ct.sysctl_tstamp = enable; in nf_ct_set_tstamp()
Dnf_nat_l3proto.h29 const struct nf_conn *ct,
42 int nf_nat_icmp_reply_translation(struct sk_buff *skb, struct nf_conn *ct,
51 struct nf_conn *ct));
58 struct nf_conn *ct));
66 struct nf_conn *ct));
73 struct nf_conn *ct));
75 int nf_nat_icmpv6_reply_translation(struct sk_buff *skb, struct nf_conn *ct,
84 struct nf_conn *ct));
91 struct nf_conn *ct));
99 struct nf_conn *ct));
[all …]
Dnf_nat.h33 struct nf_conn *ct; member
42 unsigned int nf_nat_setup_info(struct nf_conn *ct,
46 extern unsigned int nf_nat_alloc_null_binding(struct nf_conn *ct,
49 struct nf_conn_nat *nf_ct_nat_ext_add(struct nf_conn *ct);
55 static inline struct nf_conn_nat *nfct_nat(const struct nf_conn *ct) in nfct_nat() argument
58 return nf_ct_ext_find(ct, NF_CT_EXT_NAT); in nfct_nat()
Dnf_nat_core.h10 unsigned int nf_nat_packet(struct nf_conn *ct, enum ip_conntrack_info ctinfo,
15 static inline int nf_nat_initialized(struct nf_conn *ct, in nf_nat_initialized() argument
19 return ct->status & IPS_SRC_NAT_DONE; in nf_nat_initialized()
21 return ct->status & IPS_DST_NAT_DONE; in nf_nat_initialized()
27 (*nfnetlink_parse_nat_setup_hook)(struct nf_conn *ct,
Dnfnetlink_queue.h14 int nfqnl_ct_put(struct sk_buff *skb, struct nf_conn *ct,
16 void nfqnl_ct_seq_adjust(struct sk_buff *skb, struct nf_conn *ct,
18 int nfqnl_attach_expect(struct nf_conn *ct, const struct nlattr *attr,
35 nfqnl_ct_put(struct sk_buff *skb, struct nf_conn *ct, enum ip_conntrack_info ctinfo) in nfqnl_ct_put() argument
40 inline void nfqnl_ct_seq_adjust(struct sk_buff *skb, struct nf_conn *ct, in nfqnl_ct_seq_adjust() argument
45 inline int nfqnl_attach_expect(struct nf_conn *ct, const struct nlattr *attr, in nfqnl_attach_expect() argument
Dnf_conntrack_timeout.h29 struct nf_conn_timeout *nf_ct_timeout_find(const struct nf_conn *ct) in nf_ct_timeout_find() argument
32 return nf_ct_ext_find(ct, NF_CT_EXT_TIMEOUT); in nf_ct_timeout_find()
39 struct nf_conn_timeout *nf_ct_timeout_ext_add(struct nf_conn *ct, in nf_ct_timeout_ext_add() argument
46 timeout_ext = nf_ct_ext_add(ct, NF_CT_EXT_TIMEOUT, gfp); in nf_ct_timeout_ext_add()
59 nf_ct_timeout_lookup(struct net *net, struct nf_conn *ct, in nf_ct_timeout_lookup() argument
66 timeout_ext = nf_ct_timeout_find(ct); in nf_ct_timeout_lookup()
Dnf_nat_helper.h10 int __nf_nat_mangle_tcp_packet(struct sk_buff *skb, struct nf_conn *ct,
17 struct nf_conn *ct, in nf_nat_mangle_tcp_packet() argument
25 return __nf_nat_mangle_tcp_packet(skb, ct, ctinfo, protoff, in nf_nat_mangle_tcp_packet()
30 int nf_nat_mangle_udp_packet(struct sk_buff *skb, struct nf_conn *ct,
38 void nf_nat_follow_master(struct nf_conn *ct, struct nf_conntrack_expect *this);
Dnf_conntrack_l4proto.h38 int (*packet)(struct nf_conn *ct,
48 bool (*new)(struct nf_conn *ct, const struct sk_buff *skb,
52 void (*destroy)(struct nf_conn *ct);
70 struct nf_conn *ct);
75 int (*from_nlattr)(struct nlattr *tb[], struct nf_conn *ct);
152 ((net)->ct.sysctl_log_invalid == (proto) || \
153 (net)->ct.sysctl_log_invalid == IPPROTO_RAW)
Dnf_conntrack_synproxy.h12 static inline struct nf_conn_synproxy *nfct_synproxy(const struct nf_conn *ct) in nfct_synproxy() argument
15 return nf_ct_ext_find(ct, NF_CT_EXT_SYNPROXY); in nfct_synproxy()
21 static inline struct nf_conn_synproxy *nfct_synproxy_ext_add(struct nf_conn *ct) in nfct_synproxy_ext_add() argument
24 return nf_ct_ext_add(ct, NF_CT_EXT_SYNPROXY, GFP_ATOMIC); in nfct_synproxy_ext_add()
71 struct tcphdr *th, struct nf_conn *ct,
Dnf_conntrack_core.h63 struct nf_conn *ct = (struct nf_conn *)skb->nfct; in nf_conntrack_confirm() local
66 if (ct && !nf_ct_is_untracked(ct)) { in nf_conntrack_confirm()
67 if (!nf_ct_is_confirmed(ct)) in nf_conntrack_confirm()
70 nf_ct_deliver_cached_events(ct); in nf_conntrack_confirm()
Dnf_conntrack_zones.h13 static inline u16 nf_ct_zone(const struct nf_conn *ct) in nf_ct_zone() argument
17 nf_ct_zone = nf_ct_ext_find(ct, NF_CT_EXT_ZONE); in nf_ct_zone()
Dnf_nat_l4proto.h38 const struct nf_conn *ct);
67 const struct nf_conn *ct, u16 *rover);
/linux-4.1.27/net/netfilter/ipvs/
Dip_vs_nfct.c85 struct nf_conn *ct = nf_ct_get(skb, &ctinfo); in ip_vs_update_conntrack() local
88 if (ct == NULL || nf_ct_is_confirmed(ct) || nf_ct_is_untracked(ct) || in ip_vs_update_conntrack()
89 nf_ct_is_dying(ct)) in ip_vs_update_conntrack()
101 if (cp->app && nf_ct_protonum(ct) == IPPROTO_TCP && in ip_vs_update_conntrack()
102 !nfct_seqadj(ct) && !nfct_seqadj_ext_add(ct)) in ip_vs_update_conntrack()
111 new_tuple = ct->tuplehash[IP_CT_DIR_REPLY].tuple; in ip_vs_update_conntrack()
129 __func__, ct, ct->status, ctinfo, in ip_vs_update_conntrack()
130 ARG_TUPLE(&ct->tuplehash[IP_CT_DIR_REPLY].tuple), in ip_vs_update_conntrack()
132 nf_conntrack_alter_reply(ct, &new_tuple); in ip_vs_update_conntrack()
143 static void ip_vs_nfct_expect_callback(struct nf_conn *ct, in ip_vs_nfct_expect_callback() argument
[all …]
Dip_vs_pe_sip.c107 struct ip_vs_conn *ct) in ip_vs_sip_ct_match() argument
112 if (ct->af == p->af && in ip_vs_sip_ct_match()
113 ip_vs_addr_equal(p->af, p->caddr, &ct->caddr) && in ip_vs_sip_ct_match()
117 p->vaddr, &ct->vaddr) && in ip_vs_sip_ct_match()
118 ct->vport == p->vport && in ip_vs_sip_ct_match()
119 ct->flags & IP_VS_CONN_F_TEMPLATE && in ip_vs_sip_ct_match()
120 ct->protocol == p->protocol && in ip_vs_sip_ct_match()
121 ct->pe_data && ct->pe_data_len == p->pe_data_len && in ip_vs_sip_ct_match()
122 !memcmp(ct->pe_data, p->pe_data, p->pe_data_len)) in ip_vs_sip_ct_match()
Dip_vs_ftp.c183 struct nf_conn *ct; in ip_vs_ftp_out() local
263 ct = nf_ct_get(skb, &ctinfo); in ip_vs_ftp_out()
264 if (ct && !nf_ct_is_untracked(ct) && nfct_nat(ct)) { in ip_vs_ftp_out()
272 ret = nf_nat_mangle_tcp_packet(skb, ct, ctinfo, in ip_vs_ftp_out()
278 ip_vs_nfct_expect_related(skb, ct, n_cp, in ip_vs_ftp_out()
Dip_vs_conn.c746 int ip_vs_check_template(struct ip_vs_conn *ct) in ip_vs_check_template() argument
748 struct ip_vs_dest *dest = ct->dest; in ip_vs_check_template()
749 struct netns_ipvs *ipvs = net_ipvs(ip_vs_conn_net(ct)); in ip_vs_check_template()
760 ip_vs_proto_name(ct->protocol), in ip_vs_check_template()
761 IP_VS_DBG_ADDR(ct->af, &ct->caddr), in ip_vs_check_template()
762 ntohs(ct->cport), in ip_vs_check_template()
763 IP_VS_DBG_ADDR(ct->af, &ct->vaddr), in ip_vs_check_template()
764 ntohs(ct->vport), in ip_vs_check_template()
765 IP_VS_DBG_ADDR(ct->daf, &ct->daddr), in ip_vs_check_template()
766 ntohs(ct->dport)); in ip_vs_check_template()
[all …]
Dip_vs_xmit.c712 struct nf_conn *ct = nf_ct_get(skb, &ctinfo); in ip_vs_nat_xmit() local
714 if (ct && !nf_ct_is_untracked(ct)) { in ip_vs_nat_xmit()
801 struct nf_conn *ct = nf_ct_get(skb, &ctinfo); in ip_vs_nat_xmit_v6() local
803 if (ct && !nf_ct_is_untracked(ct)) { in ip_vs_nat_xmit_v6()
1277 struct nf_conn *ct = nf_ct_get(skb, &ctinfo); in ip_vs_icmp_xmit() local
1279 if (ct && !nf_ct_is_untracked(ct)) { in ip_vs_icmp_xmit()
1368 struct nf_conn *ct = nf_ct_get(skb, &ctinfo); in ip_vs_icmp_xmit_v6() local
1370 if (ct && !nf_ct_is_untracked(ct)) { in ip_vs_icmp_xmit_v6()
/linux-4.1.27/kernel/irq/
Dgeneric-chip.c38 struct irq_chip_type *ct = irq_data_get_chip_type(d); in irq_gc_mask_disable_reg() local
42 irq_reg_writel(gc, mask, ct->regs.disable); in irq_gc_mask_disable_reg()
43 *ct->mask_cache &= ~mask; in irq_gc_mask_disable_reg()
57 struct irq_chip_type *ct = irq_data_get_chip_type(d); in irq_gc_mask_set_bit() local
61 *ct->mask_cache |= mask; in irq_gc_mask_set_bit()
62 irq_reg_writel(gc, *ct->mask_cache, ct->regs.mask); in irq_gc_mask_set_bit()
77 struct irq_chip_type *ct = irq_data_get_chip_type(d); in irq_gc_mask_clr_bit() local
81 *ct->mask_cache &= ~mask; in irq_gc_mask_clr_bit()
82 irq_reg_writel(gc, *ct->mask_cache, ct->regs.mask); in irq_gc_mask_clr_bit()
97 struct irq_chip_type *ct = irq_data_get_chip_type(d); in irq_gc_unmask_enable_reg() local
[all …]
/linux-4.1.27/net/ax25/
Dax25_addr.c119 int ct = 0; in ax25cmp() local
121 while (ct < 6) { in ax25cmp()
122 if ((a->ax25_call[ct] & 0xFE) != (b->ax25_call[ct] & 0xFE)) /* Clean off repeater bits */ in ax25cmp()
124 ct++; in ax25cmp()
127 if ((a->ax25_call[ct] & 0x1E) == (b->ax25_call[ct] & 0x1E)) /* SSID without control bit */ in ax25cmp()
222 int ct = 0; in ax25_addr_build() local
255 while (ct < d->ndigi) { in ax25_addr_build()
256 memcpy(buf, &d->calls[ct], AX25_ADDR_LEN); in ax25_addr_build()
258 if (d->repeated[ct]) in ax25_addr_build()
268 ct++; in ax25_addr_build()
[all …]
Daf_ax25.c1123 int ct = 0, err = 0; in ax25_connect() local
1196 while (ct < fsa->fsa_ax25.sax25_ndigis) { in ax25_connect()
1197 if ((fsa->fsa_digipeater[ct].ax25_call[6] & in ax25_connect()
1199 digi->repeated[ct] = 1; in ax25_connect()
1200 digi->lastrepeat = ct; in ax25_connect()
1202 digi->repeated[ct] = 0; in ax25_connect()
1204 digi->calls[ct] = fsa->fsa_digipeater[ct]; in ax25_connect()
1205 ct++; in ax25_connect()
1498 int ct = 0; in ax25_sendmsg() local
1509 while (ct < usax->sax25_ndigis) { in ax25_sendmsg()
[all …]
/linux-4.1.27/net/ipv4/netfilter/
Dnf_nat_l3proto_ipv4.c33 const struct nf_conn *ct, in nf_nat_ipv4_decode_session() argument
38 const struct nf_conntrack_tuple *t = &ct->tuplehash[dir].tuple; in nf_nat_ipv4_decode_session()
41 if (ct->status & statusbit) { in nf_nat_ipv4_decode_session()
53 if (ct->status & statusbit) { in nf_nat_ipv4_decode_session()
191 struct nf_conn *ct, in nf_nat_icmp_reply_translation() argument
215 if ((ct->status & IPS_NAT_DONE_MASK) != IPS_NAT_DONE_MASK) in nf_nat_icmp_reply_translation()
217 if (ct->status & IPS_NAT_MASK) in nf_nat_icmp_reply_translation()
230 if (!(ct->status & statusbit)) in nf_nat_icmp_reply_translation()
235 l4proto, &ct->tuplehash[!dir].tuple, !manip)) in nf_nat_icmp_reply_translation()
248 nf_ct_invert_tuplepr(&target, &ct->tuplehash[!dir].tuple); in nf_nat_icmp_reply_translation()
[all …]
Dnf_conntrack_l3proto_ipv4_compat.c39 st->bucket < net->ct.htable_size; in ct_get_first()
42 hlist_nulls_first_rcu(&net->ct.hash[st->bucket])); in ct_get_first()
58 if (++st->bucket >= net->ct.htable_size) in ct_get_next()
62 hlist_nulls_first_rcu(&net->ct.hash[st->bucket])); in ct_get_next()
97 static void ct_show_secctx(struct seq_file *s, const struct nf_conn *ct) in ct_show_secctx() argument
103 ret = security_secid_to_secctx(ct->secmark, &secctx, &len); in ct_show_secctx()
112 static inline void ct_show_secctx(struct seq_file *s, const struct nf_conn *ct) in ct_show_secctx() argument
120 struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(hash); in ct_seq_show() local
125 NF_CT_ASSERT(ct); in ct_seq_show()
126 if (unlikely(!atomic_inc_not_zero(&ct->ct_general.use))) in ct_seq_show()
[all …]
Dnf_nat_pptp.c42 static void pptp_nat_expected(struct nf_conn *ct, in pptp_nat_expected() argument
45 struct net *net = nf_ct_net(ct); in pptp_nat_expected()
46 const struct nf_conn *master = ct->master; in pptp_nat_expected()
79 other_exp = nf_ct_expect_find_get(net, nf_ct_zone(ct), &t); in pptp_nat_expected()
89 BUG_ON(ct->status & IPS_NAT_DONE_MASK); in pptp_nat_expected()
94 = ct->master->tuplehash[!exp->dir].tuple.dst.u3; in pptp_nat_expected()
99 nf_nat_setup_info(ct, &range, NF_NAT_MANIP_SRC); in pptp_nat_expected()
104 = ct->master->tuplehash[!exp->dir].tuple.src.u3; in pptp_nat_expected()
109 nf_nat_setup_info(ct, &range, NF_NAT_MANIP_DST); in pptp_nat_expected()
115 struct nf_conn *ct, in pptp_outbound_pkt() argument
[all …]
Dnf_nat_h323.c29 struct nf_conn *ct = nf_ct_get(skb, &ctinfo); in set_addr() local
42 if (!nf_nat_mangle_tcp_packet(skb, ct, ctinfo, in set_addr()
56 if (!nf_nat_mangle_udp_packet(skb, ct, ctinfo, in set_addr()
93 static int set_sig_addr(struct sk_buff *skb, struct nf_conn *ct, in set_sig_addr() argument
98 const struct nf_ct_h323_master *info = nfct_help_data(ct); in set_sig_addr()
105 if (get_h225_addr(ct, *data, &taddr[i], &addr, &port)) { in set_sig_addr()
106 if (addr.ip == ct->tuplehash[dir].tuple.src.u3.ip && in set_sig_addr()
112 get_h225_addr(ct, *data, &taddr[0], in set_sig_addr()
119 &ct->tuplehash[!dir].tuple.dst.u3.ip, in set_sig_addr()
123 &ct->tuplehash[!dir]. in set_sig_addr()
[all …]
Dnf_conntrack_l3proto_ipv4.c99 struct nf_conn *ct; in ipv4_helper() local
105 ct = nf_ct_get(skb, &ctinfo); in ipv4_helper()
106 if (!ct || ctinfo == IP_CT_RELATED_REPLY) in ipv4_helper()
109 help = nfct_help(ct); in ipv4_helper()
119 ct, ctinfo); in ipv4_helper()
126 struct nf_conn *ct; in ipv4_confirm() local
129 ct = nf_ct_get(skb, &ctinfo); in ipv4_confirm()
130 if (!ct || ctinfo == IP_CT_RELATED_REPLY) in ipv4_confirm()
134 if (test_bit(IPS_SEQ_ADJUST_BIT, &ct->status) && in ipv4_confirm()
136 if (!nf_ct_seq_adjust(skb, ct, ctinfo, ip_hdrlen(skb))) { in ipv4_confirm()
[all …]
Dnf_nat_masquerade_ipv4.c30 struct nf_conn *ct; in nf_nat_masquerade_ipv4() local
39 ct = nf_ct_get(skb, &ctinfo); in nf_nat_masquerade_ipv4()
40 nat = nfct_nat(ct); in nf_nat_masquerade_ipv4()
42 NF_CT_ASSERT(ct && (ctinfo == IP_CT_NEW || ctinfo == IP_CT_RELATED || in nf_nat_masquerade_ipv4()
48 if (ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u3.ip == 0) in nf_nat_masquerade_ipv4()
71 return nf_nat_setup_info(ct, &newrange, NF_NAT_MANIP_SRC); in nf_nat_masquerade_ipv4()
Dnf_nat_proto_gre.c46 const struct nf_conn *ct) in gre_unique_tuple() argument
54 if (!ct->master) in gre_unique_tuple()
63 pr_debug("%p: NATing GRE PPTP\n", ct); in gre_unique_tuple()
75 if (++i == range_size || !nf_nat_used_tuple(tuple, ct)) in gre_unique_tuple()
79 pr_debug("%p: no NAT mapping\n", ct); in gre_unique_tuple()
Dnf_conntrack_proto_icmp.c29 return &net->ct.nf_ct_proto.icmp; in icmp_pernet()
90 static int icmp_packet(struct nf_conn *ct, in icmp_packet() argument
101 nf_ct_refresh_acct(ct, ctinfo, skb, *timeout); in icmp_packet()
107 static bool icmp_new(struct nf_conn *ct, const struct sk_buff *skb, in icmp_new() argument
117 if (ct->tuplehash[0].tuple.dst.u.icmp.type >= sizeof(valid_new) || in icmp_new()
118 !valid_new[ct->tuplehash[0].tuple.dst.u.icmp.type]) { in icmp_new()
121 ct->tuplehash[0].tuple.dst.u.icmp.type); in icmp_new()
122 nf_ct_dump_tuple_ip(&ct->tuplehash[0].tuple); in icmp_new()
197 if (net->ct.sysctl_checksum && hooknum == NF_INET_PRE_ROUTING && in icmp_error()
394 return &net->ct.nf_ct_proto.icmp.pn; in icmp_get_net_proto()
Dipt_SYNPROXY.c307 struct nf_conn *ct; in ipv4_synproxy_hook() local
314 ct = nf_ct_get(skb, &ctinfo); in ipv4_synproxy_hook()
315 if (ct == NULL) in ipv4_synproxy_hook()
318 synproxy = nfct_synproxy(ct); in ipv4_synproxy_hook()
330 state = &ct->proto.tcp; in ipv4_synproxy_hook()
333 if (th->rst && !test_bit(IPS_SEEN_REPLY_BIT, &ct->status)) { in ipv4_synproxy_hook()
334 nf_ct_seqadj_init(ct, ctinfo, synproxy->isn - in ipv4_synproxy_hook()
347 nf_ct_seqadj_init(ct, ctinfo, 0); in ipv4_synproxy_hook()
390 nf_ct_seqadj_init(ct, ctinfo, synproxy->isn - ntohl(th->seq)); in ipv4_synproxy_hook()
401 synproxy_tstamp_adjust(skb, thoff, th, ct, ctinfo, synproxy); in ipv4_synproxy_hook()
Dnf_nat_proto_icmp.c35 const struct nf_conn *ct) in icmp_unique_tuple() argument
50 if (++i == range_size || !nf_nat_used_tuple(tuple, ct)) in icmp_unique_tuple()
Diptable_nat.c34 struct nf_conn *ct) in iptable_nat_do_chain() argument
36 struct net *net = nf_ct_net(ct); in iptable_nat_do_chain()
Dipt_CLUSTERIP.c305 struct nf_conn *ct; in clusterip_tg() local
313 ct = nf_ct_get(skb, &ctinfo); in clusterip_tg()
314 if (ct == NULL) in clusterip_tg()
332 ct->mark = hash; in clusterip_tg()
347 nf_ct_dump_tuple_ip(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple); in clusterip_tg()
349 pr_debug("hash=%u ct_hash=%u ", hash, ct->mark); in clusterip_tg()
Dnf_nat_snmp_basic.c1180 static int snmp_translate(struct nf_conn *ct, in snmp_translate() argument
1197 map.from = NOCT1(&ct->tuplehash[dir].tuple.src.u3.ip); in snmp_translate()
1198 map.to = NOCT1(&ct->tuplehash[!dir].tuple.dst.u3.ip); in snmp_translate()
1201 map.from = NOCT1(&ct->tuplehash[!dir].tuple.src.u3.ip); in snmp_translate()
1202 map.to = NOCT1(&ct->tuplehash[dir].tuple.dst.u3.ip); in snmp_translate()
1219 struct nf_conn *ct, in help() argument
1234 if (!(ct->status & IPS_NAT_MASK)) in help()
1253 ret = snmp_translate(ct, ctinfo, skb); in help()
/linux-4.1.27/net/ipv6/netfilter/
Dnf_nat_l3proto_ipv6.c32 const struct nf_conn *ct, in nf_nat_ipv6_decode_session() argument
37 const struct nf_conntrack_tuple *t = &ct->tuplehash[dir].tuple; in nf_nat_ipv6_decode_session()
40 if (ct->status & statusbit) { in nf_nat_ipv6_decode_session()
52 if (ct->status & statusbit) { in nf_nat_ipv6_decode_session()
197 struct nf_conn *ct, in nf_nat_icmpv6_reply_translation() argument
221 if ((ct->status & IPS_NAT_DONE_MASK) != IPS_NAT_DONE_MASK) in nf_nat_icmpv6_reply_translation()
223 if (ct->status & IPS_NAT_MASK) in nf_nat_icmpv6_reply_translation()
236 if (!(ct->status & statusbit)) in nf_nat_icmpv6_reply_translation()
241 l4proto, &ct->tuplehash[!dir].tuple, !manip)) in nf_nat_icmpv6_reply_translation()
255 nf_ct_invert_tuplepr(&target, &ct->tuplehash[!dir].tuple); in nf_nat_icmpv6_reply_translation()
[all …]
Dnf_nat_masquerade_ipv6.c30 struct nf_conn *ct; in nf_nat_masquerade_ipv6() local
33 ct = nf_ct_get(skb, &ctinfo); in nf_nat_masquerade_ipv6()
34 NF_CT_ASSERT(ct && (ctinfo == IP_CT_NEW || ctinfo == IP_CT_RELATED || in nf_nat_masquerade_ipv6()
41 nfct_nat(ct)->masq_index = out->ifindex; in nf_nat_masquerade_ipv6()
49 return nf_nat_setup_info(ct, &newrange, NF_NAT_MANIP_SRC); in nf_nat_masquerade_ipv6()
53 static int device_cmp(struct nf_conn *ct, void *ifindex) in device_cmp() argument
55 const struct nf_conn_nat *nat = nfct_nat(ct); in device_cmp()
59 if (nf_ct_l3num(ct) != NFPROTO_IPV6) in device_cmp()
Dnf_conntrack_l3proto_ipv6.c102 struct nf_conn *ct; in ipv6_helper() local
111 ct = nf_ct_get(skb, &ctinfo); in ipv6_helper()
112 if (!ct || ctinfo == IP_CT_RELATED_REPLY) in ipv6_helper()
115 help = nfct_help(ct); in ipv6_helper()
131 return helper->help(skb, protoff, ct, ctinfo); in ipv6_helper()
138 struct nf_conn *ct; in ipv6_confirm() local
144 ct = nf_ct_get(skb, &ctinfo); in ipv6_confirm()
145 if (!ct || ctinfo == IP_CT_RELATED_REPLY) in ipv6_confirm()
156 if (test_bit(IPS_SEQ_ADJUST_BIT, &ct->status) && in ipv6_confirm()
158 if (!nf_ct_seq_adjust(skb, ct, ctinfo, protoff)) { in ipv6_confirm()
[all …]
Dnf_conntrack_proto_icmpv6.c34 return &net->ct.nf_ct_proto.icmpv6; in icmpv6_pernet()
102 static int icmpv6_packet(struct nf_conn *ct, in icmpv6_packet() argument
113 nf_ct_refresh_acct(ct, ctinfo, skb, *timeout); in icmpv6_packet()
119 static bool icmpv6_new(struct nf_conn *ct, const struct sk_buff *skb, in icmpv6_new() argument
126 int type = ct->tuplehash[0].tuple.dst.u.icmp.type - 128; in icmpv6_new()
132 nf_ct_dump_tuple_ipv6(&ct->tuplehash[0].tuple); in icmpv6_new()
133 if (LOG_INVALID(nf_ct_net(ct), IPPROTO_ICMPV6)) in icmpv6_new()
134 nf_log_packet(nf_ct_net(ct), PF_INET6, 0, skb, NULL, in icmpv6_new()
212 if (net->ct.sysctl_checksum && hooknum == NF_INET_PRE_ROUTING && in icmpv6_error()
364 return &net->ct.nf_ct_proto.icmpv6.pn; in icmpv6_get_net_proto()
Dip6t_SYNPROXY.c322 struct nf_conn *ct; in ipv6_synproxy_hook() local
331 ct = nf_ct_get(skb, &ctinfo); in ipv6_synproxy_hook()
332 if (ct == NULL) in ipv6_synproxy_hook()
335 synproxy = nfct_synproxy(ct); in ipv6_synproxy_hook()
352 state = &ct->proto.tcp; in ipv6_synproxy_hook()
355 if (th->rst && !test_bit(IPS_SEEN_REPLY_BIT, &ct->status)) { in ipv6_synproxy_hook()
356 nf_ct_seqadj_init(ct, ctinfo, synproxy->isn - in ipv6_synproxy_hook()
369 nf_ct_seqadj_init(ct, ctinfo, 0); in ipv6_synproxy_hook()
412 nf_ct_seqadj_init(ct, ctinfo, synproxy->isn - ntohl(th->seq)); in ipv6_synproxy_hook()
423 synproxy_tstamp_adjust(skb, thoff, th, ct, ctinfo, synproxy); in ipv6_synproxy_hook()
Dnf_nat_proto_icmpv6.c37 const struct nf_conn *ct) in icmpv6_unique_tuple() argument
52 if (++i == range_size || !nf_nat_used_tuple(tuple, ct)) in icmpv6_unique_tuple()
Dip6table_nat.c36 struct nf_conn *ct) in ip6table_nat_do_chain() argument
38 struct net *net = nf_ct_net(ct); in ip6table_nat_do_chain()
/linux-4.1.27/arch/arm/mach-imx/
Davic.c94 struct irq_chip_type *ct = gc->chip_types; in avic_irq_suspend() local
97 avic_saved_mask_reg[idx] = __raw_readl(avic_base + ct->regs.mask); in avic_irq_suspend()
98 __raw_writel(gc->wake_active, avic_base + ct->regs.mask); in avic_irq_suspend()
104 struct irq_chip_type *ct = gc->chip_types; in avic_irq_resume() local
107 __raw_writel(avic_saved_mask_reg[idx], avic_base + ct->regs.mask); in avic_irq_resume()
118 struct irq_chip_type *ct; in avic_init_gc() local
125 ct = gc->chip_types; in avic_init_gc()
126 ct->chip.irq_mask = irq_gc_mask_clr_bit; in avic_init_gc()
127 ct->chip.irq_unmask = irq_gc_mask_set_bit; in avic_init_gc()
128 ct->chip.irq_ack = irq_gc_mask_clr_bit; in avic_init_gc()
[all …]
Dtzic.c110 struct irq_chip_type *ct; in tzic_init_gc() local
117 ct = gc->chip_types; in tzic_init_gc()
118 ct->chip.irq_mask = irq_gc_mask_disable_reg; in tzic_init_gc()
119 ct->chip.irq_unmask = irq_gc_unmask_enable_reg; in tzic_init_gc()
120 ct->chip.irq_set_wake = irq_gc_set_wake; in tzic_init_gc()
121 ct->chip.irq_suspend = tzic_irq_suspend; in tzic_init_gc()
122 ct->chip.irq_resume = tzic_irq_resume; in tzic_init_gc()
123 ct->regs.disable = TZIC_ENCLEAR0(idx); in tzic_init_gc()
124 ct->regs.enable = TZIC_ENSET0(idx); in tzic_init_gc()
/linux-4.1.27/include/scsi/
Dfc_encode.h109 struct fc_ct_req *ct; in fc_ct_hdr_fill() local
113 ct = fc_frame_payload_get(fp, ct_plen); in fc_ct_hdr_fill()
114 memset(ct, 0, ct_plen); in fc_ct_hdr_fill()
115 ct->hdr.ct_rev = FC_CT_REV; in fc_ct_hdr_fill()
116 ct->hdr.ct_fs_type = fs_type; in fc_ct_hdr_fill()
117 ct->hdr.ct_fs_subtype = subtype; in fc_ct_hdr_fill()
118 ct->hdr.ct_cmd = htons((u16) op); in fc_ct_hdr_fill()
119 return ct; in fc_ct_hdr_fill()
136 struct fc_ct_req *ct; in fc_ct_ns_fill() local
141 ct = fc_ct_hdr_fill(fp, op, sizeof(struct fc_ns_gid_ft), in fc_ct_ns_fill()
[all …]
/linux-4.1.27/net/mac80211/
Daes_gcm.c22 struct scatterlist assoc, pt, ct[2]; in ieee80211_aes_gcm_encrypt() local
33 sg_init_table(ct, 2); in ieee80211_aes_gcm_encrypt()
34 sg_set_buf(&ct[0], data, data_len); in ieee80211_aes_gcm_encrypt()
35 sg_set_buf(&ct[1], mic, IEEE80211_GCMP_MIC_LEN); in ieee80211_aes_gcm_encrypt()
39 aead_request_set_crypt(aead_req, &pt, ct, data_len, j_0); in ieee80211_aes_gcm_encrypt()
47 struct scatterlist assoc, pt, ct[2]; in ieee80211_aes_gcm_decrypt() local
60 sg_init_table(ct, 2); in ieee80211_aes_gcm_decrypt()
61 sg_set_buf(&ct[0], data, data_len); in ieee80211_aes_gcm_decrypt()
62 sg_set_buf(&ct[1], mic, IEEE80211_GCMP_MIC_LEN); in ieee80211_aes_gcm_decrypt()
66 aead_request_set_crypt(aead_req, ct, &pt, in ieee80211_aes_gcm_decrypt()
Daes_ccm.c26 struct scatterlist assoc, pt, ct[2]; in ieee80211_aes_ccm_encrypt() local
37 sg_init_table(ct, 2); in ieee80211_aes_ccm_encrypt()
38 sg_set_buf(&ct[0], data, data_len); in ieee80211_aes_ccm_encrypt()
39 sg_set_buf(&ct[1], mic, mic_len); in ieee80211_aes_ccm_encrypt()
43 aead_request_set_crypt(aead_req, &pt, ct, data_len, b_0); in ieee80211_aes_ccm_encrypt()
52 struct scatterlist assoc, pt, ct[2]; in ieee80211_aes_ccm_decrypt() local
65 sg_init_table(ct, 2); in ieee80211_aes_ccm_decrypt()
66 sg_set_buf(&ct[0], data, data_len); in ieee80211_aes_ccm_decrypt()
67 sg_set_buf(&ct[1], mic, mic_len); in ieee80211_aes_ccm_decrypt()
71 aead_request_set_crypt(aead_req, ct, &pt, data_len + mic_len, b_0); in ieee80211_aes_ccm_decrypt()
Daes_gmac.c27 struct scatterlist sg[3], ct[1]; in ieee80211_aes_gmac() local
49 sg_init_table(ct, 1); in ieee80211_aes_gmac()
50 sg_set_buf(&ct[0], mic, GMAC_MIC_LEN); in ieee80211_aes_gmac()
54 aead_request_set_crypt(aead_req, NULL, ct, 0, iv); in ieee80211_aes_gmac()
/linux-4.1.27/drivers/irqchip/
Dirq-omap-intc.c205 struct irq_chip_type *ct; in omap_alloc_gc_of() local
209 ct = gc->chip_types; in omap_alloc_gc_of()
211 ct->type = IRQ_TYPE_LEVEL_MASK; in omap_alloc_gc_of()
212 ct->handler = handle_level_irq; in omap_alloc_gc_of()
214 ct->chip.irq_ack = omap_mask_ack_irq; in omap_alloc_gc_of()
215 ct->chip.irq_mask = irq_gc_mask_disable_reg; in omap_alloc_gc_of()
216 ct->chip.irq_unmask = irq_gc_unmask_enable_reg; in omap_alloc_gc_of()
218 ct->chip.flags |= IRQCHIP_SKIP_SET_WAKE; in omap_alloc_gc_of()
220 ct->regs.enable = INTC_MIR_CLEAR0 + 32 * i; in omap_alloc_gc_of()
221 ct->regs.disable = INTC_MIR_SET0 + 32 * i; in omap_alloc_gc_of()
[all …]
Dirq-brcmstb-l2.c122 struct irq_chip_type *ct; in brcmstb_l2_intc_of_init() local
181 ct = gc->chip_types; in brcmstb_l2_intc_of_init()
183 ct->chip.irq_ack = irq_gc_ack_set_bit; in brcmstb_l2_intc_of_init()
184 ct->regs.ack = CPU_CLEAR; in brcmstb_l2_intc_of_init()
186 ct->chip.irq_mask = irq_gc_mask_disable_reg; in brcmstb_l2_intc_of_init()
187 ct->regs.disable = CPU_MASK_SET; in brcmstb_l2_intc_of_init()
189 ct->chip.irq_unmask = irq_gc_unmask_enable_reg; in brcmstb_l2_intc_of_init()
190 ct->regs.enable = CPU_MASK_CLEAR; in brcmstb_l2_intc_of_init()
192 ct->chip.irq_suspend = brcmstb_l2_intc_suspend; in brcmstb_l2_intc_of_init()
193 ct->chip.irq_resume = brcmstb_l2_intc_resume; in brcmstb_l2_intc_of_init()
[all …]
Dirq-bcm7120-l2.c87 struct irq_chip_type *ct = irq_data_get_chip_type(d); in bcm7120_l2_intc_suspend() local
93 ct->regs.mask); in bcm7120_l2_intc_suspend()
100 struct irq_chip_type *ct = irq_data_get_chip_type(d); in bcm7120_l2_intc_resume() local
104 irq_reg_writel(gc, gc->mask_cache, ct->regs.mask); in bcm7120_l2_intc_resume()
214 struct irq_chip_type *ct; in bcm7120_l2_intc_probe() local
275 ct = gc->chip_types; in bcm7120_l2_intc_probe()
278 ct->regs.mask = data->en_offset[idx]; in bcm7120_l2_intc_probe()
280 ct->chip.irq_mask = irq_gc_mask_clr_bit; in bcm7120_l2_intc_probe()
281 ct->chip.irq_unmask = irq_gc_mask_set_bit; in bcm7120_l2_intc_probe()
282 ct->chip.irq_ack = irq_gc_noop; in bcm7120_l2_intc_probe()
[all …]
Dirq-sirfsoc.c34 struct irq_chip_type *ct; in sirfsoc_alloc_gc() local
44 ct = gc->chip_types; in sirfsoc_alloc_gc()
45 ct->chip.irq_mask = irq_gc_mask_clr_bit; in sirfsoc_alloc_gc()
46 ct->chip.irq_unmask = irq_gc_mask_set_bit; in sirfsoc_alloc_gc()
47 ct->regs.mask = SIRFSOC_INT_RISC_MASK0; in sirfsoc_alloc_gc()
Dirq-sunxi-nmi.c75 struct irq_chip_type *ct = gc->chip_types; in sunxi_sc_nmi_set_type() local
77 u32 ctrl_off = ct->regs.type; in sunxi_sc_nmi_set_type()
107 for (i = 0; i < gc->num_ct; i++, ct++) in sunxi_sc_nmi_set_type()
108 if (ct->type & flow_type) in sunxi_sc_nmi_set_type()
109 ctrl_off = ct->regs.type; in sunxi_sc_nmi_set_type()
Dirq-dw-apb-ictl.c57 struct irq_chip_type *ct = irq_data_get_chip_type(d); in dw_apb_ictl_resume() local
60 writel_relaxed(~0, gc->reg_base + ct->regs.enable); in dw_apb_ictl_resume()
61 writel_relaxed(*ct->mask_cache, gc->reg_base + ct->regs.mask); in dw_apb_ictl_resume()
Dirq-orion.c132 struct irq_chip_type *ct = irq_data_get_chip_type(d); in orion_bridge_irq_startup() local
134 ct->chip.irq_ack(d); in orion_bridge_irq_startup()
135 ct->chip.irq_unmask(d); in orion_bridge_irq_startup()
Dirq-atmel-aic-common.c43 struct irq_chip_type *ct = irq_data_get_chip_type(d); in aic_common_shutdown() local
45 ct->chip.irq_mask(d); in aic_common_shutdown()
/linux-4.1.27/arch/mips/jz4740/
Dirq.c85 struct irq_chip_type *ct; in arch_init_irq() local
99 ct = gc->chip_types; in arch_init_irq()
100 ct->regs.enable = JZ_REG_INTC_CLEAR_MASK; in arch_init_irq()
101 ct->regs.disable = JZ_REG_INTC_SET_MASK; in arch_init_irq()
102 ct->chip.irq_unmask = irq_gc_unmask_enable_reg; in arch_init_irq()
103 ct->chip.irq_mask = irq_gc_mask_disable_reg; in arch_init_irq()
104 ct->chip.irq_mask_ack = irq_gc_mask_disable_reg; in arch_init_irq()
105 ct->chip.irq_set_wake = irq_gc_set_wake; in arch_init_irq()
106 ct->chip.irq_suspend = jz4740_irq_suspend; in arch_init_irq()
107 ct->chip.irq_resume = jz4740_irq_resume; in arch_init_irq()
Dgpio.c422 struct irq_chip_type *ct; in jz4740_gpio_chip_init() local
436 ct = gc->chip_types; in jz4740_gpio_chip_init()
437 ct->regs.enable = JZ_REG_GPIO_MASK_CLEAR; in jz4740_gpio_chip_init()
438 ct->regs.disable = JZ_REG_GPIO_MASK_SET; in jz4740_gpio_chip_init()
439 ct->regs.ack = JZ_REG_GPIO_FLAG_CLEAR; in jz4740_gpio_chip_init()
441 ct->chip.name = "GPIO"; in jz4740_gpio_chip_init()
442 ct->chip.irq_mask = irq_gc_mask_disable_reg; in jz4740_gpio_chip_init()
443 ct->chip.irq_unmask = jz_gpio_irq_unmask; in jz4740_gpio_chip_init()
444 ct->chip.irq_ack = irq_gc_ack_set_bit; in jz4740_gpio_chip_init()
445 ct->chip.irq_suspend = jz4740_irq_suspend; in jz4740_gpio_chip_init()
[all …]
/linux-4.1.27/net/appletalk/
Daarp.c315 int ct; in aarp_expire_timeout() local
319 for (ct = 0; ct < AARP_HASH_SIZE; ct++) { in aarp_expire_timeout()
320 __aarp_expire_timer(&resolved[ct]); in aarp_expire_timeout()
321 __aarp_kick(&unresolved[ct]); in aarp_expire_timeout()
322 __aarp_expire_timer(&unresolved[ct]); in aarp_expire_timeout()
323 __aarp_expire_timer(&proxies[ct]); in aarp_expire_timeout()
337 int ct; in aarp_device_event() local
345 for (ct = 0; ct < AARP_HASH_SIZE; ct++) { in aarp_device_event()
346 __aarp_expire_device(&resolved[ct], dev); in aarp_device_event()
347 __aarp_expire_device(&unresolved[ct], dev); in aarp_device_event()
[all …]
/linux-4.1.27/arch/arm/plat-orion/
Dgpio.c360 struct irq_chip_type *ct = irq_data_get_chip_type(d); in gpio_irq_set_type() local
377 if (!(ct->type & type)) in gpio_irq_set_type()
504 struct irq_chip_type *ct = irq_data_get_chip_type(d); in orion_gpio_unmask_irq() local
509 reg_val = irq_reg_readl(gc, ct->regs.mask); in orion_gpio_unmask_irq()
511 irq_reg_writel(gc, reg_val, ct->regs.mask); in orion_gpio_unmask_irq()
518 struct irq_chip_type *ct = irq_data_get_chip_type(d); in orion_gpio_mask_irq() local
523 reg_val = irq_reg_readl(gc, ct->regs.mask); in orion_gpio_mask_irq()
525 irq_reg_writel(gc, reg_val, ct->regs.mask); in orion_gpio_mask_irq()
537 struct irq_chip_type *ct; in orion_gpio_init() local
594 ct = gc->chip_types; in orion_gpio_init()
[all …]
Dirq.c26 struct irq_chip_type *ct; in orion_irq_init() local
35 ct = gc->chip_types; in orion_irq_init()
36 ct->chip.irq_mask = irq_gc_mask_clr_bit; in orion_irq_init()
37 ct->chip.irq_unmask = irq_gc_mask_set_bit; in orion_irq_init()
/linux-4.1.27/include/linux/netfilter/
Dnf_conntrack_h323.h32 int get_h225_addr(struct nf_conn *ct, unsigned char *data,
50 struct nf_conn *ct,
55 struct nf_conn *ct,
60 struct nf_conn *ct,
68 extern int (*nat_t120_hook) (struct sk_buff *skb, struct nf_conn *ct,
74 extern int (*nat_h245_hook) (struct sk_buff *skb, struct nf_conn *ct,
81 struct nf_conn *ct,
88 extern int (*nat_q931_hook) (struct sk_buff *skb, struct nf_conn *ct,
Dnf_conntrack_sip.h68 int (*match_len)(const struct nf_conn *ct,
169 int ct_sip_parse_request(const struct nf_conn *ct, const char *dptr,
173 int ct_sip_get_header(const struct nf_conn *ct, const char *dptr,
177 int ct_sip_parse_header_uri(const struct nf_conn *ct, const char *dptr,
182 int ct_sip_parse_address_param(const struct nf_conn *ct, const char *dptr,
187 int ct_sip_parse_numerical_param(const struct nf_conn *ct, const char *dptr,
192 int ct_sip_get_sdp_header(const struct nf_conn *ct, const char *dptr,
Dnf_conntrack_proto_gre.h84 int nf_ct_gre_keymap_add(struct nf_conn *ct, enum ip_conntrack_dir dir,
88 void nf_ct_gre_keymap_destroy(struct nf_conn *ct);
Dnf_conntrack_pptp.h305 struct nf_conn *ct, enum ip_conntrack_info ctinfo,
312 struct nf_conn *ct, enum ip_conntrack_info ctinfo,
322 (*nf_nat_pptp_hook_expectfn)(struct nf_conn *ct,
Dnf_conntrack_snmp.h6 struct nf_conn *ct,
/linux-4.1.27/arch/arm/mach-davinci/
Dirq.c52 struct irq_chip_type *ct; in davinci_alloc_gc() local
61 ct = gc->chip_types; in davinci_alloc_gc()
62 ct->chip.irq_ack = irq_gc_ack_set_bit; in davinci_alloc_gc()
63 ct->chip.irq_mask = irq_gc_mask_clr_bit; in davinci_alloc_gc()
64 ct->chip.irq_unmask = irq_gc_mask_set_bit; in davinci_alloc_gc()
66 ct->regs.ack = IRQ_REG0_OFFSET; in davinci_alloc_gc()
67 ct->regs.mask = IRQ_ENT_REG0_OFFSET; in davinci_alloc_gc()
/linux-4.1.27/arch/powerpc/mm/
Dicswx.c164 static int acop_use_cop(int ct) in acop_use_cop() argument
200 int ct; in acop_handle_fault() local
215 ct = ICSWX_GET_CT_HINT(error_code); in acop_handle_fault()
216 if (ct < 0) { in acop_handle_fault()
227 ct = (ccw >> 16) & 0x3f; in acop_handle_fault()
243 if ((acop_copro_type_bit(ct) & current->active_mm->context.acop) != 0) { in acop_handle_fault()
249 if (!acop_use_cop(ct)) in acop_handle_fault()
254 current->comm, current->pid, ct); in acop_handle_fault()
/linux-4.1.27/drivers/gpio/
Dgpio-mvebu.c323 struct irq_chip_type *ct = irq_data_get_chip_type(d); in mvebu_gpio_edge_irq_mask() local
327 ct->mask_cache_priv &= ~mask; in mvebu_gpio_edge_irq_mask()
329 writel_relaxed(ct->mask_cache_priv, mvebu_gpioreg_edge_mask(mvchip)); in mvebu_gpio_edge_irq_mask()
337 struct irq_chip_type *ct = irq_data_get_chip_type(d); in mvebu_gpio_edge_irq_unmask() local
342 ct->mask_cache_priv |= mask; in mvebu_gpio_edge_irq_unmask()
343 writel_relaxed(ct->mask_cache_priv, mvebu_gpioreg_edge_mask(mvchip)); in mvebu_gpio_edge_irq_unmask()
351 struct irq_chip_type *ct = irq_data_get_chip_type(d); in mvebu_gpio_level_irq_mask() local
356 ct->mask_cache_priv &= ~mask; in mvebu_gpio_level_irq_mask()
357 writel_relaxed(ct->mask_cache_priv, mvebu_gpioreg_level_mask(mvchip)); in mvebu_gpio_level_irq_mask()
365 struct irq_chip_type *ct = irq_data_get_chip_type(d); in mvebu_gpio_level_irq_unmask() local
[all …]
Dgpio-sodaville.c135 struct irq_chip_type *ct; in sdv_register_irqsupport() local
164 ct = sd->gc->chip_types; in sdv_register_irqsupport()
165 ct->type = IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_LEVEL_LOW; in sdv_register_irqsupport()
166 ct->regs.eoi = GPSTR; in sdv_register_irqsupport()
167 ct->regs.mask = GPIO_INT; in sdv_register_irqsupport()
168 ct->chip.irq_mask = irq_gc_mask_clr_bit; in sdv_register_irqsupport()
169 ct->chip.irq_unmask = irq_gc_mask_set_bit; in sdv_register_irqsupport()
170 ct->chip.irq_eoi = irq_gc_eoi; in sdv_register_irqsupport()
171 ct->chip.irq_set_type = sdv_gpio_pub_set_type; in sdv_register_irqsupport()
Dgpio-mxs.c202 struct irq_chip_type *ct; in mxs_gpio_init_gc() local
208 ct = gc->chip_types; in mxs_gpio_init_gc()
209 ct->chip.irq_ack = irq_gc_ack_set_bit; in mxs_gpio_init_gc()
210 ct->chip.irq_mask = irq_gc_mask_clr_bit; in mxs_gpio_init_gc()
211 ct->chip.irq_unmask = irq_gc_mask_set_bit; in mxs_gpio_init_gc()
212 ct->chip.irq_set_type = mxs_gpio_set_irq_type; in mxs_gpio_init_gc()
213 ct->chip.irq_set_wake = mxs_gpio_set_wake_irq; in mxs_gpio_init_gc()
214 ct->regs.ack = PINCTRL_IRQSTAT(port) + MXS_CLR; in mxs_gpio_init_gc()
215 ct->regs.mask = PINCTRL_IRQEN(port); in mxs_gpio_init_gc()
Dgpio-mxc.c345 struct irq_chip_type *ct; in mxc_gpio_init_gc() local
351 ct = gc->chip_types; in mxc_gpio_init_gc()
352 ct->chip.irq_ack = irq_gc_ack_set_bit; in mxc_gpio_init_gc()
353 ct->chip.irq_mask = irq_gc_mask_clr_bit; in mxc_gpio_init_gc()
354 ct->chip.irq_unmask = irq_gc_mask_set_bit; in mxc_gpio_init_gc()
355 ct->chip.irq_set_type = gpio_set_irq_type; in mxc_gpio_init_gc()
356 ct->chip.irq_set_wake = gpio_set_wake_irq; in mxc_gpio_init_gc()
357 ct->regs.ack = GPIO_ISR; in mxc_gpio_init_gc()
358 ct->regs.mask = GPIO_IMR; in mxc_gpio_init_gc()
Dgpio-dwapb.c302 struct irq_chip_type *ct; in dwapb_configure_irqs() local
332 ct = &irq_gc->chip_types[i]; in dwapb_configure_irqs()
333 ct->chip.irq_ack = irq_gc_ack_set_bit; in dwapb_configure_irqs()
334 ct->chip.irq_mask = irq_gc_mask_set_bit; in dwapb_configure_irqs()
335 ct->chip.irq_unmask = irq_gc_mask_clr_bit; in dwapb_configure_irqs()
336 ct->chip.irq_set_type = dwapb_irq_set_type; in dwapb_configure_irqs()
337 ct->chip.irq_enable = dwapb_irq_enable; in dwapb_configure_irqs()
338 ct->chip.irq_disable = dwapb_irq_disable; in dwapb_configure_irqs()
339 ct->chip.irq_request_resources = dwapb_irq_reqres; in dwapb_configure_irqs()
340 ct->chip.irq_release_resources = dwapb_irq_relres; in dwapb_configure_irqs()
[all …]
Dgpio-sta2x11.c326 struct irq_chip_type *ct; in gsta_alloc_irq_chip() local
331 ct = gc->chip_types; in gsta_alloc_irq_chip()
333 ct->chip.irq_set_type = gsta_irq_type; in gsta_alloc_irq_chip()
334 ct->chip.irq_disable = gsta_irq_disable; in gsta_alloc_irq_chip()
335 ct->chip.irq_enable = gsta_irq_enable; in gsta_alloc_irq_chip()
343 struct irq_chip_type *ct = gc->chip_types; in gsta_alloc_irq_chip() local
347 irq_set_chip_and_handler(i, &ct->chip, ct->handler); in gsta_alloc_irq_chip()
Dgpio-ml-ioh.c392 struct irq_chip_type *ct; in ioh_gpio_alloc_generic_chip() local
397 ct = gc->chip_types; in ioh_gpio_alloc_generic_chip()
399 ct->chip.irq_mask = ioh_irq_mask; in ioh_gpio_alloc_generic_chip()
400 ct->chip.irq_unmask = ioh_irq_unmask; in ioh_gpio_alloc_generic_chip()
401 ct->chip.irq_set_type = ioh_irq_type; in ioh_gpio_alloc_generic_chip()
402 ct->chip.irq_disable = ioh_irq_disable; in ioh_gpio_alloc_generic_chip()
403 ct->chip.irq_enable = ioh_irq_enable; in ioh_gpio_alloc_generic_chip()
Dgpio-pch.c338 struct irq_chip_type *ct; in pch_gpio_alloc_generic_chip() local
343 ct = gc->chip_types; in pch_gpio_alloc_generic_chip()
345 ct->chip.irq_ack = pch_irq_ack; in pch_gpio_alloc_generic_chip()
346 ct->chip.irq_mask = pch_irq_mask; in pch_gpio_alloc_generic_chip()
347 ct->chip.irq_unmask = pch_irq_unmask; in pch_gpio_alloc_generic_chip()
348 ct->chip.irq_set_type = pch_irq_type; in pch_gpio_alloc_generic_chip()
/linux-4.1.27/sound/soc/au1x/
Dpsc-i2s.c58 unsigned long ct; in au1xpsc_i2s_set_fmt() local
63 ct = pscdata->cfg; in au1xpsc_i2s_set_fmt()
65 ct &= ~(PSC_I2SCFG_XM | PSC_I2SCFG_MLJ); /* left-justified */ in au1xpsc_i2s_set_fmt()
68 ct |= PSC_I2SCFG_XM; /* enable I2S mode */ in au1xpsc_i2s_set_fmt()
73 ct |= PSC_I2SCFG_MLJ; /* LSB (right-) justified */ in au1xpsc_i2s_set_fmt()
79 ct &= ~(PSC_I2SCFG_BI | PSC_I2SCFG_WI); /* IB-IF */ in au1xpsc_i2s_set_fmt()
82 ct |= PSC_I2SCFG_BI | PSC_I2SCFG_WI; in au1xpsc_i2s_set_fmt()
85 ct |= PSC_I2SCFG_BI; in au1xpsc_i2s_set_fmt()
88 ct |= PSC_I2SCFG_WI; in au1xpsc_i2s_set_fmt()
98 ct |= PSC_I2SCFG_MS; /* PSC I2S slave mode */ in au1xpsc_i2s_set_fmt()
[all …]
/linux-4.1.27/drivers/scsi/libfc/
Dfc_elsct.c95 struct fc_ct_hdr *ct; in fc_els_resp_type() local
129 ct = fc_frame_payload_get(fp, sizeof(*ct)); in fc_els_resp_type()
130 if (ct) { in fc_els_resp_type()
131 switch (ntohs(ct->ct_cmd)) { in fc_els_resp_type()
Dfc_lport.c1130 struct fc_ct_hdr *ct; in fc_lport_ns_resp() local
1153 ct = fc_frame_payload_get(fp, sizeof(*ct)); in fc_lport_ns_resp()
1155 if (fh && ct && fh->fh_type == FC_TYPE_CT && in fc_lport_ns_resp()
1156 ct->ct_fs_type == FC_FST_DIR && in fc_lport_ns_resp()
1157 ct->ct_fs_subtype == FC_NS_SUBTYPE && in fc_lport_ns_resp()
1158 ntohs(ct->ct_cmd) == FC_FS_ACC) in fc_lport_ns_resp()
1206 struct fc_ct_hdr *ct; in fc_lport_ms_resp() local
1229 ct = fc_frame_payload_get(fp, sizeof(*ct)); in fc_lport_ms_resp()
1231 if (fh && ct && fh->fh_type == FC_TYPE_CT && in fc_lport_ms_resp()
1232 ct->ct_fs_type == FC_FST_MGMT && in fc_lport_ms_resp()
[all …]
/linux-4.1.27/arch/sh/boards/mach-se/7722/
Dirq.c72 struct irq_chip_type *ct; in se7722_gc_init() local
82 ct = gc->chip_types; in se7722_gc_init()
83 ct->chip.irq_mask = irq_gc_mask_set_bit; in se7722_gc_init()
84 ct->chip.irq_unmask = irq_gc_mask_clr_bit; in se7722_gc_init()
86 ct->regs.mask = IRQ01_MASK_REG; in se7722_gc_init()
/linux-4.1.27/arch/s390/kernel/
Dcache.c138 union cache_topology ct; in init_cache_level() local
145 ct.raw = ecag(EXTRACT_TOPOLOGY, 0, 0); in init_cache_level()
147 ctype = get_cache_type(&ct.ci[0], level); in init_cache_level()
163 union cache_topology ct; in populate_cache_leaves() local
168 ct.raw = ecag(EXTRACT_TOPOLOGY, 0, 0); in populate_cache_leaves()
173 pvt = (ct.ci[level].scope == CACHE_SCOPE_PRIVATE) ? 1 : 0; in populate_cache_leaves()
174 ctype = get_cache_type(&ct.ci[0], level); in populate_cache_leaves()
/linux-4.1.27/arch/sh/boards/mach-se/7343/
Dirq.c73 struct irq_chip_type *ct; in se7343_gc_init() local
83 ct = gc->chip_types; in se7343_gc_init()
84 ct->chip.irq_mask = irq_gc_mask_set_bit; in se7343_gc_init()
85 ct->chip.irq_unmask = irq_gc_mask_clr_bit; in se7343_gc_init()
87 ct->regs.mask = PA_CPLD_IMSK_REG; in se7343_gc_init()
/linux-4.1.27/lib/
Dstring.c232 int strcmp(const char *cs, const char *ct) in strcmp() argument
238 c2 = *ct++; in strcmp()
256 int strncmp(const char *cs, const char *ct, size_t count) in strncmp() argument
262 c2 = *ct++; in strncmp()
473 char *strpbrk(const char *cs, const char *ct) in strpbrk() argument
478 for (sc2 = ct; *sc2 != '\0'; ++sc2) { in strpbrk()
500 char *strsep(char **s, const char *ct) in strsep() argument
508 end = strpbrk(sbegin, ct); in strsep()
676 __visible int memcmp(const void *cs, const void *ct, size_t count) in memcmp() argument
681 for (su1 = cs, su2 = ct; 0 < count; ++su1, ++su2, count--) in memcmp()
/linux-4.1.27/arch/arm/boot/compressed/
Dstring.c68 int memcmp(const void *cs, const void *ct, size_t count) in memcmp() argument
70 const unsigned char *su1 = cs, *su2 = ct, *end = su1 + count; in memcmp()
81 int strcmp(const char *cs, const char *ct) in strcmp() argument
88 c2 = *ct++; in strcmp()
/linux-4.1.27/arch/x86/lib/
Dstring_32.c94 int strcmp(const char *cs, const char *ct) in strcmp() argument
109 : "1" (cs), "2" (ct) in strcmp()
117 int strncmp(const char *cs, const char *ct, size_t count) in strncmp() argument
134 : "1" (cs), "2" (ct), "3" (count) in strncmp()
Dstrstr_32.c3 char *strstr(const char *cs, const char *ct) in strstr() argument
27 : "0" (0), "1" (0xffffffff), "2" (cs), "g" (ct) in strstr()
/linux-4.1.27/drivers/s390/char/
Dkeyboard.c461 unsigned int ct; in kbd_ioctl() local
504 ct = kbd->accent_table_size; in kbd_ioctl()
505 if (put_user(ct, &a->kb_cnt)) in kbd_ioctl()
508 ct * sizeof(struct kbdiacruc))) in kbd_ioctl()
520 if (get_user(ct, &a->kb_cnt)) in kbd_ioctl()
522 if (ct >= MAX_DIACR) in kbd_ioctl()
524 kbd->accent_table_size = ct; in kbd_ioctl()
525 for (i = 0; i < ct; i++) { in kbd_ioctl()
540 if (get_user(ct, &a->kb_cnt)) in kbd_ioctl()
542 if (ct >= MAX_DIACR) in kbd_ioctl()
[all …]
/linux-4.1.27/include/asm-generic/
Dcputime_nsecs.h81 static inline void cputime_to_timespec(const cputime_t ct, struct timespec *val) in cputime_to_timespec() argument
85 val->tv_sec = cputime_div_rem(ct, NSEC_PER_SEC, &rem); in cputime_to_timespec()
98 static inline void cputime_to_timeval(const cputime_t ct, struct timeval *val) in cputime_to_timeval() argument
102 val->tv_sec = cputime_div_rem(ct, NSEC_PER_SEC, &rem); in cputime_to_timeval()
/linux-4.1.27/drivers/mfd/
Djz4740-adc.c208 struct irq_chip_type *ct; in jz4740_adc_probe() local
269 ct = gc->chip_types; in jz4740_adc_probe()
270 ct->regs.mask = JZ_REG_ADC_CTRL; in jz4740_adc_probe()
271 ct->regs.ack = JZ_REG_ADC_STATUS; in jz4740_adc_probe()
272 ct->chip.irq_mask = irq_gc_mask_set_bit; in jz4740_adc_probe()
273 ct->chip.irq_unmask = irq_gc_mask_clr_bit; in jz4740_adc_probe()
274 ct->chip.irq_ack = irq_gc_ack_set_bit; in jz4740_adc_probe()
/linux-4.1.27/net/ipv6/
Dip6mr.c348 int ct; member
359 for (it->ct = 0; it->ct < MFC6_LINES; it->ct++) { in ipmr_mfc_seq_idx()
360 it->cache = &mrt->mfc6_cache_array[it->ct]; in ipmr_mfc_seq_idx()
385 int ct; member
394 for (iter->ct = 0; iter->ct < mrt->maxvif; ++iter->ct) { in ip6mr_vif_seq_idx()
395 if (!MIF_EXISTS(mrt, iter->ct)) in ip6mr_vif_seq_idx()
398 return &mrt->vif6_table[iter->ct]; in ip6mr_vif_seq_idx()
431 while (++iter->ct < mrt->maxvif) { in ip6mr_vif_seq_next()
432 if (!MIF_EXISTS(mrt, iter->ct)) in ip6mr_vif_seq_next()
434 return &mrt->vif6_table[iter->ct]; in ip6mr_vif_seq_next()
[all …]
/linux-4.1.27/drivers/media/usb/gspca/stv06xx/
Dstv06xx_hdcs.c181 int ct; in hdcs_set_exposure() local
194 ct = hdcs->exp.cto + hdcs->psmp + (HDCS_ADC_START_SIG_DUR + 2); in hdcs_set_exposure()
195 cp = hdcs->exp.cto + (hdcs->w * ct / 2); in hdcs_set_exposure()
208 srowexp = hdcs->w - (cycles + hdcs->exp.er + 13) / ct; in hdcs_set_exposure()
210 mnct = (hdcs->exp.er + 12 + ct - 1) / ct; in hdcs_set_exposure()
216 mnct = (hdcs->exp.er + 5 + ct - 1) / ct; in hdcs_set_exposure()
217 max_srowexp = cp - mnct * ct - 1; in hdcs_set_exposure()
/linux-4.1.27/net/ipv4/
Dipmr.c1630 int ct; in ipmr_device_event() local
1637 for (ct = 0; ct < mrt->maxvif; ct++, v++) { in ipmr_device_event()
1639 vif_delete(mrt, ct, 1, NULL); in ipmr_device_event()
1803 int ct; in ipmr_find_vif() local
1805 for (ct = mrt->maxvif-1; ct >= 0; ct--) { in ipmr_find_vif()
1806 if (mrt->vif_table[ct].dev == dev) in ipmr_find_vif()
1809 return ct; in ipmr_find_vif()
1819 int vif, ct; in ip_mr_forward() local
1897 for (ct = cache->mfc_un.res.maxvif - 1; in ip_mr_forward()
1898 ct >= cache->mfc_un.res.minvif; ct--) { in ip_mr_forward()
[all …]
/linux-4.1.27/include/uapi/linux/netfilter/
Dxt_CT.h20 struct nf_conn *ct __attribute__((aligned(8))); member
32 struct nf_conn *ct __attribute__((aligned(8))); member
/linux-4.1.27/arch/s390/lib/
Dstring.c203 int strcmp(const char *cs, const char *ct) in strcmp() argument
215 : "+d" (ret), "+d" (r0), "+a" (cs), "+a" (ct) in strcmp()
303 int memcmp(const void *cs, const void *ct, size_t n) in memcmp() argument
307 register unsigned long r4 asm("4") = (unsigned long) ct; in memcmp()
/linux-4.1.27/arch/blackfin/include/asm/
Dstring.h21 extern int strcmp(const char *cs, const char *ct);
24 extern int strncmp(const char *cs, const char *ct, size_t count);
/linux-4.1.27/include/linux/
Dvt_kern.h70 int con_set_unimap(struct vc_data *vc, ushort ct, struct unipair __user *list);
71 int con_get_unimap(struct vc_data *vc, ushort ct, ushort __user *uct, struct unipair __user *list);
100 int con_set_unimap(struct vc_data *vc, ushort ct, struct unipair __user *list) in con_set_unimap() argument
105 int con_get_unimap(struct vc_data *vc, ushort ct, ushort __user *uct, in con_get_unimap() argument
Dnetfilter.h366 size_t (*build_size)(const struct nf_conn *ct);
367 int (*build)(struct sk_buff *skb, struct nf_conn *ct);
368 int (*parse)(const struct nlattr *attr, struct nf_conn *ct);
369 int (*attach_expect)(const struct nlattr *attr, struct nf_conn *ct,
371 void (*seq_adjust)(struct sk_buff *skb, struct nf_conn *ct,
/linux-4.1.27/arch/arm/mach-omap2/
Dprm_common.c272 struct irq_chip_type *ct; in omap_prcm_register_chain_handler() local
337 ct = gc->chip_types; in omap_prcm_register_chain_handler()
338 ct->chip.irq_ack = irq_gc_ack_set_bit; in omap_prcm_register_chain_handler()
339 ct->chip.irq_mask = irq_gc_mask_clr_bit; in omap_prcm_register_chain_handler()
340 ct->chip.irq_unmask = irq_gc_mask_set_bit; in omap_prcm_register_chain_handler()
342 ct->regs.ack = irq_setup->ack + i * 4; in omap_prcm_register_chain_handler()
343 ct->regs.mask = irq_setup->mask + i * 4; in omap_prcm_register_chain_handler()
/linux-4.1.27/arch/m68k/include/asm/
Dstring.h42 static inline int strcmp(const char *cs, const char *ct) in strcmp() argument
55 : "+a" (cs), "+a" (ct), "=d" (res)); in strcmp()
/linux-4.1.27/arch/powerpc/perf/
Dhv-24x7.c498 unsigned ct; member
547 it->ct++; in event_uniq_add()
549 name, it->ct); in event_uniq_add()
550 return it->ct; in event_uniq_add()
561 .ct = 0, in event_uniq_add()
659 ssize_t ct, ev_len; in create_events_from_catalog() local
841 ct = event_data_to_attrs(event_idx, events + event_attr_ct, in create_events_from_catalog()
843 if (ct <= 0) { in create_events_from_catalog()
848 event_attr_ct += ct; in create_events_from_catalog()
1141 u64 ct; in h_24x7_event_init() local
[all …]
/linux-4.1.27/drivers/staging/rtl8188eu/include/
Drtw_security.h272 #define PUTU32(ct, st) { \ argument
273 (ct)[0] = (u8)((st) >> 24); (ct)[1] = (u8)((st) >> 16); \
274 (ct)[2] = (u8)((st) >> 8); (ct)[3] = (u8)(st); }
/linux-4.1.27/drivers/staging/rtl8723au/include/
Drtw_security.h254 #define PUTU32(ct, st) { \ argument
255 (ct)[0] = (u8)((st) >> 24); (ct)[1] = (u8)((st) >> 16); \
256 (ct)[2] = (u8)((st) >> 8); (ct)[3] = (u8)(st); }
/linux-4.1.27/drivers/usb/storage/
Dsddr09.c646 int result, bulklen, nsg, ct;
654 address = 040000; ct = 1;
656 bulklen += (ct << 9);
657 command[4*nsg+2] = ct;
662 address = 0340000; ct = 1;
664 bulklen += (ct << 9);
665 command[4*nsg+2] = ct;
670 address = 01000000; ct = 2;
672 bulklen += (ct << 9);
673 command[4*nsg+2] = ct;
[all …]
/linux-4.1.27/arch/x86/include/asm/
Dstring_64.h63 int memcmp(const void *cs, const void *ct, size_t count);
67 int strcmp(const char *cs, const char *ct);
Dstring_32.h21 extern int strcmp(const char *cs, const char *ct);
24 extern int strncmp(const char *cs, const char *ct, size_t count);
250 extern char *strstr(const char *cs, const char *ct);
/linux-4.1.27/drivers/tty/vt/
Dkeyboard.c1594 unsigned int ct; in vt_do_diacrit() local
1599 if (get_user(ct, &a->kb_cnt)) in vt_do_diacrit()
1601 if (ct >= MAX_DIACR) in vt_do_diacrit()
1604 if (ct) { in vt_do_diacrit()
1605 dia = kmalloc(sizeof(struct kbdiacr) * ct, in vt_do_diacrit()
1611 sizeof(struct kbdiacr) * ct)) { in vt_do_diacrit()
1618 accent_table_size = ct; in vt_do_diacrit()
1619 for (i = 0; i < ct; i++) { in vt_do_diacrit()
1635 unsigned int ct; in vt_do_diacrit() local
1641 if (get_user(ct, &a->kb_cnt)) in vt_do_diacrit()
[all …]
Dconsolemap.c530 int con_set_unimap(struct vc_data *vc, ushort ct, struct unipair __user *list) in con_set_unimap() argument
535 if (!ct) in con_set_unimap()
611 while (ct--) { in con_set_unimap()
727 int con_get_unimap(struct vc_data *vc, ushort ct, ushort __user *uct, struct unipair __user *list) in con_get_unimap() argument
743 if (*p2 < MAX_GLYPH && ect++ < ct) { in con_get_unimap()
755 return ((ect <= ct) ? 0 : -ENOMEM); in con_get_unimap()
/linux-4.1.27/drivers/media/platform/s5p-jpeg/
Djpeg-core.c1078 struct s5p_jpeg_ctx *ct = fh_to_ctx(priv); in s5p_jpeg_g_fmt() local
1080 vq = v4l2_m2m_get_vq(ct->fh.m2m_ctx, f->type); in s5p_jpeg_g_fmt()
1085 ct->mode == S5P_JPEG_DECODE && !ct->hdr_parsed) in s5p_jpeg_g_fmt()
1087 q_data = get_q_data(ct, f->type); in s5p_jpeg_g_fmt()
1319 static int s5p_jpeg_s_fmt(struct s5p_jpeg_ctx *ct, struct v4l2_format *f) in s5p_jpeg_s_fmt() argument
1328 vq = v4l2_m2m_get_vq(ct->fh.m2m_ctx, f->type); in s5p_jpeg_s_fmt()
1332 q_data = get_q_data(ct, f->type); in s5p_jpeg_s_fmt()
1336 v4l2_err(&ct->jpeg->v4l2_dev, "%s queue busy\n", __func__); in s5p_jpeg_s_fmt()
1343 q_data->fmt = s5p_jpeg_find_format(ct, pix->pixelformat, f_type); in s5p_jpeg_s_fmt()
1353 if (ct->jpeg->variant->version == SJPEG_EXYNOS4 && in s5p_jpeg_s_fmt()
[all …]
/linux-4.1.27/drivers/net/ethernet/emulex/benet/
Dbe_hw.h254 u8 ct[2]; /* dword 0 */ member
302 u8 ct[2]; /* dword 1 */ member
335 u8 ct[2]; /* dword 1 */ member
/linux-4.1.27/arch/x86/boot/
Dstring.c42 int strncmp(const char *cs, const char *ct, size_t count) in strncmp() argument
48 c2 = *ct++; in strncmp()
/linux-4.1.27/drivers/net/wan/
Dz85230.c1576 int ct; in z8530_rx_done() local
1602 ct=c->mtu-get_dma_residue(c->rxdma); in z8530_rx_done()
1603 if(ct<0) in z8530_rx_done()
1604 ct=2; /* Shit happens.. */ in z8530_rx_done()
1639 skb = dev_alloc_skb(ct); in z8530_rx_done()
1644 skb_put(skb, ct); in z8530_rx_done()
1645 skb_copy_to_linear_data(skb, rxb, ct); in z8530_rx_done()
1647 c->netdevice->stats.rx_bytes += ct; in z8530_rx_done()
1666 ct=c->count; in z8530_rx_done()
1686 c->netdevice->stats.rx_bytes += ct; in z8530_rx_done()
[all …]
/linux-4.1.27/arch/tile/include/arch/
Dmpipe_shm.h250 uint_reg_t ct : 1; member
326 uint_reg_t ct : 1;
/linux-4.1.27/arch/arc/include/asm/
Dstring.h34 extern int strcmp(const char *cs, const char *ct);
/linux-4.1.27/drivers/gpu/drm/nouveau/dispnv04/
Ddisp.c40 struct drm_connector *connector, *ct; in nv04_display_create() local
93 list_for_each_entry_safe(connector, ct, in nv04_display_create()
/linux-4.1.27/arch/powerpc/platforms/cell/
Dinterrupt.c248 static int iic_host_xlate(struct irq_domain *h, struct device_node *ct, in iic_host_xlate() argument
256 if (!of_device_is_compatible(ct, in iic_host_xlate()
261 val = of_get_property(ct, "#interrupt-cells", NULL); in iic_host_xlate()
/linux-4.1.27/drivers/media/rc/img-ir/
Dimg-ir-hw.c624 unsigned int ct; in img_ir_decoder_compatible() local
627 ct = dec->control.code_type; in img_ir_decoder_compatible()
628 if (priv->hw.ct_quirks[ct] & IMG_IR_QUIRK_CODE_BROKEN) in img_ir_decoder_compatible()
958 unsigned int ct; in img_ir_isr_hw() local
964 ct = hw->decoder->control.code_type; in img_ir_isr_hw()
968 if (!(priv->hw.ct_quirks[ct] & IMG_IR_QUIRK_CODE_IRQ) || in img_ir_isr_hw()
996 if (hw->ct_quirks[ct] & IMG_IR_QUIRK_CODE_LEN_INCR) in img_ir_isr_hw()
/linux-4.1.27/arch/s390/include/asm/
Dnmi.h56 __u32 ct : 1; /* 46 cpu timer validity */ member
