Searched refs:fl (Results 1 – 185 of 185) sorted by relevance

/linux-4.1.27/net/ipv6/
Dip6_flowlabel.c60 #define for_each_fl_rcu(hash, fl) \ argument
61 for (fl = rcu_dereference_bh(fl_ht[(hash)]); \
62 fl != NULL; \
63 fl = rcu_dereference_bh(fl->next))
64 #define for_each_fl_continue_rcu(fl) \ argument
65 for (fl = rcu_dereference_bh(fl->next); \
66 fl != NULL; \
67 fl = rcu_dereference_bh(fl->next))
76 struct ip6_flowlabel *fl; in __fl_lookup() local
78 for_each_fl_rcu(FL_HASH(label), fl) { in __fl_lookup()
[all …]
Dxfrm6_state.c24 __xfrm6_init_tempsel(struct xfrm_selector *sel, const struct flowi *fl) in __xfrm6_init_tempsel() argument
26 const struct flowi6 *fl6 = &fl->u.ip6; in __xfrm6_init_tempsel()
32 sel->dport = xfrm_flowi_dport(fl, &fl6->uli); in __xfrm6_init_tempsel()
34 sel->sport = xfrm_flowi_sport(fl, &fl6->uli); in __xfrm6_init_tempsel()
Dip6_vti.c430 vti6_xmit(struct sk_buff *skb, struct net_device *dev, struct flowi *fl) in vti6_xmit() argument
444 dst = xfrm_lookup(t->net, dst, fl, NULL, 0); in vti6_xmit()
513 struct flowi fl; in vti6_tnl_xmit() local
516 memset(&fl, 0, sizeof(fl)); in vti6_tnl_xmit()
526 xfrm_decode_session(skb, &fl, AF_INET6); in vti6_tnl_xmit()
530 xfrm_decode_session(skb, &fl, AF_INET); in vti6_tnl_xmit()
538 fl.flowi_mark = be32_to_cpu(t->parms.o_key); in vti6_tnl_xmit()
540 ret = vti6_xmit(skb, dev, &fl); in vti6_tnl_xmit()
Dxfrm6_policy.c69 static int xfrm6_get_tos(const struct flowi *fl) in xfrm6_get_tos() argument
96 const struct flowi *fl) in xfrm6_fill_dst() argument
127 _decode_session6(struct sk_buff *skb, struct flowi *fl, int reverse) in _decode_session6() argument
129 struct flowi6 *fl6 = &fl->u.ip6; in _decode_session6()
Dmip6.c211 const struct flowi *fl) in mip6_destopt_reject() argument
215 const struct flowi6 *fl6 = &fl->u.ip6; in mip6_destopt_reject()
249 sel.dport = xfrm_flowi_dport(fl, &fl6->uli); in mip6_destopt_reject()
252 sel.sport = xfrm_flowi_sport(fl, &fl6->uli); in mip6_destopt_reject()
Dnetfilter.c112 struct flowi *fl, bool strict) in nf_ip6_route() argument
124 result = ip6_route_output(net, sk, &fl->u.ip6); in nf_ip6_route()
Dfib6_rules.c150 static int fib6_rule_match(struct fib_rule *rule, struct flowi *fl, int flags) in fib6_rule_match() argument
153 struct flowi6 *fl6 = &fl->u.ip6; in fib6_rule_match()
Dip6_output.c1207 cork->fl.u.ip6 = *fl6; in ip6_setup_cork()
1570 fl6 = &inet->cork.fl.u.ip6; in ip6_append_data()
1597 memset(&cork->fl, 0, sizeof(cork->fl)); in ip6_cork_release()
1613 struct flowi6 *fl6 = &cork->fl.u.ip6; in __ip6_make_skb()
Dip6_gre.c789 memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6)); in ip6gre_xmit_ipv4()
839 memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6)); in ip6gre_xmit_ipv6()
891 memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6)); in ip6gre_xmit_other()
937 struct flowi6 *fl6 = &t->fl.u.ip6; in ip6gre_tnl_link_config()
1186 t->fl.u.ip6.flowlabel, false)); in ip6gre_header()
Dtcp_ipv6.c442 struct flowi *fl, in tcp_v6_send_synack() argument
449 struct flowi6 *fl6 = &fl->u.ip6; in tcp_v6_send_synack()
697 static struct dst_entry *tcp_v6_route_req(struct sock *sk, struct flowi *fl, in tcp_v6_route_req() argument
703 return inet6_csk_route_req(sk, &fl->u.ip6, req); in tcp_v6_route_req()
Dip6_tunnel.c1134 memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6)); in ip4ip6_tnl_xmit()
1188 memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6)); in ip6ip6_tnl_xmit()
1243 struct flowi6 *fl6 = &t->fl.u.ip6; in ip6_tnl_link_config()
Dudp.c1087 fl6 = inet_sk(sk)->cork.fl.u.ip6; in udp_v6_push_pending_frames()
/linux-4.1.27/fs/
Dlocks.c138 #define IS_POSIX(fl) (fl->fl_flags & FL_POSIX) argument
139 #define IS_FLOCK(fl) (fl->fl_flags & FL_FLOCK) argument
140 #define IS_LEASE(fl) (fl->fl_flags & (FL_LEASE|FL_DELEG|FL_LAYOUT)) argument
141 #define IS_OFDLCK(fl) (fl->fl_flags & FL_OFDLCK) argument
143 static bool lease_breaking(struct file_lock *fl) in lease_breaking() argument
145 return fl->fl_flags & (FL_UNLOCK_PENDING | FL_DOWNGRADE_PENDING); in lease_breaking()
148 static int target_leasetype(struct file_lock *fl) in target_leasetype() argument
150 if (fl->fl_flags & FL_UNLOCK_PENDING) in target_leasetype()
152 if (fl->fl_flags & FL_DOWNGRADE_PENDING) in target_leasetype()
154 return fl->fl_type; in target_leasetype()
[all …]
Dread_write.c1163 int fl; in do_sendfile() local
1215 fl = 0; in do_sendfile()
1224 fl = SPLICE_F_NONBLOCK; in do_sendfile()
1227 retval = do_splice_direct(in.file, &pos, out.file, &out_pos, count, fl); in do_sendfile()
/linux-4.1.27/include/trace/events/
Dfilelock.h39 TP_PROTO(struct inode *inode, struct file_lock *fl),
41 TP_ARGS(inode, fl),
44 __field(struct file_lock *, fl)
56 __entry->fl = fl ? fl : NULL;
59 __entry->fl_next = fl ? fl->fl_next : NULL;
60 __entry->fl_owner = fl ? fl->fl_owner : NULL;
61 __entry->fl_flags = fl ? fl->fl_flags : 0;
62 __entry->fl_type = fl ? fl->fl_type : 0;
63 __entry->fl_break_time = fl ? fl->fl_break_time : 0;
64 __entry->fl_downgrade_time = fl ? fl->fl_downgrade_time : 0;
[all …]
/linux-4.1.27/fs/afs/
Dflock.c17 static void afs_fl_copy_lock(struct file_lock *new, struct file_lock *fl);
18 static void afs_fl_release_private(struct file_lock *fl);
83 static void afs_grant_locks(struct afs_vnode *vnode, struct file_lock *fl) in afs_grant_locks() argument
87 list_move_tail(&fl->fl_u.afs.link, &vnode->granted_locks); in afs_grant_locks()
88 if (fl->fl_type == F_RDLCK) { in afs_grant_locks()
110 struct file_lock *fl; in afs_lock_work() local
144 fl = list_entry(vnode->granted_locks.next, in afs_lock_work()
146 key = key_get(fl->fl_file->private_data); in afs_lock_work()
178 fl = list_entry(vnode->pending_locks.next, in afs_lock_work()
180 key = key_get(fl->fl_file->private_data); in afs_lock_work()
[all …]
/linux-4.1.27/fs/lockd/
Dclntproc.c30 static void nlmclnt_locks_init_private(struct file_lock *fl, struct nlm_host *host);
124 static void nlmclnt_setlockargs(struct nlm_rqst *req, struct file_lock *fl) in nlmclnt_setlockargs() argument
131 memcpy(&lock->fh, NFS_FH(file_inode(fl->fl_file)), sizeof(struct nfs_fh)); in nlmclnt_setlockargs()
135 (unsigned int)fl->fl_u.nfs_fl.owner->pid, in nlmclnt_setlockargs()
137 lock->svid = fl->fl_u.nfs_fl.owner->pid; in nlmclnt_setlockargs()
138 lock->fl.fl_start = fl->fl_start; in nlmclnt_setlockargs()
139 lock->fl.fl_end = fl->fl_end; in nlmclnt_setlockargs()
140 lock->fl.fl_type = fl->fl_type; in nlmclnt_setlockargs()
145 WARN_ON_ONCE(req->a_args.lock.fl.fl_ops != NULL); in nlmclnt_release_lockargs()
155 int nlmclnt_proc(struct nlm_host *host, int cmd, struct file_lock *fl) in nlmclnt_proc() argument
[all …]
Dsvclock.c147 struct file_lock *fl; in nlmsvc_lookup_block() local
150 file, lock->fl.fl_pid, in nlmsvc_lookup_block()
151 (long long)lock->fl.fl_start, in nlmsvc_lookup_block()
152 (long long)lock->fl.fl_end, lock->fl.fl_type); in nlmsvc_lookup_block()
154 fl = &block->b_call->a_args.lock.fl; in nlmsvc_lookup_block()
156 block->b_file, fl->fl_pid, in nlmsvc_lookup_block()
157 (long long)fl->fl_start, in nlmsvc_lookup_block()
158 (long long)fl->fl_end, fl->fl_type, in nlmsvc_lookup_block()
160 if (block->b_file == file && nlm_compare_locks(fl, &lock->fl)) { in nlmsvc_lookup_block()
238 call->a_args.lock.fl.fl_flags |= FL_SLEEP; in nlmsvc_create_block()
[all …]
Dxdr4.c108 struct file_lock *fl = &lock->fl; in nlm4_decode_lock() local
119 locks_init_lock(fl); in nlm4_decode_lock()
120 fl->fl_owner = current->files; in nlm4_decode_lock()
121 fl->fl_pid = (pid_t)lock->svid; in nlm4_decode_lock()
122 fl->fl_flags = FL_POSIX; in nlm4_decode_lock()
123 fl->fl_type = F_RDLCK; /* as good as anything else */ in nlm4_decode_lock()
128 fl->fl_start = s64_to_loff_t(start); in nlm4_decode_lock()
131 fl->fl_end = OFFSET_MAX; in nlm4_decode_lock()
133 fl->fl_end = s64_to_loff_t(end); in nlm4_decode_lock()
151 struct file_lock *fl = &resp->lock.fl; in nlm4_encode_testres() local
[all …]
Dxdr.c116 struct file_lock *fl = &lock->fl; in nlm_decode_lock() local
127 locks_init_lock(fl); in nlm_decode_lock()
128 fl->fl_owner = current->files; in nlm_decode_lock()
129 fl->fl_pid = (pid_t)lock->svid; in nlm_decode_lock()
130 fl->fl_flags = FL_POSIX; in nlm_decode_lock()
131 fl->fl_type = F_RDLCK; /* as good as anything else */ in nlm_decode_lock()
136 fl->fl_start = s32_to_loff_t(start); in nlm_decode_lock()
139 fl->fl_end = OFFSET_MAX; in nlm_decode_lock()
141 fl->fl_end = s32_to_loff_t(end); in nlm_decode_lock()
158 struct file_lock *fl = &resp->lock.fl; in nlm_encode_testres() local
[all …]
Dclntlock.c98 struct nlm_wait *nlmclnt_prepare_block(struct nlm_host *host, struct file_lock *fl) in nlmclnt_prepare_block() argument
105 block->b_lock = fl; in nlmclnt_prepare_block()
164 const struct file_lock *fl = &lock->fl; in nlmclnt_grant() local
177 if (fl_blocked->fl_start != fl->fl_start) in nlmclnt_grant()
179 if (fl_blocked->fl_end != fl->fl_end) in nlmclnt_grant()
232 struct file_lock *fl, *next; in reclaimer() local
262 list_for_each_entry_safe(fl, next, &host->h_reclaim, fl_u.nfs_fl.list) { in reclaimer()
263 list_del_init(&fl->fl_u.nfs_fl.list); in reclaimer()
273 if (nlmclnt_reclaim(host, fl, req) != 0) in reclaimer()
275 list_add_tail(&fl->fl_u.nfs_fl.list, &host->h_granted); in reclaimer()
Dclntxdr.c63 const struct file_lock *fl = &lock->fl; in nlm_compute_offsets() local
65 *l_offset = loff_t_to_s32(fl->fl_start); in nlm_compute_offsets()
66 if (fl->fl_end == OFFSET_MAX) in nlm_compute_offsets()
69 *l_len = loff_t_to_s32(fl->fl_end - fl->fl_start + 1); in nlm_compute_offsets()
263 encode_bool(xdr, lock->fl.fl_type == F_RDLCK); in encode_nlm_holder()
276 struct file_lock *fl = &lock->fl; in decode_nlm_holder() local
283 locks_init_lock(fl); in decode_nlm_holder()
290 fl->fl_pid = (pid_t)lock->svid; in decode_nlm_holder()
300 fl->fl_flags = FL_POSIX; in decode_nlm_holder()
301 fl->fl_type = exclusive != 0 ? F_WRLCK : F_RDLCK; in decode_nlm_holder()
[all …]
Dclnt4xdr.c67 const struct file_lock *fl = &lock->fl; in nlm4_compute_offsets() local
69 *l_offset = loff_t_to_s64(fl->fl_start); in nlm4_compute_offsets()
70 if (fl->fl_end == OFFSET_MAX) in nlm4_compute_offsets()
73 *l_len = loff_t_to_s64(fl->fl_end - fl->fl_start + 1); in nlm4_compute_offsets()
268 encode_bool(xdr, lock->fl.fl_type == F_RDLCK); in encode_nlm4_holder()
281 struct file_lock *fl = &lock->fl; in decode_nlm4_holder() local
289 locks_init_lock(fl); in decode_nlm4_holder()
296 fl->fl_pid = (pid_t)lock->svid; in decode_nlm4_holder()
306 fl->fl_flags = FL_POSIX; in decode_nlm4_holder()
307 fl->fl_type = exclusive != 0 ? F_WRLCK : F_RDLCK; in decode_nlm4_holder()
[all …]
Dsvcsubs.c166 struct file_lock *fl; in nlm_traverse_locks() local
175 list_for_each_entry(fl, &flctx->flc_posix, fl_list) { in nlm_traverse_locks()
176 if (fl->fl_lmops != &nlmsvc_lock_operations) in nlm_traverse_locks()
182 lockhost = (struct nlm_host *) fl->fl_owner; in nlm_traverse_locks()
184 struct file_lock lock = *fl; in nlm_traverse_locks()
228 struct file_lock *fl; in nlm_file_inuse() local
236 list_for_each_entry(fl, &flctx->flc_posix, fl_list) { in nlm_file_inuse()
237 if (fl->fl_lmops == &nlmsvc_lock_operations) { in nlm_file_inuse()
Dsvc4proc.c47 lock->fl.fl_file = file->f_file; in nlm4svc_retrieve_args()
48 lock->fl.fl_owner = (fl_owner_t) host; in nlm4svc_retrieve_args()
49 lock->fl.fl_lmops = &nlmsvc_lock_operations; in nlm4svc_retrieve_args()
Dsvcproc.c77 lock->fl.fl_file = file->f_file; in nlmsvc_retrieve_args()
78 lock->fl.fl_owner = (fl_owner_t) host; in nlmsvc_retrieve_args()
79 lock->fl.fl_lmops = &nlmsvc_lock_operations; in nlmsvc_retrieve_args()
/linux-4.1.27/fs/dlm/
Dplock.c33 int (*callback)(struct file_lock *fl, int result);
34 void *fl; member
81 struct file *file, struct file_lock *fl) in do_unlock_close() argument
90 op->info.pid = fl->fl_pid; in do_unlock_close()
95 if (fl->fl_lmops && fl->fl_lmops->lm_grant) in do_unlock_close()
96 op->info.owner = (__u64) fl->fl_pid; in do_unlock_close()
98 op->info.owner = (__u64)(long) fl->fl_owner; in do_unlock_close()
105 int cmd, struct file_lock *fl) in dlm_posix_lock() argument
124 op->info.pid = fl->fl_pid; in dlm_posix_lock()
125 op->info.ex = (fl->fl_type == F_WRLCK); in dlm_posix_lock()
[all …]
/linux-4.1.27/fs/ceph/
Dlocks.c36 int cmd, u8 wait, struct file_lock *fl) in ceph_lock_message() argument
56 if (LLONG_MAX == fl->fl_end) in ceph_lock_message()
59 length = fl->fl_end - fl->fl_start + 1; in ceph_lock_message()
61 owner = secure_addr(fl->fl_owner); in ceph_lock_message()
65 (int)operation, owner, (u64)fl->fl_pid, fl->fl_start, length, in ceph_lock_message()
66 wait, fl->fl_type); in ceph_lock_message()
71 req->r_args.filelock_change.pid = cpu_to_le64((u64)fl->fl_pid); in ceph_lock_message()
72 req->r_args.filelock_change.start = cpu_to_le64(fl->fl_start); in ceph_lock_message()
82 fl->fl_pid = le64_to_cpu(req->r_reply_info.filelock_reply->pid); in ceph_lock_message()
84 fl->fl_type = F_RDLCK; in ceph_lock_message()
[all …]
Dsuper.h930 extern int ceph_lock(struct file *file, int cmd, struct file_lock *fl);
931 extern int ceph_flock(struct file *file, int cmd, struct file_lock *fl);
940 extern int lock_to_ceph_filelock(struct file_lock *fl, struct ceph_filelock *c);
/linux-4.1.27/fs/ocfs2/
Dlocks.c39 int cmd, struct file_lock *fl) in ocfs2_do_flock() argument
45 if (fl->fl_type == F_WRLCK) in ocfs2_do_flock()
84 ret = flock_lock_file_wait(file, fl); in ocfs2_do_flock()
94 static int ocfs2_do_funlock(struct file *file, int cmd, struct file_lock *fl) in ocfs2_do_funlock() argument
101 ret = flock_lock_file_wait(file, fl); in ocfs2_do_funlock()
110 int ocfs2_flock(struct file *file, int cmd, struct file_lock *fl) in ocfs2_flock() argument
115 if (!(fl->fl_flags & FL_FLOCK)) in ocfs2_flock()
122 return flock_lock_file_wait(file, fl); in ocfs2_flock()
124 if (fl->fl_type == F_UNLCK) in ocfs2_flock()
125 return ocfs2_do_funlock(file, cmd, fl); in ocfs2_flock()
[all …]
Dlocks.h29 int ocfs2_flock(struct file *file, int cmd, struct file_lock *fl);
30 int ocfs2_lock(struct file *file, int cmd, struct file_lock *fl);
Dstackglue.h220 struct file_lock *fl);
292 struct file *file, int cmd, struct file_lock *fl);
Dstack_user.c755 struct file_lock *fl) in user_plock() argument
771 fl->fl_type = F_UNLCK; in user_plock()
775 return dlm_posix_get(conn->cc_lockspace, ino, file, fl); in user_plock()
776 else if (fl->fl_type == F_UNLCK) in user_plock()
777 return dlm_posix_unlock(conn->cc_lockspace, ino, file, fl); in user_plock()
779 return dlm_posix_lock(conn->cc_lockspace, ino, file, cmd, fl); in user_plock()
Dalloc.c6473 struct ocfs2_per_slot_free_list *fl; in ocfs2_run_deallocs() local
6479 fl = ctxt->c_first_suballocator; in ocfs2_run_deallocs()
6481 if (fl->f_first) { in ocfs2_run_deallocs()
6482 trace_ocfs2_run_deallocs(fl->f_inode_type, in ocfs2_run_deallocs()
6483 fl->f_slot); in ocfs2_run_deallocs()
6485 fl->f_inode_type, in ocfs2_run_deallocs()
6486 fl->f_slot, in ocfs2_run_deallocs()
6487 fl->f_first); in ocfs2_run_deallocs()
6494 ctxt->c_first_suballocator = fl->f_next_suballocator; in ocfs2_run_deallocs()
6495 kfree(fl); in ocfs2_run_deallocs()
[all …]
Dstackglue.c302 struct file *file, int cmd, struct file_lock *fl) in ocfs2_plock() argument
306 return active_stack->sp_ops->plock(conn, ino, file, cmd, fl); in ocfs2_plock()
/linux-4.1.27/fs/9p/
Dvfs_file.c131 static int v9fs_file_lock(struct file *filp, int cmd, struct file_lock *fl) in v9fs_file_lock() argument
136 p9_debug(P9_DEBUG_VFS, "filp: %p lock: %p\n", filp, fl); in v9fs_file_lock()
139 if (__mandatory_lock(inode) && fl->fl_type != F_UNLCK) in v9fs_file_lock()
142 if ((IS_SETLK(cmd) || IS_SETLKW(cmd)) && fl->fl_type != F_UNLCK) { in v9fs_file_lock()
150 static int v9fs_file_do_lock(struct file *filp, int cmd, struct file_lock *fl) in v9fs_file_do_lock() argument
161 if ((fl->fl_flags & FL_POSIX) != FL_POSIX) in v9fs_file_do_lock()
164 res = posix_lock_file_wait(filp, fl); in v9fs_file_do_lock()
171 switch (fl->fl_type) { in v9fs_file_do_lock()
182 flock.start = fl->fl_start; in v9fs_file_do_lock()
183 if (fl->fl_end == OFFSET_MAX) in v9fs_file_do_lock()
[all …]
/linux-4.1.27/drivers/net/ethernet/chelsio/cxgb4vf/
Dsge.c247 static inline unsigned int fl_cap(const struct sge_fl *fl) in fl_cap() argument
249 return fl->size - FL_PER_EQ_UNIT; in fl_cap()
262 const struct sge_fl *fl) in fl_starving() argument
266 return fl->avail - fl->pend_cred <= s->fl_starve_thres; in fl_starving()
473 static void free_rx_bufs(struct adapter *adapter, struct sge_fl *fl, int n) in free_rx_bufs() argument
476 struct rx_sw_desc *sdesc = &fl->sdesc[fl->cidx]; in free_rx_bufs()
484 if (++fl->cidx == fl->size) in free_rx_bufs()
485 fl->cidx = 0; in free_rx_bufs()
486 fl->avail--; in free_rx_bufs()
503 static void unmap_rx_buf(struct adapter *adapter, struct sge_fl *fl) in unmap_rx_buf() argument
[all …]
Dcxgb4vf_main.c580 &rxq->fl, t4vf_ethrx_handler); in setup_sge_queues()
621 rxq->fl.abs_id = rxq->fl.cntxt_id + s->egr_base; in setup_sge_queues()
622 EQ_MAP(s, rxq->fl.abs_id) = &rxq->fl; in setup_sge_queues()
1388 rp->rx_pending = s->ethrxq[pi->first_qset].fl.size - MIN_FL_RESID; in cxgb4vf_get_ringparam()
1421 s->ethrxq[qs].fl.size = rp->rx_pending + MIN_FL_RESID; in cxgb4vf_set_ringparam()
1780 R("FL ID:", fl.abs_id); in sge_qinfo_show()
1781 R("FL size:", fl.size - MIN_FL_RESID); in sge_qinfo_show()
1782 R("FL avail:", fl.avail); in sge_qinfo_show()
1783 R("FL PIdx:", fl.pidx); in sge_qinfo_show()
1784 R("FL CIdx:", fl.cidx); in sge_qinfo_show()
[all …]
Dadapter.h212 struct sge_fl fl; /* Free List */ member
/linux-4.1.27/fs/nfs/filelayout/
Dfilelayout.c563 struct nfs4_filelayout_segment *fl, in filelayout_check_layout() argument
582 if (fl->pattern_offset > lgr->range.offset) { in filelayout_check_layout()
584 __func__, fl->pattern_offset); in filelayout_check_layout()
588 if (!fl->stripe_unit) { in filelayout_check_layout()
590 __func__, fl->stripe_unit); in filelayout_check_layout()
605 fl->dsaddr = dsaddr; in filelayout_check_layout()
607 if (fl->first_stripe_index >= dsaddr->stripe_count) { in filelayout_check_layout()
609 __func__, fl->first_stripe_index); in filelayout_check_layout()
613 if ((fl->stripe_type == STRIPE_SPARSE && in filelayout_check_layout()
614 fl->num_fh > 1 && fl->num_fh != dsaddr->ds_num) || in filelayout_check_layout()
[all …]
/linux-4.1.27/drivers/mtd/
Dredboot.c66 struct fis_list *fl = NULL, *tmp_fl; in parse_redboot_partitions() local
207 prev = &fl; in parse_redboot_partitions()
216 if (fl->img->flash_base) { in parse_redboot_partitions()
221 for (tmp_fl = fl; tmp_fl->next; tmp_fl = tmp_fl->next) { in parse_redboot_partitions()
246 if (fl->img->flash_base) { in parse_redboot_partitions()
248 parts[0].size = fl->img->flash_base; in parse_redboot_partitions()
254 parts[i].size = fl->img->size; in parse_redboot_partitions()
255 parts[i].offset = fl->img->flash_base; in parse_redboot_partitions()
258 strcpy(names, fl->img->name); in parse_redboot_partitions()
269 …if(fl->next && fl->img->flash_base + fl->img->size + master->erasesize <= fl->next->img->flash_bas… in parse_redboot_partitions()
[all …]
/linux-4.1.27/scripts/kconfig/
Dkxgettext.c137 struct file_line *fl = file_line__new(file, lineno); in message__add_file_line() local
139 if (fl == NULL) in message__add_file_line()
142 fl->next = self->files; in message__add_file_line()
143 self->files = fl; in message__add_file_line()
191 struct file_line *fl = self->files; in message__print_file_lineno() local
197 printf("#: %s:%d", fl->file, fl->lineno); in message__print_file_lineno()
198 fl = fl->next; in message__print_file_lineno()
200 while (fl != NULL) { in message__print_file_lineno()
201 printf(", %s:%d", fl->file, fl->lineno); in message__print_file_lineno()
202 fl = fl->next; in message__print_file_lineno()
/linux-4.1.27/fs/nfs/
Dfile.c731 do_getlk(struct file *filp, int cmd, struct file_lock *fl, int is_local) in do_getlk() argument
735 unsigned int saved_type = fl->fl_type; in do_getlk()
738 posix_test_lock(filp, fl); in do_getlk()
739 if (fl->fl_type != F_UNLCK) { in do_getlk()
743 fl->fl_type = saved_type; in do_getlk()
751 status = NFS_PROTO(inode)->lock(filp, cmd, fl); in do_getlk()
755 fl->fl_type = F_UNLCK; in do_getlk()
759 static int do_vfs_lock(struct file *file, struct file_lock *fl) in do_vfs_lock() argument
762 switch (fl->fl_flags & (FL_POSIX|FL_FLOCK)) { in do_vfs_lock()
764 res = posix_lock_file_wait(file, fl); in do_vfs_lock()
[all …]
Dproc.c637 nfs_proc_lock(struct file *filp, int cmd, struct file_lock *fl) in nfs_proc_lock() argument
641 return nlmclnt_proc(NFS_SERVER(inode)->nlm_host, cmd, fl); in nfs_proc_lock()
646 static int nfs_lock_check_bounds(const struct file_lock *fl) in nfs_lock_check_bounds() argument
650 start = (__s32)fl->fl_start; in nfs_lock_check_bounds()
651 if ((loff_t)start != fl->fl_start) in nfs_lock_check_bounds()
654 if (fl->fl_end != OFFSET_MAX) { in nfs_lock_check_bounds()
655 end = (__s32)fl->fl_end; in nfs_lock_check_bounds()
656 if ((loff_t)end != fl->fl_end) in nfs_lock_check_bounds()
Ddelegation.h58 int nfs4_lock_delegation_recall(struct file_lock *fl, struct nfs4_state *state, const nfs4_stateid …
Dwrite.c1178 is_whole_file_wrlock(struct file_lock *fl) in is_whole_file_wrlock() argument
1180 return fl->fl_start == 0 && fl->fl_end == OFFSET_MAX && in is_whole_file_wrlock()
1181 fl->fl_type == F_WRLCK; in is_whole_file_wrlock()
1196 struct file_lock *fl; in nfs_can_extend_write() local
1212 fl = list_first_entry(&flctx->flc_posix, struct file_lock, in nfs_can_extend_write()
1214 if (is_whole_file_wrlock(fl)) in nfs_can_extend_write()
1217 fl = list_first_entry(&flctx->flc_flock, struct file_lock, in nfs_can_extend_write()
1219 if (fl->fl_type == F_WRLCK) in nfs_can_extend_write()
Dnfs4state.c917 static void nfs4_fl_release_lock(struct file_lock *fl) in nfs4_fl_release_lock() argument
919 nfs4_put_lock_state(fl->fl_u.nfs4_fl.owner); in nfs4_fl_release_lock()
927 int nfs4_set_lock_state(struct nfs4_state *state, struct file_lock *fl) in nfs4_set_lock_state() argument
931 if (fl->fl_ops != NULL) in nfs4_set_lock_state()
933 lsp = nfs4_get_lock_state(state, fl->fl_owner); in nfs4_set_lock_state()
936 fl->fl_u.nfs4_fl.owner = lsp; in nfs4_set_lock_state()
937 fl->fl_ops = &nfs4_fl_lock_ops; in nfs4_set_lock_state()
1388 struct file_lock *fl; in nfs4_reclaim_locks() local
1402 list_for_each_entry(fl, list, fl_list) { in nfs4_reclaim_locks()
1403 if (nfs_file_open_context(fl->fl_file)->state != state) in nfs4_reclaim_locks()
[all …]
Ddelegation.c87 struct file_lock *fl; in nfs_delegation_claim_locks() local
98 list_for_each_entry(fl, list, fl_list) { in nfs_delegation_claim_locks()
99 if (nfs_file_open_context(fl->fl_file) != ctx) in nfs_delegation_claim_locks()
102 status = nfs4_lock_delegation_recall(fl, state, stateid); in nfs_delegation_claim_locks()
Dnfs4proc.c5321 .fl = request, in _nfs4_proc_getlk()
5370 static int do_vfs_lock(struct inode *inode, struct file_lock *fl) in do_vfs_lock() argument
5373 switch (fl->fl_flags & (FL_POSIX|FL_FLOCK)) { in do_vfs_lock()
5375 res = posix_lock_inode_wait(inode, fl); in do_vfs_lock()
5378 res = flock_lock_inode_wait(inode, fl); in do_vfs_lock()
5391 struct file_lock fl; member
5396 static struct nfs4_unlockdata *nfs4_alloc_unlockdata(struct file_lock *fl, in nfs4_alloc_unlockdata() argument
5408 p->arg.fl = &p->fl; in nfs4_alloc_unlockdata()
5415 memcpy(&p->fl, fl, sizeof(p->fl)); in nfs4_alloc_unlockdata()
5438 do_vfs_lock(calldata->lsp->ls_state->inode, &calldata->fl); in nfs4_locku_done()
[all …]
Dnfs4xdr.c1264 static inline int nfs4_lock_type(struct file_lock *fl, int block) in nfs4_lock_type() argument
1266 if (fl->fl_type == F_RDLCK) in nfs4_lock_type()
1271 static inline uint64_t nfs4_lock_length(struct file_lock *fl) in nfs4_lock_length() argument
1273 if (fl->fl_end == OFFSET_MAX) in nfs4_lock_length()
1275 return fl->fl_end - fl->fl_start + 1; in nfs4_lock_length()
1300 *p++ = cpu_to_be32(nfs4_lock_type(args->fl, args->block)); in encode_lock()
1302 p = xdr_encode_hyper(p, args->fl->fl_start); in encode_lock()
1303 p = xdr_encode_hyper(p, nfs4_lock_length(args->fl)); in encode_lock()
1323 *p++ = cpu_to_be32(nfs4_lock_type(args->fl, 0)); in encode_lockt()
1324 p = xdr_encode_hyper(p, args->fl->fl_start); in encode_lockt()
[all …]
Dnfs4_fs.h439 extern int nfs4_set_lock_state(struct nfs4_state *state, struct file_lock *fl);
Dnfs3proc.c869 nfs3_proc_lock(struct file *filp, int cmd, struct file_lock *fl) in nfs3_proc_lock() argument
873 return nlmclnt_proc(NFS_SERVER(inode)->nlm_host, cmd, fl); in nfs3_proc_lock()
/linux-4.1.27/net/openvswitch/
Dvport.h244 struct flowi4 *fl, in ovs_tunnel_route_lookup() argument
249 memset(fl, 0, sizeof(*fl)); in ovs_tunnel_route_lookup()
250 fl->daddr = key->ipv4_dst; in ovs_tunnel_route_lookup()
251 fl->saddr = key->ipv4_src; in ovs_tunnel_route_lookup()
252 fl->flowi4_tos = RT_TOS(key->ipv4_tos); in ovs_tunnel_route_lookup()
253 fl->flowi4_mark = mark; in ovs_tunnel_route_lookup()
254 fl->flowi4_proto = protocol; in ovs_tunnel_route_lookup()
256 rt = ip_route_output_key(net, fl); in ovs_tunnel_route_lookup()
Dvport-gre.c138 struct flowi4 fl; in gre_tnl_send() local
151 rt = ovs_tunnel_route_lookup(net, tun_key, skb->mark, &fl, IPPROTO_GRE); in gre_tnl_send()
191 return iptunnel_xmit(skb->sk, rt, skb, fl.saddr, in gre_tnl_send()
Dvport-geneve.c180 struct flowi4 fl; in geneve_tnl_send() local
192 rt = ovs_tunnel_route_lookup(net, tun_key, skb->mark, &fl, IPPROTO_UDP); in geneve_tnl_send()
211 err = geneve_xmit_skb(geneve_port->gs, rt, skb, fl.saddr, in geneve_tnl_send()
Dvport-vxlan.c230 struct flowi4 fl; in vxlan_tnl_send() local
242 rt = ovs_tunnel_route_lookup(net, tun_key, skb->mark, &fl, IPPROTO_UDP); in vxlan_tnl_send()
259 err = vxlan_xmit_skb(rt, sk, skb, fl.saddr, tun_key->ipv4_dst, in vxlan_tnl_send()
Dvport.c585 struct flowi4 fl; in ovs_tunnel_get_egress_info() local
596 rt = ovs_tunnel_route_lookup(net, tun_key, skb_mark, &fl, ipproto); in ovs_tunnel_get_egress_info()
606 fl.saddr, tun_key->ipv4_dst, in ovs_tunnel_get_egress_info()
Dactions.c350 static void set_ipv6_fl(struct ipv6hdr *nh, u32 fl, u32 mask) in set_ipv6_fl() argument
353 SET_MASKED(nh->flow_lbl[0], (u8)(fl >> 16), (u8)(mask >> 16)); in set_ipv6_fl()
354 SET_MASKED(nh->flow_lbl[1], (u8)(fl >> 8), (u8)(mask >> 8)); in set_ipv6_fl()
355 SET_MASKED(nh->flow_lbl[2], (u8)fl, (u8)mask); in set_ipv6_fl()
/linux-4.1.27/drivers/net/ethernet/chelsio/cxgb3/
Dsge.c169 return container_of(q, struct sge_qset, fl[qidx]); in fl_to_qset()
558 static inline void __refill_fl(struct adapter *adap, struct sge_fl *fl) in __refill_fl() argument
560 refill_fl(adap, fl, min(MAX_RX_REFILL, fl->size - fl->credits), in __refill_fl()
653 memset(q->fl, 0, sizeof(struct sge_fl) * SGE_RXQ_PER_SET); in t3_reset_qset()
678 if (q->fl[i].desc) { in t3_free_qset()
680 t3_sge_disable_fl(adapter, q->fl[i].cntxt_id); in t3_free_qset()
682 free_rx_bufs(pdev, &q->fl[i]); in t3_free_qset()
683 kfree(q->fl[i].sdesc); in t3_free_qset()
685 q->fl[i].size * in t3_free_qset()
686 sizeof(struct rx_desc), q->fl[i].desc, in t3_free_qset()
[all …]
Dadapter.h204 struct sge_fl fl[SGE_RXQ_PER_SET]; member
Dcxgb3_main.c2750 qs->fl[i].empty += (v & 1); in t3_adap_check_task()
/linux-4.1.27/arch/mips/kernel/
Drtlx.c242 size_t lx_write, fl = 0L; in rtlx_read() local
261 fl = min(count, (size_t)lx->buffer_size - lx->lx_read); in rtlx_read()
263 failed = copy_to_user(buff, lx->lx_buffer + lx->lx_read, fl); in rtlx_read()
268 if (count - fl) in rtlx_read()
269 failed = copy_to_user(buff + fl, lx->lx_buffer, count - fl); in rtlx_read()
287 size_t fl; in rtlx_write() local
303 fl = min(count, (size_t) rt->buffer_size - rt->rt_write); in rtlx_write()
305 failed = copy_from_user(rt->rt_buffer + rt->rt_write, buffer, fl); in rtlx_write()
310 if (count - fl) in rtlx_write()
311 failed = copy_from_user(rt->rt_buffer, buffer + fl, count - fl); in rtlx_write()
/linux-4.1.27/drivers/net/ethernet/chelsio/cxgb4/
Dsge.c240 static inline unsigned int fl_cap(const struct sge_fl *fl) in fl_cap() argument
242 return fl->size - 8; /* 1 descriptor = 8 buffers */ in fl_cap()
255 const struct sge_fl *fl) in fl_starving() argument
259 return fl->avail - fl->pend_cred <= s->fl_starve_thres; in fl_starving()
675 static inline void __refill_fl(struct adapter *adap, struct sge_fl *fl) in __refill_fl() argument
677 refill_fl(adap, fl, min(MAX_RX_REFILL, fl_cap(fl) - fl->avail), in __refill_fl()
2023 free_rx_bufs(q->adap, &rxq->fl, 1); in process_responses()
2032 rsd = &rxq->fl.sdesc[rxq->fl.cidx]; in process_responses()
2040 unmap_rx_buf(q->adap, &rxq->fl); in process_responses()
2060 restore_rx_bufs(&si, &rxq->fl, frags); in process_responses()
[all …]
Dcxgb4_debugfs.c1741 R("FL ID:", fl.cntxt_id); in sge_qinfo_show()
1742 R("FL size:", fl.size - 8); in sge_qinfo_show()
1743 R("FL pend:", fl.pend_cred); in sge_qinfo_show()
1744 R("FL avail:", fl.avail); in sge_qinfo_show()
1745 R("FL PIDX:", fl.pidx); in sge_qinfo_show()
1746 R("FL CIDX:", fl.cidx); in sge_qinfo_show()
1766 R("FL ID:", fl.cntxt_id); in sge_qinfo_show()
1767 R("FL size:", fl.size - 8); in sge_qinfo_show()
1768 R("FL pend:", fl.pend_cred); in sge_qinfo_show()
1769 R("FL avail:", fl.avail); in sge_qinfo_show()
[all …]
Dcxgb4.h530 struct sge_fl fl; member
543 struct sge_fl fl; member
1058 struct sge_fl *fl, rspq_handler_t hnd);
Dcxgb4_ethtool.c490 e->rx_pending = s->ethrxq[pi->first_qset].fl.size - 8; in get_sge_param()
515 s->ethrxq[pi->first_qset + i].fl.size = e->rx_pending + 8; in set_sge_param()
Dcxgb4_main.c979 msi_idx, q->fl.size ? &q->fl : NULL, in alloc_ofld_rxqs()
1046 msi_idx, &q->fl, in setup_sge_queues()
4197 r->fl.size = 72; in cfg_queues()
4214 r->fl.size = 72; in cfg_queues()
4222 r->fl.size = 72; in cfg_queues()
/linux-4.1.27/fs/nfsd/
Dnfs4layouts.c159 struct file_lock *fl; in nfsd4_layout_setlease() local
162 fl = locks_alloc_lock(); in nfsd4_layout_setlease()
163 if (!fl) in nfsd4_layout_setlease()
165 locks_init_lock(fl); in nfsd4_layout_setlease()
166 fl->fl_lmops = &nfsd4_layouts_lm_ops; in nfsd4_layout_setlease()
167 fl->fl_flags = FL_LAYOUT; in nfsd4_layout_setlease()
168 fl->fl_type = F_RDLCK; in nfsd4_layout_setlease()
169 fl->fl_end = OFFSET_MAX; in nfsd4_layout_setlease()
170 fl->fl_owner = ls; in nfsd4_layout_setlease()
171 fl->fl_pid = current->tgid; in nfsd4_layout_setlease()
[all …]
Dnfs4state.c3604 nfsd_break_deleg_cb(struct file_lock *fl) in nfsd_break_deleg_cb() argument
3607 struct nfs4_file *fp = (struct nfs4_file *)fl->fl_owner; in nfsd_break_deleg_cb()
3611 WARN(1, "(%p)->fl_owner NULL\n", fl); in nfsd_break_deleg_cb()
3623 fl->fl_break_time = 0; in nfsd_break_deleg_cb()
3982 struct file_lock *fl; in nfs4_alloc_init_lease() local
3984 fl = locks_alloc_lock(); in nfs4_alloc_init_lease()
3985 if (!fl) in nfs4_alloc_init_lease()
3987 fl->fl_lmops = &nfsd_lease_mng_ops; in nfs4_alloc_init_lease()
3988 fl->fl_flags = FL_DELEG; in nfs4_alloc_init_lease()
3989 fl->fl_type = flag == NFS4_OPEN_DELEGATE_READ? F_RDLCK: F_WRLCK; in nfs4_alloc_init_lease()
[all …]
/linux-4.1.27/net/xfrm/
Dxfrm_policy.c63 __xfrm4_selector_match(const struct xfrm_selector *sel, const struct flowi *fl) in __xfrm4_selector_match() argument
65 const struct flowi4 *fl4 = &fl->u.ip4; in __xfrm4_selector_match()
69 !((xfrm_flowi_dport(fl, &fl4->uli) ^ sel->dport) & sel->dport_mask) && in __xfrm4_selector_match()
70 !((xfrm_flowi_sport(fl, &fl4->uli) ^ sel->sport) & sel->sport_mask) && in __xfrm4_selector_match()
76 __xfrm6_selector_match(const struct xfrm_selector *sel, const struct flowi *fl) in __xfrm6_selector_match() argument
78 const struct flowi6 *fl6 = &fl->u.ip6; in __xfrm6_selector_match()
82 !((xfrm_flowi_dport(fl, &fl6->uli) ^ sel->dport) & sel->dport_mask) && in __xfrm6_selector_match()
83 !((xfrm_flowi_sport(fl, &fl6->uli) ^ sel->sport) & sel->sport_mask) && in __xfrm6_selector_match()
88 bool xfrm_selector_match(const struct xfrm_selector *sel, const struct flowi *fl, in xfrm_selector_match() argument
93 return __xfrm4_selector_match(sel, fl); in xfrm_selector_match()
[all …]
Dxfrm_state.c633 xfrm_init_tempstate(struct xfrm_state *x, const struct flowi *fl, in xfrm_init_tempstate() argument
641 afinfo->init_tempsel(&x->sel, fl); in xfrm_init_tempstate()
727 const struct flowi *fl, unsigned short family, in xfrm_state_look_at() argument
744 !xfrm_selector_match(&x->sel, fl, x->sel.family)) || in xfrm_state_look_at()
745 !security_xfrm_state_pol_flow_match(x, pol, fl)) in xfrm_state_look_at()
757 if (xfrm_selector_match(&x->sel, fl, x->sel.family) && in xfrm_state_look_at()
758 security_xfrm_state_pol_flow_match(x, pol, fl)) in xfrm_state_look_at()
765 const struct flowi *fl, struct xfrm_tmpl *tmpl, in xfrm_state_find() argument
793 xfrm_state_look_at(pol, x, fl, encap_family, in xfrm_state_find()
809 xfrm_state_look_at(pol, x, fl, encap_family, in xfrm_state_find()
[all …]
/linux-4.1.27/include/linux/
Ddlm_plock.h14 int cmd, struct file_lock *fl);
16 struct file_lock *fl);
18 struct file_lock *fl);
Dnetfilter.h264 struct flowi *fl, bool strict);
317 nf_nat_decode_session(struct sk_buff *skb, struct flowi *fl, u_int8_t family) in nf_nat_decode_session() argument
325 decodefn(skb, fl); in nf_nat_decode_session()
351 nf_nat_decode_session(struct sk_buff *skb, struct flowi *fl, u_int8_t family) in nf_nat_decode_session() argument
Dfs.h1030 void locks_free_lock(struct file_lock *fl);
1044 extern int vfs_cancel_lock(struct file *filp, struct file_lock *fl);
1045 extern int flock_lock_inode_wait(struct inode *inode, struct file_lock *fl);
1095 static inline void locks_init_lock(struct file_lock *fl) in locks_init_lock() argument
1100 static inline void locks_copy_conflock(struct file_lock *new, struct file_lock *fl) in locks_copy_conflock() argument
1105 static inline void locks_copy_lock(struct file_lock *new, struct file_lock *fl) in locks_copy_lock() argument
1120 static inline void posix_test_lock(struct file *filp, struct file_lock *fl) in posix_test_lock() argument
1125 static inline int posix_lock_file(struct file *filp, struct file_lock *fl, in posix_lock_file() argument
1132 struct file_lock *fl) in posix_lock_inode_wait() argument
1142 static inline int vfs_test_lock(struct file *filp, struct file_lock *fl) in vfs_test_lock() argument
[all …]
Dsecurity.h1712 void (*req_classify_flow) (const struct request_sock *req, struct flowi *fl);
1737 const struct flowi *fl);
2717 void security_sk_classify_flow(struct sock *sk, struct flowi *fl);
2718 void security_req_classify_flow(const struct request_sock *req, struct flowi *fl);
2858 static inline void security_sk_classify_flow(struct sock *sk, struct flowi *fl) in security_sk_classify_flow() argument
2862 static inline void security_req_classify_flow(const struct request_sock *req, struct flowi *fl) in security_req_classify_flow() argument
2944 const struct flowi *fl);
2946 void security_skb_classify_flow(struct sk_buff *skb, struct flowi *fl);
2998 struct xfrm_policy *xp, const struct flowi *fl) in security_xfrm_state_pol_flow_match() argument
3008 static inline void security_skb_classify_flow(struct sk_buff *skb, struct flowi *fl) in security_skb_classify_flow() argument
Dnfs_xdr.h424 struct file_lock * fl; member
446 struct file_lock * fl; member
460 struct file_lock * fl; member
/linux-4.1.27/drivers/net/ethernet/chelsio/cxgb/
Dsge.c1003 static void recycle_fl_buf(struct freelQ *fl, int idx) in recycle_fl_buf() argument
1005 struct freelQ_e *from = &fl->entries[idx]; in recycle_fl_buf()
1006 struct freelQ_e *to = &fl->entries[fl->pidx]; in recycle_fl_buf()
1008 fl->centries[fl->pidx] = fl->centries[idx]; in recycle_fl_buf()
1011 to->len_gen = G_CMD_LEN(from->len_gen) | V_CMD_GEN1(fl->genbit); in recycle_fl_buf()
1013 to->gen2 = V_CMD_GEN2(fl->genbit); in recycle_fl_buf()
1014 fl->credits++; in recycle_fl_buf()
1016 if (++fl->pidx == fl->size) { in recycle_fl_buf()
1017 fl->pidx = 0; in recycle_fl_buf()
1018 fl->genbit ^= 1; in recycle_fl_buf()
[all …]
/linux-4.1.27/fs/gfs2/
Dfile.c985 static int gfs2_lock(struct file *file, int cmd, struct file_lock *fl) in gfs2_lock() argument
991 if (!(fl->fl_flags & FL_POSIX)) in gfs2_lock()
993 if (__mandatory_lock(&ip->i_inode) && fl->fl_type != F_UNLCK) in gfs2_lock()
999 fl->fl_type = F_UNLCK; in gfs2_lock()
1002 if (fl->fl_type == F_UNLCK) in gfs2_lock()
1003 posix_lock_file_wait(file, fl); in gfs2_lock()
1007 return dlm_posix_get(ls->ls_dlm, ip->i_no_addr, file, fl); in gfs2_lock()
1008 else if (fl->fl_type == F_UNLCK) in gfs2_lock()
1009 return dlm_posix_unlock(ls->ls_dlm, ip->i_no_addr, file, fl); in gfs2_lock()
1011 return dlm_posix_lock(ls->ls_dlm, ip->i_no_addr, file, cmd, fl); in gfs2_lock()
[all …]
/linux-4.1.27/arch/powerpc/kernel/
Drtas_flash.c320 struct flash_block_list *fl; in rtas_flash_write() local
337 fl = uf->flist; in rtas_flash_write()
338 while (fl->next) in rtas_flash_write()
339 fl = fl->next; /* seek to last block_list for append */ in rtas_flash_write()
340 next_free = fl->num_blocks; in rtas_flash_write()
343 fl->next = kmem_cache_zalloc(flash_block_cache, GFP_KERNEL); in rtas_flash_write()
344 if (!fl->next) in rtas_flash_write()
346 fl = fl->next; in rtas_flash_write()
361 fl->blocks[next_free].data = p; in rtas_flash_write()
362 fl->blocks[next_free].length = count; in rtas_flash_write()
[all …]
/linux-4.1.27/include/net/
Dxfrm.h292 struct flowi *fl,
294 int (*get_tos)(const struct flowi *fl);
302 const struct flowi *fl);
327 const struct flowi *fl);
855 __be16 xfrm_flowi_sport(const struct flowi *fl, const union flowi_uli *uli) in xfrm_flowi_sport() argument
858 switch(fl->flowi_proto) { in xfrm_flowi_sport()
882 __be16 xfrm_flowi_dport(const struct flowi *fl, const union flowi_uli *uli) in xfrm_flowi_dport() argument
885 switch(fl->flowi_proto) { in xfrm_flowi_dport()
906 const struct flowi *fl, unsigned short family);
1105 int __xfrm_decode_session(struct sk_buff *skb, struct flowi *fl,
[all …]
Ddst.h524 const struct flowi *fl, struct sock *sk, in xfrm_lookup() argument
532 const struct flowi *fl, in xfrm_lookup_route() argument
546 const struct flowi *fl, struct sock *sk,
550 const struct flowi *fl, struct sock *sk,
Ddn_fib.h150 #define dn_fib_lookup(fl, res) (-ESRCH) argument
152 #define dn_fib_select_multipath(fl, res) do { } while(0) argument
Dinet6_connection_sock.h42 int inet6_csk_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl);
Dip6_tunnel.h41 struct flowi fl; /* flowi template for xmit */ member
Dipv6.h250 struct ip6_flowlabel *fl; member
274 struct ip6_flowlabel *fl,
283 static inline void fl6_sock_release(struct ip6_flowlabel *fl) in fl6_sock_release() argument
285 if (fl) in fl6_sock_release()
286 atomic_dec(&fl->users); in fl6_sock_release()
Dinet_sock.h131 struct flowi fl; member
Dinet_connection_sock.h39 int (*queue_xmit)(struct sock *sk, struct sk_buff *skb, struct flowi *fl);
Dip.h122 int ip_queue_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl);
Dsch_generic.h406 void tcf_destroy_chain(struct tcf_proto __rcu **fl);
Dtcp.h1648 struct dst_entry *(*route_req)(struct sock *sk, struct flowi *fl,
1653 struct flowi *fl, struct request_sock *req,
/linux-4.1.27/net/ipv4/
Dxfrm4_state.c25 __xfrm4_init_tempsel(struct xfrm_selector *sel, const struct flowi *fl) in __xfrm4_init_tempsel() argument
27 const struct flowi4 *fl4 = &fl->u.ip4; in __xfrm4_init_tempsel()
31 sel->dport = xfrm_flowi_dport(fl, &fl4->uli); in __xfrm4_init_tempsel()
33 sel->sport = xfrm_flowi_sport(fl, &fl4->uli); in __xfrm4_init_tempsel()
Dip_vti.c153 struct flowi *fl) in vti_xmit() argument
167 dst = xfrm_lookup(tunnel->net, dst, fl, NULL, 0); in vti_xmit()
220 struct flowi fl; in vti_tunnel_xmit() local
222 memset(&fl, 0, sizeof(fl)); in vti_tunnel_xmit()
226 xfrm_decode_session(skb, &fl, AF_INET); in vti_tunnel_xmit()
230 xfrm_decode_session(skb, &fl, AF_INET6); in vti_tunnel_xmit()
240 fl.flowi_mark = be32_to_cpu(tunnel->parms.o_key); in vti_tunnel_xmit()
242 return vti_xmit(skb, dev, &fl); in vti_tunnel_xmit()
Dxfrm4_policy.c65 static int xfrm4_get_tos(const struct flowi *fl) in xfrm4_get_tos() argument
67 return IPTOS_RT_MASK & fl->u.ip4.flowi4_tos; /* Strip ECN bits */ in xfrm4_get_tos()
77 const struct flowi *fl) in xfrm4_fill_dst() argument
80 const struct flowi4 *fl4 = &fl->u.ip4; in xfrm4_fill_dst()
102 _decode_session4(struct sk_buff *skb, struct flowi *fl, int reverse) in _decode_session4() argument
106 struct flowi4 *fl4 = &fl->u.ip4; in _decode_session4()
Dnetfilter.c177 struct flowi *fl, bool strict __always_unused) in nf_ip_route() argument
179 struct rtable *rt = ip_route_output_key(net, &fl->u.ip4); in nf_ip_route()
Dfib_rules.c134 static int fib4_rule_match(struct fib_rule *rule, struct flowi *fl, int flags) in fib4_rule_match() argument
137 struct flowi4 *fl4 = &fl->u.ip4; in fib4_rule_match()
Dinet_connection_sock.c444 fl4 = &newinet->cork.fl.u.ip4; in inet_csk_route_child_sock()
935 static struct dst_entry *inet_csk_rebuild_route(struct sock *sk, struct flowi *fl) in inet_csk_rebuild_route() argument
947 fl4 = &fl->u.ip4; in inet_csk_rebuild_route()
967 dst = inet_csk_rebuild_route(sk, &inet->cork.fl); in inet_csk_update_pmtu()
975 dst = inet_csk_rebuild_route(sk, &inet->cork.fl); in inet_csk_update_pmtu()
Ddatagram.c50 fl4 = &inet->cork.fl.u.ip4; in __ip4_datagram_connect()
Dsyncookies.c399 inet_sk(ret)->cork.fl.u.ip4 = fl4; in cookie_v4_check()
Dtcp_ipv4.c170 fl4 = &inet->cork.fl.u.ip4; in tcp_v4_connect()
825 struct flowi *fl, in tcp_v4_send_synack() argument
1186 static struct dst_entry *tcp_v4_route_req(struct sock *sk, struct flowi *fl, in tcp_v4_route_req() argument
1190 struct dst_entry *dst = inet_csk_route_req(sk, &fl->u.ip4, req); in tcp_v4_route_req()
1193 if (fl->u.ip4.daddr == inet_rsk(req)->ir_rmt_addr) in tcp_v4_route_req()
Dudp.c859 struct flowi4 *fl4 = &inet->cork.fl.u.ip4; in udp_push_pending_frames()
913 fl4 = &inet->cork.fl.u.ip4; in udp_sendmsg()
1075 fl4 = &inet->cork.fl.u.ip4; in udp_sendmsg()
1154 ret = ip_append_page(sk, &inet->cork.fl.u.ip4, in udp_sendpage()
Daf_inet.c1121 fl4 = &inet->cork.fl.u.ip4; in inet_sk_reselect_saddr()
1174 fl4 = &inet->cork.fl.u.ip4; in inet_sk_rebuild_header()
Dip_output.c370 int ip_queue_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl) in ip_queue_xmit() argument
384 fl4 = &fl->u.ip4; in ip_queue_xmit()
Dtcp_output.c1031 err = icsk->icsk_af_ops->queue_xmit(sk, skb, &inet->cork.fl); in tcp_transmit_skb()
3502 struct flowi fl; in tcp_rtx_synack() local
3505 res = af_ops->send_synack(sk, NULL, &fl, req, 0, NULL); in tcp_rtx_synack()
Dtcp_input.c6097 struct flowi fl; in tcp_conn_request() local
6162 dst = af_ops->route_req(sk, &fl, req, &strict); in tcp_conn_request()
6192 dst = af_ops->route_req(sk, &fl, req, NULL); in tcp_conn_request()
6210 err = af_ops->send_synack(sk, dst, &fl, req, in tcp_conn_request()
Dipmr.c198 static int ipmr_rule_match(struct fib_rule *rule, struct flowi *fl, int flags) in ipmr_rule_match() argument
/linux-4.1.27/drivers/infiniband/hw/ehca/
Dehca_sqp.c132 u32 fl:20; member
139 u32 fl:20; member
195 tcslfl->fl = vertcfl->fl; in ehca_process_perf()
/linux-4.1.27/drivers/scsi/csiostor/
Dcsio_wr.c87 csio_wr_reg32(hw, DBPRIO_F | QID_V(flq->un.fl.flid) | in csio_wr_ring_fldb()
119 struct csio_dma_buf *buf = &flq->un.fl.bufs[0]; in csio_wr_fill_fl()
121 int sreg = flq->un.fl.sreg; in csio_wr_fill_fl()
279 flq->un.fl.bufs = kzalloc(flq->credits * in csio_wr_alloc_q()
282 if (!flq->un.fl.bufs) { in csio_wr_alloc_q()
289 flq->un.fl.packen = 0; in csio_wr_alloc_q()
290 flq->un.fl.offset = 0; in csio_wr_alloc_q()
291 flq->un.fl.sreg = sreg; in csio_wr_alloc_q()
486 iqp.fl0packen = flq->un.fl.packen ? 1 : 0; in csio_wr_iq_create()
1056 if (flq->un.fl.offset > 0) { in csio_wr_process_fl()
[all …]
Dcsio_wr.h412 struct csio_fl fl; member
469 #define csio_q_flid(__hw, __idx) ((__hw)->wrm.q_arr[(__idx)]->un.fl.flid)
/linux-4.1.27/drivers/message/fusion/lsi/
Dmpi.h584 #define MPI_SGE_GET_FLAGS(fl) (((fl) & ~MPI_SGE_LENGTH_MASK) >> MPI_SGE_FLAGS_SHIFT) argument
585 #define MPI_SGE_LENGTH(fl) ((fl) & MPI_SGE_LENGTH_MASK) argument
586 #define MPI_SGE_CHAIN_LENGTH(fl) ((fl) & MPI_SGE_CHAIN_LENGTH_MASK) argument
/linux-4.1.27/fs/xfs/
Dxfs_quota.h135 #define xfs_qm_vop_chown_reserve(tp, ip, u, g, p, fl) (0) argument
136 #define xfs_qm_dqattach(ip, fl) (0) argument
137 #define xfs_qm_dqattach_locked(ip, fl) (0) argument
/linux-4.1.27/drivers/staging/lustre/lnet/lnet/
Dapi-ni.c370 lnet_freelist_init(lnet_freelist_t *fl, int n, int size) in lnet_freelist_init() argument
382 INIT_LIST_HEAD(&fl->fl_list); in lnet_freelist_init()
383 fl->fl_objs = space; in lnet_freelist_init()
384 fl->fl_nobjs = n; in lnet_freelist_init()
385 fl->fl_objsize = size; in lnet_freelist_init()
389 list_add((struct list_head *)space, &fl->fl_list); in lnet_freelist_init()
397 lnet_freelist_fini(lnet_freelist_t *fl) in lnet_freelist_fini() argument
402 if (fl->fl_nobjs == 0) in lnet_freelist_fini()
406 for (el = fl->fl_list.next; el != &fl->fl_list; el = el->next) in lnet_freelist_fini()
409 LASSERT(count == fl->fl_nobjs); in lnet_freelist_fini()
[all …]
/linux-4.1.27/net/netfilter/
Dxt_TCPMSS.c50 struct flowi fl; in tcpmss_reverse_mtu() local
56 struct flowi4 *fl4 = &fl.u.ip4; in tcpmss_reverse_mtu()
60 struct flowi6 *fl6 = &fl.u.ip6; in tcpmss_reverse_mtu()
68 ai->route(net, (struct dst_entry **)&rt, &fl, false); in tcpmss_reverse_mtu()
Dnf_nat_core.c56 static void __nf_nat_decode_session(struct sk_buff *skb, struct flowi *fl) in __nf_nat_decode_session() argument
81 l3proto->decode_session(skb, ct, dir, statusbit, fl); in __nf_nat_decode_session()
88 struct flowi fl; in nf_xfrm_me_harder() local
93 err = xfrm_decode_session(skb, &fl, family); in nf_xfrm_me_harder()
102 dst = xfrm_lookup(dev_net(dst->dev), dst, &fl, skb->sk, 0); in nf_xfrm_me_harder()
/linux-4.1.27/net/sched/
Dsch_ingress.c63 struct tcf_proto *fl = rcu_dereference_bh(p->filter_list); in ingress_enqueue() local
66 result = tc_classify(skb, fl, &res); in ingress_enqueue()
Dsch_prio.c39 struct tcf_proto *fl; in prio_classify() local
44 fl = rcu_dereference_bh(q->filter_list); in prio_classify()
45 err = tc_classify(skb, fl, &res); in prio_classify()
55 if (!fl || err < 0) { in prio_classify()
Dsch_sfb.c256 static bool sfb_classify(struct sk_buff *skb, struct tcf_proto *fl, in sfb_classify() argument
262 result = tc_classify(skb, fl, &res); in sfb_classify()
284 struct tcf_proto *fl; in sfb_enqueue() local
310 fl = rcu_dereference_bh(q->filter_list); in sfb_enqueue()
311 if (fl) { in sfb_enqueue()
313 if (!sfb_classify(skb, fl, &ret, &salt)) in sfb_enqueue()
Dsch_atm.c373 struct tcf_proto *fl; in atm_tc_enqueue() local
376 fl = rcu_dereference_bh(flow->filter_list); in atm_tc_enqueue()
377 if (fl) { in atm_tc_enqueue()
378 result = tc_classify_compat(skb, fl, &res); in atm_tc_enqueue()
Dsch_drr.c323 struct tcf_proto *fl; in drr_classify() local
333 fl = rcu_dereference_bh(q->filter_list); in drr_classify()
334 result = tc_classify(skb, fl, &res); in drr_classify()
Dsch_choke.c206 struct tcf_proto *fl; in choke_classify() local
209 fl = rcu_dereference_bh(q->filter_list); in choke_classify()
210 result = tc_classify(skb, fl, &res); in choke_classify()
Dsch_multiq.c45 struct tcf_proto *fl = rcu_dereference_bh(q->filter_list); in multiq_classify() local
49 err = tc_classify(skb, fl, &res); in multiq_classify()
Dsch_sfq.c190 struct tcf_proto *fl; in sfq_classify() local
198 fl = rcu_dereference_bh(q->filter_list); in sfq_classify()
199 if (!fl) { in sfq_classify()
205 result = tc_classify(skb, fl, &res); in sfq_classify()
Dsch_dsmark.c232 struct tcf_proto *fl = rcu_dereference_bh(p->filter_list); in dsmark_enqueue() local
233 int result = tc_classify(skb, fl, &res); in dsmark_enqueue()
Dsch_qfq.c710 struct tcf_proto *fl; in qfq_classify() local
721 fl = rcu_dereference_bh(q->filter_list); in qfq_classify()
722 result = tc_classify(skb, fl, &res); in qfq_classify()
Dsch_cbq.c224 struct tcf_proto *fl; in cbq_classify() local
239 fl = rcu_dereference_bh(head->filter_list); in cbq_classify()
243 result = tc_classify_compat(skb, fl, &res); in cbq_classify()
244 if (!fl || result < 0) in cbq_classify()
Dsch_api.c1883 void tcf_destroy_chain(struct tcf_proto __rcu **fl) in tcf_destroy_chain() argument
1887 while ((tp = rtnl_dereference(*fl)) != NULL) { in tcf_destroy_chain()
1888 RCU_INIT_POINTER(*fl, tp->next); in tcf_destroy_chain()
Dsch_htb.c1532 struct tcf_proto __rcu **fl = cl ? &cl->filter_list : &q->filter_list; in htb_find_tcf() local
1534 return fl; in htb_find_tcf()
/linux-4.1.27/net/core/
Dfib_rules.c176 struct flowi *fl, int flags) in fib_rule_match() argument
180 if (rule->iifindex && (rule->iifindex != fl->flowi_iif)) in fib_rule_match()
183 if (rule->oifindex && (rule->oifindex != fl->flowi_oif)) in fib_rule_match()
186 if ((rule->mark ^ fl->flowi_mark) & rule->mark_mask) in fib_rule_match()
189 ret = ops->match(rule, fl, flags); in fib_rule_match()
194 int fib_rules_lookup(struct fib_rules_ops *ops, struct flowi *fl, in fib_rules_lookup() argument
204 if (!fib_rule_match(rule, ops, fl, flags)) in fib_rules_lookup()
220 err = ops->action(rule, fl, flags, arg); in fib_rules_lookup()
Dnet-procfs.c152 struct sd_flow_limit *fl; in softnet_seq_show() local
155 fl = rcu_dereference(sd->flow_limit); in softnet_seq_show()
156 if (fl) in softnet_seq_show()
157 flow_limit_count = fl->count; in softnet_seq_show()
Ddev.c3296 struct sd_flow_limit *fl; in skb_flow_limit() local
3306 fl = rcu_dereference(sd->flow_limit); in skb_flow_limit()
3307 if (fl) { in skb_flow_limit()
3308 new_flow = skb_get_hash(skb) & (fl->num_buckets - 1); in skb_flow_limit()
3309 old_flow = fl->history[fl->history_head]; in skb_flow_limit()
3310 fl->history[fl->history_head] = new_flow; in skb_flow_limit()
3312 fl->history_head++; in skb_flow_limit()
3313 fl->history_head &= FLOW_LIMIT_HISTORY - 1; in skb_flow_limit()
3315 if (likely(fl->buckets[old_flow])) in skb_flow_limit()
3316 fl->buckets[old_flow]--; in skb_flow_limit()
[all …]
/linux-4.1.27/drivers/staging/lustre/include/linux/lnet/
Dlib-lnet.h189 int lnet_freelist_init(lnet_freelist_t *fl, int n, int size);
190 void lnet_freelist_fini(lnet_freelist_t *fl);
193 lnet_freelist_alloc(lnet_freelist_t *fl) in lnet_freelist_alloc() argument
198 if (list_empty(&fl->fl_list)) in lnet_freelist_alloc()
201 o = list_entry(fl->fl_list.next, lnet_freeobj_t, fo_list); in lnet_freelist_alloc()
207 lnet_freelist_free(lnet_freelist_t *fl, void *obj) in lnet_freelist_free() argument
212 list_add(&o->fo_list, &fl->fl_list); in lnet_freelist_free()
/linux-4.1.27/net/sctp/
Dtransport.c227 &transport->fl, sk); in sctp_transport_pmtu()
254 t->af_specific->get_dst(t, &t->saddr, &t->fl, sk); in sctp_transport_update_pmtu()
261 t->af_specific->get_dst(t, &t->saddr, &t->fl, sk); in sctp_transport_update_pmtu()
274 af->get_dst(transport, saddr, &transport->fl, sctp_opt2sk(opt)); in sctp_transport_route()
279 af->get_saddr(opt, transport, &transport->fl); in sctp_transport_route()
Dprotocol.c424 struct flowi *fl, struct sock *sk) in sctp_v4_get_dst() argument
428 struct flowi4 *fl4 = &fl->u.ip4; in sctp_v4_get_dst()
527 struct flowi *fl) in sctp_v4_get_saddr() argument
534 saddr->v4.sin_addr.s_addr = fl->u.ip4.saddr; in sctp_v4_get_saddr()
960 skb->len, &transport->fl.u.ip4.saddr, &transport->fl.u.ip4.daddr); in sctp_v4_xmit()
967 return ip_queue_xmit(&inet->sk, skb, &transport->fl); in sctp_v4_xmit()
Dipv6.c211 struct flowi6 *fl6 = &transport->fl.u.ip6; in sctp_v6_xmit()
230 struct flowi *fl, struct sock *sk) in sctp_v6_get_dst() argument
234 struct flowi6 *fl6 = &fl->u.ip6; in sctp_v6_get_dst()
358 struct flowi *fl) in sctp_v6_get_saddr() argument
360 struct flowi6 *fl6 = &fl->u.ip6; in sctp_v6_get_saddr()
/linux-4.1.27/drivers/net/ethernet/broadcom/bnx2x/
Dbnx2x_vfpf.c1565 struct bnx2x_vf_mac_vlan_filters *fl = NULL; in bnx2x_vf_mbx_macvlan_list() local
1572 fl = kzalloc(fsz, GFP_KERNEL); in bnx2x_vf_mbx_macvlan_list()
1573 if (!fl) in bnx2x_vf_mbx_macvlan_list()
1582 fl->filters[j].mac = msg_filter->mac; in bnx2x_vf_mbx_macvlan_list()
1583 fl->filters[j].type = BNX2X_VF_FILTER_MAC; in bnx2x_vf_mbx_macvlan_list()
1585 fl->filters[j].vid = msg_filter->vlan_tag; in bnx2x_vf_mbx_macvlan_list()
1586 fl->filters[j].type = BNX2X_VF_FILTER_VLAN; in bnx2x_vf_mbx_macvlan_list()
1588 fl->filters[j].add = in bnx2x_vf_mbx_macvlan_list()
1591 fl->count++; in bnx2x_vf_mbx_macvlan_list()
1593 if (!fl->count) in bnx2x_vf_mbx_macvlan_list()
[all …]
/linux-4.1.27/fs/fuse/
Dfile.c2072 struct file_lock *fl) in convert_fuse_file_lock() argument
2084 fl->fl_start = ffl->start; in convert_fuse_file_lock()
2085 fl->fl_end = ffl->end; in convert_fuse_file_lock()
2086 fl->fl_pid = ffl->pid; in convert_fuse_file_lock()
2092 fl->fl_type = ffl->type; in convert_fuse_file_lock()
2097 const struct file_lock *fl, int opcode, pid_t pid, in fuse_lk_fill() argument
2106 inarg->owner = fuse_lock_owner_id(fc, fl->fl_owner); in fuse_lk_fill()
2107 inarg->lk.start = fl->fl_start; in fuse_lk_fill()
2108 inarg->lk.end = fl->fl_end; in fuse_lk_fill()
2109 inarg->lk.type = fl->fl_type; in fuse_lk_fill()
[all …]
/linux-4.1.27/security/selinux/
Dxfrm.c180 const struct flowi *fl) in selinux_xfrm_state_pol_flow_match() argument
202 if (fl->flowi_secid != state_sid) in selinux_xfrm_state_pol_flow_match()
208 return (avc_has_perm(fl->flowi_secid, state_sid, in selinux_xfrm_state_pol_flow_match()
Dhooks.c4672 struct flowi *fl) in selinux_req_classify_flow() argument
4674 fl->flowi_secid = req->secid; in selinux_req_classify_flow()
/linux-4.1.27/drivers/media/usb/pvrusb2/
Dpvrusb2-ioread.h34 int pvr2_ioread_set_enabled(struct pvr2_ioread *,int fl);
Dpvrusb2-ioread.c243 int pvr2_ioread_set_enabled(struct pvr2_ioread *cp,int fl) in pvr2_ioread_set_enabled() argument
246 if ((!fl) == (!(cp->enabled))) return ret; in pvr2_ioread_set_enabled()
249 if (fl) { in pvr2_ioread_set_enabled()
Dpvrusb2-context.c43 static void pvr2_context_set_notify(struct pvr2_context *mp, int fl) in pvr2_context_set_notify() argument
47 if (fl) { in pvr2_context_set_notify()
Dpvrusb2-io.c183 int fl; in pvr2_buffer_set_ready() local
194 fl = (sp->r_count == 0); in pvr2_buffer_set_ready()
206 return fl; in pvr2_buffer_set_ready()
Dpvrusb2-hdw.c1721 int fl; in pvr2_hdw_untrip() local
1723 fl = pvr2_hdw_untrip_unlocked(hdw); in pvr2_hdw_untrip()
1725 if (fl) pvr2_hdw_state_sched(hdw); in pvr2_hdw_untrip()
1764 int fl; in pvr2_hdw_set_stream_type() local
1766 if ((fl = (hdw->desired_stream_type != config)) != 0) { in pvr2_hdw_set_stream_type()
1774 if (fl) return 0; in pvr2_hdw_set_stream_type()
3222 int fl; in pvr2_hdw_commit_ctl() local
3224 fl = pvr2_hdw_commit_setup(hdw); in pvr2_hdw_commit_ctl()
3226 if (!fl) return 0; in pvr2_hdw_commit_ctl()
3233 int fl = 0; in pvr2_hdw_worker_poll() local
[all …]
/linux-4.1.27/drivers/media/i2c/smiapp/
Dsmiapp.h94 #define SMIAPP_IDENT_FQ(manufacturer, model, rev, fl, _name, _quirk) \ argument
98 .flags = fl, \
/linux-4.1.27/include/linux/lockd/
Dbind.h56 struct file_lock *fl);
Dxdr.h43 struct file_lock fl; member
Dlockd.h208 struct nlm_wait * nlmclnt_prepare_block(struct nlm_host *host, struct file_lock *fl);
/linux-4.1.27/net/decnet/
Ddn_rules.c107 static int dn_fib_rule_match(struct fib_rule *rule, struct flowi *fl, int flags) in dn_fib_rule_match() argument
110 struct flowidn *fld = &fl->u.dn; in dn_fib_rule_match()
Ddn_route.c1291 int dn_route_output_sock(struct dst_entry __rcu **pprt, struct flowidn *fl, struct sock *sk, int fl… in dn_route_output_sock() argument
1295 err = __dn_route_output_key(pprt, fl, flags & MSG_TRYHARD); in dn_route_output_sock()
1296 if (err == 0 && fl->flowidn_proto) { in dn_route_output_sock()
1298 flowidn_to_flowi(fl), sk, 0); in dn_route_output_sock()
/linux-4.1.27/security/
Dsecurity.c1271 void security_sk_classify_flow(struct sock *sk, struct flowi *fl) in security_sk_classify_flow() argument
1273 security_ops->sk_getsecid(sk, &fl->flowi_secid); in security_sk_classify_flow()
1277 void security_req_classify_flow(const struct request_sock *req, struct flowi *fl) in security_req_classify_flow() argument
1279 security_ops->req_classify_flow(req, fl); in security_req_classify_flow()
1422 const struct flowi *fl) in security_xfrm_state_pol_flow_match() argument
1424 return security_ops->xfrm_state_pol_flow_match(x, xp, fl); in security_xfrm_state_pol_flow_match()
1432 void security_skb_classify_flow(struct sk_buff *skb, struct flowi *fl) in security_skb_classify_flow() argument
1434 int rc = security_ops->xfrm_decode_session(skb, &fl->flowi_secid, 0); in security_skb_classify_flow()
Dcapability.c747 struct flowi *fl) in cap_req_classify_flow() argument
833 const struct flowi *fl) in cap_xfrm_state_pol_flow_match() argument
838 static int cap_xfrm_decode_session(struct sk_buff *skb, u32 *fl, int ckall) in cap_xfrm_decode_session() argument
/linux-4.1.27/security/selinux/include/
Dxfrm.h28 const struct flowi *fl);
/linux-4.1.27/drivers/staging/fbtft/
DMakefile6 obj-$(CONFIG_FB_TFT_AGM1264K_FL) += fb_agm1264k-fl.o
/linux-4.1.27/drivers/virtio/
Dvirtio_input.c156 u32 mi, ma, re, fu, fl; in virtinput_cfg_abs() local
163 virtio_cread(vi->vdev, struct virtio_input_config, u.abs.flat, &fl); in virtinput_cfg_abs()
164 input_set_abs_params(vi->idev, abs, mi, ma, fu, fl); in virtinput_cfg_abs()
/linux-4.1.27/tools/hv/
Dhv_kvp_daemon.c125 struct flock fl = {F_WRLCK, SEEK_SET, 0, 0, 0}; in kvp_acquire_lock() local
126 fl.l_pid = getpid(); in kvp_acquire_lock()
128 if (fcntl(kvp_file_info[pool].fd, F_SETLKW, &fl) == -1) { in kvp_acquire_lock()
137 struct flock fl = {F_UNLCK, SEEK_SET, 0, 0, 0}; in kvp_release_lock() local
138 fl.l_pid = getpid(); in kvp_release_lock()
140 if (fcntl(kvp_file_info[pool].fd, F_SETLK, &fl) == -1) { in kvp_release_lock()
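The hv_kvp_daemon.c hits above show the classic POSIX advisory-locking pattern: a struct flock set to F_WRLCK/SEEK_SET covering the whole file, acquired with F_SETLKW and released with F_SETLK plus F_UNLCK. Below is a minimal stand-alone user-space sketch of that same pattern; the file path and helper names are illustrative, not taken from the daemon.

/* Minimal sketch of the fcntl() advisory-locking pattern shown above.
 * The lock file path is an assumption for demonstration only.
 */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>

static int lock_whole_file(int fd)
{
	/* F_WRLCK over the whole file: l_start = 0, l_len = 0 means "to EOF". */
	struct flock fl = { .l_type = F_WRLCK, .l_whence = SEEK_SET };

	fl.l_pid = getpid();
	return fcntl(fd, F_SETLKW, &fl);	/* blocks until the lock is granted */
}

static int unlock_whole_file(int fd)
{
	struct flock fl = { .l_type = F_UNLCK, .l_whence = SEEK_SET };

	fl.l_pid = getpid();
	return fcntl(fd, F_SETLK, &fl);
}

int main(void)
{
	int fd = open("/tmp/kvp_lock_demo", O_RDWR | O_CREAT, 0600);

	if (fd < 0 || lock_whole_file(fd) < 0) {
		perror("lock");
		return 1;
	}
	/* ... critical section: read/update the shared file ... */
	unlock_whole_file(fd);
	close(fd);
	return 0;
}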
/linux-4.1.27/fs/ext4/
Dmove_extent.c138 unsigned fl = AOP_FLAG_NOFS; in mext_page_double_lock() local
152 page[0] = grab_cache_page_write_begin(mapping[0], index1, fl); in mext_page_double_lock()
156 page[1] = grab_cache_page_write_begin(mapping[1], index2, fl); in mext_page_double_lock()
/linux-4.1.27/include/net/netfilter/
Dnf_nat_l3proto.h32 struct flowi *fl);
/linux-4.1.27/drivers/staging/lustre/lustre/include/
Ddt_object.h344 const char *name, int fl,
348 const char *name, int fl, struct thandle *handle,
1347 const char *name, int fl, in dt_declare_xattr_set() argument
1353 return dt->do_ops->do_declare_xattr_set(env, dt, buf, name, fl, th); in dt_declare_xattr_set()
1358 const char *name, int fl, struct thandle *th, in dt_xattr_set() argument
1364 return dt->do_ops->do_xattr_set(env, dt, buf, name, fl, th, capa); in dt_xattr_set()
/linux-4.1.27/arch/x86/kernel/cpu/
Dperf_event_intel_ds.c866 int fl = event->hw.flags; in __intel_pmu_pebs_event() local
874 fll = fl & PERF_X86_EVENT_PEBS_LDLAT; in __intel_pmu_pebs_event()
875 fst = fl & (PERF_X86_EVENT_PEBS_ST | PERF_X86_EVENT_PEBS_HSW_PREC); in __intel_pmu_pebs_event()
894 else if (fst && (fl & PERF_X86_EVENT_PEBS_HSW_PREC)) in __intel_pmu_pebs_event()
/linux-4.1.27/drivers/char/
Dppdev.c285 int fl; in register_device() local
298 fl = (pp->flags & PP_EXCL) ? PARPORT_FLAG_EXCL : 0; in register_device()
300 NULL, pp_irq, fl, pp); in register_device()
/linux-4.1.27/net/tipc/
Dudp_media.c168 struct flowi4 fl = { in tipc_udp_send_msg() local
174 rt = ip_route_output_key(net, &fl); in tipc_udp_send_msg()
/linux-4.1.27/net/l2tp/
Dl2tp_core.c1053 struct flowi *fl, size_t data_len) in l2tp_xmit_core() argument
1083 error = ip_queue_xmit(tunnel->sock, skb, fl); in l2tp_xmit_core()
1107 struct flowi *fl; in l2tp_xmit_skb() local
1147 fl = &inet->cork.fl; in l2tp_xmit_skb()
1175 l2tp_xmit_core(session, skb, fl, data_len); in l2tp_xmit_skb()
Dl2tp_ip.c450 fl4 = &inet->cork.fl.u.ip4; in l2tp_ip_sendmsg()
490 rc = ip_queue_xmit(sk, skb, &inet->cork.fl); in l2tp_ip_sendmsg()
/linux-4.1.27/include/net/9p/
Dclient.h258 int p9_client_getlock_dotl(struct p9_fid *fid, struct p9_getlock *fl);
/linux-4.1.27/net/ipv4/netfilter/
Dnf_nat_l3proto_ipv4.c36 struct flowi *fl) in nf_nat_ipv4_decode_session() argument
39 struct flowi4 *fl4 = &fl->u.ip4; in nf_nat_ipv4_decode_session()
/linux-4.1.27/include/net/sctp/
Dstructs.h456 struct flowi *fl,
460 struct flowi *fl);
784 struct flowi fl; member
/linux-4.1.27/sound/soc/codecs/
Dwm2200.c1701 int i, bclk, lrclk, wl, fl, sr_code; in wm2200_hw_params() local
1708 fl = snd_soc_params_to_frame_size(params); in wm2200_hw_params()
1709 if (fl < 0) in wm2200_hw_params()
1710 return fl; in wm2200_hw_params()
1713 wl, fl); in wm2200_hw_params()
Dwm5100.c1405 int i, base, bclk, aif_rate, lrclk, wl, fl, sr; in wm5100_hw_params() local
1414 fl = snd_soc_params_to_frame_size(params); in wm5100_hw_params()
1415 if (fl < 0) in wm5100_hw_params()
1416 return fl; in wm5100_hw_params()
1419 wl, fl); in wm5100_hw_params()
1489 i = (wl << WM5100_AIF1TX_WL_SHIFT) | fl; in wm5100_hw_params()
/linux-4.1.27/net/ipv6/netfilter/
Dnf_nat_l3proto_ipv6.c35 struct flowi *fl) in nf_nat_ipv6_decode_session() argument
38 struct flowi6 *fl6 = &fl->u.ip6; in nf_nat_ipv6_decode_session()
/linux-4.1.27/drivers/gpu/drm/radeon/
Dr600_dpm.h140 int r600_calculate_at(u32 t, u32 h, u32 fh, u32 fl, u32 *tl, u32 *th);
Dr600_dpm.c218 int r600_calculate_at(u32 t, u32 h, u32 fh, u32 fl, u32 *tl, u32 *th) in r600_calculate_at() argument
223 if ((fl == 0) || (fh == 0) || (fl > fh)) in r600_calculate_at()
226 k = (100 * fh) / fl; in r600_calculate_at()
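The r600_dpm.c hit above first rejects a zero or inverted (fl, fh) frequency pair and then expresses the high/low ratio in percent, k = (100 * fh) / fl. A small stand-alone sketch of just that visible step follows; the function name and return convention are assumptions, and the remainder of r600_calculate_at() is not reproduced here.

#include <stdint.h>
#include <stdio.h>

/* Mirror of the visible validation and ratio computation:
 * reject fl == 0, fh == 0, or fl > fh, then compute fh/fl in percent. */
static int ratio_percent(uint32_t fl, uint32_t fh, uint32_t *k)
{
	if (fl == 0 || fh == 0 || fl > fh)
		return -1;	/* invalid window, mirrors the driver's early exit */

	*k = (100 * fh) / fl;	/* e.g. fh = 600, fl = 300 -> k = 200 */
	return 0;
}

int main(void)
{
	uint32_t k;

	if (ratio_percent(300, 600, &k) == 0)
		printf("k = %u\n", k);	/* prints "k = 200" */
	return 0;
}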
/linux-4.1.27/include/linux/mlx4/
Dqp.h137 u8 fl; member
/linux-4.1.27/drivers/gpu/drm/i915/
Di915_cmd_parser.c94 #define CMD(op, opm, f, lm, fl, ...) \ argument
96 .flags = (fl) | ((f) ? CMD_DESC_FIXED : 0), \
/linux-4.1.27/include/linux/mlx5/
Dqp.h422 u8 fl; member
/linux-4.1.27/drivers/staging/lustre/lustre/lmv/
Dlmv_obd.c1878 #define md_op_data_fid(op_data, fl) \ argument
1879 (fl == MF_MDC_CANCEL_FID1 ? &op_data->op_fid1 : \
1880 fl == MF_MDC_CANCEL_FID2 ? &op_data->op_fid2 : \
1881 fl == MF_MDC_CANCEL_FID3 ? &op_data->op_fid3 : \
1882 fl == MF_MDC_CANCEL_FID4 ? &op_data->op_fid4 : \
/linux-4.1.27/drivers/scsi/lpfc/
Dlpfc_hw.h3285 uint32_t fl:1; member
3291 uint32_t fl:1; member
3389 uint32_t fl:1; member
3395 uint32_t fl:1; member
Dlpfc_els.c1269 icmd->un.elsreq64.fl = 1; in lpfc_issue_els_flogi()
7644 icmd->un.elsreq64.fl = 1; in lpfc_issue_els_fdisc()
/linux-4.1.27/net/dccp/
Doutput.c141 err = icsk->icsk_af_ops->queue_xmit(sk, skb, &inet->cork.fl); in dccp_transmit_skb()
Dipv4.c74 fl4 = &inet->cork.fl.u.ip4; in dccp_v4_connect()
/linux-4.1.27/drivers/scsi/cxgbi/
Dlibcxgbi.c685 struct flowi6 fl; in find_route_ipv6() local
688 memcpy(&fl.saddr, saddr, sizeof(struct in6_addr)); in find_route_ipv6()
690 memcpy(&fl.daddr, daddr, sizeof(struct in6_addr)); in find_route_ipv6()
691 return (struct rt6_info *)ip6_route_output(&init_net, NULL, &fl); in find_route_ipv6()
/linux-4.1.27/drivers/staging/lustre/lustre/llite/
Dllite_lib.c716 static inline int ll_set_opt(const char *opt, char *data, int fl) in ll_set_opt() argument
721 return fl; in ll_set_opt()
/linux-4.1.27/drivers/infiniband/hw/mlx4/
Dqp.c1346 path->fl = 1 << 6; in _mlx4_set_path()
1687 context->pri_path.fl = 0x80; in __mlx4_ib_modify_qp()
1690 context->pri_path.fl = 0x80; in __mlx4_ib_modify_qp()
/linux-4.1.27/drivers/net/ethernet/mellanox/mlx4/
Dresource_tracker.c764 qpc->pri_path.fl |= MLX4_FL_CV | MLX4_FL_ETH_HIDE_CQE_VLAN; in update_vport_qp_param()
3580 u8 orig_pri_path_fl = qpc->pri_path.fl; in mlx4_INIT2RTR_QP_wrapper()
4903 upd_context->qp_context.pri_path.fl = qp->pri_path_fl; in mlx4_vf_immed_vlan_work_handler()
4913 upd_context->qp_context.pri_path.fl = in mlx4_vf_immed_vlan_work_handler()
/linux-4.1.27/drivers/media/pci/bt8xx/
Dbttv-driver.c806 unsigned char fl, fh, fi; in set_pll_freq() local
819 fl=fout/fin; in set_pll_freq()
821 btwrite(fl, BT848_PLL_F_LO); in set_pll_freq()
/linux-4.1.27/drivers/infiniband/hw/mlx5/
Dqp.c1384 path->fl = (path_flags & MLX5_PATH_FLAG_FL) ? 0x80 : 0; in mlx5_set_path()
/linux-4.1.27/
DCREDITS2173 E: floeff@tunix.mathematik.uni-stuttgart.de, fl@LF.net