Searched refs:gl (Results 1 – 41 of 41) sorted by relevance

/linux-4.4.14/fs/gfs2/
glock.c
56 struct gfs2_glock *gl; /* current glock struct */ member
60 typedef void (*glock_examiner) (struct gfs2_glock * gl);
62 static void do_xmote(struct gfs2_glock *gl, struct gfs2_holder *gh, unsigned int target);
83 void gfs2_glock_free(struct gfs2_glock *gl) in gfs2_glock_free() argument
85 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; in gfs2_glock_free()
87 if (gl->gl_ops->go_flags & GLOF_ASPACE) { in gfs2_glock_free()
88 kmem_cache_free(gfs2_glock_aspace_cachep, gl); in gfs2_glock_free()
90 kfree(gl->gl_lksb.sb_lvbptr); in gfs2_glock_free()
91 kmem_cache_free(gfs2_glock_cachep, gl); in gfs2_glock_free()
103 static void gfs2_glock_hold(struct gfs2_glock *gl) in gfs2_glock_hold() argument
[all …]
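
The glock.c hits above are the core of the glock lifetime: gfs2_glock_hold() pins a glock and the final put ends in gfs2_glock_free(), which returns the object to the slab cache selected by GLOF_ASPACE. A minimal sketch of that reference contract (both helper functions are hypothetical and would live inside glock.c, where gfs2_glock_hold() is static; only gfs2_glock_hold()/gfs2_glock_put() are the real API):

    /* Hypothetical helpers illustrating the hold/put contract. */
    static void stash_glock(struct gfs2_glock *gl, struct gfs2_glock **slot)
    {
            gfs2_glock_hold(gl);        /* pin gl while *slot points at it */
            *slot = gl;
    }

    static void unstash_glock(struct gfs2_glock **slot)
    {
            struct gfs2_glock *gl = *slot;

            *slot = NULL;
            gfs2_glock_put(gl);         /* last put ends in gfs2_glock_free() */
    }
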
glops.c
33 static void gfs2_ail_error(struct gfs2_glock *gl, const struct buffer_head *bh) in gfs2_ail_error() argument
35 fs_err(gl->gl_name.ln_sbd, in gfs2_ail_error()
40 fs_err(gl->gl_name.ln_sbd, "AIL glock %u:%llu mapping %p\n", in gfs2_ail_error()
41 gl->gl_name.ln_type, gl->gl_name.ln_number, in gfs2_ail_error()
42 gfs2_glock2aspace(gl)); in gfs2_ail_error()
43 gfs2_lm_withdraw(gl->gl_name.ln_sbd, "AIL error\n"); in gfs2_ail_error()
54 static void __gfs2_ail_flush(struct gfs2_glock *gl, bool fsync, in __gfs2_ail_flush() argument
57 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; in __gfs2_ail_flush()
58 struct list_head *head = &gl->gl_ail_list; in __gfs2_ail_flush()
72 gfs2_ail_error(gl, bh); in __gfs2_ail_flush()
[all …]
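
__gfs2_ail_flush() above walks the glock's active-items list. A hedged sketch of the walk it implies, using the bd_ail_gl_list linkage visible in the lops.c hit further down (the real loop also moves each entry between AIL lists):

    struct gfs2_bufdata *bd, *tmp;

    list_for_each_entry_safe(bd, tmp, &gl->gl_ail_list, bd_ail_gl_list) {
            struct buffer_head *bh = bd->bd_bh;

            if (bh && !buffer_uptodate(bh))
                    gfs2_ail_error(gl, bh);   /* logs, then withdraws the fs */
    }
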
glock.h
130 void (*lm_put_lock) (struct gfs2_glock *gl);
131 int (*lm_lock) (struct gfs2_glock *gl, unsigned int req_state,
133 void (*lm_cancel) (struct gfs2_glock *gl);
138 static inline struct gfs2_holder *gfs2_glock_is_locked_by_me(struct gfs2_glock *gl) in gfs2_glock_is_locked_by_me() argument
144 spin_lock(&gl->gl_lockref.lock); in gfs2_glock_is_locked_by_me()
146 list_for_each_entry(gh, &gl->gl_holders, gh_list) { in gfs2_glock_is_locked_by_me()
154 spin_unlock(&gl->gl_lockref.lock); in gfs2_glock_is_locked_by_me()
159 static inline int gfs2_glock_is_held_excl(struct gfs2_glock *gl) in gfs2_glock_is_held_excl() argument
161 return gl->gl_state == LM_ST_EXCLUSIVE; in gfs2_glock_is_held_excl()
164 static inline int gfs2_glock_is_held_dfrd(struct gfs2_glock *gl) in gfs2_glock_is_held_dfrd() argument
[all …]
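
These inline helpers let callers test lock state without attaching a holder; the rgrp.c hit below uses the first one to skip re-acquiring a glock the task already holds. A sketch of that pattern (gfs2_glock_nq_init() appears in the inode.c hit; error handling abbreviated):

    struct gfs2_holder gh;
    int error;

    if (!gfs2_glock_is_locked_by_me(gl)) {
            error = gfs2_glock_nq_init(gl, LM_ST_SHARED, 0, &gh);
            if (error)
                    return error;
    }
    /* ... reads are safe; gfs2_glock_is_held_excl(gl) gates writes ... */
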
lock_dlm.c
73 static inline void gfs2_update_reply_times(struct gfs2_glock *gl) in gfs2_update_reply_times() argument
76 const unsigned gltype = gl->gl_name.ln_type; in gfs2_update_reply_times()
77 unsigned index = test_bit(GLF_BLOCKING, &gl->gl_flags) ? in gfs2_update_reply_times()
82 rtt = ktime_to_ns(ktime_sub(ktime_get_real(), gl->gl_dstamp)); in gfs2_update_reply_times()
83 lks = this_cpu_ptr(gl->gl_name.ln_sbd->sd_lkstats); in gfs2_update_reply_times()
84 gfs2_update_stats(&gl->gl_stats, index, rtt); /* Local */ in gfs2_update_reply_times()
88 trace_gfs2_glock_lock_time(gl, rtt); in gfs2_update_reply_times()
100 static inline void gfs2_update_request_times(struct gfs2_glock *gl) in gfs2_update_request_times() argument
103 const unsigned gltype = gl->gl_name.ln_type; in gfs2_update_request_times()
108 dstamp = gl->gl_dstamp; in gfs2_update_request_times()
[all …]
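
gfs2_update_reply_times() measures the DLM round trip per glock and feeds it into both per-glock and per-cpu stats. A hedged reconstruction of the truncated lines (the GFS2_LKS_* index names are assumed from incore.h; the preempt bracket guards the this_cpu_ptr() access):

    unsigned index = test_bit(GLF_BLOCKING, &gl->gl_flags) ?
                     GFS2_LKS_SRTTB : GFS2_LKS_SRTT;      /* assumed names */
    s64 rtt = ktime_to_ns(ktime_sub(ktime_get_real(), gl->gl_dstamp));

    preempt_disable();
    lks = this_cpu_ptr(gl->gl_name.ln_sbd->sd_lkstats);
    gfs2_update_stats(&gl->gl_stats, index, rtt);         /* local */
    gfs2_update_stats(&lks->lkstats[gltype], index, rtt); /* global */
    preempt_enable();
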
trace_gfs2.h
91 TP_PROTO(const struct gfs2_glock *gl, unsigned int new_state),
93 TP_ARGS(gl, new_state),
107 __entry->dev = gl->gl_name.ln_sbd->sd_vfs->s_dev;
108 __entry->glnum = gl->gl_name.ln_number;
109 __entry->gltype = gl->gl_name.ln_type;
110 __entry->cur_state = glock_trace_state(gl->gl_state);
112 __entry->tgt_state = glock_trace_state(gl->gl_target);
113 __entry->dmt_state = glock_trace_state(gl->gl_demote_state);
114 __entry->flags = gl->gl_flags | (gl->gl_object ? (1UL<<GLF_OBJECT) : 0);
130 TP_PROTO(const struct gfs2_glock *gl),
[all …]
main.c
50 struct gfs2_glock *gl = foo; in gfs2_init_glock_once() local
52 INIT_HLIST_BL_NODE(&gl->gl_list); in gfs2_init_glock_once()
53 spin_lock_init(&gl->gl_lockref.lock); in gfs2_init_glock_once()
54 INIT_LIST_HEAD(&gl->gl_holders); in gfs2_init_glock_once()
55 INIT_LIST_HEAD(&gl->gl_lru); in gfs2_init_glock_once()
56 INIT_LIST_HEAD(&gl->gl_ail_list); in gfs2_init_glock_once()
57 atomic_set(&gl->gl_ail_count, 0); in gfs2_init_glock_once()
58 atomic_set(&gl->gl_revokes, 0); in gfs2_init_glock_once()
63 struct gfs2_glock *gl = foo; in gfs2_init_gl_aspace_once() local
64 struct address_space *mapping = (struct address_space *)(gl + 1); in gfs2_init_gl_aspace_once()
[all …]
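
gfs2_init_glock_once() is a slab constructor: it runs once per object when a slab page is populated, not on every allocation, so the lists it initializes must be empty again by free time. Registration sketch (flags abbreviated relative to the real main.c):

    gfs2_glock_cachep = kmem_cache_create("gfs2_glock",
                                          sizeof(struct gfs2_glock),
                                          0, 0, gfs2_init_glock_once);
    if (!gfs2_glock_cachep)
            goto fail;
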
meta_io.c
114 struct buffer_head *gfs2_getbuf(struct gfs2_glock *gl, u64 blkno, int create) in gfs2_getbuf() argument
116 struct address_space *mapping = gfs2_glock2aspace(gl); in gfs2_getbuf()
117 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; in gfs2_getbuf()
182 struct buffer_head *gfs2_meta_new(struct gfs2_glock *gl, u64 blkno) in gfs2_meta_new() argument
185 bh = gfs2_getbuf(gl, blkno, CREATE); in gfs2_meta_new()
200 int gfs2_meta_read(struct gfs2_glock *gl, u64 blkno, int flags, in gfs2_meta_read() argument
203 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; in gfs2_meta_read()
211 *bhp = bh = gfs2_getbuf(gl, blkno, CREATE); in gfs2_meta_read()
340 struct gfs2_glock *gl = ip->i_gl; in gfs2_meta_indirect_buffer() local
345 ret = gfs2_meta_read(gl, num, DIO_WAIT, &bh); in gfs2_meta_indirect_buffer()
[all …]
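
gfs2_meta_read() resolves a disk block through the glock's address space (via gfs2_getbuf()) and optionally waits for the read. Caller-side sketch, matching the gfs2_meta_indirect_buffer() hit above (assumes the glock is already held):

    struct buffer_head *bh;
    int error;

    error = gfs2_meta_read(gl, blkno, DIO_WAIT, &bh);
    if (error)
            return error;
    /* ... parse bh->b_data ... */
    brelse(bh);
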
trans.c
125 static struct gfs2_bufdata *gfs2_alloc_bufdata(struct gfs2_glock *gl, in gfs2_alloc_bufdata() argument
133 bd->bd_gl = gl; in gfs2_alloc_bufdata()
158 void gfs2_trans_add_data(struct gfs2_glock *gl, struct buffer_head *bh) in gfs2_trans_add_data() argument
161 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; in gfs2_trans_add_data()
178 bd = gfs2_alloc_bufdata(gl, bh, &gfs2_databuf_lops); in gfs2_trans_add_data()
184 gfs2_assert(sdp, bd->bd_gl == gl); in gfs2_trans_add_data()
226 void gfs2_trans_add_meta(struct gfs2_glock *gl, struct buffer_head *bh) in gfs2_trans_add_meta() argument
229 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; in gfs2_trans_add_meta()
240 bd = gfs2_alloc_bufdata(gl, bh, &gfs2_buf_lops); in gfs2_trans_add_meta()
247 gfs2_assert(sdp, bd->bd_gl == gl); in gfs2_trans_add_meta()
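
gfs2_trans_add_data()/gfs2_trans_add_meta() attach a buffer to the running transaction so the journal tracks it; the begin/end pair around them is not in the matches above. A hedged sketch of the full sequence (gfs2_trans_begin()/gfs2_trans_end() assumed from trans.h; RES_DINODE is one of its reservation constants):

    int error = gfs2_trans_begin(sdp, RES_DINODE, 0);

    if (error)
            return error;
    gfs2_trans_add_meta(gl, bh);    /* must precede modifying bh->b_data */
    /* ... modify the buffer ... */
    gfs2_trans_end(sdp);
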
lops.c
72 struct gfs2_glock *gl = bd->bd_gl; in maybe_release_space() local
73 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; in maybe_release_space()
74 struct gfs2_rgrpd *rgd = gl->gl_object; in maybe_release_space()
75 unsigned int index = bd->bd_bh->b_blocknr - gl->gl_name.ln_number; in maybe_release_space()
118 struct gfs2_glock *gl = bd->bd_gl; in gfs2_unpin() local
119 list_add(&bd->bd_ail_gl_list, &gl->gl_ail_list); in gfs2_unpin()
120 atomic_inc(&gl->gl_ail_count); in gfs2_unpin()
529 struct gfs2_glock *gl = ip->i_gl; in buf_lo_scan_elements() local
552 bh_ip = gfs2_meta_new(gl, blkno); in buf_lo_scan_elements()
578 static void gfs2_meta_sync(struct gfs2_glock *gl) in gfs2_meta_sync() argument
[all …]
meta_io.h
54 extern struct buffer_head *gfs2_meta_new(struct gfs2_glock *gl, u64 blkno);
55 extern int gfs2_meta_read(struct gfs2_glock *gl, u64 blkno, int flags,
58 extern struct buffer_head *gfs2_getbuf(struct gfs2_glock *gl, u64 blkno,
72 struct buffer_head *gfs2_meta_ra(struct gfs2_glock *gl, u64 dblock, u32 extlen);
incore.h
40 typedef void (*gfs2_glop_bh_t) (struct gfs2_glock *gl, unsigned int ret);
219 void (*go_sync) (struct gfs2_glock *gl);
220 int (*go_xmote_bh) (struct gfs2_glock *gl, struct gfs2_holder *gh);
221 void (*go_inval) (struct gfs2_glock *gl, int flags);
222 int (*go_demote_ok) (const struct gfs2_glock *gl);
225 void (*go_dump)(struct seq_file *seq, const struct gfs2_glock *gl);
226 void (*go_callback)(struct gfs2_glock *gl, bool remote);
831 static inline void gfs2_glstats_inc(struct gfs2_glock *gl, int which) in gfs2_glstats_inc() argument
833 gl->gl_stats.stats[which]++; in gfs2_glstats_inc()
836 static inline void gfs2_sbstats_inc(const struct gfs2_glock *gl, int which) in gfs2_sbstats_inc() argument
[all …]
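
The hooks above form the per-lock-type vtable, struct gfs2_glock_operations in incore.h. A sketch of how one is instantiated (the example_* hooks are hypothetical; glops.c defines the real tables, such as the gfs2_flock_glops referenced in file.c below):

    static const struct gfs2_glock_operations example_glops = {
            .go_sync      = example_go_sync,      /* write back dirty state */
            .go_inval     = example_go_inval,     /* drop cached data */
            .go_demote_ok = example_go_demote_ok, /* may the lock be dropped? */
            .go_dump      = example_go_dump,      /* debugfs dump hook */
    };
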
trans.h
42 extern void gfs2_trans_add_data(struct gfs2_glock *gl, struct buffer_head *bh);
43 extern void gfs2_trans_add_meta(struct gfs2_glock *gl, struct buffer_head *bh);
rgrp.c
723 struct gfs2_glock *gl; in gfs2_clear_rgrpd() local
727 gl = rgd->rd_gl; in gfs2_clear_rgrpd()
731 if (gl) { in gfs2_clear_rgrpd()
732 spin_lock(&gl->gl_lockref.lock); in gfs2_clear_rgrpd()
733 gl->gl_object = NULL; in gfs2_clear_rgrpd()
734 spin_unlock(&gl->gl_lockref.lock); in gfs2_clear_rgrpd()
735 gfs2_glock_add_to_lru(gl); in gfs2_clear_rgrpd()
736 gfs2_glock_put(gl); in gfs2_clear_rgrpd()
1033 struct gfs2_glock *gl = ip->i_gl; in gfs2_rindex_update() local
1040 if (!gfs2_glock_is_locked_by_me(gl)) { in gfs2_rindex_update()
[all …]
log.c
98 struct gfs2_glock *gl = NULL; in gfs2_ail1_start_one() local
117 if (gl == bd->bd_gl) in gfs2_ail1_start_one()
119 gl = bd->bd_gl; in gfs2_ail1_start_one()
581 struct gfs2_glock *gl = bd->bd_gl; in gfs2_add_revoke() local
589 atomic_inc(&gl->gl_revokes); in gfs2_add_revoke()
590 set_bit(GLF_LFLUSH, &gl->gl_flags); in gfs2_add_revoke()
704 void gfs2_log_flush(struct gfs2_sbd *sdp, struct gfs2_glock *gl, in gfs2_log_flush() argument
713 if (gl && !test_bit(GLF_LFLUSH, &gl->gl_flags)) { in gfs2_log_flush()
glops.h
28 extern void gfs2_ail_flush(struct gfs2_glock *gl, bool fsync);
log.h
74 extern void gfs2_log_flush(struct gfs2_sbd *sdp, struct gfs2_glock *gl,
file.c
1019 struct gfs2_glock *gl; in do_flock() local
1030 gl = fl_gh->gh_gl; in do_flock()
1031 if (gl) { in do_flock()
1040 &gfs2_flock_glops, CREATE, &gl); in do_flock()
1043 gfs2_holder_init(gl, state, flags, fl_gh); in do_flock()
1044 gfs2_glock_put(gl); in do_flock()
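
The do_flock() lines above are the canonical lookup dance: gfs2_glock_get() returns a referenced glock, gfs2_holder_init() pins it independently, so the lookup reference is dropped at once. Sketch with the acquisition step added (gfs2_glock_nq() assumed from glock.h; using the inode number as the lock name is illustrative):

    error = gfs2_glock_get(sdp, ip->i_no_addr, &gfs2_flock_glops,
                           CREATE, &gl);
    if (error)
            return error;
    gfs2_holder_init(gl, state, flags, fl_gh);
    gfs2_glock_put(gl);             /* the holder keeps its own reference */
    error = gfs2_glock_nq(fl_gh);   /* block until the lock is granted */
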
rgrp.h
74 extern void gfs2_rgrp_dump(struct seq_file *seq, const struct gfs2_glock *gl);
super.c
1303 struct gfs2_glock *gl = ip->i_iopen_gh.gh_gl; in gfs2_drop_inode() local
1304 if (gl && test_bit(GLF_DEMOTE, &gl->gl_flags)) in gfs2_drop_inode()
1428 struct gfs2_glock *gl = ip->i_gl; in gfs2_final_release_pages() local
1433 if (atomic_read(&gl->gl_revokes) == 0) { in gfs2_final_release_pages()
1434 clear_bit(GLF_LFLUSH, &gl->gl_flags); in gfs2_final_release_pages()
1435 clear_bit(GLF_DIRTY, &gl->gl_flags); in gfs2_final_release_pages()
sys.c
241 struct gfs2_glock *gl; in demote_rq_store() local
276 rv = gfs2_glock_get(sdp, glnum, glops, 0, &gl); in demote_rq_store()
279 gfs2_glock_cb(gl, glmode); in demote_rq_store()
280 gfs2_glock_put(gl); in demote_rq_store()
bmap.c
273 static void gfs2_metapath_ra(struct gfs2_glock *gl, in gfs2_metapath_ra() argument
284 rabh = gfs2_getbuf(gl, be64_to_cpu(*t), CREATE); in gfs2_metapath_ra()
400 struct gfs2_glock *gl, unsigned int i, in gfs2_indirect_init() argument
408 mp->mp_bh[i] = gfs2_meta_new(gl, bn); in gfs2_indirect_init()
409 gfs2_trans_add_meta(gl, mp->mp_bh[i]); in gfs2_indirect_init()
recovery.c
36 struct gfs2_glock *gl = ip->i_gl; in gfs2_replay_read_block() local
50 *bh = gfs2_meta_ra(gl, dblock, extlen); in gfs2_replay_read_block()
inode.c
834 struct gfs2_glock *gl; in __gfs2_lookup() local
845 gl = GFS2_I(inode)->i_gl; in __gfs2_lookup()
846 error = gfs2_glock_nq_init(gl, LM_ST_SHARED, LM_FLAG_ANY, &gh); in __gfs2_lookup()
dir.c
1398 struct gfs2_glock *gl = ip->i_gl; in gfs2_dir_readahead() local
1418 bh = gfs2_getbuf(gl, blocknr, 1); in gfs2_dir_readahead()
/linux-4.4.14/drivers/scsi/cxgbi/
libcxgbi.c
1271 struct cxgbi_gather_list *gl, unsigned int gidx) in cxgbi_ddp_ppod_set() argument
1277 ppod->addr[i] = gidx < gl->nelem ? in cxgbi_ddp_ppod_set()
1278 cpu_to_be64(gl->phys_addr[gidx]) : 0ULL; in cxgbi_ddp_ppod_set()
1292 struct cxgbi_gather_list *gl) in ddp_find_unused_entries() argument
1312 ddp->gl_map[k] = gl; in ddp_find_unused_entries()
1334 struct cxgbi_gather_list *gl) in ddp_gl_unmap() argument
1338 for (i = 0; i < gl->nelem; i++) in ddp_gl_unmap()
1339 dma_unmap_page(&pdev->dev, gl->phys_addr[i], PAGE_SIZE, in ddp_gl_unmap()
1344 struct cxgbi_gather_list *gl) in ddp_gl_map() argument
1348 for (i = 0; i < gl->nelem; i++) { in ddp_gl_map()
[all …]
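
ddp_gl_map()/ddp_gl_unmap() bracket the gather list's DMA lifetime. A sketch of the mapping loop implied by the unmap code above (gl->pages assumed as the page array; dma_map_page()/dma_mapping_error() are the standard DMA API):

    for (i = 0; i < gl->nelem; i++) {
            gl->phys_addr[i] = dma_map_page(&pdev->dev, gl->pages[i], 0,
                                            PAGE_SIZE, DMA_FROM_DEVICE);
            if (unlikely(dma_mapping_error(&pdev->dev, gl->phys_addr[i]))) {
                    while (i--)     /* unwind pages already mapped */
                            dma_unmap_page(&pdev->dev, gl->phys_addr[i],
                                           PAGE_SIZE, DMA_FROM_DEVICE);
                    return -ENOMEM;
            }
    }
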
/linux-4.4.14/drivers/net/ethernet/chelsio/cxgb4vf/
sge.c
1474 const struct pkt_gl *gl, in copy_frags() argument
1480 __skb_fill_page_desc(skb, 0, gl->frags[0].page, in copy_frags()
1481 gl->frags[0].offset + offset, in copy_frags()
1482 gl->frags[0].size - offset); in copy_frags()
1483 skb_shinfo(skb)->nr_frags = gl->nfrags; in copy_frags()
1484 for (i = 1; i < gl->nfrags; i++) in copy_frags()
1485 __skb_fill_page_desc(skb, i, gl->frags[i].page, in copy_frags()
1486 gl->frags[i].offset, in copy_frags()
1487 gl->frags[i].size); in copy_frags()
1490 get_page(gl->frags[gl->nfrags - 1].page); in copy_frags()
[all …]
cxgb4vf_main.c
429 const struct pkt_gl *gl) in fwevtq_handler() argument
/linux-4.4.14/drivers/net/ethernet/chelsio/cxgb4/
sge.c
1721 const struct pkt_gl *gl, unsigned int offset) in copy_frags() argument
1726 __skb_fill_page_desc(skb, 0, gl->frags[0].page, in copy_frags()
1727 gl->frags[0].offset + offset, in copy_frags()
1728 gl->frags[0].size - offset); in copy_frags()
1729 skb_shinfo(skb)->nr_frags = gl->nfrags; in copy_frags()
1730 for (i = 1; i < gl->nfrags; i++) in copy_frags()
1731 __skb_fill_page_desc(skb, i, gl->frags[i].page, in copy_frags()
1732 gl->frags[i].offset, in copy_frags()
1733 gl->frags[i].size); in copy_frags()
1736 get_page(gl->frags[gl->nfrags - 1].page); in copy_frags()
[all …]
cxgb4_uld.h
283 const struct pkt_gl *gl);
307 struct sk_buff *cxgb4_pktgl_to_skb(const struct pkt_gl *gl,
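
cxgb4_pktgl_to_skb() is the helper ULDs use to materialize a gather list as an skb. Usage sketch (its remaining two arguments, linear-copy length and pull length, are truncated above; the values here are illustrative):

    struct sk_buff *skb = cxgb4_pktgl_to_skb(gl, 128, 128);

    if (unlikely(!skb))
            return -ENOMEM;
    /* ... hand the skb up the stack ... */
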
cxgb4.h
507 const struct pkt_gl *gl);
1101 const struct pkt_gl *gl);
cxgb4_main.c
601 const struct pkt_gl *gl) in fwevtq_handler() argument
693 const struct pkt_gl *gl) in uldrx_handler() argument
703 if (ulds[q->uld].rx_handler(q->adap->uld_handle[q->uld], rsp, gl)) { in uldrx_handler()
707 if (gl == NULL) in uldrx_handler()
709 else if (gl == CXGB4_MSG_AN) in uldrx_handler()
/linux-4.4.14/drivers/infiniband/hw/cxgb4/
device.c
1059 static inline struct sk_buff *copy_gl_to_skb_pkt(const struct pkt_gl *gl, in copy_gl_to_skb_pkt() argument
1072 skb = alloc_skb(gl->tot_len + sizeof(struct cpl_pass_accept_req) + in copy_gl_to_skb_pkt()
1077 __skb_put(skb, gl->tot_len + sizeof(struct cpl_pass_accept_req) + in copy_gl_to_skb_pkt()
1092 gl->va + pktshift, in copy_gl_to_skb_pkt()
1093 gl->tot_len - pktshift); in copy_gl_to_skb_pkt()
1097 static inline int recv_rx_pkt(struct c4iw_dev *dev, const struct pkt_gl *gl, in recv_rx_pkt() argument
1106 skb = copy_gl_to_skb_pkt(gl , rsp, dev->rdev.lldi.sge_pktshift); in recv_rx_pkt()
1123 const struct pkt_gl *gl) in c4iw_uld_rx_handler() argument
1130 if (gl == NULL) { in c4iw_uld_rx_handler()
1139 } else if (gl == CXGB4_MSG_AN) { in c4iw_uld_rx_handler()
[all …]
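
c4iw_uld_rx_handler() shows the three-way contract every ULD rx handler follows: gl == NULL means the message is inline in the rsp words, CXGB4_MSG_AN marks a pure async notification, and anything else is a real gather list. Skeleton matching the rx_handler signature in cxgb4.h above:

    static int example_uld_rx_handler(void *handle, const __be64 *rsp,
                                      const struct pkt_gl *gl)
    {
            if (gl == NULL) {
                    /* small message: copy it out of rsp[] */
            } else if (gl == CXGB4_MSG_AN) {
                    /* async notification: no payload */
            } else {
                    /* normal packet: build an skb from gl */
            }
            return 0;
    }
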
/linux-4.4.14/drivers/iommu/
intel-svm.c
150 unsigned long address, unsigned long pages, int ih, int gl) in intel_flush_svm_range_dev() argument
158 if (gl) in intel_flush_svm_range_dev()
170 desc.high = QI_EIOTLB_ADDR(address) | QI_EIOTLB_GL(gl) | in intel_flush_svm_range_dev()
196 unsigned long pages, int ih, int gl) in intel_flush_svm_range() argument
207 intel_flush_svm_range_dev(svm, sdev, address, pages, ih, gl); in intel_flush_svm_range()
/linux-4.4.14/include/linux/
intel-iommu.h
308 #define QI_EIOTLB_GL(gl) (((u64)gl) << 7) argument
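
QI_EIOTLB_GL() places the gl flag at bit 7 of the extended-IOTLB invalidation descriptor, selecting invalidation of global mappings. A sketch of how intel-svm.c composes the descriptor (QI_EIOTLB_IH()/QI_EIOTLB_AM() and qi_submit_sync() assumed from the same headers; desc.low carries the PASID/domain fields, elided here):

    struct qi_desc desc;

    desc.high = QI_EIOTLB_ADDR(address) | QI_EIOTLB_GL(gl) |
                QI_EIOTLB_IH(ih) | QI_EIOTLB_AM(mask);
    qi_submit_sync(&desc, iommu);
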
/linux-4.4.14/Documentation/DocBook/media/
crop.gif.b
6495 (binary match inside base64-encoded image data; not meaningful as text)
/linux-4.4.14/drivers/scsi/cxgbi/cxgb4i/
cxgb4i.c
1575 struct cxgbi_gather_list *gl, in ddp_ppod_write_idata() argument
1604 if (!hdr && !gl) in ddp_ppod_write_idata()
1607 cxgbi_ddp_ppod_set(ppod, hdr, gl, gl_pidx); in ddp_ppod_write_idata()
1616 struct cxgbi_gather_list *gl) in ddp_set_map() argument
1626 idx, cnt, gl, 4 * i); in ddp_set_map()
/linux-4.4.14/arch/sparc/include/asm/
hypervisor.h
1217 unsigned char gl; /* Global register level */ member
/linux-4.4.14/drivers/scsi/cxgbi/cxgb3i/
cxgb3i.c
1085 struct cxgbi_gather_list *gl) in ddp_set_map() argument
1094 csk, idx, npods, gl); in ddp_set_map()
1106 hdr, gl, i * PPOD_PAGES_MAX); in ddp_set_map()
/linux-4.4.14/tools/perf/util/
probe-finder.c
992 static int pubname_search_cb(Dwarf *dbg, Dwarf_Global *gl, void *data) in pubname_search_cb() argument
996 if (dwarf_offdie(dbg, gl->die_offset, param->sp_die)) { in pubname_search_cb()
1001 if (!dwarf_offdie(dbg, gl->cu_offset, param->cu_die)) in pubname_search_cb()
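
pubname_search_cb() is invoked once per .debug_pubnames entry; elfutils' dwarf_getpubnames() drives it. Sketch (the fields of struct pubname_callback_param are elided; both names are from perf's probe-finder):

    struct pubname_callback_param pp = { /* names in, DIEs out */ };

    dwarf_getpubnames(dbg, pubname_search_cb, &pp, 0);
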
/linux-4.4.14/Documentation/filesystems/
9p.txt
29 http://goo.gl/3WPDg
/linux-4.4.14/net/ipv4/
Kconfig
618 delay gradients." In Networking 2011. Preprint: http://goo.gl/No3vdg