gh 41 drivers/acpi/arm64/gtdt.c struct acpi_gtdt_header *gh = platform_timer;
gh 43 drivers/acpi/arm64/gtdt.c platform_timer += gh->length;
gh 56 drivers/acpi/arm64/gtdt.c struct acpi_gtdt_header *gh = platform_timer;
gh 58 drivers/acpi/arm64/gtdt.c return gh->type == ACPI_GTDT_TYPE_TIMER_BLOCK;
gh 63 drivers/acpi/arm64/gtdt.c struct acpi_gtdt_header *gh = platform_timer;
gh 66 drivers/acpi/arm64/gtdt.c if (gh->type != ACPI_GTDT_TYPE_WATCHDOG)
gh 468 drivers/net/geneve.c static int geneve_hlen(struct genevehdr *gh)
gh 470 drivers/net/geneve.c return sizeof(*gh) + gh->opt_len * 4;
gh 479 drivers/net/geneve.c struct genevehdr *gh, *gh2;
gh 486 drivers/net/geneve.c hlen = off_gnv + sizeof(*gh);
gh 487 drivers/net/geneve.c gh = skb_gro_header_fast(skb, off_gnv);
gh 489 drivers/net/geneve.c gh = skb_gro_header_slow(skb, hlen, off_gnv);
gh 490 drivers/net/geneve.c if (unlikely(!gh))
gh 494 drivers/net/geneve.c if (gh->ver != GENEVE_VER || gh->oam)
gh 496 drivers/net/geneve.c gh_len = geneve_hlen(gh);
gh 500 drivers/net/geneve.c gh = skb_gro_header_slow(skb, hlen, off_gnv);
gh 501 drivers/net/geneve.c if (unlikely(!gh))
gh 510 drivers/net/geneve.c if (gh->opt_len != gh2->opt_len ||
gh 511 drivers/net/geneve.c memcmp(gh, gh2, gh_len)) {
gh 517 drivers/net/geneve.c type = gh->proto_type;
gh 525 drivers/net/geneve.c skb_gro_postpull_rcsum(skb, gh, gh_len);
gh 540 drivers/net/geneve.c struct genevehdr *gh;
gh 546 drivers/net/geneve.c gh = (struct genevehdr *)(skb->data + nhoff);
gh 547 drivers/net/geneve.c gh_len = geneve_hlen(gh);
gh 548 drivers/net/geneve.c type = gh->proto_type;
gh 62 fs/gfs2/acl.c struct gfs2_holder gh;
gh 68 fs/gfs2/acl.c LM_FLAG_ANY, &gh);
gh 75 fs/gfs2/acl.c gfs2_glock_dq_uninit(&gh);
gh 111 fs/gfs2/acl.c struct gfs2_holder gh;
gh 124 fs/gfs2/acl.c ret = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
gh 145 fs/gfs2/acl.c gfs2_glock_dq_uninit(&gh);
gh 520 fs/gfs2/aops.c struct gfs2_holder gh;
gh 524 fs/gfs2/aops.c gfs2_holder_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
gh 525 fs/gfs2/aops.c error = gfs2_glock_nq(&gh);
gh 534 fs/gfs2/aops.c gfs2_glock_dq(&gh);
gh 536 fs/gfs2/aops.c gfs2_holder_uninit(&gh);
gh 605 fs/gfs2/aops.c struct gfs2_holder gh;
gh 608 fs/gfs2/aops.c gfs2_holder_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
gh 609 fs/gfs2/aops.c ret = gfs2_glock_nq(&gh);
gh 614 fs/gfs2/aops.c gfs2_glock_dq(&gh);
gh 616 fs/gfs2/aops.c gfs2_holder_uninit(&gh);
gh 95 fs/gfs2/export.c struct gfs2_holder gh;
gh 112 fs/gfs2/export.c error = gfs2_glock_nq_init(dip->i_gl, LM_ST_SHARED, 0, &gh);
gh 118 fs/gfs2/export.c gfs2_glock_dq_uninit(&gh);
gh 160 fs/gfs2/file.c struct gfs2_holder gh;
gh 164 fs/gfs2/file.c gfs2_holder_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
gh 165 fs/gfs2/file.c error = gfs2_glock_nq(&gh);
gh 174 fs/gfs2/file.c gfs2_glock_dq(&gh);
gh 176 fs/gfs2/file.c gfs2_holder_uninit(&gh);
gh 223 fs/gfs2/file.c struct gfs2_holder gh;
gh 231 fs/gfs2/file.c error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
gh 293 fs/gfs2/file.c gfs2_glock_dq_uninit(&gh);
gh 455 fs/gfs2/file.c struct gfs2_holder gh;
gh 467 fs/gfs2/file.c gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
gh 468 fs/gfs2/file.c ret = gfs2_glock_nq(&gh);
gh 542 fs/gfs2/file.c gfs2_glock_dq(&gh);
gh 544 fs/gfs2/file.c gfs2_holder_uninit(&gh);
gh 751 fs/gfs2/file.c struct gfs2_holder gh;
gh 757 fs/gfs2/file.c gfs2_holder_init(ip->i_gl, LM_ST_DEFERRED, 0, &gh);
gh 758 fs/gfs2/file.c ret = gfs2_glock_nq(&gh);
gh 764 fs/gfs2/file.c gfs2_glock_dq(&gh);
gh 766 fs/gfs2/file.c gfs2_holder_uninit(&gh);
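The drivers/net/geneve.c hits above center on geneve_hlen(), which sizes the variable-length Geneve header as the fixed 8-byte base header plus opt_len four-byte option words. Below is a minimal userspace sketch of that arithmetic; the struct here is a simplified stand-in for the kernel's struct genevehdr (the real definition uses endian-dependent bitfields), so take it as illustration, not the wire-accurate layout.

    #include <stdio.h>
    #include <stddef.h>

    /* Simplified stand-in for the kernel's struct genevehdr; the fixed
     * part is 8 bytes either way. */
    struct genevehdr {
            unsigned char ver_opt_len;      /* ver: top 2 bits, opt_len: low 6 bits */
            unsigned char oam_flags;        /* O and C bits plus reserved */
            unsigned short proto_type;      /* encapsulated protocol, network order */
            unsigned char vni[3];           /* virtual network identifier */
            unsigned char rsvd;
            /* variable-length options follow the 8-byte base header */
    };

    static size_t geneve_hlen(const struct genevehdr *gh)
    {
            unsigned int opt_len = gh->ver_opt_len & 0x3f;

            /* mirrors the kernel's geneve_hlen(): sizeof(*gh) + opt_len * 4 */
            return sizeof(*gh) + opt_len * 4;
    }

    int main(void)
    {
            struct genevehdr gh = { .ver_opt_len = 2 };     /* ver 0, two option words */

            printf("Geneve header length: %zu bytes\n", geneve_hlen(&gh));  /* 8 + 2*4 = 16 */
            return 0;
    }

This is why the GRO path (lines 486-501 above) first pulls in only sizeof(*gh) bytes, then re-pulls with hlen = off_gnv + gh_len once opt_len is known.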
gh 777 fs/gfs2/file.c struct gfs2_holder gh;
gh 788 fs/gfs2/file.c gfs2_holder_init(ip->i_gl, LM_ST_DEFERRED, 0, &gh);
gh 789 fs/gfs2/file.c ret = gfs2_glock_nq(&gh);
gh 800 fs/gfs2/file.c gfs2_glock_dq(&gh);
gh 802 fs/gfs2/file.c gfs2_holder_uninit(&gh);
gh 845 fs/gfs2/file.c struct gfs2_holder gh;
gh 847 fs/gfs2/file.c ret = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
gh 850 fs/gfs2/file.c gfs2_glock_dq_uninit(&gh);
gh 1107 fs/gfs2/file.c struct gfs2_holder gh;
gh 1118 fs/gfs2/file.c gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
gh 1119 fs/gfs2/file.c ret = gfs2_glock_nq(&gh);
gh 1150 fs/gfs2/file.c gfs2_glock_dq(&gh);
gh 1152 fs/gfs2/file.c gfs2_holder_uninit(&gh);
gh 60 fs/gfs2/glock.c static void do_xmote(struct gfs2_glock *gl, struct gfs2_holder *gh, unsigned int target);
gh 282 fs/gfs2/glock.c static inline int may_grant(const struct gfs2_glock *gl, const struct gfs2_holder *gh)
gh 285 fs/gfs2/glock.c if ((gh->gh_state == LM_ST_EXCLUSIVE ||
gh 286 fs/gfs2/glock.c gh_head->gh_state == LM_ST_EXCLUSIVE) && gh != gh_head)
gh 288 fs/gfs2/glock.c if (gl->gl_state == gh->gh_state)
gh 290 fs/gfs2/glock.c if (gh->gh_flags & GL_EXACT)
gh 293 fs/gfs2/glock.c if (gh->gh_state == LM_ST_SHARED && gh_head->gh_state == LM_ST_SHARED)
gh 295 fs/gfs2/glock.c if (gh->gh_state == LM_ST_DEFERRED && gh_head->gh_state == LM_ST_DEFERRED)
gh 298 fs/gfs2/glock.c if (gl->gl_state != LM_ST_UNLOCKED && (gh->gh_flags & LM_FLAG_ANY))
gh 303 fs/gfs2/glock.c static void gfs2_holder_wake(struct gfs2_holder *gh)
gh 305 fs/gfs2/glock.c clear_bit(HIF_WAIT, &gh->gh_iflags);
gh 307 fs/gfs2/glock.c wake_up_bit(&gh->gh_iflags, HIF_WAIT);
gh 308 fs/gfs2/glock.c if (gh->gh_flags & GL_ASYNC) {
gh 309 fs/gfs2/glock.c struct gfs2_sbd *sdp = gh->gh_gl->gl_name.ln_sbd;
gh 322 fs/gfs2/glock.c struct gfs2_holder *gh, *tmp;
gh 324 fs/gfs2/glock.c list_for_each_entry_safe(gh, tmp, &gl->gl_holders, gh_list) {
gh 325 fs/gfs2/glock.c if (test_bit(HIF_HOLDER, &gh->gh_iflags))
gh 328 fs/gfs2/glock.c gh->gh_error = -EIO;
gh 329 fs/gfs2/glock.c else if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))
gh 330 fs/gfs2/glock.c gh->gh_error = GLR_TRYFAILED;
gh 333 fs/gfs2/glock.c list_del_init(&gh->gh_list);
gh 334 fs/gfs2/glock.c trace_gfs2_glock_queue(gh, 0);
gh 335 fs/gfs2/glock.c gfs2_holder_wake(gh);
gh 352 fs/gfs2/glock.c struct gfs2_holder *gh, *tmp;
gh 356 fs/gfs2/glock.c list_for_each_entry_safe(gh, tmp, &gl->gl_holders, gh_list) {
gh 357 fs/gfs2/glock.c if (test_bit(HIF_HOLDER, &gh->gh_iflags))
gh 359 fs/gfs2/glock.c if (may_grant(gl, gh)) {
gh 360 fs/gfs2/glock.c if (gh->gh_list.prev == &gl->gl_holders &&
gh 364 fs/gfs2/glock.c ret = glops->go_lock(gh);
gh 369 fs/gfs2/glock.c gh->gh_error = ret;
gh 370 fs/gfs2/glock.c list_del_init(&gh->gh_list);
gh 371 fs/gfs2/glock.c trace_gfs2_glock_queue(gh, 0);
gh 372 fs/gfs2/glock.c gfs2_holder_wake(gh);
gh 375 fs/gfs2/glock.c set_bit(HIF_HOLDER, &gh->gh_iflags);
gh 376 fs/gfs2/glock.c trace_gfs2_promote(gh, 1);
gh 377 fs/gfs2/glock.c gfs2_holder_wake(gh);
gh 380 fs/gfs2/glock.c set_bit(HIF_HOLDER, &gh->gh_iflags);
gh 381 fs/gfs2/glock.c trace_gfs2_promote(gh, 0);
gh 382 fs/gfs2/glock.c gfs2_holder_wake(gh);
gh 385 fs/gfs2/glock.c if (gh->gh_list.prev == &gl->gl_holders)
gh 400 fs/gfs2/glock.c struct gfs2_holder *gh;
gh 402 fs/gfs2/glock.c list_for_each_entry(gh, &gl->gl_holders, gh_list) {
gh 403 fs/gfs2/glock.c if (!test_bit(HIF_HOLDER, &gh->gh_iflags))
gh 404 fs/gfs2/glock.c return gh;
gh 459 fs/gfs2/glock.c struct gfs2_holder *gh;
gh 466 fs/gfs2/glock.c gh = find_first_waiter(gl);
gh 475 fs/gfs2/glock.c if (gh && !test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags)) {
gh 478 fs/gfs2/glock.c if ((gh->gh_flags & LM_FLAG_PRIORITY) == 0)
gh 479 fs/gfs2/glock.c list_move_tail(&gh->gh_list, &gl->gl_holders);
gh 480 fs/gfs2/glock.c gh = find_first_waiter(gl);
gh 481 fs/gfs2/glock.c gl->gl_target = gh->gh_state;
gh 486 fs/gfs2/glock.c (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))) {
gh 496 fs/gfs2/glock.c do_xmote(gl, gh, gl->gl_target);
gh 501 fs/gfs2/glock.c do_xmote(gl, gh, LM_ST_UNLOCKED);
gh 518 fs/gfs2/glock.c rv = glops->go_xmote_bh(gl, gh);
gh 543 fs/gfs2/glock.c static void do_xmote(struct gfs2_glock *gl, struct gfs2_holder *gh, unsigned int target)
gh 549 fs/gfs2/glock.c unsigned int lck_flags = (unsigned int)(gh ? gh->gh_flags : 0);
gh 607 fs/gfs2/glock.c struct gfs2_holder *gh;
gh 610 fs/gfs2/glock.c gh = list_entry(gl->gl_holders.next, struct gfs2_holder, gh_list);
gh 611 fs/gfs2/glock.c if (test_bit(HIF_HOLDER, &gh->gh_iflags))
gh 612 fs/gfs2/glock.c return gh;
gh 628 fs/gfs2/glock.c struct gfs2_holder *gh = NULL;
gh 653 fs/gfs2/glock.c gh = find_first_waiter(gl);
gh 654 fs/gfs2/glock.c gl->gl_target = gh->gh_state;
gh 655 fs/gfs2/glock.c if (!(gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)))
gh 658 fs/gfs2/glock.c do_xmote(gl, gh, gl->gl_target);
gh 892 fs/gfs2/glock.c struct gfs2_holder *gh)
gh 894 fs/gfs2/glock.c INIT_LIST_HEAD(&gh->gh_list);
gh 895 fs/gfs2/glock.c gh->gh_gl = gl;
gh 896 fs/gfs2/glock.c gh->gh_ip = _RET_IP_;
gh 897 fs/gfs2/glock.c gh->gh_owner_pid = get_pid(task_pid(current));
gh 898 fs/gfs2/glock.c gh->gh_state = state;
gh 899 fs/gfs2/glock.c gh->gh_flags = flags;
gh 900 fs/gfs2/glock.c gh->gh_error = 0;
gh 901 fs/gfs2/glock.c gh->gh_iflags = 0;
gh 915 fs/gfs2/glock.c void gfs2_holder_reinit(unsigned int state, u16 flags, struct gfs2_holder *gh)
gh 917 fs/gfs2/glock.c gh->gh_state = state;
gh 918 fs/gfs2/glock.c gh->gh_flags = flags;
gh 919 fs/gfs2/glock.c gh->gh_iflags = 0;
gh 920 fs/gfs2/glock.c gh->gh_ip = _RET_IP_;
gh 921 fs/gfs2/glock.c put_pid(gh->gh_owner_pid);
gh 922 fs/gfs2/glock.c gh->gh_owner_pid = get_pid(task_pid(current));
gh 931 fs/gfs2/glock.c void gfs2_holder_uninit(struct gfs2_holder *gh)
gh 933 fs/gfs2/glock.c put_pid(gh->gh_owner_pid);
gh 934 fs/gfs2/glock.c gfs2_glock_put(gh->gh_gl);
gh 935 fs/gfs2/glock.c gfs2_holder_mark_uninitialized(gh);
gh 936 fs/gfs2/glock.c gh->gh_ip = 0;
gh 957 fs/gfs2/glock.c int gfs2_glock_wait(struct gfs2_holder *gh)
gh 962 fs/gfs2/glock.c wait_on_bit(&gh->gh_iflags, HIF_WAIT, TASK_UNINTERRUPTIBLE);
gh 963 fs/gfs2/glock.c gfs2_glock_update_hold_time(gh->gh_gl, start_time);
gh 964 fs/gfs2/glock.c return gh->gh_error;
gh 1108 fs/gfs2/glock.c static inline void add_to_queue(struct gfs2_holder *gh)
gh 1112 fs/gfs2/glock.c struct gfs2_glock *gl = gh->gh_gl;
gh 1118 fs/gfs2/glock.c GLOCK_BUG_ON(gl, gh->gh_owner_pid == NULL);
gh 1119 fs/gfs2/glock.c if (test_and_set_bit(HIF_WAIT, &gh->gh_iflags))
gh 1122 fs/gfs2/glock.c if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) {
gh 1124 fs/gfs2/glock.c try_futile = !may_grant(gl, gh);
gh 1130 fs/gfs2/glock.c if (unlikely(gh2->gh_owner_pid == gh->gh_owner_pid &&
gh 1131 fs/gfs2/glock.c (gh->gh_gl->gl_ops->go_type != LM_TYPE_FLOCK)))
gh 1136 fs/gfs2/glock.c gh->gh_error = GLR_TRYFAILED;
gh 1137 fs/gfs2/glock.c gfs2_holder_wake(gh);
gh 1142 fs/gfs2/glock.c if (unlikely((gh->gh_flags & LM_FLAG_PRIORITY) && !insert_pt))
gh 1146 fs/gfs2/glock.c trace_gfs2_glock_queue(gh, 1);
gh 1150 fs/gfs2/glock.c list_add_tail(&gh->gh_list, &gl->gl_holders);
gh 1151 fs/gfs2/glock.c if (unlikely(gh->gh_flags & LM_FLAG_PRIORITY))
gh 1155 fs/gfs2/glock.c list_add_tail(&gh->gh_list, insert_pt);
gh 1157 fs/gfs2/glock.c gh = list_entry(gl->gl_holders.next, struct gfs2_holder, gh_list);
gh 1158 fs/gfs2/glock.c if (!(gh->gh_flags & LM_FLAG_PRIORITY)) {
gh 1171 fs/gfs2/glock.c fs_err(sdp, "new: %pSR\n", (void *)gh->gh_ip);
gh 1172 fs/gfs2/glock.c fs_err(sdp, "pid: %d\n", pid_nr(gh->gh_owner_pid));
gh 1174 fs/gfs2/glock.c gh->gh_gl->gl_name.ln_type, gh->gh_state);
gh 1188 fs/gfs2/glock.c int gfs2_glock_nq(struct gfs2_holder *gh)
gh 1190 fs/gfs2/glock.c struct gfs2_glock *gl = gh->gh_gl;
gh 1201 fs/gfs2/glock.c add_to_queue(gh);
gh 1202 fs/gfs2/glock.c if (unlikely((LM_FLAG_NOEXP & gh->gh_flags) &&
gh 1211 fs/gfs2/glock.c if (!(gh->gh_flags & GL_ASYNC))
gh 1212 fs/gfs2/glock.c error = gfs2_glock_wait(gh);
gh 1224 fs/gfs2/glock.c int gfs2_glock_poll(struct gfs2_holder *gh)
gh 1226 fs/gfs2/glock.c return test_bit(HIF_WAIT, &gh->gh_iflags) ? 0 : 1;
gh 1235 fs/gfs2/glock.c void gfs2_glock_dq(struct gfs2_holder *gh)
gh 1237 fs/gfs2/glock.c struct gfs2_glock *gl = gh->gh_gl;
gh 1243 fs/gfs2/glock.c if (gh->gh_flags & GL_NOCACHE)
gh 1246 fs/gfs2/glock.c list_del_init(&gh->gh_list);
gh 1247 fs/gfs2/glock.c clear_bit(HIF_HOLDER, &gh->gh_iflags);
gh 1252 fs/gfs2/glock.c glops->go_unlock(gh);
gh 1264 fs/gfs2/glock.c trace_gfs2_glock_queue(gh, 0);
gh 1276 fs/gfs2/glock.c void gfs2_glock_dq_wait(struct gfs2_holder *gh)
gh 1278 fs/gfs2/glock.c struct gfs2_glock *gl = gh->gh_gl;
gh 1279 fs/gfs2/glock.c gfs2_glock_dq(gh);
gh 1290 fs/gfs2/glock.c void gfs2_glock_dq_uninit(struct gfs2_holder *gh)
gh 1292 fs/gfs2/glock.c gfs2_glock_dq(gh);
gh 1293 fs/gfs2/glock.c gfs2_holder_uninit(gh);
gh 1310 fs/gfs2/glock.c unsigned int state, u16 flags, struct gfs2_holder *gh)
gh 1317 fs/gfs2/glock.c error = gfs2_glock_nq_init(gl, state, flags, gh);
gh 1467 fs/gfs2/glock.c const struct gfs2_holder *gh;
gh 1474 fs/gfs2/glock.c list_for_each_entry(gh, &gl->gl_holders, gh_list) {
gh 1475 fs/gfs2/glock.c if (test_bit(HIF_HOLDER, &gh->gh_iflags))
gh 1477 fs/gfs2/glock.c if (LM_FLAG_NOEXP & gh->gh_flags)
gh 1808 fs/gfs2/glock.c static void dump_holder(struct seq_file *seq, const struct gfs2_holder *gh,
gh 1815 fs/gfs2/glock.c if (gh->gh_owner_pid)
gh 1816 fs/gfs2/glock.c gh_owner = pid_task(gh->gh_owner_pid, PIDTYPE_PID);
gh 1818 fs/gfs2/glock.c fs_id_buf, state2str(gh->gh_state),
gh 1819 fs/gfs2/glock.c hflags2str(flags_buf, gh->gh_flags, gh->gh_iflags),
gh 1820 fs/gfs2/glock.c gh->gh_error,
gh 1821 fs/gfs2/glock.c gh->gh_owner_pid ? (long)pid_nr(gh->gh_owner_pid) : -1,
gh 1823 fs/gfs2/glock.c (void *)gh->gh_ip);
gh 1886 fs/gfs2/glock.c const struct gfs2_holder *gh;
gh 1909 fs/gfs2/glock.c list_for_each_entry(gh, &gl->gl_holders, gh_list)
gh 1910 fs/gfs2/glock.c dump_holder(seq, gh, fs_id_buf);
gh 138 fs/gfs2/glock.h struct gfs2_holder *gh;
gh 144 fs/gfs2/glock.h list_for_each_entry(gh, &gl->gl_holders, gh_list) {
gh 145 fs/gfs2/glock.h if (!test_bit(HIF_HOLDER, &gh->gh_iflags))
gh 147 fs/gfs2/glock.h if (gh->gh_owner_pid == pid)
gh 150 fs/gfs2/glock.h gh = NULL;
gh 154 fs/gfs2/glock.h return gh;
gh 186 fs/gfs2/glock.h u16 flags, struct gfs2_holder *gh);
gh 188 fs/gfs2/glock.h struct gfs2_holder *gh);
gh 189 fs/gfs2/glock.h extern void gfs2_holder_uninit(struct gfs2_holder *gh);
gh 190 fs/gfs2/glock.h extern int gfs2_glock_nq(struct gfs2_holder *gh);
gh 191 fs/gfs2/glock.h extern int gfs2_glock_poll(struct gfs2_holder *gh);
gh 192 fs/gfs2/glock.h extern int gfs2_glock_wait(struct gfs2_holder *gh);
gh 194 fs/gfs2/glock.h extern void gfs2_glock_dq(struct gfs2_holder *gh);
gh 195 fs/gfs2/glock.h extern void gfs2_glock_dq_wait(struct gfs2_holder *gh);
gh 196 fs/gfs2/glock.h extern void gfs2_glock_dq_uninit(struct gfs2_holder *gh);
gh 200 fs/gfs2/glock.h struct gfs2_holder *gh);
gh 223 fs/gfs2/glock.h struct gfs2_holder *gh)
gh 227 fs/gfs2/glock.h gfs2_holder_init(gl, state, flags, gh);
gh 229 fs/gfs2/glock.h error = gfs2_glock_nq(gh);
gh 231 fs/gfs2/glock.h gfs2_holder_uninit(gh);
gh 254 fs/gfs2/glock.h static inline void gfs2_holder_mark_uninitialized(struct gfs2_holder *gh)
gh 256 fs/gfs2/glock.h gh->gh_gl = NULL;
gh 259 fs/gfs2/glock.h static inline bool gfs2_holder_initialized(struct gfs2_holder *gh)
gh 261 fs/gfs2/glock.h return gh->gh_gl;
gh 264 fs/gfs2/glock.h static inline bool gfs2_holder_queued(struct gfs2_holder *gh)
gh 266 fs/gfs2/glock.h return !list_empty(&gh->gh_list);
gh 452 fs/gfs2/glops.c static int inode_go_lock(struct gfs2_holder *gh)
gh 454 fs/gfs2/glops.c struct gfs2_glock *gl = gh->gh_gl;
gh 459 fs/gfs2/glops.c if (!ip || (gh->gh_flags & GL_SKIP))
gh 468 fs/gfs2/glops.c if (gh->gh_state != LM_ST_DEFERRED)
gh 473 fs/gfs2/glops.c (gh->gh_state == LM_ST_EXCLUSIVE)) {
gh 550 fs/gfs2/glops.c static int freeze_go_xmote_bh(struct gfs2_glock *gl, struct gfs2_holder *gh)
gh 238 fs/gfs2/incore.h int (*go_xmote_bh) (struct gfs2_glock *gl, struct gfs2_holder *gh);
gh 241 fs/gfs2/incore.h int (*go_lock) (struct gfs2_holder *gh);
gh 242 fs/gfs2/incore.h void (*go_unlock) (struct gfs2_holder *gh);
gh 840 fs/gfs2/inode.c struct gfs2_holder gh;
gh 853 fs/gfs2/inode.c error = gfs2_glock_nq_init(gl, LM_ST_SHARED, LM_FLAG_ANY, &gh);
gh 861 fs/gfs2/inode.c gfs2_glock_dq_uninit(&gh);
gh 867 fs/gfs2/inode.c gfs2_glock_dq_uninit(&gh);
gh 1998 fs/gfs2/inode.c struct gfs2_holder gh;
gh 2002 fs/gfs2/inode.c gfs2_holder_mark_uninitialized(&gh);
gh 2004 fs/gfs2/inode.c error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &gh);
gh 2023 fs/gfs2/inode.c if (gfs2_holder_initialized(&gh))
gh 2024 fs/gfs2/inode.c gfs2_glock_dq_uninit(&gh);
gh 2033 fs/gfs2/inode.c struct gfs2_holder gh;
gh 2038 fs/gfs2/inode.c ret = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
gh 2044 fs/gfs2/inode.c gfs2_glock_dq_uninit(&gh);
gh 2055 fs/gfs2/inode.c struct gfs2_holder gh;
gh 2059 fs/gfs2/inode.c ret = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
gh 2062 fs/gfs2/inode.c gfs2_glock_dq_uninit(&gh);
gh 2074 fs/gfs2/inode.c struct gfs2_holder gh;
gh 2078 fs/gfs2/inode.c ret = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
gh 2081 fs/gfs2/inode.c gfs2_glock_dq_uninit(&gh);
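The fs/gfs2/glock.h hits above (lines 186-231 there) spell out the holder API that nearly every gfs2 caller in this index follows: initialize a gfs2_holder against a glock, enqueue it, do the work, then dequeue and uninitialize, with gfs2_glock_nq_init() and gfs2_glock_dq_uninit() as the combined shorthands. A sketch of that canonical pattern follows; it is kernel-context code only (not a standalone program), the wrapper function is hypothetical, but the calls, lock states, and pairing are exactly those visible in the listing.

    /* Hypothetical caller illustrating the holder pattern from glock.h. */
    #include "incore.h"
    #include "glock.h"

    static int example_read_under_glock(struct gfs2_inode *ip)
    {
            struct gfs2_holder gh;
            int error;

            /* gfs2_glock_nq_init() = gfs2_holder_init() + gfs2_glock_nq(),
             * and it uninitializes the holder again if the enqueue fails
             * (see glock.h lines 223-231 above). */
            error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
            if (error)
                    return error;

            /* ... read inode state while the shared glock is held ... */

            /* gfs2_glock_dq_uninit() = gfs2_glock_dq() + gfs2_holder_uninit(). */
            gfs2_glock_dq_uninit(&gh);
            return 0;
    }

Callers that want finer control, such as fs/gfs2/aops.c and fs/gfs2/file.c above, make the four calls separately, and GL_ASYNC users poll with gfs2_glock_poll() and collect the result with gfs2_glock_wait(), as in the fs/gfs2/super.c entries below.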
gh 1272 fs/gfs2/rgrp.c int gfs2_rgrp_go_lock(struct gfs2_holder *gh)
gh 1274 fs/gfs2/rgrp.c struct gfs2_rgrpd *rgd = gh->gh_gl->gl_object;
gh 1277 fs/gfs2/rgrp.c if (gh->gh_flags & GL_SKIP && sdp->sd_args.ar_rgrplvb)
gh 1308 fs/gfs2/rgrp.c void gfs2_rgrp_go_unlock(struct gfs2_holder *gh)
gh 1310 fs/gfs2/rgrp.c struct gfs2_rgrpd *rgd = gh->gh_gl->gl_object;
gh 1311 fs/gfs2/rgrp.c int demote_requested = test_bit(GLF_DEMOTE, &gh->gh_gl->gl_flags) |
gh 1312 fs/gfs2/rgrp.c test_bit(GLF_PENDING_DEMOTE, &gh->gh_gl->gl_flags);
gh 1401 fs/gfs2/rgrp.c struct gfs2_holder gh;
gh 1440 fs/gfs2/rgrp.c ret = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE, 0, &gh);
gh 1452 fs/gfs2/rgrp.c gfs2_glock_dq_uninit(&gh);
gh 1468 fs/gfs2/rgrp.c gfs2_glock_dq_uninit(&gh);
gh 34 fs/gfs2/rgrp.h extern int gfs2_rgrp_go_lock(struct gfs2_holder *gh);
gh 36 fs/gfs2/rgrp.h extern void gfs2_rgrp_go_unlock(struct gfs2_holder *gh);
gh 237 fs/gfs2/super.c struct gfs2_holder gh;
gh 241 fs/gfs2/super.c &gh);
gh 272 fs/gfs2/super.c gfs2_glock_dq_uninit(&gh);
gh 340 fs/gfs2/super.c struct gfs2_holder gh;
gh 346 fs/gfs2/super.c &gh);
gh 381 fs/gfs2/super.c gfs2_glock_dq_uninit(&gh);
gh 389 fs/gfs2/super.c struct gfs2_holder gh;
gh 419 fs/gfs2/super.c error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, 0, &lfcc->gh);
gh 450 fs/gfs2/super.c gfs2_glock_dq_uninit(&lfcc->gh);
gh 549 fs/gfs2/super.c struct gfs2_holder gh;
gh 559 fs/gfs2/super.c ret = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
gh 588 fs/gfs2/super.c gfs2_glock_dq_uninit(&gh);
gh 844 fs/gfs2/super.c struct gfs2_holder *gha, *gh;
gh 863 fs/gfs2/super.c gh = gha + x;
gh 865 fs/gfs2/super.c if (gfs2_holder_initialized(gh) && gfs2_glock_poll(gh)) {
gh 866 fs/gfs2/super.c err = gfs2_glock_wait(gh);
gh 868 fs/gfs2/super.c gfs2_holder_uninit(gh);
gh 873 fs/gfs2/super.c gfs2_glock2rgrp(gh->gh_gl);
gh 877 fs/gfs2/super.c gfs2_glock_dq_uninit(gh);
gh 881 fs/gfs2/super.c if (gfs2_holder_initialized(gh))
gh 887 fs/gfs2/super.c gh);
gh 1158 fs/gfs2/super.c struct gfs2_holder gh;
gh 1181 fs/gfs2/super.c error = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE, 0, &gh);
gh 1197 fs/gfs2/super.c gfs2_glock_dq_uninit(&gh);
gh 1245 fs/gfs2/super.c struct gfs2_holder gh;
gh 1259 fs/gfs2/super.c gfs2_holder_mark_uninitialized(&gh);
gh 1268 fs/gfs2/super.c error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, GL_SKIP, &gh);
gh 1362 fs/gfs2/super.c if (gfs2_holder_initialized(&gh)) {
gh 1364 fs/gfs2/super.c gfs2_glock_dq_uninit(&gh);
gh 201 fs/gfs2/trace_gfs2.h TP_PROTO(const struct gfs2_holder *gh, int first),
gh 203 fs/gfs2/trace_gfs2.h TP_ARGS(gh, first),
gh 214 fs/gfs2/trace_gfs2.h __entry->dev = gh->gh_gl->gl_name.ln_sbd->sd_vfs->s_dev;
gh 215 fs/gfs2/trace_gfs2.h __entry->glnum = gh->gh_gl->gl_name.ln_number;
gh 216 fs/gfs2/trace_gfs2.h __entry->gltype = gh->gh_gl->gl_name.ln_type;
gh 218 fs/gfs2/trace_gfs2.h __entry->state = glock_trace_state(gh->gh_state);
gh 231 fs/gfs2/trace_gfs2.h TP_PROTO(const struct gfs2_holder *gh, int queue),
gh 233 fs/gfs2/trace_gfs2.h TP_ARGS(gh, queue),
gh 244 fs/gfs2/trace_gfs2.h __entry->dev = gh->gh_gl->gl_name.ln_sbd->sd_vfs->s_dev;
gh 245 fs/gfs2/trace_gfs2.h __entry->glnum = gh->gh_gl->gl_name.ln_number;
gh 246 fs/gfs2/trace_gfs2.h __entry->gltype = gh->gh_gl->gl_name.ln_type;
gh 248 fs/gfs2/trace_gfs2.h __entry->state = glock_trace_state(gh->gh_state);
gh 594 fs/gfs2/xattr.c struct gfs2_holder gh;
gh 600 fs/gfs2/xattr.c ret = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &gh);
gh 604 fs/gfs2/xattr.c gfs2_holder_mark_uninitialized(&gh);
gh 607 fs/gfs2/xattr.c if (gfs2_holder_initialized(&gh))
gh 608 fs/gfs2/xattr.c gfs2_glock_dq_uninit(&gh);
gh 1222 fs/gfs2/xattr.c struct gfs2_holder gh;
gh 1232 fs/gfs2/xattr.c ret = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
gh 1238 fs/gfs2/xattr.c gfs2_holder_mark_uninitialized(&gh);
gh 1241 fs/gfs2/xattr.c if (gfs2_holder_initialized(&gh))
gh 1242 fs/gfs2/xattr.c gfs2_glock_dq_uninit(&gh);
gh 1371 fs/gfs2/xattr.c struct gfs2_holder gh;
gh 1384 fs/gfs2/xattr.c error = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE, 0, &gh);
gh 1408 fs/gfs2/xattr.c gfs2_glock_dq_uninit(&gh);
gh 288 net/ipv4/ip_gre.c unsigned char *gh;
gh 305 net/ipv4/ip_gre.c gh = skb_network_header(skb) +
gh 307 net/ipv4/ip_gre.c pkt_md = (struct erspan_metadata *)(gh + gre_hdr_len +
gh 554 net/ipv6/ip6_gre.c unsigned char *gh;
gh 571 net/ipv6/ip6_gre.c gh = skb_network_header(skb) +
gh 573 net/ipv6/ip6_gre.c pkt_md = (struct erspan_metadata *)(gh + gre_hdr_len +
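The final net/ipv4/ip_gre.c and net/ipv6/ip6_gre.c entries are truncated mid-expression, but the shape of the computation is still visible: gh is aimed at the GRE header (network header start plus network header length), and the ERSPAN metadata is located past the GRE header. Below is a userspace sketch of that pointer arithmetic; the 8-byte ERSPAN base-header term and the concrete header sizes are assumptions for illustration only, since the listing cuts off after "gre_hdr_len +".

    #include <stdio.h>
    #include <stdint.h>

    #define ERSPAN_BASE_HLEN 8      /* assumed ERSPAN base header size */

    int main(void)
    {
            uint8_t pkt[128] = { 0 };
            uint8_t *net_hdr = pkt; /* stands in for skb_network_header(skb) */
            size_t net_hlen = 20;   /* illustrative: IPv4 header, no options */
            size_t gre_hdr_len = 8; /* illustrative: GRE header with sequence number */

            /* gh points at the GRE header, right after the network header */
            uint8_t *gh = net_hdr + net_hlen;

            /* the metadata follows the GRE and ERSPAN base headers */
            uint8_t *pkt_md = gh + gre_hdr_len + ERSPAN_BASE_HLEN;

            printf("metadata offset: %td bytes into the packet\n", pkt_md - pkt);
            return 0;
    }

The IPv6 path (ip6_gre.c lines 554-573) performs the same computation with an IPv6 network header in front of the GRE header.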