Searched refs:leader (Results 1 - 62 of 62) sorted by relevance

/linux-4.1.27/tools/testing/selftests/powerpc/pmu/ebb/
event_attributes_test.c:17 struct event event, leader; event_attributes() local
67 event_init(&leader, 0x1001e); event_attributes()
68 event_leader_ebb_init(&leader); event_attributes()
69 FAIL_IF(event_open(&leader)); event_attributes()
75 FAIL_IF(event_open_with_group(&event, leader.fd)); event_attributes()
76 event_close(&leader); event_attributes()
80 event_init(&leader, 0x1001e); event_attributes()
81 event_leader_ebb_init(&leader); event_attributes()
82 FAIL_IF(event_open(&leader)); event_attributes()
86 /* Expected to fail, event doesn't request EBB, leader does */ event_attributes()
87 FAIL_IF(event_open_with_group(&event, leader.fd) == 0); event_attributes()
88 event_close(&leader); event_attributes()
91 event_init(&leader, 0x1001e); event_attributes()
92 event_leader_ebb_init(&leader); event_attributes()
94 leader.attr.config &= ~(1ull << 63); event_attributes()
96 FAIL_IF(event_open(&leader)); event_attributes()
101 /* Expected to fail, leader doesn't request EBB */ event_attributes()
102 FAIL_IF(event_open_with_group(&event, leader.fd) == 0); event_attributes()
103 event_close(&leader); event_attributes()
106 event_init(&leader, 0x1001e); event_attributes()
107 event_leader_ebb_init(&leader); event_attributes()
108 leader.attr.exclusive = 0; event_attributes()
109 /* Expected to fail, leader isn't exclusive */ event_attributes()
110 FAIL_IF(event_open(&leader) == 0); event_attributes()
113 event_init(&leader, 0x1001e); event_attributes()
114 event_leader_ebb_init(&leader); event_attributes()
115 leader.attr.pinned = 0; event_attributes()
116 /* Expected to fail, leader isn't pinned */ event_attributes()
117 FAIL_IF(event_open(&leader) == 0); event_attributes()
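
The event_attributes_test.c hits above open an EBB leader and then attach a second event to the leader's group by passing leader.fd. Stripped of the PowerPC EBB configuration bits the test also exercises, the underlying mechanism is the group_fd argument of perf_event_open(2). A minimal userspace sketch of just that generic grouping (event choice and the busy loop are arbitrary, not part of the test):

#include <linux/perf_event.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <stdint.h>
#include <string.h>
#include <stdio.h>

static int open_event(uint64_t config, int group_fd)
{
    struct perf_event_attr attr;

    memset(&attr, 0, sizeof(attr));
    attr.size = sizeof(attr);
    attr.type = PERF_TYPE_HARDWARE;
    attr.config = config;
    attr.disabled = (group_fd == -1);   /* only the leader starts disabled */
    attr.exclude_kernel = 1;
    attr.exclude_hv = 1;

    /* group_fd == -1 creates a new group with this event as its leader */
    return syscall(__NR_perf_event_open, &attr, 0, -1, group_fd, 0);
}

int main(void)
{
    uint64_t cycles = 0, instructions = 0;
    int leader, member;
    volatile int i;

    leader = open_event(PERF_COUNT_HW_CPU_CYCLES, -1);
    if (leader < 0) {
        perror("perf_event_open (leader)");
        return 1;
    }
    /* joins the leader's group, like event_open_with_group(&event, leader.fd) */
    member = open_event(PERF_COUNT_HW_INSTRUCTIONS, leader);
    if (member < 0) {
        perror("perf_event_open (member)");
        return 1;
    }

    ioctl(leader, PERF_EVENT_IOC_ENABLE, PERF_IOC_FLAG_GROUP);
    for (i = 0; i < 1000000; i++)
        ;                               /* something to count */
    ioctl(leader, PERF_EVENT_IOC_DISABLE, PERF_IOC_FLAG_GROUP);

    read(leader, &cycles, sizeof(cycles));
    read(member, &instructions, sizeof(instructions));
    printf("cycles=%llu instructions=%llu\n",
           (unsigned long long)cycles, (unsigned long long)instructions);
    return 0;
}
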
/linux-4.1.27/tools/perf/tests/
thread-mg-share.c:13 struct thread *leader; test__thread_mg_share() local
34 leader = machine__findnew_thread(machine, 0, 0); test__thread_mg_share()
39 /* and create 1 separated process, without thread leader */ test__thread_mg_share()
43 leader && t1 && t2 && t3 && other); test__thread_mg_share()
45 mg = leader->mg; test__thread_mg_share()
54 * Verify the other leader was created by previous call. test__thread_mg_share()
59 TEST_ASSERT_VAL("failed to find other leader", other_leader); test__thread_mg_share()
67 thread__delete(leader); test__thread_mg_share()
parse-events.c:566 struct perf_evsel *evsel, *leader; test__group1() local
572 evsel = leader = perf_evlist__first(evlist); test__group1()
582 TEST_ASSERT_VAL("wrong leader", perf_evsel__is_group_leader(evsel)); test__group1()
599 TEST_ASSERT_VAL("wrong leader", evsel->leader == leader); test__group1()
608 struct perf_evsel *evsel, *leader; test__group2() local
614 evsel = leader = perf_evlist__first(evlist); test__group2()
624 TEST_ASSERT_VAL("wrong leader", perf_evsel__is_group_leader(evsel)); test__group2()
640 TEST_ASSERT_VAL("wrong leader", evsel->leader == leader); test__group2()
655 TEST_ASSERT_VAL("wrong leader", perf_evsel__is_group_leader(evsel)); test__group2()
663 struct perf_evsel *evsel, *leader; test__group3() local
669 evsel = leader = perf_evlist__first(evlist); test__group3()
680 TEST_ASSERT_VAL("wrong leader", perf_evsel__is_group_leader(evsel)); test__group3()
682 !strcmp(leader->group_name, "group1")); test__group3()
699 TEST_ASSERT_VAL("wrong leader", evsel->leader == leader); test__group3()
705 evsel = leader = perf_evsel__next(evsel); test__group3()
715 TEST_ASSERT_VAL("wrong leader", perf_evsel__is_group_leader(evsel)); test__group3()
717 !strcmp(leader->group_name, "group2")); test__group3()
732 TEST_ASSERT_VAL("wrong leader", evsel->leader == leader); test__group3()
747 TEST_ASSERT_VAL("wrong leader", perf_evsel__is_group_leader(evsel)); test__group3()
755 struct perf_evsel *evsel, *leader; test__group4() local
761 evsel = leader = perf_evlist__first(evlist); test__group4()
773 TEST_ASSERT_VAL("wrong leader", perf_evsel__is_group_leader(evsel)); test__group4()
790 TEST_ASSERT_VAL("wrong leader", evsel->leader == leader); test__group4()
799 struct perf_evsel *evsel, *leader; test__group5() local
805 evsel = leader = perf_evlist__first(evlist); test__group5()
816 TEST_ASSERT_VAL("wrong leader", perf_evsel__is_group_leader(evsel)); test__group5()
832 TEST_ASSERT_VAL("wrong leader", evsel->leader == leader); test__group5()
837 evsel = leader = perf_evsel__next(evsel); test__group5()
848 TEST_ASSERT_VAL("wrong leader", perf_evsel__is_group_leader(evsel)); test__group5()
864 TEST_ASSERT_VAL("wrong leader", evsel->leader == leader); test__group5()
878 TEST_ASSERT_VAL("wrong leader", perf_evsel__is_group_leader(evsel)); test__group5()
885 struct perf_evsel *evsel, *leader; test__group_gh1() local
891 evsel = leader = perf_evlist__first(evlist); test__group_gh1()
902 TEST_ASSERT_VAL("wrong leader", perf_evsel__is_group_leader(evsel)); test__group_gh1()
917 TEST_ASSERT_VAL("wrong leader", evsel->leader == leader); test__group_gh1()
925 struct perf_evsel *evsel, *leader; test__group_gh2() local
931 evsel = leader = perf_evlist__first(evlist); test__group_gh2()
942 TEST_ASSERT_VAL("wrong leader", perf_evsel__is_group_leader(evsel)); test__group_gh2()
957 TEST_ASSERT_VAL("wrong leader", evsel->leader == leader); test__group_gh2()
965 struct perf_evsel *evsel, *leader; test__group_gh3() local
971 evsel = leader = perf_evlist__first(evlist); test__group_gh3()
982 TEST_ASSERT_VAL("wrong leader", perf_evsel__is_group_leader(evsel)); test__group_gh3()
997 TEST_ASSERT_VAL("wrong leader", evsel->leader == leader); test__group_gh3()
1005 struct perf_evsel *evsel, *leader; test__group_gh4() local
1011 evsel = leader = perf_evlist__first(evlist); test__group_gh4()
1022 TEST_ASSERT_VAL("wrong leader", perf_evsel__is_group_leader(evsel)); test__group_gh4()
1037 TEST_ASSERT_VAL("wrong leader", evsel->leader == leader); test__group_gh4()
1045 struct perf_evsel *evsel, *leader; test__leader_sample1() local
1049 /* cycles - sampling group leader */ test__leader_sample1()
1050 evsel = leader = perf_evlist__first(evlist); test__leader_sample1()
1061 TEST_ASSERT_VAL("wrong leader", evsel->leader == leader); test__leader_sample1()
1075 TEST_ASSERT_VAL("wrong leader", evsel->leader == leader); test__leader_sample1()
1090 TEST_ASSERT_VAL("wrong leader", evsel->leader == leader); test__leader_sample1()
1098 struct perf_evsel *evsel, *leader; test__leader_sample2() local
1102 /* instructions - sampling group leader */ test__leader_sample2()
1103 evsel = leader = perf_evlist__first(evlist); test__leader_sample2()
1114 TEST_ASSERT_VAL("wrong leader", evsel->leader == leader); test__leader_sample2()
1129 TEST_ASSERT_VAL("wrong leader", evsel->leader == leader); test__leader_sample2()
1150 struct perf_evsel *evsel, *leader; test__pinned_group() local
1154 /* cycles - group leader */ test__pinned_group()
1155 evsel = leader = perf_evlist__first(evlist); test__pinned_group()
1160 TEST_ASSERT_VAL("wrong leader", evsel->leader == leader); test__pinned_group()
1163 /* cache-misses - can not be pinned, but will go on with the leader */ test__pinned_group()
hists_link.c:190 static int validate_match(struct hists *leader, struct hists *other) validate_match() argument
192 return __validate_match(leader) || __validate_match(other); validate_match()
248 pr_debug("Invalid count of total leader entries: %zd of %zd\n", __validate_link()
268 static int validate_link(struct hists *leader, struct hists *other) validate_link() argument
270 return __validate_link(leader, 0) || __validate_link(other, 1); validate_link()
attr.py:220 log.debug(" group: [%s] matches group leader %s" %
234 log.debug('[%s] has group leader [%s]' % (name, iname))
/linux-4.1.27/tools/perf/util/
thread.c:15 struct thread *leader; thread__init_map_groups() local
21 leader = machine__findnew_thread(machine, pid, pid); thread__init_map_groups()
22 if (leader) thread__init_map_groups()
23 thread->mg = map_groups__get(leader->mg); thread__init_map_groups()
evsel.h:101 struct perf_evsel *leader; member in struct:perf_evsel
289 * perf_evsel__is_group_leader - Return whether given evsel is a leader event
293 * Return %true if @evsel is a group leader or a stand-alone event
297 return evsel->leader == evsel; perf_evsel__is_group_leader()
306 * leader which has other members in the group
351 return evsel->idx - evsel->leader->idx; perf_evsel__group_idx()
356 (_evsel) && (_evsel)->leader == (_leader); \
evlist.c:150 struct perf_evsel *evsel, *leader; __perf_evlist__set_leader() local
152 leader = list_entry(list->next, struct perf_evsel, node); __perf_evlist__set_leader()
155 leader->nr_members = evsel->idx - leader->idx + 1; __perf_evlist__set_leader()
158 evsel->leader = leader; __evlist__for_each()
1549 if (evsel->leader == move_evsel->leader) evlist__for_each_safe()
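
The evsel.h and evlist.c hits above show that perf encodes grouping purely through the leader pointer: a leader (or stand-alone event) points at itself, members point at the first event of the group, and nr_members/group_idx fall out of the idx arithmetic. A stand-alone illustration of that convention, using a hypothetical struct rather than the real perf_evsel:

#include <stdio.h>
#include <stdbool.h>

struct evsel_like {
    int idx;                        /* position in the event list */
    int nr_members;                 /* only meaningful on the leader */
    struct evsel_like *leader;      /* points to self for leaders */
};

static bool is_group_leader(const struct evsel_like *e)
{
    return e->leader == e;          /* same test as perf_evsel__is_group_leader() */
}

static int group_idx(const struct evsel_like *e)
{
    return e->idx - e->leader->idx; /* 0 for the leader itself */
}

/* mirrors __perf_evlist__set_leader(): first event leads, the rest follow */
static void set_leader(struct evsel_like *evs, int n)
{
    struct evsel_like *leader = &evs[0];
    int i;

    leader->nr_members = evs[n - 1].idx - leader->idx + 1;
    for (i = 0; i < n; i++)
        evs[i].leader = leader;
}

int main(void)
{
    struct evsel_like group[3] = { { .idx = 0 }, { .idx = 1 }, { .idx = 2 } };
    int i;

    set_leader(group, 3);
    for (i = 0; i < 3; i++)
        printf("idx=%d leader=%d group_idx=%d members=%d\n",
               group[i].idx, is_group_leader(&group[i]),
               group_idx(&group[i]), group[0].nr_members);
    return 0;
}
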
hist.c:1365 * Look for pairs to link to the leader buckets (hist_entries):
1367 void hists__match(struct hists *leader, struct hists *other) hists__match() argument
1374 root = &leader->entries_collapsed; hists__match()
1376 root = leader->entries_in; hists__match()
1388 * Look for entries in the other hists that are not present in the leader, if
1389 * we find them, just add a dummy entry on the leader hists, with period=0,
1392 int hists__link(struct hists *leader, struct hists *other) hists__link() argument
1407 pair = hists__add_dummy_entry(leader, pos); hists__link()
hist.h:159 void hists__match(struct hists *leader, struct hists *other);
160 int hists__link(struct hists *leader, struct hists *other);
machine.c:296 struct thread *leader; machine__update_thread_pid() local
306 leader = machine__findnew_thread(machine, th->pid_, th->pid_); machine__update_thread_pid()
307 if (!leader) machine__update_thread_pid()
310 if (!leader->mg) machine__update_thread_pid()
311 leader->mg = map_groups__new(machine); machine__update_thread_pid()
313 if (!leader->mg) machine__update_thread_pid()
316 if (th->mg == leader->mg) machine__update_thread_pid()
331 th->mg = map_groups__get(leader->mg); machine__update_thread_pid()
392 * leader and that would screwed the rb tree. __machine__findnew_thread()
evsel.c:205 evsel->leader = evsel; perf_evsel__init()
617 struct perf_evsel *leader = evsel->leader; perf_evsel__config() local
641 if (leader->nr_members > 1) { perf_evsel__config()
664 * than leader in case leader 'leads' the sampling. perf_evsel__config()
666 if ((leader != evsel) && leader->sample_read) { perf_evsel__config()
996 struct perf_evsel *leader = evsel->leader; get_group_fd() local
1006 BUG_ON(!leader->fd); get_group_fd()
1008 fd = FD(leader, cpu, thread); get_group_fd()
record.c:105 * Set the evsel leader links before we configure attributes, perf_evlist__config()
parse-events.c:686 struct perf_evsel *leader; parse_events__set_leader() local
689 leader = list_entry(list->next, struct perf_evsel, node); parse_events__set_leader()
690 leader->group_name = name ? strdup(name) : NULL; parse_events__set_leader()
header.c:1737 struct perf_evsel *evsel, *leader = NULL; process_group_desc() local
1786 evsel->leader = evsel; process_group_desc()
1799 leader = evsel; process_group_desc()
1804 evsel->leader = leader; process_group_desc()
event.c:491 /* is thread group leader in thread_map? */ perf_event__synthesize_thread_map()
/linux-4.1.27/drivers/media/usb/tm6000/
tm6000-input.c:95 u32 pulse = 0, leader = 0; tm6000_ir_config() local
113 leader = 900; /* ms */ tm6000_ir_config()
118 leader = 900; /* ms - from the NEC decoding */ tm6000_ir_config()
124 leader = ir_clock_mhz * leader; tm6000_ir_config()
126 leader = leader | 0x8000; tm6000_ir_config()
128 dprintk(2, "%s: %s, %d MHz, leader = 0x%04x, pulse = 0x%06x \n", tm6000_ir_config()
131 ir_clock_mhz, leader, pulse); tm6000_ir_config()
144 tm6000_set_reg(dev, TM6010_REQ07_RDC_IR_LEADER1, leader >> 8); tm6000_ir_config()
145 tm6000_set_reg(dev, TM6010_REQ07_RDD_IR_LEADER0, leader); tm6000_ir_config()
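
tm6000_ir_config() above scales the leader (header) pulse width by the IR clock, sets bit 15, and splits the 16-bit result across the IR_LEADER1/IR_LEADER0 register pair. A hedged sketch of only that packing arithmetic; write_reg() and the clock value are illustrative stand-ins, not the driver's API:

#include <stdio.h>
#include <stdint.h>

/* illustrative stand-in for the driver's tm6000_set_reg() register write */
static void write_reg(const char *name, uint8_t val)
{
    printf("%s <= 0x%02x\n", name, val);
}

int main(void)
{
    uint32_t ir_clock_mhz = 3;      /* assumed clock value, for illustration only */
    uint32_t leader = 900;          /* leader width, as in tm6000_ir_config() */

    leader = ir_clock_mhz * leader; /* scale to IR clock ticks */
    leader |= 0x8000;               /* flag bit carried in the high byte */

    /* split the 16-bit value across the LEADER1/LEADER0 register pair */
    write_reg("TM6010_REQ07_RDC_IR_LEADER1", leader >> 8);
    write_reg("TM6010_REQ07_RDD_IR_LEADER0", leader & 0xff);
    return 0;
}
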
/linux-4.1.27/drivers/media/rc/img-ir/
img-ir-sanyo.c:91 /* leader symbol */
116 /* leader symbol */
img-ir-hw.h:32 * @hdrtog: Detect header toggle symbol after leader symbol
33 * @ldrdec: Don't discard leader if maximum width reached
38 * @decodend2: Secondary decoder enable (no leader symbol)
96 * @s10: Zero symbol timing data for secondary (no leader symbol) decoder
97 * @s11: One symbol timing data for secondary (no leader symbol) decoder
img-ir-nec.c:120 /* leader symbol */
145 /* leader symbol */
img-ir-jvc.c:63 /* leader symbol */
img-ir-rc6.c:91 /* leader symbol */
img-ir-sony.c:129 /* leader symbol */
img-ir-hw.c:96 /* default s10 and s11 to s00 and s01 if no leader */ img_ir_timings_preprocess()
299 /* leader symbol timings are divided by 16 */ img_ir_timings_convert()
/linux-4.1.27/arch/alpha/kernel/
pci_iommu.c:496 -1 : Not leader, physically adjacent to previous.
497 -2 : Not leader, virtually adjacent to previous.
498 Write dma_length of each leader with the combined lengths of
509 struct scatterlist *leader; sg_classify()
513 leader = sg; sg_classify()
515 leader_length = leader->length; sg_classify()
516 next_paddr = SG_ENT_PHYS_ADDRESS(leader) + leader_length; sg_classify()
537 leader->dma_address = leader_flag; sg_classify()
538 leader->dma_length = leader_length; sg_classify()
539 leader = sg; sg_classify()
547 leader->dma_address = leader_flag; sg_classify()
548 leader->dma_length = leader_length;
551 /* Given a scatterlist leader, choose an allocation method and fill
555 sg_fill(struct device *dev, struct scatterlist *leader, struct scatterlist *end, sg_fill()
559 unsigned long paddr = SG_ENT_PHYS_ADDRESS(leader); sg_fill()
560 long size = leader->dma_length; sg_fill()
568 if (leader->dma_address == 0 sg_fill()
582 if (leader->dma_address == 0 && dac_allowed) { sg_fill()
600 if (leader->dma_address == 0) sg_fill()
605 sg_classify(dev, leader, end, 0); sg_fill()
606 return sg_fill(dev, leader, end, out, arena, max_dma, dac_allowed); sg_fill()
618 sg = leader; sg_fill()
640 last_sg - leader, SG_ENT_VIRT_ADDRESS(last_sg), sg_fill()
644 last_sg - leader, SG_ENT_VIRT_ADDRESS(last_sg), sg_fill()
508 struct scatterlist *leader; sg_classify() local
554 sg_fill(struct device *dev, struct scatterlist *leader, struct scatterlist *end, struct scatterlist *out, struct pci_iommu_arena *arena, dma_addr_t max_dma, int dac_allowed) sg_fill() argument
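
sg_classify() above elects a leader for each run of adjacent scatterlist entries, tags followers as physically (-1) or virtually (-2) adjacent, and records the run's combined length on the leader. A simplified userspace sketch of the classification idea over a plain array (physical adjacency only; not the kernel scatterlist API):

#include <stdio.h>

#define LEADER_PHYS_ADJ  -1  /* not a leader, physically adjacent to previous */
#define LEADER_VIRT_ADJ  -2  /* not a leader, virtually adjacent (not modelled here) */

struct seg {
    unsigned long paddr;     /* physical address of the segment */
    long len;                /* segment length */
    long flag;               /* 0 = leader, otherwise one of the tags above */
    long combined;           /* on a leader: total length of its run */
};

static void classify(struct seg *sg, int n)
{
    /* the kernel additionally tags runs that are only virtually adjacent
     * when an IOMMU mapping is allowed; this sketch keeps just the
     * physically-adjacent case */
    struct seg *leader = &sg[0];
    long run = leader->len;
    unsigned long next = leader->paddr + leader->len;
    int i;

    for (i = 1; i < n; i++) {
        if (sg[i].paddr == next) {
            sg[i].flag = LEADER_PHYS_ADJ;   /* extend the current run */
            run += sg[i].len;
        } else {
            leader->combined = run;         /* close the run, start a new leader */
            leader = &sg[i];
            run = leader->len;
        }
        next = sg[i].paddr + sg[i].len;
    }
    leader->combined = run;
}

int main(void)
{
    struct seg sg[] = {
        { 0x1000, 0x1000 }, { 0x2000, 0x800 }, { 0x9000, 0x1000 },
    };
    int i;

    classify(sg, 3);
    for (i = 0; i < 3; i++)
        printf("seg %d: flag=%ld combined=%ld\n", i, sg[i].flag, sg[i].combined);
    return 0;
}
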
/linux-4.1.27/fs/
exec.c:926 * do is to wait for the thread group leader to become inactive, de_thread()
930 struct task_struct *leader = tsk->group_leader; de_thread() local
940 if (likely(leader->exit_state)) de_thread()
954 * from sister threads now dead. But in this non-leader de_thread()
955 * exec, nothing survives from the original leader thread, de_thread()
960 tsk->start_time = leader->start_time; de_thread()
961 tsk->real_start_time = leader->real_start_time; de_thread()
963 BUG_ON(!same_thread_group(leader, tsk)); de_thread()
969 * the former thread group leader: de_thread()
972 /* Become a process group leader with the old leader's pid. de_thread()
973 * The old leader becomes a thread of the this thread group. de_thread()
974 * Note: The old leader also uses this pid until release_task de_thread()
977 tsk->pid = leader->pid; de_thread()
978 change_pid(tsk, PIDTYPE_PID, task_pid(leader)); de_thread()
979 transfer_pid(leader, tsk, PIDTYPE_PGID); de_thread()
980 transfer_pid(leader, tsk, PIDTYPE_SID); de_thread()
982 list_replace_rcu(&leader->tasks, &tsk->tasks); de_thread()
983 list_replace_init(&leader->sibling, &tsk->sibling); de_thread()
986 leader->group_leader = tsk; de_thread()
989 leader->exit_signal = -1; de_thread()
991 BUG_ON(leader->exit_state != EXIT_ZOMBIE); de_thread()
992 leader->exit_state = EXIT_DEAD; de_thread()
999 if (unlikely(leader->ptrace)) de_thread()
1000 __wake_up_parent(leader, leader->parent); de_thread()
1004 release_task(leader); de_thread()
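
de_thread() above is why an execve() issued by a non-leader thread ends up with the old thread-group leader's PID: the exec'ing thread takes over leader->pid and the old leader is reaped. A small userspace demonstration (build with -pthread); the pid echoed after the exec matches the leader PID printed first, not the thread's TID:

#define _GNU_SOURCE
#include <pthread.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

static void *exec_from_thread(void *arg)
{
    printf("non-leader thread: tid=%ld tgid=%d\n",
           (long)syscall(SYS_gettid), (int)getpid());
    fflush(stdout);
    /* de_thread() hands this thread the leader's PID, so $$ below
     * equals the "leader pid" printed by main(), not the TID above */
    execl("/bin/sh", "sh", "-c", "echo after exec: pid=$$", (char *)NULL);
    perror("execl");
    return NULL;
}

int main(void)
{
    pthread_t t;

    printf("leader pid=%d\n", (int)getpid());
    fflush(stdout);
    pthread_create(&t, NULL, exec_from_thread, NULL);
    pthread_join(t, NULL);   /* never returns: the exec replaces the whole process */
    return 0;
}
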
coredump.c:331 * None of sub-threads can fork after zap_process(leader). All zap_threads()
343 * It does list_replace_rcu(&leader->tasks, &current->tasks), zap_threads()
344 * we must see either old or new leader, this does not matter. zap_threads()
349 * Note also that "g" can be the old leader with ->mm == NULL zap_threads()
352 * clear the ->next pointer, we will find the new leader via zap_threads()
586 * of the process group leader. That way we get the do_coredump()
binfmt_elf_fdpic.c:1342 * This is the record for the group leader. It shows the fill_prstatus()
binfmt_elf.c:1409 * This is the record for the group leader. It shows the fill_prstatus()
/linux-4.1.27/kernel/
exit.c:122 * skip the group leader because it is the last user of signal_struct, __exit_signal()
171 struct task_struct *leader; release_task() local
187 * If we are the last non-leader member of the thread release_task()
188 * group, and the leader is zombie, then notify the release_task()
189 * group leader's parent process. (if it wants notification.) release_task()
192 leader = p->group_leader; release_task()
193 if (leader != p && thread_group_empty(leader) release_task()
194 && leader->exit_state == EXIT_ZOMBIE) { release_task()
196 * If we were the last child thread and the leader has release_task()
197 * exited already, and the leader's parent ignores SIGCHLD, release_task()
198 * then we are the one who should release the leader. release_task()
200 zap_leader = do_notify_parent(leader, leader->exit_signal); release_task()
202 leader->exit_state = EXIT_DEAD; release_task()
209 p = leader; release_task()
618 /* mt-exec, de_thread() is waiting for group leader */ exit_notify()
881 * thread is not the thread group leader.
935 * using a signal other than SIGCHLD, or a non-leader thread which eligible_child()
1028 * The resource counters for the group leader are in its wait_task_zombie()
1045 * in the group including the group leader. wait_task_zombie()
cgroup.c:2066 * leader after migration. This works because cgroup_migrate() cgroup_task_migrate()
2067 * ensures that the dst_cset of the leader is the first on the cgroup_task_migrate()
2215 * @leader: the leader of the process or the task to migrate
2216 * @threadgroup: whether @leader points to the whole process or a single task
2218 * Migrate a process or task denoted by @leader to @cgrp. If migrating a
2219 * process, the caller must be holding threadgroup_lock of @leader. The
2230 static int cgroup_migrate(struct cgroup *cgrp, struct task_struct *leader, cgroup_migrate() argument
2250 task = leader; cgroup_migrate()
2265 * cgroup_taskset_first() must always return the leader. cgroup_migrate()
2277 } while_each_thread(leader, task); cgroup_migrate()
2344 * @leader: the task or the leader of the threadgroup to be attached
2347 * Call holding cgroup_mutex and threadgroup_lock of @leader.
2350 struct task_struct *leader, bool threadgroup) cgroup_attach_task()
2359 task = leader; cgroup_attach_task()
2365 } while_each_thread(leader, task); cgroup_attach_task()
2372 ret = cgroup_migrate(dst_cgrp, leader, threadgroup); cgroup_attach_task()
2638 * walk tasks but migrate processes. The leader might even
2349 cgroup_attach_task(struct cgroup *dst_cgrp, struct task_struct *leader, bool threadgroup) cgroup_attach_task() argument
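
cgroup_migrate()/cgroup_attach_task() above take a leader task plus a threadgroup flag, which is what backs the cgroup.procs file: writing a single PID migrates the whole thread group the kernel resolves from it. A hedged userspace sketch; the mount point and group name are assumptions about the local cgroup setup:

#include <stdio.h>
#include <unistd.h>

int main(void)
{
    /* hypothetical path: depends on where and how the hierarchy is mounted */
    const char *procs = "/sys/fs/cgroup/mygroup/cgroup.procs";
    FILE *f = fopen(procs, "w");

    if (!f) {
        perror(procs);
        return 1;
    }
    /* one PID in cgroup.procs migrates every thread of the process,
     * i.e. threadgroup == true in the kernel paths above */
    fprintf(f, "%d\n", (int)getpid());
    if (fclose(f) != 0) {
        perror("cgroup.procs write");
        return 1;
    }
    return 0;
}
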
sys.c:962 if (p->signal->leader) SYSCALL_DEFINE2()
1074 /* Fail if I am already a session leader */ SYSCALL_DEFINE0()
1075 if (group_leader->signal->leader) SYSCALL_DEFINE0()
1084 group_leader->signal->leader = 1; SYSCALL_DEFINE0()
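
The setsid() path above rejects callers that are already process-group leaders and otherwise marks the caller as session leader (signal->leader = 1). Hence the usual userspace pattern: fork() first so the child cannot be a group leader, then setsid() in the child:

#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int main(void)
{
    pid_t pid = fork();

    if (pid < 0) {
        perror("fork");
        return 1;
    }
    if (pid > 0)        /* parent: just exit */
        return 0;

    /* the freshly forked child is never a process-group leader,
     * so this cannot fail with EPERM */
    if (setsid() < 0) {
        perror("setsid");
        exit(1);
    }
    printf("pid=%d sid=%d pgid=%d\n",
           (int)getpid(), (int)getsid(0), (int)getpgid(0));
    /* all three match: the child now leads a new session and process group */
    return 0;
}
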
taskstats.c:648 * Doesn't matter if tsk is the leader or the last group member leaving taskstats_exit()
signal.c:1356 * de_thread() it will find the new leader. kill_pid_info()
1720 * @for_ptracer is %false, @tsk's group leader notifies to its real parent.
2076 * parent of the group leader. The new ptracer will get
2224 * the ptracer of the group leader too unless it's gonna be get_signal()
2475 * should always go to the real parent of the group leader. exit_signals()
cpuset.c:1489 struct task_struct *leader = cgroup_taskset_first(tset); cpuset_attach() local
1519 mm = get_task_mm(leader);
/linux-4.1.27/arch/arm/kernel/
perf_event.c:272 * core perf code won't check that the pmu->ctx == leader->ctx validate_event()
291 struct perf_event *sibling, *leader = event->group_leader; validate_group() local
300 if (!validate_event(event->pmu, &fake_pmu, leader)) validate_group()
303 list_for_each_entry(sibling, &leader->sibling_list, group_entry) { validate_group()
/linux-4.1.27/arch/x86/kernel/cpu/
perf_event_intel_cqm.c:456 struct perf_event *leader, *event; intel_cqm_sched_in_event() local
460 leader = list_first_entry(&cache_groups, struct perf_event, intel_cqm_sched_in_event()
462 event = leader; intel_cqm_sched_in_event()
469 if (__conflict_event(event, leader)) intel_cqm_sched_in_event()
925 * Only the group leader gets to report values. This stops us intel_cqm_event_count()
1057 * And we're the group leader.. intel_cqm_event_destroy()
1061 * If there was a group_other, make that leader, otherwise intel_cqm_event_destroy()
1121 * We only do this for the group leader, rather than for intel_cqm_event_init()
perf_event_intel_uncore.c:291 uncore_collect_events(struct intel_uncore_box *box, struct perf_event *leader, bool dogrp) uncore_collect_events() argument
305 if (is_uncore_event(leader)) { uncore_collect_events()
306 box->event_list[n] = leader; uncore_collect_events()
313 list_for_each_entry(event, &leader->sibling_list, group_entry) { uncore_collect_events()
557 struct perf_event *leader = event->group_leader; uncore_validate_group() local
572 n = uncore_collect_events(fake_box, leader, true); uncore_validate_group()
perf_event.c:929 static int collect_events(struct cpu_hw_events *cpuc, struct perf_event *leader, bool dogrp) collect_events() argument
939 if (is_x86_event(leader)) { collect_events()
942 cpuc->event_list[n] = leader; collect_events()
948 list_for_each_entry(event, &leader->sibling_list, group_entry) { collect_events()
1874 struct perf_event *leader = event->group_leader; validate_group() local
1887 n = collect_events(fake_cpuc, leader, true); validate_group()
/linux-4.1.27/kernel/events/
core.c:1206 static void update_group_times(struct perf_event *leader) update_group_times() argument
1210 update_event_times(leader); update_group_times()
1211 list_for_each_entry(event, &leader->sibling_list, group_entry) update_group_times()
1235 * If we're a stand alone event or group leader, we go to the context list_add_event()
1477 /* Inherit group flags from the previous leader */ perf_group_detach()
2174 * Enabling the leader of a group effectively enables all
2178 * since the non-leader members' sibling_lists will be empty.
2200 struct perf_event *leader = event->group_leader; __perf_event_enable() local
2236 * If the event is in a group and isn't the group leader, __perf_event_enable()
2239 if (leader != event && leader->state != PERF_EVENT_STATE_ACTIVE) __perf_event_enable()
2245 if (event == leader) __perf_event_enable()
2256 if (leader != event) { __perf_event_enable()
2257 group_sched_out(leader, cpuctx, ctx); __perf_event_enable()
2260 if (leader->attr.pinned) { __perf_event_enable()
2261 update_group_times(leader); __perf_event_enable()
2262 leader->state = PERF_EVENT_STATE_ERROR; __perf_event_enable()
3792 struct perf_event *leader = event->group_leader, *sub; perf_event_read_group() local
3793 struct perf_event_context *ctx = leader->ctx; perf_event_read_group()
3800 count = perf_event_read_value(leader, &enabled, &running); perf_event_read_group()
3802 values[n++] = 1 + leader->nr_siblings; perf_event_read_group()
3809 values[n++] = primary_event_id(leader); perf_event_read_group()
3818 list_for_each_entry(sub, &leader->sibling_list, group_entry) { perf_event_read_group()
5094 struct perf_event *leader = event->group_leader, *sub; perf_output_read_group() local
5099 values[n++] = 1 + leader->nr_siblings; perf_output_read_group()
5107 if (leader != event) perf_output_read_group()
5108 leader->pmu->read(leader); perf_output_read_group()
5110 values[n++] = perf_event_count(leader); perf_output_read_group()
5112 values[n++] = primary_event_id(leader); perf_output_read_group()
5116 list_for_each_entry(sub, &leader->sibling_list, group_entry) { perf_output_read_group()
7909 * @group_fd: group leader event fd
8027 * event, and event is, then group leader is not. SYSCALL_DEFINE5()
8065 * Look up the group leader (we will attach this event to it): SYSCALL_DEFINE5()
8106 * Only a group leader can be exclusive or pinned SYSCALL_DEFINE5()
8155 * Install the group siblings before the group leader. SYSCALL_DEFINE5()
8157 * Because a group leader will try and install the entire group SYSCALL_DEFINE5()
8214 * of the group leader will find the pointer to itself in SYSCALL_DEFINE5()
8335 * pass, siblings will not get enabled without a leader, however a perf_pmu_migrate_context()
8336 * leader will enable its siblings, even if those are still on the old perf_pmu_migrate_context()
8699 struct perf_event *leader; inherit_group() local
8703 leader = inherit_event(parent_event, parent, parent_ctx, inherit_group()
8705 if (IS_ERR(leader)) inherit_group()
8706 return PTR_ERR(leader); inherit_group()
8709 child, leader, child_ctx); inherit_group()
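
perf_event_read_group()/perf_output_read_group() above emit 1 + leader->nr_siblings followed by one value per group member. Userspace sees the same layout when it read()s the leader fd after opening it with PERF_FORMAT_GROUP in attr.read_format; a minimal sketch using two software counters:

#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <stdint.h>
#include <string.h>
#include <stdio.h>

static int open_counter(uint64_t config, int group_fd)
{
    struct perf_event_attr attr;

    memset(&attr, 0, sizeof(attr));
    attr.size = sizeof(attr);
    attr.type = PERF_TYPE_SOFTWARE;
    attr.config = config;
    attr.exclude_kernel = 1;
    attr.read_format = PERF_FORMAT_GROUP;   /* one read on the leader returns all members */
    return syscall(__NR_perf_event_open, &attr, 0, -1, group_fd, 0);
}

int main(void)
{
    /* layout with PERF_FORMAT_GROUP: u64 nr, then nr counter values */
    uint64_t buf[1 + 2] = { 0 };
    uint64_t i;
    int leader, member;

    leader = open_counter(PERF_COUNT_SW_TASK_CLOCK, -1);
    member = open_counter(PERF_COUNT_SW_CONTEXT_SWITCHES, leader);
    if (leader < 0 || member < 0) {
        perror("perf_event_open");
        return 1;
    }

    usleep(10000);              /* let the counters accumulate a little */

    if (read(leader, buf, sizeof(buf)) < 0) {
        perror("read");
        return 1;
    }
    printf("nr=%llu\n", (unsigned long long)buf[0]);
    for (i = 0; i < buf[0] && i < 2; i++)
        printf("  value[%llu]=%llu\n",
               (unsigned long long)i, (unsigned long long)buf[1 + i]);
    return 0;
}
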
/linux-4.1.27/security/yama/
yama_lsm.c:166 /* Since a thread can call prctl(), find the group leader yama_task_prctl()
169 * leader checking is handled later when walking the ancestry yama_task_prctl()
/linux-4.1.27/fs/proc/
base.c:2670 struct dentry *dentry, *leader, *dir; proc_flush_task_mnt() local
2688 leader = d_hash_and_lookup(mnt->mnt_root, &name); proc_flush_task_mnt()
2689 if (!leader) proc_flush_task_mnt()
2694 dir = d_hash_and_lookup(leader, &name); proc_flush_task_mnt()
2708 dput(leader); proc_flush_task_mnt()
2837 * group leader, and don't worry if the task we have next_tgid()
2838 * found doesn't happen to be a thread group leader. next_tgid()
3039 struct task_struct *leader = get_proc_task(dir); proc_task_lookup() local
3043 if (!leader) proc_task_lookup()
3058 if (!same_thread_group(leader, task)) proc_task_lookup()
3065 put_task_struct(leader); proc_task_lookup()
3073 * Usually this is just the thread group leader, but if the users
3079 * In the case of a seek we start with the leader and walk nr
3108 * with the leader and walk nr threads forward. first_tid()
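
proc_task_lookup() and first_tid() above iterate a process's threads starting from the thread-group leader, which is also what a directory listing of /proc/<tgid>/task shows; a small sketch for the current process (the leader's TID equals the TGID):

#include <dirent.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
    DIR *dir = opendir("/proc/self/task");
    struct dirent *de;

    if (!dir) {
        perror("/proc/self/task");
        return 1;
    }
    printf("tgid (leader) = %d, threads:\n", (int)getpid());
    while ((de = readdir(dir)) != NULL) {
        if (de->d_name[0] == '.')
            continue;           /* skip "." and ".." */
        printf("  tid %s\n", de->d_name);
    }
    closedir(dir);
    return 0;
}
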
array.c:626 * So one need to stop or freeze the leader and all get_children_pid()
/linux-4.1.27/kernel/rcu/
tree_plugin.h:1895 * Kick the leader kthread for this NOCB group.
2226 /* If we (the leader) don't have CBs, go wait some more. */ nocb_leader_wait()
2265 * an optional leader-follower relationship so that the grace-period
2410 * leader-follower relationships.
2427 /* If we didn't spawn the leader first, reorganize! */ rcu_spawn_one_nocb_kthread()
2481 /* How many follower CPU IDs per leader? Default of -1 for sqrt(nr_cpu_ids). */
2486 * Initialize leader-follower relationships for all no-CBs CPU.
2492 int nl = 0; /* Next leader. */ rcu_organize_nocb_kthreads()
2511 /* New leader, set up for followers & next leader. */ for_each_cpu()
2516 /* Another follower, link to previous leader. */ for_each_cpu()
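
rcu_organize_nocb_kthreads() above groups no-CBs CPUs so that roughly every sqrt(nr_cpu_ids) of them share one leader kthread (rcu_nocb_leader_stride defaults to -1, which selects that square-root stride). A rough sketch of the grouping idea only, assuming contiguous CPU ids and every CPU being a no-CBs CPU (build with -lm):

#include <math.h>
#include <stdio.h>

int main(void)
{
    int nr_cpu_ids = 16;                /* example CPU count */
    int stride = (int)sqrt(nr_cpu_ids); /* default stride: sqrt(nr_cpu_ids) */
    int cpu;

    for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
        if (cpu % stride == 0)
            printf("cpu %d: nocb leader\n", cpu);
        else
            printf("cpu %d: follower of cpu %d\n", cpu, cpu - cpu % stride);
    }
    return 0;
}
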
tree.h:356 /* The following fields are used by the leader, hence own cacheline. */
360 bool nocb_leader_sleep; /* Is the nocb leader thread asleep? */
/linux-4.1.27/arch/arm64/kernel/
perf_event.c:339 * core perf code won't check that the pmu->ctx == leader->ctx validate_event()
358 struct perf_event *sibling, *leader = event->group_leader; validate_group() local
369 if (!validate_event(event->pmu, &fake_pmu, leader)) validate_group()
372 list_for_each_entry(sibling, &leader->sibling_list, group_entry) { validate_group()
/linux-4.1.27/include/linux/
sched.h:680 /* boolean value for session group leader */
681 int leader; member in struct:signal_struct
692 * in __exit_signal, except for the group leader.
709 * group, not including a zombie group leader, (This only differs
1413 struct task_struct *group_leader; /* threadgroup leader */
2575 * to have the pid of the thread group leader without actually being
2576 * the thread group leader. For iteration through the pids in proc
2667 * de_thread() does threadgroup_change_{begin|end}() when a non-leader
2668 * sub-thread becomes a new leader.
signal.h:331 * similar to exit_group. The group leader (only) reports
perf_event.h:357 * either you're a sibling on a group, or you're the group leader.
/linux-4.1.27/arch/powerpc/perf/
core-book3s.c:513 struct perf_event *leader = event->group_leader; ebb_event_check() local
515 /* Event and group leader must agree on EBB */ ebb_event_check()
516 if (is_ebb_event(leader) != is_ebb_event(event)) ebb_event_check()
523 if (!leader->attr.pinned || !leader->attr.exclusive) ebb_event_check()
1827 * hasn't been linked into its leader's sibling list at this point. power_pmu_event_init()
core-fsl-emb.c:530 * hasn't been linked into its leader's sibling list at this point. fsl_emb_pmu_event_init()
/linux-4.1.27/drivers/tty/
tty_io.c:508 * Only callable by the session leader and only if it does not already have
592 * tty_signal_session_leader - sends SIGHUP to session leader
596 * Send SIGHUP and SIGCONT to the session leader and its process group.
619 if (!p->signal->leader) { tty_signal_session_leader()
821 * tty_vhangup_session - hangup session leader exit
824 * The session leader is exiting and hanging up its controlling terminal.
860 * This function is typically called only by the session leader, when
885 if (!current->signal->leader) disassociate_ctty()
2149 current->signal->leader && tty_open()
2449 * leader to set this tty as the controlling tty for the session.
2464 if (current->signal->leader && (task_session(current) == tty->session)) tiocsctty()
2468 * The process must be a session leader and tiocsctty()
2471 if (!current->signal->leader || current->signal->tty) { tiocsctty()
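
tiocsctty() above requires the caller to be a session leader that does not yet have a controlling tty. The classic userspace sequence is fork(), setsid(), then open the terminal with O_NOCTTY and claim it with TIOCSCTTY; a minimal sketch (the device path is just an example):

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

int main(void)
{
    pid_t pid = fork();
    int fd;

    if (pid < 0) {
        perror("fork");
        return 1;
    }
    if (pid > 0)            /* parent exits: the child is then not a group leader */
        return 0;

    if (setsid() < 0) {     /* child becomes session leader, with no ctty yet */
        perror("setsid");
        return 1;
    }

    /* O_NOCTTY: don't let the open itself pick up a controlling tty */
    fd = open("/dev/ttyS0", O_RDWR | O_NOCTTY);     /* example device */
    if (fd < 0) {
        perror("open");
        return 1;
    }
    /* as session leader without a controlling tty, claim this one */
    if (ioctl(fd, TIOCSCTTY, 0) < 0) {
        perror("TIOCSCTTY");
        return 1;
    }
    return 0;
}
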
/linux-4.1.27/tools/perf/
builtin-report.c:436 /* Non-group events are considered as leader */ report__collapse_hists()
439 struct hists *leader_hists = evsel__hists(pos->leader); report__collapse_hists()
/linux-4.1.27/drivers/bus/
arm-cci.c:639 * core perf code won't check that the pmu->ctx == leader->ctx validate_event()
657 struct perf_event *sibling, *leader = event->group_leader; validate_group() local
666 if (!validate_event(event->pmu, &fake_pmu, leader)) validate_group()
669 list_for_each_entry(sibling, &leader->sibling_list, group_entry) { validate_group()
/linux-4.1.27/fs/jfs/
jfs_logmgr.c:624 * No current GC leader, initiate group commit lmNextPage()
704 * start group commit as its group leader. lmGroupCommit()
901 * select the latest ready transaction as new group leader and lmPostGC()
908 * Call lmGCwrite with new group leader lmPostGC()
915 * will elect herself as new group leader. lmPostGC()
/linux-4.1.27/arch/mips/kernel/
perf_event_mipsxx.c:718 struct perf_event *sibling, *leader = event->group_leader; validate_group() local
723 if (mipsxx_pmu_alloc_counter(&fake_cpuc, &leader->hw) < 0) validate_group()
726 list_for_each_entry(sibling, &leader->sibling_list, group_entry) { validate_group()
/linux-4.1.27/kernel/trace/
trace_output.c:367 * we do the lookup on the thread group leader, seq_print_userip_objs()
/linux-4.1.27/drivers/net/ethernet/broadcom/bnx2x/
bnx2x_main.c:1750 * bnx2x_get_leader_lock_resource - get the recovery leader resource id
1754 * Returns the recovery leader resource id according to the engine this function
1766 * bnx2x_trylock_leader_lock- try to acquire a leader lock.
1770 * Tries to acquire a leader lock for current engine.
4340 * 20 - Chip reset flow bit. When set none-leader must wait for both engines
4341 * leader to complete (check for both RESET_IN_PROGRESS bits and not for
9552 * by the MCP (there is only one leader per path). bnx2x_process_kill_chip_reset()
9897 * the first leader that performs a bnx2x_parity_recover()
9915 * point for a leader. bnx2x_parity_recover()
9923 * leader has succeeded and doesn't bnx2x_parity_recover()
9924 * want to be a leader any more. Try bnx2x_parity_recover()
9925 * to continue as a none-leader. bnx2x_parity_recover()
9929 } else { /* non-leader */ bnx2x_parity_recover()
9932 * long as a former leader may have bnx2x_parity_recover()
9938 /* I'm a leader now! Restart a bnx2x_parity_recover()
bnx2x_cmn.h:389 * bnx2x_release_leader_lock - release recovery leader lock
bnx2x_cmn.c:2954 * leader to complete or for other functions to unload and bnx2x_nic_unload()
3025 * (the engine which leader will perform the recovery bnx2x_nic_unload()
/linux-4.1.27/kernel/time/
posix-cpu-timers.c:423 * is being reaped. When the final (leader) thread in the group is reaped,
/linux-4.1.27/scripts/
checkpatch.pl:892 # Drop the diff line leader and expand tabs

Completed in 2001 milliseconds