task_ctx          340 arch/x86/events/intel/lbr.c static void __intel_pmu_lbr_restore(struct x86_perf_task_context *task_ctx)
task_ctx          347 arch/x86/events/intel/lbr.c 	if (task_ctx->lbr_callstack_users == 0 ||
task_ctx          348 arch/x86/events/intel/lbr.c 	    task_ctx->lbr_stack_state == LBR_NONE) {
task_ctx          353 arch/x86/events/intel/lbr.c 	tos = task_ctx->tos;
task_ctx          359 arch/x86/events/intel/lbr.c 	if ((task_ctx == cpuc->last_task_ctx) &&
task_ctx          360 arch/x86/events/intel/lbr.c 	    (task_ctx->log_id == cpuc->last_log_id) &&
task_ctx          362 arch/x86/events/intel/lbr.c 		task_ctx->lbr_stack_state = LBR_NONE;
task_ctx          367 arch/x86/events/intel/lbr.c 	for (i = 0; i < task_ctx->valid_lbrs; i++) {
task_ctx          369 arch/x86/events/intel/lbr.c 		wrlbr_from(lbr_idx, task_ctx->lbr_from[i]);
task_ctx          370 arch/x86/events/intel/lbr.c 		wrlbr_to  (lbr_idx, task_ctx->lbr_to[i]);
task_ctx          373 arch/x86/events/intel/lbr.c 			wrmsrl(MSR_LBR_INFO_0 + lbr_idx, task_ctx->lbr_info[i]);
task_ctx          385 arch/x86/events/intel/lbr.c 	task_ctx->lbr_stack_state = LBR_NONE;
task_ctx          388 arch/x86/events/intel/lbr.c static void __intel_pmu_lbr_save(struct x86_perf_task_context *task_ctx)
task_ctx          395 arch/x86/events/intel/lbr.c 	if (task_ctx->lbr_callstack_users == 0) {
task_ctx          396 arch/x86/events/intel/lbr.c 		task_ctx->lbr_stack_state = LBR_NONE;
task_ctx          407 arch/x86/events/intel/lbr.c 		task_ctx->lbr_from[i] = from;
task_ctx          408 arch/x86/events/intel/lbr.c 		task_ctx->lbr_to[i]   = rdlbr_to(lbr_idx);
task_ctx          410 arch/x86/events/intel/lbr.c 			rdmsrl(MSR_LBR_INFO_0 + lbr_idx, task_ctx->lbr_info[i]);
task_ctx          412 arch/x86/events/intel/lbr.c 	task_ctx->valid_lbrs = i;
task_ctx          413 arch/x86/events/intel/lbr.c 	task_ctx->tos = tos;
task_ctx          414 arch/x86/events/intel/lbr.c 	task_ctx->lbr_stack_state = LBR_VALID;
task_ctx          416 arch/x86/events/intel/lbr.c 	cpuc->last_task_ctx = task_ctx;
task_ctx          417 arch/x86/events/intel/lbr.c 	cpuc->last_log_id = ++task_ctx->log_id;
task_ctx          423 arch/x86/events/intel/lbr.c 	struct x86_perf_task_context *task_ctx;
task_ctx          433 arch/x86/events/intel/lbr.c 	task_ctx = ctx ? ctx->task_ctx_data : NULL;
task_ctx          434 arch/x86/events/intel/lbr.c 	if (task_ctx) {
task_ctx          436 arch/x86/events/intel/lbr.c 			__intel_pmu_lbr_restore(task_ctx);
task_ctx          438 arch/x86/events/intel/lbr.c 			__intel_pmu_lbr_save(task_ctx);
task_ctx          460 arch/x86/events/intel/lbr.c 	struct x86_perf_task_context *task_ctx;
task_ctx          468 arch/x86/events/intel/lbr.c 		task_ctx = event->ctx->task_ctx_data;
task_ctx          469 arch/x86/events/intel/lbr.c 		task_ctx->lbr_callstack_users++;
task_ctx          501 arch/x86/events/intel/lbr.c 	struct x86_perf_task_context *task_ctx;
task_ctx          508 arch/x86/events/intel/lbr.c 		task_ctx = event->ctx->task_ctx_data;
task_ctx          509 arch/x86/events/intel/lbr.c 		task_ctx->lbr_callstack_users--;
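
The arch/x86/events/intel/lbr.c hits above all belong to the LBR call-stack save/restore path that perf runs on context switch: lbr_callstack_users gates the work, LBR_VALID/LBR_NONE track whether the per-task snapshot is usable, and last_task_ctx/last_log_id let a back-to-back restore of the same task skip the MSR writes. Below is a minimal user-space sketch of that control flow only, assuming a plain array in place of the from/to MSRs and dropping the tos and LBR_INFO handling; it is a model of the pattern, not the kernel code.

/*
 * Sketch only: field and state names mirror x86_perf_task_context,
 * but the hardware MSR reads/writes are replaced by the msrs[] array.
 */
#include <stdio.h>
#include <string.h>

#define NR_LBR 4

enum { LBR_NONE, LBR_VALID };

static struct { unsigned long from[NR_LBR], to[NR_LBR]; } msrs;

struct task_context {
	unsigned long	lbr_from[NR_LBR];
	unsigned long	lbr_to[NR_LBR];
	int		valid_lbrs;
	int		lbr_callstack_users;
	int		lbr_stack_state;
	unsigned long	log_id;
};

static struct {
	struct task_context	*last_task_ctx;
	unsigned long		last_log_id;
} cpuc;

/* sched-out: snapshot the "hardware" stack into the task context */
static void lbr_save(struct task_context *task_ctx)
{
	int i;

	if (task_ctx->lbr_callstack_users == 0) {
		task_ctx->lbr_stack_state = LBR_NONE;
		return;
	}

	for (i = 0; i < NR_LBR; i++) {
		if (!msrs.from[i])		/* stop at the first empty slot */
			break;
		task_ctx->lbr_from[i] = msrs.from[i];
		task_ctx->lbr_to[i]   = msrs.to[i];
	}
	task_ctx->valid_lbrs = i;
	task_ctx->lbr_stack_state = LBR_VALID;

	/* remember who saved last so an immediate restore can be skipped */
	cpuc.last_task_ctx = task_ctx;
	cpuc.last_log_id = ++task_ctx->log_id;
}

/* sched-in: write the snapshot back unless the registers are still intact */
static void lbr_restore(struct task_context *task_ctx)
{
	int i;

	if (task_ctx->lbr_callstack_users == 0 ||
	    task_ctx->lbr_stack_state == LBR_NONE)
		return;

	if (task_ctx == cpuc.last_task_ctx &&
	    task_ctx->log_id == cpuc.last_log_id) {
		task_ctx->lbr_stack_state = LBR_NONE;
		return;				/* nothing ran in between */
	}

	for (i = 0; i < task_ctx->valid_lbrs; i++) {
		msrs.from[i] = task_ctx->lbr_from[i];
		msrs.to[i]   = task_ctx->lbr_to[i];
	}
	task_ctx->lbr_stack_state = LBR_NONE;
}

int main(void)
{
	struct task_context t = { .lbr_callstack_users = 1 };

	msrs.from[0] = 0x1000; msrs.to[0] = 0x2000;
	lbr_save(&t);
	memset(&msrs, 0, sizeof(msrs));	/* another task clobbers the LBRs */
	cpuc.last_task_ctx = NULL;	/* ...and saved its own context last */
	lbr_restore(&t);
	printf("restored from=%#lx to=%#lx\n", msrs.from[0], msrs.to[0]);
	return 0;
}

The real restore additionally consults the top-of-stack pointer and the LBR_INFO MSRs, which is what the tos and lbr_info hits in the listing correspond to.
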
task_ctx          244 drivers/net/ethernet/qlogic/qed/qed_cxt.h 			 u32 tid, u8 ctx_type, void **task_ctx);
task_ctx          213 drivers/scsi/bnx2fc/bnx2fc.h 	struct fcoe_task_ctx_entry **task_ctx;
task_ctx          775 drivers/scsi/bnx2fc/bnx2fc_els.c 			interface->hba->task_ctx[task_idx];
task_ctx          889 drivers/scsi/bnx2fc/bnx2fc_hwi.c 	task_page = (struct fcoe_task_ctx_entry *)hba->task_ctx[task_idx];
task_ctx         1857 drivers/scsi/bnx2fc/bnx2fc_hwi.c 	hba->task_ctx = kzalloc((task_ctx_arr_sz * sizeof(void *)),
task_ctx         1859 drivers/scsi/bnx2fc/bnx2fc_hwi.c 	if (!hba->task_ctx) {
task_ctx         1879 drivers/scsi/bnx2fc/bnx2fc_hwi.c 		hba->task_ctx[i] = dma_alloc_coherent(&hba->pcidev->dev,
task_ctx         1883 drivers/scsi/bnx2fc/bnx2fc_hwi.c 		if (!hba->task_ctx[i]) {
task_ctx         1897 drivers/scsi/bnx2fc/bnx2fc_hwi.c 		if (hba->task_ctx[i]) {
task_ctx         1900 drivers/scsi/bnx2fc/bnx2fc_hwi.c 				hba->task_ctx[i], hba->task_ctx_dma[i]);
task_ctx         1901 drivers/scsi/bnx2fc/bnx2fc_hwi.c 			hba->task_ctx[i] = NULL;
task_ctx         1908 drivers/scsi/bnx2fc/bnx2fc_hwi.c 	kfree(hba->task_ctx);
task_ctx         1909 drivers/scsi/bnx2fc/bnx2fc_hwi.c 	hba->task_ctx = NULL;
task_ctx         1931 drivers/scsi/bnx2fc/bnx2fc_hwi.c 	if (hba->task_ctx) {
task_ctx         1933 drivers/scsi/bnx2fc/bnx2fc_hwi.c 			if (hba->task_ctx[i]) {
task_ctx         1935 drivers/scsi/bnx2fc/bnx2fc_hwi.c 						    hba->task_ctx[i],
task_ctx         1937 drivers/scsi/bnx2fc/bnx2fc_hwi.c 				hba->task_ctx[i] = NULL;
task_ctx         1940 drivers/scsi/bnx2fc/bnx2fc_hwi.c 		kfree(hba->task_ctx);
task_ctx         1941 drivers/scsi/bnx2fc/bnx2fc_hwi.c 		hba->task_ctx = NULL;
task_ctx          764 drivers/scsi/bnx2fc/bnx2fc_io.c 			interface->hba->task_ctx[task_idx];
task_ctx          902 drivers/scsi/bnx2fc/bnx2fc_io.c 			interface->hba->task_ctx[task_idx];
task_ctx          978 drivers/scsi/bnx2fc/bnx2fc_io.c 		     interface->hba->task_ctx[task_idx];
task_ctx         1036 drivers/scsi/bnx2fc/bnx2fc_io.c 			interface->hba->task_ctx[task_idx];
task_ctx         2083 drivers/scsi/bnx2fc/bnx2fc_io.c 	task_page = (struct fcoe_task_ctx_entry *) hba->task_ctx[task_idx];
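
The bnx2fc hits show a two-level allocation scheme for the task context table: kzalloc() an array of page pointers (hba->task_ctx), dma_alloc_coherent() one page per slot, and unwind everything on partial failure or at teardown. Here is a user-space sketch of that allocate/rollback/free shape, with calloc()/malloc()/free() standing in for the kernel and DMA allocators and NUM_TASK_PAGES chosen arbitrarily.

#include <stdio.h>
#include <stdlib.h>

#define NUM_TASK_PAGES	8
#define CTX_PAGE_SIZE	4096

struct fake_hba {
	void **task_ctx;		/* array of per-page pointers */
};

static void free_task_ctx(struct fake_hba *hba)
{
	int i;

	if (!hba->task_ctx)
		return;

	for (i = 0; i < NUM_TASK_PAGES; i++) {
		free(hba->task_ctx[i]);	/* dma_free_coherent() in the driver */
		hba->task_ctx[i] = NULL;
	}
	free(hba->task_ctx);
	hba->task_ctx = NULL;
}

static int setup_task_ctx(struct fake_hba *hba)
{
	int i;

	/* first level: the pointer array itself */
	hba->task_ctx = calloc(NUM_TASK_PAGES, sizeof(void *));
	if (!hba->task_ctx)
		return -1;

	/* second level: one "DMA" page per slot, rolled back on failure */
	for (i = 0; i < NUM_TASK_PAGES; i++) {
		hba->task_ctx[i] = malloc(CTX_PAGE_SIZE);
		if (!hba->task_ctx[i]) {
			free_task_ctx(hba);
			return -1;
		}
	}
	return 0;
}

int main(void)
{
	struct fake_hba hba = { 0 };

	if (setup_task_ctx(&hba))
		return 1;
	printf("allocated %d task context pages\n", NUM_TASK_PAGES);
	free_task_ctx(&hba);
	return 0;
}

In the driver each slot is a DMA-coherent page holding fcoe_task_ctx_entry structures, which is why the teardown hits pair each page with the saved hba->task_ctx_dma[i] handle.
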
task_ctx          503 drivers/scsi/qedf/qedf.h 	struct e4_fcoe_task_context *task_ctx, struct fcoe_wqe *sqe);
task_ctx          595 drivers/scsi/qedf/qedf_io.c 	struct qedf_ioreq *io_req, struct e4_fcoe_task_context *task_ctx,
task_ctx          612 drivers/scsi/qedf/qedf_io.c 	io_req->task = task_ctx;
task_ctx          613 drivers/scsi/qedf/qedf_io.c 	memset(task_ctx, 0, sizeof(struct e4_fcoe_task_context));
task_ctx          631 drivers/scsi/qedf/qedf_io.c 	io_req->task_params->context = task_ctx;
task_ctx          685 drivers/scsi/qedf/qedf_io.c 	struct e4_fcoe_task_context *task_ctx, struct fcoe_wqe *sqe)
task_ctx          703 drivers/scsi/qedf/qedf_io.c 	memset(task_ctx, 0, sizeof(struct e4_fcoe_task_context));
task_ctx          707 drivers/scsi/qedf/qedf_io.c 	io_req->task = task_ctx;
task_ctx          710 drivers/scsi/qedf/qedf_io.c 	io_req->task_params->context = task_ctx;
task_ctx          861 drivers/scsi/qedf/qedf_io.c 	struct e4_fcoe_task_context *task_ctx;
task_ctx          917 drivers/scsi/qedf/qedf_io.c 	task_ctx = qedf_get_task_mem(&qedf->tasks, xid);
task_ctx          918 drivers/scsi/qedf/qedf_io.c 	if (!task_ctx) {
task_ctx          927 drivers/scsi/qedf/qedf_io.c 	qedf_init_task(fcport, lport, io_req, task_ctx, sqe);
task_ctx         1130 drivers/scsi/qedf/qedf_io.c 	struct e4_fcoe_task_context *task_ctx;
task_ctx         1153 drivers/scsi/qedf/qedf_io.c 	task_ctx = qedf_get_task_mem(&qedf->tasks, xid);
task_ctx           87 drivers/scsi/qedi/qedi_fw.c 	struct e4_iscsi_task_context *task_ctx;
task_ctx           94 drivers/scsi/qedi/qedi_fw.c 	task_ctx = qedi_get_task_mem(&qedi->tasks, cmd->task_id);
task_ctx          119 drivers/scsi/qedi/qedi_fw.c 	memset(task_ctx, '\0', sizeof(*task_ctx));
task_ctx          258 drivers/scsi/qedi/qedi_fw.c 	struct e4_iscsi_task_context *task_ctx;
task_ctx          267 drivers/scsi/qedi/qedi_fw.c 	task_ctx = qedi_get_task_mem(&qedi->tasks, cmd->task_id);
task_ctx          297 drivers/scsi/qedi/qedi_fw.c 	memset(task_ctx, '\0', sizeof(*task_ctx));
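
The qedf and qedi hits follow the same initialisation shape: look up the preallocated firmware task context by task id, zero it, and link it into the per-I/O request. A simplified, self-contained sketch of that flow follows; get_task_mem(), fw_task_context and io_req here are stand-ins for qedf_get_task_mem()/qedi_get_task_mem() and the e4_*_task_context types, not the driver API.

#include <stdio.h>
#include <string.h>

#define MAX_TASKS 16

struct fw_task_context {		/* stands in for e4_*_task_context */
	unsigned int opaque[32];
};

struct task_pool {
	struct fw_task_context blocks[MAX_TASKS];
};

struct io_req {
	unsigned short		xid;
	struct fw_task_context	*task;	/* io_req->task in the drivers */
};

static struct fw_task_context *get_task_mem(struct task_pool *pool,
					    unsigned short xid)
{
	if (xid >= MAX_TASKS)
		return NULL;
	return &pool->blocks[xid];
}

static int init_task(struct task_pool *pool, struct io_req *io_req)
{
	struct fw_task_context *task_ctx;

	task_ctx = get_task_mem(pool, io_req->xid);
	if (!task_ctx)
		return -1;

	/* the drivers clear the whole context before filling it in */
	memset(task_ctx, 0, sizeof(*task_ctx));
	io_req->task = task_ctx;
	return 0;
}

int main(void)
{
	static struct task_pool pool;
	struct io_req req = { .xid = 3 };

	if (init_task(&pool, &req))
		return 1;
	printf("task context for xid %u ready at %p\n", req.xid, (void *)req.task);
	return 0;
}
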
task_ctx          807 include/linux/perf_event.h 	struct perf_event_context	*task_ctx;
task_ctx          213 kernel/events/core.c 	struct perf_event_context *task_ctx = cpuctx->task_ctx;
task_ctx          218 kernel/events/core.c 	perf_ctx_lock(cpuctx, task_ctx);
task_ctx          241 kernel/events/core.c 		WARN_ON_ONCE(task_ctx != ctx);
task_ctx          248 kernel/events/core.c 	perf_ctx_unlock(cpuctx, task_ctx);
task_ctx          311 kernel/events/core.c 	struct perf_event_context *task_ctx = NULL;
task_ctx          319 kernel/events/core.c 		task_ctx = ctx;
task_ctx          322 kernel/events/core.c 	perf_ctx_lock(cpuctx, task_ctx);
task_ctx          338 kernel/events/core.c 			if (WARN_ON_ONCE(cpuctx->task_ctx != ctx))
task_ctx          347 kernel/events/core.c 	perf_ctx_unlock(cpuctx, task_ctx);
task_ctx          815 kernel/events/core.c 		perf_ctx_lock(cpuctx, cpuctx->task_ctx);
task_ctx          841 kernel/events/core.c 		perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
task_ctx         2175 kernel/events/core.c 			WARN_ON_ONCE(cpuctx->task_ctx != ctx);
task_ctx         2176 kernel/events/core.c 			cpuctx->task_ctx = NULL;
task_ctx         2499 kernel/events/core.c 	if (!cpuctx->task_ctx)
task_ctx         2502 kernel/events/core.c 	if (WARN_ON_ONCE(ctx != cpuctx->task_ctx))
task_ctx         2536 kernel/events/core.c 			struct perf_event_context *task_ctx,
task_ctx         2552 kernel/events/core.c 	if (task_ctx)
task_ctx         2553 kernel/events/core.c 		task_ctx_sched_out(cpuctx, task_ctx, event_type);
task_ctx         2567 kernel/events/core.c 	perf_event_sched_in(cpuctx, task_ctx, current);
task_ctx         2574 kernel/events/core.c 	struct perf_event_context *task_ctx = cpuctx->task_ctx;
task_ctx         2576 kernel/events/core.c 	perf_ctx_lock(cpuctx, task_ctx);
task_ctx         2577 kernel/events/core.c 	ctx_resched(cpuctx, task_ctx, EVENT_ALL|EVENT_CPU);
task_ctx         2578 kernel/events/core.c 	perf_ctx_unlock(cpuctx, task_ctx);
task_ctx         2592 kernel/events/core.c 	struct perf_event_context *task_ctx = cpuctx->task_ctx;
task_ctx         2599 kernel/events/core.c 		task_ctx = ctx;
task_ctx         2615 kernel/events/core.c 		WARN_ON_ONCE(reprogram && cpuctx->task_ctx && cpuctx->task_ctx != ctx);
task_ctx         2616 kernel/events/core.c 	} else if (task_ctx) {
task_ctx         2617 kernel/events/core.c 		raw_spin_lock(&task_ctx->lock);
task_ctx         2635 kernel/events/core.c 		ctx_resched(cpuctx, task_ctx, get_event_type(event));
task_ctx         2641 kernel/events/core.c 	perf_ctx_unlock(cpuctx, task_ctx);
task_ctx         2752 kernel/events/core.c 	struct perf_event_context *task_ctx;
task_ctx         2780 kernel/events/core.c 	task_ctx = cpuctx->task_ctx;
task_ctx         2782 kernel/events/core.c 		WARN_ON_ONCE(task_ctx != ctx);
task_ctx         2784 kernel/events/core.c 	ctx_resched(cpuctx, task_ctx, get_event_type(event));
task_ctx         3015 kernel/events/core.c 			WARN_ON_ONCE(cpuctx->task_ctx);
task_ctx         3024 kernel/events/core.c 		WARN_ON_ONCE(cpuctx->task_ctx != ctx);
task_ctx         3026 kernel/events/core.c 			cpuctx->task_ctx = NULL;
task_ctx         3184 kernel/events/core.c 	if (!cpuctx->task_ctx)
task_ctx         3291 kernel/events/core.c 		perf_ctx_lock(cpuctx, cpuctx->task_ctx);
task_ctx         3294 kernel/events/core.c 		pmu->sched_task(cpuctx->task_ctx, sched_in);
task_ctx         3297 kernel/events/core.c 		perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
task_ctx         3482 kernel/events/core.c 			cpuctx->task_ctx = ctx;
task_ctx         3484 kernel/events/core.c 			WARN_ON_ONCE(cpuctx->task_ctx != ctx);
task_ctx         3523 kernel/events/core.c 	if (cpuctx->task_ctx == ctx)
task_ctx         3809 kernel/events/core.c 	struct perf_event_context *task_ctx = NULL;
task_ctx         3818 kernel/events/core.c 	task_ctx = cpuctx->task_ctx;
task_ctx         3819 kernel/events/core.c 	task_rotate = task_ctx ? task_ctx->rotate_necessary : 0;
task_ctx         3824 kernel/events/core.c 	perf_ctx_lock(cpuctx, cpuctx->task_ctx);
task_ctx         3828 kernel/events/core.c 		task_event = ctx_event_to_rotate(task_ctx);
task_ctx         3836 kernel/events/core.c 	if (task_event || (task_ctx && cpu_event))
task_ctx         3837 kernel/events/core.c 		ctx_sched_out(task_ctx, cpuctx, EVENT_FLEXIBLE);
task_ctx         3842 kernel/events/core.c 		rotate_ctx(task_ctx, task_event);
task_ctx         3846 kernel/events/core.c 	perf_event_sched_in(cpuctx, task_ctx, current);
task_ctx         3849 kernel/events/core.c 	perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
task_ctx         3970 kernel/events/core.c 	if (ctx->task && cpuctx->task_ctx != ctx)
task_ctx         6847 kernel/events/core.c 	       struct perf_event_context *task_ctx)
task_ctx         6860 kernel/events/core.c 	if (task_ctx) {
task_ctx         6861 kernel/events/core.c 		perf_iterate_ctx(task_ctx, output, data, false);
task_ctx         6974 kernel/events/core.c 	if (cpuctx->task_ctx)
task_ctx         6975 kernel/events/core.c 		perf_iterate_ctx(cpuctx->task_ctx, __perf_event_output_stop,
task_ctx         7020 kernel/events/core.c 	struct perf_event_context	*task_ctx;
task_ctx         7084 kernel/events/core.c 			      struct perf_event_context *task_ctx,
task_ctx         7096 kernel/events/core.c 		.task_ctx = task_ctx,
task_ctx         7113 kernel/events/core.c 		       task_ctx);
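
Most of the kernel/events/core.c hits revolve around cpuctx->task_ctx, the per-CPU pointer to the context of the task currently scheduled on that CPU, and the perf_ctx_lock()/ctx_resched()/perf_ctx_unlock() bracket around it. The sketch below models only that locking shape, with pthread mutexes standing in for the kernel's raw spinlocks and ctx_resched() reduced to a placeholder; it is not the perf core code.

#include <pthread.h>
#include <stdio.h>

struct perf_ctx {
	pthread_mutex_t lock;
};

struct cpu_ctx {
	struct perf_ctx	ctx;		/* per-CPU context */
	struct perf_ctx	*task_ctx;	/* context of the task on this CPU, or NULL */
};

/* lock the CPU context first, then the task context if one is installed */
static void perf_ctx_lock(struct cpu_ctx *cpuctx, struct perf_ctx *task_ctx)
{
	pthread_mutex_lock(&cpuctx->ctx.lock);
	if (task_ctx)
		pthread_mutex_lock(&task_ctx->lock);
}

static void perf_ctx_unlock(struct cpu_ctx *cpuctx, struct perf_ctx *task_ctx)
{
	if (task_ctx)
		pthread_mutex_unlock(&task_ctx->lock);
	pthread_mutex_unlock(&cpuctx->ctx.lock);
}

static void ctx_resched(struct cpu_ctx *cpuctx, struct perf_ctx *task_ctx)
{
	/* schedule events out and back in; elided in this sketch */
	(void)cpuctx;
	(void)task_ctx;
}

int main(void)
{
	struct perf_ctx tctx = { .lock = PTHREAD_MUTEX_INITIALIZER };
	struct cpu_ctx cpuctx = {
		.ctx = { .lock = PTHREAD_MUTEX_INITIALIZER },
		.task_ctx = &tctx,
	};
	struct perf_ctx *task_ctx = cpuctx.task_ctx;

	perf_ctx_lock(&cpuctx, task_ctx);
	ctx_resched(&cpuctx, task_ctx);
	perf_ctx_unlock(&cpuctx, task_ctx);

	puts("resched done under both context locks");
	return 0;
}

The WARN_ON_ONCE hits in the listing check the matching invariant: whenever cpuctx->task_ctx is set, it must be the same context that the operation was handed.
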
task_ctx          892 security/apparmor/domain.c 	ctx = task_ctx(current);
task_ctx         1169 security/apparmor/domain.c 	struct aa_task_ctx *ctx = task_ctx(current);
task_ctx         1319 security/apparmor/domain.c 	struct aa_task_ctx *ctx = task_ctx(current);
task_ctx           92 security/apparmor/lsm.c 	aa_free_task_ctx(task_ctx(task));
task_ctx           98 security/apparmor/lsm.c 	struct aa_task_ctx *new = task_ctx(task);
task_ctx          100 security/apparmor/lsm.c 	aa_dup_task_ctx(new, task_ctx(current));
task_ctx          581 security/apparmor/lsm.c 	struct aa_task_ctx *ctx = task_ctx(current);
task_ctx          704 security/apparmor/lsm.c 	aa_clear_task_ctx_trans(task_ctx(current));
task_ctx           44 security/apparmor/task.c 	struct aa_task_ctx *ctx = task_ctx(current);
task_ctx           70 security/apparmor/task.c 		aa_clear_task_ctx_trans(task_ctx(current));
task_ctx           95 security/apparmor/task.c 	struct aa_task_ctx *ctx = task_ctx(current);
task_ctx          117 security/apparmor/task.c 	struct aa_task_ctx *ctx = task_ctx(current);
task_ctx          157 security/apparmor/task.c 	struct aa_task_ctx *ctx = task_ctx(current);
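
In the security/apparmor hits, task_ctx() is not a data member but an accessor that returns the aa_task_ctx stored in the task's LSM security blob. A rough user-space model of that accessor pattern is below; the blob size, offset, field types and helper bodies are illustrative stand-ins, not AppArmor's real layout or label handling.

#include <stdio.h>
#include <string.h>

#define BLOB_SIZE	64
#define AA_TASK_OFFSET	16	/* illustrative stand-in for the lbs_task offset */

struct aa_task_ctx {
	const char *nnp;	/* plain strings stand in for aa_label pointers */
	const char *onexec;
};

struct fake_task {
	unsigned char security[BLOB_SIZE];	/* LSM security blob */
};

/* mirror of the accessor pattern: pointer arithmetic into the blob */
static struct aa_task_ctx *task_ctx(struct fake_task *task)
{
	return (struct aa_task_ctx *)(task->security + AA_TASK_OFFSET);
}

static void aa_dup_task_ctx(struct aa_task_ctx *new, const struct aa_task_ctx *old)
{
	*new = *old;		/* the kernel also takes label references here */
}

static void aa_clear_task_ctx_trans(struct aa_task_ctx *ctx)
{
	memset(ctx, 0, sizeof(*ctx));	/* drop any in-progress transition */
}

int main(void)
{
	struct fake_task parent = { { 0 } }, child = { { 0 } };

	task_ctx(&parent)->onexec = "profile://editor";
	aa_dup_task_ctx(task_ctx(&child), task_ctx(&parent));
	printf("child onexec: %s\n", task_ctx(&child)->onexec);
	aa_clear_task_ctx_trans(task_ctx(&child));
	return 0;
}
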