/linux-4.4.14/arch/x86/kvm/ |
D | emulate.c | 213 int (*execute)(struct x86_emulate_ctxt *ctxt); 222 int (*check_perm)(struct x86_emulate_ctxt *ctxt); 261 static ulong reg_read(struct x86_emulate_ctxt *ctxt, unsigned nr) in reg_read() argument 263 if (!(ctxt->regs_valid & (1 << nr))) { in reg_read() 264 ctxt->regs_valid |= 1 << nr; in reg_read() 265 ctxt->_regs[nr] = ctxt->ops->read_gpr(ctxt, nr); in reg_read() 267 return ctxt->_regs[nr]; in reg_read() 270 static ulong *reg_write(struct x86_emulate_ctxt *ctxt, unsigned nr) in reg_write() argument 272 ctxt->regs_valid |= 1 << nr; in reg_write() 273 ctxt->regs_dirty |= 1 << nr; in reg_write() [all …]
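The reg_read()/reg_write() hits above show the emulator's lazy register cache: regs_valid records which guest GPRs have already been pulled in through ctxt->ops->read_gpr(), and regs_dirty marks the ones that need writing back. A minimal user-space sketch of the same idiom — the read_gpr callback and all names here are invented for illustration, not the kernel API:

    #include <stdio.h>

    #define NR_REGS 16

    struct reg_cache {
        unsigned long regs[NR_REGS];
        unsigned int valid;                     /* bit n set: regs[n] holds a fetched value */
        unsigned int dirty;                     /* bit n set: regs[n] must be written back  */
        unsigned long (*read_gpr)(unsigned nr); /* backing store, consulted lazily          */
    };

    static unsigned long reg_read(struct reg_cache *c, unsigned nr)
    {
        if (!(c->valid & (1u << nr))) {         /* first touch: fetch and cache */
            c->valid |= 1u << nr;
            c->regs[nr] = c->read_gpr(nr);
        }
        return c->regs[nr];
    }

    static unsigned long *reg_write(struct reg_cache *c, unsigned nr)
    {
        c->valid |= 1u << nr;                   /* the cache now owns this value */
        c->dirty |= 1u << nr;                   /* and it must be flushed later  */
        return &c->regs[nr];
    }

    static unsigned long fake_read_gpr(unsigned nr) { return nr * 100ul; }

    int main(void)
    {
        struct reg_cache c = { .read_gpr = fake_read_gpr };

        printf("r3 = %lu\n", reg_read(&c, 3));  /* lazily fetched: prints 300 */
        *reg_write(&c, 3) = 7;
        printf("r3 = %lu, dirty mask = %#x\n", reg_read(&c, 3), c.dirty);
        return 0;
    }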
|
D | x86.c | 75 #define emul_to_vcpu(ctxt) \ argument 76 container_of(ctxt, struct kvm_vcpu, arch.emulate_ctxt) 187 static int emulator_fix_hypercall(struct x86_emulate_ctxt *ctxt); 4183 static int kvm_fetch_guest_virt(struct x86_emulate_ctxt *ctxt, in kvm_fetch_guest_virt() argument 4187 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); in kvm_fetch_guest_virt() 4209 int kvm_read_guest_virt(struct x86_emulate_ctxt *ctxt, in kvm_read_guest_virt() argument 4213 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); in kvm_read_guest_virt() 4221 static int kvm_read_guest_virt_system(struct x86_emulate_ctxt *ctxt, in kvm_read_guest_virt_system() argument 4225 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); in kvm_read_guest_virt_system() 4229 static int kvm_read_guest_phys_system(struct x86_emulate_ctxt *ctxt, in kvm_read_guest_phys_system() argument [all …]
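The emul_to_vcpu() hit is the usual container_of() conversion: the emulation context is embedded in struct kvm_vcpu, so a pointer to the member can be turned back into the enclosing vcpu. A stand-alone sketch with invented types; the simplified container_of below only mirrors the offsetof arithmetic of the kernel macro:

    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct emulate_ctxt { int mode; };

    struct vcpu {
        int id;
        struct emulate_ctxt emulate_ctxt;  /* embedded member, like arch.emulate_ctxt */
    };

    #define emul_to_vcpu(ctxt) container_of(ctxt, struct vcpu, emulate_ctxt)

    int main(void)
    {
        struct vcpu v = { .id = 42 };
        struct emulate_ctxt *ctxt = &v.emulate_ctxt;

        /* Recover the enclosing vcpu from the member pointer alone. */
        printf("vcpu id = %d\n", emul_to_vcpu(ctxt)->id);
        return 0;
    }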
|
D | x86.h | 167 int kvm_read_guest_virt(struct x86_emulate_ctxt *ctxt, 171 int kvm_write_guest_virt_system(struct x86_emulate_ctxt *ctxt,
|
/linux-4.4.14/drivers/staging/lustre/lustre/obdclass/ |
D | llog_obd.c | 46 struct llog_ctxt *ctxt; in llog_new_ctxt() local 48 ctxt = kzalloc(sizeof(*ctxt), GFP_NOFS); in llog_new_ctxt() 49 if (!ctxt) in llog_new_ctxt() 52 ctxt->loc_obd = obd; in llog_new_ctxt() 53 atomic_set(&ctxt->loc_refcount, 1); in llog_new_ctxt() 55 return ctxt; in llog_new_ctxt() 58 static void llog_ctxt_destroy(struct llog_ctxt *ctxt) in llog_ctxt_destroy() argument 60 if (ctxt->loc_exp) { in llog_ctxt_destroy() 61 class_export_put(ctxt->loc_exp); in llog_ctxt_destroy() 62 ctxt->loc_exp = NULL; in llog_ctxt_destroy() [all …]
|
D | llog.c | 409 int llog_open(const struct lu_env *env, struct llog_ctxt *ctxt, in llog_open() argument 416 LASSERT(ctxt); in llog_open() 417 LASSERT(ctxt->loc_logops); in llog_open() 419 if (ctxt->loc_logops->lop_open == NULL) { in llog_open() 427 (*lgh)->lgh_ctxt = ctxt; in llog_open() 428 (*lgh)->lgh_logops = ctxt->loc_logops; in llog_open() 433 rc = ctxt->loc_logops->lop_open(env, *lgh, logid, name, open_param); in llog_open()
|
D | obd_config.c | 1252 int class_config_parse_llog(const struct lu_env *env, struct llog_ctxt *ctxt, in class_config_parse_llog() argument 1261 rc = llog_open(env, ctxt, &llh, NULL, name, LLOG_OPEN_EXISTS); in class_config_parse_llog()
|
/linux-4.4.14/arch/x86/power/ |
D | cpu.c | 50 static void __save_processor_state(struct saved_context *ctxt) in __save_processor_state() argument 61 store_idt(&ctxt->idt); in __save_processor_state() 64 store_idt((struct desc_ptr *)&ctxt->idt_limit); in __save_processor_state() 72 ctxt->gdt_desc.size = GDT_SIZE - 1; in __save_processor_state() 73 ctxt->gdt_desc.address = (unsigned long)get_cpu_gdt_table(smp_processor_id()); in __save_processor_state() 75 store_tr(ctxt->tr); in __save_processor_state() 82 savesegment(es, ctxt->es); in __save_processor_state() 83 savesegment(fs, ctxt->fs); in __save_processor_state() 84 savesegment(gs, ctxt->gs); in __save_processor_state() 85 savesegment(ss, ctxt->ss); in __save_processor_state() [all …]
|
/linux-4.4.14/arch/x86/include/asm/ |
D | kvm_emulate.h | 94 ulong (*read_gpr)(struct x86_emulate_ctxt *ctxt, unsigned reg); 101 void (*write_gpr)(struct x86_emulate_ctxt *ctxt, unsigned reg, ulong val); 109 int (*read_std)(struct x86_emulate_ctxt *ctxt, 121 int (*read_phys)(struct x86_emulate_ctxt *ctxt, unsigned long addr, 131 int (*write_std)(struct x86_emulate_ctxt *ctxt, 141 int (*fetch)(struct x86_emulate_ctxt *ctxt, 151 int (*read_emulated)(struct x86_emulate_ctxt *ctxt, 162 int (*write_emulated)(struct x86_emulate_ctxt *ctxt, 175 int (*cmpxchg_emulated)(struct x86_emulate_ctxt *ctxt, 181 void (*invlpg)(struct x86_emulate_ctxt *ctxt, ulong addr); [all …]
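kvm_emulate.h declares the callback table the emulator uses instead of touching guest state directly: every access (read_gpr, write_gpr, fetch, read_emulated, ...) goes through a function pointer supplied by the host side. A toy version of that decoupling, with made-up operations and context types:

    #include <stdio.h>

    struct emulate_ops {
        unsigned long (*read_gpr)(void *ctxt, unsigned reg);
        void (*write_gpr)(void *ctxt, unsigned reg, unsigned long val);
    };

    struct toy_ctxt { unsigned long gprs[4]; };

    static unsigned long toy_read(void *ctxt, unsigned reg)
    {
        return ((struct toy_ctxt *)ctxt)->gprs[reg];
    }

    static void toy_write(void *ctxt, unsigned reg, unsigned long val)
    {
        ((struct toy_ctxt *)ctxt)->gprs[reg] = val;
    }

    /* "Emulate" an instruction that adds r1 into r0, using only the ops table. */
    static void emulate_add(const struct emulate_ops *ops, void *ctxt)
    {
        ops->write_gpr(ctxt, 0, ops->read_gpr(ctxt, 0) + ops->read_gpr(ctxt, 1));
    }

    int main(void)
    {
        struct emulate_ops ops = { .read_gpr = toy_read, .write_gpr = toy_write };
        struct toy_ctxt ctxt = { .gprs = { 2, 3 } };

        emulate_add(&ops, &ctxt);
        printf("r0 = %lu\n", ctxt.gprs[0]);  /* prints 5 */
        return 0;
    }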
|
/linux-4.4.14/drivers/staging/lustre/lustre/ptlrpc/ |
D | llog_net.c | 55 int llog_initiator_connect(struct llog_ctxt *ctxt) in llog_initiator_connect() argument 59 LASSERT(ctxt); in llog_initiator_connect() 60 new_imp = ctxt->loc_obd->u.cli.cl_import; in llog_initiator_connect() 61 LASSERTF(ctxt->loc_imp == NULL || ctxt->loc_imp == new_imp, in llog_initiator_connect() 62 "%p - %p\n", ctxt->loc_imp, new_imp); in llog_initiator_connect() 63 mutex_lock(&ctxt->loc_mutex); in llog_initiator_connect() 64 if (ctxt->loc_imp != new_imp) { in llog_initiator_connect() 65 if (ctxt->loc_imp) in llog_initiator_connect() 66 class_import_put(ctxt->loc_imp); in llog_initiator_connect() 67 ctxt->loc_imp = class_import_get(new_imp); in llog_initiator_connect() [all …]
|
D | llog_client.c | 52 #define LLOG_CLIENT_ENTRY(ctxt, imp) do { \ argument 53 mutex_lock(&ctxt->loc_mutex); \ 54 if (ctxt->loc_imp) { \ 55 imp = class_import_get(ctxt->loc_imp); \ 60 ctxt->loc_idx); \ 62 mutex_unlock(&ctxt->loc_mutex); \ 65 mutex_unlock(&ctxt->loc_mutex); \ 68 #define LLOG_CLIENT_EXIT(ctxt, imp) do { \ argument 69 mutex_lock(&ctxt->loc_mutex); \ 70 if (ctxt->loc_imp != imp) \ [all …]
|
/linux-4.4.14/net/sunrpc/xprtrdma/ |
D | svc_rdma_recvfrom.c | 59 struct svc_rdma_op_ctxt *ctxt, in rdma_build_arg_xdr() argument 68 page = ctxt->pages[0]; in rdma_build_arg_xdr() 75 min_t(size_t, byte_count, ctxt->sge[0].length); in rdma_build_arg_xdr() 94 while (bc && sge_no < ctxt->count) { in rdma_build_arg_xdr() 95 page = ctxt->pages[sge_no]; in rdma_build_arg_xdr() 98 bc -= min_t(u32, bc, ctxt->sge[sge_no].length); in rdma_build_arg_xdr() 99 rqstp->rq_arg.buflen += ctxt->sge[sge_no].length; in rdma_build_arg_xdr() 107 while (sge_no < ctxt->count) { in rdma_build_arg_xdr() 108 page = ctxt->pages[sge_no++]; in rdma_build_arg_xdr() 111 ctxt->count = bc; in rdma_build_arg_xdr() [all …]
|
D | svc_rdma_transport.c | 158 struct svc_rdma_op_ctxt *ctxt; in svc_rdma_get_context() local 160 ctxt = kmem_cache_alloc(svc_rdma_ctxt_cachep, in svc_rdma_get_context() 162 ctxt->xprt = xprt; in svc_rdma_get_context() 163 INIT_LIST_HEAD(&ctxt->dto_q); in svc_rdma_get_context() 164 ctxt->count = 0; in svc_rdma_get_context() 165 ctxt->frmr = NULL; in svc_rdma_get_context() 167 return ctxt; in svc_rdma_get_context() 170 void svc_rdma_unmap_dma(struct svc_rdma_op_ctxt *ctxt) in svc_rdma_unmap_dma() argument 172 struct svcxprt_rdma *xprt = ctxt->xprt; in svc_rdma_unmap_dma() 174 for (i = 0; i < ctxt->count && ctxt->sge[i].length; i++) { in svc_rdma_unmap_dma() [all …]
|
D | svc_rdma_sendto.c | 227 struct svc_rdma_op_ctxt *ctxt; in send_write() local 239 ctxt = svc_rdma_get_context(xprt); in send_write() 240 ctxt->direction = DMA_TO_DEVICE; in send_write() 241 sge = ctxt->sge; in send_write() 269 ctxt->count++; in send_write() 284 ctxt->wr_op = IB_WR_RDMA_WRITE; in send_write() 285 write_wr.wr.wr_id = (unsigned long)ctxt; in send_write() 299 svc_rdma_unmap_dma(ctxt); in send_write() 300 svc_rdma_put_context(ctxt, 0); in send_write() 455 struct svc_rdma_op_ctxt *ctxt, in send_reply() argument [all …]
|
/linux-4.4.14/drivers/net/wireless/iwlwifi/mvm/ |
D | phy-ctxt.c | 127 static void iwl_mvm_phy_ctxt_cmd_hdr(struct iwl_mvm_phy_ctxt *ctxt, in iwl_mvm_phy_ctxt_cmd_hdr() argument 133 cmd->id_and_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(ctxt->id, in iwl_mvm_phy_ctxt_cmd_hdr() 134 ctxt->color)); in iwl_mvm_phy_ctxt_cmd_hdr() 193 struct iwl_mvm_phy_ctxt *ctxt, in iwl_mvm_phy_ctxt_apply() argument 202 iwl_mvm_phy_ctxt_cmd_hdr(ctxt, &cmd, action, apply_time); in iwl_mvm_phy_ctxt_apply() 219 int iwl_mvm_phy_ctxt_add(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt, in iwl_mvm_phy_ctxt_add() argument 224 ctxt->ref); in iwl_mvm_phy_ctxt_add() 227 ctxt->channel = chandef->chan; in iwl_mvm_phy_ctxt_add() 229 return iwl_mvm_phy_ctxt_apply(mvm, ctxt, chandef, in iwl_mvm_phy_ctxt_add() 238 void iwl_mvm_phy_ctxt_ref(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt) in iwl_mvm_phy_ctxt_ref() argument [all …]
|
D | Makefile | 2 iwlmvm-y += fw.o mac80211.o nvm.o ops.o phy-ctxt.o mac-ctxt.o
|
D | mvm.h | 1124 int iwl_mvm_phy_ctxt_add(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt, 1127 int iwl_mvm_phy_ctxt_changed(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt, 1131 struct iwl_mvm_phy_ctxt *ctxt); 1133 struct iwl_mvm_phy_ctxt *ctxt);
|
/linux-4.4.14/drivers/staging/lustre/lustre/include/ |
D | lustre_log.h | 93 int llog_open(const struct lu_env *env, struct llog_ctxt *ctxt, 98 struct llog_ctxt *ctxt, struct llog_ctxt *bak_ctxt, 144 int __llog_ctxt_put(const struct lu_env *env, struct llog_ctxt *ctxt); 148 int llog_initiator_connect(struct llog_ctxt *ctxt); 161 int (*lop_sync)(struct llog_ctxt *ctxt, struct obd_export *exp, 163 int (*lop_cleanup)(const struct lu_env *env, struct llog_ctxt *ctxt); 164 int (*lop_cancel)(const struct lu_env *env, struct llog_ctxt *ctxt, 166 int (*lop_connect)(struct llog_ctxt *ctxt, struct llog_logid *logid, 268 static inline struct llog_ctxt *llog_ctxt_get(struct llog_ctxt *ctxt) in llog_ctxt_get() argument 270 atomic_inc(&ctxt->loc_refcount); in llog_ctxt_get() [all …]
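llog_ctxt_get() above is one half of a plain get/put reference count: the context starts life with loc_refcount = 1 in llog_new_ctxt() (see llog_obd.c earlier) and is destroyed when the last put drops the count to zero. A minimal sketch using C11 atomics as a stand-in for the kernel's atomic_t; all names are invented:

    #include <stdatomic.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct ctxt {
        atomic_int refcount;
        /* ... payload ... */
    };

    static struct ctxt *ctxt_new(void)
    {
        struct ctxt *c = calloc(1, sizeof(*c));

        if (c)
            atomic_init(&c->refcount, 1);  /* creator holds the first reference */
        return c;
    }

    static struct ctxt *ctxt_get(struct ctxt *c)
    {
        atomic_fetch_add(&c->refcount, 1);
        return c;
    }

    static void ctxt_put(struct ctxt *c)
    {
        if (atomic_fetch_sub(&c->refcount, 1) == 1)  /* we dropped the last reference */
            free(c);
    }

    int main(void)
    {
        struct ctxt *c = ctxt_new();
        struct ctxt *user;

        if (!c)
            return 1;
        user = ctxt_get(c);                /* a second holder */
        ctxt_put(c);                       /* creator lets go; object survives */
        printf("refcount now %d\n", atomic_load(&user->refcount));
        ctxt_put(user);                    /* last put frees it */
        return 0;
    }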
|
D | obd_class.h | 137 int class_config_parse_llog(const struct lu_env *env, struct llog_ctxt *ctxt, 275 #define CTXTP(ctxt, op) (ctxt)->loc_logops->lop_##op argument 397 #define CTXT_CHECK_OP(ctxt, op, err) \ argument 399 if (!OBT(ctxt->loc_obd) || !CTXTP((ctxt), op)) { \ 402 ctxt->loc_obd->obd_minor); \
|
/linux-4.4.14/fs/nilfs2/ |
D | btnode.c | 171 struct nilfs_btnode_chkey_ctxt *ctxt) in nilfs_btnode_prepare_change_key() argument 175 __u64 oldkey = ctxt->oldkey, newkey = ctxt->newkey; in nilfs_btnode_prepare_change_key() 181 obh = ctxt->bh; in nilfs_btnode_prepare_change_key() 182 ctxt->newbh = NULL; in nilfs_btnode_prepare_change_key() 228 ctxt->newbh = nbh; in nilfs_btnode_prepare_change_key() 241 struct nilfs_btnode_chkey_ctxt *ctxt) in nilfs_btnode_commit_change_key() argument 243 struct buffer_head *obh = ctxt->bh, *nbh = ctxt->newbh; in nilfs_btnode_commit_change_key() 244 __u64 oldkey = ctxt->oldkey, newkey = ctxt->newkey; in nilfs_btnode_commit_change_key() 272 ctxt->bh = nbh; in nilfs_btnode_commit_change_key() 282 struct nilfs_btnode_chkey_ctxt *ctxt) in nilfs_btnode_abort_change_key() argument [all …]
|
/linux-4.4.14/arch/x86/xen/ |
D | smp.c | 384 struct vcpu_guest_context *ctxt; in cpu_initialize_context() local 393 ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL); in cpu_initialize_context() 394 if (ctxt == NULL) in cpu_initialize_context() 401 ctxt->user_regs.fs = __KERNEL_PERCPU; in cpu_initialize_context() 402 ctxt->user_regs.gs = __KERNEL_STACK_CANARY; in cpu_initialize_context() 404 memset(&ctxt->fpu_ctxt, 0, sizeof(ctxt->fpu_ctxt)); in cpu_initialize_context() 407 ctxt->user_regs.eip = (unsigned long)cpu_bringup_and_idle; in cpu_initialize_context() 408 ctxt->flags = VGCF_IN_KERNEL; in cpu_initialize_context() 409 ctxt->user_regs.eflags = 0x1000; /* IOPL_RING1 */ in cpu_initialize_context() 410 ctxt->user_regs.ds = __USER_DS; in cpu_initialize_context() [all …]
|
D | pmu.c | 28 #define field_offset(ctxt, field) ((void *)((uintptr_t)ctxt + \ argument 29 (uintptr_t)ctxt->field)) 185 struct xen_pmu_intel_ctxt *ctxt; in xen_intel_pmu_emulate() local 195 ctxt = &xenpmu_data->pmu.c.intel; in xen_intel_pmu_emulate() 199 reg = &ctxt->global_ovf_ctrl; in xen_intel_pmu_emulate() 202 reg = &ctxt->global_status; in xen_intel_pmu_emulate() 205 reg = &ctxt->global_ctrl; in xen_intel_pmu_emulate() 208 reg = &ctxt->fixed_ctrl; in xen_intel_pmu_emulate() 213 fix_counters = field_offset(ctxt, fixed_counters); in xen_intel_pmu_emulate() 217 arch_cntr_pair = field_offset(ctxt, arch_counters); in xen_intel_pmu_emulate() [all …]
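The field_offset() macro at the top of pmu.c turns "base pointer plus an offset stored inside the structure" into a usable pointer — a common way to reach variable-length arrays appended after a fixed header in a shared page. A hedged stand-alone sketch (layout and names invented):

    #include <stdint.h>
    #include <stdio.h>

    /* Header stores byte offsets of the arrays that follow it in the same buffer. */
    struct pmu_hdr {
        uint32_t fixed_counters;  /* offset of the fixed-counter array */
        uint32_t arch_counters;   /* offset of the arch-counter array  */
    };

    #define field_offset(hdr, field) \
        ((void *)((uintptr_t)(hdr) + (uintptr_t)(hdr)->field))

    int main(void)
    {
        static uint64_t backing[8];        /* 64 bytes of naturally aligned storage */
        struct pmu_hdr *hdr = (struct pmu_hdr *)backing;
        uint64_t *fixed;

        hdr->fixed_counters = sizeof(*hdr);                        /* right after the header    */
        hdr->arch_counters  = sizeof(*hdr) + 2 * sizeof(uint64_t); /* after two fixed counters  */

        fixed = field_offset(hdr, fixed_counters);
        fixed[0] = 123;                    /* lands in the buffer just past the header */
        printf("fixed[0] = %llu\n", (unsigned long long)fixed[0]);
        return 0;
    }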
|
/linux-4.4.14/drivers/staging/rdma/hfi1/ |
D | trace.h | 81 u32 ctxt, 87 TP_ARGS(dd, ctxt, eflags, etype, hlen, tlen, updegr, etail), 91 __field(u32, ctxt) 101 __entry->ctxt = ctxt; 111 __entry->ctxt, 122 TP_PROTO(struct hfi1_devdata *dd, u32 ctxt), 123 TP_ARGS(dd, ctxt), 126 __field(u32, ctxt) 132 __entry->ctxt = ctxt; 133 if (dd->rcd[ctxt]->do_interrupt == [all …]
|
D | file_ops.c | 168 #define HFI1_MMAP_TOKEN(type, ctxt, subctxt, addr) \ argument 171 HFI1_MMAP_TOKEN_SET(CTXT, ctxt) | \ 413 uctxt->ctxt); in hfi1_file_write() 457 ctxt_fp(kiocb->ki_filp)->ctxt, subctxt_fp(kiocb->ki_filp), in hfi1_write_iter() 493 u16 ctxt; in hfi1_file_mmap() local 502 ctxt = HFI1_MMAP_TOKEN_GET(CTXT, token); in hfi1_file_mmap() 505 if (ctxt != uctxt->ctxt || subctxt != subctxt_fp(fp)) { in hfi1_file_mmap() 601 + (uctxt->ctxt * RXE_PER_CONTEXT_SIZE); in hfi1_file_mmap() 617 ((uctxt->ctxt - dd->first_user_ctxt) * in hfi1_file_mmap() 689 uctxt->ctxt, subctxt_fp(fp), in hfi1_file_mmap() [all …]
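HFI1_MMAP_TOKEN packs a mapping type, context, subcontext and address into the mmap offset, and hfi1_file_mmap() decodes the same fields before honouring the request. The real field widths are not visible in the excerpt, so the layout below is made up purely to show the encode/decode shape:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical layout: 4-bit type, 12-bit ctxt, 8-bit subctxt, 40-bit offset. */
    #define TYPE_SHIFT    60
    #define TYPE_MASK     0xfULL
    #define CTXT_SHIFT    48
    #define CTXT_MASK     0xfffULL
    #define SUBCTXT_SHIFT 40
    #define SUBCTXT_MASK  0xffULL
    #define ADDR_MASK     0xffffffffffULL

    #define TOKEN_SET(val, shift, mask) (((uint64_t)(val) & (mask)) << (shift))
    #define TOKEN_GET(tok, shift, mask) (((tok) >> (shift)) & (mask))

    static uint64_t mmap_token(unsigned type, unsigned ctxt, unsigned subctxt,
                               uint64_t addr)
    {
        return TOKEN_SET(type, TYPE_SHIFT, TYPE_MASK) |
               TOKEN_SET(ctxt, CTXT_SHIFT, CTXT_MASK) |
               TOKEN_SET(subctxt, SUBCTXT_SHIFT, SUBCTXT_MASK) |
               (addr & ADDR_MASK);
    }

    int main(void)
    {
        uint64_t tok = mmap_token(3, 17, 2, 0x1000);

        /* The mmap handler unpacks the fields and checks them against the
         * caller's own context before mapping anything. */
        printf("type=%" PRIu64 " ctxt=%" PRIu64 " subctxt=%" PRIu64 " addr=%#" PRIx64 "\n",
               TOKEN_GET(tok, TYPE_SHIFT, TYPE_MASK),
               TOKEN_GET(tok, CTXT_SHIFT, CTXT_MASK),
               TOKEN_GET(tok, SUBCTXT_SHIFT, SUBCTXT_MASK),
               tok & ADDR_MASK);
        return 0;
    }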
|
D | init.c | 167 dd->rcd[rcd->ctxt] = NULL; in hfi1_create_ctxts() 177 dd->rcd[rcd->ctxt] = NULL; in hfi1_create_ctxts() 196 struct hfi1_ctxtdata *hfi1_create_ctxtdata(struct hfi1_pportdata *ppd, u32 ctxt) in hfi1_create_ctxtdata() argument 211 dd_dev_info(dd, "%s: setting up context %u\n", __func__, ctxt); in hfi1_create_ctxtdata() 217 rcd->ctxt = ctxt; in hfi1_create_ctxtdata() 218 dd->rcd[ctxt] = rcd; in hfi1_create_ctxtdata() 231 if (ctxt < dd->first_user_ctxt) { in hfi1_create_ctxtdata() 232 if (ctxt < kctxt_ngroups) { in hfi1_create_ctxtdata() 233 base = ctxt * (dd->rcv_entries.ngroups + 1); in hfi1_create_ctxtdata() 237 (ctxt * dd->rcv_entries.ngroups); in hfi1_create_ctxtdata() [all …]
|
D | chip.h | 560 static inline u64 read_kctxt_csr(const struct hfi1_devdata *dd, int ctxt, in read_kctxt_csr() argument 564 return read_csr(dd, offset0 + (0x100 * ctxt)); in read_kctxt_csr() 567 static inline void write_kctxt_csr(struct hfi1_devdata *dd, int ctxt, in write_kctxt_csr() argument 571 write_csr(dd, offset0 + (0x100 * ctxt), value); in write_kctxt_csr() 583 int ctxt, in get_kctxt_csr_addr() argument 586 return get_csr_addr(dd, offset0 + (0x100 * ctxt)); in get_kctxt_csr_addr() 595 static inline u64 read_uctxt_csr(const struct hfi1_devdata *dd, int ctxt, in read_uctxt_csr() argument 599 return read_csr(dd, offset0 + (0x1000 * ctxt)); in read_uctxt_csr() 602 static inline void write_uctxt_csr(struct hfi1_devdata *dd, int ctxt, in write_uctxt_csr() argument 606 write_csr(dd, offset0 + (0x1000 * ctxt), value); in write_uctxt_csr() [all …]
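The chip.h helpers compute a per-context register address as a base offset plus a fixed stride times the context number — 0x100 apart for the kernel-context CSR block, 0x1000 apart for the user-context block, per the lines above. A sketch of just that arithmetic against an in-memory array standing in for the device's register space:

    #include <stdint.h>
    #include <stdio.h>

    #define KCTXT_STRIDE 0x100   /* from the excerpt: offset0 + 0x100 * ctxt  */
    #define UCTXT_STRIDE 0x1000  /* from the excerpt: offset0 + 0x1000 * ctxt */

    /* Stand-in for the memory-mapped CSR space, indexed by byte offset. */
    static uint64_t csr_space[0x10000 / 8];

    static uint64_t read_csr(uint64_t offset)              { return csr_space[offset / 8]; }
    static void write_csr(uint64_t offset, uint64_t value) { csr_space[offset / 8] = value; }

    static uint64_t read_kctxt_csr(int ctxt, uint64_t offset0)
    {
        return read_csr(offset0 + (uint64_t)KCTXT_STRIDE * ctxt);
    }

    static void write_kctxt_csr(int ctxt, uint64_t offset0, uint64_t value)
    {
        write_csr(offset0 + (uint64_t)KCTXT_STRIDE * ctxt, value);
    }

    int main(void)
    {
        write_kctxt_csr(3, 0x28, 0xabcd);  /* hypothetical register at per-context offset 0x28 */
        printf("ctxt 3 reads back %#llx\n", (unsigned long long)read_kctxt_csr(3, 0x28));
        printf("ctxt 4 untouched:  %#llx\n", (unsigned long long)read_kctxt_csr(4, 0x28));
        return 0;
    }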
|
D | user_sdma.c | 276 (req)->pq->ctxt, (req)->pq->subctxt, (req)->info.comp_idx, \ 279 hfi1_cdbg(SDMA, "[%u:%u:%u] " fmt, (pq)->dd->unit, (pq)->ctxt, \ 386 pq->ctxt = uctxt->ctxt; in hfi1_user_sdma_alloc_queues() 395 snprintf(buf, 64, "txreq-kmem-cache-%u-%u-%u", dd->unit, uctxt->ctxt, in hfi1_user_sdma_alloc_queues() 404 uctxt->ctxt); in hfi1_user_sdma_alloc_queues() 448 uctxt->ctxt, fd->subctxt); in hfi1_user_sdma_free_queues() 502 dd->unit, uctxt->ctxt, subctxt_fp(fp), in hfi1_user_sdma_process_request() 510 dd->unit, uctxt->ctxt, subctxt_fp(fp), ret); in hfi1_user_sdma_process_request() 514 trace_hfi1_sdma_user_reqinfo(dd, uctxt->ctxt, subctxt_fp(fp), in hfi1_user_sdma_process_request() 518 dd->unit, uctxt->ctxt, subctxt_fp(fp), in hfi1_user_sdma_process_request() [all …]
|
D | user_sdma.h | 69 unsigned ctxt; member
|
D | chip.c | 4640 trace_hfi1_receive_interrupt(dd, rcd->ctxt); in receive_context_interrupt() 7594 write_kctxt_csr(dd, rcd->ctxt, RCV_AVAIL_TIME_OUT, in adjust_rcv_timeout() 7603 u32 ctxt = rcd->ctxt; in update_usrhead() local 7614 write_uctxt_csr(dd, ctxt, RCV_EGR_INDEX_HEAD, reg); in update_usrhead() 7620 write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, reg); in update_usrhead() 7628 head = (read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_HEAD) in hdrqempty() 7634 tail = read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_TAIL); in hdrqempty() 7675 void hfi1_rcvctrl(struct hfi1_devdata *dd, unsigned int op, int ctxt) in hfi1_rcvctrl() argument 7681 rcd = dd->rcd[ctxt]; in hfi1_rcvctrl() 7685 hfi1_cdbg(RCVCTRL, "ctxt %d op 0x%x", ctxt, op); in hfi1_rcvctrl() [all …]
|
D | intr.c | 199 hfi1_rcvctrl(dd, HFI1_RCVCTRL_INTRAVAIL_DIS, rcd->ctxt); in handle_user_interrupt()
|
D | pio.c | 1657 u32 ctxt; in init_pervl_scs() local 1684 ctxt = dd->vld[15].sc->hw_context; in init_pervl_scs() 1686 write_kctxt_csr(dd, ctxt, SC(CHECK_VL), mask); in init_pervl_scs() 1689 dd->vld[15].sc->sw_index, ctxt); in init_pervl_scs() 1692 ctxt = dd->vld[i].sc->hw_context; in init_pervl_scs() 1694 write_kctxt_csr(dd, ctxt, SC(CHECK_VL), mask); in init_pervl_scs()
|
D | ud.c | 490 struct send_context *ctxt = qp_to_send_context(qp, sc5); in return_cnp() local 526 if (ctxt) { in return_cnp() 527 pbuf = sc_buffer_alloc(ctxt, plen, NULL, NULL); in return_cnp()
|
D | driver.c | 1168 rcd->ctxt, packet->rhf, in handle_eflags() 1189 packet->rcd->ctxt, in process_receive_ib()
|
D | hfi.h | 209 unsigned ctxt; member
|
/linux-4.4.14/drivers/scsi/be2iscsi/ |
D | be_cmds.c | 957 void *ctxt = &req->context; in beiscsi_cmd_cq_create() local 971 ctxt, coalesce_wm); in beiscsi_cmd_cq_create() 972 AMAP_SET_BITS(struct amap_cq_context, nodelay, ctxt, no_delay); in beiscsi_cmd_cq_create() 973 AMAP_SET_BITS(struct amap_cq_context, count, ctxt, in beiscsi_cmd_cq_create() 975 AMAP_SET_BITS(struct amap_cq_context, valid, ctxt, 1); in beiscsi_cmd_cq_create() 976 AMAP_SET_BITS(struct amap_cq_context, solevent, ctxt, sol_evts); in beiscsi_cmd_cq_create() 977 AMAP_SET_BITS(struct amap_cq_context, eventable, ctxt, 1); in beiscsi_cmd_cq_create() 978 AMAP_SET_BITS(struct amap_cq_context, eqid, ctxt, eq->id); in beiscsi_cmd_cq_create() 979 AMAP_SET_BITS(struct amap_cq_context, armed, ctxt, 1); in beiscsi_cmd_cq_create() 980 AMAP_SET_BITS(struct amap_cq_context, func, ctxt, in beiscsi_cmd_cq_create() [all …]
|
/linux-4.4.14/fs/ocfs2/ |
D | xattr.c | 274 struct ocfs2_xattr_set_ctxt *ctxt); 279 struct ocfs2_xattr_set_ctxt *ctxt); 710 struct ocfs2_xattr_set_ctxt *ctxt) in ocfs2_xattr_extend_allocation() argument 713 handle_t *handle = ctxt->handle; in ocfs2_xattr_extend_allocation() 736 ctxt->data_ac, in ocfs2_xattr_extend_allocation() 737 ctxt->meta_ac, in ocfs2_xattr_extend_allocation() 775 struct ocfs2_xattr_set_ctxt *ctxt) in __ocfs2_remove_xattr_range() argument 779 handle_t *handle = ctxt->handle; in __ocfs2_remove_xattr_range() 791 ret = ocfs2_remove_extent(handle, &et, cpos, len, ctxt->meta_ac, in __ocfs2_remove_xattr_range() 792 &ctxt->dealloc); in __ocfs2_remove_xattr_range() [all …]
|
D | alloc.h | 210 int ocfs2_cache_cluster_dealloc(struct ocfs2_cached_dealloc_ctxt *ctxt, 212 int ocfs2_cache_block_dealloc(struct ocfs2_cached_dealloc_ctxt *ctxt, 220 struct ocfs2_cached_dealloc_ctxt *ctxt);
|
D | alloc.c | 569 static int ocfs2_cache_extent_block_free(struct ocfs2_cached_dealloc_ctxt *ctxt, 3673 struct ocfs2_merge_ctxt *ctxt) in ocfs2_try_to_merge_extent() argument 3679 BUG_ON(ctxt->c_contig_type == CONTIG_NONE); in ocfs2_try_to_merge_extent() 3681 if (ctxt->c_split_covers_rec && ctxt->c_has_empty_extent) { in ocfs2_try_to_merge_extent() 3698 if (ctxt->c_contig_type == CONTIG_LEFTRIGHT) { in ocfs2_try_to_merge_extent() 3702 BUG_ON(!ctxt->c_split_covers_rec); in ocfs2_try_to_merge_extent() 3767 if (ctxt->c_contig_type == CONTIG_RIGHT) { in ocfs2_try_to_merge_extent() 3785 if (ctxt->c_split_covers_rec) { in ocfs2_try_to_merge_extent() 4332 struct ocfs2_merge_ctxt *ctxt) in ocfs2_figure_merge_contig_type() argument 4453 ctxt->c_contig_type = ret; in ocfs2_figure_merge_contig_type() [all …]
|
/linux-4.4.14/drivers/infiniband/hw/qib/ |
D | qib_file_ops.c | 193 kinfo->spi_uregbase = (u64) dd->uregbase + dd->ureg_align * rcd->ctxt; in qib_get_base_info() 242 kinfo->spi_ctxt = rcd->ctxt; in qib_get_base_info() 308 ctxttid = rcd->ctxt * dd->rcvtidcnt; in qib_tid_update() 499 ctxttid = rcd->ctxt * dd->rcvtidcnt; in qib_tid_free() 689 dd->f_rcvctrl(rcd->ppd, rcvctrl_op, rcd->ctxt); in qib_manage_rcvq() 766 what, rcd->ctxt, pfn, len, ret); in qib_mmap_mem() 1046 ureg = dd->uregbase + dd->ureg_align * rcd->ctxt; in qib_mmapf() 1139 dd->f_rcvctrl(rcd->ppd, QIB_RCVCTRL_INTRAVAIL_ENB, rcd->ctxt); in qib_poll_next() 1308 static int setup_ctxt(struct qib_pportdata *ppd, int ctxt, in setup_ctxt() argument 1324 rcd = qib_create_ctxtdata(ppd, ctxt, numa_id); in setup_ctxt() [all …]
|
D | qib_tx.c | 135 unsigned ctxt; in find_ctxt() local 139 for (ctxt = dd->first_user_ctxt; ctxt < dd->cfgctxts; ctxt++) { in find_ctxt() 140 rcd = dd->rcd[ctxt]; in find_ctxt() 462 unsigned ctxt; in qib_cancel_sends() local 474 for (ctxt = dd->first_user_ctxt; ctxt < dd->cfgctxts; ctxt++) { in qib_cancel_sends() 476 rcd = dd->rcd[ctxt]; in qib_cancel_sends()
|
D | qib_init.c | 168 struct qib_ctxtdata *qib_create_ctxtdata(struct qib_pportdata *ppd, u32 ctxt, in qib_create_ctxtdata() argument 181 rcd->ctxt = ctxt; in qib_create_ctxtdata() 182 dd->rcd[ctxt] = rcd; in qib_create_ctxtdata() 184 if (ctxt < dd->first_user_ctxt) { /* N/A for PSM contexts */ in qib_create_ctxtdata() 1347 int ctxt; in cleanup_device_data() local 1385 for (ctxt = 0; ctxt < dd->cfgctxts; ctxt++) { in cleanup_device_data() 1386 int ctxt_tidbase = ctxt * dd->rcvtidcnt; in cleanup_device_data() 1416 for (ctxt = 0; tmp && ctxt < dd->ctxtcnt; ctxt++) { in cleanup_device_data() 1417 struct qib_ctxtdata *rcd = tmp[ctxt]; in cleanup_device_data() 1419 tmp[ctxt] = NULL; /* debugging paranoia */ in cleanup_device_data() [all …]
|
D | qib_iba6120.c | 307 enum qib_ureg regno, int ctxt) in qib_read_ureg32() argument 315 dd->ureg_align * ctxt)); in qib_read_ureg32() 320 dd->ureg_align * ctxt)); in qib_read_ureg32() 333 enum qib_ureg regno, u64 value, int ctxt) in qib_write_ureg() argument 340 dd->ureg_align * ctxt); in qib_write_ureg() 345 dd->ureg_align * ctxt); in qib_write_ureg() 383 const u16 regno, unsigned ctxt, in qib_write_kreg_ctxt() argument 386 qib_write_kreg(dd, regno + ctxt, value); in qib_write_kreg_ctxt() 1985 u32 ctxt; in qib_6120_clear_tids() local 1991 ctxt = rcd->ctxt; in qib_6120_clear_tids() [all …]
|
D | qib_iba7220.c | 230 enum qib_ureg regno, int ctxt) in qib_read_ureg32() argument 238 dd->ureg_align * ctxt)); in qib_read_ureg32() 243 dd->ureg_align * ctxt)); in qib_read_ureg32() 256 enum qib_ureg regno, u64 value, int ctxt) in qib_write_ureg() argument 263 dd->ureg_align * ctxt); in qib_write_ureg() 268 dd->ureg_align * ctxt); in qib_write_ureg() 282 const u16 regno, unsigned ctxt, in qib_write_kreg_ctxt() argument 285 qib_write_kreg(dd, regno + ctxt, value); in qib_write_kreg_ctxt() 2226 u32 ctxt; in qib_7220_clear_tids() local 2232 ctxt = rcd->ctxt; in qib_7220_clear_tids() [all …]
|
D | qib_iba7322.c | 776 enum qib_ureg regno, int ctxt) in qib_read_ureg32() argument 781 (dd->ureg_align * ctxt) + (dd->userbase ? in qib_read_ureg32() 797 enum qib_ureg regno, int ctxt) in qib_read_ureg() argument 803 (dd->ureg_align * ctxt) + (dd->userbase ? in qib_read_ureg() 818 enum qib_ureg regno, u64 value, int ctxt) in qib_write_ureg() argument 825 dd->ureg_align * ctxt); in qib_write_ureg() 830 dd->ureg_align * ctxt); in qib_write_ureg() 887 const u16 regno, unsigned ctxt, in qib_write_kreg_ctxt() argument 890 qib_write_kreg(dd, regno + ctxt, value); in qib_write_kreg_ctxt() 2712 if (cspec->rhdr_cpu[rcd->ctxt] != cpu) { in qib_update_rhdrq_dca() [all …]
|
D | qib_ud.c | 399 unsigned ctxt = ppd->hw_pidx; in qib_lookup_pkey() local 404 for (i = 0; i < ARRAY_SIZE(dd->rcd[ctxt]->pkeys); ++i) in qib_lookup_pkey() 405 if ((dd->rcd[ctxt]->pkeys[i] & 0x7fff) == pkey) in qib_lookup_pkey()
|
D | qib_intr.c | 206 rcd->ctxt); in qib_handle_urcv()
|
D | qib_driver.c | 297 u32 ctxt, u32 eflags, u32 l, u32 etail, in qib_rcv_hdrerr() argument 514 crcs += qib_rcv_hdrerr(rcd, ppd, rcd->ctxt, eflags, l, in qib_kreceive()
|
D | qib_common.h | 423 __u16 ctxt; /* ctxt on unit assigned to caller */ member
|
D | qib_user_sdma.c | 183 qib_user_sdma_queue_create(struct device *dev, int unit, int ctxt, int sctxt) in qib_user_sdma_queue_create() argument 204 "qib-user-sdma-pkts-%u-%02u.%02u", unit, ctxt, sctxt); in qib_user_sdma_queue_create() 213 "qib-user-sdma-headers-%u-%02u.%02u", unit, ctxt, sctxt); in qib_user_sdma_queue_create()
|
D | qib.h | 161 unsigned ctxt; member 822 int ctxt);
|
D | qib_verbs.c | 1959 unsigned ctxt = ppd->hw_pidx; in qib_get_pkey() local 1963 if (!dd->rcd || index >= ARRAY_SIZE(dd->rcd[ctxt]->pkeys)) in qib_get_pkey() 1966 ret = dd->rcd[ctxt]->pkeys[index]; in qib_get_pkey()
|
/linux-4.4.14/arch/arm64/include/asm/ |
D | kvm_host.h | 99 struct kvm_cpu_context ctxt; member 175 #define vcpu_gp_regs(v) (&(v)->arch.ctxt.gp_regs) 176 #define vcpu_sys_reg(v,r) ((v)->arch.ctxt.sys_regs[(r)]) 181 #define vcpu_cp14(v,r) ((v)->arch.ctxt.copro[(r)]) 182 #define vcpu_cp15(v,r) ((v)->arch.ctxt.copro[(r)])
|
/linux-4.4.14/arch/x86/kernel/cpu/mtrr/ |
D | mtrr.h | 48 void set_mtrr_done(struct set_mtrr_context *ctxt); 49 void set_mtrr_cache_disable(struct set_mtrr_context *ctxt); 50 void set_mtrr_prepare_save(struct set_mtrr_context *ctxt);
|
/linux-4.4.14/Documentation/prctl/ |
D | Makefile | 3 hostprogs-$(CONFIG_X86) := disable-tsc-ctxt-sw-stress-test disable-tsc-on-off-stress-test disable-t… 7 HOSTCFLAGS_disable-tsc-ctxt-sw-stress-test.o += -I$(objtree)/usr/include
|
D | .gitignore | 1 disable-tsc-ctxt-sw-stress-test
|
/linux-4.4.14/drivers/net/ethernet/emulex/benet/ |
D | be_cmds.c | 1102 void *ctxt; in be_cmd_cq_create() local 1110 ctxt = &req->context; in be_cmd_cq_create() 1119 AMAP_SET_BITS(struct amap_cq_context_be, coalescwm, ctxt, in be_cmd_cq_create() 1122 ctxt, no_delay); in be_cmd_cq_create() 1123 AMAP_SET_BITS(struct amap_cq_context_be, count, ctxt, in be_cmd_cq_create() 1125 AMAP_SET_BITS(struct amap_cq_context_be, valid, ctxt, 1); in be_cmd_cq_create() 1126 AMAP_SET_BITS(struct amap_cq_context_be, eventable, ctxt, 1); in be_cmd_cq_create() 1127 AMAP_SET_BITS(struct amap_cq_context_be, eqid, ctxt, eq->id); in be_cmd_cq_create() 1137 ctxt, coalesce_wm); in be_cmd_cq_create() 1138 AMAP_SET_BITS(struct amap_cq_context_v2, nodelay, ctxt, in be_cmd_cq_create() [all …]
|
/linux-4.4.14/tools/perf/scripts/python/ |
D | futex-contention.py | 24 def syscalls__sys_enter_futex(event, ctxt, cpu, s, ns, tid, comm, callchain, argument 34 def syscalls__sys_exit_futex(event, ctxt, cpu, s, ns, tid, comm, callchain, argument
|
/linux-4.4.14/arch/cris/include/arch-v32/arch/hwregs/ |
D | dma_defs.h | 332 unsigned int ctxt : 1; member 344 unsigned int ctxt : 1; member 356 unsigned int ctxt : 1; member 367 unsigned int ctxt : 1; member
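The dma_defs.h hits are one-bit ctxt flags inside register-layout structs — CRIS describes its DMA control words as C bit-fields. A small illustration of the style (field names here are invented, and bit-field ordering is compiler/ABI specific, which is exactly why such layouts stay in arch headers):

    #include <stdio.h>

    /* Hypothetical DMA control word in the same bit-field style. */
    struct dma_ctrl {
        unsigned int eol  : 1;   /* end of descriptor list       */
        unsigned int intr : 1;   /* raise an interrupt when done */
        unsigned int ctxt : 1;   /* save/restore channel context */
        unsigned int pad  : 29;
    };

    int main(void)
    {
        struct dma_ctrl c = { 0 };

        c.ctxt = 1;              /* set just the context flag */
        printf("ctxt flag = %u, control word is %zu bytes\n", c.ctxt, sizeof(c));
        return 0;
    }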
|
/linux-4.4.14/drivers/net/ethernet/intel/i40e/ |
D | i40e_main.c | 1567 struct i40e_vsi_context *ctxt, in i40e_vsi_setup_queue_map() argument 1572 struct i40e_vsi_context *ctxt, in i40e_vsi_setup_queue_map() 1671 ctxt->info.tc_mapping[i] = cpu_to_le16(qmap); in i40e_vsi_setup_queue_map() 1687 ctxt->info.up_enable_bits = enabled_tc; in i40e_vsi_setup_queue_map() 1690 ctxt->info.mapping_flags |= in i40e_vsi_setup_queue_map() 1693 ctxt->info.queue_mapping[i] = in i40e_vsi_setup_queue_map() 1696 ctxt->info.mapping_flags |= in i40e_vsi_setup_queue_map() 1698 ctxt->info.queue_mapping[0] = cpu_to_le16(vsi->base_queue); in i40e_vsi_setup_queue_map() 1700 ctxt->info.valid_sections |= cpu_to_le16(sections); in i40e_vsi_setup_queue_map() 2227 struct i40e_vsi_context ctxt; in i40e_vlan_stripping_enable() local [all …]
|
D | i40e_fcoe.c | 366 int i40e_fcoe_vsi_init(struct i40e_vsi *vsi, struct i40e_vsi_context *ctxt) in i40e_fcoe_vsi_init() argument 368 struct i40e_aqc_vsi_properties_data *info = &ctxt->info; in i40e_fcoe_vsi_init() 380 ctxt->pf_num = hw->pf_id; in i40e_fcoe_vsi_init() 381 ctxt->vf_num = 0; in i40e_fcoe_vsi_init() 382 ctxt->uplink_seid = vsi->uplink_seid; in i40e_fcoe_vsi_init() 383 ctxt->connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL; in i40e_fcoe_vsi_init() 384 ctxt->flags = I40E_AQ_VSI_TYPE_PF; in i40e_fcoe_vsi_init() 403 i40e_vsi_setup_queue_map(vsi, ctxt, enabled_tc, true); in i40e_fcoe_vsi_init()
|
D | i40e_virtchnl_pf.c | 2450 struct i40e_vsi_context ctxt; in i40e_ndo_set_vf_spoofchk() local 2468 memset(&ctxt, 0, sizeof(ctxt)); in i40e_ndo_set_vf_spoofchk() 2469 ctxt.seid = pf->vsi[vf->lan_vsi_idx]->seid; in i40e_ndo_set_vf_spoofchk() 2470 ctxt.pf_num = pf->hw.pf_id; in i40e_ndo_set_vf_spoofchk() 2471 ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SECURITY_VALID); in i40e_ndo_set_vf_spoofchk() 2473 ctxt.info.sec_flags |= (I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK | in i40e_ndo_set_vf_spoofchk() 2475 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL); in i40e_ndo_set_vf_spoofchk()
|
D | i40e.h | 702 struct i40e_vsi_context *ctxt, 779 int i40e_fcoe_vsi_init(struct i40e_vsi *vsi, struct i40e_vsi_context *ctxt);
|
/linux-4.4.14/fs/ocfs2/dlm/ |
D | dlmdomain.c | 1584 struct domain_join_ctxt *ctxt, in dlm_should_restart_join() argument 1597 ret = memcmp(ctxt->live_map, dlm->live_nodes_map, in dlm_should_restart_join() 1610 struct domain_join_ctxt *ctxt; in dlm_try_to_join_domain() local 1615 ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL); in dlm_try_to_join_domain() 1616 if (!ctxt) { in dlm_try_to_join_domain() 1628 memcpy(ctxt->live_map, dlm->live_nodes_map, sizeof(ctxt->live_map)); in dlm_try_to_join_domain() 1635 while ((node = find_next_bit(ctxt->live_map, O2NM_MAX_NODES, in dlm_try_to_join_domain() 1649 set_bit(node, ctxt->yes_resp_map); in dlm_try_to_join_domain() 1651 if (dlm_should_restart_join(dlm, ctxt, response)) { in dlm_try_to_join_domain() 1664 memcpy(dlm->domain_map, ctxt->yes_resp_map, in dlm_try_to_join_domain() [all …]
|
/linux-4.4.14/drivers/staging/lustre/lustre/mgc/ |
D | mgc_request.c | 634 struct llog_ctxt *ctxt; in mgc_llog_init() local 644 ctxt = llog_get_context(obd, LLOG_CONFIG_REPL_CTXT); in mgc_llog_init() 645 LASSERT(ctxt); in mgc_llog_init() 647 llog_initiator_connect(ctxt); in mgc_llog_init() 648 llog_ctxt_put(ctxt); in mgc_llog_init() 655 struct llog_ctxt *ctxt; in mgc_llog_fini() local 657 ctxt = llog_get_context(obd, LLOG_CONFIG_REPL_CTXT); in mgc_llog_fini() 658 if (ctxt) in mgc_llog_fini() 659 llog_cleanup(env, ctxt); in mgc_llog_fini() 1450 struct llog_ctxt *ctxt; in mgc_process_cfg_log() local [all …]
|
/linux-4.4.14/tools/testing/selftests/powerpc/mm/ |
D | subpage_prot.c | 39 ucontext_t *ctxt = (ucontext_t *)ctxt_v; in segv() local 40 struct pt_regs *regs = ctxt->uc_mcontext.regs; in segv()
|
/linux-4.4.14/arch/m68k/include/asm/ |
D | openprom.h | 176 void (*pv_setctxt)(int ctxt, char *va, int pmeg); 244 void (*pv_setctxt)(int ctxt, char *va, int pmeg);
|
/linux-4.4.14/drivers/staging/lustre/lustre/mdc/ |
D | mdc_request.c | 1504 struct llog_ctxt *ctxt = NULL; in mdc_changelog_send_thread() local 1519 ctxt = llog_get_context(cs->cs_obd, LLOG_CHANGELOG_REPL_CTXT); in mdc_changelog_send_thread() 1520 if (ctxt == NULL) { in mdc_changelog_send_thread() 1524 rc = llog_open(NULL, ctxt, &llh, NULL, CHANGELOG_CATALOG, in mdc_changelog_send_thread() 1550 if (ctxt) in mdc_changelog_send_thread() 1551 llog_ctxt_put(ctxt); in mdc_changelog_send_thread() 2276 struct llog_ctxt *ctxt; in mdc_llog_init() local 2284 ctxt = llog_group_get_ctxt(olg, LLOG_CHANGELOG_REPL_CTXT); in mdc_llog_init() 2285 llog_initiator_connect(ctxt); in mdc_llog_init() 2286 llog_ctxt_put(ctxt); in mdc_llog_init() [all …]
|
/linux-4.4.14/drivers/media/usb/pvrusb2/ |
D | pvrusb2-encoder.c | 141 static int pvr2_encoder_cmd(void *ctxt, in pvr2_encoder_cmd() argument 155 struct pvr2_hdw *hdw = (struct pvr2_hdw *)ctxt; in pvr2_encoder_cmd()
|
/linux-4.4.14/drivers/xen/ |
D | xen-acpi-memhotplug.c | 376 u32 level, void *ctxt, void **retv) in acpi_memory_register_notify_handler() argument 392 u32 level, void *ctxt, void **retv) in acpi_memory_deregister_notify_handler() argument
|
/linux-4.4.14/net/ipv4/ |
D | sysctl_net_ipv4.c | 215 struct tcp_fastopen_context *ctxt; in proc_tcp_fastopen_key() local 224 ctxt = rcu_dereference(tcp_fastopen_ctx); in proc_tcp_fastopen_key() 225 if (ctxt) in proc_tcp_fastopen_key() 226 memcpy(user_key, ctxt->key, TCP_FASTOPEN_KEY_LENGTH); in proc_tcp_fastopen_key()
|
/linux-4.4.14/arch/arm64/kvm/ |
D | regmap.c | 114 unsigned long *reg_array = (unsigned long *)&vcpu->arch.ctxt.gp_regs.regs; in vcpu_reg32()
|
D | sys_regs.c | 1747 memset(&vcpu->arch.ctxt.sys_regs, 0x42, sizeof(vcpu->arch.ctxt.sys_regs)); in kvm_reset_sys_regs()
|
/linux-4.4.14/include/linux/sunrpc/ |
D | svc_rdma.h | 221 extern void svc_rdma_unmap_dma(struct svc_rdma_op_ctxt *ctxt);
|
/linux-4.4.14/include/uapi/rdma/hfi/ |
D | hfi1_user.h | 221 __u16 ctxt; /* ctxt on unit assigned to caller */ member
|
/linux-4.4.14/security/selinux/ss/ |
D | services.c | 3146 struct context *ctxt; in selinux_audit_rule_match() local 3163 ctxt = sidtab_search(&sidtab, sid); in selinux_audit_rule_match() 3164 if (unlikely(!ctxt)) { in selinux_audit_rule_match() 3178 match = (ctxt->user == rule->au_ctxt.user); in selinux_audit_rule_match() 3181 match = (ctxt->user != rule->au_ctxt.user); in selinux_audit_rule_match() 3189 match = (ctxt->role == rule->au_ctxt.role); in selinux_audit_rule_match() 3192 match = (ctxt->role != rule->au_ctxt.role); in selinux_audit_rule_match() 3200 match = (ctxt->type == rule->au_ctxt.type); in selinux_audit_rule_match() 3203 match = (ctxt->type != rule->au_ctxt.type); in selinux_audit_rule_match() 3213 &ctxt->range.level[0] : &ctxt->range.level[1]); in selinux_audit_rule_match()
|
/linux-4.4.14/arch/sparc/include/asm/ |
D | openprom.h | 141 void (*pv_setctxt)(int ctxt, char *va, int pmeg);
|
/linux-4.4.14/fs/ocfs2/cluster/ |
D | netdebug.c | 399 static int sc_common_open(struct file *file, int ctxt) in sc_common_open() argument 414 sd->dbg_ctxt = ctxt; in sc_common_open()
|
/linux-4.4.14/arch/arm64/kernel/ |
D | asm-offsets.c | 107 DEFINE(VCPU_CONTEXT, offsetof(struct kvm_vcpu, arch.ctxt)); in main()
|
/linux-4.4.14/drivers/scsi/aacraid/ |
D | aacraid.h | 856 typedef void (*fib_callback)(void *ctxt, struct fib *fibctx); 2125 …context, unsigned long size, int priority, int wait, int reply, fib_callback callback, void *ctxt);
|
/linux-4.4.14/Documentation/ |
D | coccinelle.txt | 68 'rep+ctxt' runs successively the report mode and the context mode.
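A typical way to select this mode — assuming the coccicheck make target that the same document describes, so treat the exact flags as an illustration rather than a reference — is make coccicheck MODE=rep+ctxt, optionally adding COCCI=<semantic patch> to restrict the run to a single script.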
|
/linux-4.4.14/Documentation/filesystems/ |
D | proc.txt | 1283 ctxt 1990473 1312 The "ctxt" line gives the total number of context switches across all CPUs.
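Since proc.txt says the "ctxt" line of /proc/stat is the running total of context switches across all CPUs, a short reader for that single field looks like this (assumes a Linux /proc mount; the sample value 1990473 above is just the documentation's example):

    #include <stdio.h>

    int main(void)
    {
        char line[256];
        unsigned long long ctxt;
        FILE *f = fopen("/proc/stat", "r");

        if (!f) {
            perror("/proc/stat");
            return 1;
        }
        while (fgets(line, sizeof(line), f)) {
            /* The line of interest looks like: "ctxt 1990473" */
            if (sscanf(line, "ctxt %llu", &ctxt) == 1) {
                printf("context switches since boot: %llu\n", ctxt);
                break;
            }
        }
        fclose(f);
        return 0;
    }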
|