/linux-4.4.14/drivers/staging/media/lirc/
D | lirc_imon.c | 189 static void free_imon_context(struct imon_context *context) in free_imon_context() argument 191 struct device *dev = context->driver->dev; in free_imon_context() 193 usb_free_urb(context->tx_urb); in free_imon_context() 194 usb_free_urb(context->rx_urb); in free_imon_context() 195 lirc_buffer_free(context->driver->rbuf); in free_imon_context() 196 kfree(context->driver->rbuf); in free_imon_context() 197 kfree(context->driver); in free_imon_context() 198 kfree(context); in free_imon_context() 203 static void deregister_from_lirc(struct imon_context *context) in deregister_from_lirc() argument 206 int minor = context->driver->minor; in deregister_from_lirc() [all …]
|
D | lirc_sasem.c | 165 static void delete_context(struct sasem_context *context) in delete_context() argument 167 usb_free_urb(context->tx_urb); /* VFD */ in delete_context() 168 usb_free_urb(context->rx_urb); /* IR */ in delete_context() 169 lirc_buffer_free(context->driver->rbuf); in delete_context() 170 kfree(context->driver->rbuf); in delete_context() 171 kfree(context->driver); in delete_context() 172 kfree(context); in delete_context() 175 static void deregister_from_lirc(struct sasem_context *context) in deregister_from_lirc() argument 178 int minor = context->driver->minor; in deregister_from_lirc() 182 dev_err(&context->dev->dev, in deregister_from_lirc() [all …]
|
/linux-4.4.14/drivers/misc/vmw_vmci/ |
D | vmci_context.c | 48 static void ctx_signal_notify(struct vmci_ctx *context) in ctx_signal_notify() argument 50 *context->notify = true; in ctx_signal_notify() 53 static void ctx_clear_notify(struct vmci_ctx *context) in ctx_clear_notify() argument 55 *context->notify = false; in ctx_clear_notify() 62 static void ctx_clear_notify_call(struct vmci_ctx *context) in ctx_clear_notify_call() argument 64 if (context->pending_datagrams == 0 && in ctx_clear_notify_call() 65 vmci_handle_arr_get_size(context->pending_doorbell_array) == 0) in ctx_clear_notify_call() 66 ctx_clear_notify(context); in ctx_clear_notify_call() 73 void vmci_ctx_check_signal_notify(struct vmci_ctx *context) in vmci_ctx_check_signal_notify() argument 75 spin_lock(&context->lock); in vmci_ctx_check_signal_notify() [all …]
|
D | vmci_route.c | 50 if (VMCI_INVALID_ID == dst->context) in vmci_route() 54 if (VMCI_HYPERVISOR_CONTEXT_ID == dst->context) { in vmci_route() 72 if (VMCI_HOST_CONTEXT_ID == src->context) in vmci_route() 83 if (VMCI_INVALID_ID == src->context && in vmci_route() 85 src->context = vmci_get_context_id(); in vmci_route() 93 if (VMCI_HOST_CONTEXT_ID == dst->context) { in vmci_route() 102 if (src->context == VMCI_HYPERVISOR_CONTEXT_ID) { in vmci_route() 121 if (VMCI_INVALID_ID == src->context) in vmci_route() 122 src->context = vmci_get_context_id(); in vmci_route() 138 if (VMCI_INVALID_ID == src->context) { in vmci_route() [all …]
|
D | vmci_context.h | 137 void vmci_ctx_destroy(struct vmci_ctx *context); 139 bool vmci_ctx_supports_host_qp(struct vmci_ctx *context); 141 int vmci_ctx_dequeue_datagram(struct vmci_ctx *context, 145 void vmci_ctx_put(struct vmci_ctx *context); 155 int vmci_ctx_qp_create(struct vmci_ctx *context, struct vmci_handle handle); 156 int vmci_ctx_qp_destroy(struct vmci_ctx *context, struct vmci_handle handle); 157 bool vmci_ctx_qp_exists(struct vmci_ctx *context, struct vmci_handle handle); 159 void vmci_ctx_check_signal_notify(struct vmci_ctx *context); 160 void vmci_ctx_unset_notify(struct vmci_ctx *context); 175 static inline u32 vmci_ctx_get_id(struct vmci_ctx *context) in vmci_ctx_get_id() argument [all …]
|
D | vmci_host.c | 93 struct vmci_ctx *context; member 146 vmci_ctx_destroy(vmci_host_dev->context); in vmci_host_close() 147 vmci_host_dev->context = NULL; in vmci_host_close() 171 struct vmci_ctx *context = vmci_host_dev->context; in vmci_host_poll() local 177 poll_wait(filp, &context->host_context.wait_queue, in vmci_host_poll() 180 spin_lock(&context->lock); in vmci_host_poll() 181 if (context->pending_datagrams > 0 || in vmci_host_poll() 183 context->pending_doorbell_array) > 0) { in vmci_host_poll() 186 spin_unlock(&context->lock); in vmci_host_poll() 224 static int vmci_host_setup_notify(struct vmci_ctx *context, in vmci_host_setup_notify() argument [all …]
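vmci_host_poll() above follows the standard poll contract: register on the wait queue unconditionally with poll_wait(), then sample the per-context state under context->lock and return the event mask. A minimal sketch of that shape for a hypothetical character device (the private struct, its fields and the file_operations wiring are assumptions, not VMCI code):

    #include <linux/poll.h>
    #include <linux/fs.h>
    #include <linux/spinlock.h>
    #include <linux/wait.h>

    struct my_ctx {                          /* hypothetical per-open state */
            spinlock_t lock;
            wait_queue_head_t wait_queue;
            unsigned int pending;            /* work queued for userspace */
    };

    static unsigned int my_poll(struct file *filp, poll_table *wait)
    {
            struct my_ctx *ctx = filp->private_data;
            unsigned int mask = 0;

            poll_wait(filp, &ctx->wait_queue, wait);  /* never blocks */

            spin_lock(&ctx->lock);
            if (ctx->pending > 0)
                    mask |= POLLIN | POLLRDNORM;
            spin_unlock(&ctx->lock);

            return mask;
    }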
|
D | vmci_datagram.c | 100 handle.context, handle.resource, result); in dg_create_handle() 174 if (dg->dst.context == VMCI_HYPERVISOR_CONTEXT_ID) in dg_dispatch_as_host() 178 if (dg->src.context != context_id) { in dg_dispatch_as_host() 180 context_id, dg->src.context, dg->src.resource); in dg_dispatch_as_host() 189 dg->src.context, dg->src.resource); in dg_dispatch_as_host() 194 if (dg->dst.context == VMCI_HOST_CONTEXT_ID) { in dg_dispatch_as_host() 199 if (dg->src.context == VMCI_HYPERVISOR_CONTEXT_ID && in dg_dispatch_as_host() 208 dg->dst.context, dg->dst.resource); in dg_dispatch_as_host() 225 dg->src.context == VMCI_HOST_CONTEXT_ID) { in dg_dispatch_as_host() 261 if (context_id != dg->dst.context) { in dg_dispatch_as_host() [all …]
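The routing in vmci_route() and dg_dispatch_as_host() above keys off the context half of a struct vmci_handle, which pairs a context ID (which endpoint: host, hypervisor or a guest VM) with a resource ID inside that endpoint. A minimal sketch of building such a handle, assuming the vmci_make_handle() helper and VMCI_HOST_CONTEXT_ID from include/linux/vmw_vmci_defs.h; the resource number is hypothetical:

    #include <linux/vmw_vmci_defs.h>

    #define MY_RESOURCE_ID 42                /* hypothetical well-known resource */

    static struct vmci_handle my_host_resource(void)
    {
            /* .context selects the endpoint, .resource the object within it. */
            return vmci_make_handle(VMCI_HOST_CONTEXT_ID, MY_RESOURCE_ID);
    }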
|
D | vmci_doorbell.c | 95 if (priv_flags == NULL || handle.context == VMCI_INVALID_ID) in vmci_dbell_get_priv_flags() 98 if (handle.context == VMCI_HOST_CONTEXT_ID) { in vmci_dbell_get_priv_flags() 110 } else if (handle.context == VMCI_HYPERVISOR_CONTEXT_ID) { in vmci_dbell_get_priv_flags() 117 *priv_flags = vmci_context_get_priv_flags(handle.context); in vmci_dbell_get_priv_flags() 307 handle.context, handle.resource); in vmci_dbell_host_context_notify() 315 handle.context, handle.resource); in vmci_dbell_host_context_notify() 446 if (handle->context == VMCI_HOST_CONTEXT_ID || in vmci_doorbell_create() 448 vmci_get_context_id() == handle->context)) { in vmci_doorbell_create() 454 handle->context, handle->resource); in vmci_doorbell_create() 476 new_handle.context, new_handle.resource, result); in vmci_doorbell_create() [all …]
|
D | vmci_queue_pair.c | 1004 handle.context, handle.resource, result); in qp_guest_endpoint_create() 1286 if (queue_pair_entry->qp.handle.context != context_id || in qp_alloc_guest_work() 1373 struct vmci_ctx *context, in qp_broker_create() argument 1378 const u32 context_id = vmci_ctx_get_id(context); in qp_broker_create() 1392 if (handle.context != context_id && handle.context != peer) in qp_broker_create() 1409 if (vmci_ctx_get_id(context) == VMCI_HOST_CONTEXT_ID && !is_local) { in qp_broker_create() 1435 !!(context->priv_flags & VMCI_PRIVILEGE_FLAG_RESTRICTED); in qp_broker_create() 1503 handle.context, handle.resource, result); in qp_broker_create() 1515 vmci_ctx_qp_create(context, entry->qp.handle); in qp_broker_create() 1601 struct vmci_ctx *context, in qp_broker_attach() argument [all …]
|
D | vmci_queue_pair.h | 154 struct vmci_ctx *context); 157 struct vmci_ctx *context); 158 int vmci_qp_broker_detach(struct vmci_handle handle, struct vmci_ctx *context); 169 struct vmci_ctx *context, u64 guest_mem); 171 struct vmci_ctx *context, u32 gid);
|
/linux-4.4.14/security/selinux/ss/ |
D | mls.h | 27 int mls_compute_context_len(struct context *context); 28 void mls_sid_to_context(struct context *context, char **scontext); 29 int mls_context_isvalid(struct policydb *p, struct context *c); 36 struct context *context, 40 int mls_from_string(char *str, struct context *context, gfp_t gfp_mask); 42 int mls_range_set(struct context *context, struct mls_range *range); 46 struct context *context); 48 int mls_compute_sid(struct context *scontext, 49 struct context *tcontext, 52 struct context *newcontext, [all …]
|
D | mls.c | 35 int mls_compute_context_len(struct context *context) in mls_compute_context_len() argument 47 int index_sens = context->range.level[l].sens; in mls_compute_context_len() 53 e = &context->range.level[l].cat; in mls_compute_context_len() 72 if (mls_level_eq(&context->range.level[0], in mls_compute_context_len() 73 &context->range.level[1])) in mls_compute_context_len() 88 void mls_sid_to_context(struct context *context, in mls_sid_to_context() argument 106 context->range.level[l].sens - 1)); in mls_sid_to_context() 112 e = &context->range.level[l].cat; in mls_sid_to_context() 148 if (mls_level_eq(&context->range.level[0], in mls_sid_to_context() 149 &context->range.level[1])) in mls_sid_to_context() [all …]
|
D | sidtab.c | 33 int sidtab_insert(struct sidtab *s, u32 sid, struct context *context) in sidtab_insert() argument 62 if (context_cpy(&newnode->context, context)) { in sidtab_insert() 85 static struct context *sidtab_search_core(struct sidtab *s, u32 sid, int force) in sidtab_search_core() 98 if (force && cur && sid == cur->sid && cur->context.len) in sidtab_search_core() 99 return &cur->context; in sidtab_search_core() 101 if (cur == NULL || sid != cur->sid || cur->context.len) { in sidtab_search_core() 112 return &cur->context; in sidtab_search_core() 115 struct context *sidtab_search(struct sidtab *s, u32 sid) in sidtab_search() 120 struct context *sidtab_search_force(struct sidtab *s, u32 sid) in sidtab_search_force() 127 struct context *context, in sidtab_map() argument [all …]
|
D | sidtab.h | 14 struct context context; /* security context structure */ member 35 int sidtab_insert(struct sidtab *s, u32 sid, struct context *context); 36 struct context *sidtab_search(struct sidtab *s, u32 sid); 37 struct context *sidtab_search_force(struct sidtab *s, u32 sid); 41 struct context *context, 46 struct context *context,
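sidtab.h above is the whole SID-to-context mapping API: sidtab_insert() stores a copy of the caller's struct context under a 32-bit SID (copied with context_cpy(), see context.h below), and sidtab_search()/sidtab_search_force() return a pointer to the stored copy or NULL. A minimal usage sketch; the SID value and the pre-built context are assumptions, and sidtab_init() is declared in sidtab.h though not visible in the excerpt:

    struct sidtab table;
    struct context *stored;

    sidtab_init(&table);                            /* declared in sidtab.h */
    if (!sidtab_insert(&table, 1234, &ctx))         /* ctx: a context built elsewhere */
            stored = sidtab_search(&table, 1234);   /* NULL when the SID is unknown */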
|
D | context.h | 26 struct context { struct 35 static inline void mls_context_init(struct context *c) in mls_context_init() 40 static inline int mls_context_cpy(struct context *dst, struct context *src) in mls_context_cpy() 60 static inline int mls_context_cpy_low(struct context *dst, struct context *src) in mls_context_cpy_low() 80 static inline int mls_context_cpy_high(struct context *dst, struct context *src) in mls_context_cpy_high() 97 static inline int mls_context_cmp(struct context *c1, struct context *c2) in mls_context_cmp() 105 static inline void mls_context_destroy(struct context *c) in mls_context_destroy() 112 static inline void context_init(struct context *c) in context_init() 117 static inline int context_cpy(struct context *dst, struct context *src) in context_cpy() 141 static inline void context_destroy(struct context *c) in context_destroy() [all …]
|
D | services.c | 92 static int context_struct_to_string(struct context *context, char **scontext, 95 static void context_struct_compute_av(struct context *scontext, 96 struct context *tcontext, 272 static int constraint_expr_eval(struct context *scontext, in constraint_expr_eval() 273 struct context *tcontext, in constraint_expr_eval() 274 struct context *xcontext, in constraint_expr_eval() 278 struct context *c; in constraint_expr_eval() 462 static void security_dump_masked_av(struct context *scontext, in security_dump_masked_av() 463 struct context *tcontext, in security_dump_masked_av() 540 static void type_attribute_bounds_av(struct context *scontext, in type_attribute_bounds_av() [all …]
|
/linux-4.4.14/fs/xfs/ |
D | xfs_attr_list.c | 69 xfs_attr_shortform_list(xfs_attr_list_context_t *context) in xfs_attr_shortform_list() argument 79 ASSERT(context != NULL); in xfs_attr_shortform_list() 80 dp = context->dp; in xfs_attr_shortform_list() 87 cursor = context->cursor; in xfs_attr_shortform_list() 90 trace_xfs_attr_list_sf(context); in xfs_attr_shortform_list() 101 if (context->bufsize == 0 || in xfs_attr_shortform_list() 103 (dp->i_afp->if_bytes + sf->hdr.count * 16) < context->bufsize)) { in xfs_attr_shortform_list() 105 error = context->put_listent(context, in xfs_attr_shortform_list() 116 if (context->seen_enough) in xfs_attr_shortform_list() 123 trace_xfs_attr_list_sf_all(context); in xfs_attr_shortform_list() [all …]
|
D | xfs_xattr.c | 160 struct xfs_attr_list_context *context, in xfs_xattr_put_listent() argument 171 ASSERT(context->count >= 0); in xfs_xattr_put_listent() 180 arraytop = context->count + prefix_len + namelen + 1; in xfs_xattr_put_listent() 181 if (arraytop > context->firstu) { in xfs_xattr_put_listent() 182 context->count = -1; /* insufficient space */ in xfs_xattr_put_listent() 185 offset = (char *)context->alist + context->count; in xfs_xattr_put_listent() 191 context->count += prefix_len + namelen + 1; in xfs_xattr_put_listent() 197 struct xfs_attr_list_context *context, in xfs_xattr_put_listent_sizes() argument 204 context->count += xfs_xattr_prefix_len(flags) + namelen + 1; in xfs_xattr_put_listent_sizes() 227 struct xfs_attr_list_context context; in xfs_vn_listxattr() local [all …]
|
/linux-4.4.14/kernel/ |
D | auditsc.c | 826 struct audit_context *context = tsk->audit_context; in audit_take_context() local 828 if (!context) in audit_take_context() 830 context->return_valid = return_valid; in audit_take_context() 846 context->return_code = -EINTR; in audit_take_context() 848 context->return_code = return_code; in audit_take_context() 850 if (context->in_syscall && !context->dummy) { in audit_take_context() 851 audit_filter_syscall(tsk, context, &audit_filter_list[AUDIT_FILTER_EXIT]); in audit_take_context() 852 audit_filter_inodes(tsk, context); in audit_take_context() 856 return context; in audit_take_context() 859 static inline void audit_proctitle_free(struct audit_context *context) in audit_proctitle_free() argument [all …]
|
/linux-4.4.14/drivers/infiniband/core/ |
D | umem_odp.c | 82 static void ib_ucontext_notifier_start_account(struct ib_ucontext *context) in ib_ucontext_notifier_start_account() argument 84 atomic_inc(&context->notifier_count); in ib_ucontext_notifier_start_account() 91 static void ib_ucontext_notifier_end_account(struct ib_ucontext *context) in ib_ucontext_notifier_end_account() argument 93 int zero_notifiers = atomic_dec_and_test(&context->notifier_count); in ib_ucontext_notifier_end_account() 96 !list_empty(&context->no_private_counters)) { in ib_ucontext_notifier_end_account() 103 down_write(&context->umem_rwsem); in ib_ucontext_notifier_end_account() 107 if (!atomic_read(&context->notifier_count)) { in ib_ucontext_notifier_end_account() 109 &context->no_private_counters, in ib_ucontext_notifier_end_account() 119 up_write(&context->umem_rwsem); in ib_ucontext_notifier_end_account() 135 item->context->invalidate_range(item, ib_umem_start(item), in ib_umem_notifier_release_trampoline() [all …]
|
D | device.c | 252 struct ib_client_data *context; in add_client_context() local 255 context = kmalloc(sizeof *context, GFP_KERNEL); in add_client_context() 256 if (!context) { in add_client_context() 262 context->client = client; in add_client_context() 263 context->data = NULL; in add_client_context() 264 context->going_down = false; in add_client_context() 268 list_add(&context->list, &device->client_data_list); in add_client_context() 386 struct ib_client_data *context, *tmp; in ib_unregister_device() local 394 list_for_each_entry_safe(context, tmp, &device->client_data_list, list) in ib_unregister_device() 395 context->going_down = true; in ib_unregister_device() [all …]
|
D | umem.c | 83 struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr, in ib_umem_get() argument 120 umem->context = context; in ib_umem_get() 137 ret = ib_umem_odp_get(context, umem); in ib_umem_get() 214 umem->nmap = ib_dma_map_sg_attrs(context->device, in ib_umem_get() 230 __ib_umem_release(context->device, umem, 0); in ib_umem_get() 262 struct ib_ucontext *context = umem->context; in ib_umem_release() local 272 __ib_umem_release(umem->context->device, umem, 1); in ib_umem_release() 293 if (context->closing) { in ib_umem_release()
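ib_umem_get() above pins and DMA-maps a user buffer on behalf of the ib_ucontext that issued the verbs call, and ib_umem_release() undoes it (deferring the work if the context is closing). A minimal sketch of the usual call site in a verbs driver, matching the 4.4 five-argument signature and the mlx4/mlx5 call sites further down; the user-supplied address/length fields and the access flags are illustrative:

    struct ib_umem *umem;

    /* (ucontext, user VA, length, IB access flags, dmasync) */
    umem = ib_umem_get(pd->uobject->context, ucmd.buf_addr, ucmd.buf_len,
                       IB_ACCESS_LOCAL_WRITE, 0);
    if (IS_ERR(umem))
            return PTR_ERR(umem);

    /* ... hand umem->nmap / the scatterlist to the HCA ... */

    ib_umem_release(umem);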
|
/linux-4.4.14/drivers/tty/serial/ |
D | tilegx.c | 49 gxio_uart_context_t context; member 71 gxio_uart_context_t *context = &tile_uart->context; in receive_chars() local 74 count.word = gxio_uart_read(context, UART_FIFO_COUNT); in receive_chars() 76 c = (char)gxio_uart_read(context, UART_RECEIVE_DATA); in receive_chars() 89 gxio_uart_context_t *context = &tile_uart->context; in handle_receive() local 98 gxio_uart_write(context, UART_INTERRUPT_STATUS, in handle_receive() 117 static int tilegx_putchar(gxio_uart_context_t *context, char c) in tilegx_putchar() argument 120 flag.word = gxio_uart_read(context, UART_FLAG); in tilegx_putchar() 124 gxio_uart_write(context, UART_TRANSMIT_DATA, (unsigned long)c); in tilegx_putchar() 137 gxio_uart_context_t *context = &tile_uart->context; in handle_transmit() local [all …]
|
/linux-4.4.14/arch/tile/gxio/ |
D | mpipe.c | 33 int gxio_mpipe_init(gxio_mpipe_context_t *context, unsigned int mpipe_index) in gxio_mpipe_init() argument 46 context->fd = fd; in gxio_mpipe_init() 56 context->mmio_cfg_base = (void __force *) in gxio_mpipe_init() 59 if (context->mmio_cfg_base == NULL) in gxio_mpipe_init() 62 context->mmio_fast_base = (void __force *) in gxio_mpipe_init() 65 if (context->mmio_fast_base == NULL) in gxio_mpipe_init() 70 context->__stacks.stacks[i] = 255; in gxio_mpipe_init() 72 context->instance = mpipe_index; in gxio_mpipe_init() 77 iounmap((void __force __iomem *)(context->mmio_cfg_base)); in gxio_mpipe_init() 79 hv_dev_close(context->fd); in gxio_mpipe_init() [all …]
|
D | uart.c | 28 int gxio_uart_init(gxio_uart_context_t *context, int uart_index) in gxio_uart_init() argument 42 context->fd = fd; in gxio_uart_init() 45 context->mmio_base = (void __force *) in gxio_uart_init() 48 if (context->mmio_base == NULL) { in gxio_uart_init() 49 hv_dev_close(context->fd); in gxio_uart_init() 50 context->fd = -1; in gxio_uart_init() 59 int gxio_uart_destroy(gxio_uart_context_t *context) in gxio_uart_destroy() argument 61 iounmap((void __force __iomem *)(context->mmio_base)); in gxio_uart_destroy() 62 hv_dev_close(context->fd); in gxio_uart_destroy() 64 context->mmio_base = NULL; in gxio_uart_destroy() [all …]
|
D | iorpc_mpipe.c | 24 int gxio_mpipe_alloc_buffer_stacks(gxio_mpipe_context_t *context, in gxio_mpipe_alloc_buffer_stacks() argument 35 return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params, in gxio_mpipe_alloc_buffer_stacks() 48 int gxio_mpipe_init_buffer_stack_aux(gxio_mpipe_context_t *context, in gxio_mpipe_init_buffer_stack_aux() argument 69 return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params, in gxio_mpipe_init_buffer_stack_aux() 83 int gxio_mpipe_alloc_notif_rings(gxio_mpipe_context_t *context, in gxio_mpipe_alloc_notif_rings() argument 94 return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params, in gxio_mpipe_alloc_notif_rings() 105 int gxio_mpipe_init_notif_ring_aux(gxio_mpipe_context_t *context, void *mem_va, in gxio_mpipe_init_notif_ring_aux() argument 124 return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params, in gxio_mpipe_init_notif_ring_aux() 136 int gxio_mpipe_request_notif_ring_interrupt(gxio_mpipe_context_t *context, in gxio_mpipe_request_notif_ring_interrupt() argument 150 return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params, in gxio_mpipe_request_notif_ring_interrupt() [all …]
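Every wrapper in these iorpc_*.c files follows one pattern: marshal the arguments into a parameter struct on the stack and push it at the hypervisor device handle stored in context->fd, passing the IORPC opcode as the offset argument of hv_dev_pwrite(). A condensed sketch of that shape; the parameter layout and the opcode name are hypothetical, only the hv_dev_pwrite() call mirrors the excerpts:

    struct {
            unsigned int count;
            unsigned int first;
            unsigned int flags;
    } temp, *params = &temp;

    params->count = count;
    params->first = first;
    params->flags = flags;

    return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params,
                         sizeof(*params), MY_IORPC_OP /* hypothetical opcode */);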
|
D | usb_host.c | 29 int gxio_usb_host_init(gxio_usb_host_context_t *context, int usb_index, in gxio_usb_host_init() argument 50 context->fd = fd; in gxio_usb_host_init() 53 context->mmio_base = in gxio_usb_host_init() 56 if (context->mmio_base == NULL) { in gxio_usb_host_init() 57 hv_dev_close(context->fd); in gxio_usb_host_init() 66 int gxio_usb_host_destroy(gxio_usb_host_context_t *context) in gxio_usb_host_destroy() argument 68 iounmap((void __force __iomem *)(context->mmio_base)); in gxio_usb_host_destroy() 69 hv_dev_close(context->fd); in gxio_usb_host_destroy() 71 context->mmio_base = NULL; in gxio_usb_host_destroy() 72 context->fd = -1; in gxio_usb_host_destroy() [all …]
|
D | iorpc_trio.c | 24 int gxio_trio_alloc_asids(gxio_trio_context_t *context, unsigned int count, in gxio_trio_alloc_asids() argument 34 return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params, in gxio_trio_alloc_asids() 47 int gxio_trio_alloc_memory_maps(gxio_trio_context_t *context, in gxio_trio_alloc_memory_maps() argument 58 return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params, in gxio_trio_alloc_memory_maps() 70 int gxio_trio_alloc_scatter_queues(gxio_trio_context_t *context, in gxio_trio_alloc_scatter_queues() argument 81 return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params, in gxio_trio_alloc_scatter_queues() 94 int gxio_trio_alloc_pio_regions(gxio_trio_context_t *context, in gxio_trio_alloc_pio_regions() argument 105 return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params, in gxio_trio_alloc_pio_regions() 118 int gxio_trio_init_pio_region_aux(gxio_trio_context_t *context, in gxio_trio_init_pio_region_aux() argument 130 return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params, in gxio_trio_init_pio_region_aux() [all …]
|
D | iorpc_mpipe_info.c | 22 int gxio_mpipe_info_instance_aux(gxio_mpipe_info_context_t *context, in gxio_mpipe_info_instance_aux() argument 30 return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params, in gxio_mpipe_info_instance_aux() 41 int gxio_mpipe_info_enumerate_aux(gxio_mpipe_info_context_t *context, in gxio_mpipe_info_enumerate_aux() argument 51 hv_dev_pread(context->fd, 0, (HV_VirtAddr) params, sizeof(*params), in gxio_mpipe_info_enumerate_aux() 66 int gxio_mpipe_info_get_mmio_base(gxio_mpipe_info_context_t *context, in gxio_mpipe_info_get_mmio_base() argument 74 hv_dev_pread(context->fd, 0, (HV_VirtAddr) params, sizeof(*params), in gxio_mpipe_info_get_mmio_base() 88 int gxio_mpipe_info_check_mmio_offset(gxio_mpipe_info_context_t *context, in gxio_mpipe_info_check_mmio_offset() argument 97 return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params, in gxio_mpipe_info_check_mmio_offset()
|
D | iorpc_usb_host.c | 22 int gxio_usb_host_cfg_interrupt(gxio_usb_host_context_t *context, int inter_x, in gxio_usb_host_cfg_interrupt() argument 33 return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params, in gxio_usb_host_cfg_interrupt() 44 int gxio_usb_host_register_client_memory(gxio_usb_host_context_t *context, in gxio_usb_host_register_client_memory() argument 53 return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params, in gxio_usb_host_register_client_memory() 64 int gxio_usb_host_get_mmio_base(gxio_usb_host_context_t *context, HV_PTE *base) in gxio_usb_host_get_mmio_base() argument 71 hv_dev_pread(context->fd, 0, (HV_VirtAddr) params, sizeof(*params), in gxio_usb_host_get_mmio_base() 85 int gxio_usb_host_check_mmio_offset(gxio_usb_host_context_t *context, in gxio_usb_host_check_mmio_offset() argument 94 return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params, in gxio_usb_host_check_mmio_offset()
|
D | iorpc_uart.c | 22 int gxio_uart_cfg_interrupt(gxio_uart_context_t *context, int inter_x, in gxio_uart_cfg_interrupt() argument 33 return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params, in gxio_uart_cfg_interrupt() 43 int gxio_uart_get_mmio_base(gxio_uart_context_t *context, HV_PTE *base) in gxio_uart_get_mmio_base() argument 50 hv_dev_pread(context->fd, 0, (HV_VirtAddr) params, sizeof(*params), in gxio_uart_get_mmio_base() 64 int gxio_uart_check_mmio_offset(gxio_uart_context_t *context, in gxio_uart_check_mmio_offset() argument 73 return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params, in gxio_uart_check_mmio_offset()
|
D | trio.c | 28 int gxio_trio_init(gxio_trio_context_t *context, unsigned int trio_index) in gxio_trio_init() argument 36 context->fd = -1; in gxio_trio_init() 44 context->fd = fd; in gxio_trio_init()
|
/linux-4.4.14/arch/tile/include/gxio/ |
D | iorpc_mpipe.h | 59 int gxio_mpipe_alloc_buffer_stacks(gxio_mpipe_context_t *context, 63 int gxio_mpipe_init_buffer_stack_aux(gxio_mpipe_context_t *context, 69 int gxio_mpipe_alloc_notif_rings(gxio_mpipe_context_t *context, 73 int gxio_mpipe_init_notif_ring_aux(gxio_mpipe_context_t *context, void *mem_va, 77 int gxio_mpipe_request_notif_ring_interrupt(gxio_mpipe_context_t *context, 82 int gxio_mpipe_enable_notif_ring_interrupt(gxio_mpipe_context_t *context, 85 int gxio_mpipe_alloc_notif_groups(gxio_mpipe_context_t *context, 89 int gxio_mpipe_init_notif_group(gxio_mpipe_context_t *context, 93 int gxio_mpipe_alloc_buckets(gxio_mpipe_context_t *context, unsigned int count, 96 int gxio_mpipe_init_bucket(gxio_mpipe_context_t *context, unsigned int bucket, [all …]
|
D | mpipe.h | 347 extern int gxio_mpipe_init(gxio_mpipe_context_t *context, 360 extern int gxio_mpipe_destroy(gxio_mpipe_context_t *context); 378 extern int gxio_mpipe_alloc_buffer_stacks(gxio_mpipe_context_t *context, 439 extern int gxio_mpipe_init_buffer_stack(gxio_mpipe_context_t *context, 457 static inline void gxio_mpipe_push_buffer(gxio_mpipe_context_t *context, in gxio_mpipe_push_buffer() argument 478 __gxio_mmio_write(context->mmio_fast_base + offset.word, val.word); in gxio_mpipe_push_buffer() 487 static inline void *gxio_mpipe_pop_buffer(gxio_mpipe_context_t *context, in gxio_mpipe_pop_buffer() argument 509 __gxio_mmio_read(context->mmio_fast_base + in gxio_mpipe_pop_buffer() 541 extern int gxio_mpipe_alloc_notif_rings(gxio_mpipe_context_t *context, 558 extern int gxio_mpipe_init_notif_ring(gxio_mpipe_context_t *context, [all …]
|
D | iorpc_trio.h | 49 int gxio_trio_alloc_asids(gxio_trio_context_t *context, unsigned int count, 53 int gxio_trio_alloc_memory_maps(gxio_trio_context_t *context, 58 int gxio_trio_alloc_scatter_queues(gxio_trio_context_t *context, 62 int gxio_trio_alloc_pio_regions(gxio_trio_context_t *context, 66 int gxio_trio_init_pio_region_aux(gxio_trio_context_t *context, 71 int gxio_trio_init_memory_map_mmu_aux(gxio_trio_context_t *context, 78 int gxio_trio_get_port_property(gxio_trio_context_t *context, 81 int gxio_trio_config_legacy_intr(gxio_trio_context_t *context, int inter_x, 85 int gxio_trio_config_msi_intr(gxio_trio_context_t *context, int inter_x, 92 int gxio_trio_set_mps_mrs(gxio_trio_context_t *context, uint16_t mps, [all …]
|
D | uart.h | 58 extern int gxio_uart_cfg_interrupt(gxio_uart_context_t *context, 74 extern int gxio_uart_init(gxio_uart_context_t *context, int uart_index); 88 extern int gxio_uart_destroy(gxio_uart_context_t *context); 95 extern void gxio_uart_write(gxio_uart_context_t *context, uint64_t offset, 103 extern uint64_t gxio_uart_read(gxio_uart_context_t *context, uint64_t offset);
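uart.h above is the complete per-context UART API: gxio_uart_init() binds a context to one UART instance and maps its MMIO window, gxio_uart_read()/gxio_uart_write() access registers through that mapping, and gxio_uart_destroy() tears it down. A minimal echo sketch built only from the calls and register offsets visible in tilegx.c above; a real caller first polls UART_FIFO_COUNT / UART_FLAG as tilegx.c does, which is omitted here:

    gxio_uart_context_t ctx;
    char c;

    if (gxio_uart_init(&ctx, 0) < 0)         /* bind to UART instance 0 */
            return -ENODEV;

    c = (char)gxio_uart_read(&ctx, UART_RECEIVE_DATA);
    gxio_uart_write(&ctx, UART_TRANSMIT_DATA, (unsigned long)c);

    gxio_uart_destroy(&ctx);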
|
D | usb_host.h | 56 extern int gxio_usb_host_init(gxio_usb_host_context_t *context, int usb_index, 71 extern int gxio_usb_host_destroy(gxio_usb_host_context_t *context); 78 extern void *gxio_usb_host_get_reg_start(gxio_usb_host_context_t *context); 85 extern size_t gxio_usb_host_get_reg_len(gxio_usb_host_context_t *context);
|
D | iorpc_mpipe_info.h | 36 int gxio_mpipe_info_instance_aux(gxio_mpipe_info_context_t *context, 39 int gxio_mpipe_info_enumerate_aux(gxio_mpipe_info_context_t *context, 44 int gxio_mpipe_info_get_mmio_base(gxio_mpipe_info_context_t *context, 47 int gxio_mpipe_info_check_mmio_offset(gxio_mpipe_info_context_t *context,
|
D | iorpc_usb_host.h | 34 int gxio_usb_host_cfg_interrupt(gxio_usb_host_context_t *context, int inter_x, 37 int gxio_usb_host_register_client_memory(gxio_usb_host_context_t *context, 40 int gxio_usb_host_get_mmio_base(gxio_usb_host_context_t *context, 43 int gxio_usb_host_check_mmio_offset(gxio_usb_host_context_t *context,
|
D | iorpc_uart.h | 32 int gxio_uart_cfg_interrupt(gxio_uart_context_t *context, int inter_x, 35 int gxio_uart_get_mmio_base(gxio_uart_context_t *context, HV_PTE *base); 37 int gxio_uart_check_mmio_offset(gxio_uart_context_t *context,
|
/linux-4.4.14/drivers/net/ethernet/mellanox/mlx4/ |
D | en_resources.c | 42 int user_prio, struct mlx4_qp_context *context) in mlx4_en_fill_qp_context() argument 47 memset(context, 0, sizeof *context); in mlx4_en_fill_qp_context() 48 context->flags = cpu_to_be32(7 << 16 | rss << MLX4_RSS_QPC_FLAG_OFFSET); in mlx4_en_fill_qp_context() 49 context->pd = cpu_to_be32(mdev->priv_pdn); in mlx4_en_fill_qp_context() 50 context->mtu_msgmax = 0xff; in mlx4_en_fill_qp_context() 52 context->rq_size_stride = ilog2(size) << 3 | (ilog2(stride) - 4); in mlx4_en_fill_qp_context() 54 context->sq_size_stride = ilog2(size) << 3 | (ilog2(stride) - 4); in mlx4_en_fill_qp_context() 56 context->params2 |= MLX4_QP_BIT_FPP; in mlx4_en_fill_qp_context() 59 context->sq_size_stride = ilog2(TXBB_SIZE) - 4; in mlx4_en_fill_qp_context() 61 context->usr_page = cpu_to_be32(mdev->priv_uar.index); in mlx4_en_fill_qp_context() [all …]
|
D | intf.c | 44 void *context; member 60 dev_ctx->context = intf->add(&priv->dev); in mlx4_add_device() 62 if (dev_ctx->context) { in mlx4_add_device() 67 intf->activate(&priv->dev, dev_ctx->context); in mlx4_add_device() 83 intf->remove(&priv->dev, dev_ctx->context); in mlx4_remove_device() 167 dev_ctx->intf->remove(dev, dev_ctx->context); in mlx4_do_bond() 168 dev_ctx->context = dev_ctx->intf->add(dev); in mlx4_do_bond() 192 dev_ctx->intf->event(dev, dev_ctx->context, type, param); in mlx4_dispatch_event() 243 result = dev_ctx->intf->get_dev(dev, dev_ctx->context, port); in mlx4_get_protocol_dev()
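intf.c treats context as an opaque per-(device, client) cookie: whatever a client's add() callback returns is stored in dev_ctx->context and handed back verbatim to remove(), event() and get_dev(). A sketch of a hypothetical client under that contract; the callback names come from the excerpt above, while struct mlx4_interface, <linux/mlx4/driver.h> and mlx4_register_interface() are assumptions about the surrounding driver API:

    #include <linux/mlx4/driver.h>
    #include <linux/slab.h>

    struct my_state { int placeholder; };    /* hypothetical per-device state */

    static void *my_add(struct mlx4_dev *dev)
    {
            /* This return value becomes dev_ctx->context. */
            return kzalloc(sizeof(struct my_state), GFP_KERNEL);
    }

    static void my_remove(struct mlx4_dev *dev, void *context)
    {
            kfree(context);                  /* same pointer add() returned */
    }

    static void my_event(struct mlx4_dev *dev, void *context,
                         enum mlx4_dev_event event, unsigned long param)
    {
            /* context is again the add() return value */
    }

    static struct mlx4_interface my_intf = {
            .add    = my_add,
            .remove = my_remove,
            .event  = my_event,
    };
    /* registered elsewhere with mlx4_register_interface(&my_intf); */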
|
/linux-4.4.14/arch/s390/include/asm/ |
D | mmu_context.h | 18 spin_lock_init(&mm->context.list_lock); in init_new_context() 19 INIT_LIST_HEAD(&mm->context.pgtable_list); in init_new_context() 20 INIT_LIST_HEAD(&mm->context.gmap_list); in init_new_context() 21 cpumask_clear(&mm->context.cpu_attach_mask); in init_new_context() 22 atomic_set(&mm->context.attach_count, 0); in init_new_context() 23 mm->context.flush_mm = 0; in init_new_context() 25 mm->context.alloc_pgste = page_table_allocate_pgste; in init_new_context() 26 mm->context.has_pgste = 0; in init_new_context() 27 mm->context.use_skey = 0; in init_new_context() 29 switch (mm->context.asce_limit) { in init_new_context() [all …]
|
D | tlbflush.h | 67 atomic_add(0x10000, &mm->context.attach_count); in __tlb_flush_full() 77 &mm->context.cpu_attach_mask); in __tlb_flush_full() 79 atomic_sub(0x10000, &mm->context.attach_count); in __tlb_flush_full() 92 count = atomic_add_return(0x10000, &mm->context.attach_count); in __tlb_flush_asce() 104 &mm->context.cpu_attach_mask); in __tlb_flush_asce() 106 atomic_sub(0x10000, &mm->context.attach_count); in __tlb_flush_asce() 113 __tlb_flush_idte(init_mm.context.asce); in __tlb_flush_kernel() 135 __tlb_flush_idte_local(init_mm.context.asce); in __tlb_flush_kernel() 148 if (MACHINE_HAS_IDTE && list_empty(&mm->context.gmap_list)) in __tlb_flush_mm() 149 __tlb_flush_asce(mm, mm->context.asce); in __tlb_flush_mm() [all …]
|
D | mmu.h | 26 .context.list_lock = __SPIN_LOCK_UNLOCKED(name.context.list_lock), \ 27 .context.pgtable_list = LIST_HEAD_INIT(name.context.pgtable_list), \ 28 .context.gmap_list = LIST_HEAD_INIT(name.context.gmap_list),
|
/linux-4.4.14/arch/sparc/mm/ |
D | tsb.c | 77 spin_lock_irqsave(&mm->context.lock, flags); in flush_tsb_user() 80 base = (unsigned long) mm->context.tsb_block[MM_TSB_BASE].tsb; in flush_tsb_user() 81 nentries = mm->context.tsb_block[MM_TSB_BASE].tsb_nentries; in flush_tsb_user() 87 if (tb->huge && mm->context.tsb_block[MM_TSB_HUGE].tsb) { in flush_tsb_user() 88 base = (unsigned long) mm->context.tsb_block[MM_TSB_HUGE].tsb; in flush_tsb_user() 89 nentries = mm->context.tsb_block[MM_TSB_HUGE].tsb_nentries; in flush_tsb_user() 95 spin_unlock_irqrestore(&mm->context.lock, flags); in flush_tsb_user() 102 spin_lock_irqsave(&mm->context.lock, flags); in flush_tsb_user_page() 105 base = (unsigned long) mm->context.tsb_block[MM_TSB_BASE].tsb; in flush_tsb_user_page() 106 nentries = mm->context.tsb_block[MM_TSB_BASE].tsb_nentries; in flush_tsb_user_page() [all …]
|
/linux-4.4.14/drivers/usb/image/ |
D | microtek.c | 190 MTS_DEBUG("transfer = 0x%x context = 0x%x\n",(int)transfer,(int)context ); \ 191 …MTS_DEBUG("status = 0x%x data-length = 0x%x sent = 0x%x\n",transfer->status,(int)context->data_len… 192 mts_debug_dump(context->instance);\ 207 struct mts_transfer_context* context = (struct mts_transfer_context*)transfer->context; \ 384 context->instance->usb_dev, in mts_int_submit_urb() 389 context in mts_int_submit_urb() 395 context->srb->result = DID_ERROR << 16; in mts_int_submit_urb() 406 if ( likely(context->final_callback != NULL) ) in mts_transfer_cleanup() 407 context->final_callback(context->srb); in mts_transfer_cleanup() 414 context->srb->result &= MTS_SCSI_ERR_MASK; in mts_transfer_done() [all …]
|
/linux-4.4.14/drivers/pci/hotplug/ |
D | acpiphp_glue.c | 64 static void hotplug_event(u32 type, struct acpiphp_context *context); 75 struct acpiphp_context *context; in acpiphp_init_context() local 77 context = kzalloc(sizeof(*context), GFP_KERNEL); in acpiphp_init_context() 78 if (!context) in acpiphp_init_context() 81 context->refcount = 1; in acpiphp_init_context() 82 context->hp.notify = acpiphp_hotplug_notify; in acpiphp_init_context() 83 context->hp.fixup = acpiphp_post_dock_fixup; in acpiphp_init_context() 84 acpi_set_hp_context(adev, &context->hp); in acpiphp_init_context() 85 return context; in acpiphp_init_context() 96 struct acpiphp_context *context; in acpiphp_get_context() local [all …]
|
/linux-4.4.14/arch/ia64/include/asm/ |
D | mmu_context.h | 81 nv_mm_context_t context = mm->context; in get_mmu_context() local 83 if (likely(context)) in get_mmu_context() 88 context = mm->context; in get_mmu_context() 89 if (context == 0) { in get_mmu_context() 99 mm->context = context = ia64_ctx.next++; in get_mmu_context() 100 __set_bit(context, ia64_ctx.bitmap); in get_mmu_context() 110 return context; in get_mmu_context() 120 mm->context = 0; in init_new_context() 131 reload_context (nv_mm_context_t context) in reload_context() argument 138 rid = context << 3; /* make space for encoding the region number */ in reload_context() [all …]
|
/linux-4.4.14/arch/parisc/include/asm/ |
D | mmu_context.h | 26 mm->context = alloc_sid(); in init_new_context() 33 free_sid(mm->context); in destroy_context() 34 mm->context = 0; in destroy_context() 37 static inline unsigned long __space_to_prot(mm_context_t context) in __space_to_prot() argument 40 return context << 1; in __space_to_prot() 42 return context >> (SPACEID_SHIFT - 1); in __space_to_prot() 46 static inline void load_context(mm_context_t context) in load_context() argument 48 mtsp(context, 3); in load_context() 49 mtctl(__space_to_prot(context), 8); in load_context() 57 load_context(next->context); in switch_mm() [all …]
|
D | tlbflush.h | 34 __flush_tlb_range((vma)->vm_mm->context, start, end) 69 if (mm->context != 0) in flush_tlb_mm() 70 free_sid(mm->context); in flush_tlb_mm() 71 mm->context = alloc_sid(); in flush_tlb_mm() 73 load_context(mm->context); in flush_tlb_mm() 83 sid = vma->vm_mm->context; in flush_tlb_page()
|
/linux-4.4.14/fs/ocfs2/ |
D | move_extents.c | 59 struct ocfs2_move_extents_context *context, in __ocfs2_move_extent() argument 64 struct inode *inode = context->inode; in __ocfs2_move_extent() 69 u64 ino = ocfs2_metadata_cache_owner(context->et.et_ci); in __ocfs2_move_extent() 85 path = ocfs2_new_path_from_et(&context->et); in __ocfs2_move_extent() 118 context->et.et_root_bh, in __ocfs2_move_extent() 125 ret = ocfs2_split_extent(handle, &context->et, path, index, in __ocfs2_move_extent() 126 &replace_rec, context->meta_ac, in __ocfs2_move_extent() 127 &context->dealloc); in __ocfs2_move_extent() 133 ocfs2_journal_dirty(handle, context->et.et_root_bh); in __ocfs2_move_extent() 135 context->new_phys_cpos = new_p_cpos; in __ocfs2_move_extent() [all …]
|
D | refcounttree.c | 64 int (*get_clusters)(struct ocfs2_cow_context *context, 3114 struct ocfs2_cow_context *context, in ocfs2_replace_clusters() argument 3120 struct ocfs2_caching_info *ci = context->data_et.et_ci; in ocfs2_replace_clusters() 3128 ret = context->cow_duplicate_clusters(handle, context->inode, in ocfs2_replace_clusters() 3136 ret = ocfs2_clear_ext_refcount(handle, &context->data_et, in ocfs2_replace_clusters() 3138 context->meta_ac, &context->dealloc); in ocfs2_replace_clusters() 3195 static int ocfs2_di_get_clusters(struct ocfs2_cow_context *context, in ocfs2_di_get_clusters() argument 3200 return ocfs2_get_clusters(context->inode, v_cluster, p_cluster, in ocfs2_di_get_clusters() 3205 struct ocfs2_cow_context *context, in ocfs2_make_clusters_writable() argument 3215 struct ocfs2_caching_info *ref_ci = &context->ref_tree->rf_ci; in ocfs2_make_clusters_writable() [all …]
|
/linux-4.4.14/drivers/gpu/drm/ |
D | drm_lock.c | 41 static int drm_lock_take(struct drm_lock_data *lock_data, unsigned int context); 67 if (lock->context == DRM_KERNEL_CONTEXT) { in drm_legacy_lock() 69 task_pid_nr(current), lock->context); in drm_legacy_lock() 74 lock->context, task_pid_nr(current), in drm_legacy_lock() 90 if (drm_lock_take(&master->lock, lock->context)) { in drm_legacy_lock() 111 DRM_DEBUG("%d %s\n", lock->context, in drm_legacy_lock() 119 dev->sigdata.context = lock->context; in drm_legacy_lock() 127 lock->context); in drm_legacy_lock() 154 if (lock->context == DRM_KERNEL_CONTEXT) { in drm_legacy_unlock() 156 task_pid_nr(current), lock->context); in drm_legacy_unlock() [all …]
|
/linux-4.4.14/arch/m68k/include/asm/ |
D | mmu_context.h | 34 if (mm->context != NO_CONTEXT) in get_mmu_context() 47 mm->context = ctx; in get_mmu_context() 54 #define init_new_context(tsk, mm) (((mm)->context = NO_CONTEXT), 0) 61 if (mm->context != NO_CONTEXT) { in destroy_context() 62 clear_bit(mm->context, context_map); in destroy_context() 63 mm->context = NO_CONTEXT; in destroy_context() 68 static inline void set_context(mm_context_t context, pgd_t *pgd) in set_context() argument 70 __asm__ __volatile__ ("movec %0,%%asid" : : "d" (context)); in set_context() 77 set_context(tsk->mm->context, next->pgd); in switch_mm() 88 set_context(mm->context, mm->pgd); in activate_mm() [all …]
|
/linux-4.4.14/arch/powerpc/mm/ |
D | mmu_context_hash64.c | 77 mm->context.id = index; in init_new_context() 79 mm->context.cop_lockp = kmalloc(sizeof(spinlock_t), GFP_KERNEL); in init_new_context() 80 if (!mm->context.cop_lockp) { in init_new_context() 83 mm->context.id = MMU_NO_CONTEXT; in init_new_context() 86 spin_lock_init(mm->context.cop_lockp); in init_new_context() 90 mm->context.pte_frag = NULL; in init_new_context() 93 mm_iommu_init(&mm->context); in init_new_context() 113 pte_frag = mm->context.pte_frag; in destroy_pagetable_page() 139 mm_iommu_cleanup(&mm->context); in destroy_context() 143 drop_cop(mm->context.acop, mm); in destroy_context() [all …]
|
D | mmu_context_nohash.c | 100 if (mm->context.active) { in steal_context_smp() 109 mm->context.id = MMU_NO_CONTEXT; in steal_context_smp() 152 mm->context.id = MMU_NO_CONTEXT; in steal_all_contexts() 157 mm->context.active = 0; in steal_all_contexts() 190 mm->context.id = MMU_NO_CONTEXT; in steal_context_up() 212 nact += context_mm[id]->context.active; in context_check_map() 238 cpu, next, next->context.active, next->context.id); in switch_mmu_context() 242 next->context.active++; in switch_mmu_context() 244 pr_hardcont(" (old=0x%p a=%d)", prev, prev->context.active); in switch_mmu_context() 245 WARN_ON(prev->context.active < 1); in switch_mmu_context() [all …]
|
D | icswx_pid.c | 62 if (mm->context.cop_pid == COP_PID_NONE) { in get_cop_pid() 66 mm->context.cop_pid = pid; in get_cop_pid() 68 return mm->context.cop_pid; in get_cop_pid() 75 if ((!mm->context.acop) && (mm->context.cop_pid != COP_PID_NONE)) { in disable_cop_pid() 76 free_pid = mm->context.cop_pid; in disable_cop_pid() 77 mm->context.cop_pid = COP_PID_NONE; in disable_cop_pid()
|
D | icswx.c | 71 mtspr(SPRN_PID, next->context.cop_pid); in switch_cop() 73 mtspr(SPRN_ACOP, next->context.acop); in switch_cop() 97 spin_lock(mm->context.cop_lockp); in use_cop() 104 mm->context.acop |= acop; in use_cop() 117 spin_unlock(mm->context.cop_lockp); in use_cop() 141 spin_lock(mm->context.cop_lockp); in drop_cop() 143 mm->context.acop &= ~acop; in drop_cop() 159 spin_unlock(mm->context.cop_lockp); in drop_cop() 243 if ((acop_copro_type_bit(ct) & current->active_mm->context.acop) != 0) { in acop_handle_fault()
|
D | slice.c | 158 lpsizes = mm->context.low_slices_psize; in slice_mask_for_size() 163 hpsizes = mm->context.high_slices_psize; in slice_mask_for_size() 189 get_paca()->context = current->active_mm->context; in slice_flush_segments() 212 lpsizes = mm->context.low_slices_psize; in slice_convert() 219 mm->context.low_slices_psize = lpsizes; in slice_convert() 221 hpsizes = mm->context.high_slices_psize; in slice_convert() 232 mm->context.low_slices_psize, in slice_convert() 233 mm->context.high_slices_psize); in slice_convert() 554 current->mm->context.user_psize, 0); in arch_get_unmapped_area() 564 current->mm->context.user_psize, 1); in arch_get_unmapped_area_topdown() [all …]
|
D | mmu_context_hash32.c | 84 mm->context.id = __init_new_context(); in init_new_context() 104 if (mm->context.id != NO_CONTEXT) { in destroy_context() 105 __destroy_context(mm->context.id); in destroy_context() 106 mm->context.id = NO_CONTEXT; in destroy_context()
|
/linux-4.4.14/arch/x86/kernel/ |
D | ldt.c | 32 pc = ¤t->active_mm->context; in flush_ldt() 83 smp_store_release(¤t_mm->context.ldt, ldt); in install_ldt() 112 mutex_init(&mm->context.lock); in init_new_context() 115 mm->context.ldt = NULL; in init_new_context() 119 mutex_lock(&old_mm->context.lock); in init_new_context() 120 if (!old_mm->context.ldt) { in init_new_context() 121 mm->context.ldt = NULL; in init_new_context() 125 new_ldt = alloc_ldt_struct(old_mm->context.ldt->size); in init_new_context() 131 memcpy(new_ldt->entries, old_mm->context.ldt->entries, in init_new_context() 135 mm->context.ldt = new_ldt; in init_new_context() [all …]
|
D | step.c | 34 mutex_lock(&child->mm->context.lock); in convert_ip_to_linear() 35 if (unlikely(!child->mm->context.ldt || in convert_ip_to_linear() 36 seg >= child->mm->context.ldt->size)) in convert_ip_to_linear() 39 desc = &child->mm->context.ldt->entries[seg]; in convert_ip_to_linear() 47 mutex_unlock(&child->mm->context.lock); in convert_ip_to_linear()
|
/linux-4.4.14/arch/blackfin/include/asm/ |
D | mmu_context.h | 64 mm->context.l1_stack_save = current_l1_stack_save = (void*)sp_base; in activate_l1stack() 82 if (prev_mm->context.page_rwx_mask == current_rwx_mask[cpu]) { in __switch_mm() 84 set_mask_dcplbs(next_mm->context.page_rwx_mask, cpu); in __switch_mm() 90 if (!next_mm->context.l1_stack_save) in __switch_mm() 92 if (next_mm->context.l1_stack_save == current_l1_stack_save) in __switch_mm() 97 current_l1_stack_save = next_mm->context.l1_stack_save; in __switch_mm() 123 unsigned long *mask = mm->context.page_rwx_mask; in protect_page() 154 if (mm->context.page_rwx_mask == current_rwx_mask[cpu]) { in update_protections() 156 set_mask_dcplbs(mm->context.page_rwx_mask, cpu); in update_protections() 177 mm->context.page_rwx_mask = (unsigned long *)p; in init_new_context() [all …]
|
/linux-4.4.14/arch/sparc/include/asm/ |
D | mmu_context_64.h | 38 &mm->context.tsb_block[0], in tsb_context_switch() 40 (mm->context.tsb_block[1].tsb ? in tsb_context_switch() 41 &mm->context.tsb_block[1] : in tsb_context_switch() 46 , __pa(&mm->context.tsb_descr[0])); in tsb_context_switch() 68 : "r" (CTX_HWBITS((__mm)->context)), \ 82 spin_lock_irqsave(&mm->context.lock, flags); in switch_mm() 83 ctx_valid = CTX_VALID(mm->context); in switch_mm() 127 __flush_tlb_mm(CTX_HWBITS(mm->context), in switch_mm() 130 spin_unlock_irqrestore(&mm->context.lock, flags); in switch_mm() 141 spin_lock_irqsave(&mm->context.lock, flags); in activate_mm() [all …]
|
/linux-4.4.14/drivers/media/usb/as102/ |
D | as10x_cmd_cfg.c | 43 sizeof(pcmd->body.context.req)); in as10x_cmd_get_context() 46 pcmd->body.context.req.proc_id = cpu_to_le16(CONTROL_PROC_CONTEXT); in as10x_cmd_get_context() 47 pcmd->body.context.req.tag = cpu_to_le16(tag); in as10x_cmd_get_context() 48 pcmd->body.context.req.type = cpu_to_le16(GET_CONTEXT_DATA); in as10x_cmd_get_context() 54 sizeof(pcmd->body.context.req) in as10x_cmd_get_context() 57 sizeof(prsp->body.context.rsp) in as10x_cmd_get_context() 72 *pvalue = le32_to_cpu((__force __le32)prsp->body.context.rsp.reg_val.u.value32); in as10x_cmd_get_context() 99 sizeof(pcmd->body.context.req)); in as10x_cmd_set_context() 102 pcmd->body.context.req.proc_id = cpu_to_le16(CONTROL_PROC_CONTEXT); in as10x_cmd_set_context() 104 pcmd->body.context.req.reg_val.u.value32 = (__force u32)cpu_to_le32(value); in as10x_cmd_set_context() [all …]
|
/linux-4.4.14/drivers/base/regmap/ |
D | regmap-i2c.c | 19 static int regmap_smbus_byte_reg_read(void *context, unsigned int reg, in regmap_smbus_byte_reg_read() argument 22 struct device *dev = context; in regmap_smbus_byte_reg_read() 38 static int regmap_smbus_byte_reg_write(void *context, unsigned int reg, in regmap_smbus_byte_reg_write() argument 41 struct device *dev = context; in regmap_smbus_byte_reg_write() 55 static int regmap_smbus_word_reg_read(void *context, unsigned int reg, in regmap_smbus_word_reg_read() argument 58 struct device *dev = context; in regmap_smbus_word_reg_read() 74 static int regmap_smbus_word_reg_write(void *context, unsigned int reg, in regmap_smbus_word_reg_write() argument 77 struct device *dev = context; in regmap_smbus_word_reg_write() 91 static int regmap_smbus_word_read_swapped(void *context, unsigned int reg, in regmap_smbus_word_read_swapped() argument 94 struct device *dev = context; in regmap_smbus_word_read_swapped() [all …]
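Each regmap-i2c callback above receives its bus handle through the opaque void *context (here the struct device of the I2C client); drivers never call these directly but go through the regmap core. A minimal sketch of the consumer side with a hypothetical 8-bit register map; the register numbers and values are illustrative:

    #include <linux/i2c.h>
    #include <linux/regmap.h>

    static const struct regmap_config my_regmap_cfg = {
            .reg_bits = 8,                   /* hypothetical device layout */
            .val_bits = 8,
    };

    static int my_probe(struct i2c_client *client,
                        const struct i2c_device_id *id)
    {
            struct regmap *map;
            unsigned int val;

            map = devm_regmap_init_i2c(client, &my_regmap_cfg);
            if (IS_ERR(map))
                    return PTR_ERR(map);

            regmap_write(map, 0x01, 0x80);
            return regmap_read(map, 0x02, &val);
    }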
|
D | regmap-spmi.c | 25 static int regmap_spmi_base_read(void *context, in regmap_spmi_base_read() argument 35 err = spmi_register_read(context, addr++, val++); in regmap_spmi_base_read() 40 static int regmap_spmi_base_gather_write(void *context, in regmap_spmi_base_gather_write() argument 55 err = spmi_register_zero_write(context, *data); in regmap_spmi_base_gather_write() 65 err = spmi_register_write(context, addr, *data); in regmap_spmi_base_gather_write() 78 static int regmap_spmi_base_write(void *context, const void *data, in regmap_spmi_base_write() argument 82 return regmap_spmi_base_gather_write(context, data, 1, data + 1, in regmap_spmi_base_write() 114 static int regmap_spmi_ext_read(void *context, in regmap_spmi_ext_read() argument 133 err = spmi_ext_register_read(context, addr, val, len); in regmap_spmi_ext_read() 145 err = spmi_ext_register_readl(context, addr, val, len); in regmap_spmi_ext_read() [all …]
|
D | regmap-spi.c | 32 static int regmap_spi_write(void *context, const void *data, size_t count) in regmap_spi_write() argument 34 struct device *dev = context; in regmap_spi_write() 40 static int regmap_spi_gather_write(void *context, in regmap_spi_gather_write() argument 44 struct device *dev = context; in regmap_spi_gather_write() 57 static int regmap_spi_async_write(void *context, in regmap_spi_async_write() argument 65 struct device *dev = context; in regmap_spi_async_write() 79 async->m.context = async; in regmap_spi_async_write() 95 static int regmap_spi_read(void *context, in regmap_spi_read() argument 99 struct device *dev = context; in regmap_spi_read()
|
D | regmap-mmio.c | 88 static int regmap_mmio_gather_write(void *context, in regmap_mmio_gather_write() argument 92 struct regmap_mmio_context *ctx = context; in regmap_mmio_gather_write() 137 static int regmap_mmio_write(void *context, const void *data, size_t count) in regmap_mmio_write() argument 139 struct regmap_mmio_context *ctx = context; in regmap_mmio_write() 144 return regmap_mmio_gather_write(context, data, ctx->reg_bytes, in regmap_mmio_write() 148 static int regmap_mmio_read(void *context, in regmap_mmio_read() argument 152 struct regmap_mmio_context *ctx = context; in regmap_mmio_read() 197 static void regmap_mmio_free_context(void *context) in regmap_mmio_free_context() argument 199 struct regmap_mmio_context *ctx = context; in regmap_mmio_free_context() 205 kfree(context); in regmap_mmio_free_context()
|
D | regmap-ac97.c | 56 static int regmap_ac97_reg_read(void *context, unsigned int reg, in regmap_ac97_reg_read() argument 59 struct snd_ac97 *ac97 = context; in regmap_ac97_reg_read() 66 static int regmap_ac97_reg_write(void *context, unsigned int reg, in regmap_ac97_reg_write() argument 69 struct snd_ac97 *ac97 = context; in regmap_ac97_reg_write()
|
/linux-4.4.14/tools/perf/scripts/python/ |
D | netdev-times.py | 227 def irq__softirq_entry(name, context, cpu, sec, nsec, pid, comm, callchain, vec): argument 230 event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec) 233 def irq__softirq_exit(name, context, cpu, sec, nsec, pid, comm, callchain, vec): argument 236 event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec) 239 def irq__softirq_raise(name, context, cpu, sec, nsec, pid, comm, callchain, vec): argument 242 event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec) 245 def irq__irq_handler_entry(name, context, cpu, sec, nsec, pid, comm, argument 247 event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, 251 def irq__irq_handler_exit(name, context, cpu, sec, nsec, pid, comm, callchain, irq, ret): argument 252 event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, irq, ret) [all …]
|
D | check-perf-trace.py | 28 def irq__softirq_entry(event_name, context, common_cpu, argument 34 print_uncommon(context) 39 def kmem__kmalloc(event_name, context, common_cpu, argument 46 print_uncommon(context) 54 def trace_unhandled(event_name, context, event_fields_dict): argument 65 def print_uncommon(context): argument 67 % (common_pc(context), trace_flag_str(common_flags(context)), \ 68 common_lock_depth(context))
|
D | sched-migration.py | 370 def sched__sched_stat_runtime(event_name, context, common_cpu, argument 375 def sched__sched_stat_iowait(event_name, context, common_cpu, argument 380 def sched__sched_stat_sleep(event_name, context, common_cpu, argument 385 def sched__sched_stat_wait(event_name, context, common_cpu, argument 390 def sched__sched_process_fork(event_name, context, common_cpu, argument 395 def sched__sched_process_wait(event_name, context, common_cpu, argument 400 def sched__sched_process_exit(event_name, context, common_cpu, argument 405 def sched__sched_process_free(event_name, context, common_cpu, argument 410 def sched__sched_migrate_task(event_name, context, common_cpu, argument 418 def sched__sched_switch(event_name, context, common_cpu, argument [all …]
|
/linux-4.4.14/crypto/asymmetric_keys/ |
D | pkcs7_parser.c | 194 int pkcs7_note_OID(void *context, size_t hdrlen, in pkcs7_note_OID() argument 198 struct pkcs7_parse_context *ctx = context; in pkcs7_note_OID() 213 int pkcs7_sig_note_digest_algo(void *context, size_t hdrlen, in pkcs7_sig_note_digest_algo() argument 217 struct pkcs7_parse_context *ctx = context; in pkcs7_sig_note_digest_algo() 250 int pkcs7_sig_note_pkey_algo(void *context, size_t hdrlen, in pkcs7_sig_note_pkey_algo() argument 254 struct pkcs7_parse_context *ctx = context; in pkcs7_sig_note_pkey_algo() 270 int pkcs7_check_content_type(void *context, size_t hdrlen, in pkcs7_check_content_type() argument 274 struct pkcs7_parse_context *ctx = context; in pkcs7_check_content_type() 287 int pkcs7_note_signeddata_version(void *context, size_t hdrlen, in pkcs7_note_signeddata_version() argument 291 struct pkcs7_parse_context *ctx = context; in pkcs7_note_signeddata_version() [all …]
|
D | x509_cert_parser.c | 139 int x509_note_OID(void *context, size_t hdrlen, in x509_note_OID() argument 143 struct x509_parse_context *ctx = context; in x509_note_OID() 159 int x509_note_tbs_certificate(void *context, size_t hdrlen, in x509_note_tbs_certificate() argument 163 struct x509_parse_context *ctx = context; in x509_note_tbs_certificate() 176 int x509_note_pkey_algo(void *context, size_t hdrlen, in x509_note_pkey_algo() argument 180 struct x509_parse_context *ctx = context; in x509_note_pkey_algo() 228 int x509_note_signature(void *context, size_t hdrlen, in x509_note_signature() argument 232 struct x509_parse_context *ctx = context; in x509_note_signature() 250 int x509_note_serial(void *context, size_t hdrlen, in x509_note_serial() argument 254 struct x509_parse_context *ctx = context; in x509_note_serial() [all …]
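Every x509_note_*() and pkcs7_*() function in these parsers is an asn1_action_t invoked by the generic ASN.1 decoder, so the void *context is simply the parse-context struct the caller handed to asn1_ber_decoder(). A minimal sketch of that callback shape with a hypothetical parse context; the five-argument signature matches include/linux/asn1_decoder.h:

    #include <linux/asn1_decoder.h>

    struct my_parse_context {                /* hypothetical */
            const void *found;
            size_t found_len;
    };

    static int my_note_field(void *context, size_t hdrlen,
                             unsigned char tag,
                             const void *value, size_t vlen)
    {
            struct my_parse_context *ctx = context;

            ctx->found = value;              /* payload of the matched element */
            ctx->found_len = vlen;
            return 0;                        /* non-zero aborts the decode */
    }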
|
D | mscode_parser.c | 46 int mscode_note_content_type(void *context, size_t hdrlen, in mscode_note_content_type() argument 78 int mscode_note_digest_algo(void *context, size_t hdrlen, in mscode_note_digest_algo() argument 82 struct pefile_context *ctx = context; in mscode_note_digest_algo() 126 int mscode_note_digest(void *context, size_t hdrlen, in mscode_note_digest() argument 130 struct pefile_context *ctx = context; in mscode_note_digest()
|
/linux-4.4.14/tools/perf/scripts/python/Perf-Trace-Util/ |
D | Context.c | 31 PyObject *context; in perf_trace_context_common_pc() local 34 if (!PyArg_ParseTuple(args, "O", &context)) in perf_trace_context_common_pc() 37 scripting_context = PyCObject_AsVoidPtr(context); in perf_trace_context_common_pc() 47 PyObject *context; in perf_trace_context_common_flags() local 50 if (!PyArg_ParseTuple(args, "O", &context)) in perf_trace_context_common_flags() 53 scripting_context = PyCObject_AsVoidPtr(context); in perf_trace_context_common_flags() 63 PyObject *context; in perf_trace_context_common_lock_depth() local 66 if (!PyArg_ParseTuple(args, "O", &context)) in perf_trace_context_common_lock_depth() 69 scripting_context = PyCObject_AsVoidPtr(context); in perf_trace_context_common_lock_depth()
|
/linux-4.4.14/drivers/staging/vt6656/ |
D | usbpipe.c | 107 struct vnt_private *priv = urb->context; in vnt_start_interrupt_urb_complete() 169 struct vnt_rcb *rcb = urb->context; in vnt_submit_rx_urb_complete() 243 struct vnt_usb_send_context *context = urb->context; in vnt_tx_context_complete() local 244 struct vnt_private *priv = context->priv; in vnt_tx_context_complete() 248 dev_dbg(&priv->usb->dev, "Write %d bytes\n", context->buf_len); in vnt_tx_context_complete() 253 context->in_use = false; in vnt_tx_context_complete() 261 if (context->type == CONTEXT_DATA_PACKET) in vnt_tx_context_complete() 264 if (urb->status || context->type == CONTEXT_BEACON_PACKET) { in vnt_tx_context_complete() 265 if (context->skb) in vnt_tx_context_complete() 266 ieee80211_free_txskb(priv->hw, context->skb); in vnt_tx_context_complete() [all …]
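The completion handlers in usbpipe.c recover their driver state from urb->context, which is just the last argument the submit path gave usb_fill_bulk_urb(); microtek.c further up relies on the same convention via transfer->context. A minimal sketch of the two halves with a hypothetical private struct; the endpoint number and GFP flags are illustrative:

    #include <linux/usb.h>

    struct my_xfer {                         /* hypothetical per-transfer state */
            struct urb *urb;
            bool in_use;
    };

    static void my_complete(struct urb *urb)
    {
            struct my_xfer *xfer = urb->context;     /* set at fill time */

            xfer->in_use = false;
    }

    static int my_submit(struct usb_device *udev, struct my_xfer *xfer,
                         void *buf, int len)
    {
            usb_fill_bulk_urb(xfer->urb, udev, usb_sndbulkpipe(udev, 0x01),
                              buf, len, my_complete, xfer /* -> urb->context */);
            xfer->in_use = true;
            return usb_submit_urb(xfer->urb, GFP_ATOMIC);
    }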
|
D | int.c | 72 struct vnt_usb_send_context *context; in vnt_int_report_rate() local 81 context = priv->tx_context[pkt_no]; in vnt_int_report_rate() 83 if (!context->skb) in vnt_int_report_rate() 86 info = IEEE80211_SKB_CB(context->skb); in vnt_int_report_rate() 89 if (context->fb_option && !(tsr & (TSR_TMO | TSR_RETRYTMO))) { in vnt_int_report_rate() 99 if (context->fb_option == AUTO_FB_0) in vnt_int_report_rate() 101 else if (context->fb_option == AUTO_FB_1) in vnt_int_report_rate() 119 ieee80211_tx_status_irqsafe(priv->hw, context->skb); in vnt_int_report_rate() 121 context->in_use = false; in vnt_int_report_rate()
|
/linux-4.4.14/arch/nios2/mm/ |
D | mmu_context.c | 53 static void set_context(mm_context_t context) in set_context() argument 55 set_mmu_pid(CTX_PID(context)); in set_context() 89 if (unlikely(CTX_VERSION(next->context) != in switch_mm() 91 next->context = get_new_context(); in switch_mm() 97 set_context(next->context); in switch_mm() 108 next->context = get_new_context(); in activate_mm() 109 set_context(next->context); in activate_mm() 113 unsigned long get_pid_from_context(mm_context_t *context) in get_pid_from_context() argument 115 return CTX_PID((*context)); in get_pid_from_context()
|
/linux-4.4.14/drivers/infiniband/hw/mthca/ |
D | mthca_provider.c | 305 struct mthca_ucontext *context; in mthca_alloc_ucontext() local 319 context = kmalloc(sizeof *context, GFP_KERNEL); in mthca_alloc_ucontext() 320 if (!context) in mthca_alloc_ucontext() 323 err = mthca_uar_alloc(to_mdev(ibdev), &context->uar); in mthca_alloc_ucontext() 325 kfree(context); in mthca_alloc_ucontext() 329 context->db_tab = mthca_init_user_db_tab(to_mdev(ibdev)); in mthca_alloc_ucontext() 330 if (IS_ERR(context->db_tab)) { in mthca_alloc_ucontext() 331 err = PTR_ERR(context->db_tab); in mthca_alloc_ucontext() 332 mthca_uar_free(to_mdev(ibdev), &context->uar); in mthca_alloc_ucontext() 333 kfree(context); in mthca_alloc_ucontext() [all …]
|
D | mthca_srq.c | 98 struct mthca_tavor_srq_context *context) in mthca_tavor_init_srq_context() argument 100 memset(context, 0, sizeof *context); in mthca_tavor_init_srq_context() 102 context->wqe_base_ds = cpu_to_be64(1 << (srq->wqe_shift - 4)); in mthca_tavor_init_srq_context() 103 context->state_pd = cpu_to_be32(pd->pd_num); in mthca_tavor_init_srq_context() 104 context->lkey = cpu_to_be32(srq->mr.ibmr.lkey); in mthca_tavor_init_srq_context() 107 context->uar = in mthca_tavor_init_srq_context() 108 cpu_to_be32(to_mucontext(pd->ibpd.uobject->context)->uar.index); in mthca_tavor_init_srq_context() 110 context->uar = cpu_to_be32(dev->driver_uar.index); in mthca_tavor_init_srq_context() 116 struct mthca_arbel_srq_context *context) in mthca_arbel_init_srq_context() argument 120 memset(context, 0, sizeof *context); in mthca_arbel_init_srq_context() [all …]
|
/linux-4.4.14/tools/perf/scripts/perl/ |
D | check-perf-trace.pl | 30 my ($event_name, $context, $common_cpu, $common_secs, $common_nsecs, 37 print_uncommon($context); 45 my ($event_name, $context, $common_cpu, $common_secs, $common_nsecs, 53 print_uncommon($context); 65 my ($context) = @_; 68 common_pc($context), trace_flag_str(common_flags($context)), 69 common_lock_depth($context)); 94 my ($event_name, $context, $common_cpu, $common_secs, $common_nsecs,
|
D | rwtop.pl | 37 my ($event_name, $context, $common_cpu, $common_secs, $common_nsecs, 55 my ($event_name, $context, $common_cpu, $common_secs, $common_nsecs, 68 my ($event_name, $context, $common_cpu, $common_secs, $common_nsecs, 81 my ($event_name, $context, $common_cpu, $common_secs, $common_nsecs, 199 my ($event_name, $context, $common_cpu, $common_secs, $common_nsecs,
|
D | rw-by-pid.pl | 26 my ($event_name, $context, $common_cpu, $common_secs, $common_nsecs, 42 my ($event_name, $context, $common_cpu, $common_secs, $common_nsecs, 53 my ($event_name, $context, $common_cpu, $common_secs, $common_nsecs, 64 my ($event_name, $context, $common_cpu, $common_secs, $common_nsecs, 180 my ($event_name, $context, $common_cpu, $common_secs, $common_nsecs,
|
/linux-4.4.14/drivers/infiniband/hw/mlx5/ |
D | doorbell.c | 46 int mlx5_ib_db_map_user(struct mlx5_ib_ucontext *context, unsigned long virt, in mlx5_ib_db_map_user() argument 52 mutex_lock(&context->db_page_mutex); in mlx5_ib_db_map_user() 54 list_for_each_entry(page, &context->db_page_list, list) in mlx5_ib_db_map_user() 66 page->umem = ib_umem_get(&context->ibucontext, virt & PAGE_MASK, in mlx5_ib_db_map_user() 74 list_add(&page->list, &context->db_page_list); in mlx5_ib_db_map_user() 82 mutex_unlock(&context->db_page_mutex); in mlx5_ib_db_map_user() 87 void mlx5_ib_db_unmap_user(struct mlx5_ib_ucontext *context, struct mlx5_db *db) in mlx5_ib_db_unmap_user() argument 89 mutex_lock(&context->db_page_mutex); in mlx5_ib_db_unmap_user() 97 mutex_unlock(&context->db_page_mutex); in mlx5_ib_db_unmap_user()
|
D | qp.c | 598 struct mlx5_ib_ucontext *context; in create_user_qp() local 614 context = to_mucontext(pd->uobject->context); in create_user_qp() 618 uuarn = alloc_uuar(&context->uuari, MLX5_IB_LATENCY_CLASS_HIGH); in create_user_qp() 622 uuarn = alloc_uuar(&context->uuari, MLX5_IB_LATENCY_CLASS_MEDIUM); in create_user_qp() 626 uuarn = alloc_uuar(&context->uuari, MLX5_IB_LATENCY_CLASS_LOW); in create_user_qp() 634 uar_index = uuarn_to_uar_index(&context->uuari, uuarn); in create_user_qp() 646 qp->umem = ib_umem_get(pd->uobject->context, ucmd.buf_addr, in create_user_qp() 685 err = mlx5_ib_db_map_user(context, ucmd.db_addr, &qp->db); in create_user_qp() 701 mlx5_ib_db_unmap_user(context, &qp->db); in create_user_qp() 711 free_uuar(&context->uuari, uuarn); in create_user_qp() [all …]
|
/linux-4.4.14/drivers/infiniband/hw/mlx4/ |
D | doorbell.c | 44 int mlx4_ib_db_map_user(struct mlx4_ib_ucontext *context, unsigned long virt, in mlx4_ib_db_map_user() argument 50 mutex_lock(&context->db_page_mutex); in mlx4_ib_db_map_user() 52 list_for_each_entry(page, &context->db_page_list, list) in mlx4_ib_db_map_user() 64 page->umem = ib_umem_get(&context->ibucontext, virt & PAGE_MASK, in mlx4_ib_db_map_user() 72 list_add(&page->list, &context->db_page_list); in mlx4_ib_db_map_user() 80 mutex_unlock(&context->db_page_mutex); in mlx4_ib_db_map_user() 85 void mlx4_ib_db_unmap_user(struct mlx4_ib_ucontext *context, struct mlx4_db *db) in mlx4_ib_db_unmap_user() argument 87 mutex_lock(&context->db_page_mutex); in mlx4_ib_db_unmap_user() 95 mutex_unlock(&context->db_page_mutex); in mlx4_ib_db_unmap_user()
|
D | qp.c | 737 qp->umem = ib_umem_get(pd->uobject->context, ucmd.buf_addr, in create_qp_common() 754 err = mlx4_ib_db_map_user(to_mucontext(pd->uobject->context), in create_qp_common() 894 mlx4_ib_db_unmap_user(to_mucontext(pd->uobject->context), &qp->db); in create_qp_common() 1068 mlx4_ib_db_unmap_user(to_mucontext(qp->ibqp.uobject->context), in destroy_qp_common() 1454 struct mlx4_qp_context *context) in handle_eth_ud_smac_index() argument 1461 context->pri_path.sched_queue = MLX4_IB_DEFAULT_SCHED_QUEUE | ((qp->port - 1) << 6); in handle_eth_ud_smac_index() 1468 context->pri_path.grh_mylmc = 0x80 | (u8) smac_index; in handle_eth_ud_smac_index() 1518 struct mlx4_qp_context *context; in __mlx4_ib_modify_qp() local 1531 context = kzalloc(sizeof *context, GFP_KERNEL); in __mlx4_ib_modify_qp() 1532 if (!context) in __mlx4_ib_modify_qp() [all …]
|
/linux-4.4.14/arch/microblaze/include/asm/ |
D | mmu_context_mm.h | 54 extern void set_context(mm_context_t context, pgd_t *pgd); 85 if (mm->context != NO_CONTEXT) in get_mmu_context() 96 mm->context = ctx; in get_mmu_context() 103 # define init_new_context(tsk, mm) (((mm)->context = NO_CONTEXT), 0) 110 if (mm->context != NO_CONTEXT) { in destroy_context() 111 clear_bit(mm->context, context_map); in destroy_context() 112 mm->context = NO_CONTEXT; in destroy_context() 122 set_context(next->context, next->pgd); in switch_mm() 134 set_context(mm->context, mm->pgd); in activate_mm()
|
/linux-4.4.14/arch/m68k/sun3/ |
D | mmu_emu.c | 208 void clear_context(unsigned long context) in clear_context() argument 213 if(context) { in clear_context() 214 if(!ctx_alloc[context]) in clear_context() 217 ctx_alloc[context]->context = SUN3_INVALID_CONTEXT; in clear_context() 218 ctx_alloc[context] = (struct mm_struct *)0; in clear_context() 224 sun3_put_context(context); in clear_context() 227 if((pmeg_ctx[i] == context) && (pmeg_alloc[i] == 1)) { in clear_context() 282 inline void mmu_emu_map_pmeg (int context, int vaddr) in mmu_emu_map_pmeg() argument 297 curr_pmeg, context, vaddr); in mmu_emu_map_pmeg() 304 sun3_put_context(context); in mmu_emu_map_pmeg() [all …]
|
/linux-4.4.14/tools/perf/scripts/perl/Perf-Trace-Util/ |
D | Context.xs | 32 common_pc(context) 33 struct scripting_context * context 36 common_flags(context) 37 struct scripting_context * context 40 common_lock_depth(context) 41 struct scripting_context * context
|
D | Context.c | 58 struct scripting_context * context = INT2PTR(struct scripting_context *,SvIV(ST(0))); in XS() local 62 RETVAL = common_pc(context); in XS() 81 struct scripting_context * context = INT2PTR(struct scripting_context *,SvIV(ST(0))); in XS() local 85 RETVAL = common_flags(context); in XS() 104 struct scripting_context * context = INT2PTR(struct scripting_context *,SvIV(ST(0))); in XS() local 108 RETVAL = common_lock_depth(context); in XS()
|
/linux-4.4.14/drivers/gpu/drm/tegra/ |
D | drm.c | 255 static void tegra_drm_context_free(struct tegra_drm_context *context) in tegra_drm_context_free() argument 257 context->client->ops->close_channel(context); in tegra_drm_context_free() 258 kfree(context); in tegra_drm_context_free() 327 int tegra_drm_submit(struct tegra_drm_context *context, in tegra_drm_submit() argument 348 job = host1x_job_alloc(context->channel, args->num_cmdbufs, in tegra_drm_submit() 355 job->client = (u32)args->context; in tegra_drm_submit() 356 job->class = context->client->base.class; in tegra_drm_submit() 400 job->is_addr_reg = context->client->ops->is_addr_reg; in tegra_drm_submit() 408 err = host1x_job_pin(job, context->client->base.dev); in tegra_drm_submit() 430 static struct tegra_drm_context *tegra_drm_get_context(__u64 context) in tegra_drm_get_context() argument [all …]
|
/linux-4.4.14/drivers/net/ethernet/microchip/ |
D | encx24j600-regmap.c | 54 static void regmap_lock_mutex(void *context) in regmap_lock_mutex() argument 56 struct encx24j600_context *ctx = context; in regmap_lock_mutex() 60 static void regmap_unlock_mutex(void *context) in regmap_unlock_mutex() argument 62 struct encx24j600_context *ctx = context; in regmap_unlock_mutex() 66 static int regmap_encx24j600_sfr_read(void *context, u8 reg, u8 *val, in regmap_encx24j600_sfr_read() argument 69 struct encx24j600_context *ctx = context; in regmap_encx24j600_sfr_read() 173 static int regmap_encx24j600_sfr_write(void *context, u8 reg, u8 *val, in regmap_encx24j600_sfr_write() argument 176 struct encx24j600_context *ctx = context; in regmap_encx24j600_sfr_write() 192 static int regmap_encx24j600_reg_update_bits(void *context, unsigned int reg, in regmap_encx24j600_reg_update_bits() argument 196 struct encx24j600_context *ctx = context; in regmap_encx24j600_reg_update_bits() [all …]
|
/linux-4.4.14/drivers/nvmem/ |
D | rockchip-efuse.c | 44 static int rockchip_efuse_write(void *context, const void *data, size_t count) in rockchip_efuse_write() argument 50 static int rockchip_efuse_read(void *context, in rockchip_efuse_read() argument 55 struct rockchip_efuse_context *_context = context; in rockchip_efuse_read() 130 struct rockchip_efuse_context *context; in rockchip_efuse_probe() local 137 context = devm_kzalloc(dev, sizeof(struct rockchip_efuse_context), in rockchip_efuse_probe() 139 if (IS_ERR(context)) in rockchip_efuse_probe() 140 return PTR_ERR(context); in rockchip_efuse_probe() 146 context->dev = dev; in rockchip_efuse_probe() 147 context->base = base; in rockchip_efuse_probe() 148 context->efuse_clk = clk; in rockchip_efuse_probe() [all …]
|
/linux-4.4.14/drivers/isdn/hisax/ |
D | hfc_usb.c | 260 hfcusb_data *hfc = (hfcusb_data *) urb->context; in ctrl_complete() 429 usb_complete_t complete, void *context) in fill_isoc_urb() argument 438 urb->context = context; in fill_isoc_urb() 544 iso_urb_struct *context_iso_urb = (iso_urb_struct *) urb->context; in tx_iso_complete() 594 tx_iso_complete, urb->context); in tx_iso_complete() 682 iso_urb_struct *context_iso_urb = (iso_urb_struct *) urb->context; in rx_iso_complete() 762 rx_iso_complete, urb->context); in rx_iso_complete() 855 usb_fifo *fifo = (usb_fifo *) urb->context; in rx_int_complete() 1260 hfcusb_data *context; in hfc_usb_probe() local 1359 if (!(context = kzalloc(sizeof(hfcusb_data), GFP_KERNEL))) in hfc_usb_probe() [all …]
|
D | st5481_usb.c | 62 ctrl_complete_t complete, void *context) in usb_ctrl_msg() argument 80 ctrl_msg->context = context; in usb_ctrl_msg() 90 ctrl_complete_t complete, void *context) in st5481_usb_device_ctrl_msg() argument 94 value, 0, complete, context); in st5481_usb_device_ctrl_msg() 102 ctrl_complete_t complete, void *context) in st5481_usb_pipe_reset() argument 108 0, pipe, complete, context); in st5481_usb_pipe_reset() 130 struct st5481_adapter *adapter = urb->context; in usb_ctrl_complete() 156 ctrl_msg->complete(ctrl_msg->context); in usb_ctrl_complete() 179 struct st5481_adapter *adapter = urb->context; in usb_int_complete() 407 void *context) in fill_isoc_urb() argument [all …]
|
/linux-4.4.14/drivers/net/phy/ |
D | at803x.c | 66 struct at803x_context *context) in at803x_context_save() argument 68 context->bmcr = phy_read(phydev, MII_BMCR); in at803x_context_save() 69 context->advertise = phy_read(phydev, MII_ADVERTISE); in at803x_context_save() 70 context->control1000 = phy_read(phydev, MII_CTRL1000); in at803x_context_save() 71 context->int_enable = phy_read(phydev, AT803X_INTR_ENABLE); in at803x_context_save() 72 context->smart_speed = phy_read(phydev, AT803X_SMART_SPEED); in at803x_context_save() 73 context->led_control = phy_read(phydev, AT803X_LED_CONTROL); in at803x_context_save() 78 const struct at803x_context *context) in at803x_context_restore() argument 80 phy_write(phydev, MII_BMCR, context->bmcr); in at803x_context_restore() 81 phy_write(phydev, MII_ADVERTISE, context->advertise); in at803x_context_restore() [all …]
|
/linux-4.4.14/arch/xtensa/include/asm/ |
D | mmu_context.h | 80 mm->context.asid[cpu] = asid; in get_new_mmu_context() 81 mm->context.cpu = cpu; in get_new_mmu_context() 91 unsigned long asid = mm->context.asid[cpu]; in get_mmu_context() 102 set_rasid_register(ASID_INSERT(mm->context.asid[cpu])); in activate_context() 117 mm->context.asid[cpu] = NO_CONTEXT; in init_new_context() 119 mm->context.cpu = -1; in init_new_context() 127 int migrated = next->context.cpu != cpu; in switch_mm() 131 next->context.cpu = cpu; in switch_mm()
|
/linux-4.4.14/arch/hexagon/mm/ |
D | vm_tlb.c | 42 if (mm->context.ptbase == current->active_mm->context.ptbase) in flush_tlb_range() 71 if (current->active_mm->context.ptbase == mm->context.ptbase) in flush_tlb_mm() 82 if (mm->context.ptbase == current->active_mm->context.ptbase) in flush_tlb_page()
|
/linux-4.4.14/drivers/gpio/ |
D | gpio-omap.c | 57 struct gpio_regs context; member 112 bank->context.oe = l; in omap_set_gpio_direction() 125 bank->context.dataout |= l; in omap_set_gpio_dataout_reg() 128 bank->context.dataout &= ~l; in omap_set_gpio_dataout_reg() 148 bank->context.dataout = l; in omap_set_gpio_dataout_mask() 256 bank->context.debounce = debounce; in omap2_set_gpio_debounce() 257 bank->context.debounce_en = val; in omap2_set_gpio_debounce() 282 bank->context.debounce_en &= ~gpio_bit; in omap_clear_gpio_debounce() 283 writel_relaxed(bank->context.debounce_en, in omap_clear_gpio_debounce() 287 bank->context.debounce = 0; in omap_clear_gpio_debounce() [all …]
|
/linux-4.4.14/arch/cris/mm/ |
D | tlb.c | 52 old_mm->context.page_id = NO_CONTEXT; in alloc_context() 57 mm->context.page_id = map_replace_ptr; in alloc_context() 73 if(mm->context.page_id == NO_CONTEXT) in get_mmu_context() 88 if(mm->context.page_id != NO_CONTEXT) { in destroy_context() 89 D(printk("destroy_context %d (%p)\n", mm->context.page_id, mm)); in destroy_context() 91 page_id_map[mm->context.page_id] = NULL; in destroy_context()
|
/linux-4.4.14/include/trace/events/ |
D | fence.h | 21 __field(unsigned int, context) 33 __entry->context = fence->context; 38 __entry->waiting_context = f1->context; 45 __get_str(driver), __get_str(timeline), __entry->context, 60 __field(unsigned int, context) 67 __entry->context = fence->context; 72 __get_str(driver), __get_str(timeline), __entry->context,
|
/linux-4.4.14/arch/arm/include/asm/ |
D | mmu_context.h | 29 #define init_new_context(tsk,mm) ({ atomic64_set(&mm->context.id, 0); 0; }) 48 if (unlikely(mm->context.vmalloc_seq != init_mm.context.vmalloc_seq)) in check_and_switch_context() 59 mm->context.switch_pending = 1; in check_and_switch_context() 70 if (mm && mm->context.switch_pending) { in finish_arch_post_lock_switch() 78 if (mm->context.switch_pending) { in finish_arch_post_lock_switch() 79 mm->context.switch_pending = 0; in finish_arch_post_lock_switch()
|
/linux-4.4.14/drivers/acpi/ |
D | bus.c | 132 void *context) in acpi_bus_private_data_handler() argument 177 struct acpi_osc_context *context, char *error) in acpi_print_osc_error() argument 189 for (i = 0; i < context->cap.length; i += sizeof(u32)) in acpi_print_osc_error() 190 printk("%x ", *((u32 *)(context->cap.pointer + i))); in acpi_print_osc_error() 217 acpi_status acpi_run_osc(acpi_handle handle, struct acpi_osc_context *context) in acpi_run_osc() argument 227 if (!context) in acpi_run_osc() 229 if (ACPI_FAILURE(acpi_str_to_uuid(context->uuid_str, uuid))) in acpi_run_osc() 231 context->ret.length = ACPI_ALLOCATE_BUFFER; in acpi_run_osc() 232 context->ret.pointer = NULL; in acpi_run_osc() 241 in_params[1].integer.value = context->rev; in acpi_run_osc() [all …]
|
D | sbshc.c | 31 void *context; member 182 smbus_alarm_callback callback, void *context) in acpi_smbus_register_callback() argument 186 hc->context = context; in acpi_smbus_register_callback() 197 hc->context = NULL; in acpi_smbus_unregister_callback() 204 static inline void acpi_smbus_callback(void *context) in acpi_smbus_callback() argument 206 struct acpi_smb_hc *hc = context; in acpi_smbus_callback() 208 hc->callback(hc->context); in acpi_smbus_callback() 211 static int smbus_alarm(void *context) in smbus_alarm() argument 213 struct acpi_smb_hc *hc = context; in smbus_alarm()
|
/linux-4.4.14/drivers/uwb/i1480/dfu/ |
D | dfu.c | 53 const char *cmd, u8 context, u8 expected_type, in i1480_rceb_check() argument 58 if (rceb->bEventContext != context) { in i1480_rceb_check() 62 rceb->bEventContext, context); in i1480_rceb_check() 100 u8 context; in i1480_cmd() local 105 get_random_bytes(&context, 1); in i1480_cmd() 106 } while (context == 0x00 || context == 0xff); in i1480_cmd() 107 cmd->bCommandContext = context; in i1480_cmd() 147 result = i1480_rceb_check(i1480, i1480->evt_buf, cmd_name, context, in i1480_cmd()
|
/linux-4.4.14/drivers/acpi/acpica/ |
D | evxface.c | 61 acpi_gpe_handler address, void *context); 93 acpi_notify_handler handler, void *context) in acpi_install_notify_handler() argument 132 acpi_gbl_global_notify[i].context = context; in acpi_install_notify_handler() 202 handler_obj->notify.context = context; in acpi_install_notify_handler() 287 acpi_gbl_global_notify[i].context = NULL; in ACPI_EXPORT_SYMBOL() 423 acpi_status acpi_install_sci_handler(acpi_sci_handler address, void *context) in ACPI_EXPORT_SYMBOL() 444 new_sci_handler->context = context; in ACPI_EXPORT_SYMBOL() 568 acpi_install_global_event_handler(acpi_gbl_event_handler handler, void *context) in ACPI_EXPORT_SYMBOL() 593 acpi_gbl_global_event_handler_context = context; in ACPI_EXPORT_SYMBOL() 619 acpi_event_handler handler, void *context) in ACPI_EXPORT_SYMBOL() [all …]
|
D | dbnames.c | 57 void *context, void **return_value); 62 void *context, void **return_value); 67 void *context, void **return_value); 72 void *context, void **return_value); 76 u32 nesting_level, void *context, void **return_value); 81 void *context, void **return_value); 85 u32 nesting_level, void *context, void **return_value); 323 void *context, void **return_value) in acpi_db_walk_and_match_name() argument 326 char *requested_name = (char *)context; in acpi_db_walk_and_match_name() 426 void *context, void **return_value) in acpi_db_walk_for_predefined_names() argument [all …]
|
D | evregion.c | 62 u32 level, void *context, void **return_value); 144 struct acpi_connection_info *context; in acpi_ev_address_space_dispatch() local 168 context = handler_desc->address_space.context; in acpi_ev_address_space_dispatch() 199 context, ®ion_context); in acpi_ev_address_space_dispatch() 252 context && field_obj) { in acpi_ev_address_space_dispatch() 256 context->connection = field_obj->field.resource_buffer; in acpi_ev_address_space_dispatch() 257 context->length = field_obj->field.resource_length; in acpi_ev_address_space_dispatch() 258 context->access_length = field_obj->field.access_length; in acpi_ev_address_space_dispatch() 261 context && field_obj) { in acpi_ev_address_space_dispatch() 265 context->connection = field_obj->field.resource_buffer; in acpi_ev_address_space_dispatch() [all …]
|
D | evsci.c | 53 static u32 ACPI_SYSTEM_XFACE acpi_ev_sci_xrupt_handler(void *context); 90 int_status |= sci_handler->address(sci_handler->context); in acpi_ev_sci_dispatch() 112 static u32 ACPI_SYSTEM_XFACE acpi_ev_sci_xrupt_handler(void *context) in acpi_ev_sci_xrupt_handler() argument 114 struct acpi_gpe_xrupt_info *gpe_xrupt_list = context; in acpi_ev_sci_xrupt_handler() 156 u32 ACPI_SYSTEM_XFACE acpi_ev_gpe_xrupt_handler(void *context) in acpi_ev_gpe_xrupt_handler() argument 158 struct acpi_gpe_xrupt_info *gpe_xrupt_list = context; in acpi_ev_gpe_xrupt_handler()
|
D | acevents.h | 117 void *context); 138 u32 level, void *context, void **return_value); 144 acpi_ev_walk_gpe_list(acpi_gpe_callback gpe_walk_callback, void *context); 148 struct acpi_gpe_block_info *gpe_block, void *context); 159 void *context); 174 acpi_adr_space_setup setup, void *context); 244 u32 ACPI_SYSTEM_XFACE acpi_ev_gpe_xrupt_handler(void *context);
|
D | nsinit.c | 56 u32 level, void *context, void **return_value); 60 u32 nesting_level, void *context, void **return_value); 64 u32 nesting_level, void *context, void **return_value); 242 u32 level, void *context, void **return_value) in acpi_ns_init_one_object() argument 247 (struct acpi_init_walk_info *)context; in acpi_ns_init_one_object() 384 u32 nesting_level, void *context, void **return_value) in acpi_ns_find_ini_methods() argument 387 ACPI_CAST_PTR(struct acpi_device_walk_info, context); in acpi_ns_find_ini_methods() 449 u32 nesting_level, void *context, void **return_value) in acpi_ns_init_one_device() argument 452 ACPI_CAST_PTR(struct acpi_device_walk_info, context); in acpi_ns_init_one_device()
|
D | evmisc.c | 53 static void ACPI_SYSTEM_XFACE acpi_ev_notify_dispatch(void *context); 195 static void ACPI_SYSTEM_XFACE acpi_ev_notify_dispatch(void *context) in acpi_ev_notify_dispatch() argument 197 union acpi_generic_state *info = (union acpi_generic_state *)context; in acpi_ev_notify_dispatch() 207 info->notify.global->context); in acpi_ev_notify_dispatch() 216 handler_obj->notify.context); in acpi_ev_notify_dispatch()
|
/linux-4.4.14/sound/firewire/ |
D | amdtp-stream.c | 83 s->context = ERR_PTR(-1); in amdtp_stream_init() 373 if (IS_ERR(s->context)) in queue_packet() 381 err = fw_iso_context_queue(s->context, &p, &s->buffer.iso_buffer, in queue_packet() 547 static void out_stream_callback(struct fw_iso_context *context, u32 cycle, in out_stream_callback() argument 576 fw_iso_context_queue_flush(s->context); in out_stream_callback() 579 static void in_stream_callback(struct fw_iso_context *context, u32 cycle, in in_stream_callback() argument 643 fw_iso_context_queue_flush(s->sync_slave->context); in in_stream_callback() 645 fw_iso_context_queue_flush(s->context); in in_stream_callback() 649 static void slave_stream_callback(struct fw_iso_context *context, u32 cycle, in slave_stream_callback() argument 657 static void amdtp_stream_first_callback(struct fw_iso_context *context, in amdtp_stream_first_callback() argument [all …]
|
D | isight.c | 58 struct fw_iso_context *context; member 165 static void isight_packet(struct fw_iso_context *context, u32 cycle, in isight_packet() argument 196 err = fw_iso_context_queue(isight->context, &audio_packet, in isight_packet() 205 fw_iso_context_queue_flush(isight->context); in isight_packet() 317 if (!isight->context) in isight_stop_streaming() 320 fw_iso_context_stop(isight->context); in isight_stop_streaming() 321 fw_iso_context_destroy(isight->context); in isight_stop_streaming() 322 isight->context = NULL; in isight_stop_streaming() 348 if (isight->context) { in isight_start_streaming() 367 isight->context = fw_iso_context_create(isight->device->card, in isight_start_streaming() [all …]
|
/linux-4.4.14/arch/metag/include/asm/ |
D | mmu_context.h | 26 mm->context.pgd_base = (unsigned long) mm->pgd; in init_new_context() 29 INIT_LIST_HEAD(&mm->context.tcm); in init_new_context() 43 list_for_each_entry_safe(pos, n, &mm->context.tcm, list) { in destroy_context() 81 if (prev->context.pgd_base != (unsigned long) prev->pgd) { in switch_mmu() 83 ((pgd_t *) prev->context.pgd_base)[i] = prev->pgd[i]; in switch_mmu() 88 prev->pgd = (pgd_t *) prev->context.pgd_base; in switch_mmu() 91 next->pgd[i] = ((pgd_t *) next->context.pgd_base)[i]; in switch_mmu()
|
/linux-4.4.14/include/linux/ |
D | fence.h | 78 unsigned context, seqno; member 179 spinlock_t *lock, unsigned context, unsigned seqno); 292 if (WARN_ON(f1->context != f2->context)) in fence_is_later() 309 if (WARN_ON(f1->context != f2->context)) in fence_later() 360 __ff->context, __ff->seqno, ##args); \ 366 pr_warn("f %u#%u: " fmt, __ff->context, __ff->seqno, \ 373 pr_err("f %u#%u: " fmt, __ff->context, __ff->seqno, \
|
D | ssbi.h | 24 ssbi_reg_read(void *context, unsigned int reg, unsigned int *val) in ssbi_reg_read() argument 29 ret = ssbi_read(context, reg, &v, 1); in ssbi_reg_read() 37 ssbi_reg_write(void *context, unsigned int reg, unsigned int val) in ssbi_reg_write() argument 40 return ssbi_write(context, reg, &v, 1); in ssbi_reg_write()
|
D | hw_breakpoint.h | 49 void *context, 62 void *context, 68 void *context); 94 void *context, in register_user_hw_breakpoint() argument 102 void *context, in register_wide_hw_breakpoint_cpu() argument 107 void *context) { return NULL; } in register_wide_hw_breakpoint() argument
|
D | dm-kcopyd.h | 62 void *context); 66 unsigned flags, dm_kcopyd_notify_fn fn, void *context); 80 dm_kcopyd_notify_fn fn, void *context); 85 unsigned flags, dm_kcopyd_notify_fn fn, void *context);
|
D | firmware.h | 46 const char *name, struct device *device, gfp_t gfp, void *context, 47 void (*cont)(const struct firmware *fw, void *context)); 61 const char *name, struct device *device, gfp_t gfp, void *context, in request_firmware_nowait() argument 62 void (*cont)(const struct firmware *fw, void *context)) in request_firmware_nowait()
|
D | dm-region-hash.h | 36 void *context, void (*dispatch_bios)(void *context, 38 void (*wakeup_workers)(void *context), 39 void (*wakeup_all_recovery_waiters)(void *context),
|
D | vexpress.h | 39 struct regmap * (*regmap_init)(struct device *dev, void *context); 40 void (*regmap_exit)(struct regmap *regmap, void *context); 44 struct vexpress_config_bridge_ops *ops, void *context);
|
/linux-4.4.14/arch/frv/mm/ |
D | mmu-context.c | 31 memset(&mm->context, 0, sizeof(mm->context)); in init_new_context() 32 INIT_LIST_HEAD(&mm->context.id_link); in init_new_context() 33 mm->context.itlb_cached_pge = 0xffffffffUL; in init_new_context() 34 mm->context.dtlb_cached_pge = 0xffffffffUL; in init_new_context() 132 mm_context_t *ctx = &mm->context; in destroy_context() 157 buffer += sprintf(buffer, "CXNR: %u\n", mm->context.id); in proc_pid_status_frv_cxnr() 203 cxn_pinned = get_cxn(&mm->context); in cxn_pin_by_pid()
|
/linux-4.4.14/drivers/staging/rdma/ipath/ |
D | ipath_mmap.c | 50 struct ipath_ibdev *dev = to_idev(ip->context->device); in ipath_release_mmap_info() 89 int ipath_mmap(struct ib_ucontext *context, struct vm_area_struct *vma) in ipath_mmap() argument 91 struct ipath_ibdev *dev = to_idev(context->device); in ipath_mmap() 106 if (context != ip->context || (__u64) offset != ip->offset) in ipath_mmap() 133 struct ib_ucontext *context, in ipath_create_mmap_info() argument 152 ip->context = context; in ipath_create_mmap_info()
|
/linux-4.4.14/drivers/infiniband/hw/qib/ |
D | qib_mmap.c | 50 struct qib_ibdev *dev = to_idev(ip->context->device); in qib_release_mmap_info() 89 int qib_mmap(struct ib_ucontext *context, struct vm_area_struct *vma) in qib_mmap() argument 91 struct qib_ibdev *dev = to_idev(context->device); in qib_mmap() 106 if (context != ip->context || (__u64) offset != ip->offset) in qib_mmap() 133 struct ib_ucontext *context, in qib_create_mmap_info() argument 152 ip->context = context; in qib_create_mmap_info()
|
/linux-4.4.14/drivers/staging/rdma/hfi1/ |
D | mmap.c | 68 struct hfi1_ibdev *dev = to_idev(ip->context->device); in hfi1_release_mmap_info() 107 int hfi1_mmap(struct ib_ucontext *context, struct vm_area_struct *vma) in hfi1_mmap() argument 109 struct hfi1_ibdev *dev = to_idev(context->device); in hfi1_mmap() 124 if (context != ip->context || (__u64) offset != ip->offset) in hfi1_mmap() 151 struct ib_ucontext *context, in hfi1_create_mmap_info() argument 170 ip->context = context; in hfi1_create_mmap_info()
|
/linux-4.4.14/tools/perf/util/ |
D | trace-event-parse.c | 31 static int get_common_field(struct scripting_context *context, in get_common_field() argument 34 struct pevent *pevent = context->pevent; in get_common_field() 50 return pevent_read_number(pevent, context->event_data + *offset, *size); in get_common_field() 53 int common_lock_depth(struct scripting_context *context) in common_lock_depth() argument 59 ret = get_common_field(context, &size, &offset, in common_lock_depth() 67 int common_flags(struct scripting_context *context) in common_flags() argument 73 ret = get_common_field(context, &size, &offset, in common_flags() 81 int common_pc(struct scripting_context *context) in common_pc() argument 87 ret = get_common_field(context, &size, &offset, in common_pc()
|
/linux-4.4.14/arch/hexagon/include/asm/ |
D | mmu_context.h | 75 if (next->context.generation < prev->context.generation) { in switch_mm() 79 next->context.generation = prev->context.generation; in switch_mm() 82 __vmnewmap((void *)next->context.ptbase); in switch_mm()
|
D | pgalloc.h | 49 mm->context.generation = kmap_generation; in pgd_alloc() 52 mm->context.ptbase = __pa(pgd); in pgd_alloc() 125 mm->context.generation = kmap_generation; in pmd_populate_kernel() 126 current->active_mm->context.generation = kmap_generation; in pmd_populate_kernel()
|
/linux-4.4.14/arch/s390/numa/ |
D | toptree.c | 248 struct toptree *toptree_first(struct toptree *context, int level) in toptree_first() argument 252 if (context->level == level) in toptree_first() 253 return context; in toptree_first() 255 if (!list_empty(&context->children)) { in toptree_first() 256 list_for_each_entry(child, &context->children, sibling) { in toptree_first() 295 struct toptree *toptree_next(struct toptree *cur, struct toptree *context, in toptree_next() argument 303 if (context->level == level) in toptree_next() 311 while (cur_context->level < context->level - 1) { in toptree_next() 334 int toptree_count(struct toptree *context, int level) in toptree_count() argument 339 toptree_for_each(cur, context, level) in toptree_count()
|
/linux-4.4.14/arch/um/kernel/skas/ |
D | mmu.c | 53 struct mm_context *to_mm = &mm->context; in init_new_context() 63 from_mm = ¤t->mm->context; in init_new_context() 102 ret = init_stub_pte(mm, STUB_DATA, mm->context.id.stack); in uml_setup_stubs() 106 mm->context.stub_pages[0] = virt_to_page(__syscall_stub_start); in uml_setup_stubs() 107 mm->context.stub_pages[1] = virt_to_page(mm->context.id.stack); in uml_setup_stubs() 113 mm->context.stub_pages); in uml_setup_stubs() 141 struct mm_context *mmu = &mm->context; in destroy_context()
|
/linux-4.4.14/drivers/md/persistent-data/ |
D | dm-btree.h | 41 void *context; member 61 void (*inc)(void *context, const void *value); 68 void (*dec)(void *context, const void *value); 75 int (*equal)(void *context, const void *value1, const void *value2); 176 int (*fn)(void *context, uint64_t *keys, void *leaf), 177 void *context);
|
D | dm-array.c | 116 fn(info->value_type.context, element_at(info, ab, i)); in on_entries() 188 vt->inc(vt->context, value); in fill_ablock() 212 vt->dec(vt->context, element_at(info, ab, i - 1)); in trim_ablock() 562 static void block_inc(void *context, const void *value) in block_inc() argument 565 struct dm_array_info *info = context; in block_inc() 571 static void block_dec(void *context, const void *value) in block_dec() argument 579 struct dm_array_info *info = context; in block_dec() 610 static int block_equal(void *context, const void *value1, const void *value2) in block_equal() argument 627 bvt->context = info; in dm_array_info_init() 746 (!vt->equal || !vt->equal(vt->context, old_value, value))) { in array_set_value() [all …]
|
/linux-4.4.14/arch/score/include/asm/ |
D | mmu_context.h | 57 mm->context = asid; in get_new_mmu_context() 68 mm->context = 0; in init_new_context() 78 if ((next->context ^ asid_cache) & ASID_VERSION_MASK) in switch_mm() 81 pevn_set(next->context); in switch_mm() 108 pevn_set(next->context); in activate_mm()
|
/linux-4.4.14/crypto/ |
D | rsa_helper.c | 21 int rsa_get_n(void *context, size_t hdrlen, unsigned char tag, in rsa_get_n() argument 24 struct rsa_key *key = context; in rsa_get_n() 42 int rsa_get_e(void *context, size_t hdrlen, unsigned char tag, in rsa_get_e() argument 45 struct rsa_key *key = context; in rsa_get_e() 55 int rsa_get_d(void *context, size_t hdrlen, unsigned char tag, in rsa_get_d() argument 58 struct rsa_key *key = context; in rsa_get_d()
|
/linux-4.4.14/arch/arm/plat-omap/ |
D | dmtimer.c | 97 timer->context.twer); in omap_timer_restore_context() 99 timer->context.tcrr); in omap_timer_restore_context() 101 timer->context.tldr); in omap_timer_restore_context() 103 timer->context.tmar); in omap_timer_restore_context() 105 timer->context.tsicr); in omap_timer_restore_context() 106 writel_relaxed(timer->context.tier, timer->irq_ena); in omap_timer_restore_context() 108 timer->context.tclr); in omap_timer_restore_context() 452 timer->context.tclr = l; in omap_dm_timer_start() 474 timer->context.tclr = in omap_dm_timer_stop() 557 timer->context.tclr = l; in omap_dm_timer_set_load() [all …]
|
/linux-4.4.14/arch/x86/kvm/ |
D | mmu.c | 3547 struct kvm_mmu *context) in nonpaging_init_context() argument 3549 context->page_fault = nonpaging_page_fault; in nonpaging_init_context() 3550 context->gva_to_gpa = nonpaging_gva_to_gpa; in nonpaging_init_context() 3551 context->sync_page = nonpaging_sync_page; in nonpaging_init_context() 3552 context->invlpg = nonpaging_invlpg; in nonpaging_init_context() 3553 context->update_pte = nonpaging_update_pte; in nonpaging_init_context() 3554 context->root_level = 0; in nonpaging_init_context() 3555 context->shadow_root_level = PT32E_ROOT_LEVEL; in nonpaging_init_context() 3556 context->root_hpa = INVALID_PAGE; in nonpaging_init_context() 3557 context->direct_map = true; in nonpaging_init_context() [all …]
|
/linux-4.4.14/arch/frv/include/asm/ |
D | mmu_context.h | 38 change_mm_context(&prev->context, &next->context, next->pgd); \ 43 change_mm_context(&prev->context, &next->context, next->pgd); \
|
/linux-4.4.14/drivers/infiniband/hw/cxgb4/ |
D | provider.c | 94 static int c4iw_dealloc_ucontext(struct ib_ucontext *context) in c4iw_dealloc_ucontext() argument 96 struct c4iw_dev *rhp = to_c4iw_dev(context->device); in c4iw_dealloc_ucontext() 97 struct c4iw_ucontext *ucontext = to_c4iw_ucontext(context); in c4iw_dealloc_ucontext() 100 PDBG("%s context %p\n", __func__, context); in c4iw_dealloc_ucontext() 111 struct c4iw_ucontext *context; in c4iw_alloc_ucontext() local 119 context = kzalloc(sizeof(*context), GFP_KERNEL); in c4iw_alloc_ucontext() 120 if (!context) { in c4iw_alloc_ucontext() 125 c4iw_init_dev_ucontext(&rhp->rdev, &context->uctx); in c4iw_alloc_ucontext() 126 INIT_LIST_HEAD(&context->mmaps); in c4iw_alloc_ucontext() 127 spin_lock_init(&context->mmap_lock); in c4iw_alloc_ucontext() [all …]
|
/linux-4.4.14/drivers/irqchip/ |
D | irq-metag-ext.c | 625 struct meta_intc_context *context; in meta_intc_suspend() local 629 context = kzalloc(sizeof(*context), GFP_ATOMIC); in meta_intc_suspend() 630 if (!context) in meta_intc_suspend() 649 context->vectors[hw] = metag_in32(vec_addr); in meta_intc_suspend() 658 context->levels[bank] = metag_in32(level_addr); in meta_intc_suspend() 661 context->masks[bank] = metag_in32(mask_addr); in meta_intc_suspend() 671 context->txvecint[i][j] = metag_in32(T0VECINT_BHALT + in meta_intc_suspend() 676 meta_intc_context = context; in meta_intc_suspend() 692 struct meta_intc_context *context = meta_intc_context; in meta_intc_resume() local 714 metag_out32(context->vectors[hw], vec_addr); in meta_intc_resume() [all …]
|
/linux-4.4.14/drivers/md/ |
D | dm-round-robin.c | 71 ps->context = s; in rr_create() 77 struct selector *s = (struct selector *) ps->context; in rr_destroy() 82 ps->context = NULL; in rr_destroy() 114 struct selector *s = (struct selector *) ps->context; in rr_add_path() 149 struct selector *s = (struct selector *) ps->context; in rr_fail_path() 157 struct selector *s = (struct selector *) ps->context; in rr_reinstate_path() 168 struct selector *s = (struct selector *) ps->context; in rr_select_path()
|
D | dm-queue-length.c | 60 ps->context = s; in ql_create() 76 struct selector *s = ps->context; in ql_destroy() 81 ps->context = NULL; in ql_destroy() 112 struct selector *s = ps->context; in ql_add_path() 152 struct selector *s = ps->context; in ql_fail_path() 160 struct selector *s = ps->context; in ql_reinstate_path() 174 struct selector *s = ps->context; in ql_select_path()
|
D | dm-region-hash.c | 88 void *context; member 92 void (*dispatch_bios)(void *context, struct bio_list *bios); 95 void (*wakeup_workers)(void *context); 98 void (*wakeup_all_recovery_waiters)(void *context); 136 return reg->rh->context; in dm_rh_region_context() 161 void *context, void (*dispatch_bios)(void *context, in dm_region_hash_create() argument 163 void (*wakeup_workers)(void *context), in dm_region_hash_create() argument 164 void (*wakeup_all_recovery_waiters)(void *context), in dm_region_hash_create() argument 188 rh->context = context; in dm_region_hash_create() 377 rh->dispatch_bios(rh->context, ®->delayed_bios); in complete_resync_work() [all …]
|
D | dm-kcopyd.c | 350 void *context; member 438 void *context = job->context; in run_complete_job() local 452 fn(read_err, write_err, context); in run_complete_job() 460 static void complete_io(unsigned long error, void *context) in complete_io() argument 462 struct kcopyd_job *job = (struct kcopyd_job *) context; in complete_io() 504 .notify.context = job, in run_io_job() 618 void *context) in segment_complete() argument 623 struct kcopyd_job *sub_job = (struct kcopyd_job *) context; in segment_complete() 666 sub_job->context = sub_job; in segment_complete() 703 unsigned int flags, dm_kcopyd_notify_fn fn, void *context) in dm_kcopyd_copy() argument [all …]
|
D | dm-log-userspace-base.c | 321 log->context = lc; in userspace_ctr() 329 struct log_c *lc = log->context; in userspace_dtr() 356 struct log_c *lc = log->context; in userspace_presuspend() 367 struct log_c *lc = log->context; in userspace_postsuspend() 384 struct log_c *lc = log->context; in userspace_resume() 395 struct log_c *lc = log->context; in userspace_get_region_size() 414 struct log_c *lc = log->context; in userspace_is_clean() 442 struct log_c *lc = log->context; in userspace_in_sync() 567 struct log_c *lc = log->context; in userspace_flush() 652 struct log_c *lc = log->context; in userspace_mark_region() [all …]
|
D | dm-log.c | 509 log->context = lc; in create_log_context() 529 struct log_c *lc = (struct log_c *) log->context; in core_dtr() 566 struct log_c *lc = (struct log_c *) log->context; in disk_dtr() 587 struct log_c *lc = (struct log_c *) log->context; in disk_resume() 649 struct log_c *lc = (struct log_c *) log->context; in core_get_region_size() 655 struct log_c *lc = (struct log_c *) log->context; in core_resume() 662 struct log_c *lc = (struct log_c *) log->context; in core_is_clean() 668 struct log_c *lc = (struct log_c *) log->context; in core_in_sync() 681 struct log_c *lc = log->context; in disk_flush() 720 struct log_c *lc = (struct log_c *) log->context; in core_mark_region() [all …]
|
D | dm-service-time.c | 56 ps->context = s; in st_create() 72 struct selector *s = ps->context; in st_destroy() 77 ps->context = NULL; in st_destroy() 109 struct selector *s = ps->context; in st_add_path() 165 struct selector *s = ps->context; in st_fail_path() 173 struct selector *s = ps->context; in st_reinstate_path() 261 struct selector *s = ps->context; in st_select_path()
|
D | dm-cache-metadata.h | 75 typedef int (*load_discard_fn)(void *context, sector_t discard_block_size, 78 load_discard_fn fn, void *context); 86 typedef int (*load_mapping_fn)(void *context, dm_oblock_t oblock, 92 void *context);
|
D | dm-io.c | 38 void *context; member 117 void *context = io->context; in complete_io() local 124 fn(error_bits, context); in complete_io() 396 static void sync_io_complete(unsigned long error, void *context) in sync_io_complete() argument 398 struct sync_io *sio = context; in sync_io_complete() 423 io->context = &sio; in sync_io() 440 io_notify_fn fn, void *context) in async_io() argument 446 fn(1, context); in async_io() 455 io->context = context; in async_io() 524 &dp, io_req->notify.fn, io_req->notify.context); in dm_io()
|
/linux-4.4.14/scripts/coccinelle/api/ |
D | vma_pages.cocci | 8 virtual context 14 // For context mode 17 @r_context depends on context && !patch && !org && !report@ 27 @r_patch depends on !context && patch && !org && !report@ 38 @r_org depends on !context && !patch && (org || report)@
|
D | platform_no_drv_owner.cocci | 7 virtual context 25 @fix1 depends on match1 && patch && !context && !org && !report@ 34 @fix1_i2c depends on match1 && patch && !context && !org && !report@ 56 @fix2 depends on match2 && patch && !context && !org && !report@ 65 @fix2_i2c depends on match2 && patch && !context && !org && !report@ 76 @fix1_context depends on match1 && !patch && (context || org || report)@ 87 @fix1_i2c_context depends on match1 && !patch && (context || org || report)@ 98 @fix2_context depends on match2 && !patch && (context || org || report)@ 109 @fix2_i2c_context depends on match2 && !patch && (context || org || report)@
|
D | resource_size.cocci | 16 virtual context 22 // For context mode 25 @r_context depends on context && !patch && !org@ 35 @r_patch depends on !context && patch && !org@ 47 @r_org depends on !context && !patch && (org || report)@ 54 @rbad_org depends on !context && !patch && (org || report)@
|
D | err_cast.cocci | 15 virtual context 21 @ depends on context && !patch && !org && !report@ 27 @ depends on !context && patch && !org && !report @ 34 @r depends on !context && !patch && (org || report)@
|
/linux-4.4.14/arch/xtensa/mm/ |
D | tlb.c | 70 mm->context.asid[cpu] = NO_CONTEXT; in local_flush_tlb_mm() 74 mm->context.asid[cpu] = NO_CONTEXT; in local_flush_tlb_mm() 75 mm->context.cpu = -1; in local_flush_tlb_mm() 95 if (mm->context.asid[cpu] == NO_CONTEXT) in local_flush_tlb_range() 100 (unsigned long)mm->context.asid[cpu], start, end); in local_flush_tlb_range() 107 set_rasid_register(ASID_INSERT(mm->context.asid[cpu])); in local_flush_tlb_range() 135 if (mm->context.asid[cpu] == NO_CONTEXT) in local_flush_tlb_page() 141 set_rasid_register(ASID_INSERT(mm->context.asid[cpu])); in local_flush_tlb_page()
|
/linux-4.4.14/drivers/net/can/usb/peak_usb/ |
D | pcan_usb_core.c | 190 struct peak_usb_device *dev = urb->context; in peak_usb_read_bulk_callback() 255 struct peak_tx_urb_context *context = urb->context; in peak_usb_write_bulk_callback() local 259 BUG_ON(!context); in peak_usb_write_bulk_callback() 261 dev = context->dev; in peak_usb_write_bulk_callback() 274 netdev->stats.tx_bytes += context->data_len; in peak_usb_write_bulk_callback() 293 can_get_echo_skb(netdev, context->echo_index); in peak_usb_write_bulk_callback() 294 context->echo_index = PCAN_USB_MAX_TX_URBS; in peak_usb_write_bulk_callback() 308 struct peak_tx_urb_context *context = NULL; in peak_usb_ndo_start_xmit() local 321 context = dev->tx_contexts + i; in peak_usb_ndo_start_xmit() 325 if (!context) { in peak_usb_ndo_start_xmit() [all …]
|
/linux-4.4.14/arch/arm/mm/ |
D | context.c | 62 context_id = mm->context.id.counter; in a15_erratum_get_cpumask() 195 u64 asid = atomic64_read(&mm->context.id); in new_context() 246 if (unlikely(mm->context.vmalloc_seq != init_mm.context.vmalloc_seq)) in check_and_switch_context() 256 asid = atomic64_read(&mm->context.id); in check_and_switch_context() 263 asid = atomic64_read(&mm->context.id); in check_and_switch_context() 266 atomic64_set(&mm->context.id, asid); in check_and_switch_context()
|
/linux-4.4.14/drivers/net/ieee802154/ |
D | at86rf230.c | 80 void (*complete)(void *context); 123 const u8 state, void (*complete)(void *context)); 346 at86rf230_async_error_recover(void *context) in at86rf230_async_error_recover() argument 348 struct at86rf230_state_change *ctx = context; in at86rf230_async_error_recover() 372 void (*complete)(void *context)) in at86rf230_async_read_reg() argument 388 void (*complete)(void *context)) in at86rf230_async_write_reg() argument 401 at86rf230_async_state_assert(void *context) in at86rf230_async_state_assert() argument 403 struct at86rf230_state_change *ctx = context; in at86rf230_async_state_assert() 456 ctx->complete(context); in at86rf230_async_state_assert() 473 at86rf230_async_state_delay(void *context) in at86rf230_async_state_delay() argument [all …]
|
/linux-4.4.14/drivers/gpu/drm/nouveau/nvkm/engine/fifo/ |
D | chang84.c | 179 u32 context; in g84_fifo_chan_object_ctor() local 183 case NVKM_ENGINE_SW : context = 0x00000000; break; in g84_fifo_chan_object_ctor() 184 case NVKM_ENGINE_GR : context = 0x00100000; break; in g84_fifo_chan_object_ctor() 186 case NVKM_ENGINE_MSPPP : context = 0x00200000; break; in g84_fifo_chan_object_ctor() 188 case NVKM_ENGINE_CE0 : context = 0x00300000; break; in g84_fifo_chan_object_ctor() 190 case NVKM_ENGINE_MSPDEC: context = 0x00400000; break; in g84_fifo_chan_object_ctor() 193 case NVKM_ENGINE_VIC : context = 0x00500000; break; in g84_fifo_chan_object_ctor() 195 case NVKM_ENGINE_MSVLD : context = 0x00600000; break; in g84_fifo_chan_object_ctor() 201 return nvkm_ramht_insert(chan->ramht, object, 0, 4, handle, context); in g84_fifo_chan_object_ctor()
|
/linux-4.4.14/include/linux/mlx4/ |
D | driver.h | 58 void (*remove)(struct mlx4_dev *dev, void *context); 59 void (*event) (struct mlx4_dev *dev, void *context, 61 void * (*get_dev)(struct mlx4_dev *dev, void *context, u8 port); 62 void (*activate)(struct mlx4_dev *dev, void *context);
|
/linux-4.4.14/include/linux/i2c/ |
D | max732x.h | 13 void *context; /* param to setup/teardown */ member 17 void *context); 20 void *context);
|
D | pcf857x.h | 37 void *context); 40 void *context); 41 void *context; member
|
/linux-4.4.14/include/linux/platform_data/ |
D | pca953x.h | 19 void *context; /* param to setup/teardown */ member 23 void *context); 26 void *context);
|
/linux-4.4.14/arch/powerpc/include/asm/ |
D | mmu_context.h | 80 if (prev->context.acop || next->context.acop) in switch_mm() 142 if (start <= mm->context.vdso_base && mm->context.vdso_base < end) in arch_unmap() 143 mm->context.vdso_base = 0; in arch_unmap()
|
/linux-4.4.14/drivers/video/fbdev/omap2/ |
D | vrfb.c | 40 #define SMS_ROT_CONTROL(context) (0x0 + 0x10 * context) argument 41 #define SMS_ROT_SIZE(context) (0x4 + 0x10 * context) argument 42 #define SMS_ROT_PHYSICAL_BA(context) (0x8 + 0x10 * context) argument 188 u8 ctx = vrfb->context; in omap_vrfb_setup() 264 int ctx = vrfb->context; in omap_vrfb_release_ctx() 284 vrfb->context = 0xff; in omap_vrfb_release_ctx() 317 vrfb->context = ctx; in omap_vrfb_request_ctx()
|
/linux-4.4.14/drivers/net/wireless/mwifiex/ |
D | usb.c | 167 struct urb_context *context = (struct urb_context *)urb->context; in mwifiex_usb_rx_complete() local 168 struct mwifiex_adapter *adapter = context->adapter; in mwifiex_usb_rx_complete() 169 struct sk_buff *skb = context->skb; in mwifiex_usb_rx_complete() 180 if (card->rx_cmd_ep == context->ep) in mwifiex_usb_rx_complete() 190 if (card->rx_cmd_ep != context->ep) in mwifiex_usb_rx_complete() 199 status = mwifiex_usb_recv(adapter, skb, context->ep); in mwifiex_usb_rx_complete() 211 if (card->rx_cmd_ep == context->ep) in mwifiex_usb_rx_complete() 219 if (card->rx_cmd_ep != context->ep) in mwifiex_usb_rx_complete() 232 if (card->rx_cmd_ep != context->ep) in mwifiex_usb_rx_complete() 239 if (card->rx_cmd_ep == context->ep) in mwifiex_usb_rx_complete() [all …]
|
/linux-4.4.14/drivers/bus/ |
D | vexpress-config.c | 23 void *context; member 89 bridge->ops->regmap_exit(regmap, bridge->context); in vexpress_config_devres_release() 110 regmap = (bridge->ops->regmap_init)(dev, bridge->context); in devm_regmap_init_vexpress_config() 124 struct vexpress_config_bridge_ops *ops, void *context) in vexpress_config_bridge_register() argument 149 bridge->context = context; in vexpress_config_bridge_register()
|
/linux-4.4.14/drivers/hv/ |
D | hv_util.c | 59 static void shutdown_onchannelcallback(void *context); 64 static void timesync_onchannelcallback(void *context); 69 static void heartbeat_onchannelcallback(void *context); 102 static void shutdown_onchannelcallback(void *context) in shutdown_onchannelcallback() argument 104 struct vmbus_channel *channel = context; in shutdown_onchannelcallback() 233 static void timesync_onchannelcallback(void *context) in timesync_onchannelcallback() argument 235 struct vmbus_channel *channel = context; in timesync_onchannelcallback() 276 static void heartbeat_onchannelcallback(void *context) in heartbeat_onchannelcallback() argument 278 struct vmbus_channel *channel = context; in heartbeat_onchannelcallback()
|
/linux-4.4.14/drivers/gpu/drm/nouveau/nvkm/core/ |
D | ramht.c | 60 int chid, int addr, u32 handle, u32 context) in nvkm_ramht_update() argument 87 if (addr < 0) context |= inst << -addr; in nvkm_ramht_update() 88 else context |= inst >> addr; in nvkm_ramht_update() 93 nvkm_wo32(ramht->gpuobj, (co << 3) + 4, context); in nvkm_ramht_update() 107 int chid, int addr, u32 handle, u32 context) in nvkm_ramht_insert() argument 118 addr, handle, context); in nvkm_ramht_insert()
|
/linux-4.4.14/drivers/isdn/hardware/eicon/ |
D | istream.c | 39 int diva_istream_write(void *context, 46 int diva_istream_read(void *context, 70 int diva_istream_write(void *context, in diva_istream_write() argument 77 ADAPTER *a = (ADAPTER *)context; in diva_istream_write() 148 int diva_istream_read(void *context, in diva_istream_read() argument 155 ADAPTER *a = (ADAPTER *)context; in diva_istream_read()
|
D | dadapter.c | 36 void IDI_CALL_ENTITY_T *context; member 85 void IDI_CALL_ENTITY_T *context); 226 (void IDI_CALL_ENTITY_T *)pinfo->context); in diva_dadapter_request() 270 void IDI_CALL_ENTITY_T *context) { in diva_register_adapter_callback() argument 278 NotificationTable[i].context = context; in diva_register_adapter_callback() 296 NotificationTable[handle].context = NULL; in diva_remove_adapter_callback() 322 (*(nfy.callback))(nfy.context, d, removal); in diva_notify_adapter_change()
|
/linux-4.4.14/Documentation/ |
D | unshare.txt | 61 2.1 Per-security context namespaces 65 such as per-user and/or per-security context instance of /tmp, /var/tmp or 66 per-security context instance of a user's home directory, isolate user 83 disassociate parts of the context during the servicing of the 109 allowed incremental context unsharing in future without an ABI change. 112 new context flags without requiring a rebuild of old applications. 113 If and when new context flags are added, unshare design should allow 119 unshare - disassociate parts of the process execution context 128 context that are currently being shared with other processes. Part 129 of execution context, such as the namespace, is shared by default [all …]
|
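The unshare.txt excerpt above describes disassociating parts of the process execution context, such as the mount namespace. A minimal userspace sketch of the call it documents, assuming only the standard unshare(2) glibc wrapper; CLONE_NEWNS requires CAP_SYS_ADMIN, so run it as root or inside a user namespace:

    #define _GNU_SOURCE
    #include <sched.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <unistd.h>

    int main(void)
    {
            /* Give this process a private copy of its mount namespace;
             * mounts made after this point are invisible to other tasks. */
            if (unshare(CLONE_NEWNS) == -1) {
                    perror("unshare(CLONE_NEWNS)");
                    return EXIT_FAILURE;
            }

            printf("mount namespace unshared; pid %d now sees private mounts\n",
                   (int)getpid());
            return EXIT_SUCCESS;
    }

Per-security-context /tmp instances of the kind the document mentions are built on exactly this step: unshare the mount namespace, then bind-mount a private directory over /tmp.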
/linux-4.4.14/drivers/usb/musb/ |
D | musb_core.c | 2318 musb->context.frame = musb_readw(musb_base, MUSB_FRAME); in musb_save_context() 2319 musb->context.testmode = musb_readb(musb_base, MUSB_TESTMODE); in musb_save_context() 2320 musb->context.busctl = musb_read_ulpi_buscontrol(musb->mregs); in musb_save_context() 2321 musb->context.power = musb_readb(musb_base, MUSB_POWER); in musb_save_context() 2322 musb->context.intrusbe = musb_readb(musb_base, MUSB_INTRUSBE); in musb_save_context() 2323 musb->context.index = musb_readb(musb_base, MUSB_INDEX); in musb_save_context() 2324 musb->context.devctl = musb_readb(musb_base, MUSB_DEVCTL); in musb_save_context() 2338 musb->context.index_regs[i].txmaxp = in musb_save_context() 2340 musb->context.index_regs[i].txcsr = in musb_save_context() 2342 musb->context.index_regs[i].rxmaxp = in musb_save_context() [all …]
|
/linux-4.4.14/drivers/uwb/ |
D | neh.c | 109 u8 context; member 176 neh->context = result; in __uwb_rc_ctx_get() 186 if (neh->context == 0) in __uwb_rc_ctx_put() 188 if (test_bit(neh->context, rc->ctx_bm) == 0) { in __uwb_rc_ctx_put() 190 neh->context); in __uwb_rc_ctx_put() 193 clear_bit(neh->context, rc->ctx_bm); in __uwb_rc_ctx_put() 194 neh->context = 0; in __uwb_rc_ctx_put() 237 cmd->bCommandContext = neh->context; in uwb_rc_neh_add() 293 if (neh->context) in uwb_rc_neh_arm() 309 && neh->context == rceb->bEventContext; in uwb_rc_neh_match() [all …]
|
/linux-4.4.14/drivers/media/firewire/ |
D | firedtv-fw.c | 77 struct fw_iso_context *context; member 93 return fw_iso_context_queue(ctx->context, &p, &ctx->buffer, in queue_iso() 97 static void handle_iso(struct fw_iso_context *context, u32 cycle, in handle_iso() argument 127 fw_iso_context_queue_flush(ctx->context); in handle_iso() 141 ctx->context = fw_iso_context_create(device->card, in fdtv_start_iso() 144 if (IS_ERR(ctx->context)) { in fdtv_start_iso() 145 err = PTR_ERR(ctx->context); in fdtv_start_iso() 166 err = fw_iso_context_start(ctx->context, -1, 0, in fdtv_start_iso() 177 fw_iso_context_destroy(ctx->context); in fdtv_start_iso() 188 fw_iso_context_stop(ctx->context); in fdtv_stop_iso() [all …]
|
/linux-4.4.14/drivers/infiniband/hw/usnic/ |
D | usnic_ib_verbs.c | 438 struct ib_ucontext *context, in usnic_ib_alloc_pd() argument 457 pd, context, ibdev->name); in usnic_ib_alloc_pd() 485 ucontext = to_uucontext(pd->uobject->context); in usnic_ib_create_qp() 594 struct ib_ucontext *context, in usnic_ib_create_cq() argument 652 usnic_uiom_reg_release(mr->umem, ibmr->pd->uobject->context->closing); in usnic_ib_dereg_mr() 660 struct usnic_ib_ucontext *context; in usnic_ib_alloc_ucontext() local 664 context = kmalloc(sizeof(*context), GFP_KERNEL); in usnic_ib_alloc_ucontext() 665 if (!context) in usnic_ib_alloc_ucontext() 668 INIT_LIST_HEAD(&context->qp_grp_list); in usnic_ib_alloc_ucontext() 670 list_add_tail(&context->link, &us_ibdev->ctx_list); in usnic_ib_alloc_ucontext() [all …]
|
/linux-4.4.14/arch/avr32/include/asm/ |
D | mmu_context.h | 45 if (((mm->context ^ mc) & MMU_CONTEXT_VERSION_MASK) == 0) in get_mmu_context() 64 mm->context = mc; in get_mmu_context() 74 mm->context = NO_CONTEXT; in init_new_context() 105 set_asid(mm->context & MMU_CONTEXT_ASID_MASK); in activate_context()
|
/linux-4.4.14/arch/x86/math-emu/ |
D | fpu_system.h | 29 mutex_lock(¤t->mm->context.lock); in FPU_get_ldt_descriptor() 30 if (current->mm->context.ldt && seg < current->mm->context.ldt->size) in FPU_get_ldt_descriptor() 31 ret = current->mm->context.ldt->entries[seg]; in FPU_get_ldt_descriptor() 32 mutex_unlock(¤t->mm->context.lock); in FPU_get_ldt_descriptor()
|
/linux-4.4.14/arch/score/mm/ |
D | tlb-score.c | 70 pevn_set(mm->context & ASID_MASK); in drop_mmu_context() 76 if (mm->context != 0) in local_flush_tlb_mm() 84 unsigned long vma_mm_context = mm->context; in local_flush_tlb_range() 85 if (mm->context != 0) { in local_flush_tlb_range() 161 if (vma && vma->vm_mm->context != 0) { in local_flush_tlb_page() 164 unsigned long vma_ASID = vma->vm_mm->context; in local_flush_tlb_page()
|
/linux-4.4.14/drivers/misc/ |
D | vexpress-syscfg.c | 117 static int vexpress_syscfg_read(void *context, unsigned int index, in vexpress_syscfg_read() argument 120 struct vexpress_syscfg_func *func = context; in vexpress_syscfg_read() 125 static int vexpress_syscfg_write(void *context, unsigned int index, in vexpress_syscfg_write() argument 128 struct vexpress_syscfg_func *func = context; in vexpress_syscfg_write() 146 void *context) in vexpress_syscfg_regmap_init() argument 149 struct vexpress_syscfg *syscfg = context; in vexpress_syscfg_regmap_init() 227 static void vexpress_syscfg_regmap_exit(struct regmap *regmap, void *context) in vexpress_syscfg_regmap_exit() argument 229 struct vexpress_syscfg *syscfg = context; in vexpress_syscfg_regmap_exit()
|
/linux-4.4.14/scripts/coccinelle/null/ |
D | deref_null.cocci | 13 virtual context 51 @r depends on !context && (org || report) exists@ 91 @script:python depends on !context && !org && report@ 101 @script:python depends on !context && org && !report@ 112 @s depends on !context && (org || report) exists@ 151 @script:python depends on !context && !org && report@ 160 @script:python depends on !context && org && !report@ 170 // For context mode 172 @depends on context && !org && !report exists@ 245 @depends on context && !org && !report exists@
|
/linux-4.4.14/drivers/usb/misc/ |
D | usbtest.c | 234 complete(urb->context); in simple_callback() 438 urb->context = &completion; in simple_io() 1049 struct ctrl_ctx *ctx = urb->context; in ctrl_complete() 1164 struct ctrl_ctx context; in test_ctrl_queue() local 1170 spin_lock_init(&context.lock); in test_ctrl_queue() 1171 context.dev = dev; in test_ctrl_queue() 1172 init_completion(&context.complete); in test_ctrl_queue() 1173 context.count = param->sglen * param->iterations; in test_ctrl_queue() 1174 context.pending = 0; in test_ctrl_queue() 1175 context.status = -ENOMEM; in test_ctrl_queue() [all …]
|
/linux-4.4.14/arch/s390/mm/ |
D | pgtable.c | 57 BUG_ON(mm->context.asce_limit != (1UL << 42)); in crst_table_upgrade() 68 mm->context.asce_limit = 1UL << 53; in crst_table_upgrade() 69 mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH | in crst_table_upgrade() 71 mm->task_size = mm->context.asce_limit; in crst_table_upgrade() 83 BUG_ON(mm->context.asce_limit != (1UL << 42)); in crst_table_downgrade() 92 mm->context.asce_limit = 1UL << 31; in crst_table_downgrade() 93 mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH | in crst_table_downgrade() 95 mm->task_size = mm->context.asce_limit; in crst_table_downgrade() 155 list_add(&gmap->list, &mm->context.gmap_list); in gmap_alloc() 453 list_for_each_entry(gmap, &mm->context.gmap_list, list) { in gmap_unlink() [all …]
|
/linux-4.4.14/Documentation/infiniband/ |
D | core_locking.txt | 8 Sleeping and interrupt context 25 which may not sleep and must be callable from any context. 40 are therefore safe to call from any context. 47 the midlayer is also safe to call from any context. 92 The context in which completion event and asynchronous event 94 may be process context, softirq context, or interrupt context. 105 ib_unregister_device() from process context. It must not hold any
|
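The core_locking.txt excerpt above notes that completion and asynchronous event handlers may be invoked in process, softirq, or interrupt context. A minimal sketch of what that implies for a consumer's completion handler, assuming a hypothetical my_ctx structure that is not part of the midlayer API: the handler uses only non-sleeping primitives such as spin_lock_irqsave() together with ib_req_notify_cq() and ib_poll_cq(), which are safe in any context.

    #include <linux/spinlock.h>
    #include <rdma/ib_verbs.h>

    /* Hypothetical per-consumer state; not part of the midlayer API. */
    struct my_ctx {
            spinlock_t lock;
            unsigned long completions;
    };

    /* Completion handler registered via ib_create_cq(); it may run in
     * process, softirq or hard interrupt context, so it must not sleep. */
    static void my_comp_handler(struct ib_cq *cq, void *cq_context)
    {
            struct my_ctx *ctx = cq_context;
            struct ib_wc wc;
            unsigned long flags;

            /* Re-arm the CQ, then drain it; both calls are callable from
             * any context. */
            ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);

            while (ib_poll_cq(cq, 1, &wc) > 0) {
                    spin_lock_irqsave(&ctx->lock, flags);
                    if (wc.status == IB_WC_SUCCESS)
                            ctx->completions++;
                    spin_unlock_irqrestore(&ctx->lock, flags);
            }
    }

Anything that can sleep — GFP_KERNEL allocations, mutexes, ib_unregister_device() — has to be deferred to process context, for example via a workqueue.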
/linux-4.4.14/arch/cris/arch-v10/mm/ |
D | tlb.c | 67 int page_id = mm->context.page_id; in flush_tlb_mm() 102 int page_id = mm->context.page_id; in flush_tlb_page() 145 mm->context.page_id = NO_CONTEXT; in init_new_context() 170 next->context, next)); in switch_mm() 173 page_id, next->context.page_id); in switch_mm()
|
/linux-4.4.14/arch/cris/arch-v32/mm/ |
D | tlb.c | 88 page_id = mm->context.page_id; in __flush_tlb_mm() 130 page_id = vma->vm_mm->context.page_id; in __flush_tlb_page() 171 mm->context.page_id = NO_CONTEXT; in init_new_context() 200 SPEC_REG_WR(SPEC_REG_PID, next->context.page_id | in switch_mm() 203 SPEC_REG_WR(SPEC_REG_PID, next->context.page_id); in switch_mm()
|
/linux-4.4.14/arch/x86/include/asm/ |
D | mmu_context.h | 27 atomic_read(&mm->context.perf_rdpmc_allowed)) in load_mm_cr4() 72 ldt = lockless_dereference(mm->context.ldt); in load_mm_ldt() 170 if (unlikely(prev->context.ldt != next->context.ldt)) in switch_mm() 239 !(mm->context.ia32_compat == TIF_IA32); in is_64bit_mm()
|
/linux-4.4.14/sound/soc/codecs/ |
D | rl6347a.c | 19 int rl6347a_hw_write(void *context, unsigned int reg, unsigned int value) in rl6347a_hw_write() argument 21 struct i2c_client *client = context; in rl6347a_hw_write() 62 int rl6347a_hw_read(void *context, unsigned int reg, unsigned int *value) in rl6347a_hw_read() argument 64 struct i2c_client *client = context; in rl6347a_hw_read()
|
/linux-4.4.14/drivers/firewire/ |
D | ohci.c | 112 struct context; 114 typedef int (*descriptor_callback_t)(struct context *ctx, 130 struct context { struct 178 struct context context; argument 219 struct context at_request_ctx; 220 struct context at_response_ctx; 1069 struct context *ctx = (struct context *) data; in context_tasklet() 1114 static int context_add_buffer(struct context *ctx) in context_add_buffer() 1143 static int context_init(struct context *ctx, struct fw_ohci *ohci, in context_init() 1176 static void context_release(struct context *ctx) in context_release() [all …]
|
/linux-4.4.14/arch/x86/kernel/cpu/mcheck/ |
D | mce-severity.c | 32 enum context { IN_KERNEL = 1, IN_USER = 2 }; enum 43 unsigned char context; member 49 #define KERNEL .context = IN_KERNEL 50 #define USER .context = IN_USER 195 enum context ctx = error_context(m); in mce_severity_amd() 246 enum context ctx = error_context(m); in mce_severity_intel() 258 if (s->context && ctx != s->context) in mce_severity_intel()
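mce-severity.c tags each entry of its severity table with an optional context (IN_KERNEL / IN_USER) and skips rules whose context does not match the one derived from the machine check, which is exactly the `if (s->context && ctx != s->context)` test visible above. Here is a compact, runnable sketch of that table-driven first-match scheme; the rule set and bit layout are invented, only the matching idiom mirrors the excerpt.

#include <stdio.h>

enum context { ANY = 0, IN_KERNEL = 1, IN_USER = 2 };

struct severity_rule {
	const char *name;
	unsigned int mask, value;    /* match when (status & mask) == value */
	enum context context;        /* 0 means "applies in any context"   */
	int severity;
};

/* Invented rules; first match wins, so ordering encodes priority. */
static const struct severity_rule rules[] = {
	{ "corrected",        0x4, 0x0, ANY,       1 },
	{ "uncorrected-user", 0x4, 0x4, IN_USER,   3 },
	{ "uncorrected-kern", 0x4, 0x4, IN_KERNEL, 5 },
};

static int severity(unsigned int status, enum context ctx)
{
	const struct severity_rule *s;
	unsigned int i;

	for (i = 0; i < sizeof(rules) / sizeof(rules[0]); i++) {
		s = &rules[i];
		if ((status & s->mask) != s->value)
			continue;
		if (s->context && ctx != s->context)   /* same test as above */
			continue;
		return s->severity;
	}
	return 0;
}

int main(void)
{
	printf("user fault:   %d\n", severity(0x4, IN_USER));
	printf("kernel fault: %d\n", severity(0x4, IN_KERNEL));
	printf("corrected:    %d\n", severity(0x0, IN_KERNEL));
	return 0;
}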
|
/linux-4.4.14/include/rdma/ |
D | ib_sa.h | 314 void *context), 315 void *context, 326 void *context), 327 void *context, 335 void *context; member 373 void *context); 435 void *context), 436 void *context,
|
/linux-4.4.14/Documentation/powerpc/ |
D | cxlflash.txt | 52 concept of a master context. A master typically has special privileges 59 The CXL Flash Adapter Driver establishes a master context with the 65 | (per context) | 68 | 512 * 128 B per context | 79 areas in the MMIO Space shown above. The master context driver 88 This master context driver also provides a series of ioctls for this 149 This ioctl obtains, initializes, and starts a context using the CXL 150 kernel services. These services specify a context id (u16) by which 151 to uniquely identify the context and its allocated resources. The 161 There are a few important aspects regarding the "tokens" (context id [all …]
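The cxlflash text describes ioctls that hand userspace a context id (a u16 token) which later calls present back to the driver, so the driver has to map the token to its per-context state and reject anything stale or out of range. The following is only a generic sketch of such a token table under that assumption; nothing in it is the cxlflash API, and all names are made up.

#include <stdint.h>
#include <stdio.h>

#define MAX_CONTEXTS 16

struct ctx {
	int in_use;
	char owner[16];
};

static struct ctx table[MAX_CONTEXTS];

/* Hand out the first free slot; the index doubles as the u16 token. */
static int ctx_create(const char *owner, uint16_t *id)
{
	uint16_t i;

	for (i = 0; i < MAX_CONTEXTS; i++) {
		if (!table[i].in_use) {
			table[i].in_use = 1;
			snprintf(table[i].owner, sizeof(table[i].owner), "%s", owner);
			*id = i;
			return 0;
		}
	}
	return -1;
}

/* Every later call must validate the token before touching the state. */
static struct ctx *ctx_lookup(uint16_t id)
{
	if (id >= MAX_CONTEXTS || !table[id].in_use)
		return NULL;
	return &table[id];
}

int main(void)
{
	uint16_t id;

	if (ctx_create("app0", &id) == 0)
		printf("context id %u -> owner %s\n", id, ctx_lookup(id)->owner);
	if (!ctx_lookup(42))
		printf("context id 42 rejected\n");
	return 0;
}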
|
D | cxl.txt | 16 Coherent in this context means that the accelerator and CPUs can 59 the fault. The context to which this fault is serviced is based on 69 When using dedicated mode only one MMU context is supported. In 76 support fewer). In this mode, the AFU sends a 16 bit context ID 77 with each of its requests. This tells the PSL which context is 80 determine the userspace context associated with an operation. 88 just a per context portion. The hardware is self describing, hence 89 the kernel can determine the offset and size of the per context 107 The WED is a 64-bit parameter passed to the AFU when a context is 121 master context and /dev/cxl/afu0.0s will correspond to a slave [all …]
|
/linux-4.4.14/include/uapi/drm/ |
D | tegra_drm.h | 65 __u64 context; member 69 __u64 context; member 73 __u64 context; member 79 __u64 context; member 117 __u64 context; member
|
/linux-4.4.14/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/ |
D | hub.fuc | 67 // 31:0: total PGRAPH context size 134 // context size calculation, reserve first 256 bytes for use by fuc 144 // calculate size of mmio context data 168 // context data in GPCn_CC_SCRATCH[1], and starting its FUC (which 172 // when it has completed, and return the size of its context data 191 // wait for it to complete, and adjust context size 212 // save context size, and tell host we're ready 229 // context switch, requested by GPU? 269 // ack the context switch request 276 // request to set current channel? (*not* a context switch) [all …]
|
/linux-4.4.14/arch/um/kernel/ |
D | exec.c | 27 ret = unmap(&current->mm->context.id, 0, STUB_START, 0, &data); in flush_thread() 28 ret = ret || unmap(&current->mm->context.id, STUB_END, in flush_thread() 38 __switch_mm(&current->mm->context.id); in flush_thread()
|
/linux-4.4.14/drivers/net/can/usb/ |
D | usb_8dev.c | 512 struct usb_8dev_priv *priv = urb->context; in usb_8dev_read_bulk_callback() 572 struct usb_8dev_tx_urb_context *context = urb->context; in usb_8dev_write_bulk_callback() local 576 BUG_ON(!context); in usb_8dev_write_bulk_callback() 578 priv = context->priv; in usb_8dev_write_bulk_callback() 595 netdev->stats.tx_bytes += context->dlc; in usb_8dev_write_bulk_callback() 597 can_get_echo_skb(netdev, context->echo_index); in usb_8dev_write_bulk_callback() 602 context->echo_index = MAX_TX_URBS; in usb_8dev_write_bulk_callback() 616 struct usb_8dev_tx_urb_context *context = NULL; in usb_8dev_start_xmit() local 657 context = &priv->tx_contexts[i]; in usb_8dev_start_xmit() 665 if (!context) in usb_8dev_start_xmit() [all …]
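usb_8dev_start_xmit() walks a fixed array of tx contexts to find a free slot, records the echo index and frame length in it, and the write-bulk callback later accounts the bytes and marks the slot free again by setting context->echo_index = MAX_TX_URBS. A standalone, single-threaded sketch of that fixed-size in-flight slot pool is below; the real driver additionally relies on locking and URB lifetime handling that the sketch omits.

#include <stdio.h>

#define MAX_TX_URBS 4

struct tx_urb_context {
	unsigned int echo_index;     /* == MAX_TX_URBS means "slot is free" */
	unsigned int dlc;
};

static struct tx_urb_context tx_contexts[MAX_TX_URBS];

static void pool_init(void)
{
	unsigned int i;

	for (i = 0; i < MAX_TX_URBS; i++)
		tx_contexts[i].echo_index = MAX_TX_URBS;
}

/* "start_xmit": claim the first free slot, or report that we are full. */
static struct tx_urb_context *claim_slot(unsigned int dlc)
{
	unsigned int i;

	for (i = 0; i < MAX_TX_URBS; i++) {
		if (tx_contexts[i].echo_index == MAX_TX_URBS) {
			tx_contexts[i].echo_index = i;
			tx_contexts[i].dlc = dlc;
			return &tx_contexts[i];
		}
	}
	return NULL;                 /* all URBs in flight: stop the queue */
}

/* "write bulk callback": account the frame and release the slot. */
static void complete_slot(struct tx_urb_context *context,
			  unsigned long *tx_bytes)
{
	*tx_bytes += context->dlc;
	context->echo_index = MAX_TX_URBS;
}

int main(void)
{
	struct tx_urb_context *inflight[MAX_TX_URBS + 1];
	unsigned long tx_bytes = 0;
	int n = 0, i;

	pool_init();
	/* Claim until the pool is exhausted (the netdev queue would stop). */
	while (n <= MAX_TX_URBS && (inflight[n] = claim_slot(8)) != NULL)
		n++;
	printf("%d frames in flight, pool exhausted\n", n);

	/* Completions trickle in and release the slots. */
	for (i = 0; i < n; i++)
		complete_slot(inflight[i], &tx_bytes);
	printf("accounted %lu tx bytes\n", tx_bytes);
	return 0;
}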
|