/linux-4.4.14/security/selinux/ss/ |
H A D | mls.h |
  24  #include "context.h"
  27  int mls_compute_context_len(struct context *context);
  28  void mls_sid_to_context(struct context *context, char **scontext);
  29  int mls_context_isvalid(struct policydb *p, struct context *c);
  36      struct context *context,
  40  int mls_from_string(char *str, struct context *context, gfp_t gfp_mask);
  42  int mls_range_set(struct context *context, struct mls_range *range);
  46      struct context *context);
  48  int mls_compute_sid(struct context *scontext,
  49      struct context *tcontext,
  52      struct context *newcontext,
  55  int mls_setup_user_range(struct context *fromcon, struct user_datum *user,
  56      struct context *usercon);
  59  void mls_export_netlbl_lvl(struct context *context,
  61  void mls_import_netlbl_lvl(struct context *context,
  63  int mls_export_netlbl_cat(struct context *context,
  65  int mls_import_netlbl_cat(struct context *context,
  68  static inline void mls_export_netlbl_lvl(struct context *context,
  73  static inline void mls_import_netlbl_lvl(struct context *context,
  78  static inline int mls_export_netlbl_cat(struct context *context,
  83  static inline int mls_import_netlbl_cat(struct context *context,
|
H A D | sidtab.h |
   3   * of security context structures indexed by SID value.
  10  #include "context.h"
  14      struct context context;   /* security context structure */   (member in struct sidtab_node)
  35  int sidtab_insert(struct sidtab *s, u32 sid, struct context *context);
  36  struct context *sidtab_search(struct sidtab *s, u32 sid);
  37  struct context *sidtab_search_force(struct sidtab *s, u32 sid);
  41      struct context *context,
  46      struct context *context,
|
H A D | sidtab.c | 33 int sidtab_insert(struct sidtab *s, u32 sid, struct context *context) sidtab_insert() argument 62 if (context_cpy(&newnode->context, context)) { sidtab_insert() 85 static struct context *sidtab_search_core(struct sidtab *s, u32 sid, int force) sidtab_search_core() 98 if (force && cur && sid == cur->sid && cur->context.len) sidtab_search_core() 99 return &cur->context; sidtab_search_core() 101 if (cur == NULL || sid != cur->sid || cur->context.len) { sidtab_search_core() 112 return &cur->context; sidtab_search_core() 115 struct context *sidtab_search(struct sidtab *s, u32 sid) sidtab_search() 120 struct context *sidtab_search_force(struct sidtab *s, u32 sid) sidtab_search_force() 127 struct context *context, sidtab_map() 140 rc = apply(cur->sid, &cur->context, args); sidtab_map() 162 struct context *context) sidtab_search_context() 170 if (context_cmp(&cur->context, context)) { sidtab_search_context() 180 static inline u32 sidtab_search_cache(struct sidtab *s, struct context *context) sidtab_search_cache() argument 189 if (context_cmp(&node->context, context)) { sidtab_search_cache() 198 struct context *context, sidtab_context_to_sid() 207 sid = sidtab_search_cache(s, context); sidtab_context_to_sid() 209 sid = sidtab_search_context(s, context); sidtab_context_to_sid() 213 sid = sidtab_search_context(s, context); sidtab_context_to_sid() 216 /* No SID exists for the context. Allocate a new one. */ sidtab_context_to_sid() 222 if (context->len) sidtab_context_to_sid() 225 context->str); sidtab_context_to_sid() 226 ret = sidtab_insert(s, sid, context); sidtab_context_to_sid() 280 context_destroy(&temp->context); sidtab_destroy() 125 sidtab_map(struct sidtab *s, int (*apply) (u32 sid, struct context *context, void *args), void *args) sidtab_map() argument 161 sidtab_search_context(struct sidtab *s, struct context *context) sidtab_search_context() argument 197 sidtab_context_to_sid(struct sidtab *s, struct context *context, u32 *out_sid) sidtab_context_to_sid() argument
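The sidtab maps SID values to security context structures through hash buckets of chained nodes, with a small cache helping the reverse (context-to-SID) lookup. A minimal userspace sketch of the forward path follows; the bucket count, the sidtab_t/ctx_t names, the push-front insertion and the absence of locking are illustrative assumptions, not the kernel's actual sidtab implementation.

/* Minimal SID-table sketch: hash-chained nodes mapping a 32-bit SID to a
 * (simplified) security context. */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define SIDTAB_SIZE 128                 /* number of hash buckets (assumed) */
#define SIDTAB_HASH(sid) ((sid) % SIDTAB_SIZE)

typedef struct { uint32_t user, role, type; } ctx_t;  /* stand-in for struct context */

struct sidtab_node {
    uint32_t sid;
    ctx_t context;
    struct sidtab_node *next;
};

typedef struct { struct sidtab_node *htable[SIDTAB_SIZE]; } sidtab_t;

static int sidtab_insert(sidtab_t *s, uint32_t sid, const ctx_t *context)
{
    struct sidtab_node *n = malloc(sizeof(*n));

    if (!n)
        return -1;
    n->sid = sid;
    n->context = *context;                  /* context_cpy() equivalent */
    n->next = s->htable[SIDTAB_HASH(sid)];  /* push onto the bucket chain */
    s->htable[SIDTAB_HASH(sid)] = n;
    return 0;
}

static ctx_t *sidtab_search(sidtab_t *s, uint32_t sid)
{
    struct sidtab_node *cur = s->htable[SIDTAB_HASH(sid)];

    while (cur && cur->sid != sid)
        cur = cur->next;
    return cur ? &cur->context : NULL;
}

int main(void)
{
    sidtab_t tab = { { NULL } };
    ctx_t kernel_ctx = { .user = 1, .role = 1, .type = 2 };

    sidtab_insert(&tab, 1, &kernel_ctx);
    ctx_t *found = sidtab_search(&tab, 1);
    printf("sid 1 -> type %u\n", found ? found->type : 0);
    return 0;
}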
|
H A D | mls.c | 33 * security context string representation of `context'. 35 int mls_compute_context_len(struct context *context) mls_compute_context_len() argument 47 int index_sens = context->range.level[l].sens; mls_compute_context_len() 53 e = &context->range.level[l].cat; ebitmap_for_each_positive_bit() 72 if (mls_level_eq(&context->range.level[0], 73 &context->range.level[1])) 84 * Write the security context string representation of 85 * the MLS fields of `context' into the string `*scontext'. 88 void mls_sid_to_context(struct context *context, mls_sid_to_context() argument 106 context->range.level[l].sens - 1)); mls_sid_to_context() 112 e = &context->range.level[l].cat; ebitmap_for_each_positive_bit() 148 if (mls_level_eq(&context->range.level[0], 149 &context->range.level[1])) 188 * Return 1 if the MLS fields in the security context 191 int mls_context_isvalid(struct policydb *p, struct context *c) mls_context_isvalid() 217 * Set the MLS fields in the security context structure 218 * `context' based on the string representation in 227 * copy the MLS field of the associated default context. 237 struct context *context, mls_context_to_sid() 255 * No MLS component to the security context, try and map to mls_context_to_sid() 259 struct context *defcon; mls_context_to_sid() 268 rc = mls_context_cpy(context, defcon); mls_context_to_sid() 288 context->range.level[l].sens = levdatum->level->sens; mls_context_to_sid() 314 rc = ebitmap_set_bit(&context->range.level[l].cat, mls_context_to_sid() 335 rc = ebitmap_set_bit(&context->range.level[l].cat, i, 1); mls_context_to_sid() 359 context->range.level[1].sens = context->range.level[0].sens; mls_context_to_sid() 360 rc = ebitmap_cpy(&context->range.level[1].cat, mls_context_to_sid() 361 &context->range.level[0].cat); mls_context_to_sid() 372 * Set the MLS fields in the security context structure 373 * `context' based on the string representation in 377 int mls_from_string(char *str, struct context *context, gfp_t gfp_mask) mls_from_string() argument 391 rc = mls_context_to_sid(&policydb, ':', &tmpstr, context, mls_from_string() 400 * Copies the MLS range `range' into `context'. 402 int mls_range_set(struct context *context, mls_range_set() argument 407 /* Copy the MLS range into the context */ mls_range_set() 409 context->range.level[l].sens = range->level[l].sens; mls_range_set() 410 rc = ebitmap_cpy(&context->range.level[l].cat, mls_range_set() 419 int mls_setup_user_range(struct context *fromcon, struct user_datum *user, mls_setup_user_range() 420 struct context *usercon) mls_setup_user_range() 458 * Convert the MLS fields in the security context 464 struct context *c) mls_convert_context() 505 int mls_compute_sid(struct context *scontext, mls_compute_sid() 506 struct context *tcontext, mls_compute_sid() 509 struct context *newcontext, mls_compute_sid() 571 * @context: the security context 575 * Given the security context copy the low MLS sensitivity level into the 579 void mls_export_netlbl_lvl(struct context *context, mls_export_netlbl_lvl() argument 585 secattr->attr.mls.lvl = context->range.level[0].sens - 1; mls_export_netlbl_lvl() 591 * @context: the security context 595 * Given the security context and the NetLabel security attributes, copy the 596 * NetLabel MLS sensitivity level into the context. 
599 void mls_import_netlbl_lvl(struct context *context, mls_import_netlbl_lvl() argument 605 context->range.level[0].sens = secattr->attr.mls.lvl + 1; mls_import_netlbl_lvl() 606 context->range.level[1].sens = context->range.level[0].sens; mls_import_netlbl_lvl() 611 * @context: the security context 615 * Given the security context copy the low MLS categories into the NetLabel 619 int mls_export_netlbl_cat(struct context *context, mls_export_netlbl_cat() argument 627 rc = ebitmap_netlbl_export(&context->range.level[0].cat, mls_export_netlbl_cat() 637 * @context: the security context 641 * Copy the NetLabel security attributes into the SELinux context; since the 643 * both the low and high categories of the context. Returns zero on success, 647 int mls_import_netlbl_cat(struct context *context, mls_import_netlbl_cat() argument 655 rc = ebitmap_netlbl_import(&context->range.level[0].cat, mls_import_netlbl_cat() 659 memcpy(&context->range.level[1].cat, &context->range.level[0].cat, mls_import_netlbl_cat() 660 sizeof(context->range.level[0].cat)); mls_import_netlbl_cat() 665 ebitmap_destroy(&context->range.level[0].cat); mls_import_netlbl_cat() 234 mls_context_to_sid(struct policydb *pol, char oldc, char **scontext, struct context *context, struct sidtab *s, u32 def_sid) mls_context_to_sid() argument
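mls_sid_to_context() walks each level's category bitmap and appends the sensitivity plus a compressed category list to the context string, collapsing consecutive categories into a range. A rough userspace sketch of that range-compression idea follows, over a plain bool array rather than the kernel's ebitmap; the buffer handling, the fixed category count and the "run of two or more becomes cA.cB" rule are simplifying assumptions.

/* Sketch of MLS level formatting: print "sN" plus categories, collapsing
 * runs of consecutive set bits into "cA.cB", comma-separating the rest. */
#include <stdbool.h>
#include <stdio.h>

#define NCATS 16

static void mls_level_to_string(int sens, const bool cats[NCATS],
                                char *buf, size_t len)
{
    size_t n = snprintf(buf, len, "s%d", sens);
    int run_start = -1;
    char sep = ':';                              /* first category follows ':' */

    for (int i = 0; i <= NCATS; i++) {
        bool set = (i < NCATS) && cats[i];

        if (set && run_start < 0) {
            run_start = i;                       /* a run of categories begins */
        } else if (!set && run_start >= 0) {
            int run_end = i - 1;

            n += snprintf(buf + n, len - n, "%cc%d", sep, run_start);
            if (run_end > run_start)             /* collapse the run into cA.cB */
                n += snprintf(buf + n, len - n, ".c%d", run_end);
            sep = ',';
            run_start = -1;
        }
    }
}

int main(void)
{
    bool cats[NCATS] = { 0 };
    char buf[128];

    cats[0] = cats[1] = cats[2] = true;          /* c0-c2 */
    cats[5] = true;                              /* c5    */
    mls_level_to_string(1, cats, buf, sizeof(buf));
    printf("%s\n", buf);                         /* prints s1:c0.c2,c5 */
    return 0;
}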
|
H A D | context.h |
   2   * A security context is a set of security attributes
  23   * A security context consists of an authenticated user
  26  struct context {
  32      char *str;   /* string representation if context cannot be mapped. */
  35  static inline void mls_context_init(struct context *c)
  40  static inline int mls_context_cpy(struct context *dst, struct context *src)
  60  static inline int mls_context_cpy_low(struct context *dst, struct context *src)
  80  static inline int mls_context_cpy_high(struct context *dst, struct context *src)
  97  static inline int mls_context_cmp(struct context *c1, struct context *c2)
 105  static inline void mls_context_destroy(struct context *c)
 112  static inline void context_init(struct context *c)
 117  static inline int context_cpy(struct context *dst, struct context *src)
 141  static inline void context_destroy(struct context *c)
 150  static inline int context_cmp(struct context *c1, struct context *c2)
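context.h holds a security context either in mapped form (user, role, type and MLS range values) or, when the policy cannot map it, as a raw string plus length; the copy, compare and destroy helpers must handle both forms. A hedged sketch of that dual representation follows, with the MLS range reduced to two sensitivity values and no ebitmaps; the field layout is an assumption for illustration.

/* Sketch of the dual context representation: mapped values or an unmapped
 * string kept verbatim. */
#include <stdlib.h>
#include <string.h>

struct mls_level { int sens; };                   /* categories omitted */
struct mls_range { struct mls_level level[2]; };  /* low and high level */

struct context {
    unsigned int user, role, type;
    struct mls_range range;
    char *str;        /* string form if the context cannot be mapped */
    unsigned int len;
};

static int context_cpy(struct context *dst, const struct context *src)
{
    *dst = *src;
    if (src->str) {                               /* unmapped: duplicate the string */
        dst->str = malloc(src->len);
        if (!dst->str)
            return -1;
        memcpy(dst->str, src->str, src->len);
    }
    return 0;
}

static int context_cmp(const struct context *c1, const struct context *c2)
{
    if (c1->len && c2->len)                       /* both unmapped: compare strings */
        return c1->len == c2->len && !memcmp(c1->str, c2->str, c1->len);
    if (c1->len || c2->len)                       /* only one unmapped: never equal */
        return 0;
    return c1->user == c2->user && c1->role == c2->role &&
           c1->type == c2->type &&
           c1->range.level[0].sens == c2->range.level[0].sens &&
           c1->range.level[1].sens == c2->range.level[1].sens;
}

static void context_destroy(struct context *c)
{
    free(c->str);
    memset(c, 0, sizeof(*c));
}

int main(void)
{
    struct context a = { .user = 1, .role = 1, .type = 2 }, b;

    context_cpy(&b, &a);
    int same = context_cmp(&a, &b);               /* 1: identical mapped contexts */
    context_destroy(&b);
    return !same;
}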
|
H A D | services.c | 10 * Support for context based audit filters. 61 #include "context.h" 92 static int context_struct_to_string(struct context *context, char **scontext, 95 static void context_struct_compute_av(struct context *scontext, 96 struct context *tcontext, 267 * only. For these rules, scontext is the context before the transition, 268 * tcontext is the context after the transition, and xcontext is the context 272 static int constraint_expr_eval(struct context *scontext, constraint_expr_eval() 273 struct context *tcontext, constraint_expr_eval() 274 struct context *xcontext, constraint_expr_eval() 278 struct context *c; constraint_expr_eval() 462 static void security_dump_masked_av(struct context *scontext, security_dump_masked_av() 463 struct context *tcontext, security_dump_masked_av() 540 static void type_attribute_bounds_av(struct context *scontext, type_attribute_bounds_av() 541 struct context *tcontext, type_attribute_bounds_av() 545 struct context lo_scontext; type_attribute_bounds_av() 546 struct context lo_tcontext; type_attribute_bounds_av() 645 * Compute access vectors and extended permissions based on a context 648 static void context_struct_compute_av(struct context *scontext, context_struct_compute_av() 649 struct context *tcontext, context_struct_compute_av() 753 static int security_validtrans_handle_fail(struct context *ocontext, security_validtrans_handle_fail() 754 struct context *ncontext, security_validtrans_handle_fail() 755 struct context *tcontext, security_validtrans_handle_fail() 784 struct context *ocontext; security_validate_transition() 785 struct context *ncontext; security_validate_transition() 786 struct context *tcontext; security_validate_transition() 858 struct context *old_context, *new_context; security_bounded_transition() 1000 struct context *scontext, *tcontext; security_compute_xperms_decision() 1091 struct context *scontext = NULL, *tcontext = NULL; security_compute_av() 1138 struct context *scontext = NULL, *tcontext = NULL; security_compute_av_user() 1179 * Write the security context string representation of 1180 * the context structure `context' into a dynamically 1185 static int context_struct_to_string(struct context *context, char **scontext, u32 *scontext_len) context_struct_to_string() argument 1193 if (context->len) { context_struct_to_string() 1194 *scontext_len = context->len; context_struct_to_string() 1196 *scontext = kstrdup(context->str, GFP_ATOMIC); context_struct_to_string() 1203 /* Compute the size of the context. */ context_struct_to_string() 1204 *scontext_len += strlen(sym_name(&policydb, SYM_USERS, context->user - 1)) + 1; context_struct_to_string() 1205 *scontext_len += strlen(sym_name(&policydb, SYM_ROLES, context->role - 1)) + 1; context_struct_to_string() 1206 *scontext_len += strlen(sym_name(&policydb, SYM_TYPES, context->type - 1)) + 1; context_struct_to_string() 1207 *scontext_len += mls_compute_context_len(context); context_struct_to_string() 1212 /* Allocate space for the context; caller must free this space. */ context_struct_to_string() 1219 * Copy the user name, role name and type name into the context. 
context_struct_to_string() 1222 sym_name(&policydb, SYM_USERS, context->user - 1), context_struct_to_string() 1223 sym_name(&policydb, SYM_ROLES, context->role - 1), context_struct_to_string() 1224 sym_name(&policydb, SYM_TYPES, context->type - 1)); context_struct_to_string() 1226 mls_sid_to_context(context, &scontextp); context_struct_to_string() 1245 struct context *context; security_sid_to_context_core() local 1275 context = sidtab_search_force(&sidtab, sid); security_sid_to_context_core() 1277 context = sidtab_search(&sidtab, sid); security_sid_to_context_core() 1278 if (!context) { security_sid_to_context_core() 1284 rc = context_struct_to_string(context, scontext, scontext_len); security_sid_to_context_core() 1293 * security_sid_to_context - Obtain a context for a given SID. 1295 * @scontext: security context 1298 * Write the string representation of the context associated with @sid 1319 struct context *ctx, string_to_context_struct() 1330 /* Parse the security context. */ string_to_context_struct() 1387 /* Check the validity of the new context. */ string_to_context_struct() 1402 struct context context; security_context_to_sid_core() local 1405 /* An empty security context is never valid. */ security_context_to_sid_core() 1440 scontext_len, &context, def_sid); security_context_to_sid_core() 1442 context.str = str; security_context_to_sid_core() 1443 context.len = scontext_len; security_context_to_sid_core() 1447 rc = sidtab_context_to_sid(&sidtab, &context, sid); security_context_to_sid_core() 1448 context_destroy(&context); security_context_to_sid_core() 1458 * security_context_to_sid - Obtain a SID for a given security context. 1459 * @scontext: security context 1462 * @gfp: context for the allocation 1464 * Obtains a SID associated with the security context that 1466 * Returns -%EINVAL if the context is invalid, -%ENOMEM if insufficient 1482 * security_context_to_sid_default - Obtain a SID for a given security context, 1485 * @scontext: security context 1490 * Obtains a SID associated with the security context that 1495 * Implicitly forces adding of the context even if it cannot be mapped yet. 1496 * Returns -%EINVAL if the context is invalid, -%ENOMEM if insufficient 1514 struct context *scontext, compute_sid_handle_invalid_context() 1515 struct context *tcontext, compute_sid_handle_invalid_context() 1517 struct context *newcontext) compute_sid_handle_invalid_context() 1543 static void filename_compute_type(struct policydb *p, struct context *newcontext, filename_compute_type() 1577 struct context *scontext = NULL, *tcontext = NULL, newcontext; security_compute_sid() 1722 /* Check the validity of the context. */ security_compute_sid() 1731 /* Obtain the sid for the context. */ security_compute_sid() 1813 struct context *context, clone_sid() 1819 return sidtab_insert(s, sid, context); clone_sid() 1824 static inline int convert_context_handle_invalid_context(struct context *context) convert_context_handle_invalid_context() argument 1832 if (!context_struct_to_string(context, &s, &len)) { convert_context_handle_invalid_context() 1845 * Convert the values in the security context 1849 * context is valid under the new policy. 1852 struct context *c, convert_context() 1856 struct context oldc; convert_context() 1872 struct context ctx; convert_context() 1895 printk(KERN_ERR "SELinux: Unable to map context %s, rc = %d.\n", convert_context() 1938 * context for all existing entries in the sidtab. 
convert_context() 1944 * ensure that the MLS fields of the context for all convert_context() 1958 range = &oc->context[0].range; convert_context() 1964 /* Check the validity of the new context. */ convert_context() 2196 &c->context[0], security_port_sid() 2233 &c->context[0], security_netif_sid() 2238 &c->context[1], security_netif_sid() 2323 &c->context[0], security_node_sid() 2360 struct context *fromcon, usercon; security_get_user_sids() 2506 rc = sidtab_context_to_sid(&sidtab, &c->context[0], &c->sid[0]); __security_genfs_sid() 2563 rc = sidtab_context_to_sid(&sidtab, &c->context[0], security_fs_use() 2734 struct context *context1; security_sid_mls_copy() 2735 struct context *context2; security_sid_mls_copy() 2736 struct context newcon; security_sid_mls_copy() 2774 /* Check the validity of the new context. */ security_sid_mls_copy() 2822 struct context *nlbl_ctx; security_net_peersid_resolve() 2823 struct context *xfrm_ctx; security_net_peersid_resolve() 3009 struct context au_ctxt; 3146 struct context *ctxt; selinux_audit_rule_match() 3281 * Attempt to cache the context in @ctx, which was derived from the packet in 3314 * SID/context then use SECINITSID_NETMSG as the foundation. If possible the 3325 struct context *ctx; security_netlbl_secattr_to_sid() 3326 struct context ctx_new; security_netlbl_secattr_to_sid() 3391 struct context *ctx; security_netlbl_sid_to_secattr() 1812 clone_sid(u32 sid, struct context *context, void *arg) clone_sid() argument
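context_struct_to_string() sizes the output first (the user, role and type names plus the MLS suffix), allocates once, then formats "user:role:type" and lets mls_sid_to_context() append the rest. A simplified sketch of that two-pass pattern follows; the fixed name arrays stand in for the policydb symbol tables, and the MLS suffix is omitted, both as assumptions.

/* Two-pass context-to-string sketch: compute the exact length, then
 * allocate and format "user:role:type"; caller frees the result. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static const char *users[] = { "system_u", "unconfined_u" };
static const char *roles[] = { "object_r", "system_r" };
static const char *types[] = { "kernel_t", "etc_t" };

struct context { unsigned int user, role, type; };  /* 1-based indices */

static int context_to_string(const struct context *c, char **scontext,
                             size_t *scontext_len)
{
    const char *u = users[c->user - 1];
    const char *r = roles[c->role - 1];
    const char *t = types[c->type - 1];

    /* Pass 1: size the buffer (three names, two ':', trailing NUL). */
    *scontext_len = strlen(u) + 1 + strlen(r) + 1 + strlen(t) + 1;

    /* Pass 2: allocate and format. */
    *scontext = malloc(*scontext_len);
    if (!*scontext)
        return -1;
    snprintf(*scontext, *scontext_len, "%s:%s:%s", u, r, t);
    return 0;
}

int main(void)
{
    struct context c = { .user = 1, .role = 1, .type = 1 };
    char *s;
    size_t len;

    if (!context_to_string(&c, &s, &len)) {
        printf("%s (len %zu)\n", s, len);         /* system_u:object_r:kernel_t */
        free(s);
    }
    return 0;
}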
|
/linux-4.4.14/arch/s390/include/asm/ |
H A D | mmu.h |
  17  /* The mmu context allocates 4K page tables. */
  19  /* The mmu context uses extended page tables. */
  21  /* The mmu context uses storage keys. */
  26      .context.list_lock = __SPIN_LOCK_UNLOCKED(name.context.list_lock), \
  27      .context.pgtable_list = LIST_HEAD_INIT(name.context.pgtable_list), \
  28      .context.gmap_list = LIST_HEAD_INIT(name.context.gmap_list),
|
H A D | mmu_context.h |
  init_new_context():
    18      spin_lock_init(&mm->context.list_lock);
    19      INIT_LIST_HEAD(&mm->context.pgtable_list);
    20      INIT_LIST_HEAD(&mm->context.gmap_list);
    21      cpumask_clear(&mm->context.cpu_attach_mask);
    22      atomic_set(&mm->context.attach_count, 0);
    23      mm->context.flush_mm = 0;
    25      mm->context.alloc_pgste = page_table_allocate_pgste;
    26      mm->context.has_pgste = 0;
    27      mm->context.use_skey = 0;
    29      switch (mm->context.asce_limit) {
    36      /* context created by exec, set asce limit to 4TB */
    37      mm->context.asce_limit = STACK_TOP_MAX;
    38      mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
    43      mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
    48      mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
  set_user_asce():
    61      S390_lowcore.user_asce = mm->context.asce;
  switch_mm():
    90      S390_lowcore.user_asce = next->context.asce;
    94      cpumask_set_cpu(cpu, &next->context.cpu_attach_mask);
    98      atomic_inc(&next->context.attach_count);
    99      atomic_dec(&prev->context.attach_count);
   101      cpumask_clear_cpu(cpu, &prev->context.cpu_attach_mask);
  finish_arch_post_lock_switch():
   113      while (atomic_read(&mm->context.attach_count) >> 16)
   117      if (mm->context.flush_mm)
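init_new_context() wires up the per-mm bookkeeping (lists, attach counter, flags) and builds the ASCE from the page-table origin, the table-length bits and a designation type chosen by the address-space limit (4 TB after exec, larger limits selecting higher region-table types). A hedged sketch of that limit-to-type selection follows; the numeric constants and limit thresholds are placeholders, not the real s390 _ASCE_* definitions.

/* Sketch: pick ASCE designation-type bits from the address-space limit. */
#include <stdint.h>
#include <stdio.h>

#define TB (1ULL << 40)

enum asce_type {                  /* placeholder encodings, not the real bits */
    TYPE_SEGMENT = 0x0,           /* small address space        */
    TYPE_REGION3 = 0x4,           /* up to 4 TB                 */
    TYPE_REGION2 = 0x8,           /* larger                     */
    TYPE_REGION1 = 0xc,           /* full 64-bit address space  */
};

static uint64_t build_asce(uint64_t pgd_phys, uint64_t asce_limit)
{
    uint64_t type;

    if (asce_limit <= (1ULL << 31))
        type = TYPE_SEGMENT;
    else if (asce_limit <= 4 * TB)     /* the post-exec default limit */
        type = TYPE_REGION3;
    else if (asce_limit <= (1ULL << 53))
        type = TYPE_REGION2;
    else
        type = TYPE_REGION1;

    /* origin | table-length bits (assumed 0x3) | designation type */
    return pgd_phys | 0x3 | type;
}

int main(void)
{
    printf("asce=%#llx\n",
           (unsigned long long)build_asce(0x1000, 4 * TB));
    return 0;
}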
|
H A D | tlbflush.h |
  __tlb_flush_full():
    67      atomic_add(0x10000, &mm->context.attach_count);
    77          &mm->context.cpu_attach_mask);
    79      atomic_sub(0x10000, &mm->context.attach_count);
  __tlb_flush_asce():
    92      count = atomic_add_return(0x10000, &mm->context.attach_count);
   104          &mm->context.cpu_attach_mask);
   106      atomic_sub(0x10000, &mm->context.attach_count);
  __tlb_flush_kernel():
   113      __tlb_flush_idte(init_mm.context.asce);
   135      __tlb_flush_idte_local(init_mm.context.asce);
  __tlb_flush_mm():
   148      if (MACHINE_HAS_IDTE && list_empty(&mm->context.gmap_list))
   149          __tlb_flush_asce(mm, mm->context.asce);
  __tlb_flush_mm_lazy():
   156      if (mm->context.flush_mm) {
   158          mm->context.flush_mm = 0;
   166   * flush_tlb_mm(mm) - flushes the specified mm context TLB's
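The attach_count atomic packs two counters: switch_mm() keeps the number of attached CPUs in the low 16 bits, while the flush paths add 0x10000 so the high 16 bits say whether a TLB flush is in flight, which is what finish_arch_post_lock_switch() spins on. A small C11-atomics sketch of that packing follows; the function names and the busy-wait loop are illustrative, not the kernel API.

/* Sketch: two 16-bit counters in one atomic int — attachers in the low
 * half, in-flight flushers in the high half. */
#include <stdatomic.h>
#include <stdio.h>

static atomic_int attach_count;      /* [flushers:16 | attachers:16] */

static void attach_cpu(void)   { atomic_fetch_add(&attach_count, 1); }
static void detach_cpu(void)   { atomic_fetch_sub(&attach_count, 1); }

static void flush_begin(void)  { atomic_fetch_add(&attach_count, 0x10000); }
static void flush_end(void)    { atomic_fetch_sub(&attach_count, 0x10000); }

static void wait_for_flushes(void)
{
    /* Mirrors finish_arch_post_lock_switch(): spin while any flush runs. */
    while (atomic_load(&attach_count) >> 16)
        ;                             /* cpu_relax() in the kernel */
}

int main(void)
{
    attach_cpu();
    flush_begin();
    printf("attachers=%d flushers=%d\n",
           atomic_load(&attach_count) & 0xffff,
           atomic_load(&attach_count) >> 16);
    flush_end();
    wait_for_flushes();               /* returns immediately now */
    detach_cpu();
    return 0;
}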
|
/linux-4.4.14/drivers/staging/media/lirc/ |
H A D | lirc_sasem.c | 165 static void delete_context(struct sasem_context *context) delete_context() argument 167 usb_free_urb(context->tx_urb); /* VFD */ delete_context() 168 usb_free_urb(context->rx_urb); /* IR */ delete_context() 169 lirc_buffer_free(context->driver->rbuf); delete_context() 170 kfree(context->driver->rbuf); delete_context() 171 kfree(context->driver); delete_context() 172 kfree(context); delete_context() 175 static void deregister_from_lirc(struct sasem_context *context) deregister_from_lirc() argument 178 int minor = context->driver->minor; deregister_from_lirc() 182 dev_err(&context->dev->dev, deregister_from_lirc() 186 dev_info(&context->dev->dev, deregister_from_lirc() 198 struct sasem_context *context = NULL; vfd_open() local 213 context = usb_get_intfdata(interface); vfd_open() 215 if (!context) { vfd_open() 216 dev_err(&interface->dev, "no context found for minor %d\n", vfd_open() 222 mutex_lock(&context->ctx_lock); vfd_open() 224 if (context->vfd_isopen) { vfd_open() 229 context->vfd_isopen = 1; vfd_open() 230 file->private_data = context; vfd_open() 234 mutex_unlock(&context->ctx_lock); vfd_open() 247 struct sasem_context *context; vfd_ioctl() local 249 context = (struct sasem_context *) file->private_data; vfd_ioctl() 251 if (!context) { vfd_ioctl() 252 pr_err("%s: no context for device\n", __func__); vfd_ioctl() 256 mutex_lock(&context->ctx_lock); vfd_ioctl() 262 context->vfd_contrast = (unsigned int)arg; vfd_ioctl() 266 mutex_unlock(&context->ctx_lock); vfd_ioctl() 270 mutex_unlock(&context->ctx_lock); vfd_ioctl() 280 struct sasem_context *context = NULL; vfd_close() local 283 context = (struct sasem_context *) file->private_data; vfd_close() 285 if (!context) { vfd_close() 286 pr_err("%s: no context for device\n", __func__); vfd_close() 290 mutex_lock(&context->ctx_lock); vfd_close() 292 if (!context->vfd_isopen) { vfd_close() 293 dev_err(&context->dev->dev, "%s: VFD is not open\n", __func__); vfd_close() 296 context->vfd_isopen = 0; vfd_close() 297 dev_info(&context->dev->dev, "VFD port closed\n"); vfd_close() 298 if (!context->dev_present && !context->ir_isopen) { vfd_close() 301 * not open. 
If IR port is open, context will be vfd_close() 303 mutex_unlock(&context->ctx_lock); vfd_close() 304 delete_context(context); vfd_close() 309 mutex_unlock(&context->ctx_lock); vfd_close() 316 static int send_packet(struct sasem_context *context) send_packet() argument 322 pipe = usb_sndintpipe(context->dev, send_packet() 323 context->tx_endpoint->bEndpointAddress); send_packet() 324 interval = context->tx_endpoint->bInterval; send_packet() 326 usb_fill_int_urb(context->tx_urb, context->dev, pipe, send_packet() 327 context->usb_tx_buf, sizeof(context->usb_tx_buf), send_packet() 328 usb_tx_callback, context, interval); send_packet() 330 context->tx_urb->actual_length = 0; send_packet() 332 init_completion(&context->tx.finished); send_packet() 333 atomic_set(&context->tx.busy, 1); send_packet() 335 retval = usb_submit_urb(context->tx_urb, GFP_KERNEL); send_packet() 337 atomic_set(&context->tx.busy, 0); send_packet() 338 dev_err(&context->dev->dev, "error submitting urb (%d)\n", send_packet() 342 mutex_unlock(&context->ctx_lock); send_packet() 343 wait_for_completion(&context->tx.finished); send_packet() 344 mutex_lock(&context->ctx_lock); send_packet() 346 retval = context->tx.status; send_packet() 348 dev_err(&context->dev->dev, send_packet() 365 struct sasem_context *context; vfd_write() local 368 context = (struct sasem_context *) file->private_data; vfd_write() 369 if (!context) { vfd_write() 370 pr_err("%s: no context for device\n", __func__); vfd_write() 374 mutex_lock(&context->ctx_lock); vfd_write() 376 if (!context->dev_present) { vfd_write() 383 dev_err(&context->dev->dev, "%s: invalid payload size\n", vfd_write() 396 memcpy(context->tx.data_buf, data_buf, n_bytes); vfd_write() 400 context->tx.data_buf[i] = ' '; vfd_write() 408 memcpy(context->usb_tx_buf, "\x07\0\0\0\0\0\0\0", 8); vfd_write() 409 context->usb_tx_buf[1] = (context->vfd_contrast) ? 
vfd_write() 410 (0x2B - (context->vfd_contrast - 1) / 250) vfd_write() 414 memcpy(context->usb_tx_buf, "\x09\x01\0\0\0\0\0\0", 8); vfd_write() 417 memcpy(context->usb_tx_buf, "\x0b\x01\0\0\0\0\0\0", 8); vfd_write() 420 memcpy(context->usb_tx_buf, context->tx.data_buf, 8); vfd_write() 423 memcpy(context->usb_tx_buf, vfd_write() 424 context->tx.data_buf + 8, 8); vfd_write() 427 memcpy(context->usb_tx_buf, "\x09\x01\0\0\0\0\0\0", 8); vfd_write() 430 memcpy(context->usb_tx_buf, "\x0b\x02\0\0\0\0\0\0", 8); vfd_write() 433 memcpy(context->usb_tx_buf, vfd_write() 434 context->tx.data_buf + 16, 8); vfd_write() 437 memcpy(context->usb_tx_buf, vfd_write() 438 context->tx.data_buf + 24, 8); vfd_write() 441 retval = send_packet(context); vfd_write() 443 dev_err(&context->dev->dev, vfd_write() 450 mutex_unlock(&context->ctx_lock); vfd_write() 461 struct sasem_context *context; usb_tx_callback() local 465 context = (struct sasem_context *) urb->context; usb_tx_callback() 466 if (!context) usb_tx_callback() 469 context->tx.status = urb->status; usb_tx_callback() 472 atomic_set(&context->tx.busy, 0); usb_tx_callback() 473 complete(&context->tx.finished); usb_tx_callback() 482 struct sasem_context *context; ir_open() local 487 context = data; ir_open() 489 mutex_lock(&context->ctx_lock); ir_open() 491 if (context->ir_isopen) { ir_open() 492 dev_err(&context->dev->dev, "%s: IR port is already open\n", ir_open() 498 usb_fill_int_urb(context->rx_urb, context->dev, ir_open() 499 usb_rcvintpipe(context->dev, ir_open() 500 context->rx_endpoint->bEndpointAddress), ir_open() 501 context->usb_rx_buf, sizeof(context->usb_rx_buf), ir_open() 502 usb_rx_callback, context, context->rx_endpoint->bInterval); ir_open() 504 retval = usb_submit_urb(context->rx_urb, GFP_KERNEL); ir_open() 507 dev_err(&context->dev->dev, ir_open() 510 context->ir_isopen = 1; ir_open() 511 dev_info(&context->dev->dev, "IR port opened\n"); ir_open() 515 mutex_unlock(&context->ctx_lock); ir_open() 526 struct sasem_context *context; ir_close() local 528 context = data; ir_close() 529 if (!context) { ir_close() 530 pr_err("%s: no context for device\n", __func__); ir_close() 534 mutex_lock(&context->ctx_lock); ir_close() 536 usb_kill_urb(context->rx_urb); ir_close() 537 context->ir_isopen = 0; ir_close() 540 if (!context->dev_present) { ir_close() 547 deregister_from_lirc(context); ir_close() 549 if (!context->vfd_isopen) { ir_close() 551 mutex_unlock(&context->ctx_lock); ir_close() 552 delete_context(context); ir_close() 555 /* If VFD port is open, context will be deleted by vfd_close */ ir_close() 558 mutex_unlock(&context->ctx_lock); ir_close() 564 static void incoming_packet(struct sasem_context *context, incoming_packet() argument 573 dev_warn(&context->dev->dev, incoming_packet() 580 dev_info(&context->dev->dev, "Incoming data: %*ph\n", len, buf); incoming_packet() 588 ms = (tv.tv_sec - context->presstime.tv_sec) * 1000 + incoming_packet() 589 (tv.tv_usec - context->presstime.tv_usec) / 1000; incoming_packet() 603 if ((ms < 250) && (context->codesaved != 0)) { incoming_packet() 604 memcpy(buf, &context->lastcode, 8); incoming_packet() 605 context->presstime.tv_sec = tv.tv_sec; incoming_packet() 606 context->presstime.tv_usec = tv.tv_usec; incoming_packet() 610 memcpy(&context->lastcode, buf, 8); incoming_packet() 615 context->codesaved = 1; incoming_packet() 616 context->presstime.tv_sec = tv.tv_sec; incoming_packet() 617 context->presstime.tv_usec = tv.tv_usec; incoming_packet() 620 lirc_buffer_write(context->driver->rbuf, buf); 
incoming_packet() 621 wake_up(&context->driver->rbuf->wait_poll); incoming_packet() 629 struct sasem_context *context; usb_rx_callback() local 633 context = (struct sasem_context *) urb->context; usb_rx_callback() 634 if (!context) usb_rx_callback() 643 if (context->ir_isopen) usb_rx_callback() 644 incoming_packet(context, urb); usb_rx_callback() 653 usb_submit_urb(context->rx_urb, GFP_ATOMIC); usb_rx_callback() 678 struct sasem_context *context = NULL; sasem_probe() local 740 context = kzalloc(sizeof(struct sasem_context), GFP_KERNEL); sasem_probe() 741 if (!context) { sasem_probe() 779 mutex_init(&context->ctx_lock); sasem_probe() 786 driver->data = context; sasem_probe() 793 mutex_lock(&context->ctx_lock); sasem_probe() 810 context->dev = dev; sasem_probe() 811 context->dev_present = 1; sasem_probe() 812 context->rx_endpoint = rx_endpoint; sasem_probe() 813 context->rx_urb = rx_urb; sasem_probe() 815 context->tx_endpoint = tx_endpoint; sasem_probe() 816 context->tx_urb = tx_urb; sasem_probe() 817 context->vfd_contrast = 1000; /* range 0 - 1000 */ sasem_probe() 819 context->driver = driver; sasem_probe() 821 usb_set_intfdata(interface, context); sasem_probe() 839 mutex_unlock(&context->ctx_lock); sasem_probe() 860 kfree(context); sasem_probe() 861 context = NULL; sasem_probe() 877 struct sasem_context *context; sasem_disconnect() local 882 context = usb_get_intfdata(interface); sasem_disconnect() 883 mutex_lock(&context->ctx_lock); sasem_disconnect() 889 context->dev_present = 0; sasem_disconnect() 892 usb_kill_urb(context->rx_urb); sasem_disconnect() 895 if (atomic_read(&context->tx.busy)) { sasem_disconnect() 897 usb_kill_urb(context->tx_urb); sasem_disconnect() 898 wait_for_completion(&context->tx.finished); sasem_disconnect() 902 if (!context->ir_isopen) sasem_disconnect() 903 deregister_from_lirc(context); sasem_disconnect() 907 mutex_unlock(&context->ctx_lock); sasem_disconnect() 909 if (!context->ir_isopen && !context->vfd_isopen) sasem_disconnect() 910 delete_context(context); sasem_disconnect()
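incoming_packet() in lirc_sasem treats a code arriving within 250 ms of the previous press as a key repeat: it replays the saved code instead of the fresh buffer, then refreshes the timestamp. A standalone sketch of that repeat heuristic follows using gettimeofday(); the reduced context structure and the handle_packet() name are assumptions, not the driver's interface.

/* Sketch of the 250 ms repeat heuristic: within 250 ms of the previous
 * packet, reuse the previously saved code. */
#include <stdio.h>
#include <string.h>
#include <sys/time.h>

struct sasem_ctx {
    unsigned char lastcode[8];
    struct timeval presstime;
    int codesaved;
};

static void handle_packet(struct sasem_ctx *ctx, unsigned char buf[8])
{
    struct timeval tv;
    long ms;

    gettimeofday(&tv, NULL);
    ms = (tv.tv_sec - ctx->presstime.tv_sec) * 1000 +
         (tv.tv_usec - ctx->presstime.tv_usec) / 1000;

    if (ms < 250 && ctx->codesaved) {
        memcpy(buf, ctx->lastcode, 8);   /* repeat: replay the saved code */
    } else {
        memcpy(ctx->lastcode, buf, 8);   /* new press: remember this code */
        ctx->codesaved = 1;
    }
    ctx->presstime = tv;
    printf("deliver code %02x...\n", buf[0]);
}

int main(void)
{
    struct sasem_ctx ctx = { .codesaved = 0 };
    unsigned char pkt[8] = { 0x1a };

    handle_packet(&ctx, pkt);            /* first press: code is saved */
    handle_packet(&ctx, pkt);            /* within 250 ms: treated as a repeat */
    return 0;
}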
|
H A D | lirc_imon.c | 189 static void free_imon_context(struct imon_context *context) free_imon_context() argument 191 struct device *dev = context->driver->dev; free_imon_context() 193 usb_free_urb(context->tx_urb); free_imon_context() 194 usb_free_urb(context->rx_urb); free_imon_context() 195 lirc_buffer_free(context->driver->rbuf); free_imon_context() 196 kfree(context->driver->rbuf); free_imon_context() 197 kfree(context->driver); free_imon_context() 198 kfree(context); free_imon_context() 200 dev_dbg(dev, "%s: iMON context freed\n", __func__); free_imon_context() 203 static void deregister_from_lirc(struct imon_context *context) deregister_from_lirc() argument 206 int minor = context->driver->minor; deregister_from_lirc() 210 dev_err(&context->usbdev->dev, deregister_from_lirc() 213 dev_info(&context->usbdev->dev, deregister_from_lirc() 225 struct imon_context *context = NULL; display_open() local 240 context = usb_get_intfdata(interface); display_open() 242 if (!context) { display_open() 243 dev_err(&interface->dev, "no context found for minor %d\n", display_open() 249 mutex_lock(&context->ctx_lock); display_open() 251 if (!context->display) { display_open() 255 } else if (context->display_isopen) { display_open() 260 context->display_isopen = 1; display_open() 261 file->private_data = context; display_open() 262 dev_info(context->driver->dev, "display port opened\n"); display_open() 265 mutex_unlock(&context->ctx_lock); display_open() 278 struct imon_context *context = NULL; display_close() local 281 context = file->private_data; display_close() 283 if (!context) { display_close() 284 pr_err("%s: no context for device\n", __func__); display_close() 288 mutex_lock(&context->ctx_lock); display_close() 290 if (!context->display) { display_close() 291 dev_err(&context->usbdev->dev, display_close() 294 } else if (!context->display_isopen) { display_close() 295 dev_err(&context->usbdev->dev, display_close() 299 context->display_isopen = 0; display_close() 300 dev_info(context->driver->dev, "display port closed\n"); display_close() 301 if (!context->dev_present && !context->ir_isopen) { display_close() 304 * open. If IR port is open, context will be deleted by display_close() 307 mutex_unlock(&context->ctx_lock); display_close() 308 free_imon_context(context); display_close() 313 mutex_unlock(&context->ctx_lock); display_close() 319 * with context->ctx_lock held. 
321 static int send_packet(struct imon_context *context) send_packet() argument 328 pipe = usb_sndintpipe(context->usbdev, send_packet() 329 context->tx_endpoint->bEndpointAddress); send_packet() 330 interval = context->tx_endpoint->bInterval; send_packet() 332 usb_fill_int_urb(context->tx_urb, context->usbdev, pipe, send_packet() 333 context->usb_tx_buf, send_packet() 334 sizeof(context->usb_tx_buf), send_packet() 335 usb_tx_callback, context, interval); send_packet() 337 context->tx_urb->actual_length = 0; send_packet() 339 init_completion(&context->tx.finished); send_packet() 340 atomic_set(&context->tx.busy, 1); send_packet() 342 retval = usb_submit_urb(context->tx_urb, GFP_KERNEL); send_packet() 344 atomic_set(&context->tx.busy, 0); send_packet() 345 dev_err(&context->usbdev->dev, "error submitting urb(%d)\n", send_packet() 349 mutex_unlock(&context->ctx_lock); send_packet() 351 &context->tx.finished); send_packet() 353 dev_err(&context->usbdev->dev, send_packet() 355 mutex_lock(&context->ctx_lock); send_packet() 357 retval = context->tx.status; send_packet() 359 dev_err(&context->usbdev->dev, send_packet() 384 struct imon_context *context; vfd_write() local 389 context = file->private_data; vfd_write() 390 if (!context) { vfd_write() 391 pr_err("%s: no context for device\n", __func__); vfd_write() 395 mutex_lock(&context->ctx_lock); vfd_write() 397 if (!context->dev_present) { vfd_write() 398 dev_err(&context->usbdev->dev, vfd_write() 405 dev_err(&context->usbdev->dev, vfd_write() 418 memcpy(context->tx.data_buf, data_buf, n_bytes); vfd_write() 422 context->tx.data_buf[i] = ' '; vfd_write() 425 context->tx.data_buf[i] = 0xFF; vfd_write() 431 memcpy(context->usb_tx_buf, context->tx.data_buf + offset, 7); vfd_write() 432 context->usb_tx_buf[7] = (unsigned char) seq; vfd_write() 434 retval = send_packet(context); vfd_write() 436 dev_err(&context->usbdev->dev, vfd_write() 447 if (context->vfd_proto_6p) { vfd_write() 449 memcpy(context->usb_tx_buf, &vfd_packet6, sizeof(vfd_packet6)); vfd_write() 450 context->usb_tx_buf[7] = (unsigned char) seq; vfd_write() 451 retval = send_packet(context); vfd_write() 453 dev_err(&context->usbdev->dev, vfd_write() 459 mutex_unlock(&context->ctx_lock); vfd_write() 470 struct imon_context *context; usb_tx_callback() local 474 context = (struct imon_context *)urb->context; usb_tx_callback() 475 if (!context) usb_tx_callback() 478 context->tx.status = urb->status; usb_tx_callback() 481 atomic_set(&context->tx.busy, 0); usb_tx_callback() 482 complete(&context->tx.finished); usb_tx_callback() 490 struct imon_context *context; ir_open() local 495 context = data; ir_open() 498 context->rx.count = 0; ir_open() 499 context->rx.initial_space = 1; ir_open() 500 context->rx.prev_bit = 0; ir_open() 502 context->ir_isopen = 1; ir_open() 503 dev_info(context->driver->dev, "IR port opened\n"); ir_open() 514 struct imon_context *context; ir_close() local 516 context = data; ir_close() 517 if (!context) { ir_close() 518 pr_err("%s: no context for device\n", __func__); ir_close() 522 mutex_lock(&context->ctx_lock); ir_close() 524 context->ir_isopen = 0; ir_close() 525 dev_info(context->driver->dev, "IR port closed\n"); ir_close() 527 if (!context->dev_present) { ir_close() 532 deregister_from_lirc(context); ir_close() 534 if (!context->display_isopen) { ir_close() 535 mutex_unlock(&context->ctx_lock); ir_close() 536 free_imon_context(context); ir_close() 540 * If display port is open, context will be deleted by ir_close() 545 mutex_unlock(&context->ctx_lock); ir_close() 552 
static void submit_data(struct imon_context *context) submit_data() argument 555 int value = context->rx.count; submit_data() 558 dev_dbg(context->driver->dev, "submitting data to LIRC\n"); submit_data() 562 if (context->rx.prev_bit) submit_data() 568 lirc_buffer_write(context->driver->rbuf, buf); submit_data() 569 wake_up(&context->driver->rbuf->wait_poll); submit_data() 575 static void imon_incoming_packet(struct imon_context *context, imon_incoming_packet() argument 580 struct device *dev = context->driver->dev; imon_incoming_packet() 587 if (!context->ir_isopen) imon_incoming_packet() 612 if (buf[7] == 1 && context->rx.initial_space) { imon_incoming_packet() 614 context->rx.prev_bit = 0; imon_incoming_packet() 615 context->rx.count = 4; imon_incoming_packet() 616 submit_data(context); imon_incoming_packet() 617 context->rx.count = 0; imon_incoming_packet() 625 if (curr_bit != context->rx.prev_bit) { imon_incoming_packet() 626 if (context->rx.count) { imon_incoming_packet() 627 submit_data(context); imon_incoming_packet() 628 context->rx.count = 0; imon_incoming_packet() 630 context->rx.prev_bit = curr_bit; imon_incoming_packet() 632 ++context->rx.count; imon_incoming_packet() 638 if (context->rx.count) { imon_incoming_packet() 639 submit_data(context); imon_incoming_packet() 640 context->rx.count = 0; imon_incoming_packet() 642 context->rx.initial_space = context->rx.prev_bit; imon_incoming_packet() 651 struct imon_context *context; usb_rx_callback() local 657 context = (struct imon_context *)urb->context; usb_rx_callback() 658 if (!context) usb_rx_callback() 666 imon_incoming_packet(context, urb, intfnum); usb_rx_callback() 670 dev_warn(context->driver->dev, "imon %s: status(%d): ignored\n", usb_rx_callback() 675 usb_submit_urb(context->rx_urb, GFP_ATOMIC); usb_rx_callback() 700 struct imon_context *context = NULL; imon_probe() local 707 context = kzalloc(sizeof(struct imon_context), GFP_KERNEL); imon_probe() 708 if (!context) imon_probe() 716 context->display = 0; imon_probe() 718 context->display = 1; imon_probe() 765 if (context->display == 0) { imon_probe() 811 mutex_init(&context->ctx_lock); imon_probe() 812 context->vfd_proto_6p = vfd_proto_6p; imon_probe() 819 driver->data = context; imon_probe() 826 mutex_lock(&context->ctx_lock); imon_probe() 828 context->driver = driver; imon_probe() 843 context->usbdev = usbdev; imon_probe() 844 context->dev_present = 1; imon_probe() 845 context->rx_endpoint = rx_endpoint; imon_probe() 846 context->rx_urb = rx_urb; imon_probe() 852 context->tx_endpoint = tx_endpoint; imon_probe() 853 context->tx_urb = tx_urb; imon_probe() 856 context->display = 1; imon_probe() 858 usb_fill_int_urb(context->rx_urb, context->usbdev, imon_probe() 859 usb_rcvintpipe(context->usbdev, imon_probe() 860 context->rx_endpoint->bEndpointAddress), imon_probe() 861 context->usb_rx_buf, sizeof(context->usb_rx_buf), imon_probe() 862 usb_rx_callback, context, imon_probe() 863 context->rx_endpoint->bInterval); imon_probe() 865 retval = usb_submit_urb(context->rx_urb, GFP_KERNEL); imon_probe() 871 usb_set_intfdata(interface, context); imon_probe() 873 if (context->display && ifnum == 0) { imon_probe() 908 kfree(context); imon_probe() 909 context = NULL; imon_probe() 922 struct imon_context *context; imon_disconnect() local 928 context = usb_get_intfdata(interface); imon_disconnect() 931 mutex_lock(&context->ctx_lock); imon_disconnect() 936 if (atomic_read(&context->tx.busy)) { imon_disconnect() 937 usb_kill_urb(context->tx_urb); imon_disconnect() 938 
complete_all(&context->tx.finished); imon_disconnect() 941 context->dev_present = 0; imon_disconnect() 942 usb_kill_urb(context->rx_urb); imon_disconnect() 943 if (context->display) imon_disconnect() 946 if (!context->ir_isopen && !context->dev_present) { imon_disconnect() 947 deregister_from_lirc(context); imon_disconnect() 948 mutex_unlock(&context->ctx_lock); imon_disconnect() 949 if (!context->display_isopen) imon_disconnect() 950 free_imon_context(context); imon_disconnect() 952 mutex_unlock(&context->ctx_lock); imon_disconnect() 962 struct imon_context *context = usb_get_intfdata(intf); imon_suspend() local 964 usb_kill_urb(context->rx_urb); imon_suspend() 971 struct imon_context *context = usb_get_intfdata(intf); imon_resume() local 973 usb_fill_int_urb(context->rx_urb, context->usbdev, imon_resume() 974 usb_rcvintpipe(context->usbdev, imon_resume() 975 context->rx_endpoint->bEndpointAddress), imon_resume() 976 context->usb_rx_buf, sizeof(context->usb_rx_buf), imon_resume() 977 usb_rx_callback, context, imon_resume() 978 context->rx_endpoint->bInterval); imon_resume() 980 return usb_submit_urb(context->rx_urb, GFP_ATOMIC); imon_resume()
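The iMON receive path turns the incoming bit stream into LIRC samples: consecutive identical bits are counted in context->rx.count, and when the level changes submit_data() emits the accumulated run with a pulse flag for mark periods. A compact sketch of that run-length encoding follows over a plain bit array; BIT_DURATION, PULSE_BIT's value and the submit_run() name are assumptions rather than the driver's exact constants.

/* Run-length sketch: count consecutive identical bits and emit one
 * (duration, level) sample per run, flagging pulses. */
#include <stdint.h>
#include <stdio.h>

#define PULSE_BIT     0x01000000u
#define BIT_DURATION  250            /* microseconds per bit, assumed */

static void submit_run(int count, int level)
{
    uint32_t value = (uint32_t)(count * BIT_DURATION);

    if (level)                        /* a mark (pulse) run */
        value |= PULSE_BIT;
    printf("lirc sample: %s %u us\n", level ? "pulse" : "space",
           value & ~PULSE_BIT);
}

static void decode_bits(const int *bits, int n)
{
    int prev = bits[0], count = 0;

    for (int i = 0; i < n; i++) {
        if (bits[i] != prev) {        /* level changed: flush the run */
            submit_run(count, prev);
            prev = bits[i];
            count = 0;
        }
        count++;
    }
    submit_run(count, prev);          /* flush the final run */
}

int main(void)
{
    int bits[] = { 1, 1, 1, 0, 0, 1, 0, 0, 0, 0 };

    decode_bits(bits, sizeof(bits) / sizeof(bits[0]));
    return 0;
}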
|
/linux-4.4.14/drivers/misc/vmw_vmci/ |
H A D | vmci_context.c | 34 * These, along with context lookup, are protected by the 39 spinlock_t lock; /* Spinlock for context list operations */ 48 static void ctx_signal_notify(struct vmci_ctx *context) ctx_signal_notify() argument 50 *context->notify = true; ctx_signal_notify() 53 static void ctx_clear_notify(struct vmci_ctx *context) ctx_clear_notify() argument 55 *context->notify = false; ctx_clear_notify() 62 static void ctx_clear_notify_call(struct vmci_ctx *context) ctx_clear_notify_call() argument 64 if (context->pending_datagrams == 0 && ctx_clear_notify_call() 65 vmci_handle_arr_get_size(context->pending_doorbell_array) == 0) ctx_clear_notify_call() 66 ctx_clear_notify(context); ctx_clear_notify_call() 70 * Sets the context's notify flag iff datagrams are pending for this 71 * context. Called from vmci_setup_notify(). 73 void vmci_ctx_check_signal_notify(struct vmci_ctx *context) vmci_ctx_check_signal_notify() argument 75 spin_lock(&context->lock); vmci_ctx_check_signal_notify() 76 if (context->pending_datagrams) vmci_ctx_check_signal_notify() 77 ctx_signal_notify(context); vmci_ctx_check_signal_notify() 78 spin_unlock(&context->lock); vmci_ctx_check_signal_notify() 82 * Allocates and initializes a VMCI context. 89 struct vmci_ctx *context; vmci_ctx_create() local 93 pr_devel("Invalid context ID for VMCI context\n"); vmci_ctx_create() 99 pr_devel("Invalid flag (flags=0x%x) for VMCI context\n", vmci_ctx_create() 111 context = kzalloc(sizeof(*context), GFP_KERNEL); vmci_ctx_create() 112 if (!context) { vmci_ctx_create() 113 pr_warn("Failed to allocate memory for VMCI context\n"); vmci_ctx_create() 118 kref_init(&context->kref); vmci_ctx_create() 119 spin_lock_init(&context->lock); vmci_ctx_create() 120 INIT_LIST_HEAD(&context->list_item); vmci_ctx_create() 121 INIT_LIST_HEAD(&context->datagram_queue); vmci_ctx_create() 122 INIT_LIST_HEAD(&context->notifier_list); vmci_ctx_create() 124 /* Initialize host-specific VMCI context. */ vmci_ctx_create() 125 init_waitqueue_head(&context->host_context.wait_queue); vmci_ctx_create() 127 context->queue_pair_array = vmci_handle_arr_create(0); vmci_ctx_create() 128 if (!context->queue_pair_array) { vmci_ctx_create() 133 context->doorbell_array = vmci_handle_arr_create(0); vmci_ctx_create() 134 if (!context->doorbell_array) { vmci_ctx_create() 139 context->pending_doorbell_array = vmci_handle_arr_create(0); vmci_ctx_create() 140 if (!context->pending_doorbell_array) { vmci_ctx_create() 145 context->user_version = user_version; vmci_ctx_create() 147 context->priv_flags = priv_flags; vmci_ctx_create() 150 context->cred = get_cred(cred); vmci_ctx_create() 152 context->notify = &ctx_dummy_notify; vmci_ctx_create() 153 context->notify_page = NULL; vmci_ctx_create() 156 * If we collide with an existing context we generate a new vmci_ctx_create() 169 context->cid = cid; vmci_ctx_create() 171 list_add_tail_rcu(&context->list_item, &ctx_list.head); vmci_ctx_create() 174 return context; vmci_ctx_create() 177 vmci_handle_arr_destroy(context->doorbell_array); vmci_ctx_create() 179 vmci_handle_arr_destroy(context->queue_pair_array); vmci_ctx_create() 181 kfree(context); vmci_ctx_create() 187 * Destroy VMCI context. 
189 void vmci_ctx_destroy(struct vmci_ctx *context) vmci_ctx_destroy() argument 192 list_del_rcu(&context->list_item); vmci_ctx_destroy() 196 vmci_ctx_put(context); vmci_ctx_destroy() 261 pr_devel("Failed to enqueue event datagram (type=%d) for context (ID=0x%x)\n", ctx_fire_notification() 263 ev.msg.hdr.dst.context); ctx_fire_notification() 279 struct vmci_ctx *context; vmci_ctx_pending_datagrams() local 281 context = vmci_ctx_get(cid); vmci_ctx_pending_datagrams() 282 if (context == NULL) vmci_ctx_pending_datagrams() 285 spin_lock(&context->lock); vmci_ctx_pending_datagrams() 287 *pending = context->pending_datagrams; vmci_ctx_pending_datagrams() 288 spin_unlock(&context->lock); vmci_ctx_pending_datagrams() 289 vmci_ctx_put(context); vmci_ctx_pending_datagrams() 295 * Queues a VMCI datagram for the appropriate target VM context. 300 struct vmci_ctx *context; vmci_ctx_enqueue_datagram() local 310 /* Get the target VM's VMCI context. */ vmci_ctx_enqueue_datagram() 311 context = vmci_ctx_get(cid); vmci_ctx_enqueue_datagram() 312 if (!context) { vmci_ctx_enqueue_datagram() 313 pr_devel("Invalid context (ID=0x%x)\n", cid); vmci_ctx_enqueue_datagram() 321 vmci_ctx_put(context); vmci_ctx_enqueue_datagram() 329 spin_lock(&context->lock); vmci_ctx_enqueue_datagram() 340 if (context->datagram_queue_size + vmci_dg_size >= vmci_ctx_enqueue_datagram() 346 context->datagram_queue_size + vmci_dg_size >= vmci_ctx_enqueue_datagram() 348 spin_unlock(&context->lock); vmci_ctx_enqueue_datagram() 349 vmci_ctx_put(context); vmci_ctx_enqueue_datagram() 355 list_add(&dq_entry->list_item, &context->datagram_queue); vmci_ctx_enqueue_datagram() 356 context->pending_datagrams++; vmci_ctx_enqueue_datagram() 357 context->datagram_queue_size += vmci_dg_size; vmci_ctx_enqueue_datagram() 358 ctx_signal_notify(context); vmci_ctx_enqueue_datagram() 359 wake_up(&context->host_context.wait_queue); vmci_ctx_enqueue_datagram() 360 spin_unlock(&context->lock); vmci_ctx_enqueue_datagram() 361 vmci_ctx_put(context); vmci_ctx_enqueue_datagram() 367 * Verifies whether a context with the specified context ID exists. 369 * using this data as context can appear and disappear at any time. 373 struct vmci_ctx *context; vmci_ctx_exists() local 378 list_for_each_entry_rcu(context, &ctx_list.head, list_item) { vmci_ctx_exists() 379 if (context->cid == cid) { vmci_ctx_exists() 390 * Retrieves VMCI context corresponding to the given cid. 394 struct vmci_ctx *c, *context = NULL; vmci_ctx_get() local 403 * The context owner drops its own reference to the vmci_ctx_get() 404 * context only after removing it from the list and vmci_ctx_get() 410 context = c; vmci_ctx_get() 411 kref_get(&context->kref); vmci_ctx_get() 417 return context; vmci_ctx_get() 421 * Deallocates all parts of a context data structure. This 422 * function doesn't lock the context, because it assumes that 423 * the caller was holding the last reference to context. 427 struct vmci_ctx *context = container_of(kref, struct vmci_ctx, kref); ctx_free_ctx() local 434 * context is dying. ctx_free_ctx() 436 ctx_fire_notification(context->cid, context->priv_flags); ctx_free_ctx() 439 * Cleanup all queue pair resources attached to context. 
If ctx_free_ctx() 443 temp_handle = vmci_handle_arr_get_entry(context->queue_pair_array, 0); ctx_free_ctx() 446 context) < VMCI_SUCCESS) { ctx_free_ctx() 453 vmci_handle_arr_remove_entry(context->queue_pair_array, ctx_free_ctx() 457 vmci_handle_arr_get_entry(context->queue_pair_array, 0); ctx_free_ctx() 462 * this is the only thread having a reference to the context. ctx_free_ctx() 465 &context->datagram_queue, list_item) { ctx_free_ctx() 473 &context->notifier_list, node) { ctx_free_ctx() 478 vmci_handle_arr_destroy(context->queue_pair_array); ctx_free_ctx() 479 vmci_handle_arr_destroy(context->doorbell_array); ctx_free_ctx() 480 vmci_handle_arr_destroy(context->pending_doorbell_array); ctx_free_ctx() 481 vmci_ctx_unset_notify(context); ctx_free_ctx() 482 if (context->cred) ctx_free_ctx() 483 put_cred(context->cred); ctx_free_ctx() 484 kfree(context); ctx_free_ctx() 488 * Drops reference to VMCI context. If this is the last reference to 489 * the context it will be deallocated. A context is created with 491 * the context list before its reference count is decremented. Thus, 493 * it (they need the entry in the context list for that), and so there 496 void vmci_ctx_put(struct vmci_ctx *context) vmci_ctx_put() argument 498 kref_put(&context->kref, ctx_free_ctx); vmci_ctx_put() 509 int vmci_ctx_dequeue_datagram(struct vmci_ctx *context, vmci_ctx_dequeue_datagram() argument 518 spin_lock(&context->lock); vmci_ctx_dequeue_datagram() 519 if (context->pending_datagrams == 0) { vmci_ctx_dequeue_datagram() 520 ctx_clear_notify_call(context); vmci_ctx_dequeue_datagram() 521 spin_unlock(&context->lock); vmci_ctx_dequeue_datagram() 526 list_item = context->datagram_queue.next; vmci_ctx_dequeue_datagram() 534 spin_unlock(&context->lock); vmci_ctx_dequeue_datagram() 541 context->pending_datagrams--; vmci_ctx_dequeue_datagram() 542 context->datagram_queue_size -= dq_entry->dg_size; vmci_ctx_dequeue_datagram() 543 if (context->pending_datagrams == 0) { vmci_ctx_dequeue_datagram() 544 ctx_clear_notify_call(context); vmci_ctx_dequeue_datagram() 552 list_item = context->datagram_queue.next; vmci_ctx_dequeue_datagram() 563 spin_unlock(&context->lock); vmci_ctx_dequeue_datagram() 577 void vmci_ctx_unset_notify(struct vmci_ctx *context) vmci_ctx_unset_notify() argument 581 spin_lock(&context->lock); vmci_ctx_unset_notify() 583 notify_page = context->notify_page; vmci_ctx_unset_notify() 584 context->notify = &ctx_dummy_notify; vmci_ctx_unset_notify() 585 context->notify_page = NULL; vmci_ctx_unset_notify() 587 spin_unlock(&context->lock); vmci_ctx_unset_notify() 601 struct vmci_ctx *context; vmci_ctx_add_notification() local 606 context = vmci_ctx_get(context_id); vmci_ctx_add_notification() 607 if (!context) vmci_ctx_add_notification() 617 if (context->priv_flags & VMCI_PRIVILEGE_FLAG_RESTRICTED) { vmci_ctx_add_notification() 631 spin_lock(&context->lock); vmci_ctx_add_notification() 633 list_for_each_entry(n, &context->notifier_list, node) { vmci_ctx_add_notification() 644 list_add_tail_rcu(¬ifier->node, &context->notifier_list); vmci_ctx_add_notification() 645 context->n_notifiers++; vmci_ctx_add_notification() 649 spin_unlock(&context->lock); vmci_ctx_add_notification() 652 vmci_ctx_put(context); vmci_ctx_add_notification() 657 * Remove remote_cid from current context's list of contexts it is 662 struct vmci_ctx *context; vmci_ctx_remove_notification() local 667 context = vmci_ctx_get(context_id); vmci_ctx_remove_notification() 668 if (!context) vmci_ctx_remove_notification() 673 spin_lock(&context->lock); 
vmci_ctx_remove_notification() 675 &context->notifier_list, node) { vmci_ctx_remove_notification() 678 context->n_notifiers--; vmci_ctx_remove_notification() 683 spin_unlock(&context->lock); vmci_ctx_remove_notification() 690 vmci_ctx_put(context); vmci_ctx_remove_notification() 695 static int vmci_ctx_get_chkpt_notifiers(struct vmci_ctx *context, vmci_ctx_get_chkpt_notifiers() argument 703 if (context->n_notifiers == 0) { vmci_ctx_get_chkpt_notifiers() 709 data_size = context->n_notifiers * sizeof(*notifiers); vmci_ctx_get_chkpt_notifiers() 719 list_for_each_entry(entry, &context->notifier_list, node) vmci_ctx_get_chkpt_notifiers() 720 notifiers[i++] = entry->handle.context; vmci_ctx_get_chkpt_notifiers() 727 static int vmci_ctx_get_chkpt_doorbells(struct vmci_ctx *context, vmci_ctx_get_chkpt_doorbells() argument 734 n_doorbells = vmci_handle_arr_get_size(context->doorbell_array); vmci_ctx_get_chkpt_doorbells() 748 context->doorbell_array, i); vmci_ctx_get_chkpt_doorbells() 761 * Get current context's checkpoint state of given type. 768 struct vmci_ctx *context; vmci_ctx_get_chkpt_state() local 771 context = vmci_ctx_get(context_id); vmci_ctx_get_chkpt_state() 772 if (!context) vmci_ctx_get_chkpt_state() 775 spin_lock(&context->lock); vmci_ctx_get_chkpt_state() 779 result = vmci_ctx_get_chkpt_notifiers(context, buf_size, pbuf); vmci_ctx_get_chkpt_state() 794 result = vmci_ctx_get_chkpt_doorbells(context, buf_size, pbuf); vmci_ctx_get_chkpt_state() 803 spin_unlock(&context->lock); vmci_ctx_get_chkpt_state() 804 vmci_ctx_put(context); vmci_ctx_get_chkpt_state() 810 * Set current context's checkpoint state of given type. 850 * Retrieves the specified context's pending notifications in the 860 struct vmci_ctx *context; vmci_ctx_rcv_notifications_get() local 863 context = vmci_ctx_get(context_id); vmci_ctx_rcv_notifications_get() 864 if (context == NULL) vmci_ctx_rcv_notifications_get() 867 spin_lock(&context->lock); vmci_ctx_rcv_notifications_get() 869 *db_handle_array = context->pending_doorbell_array; vmci_ctx_rcv_notifications_get() 870 context->pending_doorbell_array = vmci_handle_arr_create(0); vmci_ctx_rcv_notifications_get() 871 if (!context->pending_doorbell_array) { vmci_ctx_rcv_notifications_get() 872 context->pending_doorbell_array = *db_handle_array; vmci_ctx_rcv_notifications_get() 878 spin_unlock(&context->lock); vmci_ctx_rcv_notifications_get() 879 vmci_ctx_put(context); vmci_ctx_rcv_notifications_get() 895 struct vmci_ctx *context = vmci_ctx_get(context_id); vmci_ctx_rcv_notifications_release() local 897 spin_lock(&context->lock); vmci_ctx_rcv_notifications_release() 903 * holding the context lock, so we transfer any new pending vmci_ctx_rcv_notifications_release() 909 context->pending_doorbell_array); vmci_ctx_rcv_notifications_release() 917 context->pending_doorbell_array); vmci_ctx_rcv_notifications_release() 919 vmci_handle_arr_destroy(context->pending_doorbell_array); vmci_ctx_rcv_notifications_release() 920 context->pending_doorbell_array = db_handle_array; vmci_ctx_rcv_notifications_release() 923 ctx_clear_notify_call(context); vmci_ctx_rcv_notifications_release() 925 spin_unlock(&context->lock); vmci_ctx_rcv_notifications_release() 926 vmci_ctx_put(context); vmci_ctx_rcv_notifications_release() 937 * context. Only doorbell handles registered can be notified. 
941 struct vmci_ctx *context; vmci_ctx_dbell_create() local 947 context = vmci_ctx_get(context_id); vmci_ctx_dbell_create() 948 if (context == NULL) vmci_ctx_dbell_create() 951 spin_lock(&context->lock); vmci_ctx_dbell_create() 952 if (!vmci_handle_arr_has_entry(context->doorbell_array, handle)) { vmci_ctx_dbell_create() 953 vmci_handle_arr_append_entry(&context->doorbell_array, handle); vmci_ctx_dbell_create() 959 spin_unlock(&context->lock); vmci_ctx_dbell_create() 960 vmci_ctx_put(context); vmci_ctx_dbell_create() 971 struct vmci_ctx *context; vmci_ctx_dbell_destroy() local 977 context = vmci_ctx_get(context_id); vmci_ctx_dbell_destroy() 978 if (context == NULL) vmci_ctx_dbell_destroy() 981 spin_lock(&context->lock); vmci_ctx_dbell_destroy() 983 vmci_handle_arr_remove_entry(context->doorbell_array, handle); vmci_ctx_dbell_destroy() 984 vmci_handle_arr_remove_entry(context->pending_doorbell_array, handle); vmci_ctx_dbell_destroy() 985 spin_unlock(&context->lock); vmci_ctx_dbell_destroy() 987 vmci_ctx_put(context); vmci_ctx_dbell_destroy() 999 struct vmci_ctx *context; vmci_ctx_dbell_destroy_all() local 1005 context = vmci_ctx_get(context_id); vmci_ctx_dbell_destroy_all() 1006 if (context == NULL) vmci_ctx_dbell_destroy_all() 1009 spin_lock(&context->lock); vmci_ctx_dbell_destroy_all() 1011 struct vmci_handle_arr *arr = context->doorbell_array; vmci_ctx_dbell_destroy_all() 1015 struct vmci_handle_arr *arr = context->pending_doorbell_array; vmci_ctx_dbell_destroy_all() 1018 spin_unlock(&context->lock); vmci_ctx_dbell_destroy_all() 1020 vmci_ctx_put(context); vmci_ctx_dbell_destroy_all() 1027 * specified source context. The notification of doorbells are 1030 * of sender rights than those assigned to the sending context 1031 * itself, the host context is required to specify a different 1033 * the source context. 1045 /* Get the target VM's VMCI context. */ vmci_ctx_notify_dbell() 1046 dst_context = vmci_ctx_get(handle.context); vmci_ctx_notify_dbell() 1048 pr_devel("Invalid context (ID=0x%x)\n", handle.context); vmci_ctx_notify_dbell() 1052 if (src_cid != handle.context) { vmci_ctx_notify_dbell() 1056 VMCI_CONTEXT_IS_VM(handle.context)) { vmci_ctx_notify_dbell() 1058 src_cid, handle.context); vmci_ctx_notify_dbell() 1066 handle.context, handle.resource); vmci_ctx_notify_dbell() 1081 if (handle.context == VMCI_HOST_CONTEXT_ID) { vmci_ctx_notify_dbell() 1112 bool vmci_ctx_supports_host_qp(struct vmci_ctx *context) vmci_ctx_supports_host_qp() argument 1114 return context && context->user_version >= VMCI_VERSION_HOSTQP; vmci_ctx_supports_host_qp() 1119 * the context. 1121 int vmci_ctx_qp_create(struct vmci_ctx *context, struct vmci_handle handle) vmci_ctx_qp_create() argument 1125 if (context == NULL || vmci_handle_is_invalid(handle)) vmci_ctx_qp_create() 1128 if (!vmci_handle_arr_has_entry(context->queue_pair_array, handle)) { vmci_ctx_qp_create() 1129 vmci_handle_arr_append_entry(&context->queue_pair_array, vmci_ctx_qp_create() 1143 int vmci_ctx_qp_destroy(struct vmci_ctx *context, struct vmci_handle handle) vmci_ctx_qp_destroy() argument 1147 if (context == NULL || vmci_handle_is_invalid(handle)) vmci_ctx_qp_destroy() 1150 hndl = vmci_handle_arr_remove_entry(context->queue_pair_array, handle); vmci_ctx_qp_destroy() 1158 * with the given context. 
1160 bool vmci_ctx_qp_exists(struct vmci_ctx *context, struct vmci_handle handle) vmci_ctx_qp_exists() argument 1162 if (context == NULL || vmci_handle_is_invalid(handle)) vmci_ctx_qp_exists() 1165 return vmci_handle_arr_has_entry(context->queue_pair_array, handle); vmci_ctx_qp_exists() 1170 * @context_id: The context ID of the VMCI context. 1172 * Retrieves privilege flags of the given VMCI context ID. 1178 struct vmci_ctx *context; vmci_context_get_priv_flags() local 1180 context = vmci_ctx_get(context_id); vmci_context_get_priv_flags() 1181 if (!context) vmci_context_get_priv_flags() 1184 flags = context->priv_flags; vmci_context_get_priv_flags() 1185 vmci_ctx_put(context); vmci_context_get_priv_flags() 1193 * vmci_is_context_owner() - Determines if user is the context owner 1194 * @context_id: The context ID of the VMCI context. 1197 * Determines whether a given UID is the owner of a given VMCI context. 1204 struct vmci_ctx *context = vmci_ctx_get(context_id); vmci_is_context_owner() local 1205 if (context) { vmci_is_context_owner() 1206 if (context->cred) vmci_is_context_owner() 1207 is_owner = uid_eq(context->cred->uid, uid); vmci_is_context_owner() 1208 vmci_ctx_put(context); vmci_is_context_owner()
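The vmci_context.c excerpt above repeats one pattern for every doorbell, notifier and checkpoint operation: look the context up by ID with vmci_ctx_get() (which takes a reference), guard the per-context arrays with context->lock, and drop the reference with vmci_ctx_put(). A minimal sketch of that shape, using only helpers named in the excerpt; the error constants are the stock VMCI ones, and the function itself is illustrative rather than the driver's code.

static int example_ctx_add_doorbell(u32 context_id, struct vmci_handle handle)
{
	struct vmci_ctx *context;
	int result = VMCI_SUCCESS;

	context = vmci_ctx_get(context_id);         /* takes a reference */
	if (!context)
		return VMCI_ERROR_NOT_FOUND;

	spin_lock(&context->lock);                  /* arrays are lock-protected */
	if (!vmci_handle_arr_has_entry(context->doorbell_array, handle))
		vmci_handle_arr_append_entry(&context->doorbell_array, handle);
	else
		result = VMCI_ERROR_DUPLICATE_ENTRY;
	spin_unlock(&context->lock);

	vmci_ctx_put(context);                      /* drops the reference */
	return result;
}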
|
H A D | vmci_route.c | 26 * devices. Will set the source context if it is invalid. 49 /* Must have a valid destination context. */ vmci_route() 50 if (VMCI_INVALID_ID == dst->context) vmci_route() 54 if (VMCI_HYPERVISOR_CONTEXT_ID == dst->context) { vmci_route() 71 /* And we cannot send if the source is the host context. */ vmci_route() 72 if (VMCI_HOST_CONTEXT_ID == src->context) vmci_route() 77 * respect it (both context and resource are invalid). vmci_route() 78 * However, if they passed only an invalid context, vmci_route() 80 * should set the real context here before passing it vmci_route() 83 if (VMCI_INVALID_ID == src->context && vmci_route() 85 src->context = vmci_get_context_id(); vmci_route() 93 if (VMCI_HOST_CONTEXT_ID == dst->context) { vmci_route() 100 * way to remove any ambiguity from the host context. vmci_route() 102 if (src->context == VMCI_HYPERVISOR_CONTEXT_ID) { vmci_route() 120 /* If no source context then use the current. */ vmci_route() 121 if (VMCI_INVALID_ID == src->context) vmci_route() 122 src->context = vmci_get_context_id(); vmci_route() 138 if (VMCI_INVALID_ID == src->context) { vmci_route() 141 * valid context. Otherwise we can use the vmci_route() 142 * host context. vmci_route() 147 src->context = VMCI_HOST_CONTEXT_ID; vmci_route() 160 /* It will have a context if it is meant for a guest. */ vmci_route() 161 if (vmci_ctx_exists(dst->context)) { vmci_route() 162 if (VMCI_INVALID_ID == src->context) { vmci_route() 165 * must have a valid context. vmci_route() 167 * context. vmci_route() 173 src->context = VMCI_HOST_CONTEXT_ID; vmci_route() 174 } else if (VMCI_CONTEXT_IS_VM(src->context) && vmci_route() 175 src->context != dst->context) { vmci_route() 181 * VM since there is a valid context. vmci_route() 193 * without an active context, and we can't vmci_route() 216 /* If no source context then use the current context. */ vmci_route() 217 if (VMCI_INVALID_ID == src->context) vmci_route() 218 src->context = vmci_get_context_id(); vmci_route()
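Two rules from the vmci_route.c excerpt are worth pulling out: a datagram must always name a valid destination context, while a missing source context is filled in with the caller's own ID before a route is chosen. A small sketch of just those checks, assuming the vmci_get_context_id() helper used in the excerpt:

static int example_validate_endpoints(struct vmci_handle *src,
				      const struct vmci_handle *dst)
{
	/* Must have a valid destination context. */
	if (VMCI_INVALID_ID == dst->context)
		return VMCI_ERROR_INVALID_ARGS;

	/* If no source context was given, default to the current one. */
	if (VMCI_INVALID_ID == src->context)
		src->context = vmci_get_context_id();

	return VMCI_SUCCESS;
}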
|
H A D | vmci_context.h | 58 * this context; e.g., VMX. 68 * is also accessed from the context 75 /* Doorbells created by context. */ 78 /* Doorbells pending for context. */ 81 /* Contexts current context is subscribing to. */ 93 /* VMCINotifyAddRemoveInfo: Used to add/remove remote context notifications. */ 99 /* VMCICptBufInfo: Used to set/get current context's checkpoint state. */ 137 void vmci_ctx_destroy(struct vmci_ctx *context); 139 bool vmci_ctx_supports_host_qp(struct vmci_ctx *context); 141 int vmci_ctx_dequeue_datagram(struct vmci_ctx *context, 145 void vmci_ctx_put(struct vmci_ctx *context); 155 int vmci_ctx_qp_create(struct vmci_ctx *context, struct vmci_handle handle); 156 int vmci_ctx_qp_destroy(struct vmci_ctx *context, struct vmci_handle handle); 157 bool vmci_ctx_qp_exists(struct vmci_ctx *context, struct vmci_handle handle); 159 void vmci_ctx_check_signal_notify(struct vmci_ctx *context); 160 void vmci_ctx_unset_notify(struct vmci_ctx *context); 175 static inline u32 vmci_ctx_get_id(struct vmci_ctx *context) vmci_ctx_get_id() argument 177 if (!context) vmci_ctx_get_id() 179 return context->cid; vmci_ctx_get_id()
|
H A D | vmci_datagram.c | 100 handle.context, handle.resource, result); dg_create_handle() 142 * Calls the specified callback in a delayed context. 160 * Dispatch datagram as a host, to the host, or other vm context. This 161 * function cannot dispatch to hypervisor context handlers. This should 174 if (dg->dst.context == VMCI_HYPERVISOR_CONTEXT_ID) dg_dispatch_as_host() 177 /* Check that source handle matches sending context. */ dg_dispatch_as_host() 178 if (dg->src.context != context_id) { dg_dispatch_as_host() 179 pr_devel("Sender context (ID=0x%x) is not owner of src datagram entry (handle=0x%x:0x%x)\n", dg_dispatch_as_host() 180 context_id, dg->src.context, dg->src.resource); dg_dispatch_as_host() 189 dg->src.context, dg->src.resource); dg_dispatch_as_host() 194 if (dg->dst.context == VMCI_HOST_CONTEXT_ID) { dg_dispatch_as_host() 199 if (dg->src.context == VMCI_HYPERVISOR_CONTEXT_ID && dg_dispatch_as_host() 208 dg->dst.context, dg->dst.resource); dg_dispatch_as_host() 225 dg->src.context == VMCI_HOST_CONTEXT_ID) { dg_dispatch_as_host() 258 /* Route to destination VM context. */ dg_dispatch_as_host() 261 if (context_id != dg->dst.context) { dg_dispatch_as_host() 264 (dg->dst.context))) { dg_dispatch_as_host() 268 * If the sending context is a VM, it dg_dispatch_as_host() 273 context_id, dg->dst.context); dg_dispatch_as_host() 283 retval = vmci_ctx_enqueue_datagram(dg->dst.context, new_dg); dg_dispatch_as_host() 340 dg->src.context, dg->dst.context, retval); vmci_datagram_dispatch() 371 dg->dst.context, dg->dst.resource); vmci_datagram_invoke_guest_handler() 401 * vmci_datagram_create_handle_priv() - Create host context datagram endpoint 409 * Creates a host context datagram endpoint and returns a handle to it. 435 * vmci_datagram_create_handle() - Create host context datagram endpoint 442 * Creates a host context datagram endpoint and returns a handle to 475 handle.context, handle.resource); vmci_datagram_destroy_handle()
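dg_dispatch_as_host() in the excerpt refuses to forward a datagram whose source handle does not belong to the sending context. That ownership check, isolated into a sketch (struct vmci_datagram fields as used above, return codes from the usual VMCI set):

static int example_check_src_owner(u32 sending_cid,
				   const struct vmci_datagram *dg)
{
	if (dg->src.context != sending_cid) {
		pr_devel("Sender context (ID=0x%x) is not owner of src datagram entry (handle=0x%x:0x%x)\n",
			 sending_cid, dg->src.context, dg->src.resource);
		return VMCI_ERROR_NO_ACCESS;
	}
	return VMCI_SUCCESS;
}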
|
H A D | vmci_host.c | 93 struct vmci_ctx *context; member in struct:vmci_host_dev 96 struct mutex lock; /* Mutex lock for vmci context access */ 146 vmci_ctx_destroy(vmci_host_dev->context); vmci_host_close() 147 vmci_host_dev->context = NULL; vmci_host_close() 152 * a context is created through the IOCTL_VMCI_INIT_CONTEXT vmci_host_close() 171 struct vmci_ctx *context = vmci_host_dev->context; vmci_host_poll() local 175 /* Check for VMCI calls to this VM context. */ vmci_host_poll() 177 poll_wait(filp, &context->host_context.wait_queue, vmci_host_poll() 180 spin_lock(&context->lock); vmci_host_poll() 181 if (context->pending_datagrams > 0 || vmci_host_poll() 183 context->pending_doorbell_array) > 0) { vmci_host_poll() 186 spin_unlock(&context->lock); vmci_host_poll() 221 * Sets up a given context for notify to work. Maps the notify 224 static int vmci_host_setup_notify(struct vmci_ctx *context, vmci_host_setup_notify() argument 229 if (context->notify_page) { vmci_host_setup_notify() 245 retval = get_user_pages_fast(uva, 1, 1, &context->notify_page); vmci_host_setup_notify() 247 context->notify_page = NULL; vmci_host_setup_notify() 254 context->notify = kmap(context->notify_page) + (uva & (PAGE_SIZE - 1)); vmci_host_setup_notify() 255 vmci_ctx_check_signal_notify(context); vmci_host_setup_notify() 322 vmci_host_dev->context = vmci_ctx_create(init_block.cid, vmci_host_do_init_context() 327 if (IS_ERR(vmci_host_dev->context)) { vmci_host_do_init_context() 328 retval = PTR_ERR(vmci_host_dev->context); vmci_host_do_init_context() 329 vmci_ioctl_err("error initializing context\n"); vmci_host_do_init_context() 337 init_block.cid = vmci_ctx_get_id(vmci_host_dev->context); vmci_host_do_init_context() 339 vmci_ctx_destroy(vmci_host_dev->context); vmci_host_do_init_context() 340 vmci_host_dev->context = NULL; vmci_host_do_init_context() 405 dg->dst.context, dg->dst.resource, vmci_host_do_send_datagram() 406 dg->src.context, dg->src.resource, vmci_host_do_send_datagram() 409 /* Get source context id. */ vmci_host_do_send_datagram() 410 cid = vmci_ctx_get_id(vmci_host_dev->context); vmci_host_do_send_datagram() 435 recv_info.result = vmci_ctx_dequeue_datagram(vmci_host_dev->context, vmci_host_do_receive_datagram() 463 cid = vmci_ctx_get_id(vmci_host_dev->context); vmci_host_do_alloc_queuepair() 482 vmci_host_dev->context); vmci_host_do_alloc_queuepair() 507 vmci_host_dev->context); vmci_host_do_alloc_queuepair() 513 vmci_host_dev->context); vmci_host_do_alloc_queuepair() 548 vmci_host_dev->context, vmci_host_do_queuepair_setva() 556 vmci_host_dev->context, 0); vmci_host_do_queuepair_setva() 613 vmci_host_dev->context); vmci_host_do_queuepair_setpf() 657 vmci_host_dev->context); vmci_host_do_qp_detach() 683 cid = vmci_ctx_get_id(vmci_host_dev->context); vmci_host_do_ctx_add_notify() 706 cid = vmci_ctx_get_id(vmci_host_dev->context); vmci_host_do_ctx_remove_notify() 730 cid = vmci_ctx_get_id(vmci_host_dev->context); vmci_host_do_ctx_get_cpt_state() 776 cid = vmci_ctx_get_id(vmci_host_dev->context); vmci_host_do_ctx_set_cpt_state() 812 vmci_host_setup_notify(vmci_host_dev->context, vmci_host_do_set_notify() 815 vmci_ctx_unset_notify(vmci_host_dev->context); vmci_host_do_set_notify() 843 cid = vmci_ctx_get_id(vmci_host_dev->context); vmci_host_do_notify_resource() 902 cid = vmci_ctx_get_id(vmci_host_dev->context); vmci_host_do_recv_notifications()
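vmci_host_poll() in the excerpt is the standard poll shape: register on the context's host wait queue, then report readability while either datagrams or doorbell notifications are pending. A condensed sketch using the field names visible above; the POLLIN mask is the natural choice here but is elided in the excerpt.

static unsigned int example_host_poll(struct file *filp, poll_table *wait,
				      struct vmci_ctx *context)
{
	unsigned int mask = 0;

	poll_wait(filp, &context->host_context.wait_queue, wait);

	spin_lock(&context->lock);
	if (context->pending_datagrams > 0 ||
	    vmci_handle_arr_get_size(context->pending_doorbell_array) > 0)
		mask = POLLIN;		/* something is waiting for userspace */
	spin_unlock(&context->lock);

	return mask;
}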
|
/linux-4.4.14/arch/tile/gxio/ |
H A D | uart.c | 28 int gxio_uart_init(gxio_uart_context_t *context, int uart_index) gxio_uart_init() argument 42 context->fd = fd; gxio_uart_init() 45 context->mmio_base = (void __force *) gxio_uart_init() 48 if (context->mmio_base == NULL) { gxio_uart_init() 49 hv_dev_close(context->fd); gxio_uart_init() 50 context->fd = -1; gxio_uart_init() 59 int gxio_uart_destroy(gxio_uart_context_t *context) gxio_uart_destroy() argument 61 iounmap((void __force __iomem *)(context->mmio_base)); gxio_uart_destroy() 62 hv_dev_close(context->fd); gxio_uart_destroy() 64 context->mmio_base = NULL; gxio_uart_destroy() 65 context->fd = -1; gxio_uart_destroy() 73 void gxio_uart_write(gxio_uart_context_t *context, uint64_t offset, gxio_uart_write() argument 76 __gxio_mmio_write(context->mmio_base + offset, word); gxio_uart_write() 82 uint64_t gxio_uart_read(gxio_uart_context_t *context, uint64_t offset) gxio_uart_read() argument 84 return __gxio_mmio_read(context->mmio_base + offset); gxio_uart_read()
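The UART context in uart.c follows the usual gxio lifetime: gxio_uart_init() opens the hypervisor device and maps its MMIO window into the context, gxio_uart_read()/gxio_uart_write() go through that mapping, and gxio_uart_destroy() unmaps and closes it. A usage sketch with the signatures shown above; the register offset is a placeholder, not a real register name.

#define UART_EXAMPLE_REG 0x0	/* hypothetical register offset */

static int example_uart_roundtrip(void)
{
	gxio_uart_context_t ctx;
	uint64_t value;
	int rc;

	rc = gxio_uart_init(&ctx, 0);	/* attach to UART 0 */
	if (rc < 0)
		return rc;

	gxio_uart_write(&ctx, UART_EXAMPLE_REG, 0x5aULL);
	value = gxio_uart_read(&ctx, UART_EXAMPLE_REG);
	(void)value;			/* sketch: nothing done with the data */

	return gxio_uart_destroy(&ctx);	/* unmap MMIO, close the fd */
}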
|
H A D | usb_host.c | 29 int gxio_usb_host_init(gxio_usb_host_context_t *context, int usb_index, gxio_usb_host_init() argument 50 context->fd = fd; gxio_usb_host_init() 53 context->mmio_base = gxio_usb_host_init() 56 if (context->mmio_base == NULL) { gxio_usb_host_init() 57 hv_dev_close(context->fd); gxio_usb_host_init() 66 int gxio_usb_host_destroy(gxio_usb_host_context_t *context) gxio_usb_host_destroy() argument 68 iounmap((void __force __iomem *)(context->mmio_base)); gxio_usb_host_destroy() 69 hv_dev_close(context->fd); gxio_usb_host_destroy() 71 context->mmio_base = NULL; gxio_usb_host_destroy() 72 context->fd = -1; gxio_usb_host_destroy() 79 void *gxio_usb_host_get_reg_start(gxio_usb_host_context_t *context) gxio_usb_host_get_reg_start() argument 81 return context->mmio_base; gxio_usb_host_get_reg_start() 86 size_t gxio_usb_host_get_reg_len(gxio_usb_host_context_t *context) gxio_usb_host_get_reg_len() argument
|
H A D | mpipe.c | 33 int gxio_mpipe_init(gxio_mpipe_context_t *context, unsigned int mpipe_index) gxio_mpipe_init() argument 46 context->fd = fd; gxio_mpipe_init() 56 context->mmio_cfg_base = (void __force *) gxio_mpipe_init() 59 if (context->mmio_cfg_base == NULL) gxio_mpipe_init() 62 context->mmio_fast_base = (void __force *) gxio_mpipe_init() 65 if (context->mmio_fast_base == NULL) gxio_mpipe_init() 70 context->__stacks.stacks[i] = 255; gxio_mpipe_init() 72 context->instance = mpipe_index; gxio_mpipe_init() 77 iounmap((void __force __iomem *)(context->mmio_cfg_base)); gxio_mpipe_init() 79 hv_dev_close(context->fd); gxio_mpipe_init() 80 context->fd = -1; gxio_mpipe_init() 86 int gxio_mpipe_destroy(gxio_mpipe_context_t *context) gxio_mpipe_destroy() argument 88 iounmap((void __force __iomem *)(context->mmio_cfg_base)); gxio_mpipe_destroy() 89 iounmap((void __force __iomem *)(context->mmio_fast_base)); gxio_mpipe_destroy() 90 return hv_dev_close(context->fd); gxio_mpipe_destroy() 135 int gxio_mpipe_init_buffer_stack(gxio_mpipe_context_t *context, gxio_mpipe_init_buffer_stack() argument 145 result = gxio_mpipe_init_buffer_stack_aux(context, mem, mem_size, gxio_mpipe_init_buffer_stack() 152 context->__stacks.stacks[buffer_size_enum] = stack; gxio_mpipe_init_buffer_stack() 159 int gxio_mpipe_init_notif_ring(gxio_mpipe_context_t *context, gxio_mpipe_init_notif_ring() argument 164 return gxio_mpipe_init_notif_ring_aux(context, mem, mem_size, gxio_mpipe_init_notif_ring() 170 int gxio_mpipe_init_notif_group_and_buckets(gxio_mpipe_context_t *context, gxio_mpipe_init_notif_group_and_buckets() argument 192 result = gxio_mpipe_init_notif_group(context, group, bits); gxio_mpipe_init_notif_group_and_buckets() 199 result = gxio_mpipe_init_bucket(context, bucket + i, gxio_mpipe_init_notif_group_and_buckets() 210 int gxio_mpipe_init_edma_ring(gxio_mpipe_context_t *context, gxio_mpipe_init_edma_ring() argument 217 return gxio_mpipe_init_edma_ring_aux(context, mem, mem_size, mem_flags, gxio_mpipe_init_edma_ring() 224 gxio_mpipe_context_t *context) gxio_mpipe_rules_init() 226 rules->context = context; gxio_mpipe_rules_init() 285 stacks ? stacks->stacks[i] : rules->context->__stacks. gxio_mpipe_rules_begin() 359 return gxio_mpipe_commit_rules(rules->context, list, size); gxio_mpipe_rules_commit() 365 gxio_mpipe_context_t *context, gxio_mpipe_iqueue_init() 372 iqueue->context = context; gxio_mpipe_iqueue_init() 386 return gxio_mpipe_init_notif_ring(context, ring, mem, mem_size, gxio_mpipe_iqueue_init() 393 gxio_mpipe_context_t *context, gxio_mpipe_equeue_init() 405 int result = gxio_mpipe_init_edma_ring(context, ering, channel, gxio_mpipe_equeue_init() 419 context->mmio_fast_base + offset.word, gxio_mpipe_equeue_init() 424 equeue->context = context; gxio_mpipe_equeue_init() 433 int gxio_mpipe_set_timestamp(gxio_mpipe_context_t *context, gxio_mpipe_set_timestamp() argument 437 return gxio_mpipe_set_timestamp_aux(context, (uint64_t)ts->tv_sec, gxio_mpipe_set_timestamp() 443 int gxio_mpipe_get_timestamp(gxio_mpipe_context_t *context, gxio_mpipe_get_timestamp() argument 449 ret = gxio_mpipe_get_timestamp_aux(context, (uint64_t *)&ts->tv_sec, gxio_mpipe_get_timestamp() 466 int gxio_mpipe_adjust_timestamp(gxio_mpipe_context_t *context, int64_t delta) gxio_mpipe_adjust_timestamp() argument 468 return gxio_mpipe_adjust_timestamp_aux(context, delta); gxio_mpipe_adjust_timestamp() 472 /* Get our internal context used for link name access. 
This context is 477 static gxio_mpipe_context_t context; _gxio_get_link_context() local 496 context.fd = hv_dev_open((HV_VirtAddr) file, 0); _gxio_get_link_context() 497 if (context.fd < 0) _gxio_get_link_context() 500 contextp = &context; _gxio_get_link_context() 513 gxio_mpipe_context_t *context = _gxio_get_link_context(); gxio_mpipe_link_instance() local 515 if (!context) gxio_mpipe_link_instance() 521 return gxio_mpipe_info_instance_aux(context, name); gxio_mpipe_link_instance() 531 gxio_mpipe_context_t *context = _gxio_get_link_context(); gxio_mpipe_link_enumerate_mac() local 532 if (!context) gxio_mpipe_link_enumerate_mac() 535 rv = gxio_mpipe_info_enumerate_aux(context, idx, &name, &mac); gxio_mpipe_link_enumerate_mac() 548 gxio_mpipe_context_t *context, const char *link_name, gxio_mpipe_link_open() 557 rv = gxio_mpipe_link_open_aux(context, name, flags); gxio_mpipe_link_open() 561 link->context = context; gxio_mpipe_link_open() 572 return gxio_mpipe_link_close_aux(link->context, link->mac); gxio_mpipe_link_close() 580 return gxio_mpipe_link_set_attr_aux(link->context, link->mac, attr, gxio_mpipe_link_set_attr() 223 gxio_mpipe_rules_init(gxio_mpipe_rules_t *rules, gxio_mpipe_context_t *context) gxio_mpipe_rules_init() argument 364 gxio_mpipe_iqueue_init(gxio_mpipe_iqueue_t *iqueue, gxio_mpipe_context_t *context, unsigned int ring, void *mem, size_t mem_size, unsigned int mem_flags) gxio_mpipe_iqueue_init() argument 392 gxio_mpipe_equeue_init(gxio_mpipe_equeue_t *equeue, gxio_mpipe_context_t *context, unsigned int ering, unsigned int channel, void *mem, unsigned int mem_size, unsigned int mem_flags) gxio_mpipe_equeue_init() argument 547 gxio_mpipe_link_open(gxio_mpipe_link_t *link, gxio_mpipe_context_t *context, const char *link_name, unsigned int flags) gxio_mpipe_link_open() argument
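The argument summaries at the end of the mpipe.c excerpt give enough to sketch the usual bring-up order: find which mPIPE instance owns a link, initialize a context against that instance, then open the link on the context. The link name below is a placeholder; real names come from gxio_mpipe_link_enumerate_mac().

static int example_mpipe_open(gxio_mpipe_context_t *ctx, gxio_mpipe_link_t *link)
{
	int instance, rc;

	instance = gxio_mpipe_link_instance("xgbe0");	/* placeholder link name */
	if (instance < 0)
		return instance;

	rc = gxio_mpipe_init(ctx, instance);		/* bind context to that shim */
	if (rc < 0)
		return rc;

	return gxio_mpipe_link_open(link, ctx, "xgbe0", 0);
}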
|
H A D | iorpc_mpipe.c | 24 int gxio_mpipe_alloc_buffer_stacks(gxio_mpipe_context_t *context, gxio_mpipe_alloc_buffer_stacks() argument 35 return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params, gxio_mpipe_alloc_buffer_stacks() 48 int gxio_mpipe_init_buffer_stack_aux(gxio_mpipe_context_t *context, gxio_mpipe_init_buffer_stack_aux() argument 69 return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params, gxio_mpipe_init_buffer_stack_aux() 83 int gxio_mpipe_alloc_notif_rings(gxio_mpipe_context_t *context, gxio_mpipe_alloc_notif_rings() argument 94 return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params, gxio_mpipe_alloc_notif_rings() 105 int gxio_mpipe_init_notif_ring_aux(gxio_mpipe_context_t *context, void *mem_va, gxio_mpipe_init_notif_ring_aux() argument 124 return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params, gxio_mpipe_init_notif_ring_aux() 136 int gxio_mpipe_request_notif_ring_interrupt(gxio_mpipe_context_t *context, gxio_mpipe_request_notif_ring_interrupt() argument 150 return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params, gxio_mpipe_request_notif_ring_interrupt() 161 int gxio_mpipe_enable_notif_ring_interrupt(gxio_mpipe_context_t *context, gxio_mpipe_enable_notif_ring_interrupt() argument 169 return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params, gxio_mpipe_enable_notif_ring_interrupt() 182 int gxio_mpipe_alloc_notif_groups(gxio_mpipe_context_t *context, gxio_mpipe_alloc_notif_groups() argument 193 return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params, gxio_mpipe_alloc_notif_groups() 204 int gxio_mpipe_init_notif_group(gxio_mpipe_context_t *context, gxio_mpipe_init_notif_group() argument 214 return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params, gxio_mpipe_init_notif_group() 226 int gxio_mpipe_alloc_buckets(gxio_mpipe_context_t *context, unsigned int count, gxio_mpipe_alloc_buckets() argument 236 return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params, gxio_mpipe_alloc_buckets() 247 int gxio_mpipe_init_bucket(gxio_mpipe_context_t *context, unsigned int bucket, gxio_mpipe_init_bucket() argument 256 return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params, gxio_mpipe_init_bucket() 268 int gxio_mpipe_alloc_edma_rings(gxio_mpipe_context_t *context, gxio_mpipe_alloc_edma_rings() argument 279 return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params, gxio_mpipe_alloc_edma_rings() 291 int gxio_mpipe_init_edma_ring_aux(gxio_mpipe_context_t *context, void *mem_va, gxio_mpipe_init_edma_ring_aux() argument 311 return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params, gxio_mpipe_init_edma_ring_aux() 318 int gxio_mpipe_commit_rules(gxio_mpipe_context_t *context, const void *blob, gxio_mpipe_commit_rules() argument 323 return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params, blob_size, gxio_mpipe_commit_rules() 335 int gxio_mpipe_register_client_memory(gxio_mpipe_context_t *context, gxio_mpipe_register_client_memory() argument 346 return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params, gxio_mpipe_register_client_memory() 358 int gxio_mpipe_link_open_aux(gxio_mpipe_context_t *context, gxio_mpipe_link_open_aux() argument 367 return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params, gxio_mpipe_link_open_aux() 377 int gxio_mpipe_link_close_aux(gxio_mpipe_context_t *context, int mac) gxio_mpipe_link_close_aux() argument 384 return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params, gxio_mpipe_link_close_aux() 396 int gxio_mpipe_link_set_attr_aux(gxio_mpipe_context_t *context, int mac, gxio_mpipe_link_set_attr_aux() argument 406 return hv_dev_pwrite(context->fd, 0, 
(HV_VirtAddr) params, gxio_mpipe_link_set_attr_aux() 418 int gxio_mpipe_get_timestamp_aux(gxio_mpipe_context_t *context, uint64_t *sec, gxio_mpipe_get_timestamp_aux() argument 426 hv_dev_pread(context->fd, 0, (HV_VirtAddr) params, sizeof(*params), gxio_mpipe_get_timestamp_aux() 443 int gxio_mpipe_set_timestamp_aux(gxio_mpipe_context_t *context, uint64_t sec, gxio_mpipe_set_timestamp_aux() argument 453 return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params, gxio_mpipe_set_timestamp_aux() 463 int gxio_mpipe_adjust_timestamp_aux(gxio_mpipe_context_t *context, int64_t nsec) gxio_mpipe_adjust_timestamp_aux() argument 470 return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params, gxio_mpipe_adjust_timestamp_aux() 484 int gxio_mpipe_config_edma_ring_blks(gxio_mpipe_context_t *context, gxio_mpipe_config_edma_ring_blks() argument 496 return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params, gxio_mpipe_config_edma_ring_blks() 507 int gxio_mpipe_adjust_timestamp_freq(gxio_mpipe_context_t *context, int32_t ppb) gxio_mpipe_adjust_timestamp_freq() argument 514 return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params, gxio_mpipe_adjust_timestamp_freq() 525 int gxio_mpipe_arm_pollfd(gxio_mpipe_context_t *context, int pollfd_cookie) gxio_mpipe_arm_pollfd() argument 532 return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params, gxio_mpipe_arm_pollfd() 542 int gxio_mpipe_close_pollfd(gxio_mpipe_context_t *context, int pollfd_cookie) gxio_mpipe_close_pollfd() argument 549 return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params, gxio_mpipe_close_pollfd() 559 int gxio_mpipe_get_mmio_base(gxio_mpipe_context_t *context, HV_PTE *base) gxio_mpipe_get_mmio_base() argument 566 hv_dev_pread(context->fd, 0, (HV_VirtAddr) params, sizeof(*params), gxio_mpipe_get_mmio_base() 580 int gxio_mpipe_check_mmio_offset(gxio_mpipe_context_t *context, gxio_mpipe_check_mmio_offset() argument 589 return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params, gxio_mpipe_check_mmio_offset()
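Every wrapper in iorpc_mpipe.c marshals the same way: fill a parameter block, then hand it to the hypervisor with hv_dev_pwrite() (or hv_dev_pread() for results) on the context's fd. The length and opcode arguments are mostly elided in the excerpt, so the sketch below reconstructs the tail of the call the way the pattern suggests, with a made-up parameter struct and opcode standing in for the generated IORPC ones.

struct example_params {			/* hypothetical parameter block */
	uint64_t ring;
	uint64_t flags;
};

#define EXAMPLE_OP 0x1234		/* stand-in for a generated IORPC opcode */

static int example_mpipe_rpc(gxio_mpipe_context_t *context,
			     uint64_t ring, uint64_t flags)
{
	struct example_params temp, *params = &temp;

	params->ring = ring;
	params->flags = flags;

	return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params,
			     sizeof(*params), EXAMPLE_OP);
}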
|
H A D | iorpc_mpipe_info.c | 22 int gxio_mpipe_info_instance_aux(gxio_mpipe_info_context_t *context, gxio_mpipe_info_instance_aux() argument 30 return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params, gxio_mpipe_info_instance_aux() 41 int gxio_mpipe_info_enumerate_aux(gxio_mpipe_info_context_t *context, gxio_mpipe_info_enumerate_aux() argument 51 hv_dev_pread(context->fd, 0, (HV_VirtAddr) params, sizeof(*params), gxio_mpipe_info_enumerate_aux() 66 int gxio_mpipe_info_get_mmio_base(gxio_mpipe_info_context_t *context, gxio_mpipe_info_get_mmio_base() argument 74 hv_dev_pread(context->fd, 0, (HV_VirtAddr) params, sizeof(*params), gxio_mpipe_info_get_mmio_base() 88 int gxio_mpipe_info_check_mmio_offset(gxio_mpipe_info_context_t *context, gxio_mpipe_info_check_mmio_offset() argument 97 return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params, gxio_mpipe_info_check_mmio_offset()
|
H A D | iorpc_trio.c | 24 int gxio_trio_alloc_asids(gxio_trio_context_t *context, unsigned int count, gxio_trio_alloc_asids() argument 34 return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params, gxio_trio_alloc_asids() 47 int gxio_trio_alloc_memory_maps(gxio_trio_context_t *context, gxio_trio_alloc_memory_maps() argument 58 return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params, gxio_trio_alloc_memory_maps() 70 int gxio_trio_alloc_scatter_queues(gxio_trio_context_t *context, gxio_trio_alloc_scatter_queues() argument 81 return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params, gxio_trio_alloc_scatter_queues() 94 int gxio_trio_alloc_pio_regions(gxio_trio_context_t *context, gxio_trio_alloc_pio_regions() argument 105 return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params, gxio_trio_alloc_pio_regions() 118 int gxio_trio_init_pio_region_aux(gxio_trio_context_t *context, gxio_trio_init_pio_region_aux() argument 130 return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params, gxio_trio_init_pio_region_aux() 148 int gxio_trio_init_memory_map_mmu_aux(gxio_trio_context_t *context, gxio_trio_init_memory_map_mmu_aux() argument 167 return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params, gxio_trio_init_memory_map_mmu_aux() 178 int gxio_trio_get_port_property(gxio_trio_context_t *context, gxio_trio_get_port_property() argument 186 hv_dev_pread(context->fd, 0, (HV_VirtAddr) params, sizeof(*params), gxio_trio_get_port_property() 201 int gxio_trio_config_legacy_intr(gxio_trio_context_t *context, int inter_x, gxio_trio_config_legacy_intr() argument 215 return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params, gxio_trio_config_legacy_intr() 230 int gxio_trio_config_msi_intr(gxio_trio_context_t *context, int inter_x, gxio_trio_config_msi_intr() argument 249 return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params, gxio_trio_config_msi_intr() 262 int gxio_trio_set_mps_mrs(gxio_trio_context_t *context, uint16_t mps, gxio_trio_set_mps_mrs() argument 272 return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params, gxio_trio_set_mps_mrs() 282 int gxio_trio_force_rc_link_up(gxio_trio_context_t *context, unsigned int mac) gxio_trio_force_rc_link_up() argument 289 return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params, gxio_trio_force_rc_link_up() 299 int gxio_trio_force_ep_link_up(gxio_trio_context_t *context, unsigned int mac) gxio_trio_force_ep_link_up() argument 306 return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params, gxio_trio_force_ep_link_up() 316 int gxio_trio_get_mmio_base(gxio_trio_context_t *context, HV_PTE *base) gxio_trio_get_mmio_base() argument 323 hv_dev_pread(context->fd, 0, (HV_VirtAddr) params, sizeof(*params), gxio_trio_get_mmio_base() 337 int gxio_trio_check_mmio_offset(gxio_trio_context_t *context, gxio_trio_check_mmio_offset() argument 346 return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params, gxio_trio_check_mmio_offset()
|
H A D | iorpc_usb_host.c | 22 int gxio_usb_host_cfg_interrupt(gxio_usb_host_context_t *context, int inter_x, gxio_usb_host_cfg_interrupt() argument 33 return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params, gxio_usb_host_cfg_interrupt() 44 int gxio_usb_host_register_client_memory(gxio_usb_host_context_t *context, gxio_usb_host_register_client_memory() argument 53 return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params, gxio_usb_host_register_client_memory() 64 int gxio_usb_host_get_mmio_base(gxio_usb_host_context_t *context, HV_PTE *base) gxio_usb_host_get_mmio_base() argument 71 hv_dev_pread(context->fd, 0, (HV_VirtAddr) params, sizeof(*params), gxio_usb_host_get_mmio_base() 85 int gxio_usb_host_check_mmio_offset(gxio_usb_host_context_t *context, gxio_usb_host_check_mmio_offset() argument 94 return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params, gxio_usb_host_check_mmio_offset()
|
H A D | iorpc_uart.c | 22 int gxio_uart_cfg_interrupt(gxio_uart_context_t *context, int inter_x, gxio_uart_cfg_interrupt() argument 33 return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params, gxio_uart_cfg_interrupt() 43 int gxio_uart_get_mmio_base(gxio_uart_context_t *context, HV_PTE *base) gxio_uart_get_mmio_base() argument 50 hv_dev_pread(context->fd, 0, (HV_VirtAddr) params, sizeof(*params), gxio_uart_get_mmio_base() 64 int gxio_uart_check_mmio_offset(gxio_uart_context_t *context, gxio_uart_check_mmio_offset() argument 73 return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params, gxio_uart_check_mmio_offset()
|
H A D | trio.c | 28 int gxio_trio_init(gxio_trio_context_t *context, unsigned int trio_index) gxio_trio_init() argument 36 context->fd = -1; gxio_trio_init() 44 context->fd = fd; gxio_trio_init()
|
/linux-4.4.14/fs/xfs/ |
H A D | xfs_attr_list.c | 69 xfs_attr_shortform_list(xfs_attr_list_context_t *context) xfs_attr_shortform_list() argument 79 ASSERT(context != NULL); xfs_attr_shortform_list() 80 dp = context->dp; xfs_attr_shortform_list() 87 cursor = context->cursor; xfs_attr_shortform_list() 90 trace_xfs_attr_list_sf(context); xfs_attr_shortform_list() 101 if (context->bufsize == 0 || xfs_attr_shortform_list() 103 (dp->i_afp->if_bytes + sf->hdr.count * 16) < context->bufsize)) { xfs_attr_shortform_list() 105 error = context->put_listent(context, xfs_attr_shortform_list() 116 if (context->seen_enough) xfs_attr_shortform_list() 123 trace_xfs_attr_list_sf_all(context); xfs_attr_shortform_list() 128 if (context->bufsize == 0) xfs_attr_shortform_list() 148 context->dp->i_mount, sfe); xfs_attr_shortform_list() 199 error = context->put_listent(context, xfs_attr_shortform_list() 209 if (context->seen_enough) xfs_attr_shortform_list() 219 xfs_attr_node_list(xfs_attr_list_context_t *context) xfs_attr_node_list() argument 229 struct xfs_inode *dp = context->dp; xfs_attr_node_list() 232 trace_xfs_attr_node_list(context); xfs_attr_node_list() 234 cursor = context->cursor; xfs_attr_node_list() 255 trace_xfs_attr_list_wrong_blk(context); xfs_attr_node_list() 267 trace_xfs_attr_list_wrong_blk(context); xfs_attr_node_list() 272 trace_xfs_attr_list_wrong_blk(context); xfs_attr_node_list() 278 trace_xfs_attr_list_wrong_blk(context); xfs_attr_node_list() 309 context->dp->i_mount, xfs_attr_node_list() 321 trace_xfs_attr_list_node_descend(context, xfs_attr_node_list() 342 error = xfs_attr3_leaf_list_int(bp, context); xfs_attr_node_list() 348 if (context->seen_enough || leafhdr.forw == 0) xfs_attr_node_list() 366 struct xfs_attr_list_context *context) xfs_attr3_leaf_list_int() 375 struct xfs_mount *mp = context->dp->i_mount; xfs_attr3_leaf_list_int() 377 trace_xfs_attr_list_leaf(context); xfs_attr3_leaf_list_int() 383 cursor = context->cursor; xfs_attr3_leaf_list_int() 389 if (context->resynch) { xfs_attr3_leaf_list_int() 393 if (cursor->offset == context->dupcnt) { xfs_attr3_leaf_list_int() 394 context->dupcnt = 0; xfs_attr3_leaf_list_int() 397 context->dupcnt++; xfs_attr3_leaf_list_int() 400 context->dupcnt = 0; xfs_attr3_leaf_list_int() 405 trace_xfs_attr_list_notfound(context); xfs_attr3_leaf_list_int() 412 context->resynch = 0; xfs_attr3_leaf_list_int() 431 retval = context->put_listent(context, xfs_attr3_leaf_list_int() 445 if (context->put_value) { xfs_attr3_leaf_list_int() 449 args.geo = context->dp->i_mount->m_attr_geo; xfs_attr3_leaf_list_int() 450 args.dp = context->dp; xfs_attr3_leaf_list_int() 460 retval = context->put_listent(context, xfs_attr3_leaf_list_int() 468 retval = context->put_listent(context, xfs_attr3_leaf_list_int() 478 if (context->seen_enough) xfs_attr3_leaf_list_int() 482 trace_xfs_attr_list_leaf_end(context); xfs_attr3_leaf_list_int() 490 xfs_attr_leaf_list(xfs_attr_list_context_t *context) xfs_attr_leaf_list() argument 495 trace_xfs_attr_leaf_list(context); xfs_attr_leaf_list() 497 context->cursor->blkno = 0; xfs_attr_leaf_list() 498 error = xfs_attr3_leaf_read(NULL, context->dp, 0, -1, &bp); xfs_attr_leaf_list() 502 error = xfs_attr3_leaf_list_int(bp, context); xfs_attr_leaf_list() 509 xfs_attr_list_context_t *context) xfs_attr_list_int() 512 xfs_inode_t *dp = context->dp; xfs_attr_list_int() 527 error = xfs_attr_shortform_list(context); xfs_attr_list_int() 529 error = xfs_attr_leaf_list(context); xfs_attr_list_int() 531 error = xfs_attr_node_list(context); xfs_attr_list_int() 550 
xfs_attr_list_context_t *context, xfs_attr_put_listent() 557 struct attrlist *alist = (struct attrlist *)context->alist; xfs_attr_put_listent() 561 ASSERT(!(context->flags & ATTR_KERNOVAL)); xfs_attr_put_listent() 562 ASSERT(context->count >= 0); xfs_attr_put_listent() 563 ASSERT(context->count < (ATTR_MAX_VALUELEN/8)); xfs_attr_put_listent() 564 ASSERT(context->firstu >= sizeof(*alist)); xfs_attr_put_listent() 565 ASSERT(context->firstu <= context->bufsize); xfs_attr_put_listent() 570 if (((context->flags & ATTR_SECURE) == 0) != xfs_attr_put_listent() 573 if (((context->flags & ATTR_ROOT) == 0) != xfs_attr_put_listent() 578 context->count * sizeof(alist->al_offset[0]); xfs_attr_put_listent() 579 context->firstu -= ATTR_ENTSIZE(namelen); xfs_attr_put_listent() 580 if (context->firstu < arraytop) { xfs_attr_put_listent() 581 trace_xfs_attr_list_full(context); xfs_attr_put_listent() 583 context->seen_enough = 1; xfs_attr_put_listent() 587 aep = (attrlist_ent_t *)&context->alist[context->firstu]; xfs_attr_put_listent() 591 alist->al_offset[context->count++] = context->firstu; xfs_attr_put_listent() 592 alist->al_count = context->count; xfs_attr_put_listent() 593 trace_xfs_attr_list_add(context); xfs_attr_put_listent() 612 xfs_attr_list_context_t context; xfs_attr_list() local 636 memset(&context, 0, sizeof(context)); xfs_attr_list() 637 context.dp = dp; xfs_attr_list() 638 context.cursor = cursor; xfs_attr_list() 639 context.resynch = 1; xfs_attr_list() 640 context.flags = flags; xfs_attr_list() 641 context.alist = buffer; xfs_attr_list() 642 context.bufsize = (bufsize & ~(sizeof(int)-1)); /* align */ xfs_attr_list() 643 context.firstu = context.bufsize; xfs_attr_list() 644 context.put_listent = xfs_attr_put_listent; xfs_attr_list() 646 alist = (struct attrlist *)context.alist; xfs_attr_list() 649 alist->al_offset[0] = context.bufsize; xfs_attr_list() 651 error = xfs_attr_list_int(&context); xfs_attr_list() 364 xfs_attr3_leaf_list_int( struct xfs_buf *bp, struct xfs_attr_list_context *context) xfs_attr3_leaf_list_int() argument 508 xfs_attr_list_int( xfs_attr_list_context_t *context) xfs_attr_list_int() argument 549 xfs_attr_put_listent( xfs_attr_list_context_t *context, int flags, unsigned char *name, int namelen, int valuelen, unsigned char *value) xfs_attr_put_listent() argument
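All three on-disk layouts handled in xfs_attr_list.c (short form, single leaf, node tree) funnel each attribute through the context's put_listent callback, so a caller customizes listing by supplying its own callback and seeding the context the same way xfs_attr_list() does at the bottom of the excerpt. A sketch of a counting-only callback, with the cursor type as used by xfs_attr_list in this kernel:

static int example_count_listent(struct xfs_attr_list_context *context,
				 int flags, unsigned char *name, int namelen,
				 int valuelen, unsigned char *value)
{
	context->count++;		/* just tally names, copy nothing out */
	return 0;
}

static int example_count_attrs(struct xfs_inode *dp,
			       attrlist_cursor_kern_t *cursor)
{
	struct xfs_attr_list_context context;
	int error;

	memset(&context, 0, sizeof(context));
	context.dp = dp;
	context.cursor = cursor;
	context.resynch = 1;
	context.put_listent = example_count_listent;

	error = xfs_attr_list_int(&context);
	return error ? error : context.count;
}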
|
H A D | xfs_xattr.c | 160 struct xfs_attr_list_context *context, xfs_xattr_put_listent() 171 ASSERT(context->count >= 0); xfs_xattr_put_listent() 180 arraytop = context->count + prefix_len + namelen + 1; xfs_xattr_put_listent() 181 if (arraytop > context->firstu) { xfs_xattr_put_listent() 182 context->count = -1; /* insufficient space */ xfs_xattr_put_listent() 185 offset = (char *)context->alist + context->count; xfs_xattr_put_listent() 191 context->count += prefix_len + namelen + 1; xfs_xattr_put_listent() 197 struct xfs_attr_list_context *context, xfs_xattr_put_listent_sizes() 204 context->count += xfs_xattr_prefix_len(flags) + namelen + 1; xfs_xattr_put_listent_sizes() 227 struct xfs_attr_list_context context; xfs_vn_listxattr() local 235 memset(&context, 0, sizeof(context)); xfs_vn_listxattr() 236 context.dp = XFS_I(inode); xfs_vn_listxattr() 237 context.cursor = &cursor; xfs_vn_listxattr() 238 context.resynch = 1; xfs_vn_listxattr() 239 context.alist = data; xfs_vn_listxattr() 240 context.bufsize = size; xfs_vn_listxattr() 241 context.firstu = context.bufsize; xfs_vn_listxattr() 244 context.put_listent = xfs_xattr_put_listent; xfs_vn_listxattr() 246 context.put_listent = xfs_xattr_put_listent_sizes; xfs_vn_listxattr() 248 xfs_attr_list_int(&context); xfs_vn_listxattr() 249 if (context.count < 0) xfs_vn_listxattr() 258 data, size, &context.count); xfs_vn_listxattr() 266 data, size, &context.count); xfs_vn_listxattr() 271 return context.count; xfs_vn_listxattr() 159 xfs_xattr_put_listent( struct xfs_attr_list_context *context, int flags, unsigned char *name, int namelen, int valuelen, unsigned char *value) xfs_xattr_put_listent() argument 196 xfs_xattr_put_listent_sizes( struct xfs_attr_list_context *context, int flags, unsigned char *name, int namelen, int valuelen, unsigned char *value) xfs_xattr_put_listent_sizes() argument
|
/linux-4.4.14/arch/sparc/include/asm/ |
H A D | mmu_32.h | 4 /* Default "unsigned long" context */
|
H A D | mmu_context_64.h | 38 &mm->context.tsb_block[0], tsb_context_switch() 40 (mm->context.tsb_block[1].tsb ? tsb_context_switch() 41 &mm->context.tsb_block[1] : tsb_context_switch() 46 , __pa(&mm->context.tsb_descr[0])); tsb_context_switch() 58 /* Set MMU context in the actual hardware. */ 68 : "r" (CTX_HWBITS((__mm)->context)), \ 73 /* Switch the current MM context. */ switch_mm() 82 spin_lock_irqsave(&mm->context.lock, flags); switch_mm() 83 ctx_valid = CTX_VALID(mm->context); switch_mm() 94 * perform the secondary context load and the TSB context switch. switch_mm() 109 * context was valid, so skip switch_mm() 110 * TSB context switch switch_mm() 120 /* Any time a processor runs a context on an address space switch_mm() 121 * for the first time, we must flush that context out of the switch_mm() 127 __flush_tlb_mm(CTX_HWBITS(mm->context), switch_mm() 130 spin_unlock_irqrestore(&mm->context.lock, flags); switch_mm() 141 spin_lock_irqsave(&mm->context.lock, flags); activate_mm() 142 if (!CTX_VALID(mm->context)) activate_mm() 149 __flush_tlb_mm(CTX_HWBITS(mm->context), SECONDARY_CONTEXT); activate_mm() 151 spin_unlock_irqrestore(&mm->context.lock, flags); activate_mm()
|
H A D | mmu_context_32.h | 12 /* Initialize a new mmu context. This is invoked when a new 17 /* Destroy a dead context. This occurs when mmput drops the 24 /* Switch the current MM context. */
|
H A D | mmu_64.h | 15 * field in a spot outside of the areas of the context register 38 * PRIMARY_CONTEXT register values for the kernel context. 48 /* If you want "the TLB context number" use CTX_NR_MASK. If you 49 * want "the bits I program into the context registers" use
|
/linux-4.4.14/arch/avr32/include/asm/ |
H A D | mmu.h | 4 /* Default "unsigned long" context */
|
H A D | mmu_context.h | 20 * The MMU "context" consists of two things: 34 /* Cache of MMU context last used */ 38 * Get MMU context if needed 45 if (((mm->context ^ mc) & MMU_CONTEXT_VERSION_MASK) == 0) get_mmu_context() 49 /* It's old, we need to get new context with new version */ get_mmu_context() 64 mm->context = mc; get_mmu_context() 68 * Initialize the context related info for a new mm_struct 74 mm->context = NO_CONTEXT; init_new_context() 79 * Destroy context related info for an mm_struct that is about 105 set_asid(mm->context & MMU_CONTEXT_ASID_MASK); activate_context()
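The avr32 scheme above packs a generation ("version") into the high bits of mm->context and an ASID into the low bits: while the generation matches the global cache, the cached ASID is reused; otherwise a fresh ASID is handed out, and exhausting the ASID space flushes the TLB and starts a new generation. A self-contained, simplified illustration of that idea; the constants and the rollover details are invented for the example and do not match the real avr32 masks.

#define EX_ASID_MASK     0x000000fful	/* low bits: ASID programmed into the MMU */
#define EX_VERSION_MASK  0xffffff00ul	/* high bits: generation of the ASID space */
#define EX_FIRST_VERSION 0x00000100ul

static unsigned long ex_mmu_context_cache = EX_FIRST_VERSION;

static unsigned long ex_get_mmu_context(unsigned long mm_context)
{
	unsigned long mc = ex_mmu_context_cache;

	/* Same generation: the ASID cached in mm_context is still valid. */
	if (((mm_context ^ mc) & EX_VERSION_MASK) == 0)
		return mm_context;

	/* Stale generation: hand out the next ASID. */
	mc = ++ex_mmu_context_cache;
	if ((mc & EX_ASID_MASK) == 0) {
		/* ASIDs exhausted: a full TLB flush would go here, then
		 * numbering restarts in the new generation (ASID 0 skipped). */
		ex_mmu_context_cache = mc = (mc & EX_VERSION_MASK) + 1;
	}

	return mc;
}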
|
/linux-4.4.14/arch/parisc/include/asm/ |
H A D | mmu_context.h | 26 mm->context = alloc_sid(); init_new_context() 33 free_sid(mm->context); destroy_context() 34 mm->context = 0; destroy_context() 37 static inline unsigned long __space_to_prot(mm_context_t context) __space_to_prot() argument 40 return context << 1; __space_to_prot() 42 return context >> (SPACEID_SHIFT - 1); __space_to_prot() 46 static inline void load_context(mm_context_t context) load_context() argument 48 mtsp(context, 3); load_context() 49 mtctl(__space_to_prot(context), 8); load_context() 57 load_context(next->context); switch_mm() 75 if (next->context == 0) activate_mm() 76 next->context = alloc_sid(); activate_mm()
|
H A D | tlbflush.h | 34 __flush_tlb_range((vma)->vm_mm->context, start, end) 42 * The code to switch to a new context is NOT valid for processes 69 if (mm->context != 0) flush_tlb_mm() 70 free_sid(mm->context); flush_tlb_mm() 71 mm->context = alloc_sid(); flush_tlb_mm() 73 load_context(mm->context); flush_tlb_mm() 83 sid = vma->vm_mm->context; flush_tlb_page()
|
/linux-4.4.14/include/linux/i2c/ |
H A D | max732x.h | 13 void *context; /* param to setup/teardown */ member in struct:max732x_platform_data 17 void *context); 20 void *context);
|
H A D | pcf857x.h | 12 * @context: optional parameter passed to setup() and teardown() 37 void *context); 40 void *context); 41 void *context; member in struct:pcf857x_platform_data
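The context member in pcf857x_platform_data (and in max732x above) is simply opaque board state handed back to the setup() and teardown() hooks. The full hook parameter list is elided in the excerpt, so the signature used in this sketch (client, GPIO base, line count, context) is an assumption made for illustration only.

struct example_board_state {
	int status_led_offset;		/* which expander line drives the status LED */
};

static struct example_board_state example_state = { .status_led_offset = 3 };

/* Assumed hook signature; see the note above. */
static int example_pcf857x_setup(struct i2c_client *client,
				 int gpio, unsigned ngpio, void *context)
{
	struct example_board_state *st = context;

	pr_info("%s: %u lines at GPIO base %d, LED on line %d\n",
		dev_name(&client->dev), ngpio, gpio, st->status_led_offset);
	return 0;
}

static struct pcf857x_platform_data example_pdata = {
	.setup   = example_pcf857x_setup,
	.context = &example_state,
};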
|
/linux-4.4.14/arch/nios2/mm/ |
H A D | mmu_context.c | 2 * MMU context handling. 19 /* The pids position and mask in context */ 24 /* The versions position and mask in context */ 29 /* Return the version part of a context */ 32 /* Return the pid part of a context */ 35 /* Value of the first context (version 1, pid 0) */ 41 * Initialize MMU context management stuff. 51 * Set new context (pid), keep way 53 static void set_context(mm_context_t context) set_context() argument 55 set_mmu_pid(CTX_PID(context)); set_context() 87 /* If the process context we are swapping in has a different context switch_mm() 89 if (unlikely(CTX_VERSION(next->context) != switch_mm() 91 next->context = get_new_context(); switch_mm() 96 /* Set the current context */ switch_mm() 97 set_context(next->context); switch_mm() 104 * the context for the new mm so we see the new mappings. 108 next->context = get_new_context(); activate_mm() 109 set_context(next->context); activate_mm() 113 unsigned long get_pid_from_context(mm_context_t *context) get_pid_from_context() argument 115 return CTX_PID((*context)); get_pid_from_context()
|
/linux-4.4.14/kernel/ |
H A D | auditsc.c | 41 * Subject and object context labeling support added by <danjones@us.ibm.com> 197 * it's going to remain 1-element for almost any setup) until we free context itself. 430 /* Determine if any context name data matches a rule's watch data */ 821 /* Transfer the audit context pointer to the caller, clearing it in the tsk's struct */ audit_take_context() 826 struct audit_context *context = tsk->audit_context; audit_take_context() local 828 if (!context) audit_take_context() 830 context->return_valid = return_valid; audit_take_context() 846 context->return_code = -EINTR; audit_take_context() 848 context->return_code = return_code; audit_take_context() 850 if (context->in_syscall && !context->dummy) { audit_take_context() 851 audit_filter_syscall(tsk, context, &audit_filter_list[AUDIT_FILTER_EXIT]); audit_take_context() 852 audit_filter_inodes(tsk, context); audit_take_context() 856 return context; audit_take_context() 859 static inline void audit_proctitle_free(struct audit_context *context) audit_proctitle_free() argument 861 kfree(context->proctitle.value); audit_proctitle_free() 862 context->proctitle.value = NULL; audit_proctitle_free() 863 context->proctitle.len = 0; audit_proctitle_free() 866 static inline void audit_free_names(struct audit_context *context) audit_free_names() argument 870 list_for_each_entry_safe(n, next, &context->names_list, list) { audit_free_names() 877 context->name_count = 0; audit_free_names() 878 path_put(&context->pwd); audit_free_names() 879 context->pwd.dentry = NULL; audit_free_names() 880 context->pwd.mnt = NULL; audit_free_names() 883 static inline void audit_free_aux(struct audit_context *context) audit_free_aux() argument 887 while ((aux = context->aux)) { audit_free_aux() 888 context->aux = aux->next; audit_free_aux() 891 while ((aux = context->aux_pids)) { audit_free_aux() 892 context->aux_pids = aux->next; audit_free_aux() 899 struct audit_context *context; audit_alloc_context() local 901 context = kzalloc(sizeof(*context), GFP_KERNEL); audit_alloc_context() 902 if (!context) audit_alloc_context() 904 context->state = state; audit_alloc_context() 905 context->prio = state == AUDIT_RECORD_CONTEXT ? 
~0ULL : 0; audit_alloc_context() 906 INIT_LIST_HEAD(&context->killed_trees); audit_alloc_context() 907 INIT_LIST_HEAD(&context->names_list); audit_alloc_context() 908 return context; audit_alloc_context() 912 * audit_alloc - allocate an audit context block for a task 915 * Filter on the task information and allocate a per-task audit context 922 struct audit_context *context; audit_alloc() local 935 if (!(context = audit_alloc_context(state))) { audit_alloc() 940 context->filterkey = key; audit_alloc() 942 tsk->audit_context = context; audit_alloc() 947 static inline void audit_free_context(struct audit_context *context) audit_free_context() argument 949 audit_free_names(context); audit_free_context() 950 unroll_tree_refs(context, NULL, 0); audit_free_context() 951 free_tree_refs(context); audit_free_context() 952 audit_free_aux(context); audit_free_context() 953 kfree(context->filterkey); audit_free_context() 954 kfree(context->sockaddr); audit_free_context() 955 audit_proctitle_free(context); audit_free_context() 956 kfree(context); audit_free_context() 959 static int audit_log_pid_context(struct audit_context *context, pid_t pid, audit_log_pid_context() argument 968 ab = audit_log_start(context, GFP_KERNEL, AUDIT_OBJ_PID); audit_log_pid_context() 1002 static int audit_log_single_execve_arg(struct audit_context *context, audit_log_single_execve_arg() argument 1086 *ab = audit_log_start(context, GFP_KERNEL, AUDIT_EXECVE); audit_log_single_execve_arg() 1137 static void audit_log_execve_info(struct audit_context *context, audit_log_execve_info() argument 1147 audit_log_format(*ab, "argc=%d", context->execve.argc); audit_log_execve_info() 1161 for (i = 0; i < context->execve.argc; i++) { audit_log_execve_info() 1162 len = audit_log_single_execve_arg(context, ab, i, audit_log_execve_info() 1171 static void show_special(struct audit_context *context, int *call_panic) show_special() argument 1176 ab = audit_log_start(context, GFP_KERNEL, context->type); show_special() 1180 switch (context->type) { show_special() 1182 int nargs = context->socketcall.nargs; show_special() 1186 context->socketcall.args[i]); show_special() 1189 u32 osid = context->ipc.osid; show_special() 1192 from_kuid(&init_user_ns, context->ipc.uid), show_special() 1193 from_kgid(&init_user_ns, context->ipc.gid), show_special() 1194 context->ipc.mode); show_special() 1206 if (context->ipc.has_perm) { show_special() 1208 ab = audit_log_start(context, GFP_KERNEL, show_special() 1214 context->ipc.qbytes, show_special() 1215 context->ipc.perm_uid, show_special() 1216 context->ipc.perm_gid, show_special() 1217 context->ipc.perm_mode); show_special() 1224 context->mq_open.oflag, context->mq_open.mode, show_special() 1225 context->mq_open.attr.mq_flags, show_special() 1226 context->mq_open.attr.mq_maxmsg, show_special() 1227 context->mq_open.attr.mq_msgsize, show_special() 1228 context->mq_open.attr.mq_curmsgs); show_special() 1234 context->mq_sendrecv.mqdes, show_special() 1235 context->mq_sendrecv.msg_len, show_special() 1236 context->mq_sendrecv.msg_prio, show_special() 1237 context->mq_sendrecv.abs_timeout.tv_sec, show_special() 1238 context->mq_sendrecv.abs_timeout.tv_nsec); show_special() 1242 context->mq_notify.mqdes, show_special() 1243 context->mq_notify.sigev_signo); show_special() 1246 struct mq_attr *attr = &context->mq_getsetattr.mqstat; show_special() 1250 context->mq_getsetattr.mqdes, show_special() 1255 audit_log_format(ab, "pid=%d", context->capset.pid); show_special() 1256 audit_log_cap(ab, "cap_pi", 
&context->capset.cap.inheritable); show_special() 1257 audit_log_cap(ab, "cap_pp", &context->capset.cap.permitted); show_special() 1258 audit_log_cap(ab, "cap_pe", &context->capset.cap.effective); show_special() 1261 audit_log_format(ab, "fd=%d flags=0x%x", context->mmap.fd, show_special() 1262 context->mmap.flags); show_special() 1265 audit_log_execve_info(context, &ab); show_special() 1284 struct audit_context *context) audit_log_proctitle() 1292 ab = audit_log_start(context, GFP_KERNEL, AUDIT_PROCTITLE); audit_log_proctitle() 1299 if (!context->proctitle.value) { audit_log_proctitle() 1314 context->proctitle.value = buf; audit_log_proctitle() 1315 context->proctitle.len = res; audit_log_proctitle() 1317 msg = context->proctitle.value; audit_log_proctitle() 1318 len = context->proctitle.len; audit_log_proctitle() 1324 static void audit_log_exit(struct audit_context *context, struct task_struct *tsk) audit_log_exit() argument 1332 context->personality = tsk->personality; audit_log_exit() 1334 ab = audit_log_start(context, GFP_KERNEL, AUDIT_SYSCALL); audit_log_exit() 1338 context->arch, context->major); audit_log_exit() 1339 if (context->personality != PER_LINUX) audit_log_exit() 1340 audit_log_format(ab, " per=%lx", context->personality); audit_log_exit() 1341 if (context->return_valid) audit_log_exit() 1343 (context->return_valid==AUDITSC_SUCCESS)?"yes":"no", audit_log_exit() 1344 context->return_code); audit_log_exit() 1348 context->argv[0], audit_log_exit() 1349 context->argv[1], audit_log_exit() 1350 context->argv[2], audit_log_exit() 1351 context->argv[3], audit_log_exit() 1352 context->name_count); audit_log_exit() 1355 audit_log_key(ab, context->filterkey); audit_log_exit() 1358 for (aux = context->aux; aux; aux = aux->next) { audit_log_exit() 1360 ab = audit_log_start(context, GFP_KERNEL, aux->type); audit_log_exit() 1384 if (context->type) audit_log_exit() 1385 show_special(context, &call_panic); audit_log_exit() 1387 if (context->fds[0] >= 0) { audit_log_exit() 1388 ab = audit_log_start(context, GFP_KERNEL, AUDIT_FD_PAIR); audit_log_exit() 1391 context->fds[0], context->fds[1]); audit_log_exit() 1396 if (context->sockaddr_len) { audit_log_exit() 1397 ab = audit_log_start(context, GFP_KERNEL, AUDIT_SOCKADDR); audit_log_exit() 1400 audit_log_n_hex(ab, (void *)context->sockaddr, audit_log_exit() 1401 context->sockaddr_len); audit_log_exit() 1406 for (aux = context->aux_pids; aux; aux = aux->next) { audit_log_exit() 1410 if (audit_log_pid_context(context, axs->target_pid[i], audit_log_exit() 1419 if (context->target_pid && audit_log_exit() 1420 audit_log_pid_context(context, context->target_pid, audit_log_exit() 1421 context->target_auid, context->target_uid, audit_log_exit() 1422 context->target_sessionid, audit_log_exit() 1423 context->target_sid, context->target_comm)) audit_log_exit() 1426 if (context->pwd.dentry && context->pwd.mnt) { audit_log_exit() 1427 ab = audit_log_start(context, GFP_KERNEL, AUDIT_CWD); audit_log_exit() 1429 audit_log_d_path(ab, " cwd=", &context->pwd); audit_log_exit() 1435 list_for_each_entry(n, &context->names_list, list) { audit_log_exit() 1438 audit_log_name(context, n, NULL, i++, &call_panic); audit_log_exit() 1441 audit_log_proctitle(tsk, context); audit_log_exit() 1444 ab = audit_log_start(context, GFP_KERNEL, AUDIT_EOE); audit_log_exit() 1452 * audit_free - free a per-task audit context 1453 * @tsk: task whose audit context block to free 1459 struct audit_context *context; __audit_free() local 1461 context = audit_take_context(tsk, 0, 0); 
__audit_free() 1462 if (!context) __audit_free() 1466 * function (e.g., exit_group), then free context block. __audit_free() 1468 * in the context of the idle thread */ __audit_free() 1470 if (context->in_syscall && context->current_state == AUDIT_RECORD_CONTEXT) __audit_free() 1471 audit_log_exit(context, tsk); __audit_free() 1472 if (!list_empty(&context->killed_trees)) __audit_free() 1473 audit_kill_trees(&context->killed_trees); __audit_free() 1475 audit_free_context(context); __audit_free() 1486 * Fill in audit context at syscall entry. This only happens if the 1487 * audit context was created when the task was created and the state or 1488 * filters demand the audit context be built. If the state from the 1498 struct audit_context *context = tsk->audit_context; __audit_syscall_entry() local 1501 if (!context) __audit_syscall_entry() 1504 BUG_ON(context->in_syscall || context->name_count); __audit_syscall_entry() 1509 context->arch = syscall_get_arch(); __audit_syscall_entry() 1510 context->major = major; __audit_syscall_entry() 1511 context->argv[0] = a1; __audit_syscall_entry() 1512 context->argv[1] = a2; __audit_syscall_entry() 1513 context->argv[2] = a3; __audit_syscall_entry() 1514 context->argv[3] = a4; __audit_syscall_entry() 1516 state = context->state; __audit_syscall_entry() 1517 context->dummy = !audit_n_rules; __audit_syscall_entry() 1518 if (!context->dummy && state == AUDIT_BUILD_CONTEXT) { __audit_syscall_entry() 1519 context->prio = 0; __audit_syscall_entry() 1520 state = audit_filter_syscall(tsk, context, &audit_filter_list[AUDIT_FILTER_ENTRY]); __audit_syscall_entry() 1525 context->serial = 0; __audit_syscall_entry() 1526 context->ctime = CURRENT_TIME; __audit_syscall_entry() 1527 context->in_syscall = 1; __audit_syscall_entry() 1528 context->current_state = state; __audit_syscall_entry() 1529 context->ppid = 0; __audit_syscall_entry() 1533 * audit_syscall_exit - deallocate audit context after a system call 1537 * Tear down after system call. If the audit context has been marked as 1546 struct audit_context *context; __audit_syscall_exit() local 1553 context = audit_take_context(tsk, success, return_code); __audit_syscall_exit() 1554 if (!context) __audit_syscall_exit() 1557 if (context->in_syscall && context->current_state == AUDIT_RECORD_CONTEXT) __audit_syscall_exit() 1558 audit_log_exit(context, tsk); __audit_syscall_exit() 1560 context->in_syscall = 0; __audit_syscall_exit() 1561 context->prio = context->state == AUDIT_RECORD_CONTEXT ? 
~0ULL : 0; __audit_syscall_exit() 1563 if (!list_empty(&context->killed_trees)) __audit_syscall_exit() 1564 audit_kill_trees(&context->killed_trees); __audit_syscall_exit() 1566 audit_free_names(context); __audit_syscall_exit() 1567 unroll_tree_refs(context, NULL, 0); __audit_syscall_exit() 1568 audit_free_aux(context); __audit_syscall_exit() 1569 context->aux = NULL; __audit_syscall_exit() 1570 context->aux_pids = NULL; __audit_syscall_exit() 1571 context->target_pid = 0; __audit_syscall_exit() 1572 context->target_sid = 0; __audit_syscall_exit() 1573 context->sockaddr_len = 0; __audit_syscall_exit() 1574 context->type = 0; __audit_syscall_exit() 1575 context->fds[0] = -1; __audit_syscall_exit() 1576 if (context->state != AUDIT_RECORD_CONTEXT) { __audit_syscall_exit() 1577 kfree(context->filterkey); __audit_syscall_exit() 1578 context->filterkey = NULL; __audit_syscall_exit() 1580 tsk->audit_context = context; __audit_syscall_exit() 1586 struct audit_context *context; handle_one() local 1592 context = current->audit_context; handle_one() 1593 p = context->trees; handle_one() 1594 count = context->tree_count; handle_one() 1600 if (likely(put_tree_ref(context, chunk))) handle_one() 1602 if (unlikely(!grow_tree_refs(context))) { handle_one() 1604 audit_set_auditable(context); handle_one() 1606 unroll_tree_refs(context, p, count); handle_one() 1609 put_tree_ref(context, chunk); handle_one() 1616 struct audit_context *context; handle_path() local 1623 context = current->audit_context; handle_path() 1624 p = context->trees; handle_path() 1625 count = context->tree_count; handle_path() 1637 if (unlikely(!put_tree_ref(context, chunk))) { handle_path() 1652 unroll_tree_refs(context, p, count); handle_path() 1656 if (grow_tree_refs(context)) { handle_path() 1658 unroll_tree_refs(context, p, count); handle_path() 1663 unroll_tree_refs(context, p, count); handle_path() 1664 audit_set_auditable(context); handle_path() 1671 static struct audit_names *audit_alloc_name(struct audit_context *context, audit_alloc_name() argument 1676 if (context->name_count < AUDIT_NAMES) { audit_alloc_name() 1677 aname = &context->preallocated_names[context->name_count]; audit_alloc_name() 1688 list_add_tail(&aname->list, &context->names_list); audit_alloc_name() 1690 context->name_count++; audit_alloc_name() 1698 * Search the audit_names list for the current audit context. If there is an 1705 struct audit_context *context = current->audit_context; __audit_reusename() local 1708 list_for_each_entry(n, &context->names_list, list) { __audit_reusename() 1723 * Add a name to the list of audit names for this context. 1728 struct audit_context *context = current->audit_context; __audit_getname() local 1731 if (!context->in_syscall) __audit_getname() 1734 n = audit_alloc_name(context, AUDIT_TYPE_UNKNOWN); __audit_getname() 1743 if (!context->pwd.dentry) __audit_getname() 1744 get_fs_pwd(current->fs, &context->pwd); __audit_getname() 1756 struct audit_context *context = current->audit_context; __audit_inode() local 1761 if (!context->in_syscall) __audit_inode() 1783 list_for_each_entry_reverse(n, &context->names_list, list) { __audit_inode() 1810 n = audit_alloc_name(context, AUDIT_TYPE_UNKNOWN); __audit_inode() 1845 * This call updates the audit context with the child's information. 
1855 struct audit_context *context = current->audit_context; __audit_inode_child() local 1860 if (!context->in_syscall) __audit_inode_child() 1867 list_for_each_entry(n, &context->names_list, list) { __audit_inode_child() 1884 list_for_each_entry(n, &context->names_list, list) { __audit_inode_child() 1904 n = audit_alloc_name(context, AUDIT_TYPE_PARENT); __audit_inode_child() 1911 found_child = audit_alloc_name(context, type); __audit_inode_child() 1916 * directory. All names for this context are relinquished in __audit_inode_child() 1938 * Also sets the context as auditable. 2043 struct audit_context *context = current->audit_context; __audit_mq_open() local 2046 memcpy(&context->mq_open.attr, attr, sizeof(struct mq_attr)); __audit_mq_open() 2048 memset(&context->mq_open.attr, 0, sizeof(struct mq_attr)); __audit_mq_open() 2050 context->mq_open.oflag = oflag; __audit_mq_open() 2051 context->mq_open.mode = mode; __audit_mq_open() 2053 context->type = AUDIT_MQ_OPEN; __audit_mq_open() 2067 struct audit_context *context = current->audit_context; __audit_mq_sendrecv() local 2068 struct timespec *p = &context->mq_sendrecv.abs_timeout; __audit_mq_sendrecv() 2075 context->mq_sendrecv.mqdes = mqdes; __audit_mq_sendrecv() 2076 context->mq_sendrecv.msg_len = msg_len; __audit_mq_sendrecv() 2077 context->mq_sendrecv.msg_prio = msg_prio; __audit_mq_sendrecv() 2079 context->type = AUDIT_MQ_SENDRECV; __audit_mq_sendrecv() 2091 struct audit_context *context = current->audit_context; __audit_mq_notify() local 2094 context->mq_notify.sigev_signo = notification->sigev_signo; __audit_mq_notify() 2096 context->mq_notify.sigev_signo = 0; __audit_mq_notify() 2098 context->mq_notify.mqdes = mqdes; __audit_mq_notify() 2099 context->type = AUDIT_MQ_NOTIFY; __audit_mq_notify() 2110 struct audit_context *context = current->audit_context; __audit_mq_getsetattr() local 2111 context->mq_getsetattr.mqdes = mqdes; __audit_mq_getsetattr() 2112 context->mq_getsetattr.mqstat = *mqstat; __audit_mq_getsetattr() 2113 context->type = AUDIT_MQ_GETSETATTR; __audit_mq_getsetattr() 2123 struct audit_context *context = current->audit_context; __audit_ipc_obj() local 2124 context->ipc.uid = ipcp->uid; __audit_ipc_obj() 2125 context->ipc.gid = ipcp->gid; __audit_ipc_obj() 2126 context->ipc.mode = ipcp->mode; __audit_ipc_obj() 2127 context->ipc.has_perm = 0; __audit_ipc_obj() 2128 security_ipc_getsecid(ipcp, &context->ipc.osid); __audit_ipc_obj() 2129 context->type = AUDIT_IPC; __audit_ipc_obj() 2143 struct audit_context *context = current->audit_context; __audit_ipc_set_perm() local 2145 context->ipc.qbytes = qbytes; __audit_ipc_set_perm() 2146 context->ipc.perm_uid = uid; __audit_ipc_set_perm() 2147 context->ipc.perm_gid = gid; __audit_ipc_set_perm() 2148 context->ipc.perm_mode = mode; __audit_ipc_set_perm() 2149 context->ipc.has_perm = 1; __audit_ipc_set_perm() 2154 struct audit_context *context = current->audit_context; __audit_bprm() local 2156 context->type = AUDIT_EXECVE; __audit_bprm() 2157 context->execve.argc = bprm->argc; __audit_bprm() 2169 struct audit_context *context = current->audit_context; __audit_socketcall() local 2173 context->type = AUDIT_SOCKETCALL; __audit_socketcall() 2174 context->socketcall.nargs = nargs; __audit_socketcall() 2175 memcpy(context->socketcall.args, args, nargs * sizeof(unsigned long)); __audit_socketcall() 2187 struct audit_context *context = current->audit_context; __audit_fd_pair() local 2188 context->fds[0] = fd1; __audit_fd_pair() 2189 context->fds[1] = fd2; __audit_fd_pair() 2197 * Returns 0 
for success or NULL context or < 0 on error. 2201 struct audit_context *context = current->audit_context; __audit_sockaddr() local 2203 if (!context->sockaddr) { __audit_sockaddr() 2207 context->sockaddr = p; __audit_sockaddr() 2210 context->sockaddr_len = len; __audit_sockaddr() 2211 memcpy(context->sockaddr, a, len); __audit_sockaddr() 2217 struct audit_context *context = current->audit_context; __audit_ptrace() local 2219 context->target_pid = task_pid_nr(t); __audit_ptrace() 2220 context->target_auid = audit_get_loginuid(t); __audit_ptrace() 2221 context->target_uid = task_uid(t); __audit_ptrace() 2222 context->target_sessionid = audit_get_sessionid(t); __audit_ptrace() 2223 security_task_getsecid(t, &context->target_sid); __audit_ptrace() 2224 memcpy(context->target_comm, t->comm, TASK_COMM_LEN); __audit_ptrace() 2305 struct audit_context *context = current->audit_context; __audit_log_bprm_fcaps() local 2313 ax->d.next = context->aux; __audit_log_bprm_fcaps() 2314 context->aux = (void *)ax; __audit_log_bprm_fcaps() 2343 struct audit_context *context = current->audit_context; __audit_log_capset() local 2344 context->capset.pid = task_pid_nr(current); __audit_log_capset() 2345 context->capset.cap.effective = new->cap_effective; __audit_log_capset() 2346 context->capset.cap.inheritable = new->cap_effective; __audit_log_capset() 2347 context->capset.cap.permitted = new->cap_permitted; __audit_log_capset() 2348 context->type = AUDIT_CAPSET; __audit_log_capset() 2353 struct audit_context *context = current->audit_context; __audit_mmap_fd() local 2354 context->mmap.fd = fd; __audit_mmap_fd() 2355 context->mmap.flags = flags; __audit_mmap_fd() 2356 context->type = AUDIT_MMAP; __audit_mmap_fd() 1283 audit_log_proctitle(struct task_struct *tsk, struct audit_context *context) audit_log_proctitle() argument
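Every __audit_* helper in the fragment above follows the same stash-and-tag pattern: fetch current->audit_context, copy the syscall-specific arguments into it, and set context->type so the syscall-exit code knows which record to emit. Below is a minimal, self-contained sketch of that shape; struct toy_audit_context, __audit_widget() and TOY_AUDIT_WIDGET are hypothetical stand-ins, not auditsc.c internals.

    #include <stdio.h>

    /* Hypothetical, much-reduced analogue of struct audit_context. */
    struct toy_audit_context {
        int type;                        /* tells the exit path what was stashed */
        struct {
            int fd;
            unsigned long flags;
        } widget;
    };

    #define TOY_AUDIT_WIDGET 1           /* stand-in for a real AUDIT_* constant */

    /* Entry-side helper: record the arguments, tag the record type. */
    static void __audit_widget(struct toy_audit_context *ctx, int fd,
                               unsigned long flags)
    {
        ctx->widget.fd = fd;
        ctx->widget.flags = flags;
        ctx->type = TOY_AUDIT_WIDGET;
    }

    /* Exit-side consumer: emits whatever the entry helper tagged. */
    static void toy_audit_exit(const struct toy_audit_context *ctx)
    {
        if (ctx->type == TOY_AUDIT_WIDGET)
            printf("type=WIDGET fd=%d flags=%#lx\n",
                   ctx->widget.fd, ctx->widget.flags);
    }

    int main(void)
    {
        struct toy_audit_context ctx = { 0 };

        __audit_widget(&ctx, 3, 0x2);
        toy_audit_exit(&ctx);
        return 0;
    }

The real helpers differ only in which member of the context they fill and which AUDIT_* constant they tag.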
|
H A D | user-return-notifier.c | 11 * called in atomic context. The notifier will also be called in atomic 12 * context. 23 * context, and from the same cpu registration occurred in.
|
/linux-4.4.14/arch/mips/include/uapi/asm/ |
H A D | ucontext.h | 5 * struct extcontext - extended context header structure 6 * @magic: magic value identifying the type of extended context 9 * Extended context structures provide context which does not fit within struct 11 * ucontext and struct sigframe, with each extended context structure beginning 12 * with a header defined by this struct. The type of context represented is 13 * indicated by the magic field. Userland may check each extended context 15 * unrecognised context to be skipped, allowing for future expansion. The end 16 * of the extended context data is indicated by the magic value 25 * struct msa_extcontext - MSA extended context structure 26 * @ext: the extended context header, with magic == MSA_EXTCONTEXT_MAGIC 30 * If MSA context is live for a task at the time a signal is delivered to it, 31 * this structure will hold the MSA context of the task as it was prior to the 45 * struct ucontext - user context structure 61 /* Extended context structures may follow ucontext */
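The layout described above is a tagged-record stream: each extended context record begins with a header whose magic identifies its type, unrecognised records can be skipped, and a terminating magic marks the end of the extended data. A hedged userspace sketch of such a walker follows; the header shape (a size field after the magic) and the magic values are assumptions made for illustration, not the exact uapi definitions.

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Assumed header shape: a magic identifying the record, then its size. */
    struct ext_hdr {
        uint32_t magic;
        uint32_t size;                    /* bytes, including this header */
    };

    #define EXT_MAGIC_END 0x00000000u     /* assumed end-of-list marker */
    #define EXT_MAGIC_FOO 0x464f4f00u     /* hypothetical record type */

    /* Walk a buffer of extended-context records, skipping unknown ones. */
    static void walk_extcontext(const uint8_t *p, size_t len)
    {
        while (len >= sizeof(struct ext_hdr)) {
            struct ext_hdr h;

            memcpy(&h, p, sizeof(h));
            if (h.magic == EXT_MAGIC_END)
                return;                   /* end of extended data */
            if (h.size < sizeof(h) || h.size > len)
                return;                   /* malformed record */
            if (h.magic == EXT_MAGIC_FOO)
                printf("FOO record, %u bytes\n", (unsigned)h.size);
            /* else: unknown type, just step over it */
            p += h.size;
            len -= h.size;
        }
    }

    int main(void)
    {
        uint8_t buf[32] = { 0 };
        struct ext_hdr foo = { EXT_MAGIC_FOO, 16 };
        struct ext_hdr end = { EXT_MAGIC_END, 8 };

        memcpy(buf, &foo, sizeof(foo));
        memcpy(buf + 16, &end, sizeof(end));
        walk_extcontext(buf, sizeof(buf));
        return 0;
    }

The real signal-frame layout defines the actual magics and per-type payloads; the skip-by-size walk is the part the header comment is describing.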
|
/linux-4.4.14/arch/tile/include/gxio/ |
H A D | uart.h | 33 /* A context object used to manage UART resources. */ 49 * @param context Pointer to a properly initialized gxio_uart_context_t. 58 extern int gxio_uart_cfg_interrupt(gxio_uart_context_t *context, 63 /* Initialize a UART context. 65 * A properly initialized context must be obtained before any of the other 68 * @param context Pointer to a gxio_uart_context_t, which will be initialized 71 * @return Zero if the context was successfully initialized, else a 74 extern int gxio_uart_init(gxio_uart_context_t *context, int uart_index); 76 /* Destroy a UART context. 78 * Once destroyed, a context may not be used with any gxio_uart routines 80 * interrupts requested on this context will be delivered. The state and 81 * configuration of the pins which had been attached to this context are 84 * @param context Pointer to a gxio_uart_context_t. 85 * @return Zero if the context was successfully destroyed, else a 88 extern int gxio_uart_destroy(gxio_uart_context_t *context); 91 * @param context Pointer to a gxio_uart_context_t. 95 extern void gxio_uart_write(gxio_uart_context_t *context, uint64_t offset, 99 * @param context Pointer to a gxio_uart_context_t. 103 extern uint64_t gxio_uart_read(gxio_uart_context_t *context, uint64_t offset);
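Taken together, the declarations above give the whole lifecycle: gxio_uart_init() to acquire a context for a given UART index, gxio_uart_read()/gxio_uart_write() against register offsets, and gxio_uart_destroy() when finished. The sketch below mirrors the busy-wait transmit helper in the tilegx serial driver further down this listing; it assumes the tile kernel tree's <gxio/uart.h> plus the UART_FLAG and UART_TRANSMIT_DATA offsets used there, only builds in that tree, and leaves the exact TX-full flag bit as a placeholder because its field name is not shown in this excerpt.

    #include <gxio/uart.h>

    static gxio_uart_context_t uart_ctx;

    /* Mirrors tilegx_putchar(): bail out if the TX FIFO is full, else send. */
    static int sketch_uart_putc(char c)
    {
        uint64_t flag = gxio_uart_read(&uart_ctx, UART_FLAG);

        if (flag & 1 /* placeholder for the TX-FIFO-full bit */)
            return -1;
        gxio_uart_write(&uart_ctx, UART_TRANSMIT_DATA, (unsigned long)c);
        return 0;
    }

    static int sketch_uart_hello(void)
    {
        const char *s = "hi\n";
        int ret = gxio_uart_init(&uart_ctx, 0);   /* UART index 0 */

        if (ret)
            return ret;
        while (*s)
            if (!sketch_uart_putc(*s))
                s++;                              /* retry while FIFO is full */
        return gxio_uart_destroy(&uart_ctx);
    }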
|
H A D | iorpc_mpipe.h | 59 int gxio_mpipe_alloc_buffer_stacks(gxio_mpipe_context_t *context, 63 int gxio_mpipe_init_buffer_stack_aux(gxio_mpipe_context_t *context, 69 int gxio_mpipe_alloc_notif_rings(gxio_mpipe_context_t *context, 73 int gxio_mpipe_init_notif_ring_aux(gxio_mpipe_context_t *context, void *mem_va, 77 int gxio_mpipe_request_notif_ring_interrupt(gxio_mpipe_context_t *context, 82 int gxio_mpipe_enable_notif_ring_interrupt(gxio_mpipe_context_t *context, 85 int gxio_mpipe_alloc_notif_groups(gxio_mpipe_context_t *context, 89 int gxio_mpipe_init_notif_group(gxio_mpipe_context_t *context, 93 int gxio_mpipe_alloc_buckets(gxio_mpipe_context_t *context, unsigned int count, 96 int gxio_mpipe_init_bucket(gxio_mpipe_context_t *context, unsigned int bucket, 99 int gxio_mpipe_alloc_edma_rings(gxio_mpipe_context_t *context, 103 int gxio_mpipe_init_edma_ring_aux(gxio_mpipe_context_t *context, void *mem_va, 108 int gxio_mpipe_commit_rules(gxio_mpipe_context_t *context, const void *blob, 111 int gxio_mpipe_register_client_memory(gxio_mpipe_context_t *context, 115 int gxio_mpipe_link_open_aux(gxio_mpipe_context_t *context, 118 int gxio_mpipe_link_close_aux(gxio_mpipe_context_t *context, int mac); 120 int gxio_mpipe_link_set_attr_aux(gxio_mpipe_context_t *context, int mac, 123 int gxio_mpipe_get_timestamp_aux(gxio_mpipe_context_t *context, uint64_t *sec, 126 int gxio_mpipe_set_timestamp_aux(gxio_mpipe_context_t *context, uint64_t sec, 129 int gxio_mpipe_adjust_timestamp_aux(gxio_mpipe_context_t *context, 132 int gxio_mpipe_adjust_timestamp_freq(gxio_mpipe_context_t *context, 135 int gxio_mpipe_arm_pollfd(gxio_mpipe_context_t *context, int pollfd_cookie); 137 int gxio_mpipe_close_pollfd(gxio_mpipe_context_t *context, int pollfd_cookie); 139 int gxio_mpipe_get_mmio_base(gxio_mpipe_context_t *context, HV_PTE *base); 141 int gxio_mpipe_check_mmio_offset(gxio_mpipe_context_t *context,
|
H A D | usb_host.h | 33 /* A context object used to manage USB hardware resources. */ 43 /* Initialize a USB context. 45 * A properly initialized context must be obtained before any of the other 48 * @param context Pointer to a gxio_usb_host_context_t, which will be 53 * @return Zero if the context was successfully initialized, else a 56 extern int gxio_usb_host_init(gxio_usb_host_context_t *context, int usb_index, 59 /* Destroy a USB context. 61 * Once destroyed, a context may not be used with any gxio_usb_host routines 63 * interrupts or signals requested on this context will be delivered. The 65 * context are unchanged by this operation. 67 * @param context Pointer to a gxio_usb_host_context_t. 68 * @return Zero if the context was successfully destroyed, else a 71 extern int gxio_usb_host_destroy(gxio_usb_host_context_t *context); 75 * @param context Pointer to a properly initialized gxio_usb_host_context_t. 78 extern void *gxio_usb_host_get_reg_start(gxio_usb_host_context_t *context); 82 * @param context Pointer to a properly initialized gxio_usb_host_context_t. 85 extern size_t gxio_usb_host_get_reg_len(gxio_usb_host_context_t *context);
|
H A D | iorpc_trio.h | 49 int gxio_trio_alloc_asids(gxio_trio_context_t *context, unsigned int count, 53 int gxio_trio_alloc_memory_maps(gxio_trio_context_t *context, 58 int gxio_trio_alloc_scatter_queues(gxio_trio_context_t *context, 62 int gxio_trio_alloc_pio_regions(gxio_trio_context_t *context, 66 int gxio_trio_init_pio_region_aux(gxio_trio_context_t *context, 71 int gxio_trio_init_memory_map_mmu_aux(gxio_trio_context_t *context, 78 int gxio_trio_get_port_property(gxio_trio_context_t *context, 81 int gxio_trio_config_legacy_intr(gxio_trio_context_t *context, int inter_x, 85 int gxio_trio_config_msi_intr(gxio_trio_context_t *context, int inter_x, 92 int gxio_trio_set_mps_mrs(gxio_trio_context_t *context, uint16_t mps, 95 int gxio_trio_force_rc_link_up(gxio_trio_context_t *context, unsigned int mac); 97 int gxio_trio_force_ep_link_up(gxio_trio_context_t *context, unsigned int mac); 99 int gxio_trio_get_mmio_base(gxio_trio_context_t *context, HV_PTE *base); 101 int gxio_trio_check_mmio_offset(gxio_trio_context_t *context,
|
H A D | iorpc_uart.h | 32 int gxio_uart_cfg_interrupt(gxio_uart_context_t *context, int inter_x, 35 int gxio_uart_get_mmio_base(gxio_uart_context_t *context, HV_PTE *base); 37 int gxio_uart_check_mmio_offset(gxio_uart_context_t *context,
|
H A D | iorpc_mpipe_info.h | 36 int gxio_mpipe_info_instance_aux(gxio_mpipe_info_context_t *context, 39 int gxio_mpipe_info_enumerate_aux(gxio_mpipe_info_context_t *context, 44 int gxio_mpipe_info_get_mmio_base(gxio_mpipe_info_context_t *context, 47 int gxio_mpipe_info_check_mmio_offset(gxio_mpipe_info_context_t *context,
|
H A D | iorpc_usb_host.h | 34 int gxio_usb_host_cfg_interrupt(gxio_usb_host_context_t *context, int inter_x, 37 int gxio_usb_host_register_client_memory(gxio_usb_host_context_t *context, 40 int gxio_usb_host_get_mmio_base(gxio_usb_host_context_t *context, 43 int gxio_usb_host_check_mmio_offset(gxio_usb_host_context_t *context,
|
/linux-4.4.14/arch/ia64/include/asm/ |
H A D | mmu_context.h | 10 * Routines to manage the allocation of task context numbers. Task context 12 * due to context switches. Context numbers are implemented using ia-64 36 unsigned int next; /* next context number to use */ 38 unsigned int max_ctx; /* max. context value supported by all CPUs */ 56 * When the context counter wraps around all TLBs need to be flushed because 57 * an old context number might have been reused. This is signalled by the 81 nv_mm_context_t context = mm->context; get_mmu_context() local 83 if (likely(context)) get_mmu_context() 88 context = mm->context; get_mmu_context() 89 if (context == 0) { get_mmu_context() 99 mm->context = context = ia64_ctx.next++; get_mmu_context() 100 __set_bit(context, ia64_ctx.bitmap); get_mmu_context() 105 * Ensure we're not starting to use "context" before any old get_mmu_context() 110 return context; get_mmu_context() 114 * Initialize context number to some sane value. MM is guaranteed to be a 120 mm->context = 0; init_new_context() 131 reload_context (nv_mm_context_t context) reload_context() argument 138 rid = context << 3; /* make space for encoding the region number */ reload_context() 165 nv_mm_context_t context; activate_context() local 168 context = get_mmu_context(mm); activate_context() 171 reload_context(context); activate_context() 176 } while (unlikely(context != mm->context)); activate_context()
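The allocator above hands out monotonically increasing context numbers, records each one in ia64_ctx.bitmap, and (per the comment) flushes every TLB when the counter wraps because old numbers may then be reused. The userspace simulation below captures just that allocate/mark/flush-on-wrap cycle; the range, the bitmap and the flush hook are illustrative, not the ia64 values.

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define MAX_CTX 16                    /* tiny range so the wrap is visible */

    static unsigned int next_ctx = 1;     /* 0 stays reserved */
    static uint8_t ctx_bitmap[MAX_CTX];

    static void flush_all_tlbs(void)      /* stand-in for the wraparound flush */
    {
        printf("-- wrapped: flush all TLBs, recycle context numbers --\n");
        memset(ctx_bitmap, 0, sizeof(ctx_bitmap));
        next_ctx = 1;
    }

    /* Give an "mm" a context number, allocating on first use or after a wrap. */
    static unsigned int get_mmu_context(unsigned int *mm_ctx)
    {
        if (*mm_ctx && ctx_bitmap[*mm_ctx])
            return *mm_ctx;               /* still valid: fast path */
        if (next_ctx == MAX_CTX)
            flush_all_tlbs();             /* old numbers may be reused now */
        *mm_ctx = next_ctx++;
        ctx_bitmap[*mm_ctx] = 1;
        return *mm_ctx;
    }

    int main(void)
    {
        unsigned int mm[20] = { 0 };

        for (int i = 0; i < 20; i++)
            printf("mm%-2d -> ctx %u\n", i, get_mmu_context(&mm[i]));
        return 0;
    }

reload_context() then turns the number into region IDs (context << 3 leaves room for the region bits), which is the hardware-facing half of the scheme.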
|
H A D | mmu.h | 5 * Type for a context number. We declare it volatile to ensure proper
|
H A D | switch_to.h | 24 * context switch MUST be done before calling ia64_switch_to() since a 55 * In the SMP case, we save the fph state when context-switching away from a thread that 67 /* "next" in old context is "current" in new context */ \
|
/linux-4.4.14/drivers/media/usb/as102/ |
H A D | as10x_cmd_cfg.c | 25 * as10x_cmd_get_context - Send get context command to AS10x 27 * @tag: context tag 28 * @pvalue: pointer where to store context value read 43 sizeof(pcmd->body.context.req)); as10x_cmd_get_context() 46 pcmd->body.context.req.proc_id = cpu_to_le16(CONTROL_PROC_CONTEXT); as10x_cmd_get_context() 47 pcmd->body.context.req.tag = cpu_to_le16(tag); as10x_cmd_get_context() 48 pcmd->body.context.req.type = cpu_to_le16(GET_CONTEXT_DATA); as10x_cmd_get_context() 54 sizeof(pcmd->body.context.req) as10x_cmd_get_context() 57 sizeof(prsp->body.context.rsp) as10x_cmd_get_context() 66 /* parse response: context command do not follow the common response */ as10x_cmd_get_context() 72 *pvalue = le32_to_cpu((__force __le32)prsp->body.context.rsp.reg_val.u.value32); as10x_cmd_get_context() 81 * as10x_cmd_set_context - send set context command to AS10x 83 * @tag: context tag 84 * @value: value to set in context 99 sizeof(pcmd->body.context.req)); as10x_cmd_set_context() 102 pcmd->body.context.req.proc_id = cpu_to_le16(CONTROL_PROC_CONTEXT); as10x_cmd_set_context() 103 /* pcmd->body.context.req.reg_val.mode initialization is not required */ as10x_cmd_set_context() 104 pcmd->body.context.req.reg_val.u.value32 = (__force u32)cpu_to_le32(value); as10x_cmd_set_context() 105 pcmd->body.context.req.tag = cpu_to_le16(tag); as10x_cmd_set_context() 106 pcmd->body.context.req.type = cpu_to_le16(SET_CONTEXT_DATA); as10x_cmd_set_context() 112 sizeof(pcmd->body.context.req) as10x_cmd_set_context() 115 sizeof(prsp->body.context.rsp) as10x_cmd_set_context() 124 /* parse response: context command do not follow the common response */ as10x_cmd_set_context() 182 * as10x_context_rsp_parse - Parse context command response 194 err = prsp->body.context.rsp.error; as10x_context_rsp_parse() 197 (le16_to_cpu(prsp->body.context.rsp.proc_id) == proc_id)) { as10x_context_rsp_parse()
|
/linux-4.4.14/drivers/tty/serial/ |
H A D | tilegx.c | 48 /* GXIO device context. */ 49 gxio_uart_context_t context; member in struct:tile_uart_port 71 gxio_uart_context_t *context = &tile_uart->context; receive_chars() local 74 count.word = gxio_uart_read(context, UART_FIFO_COUNT); receive_chars() 76 c = (char)gxio_uart_read(context, UART_RECEIVE_DATA); receive_chars() 89 gxio_uart_context_t *context = &tile_uart->context; handle_receive() local 98 gxio_uart_write(context, UART_INTERRUPT_STATUS, handle_receive() 117 static int tilegx_putchar(gxio_uart_context_t *context, char c) tilegx_putchar() argument 120 flag.word = gxio_uart_read(context, UART_FLAG); tilegx_putchar() 124 gxio_uart_write(context, UART_TRANSMIT_DATA, (unsigned long)c); tilegx_putchar() 137 gxio_uart_context_t *context = &tile_uart->context; handle_transmit() local 140 gxio_uart_write(context, UART_INTERRUPT_STATUS, handle_transmit() 146 if (tilegx_putchar(context, port->x_char)) handle_transmit() 157 if (tilegx_putchar(context, ch)) handle_transmit() 164 gxio_uart_write(context, UART_INTERRUPT_STATUS, handle_transmit() 180 gxio_uart_context_t *context; tilegx_interrupt() local 187 context = &tile_uart->context; tilegx_interrupt() 188 intr_stat.word = gxio_uart_read(context, UART_INTERRUPT_STATUS); tilegx_interrupt() 212 gxio_uart_context_t *context; tilegx_tx_empty() local 217 context = &tile_uart->context; tilegx_tx_empty() 219 flag.word = gxio_uart_read(context, UART_FLAG); tilegx_tx_empty() 262 gxio_uart_context_t *context; tilegx_start_tx() local 267 context = &tile_uart->context; tilegx_start_tx() 270 if (tilegx_putchar(context, port->x_char)) tilegx_start_tx() 283 if (tilegx_putchar(context, ch)) tilegx_start_tx() 303 gxio_uart_context_t *context; tilegx_stop_rx() local 310 context = &tile_uart->context; tilegx_stop_rx() 312 err = gxio_uart_cfg_interrupt(context, cpu_x(cpu), cpu_y(cpu), tilegx_stop_rx() 332 gxio_uart_context_t *context; tilegx_startup() local 339 context = &tile_uart->context; tilegx_startup() 342 if (context->fd < 0) { tilegx_startup() 346 ret = gxio_uart_init(context, port->line); tilegx_startup() 366 ret = gxio_uart_cfg_interrupt(context, cpu_x(cpu), cpu_y(cpu), tilegx_startup() 372 intr_mask.word = gxio_uart_read(context, UART_INTERRUPT_MASK); tilegx_startup() 375 gxio_uart_write(context, UART_INTERRUPT_MASK, intr_mask.word); tilegx_startup() 378 gxio_uart_write(context, UART_INTERRUPT_STATUS, tilegx_startup() 391 gxio_uart_destroy(context); tilegx_startup() 408 gxio_uart_context_t *context; tilegx_shutdown() local 414 context = &tile_uart->context; tilegx_shutdown() 417 intr_mask.word = gxio_uart_read(context, UART_INTERRUPT_MASK); tilegx_shutdown() 420 gxio_uart_write(context, UART_INTERRUPT_MASK, intr_mask.word); tilegx_shutdown() 424 err = gxio_uart_cfg_interrupt(context, cpu_x(cpu), cpu_y(cpu), tilegx_shutdown() 433 gxio_uart_destroy(context); tilegx_shutdown() 459 gxio_uart_context_t *context; tilegx_set_termios() local 464 context = &tile_uart->context; tilegx_set_termios() 467 if (context->fd < 0) { tilegx_set_termios() 468 err = gxio_uart_init(context, port->line); tilegx_set_termios() 475 divisor.word = gxio_uart_read(context, UART_DIVISOR); tilegx_set_termios() 476 type.word = gxio_uart_read(context, UART_TYPE); tilegx_set_termios() 510 gxio_uart_write(context, UART_DIVISOR, divisor.word); tilegx_set_termios() 511 gxio_uart_write(context, UART_TYPE, type.word); tilegx_set_termios() 571 * in an interrupt or debug context. 
577 gxio_uart_context_t *context; tilegx_poll_get_char() local 581 context = &tile_uart->context; tilegx_poll_get_char() 582 count.word = gxio_uart_read(context, UART_FIFO_COUNT); tilegx_poll_get_char() 585 return (char)gxio_uart_read(context, UART_RECEIVE_DATA); tilegx_poll_get_char() 590 gxio_uart_context_t *context; tilegx_poll_put_char() local 594 context = &tile_uart->context; tilegx_poll_put_char() 595 gxio_uart_write(context, UART_TRANSMIT_DATA, (unsigned long)c); tilegx_poll_put_char() 638 tile_uart_ports[i].context.fd = -1; tilegx_init_ports()
|
/linux-4.4.14/drivers/net/ethernet/mellanox/mlx4/ |
H A D | en_resources.c | 42 int user_prio, struct mlx4_qp_context *context) mlx4_en_fill_qp_context() 47 memset(context, 0, sizeof *context); mlx4_en_fill_qp_context() 48 context->flags = cpu_to_be32(7 << 16 | rss << MLX4_RSS_QPC_FLAG_OFFSET); mlx4_en_fill_qp_context() 49 context->pd = cpu_to_be32(mdev->priv_pdn); mlx4_en_fill_qp_context() 50 context->mtu_msgmax = 0xff; mlx4_en_fill_qp_context() 52 context->rq_size_stride = ilog2(size) << 3 | (ilog2(stride) - 4); mlx4_en_fill_qp_context() 54 context->sq_size_stride = ilog2(size) << 3 | (ilog2(stride) - 4); mlx4_en_fill_qp_context() 56 context->params2 |= MLX4_QP_BIT_FPP; mlx4_en_fill_qp_context() 59 context->sq_size_stride = ilog2(TXBB_SIZE) - 4; mlx4_en_fill_qp_context() 61 context->usr_page = cpu_to_be32(mdev->priv_uar.index); mlx4_en_fill_qp_context() 62 context->local_qpn = cpu_to_be32(qpn); mlx4_en_fill_qp_context() 63 context->pri_path.ackto = 1 & 0x07; mlx4_en_fill_qp_context() 64 context->pri_path.sched_queue = 0x83 | (priv->port - 1) << 6; mlx4_en_fill_qp_context() 66 context->pri_path.sched_queue |= user_prio << 3; mlx4_en_fill_qp_context() 67 context->pri_path.feup = MLX4_FEUP_FORCE_ETH_UP; mlx4_en_fill_qp_context() 69 context->pri_path.counter_index = priv->counter_index; mlx4_en_fill_qp_context() 70 context->cqn_send = cpu_to_be32(cqn); mlx4_en_fill_qp_context() 71 context->cqn_recv = cpu_to_be32(cqn); mlx4_en_fill_qp_context() 74 context->pri_path.counter_index != mlx4_en_fill_qp_context() 78 context->pri_path.fl |= MLX4_FL_ETH_SRC_CHECK_MC_LB; mlx4_en_fill_qp_context() 79 context->pri_path.control |= MLX4_CTRL_ETH_SRC_CHECK_IF_COUNTER; mlx4_en_fill_qp_context() 81 context->db_rec_addr = cpu_to_be64(priv->res.db.dma << 2); mlx4_en_fill_qp_context() 83 context->param3 |= cpu_to_be32(1 << 30); mlx4_en_fill_qp_context() 88 context->srqn = cpu_to_be32(7 << 28); /* this fills bits 30:28 */ mlx4_en_fill_qp_context() 40 mlx4_en_fill_qp_context(struct mlx4_en_priv *priv, int size, int stride, int is_tx, int rss, int qpn, int cqn, int user_prio, struct mlx4_qp_context *context) mlx4_en_fill_qp_context() argument
|
/linux-4.4.14/include/trace/events/ |
H A D | fence.h | 21 __field(unsigned int, context) 33 __entry->context = fence->context; 38 __entry->waiting_context = f1->context; 43 TP_printk("driver=%s timeline=%s context=%u seqno=%u " \ 44 "waits on driver=%s timeline=%s context=%u seqno=%u", 45 __get_str(driver), __get_str(timeline), __entry->context, 60 __field(unsigned int, context) 67 __entry->context = fence->context; 71 TP_printk("driver=%s timeline=%s context=%u seqno=%u", 72 __get_str(driver), __get_str(timeline), __entry->context,
|
/linux-4.4.14/include/linux/platform_data/ |
H A D | pca953x.h | 19 void *context; /* param to setup/teardown */ member in struct:pca953x_platform_data 23 void *context); 26 void *context);
|
H A D | at24.h | 21 * @context: optional parameter passed to setup() 29 * void get_mac_addr(struct memory_accessor *mem_acc, void *context) 32 * off_t offset = context; 39 * This function pointer and context can now be set up in at24_platform_data. 51 void (*setup)(struct memory_accessor *, void *context); 52 void *context; member in struct:at24_platform_data
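The comment above documents the whole pattern: board code supplies a setup() callback plus an opaque context pointer in at24_platform_data, and the EEPROM driver calls setup(mem_acc, context) once the device is reachable. A board-code sketch of the hookup follows; it assumes memory_accessor exposes a read(accessor, buf, offset, count)-style method (not shown in this excerpt), and both the EEPROM offset and the remaining platform-data fields are made up or omitted.

    #include <linux/platform_data/at24.h>

    static char board_mac[6];

    /* Runs once the at24 driver can reach the EEPROM; pulls out a MAC address. */
    static void board_get_mac(struct memory_accessor *mem_acc, void *context)
    {
        off_t offset = (off_t)(unsigned long)context;   /* where the MAC lives */

        /* Assumed accessor shape: read(accessor, buf, offset, count). */
        mem_acc->read(mem_acc, board_mac, offset, sizeof(board_mac));
    }

    static struct at24_platform_data board_eeprom_pdata = {
        /* byte length, page size, flags etc. omitted from this sketch */
        .setup   = board_get_mac,
        .context = (void *)0x10,                        /* made-up offset */
    };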
|
/linux-4.4.14/arch/metag/include/uapi/asm/ |
H A D | ech.h | 5 * These bits can be set in the top half of the D0.8 register when DSP context 6 * switching is enabled, in order to support partial DSP context save/restore. 9 #define TBICTX_XEXT_BIT 0x1000 /* Enable extended context save */
|
/linux-4.4.14/arch/alpha/include/asm/ |
H A D | mmu.h | 4 /* The alpha MMU context is one "unsigned long" bitmap per CPU */
|
/linux-4.4.14/arch/m68k/include/asm/ |
H A D | mmu.h | 5 /* Default "unsigned long" context */
|
H A D | mmu_context.h | 34 if (mm->context != NO_CONTEXT) get_mmu_context() 47 mm->context = ctx; get_mmu_context() 52 * Set up the context for a new address space. 54 #define init_new_context(tsk, mm) (((mm)->context = NO_CONTEXT), 0) 57 * We're finished using the context for an address space. 61 if (mm->context != NO_CONTEXT) { destroy_context() 62 clear_bit(mm->context, context_map); destroy_context() 63 mm->context = NO_CONTEXT; destroy_context() 68 static inline void set_context(mm_context_t context, pgd_t *pgd) set_context() argument 70 __asm__ __volatile__ ("movec %0,%%asid" : : "d" (context)); set_context() 77 set_context(tsk->mm->context, next->pgd); switch_mm() 82 * the context for the new mm so we see the new mappings. 88 set_context(mm->context, mm->pgd); activate_mm() 139 asid = mm->context & 0xff; load_ksp_mmu() 165 extern void clear_context(unsigned long context); 167 /* set the context for a new task to unmapped */ init_new_context() 171 mm->context = SUN3_INVALID_CONTEXT; init_new_context() 175 /* find the context given to this process, and if it hasn't already 179 if (mm->context == SUN3_INVALID_CONTEXT) get_mmu_context() 180 mm->context = get_free_context(mm); get_mmu_context() 183 /* flush context if allocated... */ destroy_context() 186 if (mm->context != SUN3_INVALID_CONTEXT) destroy_context() 187 clear_context(mm->context); destroy_context() 193 sun3_put_context(mm->context); activate_context() 219 mm->context = virt_to_phys(mm->pgd); init_new_context() 228 0x80000000 | _PAGE_TABLE, mm->context switch_mm_0230() 266 asm volatile ("movec %0,%%urp" : : "r" (mm->context)); switch_mm_0460() 297 next_mm->context = virt_to_phys(next_mm->pgd); activate_mm()
|
/linux-4.4.14/arch/arm64/mm/ |
H A D | Makefile | 4 context.o proc.o pageattr.o
|
/linux-4.4.14/tools/perf/scripts/perl/ |
H A D | check-perf-trace.pl | 30 my ($event_name, $context, $common_cpu, $common_secs, $common_nsecs, 37 print_uncommon($context); 45 my ($event_name, $context, $common_cpu, $common_secs, $common_nsecs, 53 print_uncommon($context); 65 my ($context) = @_; 68 common_pc($context), trace_flag_str(common_flags($context)), 69 common_lock_depth($context)); 94 my ($event_name, $context, $common_cpu, $common_secs, $common_nsecs,
|
H A D | wakeup-latency.pl | 30 my ($event_name, $context, $common_cpu, $common_secs, $common_nsecs, 53 my ($event_name, $context, $common_cpu, $common_secs, $common_nsecs, 103 my ($event_name, $context, $common_cpu, $common_secs, $common_nsecs,
|
/linux-4.4.14/tools/perf/scripts/python/ |
H A D | check-perf-trace.py | 28 def irq__softirq_entry(event_name, context, common_cpu, 34 print_uncommon(context) 39 def kmem__kmalloc(event_name, context, common_cpu, 46 print_uncommon(context) 54 def trace_unhandled(event_name, context, event_fields_dict): 65 def print_uncommon(context): 67 % (common_pc(context), trace_flag_str(common_flags(context)), \ 68 common_lock_depth(context))
|
H A D | netdev-times.py | 227 def irq__softirq_entry(name, context, cpu, sec, nsec, pid, comm, callchain, vec): 230 event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec) 233 def irq__softirq_exit(name, context, cpu, sec, nsec, pid, comm, callchain, vec): 236 event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec) 239 def irq__softirq_raise(name, context, cpu, sec, nsec, pid, comm, callchain, vec): 242 event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec) 245 def irq__irq_handler_entry(name, context, cpu, sec, nsec, pid, comm, 247 event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, 251 def irq__irq_handler_exit(name, context, cpu, sec, nsec, pid, comm, callchain, irq, ret): 252 event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, irq, ret) 255 def napi__napi_poll(name, context, cpu, sec, nsec, pid, comm, callchain, napi, dev_name): 256 event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, 260 def net__netif_receive_skb(name, context, cpu, sec, nsec, pid, comm, callchain, skbaddr, 262 event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, 266 def net__netif_rx(name, context, cpu, sec, nsec, pid, comm, callchain, skbaddr, 268 event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, 272 def net__net_dev_queue(name, context, cpu, sec, nsec, pid, comm, callchain, 274 event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, 278 def net__net_dev_xmit(name, context, cpu, sec, nsec, pid, comm, callchain, 280 event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, 284 def skb__kfree_skb(name, context, cpu, sec, nsec, pid, comm, callchain, 286 event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, 290 def skb__consume_skb(name, context, cpu, sec, nsec, pid, comm, callchain, skbaddr): 291 event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, 295 def skb__skb_copy_datagram_iovec(name, context, cpu, sec, nsec, pid, comm, callchain, 297 event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, 302 (name, context, cpu, time, pid, comm, irq, irq_name) = event_info 309 (name, context, cpu, time, pid, comm, irq, ret) = event_info 321 (name, context, cpu, time, pid, comm, vec) = event_info 335 (name, context, cpu, time, pid, comm, vec) = event_info 339 (name, context, cpu, time, pid, comm, vec) = event_info 357 (name, context, cpu, time, pid, comm, napi, dev_name) = event_info 365 (name, context, cpu, time, pid, comm, 383 (name, context, cpu, time, pid, comm, 398 (name, context, cpu, time, pid, comm, 409 (name, context, cpu, time, pid, comm, 424 (name, context, cpu, time, pid, comm, 447 (name, context, cpu, time, pid, comm, skbaddr) = event_info 457 (name, context, cpu, time, pid, comm, skbaddr, skblen) = event_info
|
H A D | sched-migration.py | 370 def sched__sched_stat_runtime(event_name, context, common_cpu, 375 def sched__sched_stat_iowait(event_name, context, common_cpu, 380 def sched__sched_stat_sleep(event_name, context, common_cpu, 385 def sched__sched_stat_wait(event_name, context, common_cpu, 390 def sched__sched_process_fork(event_name, context, common_cpu, 395 def sched__sched_process_wait(event_name, context, common_cpu, 400 def sched__sched_process_exit(event_name, context, common_cpu, 405 def sched__sched_process_free(event_name, context, common_cpu, 410 def sched__sched_migrate_task(event_name, context, common_cpu, 418 def sched__sched_switch(event_name, context, common_cpu, 428 def sched__sched_wakeup_new(event_name, context, common_cpu, 436 def sched__sched_wakeup(event_name, context, common_cpu, 444 def sched__sched_wait_task(event_name, context, common_cpu, 449 def sched__sched_kthread_stop_ret(event_name, context, common_cpu, 454 def sched__sched_kthread_stop(event_name, context, common_cpu, 459 def trace_unhandled(event_name, context, event_fields_dict):
|
/linux-4.4.14/arch/powerpc/mm/ |
H A D | mmu_context_hash64.c | 2 * MMU context allocation for 64-bit kernels. 77 mm->context.id = index; init_new_context() 79 mm->context.cop_lockp = kmalloc(sizeof(spinlock_t), GFP_KERNEL); init_new_context() 80 if (!mm->context.cop_lockp) { init_new_context() 83 mm->context.id = MMU_NO_CONTEXT; init_new_context() 86 spin_lock_init(mm->context.cop_lockp); init_new_context() 90 mm->context.pte_frag = NULL; init_new_context() 93 mm_iommu_init(&mm->context); init_new_context() 113 pte_frag = mm->context.pte_frag; destroy_pagetable_page() 139 mm_iommu_cleanup(&mm->context); destroy_context() 143 drop_cop(mm->context.acop, mm); destroy_context() 144 kfree(mm->context.cop_lockp); destroy_context() 145 mm->context.cop_lockp = NULL; destroy_context() 149 __destroy_context(mm->context.id); destroy_context() 151 mm->context.id = MMU_NO_CONTEXT; destroy_context()
|
H A D | icswx_pid.c | 62 if (mm->context.cop_pid == COP_PID_NONE) { get_cop_pid() 66 mm->context.cop_pid = pid; get_cop_pid() 68 return mm->context.cop_pid; get_cop_pid() 75 if ((!mm->context.acop) && (mm->context.cop_pid != COP_PID_NONE)) { disable_cop_pid() 76 free_pid = mm->context.cop_pid; disable_cop_pid() 77 mm->context.cop_pid = COP_PID_NONE; disable_cop_pid()
|
H A D | mmu_context_hash32.c | 34 * (virtual segment identifiers) for each context. Although the 40 * that we used to have when the context number overflowed, 50 * segment IDs). We use a skew on both the context and the high 4 bits 80 * Set up the context for a new address space. 84 mm->context.id = __init_new_context(); init_new_context() 90 * Free a context ID. Make sure to call this with preempt disabled! 99 * We're finished using the context for an address space. 104 if (mm->context.id != NO_CONTEXT) { destroy_context() 105 __destroy_context(mm->context.id); destroy_context() 106 mm->context.id = NO_CONTEXT; destroy_context() 112 * Initialize the context management stuff. 116 /* Reserve context 0 for kernel use */ mmu_context_init()
|
H A D | mmu_context_nohash.c | 19 * - The global context lock will not scale very well 22 * - Implement flush_tlb_mm() by making the context stale and picking 69 /* Steal a context from a task that has one at the moment. 74 * This isn't an LRU system, it just frees up each context in 79 * For context stealing, we use a slightly different approach for 100 if (mm->context.active) { steal_context_smp() 108 /* Mark this mm has having no context anymore */ steal_context_smp() 109 mm->context.id = MMU_NO_CONTEXT; steal_context_smp() 151 /* Mark this mm as having no context anymore */ steal_all_contexts() 152 mm->context.id = MMU_NO_CONTEXT; steal_all_contexts() 157 mm->context.active = 0; steal_all_contexts() 186 /* Flush the TLB for that context */ steal_context_up() 189 /* Mark this mm has having no context anymore */ steal_context_up() 190 mm->context.id = MMU_NO_CONTEXT; steal_context_up() 212 nact += context_mm[id]->context.active; context_check_map() 215 pr_err("MMU: Free context count out of sync ! (%d vs %d)\n", context_check_map() 237 pr_hard("[%d] activating context for mm @%p, active=%d, id=%d", switch_mmu_context() 238 cpu, next, next->context.active, next->context.id); switch_mmu_context() 242 next->context.active++; switch_mmu_context() 244 pr_hardcont(" (old=0x%p a=%d)", prev, prev->context.active); switch_mmu_context() 245 WARN_ON(prev->context.active < 1); switch_mmu_context() 246 prev->context.active--; switch_mmu_context() 252 /* If we already have a valid assigned context, skip all that */ switch_mmu_context() 253 id = next->context.id; switch_mmu_context() 263 /* We really don't have a context, let's try to acquire one */ switch_mmu_context() 287 /* We know there's at least one free context, try to find it */ switch_mmu_context() 296 next->context.id = id; switch_mmu_context() 302 /* If that context got marked stale on this CPU, then flush the switch_mmu_context() 327 * Set up the context for a new address space. 331 pr_hard("initing context for mm @%p\n", mm); init_new_context() 333 mm->context.id = MMU_NO_CONTEXT; init_new_context() 334 mm->context.active = 0; init_new_context() 345 * We're finished using the context for an address space. 352 if (mm->context.id == MMU_NO_CONTEXT) destroy_context() 355 WARN_ON(mm->context.active != 0); destroy_context() 358 id = mm->context.id; destroy_context() 361 mm->context.id = MMU_NO_CONTEXT; destroy_context() 363 mm->context.active = 0; destroy_context() 387 pr_devel("MMU: Allocating stale context map for CPU %d\n", cpu); mmu_context_cpu_notify() 395 pr_devel("MMU: Freeing stale context map for CPU %d\n", cpu); mmu_context_cpu_notify() 414 * Initialize the context management stuff. 422 init_mm.context.active = NR_CPUS; mmu_context_init() 463 * Allocate the maps used by context management mmu_context_init() 476 "MMU: Allocated %zu bytes of context maps for %d contexts\n", mmu_context_init() 482 * init_mm, and require using context 0 for a normal task. mmu_context_init() 483 * Other processors reserve the use of context zero for the kernel. mmu_context_init()
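Unlike the wraparound schemes, this allocator can run out of context numbers entirely and then steals one: the victim mm is marked as having no context (MMU_NO_CONTEXT), the TLB entries for that id are flushed or marked stale, and the freed number is handed to the new mm. The userspace sketch below reduces that to its core; the data structures and round-robin victim choice are illustrative, not the kernel's.

    #include <stdio.h>
    #include <stddef.h>

    #define NR_CTX         4
    #define MMU_NO_CONTEXT 0u

    struct toy_mm {
        const char *name;
        unsigned int context;
    };

    static struct toy_mm *context_mm[NR_CTX + 1];   /* who owns each context id */
    static unsigned int next_steal = 1;

    /* Take a context away from whoever holds it, round-robin. */
    static unsigned int steal_context(void)
    {
        unsigned int id = next_steal;
        struct toy_mm *victim = context_mm[id];

        if (victim) {
            printf("stealing ctx %u from %s\n", id, victim->name);
            victim->context = MMU_NO_CONTEXT;        /* victim must re-acquire */
            context_mm[id] = NULL;
            /* a real implementation flushes the TLB entries for this id here */
        }
        next_steal = (next_steal % NR_CTX) + 1;
        return id;
    }

    static void switch_mmu_context(struct toy_mm *next)
    {
        unsigned int id;

        if (next->context != MMU_NO_CONTEXT)
            return;                                  /* already has a valid id */
        for (id = 1; id <= NR_CTX; id++)
            if (!context_mm[id])
                break;
        if (id > NR_CTX)
            id = steal_context();                    /* none free: steal one */
        context_mm[id] = next;
        next->context = id;
        printf("%s -> ctx %u\n", next->name, id);
    }

    int main(void)
    {
        struct toy_mm mms[6] = {
            { "A", 0 }, { "B", 0 }, { "C", 0 },
            { "D", 0 }, { "E", 0 }, { "F", 0 },
        };

        for (int i = 0; i < 6; i++)
            switch_mmu_context(&mms[i]);
        return 0;
    }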
|
/linux-4.4.14/arch/frv/mm/ |
H A D | mmu-context.c | 1 /* mmu-context.c: MMU context allocation and management 27 * initialise a new context 31 memset(&mm->context, 0, sizeof(mm->context)); init_new_context() 32 INIT_LIST_HEAD(&mm->context.id_link); init_new_context() 33 mm->context.itlb_cached_pge = 0xffffffffUL; init_new_context() 34 mm->context.dtlb_cached_pge = 0xffffffffUL; init_new_context() 41 * make sure a kernel MMU context has a CPU context number 54 /* find the first unallocated context number get_cxn() 87 * restore the current TLB miss handler mapped page tables into the MMU context and set up a 96 /* save the state of the outgoing MMU context */ change_mm_context() 104 /* select an MMU context number */ change_mm_context() 112 /* restore the state of the incoming MMU context */ change_mm_context() 128 * finished with an MMU context number 132 mm_context_t *ctx = &mm->context; destroy_context() 151 * display the MMU context currently a process is currently using 157 buffer += sprintf(buffer, "CXNR: %u\n", mm->context.id); proc_pid_status_frv_cxnr() 166 * (un)pin a process's mm_struct's MMU context ID 203 cxn_pinned = get_cxn(&mm->context); cxn_pin_by_pid()
|
H A D | Makefile | 9 mmu-context.o dma-alloc.o elf-fdpic.o
|
/linux-4.4.14/arch/x86/include/asm/ |
H A D | init.h | 6 void *context; /* context for alloc_pgt_page */ member in struct:x86_mapping_info
|
H A D | mmu_context.h | 27 atomic_read(&mm->context.perf_rdpmc_allowed)) load_mm_cr4() 72 ldt = lockless_dereference(mm->context.ldt); load_mm_ldt() 75 * Any change to mm->context.ldt is followed by an IPI to all load_mm_ldt() 161 * It's possible that prev->context.ldt doesn't match switch_mm() 164 * prev->context.ldt but suppressed an IPI to this CPU. switch_mm() 165 * In this case, prev->context.ldt != NULL, because we switch_mm() 166 * never set context.ldt to NULL while the mm still switch_mm() 167 * exists. That means that next->context.ldt != switch_mm() 168 * prev->context.ldt, because mms never share an LDT. switch_mm() 170 if (unlikely(prev->context.ldt != next->context.ldt)) switch_mm() 182 * from irq context, from ptep_clear_flush() while in switch_mm() 239 !(mm->context.ia32_compat == TIF_IA32); is_64bit_mm()
|
H A D | mmu.h | 8 * The x86 doesn't have a mmu context, but
|
/linux-4.4.14/arch/mn10300/include/asm/ |
H A D | mmu.h | 1 /* MN10300 Memory management context 12 * MMU context
|
H A D | mmu_context.h | 1 /* MN10300 MMU context management 57 #define mm_context(mm) (mm->context.tlbpid[smp_processor_id()]) 61 * @mm: The userspace VM context being set up 83 * get an MMU context if one is needed 93 /* if we have an old version of the context, replace it */ get_mmu_context() 101 * initialise the context related info for a new mm_struct instance 109 mm->context.tlbpid[i] = MMU_NO_CONTEXT; init_new_context() 114 * after we have set current->mm to a new value, this activates the context for 129 * destroy_context - Destroy mm context information 132 * Destroy context related info for an mm_struct that is about to be put to 139 * @prev: The outgoing MM context. 140 * @next: The incoming MM context.
|
H A D | ucontext.h | 1 /* MN10300 User context
|
/linux-4.4.14/arch/metag/include/asm/ |
H A D | mmu_context.h | 22 /* We use context to store a pointer to the page holding the init_new_context() 24 * running the pgd and context fields should be equal. init_new_context() 26 mm->context.pgd_base = (unsigned long) mm->pgd; init_new_context() 29 INIT_LIST_HEAD(&mm->context.tcm); init_new_context() 43 list_for_each_entry_safe(pos, n, &mm->context.tcm, list) { list_for_each_entry_safe() 79 /* prev->context == prev->pgd in the case where we are initially switch_mmu() 81 if (prev->context.pgd_base != (unsigned long) prev->pgd) { switch_mmu() 83 ((pgd_t *) prev->context.pgd_base)[i] = prev->pgd[i]; switch_mmu() 88 prev->pgd = (pgd_t *) prev->context.pgd_base; switch_mmu() 91 next->pgd[i] = ((pgd_t *) next->context.pgd_base)[i]; switch_mmu()
|
/linux-4.4.14/fs/ocfs2/ |
H A D | move_extents.c | 59 struct ocfs2_move_extents_context *context, __ocfs2_move_extent() 64 struct inode *inode = context->inode; __ocfs2_move_extent() 69 u64 ino = ocfs2_metadata_cache_owner(context->et.et_ci); __ocfs2_move_extent() 85 path = ocfs2_new_path_from_et(&context->et); __ocfs2_move_extent() 118 context->et.et_root_bh, __ocfs2_move_extent() 125 ret = ocfs2_split_extent(handle, &context->et, path, index, __ocfs2_move_extent() 126 &replace_rec, context->meta_ac, __ocfs2_move_extent() 127 &context->dealloc); __ocfs2_move_extent() 133 ocfs2_journal_dirty(handle, context->et.et_root_bh); __ocfs2_move_extent() 135 context->new_phys_cpos = new_p_cpos; __ocfs2_move_extent() 145 len, context->meta_ac, __ocfs2_move_extent() 146 &context->dealloc, 1); __ocfs2_move_extent() 225 static int ocfs2_defrag_extent(struct ocfs2_move_extents_context *context, ocfs2_defrag_extent() argument 228 int ret, credits = 0, extra_blocks = 0, partial = context->partial; ocfs2_defrag_extent() 230 struct inode *inode = context->inode; ocfs2_defrag_extent() 242 BUG_ON(!context->refcount_loc); ocfs2_defrag_extent() 244 ret = ocfs2_lock_refcount_tree(osb, context->refcount_loc, 1, ocfs2_defrag_extent() 252 context->refcount_loc, ocfs2_defrag_extent() 263 ret = ocfs2_lock_allocators_move_extents(inode, &context->et, *len, 1, ocfs2_defrag_extent() 264 &context->meta_ac, ocfs2_defrag_extent() 265 &context->data_ac, ocfs2_defrag_extent() 275 * if (context->data_ac) ocfs2_defrag_extent() 276 * context->data_ac->ac_resv = &OCFS2_I(inode)->ip_la_data_resv; ocfs2_defrag_extent() 296 ret = __ocfs2_claim_clusters(handle, context->data_ac, 1, *len, ocfs2_defrag_extent() 312 context->range->me_flags &= ~OCFS2_MOVE_EXT_FL_COMPLETE; ocfs2_defrag_extent() 321 ret = __ocfs2_move_extent(handle, context, cpos, new_len, phys_cpos, ocfs2_defrag_extent() 333 ret = ocfs2_cow_sync_writeback(inode->i_sb, context->inode, cpos, *len); ocfs2_defrag_extent() 343 if (context->data_ac) { ocfs2_defrag_extent() 344 ocfs2_free_alloc_context(context->data_ac); ocfs2_defrag_extent() 345 context->data_ac = NULL; ocfs2_defrag_extent() 348 if (context->meta_ac) { ocfs2_defrag_extent() 349 ocfs2_free_alloc_context(context->meta_ac); ocfs2_defrag_extent() 350 context->meta_ac = NULL; ocfs2_defrag_extent() 563 static int ocfs2_move_extent(struct ocfs2_move_extents_context *context, ocfs2_move_extent() argument 569 struct inode *inode = context->inode; ocfs2_move_extent() 578 context->range->me_threshold); ocfs2_move_extent() 588 BUG_ON(!context->refcount_loc); ocfs2_move_extent() 590 ret = ocfs2_lock_refcount_tree(osb, context->refcount_loc, 1, ocfs2_move_extent() 598 context->refcount_loc, ocfs2_move_extent() 609 ret = ocfs2_lock_allocators_move_extents(inode, &context->et, len, 1, ocfs2_move_extent() 610 &context->meta_ac, ocfs2_move_extent() 675 ret = __ocfs2_move_extent(handle, context, cpos, len, phys_cpos, ocfs2_move_extent() 702 ret = ocfs2_cow_sync_writeback(inode->i_sb, context->inode, cpos, len); ocfs2_move_extent() 720 if (context->meta_ac) { ocfs2_move_extent() 721 ocfs2_free_alloc_context(context->meta_ac); ocfs2_move_extent() 722 context->meta_ac = NULL; ocfs2_move_extent() 762 struct ocfs2_move_extents_context *context) __ocfs2_move_extents_range() 768 struct inode *inode = context->inode; __ocfs2_move_extents_range() 770 struct ocfs2_move_extents *range = context->range; __ocfs2_move_extents_range() 779 context->refcount_loc = le64_to_cpu(di->i_refcount_loc); __ocfs2_move_extents_range() 781 
ocfs2_init_dinode_extent_tree(&context->et, INODE_CACHE(inode), di_bh); __ocfs2_move_extents_range() 782 ocfs2_init_dealloc_ctxt(&context->dealloc); __ocfs2_move_extents_range() 790 do_defrag = context->auto_defrag; __ocfs2_move_extents_range() 860 ret = ocfs2_defrag_extent(context, cpos, phys_cpos, __ocfs2_move_extents_range() 863 ret = ocfs2_move_extent(context, cpos, phys_cpos, __ocfs2_move_extents_range() 875 context->clusters_moved += alloc_size; __ocfs2_move_extents_range() 886 context->clusters_moved); __ocfs2_move_extents_range() 888 context->new_phys_cpos); __ocfs2_move_extents_range() 891 ocfs2_run_deallocs(osb, &context->dealloc); __ocfs2_move_extents_range() 896 static int ocfs2_move_extents(struct ocfs2_move_extents_context *context) ocfs2_move_extents() argument 900 struct inode *inode = context->inode; ocfs2_move_extents() 930 status = __ocfs2_move_extents_range(di_bh, context); ocfs2_move_extents() 983 struct ocfs2_move_extents_context *context; ocfs2_ioctl_move_extents() local 1002 context = kzalloc(sizeof(struct ocfs2_move_extents_context), GFP_NOFS); ocfs2_ioctl_move_extents() 1003 if (!context) { ocfs2_ioctl_move_extents() 1009 context->inode = inode; ocfs2_ioctl_move_extents() 1010 context->file = filp; ocfs2_ioctl_move_extents() 1025 context->range = ⦥ ocfs2_ioctl_move_extents() 1028 context->auto_defrag = 1; ocfs2_ioctl_move_extents() 1041 context->partial = 1; ocfs2_ioctl_move_extents() 1055 status = ocfs2_move_extents(context); ocfs2_ioctl_move_extents() 1068 kfree(context); ocfs2_ioctl_move_extents() 58 __ocfs2_move_extent(handle_t *handle, struct ocfs2_move_extents_context *context, u32 cpos, u32 len, u32 p_cpos, u32 new_p_cpos, int ext_flags) __ocfs2_move_extent() argument 761 __ocfs2_move_extents_range(struct buffer_head *di_bh, struct ocfs2_move_extents_context *context) __ocfs2_move_extents_range() argument
|
/linux-4.4.14/arch/blackfin/include/asm/ |
H A D | mmu_context.h | 64 mm->context.l1_stack_save = current_l1_stack_save = (void*)sp_base; activate_l1stack() 82 if (prev_mm->context.page_rwx_mask == current_rwx_mask[cpu]) { __switch_mm() 84 set_mask_dcplbs(next_mm->context.page_rwx_mask, cpu); __switch_mm() 90 if (!next_mm->context.l1_stack_save) __switch_mm() 92 if (next_mm->context.l1_stack_save == current_l1_stack_save) __switch_mm() 97 current_l1_stack_save = next_mm->context.l1_stack_save; __switch_mm() 123 unsigned long *mask = mm->context.page_rwx_mask; protect_page() 154 if (mm->context.page_rwx_mask == current_rwx_mask[cpu]) { update_protections() 156 set_mask_dcplbs(mm->context.page_rwx_mask, cpu); update_protections() 171 /* Called when creating a new context during fork() or execve(). */ 177 mm->context.page_rwx_mask = (unsigned long *)p; init_new_context() 178 memset(mm->context.page_rwx_mask, 0, init_new_context() 192 if (current_l1_stack_save == mm->context.l1_stack_save) destroy_context() 194 if (mm->context.l1_stack_save) destroy_context() 198 while ((tmp = mm->context.sram_list)) { destroy_context() 199 mm->context.sram_list = tmp->next; destroy_context() 204 if (current_rwx_mask[cpu] == mm->context.page_rwx_mask) destroy_context() 206 free_pages((unsigned long)mm->context.page_rwx_mask, page_mask_order); destroy_context()
|
/linux-4.4.14/include/linux/sunrpc/ |
H A D | auth_gss.h | 43 struct xdr_netobj gc_ctx; /* context handle */ 52 /* return from gss NULL PROC init sec context */ 54 struct xdr_netobj gr_ctx; /* context handle */ 62 * code needs to know about a single security context. In particular, 63 * gc_gss_ctx is the context handle that is used to do gss-api calls, while 64 * gc_wire_ctx is the context handle that is used to identify the context on
|
/linux-4.4.14/arch/score/include/asm/ |
H A D | mmu_context.h | 16 * into the context register. 57 mm->context = asid; get_new_mmu_context() 62 * Initialize the context related info for a new mm_struct 68 mm->context = 0; init_new_context() 78 if ((next->context ^ asid_cache) & ASID_VERSION_MASK) switch_mm() 81 pevn_set(next->context); switch_mm() 87 * Destroy context related info for an mm_struct that is about 99 * the context for the new mm so we see the new mappings. 108 pevn_set(next->context); activate_mm()
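The version check in switch_mm() above packs a generation tag into the upper bits of each mm's context value, so a single XOR-and-mask tells whether the stored ASID predates the last rollover and must be reallocated. A small illustration of that encoding follows, with made-up field widths rather than the score ones.

    #include <stdio.h>

    #define ASID_BITS          8
    #define ASID_MASK          ((1u << ASID_BITS) - 1)
    #define ASID_VERSION_MASK  (~ASID_MASK)
    #define ASID_FIRST_VERSION (1u << ASID_BITS)

    static unsigned int asid_cache = ASID_FIRST_VERSION;

    /* Hand out the next ASID; stepping past the low bits rolls the version. */
    static void get_new_mmu_context(unsigned int *mm_ctx)
    {
        unsigned int asid = asid_cache + 1;

        if (!(asid & ASID_MASK))
            printf("rollover -> version %#x, flush TLB\n", asid);
        asid_cache = asid;
        *mm_ctx = asid;
    }

    static void switch_mm(unsigned int *next_ctx)
    {
        if ((*next_ctx ^ asid_cache) & ASID_VERSION_MASK)
            get_new_mmu_context(next_ctx);       /* stale or never assigned */
        printf("running with ASID %#x\n", *next_ctx & ASID_MASK);
    }

    int main(void)
    {
        unsigned int mm_a = 0, mm_b = 0;

        switch_mm(&mm_a);   /* first use: allocates */
        switch_mm(&mm_b);
        switch_mm(&mm_a);   /* same version: keeps its ASID */
        return 0;
    }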
|
/linux-4.4.14/arch/hexagon/include/asm/ |
H A D | mmu_context.h | 2 * MM context support for the Hexagon architecture 52 * init_new_context - initialize context related info for new mm_struct instance 59 /* mm->context is set up by pgd_alloc */ init_new_context() 64 * Switch active mm context 75 if (next->context.generation < prev->context.generation) { switch_mm() 79 next->context.generation = prev->context.generation; switch_mm() 82 __vmnewmap((void *)next->context.ptbase); switch_mm()
|
/linux-4.4.14/tools/perf/scripts/python/Perf-Trace-Util/ |
H A D | Context.c | 31 PyObject *context; perf_trace_context_common_pc() local 34 if (!PyArg_ParseTuple(args, "O", &context)) perf_trace_context_common_pc() 37 scripting_context = PyCObject_AsVoidPtr(context); perf_trace_context_common_pc() 47 PyObject *context; perf_trace_context_common_flags() local 50 if (!PyArg_ParseTuple(args, "O", &context)) perf_trace_context_common_flags() 53 scripting_context = PyCObject_AsVoidPtr(context); perf_trace_context_common_flags() 63 PyObject *context; perf_trace_context_common_lock_depth() local 66 if (!PyArg_ParseTuple(args, "O", &context)) perf_trace_context_common_lock_depth() 69 scripting_context = PyCObject_AsVoidPtr(context); perf_trace_context_common_lock_depth()
|
/linux-4.4.14/drivers/infiniband/core/ |
H A D | umem_odp.c | 82 static void ib_ucontext_notifier_start_account(struct ib_ucontext *context) ib_ucontext_notifier_start_account() argument 84 atomic_inc(&context->notifier_count); ib_ucontext_notifier_start_account() 91 static void ib_ucontext_notifier_end_account(struct ib_ucontext *context) ib_ucontext_notifier_end_account() argument 93 int zero_notifiers = atomic_dec_and_test(&context->notifier_count); ib_ucontext_notifier_end_account() 96 !list_empty(&context->no_private_counters)) { ib_ucontext_notifier_end_account() 103 down_write(&context->umem_rwsem); ib_ucontext_notifier_end_account() 107 if (!atomic_read(&context->notifier_count)) { ib_ucontext_notifier_end_account() 109 &context->no_private_counters, ib_ucontext_notifier_end_account() 119 up_write(&context->umem_rwsem); ib_ucontext_notifier_end_account() 135 item->context->invalidate_range(item, ib_umem_start(item), ib_umem_notifier_release_trampoline() 143 struct ib_ucontext *context = container_of(mn, struct ib_ucontext, mn); ib_umem_notifier_release() local 145 if (!context->invalidate_range) ib_umem_notifier_release() 148 ib_ucontext_notifier_start_account(context); ib_umem_notifier_release() 149 down_read(&context->umem_rwsem); ib_umem_notifier_release() 150 rbt_ib_umem_for_each_in_range(&context->umem_tree, 0, ib_umem_notifier_release() 154 up_read(&context->umem_rwsem); ib_umem_notifier_release() 161 item->context->invalidate_range(item, start, start + PAGE_SIZE); invalidate_page_trampoline() 170 struct ib_ucontext *context = container_of(mn, struct ib_ucontext, mn); ib_umem_notifier_invalidate_page() local 172 if (!context->invalidate_range) ib_umem_notifier_invalidate_page() 175 ib_ucontext_notifier_start_account(context); ib_umem_notifier_invalidate_page() 176 down_read(&context->umem_rwsem); ib_umem_notifier_invalidate_page() 177 rbt_ib_umem_for_each_in_range(&context->umem_tree, address, ib_umem_notifier_invalidate_page() 180 up_read(&context->umem_rwsem); ib_umem_notifier_invalidate_page() 181 ib_ucontext_notifier_end_account(context); ib_umem_notifier_invalidate_page() 188 item->context->invalidate_range(item, start, end); invalidate_range_start_trampoline() 197 struct ib_ucontext *context = container_of(mn, struct ib_ucontext, mn); ib_umem_notifier_invalidate_range_start() local 199 if (!context->invalidate_range) ib_umem_notifier_invalidate_range_start() 202 ib_ucontext_notifier_start_account(context); ib_umem_notifier_invalidate_range_start() 203 down_read(&context->umem_rwsem); ib_umem_notifier_invalidate_range_start() 204 rbt_ib_umem_for_each_in_range(&context->umem_tree, start, ib_umem_notifier_invalidate_range_start() 207 up_read(&context->umem_rwsem); ib_umem_notifier_invalidate_range_start() 222 struct ib_ucontext *context = container_of(mn, struct ib_ucontext, mn); ib_umem_notifier_invalidate_range_end() local 224 if (!context->invalidate_range) ib_umem_notifier_invalidate_range_end() 227 down_read(&context->umem_rwsem); ib_umem_notifier_invalidate_range_end() 228 rbt_ib_umem_for_each_in_range(&context->umem_tree, start, ib_umem_notifier_invalidate_range_end() 231 up_read(&context->umem_rwsem); ib_umem_notifier_invalidate_range_end() 232 ib_ucontext_notifier_end_account(context); ib_umem_notifier_invalidate_range_end() 242 int ib_umem_odp_get(struct ib_ucontext *context, struct ib_umem *umem) ib_umem_odp_get() argument 256 if (context->tgid != our_pid) { ib_umem_odp_get() 292 down_write(&context->umem_rwsem); ib_umem_odp_get() 293 context->odp_mrs_count++; ib_umem_odp_get() 296 
&context->umem_tree); ib_umem_odp_get() 297 if (likely(!atomic_read(&context->notifier_count)) || ib_umem_odp_get() 298 context->odp_mrs_count == 1) ib_umem_odp_get() 302 &context->no_private_counters); ib_umem_odp_get() 303 downgrade_write(&context->umem_rwsem); ib_umem_odp_get() 305 if (context->odp_mrs_count == 1) { ib_umem_odp_get() 308 * for this context! ib_umem_odp_get() 310 atomic_set(&context->notifier_count, 0); ib_umem_odp_get() 311 INIT_HLIST_NODE(&context->mn.hlist); ib_umem_odp_get() 312 context->mn.ops = &ib_umem_notifiers; ib_umem_odp_get() 318 ret_val = mmu_notifier_register(&context->mn, mm); ib_umem_odp_get() 327 up_read(&context->umem_rwsem); ib_umem_odp_get() 339 up_read(&context->umem_rwsem); ib_umem_odp_get() 352 struct ib_ucontext *context = umem->context; ib_umem_odp_release() local 363 down_write(&context->umem_rwsem); ib_umem_odp_release() 366 &context->umem_tree); ib_umem_odp_release() 367 context->odp_mrs_count--; ib_umem_odp_release() 380 downgrade_write(&context->umem_rwsem); ib_umem_odp_release() 381 if (!context->odp_mrs_count) { ib_umem_odp_release() 385 owning_process = get_pid_task(context->tgid, ib_umem_odp_release() 401 mmu_notifier_unregister(&context->mn, owning_mm); ib_umem_odp_release() 409 up_read(&context->umem_rwsem); ib_umem_odp_release() 443 struct ib_device *dev = umem->context->device; ib_umem_odp_map_dma_single_page() 482 if (umem->context->invalidate_range || !stored_page) ib_umem_odp_map_dma_single_page() 485 if (remove_existing_mapping && umem->context->invalidate_range) { ib_umem_odp_map_dma_single_page() 547 owning_process = get_pid_task(umem->context->tgid, PIDTYPE_PID); ib_umem_odp_map_dma_pages() 626 struct ib_device *dev = umem->context->device; ib_umem_odp_unmap_dma_pages() 661 if (!umem->context->invalidate_range) ib_umem_odp_unmap_dma_pages()
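The start/end accounting in this file addresses a specific race: a umem registered while an MMU-notifier invalidation is in flight cannot be inserted into the interval tree right away, so it is parked on no_private_counters and only spliced in once notifier_count drops back to zero. The single-threaded sketch below keeps just that bookkeeping; the real code uses an atomic counter plus umem_rwsem, and the names here are stand-ins for the ib_ucontext fields.

    #include <stdio.h>
    #include <stddef.h>

    struct toy_umem {
        int id;
        struct toy_umem *next;
    };

    static int notifier_count;            /* invalidations currently running */
    static struct toy_umem *tree;         /* stands in for the interval tree */
    static struct toy_umem *parked;       /* registered mid-invalidation */

    static void notifier_start(void)
    {
        notifier_count++;
    }

    static void notifier_end(void)
    {
        if (--notifier_count)
            return;
        /* Last invalidation finished: now safe to insert the parked umems. */
        while (parked) {
            struct toy_umem *u = parked;

            parked = u->next;
            u->next = tree;
            tree = u;
            printf("umem %d moved from parked list into tree\n", u->id);
        }
    }

    static void register_umem(struct toy_umem *u)
    {
        if (notifier_count) {
            u->next = parked;             /* defer: invalidation in flight */
            parked = u;
            printf("umem %d parked\n", u->id);
        } else {
            u->next = tree;
            tree = u;
            printf("umem %d inserted\n", u->id);
        }
    }

    int main(void)
    {
        struct toy_umem a = { 1, NULL }, b = { 2, NULL };

        register_umem(&a);                /* quiescent: straight into the tree */
        notifier_start();
        register_umem(&b);                /* parked until the invalidation ends */
        notifier_end();
        return 0;
    }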
|
H A D | device.c | 252 struct ib_client_data *context; add_client_context() local 255 context = kmalloc(sizeof *context, GFP_KERNEL); add_client_context() 256 if (!context) { add_client_context() 257 printk(KERN_WARNING "Couldn't allocate client context for %s/%s\n", add_client_context() 262 context->client = client; add_client_context() 263 context->data = NULL; add_client_context() 264 context->going_down = false; add_client_context() 268 list_add(&context->list, &device->client_data_list); add_client_context() 386 struct ib_client_data *context, *tmp; ib_unregister_device() local 394 list_for_each_entry_safe(context, tmp, &device->client_data_list, list) ib_unregister_device() 395 context->going_down = true; ib_unregister_device() 399 list_for_each_entry_safe(context, tmp, &device->client_data_list, ib_unregister_device() 401 if (context->client->remove) ib_unregister_device() 402 context->client->remove(device, context->data); ib_unregister_device() 413 list_for_each_entry_safe(context, tmp, &device->client_data_list, list) ib_unregister_device() 414 kfree(context); ib_unregister_device() 465 struct ib_client_data *context, *tmp; ib_unregister_client() local 480 list_for_each_entry_safe(context, tmp, &device->client_data_list, list) ib_unregister_client() 481 if (context->client == client) { ib_unregister_client() 482 context->going_down = true; ib_unregister_client() 483 found_context = context; ib_unregister_client() 494 pr_warn("No client context found for %s/%s\n", ib_unregister_client() 512 * ib_get_client_data - Get IB client context 513 * @device:Device to get context for 514 * @client:Client to get context for 516 * ib_get_client_data() returns client context set with 521 struct ib_client_data *context; ib_get_client_data() local 526 list_for_each_entry(context, &device->client_data_list, list) ib_get_client_data() 527 if (context->client == client) { ib_get_client_data() 528 ret = context->data; ib_get_client_data() 538 * ib_set_client_data - Set IB client context 539 * @device:Device to set context for 540 * @client:Client to set context for 543 * ib_set_client_data() sets client context that can be retrieved with 549 struct ib_client_data *context; ib_set_client_data() local 553 list_for_each_entry(context, &device->client_data_list, list) ib_set_client_data() 554 if (context->client == client) { ib_set_client_data() 555 context->data = data; ib_set_client_data() 559 printk(KERN_WARNING "No client context found for %s/%s\n", ib_set_client_data() 574 * callback may occur in interrupt context. 921 struct ib_client_data *context; ib_get_net_dev_by_params() local 928 list_for_each_entry(context, &dev->client_data_list, list) { ib_get_net_dev_by_params() 929 struct ib_client *client = context->client; ib_get_net_dev_by_params() 931 if (context->going_down) ib_get_net_dev_by_params() 937 context->data); ib_get_net_dev_by_params()
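ib_set_client_data()/ib_get_client_data() give every registered client one private pointer per device, stored in the client_data_list that the functions above walk. The sketch below shows the usual usage from a client's add/remove callbacks; "example" and its state struct are hypothetical, and the callback signatures assume this kernel version's struct ib_client (remove() receiving the stored pointer, as the code above shows).

    #include <linux/slab.h>
    #include <rdma/ib_verbs.h>

    struct example_state {
        int opens;                        /* whatever per-device state is needed */
    };

    static struct ib_client example_client;

    /* Called once per IB device: allocate state and attach it to the device. */
    static void example_add_one(struct ib_device *device)
    {
        struct example_state *st = kzalloc(sizeof(*st), GFP_KERNEL);

        if (!st)
            return;
        ib_set_client_data(device, &example_client, st);
    }

    /* Called on device or client removal: the stored pointer comes back to us. */
    static void example_remove_one(struct ib_device *device, void *client_data)
    {
        kfree(client_data);
    }

    static struct ib_client example_client = {
        .name   = "example",
        .add    = example_add_one,
        .remove = example_remove_one,
    };

Module init/exit would pair this with ib_register_client()/ib_unregister_client(), and anywhere else the client needs its state it calls ib_get_client_data(device, &example_client).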
|
/linux-4.4.14/arch/xtensa/include/asm/ |
H A D | mmu_context.h | 2 * Switch an MMU context. 38 * any user or kernel context. We use the reserved values in the 80 mm->context.asid[cpu] = asid; get_new_mmu_context() 81 mm->context.cpu = cpu; get_new_mmu_context() 91 unsigned long asid = mm->context.asid[cpu]; get_mmu_context() 102 set_rasid_register(ASID_INSERT(mm->context.asid[cpu])); activate_context() 107 * Initialize the context related info for a new mm_struct 117 mm->context.asid[cpu] = NO_CONTEXT; for_each_possible_cpu() 119 mm->context.cpu = -1; 127 int migrated = next->context.cpu != cpu; switch_mm() 131 next->context.cpu = cpu; switch_mm() 141 * Destroy context related info for an mm_struct that is about
|
/linux-4.4.14/drivers/gpu/drm/ |
H A D | drm_lock.c | 41 static int drm_lock_take(struct drm_lock_data *lock_data, unsigned int context); 67 if (lock->context == DRM_KERNEL_CONTEXT) { drm_legacy_lock() 68 DRM_ERROR("Process %d using kernel context %d\n", drm_legacy_lock() 69 task_pid_nr(current), lock->context); drm_legacy_lock() 74 lock->context, task_pid_nr(current), drm_legacy_lock() 90 if (drm_lock_take(&master->lock, lock->context)) { drm_legacy_lock() 111 DRM_DEBUG("%d %s\n", lock->context, drm_legacy_lock() 119 dev->sigdata.context = lock->context; drm_legacy_lock() 127 lock->context); drm_legacy_lock() 154 if (lock->context == DRM_KERNEL_CONTEXT) { drm_legacy_unlock() 155 DRM_ERROR("Process %d using kernel context %d\n", drm_legacy_unlock() 156 task_pid_nr(current), lock->context); drm_legacy_unlock() 160 if (drm_legacy_lock_free(&master->lock, lock->context)) { drm_legacy_unlock() 171 * \param context locking context. 174 * Attempt to mark the lock as held by the given context, via the \p cmpxchg instruction. 178 unsigned int context) drm_lock_take() 189 new = context | _DRM_LOCK_HELD | drm_lock_take() 197 if (_DRM_LOCKING_CONTEXT(old) == context) { drm_lock_take() 199 if (context != DRM_KERNEL_CONTEXT) { drm_lock_take() 201 context); drm_lock_take() 207 if ((_DRM_LOCKING_CONTEXT(new)) == context && (new & _DRM_LOCK_HELD)) { drm_lock_take() 215 * This takes a lock forcibly and hands it to context. Should ONLY be used 220 * \param context locking context. 224 * Marks the lock as held by the given context, via the \p cmpxchg instruction. 227 unsigned int context) drm_lock_transfer() 235 new = context | _DRM_LOCK_HELD; drm_lock_transfer() 246 * \param context context. 252 int drm_legacy_lock_free(struct drm_lock_data *lock_data, unsigned int context) drm_legacy_lock_free() argument 272 if (_DRM_LOCK_IS_HELD(old) && _DRM_LOCKING_CONTEXT(old) != context) { drm_legacy_lock_free() 274 context, _DRM_LOCKING_CONTEXT(old)); drm_legacy_lock_free() 283 * with the kernel context if it is free, otherwise it gets the highest priority when and if 177 drm_lock_take(struct drm_lock_data *lock_data, unsigned int context) drm_lock_take() argument 226 drm_lock_transfer(struct drm_lock_data *lock_data, unsigned int context) drm_lock_transfer() argument
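drm_lock_take() above encodes the owning context and a held flag in a single lock word and claims it with cmpxchg, retrying when another context wins the race. The userspace sketch below reproduces that encoding with C11 atomics; the bit layout is illustrative rather than the _DRM_LOCK_* values, and the waiter handling of the real code is left out.

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    #define LOCK_HELD       0x80000000u           /* illustrative "held" bit */
    #define LOCK_CONTEXT(x) ((x) & ~LOCK_HELD)

    static _Atomic unsigned int lock_word;

    /* Try to mark the lock as held by `context`; fails if someone else has it. */
    static bool lock_take(unsigned int context)
    {
        unsigned int old, new;

        do {
            old = atomic_load(&lock_word);
            if (old & LOCK_HELD)
                return LOCK_CONTEXT(old) == context;   /* already ours? */
            new = context | LOCK_HELD;
        } while (!atomic_compare_exchange_weak(&lock_word, &old, new));
        return true;
    }

    /* Release only if we are still the recorded owner (waiters ignored here). */
    static void lock_free(unsigned int context)
    {
        unsigned int held = context | LOCK_HELD;

        atomic_compare_exchange_strong(&lock_word, &held, 0u);
    }

    int main(void)
    {
        printf("ctx 5 take: %d\n", lock_take(5));   /* 1: acquired */
        printf("ctx 7 take: %d\n", lock_take(7));   /* 0: held by ctx 5 */
        lock_free(5);
        printf("ctx 7 take: %d\n", lock_take(7));   /* 1: acquired */
        return 0;
    }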
|
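drm_lock_take() is described above as marking the lock held for a given context via cmpxchg. The stand-alone sketch below shows that compare-and-swap pattern in isolation; the MY_LOCK_HELD bit, the mask and the function name are invented for illustration and are not the DRM API.

/* Pack a "held" bit plus the owning context into one word and claim it
 * atomically; retry if another context races with us. */
#include <linux/atomic.h>

#define MY_LOCK_HELD	0x80000000U
#define MY_CTX_MASK	0x7fffffffU

static int my_lock_take(unsigned int *lock, unsigned int context)
{
	unsigned int old, new, prev;

	do {
		old = *lock;
		if (old & MY_LOCK_HELD)
			new = old;			/* already held: keep the owner */
		else
			new = context | MY_LOCK_HELD;	/* free: claim it for us */
		prev = cmpxchg(lock, old, new);
	} while (prev != old);				/* lost a race, try again */

	return !(old & MY_LOCK_HELD);			/* 1 only if we took it */
}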
H A D | drm_context.c | 45 * Free a handle from the context bitmap. 48 * \param ctx_handle context handle. 69 * \return (non-negative) context handle on success or a negative number on failure. 161 * Get per-context SAREA. 209 * Set per-context SAREA. 257 /** \name The actual DRM context handling routines */ 261 * Switch context. 264 * \param old old context handle. 265 * \param new new context handle. 288 * Complete context switch. 291 * \param new new context handle. 304 DRM_ERROR("Lock isn't held after context switch\n"); drm_context_switch_complete() 307 /* If a context switch is ever initiated drm_context_switch_complete() 349 * Add context. 357 * Get a new handle for the context and copy to userspace. 371 /* Skip kernel's context and get a new one. */ drm_legacy_addctx() 399 * Get context. 416 /* This is 0, because we don't handle any context flags */ drm_legacy_getctx() 423 * Switch context. 447 * New context. 473 * Remove context. 481 * If not the special kernel context, calls ctxbitmap_free() to free the specified context.
|
/linux-4.4.14/arch/microblaze/include/asm/ |
H A D | mmu_context_mm.h | 22 * segment IDs). We use a skew on both the context and the high 4 bits 45 * Set the current MMU context. 54 extern void set_context(mm_context_t context, pgd_t *pgd); 63 * This caches the next context number that we expect to be free. 64 * Its use is an optimization only, we can't rely on this context 79 * Get a new mmu context for the address space described by `mm'. 85 if (mm->context != NO_CONTEXT) get_mmu_context() 96 mm->context = ctx; get_mmu_context() 101 * Set up the context for a new address space. 103 # define init_new_context(tsk, mm) (((mm)->context = NO_CONTEXT), 0) 106 * We're finished using the context for an address space. 110 if (mm->context != NO_CONTEXT) { destroy_context() 111 clear_bit(mm->context, context_map); destroy_context() 112 mm->context = NO_CONTEXT; destroy_context() 122 set_context(next->context, next->pgd); switch_mm() 127 * the context for the new mm so we see the new mappings. 134 set_context(mm->context, mm->pgd); activate_mm()
|
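The header above describes handing out MMU context numbers from a small pool, caching the next number expected to be free and returning numbers in destroy_context(). Below is a generic, hedged sketch of that bitmap allocator; it is not the MicroBlaze code, and callers are assumed to serialize (for example under the context lock).

#include <linux/bitops.h>

#define NR_CTX	256
#define NO_CTX	(~0UL)

static unsigned long ctx_map[BITS_TO_LONGS(NR_CTX)];
static unsigned long next_ctx;			/* hint: next number to try */

static unsigned long ctx_alloc(void)
{
	unsigned long ctx = find_next_zero_bit(ctx_map, NR_CTX, next_ctx);

	if (ctx >= NR_CTX)			/* wrap around and rescan */
		ctx = find_first_zero_bit(ctx_map, NR_CTX);
	if (ctx >= NR_CTX)
		return NO_CTX;			/* exhausted: caller must steal one */
	set_bit(ctx, ctx_map);
	next_ctx = ctx + 1;
	return ctx;
}

static void ctx_free(unsigned long ctx)
{
	if (ctx != NO_CTX)
		clear_bit(ctx, ctx_map);
}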
/linux-4.4.14/arch/sparc/mm/ |
H A D | tsb.c | 77 spin_lock_irqsave(&mm->context.lock, flags); flush_tsb_user() 80 base = (unsigned long) mm->context.tsb_block[MM_TSB_BASE].tsb; flush_tsb_user() 81 nentries = mm->context.tsb_block[MM_TSB_BASE].tsb_nentries; flush_tsb_user() 87 if (tb->huge && mm->context.tsb_block[MM_TSB_HUGE].tsb) { flush_tsb_user() 88 base = (unsigned long) mm->context.tsb_block[MM_TSB_HUGE].tsb; flush_tsb_user() 89 nentries = mm->context.tsb_block[MM_TSB_HUGE].tsb_nentries; flush_tsb_user() 95 spin_unlock_irqrestore(&mm->context.lock, flags); flush_tsb_user() 102 spin_lock_irqsave(&mm->context.lock, flags); flush_tsb_user_page() 105 base = (unsigned long) mm->context.tsb_block[MM_TSB_BASE].tsb; flush_tsb_user_page() 106 nentries = mm->context.tsb_block[MM_TSB_BASE].tsb_nentries; flush_tsb_user_page() 112 if (huge && mm->context.tsb_block[MM_TSB_HUGE].tsb) { flush_tsb_user_page() 113 base = (unsigned long) mm->context.tsb_block[MM_TSB_HUGE].tsb; flush_tsb_user_page() 114 nentries = mm->context.tsb_block[MM_TSB_HUGE].tsb_nentries; flush_tsb_user_page() 120 spin_unlock_irqrestore(&mm->context.lock, flags); flush_tsb_user_page() 136 mm->context.tsb_block[tsb_idx].tsb_nentries = setup_tsb_params() 153 tsb_paddr = __pa(mm->context.tsb_block[tsb_idx].tsb); setup_tsb_params() 214 mm->context.tsb_block[tsb_idx].tsb_reg_val = tsb_reg; setup_tsb_params() 215 mm->context.tsb_block[tsb_idx].tsb_map_vaddr = 0; setup_tsb_params() 216 mm->context.tsb_block[tsb_idx].tsb_map_pte = 0; setup_tsb_params() 222 mm->context.tsb_block[tsb_idx].tsb_reg_val = tsb_reg; setup_tsb_params() 223 mm->context.tsb_block[tsb_idx].tsb_map_vaddr = base; setup_tsb_params() 224 mm->context.tsb_block[tsb_idx].tsb_map_pte = tte; setup_tsb_params() 229 struct hv_tsb_descr *hp = &mm->context.tsb_descr[tsb_idx]; setup_tsb_params() 369 if (mm->context.tsb_block[tsb_index].tsb == NULL && tsb_grow() 380 if (mm->context.tsb_block[tsb_index].tsb != NULL) tsb_grow() 381 mm->context.tsb_block[tsb_index].tsb_rss_limit = ~0UL; tsb_grow() 392 * We have to hold mm->context.lock while committing to the tsb_grow() 410 spin_lock_irqsave(&mm->context.lock, flags); tsb_grow() 412 old_tsb = mm->context.tsb_block[tsb_index].tsb; tsb_grow() 414 (mm->context.tsb_block[tsb_index].tsb_reg_val & 0x7UL); tsb_grow() 415 old_size = (mm->context.tsb_block[tsb_index].tsb_nentries * tsb_grow() 424 (rss < mm->context.tsb_block[tsb_index].tsb_rss_limit))) { tsb_grow() 425 spin_unlock_irqrestore(&mm->context.lock, flags); tsb_grow() 431 mm->context.tsb_block[tsb_index].tsb_rss_limit = new_rss_limit; tsb_grow() 448 mm->context.tsb_block[tsb_index].tsb = new_tsb; tsb_grow() 451 spin_unlock_irqrestore(&mm->context.lock, flags); tsb_grow() 477 spin_lock_init(&mm->context.lock); init_new_context() 479 mm->context.sparc64_ctx_val = 0UL; init_new_context() 486 huge_pte_count = mm->context.huge_pte_count; init_new_context() 487 mm->context.huge_pte_count = 0; init_new_context() 495 mm->context.tsb_block[i].tsb = NULL; init_new_context() 507 if (unlikely(!mm->context.tsb_block[MM_TSB_BASE].tsb)) init_new_context() 530 tsb_destroy_one(&mm->context.tsb_block[i]); destroy_context() 534 if (CTX_VALID(mm->context)) { destroy_context() 535 unsigned long nr = CTX_NRBITS(mm->context); destroy_context()
|
/linux-4.4.14/drivers/usb/image/ |
H A D | microtek.c | 77 * 20000515 Put transfer context and URB in mts_desc (john) 190 MTS_DEBUG("transfer = 0x%x context = 0x%x\n",(int)transfer,(int)context ); \ 191 MTS_DEBUG("status = 0x%x data-length = 0x%x sent = 0x%x\n",transfer->status,(int)context->data_length, (int)transfer->actual_length ); \ 192 mts_debug_dump(context->instance);\ 207 struct mts_transfer_context* context = (struct mts_transfer_context*)transfer->context; \ 375 /* Interrupt context! */ mts_int_submit_urb() 377 /* Holding transfer->context->lock! */ mts_int_submit_urb() 384 context->instance->usb_dev, mts_int_submit_urb() 389 context mts_int_submit_urb() 395 context->srb->result = DID_ERROR << 16; mts_int_submit_urb() 402 /* Interrupt context! */ mts_transfer_cleanup() 406 if ( likely(context->final_callback != NULL) ) mts_transfer_cleanup() 407 context->final_callback(context->srb); mts_transfer_cleanup() 414 context->srb->result &= MTS_SCSI_ERR_MASK; mts_transfer_done() 415 context->srb->result |= (unsigned)(*context->scsi_status)<<1; mts_transfer_done() 422 /* Interrupt context! */ mts_get_status() 427 usb_rcvbulkpipe(context->instance->usb_dev, mts_get_status() 428 context->instance->ep_response), mts_get_status() 429 context->scsi_status, mts_get_status() 435 /* Interrupt context! */ mts_data_done() 440 if ( context->data_length != transfer->actual_length ) { mts_data_done() 441 scsi_set_resid(context->srb, context->data_length - mts_data_done() 444 context->srb->result = (status == -ENOENT ? DID_ABORT : DID_ERROR)<<16; mts_data_done() 452 /* Interrupt context! */ mts_command_done() 461 context->srb->result = DID_ABORT<<16; mts_command_done() 466 context->srb->result = DID_ERROR<<16; mts_command_done() 473 if (context->srb->cmnd[0] == REQUEST_SENSE) { mts_command_done() 475 context->data_pipe, mts_command_done() 476 context->srb->sense_buffer, mts_command_done() 477 context->data_length, mts_command_done() 479 } else { if ( context->data ) { mts_command_done() 481 context->data_pipe, mts_command_done() 482 context->data, mts_command_done() 483 context->data_length, mts_command_done() 484 scsi_sg_count(context->srb) > 1 ? mts_command_done() 498 MTS_DEBUG("Processing fragment %d of %d\n", context->fragment, mts_do_sg() 499 scsi_sg_count(context->srb)); mts_do_sg() 502 context->srb->result = (status == -ENOENT ? DID_ABORT : DID_ERROR)<<16; mts_do_sg() 506 sg = scsi_sglist(context->srb); mts_do_sg() 507 context->fragment++; mts_do_sg() 509 context->data_pipe, mts_do_sg() 510 sg_virt(&sg[context->fragment]), mts_do_sg() 511 sg[context->fragment].length, mts_do_sg() 512 context->fragment + 1 == scsi_sg_count(context->srb) ? mts_do_sg() 536 desc->context.instance = desc; mts_build_transfer_context() 537 desc->context.srb = srb; mts_build_transfer_context() 538 desc->context.fragment = 0; mts_build_transfer_context() 541 desc->context.data = NULL; mts_build_transfer_context() 542 desc->context.data_length = 0; mts_build_transfer_context() 546 desc->context.data = sg_virt(&sg[0]); mts_build_transfer_context() 547 desc->context.data_length = sg[0].length; mts_build_transfer_context() 568 desc->context.data_pipe = pipe; mts_build_transfer_context() 604 &desc->context mts_scsi_queuecommand_lck() 609 desc->context.final_callback = callback; mts_scsi_queuecommand_lck() 742 new_desc->context.scsi_status = kmalloc(1, GFP_KERNEL); mts_usb_probe() 743 if (!new_desc->context.scsi_status) mts_usb_probe() 784 kfree(new_desc->context.scsi_status); mts_usb_probe() 804 kfree(desc->context.scsi_status); mts_usb_disconnect()
|
/linux-4.4.14/drivers/misc/cxl/ |
H A D | Makefile | 4 cxl-y += context.o sysfs.o debugfs.o pci.o trace.o
|
/linux-4.4.14/mm/ |
H A D | mmu_context.c | 16 * mm context. 18 * from a kernel thread context) 46 * specified mm context which was earlier taken on 49 * from a kernel thread context)
|
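mm/mmu_context.c is where use_mm()/unuse_mm() live; the comments above describe a kernel thread temporarily adopting a user mm. A short usage sketch, assuming the caller is a kernel thread (no mm of its own) that already holds a reference on the mm obtained elsewhere (e.g. via get_task_mm()):

#include <linux/mmu_context.h>
#include <linux/sched.h>

static void touch_user_pages(struct mm_struct *mm)
{
	use_mm(mm);	/* adopt the user mm as this kthread's context */

	/* ... copy_to_user()/get_user_pages() style work goes here ... */

	unuse_mm(mm);	/* drop back to the bare kernel context */
}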
/linux-4.4.14/include/linux/ |
H A D | ssbi.h | 24 ssbi_reg_read(void *context, unsigned int reg, unsigned int *val) ssbi_reg_read() argument 29 ret = ssbi_read(context, reg, &v, 1); ssbi_reg_read() 37 ssbi_reg_write(void *context, unsigned int reg, unsigned int val) ssbi_reg_write() argument 40 return ssbi_write(context, reg, &v, 1); ssbi_reg_write()
|
H A D | hardirq.h | 43 * Enter irq context (on NO_HZ, update jiffies): 48 * Exit irq context without processing softirqs: 58 * Exit irq context and process softirqs if needed:
|
H A D | dm-kcopyd.h | 62 void *context); 66 unsigned flags, dm_kcopyd_notify_fn fn, void *context); 72 * It must not be called from interrupt context. 76 * It may be called from interrupt context. 80 dm_kcopyd_notify_fn fn, void *context); 85 unsigned flags, dm_kcopyd_notify_fn fn, void *context);
|
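dm-kcopyd completion is delivered through the dm_kcopyd_notify_fn plus the opaque context shown in these prototypes. A hedged sketch of such a callback follows; the (read_err, write_err, context) argument order is assumed from the typedef, and the surrounding structure names are made up.

#include <linux/dm-kcopyd.h>
#include <linux/completion.h>

struct copy_ctx {
	struct completion done;
	int read_err;
	unsigned long write_err;
};

static void copy_complete(int read_err, unsigned long write_err, void *context)
{
	struct copy_ctx *ctx = context;	/* same pointer given to dm_kcopyd_copy() */

	ctx->read_err = read_err;
	ctx->write_err = write_err;
	complete(&ctx->done);		/* may run in interrupt context, per the header */
}

/* The submitter passes copy_complete and &ctx as the last two arguments of
 * dm_kcopyd_copy(), then waits with wait_for_completion(&ctx.done). */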
H A D | ww_mutex.h | 96 * ww_acquire_init - initialize a w/w acquire context 97 * @ctx: w/w acquire context to initialize 98 * @ww_class: w/w class of the context 100 * Initializes an context to acquire multiple mutexes of the given w/w class. 106 * Mixing of context-based w/w mutex acquiring and single w/w mutex locking can 115 * An acquire context must be released with ww_acquire_fini by the same task 116 * before the memory is freed. It is recommended to allocate the context itself 144 * @ctx: the acquire context 147 * this context are forbidden. 164 * ww_acquire_fini - releases a w/w acquire context 165 * @ctx: the acquire context to free 167 * Releases a w/w acquire context. This must be called _after_ all acquired w/w 197 * @ctx: w/w acquire context, or NULL to acquire only a single lock. 203 * will either sleep until it is (wait case). Or it selects the current context 205 * same lock with the same context twice is also detected and signalled by 209 * the given context and then wait for this contending lock to be available by 219 * of the same w/w lock class as was used to initialize the acquire context. 235 * @ctx: w/w acquire context 241 * will either sleep until it is (wait case). Or it selects the current context 243 * same lock with the same context twice is also detected and signalled by 248 * the given context and then wait for this contending lock to be available by 258 * of the same w/w lock class as was used to initialize the acquire context. 274 * @ctx: w/w acquire context 276 * Acquires a w/w mutex with the given context after a wound case. This function 280 * context and then call this function on the contended lock. 287 * with the context held. It is forbidden to call this on anything else than the 308 * @ctx: w/w acquire context 310 * Acquires a w/w mutex with the given context after a wound case. This function 316 * context and then call this function on the contended lock. 323 * with the given context held. It is forbidden to call this on anything else 343 * ww_mutex_trylock - tries to acquire the w/w mutex without acquire context 346 * Trylocks a mutex without acquire context, so no deadlock detection is
|
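The ww_mutex kerneldoc above spells out the acquire-context protocol. Below is a condensed sketch of the classic two-lock case using only the documented entry points; the obj structure is illustrative, and each ww_mutex is assumed to have been set up with ww_mutex_init() against the same class.

#include <linux/ww_mutex.h>

static DEFINE_WW_CLASS(my_ww_class);

struct obj {
	struct ww_mutex lock;	/* ww_mutex_init(&obj->lock, &my_ww_class) at creation */
};

static int lock_both(struct obj *a, struct obj *b)
{
	struct ww_acquire_ctx ctx;
	int ret;

	ww_acquire_init(&ctx, &my_ww_class);

	ret = ww_mutex_lock(&a->lock, &ctx);	/* the first lock never deadlocks */
	if (ret)
		goto fini;

	ret = ww_mutex_lock(&b->lock, &ctx);
	if (ret == -EDEADLK) {
		/* Lost to an older context: back off, sleep on the contended
		 * lock, then retry the other one. Real code loops here, since
		 * the retry below can itself return -EDEADLK. */
		ww_mutex_unlock(&a->lock);
		ww_mutex_lock_slow(&b->lock, &ctx);
		ret = ww_mutex_lock(&a->lock, &ctx);
		if (ret) {
			ww_mutex_unlock(&b->lock);
			goto fini;
		}
	}

	ww_acquire_done(&ctx);		/* no more locks will be taken in this context */
	/* ... work on a and b ... */
	ww_mutex_unlock(&a->lock);
	ww_mutex_unlock(&b->lock);
fini:
	ww_acquire_fini(&ctx);
	return ret;
}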
H A D | firmware.h | 46 const char *name, struct device *device, gfp_t gfp, void *context, 47 void (*cont)(const struct firmware *fw, void *context)); 61 const char *name, struct device *device, gfp_t gfp, void *context, request_firmware_nowait() 62 void (*cont)(const struct firmware *fw, void *context)) request_firmware_nowait() 59 request_firmware_nowait( struct module *module, bool uevent, const char *name, struct device *device, gfp_t gfp, void *context, void (*cont)(const struct firmware *fw, void *context)) request_firmware_nowait() argument
|
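request_firmware_nowait() above takes a context pointer that is handed back as the second argument of the completion callback. A small usage sketch built directly on that prototype; the firmware name, device and private structure are made up.

#include <linux/firmware.h>
#include <linux/device.h>
#include <linux/module.h>

struct my_priv {
	struct device *dev;
	/* ... */
};

static void my_fw_cont(const struct firmware *fw, void *context)
{
	struct my_priv *priv = context;

	if (!fw) {
		dev_err(priv->dev, "firmware not available\n");
		return;
	}
	/* ... program fw->data / fw->size into the device ... */
	release_firmware(fw);
}

static int my_load_fw(struct my_priv *priv)
{
	return request_firmware_nowait(THIS_MODULE, true, "my-device.bin",
				       priv->dev, GFP_KERNEL, priv, my_fw_cont);
}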
H A D | context_tracking.h | 58 * ct_state() - return the current context tracking state if known 60 * Returns the current cpu's context tracking state if context tracking 61 * is enabled. If context tracking is disabled, returns 113 * This is running in ioctl context so its safe guest_enter()
|
H A D | hw_breakpoint.h | 49 void *context, 62 void *context, 68 void *context); 94 void *context, register_user_hw_breakpoint() 102 void *context, register_wide_hw_breakpoint_cpu() 107 void *context) { return NULL; } 92 register_user_hw_breakpoint(struct perf_event_attr *attr, perf_overflow_handler_t triggered, void *context, struct task_struct *tsk) register_user_hw_breakpoint() argument 100 register_wide_hw_breakpoint_cpu(struct perf_event_attr *attr, perf_overflow_handler_t triggered, void *context, int cpu) register_wide_hw_breakpoint_cpu() argument 105 register_wide_hw_breakpoint(struct perf_event_attr *attr, perf_overflow_handler_t triggered, void *context) register_wide_hw_breakpoint() argument
|
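register_user_hw_breakpoint() above takes the overflow handler, an opaque context and the target task. A hedged sketch of a write watchpoint on one variable; the attribute helpers (hw_breakpoint_init(), HW_BREAKPOINT_W, HW_BREAKPOINT_LEN_4) are assumed from the same header family rather than quoted from this excerpt.

#include <linux/hw_breakpoint.h>
#include <linux/perf_event.h>
#include <linux/ptrace.h>

static void my_bp_triggered(struct perf_event *bp,
			    struct perf_sample_data *data,
			    struct pt_regs *regs)
{
	/* The void *context passed at registration travels with the event;
	 * here we only log where the write happened. */
	pr_info("watchpoint hit, ip=%lx\n", instruction_pointer(regs));
}

static struct perf_event *watch_variable(void *addr, struct task_struct *tsk,
					 void *context)
{
	struct perf_event_attr attr;

	hw_breakpoint_init(&attr);
	attr.bp_addr = (unsigned long)addr;
	attr.bp_len  = HW_BREAKPOINT_LEN_4;
	attr.bp_type = HW_BREAKPOINT_W;		/* fire on writes */

	return register_user_hw_breakpoint(&attr, my_bp_triggered, context, tsk);
}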
H A D | ntb.h | 162 * struct ntb_ctx_ops - ntb driver context operations 345 * @client: Client context. 361 * @client: Client context. 375 * @ntb: NTB device context. 387 * @ntb: NTB device context. 396 * ntb_set_ctx() - associate a driver context with an ntb device 397 * @ntb: NTB device context. 398 * @ctx: Driver context. 399 * @ctx_ops: Driver context operations. 401 * Associate a driver context and operations with a ntb device. The context is 403 * context with each ntb device. 405 * Return: Zero if the context is associated, otherwise an error number. 411 * ntb_clear_ctx() - disassociate any driver context from an ntb device 412 * @ntb: NTB device context. 414 * Clear any association that may exist between a driver context and the ntb 420 * ntb_link_event() - notify driver context of a change in link status 421 * @ntb: NTB device context. 423 * Notify the driver context that the link status may have changed. The driver 429 * ntb_db_event() - notify driver context of a doorbell event 430 * @ntb: NTB device context. 433 * Notify the driver context of a doorbell event. If hardware supports 445 * @ntb: NTB device context. 458 * @ntb: NTB device context. 483 * @ntb: NTB device context. 503 * @ntb: NTB device context. 521 * @ntb: NTB device context. 527 * the context of the link event callback. 540 * @ntb: NTB device context. 560 * @ntb: NTB device context. 577 * @ntb: NTB device context. 595 * @ntb: NTB device context. 608 * @ntb: NTB device context. 624 * @ntb: NTB device context. 641 * @ntb: NTB device context. 654 * @ntb: NTB device context. 674 * @ntb: NTB device context. 689 * @ntb: NTB device context. 707 * @ntb: NTB device context. 723 * @ntb: NTB device context. 742 * @ntb: NTB device context. 765 * @ntb: NTB device context. 783 * @ntb: NTB device context. 798 * @ntb: NTB device context. 818 * @ntb: NTB device context. 836 * @ntb: NTB device context. 857 * @ntb: NTB device context. 879 * @ntb: NTB device context. 897 * @ntb: NTB device context. 910 * @ntb: NTB device context. 924 * @ntb: NTB device context. 939 * @ntb: NTB device context. 956 * @ntb: NTB device context. 970 * @ntb: NTB device context.
|
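ntb_set_ctx() above associates one driver context and one set of callbacks with an NTB device. A hedged sketch of a client doing so; the link_event/db_event member names of struct ntb_ctx_ops are inferred from the notification helpers described here, not quoted from the header, and the client structure is made up.

#include <linux/ntb.h>
#include <linux/slab.h>

struct my_ntb_client {
	struct ntb_dev *ntb;
	/* ... rings, doorbell bookkeeping ... */
};

static void my_link_event(void *ctx)
{
	struct my_ntb_client *nc = ctx;

	dev_dbg(&nc->ntb->dev, "link state may have changed\n");
}

static void my_db_event(void *ctx, int db_vector)
{
	/* Doorbell notification; may be called from the ISR, per the header. */
}

static const struct ntb_ctx_ops my_ctx_ops = {
	.link_event = my_link_event,
	.db_event   = my_db_event,
};

static int my_attach(struct ntb_dev *ntb)
{
	struct my_ntb_client *nc = kzalloc(sizeof(*nc), GFP_KERNEL);

	if (!nc)
		return -ENOMEM;
	nc->ntb = ntb;
	return ntb_set_ctx(ntb, nc, &my_ctx_ops);	/* one context per device */
}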
H A D | fence.h | 44 * @context: execution context this fence belongs to, returned by 46 * @seqno: the sequence number of this fence inside the execution context, 78 unsigned context, seqno; member in struct:fence 108 * @get_timeline_name: return the name of the context this fence belongs to. 127 * This function can be called called from atomic context, but not 128 * from irq context, so normal spinlocks can be used. 161 * destruction of the fence. Can be called from irq context. 179 spinlock_t *lock, unsigned context, unsigned seqno); 284 * @f1: [in] the first fence from the same context 285 * @f2: [in] the second fence from the same context 288 * from the same context, since a seqno is not re-used across contexts. 292 if (WARN_ON(f1->context != f2->context)) fence_is_later() 300 * @f1: [in] the first fence from the same context 301 * @f2: [in] the second fence from the same context 304 * signaled last. Both fences must be from the same context, since a seqno is 309 if (WARN_ON(f1->context != f2->context)) fence_later() 360 __ff->context, __ff->seqno, ##args); \ 366 pr_warn("f %u#%u: " fmt, __ff->context, __ff->seqno, \ 373 pr_err("f %u#%u: " fmt, __ff->context, __ff->seqno, \
|
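fence_is_later()/fence_later() above only order fences that share an execution context, since sequence numbers are not reused across contexts. A tiny usage sketch of that rule:

#include <linux/fence.h>

/* Pick whichever of two fences from one timeline must signal last. */
static struct fence *pick_wait_fence(struct fence *a, struct fence *b)
{
	if (a->context != b->context)
		return NULL;		/* not comparable across contexts */

	return fence_later(a, b);	/* per the header, NULL may mean both signaled */
}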
H A D | asn1_decoder.h | 20 void *context,
|
H A D | dm-region-hash.h | 36 void *context, void (*dispatch_bios)(void *context, 38 void (*wakeup_workers)(void *context), 39 void (*wakeup_all_recovery_waiters)(void *context),
|
H A D | vexpress.h | 39 struct regmap * (*regmap_init)(struct device *dev, void *context); 40 void (*regmap_exit)(struct regmap *regmap, void *context); 44 struct vexpress_config_bridge_ops *ops, void *context);
|
/linux-4.4.14/arch/m32r/include/asm/ |
H A D | mmu.h | 12 /* Default "unsigned long" context */
|
H A D | mmu_context.h | 21 * Cache of MMU context last used. 26 #define mm_context(mm) mm->context 30 #define mm_context(mm) mm->context[smp_processor_id()] 56 * Get MMU context if needed. 63 /* Check if we have old version of context. get_mmu_context() 64 If it's old, we need to get new context with new version. */ get_mmu_context() 71 * Initialize the context related info for a new mm_struct 78 mm->context = NO_CONTEXT; init_new_context() 84 mm->context[i] = NO_CONTEXT; init_new_context() 91 * Destroy context related info for an mm_struct that is about 113 * the context for the new mm so we see the new mappings.
|
/linux-4.4.14/arch/cris/mm/ |
H A D | tlb.c | 17 * The running context is R_MMU_CONTEXT, and each TLB entry contains a 40 D(printk("tlb: alloc context %d (%p)\n", map_replace_ptr, mm)); alloc_context() 52 old_mm->context.page_id = NO_CONTEXT; alloc_context() 57 mm->context.page_id = map_replace_ptr; alloc_context() 67 * if needed, get a new MMU context for the mm. otherwise nothing is done. 73 if(mm->context.page_id == NO_CONTEXT) get_mmu_context() 77 /* called by __exit_mm to destroy the used MMU context if any before 88 if(mm->context.page_id != NO_CONTEXT) { destroy_context() 89 D(printk("destroy_context %d (%p)\n", mm->context.page_id, mm)); destroy_context() 91 page_id_map[mm->context.page_id] = NULL; destroy_context() 111 /* the init_mm has context 0 from the boot */ tlb_init()
|
/linux-4.4.14/arch/microblaze/mm/ |
H A D | mmu_context.c | 39 * Initialize the context management stuff. 44 * The use of context zero is reserved for the kernel. mmu_context_init() 53 * Steal a context from a task that has one at the moment. 55 * This isn't an LRU system, it just frees up each context in 63 /* free up context `next_mmu_context' */ steal_context() 64 /* if we shouldn't free context 0, don't... */ steal_context()
|
/linux-4.4.14/include/misc/ |
H A D | cxl.h | 40 * An AFU context may be inited and then started and stoppped multiple times 49 * Once released, a context can't be started again. 51 * One context is inited by the cxl driver for every pci_dev. This is to be 52 * used as a default kernel context. cxl_get_context() will get this 53 * context. This context will be released by PCI hot unplug, so doesn't need to 59 * Once a context has been inited, IRQs may be configured. Firstly these IRQs 72 * On pci_enabled_device(), the cxl driver will init a single cxl context for 73 * use by the driver. It doesn't start this context (as that will likely 76 * This gets the default context associated with this pci_dev. This context 82 * Allocate and initalise a context associated with a AFU PCI device. This 83 * doesn't start the context in the AFU. 87 * Release and free a context. Context should be stopped before calling. 92 * Allocate AFU interrupts for this context. num=0 will allocate the default 102 * Map a handler for an AFU interrupt associated with a particular context. AFU 112 * Start work on the AFU. This starts an cxl context and associates it with a 113 * task. task == NULL will make it a kernel context. 118 * Stop a context and remove it from the PSL 126 * Set a context as a master context. 128 * than just the per context area (for slaves). 134 * depends on if this context is a master or slave. 139 /* Get the process element for this context */ 149 * // Init the context 155 * // Start context 166 * This inits a context, and gets a file descriptor and associates some file 172 * If cxl_fd_release() file op call is installed, the context will be stopped 178 * Take a context and associate it with my file ops. Returns the associated 184 /* Get the context associated with this file */ 187 * Start a context associated a struct cxl_ioctl_start_work used by the
|
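The cxl.h notes above describe the init -> allocate IRQs -> map handlers -> start flow for an AFU context. A hedged sketch of that flow follows; the entry-point names and argument orders (cxl_dev_context_init, cxl_allocate_afu_irqs, cxl_map_afu_irq, cxl_unmap_afu_irq, cxl_free_afu_irqs, cxl_start_context, cxl_release_context) are assumed from the descriptions, as this excerpt does not show the prototypes.

#include <misc/cxl.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/err.h>

static irqreturn_t my_afu_irq(int irq, void *cookie)
{
	/* cookie is whatever was passed when the handler was mapped */
	return IRQ_HANDLED;
}

static int my_start_afu(struct pci_dev *pdev, void *priv, u64 wed)
{
	struct cxl_context *ctx;
	int rc;

	ctx = cxl_dev_context_init(pdev);	/* init only, doesn't start the AFU */
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	rc = cxl_allocate_afu_irqs(ctx, 1);	/* 0 would mean the default count */
	if (rc)
		goto out_release;

	rc = cxl_map_afu_irq(ctx, 1, my_afu_irq, priv, "my-afu");
	if (rc)
		goto out_free_irqs;

	/* task == NULL makes this a kernel context, per the notes above */
	rc = cxl_start_context(ctx, wed, NULL);
	if (rc)
		goto out_unmap;
	return 0;

out_unmap:
	cxl_unmap_afu_irq(ctx, 1, priv);
out_free_irqs:
	cxl_free_afu_irqs(ctx);
out_release:
	cxl_release_context(ctx);
	return rc;
}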
/linux-4.4.14/arch/m68k/sun3/ |
H A D | mmu_emu.c | 56 context. 0xffffffff is a marker for kernel context */ 61 /* has this context been mmdrop'd? */ 203 /* erase the mappings for a dead context. Uses the pg_dir for hints 207 context for when they're cleared */ clear_context() 208 void clear_context(unsigned long context) clear_context() argument 213 if(context) { clear_context() 214 if(!ctx_alloc[context]) clear_context() 215 panic("clear_context: context not allocated\n"); clear_context() 217 ctx_alloc[context]->context = SUN3_INVALID_CONTEXT; clear_context() 218 ctx_alloc[context] = (struct mm_struct *)0; clear_context() 224 sun3_put_context(context); clear_context() 227 if((pmeg_ctx[i] == context) && (pmeg_alloc[i] == 1)) { clear_context() 238 /* gets an empty context. if full, kills the next context listed to 240 /* This context invalidation scheme is, well, totally arbitrary, I'm 250 /* kill someone to get our context */ get_free_context() 265 panic("get_free_context: failed to find free context"); get_free_context() 276 * `context'. Maintain internal PMEG management structures. This doesn't 282 inline void mmu_emu_map_pmeg (int context, int vaddr) mmu_emu_map_pmeg() argument 296 printk("mmu_emu_map_pmeg: pmeg %x to context %d vaddr %x\n", mmu_emu_map_pmeg() 297 curr_pmeg, context, vaddr); mmu_emu_map_pmeg() 304 sun3_put_context(context); mmu_emu_map_pmeg() 317 sun3_put_context(context); mmu_emu_map_pmeg() 324 pmeg_ctx[curr_pmeg] = context; mmu_emu_map_pmeg() 357 unsigned char context; mmu_emu_handle_fault() local 363 context = 0; mmu_emu_handle_fault() 365 context = current->mm->context; mmu_emu_handle_fault() 400 mmu_emu_map_pmeg (context, vaddr); mmu_emu_handle_fault()
|
/linux-4.4.14/drivers/base/regmap/ |
H A D | regmap-spmi.c | 25 static int regmap_spmi_base_read(void *context, regmap_spmi_base_read() argument 35 err = spmi_register_read(context, addr++, val++); regmap_spmi_base_read() 40 static int regmap_spmi_base_gather_write(void *context, regmap_spmi_base_gather_write() argument 55 err = spmi_register_zero_write(context, *data); regmap_spmi_base_gather_write() 65 err = spmi_register_write(context, addr, *data); regmap_spmi_base_gather_write() 78 static int regmap_spmi_base_write(void *context, const void *data, regmap_spmi_base_write() argument 82 return regmap_spmi_base_gather_write(context, data, 1, data + 1, regmap_spmi_base_write() 114 static int regmap_spmi_ext_read(void *context, regmap_spmi_ext_read() argument 133 err = spmi_ext_register_read(context, addr, val, len); regmap_spmi_ext_read() 145 err = spmi_ext_register_readl(context, addr, val, len); regmap_spmi_ext_read() 158 static int regmap_spmi_ext_gather_write(void *context, regmap_spmi_ext_gather_write() argument 173 err = spmi_ext_register_write(context, addr, val, len); regmap_spmi_ext_gather_write() 185 err = spmi_ext_register_writel(context, addr, val, len); regmap_spmi_ext_gather_write() 198 static int regmap_spmi_ext_write(void *context, const void *data, regmap_spmi_ext_write() argument 202 return regmap_spmi_ext_gather_write(context, data, 2, data + 2, regmap_spmi_ext_write()
|
H A D | regmap-i2c.c | 19 static int regmap_smbus_byte_reg_read(void *context, unsigned int reg, regmap_smbus_byte_reg_read() argument 22 struct device *dev = context; regmap_smbus_byte_reg_read() 38 static int regmap_smbus_byte_reg_write(void *context, unsigned int reg, regmap_smbus_byte_reg_write() argument 41 struct device *dev = context; regmap_smbus_byte_reg_write() 55 static int regmap_smbus_word_reg_read(void *context, unsigned int reg, regmap_smbus_word_reg_read() argument 58 struct device *dev = context; regmap_smbus_word_reg_read() 74 static int regmap_smbus_word_reg_write(void *context, unsigned int reg, regmap_smbus_word_reg_write() argument 77 struct device *dev = context; regmap_smbus_word_reg_write() 91 static int regmap_smbus_word_read_swapped(void *context, unsigned int reg, regmap_smbus_word_read_swapped() argument 94 struct device *dev = context; regmap_smbus_word_read_swapped() 110 static int regmap_smbus_word_write_swapped(void *context, unsigned int reg, regmap_smbus_word_write_swapped() argument 113 struct device *dev = context; regmap_smbus_word_write_swapped() 127 static int regmap_i2c_write(void *context, const void *data, size_t count) regmap_i2c_write() argument 129 struct device *dev = context; regmap_i2c_write() 142 static int regmap_i2c_gather_write(void *context, regmap_i2c_gather_write() argument 146 struct device *dev = context; regmap_i2c_gather_write() 176 static int regmap_i2c_read(void *context, regmap_i2c_read() argument 180 struct device *dev = context; regmap_i2c_read() 212 static int regmap_i2c_smbus_i2c_write(void *context, const void *data, regmap_i2c_smbus_i2c_write() argument 215 struct device *dev = context; regmap_i2c_smbus_i2c_write() 228 static int regmap_i2c_smbus_i2c_read(void *context, const void *reg, regmap_i2c_smbus_i2c_read() argument 232 struct device *dev = context; regmap_i2c_smbus_i2c_read()
|
H A D | regmap-spi.c | 32 static int regmap_spi_write(void *context, const void *data, size_t count) regmap_spi_write() argument 34 struct device *dev = context; regmap_spi_write() 40 static int regmap_spi_gather_write(void *context, regmap_spi_gather_write() argument 44 struct device *dev = context; regmap_spi_gather_write() 57 static int regmap_spi_async_write(void *context, regmap_spi_async_write() argument 65 struct device *dev = context; regmap_spi_async_write() 79 async->m.context = async; regmap_spi_async_write() 95 static int regmap_spi_read(void *context, regmap_spi_read() argument 99 struct device *dev = context; regmap_spi_read()
|
H A D | regmap-ac97.c | 56 static int regmap_ac97_reg_read(void *context, unsigned int reg, regmap_ac97_reg_read() argument 59 struct snd_ac97 *ac97 = context; regmap_ac97_reg_read() 66 static int regmap_ac97_reg_write(void *context, unsigned int reg, regmap_ac97_reg_write() argument 69 struct snd_ac97 *ac97 = context; regmap_ac97_reg_write()
|
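The regmap bus files above all follow one pattern: whatever void *context was supplied when the regmap was created is handed back to every read/write callback, and each bus casts it to its own handle type (a struct device, an snd_ac97, a spmi device). A hedged illustration using the no-bus reg_read/reg_write form of regmap_config; the chip structure and register widths are made up.

#include <linux/regmap.h>
#include <linux/device.h>

struct my_chip {
	struct device *dev;
	/* ... bus handle ... */
};

static int my_reg_read(void *context, unsigned int reg, unsigned int *val)
{
	struct my_chip *chip = context;	/* context == what we passed at init */

	dev_dbg(chip->dev, "read reg %#x\n", reg);
	*val = 0;			/* ... issue the real bus transaction ... */
	return 0;
}

static int my_reg_write(void *context, unsigned int reg, unsigned int val)
{
	struct my_chip *chip = context;

	dev_dbg(chip->dev, "write %#x to reg %#x\n", val, reg);
	return 0;			/* ... issue the real bus transaction ... */
}

static const struct regmap_config my_regmap_config = {
	.reg_bits  = 8,
	.val_bits  = 16,
	.reg_read  = my_reg_read,
	.reg_write = my_reg_write,
};

/* At probe: chip->regmap = devm_regmap_init(chip->dev, NULL, chip, &my_regmap_config); */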
/linux-4.4.14/arch/x86/kernel/ |
H A D | ldt.c | 24 /* context.lock is held for us, so we don't need any locking. */ flush_ldt() 32 pc = ¤t->active_mm->context; flush_ldt() 78 /* context.lock is held */ install_ldt() 83 smp_store_release(¤t_mm->context.ldt, ldt); install_ldt() 112 mutex_init(&mm->context.lock); init_new_context() 115 mm->context.ldt = NULL; init_new_context() 119 mutex_lock(&old_mm->context.lock); init_new_context() 120 if (!old_mm->context.ldt) { init_new_context() 121 mm->context.ldt = NULL; init_new_context() 125 new_ldt = alloc_ldt_struct(old_mm->context.ldt->size); init_new_context() 131 memcpy(new_ldt->entries, old_mm->context.ldt->entries, init_new_context() 135 mm->context.ldt = new_ldt; init_new_context() 138 mutex_unlock(&old_mm->context.lock); init_new_context() 149 free_ldt_struct(mm->context.ldt); destroy_context() 150 mm->context.ldt = NULL; destroy_context() 159 mutex_lock(&mm->context.lock); read_ldt() 161 if (!mm->context.ldt) { read_ldt() 169 size = mm->context.ldt->size * LDT_ENTRY_SIZE; read_ldt() 173 if (copy_to_user(ptr, mm->context.ldt->entries, size)) { read_ldt() 188 mutex_unlock(&mm->context.lock); read_ldt() 248 mutex_lock(&mm->context.lock); write_ldt() 250 old_ldt = mm->context.ldt; write_ldt() 269 mutex_unlock(&mm->context.lock); write_ldt()
|
/linux-4.4.14/drivers/misc/echo/ |
H A D | oslec.h | 47 * oslec_create - Create a voice echo canceller context. 49 * @return: The new canceller context, or NULL if the canceller could not be 55 * oslec_free - Free a voice echo canceller context. 56 * @ec: The echo canceller context. 61 * oslec_flush - Flush (reinitialise) a voice echo canceller context. 62 * @ec: The echo canceller context. 67 * oslec_adaption_mode - set the adaption mode of a voice echo canceller context. 68 * @ec The echo canceller context. 77 * @ec: The echo canceller context. 87 * @ec: The echo canceller context.
|
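The oslec API above is documented per entry point, but the per-sample worker is cut off in this excerpt. A hedged usage sketch: oslec_update(ec, tx, rx) and the ECHO_CAN_USE_* adaption-mode flags are assumptions about this driver's API; only oslec_create/oslec_free/oslec_flush/oslec_adaption_mode are taken from the text above.

#include <linux/types.h>
#include <linux/errno.h>
#include "oslec.h"

#define TAIL_SAMPLES	128	/* e.g. a 16 ms tail at 8 kHz */

static struct oslec_state *ec;

static int ec_setup(void)
{
	/* adaption-mode flag names assumed */
	ec = oslec_create(TAIL_SAMPLES, ECHO_CAN_USE_ADAPTION | ECHO_CAN_USE_NLP);
	return ec ? 0 : -ENOMEM;
}

static void ec_run(const int16_t *tx, const int16_t *rx, int16_t *out, int n)
{
	int i;

	for (i = 0; i < n; i++)
		out[i] = oslec_update(ec, tx[i], rx[i]);	/* assumed helper */
}

static void ec_teardown(void)
{
	oslec_free(ec);
}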
/linux-4.4.14/drivers/infiniband/hw/mthca/ |
H A D | mthca_provider.c | 305 struct mthca_ucontext *context; mthca_alloc_ucontext() local 319 context = kmalloc(sizeof *context, GFP_KERNEL); mthca_alloc_ucontext() 320 if (!context) mthca_alloc_ucontext() 323 err = mthca_uar_alloc(to_mdev(ibdev), &context->uar); mthca_alloc_ucontext() 325 kfree(context); mthca_alloc_ucontext() 329 context->db_tab = mthca_init_user_db_tab(to_mdev(ibdev)); mthca_alloc_ucontext() 330 if (IS_ERR(context->db_tab)) { mthca_alloc_ucontext() 331 err = PTR_ERR(context->db_tab); mthca_alloc_ucontext() 332 mthca_uar_free(to_mdev(ibdev), &context->uar); mthca_alloc_ucontext() 333 kfree(context); mthca_alloc_ucontext() 338 mthca_cleanup_user_db_tab(to_mdev(ibdev), &context->uar, context->db_tab); mthca_alloc_ucontext() 339 mthca_uar_free(to_mdev(ibdev), &context->uar); mthca_alloc_ucontext() 340 kfree(context); mthca_alloc_ucontext() 344 context->reg_mr_warned = 0; mthca_alloc_ucontext() 346 return &context->ibucontext; mthca_alloc_ucontext() 349 static int mthca_dealloc_ucontext(struct ib_ucontext *context) mthca_dealloc_ucontext() argument 351 mthca_cleanup_user_db_tab(to_mdev(context->device), &to_mucontext(context)->uar, mthca_dealloc_ucontext() 352 to_mucontext(context)->db_tab); mthca_dealloc_ucontext() 353 mthca_uar_free(to_mdev(context->device), &to_mucontext(context)->uar); mthca_dealloc_ucontext() 354 kfree(to_mucontext(context)); mthca_dealloc_ucontext() 359 static int mthca_mmap_uar(struct ib_ucontext *context, mthca_mmap_uar() argument 368 to_mucontext(context)->uar.pfn, mthca_mmap_uar() 376 struct ib_ucontext *context, mthca_alloc_pd() 386 err = mthca_pd_alloc(to_mdev(ibdev), !context, pd); mthca_alloc_pd() 392 if (context) { mthca_alloc_pd() 443 struct mthca_ucontext *context = NULL; mthca_create_srq() local 455 context = to_mucontext(pd->uobject->context); mthca_create_srq() 462 err = mthca_map_user_db(to_mdev(pd->device), &context->uar, mthca_create_srq() 463 context->db_tab, ucmd.db_index, mthca_create_srq() 477 mthca_unmap_user_db(to_mdev(pd->device), &context->uar, mthca_create_srq() 478 context->db_tab, ucmd.db_index); mthca_create_srq() 483 if (context && ib_copy_to_udata(udata, &srq->srqn, sizeof (__u32))) { mthca_create_srq() 499 struct mthca_ucontext *context; mthca_destroy_srq() local 502 context = to_mucontext(srq->uobject->context); mthca_destroy_srq() 504 mthca_unmap_user_db(to_mdev(srq->device), &context->uar, mthca_destroy_srq() 505 context->db_tab, to_msrq(srq)->db_index); mthca_destroy_srq() 530 struct mthca_ucontext *context; mthca_create_qp() local 537 context = to_mucontext(pd->uobject->context); mthca_create_qp() 544 err = mthca_map_user_db(to_mdev(pd->device), &context->uar, mthca_create_qp() 545 context->db_tab, mthca_create_qp() 552 err = mthca_map_user_db(to_mdev(pd->device), &context->uar, mthca_create_qp() 553 context->db_tab, mthca_create_qp() 557 &context->uar, mthca_create_qp() 558 context->db_tab, mthca_create_qp() 576 context = to_mucontext(pd->uobject->context); mthca_create_qp() 579 &context->uar, mthca_create_qp() 580 context->db_tab, mthca_create_qp() 583 &context->uar, mthca_create_qp() 584 context->db_tab, mthca_create_qp() 635 &to_mucontext(qp->uobject->context)->uar, mthca_destroy_qp() 636 to_mucontext(qp->uobject->context)->db_tab, mthca_destroy_qp() 639 &to_mucontext(qp->uobject->context)->uar, mthca_destroy_qp() 640 to_mucontext(qp->uobject->context)->db_tab, mthca_destroy_qp() 650 struct ib_ucontext *context, mthca_create_cq() 665 if (context) { mthca_create_cq() 669 err = mthca_map_user_db(to_mdev(ibdev), 
&to_mucontext(context)->uar, mthca_create_cq() 670 to_mucontext(context)->db_tab, mthca_create_cq() 675 err = mthca_map_user_db(to_mdev(ibdev), &to_mucontext(context)->uar, mthca_create_cq() 676 to_mucontext(context)->db_tab, mthca_create_cq() 688 if (context) { mthca_create_cq() 698 context ? to_mucontext(context) : NULL, mthca_create_cq() 699 context ? ucmd.pdn : to_mdev(ibdev)->driver_pd.pd_num, mthca_create_cq() 704 if (context && ib_copy_to_udata(udata, &cq->cqn, sizeof (__u32))) { mthca_create_cq() 718 if (context) mthca_create_cq() 719 mthca_unmap_user_db(to_mdev(ibdev), &to_mucontext(context)->uar, mthca_create_cq() 720 to_mucontext(context)->db_tab, ucmd.arm_db_index); mthca_create_cq() 723 if (context) mthca_create_cq() 724 mthca_unmap_user_db(to_mdev(ibdev), &to_mucontext(context)->uar, mthca_create_cq() 725 to_mucontext(context)->db_tab, ucmd.set_db_index); mthca_create_cq() 855 &to_mucontext(cq->uobject->context)->uar, mthca_destroy_cq() 856 to_mucontext(cq->uobject->context)->db_tab, mthca_destroy_cq() 859 &to_mucontext(cq->uobject->context)->uar, mthca_destroy_cq() 860 to_mucontext(cq->uobject->context)->db_tab, mthca_destroy_cq() 998 if (!to_mucontext(pd->uobject->context)->reg_mr_warned) { mthca_reg_user_mr() 1003 ++to_mucontext(pd->uobject->context)->reg_mr_warned; mthca_reg_user_mr() 1012 mr->umem = ib_umem_get(pd->uobject->context, start, length, acc, mthca_reg_user_mr() 375 mthca_alloc_pd(struct ib_device *ibdev, struct ib_ucontext *context, struct ib_udata *udata) mthca_alloc_pd() argument 648 mthca_create_cq(struct ib_device *ibdev, const struct ib_cq_init_attr *attr, struct ib_ucontext *context, struct ib_udata *udata) mthca_create_cq() argument
|
/linux-4.4.14/drivers/iommu/ |
H A D | msm_iommu.h | 38 * context bank. The number of MIDs mapped to the same CB does not affect 48 * ncb Number of context banks present on this IOMMU HW instance 56 * struct msm_iommu_ctx_dev - an IOMMU context bank instance 57 * name Human-readable name given to this context bank 58 * num Index of this context bank within the hardware 59 * mids List of Machine IDs that are to be mapped into this context 91 * struct msm_iommu_ctx_drvdata - an IOMMU context bank instance 92 * @num: Hardware context number of this context 97 * A msm_iommu_ctx_drvdata holds the driver data for a single context bank 107 * Look up an IOMMU context device by its context name. NULL if none found. 114 * Interrupt handler for the IOMMU context fault interrupt. Hooking the
|
/linux-4.4.14/arch/cris/arch-v10/mm/ |
H A D | tlb.c | 20 * The running context is R_MMU_CONTEXT, and each TLB entry contains a 61 /* invalidate the selected mm context only */ 67 int page_id = mm->context.page_id; flush_tlb_mm() 70 D(printk("tlb: flush mm context %d (%p)\n", page_id, mm)); flush_tlb_mm() 102 int page_id = mm->context.page_id; flush_tlb_page() 106 D(printk("tlb: flush page %p in context %d (%p)\n", addr, page_id, mm)); flush_tlb_page() 113 /* invalidate those TLB entries that match both the mm context flush_tlb_page() 138 * Initialize the context related info for a new mm_struct 145 mm->context.page_id = NO_CONTEXT; init_new_context() 155 /* make sure we have a context */ switch_mm() 167 /* switch context in the MMU */ switch_mm() 170 next->context, next)); switch_mm() 173 page_id, next->context.page_id); switch_mm()
|
/linux-4.4.14/drivers/infiniband/hw/mlx4/ |
H A D | doorbell.c | 44 int mlx4_ib_db_map_user(struct mlx4_ib_ucontext *context, unsigned long virt, mlx4_ib_db_map_user() argument 50 mutex_lock(&context->db_page_mutex); mlx4_ib_db_map_user() 52 list_for_each_entry(page, &context->db_page_list, list) mlx4_ib_db_map_user() 64 page->umem = ib_umem_get(&context->ibucontext, virt & PAGE_MASK, mlx4_ib_db_map_user() 72 list_add(&page->list, &context->db_page_list); mlx4_ib_db_map_user() 80 mutex_unlock(&context->db_page_mutex); mlx4_ib_db_map_user() 85 void mlx4_ib_db_unmap_user(struct mlx4_ib_ucontext *context, struct mlx4_db *db) mlx4_ib_db_unmap_user() argument 87 mutex_lock(&context->db_page_mutex); mlx4_ib_db_unmap_user() 95 mutex_unlock(&context->db_page_mutex); mlx4_ib_db_unmap_user()
|
/linux-4.4.14/drivers/infiniband/hw/mlx5/ |
H A D | doorbell.c | 46 int mlx5_ib_db_map_user(struct mlx5_ib_ucontext *context, unsigned long virt, mlx5_ib_db_map_user() argument 52 mutex_lock(&context->db_page_mutex); mlx5_ib_db_map_user() 54 list_for_each_entry(page, &context->db_page_list, list) mlx5_ib_db_map_user() 66 page->umem = ib_umem_get(&context->ibucontext, virt & PAGE_MASK, mlx5_ib_db_map_user() 74 list_add(&page->list, &context->db_page_list); mlx5_ib_db_map_user() 82 mutex_unlock(&context->db_page_mutex); mlx5_ib_db_map_user() 87 void mlx5_ib_db_unmap_user(struct mlx5_ib_ucontext *context, struct mlx5_db *db) mlx5_ib_db_unmap_user() argument 89 mutex_lock(&context->db_page_mutex); mlx5_ib_db_unmap_user() 97 mutex_unlock(&context->db_page_mutex); mlx5_ib_db_unmap_user()
|
/linux-4.4.14/crypto/asymmetric_keys/ |
H A D | pkcs7_parser.c | 194 int pkcs7_note_OID(void *context, size_t hdrlen, pkcs7_note_OID() argument 198 struct pkcs7_parse_context *ctx = context; pkcs7_note_OID() 213 int pkcs7_sig_note_digest_algo(void *context, size_t hdrlen, pkcs7_sig_note_digest_algo() argument 217 struct pkcs7_parse_context *ctx = context; pkcs7_sig_note_digest_algo() 250 int pkcs7_sig_note_pkey_algo(void *context, size_t hdrlen, pkcs7_sig_note_pkey_algo() argument 254 struct pkcs7_parse_context *ctx = context; pkcs7_sig_note_pkey_algo() 270 int pkcs7_check_content_type(void *context, size_t hdrlen, pkcs7_check_content_type() argument 274 struct pkcs7_parse_context *ctx = context; pkcs7_check_content_type() 287 int pkcs7_note_signeddata_version(void *context, size_t hdrlen, pkcs7_note_signeddata_version() argument 291 struct pkcs7_parse_context *ctx = context; pkcs7_note_signeddata_version() 321 int pkcs7_note_signerinfo_version(void *context, size_t hdrlen, pkcs7_note_signerinfo_version() argument 325 struct pkcs7_parse_context *ctx = context; pkcs7_note_signerinfo_version() 362 * Extract a certificate and store it in the context. 364 int pkcs7_extract_cert(void *context, size_t hdrlen, pkcs7_extract_cert() argument 368 struct pkcs7_parse_context *ctx = context; pkcs7_extract_cert() 404 int pkcs7_note_certificate_list(void *context, size_t hdrlen, pkcs7_note_certificate_list() argument 408 struct pkcs7_parse_context *ctx = context; pkcs7_note_certificate_list() 422 int pkcs7_note_content(void *context, size_t hdrlen, pkcs7_note_content() argument 426 struct pkcs7_parse_context *ctx = context; pkcs7_note_content() 440 * the context. 442 int pkcs7_note_data(void *context, size_t hdrlen, pkcs7_note_data() argument 446 struct pkcs7_parse_context *ctx = context; pkcs7_note_data() 459 int pkcs7_sig_note_authenticated_attr(void *context, size_t hdrlen, pkcs7_sig_note_authenticated_attr() argument 463 struct pkcs7_parse_context *ctx = context; pkcs7_sig_note_authenticated_attr() 542 int pkcs7_sig_note_set_of_authattrs(void *context, size_t hdrlen, pkcs7_sig_note_set_of_authattrs() argument 546 struct pkcs7_parse_context *ctx = context; pkcs7_sig_note_set_of_authattrs() 572 int pkcs7_sig_note_serial(void *context, size_t hdrlen, pkcs7_sig_note_serial() argument 576 struct pkcs7_parse_context *ctx = context; pkcs7_sig_note_serial() 585 int pkcs7_sig_note_issuer(void *context, size_t hdrlen, pkcs7_sig_note_issuer() argument 589 struct pkcs7_parse_context *ctx = context; pkcs7_sig_note_issuer() 598 int pkcs7_sig_note_skid(void *context, size_t hdrlen, pkcs7_sig_note_skid() argument 602 struct pkcs7_parse_context *ctx = context; pkcs7_sig_note_skid() 614 int pkcs7_sig_note_signature(void *context, size_t hdrlen, pkcs7_sig_note_signature() argument 618 struct pkcs7_parse_context *ctx = context; pkcs7_sig_note_signature() 635 int pkcs7_note_signed_info(void *context, size_t hdrlen, pkcs7_note_signed_info() argument 639 struct pkcs7_parse_context *ctx = context; pkcs7_note_signed_info()
|
H A D | x509_cert_parser.c | 139 int x509_note_OID(void *context, size_t hdrlen, x509_note_OID() argument 143 struct x509_parse_context *ctx = context; x509_note_OID() 159 int x509_note_tbs_certificate(void *context, size_t hdrlen, x509_note_tbs_certificate() argument 163 struct x509_parse_context *ctx = context; x509_note_tbs_certificate() 176 int x509_note_pkey_algo(void *context, size_t hdrlen, x509_note_pkey_algo() argument 180 struct x509_parse_context *ctx = context; x509_note_pkey_algo() 228 int x509_note_signature(void *context, size_t hdrlen, x509_note_signature() argument 232 struct x509_parse_context *ctx = context; x509_note_signature() 250 int x509_note_serial(void *context, size_t hdrlen, x509_note_serial() argument 254 struct x509_parse_context *ctx = context; x509_note_serial() 263 int x509_extract_name_segment(void *context, size_t hdrlen, x509_extract_name_segment() argument 267 struct x509_parse_context *ctx = context; x509_extract_name_segment() 367 int x509_note_issuer(void *context, size_t hdrlen, x509_note_issuer() argument 371 struct x509_parse_context *ctx = context; x509_note_issuer() 377 int x509_note_subject(void *context, size_t hdrlen, x509_note_subject() argument 381 struct x509_parse_context *ctx = context; x509_note_subject() 390 int x509_extract_key_data(void *context, size_t hdrlen, x509_extract_key_data() argument 394 struct x509_parse_context *ctx = context; x509_extract_key_data() 410 int rsa_extract_mpi(void *context, size_t hdrlen, rsa_extract_mpi() argument 414 struct x509_parse_context *ctx = context; rsa_extract_mpi() 436 int x509_process_extension(void *context, size_t hdrlen, x509_process_extension() argument 440 struct x509_parse_context *ctx = context; x509_process_extension() 570 int x509_note_not_before(void *context, size_t hdrlen, x509_note_not_before() argument 574 struct x509_parse_context *ctx = context; x509_note_not_before() 578 int x509_note_not_after(void *context, size_t hdrlen, x509_note_not_after() argument 582 struct x509_parse_context *ctx = context; x509_note_not_after() 589 int x509_akid_note_kid(void *context, size_t hdrlen, x509_akid_note_kid() argument 593 struct x509_parse_context *ctx = context; x509_akid_note_kid() 612 int x509_akid_note_name(void *context, size_t hdrlen, x509_akid_note_name() argument 616 struct x509_parse_context *ctx = context; x509_akid_note_name() 628 int x509_akid_note_serial(void *context, size_t hdrlen, x509_akid_note_serial() argument 632 struct x509_parse_context *ctx = context; x509_akid_note_serial()
|
H A D | mscode_parser.c | 46 int mscode_note_content_type(void *context, size_t hdrlen, mscode_note_content_type() argument 78 int mscode_note_digest_algo(void *context, size_t hdrlen, mscode_note_digest_algo() argument 82 struct pefile_context *ctx = context; mscode_note_digest_algo() 126 int mscode_note_digest(void *context, size_t hdrlen, mscode_note_digest() argument 130 struct pefile_context *ctx = context; mscode_note_digest()
|
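The pkcs7/x509/mscode callbacks above all share one shape: the ASN.1 decoder passes the opaque context plus the element it matched, and the callback casts the context to its own parse state. A generic, hedged sketch of such an action; the trailing (tag, value, vlen) parameters are assumed, since this excerpt only shows the leading (void *context, size_t hdrlen, ...) part of the signatures.

#include <linux/errno.h>
#include <linux/types.h>

struct my_parse_context {
	const void *serial;	/* points into the input blob */
	size_t serial_len;
};

static int my_note_serial(void *context, size_t hdrlen,
			  unsigned char tag,
			  const void *value, size_t vlen)
{
	struct my_parse_context *ctx = context;

	if (vlen == 0 || vlen > 20)
		return -EBADMSG;	/* reject malformed serial numbers */

	ctx->serial = value;		/* no copy: the blob outlives parsing */
	ctx->serial_len = vlen;
	return 0;
}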
/linux-4.4.14/tools/perf/scripts/perl/Perf-Trace-Util/ |
H A D | Context.c | 55 Perl_croak(aTHX_ "Usage: %s(%s)", "Perf::Trace::Context::common_pc", "context"); XS() 58 struct scripting_context * context = INT2PTR(struct scripting_context *,SvIV(ST(0))); XS() local 62 RETVAL = common_pc(context); XS() 78 Perl_croak(aTHX_ "Usage: %s(%s)", "Perf::Trace::Context::common_flags", "context"); XS() 81 struct scripting_context * context = INT2PTR(struct scripting_context *,SvIV(ST(0))); XS() local 85 RETVAL = common_flags(context); XS() 101 Perl_croak(aTHX_ "Usage: %s(%s)", "Perf::Trace::Context::common_lock_depth", "context"); XS() 104 struct scripting_context * context = INT2PTR(struct scripting_context *,SvIV(ST(0))); XS() local 108 RETVAL = common_lock_depth(context); XS()
|
/linux-4.4.14/drivers/staging/skein/ |
H A D | skein_api.h | 50 * struct skein_ctx ctx; // a Skein hash or MAC context 52 * // prepare context, here for a Skein with a state size of 512 bits. 55 * // Initialize the context to set the requested hash length in bits 71 * An application may use @c skein_reset to reset a Skein context and use 113 * Prepare a Skein context. 116 * context. The functions clears memory and initializes size dependent 120 * Pointer to a Skein context. 129 * Initialize a Skein context. 131 * Initializes the context with this data and saves the resulting Skein 135 * Pointer to a Skein context. 145 * Resets a Skein context for further use. 147 * Restores the saved chaining variables to reset the Skein context. 152 * Pointer to a pre-initialized Skein MAC context 157 * Initializes a Skein context for MAC usage. 159 * Initializes the context with this data and saves the resulting Skein 166 * Pointer to an empty or preinitialized Skein MAC context 183 * Pointer to initialized Skein context 201 * Pointer to initialized Skein context 214 * reset the Skein context. 217 * Pointer to initialized Skein context
|
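skein_api.h above sketches the prepare/init/update/final flow in its header comment. The same flow written out below; the step names (skein_ctx_prepare, skein_init, skein_update, skein_final, skein_reset) and the SKEIN_512 size constant are assumed to be this staging API's spellings of the operations the comment describes.

#include "skein_api.h"

static int hash_buffer(const u8 *msg, size_t len, u8 *digest /* 32 bytes */)
{
	struct skein_ctx ctx;	/* a Skein hash context */
	int rc;

	rc = skein_ctx_prepare(&ctx, SKEIN_512);	/* 512-bit internal state */
	if (rc)
		return rc;

	rc = skein_init(&ctx, 256);			/* request a 256-bit hash */
	if (rc)
		return rc;

	skein_update(&ctx, msg, len);			/* may be called repeatedly */
	skein_final(&ctx, digest);

	/* skein_reset(&ctx) would rewind the context for another message
	 * without repeating the size-dependent setup. */
	return 0;
}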
/linux-4.4.14/arch/nios2/include/asm/ |
H A D | mmu_context.h | 32 * Initialize the context related info for a new mm_struct instance. 35 * the currently running generation when this context is switched in. 40 mm->context = 0; init_new_context() 45 * Destroy context related info for an mm_struct that is about 62 * the context for the new mm so we see the new mappings.
|
H A D | mmu.h | 13 /* Default "unsigned long" context */
|
/linux-4.4.14/arch/frv/include/asm/ |
H A D | mmu_context.h | 1 /* mmu_context.h: MMU context management routines 38 change_mm_context(&prev->context, &next->context, next->pgd); \ 43 change_mm_context(&prev->context, &next->context, next->pgd); \
|
H A D | mmu.h | 1 /* mmu.h: memory management context for FR-V with or without MMU support 16 struct list_head id_link; /* link in list of context ID owners */ 17 unsigned short id; /* MMU context ID */
|
H A D | tlbflush.h | 38 __flush_tlb_mm((mm)->context.id); \ 45 __flush_tlb_range((vma)->vm_mm->context.id, start, end); \ 52 __flush_tlb_page((vma)->vm_mm->context.id, addr); \
|
/linux-4.4.14/drivers/staging/vt6656/ |
H A D | usbpipe.c | 107 struct vnt_private *priv = urb->context; vnt_start_interrupt_urb_complete() 169 struct vnt_rcb *rcb = urb->context; vnt_submit_rx_urb_complete() 243 struct vnt_usb_send_context *context = urb->context; vnt_tx_context_complete() local 244 struct vnt_private *priv = context->priv; vnt_tx_context_complete() 248 dev_dbg(&priv->usb->dev, "Write %d bytes\n", context->buf_len); vnt_tx_context_complete() 253 context->in_use = false; vnt_tx_context_complete() 261 if (context->type == CONTEXT_DATA_PACKET) vnt_tx_context_complete() 264 if (urb->status || context->type == CONTEXT_BEACON_PACKET) { vnt_tx_context_complete() 265 if (context->skb) vnt_tx_context_complete() 266 ieee80211_free_txskb(priv->hw, context->skb); vnt_tx_context_complete() 268 context->in_use = false; vnt_tx_context_complete() 273 struct vnt_usb_send_context *context) vnt_tx_context() 279 context->in_use = false; vnt_tx_context() 283 urb = context->urb; vnt_tx_context() 288 context->data, vnt_tx_context() 289 context->buf_len, vnt_tx_context() 291 context); vnt_tx_context() 297 context->in_use = false; vnt_tx_context() 272 vnt_tx_context(struct vnt_private *priv, struct vnt_usb_send_context *context) vnt_tx_context() argument
|
H A D | int.c | 72 struct vnt_usb_send_context *context; vnt_int_report_rate() local 81 context = priv->tx_context[pkt_no]; vnt_int_report_rate() 83 if (!context->skb) vnt_int_report_rate() 86 info = IEEE80211_SKB_CB(context->skb); vnt_int_report_rate() 89 if (context->fb_option && !(tsr & (TSR_TMO | TSR_RETRYTMO))) { vnt_int_report_rate() 99 if (context->fb_option == AUTO_FB_0) vnt_int_report_rate() 101 else if (context->fb_option == AUTO_FB_1) vnt_int_report_rate() 119 ieee80211_tx_status_irqsafe(priv->hw, context->skb); vnt_int_report_rate() 121 context->in_use = false; vnt_int_report_rate()
|
/linux-4.4.14/drivers/tty/ipwireless/ |
H A D | main.h | 54 /* Hardware context */ 56 /* Network layer context */ 58 /* TTY device context */
|
/linux-4.4.14/arch/um/kernel/ |
H A D | exec.c | 27 ret = unmap(¤t->mm->context.id, 0, STUB_START, 0, &data); flush_thread() 28 ret = ret || unmap(¤t->mm->context.id, STUB_END, flush_thread() 38 __switch_mm(¤t->mm->context.id); flush_thread()
|
H A D | reboot.c | 28 pid = t->mm->context.id.u.pid; for_each_process()
|
/linux-4.4.14/drivers/gpu/drm/i915/ |
H A D | i915_gem_context.c | 29 * This file implements HW context support. On gen5+ a HW context consists of an 30 * opaque GPU object which is referenced at times of context saves and restores. 31 * With RC6 enabled, the context is also referenced as the GPU enters and exists 32 * from RC6 (GPU has it's own internal power context, except on gen5). Though 33 * something like a context does exist for the media ring, the code only 37 * and the default HW context. The default HW context is used by GPU clients 38 * that do not request setup of their own hardware context. The default 39 * context's state is never restored to help prevent programming errors. This 41 * The default context only exists to give the GPU some offset to load as the 42 * current to invoke a save of the context we actually care about. In fact, the 44 * never use the default context, though that limits the driver's ability to 52 * The context life cycle is semi-complicated in that context BOs may live 53 * longer than the context itself because of the way the hardware, and object 55 * describing the context life. 58 * S1: context created 1 0 0 59 * S2: context is currently running 2 1 X 61 * S4: context is current, but destroyed 1 1 0 65 * S0->S1: client creates a context 66 * S1->S2: client submits execbuf with context 67 * S2->S3: other clients submits execbuf with context 68 * S3->S1: context object was retired 70 * S2->S4: context destroy called with current context 72 * S4->S5->S0: destroy path on current context 75 * The "current context" means the context which is currently running on the 78 * offset, but it will on the next context switch. The only way to avoid this 81 * An "active context' is one which was previously the "current context" and is 82 * on the active list waiting for the next context switch to occur. Until this 84 * possible to destroy a context, but it is still active. 161 * This context is going away and we need to remove all VMAs still i915_gem_context_free() 186 * Try to make the context utilize L3 as well as LLC. i915_gem_alloc_context_obj() 231 /* Default context will never have a file_priv */ __create_hw_context() 242 /* NB: Mark all slices as needing a remap so that when the context first __create_hw_context() 257 * The default context needs to exist per ring that uses contexts. It stores the 258 * context state of the GPU for applications that don't utilize HW contexts, as 278 * context. This can cause a problem as pinning the i915_gem_create_context() 279 * default context also requires GTT space which may not i915_gem_create_context() 281 * context. i915_gem_create_context() 383 DRM_ERROR("Failed to create default global context (error %ld)\n", i915_gem_context_init() 395 DRM_DEBUG_DRIVER("%s context support initialized\n", i915_gem_context_init() 408 /* The only known way to stop the gpu from accessing the hw context is i915_gem_context_fini() 413 /* When default context is created and switched to, base object refcount i915_gem_context_fini() 416 * to default context. So we need to unreference the base object once i915_gem_context_fini() 421 /* Fake switch to NULL context */ i915_gem_context_fini() 458 DRM_ERROR("ring init context: %d\n", ret); i915_gem_context_enable() 677 * Pin can switch back to the default context if we end up calling into do_switch() 679 * switches to the default context. Hence we need to reload from here. 
do_switch() 687 * a context."*/ do_switch() 706 * write domains when putting a context object onto the active list do_switch() 717 /* NB: If we inhibit the restore, the context is not allowed to do_switch() 741 /* The hardware context switch is emitted, but we haven't do_switch() 747 DRM_ERROR("Failed to change address space on context switch\n"); do_switch() 764 /* The backing object for the context is done after switching to the do_switch() 765 * *next* context. Therefore we cannot retire the previous context until do_switch() 766 * the next context has already started running. In fact, the below code do_switch() 775 * object dirty. The only exception is that the context must be do_switch() 798 DRM_ERROR("ring init context: %d\n", ret); do_switch() 811 * i915_switch_context() - perform a GPU context switch. 812 * @req: request for which we'll execute the context switch 814 * The context life cycle is simple. The context refcount is incremented and 815 * decremented by 1 and create and destroy. If the context is in use by the GPU, 816 * it will have a refcount > 1. This allows us to destroy the context abstract 819 * This function should not be used in execlists mode. Instead the context is 821 * context. 831 if (req->ctx->legacy_hw_ctx.rcs_state == NULL) { /* We have the fake context */ i915_switch_context() 870 DRM_DEBUG_DRIVER("HW context %d created\n", args->ctx_id); i915_gem_context_create_ioctl() 900 DRM_DEBUG_DRIVER("HW context %d destroyed\n", args->ctx_id); i915_gem_context_destroy_ioctl()
|
/linux-4.4.14/drivers/net/ethernet/microchip/ |
H A D | encx24j600-regmap.c | 54 static void regmap_lock_mutex(void *context) regmap_lock_mutex() argument 56 struct encx24j600_context *ctx = context; regmap_lock_mutex() 60 static void regmap_unlock_mutex(void *context) regmap_unlock_mutex() argument 62 struct encx24j600_context *ctx = context; regmap_unlock_mutex() 66 static int regmap_encx24j600_sfr_read(void *context, u8 reg, u8 *val, regmap_encx24j600_sfr_read() argument 69 struct encx24j600_context *ctx = context; regmap_encx24j600_sfr_read() 173 static int regmap_encx24j600_sfr_write(void *context, u8 reg, u8 *val, regmap_encx24j600_sfr_write() argument 176 struct encx24j600_context *ctx = context; regmap_encx24j600_sfr_write() 192 static int regmap_encx24j600_reg_update_bits(void *context, unsigned int reg, regmap_encx24j600_reg_update_bits() argument 196 struct encx24j600_context *ctx = context; regmap_encx24j600_reg_update_bits() 224 int regmap_encx24j600_spi_write(void *context, u8 reg, const u8 *data, regmap_encx24j600_spi_write() argument 227 struct encx24j600_context *ctx = context; regmap_encx24j600_spi_write() 237 int regmap_encx24j600_spi_read(void *context, u8 reg, u8 *data, size_t count) regmap_encx24j600_spi_read() argument 239 struct encx24j600_context *ctx = context; regmap_encx24j600_spi_read() 248 static int regmap_encx24j600_write(void *context, const void *data, regmap_encx24j600_write() argument 257 return regmap_encx24j600_spi_write(context, reg, dout, len); regmap_encx24j600_write() 262 return regmap_encx24j600_sfr_write(context, reg, dout, len); regmap_encx24j600_write() 265 static int regmap_encx24j600_read(void *context, regmap_encx24j600_read() argument 277 return regmap_encx24j600_spi_read(context, reg, val, val_size); regmap_encx24j600_read() 284 return regmap_encx24j600_sfr_read(context, reg, val, val_size); regmap_encx24j600_read() 349 static int regmap_encx24j600_phy_reg_read(void *context, unsigned int reg, regmap_encx24j600_phy_reg_read() argument 352 struct encx24j600_context *ctx = context; regmap_encx24j600_phy_reg_read() 387 static int regmap_encx24j600_phy_reg_write(void *context, unsigned int reg, regmap_encx24j600_phy_reg_write() argument 390 struct encx24j600_context *ctx = context; regmap_encx24j600_phy_reg_write()
|
/linux-4.4.14/drivers/pci/hotplug/ |
H A D | acpiphp_glue.c | 64 static void hotplug_event(u32 type, struct acpiphp_context *context); 68 * acpiphp_init_context - Create hotplug context and grab a reference to it. 69 * @adev: ACPI device object to create the context for. 75 struct acpiphp_context *context; acpiphp_init_context() local 77 context = kzalloc(sizeof(*context), GFP_KERNEL); acpiphp_init_context() 78 if (!context) acpiphp_init_context() 81 context->refcount = 1; acpiphp_init_context() 82 context->hp.notify = acpiphp_hotplug_notify; acpiphp_init_context() 83 context->hp.fixup = acpiphp_post_dock_fixup; acpiphp_init_context() 84 acpi_set_hp_context(adev, &context->hp); acpiphp_init_context() 85 return context; acpiphp_init_context() 89 * acpiphp_get_context - Get hotplug context and grab a reference to it. 90 * @adev: ACPI device object to get the context for. 96 struct acpiphp_context *context; acpiphp_get_context() local 101 context = to_acpiphp_context(adev->hp); acpiphp_get_context() 102 context->refcount++; acpiphp_get_context() 103 return context; acpiphp_get_context() 107 * acpiphp_put_context - Drop a reference to ACPI hotplug context. 108 * @context: ACPI hotplug context to drop a reference to. 110 * The context object is removed if there are no more references to it. 114 static void acpiphp_put_context(struct acpiphp_context *context) acpiphp_put_context() argument 116 if (--context->refcount) acpiphp_put_context() 119 WARN_ON(context->bridge); acpiphp_put_context() 120 context->hp.self->hp = NULL; acpiphp_put_context() 121 kfree(context); acpiphp_put_context() 136 struct acpiphp_context *context; acpiphp_grab_context() local 139 context = acpiphp_get_context(adev); acpiphp_grab_context() 140 if (!context || context->func.parent->is_going_away) { acpiphp_grab_context() 144 get_bridge(context->func.parent); acpiphp_grab_context() 145 acpiphp_put_context(context); acpiphp_grab_context() 147 return context; acpiphp_grab_context() 150 static void acpiphp_let_context_go(struct acpiphp_context *context) acpiphp_let_context_go() argument 152 put_bridge(context->func.parent); acpiphp_let_context_go() 157 struct acpiphp_context *context; free_bridge() local 173 context = bridge->context; free_bridge() 174 /* Root bridges will not have hotplug context. */ free_bridge() 175 if (context) { free_bridge() 177 put_bridge(context->func.parent); free_bridge() 178 context->bridge = NULL; free_bridge() 179 acpiphp_put_context(context); free_bridge() 197 struct acpiphp_context *context = acpiphp_grab_context(adev); acpiphp_post_dock_fixup() local 201 if (!context) acpiphp_post_dock_fixup() 204 bus = context->func.slot->bus; acpiphp_post_dock_fixup() 222 acpiphp_let_context_go(context); acpiphp_post_dock_fixup() 255 * acpiphp_add_context - Add ACPIPHP context to an ACPI device object. 256 * @handle: ACPI handle of the object to add a context to. 
265 struct acpiphp_context *context; acpiphp_add_context() local 290 context = acpiphp_init_context(adev); acpiphp_add_context() 291 if (!context) { acpiphp_add_context() 293 acpi_handle_err(handle, "No hotplug context\n"); acpiphp_add_context() 296 newfunc = &context->func; acpiphp_add_context() 319 acpiphp_put_context(context); acpiphp_add_context() 750 static void hotplug_event(u32 type, struct acpiphp_context *context) hotplug_event() argument 752 acpi_handle handle = context->hp.self->handle; hotplug_event() 753 struct acpiphp_func *func = &context->func; hotplug_event() 758 bridge = context->bridge; hotplug_event() 806 struct acpiphp_context *context; acpiphp_hotplug_notify() local 808 context = acpiphp_grab_context(adev); acpiphp_hotplug_notify() 809 if (!context) acpiphp_hotplug_notify() 812 hotplug_event(type, context); acpiphp_hotplug_notify() 813 acpiphp_let_context_go(context); acpiphp_hotplug_notify() 868 struct acpiphp_context *context; acpiphp_enumerate_slots() local 872 * under its parent, so the context should be there, unless the acpiphp_enumerate_slots() 876 context = acpiphp_get_context(adev); acpiphp_enumerate_slots() 877 if (!context) acpiphp_enumerate_slots() 880 bridge->context = context; acpiphp_enumerate_slots() 881 context->bridge = bridge; acpiphp_enumerate_slots() 883 get_bridge(context->func.parent); acpiphp_enumerate_slots()
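The acpiphp_glue.c entry above documents a reference-counted hotplug context: acpiphp_init_context() creates the context with refcount 1 and attaches it to the ACPI device object, acpiphp_get_context() bumps the count, and acpiphp_put_context() drops it and frees the context once the count reaches zero. Below is a minimal userspace sketch of that init/get/put discipline; the struct names, the pthread mutex standing in for the ACPI locking, and the main() scenario are all illustrative, not the driver's API.

/* Simplified sketch of the acpiphp init/get/put refcount pattern.
 * The "device", its lock and all names are stand-ins; only the
 * discipline (create at 1, get/put, free on last put) matches. */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct hp_context {
	int refcount;           /* protected by dev->lock */
	void *owner;            /* back pointer to the owning device */
};

struct device {
	pthread_mutex_t lock;   /* plays the role of the ACPI hotplug lock */
	struct hp_context *hp;  /* NULL until a context is attached */
};

static struct hp_context *hp_init_context(struct device *dev)
{
	struct hp_context *ctx = calloc(1, sizeof(*ctx));

	if (!ctx)
		return NULL;
	ctx->refcount = 1;      /* the creator holds the first reference */
	ctx->owner = dev;
	dev->hp = ctx;
	return ctx;
}

static struct hp_context *hp_get_context(struct device *dev)
{
	if (!dev->hp)
		return NULL;
	dev->hp->refcount++;    /* caller must hold dev->lock */
	return dev->hp;
}

static void hp_put_context(struct device *dev, struct hp_context *ctx)
{
	if (--ctx->refcount)
		return;         /* somebody still holds a reference */
	dev->hp = NULL;         /* last reference: detach and free */
	free(ctx);
}

int main(void)
{
	struct device dev = { .lock = PTHREAD_MUTEX_INITIALIZER };

	pthread_mutex_lock(&dev.lock);
	struct hp_context *ctx = hp_init_context(&dev);
	struct hp_context *ref = hp_get_context(&dev);   /* refcount == 2 */
	hp_put_context(&dev, ref);                        /* back to 1 */
	hp_put_context(&dev, ctx);                        /* freed, dev.hp == NULL */
	pthread_mutex_unlock(&dev.lock);
	printf("context released: hp=%p\n", (void *)dev.hp);
	return 0;
}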
|
/linux-4.4.14/drivers/staging/rdma/hfi1/ |
H A D | pio.h | 54 /* send context types */ 60 /* invalid send context index */ 73 #define PRC_SC_DISABLE 0x20 /* clean-up after a context disable */ 84 struct send_context *sc;/* back pointer to owning send context */ 88 void __iomem *end; /* context end address */ 89 unsigned long size; /* context size, in bytes */ 103 /* per-NUMA send context */ 110 struct work_struct halt_work; /* halted context work queue entry */ 112 int node; /* context home node */ 113 int type; /* context type */ 115 u32 hw_context; /* hardware context number */ 116 u32 credits; /* number of blocks in context */ 137 /* send context flags */ 144 struct send_context *sc; /* allocated working context */ 146 u16 type; /* context type */ 151 /* DMA credit return, index is always (context & 0x7) */ 162 /* send context configuration sizes (one per type) */ 168 /* send context functions */
|
H A D | pio.c | 139 /* number of send context memory pools */ 154 /* default send context sizes */ 165 /* send context memory pool configuration */ 184 int size; /* context size, in blocks */ 217 * Read the send context memory pool configuration and send context 219 * counts and sizes for the send context types. 258 "Send context memory pool %d: both the block count and centipercent are invalid\n", init_sc_pools_and_sizes() 271 "All send context memory pools must be described as either centipercent or blocks, no mixing between pools\n"); init_sc_pools_and_sizes() 279 "Send context memory pool centipercent is %d, expecting 10000\n", init_sc_pools_and_sizes() 288 "Send context memory pool absolute block count %d is larger than the memory size %d\n", init_sc_pools_and_sizes() 295 * - copy from the context size config init_sc_pools_and_sizes() 296 * - replace context type wildcard counts with real values init_sc_pools_and_sizes() 321 "%s send context invalid count wildcard %d\n", init_sc_pools_and_sizes() 344 "%s send context invalid pool wildcard %d\n", init_sc_pools_and_sizes() 355 "Send context fixed block count, %u, larger than total block count %u\n", init_sc_pools_and_sizes() 360 /* step 3: calculate the blocks in the pools, and pool context sizes */ init_sc_pools_and_sizes() 365 "Send context fixed pool sizes, %u, larger than pool block count %u\n", init_sc_pools_and_sizes() 382 "Send context memory pool %d has %u contexts, but no blocks\n", init_sc_pools_and_sizes() 391 "Send context memory pool %d has %u blocks, but zero contexts\n", init_sc_pools_and_sizes() 399 /* step 4: fill in the context type sizes from the pool sizes */ init_sc_pools_and_sizes() 418 dd_dev_info(dd, "unused send context blocks: %d\n", extra); init_sc_pools_and_sizes() 426 int ret, i, j, context; init_send_contexts() local 444 /* hardware context map starts with invalid send context indices */ init_send_contexts() 450 * for each context one after another from the global space. init_send_contexts() 452 context = 0; init_send_contexts() 459 &dd->send_contexts[context]; init_send_contexts() 464 context++; init_send_contexts() 473 * Allocate a software index and hardware context of the given type. 482 u32 context; sc_hw_alloc() local 489 context = dd->chip_send_contexts - index - 1; sc_hw_alloc() 490 dd->hw_to_sw[context] = index; sc_hw_alloc() 492 *hw_context = context; sc_hw_alloc() 496 dd_dev_err(dd, "Unable to locate a free type %d send context\n", type); sc_hw_alloc() 501 * Free the send context given by its software index. 518 /* return the base context of a context in a group */ group_context() 519 static inline u32 group_context(u32 context, u32 group) group_context() argument 521 return (context >> group) << group; group_context() 536 * send context, per NUMA. 537 * o Each send context always looks in its relative location in a struct 539 * o Each send context in a group must have its return address CSR programmed 540 * with the same value. Use the address of the first send context in the 566 * Calculate PIO block threshold for this send context using the given MTU. 584 /* check against this context's credits */ sc_mtu_to_threshold() 643 * Set the CHECK_ENABLE register for the send context 'sc'. 664 * Allocate a NUMA relative send context structure of the given type along 665 * with a HW context. 
711 /* grouping is always single context for now */ sc_alloc() 743 /* per context type checks */ sc_alloc() 752 /* set the send context check opcode mask and value */ sc_alloc() 800 * the context yet, so allocate it now. sc_alloc() 819 "Send context %u(%u) %s group %u credits %u credit_ctrl 0x%llx threshold %u\n", sc_alloc() 831 /* free a per-NUMA send context structure */ sc_free() 863 /* release the index and context for re-use */ sc_free() 871 /* disable the context */ sc_disable() 891 * Flush any waiters. Once the context is disabled, sc_disable() 893 * could be one in-process when the context is disabled). sc_disable() 899 if (sc->sr) { /* this context has a shadow ring */ sc_disable() 917 /* is egress halted on the context? */ 945 "%s: context %u(%u) timeout waiting for packets to egress, remaining count %u, bouncing link\n", sc_wait_for_packet_egress() 975 * Restart a context after it has been halted due to error. 994 dd_dev_info(dd, "restarting send context %u(%u)\n", sc->sw_index, sc_restart() 998 * Step 1: Wait for the context to actually halt. sc_restart() 1001 * on the context. sc_restart() 1009 dd_dev_err(dd, "%s: context %u(%u) not halting, skipping\n", sc_restart() 1024 * to the context's PIO pages before calling this routine and will sc_restart() 1028 /* kernel context */ sc_restart() 1036 "%s: context %u(%u) timeout waiting for PIO buffers to zero, remaining %d\n", sc_restart() 1047 * This is done while disabling the send context sc_restart() 1049 * Step 4: Disable the context sc_restart() 1057 * Step 5: Enable the context sc_restart() 1059 * This enable will clear the halted flag and per-send context sc_restart() 1080 * calls into the driver to reset its context. pio_freeze() 1085 /* only need to disable, the context is already stopped */ pio_freeze() 1093 * been cleared. Now perform the last step and re-enable each kernel context. 1161 "PIO send context init %s while initializing all PIO blocks\n", pio_reset_all() 1166 /* enable the context */ sc_enable() 1180 * attempts (which should not happen prior to context being sc_enable() 1183 * if the context accounting values have not changed. sc_enable() 1202 * Clear all per-context errors. Some of these will be set when sc_enable() 1203 * we are re-enabling after a context halt. Now that the context sc_enable() 1243 * All is well. Enable the context. sc_enable() 1249 * hazard where a PIO write may reach the context before the enable. sc_enable() 1260 /* force a credit return on the context */ sc_return_credits() 1278 /* allow all in-flight packets to drain on the context */ sc_flush() 1287 /* drop all packets on the context, no waiting until they are sent */ sc_drop() 1293 dd_dev_info(sc->dd, "%s: context %u(%u) - not implemented\n", sc_drop() 1298 * Start the software reaction to a context halt or SPC freeze: 1299 * - mark the context as halted or frozen 1309 /* mark the context */ sc_stop() 1323 * The send context buffer "allocator". 1325 * @sc: the PIO send context we are allocating from 1487 * @sc: the send context 1559 * The send context buffer "releaser". 1612 * Send context group releaser. Argument is the send context that caused 1613 * the interrupt. Called from the send context interrupt handler. 1668 * receive context but we need the RcvHdrQ entry size, init_pervl_scs() 1688 "Using send context %u(%u) for VL15\n", init_pervl_scs()
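The pio.c entry explains how send-context memory pools are sized: each pool is described either as a centipercent share (all shares must sum to 10000) or as an absolute block count, the blocks consumed by fixed-size contexts are subtracted first, and the remainder is split across the pools. The sketch below shows that split under the centipercent scheme only; the pool table, block totals and variable names are made-up numbers for illustration, not the hfi1 defaults.

/* Sketch: divide the blocks left after fixed allocations among pools
 * described in centipercent (hundredths of a percent; must sum to 10000). */
#include <stdio.h>

struct mem_pool {
	const char *name;
	int centipercent;   /* share of the remaining blocks, in 1/100 % */
	int blocks;         /* computed result */
};

int main(void)
{
	struct mem_pool pools[] = {
		{ "kernel", 5000, 0 },   /* 50% */
		{ "user",   5000, 0 },   /* 50% */
	};
	int total_blocks = 2048;     /* size of PIO send memory, in blocks */
	int fixed_blocks = 128;      /* blocks consumed by fixed-size contexts */
	int cp_total = 0, pool_blocks = total_blocks - fixed_blocks;
	int used = 0;

	for (unsigned i = 0; i < sizeof(pools) / sizeof(pools[0]); i++)
		cp_total += pools[i].centipercent;
	if (cp_total != 10000) {
		fprintf(stderr, "pool centipercent is %d, expecting 10000\n", cp_total);
		return 1;
	}

	for (unsigned i = 0; i < sizeof(pools) / sizeof(pools[0]); i++) {
		pools[i].blocks = (pool_blocks * pools[i].centipercent) / 10000;
		used += pools[i].blocks;
		printf("%s pool: %d blocks\n", pools[i].name, pools[i].blocks);
	}
	printf("unused send context blocks: %d\n", pool_blocks - used);
	return 0;
}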
|
/linux-4.4.14/drivers/acpi/ |
H A D | sbshc.h | 25 typedef void (*smbus_alarm_callback)(void *context); 32 smbus_alarm_callback callback, void *context);
|
/linux-4.4.14/arch/um/kernel/skas/ |
H A D | mmu.c | 53 struct mm_context *to_mm = &mm->context; init_new_context() 63 from_mm = ¤t->mm->context; init_new_context() 102 ret = init_stub_pte(mm, STUB_DATA, mm->context.id.stack); uml_setup_stubs() 106 mm->context.stub_pages[0] = virt_to_page(__syscall_stub_start); uml_setup_stubs() 107 mm->context.stub_pages[1] = virt_to_page(mm->context.id.stack); uml_setup_stubs() 113 mm->context.stub_pages); uml_setup_stubs() 141 struct mm_context *mmu = &mm->context; destroy_context()
|
/linux-4.4.14/arch/powerpc/include/asm/ |
H A D | mm-arch-hooks.h | 23 if (old_start == mm->context.vdso_base) arch_remap() 24 mm->context.vdso_base = new_start; arch_remap()
|
H A D | mmu_context.h | 14 * Most if the context management is out of line 62 /* Mark this context has been used on the new CPU */ switch_mm() 79 /* Switch coprocessor context only if prev or next uses a coprocessor */ switch_mm() 80 if (prev->context.acop || next->context.acop) switch_mm() 85 * context switch_mm() 108 * the context for the new mm so we see the new mappings. 142 if (start <= mm->context.vdso_base && mm->context.vdso_base < end) arch_unmap() 143 mm->context.vdso_base = 0; arch_unmap()
|
/linux-4.4.14/arch/frv/include/uapi/asm/ |
H A D | sigcontext.h | 1 /* sigcontext.h: FRV signal context 17 * Signal context structure - contains all info to do with the state
|
/linux-4.4.14/arch/arm/include/asm/ |
H A D | mmu_context.h | 29 #define init_new_context(tsk,mm) ({ atomic64_set(&mm->context.id, 0); 0; }) 48 if (unlikely(mm->context.vmalloc_seq != init_mm.context.vmalloc_seq)) check_and_switch_context() 59 mm->context.switch_pending = 1; check_and_switch_context() 70 if (mm && mm->context.switch_pending) { finish_arch_post_lock_switch() 78 if (mm->context.switch_pending) { finish_arch_post_lock_switch() 79 mm->context.switch_pending = 0; finish_arch_post_lock_switch() 98 * mm: describes the currently active mm context
|
H A D | mmu.h | 22 #define ASID(mm) ((unsigned int)((mm)->context.id.counter & ~ASID_MASK))
|
/linux-4.4.14/sound/soc/codecs/ |
H A D | rl6347a.h | 31 int rl6347a_hw_write(void *context, unsigned int reg, unsigned int value); 32 int rl6347a_hw_read(void *context, unsigned int reg, unsigned int *value);
|
H A D | rl6347a.c | 19 int rl6347a_hw_write(void *context, unsigned int reg, unsigned int value) rl6347a_hw_write() argument 21 struct i2c_client *client = context; rl6347a_hw_write() 62 int rl6347a_hw_read(void *context, unsigned int reg, unsigned int *value) rl6347a_hw_read() argument 64 struct i2c_client *client = context; rl6347a_hw_read()
|
/linux-4.4.14/drivers/nvmem/ |
H A D | rockchip-efuse.c | 44 static int rockchip_efuse_write(void *context, const void *data, size_t count) rockchip_efuse_write() argument 50 static int rockchip_efuse_read(void *context, rockchip_efuse_read() argument 55 struct rockchip_efuse_context *_context = context; rockchip_efuse_read() 130 struct rockchip_efuse_context *context; rockchip_efuse_probe() local 137 context = devm_kzalloc(dev, sizeof(struct rockchip_efuse_context), rockchip_efuse_probe() 139 if (IS_ERR(context)) rockchip_efuse_probe() 140 return PTR_ERR(context); rockchip_efuse_probe() 146 context->dev = dev; rockchip_efuse_probe() 147 context->base = base; rockchip_efuse_probe() 148 context->efuse_clk = clk; rockchip_efuse_probe() 153 context, &rockchip_efuse_regmap_config); rockchip_efuse_probe()
|
/linux-4.4.14/drivers/scsi/isci/ |
H A D | remote_node_context.h 61 * the remote node context in the silicon. It exists to model and manage 62 * the remote node context in the silicon. 94 * @SCI_RNC_INITIAL initial state for a remote node context. On a resume 95 * request the remote node context will transition to the posting state. 98 * the RNC is posted the remote node context will be made ready. 101 * the hardware. Once the invalidate is complete the remote node context will 106 * remote node context will transition to the ready state. 108 * @SCI_RNC_READY: state that the remote node context must be in to accept io 111 * @SCI_RNC_TX_SUSPENDED: state that the remote node context transitions to when 114 * @SCI_RNC_TX_RX_SUSPENDED: state that the remote node context transitions to 117 * @SCI_RNC_AWAIT_SUSPENSION: wait state for the remote node context that waits 119 * either there is a request to suspend the remote node context or when there is 142 * node context. 156 * associated with the remote node context object. The remote node context 169 * context suspension. 176 * This field is true if the remote node context is resuming from its current
|
/linux-4.4.14/drivers/net/phy/ |
H A D | at803x.c | 66 struct at803x_context *context) at803x_context_save() 68 context->bmcr = phy_read(phydev, MII_BMCR); at803x_context_save() 69 context->advertise = phy_read(phydev, MII_ADVERTISE); at803x_context_save() 70 context->control1000 = phy_read(phydev, MII_CTRL1000); at803x_context_save() 71 context->int_enable = phy_read(phydev, AT803X_INTR_ENABLE); at803x_context_save() 72 context->smart_speed = phy_read(phydev, AT803X_SMART_SPEED); at803x_context_save() 73 context->led_control = phy_read(phydev, AT803X_LED_CONTROL); at803x_context_save() 78 const struct at803x_context *context) at803x_context_restore() 80 phy_write(phydev, MII_BMCR, context->bmcr); at803x_context_restore() 81 phy_write(phydev, MII_ADVERTISE, context->advertise); at803x_context_restore() 82 phy_write(phydev, MII_CTRL1000, context->control1000); at803x_context_restore() 83 phy_write(phydev, AT803X_INTR_ENABLE, context->int_enable); at803x_context_restore() 84 phy_write(phydev, AT803X_SMART_SPEED, context->smart_speed); at803x_context_restore() 85 phy_write(phydev, AT803X_LED_CONTROL, context->led_control); at803x_context_restore() 273 struct at803x_context context; at803x_link_change_notify() local 275 at803x_context_save(phydev, &context); at803x_link_change_notify() 282 at803x_context_restore(phydev, &context); at803x_link_change_notify() 65 at803x_context_save(struct phy_device *phydev, struct at803x_context *context) at803x_context_save() argument 77 at803x_context_restore(struct phy_device *phydev, const struct at803x_context *context) at803x_context_restore() argument
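The at803x.c entry is the snapshot/restore pattern: before a disruptive link-change workaround the driver reads a handful of PHY registers into a struct at803x_context, performs the reset, and then writes the snapshot back. Below is a self-contained sketch of the same shape, with an in-memory register array standing in for the MDIO phy_read()/phy_write() accessors; the register names and values are invented.

/* Sketch: snapshot a few "registers" before a disruptive operation and
 * write them back afterwards. regs[] stands in for MDIO accesses. */
#include <stdio.h>
#include <string.h>

#define REG_BMCR	0
#define REG_ADVERTISE	1
#define REG_LED_CONTROL	2
#define NREGS		3

struct phy_context {
	unsigned short bmcr, advertise, led_control;
};

static unsigned short regs[NREGS];

static void context_save(struct phy_context *ctx)
{
	ctx->bmcr = regs[REG_BMCR];
	ctx->advertise = regs[REG_ADVERTISE];
	ctx->led_control = regs[REG_LED_CONTROL];
}

static void context_restore(const struct phy_context *ctx)
{
	regs[REG_BMCR] = ctx->bmcr;
	regs[REG_ADVERTISE] = ctx->advertise;
	regs[REG_LED_CONTROL] = ctx->led_control;
}

int main(void)
{
	struct phy_context ctx;

	regs[REG_BMCR] = 0x1140;
	regs[REG_ADVERTISE] = 0x01e1;
	regs[REG_LED_CONTROL] = 0x0800;

	context_save(&ctx);
	memset(regs, 0, sizeof(regs));   /* the "reset" wipes the hardware state */
	context_restore(&ctx);

	printf("BMCR after restore: 0x%04x\n", regs[REG_BMCR]);
	return 0;
}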
|
/linux-4.4.14/drivers/crypto/caam/ |
H A D | key_gen.h | 13 void split_key_done(struct device *dev, u32 *desc, u32 err, void *context);
|
/linux-4.4.14/arch/x86/entry/vdso/ |
H A D | vgetcpu.c | 5 * Fast user context implementation of getcpu()
|
/linux-4.4.14/arch/x86/um/ |
H A D | tls_64.c | 13 * during context switches. arch_copy_tls()
|
/linux-4.4.14/include/linux/ceph/ |
H A D | types.h | 23 /* context for the caps reservation mechanism */
|
/linux-4.4.14/arch/mips/kernel/ |
H A D | pm.c | 26 * Ensures that general CPU context is saved, notably FPU and DSP. 41 * Restores important CPU context. 63 * mips_pm_notifier() - Notifier for preserving general CPU context. 69 * ensure that important CPU context is preserved across a CPU power down.
|
/linux-4.4.14/arch/cris/include/arch-v32/arch/ |
H A D | tlb.h | 6 * to store the "process" it belongs to (=> fast mm context switch). The
|
/linux-4.4.14/tools/perf/arch/x86/tests/ |
H A D | arch-tests.c | 27 .desc = "Test intel cqm nmi context read",
|
/linux-4.4.14/drivers/staging/rdma/ipath/ |
H A D | ipath_mmap.c | 50 struct ipath_ibdev *dev = to_idev(ip->context->device); ipath_release_mmap_info() 85 * @context: the IB user context of the process making the mmap() call 89 int ipath_mmap(struct ib_ucontext *context, struct vm_area_struct *vma) ipath_mmap() argument 91 struct ipath_ibdev *dev = to_idev(context->device); ipath_mmap() 106 if (context != ip->context || (__u64) offset != ip->offset) ipath_mmap() 133 struct ib_ucontext *context, ipath_create_mmap_info() 152 ip->context = context; ipath_create_mmap_info() 131 ipath_create_mmap_info(struct ipath_ibdev *dev, u32 size, struct ib_ucontext *context, void *obj) ipath_create_mmap_info() argument
|
/linux-4.4.14/drivers/infiniband/hw/qib/ |
H A D | qib_mmap.c | 50 struct qib_ibdev *dev = to_idev(ip->context->device); qib_release_mmap_info() 85 * @context: the IB user context of the process making the mmap() call 89 int qib_mmap(struct ib_ucontext *context, struct vm_area_struct *vma) qib_mmap() argument 91 struct qib_ibdev *dev = to_idev(context->device); qib_mmap() 106 if (context != ip->context || (__u64) offset != ip->offset) qib_mmap() 133 struct ib_ucontext *context, qib_create_mmap_info() 152 ip->context = context; qib_create_mmap_info() 131 qib_create_mmap_info(struct qib_ibdev *dev, u32 size, struct ib_ucontext *context, void *obj) qib_create_mmap_info() argument
|
/linux-4.4.14/arch/cris/arch-v32/mm/ |
H A D | tlb.c | 31 * context is found in the PID register. Each TLB entry contains a page_id that 88 page_id = mm->context.page_id; __flush_tlb_mm() 130 page_id = vma->vm_mm->context.page_id; __flush_tlb_page() 138 * Invalidate those TLB entries that match both the mm context and the __flush_tlb_page() 164 * Initialize the context related info for a new mm_struct 171 mm->context.page_id = NO_CONTEXT; init_new_context() 185 /* Make sure there is a MMU context. */ switch_mm() 198 /* Switch context in the MMU. */ switch_mm() 200 SPEC_REG_WR(SPEC_REG_PID, next->context.page_id | switch_mm() 203 SPEC_REG_WR(SPEC_REG_PID, next->context.page_id); switch_mm()
|
/linux-4.4.14/tools/perf/util/ |
H A D | trace-event-parse.c | 31 static int get_common_field(struct scripting_context *context, get_common_field() argument 34 struct pevent *pevent = context->pevent; get_common_field() 50 return pevent_read_number(pevent, context->event_data + *offset, *size); get_common_field() 53 int common_lock_depth(struct scripting_context *context) common_lock_depth() argument 59 ret = get_common_field(context, &size, &offset, common_lock_depth() 67 int common_flags(struct scripting_context *context) common_flags() argument 73 ret = get_common_field(context, &size, &offset, common_flags() 81 int common_pc(struct scripting_context *context) common_pc() argument 87 ret = get_common_field(context, &size, &offset, common_pc()
|
/linux-4.4.14/drivers/gpio/ |
H A D | gpio-omap.c | 57 struct gpio_regs context; member in struct:gpio_bank 112 bank->context.oe = l; omap_set_gpio_direction() 125 bank->context.dataout |= l; omap_set_gpio_dataout_reg() 128 bank->context.dataout &= ~l; omap_set_gpio_dataout_reg() 148 bank->context.dataout = l; omap_set_gpio_dataout_mask() 256 bank->context.debounce = debounce; omap2_set_gpio_debounce() 257 bank->context.debounce_en = val; omap2_set_gpio_debounce() 282 bank->context.debounce_en &= ~gpio_bit; omap_clear_gpio_debounce() 283 writel_relaxed(bank->context.debounce_en, omap_clear_gpio_debounce() 287 bank->context.debounce = 0; omap_clear_gpio_debounce() 288 writel_relaxed(bank->context.debounce, bank->base + omap_clear_gpio_debounce() 310 bank->context.leveldetect0 = omap_set_gpio_trigger() 312 bank->context.leveldetect1 = omap_set_gpio_trigger() 314 bank->context.risingdetect = omap_set_gpio_trigger() 316 bank->context.fallingdetect = omap_set_gpio_trigger() 321 bank->context.wake_en = omap_set_gpio_trigger() 417 bank->context.wake_en = omap_set_gpio_triggering() 441 bank->context.ctrl = ctrl; omap_enable_gpio_module() 454 bank->context.wake_en = omap_disable_gpio_module() 466 bank->context.ctrl = ctrl; omap_disable_gpio_module() 570 bank->context.irqenable1 |= gpio_mask; omap_enable_gpio_irqbank() 578 bank->context.irqenable1 = l; omap_enable_gpio_irqbank() 592 bank->context.irqenable1 &= ~gpio_mask; omap_disable_gpio_irqbank() 600 bank->context.irqenable1 = l; omap_disable_gpio_irqbank() 638 bank->context.wake_en |= gpio_bit; omap_set_gpio_wakeup() 640 bank->context.wake_en &= ~gpio_bit; omap_set_gpio_wakeup() 642 writel_relaxed(bank->context.wake_en, bank->base + bank->regs->wkup_en); omap_set_gpio_wakeup() 897 writel_relaxed(0xffff & ~bank->context.wake_en, mask_reg); omap_mpuio_suspend_noirq() 912 writel_relaxed(bank->context.wake_en, mask_reg); omap_mpuio_resume_noirq() 1065 /* Save OE default value (0xffffffff) in the context */ omap_gpio_mod_init() 1066 bank->context.oe = readl_relaxed(bank->base + bank->regs->direction); omap_gpio_mod_init() 1312 * by writing back the values saved in bank->context. omap_gpio_runtime_suspend() 1314 wake_low = bank->context.leveldetect0 & bank->context.wake_en; omap_gpio_runtime_suspend() 1316 writel_relaxed(wake_low | bank->context.fallingdetect, omap_gpio_runtime_suspend() 1318 wake_hi = bank->context.leveldetect1 & bank->context.wake_en; omap_gpio_runtime_suspend() 1320 writel_relaxed(wake_hi | bank->context.risingdetect, omap_gpio_runtime_suspend() 1337 l1 = bank->context.fallingdetect; omap_gpio_runtime_suspend() 1338 l2 = bank->context.risingdetect; omap_gpio_runtime_suspend() 1372 * On the first resume during the probe, the context has not omap_gpio_runtime_resume() 1374 * the context loss count. 
omap_gpio_runtime_resume() 1392 writel_relaxed(bank->context.fallingdetect, omap_gpio_runtime_resume() 1394 writel_relaxed(bank->context.risingdetect, omap_gpio_runtime_resume() 1431 gen0 = l & bank->context.fallingdetect; omap_gpio_runtime_resume() 1434 gen1 = l & bank->context.risingdetect; omap_gpio_runtime_resume() 1438 gen = l & (~(bank->context.fallingdetect) & omap_gpio_runtime_resume() 1439 ~(bank->context.risingdetect)); omap_gpio_runtime_resume() 1507 p->context.ctrl = readl_relaxed(base + regs->ctrl); omap_gpio_init_context() 1508 p->context.oe = readl_relaxed(base + regs->direction); omap_gpio_init_context() 1509 p->context.wake_en = readl_relaxed(base + regs->wkup_en); omap_gpio_init_context() 1510 p->context.leveldetect0 = readl_relaxed(base + regs->leveldetect0); omap_gpio_init_context() 1511 p->context.leveldetect1 = readl_relaxed(base + regs->leveldetect1); omap_gpio_init_context() 1512 p->context.risingdetect = readl_relaxed(base + regs->risingdetect); omap_gpio_init_context() 1513 p->context.fallingdetect = readl_relaxed(base + regs->fallingdetect); omap_gpio_init_context() 1514 p->context.irqenable1 = readl_relaxed(base + regs->irqenable); omap_gpio_init_context() 1515 p->context.irqenable2 = readl_relaxed(base + regs->irqenable2); omap_gpio_init_context() 1518 p->context.dataout = readl_relaxed(base + regs->set_dataout); omap_gpio_init_context() 1520 p->context.dataout = readl_relaxed(base + regs->dataout); omap_gpio_init_context() 1527 writel_relaxed(bank->context.wake_en, omap_gpio_restore_context() 1529 writel_relaxed(bank->context.ctrl, bank->base + bank->regs->ctrl); omap_gpio_restore_context() 1530 writel_relaxed(bank->context.leveldetect0, omap_gpio_restore_context() 1532 writel_relaxed(bank->context.leveldetect1, omap_gpio_restore_context() 1534 writel_relaxed(bank->context.risingdetect, omap_gpio_restore_context() 1536 writel_relaxed(bank->context.fallingdetect, omap_gpio_restore_context() 1539 writel_relaxed(bank->context.dataout, omap_gpio_restore_context() 1542 writel_relaxed(bank->context.dataout, omap_gpio_restore_context() 1544 writel_relaxed(bank->context.oe, bank->base + bank->regs->direction); omap_gpio_restore_context() 1547 writel_relaxed(bank->context.debounce, bank->base + omap_gpio_restore_context() 1549 writel_relaxed(bank->context.debounce_en, omap_gpio_restore_context() 1553 writel_relaxed(bank->context.irqenable1, omap_gpio_restore_context() 1555 writel_relaxed(bank->context.irqenable2, omap_gpio_restore_context()
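gpio-omap.c keeps a struct gpio_regs shadow (bank->context) that is updated on every register write, so that when the GPIO bank loses its register contents during runtime suspend the whole block can be reprogrammed from the shadow in omap_gpio_restore_context(). The sketch below shows that write-through-shadow idea in isolation; the three registers, the simulated "context loss" and all names are illustrative only.

/* Sketch: every write goes to the device *and* to a shadow copy, so the
 * device can be reprogrammed after it loses its register contents. */
#include <stdio.h>
#include <string.h>

struct gpio_regs {
	unsigned int oe, dataout, irqenable;   /* shadow of the device */
};

struct gpio_bank {
	unsigned int hw[3];        /* [0]=oe [1]=dataout [2]=irqenable */
	struct gpio_regs context;  /* always mirrors the last values written */
};

enum { R_OE, R_DATAOUT, R_IRQEN };

static void bank_write(struct gpio_bank *bank, int reg, unsigned int val)
{
	bank->hw[reg] = val;                   /* the real register write */
	switch (reg) {                         /* keep the shadow current */
	case R_OE:      bank->context.oe = val; break;
	case R_DATAOUT: bank->context.dataout = val; break;
	case R_IRQEN:   bank->context.irqenable = val; break;
	}
}

static void bank_restore_context(struct gpio_bank *bank)
{
	bank->hw[R_OE] = bank->context.oe;
	bank->hw[R_DATAOUT] = bank->context.dataout;
	bank->hw[R_IRQEN] = bank->context.irqenable;
}

int main(void)
{
	struct gpio_bank bank = { { 0 } };

	bank_write(&bank, R_OE, 0xffff00ff);
	bank_write(&bank, R_DATAOUT, 0x00000010);
	bank_write(&bank, R_IRQEN, 0x00000001);

	memset(bank.hw, 0, sizeof(bank.hw));   /* simulate loss of context */
	bank_restore_context(&bank);

	printf("dataout after restore: 0x%08x\n", bank.hw[R_DATAOUT]);
	return 0;
}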
|
/linux-4.4.14/drivers/isdn/hisax/ |
H A D | hfc_usb.c | 260 hfcusb_data *hfc = (hfcusb_data *) urb->context; ctrl_complete() 429 usb_complete_t complete, void *context) fill_isoc_urb() 438 urb->context = context; fill_isoc_urb() 544 iso_urb_struct *context_iso_urb = (iso_urb_struct *) urb->context; tx_iso_complete() 594 tx_iso_complete, urb->context); tx_iso_complete() 682 iso_urb_struct *context_iso_urb = (iso_urb_struct *) urb->context; rx_iso_complete() 762 rx_iso_complete, urb->context); rx_iso_complete() 855 usb_fifo *fifo = (usb_fifo *) urb->context; rx_int_complete() 1260 hfcusb_data *context; hfc_usb_probe() local 1359 if (!(context = kzalloc(sizeof(hfcusb_data), GFP_KERNEL))) hfc_usb_probe() 1379 context-> hfc_usb_probe() 1386 context-> hfc_usb_probe() 1395 context-> hfc_usb_probe() 1405 context-> hfc_usb_probe() 1414 context-> hfc_usb_probe() 1423 context-> hfc_usb_probe() 1433 context-> hfc_usb_probe() 1442 context-> hfc_usb_probe() 1450 context-> hfc_usb_probe() 1455 if (context->fifos[cidx].pipe) { hfc_usb_probe() 1456 context->fifos[cidx]. hfc_usb_probe() 1458 context->fifos[cidx].hfc = hfc_usb_probe() 1459 context; hfc_usb_probe() 1460 context->fifos[cidx].usb_packet_maxlen = hfc_usb_probe() 1462 context->fifos[cidx]. hfc_usb_probe() 1465 context->fifos[cidx]. hfc_usb_probe() 1471 context->dev = dev; /* save device */ hfc_usb_probe() 1472 context->if_used = ifnum; /* save used interface */ hfc_usb_probe() 1473 context->alt_used = alt_used; /* and alternate config */ hfc_usb_probe() 1474 context->ctrl_paksize = dev->descriptor.bMaxPacketSize0; /* control size */ hfc_usb_probe() 1475 context->cfg_used = vcf[16]; /* store used config */ hfc_usb_probe() 1476 context->vend_idx = vend_idx; /* store found vendor */ hfc_usb_probe() 1477 context->packet_size = packet_size; hfc_usb_probe() 1478 context->iso_packet_size = iso_packet_size; hfc_usb_probe() 1481 context->ctrl_in_pipe = hfc_usb_probe() 1482 usb_rcvctrlpipe(context->dev, 0); hfc_usb_probe() 1483 context->ctrl_out_pipe = hfc_usb_probe() 1484 usb_sndctrlpipe(context->dev, 0); hfc_usb_probe() 1489 context->ctrl_urb = usb_alloc_urb(0, GFP_KERNEL); hfc_usb_probe() 1491 if (!context->ctrl_urb) { hfc_usb_probe() 1494 kfree(context); hfc_usb_probe() 1503 conf_str[small_match], context->if_used, hfc_usb_probe() 1504 context->alt_used, hfc_usb_probe() 1508 if (hfc_usb_init(context)) { hfc_usb_probe() 1509 usb_kill_urb(context->ctrl_urb); hfc_usb_probe() 1510 usb_free_urb(context->ctrl_urb); hfc_usb_probe() 1511 context->ctrl_urb = NULL; hfc_usb_probe() 1512 kfree(context); hfc_usb_probe() 1515 usb_set_intfdata(intf, context); hfc_usb_probe() 1529 hfcusb_data *context = usb_get_intfdata(intf); hfc_usb_disconnect() local 1532 handle_led(context, LED_POWER_OFF); hfc_usb_disconnect() 1536 context->disc_flag = 1; hfc_usb_disconnect() 1539 if (timer_pending(&context->t3_timer)) hfc_usb_disconnect() 1540 del_timer(&context->t3_timer); hfc_usb_disconnect() 1541 if (timer_pending(&context->t4_timer)) hfc_usb_disconnect() 1542 del_timer(&context->t4_timer); hfc_usb_disconnect() 1546 if (context->fifos[i].usb_transfer_mode == USB_ISOC) { hfc_usb_disconnect() 1547 if (context->fifos[i].active > 0) { hfc_usb_disconnect() 1548 stop_isoc_chain(&context->fifos[i]); hfc_usb_disconnect() 1554 if (context->fifos[i].active > 0) { hfc_usb_disconnect() 1555 context->fifos[i].active = 0; hfc_usb_disconnect() 1560 usb_kill_urb(context->fifos[i].urb); hfc_usb_disconnect() 1561 usb_free_urb(context->fifos[i].urb); hfc_usb_disconnect() 1562 context->fifos[i].urb = NULL; hfc_usb_disconnect() 1564 
context->fifos[i].active = 0; hfc_usb_disconnect() 1566 usb_kill_urb(context->ctrl_urb); hfc_usb_disconnect() 1567 usb_free_urb(context->ctrl_urb); hfc_usb_disconnect() 1568 context->ctrl_urb = NULL; hfc_usb_disconnect() 1569 hisax_unregister(&context->d_if); hfc_usb_disconnect() 1570 kfree(context); /* free our structure again */ hfc_usb_disconnect() 427 fill_isoc_urb(struct urb *urb, struct usb_device *dev, unsigned int pipe, void *buf, int num_packets, int packet_size, int interval, usb_complete_t complete, void *context) fill_isoc_urb() argument
|
/linux-4.4.14/drivers/scsi/cxlflash/ |
H A D | superpipe.c | 109 * find_error_context() - locates a context by cookie on the error recovery list 111 * @rctxid: Desired context by id. 112 * @file: Desired context by file. 114 * Return: Found context on success, NULL on failure 129 * get_context() - obtains a validated and locked context reference 131 * @rctxid: Desired context (raw, un-decoded format). 141 * Return: Validated context on success, NULL on failure 181 * Need to acquire ownership of the context while still get_context() 184 * table/list lock for a single context. get_context() 195 break; /* got the context's lock! */ get_context() 228 * put_context() - release a context that was retrieved from get_context() 231 * For now, releasing the context equates to unlocking it's mutex. 239 * afu_attach() - attach a context to the AFU 243 * Upon setting the context capabilities, they must be confirmed with 244 * a read back operation as the context might have been closed since 597 * AFU sync should _not_ be performed when the context is sitting on the error 598 * recovery list. A context on the error recovery list is not known to the AFU 599 * due to reset. When the context is recovered, it will be reattached and made 631 dev_dbg(dev, "%s: Bad context! (%llu)\n", _cxlflash_disk_release() 708 * destroy_context() - releases a context 712 * Note that the rht_lun member of the context was cut from a single 713 * allocation when the context was created and therefore does not need 715 * existence of the context control map before clearing the RHT registers 716 * and context capabilities because it is possible to destroy a context 717 * while the context is in the error state (previous mapping was removed 718 * [so we don't have to worry about clearing] and context is waiting for 728 /* Clear RHT registers and drop all capabilities for this context */ destroy_context() 735 /* Free memory associated with context */ destroy_context() 743 * create_context() - allocates and initializes a context 745 * @ctx: Previously obtained CXL context reference. 746 * @ctxid: Previously obtained process element associated with CXL context. 747 * @adap_fd: Previously obtained adapter fd associated with CXL context. 748 * @file: Previously obtained file associated with CXL context. 751 * The context's mutex is locked when an allocated context is returned. 753 * Return: Allocated context on success, NULL on failure 771 dev_err(dev, "%s: Unable to allocate context!\n", __func__); create_context() 809 * _cxlflash_disk_detach() - detaches a LUN from a context 814 * As part of the detach, all per-context resources associated with the LUN 815 * are cleaned up. When detaching the last LUN for a context, the context 842 dev_dbg(dev, "%s: Bad context! (%llu)\n", _cxlflash_disk_detach() 866 /* Take our LUN out of context, free the node */ _cxlflash_disk_detach() 875 /* Tear down context following last LUN cleanup */ _cxlflash_disk_detach() 898 * NOTE: this will free up the context from the CXL services, _cxlflash_disk_detach() 906 /* Release the sdev reference that bound this LUN to the context */ _cxlflash_disk_detach() 928 * the CXL services on an initial attach for a context. It is called 932 * a context is being removed. Note that nothing prevents the user 938 * a process tear down), the routine derives the context reference 939 * and calls detach for each LUN associated with the context. The 940 * final detach operation will cause the context itself to be freed. 945 * of a context, the context is first completely freed and then the 946 * close is performed. 
This routine will fail to derive the context 947 * reference (due to the context having already been freed) and then 950 * Thus, with exception to when the CXL process element (context id) 952 * call into this routine results in a complete freeing of a context. 954 * As part of the detach, all per-context resources associated with the LUN 955 * are cleaned up. When detaching the last LUN for a context, the context 988 dev_dbg(dev, "%s: Another process owns context %d!\n", cxlflash_cxl_release() 994 dev_dbg(dev, "%s: close(%d) for context %d\n", cxlflash_cxl_release() 1094 dev_dbg(dev, "%s: Bad context! (%d)\n", __func__, ctxid); cxlflash_mmap_fault() 1098 dev_dbg(dev, "%s: fault(%d) for context %d\n", cxlflash_mmap_fault() 1169 dev_dbg(dev, "%s: Bad context! (%d)\n", __func__, ctxid); cxlflash_cxl_mmap() 1174 dev_dbg(dev, "%s: mmap(%d) for context %d\n", cxlflash_cxl_mmap() 1200 * A context is only moved over to the error list when there are no outstanding 1240 * This routine can block and should only be used on process context. 1277 * cxlflash_disk_attach() - attach a LUN to a context 1281 * Creates a context and attaches LUN to it. A LUN can only be attached 1282 * one time to a context (subsequent attaches for the same context/LUN pair 1283 * are not supported). Additional LUNs can be attached to a context by 1334 dev_dbg(dev, "%s: Bad context! (%016llX)\n", cxlflash_disk_attach() 1365 /* Non-NULL context indicates reuse */ cxlflash_disk_attach() 1367 dev_dbg(dev, "%s: Reusing context for LUN! (%016llX)\n", cxlflash_disk_attach() 1376 dev_err(dev, "%s: Could not initialize context %p\n", cxlflash_disk_attach() 1401 dev_err(dev, "%s: Failed to create context! (%d)\n", cxlflash_disk_attach() 1412 dev_dbg(dev, "%s: Could not start context rc=%d\n", cxlflash_disk_attach() 1484 * recover_context() - recovers a context in error 1488 * Restablishes the state for a context-in-error. 1504 dev_err(dev, "%s: Could not initialize context %p\n", recover_context() 1526 dev_dbg(dev, "%s: Could not start context rc=%d\n", recover_context() 1531 /* Update with new MMIO area based on updated context id */ recover_context() 1551 * Put context back in table (note the reinit of the context list); recover_context() 1552 * we must first drop the context's mutex and then acquire it in recover_context() 1589 * a context recovery is retried if there are multiple recoveries taking 1628 /* Ensure that this process is attached to the context */ cxlflash_afu_recover() 1631 dev_dbg(dev, "%s: Bad context! (%llu)\n", __func__, ctxid); cxlflash_afu_recover() 1640 dev_err(dev, "%s: Recovery failed for context %llu (rc=%d)\n", cxlflash_afu_recover() 1673 * Before checking the state, put back the context obtained with cxlflash_afu_recover() 1788 dev_dbg(dev, "%s: Bad context! (%llu)\n", __func__, ctxid); cxlflash_disk_verify() 1925 dev_dbg(dev, "%s: Bad context! (%llu)\n", __func__, ctxid); cxlflash_disk_direct_open() 1932 dev_dbg(dev, "%s: too many opens for this context\n", __func__); cxlflash_disk_direct_open()
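The superpipe.c comments spell out a lock-validate-use-unlock discipline: get_context() looks the context up by id, takes its mutex, and re-checks that it still belongs to the caller before returning it, while put_context() is simply the unlock. Here is a userspace sketch of that discipline with pthreads; the table size, ids and owner field are simplifications, not the cxlflash structures.

/* Sketch: look a context up by id, lock it, and re-validate ownership
 * before handing it to the caller; "put" is just the unlock. */
#include <pthread.h>
#include <stdio.h>

#define MAX_CONTEXT 4

struct ctx_info {
	pthread_mutex_t mutex;
	int unavailable;     /* set while the context is being torn down */
	long owner;          /* which "process" created it */
};

static struct ctx_info table[MAX_CONTEXT];

static struct ctx_info *get_context(int ctxid, long me)
{
	struct ctx_info *ctxi;

	if (ctxid < 0 || ctxid >= MAX_CONTEXT)
		return NULL;
	ctxi = &table[ctxid];
	pthread_mutex_lock(&ctxi->mutex);
	if (ctxi->unavailable || ctxi->owner != me) {
		pthread_mutex_unlock(&ctxi->mutex);  /* not ours (any more) */
		return NULL;
	}
	return ctxi;                                 /* returned locked */
}

static void put_context(struct ctx_info *ctxi)
{
	pthread_mutex_unlock(&ctxi->mutex);          /* release == unlock */
}

int main(void)
{
	for (int i = 0; i < MAX_CONTEXT; i++) {
		pthread_mutex_init(&table[i].mutex, NULL);
		table[i].owner = -1;
	}
	table[2].owner = 1234;                       /* context 2 belongs to "pid" 1234 */

	struct ctx_info *ctxi = get_context(2, 1234);
	if (ctxi) {
		printf("got context 2, owner %ld\n", ctxi->owner);
		put_context(ctxi);
	}
	printf("foreign get: %s\n", get_context(2, 9999) ? "ok" : "rejected");
	return 0;
}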
|
/linux-4.4.14/drivers/media/pci/cx23885/ |
H A D | cx23885-ir.c | 71 /* Possibly called in an IRQ context */ cx23885_ir_rx_v4l2_dev_notify() 87 * For the integrated AV core, we are already in a workqueue context. cx23885_ir_rx_v4l2_dev_notify() 88 * For the CX23888 integrated IR, we are in an interrupt context. cx23885_ir_rx_v4l2_dev_notify() 96 /* Possibly called in an IRQ context */ cx23885_ir_tx_v4l2_dev_notify() 106 * For the integrated AV core, we are already in a workqueue context. cx23885_ir_tx_v4l2_dev_notify() 107 * For the CX23888 integrated IR, we are in an interrupt context. cx23885_ir_tx_v4l2_dev_notify()
|
/linux-4.4.14/crypto/ |
H A D | rsa_helper.c | 21 int rsa_get_n(void *context, size_t hdrlen, unsigned char tag, rsa_get_n() argument 24 struct rsa_key *key = context; rsa_get_n() 42 int rsa_get_e(void *context, size_t hdrlen, unsigned char tag, rsa_get_e() argument 45 struct rsa_key *key = context; rsa_get_e() 55 int rsa_get_d(void *context, size_t hdrlen, unsigned char tag, rsa_get_d() argument 58 struct rsa_key *key = context; rsa_get_d()
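rsa_helper.c shows the decoder-callback convention used by the kernel's ASN.1 parser: each callback receives an opaque context (here the struct rsa_key being filled in) plus a pointer and length for the field just parsed, and simply records it. The sketch below keeps only that convention, with a toy driver loop invoking the callbacks; the field layout, buffers and names are invented for illustration and are not the crypto API.

/* Sketch: parser callbacks that receive an opaque context and a
 * (value, length) pair for each decoded field, and record it. */
#include <stdio.h>
#include <stddef.h>

struct rsa_key_sketch {
	const unsigned char *n; size_t n_sz;
	const unsigned char *e; size_t e_sz;
};

typedef int (*field_cb)(void *context, const unsigned char *value, size_t vlen);

static int get_n(void *context, const unsigned char *value, size_t vlen)
{
	struct rsa_key_sketch *key = context;

	if (!value || !vlen)
		return -1;          /* reject an empty modulus */
	key->n = value;
	key->n_sz = vlen;
	return 0;
}

static int get_e(void *context, const unsigned char *value, size_t vlen)
{
	struct rsa_key_sketch *key = context;

	key->e = value;
	key->e_sz = vlen;
	return 0;
}

int main(void)
{
	/* Pretend these two buffers were cut out of a DER blob. */
	static const unsigned char mod[] = { 0xc3, 0x5f, 0x11, 0x09 };
	static const unsigned char exp[] = { 0x01, 0x00, 0x01 };
	struct rsa_key_sketch key = { 0 };
	field_cb handlers[] = { get_n, get_e };
	const unsigned char *fields[] = { mod, exp };
	size_t lens[] = { sizeof(mod), sizeof(exp) };

	for (int i = 0; i < 2; i++)
		if (handlers[i](&key, fields[i], lens[i]))
			return 1;

	printf("n is %zu bytes, e is %zu bytes\n", key.n_sz, key.e_sz);
	return 0;
}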
|
/linux-4.4.14/arch/hexagon/mm/ |
H A D | vm_tlb.c | 42 if (mm->context.ptbase == current->active_mm->context.ptbase) flush_tlb_range() 71 if (current->active_mm->context.ptbase == mm->context.ptbase) flush_tlb_mm() 82 if (mm->context.ptbase == current->active_mm->context.ptbase) flush_tlb_page()
|
/linux-4.4.14/drivers/gpu/drm/tegra/ |
H A D | drm.c | 140 DRM_DEBUG("IOMMU context initialized (aperture: %#llx-%#llx)\n", tegra_drm_load() 255 static void tegra_drm_context_free(struct tegra_drm_context *context) tegra_drm_context_free() argument 257 context->client->ops->close_channel(context); tegra_drm_context_free() 258 kfree(context); tegra_drm_context_free() 327 int tegra_drm_submit(struct tegra_drm_context *context, tegra_drm_submit() argument 348 job = host1x_job_alloc(context->channel, args->num_cmdbufs, tegra_drm_submit() 355 job->client = (u32)args->context; tegra_drm_submit() 356 job->class = context->client->base.class; tegra_drm_submit() 400 job->is_addr_reg = context->client->ops->is_addr_reg; tegra_drm_submit() 408 err = host1x_job_pin(job, context->client->base.dev); tegra_drm_submit() 430 static struct tegra_drm_context *tegra_drm_get_context(__u64 context) tegra_drm_get_context() argument 432 return (struct tegra_drm_context *)(uintptr_t)context; tegra_drm_get_context() 436 struct tegra_drm_context *context) tegra_drm_file_owns_context() 441 if (ctx == context) tegra_drm_file_owns_context() 531 struct tegra_drm_context *context; tegra_open_channel() local 535 context = kzalloc(sizeof(*context), GFP_KERNEL); tegra_open_channel() 536 if (!context) tegra_open_channel() 541 err = client->ops->open_channel(client, context); tegra_open_channel() 545 list_add(&context->list, &fpriv->contexts); tegra_open_channel() 546 args->context = (uintptr_t)context; tegra_open_channel() 547 context->client = client; tegra_open_channel() 551 kfree(context); tegra_open_channel() 560 struct tegra_drm_context *context; tegra_close_channel() local 562 context = tegra_drm_get_context(args->context); tegra_close_channel() 564 if (!tegra_drm_file_owns_context(fpriv, context)) tegra_close_channel() 567 list_del(&context->list); tegra_close_channel() 568 tegra_drm_context_free(context); tegra_close_channel() 578 struct tegra_drm_context *context; tegra_get_syncpt() local 581 context = tegra_drm_get_context(args->context); tegra_get_syncpt() 583 if (!tegra_drm_file_owns_context(fpriv, context)) tegra_get_syncpt() 586 if (args->index >= context->client->base.num_syncpts) tegra_get_syncpt() 589 syncpt = context->client->base.syncpts[args->index]; tegra_get_syncpt() 600 struct tegra_drm_context *context; tegra_submit() local 602 context = tegra_drm_get_context(args->context); tegra_submit() 604 if (!tegra_drm_file_owns_context(fpriv, context)) tegra_submit() 607 return context->client->ops->submit(context, args, drm, file); tegra_submit() 615 struct tegra_drm_context *context; tegra_get_syncpt_base() local 619 context = tegra_drm_get_context(args->context); tegra_get_syncpt_base() 621 if (!tegra_drm_file_owns_context(fpriv, context)) tegra_get_syncpt_base() 624 if (args->syncpt >= context->client->base.num_syncpts) tegra_get_syncpt_base() 627 syncpt = context->client->base.syncpts[args->syncpt]; tegra_get_syncpt_base() 862 struct tegra_drm_context *context, *tmp; tegra_drm_preclose() local 868 list_for_each_entry_safe(context, tmp, &fpriv->contexts, list) tegra_drm_preclose() 869 tegra_drm_context_free(context); tegra_drm_preclose() 435 tegra_drm_file_owns_context(struct tegra_drm_file *file, struct tegra_drm_context *context) tegra_drm_file_owns_context() argument
|
/linux-4.4.14/arch/arm/plat-omap/ |
H A D | dmtimer.c | 97 timer->context.twer); omap_timer_restore_context() 99 timer->context.tcrr); omap_timer_restore_context() 101 timer->context.tldr); omap_timer_restore_context() 103 timer->context.tmar); omap_timer_restore_context() 105 timer->context.tsicr); omap_timer_restore_context() 106 writel_relaxed(timer->context.tier, timer->irq_ena); omap_timer_restore_context() 108 timer->context.tclr); omap_timer_restore_context() 451 /* Save the context */ omap_dm_timer_start() 452 timer->context.tclr = l; omap_dm_timer_start() 472 * context. omap_dm_timer_stop() 474 timer->context.tclr = omap_dm_timer_stop() 556 /* Save the context */ omap_dm_timer_set_load() 557 timer->context.tclr = l; omap_dm_timer_set_load() 558 timer->context.tldr = load; omap_dm_timer_set_load() 586 /* Save the context */ omap_dm_timer_set_load_start() 587 timer->context.tclr = l; omap_dm_timer_set_load_start() 588 timer->context.tldr = load; omap_dm_timer_set_load_start() 589 timer->context.tcrr = load; omap_dm_timer_set_load_start() 611 /* Save the context */ omap_dm_timer_set_match() 612 timer->context.tclr = l; omap_dm_timer_set_match() 613 timer->context.tmar = match; omap_dm_timer_set_match() 638 /* Save the context */ omap_dm_timer_set_pwm() 639 timer->context.tclr = l; omap_dm_timer_set_pwm() 661 /* Save the context */ omap_dm_timer_set_prescaler() 662 timer->context.tclr = l; omap_dm_timer_set_prescaler() 677 /* Save the context */ omap_dm_timer_set_int_enable() 678 timer->context.tier = value; omap_dm_timer_set_int_enable() 679 timer->context.twer = value; omap_dm_timer_set_int_enable() 708 /* Save the context */ omap_dm_timer_set_int_disable() 709 timer->context.tier &= ~mask; omap_dm_timer_set_int_disable() 710 timer->context.twer &= ~mask; omap_dm_timer_set_int_disable() 762 /* Save the context */ omap_dm_timer_write_counter() 763 timer->context.tcrr = value; omap_dm_timer_write_counter()
|
/linux-4.4.14/drivers/md/ |
H A D | dm-round-robin.c | 71 ps->context = s; rr_create() 77 struct selector *s = (struct selector *) ps->context; rr_destroy() 82 ps->context = NULL; rr_destroy() 114 struct selector *s = (struct selector *) ps->context; rr_add_path() 133 *error = "round-robin ps: Error allocating path context"; rr_add_path() 149 struct selector *s = (struct selector *) ps->context; rr_fail_path() 157 struct selector *s = (struct selector *) ps->context; rr_reinstate_path() 168 struct selector *s = (struct selector *) ps->context; rr_select_path()
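dm-round-robin.c keeps its whole state behind the generic path selector's context pointer: a list of valid paths from which selection takes the head and requeues it at the tail, with failed paths moved aside. A short sketch of that rotate-and-skip selection over a circular index follows; arrays replace the kernel list_heads and the path names are invented.

/* Sketch: round-robin selection kept behind an opaque selector context,
 * as the dm path-selector interface does with ps->context. */
#include <stdio.h>

struct path { const char *name; int failed; };

struct selector {             /* what would hang off ps->context */
	struct path *paths;
	int npaths;
	int next;             /* index of the next candidate */
};

static struct path *rr_select_path(void *context)
{
	struct selector *s = context;

	for (int tries = 0; tries < s->npaths; tries++) {
		struct path *p = &s->paths[s->next];

		s->next = (s->next + 1) % s->npaths;   /* rotate for next time */
		if (!p->failed)
			return p;
	}
	return NULL;          /* every path has failed */
}

int main(void)
{
	struct path paths[] = { { "sda", 0 }, { "sdb", 0 }, { "sdc", 0 } };
	struct selector s = { paths, 3, 0 };

	paths[1].failed = 1;  /* fail the middle path */
	for (int i = 0; i < 4; i++) {
		struct path *p = rr_select_path(&s);
		printf("I/O %d -> %s\n", i, p ? p->name : "none");
	}
	return 0;
}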
|
H A D | dm-snap-transient.c | 28 kfree(store->context); transient_dtr() 42 struct transient_c *tc = store->context; transient_prepare_exception() 68 *sectors_allocated = ((struct transient_c *) store->context)->next_free; transient_usage() 82 store->context = tc; transient_ctr()
|
/linux-4.4.14/drivers/gpu/drm/vmwgfx/ |
H A D | vmwgfx_context.c | 258 DRM_ERROR("Out of hw context ids.\n"); vmw_context_init() 289 * GB context. 306 DRM_ERROR("Failed to allocate a context id.\n"); vmw_gb_context_create() 317 DRM_ERROR("Failed reserving FIFO space for context " vmw_gb_context_create() 351 DRM_ERROR("Failed reserving FIFO space for context " vmw_gb_context_bind() 397 DRM_ERROR("Failed reserving FIFO space for context " vmw_gb_context_unbind() 447 DRM_ERROR("Failed reserving FIFO space for context " vmw_gb_context_destroy() 465 * DX context. 482 DRM_ERROR("Failed to allocate a context id.\n"); vmw_dx_context_create() 493 DRM_ERROR("Failed reserving FIFO space for context " vmw_dx_context_create() 527 DRM_ERROR("Failed reserving FIFO space for context " vmw_dx_context_bind() 546 * cotables from a context 548 * @ctx: Pointer to the context resource 551 * COtables must be unbound before their context, but unbinding requires 553 * This function scrubs all cotables of a context, potentially reading back 555 * also makes the device context invalid, so scrub all bindings first so 556 * that doesn't have to be done later with an invalid context. 621 DRM_ERROR("Failed reserving FIFO space for context " vmw_dx_context_unbind() 671 DRM_ERROR("Failed reserving FIFO space for context " vmw_dx_context_destroy() 689 * User-space context management: 773 DRM_ERROR("Out of graphics memory for context" vmw_context_define() 839 * vmw_context_binding_list - Return a list of context bindings 841 * @ctx: The context resource 843 * Returns the current list of bindings of the given context. Note that 872 * Return a pointer to a context binding state structure 874 * @ctx: The context resource 876 * Returns the current state of bindings of the given context. Note that 887 * Sets query MOB for the context. If @mob is NULL, then this function will 888 * remove the association between the MOB and the context. This function 891 * @ctx_res: The context resource 913 /* Can only have one MOB per context for queries */ vmw_context_bind_dx_query() 928 * @ctx_res: The context resource
|
/linux-4.4.14/arch/sparc/prom/ |
H A D | mp.c | 17 /* Start cpu with prom-tree node 'cpunode' using context described 18 * by 'ctable_reg' in context 'ctx' at program counter 'pc'.
|
/linux-4.4.14/arch/mips/include/asm/ |
H A D | mmu_context.h | 2 * Switch a MMU context. 55 * into the context register. 85 #define cpu_context(cpu, mm) ((mm)->context.asid[cpu]) 123 * Initialize the context related info for a new mm_struct 134 atomic_set(&mm->context.fp_mode_switching, 0); init_new_context() 165 * Destroy context related info for an mm_struct that is about 176 * the context for the new mm so we see the new mappings. 217 /* will get a new context next time */ drop_mmu_context()
|
/linux-4.4.14/arch/m68k/mm/ |
H A D | mcfmmu.c | 132 asid = mm->context & 0xff; cf_tlb_miss() 154 * Initialize the context management stuff. 161 * init_mm, and require using context 0 for a normal task. mmu_context_init() 162 * Other processors reserve the use of context zero for the kernel. mmu_context_init() 171 * Steal a context from a task that has one at the moment. 177 * This isn't an LRU system, it just frees up each context in 186 * free up context `next_mmu_context' steal_context() 187 * if we shouldn't free context 0, don't... steal_context()
|
/linux-4.4.14/drivers/media/usb/pvrusb2/ |
H A D | pvrusb2-dvb.h | 8 #include "pvrusb2-context.h"
|
/linux-4.4.14/drivers/gpu/drm/nouveau/include/nvkm/core/ |
H A D | ramht.h | 24 int chid, int addr, u32 handle, u32 context);
|
/linux-4.4.14/arch/unicore32/include/asm/ |
H A D | suspend.h | 21 struct cpu_context_save cpu_context; /* cpu context */
|
/linux-4.4.14/arch/arc/include/uapi/asm/ |
H A D | sigcontext.h | 15 * Signal context structure - contains all info to do with the state
|
/linux-4.4.14/security/selinux/include/ |
H A D | audit.h | 44 * selinux_audit_rule_match - determine if a context ID matches a rule. 45 * @sid: the context ID to check 49 * @actx: the audit context (can be NULL) associated with the check 51 * Returns 1 if the context id matches the rule, 0 if it does not, and
|
/linux-4.4.14/arch/arm/include/asm/xen/ |
H A D | hypervisor.h | 9 /* Lazy mode for batching updates / context switch */
|
/linux-4.4.14/drivers/acpi/acpica/ |
H A D | evxface.c | 61 acpi_gpe_handler address, void *context); 76 * context - Value passed to the handler on each GPE 93 acpi_notify_handler handler, void *context) acpi_install_notify_handler() 132 acpi_gbl_global_notify[i].context = context; acpi_install_notify_handler() 202 handler_obj->notify.context = context; acpi_install_notify_handler() 287 acpi_gbl_global_notify[i].context = NULL; ACPI_EXPORT_SYMBOL() 416 * context - Value passed to the handler on each SCI ACPI_EXPORT_SYMBOL() 423 acpi_status acpi_install_sci_handler(acpi_sci_handler address, void *context) ACPI_EXPORT_SYMBOL() 444 new_sci_handler->context = context; ACPI_EXPORT_SYMBOL() 557 * context - Value passed to the handler on each event ACPI_EXPORT_SYMBOL() 568 acpi_install_global_event_handler(acpi_gbl_event_handler handler, void *context) ACPI_EXPORT_SYMBOL() 593 acpi_gbl_global_event_handler_context = context; ACPI_EXPORT_SYMBOL() 609 * context - Value passed to the handler on each GPE ACPI_EXPORT_SYMBOL() 619 acpi_event_handler handler, void *context) ACPI_EXPORT_SYMBOL() 646 acpi_gbl_fixed_event_handlers[event].context = context; ACPI_EXPORT_SYMBOL() 659 acpi_gbl_fixed_event_handlers[event].context = NULL; ACPI_EXPORT_SYMBOL() 711 acpi_gbl_fixed_event_handlers[event].context = NULL; ACPI_EXPORT_SYMBOL() 741 * context - Value passed to the handler on each GPE 754 acpi_gpe_handler address, void *context) acpi_ev_install_gpe_handler() 803 handler->context = context; acpi_ev_install_gpe_handler() 865 * context - Value passed to the handler on each GPE 876 u32 type, acpi_gpe_handler address, void *context) acpi_install_gpe_handler() 884 address, context); acpi_install_gpe_handler() 901 * context - Value passed to the handler on each GPE ACPI_EXPORT_SYMBOL() 911 u32 type, acpi_gpe_handler address, void *context) ACPI_EXPORT_SYMBOL() 918 address, context); ACPI_EXPORT_SYMBOL() 91 acpi_install_notify_handler(acpi_handle device, u32 handler_type, acpi_notify_handler handler, void *context) acpi_install_notify_handler() argument 750 acpi_ev_install_gpe_handler(acpi_handle gpe_device, u32 gpe_number, u32 type, u8 is_raw_handler, acpi_gpe_handler address, void *context) acpi_ev_install_gpe_handler() argument 874 acpi_install_gpe_handler(acpi_handle gpe_device, u32 gpe_number, u32 type, acpi_gpe_handler address, void *context) acpi_install_gpe_handler() argument
|
/linux-4.4.14/drivers/uwb/i1480/dfu/ |
H A D | dfu.c | 43 * @context: expected context 53 const char *cmd, u8 context, u8 expected_type, i1480_rceb_check() 58 if (rceb->bEventContext != context) { i1480_rceb_check() 60 dev_err(dev, "%s: unexpected context id 0x%02x " i1480_rceb_check() 62 rceb->bEventContext, context); i1480_rceb_check() 100 u8 context; i1480_cmd() local 105 get_random_bytes(&context, 1); i1480_cmd() 106 } while (context == 0x00 || context == 0xff); i1480_cmd() 107 cmd->bCommandContext = context; i1480_cmd() 147 result = i1480_rceb_check(i1480, i1480->evt_buf, cmd_name, context, i1480_cmd() 52 i1480_rceb_check(const struct i1480 *i1480, const struct uwb_rceb *rceb, const char *cmd, u8 context, u8 expected_type, unsigned expected_event) i1480_rceb_check() argument
|
/linux-4.4.14/arch/x86/include/asm/fpu/ |
H A D | api.h | 32 * get used from interrupt context as well. To prevent these kernel instructions 33 * in interrupt context interacting wrongly with other user/kernel fpu usage, we 34 * should use them only in the context of irq_ts_save/restore()
|
/linux-4.4.14/arch/x86/kernel/cpu/mcheck/ |
H A D | mce-genpool.c | 2 * MCE event pool management in MCE context 16 * printk() is not safe in MCE context. This is a lock-less memory allocator 19 * This memory pool is only to be used to save MCE records in MCE context.
|
/linux-4.4.14/arch/s390/numa/ |
H A D | toptree.h | 29 int toptree_count(struct toptree *context, int level); 31 struct toptree *toptree_first(struct toptree *context, int level); 32 struct toptree *toptree_next(struct toptree *cur, struct toptree *context,
|
H A D | toptree.c | 174 * toptree_move - Move a node to another context 241 * @context: Pointer to tree node whose descendants are to be used 245 * @context's first descendant on the specified level, or NULL 248 struct toptree *toptree_first(struct toptree *context, int level) toptree_first() argument 252 if (context->level == level) toptree_first() 253 return context; toptree_first() 255 if (!list_empty(&context->children)) { toptree_first() 256 list_for_each_entry(child, &context->children, sibling) { toptree_first() 287 * @context: Pointer to the root node of the tree or subtree to 295 struct toptree *toptree_next(struct toptree *cur, struct toptree *context, toptree_next() argument 303 if (context->level == level) toptree_next() 311 while (cur_context->level < context->level - 1) { toptree_next() 328 * @context: Pointer to node whose descendants are to be considered 334 int toptree_count(struct toptree *context, int level) toptree_count() argument 339 toptree_for_each(cur, context, level) toptree_count()
|
/linux-4.4.14/arch/arm64/include/uapi/asm/ |
H A D | sigcontext.h | 22 * Signal context structure - contains all info to do with the state 38 * context. Such structures must be placed after the rt_sigframe on the stack 56 /* ESR_EL1 context */
|
/linux-4.4.14/tools/testing/selftests/powerpc/pmu/ebb/ |
H A D | pmae_handling_test.c | 16 * Test that the kernel properly handles PMAE across context switches. 22 * The kernel must make sure that when it context switches us back in, it 46 /* Try and get ourselves scheduled, to force a PMU context switch */ syscall_ebb_callee()
|
/linux-4.4.14/tools/testing/selftests/powerpc/tm/ |
H A D | tm-resched-dscr.c | 1 /* Test context switching to see if the DSCR SPR is correctly preserved 17 * If the abort was because of a context switch, check the DSCR value. 45 printf("Check DSCR TM context switch: "); test_body()
|
/linux-4.4.14/drivers/staging/rdma/ehca/ |
H A D | ehca_pd.c | 49 struct ib_ucontext *context, struct ib_udata *udata) ehca_alloc_pd() 56 ehca_err(device, "device=%p context=%p out of memory", ehca_alloc_pd() 57 device, context); ehca_alloc_pd() 69 * User PD: when context != -1 ehca_alloc_pd() 71 if (!context) { ehca_alloc_pd() 48 ehca_alloc_pd(struct ib_device *device, struct ib_ucontext *context, struct ib_udata *udata) ehca_alloc_pd() argument
|
/linux-4.4.14/arch/x86/math-emu/ |
H A D | fpu_system.h | 29 mutex_lock(¤t->mm->context.lock); FPU_get_ldt_descriptor() 30 if (current->mm->context.ldt && seg < current->mm->context.ldt->size) FPU_get_ldt_descriptor() 31 ret = current->mm->context.ldt->entries[seg]; FPU_get_ldt_descriptor() 32 mutex_unlock(¤t->mm->context.lock); FPU_get_ldt_descriptor()
|
/linux-4.4.14/net/sunrpc/ |
H A D | timer.c | 29 * rpc_init_rtt - Initialize an RPC RTT estimator context 30 * @rt: context to initialize 52 * rpc_update_rtt - Update an RPC RTT estimator context 53 * @rt: context to update 93 * @rt: context to use for calculation
|