Searched refs:context (Results 1 - 200 of 3652) sorted by relevance


/linux-4.1.27/security/selinux/ss/
mls.h
24 #include "context.h"
27 int mls_compute_context_len(struct context *context);
28 void mls_sid_to_context(struct context *context, char **scontext);
29 int mls_context_isvalid(struct policydb *p, struct context *c);
36 struct context *context,
40 int mls_from_string(char *str, struct context *context, gfp_t gfp_mask);
42 int mls_range_set(struct context *context, struct mls_range *range);
46 struct context *context);
48 int mls_compute_sid(struct context *scontext,
49 struct context *tcontext,
52 struct context *newcontext,
55 int mls_setup_user_range(struct context *fromcon, struct user_datum *user,
56 struct context *usercon);
59 void mls_export_netlbl_lvl(struct context *context,
61 void mls_import_netlbl_lvl(struct context *context,
63 int mls_export_netlbl_cat(struct context *context,
65 int mls_import_netlbl_cat(struct context *context,
68 static inline void mls_export_netlbl_lvl(struct context *context, mls_export_netlbl_lvl() argument
73 static inline void mls_import_netlbl_lvl(struct context *context, mls_import_netlbl_lvl() argument
78 static inline int mls_export_netlbl_cat(struct context *context, mls_export_netlbl_cat() argument
83 static inline int mls_import_netlbl_cat(struct context *context, mls_import_netlbl_cat() argument
sidtab.h
3 * of security context structures indexed by SID value.
10 #include "context.h"
14 struct context context; /* security context structure */ member in struct:sidtab_node
35 int sidtab_insert(struct sidtab *s, u32 sid, struct context *context);
36 struct context *sidtab_search(struct sidtab *s, u32 sid);
37 struct context *sidtab_search_force(struct sidtab *s, u32 sid);
41 struct context *context,
46 struct context *context,
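The sidtab.h hits above outline the SID table API: a table mapping a 32-bit SID to a struct context, with insert, search, and a forced search that also returns unmapped entries. As a rough userspace model of the core idea (hypothetical simplified types, a string-only context, and no locking, unlike the kernel's sidtab):

/* Userspace model of a SID table: fixed-bucket hash chains mapping
 * a u32 SID to a context.  Illustrative only; the kernel's sidtab
 * stores full struct context values and has its own locking. */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define SIDTAB_SIZE 128

struct context { const char *str; };

struct sidtab_node {
        uint32_t sid;
        struct context context;
        struct sidtab_node *next;
};

struct sidtab { struct sidtab_node *htable[SIDTAB_SIZE]; };

static int sidtab_insert(struct sidtab *s, uint32_t sid, const char *str)
{
        struct sidtab_node *n = malloc(sizeof(*n));

        if (!n)
                return -1;
        n->sid = sid;
        n->context.str = str;
        n->next = s->htable[sid % SIDTAB_SIZE];        /* chain head */
        s->htable[sid % SIDTAB_SIZE] = n;
        return 0;
}

static struct context *sidtab_search(struct sidtab *s, uint32_t sid)
{
        struct sidtab_node *n = s->htable[sid % SIDTAB_SIZE];

        while (n && n->sid != sid)
                n = n->next;
        return n ? &n->context : NULL;
}

int main(void)
{
        struct sidtab s = { { NULL } };
        struct context *c;

        sidtab_insert(&s, 1, "system_u:system_r:kernel_t:s0");
        c = sidtab_search(&s, 1);
        printf("sid 1 -> %s\n", c ? c->str : "(none)");
        return 0;
}
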
sidtab.c
33 int sidtab_insert(struct sidtab *s, u32 sid, struct context *context) sidtab_insert() argument
62 if (context_cpy(&newnode->context, context)) { sidtab_insert()
85 static struct context *sidtab_search_core(struct sidtab *s, u32 sid, int force) sidtab_search_core()
98 if (force && cur && sid == cur->sid && cur->context.len) sidtab_search_core()
99 return &cur->context; sidtab_search_core()
101 if (cur == NULL || sid != cur->sid || cur->context.len) { sidtab_search_core()
112 return &cur->context; sidtab_search_core()
115 struct context *sidtab_search(struct sidtab *s, u32 sid) sidtab_search()
120 struct context *sidtab_search_force(struct sidtab *s, u32 sid) sidtab_search_force()
127 struct context *context, sidtab_map()
140 rc = apply(cur->sid, &cur->context, args); sidtab_map()
162 struct context *context) sidtab_search_context()
170 if (context_cmp(&cur->context, context)) { sidtab_search_context()
180 static inline u32 sidtab_search_cache(struct sidtab *s, struct context *context) sidtab_search_cache() argument
189 if (context_cmp(&node->context, context)) { sidtab_search_cache()
198 struct context *context, sidtab_context_to_sid()
207 sid = sidtab_search_cache(s, context); sidtab_context_to_sid()
209 sid = sidtab_search_context(s, context); sidtab_context_to_sid()
213 sid = sidtab_search_context(s, context); sidtab_context_to_sid()
216 /* No SID exists for the context. Allocate a new one. */ sidtab_context_to_sid()
222 if (context->len) sidtab_context_to_sid()
225 context->str); sidtab_context_to_sid()
226 ret = sidtab_insert(s, sid, context); sidtab_context_to_sid()
280 context_destroy(&temp->context); sidtab_destroy()
125 sidtab_map(struct sidtab *s, int (*apply) (u32 sid, struct context *context, void *args), void *args) sidtab_map() argument
161 sidtab_search_context(struct sidtab *s, struct context *context) sidtab_search_context() argument
197 sidtab_context_to_sid(struct sidtab *s, struct context *context, u32 *out_sid) sidtab_context_to_sid() argument
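The sidtab.c hits show the lookup-or-allocate flow of sidtab_context_to_sid(): try a small cache, then the full table, and only allocate a fresh SID when no match exists, so equal contexts always share one SID. A minimal sketch of that flow, assuming a toy linear table and a one-entry cache rather than the kernel's hash chains:

/* Sketch of the lookup-or-allocate flow visible above: cache, then
 * table, then allocation.  Userspace simplification, not the
 * kernel's implementation. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define NENTRIES 64

static char table[NENTRIES][64];   /* table[sid] = context string */
static uint32_t next_sid = 1;      /* SID 0 reserved as "invalid"  */
static uint32_t cache_sid;         /* trivial one-entry cache      */

static int context_to_sid(const char *ctx, uint32_t *out_sid)
{
        uint32_t sid;

        if (cache_sid && !strcmp(table[cache_sid], ctx)) {
                *out_sid = cache_sid;                 /* cache hit */
                return 0;
        }
        for (sid = 1; sid < next_sid; sid++)
                if (!strcmp(table[sid], ctx)) {       /* table hit */
                        *out_sid = cache_sid = sid;
                        return 0;
                }
        if (next_sid == NENTRIES)
                return -1;
        sid = next_sid++;            /* no SID exists: allocate one */
        snprintf(table[sid], sizeof(table[sid]), "%s", ctx);
        *out_sid = cache_sid = sid;
        return 0;
}

int main(void)
{
        uint32_t a, b;

        context_to_sid("user_u:user_r:user_t:s0", &a);
        context_to_sid("user_u:user_r:user_t:s0", &b);
        printf("same context, same sid: %u == %u\n", a, b);
        return 0;
}
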
mls.c
33 * security context string representation of `context'.
35 int mls_compute_context_len(struct context *context) mls_compute_context_len() argument
47 int index_sens = context->range.level[l].sens; mls_compute_context_len()
53 e = &context->range.level[l].cat; ebitmap_for_each_positive_bit()
72 if (mls_level_eq(&context->range.level[0],
73 &context->range.level[1]))
84 * Write the security context string representation of
85 * the MLS fields of `context' into the string `*scontext'.
88 void mls_sid_to_context(struct context *context, mls_sid_to_context() argument
106 context->range.level[l].sens - 1)); mls_sid_to_context()
112 e = &context->range.level[l].cat; ebitmap_for_each_positive_bit()
148 if (mls_level_eq(&context->range.level[0],
149 &context->range.level[1]))
188 * Return 1 if the MLS fields in the security context
191 int mls_context_isvalid(struct policydb *p, struct context *c) mls_context_isvalid()
217 * Set the MLS fields in the security context structure
218 * `context' based on the string representation in
227 * copy the MLS field of the associated default context.
237 struct context *context, mls_context_to_sid()
255 * No MLS component to the security context, try and map to mls_context_to_sid()
259 struct context *defcon; mls_context_to_sid()
268 rc = mls_context_cpy(context, defcon); mls_context_to_sid()
288 context->range.level[l].sens = levdatum->level->sens; mls_context_to_sid()
314 rc = ebitmap_set_bit(&context->range.level[l].cat, mls_context_to_sid()
335 rc = ebitmap_set_bit(&context->range.level[l].cat, i, 1); mls_context_to_sid()
359 context->range.level[1].sens = context->range.level[0].sens; mls_context_to_sid()
360 rc = ebitmap_cpy(&context->range.level[1].cat, mls_context_to_sid()
361 &context->range.level[0].cat); mls_context_to_sid()
372 * Set the MLS fields in the security context structure
373 * `context' based on the string representation in
377 int mls_from_string(char *str, struct context *context, gfp_t gfp_mask) mls_from_string() argument
391 rc = mls_context_to_sid(&policydb, ':', &tmpstr, context, mls_from_string()
400 * Copies the MLS range `range' into `context'.
402 int mls_range_set(struct context *context, mls_range_set() argument
407 /* Copy the MLS range into the context */ mls_range_set()
409 context->range.level[l].sens = range->level[l].sens; mls_range_set()
410 rc = ebitmap_cpy(&context->range.level[l].cat, mls_range_set()
419 int mls_setup_user_range(struct context *fromcon, struct user_datum *user, mls_setup_user_range()
420 struct context *usercon) mls_setup_user_range()
458 * Convert the MLS fields in the security context
464 struct context *c) mls_convert_context()
505 int mls_compute_sid(struct context *scontext, mls_compute_sid()
506 struct context *tcontext, mls_compute_sid()
509 struct context *newcontext, mls_compute_sid()
571 * @context: the security context
575 * Given the security context copy the low MLS sensitivity level into the
579 void mls_export_netlbl_lvl(struct context *context, mls_export_netlbl_lvl() argument
585 secattr->attr.mls.lvl = context->range.level[0].sens - 1; mls_export_netlbl_lvl()
591 * @context: the security context
595 * Given the security context and the NetLabel security attributes, copy the
596 * NetLabel MLS sensitivity level into the context.
599 void mls_import_netlbl_lvl(struct context *context, mls_import_netlbl_lvl() argument
605 context->range.level[0].sens = secattr->attr.mls.lvl + 1; mls_import_netlbl_lvl()
606 context->range.level[1].sens = context->range.level[0].sens; mls_import_netlbl_lvl()
611 * @context: the security context
615 * Given the security context copy the low MLS categories into the NetLabel
619 int mls_export_netlbl_cat(struct context *context, mls_export_netlbl_cat() argument
627 rc = ebitmap_netlbl_export(&context->range.level[0].cat, mls_export_netlbl_cat()
637 * @context: the security context
641 * Copy the NetLabel security attributes into the SELinux context; since the
643 * both the low and high categories of the context. Returns zero on success,
647 int mls_import_netlbl_cat(struct context *context, mls_import_netlbl_cat() argument
655 rc = ebitmap_netlbl_import(&context->range.level[0].cat, mls_import_netlbl_cat()
659 memcpy(&context->range.level[1].cat, &context->range.level[0].cat, mls_import_netlbl_cat()
660 sizeof(context->range.level[0].cat)); mls_import_netlbl_cat()
665 ebitmap_destroy(&context->range.level[0].cat); mls_import_netlbl_cat()
234 mls_context_to_sid(struct policydb *pol, char oldc, char **scontext, struct context *context, struct sidtab *s, u32 def_sid) mls_context_to_sid() argument
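Several mls.c hits (mls_compute_context_len, mls_sid_to_context) show that an MLS range is a low/high pair of levels and that only one level is printed when the two are equal, with sensitivities stored one-based but printed zero-based (hence the sens - 1 above). A hedged sketch of just that part of the format; the real code also appends category sets with ',' and '.' compression, which is omitted here:

/* Sketch of the low/high MLS range idea: print "s<low>" when both
 * levels match and "s<low>-s<high>" otherwise.  Category bitmaps
 * are left out. */
#include <stdio.h>

struct mls_level { unsigned int sens; };
struct mls_range { struct mls_level level[2]; };   /* 0 = low, 1 = high */

static void mls_range_print(const struct mls_range *r, char *buf, size_t n)
{
        if (r->level[0].sens == r->level[1].sens)
                snprintf(buf, n, "s%u", r->level[0].sens - 1);
        else
                snprintf(buf, n, "s%u-s%u",
                         r->level[0].sens - 1, r->level[1].sens - 1);
}

int main(void)
{
        struct mls_range r = { .level = { { .sens = 1 }, { .sens = 3 } } };
        char buf[32];

        mls_range_print(&r, buf, sizeof(buf));
        printf("%s\n", buf);    /* prints "s0-s2" */
        return 0;
}
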
context.h
2 * A security context is a set of security attributes
23 * A security context consists of an authenticated user
26 struct context { struct
32 char *str; /* string representation if context cannot be mapped. */
35 static inline void mls_context_init(struct context *c) mls_context_init()
40 static inline int mls_context_cpy(struct context *dst, struct context *src) mls_context_cpy()
60 static inline int mls_context_cpy_low(struct context *dst, struct context *src) mls_context_cpy_low()
80 static inline int mls_context_cpy_high(struct context *dst, struct context *src) mls_context_cpy_high()
97 static inline int mls_context_cmp(struct context *c1, struct context *c2) mls_context_cmp()
105 static inline void mls_context_destroy(struct context *c) mls_context_destroy()
112 static inline void context_init(struct context *c) context_init()
117 static inline int context_cpy(struct context *dst, struct context *src) context_cpy()
141 static inline void context_destroy(struct context *c) context_destroy()
150 static inline int context_cmp(struct context *c1, struct context *c2) context_cmp()
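The context.h hits sketch the shape of struct context: user, role and type as policy indices, an MLS range, and a str/len fallback for contexts that cannot be mapped under the loaded policy, with context_cmp() as a field-by-field comparison. A compilable approximation under those assumptions (field types here are guesses, not the kernel's exact layout):

/* Approximate struct context and a field-by-field compare, in the
 * spirit of context_cmp() above.  Userspace sketch only. */
#include <stdbool.h>
#include <string.h>

struct mls_level { unsigned int sens; };
struct mls_range { struct mls_level level[2]; };

struct context {
        unsigned int user;
        unsigned int role;
        unsigned int type;
        struct mls_range range;
        unsigned int len;   /* nonzero if only a string form exists */
        char *str;
};

static bool context_cmp(const struct context *c1, const struct context *c2)
{
        if (c1->len || c2->len)                 /* unmapped contexts: */
                return c1->len == c2->len &&    /* compare the strings */
                       !strcmp(c1->str, c2->str);
        return c1->user == c2->user &&
               c1->role == c2->role &&
               c1->type == c2->type &&
               c1->range.level[0].sens == c2->range.level[0].sens &&
               c1->range.level[1].sens == c2->range.level[1].sens;
}

int main(void)
{
        struct context a = { 1, 2, 3, { { { 1 }, { 1 } } }, 0, NULL };
        struct context b = a;

        return context_cmp(&a, &b) ? 0 : 1;
}
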
services.c
10 * Support for context based audit filters.
61 #include "context.h"
92 static int context_struct_to_string(struct context *context, char **scontext,
95 static void context_struct_compute_av(struct context *scontext,
96 struct context *tcontext,
266 * only. For these rules, scontext is the context before the transition,
267 * tcontext is the context after the transition, and xcontext is the context
271 static int constraint_expr_eval(struct context *scontext, constraint_expr_eval()
272 struct context *tcontext, constraint_expr_eval()
273 struct context *xcontext, constraint_expr_eval()
277 struct context *c; constraint_expr_eval()
461 static void security_dump_masked_av(struct context *scontext, security_dump_masked_av()
462 struct context *tcontext, security_dump_masked_av()
539 static void type_attribute_bounds_av(struct context *scontext, type_attribute_bounds_av()
540 struct context *tcontext, type_attribute_bounds_av()
544 struct context lo_scontext; type_attribute_bounds_av()
545 struct context lo_tcontext; type_attribute_bounds_av()
616 * Compute access vectors based on a context structure pair for
619 static void context_struct_compute_av(struct context *scontext, context_struct_compute_av()
620 struct context *tcontext, context_struct_compute_av()
716 static int security_validtrans_handle_fail(struct context *ocontext, security_validtrans_handle_fail()
717 struct context *ncontext, security_validtrans_handle_fail()
718 struct context *tcontext, security_validtrans_handle_fail()
747 struct context *ocontext; security_validate_transition()
748 struct context *ncontext; security_validate_transition()
749 struct context *tcontext; security_validate_transition()
821 struct context *old_context, *new_context; security_bounded_transition()
919 struct context *scontext = NULL, *tcontext = NULL; security_compute_av()
965 struct context *scontext = NULL, *tcontext = NULL; security_compute_av_user()
1006 * Write the security context string representation of
1007 * the context structure `context' into a dynamically
1012 static int context_struct_to_string(struct context *context, char **scontext, u32 *scontext_len) context_struct_to_string() argument
1020 if (context->len) { context_struct_to_string()
1021 *scontext_len = context->len; context_struct_to_string()
1023 *scontext = kstrdup(context->str, GFP_ATOMIC); context_struct_to_string()
1030 /* Compute the size of the context. */ context_struct_to_string()
1031 *scontext_len += strlen(sym_name(&policydb, SYM_USERS, context->user - 1)) + 1; context_struct_to_string()
1032 *scontext_len += strlen(sym_name(&policydb, SYM_ROLES, context->role - 1)) + 1; context_struct_to_string()
1033 *scontext_len += strlen(sym_name(&policydb, SYM_TYPES, context->type - 1)) + 1; context_struct_to_string()
1034 *scontext_len += mls_compute_context_len(context); context_struct_to_string()
1039 /* Allocate space for the context; caller must free this space. */ context_struct_to_string()
1046 * Copy the user name, role name and type name into the context. context_struct_to_string()
1049 sym_name(&policydb, SYM_USERS, context->user - 1), context_struct_to_string()
1050 sym_name(&policydb, SYM_ROLES, context->role - 1), context_struct_to_string()
1051 sym_name(&policydb, SYM_TYPES, context->type - 1)); context_struct_to_string()
1052 scontextp += strlen(sym_name(&policydb, SYM_USERS, context->user - 1)) + context_struct_to_string()
1053 1 + strlen(sym_name(&policydb, SYM_ROLES, context->role - 1)) + context_struct_to_string()
1054 1 + strlen(sym_name(&policydb, SYM_TYPES, context->type - 1)); context_struct_to_string()
1056 mls_sid_to_context(context, &scontextp); context_struct_to_string()
1075 struct context *context; security_sid_to_context_core() local
1105 context = sidtab_search_force(&sidtab, sid); security_sid_to_context_core()
1107 context = sidtab_search(&sidtab, sid); security_sid_to_context_core()
1108 if (!context) { security_sid_to_context_core()
1114 rc = context_struct_to_string(context, scontext, scontext_len); security_sid_to_context_core()
1123 * security_sid_to_context - Obtain a context for a given SID.
1125 * @scontext: security context
1128 * Write the string representation of the context associated with @sid
1149 struct context *ctx, string_to_context_struct()
1160 /* Parse the security context. */ string_to_context_struct()
1217 /* Check the validity of the new context. */ string_to_context_struct()
1232 struct context context; security_context_to_sid_core() local
1235 /* An empty security context is never valid. */ security_context_to_sid_core()
1270 scontext_len, &context, def_sid); security_context_to_sid_core()
1272 context.str = str; security_context_to_sid_core()
1273 context.len = scontext_len; security_context_to_sid_core()
1277 rc = sidtab_context_to_sid(&sidtab, &context, sid); security_context_to_sid_core()
1278 context_destroy(&context); security_context_to_sid_core()
1288 * security_context_to_sid - Obtain a SID for a given security context.
1289 * @scontext: security context
1292 * @gfp: context for the allocation
1294 * Obtains a SID associated with the security context that
1296 * Returns -%EINVAL if the context is invalid, -%ENOMEM if insufficient
1307 * security_context_to_sid_default - Obtain a SID for a given security context,
1310 * @scontext: security context
1315 * Obtains a SID associated with the security context that
1320 * Implicitly forces adding of the context even if it cannot be mapped yet.
1321 * Returns -%EINVAL if the context is invalid, -%ENOMEM if insufficient
1339 struct context *scontext, compute_sid_handle_invalid_context()
1340 struct context *tcontext, compute_sid_handle_invalid_context()
1342 struct context *newcontext) compute_sid_handle_invalid_context()
1368 static void filename_compute_type(struct policydb *p, struct context *newcontext, filename_compute_type()
1402 struct context *scontext = NULL, *tcontext = NULL, newcontext; security_compute_sid()
1547 /* Check the validity of the context. */ security_compute_sid()
1556 /* Obtain the sid for the context. */ security_compute_sid()
1638 struct context *context, clone_sid()
1644 return sidtab_insert(s, sid, context); clone_sid()
1649 static inline int convert_context_handle_invalid_context(struct context *context) convert_context_handle_invalid_context() argument
1657 if (!context_struct_to_string(context, &s, &len)) { convert_context_handle_invalid_context()
1670 * Convert the values in the security context
1674 * context is valid under the new policy.
1677 struct context *c, convert_context()
1681 struct context oldc; convert_context()
1697 struct context ctx; convert_context()
1720 printk(KERN_ERR "SELinux: Unable to map context %s, rc = %d.\n", convert_context()
1763 * context for all existing entries in the sidtab. convert_context()
1769 * ensure that the MLS fields of the context for all convert_context()
1783 range = &oc->context[0].range; convert_context()
1789 /* Check the validity of the new context. */ convert_context()
2021 &c->context[0], security_port_sid()
2058 &c->context[0], security_netif_sid()
2063 &c->context[1], security_netif_sid()
2148 &c->context[0], security_node_sid()
2185 struct context *fromcon, usercon; security_get_user_sids()
2331 rc = sidtab_context_to_sid(&sidtab, &c->context[0], &c->sid[0]); __security_genfs_sid()
2388 rc = sidtab_context_to_sid(&sidtab, &c->context[0], security_fs_use()
2565 struct context *context1; security_sid_mls_copy()
2566 struct context *context2; security_sid_mls_copy()
2567 struct context newcon; security_sid_mls_copy()
2605 /* Check the validity of the new context. */ security_sid_mls_copy()
2653 struct context *nlbl_ctx; security_net_peersid_resolve()
2654 struct context *xfrm_ctx; security_net_peersid_resolve()
2840 struct context au_ctxt;
2977 struct context *ctxt; selinux_audit_rule_match()
3112 * Attempt to cache the context in @ctx, which was derived from the packet in
3145 * SID/context then use SECINITSID_NETMSG as the foundation. If possible the
3156 struct context *ctx; security_netlbl_secattr_to_sid()
3157 struct context ctx_new; security_netlbl_secattr_to_sid()
3222 struct context *ctx; security_netlbl_sid_to_secattr()
1637 clone_sid(u32 sid, struct context *context, void *arg) clone_sid() argument
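context_struct_to_string() above is a classic two-pass formatter: first size the "user:role:type" string from the symbol names, then allocate and format it, with the caller freeing the result. A small userspace rendition of the same pattern (MLS suffix omitted):

/* Two-pass string building: measure, allocate, format.  Names and
 * helper are illustrative, not the kernel's API. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static char *context_to_string(const char *user, const char *role,
                               const char *type)
{
        /* Pass 1: compute the size, including two ':' and the NUL. */
        size_t len = strlen(user) + 1 + strlen(role) + 1 +
                     strlen(type) + 1;
        /* Pass 2: allocate and format; caller must free(). */
        char *s = malloc(len);

        if (s)
                snprintf(s, len, "%s:%s:%s", user, role, type);
        return s;
}

int main(void)
{
        char *s = context_to_string("system_u", "object_r", "etc_t");

        if (s) {
                puts(s);        /* system_u:object_r:etc_t */
                free(s);
        }
        return 0;
}
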
/linux-4.1.27/arch/s390/include/asm/
mmu.h
17 /* The mmu context allocates 4K page tables. */
19 /* The mmu context uses extended page tables. */
21 /* The mmu context uses storage keys. */
26 .context.list_lock = __SPIN_LOCK_UNLOCKED(name.context.list_lock), \
27 .context.pgtable_list = LIST_HEAD_INIT(name.context.pgtable_list), \
28 .context.gmap_list = LIST_HEAD_INIT(name.context.gmap_list),
mmu_context.h
18 spin_lock_init(&mm->context.list_lock); init_new_context()
19 INIT_LIST_HEAD(&mm->context.pgtable_list); init_new_context()
20 INIT_LIST_HEAD(&mm->context.gmap_list); init_new_context()
21 cpumask_clear(&mm->context.cpu_attach_mask); init_new_context()
22 atomic_set(&mm->context.attach_count, 0); init_new_context()
23 mm->context.flush_mm = 0; init_new_context()
25 mm->context.alloc_pgste = page_table_allocate_pgste; init_new_context()
26 mm->context.has_pgste = 0; init_new_context()
27 mm->context.use_skey = 0; init_new_context()
29 if (mm->context.asce_limit == 0) { init_new_context()
30 /* context created by exec, set asce limit to 4TB */ init_new_context()
31 mm->context.asce_bits = _ASCE_TABLE_LENGTH | init_new_context()
33 mm->context.asce_limit = STACK_TOP_MAX; init_new_context()
34 } else if (mm->context.asce_limit == (1UL << 31)) { init_new_context()
45 S390_lowcore.user_asce = mm->context.asce_bits | __pa(mm->pgd); set_user_asce()
74 S390_lowcore.user_asce = next->context.asce_bits | __pa(next->pgd); switch_mm()
78 cpumask_set_cpu(cpu, &next->context.cpu_attach_mask); switch_mm()
82 atomic_inc(&next->context.attach_count); switch_mm()
83 atomic_dec(&prev->context.attach_count); switch_mm()
85 cpumask_clear_cpu(cpu, &prev->context.cpu_attach_mask); switch_mm()
97 while (atomic_read(&mm->context.attach_count) >> 16) finish_arch_post_lock_switch()
101 if (mm->context.flush_mm) finish_arch_post_lock_switch()
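The switch_mm() hits show per-mm bookkeeping: the incoming mm's attach count is incremented and the outgoing mm's decremented, so the flush code can tell whether an mm is still live on any CPU. A minimal single-threaded model of that counter (the kernel uses atomics and a cpu_attach_mask as well):

/* Toy attach counting across an mm switch.  Single-threaded model
 * only; the real code uses atomic_inc/atomic_dec and a cpumask. */
#include <stdio.h>

struct mm { int attach_count; };

static void switch_mm(struct mm *prev, struct mm *next)
{
        if (prev == next)
                return;
        next->attach_count++;   /* next is now live on this CPU */
        prev->attach_count--;   /* prev no longer is */
}

int main(void)
{
        struct mm a = { 1 }, b = { 0 };   /* CPU starts on a */

        switch_mm(&a, &b);
        printf("a=%d b=%d\n", a.attach_count, b.attach_count); /* 0 1 */
        return 0;
}
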
tlbflush.h
67 atomic_add(0x10000, &mm->context.attach_count); __tlb_flush_full()
77 &mm->context.cpu_attach_mask); __tlb_flush_full()
79 atomic_sub(0x10000, &mm->context.attach_count); __tlb_flush_full()
92 count = atomic_add_return(0x10000, &mm->context.attach_count); __tlb_flush_asce()
104 &mm->context.cpu_attach_mask); __tlb_flush_asce()
106 atomic_sub(0x10000, &mm->context.attach_count); __tlb_flush_asce()
114 init_mm.context.asce_bits); __tlb_flush_kernel()
137 init_mm.context.asce_bits); __tlb_flush_kernel()
150 if (MACHINE_HAS_IDTE && list_empty(&mm->context.gmap_list)) __tlb_flush_mm()
152 mm->context.asce_bits); __tlb_flush_mm()
159 if (mm->context.flush_mm) { __tlb_flush_mm_lazy()
161 mm->context.flush_mm = 0; __tlb_flush_mm_lazy()
169 * flush_tlb_mm(mm) - flushes the specified mm context TLB's
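__tlb_flush_mm_lazy() above implements a deferred-work flag: callers set context.flush_mm instead of flushing immediately, and the flush runs once, later, when the flag is observed. A minimal model, assuming a single thread:

/* Deferred flush via a per-mm flag: set now, flush once later. */
#include <stdbool.h>
#include <stdio.h>

struct mm_context { bool flush_mm; };

static void tlb_flush_mm(struct mm_context *ctx)
{
        (void)ctx;
        printf("expensive TLB flush\n");
}

static void tlb_flush_mm_lazy(struct mm_context *ctx)
{
        if (ctx->flush_mm) {            /* flush only if requested */
                tlb_flush_mm(ctx);
                ctx->flush_mm = false;  /* and only once */
        }
}

int main(void)
{
        struct mm_context ctx = { .flush_mm = true };

        tlb_flush_mm_lazy(&ctx);   /* flushes */
        tlb_flush_mm_lazy(&ctx);   /* no-op */
        return 0;
}
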
/linux-4.1.27/drivers/staging/media/lirc/
lirc_sasem.c
165 static void delete_context(struct sasem_context *context) delete_context() argument
167 usb_free_urb(context->tx_urb); /* VFD */ delete_context()
168 usb_free_urb(context->rx_urb); /* IR */ delete_context()
169 lirc_buffer_free(context->driver->rbuf); delete_context()
170 kfree(context->driver->rbuf); delete_context()
171 kfree(context->driver); delete_context()
172 kfree(context); delete_context()
175 static void deregister_from_lirc(struct sasem_context *context) deregister_from_lirc() argument
178 int minor = context->driver->minor; deregister_from_lirc()
182 dev_err(&context->dev->dev, deregister_from_lirc()
186 dev_info(&context->dev->dev, deregister_from_lirc()
198 struct sasem_context *context = NULL; vfd_open() local
213 context = usb_get_intfdata(interface); vfd_open()
215 if (!context) { vfd_open()
216 dev_err(&interface->dev, "no context found for minor %d\n", vfd_open()
222 mutex_lock(&context->ctx_lock); vfd_open()
224 if (context->vfd_isopen) { vfd_open()
229 context->vfd_isopen = 1; vfd_open()
230 file->private_data = context; vfd_open()
234 mutex_unlock(&context->ctx_lock); vfd_open()
247 struct sasem_context *context = NULL; vfd_ioctl() local
249 context = (struct sasem_context *) file->private_data; vfd_ioctl()
251 if (!context) { vfd_ioctl()
252 pr_err("%s: no context for device\n", __func__); vfd_ioctl()
256 mutex_lock(&context->ctx_lock); vfd_ioctl()
262 context->vfd_contrast = (unsigned int)arg; vfd_ioctl()
266 mutex_unlock(&context->ctx_lock); vfd_ioctl()
270 mutex_unlock(&context->ctx_lock); vfd_ioctl()
280 struct sasem_context *context = NULL; vfd_close() local
283 context = (struct sasem_context *) file->private_data; vfd_close()
285 if (!context) { vfd_close()
286 pr_err("%s: no context for device\n", __func__); vfd_close()
290 mutex_lock(&context->ctx_lock); vfd_close()
292 if (!context->vfd_isopen) { vfd_close()
293 dev_err(&context->dev->dev, "%s: VFD is not open\n", __func__); vfd_close()
296 context->vfd_isopen = 0; vfd_close()
297 dev_info(&context->dev->dev, "VFD port closed\n"); vfd_close()
298 if (!context->dev_present && !context->ir_isopen) { vfd_close()
301 * not open. If IR port is open, context will be vfd_close()
303 mutex_unlock(&context->ctx_lock); vfd_close()
304 delete_context(context); vfd_close()
309 mutex_unlock(&context->ctx_lock); vfd_close()
316 static int send_packet(struct sasem_context *context) send_packet() argument
322 pipe = usb_sndintpipe(context->dev, send_packet()
323 context->tx_endpoint->bEndpointAddress); send_packet()
324 interval = context->tx_endpoint->bInterval; send_packet()
326 usb_fill_int_urb(context->tx_urb, context->dev, pipe, send_packet()
327 context->usb_tx_buf, sizeof(context->usb_tx_buf), send_packet()
328 usb_tx_callback, context, interval); send_packet()
330 context->tx_urb->actual_length = 0; send_packet()
332 init_completion(&context->tx.finished); send_packet()
333 atomic_set(&context->tx.busy, 1); send_packet()
335 retval = usb_submit_urb(context->tx_urb, GFP_KERNEL); send_packet()
337 atomic_set(&context->tx.busy, 0); send_packet()
338 dev_err(&context->dev->dev, "error submitting urb (%d)\n", send_packet()
342 mutex_unlock(&context->ctx_lock); send_packet()
343 wait_for_completion(&context->tx.finished); send_packet()
344 mutex_lock(&context->ctx_lock); send_packet()
346 retval = context->tx.status; send_packet()
348 dev_err(&context->dev->dev, send_packet()
365 struct sasem_context *context; vfd_write() local
368 context = (struct sasem_context *) file->private_data; vfd_write()
369 if (!context) { vfd_write()
370 pr_err("%s: no context for device\n", __func__); vfd_write()
374 mutex_lock(&context->ctx_lock); vfd_write()
376 if (!context->dev_present) { vfd_write()
383 dev_err(&context->dev->dev, "%s: invalid payload size\n", vfd_write()
396 memcpy(context->tx.data_buf, data_buf, n_bytes); vfd_write()
400 context->tx.data_buf[i] = ' '; vfd_write()
408 memcpy(context->usb_tx_buf, "\x07\0\0\0\0\0\0\0", 8); vfd_write()
409 context->usb_tx_buf[1] = (context->vfd_contrast) ? vfd_write()
410 (0x2B - (context->vfd_contrast - 1) / 250) vfd_write()
414 memcpy(context->usb_tx_buf, "\x09\x01\0\0\0\0\0\0", 8); vfd_write()
417 memcpy(context->usb_tx_buf, "\x0b\x01\0\0\0\0\0\0", 8); vfd_write()
420 memcpy(context->usb_tx_buf, context->tx.data_buf, 8); vfd_write()
423 memcpy(context->usb_tx_buf, vfd_write()
424 context->tx.data_buf + 8, 8); vfd_write()
427 memcpy(context->usb_tx_buf, "\x09\x01\0\0\0\0\0\0", 8); vfd_write()
430 memcpy(context->usb_tx_buf, "\x0b\x02\0\0\0\0\0\0", 8); vfd_write()
433 memcpy(context->usb_tx_buf, vfd_write()
434 context->tx.data_buf + 16, 8); vfd_write()
437 memcpy(context->usb_tx_buf, vfd_write()
438 context->tx.data_buf + 24, 8); vfd_write()
441 retval = send_packet(context); vfd_write()
443 dev_err(&context->dev->dev, vfd_write()
450 mutex_unlock(&context->ctx_lock); vfd_write()
461 struct sasem_context *context; usb_tx_callback() local
465 context = (struct sasem_context *) urb->context; usb_tx_callback()
466 if (!context) usb_tx_callback()
469 context->tx.status = urb->status; usb_tx_callback()
472 atomic_set(&context->tx.busy, 0); usb_tx_callback()
473 complete(&context->tx.finished); usb_tx_callback()
482 struct sasem_context *context; ir_open() local
487 context = data; ir_open()
489 mutex_lock(&context->ctx_lock); ir_open()
491 if (context->ir_isopen) { ir_open()
492 dev_err(&context->dev->dev, "%s: IR port is already open\n", ir_open()
498 usb_fill_int_urb(context->rx_urb, context->dev, ir_open()
499 usb_rcvintpipe(context->dev, ir_open()
500 context->rx_endpoint->bEndpointAddress), ir_open()
501 context->usb_rx_buf, sizeof(context->usb_rx_buf), ir_open()
502 usb_rx_callback, context, context->rx_endpoint->bInterval); ir_open()
504 retval = usb_submit_urb(context->rx_urb, GFP_KERNEL); ir_open()
507 dev_err(&context->dev->dev, ir_open()
510 context->ir_isopen = 1; ir_open()
511 dev_info(&context->dev->dev, "IR port opened\n"); ir_open()
515 mutex_unlock(&context->ctx_lock); ir_open()
526 struct sasem_context *context; ir_close() local
528 context = data; ir_close()
529 if (!context) { ir_close()
530 pr_err("%s: no context for device\n", __func__); ir_close()
534 mutex_lock(&context->ctx_lock); ir_close()
536 usb_kill_urb(context->rx_urb); ir_close()
537 context->ir_isopen = 0; ir_close()
540 if (!context->dev_present) { ir_close()
547 deregister_from_lirc(context); ir_close()
549 if (!context->vfd_isopen) { ir_close()
551 mutex_unlock(&context->ctx_lock); ir_close()
552 delete_context(context); ir_close()
555 /* If VFD port is open, context will be deleted by vfd_close */ ir_close()
558 mutex_unlock(&context->ctx_lock); ir_close()
564 static void incoming_packet(struct sasem_context *context, incoming_packet() argument
573 dev_warn(&context->dev->dev, incoming_packet()
580 dev_info(&context->dev->dev, "Incoming data: %*ph\n", len, buf); incoming_packet()
588 ms = (tv.tv_sec - context->presstime.tv_sec) * 1000 + incoming_packet()
589 (tv.tv_usec - context->presstime.tv_usec) / 1000; incoming_packet()
603 if ((ms < 250) && (context->codesaved != 0)) { incoming_packet()
604 memcpy(buf, &context->lastcode, 8); incoming_packet()
605 context->presstime.tv_sec = tv.tv_sec; incoming_packet()
606 context->presstime.tv_usec = tv.tv_usec; incoming_packet()
610 memcpy(&context->lastcode, buf, 8); incoming_packet()
615 context->codesaved = 1; incoming_packet()
616 context->presstime.tv_sec = tv.tv_sec; incoming_packet()
617 context->presstime.tv_usec = tv.tv_usec; incoming_packet()
620 lirc_buffer_write(context->driver->rbuf, buf); incoming_packet()
621 wake_up(&context->driver->rbuf->wait_poll); incoming_packet()
629 struct sasem_context *context; usb_rx_callback() local
633 context = (struct sasem_context *) urb->context; usb_rx_callback()
634 if (!context) usb_rx_callback()
643 if (context->ir_isopen) usb_rx_callback()
644 incoming_packet(context, urb); usb_rx_callback()
653 usb_submit_urb(context->rx_urb, GFP_ATOMIC); usb_rx_callback()
678 struct sasem_context *context = NULL; sasem_probe() local
746 context = kzalloc(sizeof(struct sasem_context), GFP_KERNEL); sasem_probe()
747 if (!context) { sasem_probe()
785 mutex_init(&context->ctx_lock); sasem_probe()
792 driver->data = context; sasem_probe()
799 mutex_lock(&context->ctx_lock); sasem_probe()
816 context->dev = dev; sasem_probe()
817 context->dev_present = 1; sasem_probe()
818 context->rx_endpoint = rx_endpoint; sasem_probe()
819 context->rx_urb = rx_urb; sasem_probe()
821 context->tx_endpoint = tx_endpoint; sasem_probe()
822 context->tx_urb = tx_urb; sasem_probe()
823 context->vfd_contrast = 1000; /* range 0 - 1000 */ sasem_probe()
825 context->driver = driver; sasem_probe()
827 usb_set_intfdata(interface, context); sasem_probe()
845 mutex_unlock(&context->ctx_lock); sasem_probe()
866 kfree(context); sasem_probe()
867 context = NULL; sasem_probe()
883 struct sasem_context *context; sasem_disconnect() local
888 context = usb_get_intfdata(interface); sasem_disconnect()
889 mutex_lock(&context->ctx_lock); sasem_disconnect()
895 context->dev_present = 0; sasem_disconnect()
898 usb_kill_urb(context->rx_urb); sasem_disconnect()
901 if (atomic_read(&context->tx.busy)) { sasem_disconnect()
903 usb_kill_urb(context->tx_urb); sasem_disconnect()
904 wait_for_completion(&context->tx.finished); sasem_disconnect()
908 if (!context->ir_isopen) sasem_disconnect()
909 deregister_from_lirc(context); sasem_disconnect()
913 mutex_unlock(&context->ctx_lock); sasem_disconnect()
915 if (!context->ir_isopen && !context->vfd_isopen) sasem_disconnect()
916 delete_context(context); sasem_disconnect()
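The lirc_sasem hits all enforce one lifetime rule: the context is freed by whichever path (vfd_close, ir_close or disconnect) is last to observe that the device is gone and both ports are closed. A compact model of that rule, with the driver's ctx_lock elided for brevity:

/* "Last one out frees the context" lifetime rule, modeled with
 * plain flags.  Locking omitted; the driver holds ctx_lock. */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct ctx {
        bool dev_present;
        bool vfd_isopen;
        bool ir_isopen;
};

static void maybe_delete(struct ctx *c)
{
        if (!c->dev_present && !c->vfd_isopen && !c->ir_isopen) {
                printf("last user: freeing context\n");
                free(c);
        }
}

int main(void)
{
        struct ctx *c = calloc(1, sizeof(*c));

        c->dev_present = true;
        c->vfd_isopen = true;           /* open VFD port */
        c->dev_present = false;         /* USB disconnect */
        maybe_delete(c);                /* VFD still open: keep */
        c->vfd_isopen = false;          /* vfd_close() */
        maybe_delete(c);                /* frees here */
        return 0;
}
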
lirc_imon.c
189 static void free_imon_context(struct imon_context *context) free_imon_context() argument
191 struct device *dev = context->driver->dev; free_imon_context()
193 usb_free_urb(context->tx_urb); free_imon_context()
194 usb_free_urb(context->rx_urb); free_imon_context()
195 lirc_buffer_free(context->driver->rbuf); free_imon_context()
196 kfree(context->driver->rbuf); free_imon_context()
197 kfree(context->driver); free_imon_context()
198 kfree(context); free_imon_context()
200 dev_dbg(dev, "%s: iMON context freed\n", __func__); free_imon_context()
203 static void deregister_from_lirc(struct imon_context *context) deregister_from_lirc() argument
206 int minor = context->driver->minor; deregister_from_lirc()
210 dev_err(&context->usbdev->dev, deregister_from_lirc()
213 dev_info(&context->usbdev->dev, deregister_from_lirc()
225 struct imon_context *context = NULL; display_open() local
240 context = usb_get_intfdata(interface); display_open()
242 if (!context) { display_open()
243 dev_err(&interface->dev, "no context found for minor %d\n", display_open()
249 mutex_lock(&context->ctx_lock); display_open()
251 if (!context->display) { display_open()
255 } else if (context->display_isopen) { display_open()
260 context->display_isopen = 1; display_open()
261 file->private_data = context; display_open()
262 dev_info(context->driver->dev, "display port opened\n"); display_open()
265 mutex_unlock(&context->ctx_lock); display_open()
278 struct imon_context *context = NULL; display_close() local
281 context = file->private_data; display_close()
283 if (!context) { display_close()
284 pr_err("%s: no context for device\n", __func__); display_close()
288 mutex_lock(&context->ctx_lock); display_close()
290 if (!context->display) { display_close()
291 dev_err(&context->usbdev->dev, display_close()
294 } else if (!context->display_isopen) { display_close()
295 dev_err(&context->usbdev->dev, display_close()
299 context->display_isopen = 0; display_close()
300 dev_info(context->driver->dev, "display port closed\n"); display_close()
301 if (!context->dev_present && !context->ir_isopen) { display_close()
304 * open. If IR port is open, context will be deleted by display_close()
307 mutex_unlock(&context->ctx_lock); display_close()
308 free_imon_context(context); display_close()
313 mutex_unlock(&context->ctx_lock); display_close()
319 * with context->ctx_lock held.
321 static int send_packet(struct imon_context *context) send_packet() argument
328 pipe = usb_sndintpipe(context->usbdev, send_packet()
329 context->tx_endpoint->bEndpointAddress); send_packet()
330 interval = context->tx_endpoint->bInterval; send_packet()
332 usb_fill_int_urb(context->tx_urb, context->usbdev, pipe, send_packet()
333 context->usb_tx_buf, send_packet()
334 sizeof(context->usb_tx_buf), send_packet()
335 usb_tx_callback, context, interval); send_packet()
337 context->tx_urb->actual_length = 0; send_packet()
339 init_completion(&context->tx.finished); send_packet()
340 atomic_set(&context->tx.busy, 1); send_packet()
342 retval = usb_submit_urb(context->tx_urb, GFP_KERNEL); send_packet()
344 atomic_set(&context->tx.busy, 0); send_packet()
345 dev_err(&context->usbdev->dev, "error submitting urb(%d)\n", send_packet()
349 mutex_unlock(&context->ctx_lock); send_packet()
351 &context->tx.finished); send_packet()
353 dev_err(&context->usbdev->dev, send_packet()
355 mutex_lock(&context->ctx_lock); send_packet()
357 retval = context->tx.status; send_packet()
359 dev_err(&context->usbdev->dev, send_packet()
384 struct imon_context *context; vfd_write() local
389 context = file->private_data; vfd_write()
390 if (!context) { vfd_write()
391 pr_err("%s: no context for device\n", __func__); vfd_write()
395 mutex_lock(&context->ctx_lock); vfd_write()
397 if (!context->dev_present) { vfd_write()
398 dev_err(&context->usbdev->dev, vfd_write()
405 dev_err(&context->usbdev->dev, vfd_write()
418 memcpy(context->tx.data_buf, data_buf, n_bytes); vfd_write()
422 context->tx.data_buf[i] = ' '; vfd_write()
425 context->tx.data_buf[i] = 0xFF; vfd_write()
431 memcpy(context->usb_tx_buf, context->tx.data_buf + offset, 7); vfd_write()
432 context->usb_tx_buf[7] = (unsigned char) seq; vfd_write()
434 retval = send_packet(context); vfd_write()
436 dev_err(&context->usbdev->dev, vfd_write()
447 if (context->vfd_proto_6p) { vfd_write()
449 memcpy(context->usb_tx_buf, &vfd_packet6, sizeof(vfd_packet6)); vfd_write()
450 context->usb_tx_buf[7] = (unsigned char) seq; vfd_write()
451 retval = send_packet(context); vfd_write()
453 dev_err(&context->usbdev->dev, vfd_write()
459 mutex_unlock(&context->ctx_lock); vfd_write()
470 struct imon_context *context; usb_tx_callback() local
474 context = (struct imon_context *)urb->context; usb_tx_callback()
475 if (!context) usb_tx_callback()
478 context->tx.status = urb->status; usb_tx_callback()
481 atomic_set(&context->tx.busy, 0); usb_tx_callback()
482 complete(&context->tx.finished); usb_tx_callback()
490 struct imon_context *context; ir_open() local
495 context = data; ir_open()
498 context->rx.count = 0; ir_open()
499 context->rx.initial_space = 1; ir_open()
500 context->rx.prev_bit = 0; ir_open()
502 context->ir_isopen = 1; ir_open()
503 dev_info(context->driver->dev, "IR port opened\n"); ir_open()
514 struct imon_context *context; ir_close() local
516 context = data; ir_close()
517 if (!context) { ir_close()
518 pr_err("%s: no context for device\n", __func__); ir_close()
522 mutex_lock(&context->ctx_lock); ir_close()
524 context->ir_isopen = 0; ir_close()
525 dev_info(context->driver->dev, "IR port closed\n"); ir_close()
527 if (!context->dev_present) { ir_close()
532 deregister_from_lirc(context); ir_close()
534 if (!context->display_isopen) { ir_close()
535 mutex_unlock(&context->ctx_lock); ir_close()
536 free_imon_context(context); ir_close()
540 * If display port is open, context will be deleted by ir_close()
545 mutex_unlock(&context->ctx_lock); ir_close()
552 static void submit_data(struct imon_context *context) submit_data() argument
555 int value = context->rx.count; submit_data()
558 dev_dbg(context->driver->dev, "submitting data to LIRC\n"); submit_data()
562 if (context->rx.prev_bit) submit_data()
568 lirc_buffer_write(context->driver->rbuf, buf); submit_data()
569 wake_up(&context->driver->rbuf->wait_poll); submit_data()
575 static void imon_incoming_packet(struct imon_context *context, imon_incoming_packet() argument
580 struct device *dev = context->driver->dev; imon_incoming_packet()
587 if (!context->ir_isopen) imon_incoming_packet()
612 if (buf[7] == 1 && context->rx.initial_space) { imon_incoming_packet()
614 context->rx.prev_bit = 0; imon_incoming_packet()
615 context->rx.count = 4; imon_incoming_packet()
616 submit_data(context); imon_incoming_packet()
617 context->rx.count = 0; imon_incoming_packet()
625 if (curr_bit != context->rx.prev_bit) { imon_incoming_packet()
626 if (context->rx.count) { imon_incoming_packet()
627 submit_data(context); imon_incoming_packet()
628 context->rx.count = 0; imon_incoming_packet()
630 context->rx.prev_bit = curr_bit; imon_incoming_packet()
632 ++context->rx.count; imon_incoming_packet()
638 if (context->rx.count) { imon_incoming_packet()
639 submit_data(context); imon_incoming_packet()
640 context->rx.count = 0; imon_incoming_packet()
642 context->rx.initial_space = context->rx.prev_bit; imon_incoming_packet()
651 struct imon_context *context; usb_rx_callback() local
657 context = (struct imon_context *)urb->context; usb_rx_callback()
658 if (!context) usb_rx_callback()
666 imon_incoming_packet(context, urb, intfnum); usb_rx_callback()
670 dev_warn(context->driver->dev, "imon %s: status(%d): ignored\n", usb_rx_callback()
675 usb_submit_urb(context->rx_urb, GFP_ATOMIC); usb_rx_callback()
701 struct imon_context *context = NULL; imon_probe() local
708 context = kzalloc(sizeof(struct imon_context), GFP_KERNEL); imon_probe()
709 if (!context) { imon_probe()
719 context->display = 0; imon_probe()
721 context->display = 1; imon_probe()
768 if (context->display == 0) { imon_probe()
820 mutex_init(&context->ctx_lock); imon_probe()
821 context->vfd_proto_6p = vfd_proto_6p; imon_probe()
828 driver->data = context; imon_probe()
835 mutex_lock(&context->ctx_lock); imon_probe()
837 context->driver = driver; imon_probe()
852 context->usbdev = usbdev; imon_probe()
853 context->dev_present = 1; imon_probe()
854 context->rx_endpoint = rx_endpoint; imon_probe()
855 context->rx_urb = rx_urb; imon_probe()
861 context->tx_endpoint = tx_endpoint; imon_probe()
862 context->tx_urb = tx_urb; imon_probe()
865 context->display = 1; imon_probe()
867 usb_fill_int_urb(context->rx_urb, context->usbdev, imon_probe()
868 usb_rcvintpipe(context->usbdev, imon_probe()
869 context->rx_endpoint->bEndpointAddress), imon_probe()
870 context->usb_rx_buf, sizeof(context->usb_rx_buf), imon_probe()
871 usb_rx_callback, context, imon_probe()
872 context->rx_endpoint->bInterval); imon_probe()
874 retval = usb_submit_urb(context->rx_urb, GFP_KERNEL); imon_probe()
882 usb_set_intfdata(interface, context); imon_probe()
884 if (context->display && ifnum == 0) { imon_probe()
899 mutex_unlock(&context->ctx_lock); imon_probe()
921 kfree(context); imon_probe()
922 context = NULL; imon_probe()
941 struct imon_context *context; imon_disconnect() local
947 context = usb_get_intfdata(interface); imon_disconnect()
950 mutex_lock(&context->ctx_lock); imon_disconnect()
955 if (atomic_read(&context->tx.busy)) { imon_disconnect()
956 usb_kill_urb(context->tx_urb); imon_disconnect()
957 complete_all(&context->tx.finished); imon_disconnect()
960 context->dev_present = 0; imon_disconnect()
961 usb_kill_urb(context->rx_urb); imon_disconnect()
962 if (context->display) imon_disconnect()
965 if (!context->ir_isopen && !context->dev_present) { imon_disconnect()
966 deregister_from_lirc(context); imon_disconnect()
967 mutex_unlock(&context->ctx_lock); imon_disconnect()
968 if (!context->display_isopen) imon_disconnect()
969 free_imon_context(context); imon_disconnect()
971 mutex_unlock(&context->ctx_lock); imon_disconnect()
981 struct imon_context *context = usb_get_intfdata(intf); imon_suspend() local
983 usb_kill_urb(context->rx_urb); imon_suspend()
990 struct imon_context *context = usb_get_intfdata(intf); imon_resume() local
992 usb_fill_int_urb(context->rx_urb, context->usbdev, imon_resume()
993 usb_rcvintpipe(context->usbdev, imon_resume()
994 context->rx_endpoint->bEndpointAddress), imon_resume()
995 context->usb_rx_buf, sizeof(context->usb_rx_buf), imon_resume()
996 usb_rx_callback, context, imon_resume()
997 context->rx_endpoint->bInterval); imon_resume()
999 return usb_submit_urb(context->rx_urb, GFP_ATOMIC); imon_resume()
/linux-4.1.27/drivers/misc/vmw_vmci/
vmci_context.c
34 * These, along with context lookup, are protected by the
39 spinlock_t lock; /* Spinlock for context list operations */
48 static void ctx_signal_notify(struct vmci_ctx *context) ctx_signal_notify() argument
50 *context->notify = true; ctx_signal_notify()
53 static void ctx_clear_notify(struct vmci_ctx *context) ctx_clear_notify() argument
55 *context->notify = false; ctx_clear_notify()
62 static void ctx_clear_notify_call(struct vmci_ctx *context) ctx_clear_notify_call() argument
64 if (context->pending_datagrams == 0 && ctx_clear_notify_call()
65 vmci_handle_arr_get_size(context->pending_doorbell_array) == 0) ctx_clear_notify_call()
66 ctx_clear_notify(context); ctx_clear_notify_call()
70 * Sets the context's notify flag iff datagrams are pending for this
71 * context. Called from vmci_setup_notify().
73 void vmci_ctx_check_signal_notify(struct vmci_ctx *context) vmci_ctx_check_signal_notify() argument
75 spin_lock(&context->lock); vmci_ctx_check_signal_notify()
76 if (context->pending_datagrams) vmci_ctx_check_signal_notify()
77 ctx_signal_notify(context); vmci_ctx_check_signal_notify()
78 spin_unlock(&context->lock); vmci_ctx_check_signal_notify()
82 * Allocates and initializes a VMCI context.
89 struct vmci_ctx *context; vmci_ctx_create() local
93 pr_devel("Invalid context ID for VMCI context\n"); vmci_ctx_create()
99 pr_devel("Invalid flag (flags=0x%x) for VMCI context\n", vmci_ctx_create()
111 context = kzalloc(sizeof(*context), GFP_KERNEL); vmci_ctx_create()
112 if (!context) { vmci_ctx_create()
113 pr_warn("Failed to allocate memory for VMCI context\n"); vmci_ctx_create()
118 kref_init(&context->kref); vmci_ctx_create()
119 spin_lock_init(&context->lock); vmci_ctx_create()
120 INIT_LIST_HEAD(&context->list_item); vmci_ctx_create()
121 INIT_LIST_HEAD(&context->datagram_queue); vmci_ctx_create()
122 INIT_LIST_HEAD(&context->notifier_list); vmci_ctx_create()
124 /* Initialize host-specific VMCI context. */ vmci_ctx_create()
125 init_waitqueue_head(&context->host_context.wait_queue); vmci_ctx_create()
127 context->queue_pair_array = vmci_handle_arr_create(0); vmci_ctx_create()
128 if (!context->queue_pair_array) { vmci_ctx_create()
133 context->doorbell_array = vmci_handle_arr_create(0); vmci_ctx_create()
134 if (!context->doorbell_array) { vmci_ctx_create()
139 context->pending_doorbell_array = vmci_handle_arr_create(0); vmci_ctx_create()
140 if (!context->pending_doorbell_array) { vmci_ctx_create()
145 context->user_version = user_version; vmci_ctx_create()
147 context->priv_flags = priv_flags; vmci_ctx_create()
150 context->cred = get_cred(cred); vmci_ctx_create()
152 context->notify = &ctx_dummy_notify; vmci_ctx_create()
153 context->notify_page = NULL; vmci_ctx_create()
156 * If we collide with an existing context we generate a new vmci_ctx_create()
169 context->cid = cid; vmci_ctx_create()
171 list_add_tail_rcu(&context->list_item, &ctx_list.head); vmci_ctx_create()
174 return context; vmci_ctx_create()
177 vmci_handle_arr_destroy(context->doorbell_array); vmci_ctx_create()
179 vmci_handle_arr_destroy(context->queue_pair_array); vmci_ctx_create()
181 kfree(context); vmci_ctx_create()
187 * Destroy VMCI context.
189 void vmci_ctx_destroy(struct vmci_ctx *context) vmci_ctx_destroy() argument
192 list_del_rcu(&context->list_item); vmci_ctx_destroy()
196 vmci_ctx_put(context); vmci_ctx_destroy()
261 pr_devel("Failed to enqueue event datagram (type=%d) for context (ID=0x%x)\n", ctx_fire_notification()
263 ev.msg.hdr.dst.context); ctx_fire_notification()
279 struct vmci_ctx *context; vmci_ctx_pending_datagrams() local
281 context = vmci_ctx_get(cid); vmci_ctx_pending_datagrams()
282 if (context == NULL) vmci_ctx_pending_datagrams()
285 spin_lock(&context->lock); vmci_ctx_pending_datagrams()
287 *pending = context->pending_datagrams; vmci_ctx_pending_datagrams()
288 spin_unlock(&context->lock); vmci_ctx_pending_datagrams()
289 vmci_ctx_put(context); vmci_ctx_pending_datagrams()
295 * Queues a VMCI datagram for the appropriate target VM context.
300 struct vmci_ctx *context; vmci_ctx_enqueue_datagram() local
310 /* Get the target VM's VMCI context. */ vmci_ctx_enqueue_datagram()
311 context = vmci_ctx_get(cid); vmci_ctx_enqueue_datagram()
312 if (!context) { vmci_ctx_enqueue_datagram()
313 pr_devel("Invalid context (ID=0x%x)\n", cid); vmci_ctx_enqueue_datagram()
321 vmci_ctx_put(context); vmci_ctx_enqueue_datagram()
329 spin_lock(&context->lock); vmci_ctx_enqueue_datagram()
340 if (context->datagram_queue_size + vmci_dg_size >= vmci_ctx_enqueue_datagram()
346 context->datagram_queue_size + vmci_dg_size >= vmci_ctx_enqueue_datagram()
348 spin_unlock(&context->lock); vmci_ctx_enqueue_datagram()
349 vmci_ctx_put(context); vmci_ctx_enqueue_datagram()
355 list_add(&dq_entry->list_item, &context->datagram_queue); vmci_ctx_enqueue_datagram()
356 context->pending_datagrams++; vmci_ctx_enqueue_datagram()
357 context->datagram_queue_size += vmci_dg_size; vmci_ctx_enqueue_datagram()
358 ctx_signal_notify(context); vmci_ctx_enqueue_datagram()
359 wake_up(&context->host_context.wait_queue); vmci_ctx_enqueue_datagram()
360 spin_unlock(&context->lock); vmci_ctx_enqueue_datagram()
361 vmci_ctx_put(context); vmci_ctx_enqueue_datagram()
367 * Verifies whether a context with the specified context ID exists.
369 * using this data as context can appear and disappear at any time.
373 struct vmci_ctx *context; vmci_ctx_exists() local
378 list_for_each_entry_rcu(context, &ctx_list.head, list_item) { vmci_ctx_exists()
379 if (context->cid == cid) { vmci_ctx_exists()
390 * Retrieves VMCI context corresponding to the given cid.
394 struct vmci_ctx *c, *context = NULL; vmci_ctx_get() local
403 * The context owner drops its own reference to the vmci_ctx_get()
404 * context only after removing it from the list and vmci_ctx_get()
410 context = c; vmci_ctx_get()
411 kref_get(&context->kref); vmci_ctx_get()
417 return context; vmci_ctx_get()
421 * Deallocates all parts of a context data structure. This
422 * function doesn't lock the context, because it assumes that
423 * the caller was holding the last reference to context.
427 struct vmci_ctx *context = container_of(kref, struct vmci_ctx, kref); ctx_free_ctx() local
434 * context is dying. ctx_free_ctx()
436 ctx_fire_notification(context->cid, context->priv_flags); ctx_free_ctx()
439 * Cleanup all queue pair resources attached to context. If ctx_free_ctx()
443 temp_handle = vmci_handle_arr_get_entry(context->queue_pair_array, 0); ctx_free_ctx()
446 context) < VMCI_SUCCESS) { ctx_free_ctx()
453 vmci_handle_arr_remove_entry(context->queue_pair_array, ctx_free_ctx()
457 vmci_handle_arr_get_entry(context->queue_pair_array, 0); ctx_free_ctx()
462 * this is the only thread having a reference to the context. ctx_free_ctx()
465 &context->datagram_queue, list_item) { ctx_free_ctx()
473 &context->notifier_list, node) { ctx_free_ctx()
478 vmci_handle_arr_destroy(context->queue_pair_array); ctx_free_ctx()
479 vmci_handle_arr_destroy(context->doorbell_array); ctx_free_ctx()
480 vmci_handle_arr_destroy(context->pending_doorbell_array); ctx_free_ctx()
481 vmci_ctx_unset_notify(context); ctx_free_ctx()
482 if (context->cred) ctx_free_ctx()
483 put_cred(context->cred); ctx_free_ctx()
484 kfree(context); ctx_free_ctx()
488 * Drops reference to VMCI context. If this is the last reference to
489 * the context it will be deallocated. A context is created with
491 * the context list before its reference count is decremented. Thus,
493 * it (they need the entry in the context list for that), and so there
496 void vmci_ctx_put(struct vmci_ctx *context) vmci_ctx_put() argument
498 kref_put(&context->kref, ctx_free_ctx); vmci_ctx_put()
509 int vmci_ctx_dequeue_datagram(struct vmci_ctx *context, vmci_ctx_dequeue_datagram() argument
518 spin_lock(&context->lock); vmci_ctx_dequeue_datagram()
519 if (context->pending_datagrams == 0) { vmci_ctx_dequeue_datagram()
520 ctx_clear_notify_call(context); vmci_ctx_dequeue_datagram()
521 spin_unlock(&context->lock); vmci_ctx_dequeue_datagram()
526 list_item = context->datagram_queue.next; vmci_ctx_dequeue_datagram()
534 spin_unlock(&context->lock); vmci_ctx_dequeue_datagram()
541 context->pending_datagrams--; vmci_ctx_dequeue_datagram()
542 context->datagram_queue_size -= dq_entry->dg_size; vmci_ctx_dequeue_datagram()
543 if (context->pending_datagrams == 0) { vmci_ctx_dequeue_datagram()
544 ctx_clear_notify_call(context); vmci_ctx_dequeue_datagram()
552 list_item = context->datagram_queue.next; vmci_ctx_dequeue_datagram()
563 spin_unlock(&context->lock); vmci_ctx_dequeue_datagram()
577 void vmci_ctx_unset_notify(struct vmci_ctx *context) vmci_ctx_unset_notify() argument
581 spin_lock(&context->lock); vmci_ctx_unset_notify()
583 notify_page = context->notify_page; vmci_ctx_unset_notify()
584 context->notify = &ctx_dummy_notify; vmci_ctx_unset_notify()
585 context->notify_page = NULL; vmci_ctx_unset_notify()
587 spin_unlock(&context->lock); vmci_ctx_unset_notify()
601 struct vmci_ctx *context; vmci_ctx_add_notification() local
606 context = vmci_ctx_get(context_id); vmci_ctx_add_notification()
607 if (!context) vmci_ctx_add_notification()
617 if (context->priv_flags & VMCI_PRIVILEGE_FLAG_RESTRICTED) { vmci_ctx_add_notification()
631 spin_lock(&context->lock); vmci_ctx_add_notification()
633 list_for_each_entry(n, &context->notifier_list, node) { vmci_ctx_add_notification()
644 list_add_tail_rcu(&notifier->node, &context->notifier_list); vmci_ctx_add_notification()
645 context->n_notifiers++; vmci_ctx_add_notification()
649 spin_unlock(&context->lock); vmci_ctx_add_notification()
652 vmci_ctx_put(context); vmci_ctx_add_notification()
657 * Remove remote_cid from current context's list of contexts it is
662 struct vmci_ctx *context; vmci_ctx_remove_notification() local
667 context = vmci_ctx_get(context_id); vmci_ctx_remove_notification()
668 if (!context) vmci_ctx_remove_notification()
673 spin_lock(&context->lock); vmci_ctx_remove_notification()
675 &context->notifier_list, node) { vmci_ctx_remove_notification()
678 context->n_notifiers--; vmci_ctx_remove_notification()
683 spin_unlock(&context->lock); vmci_ctx_remove_notification()
690 vmci_ctx_put(context); vmci_ctx_remove_notification()
695 static int vmci_ctx_get_chkpt_notifiers(struct vmci_ctx *context, vmci_ctx_get_chkpt_notifiers() argument
703 if (context->n_notifiers == 0) { vmci_ctx_get_chkpt_notifiers()
709 data_size = context->n_notifiers * sizeof(*notifiers); vmci_ctx_get_chkpt_notifiers()
719 list_for_each_entry(entry, &context->notifier_list, node) vmci_ctx_get_chkpt_notifiers()
720 notifiers[i++] = entry->handle.context; vmci_ctx_get_chkpt_notifiers()
727 static int vmci_ctx_get_chkpt_doorbells(struct vmci_ctx *context, vmci_ctx_get_chkpt_doorbells() argument
734 n_doorbells = vmci_handle_arr_get_size(context->doorbell_array); vmci_ctx_get_chkpt_doorbells()
748 context->doorbell_array, i); vmci_ctx_get_chkpt_doorbells()
761 * Get current context's checkpoint state of given type.
768 struct vmci_ctx *context; vmci_ctx_get_chkpt_state() local
771 context = vmci_ctx_get(context_id); vmci_ctx_get_chkpt_state()
772 if (!context) vmci_ctx_get_chkpt_state()
775 spin_lock(&context->lock); vmci_ctx_get_chkpt_state()
779 result = vmci_ctx_get_chkpt_notifiers(context, buf_size, pbuf); vmci_ctx_get_chkpt_state()
794 result = vmci_ctx_get_chkpt_doorbells(context, buf_size, pbuf); vmci_ctx_get_chkpt_state()
803 spin_unlock(&context->lock); vmci_ctx_get_chkpt_state()
804 vmci_ctx_put(context); vmci_ctx_get_chkpt_state()
810 * Set current context's checkpoint state of given type.
850 * Retrieves the specified context's pending notifications in the
860 struct vmci_ctx *context; vmci_ctx_rcv_notifications_get() local
863 context = vmci_ctx_get(context_id); vmci_ctx_rcv_notifications_get()
864 if (context == NULL) vmci_ctx_rcv_notifications_get()
867 spin_lock(&context->lock); vmci_ctx_rcv_notifications_get()
869 *db_handle_array = context->pending_doorbell_array; vmci_ctx_rcv_notifications_get()
870 context->pending_doorbell_array = vmci_handle_arr_create(0); vmci_ctx_rcv_notifications_get()
871 if (!context->pending_doorbell_array) { vmci_ctx_rcv_notifications_get()
872 context->pending_doorbell_array = *db_handle_array; vmci_ctx_rcv_notifications_get()
878 spin_unlock(&context->lock); vmci_ctx_rcv_notifications_get()
879 vmci_ctx_put(context); vmci_ctx_rcv_notifications_get()
895 struct vmci_ctx *context = vmci_ctx_get(context_id); vmci_ctx_rcv_notifications_release() local
897 spin_lock(&context->lock); vmci_ctx_rcv_notifications_release()
903 * holding the context lock, so we transfer any new pending vmci_ctx_rcv_notifications_release()
909 context->pending_doorbell_array); vmci_ctx_rcv_notifications_release()
917 context->pending_doorbell_array); vmci_ctx_rcv_notifications_release()
919 vmci_handle_arr_destroy(context->pending_doorbell_array); vmci_ctx_rcv_notifications_release()
920 context->pending_doorbell_array = db_handle_array; vmci_ctx_rcv_notifications_release()
923 ctx_clear_notify_call(context); vmci_ctx_rcv_notifications_release()
925 spin_unlock(&context->lock); vmci_ctx_rcv_notifications_release()
926 vmci_ctx_put(context); vmci_ctx_rcv_notifications_release()
937 * context. Only doorbell handles registered can be notified.
941 struct vmci_ctx *context; vmci_ctx_dbell_create() local
947 context = vmci_ctx_get(context_id); vmci_ctx_dbell_create()
948 if (context == NULL) vmci_ctx_dbell_create()
951 spin_lock(&context->lock); vmci_ctx_dbell_create()
952 if (!vmci_handle_arr_has_entry(context->doorbell_array, handle)) { vmci_ctx_dbell_create()
953 vmci_handle_arr_append_entry(&context->doorbell_array, handle); vmci_ctx_dbell_create()
959 spin_unlock(&context->lock); vmci_ctx_dbell_create()
960 vmci_ctx_put(context); vmci_ctx_dbell_create()
971 struct vmci_ctx *context; vmci_ctx_dbell_destroy() local
977 context = vmci_ctx_get(context_id); vmci_ctx_dbell_destroy()
978 if (context == NULL) vmci_ctx_dbell_destroy()
981 spin_lock(&context->lock); vmci_ctx_dbell_destroy()
983 vmci_handle_arr_remove_entry(context->doorbell_array, handle); vmci_ctx_dbell_destroy()
984 vmci_handle_arr_remove_entry(context->pending_doorbell_array, handle); vmci_ctx_dbell_destroy()
985 spin_unlock(&context->lock); vmci_ctx_dbell_destroy()
987 vmci_ctx_put(context); vmci_ctx_dbell_destroy()
999 struct vmci_ctx *context; vmci_ctx_dbell_destroy_all() local
1005 context = vmci_ctx_get(context_id); vmci_ctx_dbell_destroy_all()
1006 if (context == NULL) vmci_ctx_dbell_destroy_all()
1009 spin_lock(&context->lock); vmci_ctx_dbell_destroy_all()
1011 struct vmci_handle_arr *arr = context->doorbell_array; vmci_ctx_dbell_destroy_all()
1015 struct vmci_handle_arr *arr = context->pending_doorbell_array; vmci_ctx_dbell_destroy_all()
1018 spin_unlock(&context->lock); vmci_ctx_dbell_destroy_all()
1020 vmci_ctx_put(context); vmci_ctx_dbell_destroy_all()
1027 * specified source context. The notification of doorbells are
1030 * of sender rights than those assigned to the sending context
1031 * itself, the host context is required to specify a different
1033 * the source context.
1045 /* Get the target VM's VMCI context. */ vmci_ctx_notify_dbell()
1046 dst_context = vmci_ctx_get(handle.context); vmci_ctx_notify_dbell()
1048 pr_devel("Invalid context (ID=0x%x)\n", handle.context); vmci_ctx_notify_dbell()
1052 if (src_cid != handle.context) { vmci_ctx_notify_dbell()
1056 VMCI_CONTEXT_IS_VM(handle.context)) { vmci_ctx_notify_dbell()
1058 src_cid, handle.context); vmci_ctx_notify_dbell()
1066 handle.context, handle.resource); vmci_ctx_notify_dbell()
1081 if (handle.context == VMCI_HOST_CONTEXT_ID) { vmci_ctx_notify_dbell()
1112 bool vmci_ctx_supports_host_qp(struct vmci_ctx *context) vmci_ctx_supports_host_qp() argument
1114 return context && context->user_version >= VMCI_VERSION_HOSTQP; vmci_ctx_supports_host_qp()
1119 * the context.
1121 int vmci_ctx_qp_create(struct vmci_ctx *context, struct vmci_handle handle) vmci_ctx_qp_create() argument
1125 if (context == NULL || vmci_handle_is_invalid(handle)) vmci_ctx_qp_create()
1128 if (!vmci_handle_arr_has_entry(context->queue_pair_array, handle)) { vmci_ctx_qp_create()
1129 vmci_handle_arr_append_entry(&context->queue_pair_array, vmci_ctx_qp_create()
1143 int vmci_ctx_qp_destroy(struct vmci_ctx *context, struct vmci_handle handle) vmci_ctx_qp_destroy() argument
1147 if (context == NULL || vmci_handle_is_invalid(handle)) vmci_ctx_qp_destroy()
1150 hndl = vmci_handle_arr_remove_entry(context->queue_pair_array, handle); vmci_ctx_qp_destroy()
1158 * with the given context.
1160 bool vmci_ctx_qp_exists(struct vmci_ctx *context, struct vmci_handle handle) vmci_ctx_qp_exists() argument
1162 if (context == NULL || vmci_handle_is_invalid(handle)) vmci_ctx_qp_exists()
1165 return vmci_handle_arr_has_entry(context->queue_pair_array, handle); vmci_ctx_qp_exists()
1170 * @context_id: The context ID of the VMCI context.
1172 * Retrieves privilege flags of the given VMCI context ID.
1178 struct vmci_ctx *context; vmci_context_get_priv_flags() local
1180 context = vmci_ctx_get(context_id); vmci_context_get_priv_flags()
1181 if (!context) vmci_context_get_priv_flags()
1184 flags = context->priv_flags; vmci_context_get_priv_flags()
1185 vmci_ctx_put(context); vmci_context_get_priv_flags()
1193 * vmci_is_context_owner() - Determines if the user is the context owner
1194 * @context_id: The context ID of the VMCI context.
1197 * Determines whether a given UID is the owner of given VMCI context.
1204 struct vmci_ctx *context = vmci_ctx_get(context_id); vmci_is_context_owner() local
1205 if (context) { vmci_is_context_owner()
1206 if (context->cred) vmci_is_context_owner()
1207 is_owner = uid_eq(context->cred->uid, uid); vmci_is_context_owner()
1208 vmci_ctx_put(context); vmci_is_context_owner()
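Taken together, the vmci_context.c hits above show the locking discipline around doorbell state: every reader and writer of doorbell_array and pending_doorbell_array runs under context->lock, and vmci_ctx_rcv_notifications_release() hands freshly arrived notifications back by swapping array pointers under the lock rather than copying entries. A minimal hedged sketch of that swap-under-spinlock idiom; struct demo_handle_arr and the helper names are illustrative stand-ins, not the real vmci_handle_arr API:

#include <linux/kernel.h>   /* swap() */
#include <linux/spinlock.h>

struct demo_handle_arr;                         /* stand-in type */
void demo_handle_arr_destroy(struct demo_handle_arr *arr);

struct demo_ctx {
        spinlock_t lock;
        struct demo_handle_arr *pending;        /* pending doorbells */
};

/* Caller passes in an empty array; on return it owns whatever had been
 * pending, while notifications that race in keep landing in ctx->pending. */
static void demo_drain_pending(struct demo_ctx *ctx,
                               struct demo_handle_arr *empty)
{
        spin_lock(&ctx->lock);
        swap(ctx->pending, empty);      /* pointer swap, no per-entry copy */
        spin_unlock(&ctx->lock);

        /* 'empty' now holds the previously pending entries: process them
         * outside the lock, then free the array. */
        demo_handle_arr_destroy(empty);
}

The point of the pointer swap is that the lock is held for only a few instructions, no matter how many notifications are queued.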
H A Dvmci_route.c26 * devices. Will set the source context if it is invalid.
49 /* Must have a valid destination context. */ vmci_route()
50 if (VMCI_INVALID_ID == dst->context) vmci_route()
54 if (VMCI_HYPERVISOR_CONTEXT_ID == dst->context) { vmci_route()
71 /* And we cannot send if the source is the host context. */ vmci_route()
72 if (VMCI_HOST_CONTEXT_ID == src->context) vmci_route()
77 * respect it (both context and resource are invalid). vmci_route()
78 * However, if they passed only an invalid context, vmci_route()
80 * should set the real context here before passing it vmci_route()
83 if (VMCI_INVALID_ID == src->context && vmci_route()
85 src->context = vmci_get_context_id(); vmci_route()
93 if (VMCI_HOST_CONTEXT_ID == dst->context) { vmci_route()
100 * way to remove any ambiguity from the host context. vmci_route()
102 if (src->context == VMCI_HYPERVISOR_CONTEXT_ID) { vmci_route()
120 /* If no source context then use the current. */ vmci_route()
121 if (VMCI_INVALID_ID == src->context) vmci_route()
122 src->context = vmci_get_context_id(); vmci_route()
138 if (VMCI_INVALID_ID == src->context) { vmci_route()
141 * valid context. Otherwise we can use the vmci_route()
142 * host context. vmci_route()
147 src->context = VMCI_HOST_CONTEXT_ID; vmci_route()
160 /* It will have a context if it is meant for a guest. */ vmci_route()
161 if (vmci_ctx_exists(dst->context)) { vmci_route()
162 if (VMCI_INVALID_ID == src->context) { vmci_route()
165 * must have a valid context. vmci_route()
167 * context. vmci_route()
173 src->context = VMCI_HOST_CONTEXT_ID; vmci_route()
174 } else if (VMCI_CONTEXT_IS_VM(src->context) && vmci_route()
175 src->context != dst->context) { vmci_route()
181 * VM since there is a valid context. vmci_route()
193 * without an active context, and we can't vmci_route()
216 /* If no source context then use the current context. */ vmci_route()
217 if (VMCI_INVALID_ID == src->context) vmci_route()
218 src->context = vmci_get_context_id(); vmci_route()
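Stripped of its comments, vmci_route() above is a small decision table keyed on the destination context ID, with an invalid source context defaulted to the current one. A condensed hedged restatement in C (the enum and return codes are illustrative, and several of the function's finer rules are deliberately dropped; the constants and helpers are the ones visible above):

#include <linux/errno.h>
#include <linux/types.h>

enum demo_route { DEMO_ROUTE_HYPERVISOR, DEMO_ROUTE_HOST, DEMO_ROUTE_GUEST };

static int demo_pick_route(u32 *src_cid, u32 dst_cid, enum demo_route *out)
{
        /* Must have a valid destination context. */
        if (dst_cid == VMCI_INVALID_ID)
                return -EINVAL;

        if (*src_cid == VMCI_INVALID_ID)        /* default the source */
                *src_cid = vmci_get_context_id();

        if (dst_cid == VMCI_HYPERVISOR_CONTEXT_ID) {
                /* The host context cannot send to the hypervisor. */
                if (*src_cid == VMCI_HOST_CONTEXT_ID)
                        return -EPERM;
                *out = DEMO_ROUTE_HYPERVISOR;
                return 0;
        }

        if (dst_cid == VMCI_HOST_CONTEXT_ID) {
                *out = DEMO_ROUTE_HOST;
                return 0;
        }

        /* Otherwise the datagram is guest-bound; the real function then
         * picks the delivery path based on vmci_ctx_exists(dst). */
        *out = DEMO_ROUTE_GUEST;
        return 0;
}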
H A Dvmci_context.h58 * this context; e.g., VMX.
68 * is also accessed from the context
75 /* Doorbells created by context. */
78 /* Doorbells pending for context. */
81 /* Contexts the current context is subscribing to. */
93 /* VMCINotifyAddRemoveInfo: Used to add/remove remote context notifications. */
99 /* VMCICptBufInfo: Used to set/get current context's checkpoint state. */
137 void vmci_ctx_destroy(struct vmci_ctx *context);
139 bool vmci_ctx_supports_host_qp(struct vmci_ctx *context);
141 int vmci_ctx_dequeue_datagram(struct vmci_ctx *context,
145 void vmci_ctx_put(struct vmci_ctx *context);
155 int vmci_ctx_qp_create(struct vmci_ctx *context, struct vmci_handle handle);
156 int vmci_ctx_qp_destroy(struct vmci_ctx *context, struct vmci_handle handle);
157 bool vmci_ctx_qp_exists(struct vmci_ctx *context, struct vmci_handle handle);
159 void vmci_ctx_check_signal_notify(struct vmci_ctx *context);
160 void vmci_ctx_unset_notify(struct vmci_ctx *context);
175 static inline u32 vmci_ctx_get_id(struct vmci_ctx *context) vmci_ctx_get_id() argument
177 if (!context) vmci_ctx_get_id()
179 return context->cid; vmci_ctx_get_id()
H A Dvmci_datagram.c100 handle.context, handle.resource, result); dg_create_handle()
142 * Calls the specified callback in a delayed context.
160 * Dispatch a datagram as the host, to the host, or to another VM context. This
161 * function cannot dispatch to hypervisor context handlers. This should
174 if (dg->dst.context == VMCI_HYPERVISOR_CONTEXT_ID) dg_dispatch_as_host()
177 /* Check that source handle matches sending context. */ dg_dispatch_as_host()
178 if (dg->src.context != context_id) { dg_dispatch_as_host()
179 pr_devel("Sender context (ID=0x%x) is not owner of src datagram entry (handle=0x%x:0x%x)\n", dg_dispatch_as_host()
180 context_id, dg->src.context, dg->src.resource); dg_dispatch_as_host()
189 dg->src.context, dg->src.resource); dg_dispatch_as_host()
194 if (dg->dst.context == VMCI_HOST_CONTEXT_ID) { dg_dispatch_as_host()
199 if (dg->src.context == VMCI_HYPERVISOR_CONTEXT_ID && dg_dispatch_as_host()
208 dg->dst.context, dg->dst.resource); dg_dispatch_as_host()
225 dg->src.context == VMCI_HOST_CONTEXT_ID) { dg_dispatch_as_host()
258 /* Route to destination VM context. */ dg_dispatch_as_host()
261 if (context_id != dg->dst.context) { dg_dispatch_as_host()
264 (dg->dst.context))) { dg_dispatch_as_host()
268 * If the sending context is a VM, it dg_dispatch_as_host()
273 context_id, dg->dst.context); dg_dispatch_as_host()
284 retval = vmci_ctx_enqueue_datagram(dg->dst.context, new_dg); dg_dispatch_as_host()
341 dg->src.context, dg->dst.context, retval); vmci_datagram_dispatch()
372 dg->dst.context, dg->dst.resource); vmci_datagram_invoke_guest_handler()
402 * vmci_datagram_create_handle_priv() - Create host context datagram endpoint
410 * Creates a host context datagram endpoint and returns a handle to it.
436 * vmci_datagram_create_handle() - Create host context datagram endpoint
443 * Creates a host context datagram endpoint and returns a handle to
476 handle.context, handle.resource); vmci_datagram_destroy_handle()
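The dg_dispatch_as_host() fragments above show the gatekeeping every host-dispatched datagram passes before queuing: hypervisor-bound datagrams are rejected outright, and the source handle must actually belong to the sending context. A minimal hedged sketch of those two checks (struct layout abbreviated from the handles visible above; error codes illustrative):

#include <linux/errno.h>
#include <linux/types.h>

struct demo_handle { u32 context; u32 resource; };
struct demo_dg { struct demo_handle dst, src; };        /* abbreviated */

static int demo_validate_dispatch(u32 sender_cid, const struct demo_dg *dg)
{
        /* This path cannot dispatch to hypervisor context handlers. */
        if (dg->dst.context == VMCI_HYPERVISOR_CONTEXT_ID)
                return -EPERM;

        /* The source handle must match the sending context. */
        if (dg->src.context != sender_cid)
                return -EPERM;

        return 0;
}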
H A Dvmci_host.c93 struct vmci_ctx *context; member in struct:vmci_host_dev
96 struct mutex lock; /* Mutex lock for vmci context access */
146 vmci_ctx_destroy(vmci_host_dev->context); vmci_host_close()
147 vmci_host_dev->context = NULL; vmci_host_close()
152 * a context is created through the IOCTL_VMCI_INIT_CONTEXT vmci_host_close()
171 struct vmci_ctx *context = vmci_host_dev->context; vmci_host_poll() local
175 /* Check for VMCI calls to this VM context. */ vmci_host_poll()
177 poll_wait(filp, &context->host_context.wait_queue, vmci_host_poll()
180 spin_lock(&context->lock); vmci_host_poll()
181 if (context->pending_datagrams > 0 || vmci_host_poll()
183 context->pending_doorbell_array) > 0) { vmci_host_poll()
186 spin_unlock(&context->lock); vmci_host_poll()
221 * Sets up a given context for notify to work. Maps the notify
224 static int vmci_host_setup_notify(struct vmci_ctx *context, vmci_host_setup_notify() argument
229 if (context->notify_page) { vmci_host_setup_notify()
245 retval = get_user_pages_fast(uva, 1, 1, &context->notify_page); vmci_host_setup_notify()
247 context->notify_page = NULL; vmci_host_setup_notify()
254 context->notify = kmap(context->notify_page) + (uva & (PAGE_SIZE - 1)); vmci_host_setup_notify()
255 vmci_ctx_check_signal_notify(context); vmci_host_setup_notify()
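vmci_host_setup_notify() above is a compact recipe for exposing a single user-space flag to the kernel: pin the page with get_user_pages_fast(), kmap() it, and add the byte offset within the page so the kernel pointer lands exactly on the user address. A stripped-down sketch of the same sequence, using the call signatures as they appear in this tree (undoing it with kunmap()/put_page() is left to the caller):

#include <linux/mm.h>
#include <linux/highmem.h>

/* Pin one user page at 'uva' and return a kernel pointer aliasing it. */
static void *demo_map_user_flag(unsigned long uva, struct page **pagep)
{
        /* One page, writable: the same arguments as the call above. */
        if (get_user_pages_fast(uva, 1, 1, pagep) != 1)
                return NULL;

        /* Keep the in-page offset so the pointer hits 'uva' exactly. */
        return kmap(*pagep) + (uva & (PAGE_SIZE - 1));
}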
322 vmci_host_dev->context = vmci_ctx_create(init_block.cid, vmci_host_do_init_context()
327 if (IS_ERR(vmci_host_dev->context)) { vmci_host_do_init_context()
328 retval = PTR_ERR(vmci_host_dev->context); vmci_host_do_init_context()
329 vmci_ioctl_err("error initializing context\n"); vmci_host_do_init_context()
337 init_block.cid = vmci_ctx_get_id(vmci_host_dev->context); vmci_host_do_init_context()
339 vmci_ctx_destroy(vmci_host_dev->context); vmci_host_do_init_context()
340 vmci_host_dev->context = NULL; vmci_host_do_init_context()
405 dg->dst.context, dg->dst.resource, vmci_host_do_send_datagram()
406 dg->src.context, dg->src.resource, vmci_host_do_send_datagram()
409 /* Get source context id. */ vmci_host_do_send_datagram()
410 cid = vmci_ctx_get_id(vmci_host_dev->context); vmci_host_do_send_datagram()
435 recv_info.result = vmci_ctx_dequeue_datagram(vmci_host_dev->context, vmci_host_do_receive_datagram()
463 cid = vmci_ctx_get_id(vmci_host_dev->context); vmci_host_do_alloc_queuepair()
482 vmci_host_dev->context); vmci_host_do_alloc_queuepair()
507 vmci_host_dev->context); vmci_host_do_alloc_queuepair()
513 vmci_host_dev->context); vmci_host_do_alloc_queuepair()
548 vmci_host_dev->context, vmci_host_do_queuepair_setva()
556 vmci_host_dev->context, 0); vmci_host_do_queuepair_setva()
613 vmci_host_dev->context); vmci_host_do_queuepair_setpf()
657 vmci_host_dev->context); vmci_host_do_qp_detach()
683 cid = vmci_ctx_get_id(vmci_host_dev->context); vmci_host_do_ctx_add_notify()
706 cid = vmci_ctx_get_id(vmci_host_dev->context); vmci_host_do_ctx_remove_notify()
730 cid = vmci_ctx_get_id(vmci_host_dev->context); vmci_host_do_ctx_get_cpt_state()
776 cid = vmci_ctx_get_id(vmci_host_dev->context); vmci_host_do_ctx_set_cpt_state()
812 vmci_host_setup_notify(vmci_host_dev->context, vmci_host_do_set_notify()
815 vmci_ctx_unset_notify(vmci_host_dev->context); vmci_host_do_set_notify()
843 cid = vmci_ctx_get_id(vmci_host_dev->context); vmci_host_do_notify_resource()
902 cid = vmci_ctx_get_id(vmci_host_dev->context); vmci_host_do_recv_notifications()
/linux-4.1.27/arch/tile/gxio/
H A Duart.c28 int gxio_uart_init(gxio_uart_context_t *context, int uart_index) gxio_uart_init() argument
42 context->fd = fd; gxio_uart_init()
45 context->mmio_base = (void __force *) gxio_uart_init()
48 if (context->mmio_base == NULL) { gxio_uart_init()
49 hv_dev_close(context->fd); gxio_uart_init()
50 context->fd = -1; gxio_uart_init()
59 int gxio_uart_destroy(gxio_uart_context_t *context) gxio_uart_destroy() argument
61 iounmap((void __force __iomem *)(context->mmio_base)); gxio_uart_destroy()
62 hv_dev_close(context->fd); gxio_uart_destroy()
64 context->mmio_base = NULL; gxio_uart_destroy()
65 context->fd = -1; gxio_uart_destroy()
73 void gxio_uart_write(gxio_uart_context_t *context, uint64_t offset, gxio_uart_write() argument
76 __gxio_mmio_write(context->mmio_base + offset, word); gxio_uart_write()
82 uint64_t gxio_uart_read(gxio_uart_context_t *context, uint64_t offset) gxio_uart_read() argument
84 return __gxio_mmio_read(context->mmio_base + offset); gxio_uart_read()
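The four gxio_uart functions above follow the gxio convention of a caller-owned context: init fills in the hypervisor fd and the MMIO mapping, read/write are thin accessors over mmio_base, and destroy unmaps and closes. A hedged usage sketch built only from the signatures visible above (DEMO_REG_OFFSET is an illustrative offset, not a real UART register):

#define DEMO_REG_OFFSET 0x0     /* illustrative register offset */

static int demo_uart_poke(void)
{
        gxio_uart_context_t ctx;        /* caller-owned context */

        if (gxio_uart_init(&ctx, 0) != 0)       /* open UART 0 */
                return -1;

        gxio_uart_write(&ctx, DEMO_REG_OFFSET, 0x55);   /* MMIO write */
        (void)gxio_uart_read(&ctx, DEMO_REG_OFFSET);    /* MMIO read */

        gxio_uart_destroy(&ctx);        /* iounmap + close the fd */
        return 0;
}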
H A Dusb_host.c29 int gxio_usb_host_init(gxio_usb_host_context_t *context, int usb_index, gxio_usb_host_init() argument
50 context->fd = fd; gxio_usb_host_init()
53 context->mmio_base = gxio_usb_host_init()
56 if (context->mmio_base == NULL) { gxio_usb_host_init()
57 hv_dev_close(context->fd); gxio_usb_host_init()
66 int gxio_usb_host_destroy(gxio_usb_host_context_t *context) gxio_usb_host_destroy() argument
68 iounmap((void __force __iomem *)(context->mmio_base)); gxio_usb_host_destroy()
69 hv_dev_close(context->fd); gxio_usb_host_destroy()
71 context->mmio_base = NULL; gxio_usb_host_destroy()
72 context->fd = -1; gxio_usb_host_destroy()
79 void *gxio_usb_host_get_reg_start(gxio_usb_host_context_t *context) gxio_usb_host_get_reg_start() argument
81 return context->mmio_base; gxio_usb_host_get_reg_start()
86 size_t gxio_usb_host_get_reg_len(gxio_usb_host_context_t *context) gxio_usb_host_get_reg_len() argument
H A Diorpc_mpipe.c24 int gxio_mpipe_alloc_buffer_stacks(gxio_mpipe_context_t *context, gxio_mpipe_alloc_buffer_stacks() argument
35 return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params, gxio_mpipe_alloc_buffer_stacks()
48 int gxio_mpipe_init_buffer_stack_aux(gxio_mpipe_context_t *context, gxio_mpipe_init_buffer_stack_aux() argument
69 return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params, gxio_mpipe_init_buffer_stack_aux()
83 int gxio_mpipe_alloc_notif_rings(gxio_mpipe_context_t *context, gxio_mpipe_alloc_notif_rings() argument
94 return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params, gxio_mpipe_alloc_notif_rings()
105 int gxio_mpipe_init_notif_ring_aux(gxio_mpipe_context_t *context, void *mem_va, gxio_mpipe_init_notif_ring_aux() argument
124 return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params, gxio_mpipe_init_notif_ring_aux()
136 int gxio_mpipe_request_notif_ring_interrupt(gxio_mpipe_context_t *context, gxio_mpipe_request_notif_ring_interrupt() argument
150 return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params, gxio_mpipe_request_notif_ring_interrupt()
161 int gxio_mpipe_enable_notif_ring_interrupt(gxio_mpipe_context_t *context, gxio_mpipe_enable_notif_ring_interrupt() argument
169 return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params, gxio_mpipe_enable_notif_ring_interrupt()
182 int gxio_mpipe_alloc_notif_groups(gxio_mpipe_context_t *context, gxio_mpipe_alloc_notif_groups() argument
193 return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params, gxio_mpipe_alloc_notif_groups()
204 int gxio_mpipe_init_notif_group(gxio_mpipe_context_t *context, gxio_mpipe_init_notif_group() argument
214 return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params, gxio_mpipe_init_notif_group()
226 int gxio_mpipe_alloc_buckets(gxio_mpipe_context_t *context, unsigned int count, gxio_mpipe_alloc_buckets() argument
236 return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params, gxio_mpipe_alloc_buckets()
247 int gxio_mpipe_init_bucket(gxio_mpipe_context_t *context, unsigned int bucket, gxio_mpipe_init_bucket() argument
256 return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params, gxio_mpipe_init_bucket()
268 int gxio_mpipe_alloc_edma_rings(gxio_mpipe_context_t *context, gxio_mpipe_alloc_edma_rings() argument
279 return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params, gxio_mpipe_alloc_edma_rings()
291 int gxio_mpipe_init_edma_ring_aux(gxio_mpipe_context_t *context, void *mem_va, gxio_mpipe_init_edma_ring_aux() argument
311 return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params, gxio_mpipe_init_edma_ring_aux()
318 int gxio_mpipe_commit_rules(gxio_mpipe_context_t *context, const void *blob, gxio_mpipe_commit_rules() argument
323 return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params, blob_size, gxio_mpipe_commit_rules()
335 int gxio_mpipe_register_client_memory(gxio_mpipe_context_t *context, gxio_mpipe_register_client_memory() argument
346 return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params, gxio_mpipe_register_client_memory()
358 int gxio_mpipe_link_open_aux(gxio_mpipe_context_t *context, gxio_mpipe_link_open_aux() argument
367 return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params, gxio_mpipe_link_open_aux()
377 int gxio_mpipe_link_close_aux(gxio_mpipe_context_t *context, int mac) gxio_mpipe_link_close_aux() argument
384 return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params, gxio_mpipe_link_close_aux()
396 int gxio_mpipe_link_set_attr_aux(gxio_mpipe_context_t *context, int mac, gxio_mpipe_link_set_attr_aux() argument
406 return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params, gxio_mpipe_link_set_attr_aux()
418 int gxio_mpipe_get_timestamp_aux(gxio_mpipe_context_t *context, uint64_t *sec, gxio_mpipe_get_timestamp_aux() argument
426 hv_dev_pread(context->fd, 0, (HV_VirtAddr) params, sizeof(*params), gxio_mpipe_get_timestamp_aux()
443 int gxio_mpipe_set_timestamp_aux(gxio_mpipe_context_t *context, uint64_t sec, gxio_mpipe_set_timestamp_aux() argument
453 return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params, gxio_mpipe_set_timestamp_aux()
463 int gxio_mpipe_adjust_timestamp_aux(gxio_mpipe_context_t *context, int64_t nsec) gxio_mpipe_adjust_timestamp_aux() argument
470 return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params, gxio_mpipe_adjust_timestamp_aux()
484 int gxio_mpipe_config_edma_ring_blks(gxio_mpipe_context_t *context, gxio_mpipe_config_edma_ring_blks() argument
496 return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params, gxio_mpipe_config_edma_ring_blks()
507 int gxio_mpipe_adjust_timestamp_freq(gxio_mpipe_context_t *context, int32_t ppb) gxio_mpipe_adjust_timestamp_freq() argument
514 return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params, gxio_mpipe_adjust_timestamp_freq()
525 int gxio_mpipe_arm_pollfd(gxio_mpipe_context_t *context, int pollfd_cookie) gxio_mpipe_arm_pollfd() argument
532 return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params, gxio_mpipe_arm_pollfd()
542 int gxio_mpipe_close_pollfd(gxio_mpipe_context_t *context, int pollfd_cookie) gxio_mpipe_close_pollfd() argument
549 return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params, gxio_mpipe_close_pollfd()
559 int gxio_mpipe_get_mmio_base(gxio_mpipe_context_t *context, HV_PTE *base) gxio_mpipe_get_mmio_base() argument
566 hv_dev_pread(context->fd, 0, (HV_VirtAddr) params, sizeof(*params), gxio_mpipe_get_mmio_base()
580 int gxio_mpipe_check_mmio_offset(gxio_mpipe_context_t *context, gxio_mpipe_check_mmio_offset() argument
589 return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params, gxio_mpipe_check_mmio_offset()
H A Dmpipe.c58 int gxio_mpipe_init(gxio_mpipe_context_t *context, unsigned int mpipe_index) gxio_mpipe_init() argument
71 context->fd = fd; gxio_mpipe_init()
81 context->mmio_cfg_base = (void __force *) gxio_mpipe_init()
84 if (context->mmio_cfg_base == NULL) gxio_mpipe_init()
87 context->mmio_fast_base = (void __force *) gxio_mpipe_init()
90 if (context->mmio_fast_base == NULL) gxio_mpipe_init()
95 context->__stacks.stacks[i] = 255; gxio_mpipe_init()
97 context->instance = mpipe_index; gxio_mpipe_init()
102 iounmap((void __force __iomem *)(context->mmio_cfg_base)); gxio_mpipe_init()
104 hv_dev_close(context->fd); gxio_mpipe_init()
105 context->fd = -1; gxio_mpipe_init()
111 int gxio_mpipe_destroy(gxio_mpipe_context_t *context) gxio_mpipe_destroy() argument
113 iounmap((void __force __iomem *)(context->mmio_cfg_base)); gxio_mpipe_destroy()
114 iounmap((void __force __iomem *)(context->mmio_fast_base)); gxio_mpipe_destroy()
115 return hv_dev_close(context->fd); gxio_mpipe_destroy()
160 int gxio_mpipe_init_buffer_stack(gxio_mpipe_context_t *context, gxio_mpipe_init_buffer_stack() argument
170 result = gxio_mpipe_init_buffer_stack_aux(context, mem, mem_size, gxio_mpipe_init_buffer_stack()
177 context->__stacks.stacks[buffer_size_enum] = stack; gxio_mpipe_init_buffer_stack()
184 int gxio_mpipe_init_notif_ring(gxio_mpipe_context_t *context, gxio_mpipe_init_notif_ring() argument
189 return gxio_mpipe_init_notif_ring_aux(context, mem, mem_size, gxio_mpipe_init_notif_ring()
195 int gxio_mpipe_init_notif_group_and_buckets(gxio_mpipe_context_t *context, gxio_mpipe_init_notif_group_and_buckets() argument
217 result = gxio_mpipe_init_notif_group(context, group, bits); gxio_mpipe_init_notif_group_and_buckets()
224 result = gxio_mpipe_init_bucket(context, bucket + i, gxio_mpipe_init_notif_group_and_buckets()
235 int gxio_mpipe_init_edma_ring(gxio_mpipe_context_t *context, gxio_mpipe_init_edma_ring() argument
242 return gxio_mpipe_init_edma_ring_aux(context, mem, mem_size, mem_flags, gxio_mpipe_init_edma_ring()
249 gxio_mpipe_context_t *context) gxio_mpipe_rules_init()
251 rules->context = context; gxio_mpipe_rules_init()
310 stacks ? stacks->stacks[i] : rules->context->__stacks. gxio_mpipe_rules_begin()
384 return gxio_mpipe_commit_rules(rules->context, list, size); gxio_mpipe_rules_commit()
390 gxio_mpipe_context_t *context, gxio_mpipe_iqueue_init()
397 iqueue->context = context; gxio_mpipe_iqueue_init()
411 return gxio_mpipe_init_notif_ring(context, ring, mem, mem_size, gxio_mpipe_iqueue_init()
418 gxio_mpipe_context_t *context, gxio_mpipe_equeue_init()
430 int result = gxio_mpipe_init_edma_ring(context, ering, channel, gxio_mpipe_equeue_init()
444 context->mmio_fast_base + offset.word, gxio_mpipe_equeue_init()
449 equeue->context = context; gxio_mpipe_equeue_init()
458 int gxio_mpipe_set_timestamp(gxio_mpipe_context_t *context, gxio_mpipe_set_timestamp() argument
462 return gxio_mpipe_set_timestamp_aux(context, (uint64_t)ts->tv_sec, gxio_mpipe_set_timestamp()
468 int gxio_mpipe_get_timestamp(gxio_mpipe_context_t *context, gxio_mpipe_get_timestamp() argument
474 ret = gxio_mpipe_get_timestamp_aux(context, (uint64_t *)&ts->tv_sec, gxio_mpipe_get_timestamp()
491 int gxio_mpipe_adjust_timestamp(gxio_mpipe_context_t *context, int64_t delta) gxio_mpipe_adjust_timestamp() argument
493 return gxio_mpipe_adjust_timestamp_aux(context, delta); gxio_mpipe_adjust_timestamp()
497 /* Get our internal context used for link name access. This context is
502 static gxio_mpipe_context_t context; _gxio_get_link_context() local
521 context.fd = hv_dev_open((HV_VirtAddr) file, 0); _gxio_get_link_context()
522 if (context.fd < 0) _gxio_get_link_context()
525 contextp = &context; _gxio_get_link_context()
538 gxio_mpipe_context_t *context = _gxio_get_link_context(); gxio_mpipe_link_instance() local
540 if (!context) gxio_mpipe_link_instance()
546 return gxio_mpipe_info_instance_aux(context, name); gxio_mpipe_link_instance()
556 gxio_mpipe_context_t *context = _gxio_get_link_context(); gxio_mpipe_link_enumerate_mac() local
557 if (!context) gxio_mpipe_link_enumerate_mac()
560 rv = gxio_mpipe_info_enumerate_aux(context, idx, &name, &mac); gxio_mpipe_link_enumerate_mac()
573 gxio_mpipe_context_t *context, const char *link_name, gxio_mpipe_link_open()
582 rv = gxio_mpipe_link_open_aux(context, name, flags); gxio_mpipe_link_open()
586 link->context = context; gxio_mpipe_link_open()
597 return gxio_mpipe_link_close_aux(link->context, link->mac); gxio_mpipe_link_close()
605 return gxio_mpipe_link_set_attr_aux(link->context, link->mac, attr, gxio_mpipe_link_set_attr()
248 gxio_mpipe_rules_init(gxio_mpipe_rules_t *rules, gxio_mpipe_context_t *context) gxio_mpipe_rules_init() argument
389 gxio_mpipe_iqueue_init(gxio_mpipe_iqueue_t *iqueue, gxio_mpipe_context_t *context, unsigned int ring, void *mem, size_t mem_size, unsigned int mem_flags) gxio_mpipe_iqueue_init() argument
417 gxio_mpipe_equeue_init(gxio_mpipe_equeue_t *equeue, gxio_mpipe_context_t *context, unsigned int ering, unsigned int channel, void *mem, unsigned int mem_size, unsigned int mem_flags) gxio_mpipe_equeue_init() argument
572 gxio_mpipe_link_open(gxio_mpipe_link_t *link, gxio_mpipe_context_t *context, const char *link_name, unsigned int flags) gxio_mpipe_link_open() argument
H A Diorpc_mpipe_info.c22 int gxio_mpipe_info_instance_aux(gxio_mpipe_info_context_t *context, gxio_mpipe_info_instance_aux() argument
30 return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params, gxio_mpipe_info_instance_aux()
41 int gxio_mpipe_info_enumerate_aux(gxio_mpipe_info_context_t *context, gxio_mpipe_info_enumerate_aux() argument
51 hv_dev_pread(context->fd, 0, (HV_VirtAddr) params, sizeof(*params), gxio_mpipe_info_enumerate_aux()
66 int gxio_mpipe_info_get_mmio_base(gxio_mpipe_info_context_t *context, gxio_mpipe_info_get_mmio_base() argument
74 hv_dev_pread(context->fd, 0, (HV_VirtAddr) params, sizeof(*params), gxio_mpipe_info_get_mmio_base()
88 int gxio_mpipe_info_check_mmio_offset(gxio_mpipe_info_context_t *context, gxio_mpipe_info_check_mmio_offset() argument
97 return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params, gxio_mpipe_info_check_mmio_offset()
H A Diorpc_trio.c24 int gxio_trio_alloc_asids(gxio_trio_context_t *context, unsigned int count, gxio_trio_alloc_asids() argument
34 return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params, gxio_trio_alloc_asids()
47 int gxio_trio_alloc_memory_maps(gxio_trio_context_t *context, gxio_trio_alloc_memory_maps() argument
58 return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params, gxio_trio_alloc_memory_maps()
70 int gxio_trio_alloc_scatter_queues(gxio_trio_context_t *context, gxio_trio_alloc_scatter_queues() argument
81 return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params, gxio_trio_alloc_scatter_queues()
94 int gxio_trio_alloc_pio_regions(gxio_trio_context_t *context, gxio_trio_alloc_pio_regions() argument
105 return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params, gxio_trio_alloc_pio_regions()
118 int gxio_trio_init_pio_region_aux(gxio_trio_context_t *context, gxio_trio_init_pio_region_aux() argument
130 return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params, gxio_trio_init_pio_region_aux()
148 int gxio_trio_init_memory_map_mmu_aux(gxio_trio_context_t *context, gxio_trio_init_memory_map_mmu_aux() argument
167 return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params, gxio_trio_init_memory_map_mmu_aux()
178 int gxio_trio_get_port_property(gxio_trio_context_t *context, gxio_trio_get_port_property() argument
186 hv_dev_pread(context->fd, 0, (HV_VirtAddr) params, sizeof(*params), gxio_trio_get_port_property()
201 int gxio_trio_config_legacy_intr(gxio_trio_context_t *context, int inter_x, gxio_trio_config_legacy_intr() argument
215 return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params, gxio_trio_config_legacy_intr()
230 int gxio_trio_config_msi_intr(gxio_trio_context_t *context, int inter_x, gxio_trio_config_msi_intr() argument
249 return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params, gxio_trio_config_msi_intr()
262 int gxio_trio_set_mps_mrs(gxio_trio_context_t *context, uint16_t mps, gxio_trio_set_mps_mrs() argument
272 return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params, gxio_trio_set_mps_mrs()
282 int gxio_trio_force_rc_link_up(gxio_trio_context_t *context, unsigned int mac) gxio_trio_force_rc_link_up() argument
289 return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params, gxio_trio_force_rc_link_up()
299 int gxio_trio_force_ep_link_up(gxio_trio_context_t *context, unsigned int mac) gxio_trio_force_ep_link_up() argument
306 return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params, gxio_trio_force_ep_link_up()
316 int gxio_trio_get_mmio_base(gxio_trio_context_t *context, HV_PTE *base) gxio_trio_get_mmio_base() argument
323 hv_dev_pread(context->fd, 0, (HV_VirtAddr) params, sizeof(*params), gxio_trio_get_mmio_base()
337 int gxio_trio_check_mmio_offset(gxio_trio_context_t *context, gxio_trio_check_mmio_offset() argument
346 return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params, gxio_trio_check_mmio_offset()
H A Diorpc_usb_host.c22 int gxio_usb_host_cfg_interrupt(gxio_usb_host_context_t *context, int inter_x, gxio_usb_host_cfg_interrupt() argument
33 return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params, gxio_usb_host_cfg_interrupt()
44 int gxio_usb_host_register_client_memory(gxio_usb_host_context_t *context, gxio_usb_host_register_client_memory() argument
53 return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params, gxio_usb_host_register_client_memory()
64 int gxio_usb_host_get_mmio_base(gxio_usb_host_context_t *context, HV_PTE *base) gxio_usb_host_get_mmio_base() argument
71 hv_dev_pread(context->fd, 0, (HV_VirtAddr) params, sizeof(*params), gxio_usb_host_get_mmio_base()
85 int gxio_usb_host_check_mmio_offset(gxio_usb_host_context_t *context, gxio_usb_host_check_mmio_offset() argument
94 return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params, gxio_usb_host_check_mmio_offset()
H A Diorpc_uart.c22 int gxio_uart_cfg_interrupt(gxio_uart_context_t *context, int inter_x, gxio_uart_cfg_interrupt() argument
33 return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params, gxio_uart_cfg_interrupt()
43 int gxio_uart_get_mmio_base(gxio_uart_context_t *context, HV_PTE *base) gxio_uart_get_mmio_base() argument
50 hv_dev_pread(context->fd, 0, (HV_VirtAddr) params, sizeof(*params), gxio_uart_get_mmio_base()
64 int gxio_uart_check_mmio_offset(gxio_uart_context_t *context, gxio_uart_check_mmio_offset() argument
73 return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params, gxio_uart_check_mmio_offset()
H A Dtrio.c28 int gxio_trio_init(gxio_trio_context_t *context, unsigned int trio_index) gxio_trio_init() argument
36 context->fd = -1; gxio_trio_init()
44 context->fd = fd; gxio_trio_init()
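Nearly every iorpc wrapper in this directory has the same shape: pack the arguments into a params struct on the stack, then push the struct through the context's hypervisor fd with hv_dev_pwrite() (hv_dev_pread() when there are out-parameters), selecting the hypervisor-side handler with an opcode. A generic hedged sketch of that marshalling pattern; the params layout and the DEMO_OP opcode are illustrative, not taken from a real iorpc header:

struct demo_params {            /* illustrative wire layout */
        uint64_t arg0;
        uint64_t arg1;
};

#define DEMO_OP 0x1234          /* illustrative opcode */

static int demo_iorpc_call(gxio_uart_context_t *context,
                           uint64_t arg0, uint64_t arg1)
{
        struct demo_params temp, *params = &temp;

        params->arg0 = arg0;
        params->arg1 = arg1;

        /* Offset 0; the final argument names the remote operation. */
        return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params,
                             sizeof(*params), DEMO_OP);
}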
/linux-4.1.27/fs/xfs/
H A Dxfs_attr_list.c69 xfs_attr_shortform_list(xfs_attr_list_context_t *context) xfs_attr_shortform_list() argument
79 ASSERT(context != NULL); xfs_attr_shortform_list()
80 dp = context->dp; xfs_attr_shortform_list()
87 cursor = context->cursor; xfs_attr_shortform_list()
90 trace_xfs_attr_list_sf(context); xfs_attr_shortform_list()
101 if (context->bufsize == 0 || xfs_attr_shortform_list()
103 (dp->i_afp->if_bytes + sf->hdr.count * 16) < context->bufsize)) { xfs_attr_shortform_list()
105 error = context->put_listent(context, xfs_attr_shortform_list()
116 if (context->seen_enough) xfs_attr_shortform_list()
123 trace_xfs_attr_list_sf_all(context); xfs_attr_shortform_list()
128 if (context->bufsize == 0) xfs_attr_shortform_list()
148 context->dp->i_mount, sfe); xfs_attr_shortform_list()
199 error = context->put_listent(context, xfs_attr_shortform_list()
209 if (context->seen_enough) xfs_attr_shortform_list()
219 xfs_attr_node_list(xfs_attr_list_context_t *context) xfs_attr_node_list() argument
229 struct xfs_inode *dp = context->dp; xfs_attr_node_list()
232 trace_xfs_attr_node_list(context); xfs_attr_node_list()
234 cursor = context->cursor; xfs_attr_node_list()
255 trace_xfs_attr_list_wrong_blk(context); xfs_attr_node_list()
267 trace_xfs_attr_list_wrong_blk(context); xfs_attr_node_list()
272 trace_xfs_attr_list_wrong_blk(context); xfs_attr_node_list()
278 trace_xfs_attr_list_wrong_blk(context); xfs_attr_node_list()
309 context->dp->i_mount, xfs_attr_node_list()
321 trace_xfs_attr_list_node_descend(context, xfs_attr_node_list()
342 error = xfs_attr3_leaf_list_int(bp, context); xfs_attr_node_list()
348 if (context->seen_enough || leafhdr.forw == 0) xfs_attr_node_list()
366 struct xfs_attr_list_context *context) xfs_attr3_leaf_list_int()
375 struct xfs_mount *mp = context->dp->i_mount; xfs_attr3_leaf_list_int()
377 trace_xfs_attr_list_leaf(context); xfs_attr3_leaf_list_int()
383 cursor = context->cursor; xfs_attr3_leaf_list_int()
389 if (context->resynch) { xfs_attr3_leaf_list_int()
393 if (cursor->offset == context->dupcnt) { xfs_attr3_leaf_list_int()
394 context->dupcnt = 0; xfs_attr3_leaf_list_int()
397 context->dupcnt++; xfs_attr3_leaf_list_int()
400 context->dupcnt = 0; xfs_attr3_leaf_list_int()
405 trace_xfs_attr_list_notfound(context); xfs_attr3_leaf_list_int()
412 context->resynch = 0; xfs_attr3_leaf_list_int()
431 retval = context->put_listent(context, xfs_attr3_leaf_list_int()
445 if (context->put_value) { xfs_attr3_leaf_list_int()
449 args.geo = context->dp->i_mount->m_attr_geo; xfs_attr3_leaf_list_int()
450 args.dp = context->dp; xfs_attr3_leaf_list_int()
460 retval = context->put_listent(context, xfs_attr3_leaf_list_int()
468 retval = context->put_listent(context, xfs_attr3_leaf_list_int()
478 if (context->seen_enough) xfs_attr3_leaf_list_int()
482 trace_xfs_attr_list_leaf_end(context); xfs_attr3_leaf_list_int()
490 xfs_attr_leaf_list(xfs_attr_list_context_t *context) xfs_attr_leaf_list() argument
495 trace_xfs_attr_leaf_list(context); xfs_attr_leaf_list()
497 context->cursor->blkno = 0; xfs_attr_leaf_list()
498 error = xfs_attr3_leaf_read(NULL, context->dp, 0, -1, &bp); xfs_attr_leaf_list()
502 error = xfs_attr3_leaf_list_int(bp, context); xfs_attr_leaf_list()
509 xfs_attr_list_context_t *context) xfs_attr_list_int()
512 xfs_inode_t *dp = context->dp; xfs_attr_list_int()
527 error = xfs_attr_shortform_list(context); xfs_attr_list_int()
529 error = xfs_attr_leaf_list(context); xfs_attr_list_int()
531 error = xfs_attr_node_list(context); xfs_attr_list_int()
550 xfs_attr_list_context_t *context, xfs_attr_put_listent()
557 struct attrlist *alist = (struct attrlist *)context->alist; xfs_attr_put_listent()
561 ASSERT(!(context->flags & ATTR_KERNOVAL)); xfs_attr_put_listent()
562 ASSERT(context->count >= 0); xfs_attr_put_listent()
563 ASSERT(context->count < (ATTR_MAX_VALUELEN/8)); xfs_attr_put_listent()
564 ASSERT(context->firstu >= sizeof(*alist)); xfs_attr_put_listent()
565 ASSERT(context->firstu <= context->bufsize); xfs_attr_put_listent()
570 if (((context->flags & ATTR_SECURE) == 0) != xfs_attr_put_listent()
573 if (((context->flags & ATTR_ROOT) == 0) != xfs_attr_put_listent()
578 context->count * sizeof(alist->al_offset[0]); xfs_attr_put_listent()
579 context->firstu -= ATTR_ENTSIZE(namelen); xfs_attr_put_listent()
580 if (context->firstu < arraytop) { xfs_attr_put_listent()
581 trace_xfs_attr_list_full(context); xfs_attr_put_listent()
583 context->seen_enough = 1; xfs_attr_put_listent()
587 aep = (attrlist_ent_t *)&context->alist[context->firstu]; xfs_attr_put_listent()
591 alist->al_offset[context->count++] = context->firstu; xfs_attr_put_listent()
592 alist->al_count = context->count; xfs_attr_put_listent()
593 trace_xfs_attr_list_add(context); xfs_attr_put_listent()
612 xfs_attr_list_context_t context; xfs_attr_list() local
636 memset(&context, 0, sizeof(context)); xfs_attr_list()
637 context.dp = dp; xfs_attr_list()
638 context.cursor = cursor; xfs_attr_list()
639 context.resynch = 1; xfs_attr_list()
640 context.flags = flags; xfs_attr_list()
641 context.alist = buffer; xfs_attr_list()
642 context.bufsize = (bufsize & ~(sizeof(int)-1)); /* align */ xfs_attr_list()
643 context.firstu = context.bufsize; xfs_attr_list()
644 context.put_listent = xfs_attr_put_listent; xfs_attr_list()
646 alist = (struct attrlist *)context.alist; xfs_attr_list()
649 alist->al_offset[0] = context.bufsize; xfs_attr_list()
651 error = xfs_attr_list_int(&context); xfs_attr_list()
364 xfs_attr3_leaf_list_int( struct xfs_buf *bp, struct xfs_attr_list_context *context) xfs_attr3_leaf_list_int() argument
508 xfs_attr_list_int( xfs_attr_list_context_t *context) xfs_attr_list_int() argument
549 xfs_attr_put_listent( xfs_attr_list_context_t *context, int flags, unsigned char *name, int namelen, int valuelen, unsigned char *value) xfs_attr_put_listent() argument
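xfs_attr_list() above spells out the calling convention for the whole listing machinery: zero an xfs_attr_list_context, point it at the inode and cursor, install a put_listent callback, and let xfs_attr_list_int() walk the shortform, leaf, or node form, with the callback setting seen_enough to stop early. A hedged sketch of a minimal caller that only counts entries, reusing context->count the way xfs_xattr_put_listent_sizes() in the next file does (callback signature as shown in the prototypes above; the cursor type name and return-value convention should be treated as assumptions from this tree's internal headers):

static int demo_count_listent(struct xfs_attr_list_context *context,
                              int flags, unsigned char *name, int namelen,
                              int valuelen, unsigned char *value)
{
        context->count++;       /* one attribute seen */
        return 0;               /* keep walking */
}

static int demo_count_attrs(struct xfs_inode *dp,
                            struct attrlist_cursor_kern *cursor)
{
        struct xfs_attr_list_context context;
        int error;

        memset(&context, 0, sizeof(context));
        context.dp = dp;
        context.cursor = cursor;
        context.resynch = 1;
        context.put_listent = demo_count_listent;

        error = xfs_attr_list_int(&context);
        return error ? error : context.count;
}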
H A Dxfs_xattr.c131 struct xfs_attr_list_context *context, xfs_xattr_put_listent()
142 ASSERT(context->count >= 0); xfs_xattr_put_listent()
151 arraytop = context->count + prefix_len + namelen + 1; xfs_xattr_put_listent()
152 if (arraytop > context->firstu) { xfs_xattr_put_listent()
153 context->count = -1; /* insufficient space */ xfs_xattr_put_listent()
156 offset = (char *)context->alist + context->count; xfs_xattr_put_listent()
162 context->count += prefix_len + namelen + 1; xfs_xattr_put_listent()
168 struct xfs_attr_list_context *context, xfs_xattr_put_listent_sizes()
175 context->count += xfs_xattr_prefix_len(flags) + namelen + 1; xfs_xattr_put_listent_sizes()
198 struct xfs_attr_list_context context; xfs_vn_listxattr() local
206 memset(&context, 0, sizeof(context)); xfs_vn_listxattr()
207 context.dp = XFS_I(inode); xfs_vn_listxattr()
208 context.cursor = &cursor; xfs_vn_listxattr()
209 context.resynch = 1; xfs_vn_listxattr()
210 context.alist = data; xfs_vn_listxattr()
211 context.bufsize = size; xfs_vn_listxattr()
212 context.firstu = context.bufsize; xfs_vn_listxattr()
215 context.put_listent = xfs_xattr_put_listent; xfs_vn_listxattr()
217 context.put_listent = xfs_xattr_put_listent_sizes; xfs_vn_listxattr()
219 xfs_attr_list_int(&context); xfs_vn_listxattr()
220 if (context.count < 0) xfs_vn_listxattr()
229 data, size, &context.count); xfs_vn_listxattr()
237 data, size, &context.count); xfs_vn_listxattr()
242 return context.count; xfs_vn_listxattr()
130 xfs_xattr_put_listent( struct xfs_attr_list_context *context, int flags, unsigned char *name, int namelen, int valuelen, unsigned char *value) xfs_xattr_put_listent() argument
167 xfs_xattr_put_listent_sizes( struct xfs_attr_list_context *context, int flags, unsigned char *name, int namelen, int valuelen, unsigned char *value) xfs_xattr_put_listent_sizes() argument
/linux-4.1.27/drivers/net/ethernet/mellanox/mlx4/
H A Den_resources.c42 int user_prio, struct mlx4_qp_context *context) mlx4_en_fill_qp_context()
47 memset(context, 0, sizeof *context); mlx4_en_fill_qp_context()
48 context->flags = cpu_to_be32(7 << 16 | rss << MLX4_RSS_QPC_FLAG_OFFSET); mlx4_en_fill_qp_context()
49 context->pd = cpu_to_be32(mdev->priv_pdn); mlx4_en_fill_qp_context()
50 context->mtu_msgmax = 0xff; mlx4_en_fill_qp_context()
52 context->rq_size_stride = ilog2(size) << 3 | (ilog2(stride) - 4); mlx4_en_fill_qp_context()
54 context->sq_size_stride = ilog2(size) << 3 | (ilog2(stride) - 4); mlx4_en_fill_qp_context()
56 context->params2 |= MLX4_QP_BIT_FPP; mlx4_en_fill_qp_context()
59 context->sq_size_stride = ilog2(TXBB_SIZE) - 4; mlx4_en_fill_qp_context()
61 context->usr_page = cpu_to_be32(mdev->priv_uar.index); mlx4_en_fill_qp_context()
62 context->local_qpn = cpu_to_be32(qpn); mlx4_en_fill_qp_context()
63 context->pri_path.ackto = 1 & 0x07; mlx4_en_fill_qp_context()
64 context->pri_path.sched_queue = 0x83 | (priv->port - 1) << 6; mlx4_en_fill_qp_context()
66 context->pri_path.sched_queue |= user_prio << 3; mlx4_en_fill_qp_context()
67 context->pri_path.feup = MLX4_FEUP_FORCE_ETH_UP; mlx4_en_fill_qp_context()
69 context->pri_path.counter_index = 0xff; mlx4_en_fill_qp_context()
70 context->cqn_send = cpu_to_be32(cqn); mlx4_en_fill_qp_context()
71 context->cqn_recv = cpu_to_be32(cqn); mlx4_en_fill_qp_context()
72 context->db_rec_addr = cpu_to_be64(priv->res.db.dma << 2); mlx4_en_fill_qp_context()
74 context->param3 |= cpu_to_be32(1 << 30); mlx4_en_fill_qp_context()
79 context->srqn = cpu_to_be32(7 << 28); /* this fills bits 30:28 */ mlx4_en_fill_qp_context()
40 mlx4_en_fill_qp_context(struct mlx4_en_priv *priv, int size, int stride, int is_tx, int rss, int qpn, int cqn, int user_prio, struct mlx4_qp_context *context) mlx4_en_fill_qp_context() argument
/linux-4.1.27/arch/sparc/include/asm/
H A Dmmu_32.h4 /* Default "unsigned long" context */
H A Dmmu_context_64.h38 &mm->context.tsb_block[0], tsb_context_switch()
40 (mm->context.tsb_block[1].tsb ? tsb_context_switch()
41 &mm->context.tsb_block[1] : tsb_context_switch()
46 , __pa(&mm->context.tsb_descr[0])); tsb_context_switch()
58 /* Set MMU context in the actual hardware. */
68 : "r" (CTX_HWBITS((__mm)->context)), \
73 /* Switch the current MM context. */ switch_mm()
82 spin_lock_irqsave(&mm->context.lock, flags); switch_mm()
83 ctx_valid = CTX_VALID(mm->context); switch_mm()
94 * perform the secondary context load and the TSB context switch. switch_mm()
109 * context was valid, so skip switch_mm()
110 * TSB context switch switch_mm()
120 /* Any time a processor runs a context on an address space switch_mm()
121 * for the first time, we must flush that context out of the switch_mm()
127 __flush_tlb_mm(CTX_HWBITS(mm->context), switch_mm()
130 spin_unlock_irqrestore(&mm->context.lock, flags); switch_mm()
141 spin_lock_irqsave(&mm->context.lock, flags); activate_mm()
142 if (!CTX_VALID(mm->context)) activate_mm()
149 __flush_tlb_mm(CTX_HWBITS(mm->context), SECONDARY_CONTEXT); activate_mm()
151 spin_unlock_irqrestore(&mm->context.lock, flags); activate_mm()
H A Dmmu_context_32.h12 /* Initialize a new mmu context. This is invoked when a new
17 /* Destroy a dead context. This occurs when mmput drops the
24 /* Switch the current MM context. */
H A Dmmu_64.h15 * field in a spot outside of the areas of the context register
38 * PRIMARY_CONTEXT register values for the kernel context.
48 /* If you want "the TLB context number" use CTX_NR_MASK. If you
49 * want "the bits I program into the context registers" use
/linux-4.1.27/arch/avr32/include/asm/
H A Dmmu.h4 /* Default "unsigned long" context */
H A Dmmu_context.h20 * The MMU "context" consists of two things:
34 /* Cache of MMU context last used */
38 * Get MMU context if needed
45 if (((mm->context ^ mc) & MMU_CONTEXT_VERSION_MASK) == 0) get_mmu_context()
49 /* It's old, we need to get new context with new version */ get_mmu_context()
64 mm->context = mc; get_mmu_context()
68 * Initialize the context related info for a new mm_struct
74 mm->context = NO_CONTEXT; init_new_context()
79 * Destroy context related info for an mm_struct that is about
105 set_asid(mm->context & MMU_CONTEXT_ASID_MASK); activate_context()
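get_mmu_context() above is the classic ASID-with-generation scheme: the low bits of mm->context carry the hardware ASID that set_asid() programs, the high bits carry a generation ("version") number, and a version mismatch against the cache forces allocation of a fresh context. A self-contained hedged sketch of the scheme; the mask widths are illustrative rather than avr32's, and the TLB flush on rollover is stubbed out:

#define DEMO_ASID_MASK          0xffUL          /* illustrative: 256 ASIDs */
#define DEMO_VERSION_MASK       (~DEMO_ASID_MASK)
#define DEMO_FIRST_VERSION      (DEMO_ASID_MASK + 1)    /* version 1, asid 0 */

static unsigned long demo_context_cache = DEMO_FIRST_VERSION;

static unsigned long demo_get_mmu_context(unsigned long mm_context)
{
        /* Fast path: same generation, so the cached ASID is still valid. */
        if (((mm_context ^ demo_context_cache) & DEMO_VERSION_MASK) == 0)
                return mm_context;

        /* Slow path: hand out the next ASID. */
        if ((++demo_context_cache & DEMO_ASID_MASK) == 0) {
                /* ASIDs exhausted: the increment carried into a new
                 * version, so every old context is stale; a real
                 * implementation flushes the whole TLB here. */
                if (demo_context_cache == 0)    /* counter wraparound */
                        demo_context_cache = DEMO_FIRST_VERSION;
        }
        return demo_context_cache;
}

A new mm starts at NO_CONTEXT (conventionally zero), which never matches a live version, so it always takes the slow path on first use.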
/linux-4.1.27/arch/parisc/include/asm/
H A Dmmu_context.h26 mm->context = alloc_sid(); init_new_context()
33 free_sid(mm->context); destroy_context()
34 mm->context = 0; destroy_context()
37 static inline unsigned long __space_to_prot(mm_context_t context) __space_to_prot() argument
40 return context << 1; __space_to_prot()
42 return context >> (SPACEID_SHIFT - 1); __space_to_prot()
46 static inline void load_context(mm_context_t context) load_context() argument
48 mtsp(context, 3); load_context()
49 mtctl(__space_to_prot(context), 8); load_context()
57 load_context(next->context); switch_mm()
75 if (next->context == 0) activate_mm()
76 next->context = alloc_sid(); activate_mm()
H A Dtlbflush.h34 __flush_tlb_range((vma)->vm_mm->context, start, end)
42 * The code to switch to a new context is NOT valid for processes
69 if (mm->context != 0) flush_tlb_mm()
70 free_sid(mm->context); flush_tlb_mm()
71 mm->context = alloc_sid(); flush_tlb_mm()
73 load_context(mm->context); flush_tlb_mm()
83 sid = vma->vm_mm->context; flush_tlb_page()
/linux-4.1.27/include/linux/i2c/
H A Dmax732x.h13 void *context; /* param to setup/teardown */ member in struct:max732x_platform_data
17 void *context);
20 void *context);
H A Dpcf857x.h12 * @context: optional parameter passed to setup() and teardown()
37 void *context);
40 void *context);
41 void *context; member in struct:pcf857x_platform_data
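Both expander headers above use the same idiom: the board file stashes an opaque pointer in .context and the driver hands it back verbatim to the setup()/teardown() hooks, so board code can carry its own state without globals. A hedged board-file sketch; the hook signature is reconstructed from this header's declarations and, like all the demo_* names and values, should be treated as an assumption:

#include <linux/i2c.h>
#include <linux/i2c/pcf857x.h>

struct demo_board_info { const char *label; };

static struct demo_board_info demo_info = { .label = "exp0" };

static int demo_setup(struct i2c_client *client, int gpio, unsigned ngpio,
                      void *context)
{
        struct demo_board_info *info = context; /* the .context pointer */

        pr_info("%s: gpios %d..%d ready\n", info->label, gpio,
                (int)(gpio + ngpio - 1));
        return 0;
}

static struct pcf857x_platform_data demo_pdata = {
        .gpio_base = 200,       /* base assumed for illustration */
        .setup     = demo_setup,
        .context   = &demo_info, /* handed back to setup()/teardown() */
};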
/linux-4.1.27/arch/nios2/mm/
H A Dmmu_context.c2 * MMU context handling.
19 /* The pid's position and mask in context */
24 /* The version's position and mask in context */
29 /* Return the version part of a context */
32 /* Return the pid part of a context */
35 /* Value of the first context (version 1, pid 0) */
41 * Initialize MMU context management stuff.
51 * Set new context (pid), keep way
53 static void set_context(mm_context_t context) set_context() argument
55 set_mmu_pid(CTX_PID(context)); set_context()
87 /* If the process context we are swapping in has a different context switch_mm()
89 if (unlikely(CTX_VERSION(next->context) != switch_mm()
91 next->context = get_new_context(); switch_mm()
96 /* Set the current context */ switch_mm()
97 set_context(next->context); switch_mm()
104 * the context for the new mm so we see the new mappings.
108 next->context = get_new_context(); activate_mm()
109 set_context(next->context); activate_mm()
113 unsigned long get_pid_from_context(mm_context_t *context) get_pid_from_context() argument
115 return CTX_PID((*context)); get_pid_from_context()
/linux-4.1.27/kernel/
H A Dauditsc.c41 * Subject and object context labeling support added by <danjones@us.ibm.com>
197 * it's going to remain 1-element for almost any setup) until we free the context itself.
430 /* Determine if any context name data matches a rule's watch data */
820 /* Transfer the audit context pointer to the caller, clearing it in the tsk's struct */ audit_take_context()
825 struct audit_context *context = tsk->audit_context; audit_take_context() local
827 if (!context) audit_take_context()
829 context->return_valid = return_valid; audit_take_context()
845 context->return_code = -EINTR; audit_take_context()
847 context->return_code = return_code; audit_take_context()
849 if (context->in_syscall && !context->dummy) { audit_take_context()
850 audit_filter_syscall(tsk, context, &audit_filter_list[AUDIT_FILTER_EXIT]); audit_take_context()
851 audit_filter_inodes(tsk, context); audit_take_context()
855 return context; audit_take_context()
858 static inline void audit_proctitle_free(struct audit_context *context) audit_proctitle_free() argument
860 kfree(context->proctitle.value); audit_proctitle_free()
861 context->proctitle.value = NULL; audit_proctitle_free()
862 context->proctitle.len = 0; audit_proctitle_free()
865 static inline void audit_free_names(struct audit_context *context) audit_free_names() argument
869 list_for_each_entry_safe(n, next, &context->names_list, list) { audit_free_names()
876 context->name_count = 0; audit_free_names()
877 path_put(&context->pwd); audit_free_names()
878 context->pwd.dentry = NULL; audit_free_names()
879 context->pwd.mnt = NULL; audit_free_names()
882 static inline void audit_free_aux(struct audit_context *context) audit_free_aux() argument
886 while ((aux = context->aux)) { audit_free_aux()
887 context->aux = aux->next; audit_free_aux()
890 while ((aux = context->aux_pids)) { audit_free_aux()
891 context->aux_pids = aux->next; audit_free_aux()
898 struct audit_context *context; audit_alloc_context() local
900 context = kzalloc(sizeof(*context), GFP_KERNEL); audit_alloc_context()
901 if (!context) audit_alloc_context()
903 context->state = state; audit_alloc_context()
904 context->prio = state == AUDIT_RECORD_CONTEXT ? ~0ULL : 0; audit_alloc_context()
905 INIT_LIST_HEAD(&context->killed_trees); audit_alloc_context()
906 INIT_LIST_HEAD(&context->names_list); audit_alloc_context()
907 return context; audit_alloc_context()
911 * audit_alloc - allocate an audit context block for a task
914 * Filter on the task information and allocate a per-task audit context
921 struct audit_context *context; audit_alloc() local
934 if (!(context = audit_alloc_context(state))) { audit_alloc()
939 context->filterkey = key; audit_alloc()
941 tsk->audit_context = context; audit_alloc()
946 static inline void audit_free_context(struct audit_context *context) audit_free_context() argument
948 audit_free_names(context); audit_free_context()
949 unroll_tree_refs(context, NULL, 0); audit_free_context()
950 free_tree_refs(context); audit_free_context()
951 audit_free_aux(context); audit_free_context()
952 kfree(context->filterkey); audit_free_context()
953 kfree(context->sockaddr); audit_free_context()
954 audit_proctitle_free(context); audit_free_context()
955 kfree(context); audit_free_context()
958 static int audit_log_pid_context(struct audit_context *context, pid_t pid, audit_log_pid_context() argument
967 ab = audit_log_start(context, GFP_KERNEL, AUDIT_OBJ_PID); audit_log_pid_context()
1001 static int audit_log_single_execve_arg(struct audit_context *context, audit_log_single_execve_arg() argument
1086 *ab = audit_log_start(context, GFP_KERNEL, AUDIT_EXECVE); audit_log_single_execve_arg()
1137 static void audit_log_execve_info(struct audit_context *context, audit_log_execve_info() argument
1147 audit_log_format(*ab, "argc=%d", context->execve.argc); audit_log_execve_info()
1161 for (i = 0; i < context->execve.argc; i++) { audit_log_execve_info()
1162 len = audit_log_single_execve_arg(context, ab, i, audit_log_execve_info()
1171 static void show_special(struct audit_context *context, int *call_panic) show_special() argument
1176 ab = audit_log_start(context, GFP_KERNEL, context->type); show_special()
1180 switch (context->type) { show_special()
1182 int nargs = context->socketcall.nargs; show_special()
1186 context->socketcall.args[i]); show_special()
1189 u32 osid = context->ipc.osid; show_special()
1192 from_kuid(&init_user_ns, context->ipc.uid), show_special()
1193 from_kgid(&init_user_ns, context->ipc.gid), show_special()
1194 context->ipc.mode); show_special()
1206 if (context->ipc.has_perm) { show_special()
1208 ab = audit_log_start(context, GFP_KERNEL, show_special()
1214 context->ipc.qbytes, show_special()
1215 context->ipc.perm_uid, show_special()
1216 context->ipc.perm_gid, show_special()
1217 context->ipc.perm_mode); show_special()
1224 context->mq_open.oflag, context->mq_open.mode, show_special()
1225 context->mq_open.attr.mq_flags, show_special()
1226 context->mq_open.attr.mq_maxmsg, show_special()
1227 context->mq_open.attr.mq_msgsize, show_special()
1228 context->mq_open.attr.mq_curmsgs); show_special()
1234 context->mq_sendrecv.mqdes, show_special()
1235 context->mq_sendrecv.msg_len, show_special()
1236 context->mq_sendrecv.msg_prio, show_special()
1237 context->mq_sendrecv.abs_timeout.tv_sec, show_special()
1238 context->mq_sendrecv.abs_timeout.tv_nsec); show_special()
1242 context->mq_notify.mqdes, show_special()
1243 context->mq_notify.sigev_signo); show_special()
1246 struct mq_attr *attr = &context->mq_getsetattr.mqstat; show_special()
1250 context->mq_getsetattr.mqdes, show_special()
1255 audit_log_format(ab, "pid=%d", context->capset.pid); show_special()
1256 audit_log_cap(ab, "cap_pi", &context->capset.cap.inheritable); show_special()
1257 audit_log_cap(ab, "cap_pp", &context->capset.cap.permitted); show_special()
1258 audit_log_cap(ab, "cap_pe", &context->capset.cap.effective); show_special()
1261 audit_log_format(ab, "fd=%d flags=0x%x", context->mmap.fd, show_special()
1262 context->mmap.flags); show_special()
1265 audit_log_execve_info(context, &ab); show_special()
1284 struct audit_context *context) audit_log_proctitle()
1292 ab = audit_log_start(context, GFP_KERNEL, AUDIT_PROCTITLE); audit_log_proctitle()
1299 if (!context->proctitle.value) { audit_log_proctitle()
1314 context->proctitle.value = buf; audit_log_proctitle()
1315 context->proctitle.len = res; audit_log_proctitle()
1317 msg = context->proctitle.value; audit_log_proctitle()
1318 len = context->proctitle.len; audit_log_proctitle()
1324 static void audit_log_exit(struct audit_context *context, struct task_struct *tsk) audit_log_exit() argument
1332 context->personality = tsk->personality; audit_log_exit()
1334 ab = audit_log_start(context, GFP_KERNEL, AUDIT_SYSCALL); audit_log_exit()
1338 context->arch, context->major); audit_log_exit()
1339 if (context->personality != PER_LINUX) audit_log_exit()
1340 audit_log_format(ab, " per=%lx", context->personality); audit_log_exit()
1341 if (context->return_valid) audit_log_exit()
1343 (context->return_valid==AUDITSC_SUCCESS)?"yes":"no", audit_log_exit()
1344 context->return_code); audit_log_exit()
1348 context->argv[0], audit_log_exit()
1349 context->argv[1], audit_log_exit()
1350 context->argv[2], audit_log_exit()
1351 context->argv[3], audit_log_exit()
1352 context->name_count); audit_log_exit()
1355 audit_log_key(ab, context->filterkey); audit_log_exit()
1358 for (aux = context->aux; aux; aux = aux->next) { audit_log_exit()
1360 ab = audit_log_start(context, GFP_KERNEL, aux->type); audit_log_exit()
1384 if (context->type) audit_log_exit()
1385 show_special(context, &call_panic); audit_log_exit()
1387 if (context->fds[0] >= 0) { audit_log_exit()
1388 ab = audit_log_start(context, GFP_KERNEL, AUDIT_FD_PAIR); audit_log_exit()
1391 context->fds[0], context->fds[1]); audit_log_exit()
1396 if (context->sockaddr_len) { audit_log_exit()
1397 ab = audit_log_start(context, GFP_KERNEL, AUDIT_SOCKADDR); audit_log_exit()
1400 audit_log_n_hex(ab, (void *)context->sockaddr, audit_log_exit()
1401 context->sockaddr_len); audit_log_exit()
1406 for (aux = context->aux_pids; aux; aux = aux->next) { audit_log_exit()
1410 if (audit_log_pid_context(context, axs->target_pid[i], audit_log_exit()
1419 if (context->target_pid && audit_log_exit()
1420 audit_log_pid_context(context, context->target_pid, audit_log_exit()
1421 context->target_auid, context->target_uid, audit_log_exit()
1422 context->target_sessionid, audit_log_exit()
1423 context->target_sid, context->target_comm)) audit_log_exit()
1426 if (context->pwd.dentry && context->pwd.mnt) { audit_log_exit()
1427 ab = audit_log_start(context, GFP_KERNEL, AUDIT_CWD); audit_log_exit()
1429 audit_log_d_path(ab, " cwd=", &context->pwd); audit_log_exit()
1435 list_for_each_entry(n, &context->names_list, list) { audit_log_exit()
1438 audit_log_name(context, n, NULL, i++, &call_panic); audit_log_exit()
1441 audit_log_proctitle(tsk, context); audit_log_exit()
1444 ab = audit_log_start(context, GFP_KERNEL, AUDIT_EOE); audit_log_exit()
1452 * audit_free - free a per-task audit context
1453 * @tsk: task whose audit context block to free
1459 struct audit_context *context; __audit_free() local
1461 context = audit_take_context(tsk, 0, 0); __audit_free()
1462 if (!context) __audit_free()
1466 * function (e.g., exit_group), then free context block. __audit_free()
1468 * in the context of the idle thread */ __audit_free()
1470 if (context->in_syscall && context->current_state == AUDIT_RECORD_CONTEXT) __audit_free()
1471 audit_log_exit(context, tsk); __audit_free()
1472 if (!list_empty(&context->killed_trees)) __audit_free()
1473 audit_kill_trees(&context->killed_trees); __audit_free()
1475 audit_free_context(context); __audit_free()
1486 * Fill in audit context at syscall entry. This only happens if the
1487 * audit context was created when the task was created and the state or
1488 * filters demand the audit context be built. If the state from the
1498 struct audit_context *context = tsk->audit_context; __audit_syscall_entry() local
1501 if (!context) __audit_syscall_entry()
1504 BUG_ON(context->in_syscall || context->name_count); __audit_syscall_entry()
1509 context->arch = syscall_get_arch(); __audit_syscall_entry()
1510 context->major = major; __audit_syscall_entry()
1511 context->argv[0] = a1; __audit_syscall_entry()
1512 context->argv[1] = a2; __audit_syscall_entry()
1513 context->argv[2] = a3; __audit_syscall_entry()
1514 context->argv[3] = a4; __audit_syscall_entry()
1516 state = context->state; __audit_syscall_entry()
1517 context->dummy = !audit_n_rules; __audit_syscall_entry()
1518 if (!context->dummy && state == AUDIT_BUILD_CONTEXT) { __audit_syscall_entry()
1519 context->prio = 0; __audit_syscall_entry()
1520 state = audit_filter_syscall(tsk, context, &audit_filter_list[AUDIT_FILTER_ENTRY]); __audit_syscall_entry()
1525 context->serial = 0; __audit_syscall_entry()
1526 context->ctime = CURRENT_TIME; __audit_syscall_entry()
1527 context->in_syscall = 1; __audit_syscall_entry()
1528 context->current_state = state; __audit_syscall_entry()
1529 context->ppid = 0; __audit_syscall_entry()
1533 * audit_syscall_exit - deallocate audit context after a system call
1537 * Tear down after system call. If the audit context has been marked as
1546 struct audit_context *context; __audit_syscall_exit() local
1553 context = audit_take_context(tsk, success, return_code); __audit_syscall_exit()
1554 if (!context) __audit_syscall_exit()
1557 if (context->in_syscall && context->current_state == AUDIT_RECORD_CONTEXT) __audit_syscall_exit()
1558 audit_log_exit(context, tsk); __audit_syscall_exit()
1560 context->in_syscall = 0; __audit_syscall_exit()
1561 context->prio = context->state == AUDIT_RECORD_CONTEXT ? ~0ULL : 0; __audit_syscall_exit()
1563 if (!list_empty(&context->killed_trees)) __audit_syscall_exit()
1564 audit_kill_trees(&context->killed_trees); __audit_syscall_exit()
1566 audit_free_names(context); __audit_syscall_exit()
1567 unroll_tree_refs(context, NULL, 0); __audit_syscall_exit()
1568 audit_free_aux(context); __audit_syscall_exit()
1569 context->aux = NULL; __audit_syscall_exit()
1570 context->aux_pids = NULL; __audit_syscall_exit()
1571 context->target_pid = 0; __audit_syscall_exit()
1572 context->target_sid = 0; __audit_syscall_exit()
1573 context->sockaddr_len = 0; __audit_syscall_exit()
1574 context->type = 0; __audit_syscall_exit()
1575 context->fds[0] = -1; __audit_syscall_exit()
1576 if (context->state != AUDIT_RECORD_CONTEXT) { __audit_syscall_exit()
1577 kfree(context->filterkey); __audit_syscall_exit()
1578 context->filterkey = NULL; __audit_syscall_exit()
1580 tsk->audit_context = context; __audit_syscall_exit()
1586 struct audit_context *context; handle_one() local
1592 context = current->audit_context; handle_one()
1593 p = context->trees; handle_one()
1594 count = context->tree_count; handle_one()
1600 if (likely(put_tree_ref(context, chunk))) handle_one()
1602 if (unlikely(!grow_tree_refs(context))) { handle_one()
1604 audit_set_auditable(context); handle_one()
1606 unroll_tree_refs(context, p, count); handle_one()
1609 put_tree_ref(context, chunk); handle_one()
1616 struct audit_context *context; handle_path() local
1623 context = current->audit_context; handle_path()
1624 p = context->trees; handle_path()
1625 count = context->tree_count; handle_path()
1637 if (unlikely(!put_tree_ref(context, chunk))) { handle_path()
1652 unroll_tree_refs(context, p, count); handle_path()
1656 if (grow_tree_refs(context)) { handle_path()
1658 unroll_tree_refs(context, p, count); handle_path()
1663 unroll_tree_refs(context, p, count); handle_path()
1664 audit_set_auditable(context); handle_path()
1671 static struct audit_names *audit_alloc_name(struct audit_context *context, audit_alloc_name() argument
1676 if (context->name_count < AUDIT_NAMES) { audit_alloc_name()
1677 aname = &context->preallocated_names[context->name_count]; audit_alloc_name()
1688 list_add_tail(&aname->list, &context->names_list); audit_alloc_name()
1690 context->name_count++; audit_alloc_name()
1698 * Search the audit_names list for the current audit context. If there is an
1705 struct audit_context *context = current->audit_context; __audit_reusename() local
1708 list_for_each_entry(n, &context->names_list, list) { __audit_reusename()
1723 * Add a name to the list of audit names for this context.
1728 struct audit_context *context = current->audit_context; __audit_getname() local
1731 if (!context->in_syscall) __audit_getname()
1734 n = audit_alloc_name(context, AUDIT_TYPE_UNKNOWN); __audit_getname()
1743 if (!context->pwd.dentry) __audit_getname()
1744 get_fs_pwd(current->fs, &context->pwd); __audit_getname()
1756 struct audit_context *context = current->audit_context; __audit_inode() local
1761 if (!context->in_syscall) __audit_inode()
1783 list_for_each_entry_reverse(n, &context->names_list, list) { __audit_inode()
1810 n = audit_alloc_name(context, AUDIT_TYPE_UNKNOWN); __audit_inode()
1845 * This call updates the audit context with the child's information.
1855 struct audit_context *context = current->audit_context; __audit_inode_child() local
1860 if (!context->in_syscall) __audit_inode_child()
1867 list_for_each_entry(n, &context->names_list, list) { __audit_inode_child()
1884 list_for_each_entry(n, &context->names_list, list) { __audit_inode_child()
1904 n = audit_alloc_name(context, AUDIT_TYPE_PARENT); __audit_inode_child()
1911 found_child = audit_alloc_name(context, type); __audit_inode_child()
1916 * directory. All names for this context are relinquished in __audit_inode_child()
1938 * Also sets the context as auditable.
2043 struct audit_context *context = current->audit_context; __audit_mq_open() local
2046 memcpy(&context->mq_open.attr, attr, sizeof(struct mq_attr)); __audit_mq_open()
2048 memset(&context->mq_open.attr, 0, sizeof(struct mq_attr)); __audit_mq_open()
2050 context->mq_open.oflag = oflag; __audit_mq_open()
2051 context->mq_open.mode = mode; __audit_mq_open()
2053 context->type = AUDIT_MQ_OPEN; __audit_mq_open()
2067 struct audit_context *context = current->audit_context; __audit_mq_sendrecv() local
2068 struct timespec *p = &context->mq_sendrecv.abs_timeout; __audit_mq_sendrecv()
2075 context->mq_sendrecv.mqdes = mqdes; __audit_mq_sendrecv()
2076 context->mq_sendrecv.msg_len = msg_len; __audit_mq_sendrecv()
2077 context->mq_sendrecv.msg_prio = msg_prio; __audit_mq_sendrecv()
2079 context->type = AUDIT_MQ_SENDRECV; __audit_mq_sendrecv()
2091 struct audit_context *context = current->audit_context; __audit_mq_notify() local
2094 context->mq_notify.sigev_signo = notification->sigev_signo; __audit_mq_notify()
2096 context->mq_notify.sigev_signo = 0; __audit_mq_notify()
2098 context->mq_notify.mqdes = mqdes; __audit_mq_notify()
2099 context->type = AUDIT_MQ_NOTIFY; __audit_mq_notify()
2110 struct audit_context *context = current->audit_context; __audit_mq_getsetattr() local
2111 context->mq_getsetattr.mqdes = mqdes; __audit_mq_getsetattr()
2112 context->mq_getsetattr.mqstat = *mqstat; __audit_mq_getsetattr()
2113 context->type = AUDIT_MQ_GETSETATTR; __audit_mq_getsetattr()
2123 struct audit_context *context = current->audit_context; __audit_ipc_obj() local
2124 context->ipc.uid = ipcp->uid; __audit_ipc_obj()
2125 context->ipc.gid = ipcp->gid; __audit_ipc_obj()
2126 context->ipc.mode = ipcp->mode; __audit_ipc_obj()
2127 context->ipc.has_perm = 0; __audit_ipc_obj()
2128 security_ipc_getsecid(ipcp, &context->ipc.osid); __audit_ipc_obj()
2129 context->type = AUDIT_IPC; __audit_ipc_obj()
2143 struct audit_context *context = current->audit_context; __audit_ipc_set_perm() local
2145 context->ipc.qbytes = qbytes; __audit_ipc_set_perm()
2146 context->ipc.perm_uid = uid; __audit_ipc_set_perm()
2147 context->ipc.perm_gid = gid; __audit_ipc_set_perm()
2148 context->ipc.perm_mode = mode; __audit_ipc_set_perm()
2149 context->ipc.has_perm = 1; __audit_ipc_set_perm()
2154 struct audit_context *context = current->audit_context; __audit_bprm() local
2156 context->type = AUDIT_EXECVE; __audit_bprm()
2157 context->execve.argc = bprm->argc; __audit_bprm()
2169 struct audit_context *context = current->audit_context; __audit_socketcall() local
2173 context->type = AUDIT_SOCKETCALL; __audit_socketcall()
2174 context->socketcall.nargs = nargs; __audit_socketcall()
2175 memcpy(context->socketcall.args, args, nargs * sizeof(unsigned long)); __audit_socketcall()
2187 struct audit_context *context = current->audit_context; __audit_fd_pair() local
2188 context->fds[0] = fd1; __audit_fd_pair()
2189 context->fds[1] = fd2; __audit_fd_pair()
2197 * Returns 0 on success or for a NULL context, or < 0 on error. __audit_sockaddr()
2201 struct audit_context *context = current->audit_context; __audit_sockaddr() local
2203 if (!context->sockaddr) { __audit_sockaddr()
2207 context->sockaddr = p; __audit_sockaddr()
2210 context->sockaddr_len = len; __audit_sockaddr()
2211 memcpy(context->sockaddr, a, len); __audit_sockaddr()
2217 struct audit_context *context = current->audit_context; __audit_ptrace() local
2219 context->target_pid = task_pid_nr(t); __audit_ptrace()
2220 context->target_auid = audit_get_loginuid(t); __audit_ptrace()
2221 context->target_uid = task_uid(t); __audit_ptrace()
2222 context->target_sessionid = audit_get_sessionid(t); __audit_ptrace()
2223 security_task_getsecid(t, &context->target_sid); __audit_ptrace()
2224 memcpy(context->target_comm, t->comm, TASK_COMM_LEN); __audit_ptrace()
2305 struct audit_context *context = current->audit_context; __audit_log_bprm_fcaps() local
2313 ax->d.next = context->aux; __audit_log_bprm_fcaps()
2314 context->aux = (void *)ax; __audit_log_bprm_fcaps()
2343 struct audit_context *context = current->audit_context; __audit_log_capset() local
2344 context->capset.pid = task_pid_nr(current); __audit_log_capset()
2345 context->capset.cap.effective = new->cap_effective; __audit_log_capset()
2346 context->capset.cap.inheritable = new->cap_effective; __audit_log_capset()
2347 context->capset.cap.permitted = new->cap_permitted; __audit_log_capset()
2348 context->type = AUDIT_CAPSET; __audit_log_capset()
2353 struct audit_context *context = current->audit_context; __audit_mmap_fd() local
2354 context->mmap.fd = fd; __audit_mmap_fd()
2355 context->mmap.flags = flags; __audit_mmap_fd()
2356 context->type = AUDIT_MMAP; __audit_mmap_fd()
1283 audit_log_proctitle(struct task_struct *tsk, struct audit_context *context) audit_log_proctitle() argument
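
The auditsc.c hits above cover one pass of an audit_context through a system call: __audit_syscall_entry() latches the syscall number, arguments and filter state, and __audit_syscall_exit() emits the record when the state reached AUDIT_RECORD_CONTEXT, then scrubs the per-syscall fields so the context can be reused. A minimal userspace sketch of that entry/log/reset cycle; the types and the always-record filter stub are hypothetical stand-ins, not kernel API:

#include <stdio.h>
#include <string.h>

enum audit_state { AUDIT_DISABLED, AUDIT_BUILD_CONTEXT, AUDIT_RECORD_CONTEXT };

struct audit_context {                  /* hypothetical, pared-down mirror */
	int major;                      /* syscall number */
	unsigned long argv[4];          /* first four syscall arguments */
	int in_syscall;
	enum audit_state state;         /* sticky per-task filter state */
	enum audit_state current_state; /* decision for this one syscall */
};

static void syscall_entry(struct audit_context *ctx, int major,
			  unsigned long a1, unsigned long a2,
			  unsigned long a3, unsigned long a4)
{
	ctx->major = major;
	ctx->argv[0] = a1; ctx->argv[1] = a2;
	ctx->argv[2] = a3; ctx->argv[3] = a4;
	ctx->in_syscall = 1;
	/* stand-in for audit_filter_syscall(): record everything */
	ctx->current_state = (ctx->state == AUDIT_BUILD_CONTEXT)
			     ? AUDIT_RECORD_CONTEXT : ctx->state;
}

static void syscall_exit(struct audit_context *ctx, long return_code)
{
	if (ctx->in_syscall && ctx->current_state == AUDIT_RECORD_CONTEXT)
		printf("audit: syscall=%d a0=%lu ret=%ld\n",
		       ctx->major, ctx->argv[0], return_code);
	/* scrub per-syscall fields so the context can be reused */
	ctx->in_syscall = 0;
	memset(ctx->argv, 0, sizeof(ctx->argv));
}

int main(void)
{
	struct audit_context ctx = { .state = AUDIT_BUILD_CONTEXT };

	syscall_entry(&ctx, 2, 42, 0, 0, 0);  /* pretend syscall 2, arg 42 */
	syscall_exit(&ctx, 3);                /* pretend it returned fd 3 */
	return 0;
}
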
H A Duser-return-notifier.c11 * called in atomic context. The notifier will also be called in atomic
12 * context.
23 * context, and from the same cpu registration occurred in.
/linux-4.1.27/arch/tile/include/gxio/
H A Duart.h33 /* A context object used to manage UART resources. */
49 * @param context Pointer to a properly initialized gxio_uart_context_t.
58 extern int gxio_uart_cfg_interrupt(gxio_uart_context_t *context,
63 /* Initialize a UART context.
65 * A properly initialized context must be obtained before any of the other
68 * @param context Pointer to a gxio_uart_context_t, which will be initialized
71 * @return Zero if the context was successfully initialized, else a
74 extern int gxio_uart_init(gxio_uart_context_t *context, int uart_index);
76 /* Destroy a UART context.
78 * Once destroyed, a context may not be used with any gxio_uart routines
80 * interrupts requested on this context will be delivered. The state and
81 * configuration of the pins which had been attached to this context are
84 * @param context Pointer to a gxio_uart_context_t.
85 * @return Zero if the context was successfully destroyed, else a
88 extern int gxio_uart_destroy(gxio_uart_context_t *context);
91 * @param context Pointer to a gxio_uart_context_t.
95 extern void gxio_uart_write(gxio_uart_context_t *context, uint64_t offset,
99 * @param context Pointer to a gxio_uart_context_t.
103 extern uint64_t gxio_uart_read(gxio_uart_context_t *context, uint64_t offset);
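
uart.h above spells out the context lifecycle: gxio_uart_init() binds a gxio_uart_context_t to one UART, gxio_uart_read()/gxio_uart_write() then go through that context, and gxio_uart_destroy() releases it, after which no further interrupts are delivered. A sketch of stringing those calls together; the register names are the ones the tilegx serial driver hits below use, and error handling is abbreviated:

#include <gxio/uart.h>  /* the declarations listed above */

/* Sketch: claim UART 0, transmit one byte, then release the context.
 * UART_TRANSMIT_DATA / UART_FIFO_COUNT are the register names used by
 * the tilegx serial driver further down in these results. */
static int uart_demo(void)
{
	gxio_uart_context_t ctx;
	int err = gxio_uart_init(&ctx, 0);    /* bind context to UART 0 */

	if (err)
		return err;
	gxio_uart_write(&ctx, UART_TRANSMIT_DATA, (unsigned long)'x');
	(void)gxio_uart_read(&ctx, UART_FIFO_COUNT);
	return gxio_uart_destroy(&ctx);       /* no interrupts after this */
}
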
H A Diorpc_mpipe.h59 int gxio_mpipe_alloc_buffer_stacks(gxio_mpipe_context_t *context,
63 int gxio_mpipe_init_buffer_stack_aux(gxio_mpipe_context_t *context,
69 int gxio_mpipe_alloc_notif_rings(gxio_mpipe_context_t *context,
73 int gxio_mpipe_init_notif_ring_aux(gxio_mpipe_context_t *context, void *mem_va,
77 int gxio_mpipe_request_notif_ring_interrupt(gxio_mpipe_context_t *context,
82 int gxio_mpipe_enable_notif_ring_interrupt(gxio_mpipe_context_t *context,
85 int gxio_mpipe_alloc_notif_groups(gxio_mpipe_context_t *context,
89 int gxio_mpipe_init_notif_group(gxio_mpipe_context_t *context,
93 int gxio_mpipe_alloc_buckets(gxio_mpipe_context_t *context, unsigned int count,
96 int gxio_mpipe_init_bucket(gxio_mpipe_context_t *context, unsigned int bucket,
99 int gxio_mpipe_alloc_edma_rings(gxio_mpipe_context_t *context,
103 int gxio_mpipe_init_edma_ring_aux(gxio_mpipe_context_t *context, void *mem_va,
108 int gxio_mpipe_commit_rules(gxio_mpipe_context_t *context, const void *blob,
111 int gxio_mpipe_register_client_memory(gxio_mpipe_context_t *context,
115 int gxio_mpipe_link_open_aux(gxio_mpipe_context_t *context,
118 int gxio_mpipe_link_close_aux(gxio_mpipe_context_t *context, int mac);
120 int gxio_mpipe_link_set_attr_aux(gxio_mpipe_context_t *context, int mac,
123 int gxio_mpipe_get_timestamp_aux(gxio_mpipe_context_t *context, uint64_t *sec,
126 int gxio_mpipe_set_timestamp_aux(gxio_mpipe_context_t *context, uint64_t sec,
129 int gxio_mpipe_adjust_timestamp_aux(gxio_mpipe_context_t *context,
132 int gxio_mpipe_adjust_timestamp_freq(gxio_mpipe_context_t *context,
135 int gxio_mpipe_arm_pollfd(gxio_mpipe_context_t *context, int pollfd_cookie);
137 int gxio_mpipe_close_pollfd(gxio_mpipe_context_t *context, int pollfd_cookie);
139 int gxio_mpipe_get_mmio_base(gxio_mpipe_context_t *context, HV_PTE *base);
141 int gxio_mpipe_check_mmio_offset(gxio_mpipe_context_t *context,
H A Dusb_host.h33 /* A context object used to manage USB hardware resources. */
43 /* Initialize a USB context.
45 * A properly initialized context must be obtained before any of the other
48 * @param context Pointer to a gxio_usb_host_context_t, which will be
53 * @return Zero if the context was successfully initialized, else a
56 extern int gxio_usb_host_init(gxio_usb_host_context_t *context, int usb_index,
59 /* Destroy a USB context.
61 * Once destroyed, a context may not be used with any gxio_usb_host routines
63 * interrupts or signals requested on this context will be delivered. The
65 * context are unchanged by this operation.
67 * @param context Pointer to a gxio_usb_host_context_t.
68 * @return Zero if the context was successfully destroyed, else a
71 extern int gxio_usb_host_destroy(gxio_usb_host_context_t *context);
75 * @param context Pointer to a properly initialized gxio_usb_host_context_t.
78 extern void *gxio_usb_host_get_reg_start(gxio_usb_host_context_t *context);
82 * @param context Pointer to a properly initialized gxio_usb_host_context_t.
85 extern size_t gxio_usb_host_get_reg_len(gxio_usb_host_context_t *context);
H A Diorpc_trio.h49 int gxio_trio_alloc_asids(gxio_trio_context_t *context, unsigned int count,
53 int gxio_trio_alloc_memory_maps(gxio_trio_context_t *context,
58 int gxio_trio_alloc_scatter_queues(gxio_trio_context_t *context,
62 int gxio_trio_alloc_pio_regions(gxio_trio_context_t *context,
66 int gxio_trio_init_pio_region_aux(gxio_trio_context_t *context,
71 int gxio_trio_init_memory_map_mmu_aux(gxio_trio_context_t *context,
78 int gxio_trio_get_port_property(gxio_trio_context_t *context,
81 int gxio_trio_config_legacy_intr(gxio_trio_context_t *context, int inter_x,
85 int gxio_trio_config_msi_intr(gxio_trio_context_t *context, int inter_x,
92 int gxio_trio_set_mps_mrs(gxio_trio_context_t *context, uint16_t mps,
95 int gxio_trio_force_rc_link_up(gxio_trio_context_t *context, unsigned int mac);
97 int gxio_trio_force_ep_link_up(gxio_trio_context_t *context, unsigned int mac);
99 int gxio_trio_get_mmio_base(gxio_trio_context_t *context, HV_PTE *base);
101 int gxio_trio_check_mmio_offset(gxio_trio_context_t *context,
H A Diorpc_uart.h32 int gxio_uart_cfg_interrupt(gxio_uart_context_t *context, int inter_x,
35 int gxio_uart_get_mmio_base(gxio_uart_context_t *context, HV_PTE *base);
37 int gxio_uart_check_mmio_offset(gxio_uart_context_t *context,
H A Diorpc_mpipe_info.h36 int gxio_mpipe_info_instance_aux(gxio_mpipe_info_context_t *context,
39 int gxio_mpipe_info_enumerate_aux(gxio_mpipe_info_context_t *context,
44 int gxio_mpipe_info_get_mmio_base(gxio_mpipe_info_context_t *context,
47 int gxio_mpipe_info_check_mmio_offset(gxio_mpipe_info_context_t *context,
H A Diorpc_usb_host.h34 int gxio_usb_host_cfg_interrupt(gxio_usb_host_context_t *context, int inter_x,
37 int gxio_usb_host_register_client_memory(gxio_usb_host_context_t *context,
40 int gxio_usb_host_get_mmio_base(gxio_usb_host_context_t *context,
43 int gxio_usb_host_check_mmio_offset(gxio_usb_host_context_t *context,
H A Dmpipe.h90 * mPIPE context's set of open links; all VLANs; and all dMACs.
315 /* A context object used to manage mPIPE hardware resources. */
338 /* Initialize an mPIPE context.
343 * @param context Context object to be initialized.
345 * context.
347 extern int gxio_mpipe_init(gxio_mpipe_context_t *context,
350 /* Destroy an mPIPE context.
356 * will destroy the mPIPE context as part of process teardown.
358 * @param context Context object to be destroyed.
360 extern int gxio_mpipe_destroy(gxio_mpipe_context_t *context);
370 * @param context An initialized mPIPE context.
378 extern int gxio_mpipe_alloc_buffer_stacks(gxio_mpipe_context_t *context,
427 * @param context An initialized mPIPE context.
439 extern int gxio_mpipe_init_buffer_stack(gxio_mpipe_context_t *context,
453 * @param context An initialized mPIPE context.
457 static inline void gxio_mpipe_push_buffer(gxio_mpipe_context_t *context, gxio_mpipe_push_buffer() argument
478 __gxio_mmio_write(context->mmio_fast_base + offset.word, val.word); gxio_mpipe_push_buffer()
483 * @param context An initialized mPIPE context.
487 static inline void *gxio_mpipe_pop_buffer(gxio_mpipe_context_t *context, gxio_mpipe_pop_buffer() argument
509 __gxio_mmio_read(context->mmio_fast_base + gxio_mpipe_pop_buffer()
533 * @param context An initialized mPIPE context.
541 extern int gxio_mpipe_alloc_notif_rings(gxio_mpipe_context_t *context,
547 * @param context An initialized mPIPE context.
558 extern int gxio_mpipe_init_notif_ring(gxio_mpipe_context_t *context,
567 * @param context An initialized mPIPE context.
577 *context, int x, int y,
583 * @param context An initialized mPIPE context.
588 *context, unsigned int ring);
591 * @param context An initialized mPIPE context.
597 extern int gxio_mpipe_register_client_memory(gxio_mpipe_context_t *context,
609 * @param context An initialized mPIPE context.
617 extern int gxio_mpipe_alloc_notif_groups(gxio_mpipe_context_t *context,
638 extern int gxio_mpipe_init_notif_group(gxio_mpipe_context_t *context,
658 * @param context An initialized mPIPE context.
666 extern int gxio_mpipe_alloc_buckets(gxio_mpipe_context_t *context,
726 * @param context An initialized mPIPE context.
731 extern int gxio_mpipe_init_bucket(gxio_mpipe_context_t *context,
747 * @param context An initialized mPIPE context.
760 *context,
771 * @param context An initialized mPIPE context.
776 static inline void gxio_mpipe_credit(gxio_mpipe_context_t *context, gxio_mpipe_credit() argument
797 __gxio_mmio_write(context->mmio_fast_base + offset.word, val.word); gxio_mpipe_credit()
808 * @param context An initialized mPIPE context.
816 extern int gxio_mpipe_alloc_edma_rings(gxio_mpipe_context_t *context,
822 * @param context An initialized mPIPE context.
825 * associated with the context's set of open links.
835 extern int gxio_mpipe_init_edma_ring(gxio_mpipe_context_t *context,
850 * @param context An initialized mPIPE context.
861 extern int gxio_mpipe_config_edma_ring_blks(gxio_mpipe_context_t *context,
937 /* A set of classifier rules, plus a context. */
940 /* The context. */
941 gxio_mpipe_context_t *context; member in struct:__anon2830
954 * @param context An initialized mPIPE context.
957 gxio_mpipe_context_t *context);
994 * Channels added must be associated with links opened by the mPIPE context
1118 /* The context. */
1119 gxio_mpipe_context_t *context; member in struct:__anon2831
1151 gxio_mpipe_context_t *context,
1185 * This function is shorthand for "gxio_mpipe_credit(iqueue->context,
1201 gxio_mpipe_credit(iqueue->context, iqueue->ring, idesc->bucket_id, 1); gxio_mpipe_iqueue_release()
1217 * "gxio_mpipe_credit(iqueue->context, iqueue->ring, bucket, N)".
1221 * "gxio_mpipe_credit(iqueue->context, iqueue->ring, -1, 1)", to
1305 gxio_mpipe_push_buffer(iqueue->context, idesc->stack_idx, va); gxio_mpipe_iqueue_drop()
1328 /* The context. */
1329 gxio_mpipe_context_t *context; member in struct:__anon2832
1345 * @param context An initialized mPIPE context.
1348 * associated with the context's set of open links.
1359 gxio_mpipe_context_t *context,
1585 return gxio_mpipe_config_edma_ring_blks(equeue->context, equeue->ering, gxio_mpipe_equeue_set_snf_size()
1719 /* The overall mPIPE context. */
1720 gxio_mpipe_context_t *context; member in struct:__anon2833
1738 * context, to configure the link.
1777 * @param context An initialized mPIPE context.
1784 gxio_mpipe_context_t *context,
1827 * @param context An initialized mPIPE context.
1832 extern int gxio_mpipe_get_timestamp(gxio_mpipe_context_t *context,
1837 * @param context An initialized mPIPE context.
1842 extern int gxio_mpipe_set_timestamp(gxio_mpipe_context_t *context,
1847 * @param context An initialized mPIPE context.
1854 extern int gxio_mpipe_adjust_timestamp(gxio_mpipe_context_t *context,
1859 * @param context An initialized mPIPE context.
1868 extern int gxio_mpipe_adjust_timestamp_freq(gxio_mpipe_context_t* context,
/linux-4.1.27/arch/ia64/include/asm/
H A Dmmu_context.h10 * Routines to manage the allocation of task context numbers. Task context
12 * due to context switches. Context numbers are implemented using ia-64
36 unsigned int next; /* next context number to use */
38 unsigned int max_ctx; /* max. context value supported by all CPUs */
56 * When the context counter wraps around all TLBs need to be flushed because
57 * an old context number might have been reused. This is signalled by the
81 nv_mm_context_t context = mm->context; get_mmu_context() local
83 if (likely(context)) get_mmu_context()
88 context = mm->context; get_mmu_context()
89 if (context == 0) { get_mmu_context()
99 mm->context = context = ia64_ctx.next++; get_mmu_context()
100 __set_bit(context, ia64_ctx.bitmap); get_mmu_context()
105 * Ensure we're not starting to use "context" before any old get_mmu_context()
110 return context; get_mmu_context()
114 * Initialize context number to some sane value. MM is guaranteed to be a
120 mm->context = 0; init_new_context()
131 reload_context (nv_mm_context_t context) reload_context() argument
138 rid = context << 3; /* make space for encoding the region number */ reload_context()
165 nv_mm_context_t context; activate_context() local
168 context = get_mmu_context(mm); activate_context()
171 reload_context(context); activate_context()
176 } while (unlikely(context != mm->context)); activate_context()
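
get_mmu_context() above allocates a context number per mm on first use, records it in ia64_ctx.bitmap, and flushes all TLBs when the counter wraps so stale numbers can be recycled. The same allocate-once, flush-on-wrap pattern reduced to a portable C model (illustrative only, with a tiny context space):

#include <stdio.h>
#include <string.h>

#define MAX_CTX 64                     /* model: tiny context space */

static unsigned char bitmap[MAX_CTX];  /* which context numbers are live */
static unsigned int next_ctx = 1;      /* 0 is reserved, like the kernel's */

static void flush_all_tlbs(void) { puts("wrap: flush TLBs, recycle numbers"); }

/* Return mm's context number, allocating one on first use. */
static unsigned int get_mmu_context(unsigned int *mm_context)
{
	if (*mm_context)               /* fast path: already have one */
		return *mm_context;
	if (next_ctx == MAX_CTX) {     /* wrapped: old numbers may be reused */
		flush_all_tlbs();
		memset(bitmap, 0, sizeof(bitmap));
		next_ctx = 1;
	}
	*mm_context = next_ctx++;
	bitmap[*mm_context] = 1;       /* mirror of __set_bit() above */
	return *mm_context;
}

int main(void)
{
	unsigned int mm_a = 0, mm_b = 0;

	printf("A=%u B=%u A=%u\n", get_mmu_context(&mm_a),
	       get_mmu_context(&mm_b), get_mmu_context(&mm_a));
	return 0;
}
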
H A Dmmu.h5 * Type for a context number. We declare it volatile to ensure proper
H A Dswitch_to.h24 * context switch MUST be done before calling ia64_switch_to() since a
55 * In the SMP case, we save the fph state when context-switching away from a thread that
67 /* "next" in old context is "current" in new context */ \
/linux-4.1.27/drivers/media/usb/as102/
H A Das10x_cmd_cfg.c25 * as10x_cmd_get_context - Send get context command to AS10x
27 * @tag: context tag
28 * @pvalue: pointer where to store context value read
43 sizeof(pcmd->body.context.req)); as10x_cmd_get_context()
46 pcmd->body.context.req.proc_id = cpu_to_le16(CONTROL_PROC_CONTEXT); as10x_cmd_get_context()
47 pcmd->body.context.req.tag = cpu_to_le16(tag); as10x_cmd_get_context()
48 pcmd->body.context.req.type = cpu_to_le16(GET_CONTEXT_DATA); as10x_cmd_get_context()
54 sizeof(pcmd->body.context.req) as10x_cmd_get_context()
57 sizeof(prsp->body.context.rsp) as10x_cmd_get_context()
66 /* parse response: context commands do not follow the common response */ as10x_cmd_get_context()
72 *pvalue = le32_to_cpu((__force __le32)prsp->body.context.rsp.reg_val.u.value32); as10x_cmd_get_context()
81 * as10x_cmd_set_context - send set context command to AS10x
83 * @tag: context tag
84 * @value: value to set in context
99 sizeof(pcmd->body.context.req)); as10x_cmd_set_context()
102 pcmd->body.context.req.proc_id = cpu_to_le16(CONTROL_PROC_CONTEXT); as10x_cmd_set_context()
103 /* pcmd->body.context.req.reg_val.mode initialization is not required */ as10x_cmd_set_context()
104 pcmd->body.context.req.reg_val.u.value32 = (__force u32)cpu_to_le32(value); as10x_cmd_set_context()
105 pcmd->body.context.req.tag = cpu_to_le16(tag); as10x_cmd_set_context()
106 pcmd->body.context.req.type = cpu_to_le16(SET_CONTEXT_DATA); as10x_cmd_set_context()
112 sizeof(pcmd->body.context.req) as10x_cmd_set_context()
115 sizeof(prsp->body.context.rsp) as10x_cmd_set_context()
124 /* parse response: context commands do not follow the common response */ as10x_cmd_set_context()
182 * as10x_context_rsp_parse - Parse context command response
194 err = prsp->body.context.rsp.error; as10x_context_rsp_parse()
197 (le16_to_cpu(prsp->body.context.rsp.proc_id) == proc_id)) { as10x_context_rsp_parse()
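
The as102 hits show the usual wire-format discipline: every multi-byte request field goes through cpu_to_le16()/cpu_to_le32() and responses come back through le16_to_cpu()/le32_to_cpu(), so the command bytes are identical on big- and little-endian hosts. A userspace sketch of the same fill/parse pattern using glibc's <endian.h>; the struct layout is invented for illustration:

#include <endian.h>
#include <stdint.h>
#include <stdio.h>

struct ctx_req {                  /* hypothetical little-endian wire format */
	uint16_t proc_id;
	uint16_t tag;
	uint16_t type;
	uint32_t value;
} __attribute__((packed));

int main(void)
{
	struct ctx_req req;

	/* fill: convert each field to little-endian before it hits the wire */
	req.proc_id = htole16(0x0010);
	req.tag     = htole16(3);
	req.type    = htole16(1);          /* a SET_CONTEXT_DATA-style op */
	req.value   = htole32(0xdeadbeef);

	/* parse: convert back to host order when reading it as a response */
	printf("tag=%u value=0x%x\n", le16toh(req.tag), le32toh(req.value));
	return 0;
}
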
/linux-4.1.27/drivers/tty/serial/
H A Dtilegx.c48 /* GXIO device context. */
49 gxio_uart_context_t context; member in struct:tile_uart_port
71 gxio_uart_context_t *context = &tile_uart->context; receive_chars() local
74 count.word = gxio_uart_read(context, UART_FIFO_COUNT); receive_chars()
76 c = (char)gxio_uart_read(context, UART_RECEIVE_DATA); receive_chars()
89 gxio_uart_context_t *context = &tile_uart->context; handle_receive() local
98 gxio_uart_write(context, UART_INTERRUPT_STATUS, handle_receive()
117 static int tilegx_putchar(gxio_uart_context_t *context, char c) tilegx_putchar() argument
120 flag.word = gxio_uart_read(context, UART_FLAG); tilegx_putchar()
124 gxio_uart_write(context, UART_TRANSMIT_DATA, (unsigned long)c); tilegx_putchar()
137 gxio_uart_context_t *context = &tile_uart->context; handle_transmit() local
140 gxio_uart_write(context, UART_INTERRUPT_STATUS, handle_transmit()
146 if (tilegx_putchar(context, port->x_char)) handle_transmit()
157 if (tilegx_putchar(context, ch)) handle_transmit()
164 gxio_uart_write(context, UART_INTERRUPT_STATUS, handle_transmit()
180 gxio_uart_context_t *context; tilegx_interrupt() local
187 context = &tile_uart->context; tilegx_interrupt()
188 intr_stat.word = gxio_uart_read(context, UART_INTERRUPT_STATUS); tilegx_interrupt()
212 gxio_uart_context_t *context; tilegx_tx_empty() local
217 context = &tile_uart->context; tilegx_tx_empty()
219 flag.word = gxio_uart_read(context, UART_FLAG); tilegx_tx_empty()
262 gxio_uart_context_t *context; tilegx_start_tx() local
267 context = &tile_uart->context; tilegx_start_tx()
270 if (tilegx_putchar(context, port->x_char)) tilegx_start_tx()
283 if (tilegx_putchar(context, ch)) tilegx_start_tx()
303 gxio_uart_context_t *context; tilegx_stop_rx() local
310 context = &tile_uart->context; tilegx_stop_rx()
312 err = gxio_uart_cfg_interrupt(context, cpu_x(cpu), cpu_y(cpu), tilegx_stop_rx()
332 gxio_uart_context_t *context; tilegx_startup() local
339 context = &tile_uart->context; tilegx_startup()
342 if (context->fd < 0) { tilegx_startup()
346 ret = gxio_uart_init(context, port->line); tilegx_startup()
366 ret = gxio_uart_cfg_interrupt(context, cpu_x(cpu), cpu_y(cpu), tilegx_startup()
372 intr_mask.word = gxio_uart_read(context, UART_INTERRUPT_MASK); tilegx_startup()
375 gxio_uart_write(context, UART_INTERRUPT_MASK, intr_mask.word); tilegx_startup()
378 gxio_uart_write(context, UART_INTERRUPT_STATUS, tilegx_startup()
391 gxio_uart_destroy(context); tilegx_startup()
408 gxio_uart_context_t *context; tilegx_shutdown() local
414 context = &tile_uart->context; tilegx_shutdown()
417 intr_mask.word = gxio_uart_read(context, UART_INTERRUPT_MASK); tilegx_shutdown()
420 gxio_uart_write(context, UART_INTERRUPT_MASK, intr_mask.word); tilegx_shutdown()
424 err = gxio_uart_cfg_interrupt(context, cpu_x(cpu), cpu_y(cpu), tilegx_shutdown()
433 gxio_uart_destroy(context); tilegx_shutdown()
459 gxio_uart_context_t *context; tilegx_set_termios() local
464 context = &tile_uart->context; tilegx_set_termios()
467 if (context->fd < 0) { tilegx_set_termios()
468 err = gxio_uart_init(context, port->line); tilegx_set_termios()
475 divisor.word = gxio_uart_read(context, UART_DIVISOR); tilegx_set_termios()
476 type.word = gxio_uart_read(context, UART_TYPE); tilegx_set_termios()
510 gxio_uart_write(context, UART_DIVISOR, divisor.word); tilegx_set_termios()
511 gxio_uart_write(context, UART_TYPE, type.word); tilegx_set_termios()
571 * in an interrupt or debug context.
577 gxio_uart_context_t *context; tilegx_poll_get_char() local
581 context = &tile_uart->context; tilegx_poll_get_char()
582 count.word = gxio_uart_read(context, UART_FIFO_COUNT); tilegx_poll_get_char()
585 return (char)gxio_uart_read(context, UART_RECEIVE_DATA); tilegx_poll_get_char()
590 gxio_uart_context_t *context; tilegx_poll_put_char() local
594 context = &tile_uart->context; tilegx_poll_put_char()
595 gxio_uart_write(context, UART_TRANSMIT_DATA, (unsigned long)c); tilegx_poll_put_char()
638 tile_uart_ports[i].context.fd = -1; tilegx_init_ports()
/linux-4.1.27/arch/metag/include/uapi/asm/
H A Dech.h5 * These bits can be set in the top half of the D0.8 register when DSP context
6 * switching is enabled, in order to support partial DSP context save/restore.
9 #define TBICTX_XEXT_BIT 0x1000 /* Enable extended context save */
/linux-4.1.27/include/trace/events/
H A Dfence.h21 __field(unsigned int, context)
33 __entry->context = fence->context;
38 __entry->waiting_context = f1->context;
43 TP_printk("driver=%s timeline=%s context=%u seqno=%u " \
44 "waits on driver=%s timeline=%s context=%u seqno=%u",
45 __get_str(driver), __get_str(timeline), __entry->context,
60 __field(unsigned int, context)
67 __entry->context = fence->context;
71 TP_printk("driver=%s timeline=%s context=%u seqno=%u",
72 __get_str(driver), __get_str(timeline), __entry->context,
/linux-4.1.27/include/linux/platform_data/
H A Dpca953x.h19 void *context; /* param to setup/teardown */ member in struct:pca953x_platform_data
23 void *context);
26 void *context);
H A Dat24.h21 * @context: optional parameter passed to setup()
29 * void get_mac_addr(struct memory_accessor *mem_acc, void *context)
32 * off_t offset = context;
39 * This function pointer and context can now be set up in at24_platform_data.
51 void (*setup)(struct memory_accessor *, void *context);
52 void *context; member in struct:at24_platform_data
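
at24.h above lets board code attach a setup() hook plus an opaque context pointer to at24_platform_data; the driver invokes the hook once the EEPROM is reachable, and the header's own example reads a MAC address at an offset smuggled in via context. A sketch of wiring that up; the memory_accessor read() signature and the field values are assumptions for this kernel version:

#include <linux/kernel.h>
#include <linux/memory.h>             /* struct memory_accessor (assumed) */
#include <linux/platform_data/at24.h> /* struct at24_platform_data */

/* Invoked by the at24 driver once the EEPROM is reachable; context
 * carries a board-specific offset, as in the header comment above. */
static void board_get_mac(struct memory_accessor *mem_acc, void *context)
{
	char mac[6];
	off_t offset = (off_t)(unsigned long)context;

	if (mem_acc->read(mem_acc, mac, offset, sizeof(mac)) == sizeof(mac))
		pr_info("EEPROM MAC %pM\n", mac);
}

static struct at24_platform_data board_eeprom = {
	.byte_len  = 256,            /* illustrative part parameters */
	.page_size = 16,
	.setup     = board_get_mac,
	.context   = (void *)0x40,   /* say the MAC lives at offset 0x40 */
};
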
/linux-4.1.27/drivers/misc/cxl/
H A DMakefile1 cxl-y += main.o file.o irq.o fault.o native.o context.o sysfs.o debugfs.o pci.o trace.o
/linux-4.1.27/arch/arm64/mm/
H A DMakefile4 context.o proc.o pageattr.o
H A Dcontext.c2 * Based on arch/arm/mm/context.c
39 * We fork()ed a process, and we need a new context for the child to run in.
43 mm->context.id = 0; __init_new_context()
44 raw_spin_lock_init(&mm->context.id_lock); __init_new_context()
64 * mm->context.id could be set from different CPUs during the set_mm_context()
66 * mm->context.id_lock has to be IRQ-safe. set_mm_context()
68 raw_spin_lock_irqsave(&mm->context.id_lock, flags); set_mm_context()
69 if (likely((mm->context.id ^ cpu_last_asid) >> MAX_ASID_BITS)) { set_mm_context()
74 mm->context.id = asid; set_mm_context()
77 raw_spin_unlock_irqrestore(&mm->context.id_lock, flags); set_mm_context()
98 * the reserved value, so no need to reset any context. reset_context()
117 mm->context.id = asid; set_mm_context()
134 if (!unlikely((mm->context.id ^ cpu_last_asid) >> MAX_ASID_BITS)) { __new_context()
/linux-4.1.27/arch/m68k/include/asm/
H A Dmmu.h5 /* Default "unsigned long" context */
H A Dmmu_context.h34 if (mm->context != NO_CONTEXT) get_mmu_context()
47 mm->context = ctx; get_mmu_context()
52 * Set up the context for a new address space.
54 #define init_new_context(tsk, mm) (((mm)->context = NO_CONTEXT), 0)
57 * We're finished using the context for an address space.
61 if (mm->context != NO_CONTEXT) { destroy_context()
62 clear_bit(mm->context, context_map); destroy_context()
63 mm->context = NO_CONTEXT; destroy_context()
68 static inline void set_context(mm_context_t context, pgd_t *pgd) set_context() argument
70 __asm__ __volatile__ ("movec %0,%%asid" : : "d" (context)); set_context()
77 set_context(tsk->mm->context, next->pgd); switch_mm()
82 * the context for the new mm so we see the new mappings.
88 set_context(mm->context, mm->pgd); activate_mm()
139 asid = mm->context & 0xff; load_ksp_mmu()
165 extern void clear_context(unsigned long context);
167 /* set the context for a new task to unmapped */ init_new_context()
171 mm->context = SUN3_INVALID_CONTEXT; init_new_context()
175 /* find the context given to this process, and if it hasn't already
179 if (mm->context == SUN3_INVALID_CONTEXT) get_mmu_context()
180 mm->context = get_free_context(mm); get_mmu_context()
183 /* flush context if allocated... */ destroy_context()
186 if (mm->context != SUN3_INVALID_CONTEXT) destroy_context()
187 clear_context(mm->context); destroy_context()
193 sun3_put_context(mm->context); activate_context()
219 mm->context = virt_to_phys(mm->pgd); init_new_context()
228 0x80000000 | _PAGE_TABLE, mm->context switch_mm_0230()
266 asm volatile ("movec %0,%%urp" : : "r" (mm->context)); switch_mm_0460()
297 next_mm->context = virt_to_phys(next_mm->pgd); activate_mm()
/linux-4.1.27/arch/alpha/include/asm/
H A Dmmu.h4 /* The alpha MMU context is one "unsigned long" bitmap per CPU */
/linux-4.1.27/tools/perf/scripts/perl/
H A Dcheck-perf-trace.pl30 my ($event_name, $context, $common_cpu, $common_secs, $common_nsecs,
37 print_uncommon($context);
45 my ($event_name, $context, $common_cpu, $common_secs, $common_nsecs,
53 print_uncommon($context);
65 my ($context) = @_;
68 common_pc($context), trace_flag_str(common_flags($context)),
69 common_lock_depth($context));
94 my ($event_name, $context, $common_cpu, $common_secs, $common_nsecs,
H A Dwakeup-latency.pl30 my ($event_name, $context, $common_cpu, $common_secs, $common_nsecs,
53 my ($event_name, $context, $common_cpu, $common_secs, $common_nsecs,
103 my ($event_name, $context, $common_cpu, $common_secs, $common_nsecs,
H A Drw-by-pid.pl26 my ($event_name, $context, $common_cpu, $common_secs, $common_nsecs,
42 my ($event_name, $context, $common_cpu, $common_secs, $common_nsecs,
53 my ($event_name, $context, $common_cpu, $common_secs, $common_nsecs,
64 my ($event_name, $context, $common_cpu, $common_secs, $common_nsecs,
180 my ($event_name, $context, $common_cpu, $common_secs, $common_nsecs,
H A Drwtop.pl37 my ($event_name, $context, $common_cpu, $common_secs, $common_nsecs,
55 my ($event_name, $context, $common_cpu, $common_secs, $common_nsecs,
68 my ($event_name, $context, $common_cpu, $common_secs, $common_nsecs,
81 my ($event_name, $context, $common_cpu, $common_secs, $common_nsecs,
199 my ($event_name, $context, $common_cpu, $common_secs, $common_nsecs,
/linux-4.1.27/tools/perf/scripts/python/
H A Dcheck-perf-trace.py28 def irq__softirq_entry(event_name, context, common_cpu,
34 print_uncommon(context)
39 def kmem__kmalloc(event_name, context, common_cpu,
46 print_uncommon(context)
54 def trace_unhandled(event_name, context, event_fields_dict):
65 def print_uncommon(context):
67 % (common_pc(context), trace_flag_str(common_flags(context)), \
68 common_lock_depth(context))
H A Dnetdev-times.py227 def irq__softirq_entry(name, context, cpu, sec, nsec, pid, comm, callchain, vec):
230 event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec)
233 def irq__softirq_exit(name, context, cpu, sec, nsec, pid, comm, callchain, vec):
236 event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec)
239 def irq__softirq_raise(name, context, cpu, sec, nsec, pid, comm, callchain, vec):
242 event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec)
245 def irq__irq_handler_entry(name, context, cpu, sec, nsec, pid, comm,
247 event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
251 def irq__irq_handler_exit(name, context, cpu, sec, nsec, pid, comm, callchain, irq, ret):
252 event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, irq, ret)
255 def napi__napi_poll(name, context, cpu, sec, nsec, pid, comm, callchain, napi, dev_name):
256 event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
260 def net__netif_receive_skb(name, context, cpu, sec, nsec, pid, comm, callchain, skbaddr,
262 event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
266 def net__netif_rx(name, context, cpu, sec, nsec, pid, comm, callchain, skbaddr,
268 event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
272 def net__net_dev_queue(name, context, cpu, sec, nsec, pid, comm, callchain,
274 event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
278 def net__net_dev_xmit(name, context, cpu, sec, nsec, pid, comm, callchain,
280 event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
284 def skb__kfree_skb(name, context, cpu, sec, nsec, pid, comm, callchain,
286 event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
290 def skb__consume_skb(name, context, cpu, sec, nsec, pid, comm, callchain, skbaddr):
291 event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
295 def skb__skb_copy_datagram_iovec(name, context, cpu, sec, nsec, pid, comm, callchain,
297 event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
302 (name, context, cpu, time, pid, comm, irq, irq_name) = event_info
309 (name, context, cpu, time, pid, comm, irq, ret) = event_info
321 (name, context, cpu, time, pid, comm, vec) = event_info
335 (name, context, cpu, time, pid, comm, vec) = event_info
339 (name, context, cpu, time, pid, comm, vec) = event_info
357 (name, context, cpu, time, pid, comm, napi, dev_name) = event_info
365 (name, context, cpu, time, pid, comm,
383 (name, context, cpu, time, pid, comm,
398 (name, context, cpu, time, pid, comm,
409 (name, context, cpu, time, pid, comm,
424 (name, context, cpu, time, pid, comm,
447 (name, context, cpu, time, pid, comm, skbaddr) = event_info
457 (name, context, cpu, time, pid, comm, skbaddr, skblen) = event_info
H A Dsched-migration.py370 def sched__sched_stat_runtime(event_name, context, common_cpu,
375 def sched__sched_stat_iowait(event_name, context, common_cpu,
380 def sched__sched_stat_sleep(event_name, context, common_cpu,
385 def sched__sched_stat_wait(event_name, context, common_cpu,
390 def sched__sched_process_fork(event_name, context, common_cpu,
395 def sched__sched_process_wait(event_name, context, common_cpu,
400 def sched__sched_process_exit(event_name, context, common_cpu,
405 def sched__sched_process_free(event_name, context, common_cpu,
410 def sched__sched_migrate_task(event_name, context, common_cpu,
418 def sched__sched_switch(event_name, context, common_cpu,
428 def sched__sched_wakeup_new(event_name, context, common_cpu,
436 def sched__sched_wakeup(event_name, context, common_cpu,
444 def sched__sched_wait_task(event_name, context, common_cpu,
449 def sched__sched_kthread_stop_ret(event_name, context, common_cpu,
454 def sched__sched_kthread_stop(event_name, context, common_cpu,
459 def trace_unhandled(event_name, context, event_fields_dict):
/linux-4.1.27/arch/frv/mm/
H A Dmmu-context.c1 /* mmu-context.c: MMU context allocation and management
27 * initialise a new context
31 memset(&mm->context, 0, sizeof(mm->context)); init_new_context()
32 INIT_LIST_HEAD(&mm->context.id_link); init_new_context()
33 mm->context.itlb_cached_pge = 0xffffffffUL; init_new_context()
34 mm->context.dtlb_cached_pge = 0xffffffffUL; init_new_context()
41 * make sure a kernel MMU context has a CPU context number
54 /* find the first unallocated context number get_cxn()
87 * restore the current TLB miss handler mapped page tables into the MMU context and set up a
96 /* save the state of the outgoing MMU context */ change_mm_context()
104 /* select an MMU context number */ change_mm_context()
112 /* restore the state of the incoming MMU context */ change_mm_context()
128 * finished with an MMU context number
132 mm_context_t *ctx = &mm->context; destroy_context()
151 * display the MMU context currently a process is currently using
157 buffer += sprintf(buffer, "CXNR: %u\n", mm->context.id); proc_pid_status_frv_cxnr()
166 * (un)pin a process's mm_struct's MMU context ID
203 cxn_pinned = get_cxn(&mm->context); cxn_pin_by_pid()
H A DMakefile9 mmu-context.o dma-alloc.o elf-fdpic.o
/linux-4.1.27/arch/x86/include/asm/
H A Dinit.h6 void *context; /* context for alloc_pgt_page */ member in struct:x86_mapping_info
H A Dmmu.h8 * The x86 doesn't have a mmu context, but
H A Dmmu_context.h27 atomic_read(&mm->context.perf_rdpmc_allowed)) load_mm_cr4()
56 ldt = lockless_dereference(mm->context.ldt); load_mm_ldt()
59 * Any change to mm->context.ldt is followed by an IPI to all load_mm_ldt()
148 * It's possible that prev->context.ldt doesn't match switch_mm()
151 * prev->context.ldt but suppressed an IPI to this CPU. switch_mm()
152 * In this case, prev->context.ldt != NULL, because we switch_mm()
153 * never set context.ldt to NULL while the mm still switch_mm()
154 * exists. That means that next->context.ldt != switch_mm()
155 * prev->context.ldt, because mms never share an LDT. switch_mm()
157 if (unlikely(prev->context.ldt != next->context.ldt)) switch_mm()
168 * from irq context, from ptep_clear_flush() while in switch_mm()
H A Di387.h61 * get used from interrupt context as well. To prevent these kernel instructions
62 * in interrupt context interacting wrongly with other user/kernel fpu usage, we
63 * should use them only in the context of irq_ts_save/restore()
68 * If in process context and not atomic, we can take a spurious DNA fault. irq_ts_save()
69 * Otherwise, doing clts() in process context requires disabling preemption irq_ts_save()
/linux-4.1.27/arch/mn10300/include/asm/
H A Dmmu.h1 /* MN10300 Memory management context
12 * MMU context
H A Dmmu_context.h1 /* MN10300 MMU context management
57 #define mm_context(mm) (mm->context.tlbpid[smp_processor_id()])
61 * @mm: The userspace VM context being set up
83 * get an MMU context if one is needed
93 /* if we have an old version of the context, replace it */ get_mmu_context()
101 * initialise the context related info for a new mm_struct instance
109 mm->context.tlbpid[i] = MMU_NO_CONTEXT; init_new_context()
114 * after we have set current->mm to a new value, this activates the context for
129 * destroy_context - Destroy mm context information
132 * Destroy context related info for an mm_struct that is about to be put to
139 * @prev: The outgoing MM context.
140 * @next: The incoming MM context.
H A Ducontext.h1 /* MN10300 User context
/linux-4.1.27/arch/metag/include/asm/
H A Dmmu_context.h22 /* We use context to store a pointer to the page holding the init_new_context()
24 * running the pgd and context fields should be equal. init_new_context()
26 mm->context.pgd_base = (unsigned long) mm->pgd; init_new_context()
29 INIT_LIST_HEAD(&mm->context.tcm); init_new_context()
43 list_for_each_entry_safe(pos, n, &mm->context.tcm, list) { list_for_each_entry_safe()
79 /* prev->context == prev->pgd in the case where we are initially switch_mmu()
81 if (prev->context.pgd_base != (unsigned long) prev->pgd) { switch_mmu()
83 ((pgd_t *) prev->context.pgd_base)[i] = prev->pgd[i]; switch_mmu()
88 prev->pgd = (pgd_t *) prev->context.pgd_base; switch_mmu()
91 next->pgd[i] = ((pgd_t *) next->context.pgd_base)[i]; switch_mmu()
/linux-4.1.27/arch/powerpc/mm/
H A Dmmu_context_hash64.c2 * MMU context allocation for 64-bit kernels.
77 mm->context.id = index; init_new_context()
79 mm->context.cop_lockp = kmalloc(sizeof(spinlock_t), GFP_KERNEL); init_new_context()
80 if (!mm->context.cop_lockp) { init_new_context()
83 mm->context.id = MMU_NO_CONTEXT; init_new_context()
86 spin_lock_init(mm->context.cop_lockp); init_new_context()
90 mm->context.pte_frag = NULL; init_new_context()
110 pte_frag = mm->context.pte_frag; destroy_pagetable_page()
137 drop_cop(mm->context.acop, mm); destroy_context()
138 kfree(mm->context.cop_lockp); destroy_context()
139 mm->context.cop_lockp = NULL; destroy_context()
143 __destroy_context(mm->context.id); destroy_context()
145 mm->context.id = MMU_NO_CONTEXT; destroy_context()
H A Dicswx_pid.c62 if (mm->context.cop_pid == COP_PID_NONE) { get_cop_pid()
66 mm->context.cop_pid = pid; get_cop_pid()
68 return mm->context.cop_pid; get_cop_pid()
75 if ((!mm->context.acop) && (mm->context.cop_pid != COP_PID_NONE)) { disable_cop_pid()
76 free_pid = mm->context.cop_pid; disable_cop_pid()
77 mm->context.cop_pid = COP_PID_NONE; disable_cop_pid()
H A Dmmu_context_hash32.c34 * (virtual segment identifiers) for each context. Although the
40 * that we used to have when the context number overflowed,
50 * segment IDs). We use a skew on both the context and the high 4 bits
80 * Set up the context for a new address space.
84 mm->context.id = __init_new_context(); init_new_context()
90 * Free a context ID. Make sure to call this with preempt disabled!
99 * We're finished using the context for an address space.
104 if (mm->context.id != NO_CONTEXT) { destroy_context()
105 __destroy_context(mm->context.id); destroy_context()
106 mm->context.id = NO_CONTEXT; destroy_context()
112 * Initialize the context management stuff.
116 /* Reserve context 0 for kernel use */ mmu_context_init()
H A Dmmu_context_nohash.c19 * - The global context lock will not scale very well
22 * - Implement flush_tlb_mm() by making the context stale and picking
69 /* Steal a context from a task that has one at the moment.
74 * This isn't an LRU system, it just frees up each context in
79 * For context stealing, we use a slightly different approach for
100 if (mm->context.active) { steal_context_smp()
108 /* Mark this mm as having no context anymore */ steal_context_smp()
109 mm->context.id = MMU_NO_CONTEXT; steal_context_smp()
151 /* Mark this mm as having no context anymore */ steal_all_contexts()
152 mm->context.id = MMU_NO_CONTEXT; steal_all_contexts()
157 mm->context.active = 0; steal_all_contexts()
186 /* Flush the TLB for that context */ steal_context_up()
189 /* Mark this mm as having no context anymore */ steal_context_up()
190 mm->context.id = MMU_NO_CONTEXT; steal_context_up()
212 nact += context_mm[id]->context.active; context_check_map()
215 pr_err("MMU: Free context count out of sync ! (%d vs %d)\n", context_check_map()
237 pr_hard("[%d] activating context for mm @%p, active=%d, id=%d", switch_mmu_context()
238 cpu, next, next->context.active, next->context.id); switch_mmu_context()
242 next->context.active++; switch_mmu_context()
244 pr_hardcont(" (old=0x%p a=%d)", prev, prev->context.active); switch_mmu_context()
245 WARN_ON(prev->context.active < 1); switch_mmu_context()
246 prev->context.active--; switch_mmu_context()
252 /* If we already have a valid assigned context, skip all that */ switch_mmu_context()
253 id = next->context.id; switch_mmu_context()
263 /* We really don't have a context, let's try to acquire one */ switch_mmu_context()
287 /* We know there's at least one free context, try to find it */ switch_mmu_context()
296 next->context.id = id; switch_mmu_context()
302 /* If that context got marked stale on this CPU, then flush the switch_mmu_context()
327 * Set up the context for a new address space.
331 pr_hard("initing context for mm @%p\n", mm); init_new_context()
333 mm->context.id = MMU_NO_CONTEXT; init_new_context()
334 mm->context.active = 0; init_new_context()
345 * We're finished using the context for an address space.
352 if (mm->context.id == MMU_NO_CONTEXT) destroy_context()
355 WARN_ON(mm->context.active != 0); destroy_context()
358 id = mm->context.id; destroy_context()
361 mm->context.id = MMU_NO_CONTEXT; destroy_context()
363 mm->context.active = 0; destroy_context()
387 pr_devel("MMU: Allocating stale context map for CPU %d\n", cpu); mmu_context_cpu_notify()
395 pr_devel("MMU: Freeing stale context map for CPU %d\n", cpu); mmu_context_cpu_notify()
414 * Initialize the context management stuff.
422 init_mm.context.active = NR_CPUS; mmu_context_init()
463 * Allocate the maps used by context management mmu_context_init()
476 "MMU: Allocated %zu bytes of context maps for %d contexts\n", mmu_context_init()
482 * init_mm, and require using context 0 for a normal task. mmu_context_init()
483 * Other processors reserve the use of context zero for the kernel. mmu_context_init()
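
mmu_context_nohash.c above manages a small shared pool of context IDs: switch_mmu_context() keeps a still-valid ID, otherwise scans the map for a free one, and when none is free it steals an ID from another mm, setting the victim's id to MMU_NO_CONTEXT so that mm must re-allocate later. A compact model of that allocate-or-steal policy (the real code also tracks per-CPU active counts and staleness):

#include <stdio.h>

#define NR_CTX 4                      /* model: tiny shared ID pool */

struct mm { int id; };                /* -1 plays MMU_NO_CONTEXT */

static struct mm *owner[NR_CTX];      /* which mm holds each ID */

static int switch_mmu_context(struct mm *next)
{
	int id;

	if (next->id >= 0)                /* already valid: just use it */
		return next->id;

	for (id = 0; id < NR_CTX; id++)   /* look for a free ID */
		if (!owner[id])
			goto got;

	id = 0;                           /* none free: steal ID 0 */
	owner[id]->id = -1;               /* victim must re-allocate later */
	printf("stole context %d\n", id);
got:
	owner[id] = next;
	next->id = id;
	return id;
}

int main(void)
{
	struct mm mms[5] = { {-1}, {-1}, {-1}, {-1}, {-1} };

	for (int i = 0; i < 5; i++)       /* fifth mm forces a steal */
		printf("mm%d -> ctx %d\n", i, switch_mmu_context(&mms[i]));
	return 0;
}
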
/linux-4.1.27/fs/ocfs2/
H A Dmove_extents.c59 struct ocfs2_move_extents_context *context, __ocfs2_move_extent()
64 struct inode *inode = context->inode; __ocfs2_move_extent()
69 u64 ino = ocfs2_metadata_cache_owner(context->et.et_ci); __ocfs2_move_extent()
85 path = ocfs2_new_path_from_et(&context->et); __ocfs2_move_extent()
120 context->et.et_root_bh, __ocfs2_move_extent()
127 ret = ocfs2_split_extent(handle, &context->et, path, index, __ocfs2_move_extent()
128 &replace_rec, context->meta_ac, __ocfs2_move_extent()
129 &context->dealloc); __ocfs2_move_extent()
135 ocfs2_journal_dirty(handle, context->et.et_root_bh); __ocfs2_move_extent()
137 context->new_phys_cpos = new_p_cpos; __ocfs2_move_extent()
147 len, context->meta_ac, __ocfs2_move_extent()
148 &context->dealloc, 1); __ocfs2_move_extent()
227 static int ocfs2_defrag_extent(struct ocfs2_move_extents_context *context, ocfs2_defrag_extent() argument
230 int ret, credits = 0, extra_blocks = 0, partial = context->partial; ocfs2_defrag_extent()
232 struct inode *inode = context->inode; ocfs2_defrag_extent()
244 BUG_ON(!context->refcount_loc); ocfs2_defrag_extent()
246 ret = ocfs2_lock_refcount_tree(osb, context->refcount_loc, 1, ocfs2_defrag_extent()
254 context->refcount_loc, ocfs2_defrag_extent()
265 ret = ocfs2_lock_allocators_move_extents(inode, &context->et, *len, 1, ocfs2_defrag_extent()
266 &context->meta_ac, ocfs2_defrag_extent()
267 &context->data_ac, ocfs2_defrag_extent()
277 * if (context->data_ac) ocfs2_defrag_extent()
278 * context->data_ac->ac_resv = &OCFS2_I(inode)->ip_la_data_resv; ocfs2_defrag_extent()
298 ret = __ocfs2_claim_clusters(handle, context->data_ac, 1, *len, ocfs2_defrag_extent()
314 context->range->me_flags &= ~OCFS2_MOVE_EXT_FL_COMPLETE; ocfs2_defrag_extent()
323 ret = __ocfs2_move_extent(handle, context, cpos, new_len, phys_cpos, ocfs2_defrag_extent()
335 ret = ocfs2_cow_sync_writeback(inode->i_sb, context->inode, cpos, *len); ocfs2_defrag_extent()
345 if (context->data_ac) { ocfs2_defrag_extent()
346 ocfs2_free_alloc_context(context->data_ac); ocfs2_defrag_extent()
347 context->data_ac = NULL; ocfs2_defrag_extent()
350 if (context->meta_ac) { ocfs2_defrag_extent()
351 ocfs2_free_alloc_context(context->meta_ac); ocfs2_defrag_extent()
352 context->meta_ac = NULL; ocfs2_defrag_extent()
565 static int ocfs2_move_extent(struct ocfs2_move_extents_context *context, ocfs2_move_extent() argument
571 struct inode *inode = context->inode; ocfs2_move_extent()
580 context->range->me_threshold); ocfs2_move_extent()
590 BUG_ON(!context->refcount_loc); ocfs2_move_extent()
592 ret = ocfs2_lock_refcount_tree(osb, context->refcount_loc, 1, ocfs2_move_extent()
600 context->refcount_loc, ocfs2_move_extent()
611 ret = ocfs2_lock_allocators_move_extents(inode, &context->et, len, 1, ocfs2_move_extent()
612 &context->meta_ac, ocfs2_move_extent()
677 ret = __ocfs2_move_extent(handle, context, cpos, len, phys_cpos, ocfs2_move_extent()
704 ret = ocfs2_cow_sync_writeback(inode->i_sb, context->inode, cpos, len); ocfs2_move_extent()
722 if (context->meta_ac) { ocfs2_move_extent()
723 ocfs2_free_alloc_context(context->meta_ac); ocfs2_move_extent()
724 context->meta_ac = NULL; ocfs2_move_extent()
764 struct ocfs2_move_extents_context *context) __ocfs2_move_extents_range()
770 struct inode *inode = context->inode; __ocfs2_move_extents_range()
772 struct ocfs2_move_extents *range = context->range; __ocfs2_move_extents_range()
781 context->refcount_loc = le64_to_cpu(di->i_refcount_loc); __ocfs2_move_extents_range()
783 ocfs2_init_dinode_extent_tree(&context->et, INODE_CACHE(inode), di_bh); __ocfs2_move_extents_range()
784 ocfs2_init_dealloc_ctxt(&context->dealloc); __ocfs2_move_extents_range()
792 do_defrag = context->auto_defrag; __ocfs2_move_extents_range()
862 ret = ocfs2_defrag_extent(context, cpos, phys_cpos, __ocfs2_move_extents_range()
865 ret = ocfs2_move_extent(context, cpos, phys_cpos, __ocfs2_move_extents_range()
877 context->clusters_moved += alloc_size; __ocfs2_move_extents_range()
888 context->clusters_moved); __ocfs2_move_extents_range()
890 context->new_phys_cpos); __ocfs2_move_extents_range()
893 ocfs2_run_deallocs(osb, &context->dealloc); __ocfs2_move_extents_range()
898 static int ocfs2_move_extents(struct ocfs2_move_extents_context *context) ocfs2_move_extents() argument
902 struct inode *inode = context->inode; ocfs2_move_extents()
932 status = __ocfs2_move_extents_range(di_bh, context); ocfs2_move_extents()
985 struct ocfs2_move_extents_context *context; ocfs2_ioctl_move_extents() local
1004 context = kzalloc(sizeof(struct ocfs2_move_extents_context), GFP_NOFS); ocfs2_ioctl_move_extents()
1005 if (!context) { ocfs2_ioctl_move_extents()
1011 context->inode = inode; ocfs2_ioctl_move_extents()
1012 context->file = filp; ocfs2_ioctl_move_extents()
1027 context->range = &range; ocfs2_ioctl_move_extents()
1030 context->auto_defrag = 1; ocfs2_ioctl_move_extents()
1043 context->partial = 1; ocfs2_ioctl_move_extents()
1057 status = ocfs2_move_extents(context); ocfs2_ioctl_move_extents()
1070 kfree(context); ocfs2_ioctl_move_extents()
58 __ocfs2_move_extent(handle_t *handle, struct ocfs2_move_extents_context *context, u32 cpos, u32 len, u32 p_cpos, u32 new_p_cpos, int ext_flags) __ocfs2_move_extent() argument
763 __ocfs2_move_extents_range(struct buffer_head *di_bh, struct ocfs2_move_extents_context *context) __ocfs2_move_extents_range() argument
/linux-4.1.27/arch/blackfin/include/asm/
H A Dmmu_context.h64 mm->context.l1_stack_save = current_l1_stack_save = (void*)sp_base; activate_l1stack()
82 if (prev_mm->context.page_rwx_mask == current_rwx_mask[cpu]) { __switch_mm()
84 set_mask_dcplbs(next_mm->context.page_rwx_mask, cpu); __switch_mm()
90 if (!next_mm->context.l1_stack_save) __switch_mm()
92 if (next_mm->context.l1_stack_save == current_l1_stack_save) __switch_mm()
97 current_l1_stack_save = next_mm->context.l1_stack_save; __switch_mm()
123 unsigned long *mask = mm->context.page_rwx_mask; protect_page()
154 if (mm->context.page_rwx_mask == current_rwx_mask[cpu]) { update_protections()
156 set_mask_dcplbs(mm->context.page_rwx_mask, cpu); update_protections()
171 /* Called when creating a new context during fork() or execve(). */
177 mm->context.page_rwx_mask = (unsigned long *)p; init_new_context()
178 memset(mm->context.page_rwx_mask, 0, init_new_context()
192 if (current_l1_stack_save == mm->context.l1_stack_save) destroy_context()
194 if (mm->context.l1_stack_save) destroy_context()
198 while ((tmp = mm->context.sram_list)) { destroy_context()
199 mm->context.sram_list = tmp->next; destroy_context()
204 if (current_rwx_mask[cpu] == mm->context.page_rwx_mask) destroy_context()
206 free_pages((unsigned long)mm->context.page_rwx_mask, page_mask_order); destroy_context()
/linux-4.1.27/include/linux/sunrpc/
H A Dauth_gss.h43 struct xdr_netobj gc_ctx; /* context handle */
52 /* return from gss NULL PROC init sec context */
54 struct xdr_netobj gr_ctx; /* context handle */
62 * code needs to know about a single security context. In particular,
63 * gc_gss_ctx is the context handle that is used to do gss-api calls, while
64 * gc_wire_ctx is the context handle that is used to identify the context on
/linux-4.1.27/drivers/gpu/drm/
H A Ddrm_lock.c43 static int drm_lock_take(struct drm_lock_data *lock_data, unsigned int context);
69 if (lock->context == DRM_KERNEL_CONTEXT) { drm_legacy_lock()
70 DRM_ERROR("Process %d using kernel context %d\n", drm_legacy_lock()
71 task_pid_nr(current), lock->context); drm_legacy_lock()
76 lock->context, task_pid_nr(current), drm_legacy_lock()
92 if (drm_lock_take(&master->lock, lock->context)) { drm_legacy_lock()
113 DRM_DEBUG("%d %s\n", lock->context, drm_legacy_lock()
126 dev->sigdata.context = lock->context; drm_legacy_lock()
135 lock->context); drm_legacy_lock()
162 if (lock->context == DRM_KERNEL_CONTEXT) { drm_legacy_unlock()
163 DRM_ERROR("Process %d using kernel context %d\n", drm_legacy_unlock()
164 task_pid_nr(current), lock->context); drm_legacy_unlock()
168 if (drm_legacy_lock_free(&master->lock, lock->context)) { drm_legacy_unlock()
180 * \param context locking context.
183 * Attempt to mark the lock as held by the given context, via the \p cmpxchg instruction.
187 unsigned int context) drm_lock_take()
198 new = context | _DRM_LOCK_HELD | drm_lock_take()
206 if (_DRM_LOCKING_CONTEXT(old) == context) { drm_lock_take()
208 if (context != DRM_KERNEL_CONTEXT) { drm_lock_take()
210 context); drm_lock_take()
216 if ((_DRM_LOCKING_CONTEXT(new)) == context && (new & _DRM_LOCK_HELD)) { drm_lock_take()
224 * This takes a lock forcibly and hands it to context. Should ONLY be used
229 * \param context locking context.
233 * Marks the lock as held by the given context, via the \p cmpxchg instruction.
236 unsigned int context) drm_lock_transfer()
244 new = context | _DRM_LOCK_HELD; drm_lock_transfer()
255 * \param context context.
261 int drm_legacy_lock_free(struct drm_lock_data *lock_data, unsigned int context) drm_legacy_lock_free() argument
281 if (_DRM_LOCK_IS_HELD(old) && _DRM_LOCKING_CONTEXT(old) != context) { drm_legacy_lock_free()
283 context, _DRM_LOCKING_CONTEXT(old)); drm_legacy_lock_free()
309 || _DRM_LOCKING_CONTEXT(lock->lock) != dev->sigdata.context) drm_notifier()
324 * with the kernel context if it is free, otherwise it gets the highest priority when and if
186 drm_lock_take(struct drm_lock_data *lock_data, unsigned int context) drm_lock_take() argument
235 drm_lock_transfer(struct drm_lock_data *lock_data, unsigned int context) drm_lock_transfer() argument
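
drm_lock_take() above is a compare-and-swap loop: read the lock word, build the desired value (context | _DRM_LOCK_HELD, keeping the holder's context bits if it is already held), and retry the cmpxchg until it sticks. The same shape in C11 atomics; this is a model of the technique, not the DRM semantics (queueing and kernel-context handling are dropped):

#include <stdatomic.h>
#include <stdio.h>

#define LOCK_HELD 0x80000000u         /* stand-in for _DRM_LOCK_HELD */
#define CTX(x)    ((x) & ~LOCK_HELD)  /* stand-in for _DRM_LOCKING_CONTEXT */

static atomic_uint lock_word;

/* Try to mark the lock as held by `context`; returns 1 on success. */
static int lock_take(unsigned int context)
{
	unsigned int old, new;

	do {
		old = atomic_load(&lock_word);
		if (old & LOCK_HELD)          /* already held: succeed only */
			return CTX(old) == context; /* if we are the holder */
		new = context | LOCK_HELD;
	} while (!atomic_compare_exchange_weak(&lock_word, &old, new));

	return 1;
}

int main(void)
{
	printf("ctx 5 take: %d\n", lock_take(5));   /* 1: acquired */
	printf("ctx 7 take: %d\n", lock_take(7));   /* 0: held by 5 */
	return 0;
}
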
H A Ddrm_context.c45 * Free a handle from the context bitmap.
48 * \param ctx_handle context handle.
65 * \return (non-negative) context handle on success or a negative number on failure.
146 * Get per-context SAREA.
190 * Set per-context SAREA.
234 /** \name The actual DRM context handling routines */
238 * Switch context.
241 * \param old old context handle.
242 * \param new new context handle.
265 * Complete context switch.
268 * \param new new context handle.
281 DRM_ERROR("Lock isn't held after context switch\n"); drm_context_switch_complete()
284 /* If a context switch is ever initiated drm_context_switch_complete()
322 * Add context.
330 * Get a new handle for the context and copy to userspace.
340 /* Skip kernel's context and get a new one. */ drm_legacy_addctx()
368 * Get context.
381 /* This is 0, because we don't handle any context flags */ drm_legacy_getctx()
388 * Switch context.
408 * New context.
430 * Remove context.
438 * If not the special kernel context, calls ctxbitmap_free() to free the specified context.
/linux-4.1.27/arch/score/include/asm/
H A Dmmu_context.h16 * into the context register.
57 mm->context = asid; get_new_mmu_context()
62 * Initialize the context related info for a new mm_struct
68 mm->context = 0; init_new_context()
78 if ((next->context ^ asid_cache) & ASID_VERSION_MASK) switch_mm()
81 pevn_set(next->context); switch_mm()
87 * Destroy context related info for an mm_struct that is about
99 * the context for the new mm so we see the new mappings.
108 pevn_set(next->context); activate_mm()
/linux-4.1.27/arch/hexagon/include/asm/
H A Dmmu_context.h2 * MM context support for the Hexagon architecture
52 * init_new_context - initialize context related info for new mm_struct instance
59 /* mm->context is set up by pgd_alloc */ init_new_context()
64 * Switch active mm context
75 if (next->context.generation < prev->context.generation) { switch_mm()
79 next->context.generation = prev->context.generation; switch_mm()
82 __vmnewmap((void *)next->context.ptbase); switch_mm()
/linux-4.1.27/tools/perf/scripts/python/Perf-Trace-Util/
H A DContext.c31 PyObject *context; perf_trace_context_common_pc() local
34 if (!PyArg_ParseTuple(args, "O", &context)) perf_trace_context_common_pc()
37 scripting_context = PyCObject_AsVoidPtr(context); perf_trace_context_common_pc()
47 PyObject *context; perf_trace_context_common_flags() local
50 if (!PyArg_ParseTuple(args, "O", &context)) perf_trace_context_common_flags()
53 scripting_context = PyCObject_AsVoidPtr(context); perf_trace_context_common_flags()
63 PyObject *context; perf_trace_context_common_lock_depth() local
66 if (!PyArg_ParseTuple(args, "O", &context)) perf_trace_context_common_lock_depth()
69 scripting_context = PyCObject_AsVoidPtr(context); perf_trace_context_common_lock_depth()
/linux-4.1.27/drivers/infiniband/core/
H A Dumem_odp.c82 static void ib_ucontext_notifier_start_account(struct ib_ucontext *context) ib_ucontext_notifier_start_account() argument
84 atomic_inc(&context->notifier_count); ib_ucontext_notifier_start_account()
91 static void ib_ucontext_notifier_end_account(struct ib_ucontext *context) ib_ucontext_notifier_end_account() argument
93 int zero_notifiers = atomic_dec_and_test(&context->notifier_count); ib_ucontext_notifier_end_account()
96 !list_empty(&context->no_private_counters)) { ib_ucontext_notifier_end_account()
103 down_write(&context->umem_rwsem); ib_ucontext_notifier_end_account()
107 if (!atomic_read(&context->notifier_count)) { ib_ucontext_notifier_end_account()
109 &context->no_private_counters, ib_ucontext_notifier_end_account()
119 up_write(&context->umem_rwsem); ib_ucontext_notifier_end_account()
135 item->context->invalidate_range(item, ib_umem_start(item), ib_umem_notifier_release_trampoline()
143 struct ib_ucontext *context = container_of(mn, struct ib_ucontext, mn); ib_umem_notifier_release() local
145 if (!context->invalidate_range) ib_umem_notifier_release()
148 ib_ucontext_notifier_start_account(context); ib_umem_notifier_release()
149 down_read(&context->umem_rwsem); ib_umem_notifier_release()
150 rbt_ib_umem_for_each_in_range(&context->umem_tree, 0, ib_umem_notifier_release()
154 up_read(&context->umem_rwsem); ib_umem_notifier_release()
161 item->context->invalidate_range(item, start, start + PAGE_SIZE); invalidate_page_trampoline()
170 struct ib_ucontext *context = container_of(mn, struct ib_ucontext, mn); ib_umem_notifier_invalidate_page() local
172 if (!context->invalidate_range) ib_umem_notifier_invalidate_page()
175 ib_ucontext_notifier_start_account(context); ib_umem_notifier_invalidate_page()
176 down_read(&context->umem_rwsem); ib_umem_notifier_invalidate_page()
177 rbt_ib_umem_for_each_in_range(&context->umem_tree, address, ib_umem_notifier_invalidate_page()
180 up_read(&context->umem_rwsem); ib_umem_notifier_invalidate_page()
181 ib_ucontext_notifier_end_account(context); ib_umem_notifier_invalidate_page()
188 item->context->invalidate_range(item, start, end); invalidate_range_start_trampoline()
197 struct ib_ucontext *context = container_of(mn, struct ib_ucontext, mn); ib_umem_notifier_invalidate_range_start() local
199 if (!context->invalidate_range) ib_umem_notifier_invalidate_range_start()
202 ib_ucontext_notifier_start_account(context); ib_umem_notifier_invalidate_range_start()
203 down_read(&context->umem_rwsem); ib_umem_notifier_invalidate_range_start()
204 rbt_ib_umem_for_each_in_range(&context->umem_tree, start, ib_umem_notifier_invalidate_range_start()
207 up_read(&context->umem_rwsem); ib_umem_notifier_invalidate_range_start()
222 struct ib_ucontext *context = container_of(mn, struct ib_ucontext, mn); ib_umem_notifier_invalidate_range_end() local
224 if (!context->invalidate_range) ib_umem_notifier_invalidate_range_end()
227 down_read(&context->umem_rwsem); ib_umem_notifier_invalidate_range_end()
228 rbt_ib_umem_for_each_in_range(&context->umem_tree, start, ib_umem_notifier_invalidate_range_end()
231 up_read(&context->umem_rwsem); ib_umem_notifier_invalidate_range_end()
232 ib_ucontext_notifier_end_account(context); ib_umem_notifier_invalidate_range_end()
242 int ib_umem_odp_get(struct ib_ucontext *context, struct ib_umem *umem) ib_umem_odp_get() argument
256 if (context->tgid != our_pid) { ib_umem_odp_get()
292 down_write(&context->umem_rwsem); ib_umem_odp_get()
293 context->odp_mrs_count++; ib_umem_odp_get()
296 &context->umem_tree); ib_umem_odp_get()
297 if (likely(!atomic_read(&context->notifier_count)) || ib_umem_odp_get()
298 context->odp_mrs_count == 1) ib_umem_odp_get()
302 &context->no_private_counters); ib_umem_odp_get()
303 downgrade_write(&context->umem_rwsem); ib_umem_odp_get()
305 if (context->odp_mrs_count == 1) { ib_umem_odp_get()
308 * for this context! ib_umem_odp_get()
310 atomic_set(&context->notifier_count, 0); ib_umem_odp_get()
311 INIT_HLIST_NODE(&context->mn.hlist); ib_umem_odp_get()
312 context->mn.ops = &ib_umem_notifiers; ib_umem_odp_get()
318 ret_val = mmu_notifier_register(&context->mn, mm); ib_umem_odp_get()
327 up_read(&context->umem_rwsem); ib_umem_odp_get()
339 up_read(&context->umem_rwsem); ib_umem_odp_get()
352 struct ib_ucontext *context = umem->context; ib_umem_odp_release() local
363 down_write(&context->umem_rwsem); ib_umem_odp_release()
366 &context->umem_tree); ib_umem_odp_release()
367 context->odp_mrs_count--; ib_umem_odp_release()
380 downgrade_write(&context->umem_rwsem); ib_umem_odp_release()
381 if (!context->odp_mrs_count) { ib_umem_odp_release()
385 owning_process = get_pid_task(context->tgid, ib_umem_odp_release()
401 mmu_notifier_unregister(&context->mn, owning_mm); ib_umem_odp_release()
409 up_read(&context->umem_rwsem); ib_umem_odp_release()
443 struct ib_device *dev = umem->context->device; ib_umem_odp_map_dma_single_page()
482 if (umem->context->invalidate_range || !stored_page) ib_umem_odp_map_dma_single_page()
485 if (remove_existing_mapping && umem->context->invalidate_range) { ib_umem_odp_map_dma_single_page()
547 owning_process = get_pid_task(umem->context->tgid, PIDTYPE_PID); ib_umem_odp_map_dma_pages()
626 struct ib_device *dev = umem->context->device; ib_umem_odp_unmap_dma_pages()
661 if (!umem->context->invalidate_range) ib_umem_odp_unmap_dma_pages()
H A Ddevice.c205 struct ib_client_data *context; add_client_context() local
208 context = kmalloc(sizeof *context, GFP_KERNEL); add_client_context()
209 if (!context) { add_client_context()
210 printk(KERN_WARNING "Couldn't allocate client context for %s/%s\n", add_client_context()
215 context->client = client; add_client_context()
216 context->data = NULL; add_client_context()
219 list_add(&context->list, &device->client_data_list); add_client_context()
340 struct ib_client_data *context, *tmp; ib_unregister_device() local
359 list_for_each_entry_safe(context, tmp, &device->client_data_list, list) ib_unregister_device()
360 kfree(context); ib_unregister_device()
407 struct ib_client_data *context, *tmp; ib_unregister_client() local
418 list_for_each_entry_safe(context, tmp, &device->client_data_list, list) ib_unregister_client()
419 if (context->client == client) { ib_unregister_client()
420 list_del(&context->list); ib_unregister_client()
421 kfree(context); ib_unregister_client()
432 * ib_get_client_data - Get IB client context
433 * @device:Device to get context for
434 * @client:Client to get context for
436 * ib_get_client_data() returns client context set with
441 struct ib_client_data *context; ib_get_client_data() local
446 list_for_each_entry(context, &device->client_data_list, list) ib_get_client_data()
447 if (context->client == client) { ib_get_client_data()
448 ret = context->data; ib_get_client_data()
458 * ib_set_client_data - Set IB client context
459 * @device:Device to set context for
460 * @client:Client to set context for
463 * ib_set_client_data() sets client context that can be retrieved with
469 struct ib_client_data *context; ib_set_client_data() local
473 list_for_each_entry(context, &device->client_data_list, list) ib_set_client_data()
474 if (context->client == client) { ib_set_client_data()
475 context->data = data; ib_set_client_data()
479 printk(KERN_WARNING "No client context found for %s/%s\n", ib_set_client_data()
494 * callback may occur in interrupt context.
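The get/set pair documented above is normally used from an ib_client's add/remove callbacks: the client stashes its per-device state in add() and fetches it back wherever it sees the device again. A hedged sketch, with hypothetical my_client/my_state names:

#include <linux/slab.h>
#include <rdma/ib_verbs.h>

static struct ib_client my_client;	/* defined below */

struct my_state { int opened; };

static void my_add_one(struct ib_device *device)
{
	struct my_state *st = kzalloc(sizeof(*st), GFP_KERNEL);

	if (st)
		ib_set_client_data(device, &my_client, st);
}

static void my_remove_one(struct ib_device *device)
{
	kfree(ib_get_client_data(device, &my_client));	/* drop our state */
}

static struct ib_client my_client = {
	.name	= "my_client",
	.add	= my_add_one,
	.remove	= my_remove_one,
};

/* registration: ib_register_client(&my_client); */
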
/linux-4.1.27/arch/xtensa/include/asm/
H A Dmmu_context.h2 * Switch an MMU context.
38 * any user or kernel context. We use the reserved values in the
80 mm->context.asid[cpu] = asid; get_new_mmu_context()
81 mm->context.cpu = cpu; get_new_mmu_context()
91 unsigned long asid = mm->context.asid[cpu]; get_mmu_context()
102 set_rasid_register(ASID_INSERT(mm->context.asid[cpu])); activate_context()
107 * Initialize the context related info for a new mm_struct
117 mm->context.asid[cpu] = NO_CONTEXT; for_each_possible_cpu()
119 mm->context.cpu = -1;
127 int migrated = next->context.cpu != cpu; switch_mm()
131 next->context.cpu = cpu; switch_mm()
141 * Destroy context related info for an mm_struct that is about
/linux-4.1.27/arch/microblaze/include/asm/
H A Dmmu_context_mm.h22 * segment IDs). We use a skew on both the context and the high 4 bits
45 * Set the current MMU context.
54 extern void set_context(mm_context_t context, pgd_t *pgd);
63 * This caches the next context number that we expect to be free.
64 * Its use is an optimization only; we can't rely on this context
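The comments above describe the classic software-managed context-number allocator: hand out IDs from a cached "next free" counter and fall back to stealing when the space is exhausted. A minimal sketch of that allocation step, assuming a fixed-size context map and illustrative names:

#define NO_CONTEXT	0
#define LAST_CONTEXT	255

static unsigned long context_map[(LAST_CONTEXT + 1) / (8 * sizeof(long)) + 1];
static unsigned int next_mmu_context = 1;	/* context 0 is the kernel's */

static unsigned int alloc_context(void)
{
	unsigned int ctx = next_mmu_context;

	while (test_and_set_bit(ctx, context_map)) {
		/* wrap around, skipping the reserved kernel context 0 */
		ctx = (ctx + 1) & LAST_CONTEXT;
		if (ctx == 0)
			ctx = 1;
		if (ctx == next_mmu_context)
			return NO_CONTEXT;	/* full: caller must steal one */
	}
	next_mmu_context = (ctx + 1) & LAST_CONTEXT;
	return ctx;
}
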
79 * Get a new mmu context for the address space described by `mm'.
85 if (mm->context != NO_CONTEXT) get_mmu_context()
96 mm->context = ctx; get_mmu_context()
101 * Set up the context for a new address space.
103 # define init_new_context(tsk, mm) (((mm)->context = NO_CONTEXT), 0)
106 * We're finished using the context for an address space.
110 if (mm->context != NO_CONTEXT) { destroy_context()
111 clear_bit(mm->context, context_map); destroy_context()
112 mm->context = NO_CONTEXT; destroy_context()
122 set_context(next->context, next->pgd); switch_mm()
127 * the context for the new mm so we see the new mappings.
134 set_context(mm->context, mm->pgd); activate_mm()
/linux-4.1.27/arch/sparc/mm/
H A Dtsb.c77 spin_lock_irqsave(&mm->context.lock, flags); flush_tsb_user()
79 base = (unsigned long) mm->context.tsb_block[MM_TSB_BASE].tsb; flush_tsb_user()
80 nentries = mm->context.tsb_block[MM_TSB_BASE].tsb_nentries; flush_tsb_user()
86 if (mm->context.tsb_block[MM_TSB_HUGE].tsb) { flush_tsb_user()
87 base = (unsigned long) mm->context.tsb_block[MM_TSB_HUGE].tsb; flush_tsb_user()
88 nentries = mm->context.tsb_block[MM_TSB_HUGE].tsb_nentries; flush_tsb_user()
94 spin_unlock_irqrestore(&mm->context.lock, flags); flush_tsb_user()
101 spin_lock_irqsave(&mm->context.lock, flags); flush_tsb_user_page()
103 base = (unsigned long) mm->context.tsb_block[MM_TSB_BASE].tsb; flush_tsb_user_page()
104 nentries = mm->context.tsb_block[MM_TSB_BASE].tsb_nentries; flush_tsb_user_page()
110 if (mm->context.tsb_block[MM_TSB_HUGE].tsb) { flush_tsb_user_page()
111 base = (unsigned long) mm->context.tsb_block[MM_TSB_HUGE].tsb; flush_tsb_user_page()
112 nentries = mm->context.tsb_block[MM_TSB_HUGE].tsb_nentries; flush_tsb_user_page()
118 spin_unlock_irqrestore(&mm->context.lock, flags); flush_tsb_user_page()
134 mm->context.tsb_block[tsb_idx].tsb_nentries = setup_tsb_params()
151 tsb_paddr = __pa(mm->context.tsb_block[tsb_idx].tsb); setup_tsb_params()
212 mm->context.tsb_block[tsb_idx].tsb_reg_val = tsb_reg; setup_tsb_params()
213 mm->context.tsb_block[tsb_idx].tsb_map_vaddr = 0; setup_tsb_params()
214 mm->context.tsb_block[tsb_idx].tsb_map_pte = 0; setup_tsb_params()
220 mm->context.tsb_block[tsb_idx].tsb_reg_val = tsb_reg; setup_tsb_params()
221 mm->context.tsb_block[tsb_idx].tsb_map_vaddr = base; setup_tsb_params()
222 mm->context.tsb_block[tsb_idx].tsb_map_pte = tte; setup_tsb_params()
227 struct hv_tsb_descr *hp = &mm->context.tsb_descr[tsb_idx]; setup_tsb_params()
367 if (mm->context.tsb_block[tsb_index].tsb == NULL && tsb_grow()
378 if (mm->context.tsb_block[tsb_index].tsb != NULL) tsb_grow()
379 mm->context.tsb_block[tsb_index].tsb_rss_limit = ~0UL; tsb_grow()
390 * We have to hold mm->context.lock while committing to the tsb_grow()
408 spin_lock_irqsave(&mm->context.lock, flags); tsb_grow()
410 old_tsb = mm->context.tsb_block[tsb_index].tsb; tsb_grow()
412 (mm->context.tsb_block[tsb_index].tsb_reg_val & 0x7UL); tsb_grow()
413 old_size = (mm->context.tsb_block[tsb_index].tsb_nentries * tsb_grow()
422 (rss < mm->context.tsb_block[tsb_index].tsb_rss_limit))) { tsb_grow()
423 spin_unlock_irqrestore(&mm->context.lock, flags); tsb_grow()
429 mm->context.tsb_block[tsb_index].tsb_rss_limit = new_rss_limit; tsb_grow()
446 mm->context.tsb_block[tsb_index].tsb = new_tsb; tsb_grow()
449 spin_unlock_irqrestore(&mm->context.lock, flags); tsb_grow()
475 spin_lock_init(&mm->context.lock); init_new_context()
477 mm->context.sparc64_ctx_val = 0UL; init_new_context()
484 huge_pte_count = mm->context.huge_pte_count; init_new_context()
485 mm->context.huge_pte_count = 0; init_new_context()
493 mm->context.tsb_block[i].tsb = NULL; init_new_context()
505 if (unlikely(!mm->context.tsb_block[MM_TSB_BASE].tsb)) init_new_context()
528 tsb_destroy_one(&mm->context.tsb_block[i]); destroy_context()
532 if (CTX_VALID(mm->context)) { destroy_context()
533 unsigned long nr = CTX_NRBITS(mm->context); destroy_context()
/linux-4.1.27/drivers/usb/image/
H A Dmicrotek.c77 * 20000515 Put transfer context and URB in mts_desc (john)
190 MTS_DEBUG("transfer = 0x%x context = 0x%x\n",(int)transfer,(int)context ); \
191 MTS_DEBUG("status = 0x%x data-length = 0x%x sent = 0x%x\n",transfer->status,(int)context->data_length, (int)transfer->actual_length ); \
192 mts_debug_dump(context->instance);\
207 struct mts_transfer_context* context = (struct mts_transfer_context*)transfer->context; \
375 /* Interrupt context! */ mts_int_submit_urb()
377 /* Holding transfer->context->lock! */ mts_int_submit_urb()
384 context->instance->usb_dev, mts_int_submit_urb()
389 context mts_int_submit_urb()
395 context->srb->result = DID_ERROR << 16; mts_int_submit_urb()
402 /* Interrupt context! */ mts_transfer_cleanup()
406 if ( likely(context->final_callback != NULL) ) mts_transfer_cleanup()
407 context->final_callback(context->srb); mts_transfer_cleanup()
414 context->srb->result &= MTS_SCSI_ERR_MASK; mts_transfer_done()
415 context->srb->result |= (unsigned)(*context->scsi_status)<<1; mts_transfer_done()
422 /* Interrupt context! */ mts_get_status()
427 usb_rcvbulkpipe(context->instance->usb_dev, mts_get_status()
428 context->instance->ep_response), mts_get_status()
429 context->scsi_status, mts_get_status()
435 /* Interrupt context! */ mts_data_done()
440 if ( context->data_length != transfer->actual_length ) { mts_data_done()
441 scsi_set_resid(context->srb, context->data_length - mts_data_done()
444 context->srb->result = (status == -ENOENT ? DID_ABORT : DID_ERROR)<<16; mts_data_done()
452 /* Interrupt context! */ mts_command_done()
461 context->srb->result = DID_ABORT<<16; mts_command_done()
466 context->srb->result = DID_ERROR<<16; mts_command_done()
473 if (context->srb->cmnd[0] == REQUEST_SENSE) { mts_command_done()
475 context->data_pipe, mts_command_done()
476 context->srb->sense_buffer, mts_command_done()
477 context->data_length, mts_command_done()
479 } else { if ( context->data ) { mts_command_done()
481 context->data_pipe, mts_command_done()
482 context->data, mts_command_done()
483 context->data_length, mts_command_done()
484 scsi_sg_count(context->srb) > 1 ? mts_command_done()
498 MTS_DEBUG("Processing fragment %d of %d\n", context->fragment, mts_do_sg()
499 scsi_sg_count(context->srb)); mts_do_sg()
502 context->srb->result = (status == -ENOENT ? DID_ABORT : DID_ERROR)<<16; mts_do_sg()
506 sg = scsi_sglist(context->srb); mts_do_sg()
507 context->fragment++; mts_do_sg()
509 context->data_pipe, mts_do_sg()
510 sg_virt(&sg[context->fragment]), mts_do_sg()
511 sg[context->fragment].length, mts_do_sg()
512 context->fragment + 1 == scsi_sg_count(context->srb) ? mts_do_sg()
536 desc->context.instance = desc; mts_build_transfer_context()
537 desc->context.srb = srb; mts_build_transfer_context()
538 desc->context.fragment = 0; mts_build_transfer_context()
541 desc->context.data = NULL; mts_build_transfer_context()
542 desc->context.data_length = 0; mts_build_transfer_context()
546 desc->context.data = sg_virt(&sg[0]); mts_build_transfer_context()
547 desc->context.data_length = sg[0].length; mts_build_transfer_context()
568 desc->context.data_pipe = pipe; mts_build_transfer_context()
604 &desc->context mts_scsi_queuecommand_lck()
609 desc->context.final_callback = callback; mts_scsi_queuecommand_lck()
743 new_desc->context.scsi_status = kmalloc(1, GFP_KERNEL); mts_usb_probe()
744 if (!new_desc->context.scsi_status) mts_usb_probe()
785 kfree(new_desc->context.scsi_status); mts_usb_probe()
805 kfree(desc->context.scsi_status); mts_usb_disconnect()
/linux-4.1.27/include/linux/
H A Dssbi.h24 ssbi_reg_read(void *context, unsigned int reg, unsigned int *val) ssbi_reg_read() argument
29 ret = ssbi_read(context, reg, &v, 1); ssbi_reg_read()
37 ssbi_reg_write(void *context, unsigned int reg, unsigned int val) ssbi_reg_write() argument
40 return ssbi_write(context, reg, &v, 1); ssbi_reg_write()
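These two helpers exist so an SSBI-backed device can sit behind a no-bus regmap: the parent SSBI device is passed in as the context that ssbi_read()/ssbi_write() expect. A sketch of the wiring, with illustrative config values:

#include <linux/regmap.h>
#include <linux/ssbi.h>

static const struct regmap_config my_ssbi_regmap_config = {
	.reg_bits	= 16,
	.val_bits	= 8,
	.reg_read	= ssbi_reg_read,	/* context = SSBI parent dev */
	.reg_write	= ssbi_reg_write,
};

/* in probe(), with pdev->dev.parent being the SSBI controller:
 *	map = devm_regmap_init(&pdev->dev, NULL, pdev->dev.parent,
 *			       &my_ssbi_regmap_config);
 */
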
H A Dhardirq.h43 * Enter irq context (on NO_HZ, update jiffies):
48 * Exit irq context without processing softirqs:
58 * Exit irq context and process softirqs if needed:
H A Ddm-kcopyd.h62 void *context);
66 unsigned flags, dm_kcopyd_notify_fn fn, void *context);
72 * It must not be called from interrupt context.
76 * It may be called from interrupt context.
80 dm_kcopyd_notify_fn fn, void *context);
85 unsigned flags, dm_kcopyd_notify_fn fn, void *context);
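The context pointer given to dm_kcopyd_copy() is returned, untouched, as the last argument of the notify callback, which per the comments above may run in interrupt context. A minimal sketch, assuming a hypothetical my_job structure:

#include <linux/completion.h>
#include <linux/dm-kcopyd.h>

struct my_job {
	struct completion done;
};

static void my_copy_done(int read_err, unsigned long write_err, void *context)
{
	struct my_job *job = context;	/* recover our per-copy state */

	complete(&job->done);		/* safe from interrupt context */
}

/* submit side (process context only):
 *	init_completion(&job->done);
 *	dm_kcopyd_copy(kc, &from, 1, &to, 0, my_copy_done, job);
 *	wait_for_completion(&job->done);
 */
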
H A Dww_mutex.h96 * ww_acquire_init - initialize a w/w acquire context
97 * @ctx: w/w acquire context to initialize
98 * @ww_class: w/w class of the context
100 * Initializes a context to acquire multiple mutexes of the given w/w class.
106 * Mixing of context-based w/w mutex acquiring and single w/w mutex locking can
115 * An acquire context must be released with ww_acquire_fini by the same task
116 * before the memory is freed. It is recommended to allocate the context itself
144 * @ctx: the acquire context
147 * this context are forbidden.
164 * ww_acquire_fini - releases a w/w acquire context
165 * @ctx: the acquire context to free
167 * Releases a w/w acquire context. This must be called _after_ all acquired w/w
197 * @ctx: w/w acquire context, or NULL to acquire only a single lock.
203 * will either sleep until it is (wait case), or it selects the current context
205 * same lock with the same context twice is also detected and signalled by
209 * the given context and then wait for this contending lock to be available by
219 * of the same w/w lock class as was used to initialize the acquire context.
235 * @ctx: w/w acquire context
241 * will either sleep until it is (wait case), or it selects the current context
243 * same lock with the same context twice is also detected and signalled by
248 * the given context and then wait for this contending lock to be available by
258 * of the same w/w lock class as was used to initialize the acquire context.
274 * @ctx: w/w acquire context
276 * Acquires a w/w mutex with the given context after a wound case. This function
280 * context and then call this function on the contended lock.
287 * with the context held. It is forbidden to call this on anything else than the
308 * @ctx: w/w acquire context
310 * Acquires a w/w mutex with the given context after a wound case. This function
316 * context and then call this function on the contended lock.
323 * with the given context held. It is forbidden to call this on anything else
343 * ww_mutex_trylock - tries to acquire the w/w mutex without acquire context
346 * Trylocks a mutex without acquire context, so no deadlock detection is
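The acquire-context API above composes into a small back-off loop. A hedged sketch for two w/w mutexes of the same class: on -EDEADLK drop whatever is held, sleep on the contended lock with the _slow variant, and retry with the contended lock taken first. Names are illustrative; the header's own method #1/#2 examples show the general list-based form.

#include <linux/kernel.h>
#include <linux/ww_mutex.h>

static DEFINE_WW_CLASS(my_ww_class);

static void lock_pair(struct ww_mutex *a, struct ww_mutex *b)
{
	struct ww_acquire_ctx ctx;
	bool slow = false;

	ww_acquire_init(&ctx, &my_ww_class);
retry:
	if (slow)
		ww_mutex_lock_slow(a, &ctx);	/* sleep on the contended lock */
	else if (ww_mutex_lock(a, &ctx) == -EDEADLK) {
		slow = true;		/* lost the race before holding anything */
		goto retry;
	}
	if (ww_mutex_lock(b, &ctx) == -EDEADLK) {
		ww_mutex_unlock(a);	/* back off completely */
		swap(a, b);		/* contended lock goes first next pass */
		slow = true;
		goto retry;
	}
	ww_acquire_done(&ctx);

	/* ... both locks held, do the work ... */

	ww_mutex_unlock(a);
	ww_mutex_unlock(b);
	ww_acquire_fini(&ctx);
}
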
H A Dfirmware.h46 const char *name, struct device *device, gfp_t gfp, void *context,
47 void (*cont)(const struct firmware *fw, void *context));
61 const char *name, struct device *device, gfp_t gfp, void *context, request_firmware_nowait()
62 void (*cont)(const struct firmware *fw, void *context)) request_firmware_nowait()
59 request_firmware_nowait( struct module *module, bool uevent, const char *name, struct device *device, gfp_t gfp, void *context, void (*cont)(const struct firmware *fw, void *context)) request_firmware_nowait() argument
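A typical use of the asynchronous variant threads the driver's private structure through as the context, so the continuation can find its device again. A sketch with hypothetical my_dev/my_fw.bin names:

#include <linux/device.h>
#include <linux/firmware.h>
#include <linux/module.h>

struct my_dev {
	struct device *dev;
};

static void my_fw_cont(const struct firmware *fw, void *context)
{
	struct my_dev *md = context;	/* the pointer we passed below */

	if (!fw) {
		dev_err(md->dev, "firmware load failed\n");
		return;
	}
	/* ... program fw->data / fw->size into the device ... */
	release_firmware(fw);
}

static int my_load(struct my_dev *md)
{
	return request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG,
				       "my_fw.bin", md->dev, GFP_KERNEL,
				       md, my_fw_cont);
}
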
H A Dhw_breakpoint.h49 void *context,
62 void *context,
68 void *context);
94 void *context, register_user_hw_breakpoint()
102 void *context, register_wide_hw_breakpoint_cpu()
107 void *context) { return NULL; }
92 register_user_hw_breakpoint(struct perf_event_attr *attr, perf_overflow_handler_t triggered, void *context, struct task_struct *tsk) register_user_hw_breakpoint() argument
100 register_wide_hw_breakpoint_cpu(struct perf_event_attr *attr, perf_overflow_handler_t triggered, void *context, int cpu) register_wide_hw_breakpoint_cpu() argument
105 register_wide_hw_breakpoint(struct perf_event_attr *attr, perf_overflow_handler_t triggered, void *context) register_wide_hw_breakpoint() argument
H A Dmei_cl_bus.h32 u32 events, void *context);
34 mei_cl_event_cb_t read_cb, void *context);
H A Dasn1_decoder.h20 void *context,
H A Ddm-region-hash.h36 void *context, void (*dispatch_bios)(void *context,
38 void (*wakeup_workers)(void *context),
39 void (*wakeup_all_recovery_waiters)(void *context),
H A Dvexpress.h39 struct regmap * (*regmap_init)(struct device *dev, void *context);
40 void (*regmap_exit)(struct regmap *regmap, void *context);
44 struct vexpress_config_bridge_ops *ops, void *context);
H A Dpreempt_mask.h60 * Are we in a softirq context? Interrupt context?
70 * Are we in NMI context?
104 * Are we running in atomic context? WARNING: this macro cannot
105 * always detect atomic context; in particular, it cannot know about
/linux-4.1.27/arch/m32r/include/asm/
H A Dmmu.h12 /* Default "unsigned long" context */
H A Dmmu_context.h21 * Cache of MMU context last used.
26 #define mm_context(mm) mm->context
30 #define mm_context(mm) mm->context[smp_processor_id()]
56 * Get MMU context if needed.
63 /* Check if we have old version of context. get_mmu_context()
64 If it's old, we need to get new context with new version. */ get_mmu_context()
71 * Initialize the context related info for a new mm_struct
78 mm->context = NO_CONTEXT; init_new_context()
84 mm->context[i] = NO_CONTEXT; init_new_context()
91 * Destroy context related info for an mm_struct that is about
113 * the context for the new mm so we see the new mappings.
/linux-4.1.27/arch/arm/include/asm/xen/
H A Dhypervisor.h7 /* Lazy mode for batching updates / context switch */
/linux-4.1.27/mm/
H A Dmmu_context.c16 * mm context.
18 * from a kernel thread context)
46 * specified mm context which was earlier taken on
49 * from a kernel thread context)
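What this file provides is the use_mm()/unuse_mm() pair: a kernel thread temporarily adopts a user mm so that user-space accesses resolve against it, then drops it again. A minimal sketch of a worker in the style of aio or vhost:

#include <linux/mmu_context.h>
#include <linux/uaccess.h>

static int worker_copy_out(struct mm_struct *mm, void __user *uptr,
			   const void *buf, size_t len)
{
	int ret = 0;

	use_mm(mm);			/* adopt the user address space */
	if (copy_to_user(uptr, buf, len))
		ret = -EFAULT;
	unuse_mm(mm);			/* back to plain kernel context */
	return ret;
}
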
/linux-4.1.27/arch/cris/mm/
H A Dtlb.c17 * The running context is R_MMU_CONTEXT, and each TLB entry contains a
40 D(printk("tlb: alloc context %d (%p)\n", map_replace_ptr, mm)); alloc_context()
52 old_mm->context.page_id = NO_CONTEXT; alloc_context()
57 mm->context.page_id = map_replace_ptr; alloc_context()
67 * if needed, get a new MMU context for the mm. otherwise nothing is done.
73 if(mm->context.page_id == NO_CONTEXT) get_mmu_context()
77 /* called by __exit_mm to destroy the used MMU context if any before
88 if(mm->context.page_id != NO_CONTEXT) { destroy_context()
89 D(printk("destroy_context %d (%p)\n", mm->context.page_id, mm)); destroy_context()
91 page_id_map[mm->context.page_id] = NULL; destroy_context()
111 /* the init_mm has context 0 from the boot */ tlb_init()
/linux-4.1.27/arch/microblaze/mm/
H A Dmmu_context.c39 * Initialize the context management stuff.
44 * The use of context zero is reserved for the kernel. mmu_context_init()
53 * Steal a context from a task that has one at the moment.
55 * This isn't an LRU system, it just frees up each context in
63 /* free up context `next_mmu_context' */ steal_context()
64 /* if we shouldn't free context 0, don't... */ steal_context()
/linux-4.1.27/arch/m68k/sun3/
H A Dmmu_emu.c56 context. 0xffffffff is a marker for kernel context */
61 /* has this context been mmdrop'd? */
203 /* erase the mappings for a dead context. Uses the pg_dir for hints
207 context for when they're cleared */ clear_context()
208 void clear_context(unsigned long context) clear_context() argument
213 if(context) { clear_context()
214 if(!ctx_alloc[context]) clear_context()
215 panic("clear_context: context not allocated\n"); clear_context()
217 ctx_alloc[context]->context = SUN3_INVALID_CONTEXT; clear_context()
218 ctx_alloc[context] = (struct mm_struct *)0; clear_context()
224 sun3_put_context(context); clear_context()
227 if((pmeg_ctx[i] == context) && (pmeg_alloc[i] == 1)) { clear_context()
238 /* gets an empty context. if full, kills the next context listed to
240 /* This context invalidation scheme is, well, totally arbitrary, I'm
250 /* kill someone to get our context */ get_free_context()
265 panic("get_free_context: failed to find free context"); get_free_context()
276 * `context'. Maintain internal PMEG management structures. This doesn't
282 inline void mmu_emu_map_pmeg (int context, int vaddr) mmu_emu_map_pmeg() argument
296 printk("mmu_emu_map_pmeg: pmeg %x to context %d vaddr %x\n", mmu_emu_map_pmeg()
297 curr_pmeg, context, vaddr); mmu_emu_map_pmeg()
304 sun3_put_context(context); mmu_emu_map_pmeg()
317 sun3_put_context(context); mmu_emu_map_pmeg()
324 pmeg_ctx[curr_pmeg] = context; mmu_emu_map_pmeg()
357 unsigned char context; mmu_emu_handle_fault() local
363 context = 0; mmu_emu_handle_fault()
365 context = current->mm->context; mmu_emu_handle_fault()
400 mmu_emu_map_pmeg (context, vaddr); mmu_emu_handle_fault()
/linux-4.1.27/arch/x86/kernel/
H A Dldt.c24 /* context.lock is held for us, so we don't need any locking. */ flush_ldt()
32 pc = &current->active_mm->context; flush_ldt()
78 /* context.lock is held */ install_ldt()
83 smp_store_release(&current_mm->context.ldt, ldt); install_ldt()
112 mutex_init(&mm->context.lock); init_new_context()
115 mm->context.ldt = NULL; init_new_context()
119 mutex_lock(&old_mm->context.lock); init_new_context()
120 if (!old_mm->context.ldt) { init_new_context()
121 mm->context.ldt = NULL; init_new_context()
125 new_ldt = alloc_ldt_struct(old_mm->context.ldt->size); init_new_context()
131 memcpy(new_ldt->entries, old_mm->context.ldt->entries, init_new_context()
135 mm->context.ldt = new_ldt; init_new_context()
138 mutex_unlock(&old_mm->context.lock); init_new_context()
149 free_ldt_struct(mm->context.ldt); destroy_context()
150 mm->context.ldt = NULL; destroy_context()
159 mutex_lock(&mm->context.lock); read_ldt()
161 if (!mm->context.ldt) { read_ldt()
169 size = mm->context.ldt->size * LDT_ENTRY_SIZE; read_ldt()
173 if (copy_to_user(ptr, mm->context.ldt->entries, size)) { read_ldt()
188 mutex_unlock(&mm->context.lock); read_ldt()
248 mutex_lock(&mm->context.lock); write_ldt()
250 old_ldt = mm->context.ldt; write_ldt()
269 mutex_unlock(&mm->context.lock); write_ldt()
/linux-4.1.27/drivers/misc/echo/
H A Doslec.h47 * oslec_create - Create a voice echo canceller context.
49 * @return: The new canceller context, or NULL if the canceller could not be
55 * oslec_free - Free a voice echo canceller context.
56 * @ec: The echo canceller context.
61 * oslec_flush - Flush (reinitialise) a voice echo canceller context.
62 * @ec: The echo canceller context.
67 * oslec_adaption_mode - set the adaption mode of a voice echo canceller context.
68 * @ec The echo canceller context.
77 * @ec: The echo canceller context.
87 * @ec: The echo canceller context.
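Put together, the canceller context above has a simple lifetime: create it sized for the echo tail, feed tx/rx sample pairs through it, and free it. A sketch; the 128-tap tail and mode flags are illustrative:

#include "oslec.h"

static void cancel_block(const int16_t *tx, const int16_t *rx,
			 int16_t *clean, int n)
{
	struct oslec_state *ec;
	int i;

	ec = oslec_create(128, ECHO_CAN_USE_ADAPTION | ECHO_CAN_USE_NLP);
	if (!ec)
		return;
	for (i = 0; i < n; i++)
		clean[i] = oslec_update(ec, tx[i], rx[i]);	/* one sample pair */
	oslec_free(ec);
}
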
/linux-4.1.27/drivers/infiniband/hw/mthca/
H A Dmthca_provider.c301 struct mthca_ucontext *context; mthca_alloc_ucontext() local
315 context = kmalloc(sizeof *context, GFP_KERNEL); mthca_alloc_ucontext()
316 if (!context) mthca_alloc_ucontext()
319 err = mthca_uar_alloc(to_mdev(ibdev), &context->uar); mthca_alloc_ucontext()
321 kfree(context); mthca_alloc_ucontext()
325 context->db_tab = mthca_init_user_db_tab(to_mdev(ibdev)); mthca_alloc_ucontext()
326 if (IS_ERR(context->db_tab)) { mthca_alloc_ucontext()
327 err = PTR_ERR(context->db_tab); mthca_alloc_ucontext()
328 mthca_uar_free(to_mdev(ibdev), &context->uar); mthca_alloc_ucontext()
329 kfree(context); mthca_alloc_ucontext()
334 mthca_cleanup_user_db_tab(to_mdev(ibdev), &context->uar, context->db_tab); mthca_alloc_ucontext()
335 mthca_uar_free(to_mdev(ibdev), &context->uar); mthca_alloc_ucontext()
336 kfree(context); mthca_alloc_ucontext()
340 context->reg_mr_warned = 0; mthca_alloc_ucontext()
342 return &context->ibucontext; mthca_alloc_ucontext()
345 static int mthca_dealloc_ucontext(struct ib_ucontext *context) mthca_dealloc_ucontext() argument
347 mthca_cleanup_user_db_tab(to_mdev(context->device), &to_mucontext(context)->uar, mthca_dealloc_ucontext()
348 to_mucontext(context)->db_tab); mthca_dealloc_ucontext()
349 mthca_uar_free(to_mdev(context->device), &to_mucontext(context)->uar); mthca_dealloc_ucontext()
350 kfree(to_mucontext(context)); mthca_dealloc_ucontext()
355 static int mthca_mmap_uar(struct ib_ucontext *context, mthca_mmap_uar() argument
364 to_mucontext(context)->uar.pfn, mthca_mmap_uar()
372 struct ib_ucontext *context, mthca_alloc_pd()
382 err = mthca_pd_alloc(to_mdev(ibdev), !context, pd); mthca_alloc_pd()
388 if (context) { mthca_alloc_pd()
439 struct mthca_ucontext *context = NULL; mthca_create_srq() local
451 context = to_mucontext(pd->uobject->context); mthca_create_srq()
458 err = mthca_map_user_db(to_mdev(pd->device), &context->uar, mthca_create_srq()
459 context->db_tab, ucmd.db_index, mthca_create_srq()
473 mthca_unmap_user_db(to_mdev(pd->device), &context->uar, mthca_create_srq()
474 context->db_tab, ucmd.db_index); mthca_create_srq()
479 if (context && ib_copy_to_udata(udata, &srq->srqn, sizeof (__u32))) { mthca_create_srq()
495 struct mthca_ucontext *context; mthca_destroy_srq() local
498 context = to_mucontext(srq->uobject->context); mthca_destroy_srq()
500 mthca_unmap_user_db(to_mdev(srq->device), &context->uar, mthca_destroy_srq()
501 context->db_tab, to_msrq(srq)->db_index); mthca_destroy_srq()
526 struct mthca_ucontext *context; mthca_create_qp() local
533 context = to_mucontext(pd->uobject->context); mthca_create_qp()
540 err = mthca_map_user_db(to_mdev(pd->device), &context->uar, mthca_create_qp()
541 context->db_tab, mthca_create_qp()
548 err = mthca_map_user_db(to_mdev(pd->device), &context->uar, mthca_create_qp()
549 context->db_tab, mthca_create_qp()
553 &context->uar, mthca_create_qp()
554 context->db_tab, mthca_create_qp()
572 context = to_mucontext(pd->uobject->context); mthca_create_qp()
575 &context->uar, mthca_create_qp()
576 context->db_tab, mthca_create_qp()
579 &context->uar, mthca_create_qp()
580 context->db_tab, mthca_create_qp()
631 &to_mucontext(qp->uobject->context)->uar, mthca_destroy_qp()
632 to_mucontext(qp->uobject->context)->db_tab, mthca_destroy_qp()
635 &to_mucontext(qp->uobject->context)->uar, mthca_destroy_qp()
636 to_mucontext(qp->uobject->context)->db_tab, mthca_destroy_qp()
646 struct ib_ucontext *context, mthca_create_cq()
657 if (context) { mthca_create_cq()
661 err = mthca_map_user_db(to_mdev(ibdev), &to_mucontext(context)->uar, mthca_create_cq()
662 to_mucontext(context)->db_tab, mthca_create_cq()
667 err = mthca_map_user_db(to_mdev(ibdev), &to_mucontext(context)->uar, mthca_create_cq()
668 to_mucontext(context)->db_tab, mthca_create_cq()
680 if (context) { mthca_create_cq()
690 context ? to_mucontext(context) : NULL, mthca_create_cq()
691 context ? ucmd.pdn : to_mdev(ibdev)->driver_pd.pd_num, mthca_create_cq()
696 if (context && ib_copy_to_udata(udata, &cq->cqn, sizeof (__u32))) { mthca_create_cq()
710 if (context) mthca_create_cq()
711 mthca_unmap_user_db(to_mdev(ibdev), &to_mucontext(context)->uar, mthca_create_cq()
712 to_mucontext(context)->db_tab, ucmd.arm_db_index); mthca_create_cq()
715 if (context) mthca_create_cq()
716 mthca_unmap_user_db(to_mdev(ibdev), &to_mucontext(context)->uar, mthca_create_cq()
717 to_mucontext(context)->db_tab, ucmd.set_db_index); mthca_create_cq()
847 &to_mucontext(cq->uobject->context)->uar, mthca_destroy_cq()
848 to_mucontext(cq->uobject->context)->db_tab, mthca_destroy_cq()
851 &to_mucontext(cq->uobject->context)->uar, mthca_destroy_cq()
852 to_mucontext(cq->uobject->context)->db_tab, mthca_destroy_cq()
990 if (!to_mucontext(pd->uobject->context)->reg_mr_warned) { mthca_reg_user_mr()
995 ++to_mucontext(pd->uobject->context)->reg_mr_warned; mthca_reg_user_mr()
1004 mr->umem = ib_umem_get(pd->uobject->context, start, length, acc, mthca_reg_user_mr()
371 mthca_alloc_pd(struct ib_device *ibdev, struct ib_ucontext *context, struct ib_udata *udata) mthca_alloc_pd() argument
644 mthca_create_cq(struct ib_device *ibdev, int entries, int comp_vector, struct ib_ucontext *context, struct ib_udata *udata) mthca_create_cq() argument
/linux-4.1.27/drivers/iommu/
H A Dmsm_iommu.h38 * context bank. The number of MIDs mapped to the same CB does not affect
48 * ncb Number of context banks present on this IOMMU HW instance
56 * struct msm_iommu_ctx_dev - an IOMMU context bank instance
57 * name Human-readable name given to this context bank
58 * num Index of this context bank within the hardware
59 * mids List of Machine IDs that are to be mapped into this context
91 * struct msm_iommu_ctx_drvdata - an IOMMU context bank instance
92 * @num: Hardware context number of this context
97 * A msm_iommu_ctx_drvdata holds the driver data for a single context bank
107 * Look up an IOMMU context device by its context name. NULL if none found.
114 * Interrupt handler for the IOMMU context fault interrupt. Hooking the
/linux-4.1.27/arch/cris/arch-v10/mm/
H A Dtlb.c20 * The running context is R_MMU_CONTEXT, and each TLB entry contains a
61 /* invalidate the selected mm context only */
67 int page_id = mm->context.page_id; flush_tlb_mm()
70 D(printk("tlb: flush mm context %d (%p)\n", page_id, mm)); flush_tlb_mm()
102 int page_id = mm->context.page_id; flush_tlb_page()
106 D(printk("tlb: flush page %p in context %d (%p)\n", addr, page_id, mm)); flush_tlb_page()
113 /* invalidate those TLB entries that match both the mm context flush_tlb_page()
138 * Initialize the context related info for a new mm_struct
145 mm->context.page_id = NO_CONTEXT; init_new_context()
155 /* make sure we have a context */ switch_mm()
167 /* switch context in the MMU */ switch_mm()
170 next->context, next)); switch_mm()
173 page_id, next->context.page_id); switch_mm()
/linux-4.1.27/drivers/infiniband/hw/mlx4/
H A Ddoorbell.c44 int mlx4_ib_db_map_user(struct mlx4_ib_ucontext *context, unsigned long virt, mlx4_ib_db_map_user() argument
50 mutex_lock(&context->db_page_mutex); mlx4_ib_db_map_user()
52 list_for_each_entry(page, &context->db_page_list, list) mlx4_ib_db_map_user()
64 page->umem = ib_umem_get(&context->ibucontext, virt & PAGE_MASK, mlx4_ib_db_map_user()
72 list_add(&page->list, &context->db_page_list); mlx4_ib_db_map_user()
80 mutex_unlock(&context->db_page_mutex); mlx4_ib_db_map_user()
85 void mlx4_ib_db_unmap_user(struct mlx4_ib_ucontext *context, struct mlx4_db *db) mlx4_ib_db_unmap_user() argument
87 mutex_lock(&context->db_page_mutex); mlx4_ib_db_unmap_user()
95 mutex_unlock(&context->db_page_mutex); mlx4_ib_db_unmap_user()
/linux-4.1.27/drivers/infiniband/hw/mlx5/
H A Ddoorbell.c46 int mlx5_ib_db_map_user(struct mlx5_ib_ucontext *context, unsigned long virt, mlx5_ib_db_map_user() argument
52 mutex_lock(&context->db_page_mutex); mlx5_ib_db_map_user()
54 list_for_each_entry(page, &context->db_page_list, list) mlx5_ib_db_map_user()
66 page->umem = ib_umem_get(&context->ibucontext, virt & PAGE_MASK, mlx5_ib_db_map_user()
74 list_add(&page->list, &context->db_page_list); mlx5_ib_db_map_user()
82 mutex_unlock(&context->db_page_mutex); mlx5_ib_db_map_user()
87 void mlx5_ib_db_unmap_user(struct mlx5_ib_ucontext *context, struct mlx5_db *db) mlx5_ib_db_unmap_user() argument
89 mutex_lock(&context->db_page_mutex); mlx5_ib_db_unmap_user()
97 mutex_unlock(&context->db_page_mutex); mlx5_ib_db_unmap_user()
/linux-4.1.27/drivers/base/regmap/
H A Dregmap-spi.c32 static int regmap_spi_write(void *context, const void *data, size_t count) regmap_spi_write() argument
34 struct device *dev = context; regmap_spi_write()
40 static int regmap_spi_gather_write(void *context, regmap_spi_gather_write() argument
44 struct device *dev = context; regmap_spi_gather_write()
57 static int regmap_spi_async_write(void *context, regmap_spi_async_write() argument
65 struct device *dev = context; regmap_spi_async_write()
79 async->m.context = async; regmap_spi_async_write()
95 static int regmap_spi_read(void *context, regmap_spi_read() argument
99 struct device *dev = context; regmap_spi_read()
H A Dregmap-i2c.c19 static int regmap_smbus_byte_reg_read(void *context, unsigned int reg, regmap_smbus_byte_reg_read() argument
22 struct device *dev = context; regmap_smbus_byte_reg_read()
38 static int regmap_smbus_byte_reg_write(void *context, unsigned int reg, regmap_smbus_byte_reg_write() argument
41 struct device *dev = context; regmap_smbus_byte_reg_write()
55 static int regmap_smbus_word_reg_read(void *context, unsigned int reg, regmap_smbus_word_reg_read() argument
58 struct device *dev = context; regmap_smbus_word_reg_read()
74 static int regmap_smbus_word_reg_write(void *context, unsigned int reg, regmap_smbus_word_reg_write() argument
77 struct device *dev = context; regmap_smbus_word_reg_write()
91 static int regmap_smbus_word_read_swapped(void *context, unsigned int reg, regmap_smbus_word_read_swapped() argument
94 struct device *dev = context; regmap_smbus_word_read_swapped()
110 static int regmap_smbus_word_write_swapped(void *context, unsigned int reg, regmap_smbus_word_write_swapped() argument
113 struct device *dev = context; regmap_smbus_word_write_swapped()
127 static int regmap_i2c_write(void *context, const void *data, size_t count) regmap_i2c_write() argument
129 struct device *dev = context; regmap_i2c_write()
142 static int regmap_i2c_gather_write(void *context, regmap_i2c_gather_write() argument
146 struct device *dev = context; regmap_i2c_gather_write()
176 static int regmap_i2c_read(void *context, regmap_i2c_read() argument
180 struct device *dev = context; regmap_i2c_read()
H A Dregmap-spmi.c25 static int regmap_spmi_base_read(void *context, regmap_spmi_base_read() argument
35 err = spmi_register_read(context, addr++, val++); regmap_spmi_base_read()
40 static int regmap_spmi_base_gather_write(void *context, regmap_spmi_base_gather_write() argument
55 err = spmi_register_zero_write(context, *data); regmap_spmi_base_gather_write()
65 err = spmi_register_write(context, addr, *data); regmap_spmi_base_gather_write()
78 static int regmap_spmi_base_write(void *context, const void *data, regmap_spmi_base_write() argument
82 return regmap_spmi_base_gather_write(context, data, 1, data + 1, regmap_spmi_base_write()
125 static int regmap_spmi_ext_read(void *context, regmap_spmi_ext_read() argument
144 err = spmi_ext_register_read(context, addr, val, len); regmap_spmi_ext_read()
156 err = spmi_ext_register_readl(context, addr, val, len); regmap_spmi_ext_read()
169 static int regmap_spmi_ext_gather_write(void *context, regmap_spmi_ext_gather_write() argument
184 err = spmi_ext_register_write(context, addr, val, len); regmap_spmi_ext_gather_write()
196 err = spmi_ext_register_writel(context, addr, val, len); regmap_spmi_ext_gather_write()
209 static int regmap_spmi_ext_write(void *context, const void *data, regmap_spmi_ext_write() argument
213 return regmap_spmi_ext_gather_write(context, data, 2, data + 2, regmap_spmi_ext_write()
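All three regmap bus implementations above follow the same contract: whatever pointer is registered as the bus context at regmap_init() time comes back verbatim as the first argument of every bus callback. A skeletal bus, with illustrative names:

#include <linux/regmap.h>

static int my_bus_write(void *context, const void *data, size_t count)
{
	struct device *dev = context;	/* the pointer registered below */

	/* ... push 'count' bytes behind 'dev' onto the wire ... */
	return 0;
}

static int my_bus_read(void *context, const void *reg, size_t reg_size,
		       void *val, size_t val_size)
{
	/* ... issue the read transaction ... */
	return 0;
}

static const struct regmap_bus my_bus = {
	.write	= my_bus_write,
	.read	= my_bus_read,
};

/* map = regmap_init(dev, &my_bus, dev, &config); -- 'dev' is the context */
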
/linux-4.1.27/tools/perf/scripts/perl/Perf-Trace-Util/
H A DContext.c55 Perl_croak(aTHX_ "Usage: %s(%s)", "Perf::Trace::Context::common_pc", "context"); XS()
58 struct scripting_context * context = INT2PTR(struct scripting_context *,SvIV(ST(0))); XS() local
62 RETVAL = common_pc(context); XS()
78 Perl_croak(aTHX_ "Usage: %s(%s)", "Perf::Trace::Context::common_flags", "context"); XS()
81 struct scripting_context * context = INT2PTR(struct scripting_context *,SvIV(ST(0))); XS() local
85 RETVAL = common_flags(context); XS()
101 Perl_croak(aTHX_ "Usage: %s(%s)", "Perf::Trace::Context::common_lock_depth", "context"); XS()
104 struct scripting_context * context = INT2PTR(struct scripting_context *,SvIV(ST(0))); XS() local
108 RETVAL = common_lock_depth(context); XS()
/linux-4.1.27/drivers/staging/skein/
H A Dskein_api.h50 * struct skein_ctx ctx; // a Skein hash or MAC context
52 * // prepare context, here for a Skein with a state size of 512 bits.
55 * // Initialize the context to set the requested hash length in bits
71 * An application may use @c skein_reset to reset a Skein context and use
113 * Prepare a Skein context.
116 * context. The functions clears memory and initializes size dependent
120 * Pointer to a Skein context.
129 * Initialize a Skein context.
131 * Initializes the context with this data and saves the resulting Skein
135 * Pointer to a Skein context.
145 * Resets a Skein context for further use.
147 * Restores the saved chaining variables to reset the Skein context.
152 * Pointer to a pre-initialized Skein MAC context
157 * Initializes a Skein context for MAC usage.
159 * Initializes the context with this data and saves the resulting Skein
166 * Pointer to an empty or preinitialized Skein MAC context
183 * Pointer to initialized Skein context
201 * Pointer to initialized Skein context
214 * reset the Skein context.
217 * Pointer to initialized Skein context
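A fuller version of the usage sketched in this header's comments, assuming the skein_ctx_prepare()/skein_init()/skein_update()/skein_final() entry points it declares; the 512-bit state and 256-bit output are illustrative:

#include "skein_api.h"

static int hash_buf(const u8 *msg, size_t len, u8 *out32)
{
	struct skein_ctx ctx;		/* a Skein hash context */
	int ret;

	ret = skein_ctx_prepare(&ctx, SKEIN_512);
	if (ret != SKEIN_SUCCESS)
		return ret;
	ret = skein_init(&ctx, 256);	/* requested hash length in bits */
	if (ret != SKEIN_SUCCESS)
		return ret;
	skein_update(&ctx, msg, len);
	return skein_final(&ctx, out32);	/* 256 bits = 32 bytes */
}
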
/linux-4.1.27/arch/nios2/include/asm/
H A Dmmu_context.h32 * Initialize the context related info for a new mm_struct instance.
35 * the currently running generation when this context is switched in.
40 mm->context = 0; init_new_context()
45 * Destroy context related info for an mm_struct that is about
62 * the context for the new mm so we see the new mappings.
H A Dmmu.h13 /* Default "unsigned long" context */
/linux-4.1.27/arch/frv/include/asm/
H A Dmmu_context.h1 /* mmu_context.h: MMU context management routines
38 change_mm_context(&prev->context, &next->context, next->pgd); \
43 change_mm_context(&prev->context, &next->context, next->pgd); \
H A Dmmu.h1 /* mmu.h: memory management context for FR-V with or without MMU support
16 struct list_head id_link; /* link in list of context ID owners */
17 unsigned short id; /* MMU context ID */
H A Dtlbflush.h38 __flush_tlb_mm((mm)->context.id); \
45 __flush_tlb_range((vma)->vm_mm->context.id, start, end); \
52 __flush_tlb_page((vma)->vm_mm->context.id, addr); \
/linux-4.1.27/crypto/asymmetric_keys/
H A Dpkcs7_parser.c163 int pkcs7_note_OID(void *context, size_t hdrlen, pkcs7_note_OID() argument
167 struct pkcs7_parse_context *ctx = context; pkcs7_note_OID()
182 int pkcs7_sig_note_digest_algo(void *context, size_t hdrlen, pkcs7_sig_note_digest_algo() argument
186 struct pkcs7_parse_context *ctx = context; pkcs7_sig_note_digest_algo()
211 int pkcs7_sig_note_pkey_algo(void *context, size_t hdrlen, pkcs7_sig_note_pkey_algo() argument
215 struct pkcs7_parse_context *ctx = context; pkcs7_sig_note_pkey_algo()
229 * Extract a certificate and store it in the context.
231 int pkcs7_extract_cert(void *context, size_t hdrlen, pkcs7_extract_cert() argument
235 struct pkcs7_parse_context *ctx = context; pkcs7_extract_cert()
271 int pkcs7_note_certificate_list(void *context, size_t hdrlen, pkcs7_note_certificate_list() argument
275 struct pkcs7_parse_context *ctx = context; pkcs7_note_certificate_list()
288 * the context.
290 int pkcs7_note_data(void *context, size_t hdrlen, pkcs7_note_data() argument
294 struct pkcs7_parse_context *ctx = context; pkcs7_note_data()
308 int pkcs7_sig_note_authenticated_attr(void *context, size_t hdrlen, pkcs7_sig_note_authenticated_attr() argument
312 struct pkcs7_parse_context *ctx = context; pkcs7_sig_note_authenticated_attr()
331 int pkcs7_sig_note_set_of_authattrs(void *context, size_t hdrlen, pkcs7_sig_note_set_of_authattrs() argument
335 struct pkcs7_parse_context *ctx = context; pkcs7_sig_note_set_of_authattrs()
346 int pkcs7_sig_note_serial(void *context, size_t hdrlen, pkcs7_sig_note_serial() argument
350 struct pkcs7_parse_context *ctx = context; pkcs7_sig_note_serial()
359 int pkcs7_sig_note_issuer(void *context, size_t hdrlen, pkcs7_sig_note_issuer() argument
363 struct pkcs7_parse_context *ctx = context; pkcs7_sig_note_issuer()
372 int pkcs7_sig_note_signature(void *context, size_t hdrlen, pkcs7_sig_note_signature() argument
376 struct pkcs7_parse_context *ctx = context; pkcs7_sig_note_signature()
393 int pkcs7_note_signed_info(void *context, size_t hdrlen, pkcs7_note_signed_info() argument
397 struct pkcs7_parse_context *ctx = context; pkcs7_note_signed_info()
H A Dmscode_parser.c46 int mscode_note_content_type(void *context, size_t hdrlen, mscode_note_content_type() argument
78 int mscode_note_digest_algo(void *context, size_t hdrlen, mscode_note_digest_algo() argument
82 struct pefile_context *ctx = context; mscode_note_digest_algo()
117 int mscode_note_digest(void *context, size_t hdrlen, mscode_note_digest() argument
121 struct pefile_context *ctx = context; mscode_note_digest()
H A Dx509_cert_parser.c139 int x509_note_OID(void *context, size_t hdrlen, x509_note_OID() argument
143 struct x509_parse_context *ctx = context; x509_note_OID()
159 int x509_note_tbs_certificate(void *context, size_t hdrlen, x509_note_tbs_certificate() argument
163 struct x509_parse_context *ctx = context; x509_note_tbs_certificate()
176 int x509_note_pkey_algo(void *context, size_t hdrlen, x509_note_pkey_algo() argument
180 struct x509_parse_context *ctx = context; x509_note_pkey_algo()
228 int x509_note_signature(void *context, size_t hdrlen, x509_note_signature() argument
232 struct x509_parse_context *ctx = context; x509_note_signature()
250 int x509_note_serial(void *context, size_t hdrlen, x509_note_serial() argument
254 struct x509_parse_context *ctx = context; x509_note_serial()
263 int x509_extract_name_segment(void *context, size_t hdrlen, x509_extract_name_segment() argument
267 struct x509_parse_context *ctx = context; x509_extract_name_segment()
367 int x509_note_issuer(void *context, size_t hdrlen, x509_note_issuer() argument
371 struct x509_parse_context *ctx = context; x509_note_issuer()
377 int x509_note_subject(void *context, size_t hdrlen, x509_note_subject() argument
381 struct x509_parse_context *ctx = context; x509_note_subject()
390 int x509_extract_key_data(void *context, size_t hdrlen, x509_extract_key_data() argument
394 struct x509_parse_context *ctx = context; x509_extract_key_data()
410 int rsa_extract_mpi(void *context, size_t hdrlen, rsa_extract_mpi() argument
414 struct x509_parse_context *ctx = context; rsa_extract_mpi()
436 int x509_process_extension(void *context, size_t hdrlen, x509_process_extension() argument
440 struct x509_parse_context *ctx = context; x509_process_extension()
570 int x509_note_not_before(void *context, size_t hdrlen, x509_note_not_before() argument
574 struct x509_parse_context *ctx = context; x509_note_not_before()
578 int x509_note_not_after(void *context, size_t hdrlen, x509_note_not_after() argument
582 struct x509_parse_context *ctx = context; x509_note_not_after()
589 int x509_akid_note_kid(void *context, size_t hdrlen, x509_akid_note_kid() argument
593 struct x509_parse_context *ctx = context; x509_akid_note_kid()
614 int x509_akid_note_name(void *context, size_t hdrlen, x509_akid_note_name() argument
618 struct x509_parse_context *ctx = context; x509_akid_note_name()
630 int x509_akid_note_serial(void *context, size_t hdrlen, x509_akid_note_serial() argument
634 struct x509_parse_context *ctx = context; x509_akid_note_serial()
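Every callback above has the same shape: the ASN.1 decoder hands back the caller's opaque context plus the tag, value pointer and length of the matched element, and the action casts the context to its private parse state. A sketch for a hypothetical grammar:

#include <linux/asn1_decoder.h>

struct my_parse_context {
	const void *serial;
	size_t serial_len;
};

static int my_note_serial(void *context, size_t hdrlen, unsigned char tag,
			  const void *value, size_t vlen)
{
	struct my_parse_context *ctx = context;

	ctx->serial = value;		/* points into the BER blob */
	ctx->serial_len = vlen;
	return 0;
}

/* invoked as: asn1_ber_decoder(&my_decoder, &parse_ctx, data, datalen);
 * where my_decoder is generated from a .asn1 grammar by asn1_compiler */
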
/linux-4.1.27/drivers/staging/vt6656/
H A Dusbpipe.c107 struct vnt_private *priv = urb->context; vnt_start_interrupt_urb_complete()
169 struct vnt_rcb *rcb = urb->context; vnt_submit_rx_urb_complete()
249 struct vnt_usb_send_context *context = urb->context; vnt_tx_context_complete() local
250 struct vnt_private *priv = context->priv; vnt_tx_context_complete()
254 dev_dbg(&priv->usb->dev, "Write %d bytes\n", context->buf_len); vnt_tx_context_complete()
259 context->in_use = false; vnt_tx_context_complete()
267 if (context->type == CONTEXT_DATA_PACKET) vnt_tx_context_complete()
270 if (urb->status || context->type == CONTEXT_BEACON_PACKET) { vnt_tx_context_complete()
271 if (context->skb) vnt_tx_context_complete()
272 ieee80211_free_txskb(priv->hw, context->skb); vnt_tx_context_complete()
274 context->in_use = false; vnt_tx_context_complete()
279 struct vnt_usb_send_context *context) vnt_tx_context()
285 context->in_use = false; vnt_tx_context()
289 urb = context->urb; vnt_tx_context()
294 context->data, vnt_tx_context()
295 context->buf_len, vnt_tx_context()
297 context); vnt_tx_context()
303 context->in_use = false; vnt_tx_context()
278 vnt_tx_context(struct vnt_private *priv, struct vnt_usb_send_context *context) vnt_tx_context() argument
H A Dint.c72 struct vnt_usb_send_context *context; vnt_int_report_rate() local
81 context = priv->tx_context[pkt_no]; vnt_int_report_rate()
83 if (!context->skb) vnt_int_report_rate()
86 info = IEEE80211_SKB_CB(context->skb); vnt_int_report_rate()
89 if (context->fb_option && !(tsr & (TSR_TMO | TSR_RETRYTMO))) { vnt_int_report_rate()
99 if (context->fb_option == AUTO_FB_0) vnt_int_report_rate()
101 else if (context->fb_option == AUTO_FB_1) vnt_int_report_rate()
119 ieee80211_tx_status_irqsafe(priv->hw, context->skb); vnt_int_report_rate()
121 context->in_use = false; vnt_int_report_rate()
/linux-4.1.27/drivers/gpu/drm/i915/
H A Di915_gem_context.c29 * This file implements HW context support. On gen5+ a HW context consists of an
30 * opaque GPU object which is referenced at times of context saves and restores.
31 * With RC6 enabled, the context is also referenced as the GPU enters and exits
32 * from RC6 (GPU has its own internal power context, except on gen5). Though
33 * something like a context does exist for the media ring, the code only
37 * and the default HW context. The default HW context is used by GPU clients
38 * that do not request setup of their own hardware context. The default
39 * context's state is never restored to help prevent programming errors. This
41 * The default context only exists to give the GPU some offset to load as the
42 * current to invoke a save of the context we actually care about. In fact, the
44 * never use the default context, though that limits the driver's ability to
52 * The context life cycle is semi-complicated in that context BOs may live
53 * longer than the context itself because of the way the hardware, and object
55 * describing the context life.
58 * S1: context created (refcount 1, pincount 0, active 0)
59 * S2: context is currently running (refcount 2, pincount 1, active X)
61 * S4: context is current, but destroyed (refcount 1, pincount 1, active 0)
65 * S0->S1: client creates a context
66 * S1->S2: client submits execbuf with context
67 * S2->S3: other clients submits execbuf with context
68 * S3->S1: context object was retired
70 * S2->S4: context destroy called with current context
72 * S4->S5->S0: destroy path on current context
75 * The "current context" means the context which is currently running on the
78 * offset, but it will on the next context switch. The only way to avoid this
81 * An "active context' is one which was previously the "current context" and is
82 * on the active list waiting for the next context switch to occur. Until this
84 * possible to destroy a context, but it is still active.
165 * Try to make the context utilize L3 as well as LLC. i915_gem_alloc_context_obj()
209 /* Default context will never have a file_priv */ __create_hw_context()
220 /* NB: Mark all slices as needing a remap so that when the context first __create_hw_context()
235 * The default context needs to exist per ring that uses contexts. It stores the
236 * context state of the GPU for applications that don't utilize HW contexts, as
256 * context. This can cause a problem as pinning the i915_gem_create_context()
257 * default context also requires GTT space which may not i915_gem_create_context()
259 * context. i915_gem_create_context()
353 DRM_ERROR("Failed to create default global context (error %ld)\n", i915_gem_context_init()
365 DRM_DEBUG_DRIVER("%s context support initialized\n", i915_gem_context_init()
378 /* The only known way to stop the gpu from accessing the hw context is i915_gem_context_fini()
383 /* When default context is created and switched to, base object refcount i915_gem_context_fini()
386 * to default context. So we need to unreference the base object once i915_gem_context_fini()
391 /* Fake switch to NULL context */ i915_gem_context_fini()
427 DRM_ERROR("ring init context: %d\n", for_each_ring()
663 * Pin can switch back to the default context if we end up calling into do_switch()
665 * switches to the default context. Hence we need to reload from here. do_switch()
673 * a context."*/ do_switch()
692 * write domains when putting a context object onto the active list do_switch()
707 if (WARN_ONCE(ret, "GGTT context bind failed!")) do_switch()
713 /* NB: If we inhibit the restore, the context is not allowed to do_switch()
735 /* The hardware context switch is emitted, but we haven't do_switch()
741 DRM_ERROR("Failed to change address space on context switch\n"); do_switch()
758 /* The backing object for the context is done after switching to the do_switch()
759 * *next* context. Therefore we cannot retire the previous context until do_switch()
760 * the next context has already started running. In fact, the below code do_switch()
769 * object dirty. The only exception is that the context must be do_switch()
794 DRM_ERROR("ring init context: %d\n", ret); do_switch()
807 * i915_switch_context() - perform a GPU context switch.
808 * @ring: ring for which we'll execute the context switch
809 * @to: the context to switch to
811 * The context life cycle is simple. The context refcount is incremented and
812 * decremented by 1 on create and destroy. If the context is in use by the GPU,
813 * it will have a refcount > 1. This allows us to destroy the context abstract
816 * This function should not be used in execlists mode. Instead the context is
818 * context.
828 if (to->legacy_hw_ctx.rcs_state == NULL) { /* We have the fake context */ i915_switch_context()
867 DRM_DEBUG_DRIVER("HW context %d created\n", args->ctx_id); i915_gem_context_create_ioctl()
897 DRM_DEBUG_DRIVER("HW context %d destroyed\n", args->ctx_id); i915_gem_context_destroy_ioctl()
/linux-4.1.27/arch/um/kernel/
H A Dexec.c27 ret = unmap(&current->mm->context.id, 0, STUB_START, 0, &data); flush_thread()
28 ret = ret || unmap(&current->mm->context.id, STUB_END, flush_thread()
38 __switch_mm(&current->mm->context.id); flush_thread()
H A Dreboot.c28 pid = t->mm->context.id.u.pid; for_each_process()
/linux-4.1.27/drivers/tty/ipwireless/
H A Dmain.h54 /* Hardware context */
56 /* Network layer context */
58 /* TTY device context */
/linux-4.1.27/arch/arm64/include/asm/
H A Dmmu.h26 .context.id_lock = __RAW_SPIN_LOCK_UNLOCKED(name.context.id_lock),
28 #define ASID(mm) ((mm)->context.id & 0xffff)
/linux-4.1.27/drivers/pci/hotplug/
H A Dacpiphp_glue.c64 static void hotplug_event(u32 type, struct acpiphp_context *context);
68 * acpiphp_init_context - Create hotplug context and grab a reference to it.
69 * @adev: ACPI device object to create the context for.
75 struct acpiphp_context *context; acpiphp_init_context() local
77 context = kzalloc(sizeof(*context), GFP_KERNEL); acpiphp_init_context()
78 if (!context) acpiphp_init_context()
81 context->refcount = 1; acpiphp_init_context()
82 context->hp.notify = acpiphp_hotplug_notify; acpiphp_init_context()
83 context->hp.fixup = acpiphp_post_dock_fixup; acpiphp_init_context()
84 acpi_set_hp_context(adev, &context->hp); acpiphp_init_context()
85 return context; acpiphp_init_context()
89 * acpiphp_get_context - Get hotplug context and grab a reference to it.
90 * @adev: ACPI device object to get the context for.
96 struct acpiphp_context *context; acpiphp_get_context() local
101 context = to_acpiphp_context(adev->hp); acpiphp_get_context()
102 context->refcount++; acpiphp_get_context()
103 return context; acpiphp_get_context()
107 * acpiphp_put_context - Drop a reference to ACPI hotplug context.
108 * @context: ACPI hotplug context to drop a reference to.
110 * The context object is removed if there are no more references to it.
114 static void acpiphp_put_context(struct acpiphp_context *context) acpiphp_put_context() argument
116 if (--context->refcount) acpiphp_put_context()
119 WARN_ON(context->bridge); acpiphp_put_context()
120 context->hp.self->hp = NULL; acpiphp_put_context()
121 kfree(context); acpiphp_put_context()
136 struct acpiphp_context *context; acpiphp_grab_context() local
139 context = acpiphp_get_context(adev); acpiphp_grab_context()
140 if (!context || context->func.parent->is_going_away) { acpiphp_grab_context()
144 get_bridge(context->func.parent); acpiphp_grab_context()
145 acpiphp_put_context(context); acpiphp_grab_context()
147 return context; acpiphp_grab_context()
150 static void acpiphp_let_context_go(struct acpiphp_context *context) acpiphp_let_context_go() argument
152 put_bridge(context->func.parent); acpiphp_let_context_go()
157 struct acpiphp_context *context; free_bridge() local
173 context = bridge->context; free_bridge()
174 /* Root bridges will not have hotplug context. */ free_bridge()
175 if (context) { free_bridge()
177 put_bridge(context->func.parent); free_bridge()
178 context->bridge = NULL; free_bridge()
179 acpiphp_put_context(context); free_bridge()
197 struct acpiphp_context *context = acpiphp_grab_context(adev); acpiphp_post_dock_fixup() local
201 if (!context) acpiphp_post_dock_fixup()
204 bus = context->func.slot->bus; acpiphp_post_dock_fixup()
222 acpiphp_let_context_go(context); acpiphp_post_dock_fixup()
255 * acpiphp_add_context - Add ACPIPHP context to an ACPI device object.
256 * @handle: ACPI handle of the object to add a context to.
265 struct acpiphp_context *context; acpiphp_add_context() local
290 context = acpiphp_init_context(adev); acpiphp_add_context()
291 if (!context) { acpiphp_add_context()
293 acpi_handle_err(handle, "No hotplug context\n"); acpiphp_add_context()
296 newfunc = &context->func; acpiphp_add_context()
319 acpiphp_put_context(context); acpiphp_add_context()
751 static void hotplug_event(u32 type, struct acpiphp_context *context) hotplug_event() argument
753 acpi_handle handle = context->hp.self->handle; hotplug_event()
754 struct acpiphp_func *func = &context->func; hotplug_event()
759 bridge = context->bridge; hotplug_event()
807 struct acpiphp_context *context; acpiphp_hotplug_notify() local
809 context = acpiphp_grab_context(adev); acpiphp_hotplug_notify()
810 if (!context) acpiphp_hotplug_notify()
813 hotplug_event(type, context); acpiphp_hotplug_notify()
814 acpiphp_let_context_go(context); acpiphp_hotplug_notify()
869 struct acpiphp_context *context; acpiphp_enumerate_slots() local
873 * under its parent, so the context should be there, unless the acpiphp_enumerate_slots()
877 context = acpiphp_get_context(adev); acpiphp_enumerate_slots()
878 if (!context) acpiphp_enumerate_slots()
881 bridge->context = context; acpiphp_enumerate_slots()
882 context->bridge = bridge; acpiphp_enumerate_slots()
884 get_bridge(context->func.parent); acpiphp_enumerate_slots()
/linux-4.1.27/drivers/staging/ozwpan/
H A Dozusbsvc.h10 * Per PD context info stored in application context area of PD.
H A Dozusbsvc.c63 /* Create a USB context in case we need one. If we find the PD already oz_usb_start()
64 * has a USB context then we will destroy it. oz_usb_start()
72 /* Install the USB context if the PD doesn't already have one. oz_usb_start()
83 oz_dbg(ON, "Already have USB context\n"); oz_usb_start()
88 * the USB context is destroyed. oz_usb_start()
92 /* If we already had a USB context and had obtained a port from oz_usb_start()
135 /* At this point the reference count on the usb context should oz_usb_stop()
156 * This increments the reference count of the context area for a specific PD.
157 * This ensures this context area does not disappear while still in use.
168 * This decrements the reference count of the context area for a specific PD
169 * and destroys the context area if the reference count becomes zero.
177 oz_dbg(ON, "Dealloc USB context\n"); oz_usb_put()
/linux-4.1.27/arch/um/kernel/skas/
H A Dmmu.c53 struct mm_context *to_mm = &mm->context; init_new_context()
63 from_mm = &current->mm->context; init_new_context()
100 ret = init_stub_pte(mm, STUB_DATA, mm->context.id.stack); uml_setup_stubs()
104 mm->context.stub_pages[0] = virt_to_page(&__syscall_stub_start); uml_setup_stubs()
105 mm->context.stub_pages[1] = virt_to_page(mm->context.id.stack); uml_setup_stubs()
111 mm->context.stub_pages); uml_setup_stubs()
139 struct mm_context *mmu = &mm->context; destroy_context()
/linux-4.1.27/drivers/acpi/
H A Dsbshc.h25 typedef void (*smbus_alarm_callback)(void *context);
32 smbus_alarm_callback callback, void *context);
H A Dbus.c136 void *context) acpi_bus_private_data_handler()
166 acpi_handle_debug(handle, "No context for object\n"); acpi_bus_get_private_data()
181 struct acpi_osc_context *context, char *error) acpi_print_osc_error()
193 for (i = 0; i < context->cap.length; i += sizeof(u32)) acpi_print_osc_error()
194 printk("%x ", *((u32 *)(context->cap.pointer + i))); acpi_print_osc_error()
221 acpi_status acpi_run_osc(acpi_handle handle, struct acpi_osc_context *context) acpi_run_osc() argument
231 if (!context) acpi_run_osc()
233 if (ACPI_FAILURE(acpi_str_to_uuid(context->uuid_str, uuid))) acpi_run_osc()
235 context->ret.length = ACPI_ALLOCATE_BUFFER; acpi_run_osc()
236 context->ret.pointer = NULL; acpi_run_osc()
245 in_params[1].integer.value = context->rev; acpi_run_osc()
247 in_params[2].integer.value = context->cap.length/sizeof(u32); acpi_run_osc()
249 in_params[3].buffer.length = context->cap.length; acpi_run_osc()
250 in_params[3].buffer.pointer = context->cap.pointer; acpi_run_osc()
261 || out_obj->buffer.length != context->cap.length) { acpi_run_osc()
262 acpi_print_osc_error(handle, context, acpi_run_osc()
271 acpi_print_osc_error(handle, context, acpi_run_osc()
274 acpi_print_osc_error(handle, context, acpi_run_osc()
277 acpi_print_osc_error(handle, context, acpi_run_osc()
280 if (((u32 *)context->cap.pointer)[OSC_QUERY_DWORD] acpi_run_osc()
290 context->ret.length = out_obj->buffer.length; acpi_run_osc()
291 context->ret.pointer = kmemdup(out_obj->buffer.pointer, acpi_run_osc()
292 context->ret.length, GFP_KERNEL); acpi_run_osc()
293 if (!context->ret.pointer) { acpi_run_osc()
302 context->ret.pointer = NULL; acpi_run_osc()
312 struct acpi_osc_context context = { acpi_bus_osc_support() local
337 if (ACPI_SUCCESS(acpi_run_osc(handle, &context))) { acpi_bus_osc_support()
338 u32 *capbuf_ret = context.ret.pointer; acpi_bus_osc_support()
339 if (context.ret.length > OSC_SUPPORT_DWORD) acpi_bus_osc_support()
342 kfree(context.ret.pointer); acpi_bus_osc_support()
135 acpi_bus_private_data_handler(acpi_handle handle, void *context) acpi_bus_private_data_handler() argument
180 acpi_print_osc_error(acpi_handle handle, struct acpi_osc_context *context, char *error) acpi_print_osc_error() argument
/linux-4.1.27/arch/frv/include/uapi/asm/
H A Dsigcontext.h1 /* sigcontext.h: FRV signal context
17 * Signal context structure - contains all info to do with the state
/linux-4.1.27/arch/arm/include/asm/
H A Dmmu_context.h29 #define init_new_context(tsk,mm) ({ atomic64_set(&mm->context.id, 0); 0; })
48 if (unlikely(mm->context.vmalloc_seq != init_mm.context.vmalloc_seq)) check_and_switch_context()
59 mm->context.switch_pending = 1; check_and_switch_context()
70 if (mm && mm->context.switch_pending) { finish_arch_post_lock_switch()
78 if (mm->context.switch_pending) { finish_arch_post_lock_switch()
79 mm->context.switch_pending = 0; finish_arch_post_lock_switch()
98 * mm: describes the currently active mm context
H A Dmmu.h22 #define ASID(mm) ((unsigned int)((mm)->context.id.counter & ~ASID_MASK))
/linux-4.1.27/drivers/net/phy/
H A Dat803x.c66 struct at803x_context *context) at803x_context_save()
68 context->bmcr = phy_read(phydev, MII_BMCR); at803x_context_save()
69 context->advertise = phy_read(phydev, MII_ADVERTISE); at803x_context_save()
70 context->control1000 = phy_read(phydev, MII_CTRL1000); at803x_context_save()
71 context->int_enable = phy_read(phydev, AT803X_INTR_ENABLE); at803x_context_save()
72 context->smart_speed = phy_read(phydev, AT803X_SMART_SPEED); at803x_context_save()
73 context->led_control = phy_read(phydev, AT803X_LED_CONTROL); at803x_context_save()
78 const struct at803x_context *context) at803x_context_restore()
80 phy_write(phydev, MII_BMCR, context->bmcr); at803x_context_restore()
81 phy_write(phydev, MII_ADVERTISE, context->advertise); at803x_context_restore()
82 phy_write(phydev, MII_CTRL1000, context->control1000); at803x_context_restore()
83 phy_write(phydev, AT803X_INTR_ENABLE, context->int_enable); at803x_context_restore()
84 phy_write(phydev, AT803X_SMART_SPEED, context->smart_speed); at803x_context_restore()
85 phy_write(phydev, AT803X_LED_CONTROL, context->led_control); at803x_context_restore()
273 struct at803x_context context; at803x_link_change_notify() local
275 at803x_context_save(phydev, &context); at803x_link_change_notify()
282 at803x_context_restore(phydev, &context); at803x_link_change_notify()
65 at803x_context_save(struct phy_device *phydev, struct at803x_context *context) at803x_context_save() argument
77 at803x_context_restore(struct phy_device *phydev, const struct at803x_context *context) at803x_context_restore() argument
/linux-4.1.27/drivers/scsi/isci/
H A Dremote_node_context.h61 * the remote node context in the silicon. It exists to model and manage
62 * the remote node context in the silicon.
94 * @SCI_RNC_INITIAL: initial state for a remote node context. On a resume
95 * request the remote node context will transition to the posting state.
98 * the RNC is posted the remote node context will be made ready.
101 * the hardware. Once the invalidate is complete the remote node context will
106 * remote node context will transition to the ready state.
108 * @SCI_RNC_READY: state that the remote node context must be in to accept io
111 * @SCI_RNC_TX_SUSPENDED: state that the remote node context transitions to when
114 * @SCI_RNC_TX_RX_SUSPENDED: state that the remote node context transitions to
117 * @SCI_RNC_AWAIT_SUSPENSION: wait state for the remote node context that waits
119 * either there is a request to suspend the remote node context or when there is
142 * node context.
156 * associated with the remote node context object. The remote node context
169 * context suspension.
176 * This field is true if the remote node context is resuming from its current
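
Collecting the state descriptions above into one place, the documented remote node context (RNC) state machine looks like this (a condensed sketch of the states named in the comments; the driver's real declaration lives in remote_node_context.h):

enum sci_remote_node_context_states {
        SCI_RNC_INITIAL,            /* a resume request moves it to posting */
        SCI_RNC_POSTING,            /* once posted, the RNC becomes ready */
        SCI_RNC_INVALIDATING,       /* hardware invalidate in progress */
        SCI_RNC_RESUMING,           /* on completion, moves to ready */
        SCI_RNC_READY,              /* required state to accept I/O requests */
        SCI_RNC_TX_SUSPENDED,       /* transmit-only suspension */
        SCI_RNC_TX_RX_SUSPENDED,    /* transmit and receive suspended */
        SCI_RNC_AWAIT_SUSPENSION,   /* waiting for a suspension to complete */
};
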
/linux-4.1.27/drivers/crypto/caam/
H A Dkey_gen.h13 void split_key_done(struct device *dev, u32 *desc, u32 err, void *context);
/linux-4.1.27/arch/x86/um/
H A Dtls_64.c12 * during context switches. arch_copy_tls()
/linux-4.1.27/arch/x86/vdso/
H A Dvgetcpu.c5 * Fast user context implementation of getcpu()
/linux-4.1.27/drivers/gpu/drm/nouveau/include/nvkm/core/
H A Dramht.h10 int nvkm_ramht_insert(struct nvkm_ramht *, int chid, u32 handle, u32 context);
/linux-4.1.27/arch/mips/kernel/
H A Dpm.c26 * Ensures that general CPU context is saved, notably FPU and DSP.
41 * Restores important CPU context.
63 * mips_pm_notifier() - Notifier for preserving general CPU context.
69 * ensure that important CPU context is preserved across a CPU power down.
H A Dr4k_switch.S47 * Check whether we need to save any FP context. FP context is saved
48 * iff the process has used the context with the scalar FPU or the MSA
59 * This allows us to lazily restore the FP context.
67 /* Check whether we're saving scalar or vector context. */
70 /* Save 128b MSA vector context + scalar FP control & status. */
80 1: /* Save 32b/64b scalar FP context. */
115 * Save a thread's fp context.
127 * Restore a thread's fp context.
141 * Save a thread's MSA vector context.
149 * Restore a thread's MSA vector context.
/linux-4.1.27/arch/cris/include/arch-v32/arch/
H A Dtlb.h6 * to store the "process" it belongs to (=> fast mm context switch). The
/linux-4.1.27/include/linux/ceph/
H A Dtypes.h23 /* context for the caps reservation mechanism */
/linux-4.1.27/arch/powerpc/include/asm/
H A Dmmu_context.h15 * Most of the context management is out of line
45 /* Mark this context has been used on the new CPU */ switch_mm()
62 /* Switch coprocessor context only if prev or next uses a coprocessor */ switch_mm()
63 if (prev->context.acop || next->context.acop) switch_mm()
68 * context switch_mm()
91 * the context for the new mm so we see the new mappings.
/linux-4.1.27/drivers/infiniband/hw/ipath/
H A Dipath_mmap.c50 struct ipath_ibdev *dev = to_idev(ip->context->device); ipath_release_mmap_info()
85 * @context: the IB user context of the process making the mmap() call
89 int ipath_mmap(struct ib_ucontext *context, struct vm_area_struct *vma) ipath_mmap() argument
91 struct ipath_ibdev *dev = to_idev(context->device); ipath_mmap()
106 if (context != ip->context || (__u64) offset != ip->offset) ipath_mmap()
133 struct ib_ucontext *context, ipath_create_mmap_info()
152 ip->context = context; ipath_create_mmap_info()
131 ipath_create_mmap_info(struct ipath_ibdev *dev, u32 size, struct ib_ucontext *context, void *obj) ipath_create_mmap_info() argument
/linux-4.1.27/drivers/infiniband/hw/qib/
H A Dqib_mmap.c50 struct qib_ibdev *dev = to_idev(ip->context->device); qib_release_mmap_info()
85 * @context: the IB user context of the process making the mmap() call
89 int qib_mmap(struct ib_ucontext *context, struct vm_area_struct *vma) qib_mmap() argument
91 struct qib_ibdev *dev = to_idev(context->device); qib_mmap()
106 if (context != ip->context || (__u64) offset != ip->offset) qib_mmap()
133 struct ib_ucontext *context, qib_create_mmap_info()
152 ip->context = context; qib_create_mmap_info()
131 qib_create_mmap_info(struct qib_ibdev *dev, u32 size, struct ib_ucontext *context, void *obj) qib_create_mmap_info() argument
/linux-4.1.27/arch/cris/arch-v32/mm/
H A Dtlb.c31 * context is found in the PID register. Each TLB entry contains a page_id that
88 page_id = mm->context.page_id; __flush_tlb_mm()
130 page_id = vma->vm_mm->context.page_id; __flush_tlb_page()
138 * Invalidate those TLB entries that match both the mm context and the __flush_tlb_page()
164 * Initialize the context related info for a new mm_struct
171 mm->context.page_id = NO_CONTEXT; init_new_context()
185 /* Make sure there is a MMU context. */ switch_mm()
198 /* Switch context in the MMU. */ switch_mm()
200 SPEC_REG_WR(SPEC_REG_PID, next->context.page_id | switch_mm()
203 SPEC_REG_WR(SPEC_REG_PID, next->context.page_id); switch_mm()
/linux-4.1.27/drivers/staging/i2o/
H A Diop.c82 * i2o_cntxt_list_add - Append a pointer to the context list and return an id
83 * @c: controller to which the context list belongs
84 * @ptr: pointer to add to the context list
86 * Because the context field in I2O is only 32 bits wide, on 64-bit the
87 * pointer is too large to fit in the context field. The i2o_cntxt_list
88 * functions therefore map pointers to context fields.
90 * Returns context id > 0 on success or 0 on failure.
98 osm_err("%s: couldn't add NULL pointer to context list!\n", i2o_cntxt_list_add()
103 osm_err("%s: Could not allocate memory for context list element" i2o_cntxt_list_add()
117 entry->context = atomic_read(&c->context_list_counter); i2o_cntxt_list_add()
123 osm_debug("%s: Add context to list %p -> %d\n", c->name, ptr, context); i2o_cntxt_list_add()
125 return entry->context; i2o_cntxt_list_add()
129 * i2o_cntxt_list_remove - Remove a pointer from the context list
130 * @c: controller to which the context list belongs
131 * @ptr: pointer which should be removed from the context list
133 * Removes a previously added pointer from the context list and returns
134 * the matching context id.
136 * Returns context id on success or 0 on failure.
141 u32 context = 0; i2o_cntxt_list_remove() local
148 context = entry->context; i2o_cntxt_list_remove()
154 if (!context) i2o_cntxt_list_remove()
158 osm_debug("%s: remove ptr from context list %d -> %p\n", c->name, i2o_cntxt_list_remove()
159 context, ptr); i2o_cntxt_list_remove()
161 return context; i2o_cntxt_list_remove()
165 * i2o_cntxt_list_get - Get a pointer from the context list and remove it
166 * @c: controller to which the context list belongs
167 * @context: context id to which the pointer belong
169 * Returns pointer to the matching context id on success or NULL on
172 void *i2o_cntxt_list_get(struct i2o_controller *c, u32 context) i2o_cntxt_list_get() argument
180 if (entry->context == context) { i2o_cntxt_list_get()
189 osm_warn("%s: context id %d not found\n", c->name, context); i2o_cntxt_list_get()
191 osm_debug("%s: get ptr from context list %d -> %p\n", c->name, context, i2o_cntxt_list_get()
198 * i2o_cntxt_list_get_ptr - Get a context id from the context list
199 * @c: controller to which the context list belongs
200 * @ptr: pointer to which the context id should be fetched
202 * Returns the context id which matches the pointer on success or 0 on
208 u32 context = 0; i2o_cntxt_list_get_ptr() local
214 context = entry->context; i2o_cntxt_list_get_ptr()
219 if (!context) i2o_cntxt_list_get_ptr()
223 osm_debug("%s: get context id from context list %p -> %d\n", c->name, i2o_cntxt_list_get_ptr()
224 ptr, context); i2o_cntxt_list_get_ptr()
226 return context; i2o_cntxt_list_get_ptr()
449 msg->u.s.icntxt = cpu_to_le32(i2o_exec_driver.context); i2o_iop_init_outbound_queue()
513 msg->u.s.icntxt = cpu_to_le32(i2o_exec_driver.context); i2o_iop_reset()
937 msg->u.s.icntxt = cpu_to_le32(i2o_exec_driver.context); i2o_status_get()
1154 * @tcntxt: transaction context to use with this notifier
1177 msg->u.s.icntxt = cpu_to_le32(drv->context); i2o_event_register()
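
The i2o_cntxt_list_* comments above describe a pointer-to-id map: a 64-bit kernel pointer cannot travel in a 32-bit message field, so the driver hands out small ids and keeps the real pointers on a list. Roughly (a simplified, lock-free sketch with illustrative names):

#include <stdint.h>
#include <stdlib.h>

struct ctxt_entry {
        struct ctxt_entry *next;
        uint32_t context;               /* 32-bit id placed in the message */
        void *ptr;                      /* the real (possibly 64-bit) pointer */
};

static struct ctxt_entry *ctxt_list;
static uint32_t ctxt_counter;

static uint32_t ctxt_list_add(void *ptr)
{
        struct ctxt_entry *e;

        if (!ptr || !(e = malloc(sizeof(*e))))
                return 0;               /* 0 means failure, as documented above */
        e->context = ++ctxt_counter;    /* the real driver handles wraparound */
        e->ptr = ptr;
        e->next = ctxt_list;
        ctxt_list = e;
        return e->context;
}

static void *ctxt_list_get(uint32_t context)
{
        struct ctxt_entry *e;

        for (e = ctxt_list; e; e = e->next)
                if (e->context == context)
                        return e->ptr;  /* the driver also unlinks the entry */
        return NULL;
}
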
/linux-4.1.27/drivers/gpio/
H A Dgpio-omap.c56 struct gpio_regs context; member in struct:gpio_bank
110 bank->context.oe = l; omap_set_gpio_direction()
123 bank->context.dataout |= l; omap_set_gpio_dataout_reg()
126 bank->context.dataout &= ~l; omap_set_gpio_dataout_reg()
146 bank->context.dataout = l; omap_set_gpio_dataout_mask()
254 bank->context.debounce = debounce; omap2_set_gpio_debounce()
255 bank->context.debounce_en = val; omap2_set_gpio_debounce()
280 bank->context.debounce_en &= ~gpio_bit; omap_clear_gpio_debounce()
281 writel_relaxed(bank->context.debounce_en, omap_clear_gpio_debounce()
285 bank->context.debounce = 0; omap_clear_gpio_debounce()
286 writel_relaxed(bank->context.debounce, bank->base + omap_clear_gpio_debounce()
308 bank->context.leveldetect0 = omap_set_gpio_trigger()
310 bank->context.leveldetect1 = omap_set_gpio_trigger()
312 bank->context.risingdetect = omap_set_gpio_trigger()
314 bank->context.fallingdetect = omap_set_gpio_trigger()
319 bank->context.wake_en = omap_set_gpio_trigger()
415 bank->context.wake_en = omap_set_gpio_triggering()
439 bank->context.ctrl = ctrl; omap_enable_gpio_module()
452 bank->context.wake_en = omap_disable_gpio_module()
464 bank->context.ctrl = ctrl; omap_disable_gpio_module()
563 bank->context.irqenable1 |= gpio_mask; omap_enable_gpio_irqbank()
571 bank->context.irqenable1 = l; omap_enable_gpio_irqbank()
585 bank->context.irqenable1 &= ~gpio_mask; omap_disable_gpio_irqbank()
593 bank->context.irqenable1 = l; omap_disable_gpio_irqbank()
631 bank->context.wake_en |= gpio_bit; omap_set_gpio_wakeup()
633 bank->context.wake_en &= ~gpio_bit; omap_set_gpio_wakeup()
635 writel_relaxed(bank->context.wake_en, bank->base + bank->regs->wkup_en); omap_set_gpio_wakeup()
878 writel_relaxed(0xffff & ~bank->context.wake_en, mask_reg); omap_mpuio_suspend_noirq()
893 writel_relaxed(bank->context.wake_en, mask_reg); omap_mpuio_resume_noirq()
1046 /* Save OE default value (0xffffffff) in the context */ omap_gpio_mod_init()
1047 bank->context.oe = readl_relaxed(bank->base + bank->regs->direction); omap_gpio_mod_init()
1260 * by writing back the values saved in bank->context. omap_gpio_runtime_suspend()
1262 wake_low = bank->context.leveldetect0 & bank->context.wake_en; omap_gpio_runtime_suspend()
1264 writel_relaxed(wake_low | bank->context.fallingdetect, omap_gpio_runtime_suspend()
1266 wake_hi = bank->context.leveldetect1 & bank->context.wake_en; omap_gpio_runtime_suspend()
1268 writel_relaxed(wake_hi | bank->context.risingdetect, omap_gpio_runtime_suspend()
1285 l1 = bank->context.fallingdetect; omap_gpio_runtime_suspend()
1286 l2 = bank->context.risingdetect; omap_gpio_runtime_suspend()
1320 * On the first resume during the probe, the context has not omap_gpio_runtime_resume()
1322 * the context loss count. omap_gpio_runtime_resume()
1340 writel_relaxed(bank->context.fallingdetect, omap_gpio_runtime_resume()
1342 writel_relaxed(bank->context.risingdetect, omap_gpio_runtime_resume()
1379 gen0 = l & bank->context.fallingdetect; omap_gpio_runtime_resume()
1382 gen1 = l & bank->context.risingdetect; omap_gpio_runtime_resume()
1386 gen = l & (~(bank->context.fallingdetect) & omap_gpio_runtime_resume()
1387 ~(bank->context.risingdetect)); omap_gpio_runtime_resume()
1453 p->context.ctrl = readl_relaxed(base + regs->ctrl); omap_gpio_init_context()
1454 p->context.oe = readl_relaxed(base + regs->direction); omap_gpio_init_context()
1455 p->context.wake_en = readl_relaxed(base + regs->wkup_en); omap_gpio_init_context()
1456 p->context.leveldetect0 = readl_relaxed(base + regs->leveldetect0); omap_gpio_init_context()
1457 p->context.leveldetect1 = readl_relaxed(base + regs->leveldetect1); omap_gpio_init_context()
1458 p->context.risingdetect = readl_relaxed(base + regs->risingdetect); omap_gpio_init_context()
1459 p->context.fallingdetect = readl_relaxed(base + regs->fallingdetect); omap_gpio_init_context()
1460 p->context.irqenable1 = readl_relaxed(base + regs->irqenable); omap_gpio_init_context()
1461 p->context.irqenable2 = readl_relaxed(base + regs->irqenable2); omap_gpio_init_context()
1464 p->context.dataout = readl_relaxed(base + regs->set_dataout); omap_gpio_init_context()
1466 p->context.dataout = readl_relaxed(base + regs->dataout); omap_gpio_init_context()
1473 writel_relaxed(bank->context.wake_en, omap_gpio_restore_context()
1475 writel_relaxed(bank->context.ctrl, bank->base + bank->regs->ctrl); omap_gpio_restore_context()
1476 writel_relaxed(bank->context.leveldetect0, omap_gpio_restore_context()
1478 writel_relaxed(bank->context.leveldetect1, omap_gpio_restore_context()
1480 writel_relaxed(bank->context.risingdetect, omap_gpio_restore_context()
1482 writel_relaxed(bank->context.fallingdetect, omap_gpio_restore_context()
1485 writel_relaxed(bank->context.dataout, omap_gpio_restore_context()
1488 writel_relaxed(bank->context.dataout, omap_gpio_restore_context()
1490 writel_relaxed(bank->context.oe, bank->base + bank->regs->direction); omap_gpio_restore_context()
1493 writel_relaxed(bank->context.debounce, bank->base + omap_gpio_restore_context()
1495 writel_relaxed(bank->context.debounce_en, omap_gpio_restore_context()
1499 writel_relaxed(bank->context.irqenable1, omap_gpio_restore_context()
1501 writel_relaxed(bank->context.irqenable2, omap_gpio_restore_context()
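
Nearly every write in the gpio-omap excerpt is mirrored into bank->context so omap_gpio_restore_context() can replay it after the bank's power domain loses state. The skeleton of that shadow-register technique, reduced to its essentials (hypothetical registers, not the OMAP layout):

#include <stdint.h>

struct gpio_regs {                      /* shadow of volatile hardware state */
        uint32_t oe;
        uint32_t dataout;
        uint32_t irqenable;
};

static struct gpio_regs context;

/* Every configuration write also updates the shadow copy. */
static void gpio_set_oe(volatile uint32_t *reg, uint32_t val)
{
        *reg = val;
        context.oe = val;
}

/* After power-up, replay the shadow copy into the (reset) hardware. */
static void gpio_restore_context(volatile uint32_t *base)
{
        base[0] = context.oe;
        base[1] = context.dataout;
        base[2] = context.irqenable;
}
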
/linux-4.1.27/drivers/isdn/hisax/
H A Dhfc_usb.c260 hfcusb_data *hfc = (hfcusb_data *) urb->context; ctrl_complete()
429 usb_complete_t complete, void *context) fill_isoc_urb()
438 urb->context = context; fill_isoc_urb()
544 iso_urb_struct *context_iso_urb = (iso_urb_struct *) urb->context; tx_iso_complete()
594 tx_iso_complete, urb->context); tx_iso_complete()
682 iso_urb_struct *context_iso_urb = (iso_urb_struct *) urb->context; rx_iso_complete()
762 rx_iso_complete, urb->context); rx_iso_complete()
855 usb_fifo *fifo = (usb_fifo *) urb->context; rx_int_complete()
1260 hfcusb_data *context; hfc_usb_probe() local
1359 if (!(context = kzalloc(sizeof(hfcusb_data), GFP_KERNEL))) hfc_usb_probe()
1379 context-> hfc_usb_probe()
1386 context-> hfc_usb_probe()
1395 context-> hfc_usb_probe()
1405 context-> hfc_usb_probe()
1414 context-> hfc_usb_probe()
1423 context-> hfc_usb_probe()
1433 context-> hfc_usb_probe()
1442 context-> hfc_usb_probe()
1450 context-> hfc_usb_probe()
1455 if (context->fifos[cidx].pipe) { hfc_usb_probe()
1456 context->fifos[cidx]. hfc_usb_probe()
1458 context->fifos[cidx].hfc = hfc_usb_probe()
1459 context; hfc_usb_probe()
1460 context->fifos[cidx].usb_packet_maxlen = hfc_usb_probe()
1462 context->fifos[cidx]. hfc_usb_probe()
1465 context->fifos[cidx]. hfc_usb_probe()
1471 context->dev = dev; /* save device */ hfc_usb_probe()
1472 context->if_used = ifnum; /* save used interface */ hfc_usb_probe()
1473 context->alt_used = alt_used; /* and alternate config */ hfc_usb_probe()
1474 context->ctrl_paksize = dev->descriptor.bMaxPacketSize0; /* control size */ hfc_usb_probe()
1475 context->cfg_used = vcf[16]; /* store used config */ hfc_usb_probe()
1476 context->vend_idx = vend_idx; /* store found vendor */ hfc_usb_probe()
1477 context->packet_size = packet_size; hfc_usb_probe()
1478 context->iso_packet_size = iso_packet_size; hfc_usb_probe()
1481 context->ctrl_in_pipe = hfc_usb_probe()
1482 usb_rcvctrlpipe(context->dev, 0); hfc_usb_probe()
1483 context->ctrl_out_pipe = hfc_usb_probe()
1484 usb_sndctrlpipe(context->dev, 0); hfc_usb_probe()
1489 context->ctrl_urb = usb_alloc_urb(0, GFP_KERNEL); hfc_usb_probe()
1491 if (!context->ctrl_urb) { hfc_usb_probe()
1494 kfree(context); hfc_usb_probe()
1503 conf_str[small_match], context->if_used, hfc_usb_probe()
1504 context->alt_used, hfc_usb_probe()
1508 if (hfc_usb_init(context)) { hfc_usb_probe()
1509 usb_kill_urb(context->ctrl_urb); hfc_usb_probe()
1510 usb_free_urb(context->ctrl_urb); hfc_usb_probe()
1511 context->ctrl_urb = NULL; hfc_usb_probe()
1512 kfree(context); hfc_usb_probe()
1515 usb_set_intfdata(intf, context); hfc_usb_probe()
1529 hfcusb_data *context = usb_get_intfdata(intf); hfc_usb_disconnect() local
1532 handle_led(context, LED_POWER_OFF); hfc_usb_disconnect()
1536 context->disc_flag = 1; hfc_usb_disconnect()
1539 if (timer_pending(&context->t3_timer)) hfc_usb_disconnect()
1540 del_timer(&context->t3_timer); hfc_usb_disconnect()
1541 if (timer_pending(&context->t4_timer)) hfc_usb_disconnect()
1542 del_timer(&context->t4_timer); hfc_usb_disconnect()
1546 if (context->fifos[i].usb_transfer_mode == USB_ISOC) { hfc_usb_disconnect()
1547 if (context->fifos[i].active > 0) { hfc_usb_disconnect()
1548 stop_isoc_chain(&context->fifos[i]); hfc_usb_disconnect()
1554 if (context->fifos[i].active > 0) { hfc_usb_disconnect()
1555 context->fifos[i].active = 0; hfc_usb_disconnect()
1560 usb_kill_urb(context->fifos[i].urb); hfc_usb_disconnect()
1561 usb_free_urb(context->fifos[i].urb); hfc_usb_disconnect()
1562 context->fifos[i].urb = NULL; hfc_usb_disconnect()
1564 context->fifos[i].active = 0; hfc_usb_disconnect()
1566 usb_kill_urb(context->ctrl_urb); hfc_usb_disconnect()
1567 usb_free_urb(context->ctrl_urb); hfc_usb_disconnect()
1568 context->ctrl_urb = NULL; hfc_usb_disconnect()
1569 hisax_unregister(&context->d_if); hfc_usb_disconnect()
1570 kfree(context); /* free our structure again */ hfc_usb_disconnect()
427 fill_isoc_urb(struct urb *urb, struct usb_device *dev, unsigned int pipe, void *buf, int num_packets, int packet_size, int interval, usb_complete_t complete, void *context) fill_isoc_urb() argument
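
Throughout hfc_usb.c the driver state rides along in urb->context, the USB core's opaque cookie: fill_isoc_urb() stores it at submit time and each completion handler casts it back. The same pattern against the stock USB API (a sketch; my_dev and the endpoint number are hypothetical):

#include <linux/usb.h>

struct my_dev {
        struct urb *urb;
        /* ... driver state ... */
};

static void my_complete(struct urb *urb)
{
        struct my_dev *dev = urb->context;      /* recover our state */

        /* ... consume urb->actual_length bytes using dev, maybe resubmit ... */
}

static int my_submit(struct my_dev *dev, struct usb_device *udev,
                     void *buf, int len)
{
        usb_fill_bulk_urb(dev->urb, udev, usb_rcvbulkpipe(udev, 1),
                          buf, len, my_complete, dev); /* dev -> urb->context */
        return usb_submit_urb(dev->urb, GFP_KERNEL);
}
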
/linux-4.1.27/drivers/media/pci/cx23885/
H A Dcx23885-ir.c71 /* Possibly called in an IRQ context */ cx23885_ir_rx_v4l2_dev_notify()
87 * For the integrated AV core, we are already in a workqueue context. cx23885_ir_rx_v4l2_dev_notify()
88 * For the CX23888 integrated IR, we are in an interrupt context. cx23885_ir_rx_v4l2_dev_notify()
96 /* Possibly called in an IRQ context */ cx23885_ir_tx_v4l2_dev_notify()
106 * For the integrated AV core, we are already in a workqueue context. cx23885_ir_tx_v4l2_dev_notify()
107 * For the CX23888 integrated IR, we are in an interrupt context. cx23885_ir_tx_v4l2_dev_notify()
/linux-4.1.27/arch/hexagon/mm/
H A Dvm_tlb.c42 if (mm->context.ptbase == current->active_mm->context.ptbase) flush_tlb_range()
71 if (current->active_mm->context.ptbase == mm->context.ptbase) flush_tlb_mm()
82 if (mm->context.ptbase == current->active_mm->context.ptbase) flush_tlb_page()
/linux-4.1.27/drivers/gpu/drm/tegra/
H A Ddrm.c133 DRM_DEBUG("IOMMU context initialized\n"); tegra_drm_load()
246 static void tegra_drm_context_free(struct tegra_drm_context *context) tegra_drm_context_free() argument
248 context->client->ops->close_channel(context); tegra_drm_context_free()
249 kfree(context); tegra_drm_context_free()
318 int tegra_drm_submit(struct tegra_drm_context *context, tegra_drm_submit() argument
339 job = host1x_job_alloc(context->channel, args->num_cmdbufs, tegra_drm_submit()
346 job->client = (u32)args->context; tegra_drm_submit()
347 job->class = context->client->base.class; tegra_drm_submit()
391 job->is_addr_reg = context->client->ops->is_addr_reg; tegra_drm_submit()
399 err = host1x_job_pin(job, context->client->base.dev); tegra_drm_submit()
421 static struct tegra_drm_context *tegra_drm_get_context(__u64 context) tegra_drm_get_context() argument
423 return (struct tegra_drm_context *)(uintptr_t)context; tegra_drm_get_context()
427 struct tegra_drm_context *context) tegra_drm_file_owns_context()
432 if (ctx == context) tegra_drm_file_owns_context()
522 struct tegra_drm_context *context; tegra_open_channel() local
526 context = kzalloc(sizeof(*context), GFP_KERNEL); tegra_open_channel()
527 if (!context) tegra_open_channel()
532 err = client->ops->open_channel(client, context); tegra_open_channel()
536 list_add(&context->list, &fpriv->contexts); tegra_open_channel()
537 args->context = (uintptr_t)context; tegra_open_channel()
538 context->client = client; tegra_open_channel()
542 kfree(context); tegra_open_channel()
551 struct tegra_drm_context *context; tegra_close_channel() local
553 context = tegra_drm_get_context(args->context); tegra_close_channel()
555 if (!tegra_drm_file_owns_context(fpriv, context)) tegra_close_channel()
558 list_del(&context->list); tegra_close_channel()
559 tegra_drm_context_free(context); tegra_close_channel()
569 struct tegra_drm_context *context; tegra_get_syncpt() local
572 context = tegra_drm_get_context(args->context); tegra_get_syncpt()
574 if (!tegra_drm_file_owns_context(fpriv, context)) tegra_get_syncpt()
577 if (args->index >= context->client->base.num_syncpts) tegra_get_syncpt()
580 syncpt = context->client->base.syncpts[args->index]; tegra_get_syncpt()
591 struct tegra_drm_context *context; tegra_submit() local
593 context = tegra_drm_get_context(args->context); tegra_submit()
595 if (!tegra_drm_file_owns_context(fpriv, context)) tegra_submit()
598 return context->client->ops->submit(context, args, drm, file); tegra_submit()
606 struct tegra_drm_context *context; tegra_get_syncpt_base() local
610 context = tegra_drm_get_context(args->context); tegra_get_syncpt_base()
612 if (!tegra_drm_file_owns_context(fpriv, context)) tegra_get_syncpt_base()
615 if (args->syncpt >= context->client->base.num_syncpts) tegra_get_syncpt_base()
618 syncpt = context->client->base.syncpts[args->syncpt]; tegra_get_syncpt_base()
852 struct tegra_drm_context *context, *tmp; tegra_drm_preclose() local
858 list_for_each_entry_safe(context, tmp, &fpriv->contexts, list) tegra_drm_preclose()
859 tegra_drm_context_free(context); tegra_drm_preclose()
426 tegra_drm_file_owns_context(struct tegra_drm_file *file, struct tegra_drm_context *context) tegra_drm_file_owns_context() argument
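
A detail worth noting above: tegra_open_channel() hands the context to userspace as a raw pointer cast to a u64, so every later ioctl must prove the handle is genuine. tegra_drm_file_owns_context() does that by walking the per-file context list, as in this condensed sketch:

#include <linux/list.h>

struct tegra_drm_context { struct list_head list; /* ... */ };
struct tegra_drm_file    { struct list_head contexts; };

static bool file_owns_context(struct tegra_drm_file *file,
                              struct tegra_drm_context *context)
{
        struct tegra_drm_context *ctx;

        list_for_each_entry(ctx, &file->contexts, list)
                if (ctx == context)
                        return true;    /* a context this file really opened */
        return false;                   /* stale or forged handle: reject */
}
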
/linux-4.1.27/arch/arm/plat-omap/
H A Ddmtimer.c97 timer->context.twer); omap_timer_restore_context()
99 timer->context.tcrr); omap_timer_restore_context()
101 timer->context.tldr); omap_timer_restore_context()
103 timer->context.tmar); omap_timer_restore_context()
105 timer->context.tsicr); omap_timer_restore_context()
106 writel_relaxed(timer->context.tier, timer->irq_ena); omap_timer_restore_context()
108 timer->context.tclr); omap_timer_restore_context()
451 /* Save the context */ omap_dm_timer_start()
452 timer->context.tclr = l; omap_dm_timer_start()
472 * context. omap_dm_timer_stop()
474 timer->context.tclr = omap_dm_timer_stop()
556 /* Save the context */ omap_dm_timer_set_load()
557 timer->context.tclr = l; omap_dm_timer_set_load()
558 timer->context.tldr = load; omap_dm_timer_set_load()
586 /* Save the context */ omap_dm_timer_set_load_start()
587 timer->context.tclr = l; omap_dm_timer_set_load_start()
588 timer->context.tldr = load; omap_dm_timer_set_load_start()
589 timer->context.tcrr = load; omap_dm_timer_set_load_start()
611 /* Save the context */ omap_dm_timer_set_match()
612 timer->context.tclr = l; omap_dm_timer_set_match()
613 timer->context.tmar = match; omap_dm_timer_set_match()
638 /* Save the context */ omap_dm_timer_set_pwm()
639 timer->context.tclr = l; omap_dm_timer_set_pwm()
661 /* Save the context */ omap_dm_timer_set_prescaler()
662 timer->context.tclr = l; omap_dm_timer_set_prescaler()
677 /* Save the context */ omap_dm_timer_set_int_enable()
678 timer->context.tier = value; omap_dm_timer_set_int_enable()
679 timer->context.twer = value; omap_dm_timer_set_int_enable()
708 /* Save the context */ omap_dm_timer_set_int_disable()
709 timer->context.tier &= ~mask; omap_dm_timer_set_int_disable()
710 timer->context.twer &= ~mask; omap_dm_timer_set_int_disable()
762 /* Save the context */ omap_dm_timer_write_counter()
763 timer->context.tcrr = value; omap_dm_timer_write_counter()
/linux-4.1.27/drivers/md/
H A Ddm-round-robin.c71 ps->context = s; rr_create()
77 struct selector *s = (struct selector *) ps->context; rr_destroy()
82 ps->context = NULL; rr_destroy()
114 struct selector *s = (struct selector *) ps->context; rr_add_path()
133 *error = "round-robin ps: Error allocating path context"; rr_add_path()
149 struct selector *s = (struct selector *) ps->context; rr_fail_path()
157 struct selector *s = (struct selector *) ps->context; rr_reinstate_path()
168 struct selector *s = (struct selector *) ps->context; rr_select_path()
H A Ddm-snap-transient.c28 kfree(store->context); transient_dtr()
42 struct transient_c *tc = store->context; transient_prepare_exception()
68 *sectors_allocated = ((struct transient_c *) store->context)->next_free; transient_usage()
83 store->context = tc; transient_ctr()
H A Ddm-queue-length.c60 ps->context = s; ql_create()
76 struct selector *s = ps->context; ql_destroy()
81 ps->context = NULL; ql_destroy()
112 struct selector *s = ps->context; ql_add_path()
152 struct selector *s = ps->context; ql_fail_path()
160 struct selector *s = ps->context; ql_reinstate_path()
174 struct selector *s = ps->context; ql_select_path()
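
All three path selectors above follow the same convention: the core multipath code carries an opaque ps->context pointer, and only the selector itself knows what it points to. The create/destroy half of that contract, in outline (a sketch following the rr_create()/rr_destroy() lines above):

#include <linux/list.h>
#include <linux/slab.h>

struct path_selector {
        void *context;                  /* selector-private, opaque to core */
};

struct selector {                       /* e.g. round-robin's bookkeeping */
        struct list_head valid_paths;
};

static int rr_create(struct path_selector *ps)
{
        struct selector *s = kzalloc(sizeof(*s), GFP_KERNEL);

        if (!s)
                return -ENOMEM;
        INIT_LIST_HEAD(&s->valid_paths);
        ps->context = s;                /* core code only stores the pointer */
        return 0;
}

static void rr_destroy(struct path_selector *ps)
{
        kfree(ps->context);
        ps->context = NULL;
}
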
/linux-4.1.27/drivers/staging/unisys/include/
H A Dsparstop.h25 typedef void (*SPARSTOP_COMPLETE_FUNC) (void *context, int status);
27 int sp_stop(void *context, SPARSTOP_COMPLETE_FUNC get_complete_func);
/linux-4.1.27/arch/sparc/prom/
H A Dmp.c17 /* Start cpu with prom-tree node 'cpunode' using context described
18 * by 'ctable_reg' in context 'ctx' at program counter 'pc'.
/linux-4.1.27/arch/mips/include/asm/
H A Dmmu_context.h2 * Switch an MMU context.
55 * into the context register.
85 #define cpu_context(cpu, mm) ((mm)->context.asid[cpu])
123 * Initialize the context related info for a new mm_struct
134 atomic_set(&mm->context.fp_mode_switching, 0); init_new_context()
165 * Destroy context related info for an mm_struct that is about
176 * the context for the new mm so we see the new mappings.
217 /* will get a new context next time */ drop_mmu_context()
/linux-4.1.27/arch/m68k/mm/
H A Dmcfmmu.c132 asid = mm->context & 0xff; cf_tlb_miss()
154 * Initialize the context management stuff.
161 * init_mm, and require using context 0 for a normal task. mmu_context_init()
162 * Other processors reserve the use of context zero for the kernel. mmu_context_init()
171 * Steal a context from a task that has one at the moment.
177 * This isn't an LRU system, it just frees up each context in
186 * free up context `next_mmu_context' steal_context()
187 * if we shouldn't free context 0, don't... steal_context()
/linux-4.1.27/drivers/media/usb/pvrusb2/
H A Dpvrusb2-dvb.h8 #include "pvrusb2-context.h"
/linux-4.1.27/drivers/nfc/
H A Dmei_phy.h26 void nfc_mei_event_cb(struct mei_cl_device *device, u32 events, void *context);
/linux-4.1.27/arch/unicore32/include/asm/
H A Dsuspend.h21 struct cpu_context_save cpu_context; /* cpu context */
/linux-4.1.27/drivers/gpu/drm/nouveau/include/nvkm/engine/
H A Dpm.h10 struct nvkm_perfctx *context; member in struct:nvkm_pm
/linux-4.1.27/arch/arc/include/uapi/asm/
H A Dsigcontext.h15 * Signal context structure - contains all info to do with the state
/linux-4.1.27/security/selinux/include/
H A Daudit.h44 * selinux_audit_rule_match - determine if a context ID matches a rule.
45 * @sid: the context ID to check
49 * @actx: the audit context (can be NULL) associated with the check
51 * Returns 1 if the context id matches the rule, 0 if it does not, and
/linux-4.1.27/drivers/acpi/acpica/
H A Devxface.c61 acpi_gpe_handler address, void *context);
76 * context - Value passed to the handler on each GPE
93 acpi_notify_handler handler, void *context) acpi_install_notify_handler()
132 acpi_gbl_global_notify[i].context = context; acpi_install_notify_handler()
202 handler_obj->notify.context = context; acpi_install_notify_handler()
287 acpi_gbl_global_notify[i].context = NULL; ACPI_EXPORT_SYMBOL()
416 * context - Value passed to the handler on each SCI ACPI_EXPORT_SYMBOL()
423 acpi_status acpi_install_sci_handler(acpi_sci_handler address, void *context) ACPI_EXPORT_SYMBOL()
444 new_sci_handler->context = context; ACPI_EXPORT_SYMBOL()
557 * context - Value passed to the handler on each event ACPI_EXPORT_SYMBOL()
568 acpi_install_global_event_handler(acpi_gbl_event_handler handler, void *context) ACPI_EXPORT_SYMBOL()
593 acpi_gbl_global_event_handler_context = context; ACPI_EXPORT_SYMBOL()
609 * context - Value passed to the handler on each GPE ACPI_EXPORT_SYMBOL()
619 acpi_event_handler handler, void *context) ACPI_EXPORT_SYMBOL()
646 acpi_gbl_fixed_event_handlers[event].context = context; ACPI_EXPORT_SYMBOL()
659 acpi_gbl_fixed_event_handlers[event].context = NULL; ACPI_EXPORT_SYMBOL()
711 acpi_gbl_fixed_event_handlers[event].context = NULL; ACPI_EXPORT_SYMBOL()
741 * context - Value passed to the handler on each GPE
754 acpi_gpe_handler address, void *context) acpi_ev_install_gpe_handler()
803 handler->context = context; acpi_ev_install_gpe_handler()
865 * context - Value passed to the handler on each GPE
876 u32 type, acpi_gpe_handler address, void *context) acpi_install_gpe_handler()
884 address, context); acpi_install_gpe_handler()
901 * context - Value passed to the handler on each GPE ACPI_EXPORT_SYMBOL()
911 u32 type, acpi_gpe_handler address, void *context) ACPI_EXPORT_SYMBOL()
918 address, context); ACPI_EXPORT_SYMBOL()
91 acpi_install_notify_handler(acpi_handle device, u32 handler_type, acpi_notify_handler handler, void *context) acpi_install_notify_handler() argument
750 acpi_ev_install_gpe_handler(acpi_handle gpe_device, u32 gpe_number, u32 type, u8 is_raw_handler, acpi_gpe_handler address, void *context) acpi_ev_install_gpe_handler() argument
874 acpi_install_gpe_handler(acpi_handle gpe_device, u32 gpe_number, u32 type, acpi_gpe_handler address, void *context) acpi_install_gpe_handler() argument
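
Each install routine above takes a void *context that ACPICA stores next to the handler and passes back verbatim on every invocation. A minimal registration against the real ACPICA interface (my_device_data and my_notify are hypothetical):

#include <linux/acpi.h>

struct my_device_data { int unit; };            /* hypothetical driver state */

static void my_notify(acpi_handle handle, u32 event, void *context)
{
        struct my_device_data *data = context;  /* the install-time cookie */

        /* ... react to the notification using our private data ... */
}

static acpi_status my_attach(acpi_handle handle, struct my_device_data *data)
{
        return acpi_install_notify_handler(handle, ACPI_DEVICE_NOTIFY,
                                           my_notify, data);
}
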
H A Dacevents.h117 void *context);
138 u32 level, void *context, void **return_value);
144 acpi_ev_walk_gpe_list(acpi_gpe_callback gpe_walk_callback, void *context);
148 struct acpi_gpe_block_info *gpe_block, void *context);
159 void *context);
174 acpi_adr_space_setup setup, void *context);
244 u32 ACPI_SYSTEM_XFACE acpi_ev_gpe_xrupt_handler(void *context);
/linux-4.1.27/drivers/gpu/drm/vmwgfx/
H A Dvmwgfx_context.c206 DRM_ERROR("Out of hw context ids.\n"); vmw_context_init()
263 DRM_ERROR("Failed to allocate a context id.\n"); vmw_gb_context_create()
274 DRM_ERROR("Failed reserving FIFO space for context " vmw_gb_context_create()
308 DRM_ERROR("Failed reserving FIFO space for context " vmw_gb_context_bind()
355 DRM_ERROR("Failed reserving FIFO space for context " vmw_gb_context_unbind()
405 DRM_ERROR("Failed reserving FIFO space for context " vmw_gb_context_destroy()
423 * User-space context management:
498 DRM_ERROR("Out of graphics memory for context" vmw_context_define_ioctl()
542 * vmw_context_scrub_shader - scrub a shader binding from a context.
574 * from a context.
608 * vmw_context_scrub_texture - scrub a texture binding from a context.
648 * vmw_context_binding_drop: Stop tracking a context binding
652 * Stops tracking a context binding, and re-initializes its storage.
653 * Typically used when the context binding is replaced with a binding to
665 * vmw_context_binding_add: Start tracking a context binding
667 * @cbs: Pointer to the context binding state tracker.
671 * bounds and then starts tracking the binding in the context binding
722 * vmw_context_binding_transfer: Transfer a context binding tracking entry.
724 * @cbs: Pointer to the persistent context binding state tracker.
779 * @cbs: Pointer to the context binding state tracker.
782 * context binding state tracker. Then re-initializes the whole structure.
796 * @cbs: Pointer to the context binding state tracker.
799 * context binding state tracker.
854 * @ctx: Pointer to context to commit the staged binding info to.
858 * structure in the context. This can be done once commands
872 * vmw_context_rebind_all - Rebind all scrubbed bindings of a context
874 * @ctx: The context resource
876 * Walks through the context binding list and rebinds all scrubbed
906 * vmw_context_binding_list - Return a list of context bindings
908 * @ctx: The context resource
910 * Returns the current list of bindings of the given context. Note that
/linux-4.1.27/drivers/uwb/i1480/dfu/
H A Ddfu.c43 * @context: expected context
53 const char *cmd, u8 context, u8 expected_type, i1480_rceb_check()
58 if (rceb->bEventContext != context) { i1480_rceb_check()
60 dev_err(dev, "%s: unexpected context id 0x%02x " i1480_rceb_check()
62 rceb->bEventContext, context); i1480_rceb_check()
100 u8 context; i1480_cmd() local
105 get_random_bytes(&context, 1); i1480_cmd()
106 } while (context == 0x00 || context == 0xff); i1480_cmd()
107 cmd->bCommandContext = context; i1480_cmd()
147 result = i1480_rceb_check(i1480, i1480->evt_buf, cmd_name, context, i1480_cmd()
52 i1480_rceb_check(const struct i1480 *i1480, const struct uwb_rceb *rceb, const char *cmd, u8 context, u8 expected_type, unsigned expected_event) i1480_rceb_check() argument
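
i1480_cmd() above tags each command with a random, nonzero context byte, and i1480_rceb_check() accepts a reply only if bEventContext echoes it; that is what pairs replies to commands on the shared event channel. The core of that scheme, standalone:

#include <stdint.h>
#include <stdlib.h>

/* Pick a context id; 0x00 and 0xff are excluded, as in i1480_cmd(). */
static uint8_t pick_context(void)
{
        uint8_t ctx;

        do {
                ctx = (uint8_t)rand();
        } while (ctx == 0x00 || ctx == 0xff);
        return ctx;
}

/* A reply belongs to our command only if it echoes the id we sent. */
static int reply_matches(uint8_t sent_ctx, uint8_t reply_ctx)
{
        return reply_ctx == sent_ctx;
}
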
/linux-4.1.27/arch/arm64/include/uapi/asm/
H A Dsigcontext.h22 * Signal context structure - contains all info to do with the state
38 * context. Such structures must be placed after the rt_sigframe on the stack
56 /* ESR_EL1 context */
/linux-4.1.27/tools/testing/selftests/powerpc/pmu/ebb/
H A Dpmae_handling_test.c16 * Test that the kernel properly handles PMAE across context switches.
22 * The kernel must make sure that when it context switches us back in, it
46 /* Try and get ourselves scheduled, to force a PMU context switch */ syscall_ebb_callee()
/linux-4.1.27/tools/testing/selftests/powerpc/tm/
H A Dtm-resched-dscr.c1 /* Test context switching to see if the DSCR SPR is correctly preserved
17 * If the abort was because of a context switch, check the DSCR value.
45 printf("Check DSCR TM context switch: "); test_body()
/linux-4.1.27/drivers/infiniband/hw/ehca/
H A Dehca_pd.c49 struct ib_ucontext *context, struct ib_udata *udata) ehca_alloc_pd()
56 ehca_err(device, "device=%p context=%p out of memory", ehca_alloc_pd()
57 device, context); ehca_alloc_pd()
69 * User PD: when context != -1 ehca_alloc_pd()
71 if (!context) { ehca_alloc_pd()
48 ehca_alloc_pd(struct ib_device *device, struct ib_ucontext *context, struct ib_udata *udata) ehca_alloc_pd() argument
/linux-4.1.27/arch/x86/math-emu/
H A Dfpu_system.h29 mutex_lock(&current->mm->context.lock); FPU_get_ldt_descriptor()
30 if (current->mm->context.ldt && seg < current->mm->context.ldt->size) FPU_get_ldt_descriptor()
31 ret = current->mm->context.ldt->entries[seg]; FPU_get_ldt_descriptor()
32 mutex_unlock(&current->mm->context.lock); FPU_get_ldt_descriptor()
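
FPU_get_ldt_descriptor() above is a tidy instance of a guarded lookup: take the lock, read the entry only if the table exists and the index is in range, otherwise fall back to a default, then unlock. The same shape outside the kernel (a userspace sketch using pthreads):

#include <pthread.h>

struct table {
        pthread_mutex_t lock;
        int *entries;                   /* may be NULL if never allocated */
        unsigned int size;
};

static int table_get(struct table *t, unsigned int i)
{
        int val = 0;                    /* default when the lookup fails */

        pthread_mutex_lock(&t->lock);
        if (t->entries && i < t->size)
                val = t->entries[i];
        pthread_mutex_unlock(&t->lock);
        return val;
}
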
/linux-4.1.27/net/sunrpc/
H A Dtimer.c29 * rpc_init_rtt - Initialize an RPC RTT estimator context
30 * @rt: context to initialize
52 * rpc_update_rtt - Update an RPC RTT estimator context
53 * @rt: context to update
93 * @rt: context to use for calculation
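
rpc_init_rtt()/rpc_update_rtt() above maintain a smoothed round-trip estimator in the Van Jacobson style. The underlying update rule, sketched in floating point for clarity (the kernel's version is fixed-point and tracked per request type):

struct rtt_est {
        double srtt;                    /* smoothed round-trip time */
        double sdrtt;                   /* smoothed mean deviation */
};

/* Classic gains: 1/8 for the mean, 1/4 for the deviation. */
static void rtt_update(struct rtt_est *rt, double measured)
{
        double err = measured - rt->srtt;

        rt->srtt  += err / 8;
        rt->sdrtt += ((err < 0 ? -err : err) - rt->sdrtt) / 4;
}

/* A retransmit timeout is the mean plus a multiple of the deviation. */
static double rtt_timeout(const struct rtt_est *rt)
{
        return rt->srtt + 4 * rt->sdrtt;
}
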
/linux-4.1.27/fs/ntfs/
H A Dindex.h36 * @idx_ni: index inode containing the @entry described by this context
42 * @actx: attribute search context if @is_in_root and NULL otherwise
47 * @idx_ni is the index inode this context belongs to.
49 * @entry is the index entry described by this context. @data and @data_len
54 * by the attribute search context @actx and the base inode @base_ni. @ia and
61 * To obtain a context call ntfs_index_ctx_get().
63 * We use this context to allow ntfs_index_lookup() to return the found index
68 * free the context and other associated resources.
98 * @ictx: ntfs index context describing the index entry
121 * @ictx: ntfs index context describing the index entry
123 * Mark the index entry described by the index entry context @ictx dirty.
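
The comment above pins down a fixed calling sequence: obtain a context, look up the key, use or modify the entry the context now describes, mark it dirty if modified, and release the context. In outline, using the functions fs/ntfs/index.h declares (error handling abbreviated):

static int example_lookup(ntfs_inode *idx_ni, const void *key, int key_len)
{
        ntfs_index_context *ictx = ntfs_index_ctx_get(idx_ni);
        int err;

        if (!ictx)
                return -ENOMEM;
        err = ntfs_index_lookup(key, key_len, ictx);
        if (!err) {
                /* ictx->entry and ictx->data now describe the found entry;
                 * modify it in place (within data_len) and mark it dirty. */
                ntfs_index_entry_mark_dirty(ictx);
        }
        ntfs_index_ctx_put(ictx);       /* unlocks and releases everything */
        return err;
}
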
/linux-4.1.27/drivers/media/firewire/
H A Dfiredtv-fw.c77 struct fw_iso_context *context; member in struct:fdtv_ir_context
93 return fw_iso_context_queue(ctx->context, &p, &ctx->buffer, queue_iso()
97 static void handle_iso(struct fw_iso_context *context, u32 cycle, handle_iso() argument
127 fw_iso_context_queue_flush(ctx->context); handle_iso()
141 ctx->context = fw_iso_context_create(device->card, fdtv_start_iso()
144 if (IS_ERR(ctx->context)) { fdtv_start_iso()
145 err = PTR_ERR(ctx->context); fdtv_start_iso()
166 err = fw_iso_context_start(ctx->context, -1, 0, fdtv_start_iso()
177 fw_iso_context_destroy(ctx->context); fdtv_start_iso()
188 fw_iso_context_stop(ctx->context); fdtv_stop_iso()
190 fw_iso_context_destroy(ctx->context); fdtv_stop_iso()
/linux-4.1.27/arch/sh/include/asm/
H A Dmmu_context.h18 * The MMU "context" consists of two things:
38 #define cpu_context(cpu, mm) ((mm)->context.id[cpu])
55 * Get MMU context if needed.
61 /* Check if we have an old version of the context. */ get_mmu_context()
66 /* It's old; we need to get a new context with the new version. */ get_mmu_context()
94 * Initialize the context related info for a new mm_struct
110 * the context for the new mm so we see the new mappings.
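
The scheme the sh header describes, an ASID in the low bits and a generation ("version") in the high bits, makes get_mmu_context() almost trivial: reuse the mm's context if its version is current, otherwise hand out the next ASID. A compact sketch (illustrative widths; real kernels also flush the TLB when the ASID space wraps):

#include <stdint.h>

#define ASID_BITS       8
#define ASID_MASK       ((1u << ASID_BITS) - 1)
#define VERSION(x)      ((x) & ~ASID_MASK)

static uint32_t asid_cache = 1u << ASID_BITS;   /* version 1, first ASID */

static uint32_t get_mmu_context(uint32_t mm_context)
{
        if (VERSION(mm_context) == VERSION(asid_cache))
                return mm_context;      /* version still current: keep it */

        /* Stale version: allocate the next ASID.  A wrap of the low bits
         * carries into the version bits, starting a new generation (at
         * which point a real kernel flushes the TLB). */
        return ++asid_cache;
}
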

Completed in 5658 milliseconds
