Searched refs:new (Results 1 - 200 of 6646) sorted by relevance


/linux-4.1.27/tools/build/feature/
test-sync-compare-and-swap.c
7 uint64_t old, new = argc; main() local
12 } while (!__sync_bool_compare_and_swap(&x, old, new)); main()
13 return old == new; main()
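The feature test above probes the GCC __sync_bool_compare_and_swap() builtin inside a retry loop. A minimal, self-contained userspace sketch of the same idiom (not taken from the kernel tree; the helper name is invented):

#include <stdint.h>
#include <stdio.h>

/* Atomically add "delta" to *p with a compare-and-swap retry loop,
 * the same pattern the feature test exercises. */
static uint64_t cas_add(uint64_t *p, uint64_t delta)
{
        uint64_t old, new;

        do {
                old = *p;              /* snapshot the current value */
                new = old + delta;     /* compute the desired value  */
        } while (!__sync_bool_compare_and_swap(p, old, new));

        return new;
}

int main(void)
{
        uint64_t x = 0;

        printf("%llu\n", (unsigned long long)cas_add(&x, 5));
        return 0;
}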
/linux-4.1.27/fs/nfsd/
auth.c
24 struct cred *new; nfsd_setuser() local
30 /* discard any old override before preparing the new set */ nfsd_setuser()
32 new = prepare_creds(); nfsd_setuser()
33 if (!new) nfsd_setuser()
36 new->fsuid = rqstp->rq_cred.cr_uid; nfsd_setuser()
37 new->fsgid = rqstp->rq_cred.cr_gid; nfsd_setuser()
42 new->fsuid = exp->ex_anon_uid; nfsd_setuser()
43 new->fsgid = exp->ex_anon_gid; nfsd_setuser()
48 if (uid_eq(new->fsuid, GLOBAL_ROOT_UID)) nfsd_setuser()
49 new->fsuid = exp->ex_anon_uid; nfsd_setuser()
50 if (gid_eq(new->fsgid, GLOBAL_ROOT_GID)) nfsd_setuser()
51 new->fsgid = exp->ex_anon_gid; nfsd_setuser()
67 if (uid_eq(new->fsuid, INVALID_UID)) nfsd_setuser()
68 new->fsuid = exp->ex_anon_uid; nfsd_setuser()
69 if (gid_eq(new->fsgid, INVALID_GID)) nfsd_setuser()
70 new->fsgid = exp->ex_anon_gid; nfsd_setuser()
72 set_groups(new, gi); nfsd_setuser()
75 if (!uid_eq(new->fsuid, GLOBAL_ROOT_UID)) nfsd_setuser()
76 new->cap_effective = cap_drop_nfsd_set(new->cap_effective); nfsd_setuser()
78 new->cap_effective = cap_raise_nfsd_set(new->cap_effective, nfsd_setuser()
79 new->cap_permitted); nfsd_setuser()
81 put_cred(override_creds(new)); nfsd_setuser()
82 put_cred(new); nfsd_setuser()
87 abort_creds(new); nfsd_setuser()
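The nfsd_setuser() lines above follow the kernel's credential-override pattern: copy the current creds with prepare_creds(), adjust fsuid/fsgid on the copy, then install it with override_creds(). A hedged sketch of that shape (kernel build assumed; the helper name and its use of revert_creds() for symmetry are illustrative, whereas nfsd itself drops the previous override directly):

#include <linux/cred.h>
#include <linux/errno.h>

/* Hypothetical helper: act as a different fs identity for one operation. */
static int do_as_user(kuid_t fsuid, kgid_t fsgid)
{
        const struct cred *saved;
        struct cred *new;

        new = prepare_creds();                /* modifiable copy of current creds */
        if (!new)
                return -ENOMEM;

        new->fsuid = fsuid;
        new->fsgid = fsgid;

        saved = override_creds(new);          /* install, remember the old creds */
        /* ... perform the filesystem work under the new identity ... */
        revert_creds(saved);                  /* restore the previous creds */

        put_cred(new);                        /* drop our local reference */
        return 0;
}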
export.c
66 static struct svc_expkey *svc_expkey_update(struct cache_detail *cd, struct svc_expkey *new,
193 struct svc_expkey *new = container_of(b, struct svc_expkey, h); expkey_match() local
195 if (orig->ek_fsidtype != new->ek_fsidtype || expkey_match()
196 orig->ek_client != new->ek_client || expkey_match()
197 memcmp(orig->ek_fsid, new->ek_fsid, key_len(orig->ek_fsidtype)) != 0) expkey_match()
205 struct svc_expkey *new = container_of(cnew, struct svc_expkey, h); expkey_init() local
209 new->ek_client = item->ek_client; expkey_init()
210 new->ek_fsidtype = item->ek_fsidtype; expkey_init()
212 memcpy(new->ek_fsid, item->ek_fsid, sizeof(new->ek_fsid)); expkey_init()
218 struct svc_expkey *new = container_of(cnew, struct svc_expkey, h); expkey_update() local
221 new->ek_path = item->ek_path; expkey_update()
275 svc_expkey_update(struct cache_detail *cd, struct svc_expkey *new, svc_expkey_update() argument
279 int hash = svc_expkey_hash(new); svc_expkey_update()
281 ch = sunrpc_cache_update(cd, &new->h, &old->h, hash); svc_expkey_update()
338 static struct svc_export *svc_export_update(struct svc_export *new,
595 * new values, then see what the result was. svc_export_parse()
692 struct svc_export *new = container_of(b, struct svc_export, h); svc_export_match() local
693 return orig->ex_client == new->ex_client && svc_export_match()
694 path_equal(&orig->ex_path, &new->ex_path); svc_export_match()
699 struct svc_export *new = container_of(cnew, struct svc_export, h); svc_export_init() local
703 new->ex_client = item->ex_client; svc_export_init()
704 new->ex_path = item->ex_path; svc_export_init()
706 new->ex_fslocs.locations = NULL; svc_export_init()
707 new->ex_fslocs.locations_count = 0; svc_export_init()
708 new->ex_fslocs.migrated = 0; svc_export_init()
709 new->ex_layout_type = 0; svc_export_init()
710 new->ex_uuid = NULL; svc_export_init()
711 new->cd = item->cd; svc_export_init()
716 struct svc_export *new = container_of(cnew, struct svc_export, h); export_update() local
720 new->ex_flags = item->ex_flags; export_update()
721 new->ex_anon_uid = item->ex_anon_uid; export_update()
722 new->ex_anon_gid = item->ex_anon_gid; export_update()
723 new->ex_fsid = item->ex_fsid; export_update()
724 new->ex_devid_map = item->ex_devid_map; export_update()
726 new->ex_uuid = item->ex_uuid; export_update()
728 new->ex_fslocs.locations = item->ex_fslocs.locations; export_update()
730 new->ex_fslocs.locations_count = item->ex_fslocs.locations_count; export_update()
732 new->ex_fslocs.migrated = item->ex_fslocs.migrated; export_update()
734 new->ex_layout_type = item->ex_layout_type; export_update()
735 new->ex_nflavors = item->ex_nflavors; export_update()
737 new->ex_flavors[i] = item->ex_flavors[i]; export_update()
789 svc_export_update(struct svc_export *new, struct svc_export *old) svc_export_update() argument
794 ch = sunrpc_cache_update(old->cd, &new->h, &old->h, hash); svc_export_update()
/linux-4.1.27/kernel/
cred.c
203 struct cred *new; cred_alloc_blank() local
205 new = kmem_cache_zalloc(cred_jar, GFP_KERNEL); cred_alloc_blank()
206 if (!new) cred_alloc_blank()
209 atomic_set(&new->usage, 1); cred_alloc_blank()
211 new->magic = CRED_MAGIC; cred_alloc_blank()
214 if (security_cred_alloc_blank(new, GFP_KERNEL) < 0) cred_alloc_blank()
217 return new; cred_alloc_blank()
220 abort_creds(new); cred_alloc_blank()
225 * prepare_creds - Prepare a new set of credentials for modification
227 * Prepare a new set of task credentials for modification. A task's creds
229 * prepare a new copy, which the caller then modifies and then commits by
234 * Returns a pointer to the new creds-to-be if successful, NULL otherwise.
242 struct cred *new; prepare_creds() local
246 new = kmem_cache_alloc(cred_jar, GFP_KERNEL); prepare_creds()
247 if (!new) prepare_creds()
250 kdebug("prepare_creds() alloc %p", new); prepare_creds()
253 memcpy(new, old, sizeof(struct cred)); prepare_creds()
255 atomic_set(&new->usage, 1); prepare_creds()
256 set_cred_subscribers(new, 0); prepare_creds()
257 get_group_info(new->group_info); prepare_creds()
258 get_uid(new->user); prepare_creds()
259 get_user_ns(new->user_ns); prepare_creds()
262 key_get(new->session_keyring); prepare_creds()
263 key_get(new->process_keyring); prepare_creds()
264 key_get(new->thread_keyring); prepare_creds()
265 key_get(new->request_key_auth); prepare_creds()
269 new->security = NULL; prepare_creds()
272 if (security_prepare_creds(new, old, GFP_KERNEL) < 0) prepare_creds()
274 validate_creds(new); prepare_creds()
275 return new; prepare_creds()
278 abort_creds(new); prepare_creds()
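The prepare_creds() kerneldoc above describes the prepare/modify/commit life cycle for task credentials: commit_creds() consumes the reference on success, abort_creds() discards it on an error path. A minimal sketch of that pattern (kernel build assumed; the function name and the fsgid change are illustrative only):

#include <linux/cred.h>
#include <linux/errno.h>

/* Sketch of the prepare_creds()/commit_creds() pattern documented above. */
static int change_my_fsgid(kgid_t kgid)
{
        struct cred *new;

        new = prepare_creds();        /* copy of the current task's creds */
        if (!new)
                return -ENOMEM;

        new->fsgid = kgid;            /* modify the copy */

        return commit_creds(new);     /* commit eats the reference */
        /* on an error path before commit, call abort_creds(new) instead */
}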
289 struct cred *new; prepare_exec_creds() local
291 new = prepare_creds(); prepare_exec_creds()
292 if (!new) prepare_exec_creds()
293 return new; prepare_exec_creds()
297 key_put(new->thread_keyring); prepare_exec_creds()
298 new->thread_keyring = NULL; prepare_exec_creds()
300 /* inherit the session keyring; new process keyring */ prepare_exec_creds()
301 key_put(new->process_keyring); prepare_exec_creds()
302 new->process_keyring = NULL; prepare_exec_creds()
305 return new; prepare_exec_creds()
309 * Copy credentials for the new process created by fork()
311 * We share if we can, but under some circumstances we have to generate a new
314 * The new process gets the current process's subjective credentials as its
319 struct cred *new; copy_creds() local
338 new = prepare_creds(); copy_creds()
339 if (!new) copy_creds()
343 ret = create_user_ns(new); copy_creds()
349 /* new threads get their own thread keyrings if their parent already copy_creds()
351 if (new->thread_keyring) { copy_creds()
352 key_put(new->thread_keyring); copy_creds()
353 new->thread_keyring = NULL; copy_creds()
355 install_thread_keyring_to_cred(new); copy_creds()
362 key_put(new->process_keyring); copy_creds()
363 new->process_keyring = NULL; copy_creds()
367 atomic_inc(&new->user->processes); copy_creds()
368 p->cred = p->real_cred = get_cred(new); copy_creds()
369 alter_cred_subscribers(new, 2); copy_creds()
370 validate_creds(new); copy_creds()
374 put_cred(new); copy_creds()
404 * commit_creds - Install new credentials upon the current task
405 * @new: The credentials to be assigned
407 * Install a new set of credentials to the current task, using RCU to replace
412 * This function eats the caller's reference to the new credentials.
417 int commit_creds(struct cred *new) commit_creds() argument
422 kdebug("commit_creds(%p{%d,%d})", new, commit_creds()
423 atomic_read(&new->usage), commit_creds()
424 read_cred_subscribers(new)); commit_creds()
430 validate_creds(new); commit_creds()
432 BUG_ON(atomic_read(&new->usage) < 1); commit_creds()
434 get_cred(new); /* we will require a ref for the subj creds too */ commit_creds()
437 if (!uid_eq(old->euid, new->euid) || commit_creds()
438 !gid_eq(old->egid, new->egid) || commit_creds()
439 !uid_eq(old->fsuid, new->fsuid) || commit_creds()
440 !gid_eq(old->fsgid, new->fsgid) || commit_creds()
441 !cred_cap_issubset(old, new)) { commit_creds()
449 if (!uid_eq(new->fsuid, old->fsuid)) commit_creds()
451 if (!gid_eq(new->fsgid, old->fsgid)) commit_creds()
458 alter_cred_subscribers(new, 2); commit_creds()
459 if (new->user != old->user) commit_creds()
460 atomic_inc(&new->user->processes); commit_creds()
461 rcu_assign_pointer(task->real_cred, new); commit_creds()
462 rcu_assign_pointer(task->cred, new); commit_creds()
463 if (new->user != old->user) commit_creds()
468 if (!uid_eq(new->uid, old->uid) || commit_creds()
469 !uid_eq(new->euid, old->euid) || commit_creds()
470 !uid_eq(new->suid, old->suid) || commit_creds()
471 !uid_eq(new->fsuid, old->fsuid)) commit_creds()
474 if (!gid_eq(new->gid, old->gid) || commit_creds()
475 !gid_eq(new->egid, old->egid) || commit_creds()
476 !gid_eq(new->sgid, old->sgid) || commit_creds()
477 !gid_eq(new->fsgid, old->fsgid)) commit_creds()
489 * @new: The credentials that were going to be applied
494 void abort_creds(struct cred *new) abort_creds() argument
496 kdebug("abort_creds(%p{%d,%d})", new, abort_creds()
497 atomic_read(&new->usage), abort_creds()
498 read_cred_subscribers(new)); abort_creds()
501 BUG_ON(read_cred_subscribers(new) != 0); abort_creds()
503 BUG_ON(atomic_read(&new->usage) < 1); abort_creds()
504 put_cred(new); abort_creds()
510 * @new: The credentials to be assigned
515 const struct cred *override_creds(const struct cred *new) override_creds() argument
519 kdebug("override_creds(%p{%d,%d})", new, override_creds()
520 atomic_read(&new->usage), override_creds()
521 read_cred_subscribers(new)); override_creds()
524 validate_creds(new); override_creds()
525 get_cred(new); override_creds()
526 alter_cred_subscribers(new, 1); override_creds()
527 rcu_assign_pointer(current->cred, new); override_creds()
585 * Returns the new credentials or NULL if out of memory.
592 struct cred *new; prepare_kernel_cred() local
594 new = kmem_cache_alloc(cred_jar, GFP_KERNEL); prepare_kernel_cred()
595 if (!new) prepare_kernel_cred()
598 kdebug("prepare_kernel_cred() alloc %p", new); prepare_kernel_cred()
607 *new = *old; prepare_kernel_cred()
608 atomic_set(&new->usage, 1); prepare_kernel_cred()
609 set_cred_subscribers(new, 0); prepare_kernel_cred()
610 get_uid(new->user); prepare_kernel_cred()
611 get_user_ns(new->user_ns); prepare_kernel_cred()
612 get_group_info(new->group_info); prepare_kernel_cred()
615 new->session_keyring = NULL; prepare_kernel_cred()
616 new->process_keyring = NULL; prepare_kernel_cred()
617 new->thread_keyring = NULL; prepare_kernel_cred()
618 new->request_key_auth = NULL; prepare_kernel_cred()
619 new->jit_keyring = KEY_REQKEY_DEFL_THREAD_KEYRING; prepare_kernel_cred()
623 new->security = NULL; prepare_kernel_cred()
625 if (security_prepare_creds(new, old, GFP_KERNEL) < 0) prepare_kernel_cred()
629 validate_creds(new); prepare_kernel_cred()
630 return new; prepare_kernel_cred()
633 put_cred(new); prepare_kernel_cred()
641 * @new: The credentials to alter
647 int set_security_override(struct cred *new, u32 secid) set_security_override() argument
649 return security_kernel_act_as(new, secid); set_security_override()
655 * @new: The credentials to alter
663 int set_security_override_from_ctx(struct cred *new, const char *secctx) set_security_override_from_ctx() argument
672 return set_security_override(new, secid); set_security_override_from_ctx()
678 * @new: The credentials to alter
682 * as the object context of the specified inode, so that the new inodes have
685 int set_create_files_as(struct cred *new, struct inode *inode) set_create_files_as() argument
687 new->fsuid = inode->i_uid; set_create_files_as()
688 new->fsgid = inode->i_gid; set_create_files_as()
689 return security_kernel_create_files_as(new, inode); set_create_files_as()
resource.c
208 static struct resource * __request_resource(struct resource *root, struct resource *new) __request_resource() argument
210 resource_size_t start = new->start; __request_resource()
211 resource_size_t end = new->end; __request_resource()
224 new->sibling = tmp; __request_resource()
225 *p = new; __request_resource()
226 new->parent = root; __request_resource()
288 * @new: resource descriptor desired by caller
292 struct resource *request_resource_conflict(struct resource *root, struct resource *new) request_resource_conflict() argument
297 conflict = __request_resource(root, new); request_resource_conflict()
305 * @new: resource descriptor desired by caller
309 int request_resource(struct resource *root, struct resource *new) request_resource() argument
313 conflict = request_resource_conflict(root, new); request_resource()
557 struct resource *new, __find_resource()
562 struct resource tmp = *new, avail, alloc; __find_resource()
588 avail.flags = new->flags & ~IORESOURCE_UNSET; __find_resource()
595 new->start = alloc.start; __find_resource()
596 new->end = alloc.end; __find_resource()
614 static int find_resource(struct resource *root, struct resource *new, find_resource() argument
618 return __find_resource(root, NULL, new, size, constraint); find_resource()
623 * The resource will be relocated if the new size cannot be reallocated in the
628 * @newsize: new size of the resource descriptor
636 struct resource new = *old; reallocate_resource() local
641 if ((err = __find_resource(root, old, &new, newsize, constraint))) reallocate_resource()
644 if (resource_contains(&new, old)) { reallocate_resource()
645 old->start = new.start; reallocate_resource()
646 old->end = new.end; reallocate_resource()
655 if (resource_contains(old, &new)) { reallocate_resource()
656 old->start = new.start; reallocate_resource()
657 old->end = new.end; reallocate_resource()
660 *old = new; reallocate_resource()
672 * The resource will be reallocated with a new size if it was already allocated
674 * @new: resource descriptor desired by caller
682 int allocate_resource(struct resource *root, struct resource *new, allocate_resource() argument
703 if ( new->parent ) { allocate_resource()
705 the new constraints */ allocate_resource()
706 return reallocate_resource(root, new, size, &constraint); allocate_resource()
710 err = find_resource(root, new, size, &constraint); allocate_resource()
711 if (err >= 0 && __request_resource(root, new)) allocate_resource()
744 static struct resource * __insert_resource(struct resource *parent, struct resource *new) __insert_resource() argument
749 first = __request_resource(parent, new); __insert_resource()
755 if (WARN_ON(first == new)) /* duplicated insertion */ __insert_resource()
758 if ((first->start > new->start) || (first->end < new->end)) __insert_resource()
760 if ((first->start == new->start) && (first->end == new->end)) __insert_resource()
766 if (next->start < new->start || next->end > new->end) __insert_resource()
770 if (next->sibling->start > new->end) __insert_resource()
774 new->parent = parent; __insert_resource()
775 new->sibling = next->sibling; __insert_resource()
776 new->child = first; __insert_resource()
780 next->parent = new; __insert_resource()
783 parent->child = new; __insert_resource()
788 next->sibling = new; __insert_resource()
795 * @parent: parent of the new resource
796 * @new: new resource to insert
802 * entirely fit within the range of the new resource, then the new
804 * the new resource.
806 struct resource *insert_resource_conflict(struct resource *parent, struct resource *new) insert_resource_conflict() argument
811 conflict = __insert_resource(parent, new); insert_resource_conflict()
818 * @parent: parent of the new resource
819 * @new: new resource to insert
823 int insert_resource(struct resource *parent, struct resource *new) insert_resource() argument
827 conflict = insert_resource_conflict(parent, new); insert_resource()
834 * @new: new resource to insert
839 void insert_resource_expand_to_fit(struct resource *root, struct resource *new) insert_resource_expand_to_fit() argument
841 if (new->parent) insert_resource_expand_to_fit()
848 conflict = __insert_resource(root, new); insert_resource_expand_to_fit()
855 if (conflict->start < new->start) insert_resource_expand_to_fit()
856 new->start = conflict->start; insert_resource_expand_to_fit()
857 if (conflict->end > new->end) insert_resource_expand_to_fit()
858 new->end = conflict->end; insert_resource_expand_to_fit()
860 printk("Expanded resource %s due to conflict with %s\n", new->name, conflict->name); insert_resource_expand_to_fit()
905 * @start: new start value
906 * @size: new size
1035 * request_region creates a new busy region.
1043 * __request_region - create a new busy resource region
1265 * @new: descriptor of the resource to request
1280 struct resource *new) devm_request_resource()
1288 *ptr = new; devm_request_resource()
1290 conflict = request_resource_conflict(root, new); devm_request_resource()
1293 new, conflict->name, conflict); devm_request_resource()
1313 * @new: descriptor of the resource to release
1317 void devm_release_resource(struct device *dev, struct resource *new) devm_release_resource() argument
1320 new)); devm_release_resource()
556 __find_resource(struct resource *root, struct resource *old, struct resource *new, resource_size_t size, struct resource_constraint *constraint) __find_resource() argument
1279 devm_request_resource(struct device *dev, struct resource *root, struct resource *new) devm_request_resource() argument
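resource.c above maintains the resource tree used for I/O memory and port ranges; request_resource() inserts a descriptor under a root and returns an error if the range conflicts with an existing entry. A hedged sketch of claiming a region (kernel build assumed; the device name and address range are invented):

#include <linux/ioport.h>

/* Illustrative only: the address range below is made up. */
static struct resource example_region = {
        .name  = "example-device",
        .start = 0xfe000000,
        .end   = 0xfe000fff,
        .flags = IORESOURCE_MEM,
};

static int claim_example_region(void)
{
        /* Fails if the range overlaps a resource already in the tree. */
        return request_resource(&iomem_resource, &example_region);
}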
user.c
174 struct user_struct *up, *new; alloc_uid() local
181 new = kmem_cache_zalloc(uid_cachep, GFP_KERNEL); alloc_uid()
182 if (!new) alloc_uid()
185 new->uid = uid; alloc_uid()
186 atomic_set(&new->__count, 1); alloc_uid()
195 key_put(new->uid_keyring); alloc_uid()
196 key_put(new->session_keyring); alloc_uid()
197 kmem_cache_free(uid_cachep, new); alloc_uid()
199 uid_hash_insert(new, hashent); alloc_uid()
200 up = new; alloc_uid()
groups.c
156 * @new: The newly prepared set of credentials to alter
159 void set_groups(struct cred *new, struct group_info *group_info) set_groups() argument
161 put_group_info(new->group_info); set_groups()
164 new->group_info = group_info; set_groups()
178 struct cred *new; set_current_groups() local
180 new = prepare_creds(); set_current_groups()
181 if (!new) set_current_groups()
184 set_groups(new, group_info); set_current_groups()
185 return commit_creds(new); set_current_groups()
sys.c
315 * equal to the real gid, then the saved gid is set to the new effective gid.
333 struct cred *new; SYSCALL_DEFINE2() local
345 new = prepare_creds(); SYSCALL_DEFINE2()
346 if (!new) SYSCALL_DEFINE2()
355 new->gid = krgid; SYSCALL_DEFINE2()
364 new->egid = kegid; SYSCALL_DEFINE2()
371 new->sgid = new->egid; SYSCALL_DEFINE2()
372 new->fsgid = new->egid; SYSCALL_DEFINE2()
374 return commit_creds(new); SYSCALL_DEFINE2()
377 abort_creds(new); SYSCALL_DEFINE2()
390 struct cred *new; SYSCALL_DEFINE1() local
398 new = prepare_creds(); SYSCALL_DEFINE1()
399 if (!new) SYSCALL_DEFINE1()
405 new->gid = new->egid = new->sgid = new->fsgid = kgid; SYSCALL_DEFINE1()
407 new->egid = new->fsgid = kgid; SYSCALL_DEFINE1()
411 return commit_creds(new); SYSCALL_DEFINE1()
414 abort_creds(new); SYSCALL_DEFINE1()
419 * change the user struct in a credentials set to match the new UID
421 static int set_user(struct cred *new) set_user() argument
425 new_user = alloc_uid(new->uid); set_user()
442 free_uid(new->user); set_user()
443 new->user = new_user; set_user()
452 * equal to the real uid, then the saved uid is set to the new effective uid.
466 struct cred *new; SYSCALL_DEFINE2() local
478 new = prepare_creds(); SYSCALL_DEFINE2()
479 if (!new) SYSCALL_DEFINE2()
485 new->uid = kruid; SYSCALL_DEFINE2()
493 new->euid = keuid; SYSCALL_DEFINE2()
501 if (!uid_eq(new->uid, old->uid)) { SYSCALL_DEFINE2()
502 retval = set_user(new); SYSCALL_DEFINE2()
508 new->suid = new->euid; SYSCALL_DEFINE2()
509 new->fsuid = new->euid; SYSCALL_DEFINE2()
511 retval = security_task_fix_setuid(new, old, LSM_SETID_RE); SYSCALL_DEFINE2()
515 return commit_creds(new); SYSCALL_DEFINE2()
518 abort_creds(new); SYSCALL_DEFINE2()
537 struct cred *new; SYSCALL_DEFINE1() local
545 new = prepare_creds(); SYSCALL_DEFINE1()
546 if (!new) SYSCALL_DEFINE1()
552 new->suid = new->uid = kuid; SYSCALL_DEFINE1()
554 retval = set_user(new); SYSCALL_DEFINE1()
558 } else if (!uid_eq(kuid, old->uid) && !uid_eq(kuid, new->suid)) { SYSCALL_DEFINE1()
562 new->fsuid = new->euid = kuid; SYSCALL_DEFINE1()
564 retval = security_task_fix_setuid(new, old, LSM_SETID_ID); SYSCALL_DEFINE1()
568 return commit_creds(new); SYSCALL_DEFINE1()
571 abort_creds(new); SYSCALL_DEFINE1()
584 struct cred *new; SYSCALL_DEFINE3() local
601 new = prepare_creds(); SYSCALL_DEFINE3()
602 if (!new) SYSCALL_DEFINE3()
621 new->uid = kruid; SYSCALL_DEFINE3()
623 retval = set_user(new); SYSCALL_DEFINE3()
629 new->euid = keuid; SYSCALL_DEFINE3()
631 new->suid = ksuid; SYSCALL_DEFINE3()
632 new->fsuid = new->euid; SYSCALL_DEFINE3()
634 retval = security_task_fix_setuid(new, old, LSM_SETID_RES); SYSCALL_DEFINE3()
638 return commit_creds(new); SYSCALL_DEFINE3()
641 abort_creds(new); SYSCALL_DEFINE3()
671 struct cred *new; SYSCALL_DEFINE3() local
686 new = prepare_creds(); SYSCALL_DEFINE3()
687 if (!new) SYSCALL_DEFINE3()
705 new->gid = krgid; SYSCALL_DEFINE3()
707 new->egid = kegid; SYSCALL_DEFINE3()
709 new->sgid = ksgid; SYSCALL_DEFINE3()
710 new->fsgid = new->egid; SYSCALL_DEFINE3()
712 return commit_creds(new); SYSCALL_DEFINE3()
715 abort_creds(new); SYSCALL_DEFINE3()
749 struct cred *new; SYSCALL_DEFINE1() local
760 new = prepare_creds(); SYSCALL_DEFINE1()
761 if (!new) SYSCALL_DEFINE1()
768 new->fsuid = kuid; SYSCALL_DEFINE1()
769 if (security_task_fix_setuid(new, old, LSM_SETID_FS) == 0) SYSCALL_DEFINE1()
774 abort_creds(new); SYSCALL_DEFINE1()
778 commit_creds(new); SYSCALL_DEFINE1()
788 struct cred *new; SYSCALL_DEFINE1() local
799 new = prepare_creds(); SYSCALL_DEFINE1()
800 if (!new) SYSCALL_DEFINE1()
807 new->fsgid = kgid; SYSCALL_DEFINE1()
812 abort_creds(new); SYSCALL_DEFINE1()
816 commit_creds(new); SYSCALL_DEFINE1()
1453 struct rlimit old, new; SYSCALL_DEFINE4() local
1460 rlim64_to_rlim(&new64, &new); SYSCALL_DEFINE4()
1477 ret = do_prlimit(tsk, resource, new_rlim ? &new : NULL, SYSCALL_DEFINE4()
1509 * reads are atomic, we either get new values or old values and we don't
1711 /* set the new file, lockless */ prctl_set_mm_exe_file()
2003 * to fill vector with new values. It's up prctl_set_mm()
/linux-4.1.27/arch/avr32/include/asm/
cmpxchg.h
47 unsigned long new) __cmpxchg_u32()
56 " stcond %[m], %[new]\n" __cmpxchg_u32()
60 : "m"(m), [old] "ir"(old), [new] "r"(new) __cmpxchg_u32()
66 volatile int * m, unsigned long old, unsigned long new);
76 unsigned long new, int size) __cmpxchg()
80 return __cmpxchg_u32(ptr, old, new); __cmpxchg()
82 return __cmpxchg_u64(ptr, old, new); __cmpxchg()
89 #define cmpxchg(ptr, old, new) \
91 (unsigned long)(new), \
98 unsigned long new, int size) __cmpxchg_local()
102 return __cmpxchg_u32(ptr, old, new); __cmpxchg_local()
104 return __cmpxchg_local_generic(ptr, old, new, size); __cmpxchg_local()
110 #define cmpxchg_local(ptr, old, new) \
112 (unsigned long)(new), \
46 __cmpxchg_u32(volatile int *m, unsigned long old, unsigned long new) __cmpxchg_u32() argument
75 __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size) __cmpxchg() argument
96 __cmpxchg_local(volatile void *ptr, unsigned long old, unsigned long new, int size) __cmpxchg_local() argument
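cmpxchg(ptr, old, new) above returns the value that was actually found in *ptr, so callers retry until the value they predicted is the one they observed. A generic sketch of that retry loop (kernel build assumed; the helper and its saturation behaviour are illustrative):

#include <linux/compiler.h>
#include <linux/atomic.h>

/* Saturating increment built on the cmpxchg() retry idiom shown above. */
static unsigned long saturating_inc(unsigned long *counter, unsigned long max)
{
        unsigned long old, new;

        do {
                old = READ_ONCE(*counter);
                if (old == max)
                        return old;          /* already saturated */
                new = old + 1;
        } while (cmpxchg(counter, old, new) != old);

        return new;
}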
/linux-4.1.27/lib/
list_debug.c
16 * Insert a new entry between two known consecutive entries.
22 void __list_add(struct list_head *new, __list_add() argument
34 WARN(new == prev || new == next, __list_add()
35 "list_add double add: new=%p, prev=%p, next=%p.\n", __list_add()
36 new, prev, next); __list_add()
37 next->prev = new; __list_add()
38 new->next = next; __list_add()
39 new->prev = prev; __list_add()
40 prev->next = new; __list_add()
86 void __list_add_rcu(struct list_head *new, __list_add_rcu() argument
95 new->next = next; __list_add_rcu()
96 new->prev = prev; __list_add_rcu()
97 rcu_assign_pointer(list_next_rcu(prev), new); __list_add_rcu() local
98 next->prev = new; __list_add_rcu()
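The debug checks above guard __list_add(), the low-level helper behind list_add() and list_add_tail(). A small sketch of typical usage (kernel build assumed; the item type and list are hypothetical):

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/errno.h>

struct item {
        int value;
        struct list_head node;    /* links this item into item_list */
};

static LIST_HEAD(item_list);

/* Append a new item; list_add_tail() ends up in __list_add() above. */
static int add_item(int value)
{
        struct item *it = kmalloc(sizeof(*it), GFP_KERNEL);

        if (!it)
                return -ENOMEM;
        it->value = value;
        list_add_tail(&it->node, &item_list);
        return 0;
}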
lockref.c
23 struct lockref new = old, prev = old; \
27 new.lock_count); \
51 new.count++; lockref_get()
72 new.count++; lockref_get_not_zero()
99 new.count++; lockref_get_or_lock()
119 * Decrement the reference count and return the new value.
125 new.count--; lockref_put_return()
129 return new.count; lockref_put_return()
143 new.count--; lockref_put_or_lock()
180 new.count++; lockref_get_not_dead()
rbtree_test.c
26 struct rb_node **new = &root->rb_node, *parent = NULL; insert() local
29 while (*new) { insert()
30 parent = *new; insert()
32 new = &parent->rb_left; insert()
34 new = &parent->rb_right; insert()
37 rb_link_node(&node->rb, parent, new); insert()
69 struct rb_node **new = &root->rb_node, *rb_parent = NULL; insert_augmented() local
74 while (*new) { insert_augmented()
75 rb_parent = *new; insert_augmented()
80 new = &parent->rb.rb_left; insert_augmented()
82 new = &parent->rb.rb_right; insert_augmented()
86 rb_link_node(&node->rb, rb_parent, new); insert_augmented()
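The insert() helper above shows the standard two-step rbtree insertion: walk down to find the empty link slot, then rb_link_node() plus rb_insert_color() to splice and rebalance. A hedged sketch with a hypothetical key field:

#include <linux/rbtree.h>

struct knode {
        struct rb_node rb;
        unsigned long key;        /* hypothetical ordering key */
};

/* Same shape as insert() above: descend, link, then rebalance. */
static void knode_insert(struct rb_root *root, struct knode *node)
{
        struct rb_node **link = &root->rb_node, *parent = NULL;

        while (*link) {
                struct knode *cur = rb_entry(*link, struct knode, rb);

                parent = *link;
                if (node->key < cur->key)
                        link = &parent->rb_left;
                else
                        link = &parent->rb_right;
        }

        rb_link_node(&node->rb, parent, link);
        rb_insert_color(&node->rb, root);
}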
idr.c
15 * a new id quick.
82 * idr_layer_alloc - allocate a new idr_layer
96 struct idr_layer *new; idr_layer_alloc() local
109 new = kmem_cache_zalloc(idr_layer_cache, gfp_mask | __GFP_NOWARN); idr_layer_alloc()
110 if (new) idr_layer_alloc()
111 return new; idr_layer_alloc()
119 new = __this_cpu_read(idr_preload_head); idr_layer_alloc()
120 if (new) { idr_layer_alloc()
121 __this_cpu_write(idr_preload_head, new->ary[0]); idr_layer_alloc()
123 new->ary[0] = NULL; idr_layer_alloc()
126 if (new) idr_layer_alloc()
127 return new; idr_layer_alloc()
195 struct idr_layer *new; __idr_pre_get() local
196 new = kmem_cache_zalloc(idr_layer_cache, gfp_mask); __idr_pre_get()
197 if (new == NULL) __idr_pre_get()
199 move_to_free_list(idp, new); __idr_pre_get()
224 struct idr_layer *p, *new; sub_alloc() local
273 new = idr_layer_alloc(gfp_mask, layer_idr); sub_alloc()
274 if (!new) sub_alloc()
276 new->layer = l-1; sub_alloc()
277 new->prefix = id & idr_layer_prefix_mask(new->layer); sub_alloc()
278 rcu_assign_pointer(p->ary[m], new); sub_alloc()
293 struct idr_layer *p, *new; idr_get_empty_slot() local
308 * Add a new layer to the top of the tree if the requested idr_get_empty_slot()
322 if (!(new = idr_layer_alloc(gfp_mask, layer_idr))) { idr_get_empty_slot()
328 for (new = p; p && p != idp->top; new = p) { idr_get_empty_slot()
330 new->ary[0] = NULL; idr_get_empty_slot()
331 new->count = 0; idr_get_empty_slot()
332 bitmap_clear(new->bitmap, 0, IDR_SIZE); idr_get_empty_slot()
333 __move_to_free_list(idp, new); idr_get_empty_slot()
338 new->ary[0] = p; idr_get_empty_slot()
339 new->count = 1; idr_get_empty_slot()
340 new->layer = layers-1; idr_get_empty_slot()
341 new->prefix = id & idr_layer_prefix_mask(new->layer); idr_get_empty_slot()
343 __set_bit(0, new->bitmap); idr_get_empty_slot()
344 p = new; idr_get_empty_slot()
414 struct idr_layer *new; idr_preload() local
417 new = kmem_cache_zalloc(idr_layer_cache, gfp_mask); idr_preload()
419 if (!new) idr_preload()
422 /* link the new one to per-cpu preload list */ idr_preload()
423 new->ary[0] = __this_cpu_read(idr_preload_head); idr_preload()
424 __this_cpu_write(idr_preload_head, new); idr_preload()
431 * idr_alloc - allocate new idr entry
433 * @ptr: pointer to be associated with the new id
477 * idr_alloc_cyclic - allocate new idr entry in a cyclical fashion
479 * @ptr: pointer to be associated with the new id
918 * ida_get_new_above - allocate new ID above or equal to a start id
923 * Allocate new ID above or equal to @starting_id. It should be called
954 /* if bitmap isn't there, create a new one */ ida_get_new_above()
1067 * ida_simple_get - get a new id.
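idr_alloc() above hands out a new integer ID for a pointer, and idr_preload()/idr_preload_end() let callers preallocate layers so the allocation itself can run under a spinlock with GFP_NOWAIT. A minimal sketch of that usage (kernel build assumed; the idr, lock, and ID range are hypothetical):

#include <linux/idr.h>
#include <linux/spinlock.h>
#include <linux/gfp.h>

static DEFINE_IDR(obj_idr);
static DEFINE_SPINLOCK(obj_lock);

/* Assign a new ID >= 1 to @obj (end of 0 means no upper limit). */
static int obj_register(void *obj)
{
        int id;

        idr_preload(GFP_KERNEL);          /* preallocate outside the lock */
        spin_lock(&obj_lock);
        id = idr_alloc(&obj_idr, obj, 1, 0, GFP_NOWAIT);
        spin_unlock(&obj_lock);
        idr_preload_end();

        return id;                        /* negative errno on failure */
}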
/linux-4.1.27/arch/sparc/kernel/
ftrace.c
25 static int ftrace_modify_code(unsigned long ip, u32 old, u32 new) ftrace_modify_code() argument
31 "1: cas [%[ip]], %[old], %[new]\n" ftrace_modify_code()
46 : [new] "0" (new), [old] "r" (old), [ip] "r" (ip) ftrace_modify_code()
49 if (replaced != old && replaced != new) ftrace_modify_code()
58 u32 old, new; ftrace_make_nop() local
61 new = ftrace_nop; ftrace_make_nop()
62 return ftrace_modify_code(ip, old, new); ftrace_make_nop()
68 u32 old, new; ftrace_make_call() local
71 new = ftrace_call_replace(ip, addr); ftrace_make_call()
72 return ftrace_modify_code(ip, old, new); ftrace_make_call()
78 u32 old, new; ftrace_update_ftrace_func() local
81 new = ftrace_call_replace(ip, (unsigned long)func); ftrace_update_ftrace_func()
82 return ftrace_modify_code(ip, old, new); ftrace_update_ftrace_func()
99 u32 old, new; ftrace_enable_ftrace_graph_caller() local
102 new = ftrace_call_replace(ip, (unsigned long) &ftrace_graph_caller); ftrace_enable_ftrace_graph_caller()
103 return ftrace_modify_code(ip, old, new); ftrace_enable_ftrace_graph_caller()
109 u32 old, new; ftrace_disable_ftrace_graph_caller() local
112 new = ftrace_call_replace(ip, (unsigned long) &ftrace_stub); ftrace_disable_ftrace_graph_caller()
114 return ftrace_modify_code(ip, old, new); ftrace_disable_ftrace_graph_caller()
/linux-4.1.27/tools/perf/util/
comm.c
50 struct comm_str *iter, *new; comm_str__findnew() local
67 new = comm_str__alloc(str); comm_str__findnew()
68 if (!new) comm_str__findnew()
71 rb_link_node(&new->rb_node, parent, p); comm_str__findnew()
72 rb_insert_color(&new->rb_node, root); comm_str__findnew()
74 return new; comm_str__findnew()
100 struct comm_str *new, *old = comm->comm_str; comm__override() local
102 new = comm_str__findnew(str, &comm_str_root); comm__override()
103 if (!new) comm__override()
106 comm_str__get(new); comm__override()
108 comm->comm_str = new; comm__override()
ordered-events.c
14 static void queue_event(struct ordered_events *oe, struct ordered_event *new) queue_event() argument
17 u64 timestamp = new->timestamp; queue_event()
21 oe->last = new; queue_event()
26 list_add(&new->list, &oe->events); queue_event()
33 * the last queued event. We expect that the new event is close to queue_event()
40 list_add_tail(&new->list, &oe->events); queue_event()
46 list_add_tail(&new->list, &last->list); queue_event()
51 list_add(&new->list, &oe->events); queue_event()
56 list_add(&new->list, &last->list); queue_event()
93 struct ordered_event *new = NULL; alloc_event() local
101 new = list_entry(cache->next, struct ordered_event, list); alloc_event()
102 list_del(&new->list); alloc_event()
104 new = oe->buffer + oe->buffer_idx; alloc_event()
108 size_t size = MAX_SAMPLE_BUFFER * sizeof(*new); alloc_event()
124 new = oe->buffer + 1; alloc_event()
129 new->event = new_event; alloc_event()
130 return new; alloc_event()
137 struct ordered_event *new; ordered_events__new_event() local
139 new = alloc_event(oe, event); ordered_events__new_event()
140 if (new) { ordered_events__new_event()
141 new->timestamp = timestamp; ordered_events__new_event()
142 queue_event(oe, new); ordered_events__new_event()
145 return new; ordered_events__new_event()
callchain.c
402 * Create a child for a parent. If inherit_children, then the new child
403 * will become the new parent of it's parent children
408 struct callchain_node *new; create_child() local
410 new = zalloc(sizeof(*new)); create_child()
411 if (!new) { create_child()
415 new->parent = parent; create_child()
416 INIT_LIST_HEAD(&new->val); create_child()
422 new->rb_root_in = parent->rb_root_in; create_child()
425 n = rb_first(&new->rb_root_in); create_child()
428 child->parent = new; create_child()
433 rb_link_node(&new->rb_node_in, NULL, &parent->rb_root_in.rb_node); create_child()
434 rb_insert_color(&new->rb_node_in, &parent->rb_root_in); create_child()
437 return new; create_child()
478 struct callchain_node *new; add_child() local
480 new = create_child(parent, false); add_child()
481 fill_node(new, cursor); add_child()
483 new->children_hit = 0; add_child()
484 new->hit = period; add_child()
485 return new; add_child()
501 * Split the parent in two parts (a new child is created) and
503 * Then create another child to host the given callchain of new branch
511 struct callchain_node *new; split_add_child() local
516 new = create_child(parent, true); split_add_child()
518 /* split the callchain and move a part to the new child */ split_add_child()
521 new->val.next = &to_split->list; split_add_child()
522 new->val.prev = old_tail; split_add_child()
523 to_split->list.prev = &new->val; split_add_child()
524 old_tail->next = &new->val; split_add_child()
527 new->hit = parent->hit; split_add_child()
528 new->children_hit = parent->children_hit; split_add_child()
529 parent->children_hit = callchain_cumul_hits(new); split_add_child()
530 new->val_nr = parent->val_nr - idx_local; split_add_child()
533 /* create a new child for the new branch if any */ split_add_child()
544 new = add_child(parent, cursor, period); split_add_child()
548 * to new (first) child above. split_add_child()
560 rb_link_node(&new->rb_node_in, p, pp); split_add_child()
561 rb_insert_color(&new->rb_node_in, &parent->rb_root_in); split_add_child()
653 /* we match only a part of the node. Split it and add the new chain */ append_chain()
strfilter.h
21 * strfilter__new - Create a new string filter
25 * Parse @rules and return new strfilter. Return NULL if an error detected.
/linux-4.1.27/drivers/staging/lustre/lustre/obdclass/
acl.c
96 /* if "new_count == 0", then "new = {a_version, NULL}", NOT NULL. */ lustre_posix_acl_xattr_reduce_space()
102 posix_acl_xattr_header *new; lustre_posix_acl_xattr_reduce_space() local
107 OBD_ALLOC(new, new_size); lustre_posix_acl_xattr_reduce_space()
108 if (unlikely(new == NULL)) lustre_posix_acl_xattr_reduce_space()
111 memcpy(new, *header, new_size); lustre_posix_acl_xattr_reduce_space()
113 *header = new; lustre_posix_acl_xattr_reduce_space()
117 /* if "new_count == 0", then "new = {0, NULL}", NOT NULL. */ lustre_ext_acl_xattr_reduce_space()
124 ext_acl_xattr_header *new; lustre_ext_acl_xattr_reduce_space() local
129 OBD_ALLOC(new, ext_size); lustre_ext_acl_xattr_reduce_space()
130 if (unlikely(new == NULL)) lustre_ext_acl_xattr_reduce_space()
133 memcpy(new, *header, ext_size); lustre_ext_acl_xattr_reduce_space()
135 *header = new; lustre_ext_acl_xattr_reduce_space()
140 * Generate new extended ACL based on the posix ACL.
146 ext_acl_xattr_header *new; lustre_posix_acl_xattr_2ext() local
155 OBD_ALLOC(new, esize); lustre_posix_acl_xattr_2ext()
156 if (unlikely(new == NULL)) lustre_posix_acl_xattr_2ext()
159 new->a_count = cpu_to_le32(count); lustre_posix_acl_xattr_2ext()
161 new->a_entries[i].e_tag = header->a_entries[i].e_tag; lustre_posix_acl_xattr_2ext()
162 new->a_entries[i].e_perm = header->a_entries[i].e_perm; lustre_posix_acl_xattr_2ext()
163 new->a_entries[i].e_id = header->a_entries[i].e_id; lustre_posix_acl_xattr_2ext()
164 new->a_entries[i].e_stat = cpu_to_le32(ES_UNK); lustre_posix_acl_xattr_2ext()
167 return new; lustre_posix_acl_xattr_2ext()
179 posix_acl_xattr_header *new; lustre_posix_acl_xattr_filter() local
183 if (size < sizeof(*new)) lustre_posix_acl_xattr_filter()
186 OBD_ALLOC(new, size); lustre_posix_acl_xattr_filter()
187 if (unlikely(new == NULL)) lustre_posix_acl_xattr_filter()
190 new->a_version = cpu_to_le32(CFS_ACL_XATTR_VERSION); lustre_posix_acl_xattr_filter()
204 memcpy(&new->a_entries[j++], &header->a_entries[i], lustre_posix_acl_xattr_filter()
209 memcpy(&new->a_entries[j++], lustre_posix_acl_xattr_filter()
215 memcpy(&new->a_entries[j++], lustre_posix_acl_xattr_filter()
226 rc = lustre_posix_acl_xattr_reduce_space(&new, count, j); lustre_posix_acl_xattr_filter()
229 *out = new; lustre_posix_acl_xattr_filter()
235 OBD_FREE(new, size); lustre_posix_acl_xattr_filter()
294 * Merge the posix ACL and the extended ACL into new posix ACL.
303 posix_acl_xattr_header *new; lustre_acl_xattr_merge2posix() local
312 OBD_ALLOC(new, posix_size); lustre_acl_xattr_merge2posix()
313 if (unlikely(new == NULL)) lustre_acl_xattr_merge2posix()
316 new->a_version = cpu_to_le32(CFS_ACL_XATTR_VERSION); lustre_acl_xattr_merge2posix()
330 new->a_entries[j].e_tag = lustre_acl_xattr_merge2posix()
332 new->a_entries[j].e_perm = lustre_acl_xattr_merge2posix()
334 new->a_entries[j++].e_id = lustre_acl_xattr_merge2posix()
363 OBD_ALLOC(new, posix_size); lustre_acl_xattr_merge2posix()
364 if (unlikely(new == NULL)) lustre_acl_xattr_merge2posix()
367 new->a_version = cpu_to_le32(CFS_ACL_XATTR_VERSION); lustre_acl_xattr_merge2posix()
375 memcpy(&new->a_entries[j++], lustre_acl_xattr_merge2posix()
385 new->a_entries[j].e_tag = lustre_acl_xattr_merge2posix()
387 new->a_entries[j].e_perm = lustre_acl_xattr_merge2posix()
389 new->a_entries[j++].e_id = lustre_acl_xattr_merge2posix()
396 rc = lustre_posix_acl_xattr_reduce_space(&new, posix_count, j); lustre_acl_xattr_merge2posix()
399 *out = new; lustre_acl_xattr_merge2posix()
405 OBD_FREE(new, posix_size); lustre_acl_xattr_merge2posix()
413 * Merge the posix ACL and the extended ACL into new extended ACL.
422 ext_acl_xattr_header *new; lustre_acl_xattr_merge2ext() local
435 OBD_ALLOC(new, ext_size); lustre_acl_xattr_merge2ext()
436 if (unlikely(new == NULL)) lustre_acl_xattr_merge2ext()
455 new->a_entries[j].e_tag = lustre_acl_xattr_merge2ext()
457 new->a_entries[j].e_perm = lustre_acl_xattr_merge2ext()
459 new->a_entries[j].e_id = lustre_acl_xattr_merge2ext()
468 new->a_entries[j++].e_stat = lustre_acl_xattr_merge2ext()
473 new->a_entries[j++].e_stat = lustre_acl_xattr_merge2ext()
476 /* new entry. */ lustre_acl_xattr_merge2ext()
477 new->a_entries[j++].e_stat = lustre_acl_xattr_merge2ext()
485 new->a_entries[j].e_tag = lustre_acl_xattr_merge2ext()
487 new->a_entries[j].e_perm = lustre_acl_xattr_merge2ext()
489 new->a_entries[j].e_id = lustre_acl_xattr_merge2ext()
498 new->a_entries[j++].e_stat = lustre_acl_xattr_merge2ext()
503 new->a_entries[j++].e_stat = lustre_acl_xattr_merge2ext()
506 /* new entry. */ lustre_acl_xattr_merge2ext()
507 new->a_entries[j++].e_stat = lustre_acl_xattr_merge2ext()
526 new->a_entries[j].e_tag = lustre_acl_xattr_merge2ext()
528 new->a_entries[j].e_perm = lustre_acl_xattr_merge2ext()
530 new->a_entries[j].e_id = ext_header->a_entries[i].e_id; lustre_acl_xattr_merge2ext()
531 new->a_entries[j++].e_stat = cpu_to_le32(ES_DEL); lustre_acl_xattr_merge2ext()
535 new->a_count = cpu_to_le32(j); lustre_acl_xattr_merge2ext()
537 rc = lustre_ext_acl_xattr_reduce_space(&new, ext_count); lustre_acl_xattr_merge2ext()
541 OBD_FREE(new, ext_size); lustre_acl_xattr_merge2ext()
542 new = ERR_PTR(rc); lustre_acl_xattr_merge2ext()
544 return new; lustre_acl_xattr_merge2ext()
/linux-4.1.27/arch/ia64/include/asm/
acenv.h
25 unsigned int old, new, val; ia64_acpi_acquire_global_lock()
28 new = (((old & ~0x3) + 2) + ((old >> 1) & 0x1)); ia64_acpi_acquire_global_lock()
29 val = ia64_cmpxchg4_acq(lock, new, old); ia64_acpi_acquire_global_lock()
31 return (new < 3) ? -1 : 0; ia64_acpi_acquire_global_lock()
37 unsigned int old, new, val; ia64_acpi_release_global_lock()
40 new = old & ~0x3; ia64_acpi_release_global_lock()
41 val = ia64_cmpxchg4_acq(lock, new, old); ia64_acpi_release_global_lock()
atomic.h
34 __s32 old, new; \
40 new = old c_op i; \
41 } while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic_t)) != old); \
42 return new; \
78 __s64 old, new; \
84 new = old c_op i; \
85 } while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic64_t)) != old); \
86 return new; \
118 #define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new))
119 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
121 #define atomic64_cmpxchg(v, old, new) \
122 (cmpxchg(&((v)->counter), old, new))
123 #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
rwsem.h
55 long old, new; __down_write()
59 new = old + RWSEM_ACTIVE_WRITE_BIAS; __down_write()
60 } while (cmpxchg_acq(&sem->count, old, new) != old); __down_write()
84 long old, new; __up_write()
88 new = old - RWSEM_ACTIVE_WRITE_BIAS; __up_write()
89 } while (cmpxchg_rel(&sem->count, old, new) != old); __up_write()
91 if (new < 0 && (new & RWSEM_ACTIVE_MASK) == 0) __up_write()
127 long old, new; __downgrade_write()
131 new = old - RWSEM_WAITING_BIAS; __downgrade_write()
132 } while (cmpxchg_rel(&sem->count, old, new) != old); __downgrade_write()
bitops.h
41 __u32 bit, old, new; set_bit()
50 new = old | bit; set_bit()
51 } while (cmpxchg_acq(m, old, new) != old); set_bit()
82 __u32 mask, old, new; clear_bit()
91 new = old & mask; clear_bit()
92 } while (cmpxchg_acq(m, old, new) != old); clear_bit()
106 __u32 mask, old, new; clear_bit_unlock()
115 new = old & mask; clear_bit_unlock()
116 } while (cmpxchg_rel(m, old, new) != old); clear_bit_unlock()
131 __u32 const new = *m & ~(1 << (nr & 31)); __clear_bit_unlock()
133 ia64_st4_rel_nta(m, new); __clear_bit_unlock()
163 __u32 bit, old, new; change_bit()
172 new = old ^ bit; change_bit()
173 } while (cmpxchg_acq(m, old, new) != old); change_bit()
202 __u32 bit, old, new; test_and_set_bit()
211 new = old | bit; test_and_set_bit()
212 } while (cmpxchg_acq(m, old, new) != old); test_and_set_bit()
256 __u32 mask, old, new; test_and_clear_bit()
265 new = old & mask; test_and_clear_bit()
266 } while (cmpxchg_acq(m, old, new) != old); test_and_clear_bit()
301 __u32 bit, old, new; test_and_change_bit()
310 new = old ^ bit; test_and_change_bit()
311 } while (cmpxchg_acq(m, old, new) != old); test_and_change_bit()
/linux-4.1.27/fs/cachefiles/
security.c
22 struct cred *new; cachefiles_get_security_ID() local
27 new = prepare_kernel_cred(current); cachefiles_get_security_ID()
28 if (!new) { cachefiles_get_security_ID()
34 ret = set_security_override_from_ctx(new, cache->secctx); cachefiles_get_security_ID()
36 put_cred(new); cachefiles_get_security_ID()
43 cache->cache_cred = new; cachefiles_get_security_ID()
83 struct cred *new; cachefiles_determine_cache_security() local
90 new = prepare_creds(); cachefiles_determine_cache_security()
91 if (!new) cachefiles_determine_cache_security()
98 ret = set_create_files_as(new, d_backing_inode(root)); cachefiles_determine_cache_security()
100 abort_creds(new); cachefiles_determine_cache_security()
107 cache->cache_cred = new; cachefiles_determine_cache_security()
/linux-4.1.27/include/asm-generic/
user.h
5 * used for a.out files, which are not supported on new architectures.
cmpxchg-local.h
15 unsigned long old, unsigned long new, int size) __cmpxchg_local_generic()
29 *(u8 *)ptr = (u8)new; __cmpxchg_local_generic()
33 *(u16 *)ptr = (u16)new; __cmpxchg_local_generic()
37 *(u32 *)ptr = (u32)new; __cmpxchg_local_generic()
41 *(u64 *)ptr = (u64)new; __cmpxchg_local_generic()
54 u64 old, u64 new) __cmpxchg64_local_generic()
62 *(u64 *)ptr = new; __cmpxchg64_local_generic()
14 __cmpxchg_local_generic(volatile void *ptr, unsigned long old, unsigned long new, int size) __cmpxchg_local_generic() argument
53 __cmpxchg64_local_generic(volatile void *ptr, u64 old, u64 new) __cmpxchg64_local_generic() argument
hw_irq.h
6 * In general, this is not needed for new architectures.
atomic-long.h
134 #define atomic_long_cmpxchg(l, old, new) \
135 (atomic64_cmpxchg((atomic64_t *)(l), (old), (new)))
136 #define atomic_long_xchg(v, new) \
137 (atomic64_xchg((atomic64_t *)(v), (new)))
251 #define atomic_long_cmpxchg(l, old, new) \
252 (atomic_cmpxchg((atomic_t *)(l), (old), (new)))
253 #define atomic_long_xchg(v, new) \
254 (atomic_xchg((atomic_t *)(v), (new)))
/linux-4.1.27/arch/s390/include/asm/
rwsem.h
54 signed long old, new; __down_read()
62 : "=&d" (old), "=&d" (new), "=Q" (sem->count) __down_read()
74 signed long old, new; __down_read_trylock()
84 : "=&d" (old), "=&d" (new), "=Q" (sem->count) __down_read_trylock()
95 signed long old, new, tmp; __down_write_nested()
104 : "=&d" (old), "=&d" (new), "=Q" (sem->count) __down_write_nested()
141 signed long old, new; __up_read()
149 : "=&d" (old), "=&d" (new), "=Q" (sem->count) __up_read()
152 if (new < 0) __up_read()
153 if ((new & RWSEM_ACTIVE_MASK) == 0) __up_read()
162 signed long old, new, tmp; __up_write()
171 : "=&d" (old), "=&d" (new), "=Q" (sem->count) __up_write()
174 if (new < 0) __up_write()
175 if ((new & RWSEM_ACTIVE_MASK) == 0) __up_write()
184 signed long old, new, tmp; __downgrade_write()
193 : "=&d" (old), "=&d" (new), "=Q" (sem->count) __downgrade_write()
196 if (new > 1) __downgrade_write()
205 signed long old, new; rwsem_atomic_add()
213 : "=&d" (old), "=&d" (new), "=Q" (sem->count) rwsem_atomic_add()
223 signed long old, new; rwsem_atomic_update()
231 : "=&d" (old), "=&d" (new), "=Q" (sem->count) rwsem_atomic_update()
234 return new; rwsem_atomic_update()
atomic.h
131 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
133 static inline int atomic_cmpxchg(atomic_t *v, int old, int new) atomic_cmpxchg() argument
138 : "d" (new) atomic_cmpxchg()
260 #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
263 long long old, long long new) atomic64_cmpxchg()
268 : "d" (new) atomic64_cmpxchg()
262 atomic64_cmpxchg(atomic64_t *v, long long old, long long new) atomic64_cmpxchg() argument
/linux-4.1.27/fs/nfs/blocklayout/
extent_tree.c
126 struct pnfs_block_extent *new, bool merge_ok) __ext_tree_insert()
135 if (new->be_f_offset < be->be_f_offset) { __ext_tree_insert()
136 if (merge_ok && ext_can_merge(new, be)) { __ext_tree_insert()
137 be->be_f_offset = new->be_f_offset; __ext_tree_insert()
139 be->be_v_offset = new->be_v_offset; __ext_tree_insert()
140 be->be_length += new->be_length; __ext_tree_insert()
145 } else if (new->be_f_offset >= ext_f_end(be)) { __ext_tree_insert()
146 if (merge_ok && ext_can_merge(be, new)) { __ext_tree_insert()
147 be->be_length += new->be_length; __ext_tree_insert()
157 rb_link_node(&new->be_node, parent, p); __ext_tree_insert()
158 rb_insert_color(&new->be_node, root); __ext_tree_insert()
161 nfs4_put_deviceid_node(new->be_device); __ext_tree_insert()
162 kfree(new); __ext_tree_insert()
189 struct pnfs_block_extent *new; __ext_tree_remove() local
191 new = kzalloc(sizeof(*new), GFP_ATOMIC); __ext_tree_remove()
192 if (!new) __ext_tree_remove()
197 new->be_f_offset = end; __ext_tree_remove()
199 new->be_v_offset = __ext_tree_remove()
202 new->be_length = len2; __ext_tree_remove()
203 new->be_state = be->be_state; __ext_tree_remove()
204 new->be_tag = be->be_tag; __ext_tree_remove()
205 new->be_device = nfs4_get_deviceid(be->be_device); __ext_tree_remove()
207 __ext_tree_insert(root, new, true); __ext_tree_remove()
244 ext_tree_insert(struct pnfs_block_layout *bl, struct pnfs_block_extent *new) ext_tree_insert() argument
250 switch (new->be_state) { ext_tree_insert()
266 be = __ext_tree_search(root, new->be_f_offset); ext_tree_insert()
267 if (!be || be->be_f_offset >= ext_f_end(new)) { ext_tree_insert()
268 __ext_tree_insert(root, new, true); ext_tree_insert()
269 } else if (new->be_f_offset >= be->be_f_offset) { ext_tree_insert()
270 if (ext_f_end(new) <= ext_f_end(be)) { ext_tree_insert()
271 nfs4_put_deviceid_node(new->be_device); ext_tree_insert()
272 kfree(new); ext_tree_insert()
274 sector_t new_len = ext_f_end(new) - ext_f_end(be); ext_tree_insert()
275 sector_t diff = new->be_length - new_len; ext_tree_insert()
277 new->be_f_offset += diff; ext_tree_insert()
278 new->be_v_offset += diff; ext_tree_insert()
279 new->be_length = new_len; ext_tree_insert()
282 } else if (ext_f_end(new) <= ext_f_end(be)) { ext_tree_insert()
283 new->be_length = be->be_f_offset - new->be_f_offset; ext_tree_insert()
284 __ext_tree_insert(root, new, true); ext_tree_insert()
287 sector_t new_len = ext_f_end(new) - ext_f_end(be); ext_tree_insert()
288 sector_t diff = new->be_length - new_len; ext_tree_insert()
290 split = kmemdup(new, sizeof(*new), GFP_ATOMIC); ext_tree_insert()
297 split->be_device = nfs4_get_deviceid(new->be_device); ext_tree_insert()
300 new->be_f_offset += diff; ext_tree_insert()
301 new->be_v_offset += diff; ext_tree_insert()
302 new->be_length = new_len; ext_tree_insert()
370 struct pnfs_block_extent *new; ext_tree_split() local
373 new = kzalloc(sizeof(*new), GFP_ATOMIC); ext_tree_split()
374 if (!new) ext_tree_split()
379 new->be_f_offset = split; ext_tree_split()
381 new->be_v_offset = be->be_v_offset + be->be_length; ext_tree_split()
382 new->be_length = orig_len - be->be_length; ext_tree_split()
383 new->be_state = be->be_state; ext_tree_split()
384 new->be_tag = be->be_tag; ext_tree_split()
385 new->be_device = nfs4_get_deviceid(be->be_device); ext_tree_split()
387 __ext_tree_insert(root, new, false); ext_tree_split()
125 __ext_tree_insert(struct rb_root *root, struct pnfs_block_extent *new, bool merge_ok) __ext_tree_insert() argument
/linux-4.1.27/arch/arm/kernel/
ftrace.c
103 unsigned long new, bool validate) ftrace_modify_code()
109 new = __opcode_to_mem_thumb32(new); ftrace_modify_code()
112 new = __opcode_to_mem_arm(new); ftrace_modify_code()
123 if (probe_kernel_write((void *)pc, &new, MCOUNT_INSN_SIZE)) ftrace_modify_code()
134 unsigned long new; ftrace_update_ftrace_func() local
138 new = ftrace_call_replace(pc, (unsigned long)func); ftrace_update_ftrace_func()
140 ret = ftrace_modify_code(pc, 0, new, false); ftrace_update_ftrace_func()
145 new = ftrace_call_replace(pc, (unsigned long)func); ftrace_update_ftrace_func()
147 ret = ftrace_modify_code(pc, 0, new, false); ftrace_update_ftrace_func()
156 unsigned long new, old; ftrace_make_call() local
160 new = ftrace_call_replace(ip, adjust_address(rec, addr)); ftrace_make_call()
162 return ftrace_modify_code(rec->ip, old, new, true); ftrace_make_call()
170 unsigned long new; ftrace_make_nop() local
174 new = ftrace_nop_replace(rec); ftrace_make_nop()
175 ret = ftrace_modify_code(ip, old, new, true); ftrace_make_nop()
182 new = ftrace_nop_replace(rec); ftrace_make_nop()
183 ret = ftrace_modify_code(ip, old, new, true); ftrace_make_nop()
241 unsigned long new = enable ? branch : nop; __ftrace_modify_caller() local
243 return ftrace_modify_code(pc, old, new, true); __ftrace_modify_caller()
102 ftrace_modify_code(unsigned long pc, unsigned long old, unsigned long new, bool validate) ftrace_modify_code() argument
/linux-4.1.27/scripts/dtc/
livetree.c
29 struct label *new; add_label() local
32 for_each_label_withdel(*labels, new) add_label()
33 if (streq(new->label, label)) { add_label()
34 new->deleted = 0; add_label()
38 new = xmalloc(sizeof(*new)); add_label()
39 memset(new, 0, sizeof(*new)); add_label()
40 new->label = label; add_label()
41 new->next = *labels; add_label()
42 *labels = new; add_label()
55 struct property *new = xmalloc(sizeof(*new)); build_property() local
57 memset(new, 0, sizeof(*new)); build_property()
59 new->name = name; build_property()
60 new->val = val; build_property()
62 return new; build_property()
67 struct property *new = xmalloc(sizeof(*new)); build_property_delete() local
69 memset(new, 0, sizeof(*new)); build_property_delete()
71 new->name = name; build_property_delete()
72 new->deleted = 1; build_property_delete()
74 return new; build_property_delete()
102 struct node *new = xmalloc(sizeof(*new)); build_node() local
105 memset(new, 0, sizeof(*new)); build_node()
107 new->proplist = reverse_properties(proplist); build_node()
108 new->children = children; build_node()
110 for_each_child(new, child) { for_each_child()
111 child->parent = new; for_each_child()
114 return new;
119 struct node *new = xmalloc(sizeof(*new)); build_node_delete() local
121 memset(new, 0, sizeof(*new)); build_node_delete()
123 new->deleted = 1; build_node_delete()
125 return new; build_node_delete()
145 /* Add new node labels to old node */ merge_nodes()
149 /* Move properties from the new node to the old node. If there merge_nodes()
150 * is a collision, replace the old value with the new */ merge_nodes()
163 /* Look for a collision, set new value if there is */ for_each_property_withdel()
166 /* Add new labels to old property */ for_each_property_withdel()
212 /* The new node contents are now merged into the old node. Free
213 * the new node. */
301 struct reserve_info *new = xmalloc(sizeof(*new)); build_reserve_entry() local
303 memset(new, 0, sizeof(*new)); build_reserve_entry()
305 new->re.address = address; build_reserve_entry()
306 new->re.size = size; build_reserve_entry()
308 return new; build_reserve_entry()
321 struct reserve_info *new) add_reserve_entry()
325 new->next = NULL; add_reserve_entry()
328 return new; add_reserve_entry()
333 last->next = new; add_reserve_entry()
320 add_reserve_entry(struct reserve_info *list, struct reserve_info *new) add_reserve_entry() argument
/linux-4.1.27/scripts/
bloat-o-meter
32 new = getsizes(sys.argv[2]) variable
37 if a in new:
46 for name in new:
49 up += new[name]
50 delta.append((new[name], name))
53 d = new.get(name, 0) - old.get(name, 0)
63 print("%-40s %7s %7s %+7s" % ("function", "old", "new", "delta"))
65 if d: print("%-40s %7s %7s %+7d" % (n, old.get(n,"-"), new.get(n,"-"), d))
/linux-4.1.27/arch/sparc/include/asm/
cmpxchg_64.h
71 __cmpxchg_u32(volatile int *m, int old, int new) __cmpxchg_u32() argument
74 : "=&r" (new) __cmpxchg_u32()
75 : "0" (new), "r" (m), "r" (old) __cmpxchg_u32()
78 return new; __cmpxchg_u32()
82 __cmpxchg_u64(volatile long *m, unsigned long old, unsigned long new) __cmpxchg_u64() argument
85 : "=&r" (new) __cmpxchg_u64()
86 : "0" (new), "r" (m), "r" (old) __cmpxchg_u64()
89 return new; __cmpxchg_u64()
97 __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size) __cmpxchg() argument
101 return __cmpxchg_u32(ptr, old, new); __cmpxchg()
103 return __cmpxchg_u64(ptr, old, new); __cmpxchg()
124 unsigned long new, int size) __cmpxchg_local()
128 case 8: return __cmpxchg(ptr, old, new, size); __cmpxchg_local()
130 return __cmpxchg_local_generic(ptr, old, new, size); __cmpxchg_local()
122 __cmpxchg_local(volatile void *ptr, unsigned long old, unsigned long new, int size) __cmpxchg_local() argument
mmu_context_32.h
12 /* Initialize a new mmu context. This is invoked when a new
30 /* Activate a new MM instance for the current task. */
atomic_64.h
73 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
92 #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
/linux-4.1.27/include/linux/usb/
gadget_configfs.h
61 struct struct_in *new; \
65 new = kzalloc(sizeof(*new), GFP_KERNEL); \
66 if (!new) \
69 ret = check_user_usb_string(name, &new->stringtab_dev); \
72 config_group_init_type_name(&new->group, name, \
78 if (gs->stringtab_dev.language == new->stringtab_dev.language) \
86 list_add_tail(&new->list, &gi->string_list); \
87 return &new->group; \
89 kfree(new); \
/linux-4.1.27/security/apparmor/
context.c
24 * cred or task context but instead creates a new one. Ideally the task
33 * aa_alloc_task_context - allocate a new task_cxt
60 * @new: a blank task context (NOT NULL)
63 void aa_dup_task_context(struct aa_task_cxt *new, const struct aa_task_cxt *old) aa_dup_task_context() argument
65 *new = *old; aa_dup_task_context()
66 aa_get_profile(new->profile); aa_dup_task_context()
67 aa_get_profile(new->previous); aa_dup_task_context()
68 aa_get_profile(new->onexec); aa_dup_task_context()
90 * @profile: new profile (NOT NULL)
97 struct cred *new; aa_replace_current_profile() local
103 new = prepare_creds(); aa_replace_current_profile()
104 if (!new) aa_replace_current_profile()
107 cxt = cred_cxt(new); aa_replace_current_profile()
122 commit_creds(new); aa_replace_current_profile()
135 struct cred *new = prepare_creds(); aa_set_current_onexec() local
136 if (!new) aa_set_current_onexec()
139 cxt = cred_cxt(new); aa_set_current_onexec()
144 commit_creds(new); aa_set_current_onexec()
161 struct cred *new = prepare_creds(); aa_set_current_hat() local
162 if (!new) aa_set_current_hat()
166 cxt = cred_cxt(new); aa_set_current_hat()
175 abort_creds(new); aa_set_current_hat()
183 commit_creds(new); aa_set_current_hat()
199 struct cred *new = prepare_creds(); aa_restore_previous_profile() local
200 if (!new) aa_restore_previous_profile()
203 cxt = cred_cxt(new); aa_restore_previous_profile()
205 abort_creds(new); aa_restore_previous_profile()
210 abort_creds(new); aa_restore_previous_profile()
220 commit_creds(new); aa_restore_previous_profile()
resource.c
85 * @new_rlim - the new resource limit (NOT NULL)
117 * __aa_transition_rlimits - apply new profile rlimits
119 * @new: new profile with rlimits to apply (NOT NULL)
121 void __aa_transition_rlimits(struct aa_profile *old, struct aa_profile *new) __aa_transition_rlimits() argument
141 /* set any new hard limits as dictated by the new profile */ __aa_transition_rlimits()
142 if (!new->rlimits.mask) __aa_transition_rlimits()
145 if (!(new->rlimits.mask & mask)) __aa_transition_rlimits()
150 new->rlimits.limits[i].rlim_max); __aa_transition_rlimits()
H A Dpolicy.c268 * alloc_namespace - allocate, initialize and return a new namespace
370 * aa_prepare_namespace - find an existing or create a new namespace of @name
641 * aa_alloc_profile - allocate, initialize and return a new profile
675 * aa_new_null_profile - create a new null-X learning profile
686 * Returns: new refcounted profile else NULL on failure
783 * is used to load a new profile.
954 if (ent->new == profile) list_for_each_entry()
956 if (strncmp(ent->new->base.hname, profile->base.hname, len) == list_for_each_entry()
957 0 && ent->new->base.hname[len] == 0) list_for_each_entry()
958 return ent->new; list_for_each_entry()
965 * __replace_profile - replace @old with @new on a list
967 * @new: profile to replace @old with (NOT NULL)
968 * @share_replacedby: transfer @old->replacedby to @new
970 * Will duplicate and refcount elements that @new inherits from @old
973 * refcount @new for list, put @old list refcount
977 static void __replace_profile(struct aa_profile *old, struct aa_profile *new, __replace_profile() argument
990 p = __find_child(&new->base.profiles, child->base.name); __replace_profile()
999 /* list refcount transferred to @new */ __replace_profile()
1001 rcu_assign_pointer(child->parent, aa_get_profile(new)); __replace_profile()
1002 list_add_rcu(&child->base.list, &new->base.profiles); __replace_profile()
1007 if (!rcu_access_pointer(new->parent)) { __replace_profile()
1009 rcu_assign_pointer(new->parent, aa_get_profile(parent)); __replace_profile()
1011 __aa_update_replacedby(old, new); __replace_profile()
1013 aa_put_replacedby(new->replacedby); __replace_profile()
1014 new->replacedby = aa_get_replacedby(old->replacedby); __replace_profile()
1015 } else if (!rcu_access_pointer(new->replacedby->profile)) __replace_profile()
1017 rcu_assign_pointer(new->replacedby->profile, __replace_profile()
1018 aa_get_profile(new)); __replace_profile()
1019 __aa_fs_profile_migrate_dents(old, new); __replace_profile()
1021 if (list_empty(&new->base.list)) { __replace_profile()
1022 /* new is not on a list already */ __replace_profile()
1023 list_replace_rcu(&old->base.list, &new->base.list); __replace_profile()
1024 aa_get_profile(new); __replace_profile()
1096 name = ent->new->base.hname; aa_replace_profiles()
1097 error = __lookup_replace(ns, ent->new->base.hname, noreplace, aa_replace_profiles()
1102 if (ent->new->rename) { aa_replace_profiles()
1103 error = __lookup_replace(ns, ent->new->rename, aa_replace_profiles()
1110 /* released when @new is freed */ aa_replace_profiles()
1111 ent->new->ns = aa_get_namespace(ns); aa_replace_profiles()
1117 policy = __lookup_parent(ns, ent->new->base.hname); aa_replace_profiles()
1120 p = __list_lookup_parent(&lh, ent->new); aa_replace_profiles()
1124 name = ent->new->base.hname; aa_replace_profiles()
1127 rcu_assign_pointer(ent->new->parent, aa_get_profile(p)); aa_replace_profiles()
1131 rcu_assign_pointer(ent->new->parent, aa_get_profile(p)); aa_replace_profiles()
1135 /* create new fs entries for introspection if needed */ aa_replace_profiles()
1146 if (rcu_access_pointer(ent->new->parent)) { aa_replace_profiles()
1148 p = aa_deref_parent(ent->new); aa_replace_profiles()
1151 parent = ns_subprofs_dir(ent->new->ns); aa_replace_profiles()
1152 error = __aa_fs_profile_mkdir(ent->new, parent); aa_replace_profiles()
1166 audit_policy(op, GFP_ATOMIC, ent->new->base.name, NULL, error); aa_replace_profiles()
1169 __replace_profile(ent->old, ent->new, 1); aa_replace_profiles()
1172 struct aa_replacedby *r = ent->new->replacedby; aa_replace_profiles()
1174 aa_get_profile(ent->new)); aa_replace_profiles()
1175 __replace_profile(ent->rename, ent->new, 0); aa_replace_profiles()
1179 rcu_assign_pointer(ent->new->replacedby->profile, aa_replace_profiles()
1180 aa_get_profile(ent->new)); aa_replace_profiles()
1181 __replace_profile(ent->rename, ent->new, 0); aa_replace_profiles()
1182 } else if (ent->new->parent) { aa_replace_profiles()
1184 parent = aa_deref_parent(ent->new); aa_replace_profiles()
1191 rcu_assign_pointer(ent->new->parent, newest); aa_replace_profiles()
1195 rcu_assign_pointer(ent->new->replacedby->profile, aa_replace_profiles()
1196 aa_get_profile(ent->new)); aa_replace_profiles()
1197 __list_add_profile(&parent->base.profiles, ent->new); aa_replace_profiles()
1200 rcu_assign_pointer(ent->new->replacedby->profile, aa_replace_profiles()
1201 aa_get_profile(ent->new)); aa_replace_profiles()
1202 __list_add_profile(&ns->base.profiles, ent->new); aa_replace_profiles()
/linux-4.1.27/drivers/mtd/
H A Dmtd_blkdevs.c323 int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new) add_mtd_blktrans_dev() argument
325 struct mtd_blktrans_ops *tr = new->tr; add_mtd_blktrans_dev()
338 if (new->devnum == -1) { add_mtd_blktrans_dev()
342 new->devnum = last_devnum+1; add_mtd_blktrans_dev()
343 list_add_tail(&new->list, &d->list); add_mtd_blktrans_dev()
346 } else if (d->devnum == new->devnum) { add_mtd_blktrans_dev()
350 } else if (d->devnum > new->devnum) { add_mtd_blktrans_dev()
352 list_add_tail(&new->list, &d->list); add_mtd_blktrans_dev()
359 if (new->devnum == -1) add_mtd_blktrans_dev()
360 new->devnum = last_devnum+1; add_mtd_blktrans_dev()
365 if (new->devnum > (MINORMASK >> tr->part_bits) || add_mtd_blktrans_dev()
366 (tr->part_bits && new->devnum >= 27 * 26)) { add_mtd_blktrans_dev()
371 list_add_tail(&new->list, &tr->devs); add_mtd_blktrans_dev()
375 mutex_init(&new->lock); add_mtd_blktrans_dev()
376 kref_init(&new->ref); add_mtd_blktrans_dev()
378 new->readonly = 1; add_mtd_blktrans_dev()
387 new->disk = gd; add_mtd_blktrans_dev()
388 gd->private_data = new; add_mtd_blktrans_dev()
390 gd->first_minor = (new->devnum) << tr->part_bits; add_mtd_blktrans_dev()
394 if (new->devnum < 26) add_mtd_blktrans_dev()
396 "%s%c", tr->name, 'a' + new->devnum); add_mtd_blktrans_dev()
400 'a' - 1 + new->devnum / 26, add_mtd_blktrans_dev()
401 'a' + new->devnum % 26); add_mtd_blktrans_dev()
404 "%s%d", tr->name, new->devnum); add_mtd_blktrans_dev()
406 set_capacity(gd, (new->size * tr->blksize) >> 9); add_mtd_blktrans_dev()
409 spin_lock_init(&new->queue_lock); add_mtd_blktrans_dev()
410 new->rq = blk_init_queue(mtd_blktrans_request, &new->queue_lock); add_mtd_blktrans_dev()
412 if (!new->rq) add_mtd_blktrans_dev()
416 blk_queue_flush(new->rq, REQ_FLUSH); add_mtd_blktrans_dev()
418 new->rq->queuedata = new; add_mtd_blktrans_dev()
419 blk_queue_logical_block_size(new->rq, tr->blksize); add_mtd_blktrans_dev()
421 queue_flag_set_unlocked(QUEUE_FLAG_NONROT, new->rq); add_mtd_blktrans_dev()
422 queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, new->rq); add_mtd_blktrans_dev()
425 queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, new->rq); add_mtd_blktrans_dev()
426 new->rq->limits.max_discard_sectors = UINT_MAX; add_mtd_blktrans_dev()
429 gd->queue = new->rq; add_mtd_blktrans_dev()
432 new->wq = alloc_workqueue("%s%d", 0, 0, add_mtd_blktrans_dev()
433 tr->name, new->mtd->index); add_mtd_blktrans_dev()
434 if (!new->wq) add_mtd_blktrans_dev()
436 INIT_WORK(&new->work, mtd_blktrans_work); add_mtd_blktrans_dev()
438 gd->driverfs_dev = &new->mtd->dev; add_mtd_blktrans_dev()
440 if (new->readonly) add_mtd_blktrans_dev()
445 if (new->disk_attributes) { add_mtd_blktrans_dev()
447 new->disk_attributes); add_mtd_blktrans_dev()
452 blk_cleanup_queue(new->rq); add_mtd_blktrans_dev()
454 put_disk(new->disk); add_mtd_blktrans_dev()
456 list_del(&new->list); add_mtd_blktrans_dev()
474 /* Stop new requests from arriving */ del_mtd_blktrans_dev()
/linux-4.1.27/arch/s390/kernel/
H A Djump_label.c40 struct insn *new) jump_label_bug()
44 unsigned char *ipn = (unsigned char *)new; jump_label_bug()
65 struct insn old, new; __jump_label_transform() local
69 jump_label_make_branch(entry, &new); __jump_label_transform()
72 jump_label_make_nop(entry, &new); __jump_label_transform()
76 jump_label_bug(entry, &orignop, &new); __jump_label_transform()
79 jump_label_bug(entry, &old, &new); __jump_label_transform()
81 s390_kernel_write((void *)entry->code, &new, sizeof(new)); __jump_label_transform()
39 jump_label_bug(struct jump_entry *entry, struct insn *expected, struct insn *new) jump_label_bug() argument
H A Dhead_kdump.S2 * S390 kdump lowlevel functions (new kernel)
16 # kdump entry (new kernel - not yet relocated)
69 mvc 0(256,%r5),0(%r11) # Copy new kernel to old
70 mvc 0(256,%r11),0(%r10) # Copy tmp to new
83 # Startup of kdump (relocated new kernel)
88 0: lpswe .Lrestart_psw-0b(%r13) # Start new kernel...
H A Dftrace.c107 struct ftrace_insn orig, new, old; ftrace_make_nop() local
114 ftrace_generate_nop_insn(&new); ftrace_make_nop()
124 ftrace_generate_kprobe_nop_insn(&new); ftrace_make_nop()
128 ftrace_generate_nop_insn(&new); ftrace_make_nop()
133 s390_kernel_write((void *) rec->ip, &new, sizeof(new)); ftrace_make_nop()
139 struct ftrace_insn orig, new, old; ftrace_make_call() local
152 ftrace_generate_kprobe_call_insn(&new); ftrace_make_call()
156 ftrace_generate_call_insn(&new, rec->ip); ftrace_make_call()
161 s390_kernel_write((void *) rec->ip, &new, sizeof(new)); ftrace_make_call()
/linux-4.1.27/security/keys/
H A Dprocess_keys.c128 * Install a fresh thread keyring directly to new credentials. This keyring is
131 int install_thread_keyring_to_cred(struct cred *new) install_thread_keyring_to_cred() argument
135 keyring = keyring_alloc("_tid", new->uid, new->gid, new, install_thread_keyring_to_cred()
141 new->thread_keyring = keyring; install_thread_keyring_to_cred()
150 struct cred *new; install_thread_keyring() local
153 new = prepare_creds(); install_thread_keyring()
154 if (!new) install_thread_keyring()
157 BUG_ON(new->thread_keyring); install_thread_keyring()
159 ret = install_thread_keyring_to_cred(new); install_thread_keyring()
161 abort_creds(new); install_thread_keyring()
165 return commit_creds(new); install_thread_keyring()
174 int install_process_keyring_to_cred(struct cred *new) install_process_keyring_to_cred() argument
178 if (new->process_keyring) install_process_keyring_to_cred()
181 keyring = keyring_alloc("_pid", new->uid, new->gid, new, install_process_keyring_to_cred()
187 new->process_keyring = keyring; install_process_keyring_to_cred()
200 struct cred *new; install_process_keyring() local
203 new = prepare_creds(); install_process_keyring()
204 if (!new) install_process_keyring()
207 ret = install_process_keyring_to_cred(new); install_process_keyring()
209 abort_creds(new); install_process_keyring()
213 return commit_creds(new); install_process_keyring()
257 struct cred *new; install_session_keyring() local
260 new = prepare_creds(); install_session_keyring()
261 if (!new) install_session_keyring()
264 ret = install_session_keyring_to_cred(new, keyring); install_session_keyring()
266 abort_creds(new); install_session_keyring()
270 return commit_creds(new); install_session_keyring()
735 /* if we attempted to install a keyring, then it may have caused new lookup_user_key()
744 * create a new one of that name and join that.
756 struct cred *new; join_session_keyring() local
760 new = prepare_creds(); join_session_keyring()
761 if (!new) join_session_keyring()
767 ret = install_session_keyring_to_cred(new, NULL); join_session_keyring()
771 serial = new->session_keyring->serial; join_session_keyring()
772 ret = commit_creds(new); join_session_keyring()
784 /* not found - try and create a new one */ join_session_keyring()
796 } else if (keyring == new->session_keyring) { join_session_keyring()
803 ret = install_session_keyring_to_cred(new, keyring); join_session_keyring()
807 commit_creds(new); join_session_keyring()
818 abort_creds(new); join_session_keyring()
829 struct cred *new = container_of(twork, struct cred, rcu); key_change_session_keyring() local
832 put_cred(new); key_change_session_keyring()
836 new-> uid = old-> uid; key_change_session_keyring()
837 new-> euid = old-> euid; key_change_session_keyring()
838 new-> suid = old-> suid; key_change_session_keyring()
839 new->fsuid = old->fsuid; key_change_session_keyring()
840 new-> gid = old-> gid; key_change_session_keyring()
841 new-> egid = old-> egid; key_change_session_keyring()
842 new-> sgid = old-> sgid; key_change_session_keyring()
843 new->fsgid = old->fsgid; key_change_session_keyring()
844 new->user = get_uid(old->user); key_change_session_keyring()
845 new->user_ns = get_user_ns(old->user_ns); key_change_session_keyring()
846 new->group_info = get_group_info(old->group_info); key_change_session_keyring()
848 new->securebits = old->securebits; key_change_session_keyring()
849 new->cap_inheritable = old->cap_inheritable; key_change_session_keyring()
850 new->cap_permitted = old->cap_permitted; key_change_session_keyring()
851 new->cap_effective = old->cap_effective; key_change_session_keyring()
852 new->cap_bset = old->cap_bset; key_change_session_keyring()
854 new->jit_keyring = old->jit_keyring; key_change_session_keyring()
855 new->thread_keyring = key_get(old->thread_keyring); key_change_session_keyring()
856 new->process_keyring = key_get(old->process_keyring); key_change_session_keyring()
858 security_transfer_creds(new, old); key_change_session_keyring()
860 commit_creds(new); key_change_session_keyring()
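All of the keyring installers above follow the same credential-replacement discipline: build a private copy with prepare_creds(), mutate only that copy, then either commit_creds() or abort_creds() it; the live credentials are never edited in place. A minimal sketch of that shape, with my_adjust_fsuid() and my_check() as illustrative stand-ins:

#include <linux/cred.h>
#include <linux/errno.h>
#include <linux/uidgid.h>

static bool my_check(const struct cred *cred)
{
	return uid_valid(cred->fsuid);		/* stand-in policy check */
}

static int my_adjust_fsuid(kuid_t fsuid)
{
	struct cred *new;

	new = prepare_creds();			/* private copy of current->cred */
	if (!new)
		return -ENOMEM;

	new->fsuid = fsuid;			/* mutate only the copy */

	if (!my_check(new)) {
		abort_creds(new);		/* throw the unused copy away */
		return -EPERM;
	}

	return commit_creds(new);		/* install copy, put the old cred */
}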
/linux-4.1.27/arch/alpha/include/asm/
H A Dxchg.h132 * prev is equal to old) then we aren't acquiring anything new and
137 ____cmpxchg(_u8, volatile char *m, unsigned char old, unsigned char new) ____cmpxchg() argument
157 : "=&r" (prev), "=&r" (new), "=&r" (tmp), "=&r" (cmp), "=&r" (addr64) ____cmpxchg()
158 : "r" ((long)m), "Ir" (old), "1" (new) : "memory"); ____cmpxchg()
164 ____cmpxchg(_u16, volatile short *m, unsigned short old, unsigned short new) ____cmpxchg() argument
184 : "=&r" (prev), "=&r" (new), "=&r" (tmp), "=&r" (cmp), "=&r" (addr64) ____cmpxchg()
185 : "r" ((long)m), "Ir" (old), "1" (new) : "memory"); ____cmpxchg()
191 ____cmpxchg(_u32, volatile int *m, int old, int new) ____cmpxchg() argument
208 : "r"((long) old), "r"(new), "m"(*m) : "memory"); ____cmpxchg()
214 ____cmpxchg(_u64, volatile long *m, unsigned long old, unsigned long new) ____cmpxchg() argument
231 : "r"((long) old), "r"(new), "m"(*m) : "memory"); ____cmpxchg()
241 ____cmpxchg(, volatile void *ptr, unsigned long old, unsigned long new, ____cmpxchg() argument
246 return ____cmpxchg(_u8, ptr, old, new); ____cmpxchg()
248 return ____cmpxchg(_u16, ptr, old, new); ____cmpxchg()
250 return ____cmpxchg(_u32, ptr, old, new); ____cmpxchg()
252 return ____cmpxchg(_u64, ptr, old, new); ____cmpxchg()
H A Datomic.h119 #define atomic64_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new))
120 #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
122 #define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new))
123 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
136 int c, new, old; __atomic_add_unless()
141 " addl %[old],%[a],%[new]\n" __atomic_add_unless()
143 " stl_c %[new],%[mem]\n" __atomic_add_unless()
144 " beq %[new],3f\n" __atomic_add_unless()
149 : [old] "=&r"(old), [new] "=&r"(new), [c] "=&r"(c) __atomic_add_unless()
/linux-4.1.27/include/linux/
H A Dvt.h16 extern int vt_kmsg_redirect(int new);
20 static inline int vt_kmsg_redirect(int new) vt_kmsg_redirect() argument
H A Drculist.h43 * Insert a new entry between two known consecutive entries.
49 static inline void __list_add_rcu(struct list_head *new, __list_add_rcu() argument
52 new->next = next; __list_add_rcu()
53 new->prev = prev; __list_add_rcu()
54 rcu_assign_pointer(list_next_rcu(prev), new); __list_add_rcu()
55 next->prev = new; __list_add_rcu()
58 void __list_add_rcu(struct list_head *new,
63 * list_add_rcu - add a new entry to rcu-protected list
64 * @new: new entry to be added
67 * Insert a new entry after the specified head.
78 static inline void list_add_rcu(struct list_head *new, struct list_head *head) list_add_rcu() argument
80 __list_add_rcu(new, head, head->next); list_add_rcu()
84 * list_add_tail_rcu - add a new entry to rcu-protected list
85 * @new: new entry to be added
88 * Insert a new entry before the specified head.
99 static inline void list_add_tail_rcu(struct list_head *new, list_add_tail_rcu() argument
102 __list_add_rcu(new, head->prev, head); list_add_tail_rcu()
164 * list_replace_rcu - replace old entry by new one
166 * @new : the new element to insert
168 * The @old entry will be replaced with the @new entry atomically.
172 struct list_head *new) list_replace_rcu()
174 new->next = old->next; list_replace_rcu()
175 new->prev = old->prev; list_replace_rcu()
176 rcu_assign_pointer(list_next_rcu(new->prev), new); list_replace_rcu()
177 new->next->prev = new; list_replace_rcu()
220 * the list body into the new list. Any new readers will see list_splice_init_rcu()
228 * The order is important if the new list is global and accessible list_splice_init_rcu()
350 * hlist_replace_rcu - replace old entry by new one
352 * @new : the new element to insert
354 * The @old entry will be replaced with the @new entry atomically.
357 struct hlist_node *new) hlist_replace_rcu()
361 new->next = next; hlist_replace_rcu()
362 new->pprev = old->pprev; hlist_replace_rcu()
363 rcu_assign_pointer(*(struct hlist_node __rcu **)new->pprev, new); hlist_replace_rcu()
365 new->next->pprev = &new->next; hlist_replace_rcu()
409 * @n: the new element to add to the hash list.
410 * @next: the existing element to add the new element before.
436 * @n: the new element to add to the hash list.
437 * @prev: the existing element to add the new element after.
171 list_replace_rcu(struct list_head *old, struct list_head *new) list_replace_rcu() argument
356 hlist_replace_rcu(struct hlist_node *old, struct hlist_node *new) hlist_replace_rcu() argument
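Taken together, list_add_rcu(), list_replace_rcu() and the hlist variants let a locked writer publish entries that lockless readers traverse under rcu_read_lock(). A small sketch under those assumptions; struct my_node, my_list and my_lock are illustrative names, not from the tree:

#include <linux/rculist.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct my_node {
	int value;
	struct list_head list;
};

static LIST_HEAD(my_list);
static DEFINE_SPINLOCK(my_lock);

static int my_add(int value)
{
	struct my_node *n = kzalloc(sizeof(*n), GFP_KERNEL);

	if (!n)
		return -ENOMEM;
	n->value = value;

	spin_lock(&my_lock);			/* writers still serialize */
	list_add_rcu(&n->list, &my_list);	/* publish to readers */
	spin_unlock(&my_lock);
	return 0;
}

static bool my_find(int value)
{
	struct my_node *n;
	bool found = false;

	rcu_read_lock();			/* readers take no lock */
	list_for_each_entry_rcu(n, &my_list, list) {
		if (n->value == value) {
			found = true;
			break;
		}
	}
	rcu_read_unlock();
	return found;
}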
H A Dkcore.h33 void kclist_add(struct kcore_list *new, void *addr, size_t size, int type) kclist_add() argument
H A Drbtree_augmented.h40 void (*copy)(struct rb_node *old, struct rb_node *new);
41 void (*rotate)(struct rb_node *old, struct rb_node *new);
45 void (*augment_rotate)(struct rb_node *old, struct rb_node *new));
81 rbstruct *new = rb_entry(rb_new, rbstruct, rbfield); \
82 new->rbaugmented = old->rbaugmented; \
88 rbstruct *new = rb_entry(rb_new, rbstruct, rbfield); \
89 new->rbaugmented = old->rbaugmented; \
121 __rb_change_child(struct rb_node *old, struct rb_node *new, __rb_change_child() argument
126 parent->rb_left = new; __rb_change_child()
128 parent->rb_right = new; __rb_change_child()
130 root->rb_node = new; __rb_change_child()
134 void (*augment_rotate)(struct rb_node *old, struct rb_node *new));
/linux-4.1.27/arch/x86/um/
H A Dbugs_32.c24 struct sigaction old, new; arch_check_bugs() local
27 new.sa_handler = cmov_sigill_test_handler; arch_check_bugs()
30 new.sa_flags = SA_NODEFER; arch_check_bugs()
31 sigemptyset(&new.sa_mask); arch_check_bugs()
32 sigaction(SIGILL, &new, &old); arch_check_bugs()
41 sigaction(SIGILL, &old, &new); arch_check_bugs()
/linux-4.1.27/arch/um/include/asm/
H A Dmmu_context.h38 static inline void activate_mm(struct mm_struct *old, struct mm_struct *new) activate_mm() argument
42 * when the new ->mm is used for the first time. activate_mm()
44 __switch_mm(&new->context.id); activate_mm()
45 down_write(&new->mmap_sem); activate_mm()
46 uml_setup_stubs(new); activate_mm()
47 up_write(&new->mmap_sem); activate_mm()
/linux-4.1.27/arch/sh/include/asm/
H A Dcmpxchg-grb.h15 " mov.l %2, @%1 \n\t" /* store new value */ xchg_u32()
37 " mov.b %2, @%1 \n\t" /* store new value */ xchg_u8()
49 unsigned long new) __cmpxchg_u32()
62 " mov.l %2, @%3 \n\t" /* store new value */ __cmpxchg_u32()
65 "+r" (old), "+r" (new) /* old or new can be r15 */ __cmpxchg_u32()
48 __cmpxchg_u32(volatile int *m, unsigned long old, unsigned long new) __cmpxchg_u32() argument
H A Dcmpxchg-irq.h29 unsigned long new) __cmpxchg_u32()
37 *m = new; __cmpxchg_u32()
28 __cmpxchg_u32(volatile int *m, unsigned long old, unsigned long new) __cmpxchg_u32() argument
H A Dcmpxchg-llsc.h47 __cmpxchg_u32(volatile int *m, unsigned long old, unsigned long new) __cmpxchg_u32() argument
64 : "r" (m), "r" (old), "r" (new) __cmpxchg_u32()
H A Datomic-grb.h16 " mov.l %0, @%1 \n\t" /* store new value */ \
36 " mov.l %0, @%1 \n\t" /* store new value */ \
67 " mov.l %0, @%1 \n\t" /* store new value */ atomic_clear_mask()
86 " mov.l %0, @%1 \n\t" /* store new value */ atomic_set_mask()
H A Dbitops-grb.h20 " mov.l %0, @%1 \n\t" /* store new value */ set_bit()
43 " mov.l %0, @%1 \n\t" /* store new value */ clear_bit()
66 " mov.l %0, @%1 \n\t" /* store new value */ change_bit()
94 " mov.l %0, @%2 \n\t" /* store new value */ test_and_set_bit()
127 " mov.l %0, @%2 \n\t" /* store new value */ test_and_clear_bit()
159 " mov.l %0, @%2 \n\t" /* store new value */ test_and_change_bit()
H A Datomic.h38 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
H A Dcmpxchg.h52 unsigned long new, int size) __cmpxchg()
56 return __cmpxchg_u32(ptr, old, new); __cmpxchg()
51 __cmpxchg(volatile void * ptr, unsigned long old, unsigned long new, int size) __cmpxchg() argument
H A Dmmu_context.h66 /* It's old, so we need to get a new context with a new version. */ get_mmu_context()
70 * Flush all TLB and start new cycle. get_mmu_context()
94 * Initialize the context related info for a new mm_struct
109 * After we have set current->mm to a new value, this activates
110 * the context for the new mm so we see the new mappings.
/linux-4.1.27/arch/powerpc/mm/
H A Dvphn.c41 u16 new = be16_to_cpup(field++); vphn_unpack_associativity() local
48 cpu_to_be32(last << 16 | new); vphn_unpack_associativity()
50 } else if (new == VPHN_FIELD_UNUSED) vphn_unpack_associativity()
53 else if (new & VPHN_FIELD_MSB) { vphn_unpack_associativity()
56 cpu_to_be32(new & VPHN_FIELD_MASK); vphn_unpack_associativity()
61 last = new; vphn_unpack_associativity()
/linux-4.1.27/tools/testing/selftests/powerpc/vphn/
H A Dvphn.c41 u16 new = be16_to_cpup(field++); vphn_unpack_associativity() local
48 cpu_to_be32(last << 16 | new); vphn_unpack_associativity()
50 } else if (new == VPHN_FIELD_UNUSED) vphn_unpack_associativity()
53 else if (new & VPHN_FIELD_MSB) { vphn_unpack_associativity()
56 cpu_to_be32(new & VPHN_FIELD_MASK); vphn_unpack_associativity()
61 last = new; vphn_unpack_associativity()
/linux-4.1.27/arch/powerpc/include/asm/
H A Dcmpxchg.h144 * Compare and exchange - if *p == old, set it to new,
150 __cmpxchg_u32(volatile unsigned int *p, unsigned long old, unsigned long new) __cmpxchg_u32() argument
166 : "r" (p), "r" (old), "r" (new) __cmpxchg_u32()
174 unsigned long new) __cmpxchg_u32_local()
188 : "r" (p), "r" (old), "r" (new) __cmpxchg_u32_local()
196 __cmpxchg_u64(volatile unsigned long *p, unsigned long old, unsigned long new) __cmpxchg_u64() argument
211 : "r" (p), "r" (old), "r" (new) __cmpxchg_u64()
219 unsigned long new) __cmpxchg_u64_local()
232 : "r" (p), "r" (old), "r" (new) __cmpxchg_u64_local()
244 __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, __cmpxchg() argument
249 return __cmpxchg_u32(ptr, old, new); __cmpxchg()
252 return __cmpxchg_u64(ptr, old, new); __cmpxchg()
260 __cmpxchg_local(volatile void *ptr, unsigned long old, unsigned long new, __cmpxchg_local() argument
265 return __cmpxchg_u32_local(ptr, old, new); __cmpxchg_local()
268 return __cmpxchg_u64_local(ptr, old, new); __cmpxchg_local()
173 __cmpxchg_u32_local(volatile unsigned int *p, unsigned long old, unsigned long new) __cmpxchg_u32_local() argument
218 __cmpxchg_u64_local(volatile unsigned long *p, unsigned long old, unsigned long new) __cmpxchg_u64_local() argument
/linux-4.1.27/drivers/mtd/devices/
H A Dphram.c99 struct phram_mtd_list *new; register_device() local
102 new = kzalloc(sizeof(*new), GFP_KERNEL); register_device()
103 if (!new) register_device()
107 new->mtd.priv = ioremap(start, len); register_device()
108 if (!new->mtd.priv) { register_device()
114 new->mtd.name = name; register_device()
115 new->mtd.size = len; register_device()
116 new->mtd.flags = MTD_CAP_RAM; register_device()
117 new->mtd._erase = phram_erase; register_device()
118 new->mtd._point = phram_point; register_device()
119 new->mtd._unpoint = phram_unpoint; register_device()
120 new->mtd._read = phram_read; register_device()
121 new->mtd._write = phram_write; register_device()
122 new->mtd.owner = THIS_MODULE; register_device()
123 new->mtd.type = MTD_RAM; register_device()
124 new->mtd.erasesize = PAGE_SIZE; register_device()
125 new->mtd.writesize = 1; register_device()
128 if (mtd_device_register(&new->mtd, NULL, 0)) { register_device()
129 pr_err("Failed to register new device\n"); register_device()
133 list_add_tail(&new->list, &phram_list); register_device()
137 iounmap(new->mtd.priv); register_device()
139 kfree(new); register_device()
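register_device() above is a compact example of the usual allocate/map/register sequence with goto-based unwinding, where each failure label releases exactly what was set up before it. A stripped-down sketch of the same shape; struct my_dev, my_register() and my_core_register() are illustrative stand-ins, not phram's API:

#include <linux/errno.h>
#include <linux/io.h>
#include <linux/slab.h>

struct my_dev {
	void __iomem *base;
};

static int my_core_register(struct my_dev *dev)
{
	return 0;			/* stand-in for the real registration */
}

static int my_register(phys_addr_t start, size_t len)
{
	struct my_dev *new;
	int ret = -ENOMEM;

	new = kzalloc(sizeof(*new), GFP_KERNEL);
	if (!new)
		goto out;

	new->base = ioremap(start, len);
	if (!new->base)
		goto err_free;

	ret = my_core_register(new);
	if (ret)
		goto err_unmap;

	return 0;

err_unmap:
	iounmap(new->base);
err_free:
	kfree(new);
out:
	return ret;
}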
/linux-4.1.27/security/
H A Dcommoncap.c240 * @new: The proposed new credentials; alterations should be made here
242 * @effective: A pointer to the proposed new effective capabilities set
243 * @inheritable: A pointer to the proposed new inheritable capabilities set
244 * @permitted: A pointer to the proposed new permitted capabilities set
247 * process's capability sets. The changes are made to the proposed new
250 int cap_capset(struct cred *new, cap_capset() argument
266 /* no new pI capabilities outside bounding set */ cap_capset()
269 /* verify restrictions on target's new Permitted set */ cap_capset()
277 new->cap_effective = *effective; cap_capset()
278 new->cap_inheritable = *inheritable; cap_capset()
279 new->cap_permitted = *permitted; cap_capset()
336 * Calculate the new process capability sets from the capability sets attached
344 struct cred *new = bprm->cred; bprm_caps_from_vfs_caps() local
361 new->cap_permitted.cap[i] = CAP_FOR_EACH_U32()
362 (new->cap_bset.cap[i] & permitted) | CAP_FOR_EACH_U32()
363 (new->cap_inheritable.cap[i] & inheritable); CAP_FOR_EACH_U32()
365 if (permitted & ~new->cap_permitted.cap[i]) CAP_FOR_EACH_U32()
479 * Set up the proposed credentials for a new execution context being
486 struct cred *new = bprm->cred; cap_bprm_set_creds() local
496 root_uid = make_kuid(new->user_ns, 0); cap_bprm_set_creds()
504 if (has_cap && !uid_eq(new->uid, root_uid) && uid_eq(new->euid, root_uid)) { cap_bprm_set_creds()
515 if (uid_eq(new->euid, root_uid) || uid_eq(new->uid, root_uid)) { cap_bprm_set_creds()
517 new->cap_permitted = cap_combine(old->cap_bset, cap_bprm_set_creds()
520 if (uid_eq(new->euid, root_uid)) cap_bprm_set_creds()
526 if (!cap_issubset(new->cap_permitted, old->cap_permitted)) cap_bprm_set_creds()
533 * In addition, if NO_NEW_PRIVS, then ensure we get no new privs. cap_bprm_set_creds()
535 if ((!uid_eq(new->euid, old->uid) || cap_bprm_set_creds()
536 !gid_eq(new->egid, old->gid) || cap_bprm_set_creds()
537 !cap_issubset(new->cap_permitted, old->cap_permitted)) && cap_bprm_set_creds()
542 new->euid = new->uid; cap_bprm_set_creds()
543 new->egid = new->gid; cap_bprm_set_creds()
545 new->cap_permitted = cap_intersect(new->cap_permitted, cap_bprm_set_creds()
549 new->suid = new->fsuid = new->euid; cap_bprm_set_creds()
550 new->sgid = new->fsgid = new->egid; cap_bprm_set_creds()
553 new->cap_effective = new->cap_permitted; cap_bprm_set_creds()
555 cap_clear(new->cap_effective); cap_bprm_set_creds()
570 if (!cap_isclear(new->cap_effective)) { cap_bprm_set_creds()
571 if (!cap_issubset(CAP_FULL_SET, new->cap_effective) || cap_bprm_set_creds()
572 !uid_eq(new->euid, root_uid) || !uid_eq(new->uid, root_uid) || cap_bprm_set_creds()
574 ret = audit_log_bprm_fcaps(bprm, new, old); cap_bprm_set_creds()
580 new->securebits &= ~issecure_mask(SECURE_KEEP_CAPS); cap_bprm_set_creds()
695 static inline void cap_emulate_setxuid(struct cred *new, const struct cred *old) cap_emulate_setxuid() argument
702 (!uid_eq(new->uid, root_uid) && cap_emulate_setxuid()
703 !uid_eq(new->euid, root_uid) && cap_emulate_setxuid()
704 !uid_eq(new->suid, root_uid)) && cap_emulate_setxuid()
706 cap_clear(new->cap_permitted); cap_emulate_setxuid()
707 cap_clear(new->cap_effective); cap_emulate_setxuid()
709 if (uid_eq(old->euid, root_uid) && !uid_eq(new->euid, root_uid)) cap_emulate_setxuid()
710 cap_clear(new->cap_effective); cap_emulate_setxuid()
711 if (!uid_eq(old->euid, root_uid) && uid_eq(new->euid, root_uid)) cap_emulate_setxuid()
712 new->cap_effective = new->cap_permitted; cap_emulate_setxuid()
717 * @new: The proposed credentials
724 int cap_task_fix_setuid(struct cred *new, const struct cred *old, int flags) cap_task_fix_setuid() argument
733 cap_emulate_setxuid(new, old); cap_task_fix_setuid()
745 if (uid_eq(old->fsuid, root_uid) && !uid_eq(new->fsuid, root_uid)) cap_task_fix_setuid()
746 new->cap_effective = cap_task_fix_setuid()
747 cap_drop_fs_set(new->cap_effective); cap_task_fix_setuid()
749 if (!uid_eq(old->fsuid, root_uid) && uid_eq(new->fsuid, root_uid)) cap_task_fix_setuid()
750 new->cap_effective = cap_task_fix_setuid()
751 cap_raise_fs_set(new->cap_effective, cap_task_fix_setuid()
752 new->cap_permitted); cap_task_fix_setuid()
831 struct cred *new; cap_prctl_drop() local
838 new = prepare_creds(); cap_prctl_drop()
839 if (!new) cap_prctl_drop()
841 cap_lower(new->cap_bset, cap); cap_prctl_drop()
842 return commit_creds(new); cap_prctl_drop()
861 struct cred *new; cap_task_prctl() local
910 new = prepare_creds(); cap_task_prctl()
911 if (!new) cap_task_prctl()
913 new->securebits = arg2; cap_task_prctl()
914 return commit_creds(new); cap_task_prctl()
928 new = prepare_creds(); cap_task_prctl()
929 if (!new) cap_task_prctl()
932 new->securebits |= issecure_mask(SECURE_KEEP_CAPS); cap_task_prctl()
934 new->securebits &= ~issecure_mask(SECURE_KEEP_CAPS); cap_task_prctl()
935 return commit_creds(new); cap_task_prctl()
944 * cap_vm_enough_memory - Determine whether a new virtual mapping is permitted
945 * @mm: The VM space in which the new mapping is to be made
948 * Determine whether the allocation of a new virtual mapping by the current
/linux-4.1.27/arch/metag/kernel/
H A Dftrace.c73 /* replace the text with the new text */ ftrace_modify_code()
86 unsigned char old[MCOUNT_INSN_SIZE], *new; ftrace_update_ftrace_func() local
90 new = ftrace_call_replace(pc, (unsigned long)func); ftrace_update_ftrace_func()
91 ret = ftrace_modify_code(pc, old, new); ftrace_update_ftrace_func()
99 unsigned char *new, *old; ftrace_make_nop() local
103 new = ftrace_nop_replace(); ftrace_make_nop()
105 return ftrace_modify_code(ip, old, new); ftrace_make_nop()
110 unsigned char *new, *old; ftrace_make_call() local
114 new = ftrace_call_replace(ip, addr); ftrace_make_call()
116 return ftrace_modify_code(ip, old, new); ftrace_make_call()
/linux-4.1.27/arch/m32r/include/asm/
H A Dcmpxchg.h113 __cmpxchg_u32(volatile unsigned int *p, unsigned int old, unsigned int new) __cmpxchg_u32() argument
131 : "r" (p), "r" (old), "r" (new) __cmpxchg_u32()
144 unsigned int new) __cmpxchg_local_u32()
162 : "r" (p), "r" (old), "r" (new) __cmpxchg_local_u32()
178 __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size) __cmpxchg() argument
182 return __cmpxchg_u32(ptr, old, new); __cmpxchg()
185 return __cmpxchg_u64(ptr, old, new); __cmpxchg()
200 unsigned long new, int size) __cmpxchg_local()
204 return __cmpxchg_local_u32(ptr, old, new); __cmpxchg_local()
206 return __cmpxchg_local_generic(ptr, old, new, size); __cmpxchg_local()
143 __cmpxchg_local_u32(volatile unsigned int *p, unsigned int old, unsigned int new) __cmpxchg_local_u32() argument
198 __cmpxchg_local(volatile void *ptr, unsigned long old, unsigned long new, int size) __cmpxchg_local() argument
H A Dswitch_to.h33 " ld lr, @%5 ; load new LR \n" \
36 " ld sp, @%3 ; load new SP \n" \
37 " push %1 ; store `prev' on new stack \n" \
41 " pop %0 ; restore `__last' from new stack \n" \
H A Dmmu_context.h45 Flush all TLB and start new cycle. */ get_new_mmu_context()
64 If it's old, we need to get a new context with a new version. */ get_mmu_context()
71 * Initialize the context related info for a new mm_struct
112 * After we have set current->mm to a new value, this activates
113 * the context for the new mm so we see the new mappings.
/linux-4.1.27/drivers/block/drbd/
H A Ddrbd_interval.c44 * drbd_insert_interval - insert a new interval into a tree
49 struct rb_node **new = &root->rb_node, *parent = NULL; drbd_insert_interval() local
54 while (*new) { drbd_insert_interval()
56 rb_entry(*new, struct drbd_interval, rb); drbd_insert_interval()
58 parent = *new; drbd_insert_interval()
62 new = &(*new)->rb_left; drbd_insert_interval()
64 new = &(*new)->rb_right; drbd_insert_interval()
66 new = &(*new)->rb_left; drbd_insert_interval()
68 new = &(*new)->rb_right; drbd_insert_interval()
74 rb_link_node(&this->rb, parent, new); drbd_insert_interval()
/linux-4.1.27/net/dccp/ccids/lib/
H A Dloss_interval.c108 * subsequent packets as belonging to a new loss interval. This tfrc_lh_update_i_mean()
122 /* Determine if `new_loss' does begin a new loss interval [RFC 4342, 10.2] */ tfrc_lh_is_new_loss()
131 * tfrc_lh_interval_add - Insert new record into the Loss Interval database
137 * Updates I_mean and returns 1 if a new interval has in fact been added to @lh.
142 struct tfrc_loss_interval *cur = tfrc_lh_peek(lh), *new; tfrc_lh_interval_add() local
147 new = tfrc_lh_demand_next(lh); tfrc_lh_interval_add()
148 if (unlikely(new == NULL)) { tfrc_lh_interval_add()
153 new->li_seqno = tfrc_rx_hist_loss_prev(rh)->tfrchrx_seqno; tfrc_lh_interval_add()
154 new->li_ccval = tfrc_rx_hist_loss_prev(rh)->tfrchrx_ccval; tfrc_lh_interval_add()
155 new->li_is_closed = 0; tfrc_lh_interval_add()
158 lh->i_mean = new->li_length = (*calc_first_li)(sk); tfrc_lh_interval_add()
160 cur->li_length = dccp_delta_seqno(cur->li_seqno, new->li_seqno); tfrc_lh_interval_add()
161 new->li_length = dccp_delta_seqno(new->li_seqno, tfrc_lh_interval_add()
/linux-4.1.27/arch/x86/include/asm/
H A Dcmpxchg_32.h12 * "new previous" value. That is why there is a loop. Preloading
46 static inline u64 __cmpxchg64(volatile u64 *ptr, u64 old, u64 new) __cmpxchg64() argument
52 : "b" ((u32)new), __cmpxchg64()
53 "c" ((u32)(new >> 32)), __cmpxchg64()
59 static inline u64 __cmpxchg64_local(volatile u64 *ptr, u64 old, u64 new) __cmpxchg64_local() argument
65 : "b" ((u32)new), __cmpxchg64_local()
66 "c" ((u32)(new >> 32)), __cmpxchg64_local()
H A Dcmpxchg.h85 #define __raw_cmpxchg(ptr, old, new, size, lock) \
89 __typeof__(*(ptr)) __new = (new); \
133 #define __cmpxchg(ptr, old, new, size) \
134 __raw_cmpxchg((ptr), (old), (new), (size), LOCK_PREFIX)
136 #define __sync_cmpxchg(ptr, old, new, size) \
137 __raw_cmpxchg((ptr), (old), (new), (size), "lock; ")
139 #define __cmpxchg_local(ptr, old, new, size) \
140 __raw_cmpxchg((ptr), (old), (new), (size), "")
148 #define cmpxchg(ptr, old, new) \
149 __cmpxchg(ptr, old, new, sizeof(*(ptr)))
151 #define sync_cmpxchg(ptr, old, new) \
152 __sync_cmpxchg(ptr, old, new, sizeof(*(ptr)))
154 #define cmpxchg_local(ptr, old, new) \
155 __cmpxchg_local(ptr, old, new, sizeof(*(ptr)))
/linux-4.1.27/sound/pci/ice1712/
H A Dwtm.c81 unsigned char new, old; stac9460_dac_mute_all() local
92 new = (~mute << 7 & 0x80) | (old & ~0x80); stac9460_dac_mute_all()
93 change = (new != old); stac9460_dac_mute_all()
95 stac9460_put(ice, idx, new); stac9460_dac_mute_all()
111 new = (~mute << 7 & 0x80) | (old & ~0x80); stac9460_dac_mute_all()
112 change = (new != old); stac9460_dac_mute_all()
114 stac9460_2_put(ice, idx, new); stac9460_dac_mute_all()
158 unsigned char new, old; stac9460_dac_mute_put() local
165 new = (~ucontrol->value.integer.value[0] << 7 & 0x80) | stac9460_dac_mute_put()
167 change = (new != old); stac9460_dac_mute_put()
169 stac9460_put(ice, idx, new); stac9460_dac_mute_put()
170 stac9460_2_put(ice, idx, new); stac9460_dac_mute_put()
179 new = (~ucontrol->value.integer.value[0] << 7 & 0x80) | stac9460_dac_mute_put()
181 change = (new != old); stac9460_dac_mute_put()
184 stac9460_put(ice, idx, new); stac9460_dac_mute_put()
186 stac9460_2_put(ice, idx - 6, new); stac9460_dac_mute_put()
298 unsigned char new, old; stac9460_adc_mute_put() local
307 new = (~ucontrol->value.integer.value[i]<<7&0x80) | stac9460_adc_mute_put()
309 change = (new != old); stac9460_adc_mute_put()
311 stac9460_put(ice, reg, new); stac9460_adc_mute_put()
317 new = (~ucontrol->value.integer.value[i]<<7&0x80) | stac9460_adc_mute_put()
319 change = (new != old); stac9460_adc_mute_put()
321 stac9460_2_put(ice, reg, new); stac9460_adc_mute_put()
429 unsigned char new, old; stac9460_mic_sw_put() local
437 new = (ucontrol->value.enumerated.item[0] << 7 & 0x80) | (old & ~0x80); stac9460_mic_sw_put()
438 change = (new != old); stac9460_mic_sw_put()
441 stac9460_put(ice, STAC946X_GENERAL_PURPOSE, new); stac9460_mic_sw_put()
443 stac9460_2_put(ice, STAC946X_GENERAL_PURPOSE, new); stac9460_mic_sw_put()
454 unsigned char old, new; stac9460_set_rate_val() local
461 new = 0x08; /* 256x, base rate mode */ stac9460_set_rate_val()
463 new = 0x11; /* 256x, mid rate mode */ stac9460_set_rate_val()
465 new = 0x12; /* 128x, high rate mode */ stac9460_set_rate_val()
468 if (old == new) stac9460_set_rate_val()
476 /*printk(KERN_DEBUG "Rate change: %d, new MC: 0x%02x\n", rate, new);*/ stac9460_set_rate_val()
477 stac9460_put(ice, STAC946X_MASTER_CLOCKING, new); stac9460_set_rate_val()
478 stac9460_2_put(ice, STAC946X_MASTER_CLOCKING, new); stac9460_set_rate_val()
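Each put() callback above follows the same mixer idiom: compute the would-be register value from the control value, and only write it back (and report a change to ALSA) when it differs from the cached value. Reduced to its core, with illustrative names and the cached register byte passed in directly:

/* Illustrative only: the old/new/change idiom from the callbacks above. */
static int my_mute_put(unsigned char *cached_reg, int mute)
{
	unsigned char old = *cached_reg;
	/* bit 7 set = unmuted, mirroring the (~mute << 7 & 0x80) expression */
	unsigned char new = (mute ? 0x00 : 0x80) | (old & ~0x80);

	if (new == old)
		return 0;		/* nothing changed, nothing to write */

	*cached_reg = new;		/* the real code also writes the codec */
	return 1;			/* tell ALSA the control changed */
}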
H A Dquartet.c344 /* set the new bits */ qtet_akm_set_regs()
357 if (rate == 0) /* no hint - S/PDIF input is master or the new spdif qtet_akm_set_rate_val()
369 /* set new value */ qtet_akm_set_rate_val()
528 unsigned int old, new, smute; qtet_mute_put() local
532 new = 0; qtet_mute_put()
537 new = SCR_MUTE; qtet_mute_put()
541 if (old != new) { qtet_mute_put()
543 set_scr(ice, (get_scr(ice) & ~SCR_MUTE) | new); qtet_mute_put()
589 unsigned int old, new, tmp, masked_old; qtet_ain12_sw_put() local
590 old = new = get_scr(ice); qtet_ain12_sw_put()
600 new = old & ~(SCR_AIN12_SEL1 | SCR_AIN12_SEL0); qtet_ain12_sw_put()
601 set_scr(ice, new); qtet_ain12_sw_put()
603 new &= ~SCR_RELAY; qtet_ain12_sw_put()
604 set_scr(ice, new); qtet_ain12_sw_put()
608 new = old | SCR_RELAY; qtet_ain12_sw_put()
609 set_scr(ice, new); qtet_ain12_sw_put()
610 new = (new & ~SCR_AIN12_SEL1) | SCR_AIN12_SEL0; qtet_ain12_sw_put()
611 set_scr(ice, new); qtet_ain12_sw_put()
615 new = old | SCR_RELAY; qtet_ain12_sw_put()
616 set_scr(ice, new); qtet_ain12_sw_put()
617 new |= SCR_AIN12_SEL1 | SCR_AIN12_SEL0; qtet_ain12_sw_put()
618 set_scr(ice, new); qtet_ain12_sw_put()
644 unsigned int old, new; qtet_php_put() local
645 old = new = get_scr(ice); qtet_php_put()
650 new = old | SCR_PHP_V; qtet_php_put()
651 set_scr(ice, new); qtet_php_put()
653 new &= ~SCR_PHP; qtet_php_put()
654 set_scr(ice, new); qtet_php_put()
659 new = old & ~SCR_PHP_V; qtet_php_put()
660 set_scr(ice, new); qtet_php_put()
662 new |= SCR_PHP; qtet_php_put()
663 set_scr(ice, new); qtet_php_put()
665 if (old != new) qtet_php_put()
720 unsigned int old, new; qtet_sw_put() local
723 new = old | private.bit; qtet_sw_put()
725 new = old & ~private.bit; qtet_sw_put()
726 if (old != new) { qtet_sw_put()
727 private.set_register(ice, new); qtet_sw_put()
867 /* setting new rate */ qtet_set_rate()
870 unsigned int new; qtet_set_rate() local
876 new = (get_cpld(ice) & ~CPLD_CKS_MASK) | get_cks_val(rate); qtet_set_rate()
878 new &= ~CPLD_SYNC_SEL; qtet_set_rate()
879 /* dev_dbg(ice->card->dev, "QT - set_rate: old %x, new %x\n", qtet_set_rate()
880 get_cpld(ice), new); */ qtet_set_rate()
881 set_cpld(ice, new); qtet_set_rate()
894 unsigned int old, new; qtet_set_spdif_clock() local
896 old = new = get_cpld(ice); qtet_set_spdif_clock()
897 new &= ~(CPLD_CKS_MASK | CPLD_WORD_SEL); qtet_set_spdif_clock()
900 new |= CPLD_EXT_SPDIF; qtet_set_spdif_clock()
903 new |= CPLD_EXT_WORDCLOCK_1FS; qtet_set_spdif_clock()
906 new |= CPLD_EXT_WORDCLOCK_256FS; qtet_set_spdif_clock()
911 if (old != new) { qtet_set_spdif_clock()
912 set_cpld(ice, new); qtet_set_spdif_clock()
/linux-4.1.27/arch/tile/kernel/
H A Dftrace.c111 unsigned long new) ftrace_modify_code()
122 if (probe_kernel_write((void *)pc_wr, &new, MCOUNT_INSN_SIZE)) ftrace_modify_code()
136 unsigned long new; ftrace_update_ftrace_func() local
141 new = ftrace_call_replace(pc, (unsigned long)func); ftrace_update_ftrace_func()
143 ret = ftrace_modify_code(pc, old, new); ftrace_update_ftrace_func()
150 unsigned long new, old; ftrace_make_call() local
154 new = ftrace_call_replace(ip, addr); ftrace_make_call()
156 return ftrace_modify_code(rec->ip, old, new); ftrace_make_call()
164 unsigned long new; ftrace_make_nop() local
168 new = ftrace_nop_replace(rec); ftrace_make_nop()
169 ret = ftrace_modify_code(ip, old, new); ftrace_make_nop()
222 unsigned long new = enable ? branch : nop; __ftrace_modify_caller() local
224 return ftrace_modify_code(pc, old, new); __ftrace_modify_caller()
110 ftrace_modify_code(unsigned long pc, unsigned long old, unsigned long new) ftrace_modify_code() argument
/linux-4.1.27/mm/
H A Dpage_counter.c22 long new; page_counter_cancel() local
24 new = atomic_long_sub_return(nr_pages, &counter->count); page_counter_cancel()
26 WARN_ON_ONCE(new < 0); page_counter_cancel()
41 long new; page_counter_charge() local
43 new = atomic_long_add_return(nr_pages, &c->count); page_counter_charge()
48 if (new > c->watermark) page_counter_charge()
49 c->watermark = new; page_counter_charge()
69 long new; page_counter_try_charge() local
81 * we either see the new limit or the setter sees the page_counter_try_charge()
84 new = atomic_long_add_return(nr_pages, &c->count); page_counter_try_charge()
85 if (new > c->limit) { page_counter_try_charge()
99 if (new > c->watermark) page_counter_try_charge()
100 c->watermark = new; page_counter_try_charge()
/linux-4.1.27/virt/kvm/
H A Dirqchip.c164 struct kvm_irq_routing_table *new, *old; kvm_set_irq_routing() local
176 new = kzalloc(sizeof(*new) + (nr_rt_entries * sizeof(struct hlist_head)) kvm_set_irq_routing()
180 if (!new) kvm_set_irq_routing()
183 new->rt_entries = (void *)&new->map[nr_rt_entries]; kvm_set_irq_routing()
185 new->nr_rt_entries = nr_rt_entries; kvm_set_irq_routing()
188 new->chip[i][j] = -1; kvm_set_irq_routing()
194 r = setup_routing_entry(new, &new->rt_entries[i], ue); kvm_set_irq_routing()
202 rcu_assign_pointer(kvm->irq_routing, new); kvm_set_irq_routing()
208 new = old; kvm_set_irq_routing()
212 kfree(new); kvm_set_irq_routing()
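kvm_set_irq_routing() above builds the new table off to the side, publishes it with rcu_assign_pointer(), and only then retires the old one (the snippet's "new = old; ... kfree(new)" tail). The generic publish-and-retire shape of that pattern, sketched with illustrative names (my_table, active_table, table_lock) and a plain synchronize_rcu():

#include <linux/mutex.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct my_table {
	unsigned int nr_entries;
	/* ... routing data ... */
};

static struct my_table __rcu *active_table;
static DEFINE_MUTEX(table_lock);

static void my_replace_table(struct my_table *new)
{
	struct my_table *old;

	mutex_lock(&table_lock);
	old = rcu_dereference_protected(active_table,
					lockdep_is_held(&table_lock));
	rcu_assign_pointer(active_table, new);	/* readers now see the new table */
	mutex_unlock(&table_lock);

	synchronize_rcu();			/* wait for pre-existing readers */
	kfree(old);				/* now safe to free the old table */
}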
/linux-4.1.27/arch/xtensa/include/asm/
H A Dcmpxchg.h23 __cmpxchg_u32(volatile int *p, int old, int new) __cmpxchg_u32() argument
29 : "+a" (new) __cmpxchg_u32()
34 return new; __cmpxchg_u32()
45 : "a" (p), "a" (old), "r" (new) __cmpxchg_u32()
56 __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size) __cmpxchg() argument
59 case 4: return __cmpxchg_u32(ptr, old, new); __cmpxchg()
76 unsigned long new, int size) __cmpxchg_local()
80 return __cmpxchg_u32(ptr, old, new); __cmpxchg_local()
82 return __cmpxchg_local_generic(ptr, old, new, size); __cmpxchg_local()
74 __cmpxchg_local(volatile void *ptr, unsigned long old, unsigned long new, int size) __cmpxchg_local() argument
/linux-4.1.27/arch/frv/include/asm/
H A Dcmpxchg.h80 extern uint64_t __cmpxchg_64(uint64_t test, uint64_t new, volatile uint64_t *v);
84 #define cmpxchg(ptr, test, new) \
89 __typeof__(*(ptr)) __xg_new = (new); \
122 extern uint32_t __cmpxchg_32(uint32_t *v, uint32_t test, uint32_t new);
124 #define cmpxchg(ptr, test, new) \
129 __typeof__(*(ptr)) __xg_new = (new); \
151 unsigned long new, int size) __cmpxchg_local()
155 return cmpxchg((unsigned long *)ptr, old, new); __cmpxchg_local()
157 return __cmpxchg_local_generic(ptr, old, new, size); __cmpxchg_local()
149 __cmpxchg_local(volatile void *ptr, unsigned long old, unsigned long new, int size) __cmpxchg_local() argument
H A Datomic.h179 #define atomic_cmpxchg(v, old, new) (cmpxchg(&(v)->counter, old, new))
180 #define atomic_xchg(v, new) (xchg(&(v)->counter, new))
181 #define atomic64_cmpxchg(v, old, new) (__cmpxchg_64(old, new, &(v)->counter))
182 #define atomic64_xchg(v, new) (__xchg_64(new, &(v)->counter))
/linux-4.1.27/arch/hexagon/mm/
H A Dcopy_user_template.S33 if (!p0.new) jump:nt .Ldone
40 if (!p0.new) jump:nt .Loop_not_aligned_8
66 if (p0.new) jump:nt .Lalign
70 if (!p0.new) jump:nt .Loop_not_aligned_4
96 if (!p0.new) jump:nt .Loop_not_aligned
139 if (p0.new) jump:nt .Loop_not_aligned
150 if (p0.new) w_dbuf = memub(src)
162 if (p0.new) w_dbuf = memuh(src)
174 if (p0.new) w_dbuf = memw(src)
H A Dstrnlen_user.S55 if (P0.new) jump:t dw_loop; /* fire up the oven */
64 if (P0.new) jump:nt exit_found;
97 if (!P0.new) jump:nt exit_found;
98 if (!P0.new) start = add(obo,tmp1);
109 if (P0.new) jump:nt exit_error; /* neverfound! */
/linux-4.1.27/arch/nios2/include/asm/
H A Dmmu_context.h32 * Initialize the context related info for a new mm_struct instance.
34 * Set all new contexts to 0, that way the generation will never match
61 * After we have set current->mm to a new value, this activates
62 * the context for the new mm so we see the new mappings.
/linux-4.1.27/fs/jfs/
H A Dresize.c51 * new LVSize: in LV blocks (required)
52 * new LogSize: in LV blocks (optional)
53 * new FSSize: in LV blocks (optional)
55 * new configuration:
56 * 1. set new LogSize as specified or default from new LVSize;
57 * 2. compute new FSCKSize from new LVSize;
58 * 3. set new FSSize as MIN(FSSize, LVSize-(LogSize+FSCKSize)) where
59 * assert(new FSSize >= old FSSize),
132 * validate new size, or, if not specified, determine new size jfs_extendfs()
181 * compute new file system space; jfs_extendfs()
193 * the old one, we can format the new log before we quiesce the jfs_extendfs()
206 * block any new transactions and wait for completion of jfs_extendfs()
225 * update on-disk superblock for the new space configuration jfs_extendfs()
255 * format new inline log synchronously; jfs_extendfs()
265 * activate new log jfs_extendfs()
277 * extendfs() for new extension, retry after crash recovery; jfs_extendfs()
287 * compute the new block allocation map configuration jfs_extendfs()
297 /* number of data pages of new bmap file: jfs_extendfs()
298 * roundup new size to full dmap page boundary and jfs_extendfs()
307 * map new extension with unmapped part of the last partial jfs_extendfs()
329 * update map pages for new extension: jfs_extendfs()
351 * allocate new map pages and its backing blocks, and jfs_extendfs()
362 * grow bmap file for the new map pages required: jfs_extendfs()
386 newPage = nPages; /* first new page number */ jfs_extendfs()
412 * di_size = new map file size; jfs_extendfs()
416 * (it could have been used up for new map pages), jfs_extendfs()
417 * but the newly grown map file now covers a much larger new free space jfs_extendfs()
432 * move iag lists from old to new iag; jfs_extendfs()
436 * will correctly identify the new ag); jfs_extendfs()
438 /* if new AG size the same as old AG size, done! */ jfs_extendfs()
453 * updated with new descriptors: logredo will recover jfs_extendfs()
/linux-4.1.27/drivers/s390/crypto/
H A Dzcrypt_cex4.c27 #define CEX4A_SPEED_RATING 900 /* TODO new card, new speed rating */
28 #define CEX4C_SPEED_RATING 6500 /* TODO new card, new speed rating */
29 #define CEX4P_SPEED_RATING 7000 /* TODO new card, new speed rating */
30 #define CEX5A_SPEED_RATING 450 /* TODO new card, new speed rating */
31 #define CEX5C_SPEED_RATING 3250 /* TODO new card, new speed rating */
32 #define CEX5P_SPEED_RATING 3500 /* TODO new card, new speed rating */
/linux-4.1.27/arch/hexagon/include/asm/
H A Datomic.h33 static inline void atomic_set(atomic_t *v, int new) atomic_set() argument
40 : "r" (&v->counter), "r" (new) atomic_set()
56 * @new: new value (technically passed in a register -- see xchg)
58 #define atomic_xchg(v, new) (xchg(&((v)->counter), (new)))
65 * @new: new value to put in
78 static inline int atomic_cmpxchg(atomic_t *v, int old, int new) atomic_cmpxchg() argument
85 " if (!P0.new) jump:nt 2f; }\n" atomic_cmpxchg()
90 : "r" (&v->counter), "r" (old), "r" (new) atomic_cmpxchg()
158 " if (p3.new) jump:nt 2f;" __atomic_add_unless()
H A Dcmpxchg.h33 * Note: there was an errata for V2 about .new's and memw_locked.
69 #define cmpxchg(ptr, old, new) \
73 __typeof__(*(ptr)) __new = (new); \
79 " if (!P0.new) jump:nt 2f; }\n" \
/linux-4.1.27/tools/usb/usbip/libsrc/
H A Dlist.h34 * Insert a new entry between two known consecutive entries.
39 static inline void __list_add(struct list_head *new, __list_add() argument
43 next->prev = new; __list_add()
44 new->next = next; __list_add()
45 new->prev = prev; __list_add()
46 prev->next = new; __list_add()
50 * list_add - add a new entry
51 * @new: new entry to be added
54 * Insert a new entry after the specified head.
57 static inline void list_add(struct list_head *new, struct list_head *head) list_add() argument
59 __list_add(new, head, head->next); list_add()
/linux-4.1.27/arch/sparc/lib/
H A Datomic32.c48 int atomic_xchg(atomic_t *v, int new) atomic_xchg() argument
55 v->counter = new; atomic_xchg()
61 int atomic_cmpxchg(atomic_t *v, int old, int new) atomic_cmpxchg() argument
69 v->counter = new; atomic_cmpxchg()
140 unsigned long __cmpxchg_u32(volatile u32 *ptr, u32 old, u32 new) __cmpxchg_u32() argument
147 *ptr = new; __cmpxchg_u32()
154 unsigned long __xchg_u32(volatile u32 *ptr, u32 new) __xchg_u32() argument
161 *ptr = new; __xchg_u32()
/linux-4.1.27/arch/um/drivers/
H A Dmconsole_kern.h42 extern void mconsole_register_dev(struct mc_device *new);
46 static inline void mconsole_register_dev(struct mc_device *new) mconsole_register_dev() argument
H A Dxterm.c88 int pid, fd, new, err; xterm_open() local
152 new = xterm_fd(fd, &data->helper_pid); xterm_open()
153 if (new < 0) { xterm_open()
154 err = new; xterm_open()
160 err = os_set_fd_block(new, 0); xterm_open()
167 CATCH_EINTR(err = tcgetattr(new, &data->tt)); xterm_open()
169 new = err; xterm_open()
174 err = raw(new); xterm_open()
176 new = err; xterm_open()
185 return new; xterm_open()
188 close(new); xterm_open()
H A Dport_user.c169 int new, err; port_connection() local
174 new = accept(fd, NULL, 0); port_connection()
175 if (new < 0) port_connection()
183 { .sock_fd = new, port_connection()
191 return new; port_connection()
199 close(new); port_connection()
/linux-4.1.27/fs/lockd/
H A Dmon.c108 struct rpc_clnt *clnt, *new; nsm_client_get() local
115 clnt = new = nsm_create(net, nodename); nsm_client_get()
119 clnt = nsm_client_set(ln, new); nsm_client_get()
120 if (clnt != new) nsm_client_get()
121 rpc_shutdown_client(new); nsm_client_get()
332 struct nsm_handle *new; nsm_create_handle() local
334 new = kzalloc(sizeof(*new) + hostname_len + 1, GFP_KERNEL); nsm_create_handle()
335 if (unlikely(new == NULL)) nsm_create_handle()
338 atomic_set(&new->sm_count, 1); nsm_create_handle()
339 new->sm_name = (char *)(new + 1); nsm_create_handle()
340 memcpy(nsm_addr(new), sap, salen); nsm_create_handle()
341 new->sm_addrlen = salen; nsm_create_handle()
342 nsm_init_private(new); nsm_create_handle()
344 if (rpc_ntop(nsm_addr(new), new->sm_addrbuf, nsm_create_handle()
345 sizeof(new->sm_addrbuf)) == 0) nsm_create_handle()
346 (void)snprintf(new->sm_addrbuf, sizeof(new->sm_addrbuf), nsm_create_handle()
348 memcpy(new->sm_name, hostname, hostname_len); nsm_create_handle()
349 new->sm_name[hostname_len] = '\0'; nsm_create_handle()
351 return new; nsm_create_handle()
372 struct nsm_handle *cached, *new = NULL; nsm_get_handle() local
394 kfree(new); nsm_get_handle()
402 if (new != NULL) { nsm_get_handle()
403 list_add(&new->sm_link, &nsm_handles); nsm_get_handle()
406 new->sm_name, new->sm_addrbuf); nsm_get_handle()
407 return new; nsm_get_handle()
412 new = nsm_create_handle(sap, salen, hostname, hostname_len); nsm_get_handle()
413 if (unlikely(new == NULL)) nsm_get_handle()
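nsm_create_handle() above allocates the handle and its name string in a single kzalloc() by over-allocating and pointing sm_name just past the struct. The same trick in isolation, with struct my_handle and my_handle_alloc() as illustrative names:

#include <linux/atomic.h>
#include <linux/slab.h>
#include <linux/string.h>

struct my_handle {
	atomic_t count;
	char *name;			/* points at the trailing storage */
};

static struct my_handle *my_handle_alloc(const char *name, size_t len)
{
	struct my_handle *h;

	h = kzalloc(sizeof(*h) + len + 1, GFP_KERNEL);	/* struct + name + NUL */
	if (!h)
		return NULL;

	atomic_set(&h->count, 1);
	h->name = (char *)(h + 1);	/* first byte after the struct */
	memcpy(h->name, name, len);
	h->name[len] = '\0';
	return h;
}

One allocation means one kfree() on the release path, and the name can never outlive the handle it belongs to.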
/linux-4.1.27/arch/score/include/asm/
H A Dcmpxchg.h27 unsigned long old, unsigned long new) __cmpxchg()
35 *m = new; __cmpxchg()
26 __cmpxchg(volatile unsigned long *m, unsigned long old, unsigned long new) __cmpxchg() argument
H A Dmmu_context.h52 local_flush_tlb_all(); /* start new asid cycle */ get_new_mmu_context()
62 * Initialize the context related info for a new mm_struct
98 * After we have set current->mm to a new value, this activates
99 * the context for the new mm so we see the new mappings.
/linux-4.1.27/arch/arm64/include/asm/
H A Dalternative.h13 u8 alt_len; /* size of new instruction(s), <= orig_len */
22 " .word 663f - .\n" /* new instruction */ \
H A Datomic.h92 static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new) atomic_cmpxchg() argument
107 : "Ir" (old), "r" (new) atomic_cmpxchg()
114 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
190 static inline long atomic64_cmpxchg(atomic64_t *ptr, long old, long new) atomic64_cmpxchg() argument
205 : "Ir" (old), "r" (new) atomic64_cmpxchg()
212 #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
H A Dcmpxchg.h84 unsigned long new, int size) __cmpxchg()
99 : "Ir" (old), "r" (new) __cmpxchg()
114 : "Ir" (old), "r" (new) __cmpxchg()
129 : "Ir" (old), "r" (new) __cmpxchg()
144 : "Ir" (old), "r" (new) __cmpxchg()
202 unsigned long new, int size) __cmpxchg_mb()
207 ret = __cmpxchg(ptr, old, new, size); __cmpxchg_mb()
83 __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size) __cmpxchg() argument
201 __cmpxchg_mb(volatile void *ptr, unsigned long old, unsigned long new, int size) __cmpxchg_mb() argument
/linux-4.1.27/arch/frv/mb93090-mb00/
H A Dpci-dma-nommu.c39 struct dma_alloc_record *new; dma_alloc_coherent() local
50 new = kmalloc(sizeof (*new), GFP_ATOMIC); dma_alloc_coherent()
51 if (!new) dma_alloc_coherent()
55 new->len = (size + 31) & ~31; dma_alloc_coherent()
74 new->ofs = start; dma_alloc_coherent()
75 list_add_tail(&new->list, this); dma_alloc_coherent()
82 kfree(new); dma_alloc_coherent()
/linux-4.1.27/arch/metag/include/asm/
H A Dcmpxchg_irq.h29 unsigned long new) __cmpxchg_u32()
37 *m = new; __cmpxchg_u32()
28 __cmpxchg_u32(volatile int *m, unsigned long old, unsigned long new) __cmpxchg_u32() argument
H A Dcmpxchg_lock1.h33 unsigned long new) __cmpxchg_u32()
42 *m = new; __cmpxchg_u32()
32 __cmpxchg_u32(volatile int *m, unsigned long old, unsigned long new) __cmpxchg_u32() argument
H A Dcmpxchg.h44 unsigned long new, int size) __cmpxchg()
48 return __cmpxchg_u32(ptr, old, new); __cmpxchg()
43 __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size) __cmpxchg() argument
H A Dcmpxchg_lnkget.h57 unsigned long new) __cmpxchg_u32()
77 : "da" (m), "bd" (old), "da" (new) __cmpxchg_u32()
56 __cmpxchg_u32(volatile int *m, unsigned long old, unsigned long new) __cmpxchg_u32() argument
H A Datomic_lock1.h96 static inline int atomic_cmpxchg(atomic_t *v, int old, int new) atomic_cmpxchg() argument
105 v->counter = new; atomic_cmpxchg()
112 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
/linux-4.1.27/drivers/scsi/sym53c8xx_2/
H A Dsym_misc.h65 static inline void __sym_que_add(struct sym_quehead * new, __sym_que_add() argument
69 flink->blink = new; __sym_que_add()
70 new->flink = flink; __sym_que_add()
71 new->blink = blink; __sym_que_add()
72 blink->flink = new; __sym_que_add()
126 #define sym_insque(new, pos) __sym_que_add(new, pos, (pos)->flink)
130 #define sym_insque_head(new, head) __sym_que_add(new, head, (head)->flink)
143 #define sym_insque_tail(new, head) __sym_que_add(new, (head)->blink, head)
/linux-4.1.27/arch/sh/drivers/
H A Dheartbeat.c38 unsigned int new; heartbeat_toggle_bit() local
40 new = (1 << hd->bit_pos[bit]); heartbeat_toggle_bit()
42 new = ~new; heartbeat_toggle_bit()
44 new &= hd->mask; heartbeat_toggle_bit()
48 new |= ioread32(hd->base) & ~hd->mask; heartbeat_toggle_bit()
49 iowrite32(new, hd->base); heartbeat_toggle_bit()
52 new |= ioread16(hd->base) & ~hd->mask; heartbeat_toggle_bit()
53 iowrite16(new, hd->base); heartbeat_toggle_bit()
56 new |= ioread8(hd->base) & ~hd->mask; heartbeat_toggle_bit()
57 iowrite8(new, hd->base); heartbeat_toggle_bit()
/linux-4.1.27/arch/arm64/kernel/
H A Dftrace.c25 static int ftrace_modify_code(unsigned long pc, u32 old, u32 new, ftrace_modify_code() argument
46 if (aarch64_insn_patch_text_nosync((void *)pc, new)) ftrace_modify_code()
58 u32 new; ftrace_update_ftrace_func() local
61 new = aarch64_insn_gen_branch_imm(pc, (unsigned long)func, ftrace_update_ftrace_func()
64 return ftrace_modify_code(pc, 0, new, false); ftrace_update_ftrace_func()
73 u32 old, new; ftrace_make_call() local
76 new = aarch64_insn_gen_branch_imm(pc, addr, AARCH64_INSN_BRANCH_LINK); ftrace_make_call()
78 return ftrace_modify_code(pc, old, new, true); ftrace_make_call()
88 u32 old, new; ftrace_make_nop() local
91 new = aarch64_insn_gen_nop(); ftrace_make_nop()
93 return ftrace_modify_code(pc, old, new, true); ftrace_make_nop()
/linux-4.1.27/fs/
H A Danon_inodes.c55 * anon_inode_getfile - creates a new file instance by hooking it up to an
59 * @name: [in] name of the "class" of the new file
60 * @fops: [in] file operations for the new file
61 * @priv: [in] private data for the new file (will be file's private_data)
64 * Creates a new file by hooking it on a single inode. This is useful for files
124 * anon_inode_getfd - creates a new file instance by hooking it up to an
128 * @name: [in] name of the "class" of the new file
129 * @fops: [in] file operations for the new file
130 * @priv: [in] private data for the new file (will be file's private_data)
133 * Creates a new file by hooking it on a single inode. This is useful for files
137 * setup. Returns new descriptor or an error code.
/linux-4.1.27/net/6lowpan/
H A Dnhc.c26 struct rb_node **new = &rb_root.rb_node, *parent = NULL; lowpan_nhc_insert() local
28 /* Figure out where to put new node */ lowpan_nhc_insert()
29 while (*new) { lowpan_nhc_insert()
30 struct lowpan_nhc *this = container_of(*new, struct lowpan_nhc, lowpan_nhc_insert()
45 parent = *new; lowpan_nhc_insert()
47 new = &((*new)->rb_left); lowpan_nhc_insert()
49 new = &((*new)->rb_right); lowpan_nhc_insert()
54 /* Add new node and rebalance tree. */ lowpan_nhc_insert()
55 rb_link_node(&nhc->node, parent, new); lowpan_nhc_insert()
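The lowpan_nhc_insert() hits above show the kernel's standard rbtree insertion idiom: walk down from the root comparing keys to find the parent link, then rb_link_node() and rb_insert_color(). The sketch below is a hedged userspace stand-in that mirrors only the walk-and-link step using a plain, unbalanced binary search tree; struct nhc_node and its fields are invented for illustration, and the kernel version additionally rebalances via rb_insert_color().

    #include <stdio.h>
    #include <stdlib.h>

    /* Plain BST stand-in for the kernel rbtree walk (no rebalancing). */
    struct nhc_node {
        int id;                          /* stand-in for the nhc key */
        struct nhc_node *left, *right;
    };

    static int bst_insert(struct nhc_node **root, struct nhc_node *node)
    {
        struct nhc_node **link = root;   /* mirrors "new = &rb_root.rb_node" */

        /* Figure out where to put the new node. */
        while (*link) {
            struct nhc_node *this = *link;   /* mirrors container_of(*new, ...) */

            if (node->id < this->id)
                link = &this->left;
            else if (node->id > this->id)
                link = &this->right;
            else
                return -1;               /* duplicate key: reject */
        }

        /* Link the new node; the kernel would now call rb_insert_color(). */
        node->left = node->right = NULL;
        *link = node;
        return 0;
    }

    int main(void)
    {
        struct nhc_node *root = NULL;
        int keys[] = { 17, 6, 43, 6 };

        for (unsigned i = 0; i < sizeof(keys) / sizeof(keys[0]); i++) {
            struct nhc_node *n = calloc(1, sizeof(*n));
            n->id = keys[i];
            printf("insert %d -> %s\n", keys[i],
                   bst_insert(&root, n) ? "duplicate" : "ok");
        }
        return 0;
    }

The same "find parent link, then link the new node" shape recurs in the xen p2m and iommu/iova hits later in this result set.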
/linux-4.1.27/net/netfilter/
H A Dnf_conntrack_extend.c75 struct nf_ct_ext *old, *new; __nf_ct_ext_add_length() local
97 new = __krealloc(old, newlen, gfp); __nf_ct_ext_add_length()
98 if (!new) __nf_ct_ext_add_length()
101 if (new != old) { __nf_ct_ext_add_length()
109 t->move((void *)new + new->offset[i], __nf_ct_ext_add_length()
114 ct->ext = new; __nf_ct_ext_add_length()
117 new->offset[id] = newoff; __nf_ct_ext_add_length()
118 new->len = newlen; __nf_ct_ext_add_length()
119 memset((void *)new + newoff, 0, newlen - newoff); __nf_ct_ext_add_length()
120 return (void *)new + newoff; __nf_ct_ext_add_length()
/linux-4.1.27/arch/microblaze/include/uapi/asm/
H A Dunistd.h357 #define __NR_signalfd4 339 /* new */
358 #define __NR_eventfd2 340 /* new */
359 #define __NR_epoll_create1 341 /* new */
360 #define __NR_dup3 342 /* new */
361 #define __NR_pipe2 343 /* new */
362 #define __NR_inotify_init1 344 /* new */
363 #define __NR_socket 345 /* new */
364 #define __NR_socketpair 346 /* new */
365 #define __NR_bind 347 /* new */
366 #define __NR_listen 348 /* new */
367 #define __NR_accept 349 /* new */
368 #define __NR_connect 350 /* new */
369 #define __NR_getsockname 351 /* new */
370 #define __NR_getpeername 352 /* new */
371 #define __NR_sendto 353 /* new */
372 #define __NR_send 354 /* new */
373 #define __NR_recvfrom 355 /* new */
374 #define __NR_recv 356 /* new */
375 #define __NR_setsockopt 357 /* new */
376 #define __NR_getsockopt 358 /* new */
377 #define __NR_shutdown 359 /* new */
378 #define __NR_sendmsg 360 /* new */
379 #define __NR_recvmsg 361 /* new */
380 #define __NR_accept4 362 /* new */
381 #define __NR_preadv 363 /* new */
382 #define __NR_pwritev 364 /* new */
383 #define __NR_rt_tgsigqueueinfo 365 /* new */
384 #define __NR_perf_event_open 366 /* new */
385 #define __NR_recvmmsg 367 /* new */
/linux-4.1.27/security/selinux/ss/
H A Debitmap.c51 struct ebitmap_node *n, *new, *prev; ebitmap_cpy() local
57 new = kzalloc(sizeof(*new), GFP_ATOMIC); ebitmap_cpy()
58 if (!new) { ebitmap_cpy()
62 new->startbit = n->startbit; ebitmap_cpy()
63 memcpy(new->maps, n->maps, EBITMAP_SIZE / 8); ebitmap_cpy()
64 new->next = NULL; ebitmap_cpy()
66 prev->next = new; ebitmap_cpy()
68 dst->node = new; ebitmap_cpy()
69 prev = new; ebitmap_cpy()
258 struct ebitmap_node *n, *prev, *new; ebitmap_set_bit() local
302 new = kzalloc(sizeof(*new), GFP_ATOMIC); ebitmap_set_bit()
303 if (!new) ebitmap_set_bit()
306 new->startbit = bit - (bit % EBITMAP_SIZE); ebitmap_set_bit()
307 ebitmap_node_set_bit(new, bit); ebitmap_set_bit()
311 e->highbit = new->startbit + EBITMAP_SIZE; ebitmap_set_bit()
314 new->next = prev->next; ebitmap_set_bit()
315 prev->next = new; ebitmap_set_bit()
317 new->next = e->node; ebitmap_set_bit()
318 e->node = new; ebitmap_set_bit()
/linux-4.1.27/drivers/iio/accel/
H A DMakefile5 # When adding new entries keep the list in alphabetical order
/linux-4.1.27/drivers/iio/gyro/
H A DMakefile5 # When adding new entries keep the list in alphabetical order
/linux-4.1.27/drivers/iio/imu/
H A DMakefile5 # When adding new entries keep the list in alphabetical order
/linux-4.1.27/drivers/iio/magnetometer/
H A DMakefile5 # When adding new entries keep the list in alphabetical order
/linux-4.1.27/drivers/iio/pressure/
H A DMakefile5 # When adding new entries keep the list in alphabetical order
/linux-4.1.27/arch/mips/boot/dts/include/dt-bindings/clock/
H A Dstih410-clks.h10 /* STiH410 introduces new clock outputs compared to STiH407 */
H A Dstih418-clks.h10 /* STiH418 introduces new clock outputs compared to STiH410 */
/linux-4.1.27/include/uapi/linux/netfilter_ipv4/
H A Dipt_CLUSTERIP.h25 /* only relevant for new ones */
/linux-4.1.27/arch/parisc/lib/
H A Dbitops.c59 unsigned long __cmpxchg_u64(volatile unsigned long *ptr, unsigned long old, unsigned long new) __cmpxchg_u64() argument
66 *ptr = new; __cmpxchg_u64()
72 unsigned long __cmpxchg_u32(volatile unsigned int *ptr, unsigned int old, unsigned int new) __cmpxchg_u32() argument
79 *ptr = new; __cmpxchg_u32()
/linux-4.1.27/arch/powerpc/boot/dts/include/dt-bindings/clock/
H A Dstih410-clks.h10 /* STiH410 introduces new clock outputs compared to STiH407 */
H A Dstih418-clks.h10 /* STiH418 introduces new clock outputs compared to STiH410 */
/linux-4.1.27/arch/arm64/boot/dts/include/dt-bindings/clock/
H A Dstih410-clks.h10 /* STiH410 introduces new clock outputs compared to STiH407 */
H A Dstih418-clks.h10 /* STiH418 introduces new clock outputs compared to STiH410 */
/linux-4.1.27/arch/cris/include/asm/
H A Ddma.h8 /* it's useless on the Etrax, but unfortunately needed by the new
/linux-4.1.27/arch/cris/include/uapi/asm/
H A Drs485.h4 * XXX: Do not use it for new code!
/linux-4.1.27/arch/metag/boot/dts/include/dt-bindings/clock/
H A Dstih410-clks.h10 /* STiH410 introduces new clock outputs compared to STiH407 */
H A Dstih418-clks.h10 /* STiH418 introduces new clock outputs compared to STiH410 */
/linux-4.1.27/arch/arm/boot/dts/include/dt-bindings/clock/
H A Dstih410-clks.h10 /* STiH410 introduces new clock outputs compared to STiH407 */
H A Dstih418-clks.h10 /* STiH418 introduces new clock outputs compared to STiH410 */
/linux-4.1.27/include/dt-bindings/clock/
H A Dstih410-clks.h10 /* STiH410 introduces new clock outputs compared to STiH407 */
H A Dstih418-clks.h10 /* STiH418 introduces new clock outputs compared to STiH410 */
/linux-4.1.27/include/uapi/linux/netfilter/
H A Dnf_conntrack_common.h14 /* Started a new connection to track (only
98 IPCT_NEW, /* new conntrack */
104 IPCT_HELPER, /* new helper has been set */
105 IPCT_MARK, /* new mark has been set */
108 IPCT_SECMARK, /* new security mark has been set */
109 IPCT_LABEL, /* new connlabel has been set */
113 IPEXP_NEW, /* new expectation */
/linux-4.1.27/arch/arm/xen/
H A Dp2m.c31 static int xen_add_phys_to_mach_entry(struct xen_p2m_entry *new) xen_add_phys_to_mach_entry() argument
42 if (new->pfn == entry->pfn) xen_add_phys_to_mach_entry()
45 if (new->pfn < entry->pfn) xen_add_phys_to_mach_entry()
50 rb_link_node(&new->rbnode_phys, parent, link); xen_add_phys_to_mach_entry()
51 rb_insert_color(&new->rbnode_phys, &phys_to_mach); xen_add_phys_to_mach_entry()
57 __func__, &new->pfn, &new->mfn, &entry->pfn, &entry->mfn); xen_add_phys_to_mach_entry()
/linux-4.1.27/arch/blackfin/include/asm/
H A Dcmpxchg.h18 unsigned long new, unsigned long old);
20 unsigned long new, unsigned long old);
22 unsigned long new, unsigned long old);
50 unsigned long new, int size) __cmpxchg()
56 tmp = __raw_cmpxchg_1_asm(ptr, new, old); __cmpxchg()
59 tmp = __raw_cmpxchg_2_asm(ptr, new, old); __cmpxchg()
62 tmp = __raw_cmpxchg_4_asm(ptr, new, old); __cmpxchg()
49 __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size) __cmpxchg() argument
/linux-4.1.27/arch/mips/include/asm/
H A Dcmpxchg.h143 #define __cmpxchg_asm(ld, st, m, old, new) \
162 : GCC_OFF_SMALL_ASM() (*m), "Jr" (old), "Jr" (new) \
179 : GCC_OFF_SMALL_ASM() (*m), "Jr" (old), "Jr" (new) \
187 *m = new; \
200 #define __cmpxchg(ptr, old, new, pre_barrier, post_barrier) \
204 __typeof__(*(ptr)) __new = (new); \
229 #define cmpxchg(ptr, old, new) __cmpxchg(ptr, old, new, smp_mb__before_llsc(), smp_llsc_mb())
230 #define cmpxchg_local(ptr, old, new) __cmpxchg(ptr, old, new, , )
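The cmpxchg() hits above (and the other per-arch __cmpxchg implementations in this result set) all provide the same primitive: store new at *ptr only if it still holds old, returning the prior value. A typical caller wraps it in a read-modify-write retry loop. Below is a minimal userspace sketch of that caller-side idiom using GCC's __sync_val_compare_and_swap builtin (as in the feature test at the top of these results); it is not the kernel macro itself, and saturating_inc() is an invented example.

    #include <stdio.h>

    static unsigned long counter;

    /* Lock-free "add one, saturating at limit" built on compare-and-swap. */
    static unsigned long saturating_inc(unsigned long *ptr, unsigned long limit)
    {
        unsigned long old, new;

        do {
            old = *ptr;                      /* snapshot current value */
            new = old < limit ? old + 1 : limit;
            /* Retry if someone else changed *ptr since the snapshot. */
        } while (__sync_val_compare_and_swap(ptr, old, new) != old);

        return new;
    }

    int main(void)
    {
        for (int i = 0; i < 5; i++)
            printf("counter -> %lu\n", saturating_inc(&counter, 3));
        return 0;
    }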
H A Dmmu_context.h111 kvm_local_flush_tlb_all(); /* start new asid cycle */ get_new_mmu_context()
113 local_flush_tlb_all(); /* start new asid cycle */ get_new_mmu_context()
123 * Initialize the context related info for a new mm_struct
175 * After we have set current->mm to a new value, this activates
176 * the context for the new mm so we see the new mappings.
187 /* Unconditionally get a new ASID. */ activate_mm()
203 * we will get a new one for it.
217 /* will get a new context next time */ drop_mmu_context()
/linux-4.1.27/net/sched/
H A Dcls_cgroup.c97 struct cls_cgroup_head *new; cls_cgroup_change() local
111 new = kzalloc(sizeof(*head), GFP_KERNEL); cls_cgroup_change()
112 if (!new) cls_cgroup_change()
115 tcf_exts_init(&new->exts, TCA_CGROUP_ACT, TCA_CGROUP_POLICE); cls_cgroup_change()
116 new->handle = handle; cls_cgroup_change()
117 new->tp = tp; cls_cgroup_change()
134 tcf_exts_change(tp, &new->exts, &e); cls_cgroup_change()
135 tcf_em_tree_change(tp, &new->ematches, &t); cls_cgroup_change()
137 rcu_assign_pointer(tp->root, new); cls_cgroup_change()
142 kfree(new); cls_cgroup_change()
/linux-4.1.27/drivers/pci/pcie/aer/
H A Daerdrv.h83 enum pci_ers_result new) merge_result()
85 if (new == PCI_ERS_RESULT_NO_AER_DRIVER) merge_result()
88 if (new == PCI_ERS_RESULT_NONE) merge_result()
94 orig = new; merge_result()
97 if (new == PCI_ERS_RESULT_NEED_RESET) merge_result()
82 merge_result(enum pci_ers_result orig, enum pci_ers_result new) merge_result() argument
/linux-4.1.27/arch/s390/lib/
H A Ddelay.c32 unsigned long cr0, cr6, new; __udelay_disabled() local
39 new = (cr0 & 0xffff00e0) | 0x00000800; __udelay_disabled()
40 __ctl_load(new , 0, 0); __udelay_disabled()
41 new = 0; __udelay_disabled()
42 __ctl_load(new, 6, 6); __udelay_disabled()
/linux-4.1.27/arch/m68k/include/asm/
H A Dcmpxchg.h96 unsigned long new, int size) __cmpxchg()
102 : "d" (new), "0" (old), "m" (*(char *)p)); __cmpxchg()
107 : "d" (new), "0" (old), "m" (*(short *)p)); __cmpxchg()
112 : "d" (new), "0" (old), "m" (*(int *)p)); __cmpxchg()
115 old = __invalid_cmpxchg_size(p, old, new, size); __cmpxchg()
95 __cmpxchg(volatile void *p, unsigned long old, unsigned long new, int size) __cmpxchg() argument
H A Datomic.h121 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
125 static inline int atomic_cmpxchg(atomic_t *v, int old, int new) atomic_cmpxchg() argument
133 atomic_set(v, new); atomic_cmpxchg()
138 static inline int atomic_xchg(atomic_t *v, int new) atomic_xchg() argument
145 atomic_set(v, new); atomic_xchg()
/linux-4.1.27/tools/perf/scripts/python/
H A Dsched-migration.py73 return "new forked task %s" % thread_name(self.child)
80 def __init__(self, new):
81 self.new = new
84 return "task migrated in %s" % thread_name(self.new)
131 def __migrate_in(self, new, event):
132 if new in self.tasks:
135 next_tasks = self.tasks[:] + tuple([new])
139 def migrate_in(self, new):
140 return self.__migrate_in(new, RunqueueMigrateIn(new))
142 def wake_up(self, new):
143 return self.__migrate_in(new, RunqueueEventWakeup(new))
145 def wake_up_new(self, new):
146 return self.__migrate_in(new, RunqueueEventFork(new))
189 def migrate(self, ts_list, new, old_cpu, new_cpu):
193 out_rq = old_rq.migrate_out(new)
198 in_rq = new_rq.migrate_in(new)
/linux-4.1.27/fs/fat/
H A Dcache.c117 struct fat_cache_id *new) fat_cache_merge()
122 /* Find the same part as "new" in cluster-chain. */ fat_cache_merge()
123 if (p->fcluster == new->fcluster) { fat_cache_merge()
124 BUG_ON(p->dcluster != new->dcluster); fat_cache_merge()
125 if (new->nr_contig > p->nr_contig) fat_cache_merge()
126 p->nr_contig = new->nr_contig; fat_cache_merge()
133 static void fat_cache_add(struct inode *inode, struct fat_cache_id *new) fat_cache_add() argument
137 if (new->fcluster == -1) /* dummy cache */ fat_cache_add()
141 if (new->id != FAT_CACHE_VALID && fat_cache_add()
142 new->id != MSDOS_I(inode)->cache_valid_id) fat_cache_add()
145 cache = fat_cache_merge(inode, new); fat_cache_add()
160 cache = fat_cache_merge(inode, new); fat_cache_add()
171 cache->fcluster = new->fcluster; fat_cache_add()
172 cache->dcluster = new->dcluster; fat_cache_add()
173 cache->nr_contig = new->nr_contig; fat_cache_add()
116 fat_cache_merge(struct inode *inode, struct fat_cache_id *new) fat_cache_merge() argument
/linux-4.1.27/fs/jffs2/
H A Dnodelist.c26 void jffs2_add_fd_to_list(struct jffs2_sb_info *c, struct jffs2_full_dirent *new, struct jffs2_full_dirent **list) jffs2_add_fd_to_list() argument
30 dbg_dentlist("add dirent \"%s\", ino #%u\n", new->name, new->ino); jffs2_add_fd_to_list()
32 while ((*prev) && (*prev)->nhash <= new->nhash) { jffs2_add_fd_to_list()
33 if ((*prev)->nhash == new->nhash && !strcmp((*prev)->name, new->name)) { jffs2_add_fd_to_list()
35 if (new->version < (*prev)->version) { jffs2_add_fd_to_list()
36 dbg_dentlist("Eep! Marking new dirent node obsolete, old is \"%s\", ino #%u\n", jffs2_add_fd_to_list()
38 jffs2_mark_node_obsolete(c, new->raw); jffs2_add_fd_to_list()
39 jffs2_free_full_dirent(new); jffs2_add_fd_to_list()
43 new->next = (*prev)->next; jffs2_add_fd_to_list()
49 *prev = new; jffs2_add_fd_to_list()
55 new->next = *prev; jffs2_add_fd_to_list()
56 *prev = new; jffs2_add_fd_to_list()
147 * Allocates and initializes a new fragment.
166 * Called when no overlapping fragment exists. Inserts a hole before the new
167 * fragment and inserts the new fragment into the fragtree.
174 /* put a hole in before the new fragment */ no_overlapping_node()
187 dbg_fragtree2("add hole frag %#04x-%#04x on the right of the new frag.\n", no_overlapping_node()
202 So that's where we want to put the new fragment */ no_overlapping_node()
203 dbg_fragtree2("add the new node at the right\n"); no_overlapping_node()
206 dbg_fragtree2("insert the new node at the root of the tree\n"); no_overlapping_node()
236 /* Check if 'this' node was on the same page as the new node. jffs2_add_frag_to_fragtree()
237 If so, both 'this' and the new node get marked REF_NORMAL so jffs2_add_frag_to_fragtree()
263 /* Mark the new node and the partially covered node REF_NORMAL -- let jffs2_add_frag_to_fragtree()
270 /* The new node splits 'this' frag into two */ jffs2_add_frag_to_fragtree()
342 the new frag */ jffs2_add_frag_to_fragtree()
360 * Given an inode, probably with existing tree of fragments, add the new node
436 void jffs2_add_ino_cache (struct jffs2_sb_info *c, struct jffs2_inode_cache *new) jffs2_add_ino_cache() argument
441 if (!new->ino) jffs2_add_ino_cache()
442 new->ino = ++c->highest_ino; jffs2_add_ino_cache()
444 dbg_inocache("add %p (ino #%u)\n", new, new->ino); jffs2_add_ino_cache()
446 prev = &c->inocache_list[new->ino % c->inocache_hashsize]; jffs2_add_ino_cache()
448 while ((*prev) && (*prev)->ino < new->ino) { jffs2_add_ino_cache()
451 new->next = *prev; jffs2_add_ino_cache()
452 *prev = new; jffs2_add_ino_cache()
618 JFFS2_ERROR("Adding new ref %p at (0x%08x-0x%08x) not immediately after previous (0x%08x-0x%08x)\n", jffs2_link_node_ref()
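jffs2_add_fd_to_list() at the top of this block walks the dirent list with a pointer-to-pointer (struct jffs2_full_dirent **prev) so the new entry can be spliced in without special-casing the head. The sketch below shows just that idiom in plain userspace C with an invented struct ent; it keeps the list sorted by a hash field the way the jffs2 code sorts by nhash.

    #include <stdio.h>
    #include <stdlib.h>

    struct ent {
        unsigned hash;
        struct ent *next;
    };

    /* Insert 'new' keeping the singly linked list sorted by ->hash.
     * Walking with a pointer-to-pointer means the head and the middle
     * of the list are handled by exactly the same code. */
    static void sorted_insert(struct ent **list, struct ent *new)
    {
        struct ent **prev = list;

        while (*prev && (*prev)->hash <= new->hash)
            prev = &(*prev)->next;

        new->next = *prev;
        *prev = new;
    }

    int main(void)
    {
        struct ent *head = NULL;
        unsigned hashes[] = { 42, 7, 19, 7 };

        for (unsigned i = 0; i < sizeof(hashes) / sizeof(hashes[0]); i++) {
            struct ent *e = calloc(1, sizeof(*e));
            e->hash = hashes[i];
            sorted_insert(&head, e);
        }
        for (struct ent *e = head; e; e = e->next)
            printf("%u ", e->hash);
        printf("\n");   /* prints: 7 7 19 42 */
        return 0;
    }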
/linux-4.1.27/drivers/iommu/
H A Diova.c93 unsigned long limit_pfn, struct iova *new) __cached_rbnode_insert_update()
97 iovad->cached32_node = &new->node; __cached_rbnode_insert_update()
140 struct iova *new, bool size_aligned) __alloc_and_insert_iova_range()
182 new->pfn_lo = limit_pfn - (size + pad_size) + 1; __alloc_and_insert_iova_range()
183 new->pfn_hi = new->pfn_lo + size - 1; __alloc_and_insert_iova_range()
186 /* Add new node and rebalance tree. */ __alloc_and_insert_iova_range()
197 /* Figure out where to put new node */ __alloc_and_insert_iova_range()
203 if (new->pfn_lo < this->pfn_lo) __alloc_and_insert_iova_range()
205 else if (new->pfn_lo > this->pfn_lo) __alloc_and_insert_iova_range()
211 /* Add new node and rebalance tree. */ __alloc_and_insert_iova_range()
212 rb_link_node(&new->node, parent, entry); __alloc_and_insert_iova_range()
213 rb_insert_color(&new->node, &iovad->rbroot); __alloc_and_insert_iova_range()
215 __cached_rbnode_insert_update(iovad, saved_pfn, new); __alloc_and_insert_iova_range()
226 struct rb_node **new = &(root->rb_node), *parent = NULL; iova_insert_rbtree() local
227 /* Figure out where to put new node */ iova_insert_rbtree()
228 while (*new) { iova_insert_rbtree()
229 struct iova *this = container_of(*new, struct iova, node); iova_insert_rbtree()
230 parent = *new; iova_insert_rbtree()
233 new = &((*new)->rb_left); iova_insert_rbtree()
235 new = &((*new)->rb_right); iova_insert_rbtree()
239 /* Add new node and rebalance tree. */ iova_insert_rbtree()
240 rb_link_node(&iova->node, parent, new); iova_insert_rbtree()
92 __cached_rbnode_insert_update(struct iova_domain *iovad, unsigned long limit_pfn, struct iova *new) __cached_rbnode_insert_update() argument
138 __alloc_and_insert_iova_range(struct iova_domain *iovad, unsigned long size, unsigned long limit_pfn, struct iova *new, bool size_aligned) __alloc_and_insert_iova_range() argument
/linux-4.1.27/drivers/gpu/drm/radeon/
H A Dmkregtable.c57 * Insert a new entry between two known consecutive entries.
63 static inline void __list_add(struct list_head *new, __list_add() argument
66 next->prev = new; __list_add()
67 new->next = next; __list_add()
68 new->prev = prev; __list_add()
69 prev->next = new; __list_add()
72 extern void __list_add(struct list_head *new,
77 * list_add - add a new entry
78 * @new: new entry to be added
81 * Insert a new entry after the specified head.
84 static inline void list_add(struct list_head *new, struct list_head *head) list_add() argument
86 __list_add(new, head, head->next); list_add()
90 * list_add_tail - add a new entry
91 * @new: new entry to be added
94 * Insert a new entry before the specified head.
97 static inline void list_add_tail(struct list_head *new, struct list_head *head) list_add_tail() argument
99 __list_add(new, head->prev, head); list_add_tail()
133 * list_replace - replace old entry by new one
135 * @new : the new element to insert
139 static inline void list_replace(struct list_head *old, struct list_head *new) list_replace() argument
141 new->next = old->next; list_replace()
142 new->next->prev = new; list_replace()
143 new->prev = old->prev; list_replace()
144 new->prev->next = new; list_replace()
148 struct list_head *new) list_replace_init()
150 list_replace(old, new); list_replace_init()
250 * @list: a new list to add all removed entries
291 * @list: the new list to add.
303 * @list: the new list to add.
315 * @list: the new list to add.
331 * @list: the new list to add.
147 list_replace_init(struct list_head *old, struct list_head *new) list_replace_init() argument
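mkregtable.c above is a userspace tool that carries its own copy of the kernel's list.h helpers, so the documented behaviour of __list_add()/list_add()/list_add_tail() can be exercised outside the kernel. The following is a hedged, trimmed-down rework of that pattern (struct item and the values are invented for the example): list_add() pushes right after the head, list_add_tail() appends just before it in the circular list.

    #include <stdio.h>
    #include <stddef.h>

    struct list_head { struct list_head *next, *prev; };

    #define LIST_HEAD_INIT(name) { &(name), &(name) }

    /* Insert a new entry between two known consecutive entries. */
    static void __list_add(struct list_head *new,
                           struct list_head *prev, struct list_head *next)
    {
        next->prev = new;
        new->next = next;
        new->prev = prev;
        prev->next = new;
    }

    static void list_add(struct list_head *new, struct list_head *head)
    { __list_add(new, head, head->next); }        /* stack-like: after head */

    static void list_add_tail(struct list_head *new, struct list_head *head)
    { __list_add(new, head->prev, head); }        /* queue-like: before head */

    struct item { int val; struct list_head list; };

    int main(void)
    {
        struct list_head q = LIST_HEAD_INIT(q);
        struct item a = { .val = 1 }, b = { .val = 2 }, c = { .val = 3 };

        list_add_tail(&a.list, &q);   /* q: 1 */
        list_add_tail(&b.list, &q);   /* q: 1 2 */
        list_add(&c.list, &q);        /* q: 3 1 2 */

        for (struct list_head *p = q.next; p != &q; p = p->next) {
            struct item *it = (struct item *)((char *)p - offsetof(struct item, list));
            printf("%d ", it->val);
        }
        printf("\n");                 /* prints: 3 1 2 */
        return 0;
    }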
/linux-4.1.27/sound/soc/
H A Dsoc-io.c68 unsigned int old, new; snd_soc_component_update_bits_legacy() local
80 new = (old & ~mask) | (val & mask); snd_soc_component_update_bits_legacy()
81 *change = old != new; snd_soc_component_update_bits_legacy()
83 ret = component->write(component, reg, new); snd_soc_component_update_bits_legacy()
177 * Tests a register with a new value and checks if the new value is
185 unsigned int old, new; snd_soc_component_test_bits() local
191 new = (old & ~mask) | value; snd_soc_component_test_bits()
192 return old != new; snd_soc_component_test_bits()
221 * @value: new value
223 * Writes new register value.
240 * @value: new value
242 * Tests a register with a new value and checks if the new value is
/linux-4.1.27/arch/avr32/kernel/
H A Dsetup.c81 struct resource *new; resource_init() local
86 new = alloc_bootmem_low(sizeof(struct resource)); resource_init()
87 memcpy(new, mem, sizeof(struct resource)); resource_init()
89 new->sibling = NULL; resource_init()
90 if (request_resource(&iomem_resource, new)) resource_init()
96 new = alloc_bootmem_low(sizeof(struct resource)); resource_init()
97 memcpy(new, res, sizeof(struct resource)); resource_init()
99 new->sibling = NULL; resource_init()
100 if (insert_resource(&iomem_resource, new)) resource_init()
110 struct resource *new, *next, **pprev; add_physical_memory() local
133 new = &res_cache[res_cache_next_free++]; add_physical_memory()
134 new->start = start; add_physical_memory()
135 new->end = end; add_physical_memory()
136 new->name = "System RAM"; add_physical_memory()
137 new->flags = IORESOURCE_MEM; add_physical_memory()
139 *pprev = new; add_physical_memory()
146 struct resource *new, *next, **pprev; add_reserved_region() local
162 new = &res_cache[res_cache_next_free++]; add_reserved_region()
163 new->start = start; add_reserved_region()
164 new->end = end; add_reserved_region()
165 new->name = name; add_reserved_region()
166 new->sibling = next; add_reserved_region()
167 new->flags = IORESOURCE_MEM; add_reserved_region()
169 *pprev = new; add_reserved_region()
/linux-4.1.27/drivers/char/agp/
H A Dgeneric.c103 struct agp_memory *new; agp_create_user_memory() local
109 new = kzalloc(sizeof(struct agp_memory), GFP_KERNEL); agp_create_user_memory()
110 if (new == NULL) agp_create_user_memory()
113 new->key = agp_get_key(); agp_create_user_memory()
115 if (new->key < 0) { agp_create_user_memory()
116 kfree(new); agp_create_user_memory()
120 agp_alloc_page_array(alloc_size, new); agp_create_user_memory()
122 if (new->pages == NULL) { agp_create_user_memory()
123 agp_free_key(new->key); agp_create_user_memory()
124 kfree(new); agp_create_user_memory()
127 new->num_scratch_pages = 0; agp_create_user_memory()
128 return new; agp_create_user_memory()
133 struct agp_memory *new; agp_create_memory() local
135 new = kzalloc(sizeof(struct agp_memory), GFP_KERNEL); agp_create_memory()
136 if (new == NULL) agp_create_memory()
139 new->key = agp_get_key(); agp_create_memory()
141 if (new->key < 0) { agp_create_memory()
142 kfree(new); agp_create_memory()
146 agp_alloc_page_array(PAGE_SIZE * scratch_pages, new); agp_create_memory()
148 if (new->pages == NULL) { agp_create_memory()
149 agp_free_key(new->key); agp_create_memory()
150 kfree(new); agp_create_memory()
153 new->num_scratch_pages = scratch_pages; agp_create_memory()
154 new->type = AGP_NORMAL_MEMORY; agp_create_memory()
155 return new; agp_create_memory()
226 struct agp_memory *new; agp_allocate_memory() local
239 new = agp_generic_alloc_user(page_count, type); agp_allocate_memory()
240 if (new) agp_allocate_memory()
241 new->bridge = bridge; agp_allocate_memory()
242 return new; agp_allocate_memory()
246 new = bridge->driver->alloc_by_type(page_count, type); agp_allocate_memory()
247 if (new) agp_allocate_memory()
248 new->bridge = bridge; agp_allocate_memory()
249 return new; agp_allocate_memory()
254 new = agp_create_memory(scratch_pages); agp_allocate_memory()
256 if (new == NULL) agp_allocate_memory()
260 if (bridge->driver->agp_alloc_pages(bridge, new, page_count)) { agp_allocate_memory()
261 agp_free_memory(new); agp_allocate_memory()
264 new->bridge = bridge; agp_allocate_memory()
265 return new; agp_allocate_memory()
272 agp_free_memory(new); agp_allocate_memory()
275 new->pages[i] = page; agp_allocate_memory()
276 new->page_count++; agp_allocate_memory()
278 new->bridge = bridge; agp_allocate_memory()
280 return new; agp_allocate_memory()
1169 struct agp_memory *new; agp_generic_alloc_user() local
1174 new = agp_create_user_memory(page_count); agp_generic_alloc_user()
1175 if (new == NULL) agp_generic_alloc_user()
1179 new->pages[i] = NULL; agp_generic_alloc_user()
1180 new->page_count = 0; agp_generic_alloc_user()
1181 new->type = type; agp_generic_alloc_user()
1182 new->num_scratch_pages = pages; agp_generic_alloc_user()
1184 return new; agp_generic_alloc_user()
/linux-4.1.27/arch/arm/include/asm/
H A Dcmpxchg.h139 unsigned long new, int size) __cmpxchg()
155 : "r" (ptr), "Ir" (old), "r" (new) __cmpxchg()
167 : "r" (ptr), "Ir" (old), "r" (new) __cmpxchg()
180 : "r" (ptr), "Ir" (old), "r" (new) __cmpxchg()
193 unsigned long new, int size) __cmpxchg_mb()
198 ret = __cmpxchg(ptr, old, new, size); __cmpxchg_mb()
212 unsigned long new, int size) __cmpxchg_local()
220 ret = __cmpxchg_local_generic(ptr, old, new, size); __cmpxchg_local()
224 ret = __cmpxchg(ptr, old, new, size); __cmpxchg_local()
232 unsigned long long new) __cmpxchg64()
249 : "r" (ptr), "r" (old), "r" (new) __cmpxchg64()
257 unsigned long long new) __cmpxchg64_mb()
262 ret = __cmpxchg64(ptr, old, new); __cmpxchg64_mb()
138 __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size) __cmpxchg() argument
192 __cmpxchg_mb(volatile void *ptr, unsigned long old, unsigned long new, int size) __cmpxchg_mb() argument
210 __cmpxchg_local(volatile void *ptr, unsigned long old, unsigned long new, int size) __cmpxchg_local() argument
230 __cmpxchg64(unsigned long long *ptr, unsigned long long old, unsigned long long new) __cmpxchg64() argument
255 __cmpxchg64_mb(unsigned long long *ptr, unsigned long long old, unsigned long long new) __cmpxchg64_mb() argument
/linux-4.1.27/arch/alpha/kernel/
H A Derr_common.c281 cdl_register_subpacket_annotation(struct el_subpacket_annotation *new) cdl_register_subpacket_annotation()
285 if (a == NULL) subpacket_annotation_list = new; cdl_register_subpacket_annotation()
288 if ((a->class == new->class && a->type == new->type) || cdl_register_subpacket_annotation()
289 a == new) { cdl_register_subpacket_annotation()
295 a->next = new; cdl_register_subpacket_annotation()
297 new->next = NULL; cdl_register_subpacket_annotation()
303 cdl_register_subpacket_handler(struct el_subpacket_handler *new) cdl_register_subpacket_handler()
307 if (h == NULL) subpacket_handler_list = new; cdl_register_subpacket_handler()
310 if (h->class == new->class || h == new) { cdl_register_subpacket_handler()
316 h->next = new; cdl_register_subpacket_handler()
318 new->next = NULL; cdl_register_subpacket_handler()
279 cdl_register_subpacket_annotation(struct el_subpacket_annotation *new) cdl_register_subpacket_annotation() argument
301 cdl_register_subpacket_handler(struct el_subpacket_handler *new) cdl_register_subpacket_handler() argument
/linux-4.1.27/net/x25/
H A Dx25_facilities.c5 * randomly fail to work with new releases, misbehave and/or generally
271 struct x25_facilities *new, struct x25_dte_facilities *dte) x25_negotiate_facilities()
279 memcpy(new, ours, sizeof(*new)); x25_negotiate_facilities()
293 new->reverse = theirs.reverse; x25_negotiate_facilities()
302 new->throughput = (new->throughput & 0xf0) | theirs_in; x25_negotiate_facilities()
307 new->throughput = (new->throughput & 0x0f) | theirs_out; x25_negotiate_facilities()
314 new->pacsize_in = theirs.pacsize_in; x25_negotiate_facilities()
318 new->pacsize_out = theirs.pacsize_out; x25_negotiate_facilities()
325 new->winsize_in = theirs.winsize_in; x25_negotiate_facilities()
329 new->winsize_out = theirs.winsize_out; x25_negotiate_facilities()
270 x25_negotiate_facilities(struct sk_buff *skb, struct sock *sk, struct x25_facilities *new, struct x25_dte_facilities *dte) x25_negotiate_facilities() argument
/linux-4.1.27/security/selinux/
H A Dnetif.c86 * sel_netif_insert - Insert a new interface into the table
87 * @netif: the new interface record
90 * Add a new interface record to the network interface hash table. Returns
140 struct sel_netif *new = NULL; sel_netif_sid_slow() local
161 new = kzalloc(sizeof(*new), GFP_ATOMIC); sel_netif_sid_slow()
162 if (new == NULL) { sel_netif_sid_slow()
166 ret = security_netif_sid(dev->name, &new->nsec.sid); sel_netif_sid_slow()
169 new->nsec.ns = ns; sel_netif_sid_slow()
170 new->nsec.ifindex = ifindex; sel_netif_sid_slow()
171 ret = sel_netif_insert(new); sel_netif_sid_slow()
174 *sid = new->nsec.sid; sel_netif_sid_slow()
184 kfree(new); sel_netif_sid_slow()
H A Dnetnode.c149 * sel_netnode_insert - Insert a new node into the table
150 * @node: the new node record
153 * Add a new node record to the network address hash table.
204 struct sel_netnode *new = NULL; sel_netnode_sid_slow() local
213 new = kzalloc(sizeof(*new), GFP_ATOMIC); sel_netnode_sid_slow()
214 if (new == NULL) sel_netnode_sid_slow()
220 new->nsec.addr.ipv4 = *(__be32 *)addr; sel_netnode_sid_slow()
225 new->nsec.addr.ipv6 = *(struct in6_addr *)addr; sel_netnode_sid_slow()
234 new->nsec.family = family; sel_netnode_sid_slow()
235 new->nsec.sid = *sid; sel_netnode_sid_slow()
236 sel_netnode_insert(new); sel_netnode_sid_slow()
244 kfree(new); sel_netnode_sid_slow()
H A Dnetport.c108 * sel_netport_insert - Insert a new port into the table
109 * @port: the new port record
112 * Add a new port record to the network address hash table.
152 struct sel_netport *new = NULL; sel_netport_sid_slow() local
161 new = kzalloc(sizeof(*new), GFP_ATOMIC); sel_netport_sid_slow()
162 if (new == NULL) sel_netport_sid_slow()
168 new->psec.port = pnum; sel_netport_sid_slow()
169 new->psec.protocol = protocol; sel_netport_sid_slow()
170 new->psec.sid = *sid; sel_netport_sid_slow()
171 sel_netport_insert(new); sel_netport_sid_slow()
179 kfree(new); sel_netport_sid_slow()
/linux-4.1.27/arch/x86/lib/
H A Dcmpxchg8b_emu.S19 * %ebx : low 32 bits of new value
20 * %ecx : high 32 bits of new value
/linux-4.1.27/arch/x86/mm/
H A Dpat_internal.h31 extern int rbt_memtype_check_insert(struct memtype *new,
37 static inline int rbt_memtype_check_insert(struct memtype *new, rbt_memtype_check_insert() argument
/linux-4.1.27/arch/mips/kernel/
H A Dsyscall.c99 static inline int mips_atomic_set(unsigned long addr, unsigned long new) mips_atomic_set() argument
116 " move %[tmp], %[new] \n" mips_atomic_set()
134 [new] "r" (new), mips_atomic_set()
142 " move %[tmp], %[new] \n" mips_atomic_set()
164 [new] "r" (new), mips_atomic_set()
175 err |= __put_user(new, (unsigned int *) addr); mips_atomic_set()
/linux-4.1.27/sound/hda/
H A Darray.c10 * snd_array_new - get a new element from the given array
13 * Get a new element from the given array. If it exceeds the
/linux-4.1.27/include/net/
H A Dnetevent.h19 struct dst_entry *new;
/linux-4.1.27/include/linux/netfilter/
H A Dnf_conntrack_common.h9 unsigned int new;
/linux-4.1.27/include/uapi/linux/
H A Dif_plip.h2 * NET3 PLIP tuning facilities for the new Niibe PLIP.
H A Dkernel-page-flags.h20 /* 11-20: new additions in 2.6.31 */
H A Db1lli.h44 * struct for adding new cards
64 #define AVMB1_ADDCARD 1 /* add a new card - OBSOLETE */
67 #define AVMB1_ADDCARD_WITH_TYPE 4 /* add a new card, with cardtype */
/linux-4.1.27/arch/mn10300/include/asm/
H A Dcmpxchg.h40 unsigned long old, unsigned long new) __cmpxchg()
57 "r"(old), "r"(new) __cmpxchg()
91 unsigned long old, unsigned long new) __cmpxchg()
99 *m = new; __cmpxchg()
39 __cmpxchg(volatile unsigned long *m, unsigned long old, unsigned long new) __cmpxchg() argument
90 __cmpxchg(volatile unsigned long *m, unsigned long old, unsigned long new) __cmpxchg() argument
/linux-4.1.27/arch/blackfin/mach-common/
H A Ddpmc.c109 if (val == CPUFREQ_PRECHANGE && freq->old < freq->new) { vreg_cpufreq_notifier()
111 bfin_set_vlev(bfin_get_vlev(freq->new)); vreg_cpufreq_notifier()
114 } else if (val == CPUFREQ_POSTCHANGE && freq->old > freq->new) { vreg_cpufreq_notifier()
116 bfin_set_vlev(bfin_get_vlev(freq->new)); vreg_cpufreq_notifier()
/linux-4.1.27/arch/ia64/include/uapi/asm/
H A Dgcc_intrin.h307 #define ia64_cmpxchg1_acq(ptr, new, old) \
312 "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory"); \
316 #define ia64_cmpxchg1_rel(ptr, new, old) \
321 "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory"); \
325 #define ia64_cmpxchg2_acq(ptr, new, old) \
330 "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory"); \
334 #define ia64_cmpxchg2_rel(ptr, new, old) \
340 "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory"); \
344 #define ia64_cmpxchg4_acq(ptr, new, old) \
349 "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory"); \
353 #define ia64_cmpxchg4_rel(ptr, new, old) \
358 "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory"); \
362 #define ia64_cmpxchg8_acq(ptr, new, old) \
367 "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory"); \
371 #define ia64_cmpxchg8_rel(ptr, new, old) \
377 "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory"); \
H A Dcmpxchg.h72 #define ia64_cmpxchg(sem, ptr, old, new, size) \
94 _r_ = ia64_cmpxchg1_##sem((__u8 *) ptr, new, _o_); \
98 _r_ = ia64_cmpxchg2_##sem((__u16 *) ptr, new, _o_); \
102 _r_ = ia64_cmpxchg4_##sem((__u32 *) ptr, new, _o_); \
106 _r_ = ia64_cmpxchg8_##sem((__u64 *) ptr, new, _o_); \
/linux-4.1.27/arch/arm/plat-samsung/
H A Dpm-gpio.c114 * { OUT => SFN } Change CON first, so new data will not glitch
115 * { OUT => IN } Change CON first, so new data will not glitch
117 * { SFN => OUT } Change DAT first, so new data will not glitch [1]
133 u32 gpcon, old, new, mask; samsung_gpio_pm_2bit_resume() local
147 new = (gps_gpcon & mask) >> nr; samsung_gpio_pm_2bit_resume()
151 if (old == new) samsung_gpio_pm_2bit_resume()
156 if (is_sfn(old) && is_sfn(new)) samsung_gpio_pm_2bit_resume()
161 if (is_in(old) && is_out(new)) samsung_gpio_pm_2bit_resume()
166 if (is_sfn(old) && is_out(new)) samsung_gpio_pm_2bit_resume()
176 /* Write the new CON settings */ samsung_gpio_pm_2bit_resume()
210 u32 old, new, mask; samsung_gpio_pm_4bit_mask() local
216 new = (gps_gpcon & mask) >> nr; samsung_gpio_pm_4bit_mask()
220 if (old == new) samsung_gpio_pm_4bit_mask()
225 if (is_sfn(old) && is_sfn(new)) samsung_gpio_pm_4bit_mask()
230 if (is_in(old) && is_out(new)) samsung_gpio_pm_4bit_mask()
235 if (is_sfn(old) && is_out(new)) samsung_gpio_pm_4bit_mask()
/linux-4.1.27/tools/testing/selftests/efivarfs/
H A Dcreate-read.c33 fprintf(stderr, "Reading a new var should return EOF\n"); main()
/linux-4.1.27/drivers/scsi/megaraid/
H A Dmegaraid_mm.h59 * : uioc_t structure instead. All new hba drivers use the new
61 * : new uioc_t format and send it to the hba drivers.
/linux-4.1.27/drivers/staging/sm750fb/
H A Dddk750_chip.h50 Others = the new main chip clock
54 Others = the new memory clock
58 Others = the new master clock
/linux-4.1.27/drivers/cpufreq/
H A Dcpufreq-nforce2.c114 * Writes new FSB PLL value to chipset
164 * nforce2_set_fsb - set new FSB
167 * Sets new FSB
244 * nforce2_target - set a new CPUFreq policy
245 * @policy: new policy
250 * Sets a new CPUFreq policy.
265 freqs.new = target_fsb * fid * 100; nforce2_target()
267 if (freqs.old == freqs.new) nforce2_target()
270 pr_debug("Old CPU frequency %d kHz, new %d kHz\n", nforce2_target()
271 freqs.old, freqs.new); nforce2_target()
294 * nforce2_verify - verifies a new CPUFreq policy
295 * @policy: new policy
/linux-4.1.27/arch/blackfin/kernel/
H A Dflat.c70 pr_debug("new value %x at %p", get_unaligned(usptr), usptr); bfin_put_addr_at_rp()
75 pr_debug("new value %x", get_unaligned(usptr)); bfin_put_addr_at_rp()
80 pr_debug("new ptr =%lx", get_unaligned(ptr)); bfin_put_addr_at_rp()
H A Dfixed_code.S50 * R2: new value to store
51 * The new value is stored if the contents of the memory
69 * Outputs: R0: new contents of the memory address.
84 * Outputs: R0: new contents of the memory address.
99 * Outputs: R0: new contents of the memory address.
114 * Outputs: R0: new contents of the memory address.
129 * Outputs: R0: new contents of the memory address.
/linux-4.1.27/arch/arc/include/asm/
H A Dmmu_context.h37 * A new allocation cycle, post rollover, could potentially reassign an ASID
38 * to a different task. Thus the rule is to refresh the ASID in a new cycle.
57 * Get a new ASID if task doesn't have a valid one (unalloc or from prev cycle)
68 * Move to new ASID if it was not from current alloc-cycle/generation. get_new_mmu_context()
72 * Note: Callers needing new ASID unconditionally, independent of get_new_mmu_context()
80 /* move to new ASID and handle rollover */ get_new_mmu_context()
94 /* Assign new ASID to tsk */ get_new_mmu_context()
104 * Initialize the context related info for a new mm_struct
129 If task doesn't have an ASID (never alloc or stolen, get a new ASID)
158 * Called at the time of execve() to get a new ASID
160 * vs. in switch_mm(). Here it always returns a new ASID, because mm has
161 * an unallocated "initial" value, while in latter, it moves to a new ASID,
/linux-4.1.27/net/irda/irnet/
H A Dirnet_irda.c338 /* Close the last instance of IrIAP, and open a new one. irnet_discover_next_daddr()
345 /* Create a new IAP instance */ irnet_discover_next_daddr()
751 irnet_socket * new = (irnet_socket *) NULL; irnet_find_socket() local
770 new = (irnet_socket *) hashbin_find(irnet_server.list, irnet_find_socket()
772 if(new) irnet_find_socket()
774 new, new->rname); irnet_find_socket()
781 if(new == (irnet_socket *) NULL) irnet_find_socket()
783 new = (irnet_socket *) hashbin_get_first(irnet_server.list); irnet_find_socket()
784 while(new !=(irnet_socket *) NULL) irnet_find_socket()
787 if((new->rdaddr == self->daddr) || (new->daddr == self->daddr)) irnet_find_socket()
791 new, self->daddr); irnet_find_socket()
794 new = (irnet_socket *) hashbin_get_next(irnet_server.list); irnet_find_socket()
799 if(new == (irnet_socket *) NULL) irnet_find_socket()
801 new = (irnet_socket *) hashbin_get_first(irnet_server.list); irnet_find_socket()
802 while(new !=(irnet_socket *) NULL) irnet_find_socket()
805 if(!(test_bit(0, &new->ttp_open)) && (new->rdaddr == DEV_ADDR_ANY) && irnet_find_socket()
806 (new->rname[0] == '\0') && (new->ppp_open)) irnet_find_socket()
810 new); irnet_find_socket()
813 new = (irnet_socket *) hashbin_get_next(irnet_server.list); irnet_find_socket()
820 DEXIT(IRDA_SERV_TRACE, " - new = 0x%p\n", new); irnet_find_socket()
821 return new; irnet_find_socket()
833 irnet_socket * new, irnet_connect_socket()
838 DENTER(IRDA_SERV_TRACE, "(server=0x%p, new=0x%p)\n", irnet_connect_socket()
839 server, new); irnet_connect_socket()
841 /* Now attach up the new socket */ irnet_connect_socket()
842 new->tsap = irttp_dup(server->tsap, new); irnet_connect_socket()
843 DABORT(new->tsap == NULL, -1, IRDA_SERV_ERROR, "dup failed!\n"); irnet_connect_socket()
845 /* Set up all the relevant parameters on the new socket */ irnet_connect_socket()
846 new->stsap_sel = new->tsap->stsap_sel; irnet_connect_socket()
847 new->dtsap_sel = new->tsap->dtsap_sel; irnet_connect_socket()
848 new->saddr = irttp_get_saddr(new->tsap); irnet_connect_socket()
849 new->daddr = irttp_get_daddr(new->tsap); irnet_connect_socket()
851 new->max_header_size = max_header_size; irnet_connect_socket()
852 new->max_sdu_size_tx = max_sdu_size; irnet_connect_socket()
853 new->max_data_size = max_sdu_size; irnet_connect_socket()
857 new->max_data_size = irttp_get_max_seg_size(new->tsap); irnet_connect_socket()
863 /* Send a connection response on the new socket */ irnet_connect_socket()
864 irttp_connect_response(new->tsap, new->max_sdu_size_rx, NULL); irnet_connect_socket()
866 /* Allow PPP to send its junk over the new socket... */ irnet_connect_socket()
867 set_bit(0, &new->ttp_open); irnet_connect_socket()
871 clear_bit(0, &new->ttp_connect); irnet_connect_socket()
872 if(new->iriap) irnet_connect_socket()
874 iriap_close(new->iriap); irnet_connect_socket()
875 new->iriap = NULL; irnet_connect_socket()
877 if(new->discoveries != NULL) irnet_connect_socket()
879 kfree(new->discoveries); irnet_connect_socket()
880 new->discoveries = NULL; irnet_connect_socket()
888 ppp_output_wakeup(&new->chan); irnet_connect_socket()
892 irnet_post_event(new, IRNET_CONNECT_FROM, irnet_connect_socket()
893 new->saddr, new->daddr, server->rname, 0); irnet_connect_socket()
1341 irnet_socket * new = (irnet_socket *) NULL; irnet_connect_indication() local
1349 new = irnet_find_socket(server); irnet_connect_indication()
1352 if(new == (irnet_socket *) NULL) irnet_connect_indication()
1360 if(test_bit(0, &new->ttp_open)) irnet_connect_indication()
1378 * originator and expect the originator to bind this new connection irnet_connect_indication()
1384 * 1) The "originator" must accept the new connection and get rid irnet_connect_indication()
1386 * 2) One side must deny the new connection to avoid races, irnet_connect_indication()
1405 (test_and_clear_bit(0, &new->ttp_connect))) irnet_connect_indication()
1412 if(new->tsap != NULL) irnet_connect_indication()
1414 /* Close the old connection the new socket was attempting, irnet_connect_indication()
1415 * so that we can hook it up to the new connection. irnet_connect_indication()
1417 irttp_close_tsap(new->tsap); irnet_connect_indication()
1418 new->tsap = NULL; irnet_connect_indication()
1433 if((test_bit(0, &new->ttp_connect)) || (new->tsap != NULL)) irnet_connect_indication()
1443 irnet_connect_socket(server, new, qos, max_sdu_size, max_header_size); irnet_connect_indication()
1451 irnet_data_indication(new, new->tsap, skb); irnet_connect_indication()
1538 * If we have more addresses in the log, just initiate a new query.
1644 * check if it is a "new" node for us...
1670 DEBUG(IRDA_OCB_INFO, "Discovered new IrNET/IrLAN node %s...\n", irnet_discovery_indication()
1688 * check if it is a "new" node...
832 irnet_connect_socket(irnet_socket * server, irnet_socket * new, struct qos_info * qos, __u32 max_sdu_size, __u8 max_header_size) irnet_connect_socket() argument
/linux-4.1.27/drivers/input/joystick/iforce/
H A Diforce-ff.c202 struct ff_effect *new) need_condition_modifier()
207 if (new->type != FF_SPRING && new->type != FF_FRICTION) { need_condition_modifier()
214 ret |= old->u.condition[i].right_saturation != new->u.condition[i].right_saturation need_condition_modifier()
215 || old->u.condition[i].left_saturation != new->u.condition[i].left_saturation need_condition_modifier()
216 || old->u.condition[i].right_coeff != new->u.condition[i].right_coeff need_condition_modifier()
217 || old->u.condition[i].left_coeff != new->u.condition[i].left_coeff need_condition_modifier()
218 || old->u.condition[i].deadband != new->u.condition[i].deadband need_condition_modifier()
219 || old->u.condition[i].center != new->u.condition[i].center; need_condition_modifier()
278 struct ff_effect *new) need_period_modifier()
280 if (new->type != FF_PERIODIC) { need_period_modifier()
285 return (old->u.periodic.period != new->u.periodic.period need_period_modifier()
286 || old->u.periodic.magnitude != new->u.periodic.magnitude need_period_modifier()
287 || old->u.periodic.offset != new->u.periodic.offset need_period_modifier()
288 || old->u.periodic.phase != new->u.periodic.phase); need_period_modifier()
295 static int need_core(struct ff_effect *old, struct ff_effect *new) need_core() argument
297 if (old->direction != new->direction need_core()
298 || old->trigger.button != new->trigger.button need_core()
299 || old->trigger.interval != new->trigger.interval need_core()
300 || old->replay.length != new->replay.length need_core()
301 || old->replay.delay != new->replay.delay) need_core()
200 need_condition_modifier(struct iforce *iforce, struct ff_effect *old, struct ff_effect *new) need_condition_modifier() argument
277 need_period_modifier(struct iforce *iforce, struct ff_effect *old, struct ff_effect *new) need_period_modifier() argument
/linux-4.1.27/drivers/gpu/drm/
H A Ddrm_lock.c189 unsigned int old, new, prev; drm_lock_take() local
196 new = old | _DRM_LOCK_CONT; drm_lock_take()
198 new = context | _DRM_LOCK_HELD | drm_lock_take()
202 prev = cmpxchg(lock, old, new); drm_lock_take()
216 if ((_DRM_LOCKING_CONTEXT(new)) == context && (new & _DRM_LOCK_HELD)) { drm_lock_take()
238 unsigned int old, new, prev; drm_lock_transfer() local
244 new = context | _DRM_LOCK_HELD; drm_lock_transfer()
245 prev = cmpxchg(lock, old, new); drm_lock_transfer()
263 unsigned int old, new, prev; drm_legacy_lock_free() local
277 new = _DRM_LOCKING_CONTEXT(old); drm_legacy_lock_free()
278 prev = cmpxchg(lock, old, new); drm_legacy_lock_free()
305 unsigned int old, new, prev; drm_notifier() local
316 new = old | _DRM_LOCK_CONT; drm_notifier()
317 prev = cmpxchg(&lock->lock, old, new); drm_notifier()
/linux-4.1.27/arch/sh/kernel/
H A Dftrace.c77 * and the new code into the "code" buffer.
117 int new = old & ~MOD_CODE_WRITE_FLAG; clear_mod_flag() local
119 if (old == new) clear_mod_flag()
122 old = atomic_cmpxchg(&nmi_running, old, new); clear_mod_flag()
137 /* if we fail, then kill any new writers */ ftrace_mod_code()
232 /* replace the text with the new text */ ftrace_modify_code()
244 unsigned char old[MCOUNT_INSN_SIZE], *new; ftrace_update_ftrace_func() local
247 new = ftrace_call_replace(ip, (unsigned long)func); ftrace_update_ftrace_func()
249 return ftrace_modify_code(ip, old, new); ftrace_update_ftrace_func()
255 unsigned char *new, *old; ftrace_make_nop() local
259 new = ftrace_nop_replace(ip); ftrace_make_nop()
261 return ftrace_modify_code(rec->ip, old, new); ftrace_make_nop()
266 unsigned char *new, *old; ftrace_make_call() local
270 new = ftrace_call_replace(ip, addr); ftrace_make_call()
272 return ftrace_modify_code(rec->ip, old, new); ftrace_make_call()
/linux-4.1.27/drivers/hid/usbhid/
H A Dusbkbd.c76 * new key is pressed or a key that was pressed is released.
83 * @new: Buffer for the @irq URB
102 unsigned char *new; member in struct:usb_kbd
131 input_report_key(kbd->dev, usb_kbd_keycode[i + 224], (kbd->new[0] >> i) & 1); usb_kbd_irq()
135 if (kbd->old[i] > 3 && memscan(kbd->new + 2, kbd->old[i], 6) == kbd->new + 8) { usb_kbd_irq()
144 if (kbd->new[i] > 3 && memscan(kbd->old + 2, kbd->new[i], 6) == kbd->old + 8) { usb_kbd_irq()
145 if (usb_kbd_keycode[kbd->new[i]]) usb_kbd_irq()
146 input_report_key(kbd->dev, usb_kbd_keycode[kbd->new[i]], 1); usb_kbd_irq()
150 kbd->new[i]); usb_kbd_irq()
156 memcpy(kbd->old, kbd->new, 8); usb_kbd_irq()
255 if (!(kbd->new = usb_alloc_coherent(dev, 8, GFP_ATOMIC, &kbd->new_dma))) usb_kbd_alloc_mem()
269 usb_free_coherent(dev, 8, kbd->new, kbd->new_dma); usb_kbd_free_mem()
349 kbd->new, (maxp > 8 ? 8 : maxp), usb_kbd_probe()
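usb_kbd_irq() above diffs the previous and current 8-byte HID boot-keyboard reports: a scan code present in old but absent from new is a release, one present in new but absent from old is a press (bytes 0/1 carry modifiers and padding, bytes 2..7 up to six scan codes). Below is a hedged userspace sketch of that diff using memchr() instead of the kernel's memscan(); report_key() is an invented stand-in for input_report_key().

    #include <stdio.h>
    #include <string.h>

    static void report_key(unsigned char code, int pressed)
    {
        printf("scancode 0x%02x %s\n", code, pressed ? "pressed" : "released");
    }

    /* Compare two 8-byte boot-keyboard reports; bytes 2..7 hold scan codes. */
    static void diff_reports(const unsigned char *old, const unsigned char *new)
    {
        for (int i = 2; i < 8; i++) {
            if (old[i] > 3 && !memchr(new + 2, old[i], 6))
                report_key(old[i], 0);     /* in old only: key released */
            if (new[i] > 3 && !memchr(old + 2, new[i], 6))
                report_key(new[i], 1);     /* in new only: key pressed */
        }
    }

    int main(void)
    {
        unsigned char old[8] = { 0, 0, 0x04, 0x16, 0, 0, 0, 0 };  /* A + S held */
        unsigned char new[8] = { 0, 0, 0x16, 0x07, 0, 0, 0, 0 };  /* S + D held */

        diff_reports(old, new);   /* reports: A released, D pressed */
        memcpy(old, new, 8);      /* remember the report for the next IRQ */
        return 0;
    }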
/linux-4.1.27/drivers/md/
H A Ddm-bio-prison.h67 * Returns 1 if pre-existing cell returned, zero if new cell created using
79 * Returns 1 if the cell was already held, 0 if @inmate is the new holder.
97 * Visits the cell and then releases. Guarantees no new inmates are
108 * We do this to ensure the new mapping caused by a write isn't performed
110 * new mapping could free the old block that the read bios are mapped to.
/linux-4.1.27/drivers/net/irda/
H A Dsir_dongle.c32 int irda_register_dongle(struct dongle_driver *new) irda_register_dongle() argument
38 __func__, new->driver_name, new->type); irda_register_dongle()
43 if (new->type == drv->type) { irda_register_dongle()
48 list_add(&new->dongle_list, &dongle_list); irda_register_dongle()
/linux-4.1.27/drivers/staging/android/uapi/
H A Dsync.h20 * @name: name of new fence
21 * @fence: returns the fd of the new fence to userspace
25 char name[32]; /* name of new fence */
77 * Takes a struct sync_merge_data. Creates a new fence containing copies of
79 * new fence's fd in sync_merge_data.fence
/linux-4.1.27/fs/pstore/
H A Dram_core.c54 int new; buffer_start_add_atomic() local
58 new = old + a; buffer_start_add_atomic()
59 while (unlikely(new >= prz->buffer_size)) buffer_start_add_atomic()
60 new -= prz->buffer_size; buffer_start_add_atomic()
61 } while (atomic_cmpxchg(&prz->buffer->start, old, new) != old); buffer_start_add_atomic()
70 size_t new; buffer_size_add_atomic() local
77 new = old + a; buffer_size_add_atomic()
78 if (new > prz->buffer_size) buffer_size_add_atomic()
79 new = prz->buffer_size; buffer_size_add_atomic()
80 } while (atomic_cmpxchg(&prz->buffer->size, old, new) != old); buffer_size_add_atomic()
89 int new; buffer_start_add_locked() local
95 new = old + a; buffer_start_add_locked()
96 while (unlikely(new >= prz->buffer_size)) buffer_start_add_locked()
97 new -= prz->buffer_size; buffer_start_add_locked()
98 atomic_set(&prz->buffer->start, new); buffer_start_add_locked()
109 size_t new; buffer_size_add_locked() local
118 new = old + a; buffer_size_add_locked()
119 if (new > prz->buffer_size) buffer_size_add_locked()
120 new = prz->buffer_size; buffer_size_add_locked()
121 atomic_set(&prz->buffer->size, new); buffer_size_add_locked()
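buffer_start_add_atomic() above advances the persistent ring buffer's start position with an atomic_cmpxchg() retry loop, wrapping the new value back into [0, buffer_size). A userspace sketch of the same wrap-and-retry idiom with C11 atomics follows; BUF_SIZE, ring_start and start_add() are invented for the example.

    #include <stdio.h>
    #include <stdatomic.h>

    #define BUF_SIZE 16u

    static _Atomic unsigned int ring_start;

    /* Advance the ring start by 'a' bytes, wrapping modulo BUF_SIZE,
     * and return the old position the caller may now write at. */
    static unsigned int start_add(unsigned int a)
    {
        unsigned int old, new;

        old = atomic_load(&ring_start);
        do {
            new = old + a;
            while (new >= BUF_SIZE)
                new -= BUF_SIZE;
            /* On failure, 'old' is reloaded with the current value. */
        } while (!atomic_compare_exchange_weak(&ring_start, &old, new));

        return old;
    }

    int main(void)
    {
        for (int i = 0; i < 4; i++)
            printf("write at %u\n", start_add(6));   /* 0, 6, 12, 2 */
        return 0;
    }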
/linux-4.1.27/fs/xfs/
H A Dkmem.c98 void *new; kmem_realloc() local
100 new = kmem_alloc(newsize, flags); kmem_realloc()
102 if (new) kmem_realloc()
103 memcpy(new, ptr, kmem_realloc()
107 return new; kmem_realloc()
/linux-4.1.27/fs/xfs/libxfs/
H A Dxfs_dir2_sf.c127 * Calculate the new size, see if we should give up yet. xfs_dir2_block_sfsize()
280 * Convert to block form if necessary, if the new entry won't fit.
291 xfs_dir2_data_aoff_t offset = 0; /* offset for new entry */ xfs_dir2_sf_addname()
323 * Yes, adjust the inode size. old count + (parent + new) xfs_dir2_sf_addname()
364 * Do it the hard way - look for a place to insert the new entry. xfs_dir2_sf_addname()
378 * Add the new entry the "easy" way.
379 * This is copying the old directory and adding the new entry at the end.
387 xfs_dir2_sf_entry_t *sfep, /* pointer to new entry */ xfs_dir2_sf_addname_easy()
388 xfs_dir2_data_aoff_t offset, /* offset to use for new ent */ xfs_dir2_sf_addname_easy()
389 int new_isize) /* new directory size */ xfs_dir2_sf_addname_easy()
410 * Fill in the new entry. xfs_dir2_sf_addname_easy()
429 * Add the new entry the "hard" way.
432 * Find a hole that the new entry will fit into, and copy
433 * the first part of the entries, the new entry, and the last part of
441 int new_isize) /* new directory size */ xfs_dir2_sf_addname_hard()
443 int add_datasize; /* data size need for new ent */ xfs_dir2_sf_addname_hard()
453 xfs_dir2_sf_entry_t *sfep; /* entry in new dir */ xfs_dir2_sf_addname_hard()
454 xfs_dir2_sf_hdr_t *sfp; /* new shortform dir */ xfs_dir2_sf_addname_hard()
468 * to insert the new entry. xfs_dir2_sf_addname_hard()
485 * the new one. We do this so xfs_idata_realloc won't copy xfs_dir2_sf_addname_hard()
501 * Fill in the new entry, and update the header counts. xfs_dir2_sf_addname_hard()
524 * Decide if the new entry will fit at all.
525 * If it will fit, pick between adding the new entry to the end (easy)
534 xfs_dir2_sf_entry_t **sfepp, /* out(1): new entry ptr */ xfs_dir2_sf_addname_pick()
535 xfs_dir2_data_aoff_t *offsetp) /* out(1): new offset */ xfs_dir2_sf_addname_pick()
556 * to insert the new entry. xfs_dir2_sf_addname_pick()
566 * Calculate data bytes used excluding the new entry, if this xfs_dir2_sf_addname_pick()
640 * Create a new (shortform) directory.
789 int newsize; /* new inode size */ xfs_dir2_sf_removename()
901 int newsize; /* new inode size */ xfs_dir2_sf_replace()
963 * See if the old number was large, the new number is small. xfs_dir2_sf_replace()
976 * See if the old number was small, the new number is large. xfs_dir2_sf_replace()
1004 int newsize; /* new inode size */ xfs_dir2_sf_toino4()
1008 xfs_dir2_sf_entry_t *sfep; /* new sf entry */ xfs_dir2_sf_toino4()
1009 xfs_dir2_sf_hdr_t *sfp; /* new sf directory */ xfs_dir2_sf_toino4()
1017 * Then nuke it from the inode, and add the new buffer to the inode. xfs_dir2_sf_toino4()
1026 * Compute the new inode size. xfs_dir2_sf_toino4()
1040 * Fill in the new header. xfs_dir2_sf_toino4()
1070 * The new entry w/ an 8-byte inode number is not there yet; we leave with
1080 int newsize; /* new inode size */ xfs_dir2_sf_toino8()
1084 xfs_dir2_sf_entry_t *sfep; /* new sf entry */ xfs_dir2_sf_toino8()
1085 xfs_dir2_sf_hdr_t *sfp; /* new sf directory */ xfs_dir2_sf_toino8()
1093 * Then nuke it from the inode, and add the new buffer to the inode. xfs_dir2_sf_toino8()
1102 * Compute the new inode size (nb: entry count + 1 for parent) xfs_dir2_sf_toino8()
1116 * Fill in the new header. xfs_dir2_sf_toino8()
/linux-4.1.27/arch/nios2/mm/
H A Dmmu_context.c51 * Set new context (pid), keep way
88 * generation then we have it should get a new generation/pid */ switch_mm()
103 * After we have set current->mm to a new value, this activates
104 * the context for the new mm so we see the new mappings.
/linux-4.1.27/net/sunrpc/
H A Dsvcauth.c117 * it's second argument 'new'. If this is non-null, it will
139 auth_domain_lookup(char *name, struct auth_domain *new) auth_domain_lookup() argument
155 if (new)
156 hlist_add_head(&new->hash, head);
158 return new;
/linux-4.1.27/drivers/md/persistent-data/
H A Ddm-bitset.h36 * want to create a brand new, empty bitset with dm_bitset_empty().
42 * it in parallel with the new root.
45 * return a root for a new, updated bitset.
91 * new_root - on success, points to the new root block
101 * new_nr_entries - the number of bits you want in the new bitset
102 * default_value - the value for any new bits
103 * new_root - on success, points to the new root block
120 * new_root - on success, points to the new root block
133 * new_root - on success, points to the new root block
146 * new_root - on success, points to the new root block (cached values may have been written)
159 * new_root - on success, points to the new root block
/linux-4.1.27/fs/reiserfs/
H A Dresize.c71 /* count bitmap blocks in new fs */ reiserfs_resize()
92 * the new journal bitmaps are zero filled, now we copy i reiserfs_resize()
94 * structs, and then transfer the new data structures reiserfs_resize()
109 * just in case vfree schedules on us, copy the new reiserfs_resize()
138 * journal transaction begins, and the new bitmaps don't reiserfs_resize()
181 /* Extend old last bitmap block - new blocks have been made available */ reiserfs_resize()
199 /* Correct new last bitmap block - It may not be full */ reiserfs_resize()
/linux-4.1.27/arch/powerpc/kvm/
H A Dfpu.S42 stfd 0,0(r3); /* save new fpscr value */ \
64 stfd 0,0(r3); /* save new fpscr value */ \
88 stfd 0,0(r3); /* save new fpscr value */ \
143 stfd 0,0(r3) /* save new fpscr value */
144 stw r6,0(r4) /* save new cr value */
220 stfd 0,0(r3); /* save new fpscr value */ \
221 stw r6,0(r4); /* save new cr value */ \
/linux-4.1.27/drivers/dma/
H A Dmmp_pdma.c450 struct mmp_pdma_desc_sw *first = NULL, *prev = NULL, *new; mmp_pdma_prep_memcpy() local
470 new = mmp_pdma_alloc_descriptor(chan); mmp_pdma_prep_memcpy()
471 if (!new) { mmp_pdma_prep_memcpy()
480 new->desc.dcmd = chan->dcmd | (DCMD_LENGTH & copy); mmp_pdma_prep_memcpy()
481 new->desc.dsadr = dma_src; mmp_pdma_prep_memcpy()
482 new->desc.dtadr = dma_dst; mmp_pdma_prep_memcpy()
485 first = new; mmp_pdma_prep_memcpy()
487 prev->desc.ddadr = new->async_tx.phys; mmp_pdma_prep_memcpy()
489 new->async_tx.cookie = 0; mmp_pdma_prep_memcpy()
490 async_tx_ack(&new->async_tx); mmp_pdma_prep_memcpy()
492 prev = new; mmp_pdma_prep_memcpy()
505 list_add_tail(&new->node, &first->tx_list); mmp_pdma_prep_memcpy()
512 new->desc.ddadr = DDADR_STOP; mmp_pdma_prep_memcpy()
513 new->desc.dcmd |= DCMD_ENDIRQEN; mmp_pdma_prep_memcpy()
531 struct mmp_pdma_desc_sw *first = NULL, *prev = NULL, *new = NULL; mmp_pdma_prep_slave_sg() local
552 new = mmp_pdma_alloc_descriptor(chan); for_each_sg()
553 if (!new) { for_each_sg()
558 new->desc.dcmd = chan->dcmd | (DCMD_LENGTH & len); for_each_sg()
560 new->desc.dsadr = addr; for_each_sg()
561 new->desc.dtadr = chan->dev_addr; for_each_sg()
563 new->desc.dsadr = chan->dev_addr; for_each_sg()
564 new->desc.dtadr = addr; for_each_sg()
568 first = new; for_each_sg()
570 prev->desc.ddadr = new->async_tx.phys; for_each_sg()
572 new->async_tx.cookie = 0; for_each_sg()
573 async_tx_ack(&new->async_tx); for_each_sg()
574 prev = new; for_each_sg()
577 list_add_tail(&new->node, &first->tx_list); for_each_sg()
589 new->desc.ddadr = DDADR_STOP;
590 new->desc.dcmd |= DCMD_ENDIRQEN;
610 struct mmp_pdma_desc_sw *first = NULL, *prev = NULL, *new; mmp_pdma_prep_dma_cyclic() local
643 new = mmp_pdma_alloc_descriptor(chan); mmp_pdma_prep_dma_cyclic()
644 if (!new) { mmp_pdma_prep_dma_cyclic()
649 new->desc.dcmd = (chan->dcmd | DCMD_ENDIRQEN | mmp_pdma_prep_dma_cyclic()
651 new->desc.dsadr = dma_src; mmp_pdma_prep_dma_cyclic()
652 new->desc.dtadr = dma_dst; mmp_pdma_prep_dma_cyclic()
655 first = new; mmp_pdma_prep_dma_cyclic()
657 prev->desc.ddadr = new->async_tx.phys; mmp_pdma_prep_dma_cyclic()
659 new->async_tx.cookie = 0; mmp_pdma_prep_dma_cyclic()
660 async_tx_ack(&new->async_tx); mmp_pdma_prep_dma_cyclic()
662 prev = new; mmp_pdma_prep_dma_cyclic()
671 list_add_tail(&new->node, &first->tx_list); mmp_pdma_prep_dma_cyclic()
678 new->desc.ddadr = first->async_tx.phys; mmp_pdma_prep_dma_cyclic()
/linux-4.1.27/fs/nfs/
H A Ddns_resolve.c73 struct nfs_dns_ent *new; nfs_dns_ent_update() local
76 new = container_of(cnew, struct nfs_dns_ent, h); nfs_dns_ent_update()
79 memcpy(&new->addr, &key->addr, key->addrlen); nfs_dns_ent_update()
80 new->addrlen = key->addrlen; nfs_dns_ent_update()
86 struct nfs_dns_ent *new; nfs_dns_ent_init() local
89 new = container_of(cnew, struct nfs_dns_ent, h); nfs_dns_ent_init()
92 kfree(new->hostname); nfs_dns_ent_init()
93 new->hostname = kstrndup(key->hostname, key->namelen, GFP_KERNEL); nfs_dns_ent_init()
94 if (new->hostname) { nfs_dns_ent_init()
95 new->namelen = key->namelen; nfs_dns_ent_init()
98 new->namelen = 0; nfs_dns_ent_init()
99 new->addrlen = 0; nfs_dns_ent_init()
206 struct nfs_dns_ent *new, nfs_dns_update()
212 &new->h, &key->h, nfs_dns_update()
205 nfs_dns_update(struct cache_detail *cd, struct nfs_dns_ent *new, struct nfs_dns_ent *key) nfs_dns_update() argument
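
The two callbacks above split the cache copy in the usual init/update way: the init callback duplicates the lookup key (the hostname), the update callback copies in the resolved payload (the address), and a failed allocation zeroes the lengths so the entry reads as empty. A stand-alone sketch of that split follows, using a simplified struct dns_ent rather than the kernel's struct nfs_dns_ent and plain libc allocation in place of the kernel helpers.

#define _POSIX_C_SOURCE 200809L		/* for strndup */
#include <stdlib.h>
#include <string.h>
#include <sys/socket.h>

/* Simplified stand-in for the cached DNS entry; not the kernel's layout. */
struct dns_ent {
	char *hostname;
	size_t namelen;
	struct sockaddr_storage addr;
	size_t addrlen;
};

/* "init": duplicate the lookup key, i.e. take a private copy of the hostname. */
static void dns_ent_init(struct dns_ent *new, const struct dns_ent *key)
{
	free(new->hostname);			/* drop any previous name */
	new->hostname = strndup(key->hostname, key->namelen);
	if (new->hostname) {
		new->namelen = key->namelen;
	} else {
		new->namelen = 0;		/* allocation failed: entry is empty */
		new->addrlen = 0;
	}
}

/* "update": copy in the resolved payload, i.e. the socket address. */
static void dns_ent_update(struct dns_ent *new, const struct dns_ent *key)
{
	memcpy(&new->addr, &key->addr, key->addrlen);
	new->addrlen = key->addrlen;
}
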
/linux-4.1.27/scripts/kconfig/
H A Dqconf.cc107 * set the new data
470 item = new ConfigItem(this, 0, true); updateList()
477 item = new ConfigItem(this, last, rootEntry, true); updateList()
640 item = new ConfigItem(parent, last, child, visible); updateMenuList()
833 headerPopup = new Q3PopupMenu(this); contextMenuEvent()
834 action = new Q3Action(NULL, _("Show Name"), 0, this); contextMenuEvent()
842 action = new Q3Action(NULL, _("Show Range"), 0, this); contextMenuEvent()
850 action = new Q3Action(NULL, _("Show Data"), 0, this); contextMenuEvent()
873 list = new ConfigList(this, name); ConfigView()
874 lineEdit = new ConfigLineEdit(this); ConfigView()
1173 Q3Action* action = new Q3Action(NULL, _("Show Debug Info"), 0, popup); createPopupMenu()
1193 QVBoxLayout* layout1 = new QVBoxLayout(this, 11, 6); ConfigSearchWindow()
1194 QHBoxLayout* layout2 = new QHBoxLayout(0, 0, 6); ConfigSearchWindow()
1195 layout2->addWidget(new QLabel(_("Find:"), this)); ConfigSearchWindow()
1196 editField = new QLineEdit(this); ConfigSearchWindow()
1199 searchButton = new QPushButton(_("Search"), this); ConfigSearchWindow()
1205 split = new QSplitter(this); ConfigSearchWindow()
1207 list = new ConfigView(split, name); ConfigSearchWindow()
1209 info = new ConfigInfoView(split, name); ConfigSearchWindow()
1266 lastItem = new ConfigItem(list->list, lastItem, prop->menu, search()
1302 split1 = new QSplitter(this); ConfigMainWindow()
1306 menuView = new ConfigView(split1, "menu"); ConfigMainWindow()
1309 split2 = new QSplitter(split1); ConfigMainWindow()
1313 configView = new ConfigView(split2, "config"); ConfigMainWindow()
1316 helpText = new ConfigInfoView(split2, "help"); ConfigMainWindow()
1323 toolBar = new Q3ToolBar("Tools", this); ConfigMainWindow()
1325 backAction = new Q3Action("Back", QPixmap(xpm_back), _("Back"), 0, this); ConfigMainWindow()
1328 Q3Action *quitAction = new Q3Action("Quit", _("&Quit"), Qt::CTRL + Qt::Key_Q, this); ConfigMainWindow()
1330 Q3Action *loadAction = new Q3Action("Load", QPixmap(xpm_load), _("&Load"), Qt::CTRL + Qt::Key_L, this); ConfigMainWindow()
1332 saveAction = new Q3Action("Save", QPixmap(xpm_save), _("&Save"), Qt::CTRL + Qt::Key_S, this); ConfigMainWindow()
1337 Q3Action *saveAsAction = new Q3Action("Save As...", _("Save &As..."), 0, this); ConfigMainWindow()
1339 Q3Action *searchAction = new Q3Action("Find", _("&Find"), Qt::CTRL + Qt::Key_F, this); ConfigMainWindow()
1341 Q3Action *singleViewAction = new Q3Action("Single View", QPixmap(xpm_single_view), _("Single View"), 0, this); ConfigMainWindow()
1343 Q3Action *splitViewAction = new Q3Action("Split View", QPixmap(xpm_split_view), _("Split View"), 0, this); ConfigMainWindow()
1345 Q3Action *fullViewAction = new Q3Action("Full View", QPixmap(xpm_tree_view), _("Full View"), 0, this); ConfigMainWindow()
1348 Q3Action *showNameAction = new Q3Action(NULL, _("Show Name"), 0, this); ConfigMainWindow()
1353 Q3Action *showRangeAction = new Q3Action(NULL, _("Show Range"), 0, this); ConfigMainWindow()
1358 Q3Action *showDataAction = new Q3Action(NULL, _("Show Data"), 0, this); ConfigMainWindow()
1364 QActionGroup *optGroup = new QActionGroup(this); ConfigMainWindow()
1372 configView->showNormalAction = new QAction(_("Show Normal Options"), optGroup); ConfigMainWindow()
1373 configView->showAllAction = new QAction(_("Show All Options"), optGroup); ConfigMainWindow()
1374 configView->showPromptAction = new QAction(_("Show Prompt Options"), optGroup); ConfigMainWindow()
1376 configView->showNormalAction = new QAction(_("Show Normal Options"), 0, optGroup); ConfigMainWindow()
1377 configView->showAllAction = new QAction(_("Show All Options"), 0, optGroup); ConfigMainWindow()
1378 configView->showPromptAction = new QAction(_("Show Prompt Options"), 0, optGroup); ConfigMainWindow()
1387 Q3Action *showDebugAction = new Q3Action(NULL, _("Show Debug Info"), 0, this); ConfigMainWindow()
1393 Q3Action *showIntroAction = new Q3Action(NULL, _("Introduction"), 0, this); ConfigMainWindow()
1395 Q3Action *showAboutAction = new Q3Action(NULL, _("About"), 0, this); ConfigMainWindow()
1409 Q3PopupMenu* config = new Q3PopupMenu(this); ConfigMainWindow()
1418 Q3PopupMenu* editMenu = new Q3PopupMenu(this); ConfigMainWindow()
1423 Q3PopupMenu* optionMenu = new Q3PopupMenu(this); ConfigMainWindow()
1433 Q3PopupMenu* helpMenu = new Q3PopupMenu(this); ConfigMainWindow()
1507 searchWindow = new ConfigSearchWindow(this, "search"); searchConfig()
1762 configApp = new QApplication(ac, av); main()
1783 configSettings = new ConfigSettings(); main()
1785 v = new ConfigMainWindow(); main()
