root/arch/arm64/kernel/ptrace.c


DEFINITIONS

This source file includes the following definitions:
  1. regs_query_register_offset
  2. regs_within_kernel_stack
  3. regs_get_kernel_stack_nth
  4. ptrace_disable
  5. ptrace_hbptriggered
  6. flush_ptrace_hw_breakpoint
  7. ptrace_hw_copy_thread
  8. ptrace_hbp_get_event
  9. ptrace_hbp_set_event
  10. ptrace_hbp_create
  11. ptrace_hbp_fill_attr_ctrl
  12. ptrace_hbp_get_resource_info
  13. ptrace_hbp_get_ctrl
  14. ptrace_hbp_get_addr
  15. ptrace_hbp_get_initialised_bp
  16. ptrace_hbp_set_ctrl
  17. ptrace_hbp_set_addr
  18. hw_break_get
  19. hw_break_set
  20. gpr_get
  21. gpr_set
  22. fpr_active
  23. __fpr_get
  24. fpr_get
  25. __fpr_set
  26. fpr_set
  27. tls_get
  28. tls_set
  29. system_call_get
  30. system_call_set
  31. sve_init_header_from_task
  32. sve_size_from_header
  33. sve_get_size
  34. sve_get
  35. sve_set
  36. pac_mask_get
  37. pac_key_to_user
  38. pac_key_from_user
  39. pac_address_keys_to_user
  40. pac_address_keys_from_user
  41. pac_address_keys_get
  42. pac_address_keys_set
  43. pac_generic_keys_to_user
  44. pac_generic_keys_from_user
  45. pac_generic_keys_get
  46. pac_generic_keys_set
  47. compat_gpr_get
  48. compat_gpr_set
  49. compat_vfp_get
  50. compat_vfp_set
  51. compat_tls_get
  52. compat_tls_set
  53. compat_ptrace_read_user
  54. compat_ptrace_write_user
  55. compat_ptrace_hbp_num_to_idx
  56. compat_ptrace_hbp_get_resource_info
  57. compat_ptrace_hbp_get
  58. compat_ptrace_hbp_set
  59. compat_ptrace_gethbpregs
  60. compat_ptrace_sethbpregs
  61. compat_arch_ptrace
  62. task_user_regset_view
  63. arch_ptrace
  64. tracehook_report_syscall
  65. syscall_trace_enter
  66. syscall_trace_exit
  67. valid_compat_regs
  68. valid_native_regs
  69. valid_user_regs

   1 // SPDX-License-Identifier: GPL-2.0-only
   2 /*
   3  * Based on arch/arm/kernel/ptrace.c
   4  *
   5  * By Ross Biro 1/23/92
   6  * edited by Linus Torvalds
   7  * ARM modifications Copyright (C) 2000 Russell King
   8  * Copyright (C) 2012 ARM Ltd.
   9  */
  10 
  11 #include <linux/audit.h>
  12 #include <linux/compat.h>
  13 #include <linux/kernel.h>
  14 #include <linux/sched/signal.h>
  15 #include <linux/sched/task_stack.h>
  16 #include <linux/mm.h>
  17 #include <linux/nospec.h>
  18 #include <linux/smp.h>
  19 #include <linux/ptrace.h>
  20 #include <linux/user.h>
  21 #include <linux/seccomp.h>
  22 #include <linux/security.h>
  23 #include <linux/init.h>
  24 #include <linux/signal.h>
  25 #include <linux/string.h>
  26 #include <linux/uaccess.h>
  27 #include <linux/perf_event.h>
  28 #include <linux/hw_breakpoint.h>
  29 #include <linux/regset.h>
  30 #include <linux/tracehook.h>
  31 #include <linux/elf.h>
  32 
  33 #include <asm/compat.h>
  34 #include <asm/cpufeature.h>
  35 #include <asm/debug-monitors.h>
  36 #include <asm/fpsimd.h>
  37 #include <asm/pgtable.h>
  38 #include <asm/pointer_auth.h>
  39 #include <asm/stacktrace.h>
  40 #include <asm/syscall.h>
  41 #include <asm/traps.h>
  42 #include <asm/system_misc.h>
  43 
  44 #define CREATE_TRACE_POINTS
  45 #include <trace/events/syscalls.h>
  46 
  47 struct pt_regs_offset {
  48         const char *name;
  49         int offset;
  50 };
  51 
  52 #define REG_OFFSET_NAME(r) {.name = #r, .offset = offsetof(struct pt_regs, r)}
  53 #define REG_OFFSET_END {.name = NULL, .offset = 0}
  54 #define GPR_OFFSET_NAME(r) \
  55         {.name = "x" #r, .offset = offsetof(struct pt_regs, regs[r])}
  56 
  57 static const struct pt_regs_offset regoffset_table[] = {
  58         GPR_OFFSET_NAME(0),
  59         GPR_OFFSET_NAME(1),
  60         GPR_OFFSET_NAME(2),
  61         GPR_OFFSET_NAME(3),
  62         GPR_OFFSET_NAME(4),
  63         GPR_OFFSET_NAME(5),
  64         GPR_OFFSET_NAME(6),
  65         GPR_OFFSET_NAME(7),
  66         GPR_OFFSET_NAME(8),
  67         GPR_OFFSET_NAME(9),
  68         GPR_OFFSET_NAME(10),
  69         GPR_OFFSET_NAME(11),
  70         GPR_OFFSET_NAME(12),
  71         GPR_OFFSET_NAME(13),
  72         GPR_OFFSET_NAME(14),
  73         GPR_OFFSET_NAME(15),
  74         GPR_OFFSET_NAME(16),
  75         GPR_OFFSET_NAME(17),
  76         GPR_OFFSET_NAME(18),
  77         GPR_OFFSET_NAME(19),
  78         GPR_OFFSET_NAME(20),
  79         GPR_OFFSET_NAME(21),
  80         GPR_OFFSET_NAME(22),
  81         GPR_OFFSET_NAME(23),
  82         GPR_OFFSET_NAME(24),
  83         GPR_OFFSET_NAME(25),
  84         GPR_OFFSET_NAME(26),
  85         GPR_OFFSET_NAME(27),
  86         GPR_OFFSET_NAME(28),
  87         GPR_OFFSET_NAME(29),
  88         GPR_OFFSET_NAME(30),
  89         {.name = "lr", .offset = offsetof(struct pt_regs, regs[30])},
  90         REG_OFFSET_NAME(sp),
  91         REG_OFFSET_NAME(pc),
  92         REG_OFFSET_NAME(pstate),
  93         REG_OFFSET_END,
  94 };
  95 
  96 /**
  97  * regs_query_register_offset() - query register offset from its name
  98  * @name:       the name of a register
  99  *
 100  * regs_query_register_offset() returns the offset of a register in struct
 101  * pt_regs from its name. If the name is invalid, this returns -EINVAL.
 102  */
 103 int regs_query_register_offset(const char *name)
 104 {
 105         const struct pt_regs_offset *roff;
 106 
 107         for (roff = regoffset_table; roff->name != NULL; roff++)
 108                 if (!strcmp(roff->name, name))
 109                         return roff->offset;
 110         return -EINVAL;
 111 }
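     /*
      * Example (editor's illustration, not part of the kernel source): given
      * a probe handler's struct pt_regs *regs, regs_query_register_offset()
      * can be paired with regs_get_register() from <asm/ptrace.h> to read a
      * register by name:
      *
      *	int off = regs_query_register_offset("x0");
      *
      *	if (off >= 0)
      *		pr_info("x0 = 0x%llx\n", regs_get_register(regs, off));
      */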
 112 
 113 /**
 114  * regs_within_kernel_stack() - check the address in the stack
 115  * @regs:      pt_regs which contains kernel stack pointer.
 116  * @addr:      address which is checked.
 117  *
 118  * regs_within_kernel_stack() checks whether @addr is within the kernel stack page(s).
 119  * If @addr is within the kernel stack, it returns true. If not, returns false.
 120  */
 121 static bool regs_within_kernel_stack(struct pt_regs *regs, unsigned long addr)
 122 {
 123         return ((addr & ~(THREAD_SIZE - 1))  ==
 124                 (kernel_stack_pointer(regs) & ~(THREAD_SIZE - 1))) ||
 125                 on_irq_stack(addr, NULL);
 126 }
 127 
 128 /**
 129  * regs_get_kernel_stack_nth() - get Nth entry of the stack
 130  * @regs:       pt_regs which contains kernel stack pointer.
 131  * @n:          stack entry number.
 132  *
 133  * regs_get_kernel_stack_nth() returns the @n th entry of the kernel stack
 134  * specified by @regs. If the @n th entry is not within the kernel stack,
 135  * this returns 0.
 136  */
 137 unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs, unsigned int n)
 138 {
 139         unsigned long *addr = (unsigned long *)kernel_stack_pointer(regs);
 140 
 141         addr += n;
 142         if (regs_within_kernel_stack(regs, (unsigned long)addr))
 143                 return *addr;
 144         else
 145                 return 0;
 146 }
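     /*
      * Example (editor's illustration, not part of the kernel source): a
      * probe handler could fetch the third word on the traced kernel stack;
      * a zero return means the slot holds zero or lies outside the stack:
      *
      *	unsigned long val = regs_get_kernel_stack_nth(regs, 2);
      */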
 147 
 148 /*
 149  * TODO: does not yet catch signals sent when the child dies
 150  * in exit.c or in signal.c.
 151  */
 152 
 153 /*
 154  * Called by kernel/ptrace.c when detaching..
 155  */
 156 void ptrace_disable(struct task_struct *child)
 157 {
 158         /*
 159          * This would be better off in core code, but PTRACE_DETACH has
 160          * grown its fair share of arch-specific warts and changing it
 161          * is likely to cause regressions on obscure architectures.
 162          */
 163         user_disable_single_step(child);
 164 }
 165 
 166 #ifdef CONFIG_HAVE_HW_BREAKPOINT
 167 /*
 168  * Handle hitting a HW-breakpoint.
 169  */
 170 static void ptrace_hbptriggered(struct perf_event *bp,
 171                                 struct perf_sample_data *data,
 172                                 struct pt_regs *regs)
 173 {
 174         struct arch_hw_breakpoint *bkpt = counter_arch_bp(bp);
 175         const char *desc = "Hardware breakpoint trap (ptrace)";
 176 
 177 #ifdef CONFIG_COMPAT
 178         if (is_compat_task()) {
 179                 int si_errno = 0;
 180                 int i;
 181 
 182                 for (i = 0; i < ARM_MAX_BRP; ++i) {
 183                         if (current->thread.debug.hbp_break[i] == bp) {
 184                                 si_errno = (i << 1) + 1;
 185                                 break;
 186                         }
 187                 }
 188 
 189                 for (i = 0; i < ARM_MAX_WRP; ++i) {
 190                         if (current->thread.debug.hbp_watch[i] == bp) {
 191                                 si_errno = -((i << 1) + 1);
 192                                 break;
 193                         }
 194                 }
 195                 arm64_force_sig_ptrace_errno_trap(si_errno,
 196                                                   (void __user *)bkpt->trigger,
 197                                                   desc);
 198         }
 199 #endif
 200         arm64_force_sig_fault(SIGTRAP, TRAP_HWBKPT,
 201                               (void __user *)(bkpt->trigger),
 202                               desc);
 203 }
 204 
 205 /*
 206  * Unregister breakpoints from this task and reset the pointers in
 207  * the thread_struct.
 208  */
 209 void flush_ptrace_hw_breakpoint(struct task_struct *tsk)
 210 {
 211         int i;
 212         struct thread_struct *t = &tsk->thread;
 213 
 214         for (i = 0; i < ARM_MAX_BRP; i++) {
 215                 if (t->debug.hbp_break[i]) {
 216                         unregister_hw_breakpoint(t->debug.hbp_break[i]);
 217                         t->debug.hbp_break[i] = NULL;
 218                 }
 219         }
 220 
 221         for (i = 0; i < ARM_MAX_WRP; i++) {
 222                 if (t->debug.hbp_watch[i]) {
 223                         unregister_hw_breakpoint(t->debug.hbp_watch[i]);
 224                         t->debug.hbp_watch[i] = NULL;
 225                 }
 226         }
 227 }
 228 
 229 void ptrace_hw_copy_thread(struct task_struct *tsk)
 230 {
 231         memset(&tsk->thread.debug, 0, sizeof(struct debug_info));
 232 }
 233 
 234 static struct perf_event *ptrace_hbp_get_event(unsigned int note_type,
 235                                                struct task_struct *tsk,
 236                                                unsigned long idx)
 237 {
 238         struct perf_event *bp = ERR_PTR(-EINVAL);
 239 
 240         switch (note_type) {
 241         case NT_ARM_HW_BREAK:
 242                 if (idx >= ARM_MAX_BRP)
 243                         goto out;
 244                 idx = array_index_nospec(idx, ARM_MAX_BRP);
 245                 bp = tsk->thread.debug.hbp_break[idx];
 246                 break;
 247         case NT_ARM_HW_WATCH:
 248                 if (idx >= ARM_MAX_WRP)
 249                         goto out;
 250                 idx = array_index_nospec(idx, ARM_MAX_WRP);
 251                 bp = tsk->thread.debug.hbp_watch[idx];
 252                 break;
 253         }
 254 
 255 out:
 256         return bp;
 257 }
 258 
 259 static int ptrace_hbp_set_event(unsigned int note_type,
 260                                 struct task_struct *tsk,
 261                                 unsigned long idx,
 262                                 struct perf_event *bp)
 263 {
 264         int err = -EINVAL;
 265 
 266         switch (note_type) {
 267         case NT_ARM_HW_BREAK:
 268                 if (idx >= ARM_MAX_BRP)
 269                         goto out;
 270                 idx = array_index_nospec(idx, ARM_MAX_BRP);
 271                 tsk->thread.debug.hbp_break[idx] = bp;
 272                 err = 0;
 273                 break;
 274         case NT_ARM_HW_WATCH:
 275                 if (idx >= ARM_MAX_WRP)
 276                         goto out;
 277                 idx = array_index_nospec(idx, ARM_MAX_WRP);
 278                 tsk->thread.debug.hbp_watch[idx] = bp;
 279                 err = 0;
 280                 break;
 281         }
 282 
 283 out:
 284         return err;
 285 }
 286 
 287 static struct perf_event *ptrace_hbp_create(unsigned int note_type,
 288                                             struct task_struct *tsk,
 289                                             unsigned long idx)
 290 {
 291         struct perf_event *bp;
 292         struct perf_event_attr attr;
 293         int err, type;
 294 
 295         switch (note_type) {
 296         case NT_ARM_HW_BREAK:
 297                 type = HW_BREAKPOINT_X;
 298                 break;
 299         case NT_ARM_HW_WATCH:
 300                 type = HW_BREAKPOINT_RW;
 301                 break;
 302         default:
 303                 return ERR_PTR(-EINVAL);
 304         }
 305 
 306         ptrace_breakpoint_init(&attr);
 307 
 308         /*
 309          * Initialise fields to sane defaults
 310          * (i.e. values that will pass validation).
 311          */
 312         attr.bp_addr    = 0;
 313         attr.bp_len     = HW_BREAKPOINT_LEN_4;
 314         attr.bp_type    = type;
 315         attr.disabled   = 1;
 316 
 317         bp = register_user_hw_breakpoint(&attr, ptrace_hbptriggered, NULL, tsk);
 318         if (IS_ERR(bp))
 319                 return bp;
 320 
 321         err = ptrace_hbp_set_event(note_type, tsk, idx, bp);
 322         if (err)
 323                 return ERR_PTR(err);
 324 
 325         return bp;
 326 }
 327 
 328 static int ptrace_hbp_fill_attr_ctrl(unsigned int note_type,
 329                                      struct arch_hw_breakpoint_ctrl ctrl,
 330                                      struct perf_event_attr *attr)
 331 {
 332         int err, len, type, offset, disabled = !ctrl.enabled;
 333 
 334         attr->disabled = disabled;
 335         if (disabled)
 336                 return 0;
 337 
 338         err = arch_bp_generic_fields(ctrl, &len, &type, &offset);
 339         if (err)
 340                 return err;
 341 
 342         switch (note_type) {
 343         case NT_ARM_HW_BREAK:
 344                 if ((type & HW_BREAKPOINT_X) != type)
 345                         return -EINVAL;
 346                 break;
 347         case NT_ARM_HW_WATCH:
 348                 if ((type & HW_BREAKPOINT_RW) != type)
 349                         return -EINVAL;
 350                 break;
 351         default:
 352                 return -EINVAL;
 353         }
 354 
 355         attr->bp_len    = len;
 356         attr->bp_type   = type;
 357         attr->bp_addr   += offset;
 358 
 359         return 0;
 360 }
 361 
 362 static int ptrace_hbp_get_resource_info(unsigned int note_type, u32 *info)
 363 {
 364         u8 num;
 365         u32 reg = 0;
 366 
 367         switch (note_type) {
 368         case NT_ARM_HW_BREAK:
 369                 num = hw_breakpoint_slots(TYPE_INST);
 370                 break;
 371         case NT_ARM_HW_WATCH:
 372                 num = hw_breakpoint_slots(TYPE_DATA);
 373                 break;
 374         default:
 375                 return -EINVAL;
 376         }
 377 
 378         reg |= debug_monitors_arch();
 379         reg <<= 8;
 380         reg |= num;
 381 
 382         *info = reg;
 383         return 0;
 384 }
 385 
 386 static int ptrace_hbp_get_ctrl(unsigned int note_type,
 387                                struct task_struct *tsk,
 388                                unsigned long idx,
 389                                u32 *ctrl)
 390 {
 391         struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx);
 392 
 393         if (IS_ERR(bp))
 394                 return PTR_ERR(bp);
 395 
 396         *ctrl = bp ? encode_ctrl_reg(counter_arch_bp(bp)->ctrl) : 0;
 397         return 0;
 398 }
 399 
 400 static int ptrace_hbp_get_addr(unsigned int note_type,
 401                                struct task_struct *tsk,
 402                                unsigned long idx,
 403                                u64 *addr)
 404 {
 405         struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx);
 406 
 407         if (IS_ERR(bp))
 408                 return PTR_ERR(bp);
 409 
 410         *addr = bp ? counter_arch_bp(bp)->address : 0;
 411         return 0;
 412 }
 413 
 414 static struct perf_event *ptrace_hbp_get_initialised_bp(unsigned int note_type,
 415                                                         struct task_struct *tsk,
 416                                                         unsigned long idx)
 417 {
 418         struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx);
 419 
 420         if (!bp)
 421                 bp = ptrace_hbp_create(note_type, tsk, idx);
 422 
 423         return bp;
 424 }
 425 
 426 static int ptrace_hbp_set_ctrl(unsigned int note_type,
 427                                struct task_struct *tsk,
 428                                unsigned long idx,
 429                                u32 uctrl)
 430 {
 431         int err;
 432         struct perf_event *bp;
 433         struct perf_event_attr attr;
 434         struct arch_hw_breakpoint_ctrl ctrl;
 435 
 436         bp = ptrace_hbp_get_initialised_bp(note_type, tsk, idx);
 437         if (IS_ERR(bp)) {
 438                 err = PTR_ERR(bp);
 439                 return err;
 440         }
 441 
 442         attr = bp->attr;
 443         decode_ctrl_reg(uctrl, &ctrl);
 444         err = ptrace_hbp_fill_attr_ctrl(note_type, ctrl, &attr);
 445         if (err)
 446                 return err;
 447 
 448         return modify_user_hw_breakpoint(bp, &attr);
 449 }
 450 
 451 static int ptrace_hbp_set_addr(unsigned int note_type,
 452                                struct task_struct *tsk,
 453                                unsigned long idx,
 454                                u64 addr)
 455 {
 456         int err;
 457         struct perf_event *bp;
 458         struct perf_event_attr attr;
 459 
 460         bp = ptrace_hbp_get_initialised_bp(note_type, tsk, idx);
 461         if (IS_ERR(bp)) {
 462                 err = PTR_ERR(bp);
 463                 return err;
 464         }
 465 
 466         attr = bp->attr;
 467         attr.bp_addr = addr;
 468         err = modify_user_hw_breakpoint(bp, &attr);
 469         return err;
 470 }
 471 
 472 #define PTRACE_HBP_ADDR_SZ      sizeof(u64)
 473 #define PTRACE_HBP_CTRL_SZ      sizeof(u32)
 474 #define PTRACE_HBP_PAD_SZ       sizeof(u32)
 475 
 476 static int hw_break_get(struct task_struct *target,
 477                         const struct user_regset *regset,
 478                         unsigned int pos, unsigned int count,
 479                         void *kbuf, void __user *ubuf)
 480 {
 481         unsigned int note_type = regset->core_note_type;
 482         int ret, idx = 0, offset, limit;
 483         u32 info, ctrl;
 484         u64 addr;
 485 
 486         /* Resource info */
 487         ret = ptrace_hbp_get_resource_info(note_type, &info);
 488         if (ret)
 489                 return ret;
 490 
 491         ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &info, 0,
 492                                   sizeof(info));
 493         if (ret)
 494                 return ret;
 495 
 496         /* Pad */
 497         offset = offsetof(struct user_hwdebug_state, pad);
 498         ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf, offset,
 499                                        offset + PTRACE_HBP_PAD_SZ);
 500         if (ret)
 501                 return ret;
 502 
 503         /* (address, ctrl) registers */
 504         offset = offsetof(struct user_hwdebug_state, dbg_regs);
 505         limit = regset->n * regset->size;
 506         while (count && offset < limit) {
 507                 ret = ptrace_hbp_get_addr(note_type, target, idx, &addr);
 508                 if (ret)
 509                         return ret;
 510                 ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &addr,
 511                                           offset, offset + PTRACE_HBP_ADDR_SZ);
 512                 if (ret)
 513                         return ret;
 514                 offset += PTRACE_HBP_ADDR_SZ;
 515 
 516                 ret = ptrace_hbp_get_ctrl(note_type, target, idx, &ctrl);
 517                 if (ret)
 518                         return ret;
 519                 ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &ctrl,
 520                                           offset, offset + PTRACE_HBP_CTRL_SZ);
 521                 if (ret)
 522                         return ret;
 523                 offset += PTRACE_HBP_CTRL_SZ;
 524 
 525                 ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
 526                                                offset,
 527                                                offset + PTRACE_HBP_PAD_SZ);
 528                 if (ret)
 529                         return ret;
 530                 offset += PTRACE_HBP_PAD_SZ;
 531                 idx++;
 532         }
 533 
 534         return 0;
 535 }
 536 
 537 static int hw_break_set(struct task_struct *target,
 538                         const struct user_regset *regset,
 539                         unsigned int pos, unsigned int count,
 540                         const void *kbuf, const void __user *ubuf)
 541 {
 542         unsigned int note_type = regset->core_note_type;
 543         int ret, idx = 0, offset, limit;
 544         u32 ctrl;
 545         u64 addr;
 546 
 547         /* Resource info and pad */
 548         offset = offsetof(struct user_hwdebug_state, dbg_regs);
 549         ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf, 0, offset);
 550         if (ret)
 551                 return ret;
 552 
 553         /* (address, ctrl) registers */
 554         limit = regset->n * regset->size;
 555         while (count && offset < limit) {
 556                 if (count < PTRACE_HBP_ADDR_SZ)
 557                         return -EINVAL;
 558                 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &addr,
 559                                          offset, offset + PTRACE_HBP_ADDR_SZ);
 560                 if (ret)
 561                         return ret;
 562                 ret = ptrace_hbp_set_addr(note_type, target, idx, addr);
 563                 if (ret)
 564                         return ret;
 565                 offset += PTRACE_HBP_ADDR_SZ;
 566 
 567                 if (!count)
 568                         break;
 569                 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &ctrl,
 570                                          offset, offset + PTRACE_HBP_CTRL_SZ);
 571                 if (ret)
 572                         return ret;
 573                 ret = ptrace_hbp_set_ctrl(note_type, target, idx, ctrl);
 574                 if (ret)
 575                         return ret;
 576                 offset += PTRACE_HBP_CTRL_SZ;
 577 
 578                 ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
 579                                                 offset,
 580                                                 offset + PTRACE_HBP_PAD_SZ);
 581                 if (ret)
 582                         return ret;
 583                 offset += PTRACE_HBP_PAD_SZ;
 584                 idx++;
 585         }
 586 
 587         return 0;
 588 }
 589 #endif  /* CONFIG_HAVE_HW_BREAKPOINT */
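     /*
      * Example (editor's illustration, userspace tracer side): the resource
      * info emitted first by hw_break_get() can be fetched with
      * PTRACE_GETREGSET; the low byte of dbg_info gives the slot count and
      * the next byte the debug architecture version:
      *
      *	struct user_hwdebug_state dbg;
      *	struct iovec iov = { .iov_base = &dbg, .iov_len = sizeof(dbg) };
      *
      *	if (ptrace(PTRACE_GETREGSET, pid, NT_ARM_HW_WATCH, &iov) == 0)
      *		printf("%u watchpoint slots\n", dbg.dbg_info & 0xff);
      */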
 590 
 591 static int gpr_get(struct task_struct *target,
 592                    const struct user_regset *regset,
 593                    unsigned int pos, unsigned int count,
 594                    void *kbuf, void __user *ubuf)
 595 {
 596         struct user_pt_regs *uregs = &task_pt_regs(target)->user_regs;
 597         return user_regset_copyout(&pos, &count, &kbuf, &ubuf, uregs, 0, -1);
 598 }
 599 
 600 static int gpr_set(struct task_struct *target, const struct user_regset *regset,
 601                    unsigned int pos, unsigned int count,
 602                    const void *kbuf, const void __user *ubuf)
 603 {
 604         int ret;
 605         struct user_pt_regs newregs = task_pt_regs(target)->user_regs;
 606 
 607         ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &newregs, 0, -1);
 608         if (ret)
 609                 return ret;
 610 
 611         if (!valid_user_regs(&newregs, target))
 612                 return -EINVAL;
 613 
 614         task_pt_regs(target)->user_regs = newregs;
 615         return 0;
 616 }
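     /*
      * Example (editor's illustration, userspace tracer side): gpr_get() and
      * gpr_set() back the NT_PRSTATUS regset, so a tracer reads a stopped
      * thread's general-purpose registers with:
      *
      *	struct user_pt_regs regs;
      *	struct iovec iov = { .iov_base = &regs, .iov_len = sizeof(regs) };
      *
      *	if (ptrace(PTRACE_GETREGSET, pid, NT_PRSTATUS, &iov) == 0)
      *		printf("pc = 0x%llx\n", regs.pc);
      */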
 617 
 618 static int fpr_active(struct task_struct *target, const struct user_regset *regset)
 619 {
 620         if (!system_supports_fpsimd())
 621                 return -ENODEV;
 622         return regset->n;
 623 }
 624 
 625 /*
 626  * TODO: update fp accessors for lazy context switching (sync/flush hwstate)
 627  */
 628 static int __fpr_get(struct task_struct *target,
 629                      const struct user_regset *regset,
 630                      unsigned int pos, unsigned int count,
 631                      void *kbuf, void __user *ubuf, unsigned int start_pos)
 632 {
 633         struct user_fpsimd_state *uregs;
 634 
 635         sve_sync_to_fpsimd(target);
 636 
 637         uregs = &target->thread.uw.fpsimd_state;
 638 
 639         return user_regset_copyout(&pos, &count, &kbuf, &ubuf, uregs,
 640                                    start_pos, start_pos + sizeof(*uregs));
 641 }
 642 
 643 static int fpr_get(struct task_struct *target, const struct user_regset *regset,
 644                    unsigned int pos, unsigned int count,
 645                    void *kbuf, void __user *ubuf)
 646 {
 647         if (!system_supports_fpsimd())
 648                 return -EINVAL;
 649 
 650         if (target == current)
 651                 fpsimd_preserve_current_state();
 652 
 653         return __fpr_get(target, regset, pos, count, kbuf, ubuf, 0);
 654 }
 655 
 656 static int __fpr_set(struct task_struct *target,
 657                      const struct user_regset *regset,
 658                      unsigned int pos, unsigned int count,
 659                      const void *kbuf, const void __user *ubuf,
 660                      unsigned int start_pos)
 661 {
 662         int ret;
 663         struct user_fpsimd_state newstate;
 664 
 665         /*
 666          * Ensure target->thread.uw.fpsimd_state is up to date, so that a
 667          * short copyin can't resurrect stale data.
 668          */
 669         sve_sync_to_fpsimd(target);
 670 
 671         newstate = target->thread.uw.fpsimd_state;
 672 
 673         ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &newstate,
 674                                  start_pos, start_pos + sizeof(newstate));
 675         if (ret)
 676                 return ret;
 677 
 678         target->thread.uw.fpsimd_state = newstate;
 679 
 680         return ret;
 681 }
 682 
 683 static int fpr_set(struct task_struct *target, const struct user_regset *regset,
 684                    unsigned int pos, unsigned int count,
 685                    const void *kbuf, const void __user *ubuf)
 686 {
 687         int ret;
 688 
 689         if (!system_supports_fpsimd())
 690                 return -EINVAL;
 691 
 692         ret = __fpr_set(target, regset, pos, count, kbuf, ubuf, 0);
 693         if (ret)
 694                 return ret;
 695 
 696         sve_sync_from_fpsimd_zeropad(target);
 697         fpsimd_flush_task_state(target);
 698 
 699         return ret;
 700 }
 701 
 702 static int tls_get(struct task_struct *target, const struct user_regset *regset,
 703                    unsigned int pos, unsigned int count,
 704                    void *kbuf, void __user *ubuf)
 705 {
 706         unsigned long *tls = &target->thread.uw.tp_value;
 707 
 708         if (target == current)
 709                 tls_preserve_current_state();
 710 
 711         return user_regset_copyout(&pos, &count, &kbuf, &ubuf, tls, 0, -1);
 712 }
 713 
 714 static int tls_set(struct task_struct *target, const struct user_regset *regset,
 715                    unsigned int pos, unsigned int count,
 716                    const void *kbuf, const void __user *ubuf)
 717 {
 718         int ret;
 719         unsigned long tls = target->thread.uw.tp_value;
 720 
 721         ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &tls, 0, -1);
 722         if (ret)
 723                 return ret;
 724 
 725         target->thread.uw.tp_value = tls;
 726         return ret;
 727 }
 728 
 729 static int system_call_get(struct task_struct *target,
 730                            const struct user_regset *regset,
 731                            unsigned int pos, unsigned int count,
 732                            void *kbuf, void __user *ubuf)
 733 {
 734         int syscallno = task_pt_regs(target)->syscallno;
 735 
 736         return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
 737                                    &syscallno, 0, -1);
 738 }
 739 
 740 static int system_call_set(struct task_struct *target,
 741                            const struct user_regset *regset,
 742                            unsigned int pos, unsigned int count,
 743                            const void *kbuf, const void __user *ubuf)
 744 {
 745         int syscallno = task_pt_regs(target)->syscallno;
 746         int ret;
 747 
 748         ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &syscallno, 0, -1);
 749         if (ret)
 750                 return ret;
 751 
 752         task_pt_regs(target)->syscallno = syscallno;
 753         return ret;
 754 }
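     /*
      * Example (editor's illustration, userspace tracer side): at a
      * syscall-entry stop a tracer can rewrite the syscall number through
      * this regset; writing -1 makes the kernel skip the system call:
      *
      *	int nr = -1;
      *	struct iovec iov = { .iov_base = &nr, .iov_len = sizeof(nr) };
      *
      *	ptrace(PTRACE_SETREGSET, pid, NT_ARM_SYSTEM_CALL, &iov);
      */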
 755 
 756 #ifdef CONFIG_ARM64_SVE
 757 
 758 static void sve_init_header_from_task(struct user_sve_header *header,
 759                                       struct task_struct *target)
 760 {
 761         unsigned int vq;
 762 
 763         memset(header, 0, sizeof(*header));
 764 
 765         header->flags = test_tsk_thread_flag(target, TIF_SVE) ?
 766                 SVE_PT_REGS_SVE : SVE_PT_REGS_FPSIMD;
 767         if (test_tsk_thread_flag(target, TIF_SVE_VL_INHERIT))
 768                 header->flags |= SVE_PT_VL_INHERIT;
 769 
 770         header->vl = target->thread.sve_vl;
 771         vq = sve_vq_from_vl(header->vl);
 772 
 773         header->max_vl = sve_max_vl;
 774         header->size = SVE_PT_SIZE(vq, header->flags);
 775         header->max_size = SVE_PT_SIZE(sve_vq_from_vl(header->max_vl),
 776                                       SVE_PT_REGS_SVE);
 777 }
 778 
 779 static unsigned int sve_size_from_header(struct user_sve_header const *header)
 780 {
 781         return ALIGN(header->size, SVE_VQ_BYTES);
 782 }
 783 
 784 static unsigned int sve_get_size(struct task_struct *target,
 785                                  const struct user_regset *regset)
 786 {
 787         struct user_sve_header header;
 788 
 789         if (!system_supports_sve())
 790                 return 0;
 791 
 792         sve_init_header_from_task(&header, target);
 793         return sve_size_from_header(&header);
 794 }
 795 
 796 static int sve_get(struct task_struct *target,
 797                    const struct user_regset *regset,
 798                    unsigned int pos, unsigned int count,
 799                    void *kbuf, void __user *ubuf)
 800 {
 801         int ret;
 802         struct user_sve_header header;
 803         unsigned int vq;
 804         unsigned long start, end;
 805 
 806         if (!system_supports_sve())
 807                 return -EINVAL;
 808 
 809         /* Header */
 810         sve_init_header_from_task(&header, target);
 811         vq = sve_vq_from_vl(header.vl);
 812 
 813         ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &header,
 814                                   0, sizeof(header));
 815         if (ret)
 816                 return ret;
 817 
 818         if (target == current)
 819                 fpsimd_preserve_current_state();
 820 
 821         /* Registers: FPSIMD-only case */
 822 
 823         BUILD_BUG_ON(SVE_PT_FPSIMD_OFFSET != sizeof(header));
 824         if ((header.flags & SVE_PT_REGS_MASK) == SVE_PT_REGS_FPSIMD)
 825                 return __fpr_get(target, regset, pos, count, kbuf, ubuf,
 826                                  SVE_PT_FPSIMD_OFFSET);
 827 
 828         /* Otherwise: full SVE case */
 829 
 830         BUILD_BUG_ON(SVE_PT_SVE_OFFSET != sizeof(header));
 831         start = SVE_PT_SVE_OFFSET;
 832         end = SVE_PT_SVE_FFR_OFFSET(vq) + SVE_PT_SVE_FFR_SIZE(vq);
 833         ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
 834                                   target->thread.sve_state,
 835                                   start, end);
 836         if (ret)
 837                 return ret;
 838 
 839         start = end;
 840         end = SVE_PT_SVE_FPSR_OFFSET(vq);
 841         ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
 842                                        start, end);
 843         if (ret)
 844                 return ret;
 845 
 846         /*
 847          * Copy fpsr and fpcr, which must follow contiguously in
 848          * struct fpsimd_state:
 849          */
 850         start = end;
 851         end = SVE_PT_SVE_FPCR_OFFSET(vq) + SVE_PT_SVE_FPCR_SIZE;
 852         ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
 853                                   &target->thread.uw.fpsimd_state.fpsr,
 854                                   start, end);
 855         if (ret)
 856                 return ret;
 857 
 858         start = end;
 859         end = sve_size_from_header(&header);
 860         return user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
 861                                         start, end);
 862 }
 863 
 864 static int sve_set(struct task_struct *target,
 865                    const struct user_regset *regset,
 866                    unsigned int pos, unsigned int count,
 867                    const void *kbuf, const void __user *ubuf)
 868 {
 869         int ret;
 870         struct user_sve_header header;
 871         unsigned int vq;
 872         unsigned long start, end;
 873 
 874         if (!system_supports_sve())
 875                 return -EINVAL;
 876 
 877         /* Header */
 878         if (count < sizeof(header))
 879                 return -EINVAL;
 880         ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &header,
 881                                  0, sizeof(header));
 882         if (ret)
 883                 goto out;
 884 
 885         /*
 886          * Apart from SVE_PT_REGS_MASK, all SVE_PT_* flags are consumed by
 887          * sve_set_vector_length(), which will also validate them for us:
 888          */
 889         ret = sve_set_vector_length(target, header.vl,
 890                 ((unsigned long)header.flags & ~SVE_PT_REGS_MASK) << 16);
 891         if (ret)
 892                 goto out;
 893 
 894         /* Actual VL set may be less than the user asked for: */
 895         vq = sve_vq_from_vl(target->thread.sve_vl);
 896 
 897         /* Registers: FPSIMD-only case */
 898 
 899         BUILD_BUG_ON(SVE_PT_FPSIMD_OFFSET != sizeof(header));
 900         if ((header.flags & SVE_PT_REGS_MASK) == SVE_PT_REGS_FPSIMD) {
 901                 ret = __fpr_set(target, regset, pos, count, kbuf, ubuf,
 902                                 SVE_PT_FPSIMD_OFFSET);
 903                 clear_tsk_thread_flag(target, TIF_SVE);
 904                 goto out;
 905         }
 906 
 907         /* Otherwise: full SVE case */
 908 
 909         /*
 910          * If setting a different VL from the requested VL and there is
 911          * register data, the data layout will be wrong: don't even
 912          * try to set the registers in this case.
 913          */
 914         if (count && vq != sve_vq_from_vl(header.vl)) {
 915                 ret = -EIO;
 916                 goto out;
 917         }
 918 
 919         sve_alloc(target);
 920 
 921         /*
 922          * Ensure target->thread.sve_state is up to date with target's
 923          * FPSIMD regs, so that a short copyin leaves trailing registers
 924          * unmodified.
 925          */
 926         fpsimd_sync_to_sve(target);
 927         set_tsk_thread_flag(target, TIF_SVE);
 928 
 929         BUILD_BUG_ON(SVE_PT_SVE_OFFSET != sizeof(header));
 930         start = SVE_PT_SVE_OFFSET;
 931         end = SVE_PT_SVE_FFR_OFFSET(vq) + SVE_PT_SVE_FFR_SIZE(vq);
 932         ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
 933                                  target->thread.sve_state,
 934                                  start, end);
 935         if (ret)
 936                 goto out;
 937 
 938         start = end;
 939         end = SVE_PT_SVE_FPSR_OFFSET(vq);
 940         ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
 941                                         start, end);
 942         if (ret)
 943                 goto out;
 944 
 945         /*
 946          * Copy fpsr and fpcr, which must follow contiguously in
 947          * struct fpsimd_state:
 948          */
 949         start = end;
 950         end = SVE_PT_SVE_FPCR_OFFSET(vq) + SVE_PT_SVE_FPCR_SIZE;
 951         ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
 952                                  &target->thread.uw.fpsimd_state.fpsr,
 953                                  start, end);
 954 
 955 out:
 956         fpsimd_flush_task_state(target);
 957         return ret;
 958 }
 959 
 960 #endif /* CONFIG_ARM64_SVE */
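     /*
      * Example (editor's illustration, userspace tracer side): sve_get()
      * always emits a struct user_sve_header first, so a tracer can query
      * the vector length before sizing a full register dump:
      *
      *	struct user_sve_header hdr;
      *	struct iovec iov = { .iov_base = &hdr, .iov_len = sizeof(hdr) };
      *
      *	if (ptrace(PTRACE_GETREGSET, pid, NT_ARM_SVE, &iov) == 0)
      *		printf("VL %u, dump size %u bytes\n", hdr.vl, hdr.size);
      */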
 961 
 962 #ifdef CONFIG_ARM64_PTR_AUTH
 963 static int pac_mask_get(struct task_struct *target,
 964                         const struct user_regset *regset,
 965                         unsigned int pos, unsigned int count,
 966                         void *kbuf, void __user *ubuf)
 967 {
 968         /*
 969          * The PAC bits can differ across data and instruction pointers
 970          * depending on TCR_EL1.TBID*, which we may make use of in future, so
 971          * we expose separate masks.
 972          */
 973         unsigned long mask = ptrauth_user_pac_mask();
 974         struct user_pac_mask uregs = {
 975                 .data_mask = mask,
 976                 .insn_mask = mask,
 977         };
 978 
 979         if (!system_supports_address_auth())
 980                 return -EINVAL;
 981 
 982         return user_regset_copyout(&pos, &count, &kbuf, &ubuf, &uregs, 0, -1);
 983 }
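     /*
      * Example (editor's illustration, userspace debugger side): the masks
      * reported above let a debugger clear the PAC bits of a tracee's
      * user-space code pointer (e.g. a saved link register) before
      * symbolising it:
      *
      *	struct user_pac_mask masks;
      *	struct iovec iov = { .iov_base = &masks, .iov_len = sizeof(masks) };
      *
      *	if (ptrace(PTRACE_GETREGSET, pid, NT_ARM_PAC_MASK, &iov) == 0)
      *		lr &= ~masks.insn_mask;
      */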
 984 
 985 #ifdef CONFIG_CHECKPOINT_RESTORE
 986 static __uint128_t pac_key_to_user(const struct ptrauth_key *key)
 987 {
 988         return (__uint128_t)key->hi << 64 | key->lo;
 989 }
 990 
 991 static struct ptrauth_key pac_key_from_user(__uint128_t ukey)
 992 {
 993         struct ptrauth_key key = {
 994                 .lo = (unsigned long)ukey,
 995                 .hi = (unsigned long)(ukey >> 64),
 996         };
 997 
 998         return key;
 999 }
1000 
1001 static void pac_address_keys_to_user(struct user_pac_address_keys *ukeys,
1002                                      const struct ptrauth_keys *keys)
1003 {
1004         ukeys->apiakey = pac_key_to_user(&keys->apia);
1005         ukeys->apibkey = pac_key_to_user(&keys->apib);
1006         ukeys->apdakey = pac_key_to_user(&keys->apda);
1007         ukeys->apdbkey = pac_key_to_user(&keys->apdb);
1008 }
1009 
1010 static void pac_address_keys_from_user(struct ptrauth_keys *keys,
1011                                        const struct user_pac_address_keys *ukeys)
1012 {
1013         keys->apia = pac_key_from_user(ukeys->apiakey);
1014         keys->apib = pac_key_from_user(ukeys->apibkey);
1015         keys->apda = pac_key_from_user(ukeys->apdakey);
1016         keys->apdb = pac_key_from_user(ukeys->apdbkey);
1017 }
1018 
1019 static int pac_address_keys_get(struct task_struct *target,
1020                                 const struct user_regset *regset,
1021                                 unsigned int pos, unsigned int count,
1022                                 void *kbuf, void __user *ubuf)
1023 {
1024         struct ptrauth_keys *keys = &target->thread.keys_user;
1025         struct user_pac_address_keys user_keys;
1026 
1027         if (!system_supports_address_auth())
1028                 return -EINVAL;
1029 
1030         pac_address_keys_to_user(&user_keys, keys);
1031 
1032         return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
1033                                    &user_keys, 0, -1);
1034 }
1035 
1036 static int pac_address_keys_set(struct task_struct *target,
1037                                 const struct user_regset *regset,
1038                                 unsigned int pos, unsigned int count,
1039                                 const void *kbuf, const void __user *ubuf)
1040 {
1041         struct ptrauth_keys *keys = &target->thread.keys_user;
1042         struct user_pac_address_keys user_keys;
1043         int ret;
1044 
1045         if (!system_supports_address_auth())
1046                 return -EINVAL;
1047 
1048         pac_address_keys_to_user(&user_keys, keys);
1049         ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1050                                  &user_keys, 0, -1);
1051         if (ret)
1052                 return ret;
1053         pac_address_keys_from_user(keys, &user_keys);
1054 
1055         return 0;
1056 }
1057 
1058 static void pac_generic_keys_to_user(struct user_pac_generic_keys *ukeys,
1059                                      const struct ptrauth_keys *keys)
1060 {
1061         ukeys->apgakey = pac_key_to_user(&keys->apga);
1062 }
1063 
1064 static void pac_generic_keys_from_user(struct ptrauth_keys *keys,
1065                                        const struct user_pac_generic_keys *ukeys)
1066 {
1067         keys->apga = pac_key_from_user(ukeys->apgakey);
1068 }
1069 
1070 static int pac_generic_keys_get(struct task_struct *target,
1071                                 const struct user_regset *regset,
1072                                 unsigned int pos, unsigned int count,
1073                                 void *kbuf, void __user *ubuf)
1074 {
1075         struct ptrauth_keys *keys = &target->thread.keys_user;
1076         struct user_pac_generic_keys user_keys;
1077 
1078         if (!system_supports_generic_auth())
1079                 return -EINVAL;
1080 
1081         pac_generic_keys_to_user(&user_keys, keys);
1082 
1083         return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
1084                                    &user_keys, 0, -1);
1085 }
1086 
1087 static int pac_generic_keys_set(struct task_struct *target,
1088                                 const struct user_regset *regset,
1089                                 unsigned int pos, unsigned int count,
1090                                 const void *kbuf, const void __user *ubuf)
1091 {
1092         struct ptrauth_keys *keys = &target->thread.keys_user;
1093         struct user_pac_generic_keys user_keys;
1094         int ret;
1095 
1096         if (!system_supports_generic_auth())
1097                 return -EINVAL;
1098 
1099         pac_generic_keys_to_user(&user_keys, keys);
1100         ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1101                                  &user_keys, 0, -1);
1102         if (ret)
1103                 return ret;
1104         pac_generic_keys_from_user(keys, &user_keys);
1105 
1106         return 0;
1107 }
1108 #endif /* CONFIG_CHECKPOINT_RESTORE */
1109 #endif /* CONFIG_ARM64_PTR_AUTH */
1110 
1111 enum aarch64_regset {
1112         REGSET_GPR,
1113         REGSET_FPR,
1114         REGSET_TLS,
1115 #ifdef CONFIG_HAVE_HW_BREAKPOINT
1116         REGSET_HW_BREAK,
1117         REGSET_HW_WATCH,
1118 #endif
1119         REGSET_SYSTEM_CALL,
1120 #ifdef CONFIG_ARM64_SVE
1121         REGSET_SVE,
1122 #endif
1123 #ifdef CONFIG_ARM64_PTR_AUTH
1124         REGSET_PAC_MASK,
1125 #ifdef CONFIG_CHECKPOINT_RESTORE
1126         REGSET_PACA_KEYS,
1127         REGSET_PACG_KEYS,
1128 #endif
1129 #endif
1130 };
1131 
1132 static const struct user_regset aarch64_regsets[] = {
1133         [REGSET_GPR] = {
1134                 .core_note_type = NT_PRSTATUS,
1135                 .n = sizeof(struct user_pt_regs) / sizeof(u64),
1136                 .size = sizeof(u64),
1137                 .align = sizeof(u64),
1138                 .get = gpr_get,
1139                 .set = gpr_set
1140         },
1141         [REGSET_FPR] = {
1142                 .core_note_type = NT_PRFPREG,
1143                 .n = sizeof(struct user_fpsimd_state) / sizeof(u32),
1144                 /*
1145                  * We pretend we have 32-bit registers because the fpsr and
1146                  * fpcr are 32 bits wide.
1147                  */
1148                 .size = sizeof(u32),
1149                 .align = sizeof(u32),
1150                 .active = fpr_active,
1151                 .get = fpr_get,
1152                 .set = fpr_set
1153         },
1154         [REGSET_TLS] = {
1155                 .core_note_type = NT_ARM_TLS,
1156                 .n = 1,
1157                 .size = sizeof(void *),
1158                 .align = sizeof(void *),
1159                 .get = tls_get,
1160                 .set = tls_set,
1161         },
1162 #ifdef CONFIG_HAVE_HW_BREAKPOINT
1163         [REGSET_HW_BREAK] = {
1164                 .core_note_type = NT_ARM_HW_BREAK,
1165                 .n = sizeof(struct user_hwdebug_state) / sizeof(u32),
1166                 .size = sizeof(u32),
1167                 .align = sizeof(u32),
1168                 .get = hw_break_get,
1169                 .set = hw_break_set,
1170         },
1171         [REGSET_HW_WATCH] = {
1172                 .core_note_type = NT_ARM_HW_WATCH,
1173                 .n = sizeof(struct user_hwdebug_state) / sizeof(u32),
1174                 .size = sizeof(u32),
1175                 .align = sizeof(u32),
1176                 .get = hw_break_get,
1177                 .set = hw_break_set,
1178         },
1179 #endif
1180         [REGSET_SYSTEM_CALL] = {
1181                 .core_note_type = NT_ARM_SYSTEM_CALL,
1182                 .n = 1,
1183                 .size = sizeof(int),
1184                 .align = sizeof(int),
1185                 .get = system_call_get,
1186                 .set = system_call_set,
1187         },
1188 #ifdef CONFIG_ARM64_SVE
1189         [REGSET_SVE] = { /* Scalable Vector Extension */
1190                 .core_note_type = NT_ARM_SVE,
1191                 .n = DIV_ROUND_UP(SVE_PT_SIZE(SVE_VQ_MAX, SVE_PT_REGS_SVE),
1192                                   SVE_VQ_BYTES),
1193                 .size = SVE_VQ_BYTES,
1194                 .align = SVE_VQ_BYTES,
1195                 .get = sve_get,
1196                 .set = sve_set,
1197                 .get_size = sve_get_size,
1198         },
1199 #endif
1200 #ifdef CONFIG_ARM64_PTR_AUTH
1201         [REGSET_PAC_MASK] = {
1202                 .core_note_type = NT_ARM_PAC_MASK,
1203                 .n = sizeof(struct user_pac_mask) / sizeof(u64),
1204                 .size = sizeof(u64),
1205                 .align = sizeof(u64),
1206                 .get = pac_mask_get,
1207                 /* this cannot be set dynamically */
1208         },
1209 #ifdef CONFIG_CHECKPOINT_RESTORE
1210         [REGSET_PACA_KEYS] = {
1211                 .core_note_type = NT_ARM_PACA_KEYS,
1212                 .n = sizeof(struct user_pac_address_keys) / sizeof(__uint128_t),
1213                 .size = sizeof(__uint128_t),
1214                 .align = sizeof(__uint128_t),
1215                 .get = pac_address_keys_get,
1216                 .set = pac_address_keys_set,
1217         },
1218         [REGSET_PACG_KEYS] = {
1219                 .core_note_type = NT_ARM_PACG_KEYS,
1220                 .n = sizeof(struct user_pac_generic_keys) / sizeof(__uint128_t),
1221                 .size = sizeof(__uint128_t),
1222                 .align = sizeof(__uint128_t),
1223                 .get = pac_generic_keys_get,
1224                 .set = pac_generic_keys_set,
1225         },
1226 #endif
1227 #endif
1228 };
1229 
1230 static const struct user_regset_view user_aarch64_view = {
1231         .name = "aarch64", .e_machine = EM_AARCH64,
1232         .regsets = aarch64_regsets, .n = ARRAY_SIZE(aarch64_regsets)
1233 };
1234 
1235 #ifdef CONFIG_COMPAT
1236 enum compat_regset {
1237         REGSET_COMPAT_GPR,
1238         REGSET_COMPAT_VFP,
1239 };
1240 
1241 static int compat_gpr_get(struct task_struct *target,
1242                           const struct user_regset *regset,
1243                           unsigned int pos, unsigned int count,
1244                           void *kbuf, void __user *ubuf)
1245 {
1246         int ret = 0;
1247         unsigned int i, start, num_regs;
1248 
1249         /* Calculate the number of AArch32 registers contained in count */
1250         num_regs = count / regset->size;
1251 
1252         /* Convert pos into a register number */
1253         start = pos / regset->size;
1254 
1255         if (start + num_regs > regset->n)
1256                 return -EIO;
1257 
1258         for (i = 0; i < num_regs; ++i) {
1259                 unsigned int idx = start + i;
1260                 compat_ulong_t reg;
1261 
1262                 switch (idx) {
1263                 case 15:
1264                         reg = task_pt_regs(target)->pc;
1265                         break;
1266                 case 16:
1267                         reg = task_pt_regs(target)->pstate;
1268                         reg = pstate_to_compat_psr(reg);
1269                         break;
1270                 case 17:
1271                         reg = task_pt_regs(target)->orig_x0;
1272                         break;
1273                 default:
1274                         reg = task_pt_regs(target)->regs[idx];
1275                 }
1276 
1277                 if (kbuf) {
1278                         memcpy(kbuf, &reg, sizeof(reg));
1279                         kbuf += sizeof(reg);
1280                 } else {
1281                         ret = copy_to_user(ubuf, &reg, sizeof(reg));
1282                         if (ret) {
1283                                 ret = -EFAULT;
1284                                 break;
1285                         }
1286 
1287                         ubuf += sizeof(reg);
1288                 }
1289         }
1290 
1291         return ret;
1292 }
1293 
1294 static int compat_gpr_set(struct task_struct *target,
1295                           const struct user_regset *regset,
1296                           unsigned int pos, unsigned int count,
1297                           const void *kbuf, const void __user *ubuf)
1298 {
1299         struct pt_regs newregs;
1300         int ret = 0;
1301         unsigned int i, start, num_regs;
1302 
1303         /* Calculate the number of AArch32 registers contained in count */
1304         num_regs = count / regset->size;
1305 
1306         /* Convert pos into a register number */
1307         start = pos / regset->size;
1308 
1309         if (start + num_regs > regset->n)
1310                 return -EIO;
1311 
1312         newregs = *task_pt_regs(target);
1313 
1314         for (i = 0; i < num_regs; ++i) {
1315                 unsigned int idx = start + i;
1316                 compat_ulong_t reg;
1317 
1318                 if (kbuf) {
1319                         memcpy(&reg, kbuf, sizeof(reg));
1320                         kbuf += sizeof(reg);
1321                 } else {
1322                         ret = copy_from_user(&reg, ubuf, sizeof(reg));
1323                         if (ret) {
1324                                 ret = -EFAULT;
1325                                 break;
1326                         }
1327 
1328                         ubuf += sizeof(reg);
1329                 }
1330 
1331                 switch (idx) {
1332                 case 15:
1333                         newregs.pc = reg;
1334                         break;
1335                 case 16:
1336                         reg = compat_psr_to_pstate(reg);
1337                         newregs.pstate = reg;
1338                         break;
1339                 case 17:
1340                         newregs.orig_x0 = reg;
1341                         break;
1342                 default:
1343                         newregs.regs[idx] = reg;
1344                 }
1345 
1346         }
1347 
1348         if (valid_user_regs(&newregs.user_regs, target))
1349                 *task_pt_regs(target) = newregs;
1350         else
1351                 ret = -EINVAL;
1352 
1353         return ret;
1354 }
1355 
1356 static int compat_vfp_get(struct task_struct *target,
1357                           const struct user_regset *regset,
1358                           unsigned int pos, unsigned int count,
1359                           void *kbuf, void __user *ubuf)
1360 {
1361         struct user_fpsimd_state *uregs;
1362         compat_ulong_t fpscr;
1363         int ret, vregs_end_pos;
1364 
1365         if (!system_supports_fpsimd())
1366                 return -EINVAL;
1367 
1368         uregs = &target->thread.uw.fpsimd_state;
1369 
1370         if (target == current)
1371                 fpsimd_preserve_current_state();
1372 
1373         /*
1374          * The VFP registers are packed into the fpsimd_state, so they all sit
1375          * nicely together for us. We just need to create the fpscr separately.
1376          */
1377         vregs_end_pos = VFP_STATE_SIZE - sizeof(compat_ulong_t);
1378         ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, uregs,
1379                                   0, vregs_end_pos);
1380 
1381         if (count && !ret) {
1382                 fpscr = (uregs->fpsr & VFP_FPSCR_STAT_MASK) |
1383                         (uregs->fpcr & VFP_FPSCR_CTRL_MASK);
1384 
1385                 ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &fpscr,
1386                                           vregs_end_pos, VFP_STATE_SIZE);
1387         }
1388 
1389         return ret;
1390 }
1391 
1392 static int compat_vfp_set(struct task_struct *target,
1393                           const struct user_regset *regset,
1394                           unsigned int pos, unsigned int count,
1395                           const void *kbuf, const void __user *ubuf)
1396 {
1397         struct user_fpsimd_state *uregs;
1398         compat_ulong_t fpscr;
1399         int ret, vregs_end_pos;
1400 
1401         if (!system_supports_fpsimd())
1402                 return -EINVAL;
1403 
1404         uregs = &target->thread.uw.fpsimd_state;
1405 
1406         vregs_end_pos = VFP_STATE_SIZE - sizeof(compat_ulong_t);
1407         ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, uregs, 0,
1408                                  vregs_end_pos);
1409 
1410         if (count && !ret) {
1411                 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &fpscr,
1412                                          vregs_end_pos, VFP_STATE_SIZE);
1413                 if (!ret) {
1414                         uregs->fpsr = fpscr & VFP_FPSCR_STAT_MASK;
1415                         uregs->fpcr = fpscr & VFP_FPSCR_CTRL_MASK;
1416                 }
1417         }
1418 
1419         fpsimd_flush_task_state(target);
1420         return ret;
1421 }
1422 
1423 static int compat_tls_get(struct task_struct *target,
1424                           const struct user_regset *regset, unsigned int pos,
1425                           unsigned int count, void *kbuf, void __user *ubuf)
1426 {
1427         compat_ulong_t tls = (compat_ulong_t)target->thread.uw.tp_value;
1428         return user_regset_copyout(&pos, &count, &kbuf, &ubuf, &tls, 0, -1);
1429 }
1430 
1431 static int compat_tls_set(struct task_struct *target,
1432                           const struct user_regset *regset, unsigned int pos,
1433                           unsigned int count, const void *kbuf,
1434                           const void __user *ubuf)
1435 {
1436         int ret;
1437         compat_ulong_t tls = target->thread.uw.tp_value;
1438 
1439         ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &tls, 0, -1);
1440         if (ret)
1441                 return ret;
1442 
1443         target->thread.uw.tp_value = tls;
1444         return ret;
1445 }
1446 
1447 static const struct user_regset aarch32_regsets[] = {
1448         [REGSET_COMPAT_GPR] = {
1449                 .core_note_type = NT_PRSTATUS,
1450                 .n = COMPAT_ELF_NGREG,
1451                 .size = sizeof(compat_elf_greg_t),
1452                 .align = sizeof(compat_elf_greg_t),
1453                 .get = compat_gpr_get,
1454                 .set = compat_gpr_set
1455         },
1456         [REGSET_COMPAT_VFP] = {
1457                 .core_note_type = NT_ARM_VFP,
1458                 .n = VFP_STATE_SIZE / sizeof(compat_ulong_t),
1459                 .size = sizeof(compat_ulong_t),
1460                 .align = sizeof(compat_ulong_t),
1461                 .active = fpr_active,
1462                 .get = compat_vfp_get,
1463                 .set = compat_vfp_set
1464         },
1465 };
1466 
1467 static const struct user_regset_view user_aarch32_view = {
1468         .name = "aarch32", .e_machine = EM_ARM,
1469         .regsets = aarch32_regsets, .n = ARRAY_SIZE(aarch32_regsets)
1470 };
1471 
1472 static const struct user_regset aarch32_ptrace_regsets[] = {
1473         [REGSET_GPR] = {
1474                 .core_note_type = NT_PRSTATUS,
1475                 .n = COMPAT_ELF_NGREG,
1476                 .size = sizeof(compat_elf_greg_t),
1477                 .align = sizeof(compat_elf_greg_t),
1478                 .get = compat_gpr_get,
1479                 .set = compat_gpr_set
1480         },
1481         [REGSET_FPR] = {
1482                 .core_note_type = NT_ARM_VFP,
1483                 .n = VFP_STATE_SIZE / sizeof(compat_ulong_t),
1484                 .size = sizeof(compat_ulong_t),
1485                 .align = sizeof(compat_ulong_t),
1486                 .get = compat_vfp_get,
1487                 .set = compat_vfp_set
1488         },
1489         [REGSET_TLS] = {
1490                 .core_note_type = NT_ARM_TLS,
1491                 .n = 1,
1492                 .size = sizeof(compat_ulong_t),
1493                 .align = sizeof(compat_ulong_t),
1494                 .get = compat_tls_get,
1495                 .set = compat_tls_set,
1496         },
1497 #ifdef CONFIG_HAVE_HW_BREAKPOINT
1498         [REGSET_HW_BREAK] = {
1499                 .core_note_type = NT_ARM_HW_BREAK,
1500                 .n = sizeof(struct user_hwdebug_state) / sizeof(u32),
1501                 .size = sizeof(u32),
1502                 .align = sizeof(u32),
1503                 .get = hw_break_get,
1504                 .set = hw_break_set,
1505         },
1506         [REGSET_HW_WATCH] = {
1507                 .core_note_type = NT_ARM_HW_WATCH,
1508                 .n = sizeof(struct user_hwdebug_state) / sizeof(u32),
1509                 .size = sizeof(u32),
1510                 .align = sizeof(u32),
1511                 .get = hw_break_get,
1512                 .set = hw_break_set,
1513         },
1514 #endif
1515         [REGSET_SYSTEM_CALL] = {
1516                 .core_note_type = NT_ARM_SYSTEM_CALL,
1517                 .n = 1,
1518                 .size = sizeof(int),
1519                 .align = sizeof(int),
1520                 .get = system_call_get,
1521                 .set = system_call_set,
1522         },
1523 };
1524 
1525 static const struct user_regset_view user_aarch32_ptrace_view = {
1526         .name = "aarch32", .e_machine = EM_ARM,
1527         .regsets = aarch32_ptrace_regsets, .n = ARRAY_SIZE(aarch32_ptrace_regsets)
1528 };
1529 
1530 static int compat_ptrace_read_user(struct task_struct *tsk, compat_ulong_t off,
1531                                    compat_ulong_t __user *ret)
1532 {
1533         compat_ulong_t tmp;
1534 
1535         if (off & 3)
1536                 return -EIO;
1537 
1538         if (off == COMPAT_PT_TEXT_ADDR)
1539                 tmp = tsk->mm->start_code;
1540         else if (off == COMPAT_PT_DATA_ADDR)
1541                 tmp = tsk->mm->start_data;
1542         else if (off == COMPAT_PT_TEXT_END_ADDR)
1543                 tmp = tsk->mm->end_code;
1544         else if (off < sizeof(compat_elf_gregset_t))
1545                 return copy_regset_to_user(tsk, &user_aarch32_view,
1546                                            REGSET_COMPAT_GPR, off,
1547                                            sizeof(compat_ulong_t), ret);
1548         else if (off >= COMPAT_USER_SZ)
1549                 return -EIO;
1550         else
1551                 tmp = 0;
1552 
1553         return put_user(tmp, ret);
1554 }
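/*
 * Offsets below sizeof(compat_elf_gregset_t) map straight onto the compat GPR
 * regset, the three COMPAT_PT_* offsets report the text/data segment
 * addresses, and anything else below COMPAT_USER_SZ reads back as zero. As on
 * 32-bit arm, the result is written through the data pointer rather than
 * returned, e.g. from a 32-bit tracer (hypothetical 'pid'):
 *
 *	compat_ulong_t word;
 *	ptrace(PTRACE_PEEKUSR, pid, (void *)(4 * regno), &word);
 */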
1555 
1556 static int compat_ptrace_write_user(struct task_struct *tsk, compat_ulong_t off,
1557                                     compat_ulong_t val)
1558 {
1559         int ret;
1560         mm_segment_t old_fs = get_fs();
1561 
1562         if (off & 3 || off >= COMPAT_USER_SZ)
1563                 return -EIO;
1564 
1565         if (off >= sizeof(compat_elf_gregset_t))
1566                 return 0;
1567 
1568         set_fs(KERNEL_DS);
1569         ret = copy_regset_from_user(tsk, &user_aarch32_view,
1570                                     REGSET_COMPAT_GPR, off,
1571                                     sizeof(compat_ulong_t),
1572                                     &val);
1573         set_fs(old_fs);
1574 
1575         return ret;
1576 }
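/*
 * The temporary switch to KERNEL_DS above lets copy_regset_from_user(), which
 * expects a user-space pointer, safely read &val from the kernel stack; the
 * original address limit is restored immediately afterwards.
 */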
1577 
1578 #ifdef CONFIG_HAVE_HW_BREAKPOINT
1579 
1580 /*
1581  * Convert a virtual register number into an index for a thread_info
1582  * breakpoint array. Breakpoints are identified using positive numbers
1583  * whilst watchpoints are negative. The registers are laid out as pairs
1584  * of (address, control), each pair mapping to a unique hw_breakpoint struct.
1585  * Register 0 is reserved for describing resource information.
1586  */
1587 static int compat_ptrace_hbp_num_to_idx(compat_long_t num)
1588 {
1589         return (abs(num) - 1) >> 1;
1590 }
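/*
 * Worked example of the mapping above: registers 1 and 2 are the
 * (address, control) pair of breakpoint slot 0, registers 3 and 4 map to
 * slot 1, and so on; likewise -1/-2 select watchpoint slot 0. Odd register
 * numbers carry the address and even ones the control word (see
 * compat_ptrace_hbp_get/set below); register 0 is the resource info word.
 */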
1591 
1592 static int compat_ptrace_hbp_get_resource_info(u32 *kdata)
1593 {
1594         u8 num_brps, num_wrps, debug_arch, wp_len;
1595         u32 reg = 0;
1596 
1597         num_brps        = hw_breakpoint_slots(TYPE_INST);
1598         num_wrps        = hw_breakpoint_slots(TYPE_DATA);
1599 
1600         debug_arch      = debug_monitors_arch();
1601         wp_len          = 8;
1602         reg             |= debug_arch;
1603         reg             <<= 8;
1604         reg             |= wp_len;
1605         reg             <<= 8;
1606         reg             |= num_wrps;
1607         reg             <<= 8;
1608         reg             |= num_brps;
1609 
1610         *kdata = reg;
1611         return 0;
1612 }
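/*
 * Layout of the resource info word built above, from least to most
 * significant byte: number of breakpoints, number of watchpoints, maximum
 * watchpoint length (fixed at 8 here), debug architecture version. For
 * example, 6 breakpoints and 4 watchpoints with debug_arch 0x6 would read
 * back as 0x06080406.
 */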
1613 
1614 static int compat_ptrace_hbp_get(unsigned int note_type,
1615                                  struct task_struct *tsk,
1616                                  compat_long_t num,
1617                                  u32 *kdata)
1618 {
1619         u64 addr = 0;
1620         u32 ctrl = 0;
1621 
1622         int err, idx = compat_ptrace_hbp_num_to_idx(num);
1623 
1624         if (num & 1) {
1625                 err = ptrace_hbp_get_addr(note_type, tsk, idx, &addr);
1626                 *kdata = (u32)addr;
1627         } else {
1628                 err = ptrace_hbp_get_ctrl(note_type, tsk, idx, &ctrl);
1629                 *kdata = ctrl;
1630         }
1631 
1632         return err;
1633 }
1634 
1635 static int compat_ptrace_hbp_set(unsigned int note_type,
1636                                  struct task_struct *tsk,
1637                                  compat_long_t num,
1638                                  u32 *kdata)
1639 {
1640         u64 addr;
1641         u32 ctrl;
1642 
1643         int err, idx = compat_ptrace_hbp_num_to_idx(num);
1644 
1645         if (num & 1) {
1646                 addr = *kdata;
1647                 err = ptrace_hbp_set_addr(note_type, tsk, idx, addr);
1648         } else {
1649                 ctrl = *kdata;
1650                 err = ptrace_hbp_set_ctrl(note_type, tsk, idx, ctrl);
1651         }
1652 
1653         return err;
1654 }
1655 
1656 static int compat_ptrace_gethbpregs(struct task_struct *tsk, compat_long_t num,
1657                                     compat_ulong_t __user *data)
1658 {
1659         int ret;
1660         u32 kdata;
1661 
1662         /* Watchpoint */
1663         if (num < 0) {
1664                 ret = compat_ptrace_hbp_get(NT_ARM_HW_WATCH, tsk, num, &kdata);
1665         /* Resource info */
1666         } else if (num == 0) {
1667                 ret = compat_ptrace_hbp_get_resource_info(&kdata);
1668         /* Breakpoint */
1669         } else {
1670                 ret = compat_ptrace_hbp_get(NT_ARM_HW_BREAK, tsk, num, &kdata);
1671         }
1672 
1673         if (!ret)
1674                 ret = put_user(kdata, data);
1675 
1676         return ret;
1677 }
1678 
1679 static int compat_ptrace_sethbpregs(struct task_struct *tsk, compat_long_t num,
1680                                     compat_ulong_t __user *data)
1681 {
1682         int ret;
1683         u32 kdata = 0;
1684 
1685         if (num == 0)
1686                 return 0;
1687 
1688         ret = get_user(kdata, data);
1689         if (ret)
1690                 return ret;
1691 
1692         if (num < 0)
1693                 ret = compat_ptrace_hbp_set(NT_ARM_HW_WATCH, tsk, num, &kdata);
1694         else
1695                 ret = compat_ptrace_hbp_set(NT_ARM_HW_BREAK, tsk, num, &kdata);
1696 
1697         return ret;
1698 }
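/*
 * Illustrative sketch (not part of this file): a 32-bit tracer could query
 * the resource info word with something like the following, assuming a
 * hypothetical 'pid' and a PTRACE_GETHBPREGS definition visible to userspace:
 *
 *	unsigned int info;
 *	ptrace(PTRACE_GETHBPREGS, pid, (void *)0, &info);
 *	num_brps = info & 0xff;
 */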
1699 #endif  /* CONFIG_HAVE_HW_BREAKPOINT */
1700 
1701 long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
1702                         compat_ulong_t caddr, compat_ulong_t cdata)
1703 {
1704         unsigned long addr = caddr;
1705         unsigned long data = cdata;
1706         void __user *datap = compat_ptr(data);
1707         int ret;
1708 
1709         switch (request) {
1710                 case PTRACE_PEEKUSR:
1711                         ret = compat_ptrace_read_user(child, addr, datap);
1712                         break;
1713 
1714                 case PTRACE_POKEUSR:
1715                         ret = compat_ptrace_write_user(child, addr, data);
1716                         break;
1717 
1718                 case COMPAT_PTRACE_GETREGS:
1719                         ret = copy_regset_to_user(child,
1720                                                   &user_aarch32_view,
1721                                                   REGSET_COMPAT_GPR,
1722                                                   0, sizeof(compat_elf_gregset_t),
1723                                                   datap);
1724                         break;
1725 
1726                 case COMPAT_PTRACE_SETREGS:
1727                         ret = copy_regset_from_user(child,
1728                                                     &user_aarch32_view,
1729                                                     REGSET_COMPAT_GPR,
1730                                                     0, sizeof(compat_elf_gregset_t),
1731                                                     datap);
1732                         break;
1733 
1734                 case COMPAT_PTRACE_GET_THREAD_AREA:
1735                         ret = put_user((compat_ulong_t)child->thread.uw.tp_value,
1736                                        (compat_ulong_t __user *)datap);
1737                         break;
1738 
1739                 case COMPAT_PTRACE_SET_SYSCALL:
1740                         task_pt_regs(child)->syscallno = data;
1741                         ret = 0;
1742                         break;
1743 
1744                 case COMPAT_PTRACE_GETVFPREGS:
1745                         ret = copy_regset_to_user(child,
1746                                                   &user_aarch32_view,
1747                                                   REGSET_COMPAT_VFP,
1748                                                   0, VFP_STATE_SIZE,
1749                                                   datap);
1750                         break;
1751 
1752                 case COMPAT_PTRACE_SETVFPREGS:
1753                         ret = copy_regset_from_user(child,
1754                                                     &user_aarch32_view,
1755                                                     REGSET_COMPAT_VFP,
1756                                                     0, VFP_STATE_SIZE,
1757                                                     datap);
1758                         break;
1759 
1760 #ifdef CONFIG_HAVE_HW_BREAKPOINT
1761                 case COMPAT_PTRACE_GETHBPREGS:
1762                         ret = compat_ptrace_gethbpregs(child, addr, datap);
1763                         break;
1764 
1765                 case COMPAT_PTRACE_SETHBPREGS:
1766                         ret = compat_ptrace_sethbpregs(child, addr, datap);
1767                         break;
1768 #endif
1769 
1770                 default:
1771                         ret = compat_ptrace_request(child, request, addr,
1772                                                     data);
1773                         break;
1774         }
1775 
1776         return ret;
1777 }
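/*
 * compat_arch_ptrace() is only entered when the tracer itself is a 32-bit
 * task issuing the compat ptrace syscall; anything not handled in the switch
 * above falls through to the generic compat_ptrace_request(). A 64-bit tracer
 * debugging a 32-bit child goes through arch_ptrace() below instead, with the
 * regset view chosen by task_user_regset_view().
 */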
1778 #endif /* CONFIG_COMPAT */
1779 
1780 const struct user_regset_view *task_user_regset_view(struct task_struct *task)
1781 {
1782 #ifdef CONFIG_COMPAT
1783         /*
1784          * Core dumping of 32-bit tasks or compat ptrace requests must use the
1785          * user_aarch32_view compatible with arm32. Native ptrace requests on
1786          * 32-bit children use an extended user_aarch32_ptrace_view to allow
1787          * access to the TLS register.
1788          */
1789         if (is_compat_task())
1790                 return &user_aarch32_view;
1791         else if (is_compat_thread(task_thread_info(task)))
1792                 return &user_aarch32_ptrace_view;
1793 #endif
1794         return &user_aarch64_view;
1795 }
1796 
1797 long arch_ptrace(struct task_struct *child, long request,
1798                  unsigned long addr, unsigned long data)
1799 {
1800         return ptrace_request(child, request, addr, data);
1801 }
1802 
1803 enum ptrace_syscall_dir {
1804         PTRACE_SYSCALL_ENTER = 0,
1805         PTRACE_SYSCALL_EXIT,
1806 };
1807 
1808 static void tracehook_report_syscall(struct pt_regs *regs,
1809                                      enum ptrace_syscall_dir dir)
1810 {
1811         int regno;
1812         unsigned long saved_reg;
1813 
1814         /*
1815          * A scratch register (ip(r12) on AArch32, x7 on AArch64) is
1816          * used to denote syscall entry/exit:
1817          */
1818         regno = (is_compat_task() ? 12 : 7);
1819         saved_reg = regs->regs[regno];
1820         regs->regs[regno] = dir;
1821 
1822         if (dir == PTRACE_SYSCALL_EXIT)
1823                 tracehook_report_syscall_exit(regs, 0);
1824         else if (tracehook_report_syscall_entry(regs))
1825                 forget_syscall(regs);
1826 
1827         regs->regs[regno] = saved_reg;
1828 }
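/*
 * This gives tracers a simple way to tell the two stops apart: while stopped
 * at a syscall event, reading the scratch register (x7 for a 64-bit tracee,
 * r12 for a 32-bit one) via NT_PRSTATUS yields PTRACE_SYSCALL_ENTER (0) on
 * entry and PTRACE_SYSCALL_EXIT (1) on exit; the original value is restored
 * before the tracee resumes.
 */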
1829 
1830 int syscall_trace_enter(struct pt_regs *regs)
1831 {
1832         unsigned long flags = READ_ONCE(current_thread_info()->flags);
1833 
1834         if (flags & (_TIF_SYSCALL_EMU | _TIF_SYSCALL_TRACE)) {
1835                 tracehook_report_syscall(regs, PTRACE_SYSCALL_ENTER);
1836                 if (!in_syscall(regs) || (flags & _TIF_SYSCALL_EMU))
1837                         return -1;
1838         }
1839 
1840         /* Do the secure computing after ptrace; failures should be fast. */
1841         if (secure_computing(NULL) == -1)
1842                 return -1;
1843 
1844         if (test_thread_flag(TIF_SYSCALL_TRACEPOINT))
1845                 trace_sys_enter(regs, regs->syscallno);
1846 
1847         audit_syscall_entry(regs->syscallno, regs->orig_x0, regs->regs[1],
1848                             regs->regs[2], regs->regs[3]);
1849 
1850         return regs->syscallno;
1851 }
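/*
 * Returning -1 here tells the syscall entry code to skip the system call
 * entirely, either because the tracer cancelled or is emulating it
 * (_TIF_SYSCALL_EMU) or because seccomp rejected it; otherwise the possibly
 * tracer-modified syscall number is returned and dispatched as usual.
 */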
1852 
1853 void syscall_trace_exit(struct pt_regs *regs)
1854 {
1855         audit_syscall_exit(regs);
1856 
1857         if (test_thread_flag(TIF_SYSCALL_TRACEPOINT))
1858                 trace_sys_exit(regs, regs_return_value(regs));
1859 
1860         if (test_thread_flag(TIF_SYSCALL_TRACE))
1861                 tracehook_report_syscall(regs, PTRACE_SYSCALL_EXIT);
1862 
1863         rseq_syscall(regs);
1864 }
1865 
1866 /*
1867  * SPSR_ELx bits which are always architecturally RES0 per ARM DDI 0487D.a.
1868  * We permit userspace to set SSBS (AArch64 bit 12, AArch32 bit 23) which is
1869  * not described in ARM DDI 0487D.a.
1870  * We treat PAN and UAO as RES0 bits, as they are meaningless at EL0, and may
1871  * be allocated an EL0 meaning in future.
1872  * Userspace cannot use these until they have an architectural meaning.
1873  * Note that this follows the SPSR_ELx format, not the AArch32 PSR format.
1874  * We also reserve IL for the kernel; SS is handled dynamically.
1875  */
1876 #define SPSR_EL1_AARCH64_RES0_BITS \
1877         (GENMASK_ULL(63, 32) | GENMASK_ULL(27, 25) | GENMASK_ULL(23, 22) | \
1878          GENMASK_ULL(20, 13) | GENMASK_ULL(11, 10) | GENMASK_ULL(5, 5))
1879 #define SPSR_EL1_AARCH32_RES0_BITS \
1880         (GENMASK_ULL(63, 32) | GENMASK_ULL(22, 22) | GENMASK_ULL(20, 20))
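/*
 * For reference, with the GENMASK_ULL() terms above these masks evaluate to
 * 0xffffffff0edfec20 (AArch64) and 0xffffffff00500000 (AArch32).
 */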
1881 
1882 static int valid_compat_regs(struct user_pt_regs *regs)
1883 {
1884         regs->pstate &= ~SPSR_EL1_AARCH32_RES0_BITS;
1885 
1886         if (!system_supports_mixed_endian_el0()) {
1887                 if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
1888                         regs->pstate |= PSR_AA32_E_BIT;
1889                 else
1890                         regs->pstate &= ~PSR_AA32_E_BIT;
1891         }
1892 
1893         if (user_mode(regs) && (regs->pstate & PSR_MODE32_BIT) &&
1894             (regs->pstate & PSR_AA32_A_BIT) == 0 &&
1895             (regs->pstate & PSR_AA32_I_BIT) == 0 &&
1896             (regs->pstate & PSR_AA32_F_BIT) == 0) {
1897                 return 1;
1898         }
1899 
1900         /*
1901          * Force PSR to a valid 32-bit EL0t, preserving the same bits as
1902          * arch/arm.
1903          */
1904         regs->pstate &= PSR_AA32_N_BIT | PSR_AA32_Z_BIT |
1905                         PSR_AA32_C_BIT | PSR_AA32_V_BIT |
1906                         PSR_AA32_Q_BIT | PSR_AA32_IT_MASK |
1907                         PSR_AA32_GE_MASK | PSR_AA32_E_BIT |
1908                         PSR_AA32_T_BIT;
1909         regs->pstate |= PSR_MODE32_BIT;
1910 
1911         return 0;
1912 }
1913 
1914 static int valid_native_regs(struct user_pt_regs *regs)
1915 {
1916         regs->pstate &= ~SPSR_EL1_AARCH64_RES0_BITS;
1917 
1918         if (user_mode(regs) && !(regs->pstate & PSR_MODE32_BIT) &&
1919             (regs->pstate & PSR_D_BIT) == 0 &&
1920             (regs->pstate & PSR_A_BIT) == 0 &&
1921             (regs->pstate & PSR_I_BIT) == 0 &&
1922             (regs->pstate & PSR_F_BIT) == 0) {
1923                 return 1;
1924         }
1925 
1926         /* Force PSR to a valid 64-bit EL0t */
1927         regs->pstate &= PSR_N_BIT | PSR_Z_BIT | PSR_C_BIT | PSR_V_BIT;
1928 
1929         return 0;
1930 }
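/*
 * Both helpers above return 1 when the supplied pstate is already acceptable
 * for EL0 and 0 when it had to be forced to a harmless EL0t value; callers
 * such as gpr_set() and the signal-return path treat a 0 result as an error.
 */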
1931 
1932 /*
1933  * Are the current registers suitable for user mode? (used to maintain
1934  * security in signal handlers)
1935  */
1936 int valid_user_regs(struct user_pt_regs *regs, struct task_struct *task)
1937 {
1938         if (!test_tsk_thread_flag(task, TIF_SINGLESTEP))
1939                 regs->pstate &= ~DBG_SPSR_SS;
1940 
1941         if (is_compat_thread(task_thread_info(task)))
1942                 return valid_compat_regs(regs);
1943         else
1944                 return valid_native_regs(regs);
1945 }
