mn 396 arch/ia64/include/asm/pal.h mn : 1, /* Min. state save
mn 708 arch/ia64/include/asm/pal.h #define pmci_proc_min_state_save_area_regd pme_processor.mn
mn 1104 arch/x86/kernel/apic/x2apic_uv_x.c static void get_mn(struct mn *mnp)
mn 1135 arch/x86/kernel/apic/x2apic_uv_x.c struct mn mn;
mn 1137 arch/x86/kernel/apic/x2apic_uv_x.c get_mn(&mn);
mn 1138 arch/x86/kernel/apic/x2apic_uv_x.c hi->gpa_mask = mn.m_val ?
mn 1139 arch/x86/kernel/apic/x2apic_uv_x.c (1UL << (mn.m_val + mn.n_val)) - 1 :
mn 1142 arch/x86/kernel/apic/x2apic_uv_x.c hi->m_val = mn.m_val;
mn 1143 arch/x86/kernel/apic/x2apic_uv_x.c hi->n_val = mn.n_val;
mn 1144 arch/x86/kernel/apic/x2apic_uv_x.c hi->m_shift = mn.m_shift;
mn 1145 arch/x86/kernel/apic/x2apic_uv_x.c hi->n_lshift = mn.n_lshift ? mn.n_lshift : 0;
mn 1157 arch/x86/kernel/apic/x2apic_uv_x.c uv_cpuid.gnode_shift = max_t(unsigned int, uv_cpuid.gnode_shift, mn.n_val);
mn 1159 arch/x86/kernel/apic/x2apic_uv_x.c if (mn.m_val)
mn 1160 arch/x86/kernel/apic/x2apic_uv_x.c hi->gnode_upper = (u64)hi->gnode_extra << mn.m_val;
mn 106 drivers/clk/qcom/clk-rcg.c static u32 md_to_m(struct mn *mn, u32 md)
mn 108 drivers/clk/qcom/clk-rcg.c md >>= mn->m_val_shift;
mn 109 drivers/clk/qcom/clk-rcg.c md &= BIT(mn->width) - 1;
mn 132 drivers/clk/qcom/clk-rcg.c static u32 mn_to_md(struct mn *mn, u32 m, u32 n, u32 md)
mn 136 drivers/clk/qcom/clk-rcg.c mask_w = BIT(mn->width) - 1;
mn 137 drivers/clk/qcom/clk-rcg.c mask = (mask_w << mn->m_val_shift) | mask_w;
mn 141 drivers/clk/qcom/clk-rcg.c m <<= mn->m_val_shift;
mn 149 drivers/clk/qcom/clk-rcg.c static u32 ns_m_to_n(struct mn *mn, u32 ns, u32 m)
mn 151 drivers/clk/qcom/clk-rcg.c ns = ~ns >> mn->n_val_shift;
mn 152 drivers/clk/qcom/clk-rcg.c ns &= BIT(mn->width) - 1;
mn 156 drivers/clk/qcom/clk-rcg.c static u32 reg_to_mnctr_mode(struct mn *mn, u32 val)
mn 158 drivers/clk/qcom/clk-rcg.c val >>= mn->mnctr_mode_shift;
mn 163 drivers/clk/qcom/clk-rcg.c static u32 mn_to_ns(struct mn *mn, u32 m, u32 n, u32 ns)
mn 167 drivers/clk/qcom/clk-rcg.c mask = BIT(mn->width) - 1;
mn 168 drivers/clk/qcom/clk-rcg.c mask <<= mn->n_val_shift;
mn 174 drivers/clk/qcom/clk-rcg.c n &= BIT(mn->width) - 1;
mn 175 drivers/clk/qcom/clk-rcg.c n <<= mn->n_val_shift;
mn 182 drivers/clk/qcom/clk-rcg.c static u32 mn_to_reg(struct mn *mn, u32 m, u32 n, u32 val)
mn 186 drivers/clk/qcom/clk-rcg.c mask = MNCTR_MODE_MASK << mn->mnctr_mode_shift;
mn 187 drivers/clk/qcom/clk-rcg.c mask |= BIT(mn->mnctr_en_bit);
mn 191 drivers/clk/qcom/clk-rcg.c val |= BIT(mn->mnctr_en_bit);
mn 192 drivers/clk/qcom/clk-rcg.c val |= MNCTR_MODE_DUAL << mn->mnctr_mode_shift;
mn 202 drivers/clk/qcom/clk-rcg.c struct mn *mn;
mn 207 drivers/clk/qcom/clk-rcg.c bool banked_mn = !!rcg->mn[1].width;
mn 225 drivers/clk/qcom/clk-rcg.c mn = &rcg->mn[new_bank];
mn 228 drivers/clk/qcom/clk-rcg.c ns |= BIT(mn->mnctr_reset_bit);
mn 236 drivers/clk/qcom/clk-rcg.c md = mn_to_md(mn, f->m, f->n, md);
mn 240 drivers/clk/qcom/clk-rcg.c ns = mn_to_ns(mn, f->m, f->n, ns);
mn 247 drivers/clk/qcom/clk-rcg.c ns = mn_to_reg(mn, f->m, f->n, ns);
mn 252 drivers/clk/qcom/clk-rcg.c reg = mn_to_reg(mn, f->m, f->n, reg);
mn 259 drivers/clk/qcom/clk-rcg.c ns &= ~BIT(mn->mnctr_reset_bit);
mn 297 drivers/clk/qcom/clk-rcg.c bool banked_mn = !!rcg->mn[1].width;
mn 307 drivers/clk/qcom/clk-rcg.c f.m = md_to_m(&rcg->mn[bank], md);
mn 308 drivers/clk/qcom/clk-rcg.c f.n = ns_m_to_n(&rcg->mn[bank], ns, f.m);
mn 346 drivers/clk/qcom/clk-rcg.c struct mn *mn = &rcg->mn;
mn 351 drivers/clk/qcom/clk-rcg.c if (rcg->mn.width) {
mn 353 drivers/clk/qcom/clk-rcg.c m = md_to_m(mn, md);
mn 354 drivers/clk/qcom/clk-rcg.c n = ns_m_to_n(mn, ns, m);
mn 360 drivers/clk/qcom/clk-rcg.c mode = reg_to_mnctr_mode(mn, mode);
mn 372 drivers/clk/qcom/clk-rcg.c struct mn *mn;
mn 374 drivers/clk/qcom/clk-rcg.c bool banked_mn = !!rcg->mn[1].width;
mn 383 drivers/clk/qcom/clk-rcg.c mn = &rcg->mn[bank];
mn 385 drivers/clk/qcom/clk-rcg.c m = md_to_m(mn, md);
mn 386 drivers/clk/qcom/clk-rcg.c n = ns_m_to_n(mn, ns, m);
mn 390 drivers/clk/qcom/clk-rcg.c mode = reg_to_mnctr_mode(mn, reg);
mn 477 drivers/clk/qcom/clk-rcg.c struct mn *mn = &rcg->mn;
mn 481 drivers/clk/qcom/clk-rcg.c if (rcg->mn.reset_in_cc)
mn 486 drivers/clk/qcom/clk-rcg.c if (rcg->mn.width) {
mn 487 drivers/clk/qcom/clk-rcg.c mask = BIT(mn->mnctr_reset_bit);
mn 491 drivers/clk/qcom/clk-rcg.c md = mn_to_md(mn, f->m, f->n, md);
mn 498 drivers/clk/qcom/clk-rcg.c ctl = mn_to_reg(mn, f->m, f->n, ctl);
mn 501 drivers/clk/qcom/clk-rcg.c ns = mn_to_reg(mn, f->m, f->n, ns);
mn 503 drivers/clk/qcom/clk-rcg.c ns = mn_to_ns(mn, f->m, f->n, ns);
mn 79 drivers/clk/qcom/clk-rcg.h struct mn mn;
mn 117 drivers/clk/qcom/clk-rcg.h struct mn mn[2];
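The clk-rcg.c helpers indexed above pack and unpack the RCG M/N counter values with plain shift-and-mask arithmetic on the MD and NS registers. A minimal, compilable sketch of that pattern follows; BIT() is a local stand-in for the kernel macro, the struct is reduced to the fields the arithmetic needs, and the shift/width values in main() are invented for illustration, not taken from any real SoC:

    #include <stdint.h>
    #include <stdio.h>

    #define BIT(n) (1u << (n))

    /* Illustrative subset of the struct mn fields referenced above. */
    struct mn {
            uint8_t m_val_shift;
            uint8_t n_val_shift;
            uint8_t width;
    };

    /* Mirrors md_to_m(): isolate the M field from the MD register. */
    static uint32_t md_to_m(const struct mn *mn, uint32_t md)
    {
            md >>= mn->m_val_shift;
            md &= BIT(mn->width) - 1;
            return md;
    }

    /* Mirrors the mask/shift skeleton of mn_to_ns(): clear the old N
     * field, then write the new one.  The real driver additionally
     * stores N in inverted ~(n - m) form; that detail is elided here. */
    static uint32_t mn_to_ns(const struct mn *mn, uint32_t n, uint32_t ns)
    {
            uint32_t mask = (BIT(mn->width) - 1) << mn->n_val_shift;

            ns &= ~mask;
            n &= BIT(mn->width) - 1;
            ns |= n << mn->n_val_shift;
            return ns;
    }

    int main(void)
    {
            struct mn mn = { .m_val_shift = 16, .n_val_shift = 16, .width = 8 };

            printf("m = %u\n", md_to_m(&mn, 0x002d0000));   /* -> 45 */
            printf("ns = %#x\n", mn_to_ns(&mn, 45, 0));     /* -> 0x2d0000 */
            return 0;
    }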
mn 343 drivers/clk/qcom/gcc-ipq806x.c .mn = {
mn 394 drivers/clk/qcom/gcc-ipq806x.c .mn = {
mn 445 drivers/clk/qcom/gcc-ipq806x.c .mn = {
mn 496 drivers/clk/qcom/gcc-ipq806x.c .mn = {
mn 547 drivers/clk/qcom/gcc-ipq806x.c .mn = {
mn 598 drivers/clk/qcom/gcc-ipq806x.c .mn = {
mn 662 drivers/clk/qcom/gcc-ipq806x.c .mn = {
mn 711 drivers/clk/qcom/gcc-ipq806x.c .mn = {
mn 760 drivers/clk/qcom/gcc-ipq806x.c .mn = {
mn 809 drivers/clk/qcom/gcc-ipq806x.c .mn = {
mn 858 drivers/clk/qcom/gcc-ipq806x.c .mn = {
mn 907 drivers/clk/qcom/gcc-ipq806x.c .mn = {
mn 1057 drivers/clk/qcom/gcc-ipq806x.c .mn = {
mn 1106 drivers/clk/qcom/gcc-ipq806x.c .mn = {
mn 1155 drivers/clk/qcom/gcc-ipq806x.c .mn = {
mn 1269 drivers/clk/qcom/gcc-ipq806x.c .mn = {
mn 1317 drivers/clk/qcom/gcc-ipq806x.c .mn = {
mn 1400 drivers/clk/qcom/gcc-ipq806x.c .mn = {
mn 1980 drivers/clk/qcom/gcc-ipq806x.c .mn = {
mn 2050 drivers/clk/qcom/gcc-ipq806x.c .mn = {
mn 2120 drivers/clk/qcom/gcc-ipq806x.c .mn = {
mn 2184 drivers/clk/qcom/gcc-ipq806x.c .mn = {
mn 2299 drivers/clk/qcom/gcc-ipq806x.c .mn[0] = {
mn 2307 drivers/clk/qcom/gcc-ipq806x.c .mn[1] = {
mn 2371 drivers/clk/qcom/gcc-ipq806x.c .mn[0] = {
mn 2379 drivers/clk/qcom/gcc-ipq806x.c .mn[1] = {
mn 2443 drivers/clk/qcom/gcc-ipq806x.c .mn[0] = {
mn 2451 drivers/clk/qcom/gcc-ipq806x.c .mn[1] = {
mn 2515 drivers/clk/qcom/gcc-ipq806x.c .mn[0] = {
mn 2523 drivers/clk/qcom/gcc-ipq806x.c .mn[1] = {
mn 2653 drivers/clk/qcom/gcc-ipq806x.c .mn[0] = {
mn 2661 drivers/clk/qcom/gcc-ipq806x.c .mn[1] = {
mn 2706 drivers/clk/qcom/gcc-ipq806x.c .mn[0] = {
mn 2714 drivers/clk/qcom/gcc-ipq806x.c .mn[1] = {
mn 187 drivers/clk/qcom/gcc-mdm9615.c .mn = {
mn 238 drivers/clk/qcom/gcc-mdm9615.c .mn = {
mn 289 drivers/clk/qcom/gcc-mdm9615.c .mn = {
mn 340 drivers/clk/qcom/gcc-mdm9615.c .mn = {
mn 391 drivers/clk/qcom/gcc-mdm9615.c .mn = {
mn 454 drivers/clk/qcom/gcc-mdm9615.c .mn = {
mn 503 drivers/clk/qcom/gcc-mdm9615.c .mn = {
mn 552 drivers/clk/qcom/gcc-mdm9615.c .mn = {
mn 601 drivers/clk/qcom/gcc-mdm9615.c .mn = {
mn 650 drivers/clk/qcom/gcc-mdm9615.c .mn = {
mn 705 drivers/clk/qcom/gcc-mdm9615.c .mn = {
mn 754 drivers/clk/qcom/gcc-mdm9615.c .mn = {
mn 803 drivers/clk/qcom/gcc-mdm9615.c .mn = {
mn 917 drivers/clk/qcom/gcc-mdm9615.c .mn = {
mn 965 drivers/clk/qcom/gcc-mdm9615.c .mn = {
mn 1018 drivers/clk/qcom/gcc-mdm9615.c .mn = {
mn 1067 drivers/clk/qcom/gcc-mdm9615.c .mn = {
mn 1122 drivers/clk/qcom/gcc-mdm9615.c .mn = {
mn 1177 drivers/clk/qcom/gcc-mdm9615.c .mn = {
mn 1232 drivers/clk/qcom/gcc-mdm9615.c .mn = {
mn 103 drivers/clk/qcom/gcc-msm8660.c .mn = {
mn 154 drivers/clk/qcom/gcc-msm8660.c .mn = {
mn 205 drivers/clk/qcom/gcc-msm8660.c .mn = {
mn 256 drivers/clk/qcom/gcc-msm8660.c .mn = {
mn 307 drivers/clk/qcom/gcc-msm8660.c .mn = {
mn 358 drivers/clk/qcom/gcc-msm8660.c .mn = {
mn 409 drivers/clk/qcom/gcc-msm8660.c .mn = {
mn 460 drivers/clk/qcom/gcc-msm8660.c .mn = {
mn 509 drivers/clk/qcom/gcc-msm8660.c .mn = {
mn 558 drivers/clk/qcom/gcc-msm8660.c .mn = {
mn 607 drivers/clk/qcom/gcc-msm8660.c .mn = {
mn 656 drivers/clk/qcom/gcc-msm8660.c .mn = {
mn 718 drivers/clk/qcom/gcc-msm8660.c .mn = {
mn 767 drivers/clk/qcom/gcc-msm8660.c .mn = {
mn 816 drivers/clk/qcom/gcc-msm8660.c .mn = {
mn 865 drivers/clk/qcom/gcc-msm8660.c .mn = {
mn 914 drivers/clk/qcom/gcc-msm8660.c .mn = {
mn 963 drivers/clk/qcom/gcc-msm8660.c .mn = {
mn 1012 drivers/clk/qcom/gcc-msm8660.c .mn = {
mn 1061 drivers/clk/qcom/gcc-msm8660.c .mn = {
mn 1110 drivers/clk/qcom/gcc-msm8660.c .mn = {
mn 1159 drivers/clk/qcom/gcc-msm8660.c .mn = {
mn 1208 drivers/clk/qcom/gcc-msm8660.c .mn = {
mn 1257 drivers/clk/qcom/gcc-msm8660.c .mn = {
mn 1319 drivers/clk/qcom/gcc-msm8660.c .mn = {
mn 1368 drivers/clk/qcom/gcc-msm8660.c .mn = {
mn 1417 drivers/clk/qcom/gcc-msm8660.c .mn = {
mn 1528 drivers/clk/qcom/gcc-msm8660.c .mn = {
mn 1576 drivers/clk/qcom/gcc-msm8660.c .mn = {
mn 1624 drivers/clk/qcom/gcc-msm8660.c .mn = {
mn 1672 drivers/clk/qcom/gcc-msm8660.c .mn = {
mn 1720 drivers/clk/qcom/gcc-msm8660.c .mn = {
mn 1773 drivers/clk/qcom/gcc-msm8660.c .mn = {
mn 1827 drivers/clk/qcom/gcc-msm8660.c .mn = {
mn 1876 drivers/clk/qcom/gcc-msm8660.c .mn = {
mn 1943 drivers/clk/qcom/gcc-msm8660.c .mn = {
mn 329 drivers/clk/qcom/gcc-msm8960.c .mn = {
mn 380 drivers/clk/qcom/gcc-msm8960.c .mn = {
mn 431 drivers/clk/qcom/gcc-msm8960.c .mn = {
mn 482 drivers/clk/qcom/gcc-msm8960.c .mn = {
mn 533 drivers/clk/qcom/gcc-msm8960.c .mn = {
mn 584 drivers/clk/qcom/gcc-msm8960.c .mn = {
mn 635 drivers/clk/qcom/gcc-msm8960.c .mn = {
mn 686 drivers/clk/qcom/gcc-msm8960.c .mn = {
mn 735 drivers/clk/qcom/gcc-msm8960.c .mn = {
mn 784 drivers/clk/qcom/gcc-msm8960.c .mn = {
mn 833 drivers/clk/qcom/gcc-msm8960.c .mn = {
mn 882 drivers/clk/qcom/gcc-msm8960.c .mn = {
mn 944 drivers/clk/qcom/gcc-msm8960.c .mn = {
mn 993 drivers/clk/qcom/gcc-msm8960.c .mn = {
mn 1042 drivers/clk/qcom/gcc-msm8960.c .mn = {
mn 1091 drivers/clk/qcom/gcc-msm8960.c .mn = {
mn 1140 drivers/clk/qcom/gcc-msm8960.c .mn = {
mn 1189 drivers/clk/qcom/gcc-msm8960.c .mn = {
mn 1238 drivers/clk/qcom/gcc-msm8960.c .mn = {
mn 1287 drivers/clk/qcom/gcc-msm8960.c .mn = {
mn 1336 drivers/clk/qcom/gcc-msm8960.c .mn = {
mn 1385 drivers/clk/qcom/gcc-msm8960.c .mn = {
mn 1434 drivers/clk/qcom/gcc-msm8960.c .mn = {
mn 1483 drivers/clk/qcom/gcc-msm8960.c .mn = {
mn 1545 drivers/clk/qcom/gcc-msm8960.c .mn = {
mn 1594 drivers/clk/qcom/gcc-msm8960.c .mn = {
mn 1643 drivers/clk/qcom/gcc-msm8960.c .mn = {
mn 1757 drivers/clk/qcom/gcc-msm8960.c .mn = {
mn 1805 drivers/clk/qcom/gcc-msm8960.c .mn = {
mn 1853 drivers/clk/qcom/gcc-msm8960.c .mn = {
mn 1901 drivers/clk/qcom/gcc-msm8960.c .mn = {
mn 1949 drivers/clk/qcom/gcc-msm8960.c .mn = {
mn 2002 drivers/clk/qcom/gcc-msm8960.c .mn = {
mn 2056 drivers/clk/qcom/gcc-msm8960.c .mn = {
mn 2105 drivers/clk/qcom/gcc-msm8960.c .mn = {
mn 2154 drivers/clk/qcom/gcc-msm8960.c .mn = {
mn 2203 drivers/clk/qcom/gcc-msm8960.c .mn = {
mn 2298 drivers/clk/qcom/gcc-msm8960.c .mn = {
mn 2365 drivers/clk/qcom/gcc-msm8960.c .mn = {
mn 111 drivers/clk/qcom/lcc-ipq806x.c .mn = {
mn 225 drivers/clk/qcom/lcc-ipq806x.c .mn = {
mn 305 drivers/clk/qcom/lcc-ipq806x.c .mn = {
mn 364 drivers/clk/qcom/lcc-ipq806x.c .mn = {
mn 94 drivers/clk/qcom/lcc-mdm9615.c .mn = {
mn 200 drivers/clk/qcom/lcc-mdm9615.c .mn = { \
mn 344 drivers/clk/qcom/lcc-mdm9615.c .mn = {
mn 412 drivers/clk/qcom/lcc-mdm9615.c .mn = {
mn 92 drivers/clk/qcom/lcc-msm8960.c .mn = {
mn 198 drivers/clk/qcom/lcc-msm8960.c .mn = { \
mn 342 drivers/clk/qcom/lcc-msm8960.c .mn = {
mn 410 drivers/clk/qcom/lcc-msm8960.c .mn = {
mn 172 drivers/clk/qcom/mmcc-msm8960.c .mn = {
mn 221 drivers/clk/qcom/mmcc-msm8960.c .mn = {
mn 270 drivers/clk/qcom/mmcc-msm8960.c .mn = {
mn 326 drivers/clk/qcom/mmcc-msm8960.c .mn = {
mn 390 drivers/clk/qcom/mmcc-msm8960.c .mn = {
mn 454 drivers/clk/qcom/mmcc-msm8960.c .mn = {
mn 705 drivers/clk/qcom/mmcc-msm8960.c .mn = {
mn 807 drivers/clk/qcom/mmcc-msm8960.c .mn[0] = {
mn 815 drivers/clk/qcom/mmcc-msm8960.c .mn[1] = {
mn 867 drivers/clk/qcom/mmcc-msm8960.c .mn[0] = {
mn 875 drivers/clk/qcom/mmcc-msm8960.c .mn[1] = {
mn 968 drivers/clk/qcom/mmcc-msm8960.c .mn[0] = {
mn 976 drivers/clk/qcom/mmcc-msm8960.c .mn[1] = {
mn 1046 drivers/clk/qcom/mmcc-msm8960.c .mn[0] = {
mn 1054 drivers/clk/qcom/mmcc-msm8960.c .mn[1] = {
mn 1134 drivers/clk/qcom/mmcc-msm8960.c .mn = {
mn 1253 drivers/clk/qcom/mmcc-msm8960.c .mn[0] = {
mn 1261 drivers/clk/qcom/mmcc-msm8960.c .mn[1] = {
mn 1424 drivers/clk/qcom/mmcc-msm8960.c .mn = {
mn 1586 drivers/clk/qcom/mmcc-msm8960.c .mn[0] = {
mn 1594 drivers/clk/qcom/mmcc-msm8960.c .mn[1] = {
mn 1714 drivers/clk/qcom/mmcc-msm8960.c .mn = {
mn 2049 drivers/clk/qcom/mmcc-msm8960.c .mn = {
mn 2097 drivers/clk/qcom/mmcc-msm8960.c .mn = {
mn 2299 drivers/clk/qcom/mmcc-msm8960.c .mn = {
mn 2346 drivers/clk/qcom/mmcc-msm8960.c .mn = {
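The long runs of `.mn = {` hits above, and the `.mn[0] = {` / `.mn[1] = {` pairs for the banked RCGs declared as `struct mn mn[2]` in clk-rcg.h, are per-clock designated initializers for the embedded counter description; the shared clk-rcg.c code then does all register access through it. A hypothetical example of one such initializer: the field names are inferred from the clk-rcg.c accessors indexed above, and every numeric value is a placeholder rather than data for any real SoC:

    #include <stdbool.h>
    #include <stdint.h>

    /* Field set inferred from the clk-rcg.c accessors above; the real
     * definition lives in drivers/clk/qcom/clk-rcg.h. */
    struct mn {
            uint8_t mnctr_en_bit;
            uint8_t mnctr_reset_bit;
            uint8_t mnctr_mode_shift;
            uint8_t n_val_shift;
            uint8_t m_val_shift;
            uint8_t width;
            bool reset_in_cc;
    };

    /* Hypothetical clock description: all bit positions are invented. */
    static const struct mn example_mn = {
            .mnctr_en_bit = 8,
            .mnctr_reset_bit = 7,
            .mnctr_mode_shift = 5,
            .n_val_shift = 16,
            .m_val_shift = 16,
            .width = 8,
    };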
mn 472 drivers/gpu/drm/amd/amdgpu/amdgpu.h struct amdgpu_mn *mn;
mn 607 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c p->mn = amdgpu_mn_get(p->adev, AMDGPU_MN_TYPE_GFX);
mn 1294 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c amdgpu_mn_lock(p->mn);
mn 1337 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c amdgpu_mn_unlock(p->mn);
mn 1343 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c amdgpu_mn_unlock(p->mn);
mn 86 drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c bo->mn = NULL;
mn 118 drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c void amdgpu_mn_lock(struct amdgpu_mn *mn)
mn 120 drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c if (mn)
mn 121 drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c down_write(&mn->lock);
mn 129 drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c void amdgpu_mn_unlock(struct amdgpu_mn *mn)
mn 131 drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c if (mn)
mn 132 drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c up_write(&mn->lock);
mn 412 drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c bo->mn = amn;
mn 442 drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c amn = bo->mn;
mn 453 drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c bo->mn = NULL;
mn 75 drivers/gpu/drm/amd/amdgpu/amdgpu_mn.h void amdgpu_mn_lock(struct amdgpu_mn *mn);
mn 76 drivers/gpu/drm/amd/amdgpu/amdgpu_mn.h void amdgpu_mn_unlock(struct amdgpu_mn *mn);
mn 83 drivers/gpu/drm/amd/amdgpu/amdgpu_mn.h static inline void amdgpu_mn_lock(struct amdgpu_mn *mn) {}
mn 84 drivers/gpu/drm/amd/amdgpu/amdgpu_mn.h static inline void amdgpu_mn_unlock(struct amdgpu_mn *mn) {}
mn 101 drivers/gpu/drm/amd/amdgpu/amdgpu_object.h struct amdgpu_mn *mn;
mn 789 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c struct hmm_mirror *mirror = bo->mn ? &bo->mn->mirror : NULL;
mn 489 drivers/gpu/drm/amd/amdkfd/kfd_process.c static void kfd_process_free_notifier(struct mmu_notifier *mn)
mn 491 drivers/gpu/drm/amd/amdkfd/kfd_process.c kfd_unref_process(container_of(mn, struct kfd_process, mmu_notifier));
mn 494 drivers/gpu/drm/amd/amdkfd/kfd_process.c static void kfd_process_notifier_release(struct mmu_notifier *mn,
mn 504 drivers/gpu/drm/amd/amdkfd/kfd_process.c p = container_of(mn, struct kfd_process, mmu_notifier);
mn 23 drivers/gpu/drm/i915/gem/i915_gem_userptr.c struct i915_mmu_notifier *mn;
mn 35 drivers/gpu/drm/i915/gem/i915_gem_userptr.c struct mmu_notifier mn;
mn 41 drivers/gpu/drm/i915/gem/i915_gem_userptr.c struct i915_mmu_notifier *mn;
mn 49 drivers/gpu/drm/i915/gem/i915_gem_userptr.c interval_tree_insert(&mo->it, &mo->mn->objects);
mn 57 drivers/gpu/drm/i915/gem/i915_gem_userptr.c interval_tree_remove(&mo->it, &mo->mn->objects);
mn 80 drivers/gpu/drm/i915/gem/i915_gem_userptr.c spin_lock(&mo->mn->lock);
mn 85 drivers/gpu/drm/i915/gem/i915_gem_userptr.c spin_unlock(&mo->mn->lock);
mn 92 drivers/gpu/drm/i915/gem/i915_gem_userptr.c struct i915_mmu_notifier *mn =
mn 93 drivers/gpu/drm/i915/gem/i915_gem_userptr.c container_of(_mn, struct i915_mmu_notifier, mn);
mn 99 drivers/gpu/drm/i915/gem/i915_gem_userptr.c if (RB_EMPTY_ROOT(&mn->objects.rb_root))
mn 105 drivers/gpu/drm/i915/gem/i915_gem_userptr.c spin_lock(&mn->lock);
mn 106 drivers/gpu/drm/i915/gem/i915_gem_userptr.c it = interval_tree_iter_first(&mn->objects, range->start, end);
mn 130 drivers/gpu/drm/i915/gem/i915_gem_userptr.c spin_unlock(&mn->lock);
mn 133 drivers/gpu/drm/i915/gem/i915_gem_userptr.c unlock = &mn->mm->i915->drm.struct_mutex;
mn 160 drivers/gpu/drm/i915/gem/i915_gem_userptr.c spin_lock(&mn->lock);
mn 167 drivers/gpu/drm/i915/gem/i915_gem_userptr.c it = interval_tree_iter_first(&mn->objects, range->start, end);
mn 169 drivers/gpu/drm/i915/gem/i915_gem_userptr.c spin_unlock(&mn->lock);
mn 186 drivers/gpu/drm/i915/gem/i915_gem_userptr.c struct i915_mmu_notifier *mn;
mn 188 drivers/gpu/drm/i915/gem/i915_gem_userptr.c mn = kmalloc(sizeof(*mn), GFP_KERNEL);
mn 189 drivers/gpu/drm/i915/gem/i915_gem_userptr.c if (mn == NULL)
mn 192 drivers/gpu/drm/i915/gem/i915_gem_userptr.c spin_lock_init(&mn->lock);
mn 193 drivers/gpu/drm/i915/gem/i915_gem_userptr.c mn->mn.ops = &i915_gem_userptr_notifier;
mn 194 drivers/gpu/drm/i915/gem/i915_gem_userptr.c mn->objects = RB_ROOT_CACHED;
mn 195 drivers/gpu/drm/i915/gem/i915_gem_userptr.c mn->mm = mm;
mn 197 drivers/gpu/drm/i915/gem/i915_gem_userptr.c return mn;
mn 209 drivers/gpu/drm/i915/gem/i915_gem_userptr.c spin_lock(&mo->mn->lock);
mn 211 drivers/gpu/drm/i915/gem/i915_gem_userptr.c spin_unlock(&mo->mn->lock);
mn 218 drivers/gpu/drm/i915/gem/i915_gem_userptr.c struct i915_mmu_notifier *mn;
mn 221 drivers/gpu/drm/i915/gem/i915_gem_userptr.c mn = mm->mn;
mn 222 drivers/gpu/drm/i915/gem/i915_gem_userptr.c if (mn)
mn 223 drivers/gpu/drm/i915/gem/i915_gem_userptr.c return mn;
mn 225 drivers/gpu/drm/i915/gem/i915_gem_userptr.c mn = i915_mmu_notifier_create(mm);
mn 226 drivers/gpu/drm/i915/gem/i915_gem_userptr.c if (IS_ERR(mn))
mn 227 drivers/gpu/drm/i915/gem/i915_gem_userptr.c err = PTR_ERR(mn);
mn 231 drivers/gpu/drm/i915/gem/i915_gem_userptr.c if (mm->mn == NULL && !err) {
mn 233 drivers/gpu/drm/i915/gem/i915_gem_userptr.c err = __mmu_notifier_register(&mn->mn, mm->mm);
mn 236 drivers/gpu/drm/i915/gem/i915_gem_userptr.c mm->mn = fetch_and_zero(&mn);
mn 238 drivers/gpu/drm/i915/gem/i915_gem_userptr.c } else if (mm->mn) {
mn 248 drivers/gpu/drm/i915/gem/i915_gem_userptr.c if (mn && !IS_ERR(mn))
mn 249 drivers/gpu/drm/i915/gem/i915_gem_userptr.c kfree(mn);
mn 251 drivers/gpu/drm/i915/gem/i915_gem_userptr.c return err ? ERR_PTR(err) : mm->mn;
mn 258 drivers/gpu/drm/i915/gem/i915_gem_userptr.c struct i915_mmu_notifier *mn;
mn 267 drivers/gpu/drm/i915/gem/i915_gem_userptr.c mn = i915_mmu_notifier_find(obj->userptr.mm);
mn 268 drivers/gpu/drm/i915/gem/i915_gem_userptr.c if (IS_ERR(mn))
mn 269 drivers/gpu/drm/i915/gem/i915_gem_userptr.c return PTR_ERR(mn);
mn 275 drivers/gpu/drm/i915/gem/i915_gem_userptr.c mo->mn = mn;
mn 286 drivers/gpu/drm/i915/gem/i915_gem_userptr.c i915_mmu_notifier_free(struct i915_mmu_notifier *mn,
mn 289 drivers/gpu/drm/i915/gem/i915_gem_userptr.c if (mn == NULL)
mn 292 drivers/gpu/drm/i915/gem/i915_gem_userptr.c mmu_notifier_unregister(&mn->mn, mm);
mn 293 drivers/gpu/drm/i915/gem/i915_gem_userptr.c kfree(mn);
mn 322 drivers/gpu/drm/i915/gem/i915_gem_userptr.c i915_mmu_notifier_free(struct i915_mmu_notifier *mn,
mn 374 drivers/gpu/drm/i915/gem/i915_gem_userptr.c mm->mn = NULL;
mn 392 drivers/gpu/drm/i915/gem/i915_gem_userptr.c i915_mmu_notifier_free(mm->mn, mm->mm);
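The amdkfd and i915 hits above show the standard embedding idiom for MMU notifiers: the driver wraps a `struct mmu_notifier` inside its own object, attaches the ops table via the embedded member (`mn->mn.ops = &i915_gem_userptr_notifier`), and every callback recovers the outer object with container_of(). A self-contained illustration of just that recovery step, with a local container_of() and dummy types standing in for the kernel ones:

    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    /* Dummy stand-in for the kernel's struct mmu_notifier. */
    struct mmu_notifier {
            const void *ops;
    };

    /* Outer object embedding the notifier, as i915_gem_userptr.c does. */
    struct i915_mmu_notifier {
            int lock;                 /* placeholder for the spinlock */
            struct mmu_notifier mn;   /* embedded member */
    };

    /* A callback only receives the embedded member... */
    static void callback(struct mmu_notifier *_mn)
    {
            /* ...and recovers the containing object from it. */
            struct i915_mmu_notifier *mn =
                    container_of(_mn, struct i915_mmu_notifier, mn);

            printf("outer object at %p\n", (void *)mn);
    }

    int main(void)
    {
            struct i915_mmu_notifier notifier = { 0 };

            callback(&notifier.mn);   /* prints the address of 'notifier' */
            return 0;
    }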
mn 10 drivers/gpu/drm/nouveau/include/nvkm/core/memory.h struct nvkm_mm_node *mn;
mn 38 drivers/gpu/drm/nouveau/nvkm/core/memory.c nvkm_mm_free(&fb->tags, &tags->mn);
mn 61 drivers/gpu/drm/nouveau/nvkm/core/memory.c if (tags->mn && tags->mn->length != nr) {
mn 77 drivers/gpu/drm/nouveau/nvkm/core/memory.c if (!nvkm_mm_head(&fb->tags, 0, 1, nr, nr, 1, &tags->mn)) {
mn 79 drivers/gpu/drm/nouveau/nvkm/core/memory.c clr(device, tags->mn->offset, tags->mn->length);
mn 90 drivers/gpu/drm/nouveau/nvkm/core/memory.c tags->mn = NULL;
mn 34 drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.c struct nvkm_mm_node *mn;
mn 45 drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.c .mem = vram->mn,
mn 54 drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.c return (u64)nvkm_mm_size(nvkm_vram(memory)->mn) << NVKM_RAM_MM_SHIFT;
mn 61 drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.c if (!nvkm_mm_contiguous(vram->mn))
mn 63 drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.c return (u64)nvkm_mm_addr(vram->mn) << NVKM_RAM_MM_SHIFT;
mn 82 drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.c struct nvkm_mm_node *next = vram->mn;
mn 130 drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.c node = &vram->mn;
mn 53 drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c struct nvkm_mm_node *mn;
mn 130 drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c return (u64)gk20a_instobj(memory)->mn->offset << 12;
mn 136 drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c return (u64)gk20a_instobj(memory)->mn->length << 12;
mn 288 drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c .mem = node->mn,
mn 304 drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c dma_free_attrs(dev, (u64)node->base.mn->length << PAGE_SHIFT,
mn 317 drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c struct nvkm_mm_node *r = node->base.mn;
mn 335 drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c for (i = 0; i < node->base.mn->length; i++) {
mn 416 drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c node->base.mn = &node->r;
mn 493 drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c node->base.mn = r;
mn 542 drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c size, align, (u64)node->mn->offset << 12);
mn 299 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgf100.c if (map->tags->mn) {
mn 300 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgf100.c u64 tags = map->tags->mn->offset + (map->offset >> 17);
mn 371 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c if (map->tags->mn) {
mn 372 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c tags = map->tags->mn->offset + (map->offset >> 16);
mn 307 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv50.c if (map->tags->mn) {
mn 308 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv50.c u32 tags = map->tags->mn->offset + (map->offset >> 16);
mn 512 drivers/gpu/drm/radeon/radeon.h struct radeon_mn *mn;
mn 40 drivers/gpu/drm/radeon/radeon_mn.c struct mmu_notifier mn;
mn 63 drivers/gpu/drm/radeon/radeon_mn.c static int radeon_mn_invalidate_range_start(struct mmu_notifier *mn,
mn 66 drivers/gpu/drm/radeon/radeon_mn.c struct radeon_mn *rmn = container_of(mn, struct radeon_mn, mn);
mn 128 drivers/gpu/drm/radeon/radeon_mn.c static void radeon_mn_release(struct mmu_notifier *mn, struct mm_struct *mm)
mn 138 drivers/gpu/drm/radeon/radeon_mn.c radeon_mn_invalidate_range_start(mn, &range);
mn 151 drivers/gpu/drm/radeon/radeon_mn.c return &rmn->mn;
mn 154 drivers/gpu/drm/radeon/radeon_mn.c static void radeon_mn_free_notifier(struct mmu_notifier *mn)
mn 156 drivers/gpu/drm/radeon/radeon_mn.c kfree(container_of(mn, struct radeon_mn, mn));
mn 178 drivers/gpu/drm/radeon/radeon_mn.c struct mmu_notifier *mn;
mn 184 drivers/gpu/drm/radeon/radeon_mn.c mn = mmu_notifier_get(&radeon_mn_ops, current->mm);
mn 185 drivers/gpu/drm/radeon/radeon_mn.c if (IS_ERR(mn))
mn 186 drivers/gpu/drm/radeon/radeon_mn.c return PTR_ERR(mn);
mn 187 drivers/gpu/drm/radeon/radeon_mn.c rmn = container_of(mn, struct radeon_mn, mn);
mn 210 drivers/gpu/drm/radeon/radeon_mn.c bo->mn = rmn;
mn 234 drivers/gpu/drm/radeon/radeon_mn.c struct radeon_mn *rmn = bo->mn;
mn 255 drivers/gpu/drm/radeon/radeon_mn.c mmu_notifier_put(&rmn->mn);
mn 256 drivers/gpu/drm/radeon/radeon_mn.c bo->mn = NULL;
mn 77 drivers/infiniband/core/umem_odp.c static void ib_umem_notifier_release(struct mmu_notifier *mn,
mn 81 drivers/infiniband/core/umem_odp.c container_of(mn, struct ib_ucontext_per_mm, mn);
mn 85 drivers/infiniband/core/umem_odp.c if (!per_mm->mn.users)
mn 116 drivers/infiniband/core/umem_odp.c static int ib_umem_notifier_invalidate_range_start(struct mmu_notifier *mn,
mn 120 drivers/infiniband/core/umem_odp.c container_of(mn, struct ib_ucontext_per_mm, mn);
mn 128 drivers/infiniband/core/umem_odp.c if (!per_mm->mn.users) {
mn 155 drivers/infiniband/core/umem_odp.c static void ib_umem_notifier_invalidate_range_end(struct mmu_notifier *mn,
mn 159 drivers/infiniband/core/umem_odp.c container_of(mn, struct ib_ucontext_per_mm, mn);
mn 161 drivers/infiniband/core/umem_odp.c if (unlikely(!per_mm->mn.users))
mn 185 drivers/infiniband/core/umem_odp.c return &per_mm->mn;
mn 188 drivers/infiniband/core/umem_odp.c static void ib_umem_free_notifier(struct mmu_notifier *mn)
mn 191 drivers/infiniband/core/umem_odp.c container_of(mn, struct ib_ucontext_per_mm, mn);
mn 210 drivers/infiniband/core/umem_odp.c struct mmu_notifier *mn;
mn 255 drivers/infiniband/core/umem_odp.c mn = mmu_notifier_get(&ib_umem_notifiers, umem_odp->umem.owning_mm);
mn 256 drivers/infiniband/core/umem_odp.c if (IS_ERR(mn)) {
mn 257 drivers/infiniband/core/umem_odp.c ret = PTR_ERR(mn);
mn 261 drivers/infiniband/core/umem_odp.c container_of(mn, struct ib_ucontext_per_mm, mn);
mn 476 drivers/infiniband/core/umem_odp.c mmu_notifier_put(&per_mm->mn);
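radeon_mn.c and umem_odp.c above use the reference-counted flavour of the API: mmu_notifier_get() either takes a reference on an already-registered notifier with matching ops or allocates a fresh one through ops->alloc_notifier(), and mmu_notifier_put() drops the reference, freeing through ops->free_notifier() after an SRCU grace period (see the mn->users bookkeeping in the mm/mmu_notifier.c hits further down). A simplified userspace model of that find-or-create logic, with locking, RCU, and error-pointer codes omitted:

    #include <limits.h>
    #include <stdlib.h>

    struct mmu_notifier;

    struct mmu_notifier_ops {
            struct mmu_notifier *(*alloc_notifier)(void);
            void (*free_notifier)(struct mmu_notifier *mn);
    };

    struct mmu_notifier {
            const struct mmu_notifier_ops *ops;
            unsigned int users;
            struct mmu_notifier *next;   /* stand-in for the kernel hlist */
    };

    static struct mmu_notifier *registered;   /* one global "mm" for the demo */

    /* Find-or-create, modelled on mmu_notifier_get()/find_get_mmu_notifier(). */
    static struct mmu_notifier *notifier_get(const struct mmu_notifier_ops *ops)
    {
            struct mmu_notifier *mn;

            for (mn = registered; mn; mn = mn->next) {
                    if (mn->ops != ops)
                            continue;
                    if (mn->users == UINT_MAX)
                            return NULL;   /* kernel returns ERR_PTR(-EOVERFLOW) */
                    mn->users++;
                    return mn;
            }

            mn = ops->alloc_notifier();
            if (!mn)
                    return NULL;
            mn->ops = ops;
            mn->users = 1;
            mn->next = registered;
            registered = mn;
            return mn;
    }

    /* Drop a reference; free through the driver's callback on the last put. */
    static void notifier_put(struct mmu_notifier *mn)
    {
            if (--mn->users)
                    return;
            for (struct mmu_notifier **p = &registered; *p; p = &(*p)->next) {
                    if (*p == mn) {
                            *p = mn->next;   /* no RCU grace period in this model */
                            break;
                    }
            }
            mn->ops->free_notifier(mn);
    }

    static struct mmu_notifier *demo_alloc(void)
    {
            return calloc(1, sizeof(struct mmu_notifier));
    }

    static void demo_free(struct mmu_notifier *mn)
    {
            free(mn);
    }

    static const struct mmu_notifier_ops demo_ops = { demo_alloc, demo_free };

    int main(void)
    {
            struct mmu_notifier *a = notifier_get(&demo_ops);
            struct mmu_notifier *b = notifier_get(&demo_ops); /* same object */

            notifier_put(b);
            notifier_put(a);   /* last put frees via demo_free() */
            return 0;
    }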
mn 56 drivers/infiniband/hw/hfi1/mmu_rb.c struct mmu_notifier mn;
mn 110 drivers/infiniband/hw/hfi1/mmu_rb.c INIT_HLIST_NODE(&handlr->mn.hlist);
mn 112 drivers/infiniband/hw/hfi1/mmu_rb.c handlr->mn.ops = &mn_opts;
mn 119 drivers/infiniband/hw/hfi1/mmu_rb.c ret = mmu_notifier_register(&handlr->mn, handlr->mm);
mn 137 drivers/infiniband/hw/hfi1/mmu_rb.c mmu_notifier_unregister(&handler->mn, handler->mm);
mn 285 drivers/infiniband/hw/hfi1/mmu_rb.c static int mmu_notifier_range_start(struct mmu_notifier *mn,
mn 289 drivers/infiniband/hw/hfi1/mmu_rb.c container_of(mn, struct mmu_rb_handler, mn);
mn 42 drivers/iommu/amd_iommu_v2.c struct mmu_notifier mn; /* mmu_notifier handle */
mn 337 drivers/iommu/amd_iommu_v2.c mmu_notifier_unregister(&pasid_state->mn, pasid_state->mm);
mn 356 drivers/iommu/amd_iommu_v2.c static struct pasid_state *mn_to_state(struct mmu_notifier *mn)
mn 358 drivers/iommu/amd_iommu_v2.c return container_of(mn, struct pasid_state, mn);
mn 361 drivers/iommu/amd_iommu_v2.c static void mn_invalidate_range(struct mmu_notifier *mn,
mn 368 drivers/iommu/amd_iommu_v2.c pasid_state = mn_to_state(mn);
mn 378 drivers/iommu/amd_iommu_v2.c static void mn_release(struct mmu_notifier *mn, struct mm_struct *mm)
mn 386 drivers/iommu/amd_iommu_v2.c pasid_state = mn_to_state(mn);
mn 641 drivers/iommu/amd_iommu_v2.c pasid_state->mn.ops = &iommu_mn;
mn 646 drivers/iommu/amd_iommu_v2.c mmu_notifier_register(&pasid_state->mn, mm);
mn 673 drivers/iommu/amd_iommu_v2.c mmu_notifier_unregister(&pasid_state->mn, mm);
mn 721 drivers/iommu/amd_iommu_v2.c mmu_notifier_unregister(&pasid_state->mn, pasid_state->mm);
mn 166 drivers/iommu/intel-svm.c static void intel_invalidate_range(struct mmu_notifier *mn,
mn 170 drivers/iommu/intel-svm.c struct intel_svm *svm = container_of(mn, struct intel_svm, notifier);
mn 176 drivers/iommu/intel-svm.c static void intel_mm_release(struct mmu_notifier *mn, struct mm_struct *mm)
mn 178 drivers/iommu/intel-svm.c struct intel_svm *svm = container_of(mn, struct intel_svm, notifier);
mn 184 drivers/misc/mic/scif/scif_dma.c static void scif_mmu_notifier_release(struct mmu_notifier *mn,
mn 189 drivers/misc/mic/scif/scif_dma.c mmn = container_of(mn, struct scif_mmu_notif, ep_mmu_notifier);
mn 194 drivers/misc/mic/scif/scif_dma.c static int scif_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
mn 199 drivers/misc/mic/scif/scif_dma.c mmn = container_of(mn, struct scif_mmu_notif, ep_mmu_notifier);
mn 205 drivers/misc/mic/scif/scif_dma.c static void scif_mmu_notifier_invalidate_range_end(struct mmu_notifier *mn,
mn 209 drivers/misc/sgi-gru/grutlbpurge.c static int gru_invalidate_range_start(struct mmu_notifier *mn,
mn 212 drivers/misc/sgi-gru/grutlbpurge.c struct gru_mm_struct *gms = container_of(mn, struct gru_mm_struct,
mn 224 drivers/misc/sgi-gru/grutlbpurge.c static void gru_invalidate_range_end(struct mmu_notifier *mn,
mn 227 drivers/misc/sgi-gru/grutlbpurge.c struct gru_mm_struct *gms = container_of(mn, struct gru_mm_struct,
mn 252 drivers/misc/sgi-gru/grutlbpurge.c static void gru_free_notifier(struct mmu_notifier *mn)
mn 254 drivers/misc/sgi-gru/grutlbpurge.c kfree(container_of(mn, struct gru_mm_struct, ms_notifier));
mn 267 drivers/misc/sgi-gru/grutlbpurge.c struct mmu_notifier *mn;
mn 269 drivers/misc/sgi-gru/grutlbpurge.c mn = mmu_notifier_get_locked(&gru_mmuops, current->mm);
mn 270 drivers/misc/sgi-gru/grutlbpurge.c if (IS_ERR(mn))
mn 271 drivers/misc/sgi-gru/grutlbpurge.c return ERR_CAST(mn);
mn 273 drivers/misc/sgi-gru/grutlbpurge.c return container_of(mn, struct gru_mm_struct, ms_notifier);
mn 233 drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h u8 mn[ID_LEN + 1];
mn 2336 drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c memcpy(vpd_data->mn, vpd.id, ID_LEN + 1);
mn 24 drivers/net/ethernet/mellanox/mlx5/core/diag/en_rep_tracepoint.h TP_fast_assign(const struct mlx5e_neigh *mn = &nhe->m_neigh;
mn 28 drivers/net/ethernet/mellanox/mlx5/core/diag/en_rep_tracepoint.h __assign_str(devname, mn->dev->name);
mn 34 drivers/net/ethernet/mellanox/mlx5/core/diag/en_rep_tracepoint.h if (mn->family == AF_INET) {
mn 35 drivers/net/ethernet/mellanox/mlx5/core/diag/en_rep_tracepoint.h *p32 = mn->dst_ip.v4;
mn 37 drivers/net/ethernet/mellanox/mlx5/core/diag/en_rep_tracepoint.h } else if (mn->family == AF_INET6) {
mn 38 drivers/net/ethernet/mellanox/mlx5/core/diag/en_rep_tracepoint.h *pin6 = mn->dst_ip.v6;
mn 85 drivers/net/ethernet/mellanox/mlx5/core/diag/en_tc_tracepoint.h TP_fast_assign(const struct mlx5e_neigh *mn = &nhe->m_neigh;
mn 89 drivers/net/ethernet/mellanox/mlx5/core/diag/en_tc_tracepoint.h __assign_str(devname, mn->dev->name);
mn 94 drivers/net/ethernet/mellanox/mlx5/core/diag/en_tc_tracepoint.h if (mn->family == AF_INET) {
mn 95 drivers/net/ethernet/mellanox/mlx5/core/diag/en_tc_tracepoint.h *p32 = mn->dst_ip.v4;
mn 97 drivers/net/ethernet/mellanox/mlx5/core/diag/en_tc_tracepoint.h } else if (mn->family == AF_INET6) {
mn 98 drivers/net/ethernet/mellanox/mlx5/core/diag/en_tc_tracepoint.h *pin6 = mn->dst_ip.v6;
mn 1285 drivers/net/ethernet/ni/nixge.c struct device_node *mn, *phy_node;
mn 1339 drivers/net/ethernet/ni/nixge.c mn = of_get_child_by_name(pdev->dev.of_node, "mdio");
mn 1340 drivers/net/ethernet/ni/nixge.c if (mn) {
mn 1341 drivers/net/ethernet/ni/nixge.c err = nixge_mdio_setup(priv, mn);
mn 1342 drivers/net/ethernet/ni/nixge.c of_node_put(mn);
mn 2402 drivers/nvme/host/core.c const char *mn;
mn 2414 drivers/nvme/host/core.c .mn = "THNSF5256GPUK TOSHIBA",
mn 2454 drivers/nvme/host/core.c string_matches(id->mn, q->mn, sizeof(id->mn)) &&
mn 2481 drivers/nvme/host/core.c memcpy(subsys->subnqn + off, id->mn, sizeof(id->mn));
mn 2482 drivers/nvme/host/core.c off += sizeof(id->mn);
mn 2640 drivers/nvme/host/core.c memcpy(subsys->model, id->mn, sizeof(subsys->model));
mn 305 drivers/nvme/target/admin-cmd.c memcpy_and_pad(id->mn, sizeof(id->mn), model, sizeof(model) - 1, ' ');
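The nvme `mn` above is the Identify Controller model-number field, declared as `char mn[40]` in include/linux/nvme.h: fixed-width ASCII, space-padded, not NUL-terminated. That is why the target side fills it with memcpy_and_pad() and why the host's quirk table compares it with a helper (string_matches() above) rather than strcmp(). A small sketch of handling such a field; field_matches() here is a hypothetical helper written for this example, standing in for the kernel's padding-aware comparison:

    #include <stdio.h>
    #include <string.h>

    /* Fill a fixed-width, space-padded field from a C string, as
     * memcpy_and_pad() does in drivers/nvme/target/admin-cmd.c. */
    static void pad_field(char *dst, size_t dst_len, const char *src)
    {
            size_t n = strlen(src);

            if (n > dst_len)
                    n = dst_len;
            memcpy(dst, src, n);
            memset(dst + n, ' ', dst_len - n);
    }

    /* Compare a C string against a space-padded field: the tail of the
     * field may only contain padding.  (Hypothetical helper.) */
    static int field_matches(const char *field, const char *s, size_t len)
    {
            size_t n = strlen(s);

            if (n > len || memcmp(field, s, n) != 0)
                    return 0;
            for (size_t i = n; i < len; i++)
                    if (field[i] != ' ')
                            return 0;
            return 1;
    }

    int main(void)
    {
            char mn[40];   /* matches 'char mn[40]' in include/linux/nvme.h */

            pad_field(mn, sizeof(mn), "THNSF5256GPUK TOSHIBA");
            printf("%d\n", field_matches(mn, "THNSF5256GPUK TOSHIBA",
                                         sizeof(mn)));   /* prints 1 */
            return 0;
    }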
mn 991 drivers/scsi/qla2xxx/qla_bsg.c struct verify_chip_entry_84xx *mn = NULL;
mn 1037 drivers/scsi/qla2xxx/qla_bsg.c mn = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma);
mn 1038 drivers/scsi/qla2xxx/qla_bsg.c if (!mn) {
mn 1048 drivers/scsi/qla2xxx/qla_bsg.c mn->entry_type = VERIFY_CHIP_IOCB_TYPE;
mn 1049 drivers/scsi/qla2xxx/qla_bsg.c mn->entry_count = 1;
mn 1055 drivers/scsi/qla2xxx/qla_bsg.c mn->options = cpu_to_le16(options);
mn 1056 drivers/scsi/qla2xxx/qla_bsg.c mn->fw_ver = cpu_to_le32(fw_ver);
mn 1057 drivers/scsi/qla2xxx/qla_bsg.c mn->fw_size = cpu_to_le32(data_len);
mn 1058 drivers/scsi/qla2xxx/qla_bsg.c mn->fw_seq_size = cpu_to_le32(data_len);
mn 1059 drivers/scsi/qla2xxx/qla_bsg.c put_unaligned_le64(fw_dma, &mn->dsd.address);
mn 1060 drivers/scsi/qla2xxx/qla_bsg.c mn->dsd.length = cpu_to_le32(data_len);
mn 1061 drivers/scsi/qla2xxx/qla_bsg.c mn->data_seg_cnt = cpu_to_le16(1);
mn 1063 drivers/scsi/qla2xxx/qla_bsg.c rval = qla2x00_issue_iocb_timeout(vha, mn, mn_dma, 0, 120);
mn 1078 drivers/scsi/qla2xxx/qla_bsg.c dma_pool_free(ha->s_dma_pool, mn, mn_dma);
mn 1101 drivers/scsi/qla2xxx/qla_bsg.c struct access_chip_84xx *mn = NULL;
mn 1116 drivers/scsi/qla2xxx/qla_bsg.c mn = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma);
mn 1117 drivers/scsi/qla2xxx/qla_bsg.c if (!mn) {
mn 1123 drivers/scsi/qla2xxx/qla_bsg.c mn->entry_type = ACCESS_CHIP_IOCB_TYPE;
mn 1124 drivers/scsi/qla2xxx/qla_bsg.c mn->entry_count = 1;
mn 1162 drivers/scsi/qla2xxx/qla_bsg.c mn->options = cpu_to_le16(ACO_DUMP_MEMORY);
mn 1163 drivers/scsi/qla2xxx/qla_bsg.c mn->parameter1 =
mn 1168 drivers/scsi/qla2xxx/qla_bsg.c mn->options = cpu_to_le16(ACO_REQUEST_INFO);
mn 1169 drivers/scsi/qla2xxx/qla_bsg.c mn->parameter1 =
mn 1172 drivers/scsi/qla2xxx/qla_bsg.c mn->parameter2 =
mn 1214 drivers/scsi/qla2xxx/qla_bsg.c mn->options = cpu_to_le16(ACO_LOAD_MEMORY);
mn 1215 drivers/scsi/qla2xxx/qla_bsg.c mn->parameter1 =
mn 1220 drivers/scsi/qla2xxx/qla_bsg.c mn->options = cpu_to_le16(ACO_CHANGE_CONFIG_PARAM);
mn 1221 drivers/scsi/qla2xxx/qla_bsg.c mn->parameter1 =
mn 1224 drivers/scsi/qla2xxx/qla_bsg.c mn->parameter2 =
mn 1227 drivers/scsi/qla2xxx/qla_bsg.c mn->parameter3 =
mn 1237 drivers/scsi/qla2xxx/qla_bsg.c mn->total_byte_cnt = cpu_to_le32(ql84_mgmt->mgmt.len);
mn 1238 drivers/scsi/qla2xxx/qla_bsg.c mn->dseg_count = cpu_to_le16(1);
mn 1239 drivers/scsi/qla2xxx/qla_bsg.c put_unaligned_le64(mgmt_dma, &mn->dsd.address);
mn 1240 drivers/scsi/qla2xxx/qla_bsg.c mn->dsd.length = cpu_to_le32(ql84_mgmt->mgmt.len);
mn 1243 drivers/scsi/qla2xxx/qla_bsg.c rval = qla2x00_issue_iocb(vha, mn, mn_dma, 0);
mn 1281 drivers/scsi/qla2xxx/qla_bsg.c dma_pool_free(ha->s_dma_pool, mn, mn_dma);
mn 4245 drivers/scsi/qla2xxx/qla_mbx.c struct cs84xx_mgmt_cmd *mn;
mn 4254 drivers/scsi/qla2xxx/qla_mbx.c mn = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma);
mn 4255 drivers/scsi/qla2xxx/qla_mbx.c if (mn == NULL) {
mn 4268 drivers/scsi/qla2xxx/qla_mbx.c memset(mn, 0, sizeof(*mn));
mn 4269 drivers/scsi/qla2xxx/qla_mbx.c mn->p.req.entry_type = VERIFY_CHIP_IOCB_TYPE;
mn 4270 drivers/scsi/qla2xxx/qla_mbx.c mn->p.req.entry_count = 1;
mn 4271 drivers/scsi/qla2xxx/qla_mbx.c mn->p.req.options = cpu_to_le16(options);
mn 4276 drivers/scsi/qla2xxx/qla_mbx.c mn, sizeof(*mn));
mn 4278 drivers/scsi/qla2xxx/qla_mbx.c rval = qla2x00_issue_iocb_timeout(vha, mn, mn_dma, 0, 120);
mn 4288 drivers/scsi/qla2xxx/qla_mbx.c mn, sizeof(*mn));
mn 4290 drivers/scsi/qla2xxx/qla_mbx.c status[0] = le16_to_cpu(mn->p.rsp.comp_status);
mn 4292 drivers/scsi/qla2xxx/qla_mbx.c le16_to_cpu(mn->p.rsp.failure_code) : 0;
mn 4309 drivers/scsi/qla2xxx/qla_mbx.c le32_to_cpu(mn->p.rsp.fw_ver));
mn 4314 drivers/scsi/qla2xxx/qla_mbx.c le32_to_cpu(mn->p.rsp.fw_ver);
mn 4321 drivers/scsi/qla2xxx/qla_mbx.c dma_pool_free(ha->s_dma_pool, mn, mn_dma);
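The qla_bsg.c and qla_mbx.c hits above all follow one shape: allocate a command block from a DMA-coherent pool (dma_pool_zalloc(), or dma_pool_alloc() plus memset()), fill in the entry type/count and little-endian parameters, point the data-segment descriptor at a separate DMA buffer, then hand the block to the firmware via qla2x00_issue_iocb(). A schematic of the fill step only; the struct layout and the IOCB type value are invented stand-ins for the qla2xxx definitions, and the cpu_to_le*() helpers are identities on a little-endian host:

    #include <stdint.h>
    #include <string.h>

    /* Identity on little-endian hosts; the kernel macros byte-swap on BE. */
    static uint32_t cpu_to_le32(uint32_t v) { return v; }
    static uint16_t cpu_to_le16(uint16_t v) { return v; }

    /* Invented stand-ins for the qla2xxx verify-chip IOCB layout. */
    struct dsd64 {
            uint64_t address;   /* DMA address of the data buffer */
            uint32_t length;
    };

    struct verify_chip_entry {
            uint8_t entry_type;
            uint8_t entry_count;
            uint16_t options;
            uint32_t fw_ver;
            uint32_t fw_size;
            uint32_t fw_seq_size;
            uint16_t data_seg_cnt;
            struct dsd64 dsd;
    };

    #define VERIFY_CHIP_IOCB_TYPE 0x1B   /* placeholder value */

    /* Mirrors the fill sequence indexed above from qla_bsg.c. */
    static void fill_verify_chip(struct verify_chip_entry *mn,
                                 uint16_t options, uint32_t fw_ver,
                                 uint32_t data_len, uint64_t fw_dma)
    {
            memset(mn, 0, sizeof(*mn)); /* dma_pool_zalloc() returns zeroed mem */
            mn->entry_type = VERIFY_CHIP_IOCB_TYPE;
            mn->entry_count = 1;
            mn->options = cpu_to_le16(options);
            mn->fw_ver = cpu_to_le32(fw_ver);
            mn->fw_size = cpu_to_le32(data_len);
            mn->fw_seq_size = cpu_to_le32(data_len);
            mn->dsd.address = fw_dma;   /* put_unaligned_le64() in the driver */
            mn->dsd.length = cpu_to_le32(data_len);
            mn->data_seg_cnt = cpu_to_le16(1);
            /* ...then qla2x00_issue_iocb_timeout(vha, mn, mn_dma, 0, 120). */
    }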
mn 430 drivers/video/fbdev/mmp/hw/mmp_ctrl.h #define CFG_HWC_1BITENA(mn) ((mn)<<25)
mn 221 drivers/video/fbdev/pxa168fb.h #define CFG_HWC_1BITENA(mn) ((mn) << 25)
mn 32 drivers/xen/gntdev-common.h struct mmu_notifier mn;
mn 518 drivers/xen/gntdev.c static int mn_invl_range_start(struct mmu_notifier *mn,
mn 521 drivers/xen/gntdev.c struct gntdev_priv *priv = container_of(mn, struct gntdev_priv, mn);
mn 549 drivers/xen/gntdev.c static void mn_release(struct mmu_notifier *mn,
mn 552 drivers/xen/gntdev.c struct gntdev_priv *priv = container_of(mn, struct gntdev_priv, mn);
mn 613 drivers/xen/gntdev.c priv->mn.ops = &gntdev_mmu_ops;
mn 614 drivers/xen/gntdev.c ret = mmu_notifier_register(&priv->mn, priv->mm);
mn 655 drivers/xen/gntdev.c mmu_notifier_unregister(&priv->mn, priv->mm);
mn 97 include/linux/mmu_notifier.h void (*release)(struct mmu_notifier *mn,
mn 109 include/linux/mmu_notifier.h int (*clear_flush_young)(struct mmu_notifier *mn,
mn 119 include/linux/mmu_notifier.h int (*clear_young)(struct mmu_notifier *mn,
mn 130 include/linux/mmu_notifier.h int (*test_young)(struct mmu_notifier *mn,
mn 138 include/linux/mmu_notifier.h void (*change_pte)(struct mmu_notifier *mn,
mn 193 include/linux/mmu_notifier.h int (*invalidate_range_start)(struct mmu_notifier *mn,
mn 195 include/linux/mmu_notifier.h void (*invalidate_range_end)(struct mmu_notifier *mn,
mn 216 include/linux/mmu_notifier.h void (*invalidate_range)(struct mmu_notifier *mn, struct mm_struct *mm,
mn 230 include/linux/mmu_notifier.h void (*free_notifier)(struct mmu_notifier *mn);
mn 269 include/linux/mmu_notifier.h void mmu_notifier_put(struct mmu_notifier *mn);
mn 272 include/linux/mmu_notifier.h extern int mmu_notifier_register(struct mmu_notifier *mn,
mn 274 include/linux/mmu_notifier.h extern int __mmu_notifier_register(struct mmu_notifier *mn,
mn 276 include/linux/mmu_notifier.h extern void mmu_notifier_unregister(struct mmu_notifier *mn,
mn 208 include/linux/nvme.h char mn[40];
mn 125 include/rdma/ib_umem_odp.h struct mmu_notifier mn;
mn 46 mm/hmm.c static void hmm_free_notifier(struct mmu_notifier *mn)
mn 48 mm/hmm.c struct hmm *hmm = container_of(mn, struct hmm, mmu_notifier);
mn 55 mm/hmm.c static void hmm_release(struct mmu_notifier *mn, struct mm_struct *mm)
mn 57 mm/hmm.c struct hmm *hmm = container_of(mn, struct hmm, mmu_notifier);
mn 97 mm/hmm.c static int hmm_invalidate_range_start(struct mmu_notifier *mn,
mn 100 mm/hmm.c struct hmm *hmm = container_of(mn, struct hmm, mmu_notifier);
mn 143 mm/hmm.c static void hmm_invalidate_range_end(struct mmu_notifier *mn,
mn 146 mm/hmm.c struct hmm *hmm = container_of(mn, struct hmm, mmu_notifier);
mn 177 mm/hmm.c struct mmu_notifier *mn;
mn 185 mm/hmm.c mn = mmu_notifier_get_locked(&hmm_mmu_notifier_ops, mm);
mn 186 mm/hmm.c if (IS_ERR(mn))
mn 187 mm/hmm.c return PTR_ERR(mn);
mn 188 mm/hmm.c mirror->hmm = container_of(mn, struct hmm, mmu_notifier);
mn 2797 mm/ksm.c struct memory_notify *mn = arg;
mn 2821 mm/ksm.c ksm_check_stable_tree(mn->start_pfn,
mn 2822 mm/ksm.c mn->start_pfn + mn->nr_pages);
mn 44 mm/mmu_notifier.c struct mmu_notifier *mn;
mn 52 mm/mmu_notifier.c hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist)
mn 59 mm/mmu_notifier.c if (mn->ops->release)
mn 60 mm/mmu_notifier.c mn->ops->release(mn, mm);
mn 64 mm/mmu_notifier.c mn = hlist_entry(mm->mmu_notifier_mm->list.first,
mn 73 mm/mmu_notifier.c hlist_del_init_rcu(&mn->hlist);
mn 99 mm/mmu_notifier.c struct mmu_notifier *mn;
mn 103 mm/mmu_notifier.c hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {
mn 104 mm/mmu_notifier.c if (mn->ops->clear_flush_young)
mn 105 mm/mmu_notifier.c young |= mn->ops->clear_flush_young(mn, mm, start, end);
mn 116 mm/mmu_notifier.c struct mmu_notifier *mn;
mn 120 mm/mmu_notifier.c hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {
mn 121 mm/mmu_notifier.c if (mn->ops->clear_young)
mn 122 mm/mmu_notifier.c young |= mn->ops->clear_young(mn, mm, start, end);
mn 132 mm/mmu_notifier.c struct mmu_notifier *mn;
mn 136 mm/mmu_notifier.c hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {
mn 137 mm/mmu_notifier.c if (mn->ops->test_young) {
mn 138 mm/mmu_notifier.c young = mn->ops->test_young(mn, mm, address);
mn 151 mm/mmu_notifier.c struct mmu_notifier *mn;
mn 155 mm/mmu_notifier.c hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {
mn 156 mm/mmu_notifier.c if (mn->ops->change_pte)
mn 157 mm/mmu_notifier.c mn->ops->change_pte(mn, mm, address, pte);
mn 164 mm/mmu_notifier.c struct mmu_notifier *mn;
mn 169 mm/mmu_notifier.c hlist_for_each_entry_rcu(mn, &range->mm->mmu_notifier_mm->list, hlist) {
mn 170 mm/mmu_notifier.c if (mn->ops->invalidate_range_start) {
mn 175 mm/mmu_notifier.c _ret = mn->ops->invalidate_range_start(mn, range);
mn 180 mm/mmu_notifier.c mn->ops->invalidate_range_start, _ret,
mn 196 mm/mmu_notifier.c struct mmu_notifier *mn;
mn 201 mm/mmu_notifier.c hlist_for_each_entry_rcu(mn, &range->mm->mmu_notifier_mm->list, hlist) {
mn 215 mm/mmu_notifier.c if (!only_end && mn->ops->invalidate_range)
mn 216 mm/mmu_notifier.c mn->ops->invalidate_range(mn, range->mm,
mn 219 mm/mmu_notifier.c if (mn->ops->invalidate_range_end) {
mn 222 mm/mmu_notifier.c mn->ops->invalidate_range_end(mn, range);
mn 234 mm/mmu_notifier.c struct mmu_notifier *mn;
mn 238 mm/mmu_notifier.c hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {
mn 239 mm/mmu_notifier.c if (mn->ops->invalidate_range)
mn 240 mm/mmu_notifier.c mn->ops->invalidate_range(mn, mm, start, end);
mn 249 mm/mmu_notifier.c int __mmu_notifier_register(struct mmu_notifier *mn, struct mm_struct *mm)
mn 264 mm/mmu_notifier.c mn->mm = mm;
mn 265 mm/mmu_notifier.c mn->users = 1;
mn 301 mm/mmu_notifier.c hlist_add_head_rcu(&mn->hlist, &mm->mmu_notifier_mm->list);
mn 333 mm/mmu_notifier.c int mmu_notifier_register(struct mmu_notifier *mn, struct mm_struct *mm)
mn 338 mm/mmu_notifier.c ret = __mmu_notifier_register(mn, mm);
mn 347 mm/mmu_notifier.c struct mmu_notifier *mn;
mn 350 mm/mmu_notifier.c hlist_for_each_entry_rcu (mn, &mm->mmu_notifier_mm->list, hlist) {
mn 351 mm/mmu_notifier.c if (mn->ops != ops)
mn 354 mm/mmu_notifier.c if (likely(mn->users != UINT_MAX))
mn 355 mm/mmu_notifier.c mn->users++;
mn 357 mm/mmu_notifier.c mn = ERR_PTR(-EOVERFLOW);
mn 359 mm/mmu_notifier.c return mn;
mn 385 mm/mmu_notifier.c struct mmu_notifier *mn;
mn 391 mm/mmu_notifier.c mn = find_get_mmu_notifier(mm, ops);
mn 392 mm/mmu_notifier.c if (mn)
mn 393 mm/mmu_notifier.c return mn;
mn 396 mm/mmu_notifier.c mn = ops->alloc_notifier(mm);
mn 397 mm/mmu_notifier.c if (IS_ERR(mn))
mn 398 mm/mmu_notifier.c return mn;
mn 399 mm/mmu_notifier.c mn->ops = ops;
mn 400 mm/mmu_notifier.c ret = __mmu_notifier_register(mn, mm);
mn 403 mm/mmu_notifier.c return mn;
mn 405 mm/mmu_notifier.c mn->ops->free_notifier(mn);
mn 428 mm/mmu_notifier.c void mmu_notifier_unregister(struct mmu_notifier *mn, struct mm_struct *mm)
mn 432 mm/mmu_notifier.c if (!hlist_unhashed(&mn->hlist)) {
mn 444 mm/mmu_notifier.c if (mn->ops->release)
mn 445 mm/mmu_notifier.c mn->ops->release(mn, mm);
mn 453 mm/mmu_notifier.c hlist_del_init_rcu(&mn->hlist);
mn 471 mm/mmu_notifier.c struct mmu_notifier *mn = container_of(rcu, struct mmu_notifier, rcu);
mn 472 mm/mmu_notifier.c struct mm_struct *mm = mn->mm;
mn 474 mm/mmu_notifier.c mn->ops->free_notifier(mn);
mn 501 mm/mmu_notifier.c void mmu_notifier_put(struct mmu_notifier *mn)
mn 503 mm/mmu_notifier.c struct mm_struct *mm = mn->mm;
mn 506 mm/mmu_notifier.c if (WARN_ON(!mn->users) || --mn->users)
mn 508 mm/mmu_notifier.c hlist_del_init_rcu(&mn->hlist);
mn 511 mm/mmu_notifier.c call_srcu(&srcu, &mn->rcu, mmu_notifier_free_rcu);
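The mm/mmu_notifier.c hits above are the dispatch side of the interface: each hook walks the per-mm list of registered notifiers under SRCU and invokes a callback only if the notifier's ops table provides it, OR-ing results together where the hook returns one. A simplified model of that walk, with a plain singly linked list standing in for the kernel's RCU-protected hlist:

    #include <stddef.h>

    struct mmu_notifier;

    struct mmu_notifier_ops {
            /* Optional hooks; a NULL pointer means "not interested". */
            int (*clear_young)(struct mmu_notifier *mn,
                               unsigned long start, unsigned long end);
            void (*invalidate_range)(struct mmu_notifier *mn,
                                     unsigned long start, unsigned long end);
    };

    struct mmu_notifier {
            const struct mmu_notifier_ops *ops;
            struct mmu_notifier *next;   /* stand-in for the RCU hlist */
    };

    /* Modelled on __mmu_notifier_clear_young(): poll every registered
     * notifier and OR the results together. */
    static int notifiers_clear_young(struct mmu_notifier *list,
                                     unsigned long start, unsigned long end)
    {
            int young = 0;

            for (struct mmu_notifier *mn = list; mn; mn = mn->next)
                    if (mn->ops->clear_young)
                            young |= mn->ops->clear_young(mn, start, end);
            return young;
    }

    /* Modelled on __mmu_notifier_invalidate_range(). */
    static void notifiers_invalidate_range(struct mmu_notifier *list,
                                           unsigned long start,
                                           unsigned long end)
    {
            for (struct mmu_notifier *mn = list; mn; mn = mn->next)
                    if (mn->ops->invalidate_range)
                            mn->ops->invalidate_range(mn, start, end);
    }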
mn 338 mm/page_ext.c struct memory_notify *mn = arg;
mn 343 mm/page_ext.c ret = online_page_ext(mn->start_pfn,
mn 344 mm/page_ext.c mn->nr_pages, mn->status_change_nid);
mn 347 mm/page_ext.c offline_page_ext(mn->start_pfn,
mn 348 mm/page_ext.c mn->nr_pages, mn->status_change_nid);
mn 351 mm/page_ext.c offline_page_ext(mn->start_pfn,
mn 352 mm/page_ext.c mn->nr_pages, mn->status_change_nid);
mn 277 mm/vmscan.c struct mem_cgroup_per_node *mn;
mn 282 mm/vmscan.c mn = mem_cgroup_nodeinfo(memcg, pgdat->node_id);
mn 283 mm/vmscan.c WRITE_ONCE(mn->congested, congested);
mn 289 mm/vmscan.c struct mem_cgroup_per_node *mn;
mn 291 mm/vmscan.c mn = mem_cgroup_nodeinfo(memcg, pgdat->node_id);
mn 292 mm/vmscan.c return READ_ONCE(mn->congested);
mn 228 net/sched/sch_teql.c struct neighbour *mn;
mn 230 net/sched/sch_teql.c mn = __neigh_lookup_errno(n->tbl, n->primary_key, dev);
mn 232 net/sched/sch_teql.c if (IS_ERR(mn))
mn 233 net/sched/sch_teql.c return PTR_ERR(mn);
mn 234 net/sched/sch_teql.c n = mn;
mn 378 virt/kvm/kvm_main.c static inline struct kvm *mmu_notifier_to_kvm(struct mmu_notifier *mn)
mn 380 virt/kvm/kvm_main.c return container_of(mn, struct kvm, mmu_notifier);
mn 383 virt/kvm/kvm_main.c static void kvm_mmu_notifier_invalidate_range(struct mmu_notifier *mn,
mn 387 virt/kvm/kvm_main.c struct kvm *kvm = mmu_notifier_to_kvm(mn);
mn 395 virt/kvm/kvm_main.c static void kvm_mmu_notifier_change_pte(struct mmu_notifier *mn,
mn 400 virt/kvm/kvm_main.c struct kvm *kvm = mmu_notifier_to_kvm(mn);
mn 414 virt/kvm/kvm_main.c static int kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
mn 417 virt/kvm/kvm_main.c struct kvm *kvm = mmu_notifier_to_kvm(mn);
mn 440 virt/kvm/kvm_main.c static void kvm_mmu_notifier_invalidate_range_end(struct mmu_notifier *mn,
mn 443 virt/kvm/kvm_main.c struct kvm *kvm = mmu_notifier_to_kvm(mn);
mn 464 virt/kvm/kvm_main.c static int kvm_mmu_notifier_clear_flush_young(struct mmu_notifier *mn,
mn 469 virt/kvm/kvm_main.c struct kvm *kvm = mmu_notifier_to_kvm(mn);
mn 485 virt/kvm/kvm_main.c static int kvm_mmu_notifier_clear_young(struct mmu_notifier *mn,
mn 490 virt/kvm/kvm_main.c struct kvm *kvm = mmu_notifier_to_kvm(mn);
mn 515 virt/kvm/kvm_main.c static int kvm_mmu_notifier_test_young(struct mmu_notifier *mn,
mn 519 virt/kvm/kvm_main.c struct kvm *kvm = mmu_notifier_to_kvm(mn);
mn 531 virt/kvm/kvm_main.c static void kvm_mmu_notifier_release(struct mmu_notifier *mn,
mn 534 virt/kvm/kvm_main.c struct kvm *kvm = mmu_notifier_to_kvm(mn);