root/fs/xfs/kmem.c


DEFINITIONS

This source file includes the following definitions:
  1. kmem_alloc
  2. __kmem_vmalloc
  3. kmem_alloc_io
  4. kmem_alloc_large
  5. kmem_realloc
  6. kmem_zone_alloc

// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include <linux/backing-dev.h>
#include "xfs_message.h"
#include "xfs_trace.h"

void *
kmem_alloc(size_t size, xfs_km_flags_t flags)
{
        int     retries = 0;
        gfp_t   lflags = kmem_flags_convert(flags);
        void    *ptr;

        trace_kmem_alloc(size, flags, _RET_IP_);

        do {
                ptr = kmalloc(size, lflags);
                if (ptr || (flags & KM_MAYFAIL))
                        return ptr;
                if (!(++retries % 100))
                        xfs_err(NULL,
        "%s(%u) possible memory allocation deadlock size %u in %s (mode:0x%x)",
                                current->comm, current->pid,
                                (unsigned int)size, __func__, lflags);
                congestion_wait(BLK_RW_ASYNC, HZ/50);
        } while (1);
}
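
/*
 * Usage sketch (illustrative, not part of the original file): a caller
 * that must not recurse into the filesystem passes KM_NOFS, and one
 * that can tolerate failure adds KM_MAYFAIL to bypass the retry loop
 * above. The function name and size here are hypothetical.
 */
static void * __maybe_unused
kmem_alloc_example(void)
{
        /* Fail fast under memory pressure instead of retrying forever. */
        return kmem_alloc(512, KM_NOFS | KM_MAYFAIL);
}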
/*
 * __vmalloc() will allocate data pages and auxiliary structures (e.g.
 * pagetables) with GFP_KERNEL, yet we may be under GFP_NOFS context here. Hence
 * we need to tell memory reclaim that we are in such a context via
 * PF_MEMALLOC_NOFS to prevent memory reclaim re-entering the filesystem here
 * and potentially deadlocking.
 */
static void *
__kmem_vmalloc(size_t size, xfs_km_flags_t flags)
{
        unsigned nofs_flag = 0;
        void    *ptr;
        gfp_t   lflags = kmem_flags_convert(flags);

        if (flags & KM_NOFS)
                nofs_flag = memalloc_nofs_save();

        ptr = __vmalloc(size, lflags, PAGE_KERNEL);

        if (flags & KM_NOFS)
                memalloc_nofs_restore(nofs_flag);

        return ptr;
}
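
/*
 * Illustrative sketch of the PF_MEMALLOC_NOFS scoping described above,
 * applied to a plain kmalloc() (hypothetical caller, not in the
 * original file): every allocation between save and restore is
 * implicitly GFP_NOFS, so reclaim cannot re-enter the filesystem.
 */
static void * __maybe_unused
nofs_scope_example(size_t size)
{
        unsigned int    nofs_flag = memalloc_nofs_save();
        void            *ptr = kmalloc(size, GFP_KERNEL);

        memalloc_nofs_restore(nofs_flag);
        return ptr;
}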

/*
 * Same as kmem_alloc_large, except we guarantee the buffer returned is aligned
 * to the @align_mask. We only guarantee alignment up to page size; we'll clamp
 * alignment at page size if it is larger. vmalloc always returns a PAGE_SIZE
 * aligned region.
 */
void *
kmem_alloc_io(size_t size, int align_mask, xfs_km_flags_t flags)
{
        void    *ptr;

        trace_kmem_alloc_io(size, flags, _RET_IP_);

        if (WARN_ON_ONCE(align_mask >= PAGE_SIZE))
                align_mask = PAGE_SIZE - 1;

        ptr = kmem_alloc(size, flags | KM_MAYFAIL);
        if (ptr) {
                if (!((uintptr_t)ptr & align_mask))
                        return ptr;
                kfree(ptr);
        }
        return __kmem_vmalloc(size, flags);
}
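
/*
 * Usage sketch (hypothetical): request a 4k buffer aligned to a
 * 512 byte sector so it can be handed straight to the block layer.
 * An align_mask of (512 - 1) encodes "aligned to 512 bytes".
 */
static void * __maybe_unused
kmem_alloc_io_example(void)
{
        return kmem_alloc_io(4096, 512 - 1, KM_NOFS);
}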

void *
kmem_alloc_large(size_t size, xfs_km_flags_t flags)
{
        void    *ptr;

        trace_kmem_alloc_large(size, flags, _RET_IP_);

        ptr = kmem_alloc(size, flags | KM_MAYFAIL);
        if (ptr)
                return ptr;
        return __kmem_vmalloc(size, flags);
}
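
/*
 * Usage sketch (hypothetical): try a physically contiguous buffer
 * first, falling back transparently to vmalloc for sizes the slab
 * allocator is unlikely to satisfy. A flags value of 0 requests the
 * default blocking behaviour.
 */
static void * __maybe_unused
kmem_alloc_large_example(void)
{
        return kmem_alloc_large(16 * PAGE_SIZE, 0);
}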

void *
kmem_realloc(const void *old, size_t newsize, xfs_km_flags_t flags)
{
        int     retries = 0;
        gfp_t   lflags = kmem_flags_convert(flags);
        void    *ptr;

        trace_kmem_realloc(newsize, flags, _RET_IP_);

        do {
                ptr = krealloc(old, newsize, lflags);
                if (ptr || (flags & KM_MAYFAIL))
                        return ptr;
                if (!(++retries % 100))
                        xfs_err(NULL,
        "%s(%u) possible memory allocation deadlock size %zu in %s (mode:0x%x)",
                                current->comm, current->pid,
                                newsize, __func__, lflags);
                congestion_wait(BLK_RW_ASYNC, HZ/50);
        } while (1);
}
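
/*
 * Usage sketch (hypothetical): grow a buffer with KM_MAYFAIL,
 * remembering that on failure krealloc() leaves the old allocation
 * intact, so it must not be leaked or treated as freed.
 */
static void * __maybe_unused
kmem_realloc_example(void *old, size_t newsize)
{
        void    *ptr = kmem_realloc(old, newsize, KM_MAYFAIL);

        if (!ptr)
                return old;     /* caller keeps the original buffer */
        return ptr;
}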

void *
kmem_zone_alloc(kmem_zone_t *zone, xfs_km_flags_t flags)
{
        int     retries = 0;
        gfp_t   lflags = kmem_flags_convert(flags);
        void    *ptr;

        trace_kmem_zone_alloc(kmem_cache_size(zone), flags, _RET_IP_);
        do {
                ptr = kmem_cache_alloc(zone, lflags);
                if (ptr || (flags & KM_MAYFAIL))
                        return ptr;
                if (!(++retries % 100))
                        xfs_err(NULL,
                "%s(%u) possible memory allocation deadlock in %s (mode:0x%x)",
                                current->comm, current->pid,
                                __func__, lflags);
                congestion_wait(BLK_RW_ASYNC, HZ/50);
        } while (1);
}
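
/*
 * Usage sketch (hypothetical): allocate an object from a slab cache.
 * example_zone stands in for a cache created elsewhere with
 * kmem_cache_create(); it is not defined in this file.
 */
static void * __maybe_unused
kmem_zone_alloc_example(kmem_zone_t *example_zone)
{
        return kmem_zone_alloc(example_zone, KM_NOFS);
}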
