This source file includes the following definitions:
- msm_gem_shrinker_lock
- msm_gem_shrinker_count
- msm_gem_shrinker_scan
- msm_gem_shrinker_vmap
- msm_gem_shrinker_init
- msm_gem_shrinker_cleanup
1
2
3
4
5
6
7 #include "msm_drv.h"
8 #include "msm_gem.h"
9
10 static bool msm_gem_shrinker_lock(struct drm_device *dev, bool *unlock)
11 {
12
13
14
15
16
17
18
19
20
21
22
23
24 switch (mutex_trylock_recursive(&dev->struct_mutex)) {
25 case MUTEX_TRYLOCK_FAILED:
26 return false;
27
28 case MUTEX_TRYLOCK_SUCCESS:
29 *unlock = true;
30 return true;
31
32 case MUTEX_TRYLOCK_RECURSIVE:
33 *unlock = false;
34 return true;
35 }
36
37 BUG();
38 }
39
40 static unsigned long
41 msm_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
42 {
43 struct msm_drm_private *priv =
44 container_of(shrinker, struct msm_drm_private, shrinker);
45 struct drm_device *dev = priv->dev;
46 struct msm_gem_object *msm_obj;
47 unsigned long count = 0;
48 bool unlock;
49
50 if (!msm_gem_shrinker_lock(dev, &unlock))
51 return 0;
52
53 list_for_each_entry(msm_obj, &priv->inactive_list, mm_list) {
54 if (is_purgeable(msm_obj))
55 count += msm_obj->base.size >> PAGE_SHIFT;
56 }
57
58 if (unlock)
59 mutex_unlock(&dev->struct_mutex);
60
61 return count;
62 }
63
64 static unsigned long
65 msm_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
66 {
67 struct msm_drm_private *priv =
68 container_of(shrinker, struct msm_drm_private, shrinker);
69 struct drm_device *dev = priv->dev;
70 struct msm_gem_object *msm_obj;
71 unsigned long freed = 0;
72 bool unlock;
73
74 if (!msm_gem_shrinker_lock(dev, &unlock))
75 return SHRINK_STOP;
76
77 list_for_each_entry(msm_obj, &priv->inactive_list, mm_list) {
78 if (freed >= sc->nr_to_scan)
79 break;
80 if (is_purgeable(msm_obj)) {
81 msm_gem_purge(&msm_obj->base, OBJ_LOCK_SHRINKER);
82 freed += msm_obj->base.size >> PAGE_SHIFT;
83 }
84 }
85
86 if (unlock)
87 mutex_unlock(&dev->struct_mutex);
88
89 if (freed > 0)
90 pr_info_ratelimited("Purging %lu bytes\n", freed << PAGE_SHIFT);
91
92 return freed;
93 }
94
95 static int
96 msm_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr)
97 {
98 struct msm_drm_private *priv =
99 container_of(nb, struct msm_drm_private, vmap_notifier);
100 struct drm_device *dev = priv->dev;
101 struct msm_gem_object *msm_obj;
102 unsigned unmapped = 0;
103 bool unlock;
104
105 if (!msm_gem_shrinker_lock(dev, &unlock))
106 return NOTIFY_DONE;
107
108 list_for_each_entry(msm_obj, &priv->inactive_list, mm_list) {
109 if (is_vunmapable(msm_obj)) {
110 msm_gem_vunmap(&msm_obj->base, OBJ_LOCK_SHRINKER);
111
112
113
114
115 if (++unmapped >= 15)
116 break;
117 }
118 }
119
120 if (unlock)
121 mutex_unlock(&dev->struct_mutex);
122
123 *(unsigned long *)ptr += unmapped;
124
125 if (unmapped > 0)
126 pr_info_ratelimited("Purging %u vmaps\n", unmapped);
127
128 return NOTIFY_DONE;
129 }
130
131
132
133
134
135
136
137 void msm_gem_shrinker_init(struct drm_device *dev)
138 {
139 struct msm_drm_private *priv = dev->dev_private;
140 priv->shrinker.count_objects = msm_gem_shrinker_count;
141 priv->shrinker.scan_objects = msm_gem_shrinker_scan;
142 priv->shrinker.seeks = DEFAULT_SEEKS;
143 WARN_ON(register_shrinker(&priv->shrinker));
144
145 priv->vmap_notifier.notifier_call = msm_gem_shrinker_vmap;
146 WARN_ON(register_vmap_purge_notifier(&priv->vmap_notifier));
147 }
148
149
150
151
152
153
154
155 void msm_gem_shrinker_cleanup(struct drm_device *dev)
156 {
157 struct msm_drm_private *priv = dev->dev_private;
158
159 if (priv->shrinker.nr_deferred) {
160 WARN_ON(unregister_vmap_purge_notifier(&priv->vmap_notifier));
161 unregister_shrinker(&priv->shrinker);
162 }
163 }