/*
 * Copyright © 2008-2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include <linux/oom.h>
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/pci.h>
#include <linux/dma-buf.h>
#include <linux/vmalloc.h>
#include <drm/drmP.h>
#include <drm/i915_drm.h>

#include "i915_drv.h"
#include "i915_trace.h"

static bool shrinker_lock(struct drm_i915_private *dev_priv, bool *unlock)
{
	switch (mutex_trylock_recursive(&dev_priv->drm.struct_mutex)) {
	case MUTEX_TRYLOCK_RECURSIVE:
		/* struct_mutex is already held by this task (e.g. direct
		 * reclaim from an allocation made under it), so there is
		 * nothing for the caller to unlock.
		 */
		*unlock = false;
		return true;

	case MUTEX_TRYLOCK_FAILED:
		/* Another task holds struct_mutex: spin briefly, with
		 * preemption disabled, in the hope that the holder drops
		 * it, and give up as soon as the scheduler wants this CPU.
		 */
		*unlock = false;
		preempt_disable();
		do {
			cpu_relax();
			if (mutex_trylock(&dev_priv->drm.struct_mutex)) {
				*unlock = true;
				break;
			}
		} while (!need_resched());
		preempt_enable();
		return *unlock;

	case MUTEX_TRYLOCK_SUCCESS:
		*unlock = true;
		return true;
	}

	BUG();
}

static void shrinker_unlock(struct drm_i915_private *dev_priv, bool unlock)
{
	if (!unlock)
		return;

	mutex_unlock(&dev_priv->drm.struct_mutex);
}

static bool any_vma_pinned(struct drm_i915_gem_object *obj)
{
	struct i915_vma *vma;

	list_for_each_entry(vma, &obj->vma_list, obj_link) {
		/* Only GGTT vmas may be permanently pinned, and they are
		 * always at the start of the list. We can stop hunting as
		 * soon as we see a ppGTT vma.
		 */
		if (!i915_vma_is_ggtt(vma))
			break;

		if (i915_vma_is_pinned(vma))
			return true;
	}

	return false;
}

static bool swap_available(void)
{
	return get_nr_swap_pages() > 0;
}

static bool can_release_pages(struct drm_i915_gem_object *obj)
{
	if (!obj->mm.pages)
		return false;

	/* Consider only shrinkable objects. */
	if (!i915_gem_object_is_shrinkable(obj))
		return false;

	/* Only report true if by unbinding the object and putting its pages
	 * we can actually make forward progress towards freeing physical
	 * pages.
	 *
	 * If the pages are pinned for any other reason than being bound
	 * to the GPU, simply unbinding from the GPU is not going to succeed
	 * in releasing our pin count on the pages themselves.
	 */
	if (atomic_read(&obj->mm.pages_pin_count) > obj->bind_count)
		return false;

	if (any_vma_pinned(obj))
		return false;

	/* We can only return physical pages to the system if we can either
	 * discard the contents (because the user has marked them as being
	 * purgeable) or if we can move their contents out to swap.
	 */
	return swap_available() || obj->mm.madv == I915_MADV_DONTNEED;
}

static bool unsafe_drop_pages(struct drm_i915_gem_object *obj)
{
	if (i915_gem_object_unbind(obj) == 0)
		__i915_gem_object_put_pages(obj, I915_MM_SHRINKER);
	return !READ_ONCE(obj->mm.pages);
}

/**
 * i915_gem_shrink - Shrink buffer object caches
 * @dev_priv: i915 device
 * @target: amount of memory to make available, in pages
 * @flags: control flags for selecting cache types
 *
 * This function is the main interface to the shrinker. It will try to release
 * up to @target pages of main memory backing storage from buffer objects.
 * Selection of the specific caches can be done with @flags. This is e.g. useful
 * when purgeable objects should be removed from caches preferentially.
 *
 * Note that it's not guaranteed that the released amount is actually available
 * as free system memory - the pages might still be in use due to other reasons
 * (like cpu mmaps) or the mm core has reused them before we could grab them.
 * Therefore code that needs to explicitly shrink buffer object caches (e.g. to
 * avoid deadlocks in memory reclaim) must fall back to i915_gem_shrink_all().
 *
 * Also note that any kind of pinning (both per-vma address space pins and
 * backing storage pins at the buffer object level) results in the shrinker
 * code having to skip the object.
 *
 * Returns:
 * The number of pages of backing storage actually released.
 */
unsigned long
i915_gem_shrink(struct drm_i915_private *dev_priv,
		unsigned long target, unsigned flags)
{
	const struct {
		struct list_head *list;
		unsigned int bit;
	} phases[] = {
		{ &dev_priv->mm.unbound_list, I915_SHRINK_UNBOUND },
		{ &dev_priv->mm.bound_list, I915_SHRINK_BOUND },
		{ NULL, 0 },
	}, *phase;
	unsigned long count = 0;
	bool unlock;

	if (!shrinker_lock(dev_priv, &unlock))
		return 0;

	trace_i915_gem_shrink(dev_priv, target, flags);
	i915_gem_retire_requests(dev_priv);

	/*
	 * Unbinding of objects will require HW access; let us not wake the
	 * device just to recover a little memory. If absolutely necessary,
	 * we will force the wake in the oom-notifier.
	 */
	if ((flags & I915_SHRINK_BOUND) &&
	    !intel_runtime_pm_get_if_in_use(dev_priv))
		flags &= ~I915_SHRINK_BOUND;

	/*
	 * As we may completely rewrite the (un)bound list whilst unbinding
	 * (due to retiring requests) we have to strictly process only
	 * one element of the list at a time, and recheck the list
	 * on every iteration.
	 *
	 * In particular, we must hold a reference whilst removing the
	 * object as we may end up waiting for and/or retiring the objects.
	 * This might release the final reference (held by the active list)
	 * and result in the object being freed from under us. This is
	 * similar to the precautions the eviction code must take whilst
	 * removing objects.
	 *
	 * Also note that although these lists do not hold a reference to
	 * the object we can safely grab one here: The final object
	 * unreferencing and the bound_list are both protected by the
	 * dev->struct_mutex and so we won't ever be able to observe an
	 * object on the bound_list with a reference count equal to 0.
	 */
	for (phase = phases; phase->list; phase++) {
		struct list_head still_in_list;
		struct drm_i915_gem_object *obj;

		if ((flags & phase->bit) == 0)
			continue;

		INIT_LIST_HEAD(&still_in_list);
		while (count < target &&
		       (obj = list_first_entry_or_null(phase->list,
						       typeof(*obj),
						       global_link))) {
			list_move_tail(&obj->global_link, &still_in_list);
			if (!obj->mm.pages) {
				list_del_init(&obj->global_link);
				continue;
			}

			if (flags & I915_SHRINK_PURGEABLE &&
			    obj->mm.madv != I915_MADV_DONTNEED)
				continue;

			if (flags & I915_SHRINK_VMAPS &&
			    !is_vmalloc_addr(obj->mm.mapping))
				continue;

			if (!(flags & I915_SHRINK_ACTIVE) &&
			    (i915_gem_object_is_active(obj) ||
			     i915_gem_object_is_framebuffer(obj)))
				continue;

			if (!can_release_pages(obj))
				continue;

			if (unsafe_drop_pages(obj)) {
				/* May arrive from get_pages on another bo */
				mutex_lock_nested(&obj->mm.lock,
						  I915_MM_SHRINKER);
				if (!obj->mm.pages) {
					__i915_gem_object_invalidate(obj);
					list_del_init(&obj->global_link);
					count += obj->base.size >> PAGE_SHIFT;
				}
				mutex_unlock(&obj->mm.lock);
			}
		}
		list_splice_tail(&still_in_list, phase->list);
	}

	if (flags & I915_SHRINK_BOUND)
		intel_runtime_pm_put(dev_priv);

	i915_gem_retire_requests(dev_priv);

	shrinker_unlock(dev_priv, unlock);

	return count;
}
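
/*
 * Illustration only (not part of the original file): a sketch of how a caller
 * might use i915_gem_shrink() to implement the "purgeable objects first"
 * policy described in the kernel-doc above. The helper name and the exact
 * escalation order are assumptions made for the example; the real in-tree
 * user of this pattern is i915_gem_shrinker_scan() further below.
 */
#if 0
static unsigned long example_shrink_caches(struct drm_i915_private *dev_priv,
					   unsigned long nr_pages)
{
	unsigned long freed;

	/* Pass 1: only touch objects userspace marked I915_MADV_DONTNEED. */
	freed = i915_gem_shrink(dev_priv, nr_pages,
				I915_SHRINK_BOUND |
				I915_SHRINK_UNBOUND |
				I915_SHRINK_PURGEABLE);

	/* Pass 2: fall back to any idle, unpinned object, bound or unbound. */
	if (freed < nr_pages)
		freed += i915_gem_shrink(dev_priv, nr_pages - freed,
					 I915_SHRINK_BOUND |
					 I915_SHRINK_UNBOUND);

	return freed;
}
#endif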

/**
 * i915_gem_shrink_all - Shrink buffer object caches completely
 * @dev_priv: i915 device
 *
 * This is a simple wrapper around i915_gem_shrink() to aggressively shrink all
 * caches completely. It also first waits for and retires all outstanding
 * requests to be able to release backing storage for active objects.
 *
 * This should only be used in code that intentionally quiesces the GPU or as a
 * last-ditch effort when memory seems to have run out.
 *
 * Returns:
 * The number of pages of backing storage actually released.
 */
unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv)
{
	unsigned long freed;

	intel_runtime_pm_get(dev_priv);
	freed = i915_gem_shrink(dev_priv, -1UL,
				I915_SHRINK_BOUND |
				I915_SHRINK_UNBOUND |
				I915_SHRINK_ACTIVE);
	intel_runtime_pm_put(dev_priv);

	return freed;
}

static unsigned long
i915_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
{
	struct drm_i915_private *dev_priv =
		container_of(shrinker, struct drm_i915_private, mm.shrinker);
	struct drm_i915_gem_object *obj;
	unsigned long count;
	bool unlock;

	if (!shrinker_lock(dev_priv, &unlock))
		return 0;

	i915_gem_retire_requests(dev_priv);

	count = 0;
	list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_link)
		if (can_release_pages(obj))
			count += obj->base.size >> PAGE_SHIFT;

	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_link) {
		if (!i915_gem_object_is_active(obj) && can_release_pages(obj))
			count += obj->base.size >> PAGE_SHIFT;
	}

	shrinker_unlock(dev_priv, unlock);

	return count;
}

static unsigned long
i915_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
{
	struct drm_i915_private *dev_priv =
		container_of(shrinker, struct drm_i915_private, mm.shrinker);
	unsigned long freed;
	bool unlock;

	if (!shrinker_lock(dev_priv, &unlock))
		return SHRINK_STOP;

	freed = i915_gem_shrink(dev_priv,
				sc->nr_to_scan,
				I915_SHRINK_BOUND |
				I915_SHRINK_UNBOUND |
				I915_SHRINK_PURGEABLE);
	if (freed < sc->nr_to_scan)
		freed += i915_gem_shrink(dev_priv,
					 sc->nr_to_scan - freed,
					 I915_SHRINK_BOUND |
					 I915_SHRINK_UNBOUND);
	if (freed < sc->nr_to_scan && current_is_kswapd()) {
		intel_runtime_pm_get(dev_priv);
		freed += i915_gem_shrink(dev_priv,
					 sc->nr_to_scan - freed,
					 I915_SHRINK_ACTIVE |
					 I915_SHRINK_BOUND |
					 I915_SHRINK_UNBOUND);
		intel_runtime_pm_put(dev_priv);
	}

	shrinker_unlock(dev_priv, unlock);

	return freed;
}
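
/*
 * For orientation only (not part of the original file): a deliberately
 * simplified model of how the core VM drives the two callbacks registered in
 * i915_gem_shrinker_init() below. The real logic lives in mm/vmscan.c
 * (do_shrink_slab()) and also applies seek cost and batching; the helper name
 * here is hypothetical and the sketch only shows the count/scan contract that
 * i915_gem_shrinker_count() and i915_gem_shrinker_scan() implement.
 */
#if 0
static unsigned long example_drive_shrinker(struct shrinker *shrinker,
					    struct shrink_control *sc)
{
	unsigned long freeable, freed;

	/* Ask the driver how many pages it believes it could free. */
	freeable = shrinker->count_objects(shrinker, sc);
	if (!freeable)
		return 0;

	/* Ask it to actually release up to nr_to_scan of them. */
	sc->nr_to_scan = min_t(unsigned long, freeable, SHRINK_BATCH);
	freed = shrinker->scan_objects(shrinker, sc);
	if (freed == SHRINK_STOP)
		freed = 0;

	return freed;
}
#endif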

static bool
shrinker_lock_uninterruptible(struct drm_i915_private *dev_priv, bool *unlock,
			      int timeout_ms)
{
	unsigned long timeout = jiffies + msecs_to_jiffies_timeout(timeout_ms);

	do {
		if (i915_gem_wait_for_idle(dev_priv, 0) == 0 &&
		    shrinker_lock(dev_priv, unlock))
			break;

		schedule_timeout_killable(1);
		if (fatal_signal_pending(current))
			return false;

		if (time_after(jiffies, timeout)) {
			pr_err("Unable to lock GPU to purge memory.\n");
			return false;
		}
	} while (1);

	return true;
}

static int
i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr)
{
	struct drm_i915_private *dev_priv =
		container_of(nb, struct drm_i915_private, mm.oom_notifier);
	struct drm_i915_gem_object *obj;
	unsigned long unevictable, bound, unbound, freed_pages;
	bool unlock;

	if (!shrinker_lock_uninterruptible(dev_priv, &unlock, 5000))
		return NOTIFY_DONE;

	freed_pages = i915_gem_shrink_all(dev_priv);

	/* Because we may be allocating inside our own driver, we cannot
	 * assert that there are no objects with pinned pages that are not
	 * being pointed to by hardware.
	 */
	unbound = bound = unevictable = 0;
	list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_link) {
		if (!obj->mm.pages)
			continue;

		if (!can_release_pages(obj))
			unevictable += obj->base.size >> PAGE_SHIFT;
		else
			unbound += obj->base.size >> PAGE_SHIFT;
	}
	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_link) {
		if (!obj->mm.pages)
			continue;

		if (!can_release_pages(obj))
			unevictable += obj->base.size >> PAGE_SHIFT;
		else
			bound += obj->base.size >> PAGE_SHIFT;
	}

	shrinker_unlock(dev_priv, unlock);

	if (freed_pages || unbound || bound)
		pr_info("Purging GPU memory, %lu pages freed, "
			"%lu pages still pinned.\n",
			freed_pages, unevictable);
	if (unbound || bound)
		pr_err("%lu and %lu pages still available in the "
		       "bound and unbound GPU page lists.\n",
		       bound, unbound);

	*(unsigned long *)ptr += freed_pages;
	return NOTIFY_DONE;
}

static int
i915_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr)
{
	struct drm_i915_private *dev_priv =
		container_of(nb, struct drm_i915_private, mm.vmap_notifier);
	struct i915_vma *vma, *next;
	unsigned long freed_pages = 0;
	bool unlock;
	int ret;

	if (!shrinker_lock_uninterruptible(dev_priv, &unlock, 5000))
		return NOTIFY_DONE;

	/* Force everything onto the inactive lists */
	ret = i915_gem_wait_for_idle(dev_priv, I915_WAIT_LOCKED);
	if (ret)
		goto out;

	intel_runtime_pm_get(dev_priv);
	freed_pages += i915_gem_shrink(dev_priv, -1UL,
				       I915_SHRINK_BOUND |
				       I915_SHRINK_UNBOUND |
				       I915_SHRINK_ACTIVE |
				       I915_SHRINK_VMAPS);
	intel_runtime_pm_put(dev_priv);

	/* We also want to clear any cached iomaps as they wrap vmap */
	list_for_each_entry_safe(vma, next,
				 &dev_priv->ggtt.base.inactive_list, vm_link) {
		unsigned long count = vma->node.size >> PAGE_SHIFT;
		if (vma->iomap && i915_vma_unbind(vma) == 0)
			freed_pages += count;
	}

out:
	shrinker_unlock(dev_priv, unlock);

	*(unsigned long *)ptr += freed_pages;
	return NOTIFY_DONE;
}

/**
 * i915_gem_shrinker_init - Initialize i915 shrinker
 * @dev_priv: i915 device
 *
 * This function registers and sets up the i915 shrinker, OOM handler and
 * vmap purge notifier.
 */
void i915_gem_shrinker_init(struct drm_i915_private *dev_priv)
{
	dev_priv->mm.shrinker.scan_objects = i915_gem_shrinker_scan;
	dev_priv->mm.shrinker.count_objects = i915_gem_shrinker_count;
	dev_priv->mm.shrinker.seeks = DEFAULT_SEEKS;
	WARN_ON(register_shrinker(&dev_priv->mm.shrinker));

	dev_priv->mm.oom_notifier.notifier_call = i915_gem_shrinker_oom;
	WARN_ON(register_oom_notifier(&dev_priv->mm.oom_notifier));

	dev_priv->mm.vmap_notifier.notifier_call = i915_gem_shrinker_vmap;
	WARN_ON(register_vmap_purge_notifier(&dev_priv->mm.vmap_notifier));
}

/**
 * i915_gem_shrinker_cleanup - Clean up i915 shrinker
 * @dev_priv: i915 device
 *
 * This function unregisters the i915 shrinker, OOM handler and vmap purge
 * notifier.
 */
void i915_gem_shrinker_cleanup(struct drm_i915_private *dev_priv)
{
	WARN_ON(unregister_vmap_purge_notifier(&dev_priv->mm.vmap_notifier));
	WARN_ON(unregister_oom_notifier(&dev_priv->mm.oom_notifier));
	unregister_shrinker(&dev_priv->mm.shrinker);
}
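
/*
 * Usage sketch (illustrative, not part of the original file): the init and
 * cleanup entry points above are intended to be called once each from the
 * driver's load and teardown paths. The helper below is hypothetical and only
 * shows the pairing and the unwind a caller needs on a failed load.
 */
#if 0
static int example_driver_load(struct drm_i915_private *dev_priv)
{
	int ret;

	i915_gem_shrinker_init(dev_priv);

	ret = example_setup_rest_of_driver(dev_priv); /* hypothetical step */
	if (ret) {
		/* Unregister so reclaim can no longer call into the driver. */
		i915_gem_shrinker_cleanup(dev_priv);
		return ret;
	}

	return 0;
}
#endif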