/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Ben Widawsky <ben@bwidawsk.net>
 *    Michel Thierry <michel.thierry@intel.com>
 *    Thomas Daniel <thomas.daniel@intel.com>
 *    Oscar Mateo <oscar.mateo@intel.com>
 *
 */

/**
 * DOC: Logical Rings, Logical Ring Contexts and Execlists
 *
 * Motivation:
 * GEN8 brings an expansion of the HW contexts: "Logical Ring Contexts".
 * These expanded contexts enable a number of new abilities, especially
 * "Execlists" (also implemented in this file).
 *
 * One of the main differences with the legacy HW contexts is that logical
 * ring contexts incorporate many more things in the context's state, like
 * PDPs or ringbuffer control registers:
 *
 * The reason why PDPs are included in the context is straightforward: as
 * PPGTTs (per-process GTTs) are actually per-context, having the PDPs
 * contained there means you don't need to do a ppgtt->switch_mm yourself;
 * instead, the GPU will do it for you on the context switch.
 *
 * But, what about the ringbuffer control registers (head, tail, etc.)?
 * Shouldn't we just need a set of those per engine command streamer? This is
 * where the name "Logical Rings" starts to make sense: by virtualizing the
 * rings, the engine cs shifts to a new "ring buffer" with every context
 * switch. When you want to submit a workload to the GPU you: A) choose your
 * context, B) find its appropriate virtualized ring, C) write commands to it
 * and then, finally, D) tell the GPU to switch to that context.
 *
 * Instead of the legacy MI_SET_CONTEXT, the way you tell the GPU to switch
 * to a context is via a context execution list, ergo "Execlists".
 *
 * LRC implementation:
 * Regarding the creation of contexts, we have:
 *
 * - One global default context.
 * - One local default context for each opened fd.
 * - One local extra context for each context create ioctl call.
 *
 * Now that ringbuffers belong per-context (and not per-engine, like before)
 * and that contexts are uniquely tied to a given engine (and not reusable,
 * like before) we need:
 *
 * - One ringbuffer per-engine inside each context.
 * - One backing object per-engine inside each context.
 *
 * The global default context starts its life with these new objects fully
 * allocated and populated. The local default context for each opened fd is
 * more complex, because we don't know at creation time which engine is going
 * to use them. To handle this, we have implemented a deferred creation of LR
 * contexts:
 *
 * The local context starts its life as a hollow or blank holder, that only
 * gets populated for a given engine once we receive an execbuffer. If later
 * on we receive another execbuffer ioctl for the same context but a different
 * engine, we allocate/populate a new ringbuffer and context backing object and
 * so on.
 *
 * Finally, regarding local contexts created using the ioctl call: as they are
 * only allowed with the render ring, we can allocate & populate them right
 * away (no need to defer anything, at least for now).
 *
 * Execlists implementation:
 * Execlists are the new method by which, on gen8+ hardware, workloads are
 * submitted for execution (as opposed to the legacy, ringbuffer-based, method).
 * This method works as follows:
 *
 * When a request is committed, its commands (the BB start and any leading or
 * trailing commands, like the seqno breadcrumbs) are placed in the ringbuffer
 * for the appropriate context. The tail pointer in the hardware context is not
 * updated at this time, but instead, kept by the driver in the ringbuffer
 * structure. A structure representing this request is added to a request queue
 * for the appropriate engine: this structure contains a copy of the context's
 * tail after the request was written to the ring buffer and a pointer to the
 * context itself.
 *
 * If the engine's request queue was empty before the request was added, the
 * queue is processed immediately. Otherwise the queue will be processed during
 * a context switch interrupt. In any case, elements on the queue will get sent
 * (in pairs) to the GPU's ExecLists Submit Port (ELSP, for short) with a
 * globally unique 20-bit submission ID.
 *
 * When execution of a request completes, the GPU updates the context status
 * buffer with a context complete event and generates a context switch interrupt.
 * During the interrupt handling, the driver examines the events in the buffer:
 * for each context complete event, if the announced ID matches that on the head
 * of the request queue, then that request is retired and removed from the queue.
 *
 * After processing, if any requests were retired and the queue is not empty
 * then a new execution list can be submitted. The two requests at the front of
 * the queue are next to be submitted but since a context may not occur twice in
 * an execution list, if subsequent requests have the same ID as the first then
 * the two requests must be combined. This is done simply by discarding requests
 * at the head of the queue until either only one request is left (in which case
 * we use a NULL second context) or the first two requests have unique IDs.
 *
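 * As an illustration (request names are made up): if the queue holds requests
 * A1, A2 and B1, where A and B are two different contexts, A1 is discarded in
 * favour of A2 and the pair (A2, B1) is what gets written to the ELSP.
 *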
 * By always executing the first two requests in the queue the driver ensures
 * that the GPU is kept as busy as possible. In the case where a single context
 * completes but a second context is still executing, the request for this second
 * context will be at the head of the queue when we remove the first one. This
 * request will then be resubmitted along with a new request for a different context,
 * which will cause the hardware to continue executing the second request and queue
 * the new request (the GPU detects the condition of a context getting preempted
 * with the same context and optimizes the context switch flow by not doing
 * preemption, but just sampling the new tail pointer).
 *
 */
#include <linux/interrupt.h>

#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "intel_mocs.h"

#define GEN9_LR_CONTEXT_RENDER_SIZE (22 * PAGE_SIZE)
#define GEN8_LR_CONTEXT_RENDER_SIZE (20 * PAGE_SIZE)
#define GEN8_LR_CONTEXT_OTHER_SIZE (2 * PAGE_SIZE)

#define RING_EXECLIST_QFULL		(1 << 0x2)
#define RING_EXECLIST1_VALID		(1 << 0x3)
#define RING_EXECLIST0_VALID		(1 << 0x4)
#define RING_EXECLIST_ACTIVE_STATUS	(3 << 0xE)
#define RING_EXECLIST1_ACTIVE		(1 << 0x11)
#define RING_EXECLIST0_ACTIVE		(1 << 0x12)

#define GEN8_CTX_STATUS_IDLE_ACTIVE	(1 << 0)
#define GEN8_CTX_STATUS_PREEMPTED	(1 << 1)
#define GEN8_CTX_STATUS_ELEMENT_SWITCH	(1 << 2)
#define GEN8_CTX_STATUS_ACTIVE_IDLE	(1 << 3)
#define GEN8_CTX_STATUS_COMPLETE	(1 << 4)
#define GEN8_CTX_STATUS_LITE_RESTORE	(1 << 15)

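/*
 * Dword offsets into the logical ring context image. Each CTX_FOO below names
 * the slot that receives the MMIO offset loaded by MI_LOAD_REGISTER_IMM, and
 * CTX_FOO + 1 names the slot that receives its value, e.g. the driver updates
 * the ring tail with reg_state[CTX_RING_TAIL + 1] = tail.
 */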
#define CTX_LRI_HEADER_0		0x01
#define CTX_CONTEXT_CONTROL		0x02
#define CTX_RING_HEAD			0x04
#define CTX_RING_TAIL			0x06
#define CTX_RING_BUFFER_START		0x08
#define CTX_RING_BUFFER_CONTROL		0x0a
#define CTX_BB_HEAD_U			0x0c
#define CTX_BB_HEAD_L			0x0e
#define CTX_BB_STATE			0x10
#define CTX_SECOND_BB_HEAD_U		0x12
#define CTX_SECOND_BB_HEAD_L		0x14
#define CTX_SECOND_BB_STATE		0x16
#define CTX_BB_PER_CTX_PTR		0x18
#define CTX_RCS_INDIRECT_CTX		0x1a
#define CTX_RCS_INDIRECT_CTX_OFFSET	0x1c
#define CTX_LRI_HEADER_1		0x21
#define CTX_CTX_TIMESTAMP		0x22
#define CTX_PDP3_UDW			0x24
#define CTX_PDP3_LDW			0x26
#define CTX_PDP2_UDW			0x28
#define CTX_PDP2_LDW			0x2a
#define CTX_PDP1_UDW			0x2c
#define CTX_PDP1_LDW			0x2e
#define CTX_PDP0_UDW			0x30
#define CTX_PDP0_LDW			0x32
#define CTX_LRI_HEADER_2		0x41
#define CTX_R_PWR_CLK_STATE		0x42
#define CTX_GPGPU_CSR_BASE_ADDRESS	0x44

#define GEN8_CTX_VALID (1<<0)
#define GEN8_CTX_FORCE_PD_RESTORE (1<<1)
#define GEN8_CTX_FORCE_RESTORE (1<<2)
#define GEN8_CTX_L3LLC_COHERENT (1<<5)
#define GEN8_CTX_PRIVILEGE (1<<8)

#define ASSIGN_CTX_REG(reg_state, pos, reg, val) do { \
	(reg_state)[(pos)+0] = i915_mmio_reg_offset(reg); \
	(reg_state)[(pos)+1] = (val); \
} while (0)

#define ASSIGN_CTX_PDP(ppgtt, reg_state, n) do { \
	const u64 _addr = i915_page_dir_dma_addr((ppgtt), (n)); \
	reg_state[CTX_PDP ## n ## _UDW+1] = upper_32_bits(_addr); \
	reg_state[CTX_PDP ## n ## _LDW+1] = lower_32_bits(_addr); \
} while (0)

#define ASSIGN_CTX_PML4(ppgtt, reg_state) do { \
	reg_state[CTX_PDP0_UDW + 1] = upper_32_bits(px_dma(&ppgtt->pml4)); \
	reg_state[CTX_PDP0_LDW + 1] = lower_32_bits(px_dma(&ppgtt->pml4)); \
} while (0)
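
/*
 * As an illustration, ASSIGN_CTX_PDP(ppgtt, reg_state, 3) fills the value
 * slots reg_state[CTX_PDP3_UDW + 1] and reg_state[CTX_PDP3_LDW + 1] with the
 * upper and lower halves of page directory 3's dma address, while
 * ASSIGN_CTX_PML4 does the same for the single PML4 in 48-bit mode, reusing
 * the PDP0 slots.
 */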

enum {
	ADVANCED_CONTEXT = 0,
	LEGACY_32B_CONTEXT,
	ADVANCED_AD_CONTEXT,
	LEGACY_64B_CONTEXT
};
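/*
 * The addressing mode occupies two bits of the context descriptor, starting
 * at GEN8_CTX_ADDRESSING_MODE_SHIFT: legacy 64-bit mode is used with full
 * 48-bit PPGTT, legacy 32-bit mode otherwise.
 */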
#define GEN8_CTX_ADDRESSING_MODE_SHIFT 3
#define GEN8_CTX_ADDRESSING_MODE(dev)  (USES_FULL_48BIT_PPGTT(dev) ?\
		LEGACY_64B_CONTEXT :\
		LEGACY_32B_CONTEXT)
enum {
	FAULT_AND_HANG = 0,
	FAULT_AND_HALT, /* Debug only */
	FAULT_AND_STREAM,
	FAULT_AND_CONTINUE /* Unsupported */
};
#define GEN8_CTX_ID_SHIFT 32
#define GEN8_CTX_ID_WIDTH 21
#define GEN8_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT	0x17
#define GEN9_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT	0x26

/* Typical size of the average request (2 pipecontrols and a MI_BB) */
#define EXECLISTS_REQUEST_SIZE 64 /* bytes */

static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
					    struct intel_engine_cs *engine);
static int intel_lr_context_pin(struct i915_gem_context *ctx,
				struct intel_engine_cs *engine);

/**
 * intel_sanitize_enable_execlists() - sanitize i915.enable_execlists
 * @dev_priv: i915 device private.
 * @enable_execlists: value of i915.enable_execlists module parameter.
 *
 * Only certain platforms support Execlists (the prerequisites being
 * support for Logical Ring Contexts and Aliasing PPGTT or better).
 *
 * Return: 1 if Execlists is supported and has to be enabled.
 */
int intel_sanitize_enable_execlists(struct drm_i915_private *dev_priv, int enable_execlists)
{
	/* On platforms with execlist available, vGPU will only
	 * support execlist mode, no ring buffer mode.
	 */
	if (HAS_LOGICAL_RING_CONTEXTS(dev_priv) && intel_vgpu_active(dev_priv))
		return 1;

	if (INTEL_GEN(dev_priv) >= 9)
		return 1;

	if (enable_execlists == 0)
		return 0;

	if (HAS_LOGICAL_RING_CONTEXTS(dev_priv) &&
	    USES_PPGTT(dev_priv) &&
	    i915.use_mmio_flip >= 0)
		return 1;

	return 0;
}

static void
logical_ring_init_platform_invariants(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	if (IS_GEN8(dev_priv) || IS_GEN9(dev_priv))
		engine->idle_lite_restore_wa = ~0;

	engine->disable_lite_restore_wa = (IS_SKL_REVID(dev_priv, 0, SKL_REVID_B0) ||
					IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) &&
					(engine->id == VCS || engine->id == VCS2);

	engine->ctx_desc_template = GEN8_CTX_VALID;
	engine->ctx_desc_template |= GEN8_CTX_ADDRESSING_MODE(dev_priv) <<
				   GEN8_CTX_ADDRESSING_MODE_SHIFT;
	if (IS_GEN8(dev_priv))
		engine->ctx_desc_template |= GEN8_CTX_L3LLC_COHERENT;
	engine->ctx_desc_template |= GEN8_CTX_PRIVILEGE;

	/* TODO: WaDisableLiteRestore when we start using semaphore
	 * signalling between Command Streamers */
	/* ring->ctx_desc_template |= GEN8_CTX_FORCE_RESTORE; */

	/* WaEnableForceRestoreInCtxtDescForVCS:skl */
	/* WaEnableForceRestoreInCtxtDescForVCS:bxt */
	if (engine->disable_lite_restore_wa)
		engine->ctx_desc_template |= GEN8_CTX_FORCE_RESTORE;
}

/**
 * intel_lr_context_descriptor_update() - calculate & cache the descriptor
 *					  for a pinned context
 * @ctx: Context to work on
 * @engine: Engine the descriptor will be used with
 *
 * The context descriptor encodes various attributes of a context,
 * including its GTT address and some flags. Because it's fairly
 * expensive to calculate, we'll just do it once and cache the result,
 * which remains valid until the context is unpinned.
 *
 * This is what a descriptor looks like, from LSB to MSB:
 *    bits 0-11:    flags, GEN8_CTX_* (cached in ctx_desc_template)
 *    bits 12-31:   LRCA, GTT address of (the HWSP of) this context
 *    bits 32-52:   ctx ID, a globally unique tag
 *    bits 53-54:   mbz, reserved for use by hardware
 *    bits 55-63:   group ID, currently unused and set to 0
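 *
 * As a made-up example: a context whose PPHWSP sits at GGTT address 0x00123000
 * and whose hw_id is 7, on an engine whose template is GEN8_CTX_VALID |
 * GEN8_CTX_PRIVILEGE, gets the descriptor 0x00123000 | those flag bits |
 * ((u64)7 << GEN8_CTX_ID_SHIFT).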
 */
static void
intel_lr_context_descriptor_update(struct i915_gem_context *ctx,
				   struct intel_engine_cs *engine)
{
	struct intel_context *ce = &ctx->engine[engine->id];
	u64 desc;

	BUILD_BUG_ON(MAX_CONTEXT_HW_ID > (1<<GEN8_CTX_ID_WIDTH));

	desc = engine->ctx_desc_template;			/* bits  0-11 */
	desc |= ce->lrc_vma->node.start + LRC_PPHWSP_PN * PAGE_SIZE;
								/* bits 12-31 */
	desc |= (u64)ctx->hw_id << GEN8_CTX_ID_SHIFT;		/* bits 32-52 */

	ce->lrc_desc = desc;
}

uint64_t intel_lr_context_descriptor(struct i915_gem_context *ctx,
				     struct intel_engine_cs *engine)
{
	return ctx->engine[engine->id].lrc_desc;
}

static void execlists_elsp_write(struct drm_i915_gem_request *rq0,
				 struct drm_i915_gem_request *rq1)
{
	struct intel_engine_cs *engine = rq0->engine;
	struct drm_i915_private *dev_priv = rq0->i915;
	uint64_t desc[2];

	if (rq1) {
		desc[1] = intel_lr_context_descriptor(rq1->ctx, rq1->engine);
		rq1->elsp_submitted++;
	} else {
		desc[1] = 0;
	}

	desc[0] = intel_lr_context_descriptor(rq0->ctx, rq0->engine);
	rq0->elsp_submitted++;

	/* You must always write both descriptors in the order below. */
	I915_WRITE_FW(RING_ELSP(engine), upper_32_bits(desc[1]));
	I915_WRITE_FW(RING_ELSP(engine), lower_32_bits(desc[1]));

	I915_WRITE_FW(RING_ELSP(engine), upper_32_bits(desc[0]));
	/* The context is automatically loaded after the following */
	I915_WRITE_FW(RING_ELSP(engine), lower_32_bits(desc[0]));

	/* ELSP is a wo register, use another nearby reg for posting */
	POSTING_READ_FW(RING_EXECLIST_STATUS_LO(engine));
}

static void
execlists_update_context_pdps(struct i915_hw_ppgtt *ppgtt, u32 *reg_state)
{
	ASSIGN_CTX_PDP(ppgtt, reg_state, 3);
	ASSIGN_CTX_PDP(ppgtt, reg_state, 2);
	ASSIGN_CTX_PDP(ppgtt, reg_state, 1);
	ASSIGN_CTX_PDP(ppgtt, reg_state, 0);
}

static void execlists_update_context(struct drm_i915_gem_request *rq)
{
	struct intel_engine_cs *engine = rq->engine;
	struct i915_hw_ppgtt *ppgtt = rq->ctx->ppgtt;
	uint32_t *reg_state = rq->ctx->engine[engine->id].lrc_reg_state;

	reg_state[CTX_RING_TAIL+1] = rq->tail;

	/* True 32b PPGTT with dynamic page allocation: update PDP
	 * registers and point the unallocated PDPs to scratch page.
	 * PML4 is allocated during ppgtt init, so this is not needed
	 * in 48-bit mode.
	 */
	if (ppgtt && !USES_FULL_48BIT_PPGTT(ppgtt->base.dev))
		execlists_update_context_pdps(ppgtt, reg_state);
}

static void execlists_submit_requests(struct drm_i915_gem_request *rq0,
				      struct drm_i915_gem_request *rq1)
{
	struct drm_i915_private *dev_priv = rq0->i915;
	unsigned int fw_domains = rq0->engine->fw_domains;

	execlists_update_context(rq0);

	if (rq1)
		execlists_update_context(rq1);

	spin_lock_irq(&dev_priv->uncore.lock);
	intel_uncore_forcewake_get__locked(dev_priv, fw_domains);

	execlists_elsp_write(rq0, rq1);

	intel_uncore_forcewake_put__locked(dev_priv, fw_domains);
	spin_unlock_irq(&dev_priv->uncore.lock);
}

static void execlists_context_unqueue(struct intel_engine_cs *engine)
{
	struct drm_i915_gem_request *req0 = NULL, *req1 = NULL;
	struct drm_i915_gem_request *cursor, *tmp;

	assert_spin_locked(&engine->execlist_lock);

	/*
	 * If irqs are not active generate a warning as batches that finish
	 * without the irqs may get lost and a GPU Hang may occur.
	 */
	WARN_ON(!intel_irqs_enabled(engine->i915));

	/* Try to read in pairs */
	list_for_each_entry_safe(cursor, tmp, &engine->execlist_queue,
				 execlist_link) {
		if (!req0) {
			req0 = cursor;
		} else if (req0->ctx == cursor->ctx) {
			/* Same ctx: ignore first request, as second request
			 * will update tail past first request's workload */
			cursor->elsp_submitted = req0->elsp_submitted;
			list_del(&req0->execlist_link);
			i915_gem_request_unreference(req0);
			req0 = cursor;
		} else {
			req1 = cursor;
			WARN_ON(req1->elsp_submitted);
			break;
		}
	}

	if (unlikely(!req0))
		return;

	if (req0->elsp_submitted & engine->idle_lite_restore_wa) {
		/*
		 * WaIdleLiteRestore: make sure we never cause a lite restore
		 * with HEAD==TAIL.
		 *
		 * Apply the wa NOOPS to prevent ring:HEAD == req:TAIL as we
		 * resubmit the request. See gen8_emit_request() for where we
		 * prepare the padding after the end of the request.
		 */
		struct intel_ringbuffer *ringbuf;

		ringbuf = req0->ctx->engine[engine->id].ringbuf;
		req0->tail += 8;
		req0->tail &= ringbuf->size - 1;
	}

	execlists_submit_requests(req0, req1);
}

static unsigned int
execlists_check_remove_request(struct intel_engine_cs *engine, u32 ctx_id)
{
	struct drm_i915_gem_request *head_req;

	assert_spin_locked(&engine->execlist_lock);

	head_req = list_first_entry_or_null(&engine->execlist_queue,
					    struct drm_i915_gem_request,
					    execlist_link);

	if (WARN_ON(!head_req || (head_req->ctx_hw_id != ctx_id)))
		return 0;

	WARN(head_req->elsp_submitted == 0, "Never submitted head request\n");

	if (--head_req->elsp_submitted > 0)
		return 0;

	list_del(&head_req->execlist_link);
	i915_gem_request_unreference(head_req);

	return 1;
}

static u32
get_context_status(struct intel_engine_cs *engine, unsigned int read_pointer,
		   u32 *context_id)
{
	struct drm_i915_private *dev_priv = engine->i915;
	u32 status;

	read_pointer %= GEN8_CSB_ENTRIES;

	status = I915_READ_FW(RING_CONTEXT_STATUS_BUF_LO(engine, read_pointer));

	if (status & GEN8_CTX_STATUS_IDLE_ACTIVE)
		return 0;

	*context_id = I915_READ_FW(RING_CONTEXT_STATUS_BUF_HI(engine,
							      read_pointer));

	return status;
}

/**
 * intel_lrc_irq_handler() - handle Context Switch interrupts
 * @data: tasklet data, the struct intel_engine_cs to handle (cast to
 *        unsigned long).
 *
 * Check the unread Context Status Buffers and manage the submission of new
 * contexts to the ELSP accordingly.
 */
static void intel_lrc_irq_handler(unsigned long data)
{
	struct intel_engine_cs *engine = (struct intel_engine_cs *)data;
	struct drm_i915_private *dev_priv = engine->i915;
	u32 status_pointer;
	unsigned int read_pointer, write_pointer;
	u32 csb[GEN8_CSB_ENTRIES][2];
	unsigned int csb_read = 0, i;
	unsigned int submit_contexts = 0;

	intel_uncore_forcewake_get(dev_priv, engine->fw_domains);

	status_pointer = I915_READ_FW(RING_CONTEXT_STATUS_PTR(engine));

	read_pointer = engine->next_context_status_buffer;
	write_pointer = GEN8_CSB_WRITE_PTR(status_pointer);
	if (read_pointer > write_pointer)
		write_pointer += GEN8_CSB_ENTRIES;

	while (read_pointer < write_pointer) {
		if (WARN_ON_ONCE(csb_read == GEN8_CSB_ENTRIES))
			break;
		csb[csb_read][0] = get_context_status(engine, ++read_pointer,
						      &csb[csb_read][1]);
		csb_read++;
	}

	engine->next_context_status_buffer = write_pointer % GEN8_CSB_ENTRIES;

	/* Update the read pointer to the old write pointer. Manual ringbuffer
	 * management ftw </sarcasm> */
	I915_WRITE_FW(RING_CONTEXT_STATUS_PTR(engine),
		      _MASKED_FIELD(GEN8_CSB_READ_PTR_MASK,
				    engine->next_context_status_buffer << 8));

	intel_uncore_forcewake_put(dev_priv, engine->fw_domains);

	spin_lock(&engine->execlist_lock);

	for (i = 0; i < csb_read; i++) {
		if (unlikely(csb[i][0] & GEN8_CTX_STATUS_PREEMPTED)) {
			if (csb[i][0] & GEN8_CTX_STATUS_LITE_RESTORE) {
				if (execlists_check_remove_request(engine, csb[i][1]))
					WARN(1, "Lite Restored request removed from queue\n");
			} else
				WARN(1, "Preemption without Lite Restore\n");
		}

		if (csb[i][0] & (GEN8_CTX_STATUS_ACTIVE_IDLE |
		    GEN8_CTX_STATUS_ELEMENT_SWITCH))
			submit_contexts +=
				execlists_check_remove_request(engine, csb[i][1]);
	}

	if (submit_contexts) {
		if (!engine->disable_lite_restore_wa ||
		    (csb[i][0] & GEN8_CTX_STATUS_ACTIVE_IDLE))
			execlists_context_unqueue(engine);
	}

	spin_unlock(&engine->execlist_lock);

	if (unlikely(submit_contexts > 2))
		DRM_ERROR("More than two context complete events?\n");
}

static void execlists_context_queue(struct drm_i915_gem_request *request)
{
	struct intel_engine_cs *engine = request->engine;
	struct drm_i915_gem_request *cursor;
	int num_elements = 0;

	spin_lock_bh(&engine->execlist_lock);

	list_for_each_entry(cursor, &engine->execlist_queue, execlist_link)
		if (++num_elements > 2)
			break;

	if (num_elements > 2) {
		struct drm_i915_gem_request *tail_req;

		tail_req = list_last_entry(&engine->execlist_queue,
					   struct drm_i915_gem_request,
					   execlist_link);

		if (request->ctx == tail_req->ctx) {
			WARN(tail_req->elsp_submitted != 0,
				"More than 2 already-submitted reqs queued\n");
			list_del(&tail_req->execlist_link);
			i915_gem_request_unreference(tail_req);
		}
	}

	i915_gem_request_reference(request);
	list_add_tail(&request->execlist_link, &engine->execlist_queue);
	request->ctx_hw_id = request->ctx->hw_id;
	if (num_elements == 0)
		execlists_context_unqueue(engine);

	spin_unlock_bh(&engine->execlist_lock);
}

static int logical_ring_invalidate_all_caches(struct drm_i915_gem_request *req)
{
	struct intel_engine_cs *engine = req->engine;
	uint32_t flush_domains;
	int ret;

	flush_domains = 0;
	if (engine->gpu_caches_dirty)
		flush_domains = I915_GEM_GPU_DOMAINS;

	ret = engine->emit_flush(req, I915_GEM_GPU_DOMAINS, flush_domains);
	if (ret)
		return ret;

	engine->gpu_caches_dirty = false;
	return 0;
}

static int execlists_move_to_gpu(struct drm_i915_gem_request *req,
				 struct list_head *vmas)
{
	const unsigned other_rings = ~intel_engine_flag(req->engine);
	struct i915_vma *vma;
	uint32_t flush_domains = 0;
	bool flush_chipset = false;
	int ret;

	list_for_each_entry(vma, vmas, exec_list) {
		struct drm_i915_gem_object *obj = vma->obj;

		if (obj->active & other_rings) {
			ret = i915_gem_object_sync(obj, req->engine, &req);
			if (ret)
				return ret;
		}

		if (obj->base.write_domain & I915_GEM_DOMAIN_CPU)
			flush_chipset |= i915_gem_clflush_object(obj, false);

		flush_domains |= obj->base.write_domain;
	}

	if (flush_domains & I915_GEM_DOMAIN_GTT)
		wmb();

	/* Unconditionally invalidate gpu caches and ensure that we do flush
	 * any residual writes from the previous batch.
	 */
	return logical_ring_invalidate_all_caches(req);
}

int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request)
{
	struct intel_engine_cs *engine = request->engine;
	struct intel_context *ce = &request->ctx->engine[engine->id];
	int ret;

	/* Flush enough space to reduce the likelihood of waiting after
	 * we start building the request - in which case we will just
	 * have to repeat work.
	 */
	request->reserved_space += EXECLISTS_REQUEST_SIZE;

	if (!ce->state) {
		ret = execlists_context_deferred_alloc(request->ctx, engine);
		if (ret)
			return ret;
	}

	request->ringbuf = ce->ringbuf;

	if (i915.enable_guc_submission) {
		/*
		 * Check that the GuC has space for the request before
		 * going any further, as the i915_add_request() call
		 * later on mustn't fail ...
		 */
		ret = i915_guc_wq_check_space(request);
		if (ret)
			return ret;
	}

	ret = intel_lr_context_pin(request->ctx, engine);
	if (ret)
		return ret;

	ret = intel_ring_begin(request, 0);
	if (ret)
		goto err_unpin;

	if (!ce->initialised) {
		ret = engine->init_context(request);
		if (ret)
			goto err_unpin;

		ce->initialised = true;
	}

	/* Note that after this point, we have committed to using
	 * this request as it is being used to both track the
	 * state of engine initialisation and liveness of the
	 * golden renderstate above. Think twice before you try
	 * to cancel/unwind this request now.
	 */

	request->reserved_space -= EXECLISTS_REQUEST_SIZE;
	return 0;

err_unpin:
	intel_lr_context_unpin(request->ctx, engine);
	return ret;
}

/*
 * intel_logical_ring_advance_and_submit() - advance the tail and submit the workload
 * @request: Request to advance the logical ringbuffer of.
 *
 * The tail is updated in our logical ringbuffer struct, not in the actual context. What
 * really happens during submission is that the context and current tail will be placed
 * on a queue waiting for the ELSP to be ready to accept a new context submission. At that
 * point, the tail *inside* the context is updated and the ELSP written to.
 */
static int
intel_logical_ring_advance_and_submit(struct drm_i915_gem_request *request)
{
	struct intel_ringbuffer *ringbuf = request->ringbuf;
	struct intel_engine_cs *engine = request->engine;

	intel_logical_ring_advance(ringbuf);
	request->tail = ringbuf->tail;

	/*
	 * Here we add two extra NOOPs as padding to avoid
	 * lite restore of a context with HEAD==TAIL.
	 *
	 * Caller must reserve WA_TAIL_DWORDS for us!
	 */
	intel_logical_ring_emit(ringbuf, MI_NOOP);
	intel_logical_ring_emit(ringbuf, MI_NOOP);
	intel_logical_ring_advance(ringbuf);

	if (intel_engine_stopped(engine))
		return 0;

	/* We keep the previous context alive until we retire the following
	 * request. This ensures that the context object is still pinned
	 * for any residual writes the HW makes into it on the context switch
	 * into the next object following the breadcrumb. Otherwise, we may
	 * retire the context too early.
	 */
	request->previous_context = engine->last_context;
	engine->last_context = request->ctx;

	if (i915.enable_guc_submission)
		i915_guc_submit(request);
	else
		execlists_context_queue(request);

	return 0;
}

/**
 * intel_execlists_submission() - submit a batchbuffer for execution, Execlists style
 * @params: execbuffer call parameters (engine, context, batch and dispatch details).
 * @args: execbuffer call arguments.
 * @vmas: list of vmas.
 *
 * This is the evil twin version of i915_gem_ringbuffer_submission. It abstracts
 * away the submission details of the execbuffer ioctl call.
 *
 * Return: non-zero if the submission fails.
 */
int intel_execlists_submission(struct i915_execbuffer_params *params,
			       struct drm_i915_gem_execbuffer2 *args,
			       struct list_head *vmas)
{
	struct drm_device *dev = params->dev;
	struct intel_engine_cs *engine = params->engine;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_ringbuffer *ringbuf = params->ctx->engine[engine->id].ringbuf;
	u64 exec_start;
	int instp_mode;
	u32 instp_mask;
	int ret;

	instp_mode = args->flags & I915_EXEC_CONSTANTS_MASK;
	instp_mask = I915_EXEC_CONSTANTS_MASK;
	switch (instp_mode) {
	case I915_EXEC_CONSTANTS_REL_GENERAL:
	case I915_EXEC_CONSTANTS_ABSOLUTE:
	case I915_EXEC_CONSTANTS_REL_SURFACE:
		if (instp_mode != 0 && engine != &dev_priv->engine[RCS]) {
			DRM_DEBUG("non-0 rel constants mode on non-RCS\n");
			return -EINVAL;
		}

		if (instp_mode != dev_priv->relative_constants_mode) {
			if (instp_mode == I915_EXEC_CONSTANTS_REL_SURFACE) {
				DRM_DEBUG("rel surface constants mode invalid on gen5+\n");
				return -EINVAL;
			}

			/* The HW changed the meaning on this bit on gen6 */
			instp_mask &= ~I915_EXEC_CONSTANTS_REL_SURFACE;
		}
		break;
	default:
		DRM_DEBUG("execbuf with unknown constants: %d\n", instp_mode);
		return -EINVAL;
	}

	if (args->flags & I915_EXEC_GEN7_SOL_RESET) {
		DRM_DEBUG("sol reset is gen7 only\n");
		return -EINVAL;
	}

	ret = execlists_move_to_gpu(params->request, vmas);
	if (ret)
		return ret;

	if (engine == &dev_priv->engine[RCS] &&
	    instp_mode != dev_priv->relative_constants_mode) {
		ret = intel_ring_begin(params->request, 4);
		if (ret)
			return ret;

		intel_logical_ring_emit(ringbuf, MI_NOOP);
		intel_logical_ring_emit(ringbuf, MI_LOAD_REGISTER_IMM(1));
		intel_logical_ring_emit_reg(ringbuf, INSTPM);
		intel_logical_ring_emit(ringbuf, instp_mask << 16 | instp_mode);
		intel_logical_ring_advance(ringbuf);

		dev_priv->relative_constants_mode = instp_mode;
	}

	exec_start = params->batch_obj_vm_offset +
		     args->batch_start_offset;

	ret = engine->emit_bb_start(params->request, exec_start, params->dispatch_flags);
	if (ret)
		return ret;

	trace_i915_gem_ring_dispatch(params->request, params->dispatch_flags);

	i915_gem_execbuffer_move_to_active(vmas, params->request);

	return 0;
}

void intel_execlists_cancel_requests(struct intel_engine_cs *engine)
{
	struct drm_i915_gem_request *req, *tmp;
	LIST_HEAD(cancel_list);

	WARN_ON(!mutex_is_locked(&engine->i915->dev->struct_mutex));

	spin_lock_bh(&engine->execlist_lock);
	list_replace_init(&engine->execlist_queue, &cancel_list);
	spin_unlock_bh(&engine->execlist_lock);

	list_for_each_entry_safe(req, tmp, &cancel_list, execlist_link) {
		list_del(&req->execlist_link);
		i915_gem_request_unreference(req);
	}
}

void intel_logical_ring_stop(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	int ret;

	if (!intel_engine_initialized(engine))
		return;

	ret = intel_engine_idle(engine);
	if (ret)
		DRM_ERROR("failed to quiesce %s whilst cleaning up: %d\n",
			  engine->name, ret);

	/* TODO: Is this correct with Execlists enabled? */
	I915_WRITE_MODE(engine, _MASKED_BIT_ENABLE(STOP_RING));
	if (wait_for((I915_READ_MODE(engine) & MODE_IDLE) != 0, 1000)) {
		DRM_ERROR("%s :timed out trying to stop ring\n", engine->name);
		return;
	}
	I915_WRITE_MODE(engine, _MASKED_BIT_DISABLE(STOP_RING));
}

int logical_ring_flush_all_caches(struct drm_i915_gem_request *req)
{
	struct intel_engine_cs *engine = req->engine;
	int ret;

	if (!engine->gpu_caches_dirty)
		return 0;

	ret = engine->emit_flush(req, 0, I915_GEM_GPU_DOMAINS);
	if (ret)
		return ret;

	engine->gpu_caches_dirty = false;
	return 0;
}

static int intel_lr_context_pin(struct i915_gem_context *ctx,
				struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = ctx->i915;
	struct intel_context *ce = &ctx->engine[engine->id];
	void *vaddr;
	u32 *lrc_reg_state;
	int ret;

	lockdep_assert_held(&ctx->i915->dev->struct_mutex);

	if (ce->pin_count++)
		return 0;

	ret = i915_gem_obj_ggtt_pin(ce->state, GEN8_LR_CONTEXT_ALIGN,
				    PIN_OFFSET_BIAS | GUC_WOPCM_TOP);
	if (ret)
		goto err;

	vaddr = i915_gem_object_pin_map(ce->state);
	if (IS_ERR(vaddr)) {
		ret = PTR_ERR(vaddr);
		goto unpin_ctx_obj;
	}

	lrc_reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE;

	ret = intel_pin_and_map_ringbuffer_obj(dev_priv, ce->ringbuf);
	if (ret)
		goto unpin_map;

	i915_gem_context_reference(ctx);
	ce->lrc_vma = i915_gem_obj_to_ggtt(ce->state);
	intel_lr_context_descriptor_update(ctx, engine);

	lrc_reg_state[CTX_RING_BUFFER_START+1] = ce->ringbuf->vma->node.start;
	ce->lrc_reg_state = lrc_reg_state;
	ce->state->dirty = true;

	/* Invalidate GuC TLB. */
	if (i915.enable_guc_submission)
		I915_WRITE(GEN8_GTCR, GEN8_GTCR_INVALIDATE);

	return 0;

unpin_map:
	i915_gem_object_unpin_map(ce->state);
unpin_ctx_obj:
	i915_gem_object_ggtt_unpin(ce->state);
err:
	ce->pin_count = 0;
	return ret;
}

void intel_lr_context_unpin(struct i915_gem_context *ctx,
			    struct intel_engine_cs *engine)
{
	struct intel_context *ce = &ctx->engine[engine->id];

	lockdep_assert_held(&ctx->i915->dev->struct_mutex);
	GEM_BUG_ON(ce->pin_count == 0);

	if (--ce->pin_count)
		return;

	intel_unpin_ringbuffer_obj(ce->ringbuf);

	i915_gem_object_unpin_map(ce->state);
	i915_gem_object_ggtt_unpin(ce->state);

	ce->lrc_vma = NULL;
	ce->lrc_desc = 0;
	ce->lrc_reg_state = NULL;

	i915_gem_context_unreference(ctx);
}

static int intel_logical_ring_workarounds_emit(struct drm_i915_gem_request *req)
{
	int ret, i;
	struct intel_engine_cs *engine = req->engine;
	struct intel_ringbuffer *ringbuf = req->ringbuf;
	struct i915_workarounds *w = &req->i915->workarounds;

	if (w->count == 0)
		return 0;

	engine->gpu_caches_dirty = true;
	ret = logical_ring_flush_all_caches(req);
	if (ret)
		return ret;

	ret = intel_ring_begin(req, w->count * 2 + 2);
	if (ret)
		return ret;

	intel_logical_ring_emit(ringbuf, MI_LOAD_REGISTER_IMM(w->count));
	for (i = 0; i < w->count; i++) {
		intel_logical_ring_emit_reg(ringbuf, w->reg[i].addr);
		intel_logical_ring_emit(ringbuf, w->reg[i].value);
	}
	intel_logical_ring_emit(ringbuf, MI_NOOP);

	intel_logical_ring_advance(ringbuf);

	engine->gpu_caches_dirty = true;
	ret = logical_ring_flush_all_caches(req);
	if (ret)
		return ret;

	return 0;
}
1048
Arun Siluvery83b8a982015-07-08 10:27:05 +01001049#define wa_ctx_emit(batch, index, cmd) \
Arun Siluvery17ee9502015-06-19 19:07:01 +01001050 do { \
Arun Siluvery83b8a982015-07-08 10:27:05 +01001051 int __index = (index)++; \
1052 if (WARN_ON(__index >= (PAGE_SIZE / sizeof(uint32_t)))) { \
Arun Siluvery17ee9502015-06-19 19:07:01 +01001053 return -ENOSPC; \
1054 } \
Arun Siluvery83b8a982015-07-08 10:27:05 +01001055 batch[__index] = (cmd); \
Arun Siluvery17ee9502015-06-19 19:07:01 +01001056 } while (0)
1057
Ville Syrjälä8f40db72015-11-04 23:20:08 +02001058#define wa_ctx_emit_reg(batch, index, reg) \
Ville Syrjäläf0f59a02015-11-18 15:33:26 +02001059 wa_ctx_emit((batch), (index), i915_mmio_reg_offset(reg))
Arun Siluvery9e000842015-07-03 14:27:31 +01001060
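/*
 * Illustrative sketch only (not part of the driver): callers build a dword
 * stream with the helpers above by threading the same index through every
 * emit, e.g. a single masked-bit register write via LRI looks like
 *
 *	uint32_t index = 0;
 *
 *	wa_ctx_emit(batch, index, MI_LOAD_REGISTER_IMM(1));
 *	wa_ctx_emit_reg(batch, index, GEN9_SLICE_COMMON_ECO_CHICKEN0);
 *	wa_ctx_emit(batch, index, _MASKED_BIT_ENABLE(DISABLE_PIXEL_MASK_CAMMING));
 *	wa_ctx_emit(batch, index, MI_NOOP);
 *
 * Note that wa_ctx_emit() bails out with -ENOSPC once the page is full, so
 * it may only be used inside a function returning int.
 */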
1061/*
1062 * In this WA we need to set GEN8_L3SQCREG4[21:21] and reset it after
1063 * PIPE_CONTROL instruction. This is required for the flush to happen correctly
 1064 * but there is a slight complication as this is applied in the WA batch, where the
 1065 * values are only initialized once, so we cannot read the register value at the
 1066 * beginning and reuse it later; hence we save its value to memory, upload a
 1067 * constant value with bit21 set and then restore the register from the saved value.
1068 * To simplify the WA, a constant value is formed by using the default value
1069 * of this register. This shouldn't be a problem because we are only modifying
 1070 * it for a short period and this batch is non-preemptible. We could of course
1071 * use additional instructions that read the actual value of the register
1072 * at that time and set our bit of interest but it makes the WA complicated.
1073 *
1074 * This WA is also required for Gen9 so extracting as a function avoids
1075 * code duplication.
1076 */
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001077static inline int gen8_emit_flush_coherentl3_wa(struct intel_engine_cs *engine,
Arun Siluvery9e000842015-07-03 14:27:31 +01001078 uint32_t *const batch,
1079 uint32_t index)
1080{
1081 uint32_t l3sqc4_flush = (0x40400000 | GEN8_LQSC_FLUSH_COHERENT_LINES);
1082
Arun Siluverya4106a72015-07-14 15:01:29 +01001083 /*
1084 * WaDisableLSQCROPERFforOCL:skl
1085 * This WA is implemented in skl_init_clock_gating() but since
 1086 * this batch updates GEN8_L3SQCREG4 with the default value, we need to
1087 * set this bit here to retain the WA during flush.
1088 */
Chris Wilsonc0336662016-05-06 15:40:21 +01001089 if (IS_SKL_REVID(engine->i915, 0, SKL_REVID_E0))
Arun Siluverya4106a72015-07-14 15:01:29 +01001090 l3sqc4_flush |= GEN8_LQSC_RO_PERF_DIS;
1091
Arun Siluveryf1afe242015-08-04 16:22:20 +01001092 wa_ctx_emit(batch, index, (MI_STORE_REGISTER_MEM_GEN8 |
Arun Siluvery83b8a982015-07-08 10:27:05 +01001093 MI_SRM_LRM_GLOBAL_GTT));
Ville Syrjälä8f40db72015-11-04 23:20:08 +02001094 wa_ctx_emit_reg(batch, index, GEN8_L3SQCREG4);
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001095 wa_ctx_emit(batch, index, engine->scratch.gtt_offset + 256);
Arun Siluvery83b8a982015-07-08 10:27:05 +01001096 wa_ctx_emit(batch, index, 0);
Arun Siluvery9e000842015-07-03 14:27:31 +01001097
Arun Siluvery83b8a982015-07-08 10:27:05 +01001098 wa_ctx_emit(batch, index, MI_LOAD_REGISTER_IMM(1));
Ville Syrjälä8f40db72015-11-04 23:20:08 +02001099 wa_ctx_emit_reg(batch, index, GEN8_L3SQCREG4);
Arun Siluvery83b8a982015-07-08 10:27:05 +01001100 wa_ctx_emit(batch, index, l3sqc4_flush);
Arun Siluvery9e000842015-07-03 14:27:31 +01001101
Arun Siluvery83b8a982015-07-08 10:27:05 +01001102 wa_ctx_emit(batch, index, GFX_OP_PIPE_CONTROL(6));
1103 wa_ctx_emit(batch, index, (PIPE_CONTROL_CS_STALL |
1104 PIPE_CONTROL_DC_FLUSH_ENABLE));
1105 wa_ctx_emit(batch, index, 0);
1106 wa_ctx_emit(batch, index, 0);
1107 wa_ctx_emit(batch, index, 0);
1108 wa_ctx_emit(batch, index, 0);
Arun Siluvery9e000842015-07-03 14:27:31 +01001109
Arun Siluveryf1afe242015-08-04 16:22:20 +01001110 wa_ctx_emit(batch, index, (MI_LOAD_REGISTER_MEM_GEN8 |
Arun Siluvery83b8a982015-07-08 10:27:05 +01001111 MI_SRM_LRM_GLOBAL_GTT));
Ville Syrjälä8f40db72015-11-04 23:20:08 +02001112 wa_ctx_emit_reg(batch, index, GEN8_L3SQCREG4);
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001113 wa_ctx_emit(batch, index, engine->scratch.gtt_offset + 256);
Arun Siluvery83b8a982015-07-08 10:27:05 +01001114 wa_ctx_emit(batch, index, 0);
Arun Siluvery9e000842015-07-03 14:27:31 +01001115
1116 return index;
1117}
1118
Arun Siluvery17ee9502015-06-19 19:07:01 +01001119static inline uint32_t wa_ctx_start(struct i915_wa_ctx_bb *wa_ctx,
1120 uint32_t offset,
1121 uint32_t start_alignment)
1122{
1123 return wa_ctx->offset = ALIGN(offset, start_alignment);
1124}
1125
1126static inline int wa_ctx_end(struct i915_wa_ctx_bb *wa_ctx,
1127 uint32_t offset,
1128 uint32_t size_alignment)
1129{
1130 wa_ctx->size = offset - wa_ctx->offset;
1131
1132 WARN(wa_ctx->size % size_alignment,
1133 "wa_ctx_bb failed sanity checks: size %d is not aligned to %d\n",
1134 wa_ctx->size, size_alignment);
1135 return 0;
1136}
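
/*
 * Illustrative sketch only (not part of the driver): the two helpers above
 * bracket every wa_ctx batch. A gen-specific init function typically does
 *
 *	uint32_t index = wa_ctx_start(wa_ctx, *offset, CACHELINE_DWORDS);
 *
 *	... emit the WA dwords with wa_ctx_emit(batch, index, ...) ...
 *
 *	return wa_ctx_end(wa_ctx, *offset = index, CACHELINE_DWORDS);
 *
 * so that wa_ctx->offset records where this batch starts in the page and
 * wa_ctx->size how many dwords it occupies.
 */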
1137
1138/**
1139 * gen8_init_indirectctx_bb() - initialize indirect ctx batch with WA
1140 *
 1141 * @engine: only applicable for RCS
 1142 * @wa_ctx: structure representing wa_ctx
 1143 * @batch: page in which the WA are loaded
 1144 * @offset: specifies the start of the batch; it should be cache-line aligned,
 1145 *    otherwise it is adjusted accordingly. The batch size is tracked in
 1146 *    DWORDS, although the HW consumes it in terms of cachelines.
 1149 *  Typically we only have one indirect_ctx and per_ctx batch buffer which are
 1150 *  initialized at the beginning and shared across all contexts, but this field
 1151 *  helps us to have multiple batches at different offsets and select them based
 1152 *  on some criteria. At the moment this batch always starts at the beginning of the page
 1153 *  and at this point we don't have multiple wa_ctx batch buffers.
1154 *
 1155 *  The number of WA applied is not known at the beginning; we use this field
 1156 *  to return the number of DWORDS written.
Arun Siluvery4d78c8d2015-06-23 15:50:43 +01001157 *
Arun Siluvery17ee9502015-06-19 19:07:01 +01001158 * It is to be noted that this batch does not contain MI_BATCH_BUFFER_END
1159 * so it adds NOOPs as padding to make it cacheline aligned.
 1160 * MI_BATCH_BUFFER_END will be added to the perctx batch and both of them together
 1161 * make a complete batch buffer.
1162 *
1163 * Return: non-zero if we exceed the PAGE_SIZE limit.
1164 */
1165
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001166static int gen8_init_indirectctx_bb(struct intel_engine_cs *engine,
Arun Siluvery17ee9502015-06-19 19:07:01 +01001167 struct i915_wa_ctx_bb *wa_ctx,
1168 uint32_t *const batch,
1169 uint32_t *offset)
1170{
Arun Siluvery0160f052015-06-23 15:46:57 +01001171 uint32_t scratch_addr;
Arun Siluvery17ee9502015-06-19 19:07:01 +01001172 uint32_t index = wa_ctx_start(wa_ctx, *offset, CACHELINE_DWORDS);
1173
Arun Siluvery7ad00d12015-06-19 18:37:12 +01001174 /* WaDisableCtxRestoreArbitration:bdw,chv */
Arun Siluvery83b8a982015-07-08 10:27:05 +01001175 wa_ctx_emit(batch, index, MI_ARB_ON_OFF | MI_ARB_DISABLE);
Arun Siluvery17ee9502015-06-19 19:07:01 +01001176
Arun Siluveryc82435b2015-06-19 18:37:13 +01001177 /* WaFlushCoherentL3CacheLinesAtContextSwitch:bdw */
Chris Wilsonc0336662016-05-06 15:40:21 +01001178 if (IS_BROADWELL(engine->i915)) {
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001179 int rc = gen8_emit_flush_coherentl3_wa(engine, batch, index);
Andrzej Hajda604ef732015-09-21 15:33:35 +02001180 if (rc < 0)
1181 return rc;
1182 index = rc;
Arun Siluveryc82435b2015-06-19 18:37:13 +01001183 }
1184
Arun Siluvery0160f052015-06-23 15:46:57 +01001185 /* WaClearSlmSpaceAtContextSwitch:bdw,chv */
1186 /* Actual scratch location is at 128 bytes offset */
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001187 scratch_addr = engine->scratch.gtt_offset + 2*CACHELINE_BYTES;
Arun Siluvery0160f052015-06-23 15:46:57 +01001188
Arun Siluvery83b8a982015-07-08 10:27:05 +01001189 wa_ctx_emit(batch, index, GFX_OP_PIPE_CONTROL(6));
1190 wa_ctx_emit(batch, index, (PIPE_CONTROL_FLUSH_L3 |
1191 PIPE_CONTROL_GLOBAL_GTT_IVB |
1192 PIPE_CONTROL_CS_STALL |
1193 PIPE_CONTROL_QW_WRITE));
1194 wa_ctx_emit(batch, index, scratch_addr);
1195 wa_ctx_emit(batch, index, 0);
1196 wa_ctx_emit(batch, index, 0);
1197 wa_ctx_emit(batch, index, 0);
Arun Siluvery0160f052015-06-23 15:46:57 +01001198
Arun Siluvery17ee9502015-06-19 19:07:01 +01001199 /* Pad to end of cacheline */
1200 while (index % CACHELINE_DWORDS)
Arun Siluvery83b8a982015-07-08 10:27:05 +01001201 wa_ctx_emit(batch, index, MI_NOOP);
Arun Siluvery17ee9502015-06-19 19:07:01 +01001202
1203 /*
1204 * MI_BATCH_BUFFER_END is not required in Indirect ctx BB because
1205 * execution depends on the length specified in terms of cache lines
1206 * in the register CTX_RCS_INDIRECT_CTX
1207 */
1208
1209 return wa_ctx_end(wa_ctx, *offset = index, CACHELINE_DWORDS);
1210}
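
/*
 * Worked example (illustrative only, assuming 64-byte cachelines, i.e.
 * CACHELINE_DWORDS == 16): if the WA above emitted 23 dwords, the MI_NOOP
 * padding loop rounds the batch up to 32 dwords, so the length handed to
 * CTX_RCS_INDIRECT_CTX is exactly 2 cachelines.
 */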
1211
1212/**
1213 * gen8_init_perctx_bb() - initialize per ctx batch with WA
1214 *
 1215 * @engine: only applicable for RCS
 1216 * @wa_ctx: structure representing wa_ctx
Arun Siluvery4d78c8d2015-06-23 15:50:43 +01001219 * @batch: page in which WA are loaded
Arun Siluvery17ee9502015-06-19 19:07:01 +01001220 * @offset: This field specifies the start of this batch.
 1221 * This batch is started immediately after the indirect_ctx batch. Since we ensure
 1222 * that indirect_ctx ends on a cacheline, this batch is aligned automatically.
1223 *
 1224 * The number of DWORDS written is returned using this field.
1225 *
 1226 * This batch is terminated with MI_BATCH_BUFFER_END, so we need not add padding
 1227 * to align it to a cacheline; padding after MI_BATCH_BUFFER_END is redundant.
1228 */
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001229static int gen8_init_perctx_bb(struct intel_engine_cs *engine,
Arun Siluvery17ee9502015-06-19 19:07:01 +01001230 struct i915_wa_ctx_bb *wa_ctx,
1231 uint32_t *const batch,
1232 uint32_t *offset)
1233{
1234 uint32_t index = wa_ctx_start(wa_ctx, *offset, CACHELINE_DWORDS);
1235
Arun Siluvery7ad00d12015-06-19 18:37:12 +01001236 /* WaDisableCtxRestoreArbitration:bdw,chv */
Arun Siluvery83b8a982015-07-08 10:27:05 +01001237 wa_ctx_emit(batch, index, MI_ARB_ON_OFF | MI_ARB_ENABLE);
Arun Siluvery7ad00d12015-06-19 18:37:12 +01001238
Arun Siluvery83b8a982015-07-08 10:27:05 +01001239 wa_ctx_emit(batch, index, MI_BATCH_BUFFER_END);
Arun Siluvery17ee9502015-06-19 19:07:01 +01001240
1241 return wa_ctx_end(wa_ctx, *offset = index, 1);
1242}
1243
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001244static int gen9_init_indirectctx_bb(struct intel_engine_cs *engine,
Arun Siluvery0504cff2015-07-14 15:01:27 +01001245 struct i915_wa_ctx_bb *wa_ctx,
1246 uint32_t *const batch,
1247 uint32_t *offset)
1248{
Arun Siluverya4106a72015-07-14 15:01:29 +01001249 int ret;
Arun Siluvery0504cff2015-07-14 15:01:27 +01001250 uint32_t index = wa_ctx_start(wa_ctx, *offset, CACHELINE_DWORDS);
1251
Arun Siluvery0907c8f2015-07-14 15:01:28 +01001252 /* WaDisableCtxRestoreArbitration:skl,bxt */
Chris Wilsonc0336662016-05-06 15:40:21 +01001253 if (IS_SKL_REVID(engine->i915, 0, SKL_REVID_D0) ||
1254 IS_BXT_REVID(engine->i915, 0, BXT_REVID_A1))
Arun Siluvery0907c8f2015-07-14 15:01:28 +01001255 wa_ctx_emit(batch, index, MI_ARB_ON_OFF | MI_ARB_DISABLE);
Arun Siluvery0504cff2015-07-14 15:01:27 +01001256
Arun Siluverya4106a72015-07-14 15:01:29 +01001257 /* WaFlushCoherentL3CacheLinesAtContextSwitch:skl,bxt */
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001258 ret = gen8_emit_flush_coherentl3_wa(engine, batch, index);
Arun Siluverya4106a72015-07-14 15:01:29 +01001259 if (ret < 0)
1260 return ret;
1261 index = ret;
1262
Arun Siluvery0504cff2015-07-14 15:01:27 +01001263 /* Pad to end of cacheline */
1264 while (index % CACHELINE_DWORDS)
1265 wa_ctx_emit(batch, index, MI_NOOP);
1266
1267 return wa_ctx_end(wa_ctx, *offset = index, CACHELINE_DWORDS);
1268}
1269
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001270static int gen9_init_perctx_bb(struct intel_engine_cs *engine,
Arun Siluvery0504cff2015-07-14 15:01:27 +01001271 struct i915_wa_ctx_bb *wa_ctx,
1272 uint32_t *const batch,
1273 uint32_t *offset)
1274{
1275 uint32_t index = wa_ctx_start(wa_ctx, *offset, CACHELINE_DWORDS);
1276
Arun Siluvery9b014352015-07-14 15:01:30 +01001277 /* WaSetDisablePixMaskCammingAndRhwoInCommonSliceChicken:skl,bxt */
Chris Wilsonc0336662016-05-06 15:40:21 +01001278 if (IS_SKL_REVID(engine->i915, 0, SKL_REVID_B0) ||
1279 IS_BXT_REVID(engine->i915, 0, BXT_REVID_A1)) {
Arun Siluvery9b014352015-07-14 15:01:30 +01001280 wa_ctx_emit(batch, index, MI_LOAD_REGISTER_IMM(1));
Ville Syrjälä8f40db72015-11-04 23:20:08 +02001281 wa_ctx_emit_reg(batch, index, GEN9_SLICE_COMMON_ECO_CHICKEN0);
Arun Siluvery9b014352015-07-14 15:01:30 +01001282 wa_ctx_emit(batch, index,
1283 _MASKED_BIT_ENABLE(DISABLE_PIXEL_MASK_CAMMING));
1284 wa_ctx_emit(batch, index, MI_NOOP);
1285 }
1286
Tim Goreb1e429f2016-03-21 14:37:29 +00001287 /* WaClearTdlStateAckDirtyBits:bxt */
Chris Wilsonc0336662016-05-06 15:40:21 +01001288 if (IS_BXT_REVID(engine->i915, 0, BXT_REVID_B0)) {
Tim Goreb1e429f2016-03-21 14:37:29 +00001289 wa_ctx_emit(batch, index, MI_LOAD_REGISTER_IMM(4));
1290
1291 wa_ctx_emit_reg(batch, index, GEN8_STATE_ACK);
1292 wa_ctx_emit(batch, index, _MASKED_BIT_DISABLE(GEN9_SUBSLICE_TDL_ACK_BITS));
1293
1294 wa_ctx_emit_reg(batch, index, GEN9_STATE_ACK_SLICE1);
1295 wa_ctx_emit(batch, index, _MASKED_BIT_DISABLE(GEN9_SUBSLICE_TDL_ACK_BITS));
1296
1297 wa_ctx_emit_reg(batch, index, GEN9_STATE_ACK_SLICE2);
1298 wa_ctx_emit(batch, index, _MASKED_BIT_DISABLE(GEN9_SUBSLICE_TDL_ACK_BITS));
1299
1300 wa_ctx_emit_reg(batch, index, GEN7_ROW_CHICKEN2);
1301 /* dummy write to CS, mask bits are 0 to ensure the register is not modified */
1302 wa_ctx_emit(batch, index, 0x0);
1303 wa_ctx_emit(batch, index, MI_NOOP);
1304 }
1305
Arun Siluvery0907c8f2015-07-14 15:01:28 +01001306 /* WaDisableCtxRestoreArbitration:skl,bxt */
Chris Wilsonc0336662016-05-06 15:40:21 +01001307 if (IS_SKL_REVID(engine->i915, 0, SKL_REVID_D0) ||
1308 IS_BXT_REVID(engine->i915, 0, BXT_REVID_A1))
Arun Siluvery0907c8f2015-07-14 15:01:28 +01001309 wa_ctx_emit(batch, index, MI_ARB_ON_OFF | MI_ARB_ENABLE);
1310
Arun Siluvery0504cff2015-07-14 15:01:27 +01001311 wa_ctx_emit(batch, index, MI_BATCH_BUFFER_END);
1312
1313 return wa_ctx_end(wa_ctx, *offset = index, 1);
1314}
1315
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001316static int lrc_setup_wa_ctx_obj(struct intel_engine_cs *engine, u32 size)
Arun Siluvery17ee9502015-06-19 19:07:01 +01001317{
1318 int ret;
1319
Chris Wilsonc0336662016-05-06 15:40:21 +01001320 engine->wa_ctx.obj = i915_gem_object_create(engine->i915->dev,
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001321 PAGE_ALIGN(size));
Chris Wilsonfe3db792016-04-25 13:32:13 +01001322 if (IS_ERR(engine->wa_ctx.obj)) {
Arun Siluvery17ee9502015-06-19 19:07:01 +01001323 DRM_DEBUG_DRIVER("alloc LRC WA ctx backing obj failed.\n");
Chris Wilsonfe3db792016-04-25 13:32:13 +01001324 ret = PTR_ERR(engine->wa_ctx.obj);
1325 engine->wa_ctx.obj = NULL;
1326 return ret;
Arun Siluvery17ee9502015-06-19 19:07:01 +01001327 }
1328
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001329 ret = i915_gem_obj_ggtt_pin(engine->wa_ctx.obj, PAGE_SIZE, 0);
Arun Siluvery17ee9502015-06-19 19:07:01 +01001330 if (ret) {
1331 DRM_DEBUG_DRIVER("pin LRC WA ctx backing obj failed: %d\n",
1332 ret);
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001333 drm_gem_object_unreference(&engine->wa_ctx.obj->base);
Arun Siluvery17ee9502015-06-19 19:07:01 +01001334 return ret;
1335 }
1336
1337 return 0;
1338}
1339
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001340static void lrc_destroy_wa_ctx_obj(struct intel_engine_cs *engine)
Arun Siluvery17ee9502015-06-19 19:07:01 +01001341{
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001342 if (engine->wa_ctx.obj) {
1343 i915_gem_object_ggtt_unpin(engine->wa_ctx.obj);
1344 drm_gem_object_unreference(&engine->wa_ctx.obj->base);
1345 engine->wa_ctx.obj = NULL;
Arun Siluvery17ee9502015-06-19 19:07:01 +01001346 }
1347}
1348
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001349static int intel_init_workaround_bb(struct intel_engine_cs *engine)
Arun Siluvery17ee9502015-06-19 19:07:01 +01001350{
1351 int ret;
1352 uint32_t *batch;
1353 uint32_t offset;
1354 struct page *page;
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001355 struct i915_ctx_workarounds *wa_ctx = &engine->wa_ctx;
Arun Siluvery17ee9502015-06-19 19:07:01 +01001356
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001357 WARN_ON(engine->id != RCS);
Arun Siluvery17ee9502015-06-19 19:07:01 +01001358
Arun Siluvery5e60d792015-06-23 15:50:44 +01001359 /* update this when WA for higher Gen are added */
Chris Wilsonc0336662016-05-06 15:40:21 +01001360 if (INTEL_GEN(engine->i915) > 9) {
Arun Siluvery0504cff2015-07-14 15:01:27 +01001361 DRM_ERROR("WA batch buffer is not initialized for Gen%d\n",
Chris Wilsonc0336662016-05-06 15:40:21 +01001362 INTEL_GEN(engine->i915));
Arun Siluvery5e60d792015-06-23 15:50:44 +01001363 return 0;
Arun Siluvery0504cff2015-07-14 15:01:27 +01001364 }
Arun Siluvery5e60d792015-06-23 15:50:44 +01001365
Arun Siluveryc4db7592015-06-19 18:37:11 +01001366 /* some WA perform writes to scratch page, ensure it is valid */
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001367 if (engine->scratch.obj == NULL) {
1368 DRM_ERROR("scratch page not allocated for %s\n", engine->name);
Arun Siluveryc4db7592015-06-19 18:37:11 +01001369 return -EINVAL;
1370 }
1371
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001372 ret = lrc_setup_wa_ctx_obj(engine, PAGE_SIZE);
Arun Siluvery17ee9502015-06-19 19:07:01 +01001373 if (ret) {
1374 DRM_DEBUG_DRIVER("Failed to setup context WA page: %d\n", ret);
1375 return ret;
1376 }
1377
Dave Gordon033908a2015-12-10 18:51:23 +00001378 page = i915_gem_object_get_dirty_page(wa_ctx->obj, 0);
Arun Siluvery17ee9502015-06-19 19:07:01 +01001379 batch = kmap_atomic(page);
1380 offset = 0;
1381
Chris Wilsonc0336662016-05-06 15:40:21 +01001382 if (IS_GEN8(engine->i915)) {
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001383 ret = gen8_init_indirectctx_bb(engine,
Arun Siluvery17ee9502015-06-19 19:07:01 +01001384 &wa_ctx->indirect_ctx,
1385 batch,
1386 &offset);
1387 if (ret)
1388 goto out;
1389
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001390 ret = gen8_init_perctx_bb(engine,
Arun Siluvery17ee9502015-06-19 19:07:01 +01001391 &wa_ctx->per_ctx,
1392 batch,
1393 &offset);
1394 if (ret)
1395 goto out;
Chris Wilsonc0336662016-05-06 15:40:21 +01001396 } else if (IS_GEN9(engine->i915)) {
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001397 ret = gen9_init_indirectctx_bb(engine,
Arun Siluvery0504cff2015-07-14 15:01:27 +01001398 &wa_ctx->indirect_ctx,
1399 batch,
1400 &offset);
1401 if (ret)
1402 goto out;
1403
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001404 ret = gen9_init_perctx_bb(engine,
Arun Siluvery0504cff2015-07-14 15:01:27 +01001405 &wa_ctx->per_ctx,
1406 batch,
1407 &offset);
1408 if (ret)
1409 goto out;
Arun Siluvery17ee9502015-06-19 19:07:01 +01001410 }
1411
1412out:
1413 kunmap_atomic(batch);
1414 if (ret)
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001415 lrc_destroy_wa_ctx_obj(engine);
Arun Siluvery17ee9502015-06-19 19:07:01 +01001416
1417 return ret;
1418}
1419
Tvrtko Ursulin04794ad2016-04-12 15:40:41 +01001420static void lrc_init_hws(struct intel_engine_cs *engine)
1421{
Chris Wilsonc0336662016-05-06 15:40:21 +01001422 struct drm_i915_private *dev_priv = engine->i915;
Tvrtko Ursulin04794ad2016-04-12 15:40:41 +01001423
1424 I915_WRITE(RING_HWS_PGA(engine->mmio_base),
1425 (u32)engine->status_page.gfx_addr);
1426 POSTING_READ(RING_HWS_PGA(engine->mmio_base));
1427}
1428
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001429static int gen8_init_common_ring(struct intel_engine_cs *engine)
Oscar Mateo9b1136d2014-07-24 17:04:24 +01001430{
Chris Wilsonc0336662016-05-06 15:40:21 +01001431 struct drm_i915_private *dev_priv = engine->i915;
Tvrtko Ursulinc6a2ac72016-02-26 16:58:32 +00001432 unsigned int next_context_status_buffer_hw;
Oscar Mateo9b1136d2014-07-24 17:04:24 +01001433
Tvrtko Ursulin04794ad2016-04-12 15:40:41 +01001434 lrc_init_hws(engine);
Nick Hoathe84fe802015-09-11 12:53:46 +01001435
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001436 I915_WRITE_IMR(engine,
1437 ~(engine->irq_enable_mask | engine->irq_keep_mask));
1438 I915_WRITE(RING_HWSTAM(engine->mmio_base), 0xffffffff);
Oscar Mateo73d477f2014-07-24 17:04:31 +01001439
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001440 I915_WRITE(RING_MODE_GEN7(engine),
Oscar Mateo9b1136d2014-07-24 17:04:24 +01001441 _MASKED_BIT_DISABLE(GFX_REPLAY_MODE) |
1442 _MASKED_BIT_ENABLE(GFX_RUN_LIST_ENABLE));
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001443 POSTING_READ(RING_MODE_GEN7(engine));
Michel Thierrydfc53c52015-09-28 13:25:12 +01001444
1445 /*
1446 * Instead of resetting the Context Status Buffer (CSB) read pointer to
1447 * zero, we need to read the write pointer from hardware and use its
1448 * value because "this register is power context save restored".
1449 * Effectively, these states have been observed:
1450 *
1451 * | Suspend-to-idle (freeze) | Suspend-to-RAM (mem) |
1452 * BDW | CSB regs not reset | CSB regs reset |
1453 * CHT | CSB regs not reset | CSB regs not reset |
Ben Widawsky5590a5f2016-01-05 10:30:05 -08001454 * SKL | ? | ? |
1455 * BXT | ? | ? |
Michel Thierrydfc53c52015-09-28 13:25:12 +01001456 */
Ben Widawsky5590a5f2016-01-05 10:30:05 -08001457 next_context_status_buffer_hw =
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001458 GEN8_CSB_WRITE_PTR(I915_READ(RING_CONTEXT_STATUS_PTR(engine)));
Michel Thierrydfc53c52015-09-28 13:25:12 +01001459
1460 /*
1461 * When the CSB registers are reset (also after power-up / gpu reset),
 1462	 * the CSB write pointer is set to all 1's, which is not valid; use '5' in
 1463	 * this special case so that the first element read is CSB[0].
1464 */
1465 if (next_context_status_buffer_hw == GEN8_CSB_PTR_MASK)
1466 next_context_status_buffer_hw = (GEN8_CSB_ENTRIES - 1);
1467
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001468 engine->next_context_status_buffer = next_context_status_buffer_hw;
1469 DRM_DEBUG_DRIVER("Execlists enabled for %s\n", engine->name);
Oscar Mateo9b1136d2014-07-24 17:04:24 +01001470
Tomas Elffc0768c2016-03-21 16:26:59 +00001471 intel_engine_init_hangcheck(engine);
Oscar Mateo9b1136d2014-07-24 17:04:24 +01001472
Peter Antoine0ccdacf2016-04-13 15:03:25 +01001473 return intel_mocs_init_engine(engine);
Oscar Mateo9b1136d2014-07-24 17:04:24 +01001474}
1475
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001476static int gen8_init_render_ring(struct intel_engine_cs *engine)
Oscar Mateo9b1136d2014-07-24 17:04:24 +01001477{
Chris Wilsonc0336662016-05-06 15:40:21 +01001478 struct drm_i915_private *dev_priv = engine->i915;
Oscar Mateo9b1136d2014-07-24 17:04:24 +01001479 int ret;
1480
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001481 ret = gen8_init_common_ring(engine);
Oscar Mateo9b1136d2014-07-24 17:04:24 +01001482 if (ret)
1483 return ret;
1484
1485 /* We need to disable the AsyncFlip performance optimisations in order
1486 * to use MI_WAIT_FOR_EVENT within the CS. It should already be
1487 * programmed to '1' on all products.
1488 *
1489 * WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv,bdw,chv
1490 */
1491 I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE));
1492
Oscar Mateo9b1136d2014-07-24 17:04:24 +01001493 I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));
1494
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001495 return init_workarounds_ring(engine);
Oscar Mateo9b1136d2014-07-24 17:04:24 +01001496}
1497
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001498static int gen9_init_render_ring(struct intel_engine_cs *engine)
Damien Lespiau82ef8222015-02-09 19:33:08 +00001499{
1500 int ret;
1501
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001502 ret = gen8_init_common_ring(engine);
Damien Lespiau82ef8222015-02-09 19:33:08 +00001503 if (ret)
1504 return ret;
1505
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001506 return init_workarounds_ring(engine);
Damien Lespiau82ef8222015-02-09 19:33:08 +00001507}
1508
Michel Thierry7a01a0a2015-06-26 13:46:14 +01001509static int intel_logical_ring_emit_pdps(struct drm_i915_gem_request *req)
1510{
1511 struct i915_hw_ppgtt *ppgtt = req->ctx->ppgtt;
Tvrtko Ursulin4a570db2016-03-16 11:00:38 +00001512 struct intel_engine_cs *engine = req->engine;
Michel Thierry7a01a0a2015-06-26 13:46:14 +01001513 struct intel_ringbuffer *ringbuf = req->ringbuf;
1514 const int num_lri_cmds = GEN8_LEGACY_PDPES * 2;
1515 int i, ret;
1516
Chris Wilson987046a2016-04-28 09:56:46 +01001517 ret = intel_ring_begin(req, num_lri_cmds * 2 + 2);
Michel Thierry7a01a0a2015-06-26 13:46:14 +01001518 if (ret)
1519 return ret;
1520
1521 intel_logical_ring_emit(ringbuf, MI_LOAD_REGISTER_IMM(num_lri_cmds));
1522 for (i = GEN8_LEGACY_PDPES - 1; i >= 0; i--) {
1523 const dma_addr_t pd_daddr = i915_page_dir_dma_addr(ppgtt, i);
1524
Tvrtko Ursuline2f80392016-03-16 11:00:36 +00001525 intel_logical_ring_emit_reg(ringbuf,
1526 GEN8_RING_PDP_UDW(engine, i));
Michel Thierry7a01a0a2015-06-26 13:46:14 +01001527 intel_logical_ring_emit(ringbuf, upper_32_bits(pd_daddr));
Tvrtko Ursuline2f80392016-03-16 11:00:36 +00001528 intel_logical_ring_emit_reg(ringbuf,
1529 GEN8_RING_PDP_LDW(engine, i));
Michel Thierry7a01a0a2015-06-26 13:46:14 +01001530 intel_logical_ring_emit(ringbuf, lower_32_bits(pd_daddr));
1531 }
1532
1533 intel_logical_ring_emit(ringbuf, MI_NOOP);
1534 intel_logical_ring_advance(ringbuf);
1535
1536 return 0;
1537}
1538
John Harrisonbe795fc2015-05-29 17:44:03 +01001539static int gen8_emit_bb_start(struct drm_i915_gem_request *req,
John Harrison8e004ef2015-02-13 11:48:10 +00001540 u64 offset, unsigned dispatch_flags)
Oscar Mateo15648582014-07-24 17:04:32 +01001541{
John Harrisonbe795fc2015-05-29 17:44:03 +01001542 struct intel_ringbuffer *ringbuf = req->ringbuf;
John Harrison8e004ef2015-02-13 11:48:10 +00001543 bool ppgtt = !(dispatch_flags & I915_DISPATCH_SECURE);
Oscar Mateo15648582014-07-24 17:04:32 +01001544 int ret;
1545
Michel Thierry7a01a0a2015-06-26 13:46:14 +01001546	/* Don't rely on the hw updating PDPs, especially in lite-restore.
1547 * Ideally, we should set Force PD Restore in ctx descriptor,
1548 * but we can't. Force Restore would be a second option, but
1549 * it is unsafe in case of lite-restore (because the ctx is
Michel Thierry2dba3232015-07-30 11:06:23 +01001550 * not idle). PML4 is allocated during ppgtt init so this is
 1551	 * not needed in 48-bit. */
Michel Thierry7a01a0a2015-06-26 13:46:14 +01001552 if (req->ctx->ppgtt &&
Tvrtko Ursulin666796d2016-03-16 11:00:39 +00001553 (intel_engine_flag(req->engine) & req->ctx->ppgtt->pd_dirty_rings)) {
Zhiyuan Lv331f38e2015-08-28 15:41:14 +08001554 if (!USES_FULL_48BIT_PPGTT(req->i915) &&
Chris Wilsonc0336662016-05-06 15:40:21 +01001555 !intel_vgpu_active(req->i915)) {
Michel Thierry2dba3232015-07-30 11:06:23 +01001556 ret = intel_logical_ring_emit_pdps(req);
1557 if (ret)
1558 return ret;
1559 }
Michel Thierry7a01a0a2015-06-26 13:46:14 +01001560
Tvrtko Ursulin666796d2016-03-16 11:00:39 +00001561 req->ctx->ppgtt->pd_dirty_rings &= ~intel_engine_flag(req->engine);
Michel Thierry7a01a0a2015-06-26 13:46:14 +01001562 }
1563
Chris Wilson987046a2016-04-28 09:56:46 +01001564 ret = intel_ring_begin(req, 4);
Oscar Mateo15648582014-07-24 17:04:32 +01001565 if (ret)
1566 return ret;
1567
1568 /* FIXME(BDW): Address space and security selectors. */
Abdiel Janulgue69225282015-06-16 13:39:42 +03001569 intel_logical_ring_emit(ringbuf, MI_BATCH_BUFFER_START_GEN8 |
1570 (ppgtt<<8) |
1571 (dispatch_flags & I915_DISPATCH_RS ?
1572 MI_BATCH_RESOURCE_STREAMER : 0));
Oscar Mateo15648582014-07-24 17:04:32 +01001573 intel_logical_ring_emit(ringbuf, lower_32_bits(offset));
1574 intel_logical_ring_emit(ringbuf, upper_32_bits(offset));
1575 intel_logical_ring_emit(ringbuf, MI_NOOP);
1576 intel_logical_ring_advance(ringbuf);
1577
1578 return 0;
1579}
1580
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001581static bool gen8_logical_ring_get_irq(struct intel_engine_cs *engine)
Oscar Mateo73d477f2014-07-24 17:04:31 +01001582{
Chris Wilsonc0336662016-05-06 15:40:21 +01001583 struct drm_i915_private *dev_priv = engine->i915;
Oscar Mateo73d477f2014-07-24 17:04:31 +01001584 unsigned long flags;
1585
Daniel Vetter7cd512f2014-09-15 11:38:57 +02001586 if (WARN_ON(!intel_irqs_enabled(dev_priv)))
Oscar Mateo73d477f2014-07-24 17:04:31 +01001587 return false;
1588
1589 spin_lock_irqsave(&dev_priv->irq_lock, flags);
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001590 if (engine->irq_refcount++ == 0) {
1591 I915_WRITE_IMR(engine,
1592 ~(engine->irq_enable_mask | engine->irq_keep_mask));
1593 POSTING_READ(RING_IMR(engine->mmio_base));
Oscar Mateo73d477f2014-07-24 17:04:31 +01001594 }
1595 spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
1596
1597 return true;
1598}
1599
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001600static void gen8_logical_ring_put_irq(struct intel_engine_cs *engine)
Oscar Mateo73d477f2014-07-24 17:04:31 +01001601{
Chris Wilsonc0336662016-05-06 15:40:21 +01001602 struct drm_i915_private *dev_priv = engine->i915;
Oscar Mateo73d477f2014-07-24 17:04:31 +01001603 unsigned long flags;
1604
1605 spin_lock_irqsave(&dev_priv->irq_lock, flags);
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001606 if (--engine->irq_refcount == 0) {
1607 I915_WRITE_IMR(engine, ~engine->irq_keep_mask);
1608 POSTING_READ(RING_IMR(engine->mmio_base));
Oscar Mateo73d477f2014-07-24 17:04:31 +01001609 }
1610 spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
1611}
1612
John Harrison7deb4d32015-05-29 17:43:59 +01001613static int gen8_emit_flush(struct drm_i915_gem_request *request,
Oscar Mateo47122742014-07-24 17:04:28 +01001614 u32 invalidate_domains,
1615 u32 unused)
1616{
John Harrison7deb4d32015-05-29 17:43:59 +01001617 struct intel_ringbuffer *ringbuf = request->ringbuf;
Tvrtko Ursulin4a570db2016-03-16 11:00:38 +00001618 struct intel_engine_cs *engine = ringbuf->engine;
Chris Wilsonc0336662016-05-06 15:40:21 +01001619 struct drm_i915_private *dev_priv = request->i915;
Oscar Mateo47122742014-07-24 17:04:28 +01001620 uint32_t cmd;
1621 int ret;
1622
Chris Wilson987046a2016-04-28 09:56:46 +01001623 ret = intel_ring_begin(request, 4);
Oscar Mateo47122742014-07-24 17:04:28 +01001624 if (ret)
1625 return ret;
1626
1627 cmd = MI_FLUSH_DW + 1;
1628
Chris Wilsonf0a1fb12015-01-22 13:42:00 +00001629 /* We always require a command barrier so that subsequent
1630 * commands, such as breadcrumb interrupts, are strictly ordered
1631 * wrt the contents of the write cache being flushed to memory
1632 * (and thus being coherent from the CPU).
1633 */
1634 cmd |= MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW;
1635
1636 if (invalidate_domains & I915_GEM_GPU_DOMAINS) {
1637 cmd |= MI_INVALIDATE_TLB;
Tvrtko Ursulin4a570db2016-03-16 11:00:38 +00001638 if (engine == &dev_priv->engine[VCS])
Chris Wilsonf0a1fb12015-01-22 13:42:00 +00001639 cmd |= MI_INVALIDATE_BSD;
Oscar Mateo47122742014-07-24 17:04:28 +01001640 }
1641
1642 intel_logical_ring_emit(ringbuf, cmd);
1643 intel_logical_ring_emit(ringbuf,
1644 I915_GEM_HWS_SCRATCH_ADDR |
1645 MI_FLUSH_DW_USE_GTT);
1646 intel_logical_ring_emit(ringbuf, 0); /* upper addr */
1647 intel_logical_ring_emit(ringbuf, 0); /* value */
1648 intel_logical_ring_advance(ringbuf);
1649
1650 return 0;
1651}
1652
John Harrison7deb4d32015-05-29 17:43:59 +01001653static int gen8_emit_flush_render(struct drm_i915_gem_request *request,
Oscar Mateo47122742014-07-24 17:04:28 +01001654 u32 invalidate_domains,
1655 u32 flush_domains)
1656{
John Harrison7deb4d32015-05-29 17:43:59 +01001657 struct intel_ringbuffer *ringbuf = request->ringbuf;
Tvrtko Ursulin4a570db2016-03-16 11:00:38 +00001658 struct intel_engine_cs *engine = ringbuf->engine;
Tvrtko Ursuline2f80392016-03-16 11:00:36 +00001659 u32 scratch_addr = engine->scratch.gtt_offset + 2 * CACHELINE_BYTES;
Ben Widawsky1a5a9ce2015-12-17 09:49:57 -08001660 bool vf_flush_wa = false;
Oscar Mateo47122742014-07-24 17:04:28 +01001661 u32 flags = 0;
1662 int ret;
1663
1664 flags |= PIPE_CONTROL_CS_STALL;
1665
1666 if (flush_domains) {
1667 flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
1668 flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
Francisco Jerez965fd602016-01-13 18:59:39 -08001669 flags |= PIPE_CONTROL_DC_FLUSH_ENABLE;
Chris Wilson40a24482015-08-21 16:08:41 +01001670 flags |= PIPE_CONTROL_FLUSH_ENABLE;
Oscar Mateo47122742014-07-24 17:04:28 +01001671 }
1672
1673 if (invalidate_domains) {
1674 flags |= PIPE_CONTROL_TLB_INVALIDATE;
1675 flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
1676 flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
1677 flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
1678 flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
1679 flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
1680 flags |= PIPE_CONTROL_QW_WRITE;
1681 flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;
Oscar Mateo47122742014-07-24 17:04:28 +01001682
Ben Widawsky1a5a9ce2015-12-17 09:49:57 -08001683 /*
1684 * On GEN9: before VF_CACHE_INVALIDATE we need to emit a NULL
1685 * pipe control.
1686 */
Chris Wilsonc0336662016-05-06 15:40:21 +01001687 if (IS_GEN9(request->i915))
Ben Widawsky1a5a9ce2015-12-17 09:49:57 -08001688 vf_flush_wa = true;
1689 }
Imre Deak9647ff32015-01-25 13:27:11 -08001690
Chris Wilson987046a2016-04-28 09:56:46 +01001691 ret = intel_ring_begin(request, vf_flush_wa ? 12 : 6);
Oscar Mateo47122742014-07-24 17:04:28 +01001692 if (ret)
1693 return ret;
1694
Imre Deak9647ff32015-01-25 13:27:11 -08001695 if (vf_flush_wa) {
1696 intel_logical_ring_emit(ringbuf, GFX_OP_PIPE_CONTROL(6));
1697 intel_logical_ring_emit(ringbuf, 0);
1698 intel_logical_ring_emit(ringbuf, 0);
1699 intel_logical_ring_emit(ringbuf, 0);
1700 intel_logical_ring_emit(ringbuf, 0);
1701 intel_logical_ring_emit(ringbuf, 0);
1702 }
1703
Oscar Mateo47122742014-07-24 17:04:28 +01001704 intel_logical_ring_emit(ringbuf, GFX_OP_PIPE_CONTROL(6));
1705 intel_logical_ring_emit(ringbuf, flags);
1706 intel_logical_ring_emit(ringbuf, scratch_addr);
1707 intel_logical_ring_emit(ringbuf, 0);
1708 intel_logical_ring_emit(ringbuf, 0);
1709 intel_logical_ring_emit(ringbuf, 0);
1710 intel_logical_ring_advance(ringbuf);
1711
1712 return 0;
1713}
1714
Chris Wilsonc04e0f32016-04-09 10:57:54 +01001715static u32 gen8_get_seqno(struct intel_engine_cs *engine)
Oscar Mateoe94e37a2014-07-24 17:04:25 +01001716{
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001717 return intel_read_status_page(engine, I915_GEM_HWS_INDEX);
Oscar Mateoe94e37a2014-07-24 17:04:25 +01001718}
1719
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001720static void gen8_set_seqno(struct intel_engine_cs *engine, u32 seqno)
Oscar Mateoe94e37a2014-07-24 17:04:25 +01001721{
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001722 intel_write_status_page(engine, I915_GEM_HWS_INDEX, seqno);
Oscar Mateoe94e37a2014-07-24 17:04:25 +01001723}
1724
Chris Wilsonc04e0f32016-04-09 10:57:54 +01001725static void bxt_a_seqno_barrier(struct intel_engine_cs *engine)
Imre Deak319404d2015-08-14 18:35:27 +03001726{
Imre Deak319404d2015-08-14 18:35:27 +03001727 /*
1728 * On BXT A steppings there is a HW coherency issue whereby the
1729 * MI_STORE_DATA_IMM storing the completed request's seqno
1730 * occasionally doesn't invalidate the CPU cache. Work around this by
1731 * clflushing the corresponding cacheline whenever the caller wants
1732 * the coherency to be guaranteed. Note that this cacheline is known
1733 * to be clean at this point, since we only write it in
1734 * bxt_a_set_seqno(), where we also do a clflush after the write. So
1735 * this clflush in practice becomes an invalidate operation.
1736 */
Chris Wilsonc04e0f32016-04-09 10:57:54 +01001737 intel_flush_status_page(engine, I915_GEM_HWS_INDEX);
Imre Deak319404d2015-08-14 18:35:27 +03001738}
1739
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001740static void bxt_a_set_seqno(struct intel_engine_cs *engine, u32 seqno)
Imre Deak319404d2015-08-14 18:35:27 +03001741{
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001742 intel_write_status_page(engine, I915_GEM_HWS_INDEX, seqno);
Imre Deak319404d2015-08-14 18:35:27 +03001743
 1744	/* See bxt_a_seqno_barrier() explaining the reason for the clflush. */
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001745 intel_flush_status_page(engine, I915_GEM_HWS_INDEX);
Imre Deak319404d2015-08-14 18:35:27 +03001746}
1747
Chris Wilson7c17d372016-01-20 15:43:35 +02001748/*
1749 * Reserve space for 2 NOOPs at the end of each request to be
1750 * used as a workaround for not being allowed to do lite
1751 * restore with HEAD==TAIL (WaIdleLiteRestore).
1752 */
1753#define WA_TAIL_DWORDS 2
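/*
 * Illustrative note (sketch, not authoritative): this is why the emit
 * functions below ask intel_ring_begin() for "6 + WA_TAIL_DWORDS" and
 * "8 + WA_TAIL_DWORDS" dwords; the two extra dwords leave room for
 * intel_logical_ring_advance_and_submit() to append MI_NOOP padding so a
 * lite restore never resubmits the context with HEAD == TAIL.
 */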
1754
John Harrisonc4e76632015-05-29 17:44:01 +01001755static int gen8_emit_request(struct drm_i915_gem_request *request)
Oscar Mateo4da46e12014-07-24 17:04:27 +01001756{
John Harrisonc4e76632015-05-29 17:44:01 +01001757 struct intel_ringbuffer *ringbuf = request->ringbuf;
Oscar Mateo4da46e12014-07-24 17:04:27 +01001758 int ret;
1759
Chris Wilson987046a2016-04-28 09:56:46 +01001760 ret = intel_ring_begin(request, 6 + WA_TAIL_DWORDS);
Oscar Mateo4da46e12014-07-24 17:04:27 +01001761 if (ret)
1762 return ret;
1763
Chris Wilson7c17d372016-01-20 15:43:35 +02001764 /* w/a: bit 5 needs to be zero for MI_FLUSH_DW address. */
1765 BUILD_BUG_ON(I915_GEM_HWS_INDEX_ADDR & (1 << 5));
Oscar Mateo4da46e12014-07-24 17:04:27 +01001766
Oscar Mateo4da46e12014-07-24 17:04:27 +01001767 intel_logical_ring_emit(ringbuf,
Chris Wilson7c17d372016-01-20 15:43:35 +02001768 (MI_FLUSH_DW + 1) | MI_FLUSH_DW_OP_STOREDW);
1769 intel_logical_ring_emit(ringbuf,
Chris Wilsona58c01a2016-04-29 13:18:21 +01001770 intel_hws_seqno_address(request->engine) |
Chris Wilson7c17d372016-01-20 15:43:35 +02001771 MI_FLUSH_DW_USE_GTT);
Oscar Mateo4da46e12014-07-24 17:04:27 +01001772 intel_logical_ring_emit(ringbuf, 0);
John Harrisonc4e76632015-05-29 17:44:01 +01001773 intel_logical_ring_emit(ringbuf, i915_gem_request_get_seqno(request));
Oscar Mateo4da46e12014-07-24 17:04:27 +01001774 intel_logical_ring_emit(ringbuf, MI_USER_INTERRUPT);
1775 intel_logical_ring_emit(ringbuf, MI_NOOP);
Chris Wilson7c17d372016-01-20 15:43:35 +02001776 return intel_logical_ring_advance_and_submit(request);
1777}
Oscar Mateo4da46e12014-07-24 17:04:27 +01001778
Chris Wilson7c17d372016-01-20 15:43:35 +02001779static int gen8_emit_request_render(struct drm_i915_gem_request *request)
1780{
1781 struct intel_ringbuffer *ringbuf = request->ringbuf;
1782 int ret;
1783
Chris Wilson987046a2016-04-28 09:56:46 +01001784 ret = intel_ring_begin(request, 8 + WA_TAIL_DWORDS);
Chris Wilson7c17d372016-01-20 15:43:35 +02001785 if (ret)
1786 return ret;
1787
Michał Winiarskice81a652016-04-12 15:51:55 +02001788 /* We're using qword write, seqno should be aligned to 8 bytes. */
1789 BUILD_BUG_ON(I915_GEM_HWS_INDEX & 1);
1790
Chris Wilson7c17d372016-01-20 15:43:35 +02001791	/* w/a: for post sync ops following a GPGPU operation we
1792 * need a prior CS_STALL, which is emitted by the flush
1793 * following the batch.
Michel Thierry53292cd2015-04-15 18:11:33 +01001794 */
Michał Winiarskice81a652016-04-12 15:51:55 +02001795 intel_logical_ring_emit(ringbuf, GFX_OP_PIPE_CONTROL(6));
Chris Wilson7c17d372016-01-20 15:43:35 +02001796 intel_logical_ring_emit(ringbuf,
1797 (PIPE_CONTROL_GLOBAL_GTT_IVB |
1798 PIPE_CONTROL_CS_STALL |
1799 PIPE_CONTROL_QW_WRITE));
Chris Wilsona58c01a2016-04-29 13:18:21 +01001800 intel_logical_ring_emit(ringbuf,
1801 intel_hws_seqno_address(request->engine));
Chris Wilson7c17d372016-01-20 15:43:35 +02001802 intel_logical_ring_emit(ringbuf, 0);
1803 intel_logical_ring_emit(ringbuf, i915_gem_request_get_seqno(request));
Michał Winiarskice81a652016-04-12 15:51:55 +02001804 /* We're thrashing one dword of HWS. */
1805 intel_logical_ring_emit(ringbuf, 0);
Chris Wilson7c17d372016-01-20 15:43:35 +02001806 intel_logical_ring_emit(ringbuf, MI_USER_INTERRUPT);
Michał Winiarskice81a652016-04-12 15:51:55 +02001807 intel_logical_ring_emit(ringbuf, MI_NOOP);
Chris Wilson7c17d372016-01-20 15:43:35 +02001808 return intel_logical_ring_advance_and_submit(request);
Oscar Mateo4da46e12014-07-24 17:04:27 +01001809}
1810
John Harrisonbe013632015-05-29 17:43:45 +01001811static int intel_lr_context_render_state_init(struct drm_i915_gem_request *req)
Damien Lespiaucef437a2015-02-10 19:32:19 +00001812{
Damien Lespiaucef437a2015-02-10 19:32:19 +00001813 struct render_state so;
Damien Lespiaucef437a2015-02-10 19:32:19 +00001814 int ret;
1815
Tvrtko Ursulin4a570db2016-03-16 11:00:38 +00001816 ret = i915_gem_render_state_prepare(req->engine, &so);
Damien Lespiaucef437a2015-02-10 19:32:19 +00001817 if (ret)
1818 return ret;
1819
1820 if (so.rodata == NULL)
1821 return 0;
1822
Tvrtko Ursulin4a570db2016-03-16 11:00:38 +00001823 ret = req->engine->emit_bb_start(req, so.ggtt_offset,
John Harrisonbe013632015-05-29 17:43:45 +01001824 I915_DISPATCH_SECURE);
Damien Lespiaucef437a2015-02-10 19:32:19 +00001825 if (ret)
1826 goto out;
1827
Tvrtko Ursulin4a570db2016-03-16 11:00:38 +00001828 ret = req->engine->emit_bb_start(req,
Arun Siluvery84e81022015-07-20 10:46:10 +01001829 (so.ggtt_offset + so.aux_batch_offset),
1830 I915_DISPATCH_SECURE);
1831 if (ret)
1832 goto out;
1833
John Harrisonb2af0372015-05-29 17:43:50 +01001834 i915_vma_move_to_active(i915_gem_obj_to_ggtt(so.obj), req);
Damien Lespiaucef437a2015-02-10 19:32:19 +00001835
Damien Lespiaucef437a2015-02-10 19:32:19 +00001836out:
1837 i915_gem_render_state_fini(&so);
1838 return ret;
1839}
1840
John Harrison87531812015-05-29 17:43:44 +01001841static int gen8_init_rcs_context(struct drm_i915_gem_request *req)
Thomas Daniele7778be2014-12-02 12:50:48 +00001842{
1843 int ret;
1844
John Harrisone2be4fa2015-05-29 17:43:54 +01001845 ret = intel_logical_ring_workarounds_emit(req);
Thomas Daniele7778be2014-12-02 12:50:48 +00001846 if (ret)
1847 return ret;
1848
Peter Antoine3bbaba02015-07-10 20:13:11 +03001849 ret = intel_rcs_context_init_mocs(req);
1850 /*
 1851	 * Failing to program the MOCS is non-fatal. The system will not
1852 * run at peak performance. So generate an error and carry on.
1853 */
1854 if (ret)
1855 DRM_ERROR("MOCS failed to program: expect performance issues.\n");
1856
John Harrisonbe013632015-05-29 17:43:45 +01001857 return intel_lr_context_render_state_init(req);
Thomas Daniele7778be2014-12-02 12:50:48 +00001858}
1859
Oscar Mateo73e4d072014-07-24 17:04:48 +01001860/**
1861 * intel_logical_ring_cleanup() - deallocate the Engine Command Streamer
1862 *
 1863 * @engine: Engine Command Streamer.
1864 *
1865 */
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001866void intel_logical_ring_cleanup(struct intel_engine_cs *engine)
Oscar Mateo454afeb2014-07-24 17:04:22 +01001867{
John Harrison6402c332014-10-31 12:00:26 +00001868 struct drm_i915_private *dev_priv;
Oscar Mateo9832b9d2014-07-24 17:04:30 +01001869
Tvrtko Ursulin117897f2016-03-16 11:00:40 +00001870 if (!intel_engine_initialized(engine))
Oscar Mateo48d82382014-07-24 17:04:23 +01001871 return;
1872
Tvrtko Ursulin27af5ee2016-04-04 12:11:56 +01001873 /*
 1874	 * Tasklet cannot be active at this point due to intel_mark_active/idle,
1875 * so this is just for documentation.
1876 */
1877 if (WARN_ON(test_bit(TASKLET_STATE_SCHED, &engine->irq_tasklet.state)))
1878 tasklet_kill(&engine->irq_tasklet);
1879
Chris Wilsonc0336662016-05-06 15:40:21 +01001880 dev_priv = engine->i915;
John Harrison6402c332014-10-31 12:00:26 +00001881
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001882 if (engine->buffer) {
1883 intel_logical_ring_stop(engine);
1884 WARN_ON((I915_READ_MODE(engine) & MODE_IDLE) == 0);
Dave Gordonb0366a52015-12-08 15:02:36 +00001885 }
Oscar Mateo48d82382014-07-24 17:04:23 +01001886
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001887 if (engine->cleanup)
1888 engine->cleanup(engine);
Oscar Mateo48d82382014-07-24 17:04:23 +01001889
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001890 i915_cmd_parser_fini_ring(engine);
1891 i915_gem_batch_pool_fini(&engine->batch_pool);
Oscar Mateo48d82382014-07-24 17:04:23 +01001892
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001893 if (engine->status_page.obj) {
Tvrtko Ursulin7d774ca2016-04-12 15:40:42 +01001894 i915_gem_object_unpin_map(engine->status_page.obj);
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001895 engine->status_page.obj = NULL;
Oscar Mateo48d82382014-07-24 17:04:23 +01001896 }
Chris Wilson24f1d3c2016-04-28 09:56:53 +01001897 intel_lr_context_unpin(dev_priv->kernel_context, engine);
Arun Siluvery17ee9502015-06-19 19:07:01 +01001898
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001899 engine->idle_lite_restore_wa = 0;
1900 engine->disable_lite_restore_wa = false;
1901 engine->ctx_desc_template = 0;
Tvrtko Ursulinca825802016-01-15 15:10:27 +00001902
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001903 lrc_destroy_wa_ctx_obj(engine);
Chris Wilsonc0336662016-05-06 15:40:21 +01001904 engine->i915 = NULL;
Oscar Mateo454afeb2014-07-24 17:04:22 +01001905}
1906
Tvrtko Ursulinc9cacf92016-01-12 17:32:34 +00001907static void
Chris Wilsone1382ef2016-05-06 15:40:20 +01001908logical_ring_default_vfuncs(struct intel_engine_cs *engine)
Tvrtko Ursulinc9cacf92016-01-12 17:32:34 +00001909{
1910 /* Default vfuncs which can be overriden by each engine. */
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001911 engine->init_hw = gen8_init_common_ring;
1912 engine->emit_request = gen8_emit_request;
1913 engine->emit_flush = gen8_emit_flush;
1914 engine->irq_get = gen8_logical_ring_get_irq;
1915 engine->irq_put = gen8_logical_ring_put_irq;
1916 engine->emit_bb_start = gen8_emit_bb_start;
Chris Wilsonc04e0f32016-04-09 10:57:54 +01001917 engine->get_seqno = gen8_get_seqno;
1918 engine->set_seqno = gen8_set_seqno;
Chris Wilsonc0336662016-05-06 15:40:21 +01001919 if (IS_BXT_REVID(engine->i915, 0, BXT_REVID_A1)) {
Chris Wilsonc04e0f32016-04-09 10:57:54 +01001920 engine->irq_seqno_barrier = bxt_a_seqno_barrier;
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001921 engine->set_seqno = bxt_a_set_seqno;
Tvrtko Ursulinc9cacf92016-01-12 17:32:34 +00001922 }
1923}
1924
Tvrtko Ursulind9f3af92016-01-12 17:32:35 +00001925static inline void
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001926logical_ring_default_irqs(struct intel_engine_cs *engine, unsigned shift)
Tvrtko Ursulind9f3af92016-01-12 17:32:35 +00001927{
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00001928 engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT << shift;
1929 engine->irq_keep_mask = GT_CONTEXT_SWITCH_INTERRUPT << shift;
Chris Wilsone1382ef2016-05-06 15:40:20 +01001930 init_waitqueue_head(&engine->irq_queue);
Tvrtko Ursulind9f3af92016-01-12 17:32:35 +00001931}
1932
Tvrtko Ursulin7d774ca2016-04-12 15:40:42 +01001933static int
Tvrtko Ursulin04794ad2016-04-12 15:40:41 +01001934lrc_setup_hws(struct intel_engine_cs *engine,
1935 struct drm_i915_gem_object *dctx_obj)
1936{
Tvrtko Ursulin7d774ca2016-04-12 15:40:42 +01001937 void *hws;
Tvrtko Ursulin04794ad2016-04-12 15:40:41 +01001938
1939 /* The HWSP is part of the default context object in LRC mode. */
1940 engine->status_page.gfx_addr = i915_gem_obj_ggtt_offset(dctx_obj) +
1941 LRC_PPHWSP_PN * PAGE_SIZE;
Tvrtko Ursulin7d774ca2016-04-12 15:40:42 +01001942 hws = i915_gem_object_pin_map(dctx_obj);
1943 if (IS_ERR(hws))
1944 return PTR_ERR(hws);
1945 engine->status_page.page_addr = hws + LRC_PPHWSP_PN * PAGE_SIZE;
Tvrtko Ursulin04794ad2016-04-12 15:40:41 +01001946 engine->status_page.obj = dctx_obj;
Tvrtko Ursulin7d774ca2016-04-12 15:40:42 +01001947
1948 return 0;
Tvrtko Ursulin04794ad2016-04-12 15:40:41 +01001949}
1950
Chris Wilsone1382ef2016-05-06 15:40:20 +01001951static const struct logical_ring_info {
1952 const char *name;
1953 unsigned exec_id;
1954 unsigned guc_id;
1955 u32 mmio_base;
1956 unsigned irq_shift;
1957} logical_rings[] = {
1958 [RCS] = {
1959 .name = "render ring",
1960 .exec_id = I915_EXEC_RENDER,
1961 .guc_id = GUC_RENDER_ENGINE,
1962 .mmio_base = RENDER_RING_BASE,
1963 .irq_shift = GEN8_RCS_IRQ_SHIFT,
1964 },
1965 [BCS] = {
1966 .name = "blitter ring",
1967 .exec_id = I915_EXEC_BLT,
1968 .guc_id = GUC_BLITTER_ENGINE,
1969 .mmio_base = BLT_RING_BASE,
1970 .irq_shift = GEN8_BCS_IRQ_SHIFT,
1971 },
1972 [VCS] = {
1973 .name = "bsd ring",
1974 .exec_id = I915_EXEC_BSD,
1975 .guc_id = GUC_VIDEO_ENGINE,
1976 .mmio_base = GEN6_BSD_RING_BASE,
1977 .irq_shift = GEN8_VCS1_IRQ_SHIFT,
1978 },
1979 [VCS2] = {
1980 .name = "bsd2 ring",
1981 .exec_id = I915_EXEC_BSD,
1982 .guc_id = GUC_VIDEO_ENGINE2,
1983 .mmio_base = GEN8_BSD2_RING_BASE,
1984 .irq_shift = GEN8_VCS2_IRQ_SHIFT,
1985 },
1986 [VECS] = {
1987 .name = "video enhancement ring",
1988 .exec_id = I915_EXEC_VEBOX,
1989 .guc_id = GUC_VIDEOENHANCE_ENGINE,
1990 .mmio_base = VEBOX_RING_BASE,
1991 .irq_shift = GEN8_VECS_IRQ_SHIFT,
1992 },
1993};
1994
1995static struct intel_engine_cs *
1996logical_ring_setup(struct drm_device *dev, enum intel_engine_id id)
Oscar Mateo454afeb2014-07-24 17:04:22 +01001997{
Chris Wilsone1382ef2016-05-06 15:40:20 +01001998 const struct logical_ring_info *info = &logical_rings[id];
Tvrtko Ursulin37566852016-04-12 14:37:31 +01001999 struct drm_i915_private *dev_priv = to_i915(dev);
Chris Wilsone1382ef2016-05-06 15:40:20 +01002000 struct intel_engine_cs *engine = &dev_priv->engine[id];
Tvrtko Ursulin37566852016-04-12 14:37:31 +01002001 enum forcewake_domains fw_domains;
Chris Wilsone1382ef2016-05-06 15:40:20 +01002002
2003 engine->id = id;
2004 engine->name = info->name;
2005 engine->exec_id = info->exec_id;
2006 engine->guc_id = info->guc_id;
2007 engine->mmio_base = info->mmio_base;
2008
Chris Wilsonc0336662016-05-06 15:40:21 +01002009 engine->i915 = dev_priv;
Oscar Mateo48d82382014-07-24 17:04:23 +01002010
2011 /* Intentionally left blank. */
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00002012 engine->buffer = NULL;
Oscar Mateo48d82382014-07-24 17:04:23 +01002013
Tvrtko Ursulin37566852016-04-12 14:37:31 +01002014 fw_domains = intel_uncore_forcewake_for_reg(dev_priv,
2015 RING_ELSP(engine),
2016 FW_REG_WRITE);
2017
2018 fw_domains |= intel_uncore_forcewake_for_reg(dev_priv,
2019 RING_CONTEXT_STATUS_PTR(engine),
2020 FW_REG_READ | FW_REG_WRITE);
2021
2022 fw_domains |= intel_uncore_forcewake_for_reg(dev_priv,
2023 RING_CONTEXT_STATUS_BUF_BASE(engine),
2024 FW_REG_READ);
2025
2026 engine->fw_domains = fw_domains;
2027
Chris Wilsone1382ef2016-05-06 15:40:20 +01002028 INIT_LIST_HEAD(&engine->active_list);
2029 INIT_LIST_HEAD(&engine->request_list);
2030 INIT_LIST_HEAD(&engine->buffers);
2031 INIT_LIST_HEAD(&engine->execlist_queue);
2032 spin_lock_init(&engine->execlist_lock);
2033
2034 tasklet_init(&engine->irq_tasklet,
2035 intel_lrc_irq_handler, (unsigned long)engine);
2036
2037 logical_ring_init_platform_invariants(engine);
2038 logical_ring_default_vfuncs(engine);
2039 logical_ring_default_irqs(engine, info->irq_shift);
2040
2041 intel_engine_init_hangcheck(engine);
Chris Wilsonc0336662016-05-06 15:40:21 +01002042 i915_gem_batch_pool_init(dev, &engine->batch_pool);
Chris Wilsone1382ef2016-05-06 15:40:20 +01002043
2044 return engine;
2045}
2046
2047static int
2048logical_ring_init(struct intel_engine_cs *engine)
2049{
Chris Wilsone2efd132016-05-24 14:53:34 +01002050 struct i915_gem_context *dctx = engine->i915->kernel_context;
Chris Wilsone1382ef2016-05-06 15:40:20 +01002051 int ret;
2052
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00002053 ret = i915_cmd_parser_init_ring(engine);
Oscar Mateo48d82382014-07-24 17:04:23 +01002054 if (ret)
Dave Gordonb0366a52015-12-08 15:02:36 +00002055 goto error;
Oscar Mateo48d82382014-07-24 17:04:23 +01002056
Chris Wilson978f1e02016-04-28 09:56:54 +01002057 ret = execlists_context_deferred_alloc(dctx, engine);
Nick Hoathe84fe802015-09-11 12:53:46 +01002058 if (ret)
Dave Gordonb0366a52015-12-08 15:02:36 +00002059 goto error;
Nick Hoathe84fe802015-09-11 12:53:46 +01002060
2061 /* As this is the default context, always pin it */
Chris Wilson24f1d3c2016-04-28 09:56:53 +01002062 ret = intel_lr_context_pin(dctx, engine);
Nick Hoathe84fe802015-09-11 12:53:46 +01002063 if (ret) {
Chris Wilson24f1d3c2016-04-28 09:56:53 +01002064 DRM_ERROR("Failed to pin context for %s: %d\n",
2065 engine->name, ret);
Dave Gordonb0366a52015-12-08 15:02:36 +00002066 goto error;
Nick Hoathe84fe802015-09-11 12:53:46 +01002067 }
Oscar Mateo564ddb22014-08-21 11:40:54 +01002068
Tvrtko Ursulin04794ad2016-04-12 15:40:41 +01002069 /* And setup the hardware status page. */
Tvrtko Ursulin7d774ca2016-04-12 15:40:42 +01002070 ret = lrc_setup_hws(engine, dctx->engine[engine->id].state);
2071 if (ret) {
2072 DRM_ERROR("Failed to set up hws %s: %d\n", engine->name, ret);
2073 goto error;
2074 }
Tvrtko Ursulin04794ad2016-04-12 15:40:41 +01002075
Dave Gordonb0366a52015-12-08 15:02:36 +00002076 return 0;
2077
2078error:
Tvrtko Ursulin0bc40be2016-03-16 11:00:37 +00002079 intel_logical_ring_cleanup(engine);
Oscar Mateo564ddb22014-08-21 11:40:54 +01002080 return ret;
Oscar Mateo454afeb2014-07-24 17:04:22 +01002081}
2082
2083static int logical_render_ring_init(struct drm_device *dev)
2084{
Chris Wilsone1382ef2016-05-06 15:40:20 +01002085 struct intel_engine_cs *engine = logical_ring_setup(dev, RCS);
Daniel Vetter99be1df2014-11-20 00:33:06 +01002086 int ret;
Oscar Mateo454afeb2014-07-24 17:04:22 +01002087
Oscar Mateo73d477f2014-07-24 17:04:31 +01002088 if (HAS_L3_DPF(dev))
Tvrtko Ursuline2f80392016-03-16 11:00:36 +00002089 engine->irq_keep_mask |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
Oscar Mateo454afeb2014-07-24 17:04:22 +01002090
Tvrtko Ursulinc9cacf92016-01-12 17:32:34 +00002091 /* Override some for render ring. */
Damien Lespiau82ef8222015-02-09 19:33:08 +00002092 if (INTEL_INFO(dev)->gen >= 9)
Tvrtko Ursuline2f80392016-03-16 11:00:36 +00002093 engine->init_hw = gen9_init_render_ring;
Damien Lespiau82ef8222015-02-09 19:33:08 +00002094 else
Tvrtko Ursuline2f80392016-03-16 11:00:36 +00002095 engine->init_hw = gen8_init_render_ring;
2096 engine->init_context = gen8_init_rcs_context;
2097 engine->cleanup = intel_fini_pipe_control;
2098 engine->emit_flush = gen8_emit_flush_render;
2099 engine->emit_request = gen8_emit_request_render;
Oscar Mateo9b1136d2014-07-24 17:04:24 +01002100
Tvrtko Ursuline2f80392016-03-16 11:00:36 +00002101 ret = intel_init_pipe_control(engine);
Daniel Vetter99be1df2014-11-20 00:33:06 +01002102 if (ret)
2103 return ret;
2104
Tvrtko Ursuline2f80392016-03-16 11:00:36 +00002105 ret = intel_init_workaround_bb(engine);
Arun Siluvery17ee9502015-06-19 19:07:01 +01002106 if (ret) {
2107 /*
 2108	 * We continue even if we fail to initialize the WA batch
 2109	 * because we only expect rare glitches, nothing critical
 2110	 * enough to prevent us from using the GPU.
2111 */
2112 DRM_ERROR("WA batch buffer initialization failed: %d\n",
2113 ret);
2114 }
2115
Chris Wilsone1382ef2016-05-06 15:40:20 +01002116 ret = logical_ring_init(engine);
Arun Siluveryc4db7592015-06-19 18:37:11 +01002117 if (ret) {
Tvrtko Ursuline2f80392016-03-16 11:00:36 +00002118 lrc_destroy_wa_ctx_obj(engine);
Arun Siluveryc4db7592015-06-19 18:37:11 +01002119 }
Arun Siluvery17ee9502015-06-19 19:07:01 +01002120
2121 return ret;
Oscar Mateo454afeb2014-07-24 17:04:22 +01002122}

static int logical_bsd_ring_init(struct drm_device *dev)
{
	struct intel_engine_cs *engine = logical_ring_setup(dev, VCS);

	return logical_ring_init(engine);
}

static int logical_bsd2_ring_init(struct drm_device *dev)
{
	struct intel_engine_cs *engine = logical_ring_setup(dev, VCS2);

	return logical_ring_init(engine);
}

static int logical_blt_ring_init(struct drm_device *dev)
{
	struct intel_engine_cs *engine = logical_ring_setup(dev, BCS);

	return logical_ring_init(engine);
}

static int logical_vebox_ring_init(struct drm_device *dev)
{
	struct intel_engine_cs *engine = logical_ring_setup(dev, VECS);

	return logical_ring_init(engine);
}

/**
 * intel_logical_rings_init() - allocate, populate and init the Engine Command Streamers
 * @dev: DRM device.
 *
 * Initializes the engines for Execlists submission (the equivalent in the
 * legacy ringbuffer submission world is i915_gem_init_engines), and does so
 * only for those engines that are actually present in the hardware.
 *
 * Return: non-zero if the initialization failed.
 */
int intel_logical_rings_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	ret = logical_render_ring_init(dev);
	if (ret)
		return ret;

	if (HAS_BSD(dev)) {
		ret = logical_bsd_ring_init(dev);
		if (ret)
			goto cleanup_render_ring;
	}

	if (HAS_BLT(dev)) {
		ret = logical_blt_ring_init(dev);
		if (ret)
			goto cleanup_bsd_ring;
	}

	if (HAS_VEBOX(dev)) {
		ret = logical_vebox_ring_init(dev);
		if (ret)
			goto cleanup_blt_ring;
	}

	if (HAS_BSD2(dev)) {
		ret = logical_bsd2_ring_init(dev);
		if (ret)
			goto cleanup_vebox_ring;
	}

	return 0;

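	/*
	 * The labels below unwind in the reverse order of initialization, so
	 * jumping to one of them tears down only the engines that were
	 * successfully set up before the failure.
	 */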
cleanup_vebox_ring:
	intel_logical_ring_cleanup(&dev_priv->engine[VECS]);
cleanup_blt_ring:
	intel_logical_ring_cleanup(&dev_priv->engine[BCS]);
cleanup_bsd_ring:
	intel_logical_ring_cleanup(&dev_priv->engine[VCS]);
cleanup_render_ring:
	intel_logical_ring_cleanup(&dev_priv->engine[RCS]);

	return ret;
}

static u32
make_rpcs(struct drm_i915_private *dev_priv)
{
	u32 rpcs = 0;

	/*
	 * No explicit RPCS request is needed to ensure full
	 * slice/subslice/EU enablement prior to Gen9.
	 */
	if (INTEL_GEN(dev_priv) < 9)
		return 0;

	/*
	 * Starting in Gen9, render power gating can leave
	 * slice/subslice/EU in a partially enabled state. We
	 * must make an explicit request through RPCS for full
	 * enablement.
	 */
	if (INTEL_INFO(dev_priv)->has_slice_pg) {
		rpcs |= GEN8_RPCS_S_CNT_ENABLE;
		rpcs |= INTEL_INFO(dev_priv)->slice_total <<
			GEN8_RPCS_S_CNT_SHIFT;
		rpcs |= GEN8_RPCS_ENABLE;
	}

	if (INTEL_INFO(dev_priv)->has_subslice_pg) {
		rpcs |= GEN8_RPCS_SS_CNT_ENABLE;
		rpcs |= INTEL_INFO(dev_priv)->subslice_per_slice <<
			GEN8_RPCS_SS_CNT_SHIFT;
		rpcs |= GEN8_RPCS_ENABLE;
	}

	if (INTEL_INFO(dev_priv)->has_eu_pg) {
		rpcs |= INTEL_INFO(dev_priv)->eu_per_subslice <<
			GEN8_RPCS_EU_MIN_SHIFT;
		rpcs |= INTEL_INFO(dev_priv)->eu_per_subslice <<
			GEN8_RPCS_EU_MAX_SHIFT;
		rpcs |= GEN8_RPCS_ENABLE;
	}

	return rpcs;
}
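
/*
 * Illustrative sketch only (the counts below are made up, not taken from any
 * real part): on a Gen9 device with slice, subslice and EU power gating and
 * 2 slices, 4 subslices per slice and 8 EUs per subslice, make_rpcs() would
 * return
 *
 *	GEN8_RPCS_ENABLE |
 *	GEN8_RPCS_S_CNT_ENABLE  | (2 << GEN8_RPCS_S_CNT_SHIFT)  |
 *	GEN8_RPCS_SS_CNT_ENABLE | (4 << GEN8_RPCS_SS_CNT_SHIFT) |
 *	(8 << GEN8_RPCS_EU_MIN_SHIFT) | (8 << GEN8_RPCS_EU_MAX_SHIFT)
 *
 * i.e. an explicit request for full slice/subslice/EU enablement.
 */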

static u32 intel_lr_indirect_ctx_offset(struct intel_engine_cs *engine)
{
	u32 indirect_ctx_offset;

	switch (INTEL_GEN(engine->i915)) {
	default:
		MISSING_CASE(INTEL_GEN(engine->i915));
		/* fall through */
	case 9:
		indirect_ctx_offset =
			GEN9_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT;
		break;
	case 8:
		indirect_ctx_offset =
			GEN8_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT;
		break;
	}

	return indirect_ctx_offset;
}
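
/*
 * Note that this default is not written to the context image verbatim:
 * populate_lr_context() below shifts the returned value left by 6 bits
 * before storing it in the CTX_RCS_INDIRECT_CTX_OFFSET slot.
 */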

static int
populate_lr_context(struct i915_gem_context *ctx,
		    struct drm_i915_gem_object *ctx_obj,
		    struct intel_engine_cs *engine,
		    struct intel_ringbuffer *ringbuf)
{
	struct drm_i915_private *dev_priv = ctx->i915;
	struct i915_hw_ppgtt *ppgtt = ctx->ppgtt;
	void *vaddr;
	u32 *reg_state;
	int ret;

	if (!ppgtt)
		ppgtt = dev_priv->mm.aliasing_ppgtt;

	ret = i915_gem_object_set_to_cpu_domain(ctx_obj, true);
	if (ret) {
		DRM_DEBUG_DRIVER("Could not set to CPU domain\n");
		return ret;
	}

	vaddr = i915_gem_object_pin_map(ctx_obj);
	if (IS_ERR(vaddr)) {
		ret = PTR_ERR(vaddr);
		DRM_DEBUG_DRIVER("Could not map object pages! (%d)\n", ret);
		return ret;
	}
	ctx_obj->dirty = true;

	/* The second page of the context object contains some fields which must
	 * be set up prior to the first execution. */
	reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE;

	/* A context is actually a big batch buffer with several MI_LOAD_REGISTER_IMM
	 * commands followed by (reg, value) pairs. The values we are setting here are
	 * only for the first context restore: on a subsequent save, the GPU will
	 * recreate this batchbuffer with new values (including all the missing
	 * MI_LOAD_REGISTER_IMM commands that we are not initializing here). */
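	/*
	 * Rough sketch of what is built below: reg_state[CTX_LRI_HEADER_0]
	 * holds an MI_LOAD_REGISTER_IMM(n) header, and each ASSIGN_CTX_REG()
	 * invocation then appears to fill one (register offset, value) pair,
	 * i.e. roughly
	 *
	 *	reg_state[CTX_RING_TAIL]     = RING_TAIL(engine->mmio_base);
	 *	reg_state[CTX_RING_TAIL + 1] = 0;
	 *
	 * which is why the later fixups poke reg_state[CTX_FOO + 1] directly.
	 */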
	reg_state[CTX_LRI_HEADER_0] =
		MI_LOAD_REGISTER_IMM(engine->id == RCS ? 14 : 11) | MI_LRI_FORCE_POSTED;
	ASSIGN_CTX_REG(reg_state, CTX_CONTEXT_CONTROL,
		       RING_CONTEXT_CONTROL(engine),
		       _MASKED_BIT_ENABLE(CTX_CTRL_INHIBIT_SYN_CTX_SWITCH |
					  CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT |
					  (HAS_RESOURCE_STREAMER(dev_priv) ?
					   CTX_CTRL_RS_CTX_ENABLE : 0)));
	ASSIGN_CTX_REG(reg_state, CTX_RING_HEAD, RING_HEAD(engine->mmio_base),
		       0);
	ASSIGN_CTX_REG(reg_state, CTX_RING_TAIL, RING_TAIL(engine->mmio_base),
		       0);
	/* Ring buffer start address is not known until the buffer is pinned.
	 * It is written to the context image in execlists_update_context().
	 */
	ASSIGN_CTX_REG(reg_state, CTX_RING_BUFFER_START,
		       RING_START(engine->mmio_base), 0);
	ASSIGN_CTX_REG(reg_state, CTX_RING_BUFFER_CONTROL,
		       RING_CTL(engine->mmio_base),
		       ((ringbuf->size - PAGE_SIZE) & RING_NR_PAGES) | RING_VALID);
	ASSIGN_CTX_REG(reg_state, CTX_BB_HEAD_U,
		       RING_BBADDR_UDW(engine->mmio_base), 0);
	ASSIGN_CTX_REG(reg_state, CTX_BB_HEAD_L,
		       RING_BBADDR(engine->mmio_base), 0);
	ASSIGN_CTX_REG(reg_state, CTX_BB_STATE,
		       RING_BBSTATE(engine->mmio_base),
		       RING_BB_PPGTT);
	ASSIGN_CTX_REG(reg_state, CTX_SECOND_BB_HEAD_U,
		       RING_SBBADDR_UDW(engine->mmio_base), 0);
	ASSIGN_CTX_REG(reg_state, CTX_SECOND_BB_HEAD_L,
		       RING_SBBADDR(engine->mmio_base), 0);
	ASSIGN_CTX_REG(reg_state, CTX_SECOND_BB_STATE,
		       RING_SBBSTATE(engine->mmio_base), 0);
	if (engine->id == RCS) {
		ASSIGN_CTX_REG(reg_state, CTX_BB_PER_CTX_PTR,
			       RING_BB_PER_CTX_PTR(engine->mmio_base), 0);
		ASSIGN_CTX_REG(reg_state, CTX_RCS_INDIRECT_CTX,
			       RING_INDIRECT_CTX(engine->mmio_base), 0);
		ASSIGN_CTX_REG(reg_state, CTX_RCS_INDIRECT_CTX_OFFSET,
			       RING_INDIRECT_CTX_OFFSET(engine->mmio_base), 0);
		if (engine->wa_ctx.obj) {
			struct i915_ctx_workarounds *wa_ctx = &engine->wa_ctx;
			uint32_t ggtt_offset = i915_gem_obj_ggtt_offset(wa_ctx->obj);

			reg_state[CTX_RCS_INDIRECT_CTX+1] =
				(ggtt_offset + wa_ctx->indirect_ctx.offset * sizeof(uint32_t)) |
				(wa_ctx->indirect_ctx.size / CACHELINE_DWORDS);

			reg_state[CTX_RCS_INDIRECT_CTX_OFFSET+1] =
				intel_lr_indirect_ctx_offset(engine) << 6;

			reg_state[CTX_BB_PER_CTX_PTR+1] =
				(ggtt_offset + wa_ctx->per_ctx.offset * sizeof(uint32_t)) |
				0x01;
		}
	}
	reg_state[CTX_LRI_HEADER_1] = MI_LOAD_REGISTER_IMM(9) | MI_LRI_FORCE_POSTED;
	ASSIGN_CTX_REG(reg_state, CTX_CTX_TIMESTAMP,
		       RING_CTX_TIMESTAMP(engine->mmio_base), 0);
	/* PDP values will be assigned later if needed */
	ASSIGN_CTX_REG(reg_state, CTX_PDP3_UDW, GEN8_RING_PDP_UDW(engine, 3),
		       0);
	ASSIGN_CTX_REG(reg_state, CTX_PDP3_LDW, GEN8_RING_PDP_LDW(engine, 3),
		       0);
	ASSIGN_CTX_REG(reg_state, CTX_PDP2_UDW, GEN8_RING_PDP_UDW(engine, 2),
		       0);
	ASSIGN_CTX_REG(reg_state, CTX_PDP2_LDW, GEN8_RING_PDP_LDW(engine, 2),
		       0);
	ASSIGN_CTX_REG(reg_state, CTX_PDP1_UDW, GEN8_RING_PDP_UDW(engine, 1),
		       0);
	ASSIGN_CTX_REG(reg_state, CTX_PDP1_LDW, GEN8_RING_PDP_LDW(engine, 1),
		       0);
	ASSIGN_CTX_REG(reg_state, CTX_PDP0_UDW, GEN8_RING_PDP_UDW(engine, 0),
		       0);
	ASSIGN_CTX_REG(reg_state, CTX_PDP0_LDW, GEN8_RING_PDP_LDW(engine, 0),
		       0);

	if (USES_FULL_48BIT_PPGTT(ppgtt->base.dev)) {
		/* 64b PPGTT (48bit canonical)
		 * PDP0_DESCRIPTOR contains the base address of the PML4; the
		 * other PDP descriptors are ignored.
		 */
		ASSIGN_CTX_PML4(ppgtt, reg_state);
	} else {
		/* 32b PPGTT
		 * PDP*_DESCRIPTOR contains the base address of the space supported.
		 * With dynamic page allocation, PDPs may not be allocated at
		 * this point. Point the unallocated PDPs to the scratch page.
		 */
		execlists_update_context_pdps(ppgtt, reg_state);
	}

	if (engine->id == RCS) {
		reg_state[CTX_LRI_HEADER_2] = MI_LOAD_REGISTER_IMM(1);
		ASSIGN_CTX_REG(reg_state, CTX_R_PWR_CLK_STATE, GEN8_R_PWR_CLK_STATE,
			       make_rpcs(dev_priv));
	}

	i915_gem_object_unpin_map(ctx_obj);

	return 0;
}

/**
 * intel_lr_context_size() - return the size of the context for an engine
 * @engine: which engine to find the context size for
 *
 * Each engine may require a different amount of space for a context image,
 * so when allocating (or copying) an image, this function can be used to
 * find the right size for the specific engine.
 *
 * Return: size (in bytes) of an engine-specific context image
 *
 * Note: this size includes the HWSP, which is part of the context image
 * in LRC mode, but does not include the "shared data page" used with
 * GuC submission. The caller should account for this if using the GuC.
 */
uint32_t intel_lr_context_size(struct intel_engine_cs *engine)
{
	int ret = 0;

	WARN_ON(INTEL_GEN(engine->i915) < 8);

	switch (engine->id) {
	case RCS:
		if (INTEL_GEN(engine->i915) >= 9)
			ret = GEN9_LR_CONTEXT_RENDER_SIZE;
		else
			ret = GEN8_LR_CONTEXT_RENDER_SIZE;
		break;
	case VCS:
	case BCS:
	case VECS:
	case VCS2:
		ret = GEN8_LR_CONTEXT_OTHER_SIZE;
		break;
	}

	return ret;
}
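
/*
 * Typical usage, mirroring execlists_context_deferred_alloc() below: the
 * caller rounds the reported size up to a page boundary and adds the extra
 * driver/GuC shared data page on top, e.g.
 *
 *	context_size = round_up(intel_lr_context_size(engine), 4096);
 *	context_size += PAGE_SIZE * LRC_PPHWSP_PN;
 */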

/**
 * execlists_context_deferred_alloc() - create the LRC specific bits of a context
 * @ctx: LR context to create.
 * @engine: engine to be used with the context.
 *
 * This function can be called more than once, with different engines, if we plan
 * to use the context with them. The context backing objects and the ringbuffers
 * (especially the ringbuffer backing objects) consume a lot of memory, which is
 * why the creation is deferred: it's better to make sure first that we actually
 * need to use a given engine with the context.
 *
 * Return: non-zero on error.
 */
static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
					    struct intel_engine_cs *engine)
{
	struct drm_i915_gem_object *ctx_obj;
	struct intel_context *ce = &ctx->engine[engine->id];
	uint32_t context_size;
	struct intel_ringbuffer *ringbuf;
	int ret;

	WARN_ON(ce->state);

	context_size = round_up(intel_lr_context_size(engine), 4096);

	/* One extra page as the sharing data between driver and GuC */
	context_size += PAGE_SIZE * LRC_PPHWSP_PN;

	ctx_obj = i915_gem_object_create(ctx->i915->dev, context_size);
	if (IS_ERR(ctx_obj)) {
		DRM_DEBUG_DRIVER("Alloc LRC backing obj failed.\n");
		return PTR_ERR(ctx_obj);
	}

	ringbuf = intel_engine_create_ringbuffer(engine, 4 * PAGE_SIZE);
	if (IS_ERR(ringbuf)) {
		ret = PTR_ERR(ringbuf);
		goto error_deref_obj;
	}

	ret = populate_lr_context(ctx, ctx_obj, engine, ringbuf);
	if (ret) {
		DRM_DEBUG_DRIVER("Failed to populate LRC: %d\n", ret);
		goto error_ringbuf;
	}

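	/*
	 * Only the render engine is seen installing an init_context hook in
	 * this file (gen8_init_rcs_context above), so contexts for that
	 * engine are recorded here as not yet initialised and presumably
	 * receive their extra per-context setup on first use.
	 */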
	ce->ringbuf = ringbuf;
	ce->state = ctx_obj;
	ce->initialised = engine->init_context == NULL;

	return 0;

error_ringbuf:
	intel_ringbuffer_free(ringbuf);
error_deref_obj:
	drm_gem_object_unreference(&ctx_obj->base);
	ce->ringbuf = NULL;
	ce->state = NULL;
	return ret;
}

void intel_lr_context_reset(struct drm_i915_private *dev_priv,
			    struct i915_gem_context *ctx)
{
	struct intel_engine_cs *engine;

	for_each_engine(engine, dev_priv) {
		struct intel_context *ce = &ctx->engine[engine->id];
		struct drm_i915_gem_object *ctx_obj = ce->state;
		void *vaddr;
		uint32_t *reg_state;

		if (!ctx_obj)
			continue;

		vaddr = i915_gem_object_pin_map(ctx_obj);
		if (WARN_ON(IS_ERR(vaddr)))
			continue;

		reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE;
		ctx_obj->dirty = true;

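		/*
		 * Zero the saved ring head/tail in the context image so that,
		 * when this context is next restored, the engine starts from
		 * an empty ring; the software ringbuffer pointers are reset
		 * to match below.
		 */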
		reg_state[CTX_RING_HEAD+1] = 0;
		reg_state[CTX_RING_TAIL+1] = 0;

		i915_gem_object_unpin_map(ctx_obj);

		ce->ringbuf->head = 0;
		ce->ringbuf->tail = 0;
	}
}