Message-Id: <20200317103332.842961594@linuxfoundation.org>
Date: Tue, 17 Mar 2020 11:55:00 +0100
From: Greg Kroah-Hartman <gregkh@...uxfoundation.org>
To: linux-kernel@...r.kernel.org
Cc: Greg Kroah-Hartman <gregkh@...uxfoundation.org>,
	stable@...r.kernel.org, Chris Wilson <chris@...is-wilson.co.uk>,
	Mika Kuoppala <mika.kuoppala@...ux.intel.com>,
	Tvrtko Ursulin <tvrtko.ursulin@...el.com>,
	Jani Nikula <jani.nikula@...el.com>
Subject: [PATCH 5.5 090/151] drm/i915/execlists: Enable timeslice on partial virtual engine dequeue

From: Chris Wilson <chris@...is-wilson.co.uk>

commit eafc2aa20fba319b6e791a1b0c45a91511eccb6b upstream.

If we stop filling the ELSP due to an incompatible virtual engine
request, check if we should enable the timeslice on behalf of the queue.

This fixes the case where we were inspecting the last->next element when
we knew that the last element was the last request in the execution
queue, and so decided we did not need to enable timeslicing despite the
intent to do so!
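
In code terms, a condensed sketch of the decision before and after (names
taken from the first hunk below):

	/* before: an early return meant the queue was never consulted */
	if (list_is_last(&rq->sched.link, &engine->active.requests))
		return false;

	/* after: always start from the queue's priority hint */
	hint = engine->execlists.queue_priority_hint;
	if (!list_is_last(&rq->sched.link, &engine->active.requests))
		hint = max(hint, rq_prio(list_next_entry(rq, sched.link)));
	return hint >= effective_prio(rq);
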
Fixes: 8ee36e048c98 ("drm/i915/execlists: Minimalistic timeslicing")
Signed-off-by: Chris Wilson <chris@...is-wilson.co.uk>
Cc: Mika Kuoppala <mika.kuoppala@...ux.intel.com>
Cc: Tvrtko Ursulin <tvrtko.ursulin@...el.com>
Cc: <stable@...r.kernel.org> # v5.4+
Reviewed-by: Mika Kuoppala <mika.kuoppala@...ux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20200306113012.3184606-1-chris@chris-wilson.co.uk
(cherry picked from commit 3df2deed411e0f1b7312baf0139aab8bba4c0410)
Signed-off-by: Jani Nikula <jani.nikula@...el.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@...uxfoundation.org>
---
 drivers/gpu/drm/i915/gt/intel_lrc.c |   29 ++++++++++++++++++-----------
 1 file changed, 18 insertions(+), 11 deletions(-)

--- a/drivers/gpu/drm/i915/gt/intel_lrc.c
+++ b/drivers/gpu/drm/i915/gt/intel_lrc.c
@@ -1501,11 +1501,9 @@ need_timeslice(struct intel_engine_cs *e
 	if (!intel_engine_has_timeslices(engine))
 		return false;
 
-	if (list_is_last(&rq->sched.link, &engine->active.requests))
-		return false;
-
-	hint = max(rq_prio(list_next_entry(rq, sched.link)),
-		   engine->execlists.queue_priority_hint);
+	hint = engine->execlists.queue_priority_hint;
+	if (!list_is_last(&rq->sched.link, &engine->active.requests))
+		hint = max(hint, rq_prio(list_next_entry(rq, sched.link)));
 
 	return hint >= effective_prio(rq);
 }
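
The decision change can be modelled in isolation; a standalone toy sketch
(stand-in types and hypothetical priority values; rq->prio stands in for
effective_prio(rq)):

#include <stdbool.h>
#include <stdio.h>

#define max(a, b) ((a) > (b) ? (a) : (b))

/* stand-ins for the driver state read by need_timeslice() */
struct toy_engine { int queue_priority_hint; };
struct toy_rq { int prio; bool is_last; int next_prio; };

static bool old_need_timeslice(const struct toy_engine *e,
			       const struct toy_rq *rq)
{
	if (rq->is_last)
		return false; /* bug: queue_priority_hint never consulted */
	return max(rq->next_prio, e->queue_priority_hint) >= rq->prio;
}

static bool new_need_timeslice(const struct toy_engine *e,
			       const struct toy_rq *rq)
{
	int hint = e->queue_priority_hint;

	if (!rq->is_last)
		hint = max(hint, rq->next_prio);
	return hint >= rq->prio;
}

int main(void)
{
	/* rq is the last request in the execution queue, but the
	 * queue holds higher-priority work (hint 2 vs prio 0)
	 */
	struct toy_engine e = { .queue_priority_hint = 2 };
	struct toy_rq rq = { .prio = 0, .is_last = true };

	printf("old: %d new: %d\n",
	       old_need_timeslice(&e, &rq),  /* 0: no timeslice armed */
	       new_need_timeslice(&e, &rq)); /* 1: timeslice enabled  */
	return 0;
}
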
@@ -1547,6 +1545,18 @@ static void set_timeslice(struct intel_e
 	set_timer_ms(&engine->execlists.timer, active_timeslice(engine));
 }
 
+static void start_timeslice(struct intel_engine_cs *engine)
+{
+	struct intel_engine_execlists *execlists = &engine->execlists;
+
+	execlists->switch_priority_hint = execlists->queue_priority_hint;
+
+	if (timer_pending(&execlists->timer))
+		return;
+
+	set_timer_ms(&execlists->timer, timeslice(engine));
+}
+
 static void record_preemption(struct intel_engine_execlists *execlists)
 {
 	(void)I915_SELFTEST_ONLY(execlists->preempt_hang.count++);
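
Note that start_timeslice() is not a 1:1 extraction of the open-coded
arming it replaces: it additionally latches queue_priority_hint into
switch_priority_hint, and it keys off timer_pending() rather than
timer.expires. The call-site pattern, before and after (taken from the
hunks that follow):

	/* before */
	if (!execlists->timer.expires &&
	    need_timeslice(engine, last))
		set_timer_ms(&execlists->timer, timeslice(engine));

	/* after */
	start_timeslice(engine);
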
@@ -1705,11 +1715,7 @@ static void execlists_dequeue(struct int
 				 * Even if ELSP[1] is occupied and not worthy
 				 * of timeslices, our queue might be.
 				 */
-				if (!execlists->timer.expires &&
-				    need_timeslice(engine, last))
-					set_timer_ms(&execlists->timer,
-						     timeslice(engine));
-
+				start_timeslice(engine);
 				return;
 			}
 		}
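
For context on the comment in the hunk above: ELSP is the pair of
execlists submit ports, i.e. the two hardware submission slots. An
informal picture of the situation being handled (not from the patch):

	/* ELSP[0]: last, currently executing                       */
	/* ELSP[1]: occupied, but not by anything worth a timeslice */
	/* queue:   may still hold work at or above effective_prio, */
	/*          so arm the timeslice on the queue's behalf      */
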
@@ -1744,7 +1750,8 @@ static void execlists_dequeue(struct int
 
 		if (last && !can_merge_rq(last, rq)) {
 			spin_unlock(&ve->base.active.lock);
-			return; /* leave this for another */
+			start_timeslice(engine);
+			return; /* leave this for another sibling */
 		}
 
 		GEM_TRACE("%s: virtual rq=%llx:%lld%s, new engine? %s\n",