linux-brain/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
Daniel Vetter 2454fcea33 drm-misc-next for v5.3:

Merge tag 'drm-misc-next-2019-06-14' of git://anongit.freedesktop.org/drm/drm-misc into drm-next

drm-misc-next for v5.3:

UAPI Changes:

Cross-subsystem Changes:
- Add code to signal all dma-fences when freed with pending signals.
- Annotate reservation object access in CONFIG_DEBUG_MUTEXES

Core Changes:
- Assorted documentation fixes.
- Use irqsave/restore spinlock to add crc entry.
- Move code around to drm_client, for internal modeset clients.
- Make drm_crtc.h and drm_debugfs.h self-contained.
- Remove drm_fb_helper_connector.
- Add bootsplash to todo.
- Fix lock ordering in pan_display_legacy.
- Support pinning buffers to current location in gem-vram.
- Remove the now unused locking functions from gem-vram.
- Remove the now unused kmap-object argument from vram helpers.
- Stop checking return value of debugfs_create.
- Add atomic encoder enable/disable helpers.
- Pass drm_atomic_state to the atomic connector check.
- Add atomic support for bridge enable/disable.
- Add self refresh helpers to core.

Driver Changes:
- Add extra delay to make MTP SDM845 work.
- Small fixes to virtio, vkms, sii902x, sii9234, ast, mcde, analogix, rockchip.
- Add zpos and ?BGR8888 support to meson.
- More removals of drm_os_linux and drmP headers for amd, radeon, sti, r128, savage, sis.
- Allow Synopsys dw-hdmi to unwedge the i2c hdmi bus.
- Add orientation quirks for GPD panels.
- EDID cleanups and fixed handling for EDID versions < 1.2.
- Add runtime pm to stm.
- Handle suspend/resume in dw-hdmi.
- Add hooks for power on/off to dsi for stm.
- Remove virtio dirty tracking code, done in drm core.
- Rework BO handling in ast and mgag200.

Tiny conflict in drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c,
needed #include <linux/slab.h> to make it compile.

Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
From: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/0e01de30-9797-853c-732f-4a5bd6e61445@linux.intel.com
2019-06-14 11:44:24 +02:00


/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 *
 */
#include <linux/kthread.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"

static void amdgpu_job_timedout(struct drm_sched_job *s_job)
{
	struct amdgpu_ring *ring = to_amdgpu_ring(s_job->sched);
	struct amdgpu_job *job = to_amdgpu_job(s_job);
	struct amdgpu_task_info ti;

	memset(&ti, 0, sizeof(struct amdgpu_task_info));

	if (amdgpu_ring_soft_recovery(ring, job->vmid, s_job->s_fence->parent)) {
		DRM_ERROR("ring %s timeout, but soft recovered\n",
			  s_job->sched->name);
		return;
	}

	amdgpu_vm_get_task_info(ring->adev, job->pasid, &ti);
	DRM_ERROR("ring %s timeout, signaled seq=%u, emitted seq=%u\n",
		  job->base.sched->name, atomic_read(&ring->fence_drv.last_seq),
		  ring->fence_drv.sync_seq);
	DRM_ERROR("Process information: process %s pid %d thread %s pid %d\n",
		  ti.process_name, ti.tgid, ti.task_name, ti.pid);

	if (amdgpu_device_should_recover_gpu(ring->adev))
		amdgpu_device_gpu_recover(ring->adev, job);
	else
		drm_sched_suspend_timeout(&ring->sched);
}

int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
		     struct amdgpu_job **job, struct amdgpu_vm *vm)
{
	size_t size = sizeof(struct amdgpu_job);

	if (num_ibs == 0)
		return -EINVAL;

	size += sizeof(struct amdgpu_ib) * num_ibs;

	*job = kzalloc(size, GFP_KERNEL);
	if (!*job)
		return -ENOMEM;

	/*
	 * Initialize the scheduler to at least some ring so that we always
	 * have a pointer to adev.
	 */
	(*job)->base.sched = &adev->rings[0]->sched;
	(*job)->vm = vm;
	(*job)->ibs = (void *)&(*job)[1];
	(*job)->num_ibs = num_ibs;

	amdgpu_sync_create(&(*job)->sync);
	amdgpu_sync_create(&(*job)->sched_sync);

	(*job)->vram_lost_counter = atomic_read(&adev->vram_lost_counter);
	(*job)->vm_pd_addr = AMDGPU_BO_INVALID_OFFSET;

	return 0;
}

int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, unsigned size,
			     struct amdgpu_job **job)
{
	int r;

	r = amdgpu_job_alloc(adev, 1, job, NULL);
	if (r)
		return r;

	r = amdgpu_ib_get(adev, NULL, size, &(*job)->ibs[0]);
	if (r)
		kfree(*job);

	return r;
}

void amdgpu_job_free_resources(struct amdgpu_job *job)
{
	struct amdgpu_ring *ring = to_amdgpu_ring(job->base.sched);
	struct dma_fence *f;
	unsigned i;

	/* use sched fence if available */
	f = job->base.s_fence ? &job->base.s_fence->finished : job->fence;

	for (i = 0; i < job->num_ibs; ++i)
		amdgpu_ib_free(ring->adev, &job->ibs[i], f);
}

static void amdgpu_job_free_cb(struct drm_sched_job *s_job)
{
	struct amdgpu_ring *ring = to_amdgpu_ring(s_job->sched);
	struct amdgpu_job *job = to_amdgpu_job(s_job);

	drm_sched_job_cleanup(s_job);

	amdgpu_ring_priority_put(ring, s_job->s_priority);
	dma_fence_put(job->fence);
	amdgpu_sync_free(&job->sync);
	amdgpu_sync_free(&job->sched_sync);
	kfree(job);
}

void amdgpu_job_free(struct amdgpu_job *job)
{
	amdgpu_job_free_resources(job);

	dma_fence_put(job->fence);
	amdgpu_sync_free(&job->sync);
	amdgpu_sync_free(&job->sched_sync);
	kfree(job);
}

int amdgpu_job_submit(struct amdgpu_job *job, struct drm_sched_entity *entity,
		      void *owner, struct dma_fence **f)
{
	enum drm_sched_priority priority;
	struct amdgpu_ring *ring;
	int r;

	if (!f)
		return -EINVAL;

	r = drm_sched_job_init(&job->base, entity, owner);
	if (r)
		return r;

	job->owner = owner;
	*f = dma_fence_get(&job->base.s_fence->finished);
	amdgpu_job_free_resources(job);
	priority = job->base.s_priority;
	drm_sched_entity_push_job(&job->base, entity);

	ring = to_amdgpu_ring(entity->rq->sched);
	amdgpu_ring_priority_get(ring, priority);

	return 0;
}

int amdgpu_job_submit_direct(struct amdgpu_job *job, struct amdgpu_ring *ring,
			     struct dma_fence **fence)
{
	int r;

	job->base.sched = &ring->sched;
	r = amdgpu_ib_schedule(ring, job->num_ibs, job->ibs, NULL, fence);
	job->fence = dma_fence_get(*fence);
	if (r)
		return r;

	amdgpu_job_free(job);
	return 0;
}

static struct dma_fence *amdgpu_job_dependency(struct drm_sched_job *sched_job,
					       struct drm_sched_entity *s_entity)
{
	struct amdgpu_ring *ring = to_amdgpu_ring(s_entity->rq->sched);
	struct amdgpu_job *job = to_amdgpu_job(sched_job);
	struct amdgpu_vm *vm = job->vm;
	struct dma_fence *fence;
	bool explicit = false;
	int r;

	fence = amdgpu_sync_get_fence(&job->sync, &explicit);
	if (fence && explicit) {
		if (drm_sched_dependency_optimized(fence, s_entity)) {
			r = amdgpu_sync_fence(ring->adev, &job->sched_sync,
					      fence, false);
			if (r)
				DRM_ERROR("Error adding fence (%d)\n", r);
		}
	}

	while (fence == NULL && vm && !job->vmid) {
		r = amdgpu_vmid_grab(vm, ring, &job->sync,
				     &job->base.s_fence->finished,
				     job);
		if (r)
			DRM_ERROR("Error getting VM ID (%d)\n", r);

		fence = amdgpu_sync_get_fence(&job->sync, NULL);
	}

	return fence;
}

static struct dma_fence *amdgpu_job_run(struct drm_sched_job *sched_job)
{
	struct amdgpu_ring *ring = to_amdgpu_ring(sched_job->sched);
	struct dma_fence *fence = NULL, *finished;
	struct amdgpu_job *job;
	int r;

	job = to_amdgpu_job(sched_job);
	finished = &job->base.s_fence->finished;

	BUG_ON(amdgpu_sync_peek_fence(&job->sync, NULL));

	trace_amdgpu_sched_run_job(job);

	if (job->vram_lost_counter != atomic_read(&ring->adev->vram_lost_counter))
		dma_fence_set_error(finished, -ECANCELED); /* skip IB as well if VRAM lost */

	if (finished->error < 0) {
		DRM_INFO("Skip scheduling IBs!\n");
	} else {
		r = amdgpu_ib_schedule(ring, job->num_ibs, job->ibs, job,
				       &fence);
		if (r)
			DRM_ERROR("Error scheduling IBs (%d)\n", r);
	}
	/* if gpu reset, hw fence will be replaced here */
	dma_fence_put(job->fence);
	job->fence = dma_fence_get(fence);

	amdgpu_job_free_resources(job);

	return fence;
}

const struct drm_sched_backend_ops amdgpu_sched_ops = {
	.dependency = amdgpu_job_dependency,
	.run_job = amdgpu_job_run,
	.timedout_job = amdgpu_job_timedout,
	.free_job = amdgpu_job_free_cb
};
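
For orientation, here is a minimal sketch of how the job interface above is typically driven by other parts of the driver: allocate a job with a single IB, fill the IB, and push it to a scheduler entity. The wrapper name submit_one_ib_example, the IB size, and the entity/owner arguments are illustrative assumptions and not part of this file; the amdgpu_job_* calls are the ones defined above, and the sketch assumes the same "amdgpu.h" context as this file.

/*
 * Illustrative sketch only (not part of amdgpu_job.c): allocate a job with
 * one IB, let the caller fill job->ibs[0], then hand the job to the GPU
 * scheduler. On submit failure the job is still owned by us and must be
 * freed; on success ownership moves to the scheduler and *fence signals
 * completion of the work.
 */
static int submit_one_ib_example(struct amdgpu_device *adev,
				 struct drm_sched_entity *entity,
				 void *owner, struct dma_fence **fence)
{
	struct amdgpu_job *job;
	int r;

	/* One job with a single small IB; the size argument is forwarded to amdgpu_ib_get(). */
	r = amdgpu_job_alloc_with_ib(adev, 64, &job);
	if (r)
		return r;

	/* A real caller would emit its command packets into job->ibs[0] here. */

	r = amdgpu_job_submit(job, entity, owner, fence);
	if (r)
		amdgpu_job_free(job);

	return r;
}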