drm/etnaviv: provide MMU context to etnaviv_gem_mapping_get

In preparation to having a context per process, etnaviv_gem_mapping_get
should not use the current GPU context, but needs to be told which
context to use.

Signed-off-by: Lucas Stach <l.stach@pengutronix.de>
Reviewed-by: Philipp Zabel <p.zabel@pengutronix.de>
Reviewed-by: Guido Günther <agx@sigxcpu.org>
This commit is contained in:
Lucas Stach 2019-07-05 19:17:26 +02:00
parent d80d842a47
commit e6364d70cf
3 changed files with 21 additions and 13 deletions

View File

@@ -248,7 +248,8 @@ void etnaviv_gem_mapping_unreference(struct etnaviv_vram_mapping *mapping)
 }
 
 struct etnaviv_vram_mapping *etnaviv_gem_mapping_get(
-	struct drm_gem_object *obj, struct etnaviv_gpu *gpu)
+	struct drm_gem_object *obj, struct etnaviv_gpu *gpu,
+	struct etnaviv_iommu_context *mmu_context)
 {
 	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
 	struct etnaviv_vram_mapping *mapping;
@@ -256,7 +257,7 @@ struct etnaviv_vram_mapping *etnaviv_gem_mapping_get(
 	int ret = 0;
 
 	mutex_lock(&etnaviv_obj->lock);
-	mapping = etnaviv_gem_get_vram_mapping(etnaviv_obj, gpu->mmu_context);
+	mapping = etnaviv_gem_get_vram_mapping(etnaviv_obj, mmu_context);
 	if (mapping) {
 		/*
 		 * Holding the object lock prevents the use count changing
@@ -265,12 +266,12 @@ struct etnaviv_vram_mapping *etnaviv_gem_mapping_get(
 		 * the MMU owns this mapping to close this race.
 		 */
 		if (mapping->use == 0) {
-			mutex_lock(&gpu->mmu_context->lock);
-			if (mapping->context == gpu->mmu_context)
+			mutex_lock(&mmu_context->lock);
+			if (mapping->context == mmu_context)
 				mapping->use += 1;
 			else
 				mapping = NULL;
-			mutex_unlock(&gpu->mmu_context->lock);
+			mutex_unlock(&mmu_context->lock);
 			if (mapping)
 				goto out;
 		} else {
@@ -303,15 +304,18 @@ struct etnaviv_vram_mapping *etnaviv_gem_mapping_get(
 		list_del(&mapping->obj_node);
 	}
 
-	mapping->context = gpu->mmu_context;
+	etnaviv_iommu_context_get(mmu_context);
+	mapping->context = mmu_context;
 	mapping->use = 1;
 
-	ret = etnaviv_iommu_map_gem(gpu->mmu_context, etnaviv_obj,
-				    gpu->memory_base, mapping);
-	if (ret < 0)
+	ret = etnaviv_iommu_map_gem(mmu_context, etnaviv_obj, gpu->memory_base,
+				    mapping);
+	if (ret < 0) {
+		etnaviv_iommu_context_put(mmu_context);
 		kfree(mapping);
-	else
+	} else {
 		list_add_tail(&mapping->obj_node, &etnaviv_obj->vram_list);
+	}
 
 out:
 	mutex_unlock(&etnaviv_obj->lock);
@@ -529,8 +533,10 @@ void etnaviv_gem_free_object(struct drm_gem_object *obj)
 
 		WARN_ON(mapping->use);
 
-		if (context)
+		if (context) {
 			etnaviv_iommu_unmap_gem(context, mapping);
+			etnaviv_iommu_context_put(context);
+		}
 
 		list_del(&mapping->obj_node);
 		kfree(mapping);

View File

@@ -119,7 +119,8 @@ struct page **etnaviv_gem_get_pages(struct etnaviv_gem_object *obj);
 void etnaviv_gem_put_pages(struct etnaviv_gem_object *obj);
 
 struct etnaviv_vram_mapping *etnaviv_gem_mapping_get(
-	struct drm_gem_object *obj, struct etnaviv_gpu *gpu);
+	struct drm_gem_object *obj, struct etnaviv_gpu *gpu,
+	struct etnaviv_iommu_context *mmu_context);
 void etnaviv_gem_mapping_unreference(struct etnaviv_vram_mapping *mapping);
 
 #endif /* __ETNAVIV_GEM_H__ */

View File

@@ -224,7 +224,8 @@ static int submit_pin_objects(struct etnaviv_gem_submit *submit)
 		struct etnaviv_vram_mapping *mapping;
 
 		mapping = etnaviv_gem_mapping_get(&etnaviv_obj->base,
-						  submit->gpu);
+						  submit->gpu,
+						  submit->gpu->mmu_context);
 		if (IS_ERR(mapping)) {
 			ret = PTR_ERR(mapping);
 			break;