drm/amdgpu: Keep track of amount of pinned CPU visible VRAM

Instead of CPU invisible VRAM. Preparation for the following, no
functional change intended.

v2:
* Also change amdgpu_vram_mgr_bo_invisible_size to
  amdgpu_vram_mgr_bo_visible_size, allowing further simplification
  (Christian König)

Cc: stable@vger.kernel.org
Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Michel Dänzer <michel.daenzer@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
commit ddc21af4d0
parent aa16b6c6b4
Author: Michel Dänzer, 2018-07-11 12:06:31 +02:00
Committed by: Alex Deucher
5 changed files with 15 additions and 20 deletions
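To illustrate the bookkeeping change described in the commit message: previously the driver tracked the CPU-invisible portion of pinned VRAM and derived the visible portion as vram_pin_size - invisible_pin_size; now the visible portion is tracked directly. The following standalone sketch (not kernel code; the counter and size names are simplified stand-ins, and the 256 MiB BAR and BO sizes are made-up example values) models both schemes for one hypothetical pinned BO and shows they report the same usable CPU-visible VRAM.

/* Standalone illustration only -- not kernel code. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

struct old_counters { uint64_t vram_pin_size, invisible_pin_size; };
struct new_counters { uint64_t vram_pin_size, visible_pin_size; };

int main(void)
{
	const uint64_t visible_vram_size = 256ULL << 20; /* assumed 256 MiB BAR */
	/* Hypothetical pinned BO: 64 MiB total, 16 MiB of it CPU visible. */
	const uint64_t bo_size = 64ULL << 20;
	const uint64_t bo_visible = 16ULL << 20;

	struct old_counters o = { bo_size, bo_size - bo_visible };
	struct new_counters n = { bo_size, bo_visible };

	/* Old scheme: visible pinned amount is derived indirectly. */
	uint64_t old_usable = visible_vram_size -
			      (o.vram_pin_size - o.invisible_pin_size);
	/* New scheme: visible pinned amount is tracked directly. */
	uint64_t new_usable = visible_vram_size - n.visible_pin_size;

	assert(old_usable == new_usable);
	printf("usable CPU visible VRAM: %llu MiB\n",
	       (unsigned long long)(new_usable >> 20));
	return 0;
}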

--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h

@@ -1590,7 +1590,7 @@ struct amdgpu_device {
 	/* tracking pinned memory */
 	u64 vram_pin_size;
-	u64 invisible_pin_size;
+	u64 visible_pin_size;
 	u64 gart_pin_size;
 	/* amdkfd interface */

--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c

@@ -504,7 +504,7 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
 		vram_gtt.vram_size = adev->gmc.real_vram_size;
 		vram_gtt.vram_size -= adev->vram_pin_size;
 		vram_gtt.vram_cpu_accessible_size = adev->gmc.visible_vram_size;
-		vram_gtt.vram_cpu_accessible_size -= (adev->vram_pin_size - adev->invisible_pin_size);
+		vram_gtt.vram_cpu_accessible_size -= adev->visible_pin_size;
 		vram_gtt.gtt_size = adev->mman.bdev.man[TTM_PL_TT].size;
 		vram_gtt.gtt_size *= PAGE_SIZE;
 		vram_gtt.gtt_size -= adev->gart_pin_size;
@@ -525,8 +525,7 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
 		mem.cpu_accessible_vram.total_heap_size =
 			adev->gmc.visible_vram_size;
 		mem.cpu_accessible_vram.usable_heap_size =
-			adev->gmc.visible_vram_size -
-			(adev->vram_pin_size - adev->invisible_pin_size);
+			adev->gmc.visible_vram_size - adev->visible_pin_size;
 		mem.cpu_accessible_vram.heap_usage =
 			amdgpu_vram_mgr_vis_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);
 		mem.cpu_accessible_vram.max_allocation =
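For context, these are the values that reach userspace through the AMDGPU_INFO ioctl. A minimal sketch of querying them via libdrm's amdgpu_query_info() follows; the render node path and build command are assumptions, not part of this patch (build roughly with: gcc example.c `pkg-config --cflags --libs libdrm libdrm_amdgpu`).

/* Sketch only: read back the VRAM/GTT sizes reported by amdgpu_info_ioctl(). */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>
#include <amdgpu.h>
#include <amdgpu_drm.h>

int main(void)
{
	int fd = open("/dev/dri/renderD128", O_RDWR); /* assumed device node */
	uint32_t major, minor;
	amdgpu_device_handle dev;
	struct drm_amdgpu_info_vram_gtt vram_gtt = {0};

	if (fd < 0 || amdgpu_device_initialize(fd, &major, &minor, &dev))
		return 1;

	/* AMDGPU_INFO_VRAM_GTT reports vram_size, vram_cpu_accessible_size and
	 * gtt_size with the pinned amounts already subtracted by the kernel. */
	if (!amdgpu_query_info(dev, AMDGPU_INFO_VRAM_GTT,
			       sizeof(vram_gtt), &vram_gtt))
		printf("CPU accessible VRAM: %llu MiB\n",
		       (unsigned long long)(vram_gtt.vram_cpu_accessible_size >> 20));

	amdgpu_device_deinitialize(dev);
	close(fd);
	return 0;
}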

--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c

@@ -917,7 +917,7 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
 	domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
 	if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
 		adev->vram_pin_size += amdgpu_bo_size(bo);
-		adev->invisible_pin_size += amdgpu_vram_mgr_bo_invisible_size(bo);
+		adev->visible_pin_size += amdgpu_vram_mgr_bo_visible_size(bo);
 	} else if (domain == AMDGPU_GEM_DOMAIN_GTT) {
 		adev->gart_pin_size += amdgpu_bo_size(bo);
 	}
@@ -969,7 +969,7 @@ int amdgpu_bo_unpin(struct amdgpu_bo *bo)
 	if (bo->tbo.mem.mem_type == TTM_PL_VRAM) {
 		adev->vram_pin_size -= amdgpu_bo_size(bo);
-		adev->invisible_pin_size -= amdgpu_vram_mgr_bo_invisible_size(bo);
+		adev->visible_pin_size -= amdgpu_vram_mgr_bo_visible_size(bo);
 	} else if (bo->tbo.mem.mem_type == TTM_PL_TT) {
 		adev->gart_pin_size -= amdgpu_bo_size(bo);
 	}
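The pin and unpin paths above must stay symmetric so the counters return to zero once every pinned BO is released. A simplified model of that bookkeeping (stand-in types only; the real driver updates these counters based on the BO's current placement and under its own locking):

/* Simplified model of the pin/unpin counter updates -- not kernel code. */
#include <assert.h>
#include <stdint.h>

struct dev_counters { uint64_t vram_pin_size, visible_pin_size, gart_pin_size; };
struct bo { uint64_t size, visible_size; int in_vram; };

static void pin(struct dev_counters *c, const struct bo *b)
{
	if (b->in_vram) {
		c->vram_pin_size += b->size;
		c->visible_pin_size += b->visible_size;
	} else {
		c->gart_pin_size += b->size;
	}
}

static void unpin(struct dev_counters *c, const struct bo *b)
{
	if (b->in_vram) {
		c->vram_pin_size -= b->size;
		c->visible_pin_size -= b->visible_size;
	} else {
		c->gart_pin_size -= b->size;
	}
}

int main(void)
{
	struct dev_counters c = {0};
	struct bo fb = { 8ULL << 20, 8ULL << 20, 1 }; /* hypothetical scanout BO */
	struct bo ib = { 1ULL << 20, 0, 0 };          /* hypothetical GTT BO */

	pin(&c, &fb);
	pin(&c, &ib);
	assert(c.visible_pin_size <= c.vram_pin_size);
	unpin(&c, &ib);
	unpin(&c, &fb);
	assert(c.vram_pin_size == 0 && c.visible_pin_size == 0 &&
	       c.gart_pin_size == 0);
	return 0;
}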

--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h

@@ -73,7 +73,7 @@ bool amdgpu_gtt_mgr_has_gart_addr(struct ttm_mem_reg *mem);
 uint64_t amdgpu_gtt_mgr_usage(struct ttm_mem_type_manager *man);
 int amdgpu_gtt_mgr_recover(struct ttm_mem_type_manager *man);
-u64 amdgpu_vram_mgr_bo_invisible_size(struct amdgpu_bo *bo);
+u64 amdgpu_vram_mgr_bo_visible_size(struct amdgpu_bo *bo);
 uint64_t amdgpu_vram_mgr_usage(struct ttm_mem_type_manager *man);
 uint64_t amdgpu_vram_mgr_vis_usage(struct ttm_mem_type_manager *man);

--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c

@@ -97,33 +97,29 @@ static u64 amdgpu_vram_mgr_vis_size(struct amdgpu_device *adev,
 }
 
 /**
- * amdgpu_vram_mgr_bo_invisible_size - CPU invisible BO size
+ * amdgpu_vram_mgr_bo_visible_size - CPU visible BO size
  *
  * @bo: &amdgpu_bo buffer object (must be in VRAM)
  *
  * Returns:
- * How much of the given &amdgpu_bo buffer object lies in CPU invisible VRAM.
+ * How much of the given &amdgpu_bo buffer object lies in CPU visible VRAM.
  */
-u64 amdgpu_vram_mgr_bo_invisible_size(struct amdgpu_bo *bo)
+u64 amdgpu_vram_mgr_bo_visible_size(struct amdgpu_bo *bo)
 {
 	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
 	struct ttm_mem_reg *mem = &bo->tbo.mem;
 	struct drm_mm_node *nodes = mem->mm_node;
 	unsigned pages = mem->num_pages;
-	u64 usage = 0;
+	u64 usage;
 
 	if (amdgpu_gmc_vram_full_visible(&adev->gmc))
-		return 0;
+		return amdgpu_bo_size(bo);
 
 	if (mem->start >= adev->gmc.visible_vram_size >> PAGE_SHIFT)
-		return amdgpu_bo_size(bo);
+		return 0;
 
-	while (nodes && pages) {
-		usage += nodes->size << PAGE_SHIFT;
-		usage -= amdgpu_vram_mgr_vis_size(adev, nodes);
-		pages -= nodes->size;
-		++nodes;
-	}
+	for (usage = 0; nodes && pages; pages -= nodes->size, nodes++)
+		usage += amdgpu_vram_mgr_vis_size(adev, nodes);
 
 	return usage;
 }
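The new helper walks the drm_mm nodes backing the BO and sums, per node, the part that lies below the visible VRAM limit (the clamp done by amdgpu_vram_mgr_vis_size()). A standalone sketch of that shape, with a stand-in range type instead of struct drm_mm_node and byte units to keep it simple:

/* Standalone sketch of the per-node clamp and summation -- not kernel code. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

struct range { uint64_t start, size; }; /* stand-in for one drm_mm_node */

/* Bytes of one range lying below the visible limit. */
static uint64_t vis_size(const struct range *r, uint64_t visible_limit)
{
	uint64_t end = r->start + r->size;

	if (r->start >= visible_limit)
		return 0;
	return (end > visible_limit ? visible_limit : end) - r->start;
}

/* Sum over all ranges backing a buffer -- the shape of the new for loop. */
static uint64_t bo_visible_size(const struct range *ranges, unsigned n,
				uint64_t visible_limit)
{
	uint64_t usage = 0;

	for (unsigned i = 0; i < n; i++)
		usage += vis_size(&ranges[i], visible_limit);
	return usage;
}

int main(void)
{
	/* Hypothetical BO scattered across three VRAM ranges; 256 MiB visible. */
	const uint64_t visible_limit = 256ULL << 20;
	const struct range bo[] = {
		{ 200ULL << 20, 32ULL << 20 },  /* fully visible   */
		{ 240ULL << 20, 32ULL << 20 },  /* straddles limit */
		{ 512ULL << 20, 64ULL << 20 },  /* fully invisible */
	};

	uint64_t vis = bo_visible_size(bo, 3, visible_limit);
	assert(vis == (32ULL << 20) + (16ULL << 20));
	printf("visible: %llu MiB (of a 128 MiB BO)\n",
	       (unsigned long long)(vis >> 20));
	return 0;
}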