author     Jammy Zhou <Jammy.Zhou@amd.com>           2015-07-13 20:57:44 +0800
committer  Alex Deucher <alexander.deucher@amd.com>  2015-08-05 13:47:52 -0400
commit     8aeffcc1cf3360fddd97f4a6b6f7300f401142ae (patch)
tree       1196cf4db93a65d76bead1b4e6d221d85cdc180a /amdgpu
parent     95d0f35dafff6c588da47c28332c252881f2e07c (diff)
amdgpu: add amdgpu_bo_va_op for va map/unmap support v3
The following interfaces are changed accordingly:
- amdgpu_bo_alloc
- amdgpu_create_bo_from_user_mem

v2: update the interfaces
v3: remove virtual_mc_base_address from amdgpu_bo

Signed-off-by: Jammy Zhou <Jammy.Zhou@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
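With this change amdgpu_bo_alloc() hands back only a buffer handle; assigning a GPU virtual address is now the caller's job via amdgpu_bo_va_op(). A minimal sketch of the new allocation flow, assuming amdgpu_va_range_alloc() is used to reserve the VA; the alloc_and_map() helper name and the omitted error-path cleanup are illustrative, not part of this patch:

#include <amdgpu.h>
#include <amdgpu_drm.h>

/* Hypothetical helper (not part of this patch): allocate a VRAM BO with the
 * v3 interface and map it at a freshly reserved GPU virtual address.
 * Error-path cleanup (freeing the BO/VA range on failure) is elided. */
static int alloc_and_map(amdgpu_device_handle dev, uint64_t size,
                         amdgpu_bo_handle *bo, uint64_t *gpu_va,
                         amdgpu_va_handle *va_handle)
{
    struct amdgpu_bo_alloc_request req = {
        .alloc_size = size,
        .phys_alignment = 4096,
        .preferred_heap = AMDGPU_GEM_DOMAIN_VRAM,
    };
    int r;

    /* v3: only the buffer handle comes back, no virtual_mc_base_address. */
    r = amdgpu_bo_alloc(dev, &req, bo);
    if (r)
        return r;

    /* Reserve a GPU VA range; previously the library did this internally. */
    r = amdgpu_va_range_alloc(dev, amdgpu_gpu_va_range_general, size,
                              4096, 0, gpu_va, va_handle, 0);
    if (r)
        return r;

    /* Map the whole BO; this version of amdgpu_bo_va_op() applies R/W/X
     * page flags regardless of the flags argument. */
    return amdgpu_bo_va_op(*bo, 0, size, *gpu_va, 0, AMDGPU_VA_OP_MAP);
}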
Diffstat (limited to 'amdgpu')
-rw-r--r--  amdgpu/amdgpu.h           |  54
-rw-r--r--  amdgpu/amdgpu_bo.c        | 130
-rw-r--r--  amdgpu/amdgpu_internal.h  |   1
3 files changed, 59 insertions(+), 126 deletions(-)
diff --git a/amdgpu/amdgpu.h b/amdgpu/amdgpu.h
index bc6751ae..f14b7f45 100644
--- a/amdgpu/amdgpu.h
+++ b/amdgpu/amdgpu.h
@@ -156,19 +156,6 @@ struct amdgpu_bo_alloc_request {
};
/**
- * Structure describing memory allocation request
- *
- * \sa amdgpu_bo_alloc()
-*/
-struct amdgpu_bo_alloc_result {
- /** Assigned virtual MC Base Address */
- uint64_t virtual_mc_base_address;
-
- /** Handle of allocated memory to be used by the given process only. */
- amdgpu_bo_handle buf_handle;
-};
-
-/**
* Special UMD specific information associated with buffer.
*
* It may be need to pass some buffer charactersitic as part
@@ -213,13 +200,6 @@ struct amdgpu_bo_info {
*/
uint64_t phys_alignment;
- /**
- * Assigned virtual MC Base Address.
- * \note This information will be returned only if this buffer was
- * allocated in the same process otherwise 0 will be returned.
- */
- uint64_t virtual_mc_base_address;
-
/** Heap where to allocate memory. */
uint32_t preferred_heap;
@@ -242,9 +222,6 @@ struct amdgpu_bo_import_result {
/** Buffer size */
uint64_t alloc_size;
-
- /** Assigned virtual MC Base Address */
- uint64_t virtual_mc_base_address;
};
/**
@@ -558,8 +535,7 @@ int amdgpu_device_deinitialize(amdgpu_device_handle device_handle);
* See #amdgpu_device_initialize()
* \param alloc_buffer - \c [in] Pointer to the structure describing an
* allocation request
- * \param info - \c [out] Pointer to structure which return
- * information about allocated memory
+ * \param buf_handle - \c [out] Allocated buffer handle
*
* \return 0 on success\n
* <0 - Negative POSIX Error code
@@ -568,7 +544,7 @@ int amdgpu_device_deinitialize(amdgpu_device_handle device_handle);
*/
int amdgpu_bo_alloc(amdgpu_device_handle dev,
struct amdgpu_bo_alloc_request *alloc_buffer,
- struct amdgpu_bo_alloc_result *info);
+ amdgpu_bo_handle *buf_handle);
/**
* Associate opaque data with buffer to be queried by another UMD
@@ -652,7 +628,7 @@ int amdgpu_bo_import(amdgpu_device_handle dev,
* want to map to GPU address space (make GPU accessible)
* (This address must be correctly aligned).
* \param size - [in] Size of allocation (must be correctly aligned)
- * \param amdgpu_bo_alloc_result - [out] Handle of allocation to be passed as
+ * \param buf_handle - [out] Buffer handle for the userptr memory
* resource on submission and be used in other operations.
*
*
@@ -677,7 +653,7 @@ int amdgpu_bo_import(amdgpu_device_handle dev,
*/
int amdgpu_create_bo_from_user_mem(amdgpu_device_handle dev,
void *cpu, uint64_t size,
- struct amdgpu_bo_alloc_result *info);
+ amdgpu_bo_handle *buf_handle);
/**
* Free previosuly allocated memory
@@ -1173,4 +1149,26 @@ int amdgpu_va_range_query(amdgpu_device_handle dev,
uint64_t *start,
uint64_t *end);
+/**
+ * VA mapping/unmapping for the buffer object
+ *
+ * \param bo - \c [in] BO handle
+ * \param offset - \c [in] Start offset to map
+ * \param size - \c [in] Size to map
+ * \param addr - \c [in] Start virtual address.
+ * \param flags - \c [in] Supported flags for mapping/unmapping
+ * \param ops - \c [in] AMDGPU_VA_OP_MAP or AMDGPU_VA_OP_UNMAP
+ *
+ * \return 0 on success\n
+ * <0 - Negative POSIX Error code
+ *
+*/
+
+int amdgpu_bo_va_op(amdgpu_bo_handle bo,
+ uint64_t offset,
+ uint64_t size,
+ uint64_t addr,
+ uint64_t flags,
+ uint32_t ops);
+
#endif /* #ifdef _AMDGPU_H_ */
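Because the library no longer unmaps buffers when they are freed (see the amdgpu_bo.c hunks below), callers are expected to undo their own mappings. A teardown sketch mirroring the map above; unmap_and_free() is a hypothetical helper, not part of this patch:

#include <amdgpu.h>
#include <amdgpu_drm.h>

/* Hypothetical teardown counterpart to the allocation sketch above: the
 * caller removes its own mapping before releasing the VA range and BO. */
static void unmap_and_free(amdgpu_bo_handle bo, uint64_t size,
                           uint64_t gpu_va, amdgpu_va_handle va_handle)
{
    /* Undo the AMDGPU_VA_OP_MAP done at allocation time. */
    amdgpu_bo_va_op(bo, 0, size, gpu_va, 0, AMDGPU_VA_OP_UNMAP);

    /* Return the VA range to the manager and drop the buffer. */
    amdgpu_va_range_free(va_handle);
    amdgpu_bo_free(bo);
}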
diff --git a/amdgpu/amdgpu_bo.c b/amdgpu/amdgpu_bo.c
index 1ef15162..a17bd0f5 100644
--- a/amdgpu/amdgpu_bo.c
+++ b/amdgpu/amdgpu_bo.c
@@ -52,72 +52,6 @@ static void amdgpu_close_kms_handle(amdgpu_device_handle dev,
drmIoctl(dev->fd, DRM_IOCTL_GEM_CLOSE, &args);
}
-/* map the buffer to the GPU virtual address space */
-static int amdgpu_bo_map(amdgpu_bo_handle bo, uint32_t alignment)
-{
- amdgpu_device_handle dev = bo->dev;
- struct drm_amdgpu_gem_va va;
- int r;
-
- memset(&va, 0, sizeof(va));
-
- bo->virtual_mc_base_address = amdgpu_vamgr_find_va(dev->vamgr,
- bo->alloc_size, alignment, 0);
-
- if (bo->virtual_mc_base_address == AMDGPU_INVALID_VA_ADDRESS)
- return -ENOSPC;
-
- va.handle = bo->handle;
- va.operation = AMDGPU_VA_OP_MAP;
- va.flags = AMDGPU_VM_PAGE_READABLE |
- AMDGPU_VM_PAGE_WRITEABLE |
- AMDGPU_VM_PAGE_EXECUTABLE;
- va.va_address = bo->virtual_mc_base_address;
- va.offset_in_bo = 0;
- va.map_size = ALIGN(bo->alloc_size, getpagesize());
-
- r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_GEM_VA, &va, sizeof(va));
- if (r) {
- amdgpu_bo_free_internal(bo);
- return r;
- }
-
- return 0;
-}
-
-/* unmap the buffer from the GPU virtual address space */
-static void amdgpu_bo_unmap(amdgpu_bo_handle bo)
-{
- amdgpu_device_handle dev = bo->dev;
- struct drm_amdgpu_gem_va va;
- int r;
-
- if (bo->virtual_mc_base_address == AMDGPU_INVALID_VA_ADDRESS)
- return;
-
- memset(&va, 0, sizeof(va));
-
- va.handle = bo->handle;
- va.operation = AMDGPU_VA_OP_UNMAP;
- va.flags = AMDGPU_VM_PAGE_READABLE |
- AMDGPU_VM_PAGE_WRITEABLE |
- AMDGPU_VM_PAGE_EXECUTABLE;
- va.va_address = bo->virtual_mc_base_address;
- va.offset_in_bo = 0;
- va.map_size = ALIGN(bo->alloc_size, getpagesize());
-
- r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_GEM_VA, &va, sizeof(va));
- if (r) {
- fprintf(stderr, "amdgpu: VA_OP_UNMAP failed with %d\n", r);
- return;
- }
-
- amdgpu_vamgr_free_va(bo->dev->vamgr, bo->virtual_mc_base_address,
- bo->alloc_size);
-
- bo->virtual_mc_base_address = AMDGPU_INVALID_VA_ADDRESS;
-}
-
void amdgpu_bo_free_internal(amdgpu_bo_handle bo)
{
/* Remove the buffer from the hash tables. */
@@ -136,7 +70,6 @@ void amdgpu_bo_free_internal(amdgpu_bo_handle bo)
amdgpu_bo_cpu_unmap(bo);
}
- amdgpu_bo_unmap(bo);
amdgpu_close_kms_handle(bo->dev, bo->handle);
pthread_mutex_destroy(&bo->cpu_access_mutex);
free(bo);
@@ -144,7 +77,7 @@ void amdgpu_bo_free_internal(amdgpu_bo_handle bo)
int amdgpu_bo_alloc(amdgpu_device_handle dev,
struct amdgpu_bo_alloc_request *alloc_buffer,
- struct amdgpu_bo_alloc_result *info)
+ amdgpu_bo_handle *buf_handle)
{
struct amdgpu_bo *bo;
union drm_amdgpu_gem_create args;
@@ -183,14 +116,7 @@ int amdgpu_bo_alloc(amdgpu_device_handle dev,
pthread_mutex_init(&bo->cpu_access_mutex, NULL);
- r = amdgpu_bo_map(bo, alloc_buffer->phys_alignment);
- if (r) {
- amdgpu_bo_free_internal(bo);
- return r;
- }
-
- info->buf_handle = bo;
- info->virtual_mc_base_address = bo->virtual_mc_base_address;
+ *buf_handle = bo;
return 0;
}
@@ -255,7 +181,6 @@ int amdgpu_bo_query_info(amdgpu_bo_handle bo,
memset(info, 0, sizeof(*info));
info->alloc_size = bo_info.bo_size;
info->phys_alignment = bo_info.alignment;
- info->virtual_mc_base_address = bo->virtual_mc_base_address;
info->preferred_heap = bo_info.domains;
info->alloc_flags = bo_info.domain_flags;
info->metadata.flags = metadata.data.flags;
@@ -421,8 +346,6 @@ int amdgpu_bo_import(amdgpu_device_handle dev,
output->buf_handle = bo;
output->alloc_size = bo->alloc_size;
- output->virtual_mc_base_address =
- bo->virtual_mc_base_address;
return 0;
}
@@ -484,19 +407,11 @@ int amdgpu_bo_import(amdgpu_device_handle dev,
bo->dev = dev;
pthread_mutex_init(&bo->cpu_access_mutex, NULL);
- r = amdgpu_bo_map(bo, 1 << 20);
- if (r) {
- pthread_mutex_unlock(&dev->bo_table_mutex);
- amdgpu_bo_reference(&bo, NULL);
- return r;
- }
-
util_hash_table_set(dev->bo_handles, (void*)(uintptr_t)bo->handle, bo);
pthread_mutex_unlock(&dev->bo_table_mutex);
output->buf_handle = bo;
output->alloc_size = bo->alloc_size;
- output->virtual_mc_base_address = bo->virtual_mc_base_address;
return 0;
}
@@ -615,7 +530,7 @@ int amdgpu_bo_wait_for_idle(amdgpu_bo_handle bo,
int amdgpu_create_bo_from_user_mem(amdgpu_device_handle dev,
void *cpu,
uint64_t size,
- struct amdgpu_bo_alloc_result *info)
+ amdgpu_bo_handle *buf_handle)
{
int r;
struct amdgpu_bo *bo;
@@ -647,15 +562,7 @@ int amdgpu_create_bo_from_user_mem(amdgpu_device_handle dev,
bo->alloc_size = size;
bo->handle = args.handle;
- r = amdgpu_bo_map(bo, 1 << 12);
- if (r) {
- amdgpu_bo_free_internal(bo);
- return r;
- }
-
- info->buf_handle = bo;
- info->virtual_mc_base_address = bo->virtual_mc_base_address;
- info->virtual_mc_base_address += off;
+ *buf_handle = bo;
return r;
}
@@ -766,3 +673,32 @@ int amdgpu_bo_list_update(amdgpu_bo_list_handle handle,
free(list);
return r;
}
+
+int amdgpu_bo_va_op(amdgpu_bo_handle bo,
+ uint64_t offset,
+ uint64_t size,
+ uint64_t addr,
+ uint64_t flags,
+ uint32_t ops)
+{
+ amdgpu_device_handle dev = bo->dev;
+ struct drm_amdgpu_gem_va va;
+ int r;
+
+ if (ops != AMDGPU_VA_OP_MAP && ops != AMDGPU_VA_OP_UNMAP)
+ return -EINVAL;
+
+ memset(&va, 0, sizeof(va));
+ va.handle = bo->handle;
+ va.operation = ops;
+ va.flags = AMDGPU_VM_PAGE_READABLE |
+ AMDGPU_VM_PAGE_WRITEABLE |
+ AMDGPU_VM_PAGE_EXECUTABLE;
+ va.va_address = addr;
+ va.offset_in_bo = offset;
+ va.map_size = ALIGN(size, getpagesize());
+
+ r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_GEM_VA, &va, sizeof(va));
+
+ return r;
+}
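The userptr path changes the same way: amdgpu_create_bo_from_user_mem() now returns just the handle and the caller maps the memory explicitly. A sketch under the assumption that the CPU pointer should be page-aligned; wrap_user_memory() is a hypothetical helper and error-path cleanup is elided:

#include <errno.h>
#include <stdlib.h>
#include <unistd.h>
#include <amdgpu.h>
#include <amdgpu_drm.h>

/* Hypothetical helper: wrap page-aligned CPU memory as a userptr BO with
 * the v3 interface and map it for GPU access. */
static int wrap_user_memory(amdgpu_device_handle dev, uint64_t size,
                            void **cpu, amdgpu_bo_handle *bo,
                            uint64_t *gpu_va, amdgpu_va_handle *va_handle)
{
    long page = sysconf(_SC_PAGESIZE);
    int r;

    /* Page alignment of the CPU pointer is an assumption in this sketch. */
    if (posix_memalign(cpu, page, size))
        return -ENOMEM;

    /* v3: only the buffer handle comes back for userptr BOs as well. */
    r = amdgpu_create_bo_from_user_mem(dev, *cpu, size, bo);
    if (r)
        return r;

    r = amdgpu_va_range_alloc(dev, amdgpu_gpu_va_range_general, size,
                              page, 0, gpu_va, va_handle, 0);
    if (r)
        return r;

    return amdgpu_bo_va_op(*bo, 0, size, *gpu_va, 0, AMDGPU_VA_OP_MAP);
}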
diff --git a/amdgpu/amdgpu_internal.h b/amdgpu/amdgpu_internal.h
index bf7788dd..526a93f8 100644
--- a/amdgpu/amdgpu_internal.h
+++ b/amdgpu/amdgpu_internal.h
@@ -88,7 +88,6 @@ struct amdgpu_bo {
struct amdgpu_device *dev;
uint64_t alloc_size;
- uint64_t virtual_mc_base_address;
uint32_t handle;
uint32_t flink_name;