usdm: expose IOVA mapping APIs for user memory #131
@@ -1010,3 +1010,121 @@ int qaeUnregisterDevice(int fd)
    return ret;
}
#endif /* ICP_THREAD_SPECIFIC_USDM */

/* Maximum size for qaeMemMapContiguousIova: 2GB */
#define QAE_IOVA_MAP_MAX_SIZE (2UL * 1024 * 1024 * 1024)

uint64_t qaeMemMapContiguousIova(void *virt, uint32_t size)
{
    uint64_t iova;

    /* Fail if in no-IOMMU mode - requires IOMMU for DMA mapping */
    if (g_noiommu_enabled)
    {
        CMD_DEBUG("%s:%d Cannot map IOVA in no-IOMMU mode\n",
                  __func__, __LINE__);
        return 0;
    }

    /* Fail if VFIO container is not registered */
    if (vfio_container_fd < 0)
    {
        CMD_DEBUG("%s:%d VFIO container not registered. "
                  "Call qaeRegisterDevice() first.\n",
                  __func__, __LINE__);
        return 0;
    }

    /* Validate size is within allowed range */
    if (size == 0 || size > QAE_IOVA_MAP_MAX_SIZE)
    {
        CMD_DEBUG("%s:%d Invalid size=%u. Must be > 0 and <= 2GB\n",
                  __func__, __LINE__, size);
        return 0;
    }

    /* Allocate IOVA address space with IOVA_SLAB_SIZE alignment */
    iova = allocate_iova(size, IOVA_SLAB_SIZE);
    if (!iova)
    {
        CMD_DEBUG("%s:%d Failed to allocate IOVA for size=%u. "
                  "IOVA space may be fragmented.\n",
                  __func__, __LINE__, size);
        return 0;
    }

    /* Map the virtual address to the allocated IOVA */
    if (dma_map_slab(virt, iova, size))
    {
        CMD_DEBUG("%s:%d Failed to DMA map virt=%p to iova=0x%lx size=%u\n",
                  __func__, __LINE__, virt, (unsigned long)iova, size);
        iova_release(iova, size);
        return 0;
    }

    /*
     * Register the mapping in the page table for qaeVirtToPhysNUMA support.
     * We use 4KB page granularity (hp_en=0) since user-provided memory may
     * be backed by either regular pages or huge pages, and 4KB granularity
     * works correctly for both cases. The load_addr_fptr is set globally
     * at init time based on system huge page configuration.
     */
    store_mmap_range(&g_page_table, virt, iova, size, 0);

    return iova;
}

int qaeMemUnmapContiguousIova(void *virt, uint32_t size)
{
    uint64_t iova;
    int ret = 0;

    /* Fail if in no-IOMMU mode */
    if (g_noiommu_enabled)
    {
        CMD_DEBUG("%s:%d Cannot unmap IOVA in no-IOMMU mode\n",
                  __func__, __LINE__);
        return 1;
    }

    /* Validate virtual address */
    if (virt == NULL)
    {
        CMD_DEBUG("%s:%d NULL virtual address provided\n",
                  __func__, __LINE__);
        return 1;
    }

Inline review comment (Contributor, Author): Add a check

    /* Lookup the IOVA from the page table using the virtual address */
    iova = qaeVirtToPhysNUMA(virt);
    if (iova == 0)
    {
        CMD_DEBUG("%s:%d Could not find IOVA for virt=%p\n",
                  __func__, __LINE__, virt);
        return 1;
    }

    /* Align IOVA to slab boundary for release */
    iova = iova & ~(IOVA_SLAB_SIZE - 1);

    /* Clear the page table entries for qaeVirtToPhysNUMA */
    store_mmap_range(&g_page_table, virt, 0, size, 0);

    /* Unmap from IOMMU if container is active */
    if (vfio_container_fd >= 0)
    {
        ret = dma_unmap_slab(iova, size);
        if (ret)
        {
            CMD_DEBUG("%s:%d Failed to DMA unmap iova=0x%lx size=%u\n",
                      __func__, __LINE__, (unsigned long)iova, size);
            /* Continue to release IOVA even on unmap failure */
        }
    }

    /* Release the IOVA address space */
    iova_release(iova, size);

    return ret;
}
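
A minimal usage sketch of the new API pair, for illustration only (not part of the patch). It assumes the device has already been registered via qaeRegisterDevice() so a VFIO container is available, that the buffer is page-aligned, and that the prototypes come from the usdm header; the header name and the 2MB buffer size are illustrative assumptions.

#include <stdint.h>
#include <stdlib.h>
#include "qae_mem.h" /* assumed location of the new prototypes */

int map_user_buffer_example(void)
{
    void *buf = NULL;
    uint32_t len = 2 * 1024 * 1024;
    uint64_t iova;

    /* User-allocated, page-aligned memory to be exposed to the device */
    if (posix_memalign(&buf, 4096, len) != 0)
        return 1;

    /* Map the buffer through the IOMMU; 0 means failure, otherwise the IOVA */
    iova = qaeMemMapContiguousIova(buf, len);
    if (iova == 0)
    {
        free(buf);
        return 1;
    }

    /* ... program DMA descriptors with iova ... */

    /* Unmap and release the IOVA range; returns 0 on success */
    if (qaeMemUnmapContiguousIova(buf, len) != 0)
    {
        /* Failure details are logged by the library via CMD_DEBUG */
    }

    free(buf);
    return 0;
}

On failure qaeMemMapContiguousIova() returns 0 and qaeMemUnmapContiguousIova() returns non-zero; both log the cause via CMD_DEBUG.
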
General comment: Is the expectation that this API is used for one large block of memory that the user then subdivides into multiple buffers, or is it something done for every buffer used by the application?
i.e. can they later call qaeVirtToPhysNUMA(pVirt) for some offset into the mapped memory to get the corresponding IOVA? If not, and the application calls the new API for every buffer, is there any danger of running out of IOVAs, since each mapping is aligned to 2MB? 262,144 IOVA slabs possible.
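(Working that figure through, and assuming IOVA_SLAB_SIZE is indeed 2 MiB as suggested: 262,144 slabs × 2 MiB = 512 GiB, so the concern presumes roughly a 512 GiB IOVA window. Because each qaeMemMapContiguousIova() call consumes at least one slab, an application that maps every small buffer individually would exhaust that window after 262,144 mappings.)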
I don't know. @Wangl56, can you comment on this?
Regarding qaeVirtToPhysNUMA(), good catch. At the moment we are not storing the mappings in the usdm page table. This is needed in order to use memory mapped through this new API with QATlib.
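
Once the mappings are stored in the usdm page table (as the updated diff does via store_mmap_range() at 4KB granularity), the offset lookup the reviewer asks about should reduce to a plain qaeVirtToPhysNUMA() call. A minimal sketch, assuming the page-table entries cover the whole mapped range and that the prototype comes from the usdm header (header name is an assumption):

#include <stddef.h>
#include <stdint.h>
#include "qae_mem.h" /* assumed location of the qaeVirtToPhysNUMA() prototype */

/*
 * Illustrative only: translate an offset into a region previously mapped
 * with qaeMemMapContiguousIova() into its device IOVA. Relies on
 * store_mmap_range() having recorded the range at 4KB granularity.
 */
static uint64_t offset_to_iova(void *base, size_t offset)
{
    /* Returns 0 if the address is not present in the usdm page table */
    return qaeVirtToPhysNUMA((char *)base + offset);
}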