diff --git a/quickassist/utilities/libusdm_drv/qae_mem.h b/quickassist/utilities/libusdm_drv/qae_mem.h
index 49939965..f66013fc 100644
--- a/quickassist/utilities/libusdm_drv/qae_mem.h
+++ b/quickassist/utilities/libusdm_drv/qae_mem.h
@@ -278,6 +278,121 @@ int qaeRegisterDevice(int fd);
 ****************************************************************************/
 int qaeUnregisterDevice(int fd);
 
+/**
+ *****************************************************************************
+ * @ingroup CommonMemoryDriver
+ *      qaeMemMapContiguousIova
+ *
+ * @brief
+ *      Allocates a contiguous IOVA (I/O Virtual Address) region and maps the
+ *      provided user-allocated virtual memory to this IOVA for DMA operations
+ *      with QAT hardware. This allows callers to use their own memory buffers
+ *      (e.g. pre-allocated buffers) for zero-copy DMA transfers.
+ *
+ *      The memory region is mapped with both read and write permissions for
+ *      DMA operations. The IOVA is aligned to IOVA_SLAB_SIZE (2MB) internally.
+ *
+ *      After successful mapping, qaeVirtToPhysNUMA() can be used to translate
+ *      addresses within the mapped region to their corresponding IOVA.
+ *
+ *      This function is thread-safe when ICP_THREAD_SPECIFIC_USDM is defined.
+ *
+ * @param[in] virt  Pointer to the user-allocated virtual memory to map.
+ *                  Must be a valid, accessible virtual address.
+ * @param[in] size  Size in bytes of the memory region to map. Must be
+ *                  greater than 0 and not exceed 2GB (2147483648 bytes).
+ *                  Will be rounded up to the nearest page boundary
+ *                  internally.
+ *
+ * @retval >0  The allocated IOVA address on success. Use this value as the
+ *             physical address for QAT DMA operations; pass the same virtual
+ *             address and size to qaeMemUnmapContiguousIova() when done.
+ * @retval 0   Failure - could not allocate IOVA or map the memory. This can
+ *             occur if:
+ *             - This function is not supported (e.g. in the out-of-tree driver)
+ *             - Size is 0 or exceeds 2GB
+ *             - No contiguous IOVA space is available (fragmentation)
+ *             - VFIO container is not registered (qaeRegisterDevice not called)
+ *             - VFIO IOMMU mapping failed
+ *             - VFIO is configured in no-IOMMU mode (not supported for this API)
+ *
+ * @pre
+ *      qaeRegisterDevice() must have been called prior to using this API
+ *      for the DMA mapping to be effective. The virtual address provided must
+ *      be valid and accessible. The virtual memory region must remain valid
+ *      and not be freed while the IOVA mapping is active.
+ *
+ * @post
+ *      On success, the virtual memory is mapped to the returned IOVA and
+ *      can be used for DMA operations with QAT hardware. The mapping is also
+ *      registered in the USDM page table for qaeVirtToPhysNUMA() lookups.
+ *
+ * @note
+ *      This function is only available in VFIO mode and will fail in
+ *      no-IOMMU mode. Memory allocated through qaeMemAllocNUMA() should
+ *      NOT be mapped using this API as it is already managed by USDM
+ *      internally.
+ *
+ *      This function may fail due to IOVA address space fragmentation even
+ *      if there is sufficient total free IOVA space. This can happen after
+ *      many allocations and deallocations of varying sizes.
+ *
+ ****************************************************************************/
+uint64_t qaeMemMapContiguousIova(void *virt, uint32_t size);
+
+/**
+ *****************************************************************************
+ * @ingroup CommonMemoryDriver
+ *      qaeMemUnmapContiguousIova
+ *
+ * @brief
+ *      Unmaps a previously mapped IOVA region and releases the IOVA address
+ *      space. This function should be called when the user-mapped memory is
+ *      no longer needed for DMA operations.
+ *
+ *      This function reverses the operation performed by
+ *      qaeMemMapContiguousIova(). It removes the IOMMU mapping via VFIO,
+ *      clears the page table entries, and releases the IOVA address space
+ *      in the global bitmap for reuse.
+ *
+ *      The IOVA is automatically looked up from the page table using the
+ *      provided virtual address, so callers do not need to track the IOVA
+ *      returned by qaeMemMapContiguousIova().
+ *
+ *      This function is thread-safe when ICP_THREAD_SPECIFIC_USDM is defined.
+ *
+ * @param[in] virt  Pointer to the virtual memory that was mapped. This must
+ *                  be the same pointer passed to qaeMemMapContiguousIova().
+ *                  Must not be NULL.
+ * @param[in] size  Size in bytes of the memory region. Must match the size
+ *                  used when qaeMemMapContiguousIova() was called.
+ *
+ * @retval 0  Success - the IOVA was unmapped and released.
+ * @retval 1  Failure - could not unmap the IOVA. This can occur if:
+ *            - This function is not supported (e.g. in the out-of-tree driver)
+ *            - virt is NULL
+ *            - The virtual address was not previously mapped
+ *            - VFIO IOMMU unmapping failed
+ *            - VFIO is configured in no-IOMMU mode
+ *
+ * @pre
+ *      The memory must have been previously mapped via
+ *      qaeMemMapContiguousIova() and no DMA operations should be in progress.
+ *      The size parameter must match the size used during mapping.
+ *
+ * @post
+ *      The IOVA is unmapped and the address space is released for reuse.
+ *      The user's virtual memory is not affected and remains valid.
+ *      qaeVirtToPhysNUMA() will no longer return valid results for this region.
+ *
+ * @note
+ *      This function is only available in VFIO mode and will fail in
+ *      no-IOMMU mode. Do NOT use this function to unmap memory that was
+ *      allocated through qaeMemAllocNUMA() - use qaeMemFreeNUMA() instead.
+ *
+ ****************************************************************************/
+int qaeMemUnmapContiguousIova(void *virt, uint32_t size);
+
 #ifndef __KERNEL__
 /*! Define a constant for user space to select any available NUMA node */
 #define NUMA_ANY_NODE (-1)
diff --git a/quickassist/utilities/libusdm_drv/user_space/vfio/qae_mem_utils_vfio.c b/quickassist/utilities/libusdm_drv/user_space/vfio/qae_mem_utils_vfio.c
index de168302..ca537d3d 100644
--- a/quickassist/utilities/libusdm_drv/user_space/vfio/qae_mem_utils_vfio.c
+++ b/quickassist/utilities/libusdm_drv/user_space/vfio/qae_mem_utils_vfio.c
@@ -1010,3 +1010,121 @@ int qaeUnregisterDevice(int fd)
     return ret;
 }
 #endif /* ICP_THREAD_SPECIFIC_USDM */
+
+/* Maximum size for qaeMemMapContiguousIova: 2GB */
+#define QAE_IOVA_MAP_MAX_SIZE (2UL * 1024 * 1024 * 1024)
+
+uint64_t qaeMemMapContiguousIova(void *virt, uint32_t size)
+{
+    uint64_t iova;
+
+    /* Fail if in no-IOMMU mode - requires IOMMU for DMA mapping */
+    if (g_noiommu_enabled)
+    {
+        CMD_DEBUG("%s:%d Cannot map IOVA in no-IOMMU mode\n",
+                  __func__, __LINE__);
+        return 0;
+    }
+
+    /* Fail if VFIO container is not registered */
+    if (vfio_container_fd < 0)
+    {
+        CMD_DEBUG("%s:%d VFIO container not registered. "
+                  "Call qaeRegisterDevice() first.\n",
+                  __func__, __LINE__);
+        return 0;
+    }
+
+    /* Validate size is within allowed range */
+    if (size == 0 || size > QAE_IOVA_MAP_MAX_SIZE)
+    {
+        CMD_DEBUG("%s:%d Invalid size=%u. Must be > 0 and <= 2GB\n",
+                  __func__, __LINE__, size);
+        return 0;
+    }
+
+    /* Allocate IOVA address space with IOVA_SLAB_SIZE alignment */
+    iova = allocate_iova(size, IOVA_SLAB_SIZE);
+    if (!iova)
+    {
+        CMD_DEBUG("%s:%d Failed to allocate IOVA for size=%u. "
+                  "IOVA space may be fragmented.\n",
+                  __func__, __LINE__, size);
+        return 0;
+    }
+
+    /* Map the virtual address to the allocated IOVA */
+    if (dma_map_slab(virt, iova, size))
+    {
+        CMD_DEBUG("%s:%d Failed to DMA map virt=%p to iova=0x%lx size=%u\n",
+                  __func__, __LINE__, virt, (unsigned long)iova, size);
+        iova_release(iova, size);
+        return 0;
+    }
+
+    /*
+     * Register the mapping in the page table for qaeVirtToPhysNUMA support.
+     * We use 4KB page granularity (hp_en=0) since user-provided memory may
+     * be backed by either regular pages or huge pages, and 4KB granularity
+     * works correctly for both cases. The load_addr_fptr is set globally
+     * at init time based on system huge page configuration.
+     */
+    store_mmap_range(&g_page_table, virt, iova, size, 0);
+
+    return iova;
+}
+
+int qaeMemUnmapContiguousIova(void *virt, uint32_t size)
+{
+    uint64_t iova;
+    int ret = 0;
+
+    /* Fail if in no-IOMMU mode */
+    if (g_noiommu_enabled)
+    {
+        CMD_DEBUG("%s:%d Cannot unmap IOVA in no-IOMMU mode\n",
+                  __func__, __LINE__);
+        return 1;
+    }
+
+    /* Validate virtual address */
+    if (virt == NULL)
+    {
+        CMD_DEBUG("%s:%d NULL virtual address provided\n",
+                  __func__, __LINE__);
+        return 1;
+    }
+
+    /* Look up the IOVA from the page table using the virtual address */
+    iova = qaeVirtToPhysNUMA(virt);
+    if (iova == 0)
+    {
+        CMD_DEBUG("%s:%d Could not find IOVA for virt=%p\n",
+                  __func__, __LINE__, virt);
+        return 1;
+    }
+
+    /* Align IOVA to slab boundary for release */
+    iova = iova & ~(IOVA_SLAB_SIZE - 1);
+
+    /* Clear the page table entries for qaeVirtToPhysNUMA */
+    store_mmap_range(&g_page_table, virt, 0, size, 0);
+
+    /* Unmap from IOMMU if container is active */
+    if (vfio_container_fd >= 0)
+    {
+        ret = dma_unmap_slab(iova, size);
+        if (ret)
+        {
+            CMD_DEBUG("%s:%d Failed to DMA unmap iova=0x%lx size=%u\n",
+                      __func__, __LINE__, (unsigned long)iova, size);
+            /* Continue to release IOVA even on unmap failure */
+        }
+    }
+
+    /* Release the IOVA address space */
+    iova_release(iova, size);
+
+    return ret;
+}
+
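
Reviewer note, not part of the patch: a minimal caller-side sketch of the new API, assuming qaeRegisterDevice() has already been called elsewhere with the application's VFIO container fd and that the buffer comes from a plain aligned_alloc(). BUF_SIZE, dma_with_user_buffer() and the allocation scheme are illustrative only, not part of the USDM API.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "qae_mem.h"

#define BUF_SIZE (64 * 1024) /* illustrative size, a multiple of 4KB */

int dma_with_user_buffer(void)
{
    /* Caller-owned, page-aligned buffer; USDM does not manage this memory. */
    void *buf = aligned_alloc(4096, BUF_SIZE);
    uint64_t iova;
    int ret;

    if (buf == NULL)
        return -1;
    memset(buf, 0, BUF_SIZE);

    /* Map the buffer to a contiguous IOVA for QAT DMA (VFIO mode only). */
    iova = qaeMemMapContiguousIova(buf, BUF_SIZE);
    if (iova == 0)
    {
        free(buf);
        return -1;
    }
    printf("buf %p mapped at iova 0x%" PRIx64 "\n", buf, iova);

    /*
     * Submit QAT requests here using 'iova' (or qaeVirtToPhysNUMA() on any
     * address inside 'buf') as the DMA address; all DMA must be complete
     * before the region is unmapped.
     */

    /* Tear down with the same virt pointer and size that were mapped. */
    ret = qaeMemUnmapContiguousIova(buf, BUF_SIZE);
    free(buf); /* the buffer itself remains owned by the caller */
    return ret;
}

Since qaeMemUnmapContiguousIova() keys off the virtual address, the caller does not need to retain the returned IOVA beyond the DMA submissions themselves.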