diff --git a/shared/source/debug_settings/debug_variables_base.inl b/shared/source/debug_settings/debug_variables_base.inl index 692260cd45065..37013191db33f 100644 --- a/shared/source/debug_settings/debug_variables_base.inl +++ b/shared/source/debug_settings/debug_variables_base.inl @@ -533,6 +533,7 @@ DECLARE_DEBUG_VARIABLE(int32_t, RenderCompressedImagesEnabled, -1, "-1: default, DECLARE_DEBUG_VARIABLE(int32_t, RenderCompressedBuffersEnabled, -1, "-1: default, 0: disabled, 1: enabled") DECLARE_DEBUG_VARIABLE(int32_t, EnableUsmConcurrentAccessSupport, 0, "0: default, >0: bitmask with usm capabilities to enable concurrent access for (bit0: host, bit1: device, bit2: shared single-device, bit3: shared cross-device, bit4: shared system)") DECLARE_DEBUG_VARIABLE(int32_t, EnableSharedSystemUsmSupport, -1, "-1: default, 0: shared system memory disabled, 1: shared system memory enabled") +DECLARE_DEBUG_VARIABLE(int32_t, DisableMadviseAutoReset, 0, "0: default, madvise autoreset vm_bind flag not disabled, 1: madvise autoreset vm_bind flag disabled") DECLARE_DEBUG_VARIABLE(int32_t, EnablePassInlineData, -1, "-1: default, 0: Do not allow to pass inline data 1: Enable passing of inline data") DECLARE_DEBUG_VARIABLE(int32_t, ForceFineGrainedSVMSupport, -1, "-1: default, 0: Do not report Fine Grained SVM capabilities 1: Report SVM Fine Grained capabilities if device supports SVM") DECLARE_DEBUG_VARIABLE(int32_t, UseAsyncDrmExec, -1, "-1: default, 0: Disabled 1: Enabled. 
If enabled, pass EXEC_OBJECT_ASYNC to exec ioctl.") diff --git a/shared/source/os_interface/linux/drm_neo.cpp b/shared/source/os_interface/linux/drm_neo.cpp index 9ed33c3721ffc..3503dc52721bf 100644 --- a/shared/source/os_interface/linux/drm_neo.cpp +++ b/shared/source/os_interface/linux/drm_neo.cpp @@ -1730,7 +1730,11 @@ int Drm::createDrmVirtualMemory(uint32_t &drmVmId) { auto &productHelper = rootDeviceEnvironment.getHelper<ProductHelper>(); VmBindParams vmBind{}; vmBind.vmId = static_cast<uint32_t>(ctl.vmId); - vmBind.flags = DRM_XE_VM_BIND_FLAG_CPU_ADDR_MIRROR; + if (debugManager.flags.DisableMadviseAutoReset.get() == 1) { + vmBind.flags = DRM_XE_VM_BIND_FLAG_CPU_ADDR_MIRROR; + } else { + vmBind.flags = DRM_XE_VM_BIND_FLAG_CPU_ADDR_MIRROR | DRM_XE_VM_BIND_FLAG_MADVISE_AUTORESET; + } vmBind.length = this->getSharedSystemAllocAddressRange(); vmBind.sharedSystemUsmEnabled = true; vmBind.sharedSystemUsmBind = true; diff --git a/shared/test/common/test_files/igdrcl.config b/shared/test/common/test_files/igdrcl.config index 3b15f0f147415..061e6a651457b 100644 --- a/shared/test/common/test_files/igdrcl.config +++ b/shared/test/common/test_files/igdrcl.config @@ -189,6 +189,7 @@ EnableStateComputeModeTracking = -1 EnableSetPair = -1 EnableUsmConcurrentAccessSupport = 0 EnableSharedSystemUsmSupport = -1 +DisableMadviseAutoReset = 0 EnablePassInlineData = -1 ForceFineGrainedSVMSupport = -1 EnableImmediateCmdListHeapSharing = -1 diff --git a/third_party/uapi/drm-next/xe/xe_drm.h b/third_party/uapi/drm-next/xe/xe_drm.h index 89ab5493547f9..98bc8858e4bd4 100644 --- a/third_party/uapi/drm-next/xe/xe_drm.h +++ b/third_party/uapi/drm-next/xe/xe_drm.h @@ -3,8 +3,8 @@ * Copyright © 2023 Intel Corporation */ -#ifndef _XE_DRM_H_ -#define _XE_DRM_H_ +#ifndef _UAPI_XE_DRM_H_ +#define _UAPI_XE_DRM_H_ #include "drm.h" @@ -140,7 +140,7 @@ extern "C" { * redefine the interface more easily than an ever growing struct of * increasing complexity, and for large parts of that interface to be * entirely optional. 
The downside is more pointer chasing; chasing across - * the boundary with pointers encapsulated inside u64. + * the __user boundary with pointers encapsulated inside u64. * * Example chaining: * @@ -1013,6 +1013,20 @@ struct drm_xe_vm_destroy { * valid on VMs with DRM_XE_VM_CREATE_FLAG_FAULT_MODE set. The CPU address * mirror flag are only valid for DRM_XE_VM_BIND_OP_MAP operations, the BO * handle MBZ, and the BO offset MBZ. + * - %DRM_XE_VM_BIND_FLAG_MADVISE_AUTORESET - Can be used in combination with + * %DRM_XE_VM_BIND_FLAG_CPU_ADDR_MIRROR to reset madvises when the underlying + * CPU address space range is unmapped (typically with munmap(2) or brk(2)). + * The madvise values set with %DRM_IOCTL_XE_MADVISE are reset to the values + * that were present immediately after the %DRM_IOCTL_XE_VM_BIND. + * The reset GPU virtual address range is the intersection of the range bound + * using %DRM_IOCTL_XE_VM_BIND and the virtual CPU address space range + * unmapped. + * This functionality is present to mimic the behaviour of CPU address space + * madvises set using madvise(2), which are typically reset on unmap. + * Note: free(3) may or may not call munmap(2) and/or brk(2), and may thus + * not invoke autoreset. Neither will stack variables going out of scope. + * Therefore it's recommended to always explicitly reset the madvises when + * freeing the memory backing a region used in a %DRM_IOCTL_XE_MADVISE call. 
* * The @prefetch_mem_region_instance for %DRM_XE_VM_BIND_OP_PREFETCH can also be: * - %DRM_XE_CONSULT_MEM_ADVISE_PREF_LOC, which ensures prefetching occurs in @@ -1119,6 +1133,7 @@ struct drm_xe_vm_bind_op { #define DRM_XE_VM_BIND_FLAG_DUMPABLE (1 << 3) #define DRM_XE_VM_BIND_FLAG_CHECK_PXP (1 << 4) #define DRM_XE_VM_BIND_FLAG_CPU_ADDR_MIRROR (1 << 5) +#define DRM_XE_VM_BIND_FLAG_MADVISE_AUTORESET (1 << 6) /** @flags: Bind flags */ __u32 flags; @@ -2258,4 +2273,4 @@ struct drm_xe_vm_query_mem_range_attr { } #endif -#endif /* _XE_DRM_H_ */ +#endif /* _UAPI_XE_DRM_H_ */