
Commit 88a794f

Authored by jingxu10, ishaghosh27, Harryalways317, emmanuel-ferdman, and AngryLoki

Jingxu10/release 26000 (#3476)

* Update README.md (#3472)
  Co-authored-by: ishaghosh27 <[email protected]>

* added import os for exit() to fix NameError (#3475)
  A NameError is raised if execution reaches that line.
  Co-authored-by: Harish Vadaparty <[email protected]>

* [Doc] Update Custom OPs broken references (#739)
  Small PR: commit f63ed97 moved a number of files; this PR adjusts the source references accordingly.
  Signed-off-by: Emmanuel Ferdman <[email protected]>

* Add options to build with system LIBXSMM/oneDNN/sleef/MKL/ideep/gtest
  Signed-off-by: Sv. Lockal <[email protected]>

---------

Signed-off-by: Sv. Lockal <[email protected]>
Co-authored-by: ishaghosh27 <[email protected]>
Co-authored-by: Harish Vadaparty <[email protected]>
Co-authored-by: Emmanuel Ferdman <[email protected]>
Co-authored-by: Sv. Lockal <[email protected]>
1 parent 8572514 commit 88a794f

7 files changed: 91 additions & 45 deletions
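The headline change is the new family of `USE_SYSTEM_*` build options. As a rough sketch of how a full wheel build might pick them up: the `CMAKE_ARGS` pass-through below is an assumption about the setup.py wrapper, not something this commit shows; the `-D` flags themselves are the ones introduced in the diffs below.

```bash
# Hedged sketch: build the CPU wheel against system oneDNN/LIBXSMM/SLEEF.
# Assumes (not verified here) that setup.py forwards CMAKE_ARGS to CMake;
# the USE_SYSTEM_* option names come from cmake/cpu/Options.cmake below.
export CMAKE_ARGS="-DUSE_SYSTEM_ONEDNN=ON -DUSE_SYSTEM_LIBXSMM=ON -DUSE_SYSTEM_SLEEF=ON"
python setup.py bdist_wheel
```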

cmake/Modules/FindoneMKL.cmake

Lines changed: 4 additions & 0 deletions

```diff
@@ -72,6 +72,10 @@ endfunction()
 # IPEX CPU lib always download and install mkl-static lib and use static linker for mkl-static lib.
 # IPEX CPU lib can manual config to use the dynamic link for oneMKL lib.
 if(BUILD_MODULE_TYPE STREQUAL "GPU")
+  set(USE_SYSTEM_MKL ON)
+endif()
+
+if(USE_SYSTEM_MKL)
   get_mkl_from_env_var()
 else()
   if(BUILD_WITH_XPU)
```
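Since the GPU module now forces `USE_SYSTEM_MKL` on, and `get_mkl_from_env_var()` implies the oneMKL location is taken from the environment, a configure step might look like the sketch below. The setvars.sh path and the exact environment variable consulted are assumptions, not part of this diff.

```bash
# Hypothetical: make a system oneMKL visible to the build, then opt in.
# /opt/intel/oneapi is an illustrative install prefix.
source /opt/intel/oneapi/setvars.sh
cmake -DUSE_SYSTEM_MKL=ON ..
```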

cmake/cpu/Options.cmake

Lines changed: 7 additions & 0 deletions

```diff
@@ -7,6 +7,13 @@ set(Options_CPU_cmake_included true)
 # The options to build cpu
 include(CMakeDependentOption)
 
+option(USE_SYSTEM_LIBXSMM "Use system LIBXSMM library" OFF)
+option(USE_SYSTEM_ONEDNN "Use system oneDNN library" OFF)
+option(USE_SYSTEM_SLEEF "Use system SLEEF library" OFF)
+option(USE_SYSTEM_MKL "Use system MKL library" OFF)
+option(USE_SYSTEM_IDEEP "Use system ideep library" OFF)
+option(USE_SYSTEM_GTEST "Use system GoogleTest library" OFF)
+
 option(BUILD_LIBXSMM_VIA_CMAKE "Build LIBXSMM via CMake" ON)
 option(USE_LIBXSMM "Enable LIBXSMM" ON)
 option(USE_DNNL_GRAPH_COMPILER "Build with DNNL Graph Compiler" ON)
```
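All six switches default to OFF, so the bundled third-party checkouts remain the default build path. Enabling any subset at configure time is a plain `-D` flag; a minimal sketch, assuming a direct CMake invocation of the CPU build:

```bash
# Each option can be toggled independently; ON means "use the system copy
# instead of the bundled third_party checkout".
cmake -DUSE_SYSTEM_LIBXSMM=ON \
      -DUSE_SYSTEM_ONEDNN=ON \
      -DUSE_SYSTEM_SLEEF=ON \
      -DUSE_SYSTEM_MKL=ON \
      -DUSE_SYSTEM_IDEEP=ON \
      -DUSE_SYSTEM_GTEST=ON \
      ..
```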

csrc/cpu/CMakeLists.txt

Lines changed: 52 additions & 22 deletions

```diff
@@ -37,8 +37,20 @@ if((DEFINED ENV{DNNL_GRAPH_BUILD_COMPILER_BACKEND}) AND USE_DNNL_GRAPH_COMPILER)
 endif()
 
 set(THIRD_PARTY_BUILD_PATH_NAME "cpu_third_party")
-add_subdirectory(${IPEX_CPU_CPP_THIRD_PARTY_ROOT}/ideep/mkl-dnn ${THIRD_PARTY_BUILD_PATH_NAME}/ideep/mkl-dnn EXCLUDE_FROM_ALL)
-# add_subdirectory(${IPEX_CPU_CPP_THIRD_PARTY_ROOT}/mkl-dnn cpu_third_party/mkl-dnn)
+if(USE_SYSTEM_ONEDNN)
+  find_package(dnnl 3.4.1 CONFIG REQUIRED)
+  get_target_property(ONEDNN_INCLUDE_DIR DNNL::dnnl INTERFACE_INCLUDE_DIRECTORIES)
+  set(ONEDNN_LIBRARY DNNL::dnnl)
+  set(ONEDNN_GENERATED_INCLUDE ${ONEDNN_INCLUDE_DIR})
+else()
+  add_subdirectory(${IPEX_CPU_CPP_THIRD_PARTY_ROOT}/ideep/mkl-dnn ${THIRD_PARTY_BUILD_PATH_NAME}/ideep/mkl-dnn EXCLUDE_FROM_ALL)
+  set(ONEDNN_INCLUDE_DIR ${IPEX_CPU_CPP_THIRD_PARTY_ROOT}/ideep/mkl-dnn/include)
+  set(ONEDNN_LIBRARY dnnl)
+
+  # path of oneDNN .h.in generated file
+  file(RELATIVE_PATH CUR_DIR_REL_PATH "${IPEX_ROOT_DIR}" "${CMAKE_CURRENT_SOURCE_DIR}")
+  set(ONEDNN_GENERATED_INCLUDE "${CMAKE_BINARY_DIR}/${CUR_DIR_REL_PATH}/${THIRD_PARTY_BUILD_PATH_NAME}/ideep/mkl-dnn/include")
+endif()
 
 IF(IPEX_DISP_OP)
   set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DIPEX_DISP_OP")
@@ -120,7 +132,7 @@ add_library(${PLUGIN_NAME_CPU} SHARED ${IPEX_CPU_CPP_SRCS})
 # For IPEX_API macro
 target_compile_definitions(${PLUGIN_NAME_CPU} PUBLIC "BUILD_IPEX_MAIN_LIB")
 
-set_target_properties(${PLUGIN_NAME_CPU} PROPERTIES ONEDNN_INCLUDE_DIR "${IPEX_CPU_CPP_THIRD_PARTY_ROOT}/ideep/mkl-dnn/include")
+set_target_properties(${PLUGIN_NAME_CPU} PROPERTIES ONEDNN_INCLUDE_DIR ${ONEDNN_INCLUDE_DIR})
 
 # includes
 target_include_directories(${PLUGIN_NAME_CPU} PUBLIC ${IPEX_ROOT_DIR})
@@ -133,19 +145,21 @@ target_include_directories(${PLUGIN_NAME_CPU} PUBLIC ${IPEX_CPU_ROOT_DIR}/jit)
 target_include_directories(${PLUGIN_NAME_CPU} PUBLIC ${IPEX_JIT_CPP_ROOT})
 target_include_directories(${PLUGIN_NAME_CPU} PUBLIC ${IPEX_UTLIS_CPP_ROOT})
 
-target_include_directories(${PLUGIN_NAME_CPU} PUBLIC ${IPEX_CPU_CPP_THIRD_PARTY_ROOT}/ideep/mkl-dnn/include)
+target_include_directories(${PLUGIN_NAME_CPU} PUBLIC ${ONEDNN_INCLUDE_DIR})
 
 if(USE_LIBXSMM)
   target_include_directories(${PLUGIN_NAME_CPU} PUBLIC ${IPEX_CPU_ROOT_DIR}/tpp)
-  target_include_directories(${PLUGIN_NAME_CPU} PUBLIC ${IPEX_CPU_CPP_THIRD_PARTY_ROOT}/libxsmm/include)
+  target_include_directories(${PLUGIN_NAME_CPU} PUBLIC ${LIBXSMM_INCLUDE_DIRS})
 endif(USE_LIBXSMM)
 
-# path of oneDNN .h.in generated file
-file(RELATIVE_PATH CUR_DIR_REL_PATH "${IPEX_ROOT_DIR}" "${CMAKE_CURRENT_SOURCE_DIR}")
-set(ONEDNN_GENERATED_INCLUDE "${CMAKE_BINARY_DIR}/${CUR_DIR_REL_PATH}/${THIRD_PARTY_BUILD_PATH_NAME}/ideep/mkl-dnn/include")
 target_include_directories(${PLUGIN_NAME_CPU} PUBLIC ${ONEDNN_GENERATED_INCLUDE})
 
-target_include_directories(${PLUGIN_NAME_CPU} PUBLIC ${IPEX_CPU_CPP_THIRD_PARTY_ROOT}/ideep/include)
+if(USE_SYSTEM_IDEEP)
+  find_path(IDEEP_INCLUDE_DIR ideep.hpp REQUIRED)
+else()
+  set(IDEEP_INCLUDE_DIR ${IPEX_CPU_CPP_THIRD_PARTY_ROOT}/ideep/include)
+endif()
+target_include_directories(${PLUGIN_NAME_CPU} PUBLIC ${IDEEP_INCLUDE_DIR})
 target_include_directories(${PLUGIN_NAME_CPU} PUBLIC ${PYTHON_INCLUDE_DIR})
 
 if(BUILD_CPU_WITH_ONECCL)
@@ -165,12 +179,17 @@ if(CLANG_FORMAT)
 endif()
 
 if(USE_LIBXSMM)
-  if(BUILD_LIBXSMM_VIA_CMAKE)
+  if(USE_SYSTEM_LIBXSMM)
+    find_package(PkgConfig REQUIRED)
+    pkg_check_modules(LIBXSMM REQUIRED libxsmm)
+    target_include_directories(${PLUGIN_NAME_CPU} PUBLIC ${LIBXSMM_INCLUDE_DIRS})
+    target_link_libraries(${PLUGIN_NAME_CPU} PRIVATE ${LIBXSMM_LIBRARIES})
+  elseif(BUILD_LIBXSMM_VIA_CMAKE)
     add_subdirectory(${IPEX_CPU_CPP_THIRD_PARTY_ROOT}/libxsmm cpu_third_party/libxsmm EXCLUDE_FROM_ALL)
     add_definitions(-DLIBXSMM_DEFAULT_CONFIG)
-    target_include_directories(${PLUGIN_NAME_CPU} PUBLIC ${IPEX_CPU_CPP_THIRD_PARTY_ROOT}/libxsmm/include)
+    set(LIBXSMM_INCLUDE_DIRS ${IPEX_CPU_CPP_THIRD_PARTY_ROOT}/libxsmm/include)
     target_link_libraries(${PLUGIN_NAME_CPU} PRIVATE xsmm)
-  else(BUILD_LIBXSMM_VIA_CMAKE)
+  else()
    include(${CMAKE_ROOT}/Modules/ExternalProject.cmake)
    set(args
      CC=${CMAKE_C_COMPILER}
@@ -188,20 +207,31 @@ if(USE_LIBXSMM)
      ${args}
      INSTALL_COMMAND ""
    )
+    set(LIBXSMM_INCLUDE_DIRS ${IPEX_CPU_CPP_THIRD_PARTY_ROOT}/libxsmm/include)
    target_link_libraries(${PLUGIN_NAME_CPU} PRIVATE ${IPEX_CPU_CPP_THIRD_PARTY_ROOT}/libxsmm/lib/libxsmm.a)
   endif(BUILD_LIBXSMM_VIA_CMAKE)
 endif(USE_LIBXSMM)
 
-# setup sleef options:
-set(SLEEF_BUILD_SHARED_LIBS OFF CACHE BOOL "Build sleef as static library" FORCE)
-set(SLEEF_BUILD_DFT OFF CACHE BOOL "Don't build sleef DFT lib" FORCE)
-set(SLEEF_BUILD_GNUABI_LIBS OFF CACHE BOOL "Don't build sleef gnuabi libs" FORCE)
-set(SLEEF_BUILD_TESTS OFF CACHE BOOL "Don't build sleef tests" FORCE)
-set(SLEEF_BUILD_SCALAR_LIB OFF CACHE BOOL "libsleefscalar will be built." FORCE)
-add_subdirectory(${IPEX_CPU_CPP_THIRD_PARTY_ROOT}/sleef ${THIRD_PARTY_BUILD_PATH_NAME}/sleef EXCLUDE_FROM_ALL)
-target_link_libraries(${PLUGIN_NAME_CPU} PRIVATE sleef)
+if(USE_SYSTEM_SLEEF)
+  find_package(PkgConfig REQUIRED)
+  pkg_check_modules(SLEEF REQUIRED sleef)
+  target_include_directories(${PLUGIN_NAME_CPU} PUBLIC ${SLEEF_INCLUDE_DIRS})
+  target_link_libraries(${PLUGIN_NAME_CPU} PRIVATE ${SLEEF_LIBRARIES})
+else()
+  # setup sleef options:
+  set(SLEEF_BUILD_SHARED_LIBS OFF CACHE BOOL "Build sleef as static library" FORCE)
+  set(SLEEF_BUILD_DFT OFF CACHE BOOL "Don't build sleef DFT lib" FORCE)
+  set(SLEEF_BUILD_GNUABI_LIBS OFF CACHE BOOL "Don't build sleef gnuabi libs" FORCE)
+  set(SLEEF_BUILD_TESTS OFF CACHE BOOL "Don't build sleef tests" FORCE)
+  set(SLEEF_BUILD_SCALAR_LIB OFF CACHE BOOL "libsleefscalar will be built." FORCE)
+  add_subdirectory(${IPEX_CPU_CPP_THIRD_PARTY_ROOT}/sleef ${THIRD_PARTY_BUILD_PATH_NAME}/sleef EXCLUDE_FROM_ALL)
+  target_link_libraries(${PLUGIN_NAME_CPU} PRIVATE sleef)
+endif()
+
+if(NOT USE_SYSTEM_ONEDNN)
+  add_dependencies(${PLUGIN_NAME_CPU} dnnl)
+endif()
 
-add_dependencies(${PLUGIN_NAME_CPU} dnnl)
 # If Graph Compiler is built, then it should link to its LLVM dependencies,
 # and not the LLVM symbols exposed by PyTorch.
 if ((DEFINED ENV{DNNL_GRAPH_BUILD_COMPILER_BACKEND}) AND USE_DNNL_GRAPH_COMPILER)
@@ -213,7 +243,7 @@ if ((DEFINED ENV{DNNL_GRAPH_BUILD_COMPILER_BACKEND}) AND USE_DNNL_GRAPH_COMPILER
     set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} -Wl,--exclude-libs=${DNNL_GRAPHCOMPILER_LLVM_LIB_EXCLUDE}")
   endif()
 else()
-  target_link_libraries(${PLUGIN_NAME_CPU} PUBLIC dnnl)
+  target_link_libraries(${PLUGIN_NAME_CPU} PUBLIC ${ONEDNN_LIBRARY})
 endif()
 find_package(oneMKL QUIET)
 if (ONEMKL_FOUND)
```
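The system paths come from pkg-config for LIBXSMM and SLEEF and from oneDNN's CMake package config (`find_package(dnnl 3.4.1 CONFIG REQUIRED)`), so it is worth verifying the host actually exposes all three before flipping the options on. A preflight sketch; the dnnl-config.cmake search locations are illustrative and distribution-dependent:

```bash
# Mirror the pkg_check_modules() calls above: both .pc files must resolve.
pkg-config --modversion libxsmm || echo "libxsmm.pc not found"
pkg-config --modversion sleef   || echo "sleef.pc not found"

# find_package(dnnl ... CONFIG) needs oneDNN's CMake package files; where
# they live varies by distribution (the paths below are an assumption).
find /usr/lib* /usr/local /opt -name 'dnnl-config.cmake' 2>/dev/null
```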

examples/cpu/inference/python/jupyter-notebooks/README.md

Lines changed: 6 additions & 4 deletions

```diff
@@ -2,15 +2,17 @@
 
 Intel® Extension for PyTorch (IPEX) extends PyTorch* with optimizations for extra performance boost on Intel® hardware. While most of the optimizations will be upstreamed in future PyTorch* releases, the extension delivers up-to-date features and optimizations for PyTorch workloads on Intel® hardware. The optimization approaches generally include operator optimization, graph optimization and runtime optimization.
 
+Before selecting a sample, please make sure to (1) Check [Prerequisites](#prerequisites), (2) complete [Environment Setup](#environment-setup), and (3) see instructions to [Run the Sample](#run-the-sample).
+
 ## Jupyter Notebooks Overview
 
 | Sample name | Description | Time to Complete | Category | Validated for AI Tools Selector |
 |---|---|---|---|---|
 [Getting Started with Intel® Extension for PyTorch* (IPEX)](https://github.com/intel/intel-extension-for-pytorch/tree/main/examples/cpu/inference/python/jupyter-notebooks/IPEX_Getting_Started.ipynb ) | This code sample demonstrates how to begin using the Intel® Extension for PyTorch* (IPEX). It will guide users how to run a PyTorch inference workload on CPU by using oneAPI AI Analytics Toolkit and also analyze the CPU usage via oneDNN verbose logs.| 15 minutes| Getting Started | Y |
-[PyTorch Inference Optimizations with Intel® Advanced Matrix Extensions (Intel® AMX) Bfloat16 Integer8](https://github.com/intel-innersource/frameworks.ai.pytorch.ipex-cpu/blob/cpu-device/examples/cpu/inference/python/jupyter-notebooks/IntelPyTorch_InferenceOptimizations_AMX_BF16_INT8.ipynb) | This code sample demonstrates how to perform inference using the ResNet50 and BERT models using the Intel® Extension for PyTorch* (IPEX). IPEX allows you to speed up inference on Intel® Xeon Scalable processors with lower precision data formats and specialized computer instructions. The bfloat16 (BF16) data format uses half the bit width of floating-point-32 (FP32), which lessens the amount of memory needed and execution time to process. Likewise, the integer8 (INT8) data format uses half the bit width of BF16. | 5 minutes | Code Optimization | Y|
-[Interactive Chat Based on DialoGPT Model Using Intel® Extension for PyTorch* Quantization](https://github.com/intel-innersource/frameworks.ai.pytorch.ipex-cpu/blob/cpu-device/examples/cpu/inference/python/jupyter-notebooks/IntelPytorch_Interactive_Chat_Quantization.ipynb)| This code sample demonstrates how to create interactive chat based on pre-trained DialoGPT model and add the Intel® Extension for PyTorch* (IPEX) quantization to it. The sample shows how to create interactive chat based on the pre-trained DialoGPT model from HuggingFace and how to add INT8 dynamic quantization to it. The Intel® Extension for PyTorch* (IPEX) gives users the ability to speed up operations on processors with INT8 data format and specialized computer instructions.| 10 minutes | Concepts and Functionality| Y|
-[Optimize PyTorch Models using Intel® Extension for PyTorch* (IPEX) Quantization](https://github.com/intel-innersource/frameworks.ai.pytorch.ipex-cpu/blob/cpu-device/examples/cpu/inference/python/jupyter-notebooks/IntelPytorch_Quantization.ipynb)|This code sample demonstrates how to quantize a ResNet50 model that is calibrated by the CIFAR10 dataset using the Intel® Extension for PyTorch* (IPEX). IPEX gives users the ability to speed up inference on Intel® Xeon Scalable processors with INT8 data format and specialized computer instructions. The INT8 data format uses quarter the bit width of floating-point-32 (FP32), lowering the amount of memory needed and execution time to process.| 5 minutes| Concepts and Functionality| Y|
-[Optimize PyTorch Models using Intel® Extension for PyTorch* (IPEX)](https://github.com/intel-innersource/frameworks.ai.pytorch.ipex-cpu/blob/cpu-device/examples/cpu/inference/python/jupyter-notebooks/optimize_pytorch_models_with_ipex.ipynb)| This sample notebook shows how to get started with Intel® Extension for PyTorch* (IPEX) for sample Computer Vision and NLP workloads. The sample starts by loading two models from the PyTorch hub: Faster-RCNN (Faster R-CNN) and distilbert (DistilBERT). After loading the models, the sample applies sequential optimizations from Intel® Extension for PyTorch* (IPEX) and examines performance gains for each incremental change.| 30 minutes | Code Optimization |Y|
+[PyTorch Inference Optimizations with Intel® Advanced Matrix Extensions (Intel® AMX) Bfloat16 Integer8](https://github.com/intel/intel-extension-for-pytorch/blob/main/examples/cpu/inference/python/jupyter-notebooks/IntelPyTorch_InferenceOptimizations_AMX_BF16_INT8.ipynb) | This code sample demonstrates how to perform inference using the ResNet50 and BERT models using the Intel® Extension for PyTorch* (IPEX). IPEX allows you to speed up inference on Intel® Xeon Scalable processors with lower precision data formats and specialized computer instructions. The bfloat16 (BF16) data format uses half the bit width of floating-point-32 (FP32), which lessens the amount of memory needed and execution time to process. Likewise, the integer8 (INT8) data format uses half the bit width of BF16. | 5 minutes | Code Optimization | Y|
+[Interactive Chat Based on DialoGPT Model Using Intel® Extension for PyTorch* Quantization](https://github.com/intel/intel-extension-for-pytorch/blob/main/examples/cpu/inference/python/jupyter-notebooks/IntelPytorch_Interactive_Chat_Quantization.ipynb)| This code sample demonstrates how to create interactive chat based on pre-trained DialoGPT model and add the Intel® Extension for PyTorch* (IPEX) quantization to it. The sample shows how to create interactive chat based on the pre-trained DialoGPT model from HuggingFace and how to add INT8 dynamic quantization to it. The Intel® Extension for PyTorch* (IPEX) gives users the ability to speed up operations on processors with INT8 data format and specialized computer instructions.| 10 minutes | Concepts and Functionality| Y|
+[Optimize PyTorch Models using Intel® Extension for PyTorch (IPEX) Quantization](https://github.com/intel/intel-extension-for-pytorch/blob/main/examples/cpu/inference/python/jupyter-notebooks/IntelPytorch_Quantization.ipynb)|This code sample demonstrates how to quantize a ResNet50 model that is calibrated by the CIFAR10 dataset using the Intel® Extension for PyTorch* (IPEX). IPEX gives users the ability to speed up inference on Intel® Xeon Scalable processors with INT8 data format and specialized computer instructions. The INT8 data format uses quarter the bit width of floating-point-32 (FP32), lowering the amount of memory needed and execution time to process.| 5 minutes| Concepts and Functionality| Y|
+[Optimize PyTorch Models using Intel® Extension for PyTorch* (IPEX)](https://github.com/intel/intel-extension-for-pytorch/blob/main/examples/cpu/inference/python/jupyter-notebooks/optimize_pytorch_models_with_ipex.ipynb)| This sample notebook shows how to get started with Intel® Extension for PyTorch* (IPEX) for sample Computer Vision and NLP workloads. The sample starts by loading two models from the PyTorch hub: Faster-RCNN (Faster R-CNN) and distilbert (DistilBERT). After loading the models, the sample applies sequential optimizations from Intel® Extension for PyTorch* (IPEX) and examines performance gains for each incremental change.| 30 minutes | Code Optimization |Y|
 
 >**Note**: For Key Implementation Details, please refer to the .ipynb file of a sample.
```

intel_extension_for_pytorch/__init__.py

Lines changed: 2 additions & 2 deletions

```diff
@@ -1,6 +1,6 @@
 # This Python file uses the following encoding: utf-8
 import re
-
+import os
 import torch
 
 
@@ -33,7 +33,7 @@
     + f"{ipex_version}.*, but PyTorch {torch.__version__} is found. "
     + "Please switch to the matching version and run again."
 )
-exit(127)
+os.exit(127)
 
 
 import os
```

tests/cpu/bench/custom_op_bench/README.md

Lines changed: 4 additions & 4 deletions

````diff
@@ -1,14 +1,14 @@
 # Running benchmarks for Intel Extension for PyTorch Custom OPs
-Evaluate performance for custom operator with [launcher](../../../../tutorials/intro_launch.md).
+Evaluate performance for custom operator with [launcher](../../../../docs/tutorials/performance_tuning/launch_script.md).
 ## Prepare envrioment
-Follow [performance_tuning_guide](../../../../tutorials/Performance_Tuning.md) to install Memory_Allocator(you can choose Tcmalloc or Jemalloc).
+Follow [performance_tuning_guide](../../../../docs/tutorials/performance_tuning/tuning_guide.md) to install Memory_Allocator(you can choose Tcmalloc or Jemalloc).
 Install intel-openmp:
 
 ```
 pip install intel-openmp=2024.1.2
 ```
 
-## Evaluate [Interaction](../../../../intel_extension_for_pytorch/nn/functional/interaction.py)
+## Evaluate [Interaction](../../../../intel_extension_for_pytorch/cpu/nn/interaction.py)
 
 1.Inference: 1 instance per core in real world scenario
 
@@ -34,7 +34,7 @@ python -m intel_extension_for_pytorch.cpu.launch --node-id 0 optimizer.py --opti
 python -m intel_extension_for_pytorch.cpu.launch --node-id 0 optimizer.py --optimizer adam # for adam
 ```
 
-## Evaluate IPEX [MergedEmbeddingBag](../../../../intel_extension_for_pytorch/nn/module/merged_embeddingbag.py)
+## Evaluate IPEX [MergedEmbeddingBag](../../../../intel_extension_for_pytorch/nn/modules/merged_embeddingbag.py)
 ```
 export CORES=`lscpu | grep Core | awk '{print $4}'`
 export BATCHSIZE=$((128*CORES))
````

tests/cpu/cpp/CMakeLists.txt

Lines changed: 16 additions & 13 deletions

```diff
@@ -32,24 +32,33 @@ set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_CXX_FLAGS} -Wl,-Bsymbolic-functions")
 set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_CXX_FLAGS} -Wl,-Bsymbolic-functions")
 
 # Set the include dir
-include_directories(${PYTORCH_INSTALL_DIR}/include)
-include_directories(${PYTORCH_INSTALL_DIR}/include/torch/csrc/api/include/)
-include_directories(${THIRD_PARTY_ROOT}/googletest/googletest/include)
+include_directories(${TORCH_INCLUDE_DIRS})
 include_directories(${IPEX_PROJECT_TOP_DIR})
 include_directories(${IPEX_PROJECT_TOP_DIR}/csrc/include)
 
 link_directories(${PYTORCH_INSTALL_DIR}/lib)
 # search the lib directory for gtest
 link_directories(${CPP_TEST_BUILD_DIR}/lib)
 
-# add gtest cmake path
-add_subdirectory(${THIRD_PARTY_ROOT}/googletest ${CPP_TEST_BUILD_DIR}/third_party/googletest EXCLUDE_FROM_ALL)
-
 # Add the Test Files
 set(IPEX_CPP_TEST_SOURCES test_runtime_api.cpp test_dyndisp_and_isa_api.cpp)
 
 add_executable(${CPU_CPP_TEST_NAME} ${IPEX_CPP_TEST_SOURCES})
 
+if(USE_SYSTEM_GTEST)
+  find_package(GTest REQUIRED)
+  target_link_libraries(${CPU_CPP_TEST_NAME} PUBLIC GTest::gtest GTest::gtest_main)
+else()
+  # add gtest cmake path
+  add_subdirectory(${THIRD_PARTY_ROOT}/googletest ${CPP_TEST_BUILD_DIR}/third_party/googletest EXCLUDE_FROM_ALL)
+
+  # Link GTest
+  target_link_libraries(${CPU_CPP_TEST_NAME} PUBLIC gtest_main)
+  target_link_libraries(${CPU_CPP_TEST_NAME} PUBLIC gtest)
+
+  target_include_directories(${CPU_CPP_TEST_NAME} PRIVATE ${THIRD_PARTY_ROOT}/googletest/googletest/include)
+endif()
+
 set(BUILD_STATIC_ONEMKL ON)
 find_package(oneMKL QUIET)
 if (ONEMKL_FOUND)
@@ -58,14 +67,8 @@ endif()
 
 target_link_directories(${CPU_CPP_TEST_NAME} PRIVATE ${CMAKE_INSTALL_PREFIX}/${CMAKE_INSTALL_LIBDIR}/)
 
-# Link GTest
-target_link_libraries(${CPU_CPP_TEST_NAME} PUBLIC gtest_main)
-target_link_libraries(${CPU_CPP_TEST_NAME} PUBLIC gtest)
-
 # Link Pytorch
-target_link_directories(${CPU_CPP_TEST_NAME} PRIVATE ${CMAKE_INSTALL_PREFIX}/${CMAKE_INSTALL_LIBDIR})
-target_link_libraries(${CPU_CPP_TEST_NAME} PUBLIC torch_cpu)
-target_link_libraries(${CPU_CPP_TEST_NAME} PUBLIC c10)
+target_link_libraries(${CPU_CPP_TEST_NAME} PUBLIC ${TORCH_LIBRARIES})
 
 # Link IPEX
 target_link_libraries(${CPU_CPP_TEST_NAME} PUBLIC intel-ext-pt-cpu)
```
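Exercising the new `USE_SYSTEM_GTEST` path requires a GoogleTest install that `find_package(GTest REQUIRED)` can locate. The package name below is illustrative and varies by distribution (some ship GTest as sources that must be built first); the configure flag is the one added in cmake/cpu/Options.cmake:

```bash
# Hypothetical setup on a Debian/Ubuntu-like host; package name may differ.
sudo apt-get install libgtest-dev

# Configure the C++ tests against the system GoogleTest instead of the
# bundled third_party/googletest checkout, then build.
cmake -DUSE_SYSTEM_GTEST=ON ..
cmake --build .
```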
