Skip to content

Commit

Permalink
Align directories and codegen with stock PyTorch (#310)
Browse files Browse the repository at this point in the history
Draft version for now. Only for design review.

Can be built, can be tested with simple ut, like `test_xpu.py`.

**Not tested with other uts**
 
Added support includes
- `REGISTER_XPU_DISPATCH(add_stub, add_kernel)`
- `TORCH_IMPL_FUNC`

---------

Signed-off-by: majing <Jing1.Ma@intel.com>
Signed-off-by: leizhenyuan <zhenyuan.lei@intel.com>
Co-authored-by: Xu Yutao <yutao.xu@intel.com>
Co-authored-by: majing <Jing1.Ma@intel.com>
Co-authored-by: Guo Yejun <yejun.guo@intel.com>
Co-authored-by: leizhenyuan <zhenyuan.lei@intel.com>
Co-authored-by: Feng Yuan <feng1.yuan@intel.com>
Co-authored-by: chunhuanMeng <105194461+chunhuanMeng@users.noreply.github.com>
  • Loading branch information
7 people authored Sep 26, 2024
1 parent d9ae62d commit 229a887
Show file tree
Hide file tree
Showing 214 changed files with 7,905 additions and 12,265 deletions.
4 changes: 2 additions & 2 deletions .lintrunner.toml
Original file line number Diff line number Diff line change
Expand Up @@ -56,8 +56,8 @@ code = 'CLANGFORMAT'
include_patterns = [
'src/aten/*.h',
'src/aten/*.cpp',
'src/aten/sycl/*.h',
'src/aten/sycl/*.cpp',
'src/ATen/native/xpu/sycl/*.h',
'src/ATen/native/xpu/sycl/*.cpp',
'aten/src/ATen/*.h',
'aten/src/ATen/mps/**/*.mm',
'aten/src/ATen/xpu/**/*.h',
Expand Down
4 changes: 4 additions & 0 deletions cmake/BuildFlags.cmake
Original file line number Diff line number Diff line change
Expand Up @@ -47,6 +47,10 @@ if(CMAKE_CXX_COMPILER_ID STREQUAL "GNU" OR CMAKE_CXX_COMPILER_ID STREQUAL "MSVC"
list(APPEND SYCL_HOST_FLAGS -O0)
endif(CMAKE_BUILD_TYPE MATCHES Debug)

# Propagate PyTorch's per-operator-headers build mode to SYCL host compilation:
# defining AT_PER_OPERATOR_HEADERS makes ATen sources include fine-grained
# <ATen/ops/*.h> headers instead of the monolithic ATen/Functions.h.
if(USE_PER_OPERATOR_HEADERS)
  list(APPEND SYCL_HOST_FLAGS -DAT_PER_OPERATOR_HEADERS)
endif()

# -- Kernel flags (SYCL_KERNEL_OPTIONS)
# The fast-math will be enabled by default in SYCL compiler.
# Refer to [https://clang.llvm.org/docs/UsersManual.html#cmdoption-fno-fast-math]
Expand Down
64 changes: 59 additions & 5 deletions cmake/Codegen.cmake
Original file line number Diff line number Diff line change
Expand Up @@ -3,7 +3,7 @@ if(Codegen_GPU_cmake_included)
endif()
set(Codegen_GPU_cmake_included true)

set(BUILD_TORCH_XPU_ATEN_GENERATED "${CMAKE_BINARY_DIR}/aten/src/ATen/xpu")
set(BUILD_TORCH_XPU_ATEN_GENERATED "${CMAKE_BINARY_DIR}/xpu/ATen/")
file(MAKE_DIRECTORY ${BUILD_TORCH_XPU_ATEN_GENERATED})

set(RegisterXPU_PATH ${BUILD_TORCH_XPU_ATEN_GENERATED}/RegisterXPU.cpp)
Expand Down Expand Up @@ -43,10 +43,64 @@ function(GEN_BACKEND file_yaml)
)
endfunction(GEN_BACKEND)

GEN_BACKEND(
xpu_functions.yaml
XPUNativeFunctions.h
RegisterXPU.cpp)

set(RegisterXPU_PATH ${BUILD_TORCH_XPU_ATEN_GENERATED}/RegisterXPU.cpp)
set(XPUFallback_PATH ${TORCH_XPU_OPS_ROOT}/src/ATen/native/xpu/XPUFallback.template)
# GEN_XPU(file_yaml out1 [out2 ...])
#
# Run stock PyTorch's torchgen over the XPU yaml to produce the generated
# sources listed in ARGN (created under BUILD_TORCH_XPU_ATEN_GENERATED),
# then append the XPU fallback registrations to RegisterXPU.cpp and strip
# redundant headers as a post-process step.
#
#   file_yaml - yaml file name under ${TORCH_XPU_OPS_ROOT}/yaml/native/
#   ARGN      - generated file names expected from torchgen
function(GEN_XPU file_yaml)
  # Resolve the expected output files into absolute paths.
  set(generated_files "")
  foreach(f ${ARGN})
    list(APPEND generated_files "${BUILD_TORCH_XPU_ATEN_GENERATED}/${f}")
  endforeach()
  file(GLOB_RECURSE depend_files ${TORCH_XPU_OPS_ROOT}/yaml/${file_yaml})
  set(CODEGEN_TEMPLATE ${TORCH_XPU_OPS_ROOT}/yaml/)

  # Codegen prepare process: torchgen needs the stock ATen templates next to
  # our yaml directory, so link them in (mklink on Windows, ln -s elsewhere).
  if(WIN32)
    string(REPLACE "/" "\\" LinkPATH "${CODEGEN_TEMPLATE}templates")
    string(REPLACE "/" "\\" TargetPATH "${CMAKE_SOURCE_DIR}/aten/src/ATen/templates")
    execute_process(COMMAND cmd /c mklink /D ${LinkPATH} ${TargetPATH})
    string(REPLACE "/" "\\" RegisterXPU_PATH_BACKSLASH "${RegisterXPU_PATH}")
    string(REPLACE "/" "\\" XPUFallback_PATH_BACKSLASH "${XPUFallback_PATH}")
    set(REGISTER_FALLBACK_CMD ${FILE_DISPLAY_CMD} ${XPUFallback_PATH_BACKSLASH} ">>" ${RegisterXPU_PATH_BACKSLASH})
  else()
    execute_process(COMMAND ln -s ${CMAKE_SOURCE_DIR}/aten/src/ATen/templates ${CODEGEN_TEMPLATE}) # soft link to pytorch templates
    set(REGISTER_FALLBACK_CMD ${FILE_DISPLAY_CMD} ${XPUFallback_PATH} ">>" ${RegisterXPU_PATH})
  endif()

  add_custom_command(
    OUTPUT ${generated_files}
    COMMAND
    "${PYTHON_EXECUTABLE}" -m torchgen.gen
    --source-path ${TORCH_XPU_OPS_ROOT}/yaml/
    --install-dir ${BUILD_TORCH_XPU_ATEN_GENERATED}
    --per-operator-headers
    --static-dispatch-backend
    --backend-whitelist=XPU
    COMMAND
    ${REGISTER_FALLBACK_CMD}
    # Codegen post-process
    COMMAND "${PYTHON_EXECUTABLE}" ${TORCH_XPU_OPS_ROOT}/tools/codegen/remove_headers.py --register_xpu_path ${RegisterXPU_PATH}
    ${SIMPLE_TRACE}
    WORKING_DIRECTORY ${TORCH_ROOT}
    DEPENDS
    # Fixed: was ${depended_files}, a variable that is never set (the glob
    # above stores into depend_files) — the typo silently dropped the yaml
    # glob from the dependency list, so edits there did not trigger re-codegen.
    ${depend_files}
    ${TORCH_XPU_OPS_ROOT}/yaml/native/${file_yaml}
    ${XPUFallback_PATH}
  )
endfunction()

# Generate the XPU dispatch registration sources from stock PyTorch's
# native_functions.yaml (replaces the old GEN_BACKEND/xpu_functions.yaml flow).
GEN_XPU(
  native_functions.yaml
  XPUFunctions.h
  RegisterXPU.cpp
)

list(APPEND xpu_generated_src ${RegisterXPU_PATH})
Expand Down
6 changes: 1 addition & 5 deletions src/ATen/native/sparse/SparseTensor.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -5,13 +5,9 @@
#include <ATen/core/op_registration/adaption.h>
#include <torch/library.h>

#ifndef AT_PER_OPERATOR_HEADERS
#include <ATen/Functions.h>
#include <ATen/NativeFunctions.h>
#else
#include <ATen/ops/_nnz_native.h>
#include <ATen/ops/_sparse_coo_tensor_with_dims_and_tensors_native.h>
#endif
#include <ATen/ops/_values_native.h>

namespace at::native::xpu {

Expand Down
Loading

0 comments on commit 229a887

Please sign in to comment.