diff --git a/recipes/libtorch/all/conan-official-libtorch-vars.cmake b/recipes/libtorch/all/conan-official-libtorch-vars.cmake
new file mode 100644
index 0000000000000..089918545ff23
--- /dev/null
+++ b/recipes/libtorch/all/conan-official-libtorch-vars.cmake
@@ -0,0 +1,12 @@
+# ATenConfig.cmake variables
+set(ATEN_FOUND 1)
+
+# Caffe2Config.cmake variables
+set(Caffe2_MAIN_LIBS torch)
+set(CAFFE2_INCLUDE_DIRS ${${CMAKE_FIND_PACKAGE_NAME}_INCLUDE_DIRS})
+
+# TorchConfig.cmake variables
+set(TORCH_FOUND TRUE)
+set(TORCH_INCLUDE_DIRS ${${CMAKE_FIND_PACKAGE_NAME}_INCLUDE_DIRS})
+set(TORCH_LIBRARIES ${${CMAKE_FIND_PACKAGE_NAME}_LIBRARIES})
+set(TORCH_CXX_FLAGS)
diff --git a/recipes/libtorch/all/conan_deps.cmake b/recipes/libtorch/all/conan_deps.cmake
new file mode 100644
index 0000000000000..f0a2e233b8ea4
--- /dev/null
+++ b/recipes/libtorch/all/conan_deps.cmake
@@ -0,0 +1,59 @@
+# A wrapper for https://github.com/pytorch/pytorch/blob/v2.4.0/cmake/Dependencies.cmake
+
+# Moved initialization of these vars here so they can be overridden
+set(ATen_CPU_DEPENDENCY_LIBS)
+set(ATen_XPU_DEPENDENCY_LIBS)
+set(ATen_CUDA_DEPENDENCY_LIBS)
+set(ATen_HIP_DEPENDENCY_LIBS)
+set(ATen_PUBLIC_CUDA_DEPENDENCY_LIBS)
+set(ATen_PUBLIC_HIP_DEPENDENCY_LIBS)
+set(Caffe2_DEPENDENCY_LIBS)
+set(Caffe2_CUDA_DEPENDENCY_LIBS)
+
+find_package(cpuinfo REQUIRED CONFIG)
+find_package(fp16 REQUIRED CONFIG)
+find_package(fmt REQUIRED CONFIG)
+find_package(httplib REQUIRED CONFIG)
+
+list(APPEND Caffe2_DEPENDENCY_LIBS
+    cpuinfo
+    fp16::fp16
+    fmt::fmt
+)
+add_library(fp16 ALIAS fp16::fp16)
+
+if(CONAN_LIBTORCH_USE_PTHREADPOOL)
+    find_package(pthreadpool REQUIRED CONFIG)
+    list(APPEND Caffe2_DEPENDENCY_LIBS pthreadpool::pthreadpool)
+    add_library(pthreadpool ALIAS pthreadpool::pthreadpool)
+endif()
+
+if(CONAN_LIBTORCH_USE_FLATBUFFERS)
+    find_package(flatbuffers REQUIRED CONFIG)
+    list(APPEND Caffe2_DEPENDENCY_LIBS flatbuffers::flatbuffers)
+endif()
+
+if(CONAN_LIBTORCH_USE_SLEEF)
+    find_package(sleef REQUIRED CONFIG)
+    list(APPEND ATen_CPU_DEPENDENCY_LIBS sleef::sleef)
+endif()
+
+if(USE_XNNPACK)
+    find_package(xnnpack REQUIRED CONFIG)
+    list(APPEND Caffe2_DEPENDENCY_LIBS xnnpack::xnnpack)
+    add_library(XNNPACK INTERFACE)
+endif()
+
+if(USE_FBGEMM)
+    find_package(fbgemmLibrary REQUIRED CONFIG)
+    list(APPEND Caffe2_DEPENDENCY_LIBS fbgemm)
+endif()
+
+if(USE_PYTORCH_QNNPACK)
+    find_package(fxdiv REQUIRED CONFIG)
+    find_package(psimd REQUIRED CONFIG)
+endif()
+
+if(USE_MIMALLOC)
+    find_package(mimalloc REQUIRED CONFIG)
+endif()
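Note that conan_deps.cmake above is never referenced by the vendored build scripts directly; the recipe injects it through CMake's CMAKE_PROJECT_&lt;PROJECT-NAME&gt;_INCLUDE hook (set in generate() further down), which makes CMake run the script at the end of the top-level project() call, before cmake/Dependencies.cmake is processed. A minimal sketch of the same injection pattern, with hypothetical names (demo, project(Demo)):

```python
from conan import ConanFile
from conan.tools.cmake import CMakeToolchain

class DemoConan(ConanFile):
    name = "demo"
    settings = "os", "arch", "compiler", "build_type"
    exports_sources = "conan_deps.cmake", "src/*"

    def generate(self):
        tc = CMakeToolchain(self)
        # "Demo" must match the upstream project(Demo ...) name, just as the
        # libtorch recipe uses CMAKE_PROJECT_Torch_INCLUDE for project(Torch).
        tc.variables["CMAKE_PROJECT_Demo_INCLUDE"] = "conan_deps.cmake"
        tc.generate()
```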
diff --git a/recipes/libtorch/all/conandata.yml b/recipes/libtorch/all/conandata.yml
new file mode 100644
index 0000000000000..b3b8485adbbae
--- /dev/null
+++ b/recipes/libtorch/all/conandata.yml
@@ -0,0 +1,25 @@
+sources:
+  "2.4.0":
+    url: "https://github.com/pytorch/pytorch/releases/download/v2.4.0/pytorch-v2.4.0.tar.gz"
+    sha256: "a890d4342149adbc6c8b116a9afe437fe347527a9ecc0650086cdec82ecdcfb7"
+patches:
+  "2.4.0":
+    - patch_file: "patches/2.4.0/0001-use-conan-dependencies.patch"
+      patch_description: "Use Conan dependencies"
+      patch_type: "conan"
+    - patch_file: "patches/2.4.0/0002-fix-a-minor-glog-incompatibility.patch"
+      patch_description: "Fix a small incompatibility with newer glog"
+      patch_type: "portability"
+    - patch_file: "patches/2.4.0/0003-fix-cmake-logic-bug.patch"
+      patch_description: "Fix a CMake logic bug when kineto is disabled"
+      patch_type: "portability"
+    - patch_file: "patches/2.4.0/0004-add-a-missing-include.patch"
+      patch_description: "Add a missing include"
+      patch_type: "bugfix"
+    - patch_file: "patches/2.4.0/0005-kineto-unvendor-fmt.patch"
+      patch_description: "Unvendor fmt in vendored kineto"
+      base_path: "third_party/kineto"
+      patch_type: "conan"
+    - patch_file: "patches/2.4.0/0006-dont-build-protoc-on-apple.patch"
+      patch_description: "Don't build custom protoc on Apple OSes"
+      patch_type: "conan"
diff --git a/recipes/libtorch/all/conanfile.py b/recipes/libtorch/all/conanfile.py
new file mode 100644
index 0000000000000..f63649f3a0a54
--- /dev/null
+++ b/recipes/libtorch/all/conanfile.py
@@ -0,0 +1,667 @@
+import os
+from pathlib import Path
+
+from conan import ConanFile
+from conan.errors import ConanInvalidConfiguration
+from conan.tools.apple import is_apple_os
+from conan.tools.build import check_min_cppstd
+from conan.tools.cmake import CMake, CMakeDeps, CMakeToolchain, cmake_layout
+from conan.tools.env import VirtualBuildEnv, Environment
+from conan.tools.files import apply_conandata_patches, copy, export_conandata_patches, get, rm, rmdir, save
+from conan.tools.microsoft import is_msvc, is_msvc_static_runtime
+from conan.tools.scm import Version
+
+required_conan_version = ">=1.60.0 <2 || >=2.0.6"
+
+
+class LibtorchConan(ConanFile):
+    name = "libtorch"
+    description = "Tensors and Dynamic neural networks with strong GPU acceleration."
+    license = "BSD-3-Clause"
+    url = "https://github.com/conan-io/conan-center-index"
+    homepage = "https://pytorch.org"
+    topics = ("machine-learning", "deep-learning", "neural-network", "gpu", "tensor")
+
+    package_type = "library"
+    settings = "os", "arch", "compiler", "build_type"
+    options = {
+        "shared": [True, False],
+        "fPIC": [True, False],
+        "blas": ["eigen", "openblas", "veclib"],
+        "build_lazy_ts_backend": [True, False],
+        "build_lite_interpreter": [True, False],
+        "coreml_delegate": [True, False],
+        "fakelowp": [True, False],
+        "observers": [True, False],
+        "utilities": [True, False],
+        "vulkan_fp16_inference": [True, False],
+        "vulkan_relaxed_precision": [True, False],
+        "with_fbgemm": [True, False],
+        "with_gflags": [True, False],
+        "with_glog": [True, False],
+        "with_itt": [True, False],
+        "with_kineto": [True, False],
+        "with_mimalloc": [True, False],
+        "with_nnpack": [True, False],
+        "with_numa": [True, False],
+        "with_opencl": [True, False],
+        "with_openmp": [True, False],
+        "with_qnnpack": [True, False],
+        "with_vulkan": [True, False],
+        "with_xnnpack": [True, False],
+        # TODO
+        # "build_lazy_cuda_linalg": [True, False],
+        # "debug_cuda": [True, False],
+        # "with_cuda": [True, False],
+        # "with_cudnn": [True, False],
+        # "with_cusparselt": [True, False],
+        # "with_magma": [True, False],
+        # "with_metal": [True, False],
+        # "with_mkldnn": [True, False],
+        # "with_mps": [True, False],
+        # "with_nccl": [True, False],
+        # "with_nnapi": [True, False],
+        # "with_nvrtc": [True, False],
+        # "with_rocm": [True, False],
+        # "with_snpe": [True, False],
+        # "with_xpu": [True, False],
+    }
+    default_options = {
+        "shared": False,
+        "fPIC": True,
+        "blas": "openblas",
+        "build_lazy_ts_backend": True,
+        "build_lite_interpreter": False,
+        "coreml_delegate": False,
+        "fakelowp": False,
+        "observers": False,
+        "utilities": False,
+        "vulkan_fp16_inference": False,
+        "vulkan_relaxed_precision": False,
+        "with_fbgemm": False,  # TODO: enable after #24749
+        "with_gflags": False,
+        "with_glog": False,
+        "with_itt": True,
+        "with_kineto": True,
+        "with_mimalloc": False,
+        "with_nnpack": True,
+        "with_numa": True,
+        "with_opencl": False,
+        "with_openmp": False,  # TODO: enable after #22360
+        "with_qnnpack": True,
+        "with_vulkan": True,
+        "with_xnnpack": True,
+        # TODO
+        # "build_lazy_cuda_linalg": False,
+        # "debug_cuda": False,
+        # "with_cuda": False,
+        # "with_cudnn": True,
+        # "with_cusparselt": True,
+        # "with_mkldnn": False,
+        # "with_magma": True,
+        # "with_metal": False,
+        # "with_mps": True,
+        # "with_nccl": True,
+        # "with_nnapi": False,
+        # "with_nvrtc": False,
+        # "with_rocm": False,
+        # "with_snpe": False,
+        # "with_xpu": False,
+    }
+    options_description = {
+        "blas": "Which BLAS backend to use",
+        "build_lazy_cuda_linalg": "Build cuda linalg ops as separate library",
+        "build_lazy_ts_backend": "Build the lazy Torchscript backend, not compatible with mobile builds",
+        "build_lite_interpreter": "Build Lite Interpreter",
+        "coreml_delegate": "Use the CoreML backend through delegate APIs",
+        "debug_cuda": "When compiling DEBUG, also attempt to compile CUDA with debug flags (may cause nvcc to OOM)",
+        "fakelowp": "Use FakeLowp operators instead of FBGEMM",
+        "observers": "Use observers module",
+        "utilities": "Build utility executables",
+        "vulkan_fp16_inference": "Vulkan - Use fp16 inference",
+        "vulkan_relaxed_precision": "Vulkan - Use relaxed precision math in the kernels (mediump)",
+        "with_cuda": "Use CUDA",
+        "with_cudnn": "Use cuDNN",
+        "with_cusparselt": "Use cuSPARSELt",
+        "with_fbgemm": "Use FBGEMM (quantized 8-bit server operators)",
+        "with_gflags": "Use GFLAGS",
+        "with_glog": "Use GLOG",
+        "with_itt": "Use Intel VTune Profiler ITT functionality",
+        "with_kineto": "Use Kineto profiling library",
+        "with_magma": "Use MAGMA linear algebra library",
+        "with_metal": "Use Metal for iOS build",
+        "with_mimalloc": "Use mimalloc",
+        "with_mkldnn": "Use MKLDNN. Only available on x86, x86_64, and AArch64.",
+        "with_mps": "Use MPS for macOS build",
+        "with_nccl": "Use NCCL and RCCL",
+        "with_nnapi": "Use NNAPI for Android build",
+        "with_nnpack": "Use NNPACK CPU acceleration library",
+        "with_numa": "Use NUMA. Only available on Linux.",
+        "with_nvrtc": "Use NVRTC",
+        "with_opencl": "Use OpenCL",
+        "with_openmp": "Use OpenMP for parallel code",
+        "with_qnnpack": "Use ATen/QNNPACK (quantized 8-bit operators)",
+        "with_rocm": "Use ROCm (HIP)",
+        "with_snpe": "Use Qualcomm's SNPE library",
+        "with_vulkan": "Use Vulkan GPU backend",
+        "with_xnnpack": "Use XNNPACK",
+        "with_xpu": "Use XPU (SYCL) backend for Intel GPUs",
+    }
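All of the entries above are ordinary Conan options, so a downstream project can flip them per dependency. A hypothetical consumer conanfile.py (names and option choices illustrative, not part of this recipe):

```python
from conan import ConanFile

class ConsumerConan(ConanFile):
    settings = "os", "arch", "compiler", "build_type"
    requires = "libtorch/2.4.0"
    generators = "CMakeDeps", "CMakeToolchain"
    # The "libtorch/*:" scope applies the value only to this dependency.
    default_options = {
        "libtorch/*:shared": True,
        "libtorch/*:blas": "openblas",
        "libtorch/*:with_vulkan": False,
    }
```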
+    no_copy_source = True
+    provides = ["miniz", "pocketfft", "kineto", "nnpack", "qnnpack"]
+
+    @property
+    def _min_cppstd(self):
+        return 17
+
+    @property
+    def _compilers_minimum_version(self):
+        return {
+            "gcc": "7",
+            "clang": "5",
+            "apple-clang": "9",
+            "msvc": "191",
+            "Visual Studio": "15",
+        }
+
+    @property
+    def _is_mobile_os(self):
+        return self.settings.os in ["Android", "iOS"]
+
+    def export_sources(self):
+        export_conandata_patches(self)
+        copy(self, "conan_deps.cmake", self.recipe_folder, os.path.join(self.export_sources_folder, "src"))
+        copy(self, "conan-official-libtorch-vars.cmake", self.recipe_folder, os.path.join(self.export_sources_folder, "src"))
+
+    def config_options(self):
+        if self.settings.os == "Windows":
+            del self.options.fPIC
+            del self.options.with_qnnpack
+        if not is_apple_os(self):
+            self.options.rm_safe("with_metal")
+            self.options.rm_safe("with_magma")
+            self.options.rm_safe("with_mps")
+        if self.settings.os != "Linux":
+            del self.options.with_numa
+        if self.settings.os != "Android":
+            self.options.rm_safe("with_nnapi")
+            self.options.rm_safe("with_snpe")
+            self.options.with_vulkan = False
+        if self._is_mobile_os:
+            self.options.blas = "eigen"
+            self.options.build_lazy_ts_backend = False
+            del self.options.with_fbgemm
+            # del self.options.distributed
+        if self.settings.arch not in ["x86", "x86_64", "armv8"]:
+            self.options.rm_safe("with_mkldnn")
+        if not is_apple_os(self) and self.settings.os not in ["Linux", "Android"]:
+            del self.options.with_nnpack
+        self.options.with_itt = self.settings.arch in ["x86", "x86_64"]
+
+    def configure(self):
+        if self.options.shared:
+            self.options.rm_safe("fPIC")
+        if not self.options.get_safe("with_cuda"):
+            self.options.rm_safe("build_lazy_cuda_linalg")
+            self.options.rm_safe("debug_cuda")
+            self.options.rm_safe("with_cudnn")
+            self.options.rm_safe("with_cusparselt")
+            self.options.rm_safe("with_nvrtc")
+        if not self.options.get_safe("with_rocm"):
+            self.options.rm_safe("with_nccl")
+        if not self.options.with_vulkan:
+            self.options.rm_safe("vulkan_fp16_inference")
+            self.options.rm_safe("vulkan_relaxed_precision")
+        if not self.options.get_safe("with_fbgemm"):
+            self.options.rm_safe("fakelowp")
+
+        # numa static can't be linked into shared libs.
+        # Because Caffe2_detectron_ops* libs are always shared, we have to force
+        # libnuma shared even if libtorch:shared=False
+        if self.options.get_safe("with_numa"):
+            self.options["libnuma"].shared = True
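The option pruning above leans on a Conan idiom: once an option has been removed with del or rm_safe(), later code must read it through get_safe(), which returns None instead of raising. A minimal sketch of the semantics, with a hypothetical use_foo option:

```python
from conan import ConanFile

class SketchConan(ConanFile):
    settings = "os"
    options = {"use_foo": [True, False]}
    default_options = {"use_foo": True}

    def config_options(self):
        if self.settings.os == "Windows":
            del self.options.use_foo  # the option no longer exists on Windows

    def validate(self):
        # get_safe() yields None (falsey) where the option was deleted, so a
        # single branch covers both "disabled" and "not applicable".
        if self.options.get_safe("use_foo"):
            self.output.info("foo enabled")
```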
+
+    def layout(self):
+        cmake_layout(self, src_folder="src")
+
+    @property
+    def _depends_on_sleef(self):
+        return not self._is_mobile_os and not self.settings.os == "Emscripten"
+
+    @property
+    def _depends_on_pthreadpool(self):
+        return self._is_mobile_os or self._use_nnpack_family
+
+    @property
+    def _depends_on_flatbuffers(self):
+        return not self._is_mobile_os
+
+    @property
+    def _blas_cmake_option_value(self):
+        return {
+            "eigen": "Eigen",
+            "atlas": "ATLAS",
+            "openblas": "OpenBLAS",
+            "mkl": "MKL",
+            "veclib": "vecLib",
+            "flame": "FLAME",
+            "generic": "Generic"
+        }[str(self.options.blas)]
+
+    @property
+    def _use_nnpack_family(self):
+        return any(self.options.get_safe(f"with_{name}") for name in ["nnpack", "qnnpack", "xnnpack"])
+
+    def requirements(self):
+        self.requires("cpuinfo/cci.20231129")
+        self.requires("eigen/3.4.0")
+        # fmt/11.x is not yet supported as of v2.4.0
+        self.requires("fmt/10.2.1", transitive_headers=True, transitive_libs=True)
+        self.requires("foxi/cci.20210217", libs=False)
+        self.requires("onnx/1.16.1", transitive_headers=True, transitive_libs=True)
+        self.requires("protobuf/3.21.12")
+        self.requires("fp16/cci.20210320")
+        self.requires("cpp-httplib/0.16.0")
+        self.requires("libbacktrace/cci.20210118")
+        if self._depends_on_sleef:
+            self.requires("sleef/3.6")
+        if self._depends_on_flatbuffers:
+            self.requires("flatbuffers/24.3.25", libs=False)
+        if self.options.blas == "openblas":
+            # Also provides LAPACK, currently
+            self.requires("openblas/0.3.27")
+        if self.options.with_openmp:
+            self.requires("openmp/system", transitive_headers=True, transitive_libs=True)
+        if self.options.with_fbgemm:
+            self.requires("fbgemm/0.8.0", transitive_headers=True, transitive_libs=True)
+        if self.options.with_gflags:
+            self.requires("gflags/2.2.2")
+        if self.options.with_glog:
+            self.requires("glog/0.7.1", transitive_headers=True, transitive_libs=True)
+        if self.options.get_safe("with_qnnpack"):
+            self.requires("fxdiv/cci.20200417")
+            self.requires("psimd/cci.20200517")
+        if self.options.with_xnnpack:
+            self.requires("xnnpack/cci.20240229")
+        if self.options.with_itt:
+            self.requires("ittapi/3.24.4")
+        if self._depends_on_pthreadpool:
+            self.requires("pthreadpool/cci.20231129")
+        if self.options.get_safe("with_numa"):
+            self.requires("libnuma/2.0.16")
+        if self.options.with_opencl:
+            self.requires("opencl-headers/2023.12.14")
+            self.requires("opencl-icd-loader/2023.12.14")
+        if self.options.with_vulkan:
+            self.requires("vulkan-headers/1.3.268.0")
+            self.requires("vulkan-loader/1.3.268.0")
+        if self.options.with_mimalloc:
+            self.requires("mimalloc/2.1.7")
+
+        # miniz cannot be unvendored due to being slightly modified
+
+        # TODO: unvendor
+        # - pocketfft
+        # - kineto
+        # - nnpack
+        # - qnnpack
+        # TODO: add a recipe for
+        # - magma
+        # TODO: "distributed" option with sub-options for the following packages:
+        # - openmpi
+        # - ucc
+        # - gloo
+        # - tensorpipe
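The fmt pin above propagates transitively (transitive_headers/transitive_libs), so a consumer whose graph already contains a different fmt must converge on one version. A hypothetical way to force the resolution from the consumer side (the version shown is illustrative):

```python
from conan import ConanFile

class AppConan(ConanFile):
    settings = "os", "arch", "compiler", "build_type"

    def requirements(self):
        self.requires("libtorch/2.4.0")
        # Resolve a graph-wide version conflict by pinning the fmt release
        # libtorch is known to work with; override=True replaces the
        # requirement everywhere downstream of this node.
        self.requires("fmt/10.2.1", override=True)
```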
+ ) + + if self.options.get_safe("with_numa") and not self.dependencies["libnuma"].options.shared: + raise ConanInvalidConfiguration( + "libtorch requires libnuma shared. Set '-o libnuma/*:shared=True', or disable numa with " + "' -o libtorch/*:with_numa=False'" + ) + + if self.options.blas != "openblas": + # FIXME: add an independent LAPACK package to CCI + raise ConanInvalidConfiguration("'-o libtorch/*:blas=openblas' is currently required for LAPACK support") + + if self.options.blas == "veclib" and not is_apple_os(self): + raise ConanInvalidConfiguration("veclib only available on Apple family OS") + + if self.options.get_safe("with_cuda"): + self.output.warning("cuda recipe is not available, assuming that NVIDIA CUDA SDK is installed on your system") + if self.options.get_safe("with_cudnn"): + self.output.warning("cudnn recipe is not available, assuming that NVIDIA CuDNN is installed on your system") + if self.options.get_safe("with_nvrtc"): + self.output.warning("nvrtc recipe is not available, assuming that NVIDIA NVRTC is installed on your system") + if self.options.get_safe("with_cusparselt"): + self.output.warning("cusparselt recipe is not available, assuming that NVIDIA cuSPARSELt is installed on your system") + if self.options.get_safe("with_nccl"): + self.output.warning("nccl recipe is not available, assuming that NVIDIA NCCL is installed on your system") + if self.options.get_safe("with_rocm"): + self.output.warning("rocm recipe is not available, assuming that ROCm is installed on your system") + if self.options.get_safe("with_xpu"): + self.output.warning("xpu recipe is not available, assuming that Intel oneAPI is installed on your system") + + def build_requirements(self): + self.tool_requires("cmake/[>=3.18 <4]") + self.tool_requires("cpython/[~3.12]") + if self._depends_on_flatbuffers: + self.tool_requires("flatbuffers/") + + def source(self): + get(self, **self.conan_data["sources"][self.version], strip_root=True) + + # Keep only a restricted set of vendored dependencies. + # Do it before build() to limit the amount of files to copy. 
+ allowed = ["pocketfft", "kineto", "miniz-2.1.0"] + for path in Path(self.source_folder, "third_party").iterdir(): + if path.is_dir() and path.name not in allowed: + rmdir(self, path) + + def generate(self): + tc = CMakeToolchain(self) + tc.variables["CMAKE_PROJECT_Torch_INCLUDE"] = "conan_deps.cmake" + tc.variables["USE_SYSTEM_LIBS"] = True + tc.variables["BUILD_TEST"] = False + tc.variables["ATEN_NO_TEST"] = True + tc.variables["BUILD_BINARY"] = self.options.utilities + tc.variables["BUILD_CUSTOM_PROTOBUF"] = False + tc.variables["BUILD_PYTHON"] = False + tc.variables["BUILD_LITE_INTERPRETER"] = self.options.build_lite_interpreter + tc.variables["CAFFE2_USE_MSVC_STATIC_RUNTIME"] = is_msvc_static_runtime(self) + tc.variables["USE_CUDA"] = self.options.get_safe("with_cuda", False) + tc.variables["USE_XPU"] = self.options.get_safe("with_xpu", False) + tc.variables["BUILD_LAZY_CUDA_LINALG"] = self.options.get_safe("build_lazy_cuda_linalg", False) + tc.variables["USE_ROCM"] = self.options.get_safe("with_rocm", False) + tc.variables["CAFFE2_STATIC_LINK_CUDA"] = False + tc.variables["USE_CUDNN"] = self.options.get_safe("with_cudnn", False) + tc.variables["USE_STATIC_CUDNN"] = False + tc.variables["USE_CUSPARSELT"] = self.options.get_safe("with_cusparselt", False) + tc.variables["USE_FBGEMM"] = self.options.with_fbgemm + tc.variables["USE_KINETO"] = self.options.with_kineto + tc.variables["USE_CUPTI_SO"] = True + tc.variables["USE_FAKELOWP"] = self.options.get_safe("fakelowp", False) + tc.variables["USE_GFLAGS"] = self.options.with_gflags + tc.variables["USE_GLOG"] = self.options.with_glog + tc.variables["USE_LITE_PROTO"] = self.dependencies["protobuf"].options.lite + tc.variables["USE_MAGMA"] = self.options.get_safe("with_magma", False) + tc.variables["USE_PYTORCH_METAL"] = self.options.get_safe("with_metal", False) + tc.variables["USE_PYTORCH_METAL_EXPORT"] = self.options.get_safe("with_metal", False) + tc.variables["USE_NATIVE_ARCH"] = False + tc.variables["USE_MPS"] = self.options.get_safe("with_mps", False) + tc.variables["USE_NCCL"] = self.options.get_safe("with_nccl", False) + tc.variables["USE_RCCL"] = self.options.get_safe("with_nccl", False) and self.options.get_safe("with_rocm", False) + tc.variables["USE_STATIC_NCCL"] = False + tc.variables["USE_SYSTEM_NCCL"] = True + tc.variables["USE_NNAPI"] = self.options.get_safe("with_nnapi", False) + tc.variables["USE_NNPACK"] = self.options.get_safe("with_nnpack", False) + tc.variables["USE_NUMA"] = self.options.get_safe("with_numa", False) + tc.variables["USE_NVRTC"] = self.options.get_safe("with_nvrtc", False) + tc.variables["USE_NUMPY"] = False + tc.variables["USE_OBSERVERS"] = self.options.observers + tc.variables["USE_OPENCL"] = self.options.with_opencl + tc.variables["USE_OPENMP"] = self.options.with_openmp + tc.variables["USE_PROF"] = False # requires htrace + tc.variables["USE_PYTORCH_QNNPACK"] = self.options.get_safe("with_qnnpack", False) + tc.variables["USE_SNPE"] = self.options.get_safe("with_snpe", False) + tc.variables["USE_SYSTEM_EIGEN_INSTALL"] = True + tc.variables["USE_VALGRIND"] = False + tc.variables["USE_VULKAN"] = self.options.with_vulkan + tc.variables["USE_VULKAN_FP16_INFERENCE"] = self.options.get_safe("vulkan_fp16_inference", False) + tc.variables["USE_VULKAN_RELAXED_PRECISION"] = self.options.get_safe("vulkan_relaxed_precision", False) + tc.variables["USE_XNNPACK"] = self.options.with_xnnpack + tc.variables["USE_ITT"] = self.options.with_itt + tc.variables["USE_MKLDNN"] = self.options.get_safe("with_mkldnn", False) 
+ tc.variables["USE_MKLDNN_CBLAS"] = False # This option is useless for libtorch + tc.variables["USE_DISTRIBUTED"] = False # TODO: self.options.distributed + tc.variables["ONNX_ML"] = True + tc.variables["HAVE_SOVERSION"] = True + tc.variables["USE_CCACHE"] = False + tc.variables["DEBUG_CUDA"] = self.options.get_safe("debug_cuda", False) + tc.variables["USE_COREML_DELEGATE"] = self.options.coreml_delegate + tc.variables["BUILD_LAZY_TS_BACKEND"] = self.options.build_lazy_ts_backend + tc.variables["USE_MIMALLOC"] = self.options.with_mimalloc + + tc.variables["BLAS"] = self._blas_cmake_option_value + + tc.variables["MSVC_Z7_OVERRIDE"] = False + + # Custom variables for our CMake wrapper + tc.variables["CONAN_LIBTORCH_USE_FLATBUFFERS"] = self._depends_on_flatbuffers + tc.variables["CONAN_LIBTORCH_USE_PTHREADPOOL"] = self._depends_on_pthreadpool + tc.variables["CONAN_LIBTORCH_USE_SLEEF"] = self._depends_on_sleef + + tc.generate() + + deps = CMakeDeps(self) + deps.set_property("cpuinfo", "cmake_target_name", "cpuinfo") + deps.set_property("flatbuffers", "cmake_target_name", "flatbuffers::flatbuffers") + deps.set_property("fmt", "cmake_target_name", "fmt::fmt-header-only") + deps.set_property("foxi", "cmake_target_name", "foxi_loader") + deps.set_property("gflags", "cmake_target_name", "gflags") + deps.set_property("ittapi", "cmake_file_name", "ITT") + deps.set_property("libbacktrace", "cmake_file_name", "Backtrace") + deps.set_property("mimalloc", "cmake_target_name", "mimalloc-static") + deps.set_property("psimd", "cmake_target_name", "psimd") + deps.generate() + + VirtualBuildEnv(self).generate() + + # To install pyyaml + env = Environment() + env.append_path("PYTHONPATH", self._site_packages_dir) + env.append_path("PATH", os.path.join(self._site_packages_dir, "bin")) + env.vars(self).save_script("pythonpath") + + @property + def _site_packages_dir(self): + return os.path.join(self.build_folder, "site-packages") + + def _pip_install(self, packages): + self.run(f"python -m pip install {' '.join(packages)} --no-cache-dir --target={self._site_packages_dir}") + + def _patch_sources(self): + apply_conandata_patches(self) + # Recreate some for add_subdirectory() to work + for pkg in ["foxi", "fmt", "FXdiv", "psimd", "mimalloc"]: + save(self, os.path.join(self.source_folder, "third_party", pkg, "CMakeLists.txt"), "") + # Use FindOpenMP from Conan or CMake + modules_dir = os.path.join(self.source_folder, "cmake", "modules") + rm(self, "FindOpenMP.cmake", modules_dir) + + def _regenerate_flatbuffers(self): + # Re-generate mobile_bytecode_generated.h to allow any flatbuffers version to be used. + # As of v24.3.25, only updates the flatbuffers version in the generated file. 
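The generate()/_pip_install() pair above is a self-contained way to obtain pure-Python build dependencies without polluting the build machine: pip installs into a --target directory, and PYTHONPATH points the tool-required CPython at it. A standalone sketch of the same pattern (paths and the python executable name are illustrative):

```python
import os
import subprocess

# Install a package into a private directory, then expose it via PYTHONPATH,
# mirroring the recipe's _pip_install() + Environment() approach.
site_packages = os.path.join(os.getcwd(), "site-packages")
subprocess.run(
    ["python", "-m", "pip", "install", "pyyaml", "--no-cache-dir", f"--target={site_packages}"],
    check=True,
)
env = dict(os.environ, PYTHONPATH=site_packages)
subprocess.run(["python", "-c", "import yaml; print(yaml.safe_load('a: 1'))"], env=env, check=True)
```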
+
+    def _patch_sources(self):
+        apply_conandata_patches(self)
+        # Recreate empty CMakeLists.txt placeholders for add_subdirectory() to keep working
+        for pkg in ["foxi", "fmt", "FXdiv", "psimd", "mimalloc"]:
+            save(self, os.path.join(self.source_folder, "third_party", pkg, "CMakeLists.txt"), "")
+        # Use FindOpenMP from Conan or CMake
+        modules_dir = os.path.join(self.source_folder, "cmake", "modules")
+        rm(self, "FindOpenMP.cmake", modules_dir)
+
+    def _regenerate_flatbuffers(self):
+        # Re-generate mobile_bytecode_generated.h to allow any flatbuffers version to be used.
+        # As of v24.3.25, this only updates the flatbuffers version in the generated file.
+        self.run("flatc --cpp --gen-mutable --no-prefix --scoped-enums mobile_bytecode.fbs",
+                 cwd=os.path.join(self.source_folder, "torch", "csrc", "jit", "serialization"))
+
+    def build(self):
+        self._patch_sources()
+        self._pip_install(["pyyaml", "typing-extensions"])
+        if self._depends_on_flatbuffers:
+            self._regenerate_flatbuffers()
+        cmake = CMake(self)
+        cmake.configure()
+        try:
+            cmake.build()
+        except Exception:
+            # The build is likely to run out of memory in the CI, so try again
+            cmake.build(cli_args=["--parallel", "1"])
+
+    def package(self):
+        copy(self, "LICENSE", self.source_folder, os.path.join(self.package_folder, "licenses"))
+        cmake = CMake(self)
+        cmake.install()
+        os.rename(os.path.join(self.package_folder, "share"), os.path.join(self.package_folder, "res"))
+        rmdir(self, os.path.join(self.package_folder, "res", "cmake"))
+        copy(self, "conan-official-libtorch-vars.cmake", self.source_folder, os.path.join(self.package_folder, self._modules_dir))
+
+    @property
+    def _modules_dir(self):
+        return os.path.join("lib", "cmake", "Torch")
+
+    def package_info(self):
+        def _lib_exists(name):
+            return bool(list(Path(self.package_folder, "lib").glob(f"*{name}.*")))
+
+        def _add_whole_archive_lib(component, libname, shared=False):
+            # Reproduces https://github.com/pytorch/pytorch/blob/v2.4.0/cmake/TorchConfig.cmake.in#L27-L43
+            if shared:
+                self.cpp_info.components[component].libs.append(libname)
+            else:
+                lib_folder = os.path.join(self.package_folder, "lib")
+                if is_apple_os(self):
+                    lib_fullpath = os.path.join(lib_folder, f"lib{libname}.a")
+                    whole_archive = f"-Wl,-force_load,{lib_fullpath}"
+                elif is_msvc(self):
+                    lib_fullpath = os.path.join(lib_folder, libname)
+                    whole_archive = f"-WHOLEARCHIVE:{lib_fullpath}"
+                else:
+                    lib_fullpath = os.path.join(lib_folder, f"lib{libname}.a")
+                    whole_archive = f"-Wl,--whole-archive,{lib_fullpath},--no-whole-archive"
+                self.cpp_info.components[component].exelinkflags.append(whole_archive)
+                self.cpp_info.components[component].sharedlinkflags.append(whole_archive)
+
+        def _sleef():
+            return ["sleef::sleef"] if self._depends_on_sleef else []
+
+        def _openblas():
+            return ["openblas::openblas"] if self.options.blas == "openblas" else []
+
+        def _lapack():
+            return ["openblas::openblas"]
+
+        def _openmp():
+            return ["openmp::openmp"] if self.options.with_openmp else []
+
+        def _fbgemm():
+            return ["fbgemm::fbgemm"] if self.options.with_fbgemm else []
+
+        def _gflags():
+            return ["gflags::gflags"] if self.options.with_gflags else []
+
+        def _glog():
+            return ["glog::glog"] if self.options.with_glog else []
+
+        def _nnpack():
+            return []  # TODO
+
+        def _xnnpack():
+            return ["xnnpack::xnnpack"] if self.options.with_xnnpack else []
+
+        def _qnnpack():
+            return ["pytorch_qnnpack"] if self.options.get_safe("with_qnnpack") else []
+
+        def _libnuma():
+            return ["libnuma::libnuma"] if self.options.get_safe("with_numa") else []
+
+        def _opencl():
+            return ["opencl-headers::opencl-headers", "opencl-icd-loader::opencl-icd-loader"] if self.options.with_opencl else []
+
+        def _vulkan():
+            return ["vulkan-headers::vulkan-headers", "vulkan-loader::vulkan-loader"] if self.options.with_vulkan else []
+
+        def _onednn():
+            return ["onednn::onednn"] if self.options.get_safe("with_mkldnn", False) else []
+
+        def _mimalloc():
+            return ["mimalloc::mimalloc"] if self.options.with_mimalloc else []
+
+        def _protobuf():
+            return ["protobuf::libprotobuf-lite"] if self.dependencies["protobuf"].options.lite else ["protobuf::libprotobuf"]
+
+        def _flatbuffers():
+            return ["flatbuffers::flatbuffers"] if self._depends_on_flatbuffers else []
+
+        def _kineto():
+            return ["kineto"] if self.options.with_kineto else []
+
+        def _itt():
+            return ["ittapi::ittapi"] if self.options.with_itt else []
+
+        self.cpp_info.set_property("cmake_file_name", "Torch")
+
+        # Export official CMake variables
+        self.cpp_info.builddirs.append(self._modules_dir)
+        cmake_vars_module = os.path.join(self._modules_dir, "conan-official-libtorch-vars.cmake")
+        self.cpp_info.set_property("cmake_build_modules", [cmake_vars_module])
+
+        self.cpp_info.components["_headers"].includedirs.append(os.path.join("include", "torch", "csrc", "api", "include"))
+        self.cpp_info.components["_headers"].resdirs = ["res"]
+        self.cpp_info.components["_headers"].requires.extend(["onnx::onnx"] + _flatbuffers())
+
+        self.cpp_info.components["c10"].set_property("cmake_target_name", "c10")
+        self.cpp_info.components["c10"].libs = ["c10"]
+        self.cpp_info.components["c10"].requires.extend(
+            ["_headers", "fmt::fmt", "cpuinfo::cpuinfo", "libbacktrace::libbacktrace", "cpp-httplib::cpp-httplib"]
+            + _gflags() + _glog() + _libnuma() + _mimalloc()
+        )
+        if self.settings.os == "Android":
+            self.cpp_info.components["c10"].system_libs.append("log")
+
+        self.cpp_info.components["torch"].set_property("cmake_target_name", "torch")
+        _add_whole_archive_lib("torch", "torch", shared=self.options.shared)
+        self.cpp_info.components["torch"].requires.append("torch_cpu")
+
+        self.cpp_info.components["torch_cpu"].set_property("cmake_target_name", "torch_cpu")
+        _add_whole_archive_lib("torch_cpu", "torch_cpu", shared=self.options.shared)
+        self.cpp_info.components["torch_cpu"].requires.append("_headers")
+        self.cpp_info.components["torch_cpu"].requires.append("c10")
+
+        ## TODO: Eventually remove this workaround in the future
+        ## We put all the external dependencies and system libs of torch_cpu in an empty component instead,
+        ## due to the "whole archive" trick. Conan doesn't honor the per-component libs order we expect in this case
+        ## (Conan generators put exelinkflags/sharedlinkflags after system/external libs)
+        self.cpp_info.components["torch_cpu"].requires.append("torch_cpu_link_order_workaround")
+        self.cpp_info.components["torch_cpu_link_order_workaround"].requires.extend(
+            ["_headers", "c10", "eigen::eigen", "fmt::fmt", "foxi::foxi"]
+            + _fbgemm() + _sleef() + _onednn() + _protobuf() + _kineto() + _openblas() + _lapack()
+            + _vulkan() + _opencl() + _openmp() + _nnpack() + _xnnpack() + _qnnpack() + _itt()
+        )
+        if self.settings.os == "Linux":
+            self.cpp_info.components["torch_cpu_link_order_workaround"].system_libs.extend(["dl", "m", "pthread", "rt"])
+        if self.options.blas == "veclib":
+            self.cpp_info.components["torch_cpu_link_order_workaround"].frameworks.append("Accelerate")
+
+        if self.options.shared:
+            ## TODO: Eventually remove this workaround in the future
+            self.cpp_info.components["torch_cpu_link_order_workaround"].requires.extend(_protobuf())
+        else:
+            if _lib_exists("Caffe2_perfkernels_avx"):
+                _add_whole_archive_lib("caffe2_perfkernels_avx", "Caffe2_perfkernels_avx", shared=self.options.shared)
+                self.cpp_info.components["caffe2_perfkernels_avx"].requires.append("c10")
+                self.cpp_info.components["torch_cpu"].requires.append("caffe2_perfkernels_avx")
+
+            if _lib_exists("Caffe2_perfkernels_avx2"):
+                _add_whole_archive_lib("caffe2_perfkernels_avx2", "Caffe2_perfkernels_avx2", shared=self.options.shared)
+                self.cpp_info.components["caffe2_perfkernels_avx2"].requires.append("c10")
+                self.cpp_info.components["torch_cpu"].requires.append("caffe2_perfkernels_avx2")
+
+            if _lib_exists("Caffe2_perfkernels_avx512"):
+                _add_whole_archive_lib("caffe2_perfkernels_avx512", "Caffe2_perfkernels_avx512", shared=self.options.shared)
+                self.cpp_info.components["caffe2_perfkernels_avx512"].requires.append("c10")
+                self.cpp_info.components["torch_cpu"].requires.append("caffe2_perfkernels_avx512")
+
+        if self.options.observers:
+            _add_whole_archive_lib("caffe2_observers", "caffe2_observers", shared=self.options.shared)
+            self.cpp_info.components["caffe2_observers"].requires.append("torch")
+
+        if self.options.get_safe("with_qnnpack"):
+            self.cpp_info.components["clog"].libs = ["clog"]
+            self.cpp_info.components["pytorch_qnnpack"].libs = ["pytorch_qnnpack"]
+            self.cpp_info.components["pytorch_qnnpack"].requires.extend([
+                "clog", "cpuinfo::cpuinfo", "fp16::fp16", "fxdiv::fxdiv", "psimd::psimd", "pthreadpool::pthreadpool"
+            ])
+
+        if self.options.with_kineto:
+            self.cpp_info.components["kineto"].libs = ["kineto"]
+            self.cpp_info.components["kineto"].includedirs.append(os.path.join("include", "kineto"))
+            self.cpp_info.components["kineto"].requires.extend(["fmt::fmt"])
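A condensed sketch of the link-order workaround used in package_info() above, with hypothetical names: the whole-archive flags live in exelinkflags/sharedlinkflags of the real component, and its external libraries are routed through a sibling component so the generators emit them after those flags on the final link line. The core/zlib names are illustrative only, and a real recipe would also declare the corresponding requirement:

```python
from conan import ConanFile

class SketchConan(ConanFile):
    name = "sketch"

    def package_info(self):
        core = self.cpp_info.components["core"]
        # Whole-archive options go into link flags rather than libs, so they
        # must precede the external libraries on the link line.
        core.exelinkflags = ["-Wl,--whole-archive,libcore.a,--no-whole-archive"]
        core.sharedlinkflags = core.exelinkflags
        # External deps go into a required sibling component: generators emit
        # a required component's libs after this component's link flags.
        core.requires = ["core_link_order_workaround"]
        self.cpp_info.components["core_link_order_workaround"].requires = ["zlib::zlib"]
```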
diff --git a/recipes/libtorch/all/patches/2.4.0/0001-use-conan-dependencies.patch b/recipes/libtorch/all/patches/2.4.0/0001-use-conan-dependencies.patch
new file mode 100644
index 0000000000000..02263e84fe645
--- /dev/null
+++ b/recipes/libtorch/all/patches/2.4.0/0001-use-conan-dependencies.patch
@@ -0,0 +1,127 @@
+From e76442d20f526d7151b0ea06369a5dbfbc89a124 Mon Sep 17 00:00:00 2001
+From: Martin Valgur
+Date: Mon, 29 Jul 2024 18:35:04 +0300
+Subject: [PATCH] use conan dependencies
+
+---
+ aten/CMakeLists.txt          | 12 ++++++------
+ aten/src/ATen/CMakeLists.txt |  9 ---------
+ c10/CMakeLists.txt           |  2 +-
+ cmake/BuildVariables.cmake   |  4 ++--
+ cmake/Dependencies.cmake     | 21 ++++-----------------
+ 5 files changed, 13 insertions(+), 35 deletions(-)
+
+diff --git a/aten/CMakeLists.txt b/aten/CMakeLists.txt
+index bda6aea327..3c394539ec 100644
+--- a/aten/CMakeLists.txt
++++ b/aten/CMakeLists.txt
+@@ -40,12 +40,12 @@ set(ATen_XPU_SRCS)
+ set(ATen_XPU_INCLUDE)
+ set(ATen_XPU_TEST_SRCS)
+ set(ATen_VULKAN_TEST_SRCS)
+-set(ATen_CPU_DEPENDENCY_LIBS)
+-set(ATen_XPU_DEPENDENCY_LIBS)
+-set(ATen_CUDA_DEPENDENCY_LIBS)
+-set(ATen_HIP_DEPENDENCY_LIBS)
+-set(ATen_PUBLIC_CUDA_DEPENDENCY_LIBS)
+-set(ATen_PUBLIC_HIP_DEPENDENCY_LIBS)
++#set(ATen_CPU_DEPENDENCY_LIBS)
++#set(ATen_XPU_DEPENDENCY_LIBS)
++#set(ATen_CUDA_DEPENDENCY_LIBS)
++#set(ATen_HIP_DEPENDENCY_LIBS)
++#set(ATen_PUBLIC_CUDA_DEPENDENCY_LIBS)
++#set(ATen_PUBLIC_HIP_DEPENDENCY_LIBS)
+ set(ATEN_INSTALL_BIN_SUBDIR "bin" CACHE PATH "ATen install binary subdirectory")
+ set(ATEN_INSTALL_LIB_SUBDIR "lib" CACHE PATH "ATen install library subdirectory")
+ set(ATEN_INSTALL_INCLUDE_SUBDIR "include" CACHE PATH "ATen install include subdirectory")
+diff --git a/aten/src/ATen/CMakeLists.txt b/aten/src/ATen/CMakeLists.txt
+index 0087dd95d9..ab59c1c36b 100644
+--- a/aten/src/ATen/CMakeLists.txt
++++ b/aten/src/ATen/CMakeLists.txt
+@@ -455,16 +455,7 @@ if(NOT EMSCRIPTEN AND NOT INTERN_BUILD_MOBILE)
+     set_property(TARGET sleef PROPERTY FOLDER "dependencies")
+     list(APPEND ATen_THIRD_PARTY_INCLUDE ${CMAKE_BINARY_DIR}/include)
+     link_directories(${CMAKE_BINARY_DIR}/sleef/lib)
+-  else()
+-    add_library(sleef SHARED IMPORTED)
+-    find_library(SLEEF_LIBRARY sleef)
+-    if(NOT SLEEF_LIBRARY)
+-      message(FATAL_ERROR "Cannot find sleef")
+-    endif()
+-    message("Found sleef: ${SLEEF_LIBRARY}")
+-    set_target_properties(sleef PROPERTIES IMPORTED_LOCATION "${SLEEF_LIBRARY}")
+   endif()
+-  list(APPEND ATen_CPU_DEPENDENCY_LIBS sleef)
+
+   if(NOT MSVC)
+     set(CMAKE_C_FLAGS_DEBUG ${OLD_CMAKE_C_FLAGS_DEBUG})
+diff --git a/c10/CMakeLists.txt b/c10/CMakeLists.txt
+index 82eb969038..94173660b9 100644
+--- a/c10/CMakeLists.txt
++++ b/c10/CMakeLists.txt
+@@ -94,7 +94,7 @@ if(NOT BUILD_LIBTORCHLESS)
+   if(C10_USE_GLOG)
+     target_link_libraries(c10 PUBLIC glog::glog)
+   endif()
+-  target_link_libraries(c10 PRIVATE fmt::fmt-header-only)
++  target_link_libraries(c10 PRIVATE fmt::fmt-header-only httplib::httplib)
+
+   if(C10_USE_NUMA)
+     message(STATUS "NUMA paths:")
+diff --git a/cmake/BuildVariables.cmake b/cmake/BuildVariables.cmake
+index c849c26014..36b26448fd 100644
+--- a/cmake/BuildVariables.cmake
++++ b/cmake/BuildVariables.cmake
+@@ -23,8 +23,8 @@ set(Caffe2_CPU_INCLUDE)
+ set(Caffe2_GPU_INCLUDE)
+
+ # Lists for Caffe2 dependency libraries, for CPU and CUDA respectively.
+-set(Caffe2_DEPENDENCY_LIBS "")
+-set(Caffe2_CUDA_DEPENDENCY_LIBS "")
++#set(Caffe2_DEPENDENCY_LIBS "")
++#set(Caffe2_CUDA_DEPENDENCY_LIBS "")
+ # This variable contains dependency libraries of Caffe2 which requires whole
+ # symbol linkage. One example is the onnx lib where we need all its schema
+ # symbols. However, if the lib is whole linked in caffe2 lib, we don't want
+diff --git a/cmake/Dependencies.cmake b/cmake/Dependencies.cmake
+index c4661e39e1..b17edad046 100644
+--- a/cmake/Dependencies.cmake
++++ b/cmake/Dependencies.cmake
+@@ -1306,22 +1306,10 @@ if(CAFFE2_CMAKE_BUILDING_WITH_MAIN_REPO AND NOT INTERN_DISABLE_ONNX)
+       target_compile_options(onnx PRIVATE -Wno-deprecated-declarations)
+     endif()
+   else()
+-    add_library(onnx SHARED IMPORTED)
+-    find_library(ONNX_LIBRARY onnx)
+-    if(NOT ONNX_LIBRARY)
+-      message(FATAL_ERROR "Cannot find onnx")
+-    endif()
+-    set_property(TARGET onnx PROPERTY IMPORTED_LOCATION ${ONNX_LIBRARY})
+-    add_library(onnx_proto SHARED IMPORTED)
+-    find_library(ONNX_PROTO_LIBRARY onnx_proto)
+-    if(NOT ONNX_PROTO_LIBRARY)
+-      message(FATAL_ERROR "Cannot find onnx")
+-    endif()
+-    set_property(TARGET onnx_proto PROPERTY IMPORTED_LOCATION ${ONNX_PROTO_LIBRARY})
+-    message("-- Found onnx: ${ONNX_LIBRARY} ${ONNX_PROTO_LIBRARY}")
+-    list(APPEND Caffe2_DEPENDENCY_LIBS onnx_proto onnx)
+-  endif()
+-  include_directories(${FOXI_INCLUDE_DIRS})
++    find_package(ONNX REQUIRED CONFIG)
++    list(APPEND Caffe2_DEPENDENCY_LIBS onnx::onnx)
++  endif()
++  find_package(foxi REQUIRED CONFIG)
+   list(APPEND Caffe2_DEPENDENCY_LIBS foxi_loader)
+   # Recover the build shared libs option.
+   set(BUILD_SHARED_LIBS ${TEMP_BUILD_SHARED_LIBS})
+@@ -1539,7 +1527,6 @@ add_subdirectory(${PROJECT_SOURCE_DIR}/third_party/fmt)
+ # CMAKE_CXX_FLAGS in ways that break feature checks. Since we already know
+ # `fmt` is compatible with a superset of the compilers that PyTorch is, it
+ # shouldn't be too bad to just disable the checks.
+-set_target_properties(fmt-header-only PROPERTIES INTERFACE_COMPILE_FEATURES "")
+
+ list(APPEND Caffe2_DEPENDENCY_LIBS fmt::fmt-header-only)
+ set(BUILD_SHARED_LIBS ${TEMP_BUILD_SHARED_LIBS} CACHE BOOL "Build shared libs" FORCE)
+--
+2.43.0
+
diff --git a/recipes/libtorch/all/patches/2.4.0/0002-fix-a-minor-glog-incompatibility.patch b/recipes/libtorch/all/patches/2.4.0/0002-fix-a-minor-glog-incompatibility.patch
new file mode 100644
index 0000000000000..aed25a5dfe5de
--- /dev/null
+++ b/recipes/libtorch/all/patches/2.4.0/0002-fix-a-minor-glog-incompatibility.patch
@@ -0,0 +1,25 @@
+From 90c281c0c0ae524e69d7450ac0f572cdfcea82d6 Mon Sep 17 00:00:00 2001
+From: Martin Valgur
+Date: Mon, 29 Jul 2024 20:57:34 +0300
+Subject: [PATCH 1/2] fix a minor glog incompatibility
+
+---
+ c10/util/Logging.cpp | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/c10/util/Logging.cpp b/c10/util/Logging.cpp
+index 66a24980a4..520bb3e723 100644
+--- a/c10/util/Logging.cpp
++++ b/c10/util/Logging.cpp
+@@ -324,7 +324,7 @@ void UpdateLoggingLevelsFromFlags() {
+
+ void ShowLogInfoToStderr() {
+   FLAGS_logtostderr = 1;
+-  FLAGS_minloglevel = std::min(FLAGS_minloglevel, google::GLOG_INFO);
++  FLAGS_minloglevel = std::min(FLAGS_minloglevel, static_cast<int>(google::GLOG_INFO));
+ }
+ } // namespace c10
+
+--
+2.43.0
+
diff --git a/recipes/libtorch/all/patches/2.4.0/0003-fix-cmake-logic-bug.patch b/recipes/libtorch/all/patches/2.4.0/0003-fix-cmake-logic-bug.patch
new file mode 100644
index 0000000000000..5502db3e8acf5
--- /dev/null
+++ b/recipes/libtorch/all/patches/2.4.0/0003-fix-cmake-logic-bug.patch
@@ -0,0 +1,27 @@
+From 5b49aeb0a2e70cffae601f4105aeb6ee01281cb2 Mon Sep 17 00:00:00 2001
+From: Martin Valgur
+Date: Mon, 29 Jul 2024 20:57:59 +0300
+Subject: [PATCH 2/2] fix a cmake logic bug
+
+Otherwise adds torch/csrc/jit/mobile/profiler_edge.cpp, which depends on kineto.
+
+---
+ CMakeLists.txt | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/CMakeLists.txt b/CMakeLists.txt
+index c4cd4b2c2a..3bd5cd0e93 100644
+--- a/CMakeLists.txt
++++ b/CMakeLists.txt
+@@ -298,7 +298,7 @@ endif()
+
+ option(USE_SLEEF_FOR_ARM_VEC256 "Use sleef for arm" OFF)
+ option(USE_SOURCE_DEBUG_ON_MOBILE "Enable" ON)
+-option(USE_LITE_INTERPRETER_PROFILER "Enable" ON)
++option(USE_LITE_INTERPRETER_PROFILER "Enable" "${USE_KINETO}")
+ cmake_dependent_option(
+     USE_LITE_AOTI "Include AOTI sources" OFF
+     "BUILD_LITE_INTERPRETER" OFF)
+--
+2.43.0
+
diff --git a/recipes/libtorch/all/patches/2.4.0/0004-add-a-missing-include.patch b/recipes/libtorch/all/patches/2.4.0/0004-add-a-missing-include.patch
new file mode 100644
index 0000000000000..68eed9dbbc852
--- /dev/null
+++ b/recipes/libtorch/all/patches/2.4.0/0004-add-a-missing-include.patch
@@ -0,0 +1,10 @@
+--- a/binaries/dump_operator_names.cc
++++ b/binaries/dump_operator_names.cc
+@@ -21,6 +21,7 @@
+ #include
+
+ #include
++#include
+
+ namespace torch {
+ namespace jit {
diff --git a/recipes/libtorch/all/patches/2.4.0/0005-kineto-unvendor-fmt.patch b/recipes/libtorch/all/patches/2.4.0/0005-kineto-unvendor-fmt.patch
new file mode 100644
index 0000000000000..d4d923a1488c4
--- /dev/null
+++ b/recipes/libtorch/all/patches/2.4.0/0005-kineto-unvendor-fmt.patch
@@ -0,0 +1,43 @@
+From 6c64ee0c5d73b6686fd7d5ee0225510066ee5543 Mon Sep 17 00:00:00 2001
+From: Martin Valgur
+Date: Tue, 30 Jul 2024 18:18:57 +0300
+Subject: [PATCH] kineto: unvendor fmt
+
+---
+ libkineto/CMakeLists.txt | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+diff --git a/libkineto/CMakeLists.txt b/libkineto/CMakeLists.txt
+index aff278b..2c460b9 100644
+--- a/libkineto/CMakeLists.txt
++++ b/libkineto/CMakeLists.txt
+@@ -159,7 +159,7 @@ set_target_properties(kineto_base kineto_api PROPERTIES
+       CXX_EXTENSIONS NO)
+
+ set(KINETO_COMPILE_OPTIONS "-DKINETO_NAMESPACE=libkineto")
+-list(APPEND KINETO_COMPILE_OPTIONS "-DFMT_HEADER_ONLY")
++list(APPEND KINETO_COMPILE_OPTIONS "${fmt_DEFINITIONS}")
+ list(APPEND KINETO_COMPILE_OPTIONS "-DENABLE_IPC_FABRIC")
+ if(NOT MSVC)
+   list(APPEND KINETO_COMPILE_OPTIONS "-std=c++17")
+@@ -183,7 +183,7 @@ endif()
+ target_compile_options(kineto_base PRIVATE "${KINETO_COMPILE_OPTIONS}")
+ target_compile_options(kineto_api PRIVATE "${KINETO_COMPILE_OPTIONS}")
+
+-if(NOT TARGET fmt)
++if(FALSE)
+   if(NOT FMT_SOURCE_DIR)
+     set(FMT_SOURCE_DIR "${LIBKINETO_THIRDPARTY_DIR}/fmt"
+       CACHE STRING "fmt source directory from submodules")
+@@ -201,7 +201,7 @@ if(NOT TARGET fmt)
+   set(BUILD_SHARED_LIBS ${TEMP_BUILD_SHARED_LIBS} CACHE BOOL "Build shared libs" FORCE)
+ endif()
+
+-set(FMT_INCLUDE_DIR "${FMT_SOURCE_DIR}/include")
++set(FMT_INCLUDE_DIR "${fmt_INCLUDE_DIR}")
+ message(STATUS "Kineto: FMT_SOURCE_DIR = ${FMT_SOURCE_DIR}")
+ message(STATUS "Kineto: FMT_INCLUDE_DIR = ${FMT_INCLUDE_DIR}")
+ if (NOT CUPTI_INCLUDE_DIR)
+--
+2.43.0
+
diff --git a/recipes/libtorch/all/patches/2.4.0/0006-dont-build-protoc-on-apple.patch b/recipes/libtorch/all/patches/2.4.0/0006-dont-build-protoc-on-apple.patch
new file mode 100644
index 0000000000000..e7cfd17578164
--- /dev/null
+++ b/recipes/libtorch/all/patches/2.4.0/0006-dont-build-protoc-on-apple.patch
@@ -0,0 +1,11 @@
+--- a/CMakeLists.txt
++++ b/CMakeLists.txt
+@@ -781,7 +781,7 @@
+ endif()
+
+ # The below means we are cross compiling for arm64 or x86_64 on MacOSX
+-if(NOT IOS
++if(FALSE
+    AND CMAKE_SYSTEM_NAME STREQUAL "Darwin"
+    AND CMAKE_OSX_ARCHITECTURES MATCHES "^(x86_64|arm64)$")
+   set(CROSS_COMPILING_MACOSX TRUE)
diff --git a/recipes/libtorch/all/test_package/CMakeLists.txt b/recipes/libtorch/all/test_package/CMakeLists.txt
new file mode 100644
index 0000000000000..7f5314ce26ff3
--- /dev/null
+++ b/recipes/libtorch/all/test_package/CMakeLists.txt
@@ -0,0 +1,8 @@
+cmake_minimum_required(VERSION 3.15)
+project(test_package)
+
+find_package(Torch REQUIRED CONFIG)
+
+add_executable(${PROJECT_NAME} test_package.cpp)
+target_link_libraries(${PROJECT_NAME} torch)
+target_compile_features(${PROJECT_NAME} PRIVATE cxx_std_17)
diff --git a/recipes/libtorch/all/test_package/conanfile.py b/recipes/libtorch/all/test_package/conanfile.py
new file mode 100644
index 0000000000000..ef5d7042163ec
--- /dev/null
+++ b/recipes/libtorch/all/test_package/conanfile.py
@@ -0,0 +1,26 @@
+from conan import ConanFile
+from conan.tools.build import can_run
+from conan.tools.cmake import cmake_layout, CMake
+import os
+
+
+class TestPackageConan(ConanFile):
+    settings = "os", "arch", "compiler", "build_type"
+    generators = "CMakeDeps", "CMakeToolchain", "VirtualRunEnv"
+    test_type = "explicit"
+
+    def requirements(self):
+        self.requires(self.tested_reference_str)
+
+    def layout(self):
+        cmake_layout(self)
+
+    def build(self):
+        cmake = CMake(self)
+        cmake.configure()
+        cmake.build()
+
+    def test(self):
+        if can_run(self):
+            bin_path = os.path.join(self.cpp.build.bindir, "test_package")
+            self.run(bin_path, env="conanrun")
diff --git a/recipes/libtorch/all/test_package/test_package.cpp b/recipes/libtorch/all/test_package/test_package.cpp
new file mode 100644
index 0000000000000..ca31e23db7a82
--- /dev/null
+++ b/recipes/libtorch/all/test_package/test_package.cpp
@@ -0,0 +1,31 @@
+#include <torch/torch.h>
+
+#include <iostream>
+
+struct Net: torch::nn::Module {
+    Net(int64_t N, int64_t M): linear(register_module("linear", torch::nn::Linear(N, M))) {
+        another_bias = register_parameter("b", torch::randn(M));
+    }
+
+    torch::Tensor forward(torch::Tensor input) {
+        return linear(input) + another_bias;
+    }
+
+    torch::nn::Linear linear;
+    torch::Tensor another_bias;
+};
+
+int main() {
+    torch::Tensor tensor = torch::eye(3);
+    std::cout << tensor << std::endl;
+
+    Net net(4, 5);
+    for (const auto& p : net.parameters()) {
+        std::cout << p << std::endl;
+    }
+    for (const auto& pair : net.named_parameters()) {
+        std::cout << pair.key() << ": " << pair.value() << std::endl;
+    }
+
+    return 0;
+}
diff --git a/recipes/libtorch/config.yml b/recipes/libtorch/config.yml
new file mode 100644
index 0000000000000..e1d4aed9fe78f
--- /dev/null
+++ b/recipes/libtorch/config.yml
@@ -0,0 +1,3 @@
+versions:
+  "2.4.0":
+    folder: all