From 2376faebcf3cbf94a112c264ba82307df6f80780 Mon Sep 17 00:00:00 2001 From: erick-xanadu <110487834+erick-xanadu@users.noreply.github.com> Date: Mon, 6 Jan 2025 14:46:51 -0500 Subject: [PATCH 01/10] [RTD] Fixes documentation (#1412) **Context:** ![Before](https://github.com/user-attachments/assets/3ec337c1-a898-4abd-a9c6-990d8767679c) **Description of the Change:** ![After](https://github.com/user-attachments/assets/a07c6a1c-f98d-4b00-b9aa-9afd0a893673) **Benefits:** No floating `}` and well formatted contents. **Possible Drawbacks:** Do we also want to show the libraries we use? **Related GitHub Issues:** --- doc/index.rst | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/doc/index.rst b/doc/index.rst index d5c8ab7f9f..879fa14bb0 100644 --- a/doc/index.rst +++ b/doc/index.rst @@ -54,11 +54,11 @@ Catalyst .. mdinclude:: ../README.md :start-line: 20 - :end-line: 73 + :end-line: 72 .. mdinclude:: ../README.md - :start-line: 142 - :end-line: 163 + :start-line: 134 + :end-line: 155 .. toctree:: :maxdepth: 2 From b29046b80a3f39ee511e14694ac8eae229ef54e0 Mon Sep 17 00:00:00 2001 From: Raul Torres <138264735+rauletorresc@users.noreply.github.com> Date: Mon, 6 Jan 2025 16:16:35 -0500 Subject: [PATCH 02/10] Fix some typos and phrasing in the docs (#1416) **Context:** Fix some typos and phrasing in the docs --- frontend/catalyst/debug/compiler_functions.py | 4 ++-- frontend/test/lit/test_mlir_plugin.py | 6 +++--- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/frontend/catalyst/debug/compiler_functions.py b/frontend/catalyst/debug/compiler_functions.py index 2fa16b4218..7dc1a17756 100644 --- a/frontend/catalyst/debug/compiler_functions.py +++ b/frontend/catalyst/debug/compiler_functions.py @@ -42,7 +42,7 @@ def get_compilation_stage(fn, stage): All the available stages are: - - MILR: ``mlir``, ``HLOLoweringPass``, ``QuantumCompilationPass``, ``BufferizationPass``, + - MLIR: ``mlir``, ``HLOLoweringPass``, ``QuantumCompilationPass``, ``BufferizationPass``, and ``MLIRToLLVMDialect``. - LLVM: ``llvm_ir``, ``CoroOpt``, ``O2Opt``, ``Enzyme``, and ``last``. @@ -139,7 +139,7 @@ def replace_ir(fn, stage, new_ir): Available stages include: - - MILR: ``mlir``, ``HLOLoweringPass``, ``QuantumCompilationPass``, ``BufferizationPass``, + - MLIR: ``mlir``, ``HLOLoweringPass``, ``QuantumCompilationPass``, ``BufferizationPass``, and ``MLIRToLLVMDialect``. - LLVM: ``llvm_ir``, ``CoroOpt``, ``O2Opt``, ``Enzyme``, and ``last``. diff --git a/frontend/test/lit/test_mlir_plugin.py b/frontend/test/lit/test_mlir_plugin.py index 39b0c58e04..44a0d4f488 100644 --- a/frontend/test/lit/test_mlir_plugin.py +++ b/frontend/test/lit/test_mlir_plugin.py @@ -15,14 +15,14 @@ # RUN: %PYTHON %s | FileCheck %s """ -This test makes sure that we can use plugins from the compiler +This test makes sure that we can use plugins from the compiler. Given the standalone-plugin in the MLIR repository, can we verify that it works when loading it from python? This test uses a lot of machinery that is not exposed to the user. However, testing the standalone-plugin (as written in the LLVM repository) -is impractical. The standalone-plugin rewrites a symbols with the name +is impractical. The standalone-plugin rewrites all symbols with the name `bar` to symbols with the name `foo`. However, since the standalone plugin is meant to be more of an example, it does not modify the uses of symbol `bar` and change them to `foo`. 
@@ -50,7 +50,7 @@ def module(): print(module.mlir) ``` -It would succeed in generate correct MLIR during the lowering from JAXPR to MLIR. +It would succeed at generating correct MLIR during the lowering from JAXPR to MLIR. However, after the `standalone-switch-bar-foo` pass, the verifier would fail because it would see callsites to `@bar` but no definitions for `@bar`. From 07ffe5b0a584daa09d7958332d725bc919c78521 Mon Sep 17 00:00:00 2001 From: Mehrdad Malek <39844030+mehrdad2m@users.noreply.github.com> Date: Tue, 7 Jan 2025 11:04:50 -0500 Subject: [PATCH 03/10] Small improvements in catalyst cli docs, option help, and intermediate file naming (#1405) **Context:** This PR fixes some small bugs and documentation gaps in catalyst-cli, gathered from @dime10's feedback. - The `--help` flag dumps a lot of unrelated options, which makes it difficult to navigate through the Catalyst options. - The possible stages for the --checkpoint-stage option are not mentioned in the documentation. - When using `--checkpoint-stage`, `--save-ir-after-each=pipeline` no longer works. - The output from --save-ir-after-each=pass produces one file per function when dealing with a function pass, which results in a large number of outputs and forces the user to search for the function of interest. **Description of the Change:** - The `--help` option now prints all the catalyst-cli-specific options first, before moving on to the mlir-opt options. - Added more details to the documentation. - Fixed the bug preventing `checkpoint-stage` and `save-ir-after-each` from coexisting. - Output from `save-ir-after-each` now appends the name of the function to the file name, making it easier to identify the desired output. **Benefits:** Easier experience for catalyst-cli users. **Possible Drawbacks:** **Related GitHub Issues:** --------- Co-authored-by: Joey Carter Co-authored-by: erick-xanadu <110487834+erick-xanadu@users.noreply.github.com> Co-authored-by: David Ittah --- doc/catalyst-cli/catalyst-cli.rst | 36 ++++++++++++++++++---- doc/releases/changelog-0.10.0.md | 8 +++++ mlir/lib/Driver/CompilerDriver.cpp | 48 +++++++++++++++++++----------- 3 files changed, 69 insertions(+), 23 deletions(-) diff --git a/doc/catalyst-cli/catalyst-cli.rst b/doc/catalyst-cli/catalyst-cli.rst index b04c3aaf90..d11fe3aa50 100644 --- a/doc/catalyst-cli/catalyst-cli.rst +++ b/doc/catalyst-cli/catalyst-cli.rst @@ -32,11 +32,11 @@ each stage individually. For example: .. note:: - The Catalyst CLI tool is currently only available when Catalyst is built from source, and is not - included when installing Catalyst via pip or from wheels. + If Catalyst is built from source, the ``catalyst-cli`` executable will be located in + the ``mlir/build/bin/`` directory relative to the root of your Catalyst source directory. - After building Catalyst, the ``catalyst-cli`` executable will be available in the - ``mlir/build/bin/`` directory. + If Catalyst is installed via pip or from wheels, the executable will be located + in the ``catalyst/bin/`` directory relative to the environment’s installation directory. Usage ----- @@ -98,6 +98,23 @@ intermediate files are saved. Keep intermediate files after each pipeline in the compilation. By default, no intermediate files are saved. Using ``--keep-intermediate`` is equivalent to using ``--save-ir-after-each=pipeline``. +``--{passname}`` +"""""""""""""""" + +Enable a specific pass. For example, to enable the ``remove-chained-self-inverse`` pass, use +``--remove-chained-self-inverse``.
+ +Catalyst's main ``mlir`` stage is split up into a sequence of pass pipelines that can also be run +individually via this option. In that case, the name of the pipeline is substituted for the pass +name. Currently, the following pipelines are available: +``enforce-runtime-invariants-pipeline``, +``hlo_lowering-pipeline``, +``quantum-compilation-pipeline``, +``bufferization-pipeline``, +``llvm-dialect-lowring-pipeline``, and finally +``default-catalyst-pipeline`` which encompasses all the above as the default pipeline used by the +Catalyst CLI tool if no pass option is specified. + ``--catalyst-pipeline=`` """"""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""" @@ -113,7 +130,7 @@ applies the pass ``inline-nested-module``, we would specify this pipeline config .. code-block:: - --catalyst-pipeline=pipe1(split-multiple-tapes;apply-transform-sequence),pipe2(inline-nested-module) + --catalyst-pipeline="pipe1(split-multiple-tapes;apply-transform-sequence),pipe2(inline-nested-module)" ``--workspace=`` """""""""""""""""""""" @@ -138,7 +155,14 @@ Enable asynchronous QNodes. """"""""""""""""""""""""""""""""""" Define a *checkpoint stage*, used to indicate that the compiler should start only after reaching the -given pass. +given stage. The stages that are currently available are: + +* MLIR: ``mlir`` (start with first MLIR stage), ``{pipeline}`` such as any of the built-in pipeline + names described under the ``--{passname}`` option, OR any custom pipeline names if the + ``--catalyst-pipeline={pipeline(...),...}`` option is used. +* LLVM: ``llvm_ir`` (start with first LLVM stage), ``CoroOpt``, ``O2Opt``, ``Enzyme``. + Note that ``CoroOpt`` (Coroutine lowering), ``O2Opt`` (O2 optimization), and ``Enzyme`` + (automatic differentiation) passes are only run conditionally as needed. ``--dump-catalyst-pipeline[=]`` """"""""""""""""""""""""""""""""""""""""""" diff --git a/doc/releases/changelog-0.10.0.md b/doc/releases/changelog-0.10.0.md index 95edec5cf1..f8b56f1aba 100644 --- a/doc/releases/changelog-0.10.0.md +++ b/doc/releases/changelog-0.10.0.md @@ -167,6 +167,10 @@ array arguments of the function, in particular when non-64bit datatypes are used. [(#1338)](https://github.com/PennyLaneAI/catalyst/pull/1338) +* Fixed a bug in catalyst cli where using `checkpoint-stage` would cause `save-ir-after-each` + to not work properly. + [(#1405)](https://github.com/PennyLaneAI/catalyst/pull/1405) +

Internal changes ⚙️

* Catalyst no longer depends on or pins the `scipy` package. Instead, OpenBLAS is sourced directly @@ -272,6 +276,10 @@ for transformation passes. [(#1368)](https://github.com/PennyLaneAI/catalyst/pull/1368) +* Added more details to the catalyst-cli documentation specifying available options for + checkpoint-stage and default pipelines. + [(#1405)](https://github.com/PennyLaneAI/catalyst/pull/1405) +

Contributors ✍️

This release contains contributions from (in alphabetical order): diff --git a/mlir/lib/Driver/CompilerDriver.cpp b/mlir/lib/Driver/CompilerDriver.cpp index d29f27202d..04078b4703 100644 --- a/mlir/lib/Driver/CompilerDriver.cpp +++ b/mlir/lib/Driver/CompilerDriver.cpp @@ -490,7 +490,11 @@ LogicalResult preparePassManager(PassManager &pm, const CompilerOptions &options std::string tmp; llvm::raw_string_ostream s{tmp}; s << *op; - dumpToFile(options, output.nextPipelineDumpFilename(pipelineName.str()), tmp); + std::string fileName = pipelineName.str(); + if (auto funcOp = dyn_cast(op)) { + fileName += "_" + funcOp.getName().str(); + } + dumpToFile(options, output.nextPipelineDumpFilename(fileName), tmp); } }; @@ -551,7 +555,7 @@ LogicalResult runPipeline(PassManager &pm, const CompilerOptions &options, Compi llvm::errs() << "Failed to run pipeline: " << pipeline.getName() << "\n"; return failure(); } - if (options.keepIntermediate && options.checkpointStage.empty()) { + if (options.keepIntermediate && (options.checkpointStage.empty() || output.isCheckpointFound)) { std::string tmp; llvm::raw_string_ostream s{tmp}; s << moduleOp; @@ -564,7 +568,7 @@ LogicalResult runLowering(const CompilerOptions &options, MLIRContext *ctx, Modu CompilerOutput &output, TimingScope &timing) { - if (options.keepIntermediate && options.checkpointStage.empty()) { + if (options.keepIntermediate && (options.checkpointStage.empty() || output.isCheckpointFound)) { std::string tmp; llvm::raw_string_ostream s{tmp}; s << moduleOp; @@ -861,26 +865,30 @@ int QuantumDriverMainFromCL(int argc, char **argv) // --------- // Any modifications made to the command-line interface should be documented in // doc/catalyst-cli/catalyst-cli.rst - cl::opt WorkspaceDir("workspace", cl::desc("Workspace directory"), cl::init(".")); + cl::OptionCategory CatalystCat("Catalyst-cli Options", ""); + cl::opt WorkspaceDir("workspace", cl::desc("Workspace directory"), cl::init("."), + cl::cat(CatalystCat)); cl::opt ModuleName("module-name", cl::desc("Module name"), - cl::init("catalyst_module")); + cl::init("catalyst_module"), cl::cat(CatalystCat)); cl::opt SaveAfterEach( "save-ir-after-each", cl::desc("Keep intermediate files after each pass or pipeline"), cl::values(clEnumValN(SaveTemps::AfterPass, "pass", "Save IR after each pass")), cl::values(clEnumValN(SaveTemps::AfterPipeline, "pipeline", "Save IR after each pipeline")), - cl::init(SaveTemps::None)); + cl::init(SaveTemps::None), cl::cat(CatalystCat)); cl::opt KeepIntermediate( "keep-intermediate", cl::desc("Keep intermediate files"), cl::init(false), - cl::callback([&](const bool &) { SaveAfterEach.setValue(SaveTemps::AfterPipeline); })); + cl::callback([&](const bool &) { SaveAfterEach.setValue(SaveTemps::AfterPipeline); }), + cl::cat(CatalystCat)); cl::opt AsyncQNodes("async-qnodes", cl::desc("Enable asynchronous QNodes"), - cl::init(false)); - cl::opt Verbose("verbose", cl::desc("Set verbose"), cl::init(false)); - cl::list CatalystPipeline("catalyst-pipeline", - cl::desc("Catalyst Compiler pass pipelines"), - cl::ZeroOrMore, cl::CommaSeparated); + cl::init(false), cl::cat(CatalystCat)); + cl::opt Verbose("verbose", cl::desc("Set verbose"), cl::init(false), + cl::cat(CatalystCat)); + cl::list CatalystPipeline( + "catalyst-pipeline", cl::desc("Catalyst Compiler pass pipelines"), cl::ZeroOrMore, + cl::CommaSeparated, cl::cat(CatalystCat)); cl::opt CheckpointStage("checkpoint-stage", cl::desc("Checkpoint stage"), - cl::init("")); + cl::init(""), cl::cat(CatalystCat)); cl::opt 
LoweringAction( "tool", cl::desc("Select the tool to isolate"), cl::values(clEnumValN(Action::OPT, "opt", "run quantum-opt on the MLIR input")), @@ -889,9 +897,10 @@ int QuantumDriverMainFromCL(int argc, char **argv) cl::values(clEnumValN(Action::LLC, "llc", "run llc on the llvm IR input")), cl::values(clEnumValN(Action::All, "all", "run quantum-opt, mlir-translate, and llc on the MLIR input")), - cl::init(Action::All)); - cl::opt DumpPassPipeline( - "dump-catalyst-pipeline", cl::desc("Print the pipeline that will be run"), cl::init(false)); + cl::init(Action::All), cl::cat(CatalystCat)); + cl::opt DumpPassPipeline("dump-catalyst-pipeline", + cl::desc("Print the pipeline that will be run"), cl::init(false), + cl::cat(CatalystCat)); // Create dialect registry DialectRegistry registry; @@ -904,8 +913,13 @@ int QuantumDriverMainFromCL(int argc, char **argv) // Register and parse command line options. std::string inputFilename, outputFilename; + std::string helpStr = "Catalyst Command Line Interface options. \n" + "Below, there is a complete list of options for the Catalyst CLI tool" + "In the first section, you can find the options that are used to" + "configure the Catalyst compiler. Next, you can find the options" + "specific to the mlir-opt tool.\n"; std::tie(inputFilename, outputFilename) = - registerAndParseCLIOptions(argc, argv, "quantum compiler", registry); + registerAndParseCLIOptions(argc, argv, helpStr, registry); llvm::InitLLVM y(argc, argv); MlirOptMainConfig config = MlirOptMainConfig::createFromCLOptions(); From f7d0603c06cb30a02c1251a065585719a63f22ef Mon Sep 17 00:00:00 2001 From: David Ittah Date: Wed, 8 Jan 2025 12:07:45 -0500 Subject: [PATCH 04/10] Cherry-pick PR #1408 into the RC (#1423) --- .github/workflows/build-enzyme-v0.0.130.yaml | 155 ------------------ .../workflows/build-wheel-linux-arm64.yaml | 61 +++---- .../workflows/build-wheel-linux-x86_64.yaml | 141 +++++++--------- .../workflows/build-wheel-macos-arm64.yaml | 128 +++++++-------- .../workflows/build-wheel-macos-x86_64.yaml | 127 +++++++------- .../scripts/linux_arm64/rh8/build_catalyst.sh | 4 +- .../scripts/linux_arm64/rh8/build_lld.sh | 41 ----- .../scripts/linux_arm64/rh8/build_llvm.sh | 24 +-- .../scripts/linux_arm64/rh8/test_wheels.sh | 10 +- mlir/Makefile | 20 ++- mlir/patches/mlir-buffer-deallocation.patch | 14 ++ 11 files changed, 234 insertions(+), 491 deletions(-) delete mode 100644 .github/workflows/build-enzyme-v0.0.130.yaml delete mode 100644 .github/workflows/scripts/linux_arm64/rh8/build_lld.sh create mode 100644 mlir/patches/mlir-buffer-deallocation.patch diff --git a/.github/workflows/build-enzyme-v0.0.130.yaml b/.github/workflows/build-enzyme-v0.0.130.yaml deleted file mode 100644 index 297dadaec7..0000000000 --- a/.github/workflows/build-enzyme-v0.0.130.yaml +++ /dev/null @@ -1,155 +0,0 @@ -name: Build Enzyme v0.0.130 - -on: - push: - branches: [ main ] - workflow_dispatch: - -concurrency: - group: ${{ github.workflow }}-${{ github.ref }} - cancel-in-progress: true - -jobs: - determine_runner: - if: github.event.pull_request.draft == false - name: Determine runner type to use - uses: ./.github/workflows/determine-workflow-runner.yml - with: - default_runner: ubuntu-22.04 - - constants: - name: "Set build matrix" - uses: ./.github/workflows/constants.yaml - needs: [determine_runner] - with: - multiple_compilers: ${{ github.trigger == 'push' && github.ref_name == 'main' }} - runs_on: ${{ needs.determine_runner.outputs.runner_group }} - - llvm: - name: LLVM Build - needs: [constants, 
determine_runner] - runs-on: ${{ needs.determine_runner.outputs.runner_group }} - strategy: - matrix: - compiler: ${{ fromJson(needs.constants.outputs.compilers) }} - - steps: - - name: Checkout Catalyst repo - uses: actions/checkout@v4 - - # Both the LLVM source and build folder are required for further dialect builds. - # Caching is significantly faster than git cloning since LLVM is such a large repository. - - - name: Cache LLVM Source - id: cache-llvm-source - uses: actions/cache@v4 - with: - path: mlir/llvm-project - key: llvm-${{ needs.constants.outputs.llvm_version }}-default-source - enableCrossOsArchive: true - - - name: Clone LLVM Submodule - if: steps.cache-llvm-source.outputs.cache-hit != 'true' - uses: actions/checkout@v4 - with: - repository: llvm/llvm-project - ref: ${{ needs.constants.outputs.llvm_version }} - path: mlir/llvm-project - - - name: Cache LLVM Build - id: cache-llvm-build - uses: actions/cache@v4 - with: - path: llvm-build - key: ${{ runner.os }}-llvm-${{ needs.constants.outputs.llvm_version }}-default-build-${{ matrix.compiler }} - - - name: Install Deps - if: steps.cache-llvm-build.outputs.cache-hit != 'true' - run: | - sudo apt-get update - sudo apt-get install -y python3 python3-pip cmake ninja-build clang lld - python3 --version | grep ${{ needs.constants.outputs.primary_python_version }} - python3 -m pip install numpy pybind11 - - - name: Build LLVM - if: steps.cache-llvm-build.outputs.cache-hit != 'true' - # Note: Disable instrumentation for the mlir runtime support library, - # as user programs aren't instrumented. - run: | - # echo 'target_compile_options(mlir_c_runner_utils PRIVATE "-fno-sanitize=all")' \ - # >> mlir/llvm-project/mlir/lib/ExecutionEngine/CMakeLists.txt - C_COMPILER=$(which ${{ needs.constants.outputs[format('c_compiler.{0}', matrix.compiler)] }}) \ - CXX_COMPILER=$(which ${{ needs.constants.outputs[format('cxx_compiler.{0}', matrix.compiler)] }}) \ - LLVM_BUILD_DIR="$(pwd)/llvm-build" \ - COMPILER_LAUNCHER="" \ - make llvm - - enzyme: - name: Enzyme Build - needs: [constants, llvm, determine_runner] - runs-on: ${{ needs.determine_runner.outputs.runner_group }} - strategy: - matrix: - compiler: ${{ fromJson(needs.constants.outputs.compilers) }} - - steps: - - name: Checkout Catalyst repo - uses: actions/checkout@v4 - - - name: Cache Enzyme Source - id: cache-enzyme-source - uses: actions/cache@v4 - with: - path: mlir/Enzyme - key: enzyme-v0.0.130-default-source - enableCrossOsArchive: true - - - name: Clone Enzyme Submodule - if: steps.cache-enzyme-build.outputs.cache-hit != 'true' - uses: actions/checkout@v4 - with: - repository: EnzymeAD/Enzyme - ref: v0.0.130 - path: mlir/Enzyme - - - name: Cache Enzyme Build - id: cache-enzyme-build - uses: actions/cache@v4 - with: - path: enzyme-build - key: ${{ runner.os }}-enzyme-${{ needs.constants.outputs.llvm_version }}-v0.0.130-default-build-${{ matrix.compiler }} - - - name: Get Cached LLVM Source - id: cache-llvm-source - if: steps.cache-enzyme-build.outputs.cache-hit != 'true' - uses: actions/cache@v4 - with: - path: mlir/llvm-project - key: llvm-${{ needs.constants.outputs.llvm_version }}-default-source - enableCrossOsArchive: true - fail-on-cache-miss: true - - - name: Get Cached LLVM Build - id: cache-llvm-build - if: steps.cache-enzyme-build.outputs.cache-hit != 'true' - uses: actions/cache@v4 - with: - path: llvm-build - key: ${{ runner.os }}-llvm-${{ needs.constants.outputs.llvm_version }}-default-build-${{ matrix.compiler }} - fail-on-cache-miss: true - - - name: Install Deps - 
if: steps.cache-enzyme-build.outputs.cache-hit != 'true' - run: | - sudo apt-get update - sudo apt-get install -y cmake ninja-build clang lld - - - name: Build Enzyme - if: steps.cache-enzyme-build.outputs.cache-hit != 'true' - run: | - C_COMPILER=$(which ${{ needs.constants.outputs[format('c_compiler.{0}', matrix.compiler)] }}) \ - CXX_COMPILER=$(which ${{ needs.constants.outputs[format('cxx_compiler.{0}', matrix.compiler)] }}) \ - LLVM_BUILD_DIR="$(pwd)/llvm-build" \ - ENZYME_BUILD_DIR="$(pwd)/enzyme-build" \ - COMPILER_LAUNCHER="" \ - make enzyme diff --git a/.github/workflows/build-wheel-linux-arm64.yaml b/.github/workflows/build-wheel-linux-arm64.yaml index fdf81109d7..52aca51cd0 100644 --- a/.github/workflows/build-wheel-linux-arm64.yaml +++ b/.github/workflows/build-wheel-linux-arm64.yaml @@ -62,7 +62,7 @@ jobs: id: cache-llvm-source uses: actions/cache@v4 with: - path: mlir/llvm-project + path: ${{ github.workspace }}/mlir/llvm-project key: llvm-${{ needs.constants.outputs.llvm_version }}-default-source enableCrossOsArchive: True @@ -70,7 +70,7 @@ jobs: id: cache-mhlo-source uses: actions/cache@v4 with: - path: mlir/mlir-hlo + path: ${{ github.workspace }}/mlir/mlir-hlo key: mhlo-${{ needs.constants.outputs.mhlo_version }}-default-source enableCrossOsArchive: True @@ -78,7 +78,7 @@ jobs: id: cache-enzyme-source uses: actions/cache@v4 with: - path: mlir/Enzyme + path: ${{ github.workspace }}/mlir/Enzyme key: enzyme-${{ needs.constants.outputs.enzyme_version }}-default-source enableCrossOsArchive: True @@ -88,7 +88,7 @@ jobs: with: repository: llvm/llvm-project ref: ${{ needs.constants.outputs.llvm_version }} - path: mlir/llvm-project + path: ${{ github.workspace }}/mlir/llvm-project - name: Clone MHLO Submodule if: steps.cache-mhlo-source.outputs.cache-hit != 'true' @@ -96,7 +96,7 @@ jobs: with: repository: tensorflow/mlir-hlo ref: ${{ needs.constants.outputs.mhlo_version }} - path: mlir/mlir-hlo + path: ${{ github.workspace }}/mlir/mlir-hlo - name: Clone Enzyme Submodule if: steps.cache-enzyme-source.outputs.cache-hit != 'true' @@ -104,21 +104,21 @@ jobs: with: repository: EnzymeAD/Enzyme ref: ${{ needs.constants.outputs.enzyme_version }} - path: mlir/Enzyme + path: ${{ github.workspace }}/mlir/Enzyme # Cache external project builds - name: Restore LLVM Build id: cache-llvm-build uses: actions/cache/restore@v4 with: - path: llvm-build + path: ${{ github.workspace }}/llvm-build key: ${{ matrix.container_name }}-llvm-${{ needs.constants.outputs.llvm_version }}-wheel-build - name: Restore MHLO Build id: cache-mhlo-build uses: actions/cache/restore@v4 with: - path: mhlo-build + path: ${{ github.workspace }}/mhlo-build key: ${{ matrix.container_name }}-mhlo-${{ needs.constants.outputs.mhlo_version }}-wheel-build lookup-only: True @@ -126,22 +126,10 @@ jobs: id: cache-enzyme-build uses: actions/cache/restore@v4 with: - path: enzyme-build + path: ${{ github.workspace }}/enzyme-build key: ${{ matrix.container_name }}-enzyme-${{ needs.constants.outputs.llvm_version }}-${{ needs.constants.outputs.enzyme_version }}-wheel-build lookup-only: True - - name: Build LLD - if: steps.cache-llvm-build.outputs.cache-hit != 'true' - run: | - set -x - # With GCC 13, LLVM fails some tests, then we use GCC 12 instead (Copied from below) - export GCC_VERSION=12 - docker run --rm --platform linux/aarch64 \ - -v /var/run/docker.sock:/var/run/docker.sock \ - -v `pwd`:/catalyst \ - -i ${{ matrix.container_img }} \ - bash /catalyst/.github/workflows/scripts/linux_arm64/rh8/build_lld.sh $GCC_VERSION ${{ 
matrix.python_version.major_minor }} ${{ matrix.python_version.patch }} ${{ matrix.python_version.package }} - - name: Build LLVM / MLIR if: steps.cache-llvm-build.outputs.cache-hit != 'true' run: | @@ -159,7 +147,7 @@ jobs: if: steps.cache-llvm-build.outputs.cache-hit != 'true' uses: actions/cache/save@v4 with: - path: llvm-build + path: ${{ github.workspace }}/llvm-build key: ${{ matrix.container_name }}-llvm-${{ needs.constants.outputs.llvm_version }}-wheel-build - name: Build MHLO Dialect @@ -178,7 +166,7 @@ jobs: if: steps.cache-mhlo-build.outputs.cache-hit != 'true' uses: actions/cache/save@v4 with: - path: mhlo-build + path: ${{ github.workspace }}/mhlo-build key: ${{ matrix.container_name }}-mhlo-${{ needs.constants.outputs.mhlo_version }}-wheel-build - name: Build Enzyme @@ -197,7 +185,7 @@ jobs: if: steps.cache-enzyme-build.outputs.cache-hit != 'true' uses: actions/cache/save@v4 with: - path: enzyme-build + path: ${{ github.workspace }}/enzyme-build key: ${{ matrix.container_name }}-enzyme-${{ needs.constants.outputs.llvm_version }}-${{ needs.constants.outputs.enzyme_version }}-wheel-build catalyst-linux-wheels-arm64: @@ -230,7 +218,7 @@ jobs: id: cache-llvm-source uses: actions/cache/restore@v4 with: - path: mlir/llvm-project + path: ${{ github.workspace }}/mlir/llvm-project key: llvm-${{ needs.constants.outputs.llvm_version }}-default-source enableCrossOsArchive: True fail-on-cache-miss: True @@ -239,7 +227,7 @@ jobs: id: cache-llvm-build uses: actions/cache/restore@v4 with: - path: llvm-build + path: ${{ github.workspace }}/llvm-build key: ${{ matrix.container_name }}-llvm-${{ needs.constants.outputs.llvm_version }}-wheel-build fail-on-cache-miss: True @@ -247,7 +235,7 @@ jobs: id: cache-mhlo-source uses: actions/cache/restore@v4 with: - path: mlir/mlir-hlo + path: ${{ github.workspace }}/mlir/mlir-hlo key: mhlo-${{ needs.constants.outputs.mhlo_version }}-default-source enableCrossOsArchive: True fail-on-cache-miss: True @@ -256,7 +244,7 @@ jobs: id: cache-mhlo-build uses: actions/cache/restore@v4 with: - path: mhlo-build + path: ${{ github.workspace }}/mhlo-build key: ${{ matrix.container_name }}-mhlo-${{ needs.constants.outputs.mhlo_version }}-wheel-build fail-on-cache-miss: True @@ -264,7 +252,7 @@ jobs: id: cache-enzyme-source uses: actions/cache/restore@v4 with: - path: mlir/Enzyme + path: ${{ github.workspace }}/mlir/Enzyme key: enzyme-${{ needs.constants.outputs.enzyme_version }}-default-source enableCrossOsArchive: True fail-on-cache-miss: True @@ -273,7 +261,7 @@ jobs: id: cache-enzyme-build uses: actions/cache/restore@v4 with: - path: enzyme-build + path: ${{ github.workspace }}/enzyme-build key: ${{ matrix.container_name }}-enzyme-${{ needs.constants.outputs.llvm_version }}-${{ needs.constants.outputs.enzyme_version }}-wheel-build fail-on-cache-miss: True @@ -292,7 +280,7 @@ jobs: uses: actions/upload-artifact@v4 with: name: catalyst-linux_arm64-wheel-py-${{ matrix.python_version.major_minor}}.zip - path: wheel/ + path: ${{ github.workspace }}/wheel/ retention-days: 14 test-wheels: @@ -326,16 +314,7 @@ jobs: uses: actions/download-artifact@v4 with: name: catalyst-linux_arm64-wheel-py-${{ matrix.python_version.major_minor }}.zip - path: dist - - # Needed for accessing llvm-symbolizer - - name: Get Cached LLVM Build - id: cache-llvm-build - uses: actions/cache@v4 - with: - path: llvm-build - key: ${{ matrix.container_name }}-llvm-${{ needs.constants.outputs.llvm_version }}-wheel-build - fail-on-cache-miss: True + path: ${{ github.workspace }}/dist - name: Run Python 
Pytest Tests run: | diff --git a/.github/workflows/build-wheel-linux-x86_64.yaml b/.github/workflows/build-wheel-linux-x86_64.yaml index b7104482fe..a831c9ebb3 100644 --- a/.github/workflows/build-wheel-linux-x86_64.yaml +++ b/.github/workflows/build-wheel-linux-x86_64.yaml @@ -71,7 +71,7 @@ jobs: id: cache-llvm-source uses: actions/cache@v4 with: - path: mlir/llvm-project + path: ${{ github.workspace }}/mlir/llvm-project key: llvm-${{ needs.constants.outputs.llvm_version }}-container-source enableCrossOsArchive: True @@ -79,7 +79,7 @@ jobs: id: cache-mhlo-source uses: actions/cache@v4 with: - path: mlir/mlir-hlo + path: ${{ github.workspace }}/mlir/mlir-hlo key: mhlo-${{ needs.constants.outputs.mhlo_version }}-container-source enableCrossOsArchive: True @@ -87,7 +87,7 @@ jobs: id: cache-enzyme-source uses: actions/cache@v4 with: - path: mlir/Enzyme + path: ${{ github.workspace }}/mlir/Enzyme key: enzyme-${{ needs.constants.outputs.enzyme_version }}-container-source enableCrossOsArchive: True @@ -97,7 +97,7 @@ jobs: with: repository: llvm/llvm-project ref: ${{ needs.constants.outputs.llvm_version }} - path: mlir/llvm-project + path: ${{ github.workspace }}/mlir/llvm-project - name: Clone MHLO Submodule if: steps.cache-mhlo-source.outputs.cache-hit != 'true' @@ -105,7 +105,7 @@ jobs: with: repository: tensorflow/mlir-hlo ref: ${{ needs.constants.outputs.mhlo_version }} - path: mlir/mlir-hlo + path: ${{ github.workspace }}/mlir/mlir-hlo - name: Clone Enzyme Submodule if: steps.cache-enzyme-source.outputs.cache-hit != 'true' @@ -113,21 +113,21 @@ jobs: with: repository: EnzymeAD/Enzyme ref: ${{ needs.constants.outputs.enzyme_version }} - path: mlir/Enzyme + path: ${{ github.workspace }}/mlir/Enzyme # Cache external project builds - name: Restore LLVM Build id: cache-llvm-build uses: actions/cache/restore@v4 with: - path: llvm-build + path: ${{ github.workspace }}/llvm-build key: ${{ matrix.container_img }}-llvm-${{ needs.constants.outputs.llvm_version }}-${{matrix.python_version}}-wheel-build - name: Restore MHLO Build id: cache-mhlo-build uses: actions/cache/restore@v4 with: - path: mhlo-build + path: ${{ github.workspace }}/mhlo-build key: ${{ matrix.container_img }}-mhlo-${{ needs.constants.outputs.mhlo_version }}-wheel-build lookup-only: True @@ -135,7 +135,7 @@ jobs: id: cache-enzyme-build uses: actions/cache/restore@v4 with: - path: enzyme-build + path: ${{ github.workspace }}/enzyme-build key: ${{ matrix.container_img }}-enzyme-${{ needs.constants.outputs.llvm_version }}-${{ needs.constants.outputs.enzyme_version }}-wheel-build lookup-only: True @@ -158,49 +158,29 @@ jobs: PYTHON_BINS=$(find /opt/_internal/cpython-${{ matrix.python_version }}.*/bin -maxdepth 1 -type d | tr '\n' ':' | sed 's/:$//') echo $PYTHON_BINS >> $GITHUB_PATH - # Required for MHLO and building MLIR with protected symbols. + # LLD is required for MHLO builds. # (Don't forget to add the build directory to PATH in subsequent steps, so # other tools can find it, in particular collect2 invoked by gcc.) 
- - name: Build LLD - if: steps.cache-llvm-build.outputs.cache-hit != 'true' - run: | - cmake -S mlir/llvm-project/llvm -B llvm-build -G Ninja \ - -DCMAKE_BUILD_TYPE=Release \ - -DLLVM_TARGETS_TO_BUILD="host" \ - -DLLVM_ENABLE_PROJECTS="lld" - - cmake --build llvm-build --target lld - - name: Build LLVM / MLIR if: steps.cache-llvm-build.outputs.cache-hit != 'true' run: | export PATH=$GITHUB_WORKSPACE/llvm-build/bin:$PATH - cmake -S mlir/llvm-project/llvm -B llvm-build -G Ninja \ - -DCMAKE_BUILD_TYPE=Release \ - -DLLVM_BUILD_EXAMPLES=OFF \ - -DLLVM_TARGETS_TO_BUILD="host" \ - -DLLVM_ENABLE_PROJECTS="mlir" \ - -DLLVM_ENABLE_ASSERTIONS=ON \ - -DLLVM_INSTALL_UTILS=ON \ - -DLLVM_ENABLE_ZLIB=FORCE_ON \ - -DLLVM_ENABLE_ZSTD=OFF \ - -DMLIR_ENABLE_BINDINGS_PYTHON=ON \ - -DPython3_EXECUTABLE=$(which python${{ matrix.python_version }}) \ - -DPython3_NumPy_INCLUDE_DIRS=$(python${{ matrix.python_version }} -c "import numpy as np; print(np.get_include())") \ - -DCMAKE_CXX_VISIBILITY_PRESET=default \ - -DLLVM_ENABLE_LLD=ON - - # TODO: when updating LLVM, test to see if mlir/unittests/Bytecode/BytecodeTest.cpp:55 is passing - # and remove filter - # This tests fails on CI/CD not locally. - LIT_FILTER_OUT="Bytecode" cmake --build llvm-build --target check-mlir + PYTHON=$(which python${{ matrix.python_version }}) \ + C_COMPILER=$(which gcc) \ + CXX_COMPILER=$(which g++) \ + LLVM_BUILD_DIR="$GITHUB_WORKSPACE/llvm-build" \ + LLVM_PROJECTS="lld;mlir" \ + LLVM_TARGETS="lld check-mlir" \ + ENABLE_ZLIB=FORCE_ON \ + ENABLE_LLD=OFF \ + make llvm - name: Save LLVM Build id: save-llvm-build if: steps.cache-llvm-build.outputs.cache-hit != 'true' uses: actions/cache/save@v4 with: - path: llvm-build + path: ${{ github.workspace }}/llvm-build key: ${{ matrix.container_img }}-llvm-${{ needs.constants.outputs.llvm_version }}-${{matrix.python_version}}-wheel-build - name: Build MHLO Dialect @@ -213,45 +193,45 @@ jobs: export PATCH_FILE=mlir/patches/mhlo-Add-PassesIncGen-in-transforms-CMakeList.patch if patch --dry-run -p1 -N $TARGET_FILE $PATCH_FILE > /dev/null 2>&1; then patch -p1 $TARGET_FILE $PATCH_FILE; fi - cmake -S mlir/mlir-hlo -B mhlo-build -G Ninja \ + cmake -S mlir/mlir-hlo -B $GITHUB_WORKSPACE/mhlo-build -G Ninja \ -DCMAKE_BUILD_TYPE=Release \ -DLLVM_ENABLE_ASSERTIONS=ON \ - -DMLIR_DIR=$GITHUB_WORKSPACE/llvm-build/lib/cmake/mlir \ + -DMLIR_DIR="$GITHUB_WORKSPACE/llvm-build/lib/cmake/mlir" \ -DPython3_EXECUTABLE=$(which python${{ matrix.python_version }}) \ -DLLVM_ENABLE_ZLIB=FORCE_ON \ -DLLVM_ENABLE_ZSTD=OFF \ -DCMAKE_CXX_VISIBILITY_PRESET=default \ -DLLVM_ENABLE_LLD=ON - LIT_FILTER_OUT="chlo_legalize_to_mhlo" cmake --build mhlo-build --target check-mlir-hlo + LIT_FILTER_OUT="chlo_legalize_to_mhlo" cmake --build $GITHUB_WORKSPACE/mhlo-build --target check-mlir-hlo - name: Save MHLO Build id: save-mhlo-build if: steps.cache-mhlo-build.outputs.cache-hit != 'true' uses: actions/cache/save@v4 with: - path: mhlo-build + path: ${{ github.workspace }}/mhlo-build key: ${{ matrix.container_img }}-mhlo-${{ needs.constants.outputs.mhlo_version }}-wheel-build - name: Build Enzyme if: steps.cache-enzyme-build.outputs.cache-hit != 'true' run: | export PATH=$GITHUB_WORKSPACE/llvm-build/bin:$PATH - cmake -S mlir/Enzyme/enzyme -B enzyme-build -G Ninja \ + cmake -S mlir/Enzyme/enzyme -B $GITHUB_WORKSPACE/enzyme-build -G Ninja \ -DCMAKE_BUILD_TYPE=Release \ - -DLLVM_DIR=$GITHUB_WORKSPACE/llvm-build/lib/cmake/llvm \ + -DLLVM_DIR="$GITHUB_WORKSPACE/llvm-build/lib/cmake/llvm" \ -DENZYME_STATIC_LIB=ON \ 
-DCMAKE_CXX_VISIBILITY_PRESET=default \ -DCMAKE_CXX_FLAGS="-fuse-ld=lld" - cmake --build enzyme-build --target EnzymeStatic-19 + cmake --build $GITHUB_WORKSPACE/enzyme-build --target EnzymeStatic-19 - name: Save Enzyme Build id: save-enzyme-build if: steps.cache-enzyme-build.outputs.cache-hit != 'true' uses: actions/cache/save@v4 with: - path: enzyme-build + path: ${{ github.workspace }}/enzyme-build key: ${{ matrix.container_img }}-enzyme-${{ needs.constants.outputs.llvm_version }}-${{ needs.constants.outputs.enzyme_version }}-wheel-build catalyst-linux-wheels-x86-64: @@ -289,7 +269,7 @@ jobs: id: cache-llvm-source uses: actions/cache/restore@v4 with: - path: mlir/llvm-project + path: ${{ github.workspace }}/mlir/llvm-project key: llvm-${{ needs.constants.outputs.llvm_version }}-container-source enableCrossOsArchive: True fail-on-cache-miss: True @@ -298,7 +278,7 @@ jobs: id: cache-llvm-build uses: actions/cache/restore@v4 with: - path: llvm-build + path: ${{ github.workspace }}/llvm-build key: ${{ matrix.container_img }}-llvm-${{ needs.constants.outputs.llvm_version }}-3.10-wheel-build fail-on-cache-miss: True @@ -306,7 +286,7 @@ jobs: id: cache-mhlo-source uses: actions/cache/restore@v4 with: - path: mlir/mlir-hlo + path: ${{ github.workspace }}/mlir/mlir-hlo key: mhlo-${{ needs.constants.outputs.mhlo_version }}-container-source enableCrossOsArchive: True fail-on-cache-miss: True @@ -315,7 +295,7 @@ jobs: id: cache-mhlo-build uses: actions/cache/restore@v4 with: - path: mhlo-build + path: ${{ github.workspace }}/mhlo-build key: ${{ matrix.container_img }}-mhlo-${{ needs.constants.outputs.mhlo_version }}-wheel-build fail-on-cache-miss: True @@ -323,7 +303,7 @@ jobs: id: cache-enzyme-source uses: actions/cache/restore@v4 with: - path: mlir/Enzyme + path: ${{ github.workspace }}/mlir/Enzyme key: enzyme-${{ needs.constants.outputs.enzyme_version }}-container-source enableCrossOsArchive: True fail-on-cache-miss: True @@ -332,28 +312,28 @@ jobs: id: cache-enzyme-build uses: actions/cache/restore@v4 with: - path: enzyme-build + path: ${{ github.workspace }}/enzyme-build key: ${{ matrix.container_img }}-enzyme-${{ needs.constants.outputs.llvm_version }}-${{ needs.constants.outputs.enzyme_version }}-wheel-build fail-on-cache-miss: True # Build Catalyst-Runtime - name: Build Catalyst-Runtime run: | - cmake -S runtime -B runtime-build -G Ninja \ + cmake -S runtime -B $GITHUB_WORKSPACE/runtime-build -G Ninja \ -DCMAKE_BUILD_TYPE=Release \ - -DCMAKE_LIBRARY_OUTPUT_DIRECTORY=$GITHUB_WORKSPACE/runtime-build/lib \ + -DCMAKE_LIBRARY_OUTPUT_DIRECTORY="$GITHUB_WORKSPACE/runtime-build/lib" \ -DPython_EXECUTABLE=$(which python${{ matrix.python_version }}) \ -DENABLE_OPENQASM=ON - cmake --build runtime-build --target rt_capi rtd_openqasm rtd_null_qubit + cmake --build $GITHUB_WORKSPACE/runtime-build --target rt_capi rtd_openqasm rtd_null_qubit # Build OQC-Runtime - name: Build OQC-Runtime run: | C_COMPILER=$(which gcc) \ CXX_COMPILER=$(which g++) \ - OQC_BUILD_DIR=$GITHUB_WORKSPACE/oqc-build \ - RT_BUILD_DIR=$GITHUB_WORKSPACE/runtime-build \ + OQC_BUILD_DIR="$GITHUB_WORKSPACE/oqc-build" \ + RT_BUILD_DIR="$GITHUB_WORKSPACE/runtime-build" \ PYTHON=$(which python${{ matrix.python_version }}) \ make oqc @@ -362,8 +342,8 @@ jobs: run: | C_COMPILER=$(which gcc) \ CXX_COMPILER=$(which g++) \ - OQD_BUILD_DIR=$GITHUB_WORKSPACE/oqd-build \ - RT_BUILD_DIR=$GITHUB_WORKSPACE/runtime-build \ + OQD_BUILD_DIR="$GITHUB_WORKSPACE/oqd-build" \ + RT_BUILD_DIR="$GITHUB_WORKSPACE/runtime-build" \ PYTHON=$(which python${{ 
matrix.python_version }}) \ make oqd @@ -371,28 +351,27 @@ jobs: - name: Build MLIR Dialects run: | export PATH=$GITHUB_WORKSPACE/llvm-build/bin:$PATH - cmake -S mlir -B quantum-build -G Ninja \ + cmake -S mlir -B $GITHUB_WORKSPACE/quantum-build -G Ninja \ -DCMAKE_BUILD_TYPE=Release \ -DLLVM_ENABLE_ASSERTIONS=ON \ -DQUANTUM_ENABLE_BINDINGS_PYTHON=ON \ -DPython3_EXECUTABLE=$(which python${{ matrix.python_version }}) \ -DPython3_NumPy_INCLUDE_DIRS=$(python${{ matrix.python_version }} -c "import numpy as np; print(np.get_include())") \ - -DMLIR_DIR=$GITHUB_WORKSPACE/llvm-build/lib/cmake/mlir \ - -DMHLO_DIR=$GITHUB_WORKSPACE/mhlo-build/lib/cmake/mlir-hlo \ - -DMHLO_BINARY_DIR=$GITHUB_WORKSPACE/mhlo-build/bin \ - -DEnzyme_DIR=$GITHUB_WORKSPACE/enzyme-build \ - -DENZYME_SRC_DIR=$GITHUB_WORKSPACE/mlir/Enzyme \ + -DMLIR_DIR="$GITHUB_WORKSPACE/llvm-build/lib/cmake/mlir" \ + -DMHLO_DIR="$GITHUB_WORKSPACE/mhlo-build/lib/cmake/mlir-hlo" \ + -DMHLO_BINARY_DIR="$GITHUB_WORKSPACE/mhlo-build/bin" \ + -DEnzyme_DIR="$GITHUB_WORKSPACE/enzyme-build" \ + -DENZYME_SRC_DIR="$GITHUB_WORKSPACE/mlir/Enzyme" \ -DLLVM_ENABLE_ZLIB=FORCE_ON \ -DLLVM_ENABLE_ZSTD=OFF \ -DLLVM_ENABLE_LLD=ON - cmake --build quantum-build --target check-dialects catalyst-cli + cmake --build $GITHUB_WORKSPACE/quantum-build --target check-dialects catalyst-cli - name: Build Plugin wheel # Run only on Thursday at the given time if: github.event.schedule == '35 4 * * 4' run: | - CCACHE_DIR="$(pwd)/.ccache" \ MLIR_DIR="$GITHUB_WORKSPACE/llvm-build/lib/cmake/mlir" \ LLVM_BUILD_DIR="$GITHUB_WORKSPACE/llvm-build" \ make plugin-wheel @@ -400,13 +379,13 @@ jobs: - name: Build wheel run: | PYTHON=python${{ matrix.python_version }} \ - LLVM_BUILD_DIR=$GITHUB_WORKSPACE/llvm-build \ - MHLO_BUILD_DIR=$GITHUB_WORKSPACE/mhlo-build \ - DIALECTS_BUILD_DIR=$GITHUB_WORKSPACE/quantum-build \ - RT_BUILD_DIR=$GITHUB_WORKSPACE/runtime-build \ - OQC_BUILD_DIR=$GITHUB_WORKSPACE/oqc-build \ - OQD_BUILD_DIR=$GITHUB_WORKSPACE/oqd-build \ - ENZYME_BUILD_DIR=$GITHUB_WORKSPACE/enzyme-build \ + LLVM_BUILD_DIR="$GITHUB_WORKSPACE/llvm-build" \ + MHLO_BUILD_DIR="$GITHUB_WORKSPACE/mhlo-build" \ + DIALECTS_BUILD_DIR="$GITHUB_WORKSPACE/quantum-build" \ + RT_BUILD_DIR="$GITHUB_WORKSPACE/runtime-build" \ + OQC_BUILD_DIR="$GITHUB_WORKSPACE/oqc-build" \ + OQD_BUILD_DIR="$GITHUB_WORKSPACE/oqd-build" \ + ENZYME_BUILD_DIR="$GITHUB_WORKSPACE/enzyme-build" \ make wheel - name: Repair wheel using auditwheel @@ -418,7 +397,7 @@ jobs: uses: actions/upload-artifact@v4 with: name: catalyst-manylinux_2_28_x86_64-wheel-py-${{ matrix.python_version }}.zip - path: wheel/ + path: ${{ github.workspace }}/wheel/ retention-days: 14 - name: Upload Standalone Plugin Wheel Artifact @@ -427,7 +406,7 @@ jobs: uses: actions/upload-artifact@v4 with: name: standalone-plugin-manylinux_2_28_x86_64-wheel-py-${{ matrix.python_version }}.zip - path: standalone_plugin_wheel/dist + path: ${{ github.workspace }}/standalone_plugin_wheel/dist retention-days: 14 test-wheels: @@ -449,7 +428,7 @@ jobs: uses: actions/download-artifact@v4 with: name: catalyst-manylinux_2_28_x86_64-wheel-py-${{ matrix.python_version }}.zip - path: dist + path: ${{ github.workspace }}/dist - name: Download Standalone Plugin Wheel Artifact # Run only on Thursday at the given time @@ -457,7 +436,7 @@ jobs: uses: actions/download-artifact@v4 with: name: standalone-plugin-manylinux_2_28_x86_64-wheel-py-${{ matrix.python_version }}.zip - path: standalone_plugin_wheel/wheel + path: ${{ github.workspace }}/standalone_plugin_wheel/wheel - name: Set 
up Python ${{ matrix.python_version }} uses: actions/setup-python@v5 @@ -486,7 +465,7 @@ jobs: if: github.event.schedule == '35 4 * * 4' run: | python${{ matrix.python_version }} -m pip install standalone_plugin_wheel/wheel/*.whl --no-deps - + - name: Run Python Pytest Tests run: | python${{ matrix.python_version }} -m pytest frontend/test/pytest -n auto diff --git a/.github/workflows/build-wheel-macos-arm64.yaml b/.github/workflows/build-wheel-macos-arm64.yaml index b6a79be800..63fd776744 100644 --- a/.github/workflows/build-wheel-macos-arm64.yaml +++ b/.github/workflows/build-wheel-macos-arm64.yaml @@ -64,7 +64,7 @@ jobs: id: cache-llvm-source uses: actions/cache@v4 with: - path: mlir/llvm-project + path: ${{ github.workspace }}/mlir/llvm-project key: llvm-${{ needs.constants.outputs.llvm_version }}-default-source enableCrossOsArchive: True @@ -72,7 +72,7 @@ jobs: id: cache-mhlo-source uses: actions/cache@v4 with: - path: mlir/mlir-hlo + path: ${{ github.workspace }}/mlir/mlir-hlo key: mhlo-${{ needs.constants.outputs.mhlo_version }}-default-source enableCrossOsArchive: True @@ -80,7 +80,7 @@ jobs: id: cache-enzyme-source uses: actions/cache@v4 with: - path: mlir/Enzyme + path: ${{ github.workspace }}/mlir/Enzyme key: enzyme-${{ needs.constants.outputs.enzyme_version }}-default-source enableCrossOsArchive: True @@ -90,7 +90,7 @@ jobs: with: repository: llvm/llvm-project ref: ${{ needs.constants.outputs.llvm_version }} - path: mlir/llvm-project + path: ${{ github.workspace }}/mlir/llvm-project - name: Clone MHLO Submodule if: steps.cache-mhlo-source.outputs.cache-hit != 'true' @@ -98,7 +98,7 @@ jobs: with: repository: tensorflow/mlir-hlo ref: ${{ needs.constants.outputs.mhlo_version }} - path: mlir/mlir-hlo + path: ${{ github.workspace }}/mlir/mlir-hlo - name: Clone Enzyme Submodule if: steps.cache-enzyme-source.outputs.cache-hit != 'true' @@ -106,21 +106,21 @@ jobs: with: repository: EnzymeAD/Enzyme ref: ${{ needs.constants.outputs.enzyme_version }} - path: mlir/Enzyme + path: ${{ github.workspace }}/mlir/Enzyme # Cache external project builds - name: Restore LLVM Build id: cache-llvm-build uses: actions/cache/restore@v4 with: - path: llvm-build + path: ${{ github.workspace }}/llvm-build key: ${{ runner.os }}-${{ runner.arch }}-llvm-${{ needs.constants.outputs.llvm_version }}-${{matrix.python_version}}-wheel-build - name: Restore MHLO Build id: cache-mhlo-build uses: actions/cache/restore@v4 with: - path: mhlo-build + path: ${{ github.workspace }}/mhlo-build key: ${{ runner.os }}-${{ runner.arch }}-mhlo-${{ needs.constants.outputs.mhlo_version }}-wheel-build lookup-only: True @@ -128,7 +128,7 @@ jobs: id: cache-enzyme-build uses: actions/cache/restore@v4 with: - path: enzyme-build + path: ${{ github.workspace }}/enzyme-build key: ${{ runner.os }}-${{ runner.arch }}-enzyme-${{ needs.constants.outputs.llvm_version }}-${{ needs.constants.outputs.enzyme_version }}-wheel-build lookup-only: True @@ -146,32 +146,18 @@ jobs: - name: Build LLVM / MLIR if: steps.cache-llvm-build.outputs.cache-hit != 'true' run: | - cmake -S mlir/llvm-project/llvm -B llvm-build -G Ninja \ - -DCMAKE_BUILD_TYPE=Release \ - -DLLVM_BUILD_EXAMPLES=OFF \ - -DLLVM_TARGETS_TO_BUILD="host" \ - -DLLVM_ENABLE_PROJECTS="mlir" \ - -DLLVM_ENABLE_ASSERTIONS=ON \ - -DLLVM_INSTALL_UTILS=ON \ - -DLLVM_ENABLE_ZLIB=FORCE_ON \ - -DLLVM_ENABLE_ZSTD=OFF \ - -DLLVM_ENABLE_LLD=OFF \ - -DMLIR_ENABLE_BINDINGS_PYTHON=ON \ - -DPython3_EXECUTABLE=$(which python${{ matrix.python_version }}) \ - -DPython3_NumPy_INCLUDE_DIRS=$(python${{ 
matrix.python_version }} -c "import numpy as np; print(np.get_include())") \ - -DCMAKE_CXX_VISIBILITY_PRESET=default - - # TODO: when updating LLVM, test to see if mlir/unittests/Bytecode/BytecodeTest.cpp:55 is passing - # and remove filter - # This tests fails on CI/CD not locally. - LIT_FILTER_OUT="Bytecode" cmake --build llvm-build --target check-mlir + PYTHON=$(which python${{ matrix.python_version }}) \ + LLVM_BUILD_DIR="$GITHUB_WORKSPACE/llvm-build" \ + LLVM_TARGETS="check-mlir" \ + ENABLE_ZLIB=FORCE_ON \ + make llvm - name: Save LLVM Build id: save-llvm-build if: steps.cache-llvm-build.outputs.cache-hit != 'true' uses: actions/cache/save@v4 with: - path: llvm-build + path: ${{ github.workspace }}/llvm-build key: ${{ runner.os }}-${{ runner.arch }}-llvm-${{ needs.constants.outputs.llvm_version }}-${{matrix.python_version}}-wheel-build - name: Build MHLO Dialect @@ -183,43 +169,43 @@ jobs: export PATCH_FILE=mlir/patches/mhlo-Add-PassesIncGen-in-transforms-CMakeList.patch if patch --dry-run -p1 -N $TARGET_FILE $PATCH_FILE > /dev/null 2>&1; then patch -p1 $TARGET_FILE $PATCH_FILE; fi - cmake -S mlir/mlir-hlo -B mhlo-build -G Ninja \ + cmake -S mlir/mlir-hlo -B $GITHUB_WORKSPACE/mhlo-build -G Ninja \ -DCMAKE_BUILD_TYPE=Release \ -DLLVM_ENABLE_ASSERTIONS=ON \ - -DMLIR_DIR=$GITHUB_WORKSPACE/llvm-build/lib/cmake/mlir \ + -DMLIR_DIR="$GITHUB_WORKSPACE/llvm-build/lib/cmake/mlir" \ -DPython3_EXECUTABLE=$(which python${{ matrix.python_version }}) \ -DLLVM_ENABLE_LLD=OFF \ -DLLVM_ENABLE_ZLIB=FORCE_ON \ -DLLVM_ENABLE_ZSTD=OFF \ -DCMAKE_CXX_VISIBILITY_PRESET=default - cmake --build mhlo-build --target check-mlir-hlo + cmake --build $GITHUB_WORKSPACE/mhlo-build --target check-mlir-hlo - name: Save MHLO Build id: save-mhlo-build if: steps.cache-mhlo-build.outputs.cache-hit != 'true' uses: actions/cache/save@v4 with: - path: mhlo-build + path: ${{ github.workspace }}/mhlo-build key: ${{ runner.os }}-${{ runner.arch }}-mhlo-${{ needs.constants.outputs.mhlo_version }}-wheel-build - name: Build Enzyme if: steps.cache-enzyme-build.outputs.cache-hit != 'true' run: | - cmake -S mlir/Enzyme/enzyme -B enzyme-build -G Ninja \ + cmake -S mlir/Enzyme/enzyme -B $GITHUB_WORKSPACE/enzyme-build -G Ninja \ -DCMAKE_BUILD_TYPE=Release \ - -DLLVM_DIR=$GITHUB_WORKSPACE/llvm-build/lib/cmake/llvm \ + -DLLVM_DIR="$GITHUB_WORKSPACE/llvm-build/lib/cmake/llvm" \ -DENZYME_STATIC_LIB=ON \ -DCMAKE_CXX_VISIBILITY_PRESET=default - cmake --build enzyme-build --target EnzymeStatic-19 + cmake --build $GITHUB_WORKSPACE/enzyme-build --target EnzymeStatic-19 - name: Save Enzyme Build id: save-enzyme-build if: steps.cache-enzyme-build.outputs.cache-hit != 'true' uses: actions/cache/save@v4 with: - path: enzyme-build + path: ${{ github.workspace }}/enzyme-build key: ${{ runner.os }}-${{ runner.arch }}-enzyme-${{ needs.constants.outputs.llvm_version }}-${{ needs.constants.outputs.enzyme_version }}-wheel-build catalyst-macos-wheels-arm64: @@ -262,7 +248,7 @@ jobs: id: cache-llvm-source uses: actions/cache/restore@v4 with: - path: mlir/llvm-project + path: ${{ github.workspace }}/mlir/llvm-project key: llvm-${{ needs.constants.outputs.llvm_version }}-default-source enableCrossOsArchive: True fail-on-cache-miss: True @@ -271,7 +257,7 @@ jobs: id: cache-llvm-build uses: actions/cache/restore@v4 with: - path: llvm-build + path: ${{ github.workspace }}/llvm-build key: ${{ runner.os }}-${{ runner.arch }}-llvm-${{ needs.constants.outputs.llvm_version }}-3.10-wheel-build fail-on-cache-miss: True @@ -279,7 +265,7 @@ jobs: id: cache-mhlo-source uses: 
actions/cache/restore@v4 with: - path: mlir/mlir-hlo + path: ${{ github.workspace }}/mlir/mlir-hlo key: mhlo-${{ needs.constants.outputs.mhlo_version }}-default-source enableCrossOsArchive: True fail-on-cache-miss: True @@ -288,7 +274,7 @@ jobs: id: cache-mhlo-build uses: actions/cache/restore@v4 with: - path: mhlo-build + path: ${{ github.workspace }}/mhlo-build key: ${{ runner.os }}-${{ runner.arch }}-mhlo-${{ needs.constants.outputs.mhlo_version }}-wheel-build fail-on-cache-miss: True @@ -296,7 +282,7 @@ jobs: id: cache-enzyme-source uses: actions/cache/restore@v4 with: - path: mlir/Enzyme + path: ${{ github.workspace }}/mlir/Enzyme key: enzyme-${{ needs.constants.outputs.enzyme_version }}-default-source enableCrossOsArchive: True fail-on-cache-miss: True @@ -305,7 +291,7 @@ jobs: id: cache-enzyme-build uses: actions/cache/restore@v4 with: - path: enzyme-build + path: ${{ github.workspace }}/enzyme-build key: ${{ runner.os }}-${{ runner.arch }}-enzyme-${{ needs.constants.outputs.llvm_version }}-${{ needs.constants.outputs.enzyme_version }}-wheel-build fail-on-cache-miss: True @@ -314,56 +300,56 @@ jobs: run: | # On GH images, gfortran is only available as a specific version. export FC=gfortran-14 - cmake -S runtime -B runtime-build -G Ninja \ + cmake -S runtime -B $GITHUB_WORKSPACE/runtime-build -G Ninja \ -DCMAKE_BUILD_TYPE=Release \ - -DCMAKE_LIBRARY_OUTPUT_DIRECTORY=$GITHUB_WORKSPACE/runtime-build/lib \ + -DCMAKE_LIBRARY_OUTPUT_DIRECTORY="$GITHUB_WORKSPACE/runtime-build/lib" \ -DPython_EXECUTABLE=$(which python${{ matrix.python_version }}) \ -DENABLE_OPENQASM=ON - cmake --build runtime-build --target rt_capi rtd_openqasm rtd_null_qubit + cmake --build $GITHUB_WORKSPACE/runtime-build --target rt_capi rtd_openqasm rtd_null_qubit - name: Test Catalyst-Runtime run: | python${{ matrix.python_version }} -m pip install 'amazon-braket-pennylane-plugin>1.27.1' - cmake --build runtime-build --target runner_tests_openqasm - ./runtime-build/tests/runner_tests_openqasm + cmake --build $GITHUB_WORKSPACE/runtime-build --target runner_tests_openqasm + $GITHUB_WORKSPACE/runtime-build/tests/runner_tests_openqasm # Build OQC-Runtime - name: Build OQC-Runtime run: | - OQC_BUILD_DIR=$GITHUB_WORKSPACE/oqc-build \ - RT_BUILD_DIR=$GITHUB_WORKSPACE/runtime-build \ + OQC_BUILD_DIR="$GITHUB_WORKSPACE/oqc-build" \ + RT_BUILD_DIR="$GITHUB_WORKSPACE/runtime-build" \ PYTHON=$(which python${{ matrix.python_version }}) \ make oqc # Build OQD-Runtime - name: Build OQD-Runtime run: | - OQD_BUILD_DIR=$GITHUB_WORKSPACE/oqd-build \ - RT_BUILD_DIR=$GITHUB_WORKSPACE/runtime-build \ + OQD_BUILD_DIR="$GITHUB_WORKSPACE/oqd-build" \ + RT_BUILD_DIR="$GITHUB_WORKSPACE/runtime-build" \ PYTHON=$(which python${{ matrix.python_version }}) \ make oqd # Build Quantum and Gradient Dialects - name: Build MLIR Dialects run: | - cmake -S mlir -B quantum-build -G Ninja \ + cmake -S mlir -B $GITHUB_WORKSPACE/quantum-build -G Ninja \ -DCMAKE_BUILD_TYPE=Release \ -DLLVM_ENABLE_ASSERTIONS=ON \ -DQUANTUM_ENABLE_BINDINGS_PYTHON=ON \ -DPython3_EXECUTABLE=$(which python${{ matrix.python_version }}) \ -DPython3_NumPy_INCLUDE_DIRS=$(python${{ matrix.python_version }} -c "import numpy as np; print(np.get_include())") \ - -DMLIR_DIR=$GITHUB_WORKSPACE/llvm-build/lib/cmake/mlir \ - -DMHLO_DIR=$GITHUB_WORKSPACE/mhlo-build/lib/cmake/mlir-hlo \ - -DMHLO_BINARY_DIR=$GITHUB_WORKSPACE/mhlo-build/bin \ - -DEnzyme_DIR=$GITHUB_WORKSPACE/enzyme-build \ - -DENZYME_SRC_DIR=$GITHUB_WORKSPACE/mlir/Enzyme \ + -DMLIR_DIR="$GITHUB_WORKSPACE/llvm-build/lib/cmake/mlir" \ 
+ -DMHLO_DIR="$GITHUB_WORKSPACE/mhlo-build/lib/cmake/mlir-hlo" \ + -DMHLO_BINARY_DIR="$GITHUB_WORKSPACE/mhlo-build/bin" \ + -DEnzyme_DIR="$GITHUB_WORKSPACE/enzyme-build" \ + -DENZYME_SRC_DIR="$GITHUB_WORKSPACE/mlir/Enzyme" \ -DLLVM_ENABLE_ZLIB=FORCE_ON \ -DLLVM_ENABLE_ZSTD=OFF \ -DLLVM_ENABLE_LLD=OFF \ - -DLLVM_DIR=$GITHUB_WORKSPACE/llvm-build/lib/cmake/llvm + -DLLVM_DIR="$GITHUB_WORKSPACE/llvm-build/lib/cmake/llvm" - cmake --build quantum-build --target check-dialects catalyst-cli + cmake --build $GITHUB_WORKSPACE/quantum-build --target check-dialects catalyst-cli - name: Build Plugin wheel # Run only on Thursday at the given time @@ -376,13 +362,13 @@ jobs: - name: Build wheel run: | PYTHON=python${{ matrix.python_version }} \ - LLVM_BUILD_DIR=$GITHUB_WORKSPACE/llvm-build \ - MHLO_BUILD_DIR=$GITHUB_WORKSPACE/mhlo-build \ - DIALECTS_BUILD_DIR=$GITHUB_WORKSPACE/quantum-build \ - RT_BUILD_DIR=$GITHUB_WORKSPACE/runtime-build \ - OQC_BUILD_DIR=$GITHUB_WORKSPACE/oqc-build \ - OQD_BUILD_DIR=$GITHUB_WORKSPACE/oqd-build \ - ENZYME_BUILD_DIR=$GITHUB_WORKSPACE/enzyme-build \ + LLVM_BUILD_DIR="$GITHUB_WORKSPACE/llvm-build" \ + MHLO_BUILD_DIR="$GITHUB_WORKSPACE/mhlo-build" \ + DIALECTS_BUILD_DIR="$GITHUB_WORKSPACE/quantum-build" \ + RT_BUILD_DIR="$GITHUB_WORKSPACE/runtime-build" \ + OQC_BUILD_DIR="$GITHUB_WORKSPACE/oqc-build" \ + OQD_BUILD_DIR="$GITHUB_WORKSPACE/oqd-build" \ + ENZYME_BUILD_DIR="$GITHUB_WORKSPACE/enzyme-build" \ make wheel - name: Repair wheel using delocate-wheel @@ -394,7 +380,7 @@ jobs: uses: actions/upload-artifact@v4 with: name: catalyst-macos_arm64-wheel-py-${{ matrix.python_version }}.zip - path: wheel/ + path: ${{ github.workspace }}/wheel/ retention-days: 14 - name: Upload Standalone Plugin Wheel Artifact @@ -403,7 +389,7 @@ jobs: uses: actions/upload-artifact@v4 with: name: standalone-plugin-macos_arm64-wheel-py-${{ matrix.python_version }}.zip - path: standalone_plugin_wheel/dist + path: ${{ github.workspace }}/standalone_plugin_wheel/dist retention-days: 14 test-wheels: @@ -432,7 +418,7 @@ jobs: uses: actions/download-artifact@v4 with: name: catalyst-macos_arm64-wheel-py-${{ matrix.python_version }}.zip - path: dist + path: ${{ github.workspace }}/dist - name: Download Standalone Plugin Wheel Artifact # Run only on Thursday at the given time @@ -440,7 +426,7 @@ jobs: uses: actions/download-artifact@v4 with: name: standalone-plugin-macos_arm64-wheel-py-${{ matrix.python_version }}.zip - path: standalone_plugin_wheel/wheel + path: ${{ github.workspace }}/standalone_plugin_wheel/wheel - name: Setup Python version # There are multiple Python versions installed on the GitHub image, 3.10 - 3.12 is already diff --git a/.github/workflows/build-wheel-macos-x86_64.yaml b/.github/workflows/build-wheel-macos-x86_64.yaml index 9e99f3b6c3..19a3f341f2 100644 --- a/.github/workflows/build-wheel-macos-x86_64.yaml +++ b/.github/workflows/build-wheel-macos-x86_64.yaml @@ -56,7 +56,7 @@ jobs: id: cache-llvm-source uses: actions/cache@v4 with: - path: mlir/llvm-project + path: ${{ github.workspace }}/mlir/llvm-project key: llvm-${{ needs.constants.outputs.llvm_version }}-default-source enableCrossOsArchive: True @@ -64,7 +64,7 @@ jobs: id: cache-mhlo-source uses: actions/cache@v4 with: - path: mlir/mlir-hlo + path: ${{ github.workspace }}/mlir/mlir-hlo key: mhlo-${{ needs.constants.outputs.mhlo_version }}-default-source enableCrossOsArchive: True @@ -72,7 +72,7 @@ jobs: id: cache-enzyme-source uses: actions/cache@v4 with: - path: mlir/Enzyme + path: ${{ github.workspace }}/mlir/Enzyme key: 
enzyme-${{ needs.constants.outputs.enzyme_version }}-default-source enableCrossOsArchive: True @@ -82,7 +82,7 @@ jobs: with: repository: llvm/llvm-project ref: ${{ needs.constants.outputs.llvm_version }} - path: mlir/llvm-project + path: ${{ github.workspace }}/mlir/llvm-project - name: Clone MHLO Submodule if: steps.cache-mhlo-source.outputs.cache-hit != 'true' @@ -90,7 +90,7 @@ jobs: with: repository: tensorflow/mlir-hlo ref: ${{ needs.constants.outputs.mhlo_version }} - path: mlir/mlir-hlo + path: ${{ github.workspace }}/mlir/mlir-hlo - name: Clone Enzyme Submodule if: steps.cache-enzyme-source.outputs.cache-hit != 'true' @@ -98,21 +98,21 @@ jobs: with: repository: EnzymeAD/Enzyme ref: ${{ needs.constants.outputs.enzyme_version }} - path: mlir/Enzyme + path: ${{ github.workspace }}/mlir/Enzyme # Cache external project builds - name: Restore LLVM Build id: cache-llvm-build uses: actions/cache/restore@v4 with: - path: llvm-build + path: ${{ github.workspace }}/llvm-build key: ${{ runner.os }}-${{ runner.arch }}-llvm-${{ needs.constants.outputs.llvm_version }}-${{matrix.python_version}}-wheel-build - name: Restore MHLO Build id: cache-mhlo-build uses: actions/cache/restore@v4 with: - path: mhlo-build + path: ${{ github.workspace }}/mhlo-build key: ${{ runner.os }}-${{ runner.arch }}-mhlo-${{ needs.constants.outputs.mhlo_version }}-wheel-build lookup-only: True @@ -120,7 +120,7 @@ jobs: id: cache-enzyme-build uses: actions/cache/restore@v4 with: - path: enzyme-build + path: ${{ github.workspace }}/enzyme-build key: ${{ runner.os }}-${{ runner.arch }}-enzyme-${{ needs.constants.outputs.llvm_version }}-${{ needs.constants.outputs.enzyme_version }}-wheel-build lookup-only: True @@ -136,32 +136,18 @@ jobs: - name: Build LLVM / MLIR if: steps.cache-llvm-build.outputs.cache-hit != 'true' run: | - cmake -S mlir/llvm-project/llvm -B llvm-build -G Ninja \ - -DCMAKE_BUILD_TYPE=Release \ - -DLLVM_BUILD_EXAMPLES=OFF \ - -DLLVM_TARGETS_TO_BUILD="host" \ - -DLLVM_ENABLE_PROJECTS="mlir" \ - -DLLVM_ENABLE_ASSERTIONS=ON \ - -DLLVM_INSTALL_UTILS=ON \ - -DLLVM_ENABLE_ZLIB=FORCE_ON \ - -DLLVM_ENABLE_ZSTD=OFF \ - -DLLVM_ENABLE_LLD=OFF \ - -DMLIR_ENABLE_BINDINGS_PYTHON=ON \ - -DPython3_EXECUTABLE=$(which python${{ matrix.python_version }}) \ - -DPython3_NumPy_INCLUDE_DIRS=$(python${{ matrix.python_version }} -c "import numpy as np; print(np.get_include())") \ - -DCMAKE_CXX_VISIBILITY_PRESET=default - - # TODO: when updating LLVM, test to see if mlir/unittests/Bytecode/BytecodeTest.cpp:55 is passing - # and remove filter - # This tests fails on CI/CD not locally. 
- LIT_FILTER_OUT="Bytecode" cmake --build llvm-build --target check-mlir + PYTHON=$(which python${{ matrix.python_version }}) \ + LLVM_BUILD_DIR="$GITHUB_WORKSPACE/llvm-build" \ + LLVM_TARGETS="check-mlir" \ + ENABLE_ZLIB=FORCE_ON \ + make llvm - name: Save LLVM Build id: save-llvm-build if: steps.cache-llvm-build.outputs.cache-hit != 'true' uses: actions/cache/save@v4 with: - path: llvm-build + path: ${{ github.workspace }}/llvm-build key: ${{ runner.os }}-${{ runner.arch }}-llvm-${{ needs.constants.outputs.llvm_version }}-${{matrix.python_version}}-wheel-build - name: Build MHLO Dialect @@ -173,43 +159,43 @@ jobs: export PATCH_FILE=mlir/patches/mhlo-Add-PassesIncGen-in-transforms-CMakeList.patch if patch --dry-run -p1 -N $TARGET_FILE $PATCH_FILE > /dev/null 2>&1; then patch -p1 $TARGET_FILE $PATCH_FILE; fi - cmake -S mlir/mlir-hlo -B mhlo-build -G Ninja \ + cmake -S mlir/mlir-hlo -B $GITHUB_WORKSPACE/mhlo-build -G Ninja \ -DCMAKE_BUILD_TYPE=Release \ -DLLVM_ENABLE_ASSERTIONS=ON \ - -DMLIR_DIR=$GITHUB_WORKSPACE/llvm-build/lib/cmake/mlir \ + -DMLIR_DIR="$GITHUB_WORKSPACE/llvm-build/lib/cmake/mlir" \ -DPython3_EXECUTABLE=$(which python${{ matrix.python_version }}) \ -DLLVM_ENABLE_LLD=OFF \ -DLLVM_ENABLE_ZLIB=FORCE_ON \ -DLLVM_ENABLE_ZSTD=OFF \ -DCMAKE_CXX_VISIBILITY_PRESET=default - cmake --build mhlo-build --target check-mlir-hlo + cmake --build $GITHUB_WORKSPACE/mhlo-build --target check-mlir-hlo - name: Save MHLO Build id: save-mhlo-build if: steps.cache-mhlo-build.outputs.cache-hit != 'true' uses: actions/cache/save@v4 with: - path: mhlo-build + path: ${{ github.workspace }}/mhlo-build key: ${{ runner.os }}-${{ runner.arch }}-mhlo-${{ needs.constants.outputs.mhlo_version }}-wheel-build - name: Build Enzyme if: steps.cache-enzyme-build.outputs.cache-hit != 'true' run: | - cmake -S mlir/Enzyme/enzyme -B enzyme-build -G Ninja \ + cmake -S mlir/Enzyme/enzyme -B $GITHUB_WORKSPACE/enzyme-build -G Ninja \ -DCMAKE_BUILD_TYPE=Release \ - -DLLVM_DIR=$GITHUB_WORKSPACE/llvm-build/lib/cmake/llvm \ + -DLLVM_DIR="$GITHUB_WORKSPACE/llvm-build/lib/cmake/llvm" \ -DENZYME_STATIC_LIB=ON \ -DCMAKE_CXX_VISIBILITY_PRESET=default - cmake --build enzyme-build --target EnzymeStatic-19 + cmake --build $GITHUB_WORKSPACE/enzyme-build --target EnzymeStatic-19 - name: Save Enzyme Build id: save-enzyme-build if: steps.cache-enzyme-build.outputs.cache-hit != 'true' uses: actions/cache/save@v4 with: - path: enzyme-build + path: ${{ github.workspace }}/enzyme-build key: ${{ runner.os }}-${{ runner.arch }}-enzyme-${{ needs.constants.outputs.llvm_version }}-${{ needs.constants.outputs.enzyme_version }}-wheel-build catalyst-macos-wheels-x86-64: @@ -239,7 +225,7 @@ jobs: id: cache-llvm-source uses: actions/cache/restore@v4 with: - path: mlir/llvm-project + path: ${{ github.workspace }}/mlir/llvm-project key: llvm-${{ needs.constants.outputs.llvm_version }}-default-source enableCrossOsArchive: True fail-on-cache-miss: True @@ -248,7 +234,7 @@ jobs: id: cache-llvm-build uses: actions/cache/restore@v4 with: - path: llvm-build + path: ${{ github.workspace }}/llvm-build key: ${{ runner.os }}-${{ runner.arch }}-llvm-${{ needs.constants.outputs.llvm_version }}-3.10-wheel-build fail-on-cache-miss: True @@ -256,7 +242,7 @@ jobs: id: cache-mhlo-source uses: actions/cache/restore@v4 with: - path: mlir/mlir-hlo + path: ${{ github.workspace }}/mlir/mlir-hlo key: mhlo-${{ needs.constants.outputs.mhlo_version }}-default-source enableCrossOsArchive: True fail-on-cache-miss: True @@ -265,7 +251,7 @@ jobs: id: cache-mhlo-build uses: 
actions/cache/restore@v4 with: - path: mhlo-build + path: ${{ github.workspace }}/mhlo-build key: ${{ runner.os }}-${{ runner.arch }}-mhlo-${{ needs.constants.outputs.mhlo_version }}-wheel-build fail-on-cache-miss: True @@ -273,7 +259,7 @@ jobs: id: cache-enzyme-source uses: actions/cache/restore@v4 with: - path: mlir/Enzyme + path: ${{ github.workspace }}/mlir/Enzyme key: enzyme-${{ needs.constants.outputs.enzyme_version }}-default-source enableCrossOsArchive: True fail-on-cache-miss: True @@ -282,7 +268,7 @@ jobs: id: cache-enzyme-build uses: actions/cache/restore@v4 with: - path: enzyme-build + path: ${{ github.workspace }}/enzyme-build key: ${{ runner.os }}-${{ runner.arch }}-enzyme-${{ needs.constants.outputs.llvm_version }}-${{ needs.constants.outputs.enzyme_version }}-wheel-build fail-on-cache-miss: True @@ -290,75 +276,74 @@ jobs: - name: Build Catalyst-Runtime run: | # Segfaults in computing Lightning's adjoint-jacobian when building with OMP - cmake -S runtime -B runtime-build -G Ninja \ + cmake -S runtime -B $GITHUB_WORKSPACE/runtime-build -G Ninja \ -DCMAKE_BUILD_TYPE=Release \ - -DCMAKE_LIBRARY_OUTPUT_DIRECTORY=$GITHUB_WORKSPACE/runtime-build/lib \ + -DCMAKE_LIBRARY_OUTPUT_DIRECTORY="$GITHUB_WORKSPACE/runtime-build/lib" \ -DPython_EXECUTABLE=$(which python${{ matrix.python_version }}) \ -DENABLE_OPENQASM=ON - cmake --build runtime-build --target rt_capi rtd_openqasm rtd_null_qubit + cmake --build $GITHUB_WORKSPACE/runtime-build --target rt_capi rtd_openqasm rtd_null_qubit # Build OQC-Runtime - name: Build OQC-Runtime run: | - OQC_BUILD_DIR="$(pwd)/oqc-build" \ - RT_BUILD_DIR="$(pwd)/runtime-build" \ + OQC_BUILD_DIR="$GITHUB_WORKSPACE/oqc-build" \ + RT_BUILD_DIR="$GITHUB_WORKSPACE/runtime-build" \ PYTHON=$(which python${{ matrix.python_version }}) \ make oqc # Build OQD-Runtime - name: Build OQD-Runtime run: | - OQD_BUILD_DIR="$(pwd)/oqd-build" \ - RT_BUILD_DIR="$(pwd)/runtime-build" \ + OQD_BUILD_DIR="$GITHUB_WORKSPACE/oqd-build" \ + RT_BUILD_DIR="$GITHUB_WORKSPACE/runtime-build" \ PYTHON=$(which python${{ matrix.python_version }}) \ make oqd - name: Test Catalyst-Runtime run: | python${{ matrix.python_version }} -m pip install 'amazon-braket-pennylane-plugin>1.27.1' - cmake --build runtime-build --target runner_tests_openqasm + cmake --build $GITHUB_WORKSPACE/runtime-build --target runner_tests_openqasm ./runtime-build/tests/runner_tests_openqasm # Build Quantum and Gradient Dialects - name: Build MLIR Dialects run: | - cmake -S mlir -B quantum-build -G Ninja \ + cmake -S mlir -B $GITHUB_WORKSPACE/quantum-build -G Ninja \ -DCMAKE_BUILD_TYPE=Release \ -DLLVM_ENABLE_ASSERTIONS=ON \ -DQUANTUM_ENABLE_BINDINGS_PYTHON=ON \ -DPython3_EXECUTABLE=$(which python${{ matrix.python_version }}) \ -DPython3_NumPy_INCLUDE_DIRS=$(python${{ matrix.python_version }} -c "import numpy as np; print(np.get_include())") \ - -DMLIR_DIR=$GITHUB_WORKSPACE/llvm-build/lib/cmake/mlir \ - -DMHLO_DIR=$GITHUB_WORKSPACE/mhlo-build/lib/cmake/mlir-hlo \ - -DMHLO_BINARY_DIR=$GITHUB_WORKSPACE/mhlo-build/bin \ - -DEnzyme_DIR=$GITHUB_WORKSPACE/enzyme-build \ - -DENZYME_SRC_DIR=$GITHUB_WORKSPACE/mlir/Enzyme \ + -DMLIR_DIR="$GITHUB_WORKSPACE/llvm-build/lib/cmake/mlir" \ + -DMHLO_DIR="$GITHUB_WORKSPACE/mhlo-build/lib/cmake/mlir-hlo" \ + -DMHLO_BINARY_DIR="$GITHUB_WORKSPACE/mhlo-build/bin" \ + -DEnzyme_DIR="$GITHUB_WORKSPACE/enzyme-build" \ + -DENZYME_SRC_DIR="$GITHUB_WORKSPACE/mlir/Enzyme" \ -DLLVM_ENABLE_ZLIB=FORCE_ON \ -DLLVM_ENABLE_ZSTD=OFF \ -DLLVM_ENABLE_LLD=OFF - cmake --build quantum-build --target 
check-dialects catalyst-cli + cmake --build $GITHUB_WORKSPACE/quantum-build --target check-dialects catalyst-cli - name: Build Plugin wheel # Run only on Thursday at the given time if: github.event.schedule == '35 4 * * 4' run: | - CCACHE_DIR="$(pwd)/.ccache" \ LLVM_BUILD_DIR="$GITHUB_WORKSPACE/llvm-build" \ - MLIR_DIR="$(pwd)/llvm-build/lib/cmake/mlir" \ + MLIR_DIR="$GITHUB_WORKSPACE/llvm-build/lib/cmake/mlir" \ make plugin-wheel - name: Build wheel run: | PYTHON=python${{ matrix.python_version }} \ - LLVM_BUILD_DIR=$GITHUB_WORKSPACE/llvm-build \ - MHLO_BUILD_DIR=$GITHUB_WORKSPACE/mhlo-build \ - DIALECTS_BUILD_DIR=$GITHUB_WORKSPACE/quantum-build \ - RT_BUILD_DIR=$GITHUB_WORKSPACE/runtime-build \ - OQC_BUILD_DIR=$GITHUB_WORKSPACE/oqc-build \ - OQD_BUILD_DIR=$GITHUB_WORKSPACE/oqd-build \ - ENZYME_BUILD_DIR=$GITHUB_WORKSPACE/enzyme-build \ + LLVM_BUILD_DIR="$GITHUB_WORKSPACE/llvm-build" \ + MHLO_BUILD_DIR="$GITHUB_WORKSPACE/mhlo-build" \ + DIALECTS_BUILD_DIR="$GITHUB_WORKSPACE/quantum-build" \ + RT_BUILD_DIR="$GITHUB_WORKSPACE/runtime-build" \ + OQC_BUILD_DIR="$GITHUB_WORKSPACE/oqc-build" \ + OQD_BUILD_DIR="$GITHUB_WORKSPACE/oqd-build" \ + ENZYME_BUILD_DIR="$GITHUB_WORKSPACE/enzyme-build" \ make wheel - name: Repair wheel using delocate-wheel @@ -370,7 +355,7 @@ jobs: uses: actions/upload-artifact@v4 with: name: catalyst-macos_x86_64-wheel-py-${{ matrix.python_version }}.zip - path: wheel/ + path: ${{ github.workspace }}/wheel/ retention-days: 14 - name: Upload Standalone Plugin Wheel Artifact @@ -379,7 +364,7 @@ jobs: uses: actions/upload-artifact@v4 with: name: standalone-plugin-macos_x86_64-wheel-py-${{ matrix.python_version }}.zip - path: standalone_plugin_wheel/dist + path: ${{ github.workspace }}/standalone_plugin_wheel/dist retention-days: 14 test-wheels: @@ -401,7 +386,7 @@ jobs: uses: actions/download-artifact@v4 with: name: catalyst-macos_x86_64-wheel-py-${{ matrix.python_version }}.zip - path: dist + path: ${{ github.workspace }}/dist - name: Download Standalone Plugin Wheel Artifact # Run only on Thursday at the given time @@ -409,7 +394,7 @@ jobs: uses: actions/download-artifact@v4 with: name: standalone-plugin-macos_x86_64-wheel-py-${{ matrix.python_version }}.zip - path: standalone_plugin_wheel/wheel + path: ${{ github.workspace }}/standalone_plugin_wheel/wheel - name: Set up Python ${{ matrix.python_version }} uses: actions/setup-python@v5 diff --git a/.github/workflows/scripts/linux_arm64/rh8/build_catalyst.sh b/.github/workflows/scripts/linux_arm64/rh8/build_catalyst.sh index 488cf37b4b..9014e2af81 100644 --- a/.github/workflows/scripts/linux_arm64/rh8/build_catalyst.sh +++ b/.github/workflows/scripts/linux_arm64/rh8/build_catalyst.sh @@ -55,7 +55,7 @@ export OQD_BUILD_DIR="/catalyst/oqd-build" make oqd # Build Catalyst dialects -cmake -S mlir -B quantum-build -G Ninja \ +cmake -S mlir -B /catalyst/quantum-build -G Ninja \ -DCMAKE_BUILD_TYPE=Release \ -DLLVM_ENABLE_ASSERTIONS=ON \ -DQUANTUM_ENABLE_BINDINGS_PYTHON=ON \ @@ -70,7 +70,7 @@ cmake -S mlir -B quantum-build -G Ninja \ -DLLVM_ENABLE_ZSTD=OFF \ -DLLVM_ENABLE_LLD=ON \ -DLLVM_DIR=/catalyst/llvm-build/lib/cmake/llvm -cmake --build quantum-build --target check-dialects catalyst-cli +cmake --build /catalyst/quantum-build --target check-dialects catalyst-cli # Copy files needed for the wheel where they are expected cp /catalyst/runtime-build/lib/*/*/*/*/librtd* /catalyst/runtime-build/lib diff --git a/.github/workflows/scripts/linux_arm64/rh8/build_lld.sh b/.github/workflows/scripts/linux_arm64/rh8/build_lld.sh deleted 
file mode 100644 index e6522c443b..0000000000 --- a/.github/workflows/scripts/linux_arm64/rh8/build_lld.sh +++ /dev/null @@ -1,41 +0,0 @@ -#!/usr/bin/env bash - -set -e -x -cd /catalyst - -# Process args -export GCC_VERSION=$1 -export PYTHON_VERSION=$2 -export PYTHON_SUBVERSION=$3 -export PYTHON_PACKAGE=$4 - -# Install system dependencies -dnf update -y -dnf install -y libzstd-devel gcc-toolset-${GCC_VERSION} -if [ "$PYTHON_VERSION" != "3.10" ]; then - dnf install -y ${PYTHON_PACKAGE} ${PYTHON_PACKAGE}-devel -fi -dnf clean all -y - -# Make GCC the default compiler -source /opt/rh/gcc-toolset-${GCC_VERSION}/enable -y -export C_COMPILER=/opt/rh/gcc-toolset-${GCC_VERSION}/root/usr/bin/gcc -export CXX_COMPILER=/opt/rh/gcc-toolset-${GCC_VERSION}/root/usr/bin/g++ - -# Set the right Python interpreter -rm -rf /usr/bin/python3 -ln -s /opt/_internal/cpython-${PYTHON_VERSION}.${PYTHON_SUBVERSION}/bin/python3 /usr/bin/python3 -export PYTHON=/usr/bin/python3 - -# Add Python and GCC to the PATH env var -export PATH=/opt/_internal/cpython-${PYTHON_VERSION}.${PYTHON_SUBVERSION}/bin:/opt/rh/gcc-toolset-${GCC_VERSION}/root/usr/bin:$PATH - -# Install python dependencies -/usr/bin/python3 -m pip install pennylane pybind11 PyYAML cmake ninja - -cmake -S /catalyst/mlir/llvm-project/llvm -B llvm-build -G Ninja \ - -DCMAKE_BUILD_TYPE=Release \ - -DLLVM_TARGETS_TO_BUILD="host" \ - -DLLVM_ENABLE_PROJECTS="lld" - -cmake --build /catalyst/llvm-build --target lld diff --git a/.github/workflows/scripts/linux_arm64/rh8/build_llvm.sh b/.github/workflows/scripts/linux_arm64/rh8/build_llvm.sh index 67082dc4c4..804d4e8431 100644 --- a/.github/workflows/scripts/linux_arm64/rh8/build_llvm.sh +++ b/.github/workflows/scripts/linux_arm64/rh8/build_llvm.sh @@ -31,22 +31,12 @@ export PYTHON=/usr/bin/python3 export PATH=/opt/_internal/cpython-${PYTHON_VERSION}.${PYTHON_SUBVERSION}/bin:/opt/rh/gcc-toolset-${GCC_VERSION}/root/usr/bin:/catalyst/llvm-build/bin:$PATH # Install python dependencies -/usr/bin/python3 -m pip install pennylane pybind11 PyYAML cmake ninja +/usr/bin/python3 -m pip install numpy pybind11 PyYAML cmake ninja # Build LLVM -cmake -S /catalyst/mlir/llvm-project/llvm -B /catalyst/llvm-build -G Ninja \ - -DCMAKE_BUILD_TYPE=Release \ - -DLLVM_BUILD_EXAMPLES=OFF \ - -DLLVM_TARGETS_TO_BUILD="host" \ - -DLLVM_ENABLE_PROJECTS="mlir" \ - -DLLVM_ENABLE_ASSERTIONS=ON \ - -DLLVM_INSTALL_UTILS=ON \ - -DLLVM_ENABLE_ZLIB=FORCE_ON \ - -DLLVM_ENABLE_ZSTD=OFF \ - -DLLVM_ENABLE_LLD=ON \ - -DMLIR_ENABLE_BINDINGS_PYTHON=ON \ - -DPython3_EXECUTABLE=/usr/bin/python3 \ - -DPython3_NumPy_INCLUDE_DIRS=/opt/_internal/cpython-${PYTHON_VERSION}.${PYTHON_SUBVERSION}/lib/python${PYTHON_VERSION}/site-packages/numpy/core/include \ - -DCMAKE_CXX_VISIBILITY_PRESET=default - -LIT_FILTER_OUT="Bytecode|tosa-to-tensor" cmake --build /catalyst/llvm-build --target check-mlir llvm-symbolizer +export LLVM_BUILD_DIR="/catalyst/llvm-build" +export LLVM_PROJECTS="lld;mlir" +export LLVM_TARGETS="lld check-mlir" +export ENABLE_LLD=OFF +export ENABLE_ZLIB=FORCE_ON +make llvm diff --git a/.github/workflows/scripts/linux_arm64/rh8/test_wheels.sh b/.github/workflows/scripts/linux_arm64/rh8/test_wheels.sh index 531dbcce1e..398d23e113 100644 --- a/.github/workflows/scripts/linux_arm64/rh8/test_wheels.sh +++ b/.github/workflows/scripts/linux_arm64/rh8/test_wheels.sh @@ -10,7 +10,7 @@ export PYTHON_PATCH=$3 export PYTHON_PACKAGE=$4 # Install system dependencies (gcc gives access to c99, which is needed by some tests) -dnf update -y +dnf update -y dnf install -y 
libzstd-devel gcc-toolset-${GCC_VERSION} gcc if [ "$PYTHON_MAJOR_MINOR" != "3.10" ]; then dnf install -y ${PYTHON_PACKAGE} @@ -18,8 +18,8 @@ fi dnf clean all -y # Make GCC the default compiler -source /opt/rh/gcc-toolset-${GCC_VERSION}/enable -y -export C_COMPILER=/opt/rh/gcc-toolset-${GCC_VERSION}/root/usr/bin/gcc +source /opt/rh/gcc-toolset-${GCC_VERSION}/enable -y +export C_COMPILER=/opt/rh/gcc-toolset-${GCC_VERSION}/root/usr/bin/gcc export CXX_COMPILER=/opt/rh/gcc-toolset-${GCC_VERSION}/root/usr/bin/g++ # Set the right Python interpreter @@ -27,10 +27,6 @@ rm -rf /usr/bin/python3 ln -s /opt/_internal/cpython-${PYTHON_MAJOR_MINOR}.${PYTHON_PATCH}/bin/python3 /usr/bin/python3 export PYTHON=/usr/bin/python3 -# Set llvm-symbolizer -ls -la /catalyst/llvm-build/bin/llvm-symbolizer -export LLVM_SYMBOLIZER_PATH=/catalyst/llvm-build/bin/llvm-symbolizer - # Add LLVM, Python and GCC to the PATH env var export PATH=/catalyst/llvm-build/bin:/opt/_internal/cpython-${PYTHON_MAJOR_MINOR}.${PYTHON_PATCH}/bin:/opt/rh/gcc-toolset-${GCC_VERSION}/root/usr/bin:$PATH diff --git a/mlir/Makefile b/mlir/Makefile index aebb37294c..5951415768 100644 --- a/mlir/Makefile +++ b/mlir/Makefile @@ -12,8 +12,6 @@ ENZYME_BUILD_DIR?=$(MK_DIR)/Enzyme/build RT_BUILD_DIR?=$(MK_DIR)/../runtime/build ENABLE_ASAN?=OFF BUILD_TYPE?=Release -TARGET_FILE=$(MK_DIR)/mlir-hlo/mhlo/transforms/CMakeLists.txt -PATCH_FILE=$(MK_DIR)/patches/mhlo-Add-PassesIncGen-in-transforms-CMakeList.patch LLVM_EXTERNAL_LIT ?= $(LLVM_BUILD_DIR)/bin/llvm-lit ifeq ($(shell uname), Darwin) @@ -36,6 +34,9 @@ USE_SANITIZER_NAMES="" USE_SANITIZER_FLAGS="" endif +LLVM_PROJECTS ?= mlir +LLVM_TARGETS ?= check-mlir llvm-symbolizer + .PHONY: help help: @echo "Please use \`make ' where is one of" @@ -53,13 +54,20 @@ help: all: llvm mhlo enzyme dialects plugin .PHONY: llvm +llvm: TARGET_FILE := $(MK_DIR)/llvm-project/mlir/lib/Dialect/Bufferization/Transforms/BufferDeallocation.cpp +llvm: PATCH_FILE := $(MK_DIR)/patches/mlir-buffer-deallocation.patch llvm: @echo "build LLVM and MLIR enabling Python bindings" + # Patch in MLIR buffer deallocation bugfix + # TODO: remove once https://github.com/llvm/llvm-project/pull/121582 is merged & the dep updated + @if patch --dry-run -p1 -N $(TARGET_FILE) $(PATCH_FILE) > /dev/null 2>&1; then \ + patch -p1 $(TARGET_FILE) $(PATCH_FILE); \ + fi cmake -G Ninja -S llvm-project/llvm -B $(LLVM_BUILD_DIR) \ -DCMAKE_BUILD_TYPE=$(BUILD_TYPE) \ -DLLVM_BUILD_EXAMPLES=OFF \ -DLLVM_TARGETS_TO_BUILD="host" \ - -DLLVM_ENABLE_PROJECTS="mlir" \ + -DLLVM_ENABLE_PROJECTS="$(LLVM_PROJECTS)" \ -DLLVM_ENABLE_ASSERTIONS=ON \ -DMLIR_ENABLE_BINDINGS_PYTHON=ON \ -DPython3_EXECUTABLE=$(PYTHON) \ @@ -75,10 +83,12 @@ llvm: -DCMAKE_CXX_VISIBILITY_PRESET=$(SYMBOL_VISIBILITY) # TODO: when updating LLVM, test to see if mlir/unittests/Bytecode/BytecodeTest.cpp:55 is passing - # and remove filter - LIT_FILTER_OUT="Bytecode" cmake --build $(LLVM_BUILD_DIR) --target check-mlir llvm-symbolizer + # and remove filter. This tests fails on CI/CD not locally. 
+ LIT_FILTER_OUT="Bytecode|tosa-to-tensor" cmake --build $(LLVM_BUILD_DIR) --target $(LLVM_TARGETS) .PHONY: mhlo +mhlo: TARGET_FILE := $(MK_DIR)/mlir-hlo/mhlo/transforms/CMakeLists.txt +mhlo: PATCH_FILE := $(MK_DIR)/patches/mhlo-Add-PassesIncGen-in-transforms-CMakeList.patch mhlo: @echo "build MLIR-HLO" # Patch MHLO cmake dependency diff --git a/mlir/patches/mlir-buffer-deallocation.patch b/mlir/patches/mlir-buffer-deallocation.patch new file mode 100644 index 0000000000..852e6e84c5 --- /dev/null +++ b/mlir/patches/mlir-buffer-deallocation.patch @@ -0,0 +1,14 @@ +diff --git a/mlir/llvm-project/mlir/lib/Dialect/Bufferization/Transforms/BufferDeallocation.cpp b/mlir/llvm-project/mlir/lib/Dialect/Bufferization/Transforms/BufferDeallocation.cpp +index a0a81d4add..7b7be9e577 100644 +--- a/mlir/llvm-project/mlir/lib/Dialect/Bufferization/Transforms/BufferDeallocation.cpp ++++ b/mlir/llvm-project/mlir/lib/Dialect/Bufferization/Transforms/BufferDeallocation.cpp +@@ -308,6 +308,9 @@ private: + + // Add new allocs and additional clone operations. + for (Value value : valuesToFree) { ++ if (!isa(value.getType())) { ++ continue; ++ } + if (failed(isa(value) + ? introduceBlockArgCopy(cast(value)) + : introduceValueCopyForRegionResult(value))) From 99e4cc2a0d8e5e7ef284c05efd4fbae281b1efe4 Mon Sep 17 00:00:00 2001 From: erick-xanadu <110487834+erick-xanadu@users.noreply.github.com> Date: Wed, 8 Jan 2025 13:23:05 -0500 Subject: [PATCH 05/10] [Documentation][Frontend] Documentation for plugins and small improvement (#1404) **Context:** The documentation for MLIR plugins did not state anything about `entry_points` nor relevant functions when loading MLIR plugins from python. Also changed the type of the `pass_pipeline` kwarg. Instead of it being a tuple of Passes, it is now a list of Passes. This is nicer for developers. **Description of the Change:** The `pass_pipeline` keyword argument is now a list instead of a tuple. This list is converted into a tuple at the moment the qnode primitive is bound. Added documentation for MLIR plugins. --------- Co-authored-by: Isaac De Vlugt <34751083+isaacdevlugt@users.noreply.github.com> --- doc/dev/plugins.rst | 82 ++++++++++++++-- frontend/catalyst/__init__.py | 6 +- frontend/catalyst/passes.py | 93 +++++++++++++++---- frontend/catalyst/qfunc.py | 8 +- .../test/pytest/test_mlir_plugin_interface.py | 29 ++++++ 5 files changed, 190 insertions(+), 28 deletions(-) create mode 100644 frontend/test/pytest/test_mlir_plugin_interface.py diff --git a/doc/dev/plugins.rst b/doc/dev/plugins.rst index 3a368c171f..efa9b98c46 100644 --- a/doc/dev/plugins.rst +++ b/doc/dev/plugins.rst @@ -33,7 +33,7 @@ For example using ``quantum-opt --help`` while loading your pass plugin will ena .. code-block:: - --standalone-switch-bar-foo - Switches the name of a FuncOp named `bar` to `foo` and folds. + --standalone-switch-bar-foo - Switches the name of a FuncOp named `bar` to `foo` and folds. Taking into account the description of the pass ``standalone-switch-bar-foo``, let's write the most minimal program that would be transformed by this transformation. @@ -334,20 +334,90 @@ From here, you can change the name of the pass, change the name of the shared ob Now that you have your ``StandalonePlugin.so``, you can ship it in a python wheel. To allow users to run your pass, we have provided a class called :class:`~.passes.Pass` and :class:`~.passes.PassPlugin`. You can extend these classes and allow the user to import your derived classes and run passes as a decorator. 
-For example: +We provide the :func:`~.passes.apply_pass_plugin` decorator to allow pass plugins to be loaded and executed. +See for example: .. code-block:: python - @SwitchBarToFoo + from standalone import getStandalonePluginAbsolutePath + + @apply_pass_plugin(getStandalonePluginAbsolutePath(), "standalone-switch-bar-foo") @qml.qnode(qml.device("lightning.qubit", wires=1)) def qnode(): return qml.state() - @qml.qjit + @qml.qjit(target="mlir") def module(): return qnode() -If you inspect the MLIR sources, you'll find that the number of qubits allocated will be 42. + print(module.mlir) + + +If you have followed all the steps in this tutorial and inspect the MLIR sources, you'll find that the number of qubits allocated will be 42. Take a look into the ``standalone_plugin_wheel`` make rule to see how we test shipping a plugin. -For more information, please consult our `dialect guide <../dev/dialects.html>`_, our `compiler passes guide <../dev/transforms.html>`_, and the `MLIR documentation `_. +For more information, please consult our :doc:`dialect guide `_, our `compiler passes guide :doc:`_, and the `MLIR documentation `_. + +You can also register your pass with Catalyst via Python's `entry_points `_ (for reference, we have an `example in the Catalyst Github repository `_ +that implements the standalone plugin as a Python package). +To do this, you only need to define a function named ``name2pass``—it must be named ``name2pass``—that takes a string with the name of the pass (from the user perspective) and returns the absolute path to the plugin stored in your package and the name of the MLIR pass. +For the `standalone plugin python `_ package we defined: + +.. code-block:: python + + def name2pass(_name): + """Example entry point for standalone plugin""" + + return getStandalonePluginAbsolutePath(), "standalone-switch-bar-foo" + +You will also need to modify your setup to include the ``entry_points``. +See our ``setup.py`` `file in the standalone plugin python package `_. + +.. code-block:: python + + entry_points = { + "catalyst.passes_resolution": [ + "standalone.passes = standalone_plugin", + ], + } + + setup( + name="standalone_plugin", + version="0.1.0", + # ... + entry_points=entry_points, + # ... + ) + +After this, the user will be able to use your pass with the :func:`~.passes.apply_pass` function. + +.. code-block:: python + + @apply_pass("standalone.standalone-switch-bar-foo") + @qml.qnode(qml.device("lightning.qubit", wires=1)) + def qnode(): + return qml.state() + + @qml.qjit(target="mlir") + def module(): + return qnode() + + print(module.mlir) + +Of course, you can also define your own decorators similar to :func:`~.passes.apply_pass` to check parameters, do some other validation or perhaps just to improve the user interface. +For example: + + +.. 
code-block:: python + + from standalone import SwitchBarToFoo + + @SwitchBarToFoo + @qml.qnode(qml.device("lightning.qubit", wires=1)) + def qnode(): + return qml.state() + + @qml.qjit(target="mlir") + def module(): + return qnode() + print(module.mlir) diff --git a/frontend/catalyst/__init__.py b/frontend/catalyst/__init__.py index 02802dba65..e0a39e175d 100644 --- a/frontend/catalyst/__init__.py +++ b/frontend/catalyst/__init__.py @@ -89,7 +89,7 @@ from catalyst.compiler import CompileOptions from catalyst.debug.assertion import debug_assert from catalyst.jit import QJIT, qjit -from catalyst.passes import pipeline +from catalyst.passes import Pass, PassPlugin, apply_pass, apply_pass_plugin, pipeline from catalyst.utils.exceptions import ( AutoGraphError, CompileError, @@ -187,7 +187,11 @@ "debug_assert", "CompileOptions", "debug", + "apply_pass", + "apply_pass_plugin", "pipeline", + "Pass", + "PassPlugin", *_api_extension_list, *_autograph_functions, ) diff --git a/frontend/catalyst/passes.py b/frontend/catalyst/passes.py index c163e444f6..498bba016a 100644 --- a/frontend/catalyst/passes.py +++ b/frontend/catalyst/passes.py @@ -82,17 +82,20 @@ def __init__( super().__init__(name, *options, **valued_options) -def dictionary_to_tuple_of_passes(pass_pipeline: PipelineDict): - """Convert dictionary of passes into tuple of passes""" +def dictionary_to_list_of_passes(pass_pipeline: PipelineDict): + """Convert dictionary of passes into list of passes""" + + if pass_pipeline == None: + return [] if type(pass_pipeline) != dict: return pass_pipeline - passes = tuple() + passes = [] pass_names = _API_name_to_pass_name() for API_name, pass_options in pass_pipeline.items(): name = pass_names.get(API_name, API_name) - passes += (Pass(name, **pass_options),) + passes.append(Pass(name, **pass_options)) return passes @@ -195,8 +198,8 @@ def _decorator(qnode=None): @functools.wraps(clone) def wrapper(*args, **kwargs): if EvaluationContext.is_tracing(): - passes = kwargs.pop("pass_pipeline", tuple()) - passes += dictionary_to_tuple_of_passes(pass_pipeline) + passes = kwargs.pop("pass_pipeline", []) + passes += dictionary_to_list_of_passes(pass_pipeline) kwargs["pass_pipeline"] = passes return clone(*args, **kwargs) @@ -323,16 +326,40 @@ def circuit(x: float): @functools.wraps(clone) def wrapper(*args, **kwargs): - pass_pipeline = kwargs.pop("pass_pipeline", tuple()) - pass_pipeline += (Pass("remove-chained-self-inverse"),) + pass_pipeline = kwargs.pop("pass_pipeline", []) + pass_pipeline.append(Pass("remove-chained-self-inverse")) kwargs["pass_pipeline"] = pass_pipeline return clone(*args, **kwargs) return wrapper -def apply_pass(pass_name, *flags, **valued_options): - """Applies a single pass to the qnode""" +def apply_pass(pass_name: str, *flags, **valued_options): + """ + Applies a single pass to the QNode, where the pass is from Catalyst or a third-party + if `entry_points` has been implemented. See :doc:`the compiler plugin documentation ` + for more details. + + Args: + pass_name (str): Name of the pass + *flags: Pass options + **valued_options: options with values + + Returns: + Function that can be used as a decorator to a QNode. + E.g., + + .. 
code-block:: python + + @apply_pass("merge-rotations") + @qml.qnode(qml.device("lightning.qubit", wires=1)) + def qnode(): + return qml.state() + + @qml.qjit(target="mlir") + def module(): + return qnode() + """ def decorator(qnode): @@ -353,8 +380,40 @@ def qnode_call(*args, **kwargs): return decorator -def apply_pass_plugin(plugin_name, pass_name, *flags, **valued_options): - """Applies a pass plugin""" +def apply_pass_plugin(path_to_plugin: str | Path, pass_name: str, *flags, **valued_options): + """ + Applies a pass plugin to the QNode. See :doc:`the compiler plugin documentation ` + for more details. + + Args: + path_to_plugin (str | Path): full path to plugin + pass_name (str): Name of the pass + *flags: Pass options + **valued_options: options with values + + Returns: + Function that can be used as a decorator to a QNode. + E.g., + + .. code-block:: python + + from standalone import getStandalonePluginAbsolutePath + + @apply_pass_plugin(getStandalonePluginAbsolutePath(), "standalone-switch-bar-foo") + @qml.qnode(qml.device("lightning.qubit", wires=1)) + def qnode(): + return qml.state() + + @qml.qjit(target="mlir") + def module(): + return qnode() + """ + + if not isinstance(path_to_plugin, Path): + path_to_plugin = Path(path_to_plugin) + + if not path_to_plugin.exists(): + raise FileNotFoundError(f"File '{path_to_plugin}' does not exist.") def decorator(qnode): if not isinstance(qnode, qml.QNode): @@ -365,7 +424,7 @@ def decorator(qnode): def qnode_call(*args, **kwargs): pass_pipeline = kwargs.get("pass_pipeline", []) - pass_pipeline.append(PassPlugin(plugin_name, pass_name, *flags, **valued_options)) + pass_pipeline.append(PassPlugin(path_to_plugin, pass_name, *flags, **valued_options)) kwargs["pass_pipeline"] = pass_pipeline return qnode(*args, **kwargs) @@ -443,8 +502,8 @@ def circuit(x: float): @functools.wraps(clone) def wrapper(*args, **kwargs): - pass_pipeline = kwargs.pop("pass_pipeline", tuple()) - pass_pipeline += (Pass("merge-rotations"),) + pass_pipeline = kwargs.pop("pass_pipeline", []) + pass_pipeline.append(Pass("merge-rotations")) kwargs["pass_pipeline"] = pass_pipeline return clone(*args, **kwargs) @@ -467,8 +526,8 @@ def ions_decomposition(qnode=None): # pragma: nocover @functools.wraps(qnode) def wrapper(*args, **kwargs): - pass_pipeline = kwargs.pop("pass_pipeline", tuple()) - pass_pipeline += (Pass("ions-decomposition"),) + pass_pipeline = kwargs.pop("pass_pipeline", []) + pass_pipeline.append(Pass("ions-decomposition")) kwargs["pass_pipeline"] = pass_pipeline return qnode(*args, **kwargs) diff --git a/frontend/catalyst/qfunc.py b/frontend/catalyst/qfunc.py index da59f07ab2..de13bd5443 100644 --- a/frontend/catalyst/qfunc.py +++ b/frontend/catalyst/qfunc.py @@ -49,7 +49,7 @@ from catalyst.jax_primitives import quantum_kernel_p from catalyst.jax_tracer import Function, trace_quantum_function from catalyst.logging import debug_logger -from catalyst.passes import dictionary_to_tuple_of_passes +from catalyst.passes import dictionary_to_list_of_passes from catalyst.tracing.contexts import EvaluationContext from catalyst.tracing.type_signatures import filter_static_args from catalyst.utils.exceptions import CompileError @@ -105,8 +105,8 @@ def __call__(self, *args, **kwargs): assert isinstance(self, qml.QNode) # Update the qnode with peephole pipeline - pass_pipeline = kwargs.pop("pass_pipeline", tuple()) - pass_pipeline = dictionary_to_tuple_of_passes(pass_pipeline) + pass_pipeline = kwargs.pop("pass_pipeline", []) + pass_pipeline = 
dictionary_to_list_of_passes(pass_pipeline) # Mid-circuit measurement configuration/execution dynamic_one_shot_called = getattr(self, "_dynamic_one_shot_called", False) @@ -148,7 +148,7 @@ def _eval_quantum(*args, **kwargs): dynamic_args = filter_static_args(args, static_argnums) args_flat = tree_flatten((dynamic_args, kwargs))[0] res_flat = quantum_kernel_p.bind( - flattened_fun, *args_flat, qnode=self, pipeline=pass_pipeline + flattened_fun, *args_flat, qnode=self, pipeline=tuple(pass_pipeline) ) return tree_unflatten(out_tree_promise(), res_flat)[0]
diff --git a/frontend/test/pytest/test_mlir_plugin_interface.py b/frontend/test/pytest/test_mlir_plugin_interface.py new file mode 100644 index 0000000000..e9ef93c5a3 --- /dev/null +++ b/frontend/test/pytest/test_mlir_plugin_interface.py @@ -0,0 +1,29 @@ +# Copyright 2025 Xanadu Quantum Technologies Inc. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Testing interface around main plugin functionality""" + +from pathlib import Path + +import pytest +import catalyst + +def test_path_does_not_exists(): + """Test what happens when a pass_plugin is given a path that does not exist""" + + with pytest.raises(FileNotFoundError, match="does not exist"): + catalyst.apply_pass_plugin("this-path-does-not-exist", "this-pass-also-doesnt-exists") + + with pytest.raises(FileNotFoundError, match="does not exist"): + catalyst.apply_pass_plugin(Path("this-path-does-not-exist"), "this-pass-also-doesnt-exists")
From 469f088c152eab4986009d6b1cb924f098618ce0 Mon Sep 17 00:00:00 2001 From: Mehrdad Malek <39844030+mehrdad2m@users.noreply.github.com> Date: Wed, 8 Jan 2025 16:33:34 -0500 Subject: [PATCH 06/10] Support static parameters for GlobalPhase and MultiRZ gates. (#1396)
**Context:** PR #1387 adds support for static parameters in CustomOp. However, support for the GlobalPhase and MultiRZ gates is missing.
**Description of the Change:** This PR adds support for the two missing gates. We unified the way `GlobalPhase` and `MultiRZ` are treated in the frontend. Currently, `GlobalPhase` has its own primitive (`qphase_p`), whereas `MultiRZ` is bound through the `qinst_p` primitive. `qphase_p` is now removed from the frontend, and `GlobalPhase` is bound through `qinst_p`, similar to `MultiRZ`. If a `MultiRZ` or `GlobalPhase` gate has static parameters, a `StaticCustomOp` is created, which is lowered to the respective gate in the `static_custom_lowering` pass.
**Benefits:** Removed the inconsistency in the definition of odd custom gates (MultiRZ and GlobalPhase). Static circuit IR now supports more parameterized gates.
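As an illustrative sketch only (mirroring the new lit test in this PR; the exact IR may differ), a circuit whose gate parameters are plain Python floats should now lower `MultiRZ` and `GlobalPhase` to `quantum.static_custom`:

```python
import pennylane as qml
from catalyst import qjit

x = 3.14  # a compile-time (static) parameter, not a traced argument

@qjit(target="mlir")
@qml.qnode(qml.device("lightning.qubit", wires=2))
def circuit():
    qml.MultiRZ(x, wires=[0, 1])
    qml.GlobalPhase(x)
    return qml.state()

# Expect `quantum.static_custom "MultiRZ"` and `quantum.static_custom "GlobalPhase"`
# in the IR instead of ops taking a dynamic f64 parameter.
print(circuit.mlir)
```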
**Possible Drawbacks:** **Related GitHub Issues:** --------- Co-authored-by: ringo-but-quantum Co-authored-by: paul0403 <79805239+paul0403@users.noreply.github.com> Co-authored-by: Ritu Thombre Co-authored-by: Ritu Thombre <42207923+ritu-thombre99@users.noreply.github.com> Co-authored-by: lillian542 <38584660+lillian542@users.noreply.github.com> Co-authored-by: Joey Carter Co-authored-by: Raul Torres <138264735+rauletorresc@users.noreply.github.com> Co-authored-by: Isaac De Vlugt <34751083+isaacdevlugt@users.noreply.github.com> Co-authored-by: erick-xanadu <110487834+erick-xanadu@users.noreply.github.com> --- doc/releases/changelog-0.10.0.md | 1 + frontend/catalyst/jax_primitives.py | 101 ++++++++++++------ frontend/catalyst/jax_tracer.py | 6 +- frontend/test/lit/test_quantum_control.py | 2 +- frontend/test/lit/test_static_circuit.py | 4 + .../Transforms/StaticCustomPatterns.cpp | 21 +++- .../Transforms/static_custom_lowering.cpp | 2 + 7 files changed, 101 insertions(+), 36 deletions(-) diff --git a/doc/releases/changelog-0.10.0.md b/doc/releases/changelog-0.10.0.md index f8b56f1aba..f96aada52b 100644 --- a/doc/releases/changelog-0.10.0.md +++ b/doc/releases/changelog-0.10.0.md @@ -264,6 +264,7 @@ parameters of quantum gates by adding a new gate called `StaticCustomOp` with lowering to regular `CustomOp`. [(#1387)](https://github.com/PennyLaneAI/catalyst/pull/1387) + [(#1396)](https://github.com/PennyLaneAI/catalyst/pull/1396)

Documentation 📝

diff --git a/frontend/catalyst/jax_primitives.py b/frontend/catalyst/jax_primitives.py index 9f58dff29b..9af5937f22 100644 --- a/frontend/catalyst/jax_primitives.py +++ b/frontend/catalyst/jax_primitives.py @@ -917,13 +917,21 @@ def _qinsert_lowering( # gphase # @gphase_p.def_abstract_eval -def _gphase_abstract_eval(*qubits_or_params, ctrl_len=0, adjoint=False): +def _gphase_abstract_eval( + *qubits_or_params, + ctrl_len=0, + adjoint=False, + static_params=None, +): # The signature here is: (using * to denote zero or more) # param, ctrl_qubits*, ctrl_values* # since gphase has no target qubits. - param = qubits_or_params[0] + if static_params is None: + param = qubits_or_params[-1] + else: + param = static_params[0] assert not isinstance(param, AbstractQbit) - ctrl_qubits = qubits_or_params[-2 * ctrl_len : -ctrl_len] + ctrl_qubits = qubits_or_params[:ctrl_len] for idx in range(ctrl_len): qubit = ctrl_qubits[idx] assert isinstance(qubit, AbstractQbit) @@ -937,34 +945,63 @@ def _gphase_def_impl(*args, **kwargs): def _gphase_lowering( - jax_ctx: mlir.LoweringRuleContext, *qubits_or_params, ctrl_len=0, adjoint=False + jax_ctx: mlir.LoweringRuleContext, + *qubits_or_params, + ctrl_len=0, + adjoint=False, + static_params=None, ): ctx = jax_ctx.module_context.context ctx.allow_unregistered_dialects = True - param = qubits_or_params[0] - ctrl_qubits = qubits_or_params[1 : 1 + ctrl_len] - ctrl_values = qubits_or_params[1 + ctrl_len :] + param = None if static_params else qubits_or_params[-1] + ctrl_qubits = qubits_or_params[:ctrl_len] + ctrl_values = qubits_or_params[ctrl_len:-1] if param else qubits_or_params[ctrl_len:] + + assert ( + not static_params or len(static_params) == 1 + ), "GlobalPhase only takes one static float parameter" + + param_attr = ( + None + if static_params is None + else ir.DenseF64ArrayAttr.get([ir.FloatAttr.get_f64(static_params[0])]) + ) - param = safe_cast_to_f64(param, "GlobalPhase") - param = extract_scalar(param, "GlobalPhase") + assert bool(param_attr) != bool(param) - assert ir.F64Type.isinstance( - param.type - ), "Only scalar double parameters are allowed for quantum gates!" + if param_attr is None: + param = safe_cast_to_f64(param, "GlobalPhase") + param = extract_scalar(param, "GlobalPhase") + + assert ir.F64Type.isinstance( + param.type + ), "Only scalar double parameters are allowed for quantum gates!" 
ctrl_values_i1 = [] for v in ctrl_values: p = TensorExtractOp(ir.IntegerType.get_signless(1), v, []).result ctrl_values_i1.append(p) - GlobalPhaseOp( - params=param, - out_ctrl_qubits=[qubit.type for qubit in ctrl_qubits], - in_ctrl_qubits=ctrl_qubits, - in_ctrl_values=ctrl_values_i1, - adjoint=adjoint, - ) + if static_params: + StaticCustomOp( + out_qubits=[], + out_ctrl_qubits=[qubit.type for qubit in ctrl_qubits], + static_params=param_attr, + in_qubits=[], + gate_name="GlobalPhase", + in_ctrl_qubits=ctrl_qubits, + in_ctrl_values=ctrl_values_i1, + adjoint=adjoint, + ) + else: + GlobalPhaseOp( + params=param, + out_ctrl_qubits=[qubit.type for qubit in ctrl_qubits], + in_ctrl_qubits=ctrl_qubits, + in_ctrl_values=ctrl_values_i1, + adjoint=adjoint, + ) return ctrl_qubits @@ -1038,7 +1075,7 @@ def _qinst_lowering( params_attr = ( None - if not static_params + if static_params is None else ir.DenseF64ArrayAttr.get([ir.FloatAttr.get_f64(val) for val in static_params]) ) if len(float_params) > 0: @@ -1050,12 +1087,23 @@ def _qinst_lowering( name_str = str(name_attr) name_str = name_str.replace('"', "") + if static_params: + return StaticCustomOp( + out_qubits=[qubit.type for qubit in qubits], + out_ctrl_qubits=[qubit.type for qubit in ctrl_qubits], + static_params=params_attr, + in_qubits=qubits, + gate_name=name_attr, + in_ctrl_qubits=ctrl_qubits, + in_ctrl_values=ctrl_values_i1, + adjoint=adjoint, + ).results + if name_str == "MultiRZ": assert len(float_params) <= 1, "MultiRZ takes at most one dynamic float parameter" assert ( not static_params or len(static_params) <= 1 ), "MultiRZ takes at most one static float parameter" - # TODO: Add support for MultiRZ with static params float_param = ( TensorExtractOp(ir.F64Type.get(), mlir.ir_constant(static_params[0]), []) if len(float_params) == 0 @@ -1070,17 +1118,6 @@ def _qinst_lowering( in_ctrl_values=ctrl_values_i1, adjoint=adjoint, ).results - if params_attr: - return StaticCustomOp( - out_qubits=[qubit.type for qubit in qubits], - out_ctrl_qubits=[qubit.type for qubit in ctrl_qubits], - static_params=params_attr, - in_qubits=qubits, - gate_name=name_attr, - in_ctrl_qubits=ctrl_qubits, - in_ctrl_values=ctrl_values_i1, - adjoint=adjoint, - ).results return CustomOp( out_qubits=[qubit.type for qubit in qubits], out_ctrl_qubits=[qubit.type for qubit in ctrl_qubits], diff --git a/frontend/catalyst/jax_tracer.py b/frontend/catalyst/jax_tracer.py index 2eb4dc7396..1680279454 100644 --- a/frontend/catalyst/jax_tracer.py +++ b/frontend/catalyst/jax_tracer.py @@ -678,8 +678,10 @@ def bind_native_operation(qrp, op, controlled_wires, controlled_values, adjoint= qrp.insert(controlled_wires, qubits2[len(qubits) :]) elif isinstance(op, qml.GlobalPhase): controlled_qubits = qrp.extract(controlled_wires) - qubits2 = gphase_p.bind( - *[*op.parameters, *controlled_qubits, *controlled_values], + qubits2 = bind_flexible_primitive( + gphase_p, + {"static_params": op.parameters}, + *[*controlled_qubits, *controlled_values], ctrl_len=len(controlled_qubits), adjoint=adjoint, ) diff --git a/frontend/test/lit/test_quantum_control.py b/frontend/test/lit/test_quantum_control.py index 730692c855..027c677498 100644 --- a/frontend/test/lit/test_quantum_control.py +++ b/frontend/test/lit/test_quantum_control.py @@ -148,7 +148,7 @@ def test_native_controlled_multirz(): @qml.qnode(dev) # CHECK-LABEL: public @jit_native_controlled_multirz def native_controlled_multirz(): - # CHECK: [[out:%.+]]:2, [[out_ctrl:%.+]] = quantum.multirz + # CHECK: [[out:%.+]]:2, [[out_ctrl:%.+]] = 
quantum.static_custom "MultiRZ" # CHECK-SAME: ctrls # CHECK-SAME: ctrlvals(%true) qml.ctrl(qml.MultiRZ(0.6, wires=[0, 2]), control=[1]) diff --git a/frontend/test/lit/test_static_circuit.py b/frontend/test/lit/test_static_circuit.py index 40de0c68d3..c2a2487667 100644 --- a/frontend/test/lit/test_static_circuit.py +++ b/frontend/test/lit/test_static_circuit.py @@ -45,6 +45,8 @@ def circuit(): qml.CRY(x, wires=[0, 1]) qml.CRZ(x, wires=[0, 1]) + qml.MultiRZ(x, wires=[0, 1, 2, 3]) + return qml.state() print(circuit.mlir) @@ -65,4 +67,6 @@ def circuit(): # CHECK: %[[CRX:.*]] = quantum.static_custom "CRX" # CHECK: %[[CRY:.*]] = quantum.static_custom "CRY" # CHECK: %[[CRZ:.*]] = quantum.static_custom "CRZ" +# CHECK: %[[BIT3:.*]] = quantum.extract %[[REG]][ 3] +# CHECK: %[[MRZ:.*]] = quantum.static_custom "MultiRZ" test_static_params() diff --git a/mlir/lib/Quantum/Transforms/StaticCustomPatterns.cpp b/mlir/lib/Quantum/Transforms/StaticCustomPatterns.cpp index 64d8e49529..06c75f59cd 100644 --- a/mlir/lib/Quantum/Transforms/StaticCustomPatterns.cpp +++ b/mlir/lib/Quantum/Transforms/StaticCustomPatterns.cpp @@ -40,7 +40,26 @@ struct LowerStaticCustomOp : public OpConversionPattern { rewriter.getF64FloatAttr(param)); paramValues.push_back(constant); } - + if (op.getGateName() == "MultiRZ") { + if (paramValues.size() != 1) { + op.emitError() << "MultiRZ gate expects exactly one parameter"; + return failure(); + } + rewriter.replaceOpWithNewOp( + op, op.getOutQubits().getTypes(), op.getOutCtrlQubits().getTypes(), paramValues[0], + op.getInQubits(), op.getAdjointAttr(), op.getInCtrlQubits(), op.getInCtrlValues()); + return success(); + } + if (op.getGateName() == "GlobalPhase") { + if (paramValues.size() != 1) { + op.emitError() << "GlobalPhase gate expects exactly one parameter"; + return failure(); + } + rewriter.replaceOpWithNewOp(op, op.getOutCtrlQubits().getTypes(), + paramValues[0], op.getAdjointAttr(), + op.getInCtrlQubits(), op.getInCtrlValues()); + return success(); + } rewriter.replaceOpWithNewOp(op, op.getGateName(), op.getInQubits(), op.getInCtrlQubits(), op.getInCtrlValues(), paramValues, op.getAdjointFlag()); diff --git a/mlir/lib/Quantum/Transforms/static_custom_lowering.cpp b/mlir/lib/Quantum/Transforms/static_custom_lowering.cpp index 2e55614acb..5d120ef3ff 100644 --- a/mlir/lib/Quantum/Transforms/static_custom_lowering.cpp +++ b/mlir/lib/Quantum/Transforms/static_custom_lowering.cpp @@ -48,6 +48,8 @@ struct StaticCustomLoweringPass : impl::StaticCustomLoweringPassBase(); target.addLegalOp(); + target.addLegalOp(); + target.addLegalOp(); target.addIllegalOp(); populateStaticCustomPatterns(patterns); From 00a9f8aef1cbe57239eba85fbcdc934efa4921c6 Mon Sep 17 00:00:00 2001 From: David Ittah Date: Wed, 8 Jan 2025 18:01:10 -0500 Subject: [PATCH 07/10] Edit the changelog for release (#1424) Co-authored-by: Joey Carter --- doc/releases/changelog-0.10.0.md | 328 +++++++++++++++---------------- 1 file changed, 160 insertions(+), 168 deletions(-) diff --git a/doc/releases/changelog-0.10.0.md b/doc/releases/changelog-0.10.0.md index f96aada52b..eae56b53e2 100644 --- a/doc/releases/changelog-0.10.0.md +++ b/doc/releases/changelog-0.10.0.md @@ -3,22 +3,24 @@

New features since last release

* Catalyst can now load and apply local MLIR plugins from the PennyLane frontend. + [(#1287)](https://github.com/PennyLaneAI/catalyst/pull/1287) [(#1317)](https://github.com/PennyLaneAI/catalyst/pull/1317) [(#1361)](https://github.com/PennyLaneAI/catalyst/pull/1361) [(#1370)](https://github.com/PennyLaneAI/catalyst/pull/1370) - Custom compilation passes and dialects in MLIR can be specified for use in Catalyst via a shared - object (`*.so` or `*.dylib` on MacOS) that implements the pass. Details on creating your own - plugin can be found in our + Custom compilation passes and dialects in MLIR can be specified for use in Catalyst via a shared + object (`*.so` or `*.dylib` on macOS) that implements the pass. Details on creating your own + plugin can be found in our [compiler plugin documentation](https://docs.pennylane.ai/projects/catalyst/en/stable/dev/plugins.html). - At a high level, there are three ways to utilize a plugin once it's properly specified: + At a high level, there are three ways to use a plugin once it's properly specified: - * :func:`~.passes.apply_pass` can be used on QNodes when there is a - [Python entry point](https://packaging.python.org/en/latest/specifications/entry-points/) - defined for the plugin. + - :func:`~.passes.apply_pass` can be used on QNodes when there is a + [Python entry point](https://packaging.python.org/en/latest/specifications/entry-points/) + defined for the plugin. In that case, the plugin and pass should both be specified and separated + by a period. ```python - @catalyst.passes.apply_pass(pass_name) + @catalyst.passes.apply_pass("plugin_name.pass_name") @qml.qnode(qml.device("lightning.qubit", wires=1)) def qnode(): return qml.state() @@ -28,11 +30,13 @@ return qnode() ``` - * :func:`~.passes.apply_pass_plugin` can be used on QNodes when there is not an entry point - defined for the plugin. + - :func:`~.passes.apply_pass_plugin` can be used on QNodes when the plugin did not define an entry + point. In that case the full filesystem path must be specified in addition to the pass name. ```python - @catalyst.passes.apply_pass_plugin(path_to_plugin, pass_name) + from pathlib import Path + + @catalyst.passes.apply_pass_plugin(Path("path_to_plugin"), "pass_name") @qml.qnode(qml.device("lightning.qubit", wires=1)) def qnode(): return qml.state() @@ -42,14 +46,16 @@ return qnode() ``` - * Specifying multiple compilation pass plugins or dialect plugins directly in :func:`~.qjit` via - the `pass_plugins` and `dialect_plugins` keyword arguments, which must be lists of plugin paths. + - Alternatively, one or more dialect and pass plugins can be specified in advance in the + :func:`~.qjit` decorator, via the `pass_plugins` and `dialect_plugins` keyword arguments. The + :func:`~.passes.apply_pass` function can then be used without specifying the plugin. ```python from pathlib import Path plugin = Path("shared_object_file.so") + @catalyst.passes.apply_pass("pass_name") @qml.qnode(qml.device("lightning.qubit", wires=0)) def qnode(): qml.Hadamard(wires=0) @@ -57,215 +63,209 @@ @qml.qjit(pass_plugins=[plugin], dialect_plugins=[plugin]) def module(): - return catalyst.passes.apply_pass(qnode, "pass_name")() + return qnode() ``` - For more information on usage, - visit our [compiler plugin documentation](https://docs.pennylane.ai/projects/catalyst/en/stable/dev/plugins.html). + For more information on usage, visit our + [compiler plugin documentation](https://docs.pennylane.ai/projects/catalyst/en/stable/dev/plugins.html).

Improvements 🛠

-* The lightning runtime now supports finite shots with measuring expectation values of `qml.Hermitian`. - [(#451)](https://github.com/PennyLaneAI/catalyst/pull/451) - -* Pybind11 has been replaced with nanobind for C++/Python bindings in the frontend and in the runtime. - [(#1173)](https://github.com/PennyLaneAI/catalyst/pull/1173) - [(#1293)](https://github.com/PennyLaneAI/catalyst/pull/1293) - [(#1391)](https://github.com/PennyLaneAI/catalyst/pull/1391) - - Nanobind has been developed as a natural successor to the pybind11 library and offers a number of - [advantages](https://nanobind.readthedocs.io/en/latest/why.html#major-additions) like its ability - to target Python's [stable ABI interface](https://docs.python.org/3/c-api/stable.html) starting - with Python 3.12. - -* Catalyst now uses the new compiler API (`catalyst-cli`) to compile quantum code from the Python - frontend instead of using pybind11 as an interface between the compiler and the frontend. +* The Catalyst CLI, a command line interface for debugging and dissecting different stages of + compilation, is now available under the `catalyst` command after installing Catalyst with pip. + Even though the tool was first introduced in `v0.9`, it was not yet included in binary + distributions of Catalyst (wheels). The full usage instructions are available in the + [Catalyst CLI documentation](https://docs.pennylane.ai/projects/catalyst/en/stable/catalyst-cli/catalyst-cli.html). [(#1285)](https://github.com/PennyLaneAI/catalyst/pull/1285) + [(#1368)](https://github.com/PennyLaneAI/catalyst/pull/1368) + [(#1405)](https://github.com/PennyLaneAI/catalyst/pull/1405) -* Gates in the gate set `{T, S, Z, Hadamard, RZ, PhaseShift, CNOT}` now have MLIR decompositions to - the gate set `{RX, RY, MS}`, which are useful for trapped ion devices. - [(#1226)](https://github.com/PennyLaneAI/catalyst/pull/1226) +* Lightning devices now support finite-shot expectation values of `qml.Hermitian` when used with + Catalyst. + [(#451)](https://github.com/PennyLaneAI/catalyst/pull/451) -* `qml.CosineWindow` is now compatible with QJIT. +* The PennyLane state preparation template `qml.CosineWindow` is now compatible with Catalyst. [(#1166)](https://github.com/PennyLaneAI/catalyst/pull/1166) -* All PennyLane templates are tested for QJIT compatibility. - [(#1161)](https://github.com/PennyLaneAI/catalyst/pull/1161) - -* Python is now decoupled from the Runtime by using the Python Global Interpreter Lock (GIL) instead - of custom mutexes. - [(#624)](https://github.com/PennyLaneAI/catalyst/pull/624) - - In addition, executables created using :func:`~.debug.compile_executable` no longer require - linking against Python shared libraries after decoupling Python from the Runtime C-API. +* A development distribution of Python with dynamic linking support (`libpython.so`) is no longer + needed in order to use :func:`~.debug.compile_executable` to generate standalone executables of + compiled programs. [(#1305)](https://github.com/PennyLaneAI/catalyst/pull/1305) -* The readability of conditional passes in `catalyst.pipelines` has been improved. - [(#1194)](https://github.com/PennyLaneAI/catalyst/pull/1194) - -* The output of compiler instrumentation has been cleaned up by only printing stats after a `pipeline`. - It is still possible to get the more detailed output with `qjit(verbose=True)`. 
+* In Catalyst `v0.9` the output of the compiler instrumentation (:func:`~.debug.instrumentation`) + had inadvertently been made more verbose by printing timing information for each run of each pass. + This change has been reverted. Instead, the :func:`~.qjit` option `verbose=True` will now instruct + the instrumentation to produce this more detailed output. [(#1343)](https://github.com/PennyLaneAI/catalyst/pull/1343) -* Stable ABI wheels for Python 3.12 and up are now generated. - [(#1357)](https://github.com/PennyLaneAI/catalyst/pull/1357) - [(#1385)](https://github.com/PennyLaneAI/catalyst/pull/1385) - -* Two new circuit optimization passes, `--disentangle-CNOT` and `--disentangle-SWAP`, are available. +* Two additional circuit optimizations have been added to Catalyst: `disentangle-CNOT` and + `disentangle-SWAP`. The optimizations are available via the :mod:`~.passes` module. [(#1154)](https://github.com/PennyLaneAI/catalyst/pull/1154) + [(#1407)](https://github.com/PennyLaneAI/catalyst/pull/1407) - The CNOT pass disentangles CNOT gates whenever possible, e.g., when the control bit is known to be - in the `|0>` state, the pass removes the CNOT. The pass uses a finite state machine to propagate - simple one-qubit states, in order to determine the input states to the CNOT. - - Similarly, the SWAP pass disentangles SWAP gates whenever possible by using a finite state machine - to propagate simple one-qubit states, similar to the `--disentangle-CNOT` pass. - - Both passes are implemented in accordance with the algorithm from - J. Liu, L. Bello, and H. Zhou, _Relaxed Peephole Optimization: A Novel Compiler Optimization for Quantum Circuits_, 2020, [arXiv:2012.07711](https://arxiv.org/abs/2012.07711) [quant-ph]. - -* Allow specifying a branch to switch to when setting up a dev environment from the wheels. - [(#1406)](https://github.com/PennyLaneAI/catalyst/pull/1406) + The optimizations use a finite state machine to propagate limited qubit state information through + the circuit to turn CNOT and SWAP gates into cheaper instructions. The pass is based on the work + by J. Liu, L. Bello, and H. Zhou, _Relaxed Peephole Optimization: A Novel Compiler Optimization + for Quantum Circuits_, 2020, [arXiv:2012.07711](https://arxiv.org/abs/2012.07711).
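  For illustration (a minimal sketch; it assumes the registered pass names match the flags above, e.g. `disentangle-CNOT`), the new passes can be attached to a QNode with :func:`~.passes.apply_pass`:

  ```python
  import pennylane as qml
  import catalyst

  @catalyst.passes.apply_pass("disentangle-CNOT")
  @qml.qnode(qml.device("lightning.qubit", wires=2))
  def circuit():
      # The control qubit is known to be |0>, so the CNOT can be simplified away.
      qml.CNOT(wires=[0, 1])
      return qml.state()

  @qml.qjit(target="mlir")
  def module():
      return circuit()

  print(module.mlir)
  ```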

Breaking changes 💔

-* The `sample` and `counts` measurement primitives now support dynamic shot values across Catalyst, - although, on the PennyLane side, the device's shots is still constrained to a static integer - literal. - [(#1310)](https://github.com/PennyLaneAI/catalyst/pull/1310)
+* The minimum supported PennyLane version has been updated to `v0.40`; backwards compatibility in + either direction is not maintained. + [(#1308)](https://github.com/PennyLaneAI/catalyst/pull/1308)
- To support this, `SampleOp` and `CountsOp` in MLIR no longer carry the shots attribute, since - integer attributes are tied to literal values and must be static.
+* (Device Developers Only) The way the `shots` parameter is initialized in C++ device backends is + changing. + [(#1310)](https://github.com/PennyLaneAI/catalyst/pull/1310)
- `DeviceInitOp` now takes in an optional SSA argument for shots, and the device init runtime CAPI - will take in this SSA shots value as an argument and set it as the device shots. The sample and - counts runtime CAPI functions no longer take in the shots argument and will retrieve shots from - the device.
+ The previous method of including the shot number in the `kwargs` argument of the device + constructor is deprecated and will be removed in the next release (`v0.11`). Instead, the shots + value will be specified exclusively via the existing `SetDeviceShots` function called at the + beginning of a quantum execution. Device developers are encouraged to update their device + implementations between this and the next release while both methods are supported.
- Correspondingly, the device C++ interface should no longer parse the `DeviceInitOp`'s attributes - dictionary for the shots. For now, we still keep the shots as an attribute so device implementors - can have time to migrate, but we will remove shots from the attribute dictionary in the next - release (`v0.11`)
+ Similarly, the `Sample` and `Counts` functions (and their `Partial*` equivalents) will no longer + provide a `shots` argument, since it is redundant. The signatures of these functions will be updated + in the next release.
-* The `toml` module has been migrated to PennyLane with an updated schema for declaring device - capabilities. Devices with TOML files using `schema = 2` will not be compatible with the latest - Catalyst. See the [Custom Devices documentation page](https://docs.pennylane.ai/projects/catalyst/en/stable/dev/custom_devices.html) - for updated instructions on integrating your device with Catalyst and PennyLane.
+* (Device Developers Only) The `toml`-based device schemas have been integrated with PennyLane and + updated to a new version `schema = 3`. [(#1275)](https://github.com/PennyLaneAI/catalyst/pull/1275)
-* Handling for the legacy operator arithmetic (the `Hamiltonian` and `Tensor` classes in PennyLane) - has been removed. - [(#1308)](https://github.com/PennyLaneAI/catalyst/pull/1308)
+ Devices with existing TOML `schema = 2` will not be compatible with the current release of + Catalyst until updated. A summary of the most important changes is listed here: + - `operators.gates.native` renamed to `operators.gates` + - `operators.gates.decomp` and `operators.gates.matrix` are removed and no longer necessary + - `condition` property is renamed to `conditions` + - Entries in the `measurement_processes` section now expect the full PennyLane class name as + opposed to the deprecated `mp.return_type` shorthand (e.g. `ExpectationMP` instead of `Expval`).
+ - The `mid_circuit_measurements` field has been replaced with `supported_mcm_methods`, which + expects a list of mcm methods that the device is able to work with (or empty if unsupported). + - A new field has been added, `overlapping_observables`, which indicates whether a device supports + multiple measurements during one execution on overlapping wires. + - The `options` section has been removed. Instead, the Python device class should define a + `device_kwargs` field holding the name and values of C++ device constructor kwargs. + + See the [Custom Devices page](https://docs.pennylane.ai/projects/catalyst/en/latest/dev/custom_devices.html) + for the most up-to-date information on integrating your device with Catalyst and PennyLane.
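  As a rough Python sketch of the new `device_kwargs` field mentioned above (only the `device_kwargs` name comes from the list; the surrounding class layout is a placeholder and will depend on your device plugin):

  ```python
  import pennylane as qml

  class MyDevice(qml.devices.Device):  # hypothetical Catalyst-compatible device
      # Replaces the removed TOML `options` section: constructor kwargs that
      # Catalyst forwards to the C++ device.
      device_kwargs = {"option1": 42}

      def execute(self, circuits, execution_config=None):
          raise NotImplementedError  # device-specific execution logic
  ```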

Bug fixes 🐛

-* Fixed a bug introduced in 0.8 that breaks nested invocations of `qml.adjoint` and `qml.ctrl` (e.g.,
-  `qml.adjoint(qml.adjoint(qml.H(0)))`).
+* Fixed a bug introduced in Catalyst `v0.8` that broke nested invocations of `qml.adjoint` and
+  `qml.ctrl` (e.g. `qml.adjoint(qml.adjoint(qml.H(0)))`); a short example follows this list.
  [(#1301)](https://github.com/PennyLaneAI/catalyst/issues/1301)

-* Fixed a bug in :func:`~.debug.compile_executable` that would generate incorrect stride information for
-  array arguments of the function, in particular when non-64bit datatypes are used.
+* Fixed a bug in :func:`~.debug.compile_executable` when using non-64bit arrays as input to the
+  compiled function, due to incorrectly computed stride information.
  [(#1338)](https://github.com/PennyLaneAI/catalyst/pull/1338)

-* Fixed a bug in catalyst cli where using `checkpoint-stage` would cause `save-ir-after-each`
-  to not work properly.
-  [(#1405)](https://github.com/PennyLaneAI/catalyst/pull/1405)
-
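+
+The nested-`qml.adjoint` fix, as a minimal hedged sketch (the `lightning.qubit` device and the
+`qml.state()` return are arbitrary illustrative choices):
+
+```python
+import pennylane as qml
+from catalyst import qjit
+
+@qjit
+@qml.qnode(qml.device("lightning.qubit", wires=1))
+def circuit():
+    # adjoint(adjoint(H)) now compiles correctly and reduces to a plain Hadamard on wire 0.
+    qml.adjoint(qml.adjoint(qml.H(0)))
+    return qml.state()
+```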

Internal changes ⚙️

+* Starting with Python 3.12, Catalyst's binary distributions (wheels) will now follow Python's
+  [Stable ABI](https://docs.python.org/3/c-api/stable.html), eliminating the need for a separate
+  wheel per minor Python version. To enable this, the following changes have been made:
+
+  - Stable ABI wheels are now generated for Python 3.12 and up.
+    [(#1357)](https://github.com/PennyLaneAI/catalyst/pull/1357)
+    [(#1385)](https://github.com/PennyLaneAI/catalyst/pull/1385)
+
+  - Pybind11 has been replaced with nanobind for C++/Python bindings across all components.
+    [(#1173)](https://github.com/PennyLaneAI/catalyst/pull/1173)
+    [(#1293)](https://github.com/PennyLaneAI/catalyst/pull/1293)
+    [(#1391)](https://github.com/PennyLaneAI/catalyst/pull/1391)
+    [(#624)](https://github.com/PennyLaneAI/catalyst/pull/624)
+
+    Nanobind has been developed as a natural successor to the pybind11 library and offers a number
+    of [advantages](https://nanobind.readthedocs.io/en/latest/why.html#major-additions), such as its
+    ability to target Python's Stable ABI.
+
+  - Python C-API calls have been replaced with functions from Python's Limited API.
+    [(#1354)](https://github.com/PennyLaneAI/catalyst/pull/1354)
+
+  - The `QuantumExtension` module for MLIR Python bindings, which relied on pybind11, has been
+    removed. The module was never included in the distributed wheels and could not easily be
+    converted to nanobind due to its dependency on upstream MLIR code. Pybind11 does not support
+    the Python Stable ABI.
+    [(#1187)](https://github.com/PennyLaneAI/catalyst/pull/1187)
+
 * Catalyst no longer depends on or pins the `scipy` package. Instead, OpenBLAS is sourced directly
-  from [`scipy-openblas32`](https://pypi.org/project/scipy-openblas32/) or
+  from [scipy-openblas32](https://pypi.org/project/scipy-openblas32/) or
   [Accelerate](https://developer.apple.com/accelerate/) is used.
   [(#1322)](https://github.com/PennyLaneAI/catalyst/pull/1322)
   [(#1328)](https://github.com/PennyLaneAI/catalyst/pull/1328)

-* The `QuantumExtension` module—previously implemented with pybind11—has been removed. This module
-  was not included in the distributed wheels and has been deprecated to align with our adoption of
-  Python's stable ABI, which pybind11 does not support.
-  [(#1187)](https://github.com/PennyLaneAI/catalyst/pull/1187)
-
-* Code for using `lightning.qubit` with Catalyst has been moved from the Catalyst repository to
-  the [Lightning repository](https://github.com/PennyLaneAI/pennylane-lightning) so that Catalyst
-  wheels will build faster.
+* The Catalyst plugin for the `lightning.qubit` device has been migrated from the Catalyst repository
+  to the [Lightning repository](https://github.com/PennyLaneAI/pennylane-lightning). This reduces
+  the size of Catalyst's binary distributions and the build time of the project by avoiding
+  re-compilation of the Lightning source code.
   [(#1227)](https://github.com/PennyLaneAI/catalyst/pull/1227)
   [(#1307)](https://github.com/PennyLaneAI/catalyst/pull/1307)
   [(#1312)](https://github.com/PennyLaneAI/catalyst/pull/1312)

-* `catalyst-cli` and `quantum-opt` are now compiled with `default` visibility, which allows for MLIR
-  plugins to work.
-  [(#1287)](https://github.com/PennyLaneAI/catalyst/pull/1287)
-
-* The patching mechanism of autograph's `allowlist` has been streamlined to only be used in places
-  where it's required.
+* The AutoGraph exception mechanism (`allowlist` parameter) has been streamlined to only be used in
+  places where it's required.
  [(#1332)](https://github.com/PennyLaneAI/catalyst/pull/1332)
  [(#1337)](https://github.com/PennyLaneAI/catalyst/pull/1337)

-* Each qnode now has its own transformation schedule. Instead of relying on the name of the qnode,
-  each qnode now has a transformation module, which denotes the transformation schedule, embedded in
+* Each QNode now has its own transformation schedule (see the sketch at the end of this section).
+  Instead of relying on the name of the QNode, each QNode now has a transformation module, which
+  denotes the transformation schedule, embedded in
   its MLIR representation.
   [(#1323)](https://github.com/PennyLaneAI/catalyst/pull/1323)

-* The `apply_registered_pass_p` primitive has been removed and the API for scheduling passes to run
-  using the transform dialect has been refactored. In particular, passes are appended to a tuple as
-  they are being registered and they will be run in order. If there are no local passes, the global
-  `pass_pipeline` is scheduled. Furthermore, this commit also reworks the caching mechanism for
-  primitives, which is important as qnodes and functions are primitives and now that we can apply
+* The `apply_registered_pass_p` primitive has been removed, and the API for scheduling passes to run
+  via the transform dialect has been refactored. In particular, passes are appended to a tuple as
+  they are registered and are run in that order; if there are no local passes, the global
+  `pass_pipeline` is scheduled instead. This change also reworks the caching mechanism for
+  primitives, which matters because QNodes and functions are primitives, and now that we can apply
   passes to them, they are distinct based on which passes have been scheduled to run on them.
   [(#1317)](https://github.com/PennyLaneAI/catalyst/pull/1317)

-* Python C-API calls have been replaced with Stable ABI calls.
-  [(#1354)](https://github.com/PennyLaneAI/catalyst/pull/1354)
+* The Catalyst infrastructure has been upgraded to support a dynamic `shots` parameter for quantum
+  execution. Previously, this value had to be a static compile-time constant and could not be
+  changed once the program was compiled. Upcoming UI changes will make this feature accessible to
+  users.
+  [(#1360)](https://github.com/PennyLaneAI/catalyst/pull/1360)

-* A framework for loading and interacting with databases containing hardware information and
-  calibration data for Open Quantum Design (OQD) trapped-ion quantum devices has been added.
-  [(#1348)](https://github.com/PennyLaneAI/catalyst/pull/1348)
+* Several changes have been made for experimental support of Open Quantum Design (OQD) trapped-ion
+  devices, including:

-  A new module, `catalyst.utils.toml_utils`, was also added to assist in loading information from
-  these databases, which are stored as text files in TOML format. In particular, this module
-  contains a new function, :func:`~.utils.toml_utils.safe_eval`, to safely evaluate mathematical
-  expressions:
+  - An experimental `ion` dialect has been added for Catalyst programs targeting OQD trapped-ion
+    quantum devices.
+    [(#1260)](https://github.com/PennyLaneAI/catalyst/pull/1260)
+    [(#1372)](https://github.com/PennyLaneAI/catalyst/pull/1372)

-  ```python
-  >>> from catalyst.utils.toml_utils import safe_eval
-  >>> safe_eval("2 * math.pi * 1e9")
-  6283185307.179586
-  ```
+    The `ion` dialect defines the set of physical properties of the device, such as the ion species
+    and their atomic energy levels, as well as the operations that manipulate the qubits in the
+    trapped-ion system, parameterized by laser pulse durations, polarizations, detuning frequencies, etc.
-* A default backend for OQD trapped-ion quantum devices has been added.
-  [(#1355)](https://github.com/PennyLaneAI/catalyst/pull/1355)
-  [(#1403)](https://github.com/PennyLaneAI/catalyst/pull/1355)
+    A new pass, `--quantum-to-ion`, has also been added to convert logical gate-based circuits in
+    the Catalyst `quantum` dialect to laser pulse operations in the `ion` dialect. This pass accepts
+    logical quantum gates from the set `{RX, RY, MS}`, where `MS` is the Mølmer–Sørensen gate. Doing
+    so enables the insertion of physical device parameters into the IR, which will be necessary when
+    lowering to OQD's backend calls. The physical parameters, which are typically obtained from
+    hardware-calibration runs, are read in from [TOML](https://toml.io/en/) files during the
+    `--quantum-to-ion` conversion. The TOML file paths are provided as pass options.

-  Support for OQD devices is still under development, therefore the OQD modules are currently not
-  included in the distributed wheels.
+  - A plugin and device backend for OQD trapped-ion quantum devices have been added.
+    [(#1355)](https://github.com/PennyLaneAI/catalyst/pull/1355)
+    [(#1403)](https://github.com/PennyLaneAI/catalyst/pull/1403)

-* As a step towards supporting dynamic shots across catalyst, `expval` and `var` operations no
-  longer keep the static shots attribute.
-  [(#1360)](https://github.com/PennyLaneAI/catalyst/pull/1360)
+  - An MLIR transformation has been added to decompose `{T, S, Z, Hadamard, RZ, PhaseShift, CNOT}`
+    gates into the set `{RX, RY, MS}`.
+    [(#1226)](https://github.com/PennyLaneAI/catalyst/pull/1226)

-* A new `ion` dialect has been added for Catalyst programs targeting OQD trapped-ion quantum devices.
-  [(#1260)](https://github.com/PennyLaneAI/catalyst/pull/1260)
-  [(#1372)](https://github.com/PennyLaneAI/catalyst/pull/1372)
-
-  The `ion` dialect defines the set of physical properties of the device, such as the ion species
-  and their atomic energy levels, as well as the operations to manipulate the qubits in the
-  trapped-ion system, such as laser pulse durations, polarizations, detuning frequencies, etc.
-
-  A new pass, `--quantum-to-ion`, has also been added to convert logical gate-based circuits in the
-  Catalyst `quantum` dialect to laser pulse operations in the `ion` dialect. This pass accepts
-  logical quantum gates from the set `{RX, RY, MS}`, where `MS` is the Mølmer–Sørensen gate. Doing
-  so enables the insertion of physical device parameters into the IR, which will be necessary when
-  lowering to OQD's backend calls. The physical parameters are read in from
-  [TOML](https://toml.io/en/) files during the `--quantum-to-ion` conversion. The TOML files are
-  assumed to exist by the pass (the paths to the TOML file locations are taken in as pass options),
-  with the intention that they are generated immediately before compilation during
-  hardware-calibration runs.
-
-* The Catalyst IR has been extended to support literal values as opposed to SSA Values for static
-  parameters of quantum gates by adding a new gate called `StaticCustomOp` with lowering to regular
-  `CustomOp`.
+    Support for OQD devices is still under development; therefore, the OQD modules are currently not
+    included in binary distributions (wheels) of Catalyst.
+
+* The Catalyst IR has been extended to support literal values as opposed to SSA values for static
+  parameters of quantum gates by adding a new gate called `StaticCustomOp`, with eventual lowering
+  to the regular `CustomOp` operation.
[(#1387)](https://github.com/PennyLaneAI/catalyst/pull/1387) [(#1396)](https://github.com/PennyLaneAI/catalyst/pull/1396) +* Code readability in the `catalyst.pipelines` module has been improved, in particular for pipelines + with conditionally included passes. + [(#1194)](https://github.com/PennyLaneAI/catalyst/pull/1194) +
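+To illustrate the per-QNode transformation schedules and pass scheduling described above, here is a
+hedged sketch that applies a registered pass to one QNode but not another within the same compiled
+workflow. The `catalyst.passes.cancel_inverses` decorator is used as an assumed entry point; the
+exact decorator name and import path may differ from the current API.
+
+```python
+import pennylane as qml
+from catalyst import qjit
+from catalyst.passes import cancel_inverses  # assumed import path
+
+dev = qml.device("lightning.qubit", wires=1)
+
+@cancel_inverses          # scheduled only on this QNode's transformation module
+@qml.qnode(dev)
+def f(x: float):
+    qml.Hadamard(0)
+    qml.Hadamard(0)       # expected to cancel with the gate above
+    qml.RX(x, wires=0)
+    return qml.expval(qml.PauliZ(0))
+
+@qml.qnode(dev)           # compiled with the default (global) pipeline only
+def g(x: float):
+    qml.RX(x, wires=0)
+    return qml.expval(qml.PauliZ(0))
+
+@qjit
+def workflow(x: float):
+    return f(x) + g(x)
+```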

Documentation 📝

* A new tutorial going through how to write a new MLIR pass is available. The tutorial writes an @@ -273,14 +273,6 @@ [a separate github branch](https://github.com/PennyLaneAI/catalyst/commit/ba7b3438667963b307c07440acd6d7082f1960f3). [(#872)](https://github.com/PennyLaneAI/catalyst/pull/872) -* The `catalyst-cli` documentation has been updated to reflect the removal of the `func-name` option - for transformation passes. - [(#1368)](https://github.com/PennyLaneAI/catalyst/pull/1368) - -* Added more details to catalyst-cli documentation specifiying available options for - checkpoint-stage and default pipelines - [(#1405)](https://github.com/PennyLaneAI/catalyst/pull/1405) -

Contributors ✍️

This release contains contributions from (in alphabetical order): From b3f44171b916d10de3455fe8ee7e42090688f147 Mon Sep 17 00:00:00 2001 From: GitHub Actions Bot <> Date: Thu, 9 Jan 2025 03:11:22 +0000 Subject: [PATCH 08/10] exclude files from pr --- doc/dev/release_notes.rst | 2 ++ frontend/catalyst/_version.py | 2 +- 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/doc/dev/release_notes.rst b/doc/dev/release_notes.rst index 027704d8be..d0a972569e 100644 --- a/doc/dev/release_notes.rst +++ b/doc/dev/release_notes.rst @@ -3,6 +3,8 @@ Release notes This page contains the release notes for Catalyst. +.. mdinclude:: ../releases/changelog-dev.md + .. mdinclude:: ../releases/changelog-0.10.0.md .. mdinclude:: ../releases/changelog-0.9.0.md diff --git a/frontend/catalyst/_version.py b/frontend/catalyst/_version.py index 9623f05123..f0189718e8 100644 --- a/frontend/catalyst/_version.py +++ b/frontend/catalyst/_version.py @@ -16,4 +16,4 @@ Version number (major.minor.patch[-label]) """ -__version__ = "0.10.0" +__version__ = "0.11.0-dev2" From 26851f92ae8f0c746feb67410e9a666887affe00 Mon Sep 17 00:00:00 2001 From: Joey Carter Date: Thu, 9 Jan 2025 09:35:17 -0500 Subject: [PATCH 09/10] Revert "exclude files from pr" This reverts commit b3f44171b916d10de3455fe8ee7e42090688f147. --- doc/dev/release_notes.rst | 2 -- frontend/catalyst/_version.py | 2 +- 2 files changed, 1 insertion(+), 3 deletions(-) diff --git a/doc/dev/release_notes.rst b/doc/dev/release_notes.rst index d0a972569e..027704d8be 100644 --- a/doc/dev/release_notes.rst +++ b/doc/dev/release_notes.rst @@ -3,8 +3,6 @@ Release notes This page contains the release notes for Catalyst. -.. mdinclude:: ../releases/changelog-dev.md - .. mdinclude:: ../releases/changelog-0.10.0.md .. mdinclude:: ../releases/changelog-0.9.0.md diff --git a/frontend/catalyst/_version.py b/frontend/catalyst/_version.py index f0189718e8..9623f05123 100644 --- a/frontend/catalyst/_version.py +++ b/frontend/catalyst/_version.py @@ -16,4 +16,4 @@ Version number (major.minor.patch[-label]) """ -__version__ = "0.11.0-dev2" +__version__ = "0.10.0" From e445a7c2ded6e46a91fc3632343f30db51f24303 Mon Sep 17 00:00:00 2001 From: Joey Carter Date: Thu, 9 Jan 2025 09:42:34 -0500 Subject: [PATCH 10/10] Python code formatting --- frontend/test/pytest/test_mlir_plugin_interface.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/frontend/test/pytest/test_mlir_plugin_interface.py b/frontend/test/pytest/test_mlir_plugin_interface.py index e9ef93c5a3..fe20d97bab 100644 --- a/frontend/test/pytest/test_mlir_plugin_interface.py +++ b/frontend/test/pytest/test_mlir_plugin_interface.py @@ -17,8 +17,10 @@ from pathlib import Path import pytest + import catalyst + def test_path_does_not_exists(): """Test what happens when a pass_plugin is given an path that does not exist"""