Skip to content

Commit

Permalink
Merge branch 'main' into frontend-refactor-2
Browse files Browse the repository at this point in the history
  • Loading branch information
dime10 committed Feb 23, 2024
2 parents 45b2483 + ce1d93c commit 728ae41
Show file tree
Hide file tree
Showing 50 changed files with 2,741 additions and 503 deletions.
10 changes: 5 additions & 5 deletions .dep-versions
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@
# Update the version check in catalyst.__init__ when changing the JAX version.
jax=0.4.14
mhlo=00be4a6ce2c4d464e07d10eae51918a86f8df7b4
llvm=4706251a3186c34da0ee8fd894f7e6b095da8fdc
enzyme=8d22ed1b8c424a061ed9d6d0baf0cc0d2d6842e2
# Always update the version check in catalyst.__init__ when changing the JAX version.
jax=0.4.23
mhlo=4611968a5f6818e6bdfb82217b9e836e0400bba9
llvm=cd9a641613eddf25d4b25eaa96b2c393d401d42c
enzyme=1beb98b51442d50652eaa3ffb9574f4720d611f1
4 changes: 4 additions & 0 deletions .github/workflows/check-catalyst.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -375,6 +375,10 @@ jobs:
sudo apt-get install -y python3 python3-pip libomp-dev libasan6
python3 --version | grep ${{ needs.constants.outputs.primary_python_version }}
python3 -m pip install -r requirements.txt
# cuda-quantum is installed manually here.
# It can't be listed in requirements.txt, since that file is shared
# with macOS, where cuda-quantum is not installable.
python3 -m pip install cuda-quantum
python3 -m pip install .
- name: Get Cached LLVM Build
Expand Down
9 changes: 7 additions & 2 deletions .github/workflows/check-pl-compat.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -98,8 +98,13 @@ jobs:
DIALECTS_BUILD_DIR="$(pwd)/quantum-build" \
ENABLE_LLD=ON \
make dialects
pip install --upgrade .
if [ ${{ inputs.pennylane }} = "stable" ]; then
pip install --upgrade .
else
# TODO(@erick-xanadu): Remove after release. See issue
# https://github.com/PennyLaneAI/catalyst/issues/494
pl_version="==0.35.0.dev0" pip install --upgrade .
fi
- name: Build Catalyst Runtime (latest)
if: ${{ inputs.lightning == 'latest' }}
Expand Down
3 changes: 2 additions & 1 deletion MANIFEST.in
Original file line number Diff line number Diff line change
@@ -1,4 +1,5 @@
recursive-include frontend/catalyst/bin *
recursive-include frontend/catalyst/lib *
recursive-include frontend/catalyst/enzyme *
recursive-include frontend/mlir_quantum *
recursive-include frontend/mlir_quantum *
recursive-include frontend/catalyst/cuda/ *.toml
2 changes: 1 addition & 1 deletion frontend/catalyst/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -22,7 +22,7 @@

import jaxlib as _jaxlib

_jaxlib_version = "0.4.14"
_jaxlib_version = "0.4.23"
if _jaxlib.__version__ != _jaxlib_version:
import warnings

Expand Down
2 changes: 1 addition & 1 deletion frontend/catalyst/compiler.py
Original file line number Diff line number Diff line change
Expand Up @@ -128,7 +128,6 @@ def run_writing_command(command: List[str], compile_options: Optional[CompileOpt
"func.func(hlo-legalize-to-linalg)",
"func.func(mhlo-legalize-to-std)",
"convert-to-signless",
"func.func(scalarize)",
"canonicalize",
"scatter-lowering",
"hlo-custom-call-lowering",
Expand Down Expand Up @@ -179,6 +178,7 @@ def run_writing_command(command: List[str], compile_options: Optional[CompileOpt
MLIR_TO_LLVM_PASS = (
"MLIRToLLVMDialect",
[
"expand-realloc",
"convert-gradient-to-llvm",
"func.func(convert-linalg-to-loops)",
"convert-scf-to-cf",
Expand Down
112 changes: 112 additions & 0 deletions frontend/catalyst/cuda/__init__.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,112 @@
# Copyright 2024 Xanadu Quantum Technologies Inc.

# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at

# http://www.apache.org/licenses/LICENSE-2.0

# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This module contains a CudaQDevice and the qjit
entry point.
"""

from pathlib import Path

import cudaq
import pennylane as qml

from catalyst.cuda.catalyst_to_cuda_interpreter import interpret


def qjit(fn=None, **kwargs):
    """Wrapper around QJIT for CUDA-quantum.

    Usable both as a bare decorator (``@qjit`` / ``qjit(fn)``) and as a
    parameterized decorator factory (``@qjit(**kwargs)``); in either case
    the wrapped function is handed to the CUDA-quantum interpreter.
    """

    def decorator(callee):
        return interpret(callee, **kwargs)

    # Bare form: apply immediately. Factory form: return the decorator.
    return decorator if fn is None else decorator(fn)


# Do we need to reimplement apply for every child?
# pylint: disable=abstract-method
class BaseCudaInstructionSet(qml.QubitDevice):
    """Base instruction set for CUDA-Quantum devices.

    Concrete backends (qpp-cpu, CuStateVec, CuTensorNet) subclass this and
    override only the identifying attributes and constructor signature.
    Execution does not go through the standard PennyLane ``apply`` path
    (see ``apply`` below), so this class mainly declares the device's
    capabilities for PennyLane.
    """

    # TODO: Once 0.35 is released, remove -dev suffix.
    pennylane_requires = "0.35.0-dev"
    version = "0.1.0"
    author = "Xanadu, Inc."

    # There are similar lines of code in possibly
    # all other list of operations supported by devices.
    # At the time of writing, this warning is raised
    # due to similar lines of code in the QJITDevice
    # pylint: disable=duplicate-code
    # Gate names this device advertises as natively supported.
    operations = [
        "CNOT",
        "CY",
        "CZ",
        "CRX",
        "CRY",
        "CRZ",
        "PauliX",
        "PauliY",
        "PauliZ",
        "Hadamard",
        "S",
        "T",
        "RX",
        "RY",
        "RZ",
        "SWAP",
        # "CSWAP", This is a bug in cuda-quantum. CSWAP is not exposed.
    ]
    # No observables are advertised at the PennyLane level.
    observables = []
    # TOML capabilities file shipped alongside this module.
    config = Path(__file__).parent / "cuda_quantum.toml"

    def __init__(self, shots=None, wires=None, mps=False, multi_gpu=False):
        # mps / multi_gpu are recorded on the instance for backend selection
        # (presumably consumed by the CUDA-quantum interpreter — see subclass
        # constructors, which expose only the flag relevant to each backend).
        self.mps = mps
        self.multi_gpu = multi_gpu
        # shots/wires handling is delegated to qml.QubitDevice.
        super().__init__(wires=wires, shots=shots)

    def apply(self, operations, **kwargs):
        """Unused"""
        # Circuits are executed via the qjit/interpret path instead of the
        # standard PennyLane device execution pipeline.
        raise NotImplementedError(
            "This device is only supported with `qml.qjit`."
        ) # pragma: no cover


class SoftwareQQPP(BaseCudaInstructionSet):
    """Concrete device class for qpp-cpu (the CPU-based CUDA-Quantum simulator)."""

    # Identifiers used for PennyLane device registration/lookup.
    name = "SoftwareQ q++ simulator"
    short_name = "softwareq.qpp"


class NvidiaCuStateVec(BaseCudaInstructionSet):
    """Concrete device class for CuStateVec"""

    name = "CuStateVec"
    short_name = "nvidia.custatevec"

    def __init__(self, shots=None, wires=None, multi_gpu=False):  # pragma: no cover
        """Create a CuStateVec device; ``mps`` does not apply to this backend."""
        settings = {"wires": wires, "shots": shots, "multi_gpu": multi_gpu}
        super().__init__(**settings)


class NvidiaCuTensorNet(BaseCudaInstructionSet):
    """Concrete device class for CuTensorNet"""

    name = "CuTensorNet"
    short_name = "nvidia.cutensornet"

    def __init__(self, shots=None, wires=None, mps=False):  # pragma: no cover
        """Create a CuTensorNet device; ``multi_gpu`` does not apply to this backend."""
        settings = {"wires": wires, "shots": shots, "mps": mps}
        super().__init__(**settings)
Loading

0 comments on commit 728ae41

Please sign in to comment.