From 9998e6b260354b0ecec73df2ea333a147a3ad116 Mon Sep 17 00:00:00 2001 From: Yuval Shekel Date: Mon, 26 Aug 2024 17:44:33 +0300 Subject: [PATCH 01/35] build and tar installation in docker for ubuntu22 --- scripts/release/Dockerfile.ubuntu22 | 11 ++++++++ scripts/release/README.md | 15 ++++++++++ scripts/release/build_release_and_tar.sh | 36 ++++++++++++++++++++++++ 3 files changed, 62 insertions(+) create mode 100644 scripts/release/Dockerfile.ubuntu22 create mode 100644 scripts/release/README.md create mode 100755 scripts/release/build_release_and_tar.sh diff --git a/scripts/release/Dockerfile.ubuntu22 b/scripts/release/Dockerfile.ubuntu22 new file mode 100644 index 000000000..0cac69c89 --- /dev/null +++ b/scripts/release/Dockerfile.ubuntu22 @@ -0,0 +1,11 @@ +# Use the official NVIDIA development runtime image for Ubuntu 22.04 +FROM nvidia/cuda:12.2.0-devel-ubuntu22.04 + +# Install necessary packages +RUN apt-get update && apt-get install -y \ + build-essential \ + cmake \ + tar + +# Define the environment variable for the output tar file name +ENV OUTPUT_TAR_NAME=icicle30-ubuntu22-cuda122.tar.gz diff --git a/scripts/release/README.md b/scripts/release/README.md new file mode 100644 index 000000000..49273256e --- /dev/null +++ b/scripts/release/README.md @@ -0,0 +1,15 @@ + +# Build docker image +```bash +docker build -t icicle-release-ubuntu22-cuda122 -f Dockerfile.ubuntu22 . 
+``` + +# Build libs inside the docker +To build inside the docker and ouptut the tar: +```bash +docker run --rm --gpus all \ + -v ./icicle:/icicle \ + -v ./release_objects:/output \ + -v ./scripts:/scripts \ + icicle-release-ubuntu22-cuda122 bash /scripts/release/build_release_and_tar.sh +``` diff --git a/scripts/release/build_release_and_tar.sh b/scripts/release/build_release_and_tar.sh new file mode 100755 index 000000000..1143600a8 --- /dev/null +++ b/scripts/release/build_release_and_tar.sh @@ -0,0 +1,36 @@ +#!/bin/bash + +set -e + +# List of fields and curves +fields=("babybear" "stark252") +curves=("bn254") + +cd / +mkdir install_dir && mkdir install_dir/icicle # output dir that is tared + +# Iterate over fields +for field in "${fields[@]}"; do + echo "Building for field: $field" + + mkdir build -p && rm -rf build/* + # Configure, build, and install + cmake -S icicle -B build -DFIELD=$field -DCUDA_BACKEND=local -DCMAKE_INSTALL_PREFIX=install_dir/icicle + cmake --build build -j # build + cmake --install build # install +done + +# Iterate over curves +for curve in "${curves[@]}"; do + echo "Building for curve: $curve" + + mkdir build -p && rm -rf build/* + # Configure, build, and install + cmake -S icicle -B build -DCURVE=$curve -DCUDA_BACKEND=local -DCMAKE_INSTALL_PREFIX=install_dir/icicle + cmake --build build -j # build + cmake --install build # install +done + +# Create the tarball +cd install_dir +tar -czvf /output/${OUTPUT_TAR_NAME} icicle # tar the install dir From b7daf851466cec88a236f7e19a07e7cfc9701cde Mon Sep 17 00:00:00 2001 From: Yuval Shekel Date: Mon, 26 Aug 2024 17:56:34 +0300 Subject: [PATCH 02/35] add dockerfiles for ubuntu20 and centos7 too --- scripts/release/Dockerfile.centos7 | 11 +++++++++++ scripts/release/Dockerfile.ubuntu20 | 11 +++++++++++ scripts/release/README.md | 4 ++++ 3 files changed, 26 insertions(+) create mode 100644 scripts/release/Dockerfile.centos7 create mode 100644 scripts/release/Dockerfile.ubuntu20 diff --git 
a/scripts/release/Dockerfile.centos7 b/scripts/release/Dockerfile.centos7 new file mode 100644 index 000000000..bc55ebacf --- /dev/null +++ b/scripts/release/Dockerfile.centos7 @@ -0,0 +1,11 @@ +# Use the official NVIDIA development runtime image for Centos 7 +FROM nvidia/cuda:12.2.0-devel-centos7 + +# Install necessary packages +RUN apt-get update && apt-get install -y \ + build-essential \ + cmake \ + tar + +# Define the environment variable for the output tar file name +ENV OUTPUT_TAR_NAME=icicle30-centos7-cuda122.tar.gz diff --git a/scripts/release/Dockerfile.ubuntu20 b/scripts/release/Dockerfile.ubuntu20 new file mode 100644 index 000000000..94a31e0d7 --- /dev/null +++ b/scripts/release/Dockerfile.ubuntu20 @@ -0,0 +1,11 @@ +# Use the official NVIDIA development runtime image for Ubuntu 20.04 +FROM nvidia/cuda:12.2.0-devel-ubuntu20.04 + +# Install necessary packages +RUN apt-get update && apt-get install -y \ + build-essential \ + cmake \ + tar + +# Define the environment variable for the output tar file name +ENV OUTPUT_TAR_NAME=icicle30-ubuntu20-cuda122.tar.gz diff --git a/scripts/release/README.md b/scripts/release/README.md index 49273256e..c56cdc5de 100644 --- a/scripts/release/README.md +++ b/scripts/release/README.md @@ -2,6 +2,8 @@ # Build docker image ```bash docker build -t icicle-release-ubuntu22-cuda122 -f Dockerfile.ubuntu22 . +docker build -t icicle-release-ubuntu20-cuda122 -f Dockerfile.ubuntu20 . +docker build -t icicle-release-centos7-cuda122 -f Dockerfile.centos7 . ``` # Build libs inside the docker @@ -13,3 +15,5 @@ docker run --rm --gpus all \ -v ./scripts:/scripts \ icicle-release-ubuntu22-cuda122 bash /scripts/release/build_release_and_tar.sh ``` + +replace `icicle-release-ubuntu22-cuda122` with another docker image tag to build in this environment instead. 
From 1af6e00ac490bbdc31570a7af84ede6cd95db8ee Mon Sep 17 00:00:00 2001 From: Yuval Shekel Date: Tue, 27 Aug 2024 15:36:15 +0300 Subject: [PATCH 03/35] updated readme file for install, use and release --- icicle/src/runtime.cpp | 2 +- scripts/release/Dockerfile.centos7 | 16 ++- scripts/release/Dockerfile.ubuntu20 | 5 +- scripts/release/README.md | 148 +++++++++++++++++++++-- scripts/release/build_release_and_tar.sh | 2 +- 5 files changed, 155 insertions(+), 18 deletions(-) diff --git a/icicle/src/runtime.cpp b/icicle/src/runtime.cpp index 4871c6b45..ce36de773 100644 --- a/icicle/src/runtime.cpp +++ b/icicle/src/runtime.cpp @@ -321,7 +321,7 @@ extern "C" eIcicleError icicle_load_backend_from_env_or_default() } // If not found or failed, fall back to the default directory - const std::string default_dir = "/opt/icicle/backend"; + const std::string default_dir = "/opt/icicle/lib/backend"; if (std::filesystem::exists(default_dir)) { eIcicleError result = icicle_load_backend(default_dir.c_str(), true /*=recursive*/); if (result == eIcicleError::SUCCESS) { diff --git a/scripts/release/Dockerfile.centos7 b/scripts/release/Dockerfile.centos7 index bc55ebacf..dceb1fb4e 100644 --- a/scripts/release/Dockerfile.centos7 +++ b/scripts/release/Dockerfile.centos7 @@ -1,11 +1,15 @@ -# Use the official NVIDIA development runtime image for Centos 7 +# Use the official NVIDIA CUDA development image for CentOS 7 FROM nvidia/cuda:12.2.0-devel-centos7 # Install necessary packages -RUN apt-get update && apt-get install -y \ - build-essential \ - cmake \ - tar +RUN yum -y update && \ + yum -y install \ + gcc \ + gcc-c++ \ + make \ + cmake \ + tar \ + && yum clean all # Define the environment variable for the output tar file name -ENV OUTPUT_TAR_NAME=icicle30-centos7-cuda122.tar.gz +ENV OUTPUT_TAR_NAME=icicle30-centos7-cuda122.tar.gz \ No newline at end of file diff --git a/scripts/release/Dockerfile.ubuntu20 b/scripts/release/Dockerfile.ubuntu20 index 94a31e0d7..d887ba936 100644 --- 
a/scripts/release/Dockerfile.ubuntu20 +++ b/scripts/release/Dockerfile.ubuntu20 @@ -1,10 +1,13 @@ # Use the official NVIDIA development runtime image for Ubuntu 20.04 FROM nvidia/cuda:12.2.0-devel-ubuntu20.04 +# Prevent interactive prompts during package installation +ENV DEBIAN_FRONTEND=noninteractive + # Install necessary packages RUN apt-get update && apt-get install -y \ build-essential \ - cmake \ + cmake \ tar # Define the environment variable for the output tar file name diff --git a/scripts/release/README.md b/scripts/release/README.md index c56cdc5de..6a8db64f8 100644 --- a/scripts/release/README.md +++ b/scripts/release/README.md @@ -1,19 +1,149 @@ -# Build docker image +# Icicle Release - How to Install,Use and Release + +## Overview + +This page explains describes the content of a release and how to install and use it.
+It also explains how to build and release Icicle for multiple Linux distributions, including Ubuntu 20.04, Ubuntu 22.04, and CentOS 7. + +:::note +Future releases will also include MacOS and other systems. +::: + +## Content of a Release + +Each Icicle release includes a tar file, named `icicle30-.tar.gz`, where `icicle30` stands for version 3.0. This tar contains icicle-frontend build artifacts and headers for a specific distribution. The tar file includes the following structure: + +- **`./icicle/include/`**: This directory contains all the necessary header files for using the Icicle library from C++. +- **`./icicle/lib/`**: + - **Icicle Libraries**: All the core Icicle libraries are located in this directory. Applications linking to Icicle will use these libraries. + - **Backends**: The `./icicle/lib/backend/` directory houses backend libraries, including the CUDA backend (not included in this tar). + +- **CUDA backend** comes as separate tar `icicle30--cuda122.tar.gz` + - per distribution, for icicle-frontend V3.0 and CUDA 12.2. + +## Installing and Using the Release + +1. **Extract the Tar Files**: + - Download the appropriate tar files for your distribution (Ubuntu 20.04, Ubuntu 22.04, or CentOS 7). + - Extract it to your desired location: + ```bash + # install the frontend part (Can skip for Rust) + tar -xzvf icicle30-.tar.gz -C /opt/ # or other non-default install directory + # install CUDA backend (Required for all programming-languages that want to use CUDA backend) + tar -xzvf icicle30--cuda122.tar.gz -C /opt/ # or other non-default install directory + ``` + + - Note that you may install to any directory and you need to make sure it can be found by the linker at runtime. + +2. 
**Linking Your Application**: + **C++** + - When compiling your C++ application, link against the Icicle libraries found in `/opt/icicle/lib` or other location: + ```bash + g++ -o myapp myapp.cpp -L/opt/icicle/lib -licicle_field_babybear -licicle_curve_bn254 + ``` + - Note: You need to link to the Icicle device library and in addition link to each field or curve libraries. The backend libraries are dynamically loaded at runtime, so not linking to them. + + **Rust** + - When building the icicle crates, icicle frontend libs are built from source too. They are installed to `target//deps/icile` and the crate is linked to that at runtime. + - Need to install CUDA backend only, if tou have a CUDA GPU. + - Note: can install and link to the installed libs instead of building them from source. This is currently not supported but will be in a future release. + + **Go** + TODO + +:::warning when deploying an application (either C++, Rust or Go), you must make sure to either deploy the icicle libs (that are installed to `target//deps/icile` and also the CUDA backend) along the application binaries (as tar, docker image, package manager installer or else) or make sure to install icicle (and the backend) on the target machine. Otherwise the target machine will have linkage issues. +::: + +:::tip +If you face linkage issues, try `ldd myapp` to see the runtime deps. If ICICLE libs are not found in the filesystem, you need to add the install directory to the search path of the linker. In a development env You can do that via `LD_LIBRARY_PATH` or corresponding variables. For deployment, make sure it can be found and avoid `LD_LIBRARY_PATH`. +::: + +## Backend Loading + +The Icicle library dynamically loads backend libraries at runtime. By default, it searches for backends in the following order: + +1. **Environment Variable**: If the `ICICLE_BACKEND_INSTALL_DIR` environment variable is defined, Icicle will prioritize this location. +2. 
**Default Directory**: If the environment variable is not set, Icicle will search in the default directory `/opt/icicle/lib/backend`. + +:::warning +Make sure to load a backend that is compatible to the frontend version. CUDA backend libs are forward compatible with newer frontends (e.g. CUDA-backend-3.0 works with ICICLE-3.2). The opposite is not guranteed. +::: + +To load backend from ICICLE_BACKEND_INSTALL_DIR or /opt/icicle/lib/backend in your application: + +**C++** +```cpp +extern "C" eIcicleError icicle_load_backend_from_env_or_default(); +``` +**Rust** +```rust +pub fn load_backend_from_env_or_default() -> Result<(), eIcicleError>; +``` +**Go** +```go +TODO +``` + +### Custom Backend Loading + +If you need to load a backend from a custom location at any point during runtime, you can call the following function: + +**C++** +```cpp +extern "C" eIcicleError icicle_load_backend(const char* path, bool is_recursive); +``` +- **`path`**: The directory where the backend libraries are located. +- **`is_recursive`**: If `true`, the function will search for backend libraries recursively within the specified path. + +**Rust** +```rust + pub fn load_backend(path: &str) -> Result<(), eIcicleError>; // OR + pub fn load_backend_non_recursive(path: &str) -> Result<(), eIcicleError>; +``` +- **`path`**: The directory where the backend libraries are located. + +**Go** +```go +TODO +``` +## Build the release + +This section is describing how a release is generated, given the release sources.
+We use docker to represent the target environment for the release. Each Docker image is tailored to a specific distribution and CUDA version. You first build the Docker image, which sets up the environment, and then use this Docker image to build the release tar file. This ensures that the build process is consistent and reproducible across different environments. + +### Build Docker Image + +The Docker images represent the target environment for the release. + +To build the Docker images for each distribution and CUDA version, use the following commands: + ```bash +# Ubuntu 22.04, CUDA 12.2 docker build -t icicle-release-ubuntu22-cuda122 -f Dockerfile.ubuntu22 . + +# Ubuntu 20.04, CUDA 12.2 docker build -t icicle-release-ubuntu20-cuda122 -f Dockerfile.ubuntu20 . + +# CentOS 7, CUDA 12.2 docker build -t icicle-release-centos7-cuda122 -f Dockerfile.centos7 . ``` -# Build libs inside the docker -To build inside the docker and ouptut the tar: + +## Build Libraries Inside the Docker + +To build the Icicle libraries inside a Docker container and output the tar file to the `release_output` directory: + ```bash -docker run --rm --gpus all \ - -v ./icicle:/icicle \ - -v ./release_objects:/output \ - -v ./scripts:/scripts \ - icicle-release-ubuntu22-cuda122 bash /scripts/release/build_release_and_tar.sh +mkdir -p release_output +docker run --rm --gpus all \ + -v ./icicle:/icicle \ + -v ./release_output:/output \ + -v ./scripts:/scripts \ + icicle-release-ubuntu22-cuda122 bash /scripts/release/build_release_and_tar.sh ``` -replace `icicle-release-ubuntu22-cuda122` with another docker image tag to build in this environment instead. +This command executes the `build_release_and_tar.sh` script inside the Docker container, which provides the build environment. It maps the source code and output directory to the container, ensuring the generated tar file is available on the host system. 
+ +You can replace `icicle-release-ubuntu22-cuda122` with another Docker image tag to build in the corresponding environment (e.g., Ubuntu 20.04 or CentOS 7). + diff --git a/scripts/release/build_release_and_tar.sh b/scripts/release/build_release_and_tar.sh index 1143600a8..7d99246df 100755 --- a/scripts/release/build_release_and_tar.sh +++ b/scripts/release/build_release_and_tar.sh @@ -4,7 +4,7 @@ set -e # List of fields and curves fields=("babybear" "stark252") -curves=("bn254") +curves=("bn254" "bls12_381" "bls12_377" "bw6_761" "grumpkin") cd / mkdir install_dir && mkdir install_dir/icicle # output dir that is tared From 3e83421ecfe88f8c7b44bb3cc8c520a3217a5e93 Mon Sep 17 00:00:00 2001 From: Yuval Shekel Date: Tue, 27 Aug 2024 16:14:45 +0300 Subject: [PATCH 04/35] update docs for install and use --- docs/docs/icicle/build_from_source.md | 182 ++++++++++++++++++ docs/docs/icicle/getting_started.md | 6 +- docs/docs/icicle/install_and_use.md | 122 ++++++++++++ docs/docs/icicle/install_cuda_backend.md | 51 ++--- docs/docs/icicle/libraries.md | 2 +- docs/docs/icicle/overview.md | 2 +- .../primitives/Icicle_Release_README.md | 91 +++++++++ docs/sidebars.js | 15 +- scripts/release/README.md | 109 ----------- 9 files changed, 426 insertions(+), 154 deletions(-) create mode 100644 docs/docs/icicle/build_from_source.md create mode 100644 docs/docs/icicle/install_and_use.md create mode 100644 docs/docs/icicle/primitives/Icicle_Release_README.md diff --git a/docs/docs/icicle/build_from_source.md b/docs/docs/icicle/build_from_source.md new file mode 100644 index 000000000..e1c817f1c --- /dev/null +++ b/docs/docs/icicle/build_from_source.md @@ -0,0 +1,182 @@ + +# Build ICICLE from source + +This guide will help you get started with building, testing, and installing ICICLE, whether you're using C++, Rust, or Go. It also covers installation of the CUDA backend and important build options. 
+ +## Building and Testing ICICLE frontend + +### C++: Build, Test, and Install (Frontend) + +ICICLE can be built and tested in C++ using CMake. The build process is straightforward, but there are several flags you can use to customize the build for your needs. + +#### Build Commands + +1. **Clone the ICICLE repository:** + ```bash + git clone https://github.com/ingonyama-zk/icicle.git + cd icicle + ``` + +2. **Configure the build:** + ```bash + mkdir -p build && rm -rf build/* + cmake -S icicle -B build -DFIELD=babybear + ``` + +:::info +To specify the field, use the flag -DFIELD=field, where field can be one of the following: babybear, stark252, m31. + +To specify a curve, use the flag -DCURVE=curve, where curve can be one of the following: bn254, bls12_377, bls12_381, bw6_761, grumpkin. +::: + +:::tip +If you have access to cuda backend repo, it can be built along ICICLE frontend by adding the following to the cmake command +- `-DCUDA_BACKEND=local` # if you have it locally +- `-DCUDA_BACKEND=` # to pull CUDA backend, given you have access +::: + +3. **Build the project:** + ```bash + cmake --build build -j + ``` + This is building the [libicicle_device](./libraries.md#icicle-device) and the [libicicle_field_babybear](./libraries.md#icicle-core) frontend lib that correspond to the field or curve. + +4. **Link:** +Link you application (or library) to ICICLE: +```cmake +target_link_libraries(yourApp PRIVATE icicle_field_babybear icicle_device) +``` + +5. **Installation (optional):** +To install the libs, specify the install prefix in the [cmake command](./build_from_source.md#build-commands) +`-DCMAKE_INSTALL_PREFIX=/install/dir/`. Default install path on linux is `/usr/local` if not specified. For other systems it may differ. 
The cmake command will print it to the log +``` +-- CMAKE_INSTALL_PREFIX=/install/dir/for/cmake/install +``` +Then after building, use cmake to install the libraries: +``` +cmake -S icicle -B build -DFIELD=babybear -DCMAKE_INSTALL_PREFIX=/path/to/install/dir/ +cmake --build build -j # build +cmake --install build # install icicle to /path/to/install/dir/ +``` + +6. **Run tests (optional):** +Add `-DBUILD_TESTS=ON` to the [cmake command](./build_from_source.md#build-commands) and build. +Execute all tests +```bash +cmake -S icicle -B build -DFIELD=babybear -DBUILD_TESTS=ON +cmake --build build -j +cd build/tests +ctest +``` +or choose the test-suite +```bash +./build/tests/test_field_api # or another test suite +# can specify tests using regex. For example for tests with ntt in the name: +./build/tests/test_field_api --gtest_filter="*ntt*" +``` +:::note +Most tests assume a cuda backend exists and will fail otherwise if cannot find a CUDA device. +::: + +#### Build Flags + +You can customize your ICICLE build with the following flags: + +- `-DCPU_BACKEND=ON/OFF`: Enable or disable built-in CPU backend. `default=ON`. +- `-DCMAKE_INSTALL_PREFIX=/install/dir`: Specify install directory. `default=/usr/local`. +- `-DBUILD_TESTS=ON/OFF`: Enable or disable tests. `default=OFF`. +- `-DBUILD_BENCHMARKS=ON/OFF`: Enable or disable benchmarks. `default=OFF`. + +#### Features + +By default, all [features](./libraries.md#supported-curves-and-operations) are enabled. +This is since installed backends may implement and register all APIs. Missing APIs in the frontend would cause linkage to fail due to missing symbols. Therefore by default we include them in the frontend part too. + +To disable features, add the following to the cmake command. +- ntt: `-DNTT=OFF` +- msm: `-DMSM=OFF` +- g2 msm: `-DG2=OFF` +- ecntt: `-DECNTT=OFF` +- extension field: `-DEXT_FIELD=OFF` + +:::tip +Disabling features is useful when developing with a backend that is slow to compile (e.g. 
CUDA backend); +::: + +### Rust: Build, Test, and Install + +To build and test ICICLE in Rust, follow these steps: + +1. **Navigate to the Rust bindings directory:** +```bash +cd wrappers/rust # or go to a specific field/curve 'cd wrappers/rust/icicle-fields/icicle-babybear' +``` + +2. **Build the Rust project:** +```bash +cargo build --release +``` +By default, all [supported features are enabled](#features). +Cargo features are used to disable features, rather than enable them, for the reason explained [here](#features): +- `no_g2` to disable G2 MSM +- `no_ecntt` to disable ECNTT + +They can be disabled as follows: +```bash +cargo build --release --no-default-features --features=no_ecntt,no_g2 +``` + +:::note +If you have access to cuda backend repo, it can be built along ICICLE frontend by using the following cargo features: +- `cuda_backend` : if the cuda backend resides in `icicle/backend/cuda` +- `pull_cuda_backend` : to pull main branch and build it +::: + + +3. **Run tests:** +```bash +cargo test # optional: --features=no_ecntt,no_g2,cuda_backend +``` +:::note +Most tests assume a CUDA backend is installed and fail otherwise. +::: + +4. **Install the library:** + +By default, the libraries are installed to the `target//deps/icicle` dir. For custom install dir. define the env variable: +```bash +export ICICLE_INSTALL_DIR=/path/to/install/dir +``` + +(TODO: cargo install ?) + +#### Use as cargo dependency +In cargo.toml, specify the ICICLE libs to use: + +```bash +[dependencies] +icicle-runtime = { path = "git = "https://github.com/ingonyama-zk/icicle.git"" } +icicle-core = { path = "git = "https://github.com/ingonyama-zk/icicle.git"" } +icicle-bls12-377 = { path = "git = "https://github.com/ingonyama-zk/icicle.git" } +# add other ICICLE crates here if need additional fields/curves +``` + +Can specify `branch = ` or `tag = ` or `rev = `. 
+ +To disable features: +```bash +icicle-bls12-377 = { path = "git = "https://github.com/ingonyama-zk/icicle.git", features = ["no_g2"] } +``` + +As explained above, the libs will be built and installed to `target//deps/icicle` so you can easily link to them. Alternatively you can set `ICICLE_INSTALL_DIR` env variable for a custom install directory. + +:::warning +Make sure to install icicle libs when installing a library/application that depends on icicle such that it is located at runtime. +::: + +### Go: Build, Test, and Install (TODO) + +## Install cuda backend + +[Install CUDA Backend (and License)](./install_cuda_backend.md#installation) \ No newline at end of file diff --git a/docs/docs/icicle/getting_started.md b/docs/docs/icicle/getting_started.md index c728a0cd5..e1c817f1c 100644 --- a/docs/docs/icicle/getting_started.md +++ b/docs/docs/icicle/getting_started.md @@ -1,5 +1,5 @@ -# Getting Started with ICICLE V3 +# Build ICICLE from source This guide will help you get started with building, testing, and installing ICICLE, whether you're using C++, Rust, or Go. It also covers installation of the CUDA backend and important build options. @@ -48,7 +48,7 @@ target_link_libraries(yourApp PRIVATE icicle_field_babybear icicle_device) ``` 5. **Installation (optional):** -To install the libs, specify the install prefix in the [cmake command](./getting_started.md#build-commands) +To install the libs, specify the install prefix in the [cmake command](./build_from_source.md#build-commands) `-DCMAKE_INSTALL_PREFIX=/install/dir/`. Default install path on linux is `/usr/local` if not specified. For other systems it may differ. The cmake command will print it to the log ``` -- CMAKE_INSTALL_PREFIX=/install/dir/for/cmake/install @@ -61,7 +61,7 @@ cmake --install build # install icicle to /path/to/install/dir/ ``` 6. **Run tests (optional):** -Add `-DBUILD_TESTS=ON` to the [cmake command](./getting_started.md#build-commands) and build. 
+Add `-DBUILD_TESTS=ON` to the [cmake command](./build_from_source.md#build-commands) and build. Execute all tests ```bash cmake -S icicle -B build -DFIELD=babybear -DBUILD_TESTS=ON diff --git a/docs/docs/icicle/install_and_use.md b/docs/docs/icicle/install_and_use.md new file mode 100644 index 000000000..f78a0fbfe --- /dev/null +++ b/docs/docs/icicle/install_and_use.md @@ -0,0 +1,122 @@ + +# Install and use ICICLE + +## Overview + +This page describes the content of a release and how to install and use it. +Icicle binaries are released for multiple Linux distributions, including Ubuntu 20.04, Ubuntu 22.04, and CentOS 7. + +:::note +Future releases will also include MacOS and other systems. +::: + +## Content of a Release + +Each Icicle release includes a tar file, named `icicle30-.tar.gz`, where `icicle30` stands for version 3.0. This tar contains icicle-frontend build artifacts and headers for a specific distribution. The tar file includes the following structure: + +- **`./icicle/include/`**: This directory contains all the necessary header files for using the Icicle library from C++. +- **`./icicle/lib/`**: + - **Icicle Libraries**: All the core Icicle libraries are located in this directory. Applications linking to Icicle will use these libraries. + - **Backends**: The `./icicle/lib/backend/` directory houses backend libraries, including the CUDA backend (not included in this tar). + +- **CUDA backend** comes as separate tar `icicle30--cuda122.tar.gz` + - per distribution, for icicle-frontend V3.0 and CUDA 12.2. + +## installing and using icicle + +1. **Extract the Tar Files**: + - Download (TODO link to latest release) the appropriate tar files for your distribution (Ubuntu 20.04, Ubuntu 22.04, or CentOS 7). 
+ - Extract it to your desired location: + ```bash + # install the frontend part (Can skip for Rust) + tar -xzvf icicle30-.tar.gz -C /opt/ # or other non-default install directory + # install CUDA backend (Required for all programming-languages that want to use CUDA backend) + tar -xzvf icicle30--cuda122.tar.gz -C /opt/ # or other non-default install directory + ``` + + - Note that you may install to any directory and you need to make sure it can be found by the linker at runtime. + - Default location is `/opt` + +:::tip +You can install anywhere and use a link so that it can be easily found as if in the default directory. +::: + +2. **Linking Your Application**: + + **C++** + - When compiling your C++ application, link against the Icicle libraries found in `/opt/icicle/lib` or other location: + ```bash + g++ -o myapp myapp.cpp -L/opt/icicle/lib -licicle_field_babybear -licicle_curve_bn254 + ``` + + + :::note + You need to link to the Icicle device library and in addition link to each field or curve libraries. The backend libraries are dynamically loaded at runtime, so not linking to them. + ::: + + **Rust** + - When building the icicle crates, icicle frontend libs are built from source, in addition to the rust bindings. They are installed to `target//deps/icile` and the crate is linked to that at runtime. + - Need to install CUDA backend only, if tou have a CUDA GPU. + - Note: can install and link to the installed libs instead of building them from source. This is currently not supported but will be in a future release. + + **Go** - TODO + +:::warning when deploying an application (either C++, Rust or Go), you must make sure to either deploy the icicle libs (in Rust it's in `target//deps/icile` or the preinstalled ones) along the application binaries (as tar, docker image, package manager installer or else) or make sure to install icicle (and the backend) on the target machine. Otherwise the target machine will have linkage issues. 
+::: + +:::tip +If you face linkage issues, try `ldd myapp` to see the runtime deps. If ICICLE libs are not found in the filesystem, you need to add the install directory to the search path of the linker. In a development env You can do that by adding the install dir to `export LD_LIBRARY_PATH=/path/to/icicle/lib` or corresponding variables. For deployment, make sure it can be found and avoid `LD_LIBRARY_PATH`. +::: + +## Backend Loading + +The Icicle library dynamically loads backend libraries at runtime. By default, it searches for backends in the following order: + +1. **Environment Variable**: If the `ICICLE_BACKEND_INSTALL_DIR` environment variable is defined, Icicle will prioritize this location. +2. **Default Directory**: If the environment variable is not set, Icicle will search in the default directory `/opt/icicle/lib/backend`. + +:::warning +Make sure to load a backend that is compatible to the frontend version. CUDA backend libs are forward compatible with newer frontends (e.g. CUDA-backend-3.0 works with ICICLE-3.2). The opposite is not guranteed. +::: + +To load backend from ICICLE_BACKEND_INSTALL_DIR or `/opt/icicle/lib/backend` in your application: + +**C++** +```cpp +extern "C" eIcicleError icicle_load_backend_from_env_or_default(); +``` +**Rust** +```rust +pub fn load_backend_from_env_or_default() -> Result<(), eIcicleError>; +``` +**Go** +```go +TODO +``` + +### Custom Backend Loading + +If you need to load a backend from a custom location at any point during runtime, you can call the following function: + +**C++** +```cpp +extern "C" eIcicleError icicle_load_backend(const char* path, bool is_recursive); +``` +- **`path`**: The directory where the backend libraries are located. +- **`is_recursive`**: If `true`, the function will search for backend libraries recursively within the specified path. 
+ +**Rust** +```rust + pub fn load_backend(path: &str) -> Result<(), eIcicleError>; // OR + pub fn load_backend_non_recursive(path: &str) -> Result<(), eIcicleError>; +``` +- **`path`**: The directory where the backend libraries are located. + +**Go** +```go +TODO +``` +## Build the release + +This section is describing how a release is generated, given the release sources. +We use docker to represent the target environment for the release. Each Docker image is tailored to a specific distribution and CUDA version. You first build the Docker image, which sets up the environment, and then use this Docker image to build the release tar file. This ensures that the build process is consistent and reproducible across different environments. diff --git a/docs/docs/icicle/install_cuda_backend.md b/docs/docs/icicle/install_cuda_backend.md index 7d4bb2153..75e1cbecf 100644 --- a/docs/docs/icicle/install_cuda_backend.md +++ b/docs/docs/icicle/install_cuda_backend.md @@ -7,51 +7,26 @@ The CUDA backend in ICICLE V3 is a high-performance, closed-source component des ## Installation -The CUDA backend is a closed-source component that requires a license. To install the CUDA backend: +The CUDA backend is a closed-source component that requires a license. [To install the CUDA backend, see here](./install_and_use#installing-and-using-icicle). -1. **Download the CUDA backend package** from the [ICICLE website](#). TODO fix link. +### Licensing -2. **Install to the default path:** - ```bash - sudo tar -xzf icicle-cuda-backend.tar.gz -C /opt/icicle/backend/ - ``` +The CUDA backend requires a valid license to function. There are two CUDA backend license types: -3. **Set up the environment variable if you installed it in a custom location:** - ```bash - export ICICLE_BACKEND_INSTALL_DIR=/custom/path/to/icicle/backend - # OR symlink - sudo ln -s /custom/path/to/icicle/backend /opt/icicle/backend - ``` + 1. 
**Floating license**: In this mode, you will host a license-server that is supplied as binary. This license is limited to N concurrent gpus but can be distributed however the user needs between his machines. N is decremented by 1 for every GPU that is using ICICLE, per process. Once the process is terminated (or crashes), the licenses are released. + 2. **Node locked license**: in this mode, you will get a license for a specific machine. It is accepted by the CUDA backend only if used on the licensed machine. -4. **Load the backend in your application:** - ```cpp - extern "C" eIcicleError icicle_load_backend_from_env_or_default(); - // OR - extern "C" eIcicleError icicle_load_backend(const char* path, bool is_recursive); - ``` - Rust: - ```rust - pub fn load_backend_from_env_or_default() -> Result<(), eIcicleError>; - pub fn load_backend(path: &str) -> Result<(), eIcicleError>; - ``` - Go: - ``` - TODO - ``` +:::note +As for now CUDA backend can be accessed without purchasing a license. Ingonyama is hosting a license server that will allow access to anyone. +To use it, make sure to set the environment to `export ICICLE_LICNSE_SERVER_PATH=5053@ec2-50-16-150-188.compute-1.amazonaws.com` +::: -5. **Acquire a license key** from the [ICICLE website](#) and follow the provided instructions to activate it. +Licenses are available for purchase [here TODO](#) . After purchasing, you will receive a license key that must be installed on the license-server or node-locked machine. +For license-server, you will have to tell the application that is using ICICLE, where the server is. - - -### Licensing (TODO fix link) - -The CUDA backend requires a valid license to function. Licenses are available for purchase [here](#). After purchasing, you will receive a license key that must be installed. 
**Specify the license server address:** +**Specify the license server address:** ``` export ICICLE_LICNSE_SERVER_PATH=port@ip ``` -For licensing instructions and detailed information, refer to the licensing documentation provided with your purchase or contact our support team for assistance. - -TODO update section and the link in license part above. - - +For further assistance, contact our support team: `support@ingonyama.com` (TODO make sure this exists). diff --git a/docs/docs/icicle/libraries.md b/docs/docs/icicle/libraries.md index 3a0073ad4..00c561e16 100644 --- a/docs/docs/icicle/libraries.md +++ b/docs/docs/icicle/libraries.md @@ -14,7 +14,7 @@ See programmers guide for more details. [C++](./programmers_guide/cpp#device-man ICICLE Core is a template library written in C++ that implements fundamental cryptographic operations, including field and curve arithmetic, as well as higher-level APIs such as MSM and NTT. -The Core can be [instantiated](./getting_started) for different fields, curves, and other cryptographic components, allowing you to tailor it to your specific needs. You can link your application to one or more ICICLE libraries, depending on the requirements of your project. For example, you might only need the babybear library or a combination of babybear and a Merkle tree builder. +The Core can be [instantiated](./build_from_source) for different fields, curves, and other cryptographic components, allowing you to tailor it to your specific needs. You can link your application to one or more ICICLE libraries, depending on the requirements of your project. For example, you might only need the babybear library or a combination of babybear and a Merkle tree builder. 
### Rust diff --git a/docs/docs/icicle/overview.md b/docs/docs/icicle/overview.md index 49fb67ecc..e809eb24d 100644 --- a/docs/docs/icicle/overview.md +++ b/docs/docs/icicle/overview.md @@ -69,7 +69,7 @@ ICICLE is also well-suited for prototyping and developing small-scale projects. ## Get Started with ICICLE -Explore the full capabilities of ICICLE by diving into the [Architecture](./arch_overview.md), [Getting Started Guide](./getting_started.md) and the [programmers guide](./programmers_guide/general.md) to learn how to integrate, deploy, and extend ICICLE across different backends. +Explore the full capabilities of ICICLE by diving into the [Architecture](./arch_overview.md), [Getting Started Guide](./install_and_use.md) and the [programmers guide](./programmers_guide/general.md) to learn how to integrate, deploy, and extend ICICLE across different backends. If you have any questions or need support, feel free to reach out on [Discord] or [GitHub](https://github.com/ingonyama-zk). We're here to help you accelerate your ZK development with ICICLE. diff --git a/docs/docs/icicle/primitives/Icicle_Release_README.md b/docs/docs/icicle/primitives/Icicle_Release_README.md new file mode 100644 index 000000000..4d3a76e89 --- /dev/null +++ b/docs/docs/icicle/primitives/Icicle_Release_README.md @@ -0,0 +1,91 @@ + +# Icicle Release README + +## Overview + +Icicle is a powerful C++ library designed to provide flexible and efficient computation through its modular backend architecture. This README explains how to build and release Icicle for multiple Linux distributions, including Ubuntu 20.04, Ubuntu 22.04, and CentOS 7. It also describes the content of a release and how to use the generated tar files. + +## Content of a Release + +Each Icicle release includes a tar file containing the build artifacts for a specific distribution. 
The tar file includes the following structure: + +- **`./icicle/include/`**: This directory contains all the necessary header files for using the Icicle library from C++. + +- **`./icicle/lib/`**: + - **Icicle Libraries**: All the core Icicle libraries are located in this directory. Applications linking to Icicle will use these libraries. + - **Backends**: The `./icicle/lib/backend/` directory houses backend libraries, including the CUDA backend. While the CUDA backend is included, it will only be used on machines with a GPU. On machines without a GPU, the CUDA backend is not utilized. + +### Considerations + +Currently, the CUDA backend is included in every installation tar file, even on machines without a GPU. This ensures consistency across installations but results in additional files being installed that may not be used. + +**TODO**: Consider splitting the release into two separate tar files—one with the CUDA backend and one without—depending on the target machine’s hardware capabilities. + +## Build Docker Image + +To build the Docker images for each distribution and CUDA version, use the following commands: + +```bash +# Ubuntu 22.04, CUDA 12.2 +docker build -t icicle-release-ubuntu22-cuda122 -f Dockerfile.ubuntu22 . + +# Ubuntu 20.04, CUDA 12.2 +docker build -t icicle-release-ubuntu20-cuda122 -f Dockerfile.ubuntu20 . + +# CentOS 7, CUDA 12.2 +docker build -t icicle-release-centos7-cuda122 -f Dockerfile.centos7 . +``` + +### Docker Environment Explanation + +The Docker images you build represent the target environment for the release. Each Docker image is tailored to a specific distribution and CUDA version. You first build the Docker image, which sets up the environment, and then use this Docker image to build the release tar file. This ensures that the build process is consistent and reproducible across different environments. 
+ +## Build Libraries Inside the Docker + +To build the Icicle libraries inside a Docker container and output the tar file to the `release_output` directory: + +```bash +mkdir -p release_output +docker run --rm --gpus all -v ./icicle:/icicle -v ./release_output:/output -v ./scripts:/scripts icicle-release-ubuntu22-cuda122 bash /scripts/release/build_release_and_tar.sh +``` + +This command executes the `build_release_and_tar.sh` script inside the Docker container, which provides the build environment. It maps the source code and output directory to the container, ensuring the generated tar file is available on the host system. + +You can replace `icicle-release-ubuntu22-cuda122` with another Docker image tag to build in the corresponding environment (e.g., Ubuntu 20.04 or CentOS 7). + +## Installing and Using the Release + +1. **Extract the Tar File**: + - Download the appropriate tar file for your distribution (Ubuntu 20.04, Ubuntu 22.04, or CentOS 7). + - Extract it to your desired location: + ```bash + tar -xzvf icicle-<distribution>-cuda122.tar.gz -C /path/to/install/location + ``` + +2. **Linking Your Application**: + - When compiling your C++ application, link against the Icicle libraries found in `./icicle/lib/`: + ```bash + g++ -o myapp myapp.cpp -L/path/to/icicle/lib -licicle_device -licicle_field_or_curve + ``` + - Note: You only need to link to the Icicle device and field or curve libraries. The backend libraries are dynamically loaded at runtime. + +## Backend Loading + +The Icicle library dynamically loads backend libraries at runtime. By default, it searches for backends in the following order: + +1. **Environment Variable**: If the `ICICLE_BACKEND_INSTALL_DIR` environment variable is defined, Icicle will prioritize this location. +2. **Default Directory**: If the environment variable is not set, Icicle will search in the default directory `/opt/icicle/lib/backend`. 
+ +### Custom Backend Loading + +If you need to load a backend from a custom location at any point during runtime, you can call the following function: + +```cpp +extern "C" eIcicleError icicle_load_backend(const char* path, bool is_recursive); +``` + +- **`path`**: The directory where the backend libraries are located. +- **`is_recursive`**: If `true`, the function will search for backend libraries recursively within the specified path. + +--- + diff --git a/docs/sidebars.js b/docs/sidebars.js index 0da18befb..256ab6e4f 100644 --- a/docs/sidebars.js +++ b/docs/sidebars.js @@ -46,9 +46,20 @@ module.exports = { id: "icicle/libraries", }, { - type: "doc", + type: "category", label: "Getting started", - id: "icicle/getting_started" + link: { + type: `doc`, + id: "icicle/install_and_use", + }, + collapsed: false, + items: [ + { + type: "doc", + label: "Build ICICLE from source", + id: "icicle/build_from_source", + }, + ], }, { type: "category", diff --git a/scripts/release/README.md b/scripts/release/README.md index 6a8db64f8..11076007e 100644 --- a/scripts/release/README.md +++ b/scripts/release/README.md @@ -1,112 +1,3 @@ - -# Icicle Release - How to Install,Use and Release - -## Overview - -This page explains describes the content of a release and how to install and use it.
-It also explains how to build and release Icicle for multiple Linux distributions, including Ubuntu 20.04, Ubuntu 22.04, and CentOS 7. - -:::note -Future releases will also include MacOS and other systems. -::: - -## Content of a Release - -Each Icicle release includes a tar file, named `icicle30-.tar.gz`, where `icicle30` stands for version 3.0. This tar contains icicle-frontend build artifacts and headers for a specific distribution. The tar file includes the following structure: - -- **`./icicle/include/`**: This directory contains all the necessary header files for using the Icicle library from C++. -- **`./icicle/lib/`**: - - **Icicle Libraries**: All the core Icicle libraries are located in this directory. Applications linking to Icicle will use these libraries. - - **Backends**: The `./icicle/lib/backend/` directory houses backend libraries, including the CUDA backend (not included in this tar). - -- **CUDA backend** comes as separate tar `icicle30--cuda122.tar.gz` - - per distribution, for icicle-frontend V3.0 and CUDA 12.2. - -## Installing and Using the Release - -1. **Extract the Tar Files**: - - Download the appropriate tar files for your distribution (Ubuntu 20.04, Ubuntu 22.04, or CentOS 7). - - Extract it to your desired location: - ```bash - # install the frontend part (Can skip for Rust) - tar -xzvf icicle30-.tar.gz -C /opt/ # or other non-default install directory - # install CUDA backend (Required for all programming-languages that want to use CUDA backend) - tar -xzvf icicle30--cuda122.tar.gz -C /opt/ # or other non-default install directory - ``` - - - Note that you may install to any directory and you need to make sure it can be found by the linker at runtime. - -2. 
**Linking Your Application**: - **C++** - - When compiling your C++ application, link against the Icicle libraries found in `/opt/icicle/lib` or other location: - ```bash - g++ -o myapp myapp.cpp -L/opt/icicle/lib -licicle_field_babybear -licicle_curve_bn254 - ``` - - Note: You need to link to the Icicle device library and in addition link to each field or curve libraries. The backend libraries are dynamically loaded at runtime, so not linking to them. - - **Rust** - - When building the icicle crates, icicle frontend libs are built from source too. They are installed to `target//deps/icile` and the crate is linked to that at runtime. - - Need to install CUDA backend only, if tou have a CUDA GPU. - - Note: can install and link to the installed libs instead of building them from source. This is currently not supported but will be in a future release. - - **Go** - TODO - -:::warning when deploying an application (either C++, Rust or Go), you must make sure to either deploy the icicle libs (that are installed to `target//deps/icile` and also the CUDA backend) along the application binaries (as tar, docker image, package manager installer or else) or make sure to install icicle (and the backend) on the target machine. Otherwise the target machine will have linkage issues. -::: - -:::tip -If you face linkage issues, try `ldd myapp` to see the runtime deps. If ICICLE libs are not found in the filesystem, you need to add the install directory to the search path of the linker. In a development env You can do that via `LD_LIBRARY_PATH` or corresponding variables. For deployment, make sure it can be found and avoid `LD_LIBRARY_PATH`. -::: - -## Backend Loading - -The Icicle library dynamically loads backend libraries at runtime. By default, it searches for backends in the following order: - -1. **Environment Variable**: If the `ICICLE_BACKEND_INSTALL_DIR` environment variable is defined, Icicle will prioritize this location. -2. 
**Default Directory**: If the environment variable is not set, Icicle will search in the default directory `/opt/icicle/lib/backend`. - -:::warning -Make sure to load a backend that is compatible to the frontend version. CUDA backend libs are forward compatible with newer frontends (e.g. CUDA-backend-3.0 works with ICICLE-3.2). The opposite is not guranteed. -::: - -To load backend from ICICLE_BACKEND_INSTALL_DIR or /opt/icicle/lib/backend in your application: - -**C++** -```cpp -extern "C" eIcicleError icicle_load_backend_from_env_or_default(); -``` -**Rust** -```rust -pub fn load_backend_from_env_or_default() -> Result<(), eIcicleError>; -``` -**Go** -```go -TODO -``` - -### Custom Backend Loading - -If you need to load a backend from a custom location at any point during runtime, you can call the following function: - -**C++** -```cpp -extern "C" eIcicleError icicle_load_backend(const char* path, bool is_recursive); -``` -- **`path`**: The directory where the backend libraries are located. -- **`is_recursive`**: If `true`, the function will search for backend libraries recursively within the specified path. - -**Rust** -```rust - pub fn load_backend(path: &str) -> Result<(), eIcicleError>; // OR - pub fn load_backend_non_recursive(path: &str) -> Result<(), eIcicleError>; -``` -- **`path`**: The directory where the backend libraries are located. - -**Go** -```go -TODO -``` ## Build the release This section is describing how a release is generated, given the release sources.
From ed8d12ee197eca4d75b5719fedb2d3789790b80e Mon Sep 17 00:00:00 2001 From: Yuval Shekel Date: Tue, 27 Aug 2024 16:51:16 +0300 Subject: [PATCH 05/35] updated release build script for split frontend and backend libs --- scripts/release/Dockerfile.centos7 | 4 +--- scripts/release/Dockerfile.ubuntu20 | 3 --- scripts/release/Dockerfile.ubuntu22 | 5 +---- scripts/release/README.md | 3 ++- scripts/release/build_release_and_tar.sh | 26 +++++++++++++++++++----- 5 files changed, 25 insertions(+), 16 deletions(-) diff --git a/scripts/release/Dockerfile.centos7 b/scripts/release/Dockerfile.centos7 index dceb1fb4e..56bc60dfb 100644 --- a/scripts/release/Dockerfile.centos7 +++ b/scripts/release/Dockerfile.centos7 @@ -10,6 +10,4 @@ RUN yum -y update && \ cmake \ tar \ && yum clean all - -# Define the environment variable for the output tar file name -ENV OUTPUT_TAR_NAME=icicle30-centos7-cuda122.tar.gz \ No newline at end of file + \ No newline at end of file diff --git a/scripts/release/Dockerfile.ubuntu20 b/scripts/release/Dockerfile.ubuntu20 index d887ba936..3cb388db4 100644 --- a/scripts/release/Dockerfile.ubuntu20 +++ b/scripts/release/Dockerfile.ubuntu20 @@ -9,6 +9,3 @@ RUN apt-get update && apt-get install -y \ build-essential \ cmake \ tar - -# Define the environment variable for the output tar file name -ENV OUTPUT_TAR_NAME=icicle30-ubuntu20-cuda122.tar.gz diff --git a/scripts/release/Dockerfile.ubuntu22 b/scripts/release/Dockerfile.ubuntu22 index 0cac69c89..70911c4bc 100644 --- a/scripts/release/Dockerfile.ubuntu22 +++ b/scripts/release/Dockerfile.ubuntu22 @@ -4,8 +4,5 @@ FROM nvidia/cuda:12.2.0-devel-ubuntu22.04 # Install necessary packages RUN apt-get update && apt-get install -y \ build-essential \ - cmake \ + cmake \ tar - -# Define the environment variable for the output tar file name -ENV OUTPUT_TAR_NAME=icicle30-ubuntu22-cuda122.tar.gz diff --git a/scripts/release/README.md b/scripts/release/README.md index 11076007e..a9c57a248 100644 --- 
a/scripts/release/README.md +++ b/scripts/release/README.md @@ -31,10 +31,11 @@ docker run --rm --gpus all \ -v ./icicle:/icicle \ -v ./release_output:/output \ -v ./scripts:/scripts \ - icicle-release-ubuntu22-cuda122 bash /scripts/release/build_release_and_tar.sh + icicle-release-ubuntu22-cuda122 bash /scripts/release/build_release_and_tar.sh icicle30 ubuntu22 cuda122 ``` This command executes the `build_release_and_tar.sh` script inside the Docker container, which provides the build environment. It maps the source code and output directory to the container, ensuring the generated tar file is available on the host system. You can replace `icicle-release-ubuntu22-cuda122` with another Docker image tag to build in the corresponding environment (e.g., Ubuntu 20.04 or CentOS 7). +Make sure to pass corresponding OS and CUDA version in the params `icicle30 ubuntu22 cuda122`. For example for centos7 it would be `icicle30 centos7 cuda122`. diff --git a/scripts/release/build_release_and_tar.sh b/scripts/release/build_release_and_tar.sh index 7d99246df..a004e8010 100755 --- a/scripts/release/build_release_and_tar.sh +++ b/scripts/release/build_release_and_tar.sh @@ -2,18 +2,23 @@ set -e +# Accept ICICLE_VERSION, ICICLE_OS, and ICICLE_CUDA_VERSION as inputs or use defaults +ICICLE_VERSION=${1:-icicle30} # Default to "icicle30" if not set +ICICLE_OS=${2:-unknown_os} # Default to "unknown_os" if not set +ICICLE_CUDA_VERSION=${3:-cuda_unknown} # Default to "cuda_unknown" if not set + # List of fields and curves fields=("babybear" "stark252") curves=("bn254" "bls12_381" "bls12_377" "bw6_761" "grumpkin") cd / -mkdir install_dir && mkdir install_dir/icicle # output dir that is tared +mkdir -p install_dir/icicle # output dir that is tarred # Iterate over fields for field in "${fields[@]}"; do echo "Building for field: $field" - mkdir build -p && rm -rf build/* + mkdir -p build && rm -rf build/* # Configure, build, and install cmake -S icicle -B build -DFIELD=$field 
-DCUDA_BACKEND=local -DCMAKE_INSTALL_PREFIX=install_dir/icicle cmake --build build -j # build @@ -24,13 +29,24 @@ done for curve in "${curves[@]}"; do echo "Building for curve: $curve" - mkdir build -p && rm -rf build/* + mkdir -p build && rm -rf build/* # Configure, build, and install cmake -S icicle -B build -DCURVE=$curve -DCUDA_BACKEND=local -DCMAKE_INSTALL_PREFIX=install_dir/icicle cmake --build build -j # build cmake --install build # install done -# Create the tarball +# Split CUDA binaries to a separate directory to tar them separately +mkdir -p install_dir_cuda_only/icicle/lib/backend +mv install_dir/icicle/lib/backend/* install_dir_cuda_only/icicle/lib/backend + +# Copy headers +cp -r ./icicle/include install_dir/icicle + +# Create the tarball for frontend libraries cd install_dir -tar -czvf /output/${OUTPUT_TAR_NAME} icicle # tar the install dir +tar -czvf /output/${ICICLE_VERSION}-${ICICLE_OS}.tar.gz icicle # tar the install dir + +# Create tarball for CUDA backend +cd ../install_dir_cuda_only +tar -czvf /output/${ICICLE_VERSION}-${ICICLE_OS}-${ICICLE_CUDA_VERSION}.tar.gz icicle # tar the install dir \ No newline at end of file From 7e443aa2105a592bbb204f44bad7c8ffac5811e2 Mon Sep 17 00:00:00 2001 From: Yuval Shekel Date: Tue, 27 Aug 2024 19:09:20 +0300 Subject: [PATCH 06/35] update docs about install and issues that may rise --- docs/docs/icicle/getting_started.md | 182 ----------------------- docs/docs/icicle/install_and_use.md | 63 +++++--- scripts/release/build_release_and_tar.sh | 4 +- 3 files changed, 47 insertions(+), 202 deletions(-) delete mode 100644 docs/docs/icicle/getting_started.md diff --git a/docs/docs/icicle/getting_started.md b/docs/docs/icicle/getting_started.md deleted file mode 100644 index e1c817f1c..000000000 --- a/docs/docs/icicle/getting_started.md +++ /dev/null @@ -1,182 +0,0 @@ - -# Build ICICLE from source - -This guide will help you get started with building, testing, and installing ICICLE, whether you're using C++, Rust, or 
Go. It also covers installation of the CUDA backend and important build options. - -## Building and Testing ICICLE frontend - -### C++: Build, Test, and Install (Frontend) - -ICICLE can be built and tested in C++ using CMake. The build process is straightforward, but there are several flags you can use to customize the build for your needs. - -#### Build Commands - -1. **Clone the ICICLE repository:** - ```bash - git clone https://github.com/ingonyama-zk/icicle.git - cd icicle - ``` - -2. **Configure the build:** - ```bash - mkdir -p build && rm -rf build/* - cmake -S icicle -B build -DFIELD=babybear - ``` - -:::info -To specify the field, use the flag -DFIELD=field, where field can be one of the following: babybear, stark252, m31. - -To specify a curve, use the flag -DCURVE=curve, where curve can be one of the following: bn254, bls12_377, bls12_381, bw6_761, grumpkin. -::: - -:::tip -If you have access to cuda backend repo, it can be built along ICICLE frontend by adding the following to the cmake command -- `-DCUDA_BACKEND=local` # if you have it locally -- `-DCUDA_BACKEND=` # to pull CUDA backend, given you have access -::: - -3. **Build the project:** - ```bash - cmake --build build -j - ``` - This is building the [libicicle_device](./libraries.md#icicle-device) and the [libicicle_field_babybear](./libraries.md#icicle-core) frontend lib that correspond to the field or curve. - -4. **Link:** -Link you application (or library) to ICICLE: -```cmake -target_link_libraries(yourApp PRIVATE icicle_field_babybear icicle_device) -``` - -5. **Installation (optional):** -To install the libs, specify the install prefix in the [cmake command](./build_from_source.md#build-commands) -`-DCMAKE_INSTALL_PREFIX=/install/dir/`. Default install path on linux is `/usr/local` if not specified. For other systems it may differ. 
The cmake command will print it to the log -``` --- CMAKE_INSTALL_PREFIX=/install/dir/for/cmake/install -``` -Then after building, use cmake to install the libraries: -``` -cmake -S icicle -B build -DFIELD=babybear -DCMAKE_INSTALL_PREFIX=/path/to/install/dir/ -cmake --build build -j # build -cmake --install build # install icicle to /path/to/install/dir/ -``` - -6. **Run tests (optional):** -Add `-DBUILD_TESTS=ON` to the [cmake command](./build_from_source.md#build-commands) and build. -Execute all tests -```bash -cmake -S icicle -B build -DFIELD=babybear -DBUILD_TESTS=ON -cmake --build build -j -cd build/tests -ctest -``` -or choose the test-suite -```bash -./build/tests/test_field_api # or another test suite -# can specify tests using regex. For example for tests with ntt in the name: -./build/tests/test_field_api --gtest_filter="*ntt*" -``` -:::note -Most tests assume a cuda backend exists and will fail otherwise if cannot find a CUDA device. -::: - -#### Build Flags - -You can customize your ICICLE build with the following flags: - -- `-DCPU_BACKEND=ON/OFF`: Enable or disable built-in CPU backend. `default=ON`. -- `-DCMAKE_INSTALL_PREFIX=/install/dir`: Specify install directory. `default=/usr/local`. -- `-DBUILD_TESTS=ON/OFF`: Enable or disable tests. `default=OFF`. -- `-DBUILD_BENCHMARKS=ON/OFF`: Enable or disable benchmarks. `default=OFF`. - -#### Features - -By default, all [features](./libraries.md#supported-curves-and-operations) are enabled. -This is since installed backends may implement and register all APIs. Missing APIs in the frontend would cause linkage to fail due to missing symbols. Therefore by default we include them in the frontend part too. - -To disable features, add the following to the cmake command. -- ntt: `-DNTT=OFF` -- msm: `-DMSM=OFF` -- g2 msm: `-DG2=OFF` -- ecntt: `-DECNTT=OFF` -- extension field: `-DEXT_FIELD=OFF` - -:::tip -Disabling features is useful when developing with a backend that is slow to compile (e.g. 
CUDA backend); -::: - -### Rust: Build, Test, and Install - -To build and test ICICLE in Rust, follow these steps: - -1. **Navigate to the Rust bindings directory:** -```bash -cd wrappers/rust # or go to a specific field/curve 'cd wrappers/rust/icicle-fields/icicle-babybear' -``` - -2. **Build the Rust project:** -```bash -cargo build --release -``` -By default, all [supported features are enabled](#features). -Cargo features are used to disable features, rather than enable them, for the reason explained [here](#features): -- `no_g2` to disable G2 MSM -- `no_ecntt` to disable ECNTT - -They can be disabled as follows: -```bash -cargo build --release --no-default-features --features=no_ecntt,no_g2 -``` - -:::note -If you have access to cuda backend repo, it can be built along ICICLE frontend by using the following cargo features: -- `cuda_backend` : if the cuda backend resides in `icicle/backend/cuda` -- `pull_cuda_backend` : to pull main branch and build it -::: - - -3. **Run tests:** -```bash -cargo test # optional: --features=no_ecntt,no_g2,cuda_backend -``` -:::note -Most tests assume a CUDA backend is installed and fail otherwise. -::: - -4. **Install the library:** - -By default, the libraries are installed to the `target//deps/icicle` dir. For custom install dir. define the env variable: -```bash -export ICICLE_INSTALL_DIR=/path/to/install/dir -``` - -(TODO: cargo install ?) - -#### Use as cargo dependency -In cargo.toml, specify the ICICLE libs to use: - -```bash -[dependencies] -icicle-runtime = { path = "git = "https://github.com/ingonyama-zk/icicle.git"" } -icicle-core = { path = "git = "https://github.com/ingonyama-zk/icicle.git"" } -icicle-bls12-377 = { path = "git = "https://github.com/ingonyama-zk/icicle.git" } -# add other ICICLE crates here if need additional fields/curves -``` - -Can specify `branch = ` or `tag = ` or `rev = `. 
- -To disable features: -```bash -icicle-bls12-377 = { path = "git = "https://github.com/ingonyama-zk/icicle.git", features = ["no_g2"] } -``` - -As explained above, the libs will be built and installed to `target//deps/icicle` so you can easily link to them. Alternatively you can set `ICICLE_INSTALL_DIR` env variable for a custom install directory. - -:::warning -Make sure to install icicle libs when installing a library/application that depends on icicle such that it is located at runtime. -::: - -### Go: Build, Test, and Install (TODO) - -## Install cuda backend - -[Install CUDA Backend (and License)](./install_cuda_backend.md#installation) \ No newline at end of file diff --git a/docs/docs/icicle/install_and_use.md b/docs/docs/icicle/install_and_use.md index f78a0fbfe..8e4fdf330 100644 --- a/docs/docs/icicle/install_and_use.md +++ b/docs/docs/icicle/install_and_use.md @@ -36,38 +36,52 @@ Each Icicle release includes a tar file, named `icicle30-.tar.gz`, - Note that you may install to any directory and you need to make sure it can be found by the linker at runtime. - Default location is `/opt` - + :::tip You can install anywhere and use a link so that it can be easily found as if in the default directory. ::: 2. **Linking Your Application**: - + + Apps need to link to the ICICLE device library and in addition link to each field or curve libraries. The backend libraries are dynamically loaded at runtime, so not linking to them. + **C++** - When compiling your C++ application, link against the Icicle libraries found in `/opt/icicle/lib` or other location: ```bash - g++ -o myapp myapp.cpp -L/opt/icicle/lib -licicle_field_babybear -licicle_curve_bn254 + g++ -o myapp myapp.cpp -I/opt/icicle/include -L/opt/icicle/lib -licicle_device -licicle_field_bn254 -licicle_curve_bn254 -Wl,-rpath,/opt/icicle/lib/ ``` - - :::note - You need to link to the Icicle device library and in addition link to each field or curve libraries. 
The backend libraries are dynamically loaded at runtime, so not linking to them. - ::: + - Or via cmake + ```bash + # Include directories + include_directories(/path/to/install/dir/icicle/include) + # Library directories + link_directories(/path/to/install/dir/icicle/lib/) + # Add the executable + add_executable(example example.cpp) + # Link the libraries + target_link_libraries(example icicle_device icicle_field_bn254 icicle_curve_bn254) + # Set the RPATH so linker finds icicle libs at runtime + set_target_properties(example PROPERTIES + BUILD_RPATH /path/to/install/dir/icicle/lib/ + INSTALL_RPATH /path/to/install/dir/icicle/lib/) + ``` + + :::tip + If you face linkage issues, try `ldd myapp` to see the runtime deps. If ICICLE libs are not found, you need to add the install directory to the search path of the linker. In a development env you can do that using the env variable `export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/path/to/icicle/lib` or similar (for non linux). For deployment, make sure it can be found and avoid `LD_LIBRARY_PATH`. + + Alternatively you can embed the search path on the app as an `rpath` by adding `-Wl,-rpath,/path/to/icicle/lib/`. Now the linker will search there too. + ::: **Rust** - - When building the icicle crates, icicle frontend libs are built from source, in addition to the rust bindings. They are installed to `target//deps/icile` and the crate is linked to that at runtime. - - Need to install CUDA backend only, if tou have a CUDA GPU. - - Note: can install and link to the installed libs instead of building them from source. This is currently not supported but will be in a future release. + - When building the icicle crates, icicle frontend libs are built from source, in addition to the rust bindings. They are installed to `target//deps/icile` and cargo will link correctly. Note that you still need to install CUDA backend if you have a CUDA GPU. + - Simply use `cargo build` or `cargo run` and it should link to icicle libs. 
**Go** - TODO :::warning when deploying an application (either C++, Rust or Go), you must make sure to either deploy the icicle libs (in Rust it's in `target//deps/icile` or the preinstalled ones) along the application binaries (as tar, docker image, package manager installer or else) or make sure to install icicle (and the backend) on the target machine. Otherwise the target machine will have linkage issues. ::: -:::tip -If you face linkage issues, try `ldd myapp` to see the runtime deps. If ICICLE libs are not found in the filesystem, you need to add the install directory to the search path of the linker. In a development env You can do that by adding the install dir to `export LD_LIBRARY_PATH=/path/to/icicle/lib` or corresponding variables. For deployment, make sure it can be found and avoid `LD_LIBRARY_PATH`. -::: - ## Backend Loading The Icicle library dynamically loads backend libraries at runtime. By default, it searches for backends in the following order: @@ -79,7 +93,13 @@ The Icicle library dynamically loads backend libraries at runtime. By default, i Make sure to load a backend that is compatible to the frontend version. CUDA backend libs are forward compatible with newer frontends (e.g. CUDA-backend-3.0 works with ICICLE-3.2). The opposite is not guranteed. 
::: -To load backend from ICICLE_BACKEND_INSTALL_DIR or `/opt/icicle/lib/backend` in your application: +If you install in a custom dir, make sure to set `ICICLE_BACKEND_INSTALL_DIR`: +```bash +ICICLE_BACKEND_INSTALL_DIR=path/to/icicle/lib/backend/ myapp # for an executable maypp +ICICLE_BACKEND_INSTALL_DIR=path/to/icicle/lib/backend/ cargo run # when using cargo +``` + +Then to load backend from ICICLE_BACKEND_INSTALL_DIR or `/opt/icicle/lib/backend` in your application: **C++** ```cpp @@ -116,7 +136,14 @@ extern "C" eIcicleError icicle_load_backend(const char* path, bool is_recursive) ```go TODO ``` -## Build the release -This section is describing how a release is generated, given the release sources. -We use docker to represent the target environment for the release. Each Docker image is tailored to a specific distribution and CUDA version. You first build the Docker image, which sets up the environment, and then use this Docker image to build the release tar file. This ensures that the build process is consistent and reproducible across different environments. +:::note +When loading from the backends dir, you may see the following: +``` +[INFO] Attempting to load: some/path/icicle/lib/backend/bls12_381/cuda/libicicle_cuda_curve_bls12_381.so +[INFO] Failed to load some/path/icicle/lib/backend/bls12_381/cuda/libicicle_cuda_curve_bls12_381.so: libicicle_curve_bls12_381.so: cannot open shared object file: No such file or directory +``` + +In this case the cuda backend for bls12_381 curve failed to load since it cannot find the corresponding frontend. This should not happen if the bls12_381 frontend is linked to the application. +Also note that if the frontend libs are installed and found by the linker, they will be loaded as well. This is not a problem except for loading unused libs to the process. Can avoid it by specifying a more specific path but make sure to load all required libs. 
+::: diff --git a/scripts/release/build_release_and_tar.sh b/scripts/release/build_release_and_tar.sh index a004e8010..544cc575a 100755 --- a/scripts/release/build_release_and_tar.sh +++ b/scripts/release/build_release_and_tar.sh @@ -20,7 +20,7 @@ for field in "${fields[@]}"; do mkdir -p build && rm -rf build/* # Configure, build, and install - cmake -S icicle -B build -DFIELD=$field -DCUDA_BACKEND=local -DCMAKE_INSTALL_PREFIX=install_dir/icicle + cmake -S icicle -B build -DFIELD=$field -DCUDA_BACKEND=local -DCMAKE_INSTALL_PREFIX=install_dir/icicle -DCMAKE_BUILD_TYPE=Release cmake --build build -j # build cmake --install build # install done @@ -31,7 +31,7 @@ for curve in "${curves[@]}"; do mkdir -p build && rm -rf build/* # Configure, build, and install - cmake -S icicle -B build -DCURVE=$curve -DCUDA_BACKEND=local -DCMAKE_INSTALL_PREFIX=install_dir/icicle + cmake -S icicle -B build -DCURVE=$curve -DCUDA_BACKEND=local -DCMAKE_INSTALL_PREFIX=install_dir/icicle -DCMAKE_BUILD_TYPE=Release cmake --build build -j # build cmake --install build # install done From 6440c7418909c84b359bf6a72eb2fd3d658b0fb8 Mon Sep 17 00:00:00 2001 From: Yuval Shekel Date: Tue, 27 Aug 2024 19:26:48 +0300 Subject: [PATCH 07/35] skip libraries that do not seem to be icicle backend libs --- icicle/src/runtime.cpp | 24 +++++++++++++++++++++--- 1 file changed, 21 insertions(+), 3 deletions(-) diff --git a/icicle/src/runtime.cpp b/icicle/src/runtime.cpp index ce36de773..2d1a2acb4 100644 --- a/icicle/src/runtime.cpp +++ b/icicle/src/runtime.cpp @@ -274,9 +274,27 @@ extern "C" eIcicleError icicle_load_backend(const char* path, bool is_recursive) }; auto load_library = [](const char* filePath) { - ICICLE_LOG_DEBUG << "Attempting load: " << filePath; - void* handle = dlopen(filePath, RTLD_LAZY | RTLD_GLOBAL); - if (!handle) { ICICLE_LOG_ERROR << "Failed to load " << filePath << ": " << dlerror(); } + // Convert the file path to a std::string for easier manipulation + std::string path(filePath); + + 
// Extract the library name from the full path + std::string fileName = path.substr(path.find_last_of("/\\") + 1); + + // Check if the library name contains "icicle" and if the path contains "/backend/" + if (fileName.find("icicle") == std::string::npos || path.find("/backend/") == std::string::npos) { + ICICLE_LOG_DEBUG << "Skipping: " << filePath << " - Not an Icicle backend library."; + return; + } + + // Check if the library name contains "device". If yes, load it with GLOBAL visibility, otherwise LOCAL. + // The logic behind it is to avoid symbol conflicts by using LOCAL visibility but allow backends to expose symbols to the other backend libs. + // For example to reuse some device context or any initialization required by APIs that we want to do once. + int flags = (fileName.find("device") != std::string::npos) ? (RTLD_LAZY | RTLD_GLOBAL) : (RTLD_LAZY | RTLD_LOCAL); + + // Attempt to load the library with the appropriate flags + ICICLE_LOG_DEBUG << "Attempting to load: " << filePath; + void* handle = dlopen(filePath, flags); + if (!handle) { ICICLE_LOG_DEBUG << "Failed to load " << filePath << ": " << dlerror(); } }; if (is_directory(path)) { From b50d089b38522ad5e87ac507b8274b1191c2df5e Mon Sep 17 00:00:00 2001 From: Yuval Shekel Date: Tue, 27 Aug 2024 22:28:13 +0300 Subject: [PATCH 08/35] fix spelling and formatting --- docs/docs/icicle/install_and_use.md | 2 +- icicle/src/runtime.cpp | 5 +++-- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/docs/docs/icicle/install_and_use.md b/docs/docs/icicle/install_and_use.md index 8e4fdf330..758d82c4a 100644 --- a/docs/docs/icicle/install_and_use.md +++ b/docs/docs/icicle/install_and_use.md @@ -90,7 +90,7 @@ The Icicle library dynamically loads backend libraries at runtime. By default, i 2. **Default Directory**: If the environment variable is not set, Icicle will search in the default directory `/opt/icicle/lib/backend`. :::warning -Make sure to load a backend that is compatible to the frontend version. 
CUDA backend libs are forward compatible with newer frontends (e.g. CUDA-backend-3.0 works with ICICLE-3.2). The opposite is not guranteed. +Make sure to load a backend that is compatible to the frontend version. CUDA backend libs are forward compatible with newer frontends (e.g. CUDA-backend-3.0 works with ICICLE-3.2). The opposite is not guaranteed. ::: If you install in a custom dir, make sure to set `ICICLE_BACKEND_INSTALL_DIR`: diff --git a/icicle/src/runtime.cpp b/icicle/src/runtime.cpp index 2d1a2acb4..fb6d07f82 100644 --- a/icicle/src/runtime.cpp +++ b/icicle/src/runtime.cpp @@ -287,8 +287,9 @@ extern "C" eIcicleError icicle_load_backend(const char* path, bool is_recursive) } // Check if the library name contains "device". If yes, load it with GLOBAL visibility, otherwise LOCAL. - // The logic behind it is to avoid symbol conflicts by using LOCAL visibility but allow backends to expose symbols to the other backend libs. - // For example to reuse some device context or any initialization required by APIs that we want to do once. + // The logic behind it is to avoid symbol conflicts by using LOCAL visibility but allow backends to expose symbols + // to the other backend libs. For example to reuse some device context or any initialization required by APIs that + // we want to do once. int flags = (fileName.find("device") != std::string::npos) ? 
(RTLD_LAZY | RTLD_GLOBAL) : (RTLD_LAZY | RTLD_LOCAL); // Attempt to load the library with the appropriate flags From 899159afff0461d3e2c34a349b174b322057c92e Mon Sep 17 00:00:00 2001 From: Yuval Shekel Date: Wed, 28 Aug 2024 14:12:23 +0300 Subject: [PATCH 09/35] add C++ example for install and use icicle release --- docs/docs/icicle/install_and_use.md | 57 ++++++------ .../c++/install_and_use_icicle/CMakeLists.txt | 19 ++++ examples/c++/install_and_use_icicle/README.md | 46 ++++++++++ .../c++/install_and_use_icicle/example.cpp | 87 +++++++++++++++++++ scripts/release/Dockerfile.ubuntu20 | 24 ++++- scripts/release/README.md | 8 ++ 6 files changed, 210 insertions(+), 31 deletions(-) create mode 100644 examples/c++/install_and_use_icicle/CMakeLists.txt create mode 100644 examples/c++/install_and_use_icicle/README.md create mode 100644 examples/c++/install_and_use_icicle/example.cpp diff --git a/docs/docs/icicle/install_and_use.md b/docs/docs/icicle/install_and_use.md index 758d82c4a..ad5377a2f 100644 --- a/docs/docs/icicle/install_and_use.md +++ b/docs/docs/icicle/install_and_use.md @@ -25,42 +25,54 @@ Each Icicle release includes a tar file, named `icicle30-.tar.gz`, ## installing and using icicle 1. **Extract the Tar Files**: - - Download (TODO link to latest release) the appropriate tar files for your distribution (Ubuntu 20.04, Ubuntu 22.04, or CentOS 7). + - Download (TODO link to latest release) the appropriate tar files for your distribution (Ubuntu 20.04, Ubuntu 22.04, or UBI 7,8,9 for RHEL compatible binaries). + - **Frontend libs and headers** should be installed in default search paths (such as `/usr/local/lib` and `usr/local/include`) for the compiler and linker to find. 
+ - **Backend libs** should be installed in `/opt` - Extract it to your desired location: ```bash - # install the frontend part (Can skip for Rust) - tar -xzvf icicle30-.tar.gz -C /opt/ # or other non-default install directory + # install the frontend part (Can skip for Rust) + tar -xzvf icicle30-.tar.gz -C /path/to/extract/ + cp -r /path/to/extract/icicle/include /usr/local/include/icicle # or any other + cp -r /path/to/extract/icicle/lib /usr/local/lib # or any other # install CUDA backend (Required for all programming-languages that want to use CUDA backend) tar -xzvf icicle30--cuda122.tar.gz -C /opt/ # or other non-default install directory ``` - - Note that you may install to any directory and you need to make sure it can be found by the linker at runtime. - - Default location is `/opt` + :::note + You may install to any directory but need to make sure it can be found by the linker at compile and runtime. + For example can extract the frontend to `/opt` too. + ::: -:::tip -You can install anywhere and use a link so that it can be easily found as if in the default directory. -::: + :::tip + You can install anywhere and use a link so that it can be easily found as if in the default directory. + ::: -2. **Linking Your Application**: +1. **Linking Your Application**: - Apps need to link to the ICICLE device library and in addition link to each field or curve libraries. The backend libraries are dynamically loaded at runtime, so not linking to them. + Apps need to link to the ICICLE device library and to every field and/or curve libraries. The backend libraries are dynamically loaded at runtime, so not linking to them. 
**C++** - - When compiling your C++ application, link against the Icicle libraries found in `/opt/icicle/lib` or other location: + - When compiling your C++ application, link against the Icicle libraries: ```bash + g++ -o myapp myapp.cpp -licicle_device -licicle_field_bn254 -licicle_curve_bn254 + + # if not installed in standard dirs, for example /opt, need to specify it g++ -o myapp myapp.cpp -I/opt/icicle/include -L/opt/icicle/lib -licicle_device -licicle_field_bn254 -licicle_curve_bn254 -Wl,-rpath,/opt/icicle/lib/ ``` - Or via cmake ```bash - # Include directories - include_directories(/path/to/install/dir/icicle/include) - # Library directories - link_directories(/path/to/install/dir/icicle/lib/) # Add the executable add_executable(example example.cpp) # Link the libraries target_link_libraries(example icicle_device icicle_field_bn254 icicle_curve_bn254) + + # OPTIONAL (if not installed in default location) + # Include directories + include_directories(/path/to/install/dir/icicle/include) + # Library directories + link_directories(/path/to/install/dir/icicle/lib/) + # Set the RPATH so linker finds icicle libs at runtime set_target_properties(example PROPERTIES BUILD_RPATH /path/to/install/dir/icicle/lib/ @@ -70,7 +82,7 @@ You can install anywhere and use a link so that it can be easily found as if in :::tip If you face linkage issues, try `ldd myapp` to see the runtime deps. If ICICLE libs are not found, you need to add the install directory to the search path of the linker. In a development env you can do that using the env variable `export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/path/to/icicle/lib` or similar (for non linux). For deployment, make sure it can be found and avoid `LD_LIBRARY_PATH`. - Alternatively you can embed the search path on the app as an `rpath` by adding `-Wl,-rpath,/path/to/icicle/lib/`. Now the linker will search there too. + Alternatively you can embed the search path on the app as an `rpath` by adding `-Wl,-rpath,/path/to/icicle/lib/`. 
This is what is demonstrated above. ::: **Rust** @@ -90,7 +102,7 @@ The Icicle library dynamically loads backend libraries at runtime. By default, i 2. **Default Directory**: If the environment variable is not set, Icicle will search in the default directory `/opt/icicle/lib/backend`. :::warning -Make sure to load a backend that is compatible to the frontend version. CUDA backend libs are forward compatible with newer frontends (e.g. CUDA-backend-3.0 works with ICICLE-3.2). The opposite is not guaranteed. +If building ICICLE frontend from source, make sure to load a backend that is compatible to the frontend version. CUDA backend libs are forward compatible with newer frontends (e.g. CUDA-backend-3.0 works with ICICLE-3.2). The opposite is not guaranteed. ::: If you install in a custom dir, make sure to set `ICICLE_BACKEND_INSTALL_DIR`: @@ -136,14 +148,3 @@ extern "C" eIcicleError icicle_load_backend(const char* path, bool is_recursive) ```go TODO ``` - -:::note -When loading from the backends dir, you may see the following: -``` -[INFO] Attempting to load: some/path/icicle/lib/backend/bls12_381/cuda/libicicle_cuda_curve_bls12_381.so -[INFO] Failed to load some/path/icicle/lib/backend/bls12_381/cuda/libicicle_cuda_curve_bls12_381.so: libicicle_curve_bls12_381.so: cannot open shared object file: No such file or directory -``` - -In this case the cuda backend for bls12_381 curve failed to load since it cannot find the corresponding frontend. This should not happen if the bls12_381 frontend is linked to the application. -Also note that if the frontend libs are installed and found by the linker, they will be loaded as well. This is not a problem except for loading unused libs to the process. Can avoid it by specifying a more specific path but make sure to load all required libs. 
-::: diff --git a/examples/c++/install_and_use_icicle/CMakeLists.txt b/examples/c++/install_and_use_icicle/CMakeLists.txt new file mode 100644 index 000000000..1f11e64a9 --- /dev/null +++ b/examples/c++/install_and_use_icicle/CMakeLists.txt @@ -0,0 +1,19 @@ +# Add the executable +add_executable(example example.cpp) +# Link the libraries +target_link_libraries(example icicle_device icicle_field_bn254 icicle_curve_bn254) + +# OPTIONAL (if not installed in default location) + +# The following is setting compile and runtime paths for headers and libs assuming +# - headers in /opt/icicle/include +# - libs in /opt/icicle/lib + +# # Include directories +# include_directories(/opt/icicle/include) +# # Library directories +# link_directories(/opt/icicle/lib/) +# Set the RPATH so linker finds icicle libs at runtime +# set_target_properties(example PROPERTIES +# BUILD_RPATH /opt/icicle/lib/ +# INSTALL_RPATH /opt/icicle/lib/) \ No newline at end of file diff --git a/examples/c++/install_and_use_icicle/README.md b/examples/c++/install_and_use_icicle/README.md new file mode 100644 index 000000000..bf80534ec --- /dev/null +++ b/examples/c++/install_and_use_icicle/README.md @@ -0,0 +1,46 @@ + + +# Install and use ICICLE + +## Optional: use a docker for env with install permissions if need it. +```bash +docker run -it --rm --gpus all -v ./:/workspace -w /workspace icicle-release-ubuntu22-cuda122 bash +``` + +### Building the docker image +This image is based on nvidia's image for ubuntu22. 
built from the Dockerfile: +```dockerfile +# Use the official NVIDIA development runtime image for Ubuntu 22.04 +FROM nvidia/cuda:12.2.0-devel-ubuntu22.04 + +# Install necessary packages +RUN apt-get update && apt-get install -y \ + build-essential \ + cmake \ + tar +``` + +by `docker build -t icicle-release-ubuntu20-cuda122 -f Dockerfile.ubuntu20 .` + +## Extract tars and install +```bash +cd release +# extract frontend part +tar xzvf icicle30-ubuntu22.tar.gz +cp -r ./icicle/lib/* /usr/lib/ +cp -r ./icicle/include/icicle/ /usr/local/include/ # copy C++ headers +# extract CUDA backend +tar xzvf icicle30-ubuntu22-cuda122.tar.gz -C /opt +``` + +## Compile and link C++ example to icicle +```bash +cd .. +mkdir build +cmake -S . -B build && cmake --build build +``` + +## Launch the executable +```bash +./build/example +``` diff --git a/examples/c++/install_and_use_icicle/example.cpp b/examples/c++/install_and_use_icicle/example.cpp new file mode 100644 index 000000000..76495bf78 --- /dev/null +++ b/examples/c++/install_and_use_icicle/example.cpp @@ -0,0 +1,87 @@ +#include +#include +#include "icicle/runtime.h" +#include "icicle/api/bn254.h" + +using namespace bn254; // This makes scalar_t a bn254 scalar instead of bn254::scalar_t + +// Utility function to print arrays +template +void print_array(const T* arr, int size) { + for (int i = 0; i < size; ++i) { + std::cout << "\t" << i << ": " << arr[i] << std::endl; + } +} + +int main(int argc, char* argv[]) { + // Load installed backends + icicle_load_backend_from_env_or_default(); + + // Check if GPU is available + const bool is_cuda_device_available = (eIcicleError::SUCCESS == icicle_is_device_avialable("CUDA")); + Device device_cpu = {"CPU", 0}; + Device device_gpu = is_cuda_device_available ? 
Device{"CUDA", 0} : device_cpu; + if (is_cuda_device_available) { std::cout << "GPU is available" << std::endl;} + else { std::cout << "GPU is not available" << std::endl;} + + + // Example input (on host memory) for NTT + const unsigned log_ntt_size = 2; + const unsigned ntt_size = 1 << log_ntt_size; + auto input_cpu = std::make_unique(ntt_size); + scalar_t::rand_host_many(input_cpu.get(), ntt_size); + + // Allocate output on host memory + auto output_cpu = std::make_unique(ntt_size); + scalar_t root_of_unity = scalar_t::omega(log_ntt_size); + auto ntt_config = default_ntt_config(); + + // Part 1: Running NTT on CPU + std::cout << "Part 1: compute on CPU: " << std::endl; + icicle_set_device(device_cpu); + ntt_init_domain(root_of_unity, default_ntt_init_domain_config()); // Initialize NTT domain for CPU + ntt(input_cpu.get(), ntt_size, NTTDir::kForward, default_ntt_config(), output_cpu.get()); + print_array(output_cpu.get(), ntt_size); + + // Part 2: Running NTT on GPU + std::cout << "Part 2: compute on GPU (from/to CPU memory): " << std::endl; + icicle_set_device(device_gpu); + ntt_init_domain(root_of_unity, default_ntt_init_domain_config()); // Initialize NTT domain for GPU + ntt(input_cpu.get(), ntt_size, NTTDir::kForward, ntt_config, output_cpu.get()); + print_array(output_cpu.get(), ntt_size); + + // Allocate, copy data to GPU and compute on GPU memory + std::cout << "Part 2: compute on GPU (from/to GPU memory): " << std::endl; + scalar_t* input_gpu = nullptr; + scalar_t* output_gpu = nullptr; + icicle_malloc((void**)&input_gpu, ntt_size * sizeof(scalar_t)); + icicle_malloc((void**)&output_gpu, ntt_size * sizeof(scalar_t)); + icicle_copy(input_gpu, input_cpu.get(), ntt_size * sizeof(scalar_t)); + ntt_config.are_inputs_on_device = true; + ntt_config.are_outputs_on_device = true; + ntt(input_gpu, ntt_size, NTTDir::kForward, ntt_config, output_gpu); + icicle_copy(output_cpu.get(), output_gpu, ntt_size * sizeof(scalar_t)); + print_array(output_cpu.get(), 
ntt_size); + + // Part 3: Using both CPU and GPU to compute NTT (GPU) and inverse INTT (CPU) + auto output_intt_cpu = std::make_unique(ntt_size); + + // Step 1: Compute NTT on GPU + std::cout << "Part 3: compute NTT on GPU (NTT input): " << std::endl; + icicle_set_device(device_gpu); + ntt_config.are_inputs_on_device = false; // using host memory now + ntt_config.are_outputs_on_device = false; + ntt(input_cpu.get(), ntt_size, NTTDir::kForward, ntt_config, output_cpu.get()); + print_array(input_cpu.get(), ntt_size); + + // Step 2: Compute INTT on CPU + std::cout << "Part 3: compute INTT on CPU (INTT output): " << std::endl; + icicle_set_device(device_cpu); + ntt(output_cpu.get(), ntt_size, NTTDir::kInverse, ntt_config, output_intt_cpu.get()); + print_array(output_intt_cpu.get(), ntt_size); + + // Assert that INTT output is the same as NTT input + assert(0 == memcmp(input_cpu.get(), output_intt_cpu.get(), ntt_size * sizeof(scalar_t))); + + return 0; +} \ No newline at end of file diff --git a/scripts/release/Dockerfile.ubuntu20 b/scripts/release/Dockerfile.ubuntu20 index 3cb388db4..edb48d1b4 100644 --- a/scripts/release/Dockerfile.ubuntu20 +++ b/scripts/release/Dockerfile.ubuntu20 @@ -1,4 +1,4 @@ -# Use the official NVIDIA development runtime image for Ubuntu 20.04 +# Use the official NVIDIA CUDA development image for Ubuntu 20.04 FROM nvidia/cuda:12.2.0-devel-ubuntu20.04 # Prevent interactive prompts during package installation @@ -7,5 +7,23 @@ ENV DEBIAN_FRONTEND=noninteractive # Install necessary packages RUN apt-get update && apt-get install -y \ build-essential \ - cmake \ - tar + wget \ + tar \ + libssl-dev \ + libcurl4-openssl-dev \ + libarchive-dev \ + zlib1g-dev + +# Install the latest stable version of CMake from source +RUN CMAKE_VERSION=3.27.4 \ + && wget https://github.com/Kitware/CMake/releases/download/v${CMAKE_VERSION}/cmake-${CMAKE_VERSION}.tar.gz \ + && tar -zxvf cmake-${CMAKE_VERSION}.tar.gz \ + && cd cmake-${CMAKE_VERSION} \ + && ./bootstrap \ + 
&& make -j$(nproc) \ + && make install \ + && cd .. \ + && rm -rf cmake-${CMAKE_VERSION} cmake-${CMAKE_VERSION}.tar.gz + +# Set CMake as the default version +RUN ln -sf /usr/local/bin/cmake /usr/bin/cmake \ No newline at end of file diff --git a/scripts/release/README.md b/scripts/release/README.md index a9c57a248..627a0cbc5 100644 --- a/scripts/release/README.md +++ b/scripts/release/README.md @@ -27,11 +27,19 @@ To build the Icicle libraries inside a Docker container and output the tar file ```bash mkdir -p release_output +# ubuntu 22 docker run --rm --gpus all \ -v ./icicle:/icicle \ -v ./release_output:/output \ -v ./scripts:/scripts \ icicle-release-ubuntu22-cuda122 bash /scripts/release/build_release_and_tar.sh icicle30 ubuntu22 cuda122 + +# ubuntu 20 +docker run --rm --gpus all \ + -v ./icicle:/icicle \ + -v ./release_output:/output \ + -v ./scripts:/scripts \ + icicle-release-ubuntu20-cuda122 bash /scripts/release/build_release_and_tar.sh icicle30 ubuntu20 cuda122 ``` This command executes the `build_release_and_tar.sh` script inside the Docker container, which provides the build environment. It maps the source code and output directory to the container, ensuring the generated tar file is available on the host system. 
From ea5641acaa17752eacfccfc3d7270f10adc9645d Mon Sep 17 00:00:00 2001 From: Yuval Shekel Date: Wed, 28 Aug 2024 14:54:42 +0300 Subject: [PATCH 10/35] cover custom install path too --- .../c++/install_and_use_icicle/CMakeLists.txt | 18 ++--- examples/c++/install_and_use_icicle/README.md | 66 ++++++++++++++++++- 2 files changed, 72 insertions(+), 12 deletions(-) diff --git a/examples/c++/install_and_use_icicle/CMakeLists.txt b/examples/c++/install_and_use_icicle/CMakeLists.txt index 1f11e64a9..1b86fab2b 100644 --- a/examples/c++/install_and_use_icicle/CMakeLists.txt +++ b/examples/c++/install_and_use_icicle/CMakeLists.txt @@ -6,14 +6,14 @@ target_link_libraries(example icicle_device icicle_field_bn254 icicle_curve_bn25 # OPTIONAL (if not installed in default location) # The following is setting compile and runtime paths for headers and libs assuming -# - headers in /opt/icicle/include -# - libs in /opt/icicle/lib +# - headers in /custom/path/icicle/include +# - libs in/custom/path/icicle/lib -# # Include directories -# include_directories(/opt/icicle/include) -# # Library directories -# link_directories(/opt/icicle/lib/) +# Include directories +target_include_directories(example PUBLIC /custom/path/icicle/include) +# Library directories +target_link_directories(example PUBLIC /custom/path/icicle/lib/) # Set the RPATH so linker finds icicle libs at runtime -# set_target_properties(example PROPERTIES -# BUILD_RPATH /opt/icicle/lib/ -# INSTALL_RPATH /opt/icicle/lib/) \ No newline at end of file +set_target_properties(example PROPERTIES + BUILD_RPATH /custom/path/icicle/lib/ + INSTALL_RPATH /custom/path/icicle/lib/) \ No newline at end of file diff --git a/examples/c++/install_and_use_icicle/README.md b/examples/c++/install_and_use_icicle/README.md index bf80534ec..61e85f80c 100644 --- a/examples/c++/install_and_use_icicle/README.md +++ b/examples/c++/install_and_use_icicle/README.md @@ -1,12 +1,22 @@ -# Install and use ICICLE +# Example: Install and use ICICLE -## 
Optional: use a docker for env with install permissions if need it. +Download release binaries: +- **Frontend** icicle30-ubuntu22.tar.gz +- **Backend** icicle30-ubuntu22-cuda122.tar.gz + +:::note +Name of the files is based on the release version. Make sure to update the tar file names in the example if using different release. +::: + +## Optional: This example is demonstrated in an ubuntu22 docker but this is not mandatory. ```bash docker run -it --rm --gpus all -v ./:/workspace -w /workspace icicle-release-ubuntu22-cuda122 bash ``` +This command is starting bash in the docker, with GPUs and mapping the example files to `/worksapce` in the docker. + ### Building the docker image This image is based on nvidia's image for ubuntu22. built from the Dockerfile: ```dockerfile @@ -29,8 +39,9 @@ cd release tar xzvf icicle30-ubuntu22.tar.gz cp -r ./icicle/lib/* /usr/lib/ cp -r ./icicle/include/icicle/ /usr/local/include/ # copy C++ headers -# extract CUDA backend +# extract CUDA backend (OPTIONAL) tar xzvf icicle30-ubuntu22-cuda122.tar.gz -C /opt +rm -rf icicle # remove the extracted dir ``` ## Compile and link C++ example to icicle @@ -44,3 +55,52 @@ cmake -S . -B build && cmake --build build ```bash ./build/example ``` + +### CUDA license +If using CUDA backend, make sure to have a CUDA backend license: +- For license server, specify address: `export ICICLE_LICENSE_SERVER_ADDR=port@ip`. +- For local license, specify path to license: `export ICICLE_LICENSE_SERVER_ADDR=path/to/license`. (TODO rename env variable) + +## Install in custom location + +If installing in a custom location such as /custom/path: +```bash +mkdir -p /custom/path +cd release +tar xzvf icicle30-ubuntu22.tar.gz -C /custom/path +tar xzvf icicle30-ubuntu22-cuda122.tar.gz -C /custom/path # OPTIONAL +``` + +### Build your app and link to ICICLE +You will have to specify paths for include and libs so that the compiler linker and loader can find them at compile anbd runtime. 
+You can add the following to cmake file to do so: +```cmake +# Include directories +target_include_directories(example PUBLIC /custom/path/icicle/include) +# Library directories +target_link_directories(example PUBLIC /custom/path/icicle/lib/) +# Set the RPATH so linker finds icicle libs at runtime +set_target_properties(example PROPERTIES + BUILD_RPATH /custom/path/icicle/lib/ + INSTALL_RPATH /custom/path/icicle/lib/) +``` + + +```bash +cd .. +mkdir build +cmake -S . -B build && cmake --build build +``` + +### Launch the executable + +Since CUDA backend is installed to `/custom/path` we need to set the env variable accordingly: +```bash +export ICICLE_BACKEND_INSTALL_DIR=/custom/path/icicle/lib/backend +./build/example +``` + +Alternatively, the example code can use the foolowing API instead: +```cpp +extern "C" eIcicleError icicle_load_backend(const char* path, bool is_recursive); +``` \ No newline at end of file From c4ecd6e003586505d9c518388dd99196cfd38c55 Mon Sep 17 00:00:00 2001 From: Yuval Shekel Date: Wed, 28 Aug 2024 15:08:27 +0300 Subject: [PATCH 11/35] updated doc with link to example --- docs/docs/icicle/install_and_use.md | 36 +++++++++++++++++------------ 1 file changed, 21 insertions(+), 15 deletions(-) diff --git a/docs/docs/icicle/install_and_use.md b/docs/docs/icicle/install_and_use.md index ad5377a2f..adc9c3a17 100644 --- a/docs/docs/icicle/install_and_use.md +++ b/docs/docs/icicle/install_and_use.md @@ -24,18 +24,20 @@ Each Icicle release includes a tar file, named `icicle30-.tar.gz`, ## installing and using icicle +Full C++ example here: https://github.com/ingonyama-zk/icicle/tree/yshekel/V3_release_and_install/examples/c%2B%2B/install_and_use_icicle + 1. **Extract the Tar Files**: - Download (TODO link to latest release) the appropriate tar files for your distribution (Ubuntu 20.04, Ubuntu 22.04, or UBI 7,8,9 for RHEL compatible binaries). 
- - **Frontend libs and headers** should be installed in default search paths (such as `/usr/local/lib` and `usr/local/include`) for the compiler and linker to find. + - **Frontend libs and headers** should be installed in default search paths (such as `/usr/lib` and `usr/local/include`) for the compiler and linker to find. - **Backend libs** should be installed in `/opt` - Extract it to your desired location: ```bash # install the frontend part (Can skip for Rust) - tar -xzvf icicle30-.tar.gz -C /path/to/extract/ - cp -r /path/to/extract/icicle/include /usr/local/include/icicle # or any other - cp -r /path/to/extract/icicle/lib /usr/local/lib # or any other - # install CUDA backend (Required for all programming-languages that want to use CUDA backend) - tar -xzvf icicle30--cuda122.tar.gz -C /opt/ # or other non-default install directory + tar xzvf icicle30-ubuntu22.tar.gz + cp -r ./icicle/lib/* /usr/lib/ + cp -r ./icicle/include/icicle/ /usr/local/include/ # copy C++ headers + # extract CUDA backend (OPTIONAL) + tar xzvf icicle30-ubuntu22-cuda122.tar.gz -C /opt ``` :::note @@ -56,8 +58,8 @@ Each Icicle release includes a tar file, named `icicle30-.tar.gz`, ```bash g++ -o myapp myapp.cpp -licicle_device -licicle_field_bn254 -licicle_curve_bn254 - # if not installed in standard dirs, for example /opt, need to specify it - g++ -o myapp myapp.cpp -I/opt/icicle/include -L/opt/icicle/lib -licicle_device -licicle_field_bn254 -licicle_curve_bn254 -Wl,-rpath,/opt/icicle/lib/ + # if not installed in standard dirs, for example /custom/path/, need to specify it + g++ -o myapp myapp.cpp -I/custom/path/icicle/include -L/custom/path/icicle/lib -licicle_device -licicle_field_bn254 -licicle_curve_bn254 -Wl,-rpath,/custom/path/icicle/lib/ ``` - Or via cmake @@ -68,21 +70,25 @@ Each Icicle release includes a tar file, named `icicle30-.tar.gz`, target_link_libraries(example icicle_device icicle_field_bn254 icicle_curve_bn254) # OPTIONAL (if not installed in default location) + + # 
The following is setting compile and runtime paths for headers and libs assuming + # - headers in /custom/path/icicle/include + # - libs in/custom/path/icicle/lib + # Include directories - include_directories(/path/to/install/dir/icicle/include) + target_include_directories(example PUBLIC /custom/path/icicle/include) # Library directories - link_directories(/path/to/install/dir/icicle/lib/) - + target_link_directories(example PUBLIC /custom/path/icicle/lib/) # Set the RPATH so linker finds icicle libs at runtime set_target_properties(example PROPERTIES - BUILD_RPATH /path/to/install/dir/icicle/lib/ - INSTALL_RPATH /path/to/install/dir/icicle/lib/) + BUILD_RPATH /custom/path/icicle/lib/ + INSTALL_RPATH /custom/path/icicle/lib/) ``` :::tip - If you face linkage issues, try `ldd myapp` to see the runtime deps. If ICICLE libs are not found, you need to add the install directory to the search path of the linker. In a development env you can do that using the env variable `export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/path/to/icicle/lib` or similar (for non linux). For deployment, make sure it can be found and avoid `LD_LIBRARY_PATH`. + If you face linkage issues, try `ldd myapp` to see the runtime deps. If ICICLE libs are not found, you need to add the install directory to the search path of the linker. In a development env you can do that using the env variable `export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/custom/path/icicle/lib` or similar (for non linux). For deployment, make sure it can be found and avoid `LD_LIBRARY_PATH`. - Alternatively you can embed the search path on the app as an `rpath` by adding `-Wl,-rpath,/path/to/icicle/lib/`. This is what is demonstrated above. + Alternatively you can embed the search path on the app as an `rpath` by adding `-Wl,-rpath,/custom/path/icicle/lib/`. This is what is demonstrated above. 
::: **Rust** From 8e1144b174236e752edb247ddfd440e71c99eb63 Mon Sep 17 00:00:00 2001 From: yshekel Date: Wed, 28 Aug 2024 15:09:46 +0300 Subject: [PATCH 12/35] Update scripts/release/build_release_and_tar.sh Co-authored-by: ChickenLover --- scripts/release/build_release_and_tar.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/release/build_release_and_tar.sh b/scripts/release/build_release_and_tar.sh index 544cc575a..f65200470 100755 --- a/scripts/release/build_release_and_tar.sh +++ b/scripts/release/build_release_and_tar.sh @@ -8,7 +8,7 @@ ICICLE_OS=${2:-unknown_os} # Default to "unknown_os" if not set ICICLE_CUDA_VERSION=${3:-cuda_unknown} # Default to "cuda_unknown" if not set # List of fields and curves -fields=("babybear" "stark252") +fields=("babybear" "stark252", "m31") curves=("bn254" "bls12_381" "bls12_377" "bw6_761" "grumpkin") cd / From 8158c4f98e30ea90d887c1d9714b12f524fcafee Mon Sep 17 00:00:00 2001 From: Yuval Shekel Date: Wed, 28 Aug 2024 15:12:07 +0300 Subject: [PATCH 13/35] spelling --- examples/c++/install_and_use_icicle/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/c++/install_and_use_icicle/README.md b/examples/c++/install_and_use_icicle/README.md index 61e85f80c..6d1eb38e9 100644 --- a/examples/c++/install_and_use_icicle/README.md +++ b/examples/c++/install_and_use_icicle/README.md @@ -72,7 +72,7 @@ tar xzvf icicle30-ubuntu22-cuda122.tar.gz -C /custom/path # OPTIONAL ``` ### Build your app and link to ICICLE -You will have to specify paths for include and libs so that the compiler linker and loader can find them at compile anbd runtime. +You will have to specify paths for include and libs so that the compiler linker and loader can find them at compile anb runtime. 
You can add the following to cmake file to do so: ```cmake # Include directories From 85019570d17d1148ec9fb5a9b2e4dfd2688149fe Mon Sep 17 00:00:00 2001 From: Yuval Shekel Date: Wed, 28 Aug 2024 16:17:36 +0300 Subject: [PATCH 14/35] ubi docker files and script to build all --- .../c++/install_and_use_icicle/example.cpp | 12 ++-- scripts/release/Dockerfile.centos7 | 13 ----- scripts/release/Dockerfile.ubi7 | 30 ++++++++++ scripts/release/Dockerfile.ubi8 | 4 ++ scripts/release/Dockerfile.ubi9 | 4 ++ scripts/release/Dockerfile.ubuntu20 | 2 +- scripts/release/Dockerfile.ubuntu22 | 2 +- scripts/release/README.md | 20 +++++-- scripts/release/build_all.sh | 56 +++++++++++++++++++ scripts/release/build_release_and_tar.sh | 2 +- 10 files changed, 120 insertions(+), 25 deletions(-) delete mode 100644 scripts/release/Dockerfile.centos7 create mode 100644 scripts/release/Dockerfile.ubi7 create mode 100644 scripts/release/Dockerfile.ubi8 create mode 100644 scripts/release/Dockerfile.ubi9 create mode 100755 scripts/release/build_all.sh diff --git a/examples/c++/install_and_use_icicle/example.cpp b/examples/c++/install_and_use_icicle/example.cpp index 76495bf78..8df86ea5b 100644 --- a/examples/c++/install_and_use_icicle/example.cpp +++ b/examples/c++/install_and_use_icicle/example.cpp @@ -18,11 +18,15 @@ int main(int argc, char* argv[]) { icicle_load_backend_from_env_or_default(); // Check if GPU is available - const bool is_cuda_device_available = (eIcicleError::SUCCESS == icicle_is_device_avialable("CUDA")); Device device_cpu = {"CPU", 0}; - Device device_gpu = is_cuda_device_available ? 
Device{"CUDA", 0} : device_cpu; - if (is_cuda_device_available) { std::cout << "GPU is available" << std::endl;} - else { std::cout << "GPU is not available" << std::endl;} + const bool is_cuda_device_available = (eIcicleError::SUCCESS == icicle_is_device_avialable("CUDA")); + Device device_gpu = {"CUDA",0}; + if (is_cuda_device_available) { + ICICLE_LOG_INFO << "GPU is available"; + } else { + ICICLE_LOG_INFO << "GPU is not available, falling back to CPU only"; + device_gpu = device_cpu; + } // Example input (on host memory) for NTT diff --git a/scripts/release/Dockerfile.centos7 b/scripts/release/Dockerfile.centos7 deleted file mode 100644 index 56bc60dfb..000000000 --- a/scripts/release/Dockerfile.centos7 +++ /dev/null @@ -1,13 +0,0 @@ -# Use the official NVIDIA CUDA development image for CentOS 7 -FROM nvidia/cuda:12.2.0-devel-centos7 - -# Install necessary packages -RUN yum -y update && \ - yum -y install \ - gcc \ - gcc-c++ \ - make \ - cmake \ - tar \ - && yum clean all - \ No newline at end of file diff --git a/scripts/release/Dockerfile.ubi7 b/scripts/release/Dockerfile.ubi7 new file mode 100644 index 000000000..e5434fc44 --- /dev/null +++ b/scripts/release/Dockerfile.ubi7 @@ -0,0 +1,30 @@ +# Use the official NVIDIA CUDA development image for UBI 7 - RHEL compatible +FROM nvidia/cuda:12.2.2-devel-ubi7 + +# Install necessary packages +RUN yum update -y && yum install -y \ + gcc \ + gcc-c++ \ + make \ + wget \ + tar \ + && yum clean all + + +# Download, build, and install CMake from source +ARG CMAKE_VERSION=3.27.4 +RUN wget https://github.com/Kitware/CMake/releases/download/v$CMAKE_VERSION/cmake-$CMAKE_VERSION.tar.gz && \ + tar -xzvf cmake-$CMAKE_VERSION.tar.gz && \ + cd cmake-$CMAKE_VERSION && \ + ./bootstrap && \ + make -j$(nproc) && \ + sudo make install && \ + cd .. 
&& \ + rm -rf cmake-$CMAKE_VERSION cmake-$CMAKE_VERSION.tar.gz + +# # Set environment variables for the CUDA paths (if needed) +# ENV PATH=/usr/local/cuda-12.2/bin${PATH:+:${PATH}} +# ENV LD_LIBRARY_PATH=/usr/local/cuda-12.2/lib64${LD_LIBRARY_PATH:+:${LD_LIBRARY_PATH}} + +# # Default workdir +# WORKDIR /workspace \ No newline at end of file diff --git a/scripts/release/Dockerfile.ubi8 b/scripts/release/Dockerfile.ubi8 new file mode 100644 index 000000000..d37f2d349 --- /dev/null +++ b/scripts/release/Dockerfile.ubi8 @@ -0,0 +1,4 @@ +# Use the official NVIDIA CUDA development image for ubi8 - rhel compatible +FROM nvidia/cuda:12.2.2-devel-ubi8 + +# Prevent interactive prompts during package installation diff --git a/scripts/release/Dockerfile.ubi9 b/scripts/release/Dockerfile.ubi9 new file mode 100644 index 000000000..ece6ff2b0 --- /dev/null +++ b/scripts/release/Dockerfile.ubi9 @@ -0,0 +1,4 @@ +# Use the official NVIDIA CUDA development image for ubi9 - rhel compatible +FROM nvidia/cuda:12.2.2-devel-ubi9 + +# Prevent interactive prompts during package installation diff --git a/scripts/release/Dockerfile.ubuntu20 b/scripts/release/Dockerfile.ubuntu20 index edb48d1b4..e9e3d57d7 100644 --- a/scripts/release/Dockerfile.ubuntu20 +++ b/scripts/release/Dockerfile.ubuntu20 @@ -1,5 +1,5 @@ # Use the official NVIDIA CUDA development image for Ubuntu 20.04 -FROM nvidia/cuda:12.2.0-devel-ubuntu20.04 +FROM nvidia/cuda:12.2.2-devel-ubuntu20.04 # Prevent interactive prompts during package installation ENV DEBIAN_FRONTEND=noninteractive diff --git a/scripts/release/Dockerfile.ubuntu22 b/scripts/release/Dockerfile.ubuntu22 index 70911c4bc..723af1054 100644 --- a/scripts/release/Dockerfile.ubuntu22 +++ b/scripts/release/Dockerfile.ubuntu22 @@ -1,5 +1,5 @@ # Use the official NVIDIA development runtime image for Ubuntu 22.04 -FROM nvidia/cuda:12.2.0-devel-ubuntu22.04 +FROM nvidia/cuda:12.2.2-devel-ubuntu22.04 # Install necessary packages RUN apt-get update && apt-get install -y \ diff 
--git a/scripts/release/README.md b/scripts/release/README.md index 627a0cbc5..6e022fde8 100644 --- a/scripts/release/README.md +++ b/scripts/release/README.md @@ -10,14 +10,15 @@ The Docker images represent the target environment for the release. To build the Docker images for each distribution and CUDA version, use the following commands: ```bash -# Ubuntu 22.04, CUDA 12.2 +cd ./scripts/release +# Ubuntu 22.04, CUDA 12.2.2 docker build -t icicle-release-ubuntu22-cuda122 -f Dockerfile.ubuntu22 . -# Ubuntu 20.04, CUDA 12.2 +# Ubuntu 20.04, CUDA 12.2.2 docker build -t icicle-release-ubuntu20-cuda122 -f Dockerfile.ubuntu20 . -# CentOS 7, CUDA 12.2 -docker build -t icicle-release-centos7-cuda122 -f Dockerfile.centos7 . +# ubi7 (rhel7,centos compatible), CUDA 12.2.2 +docker build -t icicle-release-centos7-cuda122 -f Dockerfile.ubi7 . ``` @@ -26,6 +27,7 @@ docker build -t icicle-release-centos7-cuda122 -f Dockerfile.centos7 . To build the Icicle libraries inside a Docker container and output the tar file to the `release_output` directory: ```bash +# from icicel root dir mkdir -p release_output # ubuntu 22 docker run --rm --gpus all \ @@ -44,6 +46,14 @@ docker run --rm --gpus all \ This command executes the `build_release_and_tar.sh` script inside the Docker container, which provides the build environment. It maps the source code and output directory to the container, ensuring the generated tar file is available on the host system. -You can replace `icicle-release-ubuntu22-cuda122` with another Docker image tag to build in the corresponding environment (e.g., Ubuntu 20.04 or CentOS 7). +You can replace `icicle-release-ubuntu22-cuda122` with another Docker image tag to build in the corresponding environment. Make sure to pass corresponding OS and CUDA version in the params `icicle30 ubuntu22 cuda122`. For example for centos7 it would be `icicle30 centos7 cuda122`. 
+ +## Build full release + +To build all tars: +```bash +cd ./scripts/release # from icicle root dir +./build_all.sh # output is generated to release_output dir +``` \ No newline at end of file diff --git a/scripts/release/build_all.sh b/scripts/release/build_all.sh new file mode 100755 index 000000000..9f7937f49 --- /dev/null +++ b/scripts/release/build_all.sh @@ -0,0 +1,56 @@ +#!/bin/bash + +set -e + +# Build dockers + +# Ubuntu 22.04, CUDA 12.2.2 +docker build -t icicle-release-ubuntu22-cuda122 -f Dockerfile.ubuntu22 . +# Ubuntu 20.04, CUDA 12.2.2 +docker build -t icicle-release-ubuntu20-cuda122 -f Dockerfile.ubuntu20 . +# ubi7 (rhel compatible), CUDA 12.2.2 +docker build -t icicle-release-centos7-cuda122 -f Dockerfile.ubi7 . +# ubi8 (rhel compatible), CUDA 12.2.2 +docker build -t icicle-release-centos7-cuda122 -f Dockerfile.ubi9 . +# ubi7 (rhel compatible), CUDA 12.2.2 +docker build -t icicle-release-centos7-cuda122 -f Dockerfile.ubi9 . + +# compile and tar release in each + +mkdir -p release_output && rm -rf release_output/* # output dir where tars will be placed + +# ubuntu 22 +docker run --rm --gpus all \ + -v ./icicle:/icicle \ + -v ./release_output:/output \ + -v ./scripts:/scripts \ + icicle-release-ubuntu22-cuda122 bash /scripts/release/build_release_and_tar.sh icicle30 ubuntu22 cuda122 + +# ubuntu 20 +docker run --rm --gpus all \ + -v ./icicle:/icicle \ + -v ./release_output:/output \ + -v ./scripts:/scripts \ + icicle-release-ubuntu20-cuda122 bash /scripts/release/build_release_and_tar.sh icicle30 ubuntu20 cuda122 + +# ubi 7 +docker run --rm --gpus all \ + -v ./icicle:/icicle \ + -v ./release_output:/output \ + -v ./scripts:/scripts \ + icicle-release-ubi7-cuda122 bash /scripts/release/build_release_and_tar.sh icicle30 ubi7 cuda122 + +# ubi 8 +docker run --rm --gpus all \ + -v ./icicle:/icicle \ + -v ./release_output:/output \ + -v ./scripts:/scripts \ + icicle-release-ubi8-cuda122 bash /scripts/release/build_release_and_tar.sh icicle30 ubi8 cuda122 + 
+# ubi 9 +docker run --rm --gpus all \ + -v ./icicle:/icicle \ + -v ./release_output:/output \ + -v ./scripts:/scripts \ + icicle-release-ubi9-cuda122 bash /scripts/release/build_release_and_tar.sh icicle30 ubi9 cuda122 + diff --git a/scripts/release/build_release_and_tar.sh b/scripts/release/build_release_and_tar.sh index f65200470..c786c3298 100755 --- a/scripts/release/build_release_and_tar.sh +++ b/scripts/release/build_release_and_tar.sh @@ -8,7 +8,7 @@ ICICLE_OS=${2:-unknown_os} # Default to "unknown_os" if not set ICICLE_CUDA_VERSION=${3:-cuda_unknown} # Default to "cuda_unknown" if not set # List of fields and curves -fields=("babybear" "stark252", "m31") +fields=("babybear" "stark252" "m31") curves=("bn254" "bls12_381" "bls12_377" "bw6_761" "grumpkin") cd / From e50d46b5d46d1b259721efbe0a6fceaff4b56586 Mon Sep 17 00:00:00 2001 From: Yuval Shekel Date: Wed, 28 Aug 2024 16:32:01 +0300 Subject: [PATCH 15/35] typo --- docs/docs/icicle/multi-device.md | 2 +- docs/docs/icicle/programmers_guide/cpp.md | 6 +- example_from_scratch/example.cpp | 84 +++++++++++++++++++ examples/c++/examples_utils.h | 2 +- .../c++/install_and_use_icicle/example.cpp | 2 +- icicle/include/icicle/runtime.h | 2 +- icicle/src/runtime.cpp | 2 +- icicle/tests/test_device_api.cpp | 2 +- scripts/release/build_all.sh | 6 +- 9 files changed, 96 insertions(+), 12 deletions(-) create mode 100644 example_from_scratch/example.cpp diff --git a/docs/docs/icicle/multi-device.md b/docs/docs/icicle/multi-device.md index ba8e9f538..07f690a8d 100644 --- a/docs/docs/icicle/multi-device.md +++ b/docs/docs/icicle/multi-device.md @@ -62,7 +62,7 @@ icicle_runtime::set_device(&device).unwrap(); * - `SUCCESS` if the device is available. * - `INVALID_DEVICE` if the device is not available. 
*/ -extern "C" eIcicleError icicle_is_device_avialable(const Device& dev); +extern "C" eIcicleError icicle_is_device_available(const Device& dev); /** * @brief Get number of available devices active device for thread diff --git a/docs/docs/icicle/programmers_guide/cpp.md b/docs/docs/icicle/programmers_guide/cpp.md index 8e013990c..bd534da1e 100644 --- a/docs/docs/icicle/programmers_guide/cpp.md +++ b/docs/docs/icicle/programmers_guide/cpp.md @@ -144,7 +144,7 @@ Check if a device is available and retrieve a list of registered devices: ```cpp icicle::Device dev; -eIcicleError result = icicle_is_device_avialable(dev); +eIcicleError result = icicle_is_device_available(dev); ``` ### Querying Device Properties @@ -185,7 +185,7 @@ int main() icicle_load_backend_from_env_or_default(); // trying to choose CUDA if available, or fallback to CPU otherwise (default device) - const bool is_cuda_device_available = (eIcicleError::SUCCESS == icicle_is_device_avialable("CUDA")); + const bool is_cuda_device_available = (eIcicleError::SUCCESS == icicle_is_device_available("CUDA")); if (is_cuda_device_available) { Device device = {"CUDA", 0}; // GPU-0 ICICLE_CHECK(icicle_set_device(device)); // ICICLE_CHECK asserts that the api call returns eIcicleError::SUCCESS @@ -259,7 +259,7 @@ int main() icicle_load_backend_from_env_or_default(); // trying to choose CUDA if available, or fallback to CPU otherwise (default device) - const bool is_cuda_device_available = (eIcicleError::SUCCESS == icicle_is_device_avialable("CUDA")); + const bool is_cuda_device_available = (eIcicleError::SUCCESS == icicle_is_device_available("CUDA")); if (is_cuda_device_available) { Device device = {"CUDA", 0}; // GPU-0 ICICLE_CHECK(icicle_set_device(device)); // ICICLE_CHECK asserts that the API call returns eIcicleError::SUCCESS diff --git a/example_from_scratch/example.cpp b/example_from_scratch/example.cpp new file mode 100644 index 000000000..e1369acd7 --- /dev/null +++ b/example_from_scratch/example.cpp @@ -0,0 
+1,84 @@ +#include +#include +#include "icicle/runtime.h" +#include "icicle/api/bn254.h" + +using namespace bn254; // This makes scalar_t a bn254 scalar instead of bn254::scalar_t + +// Utility function to print arrays +template +void print_array(const T* arr, int size) { + for (int i = 0; i < size; ++i) { + std::cout << "\t" << i << ": " << arr[i] << std::endl; + } +} + +int main(int argc, char* argv[]) { + // Load installed backends + icicle_load_backend_from_env_or_default(); + + // Check if GPU is available + const bool is_cuda_device_available = (eIcicleError::SUCCESS == icicle_is_device_available("CUDA")); + Device device_cpu = {"CPU", 0}; + Device device_gpu = is_cuda_device_available ? Device{"CUDA", 0} : device_cpu; + + // Example input (on host memory) for NTT + const unsigned log_ntt_size = 2; + const unsigned ntt_size = 1 << log_ntt_size; + auto input_cpu = std::make_unique(ntt_size); + scalar_t::rand_host_many(input_cpu.get(), ntt_size); + + // Allocate output on host memory + auto output_cpu = std::make_unique(ntt_size); + scalar_t root_of_unity = scalar_t::omega(log_ntt_size); + auto ntt_config = default_ntt_config(); + + // Part 1: Running NTT on CPU + std::cout << "Part 1: compute on CPU: " << std::endl; + icicle_set_device(device_cpu); + ntt_init_domain(root_of_unity, default_ntt_init_domain_config()); // Initialize NTT domain for CPU + ntt(input_cpu.get(), ntt_size, NTTDir::kForward, default_ntt_config(), output_cpu.get()); + print_array(output_cpu.get(), ntt_size); + + // Part 2: Running NTT on GPU + std::cout << "Part 2: compute on GPU (from/to CPU memory): " << std::endl; + icicle_set_device(device_gpu); + ntt_init_domain(root_of_unity, default_ntt_init_domain_config()); // Initialize NTT domain for GPU + ntt(input_cpu.get(), ntt_size, NTTDir::kForward, ntt_config, output_cpu.get()); + print_array(output_cpu.get(), ntt_size); + + // Allocate, copy data to GPU and compute on GPU memory + std::cout << "Part 2: compute on GPU (from/to GPU 
memory): " << std::endl; + scalar_t* input_gpu = nullptr; + scalar_t* output_gpu = nullptr; + icicle_malloc((void**)&input_gpu, ntt_size * sizeof(scalar_t)); + icicle_malloc((void**)&output_gpu, ntt_size * sizeof(scalar_t)); + icicle_copy(input_gpu, input_cpu.get(), ntt_size * sizeof(scalar_t)); + ntt_config.are_inputs_on_device = true; + ntt_config.are_outputs_on_device = true; + ntt(input_gpu, ntt_size, NTTDir::kForward, ntt_config, output_gpu); + icicle_copy(output_cpu.get(), output_gpu, ntt_size * sizeof(scalar_t)); + print_array(output_cpu.get(), ntt_size); + + // Part 3: Using both CPU and GPU to compute NTT (GPU) and inverse INTT (CPU) + auto output_intt_cpu = std::make_unique(ntt_size); + + // Step 1: Compute NTT on GPU + std::cout << "Part 3: compute NTT on GPU (NTT input): " << std::endl; + icicle_set_device(device_gpu); + ntt_config.are_inputs_on_device = false; // using host memory now + ntt_config.are_outputs_on_device = false; + ntt(input_cpu.get(), ntt_size, NTTDir::kForward, ntt_config, output_cpu.get()); + print_array(input_cpu.get(), ntt_size); + + // Step 2: Compute INTT on CPU + std::cout << "Part 3: compute INTT on CPU (INTT output): " << std::endl; + icicle_set_device(device_cpu); + ntt(output_cpu.get(), ntt_size, NTTDir::kInverse, ntt_config, output_intt_cpu.get()); + print_array(output_intt_cpu.get(), ntt_size); + + // Assert that INTT output is the same as NTT input + assert(0 == memcmp(input_cpu.get(), output_intt_cpu.get(), ntt_size * sizeof(scalar_t))); + + return 0; +} \ No newline at end of file diff --git a/examples/c++/examples_utils.h b/examples/c++/examples_utils.h index a34684b5c..e71c03bd3 100644 --- a/examples/c++/examples_utils.h +++ b/examples/c++/examples_utils.h @@ -22,7 +22,7 @@ void try_load_and_set_backend_device(int argc = 0, char** argv = nullptr) } // trying to choose CUDA if available, or fallback to CPU otherwise (default device) - const bool is_cuda_device_available = (eIcicleError::SUCCESS == 
icicle_is_device_avialable("CUDA")); + const bool is_cuda_device_available = (eIcicleError::SUCCESS == icicle_is_device_available("CUDA")); if (is_cuda_device_available) { Device device = {"CUDA", 0}; // GPU-0 ICICLE_LOG_INFO << "setting " << device; diff --git a/examples/c++/install_and_use_icicle/example.cpp b/examples/c++/install_and_use_icicle/example.cpp index 8df86ea5b..368f43ad2 100644 --- a/examples/c++/install_and_use_icicle/example.cpp +++ b/examples/c++/install_and_use_icicle/example.cpp @@ -19,7 +19,7 @@ int main(int argc, char* argv[]) { // Check if GPU is available Device device_cpu = {"CPU", 0}; - const bool is_cuda_device_available = (eIcicleError::SUCCESS == icicle_is_device_avialable("CUDA")); + const bool is_cuda_device_available = (eIcicleError::SUCCESS == icicle_is_device_available("CUDA")); Device device_gpu = {"CUDA",0}; if (is_cuda_device_available) { ICICLE_LOG_INFO << "GPU is available"; diff --git a/icicle/include/icicle/runtime.h b/icicle/include/icicle/runtime.h index 4b6848a9b..14b5eec51 100644 --- a/icicle/include/icicle/runtime.h +++ b/icicle/include/icicle/runtime.h @@ -260,7 +260,7 @@ extern "C" eIcicleError icicle_get_device_properties(DeviceProperties& propertie * - `SUCCESS` if the device is available. * - `INVALID_DEVICE` if the device is not available. */ -extern "C" eIcicleError icicle_is_device_avialable(const Device& dev); +extern "C" eIcicleError icicle_is_device_available(const Device& dev); /** * @brief Retrieves the registered devices in comma-separated string. 
diff --git a/icicle/src/runtime.cpp b/icicle/src/runtime.cpp index fb6d07f82..393618290 100644 --- a/icicle/src/runtime.cpp +++ b/icicle/src/runtime.cpp @@ -230,7 +230,7 @@ extern "C" eIcicleError icicle_get_device_properties(DeviceProperties& propertie return DeviceAPI::get_thread_local_deviceAPI()->get_device_properties(properties); } -extern "C" eIcicleError icicle_is_device_avialable(const Device& dev) +extern "C" eIcicleError icicle_is_device_available(const Device& dev) { return is_device_registered(dev.type) ? eIcicleError::SUCCESS : eIcicleError::INVALID_DEVICE; } diff --git a/icicle/tests/test_device_api.cpp b/icicle/tests/test_device_api.cpp index 957c6e1d5..98e2dfadb 100644 --- a/icicle/tests/test_device_api.cpp +++ b/icicle/tests/test_device_api.cpp @@ -134,7 +134,7 @@ TEST_F(DeviceApiTest, ApiError) TEST_F(DeviceApiTest, AvailableMemory) { icicle::Device dev = {"CUDA", 0}; - const bool is_cuda_registered = eIcicleError::SUCCESS == icicle_is_device_avialable(dev); + const bool is_cuda_registered = eIcicleError::SUCCESS == icicle_is_device_available(dev); if (!is_cuda_registered) { return; } // TODO implement for CPU too icicle_set_device(dev); diff --git a/scripts/release/build_all.sh b/scripts/release/build_all.sh index 9f7937f49..6d9c64fdb 100755 --- a/scripts/release/build_all.sh +++ b/scripts/release/build_all.sh @@ -9,11 +9,11 @@ docker build -t icicle-release-ubuntu22-cuda122 -f Dockerfile.ubuntu22 . # Ubuntu 20.04, CUDA 12.2.2 docker build -t icicle-release-ubuntu20-cuda122 -f Dockerfile.ubuntu20 . # ubi7 (rhel compatible), CUDA 12.2.2 -docker build -t icicle-release-centos7-cuda122 -f Dockerfile.ubi7 . +docker build -t icicle-release-ubi7-cuda122 -f Dockerfile.ubi7 . # ubi8 (rhel compatible), CUDA 12.2.2 -docker build -t icicle-release-centos7-cuda122 -f Dockerfile.ubi9 . +docker build -t icicle-release-ubi8-cuda122 -f Dockerfile.ubi8 . # ubi7 (rhel compatible), CUDA 12.2.2 -docker build -t icicle-release-centos7-cuda122 -f Dockerfile.ubi9 . 
+docker build -t icicle-release-ubi9-cuda122 -f Dockerfile.ubi9 . # compile and tar release in each From 5ed3ce99555ee2ce01548d6a90910a3581d31682 Mon Sep 17 00:00:00 2001 From: Yuval Shekel Date: Wed, 28 Aug 2024 17:28:55 +0300 Subject: [PATCH 16/35] install cmake in ubi dockers --- scripts/release/Dockerfile.ubi7 | 30 --------------------- scripts/release/Dockerfile.ubi8 | 3 ++- scripts/release/Dockerfile.ubi9 | 3 ++- scripts/release/README.md | 20 +++----------- scripts/release/build_all.sh | 28 +++++++++---------- wrappers/rust/icicle-runtime/src/runtime.rs | 4 +-- 6 files changed, 23 insertions(+), 65 deletions(-) delete mode 100644 scripts/release/Dockerfile.ubi7 diff --git a/scripts/release/Dockerfile.ubi7 b/scripts/release/Dockerfile.ubi7 deleted file mode 100644 index e5434fc44..000000000 --- a/scripts/release/Dockerfile.ubi7 +++ /dev/null @@ -1,30 +0,0 @@ -# Use the official NVIDIA CUDA development image for UBI 7 - RHEL compatible -FROM nvidia/cuda:12.2.2-devel-ubi7 - -# Install necessary packages -RUN yum update -y && yum install -y \ - gcc \ - gcc-c++ \ - make \ - wget \ - tar \ - && yum clean all - - -# Download, build, and install CMake from source -ARG CMAKE_VERSION=3.27.4 -RUN wget https://github.com/Kitware/CMake/releases/download/v$CMAKE_VERSION/cmake-$CMAKE_VERSION.tar.gz && \ - tar -xzvf cmake-$CMAKE_VERSION.tar.gz && \ - cd cmake-$CMAKE_VERSION && \ - ./bootstrap && \ - make -j$(nproc) && \ - sudo make install && \ - cd .. 
&& \ - rm -rf cmake-$CMAKE_VERSION cmake-$CMAKE_VERSION.tar.gz - -# # Set environment variables for the CUDA paths (if needed) -# ENV PATH=/usr/local/cuda-12.2/bin${PATH:+:${PATH}} -# ENV LD_LIBRARY_PATH=/usr/local/cuda-12.2/lib64${LD_LIBRARY_PATH:+:${LD_LIBRARY_PATH}} - -# # Default workdir -# WORKDIR /workspace \ No newline at end of file diff --git a/scripts/release/Dockerfile.ubi8 b/scripts/release/Dockerfile.ubi8 index d37f2d349..88d434bee 100644 --- a/scripts/release/Dockerfile.ubi8 +++ b/scripts/release/Dockerfile.ubi8 @@ -1,4 +1,5 @@ # Use the official NVIDIA CUDA development image for ubi8 - rhel compatible FROM nvidia/cuda:12.2.2-devel-ubi8 -# Prevent interactive prompts during package installation +# install cmake +RUN dnf update -y && dnf install -y cmake diff --git a/scripts/release/Dockerfile.ubi9 b/scripts/release/Dockerfile.ubi9 index ece6ff2b0..163db6f58 100644 --- a/scripts/release/Dockerfile.ubi9 +++ b/scripts/release/Dockerfile.ubi9 @@ -1,4 +1,5 @@ # Use the official NVIDIA CUDA development image for ubi9 - rhel compatible FROM nvidia/cuda:12.2.2-devel-ubi9 -# Prevent interactive prompts during package installation +# install cmake +RUN dnf update -y && dnf install -y cmake \ No newline at end of file diff --git a/scripts/release/README.md b/scripts/release/README.md index 6e022fde8..4604ddc5a 100644 --- a/scripts/release/README.md +++ b/scripts/release/README.md @@ -13,12 +13,6 @@ To build the Docker images for each distribution and CUDA version, use the follo cd ./scripts/release # Ubuntu 22.04, CUDA 12.2.2 docker build -t icicle-release-ubuntu22-cuda122 -f Dockerfile.ubuntu22 . - -# Ubuntu 20.04, CUDA 12.2.2 -docker build -t icicle-release-ubuntu20-cuda122 -f Dockerfile.ubuntu20 . - -# ubi7 (rhel7,centos compatible), CUDA 12.2.2 -docker build -t icicle-release-centos7-cuda122 -f Dockerfile.ubi7 . ``` @@ -27,27 +21,21 @@ docker build -t icicle-release-centos7-cuda122 -f Dockerfile.ubi7 . 
To build the Icicle libraries inside a Docker container and output the tar file to the `release_output` directory: ```bash -# from icicel root dir +# from icicle root dir mkdir -p release_output # ubuntu 22 docker run --rm --gpus all \ -v ./icicle:/icicle \ -v ./release_output:/output \ -v ./scripts:/scripts \ - icicle-release-ubuntu22-cuda122 bash /scripts/release/build_release_and_tar.sh icicle30 ubuntu22 cuda122 - -# ubuntu 20 -docker run --rm --gpus all \ - -v ./icicle:/icicle \ - -v ./release_output:/output \ - -v ./scripts:/scripts \ - icicle-release-ubuntu20-cuda122 bash /scripts/release/build_release_and_tar.sh icicle30 ubuntu20 cuda122 + icicle-release-ubuntu22-cuda122 bash /scripts/release/build_release_and_tar.sh icicle30 ubuntu22 cuda122 ``` This command executes the `build_release_and_tar.sh` script inside the Docker container, which provides the build environment. It maps the source code and output directory to the container, ensuring the generated tar file is available on the host system. You can replace `icicle-release-ubuntu22-cuda122` with another Docker image tag to build in the corresponding environment. -Make sure to pass corresponding OS and CUDA version in the params `icicle30 ubuntu22 cuda122`. For example for centos7 it would be `icicle30 centos7 cuda122`. +Make sure to pass corresponding OS and CUDA version in the params `icicle30 ubuntu22 cuda122`. For example for ubi9 it would be `icicle30 ubi9 cuda122`. +See `build_all.sh` script for reference. ## Build full release diff --git a/scripts/release/build_all.sh b/scripts/release/build_all.sh index 6d9c64fdb..2b8a1a8e4 100755 --- a/scripts/release/build_all.sh +++ b/scripts/release/build_all.sh @@ -2,18 +2,23 @@ set -e +# from root of icicle +# Check if both directories exist in the current working directory +if [[ ! -d "./icicle" || ! -d "./scripts" ]]; then + echo "Usage: The current directory must contain both 'icicle' and 'scripts' directories. Retry from icicle root dir." 
+ exit 1 +fi + # Build dockers # Ubuntu 22.04, CUDA 12.2.2 -docker build -t icicle-release-ubuntu22-cuda122 -f Dockerfile.ubuntu22 . +docker build -t icicle-release-ubuntu22-cuda122 -f ./scripts/release/Dockerfile.ubuntu22 . # Ubuntu 20.04, CUDA 12.2.2 -docker build -t icicle-release-ubuntu20-cuda122 -f Dockerfile.ubuntu20 . -# ubi7 (rhel compatible), CUDA 12.2.2 -docker build -t icicle-release-ubi7-cuda122 -f Dockerfile.ubi7 . +docker build -t icicle-release-ubuntu20-cuda122 -f ./scripts/release/Dockerfile.ubuntu20 . # ubi8 (rhel compatible), CUDA 12.2.2 -docker build -t icicle-release-ubi8-cuda122 -f Dockerfile.ubi8 . +docker build -t icicle-release-ubi8-cuda122 -f ./scripts/release/Dockerfile.ubi8 . # ubi7 (rhel compatible), CUDA 12.2.2 -docker build -t icicle-release-ubi9-cuda122 -f Dockerfile.ubi9 . +docker build -t icicle-release-ubi9-cuda122 -f ./scripts/release/Dockerfile.ubi9 . # compile and tar release in each @@ -33,21 +38,14 @@ docker run --rm --gpus all \ -v ./scripts:/scripts \ icicle-release-ubuntu20-cuda122 bash /scripts/release/build_release_and_tar.sh icicle30 ubuntu20 cuda122 -# ubi 7 -docker run --rm --gpus all \ - -v ./icicle:/icicle \ - -v ./release_output:/output \ - -v ./scripts:/scripts \ - icicle-release-ubi7-cuda122 bash /scripts/release/build_release_and_tar.sh icicle30 ubi7 cuda122 - -# ubi 8 +# ubi 8 (rhel compatible) docker run --rm --gpus all \ -v ./icicle:/icicle \ -v ./release_output:/output \ -v ./scripts:/scripts \ icicle-release-ubi8-cuda122 bash /scripts/release/build_release_and_tar.sh icicle30 ubi8 cuda122 -# ubi 9 +# ubi 9 (rhel compatible) docker run --rm --gpus all \ -v ./icicle:/icicle \ -v ./release_output:/output \ diff --git a/wrappers/rust/icicle-runtime/src/runtime.rs b/wrappers/rust/icicle-runtime/src/runtime.rs index 290f03e91..c1c88d162 100644 --- a/wrappers/rust/icicle-runtime/src/runtime.rs +++ b/wrappers/rust/icicle-runtime/src/runtime.rs @@ -15,7 +15,7 @@ extern "C" { fn icicle_is_host_memory(ptr: *const 
c_void) -> eIcicleError; fn icicle_is_active_device_memory(ptr: *const c_void) -> eIcicleError; fn icicle_get_device_count(device_count: &i32) -> eIcicleError; - fn icicle_is_device_avialable(device: &Device) -> eIcicleError; + fn icicle_is_device_available(device: &Device) -> eIcicleError; pub fn icicle_malloc(ptr: *mut *mut c_void, size: usize) -> eIcicleError; pub fn icicle_malloc_async(ptr: *mut *mut c_void, size: usize, stream: IcicleStreamHandle) -> eIcicleError; pub fn icicle_free(ptr: *mut c_void) -> eIcicleError; @@ -85,7 +85,7 @@ pub fn get_device_count() -> Result { } pub fn is_device_available(device: &Device) -> bool { - let err = unsafe { icicle_is_device_avialable(device) }; + let err = unsafe { icicle_is_device_available(device) }; err == eIcicleError::Success } From a17b21a4da2e4894ac6fe61a2ea21657b2f5f9b6 Mon Sep 17 00:00:00 2001 From: Yuval Shekel Date: Wed, 28 Aug 2024 18:11:14 +0300 Subject: [PATCH 17/35] add output dir as arg of build_all.sh script --- scripts/release/README.md | 18 +++++++++--------- scripts/release/build_all.sh | 22 ++++++++++++---------- 2 files changed, 21 insertions(+), 19 deletions(-) diff --git a/scripts/release/README.md b/scripts/release/README.md index 4604ddc5a..f6576c54a 100644 --- a/scripts/release/README.md +++ b/scripts/release/README.md @@ -3,6 +3,15 @@ This section is describing how a release is generated, given the release sources.
We use docker to represent the target environment for the release. Each Docker image is tailored to a specific distribution and CUDA version. You first build the Docker image, which sets up the environment, and then use this Docker image to build the release tar file. This ensures that the build process is consistent and reproducible across different environments. +## Build full release + +To build all tars: +```bash +# from icicle root dir +mkdir -p release_output && rm -rf release_output/* # output dir where tars will be placed +./scripts/release/build_all.sh release_output # release_output is the output dir where tar files will be generated to +``` + ### Build Docker Image The Docker images represent the target environment for the release. @@ -36,12 +45,3 @@ This command executes the `build_release_and_tar.sh` script inside the Docker co You can replace `icicle-release-ubuntu22-cuda122` with another Docker image tag to build in the corresponding environment. Make sure to pass corresponding OS and CUDA version in the params `icicle30 ubuntu22 cuda122`. For example for ubi9 it would be `icicle30 ubi9 cuda122`. See `build_all.sh` script for reference. - - -## Build full release - -To build all tars: -```bash -cd ./scripts/release # from icicle root dir -./build_all.sh # output is generated to release_output dir -``` \ No newline at end of file diff --git a/scripts/release/build_all.sh b/scripts/release/build_all.sh index 2b8a1a8e4..1d5c4089b 100755 --- a/scripts/release/build_all.sh +++ b/scripts/release/build_all.sh @@ -2,14 +2,16 @@ set -e -# from root of icicle +# Use provided release_output directory or default to "release_output" +output_dir="${1:-/release_output}" + # Check if both directories exist in the current working directory if [[ ! -d "./icicle" || ! -d "./scripts" ]]; then echo "Usage: The current directory must contain both 'icicle' and 'scripts' directories. Retry from icicle root dir." 
exit 1 fi -# Build dockers +# Build Docker images # Ubuntu 22.04, CUDA 12.2.2 docker build -t icicle-release-ubuntu22-cuda122 -f ./scripts/release/Dockerfile.ubuntu22 . @@ -17,38 +19,38 @@ docker build -t icicle-release-ubuntu22-cuda122 -f ./scripts/release/Dockerfile. docker build -t icicle-release-ubuntu20-cuda122 -f ./scripts/release/Dockerfile.ubuntu20 . # ubi8 (rhel compatible), CUDA 12.2.2 docker build -t icicle-release-ubi8-cuda122 -f ./scripts/release/Dockerfile.ubi8 . -# ubi7 (rhel compatible), CUDA 12.2.2 +# ubi9 (rhel compatible), CUDA 12.2.2 docker build -t icicle-release-ubi9-cuda122 -f ./scripts/release/Dockerfile.ubi9 . -# compile and tar release in each +# Compile and tar release in each -mkdir -p release_output && rm -rf release_output/* # output dir where tars will be placed +# Create the output directory if it doesn't exist, and clean it +mkdir -p "$output_dir" && rm -rf "$output_dir"/* # ubuntu 22 docker run --rm --gpus all \ -v ./icicle:/icicle \ - -v ./release_output:/output \ + -v "$output_dir:/output" \ -v ./scripts:/scripts \ icicle-release-ubuntu22-cuda122 bash /scripts/release/build_release_and_tar.sh icicle30 ubuntu22 cuda122 # ubuntu 20 docker run --rm --gpus all \ -v ./icicle:/icicle \ - -v ./release_output:/output \ + -v "$output_dir:/output" \ -v ./scripts:/scripts \ icicle-release-ubuntu20-cuda122 bash /scripts/release/build_release_and_tar.sh icicle30 ubuntu20 cuda122 # ubi 8 (rhel compatible) docker run --rm --gpus all \ -v ./icicle:/icicle \ - -v ./release_output:/output \ + -v "$output_dir:/output" \ -v ./scripts:/scripts \ icicle-release-ubi8-cuda122 bash /scripts/release/build_release_and_tar.sh icicle30 ubi8 cuda122 # ubi 9 (rhel compatible) docker run --rm --gpus all \ -v ./icicle:/icicle \ - -v ./release_output:/output \ + -v "$output_dir:/output" \ -v ./scripts:/scripts \ icicle-release-ubi9-cuda122 bash /scripts/release/build_release_and_tar.sh icicle30 ubi9 cuda122 - From f676112adc76f7f1a4def9b7f3256a1dd80e89ed Mon Sep
17 00:00:00 2001 From: Yuval Shekel Date: Wed, 28 Aug 2024 18:13:21 +0300 Subject: [PATCH 18/35] formatting --- example_from_scratch/example.cpp | 84 ----------- examples/c++/best-practice-ntt/example.cpp | 4 +- .../c++/install_and_use_icicle/example.cpp | 137 +++++++++--------- icicle/include/icicle/fields/host_math.h | 7 +- .../include/icicle/fields/stark_fields/m31.h | 2 +- 5 files changed, 76 insertions(+), 158 deletions(-) delete mode 100644 example_from_scratch/example.cpp diff --git a/example_from_scratch/example.cpp b/example_from_scratch/example.cpp deleted file mode 100644 index e1369acd7..000000000 --- a/example_from_scratch/example.cpp +++ /dev/null @@ -1,84 +0,0 @@ -#include -#include -#include "icicle/runtime.h" -#include "icicle/api/bn254.h" - -using namespace bn254; // This makes scalar_t a bn254 scalar instead of bn254::scalar_t - -// Utility function to print arrays -template -void print_array(const T* arr, int size) { - for (int i = 0; i < size; ++i) { - std::cout << "\t" << i << ": " << arr[i] << std::endl; - } -} - -int main(int argc, char* argv[]) { - // Load installed backends - icicle_load_backend_from_env_or_default(); - - // Check if GPU is available - const bool is_cuda_device_available = (eIcicleError::SUCCESS == icicle_is_device_available("CUDA")); - Device device_cpu = {"CPU", 0}; - Device device_gpu = is_cuda_device_available ? 
Device{"CUDA", 0} : device_cpu; - - // Example input (on host memory) for NTT - const unsigned log_ntt_size = 2; - const unsigned ntt_size = 1 << log_ntt_size; - auto input_cpu = std::make_unique(ntt_size); - scalar_t::rand_host_many(input_cpu.get(), ntt_size); - - // Allocate output on host memory - auto output_cpu = std::make_unique(ntt_size); - scalar_t root_of_unity = scalar_t::omega(log_ntt_size); - auto ntt_config = default_ntt_config(); - - // Part 1: Running NTT on CPU - std::cout << "Part 1: compute on CPU: " << std::endl; - icicle_set_device(device_cpu); - ntt_init_domain(root_of_unity, default_ntt_init_domain_config()); // Initialize NTT domain for CPU - ntt(input_cpu.get(), ntt_size, NTTDir::kForward, default_ntt_config(), output_cpu.get()); - print_array(output_cpu.get(), ntt_size); - - // Part 2: Running NTT on GPU - std::cout << "Part 2: compute on GPU (from/to CPU memory): " << std::endl; - icicle_set_device(device_gpu); - ntt_init_domain(root_of_unity, default_ntt_init_domain_config()); // Initialize NTT domain for GPU - ntt(input_cpu.get(), ntt_size, NTTDir::kForward, ntt_config, output_cpu.get()); - print_array(output_cpu.get(), ntt_size); - - // Allocate, copy data to GPU and compute on GPU memory - std::cout << "Part 2: compute on GPU (from/to GPU memory): " << std::endl; - scalar_t* input_gpu = nullptr; - scalar_t* output_gpu = nullptr; - icicle_malloc((void**)&input_gpu, ntt_size * sizeof(scalar_t)); - icicle_malloc((void**)&output_gpu, ntt_size * sizeof(scalar_t)); - icicle_copy(input_gpu, input_cpu.get(), ntt_size * sizeof(scalar_t)); - ntt_config.are_inputs_on_device = true; - ntt_config.are_outputs_on_device = true; - ntt(input_gpu, ntt_size, NTTDir::kForward, ntt_config, output_gpu); - icicle_copy(output_cpu.get(), output_gpu, ntt_size * sizeof(scalar_t)); - print_array(output_cpu.get(), ntt_size); - - // Part 3: Using both CPU and GPU to compute NTT (GPU) and inverse INTT (CPU) - auto output_intt_cpu = std::make_unique(ntt_size); - - // 
Step 1: Compute NTT on GPU - std::cout << "Part 3: compute NTT on GPU (NTT input): " << std::endl; - icicle_set_device(device_gpu); - ntt_config.are_inputs_on_device = false; // using host memory now - ntt_config.are_outputs_on_device = false; - ntt(input_cpu.get(), ntt_size, NTTDir::kForward, ntt_config, output_cpu.get()); - print_array(input_cpu.get(), ntt_size); - - // Step 2: Compute INTT on CPU - std::cout << "Part 3: compute INTT on CPU (INTT output): " << std::endl; - icicle_set_device(device_cpu); - ntt(output_cpu.get(), ntt_size, NTTDir::kInverse, ntt_config, output_intt_cpu.get()); - print_array(output_intt_cpu.get(), ntt_size); - - // Assert that INTT output is the same as NTT input - assert(0 == memcmp(input_cpu.get(), output_intt_cpu.get(), ntt_size * sizeof(scalar_t))); - - return 0; -} \ No newline at end of file diff --git a/examples/c++/best-practice-ntt/example.cpp b/examples/c++/best-practice-ntt/example.cpp index 90f351205..6ffeed7fb 100644 --- a/examples/c++/best-practice-ntt/example.cpp +++ b/examples/c++/best-practice-ntt/example.cpp @@ -116,8 +116,8 @@ int main(int argc, char* argv[]) // Clean-up for (int i = 0; i < 2; i++) { ICICLE_CHECK(icicle_free(d_vec[i])); - delete[] (h_inp[i]); - delete[] (h_out[i]); + delete[](h_inp[i]); + delete[](h_out[i]); } ICICLE_CHECK(icicle_destroy_stream(stream_compute)); ICICLE_CHECK(icicle_destroy_stream(stream_d2h)); diff --git a/examples/c++/install_and_use_icicle/example.cpp b/examples/c++/install_and_use_icicle/example.cpp index 368f43ad2..f86eceb97 100644 --- a/examples/c++/install_and_use_icicle/example.cpp +++ b/examples/c++/install_and_use_icicle/example.cpp @@ -7,85 +7,86 @@ using namespace bn254; // This makes scalar_t a bn254 scalar instead of bn254::s // Utility function to print arrays template -void print_array(const T* arr, int size) { - for (int i = 0; i < size; ++i) { - std::cout << "\t" << i << ": " << arr[i] << std::endl; - } +void print_array(const T* arr, int size) +{ + for (int i = 0; 
i < size; ++i) { + std::cout << "\t" << i << ": " << arr[i] << std::endl; + } } -int main(int argc, char* argv[]) { - // Load installed backends - icicle_load_backend_from_env_or_default(); +int main(int argc, char* argv[]) +{ + // Load installed backends + icicle_load_backend_from_env_or_default(); - // Check if GPU is available - Device device_cpu = {"CPU", 0}; - const bool is_cuda_device_available = (eIcicleError::SUCCESS == icicle_is_device_available("CUDA")); - Device device_gpu = {"CUDA",0}; - if (is_cuda_device_available) { - ICICLE_LOG_INFO << "GPU is available"; - } else { - ICICLE_LOG_INFO << "GPU is not available, falling back to CPU only"; - device_gpu = device_cpu; - } - + // Check if GPU is available + Device device_cpu = {"CPU", 0}; + const bool is_cuda_device_available = (eIcicleError::SUCCESS == icicle_is_device_available("CUDA")); + Device device_gpu = {"CUDA", 0}; + if (is_cuda_device_available) { + ICICLE_LOG_INFO << "GPU is available"; + } else { + ICICLE_LOG_INFO << "GPU is not available, falling back to CPU only"; + device_gpu = device_cpu; + } - // Example input (on host memory) for NTT - const unsigned log_ntt_size = 2; - const unsigned ntt_size = 1 << log_ntt_size; - auto input_cpu = std::make_unique(ntt_size); - scalar_t::rand_host_many(input_cpu.get(), ntt_size); + // Example input (on host memory) for NTT + const unsigned log_ntt_size = 2; + const unsigned ntt_size = 1 << log_ntt_size; + auto input_cpu = std::make_unique(ntt_size); + scalar_t::rand_host_many(input_cpu.get(), ntt_size); - // Allocate output on host memory - auto output_cpu = std::make_unique(ntt_size); - scalar_t root_of_unity = scalar_t::omega(log_ntt_size); - auto ntt_config = default_ntt_config(); + // Allocate output on host memory + auto output_cpu = std::make_unique(ntt_size); + scalar_t root_of_unity = scalar_t::omega(log_ntt_size); + auto ntt_config = default_ntt_config(); - // Part 1: Running NTT on CPU - std::cout << "Part 1: compute on CPU: " << std::endl; - 
icicle_set_device(device_cpu); - ntt_init_domain(root_of_unity, default_ntt_init_domain_config()); // Initialize NTT domain for CPU - ntt(input_cpu.get(), ntt_size, NTTDir::kForward, default_ntt_config(), output_cpu.get()); - print_array(output_cpu.get(), ntt_size); + // Part 1: Running NTT on CPU + std::cout << "Part 1: compute on CPU: " << std::endl; + icicle_set_device(device_cpu); + ntt_init_domain(root_of_unity, default_ntt_init_domain_config()); // Initialize NTT domain for CPU + ntt(input_cpu.get(), ntt_size, NTTDir::kForward, default_ntt_config(), output_cpu.get()); + print_array(output_cpu.get(), ntt_size); - // Part 2: Running NTT on GPU - std::cout << "Part 2: compute on GPU (from/to CPU memory): " << std::endl; - icicle_set_device(device_gpu); - ntt_init_domain(root_of_unity, default_ntt_init_domain_config()); // Initialize NTT domain for GPU - ntt(input_cpu.get(), ntt_size, NTTDir::kForward, ntt_config, output_cpu.get()); - print_array(output_cpu.get(), ntt_size); + // Part 2: Running NTT on GPU + std::cout << "Part 2: compute on GPU (from/to CPU memory): " << std::endl; + icicle_set_device(device_gpu); + ntt_init_domain(root_of_unity, default_ntt_init_domain_config()); // Initialize NTT domain for GPU + ntt(input_cpu.get(), ntt_size, NTTDir::kForward, ntt_config, output_cpu.get()); + print_array(output_cpu.get(), ntt_size); - // Allocate, copy data to GPU and compute on GPU memory - std::cout << "Part 2: compute on GPU (from/to GPU memory): " << std::endl; - scalar_t* input_gpu = nullptr; - scalar_t* output_gpu = nullptr; - icicle_malloc((void**)&input_gpu, ntt_size * sizeof(scalar_t)); - icicle_malloc((void**)&output_gpu, ntt_size * sizeof(scalar_t)); - icicle_copy(input_gpu, input_cpu.get(), ntt_size * sizeof(scalar_t)); - ntt_config.are_inputs_on_device = true; - ntt_config.are_outputs_on_device = true; - ntt(input_gpu, ntt_size, NTTDir::kForward, ntt_config, output_gpu); - icicle_copy(output_cpu.get(), output_gpu, ntt_size * sizeof(scalar_t)); - 
print_array(output_cpu.get(), ntt_size); + // Allocate, copy data to GPU and compute on GPU memory + std::cout << "Part 2: compute on GPU (from/to GPU memory): " << std::endl; + scalar_t* input_gpu = nullptr; + scalar_t* output_gpu = nullptr; + icicle_malloc((void**)&input_gpu, ntt_size * sizeof(scalar_t)); + icicle_malloc((void**)&output_gpu, ntt_size * sizeof(scalar_t)); + icicle_copy(input_gpu, input_cpu.get(), ntt_size * sizeof(scalar_t)); + ntt_config.are_inputs_on_device = true; + ntt_config.are_outputs_on_device = true; + ntt(input_gpu, ntt_size, NTTDir::kForward, ntt_config, output_gpu); + icicle_copy(output_cpu.get(), output_gpu, ntt_size * sizeof(scalar_t)); + print_array(output_cpu.get(), ntt_size); - // Part 3: Using both CPU and GPU to compute NTT (GPU) and inverse INTT (CPU) - auto output_intt_cpu = std::make_unique(ntt_size); + // Part 3: Using both CPU and GPU to compute NTT (GPU) and inverse INTT (CPU) + auto output_intt_cpu = std::make_unique(ntt_size); - // Step 1: Compute NTT on GPU - std::cout << "Part 3: compute NTT on GPU (NTT input): " << std::endl; - icicle_set_device(device_gpu); - ntt_config.are_inputs_on_device = false; // using host memory now - ntt_config.are_outputs_on_device = false; - ntt(input_cpu.get(), ntt_size, NTTDir::kForward, ntt_config, output_cpu.get()); - print_array(input_cpu.get(), ntt_size); + // Step 1: Compute NTT on GPU + std::cout << "Part 3: compute NTT on GPU (NTT input): " << std::endl; + icicle_set_device(device_gpu); + ntt_config.are_inputs_on_device = false; // using host memory now + ntt_config.are_outputs_on_device = false; + ntt(input_cpu.get(), ntt_size, NTTDir::kForward, ntt_config, output_cpu.get()); + print_array(input_cpu.get(), ntt_size); - // Step 2: Compute INTT on CPU - std::cout << "Part 3: compute INTT on CPU (INTT output): " << std::endl; - icicle_set_device(device_cpu); - ntt(output_cpu.get(), ntt_size, NTTDir::kInverse, ntt_config, output_intt_cpu.get()); - print_array(output_intt_cpu.get(), 
ntt_size); + // Step 2: Compute INTT on CPU + std::cout << "Part 3: compute INTT on CPU (INTT output): " << std::endl; + icicle_set_device(device_cpu); + ntt(output_cpu.get(), ntt_size, NTTDir::kInverse, ntt_config, output_intt_cpu.get()); + print_array(output_intt_cpu.get(), ntt_size); - // Assert that INTT output is the same as NTT input - assert(0 == memcmp(input_cpu.get(), output_intt_cpu.get(), ntt_size * sizeof(scalar_t))); + // Assert that INTT output is the same as NTT input + assert(0 == memcmp(input_cpu.get(), output_intt_cpu.get(), ntt_size * sizeof(scalar_t))); - return 0; + return 0; } \ No newline at end of file diff --git a/icicle/include/icicle/fields/host_math.h b/icicle/include/icicle/fields/host_math.h index 05e115e36..e256aa922 100644 --- a/icicle/include/icicle/fields/host_math.h +++ b/icicle/include/icicle/fields/host_math.h @@ -1,6 +1,7 @@ -// Note: this optimization generates invalid code (using gcc) when storage class has a union for both u32 and u64 so disabling it. +// Note: this optimization generates invalid code (using gcc) when storage class has a union for both u32 and u64 so +// disabling it. 
#if defined(__GNUC__) && !defined(__NVCC__) && !defined(__clang__) - #pragma GCC optimize("no-strict-aliasing") +#pragma GCC optimize("no-strict-aliasing") #endif #pragma once @@ -297,5 +298,5 @@ namespace host_math { } // namespace host_math #if defined(__GNUC__) && !defined(__NVCC__) && !defined(__clang__) - #pragma GCC reset_options +#pragma GCC reset_options #endif diff --git a/icicle/include/icicle/fields/stark_fields/m31.h b/icicle/include/icicle/fields/stark_fields/m31.h index 8a74d8f32..a8f6fef5f 100644 --- a/icicle/include/icicle/fields/stark_fields/m31.h +++ b/icicle/include/icicle/fields/stark_fields/m31.h @@ -95,7 +95,7 @@ namespace m31 { { const uint32_t modulus = MersenneField::get_modulus().limbs[0]; uint32_t tmp = (xs.storage >> 31) + (xs.storage & modulus); // max: 1 + 2^31-1 = 2^31 - tmp = (tmp >> 31) + (tmp & modulus); // max: 1 + 0 = 1 + tmp = (tmp >> 31) + (tmp & modulus); // max: 1 + 0 = 1 return MersenneField{{tmp == modulus ? 0 : tmp}}; } From 275582c2d4f4f47352c65f360b65ccf437bf1d1c Mon Sep 17 00:00:00 2001 From: Yuval Shekel Date: Wed, 28 Aug 2024 19:28:14 +0300 Subject: [PATCH 19/35] add rust example for install and use CUDA backend --- docs/docs/icicle/build_from_source.md | 6 +- .../CMakeLists.txt | 0 .../README.md | 4 +- .../example.cpp | 0 .../rust/install-and-use-icicle/Cargo.toml | 11 ++ .../rust/install-and-use-icicle/README.md | 78 ++++++++++++++ .../rust/install-and-use-icicle/src/main.rs | 101 ++++++++++++++++++ scripts/release/Dockerfile.ubuntu22 | 2 + 8 files changed, 198 insertions(+), 4 deletions(-) rename examples/c++/{install_and_use_icicle => install-and-use-icicle}/CMakeLists.txt (100%) rename examples/c++/{install_and_use_icicle => install-and-use-icicle}/README.md (96%) rename examples/c++/{install_and_use_icicle => install-and-use-icicle}/example.cpp (100%) create mode 100644 examples/rust/install-and-use-icicle/Cargo.toml create mode 100644 examples/rust/install-and-use-icicle/README.md create mode 100644 
examples/rust/install-and-use-icicle/src/main.rs diff --git a/docs/docs/icicle/build_from_source.md b/docs/docs/icicle/build_from_source.md index e1c817f1c..9f226f37f 100644 --- a/docs/docs/icicle/build_from_source.md +++ b/docs/docs/icicle/build_from_source.md @@ -156,9 +156,9 @@ In cargo.toml, specify the ICICLE libs to use: ```bash [dependencies] -icicle-runtime = { path = "git = "https://github.com/ingonyama-zk/icicle.git"" } -icicle-core = { path = "git = "https://github.com/ingonyama-zk/icicle.git"" } -icicle-bls12-377 = { path = "git = "https://github.com/ingonyama-zk/icicle.git" } +icicle-runtime = { git = "https://github.com/ingonyama-zk/icicle.git", branch="main" } +icicle-core = { git = "https://github.com/ingonyama-zk/icicle.git", branch="main" } +icicle-babybear = { git = "https://github.com/ingonyama-zk/icicle.git", branch="main" } # add other ICICLE crates here if need additional fields/curves ``` diff --git a/examples/c++/install_and_use_icicle/CMakeLists.txt b/examples/c++/install-and-use-icicle/CMakeLists.txt similarity index 100% rename from examples/c++/install_and_use_icicle/CMakeLists.txt rename to examples/c++/install-and-use-icicle/CMakeLists.txt diff --git a/examples/c++/install_and_use_icicle/README.md b/examples/c++/install-and-use-icicle/README.md similarity index 96% rename from examples/c++/install_and_use_icicle/README.md rename to examples/c++/install-and-use-icicle/README.md index 6d1eb38e9..7cb18cecc 100644 --- a/examples/c++/install_and_use_icicle/README.md +++ b/examples/c++/install-and-use-icicle/README.md @@ -2,6 +2,8 @@ # Example: Install and use ICICLE +This example shows how to install binaries and use them in C++ application. + Download release binaries: - **Frontend** icicle30-ubuntu22.tar.gz - **Backend** icicle30-ubuntu22-cuda122.tar.gz @@ -15,7 +17,7 @@ Name of the files is based on the release version. 
Make sure to update the tar f docker run -it --rm --gpus all -v ./:/workspace -w /workspace icicle-release-ubuntu22-cuda122 bash ``` -This command is starting bash in the docker, with GPUs and mapping the example files to `/worksapce` in the docker. +This command is starting bash in the docker, with GPUs and mapping the example files to `/workspace` in the docker. ### Building the docker image This image is based on nvidia's image for ubuntu22. built from the Dockerfile: diff --git a/examples/c++/install_and_use_icicle/example.cpp b/examples/c++/install-and-use-icicle/example.cpp similarity index 100% rename from examples/c++/install_and_use_icicle/example.cpp rename to examples/c++/install-and-use-icicle/example.cpp diff --git a/examples/rust/install-and-use-icicle/Cargo.toml b/examples/rust/install-and-use-icicle/Cargo.toml new file mode 100644 index 000000000..c1c83c0b5 --- /dev/null +++ b/examples/rust/install-and-use-icicle/Cargo.toml @@ -0,0 +1,11 @@ +[package] +name = "ntt" +version = "1.2.0" +edition = "2018" + +[dependencies] +icicle-runtime = { git = "https://github.com/ingonyama-zk/icicle.git", branch="yshekel/V3" } +icicle-core = { git = "https://github.com/ingonyama-zk/icicle.git", branch="yshekel/V3" } +icicle-babybear = { git = "https://github.com/ingonyama-zk/icicle.git", branch="yshekel/V3" } + +[features] diff --git a/examples/rust/install-and-use-icicle/README.md b/examples/rust/install-and-use-icicle/README.md new file mode 100644 index 000000000..290e27a86 --- /dev/null +++ b/examples/rust/install-and-use-icicle/README.md @@ -0,0 +1,78 @@ + + +# Example: Install and use ICICLE + +This example shows how to install CUDA backend and use it in Rust application. + +Download release binaries for CUDA backend: +- **Backend** icicle30-ubuntu22-cuda122.tar.gz + +:::note +Name of the files is based on the release version. Make sure to update the tar file names in the example if using different release. 
+::: + +## Optional: This example is demonstrated in an ubuntu22 docker but this is not mandatory. +```bash +docker run -it --rm --gpus all -v ./:/workspace -w /workspace icicle-release-ubuntu22-cuda122 bash +``` + +This command is starting bash in the docker, with GPUs and mapping the example files to `/workspace` in the docker. + +### Building the docker image +This image is based on nvidia's image for ubuntu22. built from the Dockerfile: +```dockerfile +# Use the official NVIDIA development runtime image for Ubuntu 22.04 +FROM nvidia/cuda:12.2.0-devel-ubuntu22.04 + +# Install necessary packages +RUN apt-get update && apt-get install -y \ + build-essential \ + cmake \ + tar + +RUN apt install cargo -y +``` + +by `docker build -t icicle-release-ubuntu22-cuda122 -f Dockerfile.ubuntu22 .` + +## Extract tars and install +```bash +cd release +# extract CUDA backend (OPTIONAL) +tar xzvf icicle30-ubuntu22-cuda122.tar.gz -C /opt +``` + +## Build application + +Define ICICLE deps in cargo: +```cargo +[dependencies] +icicle-runtime = { git = "https://github.com/ingonyama-zk/icicle.git", branch="yshekel/V3" } +icicle-core = { git = "https://github.com/ingonyama-zk/icicle.git", branch="yshekel/V3" } +icicle-babybear = { git = "https://github.com/ingonyama-zk/icicle.git", branch="yshekel/V3" } +``` + +Then build +```bash +cargo build --release +``` + +## Launch the executable +```bash +cargo run --release +``` + +### CUDA license +If using CUDA backend, make sure to have a CUDA backend license: +- For license server, specify address: `export ICICLE_LICENSE_SERVER_ADDR=port@ip`. +- For local license, specify path to license: `export ICICLE_LICENSE_SERVER_ADDR=path/to/license`.
(TODO rename env variable) + +## Install in custom location + +If installing in a custom location such as /custom/path: +```bash +mkdir -p /custom/path +tar xzvf icicle30-ubuntu22-cuda122.tar.gz -C /custom/path +``` + +define `ICICLE_BACKEND_INSTALL_DIR=/custom/path/icicle/lib/backend` or use `pub fn load_backend(path: &str) -> Result<(), eIcicleError>` \ No newline at end of file diff --git a/examples/rust/install-and-use-icicle/src/main.rs b/examples/rust/install-and-use-icicle/src/main.rs new file mode 100644 index 000000000..c52320428 --- /dev/null +++ b/examples/rust/install-and-use-icicle/src/main.rs @@ -0,0 +1,101 @@ +use icicle_babybear::field::{ScalarCfg, ScalarField}; +use icicle_runtime::memory::{DeviceSlice, DeviceVec, HostSlice}; +use icicle_runtime::{self, eIcicleError, Device}; + +use icicle_core::{ + ntt::{self, get_root_of_unity, initialize_domain, ntt, NTTConfig}, + traits::{FieldImpl, GenerateRandom}, +}; + +fn main() { + // Load installed backends + let _ = icicle_runtime::load_backend_from_env_or_default(); + + // Check if GPU is available + let device_cpu = Device::new("CPU", 0); + let mut device_gpu = Device::new("CUDA", 0); + let is_cuda_device_available = icicle_runtime::is_device_available(&device_gpu); + + if is_cuda_device_available { + println!("GPU is available"); + } else { + println!("GPU is not available, falling back to CPU only"); + device_gpu = device_cpu.clone(); + } + + // Example input (on host memory) for NTT + let log_ntt_size = 2; + let ntt_size = 1 << log_ntt_size; + let input_cpu = ScalarCfg::generate_random(ntt_size); + + // Allocate output on host memory + let mut output_cpu = vec![ScalarField::zero(); ntt_size]; + let root_of_unity = get_root_of_unity::(ntt_size as u64); + let mut ntt_config = NTTConfig::::default(); + + // Part 1: Running NTT on CPU + println!("Part 1: compute on CPU: "); + icicle_runtime::set_device(&device_cpu).unwrap(); + initialize_domain(root_of_unity, 
&ntt::NTTInitDomainConfig::default()).unwrap(); + ntt( + HostSlice::from_slice(&input_cpu), + ntt::NTTDir::kForward, + &ntt_config, + HostSlice::from_mut_slice(&mut output_cpu), + ) + .unwrap(); + println!("{:?}", output_cpu); + + // Part 2: Running NTT on GPU + println!("Part 2: compute on GPU (from/to CPU memory): "); + icicle_runtime::set_device(&device_gpu).unwrap(); + initialize_domain(root_of_unity, &ntt::NTTInitDomainConfig::default()).unwrap(); + ntt( + HostSlice::from_slice(&input_cpu), + ntt::NTTDir::kForward, + &ntt_config, + HostSlice::from_mut_slice(&mut output_cpu), + ) + .unwrap(); + println!("{:?}", output_cpu); + + // Allocate, copy data to GPU and compute on GPU memory + println!("Part 2: compute on GPU (from/to GPU memory): "); + let mut input_gpu = DeviceVec::::device_malloc(ntt_size).unwrap(); + let mut output_gpu = DeviceVec::::device_malloc(ntt_size).unwrap(); + input_gpu + .copy_from_host(HostSlice::from_slice(&input_cpu)) + .unwrap(); + ntt(&input_gpu[..], ntt::NTTDir::kForward, &ntt_config, &mut output_gpu[..]).unwrap(); + output_gpu + .copy_to_host(HostSlice::from_mut_slice(&mut output_cpu)) + .unwrap(); + println!("{:?}", output_cpu); + + // Part 3: Using both CPU and GPU to compute NTT (GPU) and inverse INTT (CPU) + let mut output_intt_cpu = vec![ScalarField::zero(); ntt_size]; + + // Step 1: Compute NTT on GPU + println!("Part 3: compute NTT on GPU (NTT input): "); + icicle_runtime::set_device(&device_gpu).unwrap(); + ntt( + HostSlice::from_slice(&input_cpu), + ntt::NTTDir::kForward, + &ntt_config, + HostSlice::from_mut_slice(&mut output_cpu), + ) + .unwrap(); + println!("{:?}", input_cpu); + + // Step 2: Compute INTT on CPU + println!("Part 3: compute INTT on CPU (INTT output): "); + icicle_runtime::set_device(&device_cpu).unwrap(); + ntt( + HostSlice::from_slice(&output_cpu), + ntt::NTTDir::kInverse, + &ntt_config, + HostSlice::from_mut_slice(&mut output_intt_cpu), + ) + .unwrap(); + println!("{:?}", output_intt_cpu); +} diff --git 
a/scripts/release/Dockerfile.ubuntu22 b/scripts/release/Dockerfile.ubuntu22 index 723af1054..e64ba489e 100644 --- a/scripts/release/Dockerfile.ubuntu22 +++ b/scripts/release/Dockerfile.ubuntu22 @@ -6,3 +6,5 @@ RUN apt-get update && apt-get install -y \ build-essential \ cmake \ tar + +RUN apt install cargo -y From a5ef1b22c92bdcdab7359315c37b3d3b478969d6 Mon Sep 17 00:00:00 2001 From: Yuval Shekel Date: Wed, 28 Aug 2024 19:45:55 +0300 Subject: [PATCH 20/35] update rust use installed binaries example --- docs/docs/icicle/install_and_use.md | 5 ++- .../rust/install-and-use-icicle/src/main.rs | 42 ++++++++++--------- icicle/include/icicle/dispatcher.h | 4 +- icicle/include/icicle/utils/log.h | 7 +++- icicle/src/runtime.cpp | 4 +- 5 files changed, 36 insertions(+), 26 deletions(-) diff --git a/docs/docs/icicle/install_and_use.md b/docs/docs/icicle/install_and_use.md index adc9c3a17..caef5696c 100644 --- a/docs/docs/icicle/install_and_use.md +++ b/docs/docs/icicle/install_and_use.md @@ -24,7 +24,10 @@ Each Icicle release includes a tar file, named `icicle30-.tar.gz`, ## installing and using icicle -Full C++ example here: https://github.com/ingonyama-zk/icicle/tree/yshekel/V3_release_and_install/examples/c%2B%2B/install_and_use_icicle +- Full C++ example here: https://github.com/ingonyama-zk/icicle/tree/yshekel/V3_release_and_install/examples/c++/install-and-use-icicle +- Full Rust example here: https://github.com/ingonyama-zk/icicle/tree/yshekel/V3_release_and_install/examples/rust/install-and-use-icicle + +(TODO update links to main branch when merged) 1. **Extract the Tar Files**: - Download (TODO link to latest release) the appropriate tar files for your distribution (Ubuntu 20.04, Ubuntu 22.04, or UBI 7,8,9 for RHEL compatible binaries). 
diff --git a/examples/rust/install-and-use-icicle/src/main.rs b/examples/rust/install-and-use-icicle/src/main.rs index c52320428..041747f2f 100644 --- a/examples/rust/install-and-use-icicle/src/main.rs +++ b/examples/rust/install-and-use-icicle/src/main.rs @@ -1,11 +1,10 @@ use icicle_babybear::field::{ScalarCfg, ScalarField}; -use icicle_runtime::memory::{DeviceSlice, DeviceVec, HostSlice}; -use icicle_runtime::{self, eIcicleError, Device}; - use icicle_core::{ ntt::{self, get_root_of_unity, initialize_domain, ntt, NTTConfig}, traits::{FieldImpl, GenerateRandom}, }; +use icicle_runtime::memory::{DeviceSlice, DeviceVec, HostSlice}; +use icicle_runtime::{self, Device}; fn main() { // Load installed backends @@ -35,41 +34,44 @@ fn main() { // Part 1: Running NTT on CPU println!("Part 1: compute on CPU: "); - icicle_runtime::set_device(&device_cpu).unwrap(); - initialize_domain(root_of_unity, &ntt::NTTInitDomainConfig::default()).unwrap(); + icicle_runtime::set_device(&device_cpu).expect("Failed to set device to CPU"); + initialize_domain(root_of_unity, &ntt::NTTInitDomainConfig::default()).expect("Failed to initialize NTT domain"); ntt( HostSlice::from_slice(&input_cpu), ntt::NTTDir::kForward, &ntt_config, HostSlice::from_mut_slice(&mut output_cpu), ) - .unwrap(); + .expect("NTT computation failed on CPU"); println!("{:?}", output_cpu); - // Part 2: Running NTT on GPU + // Part 2: Running NTT on GPU (from/to CPU memory) println!("Part 2: compute on GPU (from/to CPU memory): "); - icicle_runtime::set_device(&device_gpu).unwrap(); - initialize_domain(root_of_unity, &ntt::NTTInitDomainConfig::default()).unwrap(); + icicle_runtime::set_device(&device_gpu).expect("Failed to set device to GPU"); + initialize_domain(root_of_unity, &ntt::NTTInitDomainConfig::default()).expect("Failed to initialize NTT domain"); ntt( HostSlice::from_slice(&input_cpu), ntt::NTTDir::kForward, &ntt_config, HostSlice::from_mut_slice(&mut output_cpu), ) - .unwrap(); + .expect("NTT computation 
failed on GPU"); println!("{:?}", output_cpu); - // Allocate, copy data to GPU and compute on GPU memory + // Part 2 (cont.): Compute on GPU (from/to GPU memory) println!("Part 2: compute on GPU (from/to GPU memory): "); - let mut input_gpu = DeviceVec::::device_malloc(ntt_size).unwrap(); - let mut output_gpu = DeviceVec::::device_malloc(ntt_size).unwrap(); + let mut input_gpu = + DeviceVec::::device_malloc(ntt_size).expect("Failed to allocate device memory for input"); + let mut output_gpu = + DeviceVec::::device_malloc(ntt_size).expect("Failed to allocate device memory for output"); input_gpu .copy_from_host(HostSlice::from_slice(&input_cpu)) - .unwrap(); - ntt(&input_gpu[..], ntt::NTTDir::kForward, &ntt_config, &mut output_gpu[..]).unwrap(); + .expect("Failed to copy data to GPU"); + ntt(&input_gpu[..], ntt::NTTDir::kForward, &ntt_config, &mut output_gpu[..]) + .expect("NTT computation failed on GPU memory"); output_gpu .copy_to_host(HostSlice::from_mut_slice(&mut output_cpu)) - .unwrap(); + .expect("Failed to copy data back to CPU"); println!("{:?}", output_cpu); // Part 3: Using both CPU and GPU to compute NTT (GPU) and inverse INTT (CPU) @@ -77,25 +79,25 @@ fn main() { // Step 1: Compute NTT on GPU println!("Part 3: compute NTT on GPU (NTT input): "); - icicle_runtime::set_device(&device_gpu).unwrap(); + icicle_runtime::set_device(&device_gpu).expect("Failed to set device to GPU"); ntt( HostSlice::from_slice(&input_cpu), ntt::NTTDir::kForward, &ntt_config, HostSlice::from_mut_slice(&mut output_cpu), ) - .unwrap(); + .expect("NTT computation failed on GPU"); println!("{:?}", input_cpu); // Step 2: Compute INTT on CPU println!("Part 3: compute INTT on CPU (INTT output): "); - icicle_runtime::set_device(&device_cpu).unwrap(); + icicle_runtime::set_device(&device_cpu).expect("Failed to set device to CPU"); ntt( HostSlice::from_slice(&output_cpu), ntt::NTTDir::kInverse, &ntt_config, HostSlice::from_mut_slice(&mut output_intt_cpu), ) - .unwrap(); + .expect("INTT 
computation failed on CPU"); println!("{:?}", output_intt_cpu); } diff --git a/icicle/include/icicle/dispatcher.h b/icicle/include/icicle/dispatcher.h index 3b35c463f..601950241 100644 --- a/icicle/include/icicle/dispatcher.h +++ b/icicle/include/icicle/dispatcher.h @@ -67,8 +67,8 @@ class tIcicleExecuteDispatcher using dispatcher_class_name = tIcicleExecuteDispatcher; \ void register_##api_name(const std::string& deviceType, type impl) \ { \ - ICICLE_LOG_DEBUG << " Registering API: device=" << deviceType << ", api=" << #api_name << "<" << demangle() \ - << ">"; \ + ICICLE_LOG_VERBOSE << " Registering API: device=" << deviceType << ", api=" << #api_name << "<" \ + << demangle() << ">"; \ dispatcher_class_name::Global()._register(deviceType, impl); \ } diff --git a/icicle/include/icicle/utils/log.h b/icicle/include/icicle/utils/log.h index 9e76434fe..f8e0af490 100644 --- a/icicle/include/icicle/utils/log.h +++ b/icicle/include/icicle/utils/log.h @@ -3,6 +3,7 @@ #include #include +#define ICICLE_LOG_VERBOSE Log(Log::Verbose) #define ICICLE_LOG_DEBUG Log(Log::Debug) #define ICICLE_LOG_INFO Log(Log::Info) #define ICICLE_LOG_WARNING Log(Log::Warning) @@ -11,7 +12,7 @@ class Log { public: - enum eLogLevel { Debug, Info, Warning, Error }; + enum eLogLevel { Verbose, Debug, Info, Warning, Error }; Log(eLogLevel level) : level{level} { @@ -41,6 +42,8 @@ class Log const char* logLevelToString(eLogLevel level) const { switch (level) { + case Verbose: + return "DEBUG"; case Debug: return "DEBUG"; case Info: @@ -60,4 +63,6 @@ class Log #else static inline eLogLevel s_min_log_level = eLogLevel::Debug; #endif + + // Note: for verbose, need to explicitly call `set_min_log_level(eLogLevel::Verbose)` }; diff --git a/icicle/src/runtime.cpp b/icicle/src/runtime.cpp index 393618290..cb180cb01 100644 --- a/icicle/src/runtime.cpp +++ b/icicle/src/runtime.cpp @@ -293,9 +293,9 @@ extern "C" eIcicleError icicle_load_backend(const char* path, bool is_recursive) int flags = 
(fileName.find("device") != std::string::npos) ? (RTLD_LAZY | RTLD_GLOBAL) : (RTLD_LAZY | RTLD_LOCAL); // Attempt to load the library with the appropriate flags - ICICLE_LOG_DEBUG << "Attempting to load: " << filePath; + ICICLE_LOG_VERBOSE << "Attempting to load: " << filePath; void* handle = dlopen(filePath, flags); - if (!handle) { ICICLE_LOG_DEBUG << "Failed to load " << filePath << ": " << dlerror(); } + if (!handle) { ICICLE_LOG_VERBOSE << "Failed to load " << filePath << ": " << dlerror(); } }; if (is_directory(path)) { From e4a8e16358f4f84b875af1b6b55be57d3c38b795 Mon Sep 17 00:00:00 2001 From: Yuval Shekel Date: Wed, 28 Aug 2024 19:53:27 +0300 Subject: [PATCH 21/35] dummy run scripts for examples --- examples/c++/install-and-use-icicle/run.sh | 1 + examples/rust/install-and-use-icicle/run.sh | 1 + 2 files changed, 2 insertions(+) create mode 100755 examples/c++/install-and-use-icicle/run.sh create mode 100755 examples/rust/install-and-use-icicle/run.sh diff --git a/examples/c++/install-and-use-icicle/run.sh b/examples/c++/install-and-use-icicle/run.sh new file mode 100755 index 000000000..c2bed7d1b --- /dev/null +++ b/examples/c++/install-and-use-icicle/run.sh @@ -0,0 +1 @@ +echo "Check out the README file. You will have to download an ICICLE release and follow instructions" diff --git a/examples/rust/install-and-use-icicle/run.sh b/examples/rust/install-and-use-icicle/run.sh new file mode 100755 index 000000000..c2bed7d1b --- /dev/null +++ b/examples/rust/install-and-use-icicle/run.sh @@ -0,0 +1 @@ +echo "Check out the README file. 
You will have to download an ICICLE release and follow instructions" From 87bcb04367c6ab2cd93aa0ccbc76dfbc09a860ca Mon Sep 17 00:00:00 2001 From: Yuval Shekel Date: Wed, 28 Aug 2024 20:34:45 +0300 Subject: [PATCH 22/35] fix minor mistake in doc --- docs/docs/icicle/build_from_source.md | 6 ++---- docs/docs/icicle/install_and_use.md | 4 ++-- 2 files changed, 4 insertions(+), 6 deletions(-) diff --git a/docs/docs/icicle/build_from_source.md b/docs/docs/icicle/build_from_source.md index 9f226f37f..e7f7ac6f8 100644 --- a/docs/docs/icicle/build_from_source.md +++ b/docs/docs/icicle/build_from_source.md @@ -124,7 +124,7 @@ Cargo features are used to disable features, rather than enable them, for the re They can be disabled as follows: ```bash -cargo build --release --no-default-features --features=no_ecntt,no_g2 +cargo build --release --features=no_ecntt,no_g2 ``` :::note @@ -144,13 +144,11 @@ Most tests assume a CUDA backend is installed and fail otherwise. 4. **Install the library:** -By default, the libraries are installed to the `target//deps/icicle` dir. For custom install dir. define the env variable: +By default, the libraries are installed to the `target//deps/icicle` dir. If you want them installed elsewhere, define the env variable: ```bash export ICICLE_INSTALL_DIR=/path/to/install/dir ``` -(TODO: cargo install ?) - #### Use as cargo dependency In cargo.toml, specify the ICICLE libs to use: diff --git a/docs/docs/icicle/install_and_use.md b/docs/docs/icicle/install_and_use.md index caef5696c..ddfc1098f 100644 --- a/docs/docs/icicle/install_and_use.md +++ b/docs/docs/icicle/install_and_use.md @@ -34,8 +34,8 @@ Each Icicle release includes a tar file, named `icicle30-.tar.gz`, - **Frontend libs and headers** should be installed in default search paths (such as `/usr/lib` and `usr/local/include`) for the compiler and linker to find. 
- **Backend libs** should be installed in `/opt` - Extract it to your desired location: - ```bash - # install the frontend part (Can skip for Rust) + ```bash + # install the frontend part (Can skip for Rust) tar xzvf icicle30-ubuntu22.tar.gz cp -r ./icicle/lib/* /usr/lib/ cp -r ./icicle/include/icicle/ /usr/local/include/ # copy C++ headers From 5ec47f882138308d51e5869d343f2c8309e5b400 Mon Sep 17 00:00:00 2001 From: Yuval Shekel Date: Wed, 28 Aug 2024 21:02:38 +0300 Subject: [PATCH 23/35] fix doc --- docs/docs/icicle/install_and_use.md | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/docs/docs/icicle/install_and_use.md b/docs/docs/icicle/install_and_use.md index ddfc1098f..f4883adfe 100644 --- a/docs/docs/icicle/install_and_use.md +++ b/docs/docs/icicle/install_and_use.md @@ -29,13 +29,13 @@ Each Icicle release includes a tar file, named `icicle30-.tar.gz`, (TODO update links to main branch when merged) -1. **Extract the Tar Files**: - - Download (TODO link to latest release) the appropriate tar files for your distribution (Ubuntu 20.04, Ubuntu 22.04, or UBI 7,8,9 for RHEL compatible binaries). +1. **Extract and install the Tar Files**: + - Download (TODO link to latest release) the appropriate tar files for your distribution (Ubuntu 20.04, Ubuntu 22.04, or UBI 8,9 for RHEL compatible binaries). - **Frontend libs and headers** should be installed in default search paths (such as `/usr/lib` and `usr/local/include`) for the compiler and linker to find. - **Backend libs** should be installed in `/opt` - Extract it to your desired location: ```bash - # install the frontend part (Can skip for Rust) + # install the frontend part (Can skip for Rust) tar xzvf icicle30-ubuntu22.tar.gz cp -r ./icicle/lib/* /usr/lib/ cp -r ./icicle/include/icicle/ /usr/local/include/ # copy C++ headers @@ -44,9 +44,9 @@ Each Icicle release includes a tar file, named `icicle30-.tar.gz`, ``` :::note + Installing the frontend is optional. Rust is not using it. 
You may install to any directory but need to make sure it can be found by the linker at compile and runtime. - For example can extract the frontend to `/opt` too. - ::: + ::: :::tip You can install anywhere and use a link so that it can be easily found as if in the default directory. @@ -65,8 +65,8 @@ Each Icicle release includes a tar file, named `icicle30-.tar.gz`, g++ -o myapp myapp.cpp -I/custom/path/icicle/include -L/custom/path/icicle/lib -licicle_device -licicle_field_bn254 -licicle_curve_bn254 -Wl,-rpath,/custom/path/icicle/lib/ ``` - - Or via cmake - ```bash + - Or via cmake + ```cmake # Add the executable add_executable(example example.cpp) # Link the libraries @@ -96,11 +96,11 @@ Each Icicle release includes a tar file, named `icicle30-.tar.gz`, **Rust** - When building the icicle crates, icicle frontend libs are built from source, in addition to the rust bindings. They are installed to `target//deps/icile` and cargo will link correctly. Note that you still need to install CUDA backend if you have a CUDA GPU. - - Simply use `cargo build` or `cargo run` and it should link to icicle libs. + - Simply use `cargo build` or `cargo run` and it should link to icicle libs. **Go** - TODO -:::warning when deploying an application (either C++, Rust or Go), you must make sure to either deploy the icicle libs (in Rust it's in `target//deps/icile` or the preinstalled ones) along the application binaries (as tar, docker image, package manager installer or else) or make sure to install icicle (and the backend) on the target machine. Otherwise the target machine will have linkage issues. +:::warning when deploying an application (either C++, Rust or Go), you must make sure to either deploy the icicle libs (that you download or build from source) along the application binaries (as tar, docker image, package manager installer or else) or make sure to install icicle (and the backend) on the target machine. Otherwise the target machine will have linkage issues. 
::: ## Backend Loading From 7e4a6483cef6c12405628679d0874f5fe15ae62f Mon Sep 17 00:00:00 2001 From: yshekel Date: Thu, 29 Aug 2024 10:34:22 +0300 Subject: [PATCH 24/35] Update examples/c++/install-and-use-icicle/README.md Co-authored-by: Jeremy Felder --- examples/c++/install-and-use-icicle/README.md | 2 -- 1 file changed, 2 deletions(-) diff --git a/examples/c++/install-and-use-icicle/README.md b/examples/c++/install-and-use-icicle/README.md index 7cb18cecc..f5c6aab19 100644 --- a/examples/c++/install-and-use-icicle/README.md +++ b/examples/c++/install-and-use-icicle/README.md @@ -1,5 +1,3 @@ - - # Example: Install and use ICICLE This example shows how to install binaries and use them in C++ application. From 4c798cb6a11671fb73b50c5f93021d32b9438d41 Mon Sep 17 00:00:00 2001 From: yshekel Date: Thu, 29 Aug 2024 10:34:55 +0300 Subject: [PATCH 25/35] Update examples/c++/install-and-use-icicle/README.md Co-authored-by: Jeremy Felder --- examples/c++/install-and-use-icicle/README.md | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/examples/c++/install-and-use-icicle/README.md b/examples/c++/install-and-use-icicle/README.md index f5c6aab19..67dc75b89 100644 --- a/examples/c++/install-and-use-icicle/README.md +++ b/examples/c++/install-and-use-icicle/README.md @@ -6,9 +6,8 @@ Download release binaries: - **Frontend** icicle30-ubuntu22.tar.gz - **Backend** icicle30-ubuntu22-cuda122.tar.gz -:::note -Name of the files is based on the release version. Make sure to update the tar file names in the example if using different release. -::: +> [!NOTE] +> Name of the files is based on the release version. Make sure to update the tar file names in the example if you're using a different release. ## Optional: This example is demonstrated in an ubuntu22 docker but this is not mandatory. 
```bash From 712bdfc16562745b1cd712762acb9da8098be7bc Mon Sep 17 00:00:00 2001 From: yshekel Date: Thu, 29 Aug 2024 10:35:25 +0300 Subject: [PATCH 26/35] Update examples/c++/install-and-use-icicle/README.md Co-authored-by: Jeremy Felder --- examples/c++/install-and-use-icicle/README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/examples/c++/install-and-use-icicle/README.md b/examples/c++/install-and-use-icicle/README.md index 67dc75b89..1528b0df2 100644 --- a/examples/c++/install-and-use-icicle/README.md +++ b/examples/c++/install-and-use-icicle/README.md @@ -10,6 +10,7 @@ Download release binaries: > Name of the files is based on the release version. Make sure to update the tar file names in the example if you're using a different release. ## Optional: This example is demonstrated in an ubuntu22 docker but this is not mandatory. + ```bash docker run -it --rm --gpus all -v ./:/workspace -w /workspace icicle-release-ubuntu22-cuda122 bash ``` From d3cdd1f309963eae38a3b546a2fee69b8d661d41 Mon Sep 17 00:00:00 2001 From: yshekel Date: Thu, 29 Aug 2024 10:35:35 +0300 Subject: [PATCH 27/35] Update examples/c++/install-and-use-icicle/README.md Co-authored-by: Jeremy Felder --- examples/c++/install-and-use-icicle/README.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/examples/c++/install-and-use-icicle/README.md b/examples/c++/install-and-use-icicle/README.md index 1528b0df2..d9be0772f 100644 --- a/examples/c++/install-and-use-icicle/README.md +++ b/examples/c++/install-and-use-icicle/README.md @@ -18,7 +18,9 @@ docker run -it --rm --gpus all -v ./:/workspace -w /workspace icicle-release-ubu This command is starting bash in the docker, with GPUs and mapping the example files to `/workspace` in the docker. ### Building the docker image + This image is based on nvidia's image for ubuntu22. 
built from the Dockerfile: + ```dockerfile # Use the official NVIDIA development runtime image for Ubuntu 22.04 FROM nvidia/cuda:12.2.0-devel-ubuntu22.04 From e0dcb27c3e3c6bc77cc605c47267d97cf403eb4a Mon Sep 17 00:00:00 2001 From: yshekel Date: Thu, 29 Aug 2024 10:36:20 +0300 Subject: [PATCH 28/35] Update scripts/release/README.md Co-authored-by: Jeremy Felder --- scripts/release/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/release/README.md b/scripts/release/README.md index f6576c54a..6f11ff346 100644 --- a/scripts/release/README.md +++ b/scripts/release/README.md @@ -1,4 +1,4 @@ -## Build the release +# Build the release This section is describing how a release is generated, given the release sources.
We use docker to represent the target environment for the release. Each Docker image is tailored to a specific distribution and CUDA version. You first build the Docker image, which sets up the environment, and then use this Docker image to build the release tar file. This ensures that the build process is consistent and reproducible across different environments. From 05c138e04af9715ea9d98e14865502b57222130f Mon Sep 17 00:00:00 2001 From: yshekel Date: Thu, 29 Aug 2024 10:36:26 +0300 Subject: [PATCH 29/35] Update scripts/release/README.md Co-authored-by: Jeremy Felder --- scripts/release/README.md | 1 - 1 file changed, 1 deletion(-) diff --git a/scripts/release/README.md b/scripts/release/README.md index 6f11ff346..8caa8d420 100644 --- a/scripts/release/README.md +++ b/scripts/release/README.md @@ -24,7 +24,6 @@ cd ./scripts/release docker build -t icicle-release-ubuntu22-cuda122 -f Dockerfile.ubuntu22 . ``` - ## Build Libraries Inside the Docker To build the Icicle libraries inside a Docker container and output the tar file to the `release_output` directory: From a3f9ce4ba4cfbff522b71adaf64b2227dfa721f2 Mon Sep 17 00:00:00 2001 From: Yuval Shekel Date: Thu, 29 Aug 2024 10:44:32 +0300 Subject: [PATCH 30/35] make smoe debug pring verbose --- icicle/src/runtime.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/icicle/src/runtime.cpp b/icicle/src/runtime.cpp index cb180cb01..13ff260d3 100644 --- a/icicle/src/runtime.cpp +++ b/icicle/src/runtime.cpp @@ -282,7 +282,7 @@ extern "C" eIcicleError icicle_load_backend(const char* path, bool is_recursive) // Check if the library name contains "icicle" and if the path contains "/backend/" if (fileName.find("icicle") == std::string::npos || path.find("/backend/") == std::string::npos) { - ICICLE_LOG_DEBUG << "Skipping: " << filePath << " - Not an Icicle backend library."; + ICICLE_LOG_VERBOSE << "Skipping: " << filePath << " - Not an Icicle backend library."; return; } From 
e1796641736bc1163cb9e068c7e7317e3c6000b7 Mon Sep 17 00:00:00 2001 From: Yuval Shekel Date: Thu, 29 Aug 2024 11:37:29 +0300 Subject: [PATCH 31/35] code review fixes --- docs/docs/icicle/install_and_use.md | 1 - examples/c++/install-and-use-icicle/README.md | 9 +++++++-- examples/rust/install-and-use-icicle/README.md | 8 ++++++-- icicle/include/icicle/backend/msm_config.h | 2 +- icicle/include/icicle/curves/params/bw6_761.h | 14 +++++++------- .../icicle/fields/snark_fields/bw6_761_base.h | 5 +++-- 6 files changed, 24 insertions(+), 15 deletions(-) diff --git a/docs/docs/icicle/install_and_use.md b/docs/docs/icicle/install_and_use.md index f4883adfe..7886ad1f7 100644 --- a/docs/docs/icicle/install_and_use.md +++ b/docs/docs/icicle/install_and_use.md @@ -1,4 +1,3 @@ - # Install and use ICICLE ## Overview diff --git a/examples/c++/install-and-use-icicle/README.md b/examples/c++/install-and-use-icicle/README.md index d9be0772f..d1a4a9074 100644 --- a/examples/c++/install-and-use-icicle/README.md +++ b/examples/c++/install-and-use-icicle/README.md @@ -35,6 +35,7 @@ RUN apt-get update && apt-get install -y \ by `docker build -t icicle-release-ubuntu20-cuda122 -f Dockerfile.ubuntu20 .` ## Extract tars and install + ```bash cd release # extract frontend part @@ -47,6 +48,7 @@ rm -rf icicle # remove the extracted dir ``` ## Compile and link C++ example to icicle + ```bash cd .. mkdir build @@ -54,14 +56,16 @@ cmake -S . -B build && cmake --build build ``` ## Launch the executable + ```bash ./build/example ``` ### CUDA license + If using CUDA backend, make sure to have a CUDA backend license: -- For license server, specify address: `export ICICLE_LICENSE_SERVER_ADDR=port@ip`. -- For local license, specify path to license: `export ICICLE_LICENSE_SERVER_ADDR=path/to/license`. (TODO rename env variable) +- For license server, specify address: `export ICICLE_LICENSE=port@ip`. +- For local license, specify path to license: `export ICICLE_LICENSE=path/to/license`. 
## Install in custom location @@ -74,6 +78,7 @@ tar xzvf icicle30-ubuntu22-cuda122.tar.gz -C /custom/path # OPTIONAL ``` ### Build your app and link to ICICLE + You will have to specify paths for include and libs so that the compiler linker and loader can find them at compile anb runtime. You can add the following to cmake file to do so: ```cmake diff --git a/examples/rust/install-and-use-icicle/README.md b/examples/rust/install-and-use-icicle/README.md index 290e27a86..8852c3561 100644 --- a/examples/rust/install-and-use-icicle/README.md +++ b/examples/rust/install-and-use-icicle/README.md @@ -12,6 +12,7 @@ Name of the files is based on the release version. Make sure to update the tar f ::: ## Optional: This example is demonstrated in an ubuntu22 docker but this is not mandatory. + ```bash docker run -it --rm --gpus all -v ./:/workspace -w /workspace icicle-release-ubuntu22-cuda122 bash ``` @@ -19,6 +20,7 @@ docker run -it --rm --gpus all -v ./:/workspace -w /workspace icicle-release-ubu This command is starting bash in the docker, with GPUs and mapping the example files to `/workspace` in the docker. ### Building the docker image + This image is based on nvidia's image for ubuntu22. built from the Dockerfile: ```dockerfile # Use the official NVIDIA development runtime image for Ubuntu 22.04 @@ -58,14 +60,16 @@ cargo build --release ``` ## Launch the executable + ```bash cargo run --release ``` ### CUDA license + If using CUDA backend, make sure to have a CUDA backend license: -- For license server, specify address: `export ICICLE_LICENSE_SERVER_ADDR=port@ip`. -- For local license, specify path to license: `export ICICLE_LICENSE_SERVER_ADDR=path/to/license`. (TODO rename env variable) +- For license server, specify address: `export ICICLE_LICENSE_ADDR=port@ip`. +- For local license, specify path to license: `export ICICLE_LICENSE_ADDR=path/to/license`. 
(TODO rename env variable) ## Install in custom location diff --git a/icicle/include/icicle/backend/msm_config.h b/icicle/include/icicle/backend/msm_config.h index 58412f731..c1e73e670 100644 --- a/icicle/include/icicle/backend/msm_config.h +++ b/icicle/include/icicle/backend/msm_config.h @@ -4,7 +4,7 @@ namespace CpuBackendConfig { // Backend-specific configuration flags as constexpr strings constexpr const char* CPU_NOF_THREADS = "n_threads"; -} +} // namespace CpuBackendConfig /********* CUDA Backend Configurations *********/ namespace CudaBackendConfig { diff --git a/icicle/include/icicle/curves/params/bw6_761.h b/icicle/include/icicle/curves/params/bw6_761.h index d2db00cc5..93fb1617c 100644 --- a/icicle/include/icicle/curves/params/bw6_761.h +++ b/icicle/include/icicle/curves/params/bw6_761.h @@ -11,7 +11,7 @@ namespace bw6_761 { typedef Projective projective_t; typedef Affine affine_t; - struct G2; + struct G2; typedef Field g2_point_field_t; typedef Projective g2_projective_t; typedef Affine g2_affine_t; @@ -34,13 +34,13 @@ namespace bw6_761 { struct G2 { static constexpr g2_point_field_t gen_x = {0xcd025f1c, 0xa830c194, 0xe1bf995b, 0x6410cf4f, 0xc2ad54b0, 0x00e96efb, - 0x3cd208d7, 0xce6948cb, 0x00e1b6ba, 0x963317a3, 0xac70e7c7, 0xc5bbcae9, - 0xf09feb58, 0x734ec3f1, 0xab3da268, 0x26b41c5d, 0x13890f6d, 0x4c062010, - 0xc5a7115f, 0xd61053aa, 0x69d660f9, 0xc852a82e, 0x41d9b816, 0x01101332}; + 0x3cd208d7, 0xce6948cb, 0x00e1b6ba, 0x963317a3, 0xac70e7c7, 0xc5bbcae9, + 0xf09feb58, 0x734ec3f1, 0xab3da268, 0x26b41c5d, 0x13890f6d, 0x4c062010, + 0xc5a7115f, 0xd61053aa, 0x69d660f9, 0xc852a82e, 0x41d9b816, 0x01101332}; static constexpr g2_point_field_t gen_y = {0x28c73b61, 0xeb70a167, 0xf9eac689, 0x91ec0594, 0x3c5a02a5, 0x58aa2d3a, - 0x504affc7, 0x3ea96fcd, 0xffa82300, 0x8906c170, 0xd2c712b8, 0x64f293db, - 0x33293fef, 0x94c97eb7, 0x0b95a59c, 0x0a1d86c8, 0x53ffe316, 0x81a78e27, - 0xcec2181c, 0x26b7cf9a, 0xe4b6d2dc, 0x8179eb10, 0x7761369f, 0x0017c335}; + 0x504affc7, 
0x3ea96fcd, 0xffa82300, 0x8906c170, 0xd2c712b8, 0x64f293db, + 0x33293fef, 0x94c97eb7, 0x0b95a59c, 0x0a1d86c8, 0x53ffe316, 0x81a78e27, + 0xcec2181c, 0x26b7cf9a, 0xe4b6d2dc, 0x8179eb10, 0x7761369f, 0x0017c335}; static constexpr g2_point_field_t weierstrass_b = { 0x00000004, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, diff --git a/icicle/include/icicle/fields/snark_fields/bw6_761_base.h b/icicle/include/icicle/fields/snark_fields/bw6_761_base.h index 2b2de80ac..48cb5d8eb 100644 --- a/icicle/include/icicle/fields/snark_fields/bw6_761_base.h +++ b/icicle/include/icicle/fields/snark_fields/bw6_761_base.h @@ -13,8 +13,9 @@ namespace bw6_761 { }; // Note: this fq_config_g2 is a workaround to have different types for G1 and G2. - // Otherwise, they have the same types, thus APIs have the same type, thus we don't know which to call when specializing g2 templates. - struct fq_config_g2 { + // Otherwise, they have the same types, thus APIs have the same type, thus we don't know which to call when + // specializing g2 templates. 
+ struct fq_config_g2 { static constexpr storage<24> modulus = {0x0000008b, 0xf49d0000, 0x70000082, 0xe6913e68, 0xeaf0a437, 0x160cf8ae, 0x5667a8f8, 0x98a116c2, 0x73ebff2e, 0x71dcd3dc, 0x12f9fd90, 0x8689c8ed, 0x25b42304, 0x03cebaff, 0xe584e919, 0x707ba638, 0x8087be41, 0x528275ef, From 7275872d3912ca77cd86d36f12f0e468b9ad5de2 Mon Sep 17 00:00:00 2001 From: Yuval Shekel Date: Thu, 29 Aug 2024 11:43:40 +0300 Subject: [PATCH 32/35] update release.yml script to build and upload the release --- .github/workflows/release.yml | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index af8f024b4..6755bbe49 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -46,5 +46,15 @@ jobs: env: GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} run: | + mkdir -p release_output && rm -rf ./release_output/* + ./scripts/release/build_all.sh ./release_output LATEST_TAG=$(git describe --tags --abbrev=0) gh release create $LATEST_TAG --generate-notes -d --verify-tag -t "Release $LATEST_TAG" + - name: Upload release tars + env: + GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} + run: | + LATEST_TAG=$(git describe --tags --abbrev=0) + for file in ./release_output/*.tar.gz; do + gh release upload $LATEST_TAG "$file" + done From e9e9b2a56b9b700fa43e5810bb778b04d9dba0d7 Mon Sep 17 00:00:00 2001 From: Yuval Shekel Date: Thu, 29 Aug 2024 11:49:12 +0300 Subject: [PATCH 33/35] fixed cuda backend license info --- docs/docs/icicle/install_cuda_backend.md | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/docs/docs/icicle/install_cuda_backend.md b/docs/docs/icicle/install_cuda_backend.md index 75e1cbecf..2749a19db 100644 --- a/docs/docs/icicle/install_cuda_backend.md +++ b/docs/docs/icicle/install_cuda_backend.md @@ -18,15 +18,17 @@ The CUDA backend requires a valid license to function. There are two CUDA backen :::note As for now CUDA backend can be accessed without purchasing a license. 
Ingonyama is hosting a license server that will allow access to anyone. -To use it, make sure to set the environment to `export ICICLE_LICNSE_SERVER_PATH=5053@ec2-50-16-150-188.compute-1.amazonaws.com` +By default CUDA backend will try to access this server if no other license is available. +TO manually specify it, set `export ICICLE_LICENSE=5053@ec2-50-16-150-188.compute-1.amazonaws.com`. ::: Licenses are available for purchase [here TODO](#) . After purchasing, you will receive a license key that must be installed on the license-server or node-locked machine. For license-server, you will have to tell the application that is using ICICLE, where the server is. **Specify the license server address:** + ``` -export ICICLE_LICNSE_SERVER_PATH=port@ip +export ICICLE_LICENSE=port@ip ``` For further assist , contact our support team for assistance. `support@ingonyama.com` (TODO make sure this exists). From e7b6ef210ceff2f8f8a4bc10ac69815acd48dd0d Mon Sep 17 00:00:00 2001 From: Yuval Shekel Date: Thu, 29 Aug 2024 11:53:39 +0300 Subject: [PATCH 34/35] more code review fixes --- examples/c++/install-and-use-icicle/README.md | 4 ++-- examples/rust/install-and-use-icicle/README.md | 6 ++++-- 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/examples/c++/install-and-use-icicle/README.md b/examples/c++/install-and-use-icicle/README.md index d1a4a9074..516660211 100644 --- a/examples/c++/install-and-use-icicle/README.md +++ b/examples/c++/install-and-use-icicle/README.md @@ -6,8 +6,8 @@ Download release binaries: - **Frontend** icicle30-ubuntu22.tar.gz - **Backend** icicle30-ubuntu22-cuda122.tar.gz -> [!NOTE] -> Name of the files is based on the release version. Make sure to update the tar file names in the example if you're using a different release. +[!NOTE] +Name of the files is based on the release version. Make sure to update the tar file names in the example if you're using a different release. 
## Optional: This example is demonstrated in an ubuntu22 docker but this is not mandatory. diff --git a/examples/rust/install-and-use-icicle/README.md b/examples/rust/install-and-use-icicle/README.md index 8852c3561..be04f89f0 100644 --- a/examples/rust/install-and-use-icicle/README.md +++ b/examples/rust/install-and-use-icicle/README.md @@ -5,11 +5,12 @@ This example shows how to install CUDA backend and use it in Rust application. Download release binaries for CUDA backend: + - **Backend** icicle30-ubuntu22-cuda122.tar.gz -:::note +[!NOTE] Name of the files is based on the release version. Make sure to update the tar file names in the example if using different release. -::: + ## Optional: This example is demonstrated in an ubuntu22 docker but this is not mandatory. @@ -38,6 +39,7 @@ RUN apt install cargo -y by `docker build -t icicle-release-ubuntu20-cuda122 -f Dockerfile.ubuntu20 .` ## Extract tars and install + ```bash cd release # extract CUDA backend (OPTIONAL) From 3442cbdbca42cbe5e866c69955ac453cd25dcb0d Mon Sep 17 00:00:00 2001 From: Yuval Shekel Date: Thu, 29 Aug 2024 12:42:16 +0300 Subject: [PATCH 35/35] fix missing > in readme --- examples/c++/install-and-use-icicle/README.md | 4 ++-- examples/rust/install-and-use-icicle/README.md | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/examples/c++/install-and-use-icicle/README.md b/examples/c++/install-and-use-icicle/README.md index 516660211..d1a4a9074 100644 --- a/examples/c++/install-and-use-icicle/README.md +++ b/examples/c++/install-and-use-icicle/README.md @@ -6,8 +6,8 @@ Download release binaries: - **Frontend** icicle30-ubuntu22.tar.gz - **Backend** icicle30-ubuntu22-cuda122.tar.gz -[!NOTE] -Name of the files is based on the release version. Make sure to update the tar file names in the example if you're using a different release. +> [!NOTE] +> Name of the files is based on the release version. 
Make sure to update the tar file names in the example if you're using a different release. ## Optional: This example is demonstrated in an ubuntu22 docker but this is not mandatory. diff --git a/examples/rust/install-and-use-icicle/README.md b/examples/rust/install-and-use-icicle/README.md index be04f89f0..a75dcefb6 100644 --- a/examples/rust/install-and-use-icicle/README.md +++ b/examples/rust/install-and-use-icicle/README.md @@ -8,8 +8,8 @@ Download release binaries for CUDA backend: - **Backend** icicle30-ubuntu22-cuda122.tar.gz -[!NOTE] -Name of the files is based on the release version. Make sure to update the tar file names in the example if using different release. +> [!NOTE] +> Name of the files is based on the release version. Make sure to update the tar file names in the example if using different release. ## Optional: This example is demonstrated in an ubuntu22 docker but this is not mandatory.