diff --git a/.github/workflows/rust.yml b/.github/workflows/rust.yml index 1697ea4e..af2280f3 100644 --- a/.github/workflows/rust.yml +++ b/.github/workflows/rust.yml @@ -2,9 +2,9 @@ name: Rust on: push: - branches: [ "main" ] + branches: ["main"] pull_request: - branches: [ "main" ] + branches: ["main"] env: CARGO_TERM_COLOR: always @@ -18,23 +18,27 @@ jobs: os: [windows-latest, ubuntu-latest, macos-latest] runs-on: ${{ matrix.os }} steps: - - uses: actions/checkout@v3 - - uses: dtolnay/rust-toolchain@stable - - name: Check - run: cargo check --verbose - - name: Build - run: cargo build --verbose - - name: Run tests - run: cargo test --verbose + - uses: actions/checkout@v3 + with: + submodules: recursive + - uses: dtolnay/rust-toolchain@stable + - name: Check + run: cargo check --verbose + - name: Build + run: cargo build --verbose + - name: Run tests + run: cargo test --verbose fmt: name: Clippy and formatting runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 - - uses: dtolnay/rust-toolchain@stable - with: - components: rustfmt, clippy - - name: Formatting - run: cargo fmt --all -- --check - - name: Clippy - run: cargo clippy -- -Dclippy::all # -Dclippy::pedantic + - uses: actions/checkout@v3 + with: + submodules: recursive + - uses: dtolnay/rust-toolchain@stable + with: + components: rustfmt, clippy + - name: Formatting + run: cargo fmt --all -- --check + - name: Clippy + run: cargo clippy -- -Dclippy::all # -Dclippy::pedantic diff --git a/.gitignore b/.gitignore index ea8c4bf7..5dbf6776 100644 --- a/.gitignore +++ b/.gitignore @@ -1 +1,3 @@ /target +/models +.DS_Store \ No newline at end of file diff --git a/.gitmodules b/.gitmodules new file mode 100644 index 00000000..12466c24 --- /dev/null +++ b/.gitmodules @@ -0,0 +1,3 @@ +[submodule "ggml/ggml"] + path = ggml/sys/ggml + url = git@github.com:ggerganov/ggml.git diff --git a/.vscode/launch.json b/.vscode/launch.json new file mode 100644 index 00000000..64c08765 --- /dev/null +++ b/.vscode/launch.json @@ -0,0 +1,62 @@ +{ + // Use IntelliSense to learn about possible attributes. + // Hover to view descriptions of existing attributes. 
+ // For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387 + "version": "0.2.0", + "configurations": [ + { + "type": "lldb", + "request": "launch", + "name": "Debug example 'bloom_inference'", + "cargo": { + "args": [ + "build", + "--example=bloom_inference", + "--package=bloom" + ], + "filter": { + "name": "bloom_inference", + "kind": "example" + } + }, + "args": ["${env:HOME}/.ggml-models/bloom-7b.bin"], + "cwd": "${workspaceFolder}" + }, + { + "type": "lldb", + "request": "launch", + "name": "Debug example 'gpt2_inference'", + "cargo": { + "args": [ + "build", + "--example=gpt2_inference", + "--package=gpt2" + ], + "filter": { + "name": "gpt2_inference", + "kind": "example" + } + }, + "args": ["${env:HOME}/.ggml-models/cerebras-gpt-13b.bin"], + "cwd": "${workspaceFolder}" + }, + { + "type": "lldb", + "request": "launch", + "name": "Debug example 'llama_inference'", + "cargo": { + "args": [ + "build", + "--example=llama_inference", + "--package=llama" + ], + "filter": { + "name": "llama_inference", + "kind": "example" + } + }, + "args": ["${env:HOME}/.ggml-models/gpt4all-7b.bin"], + "cwd": "${workspaceFolder}" + } + ] +} \ No newline at end of file diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index c6e025a5..a1bae158 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -9,12 +9,13 @@ When new GGML versions are pushed to llama.cpp (or one of the other repos hosting a copy of it) and we want to update our copy, the process should be as follows: -- Update the `ggml.c` and `ggml.h` inside `ggml-sys/ggml`. -- In that same folder, update `CREDITS.txt` to indicate the llama.cpp version - these files were taken from +- Update the submodule to the latest version of GGML: + ```shell + $ git submodule update --remote + ``` - Run the bindgen script: - ```shell - $ cargo run --bin generate-ggml-bindings ggml-sys - ``` + ```shell + $ cargo run --bin generate-ggml-bindings ggml-sys + ``` - Fix any compiler errors that pop up due to the new version of the bindings and test the changes. 
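As a reference for what that bindgen step produces, below is a minimal sketch of a ggml bindings generator. It is modelled on the `generate-ggml-bindings` tool that this diff removes and assumes the replacement under `tools/*` keeps using `bindgen`'s builder API; the header and output paths are illustrative guesses based on the new `ggml/sys/ggml` submodule layout, not the project's actual configuration.

```rust
// Sketch of a ggml bindings generator (illustrative paths, not the project's
// actual configuration): point bindgen at ggml.h in the submodule and write
// the generated Rust bindings into the sys crate.
use std::path::PathBuf;

fn main() {
    // Assumed location of the header inside the `ggml/sys/ggml` submodule.
    let header = "ggml/sys/ggml/include/ggml/ggml.h";

    let bindings = bindgen::Builder::default()
        .header(header)
        // Silence lints in the generated code.
        .raw_line("#![allow(non_upper_case_globals)]")
        .raw_line("#![allow(non_camel_case_types)]")
        .raw_line("#![allow(non_snake_case)]")
        .raw_line("#![allow(unused)]")
        // Only emit items declared in ggml.h, not in its stdlib includes.
        .allowlist_file(header)
        .generate()
        .expect("unable to generate ggml bindings");

    // Assumed output location for the generated bindings.
    let out_path = PathBuf::from("ggml/sys/src/lib.rs");
    bindings
        .write_to_file(out_path)
        .expect("couldn't write bindings");
}
```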
diff --git a/Cargo.lock b/Cargo.lock index 58b2c4ba..55a64934 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -104,9 +104,9 @@ dependencies = [ [[package]] name = "bindgen" -version = "0.64.0" +version = "0.65.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c4243e6031260db77ede97ad86c27e501d646a27ab57b59a574f725d98ab1fb4" +checksum = "cfdf7b466f9a4903edc73f95d6d2bcd5baf8ae620638762244d3f60143643cc5" dependencies = [ "bitflags", "cexpr", @@ -115,12 +115,13 @@ dependencies = [ "lazycell", "log", "peeking_take_while", + "prettyplease", "proc-macro2", "quote", "regex", "rustc-hash", "shlex", - "syn 1.0.109", + "syn 2.0.13", "which", ] @@ -130,6 +131,16 @@ version = "1.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" +[[package]] +name = "bloom" +version = "0.1.0" +dependencies = [ + "bytemuck", + "ggml", + "llm-base", + "rand", +] + [[package]] name = "bytemuck" version = "1.13.1" @@ -443,13 +454,6 @@ name = "ggml" version = "0.1.0" dependencies = [ "ggml-sys", -] - -[[package]] -name = "ggml-format" -version = "0.1.0" -dependencies = [ - "ggml", "rand", "thiserror", ] @@ -473,6 +477,16 @@ version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d2fabcfbdc87f4758337ca535fb41a6d701b65693ce38287d856d1674551ec9b" +[[package]] +name = "gpt2" +version = "0.1.0" +dependencies = [ + "bytemuck", + "ggml", + "llm-base", + "rand", +] + [[package]] name = "half" version = "2.2.1" @@ -607,40 +621,66 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d59d8c75012853d2e872fb56bc8a2e53718e2cafe1a4c823143141c6d90c322f" [[package]] -name = "llama-cli" +name = "llama" version = "0.1.0" dependencies = [ - "bincode", - "clap", - "color-eyre", - "env_logger", - "llama-rs", - "log", - "num_cpus", - "once_cell", + "bytemuck", + "ggml", + "half", + "llm-base", + "partial_sort", + "protobuf", "rand", - "rustyline", - "spinners", - "zstd", + "rust_tokenizers", + "serde", + "serde_bytes", + "serde_json", + "thiserror", +] + +[[package]] +name = "llm" +version = "0.1.0" +dependencies = [ + "bloom", + "gpt2", + "llama", + "llm-base", ] [[package]] -name = "llama-rs" +name = "llm-base" version = "0.1.0" dependencies = [ + "bincode", "bytemuck", "ggml", - "ggml-format", - "half", + "log", "memmap2", "partial_sort", - "protobuf", "rand", - "rust_tokenizers", "serde", "serde_bytes", - "serde_json", "thiserror", + "zstd", +] + +[[package]] +name = "llm-cli" +version = "0.1.0" +dependencies = [ + "bincode", + "clap", + "color-eyre", + "env_logger", + "llm", + "log", + "num_cpus", + "once_cell", + "rand", + "rustyline", + "spinners", + "zstd", ] [[package]] @@ -783,6 +823,16 @@ version = "0.2.17" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5b40af805b3121feab8a3c29f04d8ad262fa8e0561883e7653e024ae4479e6de" +[[package]] +name = "prettyplease" +version = "0.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1ceca8aaf45b5c46ec7ed39fff75f57290368c1846d33d24a122ca81416ab058" +dependencies = [ + "proc-macro2", + "syn 2.0.13", +] + [[package]] name = "proc-macro2" version = "1.0.56" diff --git a/Cargo.toml b/Cargo.toml index 20eca429..5d7d42ee 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,11 +1,15 @@ [workspace] members = [ - "ggml-sys", + # Crates + "bloom", "ggml", - "ggml-format", - "llama-rs", - "llama-cli", - "generate-ggml-bindings" + "ggml/sys", + "gpt2", + "llama", + "llm", 
+ "llm-base", + "llm-cli", + "tools/*" ] resolver = "2" @@ -13,4 +17,7 @@ resolver = "2" version = "0.1.0" [workspace.dependencies] +bytemuck = "1.13.1" +log = "0.4" rand = "0.8.5" +serde = { version = "1.0", features = ["derive"] } diff --git a/README.md b/README.md index e96ecaf0..031a55dc 100644 --- a/README.md +++ b/README.md @@ -1,39 +1,31 @@ # LLaMA-rs - +This project is a Rust port of +[llama.cpp](https://github.com/ggerganov/llama.cpp) 🦙🦀🚀 -> Do the LLaMA thing, but now in Rust 🦀🚀🦙 - -![A llama riding a crab, AI-generated](./doc/resources/logo2.png) - -> _Image by [@darthdeus](https://github.com/darthdeus/), using Stable Diffusion_ - -[![ko-fi](https://ko-fi.com/img/githubbutton_sm.svg)](https://ko-fi.com/F1F8DNO5D) +Just like its C++ counterpart, it is powered by the +[`ggml`](https://github.com/ggerganov/ggml) tensor library, which allows running +inference for Facebook's [LLaMA](https://github.com/facebookresearch/llama) +model on a CPU with good performance using full precision, f16 or 4-bit +quantized versions of the model. [![Latest version](https://img.shields.io/crates/v/llama-rs.svg)](https://crates.io/crates/llama_rs) ![MIT/Apache2](https://shields.io/badge/license-MIT%2FApache--2.0-blue) [![Discord](https://img.shields.io/discord/1085885067601137734)](https://discord.gg/YB9WaXYAWU) -![Gif showcasing language generation using llama-rs](./doc/resources/llama_gif.gif) - -**LLaMA-rs** is a Rust port of the -[llama.cpp](https://github.com/ggerganov/llama.cpp) project. This allows running -inference for Facebook's [LLaMA](https://github.com/facebookresearch/llama) -model on a CPU with good performance using full precision, f16 or 4-bit -quantized versions of the model. +![A llama riding a crab, AI-generated](./doc/resources/logo2.png) -Just like its C++ counterpart, it is powered by the -[`ggml`](https://github.com/ggerganov/ggml) tensor library, achieving the same -performance as the original code. +> _Image by [@darthdeus](https://github.com/darthdeus/), using Stable Diffusion_ ## Getting started Make sure you have a Rust 1.65.0 or above and C toolchain[^1] set up. -`llama-rs` is a Rust library, while `llama-cli` is a CLI application that wraps -`llama-rs` and offers basic inference capabilities. +`llm-base`, and the model crates (e.g. `bloom`, `gpt2` `llama`) are Rust +libraries, while `llm-cli` is a CLI applications that wraps the models and offer +basic inference capabilities. -The following instructions explain how to build `llama-cli`. +The following instructions explain how to build CLI applications. **NOTE**: For best results, make sure to build and run in release mode. Debug builds are going to be very slow. @@ -43,33 +35,36 @@ Debug builds are going to be very slow. Run ```shell -cargo install --git https://github.com/rustformers/llama-rs llama-cli +cargo install --git https://github.com/rustformers/llama-rs llm ``` -to install `llama-cli` to your Cargo `bin` directory, which `rustup` is likely to +to install `llm` to your Cargo `bin` directory, which `rustup` is likely to have added to your `PATH`. -It can then be run through `llama-cli`. +The CLI application can then be run through `llm`. 
+ +![Gif showcasing language generation using llama-rs](./doc/resources/llama_gif.gif) ### Building from repository -Clone the repository, and then build it through +Clone the repository and then build it with ```shell -cargo build --release --bin llama-cli +git clone --recurse-submodules git@github.com:rustformers/llama-rs.git +cargo build --release ``` -The resulting binary will be at `target/release/llama-cli[.exe]`. +The resulting binary will be at `target/release/llm[.exe]`. It can also be run directly through Cargo, using ```shell -cargo run --release --bin llama-cli -- +cargo run --release --bin llm -- ``` This is useful for development. -### Getting the weights +### Getting LLaMA weights In order to run the inference code in `llama-rs`, a copy of the model's weights are required. @@ -77,7 +72,8 @@ are required. #### From Hugging Face Compatible weights - not necessarily the original LLaMA weights - can be found -on [Hugging Face by searching for GGML](https://huggingface.co/models?search=ggml). At present, LLaMA-architecture models are supported. +on [Hugging Face by searching for GGML](https://huggingface.co/models?search=ggml). +At present, LLaMA-architecture models are supported. #### LLaMA original weights @@ -107,6 +103,21 @@ cargo run -p llama-cli quantize /path/to/your/models/7B/ggml-model-f16.bin /path > The [llama.cpp repository](https://github.com/ggerganov/llama.cpp) has > additional information on how to obtain and run specific models. +### BLOOM + +The open-source [BLOOM](https://bigscience.huggingface.co/blog/bloom) model is +also supported. +[More information](https://huggingface.co/docs/transformers/model_doc/bloom) +about BLOOM is available on HuggingFace, as are some +[quantized models](https://huggingface.co/models?search=bloom%20ggml). + +### GPT2 + +OpenAI's [GPT-2](https://jalammar.github.io/illustrated-gpt2/) architecture is +also supported. The open-source family of +[Cerebras](https://www.cerebras.net/blog/cerebras-gpt-a-family-of-open-compute-efficient-large-language-models/) +models is built on this architecture. + _Support for other open source models is currently planned. For models where weights can be legally distributed, this section will be updated with scripts to make the install process as user-friendly as possible. Due to the model's legal @@ -133,9 +144,9 @@ Some additional things to try: ![Gif showcasing alpaca repl mode](./doc/resources/alpaca_repl_screencap.gif) -- Sessions can be loaded (`--load-session`) or saved (`--save-session`) to file. To automatically load - and save the same session, use `--persist-session`. This can be used to cache prompts to reduce load - time, too: +- Sessions can be loaded (`--load-session`) or saved (`--save-session`) to file. + To automatically load and save the same session, use `--persist-session`. 
+ This can be used to cache prompts to reduce load time, too: ![Gif showcasing prompt caching](./doc/resources/prompt_caching_screencap.gif) diff --git a/ggml-format/Cargo.toml b/bloom/Cargo.toml similarity index 56% rename from ggml-format/Cargo.toml rename to bloom/Cargo.toml index 91daca22..2dd9b0a9 100644 --- a/ggml-format/Cargo.toml +++ b/bloom/Cargo.toml @@ -1,13 +1,15 @@ [package] -name = "ggml-format" -version = "0.1.0" +name = "bloom" +version = { workspace = true } edition = "2021" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] ggml = { path = "../ggml" } -thiserror = "1.0" +llm-base = { path = "../llm-base" } + +bytemuck = { workspace = true } [dev-dependencies] -rand = "0.8" +rand = { workspace = true } diff --git a/bloom/examples/bloom_inference.rs b/bloom/examples/bloom_inference.rs new file mode 100644 index 00000000..8ca2435b --- /dev/null +++ b/bloom/examples/bloom_inference.rs @@ -0,0 +1,42 @@ +use std::{convert::Infallible, env::args, io::Write}; + +use llm_base::{load_progress_callback, model::KnownModel}; + +extern crate bloom; + +fn main() { + let args: Vec = args().collect(); + let loc = &args[1]; + let prompt = match &args.len() { + 3 => &args[2], + _ => "Rust is a cool programming language because ", + }; + + println!(" >>> Loading model from {loc}..."); + let now = std::time::Instant::now(); + + let bloom = bloom::Bloom::load(loc, true, 512, load_progress_callback) + .unwrap_or_else(|e| panic!("Error loading model from {loc}: {e}")); + + println!(" >>> Model loaded in {} ms.", now.elapsed().as_millis()); + + let mut session = bloom.start_session(Default::default()); + let res = session.inference_with_prompt::( + &bloom, + &Default::default(), + prompt, + None, + &mut rand::thread_rng(), + |t| { + print!("{t}"); + std::io::stdout().flush().unwrap(); + + Ok(()) + }, + ); + + match res { + Ok(result) => println!("\n\nInference stats:\n{result}"), + Err(err) => println!("\n{err}"), + } +} diff --git a/bloom/src/lib.rs b/bloom/src/lib.rs new file mode 100644 index 00000000..a5a4ed2f --- /dev/null +++ b/bloom/src/lib.rs @@ -0,0 +1,519 @@ +//! An implementation of BLOOM (BigScience Large Open-science Open-access Multilingual Language Model). +//! +//! This implementation of BLOOM may not be fully correct. More work may be required. + +use std::path::Path; + +use llm_base::{ + util, EvaluateOutputRequest, FileType, InferenceParameters, InferenceSession, + InferenceSessionParameters, KnownModel, LoadError, LoadProgress, Mmap, TokenId, Vocabulary, +}; + +/// The weights for the BLOOM model. All the mutable state is split into a +/// separate struct `InferenceSession`. +pub struct Bloom { + hyperparameters: Hyperparameters, + n_context_tokens: usize, + + vocabulary: Vocabulary, + tok_embeddings: ggml::Tensor, + norm: ggml::Tensor, + norm_b: ggml::Tensor, + output_norm: ggml::Tensor, + output_norm_b: ggml::Tensor, + output: ggml::Tensor, + layers: Vec, + + // Must be kept alive for the model + _context: ggml::context::Context, + _mmap: Option, +} + +impl Bloom { + /// Load the model from `path` with `n_context_tokens` context tokens. + /// + /// The status of the loading process will be reported through `load_progress_callback`. 
+ pub fn load( + path: impl AsRef, + prefer_mmap: bool, + n_context_tokens: usize, + load_progress_callback: impl FnMut(LoadProgress), + ) -> Result { + llm_base::load(path, prefer_mmap, n_context_tokens, load_progress_callback) + } +} + +impl KnownModel for Bloom { + type Hyperparameters = Hyperparameters; + + fn new( + hyperparameters: Self::Hyperparameters, + n_context_tokens: usize, + vocabulary: Vocabulary, + tensor_loader: impl llm_base::TensorLoader, + ) -> Result { + let n_embd = hyperparameters.n_embd; + let n_layer = hyperparameters.n_layer; + let n_vocab = hyperparameters.n_vocab; + let n_mult = hyperparameters.n_mult; + let n_ff = ((4 * n_embd + n_mult - 1) / n_mult) * n_mult; + + let mut tl = tensor_loader; + + let tok_embeddings = tl.load("tok_embeddings.weight", &[n_embd, n_vocab])?; + + let norm = tl.load("norm.weight", &[n_embd])?; + let norm_b = tl.load("norm.bias", &[n_embd])?; + + let output_norm = tl.load("output_norm.weight", &[n_embd])?; + let output_norm_b = tl.load("output_norm.bias", &[n_embd])?; + + let output = tl.load("output.weight", &[n_embd, n_vocab])?; + + let mut layers = Vec::new(); + for i in 0..n_layer { + let layer = Layer { + attention_norm: tl.load(&format!("layers.{i}.attention_norm.weight"), &[n_embd])?, + attention_norm_b: tl.load(&format!("layers.{i}.attention_norm.bias"), &[n_embd])?, + + query_key_value: tl.load( + &format!("layers.{i}.attention.query_key_value.weight"), + &[n_embd, 3 * n_embd], + )?, + query_key_value_b: tl.load( + &format!("layers.{i}.attention.query_key_value.bias"), + &[3 * n_embd], + )?, + + wo: tl.load( + &format!("layers.{i}.attention.wo.weight"), + &[n_embd, n_embd], + )?, + wo_b: tl.load(&format!("layers.{i}.attention.wo.bias"), &[n_embd])?, + + ffn_norm: tl.load(&format!("layers.{i}.ffn_norm.weight"), &[n_embd])?, + ffn_norm_b: tl.load(&format!("layers.{i}.ffn_norm.bias"), &[n_embd])?, + + w1: tl.load( + &format!("layers.{i}.feed_forward.w1.weight"), + &[n_embd, n_ff], + )?, + w1_b: tl.load(&format!("layers.{i}.feed_forward.w1.bias"), &[n_ff])?, + w2: tl.load( + &format!("layers.{i}.feed_forward.w2.weight"), + &[n_ff, n_embd], + )?, + w2_b: tl.load(&format!("layers.{i}.feed_forward.w2.bias"), &[n_embd])?, + }; + + layers.push(layer); + } + + let (_context, _, _mmap) = tl.finish(); + + Ok(Bloom { + hyperparameters, + n_context_tokens, + vocabulary, + tok_embeddings, + norm, + norm_b, + output_norm, + output_norm_b, + output, + layers, + _context, + _mmap, + }) + } + + fn start_session(&self, params: InferenceSessionParameters) -> InferenceSession { + InferenceSession::new( + params, + self.n_context_tokens, + self.hyperparameters.n_layer, + self.hyperparameters.n_embd, + self.hyperparameters.n_vocab, + ) + } + + fn evaluate( + &self, + session: &mut InferenceSession, + params: &InferenceParameters, + input_tokens: &[TokenId], + output_request: &mut EvaluateOutputRequest, + ) { + let n = input_tokens.len(); + let n_past = session.n_past; + let n_threads = params.n_threads; + + let Hyperparameters { + n_vocab, + n_embd, + n_mult: _, + n_head, + n_layer, + file_type: _, + } = self.hyperparameters; + let n_ctx = self.n_context_tokens; + + // For the first run, we need to guess a maximum buffer size so we can measure + // the actual memory consumption of the temporary ggml context. 
+ let mut buf_size = 1024 * 1024 * 1024; + if session.mem_per_token > 0 && session.mem_per_token * n > buf_size { + // add 10% to account for ggml object overhead + buf_size = (1.1f64 * session.mem_per_token as f64 * n as f64) as usize; + }; + let ctx0 = ggml::context::Context::init(buf_size, true); + + // TODO: REMAKE THIS AFTER CHECKING GGML GRAPH + let mut gf = ggml::ComputationGraph::new(n_threads); + + let mut embd = ctx0.new_tensor_1d(ggml::Type::I32, n); + unsafe { embd.write_data(bytemuck::cast_slice(input_tokens)) }; + + let mut input_layer = ctx0.op_get_rows(&self.tok_embeddings, &embd); + + // word embeddings norm, + { + input_layer = ctx0.op_norm(&input_layer); + input_layer = ctx0.op_mul(&ctx0.op_repeat(&self.norm, &input_layer), &input_layer); + input_layer = ctx0.op_add(&ctx0.op_repeat(&self.norm_b, &input_layer), &input_layer); + } + + for il in 0..n_layer { + let input_self_attention = input_layer.share(); + let mut current: ggml::Tensor; + + // norm + { + current = ctx0.op_norm(&input_layer); + + // cur = attention_norm * cur + current = ctx0.op_mul( + &ctx0.op_repeat(&self.layers[il].attention_norm, ¤t), + ¤t, + ); + current = ctx0.op_add( + &ctx0.op_repeat(&self.layers[il].attention_norm_b, ¤t), + ¤t, + ); + } + + //attention + { + current = ctx0.op_mul_mat(&self.layers[il].query_key_value, ¤t); + current = ctx0.op_add( + &ctx0.op_repeat(&self.layers[il].query_key_value_b, ¤t), + ¤t, + ); + } + + // self-attention + { + let nb = current.get_nb()[1]; + let q_current = ctx0.op_view_2d( + ¤t, + (n_embd, n), + nb, + //0 * std::mem::size_of::() * n_embd as usize, + 0, + ); + let k_current = ctx0.op_view_2d( + ¤t, + (n_embd, n), + nb, + std::mem::size_of::() * n_embd, + ); + let v_current = ctx0.op_view_2d( + ¤t, + (n_embd, n), + nb, + 2 * std::mem::size_of::() * n_embd, + ); + + // store key and value to memory + if n >= 1 { + let k = ctx0.op_view_1d( + &session.memory_k, + n * n_embd, + (session.memory_k.element_size() * n_embd) * (il * n_ctx + n_past), + ); + + let v = ctx0.op_view_1d( + &session.memory_v, + n * n_embd, + (session.memory_v.element_size() * n_embd) * (il * n_ctx + n_past), + ); + + gf.build_forward_expand(&ctx0.op_cpy(&k_current, &k)); + gf.build_forward_expand(&ctx0.op_cpy(&v_current, &v)); + } + + // Q = Qcur.contiguous().view(n_embd/n_head, n_head, N).permute(0, 2, 1, 3) + let q = ctx0.op_permute( + &ctx0.op_cpy( + &q_current, + &ctx0.new_tensor_3d(ggml::Type::F32, n_embd / n_head, n_head, n), + ), + 0, + 2, + 1, + 3, + ); + + // K = Kmem.view(n_embd/n_head, n_head, n_past + N).permute(0, 2, 1, 3) + let k = ctx0.op_permute( + &ctx0.op_reshape_3d( + &ctx0.op_view_1d( + &session.memory_k, + (n_past + n) * n_embd, + il * n_ctx * session.memory_k.element_size() * n_embd, + ), + n_embd / n_head, + n_head, + n_past + n, + ), + 0, + 2, + 1, + 3, + ); + + // K * Q + let k_q = ctx0.op_mul_mat(&k, &q); + + // KQ_scaled = KQ / sqrt(n_embd/n_head) + let k_q_scaled = ctx0.op_scale( + &k_q, + &ctx0.new_f32(1.0 / f32::sqrt(n_embd as f32 / n_head as f32)), + ); + + //alibi + // KQ_scaled_alibi = KQ_scaled + alibi_bias + let k_q_scaled_alibi = ctx0.op_alibi(&k_q_scaled, n_past, n_head); + + // KQ_masked = mask_past(KQ_scaled) + let k_q_masked = ctx0.op_diag_mask_inf(&k_q_scaled_alibi, n_past); + + // KQ = soft_max(KQ_masked) + let k_q_soft_max = ctx0.op_soft_max(&k_q_masked); + + let memv_elsize = session.memory_v.element_size(); + + // let v_trans = ctx0.op_permute( + // &ctx0.op_reshape_3d( + // &ctx0.op_view_1d( + // &session.memory_v, + // (n_past + n) * n_embd, + // 
il * n_ctx * memv_elsize * n_embd, + // ), + // n_embd / n_head, + // n_head, + // n_past + n, + // ), + // 1, + // 2, + // 0, + // 3, + // ); + + // // GGML_ASSERT: ggml/ggml.c:4899: !ggml_is_transposed(a) + // let k_q_v = ctx0.op_mul_mat(&v_trans, &k_q_soft_max); + + // split cached V into n_head heads + let v = ctx0.op_view_3d( + &session.memory_v, + (n_past + n, n_embd / n_head, n_head), + (n_ctx * memv_elsize, n_ctx * memv_elsize * n_embd / n_head), + il * n_ctx * memv_elsize * n_embd, + ); + + // KQV = transpose(V) * KQ_soft_max + let k_q_v = ctx0.op_mul_mat(&v, &k_q_soft_max); + + // KQV_merged = KQV.permute(0, 2, 1, 3) + let k_q_v_merged = ctx0.op_permute(&k_q_v, 0, 2, 1, 3); + + // cur = KQV_merged.contiguous().view(n_embd, N) + current = ctx0.op_cpy( + &k_q_v_merged, + &ctx0.new_tensor_2d(ggml::Type::F32, n_embd, n), + ); + + // projection + current = ctx0.op_mul_mat(&self.layers[il].wo, ¤t); + current = ctx0.op_add(&ctx0.op_repeat(&self.layers[il].wo_b, ¤t), ¤t); + } + + let input_feed_forward = ctx0.op_add(¤t, &input_self_attention); + + // feed-forward network + { + // norm + { + current = ctx0.op_norm(&input_feed_forward); + + // cur = ffn_norm*cur + ffn_norm_b + current = ctx0.op_mul( + &ctx0.op_repeat(&self.layers[il].ffn_norm, ¤t), + ¤t, + ); + + current = ctx0.op_add( + &ctx0.op_repeat(&self.layers[il].ffn_norm_b, ¤t), + ¤t, + ); + } + + current = ctx0.op_mul_mat(&self.layers[il].w1, ¤t); + + current = ctx0.op_add(&ctx0.op_repeat(&self.layers[il].w1_b, ¤t), ¤t); + + // SILU activation + + current = ctx0.op_gelu(¤t); + + current = ctx0.op_mul_mat(&self.layers[il].w2, ¤t); + + current = ctx0.op_add(&ctx0.op_repeat(&self.layers[il].w2_b, ¤t), ¤t); + } + + current = ctx0.op_add(¤t, &input_feed_forward); + + // input for next layer + input_layer = current; + } + + // Used at the end to optionally extract the embeddings. + let embeddings_tensor; + + // norm + { + input_layer = ctx0.op_norm(&input_layer); + + // inpL = norm*inpL + input_layer = ctx0.op_mul( + &ctx0.op_repeat(&self.output_norm, &input_layer), + &input_layer, + ); + + input_layer = ctx0.op_add( + &ctx0.op_repeat(&self.output_norm_b, &input_layer), + &input_layer, + ); + + embeddings_tensor = input_layer.share(); //TODO: CHECK if this is still necessary, (not in BLOOM C implementation) + } + + // lm_head + { + input_layer = ctx0.op_mul_mat(&self.output, &input_layer); + } + + // logits -> probs + // inpL = ctx0.op_soft_max(&inpL); + + // run the computation + gf.build_forward_expand(&input_layer); + ctx0.graph_compute(&mut gf); + + // return result for just the last token + // SAFETY: yolo + assert_eq!(session.last_logits.len(), { n_vocab }); + unsafe { + input_layer.read_data( + n_vocab * (n - 1) * std::mem::size_of::(), + bytemuck::cast_slice_mut(&mut session.last_logits), + ) + }; + + // Extract logits + if let Some(all_logits) = &mut output_request.all_logits { + all_logits.resize(n_vocab * n, 0.0); + // SAFETY: Tensor data can be read (properly aligned, initialized, + // data will not be mutated or otherwise aliased during the copy), + // and we're not reading past the end of the tensor data. + assert_eq!(input_layer.nelements(), n_vocab * n); + unsafe { + input_layer.read_data(0, bytemuck::cast_slice_mut(all_logits)); + } + } + + // Extract embeddings + if let Some(embeddings) = &mut output_request.embeddings { + embeddings.resize(n_embd * n, 0.0); + // SAFETY: Same rationale as for the "Extract logits" section applies. 
+ assert_eq!(embeddings_tensor.nelements(), n_embd * n); + unsafe { + embeddings_tensor.read_data(0, bytemuck::cast_slice_mut(embeddings)); + } + } + + // Adjust the required memory per token if we didn't know that already + if session.mem_per_token == 0 { + session.mem_per_token = ctx0.used_mem() / n; + } + + // Adjust n_past to new length. + session.n_past += input_tokens.len(); + } + + /// Returns the vocabulary used by this model. + fn vocabulary(&self) -> &Vocabulary { + &self.vocabulary + } + + fn n_ctx(&self) -> usize { + self.n_context_tokens + } +} + +// NOTE: Field order matters! Data is laid out in the file exactly +// in this order. +#[derive(Debug, Default, PartialEq, Eq, Clone, Copy)] +pub struct Hyperparameters { + pub n_vocab: usize, + pub n_embd: usize, + pub n_mult: usize, + pub n_head: usize, + pub n_layer: usize, + pub file_type: FileType, +} +impl llm_base::Hyperparameters for Hyperparameters { + fn read(reader: &mut dyn std::io::BufRead) -> Result { + Ok(Hyperparameters { + n_vocab: util::read_i32(reader)?.try_into()?, + n_embd: util::read_i32(reader)?.try_into()?, + n_mult: util::read_i32(reader)?.try_into()?, + n_head: util::read_i32(reader)?.try_into()?, + n_layer: util::read_i32(reader)?.try_into()?, + file_type: { + let ftype = util::read_i32(reader)?; + FileType::try_from(ftype).map_err(|_| LoadError::UnsupportedFileType(ftype))? + }, + }) + } + + fn n_vocabulary(&self) -> usize { + self.n_vocab + } +} + +struct Layer { + pub attention_norm: ggml::Tensor, + pub attention_norm_b: ggml::Tensor, + pub wo: ggml::Tensor, + pub wo_b: ggml::Tensor, + pub query_key_value: ggml::Tensor, + pub query_key_value_b: ggml::Tensor, + // normalization + pub ffn_norm: ggml::Tensor, + pub ffn_norm_b: ggml::Tensor, + // ff + pub w1: ggml::Tensor, + pub w1_b: ggml::Tensor, + pub w2: ggml::Tensor, + pub w2_b: ggml::Tensor, +} diff --git a/generate-ggml-bindings/Cargo.toml b/generate-ggml-bindings/Cargo.toml deleted file mode 100644 index 6f75538e..00000000 --- a/generate-ggml-bindings/Cargo.toml +++ /dev/null @@ -1,9 +0,0 @@ -[package] -name = "generate-ggml-bindings" -version = "0.1.0" -edition = "2021" - -# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html - -[dependencies] -bindgen = "0.64.0" diff --git a/generate-ggml-bindings/src/main.rs b/generate-ggml-bindings/src/main.rs deleted file mode 100644 index 9f8f7324..00000000 --- a/generate-ggml-bindings/src/main.rs +++ /dev/null @@ -1,34 +0,0 @@ -use std::{env, path::PathBuf}; - -fn main() { - // Parse arguments - let args: Vec = env::args().collect(); - - if args.len() != 2 { - println!("Usage: {} ", args[0]); - return; - } - - let ggml_crate_path = &args[1]; - - let header_path = format!("{ggml_crate_path}/ggml/ggml.h"); - - let bindings = bindgen::Builder::default() - .header(&header_path) - // Suppress some warnings - .raw_line("#![allow(non_upper_case_globals)]") - .raw_line("#![allow(non_camel_case_types)]") - .raw_line("#![allow(non_snake_case)]") - .raw_line("#![allow(unused)]") - // Do not generate code for ggml's includes (stdlib) - .allowlist_file(&header_path) - .generate() - .expect("Unable to generate bindings"); - - let out_path = PathBuf::from(ggml_crate_path).join("src").join("lib.rs"); - bindings - .write_to_file(out_path) - .expect("Couldn't write bindings"); - - println!("Successfully updated bindings in src/lib.rs"); -} diff --git a/ggml-format/src/lib.rs b/ggml-format/src/lib.rs deleted file mode 100644 index b26aa0f2..00000000 --- a/ggml-format/src/lib.rs +++ 
/dev/null @@ -1,45 +0,0 @@ -#![deny(missing_docs)] -//! A reader and writer for the `ggml` model format. -//! -//! The reader supports the GGML, GGMF and GGJT container formats, but -//! only single-part models. -//! -//! The writer isn't implemented yet. It will support the GGJT container -//! format only. - -/// Utilities for reading and writing. -pub mod util; - -mod loader; -mod saver; -#[cfg(test)] -mod tests; - -pub use loader::{ - data_size, load_model, LoadError, LoadHandler, PartialHyperparameters, TensorInfo, -}; -pub use saver::{save_model, SaveError, SaveHandler, TensorData}; - -/// The type of a tensor element. -pub type ElementType = ggml::Type; - -#[derive(Debug, PartialEq, Clone, Copy)] -/// The format of the file containing the model. -pub enum ContainerType { - /// `GGML`: legacy format, oldest ggml tensor file format - Ggml, - /// `GGMF`: also legacy format. Introduces versioning. Newer than GGML, older than GGJT. - Ggmf, - /// `GGJT`: mmap-able format. - Ggjt, -} -impl ContainerType { - /// Does this container type support mmap? - pub fn support_mmap(&self) -> bool { - match self { - ContainerType::Ggml => false, - ContainerType::Ggmf => false, - ContainerType::Ggjt => true, - } - } -} diff --git a/ggml-sys/Cargo.toml b/ggml-sys/Cargo.toml deleted file mode 100644 index 6a971dee..00000000 --- a/ggml-sys/Cargo.toml +++ /dev/null @@ -1,10 +0,0 @@ -[build-dependencies.cc] -version = "^1.0" - -[dependencies] - -[package] -description = "Low level bindings for ggml" -edition = "2021" -name = "ggml-sys" -version = {workspace = true} diff --git a/ggml-sys/ggml/.gitattributes b/ggml-sys/ggml/.gitattributes deleted file mode 100644 index 304373d7..00000000 --- a/ggml-sys/ggml/.gitattributes +++ /dev/null @@ -1,2 +0,0 @@ -*.c linguist-vendored -*.h linguist-vendored diff --git a/ggml-sys/ggml/CREDITS.txt b/ggml-sys/ggml/CREDITS.txt deleted file mode 100644 index af72b88a..00000000 --- a/ggml-sys/ggml/CREDITS.txt +++ /dev/null @@ -1,6 +0,0 @@ -Vendored version: https://github.com/ggerganov/llama.cpp/commit/0e018fe008eacebdbcfa2d61b6c988c245c961cd -For convenience, changes involved in this sync: -https://github.com/ggerganov/llama.cpp/compare/74f5899df4a6083fc467b620baa1cf821e37799d..0e018fe008eacebdbcfa2d61b6c988c245c961cd - -The ggml.c and ggml.h files are distributed under the terms of the MIT license. 
-Credit goes to the original authors: Copyright (c) 2023 Georgi Gerganov diff --git a/ggml-sys/ggml/ggml.c b/ggml-sys/ggml/ggml.c deleted file mode 100644 index 281b2028..00000000 --- a/ggml-sys/ggml/ggml.c +++ /dev/null @@ -1,12325 +0,0 @@ -// Defines CLOCK_MONOTONIC on Linux -#define _GNU_SOURCE - -#include "ggml.h" - -#if defined(_MSC_VER) || defined(__MINGW32__) -#include // using malloc.h with MSC/MINGW -#elif !defined(__FreeBSD__) && !defined(__NetBSD__) && !defined(__OpenBSD__) -#include -#endif - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -// if C99 - static_assert is noop -// ref: https://stackoverflow.com/a/53923785/4039976 -#ifndef static_assert -#define static_assert(cond, msg) struct global_scope_noop_trick -#endif - -#if defined(_WIN32) - -#include - -typedef volatile LONG atomic_int; -typedef atomic_int atomic_bool; - -static void atomic_store(atomic_int* ptr, LONG val) { - InterlockedExchange(ptr, val); -} -static LONG atomic_load(atomic_int* ptr) { - return InterlockedCompareExchange(ptr, 0, 0); -} -static LONG atomic_fetch_add(atomic_int* ptr, LONG inc) { - return InterlockedExchangeAdd(ptr, inc); -} -static LONG atomic_fetch_sub(atomic_int* ptr, LONG dec) { - return atomic_fetch_add(ptr, -(dec)); -} - -typedef HANDLE pthread_t; - -typedef DWORD thread_ret_t; -static int pthread_create(pthread_t* out, void* unused, thread_ret_t(*func)(void*), void* arg) { - (void) unused; - HANDLE handle = CreateThread(NULL, 0, (LPTHREAD_START_ROUTINE) func, arg, 0, NULL); - if (handle == NULL) - { - return EAGAIN; - } - - *out = handle; - return 0; -} - -static int pthread_join(pthread_t thread, void* unused) { - (void) unused; - return (int) WaitForSingleObject(thread, INFINITE); -} - -static int sched_yield (void) { - Sleep (0); - return 0; -} -#else -#include -#include - -typedef void* thread_ret_t; -#endif - -// __FMA__ and __F16C__ are not defined in MSVC, however they are implied with AVX2/AVX512 -#if defined(_MSC_VER) && (defined(__AVX2__) || defined(__AVX512F__)) -#ifndef __FMA__ -#define __FMA__ -#endif -#ifndef __F16C__ -#define __F16C__ -#endif -#ifndef __SSE3__ -#define __SSE3__ -#endif -#endif - -#ifdef __HAIKU__ -#define static_assert(cond, msg) _Static_assert(cond, msg) -#endif - -/*#define GGML_PERF*/ -#define GGML_DEBUG 0 -#define GGML_GELU_FP16 -#define GGML_SILU_FP16 - -#define GGML_SOFT_MAX_UNROLL 4 -#define GGML_VEC_DOT_UNROLL 2 - -#ifdef GGML_USE_ACCELERATE -// uncomment to use vDSP for soft max computation -// note: not sure if it is actually faster -//#define GGML_SOFT_MAX_ACCELERATE -#endif - -#if UINTPTR_MAX == 0xFFFFFFFF - #define GGML_MEM_ALIGN 4 -#else - #define GGML_MEM_ALIGN 16 -#endif - -#if defined(_MSC_VER) || defined(__MINGW32__) -#define GGML_ALIGNED_MALLOC(size) _aligned_malloc(size, GGML_MEM_ALIGN) -#define GGML_ALIGNED_FREE(ptr) _aligned_free(ptr) -#else -inline static void* ggml_aligned_malloc(size_t size) { - void* aligned_memory = NULL; - int result = posix_memalign(&aligned_memory, GGML_MEM_ALIGN, size); - if (result != 0) { - // Handle allocation failure - return NULL; - } - return aligned_memory; -} -#define GGML_ALIGNED_MALLOC(size) ggml_aligned_malloc(size) -#define GGML_ALIGNED_FREE(ptr) free(ptr) -#endif - -#define UNUSED(x) (void)(x) -#define SWAP(x, y, T) do { T SWAP = x; x = y; y = SWAP; } while (0) - -#define GGML_ASSERT(x) \ - do { \ - if (!(x)) { \ - fprintf(stderr, "GGML_ASSERT: %s:%d: %s\n", __FILE__, __LINE__, #x); \ - abort(); \ - } \ - } while (0) - -#if 
defined(GGML_USE_ACCELERATE) -#include -#elif defined(GGML_USE_OPENBLAS) -#include -#elif defined(GGML_USE_CUBLAS) -#include "ggml-cuda.h" -#endif - -#undef MIN -#undef MAX -#define MIN(a, b) ((a) < (b) ? (a) : (b)) -#define MAX(a, b) ((a) > (b) ? (a) : (b)) - -// floating point type used to accumulate sums -typedef double ggml_float; - -// 16-bit float -// on Arm, we use __fp16 -// on x86, we use uint16_t -#ifdef __ARM_NEON - -// if YCM cannot find , make a symbolic link to it, for example: -// -// $ ln -sfn /Library/Developer/CommandLineTools/usr/lib/clang/13.1.6/include/arm_neon.h ./src/ -// -#include - -#define GGML_COMPUTE_FP16_TO_FP32(x) ((float) (x)) -#define GGML_COMPUTE_FP32_TO_FP16(x) (x) - -#define GGML_FP16_TO_FP32(x) ((float) (x)) -#define GGML_FP32_TO_FP16(x) (x) - -#else - -#ifdef __wasm_simd128__ -#include -#else -#ifdef __POWER9_VECTOR__ -#include -#undef bool -#define bool _Bool -#else -#include -#endif -#endif - -#ifdef __F16C__ - -#ifdef _MSC_VER -#define GGML_COMPUTE_FP16_TO_FP32(x) _mm_cvtss_f32(_mm_cvtph_ps(_mm_cvtsi32_si128(x))) -#define GGML_COMPUTE_FP32_TO_FP16(x) _mm_extract_epi16(_mm_cvtps_ph(_mm_set_ss(x), 0), 0) -#else -#define GGML_COMPUTE_FP16_TO_FP32(x) _cvtsh_ss(x) -#define GGML_COMPUTE_FP32_TO_FP16(x) _cvtss_sh(x, 0) -#endif - -#elif defined(__POWER9_VECTOR__) - -#define GGML_COMPUTE_FP16_TO_FP32(x) ggml_compute_fp16_to_fp32(x) -#define GGML_COMPUTE_FP32_TO_FP16(x) ggml_compute_fp32_to_fp16(x) -/* the inline asm below is about 12% faster than the lookup method */ -#define GGML_FP16_TO_FP32(x) GGML_COMPUTE_FP16_TO_FP32(x) -#define GGML_FP32_TO_FP16(x) GGML_COMPUTE_FP32_TO_FP16(x) - -static inline float ggml_compute_fp16_to_fp32(ggml_fp16_t h) { - register float f; - register double d; - __asm__( - "mtfprd %0,%2\n" - "xscvhpdp %0,%0\n" - "frsp %1,%0\n" : - /* temp */ "=d"(d), - /* out */ "=f"(f): - /* in */ "r"(h)); - return f; -} - -static inline ggml_fp16_t ggml_compute_fp32_to_fp16(float f) { - register double d; - register ggml_fp16_t r; - __asm__( /* xscvdphp can work on double or single precision */ - "xscvdphp %0,%2\n" - "mffprd %1,%0\n" : - /* temp */ "=d"(d), - /* out */ "=r"(r): - /* in */ "f"(f)); - return r; -} - -#else - -// FP16 <-> FP32 -// ref: https://github.com/Maratyszcza/FP16 - -static inline float fp32_from_bits(uint32_t w) { - union { - uint32_t as_bits; - float as_value; - } fp32; - fp32.as_bits = w; - return fp32.as_value; -} - -static inline uint32_t fp32_to_bits(float f) { - union { - float as_value; - uint32_t as_bits; - } fp32; - fp32.as_value = f; - return fp32.as_bits; -} - -static inline float ggml_compute_fp16_to_fp32(ggml_fp16_t h) { - const uint32_t w = (uint32_t) h << 16; - const uint32_t sign = w & UINT32_C(0x80000000); - const uint32_t two_w = w + w; - - const uint32_t exp_offset = UINT32_C(0xE0) << 23; -#if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) || defined(__GNUC__) && !defined(__STRICT_ANSI__) - const float exp_scale = 0x1.0p-112f; -#else - const float exp_scale = fp32_from_bits(UINT32_C(0x7800000)); -#endif - const float normalized_value = fp32_from_bits((two_w >> 4) + exp_offset) * exp_scale; - - const uint32_t magic_mask = UINT32_C(126) << 23; - const float magic_bias = 0.5f; - const float denormalized_value = fp32_from_bits((two_w >> 17) | magic_mask) - magic_bias; - - const uint32_t denormalized_cutoff = UINT32_C(1) << 27; - const uint32_t result = sign | - (two_w < denormalized_cutoff ? 
fp32_to_bits(denormalized_value) : fp32_to_bits(normalized_value)); - return fp32_from_bits(result); -} - -static inline ggml_fp16_t ggml_compute_fp32_to_fp16(float f) { -#if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) || defined(__GNUC__) && !defined(__STRICT_ANSI__) - const float scale_to_inf = 0x1.0p+112f; - const float scale_to_zero = 0x1.0p-110f; -#else - const float scale_to_inf = fp32_from_bits(UINT32_C(0x77800000)); - const float scale_to_zero = fp32_from_bits(UINT32_C(0x08800000)); -#endif - float base = (fabsf(f) * scale_to_inf) * scale_to_zero; - - const uint32_t w = fp32_to_bits(f); - const uint32_t shl1_w = w + w; - const uint32_t sign = w & UINT32_C(0x80000000); - uint32_t bias = shl1_w & UINT32_C(0xFF000000); - if (bias < UINT32_C(0x71000000)) { - bias = UINT32_C(0x71000000); - } - - base = fp32_from_bits((bias >> 1) + UINT32_C(0x07800000)) + base; - const uint32_t bits = fp32_to_bits(base); - const uint32_t exp_bits = (bits >> 13) & UINT32_C(0x00007C00); - const uint32_t mantissa_bits = bits & UINT32_C(0x00000FFF); - const uint32_t nonsign = exp_bits + mantissa_bits; - return (sign >> 16) | (shl1_w > UINT32_C(0xFF000000) ? UINT16_C(0x7E00) : nonsign); -} - -#define GGML_COMPUTE_FP16_TO_FP32(x) ggml_compute_fp16_to_fp32(x) -#define GGML_COMPUTE_FP32_TO_FP16(x) ggml_compute_fp32_to_fp16(x) - -#endif // __F16C__ - -#endif // __ARM_NEON - -// -// global data -// - -// precomputed gelu table for f16 (128 KB) -static ggml_fp16_t table_gelu_f16[1 << 16]; - -// precomputed silu table for f16 (128 KB) -static ggml_fp16_t table_silu_f16[1 << 16]; - -// precomputed exp table for f16 (128 KB) -static ggml_fp16_t table_exp_f16[1 << 16]; - -// precomputed f32 table for f16 (256 KB) -static float table_f32_f16[1 << 16]; - -// On ARM NEON, it's quicker to directly convert x -> x instead of calling into ggml_lookup_fp16_to_fp32, -// so we define GGML_FP16_TO_FP32 and GGML_FP32_TO_FP16 elsewhere for NEON. -// This is also true for POWER9. 
-#if !defined(GGML_FP16_TO_FP32) || !defined(GGML_FP32_TO_FP16) - -inline static float ggml_lookup_fp16_to_fp32(ggml_fp16_t f) { - uint16_t s; - memcpy(&s, &f, sizeof(uint16_t)); - return table_f32_f16[s]; -} - -#define GGML_FP16_TO_FP32(x) ggml_lookup_fp16_to_fp32(x) -#define GGML_FP32_TO_FP16(x) GGML_COMPUTE_FP32_TO_FP16(x) - -#endif - -// note: do not use these inside ggml.c -// these are meant to be used via the ggml.h API -float ggml_fp16_to_fp32(ggml_fp16_t x) { - return (float) GGML_FP16_TO_FP32(x); -} - -ggml_fp16_t ggml_fp32_to_fp16(float x) { - return GGML_FP32_TO_FP16(x); -} - -// -// timing -// - -#if defined(_MSC_VER) || defined(__MINGW32__) -static int64_t timer_freq; -void ggml_time_init(void) { - LARGE_INTEGER frequency; - QueryPerformanceFrequency(&frequency); - timer_freq = frequency.QuadPart; -} -int64_t ggml_time_ms(void) { - LARGE_INTEGER t; - QueryPerformanceCounter(&t); - return (t.QuadPart * 1000) / timer_freq; -} -int64_t ggml_time_us(void) { - LARGE_INTEGER t; - QueryPerformanceCounter(&t); - return (t.QuadPart * 1000000) / timer_freq; -} -#else -void ggml_time_init(void) {} -int64_t ggml_time_ms(void) { - struct timespec ts; - clock_gettime(CLOCK_MONOTONIC, &ts); - return (int64_t)ts.tv_sec*1000 + (int64_t)ts.tv_nsec/1000000; -} - -int64_t ggml_time_us(void) { - struct timespec ts; - clock_gettime(CLOCK_MONOTONIC, &ts); - return (int64_t)ts.tv_sec*1000000 + (int64_t)ts.tv_nsec/1000; -} -#endif - -int64_t ggml_cycles(void) { - return clock(); -} - -int64_t ggml_cycles_per_ms(void) { - return CLOCKS_PER_SEC/1000; -} - -#ifdef GGML_PERF -#define ggml_perf_time_ms() ggml_time_ms() -#define ggml_perf_time_us() ggml_time_us() -#define ggml_perf_cycles() ggml_cycles() -#define ggml_perf_cycles_per_ms() ggml_cycles_per_ms() -#else -#define ggml_perf_time_ms() 0 -#define ggml_perf_time_us() 0 -#define ggml_perf_cycles() 0 -#define ggml_perf_cycles_per_ms() 0 -#endif - -// -// cache line -// - -#if defined(__cpp_lib_hardware_interference_size) -#define CACHE_LINE_SIZE hardware_destructive_interference_size -#else -#if defined(__POWER9_VECTOR__) -#define CACHE_LINE_SIZE 128 -#else -#define CACHE_LINE_SIZE 64 -#endif -#endif - -static const size_t CACHE_LINE_SIZE_F32 = CACHE_LINE_SIZE/sizeof(float); - -// -// quantization -// - -#if __AVX__ || __AVX2__ || __AVX512F__ -// Unpack 16 4-bit fields into 16 bytes -// The output vector contains 16 bytes, each one in [ 0 .. 
15 ] interval -static inline __m128i bytes_from_nibbles_16(const uint8_t * rsi) -{ - // Load 8 bytes from memory - __m128i tmp = _mm_loadu_si64( ( const __m128i* )rsi ); - - // Expand bytes into uint16_t values - __m128i bytes = _mm_cvtepu8_epi16( tmp ); - - // Unpack values into individual bytes - const __m128i lowMask = _mm_set1_epi8( 0xF ); - __m128i high = _mm_andnot_si128( lowMask, bytes ); - __m128i low = _mm_and_si128( lowMask, bytes ); - high = _mm_slli_epi16( high, 4 ); - bytes = _mm_or_si128( low, high ); - return bytes; -} - -// horizontally add 8 floats -static inline float hsum_float_8(const __m256 x) { - __m128 res = _mm256_extractf128_ps(x, 1); - res = _mm_add_ps(res, _mm256_castps256_ps128(x)); - res = _mm_add_ps(res, _mm_movehl_ps(res, res)); - res = _mm_add_ss(res, _mm_movehdup_ps(res)); - return _mm_cvtss_f32(res); -} - -// horizontally add 8 int32_t -static inline int hsum_i32_8(const __m256i a) { - const __m128i sum128 = _mm_add_epi32(_mm256_castsi256_si128(a), _mm256_extractf128_si256(a, 1)); - const __m128i hi64 = _mm_unpackhi_epi64(sum128, sum128); - const __m128i sum64 = _mm_add_epi32(hi64, sum128); - const __m128i hi32 = _mm_shuffle_epi32(sum64, _MM_SHUFFLE(2, 3, 0, 1)); - return _mm_cvtsi128_si32(_mm_add_epi32(sum64, hi32)); -} - -// horizontally add 4 int32_t -static inline int hsum_i32_4(const __m128i a) { - const __m128i hi64 = _mm_unpackhi_epi64(a, a); - const __m128i sum64 = _mm_add_epi32(hi64, a); - const __m128i hi32 = _mm_shuffle_epi32(sum64, _MM_SHUFFLE(2, 3, 0, 1)); - return _mm_cvtsi128_si32(_mm_add_epi32(sum64, hi32)); -} - -#if __AVX2__ || __AVX512F__ -// Unpack 32 4-bit fields into 32 bytes -// The output vector contains 32 bytes, each one in [ 0 .. 15 ] interval -static inline __m256i bytes_from_nibbles_32(const uint8_t * rsi) -{ - // Load 16 bytes from memory - __m128i tmp = _mm_loadu_si128( ( const __m128i* )rsi ); - - // Expand bytes into uint16_t values - __m256i bytes = _mm256_cvtepu8_epi16( tmp ); - - // Unpack values into individual bytes - const __m256i lowMask = _mm256_set1_epi8( 0xF ); - __m256i high = _mm256_andnot_si256( lowMask, bytes ); - __m256i low = _mm256_and_si256( lowMask, bytes ); - high = _mm256_slli_epi16( high, 4 ); - bytes = _mm256_or_si256( low, high ); - return bytes; -} - -// add int16_t pairwise and return as float vector -static inline __m256 sum_i16_pairs_float(const __m256i x) { - const __m256i ones = _mm256_set1_epi16(1); - const __m256i summed_pairs = _mm256_madd_epi16(ones, x); - return _mm256_cvtepi32_ps(summed_pairs); -} - -// multiply int8_t, add results pairwise twice and return as float vector -static inline __m256 mul_sum_i8_pairs_float(const __m256i x, const __m256i y) { - // Get absolute values of x vectors - const __m256i ax = _mm256_sign_epi8(x, x); - // Sign the values of the y vectors - const __m256i sy = _mm256_sign_epi8(y, x); - // Perform multiplication and create 16-bit values - const __m256i dot = _mm256_maddubs_epi16(ax, sy); - return sum_i16_pairs_float(dot); -} - -static inline __m128i packNibbles( __m256i bytes ) -{ - // Move bits within 16-bit lanes from 0000_abcd_0000_efgh into 0000_0000_abcd_efgh - const __m256i lowByte = _mm256_set1_epi16( 0xFF ); - __m256i high = _mm256_andnot_si256( lowByte, bytes ); - __m256i low = _mm256_and_si256( lowByte, bytes ); - high = _mm256_srli_epi16( high, 4 ); - bytes = _mm256_or_si256( low, high ); - - // Compress uint16_t lanes into bytes - __m128i r0 = _mm256_castsi256_si128( bytes ); - __m128i r1 = _mm256_extracti128_si256( bytes, 1 ); - return 
_mm_packus_epi16( r0, r1 ); -} -#else -static inline __m128i packNibbles( __m128i bytes1, __m128i bytes2 ) -{ - // Move bits within 16-bit lanes from 0000_abcd_0000_efgh into 0000_0000_abcd_efgh - const __m128i lowByte = _mm_set1_epi16( 0xFF ); - __m128i high = _mm_andnot_si128( lowByte, bytes1 ); - __m128i low = _mm_and_si128( lowByte, bytes1 ); - high = _mm_srli_epi16( high, 4 ); - bytes1 = _mm_or_si128( low, high ); - high = _mm_andnot_si128( lowByte, bytes2 ); - low = _mm_and_si128( lowByte, bytes2 ); - high = _mm_srli_epi16( high, 4 ); - bytes2 = _mm_or_si128( low, high ); - - return _mm_packus_epi16( bytes1, bytes2); -} -#endif -#endif // __AVX__ || __AVX2__ || __AVX512F__ - -#if __ARM_NEON - -#if !defined(__aarch64__) - -inline static uint16_t vaddvq_u8(uint8x16_t v) { - return - (uint16_t)vgetq_lane_u8(v, 0) + (uint16_t)vgetq_lane_u8(v, 1) + - (uint16_t)vgetq_lane_u8(v, 2) + (uint16_t)vgetq_lane_u8(v, 3) + - (uint16_t)vgetq_lane_u8(v, 4) + (uint16_t)vgetq_lane_u8(v, 5) + - (uint16_t)vgetq_lane_u8(v, 6) + (uint16_t)vgetq_lane_u8(v, 7) + - (uint16_t)vgetq_lane_u8(v, 8) + (uint16_t)vgetq_lane_u8(v, 9) + - (uint16_t)vgetq_lane_u8(v, 10) + (uint16_t)vgetq_lane_u8(v, 11) + - (uint16_t)vgetq_lane_u8(v, 12) + (uint16_t)vgetq_lane_u8(v, 13) + - (uint16_t)vgetq_lane_u8(v, 14) + (uint16_t)vgetq_lane_u8(v, 15); -} - -inline static int16_t vaddvq_s8(int8x16_t v) { - return - (int16_t)vgetq_lane_s8(v, 0) + (int16_t)vgetq_lane_s8(v, 1) + - (int16_t)vgetq_lane_s8(v, 2) + (int16_t)vgetq_lane_s8(v, 3) + - (int16_t)vgetq_lane_s8(v, 4) + (int16_t)vgetq_lane_s8(v, 5) + - (int16_t)vgetq_lane_s8(v, 6) + (int16_t)vgetq_lane_s8(v, 7) + - (int16_t)vgetq_lane_s8(v, 8) + (int16_t)vgetq_lane_s8(v, 9) + - (int16_t)vgetq_lane_s8(v, 10) + (int16_t)vgetq_lane_s8(v, 11) + - (int16_t)vgetq_lane_s8(v, 12) + (int16_t)vgetq_lane_s8(v, 13) + - (int16_t)vgetq_lane_s8(v, 14) + (int16_t)vgetq_lane_s8(v, 15); -} - -inline static int32_t vaddvq_s16(int16x8_t v) { - return - (int32_t)vgetq_lane_s16(v, 0) + (int32_t)vgetq_lane_s16(v, 1) + - (int32_t)vgetq_lane_s16(v, 2) + (int32_t)vgetq_lane_s16(v, 3) + - (int32_t)vgetq_lane_s16(v, 4) + (int32_t)vgetq_lane_s16(v, 5) + - (int32_t)vgetq_lane_s16(v, 6) + (int32_t)vgetq_lane_s16(v, 7); -} - -inline static uint32_t vaddvq_u16(uint16x8_t v) { - return - (uint32_t)vgetq_lane_u16(v, 0) + (uint32_t)vgetq_lane_u16(v, 1) + - (uint32_t)vgetq_lane_u16(v, 2) + (uint32_t)vgetq_lane_u16(v, 3) + - (uint32_t)vgetq_lane_u16(v, 4) + (uint32_t)vgetq_lane_u16(v, 5) + - (uint32_t)vgetq_lane_u16(v, 6) + (uint32_t)vgetq_lane_u16(v, 7); -} - -inline static int32_t vaddvq_s32(int32x4_t v) { - return vgetq_lane_s32(v, 0) + vgetq_lane_s32(v, 1) + vgetq_lane_s32(v, 2) + vgetq_lane_s32(v, 3); -} - -inline static float vaddvq_f32(float32x4_t v) { - return vgetq_lane_f32(v, 0) + vgetq_lane_f32(v, 1) + vgetq_lane_f32(v, 2) + vgetq_lane_f32(v, 3); -} - -float vminvq_f32(float32x4_t v) { - return - MIN(MIN(vgetq_lane_f32(v, 0), vgetq_lane_f32(v, 1)), - MIN(vgetq_lane_f32(v, 2), vgetq_lane_f32(v, 3))); -} - -float vmaxvq_f32(float32x4_t v) { - return - MAX(MAX(vgetq_lane_f32(v, 0), vgetq_lane_f32(v, 1)), - MAX(vgetq_lane_f32(v, 2), vgetq_lane_f32(v, 3))); -} - -int8x8_t vzip1_s8(int8x8_t a, int8x8_t b) { - return vget_low_s8(vcombine_s8(a, b)); -} - -int8x8_t vzip2_s8(int8x8_t a, int8x8_t b) { - return vget_high_s8(vcombine_s8(a, b)); -} - -uint8x8_t vzip1_u8(uint8x8_t a, uint8x8_t b) { - return vget_low_u8(vcombine_u8(a, b)); -} - -uint8x8_t vzip2_u8(uint8x8_t a, uint8x8_t b) { - return 
vget_high_u8(vcombine_u8(a, b)); -} - -#endif -#endif - - -#define QK4_0 32 -typedef struct { - float d; // delta - uint8_t qs[QK4_0 / 2]; // nibbles / quants -} block_q4_0; -static_assert(sizeof(block_q4_0) == sizeof(float) + QK4_0 / 2, "wrong q4_0 block size/padding"); - -#define QK4_1 32 -typedef struct { - float d; // delta - float m; // min - uint8_t qs[QK4_1 / 2]; // nibbles / quants -} block_q4_1; -static_assert(sizeof(block_q4_1) == 2 * sizeof(float) + QK4_1 / 2, "wrong q4_1 block size/padding"); - -#define QK4_2 16 -typedef struct { - ggml_fp16_t d; // delta - uint8_t qs[QK4_2 / 2]; // nibbles / quants -} block_q4_2; -static_assert(sizeof(block_q4_2) == sizeof(ggml_fp16_t) + QK4_2 / 2, "wrong q4_2 block size/padding"); - -#define QK4_3 16 -typedef struct { - ggml_fp16_t d; // delta - ggml_fp16_t m; // min - uint8_t qs[QK4_3 / 2]; // nibbles / quants -} block_q4_3; -static_assert(sizeof(block_q4_3) == 2 * sizeof(ggml_fp16_t) + QK4_3 / 2, "wrong q4_3 block size/padding"); - -#define QK8_0 32 -typedef struct { - float d; // delta - float s0; // d * sum(qs[i]) low - float s1; // d * sum(qs[i]) high - int8_t qs[QK8_0]; // quants -} block_q8_0; -static_assert(sizeof(block_q8_0) == 3*sizeof(float) + QK8_0, "wrong q8_0 block size/padding"); - - -// reference implementation for deterministic creation of model files -static void quantize_row_q4_0_reference(const float * restrict x, block_q4_0 * restrict y, int k) { - assert(k % QK4_0 == 0); - const int nb = k / QK4_0; - - uint8_t pp[QK4_0/2]; - - for (int i = 0; i < nb; i++) { - float amax = 0.0f; // absolute max - - for (int l = 0; l < QK4_0; l++) { - const float v = x[i*QK4_0 + l]; - amax = MAX(amax, fabsf(v)); - } - - const float d = amax / ((1 << 3) - 1); - const float id = d ? 1.0f/d : 0.0f; - - y[i].d = d; - - for (int l = 0; l < QK4_0; l += 2) { - const float v0 = x[i*QK4_0 + l + 0]*id; - const float v1 = x[i*QK4_0 + l + 1]*id; - - const uint8_t vi0 = (int8_t)roundf(v0) + 8; - const uint8_t vi1 = (int8_t)roundf(v1) + 8; - - assert(vi0 < 16); - assert(vi1 < 16); - - pp[l/2] = vi0 | (vi1 << 4); - } - - memcpy(y[i].qs, pp, sizeof(pp)); - } -} - -static void quantize_row_q4_0(const float * restrict x, void * restrict vy, int k) { - assert(k % QK4_0 == 0); - const int nb = k / QK4_0; - - block_q4_0 * restrict y = vy; - -#if defined(__POWER9_VECTOR__) - const vector float v85 = vec_splats(8.5f); - for (int i = 0; i < nb; i++) { - float amax = 0.0f; // absolute max - - vector float srcv [8]; - vector float asrcv[8]; - vector float amaxv[8]; - - for (int l = 0; l < 8; l++) srcv[l] = *(vector float *)(x + i*32 + 4*l); - for (int l = 0; l < 8; l++) asrcv[l] = vec_abs(srcv[l]); - - for (int l = 0; l < 4; l++) amaxv[2*l] = vec_max(asrcv[2*l], asrcv[2*l+1]); - //for (int l = 0; l < 2; l++) amaxv[4*l] = vec_max(amaxv[4*l], amaxv[4*l+2]); - amaxv[0] = vec_max(amaxv[0], amaxv[2]); - amaxv[4] = vec_max(amaxv[4], amaxv[6]); - //for (int l = 0; l < 1; l++) amaxv[8*l] = vec_max(amaxv[8*l], amaxv[8*l+4]); - amaxv[0] = vec_max(amaxv[0], amaxv[4]); - - amax = MAX( - MAX(vec_extract(amaxv[0], 0), vec_extract(amaxv[0], 1)), - MAX(vec_extract(amaxv[0], 2), vec_extract(amaxv[0], 3))); - - const float d = amax / ((1 << 3) - 1); - const float id = d ? 
1.0/d : 0.0; - - y[i].d = d; - - const vector float vid = vec_splats(id); - uint8_t * restrict pb = y[i].qs; - for (int l = 0; l < 8; l++) { - const vector float vf = vec_madd(srcv[l], vid, v85); - const vector signed int vi = vec_signed(vf); - - pb[2*l + 0] = vec_extract(vi, 0) | (vec_extract(vi, 1) << 4); - pb[2*l + 1] = vec_extract(vi, 2) | (vec_extract(vi, 3) << 4); - } - } -#elif __ARM_NEON - for (int i = 0; i < nb; i++) { - float32x4_t srcv [8]; - float32x4_t asrcv[8]; - float32x4_t amaxv[8]; - - for (int l = 0; l < 8; l++) srcv[l] = vld1q_f32(x + i*32 + 4*l); - for (int l = 0; l < 8; l++) asrcv[l] = vabsq_f32(srcv[l]); - - for (int l = 0; l < 4; l++) amaxv[2*l] = vmaxq_f32(asrcv[2*l], asrcv[2*l+1]); - for (int l = 0; l < 2; l++) amaxv[4*l] = vmaxq_f32(amaxv[4*l], amaxv[4*l+2]); - for (int l = 0; l < 1; l++) amaxv[8*l] = vmaxq_f32(amaxv[8*l], amaxv[8*l+4]); - - const float amax = vmaxvq_f32(amaxv[0]); - - const float d = amax / ((1 << 3) - 1); - const float id = d ? 1.0f/d : 0.0f; - - y[i].d = d; - - for (int l = 0; l < 8; l++) { - const float32x4_t v = vmulq_n_f32(srcv[l], id); - const float32x4_t vf = vaddq_f32(v, vdupq_n_f32(8.5f)); - const int32x4_t vi = vcvtq_s32_f32(vf); - - y[i].qs[2*l + 0] = vgetq_lane_s32(vi, 0) | (vgetq_lane_s32(vi, 1) << 4); - y[i].qs[2*l + 1] = vgetq_lane_s32(vi, 2) | (vgetq_lane_s32(vi, 3) << 4); - } - } -#elif defined(__AVX2__) - for (int i = 0; i < nb; i++) { - // Load elements into 4 AVX vectors - __m256 v0 = _mm256_loadu_ps( x ); - __m256 v1 = _mm256_loadu_ps( x + 8 ); - __m256 v2 = _mm256_loadu_ps( x + 16 ); - __m256 v3 = _mm256_loadu_ps( x + 24 ); - x += 32; - - // Compute max(abs(e)) for the block - const __m256 signBit = _mm256_set1_ps( -0.0f ); - __m256 maxAbs = _mm256_andnot_ps( signBit, v0 ); - maxAbs = _mm256_max_ps( maxAbs, _mm256_andnot_ps( signBit, v1 ) ); - maxAbs = _mm256_max_ps( maxAbs, _mm256_andnot_ps( signBit, v2 ) ); - maxAbs = _mm256_max_ps( maxAbs, _mm256_andnot_ps( signBit, v3 ) ); - - __m128 max4 = _mm_max_ps( _mm256_extractf128_ps( maxAbs, 1 ), _mm256_castps256_ps128( maxAbs ) ); - max4 = _mm_max_ps( max4, _mm_movehl_ps( max4, max4 ) ); - max4 = _mm_max_ss( max4, _mm_movehdup_ps( max4 ) ); - const float maxScalar = _mm_cvtss_f32( max4 ); - - // Quantize these floats - const float d = maxScalar / 7.0f; - y[i].d = d; - const float id = ( maxScalar != 0.0f ) ? 
7.0f / maxScalar : 0.0f; - const __m256 mul = _mm256_set1_ps( id ); - - // Apply the multiplier - v0 = _mm256_mul_ps( v0, mul ); - v1 = _mm256_mul_ps( v1, mul ); - v2 = _mm256_mul_ps( v2, mul ); - v3 = _mm256_mul_ps( v3, mul ); - - // Round to nearest integer - v0 = _mm256_round_ps( v0, _MM_ROUND_NEAREST ); - v1 = _mm256_round_ps( v1, _MM_ROUND_NEAREST ); - v2 = _mm256_round_ps( v2, _MM_ROUND_NEAREST ); - v3 = _mm256_round_ps( v3, _MM_ROUND_NEAREST ); - - // Convert floats to integers - __m256i i0 = _mm256_cvtps_epi32( v0 ); - __m256i i1 = _mm256_cvtps_epi32( v1 ); - __m256i i2 = _mm256_cvtps_epi32( v2 ); - __m256i i3 = _mm256_cvtps_epi32( v3 ); - - // Convert int32 to int16 - i0 = _mm256_packs_epi32( i0, i1 ); // 0, 1, 2, 3, 8, 9, 10, 11, 4, 5, 6, 7, 12, 13, 14, 15 - i2 = _mm256_packs_epi32( i2, i3 ); // 16, 17, 18, 19, 24, 25, 26, 27, 20, 21, 22, 23, 28, 29, 30, 31 - // Convert int16 to int8 - i0 = _mm256_packs_epi16( i0, i2 ); // 0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27, 4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31 - - // We got our precious signed bytes, but the order is now wrong - // These AVX2 pack instructions process 16-byte pieces independently - // The following instruction is fixing the order - const __m256i perm = _mm256_setr_epi32( 0, 4, 1, 5, 2, 6, 3, 7 ); - i0 = _mm256_permutevar8x32_epi32( i0, perm ); - - // Apply offset to translate the range from [ -7 .. +7 ] into [ +1 .. +15 ] - const __m256i off = _mm256_set1_epi8( 8 ); - i0 = _mm256_add_epi8( i0, off ); - - // Compress the vector into 4 bit/value, and store - __m128i res = packNibbles( i0 ); - _mm_storeu_si128( ( __m128i* )y[i].qs, res ); - } -#elif defined(__AVX__) - for (int i = 0; i < nb; i++) { - // Load elements into 4 AVX vectors - __m256 v0 = _mm256_loadu_ps( x ); - __m256 v1 = _mm256_loadu_ps( x + 8 ); - __m256 v2 = _mm256_loadu_ps( x + 16 ); - __m256 v3 = _mm256_loadu_ps( x + 24 ); - x += 32; - - // Compute max(abs(e)) for the block - const __m256 signBit = _mm256_set1_ps( -0.0f ); - __m256 maxAbs = _mm256_andnot_ps( signBit, v0 ); - maxAbs = _mm256_max_ps( maxAbs, _mm256_andnot_ps( signBit, v1 ) ); - maxAbs = _mm256_max_ps( maxAbs, _mm256_andnot_ps( signBit, v2 ) ); - maxAbs = _mm256_max_ps( maxAbs, _mm256_andnot_ps( signBit, v3 ) ); - - __m128 max4 = _mm_max_ps( _mm256_extractf128_ps( maxAbs, 1 ), _mm256_castps256_ps128( maxAbs ) ); - max4 = _mm_max_ps( max4, _mm_movehl_ps( max4, max4 ) ); - max4 = _mm_max_ss( max4, _mm_movehdup_ps( max4 ) ); - const float maxScalar = _mm_cvtss_f32( max4 ); - - // Quantize these floats - const float d = maxScalar / 7.0f; - y[i].d = d; - const float id = ( maxScalar != 0.0f ) ? 
7.0f / maxScalar : 0.0f; - const __m256 mul = _mm256_set1_ps( id ); - - // Apply the multiplier - v0 = _mm256_mul_ps( v0, mul ); - v1 = _mm256_mul_ps( v1, mul ); - v2 = _mm256_mul_ps( v2, mul ); - v3 = _mm256_mul_ps( v3, mul ); - - // Round to nearest integer - v0 = _mm256_round_ps( v0, _MM_ROUND_NEAREST ); - v1 = _mm256_round_ps( v1, _MM_ROUND_NEAREST ); - v2 = _mm256_round_ps( v2, _MM_ROUND_NEAREST ); - v3 = _mm256_round_ps( v3, _MM_ROUND_NEAREST ); - - // Convert floats to integers - __m256i i0 = _mm256_cvtps_epi32( v0 ); - __m256i i1 = _mm256_cvtps_epi32( v1 ); - __m256i i2 = _mm256_cvtps_epi32( v2 ); - __m256i i3 = _mm256_cvtps_epi32( v3 ); - - // Since we don't have in AVX some necessary functions, - // we split the registers in half and call AVX2 analogs from SSE - __m128i ni0 = _mm256_castsi256_si128( i0 ); - __m128i ni1 = _mm256_extractf128_si256( i0, 1); - __m128i ni2 = _mm256_castsi256_si128( i1 ); - __m128i ni3 = _mm256_extractf128_si256( i1, 1); - __m128i ni4 = _mm256_castsi256_si128( i2 ); - __m128i ni5 = _mm256_extractf128_si256( i2, 1); - __m128i ni6 = _mm256_castsi256_si128( i3 ); - __m128i ni7 = _mm256_extractf128_si256( i3, 1); - - // Convert int32 to int16 - ni0 = _mm_packs_epi32( ni0, ni1 ); - ni2 = _mm_packs_epi32( ni2, ni3 ); - ni4 = _mm_packs_epi32( ni4, ni5 ); - ni6 = _mm_packs_epi32( ni6, ni7 ); - // Convert int16 to int8 - ni0 = _mm_packs_epi16( ni0, ni2 ); - ni4 = _mm_packs_epi16( ni4, ni6 ); - - // Apply offset to translate the range from [ -7 .. +7 ] into [ +1 .. +15 ] - const __m128i off = _mm_set1_epi8( 8); - ni0 = _mm_add_epi8( ni0, off ); - ni4 = _mm_add_epi8( ni4, off ); - - // Compress the vector into 4 bit/value, and store - __m128i res = packNibbles( ni0, ni4 ); - _mm_storeu_si128( ( __m128i* )y[i].qs, res ); - } -#elif defined(__wasm_simd128__) - for (int i = 0; i < nb; i++) { - float amax = 0.0f; // absolute max - - v128_t srcv [8]; - v128_t asrcv[8]; - v128_t amaxv[8]; - - for (int l = 0; l < 8; l++) srcv[l] = wasm_v128_load(x + i*32 + 4*l); - for (int l = 0; l < 8; l++) asrcv[l] = wasm_f32x4_abs(srcv[l]); - - for (int l = 0; l < 4; l++) amaxv[2*l] = wasm_f32x4_max(asrcv[2*l], asrcv[2*l+1]); - for (int l = 0; l < 2; l++) amaxv[4*l] = wasm_f32x4_max(amaxv[4*l], amaxv[4*l+2]); - for (int l = 0; l < 1; l++) amaxv[8*l] = wasm_f32x4_max(amaxv[8*l], amaxv[8*l+4]); - - amax = MAX( - MAX(wasm_f32x4_extract_lane(amaxv[0], 0), wasm_f32x4_extract_lane(amaxv[0], 1)), - MAX(wasm_f32x4_extract_lane(amaxv[0], 2), wasm_f32x4_extract_lane(amaxv[0], 3))); - - const float d = amax / ((1 << 3) - 1); - const float id = d ? 
1.0/d : 0.0; - - y[i].d = d; - - for (int l = 0; l < 8; l++) { - const v128_t v = wasm_f32x4_mul(srcv[l], wasm_f32x4_splat(id)); - const v128_t vf = wasm_f32x4_add(v, wasm_f32x4_splat(8.5f)); - const v128_t vi = wasm_i32x4_trunc_sat_f32x4(vf); - - y[i].qs[2*l + 0] = wasm_i32x4_extract_lane(vi, 0) | (wasm_i32x4_extract_lane(vi, 1) << 4); - y[i].qs[2*l + 1] = wasm_i32x4_extract_lane(vi, 2) | (wasm_i32x4_extract_lane(vi, 3) << 4); - } - } -#else - // scalar - quantize_row_q4_0_reference(x, y, k); -#endif -} - -static void quantize_row_q4_1_reference(const float * restrict x, void * restrict vy, int k) { - assert(k % QK4_1 == 0); - const int nb = k / QK4_1; - - block_q4_1 * restrict y = vy; - - uint8_t pp[QK4_1/2]; - - for (int i = 0; i < nb; i++) { - float min = FLT_MAX; - float max = -FLT_MAX; - - for (int l = 0; l < QK4_1; l++) { - const float v = x[i*QK4_1 + l]; - if (v < min) min = v; - if (v > max) max = v; - } - - const float d = (max - min) / ((1 << 4) - 1); - const float id = d ? 1.0f/d : 0.0f; - - y[i].d = d; - y[i].m = min; - - for (int l = 0; l < QK4_1; l += 2) { - const float v0 = (x[i*QK4_1 + l + 0] - min)*id; - const float v1 = (x[i*QK4_1 + l + 1] - min)*id; - - const uint8_t vi0 = roundf(v0); - const uint8_t vi1 = roundf(v1); - - assert(vi0 < 16); - assert(vi1 < 16); - - pp[l/2] = vi0 | (vi1 << 4); - } - - memcpy(y[i].qs, pp, sizeof(pp)); - } -} - -static void quantize_row_q4_1(const float * restrict x, void * restrict vy, int k) { - assert(k % QK4_1 == 0); - - const int nb = k / QK4_1; - - block_q4_1 * restrict y = vy; - -#if defined(__AVX2__) - for (int i = 0; i < nb; i++) { - // Load elements into 4 AVX vectors - __m256 v0 = _mm256_loadu_ps( x ); - __m256 v1 = _mm256_loadu_ps( x + 8 ); - __m256 v2 = _mm256_loadu_ps( x + 16 ); - __m256 v3 = _mm256_loadu_ps( x + 24 ); - x += 32; - - // Compute max for the block - __m256 vmax; - vmax = _mm256_max_ps( v0, v1 ); - vmax = _mm256_max_ps( vmax, v2 ); - vmax = _mm256_max_ps( vmax, v3 ); - - __m128 max4 = _mm_max_ps( _mm256_extractf128_ps( vmax, 1 ), _mm256_castps256_ps128( vmax ) ); - max4 = _mm_max_ps( max4, _mm_movehl_ps( max4, max4 ) ); - max4 = _mm_max_ss( max4, _mm_movehdup_ps( max4 ) ); - const float maxScalar = _mm_cvtss_f32( max4 ); - - // Compute min for the block - __m256 vmin; - vmin = _mm256_min_ps( v0, v1 ); - vmin = _mm256_min_ps( vmin, v2 ); - vmin = _mm256_min_ps( vmin, v3 ); - - __m128 min4 = _mm_min_ps( _mm256_extractf128_ps( vmin, 1 ), _mm256_castps256_ps128( vmin ) ); - min4 = _mm_min_ps( min4, _mm_movehl_ps( min4, min4 ) ); - min4 = _mm_min_ss( min4, _mm_movehdup_ps( min4 ) ); - const float minScalar = _mm_cvtss_f32( min4 ); - - // Quantize these floats - const float d = (maxScalar - minScalar) / ((1 << 4) - 1); - const float id = d ? 
1.0f/d : 0.0f; - - y[i].m = minScalar; - y[i].d = d; - - // x = (x-min)*id - const __m256 mul = _mm256_set1_ps( id ); - const __m256 off = _mm256_set1_ps( minScalar ); - v0 = _mm256_mul_ps( _mm256_sub_ps( v0, off ), mul ); - v1 = _mm256_mul_ps( _mm256_sub_ps( v1, off ), mul ); - v2 = _mm256_mul_ps( _mm256_sub_ps( v2, off ), mul ); - v3 = _mm256_mul_ps( _mm256_sub_ps( v3, off ), mul ); - - // Round to nearest integer - v0 = _mm256_round_ps( v0, _MM_ROUND_NEAREST ); - v1 = _mm256_round_ps( v1, _MM_ROUND_NEAREST ); - v2 = _mm256_round_ps( v2, _MM_ROUND_NEAREST ); - v3 = _mm256_round_ps( v3, _MM_ROUND_NEAREST ); - - // Convert floats to integers - __m256i i0 = _mm256_cvtps_epi32( v0 ); - __m256i i1 = _mm256_cvtps_epi32( v1 ); - __m256i i2 = _mm256_cvtps_epi32( v2 ); - __m256i i3 = _mm256_cvtps_epi32( v3 ); - - // Convert int32 to int16 - i0 = _mm256_packs_epi32( i0, i1 ); // 0, 1, 2, 3, 8, 9, 10, 11, 4, 5, 6, 7, 12, 13, 14, 15 - i2 = _mm256_packs_epi32( i2, i3 ); // 16, 17, 18, 19, 24, 25, 26, 27, 20, 21, 22, 23, 28, 29, 30, 31 - // Convert int16 to int8 - i0 = _mm256_packs_epi16( i0, i2 ); // 0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27, 4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31 - - // We got our precious signed bytes, but the order is now wrong - // These AVX2 pack instructions process 16-byte pieces independently - // The following instruction is fixing the order - const __m256i perm = _mm256_setr_epi32( 0, 4, 1, 5, 2, 6, 3, 7 ); - i0 = _mm256_permutevar8x32_epi32( i0, perm ); - - // Compress the vector into 4 bit/value, and store - __m128i res = packNibbles( i0 ); - _mm_storeu_si128( ( __m128i* )y[i].qs, res ); - } -#elif __ARM_NEON - for (int i = 0; i < nb; i++) { - float32x4_t srcv[8]; - float32x4_t minv[8]; - float32x4_t maxv[8]; - - for (int l = 0; l < 8; l++) srcv[l] = vld1q_f32(x + i*QK4_1 + 4*l); - - for (int l = 0; l < 4; l++) minv[2*l] = vminq_f32(srcv[2*l], srcv[2*l + 1]); - for (int l = 0; l < 2; l++) minv[4*l] = vminq_f32(minv[4*l], minv[4*l + 2]); - for (int l = 0; l < 1; l++) minv[8*l] = vminq_f32(minv[8*l], minv[8*l + 4]); - - for (int l = 0; l < 4; l++) maxv[2*l] = vmaxq_f32(srcv[2*l], srcv[2*l + 1]); - for (int l = 0; l < 2; l++) maxv[4*l] = vmaxq_f32(maxv[4*l], maxv[4*l + 2]); - for (int l = 0; l < 1; l++) maxv[8*l] = vmaxq_f32(maxv[8*l], maxv[8*l + 4]); - - const float min = vminvq_f32(minv[0]); - const float max = vmaxvq_f32(maxv[0]); - - const float d = (max - min) / ((1 << 4) - 1); - const float id = d ? 1.0f/d : 0.0f; - - y[i].d = d; - y[i].m = min; - - const float32x4_t minv0 = vdupq_n_f32(min); - - for (int l = 0; l < 8; l++) { - const float32x4_t v = vmulq_n_f32(vsubq_f32(srcv[l], minv0), id); - const float32x4_t vf = vaddq_f32(v, vdupq_n_f32(0.5f)); // needed to round to nearest - const int32x4_t vi = vcvtq_s32_f32(vf); - - y[i].qs[2*l + 0] = vgetq_lane_s32(vi, 0) | (vgetq_lane_s32(vi, 1) << 4); - y[i].qs[2*l + 1] = vgetq_lane_s32(vi, 2) | (vgetq_lane_s32(vi, 3) << 4); - } - } -#else - // scalar - quantize_row_q4_1_reference(x, vy, k); -#endif -} - -// reference implementation for deterministic creation of model files -static void quantize_row_q4_2_reference(const float * restrict x, block_q4_2 * restrict y, int k) { - assert(k % QK4_2 == 0); - - const int nb = k / QK4_2; - - for (int i = 0; i < nb; i++) { - float amax = 0.0f; // absolute max - - for (int l = 0; l < QK4_2; l++) { - const float v = x[i*QK4_2 + l]; - amax = MAX(amax, fabsf(v)); - } - - const float d = amax / ((1 << 3) - 1); - - const float id = d ? 
1.0f/d : 0.0f; - - y[i].d = GGML_FP32_TO_FP16(d); - - for (int l = 0; l < QK4_2; l += 2) { - const float v0 = x[i*QK4_2 + l + 0]*id; - const float v1 = x[i*QK4_2 + l + 1]*id; - - const uint8_t vi0 = (uint8_t)(v0 + 8.5f); - const uint8_t vi1 = (uint8_t)(v1 + 8.5f); - - assert(vi0 < 16); - assert(vi1 < 16); - - y[i].qs[l/2] = vi0 | (vi1 << 4); - } - } -} - -static inline int nearest_int(float fval) { - assert(fval <= 4194303.f); - float val = fval + 12582912.f; - int i; memcpy(&i, &val, sizeof(int)); - return (i & 0x007fffff) - 0x00400000; -} - -static float kquantize_q4_with_bounds(int n, int nmin, int nmax, const float * restrict X, int nCandidates, - const float * restrict candidates, int8_t * restrict L) { - assert (nmin >= INT8_MIN); - assert (nmax <= INT8_MAX); - float amax = 0; - for (int i=0; i sumlxM2*suml2P) { - if (sumlxP2 > best*suml2P) { - best = sumlxP2/suml2P; bestScale = iscale; - } - } else { - if (sumlxM2 > best*suml2M) { - best = sumlxM2/suml2M; bestScale = -iscale; - } - } - } - float sumlx = 0; int suml2 = 0; - for (int i=0; i max) max = v; - } - - const float d = (max - min) / ((1 << 4) - 1); - const float id = d ? 1.0f/d : 0.0f; - - y[i].d = GGML_FP32_TO_FP16(d); - y[i].m = GGML_FP32_TO_FP16(min); - - for (int l = 0; l < QK4_3; l += 2) { - const float v0 = (x[i*QK4_3 + l + 0] - min)*id; - const float v1 = (x[i*QK4_3 + l + 1] - min)*id; - - const uint8_t vi0 = (int) (v0 + 0.5f); - const uint8_t vi1 = (int) (v1 + 0.5f); - - assert(vi0 < 16); - assert(vi1 < 16); - - y[i].qs[l/2] = vi0 | (vi1 << 4); - } - } -} - -static void quantize_row_q4_3(const float * restrict x, void * restrict vy, int k) { - assert(k % QK4_3 == 0); - - block_q4_3 * restrict y = vy; - - quantize_row_q4_3_reference(x, y, k); -} - -// reference implementation for deterministic creation of model files -static void quantize_row_q8_0_reference(const float * restrict x, block_q8_0 * restrict y, int k) { - assert(k % QK8_0 == 0); - const int nb = k / QK8_0; - - for (int i = 0; i < nb; i++) { - float amax = 0.0f; // absolute max - - for (int l = 0; l < QK8_0; l++) { - const float v = x[i*QK8_0 + l]; - amax = MAX(amax, fabsf(v)); - } - - const float d = amax / ((1 << 7) - 1); - const float id = d ? 1.0f/d : 0.0f; - - y[i].d = d; - - int sum0 = 0; - int sum1 = 0; - - for (int l = 0; l < QK8_0/2; ++l) { - const float v0 = x[i*QK8_0 + l]*id; - const float v1 = x[i*QK8_0 + QK8_0/2 + l]*id; - - y[i].qs[ l] = roundf(v0); - y[i].qs[QK8_0/2 + l] = roundf(v1); - - sum0 += y[i].qs[ l]; - sum1 += y[i].qs[QK8_0/2 + l]; - } - - y[i].s0 = d * sum0; - y[i].s1 = d * sum1; - } -} - -static void quantize_row_q8_0(const float * restrict x, void * restrict vy, int k) { - assert(k % QK8_0 == 0); - const int nb = k / QK8_0; - - block_q8_0 * restrict y = vy; - -#if defined(__ARM_NEON) - for (int i = 0; i < nb; i++) { - float32x4_t srcv [8]; - float32x4_t asrcv[8]; - float32x4_t amaxv[8]; - - for (int l = 0; l < 8; l++) srcv[l] = vld1q_f32(x + i*32 + 4*l); - for (int l = 0; l < 8; l++) asrcv[l] = vabsq_f32(srcv[l]); - - for (int l = 0; l < 4; l++) amaxv[2*l] = vmaxq_f32(asrcv[2*l], asrcv[2*l+1]); - for (int l = 0; l < 2; l++) amaxv[4*l] = vmaxq_f32(amaxv[4*l], amaxv[4*l+2]); - for (int l = 0; l < 1; l++) amaxv[8*l] = vmaxq_f32(amaxv[8*l], amaxv[8*l+4]); - - const float amax = vmaxvq_f32(amaxv[0]); - - const float d = amax / ((1 << 7) - 1); - const float id = d ? 
1.0f/d : 0.0f; - - y[i].d = d; - - int32x4_t accv0 = vdupq_n_s32(0); - int32x4_t accv1 = vdupq_n_s32(0); - - // low half - for (int l = 0; l < 4; l++) { - const float32x4_t v = vmulq_n_f32(srcv[l], id); - const int32x4_t vi = vcvtnq_s32_f32(v); - - y[i].qs[4*l + 0] = vgetq_lane_s32(vi, 0); - y[i].qs[4*l + 1] = vgetq_lane_s32(vi, 1); - y[i].qs[4*l + 2] = vgetq_lane_s32(vi, 2); - y[i].qs[4*l + 3] = vgetq_lane_s32(vi, 3); - - accv0 = vaddq_s32(accv0, vi); - } - - // high half - for (int l = 4; l < 8; l++) { - const float32x4_t v = vmulq_n_f32(srcv[l], id); - const int32x4_t vi = vcvtnq_s32_f32(v); - - y[i].qs[4*l + 0] = vgetq_lane_s32(vi, 0); - y[i].qs[4*l + 1] = vgetq_lane_s32(vi, 1); - y[i].qs[4*l + 2] = vgetq_lane_s32(vi, 2); - y[i].qs[4*l + 3] = vgetq_lane_s32(vi, 3); - - accv1 = vaddq_s32(accv1, vi); - } - - const int32_t sum0 = vaddvq_s32(accv0); - const int32_t sum1 = vaddvq_s32(accv1); - - y[i].s0 = d * sum0; - y[i].s1 = d * sum1; - } -#elif defined(__AVX2__) || defined(__AVX__) - for (int i = 0; i < nb; i++) { - // Load elements into 4 AVX vectors - __m256 v0 = _mm256_loadu_ps( x ); - __m256 v1 = _mm256_loadu_ps( x + 8 ); - __m256 v2 = _mm256_loadu_ps( x + 16 ); - __m256 v3 = _mm256_loadu_ps( x + 24 ); - x += 32; - - // Compute max(abs(e)) for the block - const __m256 signBit = _mm256_set1_ps( -0.0f ); - __m256 maxAbs = _mm256_andnot_ps( signBit, v0 ); - maxAbs = _mm256_max_ps( maxAbs, _mm256_andnot_ps( signBit, v1 ) ); - maxAbs = _mm256_max_ps( maxAbs, _mm256_andnot_ps( signBit, v2 ) ); - maxAbs = _mm256_max_ps( maxAbs, _mm256_andnot_ps( signBit, v3 ) ); - - __m128 max4 = _mm_max_ps( _mm256_extractf128_ps( maxAbs, 1 ), _mm256_castps256_ps128( maxAbs ) ); - max4 = _mm_max_ps( max4, _mm_movehl_ps( max4, max4 ) ); - max4 = _mm_max_ss( max4, _mm_movehdup_ps( max4 ) ); - const float maxScalar = _mm_cvtss_f32( max4 ); - - // Quantize these floats - const float d = maxScalar / 127.f; - y[i].d = d; - const float id = ( maxScalar != 0.0f ) ? 
127.f / maxScalar : 0.0f; - const __m256 mul = _mm256_set1_ps( id ); - - // Apply the multiplier - v0 = _mm256_mul_ps( v0, mul ); - v1 = _mm256_mul_ps( v1, mul ); - v2 = _mm256_mul_ps( v2, mul ); - v3 = _mm256_mul_ps( v3, mul ); - - // Round to nearest integer - v0 = _mm256_round_ps( v0, _MM_ROUND_NEAREST ); - v1 = _mm256_round_ps( v1, _MM_ROUND_NEAREST ); - v2 = _mm256_round_ps( v2, _MM_ROUND_NEAREST ); - v3 = _mm256_round_ps( v3, _MM_ROUND_NEAREST ); - - // Convert floats to integers - __m256i i0 = _mm256_cvtps_epi32( v0 ); - __m256i i1 = _mm256_cvtps_epi32( v1 ); - __m256i i2 = _mm256_cvtps_epi32( v2 ); - __m256i i3 = _mm256_cvtps_epi32( v3 ); - -#if defined(__AVX2__) - // Compute the sum of the quants and set y[i].s - //y[i].s = d * hsum_i32_8(_mm256_add_epi32(_mm256_add_epi32(i0, i1), _mm256_add_epi32(i2, i3))); - y[i].s0 = d * hsum_i32_8(_mm256_add_epi32(i0, i1)); - y[i].s1 = d * hsum_i32_8(_mm256_add_epi32(i2, i3)); - - // Convert int32 to int16 - i0 = _mm256_packs_epi32( i0, i1 ); // 0, 1, 2, 3, 8, 9, 10, 11, 4, 5, 6, 7, 12, 13, 14, 15 - i2 = _mm256_packs_epi32( i2, i3 ); // 16, 17, 18, 19, 24, 25, 26, 27, 20, 21, 22, 23, 28, 29, 30, 31 - // Convert int16 to int8 - i0 = _mm256_packs_epi16( i0, i2 ); // 0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27, 4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31 - - // We got our precious signed bytes, but the order is now wrong - // These AVX2 pack instructions process 16-byte pieces independently - // The following instruction is fixing the order - const __m256i perm = _mm256_setr_epi32( 0, 4, 1, 5, 2, 6, 3, 7 ); - i0 = _mm256_permutevar8x32_epi32( i0, perm ); - - _mm256_storeu_si256((__m256i *)y[i].qs, i0); -#else - // Since we don't have in AVX some necessary functions, - // we split the registers in half and call AVX2 analogs from SSE - __m128i ni0 = _mm256_castsi256_si128( i0 ); - __m128i ni1 = _mm256_extractf128_si256( i0, 1); - __m128i ni2 = _mm256_castsi256_si128( i1 ); - __m128i ni3 = _mm256_extractf128_si256( i1, 1); - __m128i ni4 = _mm256_castsi256_si128( i2 ); - __m128i ni5 = _mm256_extractf128_si256( i2, 1); - __m128i ni6 = _mm256_castsi256_si128( i3 ); - __m128i ni7 = _mm256_extractf128_si256( i3, 1); - - // Compute the sum of the quants and set y[i].s - const __m128i s0 = _mm_add_epi32(_mm_add_epi32(ni0, ni1), _mm_add_epi32(ni2, ni3)); - const __m128i s1 = _mm_add_epi32(_mm_add_epi32(ni4, ni5), _mm_add_epi32(ni6, ni7)); - y[i].s0 = d * hsum_i32_4(s0); - y[i].s1 = d * hsum_i32_4(s1); - - // Convert int32 to int16 - ni0 = _mm_packs_epi32( ni0, ni1 ); - ni2 = _mm_packs_epi32( ni2, ni3 ); - ni4 = _mm_packs_epi32( ni4, ni5 ); - ni6 = _mm_packs_epi32( ni6, ni7 ); - // Convert int16 to int8 - ni0 = _mm_packs_epi16( ni0, ni2 ); - ni4 = _mm_packs_epi16( ni4, ni6 ); - - _mm_storeu_si128((__m128i *)(y[i].qs + 0), ni0); - _mm_storeu_si128((__m128i *)(y[i].qs + 16), ni4); -#endif - } -#else - // scalar - quantize_row_q8_0_reference(x, y, k); -#endif -} - -static void dequantize_row_q4_0(const void * restrict vx, float * restrict y, int k) { - assert(k % QK4_0 == 0); - const int nb = k / QK4_0; - - const block_q4_0 * restrict x = vx; - -#if defined(__AVX2__) - for (int i = 0; i < nb; i++) { - // scale factor - const __m256 d_v = _mm256_broadcast_ss(&x[i].d); - - const uint8_t * restrict pp = x[i].qs; - - for (int l = 0; l < QK4_0; l += 32) { - // Load 32x4-bit integers into 32x8-bit integers - __m256i vx8 = bytes_from_nibbles_32(pp+l/2); - - // Subtract 8 from the integers - vx8 = _mm256_sub_epi8(vx8, _mm256_set1_epi8(8)); - 
- // Convert to 16-bit int - const __m256i vx16_lo = _mm256_cvtepi8_epi16(_mm256_extracti128_si256(vx8, 0)); - const __m256i vx16_hi = _mm256_cvtepi8_epi16(_mm256_extracti128_si256(vx8, 1)); - - // Convert to 32-bit int -> float 32 - const __m256 vf[4] = { - _mm256_cvtepi32_ps(_mm256_cvtepi16_epi32(_mm256_extracti128_si256(vx16_lo, 0))), - _mm256_cvtepi32_ps(_mm256_cvtepi16_epi32(_mm256_extracti128_si256(vx16_lo, 1))), - _mm256_cvtepi32_ps(_mm256_cvtepi16_epi32(_mm256_extracti128_si256(vx16_hi, 0))), - _mm256_cvtepi32_ps(_mm256_cvtepi16_epi32(_mm256_extracti128_si256(vx16_hi, 1))) - }; - - // Scale and store - for (int j = 0; j < 4; j++) { - const __m256 result = _mm256_mul_ps(vf[j], d_v); - _mm256_storeu_ps(y + i * QK4_0 + l + j*8, result); - } - } - } -#elif defined(__ARM_NEON) - for (int i = 0; i < nb; i++) { - const float32x4_t vd = vdupq_n_f32(x[i].d); - - const uint8_t * restrict pp = x[i].qs; - - for (int l = 0; l < QK4_0; l += 16) { - // Load 16x4-bit integers into 8x8-bit integers - const uint8x8_t v8 = vld1_u8(pp + l/2); - - // Expand 4-bit qs to 8-bit bytes - const uint8x8_t v0 = vand_u8(v8, vdup_n_u8(0x0f)); - const uint8x8_t v1 = vshr_n_u8(v8, 4); - - // Convert to signed 8-bit integers - const int8x8_t vs_0 = vreinterpret_s8_u8(v0); - const int8x8_t vs_1 = vreinterpret_s8_u8(v1); - - // Subtract 8 from each byte - const int8x8_t vb_0 = vsub_s8(vs_0, vdup_n_s8(8)); - const int8x8_t vb_1 = vsub_s8(vs_1, vdup_n_s8(8)); - - // Interleave and combine - const int8x8_t vx_0 = vzip1_s8(vb_0, vb_1); - const int8x8_t vx_1 = vzip2_s8(vb_0, vb_1); - - const int8x16_t vq = vcombine_s8(vx_0, vx_1); - - // convert to 2x int16x8_t - const int16x8_t vi_0 = vmovl_s8(vget_low_s8 (vq)); - const int16x8_t vi_1 = vmovl_s8(vget_high_s8(vq)); - - // convert to 4x float32x4_t - const float32x4_t vf_0 = vcvtq_f32_s32(vmovl_s16(vget_low_s16 (vi_0))); - const float32x4_t vf_1 = vcvtq_f32_s32(vmovl_s16(vget_high_s16(vi_0))); - const float32x4_t vf_2 = vcvtq_f32_s32(vmovl_s16(vget_low_s16 (vi_1))); - const float32x4_t vf_3 = vcvtq_f32_s32(vmovl_s16(vget_high_s16(vi_1))); - - // Multiply by d - const float32x4_t r0 = vmulq_f32(vf_0, vd); - const float32x4_t r1 = vmulq_f32(vf_1, vd); - const float32x4_t r2 = vmulq_f32(vf_2, vd); - const float32x4_t r3 = vmulq_f32(vf_3, vd); - - // Store - vst1q_f32(y + i*QK4_0 + l + 0, r0); - vst1q_f32(y + i*QK4_0 + l + 4, r1); - vst1q_f32(y + i*QK4_0 + l + 8, r2); - vst1q_f32(y + i*QK4_0 + l + 12, r3); - } - } -#else - // scalar - for (int i = 0; i < nb; i++) { - const float d = x[i].d; - - const uint8_t * restrict pp = x[i].qs; - - for (int l = 0; l < QK4_0; l += 2) { - const uint8_t vi = pp[l/2]; - - const int8_t vi0 = vi & 0xf; - const int8_t vi1 = vi >> 4; - - const float v0 = (vi0 - 8)*d; - const float v1 = (vi1 - 8)*d; - - //printf("d = %f, vi = %d, vi0 = %d, vi1 = %d, v0 = %f, v1 = %f\n", d, vi, vi0, vi1, v0, v1); - - y[i*QK4_0 + l + 0] = v0; - y[i*QK4_0 + l + 1] = v1; - - assert(!isnan(y[i*QK4_0 + l + 0])); - assert(!isnan(y[i*QK4_0 + l + 1])); - } - } -#endif -} - -static void dequantize_row_q4_1(const void * restrict vx, float * restrict y, int k) { - assert(k % QK4_1 == 0); - const int nb = k / QK4_1; - - const block_q4_1 * restrict x = vx; - -#if defined(__AVX2__) - for (int i = 0; i < nb; i++) { - const __m256 d_v = _mm256_broadcast_ss(&x[i].d); - const __m256 d_m = _mm256_broadcast_ss(&x[i].m); - - const uint8_t * restrict pp = x[i].qs; - - for (int l = 0; l < QK4_1; l += 32) { - // Load 32x4-bit integers into 32x8-bit integers - __m256i vx8 = 
bytes_from_nibbles_32(pp+l/2); - - // Convert to 16-bit int - const __m256i vx16_lo = _mm256_cvtepi8_epi16(_mm256_extracti128_si256(vx8, 0)); - const __m256i vx16_hi = _mm256_cvtepi8_epi16(_mm256_extracti128_si256(vx8, 1)); - - // Convert to 32-bit int -> float 32 - const __m256 vf[4] = { - _mm256_cvtepi32_ps(_mm256_cvtepi16_epi32(_mm256_extracti128_si256(vx16_lo, 0))), - _mm256_cvtepi32_ps(_mm256_cvtepi16_epi32(_mm256_extracti128_si256(vx16_lo, 1))), - _mm256_cvtepi32_ps(_mm256_cvtepi16_epi32(_mm256_extracti128_si256(vx16_hi, 0))), - _mm256_cvtepi32_ps(_mm256_cvtepi16_epi32(_mm256_extracti128_si256(vx16_hi, 1))) - }; - - // Scale, add m and store - for (int j = 0; j < 4; j++) { - const __m256 result = _mm256_add_ps(_mm256_mul_ps(vf[j], d_v), d_m); - _mm256_storeu_ps(y + i * QK4_1 + l + j*8, result); - } - } - } -#elif defined(__ARM_NEON) - for (int i = 0; i < nb; i++) { - const float32x4_t vd = vdupq_n_f32(x[i].d); - const float32x4_t vm = vdupq_n_f32(x[i].m); - - const uint8_t * restrict pp = x[i].qs; - - for (int l = 0; l < QK4_1; l += 16) { - // Load 16x4-bit integers into 8x8-bit integers - const uint8x8_t v8 = vld1_u8(pp + l/2); - - // Expand 4-bit qs to 8-bit bytes - const uint8x8_t v0 = vand_u8(v8, vdup_n_u8(0x0f)); - const uint8x8_t v1 = vshr_n_u8(v8, 4); - - // Interleave and combine - const uint8x8_t vx_0 = vzip1_u8(v0, v1); - const uint8x8_t vx_1 = vzip2_u8(v0, v1); - - const uint8x16_t vq = vcombine_u8(vx_0, vx_1); - - // convert to 2x uint16x8_t - const uint16x8_t vi_0 = vmovl_u8(vget_low_u8 (vq)); - const uint16x8_t vi_1 = vmovl_u8(vget_high_u8(vq)); - - // convert to 4x float32x4_t - const float32x4_t vf_0 = vcvtq_f32_u32(vmovl_u16(vget_low_u16 (vi_0))); - const float32x4_t vf_1 = vcvtq_f32_u32(vmovl_u16(vget_high_u16(vi_0))); - const float32x4_t vf_2 = vcvtq_f32_u32(vmovl_u16(vget_low_u16 (vi_1))); - const float32x4_t vf_3 = vcvtq_f32_u32(vmovl_u16(vget_high_u16(vi_1))); - - // multiply by d and add m - const float32x4_t r0 = vmlaq_f32(vm, vf_0, vd); - const float32x4_t r1 = vmlaq_f32(vm, vf_1, vd); - const float32x4_t r2 = vmlaq_f32(vm, vf_2, vd); - const float32x4_t r3 = vmlaq_f32(vm, vf_3, vd); - - // Store - vst1q_f32(y + i*QK4_1 + l + 0, r0); - vst1q_f32(y + i*QK4_1 + l + 4, r1); - vst1q_f32(y + i*QK4_1 + l + 8, r2); - vst1q_f32(y + i*QK4_1 + l + 12, r3); - } - } -#else - for (int i = 0; i < nb; i++) { - const float d = x[i].d; - const float m = x[i].m; - - const uint8_t * restrict pp = x[i].qs; - - for (int l = 0; l < QK4_1; l += 2) { - const uint8_t vi = pp[l/2]; - - const int8_t vi0 = vi & 0xf; - const int8_t vi1 = vi >> 4; - - const float v0 = vi0*d + m; - const float v1 = vi1*d + m; - - y[i*QK4_1 + l + 0] = v0; - y[i*QK4_1 + l + 1] = v1; - - assert(!isnan(y[i*QK4_1 + l + 0])); - assert(!isnan(y[i*QK4_1 + l + 1])); - } - } -#endif -} - -static void dequantize_row_q4_2(const void * restrict vx, float * restrict y, int k) { - assert(k % QK4_2 == 0); - const int nb = k / QK4_2; - - const block_q4_2 * restrict x = vx; - - for (int i = 0; i < nb; i++) { - const float d = GGML_FP16_TO_FP32(x[i].d); - - const uint8_t * restrict pp = x[i].qs; - - for (int l = 0; l < QK4_2; l += 2) { - const uint8_t vi = pp[l/2]; - - const int8_t vi0 = vi & 0xf; - const int8_t vi1 = vi >> 4; - - const float v0 = (vi0 - 8)*d; - const float v1 = (vi1 - 8)*d; - - y[i*QK4_2 + l + 0] = v0; - y[i*QK4_2 + l + 1] = v1; - - assert(!isnan(y[i*QK4_2 + l + 0])); - assert(!isnan(y[i*QK4_2 + l + 1])); - } - } -} - -static void dequantize_row_q4_3(const void * restrict vx, float * restrict y, int k) { - 
assert(k % QK4_3 == 0); - const int nb = k / QK4_3; - - const block_q4_3 * restrict x = vx; - - for (int i = 0; i < nb; i++) { - const float d = GGML_FP16_TO_FP32(x[i].d); - const float m = GGML_FP16_TO_FP32(x[i].m); - - const uint8_t * restrict pp = x[i].qs; - - for (int l = 0; l < QK4_3; l += 2) { - const uint8_t vi = pp[l/2]; - - const int8_t vi0 = vi & 0xf; - const int8_t vi1 = vi >> 4; - - const float v0 = vi0*d + m; - const float v1 = vi1*d + m; - - y[i*QK4_3 + l + 0] = v0; - y[i*QK4_3 + l + 1] = v1; - - assert(!isnan(y[i*QK4_3 + l + 0])); - assert(!isnan(y[i*QK4_3 + l + 1])); - } - } -} - -static void ggml_vec_dot_q4_0_q8_0(const int n, float * restrict s, const void * restrict vx, const void * restrict vy); -static void ggml_vec_dot_q4_1_q8_0(const int n, float * restrict s, const void * restrict vx, const void * restrict vy); -static void ggml_vec_dot_q4_2_q8_0(const int n, float * restrict s, const void * restrict vx, const void * restrict vy); -static void ggml_vec_dot_q4_3_q8_0(const int n, float * restrict s, const void * restrict vx, const void * restrict vy); - -static const quantize_fns_t quantize_fns[GGML_TYPE_COUNT] = { - [GGML_TYPE_Q4_0] = { - .dequantize_row_q = dequantize_row_q4_0, - .quantize_row_q = quantize_row_q4_0, - .quantize_row_q_reference = (quantize_row_q_t) quantize_row_q4_0_reference, - .quantize_row_q_dot = quantize_row_q8_0, - .vec_dot_q = ggml_vec_dot_q4_0_q8_0, - }, - [GGML_TYPE_Q4_1] = { - .dequantize_row_q = dequantize_row_q4_1, - .quantize_row_q = quantize_row_q4_1, - .quantize_row_q_reference = (quantize_row_q_t) quantize_row_q4_1_reference, - .quantize_row_q_dot = quantize_row_q8_0, - .vec_dot_q = ggml_vec_dot_q4_1_q8_0, - }, - [GGML_TYPE_Q4_2] = { - .dequantize_row_q = dequantize_row_q4_2, - .quantize_row_q = quantize_row_q4_2, - .quantize_row_q_reference = (quantize_row_q_t) quantize_row_q4_2_rmse, //quantize_row_q4_2_reference, - .quantize_row_q_dot = quantize_row_q8_0, - .vec_dot_q = ggml_vec_dot_q4_2_q8_0, - }, - [GGML_TYPE_Q4_3] = { - .dequantize_row_q = dequantize_row_q4_3, - .quantize_row_q = quantize_row_q4_3, - .quantize_row_q_reference = (quantize_row_q_t) quantize_row_q4_3_reference, // TODO: RMSE optimization - .quantize_row_q_dot = quantize_row_q8_0, - .vec_dot_q = ggml_vec_dot_q4_3_q8_0, - }, - [GGML_TYPE_Q8_0] = { - .dequantize_row_q = NULL, // TODO - .quantize_row_q = quantize_row_q8_0, - .quantize_row_q_reference = (quantize_row_q_t) quantize_row_q8_0_reference, - .quantize_row_q_dot = quantize_row_q8_0, - .vec_dot_q = NULL, // TODO - }, -}; - -// For internal test use -quantize_fns_t ggml_internal_get_quantize_fn(size_t i) { - GGML_ASSERT(i < GGML_TYPE_COUNT); - return quantize_fns[i]; -} - - -// -// simd mappings -// - -// we define a common set of C macros which map to specific intrinsics based on the current architecture -// we then implement the fundamental computation operations below using only these macros -// adding support for new architectures requires to define the corresponding SIMD macros -// -// GGML_F32_STEP / GGML_F16_STEP -// number of elements to process in a single step -// -// GGML_F32_EPR / GGML_F16_EPR -// number of elements to fit in a single register -// - -#if defined(__ARM_NEON) && defined(__ARM_FEATURE_FMA) - -#define GGML_SIMD - -// F32 NEON - -#define GGML_F32_STEP 16 -#define GGML_F32_EPR 4 - -#define GGML_F32x4 float32x4_t -#define GGML_F32x4_ZERO vdupq_n_f32(0.0f) -#define GGML_F32x4_SET1(x) vdupq_n_f32(x) -#define GGML_F32x4_LOAD vld1q_f32 -#define GGML_F32x4_STORE vst1q_f32 -#define 
GGML_F32x4_FMA(a, b, c) vfmaq_f32(a, b, c) -#define GGML_F32x4_ADD vaddq_f32 -#define GGML_F32x4_MUL vmulq_f32 -#define GGML_F32x4_REDUCE_ONE(x) vaddvq_f32(x) -#define GGML_F32x4_REDUCE(res, x) \ -{ \ - for (int i = 0; i < GGML_F32_ARR/2; ++i) { \ - x[2*i] = vaddq_f32(x[2*i], x[2*i+1]); \ - } \ - for (int i = 0; i < GGML_F32_ARR/4; ++i) { \ - x[4*i] = vaddq_f32(x[4*i], x[4*i+2]); \ - } \ - for (int i = 0; i < GGML_F32_ARR/8; ++i) { \ - x[8*i] = vaddq_f32(x[8*i], x[8*i+4]); \ - } \ - res = GGML_F32x4_REDUCE_ONE(x[0]); \ -} - -#define GGML_F32_VEC GGML_F32x4 -#define GGML_F32_VEC_ZERO GGML_F32x4_ZERO -#define GGML_F32_VEC_SET1 GGML_F32x4_SET1 -#define GGML_F32_VEC_LOAD GGML_F32x4_LOAD -#define GGML_F32_VEC_STORE GGML_F32x4_STORE -#define GGML_F32_VEC_FMA GGML_F32x4_FMA -#define GGML_F32_VEC_ADD GGML_F32x4_ADD -#define GGML_F32_VEC_MUL GGML_F32x4_MUL -#define GGML_F32_VEC_REDUCE GGML_F32x4_REDUCE - -// F16 NEON - -#if defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC) - #define GGML_F16_STEP 32 - #define GGML_F16_EPR 8 - - #define GGML_F16x8 float16x8_t - #define GGML_F16x8_ZERO vdupq_n_f16(0.0f) - #define GGML_F16x8_SET1(x) vdupq_n_f16(x) - #define GGML_F16x8_LOAD vld1q_f16 - #define GGML_F16x8_STORE vst1q_f16 - #define GGML_F16x8_FMA(a, b, c) vfmaq_f16(a, b, c) - #define GGML_F16x8_ADD vaddq_f16 - #define GGML_F16x8_MUL vmulq_f16 - #define GGML_F16x8_REDUCE(res, x) \ - { \ - for (int i = 0; i < GGML_F16_ARR/2; ++i) { \ - x[2*i] = vaddq_f16(x[2*i], x[2*i+1]); \ - } \ - for (int i = 0; i < GGML_F16_ARR/4; ++i) { \ - x[4*i] = vaddq_f16(x[4*i], x[4*i+2]); \ - } \ - for (int i = 0; i < GGML_F16_ARR/8; ++i) { \ - x[8*i] = vaddq_f16(x[8*i], x[8*i+4]); \ - } \ - const float32x4_t t0 = vcvt_f32_f16(vget_low_f16 (x[0])); \ - const float32x4_t t1 = vcvt_f32_f16(vget_high_f16(x[0])); \ - res = (ggml_float) vaddvq_f32(vaddq_f32(t0, t1)); \ - } - - #define GGML_F16_VEC GGML_F16x8 - #define GGML_F16_VEC_ZERO GGML_F16x8_ZERO - #define GGML_F16_VEC_SET1 GGML_F16x8_SET1 - #define GGML_F16_VEC_LOAD(p, i) GGML_F16x8_LOAD(p) - #define GGML_F16_VEC_STORE(p, r, i) GGML_F16x8_STORE(p, r[i]) - #define GGML_F16_VEC_FMA GGML_F16x8_FMA - #define GGML_F16_VEC_ADD GGML_F16x8_ADD - #define GGML_F16_VEC_MUL GGML_F16x8_MUL - #define GGML_F16_VEC_REDUCE GGML_F16x8_REDUCE -#else - // if FP16 vector arithmetic is not supported, we use FP32 instead - // and take advantage of the vcvt_ functions to convert to/from FP16 - - #define GGML_F16_STEP 16 - #define GGML_F16_EPR 4 - - #define GGML_F32Cx4 float32x4_t - #define GGML_F32Cx4_ZERO vdupq_n_f32(0.0f) - #define GGML_F32Cx4_SET1(x) vdupq_n_f32(x) - #define GGML_F32Cx4_LOAD(x) vcvt_f32_f16(vld1_f16(x)) - #define GGML_F32Cx4_STORE(x, y) vst1_f16(x, vcvt_f16_f32(y)) - #define GGML_F32Cx4_FMA(a, b, c) vfmaq_f32(a, b, c) - #define GGML_F32Cx4_ADD vaddq_f32 - #define GGML_F32Cx4_MUL vmulq_f32 - #define GGML_F32Cx4_REDUCE GGML_F32x4_REDUCE - - #define GGML_F16_VEC GGML_F32Cx4 - #define GGML_F16_VEC_ZERO GGML_F32Cx4_ZERO - #define GGML_F16_VEC_SET1 GGML_F32Cx4_SET1 - #define GGML_F16_VEC_LOAD(p, i) GGML_F32Cx4_LOAD(p) - #define GGML_F16_VEC_STORE(p, r, i) GGML_F32Cx4_STORE(p, r[i]) - #define GGML_F16_VEC_FMA GGML_F32Cx4_FMA - #define GGML_F16_VEC_ADD GGML_F32Cx4_ADD - #define GGML_F16_VEC_MUL GGML_F32Cx4_MUL - #define GGML_F16_VEC_REDUCE GGML_F32Cx4_REDUCE -#endif - -#elif defined(__AVX__) - -#define GGML_SIMD - -// F32 AVX - -#define GGML_F32_STEP 32 -#define GGML_F32_EPR 8 - -#define GGML_F32x8 __m256 -#define GGML_F32x8_ZERO _mm256_setzero_ps() -#define GGML_F32x8_SET1(x) 
_mm256_set1_ps(x) -#define GGML_F32x8_LOAD _mm256_loadu_ps -#define GGML_F32x8_STORE _mm256_storeu_ps -#if defined(__FMA__) - #define GGML_F32x8_FMA(a, b, c) _mm256_fmadd_ps(b, c, a) -#else - #define GGML_F32x8_FMA(a, b, c) _mm256_add_ps(_mm256_mul_ps(b, c), a) -#endif -#define GGML_F32x8_ADD _mm256_add_ps -#define GGML_F32x8_MUL _mm256_mul_ps -#define GGML_F32x8_REDUCE(res, x) \ -{ \ - for (int i = 0; i < GGML_F32_ARR/2; ++i) { \ - x[2*i] = _mm256_add_ps(x[2*i], x[2*i+1]); \ - } \ - for (int i = 0; i < GGML_F32_ARR/4; ++i) { \ - x[4*i] = _mm256_add_ps(x[4*i], x[4*i+2]); \ - } \ - for (int i = 0; i < GGML_F32_ARR/8; ++i) { \ - x[8*i] = _mm256_add_ps(x[8*i], x[8*i+4]); \ - } \ - const __m128 t0 = _mm_add_ps(_mm256_castps256_ps128(x[0]), \ - _mm256_extractf128_ps(x[0], 1)); \ - const __m128 t1 = _mm_hadd_ps(t0, t0); \ - res = _mm_cvtss_f32(_mm_hadd_ps(t1, t1)); \ -} -// TODO: is this optimal ? - -#define GGML_F32_VEC GGML_F32x8 -#define GGML_F32_VEC_ZERO GGML_F32x8_ZERO -#define GGML_F32_VEC_SET1 GGML_F32x8_SET1 -#define GGML_F32_VEC_LOAD GGML_F32x8_LOAD -#define GGML_F32_VEC_STORE GGML_F32x8_STORE -#define GGML_F32_VEC_FMA GGML_F32x8_FMA -#define GGML_F32_VEC_ADD GGML_F32x8_ADD -#define GGML_F32_VEC_MUL GGML_F32x8_MUL -#define GGML_F32_VEC_REDUCE GGML_F32x8_REDUCE - -// F16 AVX - -#define GGML_F16_STEP 32 -#define GGML_F16_EPR 8 - -// F16 arithmetic is not supported by AVX, so we use F32 instead - -#define GGML_F32Cx8 __m256 -#define GGML_F32Cx8_ZERO _mm256_setzero_ps() -#define GGML_F32Cx8_SET1(x) _mm256_set1_ps(x) - -#if defined(__F16C__) -// the _mm256_cvt intrinsics require F16C -#define GGML_F32Cx8_LOAD(x) _mm256_cvtph_ps(_mm_loadu_si128((__m128i *)(x))) -#define GGML_F32Cx8_STORE(x, y) _mm_storeu_si128((__m128i *)(x), _mm256_cvtps_ph(y, 0)) -#else -static inline __m256 __avx_f32cx8_load(ggml_fp16_t *x) { - float tmp[8]; - - for (int i = 0; i < 8; i++) - tmp[i] = GGML_FP16_TO_FP32(x[i]); - - return _mm256_loadu_ps(tmp); -} -static inline void __avx_f32cx8_store(ggml_fp16_t *x, __m256 y) { - float arr[8]; - - _mm256_storeu_ps(arr, y); - - for (int i = 0; i < 8; i++) - x[i] = GGML_FP32_TO_FP16(arr[i]); -} -#define GGML_F32Cx8_LOAD(x) __avx_f32cx8_load(x) -#define GGML_F32Cx8_STORE(x, y) __avx_f32cx8_store(x, y) -#endif - -#define GGML_F32Cx8_FMA GGML_F32x8_FMA -#define GGML_F32Cx8_ADD _mm256_add_ps -#define GGML_F32Cx8_MUL _mm256_mul_ps -#define GGML_F32Cx8_REDUCE GGML_F32x8_REDUCE - -#define GGML_F16_VEC GGML_F32Cx8 -#define GGML_F16_VEC_ZERO GGML_F32Cx8_ZERO -#define GGML_F16_VEC_SET1 GGML_F32Cx8_SET1 -#define GGML_F16_VEC_LOAD(p, i) GGML_F32Cx8_LOAD(p) -#define GGML_F16_VEC_STORE(p, r, i) GGML_F32Cx8_STORE(p, r[i]) -#define GGML_F16_VEC_FMA GGML_F32Cx8_FMA -#define GGML_F16_VEC_ADD GGML_F32Cx8_ADD -#define GGML_F16_VEC_MUL GGML_F32Cx8_MUL -#define GGML_F16_VEC_REDUCE GGML_F32Cx8_REDUCE - -#elif defined(__POWER9_VECTOR__) - -#define GGML_SIMD - -// F32 POWER9 - -#define GGML_F32_STEP 32 -#define GGML_F32_EPR 4 - -#define GGML_F32x4 vector float -#define GGML_F32x4_ZERO 0.0f -#define GGML_F32x4_SET1 vec_splats -#define GGML_F32x4_LOAD(p) vec_xl(0, p) -#define GGML_F32x4_STORE(p, r) vec_xst(r, 0, p) -#define GGML_F32x4_FMA(a, b, c) vec_madd(b, c, a) -#define GGML_F32x4_ADD vec_add -#define GGML_F32x4_MUL vec_mul -#define GGML_F32x4_REDUCE(res, x) \ -{ \ - for (int i = 0; i < GGML_F32_ARR/2; ++i) { \ - x[2*i] = vec_add(x[2*i], x[2*i+1]); \ - } \ - for (int i = 0; i < GGML_F32_ARR/4; ++i) { \ - x[4*i] = vec_add(x[4*i], x[4*i+2]); \ - } \ - for (int i = 0; i < GGML_F32_ARR/8; ++i) { \ - 
x[8*i] = vec_add(x[8*i], x[8*i+4]); \ - } \ - res = vec_extract(x[0], 0) + \ - vec_extract(x[0], 1) + \ - vec_extract(x[0], 2) + \ - vec_extract(x[0], 3); \ -} - -#define GGML_F32_VEC GGML_F32x4 -#define GGML_F32_VEC_ZERO GGML_F32x4_ZERO -#define GGML_F32_VEC_SET1 GGML_F32x4_SET1 -#define GGML_F32_VEC_LOAD GGML_F32x4_LOAD -#define GGML_F32_VEC_STORE GGML_F32x4_STORE -#define GGML_F32_VEC_FMA GGML_F32x4_FMA -#define GGML_F32_VEC_ADD GGML_F32x4_ADD -#define GGML_F32_VEC_MUL GGML_F32x4_MUL -#define GGML_F32_VEC_REDUCE GGML_F32x4_REDUCE - -// F16 POWER9 -#define GGML_F16_STEP GGML_F32_STEP -#define GGML_F16_EPR GGML_F32_EPR -#define GGML_F16_VEC GGML_F32x4 -#define GGML_F16_VEC_ZERO GGML_F32x4_ZERO -#define GGML_F16_VEC_SET1 GGML_F32x4_SET1 -#define GGML_F16_VEC_FMA GGML_F32x4_FMA -#define GGML_F16_VEC_REDUCE GGML_F32x4_REDUCE -// Use vec_xl, not vec_ld, in case the load address is not aligned. -#define GGML_F16_VEC_LOAD(p, i) (i & 0x1) ? \ - vec_extract_fp32_from_shorth(vec_xl(0, p - GGML_F16_EPR)) : \ - vec_extract_fp32_from_shortl(vec_xl(0, p)) -#define GGML_ENDIAN_BYTE(i) ((unsigned char *)&(uint16_t){1})[i] -#define GGML_F16_VEC_STORE(p, r, i) \ - if (i & 0x1) \ - vec_xst(vec_pack_to_short_fp32(r[i - GGML_ENDIAN_BYTE(1)], \ - r[i - GGML_ENDIAN_BYTE(0)]), \ - 0, p - GGML_F16_EPR) - -#elif defined(__wasm_simd128__) - -#define GGML_SIMD - -// F32 WASM - -#define GGML_F32_STEP 16 -#define GGML_F32_EPR 4 - -#define GGML_F32x4 v128_t -#define GGML_F32x4_ZERO wasm_f32x4_splat(0.0f) -#define GGML_F32x4_SET1(x) wasm_f32x4_splat(x) -#define GGML_F32x4_LOAD wasm_v128_load -#define GGML_F32x4_STORE wasm_v128_store -#define GGML_F32x4_FMA(a, b, c) wasm_f32x4_add(wasm_f32x4_mul(b, c), a) -#define GGML_F32x4_ADD wasm_f32x4_add -#define GGML_F32x4_MUL wasm_f32x4_mul -#define GGML_F32x4_REDUCE(res, x) \ -{ \ - for (int i = 0; i < GGML_F32_ARR/2; ++i) { \ - x[2*i] = wasm_f32x4_add(x[2*i], x[2*i+1]); \ - } \ - for (int i = 0; i < GGML_F32_ARR/4; ++i) { \ - x[4*i] = wasm_f32x4_add(x[4*i], x[4*i+2]); \ - } \ - for (int i = 0; i < GGML_F32_ARR/8; ++i) { \ - x[8*i] = wasm_f32x4_add(x[8*i], x[8*i+4]); \ - } \ - res = wasm_f32x4_extract_lane(x[0], 0) + \ - wasm_f32x4_extract_lane(x[0], 1) + \ - wasm_f32x4_extract_lane(x[0], 2) + \ - wasm_f32x4_extract_lane(x[0], 3); \ -} - -#define GGML_F32_VEC GGML_F32x4 -#define GGML_F32_VEC_ZERO GGML_F32x4_ZERO -#define GGML_F32_VEC_SET1 GGML_F32x4_SET1 -#define GGML_F32_VEC_LOAD GGML_F32x4_LOAD -#define GGML_F32_VEC_STORE GGML_F32x4_STORE -#define GGML_F32_VEC_FMA GGML_F32x4_FMA -#define GGML_F32_VEC_ADD GGML_F32x4_ADD -#define GGML_F32_VEC_MUL GGML_F32x4_MUL -#define GGML_F32_VEC_REDUCE GGML_F32x4_REDUCE - -// F16 WASM - -#define GGML_F16_STEP 16 -#define GGML_F16_EPR 4 - -inline static v128_t __wasm_f16x4_load(const ggml_fp16_t * p) { - float tmp[4]; - - tmp[0] = GGML_FP16_TO_FP32(p[0]); - tmp[1] = GGML_FP16_TO_FP32(p[1]); - tmp[2] = GGML_FP16_TO_FP32(p[2]); - tmp[3] = GGML_FP16_TO_FP32(p[3]); - - return wasm_v128_load(tmp); -} - -inline static void __wasm_f16x4_store(ggml_fp16_t * p, v128_t x) { - float tmp[4]; - - wasm_v128_store(tmp, x); - - p[0] = GGML_FP32_TO_FP16(tmp[0]); - p[1] = GGML_FP32_TO_FP16(tmp[1]); - p[2] = GGML_FP32_TO_FP16(tmp[2]); - p[3] = GGML_FP32_TO_FP16(tmp[3]); -} - -#define GGML_F16x4 v128_t -#define GGML_F16x4_ZERO wasm_f32x4_splat(0.0f) -#define GGML_F16x4_SET1(x) wasm_f32x4_splat(x) -#define GGML_F16x4_LOAD(x) __wasm_f16x4_load(x) -#define GGML_F16x4_STORE(x, y) __wasm_f16x4_store(x, y) -#define GGML_F16x4_FMA GGML_F32x4_FMA -#define 
GGML_F16x4_ADD wasm_f32x4_add -#define GGML_F16x4_MUL wasm_f32x4_mul -#define GGML_F16x4_REDUCE(res, x) \ -{ \ - for (int i = 0; i < GGML_F16_ARR/2; ++i) { \ - x[2*i] = wasm_f32x4_add(x[2*i], x[2*i+1]); \ - } \ - for (int i = 0; i < GGML_F16_ARR/4; ++i) { \ - x[4*i] = wasm_f32x4_add(x[4*i], x[4*i+2]); \ - } \ - for (int i = 0; i < GGML_F16_ARR/8; ++i) { \ - x[8*i] = wasm_f32x4_add(x[8*i], x[8*i+4]); \ - } \ - res = wasm_f32x4_extract_lane(x[0], 0) + \ - wasm_f32x4_extract_lane(x[0], 1) + \ - wasm_f32x4_extract_lane(x[0], 2) + \ - wasm_f32x4_extract_lane(x[0], 3); \ -} - -#define GGML_F16_VEC GGML_F16x4 -#define GGML_F16_VEC_ZERO GGML_F16x4_ZERO -#define GGML_F16_VEC_SET1 GGML_F16x4_SET1 -#define GGML_F16_VEC_LOAD(p, i) GGML_F16x4_LOAD(p) -#define GGML_F16_VEC_STORE(p, r, i) GGML_F16x4_STORE(p, r[i]) -#define GGML_F16_VEC_FMA GGML_F16x4_FMA -#define GGML_F16_VEC_ADD GGML_F16x4_ADD -#define GGML_F16_VEC_MUL GGML_F16x4_MUL -#define GGML_F16_VEC_REDUCE GGML_F16x4_REDUCE - -#elif defined(__SSE3__) - -#define GGML_SIMD - -// F32 SSE - -#define GGML_F32_STEP 32 -#define GGML_F32_EPR 4 - -#define GGML_F32x4 __m128 -#define GGML_F32x4_ZERO _mm_setzero_ps() -#define GGML_F32x4_SET1(x) _mm_set1_ps(x) -#define GGML_F32x4_LOAD _mm_loadu_ps -#define GGML_F32x4_STORE _mm_storeu_ps -#if defined(__FMA__) - // TODO: Does this work? - #define GGML_F32x4_FMA(a, b, c) _mm_fmadd_ps(b, c, a) -#else - #define GGML_F32x4_FMA(a, b, c) _mm_add_ps(_mm_mul_ps(b, c), a) -#endif -#define GGML_F32x4_ADD _mm_add_ps -#define GGML_F32x4_MUL _mm_mul_ps -#define GGML_F32x4_REDUCE(res, x) \ -{ \ - for (int i = 0; i < GGML_F32_ARR/2; ++i) { \ - x[2*i] = _mm_add_ps(x[2*i], x[2*i+1]); \ - } \ - for (int i = 0; i < GGML_F32_ARR/4; ++i) { \ - x[4*i] = _mm_add_ps(x[4*i], x[4*i+2]); \ - } \ - for (int i = 0; i < GGML_F32_ARR/8; ++i) { \ - x[8*i] = _mm_add_ps(x[8*i], x[8*i+4]); \ - } \ - const __m128 t0 = _mm_hadd_ps(x[0], x[0]); \ - res = _mm_cvtss_f32(_mm_hadd_ps(t0, t0)); \ -} -// TODO: is this optimal ? 
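The macro layer above is the core of ggml's SIMD portability: each kernel is written once against GGML_F32_VEC, GGML_F32_STEP (elements handled per main-loop iteration) and GGML_F32_EPR (elements per register), and every architecture block merely maps those names onto its own intrinsics. As a rough standalone illustration of that pattern (the helper names and the struct-based pretend "register" below are invented for this sketch, not taken from the removed file):

```c
/* Illustrative sketch only: a scalar stand-in for the GGML_F32_STEP /
 * GGML_F32_EPR / GGML_F32_ARR scheme, showing how a kernel is written
 * against the abstract vector macros rather than any one instruction set. */
#include <stdio.h>

#define F32_EPR  4                    /* elements per "register"          */
#define F32_STEP 16                   /* elements processed per main step */
#define F32_ARR  (F32_STEP / F32_EPR) /* registers used per step          */

typedef struct { float v[F32_EPR]; } f32xN;   /* pretend SIMD register */

static f32xN f32xN_zero(void) { f32xN r = {{0}}; return r; }
static f32xN f32xN_load(const float *p) {
    f32xN r; for (int i = 0; i < F32_EPR; ++i) r.v[i] = p[i]; return r;
}
static f32xN f32xN_fma(f32xN a, f32xN b, f32xN c) {      /* a + b*c */
    for (int i = 0; i < F32_EPR; ++i) a.v[i] += b.v[i] * c.v[i]; return a;
}
static float f32xN_reduce(const f32xN *x, int n) {
    float s = 0.0f;
    for (int j = 0; j < n; ++j)
        for (int i = 0; i < F32_EPR; ++i) s += x[j].v[i];
    return s;
}

/* same shape as ggml_vec_dot_f32: wide main loop + scalar leftovers */
static float vec_dot_f32(int n, const float *x, const float *y) {
    const int np = n & ~(F32_STEP - 1);   /* largest multiple of F32_STEP */
    f32xN sum[F32_ARR];
    for (int j = 0; j < F32_ARR; ++j) sum[j] = f32xN_zero();
    for (int i = 0; i < np; i += F32_STEP)
        for (int j = 0; j < F32_ARR; ++j)
            sum[j] = f32xN_fma(sum[j], f32xN_load(x + i + j*F32_EPR),
                                       f32xN_load(y + i + j*F32_EPR));
    float s = f32xN_reduce(sum, F32_ARR);
    for (int i = np; i < n; ++i) s += x[i] * y[i];   /* leftover tail */
    return s;
}

int main(void) {
    float a[19], b[19];
    for (int i = 0; i < 19; ++i) { a[i] = (float)i; b[i] = 2.0f; }
    printf("%f\n", vec_dot_f32(19, a, b));   /* 2 * (0+1+...+18) = 342 */
    return 0;
}
```

The real ggml_vec_dot_f32 further below follows exactly this shape: a wide main loop over GGML_F32_STEP elements, a reduction of the per-register accumulators, and a scalar loop for the leftover tail.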
- -#define GGML_F32_VEC GGML_F32x4 -#define GGML_F32_VEC_ZERO GGML_F32x4_ZERO -#define GGML_F32_VEC_SET1 GGML_F32x4_SET1 -#define GGML_F32_VEC_LOAD GGML_F32x4_LOAD -#define GGML_F32_VEC_STORE GGML_F32x4_STORE -#define GGML_F32_VEC_FMA GGML_F32x4_FMA -#define GGML_F32_VEC_ADD GGML_F32x4_ADD -#define GGML_F32_VEC_MUL GGML_F32x4_MUL -#define GGML_F32_VEC_REDUCE GGML_F32x4_REDUCE - -// F16 SSE - -#define GGML_F16_STEP 32 -#define GGML_F16_EPR 4 - -static inline __m128 __sse_f16x4_load(ggml_fp16_t *x) { - float tmp[4]; - - tmp[0] = GGML_FP16_TO_FP32(x[0]); - tmp[1] = GGML_FP16_TO_FP32(x[1]); - tmp[2] = GGML_FP16_TO_FP32(x[2]); - tmp[3] = GGML_FP16_TO_FP32(x[3]); - - return _mm_loadu_ps(tmp); -} - -static inline void __sse_f16x4_store(ggml_fp16_t *x, __m128 y) { - float arr[4]; - - _mm_storeu_ps(arr, y); - - x[0] = GGML_FP32_TO_FP16(arr[0]); - x[1] = GGML_FP32_TO_FP16(arr[1]); - x[2] = GGML_FP32_TO_FP16(arr[2]); - x[3] = GGML_FP32_TO_FP16(arr[3]); -} - -#define GGML_F32Cx4 __m128 -#define GGML_F32Cx4_ZERO _mm_setzero_ps() -#define GGML_F32Cx4_SET1(x) _mm_set1_ps(x) -#define GGML_F32Cx4_LOAD(x) __sse_f16x4_load(x) -#define GGML_F32Cx4_STORE(x, y) __sse_f16x4_store(x, y) -#define GGML_F32Cx4_FMA GGML_F32x4_FMA -#define GGML_F32Cx4_ADD _mm_add_ps -#define GGML_F32Cx4_MUL _mm_mul_ps -#define GGML_F32Cx4_REDUCE GGML_F32x4_REDUCE - -#define GGML_F16_VEC GGML_F32Cx4 -#define GGML_F16_VEC_ZERO GGML_F32Cx4_ZERO -#define GGML_F16_VEC_SET1 GGML_F32Cx4_SET1 -#define GGML_F16_VEC_LOAD(p, i) GGML_F32Cx4_LOAD(p) -#define GGML_F16_VEC_STORE(p, r, i) GGML_F32Cx4_STORE(p, r[i]) -#define GGML_F16_VEC_FMA GGML_F32Cx4_FMA -#define GGML_F16_VEC_ADD GGML_F32Cx4_ADD -#define GGML_F16_VEC_MUL GGML_F32Cx4_MUL -#define GGML_F16_VEC_REDUCE GGML_F32Cx4_REDUCE - -#endif - -// GGML_F32_ARR / GGML_F16_ARR -// number of registers to use per step -#ifdef GGML_SIMD -#define GGML_F32_ARR (GGML_F32_STEP/GGML_F32_EPR) -#define GGML_F16_ARR (GGML_F16_STEP/GGML_F16_EPR) -#endif - -// -// fundamental operations -// - -inline static void ggml_vec_set_i8(const int n, int8_t * x, const int8_t v) { for (int i = 0; i < n; ++i) x[i] = v; } - -inline static void ggml_vec_set_i16(const int n, int16_t * x, const int16_t v) { for (int i = 0; i < n; ++i) x[i] = v; } - -inline static void ggml_vec_set_i32(const int n, int32_t * x, const int32_t v) { for (int i = 0; i < n; ++i) x[i] = v; } - -inline static void ggml_vec_set_f16(const int n, ggml_fp16_t * x, const int32_t v) { for (int i = 0; i < n; ++i) x[i] = v; } - -inline static void ggml_vec_add_f32 (const int n, float * z, const float * x, const float * y) { for (int i = 0; i < n; ++i) z[i] = x[i] + y[i]; } -inline static void ggml_vec_acc_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] += x[i]; } -inline static void ggml_vec_acc1_f32(const int n, float * y, const float v) { for (int i = 0; i < n; ++i) y[i] += v; } -inline static void ggml_vec_sub_f32 (const int n, float * z, const float * x, const float * y) { for (int i = 0; i < n; ++i) z[i] = x[i] - y[i]; } -inline static void ggml_vec_set_f32 (const int n, float * x, const float v) { for (int i = 0; i < n; ++i) x[i] = v; } -inline static void ggml_vec_cpy_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = x[i]; } -inline static void ggml_vec_neg_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = -x[i]; } -inline static void ggml_vec_mul_f32 (const int n, float * z, const float * x, const float * y) { for (int i = 0; i < n; ++i) z[i] = 
x[i]*y[i]; } -inline static void ggml_vec_div_f32 (const int n, float * z, const float * x, const float * y) { for (int i = 0; i < n; ++i) z[i] = x[i]/y[i]; } - -inline static void ggml_vec_dot_f32(const int n, float * restrict s, const float * restrict x, const float * restrict y) { -#ifdef GGML_SIMD - float sumf = 0.0f; - const int np = (n & ~(GGML_F32_STEP - 1)); - - GGML_F32_VEC sum[GGML_F32_ARR] = { GGML_F32_VEC_ZERO }; - - GGML_F32_VEC ax[GGML_F32_ARR]; - GGML_F32_VEC ay[GGML_F32_ARR]; - - for (int i = 0; i < np; i += GGML_F32_STEP) { - for (int j = 0; j < GGML_F32_ARR; j++) { - ax[j] = GGML_F32_VEC_LOAD(x + i + j*GGML_F32_EPR); - ay[j] = GGML_F32_VEC_LOAD(y + i + j*GGML_F32_EPR); - - sum[j] = GGML_F32_VEC_FMA(sum[j], ax[j], ay[j]); - } - } - - // reduce sum0..sum3 to sum0 - GGML_F32_VEC_REDUCE(sumf, sum); - - // leftovers - for (int i = np; i < n; ++i) { - sumf += x[i]*y[i]; - } -#else - // scalar - ggml_float sumf = 0.0; - for (int i = 0; i < n; ++i) { - sumf += (ggml_float)(x[i]*y[i]); - } -#endif - - *s = sumf; -} - -inline static void ggml_vec_dot_f16(const int n, float * restrict s, ggml_fp16_t * restrict x, ggml_fp16_t * restrict y) { - ggml_float sumf = 0.0; - -#if defined(GGML_SIMD) - const int np = (n & ~(GGML_F16_STEP - 1)); - - GGML_F16_VEC sum[GGML_F16_ARR] = { GGML_F16_VEC_ZERO }; - - GGML_F16_VEC ax[GGML_F16_ARR]; - GGML_F16_VEC ay[GGML_F16_ARR]; - - for (int i = 0; i < np; i += GGML_F16_STEP) { - for (int j = 0; j < GGML_F16_ARR; j++) { - ax[j] = GGML_F16_VEC_LOAD(x + i + j*GGML_F16_EPR, j); - ay[j] = GGML_F16_VEC_LOAD(y + i + j*GGML_F16_EPR, j); - - sum[j] = GGML_F16_VEC_FMA(sum[j], ax[j], ay[j]); - } - } - - // reduce sum0..sum3 to sum0 - GGML_F16_VEC_REDUCE(sumf, sum); - - // leftovers - for (int i = np; i < n; ++i) { - sumf += (ggml_float)(GGML_FP16_TO_FP32(x[i])*GGML_FP16_TO_FP32(y[i])); - } -#else - for (int i = 0; i < n; ++i) { - sumf += (ggml_float)(GGML_FP16_TO_FP32(x[i])*GGML_FP16_TO_FP32(y[i])); - } -#endif - - *s = sumf; -} - -static void ggml_vec_dot_q4_0_q8_0(const int n, float * restrict s, const void * restrict vx, const void * restrict vy) { - const int nb = n / QK8_0; - - assert(n % QK8_0 == 0); - assert(nb % 2 == 0); - - const block_q4_0 * restrict x = vx; - const block_q8_0 * restrict y = vy; - -#if defined(__ARM_NEON) - float32x4_t sumv0 = vdupq_n_f32(0.0f); - float32x4_t sumv1 = vdupq_n_f32(0.0f); - - float sum8 = 0; - - for (int i = 0; i < nb; i += 2) { - const block_q4_0 * restrict x0 = &x[i + 0]; - const block_q4_0 * restrict x1 = &x[i + 1]; - const block_q8_0 * restrict y0 = &y[i + 0]; - const block_q8_0 * restrict y1 = &y[i + 1]; - - sum8 += x0->d * (y0->s0 + y0->s1) + x1->d * (y1->s0 + y1->s1); - - const uint8x16_t m4b = vdupq_n_u8(0xf); - - const uint8x16_t v0_0 = vld1q_u8(x0->qs); - const uint8x16_t v0_1 = vld1q_u8(x1->qs); - - // 4-bit -> 8-bit - const int8x16_t v0_0l = vreinterpretq_s8_u8(vandq_u8 (v0_0, m4b)); - const int8x16_t v0_0h = vreinterpretq_s8_u8(vshrq_n_u8(v0_0, 4)); - const int8x16_t v0_1l = vreinterpretq_s8_u8(vandq_u8 (v0_1, m4b)); - const int8x16_t v0_1h = vreinterpretq_s8_u8(vshrq_n_u8(v0_1, 4)); - - // load y - const int8x16_t v1_0l = vld1q_s8(y0->qs); - const int8x16_t v1_0h = vld1q_s8(y0->qs + 16); - const int8x16_t v1_1l = vld1q_s8(y1->qs); - const int8x16_t v1_1h = vld1q_s8(y1->qs + 16); - - // interleave - const int8x16_t v1_0ls = vuzp1q_s8(v1_0l, v1_0h); - const int8x16_t v1_0hs = vuzp2q_s8(v1_0l, v1_0h); - const int8x16_t v1_1ls = vuzp1q_s8(v1_1l, v1_1h); - const int8x16_t v1_1hs = vuzp2q_s8(v1_1l, v1_1h); - -#if 
defined(__ARM_FEATURE_DOTPROD) - // dot product into int32x4_t - const int32x4_t p_0 = vdotq_s32(vdotq_s32(vdupq_n_s32(0), v0_0l, v1_0ls), v0_0h, v1_0hs); - const int32x4_t p_1 = vdotq_s32(vdotq_s32(vdupq_n_s32(0), v0_1l, v1_1ls), v0_1h, v1_1hs); - - sumv0 = vmlaq_n_f32(sumv0, vcvtq_f32_s32(p_0), x0->d*y0->d); - sumv1 = vmlaq_n_f32(sumv1, vcvtq_f32_s32(p_1), x1->d*y1->d); -#else - const int16x8_t pl0l = vmull_s8(vget_low_s8 (v0_0l), vget_low_s8 (v1_0ls)); - const int16x8_t pl0h = vmull_s8(vget_high_s8(v0_0l), vget_high_s8(v1_0ls)); - const int16x8_t ph0l = vmull_s8(vget_low_s8 (v0_0h), vget_low_s8 (v1_0hs)); - const int16x8_t ph0h = vmull_s8(vget_high_s8(v0_0h), vget_high_s8(v1_0hs)); - - const int16x8_t pl1l = vmull_s8(vget_low_s8 (v0_1l), vget_low_s8 (v1_1ls)); - const int16x8_t pl1h = vmull_s8(vget_high_s8(v0_1l), vget_high_s8(v1_1ls)); - const int16x8_t ph1l = vmull_s8(vget_low_s8 (v0_1h), vget_low_s8 (v1_1hs)); - const int16x8_t ph1h = vmull_s8(vget_high_s8(v0_1h), vget_high_s8(v1_1hs)); - - const int32x4_t pl0 = vaddq_s32(vpaddlq_s16(pl0l), vpaddlq_s16(pl0h)); - const int32x4_t ph0 = vaddq_s32(vpaddlq_s16(ph0l), vpaddlq_s16(ph0h)); - const int32x4_t pl1 = vaddq_s32(vpaddlq_s16(pl1l), vpaddlq_s16(pl1h)); - const int32x4_t ph1 = vaddq_s32(vpaddlq_s16(ph1l), vpaddlq_s16(ph1h)); - - sumv0 = vmlaq_n_f32(sumv0, vcvtq_f32_s32(vaddq_s32(pl0, ph0)), x0->d*y0->d); - sumv1 = vmlaq_n_f32(sumv1, vcvtq_f32_s32(vaddq_s32(pl1, ph1)), x1->d*y1->d); -#endif - } - - *s = vaddvq_f32(sumv0) + vaddvq_f32(sumv1) - 8 * sum8; -#elif defined(__AVX2__) - // Initialize accumulator with zeros - __m256 acc = _mm256_setzero_ps(); - - // Main loop - for (int i = 0; i < nb; ++i) { - /* Compute combined scale for the block */ - const __m256 d = _mm256_mul_ps( _mm256_broadcast_ss( &x[i].d ), _mm256_broadcast_ss( &y[i].d ) ); - - __m256i bx = bytes_from_nibbles_32(x[i].qs); - - // Now we have a vector with bytes in [ 0 .. 15 ] interval. Offset them into [ -8 .. +7 ] interval. - const __m256i off = _mm256_set1_epi8( 8 ); - bx = _mm256_sub_epi8( bx, off ); - - __m256i by = _mm256_loadu_si256((const __m256i *)y[i].qs); - - const __m256 q = mul_sum_i8_pairs_float(bx, by); - - /* Multiply q with scale and accumulate */ - acc = _mm256_fmadd_ps( d, q, acc ); - } - - *s = hsum_float_8(acc); -#elif defined(__AVX__) - // Initialize accumulator with zeros - __m256 acc = _mm256_setzero_ps(); - - // Main loop - for (int i = 0; i < nb; ++i) { - // Compute combined scale for the block - const __m256 d = _mm256_mul_ps( _mm256_broadcast_ss( &x[i].d ), _mm256_broadcast_ss( &y[i].d ) ); - - __m128i i32[2]; - for (int j = 0; j < 2; ++j) { - // Load 8 bytes, and unpack 4 bit fields into bytes, making 16 bytes - __m128i bx = bytes_from_nibbles_16(x[i].qs + 8*j); - __m128i by = _mm_loadu_si128((const __m128i *)(y[i].qs + 16*j)); - - // Now we have a vector with bytes in [ 0 .. 15 ] interval. Offset them into [ -8 .. +7 ] interval. 
- const __m128i off = _mm_set1_epi8( 8 ); - bx = _mm_sub_epi8( bx, off ); - - // Get absolute values of x vectors - const __m128i ax = _mm_sign_epi8(bx, bx); - - // Sign the values of the y vectors - const __m128i sy = _mm_sign_epi8(by, bx); - - // Perform multiplication and create 16-bit values - const __m128i dot = _mm_maddubs_epi16(ax, sy); - - const __m128i ones = _mm_set1_epi16(1); - i32[j] = _mm_madd_epi16(ones, dot); - } - - // Convert int32_t to float - __m256 p = _mm256_cvtepi32_ps( _mm256_set_m128i( i32[0], i32[1] )); - // Apply the scale, and accumulate - acc = _mm256_add_ps(_mm256_mul_ps( d, p ), acc); - } - - *s = hsum_float_8(acc); -#else - // scalar - float sumf = 0.0; - for (int i = 0; i < nb; i++) { - const float d0 = x[i].d; - const float d1 = y[i].d; - - const uint8_t * restrict p0 = x[i].qs; - const int8_t * restrict p1 = y[i].qs; - - int sumi = 0; - for (int j = 0; j < QK8_0/2; j++) { - const uint8_t v0 = p0[j]; - - const int i0 = (int8_t) (v0 & 0xf) - 8; - const int i1 = (int8_t) (v0 >> 4) - 8; - - const int i2 = p1[2*j + 0]; - const int i3 = p1[2*j + 1]; - - sumi += i0*i2 + i1*i3; - } - sumf += d0*d1*sumi; - } - *s = sumf; -#endif -} - -static void ggml_vec_dot_q4_1_q8_0(const int n, float * restrict s, const void * restrict vx, const void * restrict vy) { - const int nb = n / QK8_0; - - assert(n % QK8_0 == 0); - assert(nb % 2 == 0); - - const block_q4_1 * restrict x = vx; - const block_q8_0 * restrict y = vy; - - // TODO: add AVX / WASM SIMD / etc -#if defined(__ARM_NEON) - float32x4_t sumv0 = vdupq_n_f32(0.0f); - float32x4_t sumv1 = vdupq_n_f32(0.0f); - - float summs = 0; - - for (int i = 0; i < nb; i += 2) { - const block_q4_1 * restrict x0 = &x[i + 0]; - const block_q4_1 * restrict x1 = &x[i + 1]; - const block_q8_0 * restrict y0 = &y[i + 0]; - const block_q8_0 * restrict y1 = &y[i + 1]; - - summs += x0->m * (y0->s0 + y0->s1) + x1->m * (y1->s0 + y1->s1); - - const uint8x16_t m4b = vdupq_n_u8(0xf); - - const uint8x16_t v0_0 = vld1q_u8(x0->qs); - const uint8x16_t v0_1 = vld1q_u8(x1->qs); - - // 4-bit -> 8-bit - const int8x16_t v0_0l = vreinterpretq_s8_u8(vandq_u8 (v0_0, m4b)); - const int8x16_t v0_0h = vreinterpretq_s8_u8(vshrq_n_u8(v0_0, 4)); - const int8x16_t v0_1l = vreinterpretq_s8_u8(vandq_u8 (v0_1, m4b)); - const int8x16_t v0_1h = vreinterpretq_s8_u8(vshrq_n_u8(v0_1, 4)); - - // interleave - const int8x16_t v0_0lz = vzip1q_s8(v0_0l, v0_0h); - const int8x16_t v0_0hz = vzip2q_s8(v0_0l, v0_0h); - const int8x16_t v0_1lz = vzip1q_s8(v0_1l, v0_1h); - const int8x16_t v0_1hz = vzip2q_s8(v0_1l, v0_1h); - - // load y - const int8x16_t v1_0l = vld1q_s8(y0->qs); - const int8x16_t v1_0h = vld1q_s8(y0->qs + 16); - const int8x16_t v1_1l = vld1q_s8(y1->qs); - const int8x16_t v1_1h = vld1q_s8(y1->qs + 16); - -#if defined(__ARM_FEATURE_DOTPROD) - // dot product into int32x4_t - const int32x4_t p_0 = vdotq_s32(vdotq_s32(vdupq_n_s32(0), v0_0lz, v1_0l), v0_0hz, v1_0h); - const int32x4_t p_1 = vdotq_s32(vdotq_s32(vdupq_n_s32(0), v0_1lz, v1_1l), v0_1hz, v1_1h); - - sumv0 = vmlaq_n_f32(sumv0, vcvtq_f32_s32(p_0), x0->d*y0->d); - sumv1 = vmlaq_n_f32(sumv1, vcvtq_f32_s32(p_1), x1->d*y1->d); -#else - const int16x8_t pl0l = vmull_s8(vget_low_s8 (v0_0lz), vget_low_s8 (v1_0l)); - const int16x8_t pl0h = vmull_s8(vget_high_s8(v0_0lz), vget_high_s8(v1_0l)); - const int16x8_t ph0l = vmull_s8(vget_low_s8 (v0_0hz), vget_low_s8 (v1_0h)); - const int16x8_t ph0h = vmull_s8(vget_high_s8(v0_0hz), vget_high_s8(v1_0h)); - - const int16x8_t pl1l = vmull_s8(vget_low_s8 (v0_1lz), vget_low_s8 (v1_1l)); - 
const int16x8_t pl1h = vmull_s8(vget_high_s8(v0_1lz), vget_high_s8(v1_1l)); - const int16x8_t ph1l = vmull_s8(vget_low_s8 (v0_1hz), vget_low_s8 (v1_1h)); - const int16x8_t ph1h = vmull_s8(vget_high_s8(v0_1hz), vget_high_s8(v1_1h)); - - const int32x4_t pl0 = vaddq_s32(vpaddlq_s16(pl0l), vpaddlq_s16(pl0h)); - const int32x4_t ph0 = vaddq_s32(vpaddlq_s16(ph0l), vpaddlq_s16(ph0h)); - const int32x4_t pl1 = vaddq_s32(vpaddlq_s16(pl1l), vpaddlq_s16(pl1h)); - const int32x4_t ph1 = vaddq_s32(vpaddlq_s16(ph1l), vpaddlq_s16(ph1h)); - - sumv0 = vmlaq_n_f32(sumv0, vcvtq_f32_s32(vaddq_s32(pl0, ph0)), x0->d*y0->d); - sumv1 = vmlaq_n_f32(sumv1, vcvtq_f32_s32(vaddq_s32(pl1, ph1)), x1->d*y1->d); -#endif - } - - *s = vaddvq_f32(sumv0) + vaddvq_f32(sumv1) + summs; -#elif defined(__AVX2__) - // Initialize accumulator with zeros - __m256 acc = _mm256_setzero_ps(); - - float summs = 0; - - // Main loop - for (int i = 0; i < nb; ++i) { - const float * d0 = &x[i].d; - const float * d1 = &y[i].d; - - summs += x[i].m * (y[i].s0 + y[i].s1); - - const __m256 d0v = _mm256_broadcast_ss( d0 ); - const __m256 d1v = _mm256_broadcast_ss( d1 ); - - // Compute combined scales - const __m256 d0d1 = _mm256_mul_ps( d0v, d1v ); - - // Load 16 bytes, and unpack 4 bit fields into bytes, making 32 bytes - const __m256i bx = bytes_from_nibbles_32(x[i].qs); - const __m256i by = _mm256_loadu_si256( (const __m256i *)y[i].qs ); - - const __m256 xy = mul_sum_i8_pairs_float(bx, by); - - // Accumulate d0*d1*x*y - acc = _mm256_fmadd_ps( d0d1, xy, acc ); - } - - *s = hsum_float_8(acc) + summs; -#else - // scalar - float sumf = 0.0; - for (int i = 0; i < nb; i++) { - const float d0 = x[i].d; - const float m0 = x[i].m; - const float d1 = y[i].d; - - const uint8_t * restrict p0 = x[i].qs; - const int8_t * restrict p1 = y[i].qs; - - // TODO: this is very slow .. 
- for (int j = 0; j < QK8_0/2; j++) { - const uint8_t v0 = p0[j]; - - const float f0 = d0*(v0 & 0xf) + m0; - const float f1 = d0*(v0 >> 4) + m0; - - const float f2 = d1*p1[2*j + 0]; - const float f3 = d1*p1[2*j + 1]; - - sumf += f0*f2 + f1*f3; - } - } - *s = sumf; -#endif -} - -static void ggml_vec_dot_q4_2_q8_0(const int n, float * restrict s, const void * restrict vx, const void * restrict vy) { - const int nb = n / QK8_0; - - assert(n % QK8_0 == 0); - assert(nb % 2 == 0); - assert(QK8_0 == 2*QK4_2); - - const block_q4_2 * restrict x = vx; - const block_q8_0 * restrict y = vy; - -#if defined(__ARM_NEON) - float32x4_t sumv0 = vdupq_n_f32(0.0f); - float32x4_t sumv1 = vdupq_n_f32(0.0f); - - for (int i = 0; i < nb; i += 2) { - const block_q4_2 * restrict x0_0 = &x[2*(i + 0) + 0]; - const block_q4_2 * restrict x0_1 = &x[2*(i + 0) + 1]; - const block_q4_2 * restrict x1_0 = &x[2*(i + 1) + 0]; - const block_q4_2 * restrict x1_1 = &x[2*(i + 1) + 1]; - - const block_q8_0 * restrict y0 = &y[i + 0]; - const block_q8_0 * restrict y1 = &y[i + 1]; - - const uint8x16_t m4b = vdupq_n_u8(0xf); - const int8x16_t s8b = vdupq_n_s8(0x8); - - const uint8x16_t v0_0 = vcombine_u8(vld1_u8(x0_0->qs), vld1_u8(x0_1->qs)); - const uint8x16_t v0_1 = vcombine_u8(vld1_u8(x1_0->qs), vld1_u8(x1_1->qs)); - - // 4-bit -> 8-bit - const int8x16_t v0_0l = vreinterpretq_s8_u8(vandq_u8 (v0_0, m4b)); - const int8x16_t v0_0h = vreinterpretq_s8_u8(vshrq_n_u8(v0_0, 4)); - const int8x16_t v0_1l = vreinterpretq_s8_u8(vandq_u8 (v0_1, m4b)); - const int8x16_t v0_1h = vreinterpretq_s8_u8(vshrq_n_u8(v0_1, 4)); - - // sub 8 - const int8x16_t v0_0ls = vsubq_s8(v0_0l, s8b); - const int8x16_t v0_0hs = vsubq_s8(v0_0h, s8b); - const int8x16_t v0_1ls = vsubq_s8(v0_1l, s8b); - const int8x16_t v0_1hs = vsubq_s8(v0_1h, s8b); - - // interleave - const int8x16_t v0_0lz = vzip1q_s8(v0_0ls, v0_0hs); - const int8x16_t v0_0hz = vzip2q_s8(v0_0ls, v0_0hs); - const int8x16_t v0_1lz = vzip1q_s8(v0_1ls, v0_1hs); - const int8x16_t v0_1hz = vzip2q_s8(v0_1ls, v0_1hs); - - // load y - const int8x16_t v1_0l = vld1q_s8(y0->qs); - const int8x16_t v1_0h = vld1q_s8(y0->qs + 16); - const int8x16_t v1_1l = vld1q_s8(y1->qs); - const int8x16_t v1_1h = vld1q_s8(y1->qs + 16); - -#if defined(__ARM_FEATURE_DOTPROD) - sumv0 = vmlaq_n_f32(sumv0, vaddq_f32( - vmulq_n_f32(vcvtq_f32_s32(vdotq_s32(vdupq_n_s32(0), v0_0lz, v1_0l)), GGML_FP16_TO_FP32(x0_0->d)), - vmulq_n_f32(vcvtq_f32_s32(vdotq_s32(vdupq_n_s32(0), v0_0hz, v1_0h)), GGML_FP16_TO_FP32(x0_1->d))), y0->d); - - sumv1 = vmlaq_n_f32(sumv1, vaddq_f32( - vmulq_n_f32(vcvtq_f32_s32(vdotq_s32(vdupq_n_s32(0), v0_1lz, v1_1l)), GGML_FP16_TO_FP32(x1_0->d)), - vmulq_n_f32(vcvtq_f32_s32(vdotq_s32(vdupq_n_s32(0), v0_1hz, v1_1h)), GGML_FP16_TO_FP32(x1_1->d))), y1->d); -#else - const int16x8_t pl0l = vmull_s8(vget_low_s8 (v0_0lz), vget_low_s8 (v1_0l)); - const int16x8_t pl0h = vmull_s8(vget_high_s8(v0_0lz), vget_high_s8(v1_0l)); - const int16x8_t ph0l = vmull_s8(vget_low_s8 (v0_0hz), vget_low_s8 (v1_0h)); - const int16x8_t ph0h = vmull_s8(vget_high_s8(v0_0hz), vget_high_s8(v1_0h)); - - const int16x8_t pl1l = vmull_s8(vget_low_s8 (v0_1lz), vget_low_s8 (v1_1l)); - const int16x8_t pl1h = vmull_s8(vget_high_s8(v0_1lz), vget_high_s8(v1_1l)); - const int16x8_t ph1l = vmull_s8(vget_low_s8 (v0_1hz), vget_low_s8 (v1_1h)); - const int16x8_t ph1h = vmull_s8(vget_high_s8(v0_1hz), vget_high_s8(v1_1h)); - - const int32x4_t pl0 = vaddq_s32(vpaddlq_s16(pl0l), vpaddlq_s16(pl0h)); - const int32x4_t ph0 = vaddq_s32(vpaddlq_s16(ph0l), vpaddlq_s16(ph0h)); - 
const int32x4_t pl1 = vaddq_s32(vpaddlq_s16(pl1l), vpaddlq_s16(pl1h)); - const int32x4_t ph1 = vaddq_s32(vpaddlq_s16(ph1l), vpaddlq_s16(ph1h)); - - sumv0 = vmlaq_n_f32(sumv0, vaddq_f32( - vmulq_n_f32(vcvtq_f32_s32(pl0), GGML_FP16_TO_FP32(x0_0->d)), - vmulq_n_f32(vcvtq_f32_s32(ph0), GGML_FP16_TO_FP32(x0_1->d))), y0->d); - - sumv1 = vmlaq_n_f32(sumv1, vaddq_f32( - vmulq_n_f32(vcvtq_f32_s32(pl1), GGML_FP16_TO_FP32(x1_0->d)), - vmulq_n_f32(vcvtq_f32_s32(ph1), GGML_FP16_TO_FP32(x1_1->d))), y1->d); -#endif - } - - *s = vaddvq_f32(sumv0) + vaddvq_f32(sumv1); -#elif defined(__AVX2__) - // Initialize accumulator with zeros - __m256 acc = _mm256_setzero_ps(); - - // Main loop - for (int i = 0; i < nb; i++) { - /* Compute combined scale for the block */ - const __m128 d0 = _mm_set1_ps(GGML_FP16_TO_FP32(x[2*i + 0].d)); - const __m128 d1 = _mm_set1_ps(GGML_FP16_TO_FP32(x[2*i + 1].d)); - const __m256 d = _mm256_mul_ps(_mm256_set_m128(d1, d0), _mm256_broadcast_ss(&y[i].d)); - - __m128i bx0 = bytes_from_nibbles_16(x[2*i + 0].qs); - __m128i bx1 = bytes_from_nibbles_16(x[2*i + 1].qs); - __m256i bx = _mm256_set_m128i(bx1, bx0); - - // Now we have a vector with bytes in [ 0 .. 15 ] interval. Offset them into [ -8 .. +7 ] interval. - const __m256i off = _mm256_set1_epi8(8); - bx = _mm256_sub_epi8(bx, off); - - __m256i by = _mm256_loadu_si256((const __m256i *)y[i].qs); - - const __m256 q = mul_sum_i8_pairs_float(bx, by); - - /* Multiply q with scale and accumulate */ - acc = _mm256_fmadd_ps(d, q, acc); - } - - *s = hsum_float_8(acc); -#else - // scalar - float sumf = 0.0; - for (int i = 0; i < nb; i++) { - const uint8_t * restrict x0 = x[2*i + 0].qs; - const uint8_t * restrict x1 = x[2*i + 1].qs; - const int8_t * restrict y0 = y[i].qs; - - const float d0 = GGML_FP16_TO_FP32(x[2*i + 0].d); - const float d1 = GGML_FP16_TO_FP32(x[2*i + 1].d); - - int sumi_0 = 0; - int sumi_1 = 0; - - for (int j = 0; j < QK8_0/4; j++) { - const uint8_t v0 = x0[j]; - const uint8_t v1 = x1[j]; - - const int i0_0 = (int8_t) (v0 & 0xf) - 8; - const int i1_0 = (int8_t) (v0 >> 4) - 8; - - const int i0_1 = (int8_t) (v1 & 0xf) - 8; - const int i1_1 = (int8_t) (v1 >> 4) - 8; - - const int i2_0 = y0[2*j + 0]; - const int i3_0 = y0[2*j + 1]; - - const int i2_1 = y0[2*(j + QK8_0/4) + 0]; - const int i3_1 = y0[2*(j + QK8_0/4) + 1]; - - sumi_0 += i0_0*i2_0 + i1_0*i3_0; - sumi_1 += i0_1*i2_1 + i1_1*i3_1; - } - - sumf += (d0 * y[i].d) * sumi_0; - sumf += (d1 * y[i].d) * sumi_1; - } - *s = sumf; -#endif -} - -static void ggml_vec_dot_q4_3_q8_0(const int n, float * restrict s, const void * restrict vx, const void * restrict vy) { - const int nb = n / QK8_0; - - assert(n % QK8_0 == 0); - assert(nb % 2 == 0); - assert(QK8_0 == 2*QK4_2); - - const block_q4_3 * restrict x = vx; - const block_q8_0 * restrict y = vy; - -#if defined(__ARM_NEON) - float32x4_t sumv0 = vdupq_n_f32(0.0f); - float32x4_t sumv1 = vdupq_n_f32(0.0f); - - float summs0 = 0.0f; - float summs1 = 0.0f; - - for (int i = 0; i < nb; ++i) { - const block_q4_3 * restrict x0_0 = &x[2*(i + 0) + 0]; - const block_q4_3 * restrict x0_1 = &x[2*(i + 0) + 1]; - - const block_q8_0 * restrict y0 = &y[i + 0]; - - summs0 += GGML_FP16_TO_FP32(x0_0->m) * y0->s0; - summs1 += GGML_FP16_TO_FP32(x0_1->m) * y0->s1; - - const uint8x16_t v0_0 = vcombine_u8(vld1_u8(x0_0->qs), vld1_u8(x0_1->qs)); - - // 4-bit -> 8-bit - const int8x16_t v0_0l = vreinterpretq_s8_u8(vandq_u8 (v0_0, vdupq_n_u8(0xf))); - const int8x16_t v0_0h = vreinterpretq_s8_u8(vshrq_n_u8(v0_0, 4)); - - // interleave - const int8x16_t v0_0lz = 
vzip1q_s8(v0_0l, v0_0h); - const int8x16_t v0_0hz = vzip2q_s8(v0_0l, v0_0h); - - // load y - const int8x16_t v1_0l = vld1q_s8(y0->qs); - const int8x16_t v1_0h = vld1q_s8(y0->qs + 16); - - const float x0_0d = GGML_FP16_TO_FP32(x0_0->d); - const float x0_1d = GGML_FP16_TO_FP32(x0_1->d); - -#if defined(__ARM_FEATURE_DOTPROD) - sumv0 = vmlaq_n_f32(sumv0, vcvtq_f32_s32(vdotq_s32(vdupq_n_s32(0), v0_0lz, v1_0l)), x0_0d*y0->d); - sumv1 = vmlaq_n_f32(sumv1, vcvtq_f32_s32(vdotq_s32(vdupq_n_s32(0), v0_0hz, v1_0h)), x0_1d*y0->d); -#else - const int16x8_t pl0l = vmull_s8(vget_low_s8 (v0_0lz), vget_low_s8 (v1_0l)); - const int16x8_t pl0h = vmull_s8(vget_high_s8(v0_0lz), vget_high_s8(v1_0l)); - const int16x8_t ph0l = vmull_s8(vget_low_s8 (v0_0hz), vget_low_s8 (v1_0h)); - const int16x8_t ph0h = vmull_s8(vget_high_s8(v0_0hz), vget_high_s8(v1_0h)); - - const int32x4_t pl0 = vaddq_s32(vpaddlq_s16(pl0l), vpaddlq_s16(pl0h)); - const int32x4_t ph0 = vaddq_s32(vpaddlq_s16(ph0l), vpaddlq_s16(ph0h)); - - sumv0 = vmlaq_n_f32(sumv0, vcvtq_f32_s32(pl0), x0_0d*y0->d); - sumv1 = vmlaq_n_f32(sumv1, vcvtq_f32_s32(ph0), x0_1d*y0->d); -#endif - } - - *s = vaddvq_f32(vaddq_f32(sumv0, sumv1)) + summs0 + summs1; -#elif defined(__AVX2__) - // Initialize accumulator with zeros - __m256 acc = _mm256_setzero_ps(); - - // Main loop - for (int i = 0; i < nb; i++) { - const __m128 d0 = _mm_set1_ps(GGML_FP16_TO_FP32(x[2*i + 0].d)); - const __m128 d1 = _mm_set1_ps(GGML_FP16_TO_FP32(x[2*i + 1].d)); - const __m256 dx = _mm256_set_m128(d1, d0); - - const __m128 m0 = _mm_set1_ps(GGML_FP16_TO_FP32(x[2*i + 0].m)); - const __m128 m1 = _mm_set1_ps(GGML_FP16_TO_FP32(x[2*i + 1].m)); - const __m256 mx = _mm256_set_m128(m1, m0); - - const __m128i bx0 = bytes_from_nibbles_16(x[2*i + 0].qs); - const __m128i bx1 = bytes_from_nibbles_16(x[2*i + 1].qs); - const __m256i bx = _mm256_set_m128i(bx1, bx0); - - const __m256 dy = _mm256_broadcast_ss(&y[i].d); - const __m256i by = _mm256_loadu_si256((const __m256i *)y[i].qs); - - const __m256i syi = _mm256_maddubs_epi16(_mm256_set1_epi8(1), by); - const __m256 syf = sum_i16_pairs_float(syi); - - const __m256 q = mul_sum_i8_pairs_float(bx, by); - - const __m256 sxy = _mm256_fmadd_ps(q, dx, _mm256_mul_ps(mx, syf)); - acc = _mm256_fmadd_ps(sxy, dy, acc); - } - - *s = hsum_float_8(acc); -#else - // scalar - float sumf = 0.0; - for (int i = 0; i < nb; i++) { - const uint8_t * restrict x0 = x[2*i + 0].qs; - const uint8_t * restrict x1 = x[2*i + 1].qs; - const int8_t * restrict y0 = y[i].qs; - - const float d0 = GGML_FP16_TO_FP32(x[2*i + 0].d); - const float m0 = GGML_FP16_TO_FP32(x[2*i + 0].m); - const float d1 = GGML_FP16_TO_FP32(x[2*i + 1].d); - const float m1 = GGML_FP16_TO_FP32(x[2*i + 1].m); - - int sxy_0 = 0; - int sxy_1 = 0; - - for (int j = 0; j < QK8_0/4; j++) { - const uint8_t v0 = x0[j]; - const uint8_t v1 = x1[j]; - - const int x0_0 = v0 & 0xf; - const int x1_0 = v0 >> 4; - - const int x0_1 = v1 & 0xf; - const int x1_1 = v1 >> 4; - - const int y0_0 = y0[2*j + 0]; - const int y1_0 = y0[2*j + 1]; - - const int y0_1 = y0[2*(j + QK8_0/4) + 0]; - const int y1_1 = y0[2*(j + QK8_0/4) + 1]; - - sxy_0 += x0_0*y0_0 + x1_0*y1_0; - sxy_1 += x0_1*y0_1 + x1_1*y1_1; - } - - sumf += (d0*sxy_0 + d1*sxy_1)*y[i].d + m0*y[i].s0 + m1*y[i].s1; - } - *s = sumf; -#endif -} - - -// compute GGML_VEC_DOT_UNROLL dot products at once -// xs - x row stride in bytes -inline static void ggml_vec_dot_f16_unroll(const int n, const int xs, float * restrict s, void * restrict xv, ggml_fp16_t * restrict y) { - ggml_float 
sumf[GGML_VEC_DOT_UNROLL] = { 0.0 }; - - ggml_fp16_t * restrict x[GGML_VEC_DOT_UNROLL]; - - for (int i = 0; i < GGML_VEC_DOT_UNROLL; ++i) { - x[i] = (ggml_fp16_t *) ((char *) xv + i*xs); - } - -#if defined(GGML_SIMD) - const int np = (n & ~(GGML_F16_STEP - 1)); - - GGML_F16_VEC sum[GGML_VEC_DOT_UNROLL][GGML_F16_ARR] = { { GGML_F16_VEC_ZERO } }; - - GGML_F16_VEC ax[GGML_F16_ARR]; - GGML_F16_VEC ay[GGML_F16_ARR]; - - for (int i = 0; i < np; i += GGML_F16_STEP) { - for (int j = 0; j < GGML_F16_ARR; j++) { - ay[j] = GGML_F16_VEC_LOAD(y + i + j*GGML_F16_EPR, j); - - for (int k = 0; k < GGML_VEC_DOT_UNROLL; ++k) { - ax[j] = GGML_F16_VEC_LOAD(x[k] + i + j*GGML_F16_EPR, j); - - sum[k][j] = GGML_F16_VEC_FMA(sum[k][j], ax[j], ay[j]); - } - } - } - - // reduce sum0..sum3 to sum0 - for (int k = 0; k < GGML_VEC_DOT_UNROLL; ++k) { - GGML_F16_VEC_REDUCE(sumf[k], sum[k]); - } - - // leftovers - for (int i = np; i < n; ++i) { - for (int j = 0; j < GGML_VEC_DOT_UNROLL; ++j) { - sumf[j] += (ggml_float)(GGML_FP16_TO_FP32(x[j][i])*GGML_FP16_TO_FP32(y[i])); - } - } -#else - for (int i = 0; i < n; ++i) { - for (int j = 0; j < GGML_VEC_DOT_UNROLL; ++j) { - sumf[j] += (ggml_float)(GGML_FP16_TO_FP32(x[j][i])*GGML_FP16_TO_FP32(y[i])); - } - } -#endif - - for (int i = 0; i < GGML_VEC_DOT_UNROLL; ++i) { - s[i] = sumf[i]; - } -} - -inline static void ggml_vec_mad_f32(const int n, float * restrict y, const float * restrict x, const float v) { -#if defined(GGML_SIMD) - const int np = (n & ~(GGML_F32_STEP - 1)); - - GGML_F32_VEC vx = GGML_F32_VEC_SET1(v); - - GGML_F32_VEC ax[GGML_F32_ARR]; - GGML_F32_VEC ay[GGML_F32_ARR]; - - for (int i = 0; i < np; i += GGML_F32_STEP) { - for (int j = 0; j < GGML_F32_ARR; j++) { - ax[j] = GGML_F32_VEC_LOAD(x + i + j*GGML_F32_EPR); - ay[j] = GGML_F32_VEC_LOAD(y + i + j*GGML_F32_EPR); - ay[j] = GGML_F32_VEC_FMA(ay[j], ax[j], vx); - - GGML_F32_VEC_STORE(y + i + j*GGML_F32_EPR, ay[j]); - } - } - - // leftovers - for (int i = np; i < n; ++i) { - y[i] += x[i]*v; - } -#else - // scalar - for (int i = 0; i < n; ++i) { - y[i] += x[i]*v; - } -#endif -} - -//inline static void ggml_vec_scale_f32(const int n, float * y, const float v) { for (int i = 0; i < n; ++i) y[i] *= v; } -inline static void ggml_vec_scale_f32(const int n, float * y, const float v) { -#if defined(GGML_SIMD) - const int np = (n & ~(GGML_F32_STEP - 1)); - - GGML_F32_VEC vx = GGML_F32_VEC_SET1(v); - - GGML_F32_VEC ay[GGML_F32_ARR]; - - for (int i = 0; i < np; i += GGML_F32_STEP) { - for (int j = 0; j < GGML_F32_ARR; j++) { - ay[j] = GGML_F32_VEC_LOAD(y + i + j*GGML_F32_EPR); - ay[j] = GGML_F32_VEC_MUL(ay[j], vx); - - GGML_F32_VEC_STORE(y + i + j*GGML_F32_EPR, ay[j]); - } - } - - // leftovers - for (int i = np; i < n; ++i) { - y[i] *= v; - } -#else - // scalar - for (int i = 0; i < n; ++i) { - y[i] *= v; - } -#endif -} - -inline static void ggml_vec_norm_f32 (const int n, float * s, const float * x) { ggml_vec_dot_f32(n, s, x, x); *s = sqrtf(*s); } -inline static void ggml_vec_sqr_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = x[i]*x[i]; } -inline static void ggml_vec_sqrt_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = sqrtf(x[i]); } -inline static void ggml_vec_abs_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = fabsf(x[i]); } -inline static void ggml_vec_sgn_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = (x[i] > 0.f) ? 1.f : ((x[i] < 0.f) ? 
-1.f : 0.f); } -inline static void ggml_vec_step_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = (x[i] > 0.f) ? 1.f : 0.f; } -inline static void ggml_vec_relu_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = (x[i] > 0.f) ? x[i] : 0.f; } - -static const float GELU_COEF_A = 0.044715f; -static const float SQRT_2_OVER_PI = 0.79788456080286535587989211986876f; - -inline static float ggml_gelu_f32(float x) { - return 0.5f*x*(1.0f + tanhf(SQRT_2_OVER_PI*x*(1.0f + GELU_COEF_A*x*x))); -} - -inline static void ggml_vec_gelu_f16(const int n, ggml_fp16_t * y, const ggml_fp16_t * x) { - const uint16_t * i16 = (const uint16_t *) x; - for (int i = 0; i < n; ++i) { - y[i] = table_gelu_f16[i16[i]]; - } -} - -#ifdef GGML_GELU_FP16 -inline static void ggml_vec_gelu_f32(const int n, float * y, const float * x) { - uint16_t t; - for (int i = 0; i < n; ++i) { - ggml_fp16_t fp16 = GGML_FP32_TO_FP16(x[i]); - memcpy(&t, &fp16, sizeof(uint16_t)); - y[i] = GGML_FP16_TO_FP32(table_gelu_f16[t]); - } -} -#else -inline static void ggml_vec_gelu_f32(const int n, float * y, const float * x) { - for (int i = 0; i < n; ++i) { - y[i] = ggml_gelu_f32(x[i]); - } -} -#endif - -// Sigmoid Linear Unit (SiLU) function -inline static float ggml_silu_f32(float x) { - return x/(1.0f + expf(-x)); -} - -inline static void ggml_vec_silu_f16(const int n, ggml_fp16_t * y, const ggml_fp16_t * x) { - const uint16_t * i16 = (const uint16_t *) x; - for (int i = 0; i < n; ++i) { - y[i] = table_silu_f16[i16[i]]; - } -} - -#ifdef GGML_SILU_FP16 -inline static void ggml_vec_silu_f32(const int n, float * y, const float * x) { - uint16_t t; - for (int i = 0; i < n; ++i) { - ggml_fp16_t fp16 = GGML_FP32_TO_FP16(x[i]); - memcpy(&t, &fp16, sizeof(uint16_t)); - y[i] = GGML_FP16_TO_FP32(table_silu_f16[t]); - } -} -#else -inline static void ggml_vec_silu_f32(const int n, float * y, const float * x) { - for (int i = 0; i < n; ++i) { - y[i] = ggml_silu_f32(x[i]); - } -} -#endif - -inline static void ggml_vec_sum_f32(const int n, float * s, const float * x) { -#ifndef GGML_USE_ACCELERATE - ggml_float sum = 0.0; - for (int i = 0; i < n; ++i) { - sum += (ggml_float)x[i]; - } - *s = sum; -#else - vDSP_sve(x, 1, s, n); -#endif -} - -inline static void ggml_vec_max_f32(const int n, float * s, const float * x) { -#ifndef GGML_USE_ACCELERATE - float max = -INFINITY; - for (int i = 0; i < n; ++i) { - max = MAX(max, x[i]); - } - *s = max; -#else - vDSP_maxv(x, 1, s, n); -#endif -} - -inline static void ggml_vec_norm_inv_f32(const int n, float * s, const float * x) { - ggml_vec_norm_f32(n, s, x); - *s = 1.f/(*s); -} - -// -// logging -// - -#if (GGML_DEBUG >= 1) -#define GGML_PRINT_DEBUG(...) printf(__VA_ARGS__) -#else -#define GGML_PRINT_DEBUG(...) -#endif - -#if (GGML_DEBUG >= 5) -#define GGML_PRINT_DEBUG_5(...) printf(__VA_ARGS__) -#else -#define GGML_PRINT_DEBUG_5(...) -#endif - -#if (GGML_DEBUG >= 10) -#define GGML_PRINT_DEBUG_10(...) printf(__VA_ARGS__) -#else -#define GGML_PRINT_DEBUG_10(...) -#endif - -#define GGML_PRINT(...) 
printf(__VA_ARGS__) - -// -// data types -// - -static const int GGML_BLCK_SIZE[GGML_TYPE_COUNT] = { - [GGML_TYPE_F32] = 1, - [GGML_TYPE_F16] = 1, - [GGML_TYPE_Q4_0] = QK4_0, - [GGML_TYPE_Q4_1] = QK4_1, - [GGML_TYPE_Q4_2] = QK4_2, - [GGML_TYPE_Q4_3] = QK4_3, - [GGML_TYPE_Q8_0] = QK8_0, - [GGML_TYPE_I8] = 1, - [GGML_TYPE_I16] = 1, - [GGML_TYPE_I32] = 1, -}; -static_assert(GGML_TYPE_COUNT == 10, "GGML_BLCK_SIZE is outdated"); - -static const size_t GGML_TYPE_SIZE[GGML_TYPE_COUNT] = { - [GGML_TYPE_F32] = sizeof(float), - [GGML_TYPE_F16] = sizeof(ggml_fp16_t), - [GGML_TYPE_Q4_0] = sizeof(block_q4_0), - [GGML_TYPE_Q4_1] = sizeof(block_q4_1), - [GGML_TYPE_Q4_2] = sizeof(block_q4_2), - [GGML_TYPE_Q4_3] = sizeof(block_q4_3), - [GGML_TYPE_Q8_0] = sizeof(block_q8_0), - [GGML_TYPE_I8] = sizeof(int8_t), - [GGML_TYPE_I16] = sizeof(int16_t), - [GGML_TYPE_I32] = sizeof(int32_t), -}; -static_assert(GGML_TYPE_COUNT == 10, "GGML_TYPE_SIZE is outdated"); - - -static const char * GGML_TYPE_NAME[GGML_TYPE_COUNT] = { - [GGML_TYPE_F32] = "f32", - [GGML_TYPE_F16] = "f16", - [GGML_TYPE_Q4_0] = "q4_0", - [GGML_TYPE_Q4_1] = "q4_1", - [GGML_TYPE_Q4_2] = "q4_2", - [GGML_TYPE_Q4_3] = "q4_3", - [GGML_TYPE_Q8_0] = "q8_0", - [GGML_TYPE_I8] = "i8", - [GGML_TYPE_I16] = "i16", - [GGML_TYPE_I32] = "i32", -}; -static_assert(GGML_TYPE_COUNT == 10, "GGML_TYPE_NAME is outdated"); - -static bool GGML_IS_QUANTIZED[GGML_TYPE_COUNT] = { - [GGML_TYPE_F32] = false, - [GGML_TYPE_F16] = false, - [GGML_TYPE_Q4_0] = true, - [GGML_TYPE_Q4_1] = true, - [GGML_TYPE_Q4_2] = true, - [GGML_TYPE_Q4_3] = true, - [GGML_TYPE_Q8_0] = true, - [GGML_TYPE_I8] = false, - [GGML_TYPE_I16] = false, - [GGML_TYPE_I32] = false, -}; -static_assert(GGML_TYPE_COUNT == 10, "GGML_IS_QUANTIZED is outdated"); - -static const char * GGML_OP_LABEL[GGML_OP_COUNT] = { - "NONE", - - "DUP", - "ADD", - "SUB", - "MUL", - "DIV", - "SQR", - "SQRT", - "SUM", - "MEAN", - "REPEAT", - "ABS", - "SGN", - "NEG", - "STEP", - "RELU", - "GELU", - "SILU", - "NORM", - "RMS_NORM", - - "MUL_MAT", - - "SCALE", - "CPY", - "CONT", - "RESHAPE", - "VIEW", - "PERMUTE", - "TRANSPOSE", - "GET_ROWS", - "DIAG_MASK_INF", - "SOFT_MAX", - "ROPE", - "CONV_1D_1S", - "CONV_1D_2S", - - "FLASH_ATTN", - "FLASH_FF", - - "MAP_UNARY", - "MAP_BINARY", -}; - -static_assert(GGML_OP_COUNT == 38, "GGML_OP_COUNT != 38"); - -static const char * GGML_OP_SYMBOL[GGML_OP_COUNT] = { - "none", - - "x", - "x+y", - "x-y", - "x*y", - "x/y", - "x^2", - "√x", - "Σx", - "Σx/n", - "repeat(x)", - "abs(x)", - "sgn(x)", - "-x", - "step(x)", - "relu(x)", - "gelu(x)", - "silu(x)", - "norm(x)", - "rms_norm(x)", - - "X*Y", - - "x*v", - "x-\\>y", - "cont(x)", - "reshape(x)", - "view(x)", - "permute(x)", - "transpose(x)", - "get_rows(x)", - "diag_mask_inf(x)", - "soft_max(x)", - "rope(x)", - "conv_1d_1s(x)", - "conv_1d_2s(x)", - - "flash_attn(x)", - "flash_ff(x)", - - "f(x)", - "f(x,y)", -}; - -static_assert(GGML_OP_COUNT == 38, "GGML_OP_COUNT != 38"); - -static_assert(sizeof(struct ggml_object)%GGML_MEM_ALIGN == 0, "ggml_object size must be a multiple of GGML_MEM_ALIGN"); -static_assert(sizeof(struct ggml_tensor)%GGML_MEM_ALIGN == 0, "ggml_tensor size must be a multiple of GGML_MEM_ALIGN"); - -// -// ggml context -// - -struct ggml_context { - size_t mem_size; - void * mem_buffer; - bool mem_buffer_owned; - bool no_alloc; - - int n_objects; - - struct ggml_object * objects_begin; - struct ggml_object * objects_end; - - struct ggml_scratch scratch; - struct ggml_scratch scratch_save; -}; - -struct ggml_context_container { - bool used; - - 
struct ggml_context context; -}; - -// -// compute types -// - -enum ggml_task_type { - GGML_TASK_INIT = 0, - GGML_TASK_COMPUTE, - GGML_TASK_FINALIZE, -}; - -struct ggml_compute_params { - enum ggml_task_type type; - - int ith, nth; - - // work buffer for all threads - size_t wsize; - void * wdata; -}; - -// -// ggml state -// - -struct ggml_state { - struct ggml_context_container contexts[GGML_MAX_CONTEXTS]; -}; - -// global state -static struct ggml_state g_state; -static atomic_int g_state_barrier = 0; - -// barrier via spin lock -inline static void ggml_critical_section_start(void) { - int processing = atomic_fetch_add(&g_state_barrier, 1); - - while (processing > 0) { - // wait for other threads to finish - atomic_fetch_sub(&g_state_barrier, 1); - sched_yield(); // TODO: reconsider this - processing = atomic_fetch_add(&g_state_barrier, 1); - } -} - -// TODO: make this somehow automatically executed -// some sort of "sentry" mechanism -inline static void ggml_critical_section_end(void) { - atomic_fetch_sub(&g_state_barrier, 1); -} - -//////////////////////////////////////////////////////////////////////////////// - -void ggml_print_object(const struct ggml_object * obj) { - GGML_PRINT(" - ggml_object: offset = %zu, size = %zu, next = %p\n", - obj->offs, obj->size, (const void *) obj->next); -} - -void ggml_print_objects(const struct ggml_context * ctx) { - struct ggml_object * obj = ctx->objects_begin; - - GGML_PRINT("%s: objects in context %p:\n", __func__, (const void *) ctx); - - while (obj != NULL) { - ggml_print_object(obj); - obj = obj->next; - } - - GGML_PRINT("%s: --- end ---\n", __func__); -} - -int64_t ggml_nelements(const struct ggml_tensor * tensor) { - static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function"); - - return tensor->ne[0]*tensor->ne[1]*tensor->ne[2]*tensor->ne[3]; -} - -int ggml_nrows(const struct ggml_tensor * tensor) { - static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function"); - - return tensor->ne[1]*tensor->ne[2]*tensor->ne[3]; -} - -size_t ggml_nbytes(const struct ggml_tensor * tensor) { - static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function"); - - return (ggml_nelements(tensor)*GGML_TYPE_SIZE[tensor->type])/GGML_BLCK_SIZE[tensor->type]; -} - -int ggml_blck_size(enum ggml_type type) { - return GGML_BLCK_SIZE[type]; -} - -size_t ggml_type_size(enum ggml_type type) { - return GGML_TYPE_SIZE[type]; -} - -float ggml_type_sizef(enum ggml_type type) { - return ((float)(GGML_TYPE_SIZE[type]))/GGML_BLCK_SIZE[type]; -} - -const char * ggml_type_name(enum ggml_type type) { - return GGML_TYPE_NAME[type]; -} - - -size_t ggml_element_size(const struct ggml_tensor * tensor) { - return GGML_TYPE_SIZE[tensor->type]; -} - -static inline bool ggml_is_scalar(const struct ggml_tensor * tensor) { - static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function"); - - return tensor->ne[0] == 1 && tensor->ne[1] == 1 && tensor->ne[2] == 1 && tensor->ne[3] == 1; -} - -static inline bool ggml_is_vector(const struct ggml_tensor * tensor) { - static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function"); - - return tensor->ne[1] == 1 && tensor->ne[2] == 1 && tensor->ne[3] == 1; -} - -static inline bool ggml_is_matrix(const struct ggml_tensor * tensor) { - static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function"); - - return tensor->ne[2] == 1 && tensor->ne[3] == 1; -} - -static inline bool ggml_can_mul_mat(const struct ggml_tensor * t0, 
const struct ggml_tensor * t1) { - static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function"); - - return - (t0->ne[0] == t1->ne[0]) && - (t0->ne[2] == t1->ne[2]) && - (t0->ne[3] == t1->ne[3]); -} - -bool ggml_is_quantized(enum ggml_type type) { - return GGML_IS_QUANTIZED[type]; -} - -static inline bool ggml_is_transposed(const struct ggml_tensor * tensor) { - return tensor->nb[0] > tensor->nb[1]; -} - -static inline bool ggml_is_contiguous(const struct ggml_tensor * tensor) { - static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function"); - - return - tensor->nb[0] == GGML_TYPE_SIZE[tensor->type] && - tensor->nb[1] == (tensor->nb[0]*tensor->ne[0])/GGML_BLCK_SIZE[tensor->type] && - tensor->nb[2] == tensor->nb[1]*tensor->ne[1] && - tensor->nb[3] == tensor->nb[2]*tensor->ne[2]; -} - -static inline bool ggml_is_padded_1d(const struct ggml_tensor * tensor) { - static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function"); - - return - tensor->nb[0] == GGML_TYPE_SIZE[tensor->type] && - tensor->nb[2] == tensor->nb[1]*tensor->ne[1] && - tensor->nb[3] == tensor->nb[2]*tensor->ne[2]; -} - -static inline bool ggml_are_same_shape(const struct ggml_tensor * t0, const struct ggml_tensor * t1) { - static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function"); - - return - (t0->ne[0] == t1->ne[0] ) && - (t0->ne[1] == t1->ne[1] ) && - (t0->ne[2] == t1->ne[2] ) && - (t0->ne[3] == t1->ne[3] ); -} - -// check if t1 can be represented as a repeatition of t0 -static inline bool ggml_can_repeat(const struct ggml_tensor * t0, const struct ggml_tensor * t1) { - static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function"); - - return - (t1->ne[0]%t0->ne[0] == 0) && - (t1->ne[1]%t0->ne[1] == 0) && - (t1->ne[2]%t0->ne[2] == 0) && - (t1->ne[3]%t0->ne[3] == 0); -} - -static inline int ggml_up32(int n) { - return (n + 31) & ~31; -} - -static inline int ggml_up64(int n) { - return (n + 63) & ~63; -} - -static inline int ggml_up(int n, int m) { - // assert m is a power of 2 - GGML_ASSERT((m & (m - 1)) == 0); - return (n + m - 1) & ~(m - 1); -} - -// assert that pointer is aligned to GGML_MEM_ALIGN -#define ggml_assert_aligned(ptr) \ - GGML_ASSERT(((uintptr_t) (ptr))%GGML_MEM_ALIGN == 0) - -//////////////////////////////////////////////////////////////////////////////// - -struct ggml_context * ggml_init(struct ggml_init_params params) { - // make this function thread safe - ggml_critical_section_start(); - - static bool is_first_call = true; - - if (is_first_call) { - // initialize time system (required on Windows) - ggml_time_init(); - - // initialize GELU, SILU and EXP F32 tables - { - const uint64_t t_start = ggml_time_us(); UNUSED(t_start); - - ggml_fp16_t ii; - for (int i = 0; i < (1 << 16); ++i) { - uint16_t ui = i; - memcpy(&ii, &ui, sizeof(ii)); - const float f = table_f32_f16[i] = GGML_COMPUTE_FP16_TO_FP32(ii); - table_gelu_f16[i] = GGML_FP32_TO_FP16(ggml_gelu_f32(f)); - table_silu_f16[i] = GGML_FP32_TO_FP16(ggml_silu_f32(f)); - table_exp_f16[i] = GGML_FP32_TO_FP16(expf(f)); - } - - const uint64_t t_end = ggml_time_us(); UNUSED(t_end); - - GGML_PRINT_DEBUG("%s: GELU, SILU and EXP tables initialized in %f ms\n", __func__, (t_end - t_start)/1000.0f); - } - - // initialize g_state - { - const uint64_t t_start = ggml_time_us(); UNUSED(t_start); - - g_state = (struct ggml_state) { - /*.contexts =*/ { { 0 } }, - }; - - for (int i = 0; i < GGML_MAX_CONTEXTS; ++i) { - g_state.contexts[i].used = false; - } - - 
const uint64_t t_end = ggml_time_us(); UNUSED(t_end); - - GGML_PRINT_DEBUG("%s: g_state initialized in %f ms\n", __func__, (t_end - t_start)/1000.0f); - } - - // initialize cuBLAS - #if defined(GGML_USE_CUBLAS) - ggml_init_cublas(); - #endif - - is_first_call = false; - } - - // find non-used context in g_state - struct ggml_context * ctx = NULL; - - for (int i = 0; i < GGML_MAX_CONTEXTS; i++) { - if (!g_state.contexts[i].used) { - g_state.contexts[i].used = true; - ctx = &g_state.contexts[i].context; - - GGML_PRINT_DEBUG("%s: found unused context %d\n", __func__, i); - break; - } - } - - if (ctx == NULL) { - GGML_PRINT_DEBUG("%s: no unused context found\n", __func__); - - ggml_critical_section_end(); - - return NULL; - } - - const size_t mem_size = (params.mem_size + GGML_MEM_ALIGN - 1) & ~(GGML_MEM_ALIGN - 1); - - *ctx = (struct ggml_context) { - /*.mem_size =*/ mem_size, - /*.mem_buffer =*/ params.mem_buffer ? params.mem_buffer : GGML_ALIGNED_MALLOC(mem_size), - /*.mem_buffer_owned =*/ params.mem_buffer ? false : true, - /*.no_alloc =*/ params.no_alloc, - /*.n_objects =*/ 0, - /*.objects_begin =*/ NULL, - /*.objects_end =*/ NULL, - /*.scratch =*/ { 0, 0, NULL, }, - /*.scratch_save =*/ { 0, 0, NULL, }, - }; - - GGML_ASSERT(ctx->mem_buffer != NULL); - - ggml_assert_aligned(ctx->mem_buffer); - - GGML_PRINT_DEBUG("%s: context initialized\n", __func__); - - ggml_critical_section_end(); - - return ctx; -} - -void ggml_free(struct ggml_context * ctx) { - // make this function thread safe - ggml_critical_section_start(); - - bool found = false; - - for (int i = 0; i < GGML_MAX_CONTEXTS; i++) { - if (&g_state.contexts[i].context == ctx) { - g_state.contexts[i].used = false; - - GGML_PRINT_DEBUG("%s: context %d with %d objects has been freed. memory used = %zu\n", - __func__, i, ctx->n_objects, ctx->objects_end->offs + ctx->objects_end->size); - - if (ctx->mem_buffer_owned) { - GGML_ALIGNED_FREE(ctx->mem_buffer); - } - - found = true; - break; - } - } - - if (!found) { - GGML_PRINT_DEBUG("%s: context not found\n", __func__); - } - - ggml_critical_section_end(); -} - -size_t ggml_used_mem(const struct ggml_context * ctx) { - return ctx->objects_end->offs + ctx->objects_end->size; -} - -size_t ggml_set_scratch(struct ggml_context * ctx, struct ggml_scratch scratch) { - const size_t result = ctx->scratch.data ? ctx->scratch.offs : 0; - - ctx->scratch = scratch; - - return result; -} - -//////////////////////////////////////////////////////////////////////////////// - -struct ggml_tensor * ggml_new_tensor_impl( - struct ggml_context * ctx, - enum ggml_type type, - int n_dims, - const int64_t* ne, - void* data) { - // always insert objects at the end of the context's memory pool - struct ggml_object * obj_cur = ctx->objects_end; - - const size_t cur_offs = obj_cur == NULL ? 0 : obj_cur->offs; - const size_t cur_size = obj_cur == NULL ? 
0 : obj_cur->size; - const size_t cur_end = cur_offs + cur_size; - - size_t size_needed = 0; - - if (data == NULL && !ctx->no_alloc) { - size_needed += GGML_TYPE_SIZE[type]*(ne[0]/GGML_BLCK_SIZE[type]); - for (int i = 1; i < n_dims; i++) { - size_needed *= ne[i]; - } - // align to GGML_MEM_ALIGN - size_needed = ((size_needed + GGML_MEM_ALIGN - 1)/GGML_MEM_ALIGN)*GGML_MEM_ALIGN; - } - - char * const mem_buffer = ctx->mem_buffer; - struct ggml_object * const obj_new = (struct ggml_object *)(mem_buffer + cur_end); - - if (ctx->scratch.data == NULL || data != NULL) { - size_needed += sizeof(struct ggml_tensor); - - if (cur_end + size_needed + GGML_OBJECT_SIZE > ctx->mem_size) { - GGML_PRINT("%s: not enough space in the context's memory pool (needed %zu, available %zu)\n", - __func__, cur_end + size_needed + GGML_OBJECT_SIZE, ctx->mem_size); - assert(false); - return NULL; - } - - *obj_new = (struct ggml_object) { - .offs = cur_end + GGML_OBJECT_SIZE, - .size = size_needed, - .next = NULL, - }; - } else { - if (ctx->scratch.offs + size_needed > ctx->scratch.size) { - GGML_PRINT("%s: not enough space in the scratch memory\n", __func__); - assert(false); - return NULL; - } - - if (cur_end + sizeof(struct ggml_tensor) + GGML_OBJECT_SIZE > ctx->mem_size) { - GGML_PRINT("%s: not enough space in the context's memory pool (needed %zu, available %zu)\n", - __func__, cur_end + sizeof(struct ggml_tensor) + GGML_OBJECT_SIZE, ctx->mem_size); - assert(false); - return NULL; - } - - data = (char * const) ctx->scratch.data + ctx->scratch.offs; - - *obj_new = (struct ggml_object) { - .offs = cur_end + GGML_OBJECT_SIZE, - .size = sizeof(struct ggml_tensor), - .next = NULL, - }; - - //printf("scratch offs = %zu, size_needed = %zu\n", ctx->scratch.offs, size_needed); - - ctx->scratch.offs += size_needed; - } - - if (obj_cur != NULL) { - obj_cur->next = obj_new; - } else { - // this is the first object in this context - ctx->objects_begin = obj_new; - } - - ctx->objects_end = obj_new; - - //printf("%s: inserted new object at %zu, size = %zu\n", __func__, cur_end, obj_new->size); - - struct ggml_tensor * const result = (struct ggml_tensor *)(mem_buffer + obj_new->offs); - - ggml_assert_aligned(result); - - *result = (struct ggml_tensor) { - /*.type =*/ type, - /*.n_dims =*/ n_dims, - /*.ne =*/ { 1, 1, 1, 1 }, - /*.nb =*/ { 0, 0, 0, 0 }, - /*.op =*/ GGML_OP_NONE, - /*.is_param =*/ false, - /*.grad =*/ NULL, - /*.src0 =*/ NULL, - /*.src1 =*/ NULL, - /*.opt =*/ { NULL }, - /*.n_tasks =*/ 0, - /*.perf_runs =*/ 0, - /*.perf_cycles =*/ 0, - /*.perf_time_us =*/ 0, - /*.data =*/ (data == NULL && !ctx->no_alloc) ? 
(void *)(result + 1) : data, - /*.pad =*/ { 0 }, - }; - - // TODO: this should not be needed as long as we don't rely on aligned SIMD loads - //ggml_assert_aligned(result->data); - - for (int i = 0; i < n_dims; i++) { - result->ne[i] = ne[i]; - } - - result->nb[0] = GGML_TYPE_SIZE[type]; - result->nb[1] = result->nb[0]*(result->ne[0]/GGML_BLCK_SIZE[type]); - for (int i = 2; i < GGML_MAX_DIMS; i++) { - result->nb[i] = result->nb[i - 1]*result->ne[i - 1]; - } - - ctx->n_objects++; - - return result; -} - -struct ggml_tensor * ggml_new_tensor( - struct ggml_context * ctx, - enum ggml_type type, - int n_dims, - const int64_t * ne) { - return ggml_new_tensor_impl(ctx, type, n_dims, ne, NULL); -} - -struct ggml_tensor * ggml_new_tensor_1d( - struct ggml_context * ctx, - enum ggml_type type, - int64_t ne0) { - return ggml_new_tensor(ctx, type, 1, &ne0); -} - -struct ggml_tensor * ggml_new_tensor_2d( - struct ggml_context * ctx, - enum ggml_type type, - int64_t ne0, - int64_t ne1) { - const int64_t ne[2] = { ne0, ne1 }; - return ggml_new_tensor(ctx, type, 2, ne); -} - -struct ggml_tensor * ggml_new_tensor_3d( - struct ggml_context * ctx, - enum ggml_type type, - int64_t ne0, - int64_t ne1, - int64_t ne2) { - const int64_t ne[3] = { ne0, ne1, ne2 }; - return ggml_new_tensor(ctx, type, 3, ne); -} - -struct ggml_tensor * ggml_new_tensor_4d( - struct ggml_context * ctx, - enum ggml_type type, - int64_t ne0, - int64_t ne1, - int64_t ne2, - int64_t ne3) { - const int64_t ne[4] = { ne0, ne1, ne2, ne3 }; - return ggml_new_tensor(ctx, type, 4, ne); -} - -struct ggml_tensor * ggml_new_i32(struct ggml_context * ctx, int32_t value) { - ctx->scratch_save = ctx->scratch; - ctx->scratch.data = NULL; - - struct ggml_tensor * result = ggml_new_tensor_1d(ctx, GGML_TYPE_I32, 1); - - ctx->scratch = ctx->scratch_save; - - ggml_set_i32(result, value); - - return result; -} - -struct ggml_tensor * ggml_new_f32(struct ggml_context * ctx, float value) { - ctx->scratch_save = ctx->scratch; - ctx->scratch.data = NULL; - - struct ggml_tensor * result = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, 1); - - ctx->scratch = ctx->scratch_save; - - ggml_set_f32(result, value); - - return result; -} - -struct ggml_tensor * ggml_dup_tensor(struct ggml_context * ctx, const struct ggml_tensor * src) { - return ggml_new_tensor_impl(ctx, src->type, src->n_dims, src->ne, NULL); -} - -struct ggml_tensor * ggml_set_zero(struct ggml_tensor * tensor) { - memset(tensor->data, 0, ggml_nbytes(tensor)); - return tensor; -} - -struct ggml_tensor * ggml_set_i32 (struct ggml_tensor * tensor, int32_t value) { - const int n = ggml_nrows(tensor); - const int nc = tensor->ne[0]; - const size_t n1 = tensor->nb[1]; - - char * const data = tensor->data; - - switch (tensor->type) { - case GGML_TYPE_I8: - { - assert(tensor->nb[0] == sizeof(int8_t)); - for (int i = 0; i < n; i++) { - ggml_vec_set_i8(nc, (int8_t *)(data + i*n1), value); - } - } break; - case GGML_TYPE_I16: - { - assert(tensor->nb[0] == sizeof(int16_t)); - for (int i = 0; i < n; i++) { - ggml_vec_set_i16(nc, (int16_t *)(data + i*n1), value); - } - } break; - case GGML_TYPE_I32: - { - assert(tensor->nb[0] == sizeof(int32_t)); - for (int i = 0; i < n; i++) { - ggml_vec_set_i32(nc, (int32_t *)(data + i*n1), value); - } - } break; - case GGML_TYPE_F16: - { - assert(tensor->nb[0] == sizeof(ggml_fp16_t)); - for (int i = 0; i < n; i++) { - ggml_vec_set_f16(nc, (ggml_fp16_t *)(data + i*n1), value); - } - } break; - case GGML_TYPE_F32: - { - assert(tensor->nb[0] == sizeof(float)); - for (int i = 0; i < n; 
i++) { - ggml_vec_set_f32(nc, (float *)(data + i*n1), value); - } - } break; - default: - { - GGML_ASSERT(false); - } break; - } - - return tensor; -} - -struct ggml_tensor * ggml_set_f32(struct ggml_tensor * tensor, float value) { - const int n = ggml_nrows(tensor); - const int nc = tensor->ne[0]; - const size_t n1 = tensor->nb[1]; - - char * const data = tensor->data; - - switch (tensor->type) { - case GGML_TYPE_I8: - { - assert(tensor->nb[0] == sizeof(int8_t)); - for (int i = 0; i < n; i++) { - ggml_vec_set_i8(nc, (int8_t *)(data + i*n1), value); - } - } break; - case GGML_TYPE_I16: - { - assert(tensor->nb[0] == sizeof(int16_t)); - for (int i = 0; i < n; i++) { - ggml_vec_set_i16(nc, (int16_t *)(data + i*n1), value); - } - } break; - case GGML_TYPE_I32: - { - assert(tensor->nb[0] == sizeof(int32_t)); - for (int i = 0; i < n; i++) { - ggml_vec_set_i32(nc, (int32_t *)(data + i*n1), value); - } - } break; - case GGML_TYPE_F16: - { - assert(tensor->nb[0] == sizeof(ggml_fp16_t)); - for (int i = 0; i < n; i++) { - ggml_vec_set_f16(nc, (ggml_fp16_t *)(data + i*n1), value); - } - } break; - case GGML_TYPE_F32: - { - assert(tensor->nb[0] == sizeof(float)); - for (int i = 0; i < n; i++) { - ggml_vec_set_f32(nc, (float *)(data + i*n1), value); - } - } break; - default: - { - GGML_ASSERT(false); - } break; - } - - return tensor; -} - -int32_t ggml_get_i32_1d(const struct ggml_tensor * tensor, int i) { - switch (tensor->type) { - case GGML_TYPE_I8: - { - GGML_ASSERT(tensor->nb[0] == sizeof(int8_t)); - return ((int8_t *)(tensor->data))[i]; - } break; - case GGML_TYPE_I16: - { - GGML_ASSERT(tensor->nb[0] == sizeof(int16_t)); - return ((int16_t *)(tensor->data))[i]; - } break; - case GGML_TYPE_I32: - { - GGML_ASSERT(tensor->nb[0] == sizeof(int32_t)); - return ((int32_t *)(tensor->data))[i]; - } break; - case GGML_TYPE_F16: - { - GGML_ASSERT(tensor->nb[0] == sizeof(ggml_fp16_t)); - return GGML_FP16_TO_FP32(((ggml_fp16_t *)(tensor->data))[i]); - } break; - case GGML_TYPE_F32: - { - GGML_ASSERT(tensor->nb[0] == sizeof(float)); - return ((float *)(tensor->data))[i]; - } break; - default: - { - GGML_ASSERT(false); - } break; - } - - return 0.0f; -} - -void ggml_set_i32_1d(const struct ggml_tensor * tensor, int i, int32_t value) { - switch (tensor->type) { - case GGML_TYPE_I8: - { - GGML_ASSERT(tensor->nb[0] == sizeof(int8_t)); - ((int8_t *)(tensor->data))[i] = value; - } break; - case GGML_TYPE_I16: - { - GGML_ASSERT(tensor->nb[0] == sizeof(int16_t)); - ((int16_t *)(tensor->data))[i] = value; - } break; - case GGML_TYPE_I32: - { - GGML_ASSERT(tensor->nb[0] == sizeof(int32_t)); - ((int32_t *)(tensor->data))[i] = value; - } break; - case GGML_TYPE_F16: - { - GGML_ASSERT(tensor->nb[0] == sizeof(ggml_fp16_t)); - ((ggml_fp16_t *)(tensor->data))[i] = GGML_FP32_TO_FP16(value); - } break; - case GGML_TYPE_F32: - { - GGML_ASSERT(tensor->nb[0] == sizeof(float)); - ((float *)(tensor->data))[i] = value; - } break; - default: - { - GGML_ASSERT(false); - } break; - } -} - -float ggml_get_f32_1d(const struct ggml_tensor * tensor, int i) { - switch (tensor->type) { - case GGML_TYPE_I8: - { - GGML_ASSERT(tensor->nb[0] == sizeof(int8_t)); - return ((int8_t *)(tensor->data))[i]; - } break; - case GGML_TYPE_I16: - { - GGML_ASSERT(tensor->nb[0] == sizeof(int16_t)); - return ((int16_t *)(tensor->data))[i]; - } break; - case GGML_TYPE_I32: - { - GGML_ASSERT(tensor->nb[0] == sizeof(int32_t)); - return ((int32_t *)(tensor->data))[i]; - } break; - case GGML_TYPE_F16: - { - GGML_ASSERT(tensor->nb[0] == sizeof(ggml_fp16_t)); - return 
GGML_FP16_TO_FP32(((ggml_fp16_t *)(tensor->data))[i]); - } break; - case GGML_TYPE_F32: - { - GGML_ASSERT(tensor->nb[0] == sizeof(float)); - return ((float *)(tensor->data))[i]; - } break; - default: - { - GGML_ASSERT(false); - } break; - } - - return 0.0f; -} - -void ggml_set_f32_1d(const struct ggml_tensor * tensor, int i, float value) { - switch (tensor->type) { - case GGML_TYPE_I8: - { - GGML_ASSERT(tensor->nb[0] == sizeof(int8_t)); - ((int8_t *)(tensor->data))[i] = value; - } break; - case GGML_TYPE_I16: - { - GGML_ASSERT(tensor->nb[0] == sizeof(int16_t)); - ((int16_t *)(tensor->data))[i] = value; - } break; - case GGML_TYPE_I32: - { - GGML_ASSERT(tensor->nb[0] == sizeof(int32_t)); - ((int32_t *)(tensor->data))[i] = value; - } break; - case GGML_TYPE_F16: - { - GGML_ASSERT(tensor->nb[0] == sizeof(ggml_fp16_t)); - ((ggml_fp16_t *)(tensor->data))[i] = GGML_FP32_TO_FP16(value); - } break; - case GGML_TYPE_F32: - { - GGML_ASSERT(tensor->nb[0] == sizeof(float)); - ((float *)(tensor->data))[i] = value; - } break; - default: - { - GGML_ASSERT(false); - } break; - } -} - -void * ggml_get_data(const struct ggml_tensor * tensor) { - return tensor->data; -} - -float * ggml_get_data_f32(const struct ggml_tensor * tensor) { - assert(tensor->type == GGML_TYPE_F32); - return (float *)(tensor->data); -} - -struct ggml_tensor * ggml_view_tensor( - struct ggml_context * ctx, - const struct ggml_tensor * src) { - struct ggml_tensor * result = ggml_new_tensor_impl(ctx, src->type, src->n_dims, src->ne, src->data); - - result->nb[0] = src->nb[0]; - result->nb[1] = src->nb[1]; - result->nb[2] = src->nb[2]; - result->nb[3] = src->nb[3]; - - return result; -} - -//////////////////////////////////////////////////////////////////////////////// - -// ggml_dup - -struct ggml_tensor * ggml_dup_impl( - struct ggml_context * ctx, - struct ggml_tensor * a, - bool inplace) { - bool is_node = false; - - if (!inplace && (a->grad)) { - is_node = true; - } - - struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a); - - result->op = GGML_OP_DUP; - result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL; - result->src0 = a; - result->src1 = NULL; - - return result; -} - -struct ggml_tensor * ggml_dup( - struct ggml_context * ctx, - struct ggml_tensor * a) { - return ggml_dup_impl(ctx, a, false); -} - -struct ggml_tensor * ggml_dup_inplace( - struct ggml_context * ctx, - struct ggml_tensor * a) { - return ggml_dup_impl(ctx, a, true); -} - -// ggml_add - -struct ggml_tensor * ggml_add_impl( - struct ggml_context * ctx, - struct ggml_tensor * a, - struct ggml_tensor * b, - bool inplace) { - GGML_ASSERT(ggml_are_same_shape(a, b)); - - bool is_node = false; - - if (!inplace && (a->grad || b->grad)) { - is_node = true; - } - - struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a); - - result->op = GGML_OP_ADD; - result->grad = is_node ? 
ggml_dup_tensor(ctx, result) : NULL; - result->src0 = a; - result->src1 = b; - - return result; -} - -struct ggml_tensor * ggml_add( - struct ggml_context * ctx, - struct ggml_tensor * a, - struct ggml_tensor * b) { - return ggml_add_impl(ctx, a, b, false); -} - -struct ggml_tensor * ggml_add_inplace( - struct ggml_context * ctx, - struct ggml_tensor * a, - struct ggml_tensor * b) { - return ggml_add_impl(ctx, a, b, true); -} - -// ggml_sub - -struct ggml_tensor * ggml_sub_impl( - struct ggml_context * ctx, - struct ggml_tensor * a, - struct ggml_tensor * b, - bool inplace) { - GGML_ASSERT(ggml_are_same_shape(a, b)); - - bool is_node = false; - - if (!inplace && (a->grad || b->grad)) { - is_node = true; - } - - struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a); - - result->op = GGML_OP_SUB; - result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL; - result->src0 = a; - result->src1 = b; - - return result; -} - -struct ggml_tensor * ggml_sub( - struct ggml_context * ctx, - struct ggml_tensor * a, - struct ggml_tensor * b) { - return ggml_sub_impl(ctx, a, b, false); -} - -struct ggml_tensor * ggml_sub_inplace( - struct ggml_context * ctx, - struct ggml_tensor * a, - struct ggml_tensor * b) { - return ggml_sub_impl(ctx, a, b, true); -} - -// ggml_mul - -struct ggml_tensor * ggml_mul_impl( - struct ggml_context * ctx, - struct ggml_tensor * a, - struct ggml_tensor * b, - bool inplace) { - GGML_ASSERT(ggml_are_same_shape(a, b)); - - bool is_node = false; - - if (!inplace && (a->grad || b->grad)) { - is_node = true; - } - - if (inplace) { - GGML_ASSERT(is_node == false); - } - - struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a); - - result->op = GGML_OP_MUL; - result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL; - result->src0 = a; - result->src1 = b; - - return result; -} - -struct ggml_tensor * ggml_mul( - struct ggml_context * ctx, - struct ggml_tensor * a, - struct ggml_tensor * b) { - return ggml_mul_impl(ctx, a, b, false); -} - -struct ggml_tensor * ggml_mul_inplace( - struct ggml_context * ctx, - struct ggml_tensor * a, - struct ggml_tensor * b) { - return ggml_mul_impl(ctx, a, b, true); -} - -// ggml_div - -struct ggml_tensor * ggml_div_impl( - struct ggml_context * ctx, - struct ggml_tensor * a, - struct ggml_tensor * b, - bool inplace) { - GGML_ASSERT(ggml_are_same_shape(a, b)); - - bool is_node = false; - - if (!inplace && (a->grad || b->grad)) { - is_node = true; - } - - if (inplace) { - GGML_ASSERT(is_node == false); - } - - struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a); - - result->op = GGML_OP_DIV; - result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL; - result->src0 = a; - result->src1 = b; - - return result; -} - -struct ggml_tensor * ggml_div( - struct ggml_context * ctx, - struct ggml_tensor * a, - struct ggml_tensor * b) { - return ggml_div_impl(ctx, a, b, false); -} - -struct ggml_tensor * ggml_div_inplace( - struct ggml_context * ctx, - struct ggml_tensor * a, - struct ggml_tensor * b) { - return ggml_div_impl(ctx, a, b, true); -} - -// ggml_sqr - -struct ggml_tensor * ggml_sqr_impl( - struct ggml_context * ctx, - struct ggml_tensor * a, - bool inplace) { - bool is_node = false; - - if (!inplace && (a->grad)) { - is_node = true; - } - - struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a); - - result->op = GGML_OP_SQR; - result->grad = is_node ? 
ggml_dup_tensor(ctx, result) : NULL; - result->src0 = a; - result->src1 = NULL; - - return result; -} - -struct ggml_tensor * ggml_sqr( - struct ggml_context * ctx, - struct ggml_tensor * a) { - return ggml_sqr_impl(ctx, a, false); -} - -struct ggml_tensor * ggml_sqr_inplace( - struct ggml_context * ctx, - struct ggml_tensor * a) { - return ggml_sqr_impl(ctx, a, true); -} - -// ggml_sqrt - -struct ggml_tensor * ggml_sqrt_impl( - struct ggml_context * ctx, - struct ggml_tensor * a, - bool inplace) { - bool is_node = false; - - if (!inplace && (a->grad)) { - is_node = true; - } - - struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a); - - result->op = GGML_OP_SQRT; - result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL; - result->src0 = a; - result->src1 = NULL; - - return result; -} - -struct ggml_tensor * ggml_sqrt( - struct ggml_context * ctx, - struct ggml_tensor * a) { - return ggml_sqrt_impl(ctx, a, false); -} - -struct ggml_tensor * ggml_sqrt_inplace( - struct ggml_context * ctx, - struct ggml_tensor * a) { - return ggml_sqrt_impl(ctx, a, true); -} - -// ggml_sum - -struct ggml_tensor * ggml_sum( - struct ggml_context * ctx, - struct ggml_tensor * a) { - bool is_node = false; - - if (a->grad) { - is_node = true; - } - - struct ggml_tensor * result = ggml_new_tensor_1d(ctx, a->type, 1); - - result->op = GGML_OP_SUM; - result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL; - result->src0 = a; - result->src1 = NULL; - - return result; -} - -// ggml_mean - -struct ggml_tensor * ggml_mean( - struct ggml_context * ctx, - struct ggml_tensor * a) { - bool is_node = false; - - if (a->grad) { - GGML_ASSERT(false); // TODO: implement - is_node = true; - } - - int64_t ne[GGML_MAX_DIMS] = { 1, a->ne[1], a->ne[2], a->ne[3] }; - struct ggml_tensor * result = ggml_new_tensor(ctx, GGML_TYPE_F32, a->n_dims, ne); - - result->op = GGML_OP_MEAN; - result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL; - result->src0 = a; - result->src1 = NULL; - - return result; -} - -// ggml_repeat - -struct ggml_tensor * ggml_repeat( - struct ggml_context * ctx, - struct ggml_tensor * a, - struct ggml_tensor * b) { - GGML_ASSERT(ggml_can_repeat(a, b)); - - bool is_node = false; - - if (a->grad) { - is_node = true; - } - - if (ggml_are_same_shape(a, b) && !is_node) { - return a; - } - - struct ggml_tensor * result = ggml_new_tensor(ctx, a->type, b->n_dims, b->ne); - - result->op = GGML_OP_REPEAT; - result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL; - result->src0 = a; - result->src1 = b; - - return result; -} - -// ggml_abs - -struct ggml_tensor * ggml_abs_impl( - struct ggml_context * ctx, - struct ggml_tensor * a, - bool inplace) { - bool is_node = false; - - if (!inplace && (a->grad)) { - is_node = true; - } - - struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a); - - result->op = GGML_OP_ABS; - result->grad = is_node ? 
ggml_dup_tensor(ctx, result) : NULL; - result->src0 = a; - result->src1 = NULL; - - return result; -} - -struct ggml_tensor * ggml_abs( - struct ggml_context * ctx, - struct ggml_tensor * a) { - return ggml_abs_impl(ctx, a, false); -} - -struct ggml_tensor * ggml_abs_inplace( - struct ggml_context * ctx, - struct ggml_tensor * a) { - return ggml_abs_impl(ctx, a, true); -} - - -// ggml_sgn - -struct ggml_tensor * ggml_sgn_impl( - struct ggml_context * ctx, - struct ggml_tensor * a, - bool inplace) { - bool is_node = false; - - if (!inplace && (a->grad)) { - is_node = true; - } - - struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a); - - result->op = GGML_OP_SGN; - result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL; - result->src0 = a; - result->src1 = NULL; - - return result; -} - -struct ggml_tensor * ggml_sgn( - struct ggml_context * ctx, - struct ggml_tensor * a) { - return ggml_sgn_impl(ctx, a, false); -} - -struct ggml_tensor * ggml_sgn_inplace( - struct ggml_context * ctx, - struct ggml_tensor * a) { - return ggml_sgn_impl(ctx, a, true); -} - -// ggml_neg - -struct ggml_tensor * ggml_neg_impl( - struct ggml_context * ctx, - struct ggml_tensor * a, - bool inplace) { - bool is_node = false; - - if (!inplace && (a->grad)) { - is_node = true; - } - - struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a); - - result->op = GGML_OP_NEG; - result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL; - result->src0 = a; - result->src1 = NULL; - - return result; -} - -struct ggml_tensor * ggml_neg( - struct ggml_context * ctx, - struct ggml_tensor * a) { - return ggml_neg_impl(ctx, a, false); -} - -struct ggml_tensor * ggml_neg_inplace( - struct ggml_context * ctx, - struct ggml_tensor * a) { - return ggml_neg_impl(ctx, a, true); -} - -// ggml_step - -struct ggml_tensor * ggml_step_impl( - struct ggml_context * ctx, - struct ggml_tensor * a, - bool inplace) { - bool is_node = false; - - if (!inplace && (a->grad)) { - is_node = true; - } - - struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a); - - result->op = GGML_OP_STEP; - result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL; - result->src0 = a; - result->src1 = NULL; - - return result; -} - -struct ggml_tensor * ggml_step( - struct ggml_context * ctx, - struct ggml_tensor * a) { - return ggml_step_impl(ctx, a, false); -} - -struct ggml_tensor * ggml_step_inplace( - struct ggml_context * ctx, - struct ggml_tensor * a) { - return ggml_step_impl(ctx, a, true); -} - -// ggml_relu - -struct ggml_tensor * ggml_relu_impl( - struct ggml_context * ctx, - struct ggml_tensor * a, - bool inplace) { - bool is_node = false; - - if (!inplace && (a->grad)) { - is_node = true; - } - - struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a); - - result->op = GGML_OP_RELU; - result->grad = is_node ? 
ggml_dup_tensor(ctx, result) : NULL; - result->src0 = a; - result->src1 = NULL; - - return result; -} - -struct ggml_tensor * ggml_relu( - struct ggml_context * ctx, - struct ggml_tensor * a) { - return ggml_relu_impl(ctx, a, false); -} - -struct ggml_tensor * ggml_relu_inplace( - struct ggml_context * ctx, - struct ggml_tensor * a) { - return ggml_relu_impl(ctx, a, true); -} - -// ggml_gelu - -struct ggml_tensor * ggml_gelu_impl( - struct ggml_context * ctx, - struct ggml_tensor * a, - bool inplace) { - bool is_node = false; - - if (!inplace && (a->grad)) { - is_node = true; - } - - struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a); - - result->op = GGML_OP_GELU; - result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL; - result->src0 = a; - result->src1 = NULL; - - return result; -} - -struct ggml_tensor * ggml_gelu( - struct ggml_context * ctx, - struct ggml_tensor * a) { - return ggml_gelu_impl(ctx, a, false); -} - -struct ggml_tensor * ggml_gelu_inplace( - struct ggml_context * ctx, - struct ggml_tensor * a) { - return ggml_gelu_impl(ctx, a, true); -} - -// ggml_silu - -struct ggml_tensor * ggml_silu_impl( - struct ggml_context * ctx, - struct ggml_tensor * a, - bool inplace) { - bool is_node = false; - - if (!inplace && (a->grad)) { - is_node = true; - } - - struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a); - - result->op = GGML_OP_SILU; - result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL; - result->src0 = a; - result->src1 = NULL; - - return result; -} - -struct ggml_tensor * ggml_silu( - struct ggml_context * ctx, - struct ggml_tensor * a) { - return ggml_silu_impl(ctx, a, false); -} - -struct ggml_tensor * ggml_silu_inplace( - struct ggml_context * ctx, - struct ggml_tensor * a) { - return ggml_silu_impl(ctx, a, true); -} - -// ggml_norm - -struct ggml_tensor * ggml_norm_impl( - struct ggml_context * ctx, - struct ggml_tensor * a, - bool inplace) { - bool is_node = false; - - if (!inplace && (a->grad)) { - GGML_ASSERT(false); // TODO: implement backward - is_node = true; - } - - struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a); - - result->op = GGML_OP_NORM; - result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL; - result->src0 = a; - result->src1 = NULL; // TODO: maybe store epsilon here? - - return result; -} - -struct ggml_tensor * ggml_norm( - struct ggml_context * ctx, - struct ggml_tensor * a) { - return ggml_norm_impl(ctx, a, false); -} - -struct ggml_tensor * ggml_norm_inplace( - struct ggml_context * ctx, - struct ggml_tensor * a) { - return ggml_norm_impl(ctx, a, true); -} - -struct ggml_tensor * ggml_rms_norm_impl( - struct ggml_context * ctx, - struct ggml_tensor * a, - bool inplace) { - bool is_node = false; - - if (!inplace && (a->grad)) { - GGML_ASSERT(false); // TODO: implement backward - is_node = true; - } - - struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a); - - result->op = GGML_OP_RMS_NORM; - result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL; - result->src0 = a; - result->src1 = NULL; // TODO: maybe store epsilon here? 
- - return result; -} - -struct ggml_tensor * ggml_rms_norm( - struct ggml_context * ctx, - struct ggml_tensor * a) { - return ggml_rms_norm_impl(ctx, a, false); -} - -struct ggml_tensor * ggml_rms_norm_inplace( - struct ggml_context * ctx, - struct ggml_tensor * a) { - return ggml_rms_norm_impl(ctx, a, true); -} - -// ggml_mul_mat - -struct ggml_tensor * ggml_mul_mat( - struct ggml_context * ctx, - struct ggml_tensor * a, - struct ggml_tensor * b) { - GGML_ASSERT(ggml_can_mul_mat(a, b)); - GGML_ASSERT(!ggml_is_transposed(a)); - - bool is_node = false; - - if (a->grad || b->grad) { - is_node = true; - } - - const int64_t ne[4] = { a->ne[1], b->ne[1], a->ne[2], b->ne[3] }; - struct ggml_tensor * result = ggml_new_tensor(ctx, GGML_TYPE_F32, MIN(a->n_dims, b->n_dims), ne); - - result->op = GGML_OP_MUL_MAT; - result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL; - result->src0 = a; - result->src1 = b; - - return result; -} - -// ggml_scale - -struct ggml_tensor * ggml_scale_impl( - struct ggml_context * ctx, - struct ggml_tensor * a, - struct ggml_tensor * b, - bool inplace) { - GGML_ASSERT(ggml_is_scalar(b)); - GGML_ASSERT(ggml_is_padded_1d(a)); - - bool is_node = false; - - if (!inplace && (a->grad || b->grad)) { - GGML_ASSERT(false); // TODO: implement backward - is_node = true; - } - - // TODO: when implement backward, fix this: - //struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a); - struct ggml_tensor * result = ggml_view_tensor(ctx, a); - - result->op = GGML_OP_SCALE; - result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL; - result->src0 = a; - result->src1 = b; - - return result; -} - -struct ggml_tensor * ggml_scale( - struct ggml_context * ctx, - struct ggml_tensor * a, - struct ggml_tensor * b) { - return ggml_scale_impl(ctx, a, b, false); -} - -struct ggml_tensor * ggml_scale_inplace( - struct ggml_context * ctx, - struct ggml_tensor * a, - struct ggml_tensor * b) { - return ggml_scale_impl(ctx, a, b, true); -} - -// ggml_cpy - -struct ggml_tensor * ggml_cpy_impl( - struct ggml_context * ctx, - struct ggml_tensor * a, - struct ggml_tensor * b, - bool inplace) { - GGML_ASSERT(ggml_nelements(a) == ggml_nelements(b)); - - bool is_node = false; - - if (!inplace && (a->grad || b->grad)) { - GGML_ASSERT(false); // TODO: implement backward - is_node = true; - } - - // make a view of the destination - struct ggml_tensor * result = ggml_view_tensor(ctx, b); - - result->op = GGML_OP_CPY; - result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL; - result->src0 = a; - result->src1 = b; - - return result; -} - -struct ggml_tensor * ggml_cpy( - struct ggml_context * ctx, - struct ggml_tensor * a, - struct ggml_tensor * b) { - return ggml_cpy_impl(ctx, a, b, false); -} - -struct ggml_tensor * ggml_cpy_inplace( - struct ggml_context * ctx, - struct ggml_tensor * a, - struct ggml_tensor * b) { - return ggml_cpy_impl(ctx, a, b, true); -} - -// ggml_cont - -struct ggml_tensor * ggml_cont_impl( - struct ggml_context * ctx, - struct ggml_tensor * a, - bool inplace) { - bool is_node = false; - - if (!inplace && a->grad) { - GGML_ASSERT(false); // TODO: implement backward - is_node = true; - } - - struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a); - - result->op = GGML_OP_CONT; - result->grad = is_node ? 
ggml_dup_tensor(ctx, result) : NULL; - result->src0 = a; - result->src1 = NULL; - - return result; -} - -struct ggml_tensor * ggml_cont( - struct ggml_context * ctx, - struct ggml_tensor * a) { - return ggml_cont_impl(ctx, a, false); -} - -struct ggml_tensor * ggml_cont_inplace( - struct ggml_context * ctx, - struct ggml_tensor * a) { - return ggml_cont_impl(ctx, a, true); -} - -// ggml_reshape - -struct ggml_tensor * ggml_reshape( - struct ggml_context * ctx, - struct ggml_tensor * a, - struct ggml_tensor * b) { - GGML_ASSERT(ggml_is_contiguous(a)); - GGML_ASSERT(ggml_is_contiguous(b)); - GGML_ASSERT(ggml_nelements(a) == ggml_nelements(b)); - - bool is_node = false; - - if (a->grad || b->grad) { - GGML_ASSERT(false); // TODO: implement backward - is_node = true; - } - - struct ggml_tensor * result = ggml_new_tensor_impl(ctx, a->type, b->n_dims, b->ne, a->data); - - result->op = GGML_OP_RESHAPE; - result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL; - result->src0 = a; - result->src1 = NULL; - - return result; -} - -struct ggml_tensor * ggml_reshape_2d( - struct ggml_context * ctx, - struct ggml_tensor * a, - int64_t ne0, - int64_t ne1) { - GGML_ASSERT(ggml_is_contiguous(a)); - GGML_ASSERT(ggml_nelements(a) == ne0*ne1); - - bool is_node = false; - - if (a->grad) { - GGML_ASSERT(false); // TODO: implement backward - is_node = true; - } - - const int64_t ne[2] = { ne0, ne1 }; - struct ggml_tensor * result = ggml_new_tensor_impl(ctx, a->type, 2, ne, a->data); - - result->op = GGML_OP_RESHAPE; - result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL; - result->src0 = a; - result->src1 = NULL; - - return result; -} - -struct ggml_tensor * ggml_reshape_3d( - struct ggml_context * ctx, - struct ggml_tensor * a, - int64_t ne0, - int64_t ne1, - int64_t ne2) { - GGML_ASSERT(ggml_is_contiguous(a)); - GGML_ASSERT(ggml_nelements(a) == ne0*ne1*ne2); - - bool is_node = false; - - if (a->grad) { - GGML_ASSERT(false); // TODO: implement backward - is_node = true; - } - - const int64_t ne[3] = { ne0, ne1, ne2 }; - struct ggml_tensor * result = ggml_new_tensor_impl(ctx, a->type, 3, ne, a->data); - - result->op = GGML_OP_RESHAPE; - result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL; - result->src0 = a; - result->src1 = NULL; - - return result; -} - -// ggml_view_1d - -struct ggml_tensor * ggml_view_1d( - struct ggml_context * ctx, - struct ggml_tensor * a, - int64_t ne0, - size_t offset) { - if (a->grad) { - GGML_ASSERT(false); // gradient propagation is not supported - } - - struct ggml_tensor * result = ggml_new_tensor_impl(ctx, a->type, 1, &ne0, (char *) a->data + offset); - - result->op = GGML_OP_VIEW; - result->grad = NULL; - result->src0 = a; - result->src1 = NULL; // TODO: maybe store the offset here? - - return result; -} - -// ggml_view_2d - -struct ggml_tensor * ggml_view_2d( - struct ggml_context * ctx, - struct ggml_tensor * a, - int64_t ne0, - int64_t ne1, - size_t nb1, - size_t offset) { - if (a->grad) { - GGML_ASSERT(false); // gradient propagation is not supported - } - - const int64_t ne[GGML_MAX_DIMS] = { ne0, ne1, 1, 1 }; - - struct ggml_tensor * result = ggml_new_tensor_impl(ctx, a->type, 2, ne, (char *) a->data + offset); - - result->nb[1] = nb1; - result->nb[2] = result->nb[1]*ne1; - result->nb[3] = result->nb[2]; - - result->op = GGML_OP_VIEW; - result->grad = NULL; - result->src0 = a; - result->src1 = NULL; // TODO: maybe store the offset here? 
- - return result; -} - -// ggml_view_3d - -struct ggml_tensor * ggml_view_3d( - struct ggml_context * ctx, - struct ggml_tensor * a, - int64_t ne0, - int64_t ne1, - int64_t ne2, - size_t nb1, - size_t nb2, - size_t offset) { - if (a->grad) { - GGML_ASSERT(false); // gradient propagation is not supported - } - - const int64_t ne[GGML_MAX_DIMS] = { ne0, ne1, ne2, 1 }; - - struct ggml_tensor * result = ggml_new_tensor_impl(ctx, a->type, 3, ne, (char *) a->data + offset); - - result->nb[1] = nb1; - result->nb[2] = nb2; - result->nb[3] = result->nb[2]*ne2; - - result->op = GGML_OP_VIEW; - result->grad = NULL; - result->src0 = a; - result->src1 = NULL; // TODO: maybe store the offset here? - - return result; -} - -// ggml_permute - -struct ggml_tensor * ggml_permute( - struct ggml_context * ctx, - struct ggml_tensor * a, - int axis0, - int axis1, - int axis2, - int axis3) { - GGML_ASSERT(axis0 >= 0 && axis0 < GGML_MAX_DIMS); - GGML_ASSERT(axis1 >= 0 && axis1 < GGML_MAX_DIMS); - GGML_ASSERT(axis2 >= 0 && axis2 < GGML_MAX_DIMS); - GGML_ASSERT(axis3 >= 0 && axis3 < GGML_MAX_DIMS); - - GGML_ASSERT(axis0 != axis1); - GGML_ASSERT(axis0 != axis2); - GGML_ASSERT(axis0 != axis3); - GGML_ASSERT(axis1 != axis2); - GGML_ASSERT(axis1 != axis3); - GGML_ASSERT(axis2 != axis3); - - bool is_node = false; - - if (a->grad) { - GGML_ASSERT(false); // TODO: implement backward - is_node = true; - } - - struct ggml_tensor * result = ggml_view_tensor(ctx, a); - - int ne[GGML_MAX_DIMS]; - int nb[GGML_MAX_DIMS]; - - ne[axis0] = a->ne[0]; - ne[axis1] = a->ne[1]; - ne[axis2] = a->ne[2]; - ne[axis3] = a->ne[3]; - - nb[axis0] = a->nb[0]; - nb[axis1] = a->nb[1]; - nb[axis2] = a->nb[2]; - nb[axis3] = a->nb[3]; - - result->ne[0] = ne[0]; - result->ne[1] = ne[1]; - result->ne[2] = ne[2]; - result->ne[3] = ne[3]; - - result->nb[0] = nb[0]; - result->nb[1] = nb[1]; - result->nb[2] = nb[2]; - result->nb[3] = nb[3]; - - result->op = GGML_OP_PERMUTE; - result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL; - result->src0 = a; - result->src1 = NULL; // TODO: maybe store the permutation here? - - return result; -} - -// ggml_transpose - -struct ggml_tensor * ggml_transpose( - struct ggml_context * ctx, - struct ggml_tensor * a) { - bool is_node = false; - - if (a->grad) { - GGML_ASSERT(false); // TODO: implement backward - is_node = true; - } - - struct ggml_tensor * result = ggml_view_tensor(ctx, a); - - result->ne[0] = a->ne[1]; - result->ne[1] = a->ne[0]; - - result->nb[0] = a->nb[1]; - result->nb[1] = a->nb[0]; - - result->op = GGML_OP_TRANSPOSE; - result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL; - result->src0 = a; - result->src1 = NULL; - - return result; -} - -// ggml_get_rows - -struct ggml_tensor * ggml_get_rows( - struct ggml_context * ctx, - struct ggml_tensor * a, - struct ggml_tensor * b) { - GGML_ASSERT(ggml_is_matrix(a) && ggml_is_vector(b) && b->type == GGML_TYPE_I32); - - bool is_node = false; - - if (a->grad || b->grad) { - GGML_ASSERT(false); // TODO: implement backward - is_node = true; - } - - // TODO: implement non F32 return - //struct ggml_tensor * result = ggml_new_tensor_2d(ctx, a->type, a->ne[0], b->ne[0]); - struct ggml_tensor * result = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, a->ne[0], b->ne[0]); - - result->op = GGML_OP_GET_ROWS; - result->grad = is_node ? 
ggml_dup_tensor(ctx, result) : NULL; - result->src0 = a; - result->src1 = b; - - return result; -} - -// ggml_diag_mask_inf - -struct ggml_tensor * ggml_diag_mask_inf( - struct ggml_context * ctx, - struct ggml_tensor * a, - int n_past) { - bool is_node = false; - - if (a->grad) { - GGML_ASSERT(false); // TODO: implement backward - is_node = true; - } - - // TODO: when implement backward, fix this: - //struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a); - struct ggml_tensor * result = ggml_view_tensor(ctx, a); - struct ggml_tensor * b = ggml_new_i32(ctx, n_past); - - result->op = GGML_OP_DIAG_MASK_INF; - result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL; - result->src0 = a; - result->src1 = b; - - return result; -} - -// ggml_soft_max - -struct ggml_tensor * ggml_soft_max( - struct ggml_context * ctx, - struct ggml_tensor * a) { - bool is_node = false; - - if (a->grad) { - GGML_ASSERT(false); // TODO: implement backward - is_node = true; - } - - // TODO: when implement backward, fix this: - //struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a); - struct ggml_tensor * result = ggml_view_tensor(ctx, a); - - result->op = GGML_OP_SOFT_MAX; - result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL; - result->src0 = a; - result->src1 = NULL; - - return result; -} - -// ggml_rope - -struct ggml_tensor * ggml_rope( - struct ggml_context * ctx, - struct ggml_tensor * a, - int n_past, - int n_dims, - int mode) { - GGML_ASSERT(n_past >= 0); - bool is_node = false; - - if (a->grad) { - GGML_ASSERT(false); // TODO: implement backward - is_node = true; - } - - // TODO: when implement backward, fix this: - //struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a); - struct ggml_tensor * result = ggml_view_tensor(ctx, a); - - struct ggml_tensor * b = ggml_new_tensor_1d(ctx, GGML_TYPE_I32, 3); - ((int32_t *) b->data)[0] = n_past; - ((int32_t *) b->data)[1] = n_dims; - ((int32_t *) b->data)[2] = mode; - - result->op = GGML_OP_ROPE; - result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL; - result->src0 = a; - result->src1 = b; - - return result; -} - -// ggml_conv_1d_1s - -struct ggml_tensor * ggml_conv_1d_1s( - struct ggml_context * ctx, - struct ggml_tensor * a, - struct ggml_tensor * b) { - GGML_ASSERT(ggml_is_matrix(b)); - GGML_ASSERT(a->ne[1] == b->ne[1]); - GGML_ASSERT(a->ne[3] == 1); - bool is_node = false; - - if (a->grad || b->grad) { - GGML_ASSERT(false); // TODO: implement backward - is_node = true; - } - - const int64_t ne[4] = { b->ne[0], a->ne[2], 1, 1, }; - struct ggml_tensor * result = ggml_new_tensor(ctx, GGML_TYPE_F32, 2, ne); - - result->op = GGML_OP_CONV_1D_1S; - result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL; - result->src0 = a; - result->src1 = b; - - return result; -} - -// ggml_conv_1d_2s - -struct ggml_tensor * ggml_conv_1d_2s( - struct ggml_context * ctx, - struct ggml_tensor * a, - struct ggml_tensor * b) { - GGML_ASSERT(ggml_is_matrix(b)); - GGML_ASSERT(a->ne[1] == b->ne[1]); - GGML_ASSERT(a->ne[3] == 1); - bool is_node = false; - - if (a->grad || b->grad) { - GGML_ASSERT(false); // TODO: implement backward - is_node = true; - } - - const int64_t ne[4] = { b->ne[0]/2, a->ne[2], 1, 1, }; - struct ggml_tensor * result = ggml_new_tensor(ctx, GGML_TYPE_F32, 2, ne); - - result->op = GGML_OP_CONV_1D_2S; - result->grad = is_node ? 
ggml_dup_tensor(ctx, result) : NULL; - result->src0 = a; - result->src1 = b; - - return result; -} - -// ggml_flash_attn - -struct ggml_tensor * ggml_flash_attn( - struct ggml_context * ctx, - struct ggml_tensor * q, - struct ggml_tensor * k, - struct ggml_tensor * v, - bool masked) { - GGML_ASSERT(ggml_can_mul_mat(k, q)); - // TODO: check if vT can be multiplied by (k*qT) - - bool is_node = false; - - if (q->grad || k->grad || v->grad) { - GGML_ASSERT(false); // TODO: implement backward - is_node = true; - } - - //struct ggml_tensor * result = ggml_dup_tensor(ctx, q); - struct ggml_tensor * result = ggml_new_tensor(ctx, GGML_TYPE_F32, 4, q->ne); - - result->op = GGML_OP_FLASH_ATTN; - result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL; - result->src0 = q; - result->src1 = k; - result->opt[0] = v; - result->opt[1] = ggml_new_i32(ctx, masked ? 1 : 0); - - return result; -} - -// ggml_flash_ff - -struct ggml_tensor * ggml_flash_ff( - struct ggml_context * ctx, - struct ggml_tensor * a, - struct ggml_tensor * b0, - struct ggml_tensor * b1, - struct ggml_tensor * c0, - struct ggml_tensor * c1) { - GGML_ASSERT(ggml_can_mul_mat(b0, a)); - // TODO: more checks - - bool is_node = false; - - if (a->grad || b0->grad || b1->grad || c0->grad || c1->grad) { - GGML_ASSERT(false); // TODO: implement backward - is_node = true; - } - - //struct ggml_tensor * result = ggml_dup_tensor(ctx, a); - struct ggml_tensor * result = ggml_new_tensor(ctx, GGML_TYPE_F32, 4, a->ne); - - result->op = GGML_OP_FLASH_FF; - result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL; - result->src0 = a; - result->src1 = b0; - result->opt[0] = b1; - result->opt[1] = c0; - result->opt[2] = c1; - - return result; -} - -// ggml_map_unary - -struct ggml_tensor * ggml_map_unary_impl_f32( - struct ggml_context * ctx, - struct ggml_tensor * a, - const ggml_unary_op_f32_t fun, - bool inplace) { - bool is_node = false; - - if (!inplace && a->grad) { - is_node = true; - } - - struct ggml_tensor * addr_tensor = ggml_new_tensor_1d(ctx, GGML_TYPE_I32, sizeof(void *) / sizeof(int32_t)); - *((void (**)(void))addr_tensor->data) = (void (*)(void))fun; - struct ggml_tensor *result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a); - - result->op = GGML_OP_MAP_UNARY; - result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL; - result->src0 = a; - result->opt[0] = addr_tensor; - - return result; -} - -struct ggml_tensor * ggml_map_unary_f32( - struct ggml_context * ctx, - struct ggml_tensor * a, - const ggml_unary_op_f32_t fun) { - return ggml_map_unary_impl_f32(ctx, a, fun, false); -} - -struct ggml_tensor * ggml_map_unary_inplace_f32( - struct ggml_context * ctx, - struct ggml_tensor * a, - const ggml_unary_op_f32_t fun) { - return ggml_map_unary_impl_f32(ctx, a, fun, true); -} - -// ggml_map_binary - -struct ggml_tensor * ggml_map_binary_impl_f32( - struct ggml_context * ctx, - struct ggml_tensor * a, - struct ggml_tensor * b, - const ggml_binary_op_f32_t fun, - bool inplace) { - GGML_ASSERT(ggml_are_same_shape(a, b)); - - bool is_node = false; - - if (!inplace && (a->grad || b->grad)) { - is_node = true; - } - - struct ggml_tensor * addr_tensor = ggml_new_tensor_1d(ctx, GGML_TYPE_I32, sizeof(void *) / sizeof(int32_t)); - *((void (**)(void))addr_tensor->data) = (void (*)(void))fun; - struct ggml_tensor *result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a); - - result->op = GGML_OP_MAP_BINARY; - result->grad = is_node ? 
ggml_dup_tensor(ctx, result) : NULL; - result->src0 = a; - result->src1 = b; - result->opt[0] = addr_tensor; - - return result; -} - -struct ggml_tensor * ggml_map_binary_f32( - struct ggml_context * ctx, - struct ggml_tensor * a, - struct ggml_tensor * b, - const ggml_binary_op_f32_t fun) { - return ggml_map_binary_impl_f32(ctx, a, b, fun, false); -} - -struct ggml_tensor * ggml_map_binary_inplace_f32( - struct ggml_context * ctx, - struct ggml_tensor * a, - struct ggml_tensor * b, - const ggml_binary_op_f32_t fun) { - return ggml_map_binary_impl_f32(ctx, a, b, fun, true); -} - -//////////////////////////////////////////////////////////////////////////////// - -void ggml_set_param( - struct ggml_context * ctx, - struct ggml_tensor * tensor) { - tensor->is_param = true; - - GGML_ASSERT(tensor->grad == NULL); - tensor->grad = ggml_dup_tensor(ctx, tensor); -} - -// ggml_compute_forward_dup - -static void ggml_compute_forward_dup_f16( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - struct ggml_tensor * dst) { - GGML_ASSERT(ggml_nelements(dst) == ggml_nelements(src0)); - - if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) { - return; - } - - const int64_t ne00 = src0->ne[0]; - const int64_t ne01 = src0->ne[1]; - const int64_t ne02 = src0->ne[2]; - const int64_t ne03 = src0->ne[3]; - - const int64_t ne0 = dst->ne[0]; - const int64_t ne1 = dst->ne[1]; - const int64_t ne2 = dst->ne[2]; - const int64_t ne3 = dst->ne[3]; - - const size_t nb00 = src0->nb[0]; - const size_t nb01 = src0->nb[1]; - const size_t nb02 = src0->nb[2]; - const size_t nb03 = src0->nb[3]; - - const size_t nb0 = dst->nb[0]; - const size_t nb1 = dst->nb[1]; - const size_t nb2 = dst->nb[2]; - const size_t nb3 = dst->nb[3]; - - const int ith = params->ith; // thread index - const int nth = params->nth; // number of threads - - if (ggml_is_contiguous(src0) && ggml_is_contiguous(dst) && src0->type == dst->type) { - // parallelize by elements - const int ne = ggml_nelements(dst); - const int dr = (ne + nth - 1) / nth; - const int ie0 = dr * ith; - const int ie1 = MIN(ie0 + dr, ne); - - memcpy( - ((char *) dst->data + ie0*nb0), - ((char *) src0->data + ie0*nb00), - (ie1 - ie0) * GGML_TYPE_SIZE[src0->type]); - - return; - } - - // parallelize by rows - const int nr = ne01; - // number of rows per thread - const int dr = (nr + nth - 1) / nth; - // row range for this thread - const int ir0 = dr * ith; - const int ir1 = MIN(ir0 + dr, nr); - - if (src0->type == dst->type && - ne00 == ne0 && - nb00 == GGML_TYPE_SIZE[src0->type] && nb0 == GGML_TYPE_SIZE[dst->type]) { - // copy by rows - const size_t rs = ne00*nb00; - for (int64_t i03 = 0; i03 < ne03; i03++) { - for (int64_t i02 = 0; i02 < ne02; i02++) { - for (int64_t i01 = ir0; i01 < ir1; i01++) { - memcpy( - ((char *) dst->data + i01*nb1 + i02*nb2 + i03*nb3), - ((char *) src0->data + i01*nb01 + i02*nb02 + i03*nb03), - rs); - } - } - } - return; - } - - // TODO: add more special-case implementations for tensor shapes/strides that can benefit from memcpy - - if (ggml_is_contiguous(dst)) { - if (nb00 == sizeof(ggml_fp16_t)) { - if (dst->type == GGML_TYPE_F16) { - size_t id = 0; - const size_t rs = ne00 * nb00; - char * dst_ptr = (char *) dst->data; - - for (int i03 = 0; i03 < ne03; i03++) { - for (int i02 = 0; i02 < ne02; i02++) { - id += rs * ir0; - for (int i01 = ir0; i01 < ir1; i01++) { - const char * src0_ptr = (char *) src0->data + i01*nb01 + i02*nb02 + i03*nb03; - memcpy(dst_ptr + id, src0_ptr, rs); - id += rs; - } - id += rs * 
(ne01 - ir1); - } - } - } else if (dst->type == GGML_TYPE_F32) { - size_t id = 0; - float * dst_ptr = (float *) dst->data; - - for (int i03 = 0; i03 < ne03; i03++) { - for (int i02 = 0; i02 < ne02; i02++) { - id += ne00 * ir0; - for (int i01 = ir0; i01 < ir1; i01++) { - const ggml_fp16_t * src0_ptr = (ggml_fp16_t *) ((char *) src0->data + i01*nb01 + i02*nb02 + i03*nb03); - for (int i00 = 0; i00 < ne00; i00++) { - dst_ptr[id] = GGML_FP16_TO_FP32(src0_ptr[i00]); - id++; - } - } - id += ne00 * (ne01 - ir1); - } - } - } else if (ggml_is_quantized(dst->type)) { - quantize_row_q_t const quantize_row_q = quantize_fns[dst->type].quantize_row_q; - float * src0_f32 = (float *) params->wdata + (ne00 + CACHE_LINE_SIZE_F32) * ith; - - size_t id = 0; - size_t rs = nb0 * (ne00 / GGML_BLCK_SIZE[dst->type]); - char * dst_ptr = (char *) dst->data; - - for (int i03 = 0; i03 < ne03; i03++) { - for (int i02 = 0; i02 < ne02; i02++) { - id += rs * ir0; - for (int i01 = ir0; i01 < ir1; i01++) { - const ggml_fp16_t * src0_ptr = (ggml_fp16_t *) ((char *) src0->data + i01*nb01 + i02*nb02 + i03*nb03); - - for (int i00 = 0; i00 < ne00; i00++) { - src0_f32[i00] = GGML_FP16_TO_FP32(src0_ptr[i00]); - } - - quantize_row_q(src0_f32, dst_ptr + id, ne00); - id += rs; - } - id += rs * (ne01 - ir1); - } - } - } else { - GGML_ASSERT(false); // TODO: implement - } - } else { - //printf("%s: this is not optimal - fix me\n", __func__); - - if (dst->type == GGML_TYPE_F32) { - size_t id = 0; - float * dst_ptr = (float *) dst->data; - - for (int i03 = 0; i03 < ne03; i03++) { - for (int i02 = 0; i02 < ne02; i02++) { - id += ne00 * ir0; - for (int i01 = ir0; i01 < ir1; i01++) { - for (int i00 = 0; i00 < ne00; i00++) { - const ggml_fp16_t * src0_ptr = (ggml_fp16_t *) ((char *) src0->data + i00*nb00 + i01*nb01 + i02*nb02 + i03*nb03); - - dst_ptr[id] = GGML_FP16_TO_FP32(*src0_ptr); - id++; - } - } - id += ne00 * (ne01 - ir1); - } - } - } else if (dst->type == GGML_TYPE_F16) { - size_t id = 0; - ggml_fp16_t * dst_ptr = (ggml_fp16_t *) dst->data; - - for (int i03 = 0; i03 < ne03; i03++) { - for (int i02 = 0; i02 < ne02; i02++) { - id += ne00 * ir0; - for (int i01 = ir0; i01 < ir1; i01++) { - for (int i00 = 0; i00 < ne00; i00++) { - const ggml_fp16_t * src0_ptr = (ggml_fp16_t *) ((char *) src0->data + i00*nb00 + i01*nb01 + i02*nb02 + i03*nb03); - - dst_ptr[id] = *src0_ptr; - id++; - } - } - id += ne00 * (ne01 - ir1); - } - } - } else { - GGML_ASSERT(false); // TODO: implement - } - } - return; - } - - // dst counters - int64_t i10 = 0; - int64_t i11 = 0; - int64_t i12 = 0; - int64_t i13 = 0; - - if (dst->type == GGML_TYPE_F16) { - for (int64_t i03 = 0; i03 < ne03; i03++) { - for (int64_t i02 = 0; i02 < ne02; i02++) { - i10 += ne00 * ir0; - while (i10 >= ne0) { - i10 -= ne0; - if (++i11 == ne1) { - i11 = 0; - if (++i12 == ne2) { - i12 = 0; - if (++i13 == ne3) { - i13 = 0; - } - } - } - } - for (int64_t i01 = ir0; i01 < ir1; i01++) { - for (int64_t i00 = 0; i00 < ne00; i00++) { - const char * src0_ptr = ((char *) src0->data + i00*nb00 + i01*nb01 + i02*nb02 + i03*nb03); - char * dst_ptr = ((char *) dst->data + i10*nb0 + i11*nb1 + i12*nb2 + i13*nb3); - - memcpy(dst_ptr, src0_ptr, sizeof(ggml_fp16_t)); - - if (++i10 == ne00) { - i10 = 0; - if (++i11 == ne01) { - i11 = 0; - if (++i12 == ne02) { - i12 = 0; - if (++i13 == ne03) { - i13 = 0; - } - } - } - } - } - } - i10 += ne00 * (ne01 - ir1); - while (i10 >= ne0) { - i10 -= ne0; - if (++i11 == ne1) { - i11 = 0; - if (++i12 == ne2) { - i12 = 0; - if (++i13 == ne3) { - i13 = 0; - } - } - } - } - } - 
} - } else if (dst->type == GGML_TYPE_F32) { - for (int64_t i03 = 0; i03 < ne03; i03++) { - for (int64_t i02 = 0; i02 < ne02; i02++) { - i10 += ne00 * ir0; - while (i10 >= ne0) { - i10 -= ne0; - if (++i11 == ne1) { - i11 = 0; - if (++i12 == ne2) { - i12 = 0; - if (++i13 == ne3) { - i13 = 0; - } - } - } - } - for (int64_t i01 = ir0; i01 < ir1; i01++) { - for (int64_t i00 = 0; i00 < ne00; i00++) { - const char * src0_ptr = ((char *) src0->data + i00*nb00 + i01*nb01 + i02*nb02 + i03*nb03); - char * dst_ptr = ((char *) dst->data + i10*nb0 + i11*nb1 + i12*nb2 + i13*nb3); - - *(float *) dst_ptr = GGML_FP16_TO_FP32(*(const ggml_fp16_t *) src0_ptr); - - if (++i10 == ne0) { - i10 = 0; - if (++i11 == ne1) { - i11 = 0; - if (++i12 == ne2) { - i12 = 0; - if (++i13 == ne3) { - i13 = 0; - } - } - } - } - } - } - i10 += ne00 * (ne01 - ir1); - while (i10 >= ne0) { - i10 -= ne0; - if (++i11 == ne1) { - i11 = 0; - if (++i12 == ne2) { - i12 = 0; - if (++i13 == ne3) { - i13 = 0; - } - } - } - } - } - } - } else { - GGML_ASSERT(false); // TODO: implement - } -} - -static void ggml_compute_forward_dup_f32( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - struct ggml_tensor * dst) { - GGML_ASSERT(ggml_nelements(dst) == ggml_nelements(src0)); - - if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) { - return; - } - - const int64_t ne00 = src0->ne[0]; - const int64_t ne01 = src0->ne[1]; - const int64_t ne02 = src0->ne[2]; - const int64_t ne03 = src0->ne[3]; - - const int64_t ne0 = dst->ne[0]; - const int64_t ne1 = dst->ne[1]; - const int64_t ne2 = dst->ne[2]; - const int64_t ne3 = dst->ne[3]; - - const size_t nb00 = src0->nb[0]; - const size_t nb01 = src0->nb[1]; - const size_t nb02 = src0->nb[2]; - const size_t nb03 = src0->nb[3]; - - const size_t nb0 = dst->nb[0]; - const size_t nb1 = dst->nb[1]; - const size_t nb2 = dst->nb[2]; - const size_t nb3 = dst->nb[3]; - - const int ith = params->ith; // thread index - const int nth = params->nth; // number of threads - - if (ggml_is_contiguous(src0) && ggml_is_contiguous(dst) && src0->type == dst->type) { - // parallelize by elements - const int ne = ggml_nelements(dst); - const int dr = (ne + nth - 1) / nth; - const int ie0 = dr * ith; - const int ie1 = MIN(ie0 + dr, ne); - - memcpy( - ((char *) dst->data + ie0*nb0), - ((char *) src0->data + ie0*nb00), - (ie1 - ie0) * GGML_TYPE_SIZE[src0->type]); - - return; - } - - // parallelize by rows - const int nr = ne01; - // number of rows per thread - const int dr = (nr + nth - 1) / nth; - // row range for this thread - const int ir0 = dr * ith; - const int ir1 = MIN(ir0 + dr, nr); - - if (src0->type == dst->type && - ne00 == ne0 && - nb00 == GGML_TYPE_SIZE[src0->type] && nb0 == GGML_TYPE_SIZE[dst->type]) { - // copy by rows - const size_t rs = ne00*nb00; - for (int64_t i03 = 0; i03 < ne03; i03++) { - for (int64_t i02 = 0; i02 < ne02; i02++) { - for (int64_t i01 = ir0; i01 < ir1; i01++) { - memcpy( - ((char *) dst->data + i01*nb1 + i02*nb2 + i03*nb3), - ((char *) src0->data + i01*nb01 + i02*nb02 + i03*nb03), - rs); - } - } - } - return; - } - - if (ggml_is_contiguous(dst)) { - // TODO: simplify - if (nb00 == sizeof(float)) { - if (dst->type == GGML_TYPE_F32) { - size_t id = 0; - const size_t rs = ne00 * nb00; - char * dst_ptr = (char *) dst->data; - - for (int i03 = 0; i03 < ne03; i03++) { - for (int i02 = 0; i02 < ne02; i02++) { - id += rs * ir0; - for (int i01 = ir0; i01 < ir1; i01++) { - const char * src0_ptr = (char *) src0->data + i01*nb01 + i02*nb02 + i03*nb03; - 
memcpy(dst_ptr + id, src0_ptr, rs); - id += rs; - } - id += rs * (ne01 - ir1); - } - } - } else if (dst->type == GGML_TYPE_F16) { - size_t id = 0; - ggml_fp16_t * dst_ptr = (ggml_fp16_t *) dst->data; - - for (int i03 = 0; i03 < ne03; i03++) { - for (int i02 = 0; i02 < ne02; i02++) { - id += ne00 * ir0; - for (int i01 = ir0; i01 < ir1; i01++) { - for (int i00 = 0; i00 < ne00; i00++) { - const float * src0_ptr = (float *) ((char *) src0->data + i00*nb00 + i01*nb01 + i02*nb02 + i03*nb03); - - dst_ptr[id] = GGML_FP32_TO_FP16(*src0_ptr); - id++; - } - } - id += ne00 * (ne01 - ir1); - } - } - } else if (ggml_is_quantized(dst->type)) { - quantize_row_q_t const quantize_row_q = quantize_fns[dst->type].quantize_row_q; - - size_t id = 0; - size_t rs = nb0 * (ne00 / GGML_BLCK_SIZE[dst->type]); - char * dst_ptr = (char *) dst->data; - - for (int i03 = 0; i03 < ne03; i03++) { - for (int i02 = 0; i02 < ne02; i02++) { - id += rs * ir0; - for (int i01 = ir0; i01 < ir1; i01++) { - const float * src0_ptr = (float *) ((char *) src0->data + i01*nb01 + i02*nb02 + i03*nb03); - quantize_row_q(src0_ptr, dst_ptr + id, ne00); - id += rs; - } - id += rs * (ne01 - ir1); - } - } - } else { - GGML_ASSERT(false); // TODO: implement - } - } else { - //printf("%s: this is not optimal - fix me\n", __func__); - - if (dst->type == GGML_TYPE_F32) { - size_t id = 0; - float * dst_ptr = (float *) dst->data; - - for (int i03 = 0; i03 < ne03; i03++) { - for (int i02 = 0; i02 < ne02; i02++) { - id += ne00 * ir0; - for (int i01 = ir0; i01 < ir1; i01++) { - for (int i00 = 0; i00 < ne00; i00++) { - const float * src0_ptr = (float *) ((char *) src0->data + i00*nb00 + i01*nb01 + i02*nb02 + i03*nb03); - - dst_ptr[id] = *src0_ptr; - id++; - } - } - id += ne00 * (ne01 - ir1); - } - } - } else if (dst->type == GGML_TYPE_F16) { - size_t id = 0; - ggml_fp16_t * dst_ptr = (ggml_fp16_t *) dst->data; - - for (int i03 = 0; i03 < ne03; i03++) { - for (int i02 = 0; i02 < ne02; i02++) { - id += ne00 * ir0; - for (int i01 = ir0; i01 < ir1; i01++) { - for (int i00 = 0; i00 < ne00; i00++) { - const float * src0_ptr = (float *) ((char *) src0->data + i00*nb00 + i01*nb01 + i02*nb02 + i03*nb03); - - dst_ptr[id] = GGML_FP32_TO_FP16(*src0_ptr); - id++; - } - } - id += ne00 * (ne01 - ir1); - } - } - } else { - GGML_ASSERT(false); // TODO: implement - } - } - - return; - } - - // dst counters - - int64_t i10 = 0; - int64_t i11 = 0; - int64_t i12 = 0; - int64_t i13 = 0; - - if (dst->type == GGML_TYPE_F32) { - for (int64_t i03 = 0; i03 < ne03; i03++) { - for (int64_t i02 = 0; i02 < ne02; i02++) { - i10 += ne00 * ir0; - while (i10 >= ne0) { - i10 -= ne0; - if (++i11 == ne1) { - i11 = 0; - if (++i12 == ne2) { - i12 = 0; - if (++i13 == ne3) { - i13 = 0; - } - } - } - } - for (int64_t i01 = ir0; i01 < ir1; i01++) { - for (int64_t i00 = 0; i00 < ne00; i00++) { - const char * src0_ptr = ((char *) src0->data + i00*nb00 + i01*nb01 + i02*nb02 + i03*nb03); - char * dst_ptr = ((char *) dst->data + i10*nb0 + i11*nb1 + i12*nb2 + i13*nb3); - - memcpy(dst_ptr, src0_ptr, sizeof(float)); - - if (++i10 == ne0) { - i10 = 0; - if (++i11 == ne1) { - i11 = 0; - if (++i12 == ne2) { - i12 = 0; - if (++i13 == ne3) { - i13 = 0; - } - } - } - } - } - } - i10 += ne00 * (ne01 - ir1); - while (i10 >= ne0) { - i10 -= ne0; - if (++i11 == ne1) { - i11 = 0; - if (++i12 == ne2) { - i12 = 0; - if (++i13 == ne3) { - i13 = 0; - } - } - } - } - } - } - } else if (dst->type == GGML_TYPE_F16) { - for (int64_t i03 = 0; i03 < ne03; i03++) { - for (int64_t i02 = 0; i02 < ne02; i02++) { - i10 += ne00 * 
ir0; - while (i10 >= ne0) { - i10 -= ne0; - if (++i11 == ne1) { - i11 = 0; - if (++i12 == ne2) { - i12 = 0; - if (++i13 == ne3) { - i13 = 0; - } - } - } - } - for (int64_t i01 = ir0; i01 < ir1; i01++) { - for (int64_t i00 = 0; i00 < ne00; i00++) { - const char * src0_ptr = ((char *) src0->data + i00*nb00 + i01*nb01 + i02*nb02 + i03*nb03); - char * dst_ptr = ((char *) dst->data + i10*nb0 + i11*nb1 + i12*nb2 + i13*nb3); - - *(ggml_fp16_t *) dst_ptr = GGML_FP32_TO_FP16(*(const float *) src0_ptr); - - if (++i10 == ne0) { - i10 = 0; - if (++i11 == ne1) { - i11 = 0; - if (++i12 == ne2) { - i12 = 0; - if (++i13 == ne3) { - i13 = 0; - } - } - } - } - } - } - i10 += ne00 * (ne01 - ir1); - while (i10 >= ne0) { - i10 -= ne0; - if (++i11 == ne1) { - i11 = 0; - if (++i12 == ne2) { - i12 = 0; - if (++i13 == ne3) { - i13 = 0; - } - } - } - } - } - } - } else { - GGML_ASSERT(false); // TODO: implement - } -} - -static void ggml_compute_forward_dup( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - struct ggml_tensor * dst) { - switch (src0->type) { - case GGML_TYPE_F16: - { - ggml_compute_forward_dup_f16(params, src0, dst); - } break; - case GGML_TYPE_F32: - { - ggml_compute_forward_dup_f32(params, src0, dst); - } break; - default: - { - GGML_ASSERT(false); - } break; - } -} - -// ggml_compute_forward_add - -static void ggml_compute_forward_add_f32( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - const struct ggml_tensor * src1, - struct ggml_tensor * dst) { - GGML_ASSERT(ggml_are_same_shape(src0, src1) && ggml_are_same_shape(src0, dst)); - - if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) { - return; - } - - const int ith = params->ith; - const int nth = params->nth; - - const int n = ggml_nrows(src0); - const int nc = src0->ne[0]; - - const size_t nb00 = src0->nb[0]; - const size_t nb01 = src0->nb[1]; - - const size_t nb10 = src1->nb[0]; - const size_t nb11 = src1->nb[1]; - - const size_t nb0 = dst->nb[0]; - const size_t nb1 = dst->nb[1]; - - GGML_ASSERT( nb0 == sizeof(float)); - GGML_ASSERT(nb00 == sizeof(float)); - - if (nb10 == sizeof(float)) { - for (int j = ith; j < n; j += nth) { -#ifdef GGML_USE_ACCELERATE - vDSP_vadd( - (float *) ((char *) src0->data + j*nb01), 1, - (float *) ((char *) src1->data + j*nb11), 1, - (float *) ((char *) dst->data + j*nb1), 1, nc); -#else - ggml_vec_add_f32(nc, - (float *) ((char *) dst->data + j*nb1), - (float *) ((char *) src0->data + j*nb01), - (float *) ((char *) src1->data + j*nb11)); -#endif - } - } else { - // src1 is not contiguous - for (int j = ith; j < n; j += nth) { - float * dst_ptr = (float *) ((char *) dst->data + j*nb1); - float * src0_ptr = (float *) ((char *) src0->data + j*nb01); - for (int i = 0; i < nc; i++) { - float * src1_ptr = (float *) ((char *) src1->data + j*nb11 + i*nb10); - - dst_ptr[i] = src0_ptr[i] + *src1_ptr; - } - } - } -} - -static void ggml_compute_forward_add_f16_f32( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - const struct ggml_tensor * src1, - struct ggml_tensor * dst) { - GGML_ASSERT(ggml_are_same_shape(src0, src1) && ggml_are_same_shape(src0, dst)); - - if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) { - return; - } - - const int ith = params->ith; - const int nth = params->nth; - - const int n = ggml_nrows(src0); - const int nc = src0->ne[0]; - - const size_t nb00 = src0->nb[0]; - const size_t nb01 = src0->nb[1]; - - const size_t nb10 = src1->nb[0]; - const size_t nb11 = 
src1->nb[1]; - - const size_t nb0 = dst->nb[0]; - const size_t nb1 = dst->nb[1]; - - GGML_ASSERT(src0->type == GGML_TYPE_F16); - GGML_ASSERT(src1->type == GGML_TYPE_F32); - GGML_ASSERT(dst->type == GGML_TYPE_F16); - - GGML_ASSERT( nb0 == sizeof(ggml_fp16_t)); - GGML_ASSERT(nb00 == sizeof(ggml_fp16_t)); - - if (nb10 == sizeof(float)) { - for (int j = ith; j < n; j += nth) { - ggml_fp16_t * dst_ptr = (ggml_fp16_t *) ((char *) dst->data + j*nb1); - ggml_fp16_t * src0_ptr = (ggml_fp16_t *) ((char *) src0->data + j*nb01); - for (int i = 0; i < nc; i++) { - float * src1_ptr = (float *) ((char *) src1->data + j*nb11 + i*nb10); - dst_ptr[i] = GGML_FP32_TO_FP16(GGML_FP16_TO_FP32(src0_ptr[i]) + *src1_ptr); - } - } - } - else { - // src1 is not contiguous - GGML_ASSERT(false); - } -} - -static void ggml_compute_forward_add_f16_f16( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - const struct ggml_tensor * src1, - struct ggml_tensor * dst) { - GGML_ASSERT(ggml_are_same_shape(src0, src1) && ggml_are_same_shape(src0, dst)); - - if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) { - return; - } - - const int ith = params->ith; - const int nth = params->nth; - - const int n = ggml_nrows(src0); - const int nc = src0->ne[0]; - - const size_t nb00 = src0->nb[0]; - const size_t nb01 = src0->nb[1]; - - const size_t nb10 = src1->nb[0]; - const size_t nb11 = src1->nb[1]; - - const size_t nb0 = dst->nb[0]; - const size_t nb1 = dst->nb[1]; - - GGML_ASSERT(src0->type == GGML_TYPE_F16); - GGML_ASSERT(src1->type == GGML_TYPE_F16); - GGML_ASSERT(dst->type == GGML_TYPE_F16); - - GGML_ASSERT( nb0 == sizeof(ggml_fp16_t)); - GGML_ASSERT(nb00 == sizeof(ggml_fp16_t)); - - if (nb10 == sizeof(ggml_fp16_t)) { - for (int j = ith; j < n; j += nth) { - ggml_fp16_t * dst_ptr = (ggml_fp16_t *) ((char *) dst->data + j*nb1); - ggml_fp16_t * src0_ptr = (ggml_fp16_t *) ((char *) src0->data + j*nb01); - for (int i = 0; i < nc; i++) { - ggml_fp16_t * src1_ptr = (ggml_fp16_t *) ((char *) src1->data + j*nb11 + i*nb10); - dst_ptr[i] = GGML_FP32_TO_FP16(GGML_FP16_TO_FP32(src0_ptr[i]) + GGML_FP16_TO_FP32(*src1_ptr)); - } - } - } - else { - // src1 is not contiguous - GGML_ASSERT(false); - } -} - -static void ggml_compute_forward_add_q_f32( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - const struct ggml_tensor * src1, - struct ggml_tensor * dst) { - GGML_ASSERT(ggml_are_same_shape(src0, src1) && ggml_are_same_shape(src0, dst)); - - if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) { - return; - } - - const int64_t ne00 = src0->ne[0]; - const int64_t ne01 = src0->ne[1]; - const int64_t ne02 = src0->ne[2]; - const int64_t ne03 = src0->ne[3]; - - //const int64_t ne10 = src1->ne[0]; - //const int64_t ne11 = src1->ne[1]; - const int64_t ne12 = src1->ne[2]; - const int64_t ne13 = src1->ne[3]; - - //const int64_t ne0 = dst->ne[0]; - //const int64_t ne1 = dst->ne[1]; - const int64_t ne2 = dst->ne[2]; - const int64_t ne3 = dst->ne[3]; - - const int nb00 = src0->nb[0]; - const int nb01 = src0->nb[1]; - const int nb02 = src0->nb[2]; - const int nb03 = src0->nb[3]; - - const int nb10 = src1->nb[0]; - const int nb11 = src1->nb[1]; - const int nb12 = src1->nb[2]; - const int nb13 = src1->nb[3]; - - const int nb0 = dst->nb[0]; - const int nb1 = dst->nb[1]; - const int nb2 = dst->nb[2]; - const int nb3 = dst->nb[3]; - - const int ith = params->ith; - const int nth = params->nth; - - GGML_ASSERT(ne02 == ne12); - GGML_ASSERT(ne03 == ne13); - 
GGML_ASSERT(ne2 == ne12); - GGML_ASSERT(ne3 == ne13); - - const enum ggml_type type = src0->type; - dequantize_row_q_t const dequantize_row_q = quantize_fns[type].dequantize_row_q; - quantize_row_q_t const quantize_row_q = quantize_fns[type].quantize_row_q; - - // we don't support permuted src0 or src1 - GGML_ASSERT(nb00 == (int) GGML_TYPE_SIZE[type]); - GGML_ASSERT(nb10 == sizeof(float)); - - // dst cannot be transposed or permuted - GGML_ASSERT(nb0 <= nb1); - GGML_ASSERT(nb1 <= nb2); - GGML_ASSERT(nb2 <= nb3); - - GGML_ASSERT(ggml_is_quantized(src0->type)); - GGML_ASSERT(dst->type == src0->type); - GGML_ASSERT(src1->type == GGML_TYPE_F32); - - // total rows in src0 - const int nr = ne01*ne02*ne03; - - // rows per thread - const int dr = (nr + nth - 1)/nth; - - // row range for this thread - const int ir0 = dr*ith; - const int ir1 = MIN(ir0 + dr, nr); - - float * wdata = (float *) params->wdata + (ne00 + CACHE_LINE_SIZE_F32) * ith; - - for (int ir = ir0; ir < ir1; ++ir) { - // src0 indices - const int i03 = ir/(ne02*ne01); - const int i02 = (ir - i03*ne02*ne01)/ne01; - const int i01 = (ir - i03*ne02*ne01 - i02*ne01); - - // src1 and dst are same shape as src0 => same indices - const int i13 = i03; - const int i12 = i02; - const int i11 = i01; - - const int i3 = i03; - const int i2 = i02; - const int i1 = i01; - - void * src0_row = (void *) ((char *) src0->data + (i01*nb01 + i02*nb02 + i03*nb03)); - float * src1_row = (float *)((char *) src1->data + (i11*nb11 + i12*nb12 + i13*nb13)); - void * dst_row = (void *) ((char *) dst->data + ( i1*nb1 + i2*nb2 + i3*nb0)); - - assert(ne00 % 32 == 0); - - // unquantize row from src0 to temp buffer - dequantize_row_q(src0_row, wdata, ne00); - // add src1 - ggml_vec_acc_f32(ne00, wdata, src1_row); - // quantize row to dst - quantize_row_q(wdata, dst_row, ne00); - } -} - -static void ggml_compute_forward_add( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - const struct ggml_tensor * src1, - struct ggml_tensor * dst) { - switch (src0->type) { - case GGML_TYPE_F32: - { - ggml_compute_forward_add_f32(params, src0, src1, dst); - } break; - case GGML_TYPE_F16: - { - if (src1->type == GGML_TYPE_F16) { - ggml_compute_forward_add_f16_f16(params, src0, src1, dst); - } - else if (src1->type == GGML_TYPE_F32) { - ggml_compute_forward_add_f16_f32(params, src0, src1, dst); - } - else { - GGML_ASSERT(false); - } - } break; - case GGML_TYPE_Q4_0: - case GGML_TYPE_Q4_1: - case GGML_TYPE_Q4_2: - case GGML_TYPE_Q4_3: - { - ggml_compute_forward_add_q_f32(params, src0, src1, dst); - } break; - default: - { - GGML_ASSERT(false); - } break; - } -} - -// ggml_compute_forward_sub - -static void ggml_compute_forward_sub_f32( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - const struct ggml_tensor * src1, - struct ggml_tensor * dst) { - assert(params->ith == 0); - assert(ggml_are_same_shape(src0, src1) && ggml_are_same_shape(src0, dst)); - - if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) { - return; - } - - const int n = ggml_nrows(src0); - const int nc = src0->ne[0]; - - assert( dst->nb[0] == sizeof(float)); - assert(src0->nb[0] == sizeof(float)); - assert(src1->nb[0] == sizeof(float)); - - for (int i = 0; i < n; i++) { - ggml_vec_sub_f32(nc, - (float *) ((char *) dst->data + i*( dst->nb[1])), - (float *) ((char *) src0->data + i*(src0->nb[1])), - (float *) ((char *) src1->data + i*(src1->nb[1]))); - } -} - -static void ggml_compute_forward_sub( - const struct ggml_compute_params * 
params, - const struct ggml_tensor * src0, - const struct ggml_tensor * src1, - struct ggml_tensor * dst) { - switch (src0->type) { - case GGML_TYPE_F32: - { - ggml_compute_forward_sub_f32(params, src0, src1, dst); - } break; - default: - { - GGML_ASSERT(false); - } break; - } -} - -// ggml_compute_forward_mul - -static void ggml_compute_forward_mul_f32( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - const struct ggml_tensor * src1, - struct ggml_tensor * dst) { - assert(params->ith == 0); - assert(ggml_are_same_shape(src0, src1) && ggml_are_same_shape(src0, dst)); - - if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) { - return; - } - - const int n = ggml_nrows(src0); - const int nc = src0->ne[0]; - - assert( dst->nb[0] == sizeof(float)); - assert(src0->nb[0] == sizeof(float)); - assert(src1->nb[0] == sizeof(float)); - - for (int i = 0; i < n; i++) { - ggml_vec_mul_f32(nc, - (float *) ((char *) dst->data + i*( dst->nb[1])), - (float *) ((char *) src0->data + i*(src0->nb[1])), - (float *) ((char *) src1->data + i*(src1->nb[1]))); - } -} - -static void ggml_compute_forward_mul( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - const struct ggml_tensor * src1, - struct ggml_tensor * dst) { - switch (src0->type) { - case GGML_TYPE_F32: - { - ggml_compute_forward_mul_f32(params, src0, src1, dst); - } break; - default: - { - GGML_ASSERT(false); - } break; - } -} - -// ggml_compute_forward_div - -static void ggml_compute_forward_div_f32( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - const struct ggml_tensor * src1, - struct ggml_tensor * dst) { - assert(params->ith == 0); - assert(ggml_are_same_shape(src0, src1) && ggml_are_same_shape(src0, dst)); - - if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) { - return; - } - - const int n = ggml_nrows(src0); - const int nc = src0->ne[0]; - - assert( dst->nb[0] == sizeof(float)); - assert(src0->nb[0] == sizeof(float)); - assert(src1->nb[0] == sizeof(float)); - - for (int i = 0; i < n; i++) { - ggml_vec_div_f32(nc, - (float *) ((char *) dst->data + i*( dst->nb[1])), - (float *) ((char *) src0->data + i*(src0->nb[1])), - (float *) ((char *) src1->data + i*(src1->nb[1]))); - } -} - -static void ggml_compute_forward_div( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - const struct ggml_tensor * src1, - struct ggml_tensor * dst) { - switch (src0->type) { - case GGML_TYPE_F32: - { - ggml_compute_forward_div_f32(params, src0, src1, dst); - } break; - default: - { - GGML_ASSERT(false); - } break; - } -} - -// ggml_compute_forward_sqr - -static void ggml_compute_forward_sqr_f32( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - struct ggml_tensor * dst) { - assert(params->ith == 0); - assert(ggml_are_same_shape(src0, dst)); - - if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) { - return; - } - - const int n = ggml_nrows(src0); - const int nc = src0->ne[0]; - - assert( dst->nb[0] == sizeof(float)); - assert(src0->nb[0] == sizeof(float)); - - for (int i = 0; i < n; i++) { - ggml_vec_sqr_f32(nc, - (float *) ((char *) dst->data + i*( dst->nb[1])), - (float *) ((char *) src0->data + i*(src0->nb[1]))); - } -} - -static void ggml_compute_forward_sqr( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - struct ggml_tensor * dst) { - switch (src0->type) { - case GGML_TYPE_F32: - { - 
ggml_compute_forward_sqr_f32(params, src0, dst); - } break; - default: - { - GGML_ASSERT(false); - } break; - } -} - -// ggml_compute_forward_sqrt - -static void ggml_compute_forward_sqrt_f32( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - struct ggml_tensor * dst) { - assert(params->ith == 0); - assert(ggml_are_same_shape(src0, dst)); - - if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) { - return; - } - - const int n = ggml_nrows(src0); - const int nc = src0->ne[0]; - - assert( dst->nb[0] == sizeof(float)); - assert(src0->nb[0] == sizeof(float)); - - for (int i = 0; i < n; i++) { - ggml_vec_sqrt_f32(nc, - (float *) ((char *) dst->data + i*( dst->nb[1])), - (float *) ((char *) src0->data + i*(src0->nb[1]))); - } -} - -static void ggml_compute_forward_sqrt( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - struct ggml_tensor * dst) { - switch (src0->type) { - case GGML_TYPE_F32: - { - ggml_compute_forward_sqrt_f32(params, src0, dst); - } break; - default: - { - GGML_ASSERT(false); - } break; - } -} - -// ggml_compute_forward_sum - -static void ggml_compute_forward_sum_f32( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - struct ggml_tensor * dst) { - assert(params->ith == 0); - assert(ggml_is_scalar(dst)); - - if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) { - return; - } - - assert(ggml_is_scalar(dst)); - assert(src0->nb[0] == sizeof(float)); - - const int64_t ne00 = src0->ne[0]; - const int64_t ne01 = src0->ne[1]; - const int64_t ne02 = src0->ne[2]; - const int64_t ne03 = src0->ne[3]; - - const size_t nb01 = src0->nb[1]; - const size_t nb02 = src0->nb[2]; - const size_t nb03 = src0->nb[3]; - - for (int64_t i03 = 0; i03 < ne03; i03++) { - for (int64_t i02 = 0; i02 < ne02; i02++) { - for (int64_t i01 = 0; i01 < ne01; i01++) { - ggml_vec_sum_f32(ne00, - (float *) (dst->data), - (float *) ((char *) src0->data + i01*nb01 + i02*nb02 + i03*nb03)); - } - } - } -} - -static void ggml_compute_forward_sum( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - struct ggml_tensor * dst) { - switch (src0->type) { - case GGML_TYPE_F32: - { - ggml_compute_forward_sum_f32(params, src0, dst); - } break; - default: - { - GGML_ASSERT(false); - } break; - } -} - -// ggml_compute_forward_mean - -static void ggml_compute_forward_mean_f32( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - struct ggml_tensor * dst) { - assert(params->ith == 0); - - if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) { - return; - } - - assert(src0->nb[0] == sizeof(float)); - - const int64_t ne00 = src0->ne[0]; - const int64_t ne01 = src0->ne[1]; - const int64_t ne02 = src0->ne[2]; - const int64_t ne03 = src0->ne[3]; - - const size_t nb01 = src0->nb[1]; - const size_t nb02 = src0->nb[2]; - const size_t nb03 = src0->nb[3]; - - const int64_t ne0 = dst->ne[0]; - const int64_t ne1 = dst->ne[1]; - const int64_t ne2 = dst->ne[2]; - const int64_t ne3 = dst->ne[3]; - - assert(ne0 == 1); - assert(ne1 == ne01); - assert(ne2 == ne02); - assert(ne3 == ne03); - - UNUSED(ne0); - UNUSED(ne1); - UNUSED(ne2); - UNUSED(ne3); - - const size_t nb1 = dst->nb[1]; - const size_t nb2 = dst->nb[2]; - const size_t nb3 = dst->nb[3]; - - for (int64_t i03 = 0; i03 < ne03; i03++) { - for (int64_t i02 = 0; i02 < ne02; i02++) { - for (int64_t i01 = 0; i01 < ne01; i01++) { - ggml_vec_sum_f32(ne00, - (float *) ((char *) dst->data + 
i01*nb1 + i02*nb2 + i03*nb3), - (float *) ((char *) src0->data + i01*nb01 + i02*nb02 + i03*nb03)); - - *(float *) ((char *) dst->data + i01*nb1 + i02*nb2 + i03*nb3) /= (float) ne00; - } - } - } -} - -static void ggml_compute_forward_mean( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - struct ggml_tensor * dst) { - switch (src0->type) { - case GGML_TYPE_F32: - { - ggml_compute_forward_mean_f32(params, src0, dst); - } break; - default: - { - GGML_ASSERT(false); - } break; - } -} - -// ggml_compute_forward_repeat - -static void ggml_compute_forward_repeat_f32( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - struct ggml_tensor * dst) { - assert(params->ith == 0); - assert(ggml_can_repeat(src0, dst)); - - if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) { - return; - } - - // TODO: implement support for rank > 2 tensors - assert(src0->ne[2] == 1); - assert(src0->ne[3] == 1); - assert( dst->ne[2] == 1); - assert( dst->ne[3] == 1); - - const int nc = dst->ne[0]; - const int nr = dst->ne[1]; - const int nc0 = src0->ne[0]; - const int nr0 = src0->ne[1]; - const int ncr = nc/nc0; // guaranteed to be an integer due to the check in ggml_can_repeat - const int nrr = nr/nr0; // guaranteed to be an integer due to the check in ggml_can_repeat - - // TODO: support for transposed / permuted tensors - assert( dst->nb[0] == sizeof(float)); - assert(src0->nb[0] == sizeof(float)); - - // TODO: maybe this is not optimal? - for (int i = 0; i < nrr; i++) { - for (int j = 0; j < ncr; j++) { - for (int k = 0; k < nr0; k++) { - ggml_vec_cpy_f32(nc0, - (float *) ((char *) dst->data + (i*nr0 + k)*( dst->nb[1]) + j*nc0*( dst->nb[0])), - (float *) ((char *) src0->data + ( k)*(src0->nb[1]))); - } - } - } -} - -static void ggml_compute_forward_repeat( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - struct ggml_tensor * dst) { - switch (src0->type) { - case GGML_TYPE_F32: - { - ggml_compute_forward_repeat_f32(params, src0, dst); - } break; - default: - { - GGML_ASSERT(false); - } break; - } -} - -// ggml_compute_forward_abs - -static void ggml_compute_forward_abs_f32( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - struct ggml_tensor * dst) { - assert(params->ith == 0); - assert(ggml_are_same_shape(src0, dst)); - - if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) { - return; - } - - const int n = ggml_nrows(src0); - const int nc = src0->ne[0]; - - assert(dst->nb[0] == sizeof(float)); - assert(src0->nb[0] == sizeof(float)); - - for (int i = 0; i < n; i++) { - ggml_vec_abs_f32(nc, - (float *) ((char *) dst->data + i*( dst->nb[1])), - (float *) ((char *) src0->data + i*(src0->nb[1]))); - } -} - -static void ggml_compute_forward_abs( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - struct ggml_tensor * dst) { - switch (src0->type) { - case GGML_TYPE_F32: - { - ggml_compute_forward_abs_f32(params, src0, dst); - } break; - default: - { - GGML_ASSERT(false); - } break; - } -} - -// ggml_compute_forward_sgn - -static void ggml_compute_forward_sgn_f32( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - struct ggml_tensor * dst) { - assert(params->ith == 0); - assert(ggml_are_same_shape(src0, dst)); - - if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) { - return; - } - - const int n = ggml_nrows(src0); - const int nc = src0->ne[0]; - - assert(dst->nb[0] == 
sizeof(float)); - assert(src0->nb[0] == sizeof(float)); - - for (int i = 0; i < n; i++) { - ggml_vec_sgn_f32(nc, - (float *) ((char *) dst->data + i*( dst->nb[1])), - (float *) ((char *) src0->data + i*(src0->nb[1]))); - } -} - -static void ggml_compute_forward_sgn( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - struct ggml_tensor * dst) { - switch (src0->type) { - case GGML_TYPE_F32: - { - ggml_compute_forward_sgn_f32(params, src0, dst); - } break; - default: - { - GGML_ASSERT(false); - } break; - } -} - -// ggml_compute_forward_neg - -static void ggml_compute_forward_neg_f32( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - struct ggml_tensor * dst) { - assert(params->ith == 0); - assert(ggml_are_same_shape(src0, dst)); - - if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) { - return; - } - - const int n = ggml_nrows(src0); - const int nc = src0->ne[0]; - - assert(dst->nb[0] == sizeof(float)); - assert(src0->nb[0] == sizeof(float)); - - for (int i = 0; i < n; i++) { - ggml_vec_neg_f32(nc, - (float *) ((char *) dst->data + i*( dst->nb[1])), - (float *) ((char *) src0->data + i*(src0->nb[1]))); - } -} - -static void ggml_compute_forward_neg( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - struct ggml_tensor * dst) { - switch (src0->type) { - case GGML_TYPE_F32: - { - ggml_compute_forward_neg_f32(params, src0, dst); - } break; - default: - { - GGML_ASSERT(false); - } break; - } -} - -// ggml_compute_forward_step - -static void ggml_compute_forward_step_f32( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - struct ggml_tensor * dst) { - assert(params->ith == 0); - assert(ggml_are_same_shape(src0, dst)); - - if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) { - return; - } - - const int n = ggml_nrows(src0); - const int nc = src0->ne[0]; - - assert(dst->nb[0] == sizeof(float)); - assert(src0->nb[0] == sizeof(float)); - - for (int i = 0; i < n; i++) { - ggml_vec_step_f32(nc, - (float *) ((char *) dst->data + i*( dst->nb[1])), - (float *) ((char *) src0->data + i*(src0->nb[1]))); - } -} - -static void ggml_compute_forward_step( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - struct ggml_tensor * dst) { - switch (src0->type) { - case GGML_TYPE_F32: - { - ggml_compute_forward_step_f32(params, src0, dst); - } break; - default: - { - GGML_ASSERT(false); - } break; - } -} - -// ggml_compute_forward_relu - -static void ggml_compute_forward_relu_f32( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - struct ggml_tensor * dst) { - assert(params->ith == 0); - assert(ggml_are_same_shape(src0, dst)); - - if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) { - return; - } - - const int n = ggml_nrows(src0); - const int nc = src0->ne[0]; - - assert(dst->nb[0] == sizeof(float)); - assert(src0->nb[0] == sizeof(float)); - - for (int i = 0; i < n; i++) { - ggml_vec_relu_f32(nc, - (float *) ((char *) dst->data + i*( dst->nb[1])), - (float *) ((char *) src0->data + i*(src0->nb[1]))); - } -} - -static void ggml_compute_forward_relu( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - struct ggml_tensor * dst) { - switch (src0->type) { - case GGML_TYPE_F32: - { - ggml_compute_forward_relu_f32(params, src0, dst); - } break; - default: - { - GGML_ASSERT(false); - } break; - } -} - -// ggml_compute_forward_gelu - -static 
void ggml_compute_forward_gelu_f32( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - struct ggml_tensor * dst) { - GGML_ASSERT(ggml_is_contiguous(src0)); - GGML_ASSERT(ggml_is_contiguous(dst)); - GGML_ASSERT(ggml_are_same_shape(src0, dst)); - - if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) { - return; - } - - const int ith = params->ith; - const int nth = params->nth; - - const int nc = src0->ne[0]; - const int nr = ggml_nrows(src0); - - // rows per thread - const int dr = (nr + nth - 1)/nth; - - // row range for this thread - const int ir0 = dr*ith; - const int ir1 = MIN(ir0 + dr, nr); - - for (int i1 = ir0; i1 < ir1; i1++) { - ggml_vec_gelu_f32(nc, - (float *) ((char *) dst->data + i1*( dst->nb[1])), - (float *) ((char *) src0->data + i1*(src0->nb[1]))); - -#ifndef NDEBUG - for (int k = 0; k < nc; k++) { - const float x = ((float *) ((char *) dst->data + i1*( dst->nb[1])))[k]; - UNUSED(x); - assert(!isnan(x)); - assert(!isinf(x)); - } -#endif - } -} - -static void ggml_compute_forward_gelu( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - struct ggml_tensor * dst) { - switch (src0->type) { - case GGML_TYPE_F32: - { - ggml_compute_forward_gelu_f32(params, src0, dst); - } break; - default: - { - GGML_ASSERT(false); - } break; - } - - //printf("XXXXXXXX gelu\n"); -} - -// ggml_compute_forward_silu - -static void ggml_compute_forward_silu_f32( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - struct ggml_tensor * dst) { - GGML_ASSERT(ggml_is_contiguous(src0)); - GGML_ASSERT(ggml_is_contiguous(dst)); - GGML_ASSERT(ggml_are_same_shape(src0, dst)); - - if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) { - return; - } - - const int ith = params->ith; - const int nth = params->nth; - - const int nc = src0->ne[0]; - const int nr = ggml_nrows(src0); - - // rows per thread - const int dr = (nr + nth - 1)/nth; - - // row range for this thread - const int ir0 = dr*ith; - const int ir1 = MIN(ir0 + dr, nr); - - for (int i1 = ir0; i1 < ir1; i1++) { - ggml_vec_silu_f32(nc, - (float *) ((char *) dst->data + i1*( dst->nb[1])), - (float *) ((char *) src0->data + i1*(src0->nb[1]))); - -#ifndef NDEBUG - for (int k = 0; k < nc; k++) { - const float x = ((float *) ((char *) dst->data + i1*( dst->nb[1])))[k]; - UNUSED(x); - assert(!isnan(x)); - assert(!isinf(x)); - } -#endif - } -} - -static void ggml_compute_forward_silu( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - struct ggml_tensor * dst) { - switch (src0->type) { - case GGML_TYPE_F32: - { - ggml_compute_forward_silu_f32(params, src0, dst); - } break; - default: - { - GGML_ASSERT(false); - } break; - } -} - - -// ggml_compute_forward_norm - -static void ggml_compute_forward_norm_f32( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - struct ggml_tensor * dst) { - GGML_ASSERT(ggml_are_same_shape(src0, dst)); - - if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) { - return; - } - - GGML_ASSERT(src0->nb[0] == sizeof(float)); - - const int ith = params->ith; - const int nth = params->nth; - - const int64_t ne00 = src0->ne[0]; - const int64_t ne01 = src0->ne[1]; - const int64_t ne02 = src0->ne[2]; - const int64_t ne03 = src0->ne[3]; - - const size_t nb01 = src0->nb[1]; - const size_t nb02 = src0->nb[2]; - const size_t nb03 = src0->nb[3]; - - const size_t nb1 = dst->nb[1]; - const size_t nb2 = dst->nb[2]; - const size_t nb3 = 
dst->nb[3]; - - const float eps = 1e-5f; // TODO: make this a parameter - - // TODO: optimize - for (int64_t i03 = 0; i03 < ne03; i03++) { - for (int64_t i02 = 0; i02 < ne02; i02++) { - for (int64_t i01 = ith; i01 < ne01; i01 += nth) { - const float * x = (float *) ((char *) src0->data + i01*nb01 + i02*nb02 + i03*nb03); - - ggml_float sum = 0.0; - for (int64_t i00 = 0; i00 < ne00; i00++) { - sum += (ggml_float)x[i00]; - } - - float mean = sum/ne00; - - float * y = (float *) ((char *) dst->data + i01*nb1 + i02*nb2 + i03*nb3); - - ggml_float sum2 = 0.0; - for (int64_t i00 = 0; i00 < ne00; i00++) { - float v = x[i00] - mean; - y[i00] = v; - sum2 += (ggml_float)(v*v); - } - - float variance = sum2/ne00; - const float scale = 1.0f/sqrtf(variance + eps); - - ggml_vec_scale_f32(ne00, y, scale); - } - } - } -} - -static void ggml_compute_forward_norm( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - struct ggml_tensor * dst) { - switch (src0->type) { - case GGML_TYPE_F32: - { - ggml_compute_forward_norm_f32(params, src0, dst); - } break; - default: - { - GGML_ASSERT(false); - } break; - } -} - -static void ggml_compute_forward_rms_norm_f32( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - struct ggml_tensor * dst) { - GGML_ASSERT(ggml_are_same_shape(src0, dst)); - - if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) { - return; - } - - GGML_ASSERT(src0->nb[0] == sizeof(float)); - - const int ith = params->ith; - const int nth = params->nth; - - const int64_t ne00 = src0->ne[0]; - const int64_t ne01 = src0->ne[1]; - const int64_t ne02 = src0->ne[2]; - const int64_t ne03 = src0->ne[3]; - - const size_t nb01 = src0->nb[1]; - const size_t nb02 = src0->nb[2]; - const size_t nb03 = src0->nb[3]; - - const size_t nb1 = dst->nb[1]; - const size_t nb2 = dst->nb[2]; - const size_t nb3 = dst->nb[3]; - - const float eps = 1e-6f; // TODO: make this a parameter - - // TODO: optimize - for (int64_t i03 = 0; i03 < ne03; i03++) { - for (int64_t i02 = 0; i02 < ne02; i02++) { - for (int64_t i01 = ith; i01 < ne01; i01 += nth) { - const float * x = (float *) ((char *) src0->data + i01*nb01 + i02*nb02 + i03*nb03); - - ggml_float sum = 0.0; - for (int64_t i00 = 0; i00 < ne00; i00++) { - sum += (ggml_float)(x[i00] * x[i00]); - } - - float mean = sum/ne00; - - float * y = (float *) ((char *) dst->data + i01*nb1 + i02*nb2 + i03*nb3); - - memcpy(y, x, ne00 * sizeof(float)); - // for (int i00 = 0; i00 < ne00; i00++) { - // y[i00] = x[i00]; - // } - - const float scale = 1.0f/sqrtf(mean + eps); - - ggml_vec_scale_f32(ne00, y, scale); - } - } - } -} - -static void ggml_compute_forward_rms_norm( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - struct ggml_tensor * dst) { - switch (src0->type) { - case GGML_TYPE_F32: - { - ggml_compute_forward_rms_norm_f32(params, src0, dst); - } break; - default: - { - GGML_ASSERT(false); - } break; - } -} - - -// ggml_compute_forward_mul_mat - -#if defined(GGML_USE_ACCELERATE) || defined(GGML_USE_OPENBLAS) || defined(GGML_USE_CUBLAS) -// helper function to determine if it is better to use BLAS or not -// for large matrices, BLAS is faster -static bool ggml_compute_forward_mul_mat_use_blas( - const struct ggml_tensor * src0, - const struct ggml_tensor * src1, - struct ggml_tensor * dst) { - //const int64_t ne00 = src0->ne[0]; - //const int64_t ne01 = src0->ne[1]; - - const int64_t ne10 = src1->ne[0]; - - const int64_t ne0 = dst->ne[0]; - const int64_t ne1 = dst->ne[1]; - - // 
TODO: find the optimal values for these - if (ggml_is_contiguous(src0) && - ggml_is_contiguous(src1) && ((ne0 >= 32 && ne1 >= 32 && ne10 >= 32))) { - - /*printf("BLAS: %d %d %d %d %d\n", ne0, ne1, ne10, ne00, ne01);*/ - return true; - } - - return false; -} -#endif - -static void ggml_compute_forward_mul_mat_f32( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - const struct ggml_tensor * src1, - struct ggml_tensor * dst) { - int64_t t0 = ggml_perf_time_us(); - UNUSED(t0); - - const int64_t ne00 = src0->ne[0]; - const int64_t ne01 = src0->ne[1]; - const int64_t ne02 = src0->ne[2]; - const int64_t ne03 = src0->ne[3]; - -#if defined(GGML_USE_ACCELERATE) || defined(GGML_USE_OPENBLAS) || defined(GGML_USE_CUBLAS) - const int64_t ne10 = src1->ne[0]; -#endif - const int64_t ne11 = src1->ne[1]; -#ifndef NDEBUG - const int64_t ne12 = src1->ne[2]; - const int64_t ne13 = src1->ne[3]; - - const int64_t ne0 = dst->ne[0]; - const int64_t ne1 = dst->ne[1]; - const int64_t ne2 = dst->ne[2]; - const int64_t ne3 = dst->ne[3]; - - const int nb00 = src0->nb[0]; -#endif - const int nb01 = src0->nb[1]; - const int nb02 = src0->nb[2]; - const int nb03 = src0->nb[3]; - -#ifndef NDEBUG - const int nb10 = src1->nb[0]; -#endif - const int nb11 = src1->nb[1]; - const int nb12 = src1->nb[2]; - const int nb13 = src1->nb[3]; - - const int nb0 = dst->nb[0]; - const int nb1 = dst->nb[1]; - const int nb2 = dst->nb[2]; - const int nb3 = dst->nb[3]; - - const int ith = params->ith; - const int nth = params->nth; - - assert(ne02 == ne12); - assert(ne03 == ne13); - assert(ne2 == ne12); - assert(ne3 == ne13); - - // we don't support permuted src0 or src1 - assert(nb00 == sizeof(float)); - assert(nb10 == sizeof(float)); - - // dst cannot be transposed or permuted - assert(nb0 == sizeof(float)); - assert(nb0 <= nb1); - assert(nb1 <= nb2); - assert(nb2 <= nb3); - - assert(ne0 == ne01); - assert(ne1 == ne11); - assert(ne2 == ne02); - assert(ne3 == ne03); - - // nb01 >= nb00 - src0 is not transposed - // compute by src0 rows - -#if defined(GGML_USE_ACCELERATE) || defined(GGML_USE_OPENBLAS) || defined(GGML_USE_CUBLAS) - if (ggml_compute_forward_mul_mat_use_blas(src0, src1, dst)) { - if (params->ith != 0) { - return; - } - - if (params->type == GGML_TASK_INIT) { - return; - } - - if (params->type == GGML_TASK_FINALIZE) { - return; - } - -#if defined(GGML_USE_CUBLAS) - const float alpha = 1.0f; - const float beta = 0.0f; - const int x_ne = ne01 * ne10; - const int y_ne = ne11 * ne10; - const int d_ne = ne11 * ne01; - - size_t x_size, y_size, d_size; - float *d_X = ggml_cuda_pool_malloc(sizeof(float) * x_ne, &x_size); - float *d_Y = ggml_cuda_pool_malloc(sizeof(float) * y_ne, &y_size); - float *d_D = ggml_cuda_pool_malloc(sizeof(float) * d_ne, &d_size); -#endif - - for (int64_t i03 = 0; i03 < ne03; i03++) { - for (int64_t i02 = 0; i02 < ne02; i02++) { - const float * x = (float *) ((char *) src0->data + i02*nb02 + i03*nb03); - const float * y = (float *) ((char *) src1->data + i02*nb12 + i03*nb13); - - float * d = (float *) ((char *) dst->data + i02*nb2 + i03*nb3); - -#if defined(GGML_USE_CUBLAS) - // copy data to device - CUDA_CHECK(cudaMemcpyAsync(d_X, x, sizeof(float) * x_ne, cudaMemcpyHostToDevice, g_cudaStream)); - CUDA_CHECK(cudaMemcpyAsync(d_Y, y, sizeof(float) * y_ne, cudaMemcpyHostToDevice, g_cudaStream)); - - // compute - CUBLAS_CHECK( - cublasSgemm(g_cublasH, CUBLAS_OP_T, CUBLAS_OP_N, - ne01, ne11, ne10, - &alpha, d_X, ne00, - d_Y, ne10, - &beta, d_D, ne01)); - - // copy data to host - 
CUDA_CHECK(cudaMemcpyAsync(d, d_D, sizeof(float) * d_ne, cudaMemcpyDeviceToHost, g_cudaStream)); -#else - // zT = y * xT - cblas_sgemm(CblasRowMajor, CblasNoTrans, CblasTrans, - ne11, ne01, ne10, - 1.0f, y, ne10, - x, ne00, - 0.0f, d, ne01); -#endif - } - } -#if defined(GGML_USE_CUBLAS) - CUDA_CHECK(cudaStreamSynchronize(g_cudaStream)); - ggml_cuda_pool_free(d_X, x_size); - ggml_cuda_pool_free(d_Y, y_size); - ggml_cuda_pool_free(d_D, d_size); -#endif - //printf("CBLAS F32 = %f ms, %d x %d x %d x %d\n", (ggml_perf_time_us() - t0)/1000.0, ne0, ne1, ne2, ne3); - - return; - } -#endif - - if (params->type == GGML_TASK_INIT) { - return; - } - - if (params->type == GGML_TASK_FINALIZE) { - return; - } - - // parallelize by src0 rows using ggml_vec_dot_f32 - - // total rows in src0 - const int nr = ne01*ne02*ne03; - - // rows per thread - const int dr = (nr + nth - 1)/nth; - - // row range for this thread - const int ir0 = dr*ith; - const int ir1 = MIN(ir0 + dr, nr); - - for (int ir = ir0; ir < ir1; ++ir) { - // src0 indices - const int i03 = ir/(ne02*ne01); - const int i02 = (ir - i03*ne02*ne01)/ne01; - const int i01 = (ir - i03*ne02*ne01 - i02*ne01); - - for (int64_t ic = 0; ic < ne11; ++ic) { - // src1 indices - const int i13 = i03; - const int i12 = i02; - const int i11 = ic; - - // dst indices - const int i0 = i01; - const int i1 = i11; - const int i2 = i02; - const int i3 = i03; - - ggml_vec_dot_f32(ne00, - (float *) ((char *) dst->data + (i0*nb0 + i1*nb1 + i2*nb2 + i3*nb3)), - (float *) ((char *) src0->data + (i01*nb01 + i02*nb02 + i03*nb03)), - (float *) ((char *) src1->data + (i11*nb11 + i12*nb12 + i13*nb13))); - } - } - - //int64_t t1 = ggml_perf_time_us(); - //static int64_t acc = 0; - //acc += t1 - t0; - //if (t1 - t0 > 10) { - // printf("\n"); - // printf("ne00 = %5d, ne01 = %5d, ne02 = %5d, ne03 = %5d\n", ne00, ne01, ne02, ne03); - // printf("nb00 = %5d, nb01 = %5d, nb02 = %5d, nb03 = %5d\n", nb00, nb01, nb02, nb03); - // printf("ne10 = %5d, ne11 = %5d, ne12 = %5d, ne13 = %5d\n", ne10, ne11, ne12, ne13); - // printf("nb10 = %5d, nb11 = %5d, nb12 = %5d, nb13 = %5d\n", nb10, nb11, nb12, nb13); - - // printf("XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX task %d/%d: %d us, acc = %d\n", ith, nth, (int) (t1 - t0), (int) acc); - //} -} - -static void ggml_compute_forward_mul_mat_f16_f32( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - const struct ggml_tensor * src1, - struct ggml_tensor * dst) { - int64_t t0 = ggml_perf_time_us(); - UNUSED(t0); - - const int64_t ne00 = src0->ne[0]; - const int64_t ne01 = src0->ne[1]; - const int64_t ne02 = src0->ne[2]; - const int64_t ne03 = src0->ne[3]; - - const int64_t ne10 = src1->ne[0]; - const int64_t ne11 = src1->ne[1]; - const int64_t ne12 = src1->ne[2]; - const int64_t ne13 = src1->ne[3]; - - const int64_t ne0 = dst->ne[0]; - const int64_t ne1 = dst->ne[1]; - const int64_t ne2 = dst->ne[2]; - const int64_t ne3 = dst->ne[3]; - //const int64_t ne = ne0*ne1*ne2*ne3; - - const int nb00 = src0->nb[0]; - const int nb01 = src0->nb[1]; - const int nb02 = src0->nb[2]; - const int nb03 = src0->nb[3]; - - const int nb10 = src1->nb[0]; - const int nb11 = src1->nb[1]; - const int nb12 = src1->nb[2]; - const int nb13 = src1->nb[3]; - - const int nb0 = dst->nb[0]; - const int nb1 = dst->nb[1]; - const int nb2 = dst->nb[2]; - const int nb3 = dst->nb[3]; - - const int ith = params->ith; - const int nth = params->nth; - - GGML_ASSERT(ne02 == ne12); - GGML_ASSERT(ne03 == ne13); - GGML_ASSERT(ne2 == ne12); - GGML_ASSERT(ne3 
== ne13); - - // TODO: we don't support permuted src0 - GGML_ASSERT(nb00 == sizeof(ggml_fp16_t)); - - // dst cannot be transposed or permuted - GGML_ASSERT(nb0 == sizeof(float)); - GGML_ASSERT(nb0 <= nb1); - GGML_ASSERT(nb1 <= nb2); - GGML_ASSERT(nb2 <= nb3); - - GGML_ASSERT(ne0 == ne01); - GGML_ASSERT(ne1 == ne11); - GGML_ASSERT(ne2 == ne02); - GGML_ASSERT(ne3 == ne03); - - // nb01 >= nb00 - src0 is not transposed - // compute by src0 rows - -#if defined(GGML_USE_ACCELERATE) || defined(GGML_USE_OPENBLAS) || defined(GGML_USE_CUBLAS) - if (ggml_compute_forward_mul_mat_use_blas(src0, src1, dst)) { - GGML_ASSERT(nb10 == sizeof(float)); - - if (params->ith != 0) { - return; - } - - if (params->type == GGML_TASK_INIT) { - return; - } - - if (params->type == GGML_TASK_FINALIZE) { - return; - } - -#if defined(GGML_USE_CUBLAS) - ggml_fp16_t * const wdata = params->wdata; - - const float alpha = 1.0f; - const float beta = 0.0f; - const int x_ne = ne01 * ne10; - const int y_ne = ne11 * ne10; - const int d_ne = ne11 * ne01; - - size_t x_size, y_size, d_size; - float *d_X = ggml_cuda_pool_malloc(sizeof(float) * x_ne, &x_size); - float *d_Y = ggml_cuda_pool_malloc(sizeof(float) * y_ne, &y_size); - float *d_D = ggml_cuda_pool_malloc(sizeof(float) * d_ne, &d_size); -#else - float * const wdata = params->wdata; -#endif - for (int64_t i03 = 0; i03 < ne03; i03++) { - for (int64_t i02 = 0; i02 < ne02; i02++) { -#if defined(GGML_USE_CUBLAS) - // with cuBlAS, instead of converting src0 to fp32, we convert src1 to fp16 - { - size_t id = 0; - for (int64_t i01 = 0; i01 < ne11; ++i01) { - for (int64_t i00 = 0; i00 < ne10; ++i00) { - wdata[id++] = GGML_FP32_TO_FP16(*(float *) ((char *) src1->data + i03*nb13 + i02*nb12 + i01*nb11 + i00*nb10)); - } - } - } -#else - { - size_t id = 0; - for (int64_t i01 = 0; i01 < ne01; ++i01) { - for (int64_t i00 = 0; i00 < ne00; ++i00) { - wdata[id++] = GGML_FP16_TO_FP32(*(ggml_fp16_t *) ((char *) src0->data + i03*nb03 + i02*nb02 + i01*nb01 + i00*nb00)); - } - } - } -#endif - -#if defined(GGML_USE_CUBLAS) - const ggml_fp16_t * x = (ggml_fp16_t *) ((char *) src0->data + i02*nb02 + i03*nb03); - const ggml_fp16_t * y = (ggml_fp16_t *) wdata; - - float * d = (float *) ((char *) dst->data + i02*nb2 + i03*nb3); - - // copy data to device - CUDA_CHECK(cudaMemcpyAsync(d_X, x, sizeof(ggml_fp16_t) * x_ne, cudaMemcpyHostToDevice, g_cudaStream)); - CUDA_CHECK(cudaMemcpyAsync(d_Y, y, sizeof(ggml_fp16_t) * y_ne, cudaMemcpyHostToDevice, g_cudaStream)); - - // compute - CUBLAS_CHECK( - cublasGemmEx(g_cublasH, CUBLAS_OP_T, CUBLAS_OP_N, - ne01, ne11, ne10, - &alpha, d_X, CUDA_R_16F, ne00, - d_Y, CUDA_R_16F, ne10, - &beta, d_D, CUDA_R_32F, ne01, - CUBLAS_COMPUTE_32F, - CUBLAS_GEMM_DEFAULT)); - - // copy data to host - CUDA_CHECK(cudaMemcpyAsync(d, d_D, sizeof(float) * d_ne, cudaMemcpyDeviceToHost, g_cudaStream)); -#else - const float * x = wdata; - const float * y = (float *) ((char *) src1->data + i02*nb12 + i03*nb13); - - float * d = (float *) ((char *) dst->data + i02*nb2 + i03*nb3); - - // zT = y * xT - cblas_sgemm(CblasRowMajor, CblasNoTrans, CblasTrans, - ne11, ne01, ne10, - 1.0f, y, ne10, - x, ne00, - 0.0f, d, ne01); -#endif - } - } - -#if defined(GGML_USE_CUBLAS) - CUDA_CHECK(cudaStreamSynchronize(g_cudaStream)); - ggml_cuda_pool_free(d_X, x_size); - ggml_cuda_pool_free(d_Y, y_size); - ggml_cuda_pool_free(d_D, d_size); -#endif - /*printf("CBLAS F16 = %f ms, %d x %d x %d x %d\n", (ggml_perf_time_us() - t0)/1000.0, ne0, ne1, ne2, ne3);*/ - - return; - } -#endif - - if (params->type == 
GGML_TASK_INIT) { - ggml_fp16_t * const wdata = params->wdata; - - size_t id = 0; - for (int64_t i13 = 0; i13 < ne13; ++i13) { - for (int64_t i12 = 0; i12 < ne12; ++i12) { - for (int64_t i11 = 0; i11 < ne11; ++i11) { - for (int64_t i10 = 0; i10 < ne10; ++i10) { - wdata[id++] = GGML_FP32_TO_FP16(*(float *)((char *) src1->data + i13*nb13 + i12*nb12 + i11*nb11 + i10*nb10)); - } - } - } - } - - GGML_ASSERT(id*sizeof(ggml_fp16_t) <= params->wsize); - - return; - } - - if (params->type == GGML_TASK_FINALIZE) { - return; - } - - // fp16 -> half the size, so divide by 2 - // TODO: do not support transposed src1 - assert(nb10/2 == sizeof(ggml_fp16_t)); - - // parallelize by src0 rows using ggml_vec_dot_f16 - - // total rows in src0 - const int nr = ne01*ne02*ne03; - - // rows per thread - const int dr = (nr + nth - 1)/nth; - - // row range for this thread - const int ir0 = dr*ith; - const int ir1 = MIN(ir0 + dr, nr); - - ggml_fp16_t * wdata = params->wdata; - - for (int ir = ir0; ir < ir1; ++ir) { - // src0 indices - const int i03 = ir/(ne02*ne01); - const int i02 = (ir - i03*ne02*ne01)/ne01; - const int i01 = (ir - i03*ne02*ne01 - i02*ne01); - - const int i13 = i03; - const int i12 = i02; - - const int i0 = i01; - const int i2 = i02; - const int i3 = i03; - - ggml_fp16_t * src0_row = (ggml_fp16_t *) ((char *) src0->data + (i01*nb01 + i02*nb02 + i03*nb03)); - ggml_fp16_t * src1_col = wdata + ( 0 + i12*ne11 + i13*ne12*ne11)*ne00; - - float * dst_col = (float *) ((char *) dst->data + (i0*nb0 + 0*nb1 + i2*nb2 + i3*nb3)); - - for (int64_t ic = 0; ic < ne11; ++ic) { - ggml_vec_dot_f16(ne00, &dst_col[ic*ne0], src0_row, src1_col + ic*ne00); - } - } - - //int64_t t1 = ggml_time_us(); - //static int64_t acc = 0; - //acc += t1 - t0; - //if (t1 - t0 > 10) { - // printf("\n"); - // printf("ne00 = %5d, ne01 = %5d, ne02 = %5d, ne03 = %5d\n", ne00, ne01, ne02, ne03); - // printf("nb00 = %5d, nb01 = %5d, nb02 = %5d, nb03 = %5d\n", nb00, nb01, nb02, nb03); - // printf("ne10 = %5d, ne11 = %5d, ne12 = %5d, ne13 = %5d\n", ne10, ne11, ne12, ne13); - - // printf("XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX task %d/%d: %d us, acc = %d\n", ith, nth, (int) (t1 - t0), (int) acc); - //} -} - -static void ggml_compute_forward_mul_mat_q_f32( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - const struct ggml_tensor * src1, - struct ggml_tensor * dst) { - int64_t t0 = ggml_perf_time_us(); - UNUSED(t0); - - const int64_t ne00 = src0->ne[0]; - const int64_t ne01 = src0->ne[1]; - const int64_t ne02 = src0->ne[2]; - const int64_t ne03 = src0->ne[3]; - - const int64_t ne10 = src1->ne[0]; - const int64_t ne11 = src1->ne[1]; - const int64_t ne12 = src1->ne[2]; - const int64_t ne13 = src1->ne[3]; - - const int64_t ne0 = dst->ne[0]; - const int64_t ne1 = dst->ne[1]; - const int64_t ne2 = dst->ne[2]; - const int64_t ne3 = dst->ne[3]; - - const int nb00 = src0->nb[0]; - const int nb01 = src0->nb[1]; - const int nb02 = src0->nb[2]; - const int nb03 = src0->nb[3]; - - const int nb10 = src1->nb[0]; - const int nb11 = src1->nb[1]; - const int nb12 = src1->nb[2]; - const int nb13 = src1->nb[3]; - - const int nb0 = dst->nb[0]; - const int nb1 = dst->nb[1]; - const int nb2 = dst->nb[2]; - const int nb3 = dst->nb[3]; - - const int ith = params->ith; - const int nth = params->nth; - - GGML_ASSERT(ne02 == ne12); - GGML_ASSERT(ne03 == ne13); - GGML_ASSERT(ne2 == ne12); - GGML_ASSERT(ne3 == ne13); - - const enum ggml_type type = src0->type; - quantize_row_q_t const quantize_row_q_dot = 
quantize_fns[type].quantize_row_q_dot; - vec_dot_q_t const vec_dot_q = quantize_fns[type].vec_dot_q; - - // we don't support permuted src0 or src1 - GGML_ASSERT(nb00 == (int) GGML_TYPE_SIZE[type]); - GGML_ASSERT(nb10 == sizeof(float)); - - // dst cannot be transposed or permuted - GGML_ASSERT(nb0 == sizeof(float)); - GGML_ASSERT(nb0 <= nb1); - GGML_ASSERT(nb1 <= nb2); - GGML_ASSERT(nb2 <= nb3); - - GGML_ASSERT(ne0 == ne01); - GGML_ASSERT(ne1 == ne11); - GGML_ASSERT(ne2 == ne02); - GGML_ASSERT(ne3 == ne03); - - // nb01 >= nb00 - src0 is not transposed - // compute by src0 rows - -#if defined(GGML_USE_ACCELERATE) || defined(GGML_USE_OPENBLAS) || defined(GGML_USE_CUBLAS) - if (ggml_compute_forward_mul_mat_use_blas(src0, src1, dst)) { - if (params->ith != 0) { - return; - } - - if (params->type == GGML_TASK_INIT) { - return; - } - - if (params->type == GGML_TASK_FINALIZE) { - return; - } - -#if defined(GGML_USE_CUBLAS) - const float alpha = 1.0f; - const float beta = 0.0f; - const int x_ne = ne01 * ne10; - const int y_ne = ne11 * ne10; - const int d_ne = ne11 * ne01; - - size_t x_size, y_size, d_size, q_size; - float *d_X = ggml_cuda_pool_malloc(sizeof(float) * x_ne, &x_size); - float *d_Y = ggml_cuda_pool_malloc(sizeof(float) * y_ne, &y_size); - float *d_D = ggml_cuda_pool_malloc(sizeof(float) * d_ne, &d_size); - float *d_Q = ggml_cuda_pool_malloc(GGML_TYPE_SIZE[type] * x_ne / GGML_BLCK_SIZE[type], &q_size); - - void (*dequantize_row_q_cuda)(const void * x, float * y, int k, cudaStream_t stream) = NULL; - if (type == GGML_TYPE_Q4_0) { - dequantize_row_q_cuda = dequantize_row_q4_0_cuda; - } - else if (type == GGML_TYPE_Q4_1) { - dequantize_row_q_cuda = dequantize_row_q4_1_cuda; - } - else if (type == GGML_TYPE_Q4_2) { - dequantize_row_q_cuda = dequantize_row_q4_2_cuda; - } - else if (type == GGML_TYPE_Q4_3) { - dequantize_row_q_cuda = dequantize_row_q4_3_cuda; - } - else { - GGML_ASSERT(false); - } -#else - float * const wdata = params->wdata; - dequantize_row_q_t const dequantize_row_q = quantize_fns[type].dequantize_row_q; -#endif - - for (int64_t i03 = 0; i03 < ne03; i03++) { - for (int64_t i02 = 0; i02 < ne02; i02++) { - const float * y = (float *) ((char *) src1->data + i02*nb12 + i03*nb13); - - float * d = (float *) ((char *) dst->data + i02*nb2 + i03*nb3); - -#if defined(GGML_USE_CUBLAS) - // copy and dequantize on device - CUDA_CHECK( - cudaMemcpyAsync(d_Q, (char *) src0->data + i03*nb03 + i02*nb02, - GGML_TYPE_SIZE[type] * x_ne / GGML_BLCK_SIZE[type], cudaMemcpyHostToDevice, g_cudaStream)); - - dequantize_row_q_cuda(d_Q, d_X, ne01 * ne00, g_cudaStream); - CUDA_CHECK(cudaGetLastError()); -#else - { - size_t id = 0; - for (int64_t i01 = 0; i01 < ne01; ++i01) { - dequantize_row_q((char *) src0->data + i03*nb03 + i02*nb02 + i01*nb01, wdata + id, ne00); - id += ne00; - } - } - const float * x = wdata; -#endif - - -#if defined(GGML_USE_CUBLAS) - // copy data to device - CUDA_CHECK(cudaMemcpyAsync(d_Y, y, sizeof(float) * y_ne, cudaMemcpyHostToDevice, g_cudaStream)); - - // compute - CUBLAS_CHECK( - cublasSgemm(g_cublasH, CUBLAS_OP_T, CUBLAS_OP_N, - ne01, ne11, ne10, - &alpha, d_X, ne00, - d_Y, ne10, - &beta, d_D, ne01)); - - // copy data to host - CUDA_CHECK(cudaMemcpyAsync(d, d_D, sizeof(float) * d_ne, cudaMemcpyDeviceToHost, g_cudaStream)); -#else - // zT = y * xT - cblas_sgemm(CblasRowMajor, CblasNoTrans, CblasTrans, - ne11, ne01, ne10, - 1.0f, y, ne10, - x, ne00, - 0.0f, d, ne01); -#endif - } - } - -#if defined(GGML_USE_CUBLAS) - CUDA_CHECK(cudaStreamSynchronize(g_cudaStream)); - 
ggml_cuda_pool_free(d_X, x_size); - ggml_cuda_pool_free(d_Y, y_size); - ggml_cuda_pool_free(d_D, d_size); - ggml_cuda_pool_free(d_Q, q_size); -#endif - //printf("CBLAS = %f ms, %d x %d x %d x %d\n", (ggml_perf_time_us() - t0)/1000.0, ne0, ne1, ne2, ne3); - - return; - } -#endif - - if (params->type == GGML_TASK_INIT) { - char * wdata = params->wdata; - const size_t row_size = ne10*GGML_TYPE_SIZE[GGML_TYPE_Q8_0]/GGML_BLCK_SIZE[GGML_TYPE_Q8_0]; - - for (int64_t i13 = 0; i13 < ne13; ++i13) { - for (int64_t i12 = 0; i12 < ne12; ++i12) { - for (int64_t i11 = 0; i11 < ne11; ++i11) { - quantize_row_q_dot((float *)((char *) src1->data + i13*nb13 + i12*nb12 + i11*nb11), (void *) wdata, ne10); - wdata += row_size; - } - } - } - - return; - } - - if (params->type == GGML_TASK_FINALIZE) { - return; - } - - // parallelize by src0 rows using ggml_vec_dot_q - - // total rows in src0 - const int nr = ne01*ne02*ne03; - - // rows per thread - const int dr = (nr + nth - 1)/nth; - - // row range for this thread - const int ir0 = dr*ith; - const int ir1 = MIN(ir0 + dr, nr); - - void * wdata = params->wdata; - const size_t row_size = ne00*GGML_TYPE_SIZE[GGML_TYPE_Q8_0]/GGML_BLCK_SIZE[GGML_TYPE_Q8_0]; - - for (int ir = ir0; ir < ir1; ++ir) { - // src0 indices - const int i03 = ir/(ne02*ne01); - const int i02 = (ir - i03*ne02*ne01)/ne01; - const int i01 = (ir - i03*ne02*ne01 - i02*ne01); - - const int i13 = i03; - const int i12 = i02; - - const int i0 = i01; - const int i2 = i02; - const int i3 = i03; - - void * src0_row = (void *) ((char *) src0->data + (i01*nb01 + i02*nb02 + i03*nb03)); - char * src1_col = ((char *) wdata + ( (0 + i12*ne11 + i13*ne12*ne11)*row_size)); - - float * dst_col = (float *) ((char *) dst->data + (i0*nb0 + 0*nb1 + i2*nb2 + i3*nb3)); - - assert(ne00 % 32 == 0); - - for (int64_t ic = 0; ic < ne11; ++ic) { - vec_dot_q(ne00, &dst_col[ic*ne0], src0_row, (void *) (src1_col + ic*row_size)); - } - } - - //int64_t t1 = ggml_time_us(); - //static int64_t acc = 0; - //acc += t1 - t0; - //if (t1 - t0 > 10) { - // printf("\n"); - // printf("ne00 = %5d, ne01 = %5d, ne02 = %5d, ne03 = %5d\n", ne00, ne01, ne02, ne03); - // printf("nb00 = %5d, nb01 = %5d, nb02 = %5d, nb03 = %5d\n", nb00, nb01, nb02, nb03); - // printf("ne10 = %5d, ne11 = %5d, ne12 = %5d, ne13 = %5d\n", ne10, ne11, ne12, ne13); - - // printf("XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX task %d/%d: %d us, acc = %d\n", ith, nth, (int) (t1 - t0), (int) acc); - //} -} - -static void ggml_compute_forward_mul_mat( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - const struct ggml_tensor * src1, - struct ggml_tensor * dst) { - switch (src0->type) { - case GGML_TYPE_Q4_0: - case GGML_TYPE_Q4_1: - case GGML_TYPE_Q4_2: - case GGML_TYPE_Q4_3: - case GGML_TYPE_Q8_0: - { - ggml_compute_forward_mul_mat_q_f32(params, src0, src1, dst); - } break; - case GGML_TYPE_F16: - { - ggml_compute_forward_mul_mat_f16_f32(params, src0, src1, dst); - } break; - case GGML_TYPE_F32: - { - ggml_compute_forward_mul_mat_f32(params, src0, src1, dst); - } break; - default: - { - GGML_ASSERT(false); - } break; - } -} - -// ggml_compute_forward_scale - -static void ggml_compute_forward_scale_f32( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - const struct ggml_tensor * src1, - struct ggml_tensor * dst) { - GGML_ASSERT(ggml_is_contiguous(src0)); - GGML_ASSERT(ggml_is_contiguous(dst)); - GGML_ASSERT(ggml_are_same_shape(src0, dst)); - GGML_ASSERT(ggml_is_scalar(src1)); - - if (params->type == 
GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) { - return; - } - - // scale factor - const float v = *(float *) src1->data; - - const int ith = params->ith; - const int nth = params->nth; - - const int nc = src0->ne[0]; - const int nr = ggml_nrows(src0); - - // rows per thread - const int dr = (nr + nth - 1)/nth; - - // row range for this thread - const int ir0 = dr*ith; - const int ir1 = MIN(ir0 + dr, nr); - - for (int i1 = ir0; i1 < ir1; i1++) { - ggml_vec_scale_f32(nc, (float *) ((char *) dst->data + i1*(dst->nb[1])), v); - } -} - -static void ggml_compute_forward_scale( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - const struct ggml_tensor * src1, - struct ggml_tensor * dst) { - switch (src0->type) { - case GGML_TYPE_F32: - { - ggml_compute_forward_scale_f32(params, src0, src1, dst); - } break; - default: - { - GGML_ASSERT(false); - } break; - } -} - -// ggml_compute_forward_cpy - -static void ggml_compute_forward_cpy( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - struct ggml_tensor * dst) { - ggml_compute_forward_dup(params, src0, dst); -} - -// ggml_compute_forward_cont - -static void ggml_compute_forward_cont( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - struct ggml_tensor * dst) { - ggml_compute_forward_dup(params, src0, dst); -} - -// ggml_compute_forward_reshape - -static void ggml_compute_forward_reshape( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - struct ggml_tensor * dst) { - // NOP - UNUSED(params); - UNUSED(src0); - UNUSED(dst); -} - -// ggml_compute_forward_view - -static void ggml_compute_forward_view( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0) { - // NOP - UNUSED(params); - UNUSED(src0); -} - -// ggml_compute_forward_permute - -static void ggml_compute_forward_permute( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0) { - // NOP - UNUSED(params); - UNUSED(src0); -} - -// ggml_compute_forward_transpose - -static void ggml_compute_forward_transpose( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0) { - // NOP - UNUSED(params); - UNUSED(src0); -} - -// ggml_compute_forward_get_rows - -static void ggml_compute_forward_get_rows_q( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - const struct ggml_tensor * src1, - struct ggml_tensor * dst) { - assert(params->ith == 0); - - if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) { - return; - } - - const int nc = src0->ne[0]; - const int nr = ggml_nelements(src1); - const enum ggml_type type = src0->type; - dequantize_row_q_t const dequantize_row_q = quantize_fns[type].dequantize_row_q; - - assert( dst->ne[0] == nc); - assert( dst->ne[1] == nr); - assert(src0->nb[0] == GGML_TYPE_SIZE[type]); - - for (int i = 0; i < nr; ++i) { - const int r = ((int32_t *) src1->data)[i]; - - dequantize_row_q( - (const void *) ((char *) src0->data + r*src0->nb[1]), - (float *) ((char *) dst->data + i*dst->nb[1]), nc); - } -} - -static void ggml_compute_forward_get_rows_f16( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - const struct ggml_tensor * src1, - struct ggml_tensor * dst) { - assert(params->ith == 0); - - if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) { - return; - } - - const int nc = src0->ne[0]; - const int nr = ggml_nelements(src1); - - assert( dst->ne[0] == nc); - assert( 
dst->ne[1] == nr); - assert(src0->nb[0] == sizeof(ggml_fp16_t)); - - for (int i = 0; i < nr; ++i) { - const int r = ((int32_t *) src1->data)[i]; - - for (int j = 0; j < nc; ++j) { - ggml_fp16_t v = ((ggml_fp16_t *) ((char *) src0->data + r*src0->nb[1]))[j]; - ((float *) ((char *) dst->data + i*dst->nb[1]))[j] = GGML_FP16_TO_FP32(v); - } - } -} - -static void ggml_compute_forward_get_rows_f32( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - const struct ggml_tensor * src1, - struct ggml_tensor * dst) { - assert(params->ith == 0); - - if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) { - return; - } - - const int nc = src0->ne[0]; - const int nr = ggml_nelements(src1); - - assert( dst->ne[0] == nc); - assert( dst->ne[1] == nr); - assert(src0->nb[0] == sizeof(float)); - - for (int i = 0; i < nr; ++i) { - const int r = ((int32_t *) src1->data)[i]; - - ggml_vec_cpy_f32(nc, - (float *) ((char *) dst->data + i*dst->nb[1]), - (float *) ((char *) src0->data + r*src0->nb[1])); - } -} - -static void ggml_compute_forward_get_rows( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - const struct ggml_tensor * src1, - struct ggml_tensor * dst) { - switch (src0->type) { - case GGML_TYPE_Q4_0: - case GGML_TYPE_Q4_1: - case GGML_TYPE_Q4_2: - case GGML_TYPE_Q4_3: - case GGML_TYPE_Q8_0: - { - ggml_compute_forward_get_rows_q(params, src0, src1, dst); - } break; - case GGML_TYPE_F16: - { - ggml_compute_forward_get_rows_f16(params, src0, src1, dst); - } break; - case GGML_TYPE_F32: - { - ggml_compute_forward_get_rows_f32(params, src0, src1, dst); - } break; - default: - { - GGML_ASSERT(false); - } break; - } - - //static bool first = true; - //printf("ne0 = %d, ne1 = %d, ne2 = %d\n", dst->ne[0], dst->ne[1], dst->ne[2]); - //if (first) { - // first = false; - //} else { - // for (int k = 0; k < dst->ne[1]; ++k) { - // for (int j = 0; j < dst->ne[0]/16; ++j) { - // for (int i = 0; i < 16; ++i) { - // printf("%8.4f ", ((float *) dst->data)[k*dst->ne[0] + j*16 + i]); - // } - // printf("\n"); - // } - // printf("\n"); - // } - // printf("\n"); - // exit(0); - //} -} - -// ggml_compute_forward_diag_mask_inf - -static void ggml_compute_forward_diag_mask_inf_f32( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - const struct ggml_tensor * src1, - struct ggml_tensor * dst) { - assert(params->ith == 0); - assert(src1->type == GGML_TYPE_I32); - assert(ggml_nelements(src1) == 1); - - if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) { - return; - } - - const int n_past = ((int32_t *) src1->data)[0]; - - // TODO: handle transposed/permuted matrices - - const int n = ggml_nrows(src0); - const int nc = src0->ne[0]; - const int nr = src0->ne[1]; - const int nz = n/nr; - - assert( dst->nb[0] == sizeof(float)); - assert(src0->nb[0] == sizeof(float)); - - for (int k = 0; k < nz; k++) { - for (int j = 0; j < nr; j++) { - for (int i = n_past; i < nc; i++) { - if (i > n_past + j) { - *(float *)((char *) dst->data + k*dst->nb[2] + j*dst->nb[1] + i*dst->nb[0]) = -INFINITY; - } - } - } - } -} - -static void ggml_compute_forward_diag_mask_inf( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - const struct ggml_tensor * src1, - struct ggml_tensor * dst) { - switch (src0->type) { - case GGML_TYPE_F32: - { - ggml_compute_forward_diag_mask_inf_f32(params, src0, src1, dst); - } break; - default: - { - GGML_ASSERT(false); - } break; - } -} - -// 
ggml_compute_forward_soft_max - -static void ggml_compute_forward_soft_max_f32( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - struct ggml_tensor * dst) { - GGML_ASSERT(ggml_is_contiguous(src0)); - GGML_ASSERT(ggml_is_contiguous(dst)); - GGML_ASSERT(ggml_are_same_shape(src0, dst)); - - if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) { - return; - } - - // TODO: handle transposed/permuted matrices - - const int ith = params->ith; - const int nth = params->nth; - - const int nc = src0->ne[0]; - const int nr = ggml_nrows(src0); - - // rows per thread - const int dr = (nr + nth - 1)/nth; - - // row range for this thread - const int ir0 = dr*ith; - const int ir1 = MIN(ir0 + dr, nr); - - for (int i1 = ir0; i1 < ir1; i1++) { - float *p = (float *)((char *) dst->data + i1*dst->nb[1]); - -#ifndef NDEBUG - for (int i = 0; i < nc; ++i) { - //printf("p[%d] = %f\n", i, p[i]); - assert(!isnan(p[i])); - } -#endif - - float max = -INFINITY; - ggml_vec_max_f32(nc, &max, p); - - ggml_float sum = 0.0; - - uint16_t scvt; - for (int i = 0; i < nc; i++) { - if (p[i] == -INFINITY) { - p[i] = 0.0f; - } else { - //const float val = (p[i] == -INFINITY) ? 0.0 : exp(p[i] - max); - ggml_fp16_t s = GGML_FP32_TO_FP16(p[i] - max); - memcpy(&scvt, &s, sizeof(scvt)); - const float val = GGML_FP16_TO_FP32(table_exp_f16[scvt]); - sum += (ggml_float)val; - p[i] = val; - } - } - - assert(sum > 0.0); - - sum = 1.0/sum; - ggml_vec_scale_f32(nc, p, sum); - -#ifndef NDEBUG - for (int i = 0; i < nc; ++i) { - assert(!isnan(p[i])); - assert(!isinf(p[i])); - } -#endif - } -} - -static void ggml_compute_forward_soft_max( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - struct ggml_tensor * dst) { - switch (src0->type) { - case GGML_TYPE_F32: - { - ggml_compute_forward_soft_max_f32(params, src0, dst); - } break; - default: - { - GGML_ASSERT(false); - } break; - } -} - -// ggml_compute_forward_rope - -static void ggml_compute_forward_rope_f32( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - const struct ggml_tensor * src1, - struct ggml_tensor * dst) { - assert(src1->type == GGML_TYPE_I32); - assert(ggml_nelements(src1) == 3); - - if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) { - return; - } - - const int n_past = ((int32_t *) src1->data)[0]; - const int n_dims = ((int32_t *) src1->data)[1]; - const int mode = ((int32_t *) src1->data)[2]; - - //const int64_t ne0 = src0->ne[0]; - const int64_t ne1 = src0->ne[1]; - const int64_t ne2 = src0->ne[2]; - const int64_t ne3 = src0->ne[3]; - - const int nb0 = src0->nb[0]; - const int nb1 = src0->nb[1]; - const int nb2 = src0->nb[2]; - const int nb3 = src0->nb[3]; - - //printf("ne0: %d, ne1: %d, ne2: %d, ne3: %d\n", ne0, ne1, ne2, ne3); - //printf("n_past = %d, ne2 = %d\n", n_past, ne2); - - assert(nb0 == sizeof(float)); - - const int ith = params->ith; - const int nth = params->nth; - - const int nr = ggml_nrows(src0); - - // rows per thread - const int dr = (nr + nth - 1)/nth; - - // row range for this thread - const int ir0 = dr*ith; - const int ir1 = MIN(ir0 + dr, nr); - - // row index used to determine which thread to use - int ir = 0; - - const float theta_scale = powf(10000.0, -2.0f/n_dims); - - const bool is_neox = mode & 2; - - for (int64_t i3 = 0; i3 < ne3; i3++) { - for (int64_t i2 = ((mode & 1) == 0 ? 0 : n_past); i2 < ne2; i2++) { - const int p = ((mode & 1) == 0 ? 
n_past + i2 : i2); - for (int64_t i1 = 0; i1 < ne1; i1++) { - if (ir++ < ir0) continue; - if (ir > ir1) break; - - float theta = (float)p; - - for (int i0 = 0; i0 < n_dims; i0 += 2) { - const float cos_theta = cosf(theta); - const float sin_theta = sinf(theta); - - theta *= theta_scale; - - if (!is_neox) { - const float * const src = (float *)((char *) src0->data + i3*nb3 + i2*nb2 + i1*nb1 + i0*nb0); - float * dst_data = (float *)((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 + i0*nb0); - - const float x0 = src[0]; - const float x1 = src[1]; - - dst_data[0] = x0*cos_theta - x1*sin_theta; - dst_data[1] = x0*sin_theta + x1*cos_theta; - } else { - const float * const src = (float *)((char *) src0->data + i3*nb3 + i2*nb2 + i1*nb1 + (i0/2)*nb0); - float * dst_data = (float *)((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 + (i0/2)*nb0); - - const float x0 = src[0]; - const float x1 = src[n_dims/2]; - - dst_data[0] = x0*cos_theta - x1*sin_theta; - dst_data[n_dims/2] = x0*sin_theta + x1*cos_theta; - } - } - } - } - } -} - -static void ggml_compute_forward_rope_f16( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - const struct ggml_tensor * src1, - struct ggml_tensor * dst) { - assert(src1->type == GGML_TYPE_I32); - assert(ggml_nelements(src1) == 3); - - if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) { - return; - } - - const int n_past = ((int32_t *) src1->data)[0]; - const int n_dims = ((int32_t *) src1->data)[1]; - const int mode = ((int32_t *) src1->data)[2]; - - //const int64_t ne0 = src0->ne[0]; - const int64_t ne1 = src0->ne[1]; - const int64_t ne2 = src0->ne[2]; - const int64_t ne3 = src0->ne[3]; - - const int nb0 = src0->nb[0]; - const int nb1 = src0->nb[1]; - const int nb2 = src0->nb[2]; - const int nb3 = src0->nb[3]; - - //printf("ne0: %d, ne1: %d, ne2: %d, ne3: %d\n", ne0, ne1, ne2, ne3); - //printf("n_past = %d, ne2 = %d\n", n_past, ne2); - - assert(nb0 == sizeof(ggml_fp16_t)); - - const int ith = params->ith; - const int nth = params->nth; - - const int nr = ggml_nrows(src0); - - // rows per thread - const int dr = (nr + nth - 1)/nth; - - // row range for this thread - const int ir0 = dr*ith; - const int ir1 = MIN(ir0 + dr, nr); - - // row index used to determine which thread to use - int ir = 0; - - const float theta_scale = powf(10000.0, -2.0f/n_dims); - - const bool is_neox = mode & 2; - - for (int64_t i3 = 0; i3 < ne3; i3++) { - for (int64_t i2 = ((mode & 1) == 0 ? 0 : n_past); i2 < ne2; i2++) { - const int p = ((mode & 1) == 0 ? 
n_past + i2 : i2); - for (int64_t i1 = 0; i1 < ne1; i1++) { - if (ir++ < ir0) continue; - if (ir > ir1) break; - - float theta = (float)p; - - for (int i0 = 0; i0 < n_dims; i0 += 2) { - const float cos_theta = cosf(theta); - const float sin_theta = sinf(theta); - - theta *= theta_scale; - - if (!is_neox) { - const ggml_fp16_t * const src = (ggml_fp16_t *)((char *) src0->data + i3*nb3 + i2*nb2 + i1*nb1 + i0*nb0); - ggml_fp16_t * dst_data = (ggml_fp16_t *)((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 + i0*nb0); - - const float x0 = GGML_FP16_TO_FP32(src[0]); - const float x1 = GGML_FP16_TO_FP32(src[1]); - - dst_data[0] = GGML_FP32_TO_FP16(x0*cos_theta - x1*sin_theta); - dst_data[1] = GGML_FP32_TO_FP16(x0*sin_theta + x1*cos_theta); - } else { - const ggml_fp16_t * const src = (ggml_fp16_t *)((char *) src0->data + i3*nb3 + i2*nb2 + i1*nb1 + (i0/2)*nb0); - ggml_fp16_t * dst_data = (ggml_fp16_t *)((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 + (i0/2)*nb0); - - const float x0 = GGML_FP16_TO_FP32(src[0]); - const float x1 = GGML_FP16_TO_FP32(src[n_dims/2]); - - dst_data[0] = GGML_FP32_TO_FP16(x0*cos_theta - x1*sin_theta); - dst_data[n_dims/2] = GGML_FP32_TO_FP16(x0*sin_theta + x1*cos_theta); - } - } - } - } - } -} - -static void ggml_compute_forward_rope( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - const struct ggml_tensor * src1, - struct ggml_tensor * dst) { - switch (src0->type) { - case GGML_TYPE_F16: - { - ggml_compute_forward_rope_f16(params, src0, src1, dst); - } break; - case GGML_TYPE_F32: - { - ggml_compute_forward_rope_f32(params, src0, src1, dst); - } break; - default: - { - GGML_ASSERT(false); - } break; - } -} - -// ggml_compute_forward_conv_1d_1s - -static void ggml_compute_forward_conv_1d_1s_f16_f32( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - const struct ggml_tensor * src1, - struct ggml_tensor * dst) { - GGML_ASSERT(src0->type == GGML_TYPE_F16); - GGML_ASSERT(src1->type == GGML_TYPE_F32); - GGML_ASSERT( dst->type == GGML_TYPE_F32); - - int64_t t0 = ggml_perf_time_us(); - UNUSED(t0); - - const int64_t ne00 = src0->ne[0]; - const int64_t ne01 = src0->ne[1]; - const int64_t ne02 = src0->ne[2]; - //const int64_t ne03 = src0->ne[3]; - - const int64_t ne10 = src1->ne[0]; - const int64_t ne11 = src1->ne[1]; - //const int64_t ne12 = src1->ne[2]; - //const int64_t ne13 = src1->ne[3]; - - //const int64_t ne0 = dst->ne[0]; - //const int64_t ne1 = dst->ne[1]; - //const int64_t ne2 = dst->ne[2]; - //const int64_t ne3 = dst->ne[3]; - //const int64_t ne = ne0*ne1*ne2*ne3; - - const int nb00 = src0->nb[0]; - const int nb01 = src0->nb[1]; - const int nb02 = src0->nb[2]; - //const int nb03 = src0->nb[3]; - - const int nb10 = src1->nb[0]; - const int nb11 = src1->nb[1]; - //const int nb12 = src1->nb[2]; - //const int nb13 = src1->nb[3]; - - //const int nb0 = dst->nb[0]; - const int nb1 = dst->nb[1]; - //const int nb2 = dst->nb[2]; - //const int nb3 = dst->nb[3]; - - const int ith = params->ith; - const int nth = params->nth; - - const int nk = ne00; - const int nh = nk/2; - - const int ew0 = ggml_up32(ne01); - - GGML_ASSERT(ne00 % 2 == 1); // TODO: support even kernel sizes - GGML_ASSERT(nb00 == sizeof(ggml_fp16_t)); - GGML_ASSERT(nb10 == sizeof(float)); - - if (params->type == GGML_TASK_INIT) { - // TODO: fix this memset (wsize is overestimated) - memset(params->wdata, 0, params->wsize); - - // prepare kernel data (src0) - { - ggml_fp16_t * const wdata = (ggml_fp16_t *) params->wdata + 0; - - for (int64_t i02 = 0; i02 < 
ne02; i02++) { - for (int64_t i01 = 0; i01 < ne01; i01++) { - const ggml_fp16_t * const src = (ggml_fp16_t *)((char *) src0->data + i02*nb02 + i01*nb01); - ggml_fp16_t * dst_data = wdata + i02*ew0*ne00; - for (int64_t i00 = 0; i00 < ne00; i00++) { - dst_data[i00*ew0 + i01] = src[i00]; - } - } - } - } - - // prepare source data (src1) - { - ggml_fp16_t * const wdata = (ggml_fp16_t *) params->wdata + ne02*ew0*ne00; - - for (int64_t i11 = 0; i11 < ne11; i11++) { - const float * const src = (float *)((char *) src1->data + i11*nb11); - ggml_fp16_t * dst_data = wdata; - for (int64_t i10 = 0; i10 < ne10; i10++) { - dst_data[(i10 + nh)*ew0 + i11] = GGML_FP32_TO_FP16(src[i10]); - } - } - } - - return; - } - - if (params->type == GGML_TASK_FINALIZE) { - return; - } - - // total rows in dst - const int nr = ne02; - - // rows per thread - const int dr = (nr + nth - 1)/nth; - - // row range for this thread - const int ir0 = dr*ith; - const int ir1 = MIN(ir0 + dr, nr); - - for (int i1 = ir0; i1 < ir1; i1++) { - float * dst_data = (float *)((char *) dst->data + i1*nb1); - for (int64_t i0 = 0; i0 < ne10; ++i0) { - dst_data[i0] = 0; - for (int k = -nh; k <= nh; k++) { - float v = 0.0f; - ggml_vec_dot_f16(ew0, &v, - (ggml_fp16_t *) params->wdata + i1*ew0*ne00 + (nh + k)*ew0, - (ggml_fp16_t *) params->wdata + ne02*ew0*ne00 + (i0 + nh + k)*ew0); - - dst_data[i0] += v; - } - } - } -} - -static void ggml_compute_forward_conv_1d_1s_f32( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - const struct ggml_tensor * src1, - struct ggml_tensor * dst) { - GGML_ASSERT(src0->type == GGML_TYPE_F32); - GGML_ASSERT(src1->type == GGML_TYPE_F32); - GGML_ASSERT( dst->type == GGML_TYPE_F32); - - int64_t t0 = ggml_perf_time_us(); - UNUSED(t0); - - const int64_t ne00 = src0->ne[0]; - const int64_t ne01 = src0->ne[1]; - const int64_t ne02 = src0->ne[2]; - //const int64_t ne03 = src0->ne[3]; - - const int64_t ne10 = src1->ne[0]; - const int64_t ne11 = src1->ne[1]; - //const int64_t ne12 = src1->ne[2]; - //const int64_t ne13 = src1->ne[3]; - - //const int64_t ne0 = dst->ne[0]; - //const int64_t ne1 = dst->ne[1]; - //const int64_t ne2 = dst->ne[2]; - //const int64_t ne3 = dst->ne[3]; - //const int64_t ne = ne0*ne1*ne2*ne3; - - const int nb00 = src0->nb[0]; - const int nb01 = src0->nb[1]; - const int nb02 = src0->nb[2]; - //const int nb03 = src0->nb[3]; - - const int nb10 = src1->nb[0]; - const int nb11 = src1->nb[1]; - //const int nb12 = src1->nb[2]; - //const int nb13 = src1->nb[3]; - - //const int nb0 = dst->nb[0]; - const int nb1 = dst->nb[1]; - //const int nb2 = dst->nb[2]; - //const int nb3 = dst->nb[3]; - - const int ith = params->ith; - const int nth = params->nth; - - const int nk = ne00; - const int nh = nk/2; - - const int ew0 = ggml_up32(ne01); - - GGML_ASSERT(ne00 % 2 == 1); // TODO: support even kernel sizes - GGML_ASSERT(nb00 == sizeof(float)); - GGML_ASSERT(nb10 == sizeof(float)); - - if (params->type == GGML_TASK_INIT) { - // TODO: fix this memset (wsize is overestimated) - memset(params->wdata, 0, params->wsize); - - // prepare kernel data (src0) - { - float * const wdata = (float *) params->wdata + 0; - - for (int64_t i02 = 0; i02 < ne02; i02++) { - for (int64_t i01 = 0; i01 < ne01; i01++) { - const float * const src = (float *)((char *) src0->data + i02*nb02 + i01*nb01); - float * dst_data = wdata + i02*ew0*ne00; - for (int64_t i00 = 0; i00 < ne00; i00++) { - dst_data[i00*ew0 + i01] = src[i00]; - } - } - } - } - - // prepare source data (src1) - { - float * const wdata = (float *) 
params->wdata + ne02*ew0*ne00; - - for (int64_t i11 = 0; i11 < ne11; i11++) { - const float * const src = (float *)((char *) src1->data + i11*nb11); - float * dst_data = wdata; - for (int64_t i10 = 0; i10 < ne10; i10++) { - dst_data[(i10 + nh)*ew0 + i11] = src[i10]; - } - } - } - - return; - } - - if (params->type == GGML_TASK_FINALIZE) { - return; - } - - // total rows in dst - const int nr = ne02; - - // rows per thread - const int dr = (nr + nth - 1)/nth; - - // row range for this thread - const int ir0 = dr*ith; - const int ir1 = MIN(ir0 + dr, nr); - - for (int i1 = ir0; i1 < ir1; i1++) { - float * dst_data = (float *)((char *) dst->data + i1*nb1); - for (int64_t i0 = 0; i0 < ne10; ++i0) { - dst_data[i0] = 0; - for (int k = -nh; k <= nh; k++) { - float v = 0.0f; - ggml_vec_dot_f32(ew0, &v, - (float *) params->wdata + i1*ew0*ne00 + (nh + k)*ew0, - (float *) params->wdata + ne02*ew0*ne00 + (i0 + nh + k)*ew0); - - dst_data[i0] += v; - } - } - } -} - -static void ggml_compute_forward_conv_1d_1s( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - const struct ggml_tensor * src1, - struct ggml_tensor * dst) { - switch (src0->type) { - case GGML_TYPE_F16: - { - ggml_compute_forward_conv_1d_1s_f16_f32(params, src0, src1, dst); - } break; - case GGML_TYPE_F32: - { - ggml_compute_forward_conv_1d_1s_f32(params, src0, src1, dst); - } break; - default: - { - GGML_ASSERT(false); - } break; - } -} - -// ggml_compute_forward_conv_1d_2s - -static void ggml_compute_forward_conv_1d_2s_f16_f32( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - const struct ggml_tensor * src1, - struct ggml_tensor * dst) { - GGML_ASSERT(src0->type == GGML_TYPE_F16); - GGML_ASSERT(src1->type == GGML_TYPE_F32); - GGML_ASSERT( dst->type == GGML_TYPE_F32); - - int64_t t0 = ggml_perf_time_us(); - UNUSED(t0); - - const int64_t ne00 = src0->ne[0]; - const int64_t ne01 = src0->ne[1]; - const int64_t ne02 = src0->ne[2]; - //const int64_t ne03 = src0->ne[3]; - - const int64_t ne10 = src1->ne[0]; - const int64_t ne11 = src1->ne[1]; - //const int64_t ne12 = src1->ne[2]; - //const int64_t ne13 = src1->ne[3]; - - //const int64_t ne0 = dst->ne[0]; - //const int64_t ne1 = dst->ne[1]; - //const int64_t ne2 = dst->ne[2]; - //const int64_t ne3 = dst->ne[3]; - //const int64_t ne = ne0*ne1*ne2*ne3; - - const int nb00 = src0->nb[0]; - const int nb01 = src0->nb[1]; - const int nb02 = src0->nb[2]; - //const int nb03 = src0->nb[3]; - - const int nb10 = src1->nb[0]; - const int nb11 = src1->nb[1]; - //const int nb12 = src1->nb[2]; - //const int nb13 = src1->nb[3]; - - //const int nb0 = dst->nb[0]; - const int nb1 = dst->nb[1]; - //const int nb2 = dst->nb[2]; - //const int nb3 = dst->nb[3]; - - const int ith = params->ith; - const int nth = params->nth; - - const int nk = ne00; - const int nh = nk/2; - - const int ew0 = ggml_up32(ne01); - - GGML_ASSERT(ne00 % 2 == 1); // TODO: support even kernel sizes - GGML_ASSERT(nb00 == sizeof(ggml_fp16_t)); - GGML_ASSERT(nb10 == sizeof(float)); - - if (params->type == GGML_TASK_INIT) { - // TODO: fix this memset (wsize is overestimated) - memset(params->wdata, 0, params->wsize); - - // prepare kernel data (src0) - { - ggml_fp16_t * const wdata = (ggml_fp16_t *) params->wdata + 0; - - for (int64_t i02 = 0; i02 < ne02; i02++) { - for (int64_t i01 = 0; i01 < ne01; i01++) { - const ggml_fp16_t * const src = (ggml_fp16_t *)((char *) src0->data + i02*nb02 + i01*nb01); - ggml_fp16_t * dst_data = wdata + i02*ew0*ne00; - for (int64_t i00 = 0; i00 < ne00; 
i00++) { - dst_data[i00*ew0 + i01] = src[i00]; - } - } - } - } - - // prepare source data (src1) - { - ggml_fp16_t * const wdata = (ggml_fp16_t *) params->wdata + ne02*ew0*ne00; - - for (int64_t i11 = 0; i11 < ne11; i11++) { - const float * const src = (float *)((char *) src1->data + i11*nb11); - ggml_fp16_t * dst_data = wdata; - for (int64_t i10 = 0; i10 < ne10; i10++) { - dst_data[(i10 + nh)*ew0 + i11] = GGML_FP32_TO_FP16(src[i10]); - } - } - } - - return; - } - - if (params->type == GGML_TASK_FINALIZE) { - return; - } - - // total rows in dst - const int nr = ne02; - - // rows per thread - const int dr = (nr + nth - 1)/nth; - - // row range for this thread - const int ir0 = dr*ith; - const int ir1 = MIN(ir0 + dr, nr); - - for (int i1 = ir0; i1 < ir1; i1++) { - float * dst_data = (float *)((char *) dst->data + i1*nb1); - for (int64_t i0 = 0; i0 < ne10; i0 += 2) { - dst_data[i0/2] = 0; - for (int k = -nh; k <= nh; k++) { - float v = 0.0f; - ggml_vec_dot_f16(ew0, &v, - (ggml_fp16_t *) params->wdata + i1*ew0*ne00 + (nh + k)*ew0, - (ggml_fp16_t *) params->wdata + ne02*ew0*ne00 + (i0 + nh + k)*ew0); - - dst_data[i0/2] += v; - } - } - } -} - -static void ggml_compute_forward_conv_1d_2s_f32( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - const struct ggml_tensor * src1, - struct ggml_tensor * dst) { - GGML_ASSERT(src0->type == GGML_TYPE_F32); - GGML_ASSERT(src1->type == GGML_TYPE_F32); - GGML_ASSERT( dst->type == GGML_TYPE_F32); - - int64_t t0 = ggml_perf_time_us(); - UNUSED(t0); - - const int64_t ne00 = src0->ne[0]; - const int64_t ne01 = src0->ne[1]; - const int64_t ne02 = src0->ne[2]; - //const int64_t ne03 = src0->ne[3]; - - const int64_t ne10 = src1->ne[0]; - const int64_t ne11 = src1->ne[1]; - //const int64_t ne12 = src1->ne[2]; - //const int64_t ne13 = src1->ne[3]; - - //const int64_t ne0 = dst->ne[0]; - //const int64_t ne1 = dst->ne[1]; - //const int64_t ne2 = dst->ne[2]; - //const int64_t ne3 = dst->ne[3]; - //const int64_t ne = ne0*ne1*ne2*ne3; - - const int nb00 = src0->nb[0]; - const int nb01 = src0->nb[1]; - const int nb02 = src0->nb[2]; - //const int nb03 = src0->nb[3]; - - const int nb10 = src1->nb[0]; - const int nb11 = src1->nb[1]; - //const int nb12 = src1->nb[2]; - //const int nb13 = src1->nb[3]; - - //const int nb0 = dst->nb[0]; - const int nb1 = dst->nb[1]; - //const int nb2 = dst->nb[2]; - //const int nb3 = dst->nb[3]; - - const int ith = params->ith; - const int nth = params->nth; - - const int nk = ne00; - const int nh = nk/2; - - const int ew0 = ggml_up32(ne01); - - GGML_ASSERT(ne00 % 2 == 1); // TODO: support even kernel sizes - GGML_ASSERT(nb00 == sizeof(float)); - GGML_ASSERT(nb10 == sizeof(float)); - - if (params->type == GGML_TASK_INIT) { - // TODO: fix this memset (wsize is overestimated) - memset(params->wdata, 0, params->wsize); - - // prepare kernel data (src0) - { - float * const wdata = (float *) params->wdata + 0; - - for (int64_t i02 = 0; i02 < ne02; i02++) { - for (int64_t i01 = 0; i01 < ne01; i01++) { - const float * const src = (float *)((char *) src0->data + i02*nb02 + i01*nb01); - float * dst_data = wdata + i02*ew0*ne00; - for (int64_t i00 = 0; i00 < ne00; i00++) { - dst_data[i00*ew0 + i01] = src[i00]; - } - } - } - } - - // prepare source data (src1) - { - float * const wdata = (float *) params->wdata + ne02*ew0*ne00; - - for (int64_t i11 = 0; i11 < ne11; i11++) { - const float * const src = (float *)((char *) src1->data + i11*nb11); - float * dst_data = wdata; - for (int64_t i10 = 0; i10 < ne10; i10++) { - 
dst_data[(i10 + nh)*ew0 + i11] = src[i10]; - } - } - } - - return; - } - - if (params->type == GGML_TASK_FINALIZE) { - return; - } - - // total rows in dst - const int nr = ne02; - - // rows per thread - const int dr = (nr + nth - 1)/nth; - - // row range for this thread - const int ir0 = dr*ith; - const int ir1 = MIN(ir0 + dr, nr); - - for (int i1 = ir0; i1 < ir1; i1++) { - float * dst_data = (float *)((char *) dst->data + i1*nb1); - for (int64_t i0 = 0; i0 < ne10; i0 += 2) { - dst_data[i0/2] = 0; - for (int k = -nh; k <= nh; k++) { - float v = 0.0f; - ggml_vec_dot_f32(ew0, &v, - (float *) params->wdata + i1*ew0*ne00 + (nh + k)*ew0, - (float *) params->wdata + ne02*ew0*ne00 + (i0 + nh + k)*ew0); - - dst_data[i0/2] += v; - } - } - } -} - -static void ggml_compute_forward_conv_1d_2s( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - const struct ggml_tensor * src1, - struct ggml_tensor * dst) { - switch (src0->type) { - case GGML_TYPE_F16: - { - ggml_compute_forward_conv_1d_2s_f16_f32(params, src0, src1, dst); - } break; - case GGML_TYPE_F32: - { - ggml_compute_forward_conv_1d_2s_f32(params, src0, src1, dst); - } break; - default: - { - GGML_ASSERT(false); - } break; - } -} - -// ggml_compute_forward_flash_attn - -static void ggml_compute_forward_flash_attn_f32( - const struct ggml_compute_params * params, - const struct ggml_tensor * q, - const struct ggml_tensor * k, - const struct ggml_tensor * v, - const bool masked, - struct ggml_tensor * dst) { - int64_t t0 = ggml_perf_time_us(); - UNUSED(t0); - - const int64_t neq0 = q->ne[0]; - const int64_t neq1 = q->ne[1]; - const int64_t neq2 = q->ne[2]; - const int64_t neq3 = q->ne[3]; - - const int64_t nek0 = k->ne[0]; - const int64_t nek1 = k->ne[1]; - //const int64_t nek2 = k->ne[2]; - //const int64_t nek3 = k->ne[3]; - - //const int64_t nev0 = v->ne[0]; - const int64_t nev1 = v->ne[1]; - //const int64_t nev2 = v->ne[2]; - //const int64_t nev3 = v->ne[3]; - - const int64_t ne0 = dst->ne[0]; - const int64_t ne1 = dst->ne[1]; - //const int64_t ne2 = dst->ne[2]; - //const int64_t ne3 = dst->ne[3]; - - const int nbk0 = k->nb[0]; - const int nbk1 = k->nb[1]; - const int nbk2 = k->nb[2]; - const int nbk3 = k->nb[3]; - - const int nbq0 = q->nb[0]; - const int nbq1 = q->nb[1]; - const int nbq2 = q->nb[2]; - const int nbq3 = q->nb[3]; - - const int nbv0 = v->nb[0]; - const int nbv1 = v->nb[1]; - const int nbv2 = v->nb[2]; - const int nbv3 = v->nb[3]; - - const int nb0 = dst->nb[0]; - const int nb1 = dst->nb[1]; - const int nb2 = dst->nb[2]; - const int nb3 = dst->nb[3]; - - const int ith = params->ith; - const int nth = params->nth; - - const int64_t D = neq0; - const int64_t N = neq1; - const int64_t P = nek1 - N; - const int64_t M = P + N; - - const int Mup = ggml_up(M, GGML_SOFT_MAX_UNROLL); - - GGML_ASSERT(ne0 == D); - GGML_ASSERT(ne1 == N); - GGML_ASSERT(P >= 0); - - GGML_ASSERT(nbq0 == sizeof(float)); - GGML_ASSERT(nbk0 == sizeof(float)); - GGML_ASSERT(nbv0 == sizeof(float)); - - GGML_ASSERT(neq0 == D); - GGML_ASSERT(nek0 == D); - GGML_ASSERT(nev1 == D); - - GGML_ASSERT(neq1 == N); - GGML_ASSERT(nek1 == N + P); - GGML_ASSERT(nev1 == D); - - // dst cannot be transposed or permuted - GGML_ASSERT(nb0 == sizeof(float)); - GGML_ASSERT(nb0 <= nb1); - GGML_ASSERT(nb1 <= nb2); - GGML_ASSERT(nb2 <= nb3); - - if (params->type == GGML_TASK_INIT) { - return; - } - - if (params->type == GGML_TASK_FINALIZE) { - return; - } - - // parallelize by q rows using ggml_vec_dot_f32 - - // total rows in q - const int nr = 
neq1*neq2*neq3; - - // rows per thread - const int dr = (nr + nth - 1)/nth; - - // row range for this thread - const int ir0 = dr*ith; - const int ir1 = MIN(ir0 + dr, nr); - - const float scale = 1.0f/sqrtf(D); - - //printf("P=%d N=%d D=%d ir0=%d ir1=%d scale = %f\n", P, N, D, ir0, ir1, scale); - - for (int ir = ir0; ir < ir1; ++ir) { - // q indices - const int iq3 = ir/(neq2*neq1); - const int iq2 = (ir - iq3*neq2*neq1)/neq1; - const int iq1 = (ir - iq3*neq2*neq1 - iq2*neq1); - - float * S = (float *) params->wdata + ith*(Mup + CACHE_LINE_SIZE_F32); - - for (int i = M; i < Mup; ++i) { - S[i] = -INFINITY; - } - - for (int64_t ic = 0; ic < nek1; ++ic) { - // k indices - const int ik3 = iq3; - const int ik2 = iq2; - const int ik1 = ic; - - // S indices - const int i1 = ik1; - - ggml_vec_dot_f32(neq0, - S + i1, - (float *) ((char *) k->data + (ik1*nbk1 + ik2*nbk2 + ik3*nbk3)), - (float *) ((char *) q->data + (iq1*nbq1 + iq2*nbq2 + iq3*nbq3))); - } - - // scale - ggml_vec_scale_f32(nek1, S, scale); - - if (masked) { - for (int64_t i = P; i < M; i++) { - if (i > P + iq1) { - S[i] = -INFINITY; - } - } - } - - // softmax - { - float max = -INFINITY; - ggml_vec_max_f32(M, &max, S); - - ggml_float sum = 0.0; - { -#ifdef GGML_SOFT_MAX_ACCELERATE - max = -max; - vDSP_vsadd(S, 1, &max, S, 1, Mup); - vvexpf(S, S, &Mup); - ggml_vec_sum_f32(Mup, &sum, S); -#else - uint16_t scvt[GGML_SOFT_MAX_UNROLL]; - ggml_float sump[GGML_SOFT_MAX_UNROLL] = { 0.0 }; - - for (int i = 0; i < Mup; i += GGML_SOFT_MAX_UNROLL) { - float * SS = S + i; - - for (int j = 0; j < GGML_SOFT_MAX_UNROLL; ++j) { - if (SS[j] == -INFINITY) { - SS[j] = 0.0f; - } else { - ggml_fp16_t s = GGML_FP32_TO_FP16(SS[j] - max); - memcpy(&scvt[j], &s, sizeof(uint16_t)); - const float val = GGML_FP16_TO_FP32(table_exp_f16[scvt[j]]); - sump[j] += (ggml_float)val; - SS[j] = val; - } - } - } - - for (int i = 0; i < GGML_SOFT_MAX_UNROLL; i++) { - sum += sump[i]; - } -#endif - } - - assert(sum > 0.0); - - sum = 1.0/sum; - ggml_vec_scale_f32(M, S, sum); - -#ifndef NDEBUG - for (int i = 0; i < M; ++i) { - assert(!isnan(S[i])); - assert(!isinf(S[i])); - } -#endif - } - - for (int64_t ic = 0; ic < nev1; ++ic) { - // dst indices - const int i1 = iq1; - const int i2 = iq2; - const int i3 = iq3; - - ggml_vec_dot_f32(nek1, - (float *) ((char *) dst->data + (ic*nb0 + i1*nb1 + i2*nb2 + i3*nb3)), - (float *) ((char *) v->data + ( ic*nbv1 + i2*nbv2 + i3*nbv3)), - S); - } - } -} - -static void ggml_compute_forward_flash_attn_f16( - const struct ggml_compute_params * params, - const struct ggml_tensor * q, - const struct ggml_tensor * k, - const struct ggml_tensor * v, - const bool masked, - struct ggml_tensor * dst) { - int64_t t0 = ggml_perf_time_us(); - UNUSED(t0); - - const int64_t neq0 = q->ne[0]; - const int64_t neq1 = q->ne[1]; - const int64_t neq2 = q->ne[2]; - const int64_t neq3 = q->ne[3]; - - const int64_t nek0 = k->ne[0]; - const int64_t nek1 = k->ne[1]; - //const int64_t nek2 = k->ne[2]; - //const int64_t nek3 = k->ne[3]; - - //const int64_t nev0 = v->ne[0]; - const int64_t nev1 = v->ne[1]; - //const int64_t nev2 = v->ne[2]; - //const int64_t nev3 = v->ne[3]; - - const int64_t ne0 = dst->ne[0]; - const int64_t ne1 = dst->ne[1]; - //const int64_t ne2 = dst->ne[2]; - //const int64_t ne3 = dst->ne[3]; - - const int nbk0 = k->nb[0]; - const int nbk1 = k->nb[1]; - const int nbk2 = k->nb[2]; - const int nbk3 = k->nb[3]; - - const int nbq0 = q->nb[0]; - const int nbq1 = q->nb[1]; - const int nbq2 = q->nb[2]; - const int nbq3 = q->nb[3]; - - const int nbv0 = 
v->nb[0]; - const int nbv1 = v->nb[1]; - const int nbv2 = v->nb[2]; - const int nbv3 = v->nb[3]; - - const int nb0 = dst->nb[0]; - const int nb1 = dst->nb[1]; - const int nb2 = dst->nb[2]; - const int nb3 = dst->nb[3]; - - const int ith = params->ith; - const int nth = params->nth; - - const int64_t D = neq0; - const int64_t N = neq1; - const int64_t P = nek1 - N; - const int64_t M = P + N; - - const int Mup = ggml_up(M, GGML_SOFT_MAX_UNROLL); - - GGML_ASSERT(ne0 == D); - GGML_ASSERT(ne1 == N); - GGML_ASSERT(P >= 0); - - GGML_ASSERT(nbq0 == sizeof(ggml_fp16_t)); - GGML_ASSERT(nbk0 == sizeof(ggml_fp16_t)); - GGML_ASSERT(nbv0 == sizeof(ggml_fp16_t)); - - GGML_ASSERT(neq0 == D); - GGML_ASSERT(nek0 == D); - GGML_ASSERT(nev1 == D); - - GGML_ASSERT(neq1 == N); - GGML_ASSERT(nek1 == N + P); - GGML_ASSERT(nev1 == D); - - // dst cannot be transposed or permuted - GGML_ASSERT(nb0 == sizeof(float)); - GGML_ASSERT(nb0 <= nb1); - GGML_ASSERT(nb1 <= nb2); - GGML_ASSERT(nb2 <= nb3); - - if (params->type == GGML_TASK_INIT) { - return; - } - - if (params->type == GGML_TASK_FINALIZE) { - return; - } - - // parallelize by q rows using ggml_vec_dot_f32 - - // total rows in q - const int nr = neq1*neq2*neq3; - - // rows per thread - const int dr = (nr + nth - 1)/nth; - - // row range for this thread - const int ir0 = dr*ith; - const int ir1 = MIN(ir0 + dr, nr); - - const float scale = 1.0f/sqrtf(D); - - //printf("P=%d N=%d D=%d ir0=%d ir1=%d scale = %f\n", P, N, D, ir0, ir1, scale); - - for (int ir = ir0; ir < ir1; ++ir) { - // q indices - const int iq3 = ir/(neq2*neq1); - const int iq2 = (ir - iq3*neq2*neq1)/neq1; - const int iq1 = (ir - iq3*neq2*neq1 - iq2*neq1); - - float * S = (float *) params->wdata + ith*(2*Mup + CACHE_LINE_SIZE_F32); - - for (int i = M; i < Mup; ++i) { - S[i] = -INFINITY; - } - - if (GGML_VEC_DOT_UNROLL > 2 || nek1 % GGML_VEC_DOT_UNROLL != 0) { - for (int64_t ic = 0; ic < nek1; ++ic) { - // k indices - const int ik3 = iq3; - const int ik2 = iq2; - const int ik1 = ic; - - // S indices - const int i1 = ik1; - - ggml_vec_dot_f16(neq0, - S + i1, - (ggml_fp16_t *) ((char *) k->data + (ik1*nbk1 + ik2*nbk2 + ik3*nbk3)), - (ggml_fp16_t *) ((char *) q->data + (iq1*nbq1 + iq2*nbq2 + iq3*nbq3))); - } - } else { - for (int64_t ic = 0; ic < nek1; ic += GGML_VEC_DOT_UNROLL) { - // k indices - const int ik3 = iq3; - const int ik2 = iq2; - const int ik1 = ic; - - // S indices - const int i1 = ik1; - - ggml_vec_dot_f16_unroll(neq0, nbk1, - S + i1, - ((char *) k->data + (ik1*nbk1 + ik2*nbk2 + ik3*nbk3)), - (ggml_fp16_t *) ((char *) q->data + (iq1*nbq1 + iq2*nbq2 + iq3*nbq3))); - } - } - - // scale - ggml_vec_scale_f32(nek1, S, scale); - - if (masked) { - for (int64_t i = P; i < M; i++) { - if (i > P + iq1) { - S[i] = -INFINITY; - } - } - } - - // softmax - { - float max = -INFINITY; - ggml_vec_max_f32(M, &max, S); - - ggml_float sum = 0.0; - { -#ifdef GGML_SOFT_MAX_ACCELERATE - max = -max; - vDSP_vsadd(S, 1, &max, S, 1, Mup); - vvexpf(S, S, &Mup); - ggml_vec_sum_f32(Mup, &sum, S); -#else - uint16_t scvt[GGML_SOFT_MAX_UNROLL]; - ggml_float sump[GGML_SOFT_MAX_UNROLL] = { 0.0 }; - - for (int i = 0; i < Mup; i += GGML_SOFT_MAX_UNROLL) { - float * SS = S + i; - - for (int j = 0; j < GGML_SOFT_MAX_UNROLL; ++j) { - if (SS[j] == -INFINITY) { - SS[j] = 0.0f; - } else { - ggml_fp16_t s = GGML_FP32_TO_FP16(SS[j] - max); - memcpy(&scvt[j], &s, sizeof(uint16_t)); - const float val = GGML_FP16_TO_FP32(table_exp_f16[scvt[j]]); - sump[j] += (ggml_float)val; - SS[j] = val; - } - } - } - - for (int i = 0; i < 
GGML_SOFT_MAX_UNROLL; i++) { - sum += sump[i]; - } -#endif - } - - assert(sum > 0.0); - - sum = 1.0/sum; - ggml_vec_scale_f32(M, S, sum); - -#ifndef NDEBUG - for (int i = 0; i < M; ++i) { - assert(!isnan(S[i])); - assert(!isinf(S[i])); - } -#endif - } - - ggml_fp16_t * S16 = (ggml_fp16_t *) ((float *) params->wdata + ith*(2*Mup + CACHE_LINE_SIZE_F32) + Mup); - - for (int64_t i = 0; i < M; i++) { - S16[i] = GGML_FP32_TO_FP16(S[i]); - } - - if (GGML_VEC_DOT_UNROLL == 1 || (nev1 % GGML_VEC_DOT_UNROLL != 0)) { - for (int64_t ic = 0; ic < nev1; ++ic) { - // dst indices - const int i1 = iq1; - const int i2 = iq2; - const int i3 = iq3; - - ggml_vec_dot_f16(nek1, - (float *) ((char *) dst->data + (ic*nb0 + i1*nb1 + i2*nb2 + i3*nb3)), - (ggml_fp16_t *) ((char *) v->data + ( ic*nbv1 + i2*nbv2 + i3*nbv3)), - S16); - } - } else { - for (int64_t ic = 0; ic < nev1; ic += GGML_VEC_DOT_UNROLL) { - // dst indices - const int i1 = iq1; - const int i2 = iq2; - const int i3 = iq3; - - ggml_vec_dot_f16_unroll(nek1, nbv1, - (float *) ((char *) dst->data + (ic*nb0 + i1*nb1 + i2*nb2 + i3*nb3)), - ((char *) v->data + ( ic*nbv1 + i2*nbv2 + i3*nbv3)), - S16); - } - } - } -} - -static void ggml_compute_forward_flash_attn( - const struct ggml_compute_params * params, - const struct ggml_tensor * q, - const struct ggml_tensor * k, - const struct ggml_tensor * v, - const bool masked, - struct ggml_tensor * dst) { - switch (q->type) { - case GGML_TYPE_F16: - { - ggml_compute_forward_flash_attn_f16(params, q, k, v, masked, dst); - } break; - case GGML_TYPE_F32: - { - ggml_compute_forward_flash_attn_f32(params, q, k, v, masked, dst); - } break; - default: - { - GGML_ASSERT(false); - } break; - } -} - -// ggml_compute_forward_flash_ff - -static void ggml_compute_forward_flash_ff_f16( - const struct ggml_compute_params * params, - const struct ggml_tensor * a, // F16 - const struct ggml_tensor * b0, // F16 fc_w - const struct ggml_tensor * b1, // F32 fc_b - const struct ggml_tensor * c0, // F16 proj_w - const struct ggml_tensor * c1, // F32 proj_b - struct ggml_tensor * dst) { - int64_t t0 = ggml_perf_time_us(); - UNUSED(t0); - - const int64_t nea0 = a->ne[0]; - const int64_t nea1 = a->ne[1]; - const int64_t nea2 = a->ne[2]; - const int64_t nea3 = a->ne[3]; - - const int64_t neb00 = b0->ne[0]; - const int64_t neb01 = b0->ne[1]; - //const int64_t neb02 = b0->ne[2]; - //const int64_t neb03 = b0->ne[3]; - - const int64_t neb10 = b1->ne[0]; - const int64_t neb11 = b1->ne[1]; - //const int64_t neb12 = b1->ne[2]; - //const int64_t neb13 = b1->ne[3]; - - const int64_t nec00 = c0->ne[0]; - const int64_t nec01 = c0->ne[1]; - //const int64_t nec02 = c0->ne[2]; - //const int64_t nec03 = c0->ne[3]; - - const int64_t nec10 = c1->ne[0]; - const int64_t nec11 = c1->ne[1]; - //const int64_t nec12 = c1->ne[2]; - //const int64_t nec13 = c1->ne[3]; - - const int64_t ne0 = dst->ne[0]; - const int64_t ne1 = dst->ne[1]; - const int64_t ne2 = dst->ne[2]; - //const int64_t ne3 = dst->ne[3]; - - const int nba0 = a->nb[0]; - const int nba1 = a->nb[1]; - const int nba2 = a->nb[2]; - const int nba3 = a->nb[3]; - - const int nbb00 = b0->nb[0]; - const int nbb01 = b0->nb[1]; - const int nbb02 = b0->nb[2]; - const int nbb03 = b0->nb[3]; - - const int nbb10 = b1->nb[0]; - //const int nbb11 = b1->nb[1]; - //const int nbb12 = b1->nb[2]; - //const int nbb13 = b1->nb[3]; - - const int nbc00 = c0->nb[0]; - const int nbc01 = c0->nb[1]; - const int nbc02 = c0->nb[2]; - const int nbc03 = c0->nb[3]; - - const int nbc10 = c1->nb[0]; - //const int nbc11 = c1->nb[1]; 
- //const int nbc12 = c1->nb[2]; - //const int nbc13 = c1->nb[3]; - - const int nb0 = dst->nb[0]; - const int nb1 = dst->nb[1]; - const int nb2 = dst->nb[2]; - const int nb3 = dst->nb[3]; - - const int ith = params->ith; - const int nth = params->nth; - - const int64_t D = nea0; - //const int64_t N = nea1; - const int64_t M = neb01; - - GGML_ASSERT(ne0 == nea0); - GGML_ASSERT(ne1 == nea1); - GGML_ASSERT(ne2 == nea2); - - GGML_ASSERT(nba0 == sizeof(ggml_fp16_t)); - GGML_ASSERT(nbb00 == sizeof(ggml_fp16_t)); - GGML_ASSERT(nbb10 == sizeof(float)); - GGML_ASSERT(nbc00 == sizeof(ggml_fp16_t)); - GGML_ASSERT(nbc10 == sizeof(float)); - - GGML_ASSERT(neb00 == D); - GGML_ASSERT(neb01 == M); - GGML_ASSERT(neb10 == M); - GGML_ASSERT(neb11 == 1); - - GGML_ASSERT(nec00 == M); - GGML_ASSERT(nec01 == D); - GGML_ASSERT(nec10 == D); - GGML_ASSERT(nec11 == 1); - - // dst cannot be transposed or permuted - GGML_ASSERT(nb0 == sizeof(float)); - GGML_ASSERT(nb0 <= nb1); - GGML_ASSERT(nb1 <= nb2); - GGML_ASSERT(nb2 <= nb3); - - if (params->type == GGML_TASK_INIT) { - return; - } - - if (params->type == GGML_TASK_FINALIZE) { - return; - } - - // parallelize by a rows using ggml_vec_dot_f32 - - // total rows in a - const int nr = nea1*nea2*nea3; - - // rows per thread - const int dr = (nr + nth - 1)/nth; - - // row range for this thread - const int ir0 = dr*ith; - const int ir1 = MIN(ir0 + dr, nr); - - for (int ir = ir0; ir < ir1; ++ir) { - // a indices - const int ia3 = ir/(nea2*nea1); - const int ia2 = (ir - ia3*nea2*nea1)/nea1; - const int ia1 = (ir - ia3*nea2*nea1 - ia2*nea1); - - float * S = (float *) params->wdata + ith*(2*M + CACHE_LINE_SIZE_F32); - - for (int64_t ic = 0; ic < neb01; ++ic) { - // b0 indices - const int ib03 = ia3; - const int ib02 = ia2; - const int ib01 = ic; - - // S indices - const int i1 = ib01; - - ggml_vec_dot_f16(nea0, - S + i1, - (ggml_fp16_t *) ((char *) b0->data + (ib01*nbb01 + ib02*nbb02 + ib03*nbb03)), - (ggml_fp16_t *) ((char *) a->data + ( ia1*nba1 + ia2*nba2 + ia3*nba3))); - } - - ggml_vec_add_f32(neb01, S, S, (float *) b1->data); - //ggml_vec_gelu_f32(neb01, S, S); - - ggml_fp16_t * S16 = (ggml_fp16_t *) ((float *) params->wdata + ith*(2*M + CACHE_LINE_SIZE_F32) + M); - - for (int64_t i = 0; i < M; i++) { - S16[i] = GGML_FP32_TO_FP16(S[i]); - } - - ggml_vec_gelu_f16(neb01, S16, S16); - - { - // dst indices - const int i1 = ia1; - const int i2 = ia2; - const int i3 = ia3; - - for (int64_t ic = 0; ic < nec01; ++ic) { - - ggml_vec_dot_f16(neb01, - (float *) ((char *) dst->data + (ic*nb0 + i1*nb1 + i2*nb2 + i3*nb3)), - (ggml_fp16_t *) ((char *) c0->data + ( ic*nbc01 + i2*nbc02 + i3*nbc03)), - S16); - } - - ggml_vec_add_f32(nec01, - (float *) ((char *) dst->data + (i1*nb1 + i2*nb2 + i3*nb3)), - (float *) ((char *) dst->data + (i1*nb1 + i2*nb2 + i3*nb3)), - (float *) c1->data); - } - } -} - -static void ggml_compute_forward_flash_ff( - const struct ggml_compute_params * params, - const struct ggml_tensor * a, - const struct ggml_tensor * b0, - const struct ggml_tensor * b1, - const struct ggml_tensor * c0, - const struct ggml_tensor * c1, - struct ggml_tensor * dst) { - switch (b0->type) { - case GGML_TYPE_F16: - { - ggml_compute_forward_flash_ff_f16(params, a, b0, b1, c0, c1, dst); - } break; - case GGML_TYPE_F32: - { - GGML_ASSERT(false); // TODO - } break; - default: - { - GGML_ASSERT(false); - } break; - } -} - -// ggml_compute_forward_map_unary - -static void ggml_compute_forward_map_unary_f32( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - 
struct ggml_tensor * dst, - const ggml_unary_op_f32_t fun) { - GGML_ASSERT(ggml_are_same_shape(src0, dst)); - - if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) { - return; - } - - const int n = ggml_nrows(src0); - const int nc = src0->ne[0]; - - assert( dst->nb[0] == sizeof(float)); - assert(src0->nb[0] == sizeof(float)); - - for (int i = 0; i < n; i++) { - fun(nc, - (float *) ((char *) dst->data + i*( dst->nb[1])), - (float *) ((char *) src0->data + i*(src0->nb[1]))); - } -} - - -static void ggml_compute_forward_map_unary( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - struct ggml_tensor * dst, - const ggml_unary_op_f32_t fun) { - switch (src0->type) { - case GGML_TYPE_F32: - { - ggml_compute_forward_map_unary_f32(params, src0, dst, fun); - } break; - default: - { - GGML_ASSERT(false); - } break; - } -} - -// ggml_compute_forward_map_binary - -static void ggml_compute_forward_map_binary_f32( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - const struct ggml_tensor * src1, - struct ggml_tensor * dst, - const ggml_binary_op_f32_t fun) { - assert(params->ith == 0); - assert(ggml_are_same_shape(src0, src1) && ggml_are_same_shape(src0, dst)); - - if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) { - return; - } - - const int n = ggml_nrows(src0); - const int nc = src0->ne[0]; - - assert( dst->nb[0] == sizeof(float)); - assert(src0->nb[0] == sizeof(float)); - assert(src1->nb[0] == sizeof(float)); - - for (int i = 0; i < n; i++) { - fun(nc, - (float *) ((char *) dst->data + i*( dst->nb[1])), - (float *) ((char *) src0->data + i*(src0->nb[1])), - (float *) ((char *) src1->data + i*(src1->nb[1]))); - } -} - - -static void ggml_compute_forward_map_binary( - const struct ggml_compute_params * params, - const struct ggml_tensor * src0, - const struct ggml_tensor * src1, - struct ggml_tensor * dst, - const ggml_binary_op_f32_t fun) { - switch (src0->type) { - case GGML_TYPE_F32: - { - ggml_compute_forward_map_binary_f32(params, src0, src1, dst, fun); - } break; - default: - { - GGML_ASSERT(false); - } break; - } -} - -///////////////////////////////// - -static void ggml_compute_forward(struct ggml_compute_params * params, struct ggml_tensor * tensor) { - GGML_ASSERT(params); - - switch (tensor->op) { - case GGML_OP_DUP: - { - ggml_compute_forward_dup(params, tensor->src0, tensor); - } break; - case GGML_OP_ADD: - { - ggml_compute_forward_add(params, tensor->src0, tensor->src1, tensor); - } break; - case GGML_OP_SUB: - { - ggml_compute_forward_sub(params, tensor->src0, tensor->src1, tensor); - } break; - case GGML_OP_MUL: - { - ggml_compute_forward_mul(params, tensor->src0, tensor->src1, tensor); - } break; - case GGML_OP_DIV: - { - ggml_compute_forward_div(params, tensor->src0, tensor->src1, tensor); - } break; - case GGML_OP_SQR: - { - ggml_compute_forward_sqr(params, tensor->src0, tensor); - } break; - case GGML_OP_SQRT: - { - ggml_compute_forward_sqrt(params, tensor->src0, tensor); - } break; - case GGML_OP_SUM: - { - ggml_compute_forward_sum(params, tensor->src0, tensor); - } break; - case GGML_OP_MEAN: - { - ggml_compute_forward_mean(params, tensor->src0, tensor); - } break; - case GGML_OP_REPEAT: - { - ggml_compute_forward_repeat(params, tensor->src0, tensor); - } break; - case GGML_OP_ABS: - { - ggml_compute_forward_abs(params, tensor->src0, tensor); - } break; - case GGML_OP_SGN: - { - ggml_compute_forward_sgn(params, tensor->src0, tensor); - } break; - case GGML_OP_NEG: - { - 
ggml_compute_forward_neg(params, tensor->src0, tensor); - } break; - case GGML_OP_STEP: - { - ggml_compute_forward_step(params, tensor->src0, tensor); - } break; - case GGML_OP_RELU: - { - ggml_compute_forward_relu(params, tensor->src0, tensor); - } break; - case GGML_OP_GELU: - { - ggml_compute_forward_gelu(params, tensor->src0, tensor); - } break; - case GGML_OP_SILU: - { - ggml_compute_forward_silu(params, tensor->src0, tensor); - } break; - case GGML_OP_NORM: - { - ggml_compute_forward_norm(params, tensor->src0, tensor); - } break; - case GGML_OP_RMS_NORM: - { - ggml_compute_forward_rms_norm(params, tensor->src0, tensor); - } break; - case GGML_OP_MUL_MAT: - { - ggml_compute_forward_mul_mat(params, tensor->src0, tensor->src1, tensor); - } break; - case GGML_OP_SCALE: - { - ggml_compute_forward_scale(params, tensor->src0, tensor->src1, tensor); - } break; - case GGML_OP_CPY: - { - ggml_compute_forward_cpy(params, tensor->src0, tensor); - } break; - case GGML_OP_CONT: - { - ggml_compute_forward_cont(params, tensor->src0, tensor); - } break; - case GGML_OP_RESHAPE: - { - ggml_compute_forward_reshape(params, tensor->src0, tensor); - } break; - case GGML_OP_VIEW: - { - ggml_compute_forward_view(params, tensor->src0); - } break; - case GGML_OP_PERMUTE: - { - ggml_compute_forward_permute(params, tensor->src0); - } break; - case GGML_OP_TRANSPOSE: - { - ggml_compute_forward_transpose(params, tensor->src0); - } break; - case GGML_OP_GET_ROWS: - { - ggml_compute_forward_get_rows(params, tensor->src0, tensor->src1, tensor); - } break; - case GGML_OP_DIAG_MASK_INF: - { - ggml_compute_forward_diag_mask_inf(params, tensor->src0, tensor->src1, tensor); - } break; - case GGML_OP_SOFT_MAX: - { - ggml_compute_forward_soft_max(params, tensor->src0, tensor); - } break; - case GGML_OP_ROPE: - { - ggml_compute_forward_rope(params, tensor->src0, tensor->src1, tensor); - } break; - case GGML_OP_CONV_1D_1S: - { - ggml_compute_forward_conv_1d_1s(params, tensor->src0, tensor->src1, tensor); - } break; - case GGML_OP_CONV_1D_2S: - { - ggml_compute_forward_conv_1d_2s(params, tensor->src0, tensor->src1, tensor); - } break; - case GGML_OP_FLASH_ATTN: - { - int32_t t = ggml_get_i32_1d(tensor->opt[1], 0); - GGML_ASSERT(t == 0 || t == 1); - bool masked = t != 0; - ggml_compute_forward_flash_attn(params, tensor->src0, tensor->src1, tensor->opt[0], masked, tensor); - } break; - case GGML_OP_FLASH_FF: - { - ggml_compute_forward_flash_ff(params, tensor->src0, tensor->src1, tensor->opt[0], tensor->opt[1], tensor->opt[2], tensor); - } break; - case GGML_OP_MAP_UNARY: - { - const ggml_unary_op_f32_t fun = *((ggml_unary_op_f32_t *)tensor->opt[0]->data); - ggml_compute_forward_map_unary(params, tensor->src0, tensor, fun); - } - break; - case GGML_OP_MAP_BINARY: - { - const ggml_binary_op_f32_t fun = *((ggml_binary_op_f32_t *)tensor->opt[0]->data); - ggml_compute_forward_map_binary(params, tensor->src0, tensor->src1, tensor, fun); - } - break; - case GGML_OP_NONE: - { - // nop - } break; - case GGML_OP_COUNT: - { - GGML_ASSERT(false); - } break; - } -} - -//////////////////////////////////////////////////////////////////////////////// - -static void ggml_compute_backward(struct ggml_context * ctx, struct ggml_tensor * tensor, bool inplace) { - struct ggml_tensor * src0 = tensor->src0; - struct ggml_tensor * src1 = tensor->src1; - - switch (tensor->op) { - case GGML_OP_DUP: - { - if (src0->grad) { - src0->grad = ggml_add_impl(ctx, src0->grad, tensor->grad, inplace); - } - } break; - case GGML_OP_ADD: - { - if (src0->grad) { - 
src0->grad = ggml_add_impl(ctx, src0->grad, tensor->grad, inplace); - } - if (src1->grad) { - src1->grad = ggml_add_impl(ctx, src1->grad, tensor->grad, inplace); - } - } break; - case GGML_OP_SUB: - { - if (src0->grad) { - src0->grad = ggml_add_impl(ctx, src0->grad, tensor->grad, inplace); - } - if (src1->grad) { - src1->grad = ggml_sub_impl(ctx, src1->grad, tensor->grad, inplace); - } - } break; - case GGML_OP_MUL: - { - if (src0->grad) { - src0->grad = - ggml_add_impl(ctx, - src0->grad, - ggml_mul(ctx, src1, tensor->grad), - inplace); - } - if (src1->grad) { - src1->grad = - ggml_add_impl(ctx, - src1->grad, - ggml_mul(ctx, src0, tensor->grad), - inplace); - } - } break; - case GGML_OP_DIV: - { - if (src0->grad) { - src0->grad = - ggml_add_impl(ctx, - src0->grad, - ggml_div(ctx, tensor->grad, src1), - inplace); - } - if (src1->grad) { - src1->grad = - ggml_sub_impl(ctx, - src1->grad, - ggml_mul(ctx, - tensor->grad, - ggml_div(ctx, tensor, src1)), - inplace); - } - } break; - case GGML_OP_SQR: - { - if (src0->grad) { - src0->grad = - ggml_add_impl(ctx, - src0->grad, - ggml_mul(ctx, - ggml_mul(ctx, src0, tensor->grad), - ggml_repeat(ctx, ggml_new_f32(ctx, 2.0f), src0)), - inplace); - } - } break; - case GGML_OP_SQRT: - { - if (src0->grad) { - src0->grad = - ggml_add_impl(ctx, - src0->grad, - ggml_div(ctx, - ggml_repeat(ctx, ggml_new_f32(ctx, 0.5f), tensor), - tensor), - inplace); - } - } break; - case GGML_OP_SUM: - { - if (src0->grad) { - src0->grad = - ggml_add_impl(ctx, - src0->grad, - ggml_repeat(ctx, tensor->grad, src0->grad), - inplace); - } - } break; - case GGML_OP_MEAN: - { - GGML_ASSERT(false); // TODO: implement - } break; - case GGML_OP_REPEAT: - { - if (src0->grad) { - src0->grad = - ggml_add_impl(ctx, - src0->grad, - ggml_sum(ctx, tensor->grad), - inplace); - } - } break; - case GGML_OP_ABS: - { - if (src0->grad) { - src0->grad = - ggml_add_impl(ctx, - src0->grad, - ggml_mul(ctx, - ggml_sgn(ctx, src0), - tensor->grad), - inplace); - } - } break; - case GGML_OP_SGN: - { - if (src0->grad) { - // noop - } - } break; - case GGML_OP_NEG: - { - if (src0->grad) { - src0->grad = ggml_sub_impl(ctx, src0->grad, tensor->grad, inplace); - } - } break; - case GGML_OP_STEP: - { - if (src0->grad) { - // noop - } - } break; - case GGML_OP_RELU: - { - if (src0->grad) { - src0->grad = ggml_sub_impl(ctx, - src0->grad, - ggml_mul(ctx, - ggml_step(ctx, src0), - tensor->grad), - inplace); - } - } break; - case GGML_OP_GELU: - { - GGML_ASSERT(false); // TODO: not implemented - } break; - case GGML_OP_SILU: - { - GGML_ASSERT(false); // TODO: not implemented - } break; - case GGML_OP_NORM: - { - GGML_ASSERT(false); // TODO: not implemented - } break; - case GGML_OP_RMS_NORM: - { - GGML_ASSERT(false); // TODO: not implemented - } break; - case GGML_OP_MUL_MAT: - { - if (src0->grad) { - // TODO: this requires outer product - ggml_out_prod(ctx, src1, tensor->grad); - GGML_ASSERT(false); - } - if (src1->grad) { - src1->grad = - ggml_add_impl(ctx, - src1->grad, - ggml_mul_mat(ctx, - ggml_cont(ctx, ggml_transpose(ctx, src0)), - tensor->grad), - inplace); - } - } break; - case GGML_OP_SCALE: - { - GGML_ASSERT(false); // TODO: not implemented - } break; - case GGML_OP_CPY: - { - GGML_ASSERT(false); // TODO: not implemented - } break; - case GGML_OP_CONT: - { - GGML_ASSERT(false); // TODO: not implemented - } break; - case GGML_OP_RESHAPE: - { - GGML_ASSERT(false); // TODO: not implemented - } break; - case GGML_OP_VIEW: - { - GGML_ASSERT(false); // not supported - } break; - case GGML_OP_PERMUTE: - { - 
GGML_ASSERT(false); // TODO: not implemented - } break; - case GGML_OP_TRANSPOSE: - { - GGML_ASSERT(false); // TODO: not implemented - } break; - case GGML_OP_GET_ROWS: - { - GGML_ASSERT(false); // TODO: not implemented - } break; - case GGML_OP_DIAG_MASK_INF: - { - GGML_ASSERT(false); // TODO: not implemented - } break; - case GGML_OP_SOFT_MAX: - { - GGML_ASSERT(false); // TODO: not implemented - } break; - case GGML_OP_ROPE: - { - GGML_ASSERT(false); // TODO: not implemented - } break; - case GGML_OP_CONV_1D_1S: - { - GGML_ASSERT(false); // TODO: not implemented - } break; - case GGML_OP_CONV_1D_2S: - { - GGML_ASSERT(false); // TODO: not implemented - } break; - case GGML_OP_FLASH_ATTN: - { - GGML_ASSERT(false); // not supported - } break; - case GGML_OP_FLASH_FF: - { - GGML_ASSERT(false); // not supported - } break; - case GGML_OP_MAP_UNARY: - case GGML_OP_MAP_BINARY: - { - GGML_ASSERT(false); // not supported - } break; - case GGML_OP_NONE: - { - // nop - } break; - case GGML_OP_COUNT: - { - GGML_ASSERT(false); - } break; - } -} - -static void ggml_visit_parents(struct ggml_cgraph * cgraph, struct ggml_tensor * node) { - if (node->grad == NULL) { - // this usually happens when we generate intermediate nodes from constants in the backward pass - // it can also happen during forward pass, if the user performs computations with constants - if (node->op != GGML_OP_NONE) { - //GGML_PRINT_DEBUG("%s: warning: node %p has no grad, but op %d\n", __func__, (void *) node, node->op); - } - } - - // check if already visited - for (int i = 0; i < cgraph->n_nodes; i++) { - if (cgraph->nodes[i] == node) { - return; - } - } - - for (int i = 0; i < cgraph->n_leafs; i++) { - if (cgraph->leafs[i] == node) { - return; - } - } - - if (node->src0) { - ggml_visit_parents(cgraph, node->src0); - } - - if (node->src1) { - ggml_visit_parents(cgraph, node->src1); - } - - for (int i = 0; i < GGML_MAX_OPT; ++i) { - if (node->opt[i]) { - ggml_visit_parents(cgraph, node->opt[i]); - } - } - - if (node->op == GGML_OP_NONE && node->grad == NULL) { - // reached a leaf node, not part of the gradient graph (e.g. 
a constant) - GGML_ASSERT(cgraph->n_leafs < GGML_MAX_NODES); - - cgraph->leafs[cgraph->n_leafs] = node; - cgraph->n_leafs++; - } else { - GGML_ASSERT(cgraph->n_nodes < GGML_MAX_NODES); - - cgraph->nodes[cgraph->n_nodes] = node; - cgraph->grads[cgraph->n_nodes] = node->grad; - cgraph->n_nodes++; - } -} - -static void ggml_build_forward_impl(struct ggml_cgraph * cgraph, struct ggml_tensor * tensor, bool expand) { - if (!expand) { - cgraph->n_nodes = 0; - cgraph->n_leafs = 0; - } - - const int n0 = cgraph->n_nodes; - UNUSED(n0); - - ggml_visit_parents(cgraph, tensor); - - const int n_new = cgraph->n_nodes - n0; - GGML_PRINT_DEBUG("%s: visited %d new nodes\n", __func__, n_new); - - if (n_new > 0) { - // the last added node should always be starting point - GGML_ASSERT(cgraph->nodes[cgraph->n_nodes - 1] == tensor); - } -} - -void ggml_build_forward_expand(struct ggml_cgraph * cgraph, struct ggml_tensor * tensor) { - ggml_build_forward_impl(cgraph, tensor, true); -} - -struct ggml_cgraph ggml_build_forward(struct ggml_tensor * tensor) { - struct ggml_cgraph result = { - /*.n_nodes =*/ 0, - /*.n_leafs =*/ 0, - /*.n_threads =*/ GGML_DEFAULT_N_THREADS, - /*.work_size =*/ 0, - /*.work =*/ NULL, - /*.nodes =*/ { NULL }, - /*.grads =*/ { NULL }, - /*.leafs =*/ { NULL }, - /*.perf_runs =*/ 0, - /*.perf_cycles =*/ 0, - /*.perf_time_us =*/ 0, - }; - - ggml_build_forward_impl(&result, tensor, false); - - return result; -} - -struct ggml_cgraph ggml_build_backward(struct ggml_context * ctx, struct ggml_cgraph * gf, bool keep) { - struct ggml_cgraph result = *gf; - - GGML_ASSERT(gf->n_nodes > 0); - - // if we are keeping the gradient graph, we have to detach the gradient nodes from the original graph - if (keep) { - for (int i = 0; i < gf->n_nodes; i++) { - struct ggml_tensor * node = gf->nodes[i]; - - if (node->grad) { - node->grad = ggml_dup_tensor(ctx, node); - gf->grads[i] = node->grad; - } - } - } - - for (int i = gf->n_nodes - 1; i >= 0; i--) { - struct ggml_tensor * node = gf->nodes[i]; - - // because we detached the grad nodes from the original graph, we can afford inplace operations - if (node->grad) { - ggml_compute_backward(ctx, node, keep); - } - } - - for (int i = gf->n_nodes - 1; i >= 0; i--) { - struct ggml_tensor * node = gf->nodes[i]; - - if (node->is_param) { - GGML_PRINT_DEBUG("%s: found root node %p\n", __func__, (void *) node); - ggml_build_forward_impl(&result, node->grad, true); - } - } - - return result; -} - -// -// thread data -// -// synchronization is done via busy loops -// I tried using spin locks, but not sure how to use them correctly - the things I tried were slower than busy loops -// - -#ifdef __APPLE__ - -//#include -// -//typedef os_unfair_lock ggml_lock_t; -// -//#define ggml_lock_init(x) UNUSED(x) -//#define ggml_lock_destroy(x) UNUSED(x) -//#define ggml_lock_lock os_unfair_lock_lock -//#define ggml_lock_unlock os_unfair_lock_unlock -// -//#define GGML_LOCK_INITIALIZER OS_UNFAIR_LOCK_INIT - -typedef int ggml_lock_t; - -#define ggml_lock_init(x) UNUSED(x) -#define ggml_lock_destroy(x) UNUSED(x) -#define ggml_lock_lock(x) UNUSED(x) -#define ggml_lock_unlock(x) UNUSED(x) - -#define GGML_LOCK_INITIALIZER 0 - -typedef pthread_t ggml_thread_t; - -#define ggml_thread_create pthread_create -#define ggml_thread_join pthread_join - -#else - -//typedef pthread_spinlock_t ggml_lock_t; - -//#define ggml_lock_init(x) pthread_spin_init(x, PTHREAD_PROCESS_PRIVATE) -//#define ggml_lock_destroy pthread_spin_destroy -//#define ggml_lock_lock pthread_spin_lock -//#define ggml_lock_unlock 
pthread_spin_unlock - -typedef int ggml_lock_t; - -#define ggml_lock_init(x) UNUSED(x) -#define ggml_lock_destroy(x) UNUSED(x) -#define ggml_lock_lock(x) UNUSED(x) -#define ggml_lock_unlock(x) UNUSED(x) - -#define GGML_LOCK_INITIALIZER 0 - -typedef pthread_t ggml_thread_t; - -#define ggml_thread_create pthread_create -#define ggml_thread_join pthread_join - -#endif - -struct ggml_compute_state_shared { - ggml_lock_t spin; - - int n_threads; - - // synchronization primitives - atomic_int n_ready; - atomic_bool has_work; - atomic_bool stop; // stop all threads -}; - -struct ggml_compute_state { - ggml_thread_t thrd; - - struct ggml_compute_params params; - struct ggml_tensor * node; - - struct ggml_compute_state_shared * shared; -}; - -static thread_ret_t ggml_graph_compute_thread(void * data) { - struct ggml_compute_state * state = (struct ggml_compute_state *) data; - - const int n_threads = state->shared->n_threads; - - while (true) { - if (atomic_fetch_add(&state->shared->n_ready, 1) == n_threads - 1) { - atomic_store(&state->shared->has_work, false); - } else { - while (atomic_load(&state->shared->has_work)) { - if (atomic_load(&state->shared->stop)) { - return 0; - } - ggml_lock_lock (&state->shared->spin); - ggml_lock_unlock(&state->shared->spin); - } - } - - atomic_fetch_sub(&state->shared->n_ready, 1); - - // wait for work - while (!atomic_load(&state->shared->has_work)) { - if (atomic_load(&state->shared->stop)) { - return 0; - } - ggml_lock_lock (&state->shared->spin); - ggml_lock_unlock(&state->shared->spin); - } - - // check if we should stop - if (atomic_load(&state->shared->stop)) { - break; - } - - if (state->node) { - if (state->params.ith < state->params.nth) { - ggml_compute_forward(&state->params, state->node); - } - - state->node = NULL; - } else { - break; - } - } - - return 0; -} - -void ggml_graph_compute(struct ggml_context * ctx, struct ggml_cgraph * cgraph) { - const int n_threads = cgraph->n_threads; - - struct ggml_compute_state_shared state_shared = { - /*.spin =*/ GGML_LOCK_INITIALIZER, - /*.n_threads =*/ n_threads, - /*.n_ready =*/ 0, - /*.has_work =*/ false, - /*.stop =*/ false, - }; - struct ggml_compute_state * workers = n_threads > 1 ? alloca(sizeof(struct ggml_compute_state)*(n_threads - 1)) : NULL; - - // create thread pool - if (n_threads > 1) { - ggml_lock_init(&state_shared.spin); - - atomic_store(&state_shared.has_work, true); - - for (int j = 0; j < n_threads - 1; j++) { - workers[j] = (struct ggml_compute_state) { - .thrd = 0, - .params = { - .type = GGML_TASK_COMPUTE, - .ith = j + 1, - .nth = n_threads, - .wsize = cgraph->work ? ggml_nbytes(cgraph->work) : 0, - .wdata = cgraph->work ? 
cgraph->work->data : NULL, - }, - .node = NULL, - .shared = &state_shared, - }; - - int rc = ggml_thread_create(&workers[j].thrd, NULL, ggml_graph_compute_thread, &workers[j]); - GGML_ASSERT(rc == 0); - UNUSED(rc); - } - } - - // initialize tasks + work buffer - { - size_t work_size = 0; - - // thread scheduling for the different operations - for (int i = 0; i < cgraph->n_nodes; i++) { - struct ggml_tensor * node = cgraph->nodes[i]; - - switch (node->op) { - case GGML_OP_CPY: - case GGML_OP_DUP: - { - node->n_tasks = n_threads; - - size_t cur = 0; - if (ggml_is_quantized(node->type)) { - cur = GGML_TYPE_SIZE[GGML_TYPE_F32] * node->ne[0] * n_threads; - } - - work_size = MAX(work_size, cur); - } break; - case GGML_OP_ADD: - { - node->n_tasks = n_threads; - - size_t cur = 0; - - if (ggml_is_quantized(node->src0->type)) { - cur = GGML_TYPE_SIZE[GGML_TYPE_F32] * node->src0->ne[0] * n_threads; - } - - work_size = MAX(work_size, cur); - } break; - case GGML_OP_SUB: - case GGML_OP_MUL: - case GGML_OP_DIV: - case GGML_OP_SQR: - case GGML_OP_SQRT: - case GGML_OP_SUM: - case GGML_OP_MEAN: - case GGML_OP_REPEAT: - case GGML_OP_ABS: - case GGML_OP_SGN: - case GGML_OP_NEG: - case GGML_OP_STEP: - case GGML_OP_RELU: - { - node->n_tasks = 1; - } break; - case GGML_OP_GELU: - { - node->n_tasks = n_threads; - } break; - case GGML_OP_SILU: - { - node->n_tasks = n_threads; - } break; - case GGML_OP_NORM: - case GGML_OP_RMS_NORM: - { - node->n_tasks = n_threads; - } break; - case GGML_OP_MUL_MAT: - { - node->n_tasks = n_threads; - - // TODO: use different scheduling for different matrix sizes - //const int nr0 = ggml_nrows(node->src0); - //const int nr1 = ggml_nrows(node->src1); - - //node->n_tasks = MIN(n_threads, MAX(1, nr0/128)); - //printf("nr0 = %8d, nr1 = %8d, nr0*nr1 = %8d, n_tasks = %d\n", nr0, nr1, nr0*nr1, node->n_tasks); - - size_t cur = 0; - - if (node->src0->type == GGML_TYPE_F16 && node->src1->type == GGML_TYPE_F32) { -#if defined(GGML_USE_ACCELERATE) || defined(GGML_USE_OPENBLAS) || defined(GGML_USE_CUBLAS) - if (ggml_compute_forward_mul_mat_use_blas(node->src0, node->src1, node)) { - node->n_tasks = 1; // TODO: this actually is doing nothing - // the threads are still spinning - cur = GGML_TYPE_SIZE[GGML_TYPE_F32]*(node->src0->ne[0]*node->src0->ne[1]); - //printf("src0: ne0 = %d, ne1 = %d, ne = %d\n", node->src0->ne[0], node->src0->ne[1], node->src0->ne[0]*node->src0->ne[1]); - //printf("src1: ne0 = %d, ne1 = %d, ne = %d\n", node->src1->ne[0], node->src1->ne[1], node->src1->ne[0]*node->src1->ne[1]); - //printf("cur = %zu\n", cur); - } else { - cur = GGML_TYPE_SIZE[GGML_TYPE_F16]*ggml_nelements(node->src1); - } -#else - cur = GGML_TYPE_SIZE[GGML_TYPE_F16]*ggml_nelements(node->src1); -#endif - } else if (node->src0->type == GGML_TYPE_F32 && node->src1->type == GGML_TYPE_F32) { - cur = 0; - } else if (ggml_is_quantized(node->src0->type) && node->src1->type == GGML_TYPE_F32) { -#if defined(GGML_USE_ACCELERATE) || defined(GGML_USE_OPENBLAS) || defined(GGML_USE_CUBLAS) - if (ggml_compute_forward_mul_mat_use_blas(node->src0, node->src1, node)) { - node->n_tasks = 1; - cur = GGML_TYPE_SIZE[GGML_TYPE_F32]*(node->src0->ne[0]*node->src0->ne[1]); - } else -#endif - { - cur = GGML_TYPE_SIZE[GGML_TYPE_Q8_0]*ggml_nelements(node->src1)/GGML_BLCK_SIZE[GGML_TYPE_Q8_0]; - } - } else { - GGML_ASSERT(false); - } - - work_size = MAX(work_size, cur); - } break; - case GGML_OP_SCALE: - { - node->n_tasks = n_threads; - } break; - case GGML_OP_CONT: - case GGML_OP_RESHAPE: - case GGML_OP_VIEW: - case GGML_OP_PERMUTE: - 
case GGML_OP_TRANSPOSE: - case GGML_OP_GET_ROWS: - case GGML_OP_DIAG_MASK_INF: - { - node->n_tasks = 1; - } break; - case GGML_OP_SOFT_MAX: - { - node->n_tasks = n_threads; - } break; - case GGML_OP_ROPE: - { - node->n_tasks = n_threads; - } break; - case GGML_OP_CONV_1D_1S: - case GGML_OP_CONV_1D_2S: - { - node->n_tasks = n_threads; - - GGML_ASSERT(node->src0->ne[3] == 1); - GGML_ASSERT(node->src1->ne[2] == 1); - GGML_ASSERT(node->src1->ne[3] == 1); - - size_t cur = 0; - const int nk = node->src0->ne[0]; - - if (node->src0->type == GGML_TYPE_F16 && - node->src1->type == GGML_TYPE_F32) { - cur = sizeof(ggml_fp16_t)*( - nk*ggml_up32(node->src0->ne[1])*node->src0->ne[2] + - ( 2*(nk/2) + node->src1->ne[0])*node->src1->ne[1] - ); - } else if (node->src0->type == GGML_TYPE_F32 && - node->src1->type == GGML_TYPE_F32) { - cur = sizeof(float)*( - nk*ggml_up32(node->src0->ne[1])*node->src0->ne[2] + - ( 2*(nk/2) + node->src1->ne[0])*node->src1->ne[1] - ); - } else { - GGML_ASSERT(false); - } - - work_size = MAX(work_size, cur); - } break; - case GGML_OP_FLASH_ATTN: - { - node->n_tasks = n_threads; - - size_t cur = 0; - - const int64_t ne11 = ggml_up(node->src1->ne[1], GGML_SOFT_MAX_UNROLL); - - if (node->src1->type == GGML_TYPE_F32) { - cur = sizeof(float)*ne11*node->n_tasks; // TODO: this can become (n_tasks-1) - cur += sizeof(float)*ne11*node->n_tasks; // this is overestimated by x2 - } - - if (node->src1->type == GGML_TYPE_F16) { - cur = sizeof(float)*ne11*node->n_tasks; // TODO: this can become (n_tasks-1) - cur += sizeof(float)*ne11*node->n_tasks; // this is overestimated by x2 - } - - work_size = MAX(work_size, cur); - } break; - case GGML_OP_FLASH_FF: - { - node->n_tasks = n_threads; - - size_t cur = 0; - - if (node->src1->type == GGML_TYPE_F32) { - cur = sizeof(float)*node->src1->ne[1]*node->n_tasks; // TODO: this can become (n_tasks-1) - cur += sizeof(float)*node->src1->ne[1]*node->n_tasks; // this is overestimated by x2 - } - - if (node->src1->type == GGML_TYPE_F16) { - cur = sizeof(float)*node->src1->ne[1]*node->n_tasks; // TODO: this can become (n_tasks-1) - cur += sizeof(float)*node->src1->ne[1]*node->n_tasks; // this is overestimated by x2 - } - - work_size = MAX(work_size, cur); - } break; - case GGML_OP_MAP_UNARY: - case GGML_OP_MAP_BINARY: - { - node->n_tasks = 1; - } break; - case GGML_OP_NONE: - { - node->n_tasks = 1; - } break; - case GGML_OP_COUNT: - { - GGML_ASSERT(false); - } break; - } - } - - if (cgraph->work != NULL && work_size > cgraph->work_size) { - GGML_ASSERT(false); // TODO: better handling - } - - if (work_size > 0 && cgraph->work == NULL) { - cgraph->work_size = work_size + CACHE_LINE_SIZE*(n_threads - 1); - - GGML_PRINT_DEBUG("%s: allocating work buffer for graph (%zu bytes)\n", __func__, cgraph->work_size); - cgraph->work = ggml_new_tensor_1d(ctx, GGML_TYPE_I8, cgraph->work_size); - } - } - - const int64_t perf_start_cycles = ggml_perf_cycles(); - const int64_t perf_start_time_us = ggml_perf_time_us(); - - for (int i = 0; i < cgraph->n_nodes; i++) { - GGML_PRINT_DEBUG_5("%s: %d/%d\n", __func__, i, cgraph->n_nodes); - - struct ggml_tensor * node = cgraph->nodes[i]; - - // TODO: this could be used to avoid unnecessary computations, but it needs to be improved - //if (node->grad == NULL && node->perf_runs > 0) { - // continue; - //} - - const int64_t perf_node_start_cycles = ggml_perf_cycles(); - const int64_t perf_node_start_time_us = ggml_perf_time_us(); - - // INIT - struct ggml_compute_params params = { - /*.type =*/ GGML_TASK_INIT, - /*.ith =*/ 0, - /*.nth =*/ 
node->n_tasks, - /*.wsize =*/ cgraph->work ? ggml_nbytes(cgraph->work) : 0, - /*.wdata =*/ cgraph->work ? cgraph->work->data : NULL, - }; - - ggml_compute_forward(¶ms, node); - - // COMPUTE - if (node->n_tasks > 1) { - if (atomic_fetch_add(&state_shared.n_ready, 1) == n_threads - 1) { - atomic_store(&state_shared.has_work, false); - } - - while (atomic_load(&state_shared.has_work)) { - ggml_lock_lock (&state_shared.spin); - ggml_lock_unlock(&state_shared.spin); - } - - // launch thread pool - for (int j = 0; j < n_threads - 1; j++) { - workers[j].params = (struct ggml_compute_params) { - .type = GGML_TASK_COMPUTE, - .ith = j + 1, - .nth = node->n_tasks, - .wsize = cgraph->work ? ggml_nbytes(cgraph->work) : 0, - .wdata = cgraph->work ? cgraph->work->data : NULL, - }; - workers[j].node = node; - } - - atomic_fetch_sub(&state_shared.n_ready, 1); - - while (atomic_load(&state_shared.n_ready) > 0) { - ggml_lock_lock (&state_shared.spin); - ggml_lock_unlock(&state_shared.spin); - } - - atomic_store(&state_shared.has_work, true); - } - - params.type = GGML_TASK_COMPUTE; - ggml_compute_forward(¶ms, node); - - // wait for thread pool - if (node->n_tasks > 1) { - if (atomic_fetch_add(&state_shared.n_ready, 1) == n_threads - 1) { - atomic_store(&state_shared.has_work, false); - } - - while (atomic_load(&state_shared.has_work)) { - ggml_lock_lock (&state_shared.spin); - ggml_lock_unlock(&state_shared.spin); - } - - atomic_fetch_sub(&state_shared.n_ready, 1); - - while (atomic_load(&state_shared.n_ready) != 0) { - ggml_lock_lock (&state_shared.spin); - ggml_lock_unlock(&state_shared.spin); - } - } - - // FINALIZE - if (node->n_tasks > 1) { - if (atomic_fetch_add(&state_shared.n_ready, 1) == n_threads - 1) { - atomic_store(&state_shared.has_work, false); - } - - while (atomic_load(&state_shared.has_work)) { - ggml_lock_lock (&state_shared.spin); - ggml_lock_unlock(&state_shared.spin); - } - - // launch thread pool - for (int j = 0; j < n_threads - 1; j++) { - workers[j].params = (struct ggml_compute_params) { - .type = GGML_TASK_FINALIZE, - .ith = j + 1, - .nth = node->n_tasks, - .wsize = cgraph->work ? ggml_nbytes(cgraph->work) : 0, - .wdata = cgraph->work ? 
cgraph->work->data : NULL, - }; - workers[j].node = node; - } - - atomic_fetch_sub(&state_shared.n_ready, 1); - - while (atomic_load(&state_shared.n_ready) > 0) { - ggml_lock_lock (&state_shared.spin); - ggml_lock_unlock(&state_shared.spin); - } - - atomic_store(&state_shared.has_work, true); - } - - params.type = GGML_TASK_FINALIZE; - ggml_compute_forward(¶ms, node); - - // wait for thread pool - if (node->n_tasks > 1) { - if (atomic_fetch_add(&state_shared.n_ready, 1) == n_threads - 1) { - atomic_store(&state_shared.has_work, false); - } - - while (atomic_load(&state_shared.has_work)) { - ggml_lock_lock (&state_shared.spin); - ggml_lock_unlock(&state_shared.spin); - } - - atomic_fetch_sub(&state_shared.n_ready, 1); - - while (atomic_load(&state_shared.n_ready) != 0) { - ggml_lock_lock (&state_shared.spin); - ggml_lock_unlock(&state_shared.spin); - } - } - - // performance stats (node) - { - int64_t perf_cycles_cur = ggml_perf_cycles() - perf_node_start_cycles; - int64_t perf_time_us_cur = ggml_perf_time_us() - perf_node_start_time_us; - - node->perf_runs++; - node->perf_cycles += perf_cycles_cur; - node->perf_time_us += perf_time_us_cur; - } - } - - // join thread pool - if (n_threads > 1) { - atomic_store(&state_shared.stop, true); - atomic_store(&state_shared.has_work, true); - - for (int j = 0; j < n_threads - 1; j++) { - int rc = ggml_thread_join(workers[j].thrd, NULL); - GGML_ASSERT(rc == 0); - UNUSED(rc); - } - - ggml_lock_destroy(&state_shared.spin); - } - - // performance stats (graph) - { - int64_t perf_cycles_cur = ggml_perf_cycles() - perf_start_cycles; - int64_t perf_time_us_cur = ggml_perf_time_us() - perf_start_time_us; - - cgraph->perf_runs++; - cgraph->perf_cycles += perf_cycles_cur; - cgraph->perf_time_us += perf_time_us_cur; - - GGML_PRINT_DEBUG("%s: perf (%d) - cpu = %.3f / %.3f ms, wall = %.3f / %.3f ms\n", - __func__, cgraph->perf_runs, - (double) perf_cycles_cur / (double) ggml_cycles_per_ms(), - (double) cgraph->perf_cycles / (double) ggml_cycles_per_ms() / (double) cgraph->perf_runs, - (double) perf_time_us_cur / 1000.0, - (double) cgraph->perf_time_us / 1000.0 / cgraph->perf_runs); - } -} - -void ggml_graph_reset(struct ggml_cgraph * cgraph) { - for (int i = 0; i < cgraph->n_nodes; i++) { - struct ggml_tensor * grad = cgraph->grads[i]; - - if (grad) { - ggml_set_zero(grad); - } - } -} - -void ggml_graph_print(const struct ggml_cgraph * cgraph) { - int64_t perf_total_per_op_us[GGML_OP_COUNT] = {0}; - - GGML_PRINT("=== GRAPH ===\n"); - - GGML_PRINT_DEBUG("n_threads = %d\n", cgraph->n_threads); - GGML_PRINT_DEBUG("total work size = %zu bytes\n", cgraph->work_size); - - GGML_PRINT("n_nodes = %d\n", cgraph->n_nodes); - for (int i = 0; i < cgraph->n_nodes; i++) { - struct ggml_tensor * node = cgraph->nodes[i]; - - perf_total_per_op_us[node->op] += node->perf_time_us; - - GGML_PRINT(" - %3d: [ %" PRId64 ", %" PRId64 ", %" PRId64 "] %16s %s (%3d) cpu = %7.3f / %7.3f ms, wall = %7.3f / %7.3f ms\n", - i, - node->ne[0], node->ne[1], node->ne[2], - GGML_OP_LABEL[node->op], node->is_param ? "x" : node->grad ? 
"g" : " ", node->perf_runs, - (double) node->perf_cycles / (double) ggml_cycles_per_ms(), - (double) node->perf_cycles / (double) ggml_cycles_per_ms() / (double) node->perf_runs, - (double) node->perf_time_us / 1000.0, - (double) node->perf_time_us / 1000.0 / node->perf_runs); - } - - GGML_PRINT("n_leafs = %d\n", cgraph->n_leafs); - for (int i = 0; i < cgraph->n_leafs; i++) { - struct ggml_tensor * node = cgraph->leafs[i]; - - GGML_PRINT(" - %3d: [ %" PRId64 ", %" PRId64 "] %8s\n", - i, - node->ne[0], node->ne[1], - GGML_OP_LABEL[node->op]); - } - - for (int i = 0; i < GGML_OP_COUNT; i++) { - GGML_PRINT("perf_total_per_op_us[%16s] = %7.3f ms\n", GGML_OP_LABEL[i], (double) perf_total_per_op_us[i] / 1000.0); - } - - GGML_PRINT("========================================\n"); -} - -// check if node is part of the graph -static bool ggml_graph_find(const struct ggml_cgraph * cgraph, const struct ggml_tensor * node) { - if (cgraph == NULL) { - return true; - } - - for (int i = 0; i < cgraph->n_nodes; i++) { - if (cgraph->nodes[i] == node) { - return true; - } - } - - return false; -} - -static struct ggml_tensor * ggml_graph_get_parent(const struct ggml_cgraph * cgraph, const struct ggml_tensor * node) { - for (int i = 0; i < cgraph->n_nodes; i++) { - struct ggml_tensor * parent = cgraph->nodes[i]; - - if (parent->grad == node) { - return parent; - } - } - - return NULL; -} - -void ggml_graph_dump_dot(const struct ggml_cgraph * gb, const struct ggml_cgraph * gf, const char * filename) { - char color[16]; - - FILE * fp = fopen(filename, "w"); - GGML_ASSERT(fp); - - fprintf(fp, "digraph G {\n"); - fprintf(fp, " newrank = true;\n"); - fprintf(fp, " rankdir = LR;\n"); - - for (int i = 0; i < gb->n_nodes; i++) { - struct ggml_tensor * node = gb->nodes[i]; - - if (ggml_graph_get_parent(gb, node) != NULL) { - continue; - } - - if (node->is_param) { - snprintf(color, sizeof(color), "yellow"); - } else if (node->grad) { - if (ggml_graph_find(gf, node)) { - snprintf(color, sizeof(color), "green"); - } else { - snprintf(color, sizeof(color), "lightblue"); - } - } else { - snprintf(color, sizeof(color), "white"); - } - - fprintf(fp, " \"%p\" [ \ -style = filled; fillcolor = %s; shape = record; \ -label=\"%d [%" PRId64 ", %" PRId64 "] | %s", - (void *) node, color, - i, node->ne[0], node->ne[1], - GGML_OP_SYMBOL[node->op]); - - if (node->grad) { - fprintf(fp, " | %s\"; ]\n", GGML_OP_SYMBOL[node->grad->op]); - } else { - fprintf(fp, "\"; ]\n"); - } - } - - for (int i = 0; i < gb->n_leafs; i++) { - struct ggml_tensor * node = gb->leafs[i]; - - snprintf(color, sizeof(color), "pink"); - - if (ggml_nelements(node) == 1) { - fprintf(fp, " \"%p\" [ \ -style = filled; fillcolor = %s; shape = record; \ -label=\"%.1e\"; ]\n", - (void *) node, color, (double)ggml_get_f32_1d(node, 0)); - } else { - fprintf(fp, " \"%p\" [ \ -style = filled; fillcolor = %s; shape = record; \ -label=\"CONST %d [%" PRId64 ", %" PRId64 "]\"; ]\n", - (void *) node, color, - i, node->ne[0], node->ne[1]); - } - } - - for (int i = 0; i < gb->n_nodes; i++) { - struct ggml_tensor * node = gb->nodes[i]; - - struct ggml_tensor * parent = ggml_graph_get_parent(gb, node); - - if (node->src0) { - struct ggml_tensor * parent0 = ggml_graph_get_parent(gb, node->src0); - - fprintf(fp, " \"%p\":%s -> \"%p\":%s [ arrowhead = %s; style = %s; label = \"x\"; ]\n", - parent0 ? (void *) parent0 : (void *) node->src0, - parent0 ? "g" : "x", - parent ? (void *) parent : (void *) node, - parent ? "g" : "x", - parent ? "empty" : "vee", - parent ? 
"dashed" : "solid"); - } - - if (node->src1) { - struct ggml_tensor * parent1 = ggml_graph_get_parent(gb, node->src1); - - fprintf(fp, " \"%p\":%s -> \"%p\":%s [ arrowhead = %s; style = %s; label = \"y\"; ]\n", - parent1 ? (void *) parent1 : (void *) node->src1, - parent1 ? "g" : "x", - parent ? (void *) parent : (void *) node, - parent ? "g" : "x", - parent ? "empty" : "vee", - parent ? "dashed" : "solid"); - } - } - - for (int i = 0; i < gb->n_leafs; i++) { - struct ggml_tensor * node = gb->leafs[i]; - - if (node->src0) { - fprintf(fp, " \"%p\":%s -> \"%p\":%s [ label = \"x\"; ]\n", - (void *) node->src0, "x", - (void *) node, "x"); - } - - if (node->src1) { - fprintf(fp, " \"%p\":%s -> \"%p\":%s [ label = \"y\"; ]\n", - (void *) node->src1, "x", - (void *) node, "x"); - } - } - - fprintf(fp, "}\n"); - - fclose(fp); - - GGML_PRINT("%s: dot -Tpng %s -o %s.png && open %s.png\n", __func__, filename, filename, filename); -} - -//////////////////////////////////////////////////////////////////////////////// - -static void ggml_opt_set_params(int np, struct ggml_tensor * const ps[], const float * x) { - int i = 0; - for (int p = 0; p < np; ++p) { - const int64_t ne = ggml_nelements(ps[p]) ; - // TODO: add function to set tensor from array - for (int64_t j = 0; j < ne; ++j) { - ggml_set_f32_1d(ps[p], j, x[i++]); - } - } -} - -static void ggml_opt_get_params(int np, struct ggml_tensor * const ps[], float * x) { - int i = 0; - for (int p = 0; p < np; ++p) { - const int64_t ne = ggml_nelements(ps[p]) ; - // TODO: add function to get all elements at once - for (int64_t j = 0; j < ne; ++j) { - x[i++] = ggml_get_f32_1d(ps[p], j); - } - } -} - -static void ggml_opt_get_grad(int np, struct ggml_tensor * const ps[], float * g) { - int i = 0; - for (int p = 0; p < np; ++p) { - const int64_t ne = ggml_nelements(ps[p]) ; - // TODO: add function to get all elements at once - for (int64_t j = 0; j < ne; ++j) { - g[i++] = ggml_get_f32_1d(ps[p]->grad, j); - } - } -} - -// -// ADAM -// -// ref: https://arxiv.org/pdf/1412.6980.pdf -// - -static enum ggml_opt_result ggml_opt_adam( - struct ggml_context * ctx, - struct ggml_opt_params params, - struct ggml_tensor * f, - struct ggml_cgraph * gf, - struct ggml_cgraph * gb) { - GGML_ASSERT(ggml_is_scalar(f)); - - gf->n_threads = params.n_threads; - gb->n_threads = params.n_threads; - - // these will store the parameters we want to optimize - struct ggml_tensor * ps[GGML_MAX_PARAMS]; - - int np = 0; - int nx = 0; - for (int i = 0; i < gf->n_nodes; ++i) { - if (gf->nodes[i]->is_param) { - GGML_PRINT_DEBUG("found param %d: grad->op = %d\n", np, gf->nodes[i]->grad->op); - - GGML_ASSERT(np < GGML_MAX_PARAMS); - - ps[np++] = gf->nodes[i]; - nx += ggml_nelements(gf->nodes[i]); - } - } - - // constants - const float alpha = params.adam.alpha; - const float beta1 = params.adam.beta1; - const float beta2 = params.adam.beta2; - const float eps = params.adam.eps; - - float * x = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, nx)->data; // view of the parameters - float * g1 = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, nx)->data; // gradient - float * g2 = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, nx)->data; // gradient squared - float * m = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, nx)->data; // first moment - float * v = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, nx)->data; // second moment - float * mh = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, nx)->data; // first moment hat - float * vh = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, nx)->data; // second moment hat - - float * pf = params.past > 0 ? 
ggml_new_tensor_1d(ctx, GGML_TYPE_F32, params.past)->data : NULL; // past function values - - // initialize - ggml_vec_set_f32(nx, m, 0.0f); - ggml_vec_set_f32(nx, v, 0.0f); - - // update view - ggml_opt_get_params(np, ps, x); - - // compute the function value - ggml_graph_reset (gf); - ggml_set_f32 (f->grad, 1.0f); - ggml_graph_compute(ctx, gb); - - float fx_prev = ggml_get_f32_1d(f, 0); - if (pf) { - pf[0] = fx_prev; - } - - int n_no_improvement = 0; - float fx_best = fx_prev; - - // run the optimizer - for (int t = 0; t < params.adam.n_iter; ++t) { - GGML_PRINT_DEBUG ("=== iter %d ===\n", t); - - GGML_PRINT_DEBUG ("f = %10.6f\n", ggml_get_f32_1d(f, 0)); - GGML_PRINT_DEBUG_5("df/dx0 = %10.6f\n", ggml_get_f32_1d(ps[0]->grad, 0)); - GGML_PRINT_DEBUG_5("df/dx1 = %10.6f\n", ggml_get_f32_1d(ps[1]->grad, 0)); - - for (int i = 0; i < np; ++i) { - GGML_PRINT_DEBUG("param %d: %10.6f, g = %10.6f\n", i, - ggml_get_f32_1d(ps[i], 0), ggml_get_f32_1d(ps[i]->grad, 0)); - } - - const int64_t t_start_wall = ggml_time_us(); - const int64_t t_start_cpu = ggml_cycles(); - UNUSED(t_start_wall); - UNUSED(t_start_cpu); - - { - // update the gradient - ggml_opt_get_grad(np, ps, g1); - - // m_t = beta1*m_t-1 + (1 - beta1)*g_t - ggml_vec_scale_f32(nx, m, beta1); - ggml_vec_mad_f32 (nx, m, g1, 1.0f - beta1); - - // g2 = g1^2 - ggml_vec_sqr_f32 (nx, g2, g1); - - // v_t = beta2*v_t-1 + (1 - beta2)*g_t^2 - ggml_vec_scale_f32(nx, v, beta2); - ggml_vec_mad_f32 (nx, v, g2, 1.0f - beta2); - - // m^hat = m_t / (1 - beta1^t) - // v^hat = v_t / (1 - beta2^t) - // x_t = x_t-1 - alpha*m^hat/(sqrt(v^hat) + eps) - ggml_vec_cpy_f32 (nx, mh, m); - ggml_vec_cpy_f32 (nx, vh, v); - - ggml_vec_scale_f32(nx, mh, alpha/(1.0f - powf(beta1, t + 1))); - ggml_vec_scale_f32(nx, vh, 1.0f/(1.0f - powf(beta2, t + 1))); - - ggml_vec_sqrt_f32 (nx, vh, vh); - ggml_vec_acc1_f32 (nx, vh, eps); - - ggml_vec_div_f32 (nx, mh, mh, vh); - ggml_vec_sub_f32 (nx, x, x, mh); - - // update the parameters - ggml_opt_set_params(np, ps, x); - } - - ggml_graph_reset (gf); - ggml_set_f32 (f->grad, 1.0f); - ggml_graph_compute(ctx, gb); - - const float fx = ggml_get_f32_1d(f, 0); - - // check convergence - if (fabsf(fx - fx_prev)/fx < params.adam.eps_f) { - GGML_PRINT_DEBUG("converged\n"); - - return GGML_OPT_OK; - } - - // delta-based convergence test - if (pf != NULL) { - // need at least params.past iterations to start checking for convergence - if (params.past <= t) { - const float rate = (pf[t%params.past] - fx)/fx; - - if (fabsf(rate) < params.delta) { - return GGML_OPT_OK; - } - } - - pf[t%params.past] = fx; - } - - // check for improvement - if (params.max_no_improvement > 0) { - if (fx_best > fx) { - fx_best = fx; - n_no_improvement = 0; - } else { - ++n_no_improvement; - - if (n_no_improvement >= params.max_no_improvement) { - return GGML_OPT_OK; - } - } - } - - fx_prev = fx; - - { - const int64_t t_end_cpu = ggml_cycles(); - GGML_PRINT_DEBUG("time iter: %5.3f s\n", ((float)(t_end_cpu - t_start_cpu))/CLOCKS_PER_SEC); - UNUSED(t_end_cpu); - - const int64_t t_end_wall = ggml_time_us(); - GGML_PRINT_DEBUG("wall time iter: %5.3f s\n", (t_end_wall - t_start_wall)/1e6); - UNUSED(t_end_wall); - } - } - - return GGML_OPT_DID_NOT_CONVERGE; -} - -// -// L-BFGS -// -// the L-BFGS implementation below is based on the following implementation: -// -// https://github.com/chokkan/liblbfgs -// - -struct ggml_lbfgs_iteration_data { - float alpha; - float ys; - float * s; - float * y; -}; - -static enum ggml_opt_result linesearch_backtracking( - struct ggml_context * ctx, 
- const struct ggml_opt_params * params, - int nx, - float * x, - float * fx, - float * g, - float * d, - float * step, - const float * xp, - struct ggml_tensor * f, - struct ggml_cgraph * gf, - struct ggml_cgraph * gb, - const int np, - struct ggml_tensor * ps[]) { - int count = 0; - - float width = 0.0f; - float dg = 0.0f; - float finit = 0.0f; - float dginit = 0.0f; - float dgtest = 0.0f; - - const float dec = 0.5f; - const float inc = 2.1f; - - if (*step <= 0.f) { - return GGML_LINESEARCH_INVALID_PARAMETERS; - } - - // compute the initial gradient in the search direction - ggml_vec_dot_f32(nx, &dginit, g, d); - - // make sure that d points to a descent direction - if (0 < dginit) { - return GGML_LINESEARCH_FAIL; - } - - // initialize local variables - finit = *fx; - dgtest = params->lbfgs.ftol*dginit; - - while (true) { - ggml_vec_cpy_f32(nx, x, xp); - ggml_vec_mad_f32(nx, x, d, *step); - - // evaluate the function and gradient values - { - ggml_opt_set_params(np, ps, x); - - ggml_graph_reset (gf); - ggml_set_f32 (f->grad, 1.0f); - ggml_graph_compute(ctx, gb); - - ggml_opt_get_grad(np, ps, g); - - *fx = ggml_get_f32_1d(f, 0); - } - - ++count; - - if (*fx > finit + (*step)*dgtest) { - width = dec; - } else { - // Armijo condition is satisfied - if (params->lbfgs.linesearch == GGML_LINESEARCH_BACKTRACKING_ARMIJO) { - return count; - } - - ggml_vec_dot_f32(nx, &dg, g, d); - - // check the Wolfe condition - if (dg < params->lbfgs.wolfe * dginit) { - width = inc; - } else { - if(params->lbfgs.linesearch == GGML_LINESEARCH_BACKTRACKING_WOLFE) { - // regular Wolfe conditions - return count; - } - - if(dg > -params->lbfgs.wolfe*dginit) { - width = dec; - } else { - // strong Wolfe condition (GGML_LINESEARCH_BACKTRACKING_STRONG_WOLFE) - return count; - } - return count; - } - } - - if (*step < params->lbfgs.min_step) { - return GGML_LINESEARCH_MINIMUM_STEP; - } - if (*step > params->lbfgs.max_step) { - return GGML_LINESEARCH_MAXIMUM_STEP; - } - if (params->lbfgs.max_linesearch <= count) { - return GGML_LINESEARCH_MAXIMUM_ITERATIONS; - } - - (*step) *= width; - } - - return GGML_LINESEARCH_FAIL; -} - -static enum ggml_opt_result ggml_opt_lbfgs( - struct ggml_context * ctx, - struct ggml_opt_params params, - struct ggml_tensor * f, - struct ggml_cgraph * gf, - struct ggml_cgraph * gb) { - if (params.lbfgs.linesearch == GGML_LINESEARCH_BACKTRACKING_WOLFE || - params.lbfgs.linesearch == GGML_LINESEARCH_BACKTRACKING_STRONG_WOLFE) { - if (params.lbfgs.wolfe <= params.lbfgs.ftol || 1.f <= params.lbfgs.wolfe) { - return GGML_OPT_INVALID_WOLFE; - } - } - - gf->n_threads = params.n_threads; - gb->n_threads = params.n_threads; - - const int m = params.lbfgs.m; - - // these will store the parameters we want to optimize - struct ggml_tensor * ps[GGML_MAX_PARAMS]; - - int np = 0; - int nx = 0; - for (int i = 0; i < gf->n_nodes; ++i) { - if (gf->nodes[i]->is_param) { - GGML_PRINT_DEBUG("found param %d: grad->op = %d\n", np, gf->nodes[i]->grad->op); - - GGML_ASSERT(np < GGML_MAX_PARAMS); - - ps[np++] = gf->nodes[i]; - nx += ggml_nelements(gf->nodes[i]); - } - } - - float * x = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, nx)->data; // current parameters - float * xp = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, nx)->data; // previous parameters - float * g = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, nx)->data; // current gradient - float * gp = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, nx)->data; // previous gradient - float * d = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, nx)->data; // search direction - - float * pf = 
params.past > 0 ? ggml_new_tensor_1d(ctx, GGML_TYPE_F32, params.past)->data : NULL; // past function values - - float fx = 0.0f; // cost function value - float xnorm = 0.0f; // ||x|| - float gnorm = 0.0f; // ||g|| - float step = 0.0f; - - // initialize x from the graph nodes - ggml_opt_get_params(np, ps, x); - - // the L-BFGS memory - struct ggml_lbfgs_iteration_data * lm = alloca(sizeof(struct ggml_lbfgs_iteration_data)*m); - - for (int i = 0; i < m; ++i) { - lm[i].alpha = 0.0f; - lm[i].ys = 0.0f; - lm[i].s = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, nx)->data; - lm[i].y = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, nx)->data; - } - - // evaluate the function value and its gradient - { - ggml_opt_set_params(np, ps, x); - - ggml_graph_reset (gf); - ggml_set_f32 (f->grad, 1.0f); - ggml_graph_compute(ctx, gb); - - ggml_opt_get_grad(np, ps, g); - - fx = ggml_get_f32_1d(f, 0); - } - - if (pf) { - pf[0] = fx; - } - - float fx_best = fx; - - // search direction = -gradient - ggml_vec_neg_f32(nx, d, g); - - // ||x||, ||g|| - ggml_vec_norm_f32(nx, &xnorm, x); - ggml_vec_norm_f32(nx, &gnorm, g); - - if (xnorm < 1.0f) { - xnorm = 1.0f; - } - - // already optimized - if (gnorm/xnorm <= params.lbfgs.eps) { - return GGML_OPT_OK; - } - - // initial step - ggml_vec_norm_inv_f32(nx, &step, d); - - int j = 0; - int k = 1; - int ls = 0; - int end = 0; - int bound = 0; - int n_no_improvement = 0; - - float ys = 0.0f; - float yy = 0.0f; - float beta = 0.0f; - - while (true) { - // store the current position and gradient vectors - ggml_vec_cpy_f32(nx, xp, x); - ggml_vec_cpy_f32(nx, gp, g); - - ls = linesearch_backtracking(ctx, ¶ms, nx, x, &fx, g, d, &step, xp, f, gf, gb, np, ps); - - if (ls < 0) { - // linesearch failed - go back to the previous point and return - ggml_vec_cpy_f32(nx, x, xp); - ggml_vec_cpy_f32(nx, g, gp); - - return ls; - } - - ggml_vec_norm_f32(nx, &xnorm, x); - ggml_vec_norm_f32(nx, &gnorm, g); - - GGML_PRINT_DEBUG("f = %10.6f\n", ggml_get_f32_1d(f, 0)); - - if (xnorm < 1.0f) { - xnorm = 1.0f; - } - if (gnorm/xnorm <= params.lbfgs.eps) { - // converged - return GGML_OPT_OK; - } - - // delta-based convergence test - if (pf != NULL) { - // need at least params.past iterations to start checking for convergence - if (params.past <= k) { - const float rate = (pf[k%params.past] - fx)/fx; - - if (fabsf(rate) < params.delta) { - return GGML_OPT_OK; - } - } - - pf[k%params.past] = fx; - } - - // check for improvement - if (params.max_no_improvement > 0) { - if (fx < fx_best) { - fx_best = fx; - n_no_improvement = 0; - } else { - n_no_improvement++; - - if (n_no_improvement >= params.max_no_improvement) { - return GGML_OPT_OK; - } - } - } - - if (params.lbfgs.n_iter != 0 && params.lbfgs.n_iter < k + 1) { - // reached the maximum number of iterations - return GGML_OPT_DID_NOT_CONVERGE; - } - - // update vectors s and y: - // s_{k+1} = x_{k+1} - x_{k} = \step * d_{k}. - // y_{k+1} = g_{k+1} - g_{k}. - // - ggml_vec_sub_f32(nx, lm[end].s, x, xp); - ggml_vec_sub_f32(nx, lm[end].y, g, gp); - - // compute scalars ys and yy: - // ys = y^t \cdot s -> 1 / \rho. - // yy = y^t \cdot y. - // - ggml_vec_dot_f32(nx, &ys, lm[end].y, lm[end].s); - ggml_vec_dot_f32(nx, &yy, lm[end].y, lm[end].y); - - lm[end].ys = ys; - - // find new search direction - // ref: https://en.wikipedia.org/wiki/Limited-memory_BFGS - - bound = (m <= k) ? 
m : k; - k++; - end = (end + 1)%m; - - // initialize search direction with -g - ggml_vec_neg_f32(nx, d, g); - - j = end; - for (int i = 0; i < bound; ++i) { - j = (j + m - 1) % m; - // \alpha_{j} = \rho_{j} s^{t}_{j} \cdot q_{k+1} - ggml_vec_dot_f32(nx, &lm[j].alpha, lm[j].s, d); - lm[j].alpha /= lm[j].ys; - // q_{i} = q_{i+1} - \alpha_{i} y_{i} - ggml_vec_mad_f32(nx, d, lm[j].y, -lm[j].alpha); - } - - ggml_vec_scale_f32(nx, d, ys/yy); - - for (int i = 0; i < bound; ++i) { - // \beta_{j} = \rho_{j} y^t_{j} \cdot \gamma_{i} - ggml_vec_dot_f32(nx, &beta, lm[j].y, d); - beta /= lm[j].ys; - // \gamma_{i+1} = \gamma_{i} + (\alpha_{j} - \beta_{j}) s_{j} - ggml_vec_mad_f32(nx, d, lm[j].s, lm[j].alpha - beta); - j = (j + 1)%m; - } - - step = 1.0; - } - - return GGML_OPT_DID_NOT_CONVERGE; -} - -struct ggml_opt_params ggml_opt_default_params(enum ggml_opt_type type) { - struct ggml_opt_params result; - - switch (type) { - case GGML_OPT_ADAM: - { - result = (struct ggml_opt_params) { - .type = GGML_OPT_ADAM, - .n_threads = 1, - .past = 0, - .delta = 1e-5f, - - .max_no_improvement = 100, - - .print_forward_graph = true, - .print_backward_graph = true, - - .adam = { - .n_iter = 10000, - .alpha = 0.001f, - .beta1 = 0.9f, - .beta2 = 0.999f, - .eps = 1e-8f, - .eps_f = 1e-5f, - .eps_g = 1e-3f, - }, - }; - } break; - case GGML_OPT_LBFGS: - { - result = (struct ggml_opt_params) { - .type = GGML_OPT_LBFGS, - .n_threads = 1, - .past = 0, - .delta = 1e-5f, - - .max_no_improvement = 0, - - .print_forward_graph = true, - .print_backward_graph = true, - - .lbfgs = { - .m = 6, - .n_iter = 100, - .max_linesearch = 20, - - .eps = 1e-5f, - .ftol = 1e-4f, - .wolfe = 0.9f, - .min_step = 1e-20f, - .max_step = 1e+20f, - - .linesearch = GGML_LINESEARCH_DEFAULT, - }, - }; - } break; - } - - return result; -} - -enum ggml_opt_result ggml_opt( - struct ggml_context * ctx, - struct ggml_opt_params params, - struct ggml_tensor * f) { - bool free_ctx = false; - if (ctx == NULL) { - struct ggml_init_params params_ctx = { - .mem_size = 16*1024*1024, - .mem_buffer = NULL, - .no_alloc = false, - }; - - ctx = ggml_init(params_ctx); - if (ctx == NULL) { - return GGML_OPT_NO_CONTEXT; - } - - free_ctx = true; - } - - enum ggml_opt_result result = GGML_OPT_OK; - - // build forward + backward compute graphs - struct ggml_cgraph gf = ggml_build_forward (f); - struct ggml_cgraph gb = ggml_build_backward(ctx, &gf, false); - - switch (params.type) { - case GGML_OPT_ADAM: - { - result = ggml_opt_adam(ctx, params, f, &gf, &gb); - } break; - case GGML_OPT_LBFGS: - { - result = ggml_opt_lbfgs(ctx, params, f, &gf, &gb); - } break; - } - - if (params.print_forward_graph) { - ggml_graph_print (&gf); - ggml_graph_dump_dot(&gf, NULL, "opt-forward.dot"); - } - - if (params.print_backward_graph) { - ggml_graph_print (&gb); - ggml_graph_dump_dot(&gb, &gf, "opt-backward.dot"); - } - - if (free_ctx) { - ggml_free(ctx); - } - - return result; -} - -//////////////////////////////////////////////////////////////////////////////// - -size_t ggml_quantize_q4_0(const float * src, void * dst, int n, int k, int64_t * hist) { - assert(k % QK4_0 == 0); - const int nb = k / QK4_0; - - for (int j = 0; j < n; j += k) { - block_q4_0 * restrict y = (block_q4_0 *)dst + j/QK4_0; - - quantize_row_q4_0_reference(src + j, y, k); - - for (int i = 0; i < nb; i++) { - for (int l = 0; l < QK4_0; l += 2) { - const uint8_t vi0 = y[i].qs[l/2] & 0xF; - const uint8_t vi1 = y[i].qs[l/2] >> 4; - - hist[vi0]++; - hist[vi1]++; - } - } - } - - return (n/QK4_0*sizeof(block_q4_0)); -} - 
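
Each of the `ggml_quantize_q4_*` helpers above follows the same pattern: quantize `n` floats a row at a time, tally every stored 4-bit value into a 16-bin histogram, and return the number of bytes written. Below is a minimal Rust sketch of just that bookkeeping; it is not part of this PR, and `block_q4_0`/`QK4_0 == 32` are assumptions mirroring the C layout rather than types exposed by the crate.

```rust
const QK4_0: usize = 32;

/// Hypothetical mirror of the C block layout: one f32 scale followed by
/// QK4_0/2 bytes holding 32 packed 4-bit values.
#[allow(non_camel_case_types, dead_code)]
#[repr(C)]
struct block_q4_0 {
    d: f32,
    qs: [u8; QK4_0 / 2],
}

/// Tallies every low/high nibble into `hist` and returns the byte size of the
/// quantized data, matching the return value of the C quantize functions.
fn histogram_and_size(blocks: &[block_q4_0], hist: &mut [i64; 16]) -> usize {
    for block in blocks {
        for &byte in &block.qs {
            hist[(byte & 0x0F) as usize] += 1; // low nibble
            hist[(byte >> 4) as usize] += 1; // high nibble
        }
    }
    blocks.len() * std::mem::size_of::<block_q4_0>()
}

fn main() {
    let blocks = [block_q4_0 { d: 1.0, qs: [0x21; QK4_0 / 2] }];
    let mut hist = [0i64; 16];
    let bytes = histogram_and_size(&blocks, &mut hist);
    assert_eq!(bytes, 20); // 4-byte scale + 16 bytes of packed nibbles
    assert_eq!(hist[1], 16); // sixteen low nibbles of value 1
    assert_eq!(hist[2], 16); // sixteen high nibbles of value 2
}
```
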
-size_t ggml_quantize_q4_1(const float * src, void * dst, int n, int k, int64_t * hist) { - assert(k % QK4_1 == 0); - const int nb = k / QK4_1; - - for (int j = 0; j < n; j += k) { - block_q4_1 * restrict y = (block_q4_1 *)dst + j/QK4_1; - - quantize_row_q4_1_reference(src + j, y, k); - - for (int i = 0; i < nb; i++) { - for (int l = 0; l < QK4_1; l += 2) { - const uint8_t vi0 = y[i].qs[l/2] & 0xF; - const uint8_t vi1 = y[i].qs[l/2] >> 4; - - hist[vi0]++; - hist[vi1]++; - } - } - } - - return (n/QK4_1*sizeof(block_q4_1)); -} - -size_t ggml_quantize_q4_2(const float * src, void * dst, int n, int k, int64_t * hist) { - assert(k % QK4_2 == 0); - const int nb = k / QK4_2; - - for (int j = 0; j < n; j += k) { - block_q4_2 * restrict y = (block_q4_2 *)dst + j/QK4_2; - - //quantize_row_q4_2_reference(src + j, y, k); - quantize_row_q4_2_rmse(src + j, y, k); - - for (int i = 0; i < nb; i++) { - for (int l = 0; l < QK4_2; l += 2) { - const uint8_t vi0 = y[i].qs[l/2] & 0xF; - const uint8_t vi1 = y[i].qs[l/2] >> 4; - - hist[vi0]++; - hist[vi1]++; - } - } - } - - return (n/QK4_2*sizeof(block_q4_2)); -} - -size_t ggml_quantize_q4_3(const float * src, void * dst, int n, int k, int64_t * hist) { - assert(k % QK4_3 == 0); - const int nb = k / QK4_3; - - for (int j = 0; j < n; j += k) { - block_q4_3 * restrict y = (block_q4_3 *)dst + j/QK4_3; - - quantize_row_q4_3_reference(src + j, y, k); - - for (int i = 0; i < nb; i++) { - for (int l = 0; l < QK4_3; l += 2) { - const uint8_t vi0 = y[i].qs[l/2] & 0xF; - const uint8_t vi1 = y[i].qs[l/2] >> 4; - - hist[vi0]++; - hist[vi1]++; - } - } - } - - return (n/QK4_3*sizeof(block_q4_3)); -} - -size_t ggml_quantize_chunk(enum ggml_type type, const float * src, void * dst, int start, int n, int64_t * hist) { - size_t result = 0; - switch (type) { - case GGML_TYPE_Q4_0: - { - GGML_ASSERT(start % QK4_0 == 0); - block_q4_0 * block = (block_q4_0*)dst + start / QK4_0; - result = ggml_quantize_q4_0(src + start, block, n, n, hist); - } break; - case GGML_TYPE_Q4_1: - { - GGML_ASSERT(start % QK4_1 == 0); - block_q4_1 * block = (block_q4_1*)dst + start / QK4_1; - result = ggml_quantize_q4_1(src + start, block, n, n, hist); - } break; - case GGML_TYPE_Q4_2: - { - GGML_ASSERT(start % QK4_2 == 0); - block_q4_2 * block = (block_q4_2*)dst + start / QK4_2; - result = ggml_quantize_q4_2(src + start, block, n, n, hist); - } break; - case GGML_TYPE_Q4_3: - { - GGML_ASSERT(start % QK4_3 == 0); - block_q4_3 * block = (block_q4_3*)dst + start / QK4_3; - result = ggml_quantize_q4_3(src + start, block, n, n, hist); - } break; - default: - assert(false); - } - return result; -} - -//////////////////////////////////////////////////////////////////////////////// - -int ggml_cpu_has_avx(void) { -#if defined(__AVX__) - return 1; -#else - return 0; -#endif -} - -int ggml_cpu_has_avx2(void) { -#if defined(__AVX2__) - return 1; -#else - return 0; -#endif -} - -int ggml_cpu_has_avx512(void) { -#if defined(__AVX512F__) - return 1; -#else - return 0; -#endif -} - -int ggml_cpu_has_avx512_vbmi(void) { -#if defined(__AVX512VBMI__) - return 1; -#else - return 0; -#endif -} - -int ggml_cpu_has_avx512_vnni(void) { -#if defined(__AVX512VNNI__) - return 1; -#else - return 0; -#endif -} - -int ggml_cpu_has_fma(void) { -#if defined(__FMA__) - return 1; -#else - return 0; -#endif -} - -int ggml_cpu_has_neon(void) { -#if defined(__ARM_NEON) - return 1; -#else - return 0; -#endif -} - -int ggml_cpu_has_arm_fma(void) { -#if defined(__ARM_FEATURE_FMA) - return 1; -#else - return 0; -#endif -} - -int 
ggml_cpu_has_f16c(void) { -#if defined(__F16C__) - return 1; -#else - return 0; -#endif -} - -int ggml_cpu_has_fp16_va(void) { -#if defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC) - return 1; -#else - return 0; -#endif -} - -int ggml_cpu_has_wasm_simd(void) { -#if defined(__wasm_simd128__) - return 1; -#else - return 0; -#endif -} - -int ggml_cpu_has_blas(void) { -#if defined(GGML_USE_ACCELERATE) || defined(GGML_USE_OPENBLAS) || defined(GGML_USE_CUBLAS) - return 1; -#else - return 0; -#endif -} - -int ggml_cpu_has_cublas(void) { -#if defined(GGML_USE_CUBLAS) - return 1; -#else - return 0; -#endif -} - -int ggml_cpu_has_sse3(void) { -#if defined(__SSE3__) - return 1; -#else - return 0; -#endif -} - -int ggml_cpu_has_vsx(void) { -#if defined(__POWER9_VECTOR__) - return 1; -#else - return 0; -#endif -} - -//////////////////////////////////////////////////////////////////////////////// diff --git a/ggml-sys/ggml/ggml.h b/ggml-sys/ggml/ggml.h deleted file mode 100644 index 460d4ffe..00000000 --- a/ggml-sys/ggml/ggml.h +++ /dev/null @@ -1,866 +0,0 @@ -#pragma once - -// -// GGML Tensor Library -// -// This documentation is still a work in progress. -// If you wish some specific topics to be covered, feel free to drop a comment: -// -// https://github.com/ggerganov/whisper.cpp/issues/40 -// -// ## Overview -// -// This library implements: -// -// - a set of tensor operations -// - automatic differentiation -// - basic optimization algorithms -// -// The aim of this library is to provide a minimalistic approach for various machine learning tasks. This includes, -// but is not limited to, the following: -// -// - linear regression -// - support vector machines -// - neural networks -// -// The library allows the user to define a certain function using the available tensor operations. This function -// definition is represented internally via a computation graph. Each tensor operation in the function definition -// corresponds to a node in the graph. Having the computation graph defined, the user can choose to compute the -// function's value and/or its gradient with respect to the input variables. Optionally, the function can be optimized -// using one of the available optimization algorithms. -// -// For example, here we define the function: f(x) = a*x^2 + b -// -// { -// struct ggml_init_params params = { -// .mem_size = 16*1024*1024, -// .mem_buffer = NULL, -// }; -// -// // memory allocation happens here -// struct ggml_context * ctx = ggml_init(params); -// -// struct ggml_tensor * x = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, 1); -// -// ggml_set_param(ctx, x); // x is an input variable -// -// struct ggml_tensor * a = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, 1); -// struct ggml_tensor * b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, 1); -// struct ggml_tensor * x2 = ggml_mul(ctx, x, x); -// struct ggml_tensor * f = ggml_add(ctx, ggml_mul(ctx, a, x2), b); -// -// ... -// } -// -// Notice that the function definition above does not involve any actual computation. The computation is performed only -// when the user explicitly requests it. For example, to compute the function's value at x = 2.0: -// -// { -// ... -// -// struct ggml_cgraph gf = ggml_build_forward(f); -// -// // set the input variable and parameter values -// ggml_set_f32(x, 2.0f); -// ggml_set_f32(a, 3.0f); -// ggml_set_f32(b, 4.0f); -// -// ggml_graph_compute(ctx0, &gf); -// -// printf("f = %f\n", ggml_get_f32_1d(f, 0)); -// -// ... -// } -// -// The actual computation is performed in the ggml_graph_compute() function. 
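
For comparison, the same `f(x) = a*x^2 + b` flow through the Rust wrapper this PR introduces (`Context`, `ComputationGraph`) might look roughly like the sketch below. The module paths and the availability of `Tensor::read_data` after the split into `tensor.rs` are assumptions on my part, not something this diff pins down.

```rust
use ggml::{context::Context, ComputationGraph};

fn main() {
    // 16 MiB working area owned by ggml, mirroring ggml_init in the C example.
    let ctx = Context::init(16 * 1024 * 1024, true);

    let x = ctx.new_f32(2.0);
    let a = ctx.new_f32(3.0);
    let b = ctx.new_f32(4.0);

    // f = a * x^2 + b, built lazily as nodes of the computation graph.
    let x2 = ctx.op_mul(&x, &x);
    let f = ctx.op_add(&ctx.op_mul(&a, &x2), &b);

    // Nothing has been computed yet; evaluate the forward graph now.
    let mut graph = ComputationGraph::new(1);
    graph.build_forward_expand(&f);
    ctx.graph_compute(&mut graph);

    // Read back the single f32 result (assumes read_data remains on Tensor).
    let mut out = [0u8; 4];
    unsafe { f.read_data(0, &mut out) };
    println!("f = {}", f32::from_ne_bytes(out)); // expected: 3*4 + 4 = 16
}
```
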
-// -// The ggml_new_tensor_...() functions create new tensors. They are allocated in the memory buffer provided to the -// ggml_init() function. You have to be careful not to exceed the memory buffer size. Therefore, you have to know -// in advance how much memory you need for your computation. Alternatively, you can allocate a large enough memory -// and after defining the computation graph, call the ggml_used_mem() function to find out how much memory was -// actually needed. -// -// The ggml_set_param() function marks a tensor as an input variable. This is used by the automatic -// differentiation and optimization algorithms. -// -// The described approach allows to define the function graph once and then compute its forward or backward graphs -// multiple times. All computations will use the same memory buffer allocated in the ggml_init() function. This way -// the user can avoid the memory allocation overhead at runtime. -// -// The library supports multi-dimensional tensors - up to 4 dimensions. The FP16 and FP32 data types are first class -// citizens, but in theory the library can be extended to support FP8 and integer data types. -// -// Each tensor operation produces a new tensor. Initially the library was envisioned to support only the use of unary -// and binary operations. Most of the available operations fall into one of these two categories. With time, it became -// clear that the library needs to support more complex operations. The way to support these operations is not clear -// yet, but a few examples are demonstrated in the following operations: -// -// - ggml_permute() -// - ggml_conv_1d_1s() -// - ggml_conv_1d_2s() -// -// For each tensor operator, the library implements a forward and backward computation function. The forward function -// computes the output tensor value given the input tensor values. The backward function computes the adjoint of the -// input tensors given the adjoint of the output tensor. For a detailed explanation of what this means, take a -// calculus class, or watch the following video: -// -// What is Automatic Differentiation? -// https://www.youtube.com/watch?v=wG_nF1awSSY -// -// -// ## Tensor data (struct ggml_tensor) -// -// The tensors are stored in memory via the ggml_tensor struct. The structure provides information about the size of -// the tensor, the data type, and the memory buffer where the tensor data is stored. Additionally, it contains -// pointers to the "source" tensors - i.e. the tensors that were used to compute the current tensor. For example: -// -// { -// struct ggml_tensor * c = ggml_add(ctx, a, b); -// -// assert(c->src[0] == a); -// assert(c->src[1] == b); -// } -// -// The multi-dimensional tensors are stored in row-major order. The ggml_tensor struct contains fields for the -// number of elements in each dimension ("ne") as well as the number of bytes ("nb", a.k.a. stride). This allows -// to store tensors that are not contiguous in memory, which is useful for operations such as transposition and -// permutation. All tensor operations have to take the stride into account and not assume that the tensor is -// contiguous in memory. -// -// The data of the tensor is accessed via the "data" pointer. For example: -// -// { -// struct ggml_tensor * a = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, 2, 3); -// -// // a[1, 2] = 1.0f; -// *(float *) ((char *) a->data + 2*a->nb[1] + 1*a->nb[0]) = 1.0f; -// -// // a[2, 0] = 2.0f; -// *(float *) ((char *) a->data + 0*a->nb[1] + 2*a->nb[0]) = 2.0f; -// -// ... 
-// } -// -// Alternatively, there are helper functions, such as ggml_get_f32_1d() and ggml_set_f32_1d() that can be used. -// -// ## The matrix multiplication operator (ggml_mul_mat) -// -// TODO -// -// -// ## Multi-threading -// -// TODO -// -// -// ## Overview of ggml.c -// -// TODO -// -// -// ## SIMD optimizations -// -// TODO -// -// -// ## Debugging ggml -// -// TODO -// -// - -#ifdef __cplusplus -extern "C" { -#endif - -#include -#include -#include - -#define GGML_MAX_DIMS 4 -#define GGML_MAX_NODES 4096 -#define GGML_MAX_PARAMS 16 -#define GGML_MAX_CONTEXTS 64 -#define GGML_MAX_OPT 4 -#define GGML_DEFAULT_N_THREADS 4 - -#ifdef __ARM_NEON -// we use the built-in 16-bit float type -typedef __fp16 ggml_fp16_t; -#else -typedef uint16_t ggml_fp16_t; -#endif - -// convert FP16 <-> FP32 -float ggml_fp16_to_fp32(ggml_fp16_t x); -ggml_fp16_t ggml_fp32_to_fp16(float x); - -struct ggml_object; -struct ggml_context; - -enum ggml_type { - // explicitly numbered values are used in llama.cpp files - GGML_TYPE_F32 = 0, - GGML_TYPE_F16 = 1, - GGML_TYPE_Q4_0 = 2, - GGML_TYPE_Q4_1 = 3, - GGML_TYPE_Q4_2 = 4, - GGML_TYPE_Q4_3 = 5, - GGML_TYPE_Q8_0 = 6, - GGML_TYPE_I8, - GGML_TYPE_I16, - GGML_TYPE_I32, - GGML_TYPE_COUNT, -}; - -// available tensor operations: -enum ggml_op { - GGML_OP_NONE = 0, - - GGML_OP_DUP, - GGML_OP_ADD, - GGML_OP_SUB, - GGML_OP_MUL, - GGML_OP_DIV, - GGML_OP_SQR, - GGML_OP_SQRT, - GGML_OP_SUM, - GGML_OP_MEAN, - GGML_OP_REPEAT, - GGML_OP_ABS, - GGML_OP_SGN, - GGML_OP_NEG, - GGML_OP_STEP, - GGML_OP_RELU, - GGML_OP_GELU, - GGML_OP_SILU, - GGML_OP_NORM, // normalize - GGML_OP_RMS_NORM, - - GGML_OP_MUL_MAT, - - GGML_OP_SCALE, - GGML_OP_CPY, - GGML_OP_CONT, - GGML_OP_RESHAPE, - GGML_OP_VIEW, - GGML_OP_PERMUTE, - GGML_OP_TRANSPOSE, - GGML_OP_GET_ROWS, - GGML_OP_DIAG_MASK_INF, - GGML_OP_SOFT_MAX, - GGML_OP_ROPE, - GGML_OP_CONV_1D_1S, - GGML_OP_CONV_1D_2S, - - GGML_OP_FLASH_ATTN, - GGML_OP_FLASH_FF, - - GGML_OP_MAP_UNARY, - GGML_OP_MAP_BINARY, - - GGML_OP_COUNT, -}; - - -// ggml object -struct ggml_object { - size_t offs; - size_t size; - - struct ggml_object * next; - - char padding[8]; -}; - -static const size_t GGML_OBJECT_SIZE = sizeof(struct ggml_object); - -// n-dimensional tensor -struct ggml_tensor { - enum ggml_type type; - - int n_dims; - int64_t ne[GGML_MAX_DIMS]; // number of elements - size_t nb[GGML_MAX_DIMS]; // stride in bytes: - // nb[0] = sizeof(type) - // nb[1] = nb[0] * ne[0] + padding - // nb[i] = nb[i-1] * ne[i-1] - - // compute data - enum ggml_op op; - - bool is_param; - - struct ggml_tensor * grad; - struct ggml_tensor * src0; - struct ggml_tensor * src1; - struct ggml_tensor * opt[GGML_MAX_OPT]; - - // thread scheduling - int n_tasks; - - // performance - int perf_runs; - int64_t perf_cycles; - int64_t perf_time_us; - - void * data; - char padding[8]; -}; - -// computation graph -struct ggml_cgraph { - int n_nodes; - int n_leafs; - int n_threads; - - size_t work_size; - struct ggml_tensor * work; - - struct ggml_tensor * nodes[GGML_MAX_NODES]; - struct ggml_tensor * grads[GGML_MAX_NODES]; - struct ggml_tensor * leafs[GGML_MAX_NODES]; - - // performance - int perf_runs; - int64_t perf_cycles; - int64_t perf_time_us; -}; - -// scratch buffer -struct ggml_scratch { - size_t offs; - size_t size; - void * data; -}; - -struct ggml_init_params { - // memory pool - size_t mem_size; // bytes - void * mem_buffer; // if NULL, memory will be allocated internally - bool no_alloc; // don't allocate memory for the tensor data -}; - -void ggml_time_init(void); // call this once 
at the beginning of the program -int64_t ggml_time_ms(void); -int64_t ggml_time_us(void); -int64_t ggml_cycles(void); -int64_t ggml_cycles_per_ms(void); - -void ggml_print_object (const struct ggml_object * obj); -void ggml_print_objects(const struct ggml_context * ctx); - -int64_t ggml_nelements(const struct ggml_tensor * tensor); -size_t ggml_nbytes (const struct ggml_tensor * tensor); - -int ggml_blck_size (enum ggml_type type); -size_t ggml_type_size (enum ggml_type type); // size in bytes for all elements in a block -float ggml_type_sizef(enum ggml_type type); // ggml_type_size()/ggml_blck_size() as float - -const char * ggml_type_name(enum ggml_type type); - -size_t ggml_element_size(const struct ggml_tensor * tensor); - -bool ggml_is_quantized(enum ggml_type type); - -struct ggml_context * ggml_init(struct ggml_init_params params); -void ggml_free(struct ggml_context * ctx); - -size_t ggml_used_mem(const struct ggml_context * ctx); - -size_t ggml_set_scratch(struct ggml_context * ctx, struct ggml_scratch scratch); - -struct ggml_tensor * ggml_new_tensor( - struct ggml_context * ctx, - enum ggml_type type, - int n_dims, - const int64_t *ne); - -struct ggml_tensor * ggml_new_tensor_1d( - struct ggml_context * ctx, - enum ggml_type type, - int64_t ne0); - -struct ggml_tensor * ggml_new_tensor_2d( - struct ggml_context * ctx, - enum ggml_type type, - int64_t ne0, - int64_t ne1); - -struct ggml_tensor * ggml_new_tensor_3d( - struct ggml_context * ctx, - enum ggml_type type, - int64_t ne0, - int64_t ne1, - int64_t ne2); - -struct ggml_tensor * ggml_new_tensor_4d( - struct ggml_context * ctx, - enum ggml_type type, - int64_t ne0, - int64_t ne1, - int64_t ne2, - int64_t ne3); - -struct ggml_tensor * ggml_new_i32(struct ggml_context * ctx, int32_t value); -struct ggml_tensor * ggml_new_f32(struct ggml_context * ctx, float value); - -struct ggml_tensor * ggml_dup_tensor (struct ggml_context * ctx, const struct ggml_tensor * src); -struct ggml_tensor * ggml_view_tensor(struct ggml_context * ctx, const struct ggml_tensor * src); - -struct ggml_tensor * ggml_set_zero(struct ggml_tensor * tensor); -struct ggml_tensor * ggml_set_i32 (struct ggml_tensor * tensor, int32_t value); -struct ggml_tensor * ggml_set_f32 (struct ggml_tensor * tensor, float value); - -int32_t ggml_get_i32_1d(const struct ggml_tensor * tensor, int i); -void ggml_set_i32_1d(const struct ggml_tensor * tensor, int i, int32_t value); - -float ggml_get_f32_1d(const struct ggml_tensor * tensor, int i); -void ggml_set_f32_1d(const struct ggml_tensor * tensor, int i, float value); - - void * ggml_get_data (const struct ggml_tensor * tensor); -float * ggml_get_data_f32(const struct ggml_tensor * tensor); - -// -// operations on tensors with backpropagation -// - -struct ggml_tensor * ggml_dup( - struct ggml_context * ctx, - struct ggml_tensor * a); - -struct ggml_tensor * ggml_add( - struct ggml_context * ctx, - struct ggml_tensor * a, - struct ggml_tensor * b); - - -struct ggml_tensor * ggml_add_inplace( - struct ggml_context * ctx, - struct ggml_tensor * a, - struct ggml_tensor * b); - -struct ggml_tensor * ggml_sub( - struct ggml_context * ctx, - struct ggml_tensor * a, - struct ggml_tensor * b); - -struct ggml_tensor * ggml_mul( - struct ggml_context * ctx, - struct ggml_tensor * a, - struct ggml_tensor * b); - -struct ggml_tensor * ggml_div( - struct ggml_context * ctx, - struct ggml_tensor * a, - struct ggml_tensor * b); - -struct ggml_tensor * ggml_sqr( - struct ggml_context * ctx, - struct ggml_tensor * a); - -struct 
ggml_tensor * ggml_sqrt( - struct ggml_context * ctx, - struct ggml_tensor * a); - -// return scalar -// TODO: compute sum along rows -struct ggml_tensor * ggml_sum( - struct ggml_context * ctx, - struct ggml_tensor * a); - -// mean along rows -struct ggml_tensor * ggml_mean( - struct ggml_context * ctx, - struct ggml_tensor * a); - -// if a is the same shape as b, and a is not parameter, return a -// otherwise, return a new tensor: repeat(a) to fit in b -struct ggml_tensor * ggml_repeat( - struct ggml_context * ctx, - struct ggml_tensor * a, - struct ggml_tensor * b); - -struct ggml_tensor * ggml_abs( - struct ggml_context * ctx, - struct ggml_tensor * a); - -struct ggml_tensor * ggml_sgn( - struct ggml_context * ctx, - struct ggml_tensor * a); - -struct ggml_tensor * ggml_neg( - struct ggml_context * ctx, - struct ggml_tensor * a); - -struct ggml_tensor * ggml_step( - struct ggml_context * ctx, - struct ggml_tensor * a); - -struct ggml_tensor * ggml_relu( - struct ggml_context * ctx, - struct ggml_tensor * a); - -// TODO: double-check this computation is correct -struct ggml_tensor * ggml_gelu( - struct ggml_context * ctx, - struct ggml_tensor * a); - -struct ggml_tensor * ggml_silu( - struct ggml_context * ctx, - struct ggml_tensor * a); - -// normalize along rows -// TODO: eps is hardcoded to 1e-5 for now -struct ggml_tensor * ggml_norm( - struct ggml_context * ctx, - struct ggml_tensor * a); - -struct ggml_tensor * ggml_rms_norm( - struct ggml_context * ctx, - struct ggml_tensor * a); - -// A: m rows, n columns -// B: p rows, n columns (i.e. we transpose it internally) -// result is m columns, p rows -struct ggml_tensor * ggml_mul_mat( - struct ggml_context * ctx, - struct ggml_tensor * a, - struct ggml_tensor * b); - -// -// operations on tensors without backpropagation -// - -// in-place, returns view(a) -struct ggml_tensor * ggml_scale( - struct ggml_context * ctx, - struct ggml_tensor * a, - struct ggml_tensor * b); - -// a -> b, return view(b) -struct ggml_tensor * ggml_cpy( - struct ggml_context * ctx, - struct ggml_tensor * a, - struct ggml_tensor * b); - -// make contiguous -struct ggml_tensor * ggml_cont( - struct ggml_context * ctx, - struct ggml_tensor * a); - -// return view(a), b specifies the new shape -// TODO: when we start computing gradient, make a copy instead of view -struct ggml_tensor * ggml_reshape( - struct ggml_context * ctx, - struct ggml_tensor * a, - struct ggml_tensor * b); - -// return view(a) -// TODO: when we start computing gradient, make a copy instead of view -struct ggml_tensor * ggml_reshape_2d( - struct ggml_context * ctx, - struct ggml_tensor * a, - int64_t ne0, - int64_t ne1); - -// return view(a) -// TODO: when we start computing gradient, make a copy instead of view -struct ggml_tensor * ggml_reshape_3d( - struct ggml_context * ctx, - struct ggml_tensor * a, - int64_t ne0, - int64_t ne1, - int64_t ne2); - -// offset in bytes -struct ggml_tensor * ggml_view_1d( - struct ggml_context * ctx, - struct ggml_tensor * a, - int64_t ne0, - size_t offset); - -struct ggml_tensor * ggml_view_2d( - struct ggml_context * ctx, - struct ggml_tensor * a, - int64_t ne0, - int64_t ne1, - size_t nb1, // row stride in bytes - size_t offset); - -struct ggml_tensor * ggml_view_3d( - struct ggml_context * ctx, - struct ggml_tensor * a, - int64_t ne0, - int64_t ne1, - int64_t ne2, - size_t nb1, // row stride in bytes - size_t nb2, // slice stride in bytes - size_t offset); - -struct ggml_tensor * ggml_permute( - struct ggml_context * ctx, - struct ggml_tensor * a, - 
int axis0, - int axis1, - int axis2, - int axis3); - -// alias for ggml_permute(ctx, a, 1, 0, 2, 3) -struct ggml_tensor * ggml_transpose( - struct ggml_context * ctx, - struct ggml_tensor * a); - -struct ggml_tensor * ggml_get_rows( - struct ggml_context * ctx, - struct ggml_tensor * a, - struct ggml_tensor * b); - -// set elements above the diagonal to -INF -// in-place, returns view(a) -struct ggml_tensor * ggml_diag_mask_inf( - struct ggml_context * ctx, - struct ggml_tensor * a, - int n_past); - -// in-place, returns view(a) -struct ggml_tensor * ggml_soft_max( - struct ggml_context * ctx, - struct ggml_tensor * a); - -// rotary position embedding -// in-place, returns view(a) -// if mode & 1 == 1, skip n_past elements -// if mode & 2 == 1, GPT-NeoX style -// TODO: avoid creating a new tensor every time -struct ggml_tensor * ggml_rope( - struct ggml_context * ctx, - struct ggml_tensor * a, - int n_past, - int n_dims, - int mode); - -// padding = 1 -// TODO: we don't support extra parameters for now -// that's why we are hard-coding the stride, padding, and dilation -// not great .. -struct ggml_tensor * ggml_conv_1d_1s( - struct ggml_context * ctx, - struct ggml_tensor * a, - struct ggml_tensor * b); - -struct ggml_tensor * ggml_conv_1d_2s( - struct ggml_context * ctx, - struct ggml_tensor * a, - struct ggml_tensor * b); - -struct ggml_tensor * ggml_flash_attn( - struct ggml_context * ctx, - struct ggml_tensor * q, - struct ggml_tensor * k, - struct ggml_tensor * v, - bool masked); - -struct ggml_tensor * ggml_flash_ff( - struct ggml_context * ctx, - struct ggml_tensor * a, - struct ggml_tensor * b0, - struct ggml_tensor * b1, - struct ggml_tensor * c0, - struct ggml_tensor * c1); - -// Mapping operations -typedef void (*ggml_unary_op_f32_t)(const int, float *, const float *); -typedef void (*ggml_binary_op_f32_t)(const int, float *, const float *, const float *); - -struct ggml_tensor * ggml_map_unary_f32( - struct ggml_context * ctx, - struct ggml_tensor * a, - const ggml_unary_op_f32_t fun); - -struct ggml_tensor * ggml_map_binary_f32( - struct ggml_context * ctx, - struct ggml_tensor * a, - struct ggml_tensor * b, - const ggml_binary_op_f32_t fun); - -// -// automatic differentiation -// - -void ggml_set_param( - struct ggml_context * ctx, - struct ggml_tensor * tensor); - -void ggml_build_forward_expand(struct ggml_cgraph * cgraph, struct ggml_tensor * tensor); - -struct ggml_cgraph ggml_build_forward (struct ggml_tensor * tensor); -struct ggml_cgraph ggml_build_backward(struct ggml_context * ctx, struct ggml_cgraph * gf, bool keep); - -void ggml_graph_compute(struct ggml_context * ctx, struct ggml_cgraph * cgraph); -void ggml_graph_reset (struct ggml_cgraph * cgraph); - -// print info and performance information for the graph -void ggml_graph_print(const struct ggml_cgraph * cgraph); - -// dump the graph into a file using the dot format -void ggml_graph_dump_dot(const struct ggml_cgraph * gb, const struct ggml_cgraph * gf, const char * filename); - -// -// optimization -// - -// optimization methods -enum ggml_opt_type { - GGML_OPT_ADAM, - GGML_OPT_LBFGS, -}; - -// linesearch methods -enum ggml_linesearch { - GGML_LINESEARCH_DEFAULT = 1, - - GGML_LINESEARCH_BACKTRACKING_ARMIJO = 0, - GGML_LINESEARCH_BACKTRACKING_WOLFE = 1, - GGML_LINESEARCH_BACKTRACKING_STRONG_WOLFE = 2, -}; - -// optimization return values -enum ggml_opt_result { - GGML_OPT_OK = 0, - GGML_OPT_DID_NOT_CONVERGE, - GGML_OPT_NO_CONTEXT, - GGML_OPT_INVALID_WOLFE, - GGML_OPT_FAIL, - - GGML_LINESEARCH_FAIL = -128, - 
GGML_LINESEARCH_MINIMUM_STEP, - GGML_LINESEARCH_MAXIMUM_STEP, - GGML_LINESEARCH_MAXIMUM_ITERATIONS, - GGML_LINESEARCH_INVALID_PARAMETERS, -}; - -// optimization parameters -// -// see ggml.c (ggml_opt_default_params) for default values -// -struct ggml_opt_params { - enum ggml_opt_type type; - - int n_threads; - - // delta-based convergence test - // - // if past == 0 - disabled - // if past > 0: - // stop if |f(x) - f(x_past)| < delta * max(1, |f(x)|) - // - int past; - float delta; - - // maximum number of iterations without improvement - // - // if 0 - disabled - // if > 0: - // assume convergence if no cost improvement in this number of iterations - // - int max_no_improvement; - - bool print_forward_graph; - bool print_backward_graph; - - // ADAM parameters - struct { - int n_iter; - - float alpha; // learning rate - float beta1; - float beta2; - float eps; // epsilon for numerical stability - float eps_f; // epsilon for convergence test - float eps_g; // epsilon for convergence test - } adam; - - // LBFGS parameters - struct { - int m; // number of corrections to approximate the inv. Hessian - int n_iter; - int max_linesearch; - - float eps; // convergence tolerance - float ftol; // line search tolerance - float wolfe; - float min_step; - float max_step; - - enum ggml_linesearch linesearch; - } lbfgs; -}; - -struct ggml_opt_params ggml_opt_default_params(enum ggml_opt_type type); - -// optimize the function defined by the tensor f -enum ggml_opt_result ggml_opt( - struct ggml_context * ctx, - struct ggml_opt_params params, - struct ggml_tensor * f); - -// -// quantization -// - -size_t ggml_quantize_q4_0(const float * src, void * dst, int n, int k, int64_t * hist); -size_t ggml_quantize_q4_1(const float * src, void * dst, int n, int k, int64_t * hist); -size_t ggml_quantize_q4_2(const float * src, void * dst, int n, int k, int64_t * hist); -size_t ggml_quantize_q4_3(const float * src, void * dst, int n, int k, int64_t * hist); - -size_t ggml_quantize_chunk(enum ggml_type type, const float * src, void * dst, int start, int n, int64_t * hist); - -// -// system info -// - -int ggml_cpu_has_avx(void); -int ggml_cpu_has_avx2(void); -int ggml_cpu_has_avx512(void); -int ggml_cpu_has_avx512_vbmi(void); -int ggml_cpu_has_avx512_vnni(void); -int ggml_cpu_has_fma(void); -int ggml_cpu_has_neon(void); -int ggml_cpu_has_arm_fma(void); -int ggml_cpu_has_f16c(void); -int ggml_cpu_has_fp16_va(void); -int ggml_cpu_has_wasm_simd(void); -int ggml_cpu_has_blas(void); -int ggml_cpu_has_cublas(void); -int ggml_cpu_has_sse3(void); -int ggml_cpu_has_vsx(void); - - -// -// Internal types and functions exposed for tests and benchmarks -// - -#ifdef __cplusplus -// restrict not standard in C++ -#define GGML_RESTRICT -#else -#define GGML_RESTRICT restrict -#endif -typedef void (*dequantize_row_q_t)(const void * GGML_RESTRICT x, float * GGML_RESTRICT y, int k); -typedef void (*quantize_row_q_t)(const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int k); -typedef void (*vec_dot_q_t)(const int n, float * GGML_RESTRICT s, const void * GGML_RESTRICT x, const void * GGML_RESTRICT y); - -typedef struct { - dequantize_row_q_t dequantize_row_q; - quantize_row_q_t quantize_row_q; - quantize_row_q_t quantize_row_q_reference; - quantize_row_q_t quantize_row_q_dot; - vec_dot_q_t vec_dot_q; -} quantize_fns_t; - -quantize_fns_t ggml_internal_get_quantize_fn(size_t i); - -#ifdef __cplusplus -} -#endif diff --git a/ggml/Cargo.toml b/ggml/Cargo.toml index 3f1e43a8..255e3a7a 100644 --- a/ggml/Cargo.toml +++ b/ggml/Cargo.toml @@ 
-4,4 +4,8 @@ version = { workspace = true } edition = "2021" [dependencies] -ggml-sys = { path = "../ggml-sys" } \ No newline at end of file +thiserror = "1.0" +ggml-sys = { path = "sys" } + +[dev-dependencies] +rand = "0.8" \ No newline at end of file diff --git a/ggml/src/context.rs b/ggml/src/context.rs new file mode 100644 index 00000000..c4ac5d68 --- /dev/null +++ b/ggml/src/context.rs @@ -0,0 +1,406 @@ +use std::{ + os::raw::{c_int, c_void}, + ptr::NonNull, + sync::Arc, +}; + +use crate::{sys, usize_to_i32, usize_to_i64, Buffer, ComputationGraph, Tensor, Type}; + +/// Acts as a RAII-guard over a `sys::ggml_context`, allocating via +/// `ggml_init` and dropping via `ggml_free`. +pub struct Context { + /// An `Arc` is used to model the relation between the context and the + /// allocated tensors. Tensors are owned by the object, so a [`Tensor`] + /// contains a `Weak` reference underneath and doesn't let you do anything + /// with it if the underlying context has been deallocated. + ptr: Arc>, +} + +impl Context { + /// Creates a new [Context] with the specified `mem_size` as a working area. + pub fn init(mem_size: usize, alloc: bool) -> Self { + let raw = unsafe { + sys::ggml_init(sys::ggml_init_params { + mem_size, + // Null here means we want ggml to own this memory. We don't + // support passing an owned buffer from the Rust side. + mem_buffer: std::ptr::null_mut(), + no_alloc: !alloc, + }) + }; + Self { + ptr: Arc::new(NonNull::new(raw).expect("Should not be null")), + } + } + + /// Wraps a raw tensor with a weak pointer to the context. + fn new_tensor_raw(&self, raw: *mut sys::ggml_tensor) -> Tensor { + Tensor { + ptr: NonNull::new(raw).expect("Should not be null"), + ctx: Arc::downgrade(&self.ptr), + } + } + + /// Creates a new 1D tensor. + pub fn new_tensor_1d(&self, typ: Type, ne0: usize) -> Tensor { + let raw = + unsafe { sys::ggml_new_tensor_1d(self.ptr.as_ptr(), typ.into(), usize_to_i64(ne0)) }; + self.new_tensor_raw(raw) + } + + /// Creates a new 2D tensor. + pub fn new_tensor_2d(&self, typ: Type, ne0: usize, ne1: usize) -> Tensor { + let raw = unsafe { + sys::ggml_new_tensor_2d( + self.ptr.as_ptr(), + typ.into(), + usize_to_i64(ne0), + usize_to_i64(ne1), + ) + }; + self.new_tensor_raw(raw) + } + + /// Creates a new 3D tensor. + pub fn new_tensor_3d(&self, typ: Type, ne0: usize, ne1: usize, ne2: usize) -> Tensor { + let raw = unsafe { + sys::ggml_new_tensor_3d( + self.ptr.as_ptr(), + typ.into(), + usize_to_i64(ne0), + usize_to_i64(ne1), + usize_to_i64(ne2), + ) + }; + self.new_tensor_raw(raw) + } + + /// Creates a new 1D tensor with the specified value. + pub fn new_f32(&self, x: f32) -> Tensor { + let raw = unsafe { sys::ggml_new_f32(self.ptr.as_ptr(), x) }; + self.new_tensor_raw(raw) + } + + /// Unknown, aside from the obvious. It's transposing something! + pub fn op_transpose(&self, a: &Tensor) -> Tensor { + let tensor = unsafe { sys::ggml_transpose(self.ptr.as_ptr(), a.ptr.as_ptr()) }; + self.new_tensor_raw(tensor) + } + + /// Unknown. + pub fn op_get_rows(&self, a: &Tensor, b: &Tensor) -> Tensor { + let tensor = + unsafe { sys::ggml_get_rows(self.ptr.as_ptr(), a.ptr.as_ptr(), b.ptr.as_ptr()) }; + self.new_tensor_raw(tensor) + } + + /// Creates a new tensor with the values of `a`, but normalized. + pub fn op_norm(&self, a: &Tensor) -> Tensor { + let tensor = unsafe { sys::ggml_norm(self.ptr.as_ptr(), a.ptr.as_ptr()) }; + self.new_tensor_raw(tensor) + } + + /// Creates a new tensor with the values of `a`, but normalized using RMSNorm. 
+ pub fn op_rms_norm(&self, a: &Tensor) -> Tensor { + let tensor = unsafe { sys::ggml_rms_norm(self.ptr.as_ptr(), a.ptr.as_ptr()) }; + self.new_tensor_raw(tensor) + } + + /// Creates a new tensor with the multiplication of `a` and `b`. + pub fn op_mul(&self, a: &Tensor, b: &Tensor) -> Tensor { + let tensor = unsafe { sys::ggml_mul(self.ptr.as_ptr(), a.ptr.as_ptr(), b.ptr.as_ptr()) }; + self.new_tensor_raw(tensor) + } + + /// Unknown. + pub fn op_repeat(&self, a: &Tensor, b: &Tensor) -> Tensor { + let tensor = unsafe { sys::ggml_repeat(self.ptr.as_ptr(), a.ptr.as_ptr(), b.ptr.as_ptr()) }; + self.new_tensor_raw(tensor) + } + + /// Creates a new tensor with the multiplication of `a` and `b` as if they were matrices. + /// + /// `a`: m rows, n columns + /// + /// `b`: p rows, n columns (i.e. we transpose it internally) + /// + /// Result is m columns, p rows + pub fn op_mul_mat(&self, a: &Tensor, b: &Tensor) -> Tensor { + let tensor = + unsafe { sys::ggml_mul_mat(self.ptr.as_ptr(), a.ptr.as_ptr(), b.ptr.as_ptr()) }; + self.new_tensor_raw(tensor) + } + + /// Creates a new tensor with the addition of `a` and `b`. + pub fn op_add(&self, a: &Tensor, b: &Tensor) -> Tensor { + let tensor = unsafe { sys::ggml_add(self.ptr.as_ptr(), a.ptr.as_ptr(), b.ptr.as_ptr()) }; + self.new_tensor_raw(tensor) + } + + /// Creates a new tensor with the [SiLU](https://pytorch.org/docs/stable/generated/torch.nn.SiLU.html) activation function applied to `a`. + pub fn op_silu(&self, a: &Tensor) -> Tensor { + let tensor = unsafe { sys::ggml_silu(self.ptr.as_ptr(), a.ptr.as_ptr()) }; + self.new_tensor_raw(tensor) + } + + /// In-place, scales `a` by the 1D tensor `b`. + pub fn op_scale(&self, a: &Tensor, b: &Tensor) -> Tensor { + let tensor = unsafe { sys::ggml_scale(self.ptr.as_ptr(), a.ptr.as_ptr(), b.ptr.as_ptr()) }; + self.new_tensor_raw(tensor) + } + + /// In-place, sets the elements above the diagonal to -INF. + pub fn op_diag_mask_inf(&self, a: &Tensor, n_past: usize) -> Tensor { + let tensor = unsafe { + sys::ggml_diag_mask_inf(self.ptr.as_ptr(), a.ptr.as_ptr(), usize_to_i32(n_past)) + }; + self.new_tensor_raw(tensor) + } + + /// In-place, applies the [Softmax function](https://en.wikipedia.org/wiki/Softmax_function) to `a`. + pub fn op_soft_max(&self, a: &Tensor) -> Tensor { + let tensor = unsafe { sys::ggml_soft_max(self.ptr.as_ptr(), a.ptr.as_ptr()) }; + self.new_tensor_raw(tensor) + } + + /// Creates a new tensor with result of mapping `fun` with `a`. + /// + /// `cnt` is the number of `f32` elements to be mapped. + /// `src` is source for elements to be mapped. + /// `dst` is the destination for mapped elements. + /// + /// # Safety + /// + /// This is marked unsafe since we're passing pointers into C code, and not + /// only vanilla pointers but a pointer to a function. For obvious reasons, it's + /// important not to do anything crazy like mutate any of these values concurrently. + /// + /// Don't make assumptions about how/when the function will be called. It may be called + /// on a row, it may be called on a whole tensor. It may be called concurrently or not. + /// Once you give that function pointer to C land, all bets are off. + pub unsafe fn op_map_unary( + &self, + a: &Tensor, + fun: unsafe extern "C" fn(cnt: c_int, dst: *mut f32, src: *const f32), + ) -> Tensor { + let tensor = + unsafe { sys::ggml_map_unary_f32(self.ptr.as_ptr(), a.ptr.as_ptr(), Some(fun)) }; + self.new_tensor_raw(tensor) + } + + /// Creates a new tensor with result of mapping `fun` with `a` and `b`. 
+ /// + /// `cnt` is the number of `f32` elements to be mapped. + /// `src0`, `src1` are the sources of elements to be mapped. + /// `dst` is the destination for mapped elements. + /// + /// # Safety + /// + /// This is marked unsafe since we're passing pointers into C code, and not + /// only vanilla pointers but a pointer to a function. For obvious reasons, it's + /// important not to do anything crazy like mutate any of these values concurrently. + /// + /// Don't make assumptions about how/when the function will be called. It may be called + /// on a row, it may be called on a whole tensor. It may be called concurrently or not. + /// Once you give that function pointer to C land, all bets are off. + pub unsafe fn op_map_binary( + &self, + a: &Tensor, + b: &Tensor, + fun: unsafe extern "C" fn(cnt: c_int, dst: *mut f32, src0: *const f32, src1: *const f32), + ) -> Tensor { + let tensor = unsafe { + sys::ggml_map_binary_f32(self.ptr.as_ptr(), a.ptr.as_ptr(), b.ptr.as_ptr(), Some(fun)) + }; + self.new_tensor_raw(tensor) + } + + /// Creates a 1D view over `a`. + pub fn op_view_1d(&self, a: &Tensor, ne0: usize, offset: usize) -> Tensor { + let tensor = unsafe { + sys::ggml_view_1d(self.ptr.as_ptr(), a.ptr.as_ptr(), usize_to_i64(ne0), offset) + }; + self.new_tensor_raw(tensor) + } + + /// Creates a 2D view over `a`. + pub fn op_view_2d(&self, a: &Tensor, ne: (usize, usize), nb1: usize, offset: usize) -> Tensor { + let (ne0, ne1) = ne; + let tensor = unsafe { + sys::ggml_view_2d( + self.ptr.as_ptr(), + a.ptr.as_ptr(), + usize_to_i64(ne0), + usize_to_i64(ne1), + nb1, + offset, + ) + }; + self.new_tensor_raw(tensor) + } + + /// Creates a 3d view over `a`. + pub fn op_view_3d( + &self, + a: &Tensor, + ne: (usize, usize, usize), + nb: (usize, usize), + offset: usize, + ) -> Tensor { + let (ne0, ne1, ne2) = ne; + let (nb1, nb2) = nb; + let tensor = unsafe { + sys::ggml_view_3d( + self.ptr.as_ptr(), + a.ptr.as_ptr(), + usize_to_i64(ne0), + usize_to_i64(ne1), + usize_to_i64(ne2), + nb1, + nb2, + offset, + ) + }; + self.new_tensor_raw(tensor) + } + + /// Copies `a` to `b` and returns `b`. + pub fn op_cpy(&self, a: &Tensor, b: &Tensor) -> Tensor { + let tensor = unsafe { sys::ggml_cpy(self.ptr.as_ptr(), a.ptr.as_ptr(), b.ptr.as_ptr()) }; + self.new_tensor_raw(tensor) + } + + /// Creates a new tensor with the axes of `a` permuted as described by the parameters. + pub fn op_permute( + &self, + a: &Tensor, + axis0: usize, + axis1: usize, + axis2: usize, + axis3: usize, + ) -> Tensor { + let tensor = unsafe { + sys::ggml_permute( + self.ptr.as_ptr(), + a.ptr.as_ptr(), + usize_to_i32(axis0), + usize_to_i32(axis1), + usize_to_i32(axis2), + usize_to_i32(axis3), + ) + }; + self.new_tensor_raw(tensor) + } + + /// In-place; reshapes `a` in accordance with the dimensions of `b` + pub fn op_reshape(&self, a: &Tensor, b: &Tensor) -> Tensor { + let tensor = + unsafe { sys::ggml_reshape(self.ptr.as_ptr(), a.ptr.as_ptr(), b.ptr.as_ptr()) }; + self.new_tensor_raw(tensor) + } + + /// In-place; reshapes `a` in accordance with the specified dimensions. + pub fn op_reshape_2d(&self, a: &Tensor, ne0: usize, ne1: usize) -> Tensor { + let tensor = unsafe { + sys::ggml_reshape_2d( + self.ptr.as_ptr(), + a.ptr.as_ptr(), + usize_to_i64(ne0), + usize_to_i64(ne1), + ) + }; + self.new_tensor_raw(tensor) + } + + /// In-place; reshapes `a` in accordance with the specified dimensions. 
+ pub fn op_reshape_3d(&self, a: &Tensor, ne0: usize, ne1: usize, ne2: usize) -> Tensor { + let tensor = unsafe { + sys::ggml_reshape_3d( + self.ptr.as_ptr(), + a.ptr.as_ptr(), + usize_to_i64(ne0), + usize_to_i64(ne1), + usize_to_i64(ne2), + ) + }; + self.new_tensor_raw(tensor) + } + + /// In-place; applies ROtary Positional Encoding. + pub fn op_rope(&self, a: &Tensor, npast: usize, ndims: usize, mode: i32) -> Tensor { + let tensor = unsafe { + sys::ggml_rope( + self.ptr.as_ptr(), + a.ptr.as_ptr(), + usize_to_i32(npast), + usize_to_i32(ndims), + mode, + ) + }; + self.new_tensor_raw(tensor) + } + + /// Computes the specified graph. Must be run in order to evaluate the graph. + pub fn graph_compute(&self, graph: &mut ComputationGraph) { + unsafe { + sys::ggml_graph_compute(self.ptr.as_ptr(), &mut graph.inner); + } + } + + /// Retrieves the memory used by this [Context]. + pub fn used_mem(&self) -> usize { + unsafe { sys::ggml_used_mem(self.ptr.as_ptr()) } + } + + /// Sets the scratch buffer to be used by this [Context]. + /// + /// If `scratch_buffer` is `None`, the scratch buffer will be disabled. + pub fn use_scratch<'a>(&'a self, scratch_buffer: Option<&'a mut Buffer>) { + let (size, data) = if let Some(buffer) = scratch_buffer { + (buffer.data.len(), buffer.data.as_ptr() as *mut c_void) + } else { + (0, std::ptr::null_mut()) + }; + // SAFETY: this just passes (most likely uninitialized) memory buffer to the ggml C API + unsafe { + sys::ggml_set_scratch( + self.ptr.as_ptr(), + sys::ggml_scratch { + offs: 0, + size, + data, + }, + ); + } + } + + /// TODO: something something + pub fn op_alibi(&self, a: &Tensor, n_past: usize, n_head: usize) -> Tensor { + let tensor = unsafe { + sys::ggml_alibi( + self.ptr.as_ptr(), + a.ptr.as_ptr(), + usize_to_i32(n_past), + usize_to_i32(n_head), + ) + }; + + self.new_tensor_raw(tensor) + } + + /// Gaussian Error Linear Units + pub fn op_gelu(&self, a: &Tensor) -> Tensor { + let tensor = unsafe { sys::ggml_gelu(self.ptr.as_ptr(), a.ptr.as_ptr()) }; + self.new_tensor_raw(tensor) + } +} + +impl Drop for Context { + fn drop(&mut self) { + // SAFETY: The only non-weak copy of ptr is no longer accessible after + // this drop call. + unsafe { + sys::ggml_free(self.ptr.as_ptr()); + } + } +} diff --git a/ggml/src/lib.rs b/ggml/src/lib.rs index 6d8905f8..09ae4b95 100644 --- a/ggml/src/lib.rs +++ b/ggml/src/lib.rs @@ -1,5 +1,3 @@ -#![deny(missing_docs)] - //! `ggml` is a semi-idiomatic wrapper for the `ggml` C library. //! //! It exposes a subset of operations (currently used to implement the [llama-rs](https://crates.io/crates/llama-rs) library). @@ -8,11 +6,48 @@ //! `ggml` operates on a computational graph; no values will be computed until [Context::graph_compute] is executed. //! All [Tensor]s are nodes in this computational graph, and values cannot be retrieved until computation is completed. -use std::{ - os::raw::{c_int, c_void}, - ptr::NonNull, - sync::{Arc, Weak}, -}; +use std::os::raw::{c_int, c_void}; + +pub use tensor::Tensor; + +/// Utilities for reading and writing. +pub mod util; + +pub mod loader; + +pub mod saver; + +pub mod context; +mod tensor; + +pub(crate) use ggml_sys as sys; + +#[cfg(test)] +mod tests; + +/// The type of a tensor element. +pub type ElementType = Type; + +#[derive(Debug, PartialEq, Clone, Copy)] +/// The format of the file containing the model. +pub enum ContainerType { + /// `GGML`: legacy format, oldest ggml tensor file format + Ggml, + /// `GGMF`: also legacy format. Introduces versioning. Newer than GGML, older than GGJT. 
+ Ggmf, + /// `GGJT`: mmap-able format. + Ggjt, +} +impl ContainerType { + /// Does this container type support mmap? + pub fn support_mmap(&self) -> bool { + match self { + ContainerType::Ggml => false, + ContainerType::Ggmf => false, + ContainerType::Ggjt => true, + } + } +} /// Magic constant for `ggml` files (versioned, ggmf). pub const FILE_MAGIC_GGMF: u32 = 0x67676d66; @@ -25,7 +60,7 @@ pub const FILE_MAGIC_UNVERSIONED: u32 = 0x67676d6c; pub const FORMAT_VERSION: u32 = 1; /// The size of a `ggml` object. -pub const OBJECT_SIZE: usize = ggml_sys::GGML_OBJECT_SIZE; +pub const OBJECT_SIZE: usize = sys::GGML_OBJECT_SIZE; #[derive(Debug, Copy, Clone, PartialEq, Eq, Default)] /// The type of a value in `ggml`. @@ -37,10 +72,14 @@ pub enum Type { Q4_1, /// Quantized 4-bit (type 2). Q4_2, - /// Quantized 4-bit (type 3). - Q4_3, + /// Quantized 5-bit (type 0). + Q5_0, + /// Quantized 5-bit (type 1). + Q5_1, /// Quantized 8-bit (type 0). Q8_0, + /// Quantized 8-bit (type 1). + Q8_1, /// Integer 32-bit. I32, /// Float 16-bit. @@ -48,32 +87,36 @@ pub enum Type { /// Float 32-bit. F32, } -impl From for ggml_sys::ggml_type { +impl From for sys::ggml_type { fn from(t: Type) -> Self { match t { - Type::Q4_0 => ggml_sys::ggml_type_GGML_TYPE_Q4_0, - Type::Q4_1 => ggml_sys::ggml_type_GGML_TYPE_Q4_1, - Type::Q4_2 => ggml_sys::ggml_type_GGML_TYPE_Q4_2, - Type::Q4_3 => ggml_sys::ggml_type_GGML_TYPE_Q4_3, - Type::Q8_0 => ggml_sys::ggml_type_GGML_TYPE_Q8_0, - Type::I32 => ggml_sys::ggml_type_GGML_TYPE_I32, - Type::F16 => ggml_sys::ggml_type_GGML_TYPE_F16, - Type::F32 => ggml_sys::ggml_type_GGML_TYPE_F32, + Type::Q4_0 => sys::ggml_type_GGML_TYPE_Q4_0, + Type::Q4_1 => sys::ggml_type_GGML_TYPE_Q4_1, + Type::Q4_2 => sys::ggml_type_GGML_TYPE_Q4_2, + Type::Q5_0 => sys::ggml_type_GGML_TYPE_Q5_0, + Type::Q5_1 => sys::ggml_type_GGML_TYPE_Q5_1, + Type::Q8_0 => sys::ggml_type_GGML_TYPE_Q8_0, + Type::Q8_1 => sys::ggml_type_GGML_TYPE_Q8_1, + Type::I32 => sys::ggml_type_GGML_TYPE_I32, + Type::F16 => sys::ggml_type_GGML_TYPE_F16, + Type::F32 => sys::ggml_type_GGML_TYPE_F32, } } } -impl TryFrom for Type { +impl TryFrom for Type { type Error = (); - fn try_from(t: ggml_sys::ggml_type) -> Result { + fn try_from(t: sys::ggml_type) -> Result { match t { - ggml_sys::ggml_type_GGML_TYPE_Q4_0 => Ok(Type::Q4_0), - ggml_sys::ggml_type_GGML_TYPE_Q4_1 => Ok(Type::Q4_1), - ggml_sys::ggml_type_GGML_TYPE_Q4_2 => Ok(Type::Q4_2), - ggml_sys::ggml_type_GGML_TYPE_Q4_3 => Ok(Type::Q4_3), - ggml_sys::ggml_type_GGML_TYPE_Q8_0 => Ok(Type::Q8_0), - ggml_sys::ggml_type_GGML_TYPE_I32 => Ok(Type::I32), - ggml_sys::ggml_type_GGML_TYPE_F16 => Ok(Type::F16), - ggml_sys::ggml_type_GGML_TYPE_F32 => Ok(Type::F32), + sys::ggml_type_GGML_TYPE_Q4_0 => Ok(Type::Q4_0), + sys::ggml_type_GGML_TYPE_Q4_1 => Ok(Type::Q4_1), + sys::ggml_type_GGML_TYPE_Q4_2 => Ok(Type::Q4_2), + sys::ggml_type_GGML_TYPE_Q5_0 => Ok(Type::Q5_0), + sys::ggml_type_GGML_TYPE_Q5_1 => Ok(Type::Q5_1), + sys::ggml_type_GGML_TYPE_Q8_0 => Ok(Type::Q8_0), + sys::ggml_type_GGML_TYPE_Q8_1 => Ok(Type::Q8_1), + sys::ggml_type_GGML_TYPE_I32 => Ok(Type::I32), + sys::ggml_type_GGML_TYPE_F16 => Ok(Type::F16), + sys::ggml_type_GGML_TYPE_F32 => Ok(Type::F32), _ => Err(()), } } @@ -84,8 +127,10 @@ impl std::fmt::Display for Type { Type::Q4_0 => write!(f, "q4_0"), Type::Q4_1 => write!(f, "q4_1"), Type::Q4_2 => write!(f, "q4_2"), - Type::Q4_3 => write!(f, "q4_3"), + Type::Q5_0 => write!(f, "q5_0"), + Type::Q5_1 => write!(f, "q5_1"), Type::Q8_0 => write!(f, "q8_0"), + Type::Q8_1 => write!(f, "q8_1"), 
Type::I32 => write!(f, "i32"), Type::F16 => write!(f, "f16"), Type::F32 => write!(f, "f32"), @@ -93,395 +138,6 @@ impl std::fmt::Display for Type { } } -/// Acts as a RAII-guard over a `ggml_sys::ggml_context`, allocating via -/// `ggml_init` and dropping via `ggml_free`. -pub struct Context { - /// An `Arc` is used to model the relation between the context and the - /// allocated tensors. Tensors are owned by the object, so a [`Tensor`] - /// contains a `Weak` reference underneath and doesn't let you do anything - /// with it if the underlying context has been deallocated. - ptr: Arc>, -} -impl Context { - /// Creates a new [Context] with the specified `mem_size` as a working area. - pub fn init(mem_size: usize, alloc: bool) -> Self { - let raw = unsafe { - ggml_sys::ggml_init(ggml_sys::ggml_init_params { - mem_size, - // Null here means we want ggml to own this memory. We don't - // support passing an owned buffer from the Rust side. - mem_buffer: std::ptr::null_mut(), - no_alloc: !alloc, - }) - }; - Self { - ptr: Arc::new(NonNull::new(raw).expect("Should not be null")), - } - } - - /// Wraps a raw tensor with a weak pointer to the context. - fn new_tensor_raw(&self, raw: *mut ggml_sys::ggml_tensor) -> Tensor { - Tensor { - ptr: NonNull::new(raw).expect("Should not be null"), - ctx: Arc::downgrade(&self.ptr), - } - } - - /// Creates a new 1D tensor. - pub fn new_tensor_1d(&self, typ: Type, ne0: usize) -> Tensor { - let raw = unsafe { - ggml_sys::ggml_new_tensor_1d(self.ptr.as_ptr(), typ.into(), usize_to_i64(ne0)) - }; - self.new_tensor_raw(raw) - } - - /// Creates a new 2D tensor. - pub fn new_tensor_2d(&self, typ: Type, ne0: usize, ne1: usize) -> Tensor { - let raw = unsafe { - ggml_sys::ggml_new_tensor_2d( - self.ptr.as_ptr(), - typ.into(), - usize_to_i64(ne0), - usize_to_i64(ne1), - ) - }; - self.new_tensor_raw(raw) - } - - /// Creates a new 3D tensor. - pub fn new_tensor_3d(&self, typ: Type, ne0: usize, ne1: usize, ne2: usize) -> Tensor { - let raw = unsafe { - ggml_sys::ggml_new_tensor_3d( - self.ptr.as_ptr(), - typ.into(), - usize_to_i64(ne0), - usize_to_i64(ne1), - usize_to_i64(ne2), - ) - }; - self.new_tensor_raw(raw) - } - - /// Creates a new 1D tensor with the specified value. - pub fn new_f32(&self, x: f32) -> Tensor { - let raw = unsafe { ggml_sys::ggml_new_f32(self.ptr.as_ptr(), x) }; - self.new_tensor_raw(raw) - } - - /// Unknown, aside from the obvious. It's transposing something! - pub fn op_transpose(&self, a: &Tensor) -> Tensor { - let tensor = unsafe { ggml_sys::ggml_transpose(self.ptr.as_ptr(), a.ptr.as_ptr()) }; - self.new_tensor_raw(tensor) - } - - /// Unknown. - pub fn op_get_rows(&self, a: &Tensor, b: &Tensor) -> Tensor { - let tensor = - unsafe { ggml_sys::ggml_get_rows(self.ptr.as_ptr(), a.ptr.as_ptr(), b.ptr.as_ptr()) }; - self.new_tensor_raw(tensor) - } - - /// Creates a new tensor with the values of `a`, but normalized. - pub fn op_norm(&self, a: &Tensor) -> Tensor { - let tensor = unsafe { ggml_sys::ggml_norm(self.ptr.as_ptr(), a.ptr.as_ptr()) }; - self.new_tensor_raw(tensor) - } - - /// Creates a new tensor with the values of `a`, but normalized using RMSNorm. - pub fn op_rms_norm(&self, a: &Tensor) -> Tensor { - let tensor = unsafe { ggml_sys::ggml_rms_norm(self.ptr.as_ptr(), a.ptr.as_ptr()) }; - self.new_tensor_raw(tensor) - } - - /// Creates a new tensor with the multiplication of `a` and `b`. 
- pub fn op_mul(&self, a: &Tensor, b: &Tensor) -> Tensor { - let tensor = - unsafe { ggml_sys::ggml_mul(self.ptr.as_ptr(), a.ptr.as_ptr(), b.ptr.as_ptr()) }; - self.new_tensor_raw(tensor) - } - - /// Unknown. - pub fn op_repeat(&self, a: &Tensor, b: &Tensor) -> Tensor { - let tensor = - unsafe { ggml_sys::ggml_repeat(self.ptr.as_ptr(), a.ptr.as_ptr(), b.ptr.as_ptr()) }; - self.new_tensor_raw(tensor) - } - - /// Creates a new tensor with the multiplication of `a` and `b` as if they were matrices. - /// - /// `a`: m rows, n columns - /// - /// `b`: p rows, n columns (i.e. we transpose it internally) - /// - /// Result is m columns, p rows - pub fn op_mul_mat(&self, a: &Tensor, b: &Tensor) -> Tensor { - let tensor = - unsafe { ggml_sys::ggml_mul_mat(self.ptr.as_ptr(), a.ptr.as_ptr(), b.ptr.as_ptr()) }; - self.new_tensor_raw(tensor) - } - - /// Creates a new tensor with the addition of `a` and `b`. - pub fn op_add(&self, a: &Tensor, b: &Tensor) -> Tensor { - let tensor = - unsafe { ggml_sys::ggml_add(self.ptr.as_ptr(), a.ptr.as_ptr(), b.ptr.as_ptr()) }; - self.new_tensor_raw(tensor) - } - - /// Creates a new tensor with the [SiLU](https://pytorch.org/docs/stable/generated/torch.nn.SiLU.html) activation function applied to `a`. - pub fn op_silu(&self, a: &Tensor) -> Tensor { - let tensor = unsafe { ggml_sys::ggml_silu(self.ptr.as_ptr(), a.ptr.as_ptr()) }; - self.new_tensor_raw(tensor) - } - - /// In-place, scales `a` by the 1D tensor `b`. - pub fn op_scale(&self, a: &Tensor, b: &Tensor) -> Tensor { - let tensor = - unsafe { ggml_sys::ggml_scale(self.ptr.as_ptr(), a.ptr.as_ptr(), b.ptr.as_ptr()) }; - self.new_tensor_raw(tensor) - } - - /// In-place, sets the elements above the diagonal to -INF. - pub fn op_diag_mask_inf(&self, a: &Tensor, n_past: usize) -> Tensor { - let tensor = unsafe { - ggml_sys::ggml_diag_mask_inf(self.ptr.as_ptr(), a.ptr.as_ptr(), usize_to_i32(n_past)) - }; - self.new_tensor_raw(tensor) - } - - /// In-place, applies the [Softmax function](https://en.wikipedia.org/wiki/Softmax_function) to `a`. - pub fn op_soft_max(&self, a: &Tensor) -> Tensor { - let tensor = unsafe { ggml_sys::ggml_soft_max(self.ptr.as_ptr(), a.ptr.as_ptr()) }; - self.new_tensor_raw(tensor) - } - - /// Creates a new tensor with result of mapping `fun` with `a`. - /// - /// `cnt` is the number of `f32` elements to be mapped. - /// `src` is source for elements to be mapped. - /// `dst` is the destination for mapped elements. - /// - /// # Safety - /// - /// This is marked unsafe since we're passing pointers into C code, and not - /// only vanilla pointers but a pointer to a function. For obvious reasons, it's - /// important not to do anything crazy like mutate any of these values concurrently. - /// - /// Don't make assumptions about how/when the function will be called. It may be called - /// on a row, it may be called on a whole tensor. It may be called concurrently or not. - /// Once you give that function pointer to C land, all bets are off. - pub unsafe fn op_map_unary( - &self, - a: &Tensor, - fun: unsafe extern "C" fn(cnt: c_int, dst: *mut f32, src: *const f32), - ) -> Tensor { - let tensor = - unsafe { ggml_sys::ggml_map_unary_f32(self.ptr.as_ptr(), a.ptr.as_ptr(), Some(fun)) }; - self.new_tensor_raw(tensor) - } - - /// Creates a new tensor with result of mapping `fun` with `a` and `b`. - /// - /// `cnt` is the number of `f32` elements to be mapped. - /// `src0`, `src1` are the sources of elements to be mapped. - /// `dst` is the destination for mapped elements. 
- /// - /// # Safety - /// - /// This is marked unsafe since we're passing pointers into C code, and not - /// only vanilla pointers but a pointer to a function. For obvious reasons, it's - /// important not to do anything crazy like mutate any of these values concurrently. - /// - /// Don't make assumptions about how/when the function will be called. It may be called - /// on a row, it may be called on a whole tensor. It may be called concurrently or not. - /// Once you give that function pointer to C land, all bets are off. - pub unsafe fn op_map_binary( - &self, - a: &Tensor, - b: &Tensor, - fun: unsafe extern "C" fn(cnt: c_int, dst: *mut f32, src0: *const f32, src1: *const f32), - ) -> Tensor { - let tensor = unsafe { - ggml_sys::ggml_map_binary_f32( - self.ptr.as_ptr(), - a.ptr.as_ptr(), - b.ptr.as_ptr(), - Some(fun), - ) - }; - self.new_tensor_raw(tensor) - } - - /// Creates a 1D view over `a`. - pub fn op_view_1d(&self, a: &Tensor, ne0: usize, offset: usize) -> Tensor { - let tensor = unsafe { - ggml_sys::ggml_view_1d(self.ptr.as_ptr(), a.ptr.as_ptr(), usize_to_i64(ne0), offset) - }; - self.new_tensor_raw(tensor) - } - - /// Creates a 2D view over `a`. - pub fn op_view_2d(&self, a: &Tensor, ne: (usize, usize), nb1: usize, offset: usize) -> Tensor { - let (ne0, ne1) = ne; - let tensor = unsafe { - ggml_sys::ggml_view_2d( - self.ptr.as_ptr(), - a.ptr.as_ptr(), - usize_to_i64(ne0), - usize_to_i64(ne1), - nb1, - offset, - ) - }; - self.new_tensor_raw(tensor) - } - - /// Creates a 3d view over `a`. - pub fn op_view_3d( - &self, - a: &Tensor, - ne: (usize, usize, usize), - nb: (usize, usize), - offset: usize, - ) -> Tensor { - let (ne0, ne1, ne2) = ne; - let (nb1, nb2) = nb; - let tensor = unsafe { - ggml_sys::ggml_view_3d( - self.ptr.as_ptr(), - a.ptr.as_ptr(), - usize_to_i64(ne0), - usize_to_i64(ne1), - usize_to_i64(ne2), - nb1, - nb2, - offset, - ) - }; - self.new_tensor_raw(tensor) - } - - /// Copies `a` to `b` and returns `b`. - pub fn op_cpy(&self, a: &Tensor, b: &Tensor) -> Tensor { - let tensor = - unsafe { ggml_sys::ggml_cpy(self.ptr.as_ptr(), a.ptr.as_ptr(), b.ptr.as_ptr()) }; - self.new_tensor_raw(tensor) - } - - /// Creates a new tensor with the axes of `a` permuted as described by the parameters. - pub fn op_permute( - &self, - a: &Tensor, - axis0: usize, - axis1: usize, - axis2: usize, - axis3: usize, - ) -> Tensor { - let tensor = unsafe { - ggml_sys::ggml_permute( - self.ptr.as_ptr(), - a.ptr.as_ptr(), - usize_to_i32(axis0), - usize_to_i32(axis1), - usize_to_i32(axis2), - usize_to_i32(axis3), - ) - }; - self.new_tensor_raw(tensor) - } - - /// In-place; reshapes `a` in accordance with the dimensions of `b` - pub fn op_reshape(&self, a: &Tensor, b: &Tensor) -> Tensor { - let tensor = - unsafe { ggml_sys::ggml_reshape(self.ptr.as_ptr(), a.ptr.as_ptr(), b.ptr.as_ptr()) }; - self.new_tensor_raw(tensor) - } - - /// In-place; reshapes `a` in accordance with the specified dimensions. - pub fn op_reshape_2d(&self, a: &Tensor, ne0: usize, ne1: usize) -> Tensor { - let tensor = unsafe { - ggml_sys::ggml_reshape_2d( - self.ptr.as_ptr(), - a.ptr.as_ptr(), - usize_to_i64(ne0), - usize_to_i64(ne1), - ) - }; - self.new_tensor_raw(tensor) - } - - /// In-place; reshapes `a` in accordance with the specified dimensions. 
- pub fn op_reshape_3d(&self, a: &Tensor, ne0: usize, ne1: usize, ne2: usize) -> Tensor { - let tensor = unsafe { - ggml_sys::ggml_reshape_3d( - self.ptr.as_ptr(), - a.ptr.as_ptr(), - usize_to_i64(ne0), - usize_to_i64(ne1), - usize_to_i64(ne2), - ) - }; - self.new_tensor_raw(tensor) - } - - /// In-place; applies ROtary Positional Encoding. - pub fn op_rope(&self, a: &Tensor, npast: usize, ndims: usize, mode: i32) -> Tensor { - let tensor = unsafe { - ggml_sys::ggml_rope( - self.ptr.as_ptr(), - a.ptr.as_ptr(), - usize_to_i32(npast), - usize_to_i32(ndims), - mode, - ) - }; - self.new_tensor_raw(tensor) - } - - /// Computes the specified graph. Must be run in order to evaluate the graph. - pub fn graph_compute(&self, graph: &mut ComputationGraph) { - unsafe { - ggml_sys::ggml_graph_compute(self.ptr.as_ptr(), &mut graph.inner); - } - } - - /// Retrieves the memory used by this [Context]. - pub fn used_mem(&self) -> usize { - unsafe { ggml_sys::ggml_used_mem(self.ptr.as_ptr()) } - } - - /// Sets the scratch buffer to be used by this [Context]. - /// - /// If `scratch_buffer` is `None`, the scratch buffer will be disabled. - pub fn use_scratch<'a>(&'a self, scratch_buffer: Option<&'a mut Buffer>) { - let (size, data) = if let Some(buffer) = scratch_buffer { - (buffer.data.len(), buffer.data.as_ptr() as *mut c_void) - } else { - (0, std::ptr::null_mut()) - }; - // SAFETY: this just passes (most likely uninitialized) memory buffer to the ggml C API - unsafe { - ggml_sys::ggml_set_scratch( - self.ptr.as_ptr(), - ggml_sys::ggml_scratch { - offs: 0, - size, - data, - }, - ); - } - } -} - -impl Drop for Context { - fn drop(&mut self) { - // SAFETY: The only non-weak copy of ptr is no longer accessible after - // this drop call. - unsafe { - ggml_sys::ggml_free(self.ptr.as_ptr()); - } - } -} - /// A buffer of memory that can be used as a scratch buffer for a [Context]. /// /// See [Context::use_scratch]. @@ -507,166 +163,43 @@ impl Buffer { } } -/// Tensors are owned by the context. A tensor is alive as long as the -/// underlying context it was created with is alive. -pub struct Tensor { - ptr: NonNull, - ctx: Weak>, -} - -impl Tensor { - /// Size of the `ggml_tensor` struct in bytes. - /// - /// Exposed for purposes of determining context size. - pub const C_TYPE_SIZE: usize = std::mem::size_of::(); - - /// Creates a shared copy of this tensor pointer. - pub fn share(&self) -> Self { - Tensor { - ptr: self.ptr, - ctx: Weak::clone(&self.ctx), - } - } - - fn with_alive_ctx(&self, mut f: impl FnMut() -> U) -> U { - if let Some(_ctx) = self.ctx.upgrade() { - f() - } else { - panic!("Using a tensor after the context was dropped") - } - } - - fn with_alive_ctx_mut(&self, mut f: impl FnMut() -> U) -> U { - if let Some(_ctx) = self.ctx.upgrade() { - f() - } else { - panic!("Using a tensor after the context was dropped") - } - } - - /// Number of bytes used by this tensor. - pub fn nbytes(&self) -> usize { - self.with_alive_ctx(|| { - // SAFETY: The with_alive_call guarantees the context is alive - unsafe { ggml_sys::ggml_nbytes(self.ptr.as_ptr()) } - }) - } - - /// Provides raw mutable access to the data contained within the tensor. - /// - /// # Safety - /// - /// Only `std::slice::from_raw_parts_mut(tensor.data(), tensor.nbytes())` is safe to mutate. 
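To tie the pieces above together (lazily recorded ops, `graph_compute`, and `used_mem`), here is a hedged end-to-end sketch using only calls that appear in this patch. The 16 MiB arena and the 4x4 shapes are arbitrary, and the tensors are left uninitialized since only the call sequence matters here.

```rust
fn tiny_graph_demo() {
    // Arbitrary arena size; the second argument mirrors the `alloc` flag used
    // elsewhere in this patch (`!use_mmap`).
    let ctx = ggml::context::Context::init(16 * 1024 * 1024, true);

    let a = ctx.new_tensor_2d(ggml::Type::F32, 4, 4);
    let b = ctx.new_tensor_2d(ggml::Type::F32, 4, 4);
    let c = ctx.op_mul_mat(&a, &b); // records the op; nothing is computed yet

    let mut graph = ggml::ComputationGraph::new(4 /* n_threads */);
    graph.build_forward_expand(&c);
    ctx.graph_compute(&mut graph); // executes the recorded ops

    println!("arena bytes used: {}", ctx.used_mem());
}
```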
- pub unsafe fn data(&mut self) -> *mut c_void { - self.with_alive_ctx(|| { - // SAFETY: The with_alive_call guarantees the context is alive - unsafe { *self.ptr.as_ptr() }.data - }) - } - - /// Set the tensor's data pointer (useful for mmap-ed data) - /// - /// # Safety - /// - /// The memory region from `data_ptr` to `data_ptr.offset(tensor.nbytes())` will be read from. - pub unsafe fn set_data(&mut self, data_ptr: *mut c_void) { - let tensor = self.ptr.as_mut(); - self.with_alive_ctx_mut(|| { - // SAFETY: The with_alive_call guarantees the context is alive - tensor.data = data_ptr; - }) - } - - /// Number of elements in this tensor. - pub fn nelements(&self) -> usize { - self.with_alive_ctx(|| { - // SAFETY: The with_alive_call guarantees the context is alive - i64_to_usize(unsafe { ggml_sys::ggml_nelements(self.ptr.as_ptr()) }) - }) - } - - /// Number of elements in each dimension. - pub fn get_ne(&self) -> [i64; 4] { - self.with_alive_ctx(|| unsafe { *self.ptr.as_ptr() }.ne) - } - - /// Stride of each dimension. - pub fn get_nb(&self) -> [usize; 4] { - self.with_alive_ctx(|| unsafe { *self.ptr.as_ptr() }.nb) - } - - /// The data type. - pub fn get_type(&self) -> Type { - self.with_alive_ctx(|| unsafe { *self.ptr.as_ptr() }.type_.try_into().unwrap()) - } - - /// The size of the element type in bytes. - pub fn element_size(&self) -> usize { - self.with_alive_ctx(|| unsafe { ggml_sys::ggml_element_size(self.ptr.as_ptr()) }) - } - - /// Writes `src` to this tensor. - /// - /// # Safety - /// - /// This tensor must not be written to or read by from any other code. - pub unsafe fn write_data(&mut self, src: &[u8]) { - std::ptr::copy_nonoverlapping(src.as_ptr(), self.data() as *mut u8, src.len()) - } - - /// Zeroes out this tensor. - pub fn zero_data(&mut self) { - unsafe { std::ptr::write_bytes(self.data() as *mut u8, 0, self.nbytes()) } - } - - /// Reads this tensor into `dst`, starting from `offset`. - /// - /// # Safety - /// - /// This tensor must not be written to or read by from any other code. - pub unsafe fn read_data(&self, offset: usize, dst: &mut [u8]) { - let data = unsafe { ggml_sys::ggml_get_data(self.ptr.as_ptr()).add(offset) }; - std::ptr::copy_nonoverlapping(data, dst as *mut _ as _, dst.len()) - } -} - /// A `ggml` computation graph. Keeps track of all state during computation. pub struct ComputationGraph { - inner: ggml_sys::ggml_cgraph, + inner: sys::ggml_cgraph, } impl ComputationGraph { /// Create a new [ComputationGraph] with the specified `n_threads`. pub fn new(n_threads: usize) -> Self { Self { - inner: ggml_sys::ggml_cgraph { + inner: sys::ggml_cgraph { n_threads: usize_to_i32(n_threads), // SAFETY: This should be safe to zero. The original C++ impl // just leaves it uninitialized - ..unsafe { std::mem::zeroed::() } + ..unsafe { std::mem::zeroed::() } }, } } /// Build this computational graph in the forward direction in preparation for computation. pub fn build_forward_expand(&mut self, tensor: &Tensor) { - unsafe { ggml_sys::ggml_build_forward_expand(&mut self.inner, tensor.ptr.as_ptr()) } + unsafe { sys::ggml_build_forward_expand(&mut self.inner, tensor.ptr.as_ptr()) } } } /// The size of `t` as bytes. pub fn type_size(t: Type) -> usize { - unsafe { ggml_sys::ggml_type_size(t.into()) } + unsafe { sys::ggml_type_size(t.into()) } } /// [type_size]/[blck_size] as float. pub fn type_sizef(x: Type) -> f64 { - (unsafe { ggml_sys::ggml_type_sizef(x.into()) }) as f64 + (unsafe { sys::ggml_type_sizef(x.into()) }) as f64 } /// The size of a block for `t`. 
Only relevant for quantized types. pub fn blck_size(t: Type) -> usize { - i32_to_usize(unsafe { ggml_sys::ggml_blck_size(t.into()) }) + i32_to_usize(unsafe { sys::ggml_blck_size(t.into()) }) } fn usize_to_i32(val: usize) -> i32 { @@ -698,7 +231,7 @@ pub struct QuantizationResult { /// You must ensure that `src.len() == n_elements`, and `n_elements_0` /// is the first dimension of `src`. pub fn quantize_q4_0(src: &[f32], n_elements: usize, n_elements_0: usize) -> QuantizationResult { - quantize_impl(src, n_elements, n_elements_0, ggml_sys::ggml_quantize_q4_0) + quantize_impl(src, n_elements, n_elements_0, sys::ggml_quantize_q4_0) } /// Quantizes `src` into `dst` using `q4_1` quantization. @@ -706,7 +239,7 @@ pub fn quantize_q4_0(src: &[f32], n_elements: usize, n_elements_0: usize) -> Qua /// You must ensure that `src.len() == n_elements`, and `n_elements_0` /// is the first dimension of `src`. pub fn quantize_q4_1(src: &[f32], n_elements: usize, n_elements_0: usize) -> QuantizationResult { - quantize_impl(src, n_elements, n_elements_0, ggml_sys::ggml_quantize_q4_1) + quantize_impl(src, n_elements, n_elements_0, sys::ggml_quantize_q4_1) } fn quantize_impl( diff --git a/ggml-format/src/loader.rs b/ggml/src/loader.rs similarity index 92% rename from ggml-format/src/loader.rs rename to ggml/src/loader.rs index ffc99c9b..30963fd0 100644 --- a/ggml-format/src/loader.rs +++ b/ggml/src/loader.rs @@ -35,7 +35,7 @@ pub enum LoadError { /// The name of the tensor. tensor_name: String, /// The format type that was encountered. - ftype: i32, + ftype: u32, }, #[error("invariant broken: {0}")] /// An invariant was broken. @@ -75,7 +75,7 @@ impl TensorInfo { /// /// Do not use this if loading with `mmap`. pub fn read_data(&self, reader: &mut R) -> std::io::Result> { - let n_bytes = self.n_elements * ggml::type_size(self.element_type); + let n_bytes = self.n_elements * crate::type_size(self.element_type); let mut data = vec![0; n_bytes]; reader.seek(SeekFrom::Start(self.start_offset))?; reader.read_exact(&mut data)?; @@ -85,7 +85,7 @@ impl TensorInfo { /// Returns the size occupied by a tensor's data in bytes given the element type and number of elements. pub fn data_size(element_type: ElementType, n_elements: usize) -> usize { - (ggml::type_size(element_type) * n_elements) / ggml::blck_size(element_type) + (crate::type_size(element_type) * n_elements) / crate::blck_size(element_type) } #[derive(Debug, Clone)] @@ -118,9 +118,9 @@ pub fn load_model( ) -> Result<(), LoadError> { // Verify magic let container_type: ContainerType = match read_u32(reader)? { - ggml::FILE_MAGIC_GGMF => ContainerType::Ggmf, - ggml::FILE_MAGIC_GGJT => ContainerType::Ggjt, - ggml::FILE_MAGIC_UNVERSIONED => ContainerType::Ggml, + crate::FILE_MAGIC_GGMF => ContainerType::Ggmf, + crate::FILE_MAGIC_GGJT => ContainerType::Ggjt, + crate::FILE_MAGIC_UNVERSIONED => ContainerType::Ggml, magic => return Err(LoadError::InvalidMagic(magic)), }; handler @@ -131,7 +131,7 @@ pub fn load_model( match container_type { ContainerType::Ggmf | ContainerType::Ggjt => { let _version: u32 = match read_u32(reader)? 
{ - ggml::FORMAT_VERSION => ggml::FORMAT_VERSION, + crate::FORMAT_VERSION => crate::FORMAT_VERSION, version => return Err(LoadError::InvalidFormatVersion(container_type, version)), }; } @@ -180,7 +180,7 @@ fn load_weights( // load tensor header let n_dims: usize = read_i32(reader)?.try_into()?; let name_len = read_i32(reader)?; - let ftype = read_i32(reader)?; + let ftype = read_u32(reader)?; let mut n_elements: usize = 1; let mut dims = [1usize, 1]; @@ -198,10 +198,11 @@ fn load_weights( // load tensor name let name = String::from_utf8(read_bytes_with_len(reader, name_len.try_into()?)?)?; - let ftype = ggml::Type::try_from(ftype).map_err(|_| LoadError::UnsupportedElementType { - tensor_name: name.clone(), - ftype, - })?; + let ftype = + crate::Type::try_from(ftype).map_err(|_| LoadError::UnsupportedElementType { + tensor_name: name.clone(), + ftype, + })?; // sanity check match ftype { diff --git a/ggml-format/src/saver.rs b/ggml/src/saver.rs similarity index 95% rename from ggml-format/src/saver.rs rename to ggml/src/saver.rs index 565032a3..9a5e86e8 100644 --- a/ggml-format/src/saver.rs +++ b/ggml/src/saver.rs @@ -61,8 +61,8 @@ pub fn save_model( tensor_names: &[String], ) -> Result<(), SaveError> { // Write header and hyperparameters - util::write_u32(writer, ggml::FILE_MAGIC_GGJT)?; - util::write_u32(writer, ggml::FORMAT_VERSION)?; + util::write_u32(writer, crate::FILE_MAGIC_GGJT)?; + util::write_u32(writer, crate::FORMAT_VERSION)?; handler .write_hyperparameters(writer) .map_err(SaveError::ImplementationError)?; @@ -97,7 +97,7 @@ pub fn save_model( // Write tensor header util::write_i32(writer, n_dims.try_into()?)?; util::write_i32(writer, name.len().try_into()?)?; - util::write_i32(writer, element_type.into())?; + util::write_u32(writer, element_type.into())?; for &dim in &dims[0..n_dims] { util::write_i32(writer, dim.try_into()?)?; } diff --git a/ggml/src/tensor.rs b/ggml/src/tensor.rs new file mode 100644 index 00000000..0a5edd82 --- /dev/null +++ b/ggml/src/tensor.rs @@ -0,0 +1,126 @@ +use std::{os::raw::c_void, ptr::NonNull, sync::Weak}; + +use crate::{i64_to_usize, sys, Type}; + +/// Tensors are owned by the context. A tensor is alive as long as the +/// underlying context it was created with is alive. +pub struct Tensor { + pub(crate) ptr: NonNull, + pub(crate) ctx: Weak>, +} + +impl Tensor { + /// Size of the `ggml_tensor` struct in bytes. + /// + /// Exposed for purposes of determining context size. + pub const C_TYPE_SIZE: usize = std::mem::size_of::(); + + /// Creates a shared copy of this tensor pointer. + pub fn share(&self) -> Self { + Tensor { + ptr: self.ptr, + ctx: Weak::clone(&self.ctx), + } + } + + fn with_alive_ctx(&self, mut f: impl FnMut() -> U) -> U { + if let Some(_ctx) = self.ctx.upgrade() { + f() + } else { + panic!("Using a tensor after the context was dropped") + } + } + + fn with_alive_ctx_mut(&self, mut f: impl FnMut() -> U) -> U { + if let Some(_ctx) = self.ctx.upgrade() { + f() + } else { + panic!("Using a tensor after the context was dropped") + } + } + + /// Number of bytes used by this tensor. + pub fn nbytes(&self) -> usize { + self.with_alive_ctx(|| { + // SAFETY: The with_alive_call guarantees the context is alive + unsafe { sys::ggml_nbytes(self.ptr.as_ptr()) } + }) + } + + /// Provides raw mutable access to the data contained within the tensor. + /// + /// # Safety + /// + /// Only `std::slice::from_raw_parts_mut(tensor.data(), tensor.nbytes())` is safe to mutate. 
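The `with_alive_ctx` guard used by `Tensor` (both in the removed copy above and in the new `ggml/src/tensor.rs`) is a general pattern: hold only a `Weak` handle to the owner and refuse to touch the raw pointer once the owner has been dropped. A simplified, standalone sketch with stand-in types rather than the crate's real definitions:

```rust
use std::sync::{Arc, Weak};

struct FakeContext; // stands in for the context that owns the allocation
struct FakeTensor {
    ctx: Weak<FakeContext>, // does not keep the context alive by itself
}

impl FakeTensor {
    fn with_alive_ctx<U>(&self, f: impl FnOnce() -> U) -> U {
        if let Some(_ctx) = self.ctx.upgrade() {
            f()
        } else {
            panic!("Using a tensor after the context was dropped")
        }
    }
}

fn main() {
    let ctx = Arc::new(FakeContext);
    let tensor = FakeTensor { ctx: Arc::downgrade(&ctx) };
    tensor.with_alive_ctx(|| println!("context still alive"));
    drop(ctx);
    // tensor.with_alive_ctx(|| ()); // would panic: the context is gone
}
```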
+ pub unsafe fn data(&mut self) -> *mut c_void { + self.with_alive_ctx(|| { + // SAFETY: The with_alive_call guarantees the context is alive + unsafe { *self.ptr.as_ptr() }.data + }) + } + + /// Set the tensor's data pointer (useful for mmap-ed data) + /// + /// # Safety + /// + /// The memory region from `data_ptr` to `data_ptr.offset(tensor.nbytes())` will be read from. + pub unsafe fn set_data(&mut self, data_ptr: *mut c_void) { + let tensor = self.ptr.as_mut(); + self.with_alive_ctx_mut(|| { + // SAFETY: The with_alive_call guarantees the context is alive + tensor.data = data_ptr; + }) + } + + /// Number of elements in this tensor. + pub fn nelements(&self) -> usize { + self.with_alive_ctx(|| { + // SAFETY: The with_alive_call guarantees the context is alive + i64_to_usize(unsafe { sys::ggml_nelements(self.ptr.as_ptr()) }) + }) + } + + /// Number of elements in each dimension. + pub fn get_ne(&self) -> [i64; 4] { + self.with_alive_ctx(|| unsafe { *self.ptr.as_ptr() }.ne) + } + + /// Stride of each dimension. + pub fn get_nb(&self) -> [usize; 4] { + self.with_alive_ctx(|| unsafe { *self.ptr.as_ptr() }.nb) + } + + /// The data type. + pub fn get_type(&self) -> Type { + self.with_alive_ctx(|| unsafe { *self.ptr.as_ptr() }.type_.try_into().unwrap()) + } + + /// The size of the element type in bytes. + pub fn element_size(&self) -> usize { + self.with_alive_ctx(|| unsafe { sys::ggml_element_size(self.ptr.as_ptr()) }) + } + + /// Writes `src` to this tensor. + /// + /// # Safety + /// + /// This tensor must not be written to or read by from any other code. + pub unsafe fn write_data(&mut self, src: &[u8]) { + std::ptr::copy_nonoverlapping(src.as_ptr(), self.data() as *mut u8, src.len()) + } + + /// Zeroes out this tensor. + pub fn zero_data(&mut self) { + unsafe { std::ptr::write_bytes(self.data() as *mut u8, 0, self.nbytes()) } + } + + /// Reads this tensor into `dst`, starting from `offset`. + /// + /// # Safety + /// + /// This tensor must not be written to or read by from any other code. 
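A usage sketch for the raw-byte accessors above, mirroring how the GPT-2 code later in this patch writes token ids and reads logits: `bytemuck::cast_slice` bridges typed slices and the `&[u8]` interface. This assumes the wrapper API exactly as declared in this diff and is not part of the patch.

```rust
fn roundtrip_f32(ctx: &ggml::context::Context, n: usize) {
    let mut tensor = ctx.new_tensor_1d(ggml::Type::F32, n);

    let src = vec![0.5f32; n];
    // SAFETY: nothing else reads or writes this tensor during the copy.
    unsafe { tensor.write_data(bytemuck::cast_slice(&src)) };

    let mut dst = vec![0.0f32; n];
    // SAFETY: same exclusivity argument; offset 0 starts at the beginning of the data.
    unsafe { tensor.read_data(0, bytemuck::cast_slice_mut(&mut dst)) };

    assert_eq!(src, dst);
}
```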
+ pub unsafe fn read_data(&self, offset: usize, dst: &mut [u8]) { + let data = unsafe { sys::ggml_get_data(self.ptr.as_ptr()).add(offset) }; + std::ptr::copy_nonoverlapping(data, dst as *mut _ as _, dst.len()) + } +} diff --git a/ggml-format/src/tests.rs b/ggml/src/tests.rs similarity index 84% rename from ggml-format/src/tests.rs rename to ggml/src/tests.rs index 91d925bb..9b17acd7 100644 --- a/ggml-format/src/tests.rs +++ b/ggml/src/tests.rs @@ -26,7 +26,7 @@ fn can_roundtrip_loader_and_saver() { ]; let mut rng = rand::thread_rng(); - let element_type = ggml::Type::F16; + let element_type = crate::Type::F16; let model = Model { hyperparameters: Hyperparameters { some_hyperparameter: random(), @@ -43,13 +43,13 @@ fn can_roundtrip_loader_and_saver() { .collect::>(); let n_elements = dims.iter().product::(); - let data = (0..data_size(element_type, n_elements)) + let data = (0..loader::data_size(element_type, n_elements)) .map(|_| random()) .collect::>(); ( format!("tensor_{}", i), - TensorData { + saver::TensorData { n_dims, dims: dims.try_into().unwrap(), element_type, @@ -64,7 +64,7 @@ fn can_roundtrip_loader_and_saver() { let mut buffer = Vec::new(); let mut cursor = std::io::Cursor::new(&mut buffer); let mut save_handler = MockSaveHandler { model: &model }; - save_model( + saver::save_model( &mut cursor, &mut save_handler, &model.vocabulary, @@ -78,7 +78,7 @@ fn can_roundtrip_loader_and_saver() { data: &buffer, loaded_model: Model::default(), }; - load_model(&mut cursor, &mut load_handler).unwrap(); + loader::load_model(&mut cursor, &mut load_handler).unwrap(); assert_eq!(load_handler.loaded_model, model); } @@ -92,14 +92,14 @@ impl Hyperparameters { fn read(reader: &mut dyn BufRead) -> Result { Ok(Self { some_hyperparameter: util::read_u32(reader)?, - some_other_hyperparameter: util::read_u32(reader)? 
as u32, + some_other_hyperparameter: util::read_u32(reader)?, vocabulary_size: util::read_u32(reader)?, }) } fn write(&self, writer: &mut dyn Write) -> Result<(), std::io::Error> { util::write_u32(writer, self.some_hyperparameter)?; - util::write_u32(writer, self.some_other_hyperparameter as u32)?; + util::write_u32(writer, self.some_other_hyperparameter)?; util::write_u32(writer, self.vocabulary_size)?; Ok(()) } @@ -109,19 +109,19 @@ impl Hyperparameters { struct Model { hyperparameters: Hyperparameters, vocabulary: Vec<(Vec, f32)>, - tensors: BTreeMap, + tensors: BTreeMap, } struct MockSaveHandler<'a> { model: &'a Model, } -impl SaveHandler for MockSaveHandler<'_> { +impl saver::SaveHandler for MockSaveHandler<'_> { fn write_hyperparameters(&mut self, writer: &mut dyn Write) -> Result<(), DummyError> { self.model.hyperparameters.write(writer).unwrap(); Ok(()) } - fn tensor_data(&mut self, tensor_name: &str) -> Result { + fn tensor_data(&mut self, tensor_name: &str) -> Result { self.model .tensors .get(tensor_name) @@ -134,7 +134,7 @@ struct MockLoadHandler<'a> { data: &'a [u8], loaded_model: Model, } -impl LoadHandler for MockLoadHandler<'_> { +impl loader::LoadHandler for MockLoadHandler<'_> { fn container_type(&mut self, container_type: ContainerType) -> Result<(), DummyError> { assert_eq!(container_type, ContainerType::Ggjt); Ok(()) @@ -149,9 +149,9 @@ impl LoadHandler for MockLoadHandler<'_> { fn read_hyperparameters( &mut self, reader: &mut dyn BufRead, - ) -> Result { + ) -> Result { self.loaded_model.hyperparameters = Hyperparameters::read(reader).unwrap(); - Ok(PartialHyperparameters { + Ok(loader::PartialHyperparameters { n_vocab: self .loaded_model .hyperparameters @@ -161,8 +161,8 @@ impl LoadHandler for MockLoadHandler<'_> { }) } - fn tensor_buffer(&mut self, info: TensorInfo) -> Result<(), DummyError> { - let data = TensorData { + fn tensor_buffer(&mut self, info: loader::TensorInfo) -> Result<(), DummyError> { + let data = saver::TensorData { n_dims: info.n_dims, dims: info.dims, element_type: info.element_type, diff --git a/ggml-format/src/util.rs b/ggml/src/util.rs similarity index 100% rename from ggml-format/src/util.rs rename to ggml/src/util.rs diff --git a/ggml/sys/Cargo.toml b/ggml/sys/Cargo.toml new file mode 100644 index 00000000..793d25b3 --- /dev/null +++ b/ggml/sys/Cargo.toml @@ -0,0 +1,7 @@ +[package] +name = "ggml-sys" +version = { workspace = true } +edition = "2021" + +[build-dependencies] +cc = "^1.0" diff --git a/ggml-sys/build.rs b/ggml/sys/build.rs similarity index 91% rename from ggml-sys/build.rs rename to ggml/sys/build.rs index a4b89b70..0b99fa0f 100644 --- a/ggml-sys/build.rs +++ b/ggml/sys/build.rs @@ -1,17 +1,18 @@ use std::env; +// By default, this crate will attempt to compile ggml with the features of your host system if +// the host and target are the same. If they are not, it will turn off auto-feature-detection, +// and you will need to manually specify target features through target-features. fn main() { - // By default, this crate will attempt to compile ggml with the features of your host system if - // the host and target are the same. If they are not, it will turn off auto-feature-detection, - // and you will need to manually specify target features through target-features. 
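The comment relocated above describes a host-versus-target gate for CPU feature detection. The full `build.rs` is only partially shown in this hunk, so the following is an illustrative sketch of that gate, not the script itself:

```rust
// build.rs sketch: only let `cc` probe CPU features for native builds.
fn main() {
    let host = std::env::var("HOST").unwrap();
    let target = std::env::var("TARGET").unwrap();

    let ggml_src = ["ggml/src/ggml.c"];
    let mut builder = cc::Build::new();
    let build = builder
        .files(ggml_src.iter())
        .include("./ggml/include/ggml");

    if host != target {
        // Cross build: skip auto-detection; callers pass `target-feature`
        // flags themselves (e.g. through RUSTFLAGS).
    } else {
        // Native build: the real script adds per-OS flags here (not shown).
    }

    build.compile("ggml");
}
```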
- println!("cargo:rerun-if-changed=ggml"); - let ggml_src = ["ggml/ggml.c"]; + let ggml_src = ["ggml/src/ggml.c"]; let mut builder = cc::Build::new(); - let build = builder.files(ggml_src.iter()).include("include"); + let build = builder + .files(ggml_src.iter()) + .include("./ggml/include/ggml"); // This is a very basic heuristic for applying compile flags. // Feel free to update this to fit your operating system. diff --git a/ggml/sys/ggml b/ggml/sys/ggml new file mode 160000 index 00000000..43dfb439 --- /dev/null +++ b/ggml/sys/ggml @@ -0,0 +1 @@ +Subproject commit 43dfb439fbf03eaf2db34a511f9e60d8338493e7 diff --git a/ggml-sys/src/lib.rs b/ggml/sys/src/lib.rs similarity index 93% rename from ggml-sys/src/lib.rs rename to ggml/sys/src/lib.rs index d2d1e22a..be3d5d3b 100644 --- a/ggml-sys/src/lib.rs +++ b/ggml/sys/src/lib.rs @@ -1,10 +1,12 @@ -/* automatically generated by rust-bindgen 0.64.0 */ +/* automatically generated by rust-bindgen 0.65.1 */ #![allow(non_upper_case_globals)] #![allow(non_camel_case_types)] #![allow(non_snake_case)] #![allow(unused)] +pub const GGML_FILE_MAGIC: u32 = 1734831468; +pub const GGML_FILE_VERSION: u32 = 1; pub const GGML_MAX_DIMS: u32 = 4; pub const GGML_MAX_NODES: u32 = 4096; pub const GGML_MAX_PARAMS: u32 = 16; @@ -28,13 +30,26 @@ pub const ggml_type_GGML_TYPE_F16: ggml_type = 1; pub const ggml_type_GGML_TYPE_Q4_0: ggml_type = 2; pub const ggml_type_GGML_TYPE_Q4_1: ggml_type = 3; pub const ggml_type_GGML_TYPE_Q4_2: ggml_type = 4; -pub const ggml_type_GGML_TYPE_Q4_3: ggml_type = 5; -pub const ggml_type_GGML_TYPE_Q8_0: ggml_type = 6; -pub const ggml_type_GGML_TYPE_I8: ggml_type = 7; -pub const ggml_type_GGML_TYPE_I16: ggml_type = 8; -pub const ggml_type_GGML_TYPE_I32: ggml_type = 9; -pub const ggml_type_GGML_TYPE_COUNT: ggml_type = 10; -pub type ggml_type = ::std::os::raw::c_int; +pub const ggml_type_GGML_TYPE_Q5_0: ggml_type = 6; +pub const ggml_type_GGML_TYPE_Q5_1: ggml_type = 7; +pub const ggml_type_GGML_TYPE_Q8_0: ggml_type = 8; +pub const ggml_type_GGML_TYPE_Q8_1: ggml_type = 9; +pub const ggml_type_GGML_TYPE_I8: ggml_type = 10; +pub const ggml_type_GGML_TYPE_I16: ggml_type = 11; +pub const ggml_type_GGML_TYPE_I32: ggml_type = 12; +pub const ggml_type_GGML_TYPE_COUNT: ggml_type = 13; +pub type ggml_type = ::std::os::raw::c_uint; +pub const ggml_ftype_GGML_FTYPE_UNKNOWN: ggml_ftype = -1; +pub const ggml_ftype_GGML_FTYPE_ALL_F32: ggml_ftype = 0; +pub const ggml_ftype_GGML_FTYPE_MOSTLY_F16: ggml_ftype = 1; +pub const ggml_ftype_GGML_FTYPE_MOSTLY_Q4_0: ggml_ftype = 2; +pub const ggml_ftype_GGML_FTYPE_MOSTLY_Q4_1: ggml_ftype = 3; +pub const ggml_ftype_GGML_FTYPE_MOSTLY_Q4_1_SOME_F16: ggml_ftype = 4; +pub const ggml_ftype_GGML_FTYPE_MOSTLY_Q4_2: ggml_ftype = 5; +pub const ggml_ftype_GGML_FTYPE_MOSTLY_Q8_0: ggml_ftype = 7; +pub const ggml_ftype_GGML_FTYPE_MOSTLY_Q5_0: ggml_ftype = 8; +pub const ggml_ftype_GGML_FTYPE_MOSTLY_Q5_1: ggml_ftype = 9; +pub type ggml_ftype = ::std::os::raw::c_int; pub const ggml_op_GGML_OP_NONE: ggml_op = 0; pub const ggml_op_GGML_OP_DUP: ggml_op = 1; pub const ggml_op_GGML_OP_ADD: ggml_op = 2; @@ -67,14 +82,15 @@ pub const ggml_op_GGML_OP_GET_ROWS: ggml_op = 28; pub const ggml_op_GGML_OP_DIAG_MASK_INF: ggml_op = 29; pub const ggml_op_GGML_OP_SOFT_MAX: ggml_op = 30; pub const ggml_op_GGML_OP_ROPE: ggml_op = 31; -pub const ggml_op_GGML_OP_CONV_1D_1S: ggml_op = 32; -pub const ggml_op_GGML_OP_CONV_1D_2S: ggml_op = 33; -pub const ggml_op_GGML_OP_FLASH_ATTN: ggml_op = 34; -pub const ggml_op_GGML_OP_FLASH_FF: ggml_op = 35; -pub const 
ggml_op_GGML_OP_MAP_UNARY: ggml_op = 36; -pub const ggml_op_GGML_OP_MAP_BINARY: ggml_op = 37; -pub const ggml_op_GGML_OP_COUNT: ggml_op = 38; -pub type ggml_op = ::std::os::raw::c_int; +pub const ggml_op_GGML_OP_ALIBI: ggml_op = 32; +pub const ggml_op_GGML_OP_CONV_1D_1S: ggml_op = 33; +pub const ggml_op_GGML_OP_CONV_1D_2S: ggml_op = 34; +pub const ggml_op_GGML_OP_FLASH_ATTN: ggml_op = 35; +pub const ggml_op_GGML_OP_FLASH_FF: ggml_op = 36; +pub const ggml_op_GGML_OP_MAP_UNARY: ggml_op = 37; +pub const ggml_op_GGML_OP_MAP_BINARY: ggml_op = 38; +pub const ggml_op_GGML_OP_COUNT: ggml_op = 39; +pub type ggml_op = ::std::os::raw::c_uint; #[repr(C)] #[derive(Debug, Copy, Clone)] pub struct ggml_object { @@ -623,6 +639,9 @@ extern "C" { extern "C" { pub fn ggml_is_quantized(type_: ggml_type) -> bool; } +extern "C" { + pub fn ggml_ftype_to_ggml_type(ftype: ggml_ftype) -> ggml_type; +} extern "C" { pub fn ggml_init(params: ggml_init_params) -> *mut ggml_context; } @@ -917,6 +936,14 @@ extern "C" { mode: ::std::os::raw::c_int, ) -> *mut ggml_tensor; } +extern "C" { + pub fn ggml_alibi( + ctx: *mut ggml_context, + a: *mut ggml_tensor, + n_past: ::std::os::raw::c_int, + n_head: ::std::os::raw::c_int, + ) -> *mut ggml_tensor; +} extern "C" { pub fn ggml_conv_1d_1s( ctx: *mut ggml_context, @@ -1010,12 +1037,12 @@ extern "C" { } pub const ggml_opt_type_GGML_OPT_ADAM: ggml_opt_type = 0; pub const ggml_opt_type_GGML_OPT_LBFGS: ggml_opt_type = 1; -pub type ggml_opt_type = ::std::os::raw::c_int; +pub type ggml_opt_type = ::std::os::raw::c_uint; pub const ggml_linesearch_GGML_LINESEARCH_DEFAULT: ggml_linesearch = 1; pub const ggml_linesearch_GGML_LINESEARCH_BACKTRACKING_ARMIJO: ggml_linesearch = 0; pub const ggml_linesearch_GGML_LINESEARCH_BACKTRACKING_WOLFE: ggml_linesearch = 1; pub const ggml_linesearch_GGML_LINESEARCH_BACKTRACKING_STRONG_WOLFE: ggml_linesearch = 2; -pub type ggml_linesearch = ::std::os::raw::c_int; +pub type ggml_linesearch = ::std::os::raw::c_uint; pub const ggml_opt_result_GGML_OPT_OK: ggml_opt_result = 0; pub const ggml_opt_result_GGML_OPT_DID_NOT_CONVERGE: ggml_opt_result = 1; pub const ggml_opt_result_GGML_OPT_NO_CONTEXT: ggml_opt_result = 2; @@ -1399,7 +1426,25 @@ extern "C" { ) -> usize; } extern "C" { - pub fn ggml_quantize_q4_3( + pub fn ggml_quantize_q5_0( + src: *const f32, + dst: *mut ::std::os::raw::c_void, + n: ::std::os::raw::c_int, + k: ::std::os::raw::c_int, + hist: *mut i64, + ) -> usize; +} +extern "C" { + pub fn ggml_quantize_q5_1( + src: *const f32, + dst: *mut ::std::os::raw::c_void, + n: ::std::os::raw::c_int, + k: ::std::os::raw::c_int, + hist: *mut i64, + ) -> usize; +} +extern "C" { + pub fn ggml_quantize_q8_0( src: *const f32, dst: *mut ::std::os::raw::c_void, n: ::std::os::raw::c_int, @@ -1456,6 +1501,12 @@ extern "C" { extern "C" { pub fn ggml_cpu_has_cublas() -> ::std::os::raw::c_int; } +extern "C" { + pub fn ggml_cpu_has_clblast() -> ::std::os::raw::c_int; +} +extern "C" { + pub fn ggml_cpu_has_gpublas() -> ::std::os::raw::c_int; +} extern "C" { pub fn ggml_cpu_has_sse3() -> ::std::os::raw::c_int; } @@ -1484,6 +1535,7 @@ pub struct quantize_fns_t { pub quantize_row_q_reference: quantize_row_q_t, pub quantize_row_q_dot: quantize_row_q_t, pub vec_dot_q: vec_dot_q_t, + pub vec_dot_type: ggml_type, } #[test] fn bindgen_test_layout_quantize_fns_t() { @@ -1491,7 +1543,7 @@ fn bindgen_test_layout_quantize_fns_t() { let ptr = UNINIT.as_ptr(); assert_eq!( ::std::mem::size_of::(), - 40usize, + 48usize, concat!("Size of: ", stringify!(quantize_fns_t)) ); assert_eq!( @@ 
-1549,6 +1601,16 @@ fn bindgen_test_layout_quantize_fns_t() { stringify!(vec_dot_q) ) ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).vec_dot_type) as usize - ptr as usize }, + 40usize, + concat!( + "Offset of field: ", + stringify!(quantize_fns_t), + "::", + stringify!(vec_dot_type) + ) + ); } extern "C" { pub fn ggml_internal_get_quantize_fn(i: usize) -> quantize_fns_t; diff --git a/gpt2/Cargo.toml b/gpt2/Cargo.toml new file mode 100644 index 00000000..021dc446 --- /dev/null +++ b/gpt2/Cargo.toml @@ -0,0 +1,13 @@ +[package] +name = "gpt2" +version = { workspace = true } +edition = "2021" + +[dependencies] +ggml = { path = "../ggml" } +llm-base = { path = "../llm-base" } + +bytemuck = { workspace = true } + +[dev-dependencies] +rand = { workspace = true } diff --git a/gpt2/examples/gpt2_inference.rs b/gpt2/examples/gpt2_inference.rs new file mode 100644 index 00000000..8db4e97b --- /dev/null +++ b/gpt2/examples/gpt2_inference.rs @@ -0,0 +1,42 @@ +use std::{convert::Infallible, env::args, io::Write}; + +use llm_base::{load_progress_callback, model::KnownModel}; + +extern crate gpt2; + +fn main() { + let args: Vec = args().collect(); + let loc = &args[1]; + let prompt = match &args.len() { + 3 => &args[2], + _ => "Rust is a cool programming language because ", + }; + + println!(" >>> Loading model from {loc}..."); + let now = std::time::Instant::now(); + + let gpt2 = gpt2::Gpt2::load(loc, true, 512, load_progress_callback) + .unwrap_or_else(|e| panic!("Error loading model from {loc}: {e}")); + + println!(" >>> Model loaded in {} ms.", now.elapsed().as_millis()); + + let mut session = gpt2.start_session(Default::default()); + let res = session.inference_with_prompt::( + &gpt2, + &Default::default(), + prompt, + None, + &mut rand::thread_rng(), + |t| { + print!("{t}"); + std::io::stdout().flush().unwrap(); + + Ok(()) + }, + ); + + match res { + Ok(result) => println!("\n\nInference stats:\n{result}"), + Err(err) => println!("\n{err}"), + } +} diff --git a/gpt2/src/lib.rs b/gpt2/src/lib.rs new file mode 100644 index 00000000..b0f20c95 --- /dev/null +++ b/gpt2/src/lib.rs @@ -0,0 +1,445 @@ +use std::path::Path; + +use ggml::Tensor; +use llm_base::{ + util, EvaluateOutputRequest, FileType, InferenceParameters, InferenceSession, + InferenceSessionParameters, KnownModel, LoadError, LoadProgress, TokenId, Vocabulary, +}; + +pub struct Gpt2 { + hyperparameters: Hyperparameters, + n_context_tokens: usize, + vocabulary: Vocabulary, + ln_f_g: Tensor, + ln_f_b: Tensor, + wte: Tensor, + wpe: Tensor, + lm_head: Tensor, + layers: Vec, + _context: ggml::context::Context, +} + +impl KnownModel for Gpt2 { + type Hyperparameters = Hyperparameters; + + fn new( + hyperparameters: Self::Hyperparameters, + n_context_tokens: usize, + vocabulary: Vocabulary, + tensor_loader: impl llm_base::TensorLoader, + ) -> Result { + let n_embd = hyperparameters.n_embd; + let n_layer = hyperparameters.n_layer; + let n_vocab = hyperparameters.n_vocab; + let n_ctx = hyperparameters.n_ctx; + + let mut tl = tensor_loader; + // prepare memory for weights + let ln_f_g = tl.load("model/ln_f/g", &[n_embd])?; + let ln_f_b = tl.load("model/ln_f/b", &[n_embd])?; + let wte = tl.load("model/wte", &[n_embd, n_vocab])?; + let wpe = tl.load("model/wpe", &[n_embd, n_ctx])?; + let lm_head = tl.load("model/lm_head", &[n_embd, n_vocab])?; + + let mut layers = Vec::new(); + for i in 0..n_layer { + let layer = Layer { + ln_1_g: tl.load(&format!("model/h{i}/ln_1/g"), &[n_embd])?, + ln_1_b: tl.load(&format!("model/h{i}/ln_1/b"), &[n_embd])?, + 
ln_2_g: tl.load(&format!("model/h{i}/ln_2/g"), &[n_embd])?, + ln_2_b: tl.load(&format!("model/h{i}/ln_2/b"), &[n_embd])?, + c_attn_attn_w: tl + .load(&format!("model/h{i}/attn/c_attn/w"), &[n_embd, n_embd * 3])?, + c_attn_attn_b: tl.load(&format!("model/h{i}/attn/c_attn/b"), &[n_embd * 3])?, + c_attn_proj_w: tl.load(&format!("model/h{i}/attn/c_proj/w"), &[n_embd, n_embd])?, + c_attn_proj_b: tl.load(&format!("model/h{i}/attn/c_proj/b"), &[n_embd])?, + c_mlp_fc_w: tl.load(&format!("model/h{i}/mlp/c_fc/w"), &[n_embd, n_embd * 4])?, + c_mlp_fc_b: tl.load(&format!("model/h{i}/mlp/c_fc/b"), &[n_embd * 4])?, + c_mlp_proj_w: tl + .load(&format!("model/h{i}/mlp/c_proj/w"), &[n_embd * 4, n_embd])?, + c_mlp_proj_b: tl.load(&format!("model/h{i}/mlp/c_proj/b"), &[n_embd])?, + }; + + layers.push(layer); + } + + let (_context, _, _mmap) = tl.finish(); + + Ok(Gpt2 { + hyperparameters, + n_context_tokens, + vocabulary, + layers, + ln_f_g, + ln_f_b, + wte, + wpe, + lm_head, + _context, + }) + } + + fn start_session(&self, params: InferenceSessionParameters) -> InferenceSession { + InferenceSession::new( + params, + self.hyperparameters.n_ctx, + self.hyperparameters.n_layer, + self.hyperparameters.n_embd, + self.hyperparameters.n_vocab, + ) + } + + fn evaluate( + &self, + session: &mut InferenceSession, + params: &InferenceParameters, + input_tokens: &[TokenId], + output_request: &mut EvaluateOutputRequest, + ) { + let n = input_tokens.len(); + let n_threads = params.n_threads; + + let Hyperparameters { + n_embd, + n_head, + n_vocab, + n_layer, + .. + } = self.hyperparameters; + let n_ctx = self.n_context_tokens; + + // For the first run, we need to guess a maximum buffer size so we can measure + // the actual memory consumption of the temporary ggml context. + // + // These numbers are from `llama.cpp`, and could potentially be more efficient. 
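The comment above introduces a two-phase sizing scheme that the next few lines implement: guess generously on the first evaluation, then switch to the measured `mem_per_token` with roughly 10% headroom. A small worked example with made-up numbers, separate from the patch:

```rust
fn main() {
    let n_layer = 12usize;
    let n_tokens = 8usize;
    let mem_per_token = 600 * 1024usize; // pretend a prior run measured ~600 KiB/token

    // Layer-count-based first guess, as in the code below.
    let guess_mb = if n_layer >= 80 {
        1536
    } else if n_layer >= 60 {
        1280
    } else {
        1024
    };
    let mut buf_size = guess_mb * 1024 * 1024;

    // Once measured, prefer the per-token estimate plus ~10% for ggml's bookkeeping.
    if mem_per_token > 0 && mem_per_token * n_tokens > buf_size {
        buf_size = (1.1 * (mem_per_token * n_tokens) as f64) as usize;
    }

    // With these numbers the 1 GiB guess still dominates, so it is kept.
    assert_eq!(buf_size, 1024 * 1024 * 1024);
}
```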
+ let mut buf_size = { + let buf_size_mb = if n_layer >= 80 { + 1536 + } else if n_layer >= 60 { + 1280 + } else { + 1024 + }; + buf_size_mb * 1024 * 1024 + }; + if session.mem_per_token > 0 && session.mem_per_token * n > buf_size { + // add 10% to account for ggml object overhead + buf_size = (1.1f64 * session.mem_per_token as f64 * n as f64) as usize; + }; + let ctx0 = ggml::context::Context::init(buf_size, true); + + let mut gf = ggml::ComputationGraph::new(n_threads); + + let mut embd = ctx0.new_tensor_1d(ggml::Type::I32, n); + unsafe { embd.write_data(bytemuck::cast_slice(input_tokens)) }; + + let n_past = session.n_past; + + let mut position_buf = vec![]; + for position_idx in 0..n { + position_buf.push(n_past + position_idx); + } + + let mut position = ctx0.new_tensor_1d(ggml::Type::I32, n); + unsafe { position.write_data(bytemuck::cast_slice(&position_buf)) }; + + let mut input_layer = ctx0.op_add( + &ctx0.op_get_rows(&self.wte, &embd), + &ctx0.op_get_rows(&self.wpe, &position), + ); + + let memory_k = &session.memory_k; + let memory_k_size = memory_k.element_size(); + + let memory_v = &session.memory_v; + let memory_v_size = memory_v.element_size(); + + for il in 0..n_layer { + // norm + let mut current = ctx0.op_norm(&input_layer); + current = ctx0.op_add( + &ctx0.op_mul(&ctx0.op_repeat(&self.layers[il].ln_1_g, ¤t), ¤t), + &ctx0.op_repeat(&self.layers[il].ln_1_b, ¤t), + ); + + // attn + current = ctx0.op_mul_mat(&self.layers[il].c_attn_attn_w, ¤t); + current = ctx0.op_add( + &ctx0.op_repeat(&self.layers[il].c_attn_attn_b, ¤t), + ¤t, + ); + + // self-attn + let nb = current.get_nb()[1]; + let f32_size = std::mem::size_of::(); + let qcur = ctx0.op_view_2d(¤t, (n_embd, n), nb, 0); + let kcur = ctx0.op_view_2d(¤t, (n_embd, n), nb, f32_size * n_embd); + let vcur = ctx0.op_view_2d(¤t, (n_embd, n), nb, f32_size * n_embd * 2); + + if n >= 1 { + let k = ctx0.op_view_1d( + memory_k, + n * n_embd, + (memory_k_size * n_embd) * (il * n_ctx + n_past), + ); + let v = ctx0.op_view_1d( + memory_v, + n * n_embd, + (memory_v_size * n_embd) * (il * n_ctx + n_past), + ); + + gf.build_forward_expand(&ctx0.op_cpy(&kcur, &k)); + gf.build_forward_expand(&ctx0.op_cpy(&vcur, &v)); + } + + let q = ctx0.op_permute( + &ctx0.op_cpy( + &qcur, + &ctx0.new_tensor_3d(ggml::Type::F32, n_embd / n_head, n_head, n), + ), + 0, + 2, + 1, + 3, + ); + + let k = ctx0.op_permute( + &ctx0.op_reshape_3d( + &ctx0.op_view_1d( + &session.memory_k, + (n_past + n) * n_embd, + il * n_ctx * memory_k_size * n_embd, + ), + n_embd / n_head, + n_head, + n_past + n, + ), + 0, + 2, + 1, + 3, + ); + + let kq = ctx0.op_mul_mat(&k, &q); + let kq_scaled = ctx0.op_scale( + &kq, + &ctx0.new_f32(1f32 / f32::sqrt(n_embd as f32 / n_head as f32)), + ); + + let kq_masked = ctx0.op_diag_mask_inf(&kq_scaled, n_past); + let kq_softmax = ctx0.op_soft_max(&kq_masked); + + let v_trans = ctx0.op_cpy( + &ctx0.op_permute( + &ctx0.op_reshape_3d( + &ctx0.op_view_1d( + memory_v, + (n_past + n) * n_embd, + il * n_ctx * memory_v_size * n_embd, + ), + n_embd / n_head, + n_head, + n_past + n, + ), + 1, + 2, + 0, + 3, + ), + &ctx0.new_tensor_3d(memory_v.get_type(), n_past + n, n_embd / n_head, n_head), + ); + + let kqv = ctx0.op_mul_mat(&v_trans, &kq_softmax); + let kqv_merged = ctx0.op_permute(&kqv, 0, 2, 1, 3); + + current = ctx0.op_cpy(&kqv_merged, &ctx0.new_tensor_2d(ggml::Type::F32, n_embd, n)); + + // projection + current = ctx0.op_mul_mat(&self.layers[il].c_attn_proj_w, ¤t); + current = ctx0.op_add( + &ctx0.op_repeat(&self.layers[il].c_attn_proj_b, ¤t), + ¤t, 
+ ); + + // add input + current = ctx0.op_add(¤t, &input_layer); + + // feed-forward + let ff_in = current.share(); + + // feed-forward normalization + current = ctx0.op_norm(&ff_in); + current = ctx0.op_add( + &ctx0.op_mul(&ctx0.op_repeat(&self.layers[il].ln_2_g, ¤t), ¤t), + &ctx0.op_repeat(&self.layers[il].ln_2_b, ¤t), + ); + + // feed-forward fully connected + current = ctx0.op_mul_mat(&self.layers[il].c_mlp_fc_w, ¤t); + current = ctx0.op_add( + &ctx0.op_repeat(&self.layers[il].c_mlp_fc_b, ¤t), + ¤t, + ); + + // feed-forward activation + current = ctx0.op_gelu(¤t); + + // feed-forward projection + current = ctx0.op_mul_mat(&self.layers[il].c_mlp_proj_w, ¤t); + current = ctx0.op_add( + &ctx0.op_repeat(&self.layers[il].c_mlp_proj_b, ¤t), + ¤t, + ); + + // input for next layer + input_layer = ctx0.op_add(¤t, &ff_in); + } + + // normalization + input_layer = ctx0.op_norm(&input_layer); + input_layer = ctx0.op_add( + &ctx0.op_mul(&ctx0.op_repeat(&self.ln_f_g, &input_layer), &input_layer), + &ctx0.op_repeat(&self.ln_f_b, &input_layer), + ); + + input_layer = ctx0.op_mul_mat(&self.lm_head, &input_layer); + + // run the computation + gf.build_forward_expand(&input_layer); + ctx0.graph_compute(&mut gf); + + // return result for just the last token + // SAFETY: yolo + assert_eq!(session.last_logits.len(), n_vocab); + unsafe { + input_layer.read_data( + n_vocab * (n - 1) * std::mem::size_of::(), + bytemuck::cast_slice_mut(&mut session.last_logits), + ) + }; + + // Extract logits + if let Some(all_logits) = &mut output_request.all_logits { + all_logits.resize(n_vocab * n, 0.0); + // SAFETY: Tensor data can be read (properly aligned, initialized, + // data will not be mutated or otherwise aliased during the copy), + // and we're not reading past the end of the tensor data. + assert_eq!(input_layer.nelements(), n_vocab * n); + unsafe { + input_layer.read_data(0, bytemuck::cast_slice_mut(all_logits)); + } + } + + // Extract embeddings + if let Some(embeddings) = &mut output_request.embeddings { + embeddings.resize(n_embd * n, 0.0); + // SAFETY: Same rationale as for the "Extract logits" section applies. + assert_eq!(embd.nelements(), n_embd * n); + unsafe { + embd.read_data(0, bytemuck::cast_slice_mut(embeddings)); + } + } + + // Adjust the required memory per token if we didn't know that already + if session.mem_per_token == 0 { + session.mem_per_token = ctx0.used_mem() / n; + } + + // Adjust n_past to new length. + session.n_past += input_tokens.len(); + } + + fn vocabulary(&self) -> &Vocabulary { + &self.vocabulary + } + + fn n_ctx(&self) -> usize { + self.hyperparameters.n_ctx + } +} + +impl Gpt2 { + /// Load the model from `path` with `n_context_tokens` context tokens. + /// + /// The status of the loading process will be reported through `load_progress_callback`. + pub fn load( + path: impl AsRef, + prefer_mmap: bool, + n_context_tokens: usize, + load_progress_callback: impl FnMut(LoadProgress), + ) -> Result { + llm_base::load(path, prefer_mmap, n_context_tokens, load_progress_callback) + } +} + +/// The hyperparameters of the model. 
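The `read_data` call above copies out only the final token's row of logits; the byte-offset arithmetic is worth spelling out. A standalone illustration of the same indexing over a plain slice (no ggml involved):

```rust
fn main() {
    let (n_vocab, n) = (8usize, 3usize);
    // Flattened [n][n_vocab] logits; row t holds the scores for token t.
    let logits: Vec<f32> = (0..n * n_vocab).map(|i| i as f32).collect();

    // Same offset the evaluate() code passes to read_data, expressed in bytes.
    let offset_bytes = n_vocab * (n - 1) * std::mem::size_of::<f32>();
    let offset_elems = offset_bytes / std::mem::size_of::<f32>();

    let last_row = &logits[offset_elems..offset_elems + n_vocab];
    assert_eq!(last_row[0], ((n - 1) * n_vocab) as f32); // first score of the final token
}
```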
+#[derive(Debug, Default, PartialEq, Eq, Clone, Copy)] +pub struct Hyperparameters { + /// n_vocab + n_vocab: usize, + /// n_ctx + n_ctx: usize, + /// n_embd + n_embd: usize, + /// n_head + n_head: usize, + /// n_layer + n_layer: usize, + /// file type + file_type: FileType, +} +impl llm_base::Hyperparameters for Hyperparameters { + fn read(reader: &mut dyn std::io::BufRead) -> Result { + let hyperparameters = Hyperparameters { + n_vocab: util::read_i32(reader)?.try_into()?, + n_ctx: util::read_i32(reader)?.try_into()?, + n_embd: util::read_i32(reader)?.try_into()?, + n_head: util::read_i32(reader)?.try_into()?, + n_layer: util::read_i32(reader)?.try_into()?, + file_type: { + let ftype = util::read_i32(reader)?; + FileType::try_from(ftype).map_err(|_| LoadError::UnsupportedFileType(ftype))? + }, + }; + + let n_vocab = util::read_i32(reader)? as usize; + if hyperparameters.n_vocab != n_vocab { + return Err(LoadError::InvariantBroken { + path: None, + invariant: format!( + "GPT2 model expected n_vocab {} found {}", + hyperparameters.n_vocab, n_vocab + ), + }); + } + + Ok(hyperparameters) + } + + fn n_vocabulary(&self) -> usize { + self.n_vocab + } +} + +struct Layer { + // normalization + ln_1_g: Tensor, + ln_1_b: Tensor, + + ln_2_g: Tensor, + ln_2_b: Tensor, + + // attention + c_attn_attn_w: Tensor, + c_attn_attn_b: Tensor, + + c_attn_proj_w: Tensor, + c_attn_proj_b: Tensor, + + // mlp + c_mlp_fc_w: Tensor, + c_mlp_fc_b: Tensor, + + c_mlp_proj_w: Tensor, + c_mlp_proj_b: Tensor, +} diff --git a/llama-rs/src/loader2.rs b/llama-rs/src/loader2.rs deleted file mode 100644 index 37edb77b..00000000 --- a/llama-rs/src/loader2.rs +++ /dev/null @@ -1,271 +0,0 @@ -use ggml_format::{ - util::read_i32, ContainerType, LoadError as FormatLoadError, PartialHyperparameters, TensorInfo, -}; -use memmap2::Mmap; - -use std::{ - collections::HashMap, - fs::File, - io::{BufRead, BufReader, Read, Seek, SeekFrom}, - path::{Path, PathBuf}, -}; - -use crate::{ - loader_common::FileType, model::TensorLoader, util, Hyperparameters, LoadError, LoadProgress, - Model, TokenId, Vocabulary, -}; - -impl LoadError { - pub(crate) fn from_format_error(value: FormatLoadError, path: PathBuf) -> Self { - match value { - FormatLoadError::InvalidMagic(magic) => LoadError::InvalidMagic { path, magic }, - FormatLoadError::InvalidFormatVersion(container_type, version) => { - LoadError::InvalidFormatVersion { - container_type, - version, - } - } - FormatLoadError::Io(err) => LoadError::Io(err), - FormatLoadError::InvalidUtf8(err) => LoadError::InvalidUtf8(err), - FormatLoadError::InvalidIntegerConversion(err) => { - LoadError::InvalidIntegerConversion(err) - } - FormatLoadError::ImplementationError(err) => err, - FormatLoadError::UnsupportedElementType { tensor_name, ftype } => { - LoadError::UnsupportedElementType { - path, - tensor_name, - ftype, - } - } - FormatLoadError::InvariantBroken(invariant) => { - LoadError::InvariantBroken { path, invariant } - } - } - } -} - -pub(crate) fn load( - path: impl AsRef, - prefer_mmap: bool, - n_context_tokens: usize, - mut load_progress_callback: impl FnMut(LoadProgress), -) -> Result { - let main_path = path.as_ref(); - - let file = File::open(main_path).map_err(|e| LoadError::OpenFileFailed { - source: e, - path: main_path.to_owned(), - })?; - let mut reader = BufReader::new(&file); - - // We've found the main file - verify that this isn't a multipart model. - // NOTE: We do this after reading the main file, since otherwise we'll get a nonsensical error - // message. 
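`Hyperparameters::read` above consumes six `i32` fields in a fixed order and then checks `n_vocab` against the second copy stored just before the vocabulary. A hedged round-trip sketch of that layout using only `std`; the helper functions are illustrative stand-ins for `llm_base::util`, and little-endian encoding is assumed as usual for GGML files.

```rust
use std::io::{Cursor, Read, Write};

fn write_i32(w: &mut impl Write, v: i32) -> std::io::Result<()> {
    w.write_all(&v.to_le_bytes())
}

fn read_i32(r: &mut impl Read) -> std::io::Result<i32> {
    let mut buf = [0u8; 4];
    r.read_exact(&mut buf)?;
    Ok(i32::from_le_bytes(buf))
}

fn main() -> std::io::Result<()> {
    // Hypothetical GPT-2 values: n_vocab, n_ctx, n_embd, n_head, n_layer, file_type,
    // followed by the duplicated n_vocab that precedes the vocabulary section.
    let fields = [50257, 1024, 768, 12, 12, 1, 50257];

    let mut buf = Vec::new();
    for v in fields {
        write_i32(&mut buf, v)?;
    }

    let mut cursor = Cursor::new(buf);
    let n_vocab = read_i32(&mut cursor)?;
    for _ in 0..5 {
        read_i32(&mut cursor)?; // n_ctx, n_embd, n_head, n_layer, file_type
    }
    assert_eq!(read_i32(&mut cursor)?, n_vocab); // the invariant the loader enforces
    Ok(())
}
```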
- let paths = util::find_all_model_files(main_path)?; - if paths.len() != 1 { - return Err(LoadError::MultipartNotSupported { paths }); - } - - let path = path.as_ref().to_owned(); - - (load_progress_callback)(LoadProgress::PartLoading { - file: &path, - current_part: 0, - total_parts: 1, - }); - - let mut loader = Loader::new(n_context_tokens, load_progress_callback); - - ggml_format::load_model(&mut reader, &mut loader) - .map_err(|err| LoadError::from_format_error(err, path.clone()))?; - - let Loader { - hyperparameters, - vocabulary, - tensors, - mut load_progress_callback, - container_type, - .. - } = loader; - - let Hyperparameters { n_embd, n_mult, .. } = hyperparameters; - let n_ff = ((2 * (4 * n_embd) / 3 + n_mult - 1) / n_mult) * n_mult; - - let use_mmap = prefer_mmap && container_type.support_mmap(); - - let ctx_size = tensors - .values() - .map(|ti| { - ggml::Tensor::C_TYPE_SIZE - + ggml::OBJECT_SIZE - + if use_mmap { 0 } else { ti.calc_size() } - }) - .sum::(); - (load_progress_callback)(LoadProgress::ContextSize { bytes: ctx_size }); - let context = ggml::Context::init(ctx_size, !use_mmap); - - let mmap = if use_mmap { - let file = File::open(&path)?; - Some(util::mmap_populate(&file)?) - } else { - None - }; - - struct TensorLoader2<'a> { - path: PathBuf, - file: File, - tensors: HashMap, - context: ggml::Context, - mmap: Option, - load_progress_callback: &'a mut dyn FnMut(LoadProgress), - loaded_tensors: HashMap, - } - impl TensorLoader for TensorLoader2<'_> { - fn load(&mut self, name: &str, ne: &[usize]) -> Result { - let info = self - .tensors - .get(name) - .ok_or_else(|| LoadError::UnknownTensor { - path: self.path.clone(), - tensor_name: name.to_owned(), - })?; - - let ctx = &self.context; - let mut tensor = match ne.len() { - 1 => ctx.new_tensor_1d(info.element_type, ne[0]), - 2 => ctx.new_tensor_2d(info.element_type, ne[0], ne[1]), - 3 => ctx.new_tensor_3d(info.element_type, ne[0], ne[1], ne[2]), - _ => { - return Err(LoadError::InvariantBroken { - path: self.path.clone(), - invariant: format!( - "the tensor {name} had an unsupported dimension count: {ne:?}" - ), - }) - } - }; - - match self.mmap.as_ref() { - Some(mmap) => unsafe { - let ptr = mmap.as_ptr().offset(info.start_offset as isize); - tensor.set_data(ptr as *mut std::ffi::c_void); - }, - None => { - let buf: &mut [u8] = unsafe { - std::slice::from_raw_parts_mut(tensor.data() as *mut u8, tensor.nbytes()) - }; - self.file.seek(SeekFrom::Start(info.start_offset))?; - self.file.read_exact(buf)?; - } - } - - self.loaded_tensors.insert(name.to_owned(), tensor.share()); - (self.load_progress_callback)(LoadProgress::PartTensorLoaded { - file: &self.path, - current_tensor: self.loaded_tensors.len(), - tensor_count: self.tensors.len(), - }); - - Ok(tensor) - } - - fn finish(self) -> (ggml::Context, HashMap, Option) { - (self.context, self.loaded_tensors, self.mmap) - } - } - - let tensors_len = tensors.len(); - let tl = TensorLoader2 { - path: path.clone(), - file, - tensors, - context, - mmap, - load_progress_callback: &mut load_progress_callback, - loaded_tensors: Default::default(), - }; - - let model = Model::new_loader2(hyperparameters, vocabulary, n_ff, tl)?; - - (load_progress_callback)(LoadProgress::PartLoaded { - file: &path, - byte_size: 0, - tensor_count: tensors_len, - }); - - Ok(model) -} - -pub(crate) struct Loader { - // Input - n_ctx: usize, - load_progress_callback: F, - - // Output - pub(crate) container_type: ContainerType, - pub(crate) hyperparameters: Hyperparameters, - pub(crate) vocabulary: 
Vocabulary, - pub(crate) tensors: HashMap, -} -impl Loader { - pub(crate) fn new(n_ctx: usize, load_progress_callback: F) -> Self { - Self { - n_ctx, - load_progress_callback, - - container_type: ContainerType::Ggjt, - hyperparameters: Hyperparameters::default(), - vocabulary: Vocabulary::default(), - tensors: HashMap::default(), - } - } -} -impl ggml_format::LoadHandler for Loader { - fn container_type(&mut self, container_type: ContainerType) -> Result<(), LoadError> { - self.container_type = container_type; - Ok(()) - } - - fn vocabulary_token(&mut self, i: usize, token: Vec, score: f32) -> Result<(), LoadError> { - let id = match TokenId::try_from(i) { - Ok(id) => id, - Err(err) => return Err(LoadError::InvalidIntegerConversion(err)), - }; - self.vocabulary.push_token(id, token, score); - - Ok(()) - } - - fn read_hyperparameters( - &mut self, - reader: &mut dyn BufRead, - ) -> Result { - // NOTE: Field order matters! Data is laid out in the file exactly in this order. - let hyperparameters = Hyperparameters { - n_vocab: read_i32(reader)?.try_into()?, - n_embd: read_i32(reader)?.try_into()?, - n_mult: read_i32(reader)?.try_into()?, - n_head: read_i32(reader)?.try_into()?, - n_layer: read_i32(reader)?.try_into()?, - n_rot: read_i32(reader)?.try_into()?, - file_type: { - let ftype = read_i32(reader)?; - FileType::try_from(ftype).map_err(|_| LoadError::UnsupportedFileType(ftype))? - }, - n_ctx: self.n_ctx, - }; - let partial = PartialHyperparameters { - n_vocab: hyperparameters.n_vocab, - }; - self.hyperparameters = hyperparameters; - (self.load_progress_callback)(LoadProgress::HyperparametersLoaded(&self.hyperparameters)); - - Ok(partial) - } - - fn tensor_buffer(&mut self, info: TensorInfo) -> Result<(), LoadError> { - self.tensors.insert(info.name.clone(), info); - Ok(()) - } -} diff --git a/llama-rs/src/loader_common.rs b/llama-rs/src/loader_common.rs deleted file mode 100644 index 7ca7603f..00000000 --- a/llama-rs/src/loader_common.rs +++ /dev/null @@ -1,236 +0,0 @@ -use std::{ - fmt::{Display, Formatter}, - path::{Path, PathBuf}, -}; - -use ggml_format::ContainerType; -use thiserror::Error; - -use crate::{util::FindAllModelFilesError, Hyperparameters}; - -/// How the tensors are stored in the GGML LLaMA model. -#[derive(Debug, PartialEq, Clone, Copy, Eq, Default)] -pub enum FileType { - /// All tensors are stored as f32. - F32, - #[default] - /// All tensors are mostly stored as `f16`, except for the 1D tensors (32-bit). - MostlyF16, - /// All tensors are mostly stored as `Q4_0`, except for the 1D tensors (32-bit). - MostlyQ4_0, - /// All tensors are mostly stored as `Q4_1`, except for the 1D tensors (32-bit) - MostlyQ4_1, - /// All tensors are mostly stored as `Q4_1`, except for the 1D tensors (32-bit) - /// and the `tok_embeddings.weight` (f16) and `output.weight` tensors (f16). - MostlyQ4_1SomeF16, - /// All tensors are mostly stored as `Q4_2`, except for the 1D tensors (32-bit). - MostlyQ4_2, - /// All tensors are mostly stored as `Q4_3`, except for the 1D tensors (32-bit). 
- MostlyQ4_3, -} -impl From for i32 { - fn from(value: FileType) -> Self { - match value { - FileType::F32 => 0, - FileType::MostlyF16 => 1, - FileType::MostlyQ4_0 => 2, - FileType::MostlyQ4_1 => 3, - FileType::MostlyQ4_1SomeF16 => 4, - FileType::MostlyQ4_2 => 5, - FileType::MostlyQ4_3 => 6, - } - } -} -impl TryFrom for FileType { - type Error = (); - - fn try_from(value: i32) -> Result { - match value { - 0 => Ok(FileType::F32), - 1 => Ok(FileType::MostlyF16), - 2 => Ok(FileType::MostlyQ4_0), - 3 => Ok(FileType::MostlyQ4_1), - 4 => Ok(FileType::MostlyQ4_1SomeF16), - 5 => Ok(FileType::MostlyQ4_2), - 6 => Ok(FileType::MostlyQ4_3), - _ => Err(()), - } - } -} -impl Display for FileType { - fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { - match self { - FileType::F32 => write!(f, "f32"), - FileType::MostlyF16 => write!(f, "f16"), - FileType::MostlyQ4_0 => write!(f, "q4_0"), - FileType::MostlyQ4_1 => write!(f, "q4_1"), - FileType::MostlyQ4_1SomeF16 => write!(f, "q4_1_with_f16"), - FileType::MostlyQ4_2 => write!(f, "q4_2"), - FileType::MostlyQ4_3 => write!(f, "q4_3"), - } - } -} - -/// Each variant represents a step within the process of loading the model. -/// These can be used to report progress to the user. -#[derive(Clone, PartialEq, Eq, Debug)] -pub enum LoadProgress<'a> { - /// The hyperparameters have been loaded from the model. - HyperparametersLoaded(&'a Hyperparameters), - /// The context has been created. - ContextSize { - /// The size of the context. - bytes: usize, - }, - /// A part of the model is being loaded. - PartLoading { - /// The path to the model part. - file: &'a Path, - /// The current part (0-indexed). - current_part: usize, - /// The number of total parts. - total_parts: usize, - }, - /// A tensor from the current part has been loaded. - PartTensorLoaded { - /// The path to the model part. - file: &'a Path, - /// The current tensor (0-indexed). - current_tensor: usize, - /// The number of total tensors. - tensor_count: usize, - }, - /// A model part has finished fully loading. - PartLoaded { - /// The path to the model part. - file: &'a Path, - /// The number of bytes in the part. - byte_size: usize, - /// The number of tensors in the part. - tensor_count: usize, - }, -} - -#[derive(Error, Debug)] -/// Errors encountered during the loading process. -pub enum LoadError { - #[error("could not open file {path:?}")] - /// A file failed to open. - OpenFileFailed { - /// The original error. - source: std::io::Error, - /// The path that failed. - path: PathBuf, - }, - #[error("no parent path for {path:?}")] - /// There is no parent path for a given path. - NoParentPath { - /// The path without a parent. - path: PathBuf, - }, - #[error("unable to read exactly {bytes} bytes")] - /// Reading exactly `bytes` from a file failed. - ReadExactFailed { - /// The original error. - source: std::io::Error, - /// The number of bytes that were attempted to be read. - bytes: usize, - }, - #[error("non-specific I/O error")] - /// A non-specific IO error. - Io(#[from] std::io::Error), - #[error("could not convert bytes to a UTF-8 string")] - /// One of the strings encountered was not valid UTF-8. - InvalidUtf8(#[from] std::string::FromUtf8Error), - #[error("invalid integer conversion")] - /// One of the integers encountered could not be converted to a more appropriate type. - InvalidIntegerConversion(#[from] std::num::TryFromIntError), - #[error("unsupported f16_: {0}")] - /// The `f16_` hyperparameter had an invalid value. 
- UnsupportedFileType(i32), - #[error("invalid magic number {magic:#x} for {path:?}")] - /// An invalid magic number was encountered during the loading process. - InvalidMagic { - /// The path that failed. - path: PathBuf, - /// The magic number that was encountered. - magic: u32, - }, - #[error("invalid file format version {version}")] - /// The version of the format is not supported by this version of `llama-rs`. - InvalidFormatVersion { - /// The format that was encountered. - container_type: ContainerType, - /// The version that was encountered. - version: u32, - }, - #[error("invalid value {ftype} for `f16` in hyperparameters")] - /// The `f16` hyperparameter had an invalid value. - HyperparametersF16Invalid { - /// The format type that was encountered. - ftype: i32, - }, - #[error("unknown tensor `{tensor_name}` in {path:?}")] - /// The tensor `tensor_name` was encountered during the loading of `path`, but was not seen during - /// the model prelude. - UnknownTensor { - /// The name of the tensor. - tensor_name: String, - /// The path that failed. - path: PathBuf, - }, - #[error("the tensor `{tensor_name}` has the wrong size in {path:?}")] - /// The tensor `tensor_name` did not match its expected size. - TensorWrongSize { - /// The name of the tensor. - tensor_name: String, - /// The path that failed. - path: PathBuf, - }, - /// The tensor `tensor_name` did not have the expected format type. - #[error("invalid ftype {ftype} for tensor `{tensor_name}` in {path:?}")] - UnsupportedElementType { - /// The name of the tensor. - tensor_name: String, - /// The format type that was encountered. - ftype: i32, - /// The path that failed. - path: PathBuf, - }, - /// An invariant was broken. - /// - /// This error is not relevant unless `loader2` is being used. - #[error("invariant broken: {invariant} in {path:?}")] - InvariantBroken { - /// The path that failed. - path: PathBuf, - /// The invariant that was broken. - invariant: String, - }, - /// The model could not be created. - /// - /// This implies that there were no tensors in the model to be loaded. - /// - /// This error is not relevant unless `loader2` is being used. - #[error("could not create model from {path:?}")] - ModelNotCreated { - /// The path that failed. - path: PathBuf, - }, - /// Multiple parts of the model were found. - /// - /// Multi-part models are not supported. Please convert the model to a single part. - #[error("multipart models are not supported")] - MultipartNotSupported { - /// The paths that were found. 
- paths: Vec, - }, -} -impl From for LoadError { - fn from(value: FindAllModelFilesError) -> Self { - match value { - FindAllModelFilesError::NoParentPath { path } => LoadError::NoParentPath { path }, - FindAllModelFilesError::IO(err) => LoadError::Io(err), - } - } -} diff --git a/llama-rs/Cargo.toml b/llama/Cargo.toml similarity index 71% rename from llama-rs/Cargo.toml rename to llama/Cargo.toml index 7b3e2b6e..3a916b3a 100644 --- a/llama-rs/Cargo.toml +++ b/llama/Cargo.toml @@ -1,23 +1,20 @@ [package] -name = "llama-rs" +name = "llama" version = { workspace = true } edition = "2021" -rust-version = "1.65" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -ggml = { path = "../ggml" } -ggml-format = { path = "../ggml-format" } +llm-base = { path = "../llm-base" } +bytemuck = { workspace = true } +serde = { workspace = true } rand = { workspace = true } -bytemuck = "1.13.1" partial_sort = "0.2.0" thiserror = "1.0" -serde = { version = "1.0", features = ["derive"] } serde_bytes = "0.11" -memmap2 = "0.5.10" # Used for the `convert` feature serde_json = { version = "1.0", optional = true } @@ -26,7 +23,11 @@ rust_tokenizers = { version = "3.1.2", optional = true } # Used for the `quantize` feature half = { version = "2.2.1", optional = true } +ggml = { path = "../ggml", optional = true } [features] convert = ["dep:serde_json", "dep:protobuf", "dep:rust_tokenizers"] -quantize = ["dep:half"] +quantize = ["dep:half", "dep:ggml"] + +[dev-dependencies] +rand = { workspace = true } diff --git a/llama/examples/llama_inference.rs b/llama/examples/llama_inference.rs new file mode 100644 index 00000000..9b0ceb2b --- /dev/null +++ b/llama/examples/llama_inference.rs @@ -0,0 +1,42 @@ +use std::{convert::Infallible, env::args, io::Write}; + +use llm_base::{load_progress_callback, model::KnownModel}; + +extern crate llama; + +fn main() { + let args: Vec = args().collect(); + let loc = &args[1]; + let prompt = match &args.len() { + 3 => &args[2], + _ => "Rust is a cool programming language because ", + }; + + println!(" >>> Loading model from {loc}..."); + let now = std::time::Instant::now(); + + let llama = llama::Llama::load(loc, true, 512, load_progress_callback) + .unwrap_or_else(|e| panic!("Error loading model from {loc}: {e}")); + + println!(" >>> Model loaded in {} ms.", now.elapsed().as_millis()); + + let mut session = llama.start_session(Default::default()); + let res = session.inference_with_prompt::( + &llama, + &Default::default(), + prompt, + None, + &mut rand::thread_rng(), + |t| { + print!("{t}"); + std::io::stdout().flush().unwrap(); + + Ok(()) + }, + ); + + match res { + Ok(result) => println!("\n\nInference stats:\n{result}"), + Err(err) => println!("\n{err}"), + } +} diff --git a/llama-rs/src/convert.rs b/llama/src/convert.rs similarity index 96% rename from llama-rs/src/convert.rs rename to llama/src/convert.rs index 07c4939c..2acf23af 100644 --- a/llama-rs/src/convert.rs +++ b/llama/src/convert.rs @@ -3,6 +3,7 @@ //! This is *incomplete* and does not convert the weights. It only converts the //! vocabulary and hyperparameters. It is included as a preliminary step to //! full conversion. +use llm_base::FileType; /// /// For reference, see [the PR](https://github.com/rustformers/llama-rs/pull/83). 
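For completeness, a minimal usage sketch of the conversion entry point touched above. It assumes the `convert` feature is enabled and that `llm-base` re-exports the same `FileType` variants shown earlier in this diff; the model directory is hypothetical and must contain the tokenizer model, hyperparameter JSON, and `.pth` parts the converter reads.

```rust
use std::path::Path;

use llama::convert::convert_pth_to_ggml;
use llm_base::FileType;

fn main() {
    // Output lands next to the inputs as `rust-model-{file_type}.bin`; per the
    // module docs above, only the vocabulary and hyperparameters are converted.
    convert_pth_to_ggml(Path::new("./models/llama-7b"), FileType::MostlyF16);
}
```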
use rust_tokenizers::preprocessing::vocab::sentencepiece_proto::sentencepiece_model::ModelProto; @@ -16,7 +17,7 @@ use std::{ vec, }; -use crate::{loader_common::FileType, util, Hyperparameters, Vocabulary}; +use crate::{Hyperparameters, Vocabulary}; /// Converts a `pth` file to a `ggml` file. pub fn convert_pth_to_ggml(model_directory: &Path, file_type: FileType) { @@ -25,7 +26,7 @@ pub fn convert_pth_to_ggml(model_directory: &Path, file_type: FileType) { let hparams = load_hyperparameters(model_directory, file_type, &vocab); - let model_files = util::find_all_model_files(model_directory).unwrap(); + let model_files = llm_base::util::find_all_model_files(model_directory).unwrap(); for (i, _file) in model_files.iter().enumerate() { let fname_out = model_directory.join(format!("rust-model-{file_type}.bin")); @@ -79,7 +80,6 @@ fn load_hyperparameters(path: &Path, file_type: FileType, vocab: &Vocabulary) -> let json: HyperParametersJson = serde_json::from_str(&json).expect("Unable to parse json"); Hyperparameters { file_type, - n_ctx: 0, n_embd: json.dim, n_head: json.n_heads, n_layer: json.n_layers, diff --git a/llama-rs/src/model.rs b/llama/src/lib.rs similarity index 72% rename from llama-rs/src/model.rs rename to llama/src/lib.rs index 730afc14..133cdaff 100644 --- a/llama-rs/src/model.rs +++ b/llama/src/lib.rs @@ -1,19 +1,27 @@ -use std::{collections::HashMap, error::Error, path::Path}; +use std::{error::Error, path::Path}; -use crate::{ - loader, loader2, loader_common::FileType, vocabulary::TokenId, EvaluateOutputRequest, - InferenceParameters, InferenceSession, InferenceSessionParameters, LoadError, LoadProgress, - Vocabulary, +use llm_base::{ + util, EvaluateOutputRequest, FileType, InferenceParameters, InferenceSession, + InferenceSessionParameters, KnownModel, LoadError, LoadProgress, Mmap, TensorLoader, }; -use memmap2::Mmap; +#[cfg(feature = "convert")] +pub mod convert; + +#[cfg(feature = "quantize")] +pub mod quantize; + +mod old_loader; + +pub use llm_base::{ggml, util::TokenUtf8Buffer, TokenBias, TokenId, Vocabulary}; /// The weights for the LLaMA model. All the mutable state is split into a /// separate struct `InferenceSession`. /// /// # Safety /// This implements [Send] and [Sync] as it is immutable after construction. 
-pub struct Model { +pub struct Llama { hyperparameters: Hyperparameters, + n_context_tokens: usize, vocabulary: Vocabulary, @@ -24,107 +32,43 @@ pub struct Model { layers: Vec, - tensors: HashMap, - /// Needs to kept alive while the model is alive _mmap: Option, // Must be kept alive for the model - _context: ggml::Context, + _context: ggml::context::Context, } -unsafe impl Send for Model {} -unsafe impl Sync for Model {} - -impl Model { - pub(crate) fn new_loader1( - context: ggml::Context, - hparams: Hyperparameters, - vocabulary: Vocabulary, - n_ff: usize, - wtype: ggml::Type, - mmap: Option, - ) -> Model { - let n_embd = hparams.n_embd; - let n_layer = hparams.n_layer; - let n_vocab = hparams.n_vocab; - - let mut tensors = HashMap::new(); - - let tok_embeddings = context.new_tensor_2d(wtype, n_embd, n_vocab); - let norm = context.new_tensor_1d(ggml::Type::F32, n_embd); - let output = context.new_tensor_2d(wtype, n_embd, n_vocab); - - tensors.insert("tok_embeddings.weight".to_owned(), tok_embeddings.share()); - tensors.insert("norm.weight".to_owned(), norm.share()); - tensors.insert("output.weight".to_owned(), output.share()); - - let mut layers = Vec::new(); - for i in 0..n_layer { - let layer = Layer { - attention_norm: context.new_tensor_1d(ggml::Type::F32, n_embd), - wq: context.new_tensor_2d(wtype, n_embd, n_embd), - wk: context.new_tensor_2d(wtype, n_embd, n_embd), - wv: context.new_tensor_2d(wtype, n_embd, n_embd), - wo: context.new_tensor_2d(wtype, n_embd, n_embd), - ffn_norm: context.new_tensor_1d(ggml::Type::F32, n_embd), - w1: context.new_tensor_2d(wtype, n_embd, n_ff), - w2: context.new_tensor_2d(wtype, n_ff, n_embd), - w3: context.new_tensor_2d(wtype, n_embd, n_ff), - }; +unsafe impl Send for Llama {} +unsafe impl Sync for Llama {} - tensors.insert( - format!("layers.{i}.attention_norm.weight"), - layer.attention_norm.share(), - ); - - tensors.insert(format!("layers.{i}.attention.wq.weight"), layer.wq.share()); - tensors.insert(format!("layers.{i}.attention.wk.weight"), layer.wk.share()); - tensors.insert(format!("layers.{i}.attention.wv.weight"), layer.wv.share()); - tensors.insert(format!("layers.{i}.attention.wo.weight"), layer.wo.share()); - - tensors.insert( - format!("layers.{i}.ffn_norm.weight"), - layer.ffn_norm.share(), - ); - - tensors.insert( - format!("layers.{i}.feed_forward.w1.weight"), - layer.w1.share(), - ); - tensors.insert( - format!("layers.{i}.feed_forward.w2.weight"), - layer.w2.share(), - ); - tensors.insert( - format!("layers.{i}.feed_forward.w3.weight"), - layer.w3.share(), - ); - - layers.push(layer); - } - - Model { - hyperparameters: hparams, - vocabulary, - tok_embeddings, - norm, - output, - layers, - tensors, - _context: context, - _mmap: mmap, - } +impl Llama { + /// Load the model from `path` with `n_context_tokens` context tokens. + /// + /// The status of the loading process will be reported through `load_progress_callback`. 
+ pub fn load( + path: impl AsRef, + prefer_mmap: bool, + n_context_tokens: usize, + load_progress_callback: impl FnMut(LoadProgress), + ) -> Result { + llm_base::load(path, prefer_mmap, n_context_tokens, load_progress_callback) } +} +impl KnownModel for Llama { + type Hyperparameters = Hyperparameters; - pub(crate) fn new_loader2( - hyperparameters: Hyperparameters, + fn new( + hyperparameters: Self::Hyperparameters, + n_context_tokens: usize, vocabulary: Vocabulary, - n_ff: usize, tensor_loader: impl TensorLoader, - ) -> Result { + ) -> Result { let n_embd = hyperparameters.n_embd; let n_layer = hyperparameters.n_layer; let n_vocab = hyperparameters.n_vocab; + let n_mult = hyperparameters.n_mult; + + let n_ff = ((2 * (4 * n_embd) / 3 + n_mult - 1) / n_mult) * n_mult; let mut tl = tensor_loader; @@ -170,65 +114,33 @@ impl Model { layers.push(layer); } - let (_context, tensors, _mmap) = tl.finish(); + let (_context, _tensors, _mmap) = tl.finish(); - Ok(Model { + Ok(Self { hyperparameters, + n_context_tokens, vocabulary, tok_embeddings, norm, output, layers, - tensors, _context, _mmap, }) } - /// Load the model from `path` with `n_context_tokens` context tokens. - /// - /// The status of the loading process will be reported through `load_progress_callback`. - pub fn load( - path: impl AsRef, - prefer_mmap: bool, - n_context_tokens: usize, - load_progress_callback: impl FnMut(LoadProgress), - ) -> Result { - // Loader2 is the default. It can support GGML, GGMF and GGJT, but does not support multipart models. - // - // Loader1 is the old loader. It can support multipart models, but will be deprecated. - let use_loader_2: bool = match std::env::var("GGML_LOADER").as_deref() { - Ok("2") => true, - Ok("1") => false, - Ok(_) => panic!("Please use GGML_LOADER=1 or GGML_LOADER=2"), - Err(_) => true, - }; - - if use_loader_2 { - loader2::load(path, prefer_mmap, n_context_tokens, load_progress_callback) - } else { - loader::load(path, prefer_mmap, n_context_tokens, load_progress_callback) - } - } - /// Starts a new `InferenceSession` for this model. - pub fn start_session(&self, params: InferenceSessionParameters) -> InferenceSession { + fn start_session(&self, params: InferenceSessionParameters) -> InferenceSession { InferenceSession::new( params, - self.hyperparameters.n_ctx, + self.n_context_tokens, self.hyperparameters.n_layer, self.hyperparameters.n_embd, self.hyperparameters.n_vocab, ) } - /// Evaluates the transformer. - /// - /// The provided `output_request` struct lets you specify which additional - /// data you are interested in fetching from the transformer. Setting a - /// field to a `Some` value will clear and fill the provided vector with - /// data. The provided vector will be resized to the exact output size. - pub fn evaluate( + fn evaluate( &self, session: &mut InferenceSession, params: &InferenceParameters, @@ -244,7 +156,6 @@ impl Model { let Hyperparameters { n_vocab, - n_ctx, n_embd, n_mult: _, n_head, @@ -252,6 +163,7 @@ impl Model { n_rot, file_type: _, } = self.hyperparameters; + let n_ctx = self.n_context_tokens; // For the first run, we need to guess a maximum buffer size so we can measure // the actual memory consumption of the temporary ggml context. 
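Since the context length now comes from the caller rather than the hyperparameters, the feed-forward width `n_ff` is derived inside `new` instead of being passed in. A worked example of that expression, using LLaMA-7B-style values (`n_embd = 4096`, `n_mult = 256`) purely as assumed inputs:

```rust
fn main() {
    // Assumed LLaMA-7B-like hyperparameters, for illustration only.
    let (n_embd, n_mult) = (4096usize, 256usize);

    // Same expression as in `Llama::new` above.
    let n_ff = ((2 * (4 * n_embd) / 3 + n_mult - 1) / n_mult) * n_mult;

    // 2 * 16384 / 3 = 10922, rounded up to the next multiple of 256.
    assert_eq!(n_ff, 11008);
}
```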
@@ -271,7 +183,7 @@ impl Model { // add 10% to account for ggml object overhead buf_size = (1.1f64 * session.mem_per_token as f64 * n as f64) as usize; }; - let ctx0 = ggml::Context::init(buf_size, true); + let ctx0 = ggml::context::Context::init(buf_size, true); let mut gf = ggml::ComputationGraph::new(n_threads); @@ -512,36 +424,32 @@ impl Model { } /// Returns the vocabulary used by this model. - pub fn vocabulary(&self) -> &Vocabulary { + fn vocabulary(&self) -> &Vocabulary { &self.vocabulary } - pub(crate) fn tensors_mut(&mut self) -> &mut HashMap { - &mut self.tensors - } - - pub(crate) fn n_ctx(&self) -> usize { - self.hyperparameters.n_ctx + fn n_ctx(&self) -> usize { + self.n_context_tokens } } #[cfg(test)] -impl Model { +impl Llama { /// This does *not* construct a valid model. All of the tensors are entirely /// empty. However, it can be used to determine if some code will compile. fn new_empty() -> Self { - let context = ggml::Context::init(1 * 1024 * 1024, true); + let context = ggml::context::Context::init(1024 * 1024, true); let tok_embeddings = context.new_f32(0.0); let norm = context.new_f32(0.0); let output = context.new_f32(0.0); Self { hyperparameters: Default::default(), + n_context_tokens: 0, vocabulary: Default::default(), tok_embeddings, norm, output, layers: Default::default(), - tensors: Default::default(), _mmap: Default::default(), _context: context, } @@ -553,8 +461,6 @@ impl Model { pub struct Hyperparameters { /// n_vocab pub n_vocab: usize, - /// n_ctx - pub n_ctx: usize, /// n_embd pub n_embd: usize, /// n_mult @@ -568,10 +474,25 @@ pub struct Hyperparameters { /// file_type pub file_type: FileType, } +impl llm_base::Hyperparameters for Hyperparameters { + fn read(reader: &mut dyn std::io::BufRead) -> Result { + Ok(Hyperparameters { + n_vocab: util::read_i32(reader)?.try_into()?, + n_embd: util::read_i32(reader)?.try_into()?, + n_mult: util::read_i32(reader)?.try_into()?, + n_head: util::read_i32(reader)?.try_into()?, + n_layer: util::read_i32(reader)?.try_into()?, + n_rot: util::read_i32(reader)?.try_into()?, + file_type: { + let ftype = util::read_i32(reader)?; + FileType::try_from(ftype).map_err(|_| LoadError::UnsupportedFileType(ftype))? + }, + }) + } -pub(crate) trait TensorLoader { - fn load(&mut self, name: &str, ne: &[usize]) -> Result; - fn finish(self) -> (ggml::Context, HashMap, Option); + fn n_vocabulary(&self) -> usize { + self.n_vocab + } } struct Layer { @@ -598,7 +519,7 @@ mod tests { #[test] fn can_share_model_between_threads() { - let model = Arc::new(Model::new_empty()); + let model = Arc::new(Llama::new_empty()); for _ in 0..4 { let model = model.clone(); diff --git a/llama-rs/src/loader.rs b/llama/src/old_loader.rs similarity index 84% rename from llama-rs/src/loader.rs rename to llama/src/old_loader.rs index f5e4fef8..fd38bcd0 100644 --- a/llama-rs/src/loader.rs +++ b/llama/src/old_loader.rs @@ -1,4 +1,9 @@ #![allow(dead_code)] +//! Old loader. Can load multipart models, but is difficult to maintain. +//! Plan is to use this to create a tool that can convert multipart models +//! to single-part models for use with the new loader. +//! +//! 
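The `can_share_model_between_threads` test retained above exercises the unsafe `Send`/`Sync` impls on `Llama`. A sketch of the usage pattern that motivates them, sharing one loaded model across threads while each thread keeps its own session (the thread count and setup are assumptions):

```rust
use std::sync::Arc;

use llm_base::KnownModel;

fn run_in_parallel(model: llama::Llama) {
    // The immutable weights are shared; mutable state lives in per-thread sessions.
    let model = Arc::new(model);

    let handles: Vec<_> = (0..4)
        .map(|_| {
            let model = Arc::clone(&model);
            std::thread::spawn(move || {
                // Each worker starts an independent `InferenceSession`.
                let _session = model.start_session(Default::default());
                // ... feed prompts / sample tokens here ...
            })
        })
        .collect();

    for handle in handles {
        handle.join().unwrap();
    }
}
```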
use std::{ collections::HashMap, @@ -6,23 +11,16 @@ use std::{ path::Path, }; -use crate::{ - loader_common::FileType, - util::{self, mulf}, - LoadError, LoadProgress, Model, TokenId, Vocabulary, -}; -use crate::{ElementType, Hyperparameters}; -use ggml_format::{ - util::{has_data_left, read_bytes_with_len, read_f32, read_i32, read_u32}, - ContainerType, -}; +use crate::Hyperparameters; +use crate::{Llama, LoadError, LoadProgress, TokenId, Vocabulary}; +use llm_base::{ggml, mulf, util, ContainerType, FileType}; pub(crate) fn load( path: impl AsRef, prefer_mmap: bool, n_context_tokens: usize, mut load_progress_callback: impl FnMut(LoadProgress), -) -> Result { +) -> Result { use std::fs::File; use std::io::BufReader; @@ -35,7 +33,7 @@ pub(crate) fn load( let mut reader = BufReader::new(&file); // Verify magic - let magic = read_u32(&mut reader)?; + let magic = util::read_u32(&mut reader)?; let model_type: ContainerType = match magic { ggml::FILE_MAGIC_GGMF => ContainerType::Ggmf, ggml::FILE_MAGIC_GGJT => ContainerType::Ggjt, @@ -51,7 +49,7 @@ pub(crate) fn load( // Load format version match model_type { ContainerType::Ggmf | ContainerType::Ggjt => { - let _version: u32 = match read_u32(&mut reader)? { + let _version: u32 = match util::read_u32(&mut reader)? { ggml::FORMAT_VERSION => ggml::FORMAT_VERSION, version => { return Err(LoadError::InvalidFormatVersion { @@ -71,15 +69,14 @@ pub(crate) fn load( // NOTE: Field order matters! Data is laid out in the file exactly // in this order. let hparams = Hyperparameters { - n_vocab: read_i32(&mut reader)?.try_into()?, - n_ctx: n_context_tokens, - n_embd: read_i32(&mut reader)?.try_into()?, - n_mult: read_i32(&mut reader)?.try_into()?, - n_head: read_i32(&mut reader)?.try_into()?, - n_layer: read_i32(&mut reader)?.try_into()?, - n_rot: read_i32(&mut reader)?.try_into()?, + n_vocab: util::read_i32(&mut reader)?.try_into()?, + n_embd: util::read_i32(&mut reader)?.try_into()?, + n_mult: util::read_i32(&mut reader)?.try_into()?, + n_head: util::read_i32(&mut reader)?.try_into()?, + n_layer: util::read_i32(&mut reader)?.try_into()?, + n_rot: util::read_i32(&mut reader)?.try_into()?, file_type: { - let ftype = read_i32(&mut reader)?; + let ftype = util::read_i32(&mut reader)?; FileType::try_from(ftype).map_err(|_| LoadError::UnsupportedFileType(ftype)) }?, }; @@ -87,7 +84,7 @@ pub(crate) fn load( let n_ff = ((2 * (4 * hparams.n_embd) / 3 + hparams.n_mult - 1) / hparams.n_mult) * hparams.n_mult; - load_progress_callback(LoadProgress::HyperparametersLoaded(&hparams)); + load_progress_callback(LoadProgress::HyperparametersLoaded); // =============== // Load vocabulary @@ -96,12 +93,12 @@ pub(crate) fn load( let mut vocab = Vocabulary::default(); for i in 0..hparams.n_vocab { - let len = read_i32(&mut reader)?; + let len = util::read_i32(&mut reader)?; let id = i as TokenId; - let token = read_bytes_with_len(&mut reader, len.try_into()?)?; + let token = util::read_bytes_with_len(&mut reader, len.try_into()?)?; let score = match model_type { - ContainerType::Ggmf | ContainerType::Ggjt => read_f32(&mut reader)?, + ContainerType::Ggmf | ContainerType::Ggjt => util::read_f32(&mut reader)?, ContainerType::Ggml => { // Legacy model, set empty score 0. 
@@ -164,7 +161,7 @@ pub(crate) fn load( }; // Initialize the context - let context = ggml::Context::init(ctx_size, alloc); + let context = ggml::context::Context::init(ctx_size, alloc); let (mmap, mmap_ptr) = if prefer_mmap && model_type.support_mmap() { let mmap = util::mmap_populate(&file)?; @@ -174,30 +171,33 @@ pub(crate) fn load( (None, None) }; - let mut model = Model::new_loader1(context, hparams, vocabulary, n_ff, wtype, mmap); - match model_type { - ContainerType::Ggmf | ContainerType::Ggml => { - let file_offset = reader.stream_position()?; - drop(reader); - load_weights_ggmf_or_unversioned( - file_offset, - main_path, - load_progress_callback, - model.tensors_mut(), - )? - } - ContainerType::Ggjt => { - load_weights_ggjt( - &mut reader, - mmap_ptr, - main_path, - load_progress_callback, - model.tensors_mut(), - )?; - } - } - - Ok(model) + let _ = (context, vocabulary, mmap, mmap_ptr, n_context_tokens); + + // let mut model = Llama::new_loader1(context, hparams, vocabulary, n_ff, wtype, mmap); + // match model_type { + // ContainerType::Ggmf | ContainerType::Ggml => { + // let file_offset = reader.stream_position()?; + // drop(reader); + // load_weights_ggmf_or_unversioned( + // file_offset, + // main_path, + // load_progress_callback, + // model.tensors_mut(), + // )? + // } + // ContainerType::Ggjt => { + // load_weights_ggjt( + // &mut reader, + // mmap_ptr, + // main_path, + // load_progress_callback, + // model.tensors_mut(), + // )?; + // } + // } + + // Ok(model) + todo!() } /// Helper function. Reads a string from the buffer and returns it. @@ -243,13 +243,13 @@ fn load_weights_ggmf_or_unversioned( // Load weights loop { - if !has_data_left(&mut part_reader)? { + if !util::has_data_left(&mut part_reader)? { break; } - let n_dims = usize::try_from(read_i32(&mut part_reader)?)?; - let length = read_i32(&mut part_reader)?; - let ftype = read_i32(&mut part_reader)?; + let n_dims = usize::try_from(util::read_i32(&mut part_reader)?)?; + let length = util::read_i32(&mut part_reader)?; + let ftype = util::read_u32(&mut part_reader)?; let TensorHeaderGgmf { nelements, @@ -371,14 +371,14 @@ fn load_tensor_header_ggmf<'a>( tensors: &'a mut HashMap, path: &Path, n_parts: usize, - ftype: i32, + ftype: u32, ) -> Result, LoadError> { let mut nelements = 1; let mut ne = [1i64, 1i64]; assert!(n_dims <= ne.len()); #[allow(clippy::needless_range_loop)] for i in 0..n_dims { - ne[i] = read_i32(reader)? as i64; + ne[i] = util::read_i32(reader)? as i64; nelements *= usize::try_from(ne[i])?; } let tensor_name = read_string(reader, length as usize)?; @@ -455,10 +455,10 @@ fn load_tensor_header_ggmf<'a>( }) } -fn tensor_type_size(ftype: i32, ne: [i64; 2]) -> Option { +fn tensor_type_size(ftype: u32, ne: [i64; 2]) -> Option { let ftype = ggml::Type::try_from(ftype).ok()?; match ftype { - ElementType::Q4_0 | ElementType::Q4_1 => { + ggml::Type::Q4_0 | ggml::Type::Q4_1 => { assert_eq!(ne[0] % 64, 0); } _ => {} @@ -484,20 +484,20 @@ fn load_weights_ggjt( }); loop { - if !has_data_left(reader)? { + if !util::has_data_left(reader)? { break; } - let n_dims = read_i32(reader)? as usize; - let length = read_i32(reader)?; - let ftype = read_i32(reader)?; + let n_dims = util::read_i32(reader)? as usize; + let length = util::read_i32(reader)?; + let ftype = util::read_u32(reader)?; let mut nelements: usize = 1; let mut ne = [1i64, 1]; assert!(n_dims <= ne.len()); #[allow(clippy::needless_range_loop)] for i in 0..n_dims { - let dim = read_i32(reader)? as usize; + let dim = util::read_i32(reader)? 
as usize; ne[i] = dim as i64; nelements *= dim; } diff --git a/llama-rs/src/quantize.rs b/llama/src/quantize.rs similarity index 92% rename from llama-rs/src/quantize.rs rename to llama/src/quantize.rs index dd7ec58b..055def3f 100644 --- a/llama-rs/src/quantize.rs +++ b/llama/src/quantize.rs @@ -1,8 +1,12 @@ //! Implements quantization of weights. -use crate::{loader2::Loader, Hyperparameters, LoadError, LoadProgress}; -use ggml_format::{util::write_i32, SaveError, SaveHandler, TensorData, TensorInfo}; +use crate::{Hyperparameters, LoadError, LoadProgress}; +use ggml::{ + loader::TensorInfo, + saver::{SaveError, SaveHandler, TensorData}, +}; use half::f16; +use llm_base::{ggml, util, Loader}; use std::{ collections::HashMap, fs::File, @@ -17,7 +21,7 @@ use thiserror::Error; /// Progress of quantization. pub enum QuantizeProgress<'a> { /// Hyperparameters have been loaded. - HyperparametersLoaded(&'a Hyperparameters), + HyperparametersLoaded, /// A tensor is being loaded. TensorLoading { /// Name of the tensor. @@ -145,15 +149,15 @@ pub fn quantize( path: path_in.to_owned(), })?; let mut reader = BufReader::new(&file_in); - let mut loader = Loader::new(0, { + let mut loader = Loader::new({ let progress_callback = progress_callback.clone(); move |p| { - if let LoadProgress::HyperparametersLoaded(h) = p { - progress_callback(QuantizeProgress::HyperparametersLoaded(h)) + if let LoadProgress::HyperparametersLoaded = p { + progress_callback(QuantizeProgress::HyperparametersLoaded) } } }); - ggml_format::load_model(&mut reader, &mut loader) + ggml::loader::load_model(&mut reader, &mut loader) .map_err(|err| LoadError::from_format_error(err, path_in.to_owned()))?; // Save the quantized model, quantizing as we go @@ -180,7 +184,7 @@ pub fn quantize( &mut file_in, |p| progress_callback(p), ); - ggml_format::save_model( + ggml::saver::save_model( &mut writer, &mut saver, &vocabulary, @@ -240,13 +244,13 @@ impl<'a, F: Fn(QuantizeProgress)> QuantizeSaver<'a, F> { impl SaveHandler for QuantizeSaver<'_, F> { fn write_hyperparameters(&mut self, writer: &mut dyn Write) -> Result<(), QuantizeError> { let h = self.hyperparameters; - write_i32(writer, h.n_vocab.try_into()?)?; - write_i32(writer, h.n_embd.try_into()?)?; - write_i32(writer, h.n_mult.try_into()?)?; - write_i32(writer, h.n_head.try_into()?)?; - write_i32(writer, h.n_layer.try_into()?)?; - write_i32(writer, h.n_rot.try_into()?)?; - write_i32(writer, h.file_type.into())?; + util::write_i32(writer, h.n_vocab.try_into()?)?; + util::write_i32(writer, h.n_embd.try_into()?)?; + util::write_i32(writer, h.n_mult.try_into()?)?; + util::write_i32(writer, h.n_head.try_into()?)?; + util::write_i32(writer, h.n_layer.try_into()?)?; + util::write_i32(writer, h.n_rot.try_into()?)?; + util::write_i32(writer, h.file_type.into())?; Ok(()) } diff --git a/llm-base/Cargo.toml b/llm-base/Cargo.toml new file mode 100644 index 00000000..9e758251 --- /dev/null +++ b/llm-base/Cargo.toml @@ -0,0 +1,22 @@ +[package] +name = "llm-base" +version = { workspace = true } +edition = "2021" +rust-version = "1.65" + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] +ggml = { path = "../ggml" } + +bytemuck = { workspace = true } +rand = { workspace = true } +serde = { workspace = true } + +bincode = "1.3.3" +log = "0.4" +partial_sort = "0.2.0" +thiserror = "1.0" +serde_bytes = "0.11" +zstd = { version = "0.12", default-features = false } +memmap2 = "0.5.10" diff --git a/llama-rs/src/inference_session.rs 
b/llm-base/src/inference_session.rs similarity index 96% rename from llama-rs/src/inference_session.rs rename to llm-base/src/inference_session.rs index 079401cd..96e14338 100644 --- a/llama-rs/src/inference_session.rs +++ b/llm-base/src/inference_session.rs @@ -5,7 +5,7 @@ use rand::{distributions::WeightedIndex, prelude::Distribution}; use thiserror::Error; use crate::{ - util::mulf, EvaluateOutputRequest, InferenceError, InferenceParameters, Model, TokenId, + mulf, EvaluateOutputRequest, InferenceError, InferenceParameters, Model, TokenId, TokenUtf8Buffer, EOT_TOKEN_ID, }; @@ -27,7 +27,7 @@ const SCRATCH_SIZE: usize = 512 * 1024 * 1024; /// to use it from multiple threads. pub struct InferenceSession { // Must be kept alive for the model - pub(crate) _session_ctx: ggml::Context, + pub(crate) _session_ctx: ggml::context::Context, // Original size of the memory used to create this context. pub(crate) memory_size: usize, @@ -35,34 +35,37 @@ pub struct InferenceSession { // Parameters for the session. pub(crate) params: InferenceSessionParameters, - pub(crate) memory_k: ggml::Tensor, - pub(crate) memory_v: ggml::Tensor, + /// Memory K + pub memory_k: ggml::Tensor, + + /// Memory M + pub memory_v: ggml::Tensor, /// How many tokens have been fed into the model's working memory so far. - pub(crate) n_past: usize, + pub n_past: usize, /// How much memory is required per token for the temporary context used /// during inference. - pub(crate) mem_per_token: usize, + pub mem_per_token: usize, /// All tokens generated by this inference session pub(crate) tokens: Vec, /// The logits that were last predicted by the network. Zeroed out otherwise. - pub(crate) last_logits: Vec, + pub last_logits: Vec, /// Scratch buffers used during inference. /// /// The number of scratch buffers was copied from `llama.cpp`. /// There is no specific reason for this number, but one is insufficient. - pub(crate) scratch: [ggml::Buffer; 2], + pub scratch: [ggml::Buffer; 2], } unsafe impl Send for InferenceSession {} impl InferenceSession { /// Feed a prompt to the model for this session. pub fn feed_prompt( &mut self, - model: &Model, + model: &dyn Model, params: &InferenceParameters, prompt: &str, mut callback: impl FnMut(&[u8]) -> Result<(), E>, @@ -100,7 +103,7 @@ impl InferenceSession { /// Infer the next token for this session. pub fn infer_next_token<'v>( &mut self, - model: &'v Model, + model: &'v dyn Model, params: &InferenceParameters, rng: &mut impl rand::Rng, ) -> Result<&'v [u8], InferenceError> { @@ -137,7 +140,7 @@ impl InferenceSession { /// If `params.play_back_previous_tokens` is specified, this will "play back" all existing tokens in the session. pub fn inference_with_prompt( &mut self, - model: &Model, + model: &dyn Model, params: &InferenceParameters, prompt: &str, maximum_token_count: Option, @@ -319,7 +322,7 @@ impl InferenceSession { /// Creates an [InferenceSession] from a snapshot. 
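`feed_prompt`, `infer_next_token` and `inference_with_prompt` now take `&dyn Model` rather than the concrete LLaMA type, so one session implementation can drive any architecture behind the type-erased trait. A sketch of a manual prompt-then-sample loop against that interface; the helper name and error handling are assumptions, and only the signatures shown above are relied on:

```rust
use std::convert::Infallible;

use llm_base::{InferenceError, InferenceParameters, Model};

fn complete(model: &dyn Model, prompt: &str, max_tokens: usize) -> Result<String, InferenceError> {
    let params = InferenceParameters::default();
    let mut session = model.start_session(Default::default());
    let mut rng = rand::thread_rng();
    let mut out = String::new();

    // Feed the prompt without echoing its tokens back.
    session.feed_prompt::<Infallible>(model, &params, prompt, |_| Ok(()))?;

    // Then sample tokens one at a time until the model signals end of text.
    for _ in 0..max_tokens {
        match session.infer_next_token(model, &params, &mut rng) {
            // A real caller would stitch split UTF-8 sequences with `TokenUtf8Buffer`.
            Ok(token_bytes) => out.push_str(&String::from_utf8_lossy(token_bytes)),
            Err(InferenceError::EndOfText) => break,
            Err(err) => return Err(err),
        }
    }

    Ok(out)
}
```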
pub fn from_snapshot( snapshot: InferenceSnapshot, - model: &Model, + model: &dyn Model, ) -> Result { let mut session = model.start_session(snapshot.session_params); @@ -348,7 +351,8 @@ impl InferenceSession { } } impl InferenceSession { - pub(crate) fn new( + /// Create a new InferenceSession + pub fn new( params: InferenceSessionParameters, n_ctx: usize, n_layer: usize, @@ -373,7 +377,7 @@ impl InferenceSession { ctx_size }; - let session_ctx = ggml::Context::init(ctx_size, true); + let session_ctx = ggml::context::Context::init(ctx_size, true); // Initialize key + value memory tensors let n_mem = n_layer * n_ctx; @@ -405,7 +409,7 @@ impl InferenceSession { } impl Clone for InferenceSession { fn clone(&self) -> Self { - let context = ggml::Context::init(self.memory_size, true); + let context = ggml::context::Context::init(self.memory_size, true); let memory_k = context.new_tensor_1d(self.memory_k.get_type(), self.memory_k.nelements()); let memory_v = context.new_tensor_1d(self.memory_v.get_type(), self.memory_v.nelements()); diff --git a/llama-rs/src/lib.rs b/llm-base/src/lib.rs similarity index 91% rename from llama-rs/src/lib.rs rename to llm-base/src/lib.rs index 802229ac..871bc8db 100644 --- a/llama-rs/src/lib.rs +++ b/llm-base/src/lib.rs @@ -3,26 +3,29 @@ use thiserror::Error; -#[cfg(feature = "convert")] -pub mod convert; -#[cfg(feature = "quantize")] -pub mod quantize; +/// Large language model +pub mod model; +/// Snapshots +pub mod snapshot; +/// Utilities +pub mod util; mod inference_session; mod loader; -mod loader2; -mod loader_common; -mod model; -mod util; mod vocabulary; +pub use ggml; pub use ggml::Type as ElementType; pub use inference_session::{ InferenceSession, InferenceSessionParameters, InferenceSnapshot, ModelKVMemoryType, SnapshotError, }; -pub use loader_common::{FileType, LoadError, LoadProgress}; -pub use model::{Hyperparameters, Model}; +pub use loader::{ + load, load_progress_callback, ContainerType, FileType, LoadError, LoadProgress, Loader, + TensorLoader, +}; +pub use memmap2::Mmap; +pub use model::{Hyperparameters, KnownModel, Model}; pub use util::TokenUtf8Buffer; pub use vocabulary::{TokenBias, TokenId, Vocabulary}; diff --git a/llm-base/src/loader.rs b/llm-base/src/loader.rs new file mode 100644 index 00000000..f3ebc082 --- /dev/null +++ b/llm-base/src/loader.rs @@ -0,0 +1,557 @@ +use std::{ + collections::HashMap, + fmt::{Display, Formatter}, + fs::File, + io::{BufRead, BufReader, Read, Seek, SeekFrom}, + path::{Path, PathBuf}, +}; + +use crate::{ + util::{self, FindAllModelFilesError}, + Hyperparameters, KnownModel, TokenId, Vocabulary, +}; +pub use ggml::ContainerType; +use ggml::{ + context::Context, + loader::{LoadError as FormatLoadError, PartialHyperparameters, TensorInfo}, +}; +use memmap2::Mmap; +use thiserror::Error; + +/// How the tensors are stored in the GGML LLaMA model. +#[derive(Debug, PartialEq, Clone, Copy, Eq, Default)] +pub enum FileType { + /// All tensors are stored as f32. + F32, + #[default] + /// All tensors are mostly stored as `f16`, except for the 1D tensors (32-bit). + MostlyF16, + /// All tensors are mostly stored as `Q4_0`, except for the 1D tensors (32-bit). + MostlyQ4_0, + /// All tensors are mostly stored as `Q4_1`, except for the 1D tensors (32-bit) + MostlyQ4_1, + /// All tensors are mostly stored as `Q4_1`, except for the 1D tensors (32-bit) + /// and the `tok_embeddings.weight` (f16) and `output.weight` tensors (f16). 
+ MostlyQ4_1SomeF16, + /// All tensors are mostly stored as `Q4_2`, except for the 1D tensors (32-bit). + MostlyQ4_2, + /// All tensors are mostly stored as `Q4_3`, except for the 1D tensors (32-bit). + MostlyQ4_3, +} +impl From for i32 { + fn from(value: FileType) -> Self { + match value { + FileType::F32 => 0, + FileType::MostlyF16 => 1, + FileType::MostlyQ4_0 => 2, + FileType::MostlyQ4_1 => 3, + FileType::MostlyQ4_1SomeF16 => 4, + FileType::MostlyQ4_2 => 5, + FileType::MostlyQ4_3 => 6, + } + } +} +impl TryFrom for FileType { + type Error = (); + + fn try_from(value: i32) -> Result { + match value { + 0 => Ok(FileType::F32), + 1 => Ok(FileType::MostlyF16), + 2 => Ok(FileType::MostlyQ4_0), + 3 => Ok(FileType::MostlyQ4_1), + 4 => Ok(FileType::MostlyQ4_1SomeF16), + 5 => Ok(FileType::MostlyQ4_2), + 6 => Ok(FileType::MostlyQ4_3), + _ => Err(()), + } + } +} +impl Display for FileType { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + match self { + FileType::F32 => write!(f, "f32"), + FileType::MostlyF16 => write!(f, "f16"), + FileType::MostlyQ4_0 => write!(f, "q4_0"), + FileType::MostlyQ4_1 => write!(f, "q4_1"), + FileType::MostlyQ4_1SomeF16 => write!(f, "q4_1_with_f16"), + FileType::MostlyQ4_2 => write!(f, "q4_2"), + FileType::MostlyQ4_3 => write!(f, "q4_3"), + } + } +} + +/// Each variant represents a step within the process of loading the model. +/// These can be used to report progress to the user. +#[derive(Clone, PartialEq, Eq, Debug)] +pub enum LoadProgress<'a> { + /// The hyperparameters have been loaded from the model. + HyperparametersLoaded, + /// The context has been created. + ContextSize { + /// The size of the context. + bytes: usize, + }, + /// A part of the model is being loaded. + PartLoading { + /// The path to the model part. + file: &'a Path, + /// The current part (0-indexed). + current_part: usize, + /// The number of total parts. + total_parts: usize, + }, + /// A tensor from the current part has been loaded. + PartTensorLoaded { + /// The path to the model part. + file: &'a Path, + /// The current tensor (0-indexed). + current_tensor: usize, + /// The number of total tensors. + tensor_count: usize, + }, + /// A model part has finished fully loading. + PartLoaded { + /// The path to the model part. + file: &'a Path, + /// The number of bytes in the part. + byte_size: usize, + /// The number of tensors in the part. + tensor_count: usize, + }, +} + +#[derive(Error, Debug)] +/// Errors encountered during the loading process. +pub enum LoadError { + #[error("could not open file {path:?}")] + /// A file failed to open. + OpenFileFailed { + /// The original error. + source: std::io::Error, + /// The path that failed. + path: PathBuf, + }, + #[error("no parent path for {path:?}")] + /// There is no parent path for a given path. + NoParentPath { + /// The path without a parent. + path: PathBuf, + }, + #[error("unable to read exactly {bytes} bytes")] + /// Reading exactly `bytes` from a file failed. + ReadExactFailed { + /// The original error. + source: std::io::Error, + /// The number of bytes that were attempted to be read. + bytes: usize, + }, + #[error("non-specific I/O error")] + /// A non-specific IO error. + Io(#[from] std::io::Error), + #[error("could not convert bytes to a UTF-8 string")] + /// One of the strings encountered was not valid UTF-8. + InvalidUtf8(#[from] std::string::FromUtf8Error), + #[error("invalid integer conversion")] + /// One of the integers encountered could not be converted to a more appropriate type. 
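The two conversions above are meant to round-trip the on-disk `ftype` discriminant; anything outside the known range is rejected so callers can surface it as `LoadError::UnsupportedFileType`. A small sketch of that contract:

```rust
use llm_base::FileType;

fn main() {
    let raw = i32::from(FileType::MostlyQ4_0);
    assert_eq!(raw, 2);

    // Known discriminants convert back losslessly...
    assert_eq!(FileType::try_from(raw), Ok(FileType::MostlyQ4_0));
    // ...and unknown ones are rejected with `Err(())` for the caller to map.
    assert_eq!(FileType::try_from(42), Err(()));
}
```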
+ InvalidIntegerConversion(#[from] std::num::TryFromIntError), + #[error("unsupported f16_: {0}")] + /// The `f16_` hyperparameter had an invalid value. + UnsupportedFileType(i32), + #[error("invalid magic number {magic:#x} for {path:?}")] + /// An invalid magic number was encountered during the loading process. + InvalidMagic { + /// The path that failed. + path: PathBuf, + /// The magic number that was encountered. + magic: u32, + }, + #[error("invalid file format version {version}")] + /// The version of the format is not supported by this version of `llama-rs`. + InvalidFormatVersion { + /// The format that was encountered. + container_type: ContainerType, + /// The version that was encountered. + version: u32, + }, + #[error("invalid value {ftype} for `f16` in hyperparameters")] + /// The `f16` hyperparameter had an invalid value. + HyperparametersF16Invalid { + /// The format type that was encountered. + ftype: i32, + }, + #[error("unknown tensor `{tensor_name}` in {path:?}")] + /// The tensor `tensor_name` was encountered during the loading of `path`, but was not seen during + /// the model prelude. + UnknownTensor { + /// The name of the tensor. + tensor_name: String, + /// The path that failed. + path: PathBuf, + }, + #[error("the tensor `{tensor_name}` has the wrong size in {path:?}")] + /// The tensor `tensor_name` did not match its expected size. + TensorWrongSize { + /// The name of the tensor. + tensor_name: String, + /// The path that failed. + path: PathBuf, + }, + /// The tensor `tensor_name` did not have the expected format type. + #[error("invalid ftype {ftype} for tensor `{tensor_name}` in {path:?}")] + UnsupportedElementType { + /// The name of the tensor. + tensor_name: String, + /// The format type that was encountered. + ftype: u32, + /// The path that failed. + path: PathBuf, + }, + /// An invariant was broken. + /// + /// This error is not relevant unless `loader2` is being used. + #[error("invariant broken: {invariant} in {path:?}")] + InvariantBroken { + /// The path that failed. + path: Option, + /// The invariant that was broken. + invariant: String, + }, + /// The model could not be created. + /// + /// This implies that there were no tensors in the model to be loaded. + /// + /// This error is not relevant unless `loader2` is being used. + #[error("could not create model from {path:?}")] + ModelNotCreated { + /// The path that failed. + path: PathBuf, + }, + /// Multiple parts of the model were found. + /// + /// Multi-part models are not supported. Please convert the model to a single part. + #[error("multipart models are not supported")] + MultipartNotSupported { + /// The paths that were found. 
+ paths: Vec, + }, +} +impl From for LoadError { + fn from(value: FindAllModelFilesError) -> Self { + match value { + FindAllModelFilesError::NoParentPath { path } => LoadError::NoParentPath { path }, + FindAllModelFilesError::IO(err) => LoadError::Io(err), + } + } +} + +impl LoadError { + #[doc(hidden)] + pub fn from_format_error(value: FormatLoadError, path: PathBuf) -> Self { + match value { + FormatLoadError::InvalidMagic(magic) => LoadError::InvalidMagic { path, magic }, + FormatLoadError::InvalidFormatVersion(container_type, version) => { + LoadError::InvalidFormatVersion { + container_type, + version, + } + } + FormatLoadError::Io(err) => LoadError::Io(err), + FormatLoadError::InvalidUtf8(err) => LoadError::InvalidUtf8(err), + FormatLoadError::InvalidIntegerConversion(err) => { + LoadError::InvalidIntegerConversion(err) + } + FormatLoadError::ImplementationError(err) => err, + FormatLoadError::UnsupportedElementType { tensor_name, ftype } => { + LoadError::UnsupportedElementType { + path, + tensor_name, + ftype, + } + } + FormatLoadError::InvariantBroken(invariant) => LoadError::InvariantBroken { + path: Some(path), + invariant, + }, + } + } +} + +/// Used by models to fetch tensors from a loader. +pub trait TensorLoader { + /// Loads a tensor from the loader. + fn load(&mut self, name: &str, ne: &[usize]) -> Result; + /// Finish loading the model, and extract all of the state from the loader. + fn finish(self) -> (Context, HashMap, Option); +} + +/// Load an arbitrary GGML model. +pub fn load( + path: impl AsRef, + prefer_mmap: bool, + n_context_tokens: usize, + mut load_progress_callback: impl FnMut(LoadProgress), +) -> Result { + let main_path = path.as_ref(); + + let paths = util::find_all_model_files(main_path)?; + if paths.len() != 1 { + return Err(LoadError::MultipartNotSupported { paths }); + } + + let file = File::open(main_path).map_err(|e| LoadError::OpenFileFailed { + source: e, + path: main_path.to_owned(), + })?; + let mut reader = BufReader::new(&file); + + let path = path.as_ref().to_owned(); + + (load_progress_callback)(LoadProgress::PartLoading { + file: &path, + current_part: 0, + total_parts: 1, + }); + + let mut loader = Loader::new(load_progress_callback); + + ggml::loader::load_model(&mut reader, &mut loader) + .map_err(|err| LoadError::from_format_error(err, path.clone()))?; + + let Loader { + hyperparameters, + vocabulary, + tensors, + mut load_progress_callback, + container_type, + .. + } = loader; + + let use_mmap = prefer_mmap && container_type.support_mmap(); + + let ctx_size = tensors + .values() + .map(|ti| { + ggml::Tensor::C_TYPE_SIZE + + ggml::OBJECT_SIZE + + if use_mmap { 0 } else { ti.calc_size() } + }) + .sum::(); + (load_progress_callback)(LoadProgress::ContextSize { bytes: ctx_size }); + let context = Context::init(ctx_size, !use_mmap); + + let mmap = if use_mmap { + let file = File::open(&path)?; + Some(unsafe { Mmap::map(&file)? 
}) + } else { + None + }; + + struct MmapCompatibleLoader<'a> { + path: PathBuf, + file: File, + tensors: HashMap, + context: Context, + mmap: Option, + load_progress_callback: &'a mut dyn FnMut(LoadProgress), + loaded_tensors: HashMap, + } + impl TensorLoader for MmapCompatibleLoader<'_> { + fn load(&mut self, name: &str, ne: &[usize]) -> Result { + let info = self + .tensors + .get(name) + .ok_or_else(|| LoadError::UnknownTensor { + path: self.path.clone(), + tensor_name: name.to_owned(), + })?; + + let dims = ne.len(); + if dims != info.n_dims { + return Err(LoadError::InvariantBroken { + path: Some(self.path.clone()), + invariant: format!( + "the tensor {name} should have {} dimensions, not {dims}", + info.n_dims + ), + }); + } + + let ctx = &self.context; + let mut tensor = match dims { + 1 => ctx.new_tensor_1d(info.element_type, ne[0]), + 2 => ctx.new_tensor_2d(info.element_type, ne[0], ne[1]), + 3 => ctx.new_tensor_3d(info.element_type, ne[0], ne[1], ne[2]), + _ => { + return Err(LoadError::InvariantBroken { + path: Some(self.path.clone()), + invariant: format!( + "the tensor {name} had an unsupported dimension count: {ne:?}" + ), + }) + } + }; + + match self.mmap.as_ref() { + Some(mmap) => unsafe { + let ptr = mmap.as_ptr().offset(info.start_offset as isize); + tensor.set_data(ptr as *mut std::ffi::c_void); + }, + None => { + let buf: &mut [u8] = unsafe { + std::slice::from_raw_parts_mut(tensor.data() as *mut u8, tensor.nbytes()) + }; + self.file.seek(SeekFrom::Start(info.start_offset))?; + self.file.read_exact(buf)?; + } + } + + self.loaded_tensors.insert(name.to_owned(), tensor.share()); + (self.load_progress_callback)(LoadProgress::PartTensorLoaded { + file: &self.path, + current_tensor: self.loaded_tensors.len(), + tensor_count: self.tensors.len(), + }); + + Ok(tensor) + } + + fn finish(self) -> (Context, HashMap, Option) { + (self.context, self.loaded_tensors, self.mmap) + } + } + + let tensors_len = tensors.len(); + let tl = MmapCompatibleLoader { + path: path.clone(), + file, + tensors, + context, + mmap, + load_progress_callback: &mut load_progress_callback, + loaded_tensors: Default::default(), + }; + + let model = KnownModel::new(hyperparameters, n_context_tokens, vocabulary, tl)?; + + (load_progress_callback)(LoadProgress::PartLoaded { + file: &path, + byte_size: 0, + tensor_count: tensors_len, + }); + + Ok(model) +} + +/// A GGML format loader for LLMs. +pub struct Loader { + // Input + load_progress_callback: F, + + // Output + /// The container type of the model. + pub container_type: ContainerType, + /// The hyperparameters of the model. + pub hyperparameters: Hp, + /// The vocabulary of the model. + pub vocabulary: Vocabulary, + /// The tensors of the model. + pub tensors: HashMap, +} +impl Loader { + /// Creates a new loader. 
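A sketch of calling the new generic `load` entry point directly with a caller-supplied progress closure instead of the provided `load_progress_callback`; the model path and context size are assumptions, and the closure only has to satisfy `impl FnMut(LoadProgress)`:

```rust
use llm_base::{load, KnownModel, LoadError, LoadProgress};

fn main() -> Result<(), LoadError> {
    let model: llama::Llama = load(
        "./models/llama-7b.bin", // assumed path
        true,                    // prefer mmap when the container supports it
        512,                     // n_context_tokens
        |progress| {
            // Only report tensor-level progress; ignore the other stages.
            if let LoadProgress::PartTensorLoaded {
                current_tensor,
                tensor_count,
                ..
            } = progress
            {
                eprintln!("loaded tensor {}/{}", current_tensor + 1, tensor_count);
            }
        },
    )?;

    let _session = model.start_session(Default::default());
    Ok(())
}
```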
+ pub fn new(load_progress_callback: F) -> Self { + Self { + load_progress_callback, + + container_type: ContainerType::Ggjt, + hyperparameters: Hp::default(), + vocabulary: Vocabulary::default(), + tensors: HashMap::default(), + } + } +} +impl ggml::loader::LoadHandler + for Loader +{ + fn container_type(&mut self, container_type: ContainerType) -> Result<(), LoadError> { + self.container_type = container_type; + Ok(()) + } + + fn vocabulary_token(&mut self, i: usize, token: Vec, score: f32) -> Result<(), LoadError> { + let id = match TokenId::try_from(i) { + Ok(id) => id, + Err(err) => return Err(LoadError::InvalidIntegerConversion(err)), + }; + self.vocabulary.push_token(id, token, score); + + Ok(()) + } + + fn read_hyperparameters( + &mut self, + reader: &mut dyn BufRead, + ) -> Result { + // NOTE: Field order matters! Data is laid out in the file exactly in this order. + let hyperparameters = Hp::read(reader)?; + let partial = PartialHyperparameters { + n_vocab: hyperparameters.n_vocabulary(), + }; + self.hyperparameters = hyperparameters; + (self.load_progress_callback)(LoadProgress::HyperparametersLoaded); + + Ok(partial) + } + + fn tensor_buffer(&mut self, info: TensorInfo) -> Result<(), LoadError> { + self.tensors.insert(info.name.clone(), info); + Ok(()) + } +} + +/// Default load progress callbacks (prints progress to stdout) +pub fn load_progress_callback(progress: LoadProgress) { + match progress { + LoadProgress::HyperparametersLoaded => println!("Loaded hyperparameters"), + LoadProgress::ContextSize { bytes } => println!( + "ggml ctx size = {:.2} MB\n", + bytes as f64 / (1024.0 * 1024.0) + ), + LoadProgress::PartLoading { + file, + current_part, + total_parts, + } => { + let current_part = current_part + 1; + println!( + "Loading model part {}/{} from '{}'\n", + current_part, + total_parts, + file.to_string_lossy() + ) + } + LoadProgress::PartTensorLoaded { + current_tensor, + tensor_count, + .. + } => { + let current_tensor = current_tensor + 1; + if current_tensor % 8 == 0 { + println!("Loaded tensor {current_tensor}/{tensor_count}"); + } + } + LoadProgress::PartLoaded { + file, + byte_size, + tensor_count, + } => { + println!("Loading of '{}' complete", file.to_string_lossy()); + println!( + "Model size = {:.2} MB / num tensors = {}", + byte_size as f64 / 1024.0 / 1024.0, + tensor_count + ); + } + }; +} diff --git a/llm-base/src/model.rs b/llm-base/src/model.rs new file mode 100644 index 00000000..ca5d6ccc --- /dev/null +++ b/llm-base/src/model.rs @@ -0,0 +1,104 @@ +use std::{error::Error, io::BufRead}; + +use crate::{ + loader::TensorLoader, vocabulary::TokenId, EvaluateOutputRequest, InferenceParameters, + InferenceSession, InferenceSessionParameters, LoadError, Vocabulary, +}; + +/// A large language model. +pub trait KnownModel { + /// Hyperparameters for the model + type Hyperparameters: Hyperparameters; + + /// Creates a new model from the provided hyperparameters. + fn new( + hyperparameters: Self::Hyperparameters, + n_context_tokens: usize, + vocabulary: Vocabulary, + tensor_loader: impl TensorLoader, + ) -> Result + where + Self: Sized; + + /// Starts a new `InferenceSession` for this model. + fn start_session(&self, params: InferenceSessionParameters) -> InferenceSession; + + /// Evaluates the transformer. + /// + /// The provided `output_request` struct lets you specify which additional + /// data you are interested in fetching from the transformer. Setting a + /// field to a `Some` value will clear and fill the provided vector with + /// data. 
The provided vector will be resized to the exact output size. + fn evaluate( + &self, + session: &mut InferenceSession, + params: &InferenceParameters, + input_tokens: &[TokenId], + output_request: &mut EvaluateOutputRequest, + ); + + /// Model vocabulary + fn vocabulary(&self) -> &Vocabulary; + + /// Model context size + fn n_ctx(&self) -> usize; +} + +/// A type-erased model to allow for interacting with a model without knowing +/// its hyperparameters. +pub trait Model { + /// Starts a new `InferenceSession` for this model. + fn start_session(&self, params: InferenceSessionParameters) -> InferenceSession; + + /// Evaluates the transformer. + /// + /// The provided `output_request` struct lets you specify which additional + /// data you are interested in fetching from the transformer. Setting a + /// field to a `Some` value will clear and fill the provided vector with + /// data. The provided vector will be resized to the exact output size. + fn evaluate( + &self, + session: &mut InferenceSession, + params: &InferenceParameters, + input_tokens: &[TokenId], + output_request: &mut EvaluateOutputRequest, + ); + + /// Model vocabulary + fn vocabulary(&self) -> &Vocabulary; + + /// Model context size + fn n_ctx(&self) -> usize; +} +impl> Model for M { + fn start_session(&self, params: InferenceSessionParameters) -> InferenceSession { + KnownModel::start_session(self, params) + } + + fn evaluate( + &self, + session: &mut InferenceSession, + params: &InferenceParameters, + input_tokens: &[TokenId], + output_request: &mut EvaluateOutputRequest, + ) { + KnownModel::evaluate(self, session, params, input_tokens, output_request) + } + + fn vocabulary(&self) -> &Vocabulary { + KnownModel::vocabulary(self) + } + + fn n_ctx(&self) -> usize { + KnownModel::n_ctx(self) + } +} + +/// Implemented by model hyperparameters for loading and saving to a GGML model read/writer. +pub trait Hyperparameters: Sized + Default { + /// Read the parameters from a reader. + fn read(reader: &mut dyn BufRead) -> Result; + + /// Get the number of tokens in the vocabulary. 
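With the blanket `Model` impl for every `KnownModel` above, downstream code can stay completely architecture-agnostic. A minimal sketch that only touches the type-erased surface (plus the now-public `Vocabulary` fields):

```rust
use llm_base::Model;

fn describe(model: &dyn Model) {
    // Everything here comes from the `Model` trait or public `Vocabulary` fields.
    println!(
        "context window: {} tokens, vocabulary size: {} tokens",
        model.n_ctx(),
        model.vocabulary().id_to_token.len()
    );
}
```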
+ fn n_vocabulary(&self) -> usize; +} diff --git a/llama-cli/src/snapshot.rs b/llm-base/src/snapshot.rs similarity index 89% rename from llama-cli/src/snapshot.rs rename to llm-base/src/snapshot.rs index 3601de76..af74ada2 100644 --- a/llama-cli/src/snapshot.rs +++ b/llm-base/src/snapshot.rs @@ -1,10 +1,12 @@ -use llama_rs::{InferenceSession, InferenceSessionParameters, Model}; use std::{ error::Error, fs::File, io::{BufReader, BufWriter}, path::Path, }; + +use crate::{InferenceSession, InferenceSessionParameters, Model}; + use zstd::{ stream::{read::Decoder, write::Encoder}, zstd_safe::CompressionLevel, @@ -12,13 +14,14 @@ use zstd::{ const SNAPSHOT_COMPRESSION_LEVEL: CompressionLevel = 1; +/// Read or create a session pub fn read_or_create_session( - model: &Model, + model: &dyn Model, persist_session: Option<&Path>, load_session: Option<&Path>, inference_session_params: InferenceSessionParameters, ) -> (InferenceSession, bool) { - fn load(model: &Model, path: &Path) -> InferenceSession { + fn load(model: &dyn Model, path: &Path) -> InferenceSession { let file = unwrap_or_exit(File::open(path), || format!("Could not open file {path:?}")); let decoder = unwrap_or_exit(Decoder::new(BufReader::new(file)), || { format!("Could not create decoder for {path:?}") @@ -40,7 +43,8 @@ pub fn read_or_create_session( } } -pub fn write_session(mut session: llama_rs::InferenceSession, path: &Path) { +/// Write the session +pub fn write_session(mut session: InferenceSession, path: &Path) { // SAFETY: the session is consumed here, so nothing else can access it. let snapshot = unsafe { session.get_snapshot() }; let file = unwrap_or_exit(File::create(path), || { diff --git a/llama-rs/src/util.rs b/llm-base/src/util.rs similarity index 96% rename from llama-rs/src/util.rs rename to llm-base/src/util.rs index afec7af4..90dda24a 100644 --- a/llama-rs/src/util.rs +++ b/llm-base/src/util.rs @@ -1,3 +1,4 @@ +pub use ggml::util::*; use std::path::{Path, PathBuf}; /// NOTE: The original code relies in promotion rules and automatic cast between @@ -7,6 +8,7 @@ use std::path::{Path, PathBuf}; /// the ctx_size found using this code, and the one in llama.cpp. The number for /// rust ends up being slightly lower, but no "out of memory" errors are /// reported by ggml. +#[macro_export] macro_rules! mulf { ($term:expr, $($terms:expr),*) => { usize::try_from((($term as f64) $(* ($terms as f64))*) as u64).unwrap() @@ -14,7 +16,6 @@ macro_rules! mulf { } use memmap2::{Mmap, MmapAsRawDesc, MmapOptions}; -pub(crate) use mulf; use thiserror::Error; /// Used to buffer incoming tokens until they produce a valid string of UTF-8 text. @@ -83,9 +84,8 @@ pub enum FindAllModelFilesError { IO(#[from] std::io::Error), } -pub(crate) fn find_all_model_files( - main_path: &Path, -) -> Result, FindAllModelFilesError> { +/// Find all the files related to a model. 
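The relocated snapshot helpers keep their CLI-era shape but now work against `&dyn Model`. A sketch of the intended round trip, where the persisted path is reused on later runs (the file name and the placement of the inference calls are assumptions):

```rust
use std::path::Path;

use llm_base::{snapshot, InferenceSessionParameters, Model};

fn run_with_persistence(model: &dyn Model) {
    let session_path = Path::new("./session.zstd"); // assumed location
    let params = InferenceSessionParameters::default();

    // Reuses a previously persisted session if one is available, otherwise starts fresh.
    let (session, restored) =
        snapshot::read_or_create_session(model, Some(session_path), None, params);
    if restored {
        eprintln!("continuing from a saved session");
    }

    // A real caller would make `session` mutable and run inference here.

    // Consumes the session and writes a zstd-compressed snapshot back to disk.
    snapshot::write_session(session, session_path);
}
```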
+pub fn find_all_model_files(main_path: &Path) -> Result, FindAllModelFilesError> { Ok(collect_related_paths( main_path, std::fs::read_dir(main_path.parent().ok_or_else(|| { @@ -176,6 +176,7 @@ mod tests { } } +/// mmap with MAP_POPULATE pub fn mmap_populate(file: T) -> Result { unsafe { MmapOptions::new().populate().map(file) } } diff --git a/llama-rs/src/vocabulary.rs b/llm-base/src/vocabulary.rs similarity index 95% rename from llama-rs/src/vocabulary.rs rename to llm-base/src/vocabulary.rs index 32bdd07f..7443b222 100644 --- a/llama-rs/src/vocabulary.rs +++ b/llm-base/src/vocabulary.rs @@ -11,17 +11,17 @@ pub(crate) type TokenScore = f32; #[derive(Debug, Clone, Default)] pub struct Vocabulary { /// Maps every integer (index) token id to its corresponding token - pub(crate) id_to_token: Vec, + pub id_to_token: Vec, /// Maps every integer (index) token id to corresponding score - pub(crate) id_to_token_score: Vec, + pub id_to_token_score: Vec, // todo: use a radix tree /// Maps a token to a token id - pub(crate) token_to_id: HashMap, + pub token_to_id: HashMap, /// The longest token in this vocabulary - pub(crate) max_token_length: usize, + pub max_token_length: usize, } impl Vocabulary { @@ -32,7 +32,7 @@ impl Vocabulary { /// # Panics /// - This function can panic if `id` does not correspond to the next token in the vocabulary. /// That is, if there are already `n` tokens in the vocabulary, then `id` must be `n`. - pub(crate) fn push_token(&mut self, id: TokenId, content: Token, score: TokenScore) { + pub fn push_token(&mut self, id: TokenId, content: Token, score: TokenScore) { // These are loader invariants. If this is broken, then the loader is broken and this is a bug, // not an issue with the model itself. assert_eq!(self.id_to_token.len(), self.id_to_token_score.len()); diff --git a/llama-cli/Cargo.toml b/llm-cli/Cargo.toml similarity index 65% rename from llama-cli/Cargo.toml rename to llm-cli/Cargo.toml index 15ba6a9c..a552c364 100644 --- a/llama-cli/Cargo.toml +++ b/llm-cli/Cargo.toml @@ -1,20 +1,22 @@ [package] edition = "2021" -name = "llama-cli" +name = "llm-cli" version = {workspace = true} -# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html +[[bin]] +name = "llm" +path = "src/main.rs" [dependencies] -llama-rs = { path = "../llama-rs", features = ["convert", "quantize"] } +llm = { path = "../llm" } +log = { workspace = true } rand = {workspace = true} bincode = "1.3.3" clap = {version = "4.1.8", features = ["derive"]} color-eyre = {version = "0.6.2", default-features = false} env_logger = "0.10.0" -log = "0.4" num_cpus = "1.15.0" once_cell = "1.17.1" rustyline = "11.0.0" diff --git a/llama-cli/src/cli_args.rs b/llm-cli/src/cli_args.rs similarity index 83% rename from llama-cli/src/cli_args.rs rename to llm-cli/src/cli_args.rs index b14a3e73..725f4451 100644 --- a/llama-cli/src/cli_args.rs +++ b/llm-cli/src/cli_args.rs @@ -1,9 +1,13 @@ -use std::path::PathBuf; +use std::{ + fmt::Debug, + path::{Path, PathBuf}, +}; use clap::{Parser, ValueEnum}; use color_eyre::eyre::{Result, WrapErr}; -use llama_rs::{ - InferenceParameters, InferenceSessionParameters, ModelKVMemoryType, TokenBias, EOT_TOKEN_ID, +use llm::{ + ElementType, InferenceParameters, InferenceSessionParameters, LoadProgress, Model, + ModelKVMemoryType, TokenBias, EOT_TOKEN_ID, }; use rand::SeedableRng; @@ -249,6 +253,10 @@ pub struct ModelLoad { #[arg(long, short = 'm')] pub model_path: PathBuf, + /// The model architecture to use. 
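`Vocabulary` and `push_token` are public now so that per-architecture loaders (and the `LoadHandler` above) can build the vocabulary incrementally. A sketch of the id-ordering contract documented on `push_token`; it assumes tokens are raw byte strings (`Vec<u8>`) and that `TokenId` is a plain integer, which the loader code above implies but this diff does not spell out:

```rust
use llm_base::Vocabulary;

fn main() {
    let mut vocab = Vocabulary::default();

    // Ids must be pushed densely and in order, starting at 0.
    vocab.push_token(0, b"<unk>".to_vec(), 0.0);
    vocab.push_token(1, b"hello".to_vec(), -1.5);

    assert_eq!(vocab.id_to_token.len(), 2);
    assert_eq!(vocab.token_to_id[b"hello".as_slice()], 1);

    // vocab.push_token(5, b"oops".to_vec(), 0.0); // would panic: 2 is the only valid next id
}
```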
+ #[arg(long, short = 'a', default_value_t, value_enum)] + pub model_architecture: ModelArchitecture, + /// Sets the size of the context (in tokens). Allows feeding longer prompts. /// Note that this affects memory. /// @@ -267,18 +275,29 @@ pub struct ModelLoad { #[arg(long)] pub no_mmap: bool, } +#[derive(Parser, Debug, ValueEnum, Clone, Copy, Default)] +pub enum ModelArchitecture { + /// Meta's LLaMA model and derivatives (Vicuna, etc). + #[default] + Llama, + /// OpenAI's GPT2 architecture and derivatives (Cerebras, etc). + Gpt2, + /// The BLOOM model. This is currently disabled as it does not work. + Bloom, +} impl ModelLoad { - pub fn load(&self) -> Result { + pub fn load(&self) -> Result> { let now = std::time::Instant::now(); - let model = llama_rs::Model::load( - &self.model_path, - !self.no_mmap, - self.num_ctx_tokens, - |progress| { - use llama_rs::LoadProgress; - match progress { - LoadProgress::HyperparametersLoaded(hparams) => { - log::debug!("Loaded hyperparameters {hparams:#?}") + + let prefer_mmap = !self.no_mmap; + let model = self + .load_indirect( + &self.model_path, + !self.no_mmap, + self.num_ctx_tokens, + |progress| match progress { + LoadProgress::HyperparametersLoaded => { + log::debug!("Loaded hyperparameters") } LoadProgress::ContextSize { bytes } => log::info!( "ggml ctx size = {:.2} MB\n", @@ -295,7 +314,7 @@ impl ModelLoad { current_part, total_parts, file.to_string_lossy(), - !self.no_mmap + prefer_mmap ) } LoadProgress::PartTensorLoaded { @@ -320,10 +339,9 @@ impl ModelLoad { tensor_count ); } - } - }, - ) - .wrap_err("Could not load model")?; + }, + ) + .wrap_err("Could not load model")?; log::info!( "Model fully loaded! Elapsed: {}ms", @@ -332,6 +350,35 @@ impl ModelLoad { Ok(model) } + + fn load_indirect( + &self, + path: &Path, + prefer_mmap: bool, + n_context_tokens: usize, + load_progress_callback: impl FnMut(LoadProgress<'_>), + ) -> Result> { + Ok(match self.model_architecture { + ModelArchitecture::Llama => Box::new(llm::load::( + path, + prefer_mmap, + n_context_tokens, + load_progress_callback, + )?), + ModelArchitecture::Gpt2 => Box::new(llm::load::( + path, + prefer_mmap, + n_context_tokens, + load_progress_callback, + )?), + ModelArchitecture::Bloom => Box::new(llm::load::( + path, + prefer_mmap, + n_context_tokens, + load_progress_callback, + )?), + }) + } } #[derive(Parser, Debug)] @@ -390,13 +437,13 @@ pub enum FileType { /// Float 32-bit. F32, } -impl From for llama_rs::FileType { +impl From for llm::FileType { fn from(t: FileType) -> Self { match t { - FileType::Q4_0 => llama_rs::FileType::MostlyQ4_0, - FileType::Q4_1 => llama_rs::FileType::MostlyQ4_1, - FileType::F16 => llama_rs::FileType::MostlyF16, - FileType::F32 => llama_rs::FileType::F32, + FileType::Q4_0 => llm::FileType::MostlyQ4_0, + FileType::Q4_1 => llm::FileType::MostlyQ4_1, + FileType::F16 => llm::FileType::MostlyF16, + FileType::F32 => llm::FileType::F32, } } } @@ -423,11 +470,11 @@ pub enum QuantizationTarget { /// Quantized 4-bit (type 1). 
Q4_1, } -impl From for llama_rs::ElementType { +impl From for ElementType { fn from(t: QuantizationTarget) -> Self { match t { - QuantizationTarget::Q4_0 => llama_rs::ElementType::Q4_0, - QuantizationTarget::Q4_1 => llama_rs::ElementType::Q4_1, + QuantizationTarget::Q4_0 => ElementType::Q4_0, + QuantizationTarget::Q4_1 => ElementType::Q4_1, } } } diff --git a/llama-cli/src/main.rs b/llm-cli/src/main.rs similarity index 92% rename from llama-cli/src/main.rs rename to llm-cli/src/main.rs index cc142875..adbc1a2e 100644 --- a/llama-cli/src/main.rs +++ b/llm-cli/src/main.rs @@ -3,11 +3,10 @@ use std::{convert::Infallible, io::Write}; use clap::Parser; use cli_args::Args; use color_eyre::eyre::{Context, Result}; -use llama_rs::{convert::convert_pth_to_ggml, InferenceError}; +use llm::{llama::convert::convert_pth_to_ggml, snapshot, InferenceError}; use rustyline::error::ReadlineError; mod cli_args; -mod snapshot; fn main() -> Result<()> { env_logger::builder() @@ -34,7 +33,7 @@ fn infer(args: &cli_args::Infer) -> Result<()> { let inference_session_params = args.generate.inference_session_parameters(); let model = args.model_load.load()?; let (mut session, session_loaded) = snapshot::read_or_create_session( - &model, + model.as_ref(), args.persist_session.as_deref(), args.generate.load_session.as_deref(), inference_session_params, @@ -43,7 +42,7 @@ fn infer(args: &cli_args::Infer) -> Result<()> { let mut rng = args.generate.rng(); let res = session.inference_with_prompt::( - &model, + model.as_ref(), &inference_params, &prompt, args.generate.num_predict, @@ -59,14 +58,15 @@ fn infer(args: &cli_args::Infer) -> Result<()> { match res { Ok(_) => (), - Err(llama_rs::InferenceError::ContextFull) => { + Err(InferenceError::ContextFull) => { log::warn!("Context window full, stopping inference.") } - Err(llama_rs::InferenceError::TokenizationFailed) => { + Err(InferenceError::TokenizationFailed) => { log::error!("Failed to tokenize initial prompt."); } - Err(llama_rs::InferenceError::UserCallback(_)) - | Err(llama_rs::InferenceError::EndOfText) => unreachable!("cannot fail"), + Err(InferenceError::UserCallback(_)) | Err(InferenceError::EndOfText) => { + unreachable!("cannot fail") + } } if let Some(session_path) = args.save_session.as_ref().or(args.persist_session.as_ref()) { @@ -116,7 +116,7 @@ fn interactive( let inference_session_params = args.generate.inference_session_parameters(); let model = args.model_load.load()?; let (mut session, session_loaded) = snapshot::read_or_create_session( - &model, + model.as_ref(), None, args.generate.load_session.as_deref(), inference_session_params, @@ -142,7 +142,7 @@ fn interactive( let mut sp = spinners::Spinner::new(spinners::Spinners::Dots2, "".to_string()); if let Err(InferenceError::ContextFull) = session.feed_prompt::( - &model, + model.as_ref(), &inference_params, &prompt, |_| Ok(()), @@ -152,7 +152,7 @@ fn interactive( sp.stop(); let res = session.inference_with_prompt::( - &model, + model.as_ref(), &inference_params, "", args.generate.num_predict, @@ -186,13 +186,13 @@ fn interactive( } fn quantize(args: &cli_args::Quantize) -> Result<()> { - use llama_rs::quantize::{quantize, QuantizeProgress::*}; + use llm::llama::quantize::{quantize, QuantizeProgress::*}; quantize( &args.source, &args.destination, args.target.into(), |progress| match progress { - HyperparametersLoaded(_) => log::info!("Loaded hyperparameters"), + HyperparametersLoaded => log::info!("Loaded hyperparameters"), TensorLoading { name, dims, diff --git a/llm/Cargo.toml b/llm/Cargo.toml new 
file mode 100644 index 00000000..1ce45915 --- /dev/null +++ b/llm/Cargo.toml @@ -0,0 +1,15 @@ +[package] +name = "llm" +version = { workspace = true } +edition = "2021" + +[dependencies] +llm-base = { path = "../llm-base" } +llama = { path = "../llama", features = ["convert", "quantize"], optional = true } +gpt2 = { path = "../gpt2", optional = true } +bloom = { path = "../bloom", optional = true } + +[features] +default = ["llama", "gpt2", "bloom"] +llama = ["dep:llama"] +gpt2 = ["dep:gpt2"] diff --git a/llm/src/lib.rs b/llm/src/lib.rs new file mode 100644 index 00000000..53a5a164 --- /dev/null +++ b/llm/src/lib.rs @@ -0,0 +1,13 @@ +pub use llm_base::{ + load, snapshot, ElementType, FileType, InferenceError, InferenceParameters, InferenceSession, + InferenceSessionParameters, InferenceSnapshot, KnownModel, LoadError, LoadProgress, Model, + ModelKVMemoryType, SnapshotError, TokenBias, TokenId, TokenUtf8Buffer, Vocabulary, + EOT_TOKEN_ID, +}; + +#[cfg(feature = "bloom")] +pub use bloom::{self, Bloom}; +#[cfg(feature = "gpt2")] +pub use gpt2::{self, Gpt2}; +#[cfg(feature = "llama")] +pub use llama::{self, Llama}; diff --git a/tools/generate-ggml-bindings/Cargo.toml b/tools/generate-ggml-bindings/Cargo.toml new file mode 100644 index 00000000..0efc3ef6 --- /dev/null +++ b/tools/generate-ggml-bindings/Cargo.toml @@ -0,0 +1,8 @@ +[package] +name = "generate-ggml-bindings" +version = "0.1.0" +edition = "2021" +publish = false + +[dependencies] +bindgen = "0.65.1" diff --git a/tools/generate-ggml-bindings/src/main.rs b/tools/generate-ggml-bindings/src/main.rs new file mode 100644 index 00000000..aca90252 --- /dev/null +++ b/tools/generate-ggml-bindings/src/main.rs @@ -0,0 +1,28 @@ +//! Helper tool to generate the bindings for the ggml crate. +//! +//! Assumed to be run from the root of the workspace. + +use std::path::PathBuf; + +fn main() { + const HEADER_PATH: &str = "ggml/sys/ggml/include/ggml/ggml.h"; + + let bindings = bindgen::Builder::default() + .header(HEADER_PATH) + // Suppress some warnings + .raw_line("#![allow(non_upper_case_globals)]") + .raw_line("#![allow(non_camel_case_types)]") + .raw_line("#![allow(non_snake_case)]") + .raw_line("#![allow(unused)]") + // Do not generate code for ggml's includes (stdlib) + .allowlist_file(HEADER_PATH) + .generate() + .expect("Unable to generate bindings"); + + let out_path = PathBuf::from("ggml").join("sys").join("src").join("lib.rs"); + bindings + .write_to_file(out_path) + .expect("Couldn't write bindings"); + + println!("Successfully updated bindings"); +}
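To close the loop, a sketch of what a downstream consumer of the new `llm` facade looks like: one dependency, the architecture picked at runtime, every model handed back as a `Box<dyn Model>`, mirroring the CLI's `load_indirect` above. The `Arch` enum, paths and context size are assumptions, and the match arms assume the default feature set (all three model crates enabled):

```rust
use std::path::Path;

use llm::{load, Model};

enum Arch {
    Llama,
    Gpt2,
    Bloom,
}

fn load_any(arch: Arch, path: &Path) -> Result<Box<dyn Model>, llm::LoadError> {
    // Progress reporting is skipped here; any `FnMut(LoadProgress)` closure works.
    Ok(match arch {
        Arch::Llama => Box::new(load::<llm::Llama>(path, true, 512, |_| {})?),
        Arch::Gpt2 => Box::new(load::<llm::Gpt2>(path, true, 512, |_| {})?),
        Arch::Bloom => Box::new(load::<llm::Bloom>(path, true, 512, |_| {})?),
    })
}
```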