From 6e1f17ca86ad160c26be004af3f4352180c712f7 Mon Sep 17 00:00:00 2001 From: FL03 Date: Sun, 18 Feb 2024 13:06:20 -0600 Subject: [PATCH 01/87] update Signed-off-by: FL03 --- acme/tests/autodiff.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/acme/tests/autodiff.rs b/acme/tests/autodiff.rs index 4f61cdff..7ff41a0a 100644 --- a/acme/tests/autodiff.rs +++ b/acme/tests/autodiff.rs @@ -168,7 +168,7 @@ fn test_sigmoid() { assert_eq!(autodiff!(x: fn sigmoid(x: f64) -> f64 { 1_f64 / (1_f64 + (-x).exp()) }), sigmoid_prime(x)); } -// #[ignore = "Currently, support for function calls is not fully implemented"] +#[ignore = "Currently, support for function calls is not fully implemented"] #[test] fn test_function_call() { let x = 2_f64; From 7f6415f4233daa9b2d65445ebf5a31e4e7cad7d8 Mon Sep 17 00:00:00 2001 From: FL03 Date: Sun, 18 Feb 2024 14:22:10 -0600 Subject: [PATCH 02/87] update Signed-off-by: FL03 --- acme/examples/autodiff.rs | 22 +++++++++++++--------- core/src/errors/error.rs | 2 +- core/src/exp/mod.rs | 1 + core/src/stores/stack.rs | 2 +- derive/examples/params.rs | 4 ++-- macros/src/ad/handle/expr/method.rs | 7 +------ macros/src/ast/partials.rs | 17 +++++++++++++++-- macros/src/lib.rs | 11 +++++++++-- 8 files changed, 43 insertions(+), 23 deletions(-) diff --git a/acme/examples/autodiff.rs b/acme/examples/autodiff.rs index d8afb0b1..0d2df1a0 100644 --- a/acme/examples/autodiff.rs +++ b/acme/examples/autodiff.rs @@ -2,6 +2,7 @@ Appellation: autodiff Contrib: FL03 */ +#![allow(dead_code, unused_variables)] #![feature(fn_traits)] extern crate acme; @@ -16,18 +17,21 @@ macro_rules! eval { } fn main() -> Result<(), Box> { - let x: f64 = 2.0; + let x = 2_f64; + // samples(x); - eval!(x: x.tan()); - - eval!(x: x.sin()); - eval!(x: x.cos().sin()); - // show_item!(sigmoid::); - unsafe { - println!("{:?}", sigmoid::.call((2_f64,))); - } + show_item!(acme::prelude::sigmoid::(x)); + println!("{:?}", sigmoid::(2_f64)); Ok(()) } + +fn samples(x: f64) { + eval!(x: x.tan()); + + eval!(x: x.sin()); + + eval!(x: x.cos().sin()); +} \ No newline at end of file diff --git a/core/src/errors/error.rs b/core/src/errors/error.rs index 44457f8c..ef96a576 100644 --- a/core/src/errors/error.rs +++ b/core/src/errors/error.rs @@ -56,7 +56,7 @@ where } impl From for Error { - fn from(err: petgraph::algo::NegativeCycle) -> Self { + fn from(_err: petgraph::algo::NegativeCycle) -> Self { Self::new(ErrorKind::Graph, "Negative Cycle detected") } } diff --git a/core/src/exp/mod.rs b/core/src/exp/mod.rs index 15194a1f..a1fae7e3 100644 --- a/core/src/exp/mod.rs +++ b/core/src/exp/mod.rs @@ -5,6 +5,7 @@ //! # Experimental //! //! 
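For reference, the sigmoid test touched by PATCH 01 compares the macro's output against the closed-form derivative of the logistic function. A minimal, std-only sketch of that identity (an editorial illustration; it is not part of any patch in this series):

fn sigmoid(x: f64) -> f64 {
    // logistic function: 1 / (1 + e^(-x))
    (1.0 + (-x).exp()).recip()
}

fn sigmoid_prime(x: f64) -> f64 {
    // d/dx sigmoid(x) = sigmoid(x) * (1 - sigmoid(x)), which is the
    // exp(-x) / (1 + exp(-x))^2 form used by the test suite's sigmoid_prime
    sigmoid(x) * (1.0 - sigmoid(x))
}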
+#![allow(dead_code, unused_imports, unused_variables)] pub mod basic; pub mod dynamic; pub mod ops; diff --git a/core/src/stores/stack.rs b/core/src/stores/stack.rs index 4a4a98e7..4e672757 100644 --- a/core/src/stores/stack.rs +++ b/core/src/stores/stack.rs @@ -4,5 +4,5 @@ */ pub struct Stack { - pub(crate) store: Vec<(K, V)>, + pub store: Vec<(K, V)>, } diff --git a/derive/examples/params.rs b/derive/examples/params.rs index 555aa731..2a5c50d9 100644 --- a/derive/examples/params.rs +++ b/derive/examples/params.rs @@ -7,8 +7,8 @@ extern crate acme_derive as acme; use acme::Params; fn main() -> Result<(), Box> { - let params = LinearParams { weight: 1.0 }; - let wk = LinearParamsKey::Weight; + let _params = LinearParams { weight: 1.0 }; + let _wk = LinearParamsKey::Weight; Ok(()) } diff --git a/macros/src/ad/handle/expr/method.rs b/macros/src/ad/handle/expr/method.rs index d7edbbd0..d508d4ea 100644 --- a/macros/src/ad/handle/expr/method.rs +++ b/macros/src/ad/handle/expr/method.rs @@ -18,12 +18,7 @@ pub fn handle_call(expr: &ExprCall, var: &Ident) -> TokenStream { let arg = handle_expr(&arg, var); grad = quote! { #grad + #arg }; } - if let Expr::Path(path) = &**func { - println!("{:?}", expr.span().unwrap().source_file().path()); - if let Some(block) = expr.span().source_text() { - println!("********\n\n\t\tFunction\n{:?}\nArgs:\n{:?}\n{:?}\n\n********", func, args, &block); - } - } + // let df = handle_expr(&func, var); diff --git a/macros/src/ast/partials.rs b/macros/src/ast/partials.rs index 5146ddad..1c595463 100644 --- a/macros/src/ast/partials.rs +++ b/macros/src/ast/partials.rs @@ -5,6 +5,7 @@ use syn::parse::{Parse, ParseStream, Result}; use syn::punctuated::Punctuated; use syn::{Attribute, Block, Expr, Ident, ItemFn, Signature, Token, Type, Visibility}; +use syn::braced; pub struct Partial { pub expr: Expr, @@ -39,14 +40,26 @@ impl Parse for Partials { } pub struct StructuredPartial { - + } pub struct PartialFnCall { pub attrs: Vec, pub body: Box, pub sig: Signature, - pub vis: Visibility, +} + +impl Parse for PartialFnCall { + fn parse(input: ParseStream) -> Result { + let attrs = input.call(Attribute::parse_outer)?; + let sig: Signature = input.parse()?; + let body: Block = input.parse()?; + Ok(Self { + attrs, + body: Box::new(body), + sig, + }) + } } pub enum PartialFn { diff --git a/macros/src/lib.rs b/macros/src/lib.rs index 6034a0a4..a79fdd0d 100644 --- a/macros/src/lib.rs +++ b/macros/src/lib.rs @@ -19,7 +19,8 @@ pub(crate) mod graph; use ast::partials::*; use proc_macro::TokenStream; use quote::quote; -use syn::{parse_macro_input, Expr}; +use syn::{parse_macro_input, Expr,}; +use syn::spanned::Spanned; #[proc_macro_attribute] pub fn show_streams(attr: TokenStream, item: TokenStream) -> TokenStream { @@ -30,7 +31,13 @@ pub fn show_streams(attr: TokenStream, item: TokenStream) -> TokenStream { #[proc_macro] pub fn show_item(item: TokenStream) -> TokenStream { - println!("item: \"{:?}\"", syn::parse_macro_input!(item as syn::ItemFn)); + let expr = parse_macro_input!(item as Expr); + // let item = item.to_string(); + let span = expr.span(); + println!("Span Bytes: {:?}", span.byte_range()); + println!("Span (start, end): ({:?}, {:?})", span.start(), span.end()); + println!("Source File: {:?}", span.unwrap().source_file()); + println!("Source Text: {:?}", span.source_text()); quote! 
{ }.into() } From f5170dba2654c6179ce18b012f8c0b9f966d1da4 Mon Sep 17 00:00:00 2001 From: FL03 Date: Sun, 18 Feb 2024 15:07:38 -0600 Subject: [PATCH 03/87] update Signed-off-by: FL03 --- acme/examples/autodiff.rs | 5 ++--- core/src/lib.rs | 3 +-- core/src/specs.rs | 4 ---- core/src/specs/func/mod.rs | 6 ++++++ core/src/specs/func/structural.rs | 10 ++++++++++ core/src/{ => specs}/hkt/applicative.rs | 0 core/src/{ => specs}/hkt/functor.rs | 0 core/src/{ => specs}/hkt/mod.rs | 0 core/src/{ => specs}/hkt/monad.rs | 0 core/src/specs/mod.rs | 7 +++++++ macros/src/lib.rs | 5 +++-- 11 files changed, 29 insertions(+), 11 deletions(-) delete mode 100644 core/src/specs.rs create mode 100644 core/src/specs/func/mod.rs create mode 100644 core/src/specs/func/structural.rs rename core/src/{ => specs}/hkt/applicative.rs (100%) rename core/src/{ => specs}/hkt/functor.rs (100%) rename core/src/{ => specs}/hkt/mod.rs (100%) rename core/src/{ => specs}/hkt/monad.rs (100%) create mode 100644 core/src/specs/mod.rs diff --git a/acme/examples/autodiff.rs b/acme/examples/autodiff.rs index 0d2df1a0..7ba5b78e 100644 --- a/acme/examples/autodiff.rs +++ b/acme/examples/autodiff.rs @@ -20,9 +20,8 @@ fn main() -> Result<(), Box> { let x = 2_f64; // samples(x); - - show_item!(acme::prelude::sigmoid::(x)); - println!("{:?}", sigmoid::(2_f64)); + // let z = sigmoid(x); + show_item!(sigmoid(x)); Ok(()) diff --git a/core/src/lib.rs b/core/src/lib.rs index b18adb9c..22516ef3 100644 --- a/core/src/lib.rs +++ b/core/src/lib.rs @@ -19,13 +19,12 @@ pub(crate) mod exp; pub mod cmp; pub mod errors; pub mod graphs; -pub mod hkt; pub mod ops; pub mod stores; pub mod prelude { pub use crate::primitives::*; - // pub use crate::specs::*; + pub use crate::specs::*; pub use crate::utils::*; pub use crate::cmp::*; diff --git a/core/src/specs.rs b/core/src/specs.rs deleted file mode 100644 index 1d8faa71..00000000 --- a/core/src/specs.rs +++ /dev/null @@ -1,4 +0,0 @@ -/* - Appellation: specs - Contrib: FL03 -*/ diff --git a/core/src/specs/func/mod.rs b/core/src/specs/func/mod.rs new file mode 100644 index 00000000..316cee12 --- /dev/null +++ b/core/src/specs/func/mod.rs @@ -0,0 +1,6 @@ +/* + Appellation: func + Contrib: FL03 +*/ + +pub(crate) mod structural; \ No newline at end of file diff --git a/core/src/specs/func/structural.rs b/core/src/specs/func/structural.rs new file mode 100644 index 00000000..c823be0f --- /dev/null +++ b/core/src/specs/func/structural.rs @@ -0,0 +1,10 @@ +/* + Appellation: structural + Contrib: FL03 +*/ + +pub trait StructuralFn { + type Output; + + fn eval(&self) -> Self::Output; +} \ No newline at end of file diff --git a/core/src/hkt/applicative.rs b/core/src/specs/hkt/applicative.rs similarity index 100% rename from core/src/hkt/applicative.rs rename to core/src/specs/hkt/applicative.rs diff --git a/core/src/hkt/functor.rs b/core/src/specs/hkt/functor.rs similarity index 100% rename from core/src/hkt/functor.rs rename to core/src/specs/hkt/functor.rs diff --git a/core/src/hkt/mod.rs b/core/src/specs/hkt/mod.rs similarity index 100% rename from core/src/hkt/mod.rs rename to core/src/specs/hkt/mod.rs diff --git a/core/src/hkt/monad.rs b/core/src/specs/hkt/monad.rs similarity index 100% rename from core/src/hkt/monad.rs rename to core/src/specs/hkt/monad.rs diff --git a/core/src/specs/mod.rs b/core/src/specs/mod.rs new file mode 100644 index 00000000..877aa348 --- /dev/null +++ b/core/src/specs/mod.rs @@ -0,0 +1,7 @@ +/* + Appellation: specs + Contrib: FL03 +*/ + +pub mod func; +pub mod hkt; \ No newline at end 
of file diff --git a/macros/src/lib.rs b/macros/src/lib.rs index a79fdd0d..f93b613d 100644 --- a/macros/src/lib.rs +++ b/macros/src/lib.rs @@ -33,10 +33,11 @@ pub fn show_streams(attr: TokenStream, item: TokenStream) -> TokenStream { pub fn show_item(item: TokenStream) -> TokenStream { let expr = parse_macro_input!(item as Expr); // let item = item.to_string(); - let span = expr.span(); + let span_expr = expr.span(); + let span = expr.span().unwrap().parent().expect(""); println!("Span Bytes: {:?}", span.byte_range()); println!("Span (start, end): ({:?}, {:?})", span.start(), span.end()); - println!("Source File: {:?}", span.unwrap().source_file()); + println!("Source File: {:?}", span.source_file()); println!("Source Text: {:?}", span.source_text()); quote! { }.into() } From 3ceab4d0ae5e6fc2a02e263fb2f24e48af281b6f Mon Sep 17 00:00:00 2001 From: FL03 Date: Sun, 18 Feb 2024 15:27:17 -0600 Subject: [PATCH 04/87] update Signed-off-by: FL03 --- macros/src/ast/mod.rs | 27 +++++++++++++++++++++++++++ macros/src/ast/partials.rs | 5 +---- macros/src/lib.rs | 11 ++++++----- 3 files changed, 34 insertions(+), 9 deletions(-) diff --git a/macros/src/ast/mod.rs b/macros/src/ast/mod.rs index 7da61062..921514aa 100644 --- a/macros/src/ast/mod.rs +++ b/macros/src/ast/mod.rs @@ -4,3 +4,30 @@ */ pub mod partials; + +use proc_macro2::Span; +use quote::{quote, ToTokens}; +use syn::Expr; +use syn::parse::{Parse, ParseStream, Result}; +use syn::spanned::Spanned; + +pub struct Ast { + pub expr: Expr, + pub span: Span, +} + +impl Parse for Ast { + fn parse(input: ParseStream) -> Result { + // let span = input.span(); + + let expr: Expr = input.parse()?; + let span = expr.span(); + Ok(Self { expr, span, }) + } +} + +impl ToTokens for Ast { + fn to_tokens(&self, tokens: &mut proc_macro2::TokenStream) { + tokens.extend(quote! {#self.expr }) + } +} \ No newline at end of file diff --git a/macros/src/ast/partials.rs b/macros/src/ast/partials.rs index 1c595463..5fbae9ab 100644 --- a/macros/src/ast/partials.rs +++ b/macros/src/ast/partials.rs @@ -39,10 +39,6 @@ impl Parse for Partials { } } -pub struct StructuredPartial { - -} - pub struct PartialFnCall { pub attrs: Vec, pub body: Box, @@ -51,6 +47,7 @@ pub struct PartialFnCall { impl Parse for PartialFnCall { fn parse(input: ParseStream) -> Result { + let attrs = input.call(Attribute::parse_outer)?; let sig: Signature = input.parse()?; let body: Block = input.parse()?; diff --git a/macros/src/lib.rs b/macros/src/lib.rs index f93b613d..130a3aa8 100644 --- a/macros/src/lib.rs +++ b/macros/src/lib.rs @@ -22,6 +22,8 @@ use quote::quote; use syn::{parse_macro_input, Expr,}; use syn::spanned::Spanned; + + #[proc_macro_attribute] pub fn show_streams(attr: TokenStream, item: TokenStream) -> TokenStream { println!("attr: \"{}\"", attr.to_string()); @@ -31,15 +33,14 @@ pub fn show_streams(attr: TokenStream, item: TokenStream) -> TokenStream { #[proc_macro] pub fn show_item(item: TokenStream) -> TokenStream { + let expr = parse_macro_input!(item as Expr); - // let item = item.to_string(); - let span_expr = expr.span(); - let span = expr.span().unwrap().parent().expect(""); + let span = expr.span(); println!("Span Bytes: {:?}", span.byte_range()); println!("Span (start, end): ({:?}, {:?})", span.start(), span.end()); - println!("Source File: {:?}", span.source_file()); + println!("Source File: {:?}", span.unwrap().source_file()); println!("Source Text: {:?}", span.source_text()); - quote! { }.into() + quote! 
{ #expr }.into() } #[proc_macro] From e6a2626c8732ff61720eb1e6828419c23eba0bad Mon Sep 17 00:00:00 2001 From: FL03 Date: Tue, 20 Feb 2024 09:40:29 -0600 Subject: [PATCH 05/87] update Signed-off-by: FL03 --- acme/examples/autodiff.rs | 15 ++- acme/tests/autodiff.rs | 23 +++- core/src/lib.rs | 8 +- core/src/specs/func/mod.rs | 2 +- core/src/specs/func/structural.rs | 4 +- core/src/specs/mod.rs | 2 +- core/src/utils.rs | 2 +- macros/Cargo.toml | 1 - macros/src/ad/handle/expr/binary.rs | 4 +- macros/src/ad/handle/expr/method.rs | 24 +--- macros/src/ad/handle/item.rs | 23 +++- macros/src/ad/ops/unary.rs | 2 +- macros/src/ast/mod.rs | 6 +- macros/src/ast/partials.rs | 4 +- macros/src/cmp/graph.rs | 10 -- macros/src/cmp/mod.rs | 3 +- macros/src/graph.rs | 193 ---------------------------- macros/src/lib.rs | 57 ++------ macros/tests/partial.rs | 63 --------- tensor/src/specs.rs | 6 + 20 files changed, 90 insertions(+), 362 deletions(-) delete mode 100644 macros/src/cmp/graph.rs delete mode 100644 macros/src/graph.rs delete mode 100644 macros/tests/partial.rs diff --git a/acme/examples/autodiff.rs b/acme/examples/autodiff.rs index 7ba5b78e..e97bb86a 100644 --- a/acme/examples/autodiff.rs +++ b/acme/examples/autodiff.rs @@ -6,8 +6,8 @@ #![feature(fn_traits)] extern crate acme; -use acme::{autodiff, show_item}; use acme::prelude::sigmoid; +use acme::{autodiff, partial, show_item, show_streams}; macro_rules! eval { ($var:ident: $ex:expr) => { @@ -22,15 +22,24 @@ fn main() -> Result<(), Box> { // let z = sigmoid(x); show_item!(sigmoid(x)); - + + multiply(x, x); Ok(()) } +#[show_streams(x)] +pub fn multiply(x: A, y: B) -> C +where + A: std::ops::Mul, +{ + x * y +} + fn samples(x: f64) { eval!(x: x.tan()); eval!(x: x.sin()); eval!(x: x.cos().sin()); -} \ No newline at end of file +} diff --git a/acme/tests/autodiff.rs b/acme/tests/autodiff.rs index 7ff41a0a..05cf38b4 100644 --- a/acme/tests/autodiff.rs +++ b/acme/tests/autodiff.rs @@ -123,7 +123,10 @@ fn test_foil() { let (x, y) = (1_f64, 2_f64); assert_eq!(autodiff!(x: (x + y) * (x + y)), 2_f64 * (x + y)); - assert_eq!(autodiff!(x: (x + y) * (x + y)), autodiff!(y: (x + y) * (x + y))); + assert_eq!( + autodiff!(x: (x + y) * (x + y)), + autodiff!(y: (x + y) * (x + y)) + ); } #[test] @@ -155,7 +158,11 @@ fn test_log() { #[test] fn test_chained() { let x: f64 = 2.0; - assert_abs_diff_eq!(autodiff!(x: x.sin() * x.cos()), 2_f64 * x.cos().square() - 1_f64, epsilon = 1e-8); + assert_abs_diff_eq!( + autodiff!(x: x.sin() * x.cos()), + 2_f64 * x.cos().square() - 1_f64, + epsilon = 1e-8 + ); assert_eq!(autodiff!(x: x.sin().cos()), -x.cos() * x.sin().sin()); assert_eq!(autodiff!(x: x.ln().ln()), (x * x.ln()).recip()); } @@ -164,8 +171,14 @@ fn test_chained() { fn test_sigmoid() { let x = 2_f64; assert_eq!(autodiff!(x: 1.0 / (1.0 + (-x).exp())), sigmoid_prime(x)); - assert_eq!(autodiff!(x: | x: f64 | 1.0 / (1.0 + (-x).exp())), sigmoid_prime(x)); - assert_eq!(autodiff!(x: fn sigmoid(x: f64) -> f64 { 1_f64 / (1_f64 + (-x).exp()) }), sigmoid_prime(x)); + assert_eq!( + autodiff!(x: | x: f64 | 1.0 / (1.0 + (-x).exp())), + sigmoid_prime(x) + ); + assert_eq!( + autodiff!(x: fn sigmoid(x: f64) -> f64 { 1_f64 / (1_f64 + (-x).exp()) }), + sigmoid_prime(x) + ); } #[ignore = "Currently, support for function calls is not fully implemented"] @@ -182,4 +195,4 @@ fn test_method() { assert_eq!(autodiff!(x: x.mul(y)), 2.0); assert_eq!(autodiff!(x: x.sigmoid()), sigmoid_prime(x)); -} \ No newline at end of file +} diff --git a/core/src/lib.rs b/core/src/lib.rs index 22516ef3..78d9422a 
100644 --- a/core/src/lib.rs +++ b/core/src/lib.rs @@ -6,7 +6,13 @@ //! //! #![allow(incomplete_features)] -#![feature(adt_const_params, fn_traits, tuple_trait, unboxed_closures)] +#![feature( + adt_const_params, + fn_traits, + rustc_private, + tuple_trait, + unboxed_closures +)] pub use self::primitives::*; diff --git a/core/src/specs/func/mod.rs b/core/src/specs/func/mod.rs index 316cee12..1a7b9dbf 100644 --- a/core/src/specs/func/mod.rs +++ b/core/src/specs/func/mod.rs @@ -3,4 +3,4 @@ Contrib: FL03 */ -pub(crate) mod structural; \ No newline at end of file +pub(crate) mod structural; diff --git a/core/src/specs/func/structural.rs b/core/src/specs/func/structural.rs index c823be0f..697204d6 100644 --- a/core/src/specs/func/structural.rs +++ b/core/src/specs/func/structural.rs @@ -5,6 +5,6 @@ pub trait StructuralFn { type Output; - + fn eval(&self) -> Self::Output; -} \ No newline at end of file +} diff --git a/core/src/specs/mod.rs b/core/src/specs/mod.rs index 877aa348..afbae98b 100644 --- a/core/src/specs/mod.rs +++ b/core/src/specs/mod.rs @@ -4,4 +4,4 @@ */ pub mod func; -pub mod hkt; \ No newline at end of file +pub mod hkt; diff --git a/core/src/utils.rs b/core/src/utils.rs index 21054947..2bbf8c21 100644 --- a/core/src/utils.rs +++ b/core/src/utils.rs @@ -9,4 +9,4 @@ where T: Float, { (T::one() + x.neg().exp()).recip() -} \ No newline at end of file +} diff --git a/macros/Cargo.toml b/macros/Cargo.toml index ed97cb9b..20315d38 100644 --- a/macros/Cargo.toml +++ b/macros/Cargo.toml @@ -18,7 +18,6 @@ test = true [dependencies] num = "0.4" -petgraph = { features = [], version = "0.6" } proc-macro2 = { features = ["nightly", "span-locations"], version = "1" } quote = "1" diff --git a/macros/src/ad/handle/expr/binary.rs b/macros/src/ad/handle/expr/binary.rs index e7558676..45e30847 100644 --- a/macros/src/ad/handle/expr/binary.rs +++ b/macros/src/ad/handle/expr/binary.rs @@ -142,8 +142,8 @@ fn foil(a: &ExprParen, b: &ExprParen, var: &Ident) -> TokenStream { let dr = handle_expr(&pright.into(), var); return quote! { #dl + #dr - } + }; } } panic!("FOILER") -} \ No newline at end of file +} diff --git a/macros/src/ad/handle/expr/method.rs b/macros/src/ad/handle/expr/method.rs index d508d4ea..e1c601cf 100644 --- a/macros/src/ad/handle/expr/method.rs +++ b/macros/src/ad/handle/expr/method.rs @@ -6,10 +6,8 @@ use super::handle_expr; use crate::ad::ops::{Methods, UnaryMethod}; use proc_macro2::TokenStream; use quote::quote; -use syn::spanned::Spanned; use std::str::FromStr; use syn::{Expr, ExprCall, ExprMethodCall, Ident}; -use syn::ExprPath; pub fn handle_call(expr: &ExprCall, var: &Ident) -> TokenStream { let ExprCall { args, func, .. } = expr; @@ -18,11 +16,10 @@ pub fn handle_call(expr: &ExprCall, var: &Ident) -> TokenStream { let arg = handle_expr(&arg, var); grad = quote! { #grad + #arg }; } - - // + + // let df = handle_expr(&func, var); - quote! { #df + #grad } } @@ -73,20 +70,3 @@ pub fn handle_unary_method(method: &UnaryMethod, recv: &Expr, _var: &Ident) -> T UnaryMethod::Tanh => quote! { #recv.cosh().powi(2).recip() }, } } - -pub fn extract_block_logic(expr: &ExprCall) -> Option { - // Get the span of the function call expression - let span = expr.span(); - let source = span.clone().unwrap().source_file(); - - if let Expr::Path(inner) = &*expr.func { - let ExprPath { path, .. 
} = inner; - // Get the span of the last segment of the path - let span = path.segments.last().unwrap().ident.span(); - - - - } - - None -} diff --git a/macros/src/ad/handle/item.rs b/macros/src/ad/handle/item.rs index 3c796619..d48ab929 100644 --- a/macros/src/ad/handle/item.rs +++ b/macros/src/ad/handle/item.rs @@ -4,15 +4,28 @@ */ use super::block::handle_block; use proc_macro2::TokenStream; -use syn::{Ident, Item, ItemFn}; +use syn::{Ident, Item, ItemFn, Signature}; pub fn handle_item(item: &Item, var: &Ident) -> TokenStream { match item { - Item::Fn(inner) => { - let ItemFn { block, .. } = inner; - handle_block(&block, var) - } + Item::Fn(inner) => handle_item_fn(inner, var), _ => panic!("Unsupported item!"), } } + +pub fn handle_item_fn(item: &ItemFn, var: &Ident) -> TokenStream { + let ItemFn { block, sig, .. } = item; + let Signature { inputs, .. } = sig; + + let mut vars = Vec::new(); + for input in inputs { + if let syn::FnArg::Typed(typed) = input { + if let syn::Pat::Ident(ident) = &*typed.pat { + vars.push(ident.ident.clone()); + } + } + } + + handle_block(&block, &var) +} diff --git a/macros/src/ad/ops/unary.rs b/macros/src/ad/ops/unary.rs index e900621b..7176cd16 100644 --- a/macros/src/ad/ops/unary.rs +++ b/macros/src/ad/ops/unary.rs @@ -46,7 +46,7 @@ impl Parse for UnaryMethod { if input.peek(syn::Token![.]) { if input.peek2(syn::Ident) { let method = input.parse::()?; - if let Ok(method) = UnaryMethod::from_str(method.to_string().as_str()) { + if let Ok(method) = UnaryMethod::from_str(method.to_string().as_str()) { return Ok(method); } } diff --git a/macros/src/ast/mod.rs b/macros/src/ast/mod.rs index 921514aa..eb19d030 100644 --- a/macros/src/ast/mod.rs +++ b/macros/src/ast/mod.rs @@ -7,9 +7,9 @@ pub mod partials; use proc_macro2::Span; use quote::{quote, ToTokens}; -use syn::Expr; use syn::parse::{Parse, ParseStream, Result}; use syn::spanned::Spanned; +use syn::Expr; pub struct Ast { pub expr: Expr, @@ -22,7 +22,7 @@ impl Parse for Ast { let expr: Expr = input.parse()?; let span = expr.span(); - Ok(Self { expr, span, }) + Ok(Self { expr, span }) } } @@ -30,4 +30,4 @@ impl ToTokens for Ast { fn to_tokens(&self, tokens: &mut proc_macro2::TokenStream) { tokens.extend(quote! 
{#self.expr }) } -} \ No newline at end of file +} diff --git a/macros/src/ast/partials.rs b/macros/src/ast/partials.rs index 5fbae9ab..3fc837a5 100644 --- a/macros/src/ast/partials.rs +++ b/macros/src/ast/partials.rs @@ -4,8 +4,7 @@ */ use syn::parse::{Parse, ParseStream, Result}; use syn::punctuated::Punctuated; -use syn::{Attribute, Block, Expr, Ident, ItemFn, Signature, Token, Type, Visibility}; -use syn::braced; +use syn::{Attribute, Block, Expr, Ident, ItemFn, Signature, Token, Type}; pub struct Partial { pub expr: Expr, @@ -47,7 +46,6 @@ pub struct PartialFnCall { impl Parse for PartialFnCall { fn parse(input: ParseStream) -> Result { - let attrs = input.call(Attribute::parse_outer)?; let sig: Signature = input.parse()?; let body: Block = input.parse()?; diff --git a/macros/src/cmp/graph.rs b/macros/src/cmp/graph.rs deleted file mode 100644 index af2273d0..00000000 --- a/macros/src/cmp/graph.rs +++ /dev/null @@ -1,10 +0,0 @@ -/* - Appellation: graph - Contrib: FL03 -*/ -use syn::Expr; - -pub struct Node { - id: usize, - expr: Box, -} diff --git a/macros/src/cmp/mod.rs b/macros/src/cmp/mod.rs index eb8caa83..3996240c 100644 --- a/macros/src/cmp/mod.rs +++ b/macros/src/cmp/mod.rs @@ -2,7 +2,6 @@ Appellation: cmp Contrib: FL03 */ -pub use self::{graph::*, store::*}; +pub use self::store::*; -pub(crate) mod graph; pub(crate) mod store; diff --git a/macros/src/graph.rs b/macros/src/graph.rs deleted file mode 100644 index ee1c36da..00000000 --- a/macros/src/graph.rs +++ /dev/null @@ -1,193 +0,0 @@ -/* - Appellation: graph - Contrib: FL03 -*/ -use petgraph::{ - algo::toposort, - prelude::{DiGraph, NodeIndex}, -}; -use proc_macro2::TokenStream; -use quote::{quote, ToTokens}; -use std::collections::HashMap; -use syn::{Expr, ExprBinary}; - -pub struct Context { - graph: DiGraph, -} - -impl Context { - pub fn new() -> Self { - Context { - graph: DiGraph::new(), - } - } - - pub fn add_node(&mut self, expr: Expr) -> NodeIndex { - self.graph.add_node(expr) - } - - pub fn add_edge(&mut self, src: NodeIndex, target: NodeIndex) { - self.graph.add_edge(src, target, ()); - } - - pub fn backward(&self) -> HashMap { - let sorted = toposort(&self.graph, None).expect("The graph is cyclic"); - let target = sorted.last().unwrap().clone(); - - let mut stack = Vec::<(NodeIndex, TokenStream)>::new(); - stack.push((target, quote! { 1.0 })); - let mut store = HashMap::::from_iter(stack.clone()); - - // Iterate through the edges of the graph to compute gradients - while let Some((i, grad)) = stack.pop() { - // get the current node - let node = &self.graph[i]; - - match node { - Expr::Binary(expr_binary) => { - // Compute the gradient of the left child - let left = self - .graph - .neighbors_directed(i, petgraph::Direction::Outgoing) - .next() - .unwrap(); - let left_grad = quote! { #grad * #expr_binary.right }; - stack.push((left, left_grad)); - - // Compute the gradient of the right child - let right = self - .graph - .neighbors_directed(i, petgraph::Direction::Outgoing) - .last() - .unwrap(); - let right_grad = quote! { #grad * #expr_binary.left }; - stack.push((right, right_grad)); - } - Expr::Unary(expr_unary) => { - // Compute the gradient of the child - let child = self - .graph - .neighbors_directed(i, petgraph::Direction::Outgoing) - .next() - .unwrap(); - let child_grad = quote! 
{ #grad * #expr_unary.expr }; - stack.push((child, child_grad)); - } - _ => { - // Do nothing - } - } - } - - store - } - - pub fn traverse(&mut self, expr: &Expr) { - let c = self.add_node(expr.clone()); - - match expr { - Expr::Binary(inner) => { - let ExprBinary { left, right, .. } = inner; - // Add edges for left and right children - let a = self.add_node(*left.clone()); - let b = self.add_node(*right.clone()); - self.add_edge(a, c); - self.add_edge(b, c); - - // Recursive traversal for left and right children - self.traverse(left); - self.traverse(right); - } - - Expr::Unary(inner) => { - // Add an edge for the child - let a = self.add_node(*inner.expr.clone()); - self.add_edge(a, c); - - // Recursive traversal for the child - self.traverse(&inner.expr); - } - _ => {} - } - } -} - -fn handle_expr(expr: &Expr) -> Grad { - match expr { - Expr::Binary(inner) => handle_binary(inner).into(), - _ => panic!("Unsupported expression!"), - } -} - -fn handle_binary(expr: &ExprBinary) -> BinaryGrad { - use syn::BinOp; - let ExprBinary { - left, op, right, .. - } = expr.clone(); - - let dl = handle_expr(&left); - let dr = handle_expr(&right); - match op { - BinOp::Add(_) => { - // Implement addition handling - BinaryGrad { - left: quote! { #dl }, - right: quote! { #dr }, - } - } - BinOp::Mul(_) => { - // Implement multiplication handling - BinaryGrad { - left: quote! { #dl * #right }, - right: quote! { #dr * #left }, - } - } - _ => panic!("Unsupported binary operator!"), - } -} - -pub struct BinaryGrad { - pub left: TokenStream, - pub right: TokenStream, -} - -impl ToTokens for BinaryGrad { - fn to_tokens(&self, tokens: &mut TokenStream) { - self.left.to_tokens(tokens); - self.right.to_tokens(tokens); - } -} - -pub enum Grad { - Binary(BinaryGrad), - Unary(TokenStream), - Verbatim(TokenStream), -} - -impl From for Grad { - fn from(grad: BinaryGrad) -> Self { - Grad::Binary(grad) - } -} - -impl From for Grad { - fn from(grad: TokenStream) -> Self { - Grad::Verbatim(grad) - } -} - -impl ToTokens for Grad { - fn to_tokens(&self, tokens: &mut TokenStream) { - match self { - Grad::Binary(grad) => { - grad.to_tokens(tokens); - } - Grad::Unary(grad) => { - grad.to_tokens(tokens); - } - Grad::Verbatim(grad) => { - grad.to_tokens(tokens); - } - } - } -} diff --git a/macros/src/lib.rs b/macros/src/lib.rs index 130a3aa8..9af8068a 100644 --- a/macros/src/lib.rs +++ b/macros/src/lib.rs @@ -5,35 +5,31 @@ //! # acme-macros //! //! -#![feature(proc_macro_span,)] +#![feature(proc_macro_span)] extern crate proc_macro; - pub(crate) mod ad; pub(crate) mod ast; pub(crate) mod cmp; pub(crate) mod gradient; -pub(crate) mod graph; use ast::partials::*; use proc_macro::TokenStream; use quote::quote; -use syn::{parse_macro_input, Expr,}; use syn::spanned::Spanned; - - +use syn::{parse_macro_input, Expr}; #[proc_macro_attribute] pub fn show_streams(attr: TokenStream, item: TokenStream) -> TokenStream { + let input = parse_macro_input!(item as syn::ItemFn); println!("attr: \"{}\"", attr.to_string()); - println!("item: \"{}\"", item.to_string()); - item + println!("item: \"{:?}\"", &input); + quote! { #input }.into() } #[proc_macro] pub fn show_item(item: TokenStream) -> TokenStream { - let expr = parse_macro_input!(item as Expr); let span = expr.span(); println!("Span Bytes: {:?}", span.byte_range()); @@ -43,6 +39,15 @@ pub fn show_item(item: TokenStream) -> TokenStream { quote! 
{ #expr }.into() } +#[proc_macro_attribute] +pub fn partial(attr: TokenStream, item: TokenStream) -> TokenStream { + let input = parse_macro_input!(item as syn::ItemFn); + println!("attr: \"{}\"", attr.to_string()); + // let result = ad::handle::item::handle_item(&input); + // TokenStream::from(result) + quote! { #input }.into() +} + #[proc_macro] pub fn autodiff(input: TokenStream) -> TokenStream { // Parse the input expression into a syntax tree @@ -55,28 +60,6 @@ pub fn autodiff(input: TokenStream) -> TokenStream { TokenStream::from(result) } -#[proc_macro] -pub fn compute(input: TokenStream) -> TokenStream { - use graph::Context; - // Parse the input expression into a syntax tree - let expr = parse_macro_input!(input as Expr); - - // Build a computational graph representing the expression - let mut graph = Context::new(); - graph.traverse(&expr); - - // Generate code to compute gradients and return as a HashMap - let grad = graph.backward(); - let grads = grad - .into_iter() - .map(|(k, v)| { - let k = k.index(); - quote! { (#k, #v) } - }) - .collect::>(); - quote! { [#(#grads),*] }.into() -} - #[proc_macro] pub fn grad(input: TokenStream) -> TokenStream { // Parse the input expression into a syntax tree @@ -89,18 +72,6 @@ pub fn grad(input: TokenStream) -> TokenStream { TokenStream::from(result) } -#[proc_macro] -pub fn partial(input: TokenStream) -> TokenStream { - // Parse the input token stream into a structured syntax tree - let partial = parse_macro_input!(input as Partial); - - // Generate code to perform partial differentiation - let result = ad::handle::expr::handle_expr(&partial.expr, &partial.var); - - // Return the generated code as a token stream - TokenStream::from(result) -} - pub(crate) mod kw { syn::custom_keyword!(grad); diff --git a/macros/tests/partial.rs b/macros/tests/partial.rs deleted file mode 100644 index 7b41f479..00000000 --- a/macros/tests/partial.rs +++ /dev/null @@ -1,63 +0,0 @@ -/* - Appellation: gradient - Contrib: FL03 -*/ -#[cfg(test)] -extern crate acme_macros as macros; - -use macros::partial; - -macro_rules! 
partials { - ($($x:ident),* : $f:expr) => { - { - let mut store = Vec::new(); - $( - store.push(partial!($x: $f)); - )* - store - } - }; -} - -#[test] -fn test_add() { - let (x, y) = (1_f64, 2_f64); - assert_eq!(partial!(x: x + y), 1.0); - assert_eq!(partial!(y: x += y), 1.0); - assert_eq!(partials!(x, y: x + y + 3.0), [1.0; 2]); -} - -#[test] -fn test_div() { - let (x, y) = (1_f64, 2_f64); - - assert_eq!(partial!(x: x / y), 1.0 / 2.0); - assert_eq!(partial!(y: x / y), -1.0 / 4.0); -} - -#[test] -fn test_mul() { - let (x, y) = (1_f64, 2_f64); - - assert_eq!(partial!(x: x * y), 2.0); - assert_eq!(partial!(y: x * y), 1.0); - assert_eq!(partial!(y: x * y + 3.0), 1.0); -} - -#[test] -fn test_sub() { - let (x, y) = (1_f64, 2_f64); - - assert_eq!(partial!(x: x - y), 1.0); - assert_eq!(partial!(y: x - y), -1.0); -} - -#[test] -fn test_chain_rule() { - let (x, y) = (1_f64, 2_f64); - - assert_eq!(partial!(x: y * (x + y)), y); - assert_eq!(partial!(y: y * (x + y)), 2_f64 * y + x); - assert_eq!(partial!(x: (x + y) * y), y); - assert_eq!(partial!(y: (x + y) * y), 2_f64 * y + x); -} diff --git a/tensor/src/specs.rs b/tensor/src/specs.rs index 8f580913..a9b4db86 100644 --- a/tensor/src/specs.rs +++ b/tensor/src/specs.rs @@ -6,6 +6,12 @@ use crate::core::cmp::id::AtomicId; use crate::shape::{Rank, Shape}; use crate::store::Layout; +pub trait Affine { + type Output; + + fn affine(&self, mul: &T, add: &T) -> Self::Output; +} + pub trait Matmul { type Output; From 8a83d0615c21ba14dcae42b4b87c533e213dcbd3 Mon Sep 17 00:00:00 2001 From: FL03 Date: Tue, 20 Feb 2024 09:41:58 -0600 Subject: [PATCH 06/87] update Signed-off-by: FL03 --- core/src/cmp/mod.rs | 25 ------------------------- derive/src/lib.rs | 4 ++-- 2 files changed, 2 insertions(+), 27 deletions(-) diff --git a/core/src/cmp/mod.rs b/core/src/cmp/mod.rs index bd9f4b3e..958b7e9d 100644 --- a/core/src/cmp/mod.rs +++ b/core/src/cmp/mod.rs @@ -70,31 +70,6 @@ macro_rules! impl_const_op { impl_op!($name, $bound, $fn, 0, |a, b| $name::new($e(a, b))); }; } -macro_rules! impl_dual_op { - ($name:ident, $bound:ident, $fn:ident, $val:tt, $e:expr) => { - impl $bound for $name - where - T: $bound, - { - type Output = Self; - - fn $fn(self, rhs: $name) -> Self::Output { - $e(self.$val, rhs.$val) - } - } - - impl $bound for $name - where - T: $bound, - { - type Output = Self; - - fn $fn(self, rhs: T) -> Self::Output { - $e(self.$val, rhs) - } - } - }; -} use std::ops::{Add, Div, Mul, Rem, Sub}; diff --git a/derive/src/lib.rs b/derive/src/lib.rs index b9f8b5a5..b7985feb 100644 --- a/derive/src/lib.rs +++ b/derive/src/lib.rs @@ -38,7 +38,7 @@ pub fn params(input: TokenStream) -> TokenStream { let store_name = format_ident!("{}Key", struct_name); // Generate the parameter struct definition - let param_struct = match &input.data { + let _param_struct = match &input.data { Data::Struct(s) => match &s.fields { _ => {} }, @@ -60,7 +60,7 @@ pub fn params(input: TokenStream) -> TokenStream { discriminant: None, } }); - let varaints_str = varaints.clone().map(|v| v.ident); + let _varaints_str = varaints.clone().map(|v| v.ident); quote! 
{ #[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd,)] From 08bc1011eac1f41700b9cf83fcfcb5fb279517ec Mon Sep 17 00:00:00 2001 From: FL03 Date: Tue, 20 Feb 2024 10:07:05 -0600 Subject: [PATCH 07/87] update Signed-off-by: FL03 --- acme/Cargo.toml | 1 - 1 file changed, 1 deletion(-) diff --git a/acme/Cargo.toml b/acme/Cargo.toml index 8a14db14..cfc80123 100644 --- a/acme/Cargo.toml +++ b/acme/Cargo.toml @@ -25,7 +25,6 @@ default = ["core", "tensor"] full = [ "core", "derive", - "macros", "tensor" ] From 4ba7965cffefc1638334e1c42d007249dfce9f3f Mon Sep 17 00:00:00 2001 From: FL03 Date: Tue, 20 Feb 2024 12:52:20 -0600 Subject: [PATCH 08/87] update Signed-off-by: FL03 --- core/src/ops/gradient.rs | 14 ------- core/src/ops/operator.rs | 2 +- macros/src/ad/mod.rs | 12 ------ macros/src/ast/gradient.rs | 19 +++++++++ macros/src/ast/mod.rs | 1 + macros/src/{ad => diff}/handle/block.rs | 0 macros/src/{ad => diff}/handle/expr/binary.rs | 0 macros/src/{ad => diff}/handle/expr/method.rs | 2 +- macros/src/{ad => diff}/handle/expr/mod.rs | 0 macros/src/{ad => diff}/handle/expr/unary.rs | 0 macros/src/{ad => diff}/handle/item.rs | 0 macros/src/{ad => diff}/handle/mod.rs | 0 macros/src/{ad => diff}/handle/stmt.rs | 0 macros/src/{ad/autodiff.rs => diff/mod.rs} | 15 ++++--- macros/src/grad/mod.rs | 42 +++++++++++++++++++ macros/src/lib.rs | 10 +++-- macros/src/{ad => }/ops/mod.rs | 0 macros/src/{ad => }/ops/unary.rs | 0 macros/tests/gradient.rs | 8 ++-- 19 files changed, 85 insertions(+), 40 deletions(-) delete mode 100644 macros/src/ad/mod.rs create mode 100644 macros/src/ast/gradient.rs rename macros/src/{ad => diff}/handle/block.rs (100%) rename macros/src/{ad => diff}/handle/expr/binary.rs (100%) rename macros/src/{ad => diff}/handle/expr/method.rs (98%) rename macros/src/{ad => diff}/handle/expr/mod.rs (100%) rename macros/src/{ad => diff}/handle/expr/unary.rs (100%) rename macros/src/{ad => diff}/handle/item.rs (100%) rename macros/src/{ad => diff}/handle/mod.rs (100%) rename macros/src/{ad => diff}/handle/stmt.rs (100%) rename macros/src/{ad/autodiff.rs => diff/mod.rs} (71%) create mode 100644 macros/src/grad/mod.rs rename macros/src/{ad => }/ops/mod.rs (100%) rename macros/src/{ad => }/ops/unary.rs (100%) diff --git a/core/src/ops/gradient.rs b/core/src/ops/gradient.rs index 6cd91eab..6a40d878 100644 --- a/core/src/ops/gradient.rs +++ b/core/src/ops/gradient.rs @@ -15,17 +15,3 @@ pub trait Gradient { fn grad(&self, args: T) -> Self::Gradient; } -// Mathematically, the gradient of a function is a vector of partial derivatives. - -pub struct Derivative { - pub wrt: T, - pub f: Box T>, -} - -impl Differentiable for Derivative { - type Derivative = T; - - fn diff(&self, args: T) -> Self::Derivative { - (self.f)(args) - } -} diff --git a/core/src/ops/operator.rs b/core/src/ops/operator.rs index fb7052eb..4ee55ed5 100644 --- a/core/src/ops/operator.rs +++ b/core/src/ops/operator.rs @@ -14,5 +14,5 @@ where fn eval(&self, args: Args) -> Self::Output; - fn grad(&self, args: Self::Output) -> Option; + fn grad(&self, args: Args) -> Vec; } diff --git a/macros/src/ad/mod.rs b/macros/src/ad/mod.rs deleted file mode 100644 index c547d018..00000000 --- a/macros/src/ad/mod.rs +++ /dev/null @@ -1,12 +0,0 @@ -/* - Appellation: ad - Contrib: FL03 -*/ -//! # Autodifferentiation (AD) -//! 
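The `Operator` change above (`grad` now returns one partial derivative per argument rather than an `Option`) can be pictured with a hand-written operator. A hypothetical sketch; the `Mul` struct and the tuple argument are invented for illustration, and the real trait bounds are not reproduced here:

struct Mul;

impl Mul {
    fn eval(&self, (a, b): (f64, f64)) -> f64 {
        a * b
    }

    // one partial per argument: d(a*b)/da = b, d(a*b)/db = a
    fn grad(&self, (a, b): (f64, f64)) -> Vec<f64> {
        vec![b, a]
    }
}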
-pub use self::autodiff::generate_autodiff;
-
-pub(crate) mod autodiff;
-
-pub mod handle;
-pub mod ops;
diff --git a/macros/src/ast/gradient.rs b/macros/src/ast/gradient.rs
new file mode 100644
index 00000000..c533aa2e
--- /dev/null
+++ b/macros/src/ast/gradient.rs
@@ -0,0 +1,19 @@
+/*
+    Appellation: gradient
+    Contrib: FL03
+*/
+use syn::{Attribute, ItemFn};
+use syn::parse::{Parse, ParseStream, Result};
+
+pub struct GradientAst {
+    pub attrs: Vec<Attribute>,
+    pub item: ItemFn,
+}
+
+impl Parse for GradientAst {
+    fn parse(input: ParseStream) -> Result<Self> {
+        let attrs = input.call(Attribute::parse_outer)?;
+        let item = input.parse()?;
+        Ok(GradientAst { attrs, item })
+    }
+}
diff --git a/macros/src/ast/mod.rs b/macros/src/ast/mod.rs
index eb19d030..1383c121 100644
--- a/macros/src/ast/mod.rs
+++ b/macros/src/ast/mod.rs
@@ -3,6 +3,7 @@
     Contrib: FL03
 */
 
+pub mod gradient;
 pub mod partials;
 
 use proc_macro2::Span;
diff --git a/macros/src/ad/handle/block.rs b/macros/src/diff/handle/block.rs
similarity index 100%
rename from macros/src/ad/handle/block.rs
rename to macros/src/diff/handle/block.rs
diff --git a/macros/src/ad/handle/expr/binary.rs b/macros/src/diff/handle/expr/binary.rs
similarity index 100%
rename from macros/src/ad/handle/expr/binary.rs
rename to macros/src/diff/handle/expr/binary.rs
diff --git a/macros/src/ad/handle/expr/method.rs b/macros/src/diff/handle/expr/method.rs
similarity index 98%
rename from macros/src/ad/handle/expr/method.rs
rename to macros/src/diff/handle/expr/method.rs
index e1c601cf..edbdd887 100644
--- a/macros/src/ad/handle/expr/method.rs
+++ b/macros/src/diff/handle/expr/method.rs
@@ -3,7 +3,7 @@
     Contrib: FL03
 */
 use super::handle_expr;
-use crate::ad::ops::{Methods, UnaryMethod};
+use crate::ops::{Methods, UnaryMethod};
 use proc_macro2::TokenStream;
 use quote::quote;
 use std::str::FromStr;
 use syn::{Expr, ExprCall, ExprMethodCall, Ident};
diff --git a/macros/src/ad/handle/expr/mod.rs b/macros/src/diff/handle/expr/mod.rs
similarity index 100%
rename from macros/src/ad/handle/expr/mod.rs
rename to macros/src/diff/handle/expr/mod.rs
diff --git a/macros/src/ad/handle/expr/unary.rs b/macros/src/diff/handle/expr/unary.rs
similarity index 100%
rename from macros/src/ad/handle/expr/unary.rs
rename to macros/src/diff/handle/expr/unary.rs
diff --git a/macros/src/ad/handle/item.rs b/macros/src/diff/handle/item.rs
similarity index 100%
rename from macros/src/ad/handle/item.rs
rename to macros/src/diff/handle/item.rs
diff --git a/macros/src/ad/handle/mod.rs b/macros/src/diff/handle/mod.rs
similarity index 100%
rename from macros/src/ad/handle/mod.rs
rename to macros/src/diff/handle/mod.rs
diff --git a/macros/src/ad/handle/stmt.rs b/macros/src/diff/handle/stmt.rs
similarity index 100%
rename from macros/src/ad/handle/stmt.rs
rename to macros/src/diff/handle/stmt.rs
diff --git a/macros/src/ad/autodiff.rs b/macros/src/diff/mod.rs
similarity index 71%
rename from macros/src/ad/autodiff.rs
rename to macros/src/diff/mod.rs
index e05ace53..b99a68d4 100644
--- a/macros/src/ad/autodiff.rs
+++ b/macros/src/diff/mod.rs
@@ -1,10 +1,15 @@
 /*
-    Appellation: autodiff
+    Appellation: ad
     Contrib: FL03
 */
+//! # Autodifferentiation (AD)
+//!
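For intuition, the expression path of the relocated `diff` module recurses over the syntax tree and emits derivative tokens; for a product this is the ordinary product rule. A hand-expanded sketch of what `autodiff!(x: x * y)` amounts to (illustrative only):

let (x, y) = (1.0_f64, 2.0_f64);
// product rule: d(x * y)/dx = (dx/dx) * y + x * (dy/dx) = 1 * y + x * 0
let dx = 1.0 * y + x * 0.0;
assert_eq!(dx, y);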
+ +pub mod handle; + +use handle::expr::handle_expr; +use handle::item::handle_item; +use crate::ast::partials::{PartialAst, PartialFn}; use proc_macro2::TokenStream; use syn::Ident; @@ -19,4 +24,4 @@ fn handle_input(input: &PartialFn, var: &Ident) -> TokenStream { PartialFn::Expr(inner) => handle_expr(&inner, var), PartialFn::Item(inner) => handle_item(&inner.clone().into(), var), } -} +} \ No newline at end of file diff --git a/macros/src/grad/mod.rs b/macros/src/grad/mod.rs new file mode 100644 index 00000000..2e56e2a2 --- /dev/null +++ b/macros/src/grad/mod.rs @@ -0,0 +1,42 @@ +/* + Appellation: grad + Contrib: FL03 +*/ + + +use crate::ast::gradient::GradientAst; +use crate::diff::handle::block::handle_block; +use quote::quote; +use proc_macro2::TokenStream; +use syn::{ItemFn, Signature}; + +pub fn gradient(grad: &GradientAst) -> TokenStream { + let GradientAst { attrs, item } = grad; + let attrs = attrs; + let item = item; + let output = quote! { + #(#attrs)* + #item + }; + output +} + +fn handle_item_fn(item: &ItemFn) -> TokenStream { + let ItemFn { block, sig, .. } = item; + let Signature { inputs, .. } = sig; + + let mut vars = Vec::new(); + for input in inputs { + if let syn::FnArg::Typed(typed) = input { + if let syn::Pat::Ident(ident) = &*typed.pat { + vars.push(ident.ident.clone()); + } + } + } + + let grad = vars.iter().map(|var| handle_block(&block, &var)).collect::>(); + + quote! { + [#(#grad)*] + } +} \ No newline at end of file diff --git a/macros/src/lib.rs b/macros/src/lib.rs index 9af8068a..9ef45d99 100644 --- a/macros/src/lib.rs +++ b/macros/src/lib.rs @@ -8,9 +8,12 @@ #![feature(proc_macro_span)] extern crate proc_macro; -pub(crate) mod ad; + pub(crate) mod ast; pub(crate) mod cmp; +pub(crate) mod diff; +pub(crate) mod grad; +pub(crate) mod ops; pub(crate) mod gradient; @@ -23,7 +26,7 @@ use syn::{parse_macro_input, Expr}; #[proc_macro_attribute] pub fn show_streams(attr: TokenStream, item: TokenStream) -> TokenStream { let input = parse_macro_input!(item as syn::ItemFn); - println!("attr: \"{}\"", attr.to_string()); + println!("attr: \"{:?}\"", &attr); println!("item: \"{:?}\"", &input); quote! 
{ #input }.into() } @@ -54,7 +57,7 @@ pub fn autodiff(input: TokenStream) -> TokenStream { let expr = parse_macro_input!(input as PartialAst); // Generate code to compute the gradient - let result = ad::generate_autodiff(&expr); + let result = diff::generate_autodiff(&expr); // Return the generated code as a token stream TokenStream::from(result) @@ -73,6 +76,7 @@ pub fn grad(input: TokenStream) -> TokenStream { } pub(crate) mod kw { + syn::custom_keyword!(eval); syn::custom_keyword!(grad); syn::custom_keyword!(cos); diff --git a/macros/src/ad/ops/mod.rs b/macros/src/ops/mod.rs similarity index 100% rename from macros/src/ad/ops/mod.rs rename to macros/src/ops/mod.rs diff --git a/macros/src/ad/ops/unary.rs b/macros/src/ops/unary.rs similarity index 100% rename from macros/src/ad/ops/unary.rs rename to macros/src/ops/unary.rs diff --git a/macros/tests/gradient.rs b/macros/tests/gradient.rs index b296dba9..8b6e74de 100644 --- a/macros/tests/gradient.rs +++ b/macros/tests/gradient.rs @@ -5,13 +5,13 @@ #[cfg(test)] extern crate acme_macros as macros; -use macros::grad; +use macros::gradient; #[test] fn test_grad_addition() { let x = 1.0; let y = 2.0; - let df = grad!(x + y); + let df = gradient!(x + y); // let df = BTreeMap::from_iter(df); assert_eq!( df.into_iter().filter(|(k, _v)| k == &x).collect::>(), @@ -41,7 +41,7 @@ fn test_grad_addition() { fn test_grad_multiply() { let x = 1.0; let y = 2.0; - let df = grad!(x * y); + let df = gradient!(x * y); assert_eq!( df.into_iter().filter(|(k, _v)| k == &x).collect::>(), [(x, 2.0)] @@ -66,7 +66,7 @@ fn test_grad_multiply() { fn test_grad_mixed() { let x = 1.0; let y = 2.0; - let df = grad!(y * (x + y)); + let df = gradient!(y * (x + y)); // assert_eq!(df.into_iter().filter(|(k, _v)| k == &x).collect::>(), [(x, 2.0)]); assert_eq!( df.into_iter().filter(|(k, _v)| k == &y).collect::>(), From 9e6730de84122c60a2884a19850c376486cb7d93 Mon Sep 17 00:00:00 2001 From: FL03 Date: Tue, 20 Feb 2024 13:36:41 -0600 Subject: [PATCH 09/87] update Signed-off-by: FL03 --- core/Cargo.toml | 3 --- core/src/ops/gradient.rs | 15 +++++++++++++++ macros/Cargo.toml | 1 - macros/examples/sample.rs | 15 --------------- macros/src/ast/gradient.rs | 2 +- macros/src/diff/mod.rs | 4 ++-- macros/src/grad/mod.rs | 18 ++++++++---------- macros/src/lib.rs | 3 +-- macros/tests/gradient.rs | 4 ++-- tensor/src/lib.rs | 4 ++-- tensor/src/ops/backprop.rs | 2 +- tensor/src/specs.rs | 2 +- tensor/src/tensor.rs | 2 +- 13 files changed, 34 insertions(+), 41 deletions(-) delete mode 100644 macros/examples/sample.rs diff --git a/core/Cargo.toml b/core/Cargo.toml index 64a310a4..1f37bb4e 100644 --- a/core/Cargo.toml +++ b/core/Cargo.toml @@ -12,8 +12,6 @@ version.workspace = true [features] default = [] - - [lib] bench = false crate-type = ["cdylib", "rlib"] @@ -26,7 +24,6 @@ test = true [dependencies] anyhow.workspace = true -# daggy = { features = ["serde-1"], version = "0.8" } lazy_static = "1" num = "0.4" petgraph = { features = ["serde-1"], version = "0.6" } diff --git a/core/src/ops/gradient.rs b/core/src/ops/gradient.rs index 6a40d878..edb0175d 100644 --- a/core/src/ops/gradient.rs +++ b/core/src/ops/gradient.rs @@ -15,3 +15,18 @@ pub trait Gradient { fn grad(&self, args: T) -> Self::Gradient; } +pub trait Grad { + type Output; + + /// Compute the gradient of a function at a given point, with respect to a given variable. 
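As an illustration of how this string-keyed `Grad` trait might be used before such a macro exists, consider a hypothetical linear map (the `Linear` type and its key names are invented for the sketch, and the trait is mirrored by an inherent method rather than a real impl):

struct Linear {
    w: f64,
    b: f64,
}

impl Linear {
    // y = w * x + b, with each parameter addressed by name
    fn grad(&self, at: f64, wrt: &str) -> f64 {
        match wrt {
            "w" => at,  // dy/dw = x
            "b" => 1.0, // dy/db = 1
            _ => 0.0,
        }
    }
}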
+ // TODO: Create a macro for generating parameter keys + fn grad(&self, at: T, wrt: &str) -> Self::Output; +} + +pub trait Parameter { + type Key; + type Value; + + fn key(&self) -> Self::Key; + fn value(&self) -> Self::Value; +} diff --git a/macros/Cargo.toml b/macros/Cargo.toml index 20315d38..05591129 100644 --- a/macros/Cargo.toml +++ b/macros/Cargo.toml @@ -20,7 +20,6 @@ test = true num = "0.4" proc-macro2 = { features = ["nightly", "span-locations"], version = "1" } quote = "1" - syn = { features = ["extra-traits", "fold", "full"], version = "2" } [dev-dependencies] diff --git a/macros/examples/sample.rs b/macros/examples/sample.rs deleted file mode 100644 index f9745a4a..00000000 --- a/macros/examples/sample.rs +++ /dev/null @@ -1,15 +0,0 @@ -/* - Appellation: simple - Contrib: FL03 -*/ -extern crate acme_macros as macros; - -use macros::show_streams; - -fn main() -> Result<(), Box> { - foo(); - Ok(()) -} - -#[show_streams({ delimeters })] -fn foo() {} diff --git a/macros/src/ast/gradient.rs b/macros/src/ast/gradient.rs index c533aa2e..0761cc5a 100644 --- a/macros/src/ast/gradient.rs +++ b/macros/src/ast/gradient.rs @@ -2,8 +2,8 @@ Appellation: gradient Contrib: FL03 */ -use syn::{Attribute, ItemFn}; use syn::parse::{Parse, ParseStream, Result}; +use syn::{Attribute, ItemFn}; pub struct GradientAst { pub attrs: Vec, diff --git a/macros/src/diff/mod.rs b/macros/src/diff/mod.rs index b99a68d4..9f9927c5 100644 --- a/macros/src/diff/mod.rs +++ b/macros/src/diff/mod.rs @@ -7,9 +7,9 @@ pub mod handle; +use crate::ast::partials::{PartialAst, PartialFn}; use handle::expr::handle_expr; use handle::item::handle_item; -use crate::ast::partials::{PartialAst, PartialFn}; use proc_macro2::TokenStream; use syn::Ident; @@ -24,4 +24,4 @@ fn handle_input(input: &PartialFn, var: &Ident) -> TokenStream { PartialFn::Expr(inner) => handle_expr(&inner, var), PartialFn::Item(inner) => handle_item(&inner.clone().into(), var), } -} \ No newline at end of file +} diff --git a/macros/src/grad/mod.rs b/macros/src/grad/mod.rs index 2e56e2a2..65109759 100644 --- a/macros/src/grad/mod.rs +++ b/macros/src/grad/mod.rs @@ -3,22 +3,17 @@ Contrib: FL03 */ - use crate::ast::gradient::GradientAst; use crate::diff::handle::block::handle_block; -use quote::quote; use proc_macro2::TokenStream; +use quote::quote; use syn::{ItemFn, Signature}; pub fn gradient(grad: &GradientAst) -> TokenStream { let GradientAst { attrs, item } = grad; - let attrs = attrs; + let _attrs = attrs; let item = item; - let output = quote! { - #(#attrs)* - #item - }; - output + handle_item_fn(&item) } fn handle_item_fn(item: &ItemFn) -> TokenStream { @@ -34,9 +29,12 @@ fn handle_item_fn(item: &ItemFn) -> TokenStream { } } - let grad = vars.iter().map(|var| handle_block(&block, &var)).collect::>(); + let grad = vars + .iter() + .map(|var| handle_block(&block, &var)) + .collect::>(); quote! 
{ [#(#grad)*] } -} \ No newline at end of file +} diff --git a/macros/src/lib.rs b/macros/src/lib.rs index 9ef45d99..73d454aa 100644 --- a/macros/src/lib.rs +++ b/macros/src/lib.rs @@ -8,7 +8,6 @@ #![feature(proc_macro_span)] extern crate proc_macro; - pub(crate) mod ast; pub(crate) mod cmp; pub(crate) mod diff; @@ -64,7 +63,7 @@ pub fn autodiff(input: TokenStream) -> TokenStream { } #[proc_macro] -pub fn grad(input: TokenStream) -> TokenStream { +pub fn gradient(input: TokenStream) -> TokenStream { // Parse the input expression into a syntax tree let expr = parse_macro_input!(input as Expr); diff --git a/macros/tests/gradient.rs b/macros/tests/gradient.rs index 8b6e74de..a34612f3 100644 --- a/macros/tests/gradient.rs +++ b/macros/tests/gradient.rs @@ -22,7 +22,7 @@ fn test_grad_addition() { [(y, 1.0)] ); let z = 3.0; - let df = grad!(x + y + z); + let df = gradient!(x + y + z); assert_eq!( df.into_iter().filter(|(k, _v)| k == &x).collect::>(), [(x, 1.0)] @@ -50,7 +50,7 @@ fn test_grad_multiply() { df.into_iter().filter(|(k, _v)| k == &y).collect::>(), [(y, 1.0)] ); - let df = grad!(x * y + 3.0); + let df = gradient!(x * y + 3.0); assert_eq!( df.into_iter().filter(|(k, _v)| k == &x).collect::>(), [(x, 2.0)] diff --git a/tensor/src/lib.rs b/tensor/src/lib.rs index 9b5cd2ba..9194e9e2 100644 --- a/tensor/src/lib.rs +++ b/tensor/src/lib.rs @@ -6,6 +6,8 @@ //! //! #![feature(array_chunks)] + +extern crate acme_core as acme; pub use self::{specs::*, tensor::*}; pub(crate) mod specs; @@ -16,8 +18,6 @@ pub mod ops; pub mod shape; pub mod store; -pub(crate) use acme_core as core; - pub mod prelude { pub use crate::specs::*; diff --git a/tensor/src/ops/backprop.rs b/tensor/src/ops/backprop.rs index de3b4e04..40b66607 100644 --- a/tensor/src/ops/backprop.rs +++ b/tensor/src/ops/backprop.rs @@ -2,6 +2,6 @@ Appellation: backprop Contrib: FL03 */ -use crate::core::prelude::Ops; +use acme::prelude::Ops; pub struct BackpropOp(Option); diff --git a/tensor/src/specs.rs b/tensor/src/specs.rs index a9b4db86..af3cb322 100644 --- a/tensor/src/specs.rs +++ b/tensor/src/specs.rs @@ -2,9 +2,9 @@ Appellation: specs Contrib: FL03 */ -use crate::core::cmp::id::AtomicId; use crate::shape::{Rank, Shape}; use crate::store::Layout; +use acme::cmp::id::AtomicId; pub trait Affine { type Output; diff --git a/tensor/src/tensor.rs b/tensor/src/tensor.rs index 47f13ab9..1279cf6e 100644 --- a/tensor/src/tensor.rs +++ b/tensor/src/tensor.rs @@ -2,10 +2,10 @@ Appellation: tensor Contrib: FL03 */ -use crate::core::cmp::id::AtomicId; use crate::data::Scalar; use crate::shape::{IntoShape, Rank, Shape}; use crate::store::Layout; +use acme::cmp::id::AtomicId; // use std::ops::{Index, IndexMut}; // use std::sync::{Arc, RwLock}; From 463be7567dce7b92635a0663a51c1605c5ef6c32 Mon Sep 17 00:00:00 2001 From: FL03 Date: Tue, 20 Feb 2024 14:04:42 -0600 Subject: [PATCH 10/87] update Signed-off-by: FL03 --- derive/examples/params.rs | 5 +++-- derive/src/ast/mod.rs | 4 ++++ derive/src/cmp/mod.rs | 6 ++++++ derive/src/cmp/params/mod.rs | 36 +++++++++++++++++++++++++++++++ derive/src/lib.rs | 42 ++++++++---------------------------- derive/src/utils.rs | 13 +++++++++++ 6 files changed, 71 insertions(+), 35 deletions(-) create mode 100644 derive/src/ast/mod.rs create mode 100644 derive/src/cmp/mod.rs create mode 100644 derive/src/cmp/params/mod.rs create mode 100644 derive/src/utils.rs diff --git a/derive/examples/params.rs b/derive/examples/params.rs index 2a5c50d9..d06c7ac0 100644 --- a/derive/examples/params.rs +++ b/derive/examples/params.rs @@ 
-13,6 +13,7 @@ fn main() -> Result<(), Box> { } #[derive(Params)] -pub struct LinearParams { - pub weight: f64, +pub struct LinearParams { + #[param] + pub weight: T, } diff --git a/derive/src/ast/mod.rs b/derive/src/ast/mod.rs new file mode 100644 index 00000000..97cc5a44 --- /dev/null +++ b/derive/src/ast/mod.rs @@ -0,0 +1,4 @@ +/* + Appellation: ast + Contrib: FL03 +*/ \ No newline at end of file diff --git a/derive/src/cmp/mod.rs b/derive/src/cmp/mod.rs new file mode 100644 index 00000000..3ebcca91 --- /dev/null +++ b/derive/src/cmp/mod.rs @@ -0,0 +1,6 @@ +/* + Appellation: cmp + Contrib: FL03 +*/ + +pub mod params; \ No newline at end of file diff --git a/derive/src/cmp/params/mod.rs b/derive/src/cmp/params/mod.rs new file mode 100644 index 00000000..d08b2e64 --- /dev/null +++ b/derive/src/cmp/params/mod.rs @@ -0,0 +1,36 @@ +/* + Appellation: params + Contrib: FL03 +*/ +use crate::utils::capitalize_first; +use proc_macro2::TokenStream; +use quote::{format_ident, quote}; +use syn::{Fields, FieldsNamed, Ident, Variant}; + +pub fn generate_keys(fields: &Fields, name: &Ident) -> TokenStream { + match fields { + Fields::Named(inner) => handle_named_fields(inner, name), + _ => panic!("Only named fields are supported"), + } +} + +fn handle_named_fields(fields: &FieldsNamed, name: &Ident) -> TokenStream { + let FieldsNamed { named, .. } = fields; + let varaints = named.iter().cloned().map(| field | { + let ident = field.ident.unwrap(); + let variant_ident = format_ident!("{}", capitalize_first(&ident.to_string())); + Variant { + attrs: vec![], + ident: variant_ident, + fields: Fields::Unit, + discriminant: None, + } + }); + quote! { + #[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)] + + pub enum #name { + #(#varaints),* + } + } +} \ No newline at end of file diff --git a/derive/src/lib.rs b/derive/src/lib.rs index b7985feb..be5752e3 100644 --- a/derive/src/lib.rs +++ b/derive/src/lib.rs @@ -8,15 +8,11 @@ extern crate proc_macro; use proc_macro::TokenStream; use quote::{format_ident, quote}; -use syn::{parse_macro_input, Data, DeriveInput, Fields, Variant}; +use syn::{parse_macro_input, Data, DataStruct, DeriveInput,}; -fn capitalize_first(s: &str) -> String { - s.chars() - .take(1) - .flat_map(|f| f.to_uppercase()) - .chain(s.chars().skip(1)) - .collect() -} +pub(crate) mod ast; +pub(crate) mod cmp; +pub(crate) mod utils; #[proc_macro_derive(AnswerFn)] pub fn derive_answer_fn(_item: TokenStream) -> TokenStream { @@ -47,41 +43,21 @@ pub fn params(input: TokenStream) -> TokenStream { // Generate the parameter keys enum let param_keys_enum = match &input.data { - Data::Struct(s) => match &s.fields { - Fields::Named(fields) => { - let field_names = fields.named.iter().map(|f| &f.ident); - let varaints = field_names.clone().map(|ident| { - let ident_str = ident.as_ref().unwrap().to_string(); - let ident_str = format_ident!("{}", capitalize_first(&ident_str)); - Variant { - attrs: vec![], - ident: ident_str, - fields: Fields::Unit, - discriminant: None, - } - }); - let _varaints_str = varaints.clone().map(|v| v.ident); + Data::Struct(s) => { + let DataStruct { fields, .. } = s; - quote! { - #[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd,)] - pub enum #store_name { - #( - #varaints, - )* - } - } - } - _ => panic!("Only named fields are supported"), + crate::cmp::params::generate_keys(fields, &store_name) }, _ => panic!("Only structs are supported"), }; // Combine the generated code let generated_code = quote! 
{ - // #param_struct + #param_keys_enum }; // Return the generated code as a TokenStream generated_code.into() } + diff --git a/derive/src/utils.rs b/derive/src/utils.rs new file mode 100644 index 00000000..8489ed77 --- /dev/null +++ b/derive/src/utils.rs @@ -0,0 +1,13 @@ +/* + Appellation: utils + Contrib: FL03 +*/ + + +pub fn capitalize_first(s: &str) -> String { + s.chars() + .take(1) + .flat_map(|f| f.to_uppercase()) + .chain(s.chars().skip(1)) + .collect() +} \ No newline at end of file From 0f16ea553ce9e24bbfd28b39c796c5d65aa28105 Mon Sep 17 00:00:00 2001 From: FL03 Date: Tue, 20 Feb 2024 15:12:47 -0600 Subject: [PATCH 11/87] Update --- acme/examples/autodiff.rs | 2 +- derive/src/cmp/params/mod.rs | 9 +++++++-- derive/src/utils.rs | 2 +- macros/src/lib.rs | 6 +++--- 4 files changed, 12 insertions(+), 7 deletions(-) diff --git a/acme/examples/autodiff.rs b/acme/examples/autodiff.rs index e97bb86a..9513b585 100644 --- a/acme/examples/autodiff.rs +++ b/acme/examples/autodiff.rs @@ -7,7 +7,7 @@ extern crate acme; use acme::prelude::sigmoid; -use acme::{autodiff, partial, show_item, show_streams}; +use acme::{autodiff, show_item, show_streams}; macro_rules! eval { ($var:ident: $ex:expr) => { diff --git a/derive/src/cmp/params/mod.rs b/derive/src/cmp/params/mod.rs index d08b2e64..27244539 100644 --- a/derive/src/cmp/params/mod.rs +++ b/derive/src/cmp/params/mod.rs @@ -16,7 +16,10 @@ pub fn generate_keys(fields: &Fields, name: &Ident) -> TokenStream { fn handle_named_fields(fields: &FieldsNamed, name: &Ident) -> TokenStream { let FieldsNamed { named, .. } = fields; - let varaints = named.iter().cloned().map(| field | { + let fields_str = named.clone().map(|field| { + field.ident.unwrap() + }); + let variants = named.iter().cloned().map(|field | { let ident = field.ident.unwrap(); let variant_ident = format_ident!("{}", capitalize_first(&ident.to_string())); Variant { @@ -26,11 +29,13 @@ fn handle_named_fields(fields: &FieldsNamed, name: &Ident) -> TokenStream { discriminant: None, } }); + + quote! { #[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)] pub enum #name { - #(#varaints),* + #(#variants),* } } } \ No newline at end of file diff --git a/derive/src/utils.rs b/derive/src/utils.rs index 8489ed77..5b4961c3 100644 --- a/derive/src/utils.rs +++ b/derive/src/utils.rs @@ -3,7 +3,7 @@ Contrib: FL03 */ - +/// A function for capitalizing the first letter of a string. pub fn capitalize_first(s: &str) -> String { s.chars() .take(1) diff --git a/macros/src/lib.rs b/macros/src/lib.rs index 73d454aa..c10c05d1 100644 --- a/macros/src/lib.rs +++ b/macros/src/lib.rs @@ -27,7 +27,7 @@ pub fn show_streams(attr: TokenStream, item: TokenStream) -> TokenStream { let input = parse_macro_input!(item as syn::ItemFn); println!("attr: \"{:?}\"", &attr); println!("item: \"{:?}\"", &input); - quote! { #input }.into() + (quote! { #input }).into() } #[proc_macro] @@ -38,7 +38,7 @@ pub fn show_item(item: TokenStream) -> TokenStream { println!("Span (start, end): ({:?}, {:?})", span.start(), span.end()); println!("Source File: {:?}", span.unwrap().source_file()); println!("Source Text: {:?}", span.source_text()); - quote! { #expr }.into() + (quote! { #expr }).into() } #[proc_macro_attribute] @@ -47,7 +47,7 @@ pub fn partial(attr: TokenStream, item: TokenStream) -> TokenStream { println!("attr: \"{}\"", attr.to_string()); // let result = ad::handle::item::handle_item(&input); // TokenStream::from(result) - quote! { #input }.into() + (quote! 
{ #input }).into()
 }
 
 #[proc_macro]

From b209321e0904de5c8464e5ef6e153687a29ef286 Mon Sep 17 00:00:00 2001
From: FL03
Date: Fri, 23 Feb 2024 14:08:33 -0600
Subject: [PATCH 12/87] update

Signed-off-by: FL03
---
 core/src/utils.rs            | 13 +++++++++++++
 derive/src/cmp/params/mod.rs |  2 +-
 2 files changed, 14 insertions(+), 1 deletion(-)

diff --git a/core/src/utils.rs b/core/src/utils.rs
index 2bbf8c21..a14b94e7 100644
--- a/core/src/utils.rs
+++ b/core/src/utils.rs
@@ -10,3 +10,16 @@ where
 {
     (T::one() + x.neg().exp()).recip()
 }
+
+pub trait Sigmoid {
+    fn sigmoid(self) -> Self;
+}
+
+impl<T> Sigmoid for T
+where
+    T: Float,
+{
+    fn sigmoid(self) -> Self {
+        (T::one() + self.neg().exp()).recip()
+    }
+}
\ No newline at end of file
diff --git a/derive/src/cmp/params/mod.rs b/derive/src/cmp/params/mod.rs
index 27244539..25b54d60 100644
--- a/derive/src/cmp/params/mod.rs
+++ b/derive/src/cmp/params/mod.rs
@@ -16,7 +16,7 @@ pub fn generate_keys(fields: &Fields, name: &Ident) -> TokenStream {
 
 fn handle_named_fields(fields: &FieldsNamed, name: &Ident) -> TokenStream {
     let FieldsNamed { named, .. } = fields;
-    let fields_str = named.clone().map(|field| {
+    let fields_str = named.iter().cloned().map(|field| {
         field.ident.unwrap()
     });
     let variants = named.iter().cloned().map(|field | {

From 723cf050eff1c3518da4c67003992929fa6ce984 Mon Sep 17 00:00:00 2001
From: FL03
Date: Fri, 23 Feb 2024 14:47:57 -0600
Subject: [PATCH 13/87] update

Signed-off-by: FL03
---
 acme/tests/autodiff.rs       | 44 ++++++++++++++++--------------------
 core/src/utils.rs            |  2 +-
 derive/src/ast/mod.rs        |  2 +-
 derive/src/cmp/mod.rs        |  2 +-
 derive/src/cmp/params/mod.rs | 11 ++++-----
 derive/src/lib.rs            |  7 +++---
 derive/src/utils.rs          |  2 +-
 7 files changed, 31 insertions(+), 39 deletions(-)

diff --git a/acme/tests/autodiff.rs b/acme/tests/autodiff.rs
index 05cf38b4..6a23db39 100644
--- a/acme/tests/autodiff.rs
+++ b/acme/tests/autodiff.rs
@@ -7,7 +7,7 @@
 #[cfg(test)]
 extern crate acme;
 
-use acme::prelude::{autodiff, sigmoid};
+use acme::prelude::{autodiff, sigmoid, Sigmoid};
 use approx::assert_abs_diff_eq;
 use num::traits::Float;
 use std::ops::Add;
@@ -26,18 +26,6 @@ where
     x.neg().exp() / (T::one() + x.neg().exp()).powi(2)
 }
 
-pub trait Sigmoid {
-    fn sigmoid(self) -> Self;
-}
-
-impl<T> Sigmoid for T
-where
-    T: Float,
-{
-    fn sigmoid(self) -> Self {
-        (T::one() + self.neg().exp()).recip()
-    }
-}
 trait Square {
     fn square(self) -> Self;
 }
@@ -54,17 +42,8 @@ where
 #[test]
 fn test_autodiff() {
     let (x, y) = (1.0, 2.0);
-    // differentiating a function item w.r.t. a
-    assert_eq!(
-        autodiff!(a: fn addition(a: f64, b: f64) -> f64 { a + b }),
-        1.0
-    );
     // differentiating a closure item w.r.t. x
     assert_eq!(autodiff!(x: | x: f64, y: f64 | x * y ), 2.0);
-    // differentiating a function call w.r.t. x
-    assert_eq!(autodiff!(x: add(x, y)), 1.0);
-    // differentiating a function call w.r.t. some variable
-    assert_eq!(autodiff!(a: add(x, y)), 0.0);
     // differentiating a method call w.r.t. the reciever (x)
     assert_eq!(autodiff!(x: x.add(y)), 1.0);
     // differentiating an expression w.r.t. x
     assert_eq!(autodiff!(x: x + y), 1.0);
@@ -181,11 +160,28 @@ fn test_sigmoid() {
     );
 }
 
+#[ignore = "Function items are currently not supported"]
+#[test]
+fn test_fn_item() {
+    let (x, y) = (1_f64, 2_f64);
+    // differentiating a function item w.r.t. a
+    // assert_eq!(
+    //     autodiff!(y: fn mul<A, B, C>(x: A, y: B) -> C where A: std::ops::Mul<B, Output = C> { x * y }),
+    //     2_f64
+    // );
+
+    assert_eq!(autodiff!(y: fn mul(x: f64, y: f64) -> f64 { x * y }), 2_f64);
+}
+
 #[ignore = "Currently, support for function calls is not fully implemented"]
 #[test]
 fn test_function_call() {
-    let x = 2_f64;
-    assert_eq!(autodiff!(x: sigmoid::<f64>(x)), sigmoid_prime(x));
+    let (x, y) = (1_f64, 2_f64);
+    // differentiating a function call w.r.t. x
+    assert_eq!(autodiff!(x: add(x, y)), 1.0);
+    // differentiating a function call w.r.t. some variable
+    assert_eq!(autodiff!(a: add(x, y)), 0.0);
+    assert_eq!(autodiff!(y: sigmoid::<f64>(y)), sigmoid_prime(y));
 }
 
 #[ignore = "Custom trait methods are not yet supported"]
diff --git a/core/src/utils.rs b/core/src/utils.rs
index a14b94e7..61b4fef4 100644
--- a/core/src/utils.rs
+++ b/core/src/utils.rs
@@ -22,4 +22,4 @@ where
     fn sigmoid(self) -> Self {
         (T::one() + self.neg().exp()).recip()
     }
-}
\ No newline at end of file
+}
diff --git a/derive/src/ast/mod.rs b/derive/src/ast/mod.rs
index 97cc5a44..ff6ef7ff 100644
--- a/derive/src/ast/mod.rs
+++ b/derive/src/ast/mod.rs
@@ -1,4 +1,4 @@
 /*
     Appellation: ast
     Contrib: FL03
-*/
\ No newline at end of file
+*/
diff --git a/derive/src/cmp/mod.rs b/derive/src/cmp/mod.rs
index 3ebcca91..eb18459f 100644
--- a/derive/src/cmp/mod.rs
+++ b/derive/src/cmp/mod.rs
@@ -3,4 +3,4 @@
     Contrib: FL03
 */
 
-pub mod params;
\ No newline at end of file
+pub mod params;
diff --git a/derive/src/cmp/params/mod.rs b/derive/src/cmp/params/mod.rs
index 25b54d60..d282f3f5 100644
--- a/derive/src/cmp/params/mod.rs
+++ b/derive/src/cmp/params/mod.rs
@@ -16,10 +16,8 @@ pub fn generate_keys(fields: &Fields, name: &Ident) -> TokenStream {
 
 fn handle_named_fields(fields: &FieldsNamed, name: &Ident) -> TokenStream {
     let FieldsNamed { named, .. } = fields;
-    let fields_str = named.iter().cloned().map(|field| {
-        field.ident.unwrap()
-    });
+    let fields_str = named.iter().cloned().map(|field| field.ident.unwrap());
     let variants = named.iter().cloned().map(|field| {
@@ -30,12 +28,11 @@ fn handle_named_fields(fields: &FieldsNamed, name: &Ident) -> TokenStream {
         }
     });
 
-
     quote! {
         #[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)]
 
         pub enum #name {
             #(#variants),*
         }
     }
 }
diff --git a/derive/src/lib.rs b/derive/src/lib.rs
index be5752e3..638d0a51 100644
--- a/derive/src/lib.rs
+++ b/derive/src/lib.rs
@@ -8,7 +8,7 @@ extern crate proc_macro;
 
 use proc_macro::TokenStream;
 use quote::{format_ident, quote};
-use syn::{parse_macro_input, Data, DataStruct, DeriveInput,};
+use syn::{parse_macro_input, Data, DataStruct, DeriveInput};
 
 pub(crate) mod ast;
@@ -47,17 +47,16 @@ pub fn params(input: TokenStream) -> TokenStream {
             let DataStruct { fields, .. } = s;
 
             crate::cmp::params::generate_keys(fields, &store_name)
-        },
+        }
         _ => panic!("Only structs are supported"),
     };
 
     // Combine the generated code
     let generated_code = quote! {
-
+        #param_keys_enum
     };
 
     // Return the generated code as a TokenStream
     generated_code.into()
 }
-
diff --git a/derive/src/utils.rs b/derive/src/utils.rs
index 5b4961c3..76255e20 100644
--- a/derive/src/utils.rs
+++ b/derive/src/utils.rs
@@ -10,4 +10,4 @@ pub fn capitalize_first(s: &str) -> String {
         .flat_map(|f| f.to_uppercase())
         .chain(s.chars().skip(1))
         .collect()
-}
\ No newline at end of file
+}

From 5afd706876c1ad2cb26485b2ed0ba877cbc9f8d2 Mon Sep 17 00:00:00 2001
From: FL03
Date: Fri, 23 Feb 2024 15:02:47 -0600
Subject: [PATCH 14/87] update

Signed-off-by: FL03
---
 acme/tests/autodiff.rs | 28 ++++++++++++++--------------
 1 file changed, 14 insertions(+), 14 deletions(-)

diff --git a/acme/tests/autodiff.rs b/acme/tests/autodiff.rs
index 6a23db39..5d26f706 100644
--- a/acme/tests/autodiff.rs
+++ b/acme/tests/autodiff.rs
@@ -41,16 +41,27 @@ where
 
 #[test]
 fn test_autodiff() {
-    let (x, y) = (1.0, 2.0);
+    let (x, y) = (1_f64, 2_f64);
     // differentiating a closure item w.r.t. x
-    assert_eq!(autodiff!(x: | x: f64, y: f64 | x * y ), 2.0);
-    // differentiating a method call w.r.t. the reciever (x)
+    assert_eq!(autodiff!(x: | x: f64, y: f64 | x * y ), y);
+    assert_eq!(autodiff!(y: | x: f64, y: f64 | x * y ), x);
+    // differentiating a known method call w.r.t. the receiver (x)
     assert_eq!(autodiff!(x: x.add(y)), 1.0);
     // differentiating an expression w.r.t. x
     assert_eq!(autodiff!(x: x + y), 1.0);
     assert_eq!(autodiff!(y: x += y), 1.0);
 }
 
+#[test]
+fn test_item_function() {
+    let (x, y) = (1_f64, 2_f64);
+    assert_eq!(
+        autodiff!(x: fn mul<A, B, C>(x: A, y: B) -> C where A: std::ops::Mul<B, Output = C> { x * y }),
+        y
+    );
+    assert_eq!(autodiff!(y: fn mul(x: f64, y: f64) -> f64 { x * y }), x);
+}
+
 #[test]
 fn test_array() {
     let x = [1.0, 2.0];
@@ -160,18 +171,7 @@ fn test_sigmoid() {
     );
 }
 
-#[ignore = "Function items are currently not supported"]
-#[test]
-fn test_fn_item() {
-    let (x, y) = (1_f64, 2_f64);
-    // differentiating a function item w.r.t. a
-    // assert_eq!(
-    //     autodiff!(y: fn mul<A, B, C>(x: A, y: B) -> C where A: std::ops::Mul<B, Output = C> { x * y }),
-    //     2_f64
-    // );
-
-    assert_eq!(autodiff!(y: fn mul(x: f64, y: f64) -> f64 { x * y }), 2_f64);
-}
-
 #[ignore = "Currently, support for function calls is not fully implemented"]
 #[test]
 fn test_function_call() {

From 1c56fdfec20eb867fc505f5f7828d2bc93dc5e70 Mon Sep 17 00:00:00 2001
From: FL03
Date: Fri, 23 Feb 2024 15:03:31 -0600
Subject: [PATCH 15/87] update

Signed-off-by: FL03
---
 acme/tests/autodiff.rs | 1 +
 1 file changed, 1 insertion(+)

diff --git a/acme/tests/autodiff.rs b/acme/tests/autodiff.rs
index 5d26f706..15500555 100644
--- a/acme/tests/autodiff.rs
+++ b/acme/tests/autodiff.rs
@@ -49,6 +49,7 @@ fn test_autodiff() {
     assert_eq!(autodiff!(x: x.add(y)), 1.0);
     // differentiating an expression w.r.t.
x assert_eq!(autodiff!(x: x + y), 1.0); + assert_eq!(autodiff!(x: x + x), 2.0); assert_eq!(autodiff!(y: x += y), 1.0); } From 5893274f43718e034ca1c581c59792df0dd6d36f Mon Sep 17 00:00:00 2001 From: FL03 Date: Sun, 25 Feb 2024 13:37:13 -0600 Subject: [PATCH 16/87] update Signed-off-by: FL03 --- derive/examples/params.rs | 4 +++- derive/src/cmp/params/mod.rs | 1 - 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/derive/examples/params.rs b/derive/examples/params.rs index d06c7ac0..37305ad6 100644 --- a/derive/examples/params.rs +++ b/derive/examples/params.rs @@ -8,7 +8,9 @@ use acme::Params; fn main() -> Result<(), Box> { let _params = LinearParams { weight: 1.0 }; - let _wk = LinearParamsKey::Weight; + let wk = LinearParamsKey::Weight; + println!("{:?}", &wk); + // let _key = wk.key(); Ok(()) } diff --git a/derive/src/cmp/params/mod.rs b/derive/src/cmp/params/mod.rs index d282f3f5..0a7cedbb 100644 --- a/derive/src/cmp/params/mod.rs +++ b/derive/src/cmp/params/mod.rs @@ -30,7 +30,6 @@ fn handle_named_fields(fields: &FieldsNamed, name: &Ident) -> TokenStream { quote! { #[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)] - pub enum #name { #(#variants),* } From e9eb205903875325d68c3c9409be3becb20fbe31 Mon Sep 17 00:00:00 2001 From: Joe McCain III Date: Fri, 15 Mar 2024 08:23:55 -0500 Subject: [PATCH 17/87] update Signed-off-by: Joe McCain III --- tensor/src/ops/kinds.rs | 21 +++++++++++++++++++++ tensor/src/ops/mod.rs | 1 + tensor/src/tensor.rs | 16 ++++++++++------ 3 files changed, 32 insertions(+), 6 deletions(-) create mode 100644 tensor/src/ops/kinds.rs diff --git a/tensor/src/ops/kinds.rs b/tensor/src/ops/kinds.rs new file mode 100644 index 00000000..af4e0db2 --- /dev/null +++ b/tensor/src/ops/kinds.rs @@ -0,0 +1,21 @@ +/* + Appellation: kinds + Contrib: FL03 +*/ + +pub trait TensorOp { + +} + +pub enum Op { + Binary(BinaryOp), + Unary(UnaryOp), +} + +pub enum BinaryOp { + +} + +pub enum UnaryOp { + +} \ No newline at end of file diff --git a/tensor/src/ops/mod.rs b/tensor/src/ops/mod.rs index e0934e8a..6b0e7fc5 100644 --- a/tensor/src/ops/mod.rs +++ b/tensor/src/ops/mod.rs @@ -5,6 +5,7 @@ pub use self::backprop::*; pub(crate) mod backprop; +pub(crate) mod kinds; #[cfg(test)] mod tests {} diff --git a/tensor/src/tensor.rs b/tensor/src/tensor.rs index 1279cf6e..1d920d64 100644 --- a/tensor/src/tensor.rs +++ b/tensor/src/tensor.rs @@ -9,6 +9,15 @@ use acme::cmp::id::AtomicId; // use std::ops::{Index, IndexMut}; // use std::sync::{Arc, RwLock}; +pub(crate) fn from_vec(shape: impl IntoShape, store: Vec) -> TensorBase { + let layout = Layout::contiguous(shape); + TensorBase { + id: AtomicId::new(), + layout, + store, //Arc::new(RwLock::new(store)), + } +} + #[derive(Clone, Debug)] pub struct TensorBase { id: AtomicId, @@ -18,12 +27,7 @@ pub struct TensorBase { impl TensorBase { pub fn from_vec(shape: impl IntoShape, store: Vec) -> Self { - let layout = Layout::contiguous(shape); - Self { - id: AtomicId::new(), - layout, - store, //Arc::new(RwLock::new(store)), - } + from_vec(shape, store) } // Function to get the index of the data based on coordinates From d8cad07ca67c88a010f3b1983a95d57b47efe484 Mon Sep 17 00:00:00 2001 From: Joe McCain III Date: Fri, 15 Mar 2024 08:44:20 -0500 Subject: [PATCH 18/87] update Signed-off-by: Joe McCain III --- acme/Cargo.toml | 37 +++++++++++------- acme/tests/autodiff.rs | 2 - core/Cargo.toml | 18 +++++++-- core/src/cmp/constants.rs | 17 ++------- core/src/cmp/dual.rs | 17 ++------- core/src/cmp/mod.rs | 2 - 
core/src/cmp/operators.rs | 2 +- core/src/cmp/variables.rs | 4 +- core/src/errors/error.rs | 4 +- core/src/errors/kinds.rs | 21 +++++++---- core/src/exp/dynamic/edge.rs | 4 +- core/src/exp/dynamic/node.rs | 4 +- core/src/graphs/scg/edge.rs | 8 ++-- core/src/graphs/scg/node.rs | 6 ++- core/src/{cmp => }/id/atomic.rs | 62 +++++++++++++++++++++---------- core/src/{cmp => }/id/gradient.rs | 4 +- core/src/{cmp => }/id/id.rs | 4 +- core/src/{cmp => }/id/mod.rs | 2 + core/src/lib.rs | 2 + core/src/ops/arithmetic.rs | 22 +++++------ core/src/ops/kinds.rs | 43 ++++++++++++--------- tensor/Cargo.toml | 6 ++- tensor/src/ops/kinds.rs | 12 ++---- tensor/src/shape/rank.rs | 7 ++-- tensor/src/shape/shape.rs | 4 +- tensor/src/specs.rs | 2 +- tensor/src/tensor.rs | 2 +- 27 files changed, 182 insertions(+), 136 deletions(-) rename core/src/{cmp => }/id/atomic.rs (53%) rename core/src/{cmp => }/id/gradient.rs (84%) rename core/src/{cmp => }/id/id.rs (85%) rename core/src/{cmp => }/id/mod.rs (90%) diff --git a/acme/Cargo.toml b/acme/Cargo.toml index cfc80123..50461e4e 100644 --- a/acme/Cargo.toml +++ b/acme/Cargo.toml @@ -12,19 +12,16 @@ readme.workspace = true repository.workspace = true version.workspace = true -[[example]] -name = "autodiff" -required-features = ["macros"] - -[[test]] -name = "autodiff" -required-features = ["macros"] - [features] -default = ["core", "tensor"] +default = [ + "core", + "tensor", +] + full = [ "core", "derive", + "serde", "tensor" ] @@ -45,19 +42,33 @@ tensor = [ "dep:acme-tensor" ] +serde = [ + "acme-core/serde", + "acme-tensor/serde" +] + + [lib] bench = true crate-type = ["cdylib", "rlib"] doctest = true test = true +[[example]] +name = "autodiff" +required-features = ["macros"] + +[[test]] +name = "autodiff" +required-features = ["macros"] + [build-dependencies] [dependencies] -acme-core = { features = [], optional = true, path = "../core", version = "0.3.0" } -acme-derive = { features = [], optional = true, path = "../derive", version = "0.3.0" } -acme-macros = { features = [], optional = true, path = "../macros", version = "0.3.0" } -acme-tensor = { features = [], optional = true, path = "../tensor", version = "0.3.0" } +acme-core = { optional = true, path = "../core", version = "0.3.0" } +acme-derive = { optional = true, path = "../derive", version = "0.3.0" } +acme-macros = { optional = true, path = "../macros", version = "0.3.0" } +acme-tensor = { optional = true, path = "../tensor", version = "0.3.0" } [dev-dependencies] approx = "0.5" diff --git a/acme/tests/autodiff.rs b/acme/tests/autodiff.rs index 15500555..1adb9e52 100644 --- a/acme/tests/autodiff.rs +++ b/acme/tests/autodiff.rs @@ -172,8 +172,6 @@ fn test_sigmoid() { ); } - - #[ignore = "Currently, support for function calls is not fully implemented"] #[test] fn test_function_call() { diff --git a/core/Cargo.toml b/core/Cargo.toml index 1f37bb4e..40f7770c 100644 --- a/core/Cargo.toml +++ b/core/Cargo.toml @@ -12,6 +12,16 @@ version.workspace = true [features] default = [] +serde = [ + "dep:serde", + "serde-ext", +] + +serde-ext = [ + "dep:serde_json", + "petgraph/serde-1", +] + [lib] bench = false crate-type = ["cdylib", "rlib"] @@ -26,11 +36,11 @@ test = true anyhow.workspace = true lazy_static = "1" num = "0.4" -petgraph = { features = ["serde-1"], version = "0.6" } -serde.workspace = true -serde_json.workspace = true -strum.workspace = true +petgraph = "0.6" +serde = { optional = true, features = ["derive"], version = "1" } +serde_json = { optional = true, version = "1" } smart-default.workspace = true 
+strum.workspace = true [package.metadata.docs.rs] all-features = true diff --git a/core/src/cmp/constants.rs b/core/src/cmp/constants.rs index c560276c..13a301a9 100644 --- a/core/src/cmp/constants.rs +++ b/core/src/cmp/constants.rs @@ -4,24 +4,13 @@ */ use crate::ops::{Evaluate, Gradient}; use num::{Num, One, Zero}; +#[cfg(feature = "serde")] use serde::{Deserialize, Serialize}; use std::marker::ConstParamTy; use std::ops::{Deref, DerefMut, Neg, Not}; -#[derive( - Clone, - ConstParamTy, - Copy, - Debug, - Default, - Deserialize, - Eq, - Hash, - Ord, - PartialEq, - PartialOrd, - Serialize, -)] +#[derive(Clone, ConstParamTy, Copy, Debug, Default, Eq, Hash, Ord, PartialEq, PartialOrd)] +#[cfg_attr(feature = "serde", derive(Deserialize, Serialize,))] #[repr(transparent)] pub struct Constant(pub T); diff --git a/core/src/cmp/dual.rs b/core/src/cmp/dual.rs index 67f42751..8949149e 100644 --- a/core/src/cmp/dual.rs +++ b/core/src/cmp/dual.rs @@ -14,24 +14,13 @@ use crate::ops::{Evaluate, Gradient}; use num::{Num, One, Zero}; +#[cfg(feature = "serde")] use serde::{Deserialize, Serialize}; use std::marker::ConstParamTy; use std::ops::{self, Neg, Not}; -#[derive( - Clone, - ConstParamTy, - Copy, - Debug, - Default, - Deserialize, - Eq, - Hash, - Ord, - PartialEq, - PartialOrd, - Serialize, -)] +#[derive(Clone, ConstParamTy, Copy, Debug, Default, Eq, Hash, Ord, PartialEq, PartialOrd)] +#[cfg_attr(feature = "serde", derive(Deserialize, Serialize,))] pub struct Dual { dual: T, real: T, diff --git a/core/src/cmp/mod.rs b/core/src/cmp/mod.rs index 958b7e9d..83d1fb9f 100644 --- a/core/src/cmp/mod.rs +++ b/core/src/cmp/mod.rs @@ -12,8 +12,6 @@ pub(crate) mod dual; pub(crate) mod operators; pub(crate) mod variables; -pub mod id; - use petgraph::prelude::NodeIndex; pub trait NodeConfig { diff --git a/core/src/cmp/operators.rs b/core/src/cmp/operators.rs index 2af9f5cb..2b3bfbcc 100644 --- a/core/src/cmp/operators.rs +++ b/core/src/cmp/operators.rs @@ -2,7 +2,7 @@ Appellation: operators Contrib: FL03 */ -use super::id::Id; +use crate::id::Id; pub struct Operator { inputs: Vec, diff --git a/core/src/cmp/variables.rs b/core/src/cmp/variables.rs index b4c949fe..c1065077 100644 --- a/core/src/cmp/variables.rs +++ b/core/src/cmp/variables.rs @@ -4,10 +4,12 @@ */ use crate::ops::{Evaluate, Gradient}; use num::{Num, One, Zero}; +#[cfg(feature = "serde")] use serde::{Deserialize, Serialize}; use std::ops::{Add, Div, Mul, Sub}; -#[derive(Clone, Debug, Default, Deserialize, Eq, Hash, Ord, PartialEq, PartialOrd, Serialize)] +#[cfg_attr(feature = "serde", derive(Deserialize, Serialize,))] +#[derive(Clone, Debug, Default, Eq, Hash, Ord, PartialEq, PartialOrd)] pub struct Variable { name: String, pub(crate) value: Option, diff --git a/core/src/errors/error.rs b/core/src/errors/error.rs index ef96a576..5ec82d6d 100644 --- a/core/src/errors/error.rs +++ b/core/src/errors/error.rs @@ -3,9 +3,11 @@ Contrib: FL03 */ use super::kinds::ErrorKind; +#[cfg(feature = "serde")] use serde::{Deserialize, Serialize}; -#[derive(Clone, Debug, Default, Deserialize, Eq, Hash, Ord, PartialEq, PartialOrd, Serialize)] +#[cfg_attr(feature = "serde", derive(Deserialize, Serialize,))] +#[derive(Clone, Debug, Default, Eq, Hash, Ord, PartialEq, PartialOrd)] pub struct Error { kind: ErrorKind, message: String, diff --git a/core/src/errors/kinds.rs b/core/src/errors/kinds.rs index b7b61b56..e1864072 100644 --- a/core/src/errors/kinds.rs +++ b/core/src/errors/kinds.rs @@ -2,6 +2,7 @@ Appellation: error Contrib: FL03 */ +#[cfg(feature = "serde")] use 
serde::{Deserialize, Serialize}; use strum::{Display, EnumCount, EnumIs, EnumIter, VariantNames}; @@ -10,7 +11,6 @@ use strum::{Display, EnumCount, EnumIs, EnumIter, VariantNames}; Copy, Debug, Default, - Deserialize, Display, EnumCount, EnumIs, @@ -20,11 +20,14 @@ use strum::{Display, EnumCount, EnumIs, EnumIter, VariantNames}; Ord, PartialEq, PartialOrd, - Serialize, VariantNames, )] -#[serde(rename_all = "camelCase")] -#[strum(serialize_all = "camelCase")] +#[cfg_attr( + feature = "serde", + derive(Deserialize, Serialize,), + serde(rename_all = "snake_case") +)] +#[strum(serialize_all = "snake_case")] pub enum ErrorKind { Func(FuncError), Graph, @@ -44,7 +47,6 @@ impl From for ErrorKind { Copy, Debug, Default, - Deserialize, Display, EnumCount, EnumIs, @@ -54,11 +56,14 @@ impl From for ErrorKind { Ord, PartialEq, PartialOrd, - Serialize, VariantNames, )] -#[serde(rename_all = "camelCase")] -#[strum(serialize_all = "camelCase")] +#[cfg_attr( + feature = "serde", + derive(Deserialize, Serialize,), + serde(rename_all = "snake_case") +)] +#[strum(serialize_all = "snake_case")] pub enum FuncError { #[default] ArgCount, diff --git a/core/src/exp/dynamic/edge.rs b/core/src/exp/dynamic/edge.rs index 69448006..55597e48 100644 --- a/core/src/exp/dynamic/edge.rs +++ b/core/src/exp/dynamic/edge.rs @@ -2,9 +2,11 @@ Appellation: value Contrib: FL03 */ +#[cfg(feature = "serde")] use serde::{Deserialize, Serialize}; -#[derive(Clone, Debug, Deserialize, Eq, Hash, Ord, PartialEq, PartialOrd, Serialize)] +#[cfg_attr(feature = "serde", derive(Deserialize, Serialize,))] +#[derive(Clone, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)] pub struct DcgEdge { op: String, } diff --git a/core/src/exp/dynamic/node.rs b/core/src/exp/dynamic/node.rs index d056a72a..2648daf2 100644 --- a/core/src/exp/dynamic/node.rs +++ b/core/src/exp/dynamic/node.rs @@ -8,9 +8,11 @@ //! The edges connecting to any given node are considered to be inputs and help to determine the flow of information use crate::prelude::Ops; use petgraph::prelude::NodeIndex; +#[cfg(feature = "serde")] use serde::{Deserialize, Serialize}; -#[derive(Clone, Debug, Default, Deserialize, Eq, Hash, Ord, PartialEq, PartialOrd, Serialize)] +#[cfg_attr(feature = "serde", derive(Deserialize, Serialize,))] +#[derive(Clone, Debug, Default, Eq, Hash, Ord, PartialEq, PartialOrd)] pub struct Node { inputs: Vec, operation: Option, diff --git a/core/src/graphs/scg/edge.rs b/core/src/graphs/scg/edge.rs index 2047490a..68f216c9 100644 --- a/core/src/graphs/scg/edge.rs +++ b/core/src/graphs/scg/edge.rs @@ -2,12 +2,12 @@ Appellation: edge Contrib: FL03 */ -use crate::cmp::id::{GradientId, Id}; +use crate::id::{GradientId, Id}; +#[cfg(feature = "serde")] use serde::{Deserialize, Serialize}; -#[derive( - Clone, Copy, Debug, Default, Deserialize, Eq, Hash, Ord, PartialEq, PartialOrd, Serialize, -)] +#[cfg_attr(feature = "serde", derive(Deserialize, Serialize,))] +#[derive(Clone, Copy, Debug, Default, Eq, Hash, Ord, PartialEq, PartialOrd)] pub struct Edge { id: Option>, data: T, diff --git a/core/src/graphs/scg/node.rs b/core/src/graphs/scg/node.rs index 4949bf23..d66dbe3f 100644 --- a/core/src/graphs/scg/node.rs +++ b/core/src/graphs/scg/node.rs @@ -6,12 +6,14 @@ //! //! A computational graph relies on weighted nodes to represent constants, operations, and variables. //! 
The edges connecting to any given node are considered to be inputs and help to determine the flow of information -use crate::cmp::id::AtomicId; +use crate::id::AtomicId; use crate::ops::Ops; use petgraph::prelude::NodeIndex; +#[cfg(feature = "serde")] use serde::{Deserialize, Serialize}; -#[derive(Clone, Debug, Default, Deserialize, Eq, Hash, Ord, PartialEq, PartialOrd, Serialize)] +#[cfg_attr(feature = "serde", derive(Deserialize, Serialize,))] +#[derive(Clone, Debug, Default, Eq, Hash, Ord, PartialEq, PartialOrd)] pub struct Node { id: AtomicId, inputs: Vec, diff --git a/core/src/cmp/id/atomic.rs b/core/src/id/atomic.rs similarity index 53% rename from core/src/cmp/id/atomic.rs rename to core/src/id/atomic.rs index ad25b87b..62e65386 100644 --- a/core/src/cmp/id/atomic.rs +++ b/core/src/id/atomic.rs @@ -1,39 +1,41 @@ /* - Appellation: atomic - Contrib: FL03 + Appellation: atomic + Contrib: FL03 */ +//! # Atomic Id +//! +//! +use crate::id::Identifier; +#[cfg(feature = "serde")] use serde::{Deserialize, Serialize}; -use std::ops; +use std::ops::{Deref, DerefMut}; +use std::sync::atomic::{AtomicUsize, Ordering::Relaxed}; -#[derive(Clone, Copy, Debug, Deserialize, Eq, Hash, Ord, PartialEq, PartialOrd, Serialize)] +#[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)] +#[cfg_attr(feature = "serde", derive(Deserialize, Serialize,))] #[repr(transparent)] pub struct AtomicId(usize); impl AtomicId { pub fn new() -> Self { - use std::sync::atomic; - static COUNTER: atomic::AtomicUsize = atomic::AtomicUsize::new(1); - Self(COUNTER.fetch_add(1, atomic::Ordering::Relaxed)) + static COUNTER: AtomicUsize = AtomicUsize::new(1); + Self(COUNTER.fetch_add(1, Relaxed)) } - pub fn get(&self) -> usize { - self.0 + pub fn next(&self) -> Self { + Self::new() } - pub fn into_inner(self) -> usize { - self.0 + pub fn set(&mut self, id: usize) { + self.0 = id; } -} -impl Default for AtomicId { - fn default() -> Self { - Self::new() + pub const fn get(&self) -> usize { + self.0 } -} -impl std::fmt::Display for AtomicId { - fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { - write!(f, "{}", self.0) + pub fn into_inner(self) -> usize { + self.0 } } @@ -49,7 +51,13 @@ impl AsMut for AtomicId { } } -impl ops::Deref for AtomicId { +impl Default for AtomicId { + fn default() -> Self { + Self::new() + } +} + +impl Deref for AtomicId { type Target = usize; fn deref(&self) -> &Self::Target { @@ -57,6 +65,20 @@ impl ops::Deref for AtomicId { } } +impl DerefMut for AtomicId { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.0 + } +} + +impl Identifier for AtomicId {} + +impl std::fmt::Display for AtomicId { + fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { + write!(f, "{}", self.0) + } +} + impl From for AtomicId { fn from(id: usize) -> Self { Self(id) diff --git a/core/src/cmp/id/gradient.rs b/core/src/id/gradient.rs similarity index 84% rename from core/src/cmp/id/gradient.rs rename to core/src/id/gradient.rs index bf4b1b07..570d8db1 100644 --- a/core/src/cmp/id/gradient.rs +++ b/core/src/id/gradient.rs @@ -3,11 +3,13 @@ Contrib: FL03 */ use super::Id; +#[cfg(feature = "serde")] use serde::{Deserialize, Serialize}; use std::marker::PhantomData; use std::ops::Deref; -#[derive(Clone, Copy, Debug, Deserialize, Eq, Hash, Ord, PartialEq, PartialOrd, Serialize)] +#[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)] +#[cfg_attr(feature = "serde", derive(Deserialize, Serialize,))] pub struct GradientId { pub(crate) inner: Id, ptr: PhantomData, diff --git 
a/core/src/cmp/id/id.rs b/core/src/id/id.rs similarity index 85% rename from core/src/cmp/id/id.rs rename to core/src/id/id.rs index 3076213c..febb9966 100644 --- a/core/src/cmp/id/id.rs +++ b/core/src/id/id.rs @@ -4,9 +4,11 @@ */ use super::AtomicId; use petgraph::prelude::NodeIndex; +#[cfg(feature = "serde")] use serde::{Deserialize, Serialize}; -#[derive(Clone, Copy, Debug, Deserialize, Eq, Hash, Ord, PartialEq, PartialOrd, Serialize)] +#[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)] +#[cfg_attr(feature = "serde", derive(Deserialize, Serialize,))] pub struct Id { id: AtomicId, index: NodeIndex, diff --git a/core/src/cmp/id/mod.rs b/core/src/id/mod.rs similarity index 90% rename from core/src/cmp/id/mod.rs rename to core/src/id/mod.rs index d96723ef..0e3edfdd 100644 --- a/core/src/cmp/id/mod.rs +++ b/core/src/id/mod.rs @@ -11,5 +11,7 @@ pub(crate) mod atomic; pub(crate) mod gradient; pub(crate) mod id; +pub trait Identifier {} + #[cfg(test)] mod tests {} diff --git a/core/src/lib.rs b/core/src/lib.rs index 78d9422a..0bb2c3ca 100644 --- a/core/src/lib.rs +++ b/core/src/lib.rs @@ -25,6 +25,7 @@ pub(crate) mod exp; pub mod cmp; pub mod errors; pub mod graphs; +pub mod id; pub mod ops; pub mod stores; @@ -37,6 +38,7 @@ pub mod prelude { pub use crate::errors::*; pub use crate::graphs::scg::Scg; pub use crate::graphs::*; + pub use crate::id::*; pub use crate::ops::*; pub use crate::stores::*; } diff --git a/core/src/ops/arithmetic.rs b/core/src/ops/arithmetic.rs index 345b65a7..1bcfd413 100644 --- a/core/src/ops/arithmetic.rs +++ b/core/src/ops/arithmetic.rs @@ -2,6 +2,7 @@ Appellation: arithmetic Contrib: FL03 */ +#[cfg(feature = "serde")] use serde::{Deserialize, Serialize}; use std::ops::{Add, Div, Mul, Sub}; @@ -10,24 +11,21 @@ pub trait Trig { fn cos(self) -> Self; fn tan(self) -> Self; } -#[derive( - Clone, Copy, Debug, Default, Deserialize, Eq, Hash, Ord, PartialEq, PartialOrd, Serialize, -)] + +#[cfg_attr(feature = "serde", derive(Deserialize, Serialize,))] +#[derive(Clone, Copy, Debug, Default, Eq, Hash, Ord, PartialEq, PartialOrd)] pub struct Addition; -#[derive( - Clone, Copy, Debug, Default, Deserialize, Eq, Hash, Ord, PartialEq, PartialOrd, Serialize, -)] +#[cfg_attr(feature = "serde", derive(Deserialize, Serialize,))] +#[derive(Clone, Copy, Debug, Default, Eq, Hash, Ord, PartialEq, PartialOrd)] pub struct Division; -#[derive( - Clone, Copy, Debug, Default, Deserialize, Eq, Hash, Ord, PartialEq, PartialOrd, Serialize, -)] +#[cfg_attr(feature = "serde", derive(Deserialize, Serialize,))] +#[derive(Clone, Copy, Debug, Default, Eq, Hash, Ord, PartialEq, PartialOrd)] pub struct Multiplication; -#[derive( - Clone, Copy, Debug, Default, Deserialize, Eq, Hash, Ord, PartialEq, PartialOrd, Serialize, -)] +#[cfg_attr(feature = "serde", derive(Deserialize, Serialize,))] +#[derive(Clone, Copy, Debug, Default, Eq, Hash, Ord, PartialEq, PartialOrd)] pub struct Subtraction; macro_rules! 
impl_binary_op { diff --git a/core/src/ops/kinds.rs b/core/src/ops/kinds.rs index 27a82623..42fe3489 100644 --- a/core/src/ops/kinds.rs +++ b/core/src/ops/kinds.rs @@ -4,6 +4,7 @@ */ use super::arithmetic::*; use super::BinaryOperation; +#[cfg(feature = "serde")] use serde::{Deserialize, Serialize}; use smart_default::SmartDefault; use strum::{Display, EnumCount, EnumIs, EnumIter, VariantNames}; @@ -16,12 +17,16 @@ pub enum Op { Unary(T, UnaryOp), } +#[cfg_attr( + feature = "serde", + derive(Deserialize, Serialize,), + serde(rename_all = "lowercase", untagged) +)] #[derive( Clone, Copy, Debug, Default, - Deserialize, Display, EnumCount, EnumIs, @@ -31,12 +36,10 @@ pub enum Op { Ord, PartialEq, PartialOrd, - Serialize, VariantNames, )] #[repr(u8)] -#[serde(rename_all = "snake_case")] -#[strum(serialize_all = "snake_case")] +#[strum(serialize_all = "lowercase")] pub enum CompareOp { #[default] Eq, @@ -51,7 +54,6 @@ pub enum CompareOp { Clone, Copy, Debug, - Deserialize, Display, EnumCount, EnumIs, @@ -61,13 +63,16 @@ pub enum CompareOp { Ord, PartialEq, PartialOrd, - Serialize, SmartDefault, VariantNames, )] +#[cfg_attr( + feature = "serde", + derive(Deserialize, Serialize,), + serde(rename_all = "lowercase", untagged) +)] #[repr(u8)] -#[serde(rename_all = "snake_case")] -#[strum(serialize_all = "snake_case")] +#[strum(serialize_all = "lowercase")] pub enum BinaryOp { #[default] Add(Addition), @@ -157,7 +162,7 @@ impl From for BinaryOp { Clone, Copy, Debug, - Deserialize, + Default, Display, EnumCount, EnumIs, @@ -167,13 +172,15 @@ impl From for BinaryOp { Ord, PartialEq, PartialOrd, - Serialize, - SmartDefault, VariantNames, )] +#[cfg_attr( + feature = "serde", + derive(Deserialize, Serialize,), + serde(rename_all = "lowercase", untagged) +)] #[repr(u8)] -#[serde(rename_all = "snake_case")] -#[strum(serialize_all = "snake_case")] +#[strum(serialize_all = "lowercase")] pub enum UnaryOp { #[default] Abs, @@ -197,7 +204,6 @@ pub enum UnaryOp { #[derive( Clone, Debug, - Deserialize, Display, EnumCount, EnumIs, @@ -207,13 +213,16 @@ pub enum UnaryOp { Ord, PartialEq, PartialOrd, - Serialize, SmartDefault, VariantNames, )] +#[cfg_attr( + feature = "serde", + derive(Deserialize, Serialize,), + serde(rename_all = "lowercase", untagged) +)] #[repr(u8)] -#[serde(rename_all = "snake_case")] -#[strum(serialize_all = "snake_case")] +#[strum(serialize_all = "lowercase")] pub enum Ops { Binary(BinaryOp), Compare(CompareOp), diff --git a/tensor/Cargo.toml b/tensor/Cargo.toml index 994e0b03..c51a5c6c 100644 --- a/tensor/Cargo.toml +++ b/tensor/Cargo.toml @@ -13,12 +13,16 @@ version.workspace = true [features] default = [] +serde = [ + "dep:serde" +] + [build-dependencies] [dependencies] acme-core = { features = [], path = "../core", version = "0.3" } num = "0.4" -serde = { features = ["derive"], version = "1" } +serde = { optional = true, features = ["derive"], version = "1" } strum = { features = ["derive"], version = "0.26" } [dev-dependencies] diff --git a/tensor/src/ops/kinds.rs b/tensor/src/ops/kinds.rs index af4e0db2..2e931327 100644 --- a/tensor/src/ops/kinds.rs +++ b/tensor/src/ops/kinds.rs @@ -3,19 +3,13 @@ Contrib: FL03 */ -pub trait TensorOp { - -} +pub trait TensorOp {} pub enum Op { Binary(BinaryOp), Unary(UnaryOp), } -pub enum BinaryOp { - -} - -pub enum UnaryOp { +pub enum BinaryOp {} -} \ No newline at end of file +pub enum UnaryOp {} diff --git a/tensor/src/shape/rank.rs b/tensor/src/shape/rank.rs index 938ecf1c..98a425bc 100644 --- a/tensor/src/shape/rank.rs +++ b/tensor/src/shape/rank.rs 
@@ -5,13 +5,12 @@ //! # Rank //! //! The rank of a n-dimensional array describes the number of dimensions +#[cfg(feature = "serde")] use serde::{Deserialize, Serialize}; use std::ops::{Deref, DerefMut}; -#[derive( - Clone, Copy, Debug, Default, Deserialize, Eq, Hash, Ord, PartialEq, PartialOrd, Serialize, -)] -#[serde(rename_all = "lowercase")] +#[cfg_attr(feature = "serde", derive(Deserialize, Serialize,))] +#[derive(Clone, Copy, Debug, Default, Eq, Hash, Ord, PartialEq, PartialOrd)] pub struct Rank(pub usize); impl Rank { diff --git a/tensor/src/shape/shape.rs b/tensor/src/shape/shape.rs index 7b9ed0de..3ce5372c 100644 --- a/tensor/src/shape/shape.rs +++ b/tensor/src/shape/shape.rs @@ -3,6 +3,7 @@ Contrib: FL03 */ use super::Rank; +#[cfg(feature = "serde")] use serde::{Deserialize, Serialize}; use std::ops::{self, Deref}; @@ -19,7 +20,8 @@ where } } -#[derive(Clone, Debug, Default, Deserialize, Eq, Hash, Ord, PartialEq, PartialOrd, Serialize)] +#[cfg_attr(feature = "serde", derive(Deserialize, Serialize,))] +#[derive(Clone, Debug, Default, Eq, Hash, Ord, PartialEq, PartialOrd)] pub struct Shape(Vec); impl Shape { diff --git a/tensor/src/specs.rs b/tensor/src/specs.rs index af3cb322..ff367e5b 100644 --- a/tensor/src/specs.rs +++ b/tensor/src/specs.rs @@ -4,7 +4,7 @@ */ use crate::shape::{Rank, Shape}; use crate::store::Layout; -use acme::cmp::id::AtomicId; +use acme::prelude::AtomicId; pub trait Affine { type Output; diff --git a/tensor/src/tensor.rs b/tensor/src/tensor.rs index 1d920d64..c1d5621b 100644 --- a/tensor/src/tensor.rs +++ b/tensor/src/tensor.rs @@ -5,7 +5,7 @@ use crate::data::Scalar; use crate::shape::{IntoShape, Rank, Shape}; use crate::store::Layout; -use acme::cmp::id::AtomicId; +use acme::prelude::AtomicId; // use std::ops::{Index, IndexMut}; // use std::sync::{Arc, RwLock}; From ae2598f0f310cb81df687a600a6053f03360662f Mon Sep 17 00:00:00 2001 From: Joe McCain III Date: Fri, 15 Mar 2024 12:51:44 -0500 Subject: [PATCH 19/87] update Signed-off-by: Joe McCain III --- tensor/src/data/mod.rs | 23 +++++++++++++++++++++++ tensor/src/lib.rs | 3 +++ 2 files changed, 26 insertions(+) diff --git a/tensor/src/data/mod.rs b/tensor/src/data/mod.rs index 7c29a97b..375b282c 100644 --- a/tensor/src/data/mod.rs +++ b/tensor/src/data/mod.rs @@ -6,6 +6,29 @@ pub use self::scalar::*; pub(crate) mod scalar; +#[cfg(not(feature = "std"))] +use alloc::vec::Vec; +use std::ptr::NonNull; + +/// Return a NonNull pointer to the vector's data +pub(crate) fn nonnull_from_vec_data(v: &mut Vec) -> NonNull +{ + // this pointer is guaranteed to be non-null + unsafe { NonNull::new_unchecked(v.as_mut_ptr()) } +} + +/// Converts `ptr` to `NonNull` +/// +/// Safety: `ptr` *must* be non-null. +/// This is checked with a debug assertion, and will panic if this is not true, +/// but treat this as an unconditional conversion. +#[inline] +pub(crate) unsafe fn nonnull_debug_checked_from_ptr(ptr: *mut T) -> NonNull +{ + debug_assert!(!ptr.is_null()); + NonNull::new_unchecked(ptr) +} + #[cfg(test)] mod tests { // use super::*; diff --git a/tensor/src/lib.rs b/tensor/src/lib.rs index 9194e9e2..8c6aa101 100644 --- a/tensor/src/lib.rs +++ b/tensor/src/lib.rs @@ -6,8 +6,11 @@ //! //! 
#![feature(array_chunks)] +#[cfg(not(feature = "std"))] +extern crate alloc; extern crate acme_core as acme; + pub use self::{specs::*, tensor::*}; pub(crate) mod specs; From b9bf7a1bb102466b5a5d7be75c93f96cc939d1ac Mon Sep 17 00:00:00 2001 From: Joe McCain III Date: Fri, 15 Mar 2024 14:46:08 -0500 Subject: [PATCH 20/87] update Signed-off-by: Joe McCain III --- core/src/eval/evaluator.rs | 6 + core/src/eval/mod.rs | 7 + core/src/exp/basic.rs | 167 --------------------- core/src/exp/dynamic/edge.rs | 12 -- core/src/exp/dynamic/graph.rs | 46 ------ core/src/exp/dynamic/mod.rs | 32 ---- core/src/exp/dynamic/node.rs | 57 ------- core/src/exp/mod.rs | 14 -- core/src/exp/ops/addition.rs | 38 ----- core/src/exp/ops/mod.rs | 45 ------ core/src/exp/ops/multiply.rs | 43 ------ core/src/graphs/dcg/edge.rs | 23 +++ core/src/graphs/dcg/graph.rs | 149 ++++++++++++++++++ core/src/graphs/dcg/mod.rs | 41 +++++ core/src/graphs/dcg/node.rs | 34 +++++ core/src/graphs/mod.rs | 39 +++++ core/src/lib.rs | 6 +- core/src/ops/arithmetic.rs | 28 +--- core/src/ops/gradient.rs | 11 ++ core/src/ops/kinds.rs | 2 +- core/src/ops/mod.rs | 4 +- tensor/src/data/mod.rs | 48 ++---- tensor/src/lib.rs | 8 +- tensor/src/ops/kinds.rs | 21 ++- tensor/src/specs/mod.rs | 59 ++++++++ tensor/src/{specs.rs => specs/ndtensor.rs} | 14 +- tensor/src/{data => specs}/scalar.rs | 0 tensor/src/tensor.rs | 2 +- 28 files changed, 414 insertions(+), 542 deletions(-) create mode 100644 core/src/eval/evaluator.rs create mode 100644 core/src/eval/mod.rs delete mode 100644 core/src/exp/basic.rs delete mode 100644 core/src/exp/dynamic/edge.rs delete mode 100644 core/src/exp/dynamic/graph.rs delete mode 100644 core/src/exp/dynamic/mod.rs delete mode 100644 core/src/exp/dynamic/node.rs delete mode 100644 core/src/exp/mod.rs delete mode 100644 core/src/exp/ops/addition.rs delete mode 100644 core/src/exp/ops/mod.rs delete mode 100644 core/src/exp/ops/multiply.rs create mode 100644 core/src/graphs/dcg/edge.rs create mode 100644 core/src/graphs/dcg/graph.rs create mode 100644 core/src/graphs/dcg/mod.rs create mode 100644 core/src/graphs/dcg/node.rs create mode 100644 tensor/src/specs/mod.rs rename tensor/src/{specs.rs => specs/ndtensor.rs} (69%) rename tensor/src/{data => specs}/scalar.rs (100%) diff --git a/core/src/eval/evaluator.rs b/core/src/eval/evaluator.rs new file mode 100644 index 00000000..1acd7259 --- /dev/null +++ b/core/src/eval/evaluator.rs @@ -0,0 +1,6 @@ +/* + Appellation: evaluator + Contrib: FL03 +*/ + +pub struct Evaluator; \ No newline at end of file diff --git a/core/src/eval/mod.rs b/core/src/eval/mod.rs new file mode 100644 index 00000000..cd22b337 --- /dev/null +++ b/core/src/eval/mod.rs @@ -0,0 +1,7 @@ +/* + Appellation: eval + Contrib: FL03 +*/ +pub use self::evaluator::*; + +pub(crate) mod evaluator; diff --git a/core/src/exp/basic.rs b/core/src/exp/basic.rs deleted file mode 100644 index b065a50b..00000000 --- a/core/src/exp/basic.rs +++ /dev/null @@ -1,167 +0,0 @@ -/* - Appellation: basic - Contrib: FL03 -*/ -use std::collections::HashMap; - -// Node structure representing a node in the computational graph -#[derive(Clone, Debug)] -struct Node { - id: usize, - name: String, - inputs: Vec, // Stores indices of parent nodes - operation: String, // Operation performed by the node -} - -// Computational graph structure -#[derive(Clone, Debug)] -struct ComputeGraph { - nodes: Vec, // Stores all nodes in the graph - values: HashMap, // Stores values of nodes -} - -impl ComputeGraph { - // Constructor to create a new empty 
ComputeGraph - fn new() -> Self { - ComputeGraph { - nodes: Vec::new(), - values: HashMap::new(), - } - } - - pub fn variable(&mut self, name: impl ToString, value: Option) -> usize { - let id = self.add_node(name.to_string(), vec![], "input".to_string()); - self.values.insert(id, value.unwrap_or(f64::default())); - id - } - - // Method to add a node to the graph - fn add_node( - &mut self, - name: impl ToString, - inputs: Vec, - operation: impl ToString, - ) -> usize { - let id = self.nodes.len(); - let node = Node { - id, - name: name.to_string(), - inputs, - operation: operation.to_string(), - }; - self.nodes.push(node); - id - } - - // Method to evaluate the value of a node recursively - fn evaluate(&mut self, node_id: usize) -> f64 { - if let Some(value) = self.values.get(&node_id) { - return *value; - } - - let node = self.nodes[node_id].clone(); - let mut result = 0.0; - - // Perform the operation based on the type of node - match node.operation.as_str() { - "input" => result = self.values[&node_id], // Placeholder value for input nodes - "add" => { - for &input_id in &node.inputs { - result += self.evaluate(input_id); - } - } - "multiply" => { - result = 1.0; // Identity element for multiplication - for &input_id in &node.inputs { - result *= self.evaluate(input_id); - } - } - _ => println!("Unsupported operation"), - } - - // Store the computed value for future reference - self.values.insert(node_id, result); - result - } - - // Method to compute gradients using backpropagation - fn grad(&self, target: usize) -> HashMap { - let mut gradients: HashMap = HashMap::new(); - let mut gradients_stack: Vec<(usize, f64)> = Vec::new(); - - // Initialize gradient of output node with respect to itself - gradients.insert(target, 1.0); - gradients_stack.push((target, 1.0)); - - // Compute gradients for all nodes in reverse order - while let Some((i, grad)) = gradients_stack.pop() { - let node = &self.nodes[i]; - - // Compute gradient for each input of the node - for &input_id in &node.inputs { - // Compute gradient contribution from the current node - let gradient_contribution = match node.operation.as_str() { - "add" => grad, - "multiply" => { - // Compute the product of gradients - let output = self.values[&i]; - let value = self.values[&input_id]; - grad * output / value - } - _ => 0.0, // Other operations have zero gradient contribution - }; - - // Update gradient for the input node - *gradients.entry(input_id).or_insert(0.0) += gradient_contribution; - gradients_stack.push((input_id, gradient_contribution)); - } - } - - gradients - } -} - -impl ComputeGraph { - fn add(&mut self, x: usize, y: usize) -> usize { - self.add_node("Add".to_string(), vec![x, y], "add".to_string()) - } - - fn multiply(&mut self, x: usize, y: usize) -> usize { - self.add_node("Multiply".to_string(), vec![x, y], "multiply".to_string()) - } -} - -#[cfg(test)] -mod tests { - use super::*; - - // #[ignore = "The basic compute graph fails to compute the gradients correctly."] - #[test] - fn test_basic_graph() { - // Create a new computational graph - let mut graph = ComputeGraph::new(); - - // Add nodes to the graph - let x = graph.variable("x".to_string(), Some(1.0)); - let y = graph.variable("y".to_string(), Some(2.0)); - let c = graph.add(x, y); - let d = graph.multiply(c, y); - - // Evaluate nodes - let res = graph.evaluate(c); - assert_eq!(res, 3.0); - let res = graph.evaluate(d); - assert_eq!(res, 6.0); - - // Compute gradients - let gc = graph.grad(c); - assert_eq!(gc[&x], 1.0); - assert_eq!(gc[&y], 1.0); - - let 
gd = graph.grad(d); - - // Check gradients - assert_eq!(gd[&x], 2.0); - assert_eq!(gd[&y], 5.0); - } -} diff --git a/core/src/exp/dynamic/edge.rs b/core/src/exp/dynamic/edge.rs deleted file mode 100644 index 55597e48..00000000 --- a/core/src/exp/dynamic/edge.rs +++ /dev/null @@ -1,12 +0,0 @@ -/* - Appellation: value - Contrib: FL03 -*/ -#[cfg(feature = "serde")] -use serde::{Deserialize, Serialize}; - -#[cfg_attr(feature = "serde", derive(Deserialize, Serialize,))] -#[derive(Clone, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)] -pub struct DcgEdge { - op: String, -} diff --git a/core/src/exp/dynamic/graph.rs b/core/src/exp/dynamic/graph.rs deleted file mode 100644 index 94ec0933..00000000 --- a/core/src/exp/dynamic/graph.rs +++ /dev/null @@ -1,46 +0,0 @@ -/* - Appellation: graph - Contrib: FL03 -*/ -use super::{DcgEdge, Node}; -use crate::prelude::Result; -use crate::stores::{GradientStore, Store}; -use petgraph::algo::toposort; -use petgraph::prelude::{DiGraph, NodeIndex}; - -pub struct Dcg { - graph: DiGraph, DcgEdge>, -} - -impl Dcg { - pub fn new() -> Self { - Self { - graph: DiGraph::new(), - } - } - - pub fn clear(&mut self) { - self.graph.clear(); - } - - pub fn get(&self, index: NodeIndex) -> Option<&Node> { - self.graph.node_weight(index) - } - - pub fn variable(&mut self, value: T) -> NodeIndex { - self.graph.add_node(Node::new().with_value(value)) - } -} - -impl Dcg -where - T: Clone + Default + 'static, -{ - pub fn compute_gradients(&mut self, target: NodeIndex) -> Result<()> { - let nodes = toposort(&self.graph, None)?; - - let mut gradients = GradientStore::new(); - gradients.insert(target, self.graph[target].clone()); - Ok(()) - } -} diff --git a/core/src/exp/dynamic/mod.rs b/core/src/exp/dynamic/mod.rs deleted file mode 100644 index bb72bd82..00000000 --- a/core/src/exp/dynamic/mod.rs +++ /dev/null @@ -1,32 +0,0 @@ -/* - Appellation: dynamic - Contrib: FL03 -*/ -//! # Dynamic Compute Graph -//! -//! -//! - A dynamic computational graph considers a DAG whose nodes represent data in the form of tensors. -//! - Edges represent the operations applied to the data. -pub use self::{edge::*, graph::*, node::*}; - -pub(crate) mod edge; -pub(crate) mod graph; -pub(crate) mod node; - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn test_dcg() { - let mut dag = Dcg::new(); - let a = dag.variable(1_f64); - let b = dag.variable(1_f64); - // let c = dag.mul(a, b).unwrap(); - - // let e = dag.add(c, a).unwrap(); - - assert_eq!(*dag.get(a).unwrap().value().unwrap(), 1.0); - // assert_eq!(*dag.get(e).unwrap(), 2.0); - } -} diff --git a/core/src/exp/dynamic/node.rs b/core/src/exp/dynamic/node.rs deleted file mode 100644 index 2648daf2..00000000 --- a/core/src/exp/dynamic/node.rs +++ /dev/null @@ -1,57 +0,0 @@ -/* - Appellation: node - Contrib: FL03 -*/ -//! # Node -//! -//! -//! 
The edges connecting to any given node are considered to be inputs and help to determine the flow of information -use crate::prelude::Ops; -use petgraph::prelude::NodeIndex; -#[cfg(feature = "serde")] -use serde::{Deserialize, Serialize}; - -#[cfg_attr(feature = "serde", derive(Deserialize, Serialize,))] -#[derive(Clone, Debug, Default, Eq, Hash, Ord, PartialEq, PartialOrd)] -pub struct Node { - inputs: Vec, - operation: Option, - value: Option, -} - -impl Node { - pub fn new() -> Self { - Self { - inputs: Vec::new(), - operation: None, - value: None, - } - } - - pub fn with_inputs(mut self, inputs: Vec) -> Self { - self.inputs = inputs; - self - } - - pub fn with_op(mut self, operation: Ops) -> Self { - self.operation = Some(operation); - self - } - - pub fn with_value(mut self, value: T) -> Self { - self.value = Some(value); - self - } - - pub fn inputs(&self) -> &[NodeIndex] { - &self.inputs - } - - pub fn operation(&self) -> Option<&Ops> { - self.operation.as_ref() - } - - pub fn value(&self) -> Option<&T> { - self.value.as_ref() - } -} diff --git a/core/src/exp/mod.rs b/core/src/exp/mod.rs deleted file mode 100644 index a1fae7e3..00000000 --- a/core/src/exp/mod.rs +++ /dev/null @@ -1,14 +0,0 @@ -/* - Appellation: exp - Contrib: FL03 -*/ -//! # Experimental -//! -//! -#![allow(dead_code, unused_imports, unused_variables)] -pub mod basic; -pub mod dynamic; -pub mod ops; - -#[cfg(test)] -mod tests {} diff --git a/core/src/exp/ops/addition.rs b/core/src/exp/ops/addition.rs deleted file mode 100644 index fc1c1fe6..00000000 --- a/core/src/exp/ops/addition.rs +++ /dev/null @@ -1,38 +0,0 @@ -/* - Appellation: addition - Contrib: FL03 -*/ -use crate::ops::{Evaluate, Gradient}; - -#[derive(Clone, Copy, Debug, Default, Eq, Hash, PartialEq, PartialOrd, Ord)] -pub struct Addition(T, T); - -impl Addition { - pub fn new(a: T, b: T) -> Self { - Self(a, b) - } -} - -impl Evaluate for Addition -where - S: Evaluate, - S::Output: std::ops::Add, -{ - type Output = T; - - fn eval(self) -> Self::Output { - self.0.eval() + self.1.eval() - } -} - -impl Gradient for Addition -where - T: Clone + Gradient, - T::Gradient: std::ops::Add, -{ - type Gradient = T::Gradient; - - fn grad(&self, args: T) -> Self::Gradient { - self.0.grad(args.clone()) + self.1.grad(args) - } -} diff --git a/core/src/exp/ops/mod.rs b/core/src/exp/ops/mod.rs deleted file mode 100644 index 794fb4e0..00000000 --- a/core/src/exp/ops/mod.rs +++ /dev/null @@ -1,45 +0,0 @@ -/* - Appellation: ops - Contrib: FL03 -*/ -pub use self::{addition::*, multiply::*}; - -pub(crate) mod addition; -pub(crate) mod multiply; - -pub trait Grad { - type Gradient; - - fn grad(&self, at: T) -> Self::Gradient; -} - -#[cfg(test)] -mod tests { - use super::{Addition, Multiply}; - use crate::cmp::{Constant, Variable}; - use crate::ops::{Evaluate, Gradient}; - - #[test] - fn test_addition() { - let add = Addition::new(Constant::new(1.0), Constant::new(1.0)); - assert_eq!(add.eval(), 2.0); - let x = Variable::new("x").with_value(1.0); - let y = Variable::new("y").with_value(2.0); - let add = Addition::new(x.clone(), y.clone()); - assert_eq!(add.clone().eval(), 3.0); - assert_eq!(add.grad(x.clone()), 1.0); - assert_eq!(add.grad(y.clone()), 1.0); - assert_eq!(add.grad(x.clone()).eval(), add.grad(y.clone()).eval()); - } - - #[test] - fn test_multiply() { - let mul = Multiply::new(Constant::new(2.0), Constant::new(2.0)); - assert_eq!(mul.eval(), 4.0); - let x = Variable::new("x").with_value(2.0); - let y = Variable::new("y").with_value(2.0); - let mul = 
Multiply::new(x.clone(), y.clone());
-        assert_eq!(mul.clone().eval(), 4.0);
-        assert_eq!(mul.grad(x.clone()).eval(), 2.0);
-    }
-}
diff --git a/core/src/exp/ops/multiply.rs b/core/src/exp/ops/multiply.rs
deleted file mode 100644
index 07686542..00000000
--- a/core/src/exp/ops/multiply.rs
+++ /dev/null
@@ -1,43 +0,0 @@
-/*
-    Appellation: addition
-    Contrib: FL03
-*/
-use crate::ops::{Evaluate, Gradient};
-
-#[derive(Clone, Copy, Debug, Default, Eq, Hash, PartialEq, PartialOrd, Ord)]
-pub struct Multiply<T>(T, T);
-
-impl<T> Multiply<T> {
-    pub fn new(a: T, b: T) -> Self {
-        Self(a, b)
-    }
-}
-
-impl<S, T> Evaluate for Multiply<S>
-where
-    S: Evaluate,
-
-    S::Output: std::ops::Mul<Output = T>,
-{
-    type Output = T;
-
-    fn eval(self) -> Self::Output {
-        self.0.eval() * self.1.eval()
-    }
-}
-
-impl<T> Gradient<T> for Multiply<T>
-where
-    T: Clone + Evaluate + Gradient<T> + std::ops::Mul,
-    <T as Evaluate>::Output: std::ops::Mul<T::Gradient, Output = T::Gradient>,
-    <T as Gradient<T>>::Gradient:
-        std::ops::Add<Output = T::Gradient> + std::ops::Mul,
-{
-    type Gradient = T::Gradient;
-
-    fn grad(&self, args: T) -> Self::Gradient {
-        let a = self.1.clone().eval() * self.0.grad(args.clone());
-        let b = self.0.clone().eval() * self.1.grad(args);
-        a + b
-    }
-}
diff --git a/core/src/graphs/dcg/edge.rs b/core/src/graphs/dcg/edge.rs
new file mode 100644
index 00000000..e9236b03
--- /dev/null
+++ b/core/src/graphs/dcg/edge.rs
@@ -0,0 +1,23 @@
+/*
+    Appellation: edge
+    Contrib: FL03
+*/
+use petgraph::graph::NodeIndex;
+#[cfg(feature = "serde")]
+use serde::{Deserialize, Serialize};
+
+#[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)]
+#[cfg_attr(feature = "serde", derive(Deserialize, Serialize))]
+pub struct Edge {
+    source: NodeIndex,
+}
+
+impl Edge {
+    pub fn new(source: NodeIndex) -> Self {
+        Self { source }
+    }
+
+    pub fn source(&self) -> NodeIndex {
+        self.source
+    }
+}
diff --git a/core/src/graphs/dcg/graph.rs b/core/src/graphs/dcg/graph.rs
new file mode 100644
index 00000000..6233702f
--- /dev/null
+++ b/core/src/graphs/dcg/graph.rs
@@ -0,0 +1,149 @@
+/*
+    Appellation: graph
+    Contrib: FL03
+*/
+use super::edge::Edge;
+use super::node::Node;
+use super::DynamicGraph;
+use crate::ops::*;
+use crate::prelude::Result;
+use num::traits::{Num, NumAssignOps, NumOps};
+use petgraph::algo::toposort;
+use petgraph::prelude::{Direction, NodeIndex};
+use std::collections::HashMap;
+use std::ops::Index;
+
+pub struct Dcg<T> {
+    store: DynamicGraph<T>,
+}
+
+impl<T> Dcg<T> {
+    pub fn new() -> Self {
+        Dcg {
+            store: DynamicGraph::new(),
+        }
+    }
+
+    pub fn get(&self, index: NodeIndex) -> Option<&Node<T>> {
+        self.store.node_weight(index)
+    }
+
+    pub fn include(&mut self, node: impl Into<Node<T>>) -> NodeIndex {
+        self.store.add_node(node.into())
+    }
+
+    pub fn remove(&mut self, index: NodeIndex) -> Option<Node<T>> {
+        self.store.remove_node(index)
+    }
+
+    pub fn input(&mut self, param: bool, value: T) -> NodeIndex {
+        self.store.add_node(Node::input(param, value))
+    }
+
+    pub fn op(
+        &mut self,
+        inputs: impl IntoIterator<Item = NodeIndex>,
+        op: impl Into<Ops>,
+    ) -> NodeIndex {
+        let args = Vec::from_iter(inputs);
+
+        let c = self.store.add_node(Node::op(args.clone(), op));
+        for arg in args {
+            self.store.add_edge(arg, c, Edge::new(arg));
+        }
+        c
+    }
+}
+
+impl<T> Dcg<T> {
+    pub fn add(&mut self, lhs: NodeIndex, rhs: NodeIndex) -> NodeIndex {
+        self.op([lhs, rhs], BinaryOp::add())
+    }
+
+    pub fn mul(&mut self, lhs: NodeIndex, rhs: NodeIndex) -> NodeIndex {
+        self.op([lhs, rhs], BinaryOp::mul())
+    }
+
+    pub fn backward(&self) -> Result<HashMap<NodeIndex, T>>
+    where
+        T: Copy + Default + Num + NumAssignOps + NumOps,
+    {
+        let sorted = toposort(&self.store, None)?;
+        let target = *sorted.last().unwrap();
+
+        let mut gradients = HashMap::<NodeIndex, T>::new();
+        gradients.insert(target, T::one());
+
+        for node in sorted.iter().rev() {
+            let node_grad = gradients[node];
+            let node_op = self.get(*node).unwrap();
+
+            if let Node::Op { inputs, op } = node_op {
+                match op {
+                    Ops::Binary(BinaryOp::Add(_)) => {
+                        for arg in self.store.neighbors_directed(*node, Direction::Incoming) {
+                            *gradients.entry(arg).or_default() += node_grad;
+                        }
+                    }
+                    Ops::Binary(BinaryOp::Mul(_)) => {
+                        let lhs = inputs[0];
+                        let rhs = inputs[1];
+                        let lhs_val = self.get(lhs).unwrap().get_value();
+                        let rhs_val = self.get(rhs).unwrap().get_value();
+                        *gradients.entry(lhs).or_default() += node_grad * rhs_val;
+                        *gradients.entry(rhs).or_default() += node_grad * lhs_val;
+                    }
+                    // Handle other operations as needed
+                    _ => {}
+                }
+            }
+        }
+
+        Ok(gradients)
+    }
+
+    pub fn gradient(&self, output: NodeIndex) -> Result<HashMap<NodeIndex, T>>
+    where
+        T: Copy + Default + Num + NumAssignOps + NumOps,
+    {
+        let mut gradients = HashMap::<NodeIndex, T>::new();
+        gradients.insert(output, T::one()); // Initialize output gradient to 1.0
+
+        let topo = toposort(&self.store, None)?;
+
+        for node in topo.iter().rev() {
+            let node_grad = gradients[node];
+            let node_op = self.get(*node).unwrap();
+
+            if let Node::Op { inputs, op } = node_op {
+                match op {
+                    Ops::Binary(BinaryOp::Add(_)) => {
+                        for arg in self.store.neighbors_directed(*node, Direction::Incoming) {
+                            *gradients.entry(arg).or_default() += node_grad;
+                        }
+                    }
+                    Ops::Binary(BinaryOp::Mul(_)) => {
+                        let lhs = inputs[0];
+                        let rhs = inputs[1];
+                        let lhs_val = self[lhs].get_value();
+                        let rhs_val = self[rhs].get_value();
+                        *gradients.entry(lhs).or_default() += node_grad * rhs_val;
+                        *gradients.entry(rhs).or_default() += node_grad * lhs_val;
+                    }
+                    // Handle other operations as needed
+                    _ => {}
+                }
+            }
+        }
+
+        Ok(gradients)
+    }
+}
+
+impl<T> Index<NodeIndex> for Dcg<T> {
+    type Output = Node<T>;
+
+    fn index(&self, index: NodeIndex) -> &Self::Output {
+        self.get(index).unwrap()
+    }
+}
diff --git a/core/src/graphs/dcg/mod.rs b/core/src/graphs/dcg/mod.rs
new file mode 100644
index 00000000..8f40b94d
--- /dev/null
+++ b/core/src/graphs/dcg/mod.rs
@@ -0,0 +1,41 @@
+/*
+    Appellation: dcg
+    Contrib: FL03
+*/
+//! # Dynamic Compute Graph
+//!
+//! A computational graph forms the backbone of automatic differentiation. Computational graphs are directed acyclic graphs (DAGs)
+//! that represent any computation as a series of nodes and edges.
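+//!
+//! A minimal usage sketch, mirroring the test at the bottom of this module:
+//! `input` registers leaf values, `add`/`mul` insert operation nodes wired to
+//! their arguments, and `gradient` walks the graph in reverse topological
+//! order to accumulate partial derivatives.
+//!
+//! ```ignore
+//! let mut dcg = Dcg::<f64>::new();
+//! let a = dcg.input(true, 2.0);
+//! let b = dcg.input(true, 3.0);
+//! let c = dcg.mul(a, b);
+//!
+//! let grad = dcg.gradient(c).unwrap();
+//! assert_eq!(grad[&a], 3.0); // d(a * b)/da = b
+//! assert_eq!(grad[&b], 2.0); // d(a * b)/db = a
+//! ```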
+pub use self::graph::Dcg;
+
+pub(crate) mod graph;
+
+pub mod edge;
+pub mod node;
+
+pub(crate) type DynamicGraph<T> = petgraph::graph::DiGraph<node::Node<T>, edge::Edge>;
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    #[test]
+    fn test_dcg() {
+        let mut dcg = Dcg::<f64>::new();
+        let a = dcg.input(true, 2.0);
+        let b = dcg.input(true, 3.0);
+        let c = dcg.add(a, b);
+
+        let grad = dcg.gradient(c).unwrap();
+        assert_eq!(grad[&a], 1.0);
+
+        let mut dcg = Dcg::<f64>::new();
+        let a = dcg.input(true, 2.0);
+        let b = dcg.input(true, 3.0);
+        let c = dcg.mul(a, b);
+
+        let grad = dcg.gradient(c).unwrap();
+        assert_eq!(grad[&a], 3.0);
+        assert_eq!(grad[&b], 2.0);
+    }
+}
diff --git a/core/src/graphs/dcg/node.rs b/core/src/graphs/dcg/node.rs
new file mode 100644
index 00000000..9b5cb805
--- /dev/null
+++ b/core/src/graphs/dcg/node.rs
@@ -0,0 +1,34 @@
+/*
+    Appellation: node
+    Contrib: FL03
+*/
+use crate::ops::Ops;
+use petgraph::prelude::NodeIndex;
+
+pub enum Node<T> {
+    Op { inputs: Vec<NodeIndex>, op: Ops },
+    Input { param: bool, value: T },
+}
+
+impl<T> Node<T> {
+    pub fn op(inputs: impl IntoIterator<Item = NodeIndex>, op: impl Into<Ops>) -> Self {
+        Node::Op {
+            inputs: Vec::from_iter(inputs),
+            op: op.into(),
+        }
+    }
+
+    pub fn input(param: bool, value: T) -> Self {
+        Node::Input { param, value }
+    }
+
+    pub fn get_value(&self) -> T
+    where
+        T: Copy + Default,
+    {
+        match self {
+            Node::Input { value, .. } => *value,
+            _ => T::default(),
+        }
+    }
+}
diff --git a/core/src/graphs/mod.rs b/core/src/graphs/mod.rs
index 20f9d124..8941bdf1 100644
--- a/core/src/graphs/mod.rs
+++ b/core/src/graphs/mod.rs
@@ -10,4 +10,43 @@
 //! In a dynamic computational graph (DCG), the graph considers the nodes to be tensors and the edges to be operations.
 //!
 
+pub mod dcg;
 pub mod scg;
+
+pub trait GraphEntry {
+    type Idx;
+    type Weight;
+}
+
+pub trait ComputeGraph {
+    type Edge: GraphEntry;
+    type Node: GraphEntry;
+
+    fn add_node(&mut self, node: <Self::Node as GraphEntry>::Weight) -> <Self::Node as GraphEntry>::Idx;
+
+    fn add_edge(
+        &mut self,
+        source: <Self::Node as GraphEntry>::Idx,
+        target: <Self::Node as GraphEntry>::Idx,
+        weight: <Self::Edge as GraphEntry>::Weight,
+    ) -> <Self::Edge as GraphEntry>::Idx;
+
+    fn clear(&mut self);
+}
+
+pub(crate) mod prelude {
+    pub use super::dcg::Dcg;
+    pub use super::scg::Scg;
+}
+
+#[cfg(test)]
+mod tests {
+    use super::prelude::*;
+
+    #[test]
+    fn test_dcg() {
+        let mut dcg = Dcg::<f64>::new();
+        let _input = dcg.input(true, 1.0);
+        assert_eq!(1, 1);
+    }
+}
diff --git a/core/src/lib.rs b/core/src/lib.rs
index 0bb2c3ca..033a0c51 100644
--- a/core/src/lib.rs
+++ b/core/src/lib.rs
@@ -20,10 +20,9 @@ pub(crate) mod primitives;
 pub(crate) mod specs;
 pub(crate) mod utils;
 
-pub(crate) mod exp;
-
 pub mod cmp;
 pub mod errors;
+pub mod eval;
 pub mod graphs;
 pub mod id;
 pub mod ops;
 pub mod stores;
@@ -36,8 +35,7 @@ pub mod prelude {
 
     pub use crate::cmp::*;
     pub use crate::errors::*;
-    pub use crate::graphs::scg::Scg;
-    pub use crate::graphs::*;
+    pub use crate::graphs::prelude::*;
     pub use crate::id::*;
     pub use crate::ops::*;
     pub use crate::stores::*;
 }
diff --git a/core/src/ops/arithmetic.rs b/core/src/ops/arithmetic.rs
index 1bcfd413..9469f1ee 100644
--- a/core/src/ops/arithmetic.rs
+++ b/core/src/ops/arithmetic.rs
@@ -12,37 +12,25 @@ pub trait Trig {
     fn cos(self) -> Self;
     fn tan(self) -> Self;
 }
 
-#[cfg_attr(feature = "serde", derive(Deserialize, Serialize,))]
-#[derive(Clone, Copy, Debug, Default, Eq, Hash, Ord, PartialEq, PartialOrd)]
-pub struct Addition;
-
-#[cfg_attr(feature = "serde", derive(Deserialize, Serialize,))]
-#[derive(Clone, Copy, Debug, Default, Eq, Hash, Ord, PartialEq, PartialOrd)]
-pub struct Division;
-
-#[cfg_attr(feature = "serde", derive(Deserialize, Serialize,))]
-#[derive(Clone, Copy, Debug, Default, Eq, Hash, Ord, PartialEq, PartialOrd)]
Hash, Ord, PartialEq, PartialOrd)] -pub struct Multiplication; - -#[cfg_attr(feature = "serde", derive(Deserialize, Serialize,))] -#[derive(Clone, Copy, Debug, Default, Eq, Hash, Ord, PartialEq, PartialOrd)] -pub struct Subtraction; - macro_rules! impl_binary_op { ($op:ident, $bound:ident, $exp:expr) => { + #[derive(Clone, Copy, Debug, Default, Eq, Hash, Ord, PartialEq, PartialOrd)] + #[cfg_attr(feature = "serde", derive(Deserialize, Serialize,))] + pub struct $op; + impl $op { pub fn new() -> Self { Self } } - impl crate::ops::BinaryOperation for $op + impl crate::ops::BinaryOperation for $op where - T: $bound, + A: $bound, { - type Output = T; + type Output = C; - fn eval(&self, lhs: T, rhs: T) -> Self::Output { + fn eval(&self, lhs: A, rhs: B) -> Self::Output { $exp(lhs, rhs) } } diff --git a/core/src/ops/gradient.rs b/core/src/ops/gradient.rs index edb0175d..1b43cb8a 100644 --- a/core/src/ops/gradient.rs +++ b/core/src/ops/gradient.rs @@ -23,6 +23,17 @@ pub trait Grad { fn grad(&self, at: T, wrt: &str) -> Self::Output; } +pub trait Partial { + type Args; + type Output; + + fn partial(&self) -> fn(Self::Args) -> Self::Output; + + fn partial_at(&self, args: Self::Args) -> Self::Output { + (self.partial())(args) + } +} + pub trait Parameter { type Key; type Value; diff --git a/core/src/ops/kinds.rs b/core/src/ops/kinds.rs index 42fe3489..f0a28b69 100644 --- a/core/src/ops/kinds.rs +++ b/core/src/ops/kinds.rs @@ -116,7 +116,7 @@ impl BinaryOp { } } -impl BinaryOperation for BinaryOp +impl BinaryOperation for BinaryOp where T: Copy + Default + PartialOrd + num::traits::NumOps, { diff --git a/core/src/ops/mod.rs b/core/src/ops/mod.rs index 317671f6..6e3b2419 100644 --- a/core/src/ops/mod.rs +++ b/core/src/ops/mod.rs @@ -46,10 +46,10 @@ impl Evaluate for f64 { } } -pub trait BinaryOperation { +pub trait BinaryOperation { type Output; - fn eval(&self, lhs: T, rhs: T) -> Self::Output; + fn eval(&self, lhs: A, rhs: B) -> Self::Output; } pub trait UnaryOperation { diff --git a/tensor/src/data/mod.rs b/tensor/src/data/mod.rs index 375b282c..3a232217 100644 --- a/tensor/src/data/mod.rs +++ b/tensor/src/data/mod.rs @@ -1,18 +1,20 @@ /* - Appellation: scalar + Appellation: data Contrib: FL03 */ -pub use self::scalar::*; -pub(crate) mod scalar; +pub unsafe trait RawData { + type Elem; +} + +pub trait Data: RawData {} #[cfg(not(feature = "std"))] use alloc::vec::Vec; use std::ptr::NonNull; /// Return a NonNull pointer to the vector's data -pub(crate) fn nonnull_from_vec_data(v: &mut Vec) -> NonNull -{ +pub(crate) fn nonnull_from_vec_data(v: &mut Vec) -> NonNull { // this pointer is guaranteed to be non-null unsafe { NonNull::new_unchecked(v.as_mut_ptr()) } } @@ -23,42 +25,10 @@ pub(crate) fn nonnull_from_vec_data(v: &mut Vec) -> NonNull /// This is checked with a debug assertion, and will panic if this is not true, /// but treat this as an unconditional conversion. #[inline] -pub(crate) unsafe fn nonnull_debug_checked_from_ptr(ptr: *mut T) -> NonNull -{ +pub(crate) unsafe fn nonnull_debug_checked_from_ptr(ptr: *mut T) -> NonNull { debug_assert!(!ptr.is_null()); NonNull::new_unchecked(ptr) } #[cfg(test)] -mod tests { - // use super::*; - - macro_rules! 
Scalar { - (complex) => { - Scalar!(cf64) - }; - (float) => { - Scalar!(f64) - }; - (cf64) => { - Complex - }; - (cf32) => { - Complex - }; - (f64) => { - f64 - }; - (f32) => { - f32 - }; - - } - - #[test] - fn test_scalar() { - let a: Scalar!(f64); - a = 3.0; - assert_eq!(a, 3_f64); - } -} +mod tests {} diff --git a/tensor/src/lib.rs b/tensor/src/lib.rs index 8c6aa101..861f2518 100644 --- a/tensor/src/lib.rs +++ b/tensor/src/lib.rs @@ -11,22 +11,22 @@ extern crate alloc; extern crate acme_core as acme; -pub use self::{specs::*, tensor::*}; +pub use self::tensor::*; -pub(crate) mod specs; pub(crate) mod tensor; pub mod data; pub mod ops; pub mod shape; +pub mod specs; pub mod store; pub mod prelude { - pub use crate::specs::*; + pub use crate::tensor::TensorBase; pub use crate::data::*; pub use crate::ops::*; pub use crate::shape::*; + pub use crate::specs::prelude::*; pub use crate::store::*; - pub use crate::tensor::*; } diff --git a/tensor/src/ops/kinds.rs b/tensor/src/ops/kinds.rs index 2e931327..fedbdd65 100644 --- a/tensor/src/ops/kinds.rs +++ b/tensor/src/ops/kinds.rs @@ -2,14 +2,27 @@ Appellation: kinds Contrib: FL03 */ +use crate::TensorBase; pub trait TensorOp {} -pub enum Op { - Binary(BinaryOp), - Unary(UnaryOp), +pub enum Op { + Binary(Box>, Box>, BinaryOp), + Unary(Box>, UnaryOp), } -pub enum BinaryOp {} +pub enum BinaryOp { + Add, + Div, + Mul, + Sub, +} pub enum UnaryOp {} + +pub enum Expr { + Binary(BinaryOp), + Unary(UnaryOp), + Scalar(T), + Tensor(TensorBase), +} diff --git a/tensor/src/specs/mod.rs b/tensor/src/specs/mod.rs new file mode 100644 index 00000000..cf76149a --- /dev/null +++ b/tensor/src/specs/mod.rs @@ -0,0 +1,59 @@ +/* + Appellation: specs + Contrib: FL03 +*/ + +pub mod ndtensor; +pub mod scalar; + +pub trait Affine { + type Output; + + fn affine(&self, mul: &T, add: &T) -> Self::Output; +} + +pub trait Matmul { + type Output; + + fn matmul(&self, rhs: &Rhs) -> Self::Output; +} + +pub(crate) mod prelude { + pub use super::ndtensor::*; + pub use super::scalar::*; + pub use super::{Affine, Matmul}; +} + +#[cfg(test)] +mod tests { + // use super::*; + + macro_rules! 
Scalar { + (complex) => { + Scalar!(cf64) + }; + (float) => { + Scalar!(f64) + }; + (cf64) => { + Complex + }; + (cf32) => { + Complex + }; + (f64) => { + f64 + }; + (f32) => { + f32 + }; + + } + + #[test] + fn test_scalar() { + let a: Scalar!(f64); + a = 3.0; + assert_eq!(a, 3_f64); + } +} diff --git a/tensor/src/specs.rs b/tensor/src/specs/ndtensor.rs similarity index 69% rename from tensor/src/specs.rs rename to tensor/src/specs/ndtensor.rs index ff367e5b..810e67a9 100644 --- a/tensor/src/specs.rs +++ b/tensor/src/specs/ndtensor.rs @@ -1,23 +1,11 @@ /* - Appellation: specs + Appellation: ndtensor Contrib: FL03 */ use crate::shape::{Rank, Shape}; use crate::store::Layout; use acme::prelude::AtomicId; -pub trait Affine { - type Output; - - fn affine(&self, mul: &T, add: &T) -> Self::Output; -} - -pub trait Matmul { - type Output; - - fn matmul(&self, rhs: &Rhs) -> Self::Output; -} - pub trait NdTensor { fn elements(&self) -> usize { self.layout().elements() diff --git a/tensor/src/data/scalar.rs b/tensor/src/specs/scalar.rs similarity index 100% rename from tensor/src/data/scalar.rs rename to tensor/src/specs/scalar.rs diff --git a/tensor/src/tensor.rs b/tensor/src/tensor.rs index c1d5621b..6c9bcdee 100644 --- a/tensor/src/tensor.rs +++ b/tensor/src/tensor.rs @@ -2,7 +2,7 @@ Appellation: tensor Contrib: FL03 */ -use crate::data::Scalar; +use crate::prelude::Scalar; use crate::shape::{IntoShape, Rank, Shape}; use crate::store::Layout; use acme::prelude::AtomicId; From 0190bdd73d2db4eaff9fb320419aadaba011cb58 Mon Sep 17 00:00:00 2001 From: Joe McCain III Date: Sat, 16 Mar 2024 06:53:40 -0500 Subject: [PATCH 21/87] update Signed-off-by: Joe McCain III --- .github/workflows/clippy.yml | 16 ++++++------- .github/workflows/crates.yml | 46 ++++++++++++------------------------ .github/workflows/rust.yml | 38 +++++++++++++++-------------- 3 files changed, 43 insertions(+), 57 deletions(-) diff --git a/.github/workflows/clippy.yml b/.github/workflows/clippy.yml index 2257078b..549127d0 100644 --- a/.github/workflows/clippy.yml +++ b/.github/workflows/clippy.yml @@ -2,19 +2,19 @@ name: Clippy concurrency: group: ${{ github.workflow }}-${{ github.ref }} - cancel-in-progress: false + cancel-in-progress: true on: pull_request: - branches-ignore: [ "dev*", "next*" ] + branches: [ main ] push: - branches-ignore: [ "dev*", "next*" ] - tags: [ "beta*", "nightly*", "prod*", "v*.*.*"] + branches: [ main ] + tags: [ nightly*, v*.*.* ] release: repository_dispatch: - types: [ "clippy" ] + types: [ clippy ] schedule: - - cron: "30 21 * * *" + - cron: "30 21 * * 0" # Every Sunday at 21:30 UTC workflow_dispatch: permissions: @@ -27,7 +27,7 @@ jobs: name: Clippy runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Install Rust toolchain uses: actions-rs/toolchain@16499b5e05bf2e26879000db0c1d13f7e13fa3af #@v1 with: @@ -44,7 +44,7 @@ jobs: --message-format=json | clippy-sarif | tee rust-clippy-results.sarif | sarif-fmt continue-on-error: true - name: Upload results - uses: github/codeql-action/upload-sarif@v2 + uses: github/codeql-action/upload-sarif@v3 with: sarif_file: rust-clippy-results.sarif wait-for-processing: true \ No newline at end of file diff --git a/.github/workflows/crates.yml b/.github/workflows/crates.yml index d318f0d4..6e592d4e 100644 --- a/.github/workflows/crates.yml +++ b/.github/workflows/crates.yml @@ -9,59 +9,43 @@ env: on: push: - tags: [ "nightly*", "v*.*.*" ] + tags: [ v*.*.* ] release: - types: [ "created" ] + types: [ created ] 
repository_dispatch: - types: [ "publish" ] + types: [ publish ] workflow_dispatch: jobs: - build: - name: build + core: + name: Publish (core) runs-on: ubuntu-latest + strategy: + matrix: + features: [ core ] steps: - - uses: actions/checkout@v3 - - name: setup (langspace) - run: | - rustup update - rustup default nightly - - id: rust-build - name: Build - run: cargo build -r -v --workspace - - uses: actions/cache@v2 - id: rust-cache - name: Cache build - with: - path: | - ~/.cargo/registry - ~/.cargo/git - target - key: ${{ runner.os }}-cargo-${{ hashFiles('**/Cargo.lock') }} + - uses: actions/checkout@v4 + - name: Publish (${{ github.ref.name }}-${{ matrix.features }}) + run: cargo publish --all-features -v -p ${{ github.ref.name }}-${{ matrix.features }} --token ${{ secrets.CARGO_REGISTRY_TOKEN }} sdk: concurrency: group: ${{ github.workflow }}-${{ github.ref }}-sdk cancel-in-progress: false - env: - SDK_NAME: fluidity name: Publish (sdk) - needs: build + needs: core runs-on: ubuntu-latest strategy: matrix: - features: [ core, derive, macros ] + features: [ derive, macros, tensor ] steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Publish (${{ github.ref.name }}-${{ matrix.features }}) run: cargo publish --all-features -v -p ${{ github.ref.name }}-${{ matrix.features }} --token ${{ secrets.CARGO_REGISTRY_TOKEN }} publish: - concurrency: - group: ${{ github.workflow }}-${{ github.ref }}-publish - cancel-in-progress: false name: Publish (${{ github.ref.name }}) needs: sdk runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Publish (${{ github.ref.name }}) run: cargo publish --all-features -v -p ${{ github.ref.name }} --token ${{ secrets.CARGO_REGISTRY_TOKEN }} \ No newline at end of file diff --git a/.github/workflows/rust.yml b/.github/workflows/rust.yml index e74d07cc..452c6b80 100644 --- a/.github/workflows/rust.yml +++ b/.github/workflows/rust.yml @@ -9,15 +9,15 @@ env: on: pull_request: - branches-ignore: [ "beta*", "dev*", "next*" ] + branches: [ main ] push: - branches-ignore: [ "beta*", "dev*", "next*" ] - tags: [ "nightly*", "v*.*.*" ] + branches: [ main ] + tags: [ nightly*, v*.*.* ] release: repository_dispatch: - types: [ "publish" ] + types: [ rust ] schedule: - - cron: "30 21 * * *" # 9:30pm UTC + - cron: "30 21 * * 0" # Every Sunday at 21:30 UTC workflow_dispatch: jobs: @@ -26,19 +26,20 @@ jobs: strategy: matrix: platform: [ ubuntu-latest ] + toolchain: [ stable, nightly ] runs-on: ${{ matrix.platform }} steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: setup (langspace) run: | rustup update - rustup default nightly + rustup default ${{ matrix.toolchain }} - name: Build id: rust-build - run: cargo build -F full -r -v --workspace + run: cargo build --features full -r -v --workspace - name: Cache build id: rust-cache - uses: actions/cache@v2 + uses: actions/cache@v4 with: path: | ~/.cargo/registry @@ -49,27 +50,28 @@ jobs: name: Test strategy: matrix: - platform: [ ubuntu-latest ] toolchain: [ nightly ] runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 - - name: setup (langspace) - run: | - rustup update - rustup default ${{ matrix.toolchain }} + - uses: actions/checkout@v4 + - name: Install Rust toolchain + uses: actions-rs/toolchain@v1 #@v1 + with: + override: true + profile: minimal + toolchain: ${{ matrix.toolchain }} - name: Test id: rust-test - run: cargo test --all -F full -r -v + run: cargo test --all-features -r -v --workspace bench: name: Bench runs-on: 
ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: setup (langspace) run: | rustup update rustup default nightly - name: Bench id: rust-bench - run: cargo bench --all -F full -r -v \ No newline at end of file + run: cargo bench --features full -v --workspace \ No newline at end of file From 3a2e72b90fbc144bed96b7fffdeda788d761ef95 Mon Sep 17 00:00:00 2001 From: Joe McCain III Date: Sat, 16 Mar 2024 07:09:29 -0500 Subject: [PATCH 22/87] update Signed-off-by: Joe McCain III --- SECURITY.md | 2 +- core/examples/func.rs | 39 ------------------------------- core/src/cmp/variables.rs | 22 ++++++++++------- core/src/eval/evaluator.rs | 2 +- core/src/graphs/mod.rs | 5 +++- core/src/specs/func/mod.rs | 4 ++++ core/src/specs/func/structural.rs | 2 ++ 7 files changed, 25 insertions(+), 51 deletions(-) delete mode 100644 core/examples/func.rs diff --git a/SECURITY.md b/SECURITY.md index f625185a..47ca9cce 100644 --- a/SECURITY.md +++ b/SECURITY.md @@ -7,7 +7,7 @@ currently being supported with security updates. | Package | Current | Supported | |--------------|---------|-----------| -| acme | 0.2.1 | <=0.2.0 | +| acme | 0.3.0 | <=0.3.0 | ## Reporting a Vulnerability diff --git a/core/examples/func.rs b/core/examples/func.rs deleted file mode 100644 index 7bd753eb..00000000 --- a/core/examples/func.rs +++ /dev/null @@ -1,39 +0,0 @@ -/* - Appellation: simple - Contrib: FL03 -*/ -extern crate acme_core as acme; - -use acme::prelude::BoxResult; - -fn main() -> BoxResult { - sample(); - Ok(()) -} - -#[macro_use] -mod macros { - macro_rules! autodiff { - (eval $f:expr) => { - $f() - }; - (grad $df:expr) => { - $df() - }; - } - - macro_rules! func { - ($f:expr) => { - $f - }; - (eval $f:expr, ($($xs:literal),*)) => { - $f($($xs),*) - } - - } -} - -fn sample() { - let f = func!(|x, y, z| z * (x + y)); - println!("{:?}", func!(eval f, (1.0, 2.0, 3.0))); -} diff --git a/core/src/cmp/variables.rs b/core/src/cmp/variables.rs index c1065077..0294cf7c 100644 --- a/core/src/cmp/variables.rs +++ b/core/src/cmp/variables.rs @@ -23,14 +23,8 @@ impl Variable { } } - pub fn with_name(mut self, name: impl ToString) -> Self { - self.name = name.to_string(); - self - } - - pub fn with_value(mut self, value: T) -> Self { - self.value = Some(value); - self + pub fn is_none(&self) -> bool { + self.value.is_none() } pub fn name(&self) -> &str { @@ -44,6 +38,16 @@ impl Variable { pub fn set(&mut self, value: T) { self.value = Some(value); } + + pub fn with_name(mut self, name: impl ToString) -> Self { + self.name = name.to_string(); + self + } + + pub fn with_value(mut self, value: T) -> Self { + self.value = Some(value); + self + } } impl std::fmt::Display for Variable { @@ -88,7 +92,7 @@ where type Output = Self; fn add(self, rhs: Self) -> Self::Output { - let name = format!("{} + {}", self.name, rhs.name); + let name = "+".to_string(); let value = self.eval() + rhs.eval(); Variable::new(name).with_value(value) } diff --git a/core/src/eval/evaluator.rs b/core/src/eval/evaluator.rs index 1acd7259..f7e20587 100644 --- a/core/src/eval/evaluator.rs +++ b/core/src/eval/evaluator.rs @@ -3,4 +3,4 @@ Contrib: FL03 */ -pub struct Evaluator; \ No newline at end of file +pub struct Evaluator; diff --git a/core/src/graphs/mod.rs b/core/src/graphs/mod.rs index 8941bdf1..62ce9783 100644 --- a/core/src/graphs/mod.rs +++ b/core/src/graphs/mod.rs @@ -22,7 +22,10 @@ pub trait ComputeGraph { type Edge: GraphEntry; type Node: GraphEntry; - fn add_node(&mut self, node: ::Weight) -> ::Idx; + fn add_node( + &mut 
self, + node: ::Weight, + ) -> ::Idx; fn add_edge( &mut self, diff --git a/core/src/specs/func/mod.rs b/core/src/specs/func/mod.rs index 1a7b9dbf..400f9e13 100644 --- a/core/src/specs/func/mod.rs +++ b/core/src/specs/func/mod.rs @@ -2,5 +2,9 @@ Appellation: func Contrib: FL03 */ +pub use self::structural::*; pub(crate) mod structural; + +#[cfg(test)] +mod tests {} diff --git a/core/src/specs/func/structural.rs b/core/src/specs/func/structural.rs index 697204d6..409c63d8 100644 --- a/core/src/specs/func/structural.rs +++ b/core/src/specs/func/structural.rs @@ -8,3 +8,5 @@ pub trait StructuralFn { fn eval(&self) -> Self::Output; } + +pub trait StructuredArgs {} From f80f20220f57c7bca3ebcd13aa6b510c907d4070 Mon Sep 17 00:00:00 2001 From: Joe McCain III Date: Sun, 17 Mar 2024 11:14:21 -0500 Subject: [PATCH 23/87] update Signed-off-by: Joe McCain III --- core/src/errors/error.rs | 23 ++++-- tensor/src/linalg/mod.rs | 11 +++ tensor/src/ops/backprop.rs | 26 +++++- tensor/src/ops/kinds.rs | 26 +++++- tensor/src/ops/mod.rs | 4 +- tensor/src/shape/shape.rs | 11 +++ tensor/src/tensor.rs | 158 ++++++++++++++++++++++++++++++++++++- tensor/tests/arith.rs | 57 +++++++++++++ tensor/tests/default.rs | 17 +++- tensor/tests/tensor.rs | 6 +- 10 files changed, 322 insertions(+), 17 deletions(-) create mode 100644 tensor/src/linalg/mod.rs create mode 100644 tensor/tests/arith.rs diff --git a/core/src/errors/error.rs b/core/src/errors/error.rs index 5ec82d6d..b4614150 100644 --- a/core/src/errors/error.rs +++ b/core/src/errors/error.rs @@ -36,12 +36,6 @@ impl From for Error { } } -impl From> for Error { - fn from(err: Box) -> Self { - Self::new(ErrorKind::Unknown, err.to_string()) - } -} - impl From> for Error { fn from(err: std::sync::TryLockError) -> Self { Self::new(ErrorKind::Sync, err.to_string()) @@ -62,3 +56,20 @@ impl From for Error { Self::new(ErrorKind::Graph, "Negative Cycle detected") } } + +macro_rules! error_from { + (shared $kind:expr, ($($t:ty),*)) => { + $( + error_from!($kind, $t); + )* + }; + ($kind:expr, $t:ty) => { + impl From<$t> for Error { + fn from(err: $t) -> Self { + Self::new($kind, err.to_string()) + } + } + }; +} + +error_from!(shared ErrorKind::Unknown, (&str, String, Box)); diff --git a/tensor/src/linalg/mod.rs b/tensor/src/linalg/mod.rs new file mode 100644 index 00000000..56087979 --- /dev/null +++ b/tensor/src/linalg/mod.rs @@ -0,0 +1,11 @@ +/* + Appellation: linal + Contrib: FL03 +*/ +//! # Linear Algebra +//! +//! 
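+//! Matrix multiplication lands on `TensorBase` in this patch as a plain
+//! triple loop over the flat, row-major stores: an (m, k) by (k, n) product
+//! costs m * k * n multiply-accumulates.
+//!
+//! A minimal sketch, assuming the constructors exercised in
+//! `tensor/tests/arith.rs`; the import path is illustrative:
+//!
+//! ```ignore
+//! use acme_tensor::TensorBase;
+//!
+//! let a = TensorBase::<f64>::fill((3, 2), 2.0); // 3 x 2, all 2.0
+//! let b = TensorBase::<f64>::ones((2, 3));      // 2 x 3, all 1.0
+//! // every entry of the 3 x 3 result is 2.0 * 1.0 summed over k = 2, i.e. 4.0
+//! assert_eq!(a.matmul(&b), TensorBase::<f64>::fill((3, 3), 4.0));
+//! ```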
+pub mod arith; + +#[cfg(test)] +mod tests {} diff --git a/tensor/src/ops/backprop.rs b/tensor/src/ops/backprop.rs index 40b66607..954a5ff0 100644 --- a/tensor/src/ops/backprop.rs +++ b/tensor/src/ops/backprop.rs @@ -2,6 +2,28 @@ Appellation: backprop Contrib: FL03 */ -use acme::prelude::Ops; +use super::Op; -pub struct BackpropOp(Option); +pub struct BackpropOp(Option>); + +impl BackpropOp { + pub fn new(op: Op) -> Self { + BackpropOp(Some(op)) + } + + pub fn none() -> Self { + BackpropOp(None) + } + + pub fn op(&self) -> Option<&Op> { + self.0.as_ref() + } + + pub fn op_mut(&mut self) -> Option<&mut Op> { + self.0.as_mut() + } + + pub fn into_inner(self) -> Option> { + self.0 + } +} diff --git a/tensor/src/ops/kinds.rs b/tensor/src/ops/kinds.rs index fedbdd65..67048749 100644 --- a/tensor/src/ops/kinds.rs +++ b/tensor/src/ops/kinds.rs @@ -4,21 +4,35 @@ */ use crate::TensorBase; -pub trait TensorOp {} - +#[derive(Clone, Debug)] pub enum Op { Binary(Box>, Box>, BinaryOp), Unary(Box>, UnaryOp), } +#[derive(Clone, Copy, Debug)] pub enum BinaryOp { Add, Div, + Matmul, Mul, Sub, } -pub enum UnaryOp {} +#[derive(Clone, Copy, Debug)] +pub enum UnaryOp { + Abs, + Cos, + Cosh, + Exp, + Log, + Neg, + Reciprocal, + Sin, + Sinh, + Tan, + Tanh, +} pub enum Expr { Binary(BinaryOp), @@ -26,3 +40,9 @@ pub enum Expr { Scalar(T), Tensor(TensorBase), } + +pub struct BinOp { + pub lhs: TensorBase, + pub rhs: TensorBase, + pub op: BinaryOp, +} diff --git a/tensor/src/ops/mod.rs b/tensor/src/ops/mod.rs index 6b0e7fc5..88eb8a25 100644 --- a/tensor/src/ops/mod.rs +++ b/tensor/src/ops/mod.rs @@ -2,10 +2,12 @@ Appellation: ops Contrib: FL03 */ -pub use self::backprop::*; +pub use self::{backprop::*, kinds::*}; pub(crate) mod backprop; pub(crate) mod kinds; +pub trait TensorOp {} + #[cfg(test)] mod tests {} diff --git a/tensor/src/shape/shape.rs b/tensor/src/shape/shape.rs index 3ce5372c..7c111a00 100644 --- a/tensor/src/shape/shape.rs +++ b/tensor/src/shape/shape.rs @@ -3,6 +3,7 @@ Contrib: FL03 */ use super::Rank; +use acme::prelude::Result; #[cfg(feature = "serde")] use serde::{Deserialize, Serialize}; use std::ops::{self, Deref}; @@ -45,6 +46,16 @@ impl Shape { Self(vec![0; rank]) } + pub fn matmul_shape(&self, other: &Self) -> Result { + if *self.rank() != 2 || *other.rank() != 2 { + return Err("Both shapes must be rank 2".into()); + } + if self[1] != other[0] { + return Err("Incompatible shapes".into()); + } + Ok(Self::from((self[0], other[1]))) + } + pub fn dims(&self) -> &[usize] { &self.0 } diff --git a/tensor/src/tensor.rs b/tensor/src/tensor.rs index 6c9bcdee..f0d65da3 100644 --- a/tensor/src/tensor.rs +++ b/tensor/src/tensor.rs @@ -2,6 +2,7 @@ Appellation: tensor Contrib: FL03 */ +use crate::ops::kinds::{BinaryOp, Op}; use crate::prelude::Scalar; use crate::shape::{IntoShape, Rank, Shape}; use crate::store::Layout; @@ -10,10 +11,19 @@ use acme::prelude::AtomicId; // use std::sync::{Arc, RwLock}; pub(crate) fn from_vec(shape: impl IntoShape, store: Vec) -> TensorBase { + from_vec_with_op(None, shape, store) +} + +pub(crate) fn from_vec_with_op( + op: Option>, + shape: impl IntoShape, + store: Vec, +) -> TensorBase { let layout = Layout::contiguous(shape); TensorBase { id: AtomicId::new(), layout, + op, store, //Arc::new(RwLock::new(store)), } } @@ -22,6 +32,7 @@ pub(crate) fn from_vec(shape: impl IntoShape, store: Vec) -> TensorBase pub struct TensorBase { id: AtomicId, layout: Layout, + op: Option>, store: Vec, } @@ -43,6 +54,10 @@ impl TensorBase { &self.layout } + pub fn op(&self) -> Option<&Op> { + 
self.op.as_ref() + } + pub fn rank(&self) -> Rank { self.layout.shape().rank() } @@ -104,6 +119,31 @@ where } } +impl TensorBase +where + T: Scalar, +{ + pub fn matmul(&self, other: &Self) -> Self { + let shape = self.shape().matmul_shape(other.shape()).unwrap(); + let mut result = vec![T::zero(); shape.elements()]; + + for i in 0..self.shape()[0] { + for j in 0..other.shape()[1] { + for k in 0..self.shape()[1] { + result[i * other.shape()[1] + j] += + self.store[i * self.shape()[1] + k] * other.store[k * other.shape()[1] + j]; + } + } + } + let op = Op::Binary( + Box::new(self.clone()), + Box::new(other.clone()), + BinaryOp::Matmul, + ); + from_vec_with_op(Some(op), shape, result) + } +} + impl std::ops::Index<&[usize]> for TensorBase { type Output = T; @@ -123,6 +163,122 @@ where T: PartialEq, { fn eq(&self, other: &Self) -> bool { - self.id == other.id + self.id == other.id || self.store == other.store } } + +macro_rules! cmp { + (ne: $lhs:expr, $rhs:expr) => { + if $lhs != $rhs { + panic!("Shape Mismatch: {:?} != {:?}", $lhs, $rhs); + } + }; +} + +macro_rules! impl_arith { + ($trait:ident, $method:ident, $op:tt) => { + impl std::ops::$trait for TensorBase + where + T: Scalar + std::ops::$trait, + { + type Output = Self; + + fn $method(self, other: Self) -> Self::Output { + cmp!(ne: self.shape(), other.shape()); + let shape = self.shape().clone(); + let store = self.store.iter().zip(other.store.iter()).map(|(a, b)| *a $op *b).collect(); + let op = Op::Binary(Box::new(self), Box::new(other), BinaryOp::$trait); + from_vec_with_op(Some(op), shape, store) + } + } + + impl<'a, T> std::ops::$trait<&'a TensorBase> for TensorBase + where + T: Scalar + std::ops::$trait, + { + type Output = TensorBase; + + fn $method(self, other: &'a TensorBase) -> Self::Output { + if self.shape() != other.shape() { + panic!("shapes must be equal"); + } + let shape = self.shape().clone(); + let store = self.store.iter().zip(other.store.iter()).map(|(a, b)| *a $op *b).collect(); + let op = Op::Binary(Box::new(self), Box::new(other.clone()), BinaryOp::$trait); + from_vec_with_op(Some(op), shape, store) + } + } + + impl<'a, T> std::ops::$trait> for &'a TensorBase + where + T: Scalar + std::ops::$trait, + { + type Output = TensorBase; + + fn $method(self, other: TensorBase) -> Self::Output { + if self.shape() != other.shape() { + panic!("shapes must be equal"); + } + let shape = self.shape().clone(); + let store = self.store.iter().zip(other.store.iter()).map(|(a, b)| *a $op *b).collect(); + let op = Op::Binary(Box::new(self.clone()), Box::new(other), BinaryOp::$trait); + from_vec_with_op(Some(op), shape, store) + } + } + + impl<'a, 'b, T> std::ops::$trait<&'b TensorBase> for &'a TensorBase + where + T: Scalar + std::ops::$trait, + { + type Output = TensorBase; + + fn $method(self, other: &'b TensorBase) -> Self::Output { + if self.shape() != other.shape() { + panic!("shapes must be equal"); + } + let shape = self.shape().clone(); + let store = self.store.iter().zip(other.store.iter()).map(|(a, b)| *a $op *b).collect(); + let op = Op::Binary(Box::new(self.clone()), Box::new(other.clone()), BinaryOp::$trait); + from_vec_with_op(Some(op), shape, store) + } + } + }; +} + +macro_rules! 
impl_scalar_arith { + ($trait:ident, $method:ident, $op:tt) => { + impl std::ops::$trait for TensorBase + where + T: Copy + std::ops::$trait, + { + type Output = Self; + + fn $method(self, other: T) -> Self::Output { + let store = self.store.iter().map(|a| *a $op other).collect(); + Self::Output::from_vec(self.shape().clone(), store) + } + } + + impl<'a, T> std::ops::$trait for &'a TensorBase + where + T: Copy + std::ops::$trait, + { + type Output = TensorBase; + + fn $method(self, other: T) -> Self::Output { + let store = self.store.iter().map(|a| *a $op other).collect(); + Self::Output::from_vec(self.shape().clone(), store) + } + } + }; +} + +impl_arith!(Add, add, +); +impl_arith!(Div, div, /); +impl_arith!(Mul, mul, *); +impl_arith!(Sub, sub, -); + +impl_scalar_arith!(Add, add, +); +impl_scalar_arith!(Div, div, /); +impl_scalar_arith!(Mul, mul, *); +impl_scalar_arith!(Sub, sub, -); diff --git a/tensor/tests/arith.rs b/tensor/tests/arith.rs new file mode 100644 index 00000000..8c301ab1 --- /dev/null +++ b/tensor/tests/arith.rs @@ -0,0 +1,57 @@ +/* + Appellation: arith + Contrib: FL03 +*/ +#![cfg(test)] +extern crate acme_tensor as tensor; + +use tensor::TensorBase; + +#[test] +fn test_add() { + let shape = (2, 2); + let a = TensorBase::::ones(shape); + let b = TensorBase::::ones(shape); + let c = a + &b; + + assert_eq!(c, TensorBase::::ones(shape) * 2.0); +} + +#[test] +fn test_div() { + let shape = (2, 2); + let a = TensorBase::::ones(shape); + let b = TensorBase::::ones(shape) * 2.0; + let c = a / b; + + assert_eq!(c, TensorBase::::fill(shape, 0.5)); +} + +#[test] +fn test_mul() { + let shape = (2, 2); + let a = TensorBase::::ones(shape); + let b = TensorBase::::ones(shape); + let c = a * b; + + assert_eq!(c, TensorBase::::ones(shape)); +} + +#[test] +fn test_sub() { + let shape = (2, 2); + let a = TensorBase::::ones(shape); + let b = TensorBase::::ones(shape); + let c = a - &b; + + assert_eq!(c, TensorBase::::zeros(shape)); +} + +#[test] +fn test_matmul() { + let a = TensorBase::::fill((3, 2), 2.0); + let b = TensorBase::::ones((2, 3)); + let c = a.matmul(&b); + + assert_eq!(c, TensorBase::::fill((3, 3), 4.0)); +} diff --git a/tensor/tests/default.rs b/tensor/tests/default.rs index ff4c83fe..d1c044d2 100644 --- a/tensor/tests/default.rs +++ b/tensor/tests/default.rs @@ -1,7 +1,18 @@ -#[cfg(test)] +/* + Appellation: default + Contrib: FL03 +*/ +#![cfg(test)] + +fn addition(a: A, b: B) -> C +where + A: std::ops::Add, +{ + a + b +} + #[test] fn compiles() { - let add = |a, b| a + b; - let result = add(2, 2); + let result = addition(2, 2); assert_eq!(result, 4); } diff --git a/tensor/tests/tensor.rs b/tensor/tests/tensor.rs index 59401b49..d4d10a00 100644 --- a/tensor/tests/tensor.rs +++ b/tensor/tests/tensor.rs @@ -1,4 +1,8 @@ -#[cfg(test)] +/* + Appellation: tensor + Contrib: FL03 +*/ +#![cfg(test)] extern crate acme_tensor as tensor; use tensor::TensorBase; From f51c7d68d4d4c1b2bfd811c89b8dcb7940cae476 Mon Sep 17 00:00:00 2001 From: Joe McCain III Date: Sun, 17 Mar 2024 11:19:06 -0500 Subject: [PATCH 24/87] update Signed-off-by: Joe McCain III --- tensor/src/linalg/arith.rs | 4 ++++ tensor/src/linalg/mod.rs | 2 +- tensor/src/tensor.rs | 10 ++++++++++ 3 files changed, 15 insertions(+), 1 deletion(-) create mode 100644 tensor/src/linalg/arith.rs diff --git a/tensor/src/linalg/arith.rs b/tensor/src/linalg/arith.rs new file mode 100644 index 00000000..0a6333ef --- /dev/null +++ b/tensor/src/linalg/arith.rs @@ -0,0 +1,4 @@ +/* + Appellation: arith + Contrib: FL03 +*/ \ No newline at end of 
file diff --git a/tensor/src/linalg/mod.rs b/tensor/src/linalg/mod.rs index 56087979..00a85d42 100644 --- a/tensor/src/linalg/mod.rs +++ b/tensor/src/linalg/mod.rs @@ -1,5 +1,5 @@ /* - Appellation: linal + Appellation: linalg Contrib: FL03 */ //! # Linear Algebra diff --git a/tensor/src/tensor.rs b/tensor/src/tensor.rs index f0d65da3..1a197265 100644 --- a/tensor/src/tensor.rs +++ b/tensor/src/tensor.rs @@ -247,6 +247,16 @@ macro_rules! impl_arith { macro_rules! impl_scalar_arith { ($trait:ident, $method:ident, $op:tt) => { + // impl TensorBase + // where + // T: Copy + std::ops::$trait, + // { + // pub fn $method(self, other: T) -> TensorBase { + // let store = self.store.iter().map(|a| *a $op other).collect(); + // from_vec(self.shape().clone(), store) + // } + // } + impl std::ops::$trait for TensorBase where T: Copy + std::ops::$trait, From 67c1c731a66c6073ccd83eb2bf172d59a596bf0c Mon Sep 17 00:00:00 2001 From: Joe McCain III Date: Sun, 17 Mar 2024 11:27:29 -0500 Subject: [PATCH 25/87] update Signed-off-by: Joe McCain III --- .artifacts/license/APACHE.LICENSE | 2 +- .artifacts/license/MIT.LICENSE | 2 +- .github/workflows/clippy.yml | 21 ++++++++-------- .github/workflows/crates.yml | 2 -- .github/workflows/rust.yml | 40 ++++++++++++++----------------- LICENSE | 2 +- tensor/src/lib.rs | 8 ++++++- 7 files changed, 39 insertions(+), 38 deletions(-) diff --git a/.artifacts/license/APACHE.LICENSE b/.artifacts/license/APACHE.LICENSE index 3c6f2847..071b69b1 100644 --- a/.artifacts/license/APACHE.LICENSE +++ b/.artifacts/license/APACHE.LICENSE @@ -186,7 +186,7 @@ same "printed page" as the copyright notice for easier identification within third-party archives. - Copyright 2022 Scattered-Systems + Copyright 2024 Scattered-Systems, LLC Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/.artifacts/license/MIT.LICENSE b/.artifacts/license/MIT.LICENSE index 470df835..2e7ec0f2 100644 --- a/.artifacts/license/MIT.LICENSE +++ b/.artifacts/license/MIT.LICENSE @@ -1,6 +1,6 @@ The MIT License (MIT) -Copyright (c) 2022 Scattered-Systems +Copyright (c) 2024 Scattered-Systems, LLC Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal diff --git a/.github/workflows/clippy.yml b/.github/workflows/clippy.yml index 549127d0..db9201de 100644 --- a/.github/workflows/clippy.yml +++ b/.github/workflows/clippy.yml @@ -2,48 +2,49 @@ name: Clippy concurrency: group: ${{ github.workflow }}-${{ github.ref }} - cancel-in-progress: true + cancel-in-progress: false on: pull_request: branches: [ main ] push: branches: [ main ] - tags: [ nightly*, v*.*.* ] + tags: [ nightly*, v*.*.* ] release: + types: [ created ] repository_dispatch: types: [ clippy ] schedule: - - cron: "30 21 * * 0" # Every Sunday at 21:30 UTC + - cron: 30 21 * * 0 # Every Sunday at 9:30PM UTC workflow_dispatch: permissions: - actions: read # only required for a private repository by github/codeql-action/upload-sarif to get the Action run status + actions: read # only required for a private repository by github/codeql-action/upload-sarif to get the Action run status contents: read security-events: write - + jobs: clippy: name: Clippy runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 - - name: Install Rust toolchain - uses: actions-rs/toolchain@16499b5e05bf2e26879000db0c1d13f7e13fa3af #@v1 + - name: setup (rust) + uses: actions-rs/toolchain@v1 with: profile: minimal toolchain: stable components: clippy override: true - - name: Setup (langspace) + - name: setup (clippy) run: cargo install clippy-sarif sarif-fmt - - name: Analyze + - name: analyze run: cargo clippy --all-features --message-format=json | clippy-sarif | tee rust-clippy-results.sarif | sarif-fmt continue-on-error: true - - name: Upload results + - name: Upload analysis uses: github/codeql-action/upload-sarif@v3 with: sarif_file: rust-clippy-results.sarif diff --git a/.github/workflows/crates.yml b/.github/workflows/crates.yml index 6e592d4e..d2e8fa33 100644 --- a/.github/workflows/crates.yml +++ b/.github/workflows/crates.yml @@ -8,8 +8,6 @@ env: CARGO_TERM_COLOR: always on: - push: - tags: [ v*.*.* ] release: types: [ created ] repository_dispatch: diff --git a/.github/workflows/rust.yml b/.github/workflows/rust.yml index 452c6b80..ff6535a6 100644 --- a/.github/workflows/rust.yml +++ b/.github/workflows/rust.yml @@ -8,24 +8,23 @@ env: CARGO_TERM_COLOR: always on: - pull_request: - branches: [ main ] push: branches: [ main ] - tags: [ nightly*, v*.*.* ] + tags: [ nightly*, v*.*.* ] release: + types: [ created ] repository_dispatch: types: [ rust ] schedule: - - cron: "30 21 * * 0" # Every Sunday at 21:30 UTC + - cron: 30 21 * * 0 # Every Sunday at 9:30pm UTC workflow_dispatch: jobs: - build: + builder: name: Build strategy: matrix: - platform: [ ubuntu-latest ] + platform: [ macos-latest, ubuntu-latest, windows-latest ] toolchain: [ stable, nightly ] runs-on: ${{ matrix.platform }} steps: @@ -35,36 +34,34 @@ jobs: rustup update rustup default ${{ matrix.toolchain }} - name: Build - id: rust-build - run: cargo build --features full -r -v --workspace + run: cargo build --all-features -r -v --workspace - name: Cache build - id: rust-cache + id: cache-build uses: actions/cache@v4 with: path: | ~/.cargo/registry ~/.cargo/git target/release - key: ${{ 
runner.os }}-cargo-${{ hashFiles('**/Cargo.lock') }} + key: ${{ matrix.toolchain }}-${{ runner.os }}-cargo-${{ hashFiles('**/Cargo.lock') }} test: name: Test + needs: [ builder ] strategy: matrix: - toolchain: [ nightly ] + toolchain: [ stable, nightly ] runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 - - name: Install Rust toolchain - uses: actions-rs/toolchain@v1 #@v1 - with: - override: true - profile: minimal - toolchain: ${{ matrix.toolchain }} + - name: setup (langspace) + run: | + rustup update + rustup default ${{ matrix.toolchain }} - name: Test - id: rust-test - run: cargo test --all-features -r -v --workspace + run: cargo test --all-features -v --workspace bench: - name: Bench + name: Benchmark + needs: [ builder ] runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 @@ -73,5 +70,4 @@ jobs: rustup update rustup default nightly - name: Bench - id: rust-bench - run: cargo bench --features full -v --workspace \ No newline at end of file + run: cargo bench --all --features full -v \ No newline at end of file diff --git a/LICENSE b/LICENSE index 3c6f2847..071b69b1 100644 --- a/LICENSE +++ b/LICENSE @@ -186,7 +186,7 @@ same "printed page" as the copyright notice for easier identification within third-party archives. - Copyright 2022 Scattered-Systems + Copyright 2024 Scattered-Systems, LLC Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/tensor/src/lib.rs b/tensor/src/lib.rs index 861f2518..fc227edf 100644 --- a/tensor/src/lib.rs +++ b/tensor/src/lib.rs @@ -21,12 +21,18 @@ pub mod shape; pub mod specs; pub mod store; + pub mod prelude { + #[doc(inline)] pub use crate::tensor::TensorBase; - + #[doc(inline)] pub use crate::data::*; + #[doc(inline)] pub use crate::ops::*; + #[doc(inline)] pub use crate::shape::*; + #[doc(inline)] pub use crate::specs::prelude::*; + #[doc(inline)] pub use crate::store::*; } From 0a8d17d630dc1b3d35a5b2a5887eaf2f15da5bd5 Mon Sep 17 00:00:00 2001 From: Joe McCain III Date: Sun, 17 Mar 2024 11:35:03 -0500 Subject: [PATCH 26/87] update Signed-off-by: Joe McCain III --- tensor/src/specs/scalar.rs | 22 ++++++++++++++++++++++ tensor/src/tensor.rs | 1 + 2 files changed, 23 insertions(+) diff --git a/tensor/src/specs/scalar.rs b/tensor/src/specs/scalar.rs index bce4b6e6..5c1f5d98 100644 --- a/tensor/src/specs/scalar.rs +++ b/tensor/src/specs/scalar.rs @@ -231,3 +231,25 @@ macro_rules! impl_scalar { } impl_scalar!(f32); impl_scalar!(f64); + +macro_rules! 
unary_op_trait { + ($trait:ident, $method:ident) => { + pub trait $trait { + fn $method(self) -> Self; + } + }; +} + + +unary_op_trait!(Cos, cos); +unary_op_trait!(Cosh, cosh); +unary_op_trait!(Exp, exp); +unary_op_trait!(Ln, ln); +unary_op_trait!(Recip, recip); +unary_op_trait!(Sin, sin); +unary_op_trait!(Sinh, sinh); +unary_op_trait!(Sqrt, sqrt); +unary_op_trait!(Square, square); +unary_op_trait!(Tan, tan); +unary_op_trait!(Tanh, tanh); + diff --git a/tensor/src/tensor.rs b/tensor/src/tensor.rs index 1a197265..15316a80 100644 --- a/tensor/src/tensor.rs +++ b/tensor/src/tensor.rs @@ -292,3 +292,4 @@ impl_scalar_arith!(Add, add, +); impl_scalar_arith!(Div, div, /); impl_scalar_arith!(Mul, mul, *); impl_scalar_arith!(Sub, sub, -); + From 031a492a07123a5b1226a4890d0e524cfa10e0b6 Mon Sep 17 00:00:00 2001 From: Joe McCain III Date: Sun, 17 Mar 2024 15:08:39 -0500 Subject: [PATCH 27/87] update Signed-off-by: Joe McCain III --- core/src/graphs/dcg/graph.rs | 27 +++-- core/src/ops/gradient.rs | 8 +- core/src/ops/mod.rs | 20 ---- tensor/src/data/mod.rs | 5 +- tensor/src/impls/arith.rs | 218 +++++++++++++++++++++++++++++++++++ tensor/src/impls/grad.rs | 43 +++++++ tensor/src/impls/linalg.rs | 34 ++++++ tensor/src/lib.rs | 11 +- tensor/src/ops/kinds.rs | 3 + tensor/src/specs/scalar.rs | 2 - tensor/src/store/mod.rs | 4 + tensor/src/tensor.rs | 165 +++++--------------------- tensor/tests/arith.rs | 12 +- 13 files changed, 375 insertions(+), 177 deletions(-) create mode 100644 tensor/src/impls/arith.rs create mode 100644 tensor/src/impls/grad.rs create mode 100644 tensor/src/impls/linalg.rs diff --git a/core/src/graphs/dcg/graph.rs b/core/src/graphs/dcg/graph.rs index 6233702f..079f32c2 100644 --- a/core/src/graphs/dcg/graph.rs +++ b/core/src/graphs/dcg/graph.rs @@ -80,19 +80,22 @@ impl Dcg { if let Node::Op { inputs, op } = node_op { match op { - Ops::Binary(BinaryOp::Add(_)) => { - for arg in self.store.neighbors_directed(*node, Direction::Incoming) { - *gradients.entry(arg).or_default() += node_grad; + Ops::Binary(inner) => match *inner { + BinaryOp::Add(_) => { + for arg in self.store.neighbors_directed(*node, Direction::Incoming) { + *gradients.entry(arg).or_default() += node_grad; + } } - } - Ops::Binary(BinaryOp::Mul(_)) => { - let lhs = inputs[0]; - let rhs = inputs[1]; - let lhs_val = self.get(lhs).unwrap().get_value(); - let rhs_val = self.get(rhs).unwrap().get_value(); - *gradients.entry(lhs).or_default() += node_grad * rhs_val; - *gradients.entry(rhs).or_default() += node_grad * lhs_val; - } + BinaryOp::Mul(_) => { + let lhs = inputs[0]; + let rhs = inputs[1]; + let lhs_val = self.get(lhs).unwrap().get_value(); + let rhs_val = self.get(rhs).unwrap().get_value(); + *gradients.entry(lhs).or_default() += node_grad * rhs_val; + *gradients.entry(rhs).or_default() += node_grad * lhs_val; + } + _ => {} + }, // Handle other operations as needed _ => {} } diff --git a/core/src/ops/gradient.rs b/core/src/ops/gradient.rs index 1b43cb8a..cd2a0f2c 100644 --- a/core/src/ops/gradient.rs +++ b/core/src/ops/gradient.rs @@ -3,6 +3,8 @@ Contrib: FL03 */ +use crate::prelude::Store; + pub trait Differentiable { type Derivative; @@ -16,11 +18,9 @@ pub trait Gradient { } pub trait Grad { - type Output; + type Gradient: Store; - /// Compute the gradient of a function at a given point, with respect to a given variable. 
- // TODO: Create a macro for generating parameter keys - fn grad(&self, at: T, wrt: &str) -> Self::Output; + fn grad(&self) -> Self::Gradient; } pub trait Partial { diff --git a/core/src/ops/mod.rs b/core/src/ops/mod.rs index 6e3b2419..7725cc1d 100644 --- a/core/src/ops/mod.rs +++ b/core/src/ops/mod.rs @@ -12,26 +12,6 @@ pub(crate) mod gradient; pub(crate) mod kinds; pub(crate) mod operator; -use crate::prelude::Result; - -pub trait Expressive { - type Graph; - - fn expand(&self) -> Self::Graph; -} - -pub trait Backward { - type Store; - - fn backward(&self) -> Result; -} - -pub trait Compute { - type Output; - - fn compute(&self, args: T) -> Self::Output; -} - pub trait Evaluate { type Output; diff --git a/tensor/src/data/mod.rs b/tensor/src/data/mod.rs index 3a232217..ce24bea1 100644 --- a/tensor/src/data/mod.rs +++ b/tensor/src/data/mod.rs @@ -2,16 +2,17 @@ Appellation: data Contrib: FL03 */ - +#![allow(dead_code, unused_imports)] pub unsafe trait RawData { type Elem; } pub trait Data: RawData {} + #[cfg(not(feature = "std"))] use alloc::vec::Vec; -use std::ptr::NonNull; +use core::ptr::NonNull; /// Return a NonNull pointer to the vector's data pub(crate) fn nonnull_from_vec_data(v: &mut Vec) -> NonNull { diff --git a/tensor/src/impls/arith.rs b/tensor/src/impls/arith.rs new file mode 100644 index 00000000..280ee712 --- /dev/null +++ b/tensor/src/impls/arith.rs @@ -0,0 +1,218 @@ +/* + Appellation: arith + Contrib: FL03 +*/ +use crate::ops::{BinaryOp, Op, UnaryOp}; +use crate::prelude::Scalar; +use crate::tensor::*; + +impl std::ops::Neg for TensorBase +where + T: Copy + std::ops::Neg, +{ + type Output = Self; + + fn neg(self) -> Self::Output { + let shape = self.shape().clone(); + let store = self.store.iter().map(|a| -*a).collect(); + let op = Op::Unary(Box::new(self), UnaryOp::Neg); + from_vec_with_op(op, shape, store) + } +} + +impl<'a, T> std::ops::Neg for &'a TensorBase +where + T: Copy + std::ops::Neg, +{ + type Output = TensorBase; + + fn neg(self) -> Self::Output { + let shape = self.shape().clone(); + let store = self.store.iter().map(|a| -*a).collect(); + let op = Op::Unary(Box::new(self.clone()), UnaryOp::Neg); + from_vec_with_op(op, shape, store) + } +} + +macro_rules! cmp { + (ne: $lhs:expr, $rhs:expr) => { + if $lhs != $rhs { + panic!("Shape Mismatch: {:?} != {:?}", $lhs, $rhs); + } + }; +} + +macro_rules! 
impl_arith { + ($trait:ident, $method:ident, $op:tt) => { + impl std::ops::$trait for TensorBase + where + T: Scalar + std::ops::$trait, + { + type Output = Self; + + fn $method(self, other: Self) -> Self::Output { + cmp!(ne: self.shape(), other.shape()); + let shape = self.shape().clone(); + let store = self.store.iter().zip(other.store.iter()).map(|(a, b)| *a $op *b).collect(); + let op = Op::Binary(Box::new(self), Box::new(other), BinaryOp::$trait); + from_vec_with_op(op, shape, store) + } + } + + impl<'a, T> std::ops::$trait<&'a TensorBase> for TensorBase + where + T: Scalar + std::ops::$trait, + { + type Output = TensorBase; + + fn $method(self, other: &'a TensorBase) -> Self::Output { + if self.shape() != other.shape() { + panic!("shapes must be equal"); + } + let shape = self.shape().clone(); + let store = self.store.iter().zip(other.store.iter()).map(|(a, b)| *a $op *b).collect(); + let op = Op::Binary(Box::new(self), Box::new(other.clone()), BinaryOp::$trait); + from_vec_with_op(op, shape, store) + } + } + + impl<'a, T> std::ops::$trait> for &'a TensorBase + where + T: Scalar + std::ops::$trait, + { + type Output = TensorBase; + + fn $method(self, other: TensorBase) -> Self::Output { + if self.shape() != other.shape() { + panic!("shapes must be equal"); + } + let shape = self.shape().clone(); + let store = self.store.iter().zip(other.store.iter()).map(|(a, b)| *a $op *b).collect(); + let op = Op::Binary(Box::new(self.clone()), Box::new(other), BinaryOp::$trait); + from_vec_with_op(op, shape, store) + } + } + + impl<'a, 'b, T> std::ops::$trait<&'b TensorBase> for &'a TensorBase + where + T: Scalar + std::ops::$trait, + { + type Output = TensorBase; + + fn $method(self, other: &'b TensorBase) -> Self::Output { + if self.shape() != other.shape() { + panic!("shapes must be equal"); + } + let shape = self.shape().clone(); + let store = self.store.iter().zip(other.store.iter()).map(|(a, b)| *a $op *b).collect(); + let op = Op::Binary(Box::new(self.clone()), Box::new(other.clone()), BinaryOp::$trait); + from_vec_with_op(op, shape, store) + } + } + }; +} + +macro_rules! impl_scalar_arith { + ($trait:ident, $method:ident, $op:tt) => { + // impl TensorBase + // where + // T: Copy + std::ops::$trait, + // { + // pub fn $method(self, other: T) -> TensorBase { + // let store = self.store.iter().map(|a| *a $op other).collect(); + // from_vec(self.shape().clone(), store) + // } + // } + + impl std::ops::$trait for TensorBase + where + T: Copy + std::ops::$trait, + { + type Output = Self; + + fn $method(self, other: T) -> Self::Output { + let store = self.store.iter().map(|a| *a $op other).collect(); + Self::Output::from_vec(self.shape().clone(), store) + } + } + + impl<'a, T> std::ops::$trait for &'a TensorBase + where + T: Copy + std::ops::$trait, + { + type Output = TensorBase; + + fn $method(self, other: T) -> Self::Output { + let store = self.store.iter().map(|a| *a $op other).collect(); + Self::Output::from_vec(self.shape().clone(), store) + } + } + }; +} + +macro_rules! 
impl_assign_op { + ($trait:ident, $method:ident, $inner:ident, $op:tt) => { + impl std::ops::$trait for TensorBase + where + T: Copy + std::ops::$inner, + { + fn $method(&mut self, other: Self) { + cmp!(ne: self.shape(), other.shape()); + self.store = self.store.iter().zip(other.store.iter()).map(|(a, b)| *a $op *b).collect(); + } + } + + impl<'a, T> std::ops::$trait<&'a TensorBase> for TensorBase + where + T: Copy + std::ops::$inner, + { + fn $method(&mut self, other: &'a TensorBase) { + cmp!(ne: self.shape(), other.shape()); + self.store = self.store.iter().zip(other.store.iter()).map(|(a, b)| *a $op *b).collect(); + } + } + }; + +} + +macro_rules! impl_unary_arith { + ($variant:ident, $method:ident, $e:expr) => { + impl TensorBase + where + T: Scalar, + { + pub fn $method(self) -> Self { + let shape = self.shape().clone(); + let store = self.store.iter().map($e).collect(); + let op = Op::::Unary(Box::new(self), UnaryOp::$variant); + from_vec_with_op(op, shape, store) + } + } + }; +} + +impl_arith!(Add, add, +); +impl_arith!(Div, div, /); +impl_arith!(Mul, mul, *); +impl_arith!(Sub, sub, -); + +impl_assign_op!(AddAssign, add_assign, Add, +); +impl_assign_op!(DivAssign, div_assign, Div, /); +impl_assign_op!(MulAssign, mul_assign, Mul, *); +impl_assign_op!(SubAssign, sub_assign, Sub, -); + +impl_scalar_arith!(Add, add, +); +impl_scalar_arith!(Div, div, /); +impl_scalar_arith!(Mul, mul, *); +impl_scalar_arith!(Sub, sub, -); + +impl_unary_arith!(Exp, exp, |v| v.exp()); +// impl_unary_arith!(Log, log, |v| v.log()); + +impl_unary_arith!(Cos, cos, |v| v.cos()); +impl_unary_arith!(Cosh, cosh, |v| v.cosh()); +impl_unary_arith!(Sin, sin, |v| v.sin()); +impl_unary_arith!(Sinh, sinh, |v| v.sinh()); +impl_unary_arith!(Sqrt, sqrt, |v| v.sqrt()); +impl_unary_arith!(Tan, tan, |v| v.tan()); +impl_unary_arith!(Tanh, tanh, |v| v.tanh()); diff --git a/tensor/src/impls/grad.rs b/tensor/src/impls/grad.rs new file mode 100644 index 00000000..872c6264 --- /dev/null +++ b/tensor/src/impls/grad.rs @@ -0,0 +1,43 @@ +/* + Appellation: grad + Contrib: FL03 +*/ +use crate::ops::{BinaryOp, Op,}; +use crate::prelude::Scalar; +use crate::tensor::*; +use acme::prelude::AtomicId; + +pub(crate) type GradStore = std::collections::BTreeMap; + +impl TensorBase +where + T: Scalar, +{ + pub fn grad(&self) -> GradStore> { + let mut store = GradStore::new(); + store.insert(self.id().into(), TensorBase::ones_like(self)); + + let grad = store.get(&self.id().into()).unwrap().clone(); + + if let Some(op) = &self.op { + match op { + Op::Unary(_a, kind) => match kind { + _ => todo!(), + }, + Op::Binary(a, b, kind) => match kind { + BinaryOp::Add => { + *store + .entry(a.id().into()) + .or_insert(TensorBase::zeros_like(a)) += grad.clone(); + *store + .entry(b.id().into()) + .or_insert(TensorBase::zeros_like(b)) += grad; + } + _ => todo!(), + }, + // _ => {} + } + } + store + } +} diff --git a/tensor/src/impls/linalg.rs b/tensor/src/impls/linalg.rs new file mode 100644 index 00000000..3dd1eb42 --- /dev/null +++ b/tensor/src/impls/linalg.rs @@ -0,0 +1,34 @@ +/* + Appellation: linalg + Contrib: FL03 +*/ +//! Implementations for linear algebra operations. 
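+//!
+//! The `Matmul` impl below uses the same naive triple loop over the flat,
+//! row-major stores (O(m * k * n) for an (m, k) by (k, n) product); the
+//! result carries an `Op::Binary(.., BinaryOp::Matmul)` record so that a
+//! backward pass can recover both operands later.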
+use crate::ops::{BinaryOp, Op}; +use crate::prelude::{Matmul, Scalar}; +use crate::tensor::*; + +impl Matmul> for TensorBase +where + T: Scalar, +{ + type Output = Self; + fn matmul(&self, other: &Self) -> Self { + let shape = self.shape().matmul_shape(other.shape()).unwrap(); + let mut result = vec![T::zero(); shape.elements()]; + + for i in 0..self.shape()[0] { + for j in 0..other.shape()[1] { + for k in 0..self.shape()[1] { + result[i * other.shape()[1] + j] += + self.store[i * self.shape()[1] + k] * other.store[k * other.shape()[1] + j]; + } + } + } + let op = Op::Binary( + Box::new(self.clone()), + Box::new(other.clone()), + BinaryOp::Matmul, + ); + from_vec_with_op(op, shape, result) + } +} diff --git a/tensor/src/lib.rs b/tensor/src/lib.rs index fc227edf..53371e7d 100644 --- a/tensor/src/lib.rs +++ b/tensor/src/lib.rs @@ -21,10 +21,15 @@ pub mod shape; pub mod specs; pub mod store; +mod impls { + mod arith; + mod grad; + mod linalg; +} + +pub type Tensor = tensor::TensorBase; pub mod prelude { - #[doc(inline)] - pub use crate::tensor::TensorBase; #[doc(inline)] pub use crate::data::*; #[doc(inline)] @@ -35,4 +40,6 @@ pub mod prelude { pub use crate::specs::prelude::*; #[doc(inline)] pub use crate::store::*; + #[doc(inline)] + pub use crate::Tensor; } diff --git a/tensor/src/ops/kinds.rs b/tensor/src/ops/kinds.rs index 67048749..ff5311c7 100644 --- a/tensor/src/ops/kinds.rs +++ b/tensor/src/ops/kinds.rs @@ -26,10 +26,13 @@ pub enum UnaryOp { Cosh, Exp, Log, + Ln, Neg, Reciprocal, Sin, Sinh, + Sqrt, + Square, Tan, Tanh, } diff --git a/tensor/src/specs/scalar.rs b/tensor/src/specs/scalar.rs index 5c1f5d98..3183a703 100644 --- a/tensor/src/specs/scalar.rs +++ b/tensor/src/specs/scalar.rs @@ -240,7 +240,6 @@ macro_rules! unary_op_trait { }; } - unary_op_trait!(Cos, cos); unary_op_trait!(Cosh, cosh); unary_op_trait!(Exp, exp); @@ -252,4 +251,3 @@ unary_op_trait!(Sqrt, sqrt); unary_op_trait!(Square, square); unary_op_trait!(Tan, tan); unary_op_trait!(Tanh, tanh); - diff --git a/tensor/src/store/mod.rs b/tensor/src/store/mod.rs index 0d1268ae..9f9eaef6 100644 --- a/tensor/src/store/mod.rs +++ b/tensor/src/store/mod.rs @@ -11,5 +11,9 @@ use std::sync::{Arc, RwLock}; pub type ArcTensor = Arc>>; +pub trait TensorStore { + type Elem; +} + #[cfg(test)] mod tests {} diff --git a/tensor/src/tensor.rs b/tensor/src/tensor.rs index 15316a80..5cf4a58e 100644 --- a/tensor/src/tensor.rs +++ b/tensor/src/tensor.rs @@ -11,11 +11,16 @@ use acme::prelude::AtomicId; // use std::sync::{Arc, RwLock}; pub(crate) fn from_vec(shape: impl IntoShape, store: Vec) -> TensorBase { - from_vec_with_op(None, shape, store) + TensorBase { + id: AtomicId::new(), + layout: Layout::contiguous(shape), + op: None, + store, //Arc::new(RwLock::new(store)), + } } pub(crate) fn from_vec_with_op( - op: Option>, + op: Op, shape: impl IntoShape, store: Vec, ) -> TensorBase { @@ -23,17 +28,17 @@ pub(crate) fn from_vec_with_op( TensorBase { id: AtomicId::new(), layout, - op, + op: Some(op), store, //Arc::new(RwLock::new(store)), } } #[derive(Clone, Debug)] pub struct TensorBase { - id: AtomicId, - layout: Layout, - op: Option>, - store: Vec, + pub(crate) id: AtomicId, + pub(crate) layout: Layout, + pub(crate) op: Option>, + pub(crate) store: Vec, } impl TensorBase { @@ -114,9 +119,17 @@ where Self::fill(shape, T::one()) } + pub fn ones_like(tensor: &TensorBase) -> Self { + Self::ones(tensor.shape().clone()) + } + pub fn zeros(shape: impl IntoShape) -> Self { Self::fill(shape, T::zero()) } + + pub fn zeros_like(tensor: &TensorBase) -> 
Self { + Self::zeros(tensor.shape().clone()) + } } impl TensorBase @@ -140,7 +153,7 @@ where Box::new(other.clone()), BinaryOp::Matmul, ); - from_vec_with_op(Some(op), shape, result) + from_vec_with_op(op, shape, result) } } @@ -158,138 +171,22 @@ impl std::ops::Index<&[usize]> for TensorBase { // } // } +impl Eq for TensorBase where T: Eq {} + impl PartialEq for TensorBase where T: PartialEq, { fn eq(&self, other: &Self) -> bool { - self.id == other.id || self.store == other.store + self.store == other.store } } -macro_rules! cmp { - (ne: $lhs:expr, $rhs:expr) => { - if $lhs != $rhs { - panic!("Shape Mismatch: {:?} != {:?}", $lhs, $rhs); - } - }; -} - -macro_rules! impl_arith { - ($trait:ident, $method:ident, $op:tt) => { - impl std::ops::$trait for TensorBase - where - T: Scalar + std::ops::$trait, - { - type Output = Self; - - fn $method(self, other: Self) -> Self::Output { - cmp!(ne: self.shape(), other.shape()); - let shape = self.shape().clone(); - let store = self.store.iter().zip(other.store.iter()).map(|(a, b)| *a $op *b).collect(); - let op = Op::Binary(Box::new(self), Box::new(other), BinaryOp::$trait); - from_vec_with_op(Some(op), shape, store) - } - } - - impl<'a, T> std::ops::$trait<&'a TensorBase> for TensorBase - where - T: Scalar + std::ops::$trait, - { - type Output = TensorBase; - - fn $method(self, other: &'a TensorBase) -> Self::Output { - if self.shape() != other.shape() { - panic!("shapes must be equal"); - } - let shape = self.shape().clone(); - let store = self.store.iter().zip(other.store.iter()).map(|(a, b)| *a $op *b).collect(); - let op = Op::Binary(Box::new(self), Box::new(other.clone()), BinaryOp::$trait); - from_vec_with_op(Some(op), shape, store) - } - } - - impl<'a, T> std::ops::$trait> for &'a TensorBase - where - T: Scalar + std::ops::$trait, - { - type Output = TensorBase; - - fn $method(self, other: TensorBase) -> Self::Output { - if self.shape() != other.shape() { - panic!("shapes must be equal"); - } - let shape = self.shape().clone(); - let store = self.store.iter().zip(other.store.iter()).map(|(a, b)| *a $op *b).collect(); - let op = Op::Binary(Box::new(self.clone()), Box::new(other), BinaryOp::$trait); - from_vec_with_op(Some(op), shape, store) - } - } - - impl<'a, 'b, T> std::ops::$trait<&'b TensorBase> for &'a TensorBase - where - T: Scalar + std::ops::$trait, - { - type Output = TensorBase; - - fn $method(self, other: &'b TensorBase) -> Self::Output { - if self.shape() != other.shape() { - panic!("shapes must be equal"); - } - let shape = self.shape().clone(); - let store = self.store.iter().zip(other.store.iter()).map(|(a, b)| *a $op *b).collect(); - let op = Op::Binary(Box::new(self.clone()), Box::new(other.clone()), BinaryOp::$trait); - from_vec_with_op(Some(op), shape, store) - } - } - }; -} - -macro_rules! 
impl_scalar_arith { - ($trait:ident, $method:ident, $op:tt) => { - // impl TensorBase - // where - // T: Copy + std::ops::$trait, - // { - // pub fn $method(self, other: T) -> TensorBase { - // let store = self.store.iter().map(|a| *a $op other).collect(); - // from_vec(self.shape().clone(), store) - // } - // } - - impl std::ops::$trait for TensorBase - where - T: Copy + std::ops::$trait, - { - type Output = Self; - - fn $method(self, other: T) -> Self::Output { - let store = self.store.iter().map(|a| *a $op other).collect(); - Self::Output::from_vec(self.shape().clone(), store) - } - } - - impl<'a, T> std::ops::$trait for &'a TensorBase - where - T: Copy + std::ops::$trait, - { - type Output = TensorBase; - - fn $method(self, other: T) -> Self::Output { - let store = self.store.iter().map(|a| *a $op other).collect(); - Self::Output::from_vec(self.shape().clone(), store) - } - } - }; +impl num::traits::One for TensorBase +where + T: Scalar, +{ + fn one() -> Self { + Self::fill(1, T::one()) + } } - -impl_arith!(Add, add, +); -impl_arith!(Div, div, /); -impl_arith!(Mul, mul, *); -impl_arith!(Sub, sub, -); - -impl_scalar_arith!(Add, add, +); -impl_scalar_arith!(Div, div, /); -impl_scalar_arith!(Mul, mul, *); -impl_scalar_arith!(Sub, sub, -); - diff --git a/tensor/tests/arith.rs b/tensor/tests/arith.rs index 8c301ab1..4aef4a43 100644 --- a/tensor/tests/arith.rs +++ b/tensor/tests/arith.rs @@ -49,9 +49,19 @@ fn test_sub() { #[test] fn test_matmul() { - let a = TensorBase::::fill((3, 2), 2.0); + let a = TensorBase::::fill((3, 2), 2_f64); let b = TensorBase::::ones((2, 3)); let c = a.matmul(&b); assert_eq!(c, TensorBase::::fill((3, 3), 4.0)); } + +#[test] +fn test_trig() { + let a = TensorBase::::ones((2, 2)); + let b = a.clone().sin(); + let c = a.cos(); + + assert_eq!(b[&[0, 0]], 1_f64.sin()); + assert_eq!(c[&[0, 0]], 1_f64.cos()); +} From 6a7bf11fbe178e1008f79298abadc8cd1edc54f9 Mon Sep 17 00:00:00 2001 From: Joe McCain III Date: Sun, 17 Mar 2024 15:42:15 -0500 Subject: [PATCH 28/87] update Signed-off-by: Joe McCain III --- core/src/stores/mod.rs | 17 +++++++++++++++++ tensor/src/data/mod.rs | 1 - tensor/src/impls/grad.rs | 2 +- tensor/src/ops/kinds.rs | 7 ------- 4 files changed, 18 insertions(+), 9 deletions(-) diff --git a/core/src/stores/mod.rs b/core/src/stores/mod.rs index 8d0b4582..732d2ba6 100644 --- a/core/src/stores/mod.rs +++ b/core/src/stores/mod.rs @@ -7,8 +7,25 @@ pub use self::{gradient::*, stack::*}; pub(crate) mod gradient; pub(crate) mod stack; +use std::borrow::Borrow; use std::collections::{BTreeMap, HashMap}; +pub trait Get +where + K: Borrow, +{ + fn get(&self, key: &Q) -> Option<&V>; +} + +impl Get for BTreeMap +where + K: Borrow + Ord, + Q: Ord, +{ + fn get(&self, key: &Q) -> Option<&V> { + BTreeMap::get(self, key) + } +} pub trait Store { fn get(&self, key: &K) -> Option<&V>; diff --git a/tensor/src/data/mod.rs b/tensor/src/data/mod.rs index ce24bea1..b0dde87b 100644 --- a/tensor/src/data/mod.rs +++ b/tensor/src/data/mod.rs @@ -9,7 +9,6 @@ pub unsafe trait RawData { pub trait Data: RawData {} - #[cfg(not(feature = "std"))] use alloc::vec::Vec; use core::ptr::NonNull; diff --git a/tensor/src/impls/grad.rs b/tensor/src/impls/grad.rs index 872c6264..f2a180ef 100644 --- a/tensor/src/impls/grad.rs +++ b/tensor/src/impls/grad.rs @@ -2,7 +2,7 @@ Appellation: grad Contrib: FL03 */ -use crate::ops::{BinaryOp, Op,}; +use crate::ops::{BinaryOp, Op}; use crate::prelude::Scalar; use crate::tensor::*; use acme::prelude::AtomicId; diff --git a/tensor/src/ops/kinds.rs 
b/tensor/src/ops/kinds.rs
index ff5311c7..1ad1ca7c 100644
--- a/tensor/src/ops/kinds.rs
+++ b/tensor/src/ops/kinds.rs
@@ -37,13 +37,6 @@ pub enum UnaryOp {
     Tanh,
 }
 
-pub enum Expr<T> {
-    Binary(BinaryOp),
-    Unary(UnaryOp),
-    Scalar(T),
-    Tensor(TensorBase<T>),
-}
-
 pub struct BinOp<T> {
     pub lhs: TensorBase<T>,
     pub rhs: TensorBase<T>,

From 6a45128aef37cf98f7fc011683119cadf991cb63 Mon Sep 17 00:00:00 2001
From: Joe McCain III
Date: Sun, 17 Mar 2024 15:49:56 -0500
Subject: [PATCH 29/87] update

Signed-off-by: Joe McCain III
---
 core/src/cmp/constants.rs | 13 +++++++++++++
 core/src/cmp/mod.rs       |  2 ++
 core/src/lib.rs           |  8 +-------
 3 files changed, 16 insertions(+), 7 deletions(-)

diff --git a/core/src/cmp/constants.rs b/core/src/cmp/constants.rs
index 13a301a9..aff09c02 100644
--- a/core/src/cmp/constants.rs
+++ b/core/src/cmp/constants.rs
@@ -6,6 +6,7 @@ use crate::ops::{Evaluate, Gradient};
 use num::{Num, One, Zero};
 #[cfg(feature = "serde")]
 use serde::{Deserialize, Serialize};
+use std::borrow::{Borrow, BorrowMut};
 use std::marker::ConstParamTy;
 use std::ops::{Deref, DerefMut, Neg, Not};
 
@@ -150,3 +151,15 @@ where
         self.0.is_zero()
     }
 }
+
+impl<T> Borrow<T> for Constant<T> {
+    fn borrow(&self) -> &T {
+        &self.0
+    }
+}
+
+impl<T> BorrowMut<T> for Constant<T> {
+    fn borrow_mut(&mut self) -> &mut T {
+        &mut self.0
+    }
+}
diff --git a/core/src/cmp/mod.rs b/core/src/cmp/mod.rs
index 83d1fb9f..675da56b 100644
--- a/core/src/cmp/mod.rs
+++ b/core/src/cmp/mod.rs
@@ -87,6 +87,8 @@ mod tests {
         let add = a + 3;
         assert_eq!(add, Constant(6));
 
+        // let b = Constant(3_f64).ln();
+
         let a = Constant::new(3);
         let b = Constant::new(3);
         assert_eq!(a + b, Constant(6));
diff --git a/core/src/lib.rs b/core/src/lib.rs
index 033a0c51..86c058f7 100644
--- a/core/src/lib.rs
+++ b/core/src/lib.rs
@@ -6,13 +6,7 @@
 //!
 //!
 #![allow(incomplete_features)]
-#![feature(
-    adt_const_params,
-    fn_traits,
-    rustc_private,
-    tuple_trait,
-    unboxed_closures
-)]
+#![feature(adt_const_params, fn_traits, tuple_trait, unboxed_closures)]
 
 pub use self::primitives::*;

From fbbd8431d384bf0374fc2dae285313685a9c3637 Mon Sep 17 00:00:00 2001
From: Joe McCain III
Date: Thu, 21 Mar 2024 08:29:11 -0500
Subject: [PATCH 30/87] update

Signed-off-by: Joe McCain III
---
 acme/examples/autodiff.rs   |  5 ++--
 core/src/cmp/dual.rs        | 60 +++++++++++++++++++++----------------
 macros/src/lib.rs           | 20 -------------
 tensor/src/impls/arith.rs   | 22 +++++++-------
 tensor/src/impls/num.rs     | 29 ++++++++++++++++++
 tensor/src/lib.rs           |  4 ++-
 tensor/src/ops/kinds.rs     | 51 +++++++++++++++++++++++++++++--
 tensor/src/ops/mod.rs       |  2 ++
 tensor/src/ops/op/binary.rs |  1 +
 tensor/src/ops/op/mod.rs    | 16 ++++++++++
 tensor/src/ops/op/unary.rs  |  1 +
 tensor/src/shape/mod.rs     | 20 +++++++++++++
 tensor/src/shape/shape.rs   | 17 ++++++++++-
 tensor/src/specs/scalar.rs  | 42 +++++++++++++++++++++++++-
 tensor/src/tensor.rs        | 27 +++++++++++------
 tensor/tests/arith.rs       |  5 ++--
 16 files changed, 248 insertions(+), 74 deletions(-)
 create mode 100644 tensor/src/impls/num.rs
 create mode 100644 tensor/src/ops/op/binary.rs
 create mode 100644 tensor/src/ops/op/mod.rs
 create mode 100644 tensor/src/ops/op/unary.rs

diff --git a/acme/examples/autodiff.rs b/acme/examples/autodiff.rs
index 9513b585..53002ebd 100644
--- a/acme/examples/autodiff.rs
+++ b/acme/examples/autodiff.rs
@@ -6,8 +6,8 @@
 #![feature(fn_traits)]
 
 extern crate acme;
 
+use acme::autodiff;
 use acme::prelude::sigmoid;
-use acme::{autodiff, show_item, show_streams};
 
 macro_rules!
eval { ($var:ident: $ex:expr) => { @@ -21,14 +21,13 @@ fn main() -> Result<(), Box> { // samples(x); // let z = sigmoid(x); - show_item!(sigmoid(x)); + // show_item!(sigmoid(x)); multiply(x, x); Ok(()) } -#[show_streams(x)] pub fn multiply(x: A, y: B) -> C where A: std::ops::Mul, diff --git a/core/src/cmp/dual.rs b/core/src/cmp/dual.rs index 8949149e..87cb9ed0 100644 --- a/core/src/cmp/dual.rs +++ b/core/src/cmp/dual.rs @@ -28,14 +28,17 @@ pub struct Dual { impl Dual { pub fn new(real: T, dual: T) -> Self { - Self { real, dual } + Self { dual, real } } - pub fn real(value: T) -> Self + pub fn real(real: T) -> Self where T: Default, { - Self::new(value, T::default()) + Self { + dual: T::default(), + real, + } } pub fn value(&self) -> &T { @@ -110,28 +113,6 @@ unsafe impl Send for Dual {} unsafe impl Sync for Dual {} -impl ops::Add for Dual -where - T: ops::Add, -{ - type Output = Dual; - - fn add(self, rhs: Self) -> Self::Output { - Dual::new(self.real + rhs.real, self.dual + rhs.dual) - } -} - -impl ops::Add for Dual -where - T: ops::Add, -{ - type Output = Dual; - - fn add(self, rhs: T) -> Self::Output { - Dual::new(self.real + rhs, self.dual) - } -} - impl ops::Div for Dual where T: Copy + ops::Div + ops::Mul + ops::Sub, @@ -262,3 +243,32 @@ where self.real.is_zero() } } + +macro_rules! impl_dual_op { + ($trait:ident, $method:ident) => { + impl $trait for Dual + where + T: $trait, + { + type Output = Dual; + + fn $method(self, rhs: Self) -> Self::Output { + Dual::new(self.real.$method(rhs.real), self.dual.$method(rhs.dual)) + } + } + + impl $trait for Dual + where + T: Copy + $trait, + { + type Output = Dual; + + fn $method(self, rhs: T) -> Self::Output { + Dual::new(self.real.$method(rhs), self.dual.$method(rhs)) + } + } + }; +} +use std::ops::Add; + +impl_dual_op!(Add, add); diff --git a/macros/src/lib.rs b/macros/src/lib.rs index c10c05d1..83bd8639 100644 --- a/macros/src/lib.rs +++ b/macros/src/lib.rs @@ -19,28 +19,8 @@ pub(crate) mod gradient; use ast::partials::*; use proc_macro::TokenStream; use quote::quote; -use syn::spanned::Spanned; use syn::{parse_macro_input, Expr}; -#[proc_macro_attribute] -pub fn show_streams(attr: TokenStream, item: TokenStream) -> TokenStream { - let input = parse_macro_input!(item as syn::ItemFn); - println!("attr: \"{:?}\"", &attr); - println!("item: \"{:?}\"", &input); - (quote! { #input }).into() -} - -#[proc_macro] -pub fn show_item(item: TokenStream) -> TokenStream { - let expr = parse_macro_input!(item as Expr); - let span = expr.span(); - println!("Span Bytes: {:?}", span.byte_range()); - println!("Span (start, end): ({:?}, {:?})", span.start(), span.end()); - println!("Source File: {:?}", span.unwrap().source_file()); - println!("Source Text: {:?}", span.source_text()); - (quote! 
{ #expr }).into() -} - #[proc_macro_attribute] pub fn partial(attr: TokenStream, item: TokenStream) -> TokenStream { let input = parse_macro_input!(item as syn::ItemFn); diff --git a/tensor/src/impls/arith.rs b/tensor/src/impls/arith.rs index 280ee712..0675aa19 100644 --- a/tensor/src/impls/arith.rs +++ b/tensor/src/impls/arith.rs @@ -14,7 +14,7 @@ where fn neg(self) -> Self::Output { let shape = self.shape().clone(); - let store = self.store.iter().map(|a| -*a).collect(); + let store = self.data().iter().map(|a| -*a).collect(); let op = Op::Unary(Box::new(self), UnaryOp::Neg); from_vec_with_op(op, shape, store) } @@ -28,7 +28,7 @@ where fn neg(self) -> Self::Output { let shape = self.shape().clone(); - let store = self.store.iter().map(|a| -*a).collect(); + let store = self.data().iter().map(|a| -*a).collect(); let op = Op::Unary(Box::new(self.clone()), UnaryOp::Neg); from_vec_with_op(op, shape, store) } @@ -53,7 +53,7 @@ macro_rules! impl_arith { fn $method(self, other: Self) -> Self::Output { cmp!(ne: self.shape(), other.shape()); let shape = self.shape().clone(); - let store = self.store.iter().zip(other.store.iter()).map(|(a, b)| *a $op *b).collect(); + let store = self.data().iter().zip(other.data().iter()).map(|(a, b)| *a $op *b).collect(); let op = Op::Binary(Box::new(self), Box::new(other), BinaryOp::$trait); from_vec_with_op(op, shape, store) } @@ -70,7 +70,7 @@ macro_rules! impl_arith { panic!("shapes must be equal"); } let shape = self.shape().clone(); - let store = self.store.iter().zip(other.store.iter()).map(|(a, b)| *a $op *b).collect(); + let store = self.data().iter().zip(other.data().iter()).map(|(a, b)| *a $op *b).collect(); let op = Op::Binary(Box::new(self), Box::new(other.clone()), BinaryOp::$trait); from_vec_with_op(op, shape, store) } @@ -87,7 +87,7 @@ macro_rules! impl_arith { panic!("shapes must be equal"); } let shape = self.shape().clone(); - let store = self.store.iter().zip(other.store.iter()).map(|(a, b)| *a $op *b).collect(); + let store = self.data().iter().zip(other.data().iter()).map(|(a, b)| *a $op *b).collect(); let op = Op::Binary(Box::new(self.clone()), Box::new(other), BinaryOp::$trait); from_vec_with_op(op, shape, store) } @@ -104,7 +104,7 @@ macro_rules! impl_arith { panic!("shapes must be equal"); } let shape = self.shape().clone(); - let store = self.store.iter().zip(other.store.iter()).map(|(a, b)| *a $op *b).collect(); + let store = self.data().iter().zip(other.data().iter()).map(|(a, b)| *a $op *b).collect(); let op = Op::Binary(Box::new(self.clone()), Box::new(other.clone()), BinaryOp::$trait); from_vec_with_op(op, shape, store) } @@ -131,8 +131,9 @@ macro_rules! impl_scalar_arith { type Output = Self; fn $method(self, other: T) -> Self::Output { - let store = self.store.iter().map(|a| *a $op other).collect(); - Self::Output::from_vec(self.shape().clone(), store) + let shape = self.shape().clone(); + let store = self.into_store().iter().map(|a| *a $op other).collect(); + Self::Output::from_vec(shape, store) } } @@ -143,8 +144,9 @@ macro_rules! 
impl_scalar_arith { type Output = TensorBase; fn $method(self, other: T) -> Self::Output { - let store = self.store.iter().map(|a| *a $op other).collect(); - Self::Output::from_vec(self.shape().clone(), store) + let shape = self.shape().clone(); + let store = self.data().iter().map(|a| *a $op other).collect(); + Self::Output::from_vec(shape, store) } } }; diff --git a/tensor/src/impls/num.rs b/tensor/src/impls/num.rs new file mode 100644 index 00000000..a92aa13f --- /dev/null +++ b/tensor/src/impls/num.rs @@ -0,0 +1,29 @@ +/* + Appellation: num + Contrib: FL03 +*/ +use crate::prelude::Scalar; +use crate::tensor::TensorBase; +use num::traits::{One, Zero}; + +impl One for TensorBase +where + T: Scalar, +{ + fn one() -> Self { + Self::fill(1, T::one()) + } +} + +impl Zero for TensorBase +where + T: Scalar, +{ + fn zero() -> Self { + Self::fill(1, T::zero()) + } + + fn is_zero(&self) -> bool { + self.data().iter().all(|x| x.is_zero()) + } +} diff --git a/tensor/src/lib.rs b/tensor/src/lib.rs index 53371e7d..be989cda 100644 --- a/tensor/src/lib.rs +++ b/tensor/src/lib.rs @@ -11,6 +11,7 @@ extern crate alloc; extern crate acme_core as acme; +#[doc(inline)] pub use self::tensor::*; pub(crate) mod tensor; @@ -25,6 +26,7 @@ mod impls { mod arith; mod grad; mod linalg; + mod num; } pub type Tensor = tensor::TensorBase; @@ -35,7 +37,7 @@ pub mod prelude { #[doc(inline)] pub use crate::ops::*; #[doc(inline)] - pub use crate::shape::*; + pub use crate::shape::prelude::*; #[doc(inline)] pub use crate::specs::prelude::*; #[doc(inline)] diff --git a/tensor/src/ops/kinds.rs b/tensor/src/ops/kinds.rs index 1ad1ca7c..037361cd 100644 --- a/tensor/src/ops/kinds.rs +++ b/tensor/src/ops/kinds.rs @@ -3,6 +3,9 @@ Contrib: FL03 */ use crate::TensorBase; +#[cfg(feature = "serde")] +use serde::{Deserialize, Serialize}; +use strum::{Display, EnumCount, EnumIs, EnumIter, EnumString, VariantNames}; #[derive(Clone, Debug)] pub enum Op { @@ -10,7 +13,29 @@ pub enum Op { Unary(Box>, UnaryOp), } -#[derive(Clone, Copy, Debug)] +#[derive( + Clone, + Copy, + Debug, + Display, + EnumCount, + EnumIs, + EnumIter, + EnumString, + Eq, + Hash, + Ord, + PartialEq, + PartialOrd, + VariantNames, +)] +#[cfg_attr( + feature = "serde", + derive(Deserialize, Serialize), + serde(rename_all = "lowercase", untagged) +)] +#[repr(u8)] +#[strum(serialize_all = "lowercase")] pub enum BinaryOp { Add, Div, @@ -19,7 +44,29 @@ pub enum BinaryOp { Sub, } -#[derive(Clone, Copy, Debug)] +#[derive( + Clone, + Copy, + Debug, + Display, + EnumCount, + EnumIs, + EnumIter, + EnumString, + Eq, + Hash, + Ord, + PartialEq, + PartialOrd, + VariantNames, +)] +#[cfg_attr( + feature = "serde", + derive(Deserialize, Serialize), + serde(rename_all = "lowercase", untagged) +)] +#[repr(u8)] +#[strum(serialize_all = "lowercase")] pub enum UnaryOp { Abs, Cos, diff --git a/tensor/src/ops/mod.rs b/tensor/src/ops/mod.rs index 88eb8a25..72c8741b 100644 --- a/tensor/src/ops/mod.rs +++ b/tensor/src/ops/mod.rs @@ -7,6 +7,8 @@ pub use self::{backprop::*, kinds::*}; pub(crate) mod backprop; pub(crate) mod kinds; +pub mod op; + pub trait TensorOp {} #[cfg(test)] diff --git a/tensor/src/ops/op/binary.rs b/tensor/src/ops/op/binary.rs new file mode 100644 index 00000000..8b137891 --- /dev/null +++ b/tensor/src/ops/op/binary.rs @@ -0,0 +1 @@ + diff --git a/tensor/src/ops/op/mod.rs b/tensor/src/ops/op/mod.rs new file mode 100644 index 00000000..01ed16c5 --- /dev/null +++ b/tensor/src/ops/op/mod.rs @@ -0,0 +1,16 @@ +/* + Appellation: ops + Contrib: FL03 +*/ + +pub mod binary; +pub mod 
unary;
+
+#[cfg(test)]
+mod tests {
+
+    #[test]
+    fn test_ops() {
+        assert_eq!(1 + 1, 2);
+    }
+}
diff --git a/tensor/src/ops/op/unary.rs b/tensor/src/ops/op/unary.rs
new file mode 100644
index 00000000..8b137891
--- /dev/null
+++ b/tensor/src/ops/op/unary.rs
@@ -0,0 +1 @@
+
diff --git a/tensor/src/shape/mod.rs b/tensor/src/shape/mod.rs
index a1f377d0..26351378 100644
--- a/tensor/src/shape/mod.rs
+++ b/tensor/src/shape/mod.rs
@@ -10,6 +10,26 @@ pub(crate) mod rank;
 pub(crate) mod shape;
 pub(crate) mod stride;
 
+pub trait IntoShape {
+    fn into_shape(self) -> Shape;
+}
+
+impl<S> IntoShape for S
+where
+    S: Into<Shape>,
+{
+    fn into_shape(self) -> Shape {
+        self.into()
+    }
+}
+
+pub(crate) mod prelude {
+    pub use super::dimension::*;
+    pub use super::rank::*;
+    pub use super::shape::*;
+    pub use super::stride::*;
+}
+
 #[cfg(test)]
 mod tests {
     use super::*;
diff --git a/tensor/src/shape/shape.rs b/tensor/src/shape/shape.rs
index 7c111a00..df4a7fb9 100644
--- a/tensor/src/shape/shape.rs
+++ b/tensor/src/shape/shape.rs
@@ -46,7 +46,7 @@ impl Shape {
         Self(vec![0; rank])
     }
 
-    pub fn matmul_shape(&self, other: &Self) -> Result<Self> {
+    pub(crate) fn matmul_shape(&self, other: &Self) -> Result<Self> {
         if *self.rank() != 2 || *other.rank() != 2 {
             return Err("Both shapes must be rank 2".into());
         }
@@ -296,3 +296,18 @@ impl ops::Index<ops::Range<usize>> for Shape {
 
 unsafe impl Send for Shape {}
 
 unsafe impl Sync for Shape {}
+
+macro_rules! impl_from_tuple {
+    ($($n:tt: $name:ident),+) => {
+        impl<$($name),+> From<($($name,)+)> for Shape
+        where
+            $($name: Into<usize>,)+
+        {
+            fn from(shape: ($($name,)+)) -> Self {
+                Self(vec![$($name.into(),)+])
+            }
+        }
+    };
+}
+
+// impl_from_tuple!(A: A);
diff --git a/tensor/src/specs/scalar.rs b/tensor/src/specs/scalar.rs
index 3183a703..a5f789d8 100644
--- a/tensor/src/specs/scalar.rs
+++ b/tensor/src/specs/scalar.rs
@@ -235,11 +235,29 @@ impl_scalar!(f64);
 
 macro_rules! unary_op_trait {
     ($trait:ident, $method:ident) => {
         pub trait $trait {
-            fn $method(self) -> Self;
+            type Output;
+
+            fn $method(self) -> Self::Output;
         }
     };
 }
 
+macro_rules!
impl_unary_trait { + ($trait:ident, $method:ident, $op:tt) => { + impl $trait for T + where + T: Scalar, + { + type Output = T; + + fn $method(self) -> Self::Output { + ::$op(self) + } } }; } +unary_op_trait!(Abs, abs); unary_op_trait!(Cos, cos); unary_op_trait!(Cosh, cosh); unary_op_trait!(Exp, exp); @@ -251,3 +269,25 @@ unary_op_trait!(Sqrt, sqrt); unary_op_trait!(Square, square); unary_op_trait!(Tan, tan); unary_op_trait!(Tanh, tanh); + +impl Abs for T +where + T: num::Signed, +{ + type Output = T; + + fn abs(self) -> Self::Output { + ::abs(&self) + } +} + +impl Cos for T +where + T: Scalar, +{ + type Output = T; + + fn cos(self) -> Self::Output { + ::cos(self) + } +} diff --git a/tensor/src/tensor.rs b/tensor/src/tensor.rs index 5cf4a58e..7fb7c0b3 100644 --- a/tensor/src/tensor.rs +++ b/tensor/src/tensor.rs @@ -34,6 +34,7 @@ pub(crate) fn from_vec_with_op( } #[derive(Clone, Debug)] +// #[derive(Clone, Debug, Eq, Hash, Ord, PartialOrd)] pub struct TensorBase { pub(crate) id: AtomicId, pub(crate) layout: Layout, @@ -76,6 +77,23 @@ impl TensorBase { } } +impl TensorBase { + pub(crate) fn data(&self) -> &Vec { + &self.store + } + + pub(crate) fn into_store(self) -> Vec { + self.store + } + + pub(crate) fn snapshot(&self) -> Vec + where + T: Clone, + { + self.store.clone() + } +} + impl TensorBase where T: Clone, @@ -181,12 +199,3 @@ where self.store == other.store } } - -impl num::traits::One for TensorBase -where - T: Scalar, -{ - fn one() -> Self { - Self::fill(1, T::one()) - } -} diff --git a/tensor/tests/arith.rs b/tensor/tests/arith.rs index 4aef4a43..25dc7788 100644 --- a/tensor/tests/arith.rs +++ b/tensor/tests/arith.rs @@ -3,9 +3,10 @@ Contrib: FL03 */ #![cfg(test)] -extern crate acme_tensor as tensor; +extern crate acme_tensor as acme; -use tensor::TensorBase; +use acme::TensorBase; +// use acme::prelude::Matmul; #[test] fn test_add() { From 21ded1728a3235b9e140029c3aae1fab94bf93d1 Mon Sep 17 00:00:00 2001 From: Joe McCain III Date: Thu, 21 Mar 2024 10:13:44 -0500 Subject: [PATCH 31/87] update Signed-off-by: Joe McCain III --- .github/dependabot.yml | 4 + .github/workflows/crates.yml | 2 +- Cargo.toml | 5 +- acme/Cargo.toml | 23 ++++-- acme/examples/cgraph.rs | 3 +- acme/src/lib.rs | 20 +++-- acme/tests/autodiff.rs | 5 +- core/src/cmp/mod.rs | 35 --------- core/src/cmp/variables.rs | 13 ++++ core/src/errors/error.rs | 22 ++++++ core/src/errors/kinds.rs | 36 ++++++--- core/src/id/mod.rs | 3 +- core/src/lib.rs | 8 +- core/src/ops/kinds.rs | 50 ++++++------- core/src/ops/mod.rs | 3 +- core/src/specs/mod.rs | 13 ++++ core/src/{ops => specs}/operator.rs | 0 graphs/Cargo.toml | 53 ++++++++++++++ {core/src/graphs => graphs/src}/dcg/edge.rs | 14 ++-- {core/src/graphs => graphs/src}/dcg/graph.rs | 20 ++--- {core/src/graphs => graphs/src}/dcg/mod.rs | 0 {core/src/graphs => graphs/src}/dcg/node.rs | 15 +++- .../id/gradient.rs => graphs/src/grad/id.rs | 4 +- graphs/src/grad/mod.rs | 17 +++++ graphs/src/grad/store.rs | 55 ++++++++++++++ core/src/graphs/mod.rs => graphs/src/graph.rs | 24 +----- graphs/src/lib.rs | 29 ++++++++ {core/src/graphs => graphs/src}/scg/edge.rs | 3 +- {core/src/graphs => graphs/src}/scg/graph.rs | 22 +++--- {core/src/graphs => graphs/src}/scg/mod.rs | 0 {core/src/graphs => graphs/src}/scg/node.rs | 10 +-- graphs/tests/dcg.rs | 6 ++ graphs/tests/default.rs | 7 ++ tensor/Cargo.toml | 10 ++- tensor/src/errors/error.rs | 73 +++++++++++++++++++ tensor/src/errors/mod.rs | 9 +++ tensor/src/impls/arith.rs | 13 +--- tensor/src/lib.rs | 4 + tensor/src/linalg/arith.rs | 2 +- 
tensor/src/linalg/mod.rs | 4 +- tensor/src/ops/kinds.rs | 5 ++ tensor/src/ops/op/unary.rs | 67 +++++++++++++++++ tensor/src/shape/shape.rs | 7 +- tensor/src/specs/scalar.rs | 60 --------------- tensor/src/tensor.rs | 15 +++- 45 files changed, 551 insertions(+), 242 deletions(-) rename core/src/{ops => specs}/operator.rs (100%) create mode 100644 graphs/Cargo.toml rename {core/src/graphs => graphs/src}/dcg/edge.rs (63%) rename {core/src/graphs => graphs/src}/dcg/graph.rs (90%) rename {core/src/graphs => graphs/src}/dcg/mod.rs (100%) rename {core/src/graphs => graphs/src}/dcg/node.rs (74%) rename core/src/id/gradient.rs => graphs/src/grad/id.rs (95%) create mode 100644 graphs/src/grad/mod.rs create mode 100644 graphs/src/grad/store.rs rename core/src/graphs/mod.rs => graphs/src/graph.rs (71%) create mode 100644 graphs/src/lib.rs rename {core/src/graphs => graphs/src}/scg/edge.rs (94%) rename {core/src/graphs => graphs/src}/scg/graph.rs (89%) rename {core/src/graphs => graphs/src}/scg/mod.rs (100%) rename {core/src/graphs => graphs/src}/scg/node.rs (88%) create mode 100644 graphs/tests/dcg.rs create mode 100644 graphs/tests/default.rs create mode 100644 tensor/src/errors/error.rs create mode 100644 tensor/src/errors/mod.rs diff --git a/.github/dependabot.yml b/.github/dependabot.yml index fd5da2be..a0d5fcd7 100644 --- a/.github/dependabot.yml +++ b/.github/dependabot.yml @@ -20,6 +20,10 @@ updates: directory: /derive schedule: interval: daily + - package-ecosystem: cargo + directory: /graphs + schedule: + interval: daily - package-ecosystem: cargo directory: /macros schedule: diff --git a/.github/workflows/crates.yml b/.github/workflows/crates.yml index d2e8fa33..d023028b 100644 --- a/.github/workflows/crates.yml +++ b/.github/workflows/crates.yml @@ -34,7 +34,7 @@ jobs: runs-on: ubuntu-latest strategy: matrix: - features: [ derive, macros, tensor ] + features: [ derive, graphs, macros, tensor ] steps: - uses: actions/checkout@v4 - name: Publish (${{ github.ref.name }}-${{ matrix.features }}) diff --git a/Cargo.toml b/Cargo.toml index ef9a165a..23ffa735 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -7,7 +7,7 @@ homepage = "https://github.com/FL03/acme/wikis" license = "Apache-2.0" repository = "https://github.com/FL03/acme" readme = "README.md" -version = "0.3.0" # TODO - Update cargo package version +version = "0.3.0-nightly" # TODO - Update cargo package version [workspace] default-members = [ @@ -18,6 +18,7 @@ members = [ "acme", "core", "derive", + "graphs", "macros", "tensor" ] @@ -25,8 +26,6 @@ resolver = "2" [workspace.dependencies] anyhow = "1" -serde = { features = ["derive"], version = "1" } -serde_json = "1" strum = { features = ["derive"], version = "0.26" } smart-default = "0.7" diff --git a/acme/Cargo.toml b/acme/Cargo.toml index 50461e4e..80812426 100644 --- a/acme/Cargo.toml +++ b/acme/Cargo.toml @@ -21,19 +21,22 @@ default = [ full = [ "core", "derive", + "graph", "serde", "tensor" ] -core = [ - "dep:acme-core" -] +core = [] derive = [ "dep:acme-derive", "macros" ] +graph = [ + "dep:acme-graphs" +] + macros = [ "dep:acme-macros" ] @@ -44,6 +47,7 @@ tensor = [ serde = [ "acme-core/serde", + "acme-graphs/serde", "acme-tensor/serde" ] @@ -58,6 +62,10 @@ test = true name = "autodiff" required-features = ["macros"] +[[example]] +name = "cgraph" +required-features = ["graph"] + [[test]] name = "autodiff" required-features = ["macros"] @@ -65,10 +73,11 @@ required-features = ["macros"] [build-dependencies] [dependencies] -acme-core = { optional = true, path = "../core", version = 
"0.3.0" } -acme-derive = { optional = true, path = "../derive", version = "0.3.0" } -acme-macros = { optional = true, path = "../macros", version = "0.3.0" } -acme-tensor = { optional = true, path = "../tensor", version = "0.3.0" } +acme-core = { path = "../core", version = "0.3.0-nightly" } +acme-derive = { optional = true, path = "../derive", version = "0.3.0-nightly" } +acme-graphs = { optional = true, path = "../graphs", version = "0.3.0-nightly" } +acme-macros = { optional = true, path = "../macros", version = "0.3.0-nightly" } +acme-tensor = { optional = true, path = "../tensor", version = "0.3.0-nightly" } [dev-dependencies] approx = "0.5" diff --git a/acme/examples/cgraph.rs b/acme/examples/cgraph.rs index d097dab9..dd92ff96 100644 --- a/acme/examples/cgraph.rs +++ b/acme/examples/cgraph.rs @@ -4,7 +4,8 @@ */ extern crate acme; -use acme::prelude::{Result, Scg}; +use acme::graph::scg::Scg; +use acme::prelude::Result; fn main() -> Result<()> { let mut scg = Scg::new(); diff --git a/acme/src/lib.rs b/acme/src/lib.rs index a6fba212..cb52e647 100644 --- a/acme/src/lib.rs +++ b/acme/src/lib.rs @@ -7,22 +7,30 @@ //! Acme is an autodifferentiaion library for Rust. It is designed to be a //! flexible and powerful tool for building machine learning models and //! other differentiable programs. -#[cfg(feature = "core")] -pub use acme_core as core; +#[doc(inline)] +pub use acme_core::*; #[cfg(feature = "derive")] pub use acme_derive::*; +#[cfg(feature = "graph")] +#[doc(inline)] +pub use acme_graphs as graph; #[cfg(feature = "macros")] pub use acme_macros::*; #[cfg(feature = "tensor")] +#[doc(inline)] pub use acme_tensor as tensor; pub mod prelude { - #[cfg(feature = "core")] - pub use crate::core::prelude::*; - #[cfg(feature = "tensor")] - pub use crate::tensor::prelude::*; + #[doc(inline)] + pub use acme_core::prelude::*; #[cfg(feature = "derive")] pub use acme_derive::*; + #[cfg(feature = "graph")] + #[doc(inline)] + pub use acme_graphs::prelude::*; #[cfg(feature = "macros")] pub use acme_macros::*; + #[cfg(feature = "tensor")] + #[doc(inline)] + pub use acme_tensor::prelude::*; } diff --git a/acme/tests/autodiff.rs b/acme/tests/autodiff.rs index 1adb9e52..38efa622 100644 --- a/acme/tests/autodiff.rs +++ b/acme/tests/autodiff.rs @@ -2,15 +2,13 @@ Appellation: gradient Contrib: FL03 */ -#![allow(unused_variables)] #[cfg(test)] extern crate acme; -use acme::prelude::{autodiff, sigmoid, Sigmoid}; +use acme::prelude::autodiff; use approx::assert_abs_diff_eq; use num::traits::Float; -use std::ops::Add; pub fn add(a: A, b: B) -> C where @@ -175,6 +173,7 @@ fn test_sigmoid() { #[ignore = "Currently, support for function calls is not fully implemented"] #[test] fn test_function_call() { + use acme::prelude::sigmoid; let (x, y) = (1_f64, 2_f64); // differentiating a function call w.r.t. x assert_eq!(autodiff!(x: add(x, y)), 1.0); diff --git a/core/src/cmp/mod.rs b/core/src/cmp/mod.rs index 675da56b..b13852d9 100644 --- a/core/src/cmp/mod.rs +++ b/core/src/cmp/mod.rs @@ -12,31 +12,6 @@ pub(crate) mod dual; pub(crate) mod operators; pub(crate) mod variables; -use petgraph::prelude::NodeIndex; - -pub trait NodeConfig { - type Eval; - type Grad; -} - -#[derive(Clone, Debug, PartialEq)] -pub enum FnNode { - Const(Constant), - Var(Variable), - Binary { left: NodeIndex, right: NodeIndex }, - Operator {}, -} - -impl FnNode { - pub fn constant(value: T) -> Self { - Self::Const(Constant::new(value)) - } - - pub fn variable(name: impl ToString) -> Self { - Self::Var(Variable::new(name)) - } -} - macro_rules! 
impl_op { ($name:ident, $bound:ident, $fn:ident, $val:tt, $e:expr) => { impl $bound for $name @@ -93,14 +68,4 @@ mod tests { let b = Constant::new(3); assert_eq!(a + b, Constant(6)); } - - #[test] - fn test_fn_node_constant() { - let node = FnNode::constant(3); - assert_eq!(node, FnNode::Const(Constant(3))); - - let value = Constant(3); - let add = value + 3; - assert_eq!(add, Constant(6)); - } } diff --git a/core/src/cmp/variables.rs b/core/src/cmp/variables.rs index 0294cf7c..e44438a8 100644 --- a/core/src/cmp/variables.rs +++ b/core/src/cmp/variables.rs @@ -6,6 +6,7 @@ use crate::ops::{Evaluate, Gradient}; use num::{Num, One, Zero}; #[cfg(feature = "serde")] use serde::{Deserialize, Serialize}; +use std::borrow::{Borrow, BorrowMut}; use std::ops::{Add, Div, Mul, Sub}; #[cfg_attr(feature = "serde", derive(Deserialize, Serialize,))] @@ -50,6 +51,18 @@ impl Variable { } } +impl Borrow for Variable { + fn borrow(&self) -> &T { + self.value.as_ref().unwrap() + } +} + +impl BorrowMut for Variable { + fn borrow_mut(&mut self) -> &mut T { + self.value.as_mut().unwrap() + } +} + impl std::fmt::Display for Variable { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { write!(f, "{}", self.name) diff --git a/core/src/errors/error.rs b/core/src/errors/error.rs index b4614150..6447f28a 100644 --- a/core/src/errors/error.rs +++ b/core/src/errors/error.rs @@ -20,6 +20,18 @@ impl Error { message: msg.to_string(), } } + + pub fn kind(&self) -> ErrorKind { + self.kind + } + + pub fn message(&self) -> &str { + &self.message + } + + pub fn into_message(self) -> String { + self.message + } } impl std::fmt::Display for Error { @@ -72,4 +84,14 @@ macro_rules! error_from { }; } +macro_rules! into_error { + (kind $kind:expr, $t:ty) => { + impl From<$t> for Error { + fn from(err: $t) -> Self { + Self::new($kind, err.to_string()) + } + } + }; +} + error_from!(shared ErrorKind::Unknown, (&str, String, Box)); diff --git a/core/src/errors/kinds.rs b/core/src/errors/kinds.rs index e1864072..0d03ac81 100644 --- a/core/src/errors/kinds.rs +++ b/core/src/errors/kinds.rs @@ -6,6 +6,17 @@ use serde::{Deserialize, Serialize}; use strum::{Display, EnumCount, EnumIs, EnumIter, VariantNames}; +pub trait ErrorType { + type Kind; + + fn kind(&self) -> Self::Kind; +} + +pub enum Errors { + Specific(Box>), + Unknown, +} + #[derive( Clone, Copy, @@ -29,19 +40,13 @@ use strum::{Display, EnumCount, EnumIs, EnumIter, VariantNames}; )] #[strum(serialize_all = "snake_case")] pub enum ErrorKind { - Func(FuncError), + Func, Graph, Sync, #[default] Unknown, } -impl From for ErrorKind { - fn from(e: FuncError) -> Self { - ErrorKind::Func(e) - } -} - #[derive( Clone, Copy, @@ -50,7 +55,6 @@ impl From for ErrorKind { Display, EnumCount, EnumIs, - EnumIter, Eq, Hash, Ord, @@ -64,8 +68,18 @@ impl From for ErrorKind { serde(rename_all = "snake_case") )] #[strum(serialize_all = "snake_case")] -pub enum FuncError { +pub enum ExternalError { + Known(E), #[default] - ArgCount, - ArgType, + Unknown, +} + +impl ExternalError { + pub fn new(error: E) -> Self { + Self::Known(error) + } + + pub fn unknown() -> Self { + Self::Unknown + } } diff --git a/core/src/id/mod.rs b/core/src/id/mod.rs index 0e3edfdd..f1df797a 100644 --- a/core/src/id/mod.rs +++ b/core/src/id/mod.rs @@ -5,10 +5,9 @@ //! # Ids //! //! 
-pub use self::{atomic::*, gradient::*, id::*}; +pub use self::{atomic::*, id::*}; pub(crate) mod atomic; -pub(crate) mod gradient; pub(crate) mod id; pub trait Identifier {} diff --git a/core/src/lib.rs b/core/src/lib.rs index 86c058f7..66d477c1 100644 --- a/core/src/lib.rs +++ b/core/src/lib.rs @@ -8,29 +8,27 @@ #![allow(incomplete_features)] #![feature(adt_const_params, fn_traits, tuple_trait, unboxed_closures)] -pub use self::primitives::*; +pub use self::{primitives::*, utils::*}; pub(crate) mod primitives; -pub(crate) mod specs; pub(crate) mod utils; pub mod cmp; pub mod errors; pub mod eval; -pub mod graphs; pub mod id; pub mod ops; +pub mod specs; pub mod stores; pub mod prelude { pub use crate::primitives::*; - pub use crate::specs::*; pub use crate::utils::*; pub use crate::cmp::*; pub use crate::errors::*; - pub use crate::graphs::prelude::*; pub use crate::id::*; pub use crate::ops::*; + pub use crate::specs::prelude::*; pub use crate::stores::*; } diff --git a/core/src/ops/kinds.rs b/core/src/ops/kinds.rs index f0a28b69..da24f4f1 100644 --- a/core/src/ops/kinds.rs +++ b/core/src/ops/kinds.rs @@ -10,11 +10,11 @@ use smart_default::SmartDefault; use strum::{Display, EnumCount, EnumIs, EnumIter, VariantNames}; #[derive(Clone)] -pub enum Op { - Binary(T, T, BinaryOp), - Compare(T, T, CompareOp), +pub enum Expr { + Binary(T, T, BinaryExpr), + Compare(T, T, CompareExpr), Custom(String), - Unary(T, UnaryOp), + Unary(T, UnaryExpr), } #[cfg_attr( @@ -40,7 +40,7 @@ pub enum Op { )] #[repr(u8)] #[strum(serialize_all = "lowercase")] -pub enum CompareOp { +pub enum CompareExpr { #[default] Eq, Ge, @@ -73,7 +73,7 @@ pub enum CompareOp { )] #[repr(u8)] #[strum(serialize_all = "lowercase")] -pub enum BinaryOp { +pub enum BinaryExpr { #[default] Add(Addition), Div(Division), @@ -83,7 +83,7 @@ pub enum BinaryOp { Sub(Subtraction), } -impl BinaryOp { +impl BinaryExpr { pub fn add() -> Self { Self::Add(Addition) } @@ -116,7 +116,7 @@ impl BinaryOp { } } -impl BinaryOperation for BinaryOp +impl BinaryOperation for BinaryExpr where T: Copy + Default + PartialOrd + num::traits::NumOps, { @@ -146,13 +146,13 @@ where } } -impl From for BinaryOp { +impl From for BinaryExpr { fn from(_: Addition) -> Self { Self::Add(Addition) } } -impl From for BinaryOp { +impl From for BinaryExpr { fn from(_: Division) -> Self { Self::Div(Division) } @@ -181,7 +181,7 @@ impl From for BinaryOp { )] #[repr(u8)] #[strum(serialize_all = "lowercase")] -pub enum UnaryOp { +pub enum UnaryExpr { #[default] Abs, Ceil, @@ -223,23 +223,23 @@ pub enum UnaryOp { )] #[repr(u8)] #[strum(serialize_all = "lowercase")] -pub enum Ops { - Binary(BinaryOp), - Compare(CompareOp), +pub enum Operations { + Binary(BinaryExpr), + Compare(CompareExpr), #[default] - Unary(UnaryOp), + Unary(UnaryExpr), Custom { name: String, }, } -impl Ops { +impl Operations { /// A functional constructor for [Ops::Binary] - pub fn binary(op: BinaryOp) -> Self { + pub fn binary(op: BinaryExpr) -> Self { Self::Binary(op) } /// A functional constructor for [Ops::Compare] - pub fn compare(op: CompareOp) -> Self { + pub fn compare(op: CompareExpr) -> Self { Self::Compare(op) } /// A functional constructor for [Ops::Custom] @@ -247,25 +247,25 @@ impl Ops { Self::Custom { name: name.into() } } /// A functional constructor for [Ops::Unary] - pub fn unary(op: UnaryOp) -> Self { + pub fn unary(op: UnaryExpr) -> Self { Self::Unary(op) } } -impl From for Ops { - fn from(op: BinaryOp) -> Self { +impl From for Operations { + fn from(op: BinaryExpr) -> Self { Self::Binary(op) } 
} -impl From for Ops { - fn from(op: CompareOp) -> Self { +impl From for Operations { + fn from(op: CompareExpr) -> Self { Self::Compare(op) } } -impl From for Ops { - fn from(op: UnaryOp) -> Self { +impl From for Operations { + fn from(op: UnaryExpr) -> Self { Self::Unary(op) } } diff --git a/core/src/ops/mod.rs b/core/src/ops/mod.rs index 7725cc1d..f0750aa7 100644 --- a/core/src/ops/mod.rs +++ b/core/src/ops/mod.rs @@ -5,12 +5,11 @@ //! # Operations //! //! -pub use self::{arithmetic::*, gradient::*, kinds::*, operator::*}; +pub use self::{arithmetic::*, gradient::*, kinds::*}; pub(crate) mod arithmetic; pub(crate) mod gradient; pub(crate) mod kinds; -pub(crate) mod operator; pub trait Evaluate { type Output; diff --git a/core/src/specs/mod.rs b/core/src/specs/mod.rs index afbae98b..7936c7f4 100644 --- a/core/src/specs/mod.rs +++ b/core/src/specs/mod.rs @@ -3,5 +3,18 @@ Contrib: FL03 */ +pub use self::operator::*; + +pub(crate) mod operator; + pub mod func; pub mod hkt; + +pub(crate) mod prelude { + pub use super::func::*; + pub use super::hkt::*; + pub use super::operator::*; +} + +#[cfg(test)] +mod tests {} diff --git a/core/src/ops/operator.rs b/core/src/specs/operator.rs similarity index 100% rename from core/src/ops/operator.rs rename to core/src/specs/operator.rs diff --git a/graphs/Cargo.toml b/graphs/Cargo.toml new file mode 100644 index 00000000..cb78e10d --- /dev/null +++ b/graphs/Cargo.toml @@ -0,0 +1,53 @@ +[package] +authors.workspace = true +description = "" +edition.workspace = true +homepage.workspace = true +license.workspace = true +name = "acme-graphs" +readme.workspace = true +repository.workspace = true +version.workspace = true + +[features] +default = [] + +serde = [ + "dep:serde", + "serde-ext", +] + +serde-ext = [ + "dep:serde_json", + "petgraph/serde-1", +] + +[lib] +bench = false +crate-type = ["cdylib", "rlib"] +doctest = true +test = true + +[build-dependencies] + +[dev-dependencies] + +[dependencies] +acme-core = { path = "../core", version = "0.3.0-nightly" } + +anyhow.workspace = true +lazy_static = "1" +num = "0.4" +petgraph = "0.6" +serde = { optional = true, features = ["derive"], version = "1" } +serde_json = { optional = true, version = "1" } +smart-default.workspace = true +strum.workspace = true + +[package.metadata.docs.rs] +all-features = true +rustc-args = ["--cfg", "docsrs"] + +[target.wasm32-unknown-unknown] + +[target.wasm32-wasi] diff --git a/core/src/graphs/dcg/edge.rs b/graphs/src/dcg/edge.rs similarity index 63% rename from core/src/graphs/dcg/edge.rs rename to graphs/src/dcg/edge.rs index e9236b03..1030d71c 100644 --- a/core/src/graphs/dcg/edge.rs +++ b/graphs/src/dcg/edge.rs @@ -8,16 +8,20 @@ use serde::{Deserialize, Serialize}; #[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)] #[cfg_attr(feature = "serde", derive(Deserialize, Serialize))] -pub struct Edge { - source: NodeIndex, +pub struct Edge { + source: Idx, } -impl Edge { - pub fn new(source: NodeIndex) -> Self { +impl Edge { + pub fn new(source: Idx) -> Self { Self { source } } - pub fn source(&self) -> NodeIndex { + pub fn source(&self) -> &Idx { + &self.source + } + + pub fn into_source(self) -> Idx { self.source } } diff --git a/core/src/graphs/dcg/graph.rs b/graphs/src/dcg/graph.rs similarity index 90% rename from core/src/graphs/dcg/graph.rs rename to graphs/src/dcg/graph.rs index 079f32c2..455b9e5c 100644 --- a/core/src/graphs/dcg/graph.rs +++ b/graphs/src/dcg/graph.rs @@ -5,8 +5,8 @@ use super::edge::Edge; use super::node::Node; use super::DynamicGraph; 
-use crate::ops::*;
-use crate::prelude::Result;
+use acme::ops::*;
+use acme::prelude::Result;
 use num::traits::{Num, NumAssignOps, NumOps};
 use petgraph::algo::toposort;
 use petgraph::prelude::{Direction, NodeIndex};
@@ -43,7 +43,7 @@ impl<T> Dcg<T> {
     pub fn op(
         &mut self,
         inputs: impl IntoIterator<Item = NodeIndex>,
-        op: impl Into<Ops>,
+        op: impl Into<Operations>,
     ) -> NodeIndex {
         let args = Vec::from_iter(inputs);
@@ -57,11 +57,11 @@ impl<T> Dcg<T> {
     pub fn add(&mut self, lhs: NodeIndex, rhs: NodeIndex) -> NodeIndex {
-        self.op([lhs, rhs], BinaryOp::add())
+        self.op([lhs, rhs], BinaryExpr::add())
     }
 
     pub fn mul(&mut self, lhs: NodeIndex, rhs: NodeIndex) -> NodeIndex {
-        self.op([lhs, rhs], BinaryOp::mul())
+        self.op([lhs, rhs], BinaryExpr::mul())
     }
 
     pub fn backward(&self) -> Result<HashMap<NodeIndex, T>>
@@ -80,13 +80,13 @@ where
 
         if let Node::Op { inputs, op } = node_op {
             match op {
-                Ops::Binary(inner) => match *inner {
-                    BinaryOp::Add(_) => {
+                Operations::Binary(inner) => match *inner {
+                    BinaryExpr::Add(_) => {
                         for arg in self.store.neighbors_directed(*node, Direction::Incoming) {
                             *gradients.entry(arg).or_default() += node_grad;
                         }
                     }
-                    BinaryOp::Mul(_) => {
+                    BinaryExpr::Mul(_) => {
                         let lhs = inputs[0];
                         let rhs = inputs[1];
                         let lhs_val = self.get(lhs).unwrap().get_value();
@@ -120,12 +120,12 @@
 
         if let Node::Op { inputs, op } = node_op {
             match op {
-                Ops::Binary(BinaryOp::Add(_)) => {
+                Operations::Binary(BinaryExpr::Add(_)) => {
                     for arg in self.store.neighbors_directed(*node, Direction::Incoming) {
                         *gradients.entry(arg).or_default() += node_grad;
                     }
                 }
-                Ops::Binary(BinaryOp::Mul(_)) => {
+                Operations::Binary(BinaryExpr::Mul(_)) => {
                     let lhs = inputs[0];
                     let rhs = inputs[1];
                     let lhs_val = self[lhs].get_value();
diff --git a/core/src/graphs/dcg/mod.rs b/graphs/src/dcg/mod.rs
similarity index 100%
rename from core/src/graphs/dcg/mod.rs
rename to graphs/src/dcg/mod.rs
diff --git a/core/src/graphs/dcg/node.rs b/graphs/src/dcg/node.rs
similarity index 74%
rename from core/src/graphs/dcg/node.rs
rename to graphs/src/dcg/node.rs
index 9b5cb805..99d29307 100644
--- a/core/src/graphs/dcg/node.rs
+++ b/graphs/src/dcg/node.rs
@@ -2,16 +2,23 @@
     Appellation: node
     Contrib: FL03
 */
-use crate::ops::Ops;
+use acme::ops::Operations;
 use petgraph::prelude::NodeIndex;
 
+#[derive(Clone, Debug)]
 pub enum Node<T> {
-    Op { inputs: Vec<NodeIndex>, op: Ops },
-    Input { param: bool, value: T },
+    Op {
+        inputs: Vec<NodeIndex>,
+        op: Operations,
+    },
+    Input {
+        param: bool,
+        value: T,
+    },
 }
 
 impl<T> Node<T> {
-    pub fn op(inputs: impl IntoIterator<Item = NodeIndex>, op: impl Into<Ops>) -> Self {
+    pub fn op(inputs: impl IntoIterator<Item = NodeIndex>, op: impl Into<Operations>) -> Self {
         Node::Op {
             inputs: Vec::from_iter(inputs),
             op: op.into(),
diff --git a/core/src/id/gradient.rs b/graphs/src/grad/id.rs
similarity index 95%
rename from core/src/id/gradient.rs
rename to graphs/src/grad/id.rs
index 570d8db1..f3711953 100644
--- a/core/src/id/gradient.rs
+++ b/graphs/src/grad/id.rs
@@ -1,8 +1,8 @@
 /*
-    Appellation: gradient
+    Appellation: id
     Contrib: FL03
 */
-use super::Id;
+use acme::id::Id;
 #[cfg(feature = "serde")]
 use serde::{Deserialize, Serialize};
 use std::marker::PhantomData;
diff --git a/graphs/src/grad/mod.rs b/graphs/src/grad/mod.rs
new file mode 100644
index 00000000..edc07f32
--- /dev/null
+++ b/graphs/src/grad/mod.rs
@@ -0,0 +1,17 @@
+/*
+    Appellation: grad
+    Contrib: FL03
+*/
+pub use self::id::GradientId;
+
+pub(crate) mod id;
+
+pub mod store;
+
+pub(crate) mod prelude {
+    pub use super::id::GradientId;
+    pub use super::store::GradientStore;
+}
+
+#[cfg(test)]
+mod tests {}
diff --git a/graphs/src/grad/store.rs b/graphs/src/grad/store.rs
new file mode 100644
index 00000000..ec1e2f2f
--- /dev/null
+++ b/graphs/src/grad/store.rs
@@ -0,0 +1,55 @@
+/*
+    Appellation: gradient
+    Contrib: FL03
+*/
+use acme::stores::Store;
+use petgraph::prelude::NodeIndex;
+use std::any::Any;
+use std::collections::BTreeMap;
+
+pub struct GradientStore<K = NodeIndex> {
+    store: BTreeMap<K, Box<dyn Any>>,
+}
+
+impl<K> GradientStore<K>
+where
+    K: Ord,
+{
+    pub fn new() -> Self {
+        Self {
+            store: BTreeMap::new(),
+        }
+    }
+
+    pub fn or_insert(&mut self, key: K, value: Box<dyn Any>) -> &mut dyn Any {
+        self.store.entry(key).or_insert(value)
+    }
+}
+
+impl<K, T> Store<K, T> for GradientStore<K>
+where
+    K: Ord,
+    T: Clone + 'static,
+{
+    fn get(&self, key: &K) -> Option<&T> {
+        self.store.get(key).map(|v| v.downcast_ref::<T>().unwrap())
+    }
+
+    fn get_mut(&mut self, key: &K) -> Option<&mut T> {
+        self.store
+            .get_mut(key)
+            .map(|v| v.downcast_mut::<T>().unwrap())
+    }
+
+    fn insert(&mut self, key: K, value: T) -> Option<T> {
+        self.store
+            .insert(key, Box::new(value))
+            .map(|v| v.downcast_ref::<T>().unwrap().clone())
+    }
+
+    fn remove(&mut self, key: &K) -> Option<T> {
+        self.store
+            .remove(key)
+            .map(|v| v.downcast_ref::<T>().unwrap().clone())
+    }
+}
diff --git a/core/src/graphs/mod.rs b/graphs/src/graph.rs
similarity index 71%
rename from core/src/graphs/mod.rs
rename to graphs/src/graph.rs
index 62ce9783..fe6e8727 100644
--- a/core/src/graphs/mod.rs
+++ b/graphs/src/graph.rs
@@ -1,8 +1,8 @@
 /*
-    Appellation: graphs
+    Appellation: graph
     Contrib: FL03
 */
-//! # Graphs
+//! # Graph
 //!
 //! A computational graph forms the backbone of automatic differentiation. Computational graphs are directed acyclic graphs (DAGs)
 //! that represent any computation as a series of nodes and edges.
@@ -10,9 +10,6 @@
 //! In a dynamic computational graph (DCG), the graph considers the nodes to be tensors and the edges to be operations.
 //!
 
-pub mod dcg;
-pub mod scg;
-
 pub trait GraphEntry {
     type Idx;
     type Weight;
@@ -36,20 +33,3 @@ pub trait ComputeGraph {
 
     fn clear(&mut self);
 }
-
-pub(crate) mod prelude {
-    pub use super::dcg::Dcg;
-    pub use super::scg::Scg;
-}
-
-#[cfg(test)]
-mod tests {
-    use super::prelude::*;
-
-    #[test]
-    fn test_dcg() {
-        let mut dcg = Dcg::<f64>::new();
-        let _input = dcg.input(true, 1.0);
-        assert_eq!(1, 1);
-    }
-}
diff --git a/graphs/src/lib.rs b/graphs/src/lib.rs
new file mode 100644
index 00000000..6f690008
--- /dev/null
+++ b/graphs/src/lib.rs
@@ -0,0 +1,29 @@
+/*
+    Appellation: acme-graphs
+    Contrib: FL03
+*/
+//! # acme-graphs
+//!
+//!
+ +extern crate acme_core as acme; + +#[doc(inline)] +pub use self::graph::*; + +pub(crate) mod graph; + +pub mod dcg; +pub mod grad; +pub mod scg; + +pub mod prelude { + #[doc(inline)] + pub use crate::dcg::Dcg; + #[doc(inline)] + pub use crate::grad::prelude::*; + #[doc(inline)] + pub use crate::graph::*; + #[doc(inline)] + pub use crate::scg::Scg; +} diff --git a/core/src/graphs/scg/edge.rs b/graphs/src/scg/edge.rs similarity index 94% rename from core/src/graphs/scg/edge.rs rename to graphs/src/scg/edge.rs index 68f216c9..391906f3 100644 --- a/core/src/graphs/scg/edge.rs +++ b/graphs/src/scg/edge.rs @@ -2,7 +2,8 @@ Appellation: edge Contrib: FL03 */ -use crate::id::{GradientId, Id}; +use crate::grad::GradientId; +use acme::prelude::Id; #[cfg(feature = "serde")] use serde::{Deserialize, Serialize}; diff --git a/core/src/graphs/scg/graph.rs b/graphs/src/scg/graph.rs similarity index 89% rename from core/src/graphs/scg/graph.rs rename to graphs/src/scg/graph.rs index 137210b7..c98faf51 100644 --- a/core/src/graphs/scg/graph.rs +++ b/graphs/src/scg/graph.rs @@ -3,7 +3,7 @@ Contrib: FL03 */ use super::Node; -use crate::prelude::{BinaryOp, BinaryOperation, Ops, Result}; +use acme::prelude::{BinaryExpr, BinaryOperation, Operations, Result}; use num::traits::{NumAssign, NumOps, Signed}; use petgraph::algo::toposort; use petgraph::prelude::{DiGraph, NodeIndex}; @@ -44,7 +44,7 @@ impl Scg { pub fn operation( &mut self, inputs: impl IntoIterator, - operation: impl Into, + operation: impl Into, result: Option, ) -> Result where @@ -93,9 +93,9 @@ where // calculate the gradient of each input w.r.t. the current node let dt = if let Some(op) = node.operation() { match op { - Ops::Binary(op) => match op { - BinaryOp::Add(_) => grad, - BinaryOp::Div(_) => { + Operations::Binary(op) => match op { + BinaryExpr::Add(_) => grad, + BinaryExpr::Div(_) => { let out = self.vals[&i]; let val = self.vals[input]; if j % 2 == 0 { @@ -104,12 +104,12 @@ where -grad * out / (val * val) } } - BinaryOp::Mul(_) => { + BinaryExpr::Mul(_) => { let out = self.vals[&i]; let val = self.vals[input]; grad * out / val } - BinaryOp::Sub(_) => { + BinaryExpr::Sub(_) => { if j % 2 == 0 { grad } else { @@ -140,7 +140,7 @@ where pub fn add(&mut self, a: NodeIndex, b: NodeIndex) -> Result { let x = self.vals[&a]; let y = self.vals[&b]; - let op = BinaryOp::add(); + let op = BinaryExpr::add(); let res = op.eval(x, y); let c = self.operation([a, b], op, Some(res))?; @@ -152,7 +152,7 @@ where let y = self.vals[&b]; let res = x / y; - let op = BinaryOp::div(); + let op = BinaryExpr::div(); let c = self.operation([a, b], op, Some(res))?; Ok(c) @@ -162,7 +162,7 @@ where let x = self.vals[&a]; let y = self.vals[&b]; let res = x * y; - let c = self.operation([a, b], BinaryOp::mul(), Some(res))?; + let c = self.operation([a, b], BinaryExpr::mul(), Some(res))?; Ok(c) } @@ -171,7 +171,7 @@ where let x = self.vals[&a]; let y = self.vals[&b]; let res = x - y; - let c = self.operation([a, b], BinaryOp::sub(), Some(res))?; + let c = self.operation([a, b], BinaryExpr::sub(), Some(res))?; Ok(c) } diff --git a/core/src/graphs/scg/mod.rs b/graphs/src/scg/mod.rs similarity index 100% rename from core/src/graphs/scg/mod.rs rename to graphs/src/scg/mod.rs diff --git a/core/src/graphs/scg/node.rs b/graphs/src/scg/node.rs similarity index 88% rename from core/src/graphs/scg/node.rs rename to graphs/src/scg/node.rs index d66dbe3f..33f5aaca 100644 --- a/core/src/graphs/scg/node.rs +++ b/graphs/src/scg/node.rs @@ -6,8 +6,8 @@ //! //! 
A computational graph relies on weighted nodes to represent constants, operations, and variables. //! The edges connecting to any given node are considered to be inputs and help to determine the flow of information -use crate::id::AtomicId; -use crate::ops::Ops; +use acme::id::AtomicId; +use acme::ops::Operations; use petgraph::prelude::NodeIndex; #[cfg(feature = "serde")] use serde::{Deserialize, Serialize}; @@ -18,7 +18,7 @@ pub struct Node { id: AtomicId, inputs: Vec, name: String, - op: Option, + op: Option, } impl Node { @@ -41,7 +41,7 @@ impl Node { self } - pub fn with_op(mut self, op: impl Into) -> Self { + pub fn with_op(mut self, op: impl Into) -> Self { self.op = Some(op.into()); self } @@ -63,7 +63,7 @@ impl Node { &self.name } - pub fn operation(&self) -> Option<&Ops> { + pub fn operation(&self) -> Option<&Operations> { self.op.as_ref() } } diff --git a/graphs/tests/dcg.rs b/graphs/tests/dcg.rs new file mode 100644 index 00000000..46f3b86c --- /dev/null +++ b/graphs/tests/dcg.rs @@ -0,0 +1,6 @@ +/* + Appellation: dcg + Contrib: FL03 +*/ +#![cfg(test)] +extern crate acme_graphs as acme; diff --git a/graphs/tests/default.rs b/graphs/tests/default.rs new file mode 100644 index 00000000..42c7d5fb --- /dev/null +++ b/graphs/tests/default.rs @@ -0,0 +1,7 @@ +#[cfg(test)] +#[test] +fn compiles() { + let f = |l: usize, r: usize| l + r; + let result = f(2, 2); + assert_eq!(result, 4); +} diff --git a/tensor/Cargo.toml b/tensor/Cargo.toml index c51a5c6c..cefcd7f1 100644 --- a/tensor/Cargo.toml +++ b/tensor/Cargo.toml @@ -14,13 +14,19 @@ version.workspace = true default = [] serde = [ - "dep:serde" + "dep:serde", + "serde-ext", +] + +serde-ext = [ + "acme-core/serde" ] [build-dependencies] [dependencies] -acme-core = { features = [], path = "../core", version = "0.3" } +acme-core = { path = "../core", version = "0.3.0-nightly" } + num = "0.4" serde = { optional = true, features = ["derive"], version = "1" } strum = { features = ["derive"], version = "0.26" } diff --git a/tensor/src/errors/error.rs b/tensor/src/errors/error.rs new file mode 100644 index 00000000..a9ab014d --- /dev/null +++ b/tensor/src/errors/error.rs @@ -0,0 +1,73 @@ +/* + Appellation: error + Contrib: FL03 +*/ +#[cfg(feature = "serde")] +use serde::{Deserialize, Serialize}; +use strum::{Display, EnumCount, EnumIs, EnumIter, EnumString, VariantNames}; + +#[derive(Clone, Debug, Display, EnumCount, EnumIs, VariantNames)] +#[cfg_attr( + feature = "serde", + derive(Deserialize, Serialize), + serde(rename_all = "snake_case", untagged) +)] +#[repr(usize)] +#[strum(serialize_all = "snake_case")] +pub enum TensorError { + Arithmetic(ArithmeticError), + Indexing(String), + Shape(ShapeError), +} + +unsafe impl Send for TensorError {} + +unsafe impl Sync for TensorError {} + +impl std::error::Error for TensorError {} + +impl From<&str> for TensorError { + fn from(error: &str) -> Self { + TensorError::Indexing(error.to_string()) + } +} + +#[derive(Clone, Copy, Debug, Display, EnumCount, EnumIs, EnumIter, EnumString, VariantNames)] +#[cfg_attr( + feature = "serde", + derive(Deserialize, Serialize), + serde(rename_all = "snake_case", untagged) +)] +#[repr(usize)] +#[strum(serialize_all = "snake_case")] +pub enum ArithmeticError { + DivisionByZero, + Overflow, + Underflow, +} + +#[derive(Clone, Copy, Debug, Display, EnumCount, EnumIs, EnumIter, EnumString, VariantNames)] +#[cfg_attr( + feature = "serde", + derive(Deserialize, Serialize), + serde(rename_all = "snake_case", untagged) +)] +#[repr(usize)] +#[strum(serialize_all = 
"snake_case")] +pub enum ShapeError { + IncompatibleShapes, + InvalidShape, +} + +macro_rules! into_tensor_error { + ($error:ident, $kind:ident) => { + impl From<$error> for TensorError { + fn from(error: $error) -> Self { + TensorError::$kind(error) + } + } + }; +} + +into_tensor_error!(ArithmeticError, Arithmetic); +into_tensor_error!(ShapeError, Shape); diff --git a/tensor/src/errors/mod.rs b/tensor/src/errors/mod.rs new file mode 100644 index 00000000..d171406e --- /dev/null +++ b/tensor/src/errors/mod.rs @@ -0,0 +1,9 @@ +/* + Appellation: errors + Contrib: FL03 +*/ +pub use self::error::*; + +pub(crate) mod error; + +pub type Result = std::result::Result; diff --git a/tensor/src/impls/arith.rs b/tensor/src/impls/arith.rs index 0675aa19..0c996ad2 100644 --- a/tensor/src/impls/arith.rs +++ b/tensor/src/impls/arith.rs @@ -14,7 +14,7 @@ where fn neg(self) -> Self::Output { let shape = self.shape().clone(); - let store = self.data().iter().map(|a| -*a).collect(); + let store = self.data().iter().copied().map(|a| -a).collect(); let op = Op::Unary(Box::new(self), UnaryOp::Neg); from_vec_with_op(op, shape, store) } @@ -28,7 +28,7 @@ where fn neg(self) -> Self::Output { let shape = self.shape().clone(); - let store = self.data().iter().map(|a| -*a).collect(); + let store = self.data().iter().copied().map(|a| -a).collect(); let op = Op::Unary(Box::new(self.clone()), UnaryOp::Neg); from_vec_with_op(op, shape, store) } @@ -114,15 +114,6 @@ macro_rules! impl_arith { macro_rules! impl_scalar_arith { ($trait:ident, $method:ident, $op:tt) => { - // impl TensorBase - // where - // T: Copy + std::ops::$trait, - // { - // pub fn $method(self, other: T) -> TensorBase { - // let store = self.store.iter().map(|a| *a $op other).collect(); - // from_vec(self.shape().clone(), store) - // } - // } impl std::ops::$trait for TensorBase where diff --git a/tensor/src/lib.rs b/tensor/src/lib.rs index be989cda..021bb264 100644 --- a/tensor/src/lib.rs +++ b/tensor/src/lib.rs @@ -17,6 +17,8 @@ pub use self::tensor::*; pub(crate) mod tensor; pub mod data; +pub mod errors; +pub mod linalg; pub mod ops; pub mod shape; pub mod specs; @@ -35,6 +37,8 @@ pub mod prelude { #[doc(inline)] pub use crate::data::*; #[doc(inline)] + pub use crate::errors::*; + #[doc(inline)] pub use crate::ops::*; #[doc(inline)] pub use crate::shape::prelude::*; diff --git a/tensor/src/linalg/arith.rs b/tensor/src/linalg/arith.rs index 0a6333ef..c311b924 100644 --- a/tensor/src/linalg/arith.rs +++ b/tensor/src/linalg/arith.rs @@ -1,4 +1,4 @@ /* Appellation: arith Contrib: FL03 -*/ \ No newline at end of file +*/ diff --git a/tensor/src/linalg/mod.rs b/tensor/src/linalg/mod.rs index 00a85d42..b0c82083 100644 --- a/tensor/src/linalg/mod.rs +++ b/tensor/src/linalg/mod.rs @@ -3,8 +3,8 @@ Contrib: FL03 */ //! # Linear Algebra -//! -//! +//! +//! pub mod arith; #[cfg(test)] diff --git a/tensor/src/ops/kinds.rs b/tensor/src/ops/kinds.rs index 037361cd..31cbe181 100644 --- a/tensor/src/ops/kinds.rs +++ b/tensor/src/ops/kinds.rs @@ -89,3 +89,8 @@ pub struct BinOp { pub rhs: TensorBase, pub op: BinaryOp, } + +pub enum OpInput { + Scalar(T), + Tensor(TensorBase), +} diff --git a/tensor/src/ops/op/unary.rs b/tensor/src/ops/op/unary.rs index 8b137891..d7774f12 100644 --- a/tensor/src/ops/op/unary.rs +++ b/tensor/src/ops/op/unary.rs @@ -1 +1,68 @@ +/* + Appellation: unary + Contrib: FL03 +*/ +use crate::prelude::Scalar; +macro_rules! 
unary_op_trait { + ($trait:ident, $method:ident) => { + pub trait $trait { + type Output; + + fn $method(self) -> Self::Output; + } + }; +} + +macro_rules! impl_unary_trait { + ($trait:ident, $method:ident) => { + impl $trait for T + where + T: Scalar, + { + type Output = T; + + fn $method(self) -> Self::Output { + ::$method(self) + } + } + }; +} + +unary_op_trait!(Abs, abs); +unary_op_trait!(Cos, cos); +unary_op_trait!(Cosh, cosh); +unary_op_trait!(Exp, exp); +unary_op_trait!(Ln, ln); +unary_op_trait!(Recip, recip); +unary_op_trait!(Sin, sin); +unary_op_trait!(Sinh, sinh); +unary_op_trait!(Sqrt, sqrt); +unary_op_trait!(Square, square); +unary_op_trait!(Tan, tan); +unary_op_trait!(Tanh, tanh); + +impl Abs for T +where + T: num::Signed, +{ + type Output = T; + + fn abs(self) -> Self::Output { + ::abs(&self) + } +} + +// impl Cos for T +// where +// T: Scalar, +// { +// type Output = T; + +// fn cos(self) -> Self::Output { +// ::cos(self) +// } +// } + +impl_unary_trait!(Cos, cos); +impl_unary_trait!(Cosh, cosh); diff --git a/tensor/src/shape/shape.rs b/tensor/src/shape/shape.rs index df4a7fb9..24cf58ed 100644 --- a/tensor/src/shape/shape.rs +++ b/tensor/src/shape/shape.rs @@ -3,7 +3,8 @@ Contrib: FL03 */ use super::Rank; -use acme::prelude::Result; +use crate::errors::ShapeError; +use crate::prelude::Result; #[cfg(feature = "serde")] use serde::{Deserialize, Serialize}; use std::ops::{self, Deref}; @@ -48,10 +49,10 @@ impl Shape { pub(crate) fn matmul_shape(&self, other: &Self) -> Result { if *self.rank() != 2 || *other.rank() != 2 { - return Err("Both shapes must be rank 2".into()); + return Err(ShapeError::IncompatibleShapes.into()); } if self[1] != other[0] { - return Err("Incompatible shapes".into()); + return Err(ShapeError::IncompatibleShapes.into()); } Ok(Self::from((self[0], other[1]))) } diff --git a/tensor/src/specs/scalar.rs b/tensor/src/specs/scalar.rs index a5f789d8..bce4b6e6 100644 --- a/tensor/src/specs/scalar.rs +++ b/tensor/src/specs/scalar.rs @@ -231,63 +231,3 @@ macro_rules! impl_scalar { } impl_scalar!(f32); impl_scalar!(f64); - -macro_rules! unary_op_trait { - ($trait:ident, $method:ident) => { - pub trait $trait { - type Output; - - fn $method(self) -> Self::Output; - } - }; -} - -macro_rules! 
impl_unary_trait { - ($trait:ident, $method:ident, $op:tt) => { - impl $trait for T - where - T: Scalar, - { - type Output = T; - - fn $method(self) -> Self::Output { - ::$op(self) - } - } - }; -} - -unary_op_trait!(Abs, abs); -unary_op_trait!(Cos, cos); -unary_op_trait!(Cosh, cosh); -unary_op_trait!(Exp, exp); -unary_op_trait!(Ln, ln); -unary_op_trait!(Recip, recip); -unary_op_trait!(Sin, sin); -unary_op_trait!(Sinh, sinh); -unary_op_trait!(Sqrt, sqrt); -unary_op_trait!(Square, square); -unary_op_trait!(Tan, tan); -unary_op_trait!(Tanh, tanh); - -impl Abs for T -where - T: num::Signed, -{ - type Output = T; - - fn abs(self) -> Self::Output { - ::abs(&self) - } -} - -impl Cos for T -where - T: Scalar, -{ - type Output = T; - - fn cos(self) -> Self::Output { - ::cos(self) - } -} diff --git a/tensor/src/tensor.rs b/tensor/src/tensor.rs index 7fb7c0b3..b289090c 100644 --- a/tensor/src/tensor.rs +++ b/tensor/src/tensor.rs @@ -15,7 +15,7 @@ pub(crate) fn from_vec(shape: impl IntoShape, store: Vec) -> TensorBase id: AtomicId::new(), layout: Layout::contiguous(shape), op: None, - store, //Arc::new(RwLock::new(store)), + store, } } @@ -29,7 +29,7 @@ pub(crate) fn from_vec_with_op( id: AtomicId::new(), layout, op: Some(op), - store, //Arc::new(RwLock::new(store)), + store, } } @@ -112,6 +112,17 @@ where } } +impl TensorBase +where + T: Clone + Default, +{ + pub fn broadcast(&self, shape: impl IntoShape) -> Self { + let shape = shape.into_shape(); + + self.clone() + } +} + impl TensorBase where T: Scalar, From 7dd72a1275b1ce8770ca6b52f7343e2986ffc279 Mon Sep 17 00:00:00 2001 From: Joe McCain III Date: Thu, 21 Mar 2024 10:20:47 -0500 Subject: [PATCH 32/87] update Signed-off-by: Joe McCain III --- .github/workflows/crates.yml | 23 ++++++++++++++--------- 1 file changed, 14 insertions(+), 9 deletions(-) diff --git a/.github/workflows/crates.yml b/.github/workflows/crates.yml index d023028b..2aedc07a 100644 --- a/.github/workflows/crates.yml +++ b/.github/workflows/crates.yml @@ -5,6 +5,7 @@ concurrency: cancel-in-progress: false env: + CARGO_PREFIX: ${{ github.repository.name }} CARGO_TERM_COLOR: always on: @@ -21,11 +22,13 @@ jobs: strategy: matrix: features: [ core ] + env: + CARGO_PACKAGE_NAME: ${{ github.repository.name }}-${{ matrix.features }} steps: - uses: actions/checkout@v4 - - name: Publish (${{ github.ref.name }}-${{ matrix.features }}) - run: cargo publish --all-features -v -p ${{ github.ref.name }}-${{ matrix.features }} --token ${{ secrets.CARGO_REGISTRY_TOKEN }} - sdk: + - name: Publish (${{ env.CARGO_PACKAGE_NAME }}) + run: cargo publish --all-features -v -p ${{ env.CARGO_PACKAGE_NAME }} --token ${{ secrets.CARGO_REGISTRY_TOKEN }} + features: concurrency: group: ${{ github.workflow }}-${{ github.ref }}-sdk cancel-in-progress: false @@ -35,15 +38,17 @@ jobs: strategy: matrix: features: [ derive, graphs, macros, tensor ] + env: + CARGO_PACKAGE_NAME: ${{ github.repository.name }}-${{ matrix.features }} steps: - uses: actions/checkout@v4 - - name: Publish (${{ github.ref.name }}-${{ matrix.features }}) - run: cargo publish --all-features -v -p ${{ github.ref.name }}-${{ matrix.features }} --token ${{ secrets.CARGO_REGISTRY_TOKEN }} + - name: Publish (${{ env.CARGO_PACKAGE_NAME }}) + run: cargo publish --all-features -v -p ${{ env.CARGO_PACKAGE_NAME }} --token ${{ secrets.CARGO_REGISTRY_TOKEN }} publish: - name: Publish (${{ github.ref.name }}) - needs: sdk + name: Publish (${{ github.repository.name }}) + needs: features runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 - - name: 
Publish (${{ github.ref.name }}) - run: cargo publish --all-features -v -p ${{ github.ref.name }} --token ${{ secrets.CARGO_REGISTRY_TOKEN }} \ No newline at end of file + - name: Publish (${{ github.repository.name }}) + run: cargo publish --all-features -v -p ${{ github.repository.name }} --token ${{ secrets.CARGO_REGISTRY_TOKEN }} \ No newline at end of file From ea9d98cef8987f08b4b55212328205c010d130bf Mon Sep 17 00:00:00 2001 From: Joe McCain III Date: Thu, 21 Mar 2024 11:55:10 -0500 Subject: [PATCH 33/87] update Signed-off-by: Joe McCain III --- acme/Cargo.toml | 4 ++ acme/examples/tensor.rs | 13 +++++ acme/tests/autodiff.rs | 2 +- acme/tests/macros.rs | 7 +++ core/src/cmp/constants.rs | 7 ++- core/src/cmp/dual.rs | 2 +- core/src/cmp/variables.rs | 2 +- core/src/errors/error.rs | 33 ++++-------- core/src/errors/kinds.rs | 2 + core/src/eval/mod.rs | 14 +++++ core/src/lib.rs | 1 + core/src/ops/arithmetic.rs | 36 ++++++++++--- core/src/ops/binary/kinds.rs | 47 +++++++++++++++++ core/src/ops/binary/mod.rs | 21 ++++++++ core/src/ops/mod.rs | 24 ++------- core/src/ops/unary/kinds.rs | 48 +++++++++++++++++ core/src/ops/unary/mod.rs | 19 +++++++ core/src/{ops => specs}/gradient.rs | 4 ++ core/src/specs/mod.rs | 4 +- graphs/src/dcg/graph.rs | 2 +- graphs/src/errors/error.rs | 80 +++++++++++++++++++++++++++++ graphs/src/errors/mod.rs | 9 ++++ graphs/src/lib.rs | 3 ++ graphs/src/scg/graph.rs | 3 +- macros/src/ast/gradient.rs | 14 +++++ macros/src/lib.rs | 22 ++++---- tensor/src/errors/mod.rs | 2 +- tensor/src/shape/rank.rs | 7 +++ tensor/src/shape/shape.rs | 28 +++++----- tensor/src/tensor.rs | 2 + tensor/tests/composition.rs | 32 ++++++++++++ tensor/tests/tensor.rs | 17 ------ 32 files changed, 410 insertions(+), 101 deletions(-) create mode 100644 acme/examples/tensor.rs create mode 100644 acme/tests/macros.rs create mode 100644 core/src/ops/binary/kinds.rs create mode 100644 core/src/ops/binary/mod.rs create mode 100644 core/src/ops/unary/kinds.rs create mode 100644 core/src/ops/unary/mod.rs rename core/src/{ops => specs}/gradient.rs (91%) create mode 100644 graphs/src/errors/error.rs create mode 100644 graphs/src/errors/mod.rs create mode 100644 tensor/tests/composition.rs delete mode 100644 tensor/tests/tensor.rs diff --git a/acme/Cargo.toml b/acme/Cargo.toml index 80812426..49d8466a 100644 --- a/acme/Cargo.toml +++ b/acme/Cargo.toml @@ -70,6 +70,10 @@ required-features = ["graph"] name = "autodiff" required-features = ["macros"] +[[test]] +name = "macros" +required-features = ["macros"] + [build-dependencies] [dependencies] diff --git a/acme/examples/tensor.rs b/acme/examples/tensor.rs new file mode 100644 index 00000000..909e6a34 --- /dev/null +++ b/acme/examples/tensor.rs @@ -0,0 +1,13 @@ +/* + Appellation: tensor + Contrib: FL03 +*/ +#![cfg(feature = "tensor")] + +extern crate acme; + +use acme::prelude::BoxResult; + +fn main() -> BoxResult { + Ok(()) +} diff --git a/acme/tests/autodiff.rs b/acme/tests/autodiff.rs index 38efa622..8fba9119 100644 --- a/acme/tests/autodiff.rs +++ b/acme/tests/autodiff.rs @@ -2,8 +2,8 @@ Appellation: gradient Contrib: FL03 */ +#![cfg(all(test, feature = "macros"))] -#[cfg(test)] extern crate acme; use acme::prelude::autodiff; diff --git a/acme/tests/macros.rs b/acme/tests/macros.rs new file mode 100644 index 00000000..9c7d0c7f --- /dev/null +++ b/acme/tests/macros.rs @@ -0,0 +1,7 @@ +/* + Appellation: macros + Contrib: FL03 +*/ +#![cfg(test)] +#![cfg(feature = "macros")] +extern crate acme; diff --git a/core/src/cmp/constants.rs 
b/core/src/cmp/constants.rs index aff09c02..1f659051 100644 --- a/core/src/cmp/constants.rs +++ b/core/src/cmp/constants.rs @@ -2,7 +2,8 @@ Appellation: constants Contrib: FL03 */ -use crate::ops::{Evaluate, Gradient}; + +use crate::prelude::{Evaluate, Gradient}; use num::{Num, One, Zero}; #[cfg(feature = "serde")] use serde::{Deserialize, Serialize}; @@ -27,6 +28,10 @@ impl Constant { pub fn value_mut(&mut self) -> &mut T { &mut self.0 } + + pub fn into_value(self) -> T { + self.0 + } } impl AsRef for Constant { diff --git a/core/src/cmp/dual.rs b/core/src/cmp/dual.rs index 87cb9ed0..435ed62c 100644 --- a/core/src/cmp/dual.rs +++ b/core/src/cmp/dual.rs @@ -12,7 +12,7 @@ //! e != 0 //! e^2 = 0 -use crate::ops::{Evaluate, Gradient}; +use crate::prelude::{Evaluate, Gradient}; use num::{Num, One, Zero}; #[cfg(feature = "serde")] use serde::{Deserialize, Serialize}; diff --git a/core/src/cmp/variables.rs b/core/src/cmp/variables.rs index e44438a8..a963e141 100644 --- a/core/src/cmp/variables.rs +++ b/core/src/cmp/variables.rs @@ -2,7 +2,7 @@ Appellation: variables Contrib: FL03 */ -use crate::ops::{Evaluate, Gradient}; +use crate::prelude::{Evaluate, Gradient}; use num::{Num, One, Zero}; #[cfg(feature = "serde")] use serde::{Deserialize, Serialize}; diff --git a/core/src/errors/error.rs b/core/src/errors/error.rs index 6447f28a..ad369b63 100644 --- a/core/src/errors/error.rs +++ b/core/src/errors/error.rs @@ -54,21 +54,6 @@ impl From> for Error { } } -impl From> for Error -where - E: Copy + std::fmt::Debug, -{ - fn from(err: petgraph::algo::Cycle) -> Self { - Self::new(ErrorKind::Graph, format!("Cycle: {:?}", err.node_id())) - } -} - -impl From for Error { - fn from(_err: petgraph::algo::NegativeCycle) -> Self { - Self::new(ErrorKind::Graph, "Negative Cycle detected") - } -} - macro_rules! error_from { (shared $kind:expr, ($($t:ty),*)) => { $( @@ -84,14 +69,14 @@ macro_rules! error_from { }; } -macro_rules! into_error { - (kind $kind:expr, $t:ty) => { - impl From<$t> for Error { - fn from(err: $t) -> Self { - Self::new($kind, err.to_string()) - } - } - }; -} +// macro_rules! 
into_error { +// (kind $kind:expr, $t:ty) => { +// impl From<$t> for Error { +// fn from(err: $t) -> Self { +// Self::new($kind, err.to_string()) +// } +// } +// }; +// } error_from!(shared ErrorKind::Unknown, (&str, String, Box)); diff --git a/core/src/errors/kinds.rs b/core/src/errors/kinds.rs index 0d03ac81..fa0e0dd1 100644 --- a/core/src/errors/kinds.rs +++ b/core/src/errors/kinds.rs @@ -10,6 +10,8 @@ pub trait ErrorType { type Kind; fn kind(&self) -> Self::Kind; + + fn name(&self) -> &str; } pub enum Errors { diff --git a/core/src/eval/mod.rs b/core/src/eval/mod.rs index cd22b337..9e14cc16 100644 --- a/core/src/eval/mod.rs +++ b/core/src/eval/mod.rs @@ -5,3 +5,17 @@ pub use self::evaluator::*; pub(crate) mod evaluator; + +pub trait Evaluate { + type Output; + + fn eval(self) -> Self::Output; +} + +impl Evaluate for f64 { + type Output = f64; + + fn eval(self) -> Self::Output { + self + } +} diff --git a/core/src/lib.rs b/core/src/lib.rs index 66d477c1..ee219e40 100644 --- a/core/src/lib.rs +++ b/core/src/lib.rs @@ -27,6 +27,7 @@ pub mod prelude { pub use crate::cmp::*; pub use crate::errors::*; + pub use crate::eval::*; pub use crate::id::*; pub use crate::ops::*; pub use crate::specs::prelude::*; diff --git a/core/src/ops/arithmetic.rs b/core/src/ops/arithmetic.rs index 9469f1ee..87b903fd 100644 --- a/core/src/ops/arithmetic.rs +++ b/core/src/ops/arithmetic.rs @@ -12,8 +12,8 @@ pub trait Trig { fn tan(self) -> Self; } -macro_rules! impl_binary_op { - ($op:ident, $bound:ident, $exp:expr) => { +macro_rules! operator { + ($op:ident) => { #[derive(Clone, Copy, Debug, Default, Eq, Hash, Ord, PartialEq, PartialOrd)] #[cfg_attr(feature = "serde", derive(Deserialize, Serialize,))] pub struct $op; @@ -22,7 +22,31 @@ macro_rules! impl_binary_op { pub fn new() -> Self { Self } + + pub fn name(&self) -> &'static str { + stringify!($op) + } + } + }; +} + +macro_rules! impl_binary_op { + ($op:ident, $bound:ident, $operator:tt) => { + operator!($op); + + impl crate::ops::BinaryOperation for $op + where + A: $bound, + { + type Output = C; + + fn eval(&self, lhs: A, rhs: B) -> Self::Output { + lhs $operator rhs + } } + }; + (expr $op:ident, $bound:ident, $exp:expr) => { + operator!($op); impl crate::ops::BinaryOperation for $op where @@ -37,10 +61,10 @@ macro_rules! 
impl_binary_op { }; } -impl_binary_op!(Addition, Add, |lhs, rhs| lhs + rhs); +impl_binary_op!(Addition, Add, +); -impl_binary_op!(Division, Div, |lhs, rhs| lhs / rhs); +impl_binary_op!(Division, Div, /); -impl_binary_op!(Multiplication, Mul, |lhs, rhs| lhs * rhs); +impl_binary_op!(Multiplication, Mul, *); -impl_binary_op!(Subtraction, Sub, |lhs, rhs| lhs - rhs); +impl_binary_op!(Subtraction, Sub, -); diff --git a/core/src/ops/binary/kinds.rs b/core/src/ops/binary/kinds.rs new file mode 100644 index 00000000..8c6ae23d --- /dev/null +++ b/core/src/ops/binary/kinds.rs @@ -0,0 +1,47 @@ +/* + Appellation: kinds + Contrib: FL03 +*/ +#[cfg(feature = "serde")] +use serde::{Deserialize, Serialize}; +use strum::{Display, EnumCount, EnumIs, EnumIter, VariantNames}; + +#[cfg_attr( + feature = "serde", + derive(Deserialize, Serialize,), + serde(rename_all = "lowercase", untagged) +)] +#[derive( + Clone, + Copy, + Debug, + Default, + Display, + EnumCount, + EnumIs, + EnumIter, + Eq, + Hash, + Ord, + PartialEq, + PartialOrd, + VariantNames, +)] +#[repr(u8)] +#[strum(serialize_all = "lowercase")] +pub enum BinaryOp { + #[default] + Add, + Sub, + Mul, + Div, + Pow, + Rem, + Max, + Min, + And, + Or, + Xor, + Shl, + Shr, +} diff --git a/core/src/ops/binary/mod.rs b/core/src/ops/binary/mod.rs new file mode 100644 index 00000000..318ecf5a --- /dev/null +++ b/core/src/ops/binary/mod.rs @@ -0,0 +1,21 @@ +/* + Appellation: binary + Contrib: FL03 +*/ +pub use self::kinds::*; + +pub(crate) mod kinds; + +pub trait BinaryOperator { + type Output; + + fn apply(lhs: A, rhs: B) -> Self::Output; +} + +pub struct BinaryO { + pub args: (A, B), + pub op: BinaryOp, +} + +#[cfg(test)] +mod tests {} diff --git a/core/src/ops/mod.rs b/core/src/ops/mod.rs index f0750aa7..cc85f193 100644 --- a/core/src/ops/mod.rs +++ b/core/src/ops/mod.rs @@ -5,34 +5,16 @@ //! # Operations //! //! -pub use self::{arithmetic::*, gradient::*, kinds::*}; +pub use self::{arithmetic::*, kinds::*}; pub(crate) mod arithmetic; -pub(crate) mod gradient; pub(crate) mod kinds; -pub trait Evaluate { - type Output; - - fn eval(self) -> Self::Output; -} - -impl Evaluate for f64 { - type Output = f64; - - fn eval(self) -> Self::Output { - self - } -} +pub mod binary; +pub mod unary; pub trait BinaryOperation { type Output; fn eval(&self, lhs: A, rhs: B) -> Self::Output; } - -pub trait UnaryOperation { - type Output; - - fn eval(self) -> Self::Output; -} diff --git a/core/src/ops/unary/kinds.rs b/core/src/ops/unary/kinds.rs new file mode 100644 index 00000000..b7020b7c --- /dev/null +++ b/core/src/ops/unary/kinds.rs @@ -0,0 +1,48 @@ +/* + Appellation: kinds + Contrib: FL03 +*/ +#[cfg(feature = "serde")] +use serde::{Deserialize, Serialize}; +use strum::{Display, EnumCount, EnumIs, EnumIter, VariantNames}; + +#[cfg_attr( + feature = "serde", + derive(Deserialize, Serialize,), + serde(rename_all = "lowercase", untagged) +)] +#[derive( + Clone, + Copy, + Debug, + Default, + Display, + EnumCount, + EnumIs, + EnumIter, + Eq, + Hash, + Ord, + PartialEq, + PartialOrd, + VariantNames, +)] +#[repr(u8)] +#[strum(serialize_all = "lowercase")] +pub enum UnaryOp { + #[default] + Abs, + Cos, + Cosh, + Exp, + Floor, + Inv, + Ln, + Neg, + Sin, + Sinh, + Sqrt, + Square, + Tan, + Tanh, +} diff --git a/core/src/ops/unary/mod.rs b/core/src/ops/unary/mod.rs new file mode 100644 index 00000000..46e3f5a9 --- /dev/null +++ b/core/src/ops/unary/mod.rs @@ -0,0 +1,19 @@ +/* + Appellation: unary + Contrib: FL03 +*/ +//! # Unary Operations +//! +//! 
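+//!
+//! As a quick sketch of the intended usage, a hypothetical `Recip` operator
+//! (not part of this patch) could implement the [UnaryOperation] trait
+//! declared below; the operator consumes itself and returns the transformed
+//! operand:
+//!
+//! ```ignore
+//! struct Recip(f64);
+//!
+//! impl UnaryOperation for Recip {
+//!     type Output = f64;
+//!
+//!     // evaluating the operator yields the reciprocal of the wrapped value
+//!     fn eval(self) -> Self::Output {
+//!         self.0.recip()
+//!     }
+//! }
+//!
+//! assert_eq!(Recip(4.0).eval(), 0.25);
+//! ```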
+pub use self::kinds::*; + +pub(crate) mod kinds; + +pub trait UnaryOperation { + type Output; + + fn eval(self) -> Self::Output; +} + +#[cfg(test)] +mod tests {} diff --git a/core/src/ops/gradient.rs b/core/src/specs/gradient.rs similarity index 91% rename from core/src/ops/gradient.rs rename to core/src/specs/gradient.rs index cd2a0f2c..b660976f 100644 --- a/core/src/ops/gradient.rs +++ b/core/src/specs/gradient.rs @@ -5,6 +5,10 @@ use crate::prelude::Store; +pub trait IsDifferentiable { + fn differentiable(&self) -> bool; +} + pub trait Differentiable { type Derivative; diff --git a/core/src/specs/mod.rs b/core/src/specs/mod.rs index 7936c7f4..3c3411b3 100644 --- a/core/src/specs/mod.rs +++ b/core/src/specs/mod.rs @@ -3,8 +3,9 @@ Contrib: FL03 */ -pub use self::operator::*; +pub use self::{gradient::*, operator::*}; +pub(crate) mod gradient; pub(crate) mod operator; pub mod func; @@ -12,6 +13,7 @@ pub mod hkt; pub(crate) mod prelude { pub use super::func::*; + pub use super::gradient::*; pub use super::hkt::*; pub use super::operator::*; } diff --git a/graphs/src/dcg/graph.rs b/graphs/src/dcg/graph.rs index 455b9e5c..396095fa 100644 --- a/graphs/src/dcg/graph.rs +++ b/graphs/src/dcg/graph.rs @@ -5,8 +5,8 @@ use super::edge::Edge; use super::node::Node; use super::DynamicGraph; +use crate::prelude::GraphResult as Result; use acme::ops::*; -use acme::prelude::Result; use num::traits::{Num, NumAssignOps, NumOps}; use petgraph::algo::toposort; use petgraph::prelude::{Direction, NodeIndex}; diff --git a/graphs/src/errors/error.rs b/graphs/src/errors/error.rs new file mode 100644 index 00000000..671d7b21 --- /dev/null +++ b/graphs/src/errors/error.rs @@ -0,0 +1,80 @@ +/* + Appellation: error + Contrib: FL03 +*/ +#[cfg(feature = "serde")] +use serde::{Deserialize, Serialize}; +use strum::{Display, EnumCount, EnumIs, EnumIter, EnumString, VariantNames}; + +#[derive(Clone, Debug, Display, EnumCount, EnumIs, VariantNames)] +#[cfg_attr( + feature = "serde", + derive(Deserialize, Serialize), + serde(rename_all = "snake_case", untagged) +)] +#[repr(usize)] +#[strum(serialize_all = "snake_case")] +pub enum GraphError { + Cycle(CycleError), + Unknown(String), +} + +unsafe impl Send for GraphError {} + +unsafe impl Sync for GraphError {} + +impl std::error::Error for GraphError {} + +impl From<&str> for GraphError { + fn from(error: &str) -> Self { + GraphError::Unknown(error.to_string()) + } +} + +impl From for GraphError { + fn from(error: String) -> Self { + GraphError::Unknown(error) + } +} + +impl From> for GraphError +where + Idx: Copy + std::fmt::Debug, +{ + fn from(error: petgraph::algo::Cycle) -> Self { + GraphError::Cycle(CycleError::Cycle { + id: format!("{:?}", error.node_id()), + }) + } +} + +impl From for GraphError { + fn from(_error: petgraph::algo::NegativeCycle) -> Self { + GraphError::Cycle(CycleError::NegativeCylce) + } +} + +#[derive(Clone, Debug, Display, EnumCount, EnumIs, EnumIter, EnumString, VariantNames)] +#[cfg_attr( + feature = "serde", + derive(Deserialize, Serialize), + serde(rename_all = "snake_case", untagged) +)] +#[repr(usize)] +#[strum(serialize_all = "snake_case")] +pub enum CycleError { + Cycle { id: String }, + NegativeCylce, +} + +macro_rules! 
into_error { + ($error:ident, $kind:ident) => { + impl From<$error> for GraphError { + fn from(error: $error) -> Self { + GraphError::$kind(error) + } + } + }; +} + +into_error!(CycleError, Cycle); diff --git a/graphs/src/errors/mod.rs b/graphs/src/errors/mod.rs new file mode 100644 index 00000000..af04e25c --- /dev/null +++ b/graphs/src/errors/mod.rs @@ -0,0 +1,9 @@ +/* + Appellation: errors + Contrib: FL03 +*/ +pub use self::error::*; + +pub(crate) mod error; + +pub type GraphResult = std::result::Result; diff --git a/graphs/src/lib.rs b/graphs/src/lib.rs index 6f690008..d554a632 100644 --- a/graphs/src/lib.rs +++ b/graphs/src/lib.rs @@ -14,6 +14,7 @@ pub use self::graph::*; pub(crate) mod graph; pub mod dcg; +pub mod errors; pub mod grad; pub mod scg; @@ -21,6 +22,8 @@ pub mod prelude { #[doc(inline)] pub use crate::dcg::Dcg; #[doc(inline)] + pub use crate::errors::*; + #[doc(inline)] pub use crate::grad::prelude::*; #[doc(inline)] pub use crate::graph::*; diff --git a/graphs/src/scg/graph.rs b/graphs/src/scg/graph.rs index c98faf51..d6a0a713 100644 --- a/graphs/src/scg/graph.rs +++ b/graphs/src/scg/graph.rs @@ -3,7 +3,8 @@ Contrib: FL03 */ use super::Node; -use acme::prelude::{BinaryExpr, BinaryOperation, Operations, Result}; +use crate::prelude::GraphResult as Result; +use acme::prelude::{BinaryExpr, BinaryOperation, Operations}; use num::traits::{NumAssign, NumOps, Signed}; use petgraph::algo::toposort; use petgraph::prelude::{DiGraph, NodeIndex}; diff --git a/macros/src/ast/gradient.rs b/macros/src/ast/gradient.rs index 0761cc5a..b618b4e4 100644 --- a/macros/src/ast/gradient.rs +++ b/macros/src/ast/gradient.rs @@ -10,6 +10,20 @@ pub struct GradientAst { pub item: ItemFn, } +impl GradientAst { + pub fn new(attrs: Vec, item: ItemFn) -> Self { + Self { attrs, item } + } + + pub fn attributes(&self) -> &[Attribute] { + &self.attrs + } + + pub fn item(&self) -> &ItemFn { + &self.item + } +} + impl Parse for GradientAst { fn parse(input: ParseStream) -> Result { let attrs = input.call(Attribute::parse_outer)?; diff --git a/macros/src/lib.rs b/macros/src/lib.rs index 83bd8639..2d6b0c92 100644 --- a/macros/src/lib.rs +++ b/macros/src/lib.rs @@ -5,8 +5,7 @@ //! # acme-macros //! //! -#![feature(proc_macro_span)] -extern crate proc_macro; +extern crate proc_macro as pm; pub(crate) mod ast; pub(crate) mod cmp; @@ -16,18 +15,19 @@ pub(crate) mod ops; pub(crate) mod gradient; -use ast::partials::*; -use proc_macro::TokenStream; -use quote::quote; +use ast::gradient::GradientAst; +use ast::partials::PartialAst; +use pm::TokenStream; use syn::{parse_macro_input, Expr}; #[proc_macro_attribute] -pub fn partial(attr: TokenStream, item: TokenStream) -> TokenStream { - let input = parse_macro_input!(item as syn::ItemFn); - println!("attr: \"{}\"", attr.to_string()); - // let result = ad::handle::item::handle_item(&input); - // TokenStream::from(result) - (quote! 
{ #input }).into() +pub fn partial(_attr: TokenStream, item: TokenStream) -> TokenStream { + // let attr = parse_macro_input!(attr as syn::Attribute); + // let item = parse_macro_input!(item as syn::ItemFn); + // let ast = ast::gradient::GradientAst::new(attr, item); + let ast = parse_macro_input!(item as GradientAst); + let result = grad::gradient(&ast); + TokenStream::from(result) } #[proc_macro] diff --git a/tensor/src/errors/mod.rs b/tensor/src/errors/mod.rs index d171406e..731b1e27 100644 --- a/tensor/src/errors/mod.rs +++ b/tensor/src/errors/mod.rs @@ -6,4 +6,4 @@ pub use self::error::*; pub(crate) mod error; -pub type Result = std::result::Result; +pub type TensorResult = std::result::Result; diff --git a/tensor/src/shape/rank.rs b/tensor/src/shape/rank.rs index 98a425bc..d48a1fbd 100644 --- a/tensor/src/shape/rank.rs +++ b/tensor/src/shape/rank.rs @@ -7,6 +7,7 @@ //! The rank of a n-dimensional array describes the number of dimensions #[cfg(feature = "serde")] use serde::{Deserialize, Serialize}; +use std::borrow::Borrow; use std::ops::{Deref, DerefMut}; #[cfg_attr(feature = "serde", derive(Deserialize, Serialize,))] @@ -41,6 +42,12 @@ impl AsMut for Rank { } } +impl Borrow for Rank { + fn borrow(&self) -> &usize { + &self.0 + } +} + impl Deref for Rank { type Target = usize; diff --git a/tensor/src/shape/shape.rs b/tensor/src/shape/shape.rs index 24cf58ed..94789dba 100644 --- a/tensor/src/shape/shape.rs +++ b/tensor/src/shape/shape.rs @@ -4,7 +4,7 @@ */ use super::Rank; use crate::errors::ShapeError; -use crate::prelude::Result; +use crate::prelude::TensorResult; #[cfg(feature = "serde")] use serde::{Deserialize, Serialize}; use std::ops::{self, Deref}; @@ -47,7 +47,7 @@ impl Shape { Self(vec![0; rank]) } - pub(crate) fn matmul_shape(&self, other: &Self) -> Result { + pub(crate) fn matmul_shape(&self, other: &Self) -> TensorResult { if *self.rank() != 2 || *other.rank() != 2 { return Err(ShapeError::IncompatibleShapes.into()); } @@ -298,17 +298,17 @@ unsafe impl Send for Shape {} unsafe impl Sync for Shape {} -macro_rules! impl_from_tuple { - ($($n:tt: $name:ident),+) => { - impl<$($name),+> From<($($name,)+)> for Shape - where - $($name: Into,)+ - { - fn from(shape: ($($name,)+)) -> Self { - Self(vec![$($name.into(),)+]) - } - } - }; -} +// macro_rules! 
impl_from_tuple { +// ($($n:tt: $name:ident),+) => { +// impl<$($name),+> From<($($name,)+)> for Shape +// where +// $($name: Into,)+ +// { +// fn from(shape: ($($name,)+)) -> Self { +// Self(vec![$($name.into(),)+]) +// } +// } +// }; +// } // impl_from_tuple!(A: A); diff --git a/tensor/src/tensor.rs b/tensor/src/tensor.rs index b289090c..d2005ee8 100644 --- a/tensor/src/tensor.rs +++ b/tensor/src/tensor.rs @@ -119,6 +119,8 @@ where pub fn broadcast(&self, shape: impl IntoShape) -> Self { let shape = shape.into_shape(); + let _diff = *self.shape().rank() - *shape.rank(); + self.clone() } } diff --git a/tensor/tests/composition.rs b/tensor/tests/composition.rs new file mode 100644 index 00000000..8ec55e7d --- /dev/null +++ b/tensor/tests/composition.rs @@ -0,0 +1,32 @@ +/* + Appellation: tensor + Contrib: FL03 +*/ +#![cfg(test)] +extern crate acme_tensor as acme; + +use acme::prelude::{Shape, Tensor}; + +#[test] +fn test_tensor() { + let shape = (2, 2); + let a = Tensor::::ones(shape); + let b = Tensor::zeros(shape); + + assert_ne!(&a, &b); +} + +#[test] +fn test_arange() { + let exp = Shape::from(10); + let a = Tensor::arange(0_f64, 1_f64, 0.1); + assert_eq!(a.shape(), &exp); +} + +#[test] +fn test_fill() { + let shape = (2, 2); + let a = Tensor::fill(shape, 1_f64); + let b = Tensor::ones(shape); + assert_eq!(&a, &b); +} diff --git a/tensor/tests/tensor.rs b/tensor/tests/tensor.rs deleted file mode 100644 index d4d10a00..00000000 --- a/tensor/tests/tensor.rs +++ /dev/null @@ -1,17 +0,0 @@ -/* - Appellation: tensor - Contrib: FL03 -*/ -#![cfg(test)] -extern crate acme_tensor as tensor; - -use tensor::TensorBase; - -#[test] -fn test_tensor() { - let shape = (2, 2); - let a = TensorBase::::ones(shape); - let b = TensorBase::::zeros(shape); - - assert_ne!(&a, &b); -} From 5ba7276d0ed8674ecd48ded3393521aebd3b1718 Mon Sep 17 00:00:00 2001 From: Joe McCain III Date: Thu, 21 Mar 2024 12:48:47 -0500 Subject: [PATCH 34/87] update Signed-off-by: Joe McCain III --- .github/workflows/crates.yml | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/crates.yml b/.github/workflows/crates.yml index 2aedc07a..cfe946ca 100644 --- a/.github/workflows/crates.yml +++ b/.github/workflows/crates.yml @@ -23,7 +23,7 @@ jobs: matrix: features: [ core ] env: - CARGO_PACKAGE_NAME: ${{ github.repository.name }}-${{ matrix.features }} + CARGO_PACKAGE_NAME: ${{ github.event.repository.name }}-${{ matrix.features }} steps: - uses: actions/checkout@v4 - name: Publish (${{ env.CARGO_PACKAGE_NAME }}) @@ -39,7 +39,7 @@ jobs: matrix: features: [ derive, graphs, macros, tensor ] env: - CARGO_PACKAGE_NAME: ${{ github.repository.name }}-${{ matrix.features }} + CARGO_PACKAGE_NAME: ${{ github.event.repository.name }}-${{ matrix.features }} steps: - uses: actions/checkout@v4 - name: Publish (${{ env.CARGO_PACKAGE_NAME }}) @@ -50,5 +50,5 @@ jobs: runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 - - name: Publish (${{ github.repository.name }}) - run: cargo publish --all-features -v -p ${{ github.repository.name }} --token ${{ secrets.CARGO_REGISTRY_TOKEN }} \ No newline at end of file + - name: Publish (${{ github.event.repository.name }}) + run: cargo publish --all-features -v -p ${{ github.event.repository.name }} --token ${{ secrets.CARGO_REGISTRY_TOKEN }} \ No newline at end of file From 16e2fd1b654cf8ed47c073fccda2f84d25d521e3 Mon Sep 17 00:00:00 2001 From: Joe McCain III Date: Thu, 21 Mar 2024 12:51:02 -0500 Subject: [PATCH 35/87] update Signed-off-by: Joe McCain III --- 
core/src/cmp/constants.rs | 3 +-- core/src/cmp/dual.rs | 3 +-- core/src/lib.rs | 3 +-- 3 files changed, 3 insertions(+), 6 deletions(-) diff --git a/core/src/cmp/constants.rs b/core/src/cmp/constants.rs index 1f659051..2a3682fb 100644 --- a/core/src/cmp/constants.rs +++ b/core/src/cmp/constants.rs @@ -8,10 +8,9 @@ use num::{Num, One, Zero}; #[cfg(feature = "serde")] use serde::{Deserialize, Serialize}; use std::borrow::{Borrow, BorrowMut}; -use std::marker::ConstParamTy; use std::ops::{Deref, DerefMut, Neg, Not}; -#[derive(Clone, ConstParamTy, Copy, Debug, Default, Eq, Hash, Ord, PartialEq, PartialOrd)] +#[derive(Clone, Copy, Debug, Default, Eq, Hash, Ord, PartialEq, PartialOrd)] #[cfg_attr(feature = "serde", derive(Deserialize, Serialize,))] #[repr(transparent)] pub struct Constant(pub T); diff --git a/core/src/cmp/dual.rs b/core/src/cmp/dual.rs index 435ed62c..10382020 100644 --- a/core/src/cmp/dual.rs +++ b/core/src/cmp/dual.rs @@ -16,10 +16,9 @@ use crate::prelude::{Evaluate, Gradient}; use num::{Num, One, Zero}; #[cfg(feature = "serde")] use serde::{Deserialize, Serialize}; -use std::marker::ConstParamTy; use std::ops::{self, Neg, Not}; -#[derive(Clone, ConstParamTy, Copy, Debug, Default, Eq, Hash, Ord, PartialEq, PartialOrd)] +#[derive(Clone, Copy, Debug, Default, Eq, Hash, Ord, PartialEq, PartialOrd)] #[cfg_attr(feature = "serde", derive(Deserialize, Serialize,))] pub struct Dual { dual: T, diff --git a/core/src/lib.rs b/core/src/lib.rs index ee219e40..ef5bc520 100644 --- a/core/src/lib.rs +++ b/core/src/lib.rs @@ -5,8 +5,7 @@ //! # acme-core //! //! -#![allow(incomplete_features)] -#![feature(adt_const_params, fn_traits, tuple_trait, unboxed_closures)] +#![feature(tuple_trait,)] pub use self::{primitives::*, utils::*}; From 871b42b69d59ea1ff610e0a6d80a1a63b429e6b5 Mon Sep 17 00:00:00 2001 From: Joe McCain III Date: Thu, 21 Mar 2024 12:53:14 -0500 Subject: [PATCH 36/87] update Signed-off-by: Joe McCain III --- core/src/lib.rs | 1 - core/src/specs/operator.rs | 6 +----- 2 files changed, 1 insertion(+), 6 deletions(-) diff --git a/core/src/lib.rs b/core/src/lib.rs index ef5bc520..c7dd5fdc 100644 --- a/core/src/lib.rs +++ b/core/src/lib.rs @@ -5,7 +5,6 @@ //! # acme-core //! //! -#![feature(tuple_trait,)] pub use self::{primitives::*, utils::*}; diff --git a/core/src/specs/operator.rs b/core/src/specs/operator.rs index 4ee55ed5..7ef43d05 100644 --- a/core/src/specs/operator.rs +++ b/core/src/specs/operator.rs @@ -2,12 +2,8 @@ Appellation: operator Contrib: FL03 */ -use std::marker::Tuple; -pub trait Operand -where - Args: Tuple, -{ +pub trait Operand { type Output; fn name(&self) -> &str; From 59e1261e3fa10ce6ddb217c18f0a65e2c9e7ac87 Mon Sep 17 00:00:00 2001 From: Joe McCain III Date: Thu, 21 Mar 2024 12:55:20 -0500 Subject: [PATCH 37/87] update Signed-off-by: Joe McCain III --- tensor/src/lib.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/tensor/src/lib.rs b/tensor/src/lib.rs index 021bb264..0c69cbba 100644 --- a/tensor/src/lib.rs +++ b/tensor/src/lib.rs @@ -5,7 +5,6 @@ //! # acme-tensor //! //! 
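+//! A lightweight tensor library built around the [`TensorBase`] struct and
+//! the shape, layout, and operator machinery that supports it.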
-#![feature(array_chunks)] #[cfg(not(feature = "std"))] extern crate alloc; From 8a6e502f338c9ab74e9738dbb047474128475317 Mon Sep 17 00:00:00 2001 From: Joe McCain III Date: Thu, 21 Mar 2024 13:00:12 -0500 Subject: [PATCH 38/87] update Signed-off-by: Joe McCain III --- graphs/Cargo.toml | 1 + 1 file changed, 1 insertion(+) diff --git a/graphs/Cargo.toml b/graphs/Cargo.toml index cb78e10d..1399d36e 100644 --- a/graphs/Cargo.toml +++ b/graphs/Cargo.toml @@ -20,6 +20,7 @@ serde = [ serde-ext = [ "dep:serde_json", "petgraph/serde-1", + "acme-core/serde", ] [lib] From b0b8b29890b31901d844bf63785690e85d07a1fc Mon Sep 17 00:00:00 2001 From: Joe McCain III Date: Thu, 21 Mar 2024 13:02:08 -0500 Subject: [PATCH 39/87] update Signed-off-by: Joe McCain III --- graphs/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/graphs/Cargo.toml b/graphs/Cargo.toml index 1399d36e..1fc89c94 100644 --- a/graphs/Cargo.toml +++ b/graphs/Cargo.toml @@ -1,6 +1,6 @@ [package] authors.workspace = true -description = "" +description = "Acme graphs" edition.workspace = true homepage.workspace = true license.workspace = true From 69cd341e56635359d4cc8f1e742fd9457abb9b63 Mon Sep 17 00:00:00 2001 From: Joe McCain III Date: Fri, 22 Mar 2024 07:15:21 -0500 Subject: [PATCH 40/87] update Signed-off-by: Joe McCain III --- Cargo.toml | 2 +- acme/Cargo.toml | 10 +- core/Cargo.toml | 2 - core/src/id/id.rs | 13 +- core/src/lib.rs | 6 +- core/src/ops/arithmetic.rs | 6 - core/src/stores/gradient.rs | 3 +- core/src/{cmp => types}/constants.rs | 0 core/src/{cmp => types}/dual.rs | 0 core/src/{cmp => types}/mod.rs | 0 core/src/{cmp => types}/operators.rs | 0 core/src/{cmp => types}/variables.rs | 0 graphs/Cargo.toml | 4 +- graphs/src/dcg/mod.rs | 24 +-- graphs/src/errors/mod.rs | 3 + graphs/src/scg/mod.rs | 79 +--------- graphs/tests/dcg.rs | 24 ++- graphs/tests/scg.rs | 83 ++++++++++ tensor/Cargo.toml | 2 +- tensor/src/actions/arange.rs | 158 +++++++++++++++++++ tensor/src/actions/mod.rs | 12 ++ tensor/src/impls/grad.rs | 6 +- tensor/src/impls/linalg.rs | 32 +++- tensor/src/impls/{arith.rs => ops/binary.rs} | 82 ++-------- tensor/src/impls/ops/unary.rs | 62 ++++++++ tensor/src/lib.rs | 6 +- tensor/src/ops/backprop.rs | 12 +- tensor/src/ops/kinds.rs | 2 +- tensor/src/ops/mod.rs | 2 +- tensor/src/tensor.rs | 41 +++-- tensor/tests/arith.rs | 6 +- 31 files changed, 454 insertions(+), 228 deletions(-) rename core/src/{cmp => types}/constants.rs (100%) rename core/src/{cmp => types}/dual.rs (100%) rename core/src/{cmp => types}/mod.rs (100%) rename core/src/{cmp => types}/operators.rs (100%) rename core/src/{cmp => types}/variables.rs (100%) create mode 100644 graphs/tests/scg.rs create mode 100644 tensor/src/actions/arange.rs create mode 100644 tensor/src/actions/mod.rs rename tensor/src/impls/{arith.rs => ops/binary.rs} (67%) create mode 100644 tensor/src/impls/ops/unary.rs diff --git a/Cargo.toml b/Cargo.toml index 23ffa735..a344ac64 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -7,7 +7,7 @@ homepage = "https://github.com/FL03/acme/wikis" license = "Apache-2.0" repository = "https://github.com/FL03/acme" readme = "README.md" -version = "0.3.0-nightly" # TODO - Update cargo package version +version = "0.3.0" # TODO - Update cargo package version [workspace] default-members = [ diff --git a/acme/Cargo.toml b/acme/Cargo.toml index 49d8466a..c2b48d22 100644 --- a/acme/Cargo.toml +++ b/acme/Cargo.toml @@ -77,11 +77,11 @@ required-features = ["macros"] [build-dependencies] [dependencies] -acme-core = { path = "../core", 
version = "0.3.0-nightly" } -acme-derive = { optional = true, path = "../derive", version = "0.3.0-nightly" } -acme-graphs = { optional = true, path = "../graphs", version = "0.3.0-nightly" } -acme-macros = { optional = true, path = "../macros", version = "0.3.0-nightly" } -acme-tensor = { optional = true, path = "../tensor", version = "0.3.0-nightly" } +acme-core = { path = "../core", version = "0.3.0" } +acme-derive = { optional = true, path = "../derive", version = "0.3.0" } +acme-graphs = { optional = true, path = "../graphs", version = "0.3.0" } +acme-macros = { optional = true, path = "../macros", version = "0.3.0" } +acme-tensor = { optional = true, path = "../tensor", version = "0.3.0" } [dev-dependencies] approx = "0.5" diff --git a/core/Cargo.toml b/core/Cargo.toml index 40f7770c..342abd37 100644 --- a/core/Cargo.toml +++ b/core/Cargo.toml @@ -19,7 +19,6 @@ serde = [ serde-ext = [ "dep:serde_json", - "petgraph/serde-1", ] [lib] @@ -36,7 +35,6 @@ test = true anyhow.workspace = true lazy_static = "1" num = "0.4" -petgraph = "0.6" serde = { optional = true, features = ["derive"], version = "1" } serde_json = { optional = true, version = "1" } smart-default.workspace = true diff --git a/core/src/id/id.rs b/core/src/id/id.rs index febb9966..92067e15 100644 --- a/core/src/id/id.rs +++ b/core/src/id/id.rs @@ -3,7 +3,6 @@ Contrib: FL03 */ use super::AtomicId; -use petgraph::prelude::NodeIndex; #[cfg(feature = "serde")] use serde::{Deserialize, Serialize}; @@ -11,11 +10,11 @@ use serde::{Deserialize, Serialize}; #[cfg_attr(feature = "serde", derive(Deserialize, Serialize,))] pub struct Id { id: AtomicId, - index: NodeIndex, + index: usize, } impl Id { - pub fn new(index: NodeIndex) -> Self { + pub fn new(index: usize) -> Self { Self { id: AtomicId::new(), index, @@ -26,14 +25,14 @@ impl Id { *self.id } - pub fn index(&self) -> NodeIndex { + pub fn index(&self) -> usize { self.index } pub(crate) fn next_index(&self) -> Self { Self { id: self.id, - index: NodeIndex::new(self.index.index() + 1), + index: self.index() + 1, } } } @@ -41,9 +40,9 @@ impl Id { impl std::fmt::Display for Id { fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { if f.alternate() { - write!(f, "{}.{}", self.index.index(), self.id) + write!(f, "{}.{}", self.index(), self.id) } else { - write!(f, "{}", self.index.index()) + write!(f, "{}", self.index()) } } } diff --git a/core/src/lib.rs b/core/src/lib.rs index c7dd5fdc..6b88b58e 100644 --- a/core/src/lib.rs +++ b/core/src/lib.rs @@ -2,7 +2,7 @@ Appellation: acme-core Contrib: FL03 */ -//! # acme-core +//! # Core //! //! @@ -11,23 +11,23 @@ pub use self::{primitives::*, utils::*}; pub(crate) mod primitives; pub(crate) mod utils; -pub mod cmp; pub mod errors; pub mod eval; pub mod id; pub mod ops; pub mod specs; pub mod stores; +pub mod types; pub mod prelude { pub use crate::primitives::*; pub use crate::utils::*; - pub use crate::cmp::*; pub use crate::errors::*; pub use crate::eval::*; pub use crate::id::*; pub use crate::ops::*; pub use crate::specs::prelude::*; pub use crate::stores::*; + pub use crate::types::*; } diff --git a/core/src/ops/arithmetic.rs b/core/src/ops/arithmetic.rs index 87b903fd..2d6ae6ed 100644 --- a/core/src/ops/arithmetic.rs +++ b/core/src/ops/arithmetic.rs @@ -6,12 +6,6 @@ use serde::{Deserialize, Serialize}; use std::ops::{Add, Div, Mul, Sub}; -pub trait Trig { - fn sin(self) -> Self; - fn cos(self) -> Self; - fn tan(self) -> Self; -} - macro_rules! 
operator { ($op:ident) => { #[derive(Clone, Copy, Debug, Default, Eq, Hash, Ord, PartialEq, PartialOrd)] diff --git a/core/src/stores/gradient.rs b/core/src/stores/gradient.rs index a80c8451..0089adae 100644 --- a/core/src/stores/gradient.rs +++ b/core/src/stores/gradient.rs @@ -3,11 +3,10 @@ Contrib: FL03 */ use super::Store; -use petgraph::prelude::NodeIndex; use std::any::Any; use std::collections::BTreeMap; -pub struct GradientStore { +pub struct GradientStore { store: BTreeMap>, } diff --git a/core/src/cmp/constants.rs b/core/src/types/constants.rs similarity index 100% rename from core/src/cmp/constants.rs rename to core/src/types/constants.rs diff --git a/core/src/cmp/dual.rs b/core/src/types/dual.rs similarity index 100% rename from core/src/cmp/dual.rs rename to core/src/types/dual.rs diff --git a/core/src/cmp/mod.rs b/core/src/types/mod.rs similarity index 100% rename from core/src/cmp/mod.rs rename to core/src/types/mod.rs diff --git a/core/src/cmp/operators.rs b/core/src/types/operators.rs similarity index 100% rename from core/src/cmp/operators.rs rename to core/src/types/operators.rs diff --git a/core/src/cmp/variables.rs b/core/src/types/variables.rs similarity index 100% rename from core/src/cmp/variables.rs rename to core/src/types/variables.rs diff --git a/graphs/Cargo.toml b/graphs/Cargo.toml index 1fc89c94..957ea79c 100644 --- a/graphs/Cargo.toml +++ b/graphs/Cargo.toml @@ -1,6 +1,6 @@ [package] authors.workspace = true -description = "Acme graphs" +description = "This module implements several computational graphs" edition.workspace = true homepage.workspace = true license.workspace = true @@ -34,7 +34,7 @@ test = true [dev-dependencies] [dependencies] -acme-core = { path = "../core", version = "0.3.0-nightly" } +acme-core = { path = "../core", version = "0.3.0" } anyhow.workspace = true lazy_static = "1" diff --git a/graphs/src/dcg/mod.rs b/graphs/src/dcg/mod.rs index 8f40b94d..b1134473 100644 --- a/graphs/src/dcg/mod.rs +++ b/graphs/src/dcg/mod.rs @@ -16,26 +16,4 @@ pub mod node; pub(crate) type DynamicGraph = petgraph::graph::DiGraph, edge::Edge>; #[cfg(test)] -mod tests { - use super::*; - - #[test] - fn test_dcg() { - let mut dcg = Dcg::::new(); - let a = dcg.input(true, 2.0); - let b = dcg.input(true, 3.0); - let c = dcg.add(a, b); - - let grad = dcg.gradient(c).unwrap(); - assert_eq!(grad[&a], 1.0); - - let mut dcg = Dcg::::new(); - let a = dcg.input(true, 2.0); - let b = dcg.input(true, 3.0); - let c = dcg.mul(a, b); - - let grad = dcg.gradient(c).unwrap(); - assert_eq!(grad[&a], 3.0); - assert_eq!(grad[&b], 2.0); - } -} +mod tests {} diff --git a/graphs/src/errors/mod.rs b/graphs/src/errors/mod.rs index af04e25c..82a4be25 100644 --- a/graphs/src/errors/mod.rs +++ b/graphs/src/errors/mod.rs @@ -2,6 +2,9 @@ Appellation: errors Contrib: FL03 */ +//! # Errors +//! +//! 
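+//! The [GraphError] enum re-exported below unifies the failure modes of the
+//! graph implementations, wrapping the cycle errors reported by `petgraph`,
+//! while the [GraphResult] alias pairs it with a success type.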
pub use self::error::*; pub(crate) mod error; diff --git a/graphs/src/scg/mod.rs b/graphs/src/scg/mod.rs index 58ccf33c..cff04ee4 100644 --- a/graphs/src/scg/mod.rs +++ b/graphs/src/scg/mod.rs @@ -12,81 +12,4 @@ pub(crate) mod graph; pub(crate) mod node; #[cfg(test)] -mod tests { - use super::*; - - #[test] - fn test_dag() { - let mut dag = Scg::new(); - let x = dag.variable(1_f64); - let y = dag.variable(2_f64); - // f(x, y) = x + y - let c = dag.add(x, y).unwrap(); - // verify the value of c to be the sum of x and y - assert_eq!(*dag.get_value(c).unwrap(), 3.0); - // f(x, y) = y * (x + y) - let d = dag.mul(c, y).unwrap(); - // verify the value of d to be the product of c and y - assert_eq!(*dag.get_value(d).unwrap(), 6.0); - - let gc = dag.gradient_at(c).unwrap(); - - assert_eq!(gc[&x], 1.0); - assert_eq!(gc[&y], 1.0); - - let gd = dag.backward().unwrap(); - - assert_eq!(gd[&x], 2.0); - assert_eq!(gd[&y], 5.0); - } - - #[test] - fn test_backward() { - let mut dag = Scg::new(); - let x = dag.variable(1_f64); - let y = dag.variable(2_f64); - - let c = dag.sub(x, y).unwrap(); - - let d = dag.mul(c, y).unwrap(); - - assert_eq!(*dag.get_value(c).unwrap(), -1.0); - assert_eq!(*dag.get_value(d).unwrap(), -2.0); - - let gc = dag.gradient_at(c).unwrap(); - - assert_eq!(gc[&x], 1.0); - assert_eq!(gc[&y], -1.0); - - let gd = dag.backward().unwrap(); - - assert_eq!(gd[&x], 2.0); - assert_eq!(gd[&y], -3.0); - } - - #[ignore = "Not yet implemented"] - #[test] - fn test_division() { - let mut dag = Scg::new(); - let one = dag.constant(1_f64); - let x = dag.variable(1_f64); - let y = dag.variable(2_f64); - - let c = dag.add(x, y).unwrap(); - - let d = dag.div(one, c).unwrap(); - - assert_eq!(*dag.get_value(c).unwrap(), 3.0); - assert_eq!(*dag.get_value(d).unwrap(), 1.0 / 3.0); - - let gc = dag.gradient_at(c).unwrap(); - - assert_eq!(gc[&x], 1.0); - assert_eq!(gc[&y], 1.0); - - let gd = dag.backward().unwrap(); - - assert_eq!(gd[&x], -1.0); - assert_eq!(gd[&y], -1.0); - } -} +mod tests {} diff --git a/graphs/tests/dcg.rs b/graphs/tests/dcg.rs index 46f3b86c..7b8a41ae 100644 --- a/graphs/tests/dcg.rs +++ b/graphs/tests/dcg.rs @@ -3,4 +3,26 @@ Contrib: FL03 */ #![cfg(test)] -extern crate acme_graphs as acme; +extern crate acme_graphs as graphs; + +use graphs::dcg::Dcg; + +#[test] +fn test_dcg() { + let mut dcg = Dcg::::new(); + let a = dcg.input(true, 2.0); + let b = dcg.input(true, 3.0); + let c = dcg.add(a, b); + + let grad = dcg.gradient(c).unwrap(); + assert_eq!(grad[&a], 1.0); + + let mut dcg = Dcg::::new(); + let a = dcg.input(true, 2.0); + let b = dcg.input(true, 3.0); + let c = dcg.mul(a, b); + + let grad = dcg.gradient(c).unwrap(); + assert_eq!(grad[&a], 3.0); + assert_eq!(grad[&b], 2.0); +} diff --git a/graphs/tests/scg.rs b/graphs/tests/scg.rs new file mode 100644 index 00000000..f8926459 --- /dev/null +++ b/graphs/tests/scg.rs @@ -0,0 +1,83 @@ +/* + Appellation: scg + Contrib: FL03 +*/ +#![cfg(test)] +extern crate acme_graphs as graphs; + +use graphs::scg::Scg; + +#[test] +fn test_scg() { + let mut dag = Scg::new(); + let x = dag.variable(1_f64); + let y = dag.variable(2_f64); + // f(x, y) = x + y + let c = dag.add(x, y).unwrap(); + // verify the value of c to be the sum of x and y + assert_eq!(*dag.get_value(c).unwrap(), 3.0); + // f(x, y) = y * (x + y) + let d = dag.mul(c, y).unwrap(); + // verify the value of d to be the product of c and y + assert_eq!(*dag.get_value(d).unwrap(), 6.0); + + let gc = dag.gradient_at(c).unwrap(); + + assert_eq!(gc[&x], 1.0); + assert_eq!(gc[&y], 1.0); + + let 
gd = dag.backward().unwrap(); + + assert_eq!(gd[&x], 2.0); + assert_eq!(gd[&y], 5.0); +} + +#[test] +fn test_backward() { + let mut dag = Scg::new(); + let x = dag.variable(1_f64); + let y = dag.variable(2_f64); + + let c = dag.sub(x, y).unwrap(); + + let d = dag.mul(c, y).unwrap(); + + assert_eq!(*dag.get_value(c).unwrap(), -1.0); + assert_eq!(*dag.get_value(d).unwrap(), -2.0); + + let gc = dag.gradient_at(c).unwrap(); + + assert_eq!(gc[&x], 1.0); + assert_eq!(gc[&y], -1.0); + + let gd = dag.backward().unwrap(); + + assert_eq!(gd[&x], 2.0); + assert_eq!(gd[&y], -3.0); +} + +#[ignore = "Not yet implemented"] +#[test] +fn test_division() { + let mut dag = Scg::new(); + let one = dag.constant(1_f64); + let x = dag.variable(1_f64); + let y = dag.variable(2_f64); + + let c = dag.add(x, y).unwrap(); + + let d = dag.div(one, c).unwrap(); + + assert_eq!(*dag.get_value(c).unwrap(), 3.0); + assert_eq!(*dag.get_value(d).unwrap(), 1.0 / 3.0); + + let gc = dag.gradient_at(c).unwrap(); + + assert_eq!(gc[&x], 1.0); + assert_eq!(gc[&y], 1.0); + + let gd = dag.backward().unwrap(); + + assert_eq!(gd[&x], -1.0); + assert_eq!(gd[&y], -1.0); +} diff --git a/tensor/Cargo.toml b/tensor/Cargo.toml index cefcd7f1..f99e463a 100644 --- a/tensor/Cargo.toml +++ b/tensor/Cargo.toml @@ -25,7 +25,7 @@ serde-ext = [ [build-dependencies] [dependencies] -acme-core = { path = "../core", version = "0.3.0-nightly" } +acme-core = { path = "../core", version = "0.3.0" } num = "0.4" serde = { optional = true, features = ["derive"], version = "1" } diff --git a/tensor/src/actions/arange.rs b/tensor/src/actions/arange.rs new file mode 100644 index 00000000..aa44ebee --- /dev/null +++ b/tensor/src/actions/arange.rs @@ -0,0 +1,158 @@ +/* + Appellation: arange + Contrib: FL03 +*/ +use num::traits::real::Real; +use num::traits::{One, Zero}; +use std::ops; + +pub trait Ranged { + fn arange(start: T, stop: T, step: T) -> Self; + + fn arange_between(start: T, stop: T) -> Self; + + fn arange_until(stop: T) -> Self; +} + +pub trait Linstep { + type Elem; + + fn linstep(start: Self::Elem, stop: Self::Elem, steps: usize) -> Vec; +} + +pub enum Ranges { + Arange { start: T, stop: T }, + Between { start: T, stop: T }, + Until { stop: T }, +} + +#[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)] +pub enum Aranged { + Arange { start: T, stop: T, step: T }, + Between { start: T, stop: T }, + Until { stop: T }, +} + +impl Aranged +where + T: Copy, +{ + /// Returns the start value of the range. + pub fn start(&self) -> T + where + T: Zero, + { + match self { + Aranged::Arange { start, .. } => *start, + Aranged::Between { start, .. } => *start, + Aranged::Until { .. } => T::zero(), + } + } + /// Returns the stop value of the range. + pub fn stop(&self) -> T { + match self { + Aranged::Arange { stop, .. } => *stop, + Aranged::Between { stop, .. } => *stop, + Aranged::Until { stop } => *stop, + } + } + /// Returns the step value of the range. + pub fn step(&self) -> T + where + T: One, + { + match self { + Aranged::Arange { step, .. } => *step, + Aranged::Between { .. } => T::one(), + Aranged::Until { .. 
} => T::one(), + } + } + /// Returns the number of steps between the given boundaries + pub fn steps(&self) -> usize + where + T: Real, + { + match self { + Aranged::Arange { start, stop, step } => { + let n = ((*stop - *start) / *step).ceil().to_usize().unwrap(); + n + } + Aranged::Between { start, stop } => { + let n = (*stop - *start).to_usize().unwrap(); + n + } + + Aranged::Until { stop } => { + let n = stop.to_usize().unwrap(); + n + } + } + } +} + +impl From> for Aranged { + fn from(args: ops::Range) -> Self { + Aranged::Between { + start: args.start, + stop: args.end, + } + } +} + +impl From> for Aranged { + fn from(args: ops::RangeTo) -> Self { + Aranged::Until { stop: args.end } + } +} + +impl From<(T, T, T)> for Aranged { + fn from(args: (T, T, T)) -> Self { + Aranged::Arange { + start: args.0, + stop: args.1, + step: args.2, + } + } +} + +impl From<[T; 3]> for Aranged +where + T: Copy, +{ + fn from(args: [T; 3]) -> Self { + Aranged::Arange { + start: args[0], + stop: args[1], + step: args[2], + } + } +} + +impl From<(T, T)> for Aranged { + fn from(args: (T, T)) -> Self { + Aranged::Between { + start: args.0, + stop: args.1, + } + } +} + +impl From for Aranged { + fn from(stop: T) -> Self { + Aranged::Until { stop } + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_arange_args() { + let arange = Aranged::Between { start: 0, stop: 10 }; + assert_eq!(arange.start(), 0); + assert_eq!(arange.stop(), 10); + assert_eq!(arange.step(), 1); + assert_eq!(arange, (0..10).into()); + } +} diff --git a/tensor/src/actions/mod.rs b/tensor/src/actions/mod.rs new file mode 100644 index 00000000..0268da4f --- /dev/null +++ b/tensor/src/actions/mod.rs @@ -0,0 +1,12 @@ +/* + Appellation: actions + Contrib: FL03 +*/ +//! # Actions +//! +//! + +pub mod arange; + +#[cfg(test)] +mod tests {} diff --git a/tensor/src/impls/grad.rs b/tensor/src/impls/grad.rs index f2a180ef..3588e733 100644 --- a/tensor/src/impls/grad.rs +++ b/tensor/src/impls/grad.rs @@ -2,7 +2,7 @@ Appellation: grad Contrib: FL03 */ -use crate::ops::{BinaryOp, Op}; +use crate::ops::{BinaryOp, TensorOp}; use crate::prelude::Scalar; use crate::tensor::*; use acme::prelude::AtomicId; @@ -21,10 +21,10 @@ where if let Some(op) = &self.op { match op { - Op::Unary(_a, kind) => match kind { + TensorOp::Unary(_a, kind) => match kind { _ => todo!(), }, - Op::Binary(a, b, kind) => match kind { + TensorOp::Binary(a, b, kind) => match kind { BinaryOp::Add => { *store .entry(a.id().into()) diff --git a/tensor/src/impls/linalg.rs b/tensor/src/impls/linalg.rs index 3dd1eb42..4f956760 100644 --- a/tensor/src/impls/linalg.rs +++ b/tensor/src/impls/linalg.rs @@ -3,10 +3,38 @@ Contrib: FL03 */ //! Implementations for linear algebra operations. 
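+//!
+//! The free-standing `matmul` helper below is a naive three-loop kernel over
+//! the row-major backing stores: for a `(m, k)` by `(k, n)` product, each
+//! entry `result[i * n + j]` accumulates `lhs[i * k + p] * rhs[p * n + j]`.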
-use crate::ops::{BinaryOp, Op}; +use crate::ops::{BinaryOp, TensorOp}; use crate::prelude::{Matmul, Scalar}; use crate::tensor::*; +pub(crate) fn matmul(lhs: &TensorBase, rhs: &TensorBase) -> TensorBase +where + T: Scalar, +{ + let lhs_shape = lhs.shape().clone(); + let rhs_shape = rhs.shape().clone(); + + let shape = lhs_shape.matmul_shape(rhs.shape()).unwrap(); + let mut result = vec![T::zero(); shape.elements()]; + + for i in 0..lhs_shape[0] { + for j in 0..rhs_shape[1] { + for k in 0..lhs_shape[1] { + let pos = i * rhs_shape[1] + j; + let left = i * lhs_shape[1] + k; + let right = k * rhs_shape[1] + j; + result[pos] += lhs.store[left] * rhs.store[right]; + } + } + } + let op = TensorOp::Binary( + Box::new(lhs.clone()), + Box::new(rhs.clone()), + BinaryOp::Matmul, + ); + from_vec_with_op(op, shape, result) +} + impl Matmul> for TensorBase where T: Scalar, @@ -24,7 +52,7 @@ where } } } - let op = Op::Binary( + let op = TensorOp::Binary( Box::new(self.clone()), Box::new(other.clone()), BinaryOp::Matmul, diff --git a/tensor/src/impls/arith.rs b/tensor/src/impls/ops/binary.rs similarity index 67% rename from tensor/src/impls/arith.rs rename to tensor/src/impls/ops/binary.rs index 0c996ad2..6a36822c 100644 --- a/tensor/src/impls/arith.rs +++ b/tensor/src/impls/ops/binary.rs @@ -2,38 +2,10 @@ Appellation: arith Contrib: FL03 */ -use crate::ops::{BinaryOp, Op, UnaryOp}; +use crate::ops::{BinaryOp, TensorOp}; use crate::prelude::Scalar; use crate::tensor::*; -impl std::ops::Neg for TensorBase -where - T: Copy + std::ops::Neg, -{ - type Output = Self; - - fn neg(self) -> Self::Output { - let shape = self.shape().clone(); - let store = self.data().iter().copied().map(|a| -a).collect(); - let op = Op::Unary(Box::new(self), UnaryOp::Neg); - from_vec_with_op(op, shape, store) - } -} - -impl<'a, T> std::ops::Neg for &'a TensorBase -where - T: Copy + std::ops::Neg, -{ - type Output = TensorBase; - - fn neg(self) -> Self::Output { - let shape = self.shape().clone(); - let store = self.data().iter().copied().map(|a| -a).collect(); - let op = Op::Unary(Box::new(self.clone()), UnaryOp::Neg); - from_vec_with_op(op, shape, store) - } -} - macro_rules! cmp { (ne: $lhs:expr, $rhs:expr) => { if $lhs != $rhs { @@ -42,8 +14,10 @@ macro_rules! cmp { }; } -macro_rules! impl_arith { +macro_rules! impl_arithmetic { ($trait:ident, $method:ident, $op:tt) => { + impl_scalar_arith!($trait, $method, $op); + impl std::ops::$trait for TensorBase where T: Scalar + std::ops::$trait, @@ -54,7 +28,7 @@ macro_rules! impl_arith { cmp!(ne: self.shape(), other.shape()); let shape = self.shape().clone(); let store = self.data().iter().zip(other.data().iter()).map(|(a, b)| *a $op *b).collect(); - let op = Op::Binary(Box::new(self), Box::new(other), BinaryOp::$trait); + let op = TensorOp::Binary(Box::new(self), Box::new(other), BinaryOp::$trait); from_vec_with_op(op, shape, store) } } @@ -71,7 +45,7 @@ macro_rules! impl_arith { } let shape = self.shape().clone(); let store = self.data().iter().zip(other.data().iter()).map(|(a, b)| *a $op *b).collect(); - let op = Op::Binary(Box::new(self), Box::new(other.clone()), BinaryOp::$trait); + let op = TensorOp::Binary(Box::new(self), Box::new(other.clone()), BinaryOp::$trait); from_vec_with_op(op, shape, store) } } @@ -88,7 +62,7 @@ macro_rules! 
impl_arith { } let shape = self.shape().clone(); let store = self.data().iter().zip(other.data().iter()).map(|(a, b)| *a $op *b).collect(); - let op = Op::Binary(Box::new(self.clone()), Box::new(other), BinaryOp::$trait); + let op = TensorOp::Binary(Box::new(self.clone()), Box::new(other), BinaryOp::$trait); from_vec_with_op(op, shape, store) } } @@ -105,7 +79,7 @@ macro_rules! impl_arith { } let shape = self.shape().clone(); let store = self.data().iter().zip(other.data().iter()).map(|(a, b)| *a $op *b).collect(); - let op = Op::Binary(Box::new(self.clone()), Box::new(other.clone()), BinaryOp::$trait); + let op = TensorOp::Binary(Box::new(self.clone()), Box::new(other.clone()), BinaryOp::$trait); from_vec_with_op(op, shape, store) } } @@ -168,44 +142,12 @@ macro_rules! impl_assign_op { } -macro_rules! impl_unary_arith { - ($variant:ident, $method:ident, $e:expr) => { - impl TensorBase - where - T: Scalar, - { - pub fn $method(self) -> Self { - let shape = self.shape().clone(); - let store = self.store.iter().map($e).collect(); - let op = Op::::Unary(Box::new(self), UnaryOp::$variant); - from_vec_with_op(op, shape, store) - } - } - }; -} - -impl_arith!(Add, add, +); -impl_arith!(Div, div, /); -impl_arith!(Mul, mul, *); -impl_arith!(Sub, sub, -); +impl_arithmetic!(Add, add, +); +impl_arithmetic!(Div, div, /); +impl_arithmetic!(Mul, mul, *); +impl_arithmetic!(Sub, sub, -); impl_assign_op!(AddAssign, add_assign, Add, +); impl_assign_op!(DivAssign, div_assign, Div, /); impl_assign_op!(MulAssign, mul_assign, Mul, *); impl_assign_op!(SubAssign, sub_assign, Sub, -); - -impl_scalar_arith!(Add, add, +); -impl_scalar_arith!(Div, div, /); -impl_scalar_arith!(Mul, mul, *); -impl_scalar_arith!(Sub, sub, -); - -impl_unary_arith!(Exp, exp, |v| v.exp()); -// impl_unary_arith!(Log, log, |v| v.log()); - -impl_unary_arith!(Cos, cos, |v| v.cos()); -impl_unary_arith!(Cosh, cosh, |v| v.cosh()); -impl_unary_arith!(Sin, sin, |v| v.sin()); -impl_unary_arith!(Sinh, sinh, |v| v.sinh()); -impl_unary_arith!(Sqrt, sqrt, |v| v.sqrt()); -impl_unary_arith!(Tan, tan, |v| v.tan()); -impl_unary_arith!(Tanh, tanh, |v| v.tanh()); diff --git a/tensor/src/impls/ops/unary.rs b/tensor/src/impls/ops/unary.rs new file mode 100644 index 00000000..be5bc926 --- /dev/null +++ b/tensor/src/impls/ops/unary.rs @@ -0,0 +1,62 @@ +/* + Appellation: arith + Contrib: FL03 +*/ +use crate::ops::{TensorOp, UnaryOp}; +use crate::prelude::Scalar; +use crate::tensor::*; + +impl std::ops::Neg for TensorBase +where + T: Copy + std::ops::Neg, +{ + type Output = Self; + + fn neg(self) -> Self::Output { + let shape = self.shape().clone(); + let store = self.data().iter().copied().map(|a| -a).collect(); + let op = TensorOp::Unary(Box::new(self), UnaryOp::Neg); + from_vec_with_op(op, shape, store) + } +} + +impl<'a, T> std::ops::Neg for &'a TensorBase +where + T: Copy + std::ops::Neg, +{ + type Output = TensorBase; + + fn neg(self) -> Self::Output { + let shape = self.shape().clone(); + let store = self.data().iter().copied().map(|a| -a).collect(); + let op = TensorOp::Unary(Box::new(self.clone()), UnaryOp::Neg); + from_vec_with_op(op, shape, store) + } +} + +macro_rules! 
impl_unary_arith {
+    ($variant:ident, $method:ident, $e:expr) => {
+        impl<T> TensorBase<T>
+        where
+            T: Scalar,
+        {
+            pub fn $method(self) -> Self {
+                let shape = self.shape().clone();
+                let store = self.store.iter().map($e).collect();
+                let op = TensorOp::<T>::Unary(Box::new(self), UnaryOp::$variant);
+                from_vec_with_op(op, shape, store)
+            }
+        }
+    };
+}
+
+impl_unary_arith!(Exp, exp, |v| v.exp());
+// impl_unary_arith!(Log, log, |v| v.log());
+
+impl_unary_arith!(Cos, cos, |v| v.cos());
+impl_unary_arith!(Cosh, cosh, |v| v.cosh());
+impl_unary_arith!(Sin, sin, |v| v.sin());
+impl_unary_arith!(Sinh, sinh, |v| v.sinh());
+impl_unary_arith!(Sqrt, sqrt, |v| v.sqrt());
+impl_unary_arith!(Tan, tan, |v| v.tan());
+impl_unary_arith!(Tanh, tanh, |v| v.tanh());
diff --git a/tensor/src/lib.rs b/tensor/src/lib.rs
index 0c69cbba..9dc24d86 100644
--- a/tensor/src/lib.rs
+++ b/tensor/src/lib.rs
@@ -15,6 +15,7 @@ pub use self::tensor::*;
 
 pub(crate) mod tensor;
 
+pub mod actions;
 pub mod data;
 pub mod errors;
 pub mod linalg;
@@ -24,7 +25,10 @@ pub mod specs;
 pub mod store;
 
 mod impls {
-    mod arith;
+    mod ops {
+        mod binary;
+        mod unary;
+    }
     mod grad;
     mod linalg;
     mod num;
diff --git a/tensor/src/ops/backprop.rs b/tensor/src/ops/backprop.rs
index 954a5ff0..e3132d1b 100644
--- a/tensor/src/ops/backprop.rs
+++ b/tensor/src/ops/backprop.rs
@@ -2,12 +2,12 @@
     Appellation: backprop
     Contrib: FL03
 */
-use super::Op;
+use super::TensorOp;
 
-pub struct BackpropOp<T>(Option<Op<T>>);
+pub struct BackpropOp<T>(Option<TensorOp<T>>);
 
 impl<T> BackpropOp<T> {
-    pub fn new(op: Op<T>) -> Self {
+    pub fn new(op: TensorOp<T>) -> Self {
         BackpropOp(Some(op))
     }
@@ -15,15 +15,15 @@ impl<T> BackpropOp<T> {
         BackpropOp(None)
     }
 
-    pub fn op(&self) -> Option<&Op<T>> {
+    pub fn op(&self) -> Option<&TensorOp<T>> {
         self.0.as_ref()
     }
 
-    pub fn op_mut(&mut self) -> Option<&mut Op<T>> {
+    pub fn op_mut(&mut self) -> Option<&mut TensorOp<T>> {
         self.0.as_mut()
     }
 
-    pub fn into_inner(self) -> Option<Op<T>> {
+    pub fn into_inner(self) -> Option<TensorOp<T>> {
         self.0
     }
diff --git a/tensor/src/ops/kinds.rs b/tensor/src/ops/kinds.rs
index 31cbe181..714026ae 100644
--- a/tensor/src/ops/kinds.rs
+++ b/tensor/src/ops/kinds.rs
@@ -8,7 +8,7 @@ use serde::{Deserialize, Serialize};
 use strum::{Display, EnumCount, EnumIs, EnumIter, EnumString, VariantNames};
 
 #[derive(Clone, Debug)]
-pub enum Op<T> {
+pub enum TensorOp<T> {
     Binary(Box<TensorBase<T>>, Box<TensorBase<T>>, BinaryOp),
     Unary(Box<TensorBase<T>>, UnaryOp),
 }
diff --git a/tensor/src/ops/mod.rs b/tensor/src/ops/mod.rs
index 72c8741b..287e392b 100644
--- a/tensor/src/ops/mod.rs
+++ b/tensor/src/ops/mod.rs
@@ -9,7 +9,7 @@ pub(crate) mod kinds;
 
 pub mod op;
 
-pub trait TensorOp {}
+pub trait TensorExpr {}
 
 #[cfg(test)]
 mod tests {}
diff --git a/tensor/src/tensor.rs b/tensor/src/tensor.rs
index d2005ee8..0394923c 100644
--- a/tensor/src/tensor.rs
+++ b/tensor/src/tensor.rs
@@ -2,11 +2,12 @@
     Appellation: tensor
     Contrib: FL03
 */
-use crate::ops::kinds::{BinaryOp, Op};
+use crate::ops::kinds::{BinaryOp, TensorOp};
 use crate::prelude::Scalar;
 use crate::shape::{IntoShape, Rank, Shape};
 use crate::store::Layout;
 use acme::prelude::AtomicId;
+use num::traits::{NumAssign, One, Zero};
 // use std::ops::{Index, IndexMut};
 // use std::sync::{Arc, RwLock};
@@ -20,7 +21,7 @@ pub(crate) fn from_vec<T>(shape: impl IntoShape, store: Vec<T>) -> TensorBase<T>
 }
 
 pub(crate) fn from_vec_with_op<T>(
-    op: Op<T>,
+    op: TensorOp<T>,
     shape: impl IntoShape,
     store: Vec<T>,
 ) -> TensorBase<T> {
@@ -38,11 +39,20 @@ pub struct TensorBase<T> {
     pub(crate) id: AtomicId,
     pub(crate) layout: Layout,
-    pub(crate) op: Option<Op<T>>,
+    pub(crate) op: Option<TensorOp<T>>,
     pub(crate) store: 
Vec, } impl TensorBase { + pub fn new(shape: impl IntoShape) -> Self { + Self { + id: AtomicId::new(), + layout: Layout::contiguous(shape), + op: None, + store: Vec::new(), + } + } + pub fn from_vec(shape: impl IntoShape, store: Vec) -> Self { from_vec(shape, store) } @@ -51,7 +61,7 @@ impl TensorBase { fn position(&self, coords: impl AsRef<[usize]>) -> usize { self.layout.position(coords.as_ref()) } - + /// Returns the unique identifier of the tensor. pub fn id(&self) -> usize { self.id.get() } @@ -60,7 +70,7 @@ impl TensorBase { &self.layout } - pub fn op(&self) -> Option<&Op> { + pub fn op(&self) -> Option<&TensorOp> { self.op.as_ref() } @@ -127,12 +137,10 @@ where impl TensorBase where - T: Scalar, + T: Copy + NumAssign + PartialOrd, { - pub fn arange(start: T, end: T, step: T) -> Self - where - T: PartialOrd, - { + /// Create a tensor within a range of values + pub fn arange(start: T, end: T, step: T) -> Self { if T::is_zero(&step) { panic!("step must be non-zero"); } @@ -145,7 +153,11 @@ where } Self::from_vec(store.len(), store) } - +} +impl TensorBase +where + T: Clone + One, +{ pub fn ones(shape: impl IntoShape) -> Self { Self::fill(shape, T::one()) } @@ -153,7 +165,12 @@ where pub fn ones_like(tensor: &TensorBase) -> Self { Self::ones(tensor.shape().clone()) } +} +impl TensorBase +where + T: Clone + Zero, +{ pub fn zeros(shape: impl IntoShape) -> Self { Self::fill(shape, T::zero()) } @@ -179,7 +196,7 @@ where } } } - let op = Op::Binary( + let op = TensorOp::Binary( Box::new(self.clone()), Box::new(other.clone()), BinaryOp::Matmul, diff --git a/tensor/tests/arith.rs b/tensor/tests/arith.rs index 25dc7788..f56e3a48 100644 --- a/tensor/tests/arith.rs +++ b/tensor/tests/arith.rs @@ -15,7 +15,11 @@ fn test_add() { let b = TensorBase::::ones(shape); let c = a + &b; - assert_eq!(c, TensorBase::::ones(shape) * 2.0); + assert_eq!(c, TensorBase::fill(shape, 2_f64)); + + let a = TensorBase::::ones(shape); + let b = a + 1_f64; + assert_eq!(b, TensorBase::fill(shape, 2_f64)); } #[test] From e3276c758460983c30fe9788a598806bfaa75064 Mon Sep 17 00:00:00 2001 From: Joe McCain III Date: Fri, 22 Mar 2024 09:08:06 -0500 Subject: [PATCH 41/87] update Signed-off-by: Joe McCain III --- acme/Cargo.toml | 1 + core/src/ops/arithmetic.rs | 74 +++++++++++++- core/src/ops/binary/kinds.rs | 9 ++ core/src/ops/binary/mod.rs | 10 +- core/src/ops/binary/operator.rs | 66 ++++++++++++ core/src/ops/kinds.rs | 8 -- core/src/ops/mod.rs | 6 ++ core/src/ops/unary/kinds.rs | 13 +++ core/src/ops/unary/mod.rs | 3 +- core/src/ops/unary/specs.rs | 111 +++++++++++++++++++++ core/src/specs/func/structural.rs | 1 + core/src/specs/hkt/applicative.rs | 98 ------------------ core/src/specs/hkt/functor.rs | 69 ------------- core/src/specs/hkt/mod.rs | 52 ---------- core/src/specs/hkt/monad.rs | 82 --------------- core/src/specs/mod.rs | 8 +- core/src/specs/{operator.rs => operand.rs} | 0 macros/src/lib.rs | 2 + tensor/src/impls/grad.rs | 8 +- tensor/src/impls/ops/binary.rs | 20 +++- tensor/src/lib.rs | 6 +- tensor/src/linalg/mod.rs | 4 + tensor/src/ops/kinds.rs | 9 +- tensor/src/ops/mod.rs | 2 - tensor/src/ops/op/binary.rs | 1 - tensor/src/ops/op/mod.rs | 16 --- tensor/src/ops/op/unary.rs | 68 ------------- tensor/src/store/layout.rs | 4 +- tensor/src/store/mod.rs | 3 + tensor/src/tensor.rs | 22 ++-- tensor/src/types/mod.rs | 10 ++ tensor/src/types/order.rs | 46 +++++++++ 32 files changed, 386 insertions(+), 446 deletions(-) create mode 100644 core/src/ops/binary/operator.rs create mode 100644 core/src/ops/unary/specs.rs delete 
mode 100644 core/src/specs/hkt/applicative.rs delete mode 100644 core/src/specs/hkt/functor.rs delete mode 100644 core/src/specs/hkt/mod.rs delete mode 100644 core/src/specs/hkt/monad.rs rename core/src/specs/{operator.rs => operand.rs} (100%) delete mode 100644 tensor/src/ops/op/binary.rs delete mode 100644 tensor/src/ops/op/mod.rs delete mode 100644 tensor/src/ops/op/unary.rs create mode 100644 tensor/src/types/mod.rs create mode 100644 tensor/src/types/order.rs diff --git a/acme/Cargo.toml b/acme/Cargo.toml index c2b48d22..1b8ddf7d 100644 --- a/acme/Cargo.toml +++ b/acme/Cargo.toml @@ -59,6 +59,7 @@ doctest = true test = true [[example]] +doc = true name = "autodiff" required-features = ["macros"] diff --git a/core/src/ops/arithmetic.rs b/core/src/ops/arithmetic.rs index 2d6ae6ed..be840e72 100644 --- a/core/src/ops/arithmetic.rs +++ b/core/src/ops/arithmetic.rs @@ -2,10 +2,13 @@ Appellation: arithmetic Contrib: FL03 */ +use super::BinaryOperation; #[cfg(feature = "serde")] use serde::{Deserialize, Serialize}; use std::ops::{Add, Div, Mul, Sub}; +pub trait ArithmeticOp: Add {} + macro_rules! operator { ($op:ident) => { #[derive(Clone, Copy, Debug, Default, Eq, Hash, Ord, PartialEq, PartialOrd)] @@ -17,8 +20,8 @@ macro_rules! operator { Self } - pub fn name(&self) -> &'static str { - stringify!($op) + pub fn name(&self) -> String { + stringify!($op).to_lowercase() } } }; @@ -28,7 +31,7 @@ macro_rules! impl_binary_op { ($op:ident, $bound:ident, $operator:tt) => { operator!($op); - impl crate::ops::BinaryOperation for $op + impl BinaryOperation for $op where A: $bound, { @@ -42,7 +45,7 @@ macro_rules! impl_binary_op { (expr $op:ident, $bound:ident, $exp:expr) => { operator!($op); - impl crate::ops::BinaryOperation for $op + impl BinaryOperation for $op where A: $bound, { @@ -62,3 +65,66 @@ impl_binary_op!(Division, Div, /); impl_binary_op!(Multiplication, Mul, *); impl_binary_op!(Subtraction, Sub, -); + +#[derive(Clone)] +pub enum Arithmetic { + Add(Addition), + Div(Division), + Mul(Multiplication), + Sub(Subtraction), +} + +impl Arithmetic { + pub fn new(op: Arithmetic) -> Self { + op + } + + pub fn add() -> Self { + Self::Add(Addition::new()) + } + + pub fn div() -> Self { + Self::Div(Division::new()) + } + + pub fn mul() -> Self { + Self::Mul(Multiplication::new()) + } + + pub fn sub() -> Self { + Self::Sub(Subtraction::new()) + } + + pub fn op(&self) -> Box> + where + A: Add + Div + Mul + Sub, + { + match self.clone() { + Arithmetic::Add(op) => Box::new(op), + Arithmetic::Div(op) => Box::new(op), + Arithmetic::Mul(op) => Box::new(op), + Arithmetic::Sub(op) => Box::new(op), + } + } + + pub fn name(&self) -> String { + match self { + Arithmetic::Add(op) => op.name(), + Arithmetic::Div(op) => op.name(), + Arithmetic::Mul(op) => op.name(), + Arithmetic::Sub(op) => op.name(), + } + } + + pub fn eval(&self, lhs: A, rhs: B) -> C + where + A: Add + Div + Mul + Sub, + { + match self { + Arithmetic::Add(op) => op.eval(lhs, rhs), + Arithmetic::Div(op) => op.eval(lhs, rhs), + Arithmetic::Mul(op) => op.eval(lhs, rhs), + Arithmetic::Sub(op) => op.eval(lhs, rhs), + } + } +} diff --git a/core/src/ops/binary/kinds.rs b/core/src/ops/binary/kinds.rs index 8c6ae23d..84c83e64 100644 --- a/core/src/ops/binary/kinds.rs +++ b/core/src/ops/binary/kinds.rs @@ -45,3 +45,12 @@ pub enum BinaryOp { Shl, Shr, } + +impl BinaryOp { + pub fn differentiable(&self) -> bool { + match self { + BinaryOp::Add | BinaryOp::Sub | BinaryOp::Mul | BinaryOp::Div | BinaryOp::Pow => true, + _ => false, + } + } +} diff --git 
a/core/src/ops/binary/mod.rs b/core/src/ops/binary/mod.rs
index 318ecf5a..e6e5b685 100644
--- a/core/src/ops/binary/mod.rs
+++ b/core/src/ops/binary/mod.rs
@@ -2,20 +2,16 @@
     Appellation: binary
     Contrib: FL03
 */
-pub use self::kinds::*;
+pub use self::{kinds::*, operator::*};
 
 pub(crate) mod kinds;
+pub(crate) mod operator;
 
-pub trait BinaryOperator<A, B> {
+pub trait BinOp<A, B> {
     type Output;
 
     fn apply(lhs: A, rhs: B) -> Self::Output;
 }
 
-pub struct BinaryO<A, B> {
-    pub args: (A, B),
-    pub op: BinaryOp,
-}
-
 #[cfg(test)]
 mod tests {}
diff --git a/core/src/ops/binary/operator.rs b/core/src/ops/binary/operator.rs
new file mode 100644
index 00000000..b12e7410
--- /dev/null
+++ b/core/src/ops/binary/operator.rs
@@ -0,0 +1,66 @@
+/*
+    Appellation: operator
+    Contrib: FL03
+*/
+use super::BinaryOp;
+
+pub struct BinaryArgs<A, B> {
+    pub lhs: A,
+    pub rhs: B,
+}
+
+impl<A, B> BinaryArgs<A, B> {
+    pub fn new(lhs: A, rhs: B) -> Self {
+        Self { lhs, rhs }
+    }
+
+    pub fn swap(self) -> BinaryArgs<B, A> {
+        BinaryArgs::new(self.rhs, self.lhs)
+    }
+
+    pub fn lhs(&self) -> &A {
+        &self.lhs
+    }
+
+    pub fn rhs(&self) -> &B {
+        &self.rhs
+    }
+}
+
+impl<A, B> From<BinaryArgs<A, B>> for (A, B) {
+    fn from(args: BinaryArgs<A, B>) -> Self {
+        (args.lhs, args.rhs)
+    }
+}
+
+impl<A, B> From<&BinaryArgs<A, B>> for (A, B)
+where
+    A: Clone,
+    B: Clone,
+{
+    fn from(args: &BinaryArgs<A, B>) -> Self {
+        (args.lhs.clone(), args.rhs.clone())
+    }
+}
+
+impl<A, B> From<(A, B)> for BinaryArgs<A, B> {
+    fn from((lhs, rhs): (A, B)) -> Self {
+        Self::new(lhs, rhs)
+    }
+}
+
+impl<A, B> From<&(A, B)> for BinaryArgs<A, B>
+where
+    A: Clone,
+    B: Clone,
+{
+    fn from((lhs, rhs): &(A, B)) -> Self {
+        Self::new(lhs.clone(), rhs.clone())
+    }
+}
+
+pub struct BinaryOperator<A, B> {
+    pub args: BinaryArgs<A, B>,
+    pub communitative: bool,
+    pub op: BinaryOp,
+}
diff --git a/core/src/ops/kinds.rs b/core/src/ops/kinds.rs
index da24f4f1..8deba2ce 100644
--- a/core/src/ops/kinds.rs
+++ b/core/src/ops/kinds.rs
@@ -9,14 +9,6 @@ use serde::{Deserialize, Serialize};
 use smart_default::SmartDefault;
 use strum::{Display, EnumCount, EnumIs, EnumIter, VariantNames};
 
-#[derive(Clone)]
-pub enum Expr<T> {
-    Binary(T, T, BinaryExpr),
-    Compare(T, T, CompareExpr),
-    Custom(String),
-    Unary(T, UnaryExpr),
-}
-
 #[cfg_attr(
     feature = "serde",
     derive(Deserialize, Serialize,),
diff --git a/core/src/ops/mod.rs b/core/src/ops/mod.rs
index cc85f193..6fa6d049 100644
--- a/core/src/ops/mod.rs
+++ b/core/src/ops/mod.rs
@@ -18,3 +18,9 @@ pub trait BinaryOperation<A, B = A> {
 
     fn eval(&self, lhs: A, rhs: B) -> Self::Output;
 }
+
+pub trait Operator {
+    type Output;
+
+    fn kind(&self) -> String;
+}
diff --git a/core/src/ops/unary/kinds.rs b/core/src/ops/unary/kinds.rs
index b7020b7c..8230b14c 100644
--- a/core/src/ops/unary/kinds.rs
+++ b/core/src/ops/unary/kinds.rs
@@ -36,6 +36,10 @@ pub enum UnaryOp {
     Cosh,
     Exp,
     Floor,
+    #[cfg_attr(
+        feature = "serde",
+        serde(alias = "inverse", alias = "recip", alias = "reciprocal")
+    )]
     Inv,
     Ln,
     Neg,
@@ -46,3 +50,12 @@ pub enum UnaryOp {
     Tan,
     Tanh,
 }
+
+impl UnaryOp {
+    pub fn differentiable(&self) -> bool {
+        match self {
+            UnaryOp::Floor | UnaryOp::Inv => false,
+            _ => true,
+        }
+    }
+}
diff --git a/core/src/ops/unary/mod.rs b/core/src/ops/unary/mod.rs
index 46e3f5a9..f18d6df7 100644
--- a/core/src/ops/unary/mod.rs
+++ b/core/src/ops/unary/mod.rs
@@ -5,9 +5,10 @@
 //! # Unary Operations
 //!
 //!
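Aside: the `core/src/ops/unary/specs.rs` file added below builds every unary operator out of the `unary_op_trait!`/`impl_unary_op!` macro pair. Hand-expanding the `Exp` case shows the whole pattern; this is a sketch of the expansion, not literal macro output:

```rust
// What `unary_op_trait!(Exp, exp)` plus `impl_unary_op!(Exp, exp, f64)`
// expand to, written out by hand: one single-method trait per operation,
// forwarded to the inherent method on the primitive type.
pub trait Exp {
    type Output;

    fn exp(self) -> Self::Output;
}

impl Exp for f64 {
    type Output = f64;

    fn exp(self) -> Self::Output {
        // fully-qualified call to the inherent f64::exp, so no recursion
        f64::exp(self)
    }
}

fn main() {
    assert!((1.0_f64.exp() - std::f64::consts::E).abs() < 1e-12);
}
```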
-pub use self::kinds::*; +pub use self::{kinds::*, specs::*}; pub(crate) mod kinds; +pub(crate) mod specs; pub trait UnaryOperation { type Output; diff --git a/core/src/ops/unary/specs.rs b/core/src/ops/unary/specs.rs new file mode 100644 index 00000000..f712cdd2 --- /dev/null +++ b/core/src/ops/unary/specs.rs @@ -0,0 +1,111 @@ +/* + Appellation: specs + Contrib: FL03 +*/ +use num::traits::{Inv, Num}; +use num::Complex; +use std::ops::Neg; + +macro_rules! unary_op_trait { + ($trait:ident, $method:ident) => { + pub trait $trait { + type Output; + + fn $method(self) -> Self::Output; + } + }; +} + +macro_rules! impl_unary_op { + ($trait:ident, $method:ident, $t:ty) => { + impl $trait for $t { + type Output = $t; + + fn $method(self) -> Self::Output { + <$t>::$method(self) + } + } + }; + ($trait:ident, $method:ident; [$($t:ty),*]) => { + $( + impl_unary_op!($trait, $method, $t); + )* + }; + ($trait:ident, $method:ident, $call:ident; $t:ty) => { + impl $trait for $t { + type Output = $t; + + fn $method(self) -> Self::Output { + <$t>::$call(self) + } + } + }; + (alts $trait:ident, $method:ident, $call:ident; [$($t:ty),*]) => { + $( + impl_unary_op!($trait, $method, $call; $t); + )* + }; +} + +unary_op_trait!(Abs, abs); +unary_op_trait!(Cos, cos); +unary_op_trait!(Cosh, cosh); +unary_op_trait!(Exp, exp); +unary_op_trait!(Ln, ln); +unary_op_trait!(Recip, recip); +unary_op_trait!(Sin, sin); +unary_op_trait!(Sinh, sinh); +unary_op_trait!(Sqrt, sqrt); +unary_op_trait!(Square, square); +unary_op_trait!(Tan, tan); +unary_op_trait!(Tanh, tanh); + +impl Abs for Complex +where + T: num::Float, +{ + type Output = T; + + fn abs(self) -> Self::Output { + let re = self.re.clone(); + let im = self.im.clone(); + let re = re * re; + let im = im * im; + let abs = re + im; + abs.sqrt() + } +} + +impl Recip for Complex +where + T: Clone + Num + Neg, +{ + type Output = Complex; + + fn recip(self) -> Self::Output { + self.inv() + } +} + +impl Square for T +where + T: Copy + std::ops::Mul, +{ + type Output = T; + + fn square(self) -> Self::Output { + self * self + } +} + +impl_unary_op!(Abs, abs; [isize, i8, i16, i32, i64, i128, f32, f64]); +impl_unary_op!(Cos, cos; [f64, f32, Complex, Complex]); +impl_unary_op!(Cosh, cosh; [f64, f32, Complex, Complex]); +impl_unary_op!(Exp, exp; [f64, f32, Complex, Complex]); +impl_unary_op!(Ln, ln; [f64, f32, Complex, Complex]); +impl_unary_op!(alts Recip, recip, inv; [f64, f32]); +impl_unary_op!(Sin, sin; [f64, f32, Complex, Complex]); +impl_unary_op!(Sinh, sinh; [f64, f32, Complex, Complex]); +impl_unary_op!(Sqrt, sqrt; [f64, f32, Complex, Complex]); +impl_unary_op!(Tan, tan; [f64, f32, Complex, Complex]); +impl_unary_op!(Tanh, tanh; [f64, f32, Complex, Complex]); diff --git a/core/src/specs/func/structural.rs b/core/src/specs/func/structural.rs index 409c63d8..ea80def6 100644 --- a/core/src/specs/func/structural.rs +++ b/core/src/specs/func/structural.rs @@ -4,6 +4,7 @@ */ pub trait StructuralFn { + type Args: StructuredArgs; type Output; fn eval(&self) -> Self::Output; diff --git a/core/src/specs/hkt/applicative.rs b/core/src/specs/hkt/applicative.rs deleted file mode 100644 index 0d614f81..00000000 --- a/core/src/specs/hkt/applicative.rs +++ /dev/null @@ -1,98 +0,0 @@ -/* - Appellation: applicative - Contrib: FL03 -*/ -use super::functor::Functor; -use super::HKT; - -use std::rc::Rc; -use std::sync::Arc; - -pub trait Applicative: Functor { - fn pure_(value: U) -> Self::T - where - Self: HKT; - fn seq(&self, fs: >::T) -> >::T - where - F: Fn(&>::C) -> U, - Self: HKT; -} - -impl 
Applicative for Arc { - fn pure_(value: U) -> Self::T { - Arc::new(value) - } - - fn seq(&self, fs: >::T) -> Arc - where - F: Fn(&T) -> U, - { - let v = fs(self); - Arc::new(v) - } -} - -impl Applicative for Box { - fn pure_(value: U) -> Self::T { - Box::new(value) - } - - fn seq(&self, fs: >::T) -> Box - where - F: Fn(&T) -> U, - { - let v = fs(self); - Box::new(v) - } -} - -impl Applicative for Option { - fn pure_(value: U) -> Self::T { - Some(value) - } - - fn seq(&self, fs: >::T) -> Option - where - F: Fn(&T) -> U, - { - match *self { - Some(ref value) => match fs { - Some(f) => Some(f(value)), - None => None, - }, - None => None, - } - } -} - -impl Applicative for Rc { - fn pure_(value: U) -> Self::T { - Rc::new(value) - } - - fn seq(&self, fs: >::T) -> Rc - where - F: Fn(&T) -> U, - { - let v = fs(self); - Rc::new(v) - } -} - -impl Applicative for Vec { - fn pure_(value: U) -> Self::T { - vec![value] - } - - fn seq(&self, fs: >::T) -> Vec - where - F: Fn(&T) -> U, - { - let mut result = Vec::new(); - for (i, f) in fs.into_iter().enumerate() { - let v = (f)(&self[i]); - result.push(v) - } - return result; - } -} diff --git a/core/src/specs/hkt/functor.rs b/core/src/specs/hkt/functor.rs deleted file mode 100644 index 0f865492..00000000 --- a/core/src/specs/hkt/functor.rs +++ /dev/null @@ -1,69 +0,0 @@ -/* - Appellation: functor - Contrib: FL03 -*/ -//! # Functor -//! -//! A functor is a type that when mapped over, preserves the structure of the type while applying a function to the values within the type. -//! Functors are useful for modeling the functional effects on values of parameterized data types. -use super::HKT; -use std::rc::Rc; -use std::sync::Arc; - -pub trait Functor: HKT { - fn fmap(&self, f: F) -> Self::T - where - F: Fn(&Self::C) -> U; -} - -impl Functor for Arc { - fn fmap(&self, f: F) -> Arc - where - F: Fn(&T) -> U, - { - Arc::new(f(self)) - } -} - -impl Functor for Box { - fn fmap(&self, f: F) -> Box - where - F: Fn(&T) -> U, - { - Box::new(f(self)) - } -} - -impl Functor for Option { - fn fmap(&self, f: F) -> Option - where - F: Fn(&T) -> U, - { - if let Some(ref value) = self { - return Some(f(value)); - } - None - } -} - -impl Functor for Rc { - fn fmap(&self, f: F) -> Rc - where - F: Fn(&T) -> U, - { - Rc::new(f(self)) - } -} - -impl Functor for Vec { - fn fmap(&self, f: F) -> Vec - where - F: Fn(&T) -> U, - { - let mut result = Vec::with_capacity(self.len()); - for value in self { - result.push(f(value)); - } - result - } -} diff --git a/core/src/specs/hkt/mod.rs b/core/src/specs/hkt/mod.rs deleted file mode 100644 index a955c2ce..00000000 --- a/core/src/specs/hkt/mod.rs +++ /dev/null @@ -1,52 +0,0 @@ -/* - Appellation: hkt - Contrib: FL03 -*/ -//! # Higher Kinded Types -//! -//! - -pub mod applicative; -pub mod functor; -pub mod monad; - -use std::rc::Rc; -use std::sync::Arc; - -pub trait HKT { - type C; // Current Type - type T; // Type C swapped with U -} - -macro_rules! 
hkt { - ($t:ident) => { - impl HKT for $t { - type C = T; - type T = $t; - } - }; -} - -hkt!(Arc); -hkt!(Box); -hkt!(Option); -hkt!(Rc); -hkt!(Vec); - -#[cfg(test)] -mod tests { - - use super::functor::Functor; - use super::monad::Monad; - - #[test] - fn test_hkt_vec() { - let v = Vec::from_iter(0..9); - let v2 = v.fmap(|x| (x + 1).to_string()); - assert_eq!(v2, vec!["1", "2", "3", "4", "5", "6", "7", "8", "9"]); - - let v = Vec::return_(0); - let v2 = v.bind(|x| vec![x + 1]); - assert_eq!(v2, vec![1]); - } -} diff --git a/core/src/specs/hkt/monad.rs b/core/src/specs/hkt/monad.rs deleted file mode 100644 index 74c091dc..00000000 --- a/core/src/specs/hkt/monad.rs +++ /dev/null @@ -1,82 +0,0 @@ -/* - Appellation: monad - Contrib: FL03 -*/ -use super::applicative::Applicative; -use super::HKT; - -use std::rc::Rc; -use std::sync::Arc; - -pub trait Monad: Applicative { - fn return_(x: U) -> Self::T - where - Self: HKT, - { - Self::pure_(x) - } - - fn bind(&self, fs: F) -> Self::T - where - F: FnMut(&Self::C) -> Self::T; - - fn join(&self) -> T - where - Self: HKT, - T: Clone, - { - self.bind(|x| x.clone()) - } -} - -impl Monad for Arc { - fn bind(&self, mut fs: F) -> Arc - where - F: FnMut(&T) -> Arc, - { - fs(self) - } -} - -impl Monad for Box { - fn bind(&self, mut fs: F) -> Box - where - F: FnMut(&T) -> Box, - { - fs(self) - } -} - -impl Monad for Option { - fn bind(&self, mut fs: F) -> Option - where - F: FnMut(&T) -> Option, - { - match *self { - Some(ref value) => fs(value), - None => None, - } - } -} - -impl Monad for Rc { - fn bind(&self, mut fs: F) -> Rc - where - F: FnMut(&T) -> Rc, - { - fs(self) - } -} - -impl Monad for Vec { - fn bind(&self, mut fs: F) -> Vec - where - F: FnMut(&T) -> Vec, - { - let mut v = Vec::new(); - for x in self { - v.extend(fs(x)); - } - v - } -} diff --git a/core/src/specs/mod.rs b/core/src/specs/mod.rs index 3c3411b3..072d3580 100644 --- a/core/src/specs/mod.rs +++ b/core/src/specs/mod.rs @@ -3,19 +3,17 @@ Contrib: FL03 */ -pub use self::{gradient::*, operator::*}; +pub use self::{gradient::*, operand::*}; pub(crate) mod gradient; -pub(crate) mod operator; +pub(crate) mod operand; pub mod func; -pub mod hkt; pub(crate) mod prelude { pub use super::func::*; pub use super::gradient::*; - pub use super::hkt::*; - pub use super::operator::*; + pub use super::operand::Operand; } #[cfg(test)] diff --git a/core/src/specs/operator.rs b/core/src/specs/operand.rs similarity index 100% rename from core/src/specs/operator.rs rename to core/src/specs/operand.rs diff --git a/macros/src/lib.rs b/macros/src/lib.rs index 2d6b0c92..a8ee3435 100644 --- a/macros/src/lib.rs +++ b/macros/src/lib.rs @@ -30,6 +30,8 @@ pub fn partial(_attr: TokenStream, item: TokenStream) -> TokenStream { TokenStream::from(result) } +/// Compute the gradient of an expression +/// #[proc_macro] pub fn autodiff(input: TokenStream) -> TokenStream { // Parse the input expression into a syntax tree diff --git a/tensor/src/impls/grad.rs b/tensor/src/impls/grad.rs index 3588e733..add23a48 100644 --- a/tensor/src/impls/grad.rs +++ b/tensor/src/impls/grad.rs @@ -21,9 +21,6 @@ where if let Some(op) = &self.op { match op { - TensorOp::Unary(_a, kind) => match kind { - _ => todo!(), - }, TensorOp::Binary(a, b, kind) => match kind { BinaryOp::Add => { *store @@ -35,7 +32,10 @@ where } _ => todo!(), }, - // _ => {} + TensorOp::Unary(_a, kind) => match kind { + _ => todo!(), + }, + _ => {} } } store diff --git a/tensor/src/impls/ops/binary.rs b/tensor/src/impls/ops/binary.rs index 6a36822c..59d4d9da 100644 --- 
a/tensor/src/impls/ops/binary.rs +++ b/tensor/src/impls/ops/binary.rs @@ -97,8 +97,9 @@ macro_rules! impl_scalar_arith { fn $method(self, other: T) -> Self::Output { let shape = self.shape().clone(); - let store = self.into_store().iter().map(|a| *a $op other).collect(); - Self::Output::from_vec(shape, store) + let store = self.data().iter().map(|a| *a $op other).collect(); + let op = TensorOp::BinaryScalar(Box::new(self), other, BinaryOp::$trait); + from_vec_with_op(op, shape, store) } } @@ -111,7 +112,8 @@ macro_rules! impl_scalar_arith { fn $method(self, other: T) -> Self::Output { let shape = self.shape().clone(); let store = self.data().iter().map(|a| *a $op other).collect(); - Self::Output::from_vec(shape, store) + let op = TensorOp::BinaryScalar(Box::new(self.clone()), other, BinaryOp::$trait); + from_vec_with_op(op, shape, store) } } }; @@ -125,7 +127,11 @@ macro_rules! impl_assign_op { { fn $method(&mut self, other: Self) { cmp!(ne: self.shape(), other.shape()); - self.store = self.store.iter().zip(other.store.iter()).map(|(a, b)| *a $op *b).collect(); + let shape = self.shape().clone(); + let store = self.data().iter().zip(other.data().iter()).map(|(a, b)| *a $op *b).collect(); + let op = TensorOp::Binary(Box::new(self.clone()), Box::new(other), BinaryOp::$inner); + + *self = from_vec_with_op(op, shape, store); } } @@ -135,7 +141,11 @@ macro_rules! impl_assign_op { { fn $method(&mut self, other: &'a TensorBase) { cmp!(ne: self.shape(), other.shape()); - self.store = self.store.iter().zip(other.store.iter()).map(|(a, b)| *a $op *b).collect(); + let shape = self.shape().clone(); + let store = self.data().iter().zip(other.data().iter()).map(|(a, b)| *a $op *b).collect(); + let op = TensorOp::Binary(Box::new(self.clone()), Box::new(other.clone()), BinaryOp::$inner); + + *self = from_vec_with_op(op, shape, store); } } }; diff --git a/tensor/src/lib.rs b/tensor/src/lib.rs index 9dc24d86..6245c89f 100644 --- a/tensor/src/lib.rs +++ b/tensor/src/lib.rs @@ -2,8 +2,9 @@ Appellation: acme-tensor Contrib: FL03 */ -//! # acme-tensor +//! # Tensor //! +//! This library implements a tensor data structure with support for automatic differentiation. //! #[cfg(not(feature = "std"))] extern crate alloc; @@ -23,6 +24,7 @@ pub mod ops; pub mod shape; pub mod specs; pub mod store; +pub mod types; mod impls { mod ops { @@ -50,5 +52,7 @@ pub mod prelude { #[doc(inline)] pub use crate::store::*; #[doc(inline)] + pub use crate::types::prelude::*; + #[doc(inline)] pub use crate::Tensor; } diff --git a/tensor/src/linalg/mod.rs b/tensor/src/linalg/mod.rs index b0c82083..667ff8cb 100644 --- a/tensor/src/linalg/mod.rs +++ b/tensor/src/linalg/mod.rs @@ -7,5 +7,9 @@ //! 
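Stepping back to the `impl_assign_op!` rewrite above: instead of mutating `self.store` in place, the assign operators now recompute the store and rebuild `*self` through `from_vec_with_op`, so even `+=` leaves a recorded op behind. A minimal standalone sketch of that pattern (toy `Tape`/`Op` types, not the crate's API):

```rust
// Sketch: an in-place add that still records provenance by replacing
// `*self` with a freshly built value carrying the op that produced it.
#[derive(Clone, Debug)]
enum Op {
    Add(Box<Tape>, Box<Tape>),
}

#[derive(Clone, Debug)]
struct Tape {
    data: Vec<f64>,
    op: Option<Op>,
}

impl std::ops::AddAssign<&Tape> for Tape {
    fn add_assign(&mut self, rhs: &Tape) {
        let data = self
            .data
            .iter()
            .zip(rhs.data.iter())
            .map(|(a, b)| a + b)
            .collect();
        // capture both operands before overwriting self
        let op = Op::Add(Box::new(self.clone()), Box::new(rhs.clone()));
        *self = Tape { data, op: Some(op) };
    }
}

fn main() {
    let mut a = Tape { data: vec![1.0, 2.0], op: None };
    let b = Tape { data: vec![3.0, 4.0], op: None };
    a += &b;
    println!("{:?}, recorded: {}", a.data, a.op.is_some()); // [4.0, 6.0], recorded: true
}
```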
pub mod arith; +pub trait Inverse { + fn inverse(&self) -> Self; +} + #[cfg(test)] mod tests {} diff --git a/tensor/src/ops/kinds.rs b/tensor/src/ops/kinds.rs index 714026ae..dfa18cf9 100644 --- a/tensor/src/ops/kinds.rs +++ b/tensor/src/ops/kinds.rs @@ -10,6 +10,7 @@ use strum::{Display, EnumCount, EnumIs, EnumIter, EnumString, VariantNames}; #[derive(Clone, Debug)] pub enum TensorOp { Binary(Box>, Box>, BinaryOp), + BinaryScalar(Box>, T, BinaryOp), Unary(Box>, UnaryOp), } @@ -84,13 +85,7 @@ pub enum UnaryOp { Tanh, } -pub struct BinOp { - pub lhs: TensorBase, - pub rhs: TensorBase, - pub op: BinaryOp, -} - -pub enum OpInput { +pub enum Inputs { Scalar(T), Tensor(TensorBase), } diff --git a/tensor/src/ops/mod.rs b/tensor/src/ops/mod.rs index 287e392b..93663896 100644 --- a/tensor/src/ops/mod.rs +++ b/tensor/src/ops/mod.rs @@ -7,8 +7,6 @@ pub use self::{backprop::*, kinds::*}; pub(crate) mod backprop; pub(crate) mod kinds; -pub mod op; - pub trait TensorExpr {} #[cfg(test)] diff --git a/tensor/src/ops/op/binary.rs b/tensor/src/ops/op/binary.rs deleted file mode 100644 index 8b137891..00000000 --- a/tensor/src/ops/op/binary.rs +++ /dev/null @@ -1 +0,0 @@ - diff --git a/tensor/src/ops/op/mod.rs b/tensor/src/ops/op/mod.rs deleted file mode 100644 index 01ed16c5..00000000 --- a/tensor/src/ops/op/mod.rs +++ /dev/null @@ -1,16 +0,0 @@ -/* - Appellation: ops - Contrib: FL03 -*/ - -pub mod binary; -pub mod unary; - -#[cfg(test)] -mod tests { - - #[test] - fn test_ops() { - assert_eq!(1 + 1, 2); - } -} diff --git a/tensor/src/ops/op/unary.rs b/tensor/src/ops/op/unary.rs deleted file mode 100644 index d7774f12..00000000 --- a/tensor/src/ops/op/unary.rs +++ /dev/null @@ -1,68 +0,0 @@ -/* - Appellation: unary - Contrib: FL03 -*/ -use crate::prelude::Scalar; - -macro_rules! unary_op_trait { - ($trait:ident, $method:ident) => { - pub trait $trait { - type Output; - - fn $method(self) -> Self::Output; - } - }; -} - -macro_rules! impl_unary_trait { - ($trait:ident, $method:ident) => { - impl $trait for T - where - T: Scalar, - { - type Output = T; - - fn $method(self) -> Self::Output { - ::$method(self) - } - } - }; -} - -unary_op_trait!(Abs, abs); -unary_op_trait!(Cos, cos); -unary_op_trait!(Cosh, cosh); -unary_op_trait!(Exp, exp); -unary_op_trait!(Ln, ln); -unary_op_trait!(Recip, recip); -unary_op_trait!(Sin, sin); -unary_op_trait!(Sinh, sinh); -unary_op_trait!(Sqrt, sqrt); -unary_op_trait!(Square, square); -unary_op_trait!(Tan, tan); -unary_op_trait!(Tanh, tanh); - -impl Abs for T -where - T: num::Signed, -{ - type Output = T; - - fn abs(self) -> Self::Output { - ::abs(&self) - } -} - -// impl Cos for T -// where -// T: Scalar, -// { -// type Output = T; - -// fn cos(self) -> Self::Output { -// ::cos(self) -// } -// } - -impl_unary_trait!(Cos, cos); -impl_unary_trait!(Cosh, cosh); diff --git a/tensor/src/store/layout.rs b/tensor/src/store/layout.rs index 4877a25f..42ae33d7 100644 --- a/tensor/src/store/layout.rs +++ b/tensor/src/store/layout.rs @@ -2,7 +2,9 @@ Appellation: layout Contrib: FL03 */ - +//! # Layout +//! +//! use crate::shape::{IntoShape, Shape}; #[derive(Clone, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)] diff --git a/tensor/src/store/mod.rs b/tensor/src/store/mod.rs index 9f9eaef6..9882cbdc 100644 --- a/tensor/src/store/mod.rs +++ b/tensor/src/store/mod.rs @@ -2,6 +2,9 @@ Appellation: store Contrib: FL03 */ +//! # Store +//! +//! This module provides the storage and layout for the tensor data structure. 
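For orientation, the stride arithmetic a contiguous `Layout` performs, shapes to row-major strides and coordinates to a flat offset, in a standalone sketch (illustrative names, not the crate's API):

```rust
// Row-major (C-order) strides: the last axis varies fastest.
fn contiguous_strides(shape: &[usize]) -> Vec<usize> {
    let mut strides = vec![1; shape.len()];
    for i in (0..shape.len().saturating_sub(1)).rev() {
        strides[i] = strides[i + 1] * shape[i + 1];
    }
    strides
}

// Flat offset of an n-d coordinate: dot product with the strides.
fn position(coords: &[usize], strides: &[usize]) -> usize {
    coords.iter().zip(strides).map(|(c, s)| c * s).sum()
}

fn main() {
    let shape = [2, 3, 4];
    let strides = contiguous_strides(&shape); // [12, 4, 1]
    // the last valid coordinate of a 24-element tensor maps to offset 23
    assert_eq!(position(&[1, 2, 3], &strides), 23);
}
```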
pub use self::{layout::*, storage::*}; pub(crate) mod layout; diff --git a/tensor/src/tensor.rs b/tensor/src/tensor.rs index 0394923c..2a9bf93b 100644 --- a/tensor/src/tensor.rs +++ b/tensor/src/tensor.rs @@ -65,7 +65,7 @@ impl TensorBase { pub fn id(&self) -> usize { self.id.get() } - + /// Get a reference to the layout of the tensor pub fn layout(&self) -> &Layout { &self.layout } @@ -91,30 +91,20 @@ impl TensorBase { pub(crate) fn data(&self) -> &Vec { &self.store } - - pub(crate) fn into_store(self) -> Vec { - self.store - } - - pub(crate) fn snapshot(&self) -> Vec - where - T: Clone, - { - self.store.clone() - } } impl TensorBase where T: Clone, { + /// Create an empty tensor from the given shape pub fn empty(shape: impl IntoShape) -> Self where T: Default, { Self::fill(shape, T::default()) } - + /// Create a tensor, from the given shape, filled with the given value pub fn fill(shape: impl IntoShape, value: T) -> Self { let shape = shape.into_shape(); let store = vec![value; shape.elements()]; @@ -158,10 +148,11 @@ impl TensorBase where T: Clone + One, { + /// Create a tensor, filled with ones, from the given shape pub fn ones(shape: impl IntoShape) -> Self { Self::fill(shape, T::one()) } - + /// Create a tensor, filled with ones, from the shape of another tensor pub fn ones_like(tensor: &TensorBase) -> Self { Self::ones(tensor.shape().clone()) } @@ -171,10 +162,11 @@ impl TensorBase where T: Clone + Zero, { + /// Create a tensor, filled with zeros, from the given shape pub fn zeros(shape: impl IntoShape) -> Self { Self::fill(shape, T::zero()) } - + /// Create a tensor, filled with zeros, from the shape of another tensor pub fn zeros_like(tensor: &TensorBase) -> Self { Self::zeros(tensor.shape().clone()) } diff --git a/tensor/src/types/mod.rs b/tensor/src/types/mod.rs new file mode 100644 index 00000000..f03d19ee --- /dev/null +++ b/tensor/src/types/mod.rs @@ -0,0 +1,10 @@ +/* + Appellation: types + Contrib: FL03 +*/ + +pub mod order; + +pub(crate) mod prelude { + pub use super::order::MajorOrder; +} diff --git a/tensor/src/types/order.rs b/tensor/src/types/order.rs new file mode 100644 index 00000000..a229018c --- /dev/null +++ b/tensor/src/types/order.rs @@ -0,0 +1,46 @@ +/* + Appellation: order + Contrib: FL03 +*/ +#[cfg(feature = "serde")] +use serde::{Deserialize, Serialize}; +use strum::{Display, EnumCount, EnumIter, EnumString, VariantNames}; + +#[cfg_attr( + feature = "serde", + derive(Deserialize, Serialize,), + serde(rename_all = "lowercase", untagged) +)] +#[derive( + Clone, + Copy, + Debug, + Default, + Display, + EnumCount, + EnumIter, + EnumString, + Eq, + Hash, + Ord, + PartialEq, + PartialOrd, + VariantNames, +)] +#[repr(u8)] +#[strum(serialize_all = "lowercase")] +pub enum MajorOrder { + Column, + #[default] + Row, +} + +impl MajorOrder { + pub fn column() -> Self { + Self::Column + } + + pub fn row() -> Self { + Self::Row + } +} From c76c3759c689de42409fbb21ff20ee0a5f0c087f Mon Sep 17 00:00:00 2001 From: Joe McCain III Date: Fri, 22 Mar 2024 11:17:50 -0500 Subject: [PATCH 42/87] update Signed-off-by: Joe McCain III --- acme/examples/cgraph.rs | 8 +- core/src/ops/binary/mod.rs | 6 - core/src/ops/binary/operator.rs | 4 +- core/src/ops/kinds.rs | 238 +-------------------- core/src/ops/mod.rs | 11 +- core/src/ops/unary/specs.rs | 33 +-- graphs/src/dcg/edge.rs | 2 +- graphs/src/dcg/graph.rs | 5 +- graphs/src/dcg/node.rs | 2 +- graphs/src/grad/store.rs | 2 +- graphs/src/lib.rs | 3 + {core => graphs}/src/ops/arithmetic.rs | 63 ++++-- graphs/src/ops/kinds.rs | 263 
++++++++++++++++++++++++ graphs/src/ops/mod.rs | 23 +++ graphs/src/scg/graph.rs | 2 +- graphs/src/scg/node.rs | 2 +- tensor/src/actions/index/mod.rs | 13 ++ tensor/src/actions/index/slice.rs | 13 ++ tensor/src/actions/mod.rs | 1 + tensor/src/{errors => }/error.rs | 37 ++-- tensor/src/errors/mod.rs | 9 - tensor/src/impls/create.rs | 121 +++++++++++ tensor/src/impls/grad.rs | 7 +- tensor/src/impls/linalg.rs | 26 ++- tensor/src/impls/ops/binary.rs | 4 +- tensor/src/impls/ops/unary.rs | 4 +- tensor/src/impls/reshape.rs | 25 +++ tensor/src/lib.rs | 8 +- tensor/src/ops/backprop.rs | 33 ++- tensor/src/ops/kinds.rs | 77 +------ tensor/src/shape/dim/axis.rs | 18 ++ tensor/src/shape/{ => dim}/dimension.rs | 2 +- tensor/src/shape/dim/mod.rs | 34 +++ tensor/src/shape/{ => dim}/rank.rs | 0 tensor/src/shape/error.rs | 43 ++++ tensor/src/shape/mod.rs | 13 +- tensor/src/shape/shape.rs | 17 +- tensor/src/specs/ndtensor.rs | 2 +- tensor/src/tensor.rs | 130 +----------- tensor/src/types/id.rs | 88 ++++++++ tensor/src/types/mod.rs | 2 + tensor/src/types/order.rs | 15 ++ tensor/tests/arith.rs | 39 ++-- tensor/tests/composition.rs | 17 +- 44 files changed, 891 insertions(+), 574 deletions(-) rename {core => graphs}/src/ops/arithmetic.rs (73%) create mode 100644 graphs/src/ops/kinds.rs create mode 100644 graphs/src/ops/mod.rs create mode 100644 tensor/src/actions/index/mod.rs create mode 100644 tensor/src/actions/index/slice.rs rename tensor/src/{errors => }/error.rs (72%) delete mode 100644 tensor/src/errors/mod.rs create mode 100644 tensor/src/impls/create.rs create mode 100644 tensor/src/impls/reshape.rs create mode 100644 tensor/src/shape/dim/axis.rs rename tensor/src/shape/{ => dim}/dimension.rs (77%) create mode 100644 tensor/src/shape/dim/mod.rs rename tensor/src/shape/{ => dim}/rank.rs (100%) create mode 100644 tensor/src/shape/error.rs create mode 100644 tensor/src/types/id.rs diff --git a/acme/examples/cgraph.rs b/acme/examples/cgraph.rs index dd92ff96..44bc6ebe 100644 --- a/acme/examples/cgraph.rs +++ b/acme/examples/cgraph.rs @@ -1,13 +1,15 @@ /* - Appellation: simple + Appellation: compute_graph Contrib: FL03 */ +#![cfg(feature = "graph")] + extern crate acme; +use acme::graph::prelude::GraphResult; use acme::graph::scg::Scg; -use acme::prelude::Result; -fn main() -> Result<()> { +fn main() -> GraphResult<()> { let mut scg = Scg::new(); let x = scg.variable(1.0); let y = scg.variable(2.0); diff --git a/core/src/ops/binary/mod.rs b/core/src/ops/binary/mod.rs index e6e5b685..e941c04f 100644 --- a/core/src/ops/binary/mod.rs +++ b/core/src/ops/binary/mod.rs @@ -7,11 +7,5 @@ pub use self::{kinds::*, operator::*}; pub(crate) mod kinds; pub(crate) mod operator; -pub trait BinOp { - type Output; - - fn apply(lhs: A, rhs: B) -> Self::Output; -} - #[cfg(test)] mod tests {} diff --git a/core/src/ops/binary/operator.rs b/core/src/ops/binary/operator.rs index b12e7410..72fb146d 100644 --- a/core/src/ops/binary/operator.rs +++ b/core/src/ops/binary/operator.rs @@ -4,7 +4,7 @@ */ use super::BinaryOp; -pub struct BinaryArgs { +pub struct BinaryArgs { pub lhs: A, pub rhs: B, } @@ -59,7 +59,7 @@ where } } -pub struct BinaryOperator { +pub struct BinaryOperator { pub args: BinaryArgs, pub communitative: bool, pub op: BinaryOp, diff --git a/core/src/ops/kinds.rs b/core/src/ops/kinds.rs index 8deba2ce..cca99083 100644 --- a/core/src/ops/kinds.rs +++ b/core/src/ops/kinds.rs @@ -2,11 +2,10 @@ Appellation: kinds Contrib: FL03 */ -use super::arithmetic::*; -use super::BinaryOperation; +use super::binary::{BinaryOp, 
BinaryOperator}; +use super::unary::UnaryOp; #[cfg(feature = "serde")] use serde::{Deserialize, Serialize}; -use smart_default::SmartDefault; use strum::{Display, EnumCount, EnumIs, EnumIter, VariantNames}; #[cfg_attr( @@ -18,7 +17,6 @@ use strum::{Display, EnumCount, EnumIs, EnumIter, VariantNames}; Clone, Copy, Debug, - Default, Display, EnumCount, EnumIs, @@ -30,234 +28,12 @@ use strum::{Display, EnumCount, EnumIs, EnumIter, VariantNames}; PartialOrd, VariantNames, )] -#[repr(u8)] #[strum(serialize_all = "lowercase")] -pub enum CompareExpr { - #[default] - Eq, - Ge, - Gt, - Le, - Lt, - Ne, -} - -#[derive( - Clone, - Copy, - Debug, - Display, - EnumCount, - EnumIs, - EnumIter, - Eq, - Hash, - Ord, - PartialEq, - PartialOrd, - SmartDefault, - VariantNames, -)] -#[cfg_attr( - feature = "serde", - derive(Deserialize, Serialize,), - serde(rename_all = "lowercase", untagged) -)] -#[repr(u8)] -#[strum(serialize_all = "lowercase")] -pub enum BinaryExpr { - #[default] - Add(Addition), - Div(Division), - Maximum, - Minimum, - Mul(Multiplication), - Sub(Subtraction), -} - -impl BinaryExpr { - pub fn add() -> Self { - Self::Add(Addition) - } - - pub fn div() -> Self { - Self::Div(Division) - } - - pub fn maximum() -> Self { - Self::Maximum - } - - pub fn minimum() -> Self { - Self::Minimum - } - - pub fn mul() -> Self { - Self::Mul(Multiplication) - } - - pub fn sub() -> Self { - Self::Sub(Subtraction) - } - - pub fn is_commutative(&self) -> bool { - match self { - Self::Add(_) | Self::Mul(_) => true, - _ => false, - } - } -} - -impl BinaryOperation for BinaryExpr -where - T: Copy + Default + PartialOrd + num::traits::NumOps, -{ - type Output = T; - - fn eval(&self, lhs: T, rhs: T) -> Self::Output { - match self { - Self::Add(_) => lhs + rhs, - Self::Div(_) => lhs / rhs, - Self::Maximum => { - if lhs > rhs { - lhs - } else { - rhs - } - } - Self::Minimum => { - if lhs < rhs { - lhs - } else { - rhs - } - } - Self::Mul(_) => lhs * rhs, - Self::Sub(_) => lhs - rhs, - } - } -} - -impl From for BinaryExpr { - fn from(_: Addition) -> Self { - Self::Add(Addition) - } -} - -impl From for BinaryExpr { - fn from(_: Division) -> Self { - Self::Div(Division) - } -} - -#[derive( - Clone, - Copy, - Debug, - Default, - Display, - EnumCount, - EnumIs, - EnumIter, - Eq, - Hash, - Ord, - PartialEq, - PartialOrd, - VariantNames, -)] -#[cfg_attr( - feature = "serde", - derive(Deserialize, Serialize,), - serde(rename_all = "lowercase", untagged) -)] -#[repr(u8)] -#[strum(serialize_all = "lowercase")] -pub enum UnaryExpr { - #[default] - Abs, - Ceil, - Cos, - Cosh, - Exp, - Inverse, // or Reciprocal - Floor, - Log, - Neg, - Round, - Rsqrt, - Sin, - Sinh, - Sqrt, - Tan, - Tanh, -} - -#[derive( - Clone, - Debug, - Display, - EnumCount, - EnumIs, - EnumIter, - Eq, - Hash, - Ord, - PartialEq, - PartialOrd, - SmartDefault, - VariantNames, -)] -#[cfg_attr( - feature = "serde", - derive(Deserialize, Serialize,), - serde(rename_all = "lowercase", untagged) -)] -#[repr(u8)] -#[strum(serialize_all = "lowercase")] -pub enum Operations { - Binary(BinaryExpr), - Compare(CompareExpr), - #[default] - Unary(UnaryExpr), - Custom { - name: String, - }, -} - -impl Operations { - /// A functional constructor for [Ops::Binary] - pub fn binary(op: BinaryExpr) -> Self { - Self::Binary(op) - } - /// A functional constructor for [Ops::Compare] - pub fn compare(op: CompareExpr) -> Self { - Self::Compare(op) - } - /// A functional constructor for [Ops::Custom] - pub fn custom(name: impl Into) -> Self { - Self::Custom { name: name.into() } - } - 
/// A functional constructor for [Ops::Unary] - pub fn unary(op: UnaryExpr) -> Self { - Self::Unary(op) - } -} - -impl From for Operations { - fn from(op: BinaryExpr) -> Self { - Self::Binary(op) - } -} - -impl From for Operations { - fn from(op: CompareExpr) -> Self { - Self::Compare(op) - } +pub enum Op { + Binary(BinaryOp), + Unary(UnaryOp), } -impl From for Operations { - fn from(op: UnaryExpr) -> Self { - Self::Unary(op) - } +pub enum Expr { + Binary(BinaryOperator>), } diff --git a/core/src/ops/mod.rs b/core/src/ops/mod.rs index 6fa6d049..1e31fca0 100644 --- a/core/src/ops/mod.rs +++ b/core/src/ops/mod.rs @@ -5,21 +5,14 @@ //! # Operations //! //! -pub use self::{arithmetic::*, kinds::*}; +pub use self::kinds::*; -pub(crate) mod arithmetic; pub(crate) mod kinds; pub mod binary; pub mod unary; -pub trait BinaryOperation { - type Output; - - fn eval(&self, lhs: A, rhs: B) -> Self::Output; -} - -pub trait Operator { +pub trait Operation { type Output; fn kind(&self) -> String; diff --git a/core/src/ops/unary/specs.rs b/core/src/ops/unary/specs.rs index f712cdd2..4e9ceb00 100644 --- a/core/src/ops/unary/specs.rs +++ b/core/src/ops/unary/specs.rs @@ -2,9 +2,8 @@ Appellation: specs Contrib: FL03 */ -use num::traits::{Inv, Num}; +use num::traits::Inv; use num::Complex; -use std::ops::Neg; macro_rules! unary_op_trait { ($trait:ident, $method:ident) => { @@ -14,6 +13,13 @@ macro_rules! unary_op_trait { fn $method(self) -> Self::Output; } }; + (owned $trait:ident, $method:ident) => { + pub trait $trait { + type Output; + + fn $method(&self) -> Self::Output; + } + }; } macro_rules! impl_unary_op { @@ -26,6 +32,15 @@ macro_rules! impl_unary_op { } } }; + (generic $trait:ident, $method:ident, s => $s:tt, t => $t:tt) => { + impl $trait for S where S: $s, T: $t { + type Output = T; + + fn $method(self) -> Self::Output { + <$t>::$method(self) + } + } + }; ($trait:ident, $method:ident; [$($t:ty),*]) => { $( impl_unary_op!($trait, $method, $t); @@ -67,20 +82,15 @@ where type Output = T; fn abs(self) -> Self::Output { - let re = self.re.clone(); - let im = self.im.clone(); - let re = re * re; - let im = im * im; - let abs = re + im; - abs.sqrt() + self.norm() } } -impl Recip for Complex +impl Recip for T where - T: Clone + Num + Neg, + T: Inv, { - type Output = Complex; + type Output = ::Output; fn recip(self) -> Self::Output { self.inv() @@ -103,7 +113,6 @@ impl_unary_op!(Cos, cos; [f64, f32, Complex, Complex]); impl_unary_op!(Cosh, cosh; [f64, f32, Complex, Complex]); impl_unary_op!(Exp, exp; [f64, f32, Complex, Complex]); impl_unary_op!(Ln, ln; [f64, f32, Complex, Complex]); -impl_unary_op!(alts Recip, recip, inv; [f64, f32]); impl_unary_op!(Sin, sin; [f64, f32, Complex, Complex]); impl_unary_op!(Sinh, sinh; [f64, f32, Complex, Complex]); impl_unary_op!(Sqrt, sqrt; [f64, f32, Complex, Complex]); diff --git a/graphs/src/dcg/edge.rs b/graphs/src/dcg/edge.rs index 1030d71c..4715d535 100644 --- a/graphs/src/dcg/edge.rs +++ b/graphs/src/dcg/edge.rs @@ -2,7 +2,7 @@ Appellation: edge Contrib: FL03 */ -use petgraph::graph::NodeIndex; +use crate::NodeIndex; #[cfg(feature = "serde")] use serde::{Deserialize, Serialize}; diff --git a/graphs/src/dcg/graph.rs b/graphs/src/dcg/graph.rs index 396095fa..5b1947cf 100644 --- a/graphs/src/dcg/graph.rs +++ b/graphs/src/dcg/graph.rs @@ -5,11 +5,12 @@ use super::edge::Edge; use super::node::Node; use super::DynamicGraph; +use crate::ops::*; use crate::prelude::GraphResult as Result; -use acme::ops::*; +use crate::NodeIndex; use num::traits::{Num, NumAssignOps, NumOps}; 
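The dynamic compute graph touched in this hunk imports `petgraph::algo::toposort`; gradient accumulation over such a graph presumably walks the nodes in reverse topological order, so every node's consumers are processed before the node itself. A self-contained petgraph sketch of that traversal (toy graph, not the crate's node types):

```rust
use petgraph::algo::toposort;
use petgraph::prelude::DiGraph;

fn main() {
    let mut g: DiGraph<&str, ()> = DiGraph::new();
    let x = g.add_node("x");
    let y = g.add_node("y");
    let z = g.add_node("z"); // z = f(x, y)
    g.add_edge(x, z, ());
    g.add_edge(y, z, ());

    let order = toposort(&g, None).expect("graph is acyclic");
    // reverse topological order visits outputs before their inputs,
    // which is exactly the order backprop needs
    for idx in order.iter().rev() {
        println!("{}", g[*idx]); // z, then y, then x
    }
}
```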
use petgraph::algo::toposort; -use petgraph::prelude::{Direction, NodeIndex}; +use petgraph::prelude::Direction; use std::collections::HashMap; use std::ops::Index; diff --git a/graphs/src/dcg/node.rs b/graphs/src/dcg/node.rs index 99d29307..da5e6257 100644 --- a/graphs/src/dcg/node.rs +++ b/graphs/src/dcg/node.rs @@ -2,7 +2,7 @@ Appellation: node Contrib: FL03 */ -use acme::ops::Operations; +use crate::ops::Operations; use petgraph::prelude::NodeIndex; #[derive(Clone, Debug)] diff --git a/graphs/src/grad/store.rs b/graphs/src/grad/store.rs index ec1e2f2f..be163931 100644 --- a/graphs/src/grad/store.rs +++ b/graphs/src/grad/store.rs @@ -2,8 +2,8 @@ Appellation: gradient Contrib: FL03 */ +use crate::NodeIndex; use acme::stores::Store; -use petgraph::prelude::NodeIndex; use std::any::Any; use std::collections::BTreeMap; diff --git a/graphs/src/lib.rs b/graphs/src/lib.rs index d554a632..b713ba69 100644 --- a/graphs/src/lib.rs +++ b/graphs/src/lib.rs @@ -16,8 +16,11 @@ pub(crate) mod graph; pub mod dcg; pub mod errors; pub mod grad; +pub mod ops; pub mod scg; +pub use petgraph::graph::{EdgeIndex, GraphIndex, NodeIndex}; + pub mod prelude { #[doc(inline)] pub use crate::dcg::Dcg; diff --git a/core/src/ops/arithmetic.rs b/graphs/src/ops/arithmetic.rs similarity index 73% rename from core/src/ops/arithmetic.rs rename to graphs/src/ops/arithmetic.rs index be840e72..8b94768e 100644 --- a/core/src/ops/arithmetic.rs +++ b/graphs/src/ops/arithmetic.rs @@ -5,12 +5,11 @@ use super::BinaryOperation; #[cfg(feature = "serde")] use serde::{Deserialize, Serialize}; -use std::ops::{Add, Div, Mul, Sub}; - -pub trait ArithmeticOp: Add {} +use strum::{Display, EnumCount, EnumIs, EnumIter, VariantNames}; macro_rules! operator { ($op:ident) => { + #[derive(Clone, Copy, Debug, Default, Eq, Hash, Ord, PartialEq, PartialOrd)] #[cfg_attr(feature = "serde", derive(Deserialize, Serialize,))] pub struct $op; @@ -25,12 +24,51 @@ macro_rules! operator { } } }; + ($($op:ident),*) => { + $( + operator!($op); + )* + }; + +} + +macro_rules! operators { + (class $group:ident; {$($op:ident: $variant:ident),*}) => { + $( + operator!($op); + )* + #[derive( + Clone, + Copy, + Debug, + Display, + EnumCount, + EnumIs, + EnumIter, + Eq, + Hash, + Ord, + PartialEq, + PartialOrd, + VariantNames, + )] + #[cfg_attr( + feature = "serde", + derive(Deserialize, Serialize,), + serde(rename_all = "lowercase", untagged) + )] + #[repr(u8)] + #[strum(serialize_all = "lowercase")] + pub enum $group { + $( + $variant($op), + )* + } + }; } macro_rules! impl_binary_op { ($op:ident, $bound:ident, $operator:tt) => { - operator!($op); - impl BinaryOperation for $op where A: $bound, @@ -43,8 +81,6 @@ macro_rules! impl_binary_op { } }; (expr $op:ident, $bound:ident, $exp:expr) => { - operator!($op); - impl BinaryOperation for $op where A: $bound, @@ -58,6 +94,11 @@ macro_rules! 
impl_binary_op { }; } +// operator!(Addition, Division, Multiplication, Subtraction); +operators!(class Arithmetic; {Addition: Add, Division: Div, Multiplication: Mul, Subtraction: Sub}); + +use std::ops::{Add, Div, Mul, Sub}; + impl_binary_op!(Addition, Add, +); impl_binary_op!(Division, Div, /); @@ -66,14 +107,6 @@ impl_binary_op!(Multiplication, Mul, *); impl_binary_op!(Subtraction, Sub, -); -#[derive(Clone)] -pub enum Arithmetic { - Add(Addition), - Div(Division), - Mul(Multiplication), - Sub(Subtraction), -} - impl Arithmetic { pub fn new(op: Arithmetic) -> Self { op diff --git a/graphs/src/ops/kinds.rs b/graphs/src/ops/kinds.rs new file mode 100644 index 00000000..8deba2ce --- /dev/null +++ b/graphs/src/ops/kinds.rs @@ -0,0 +1,263 @@ +/* + Appellation: kinds + Contrib: FL03 +*/ +use super::arithmetic::*; +use super::BinaryOperation; +#[cfg(feature = "serde")] +use serde::{Deserialize, Serialize}; +use smart_default::SmartDefault; +use strum::{Display, EnumCount, EnumIs, EnumIter, VariantNames}; + +#[cfg_attr( + feature = "serde", + derive(Deserialize, Serialize,), + serde(rename_all = "lowercase", untagged) +)] +#[derive( + Clone, + Copy, + Debug, + Default, + Display, + EnumCount, + EnumIs, + EnumIter, + Eq, + Hash, + Ord, + PartialEq, + PartialOrd, + VariantNames, +)] +#[repr(u8)] +#[strum(serialize_all = "lowercase")] +pub enum CompareExpr { + #[default] + Eq, + Ge, + Gt, + Le, + Lt, + Ne, +} + +#[derive( + Clone, + Copy, + Debug, + Display, + EnumCount, + EnumIs, + EnumIter, + Eq, + Hash, + Ord, + PartialEq, + PartialOrd, + SmartDefault, + VariantNames, +)] +#[cfg_attr( + feature = "serde", + derive(Deserialize, Serialize,), + serde(rename_all = "lowercase", untagged) +)] +#[repr(u8)] +#[strum(serialize_all = "lowercase")] +pub enum BinaryExpr { + #[default] + Add(Addition), + Div(Division), + Maximum, + Minimum, + Mul(Multiplication), + Sub(Subtraction), +} + +impl BinaryExpr { + pub fn add() -> Self { + Self::Add(Addition) + } + + pub fn div() -> Self { + Self::Div(Division) + } + + pub fn maximum() -> Self { + Self::Maximum + } + + pub fn minimum() -> Self { + Self::Minimum + } + + pub fn mul() -> Self { + Self::Mul(Multiplication) + } + + pub fn sub() -> Self { + Self::Sub(Subtraction) + } + + pub fn is_commutative(&self) -> bool { + match self { + Self::Add(_) | Self::Mul(_) => true, + _ => false, + } + } +} + +impl BinaryOperation for BinaryExpr +where + T: Copy + Default + PartialOrd + num::traits::NumOps, +{ + type Output = T; + + fn eval(&self, lhs: T, rhs: T) -> Self::Output { + match self { + Self::Add(_) => lhs + rhs, + Self::Div(_) => lhs / rhs, + Self::Maximum => { + if lhs > rhs { + lhs + } else { + rhs + } + } + Self::Minimum => { + if lhs < rhs { + lhs + } else { + rhs + } + } + Self::Mul(_) => lhs * rhs, + Self::Sub(_) => lhs - rhs, + } + } +} + +impl From for BinaryExpr { + fn from(_: Addition) -> Self { + Self::Add(Addition) + } +} + +impl From for BinaryExpr { + fn from(_: Division) -> Self { + Self::Div(Division) + } +} + +#[derive( + Clone, + Copy, + Debug, + Default, + Display, + EnumCount, + EnumIs, + EnumIter, + Eq, + Hash, + Ord, + PartialEq, + PartialOrd, + VariantNames, +)] +#[cfg_attr( + feature = "serde", + derive(Deserialize, Serialize,), + serde(rename_all = "lowercase", untagged) +)] +#[repr(u8)] +#[strum(serialize_all = "lowercase")] +pub enum UnaryExpr { + #[default] + Abs, + Ceil, + Cos, + Cosh, + Exp, + Inverse, // or Reciprocal + Floor, + Log, + Neg, + Round, + Rsqrt, + Sin, + Sinh, + Sqrt, + Tan, + Tanh, +} + +#[derive( + Clone, + Debug, + 
Display, + EnumCount, + EnumIs, + EnumIter, + Eq, + Hash, + Ord, + PartialEq, + PartialOrd, + SmartDefault, + VariantNames, +)] +#[cfg_attr( + feature = "serde", + derive(Deserialize, Serialize,), + serde(rename_all = "lowercase", untagged) +)] +#[repr(u8)] +#[strum(serialize_all = "lowercase")] +pub enum Operations { + Binary(BinaryExpr), + Compare(CompareExpr), + #[default] + Unary(UnaryExpr), + Custom { + name: String, + }, +} + +impl Operations { + /// A functional constructor for [Ops::Binary] + pub fn binary(op: BinaryExpr) -> Self { + Self::Binary(op) + } + /// A functional constructor for [Ops::Compare] + pub fn compare(op: CompareExpr) -> Self { + Self::Compare(op) + } + /// A functional constructor for [Ops::Custom] + pub fn custom(name: impl Into) -> Self { + Self::Custom { name: name.into() } + } + /// A functional constructor for [Ops::Unary] + pub fn unary(op: UnaryExpr) -> Self { + Self::Unary(op) + } +} + +impl From for Operations { + fn from(op: BinaryExpr) -> Self { + Self::Binary(op) + } +} + +impl From for Operations { + fn from(op: CompareExpr) -> Self { + Self::Compare(op) + } +} + +impl From for Operations { + fn from(op: UnaryExpr) -> Self { + Self::Unary(op) + } +} diff --git a/graphs/src/ops/mod.rs b/graphs/src/ops/mod.rs new file mode 100644 index 00000000..ea089ec6 --- /dev/null +++ b/graphs/src/ops/mod.rs @@ -0,0 +1,23 @@ +/* + Appellation: ops + Contrib: FL03 +*/ +//! # Operations +//! +//! +pub use self::{arithmetic::*, kinds::*}; + +pub(crate) mod arithmetic; +pub(crate) mod kinds; + +pub trait BinaryOperation { + type Output; + + fn eval(&self, lhs: A, rhs: B) -> Self::Output; +} + +pub trait Operator { + type Output; + + fn kind(&self) -> String; +} diff --git a/graphs/src/scg/graph.rs b/graphs/src/scg/graph.rs index d6a0a713..206c2c6d 100644 --- a/graphs/src/scg/graph.rs +++ b/graphs/src/scg/graph.rs @@ -3,8 +3,8 @@ Contrib: FL03 */ use super::Node; +use crate::ops::{BinaryExpr, BinaryOperation, Operations}; use crate::prelude::GraphResult as Result; -use acme::prelude::{BinaryExpr, BinaryOperation, Operations}; use num::traits::{NumAssign, NumOps, Signed}; use petgraph::algo::toposort; use petgraph::prelude::{DiGraph, NodeIndex}; diff --git a/graphs/src/scg/node.rs b/graphs/src/scg/node.rs index 33f5aaca..0d7ec53b 100644 --- a/graphs/src/scg/node.rs +++ b/graphs/src/scg/node.rs @@ -6,8 +6,8 @@ //! //! A computational graph relies on weighted nodes to represent constants, operations, and variables. //! The edges connecting to any given node are considered to be inputs and help to determine the flow of information +use crate::ops::Operations; use acme::id::AtomicId; -use acme::ops::Operations; use petgraph::prelude::NodeIndex; #[cfg(feature = "serde")] use serde::{Deserialize, Serialize}; diff --git a/tensor/src/actions/index/mod.rs b/tensor/src/actions/index/mod.rs new file mode 100644 index 00000000..f00c1a5b --- /dev/null +++ b/tensor/src/actions/index/mod.rs @@ -0,0 +1,13 @@ +/* + Appellation: index + Contrib: FL03 +*/ +//! # Index +//! +//! +pub use self::slice::*; + +pub(crate) mod slice; + +#[cfg(test)] +mod tests {} diff --git a/tensor/src/actions/index/slice.rs b/tensor/src/actions/index/slice.rs new file mode 100644 index 00000000..c6ffdbcf --- /dev/null +++ b/tensor/src/actions/index/slice.rs @@ -0,0 +1,13 @@ +/* + Appellation: slice + Contrib: FL03 +*/ +//! # Slice +//! +//! 
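The `Slice` type introduced just below carries `start`/`end`/`step`; a sketch of the indexing semantics those three fields presumably encode (half-open range, strided):

```rust
// A {start, end, step} slice spec resolved to concrete indices.
struct Slice {
    start: usize,
    end: usize,
    step: usize,
}

impl Slice {
    // half-open [start, end), taking every `step`-th index
    fn indices(&self) -> impl Iterator<Item = usize> {
        (self.start..self.end).step_by(self.step)
    }
}

fn main() {
    let s = Slice { start: 1, end: 8, step: 3 };
    assert_eq!(s.indices().collect::<Vec<_>>(), vec![1, 4, 7]);
}
```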
+ +pub struct Slice { + pub start: usize, + pub end: usize, + pub step: usize, +} diff --git a/tensor/src/actions/mod.rs b/tensor/src/actions/mod.rs index 0268da4f..1ac686aa 100644 --- a/tensor/src/actions/mod.rs +++ b/tensor/src/actions/mod.rs @@ -7,6 +7,7 @@ //! pub mod arange; +pub mod index; #[cfg(test)] mod tests {} diff --git a/tensor/src/errors/error.rs b/tensor/src/error.rs similarity index 72% rename from tensor/src/errors/error.rs rename to tensor/src/error.rs index a9ab014d..553d13d6 100644 --- a/tensor/src/errors/error.rs +++ b/tensor/src/error.rs @@ -2,11 +2,16 @@ Appellation: error Contrib: FL03 */ +use crate::shape::error::ShapeError; #[cfg(feature = "serde")] use serde::{Deserialize, Serialize}; use strum::{Display, EnumCount, EnumIs, EnumIter, EnumString, VariantNames}; -#[derive(Clone, Debug, Display, EnumCount, EnumIs, VariantNames)] +pub type TensorResult = std::result::Result; + +#[derive( + Clone, Debug, Display, EnumCount, EnumIs, Eq, Hash, Ord, PartialEq, PartialOrd, VariantNames, +)] #[cfg_attr( feature = "serde", derive(Deserialize, Serialize), @@ -32,7 +37,22 @@ impl From<&str> for TensorError { } } -#[derive(Clone, Copy, Debug, Display, EnumCount, EnumIs, EnumIter, EnumString, VariantNames)] +#[derive( + Clone, + Copy, + Debug, + Display, + EnumCount, + EnumIs, + EnumIter, + EnumString, + Eq, + Hash, + Ord, + PartialEq, + PartialOrd, + VariantNames, +)] #[cfg_attr( feature = "serde", derive(Deserialize, Serialize), @@ -46,19 +66,6 @@ pub enum ArithmeticError { Underflow, } -#[derive(Clone, Copy, Debug, Display, EnumCount, EnumIs, EnumIter, EnumString, VariantNames)] -#[cfg_attr( - feature = "serde", - derive(Deserialize, Serialize), - serde(rename_all = "snake_case", untagged) -)] -#[repr(usize)] -#[strum(serialize_all = "snake_case")] -pub enum ShapeError { - IncompatibleShapes, - InvalidShape, -} - macro_rules! 
into_tensor_error {
     ($error:ident, $kind:ident) => {
         impl From<$error> for TensorError {
diff --git a/tensor/src/errors/mod.rs b/tensor/src/errors/mod.rs
deleted file mode 100644
index 731b1e27..00000000
--- a/tensor/src/errors/mod.rs
+++ /dev/null
@@ -1,9 +0,0 @@
-/*
-    Appellation: errors
-    Contrib: FL03
-*/
-pub use self::error::*;
-
-pub(crate) mod error;
-
-pub type TensorResult<T> = std::result::Result<T, TensorError>;
diff --git a/tensor/src/impls/create.rs b/tensor/src/impls/create.rs
new file mode 100644
index 00000000..064a1386
--- /dev/null
+++ b/tensor/src/impls/create.rs
@@ -0,0 +1,121 @@
+/*
+    Appellation: create
+    Contrib: FL03
+*/
+use crate::prelude::IntoShape;
+use crate::tensor::*;
+use num::traits::{FromPrimitive, NumAssign, One, Zero};
+
+impl<T> TensorBase<T>
+where
+    T: Clone,
+{
+    /// Create an empty tensor from the given shape
+    pub fn empty(shape: impl IntoShape) -> Self
+    where
+        T: Default,
+    {
+        Self::fill(shape, T::default())
+    }
+    /// Create a tensor, from the given shape, filled with the given value
+    pub fn fill(shape: impl IntoShape, value: T) -> Self {
+        let shape = shape.into_shape();
+        let store = vec![value; shape.elements()];
+        Self::from_vec(shape, store)
+    }
+}
+
+impl<T> TensorBase<T>
+where
+    T: Copy + NumAssign + PartialOrd,
+{
+    /// Create a tensor within a range of values
+    pub fn arange(start: T, end: T, step: T) -> Self {
+        if T::is_zero(&step) {
+            panic!("step must be non-zero");
+        }
+        // let steps = ((end - start) / step).ceil() as usize;
+        let mut store = Vec::new();
+        let mut value = start;
+        while value < (end - step) {
+            store.push(value);
+            value += step;
+        }
+        from_vec((store.len(),), store)
+    }
+
+    /// Create a tensor within a range of values
+    pub fn linstep(start: T, end: T, steps: usize) -> Self
+    where
+        T: FromPrimitive,
+    {
+        // let steps = ((end - start) / step).ceil() as usize;
+        let step = (end - start) / T::from_usize(steps).unwrap();
+        let mut store = Vec::with_capacity(steps);
+        let mut value: T = start;
+        for _ in 0..steps {
+            store.push(value);
+            value += step;
+        }
+        from_vec((store.len(),), store)
+    }
+
+    pub fn logstep(start: T, end: T, steps: usize) -> Self
+    where
+        T: num::Float,
+    {
+        let start = start.log2();
+        let end = end.log2();
+        let step = (end - start) / T::from(steps).unwrap();
+        let mut store = Vec::with_capacity(steps);
+        let mut value: T = start;
+        for _ in 0..steps {
+            store.push(value.exp2());
+            value += step;
+        }
+        from_vec((store.len(),), store)
+    }
+
+    pub fn geomspace(start: T, end: T, steps: usize) -> Self
+    where
+        T: num::Float,
+    {
+        let start = start.log10();
+        let end = end.log10();
+        let step = (end - start) / T::from(steps).unwrap();
+        let mut store = Vec::with_capacity(steps);
+        let mut value: T = start;
+        for _ in 0..steps {
+            store.push(value.exp());
+            value += step;
+        }
+        from_vec((store.len(),), store)
+    }
+}
+impl<T> TensorBase<T>
+where
+    T: Clone + One,
+{
+    /// Create a tensor, filled with ones, from the given shape
+    pub fn ones(shape: impl IntoShape) -> Self {
+        Self::fill(shape, T::one())
+    }
+    /// Create a tensor, filled with ones, from the shape of another tensor
+    pub fn ones_like(tensor: &TensorBase<T>) -> Self {
+        Self::ones(tensor.shape().clone())
+    }
+}
+
+impl<T> TensorBase<T>
+where
+    T: Clone + Zero,
+{
+    /// Create a tensor, filled with zeros, from the given shape
+    pub fn zeros(shape: impl IntoShape) -> Self {
+        Self::fill(shape, T::zero())
+    }
+    /// Create a tensor, filled with zeros, from the shape of another tensor
+    pub fn zeros_like(tensor: &TensorBase<T>) -> Self {
+        Self::zeros(tensor.shape().clone())
+    }
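One consistency note on the spacing constructors above: `logstep` stays in base 2 (`log2` paired with `exp2`), but `geomspace` takes base-10 logs of the endpoints and then maps samples back through the natural `exp`, so its endpoints are not reproduced. A base-consistent sketch (standalone, hypothetical helper, not part of the patch):

```rust
/// Geometric spacing between `start` and `end` (end exclusive), kept in
/// base 10 throughout: log-space the endpoints, step linearly, undo with 10^x.
fn geomspace(start: f64, end: f64, steps: usize) -> Vec<f64> {
    let (a, b) = (start.log10(), end.log10());
    let step = (b - a) / steps as f64;
    (0..steps)
        .map(|i| 10f64.powf(a + step * i as f64))
        .collect()
}

fn main() {
    // roughly [1.0, 10.0, 100.0], up to float rounding
    println!("{:?}", geomspace(1.0, 1000.0, 3));
}
```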
+} diff --git a/tensor/src/impls/grad.rs b/tensor/src/impls/grad.rs index add23a48..98dd2126 100644 --- a/tensor/src/impls/grad.rs +++ b/tensor/src/impls/grad.rs @@ -2,12 +2,11 @@ Appellation: grad Contrib: FL03 */ -use crate::ops::{BinaryOp, TensorOp}; -use crate::prelude::Scalar; +use crate::prelude::{Scalar, TensorId, TensorOp}; use crate::tensor::*; -use acme::prelude::AtomicId; +use acme::ops::binary::BinaryOp; -pub(crate) type GradStore = std::collections::BTreeMap; +pub(crate) type GradStore = std::collections::BTreeMap; impl TensorBase where diff --git a/tensor/src/impls/linalg.rs b/tensor/src/impls/linalg.rs index 4f956760..50404d2b 100644 --- a/tensor/src/impls/linalg.rs +++ b/tensor/src/impls/linalg.rs @@ -3,14 +3,20 @@ Contrib: FL03 */ //! Implementations for linear algebra operations. -use crate::ops::{BinaryOp, TensorOp}; -use crate::prelude::{Matmul, Scalar}; +//! +//! +use crate::prelude::{Matmul, Scalar, TensorOp, TensorResult}; +use crate::shape::ShapeError; use crate::tensor::*; -pub(crate) fn matmul(lhs: &TensorBase, rhs: &TensorBase) -> TensorBase +pub(crate) fn matmul(lhs: &TensorBase, rhs: &TensorBase) -> TensorResult> where T: Scalar, { + if lhs.shape().rank() != rhs.shape().rank() { + return Err(ShapeError::IncompatibleShapes.into()); + } + let lhs_shape = lhs.shape().clone(); let rhs_shape = rhs.shape().clone(); @@ -27,12 +33,8 @@ where } } } - let op = TensorOp::Binary( - Box::new(lhs.clone()), - Box::new(rhs.clone()), - BinaryOp::Matmul, - ); - from_vec_with_op(op, shape, result) + let op = TensorOp::Matmul(Box::new(lhs.clone()), Box::new(rhs.clone())); + Ok(from_vec_with_op(op, shape, result)) } impl Matmul> for TensorBase @@ -52,11 +54,7 @@ where } } } - let op = TensorOp::Binary( - Box::new(self.clone()), - Box::new(other.clone()), - BinaryOp::Matmul, - ); + let op = TensorOp::Matmul(Box::new(self.clone()), Box::new(other.clone())); from_vec_with_op(op, shape, result) } } diff --git a/tensor/src/impls/ops/binary.rs b/tensor/src/impls/ops/binary.rs index 59d4d9da..e232255f 100644 --- a/tensor/src/impls/ops/binary.rs +++ b/tensor/src/impls/ops/binary.rs @@ -2,9 +2,9 @@ Appellation: arith Contrib: FL03 */ -use crate::ops::{BinaryOp, TensorOp}; -use crate::prelude::Scalar; +use crate::prelude::{Scalar, TensorOp}; use crate::tensor::*; +use acme::ops::binary::BinaryOp; macro_rules! 
cmp { (ne: $lhs:expr, $rhs:expr) => { diff --git a/tensor/src/impls/ops/unary.rs b/tensor/src/impls/ops/unary.rs index be5bc926..5bb67efb 100644 --- a/tensor/src/impls/ops/unary.rs +++ b/tensor/src/impls/ops/unary.rs @@ -2,9 +2,9 @@ Appellation: arith Contrib: FL03 */ -use crate::ops::{TensorOp, UnaryOp}; -use crate::prelude::Scalar; +use crate::prelude::{Scalar, TensorOp}; use crate::tensor::*; +use acme::ops::unary::UnaryOp; impl std::ops::Neg for TensorBase where diff --git a/tensor/src/impls/reshape.rs b/tensor/src/impls/reshape.rs new file mode 100644 index 00000000..6cbc3ea3 --- /dev/null +++ b/tensor/src/impls/reshape.rs @@ -0,0 +1,25 @@ +/* + Appellation: reshape + Contrib: FL03 +*/ +use crate::prelude::IntoShape; +use crate::tensor::TensorBase; + +impl TensorBase +where + T: Clone + Default, +{ + pub fn broadcast(&self, shape: impl IntoShape) -> Self { + let shape = shape.into_shape(); + + let _diff = *self.shape().rank() - *shape.rank(); + + unimplemented!() + } + + pub fn reshape(&self, shape: impl IntoShape) -> Self { + let _shape = shape.into_shape(); + + unimplemented!() + } +} diff --git a/tensor/src/lib.rs b/tensor/src/lib.rs index 6245c89f..8be5c7cb 100644 --- a/tensor/src/lib.rs +++ b/tensor/src/lib.rs @@ -12,13 +12,13 @@ extern crate alloc; extern crate acme_core as acme; #[doc(inline)] -pub use self::tensor::*; +pub use self::{error::*, tensor::*}; +pub(crate) mod error; pub(crate) mod tensor; pub mod actions; pub mod data; -pub mod errors; pub mod linalg; pub mod ops; pub mod shape; @@ -31,9 +31,11 @@ mod impls { mod binary; mod unary; } + mod create; mod grad; mod linalg; mod num; + mod reshape; } pub type Tensor = tensor::TensorBase; @@ -42,7 +44,7 @@ pub mod prelude { #[doc(inline)] pub use crate::data::*; #[doc(inline)] - pub use crate::errors::*; + pub use crate::error::*; #[doc(inline)] pub use crate::ops::*; #[doc(inline)] diff --git a/tensor/src/ops/backprop.rs b/tensor/src/ops/backprop.rs index e3132d1b..81dd7b43 100644 --- a/tensor/src/ops/backprop.rs +++ b/tensor/src/ops/backprop.rs @@ -4,15 +4,16 @@ */ use super::TensorOp; -pub struct BackpropOp(Option>); +#[derive(Clone, Debug)] +pub struct TrackedOp(Option>); -impl BackpropOp { +impl TrackedOp { pub fn new(op: TensorOp) -> Self { - BackpropOp(Some(op)) + TrackedOp(Some(op)) } pub fn none() -> Self { - BackpropOp(None) + TrackedOp(None) } pub fn op(&self) -> Option<&TensorOp> { @@ -27,3 +28,27 @@ impl BackpropOp { self.0 } } + +impl Default for TrackedOp { + fn default() -> Self { + Self::none() + } +} + +impl From>> for TrackedOp { + fn from(op: Option>) -> Self { + TrackedOp(op) + } +} + +impl From> for TrackedOp { + fn from(op: TensorOp) -> Self { + TrackedOp(Some(op)) + } +} + +impl From> for Option> { + fn from(op: TrackedOp) -> Option> { + op.into_inner() + } +} diff --git a/tensor/src/ops/kinds.rs b/tensor/src/ops/kinds.rs index dfa18cf9..d24a57fd 100644 --- a/tensor/src/ops/kinds.rs +++ b/tensor/src/ops/kinds.rs @@ -3,86 +3,15 @@ Contrib: FL03 */ use crate::TensorBase; -#[cfg(feature = "serde")] -use serde::{Deserialize, Serialize}; -use strum::{Display, EnumCount, EnumIs, EnumIter, EnumString, VariantNames}; +use acme::ops::binary::BinaryOp; +use acme::ops::unary::UnaryOp; #[derive(Clone, Debug)] pub enum TensorOp { Binary(Box>, Box>, BinaryOp), BinaryScalar(Box>, T, BinaryOp), Unary(Box>, UnaryOp), -} - -#[derive( - Clone, - Copy, - Debug, - Display, - EnumCount, - EnumIs, - EnumIter, - EnumString, - Eq, - Hash, - Ord, - PartialEq, - PartialOrd, - VariantNames, -)] -#[cfg_attr( - feature = 
"serde", - derive(Deserialize, Serialize), - serde(rename_all = "lowercase", untagged) -)] -#[repr(u8)] -#[strum(serialize_all = "lowercase")] -pub enum BinaryOp { - Add, - Div, - Matmul, - Mul, - Sub, -} - -#[derive( - Clone, - Copy, - Debug, - Display, - EnumCount, - EnumIs, - EnumIter, - EnumString, - Eq, - Hash, - Ord, - PartialEq, - PartialOrd, - VariantNames, -)] -#[cfg_attr( - feature = "serde", - derive(Deserialize, Serialize), - serde(rename_all = "lowercase", untagged) -)] -#[repr(u8)] -#[strum(serialize_all = "lowercase")] -pub enum UnaryOp { - Abs, - Cos, - Cosh, - Exp, - Log, - Ln, - Neg, - Reciprocal, - Sin, - Sinh, - Sqrt, - Square, - Tan, - Tanh, + Matmul(Box>, Box>), } pub enum Inputs { diff --git a/tensor/src/shape/dim/axis.rs b/tensor/src/shape/dim/axis.rs new file mode 100644 index 00000000..df582df5 --- /dev/null +++ b/tensor/src/shape/dim/axis.rs @@ -0,0 +1,18 @@ +/* + Appellation: axis + Contrib: FL03 +*/ +//! # Axis +//! + +pub struct Axis(pub(crate) usize); + +impl Axis { + pub fn new(axis: usize) -> Self { + Axis(axis) + } + + pub fn axis(&self) -> usize { + self.0 + } +} diff --git a/tensor/src/shape/dimension.rs b/tensor/src/shape/dim/dimension.rs similarity index 77% rename from tensor/src/shape/dimension.rs rename to tensor/src/shape/dim/dimension.rs index f7c016bd..8912ec08 100644 --- a/tensor/src/shape/dimension.rs +++ b/tensor/src/shape/dim/dimension.rs @@ -3,4 +3,4 @@ Contrib: FL03 */ -pub trait Dimension {} +pub struct Dim; diff --git a/tensor/src/shape/dim/mod.rs b/tensor/src/shape/dim/mod.rs new file mode 100644 index 00000000..56d57a01 --- /dev/null +++ b/tensor/src/shape/dim/mod.rs @@ -0,0 +1,34 @@ +/* + Appellation: dim + Contrib: FL03 +*/ +//! # Dimension +//! + +pub use self::{axis::Axis, dimension::*, rank::Rank}; + +pub(crate) mod axis; +pub(crate) mod dimension; +pub(crate) mod rank; + +pub trait IntoAxis { + fn into_axis(self) -> Axis; +} + +impl IntoAxis for usize { + fn into_axis(self) -> Axis { + Axis::new(self) + } +} + +pub trait IntoRank { + fn into_rank(self) -> Rank; +} + +impl IntoRank for usize { + fn into_rank(self) -> Rank { + Rank::new(self) + } +} + +pub trait Dimension {} diff --git a/tensor/src/shape/rank.rs b/tensor/src/shape/dim/rank.rs similarity index 100% rename from tensor/src/shape/rank.rs rename to tensor/src/shape/dim/rank.rs diff --git a/tensor/src/shape/error.rs b/tensor/src/shape/error.rs new file mode 100644 index 00000000..4de02823 --- /dev/null +++ b/tensor/src/shape/error.rs @@ -0,0 +1,43 @@ +/* + Appellation: error + Contrib: FL03 +*/ +#[cfg(feature = "serde")] +use serde::{Deserialize, Serialize}; +use strum::{Display, EnumCount, EnumIs, EnumIter, EnumString, VariantNames}; + +pub type ShapeResult = std::result::Result; + +#[cfg_attr( + feature = "serde", + derive(Deserialize, Serialize), + serde(rename_all = "snake_case", untagged) +)] +#[derive( + Clone, + Copy, + Debug, + Display, + EnumCount, + EnumIs, + EnumIter, + EnumString, + Eq, + Hash, + Ord, + PartialEq, + PartialOrd, + VariantNames, +)] +#[repr(usize)] +#[strum(serialize_all = "snake_case")] +pub enum ShapeError { + IncompatibleShapes, + InvalidShape, +} + +unsafe impl Send for ShapeError {} + +unsafe impl Sync for ShapeError {} + +impl std::error::Error for ShapeError {} diff --git a/tensor/src/shape/mod.rs b/tensor/src/shape/mod.rs index 26351378..fd717d73 100644 --- a/tensor/src/shape/mod.rs +++ b/tensor/src/shape/mod.rs @@ -3,13 +3,16 @@ Contrib: FL03 */ //! # Shapes -pub use self::{dimension::*, rank::*, shape::*, stride::*}; +//! +//! 
+pub use self::{error::*, shape::*, stride::*}; -pub(crate) mod dimension; -pub(crate) mod rank; +pub(crate) mod error; pub(crate) mod shape; pub(crate) mod stride; +pub mod dim; + pub trait IntoShape { fn into_shape(self) -> Shape; } @@ -24,10 +27,10 @@ where } pub(crate) mod prelude { - pub use super::dimension::*; - pub use super::rank::*; + pub use super::dim::*; pub use super::shape::*; pub use super::stride::*; + pub use super::IntoShape; } #[cfg(test)] diff --git a/tensor/src/shape/shape.rs b/tensor/src/shape/shape.rs index 94789dba..115a1fda 100644 --- a/tensor/src/shape/shape.rs +++ b/tensor/src/shape/shape.rs @@ -2,26 +2,13 @@ Appellation: shape Contrib: FL03 */ -use super::Rank; -use crate::errors::ShapeError; +use super::dim::Rank; +use super::error::ShapeError; use crate::prelude::TensorResult; #[cfg(feature = "serde")] use serde::{Deserialize, Serialize}; use std::ops::{self, Deref}; -pub trait IntoShape { - fn into_shape(self) -> Shape; -} - -impl IntoShape for S -where - S: Into, -{ - fn into_shape(self) -> Shape { - self.into() - } -} - #[cfg_attr(feature = "serde", derive(Deserialize, Serialize,))] #[derive(Clone, Debug, Default, Eq, Hash, Ord, PartialEq, PartialOrd)] pub struct Shape(Vec); diff --git a/tensor/src/specs/ndtensor.rs b/tensor/src/specs/ndtensor.rs index 810e67a9..ce62659d 100644 --- a/tensor/src/specs/ndtensor.rs +++ b/tensor/src/specs/ndtensor.rs @@ -2,7 +2,7 @@ Appellation: ndtensor Contrib: FL03 */ -use crate::shape::{Rank, Shape}; +use crate::shape::prelude::{Rank, Shape}; use crate::store::Layout; use acme::prelude::AtomicId; diff --git a/tensor/src/tensor.rs b/tensor/src/tensor.rs index 2a9bf93b..8fc41864 100644 --- a/tensor/src/tensor.rs +++ b/tensor/src/tensor.rs @@ -2,18 +2,15 @@ Appellation: tensor Contrib: FL03 */ -use crate::ops::kinds::{BinaryOp, TensorOp}; -use crate::prelude::Scalar; -use crate::shape::{IntoShape, Rank, Shape}; +// use crate::ops::TrackedOp; +use crate::prelude::{IntoShape, Rank, Shape, TensorId, TensorOp}; use crate::store::Layout; -use acme::prelude::AtomicId; -use num::traits::{NumAssign, One, Zero}; -// use std::ops::{Index, IndexMut}; +use std::ops::Index; // use std::sync::{Arc, RwLock}; pub(crate) fn from_vec(shape: impl IntoShape, store: Vec) -> TensorBase { TensorBase { - id: AtomicId::new(), + id: TensorId::new(), layout: Layout::contiguous(shape), op: None, store, @@ -27,7 +24,7 @@ pub(crate) fn from_vec_with_op( ) -> TensorBase { let layout = Layout::contiguous(shape); TensorBase { - id: AtomicId::new(), + id: TensorId::new(), layout, op: Some(op), store, @@ -37,7 +34,7 @@ pub(crate) fn from_vec_with_op( #[derive(Clone, Debug)] // #[derive(Clone, Debug, Eq, Hash, Ord, PartialOrd)] pub struct TensorBase { - pub(crate) id: AtomicId, + pub(crate) id: TensorId, pub(crate) layout: Layout, pub(crate) op: Option>, pub(crate) store: Vec, @@ -46,7 +43,7 @@ pub struct TensorBase { impl TensorBase { pub fn new(shape: impl IntoShape) -> Self { Self { - id: AtomicId::new(), + id: TensorId::new(), layout: Layout::contiguous(shape), op: None, store: Vec::new(), @@ -56,11 +53,6 @@ impl TensorBase { pub fn from_vec(shape: impl IntoShape, store: Vec) -> Self { from_vec(shape, store) } - - // Function to get the index of the data based on coordinates - fn position(&self, coords: impl AsRef<[usize]>) -> usize { - self.layout.position(coords.as_ref()) - } /// Returns the unique identifier of the tensor. 
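    /// (Identifiers are drawn from a process-wide atomic counter, so every
    /// tensor created in a process receives a distinct id; see the
    /// `types/id.rs` module introduced later in this patch.)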
pub fn id(&self) -> usize { self.id.get() @@ -91,113 +83,13 @@ impl TensorBase { pub(crate) fn data(&self) -> &Vec { &self.store } -} - -impl TensorBase -where - T: Clone, -{ - /// Create an empty tensor from the given shape - pub fn empty(shape: impl IntoShape) -> Self - where - T: Default, - { - Self::fill(shape, T::default()) - } - /// Create a tensor, from the given shape, filled with the given value - pub fn fill(shape: impl IntoShape, value: T) -> Self { - let shape = shape.into_shape(); - let store = vec![value; shape.elements()]; - Self::from_vec(shape, store) - } -} - -impl TensorBase -where - T: Clone + Default, -{ - pub fn broadcast(&self, shape: impl IntoShape) -> Self { - let shape = shape.into_shape(); - - let _diff = *self.shape().rank() - *shape.rank(); - - self.clone() - } -} - -impl TensorBase -where - T: Copy + NumAssign + PartialOrd, -{ - /// Create a tensor within a range of values - pub fn arange(start: T, end: T, step: T) -> Self { - if T::is_zero(&step) { - panic!("step must be non-zero"); - } - - let mut store = vec![start]; - let mut cur = T::zero(); - while store.last().unwrap() < &end { - cur += step; - store.push(cur); - } - Self::from_vec(store.len(), store) - } -} -impl TensorBase -where - T: Clone + One, -{ - /// Create a tensor, filled with ones, from the given shape - pub fn ones(shape: impl IntoShape) -> Self { - Self::fill(shape, T::one()) - } - /// Create a tensor, filled with ones, from the shape of another tensor - pub fn ones_like(tensor: &TensorBase) -> Self { - Self::ones(tensor.shape().clone()) - } -} - -impl TensorBase -where - T: Clone + Zero, -{ - /// Create a tensor, filled with zeros, from the given shape - pub fn zeros(shape: impl IntoShape) -> Self { - Self::fill(shape, T::zero()) - } - /// Create a tensor, filled with zeros, from the shape of another tensor - pub fn zeros_like(tensor: &TensorBase) -> Self { - Self::zeros(tensor.shape().clone()) - } -} - -impl TensorBase -where - T: Scalar, -{ - pub fn matmul(&self, other: &Self) -> Self { - let shape = self.shape().matmul_shape(other.shape()).unwrap(); - let mut result = vec![T::zero(); shape.elements()]; - - for i in 0..self.shape()[0] { - for j in 0..other.shape()[1] { - for k in 0..self.shape()[1] { - result[i * other.shape()[1] + j] += - self.store[i * self.shape()[1] + k] * other.store[k * other.shape()[1] + j]; - } - } - } - let op = TensorOp::Binary( - Box::new(self.clone()), - Box::new(other.clone()), - BinaryOp::Matmul, - ); - from_vec_with_op(op, shape, result) + // An internal function to get the index of the data based on coordinates + pub(crate) fn position(&self, coords: impl AsRef<[usize]>) -> usize { + self.layout.position(coords.as_ref()) } } -impl std::ops::Index<&[usize]> for TensorBase { +impl Index<&[usize]> for TensorBase { type Output = T; fn index(&self, index: &[usize]) -> &Self::Output { diff --git a/tensor/src/types/id.rs b/tensor/src/types/id.rs new file mode 100644 index 00000000..4bebd351 --- /dev/null +++ b/tensor/src/types/id.rs @@ -0,0 +1,88 @@ +/* + Appellation: id + Contrib: FL03 +*/ +//! # Tensor Id +//! +//! 
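//! `TensorId` wraps a `usize` taken from a single process-wide atomic
//! counter, so each call to `TensorId::new` returns a fresh identifier.
//! A minimal sketch of the pattern (the name `next_id` is illustrative):
//!
//! use std::sync::atomic::{AtomicUsize, Ordering::Relaxed};
//!
//! fn next_id() -> usize {
//!     // Relaxed suffices here: only uniqueness matters, not ordering
//!     // between threads.
//!     static COUNTER: AtomicUsize = AtomicUsize::new(1);
//!     COUNTER.fetch_add(1, Relaxed)
//! }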
+#[cfg(feature = "serde")] +use serde::{Deserialize, Serialize}; +use std::ops::{Deref, DerefMut}; + +#[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)] +#[cfg_attr(feature = "serde", derive(Deserialize, Serialize,))] +pub struct TensorId(usize); + +impl TensorId { + pub fn new() -> Self { + use std::sync::atomic::{AtomicUsize, Ordering::Relaxed}; + static COUNTER: AtomicUsize = AtomicUsize::new(1); + Self(COUNTER.fetch_add(1, Relaxed)) + } + + pub fn next(&self) -> Self { + Self::new() + } + + pub fn set(&mut self, id: usize) { + self.0 = id; + } + + pub fn get(&self) -> usize { + self.0 + } + + pub fn into_inner(self) -> usize { + self.0 + } +} + +impl AsRef for TensorId { + fn as_ref(&self) -> &usize { + &self.0 + } +} + +impl AsMut for TensorId { + fn as_mut(&mut self) -> &mut usize { + &mut self.0 + } +} + +impl Default for TensorId { + fn default() -> Self { + Self::new() + } +} + +impl Deref for TensorId { + type Target = usize; + + fn deref(&self) -> &Self::Target { + &self.0 + } +} + +impl DerefMut for TensorId { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.0 + } +} + +impl std::fmt::Display for TensorId { + fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { + write!(f, "{}", self.0) + } +} + +impl From for TensorId { + fn from(id: usize) -> Self { + Self(id) + } +} + +impl From for usize { + fn from(id: TensorId) -> Self { + id.0 + } +} diff --git a/tensor/src/types/mod.rs b/tensor/src/types/mod.rs index f03d19ee..824c9ac2 100644 --- a/tensor/src/types/mod.rs +++ b/tensor/src/types/mod.rs @@ -3,8 +3,10 @@ Contrib: FL03 */ +pub mod id; pub mod order; pub(crate) mod prelude { + pub use super::id::TensorId; pub use super::order::MajorOrder; } diff --git a/tensor/src/types/order.rs b/tensor/src/types/order.rs index a229018c..d6e9b9f9 100644 --- a/tensor/src/types/order.rs +++ b/tensor/src/types/order.rs @@ -44,3 +44,18 @@ impl MajorOrder { Self::Row } } + +impl From for usize { + fn from(order: MajorOrder) -> Self { + order as usize + } +} + +impl From for MajorOrder { + fn from(order: usize) -> Self { + match order % Self::COUNT { + 0 => Self::Column, + _ => Self::Row, + } + } +} diff --git a/tensor/tests/arith.rs b/tensor/tests/arith.rs index f56e3a48..9fb6d467 100644 --- a/tensor/tests/arith.rs +++ b/tensor/tests/arith.rs @@ -5,65 +5,64 @@ #![cfg(test)] extern crate acme_tensor as acme; -use acme::TensorBase; -// use acme::prelude::Matmul; +use acme::prelude::{Matmul, Tensor}; #[test] fn test_add() { let shape = (2, 2); - let a = TensorBase::::ones(shape); - let b = TensorBase::::ones(shape); + let a = Tensor::::ones(shape); + let b = Tensor::::ones(shape); let c = a + &b; - assert_eq!(c, TensorBase::fill(shape, 2_f64)); + assert_eq!(c, Tensor::fill(shape, 2_f64)); - let a = TensorBase::::ones(shape); + let a = Tensor::::ones(shape); let b = a + 1_f64; - assert_eq!(b, TensorBase::fill(shape, 2_f64)); + assert_eq!(b, Tensor::fill(shape, 2_f64)); } #[test] fn test_div() { let shape = (2, 2); - let a = TensorBase::::ones(shape); - let b = TensorBase::::ones(shape) * 2.0; + let a = Tensor::::ones(shape); + let b = Tensor::::ones(shape) * 2.0; let c = a / b; - assert_eq!(c, TensorBase::::fill(shape, 0.5)); + assert_eq!(c, Tensor::::fill(shape, 0.5)); } #[test] fn test_mul() { let shape = (2, 2); - let a = TensorBase::::ones(shape); - let b = TensorBase::::ones(shape); + let a = Tensor::::ones(shape); + let b = Tensor::::ones(shape); let c = a * b; - assert_eq!(c, TensorBase::::ones(shape)); + assert_eq!(c, Tensor::::ones(shape)); } #[test] 
fn test_sub() { let shape = (2, 2); - let a = TensorBase::::ones(shape); - let b = TensorBase::::ones(shape); + let a = Tensor::::ones(shape); + let b = Tensor::::ones(shape); let c = a - &b; - assert_eq!(c, TensorBase::::zeros(shape)); + assert_eq!(c, Tensor::::zeros(shape)); } #[test] fn test_matmul() { - let a = TensorBase::::fill((3, 2), 2_f64); - let b = TensorBase::::ones((2, 3)); + let a = Tensor::::fill((3, 2), 2_f64); + let b = Tensor::::ones((2, 3)); let c = a.matmul(&b); - assert_eq!(c, TensorBase::::fill((3, 3), 4.0)); + assert_eq!(c, Tensor::::fill((3, 3), 4.0)); } #[test] fn test_trig() { - let a = TensorBase::::ones((2, 2)); + let a = Tensor::::ones((2, 2)); let b = a.clone().sin(); let c = a.cos(); diff --git a/tensor/tests/composition.rs b/tensor/tests/composition.rs index 8ec55e7d..554ce2b5 100644 --- a/tensor/tests/composition.rs +++ b/tensor/tests/composition.rs @@ -19,8 +19,23 @@ fn test_tensor() { #[test] fn test_arange() { let exp = Shape::from(10); - let a = Tensor::arange(0_f64, 1_f64, 0.1); + let a = Tensor::arange(0_f64, 10_f64, 1_f64); assert_eq!(a.shape(), &exp); + + for i in 0..10 { + assert_eq!(a[&[i]], i as f64); + } +} + +#[test] +fn test_linstep() { + let exp = Shape::from(10); + let a = Tensor::linstep(0_f64, 10_f64, 10); + assert_eq!(a.shape(), &exp); + let b = Tensor::arange(0_f64, 10_f64, 1_f64); + for i in 0..10 { + assert_eq!(a[&[i]], b[&[i]]); + } } #[test] From e54598a3155a28a69c979ff71dbe25ae6625b6f6 Mon Sep 17 00:00:00 2001 From: Joe McCain III Date: Fri, 22 Mar 2024 12:29:13 -0500 Subject: [PATCH 43/87] update Signed-off-by: Joe McCain III --- tensor/src/impls/create.rs | 14 ++++-- tensor/src/impls/grad.rs | 100 +++++++++++++++++++++++++++++-------- tensor/src/impls/linalg.rs | 7 ++- tensor/src/shape/shape.rs | 28 +++++++++++ tensor/src/tensor.rs | 15 +++++- tensor/tests/arith.rs | 2 +- tensor/tests/backward.rs | 49 ++++++++++++++++++ 7 files changed, 185 insertions(+), 30 deletions(-) create mode 100644 tensor/tests/backward.rs diff --git a/tensor/src/impls/create.rs b/tensor/src/impls/create.rs index 064a1386..702314f1 100644 --- a/tensor/src/impls/create.rs +++ b/tensor/src/impls/create.rs @@ -37,7 +37,7 @@ where // let steps = ((end - start) / step).ceil() as usize; let mut store = Vec::new(); let mut value = start; - while value < (end - step) { + while value < end { store.push(value); value += step; } @@ -101,9 +101,13 @@ where Self::fill(shape, T::one()) } /// Create a tensor, filled with ones, from the shape of another tensor - pub fn ones_like(tensor: &TensorBase) -> Self { + pub fn ones_from(tensor: &TensorBase) -> Self { Self::ones(tensor.shape().clone()) } + /// Create a tensor, filled with ones, from the shape of the tensor + pub fn ones_like(&self) -> Self { + Self::ones(self.shape().clone()) + } } impl TensorBase @@ -115,7 +119,11 @@ where Self::fill(shape, T::zero()) } /// Create a tensor, filled with zeros, from the shape of another tensor - pub fn zeros_like(tensor: &TensorBase) -> Self { + pub fn zeros_from(tensor: &TensorBase) -> Self { Self::zeros(tensor.shape().clone()) } + /// Create a tensor, filled with zeros, from the shape of the tensor + pub fn zeros_like(&self) -> Self { + Self::zeros(self.shape().clone()) + } } diff --git a/tensor/src/impls/grad.rs b/tensor/src/impls/grad.rs index 98dd2126..02363b1a 100644 --- a/tensor/src/impls/grad.rs +++ b/tensor/src/impls/grad.rs @@ -5,38 +5,94 @@ use crate::prelude::{Scalar, TensorId, TensorOp}; use crate::tensor::*; use acme::ops::binary::BinaryOp; - +use 
std::collections::HashMap; pub(crate) type GradStore = std::collections::BTreeMap; impl TensorBase where T: Scalar, { - pub fn grad(&self) -> GradStore> { - let mut store = GradStore::new(); - store.insert(self.id().into(), TensorBase::ones_like(self)); + fn sorted_nodes(&self) -> Vec<&TensorBase> { + // The vec of sorted nodes is passed as an owned value rather than a mutable reference + // to get around some lifetime limitations. + fn walk<'a, T>( + node: &'a TensorBase, + nodes: Vec<&'a TensorBase>, + visited: &mut HashMap, + ) -> (bool, Vec<&'a TensorBase>) { + if let Some(&tg) = visited.get(&node.id()) { + return (tg, nodes); + } + let mut track_grad = false; + let mut nodes = if node.is_variable() { + // Do not call recursively on the "leaf" nodes. + track_grad = true; + nodes + } else if let Some(op) = node.op() { + match op { + TensorOp::Binary(a, b, _kind) => { + let (track_a, nodes) = walk(a, nodes, visited); + let (track_b, nodes) = walk(b, nodes, visited); + track_grad = track_a || track_b; + nodes + } + TensorOp::Unary(a, _kind) => { + let (track, nodes) = walk(a, nodes, visited); + track_grad = track; + nodes + } + _ => nodes, + } + } else { + nodes + }; + visited.insert(node.id(), track_grad); + if track_grad { + nodes.push(node); + } + (track_grad, nodes) + } + let (_tg, mut nodes) = walk(self, vec![], &mut HashMap::new()); + nodes.reverse(); + nodes + } - let grad = store.get(&self.id().into()).unwrap().clone(); + pub fn grad(&self) -> GradStore> + where + T: std::fmt::Debug, + { + // get the sorted nodes + let sorted = self.sorted_nodes(); + // initialize a new gradient store + let mut store = GradStore::new(); + store.insert(sorted.first().unwrap().id(), self.ones_like()); - if let Some(op) = &self.op { - match op { - TensorOp::Binary(a, b, kind) => match kind { - BinaryOp::Add => { - *store - .entry(a.id().into()) - .or_insert(TensorBase::zeros_like(a)) += grad.clone(); - *store - .entry(b.id().into()) - .or_insert(TensorBase::zeros_like(b)) += grad; - } - _ => todo!(), - }, - TensorOp::Unary(_a, kind) => match kind { - _ => todo!(), - }, - _ => {} + for node in sorted.iter() { + if node.is_variable() { + continue; + } + let grad = store.get(&node.id()).unwrap().clone(); + if let Some(op) = &self.op { + match op { + TensorOp::Binary(a, b, kind) => match kind { + BinaryOp::Add => { + *store.entry(a.id()).or_insert(a.zeros_like()) += &grad; + *store.entry(b.id()).or_insert(b.zeros_like()) += &grad; + } + BinaryOp::Mul => { + *store.entry(a.id()).or_insert(a.zeros_like()) += &grad * b.as_ref(); + *store.entry(b.id()).or_insert(b.zeros_like()) += &grad * a.as_ref(); + } + _ => todo!(), + }, + TensorOp::Unary(_a, kind) => match kind { + _ => todo!(), + }, + _ => {} + } } } + store } } diff --git a/tensor/src/impls/linalg.rs b/tensor/src/impls/linalg.rs index 50404d2b..f338bec4 100644 --- a/tensor/src/impls/linalg.rs +++ b/tensor/src/impls/linalg.rs @@ -18,6 +18,8 @@ where } let lhs_shape = lhs.shape().clone(); + let lhs_n = *lhs_shape.last().unwrap(); + // let lhs_m = lhs_shape. 
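    // Row-major flat indexing for the (m, k) x (k, n) product below:
    // out[i, j] sits at i * n + j, lhs[i, k] at i * lhs_n + k, and
    // rhs[k, j] at k * n + j, where n = rhs_shape[1] and lhs_n is the
    // shared inner dimension.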
let rhs_shape = rhs.shape().clone(); let shape = lhs_shape.matmul_shape(rhs.shape()).unwrap(); @@ -25,9 +27,9 @@ where for i in 0..lhs_shape[0] { for j in 0..rhs_shape[1] { - for k in 0..lhs_shape[1] { + for k in 0..lhs_n { let pos = i * rhs_shape[1] + j; - let left = i * lhs_shape[1] + k; + let left = i * lhs_n + k; let right = k * rhs_shape[1] + j; result[pos] += lhs.store[left] * rhs.store[right]; } @@ -42,6 +44,7 @@ where T: Scalar, { type Output = Self; + fn matmul(&self, other: &Self) -> Self { let shape = self.shape().matmul_shape(other.shape()).unwrap(); let mut result = vec![T::zero(); shape.elements()]; diff --git a/tensor/src/shape/shape.rs b/tensor/src/shape/shape.rs index 115a1fda..49f8a5e0 100644 --- a/tensor/src/shape/shape.rs +++ b/tensor/src/shape/shape.rs @@ -79,6 +79,34 @@ impl Shape { self.0.len().into() } + pub fn remove(&mut self, index: usize) -> usize { + self.0.remove(index) + } + + pub fn columns(&self) -> usize { + if self.len() >= 2 { + *self.last().unwrap() + } else if self.len() == 1 { + 1 + } else { + 0 + } + } + + pub fn rows(&self) -> usize { + if self.len() >= 2 { + self[self.len() - 2] + } else if self.len() == 1 { + self[0] + } else { + 0 + } + } + + pub fn set(&mut self, index: usize, dim: usize) { + self.0[index] = dim + } + pub(crate) fn stride_contiguous(&self) -> Vec { let mut stride: Vec<_> = self .0 diff --git a/tensor/src/tensor.rs b/tensor/src/tensor.rs index 8fc41864..49f15fcc 100644 --- a/tensor/src/tensor.rs +++ b/tensor/src/tensor.rs @@ -54,8 +54,8 @@ impl TensorBase { from_vec(shape, store) } /// Returns the unique identifier of the tensor. - pub fn id(&self) -> usize { - self.id.get() + pub fn id(&self) -> TensorId { + self.id } /// Get a reference to the layout of the tensor pub fn layout(&self) -> &Layout { @@ -77,6 +77,17 @@ impl TensorBase { pub fn stride(&self) -> &[usize] { self.layout.stride() } + + pub fn is_variable(&self) -> bool { + self.op.is_none() + } + + pub fn to_vec(&self) -> Vec + where + T: Clone, + { + self.store.clone() + } } impl TensorBase { diff --git a/tensor/tests/arith.rs b/tensor/tests/arith.rs index 9fb6d467..1c4bb313 100644 --- a/tensor/tests/arith.rs +++ b/tensor/tests/arith.rs @@ -12,7 +12,7 @@ fn test_add() { let shape = (2, 2); let a = Tensor::::ones(shape); let b = Tensor::::ones(shape); - let c = a + &b; + let c = &a + &b; assert_eq!(c, Tensor::fill(shape, 2_f64)); diff --git a/tensor/tests/backward.rs b/tensor/tests/backward.rs new file mode 100644 index 00000000..a8b6a9bd --- /dev/null +++ b/tensor/tests/backward.rs @@ -0,0 +1,49 @@ +/* + Appellation: backward + Contrib: FL03 +*/ +#![cfg(test)] +extern crate acme_tensor as acme; + +use acme::prelude::Tensor; + +#[test] +fn test_backward() { + let shape = (2, 2); + let a = Tensor::::ones(shape); + let b = Tensor::::ones(shape); + let c = &a + &b; + let grad = c.grad(); + + assert_eq!( + grad[&a.id()], + Tensor::ones(shape), + "{:?} != {:?}", + grad[&a.id()].to_vec(), + vec![1_f64; 4] + ); + assert_eq!(grad[&b.id()], Tensor::ones(shape)); + + let a = Tensor::::ones(shape); + let b = Tensor::::fill(shape, 2_f64); + let c = &a * &b; + + let grad = c.grad(); + + assert_eq!(grad[&a.id()], Tensor::::fill(shape, 2_f64)); + assert_eq!(grad[&b.id()], Tensor::ones(shape)); +} + +#[test] +#[ignore = "Needs to be fixed"] +fn test_add_mul() { + let shape = (2, 2); + let a = Tensor::::ones(shape); + let b = Tensor::::ones(shape); + let c = &a + &b; + let d = &a * &c; + let grad = d.grad(); + + assert_eq!(grad[&a.id()], Tensor::fill(shape, 3_f64)); + 
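    // Expected values: with d = a * (a + b), the gradient w.r.t. a is
    // (a + b) + a = 2a + b, which is 3 everywhere for all-ones inputs,
    // and the gradient w.r.t. b is a, which is 1 everywhere.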
assert_eq!(grad[&b.id()], Tensor::ones(shape)); +} From 8363af2ef2c435492bb4754e239d08e576085492 Mon Sep 17 00:00:00 2001 From: Joe McCain III Date: Fri, 22 Mar 2024 14:05:06 -0500 Subject: [PATCH 44/87] update Signed-off-by: Joe McCain III --- tensor/src/impls/create.rs | 10 ++--- tensor/src/impls/grad.rs | 25 ++++++------ tensor/src/impls/linalg.rs | 4 +- tensor/src/impls/ops/binary.rs | 16 ++++---- tensor/src/impls/ops/unary.rs | 42 ++++++++++---------- tensor/src/tensor.rs | 21 +++++++--- tensor/src/types/mod.rs | 2 + tensor/src/types/mode.rs | 72 ++++++++++++++++++++++++++++++++++ tensor/src/types/order.rs | 3 +- tensor/tests/backward.rs | 21 +++++----- 10 files changed, 147 insertions(+), 69 deletions(-) create mode 100644 tensor/src/types/mode.rs diff --git a/tensor/src/impls/create.rs b/tensor/src/impls/create.rs index 702314f1..d5731192 100644 --- a/tensor/src/impls/create.rs +++ b/tensor/src/impls/create.rs @@ -21,7 +21,7 @@ where pub fn fill(shape: impl IntoShape, value: T) -> Self { let shape = shape.into_shape(); let store = vec![value; shape.elements()]; - Self::from_vec(shape, store) + from_vec(false.into(), shape, store) } } @@ -41,7 +41,7 @@ where store.push(value); value += step; } - from_vec((store.len(),), store) + from_vec(false.into(), (store.len(),), store) } /// Create a tensor within a range of values @@ -57,7 +57,7 @@ where store.push(value); value += step; } - from_vec((store.len(),), store) + from_vec(false.into(), (store.len(),), store) } pub fn logstep(start: T, end: T, steps: usize) -> Self @@ -73,7 +73,7 @@ where store.push(value.exp2()); value += step; } - from_vec((store.len(),), store) + from_vec(false.into(), (store.len(),), store) } pub fn geomspace(start: T, end: T, steps: usize) -> Self @@ -89,7 +89,7 @@ where store.push(value.exp()); value += step; } - from_vec((store.len(),), store) + from_vec(false.into(), (store.len(),), store) } } impl TensorBase diff --git a/tensor/src/impls/grad.rs b/tensor/src/impls/grad.rs index 02363b1a..b7da0648 100644 --- a/tensor/src/impls/grad.rs +++ b/tensor/src/impls/grad.rs @@ -31,14 +31,10 @@ where } else if let Some(op) = node.op() { match op { TensorOp::Binary(a, b, _kind) => { - let (track_a, nodes) = walk(a, nodes, visited); - let (track_b, nodes) = walk(b, nodes, visited); - track_grad = track_a || track_b; - nodes - } - TensorOp::Unary(a, _kind) => { - let (track, nodes) = walk(a, nodes, visited); - track_grad = track; + let (tg, nodes) = walk(a, nodes, visited); + track_grad |= tg; + let (tg, nodes) = walk(b, nodes, visited); + track_grad |= tg; nodes } _ => nodes, @@ -65,7 +61,7 @@ where let sorted = self.sorted_nodes(); // initialize a new gradient store let mut store = GradStore::new(); - store.insert(sorted.first().unwrap().id(), self.ones_like()); + store.insert(self.id(), self.ones_like()); for node in sorted.iter() { if node.is_variable() { @@ -74,18 +70,19 @@ where let grad = store.get(&node.id()).unwrap().clone(); if let Some(op) = &self.op { match op { - TensorOp::Binary(a, b, kind) => match kind { + TensorOp::Binary(lhs, rhs, kind) => match kind { BinaryOp::Add => { - *store.entry(a.id()).or_insert(a.zeros_like()) += &grad; - *store.entry(b.id()).or_insert(b.zeros_like()) += &grad; + *store.entry(lhs.id()).or_insert(lhs.zeros_like()) += &grad; + *store.entry(rhs.id()).or_insert(rhs.zeros_like()) += &grad; } BinaryOp::Mul => { - *store.entry(a.id()).or_insert(a.zeros_like()) += &grad * b.as_ref(); - *store.entry(b.id()).or_insert(b.zeros_like()) += &grad * a.as_ref(); + 
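                        // Product rule: d(lhs * rhs) = rhs * d(lhs) + lhs * d(rhs),
                        // so each operand accumulates the incoming gradient scaled
                        // by the other side.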
*store.entry(lhs.id()).or_insert(lhs.zeros_like()) += &grad * rhs.as_ref(); + *store.entry(rhs.id()).or_insert(rhs.zeros_like()) += &grad * lhs.as_ref(); } _ => todo!(), }, TensorOp::Unary(_a, kind) => match kind { + _ => todo!(), }, _ => {} diff --git a/tensor/src/impls/linalg.rs b/tensor/src/impls/linalg.rs index f338bec4..58cfe15b 100644 --- a/tensor/src/impls/linalg.rs +++ b/tensor/src/impls/linalg.rs @@ -36,7 +36,7 @@ where } } let op = TensorOp::Matmul(Box::new(lhs.clone()), Box::new(rhs.clone())); - Ok(from_vec_with_op(op, shape, result)) + Ok(from_vec_with_op(false, op, shape, result)) } impl Matmul> for TensorBase @@ -58,6 +58,6 @@ where } } let op = TensorOp::Matmul(Box::new(self.clone()), Box::new(other.clone())); - from_vec_with_op(op, shape, result) + from_vec_with_op(false, op, shape, result) } } diff --git a/tensor/src/impls/ops/binary.rs b/tensor/src/impls/ops/binary.rs index e232255f..93b18cad 100644 --- a/tensor/src/impls/ops/binary.rs +++ b/tensor/src/impls/ops/binary.rs @@ -29,7 +29,7 @@ macro_rules! impl_arithmetic { let shape = self.shape().clone(); let store = self.data().iter().zip(other.data().iter()).map(|(a, b)| *a $op *b).collect(); let op = TensorOp::Binary(Box::new(self), Box::new(other), BinaryOp::$trait); - from_vec_with_op(op, shape, store) + from_vec_with_op(false, op, shape, store) } } @@ -46,7 +46,7 @@ macro_rules! impl_arithmetic { let shape = self.shape().clone(); let store = self.data().iter().zip(other.data().iter()).map(|(a, b)| *a $op *b).collect(); let op = TensorOp::Binary(Box::new(self), Box::new(other.clone()), BinaryOp::$trait); - from_vec_with_op(op, shape, store) + from_vec_with_op(false, op, shape, store) } } @@ -63,7 +63,7 @@ macro_rules! impl_arithmetic { let shape = self.shape().clone(); let store = self.data().iter().zip(other.data().iter()).map(|(a, b)| *a $op *b).collect(); let op = TensorOp::Binary(Box::new(self.clone()), Box::new(other), BinaryOp::$trait); - from_vec_with_op(op, shape, store) + from_vec_with_op(false, op, shape, store) } } @@ -80,7 +80,7 @@ macro_rules! impl_arithmetic { let shape = self.shape().clone(); let store = self.data().iter().zip(other.data().iter()).map(|(a, b)| *a $op *b).collect(); let op = TensorOp::Binary(Box::new(self.clone()), Box::new(other.clone()), BinaryOp::$trait); - from_vec_with_op(op, shape, store) + from_vec_with_op(false, op, shape, store) } } }; @@ -99,7 +99,7 @@ macro_rules! impl_scalar_arith { let shape = self.shape().clone(); let store = self.data().iter().map(|a| *a $op other).collect(); let op = TensorOp::BinaryScalar(Box::new(self), other, BinaryOp::$trait); - from_vec_with_op(op, shape, store) + from_vec_with_op(false, op, shape, store) } } @@ -113,7 +113,7 @@ macro_rules! impl_scalar_arith { let shape = self.shape().clone(); let store = self.data().iter().map(|a| *a $op other).collect(); let op = TensorOp::BinaryScalar(Box::new(self.clone()), other, BinaryOp::$trait); - from_vec_with_op(op, shape, store) + from_vec_with_op(false, op, shape, store) } } }; @@ -131,7 +131,7 @@ macro_rules! impl_assign_op { let store = self.data().iter().zip(other.data().iter()).map(|(a, b)| *a $op *b).collect(); let op = TensorOp::Binary(Box::new(self.clone()), Box::new(other), BinaryOp::$inner); - *self = from_vec_with_op(op, shape, store); + *self = from_vec_with_op(false, op, shape, store); } } @@ -145,7 +145,7 @@ macro_rules! 
impl_assign_op { let store = self.data().iter().zip(other.data().iter()).map(|(a, b)| *a $op *b).collect(); let op = TensorOp::Binary(Box::new(self.clone()), Box::new(other.clone()), BinaryOp::$inner); - *self = from_vec_with_op(op, shape, store); + *self = from_vec_with_op(false, op, shape, store); } } }; diff --git a/tensor/src/impls/ops/unary.rs b/tensor/src/impls/ops/unary.rs index 5bb67efb..1367b21a 100644 --- a/tensor/src/impls/ops/unary.rs +++ b/tensor/src/impls/ops/unary.rs @@ -16,7 +16,7 @@ where let shape = self.shape().clone(); let store = self.data().iter().copied().map(|a| -a).collect(); let op = TensorOp::Unary(Box::new(self), UnaryOp::Neg); - from_vec_with_op(op, shape, store) + from_vec_with_op(false, op, shape, store) } } @@ -30,33 +30,33 @@ where let shape = self.shape().clone(); let store = self.data().iter().copied().map(|a| -a).collect(); let op = TensorOp::Unary(Box::new(self.clone()), UnaryOp::Neg); - from_vec_with_op(op, shape, store) + from_vec_with_op(false, op, shape, store) } } macro_rules! impl_unary_arith { ($variant:ident, $method:ident, $e:expr) => { - impl TensorBase - where - T: Scalar, - { - pub fn $method(self) -> Self { - let shape = self.shape().clone(); - let store = self.store.iter().map($e).collect(); - let op = TensorOp::::Unary(Box::new(self), UnaryOp::$variant); - from_vec_with_op(op, shape, store) - } + pub fn $method(self) -> Self { + let shape = self.shape().clone(); + let store = self.store.iter().map($e).collect(); + let op = TensorOp::::Unary(Box::new(self), UnaryOp::$variant); + from_vec_with_op(false, op, shape, store) } }; } -impl_unary_arith!(Exp, exp, |v| v.exp()); -// impl_unary_arith!(Log, log, |v| v.log()); +impl TensorBase +where + T: Scalar, +{ + impl_unary_arith!(Exp, exp, |v| v.exp()); + // impl_unary_arith!(Log, log, |v| v.log()); -impl_unary_arith!(Cos, cos, |v| v.cos()); -impl_unary_arith!(Cosh, cosh, |v| v.cosh()); -impl_unary_arith!(Sin, sin, |v| v.sin()); -impl_unary_arith!(Sinh, sinh, |v| v.sinh()); -impl_unary_arith!(Sqrt, sqrt, |v| v.sqrt()); -impl_unary_arith!(Tan, tan, |v| v.tan()); -impl_unary_arith!(Tanh, tanh, |v| v.tanh()); + impl_unary_arith!(Cos, cos, |v| v.cos()); + impl_unary_arith!(Cosh, cosh, |v| v.cosh()); + impl_unary_arith!(Sin, sin, |v| v.sin()); + impl_unary_arith!(Sinh, sinh, |v| v.sinh()); + impl_unary_arith!(Sqrt, sqrt, |v| v.sqrt()); + impl_unary_arith!(Tan, tan, |v| v.tan()); + impl_unary_arith!(Tanh, tanh, |v| v.tanh()); +} diff --git a/tensor/src/tensor.rs b/tensor/src/tensor.rs index 49f15fcc..b24b0c73 100644 --- a/tensor/src/tensor.rs +++ b/tensor/src/tensor.rs @@ -3,14 +3,15 @@ Contrib: FL03 */ // use crate::ops::TrackedOp; -use crate::prelude::{IntoShape, Rank, Shape, TensorId, TensorOp}; +use crate::prelude::{IntoShape, Rank, Shape, TensorId, TensorMode, TensorOp}; use crate::store::Layout; use std::ops::Index; // use std::sync::{Arc, RwLock}; -pub(crate) fn from_vec(shape: impl IntoShape, store: Vec) -> TensorBase { +pub(crate) fn from_vec(kind: TensorMode, shape: impl IntoShape, store: Vec) -> TensorBase { TensorBase { id: TensorId::new(), + kind, layout: Layout::contiguous(shape), op: None, store, @@ -18,6 +19,7 @@ pub(crate) fn from_vec(shape: impl IntoShape, store: Vec) -> TensorBase } pub(crate) fn from_vec_with_op( + kind: impl Into, op: TensorOp, shape: impl IntoShape, store: Vec, @@ -25,6 +27,7 @@ pub(crate) fn from_vec_with_op( let layout = Layout::contiguous(shape); TensorBase { id: TensorId::new(), + kind: kind.into(), layout, op: Some(op), store, @@ -35,23 +38,25 @@ pub(crate) 
fn from_vec_with_op( // #[derive(Clone, Debug, Eq, Hash, Ord, PartialOrd)] pub struct TensorBase { pub(crate) id: TensorId, + pub(crate) kind: TensorMode, pub(crate) layout: Layout, pub(crate) op: Option>, pub(crate) store: Vec, } impl TensorBase { - pub fn new(shape: impl IntoShape) -> Self { + pub fn new(kind: TensorMode, shape: impl IntoShape) -> Self { Self { id: TensorId::new(), + kind, layout: Layout::contiguous(shape), op: None, store: Vec::new(), } } - pub fn from_vec(shape: impl IntoShape, store: Vec) -> Self { - from_vec(shape, store) + pub fn from_vec(kind: TensorMode, shape: impl IntoShape, store: Vec) -> Self { + from_vec(kind, shape, store) } /// Returns the unique identifier of the tensor. pub fn id(&self) -> TensorId { @@ -79,9 +84,13 @@ impl TensorBase { } pub fn is_variable(&self) -> bool { - self.op.is_none() + self.kind.is_variable() } + pub fn variable(mut self) -> Self { + self.kind = TensorMode::Variable; + self + } pub fn to_vec(&self) -> Vec where T: Clone, diff --git a/tensor/src/types/mod.rs b/tensor/src/types/mod.rs index 824c9ac2..ec977ea2 100644 --- a/tensor/src/types/mod.rs +++ b/tensor/src/types/mod.rs @@ -4,9 +4,11 @@ */ pub mod id; +pub mod mode; pub mod order; pub(crate) mod prelude { pub use super::id::TensorId; + pub use super::mode::TensorMode; pub use super::order::MajorOrder; } diff --git a/tensor/src/types/mode.rs b/tensor/src/types/mode.rs new file mode 100644 index 00000000..ec8683ac --- /dev/null +++ b/tensor/src/types/mode.rs @@ -0,0 +1,72 @@ +/* + Appellation: mode + Contrib: FL03 +*/ +#[cfg(feature = "serde")] +use serde::{Deserialize, Serialize}; +use strum::{Display, EnumCount, EnumIs, EnumIter, EnumString, VariantNames}; + +#[cfg_attr( + feature = "serde", + derive(Deserialize, Serialize,), + serde(rename_all = "lowercase", untagged) +)] +#[derive( + Clone, + Copy, + Debug, + Default, + Display, + EnumCount, + EnumIs, + EnumIter, + EnumString, + Eq, + Hash, + Ord, + PartialEq, + PartialOrd, + VariantNames, +)] +#[repr(u8)] +#[strum(serialize_all = "lowercase")] +pub enum TensorMode { + #[default] + Normal, + Variable, +} + +impl TensorMode { + pub fn normal() -> Self { + Self::Normal + } + + pub fn variable() -> Self { + Self::Variable + } +} + +impl From for usize { + fn from(mode: TensorMode) -> Self { + mode as usize + } +} + +impl From for TensorMode { + fn from(mode: usize) -> Self { + match mode % Self::COUNT { + 0 => Self::Normal, + _ => Self::Variable, + } + } +} + +impl From for TensorMode { + fn from(is_variable: bool) -> Self { + if is_variable { + Self::Variable + } else { + Self::Normal + } + } +} diff --git a/tensor/src/types/order.rs b/tensor/src/types/order.rs index d6e9b9f9..a8f35c28 100644 --- a/tensor/src/types/order.rs +++ b/tensor/src/types/order.rs @@ -4,7 +4,7 @@ */ #[cfg(feature = "serde")] use serde::{Deserialize, Serialize}; -use strum::{Display, EnumCount, EnumIter, EnumString, VariantNames}; +use strum::{Display, EnumCount, EnumIs, EnumIter, EnumString, VariantNames}; #[cfg_attr( feature = "serde", @@ -18,6 +18,7 @@ use strum::{Display, EnumCount, EnumIter, EnumString, VariantNames}; Default, Display, EnumCount, + EnumIs, EnumIter, EnumString, Eq, diff --git a/tensor/tests/backward.rs b/tensor/tests/backward.rs index a8b6a9bd..4a7dd0ee 100644 --- a/tensor/tests/backward.rs +++ b/tensor/tests/backward.rs @@ -10,22 +10,19 @@ use acme::prelude::Tensor; #[test] fn test_backward() { let shape = (2, 2); - let a = Tensor::::ones(shape); - let b = Tensor::::ones(shape); + let a = Tensor::::ones(shape).variable(); + let b 
= Tensor::::ones(shape).variable(); let c = &a + &b; let grad = c.grad(); assert_eq!( grad[&a.id()], Tensor::ones(shape), - "{:?} != {:?}", - grad[&a.id()].to_vec(), - vec![1_f64; 4] ); assert_eq!(grad[&b.id()], Tensor::ones(shape)); - let a = Tensor::::ones(shape); - let b = Tensor::::fill(shape, 2_f64); + let a = Tensor::::ones(shape).variable(); + let b = Tensor::::fill(shape, 2_f64).variable(); let c = &a * &b; let grad = c.grad(); @@ -38,12 +35,12 @@ fn test_backward() { #[ignore = "Needs to be fixed"] fn test_add_mul() { let shape = (2, 2); - let a = Tensor::::ones(shape); - let b = Tensor::::ones(shape); - let c = &a + &b; - let d = &a * &c; + let a = Tensor::::ones(shape).variable(); + let b = Tensor::::ones(shape).variable(); + // let c = &a + &b; + let d = &a * (&a + &b); let grad = d.grad(); - assert_eq!(grad[&a.id()], Tensor::fill(shape, 3_f64)); + assert_eq!(grad[&a.id()], &a * 2.0 + &b); assert_eq!(grad[&b.id()], Tensor::ones(shape)); } From bc162412641252e465603f2d4ad51d23a1c6cbb2 Mon Sep 17 00:00:00 2001 From: Joe McCain III Date: Fri, 22 Mar 2024 14:14:47 -0500 Subject: [PATCH 45/87] update Signed-off-by: Joe McCain III --- tensor/src/impls/grad.rs | 7 ++++--- tensor/src/impls/reshape.rs | 13 +++++++++---- tensor/src/shape/error.rs | 1 + tensor/src/shape/mod.rs | 1 + tensor/src/tensor.rs | 3 +++ tensor/tests/backward.rs | 5 +---- tensor/tests/composition.rs | 10 ++++++++++ 7 files changed, 29 insertions(+), 11 deletions(-) diff --git a/tensor/src/impls/grad.rs b/tensor/src/impls/grad.rs index b7da0648..14cf832f 100644 --- a/tensor/src/impls/grad.rs +++ b/tensor/src/impls/grad.rs @@ -76,13 +76,14 @@ where *store.entry(rhs.id()).or_insert(rhs.zeros_like()) += &grad; } BinaryOp::Mul => { - *store.entry(lhs.id()).or_insert(lhs.zeros_like()) += &grad * rhs.as_ref(); - *store.entry(rhs.id()).or_insert(rhs.zeros_like()) += &grad * lhs.as_ref(); + *store.entry(lhs.id()).or_insert(lhs.zeros_like()) += + &grad * rhs.as_ref(); + *store.entry(rhs.id()).or_insert(rhs.zeros_like()) += + &grad * lhs.as_ref(); } _ => todo!(), }, TensorOp::Unary(_a, kind) => match kind { - _ => todo!(), }, _ => {} diff --git a/tensor/src/impls/reshape.rs b/tensor/src/impls/reshape.rs index 6cbc3ea3..df6b4d78 100644 --- a/tensor/src/impls/reshape.rs +++ b/tensor/src/impls/reshape.rs @@ -2,7 +2,7 @@ Appellation: reshape Contrib: FL03 */ -use crate::prelude::IntoShape; +use crate::prelude::{IntoShape, ShapeError, TensorResult}; use crate::tensor::TensorBase; impl TensorBase @@ -17,9 +17,14 @@ where unimplemented!() } - pub fn reshape(&self, shape: impl IntoShape) -> Self { - let _shape = shape.into_shape(); + pub fn reshape(self, shape: impl IntoShape) -> TensorResult { + let mut tensor = self; + let shape = shape.into_shape(); + if tensor.elements() != shape.elements() { + return Err(ShapeError::MismatchedElements.into()); + } - unimplemented!() + tensor.layout.shape = shape; + Ok(tensor) } } diff --git a/tensor/src/shape/error.rs b/tensor/src/shape/error.rs index 4de02823..b18f41d2 100644 --- a/tensor/src/shape/error.rs +++ b/tensor/src/shape/error.rs @@ -34,6 +34,7 @@ pub type ShapeResult = std::result::Result; pub enum ShapeError { IncompatibleShapes, InvalidShape, + MismatchedElements, } unsafe impl Send for ShapeError {} diff --git a/tensor/src/shape/mod.rs b/tensor/src/shape/mod.rs index fd717d73..ee55c46a 100644 --- a/tensor/src/shape/mod.rs +++ b/tensor/src/shape/mod.rs @@ -28,6 +28,7 @@ where pub(crate) mod prelude { pub use super::dim::*; + pub use super::error::*; pub use super::shape::*; pub use 
super::stride::*; pub use super::IntoShape; diff --git a/tensor/src/tensor.rs b/tensor/src/tensor.rs index b24b0c73..ada43c0f 100644 --- a/tensor/src/tensor.rs +++ b/tensor/src/tensor.rs @@ -58,6 +58,9 @@ impl TensorBase { pub fn from_vec(kind: TensorMode, shape: impl IntoShape, store: Vec) -> Self { from_vec(kind, shape, store) } + pub fn elements(&self) -> usize { + self.layout.elements() + } /// Returns the unique identifier of the tensor. pub fn id(&self) -> TensorId { self.id diff --git a/tensor/tests/backward.rs b/tensor/tests/backward.rs index 4a7dd0ee..82cd65be 100644 --- a/tensor/tests/backward.rs +++ b/tensor/tests/backward.rs @@ -15,10 +15,7 @@ fn test_backward() { let c = &a + &b; let grad = c.grad(); - assert_eq!( - grad[&a.id()], - Tensor::ones(shape), - ); + assert_eq!(grad[&a.id()], Tensor::ones(shape),); assert_eq!(grad[&b.id()], Tensor::ones(shape)); let a = Tensor::::ones(shape).variable(); diff --git a/tensor/tests/composition.rs b/tensor/tests/composition.rs index 554ce2b5..e703130b 100644 --- a/tensor/tests/composition.rs +++ b/tensor/tests/composition.rs @@ -16,6 +16,16 @@ fn test_tensor() { assert_ne!(&a, &b); } +#[test] +fn test_reshape() { + let shape = (2, 2); + let a = Tensor::::ones(shape); + let b = a.clone().reshape((4,)).unwrap(); + + assert_ne!(&a.shape(), &b.shape()); + assert_eq!(&a.elements(), &b.elements()); +} + #[test] fn test_arange() { let exp = Shape::from(10); From 6fc5798eb2f020deeee1be93286087498b9e03ef Mon Sep 17 00:00:00 2001 From: Joe McCain III Date: Sat, 23 Mar 2024 09:23:37 -0500 Subject: [PATCH 46/87] update Signed-off-by: Joe McCain III --- core/src/lib.rs | 2 - core/src/ops/binary/mod.rs | 3 +- core/src/ops/binary/specs.rs | 54 ++++++++++++++ core/src/specs/gradient.rs | 2 +- core/src/specs/mod.rs | 4 +- core/src/{stores/mod.rs => specs/store.rs} | 5 -- core/src/stores/gradient.rs | 54 -------------- core/src/stores/stack.rs | 8 -- graphs/src/grad/store.rs | 2 +- tensor/src/actions/grad/mod.rs | 13 ++++ tensor/src/actions/grad/store.rs | 85 ++++++++++++++++++++++ tensor/src/actions/mod.rs | 5 ++ tensor/src/impls/create.rs | 6 +- tensor/src/impls/grad.rs | 84 +++++++++++---------- tensor/src/linalg/mod.rs | 1 + tensor/src/linalg/uplo/kinds.rs | 51 +++++++++++++ tensor/src/linalg/uplo/mod.rs | 13 ++++ tensor/src/shape/shape.rs | 8 +- tensor/src/tensor.rs | 1 + tensor/tests/backward.rs | 4 +- tensor/tests/composition.rs | 2 +- 21 files changed, 284 insertions(+), 123 deletions(-) create mode 100644 core/src/ops/binary/specs.rs rename core/src/{stores/mod.rs => specs/store.rs} (94%) delete mode 100644 core/src/stores/gradient.rs delete mode 100644 core/src/stores/stack.rs create mode 100644 tensor/src/actions/grad/mod.rs create mode 100644 tensor/src/actions/grad/store.rs create mode 100644 tensor/src/linalg/uplo/kinds.rs create mode 100644 tensor/src/linalg/uplo/mod.rs diff --git a/core/src/lib.rs b/core/src/lib.rs index 6b88b58e..a7104cdc 100644 --- a/core/src/lib.rs +++ b/core/src/lib.rs @@ -16,7 +16,6 @@ pub mod eval; pub mod id; pub mod ops; pub mod specs; -pub mod stores; pub mod types; pub mod prelude { @@ -28,6 +27,5 @@ pub mod prelude { pub use crate::id::*; pub use crate::ops::*; pub use crate::specs::prelude::*; - pub use crate::stores::*; pub use crate::types::*; } diff --git a/core/src/ops/binary/mod.rs b/core/src/ops/binary/mod.rs index e941c04f..b4bde322 100644 --- a/core/src/ops/binary/mod.rs +++ b/core/src/ops/binary/mod.rs @@ -2,10 +2,11 @@ Appellation: binary Contrib: FL03 */ -pub use self::{kinds::*, operator::*}; +pub 
use self::{kinds::*, operator::*, specs::*}; pub(crate) mod kinds; pub(crate) mod operator; +pub(crate) mod specs; #[cfg(test)] mod tests {} diff --git a/core/src/ops/binary/specs.rs b/core/src/ops/binary/specs.rs new file mode 100644 index 00000000..dc41afc5 --- /dev/null +++ b/core/src/ops/binary/specs.rs @@ -0,0 +1,54 @@ +/* + Appellation: specs + Contrib: FL03 +*/ + + +pub trait Logarithm { + type Output; + + fn log(self, base: T) -> Self::Output; +} + +macro_rules! impl_log { + ($t:ty) => { + impl Logarithm<$t> for $t { + type Output = $t; + + fn log(self, base: $t) -> Self::Output { + self.log(base) + } + } + }; + (other $t:ty => $out:ty; $method:ident) => { + impl Logarithm<$t> for $t { + type Output = $out; + + fn log(self, base: $t) -> Self::Output { + self.$method(base) + } + } + }; + (all [$($t:ty),*]) => { + $( + impl_log!($t); + )* + }; +} + + + +impl_log!(all [f32, f64]); + +impl_log!(other i8 => u32; ilog); +impl_log!(other i16 => u32; ilog); +impl_log!(other i32 => u32; ilog); +impl_log!(other i64 => u32; ilog); +impl_log!(other i128 => u32; ilog); +impl_log!(other isize => u32; ilog); +impl_log!(other u8 => u32; ilog); +impl_log!(other u16 => u32; ilog); +impl_log!(other u32 => u32; ilog); +impl_log!(other u64 => u32; ilog); +impl_log!(other u128 => u32; ilog); +impl_log!(other usize => u32; ilog); \ No newline at end of file diff --git a/core/src/specs/gradient.rs b/core/src/specs/gradient.rs index b660976f..8be0df67 100644 --- a/core/src/specs/gradient.rs +++ b/core/src/specs/gradient.rs @@ -3,7 +3,7 @@ Contrib: FL03 */ -use crate::prelude::Store; +use super::store::Store; pub trait IsDifferentiable { fn differentiable(&self) -> bool; diff --git a/core/src/specs/mod.rs b/core/src/specs/mod.rs index 072d3580..b91a59a0 100644 --- a/core/src/specs/mod.rs +++ b/core/src/specs/mod.rs @@ -3,10 +3,11 @@ Contrib: FL03 */ -pub use self::{gradient::*, operand::*}; +pub use self::{gradient::*, operand::*, store::*}; pub(crate) mod gradient; pub(crate) mod operand; +pub(crate) mod store; pub mod func; @@ -14,6 +15,7 @@ pub(crate) mod prelude { pub use super::func::*; pub use super::gradient::*; pub use super::operand::Operand; + pub use super::store::*; } #[cfg(test)] diff --git a/core/src/stores/mod.rs b/core/src/specs/store.rs similarity index 94% rename from core/src/stores/mod.rs rename to core/src/specs/store.rs index 732d2ba6..c55e1c43 100644 --- a/core/src/stores/mod.rs +++ b/core/src/specs/store.rs @@ -2,11 +2,6 @@ Appellation: stores Contrib: FL03 */ -pub use self::{gradient::*, stack::*}; - -pub(crate) mod gradient; -pub(crate) mod stack; - use std::borrow::Borrow; use std::collections::{BTreeMap, HashMap}; diff --git a/core/src/stores/gradient.rs b/core/src/stores/gradient.rs deleted file mode 100644 index 0089adae..00000000 --- a/core/src/stores/gradient.rs +++ /dev/null @@ -1,54 +0,0 @@ -/* - Appellation: gradient - Contrib: FL03 -*/ -use super::Store; -use std::any::Any; -use std::collections::BTreeMap; - -pub struct GradientStore { - store: BTreeMap>, -} - -impl GradientStore -where - K: Ord, -{ - pub fn new() -> Self { - Self { - store: BTreeMap::new(), - } - } - - pub fn or_insert(&mut self, key: K, value: Box) -> &mut dyn Any { - self.store.entry(key).or_insert(value) - } -} - -impl Store for GradientStore -where - K: Ord, - T: Clone + 'static, -{ - fn get(&self, key: &K) -> Option<&T> { - self.store.get(key).map(|v| v.downcast_ref::().unwrap()) - } - - fn get_mut(&mut self, key: &K) -> Option<&mut T> { - self.store - .get_mut(key) - .map(|v| 
v.downcast_mut::().unwrap()) - } - - fn insert(&mut self, key: K, value: T) -> Option { - self.store - .insert(key, Box::new(value)) - .map(|v| v.downcast_ref::().unwrap().clone()) - } - - fn remove(&mut self, key: &K) -> Option { - self.store - .remove(key) - .map(|v| v.downcast_ref::().unwrap().clone()) - } -} diff --git a/core/src/stores/stack.rs b/core/src/stores/stack.rs deleted file mode 100644 index 4e672757..00000000 --- a/core/src/stores/stack.rs +++ /dev/null @@ -1,8 +0,0 @@ -/* - Appellation: stack - Contrib: FL03 -*/ - -pub struct Stack { - pub store: Vec<(K, V)>, -} diff --git a/graphs/src/grad/store.rs b/graphs/src/grad/store.rs index be163931..44315386 100644 --- a/graphs/src/grad/store.rs +++ b/graphs/src/grad/store.rs @@ -3,7 +3,7 @@ Contrib: FL03 */ use crate::NodeIndex; -use acme::stores::Store; +use acme::prelude::Store; use std::any::Any; use std::collections::BTreeMap; diff --git a/tensor/src/actions/grad/mod.rs b/tensor/src/actions/grad/mod.rs new file mode 100644 index 00000000..0ba83036 --- /dev/null +++ b/tensor/src/actions/grad/mod.rs @@ -0,0 +1,13 @@ +/* + Appellation: grad + Contrib: FL03 +*/ +//! # Gradient +//! +//! +pub use self::store::GradStore; + +pub(crate) mod store; + +#[cfg(test)] +mod tests {} \ No newline at end of file diff --git a/tensor/src/actions/grad/store.rs b/tensor/src/actions/grad/store.rs new file mode 100644 index 00000000..bf810d33 --- /dev/null +++ b/tensor/src/actions/grad/store.rs @@ -0,0 +1,85 @@ +/* + Appellation: store + Contrib: FL03 +*/ +use crate::TensorBase; +use crate::prelude::TensorId; +use acme::prelude::Store; +use std::collections::btree_map::{BTreeMap, Entry}; +use std::ops::{Index, IndexMut}; + +pub struct GradStore { + pub(crate) store: BTreeMap> +} + +impl GradStore { + pub fn new() -> Self { + Self { + store: BTreeMap::new() + } + } + /// Clears the store, removing all values. + pub fn clear(&mut self) { + self.store.clear() + } + /// Returns a reference to the value corresponding to the key. + pub fn entry(&mut self, key: TensorId) -> Entry<'_, TensorId, TensorBase> { + self.store.entry(key) + } + /// Returns a reference to the value corresponding to the key. + pub fn get_tensor(&self, item: &TensorBase) -> Option<&TensorBase> { + self.store.get(&item.id()) + } + /// Inserts a tensor into the store. + pub fn insert_tensor(&mut self, tensor: TensorBase) -> Option> { + self.insert(tensor.id, tensor) + } + /// Returns true if the store contains no elements. + pub fn is_empty(&self) -> bool { + self.store.is_empty() + } + /// Returns the number of elements in the store. + pub fn len(&self) -> usize { + self.store.len() + } + /// If the store does not have a tensor with the given id, insert it. + /// Returns a mutable reference to the tensor. 
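    /// (Keyed by the given tensor's own id, mirroring the standard
    /// `BTreeMap` `entry(..).or_insert(..)` idiom.)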
+ pub fn or_insert(&mut self, tensor: TensorBase) -> &mut TensorBase { + self.entry(tensor.id).or_insert(tensor) + } + +} + +impl Store> for GradStore { + fn get(&self, key: &TensorId) -> Option<&TensorBase> { + self.store.get(key) + } + + fn get_mut(&mut self, key: &TensorId) -> Option<&mut TensorBase> { + self.store.get_mut(key) + } + + fn insert(&mut self, key: TensorId, value: TensorBase) -> Option> { + self.store.insert(key, value) + } + + fn remove(&mut self, key: &TensorId) -> Option> { + self.store.remove(key) + } + +} + +impl Index<&TensorId> for GradStore { + type Output = TensorBase; + + fn index(&self, index: &TensorId) -> &Self::Output { + &self.store[index] + } +} + +impl IndexMut<&TensorId> for GradStore { + fn index_mut(&mut self, index: &TensorId) -> &mut Self::Output { + self.get_mut(index).expect("Tensor not found") + } + +} \ No newline at end of file diff --git a/tensor/src/actions/mod.rs b/tensor/src/actions/mod.rs index 1ac686aa..ec0edfa4 100644 --- a/tensor/src/actions/mod.rs +++ b/tensor/src/actions/mod.rs @@ -7,7 +7,12 @@ //! pub mod arange; +pub mod grad; pub mod index; +pub(crate) mod prelude { + pub use super::arange::*; +} + #[cfg(test)] mod tests {} diff --git a/tensor/src/impls/create.rs b/tensor/src/impls/create.rs index d5731192..0cf3510d 100644 --- a/tensor/src/impls/create.rs +++ b/tensor/src/impls/create.rs @@ -45,7 +45,7 @@ where } /// Create a tensor within a range of values - pub fn linstep(start: T, end: T, steps: usize) -> Self + pub fn linspace(start: T, end: T, steps: usize) -> Self where T: FromPrimitive, { @@ -60,9 +60,9 @@ where from_vec(false.into(), (store.len(),), store) } - pub fn logstep(start: T, end: T, steps: usize) -> Self + pub fn logspace(start: T, end: T, steps: usize) -> Self where - T: num::Float, + T: num::traits::real::Real, { let start = start.log2(); let end = end.log2(); diff --git a/tensor/src/impls/grad.rs b/tensor/src/impls/grad.rs index 14cf832f..d59860ce 100644 --- a/tensor/src/impls/grad.rs +++ b/tensor/src/impls/grad.rs @@ -2,58 +2,61 @@ Appellation: grad Contrib: FL03 */ +use crate::actions::grad::GradStore; use crate::prelude::{Scalar, TensorId, TensorOp}; -use crate::tensor::*; +use crate::TensorBase; use acme::ops::binary::BinaryOp; +use acme::prelude::Store; use std::collections::HashMap; -pub(crate) type GradStore = std::collections::BTreeMap; + +// The vec of sorted nodes is passed as an owned value rather than a mutable reference +// to get around some lifetime limitations. +fn walk<'a, T>( + node: &'a TensorBase, + nodes: Vec<&'a TensorBase>, + visited: &mut HashMap, +) -> (bool, Vec<&'a TensorBase>) { + if let Some(&tg) = visited.get(&node.id()) { + return (tg, nodes); + } + // track the gradient of the current node + let mut track = false; + let mut nodes = if node.is_variable() { + // Do not call recursively on the "leaf" nodes. + track = true; + nodes + } else if let Some(op) = node.op() { + match op { + TensorOp::Binary(lhs, rhs, _kind) => { + let (tg, nodes) = walk(lhs, nodes, visited); + track |= tg; + let (tg, nodes) = walk(rhs, nodes, visited); + track |= tg; + nodes + } + _ => nodes, + } + } else { + nodes + }; + visited.insert(node.id(), track); + if track { + nodes.push(node); + } + (track, nodes) +} impl TensorBase where T: Scalar, { fn sorted_nodes(&self) -> Vec<&TensorBase> { - // The vec of sorted nodes is passed as an owned value rather than a mutable reference - // to get around some lifetime limitations. 
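// `walk` is a depth-first traversal of the op graph: it pushes a node
// only after visiting its operands and reports whether a variable is
// reachable from it, so `sorted_nodes` can reverse the resulting list
// and let backpropagation visit consumers before their operands.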
- fn walk<'a, T>( - node: &'a TensorBase, - nodes: Vec<&'a TensorBase>, - visited: &mut HashMap, - ) -> (bool, Vec<&'a TensorBase>) { - if let Some(&tg) = visited.get(&node.id()) { - return (tg, nodes); - } - let mut track_grad = false; - let mut nodes = if node.is_variable() { - // Do not call recursively on the "leaf" nodes. - track_grad = true; - nodes - } else if let Some(op) = node.op() { - match op { - TensorOp::Binary(a, b, _kind) => { - let (tg, nodes) = walk(a, nodes, visited); - track_grad |= tg; - let (tg, nodes) = walk(b, nodes, visited); - track_grad |= tg; - nodes - } - _ => nodes, - } - } else { - nodes - }; - visited.insert(node.id(), track_grad); - if track_grad { - nodes.push(node); - } - (track_grad, nodes) - } let (_tg, mut nodes) = walk(self, vec![], &mut HashMap::new()); nodes.reverse(); nodes } - pub fn grad(&self) -> GradStore> + pub fn grad(&self) -> GradStore where T: std::fmt::Debug, { @@ -61,13 +64,16 @@ where let sorted = self.sorted_nodes(); // initialize a new gradient store let mut store = GradStore::new(); + // insert the gradient w.r.t. the current node store.insert(self.id(), self.ones_like()); for node in sorted.iter() { if node.is_variable() { continue; } - let grad = store.get(&node.id()).unwrap().clone(); + // get the gradient of the node + let grad = store.remove(&node.id()).expect("Gradient not found"); + // handle the different types of operations if let Some(op) = &self.op { match op { TensorOp::Binary(lhs, rhs, kind) => match kind { diff --git a/tensor/src/linalg/mod.rs b/tensor/src/linalg/mod.rs index 667ff8cb..aa31f9a7 100644 --- a/tensor/src/linalg/mod.rs +++ b/tensor/src/linalg/mod.rs @@ -6,6 +6,7 @@ //! //! pub mod arith; +pub mod uplo; pub trait Inverse { fn inverse(&self) -> Self; diff --git a/tensor/src/linalg/uplo/kinds.rs b/tensor/src/linalg/uplo/kinds.rs new file mode 100644 index 00000000..3aec3fbf --- /dev/null +++ b/tensor/src/linalg/uplo/kinds.rs @@ -0,0 +1,51 @@ +/* + Appellation: kinds + Contrib: FL03 +*/ +#[cfg(feature = "serde")] +use serde::{Deserialize, Serialize}; +use strum::{Display, EnumCount, EnumIs, EnumIter, EnumString, VariantNames}; + +#[cfg_attr( + feature = "serde", + derive(Deserialize, Serialize,), + serde(rename_all = "lowercase", untagged) +)] +#[derive( + Clone, + Copy, + Debug, + Default, + Display, + EnumCount, + EnumIs, + EnumIter, + EnumString, + Eq, + Hash, + Ord, + PartialEq, + PartialOrd, + VariantNames, +)] +#[repr(usize)] +#[strum(serialize_all = "lowercase")] +pub enum UPLO { + Lower, + #[default] + Upper, +} + +impl UPLO { + pub fn lower() -> Self { + Self::Lower + } + + pub fn upper() -> Self { + Self::Upper + } +} + +unsafe impl Send for UPLO {} + +unsafe impl Sync for UPLO {} diff --git a/tensor/src/linalg/uplo/mod.rs b/tensor/src/linalg/uplo/mod.rs new file mode 100644 index 00000000..5f848d52 --- /dev/null +++ b/tensor/src/linalg/uplo/mod.rs @@ -0,0 +1,13 @@ +/* + Appellation: uplo + Contrib: FL03 +*/ +//! # Upper Lower +//! +//! 
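//! `UPLO` mirrors the LAPACK parameter of the same name: it selects
//! whether a routine references the upper or the lower triangle of a
//! matrix.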
+pub use self::kinds::UPLO; + +pub(crate) mod kinds; + +#[cfg(test)] +mod tests {} \ No newline at end of file diff --git a/tensor/src/shape/shape.rs b/tensor/src/shape/shape.rs index 49f8a5e0..1c4ff155 100644 --- a/tensor/src/shape/shape.rs +++ b/tensor/src/shape/shape.rs @@ -85,7 +85,7 @@ impl Shape { pub fn columns(&self) -> usize { if self.len() >= 2 { - *self.last().unwrap() + self.0[1] } else if self.len() == 1 { 1 } else { @@ -94,10 +94,8 @@ impl Shape { } pub fn rows(&self) -> usize { - if self.len() >= 2 { - self[self.len() - 2] - } else if self.len() == 1 { - self[0] + if self.len() >= 1 { + *self.0.first().unwrap() } else { 0 } diff --git a/tensor/src/tensor.rs b/tensor/src/tensor.rs index ada43c0f..e4187462 100644 --- a/tensor/src/tensor.rs +++ b/tensor/src/tensor.rs @@ -58,6 +58,7 @@ impl TensorBase { pub fn from_vec(kind: TensorMode, shape: impl IntoShape, store: Vec) -> Self { from_vec(kind, shape, store) } + /// Returns the number of elements in the tensor. pub fn elements(&self) -> usize { self.layout.elements() } diff --git a/tensor/tests/backward.rs b/tensor/tests/backward.rs index 82cd65be..fb50a5e8 100644 --- a/tensor/tests/backward.rs +++ b/tensor/tests/backward.rs @@ -29,7 +29,7 @@ fn test_backward() { } #[test] -#[ignore = "Needs to be fixed"] +// #[ignore = "Needs to be fixed"] fn test_add_mul() { let shape = (2, 2); let a = Tensor::::ones(shape).variable(); @@ -38,6 +38,6 @@ fn test_add_mul() { let d = &a * (&a + &b); let grad = d.grad(); - assert_eq!(grad[&a.id()], &a * 2.0 + &b); + assert_eq!(grad[&a.id()], Tensor::fill(shape, 3_f64)); assert_eq!(grad[&b.id()], Tensor::ones(shape)); } diff --git a/tensor/tests/composition.rs b/tensor/tests/composition.rs index e703130b..d7e7c265 100644 --- a/tensor/tests/composition.rs +++ b/tensor/tests/composition.rs @@ -40,7 +40,7 @@ fn test_arange() { #[test] fn test_linstep() { let exp = Shape::from(10); - let a = Tensor::linstep(0_f64, 10_f64, 10); + let a = Tensor::linspace(0_f64, 10_f64, 10); assert_eq!(a.shape(), &exp); let b = Tensor::arange(0_f64, 10_f64, 1_f64); for i in 0..10 { From 5cf2e361bb2f725c18b80dfe84642c09e8ca2322 Mon Sep 17 00:00:00 2001 From: Joe McCain III Date: Sat, 23 Mar 2024 11:00:10 -0500 Subject: [PATCH 47/87] update Signed-off-by: Joe McCain III --- .gitpod.yml | 7 -- acme/src/lib.rs | 2 + core/src/ops/binary/specs.rs | 5 +- core/src/types/constants.rs | 2 +- core/src/types/variables.rs | 144 +++++++------------------ tensor/src/actions/arange.rs | 126 ++++++++++------------ tensor/src/actions/grad/mod.rs | 2 +- tensor/src/actions/grad/store.rs | 35 +++--- tensor/src/actions/iter/iterator.rs | 9 ++ tensor/src/actions/iter/mod.rs | 17 +++ tensor/src/actions/mod.rs | 1 + tensor/src/data/mod.rs | 6 ++ tensor/src/data/repr/mod.rs | 10 ++ tensor/src/data/repr/owned.rs | 120 +++++++++++++++++++++ tensor/src/impls/create.rs | 7 ++ tensor/src/impls/grad.rs | 2 +- tensor/src/impls/linalg.rs | 8 +- tensor/src/impls/ops/binary.rs | 12 +-- tensor/src/impls/ops/unary.rs | 33 +++--- tensor/src/lib.rs | 2 + tensor/src/linalg/uplo/mod.rs | 6 +- tensor/src/store/layout.rs | 3 + tensor/src/store/mod.rs | 9 +- tensor/src/tensor.rs | 14 +-- tensor/src/types/{mode.rs => kinds.rs} | 12 +-- tensor/src/types/mod.rs | 6 +- tensor/src/types/order.rs | 14 +-- tensor/tests/backward.rs | 2 +- 28 files changed, 361 insertions(+), 255 deletions(-) delete mode 100644 .gitpod.yml create mode 100644 tensor/src/actions/iter/iterator.rs create mode 100644 tensor/src/actions/iter/mod.rs create mode 100644 tensor/src/data/repr/mod.rs 
create mode 100644 tensor/src/data/repr/owned.rs rename tensor/src/types/{mode.rs => kinds.rs} (85%) diff --git a/.gitpod.yml b/.gitpod.yml deleted file mode 100644 index b6598339..00000000 --- a/.gitpod.yml +++ /dev/null @@ -1,7 +0,0 @@ -tasks: - - init: | - rustup default nightly - rustup target add wasm32-unknown-unknown wasm32-wasi --toolchain nightly - rustup component add clippy rustfmt --toolchain nightly - cargo build -F full --workspace --release - command: cargo watch -x test --all \ No newline at end of file diff --git a/acme/src/lib.rs b/acme/src/lib.rs index cb52e647..7b65292e 100644 --- a/acme/src/lib.rs +++ b/acme/src/lib.rs @@ -7,6 +7,8 @@ //! Acme is an autodifferentiaion library for Rust. It is designed to be a //! flexible and powerful tool for building machine learning models and //! other differentiable programs. +#![crate_name = "acme"] + #[doc(inline)] pub use acme_core::*; #[cfg(feature = "derive")] diff --git a/core/src/ops/binary/specs.rs b/core/src/ops/binary/specs.rs index dc41afc5..23ab4322 100644 --- a/core/src/ops/binary/specs.rs +++ b/core/src/ops/binary/specs.rs @@ -3,7 +3,6 @@ Contrib: FL03 */ - pub trait Logarithm { type Output; @@ -36,8 +35,6 @@ macro_rules! impl_log { }; } - - impl_log!(all [f32, f64]); impl_log!(other i8 => u32; ilog); @@ -51,4 +48,4 @@ impl_log!(other u16 => u32; ilog); impl_log!(other u32 => u32; ilog); impl_log!(other u64 => u32; ilog); impl_log!(other u128 => u32; ilog); -impl_log!(other usize => u32; ilog); \ No newline at end of file +impl_log!(other usize => u32; ilog); diff --git a/core/src/types/constants.rs b/core/src/types/constants.rs index 2a3682fb..4afee709 100644 --- a/core/src/types/constants.rs +++ b/core/src/types/constants.rs @@ -12,7 +12,7 @@ use std::ops::{Deref, DerefMut, Neg, Not}; #[derive(Clone, Copy, Debug, Default, Eq, Hash, Ord, PartialEq, PartialOrd)] #[cfg_attr(feature = "serde", derive(Deserialize, Serialize,))] -#[repr(transparent)] +#[repr(C)] pub struct Constant(pub T); impl Constant { diff --git a/core/src/types/variables.rs b/core/src/types/variables.rs index a963e141..009d95ce 100644 --- a/core/src/types/variables.rs +++ b/core/src/types/variables.rs @@ -7,10 +7,10 @@ use num::{Num, One, Zero}; #[cfg(feature = "serde")] use serde::{Deserialize, Serialize}; use std::borrow::{Borrow, BorrowMut}; -use std::ops::{Add, Div, Mul, Sub}; #[cfg_attr(feature = "serde", derive(Deserialize, Serialize,))] #[derive(Clone, Debug, Default, Eq, Hash, Ord, PartialEq, PartialOrd)] +#[repr(C)] pub struct Variable { name: String, pub(crate) value: Option, @@ -24,8 +24,8 @@ impl Variable { } } - pub fn is_none(&self) -> bool { - self.value.is_none() + pub const fn is_initialized(&self) -> bool { + self.value.is_some() } pub fn name(&self) -> &str { @@ -98,110 +98,6 @@ unsafe impl Send for Variable {} unsafe impl Sync for Variable {} -impl Add for Variable -where - T: Add + Default, -{ - type Output = Self; - - fn add(self, rhs: Self) -> Self::Output { - let name = "+".to_string(); - let value = self.eval() + rhs.eval(); - Variable::new(name).with_value(value) - } -} - -impl Add for Variable -where - T: Add + Default + std::fmt::Display, -{ - type Output = Self; - - fn add(self, rhs: T) -> Self::Output { - let name = format!("{} + {}", self.name, rhs); - let value = self.eval() + rhs; - Variable::new(name).with_value(value) - } -} - -impl Div for Variable -where - T: Div + Default, -{ - type Output = Self; - - fn div(self, rhs: Self) -> Self::Output { - let name = format!("{} / {}", self.name, rhs.name); - let value = 
self.eval() / rhs.eval(); - Variable::new(name).with_value(value) - } -} - -impl Div for Variable -where - T: Div + Default + std::fmt::Display, -{ - type Output = Self; - - fn div(self, rhs: T) -> Self::Output { - let name = format!("{} / {}", self.name, rhs); - let value = self.eval() / rhs; - Variable::new(name).with_value(value) - } -} - -impl Mul for Variable -where - T: Mul + Default, -{ - type Output = Self; - - fn mul(self, rhs: Self) -> Self::Output { - let name = format!("{} * {}", self.name, rhs.name); - let value = self.eval() * rhs.eval(); - Variable::new(name).with_value(value) - } -} - -impl Mul for Variable -where - T: Mul + Default + std::fmt::Display, -{ - type Output = Self; - - fn mul(self, rhs: T) -> Self::Output { - let name = format!("{} * {}", self.name, rhs); - let value = self.eval() * rhs; - Variable::new(name).with_value(value) - } -} - -impl Sub for Variable -where - T: Sub + Default, -{ - type Output = Self; - - fn sub(self, rhs: Self) -> Self::Output { - let name = format!("{} - {}", self.name, rhs.name); - let value = self.eval() - rhs.eval(); - Variable::new(name).with_value(value) - } -} - -impl Sub for Variable -where - T: Sub + Default + std::fmt::Display, -{ - type Output = Self; - - fn sub(self, rhs: T) -> Self::Output { - let name = format!("{} - {}", self.name, rhs); - let value = self.eval() - rhs; - Variable::new(name).with_value(value) - } -} - impl One for Variable where T: Clone + Default + One, @@ -223,3 +119,37 @@ where self.clone().eval().is_zero() } } + +macro_rules! impl_std_op { + ($parent:ident: $trait:ident, $method:ident) => { + impl std::ops::$trait for $parent + where + T: Clone + Default + std::ops::$trait, + { + type Output = Self; + + fn $method(self, rhs: Self) -> Self::Output { + let name = format!("{}", stringify!($method)); + let value = self.eval().$method(rhs.eval()); + $parent::new(name).with_value(value) + } + } + + impl std::ops::$trait for $parent + where + T: Clone + Default + std::ops::$trait, + { + type Output = Self; + + fn $method(self, rhs: T) -> Self::Output { + let name = format!("{}", stringify!($method)); + let value = self.eval().$method(rhs); + $parent::new(name).with_value(value) + } + } + }; +} +impl_std_op!(Variable: Add, add); +impl_std_op!(Variable: Div, div); +impl_std_op!(Variable: Mul, mul); +impl_std_op!(Variable: Sub, sub); diff --git a/tensor/src/actions/arange.rs b/tensor/src/actions/arange.rs index aa44ebee..825679a9 100644 --- a/tensor/src/actions/arange.rs +++ b/tensor/src/actions/arange.rs @@ -2,34 +2,55 @@ Appellation: arange Contrib: FL03 */ -use num::traits::real::Real; -use num::traits::{One, Zero}; +use num::traits::{FromPrimitive, Num, ToPrimitive, Zero}; use std::ops; -pub trait Ranged { - fn arange(start: T, stop: T, step: T) -> Self; +pub struct Arange { + range: Aranged, + step: T, +} - fn arange_between(start: T, stop: T) -> Self; +impl Arange { + pub fn new(range: Aranged, step: T) -> Self { + Self { range, step } + } - fn arange_until(stop: T) -> Self; + pub fn range(start: T, stop: T, step: T) -> Self { + Self::new(Aranged::Range { start, stop }, step) + } } +impl Arange +where + T: Copy + Num, +{ + pub fn start(&self) -> T { + self.range.start() + } -pub trait Linstep { - type Elem; + pub fn steps(&self) -> usize + where + T: FromPrimitive + ToPrimitive, + { + let start = self.range.start(); + let stop = self.range.stop(); + let step = self.step; + let steps = (stop - start) / step; + steps.to_usize().unwrap() + } - fn linstep(start: Self::Elem, stop: Self::Elem, steps: usize) -> 
Vec; -} + pub fn step(&self) -> T { + self.step + } -pub enum Ranges { - Arange { start: T, stop: T }, - Between { start: T, stop: T }, - Until { stop: T }, + pub fn stop(&self) -> T { + self.range.stop() + } } #[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)] pub enum Aranged { - Arange { start: T, stop: T, step: T }, - Between { start: T, stop: T }, + Range { start: T, stop: T }, + Inclusive { start: T, stop: T }, Until { stop: T }, } @@ -43,56 +64,35 @@ where T: Zero, { match self { - Aranged::Arange { start, .. } => *start, - Aranged::Between { start, .. } => *start, + Aranged::Range { start, .. } => *start, + Aranged::Inclusive { start, .. } => *start, Aranged::Until { .. } => T::zero(), } } /// Returns the stop value of the range. pub fn stop(&self) -> T { match self { - Aranged::Arange { stop, .. } => *stop, - Aranged::Between { stop, .. } => *stop, + Aranged::Range { stop, .. } => *stop, + Aranged::Inclusive { stop, .. } => *stop, Aranged::Until { stop } => *stop, } } - /// Returns the step value of the range. - pub fn step(&self) -> T + + pub fn step_size(&self, steps: usize) -> T where - T: One, + T: FromPrimitive + Num, { - match self { - Aranged::Arange { step, .. } => *step, - Aranged::Between { .. } => T::one(), - Aranged::Until { .. } => T::one(), - } - } - /// Returns the number of steps between the given boundaries - pub fn steps(&self) -> usize - where - T: Real, - { - match self { - Aranged::Arange { start, stop, step } => { - let n = ((*stop - *start) / *step).ceil().to_usize().unwrap(); - n - } - Aranged::Between { start, stop } => { - let n = (*stop - *start).to_usize().unwrap(); - n - } - - Aranged::Until { stop } => { - let n = stop.to_usize().unwrap(); - n - } - } + let steps = T::from_usize(steps).unwrap(); + let start = self.start(); + let stop = self.stop(); + let step = (stop - start) / steps; + step } } impl From> for Aranged { fn from(args: ops::Range) -> Self { - Aranged::Between { + Aranged::Range { start: args.start, stop: args.end, } @@ -105,32 +105,21 @@ impl From> for Aranged { } } -impl From<(T, T, T)> for Aranged { - fn from(args: (T, T, T)) -> Self { - Aranged::Arange { - start: args.0, - stop: args.1, - step: args.2, - } - } -} - -impl From<[T; 3]> for Aranged +impl From<[T; 2]> for Aranged where T: Copy, { - fn from(args: [T; 3]) -> Self { - Aranged::Arange { + fn from(args: [T; 2]) -> Self { + Aranged::Range { start: args[0], stop: args[1], - step: args[2], } } } impl From<(T, T)> for Aranged { fn from(args: (T, T)) -> Self { - Aranged::Between { + Aranged::Inclusive { start: args.0, stop: args.1, } @@ -148,11 +137,12 @@ mod tests { use super::*; #[test] - fn test_arange_args() { - let arange = Aranged::Between { start: 0, stop: 10 }; + fn test_arange() { + let setup = Aranged::Range { start: 0, stop: 10 }; + let arange = Arange::new(setup, 1); assert_eq!(arange.start(), 0); assert_eq!(arange.stop(), 10); assert_eq!(arange.step(), 1); - assert_eq!(arange, (0..10).into()); + assert_eq!(setup, (0..10).into()); } } diff --git a/tensor/src/actions/grad/mod.rs b/tensor/src/actions/grad/mod.rs index 0ba83036..34acc879 100644 --- a/tensor/src/actions/grad/mod.rs +++ b/tensor/src/actions/grad/mod.rs @@ -10,4 +10,4 @@ pub use self::store::GradStore; pub(crate) mod store; #[cfg(test)] -mod tests {} \ No newline at end of file +mod tests {} diff --git a/tensor/src/actions/grad/store.rs b/tensor/src/actions/grad/store.rs index bf810d33..ca729688 100644 --- a/tensor/src/actions/grad/store.rs +++ b/tensor/src/actions/grad/store.rs @@ -2,20 
+2,20 @@
     Appellation: store
     Contrib: FL03
 */
-use crate::TensorBase;
 use crate::prelude::TensorId;
+use crate::TensorBase;
 use acme::prelude::Store;
 use std::collections::btree_map::{BTreeMap, Entry};
 use std::ops::{Index, IndexMut};
 
 pub struct GradStore<T> {
-    pub(crate) store: BTreeMap<TensorId, TensorBase<T>>
+    pub(crate) store: BTreeMap<TensorId, TensorBase<T>>,
 }
 
 impl<T> GradStore<T> {
     pub fn new() -> Self {
         Self {
-            store: BTreeMap::new()
+            store: BTreeMap::new(),
         }
     }
     /// Clears the store, removing all values.
@@ -45,33 +45,45 @@ impl<T> GradStore<T> {
     /// If the store does not have a tensor with the given id, insert it.
     /// Returns a mutable reference to the tensor.
     pub fn or_insert(&mut self, tensor: TensorBase<T>) -> &mut TensorBase<T> {
-        self.entry(tensor.id).or_insert(tensor)
+        self.entry(tensor.id()).or_insert(tensor)
+    }
+
+    pub fn or_insert_default(&mut self, tensor: &TensorBase<T>) -> &mut TensorBase<T>
+    where
+        T: Clone + Default,
+    {
+        self.entry(tensor.id()).or_insert(tensor.default_like())
+    }
+
+    pub fn or_insert_zeros(&mut self, tensor: &TensorBase<T>) -> &mut TensorBase<T>
+    where
+        T: Clone + num::Zero,
+    {
+        self.entry(tensor.id()).or_insert(tensor.zeros_like())
     }
-
 }
 
 impl<T> Store<TensorId, TensorBase<T>> for GradStore<T> {
     fn get(&self, key: &TensorId) -> Option<&TensorBase<T>> {
         self.store.get(key)
     }
-    
+
     fn get_mut(&mut self, key: &TensorId) -> Option<&mut TensorBase<T>> {
         self.store.get_mut(key)
     }
-    
+
     fn insert(&mut self, key: TensorId, value: TensorBase<T>) -> Option<TensorBase<T>> {
         self.store.insert(key, value)
     }
-    
+
     fn remove(&mut self, key: &TensorId) -> Option<TensorBase<T>> {
         self.store.remove(key)
     }
-    
 }
 
 impl<T> Index<&TensorId> for GradStore<T> {
     type Output = TensorBase<T>;
-    
+
     fn index(&self, index: &TensorId) -> &Self::Output {
         &self.store[index]
     }
@@ -81,5 +93,4 @@ impl<T> IndexMut<&TensorId> for GradStore<T> {
     fn index_mut(&mut self, index: &TensorId) -> &mut Self::Output {
         self.get_mut(index).expect("Tensor not found")
     }
-
-}
\ No newline at end of file
+}
diff --git a/tensor/src/actions/iter/iterator.rs b/tensor/src/actions/iter/iterator.rs
new file mode 100644
index 00000000..e5088402
--- /dev/null
+++ b/tensor/src/actions/iter/iterator.rs
@@ -0,0 +1,9 @@
+/*
+    Appellation: iterator
+    Contrib: FL03
+*/
+//! # Iterator
+//!
+//!
+
+pub struct Iterator;
diff --git a/tensor/src/actions/iter/mod.rs b/tensor/src/actions/iter/mod.rs
new file mode 100644
index 00000000..db635fcd
--- /dev/null
+++ b/tensor/src/actions/iter/mod.rs
@@ -0,0 +1,17 @@
+/*
+    Appellation: iter
+    Contrib: FL03
+*/
+//! # Iteration
+//!
+//!
+pub use self::iterator::Iterator;
+
+pub(crate) mod iterator;
+
+pub trait TensorIter {
+    type Item;
+}
+
+#[cfg(test)]
+mod tests {}
diff --git a/tensor/src/actions/mod.rs b/tensor/src/actions/mod.rs
index ec0edfa4..171949aa 100644
--- a/tensor/src/actions/mod.rs
+++ b/tensor/src/actions/mod.rs
@@ -9,6 +9,7 @@
 pub mod arange;
 pub mod grad;
 pub mod index;
+pub mod iter;
 
 pub(crate) mod prelude {
     pub use super::arange::*;
diff --git a/tensor/src/data/mod.rs b/tensor/src/data/mod.rs
index b0dde87b..ef3b9deb 100644
--- a/tensor/src/data/mod.rs
+++ b/tensor/src/data/mod.rs
@@ -2,7 +2,13 @@
     Appellation: data
     Contrib: FL03
 */
+//! # Data
+//!
+//!
 #![allow(dead_code, unused_imports)]
+
+pub mod repr;
+
 pub unsafe trait RawData {
     type Elem;
 }
diff --git a/tensor/src/data/repr/mod.rs b/tensor/src/data/repr/mod.rs
new file mode 100644
index 00000000..15723f77
--- /dev/null
+++ b/tensor/src/data/repr/mod.rs
@@ -0,0 +1,10 @@
+/*
+    Appellation: repr
+    Contrib: FL03
+*/
+pub use self::owned::*;
+
+pub(crate) mod owned;
+
+#[cfg(test)]
+mod tests {}
diff --git a/tensor/src/data/repr/owned.rs b/tensor/src/data/repr/owned.rs
new file mode 100644
index 00000000..d0d954b8
--- /dev/null
+++ b/tensor/src/data/repr/owned.rs
@@ -0,0 +1,120 @@
+/*
+    Appellation: owned
+    Contrib: FL03
+*/
+use crate::data::nonnull_from_vec_data;
+use core::mem::{self, ManuallyDrop};
+use core::ptr::NonNull;
+use core::slice;
+
+#[derive(Debug)]
+#[repr(C)]
+pub struct OwnedRepr<A> {
+    capacity: usize,
+    len: usize,
+    ptr: NonNull<A>,
+}
+
+impl<A> OwnedRepr<A> {
+    pub fn from_vec(vec: Vec<A>) -> Self {
+        let mut v = ManuallyDrop::new(vec);
+        let capacity = v.capacity();
+        let len = v.len();
+        let ptr = nonnull_from_vec_data(&mut v);
+
+        Self { capacity, len, ptr }
+    }
+
+    pub fn capacity(&self) -> usize {
+        self.capacity
+    }
+
+    pub fn len(&self) -> usize {
+        self.len
+    }
+
+    pub fn ptr(&self) -> NonNull<A> {
+        self.ptr
+    }
+
+    /// Set the valid length of the data
+    ///
+    /// ## Safety
+    ///
+    /// The first `new_len` elements of the data should be valid.
+    pub(crate) unsafe fn set_len(&mut self, new_len: usize) {
+        debug_assert!(new_len <= self.capacity);
+        self.len = new_len;
+    }
+
+    fn take_as_vec(&mut self) -> Vec<A> {
+        let capacity = self.capacity;
+        let len = self.len;
+
+        self.capacity = 0;
+        self.len = 0;
+
+        unsafe { Vec::from_raw_parts(self.ptr.as_ptr(), len, capacity) }
+    }
+
+    pub(crate) fn into_vec(self) -> Vec<A> {
+        ManuallyDrop::new(self).take_as_vec()
+    }
+
+    pub(crate) fn as_slice(&self) -> &[A] {
+        unsafe { slice::from_raw_parts(self.ptr.as_ptr(), self.len) }
+    }
+}
+
+impl<A> Clone for OwnedRepr<A>
+where
+    A: Clone,
+{
+    fn clone(&self) -> Self {
+        Self::from(self.as_slice().to_owned())
+    }
+
+    fn clone_from(&mut self, other: &Self) {
+        let mut v = self.take_as_vec();
+        let other = other.as_slice();
+
+        if v.len() > other.len() {
+            v.truncate(other.len());
+        }
+        let (front, back) = other.split_at(v.len());
+        v.clone_from_slice(front);
+        v.extend_from_slice(back);
+        *self = Self::from(v);
+    }
+}
+
+impl<A> Drop for OwnedRepr<A> {
+    fn drop(&mut self) {
+        if self.capacity > 0 {
+            // correct because: If the elements don't need dropping, an
+            // empty Vec is ok. Only the Vec's allocation needs dropping.
+            //
+            // implemented because: in some places in ndarray
+            // where A: Copy (hence does not need drop) we use uninitialized elements in
+            // vectors. Setting the length to 0 avoids that the vector tries to
+            // drop, slice or otherwise produce values of these elements.
+            // (The details of the validity letting this happen with nonzero len, are
+            // under discussion as of this writing.)
+            if !mem::needs_drop::<A>() {
+                self.len = 0;
+            }
+            // drop as a Vec.
+ self.take_as_vec(); + } + } +} + +unsafe impl Send for OwnedRepr {} + +unsafe impl Sync for OwnedRepr {} + +impl From> for OwnedRepr { + fn from(vec: Vec) -> Self { + Self::from_vec(vec) + } +} diff --git a/tensor/src/impls/create.rs b/tensor/src/impls/create.rs index 0cf3510d..80e37dc5 100644 --- a/tensor/src/impls/create.rs +++ b/tensor/src/impls/create.rs @@ -23,6 +23,13 @@ where let store = vec![value; shape.elements()]; from_vec(false.into(), shape, store) } + + pub fn default_like(&self) -> Self + where + T: Default, + { + Self::fill(self.shape().clone(), T::default()) + } } impl TensorBase diff --git a/tensor/src/impls/grad.rs b/tensor/src/impls/grad.rs index d59860ce..a2307c7a 100644 --- a/tensor/src/impls/grad.rs +++ b/tensor/src/impls/grad.rs @@ -67,7 +67,7 @@ where // insert the gradient w.r.t. the current node store.insert(self.id(), self.ones_like()); - for node in sorted.iter() { + for node in sorted { if node.is_variable() { continue; } diff --git a/tensor/src/impls/linalg.rs b/tensor/src/impls/linalg.rs index 58cfe15b..59f3d62c 100644 --- a/tensor/src/impls/linalg.rs +++ b/tensor/src/impls/linalg.rs @@ -18,15 +18,15 @@ where } let lhs_shape = lhs.shape().clone(); - let lhs_n = *lhs_shape.last().unwrap(); - // let lhs_m = lhs_shape. + let lhs_m = lhs_shape.rows(); + let lhs_n = lhs_shape.columns(); let rhs_shape = rhs.shape().clone(); let shape = lhs_shape.matmul_shape(rhs.shape()).unwrap(); let mut result = vec![T::zero(); shape.elements()]; - for i in 0..lhs_shape[0] { - for j in 0..rhs_shape[1] { + for i in 0..lhs_m { + for j in 0..rhs_shape.columns() { for k in 0..lhs_n { let pos = i * rhs_shape[1] + j; let left = i * lhs_n + k; diff --git a/tensor/src/impls/ops/binary.rs b/tensor/src/impls/ops/binary.rs index 93b18cad..02587756 100644 --- a/tensor/src/impls/ops/binary.rs +++ b/tensor/src/impls/ops/binary.rs @@ -2,8 +2,8 @@ Appellation: arith Contrib: FL03 */ -use crate::prelude::{Scalar, TensorOp}; -use crate::tensor::*; +use crate::prelude::TensorOp; +use crate::tensor::{from_vec_with_op, TensorBase}; use acme::ops::binary::BinaryOp; macro_rules! cmp { @@ -20,7 +20,7 @@ macro_rules! impl_arithmetic { impl std::ops::$trait for TensorBase where - T: Scalar + std::ops::$trait, + T: Copy + std::ops::$trait, { type Output = Self; @@ -35,7 +35,7 @@ macro_rules! impl_arithmetic { impl<'a, T> std::ops::$trait<&'a TensorBase> for TensorBase where - T: Scalar + std::ops::$trait, + T: Copy + std::ops::$trait, { type Output = TensorBase; @@ -52,7 +52,7 @@ macro_rules! impl_arithmetic { impl<'a, T> std::ops::$trait> for &'a TensorBase where - T: Scalar + std::ops::$trait, + T: Copy + std::ops::$trait, { type Output = TensorBase; @@ -69,7 +69,7 @@ macro_rules! impl_arithmetic { impl<'a, 'b, T> std::ops::$trait<&'b TensorBase> for &'a TensorBase where - T: Scalar + std::ops::$trait, + T: Copy + std::ops::$trait, { type Output = TensorBase; diff --git a/tensor/src/impls/ops/unary.rs b/tensor/src/impls/ops/unary.rs index 1367b21a..80c827cf 100644 --- a/tensor/src/impls/ops/unary.rs +++ b/tensor/src/impls/ops/unary.rs @@ -34,11 +34,19 @@ where } } -macro_rules! impl_unary_arith { - ($variant:ident, $method:ident, $e:expr) => { +macro_rules! 
impl_unary_op { + ($variant:ident, $method:ident) => { pub fn $method(self) -> Self { let shape = self.shape().clone(); - let store = self.store.iter().map($e).collect(); + let store = self.store.iter().copied().map(|v| v.$method()).collect(); + let op = TensorOp::::Unary(Box::new(self), UnaryOp::$variant); + from_vec_with_op(false, op, shape, store) + } + }; + (custom $variant:ident, $method:ident, $f:expr) => { + pub fn $method(self) -> Self { + let shape = self.shape().clone(); + let store = self.store.iter().copied().map($f).collect(); let op = TensorOp::::Unary(Box::new(self), UnaryOp::$variant); from_vec_with_op(false, op, shape, store) } @@ -49,14 +57,13 @@ impl TensorBase where T: Scalar, { - impl_unary_arith!(Exp, exp, |v| v.exp()); - // impl_unary_arith!(Log, log, |v| v.log()); - - impl_unary_arith!(Cos, cos, |v| v.cos()); - impl_unary_arith!(Cosh, cosh, |v| v.cosh()); - impl_unary_arith!(Sin, sin, |v| v.sin()); - impl_unary_arith!(Sinh, sinh, |v| v.sinh()); - impl_unary_arith!(Sqrt, sqrt, |v| v.sqrt()); - impl_unary_arith!(Tan, tan, |v| v.tan()); - impl_unary_arith!(Tanh, tanh, |v| v.tanh()); + impl_unary_op!(Cos, cos); + impl_unary_op!(Cosh, cosh); + impl_unary_op!(Exp, exp); + impl_unary_op!(Ln, ln); + impl_unary_op!(Sin, sin); + impl_unary_op!(Sinh, sinh); + impl_unary_op!(Sqrt, sqrt); + impl_unary_op!(Tan, tan); + impl_unary_op!(Tanh, tanh); } diff --git a/tensor/src/lib.rs b/tensor/src/lib.rs index 8be5c7cb..73a2c014 100644 --- a/tensor/src/lib.rs +++ b/tensor/src/lib.rs @@ -41,6 +41,8 @@ mod impls { pub type Tensor = tensor::TensorBase; pub mod prelude { + #[doc(inline)] + pub use crate::actions::prelude::*; #[doc(inline)] pub use crate::data::*; #[doc(inline)] diff --git a/tensor/src/linalg/uplo/mod.rs b/tensor/src/linalg/uplo/mod.rs index 5f848d52..2e23235d 100644 --- a/tensor/src/linalg/uplo/mod.rs +++ b/tensor/src/linalg/uplo/mod.rs @@ -3,11 +3,11 @@ Contrib: FL03 */ //! # Upper Lower -//! -//! +//! +//! pub use self::kinds::UPLO; pub(crate) mod kinds; #[cfg(test)] -mod tests {} \ No newline at end of file +mod tests {} diff --git a/tensor/src/store/layout.rs b/tensor/src/store/layout.rs index 42ae33d7..5d7fa48f 100644 --- a/tensor/src/store/layout.rs +++ b/tensor/src/store/layout.rs @@ -6,8 +6,11 @@ //! //! 
use crate::shape::{IntoShape, Shape}; +#[cfg(feature = "serde")] +use serde::{Deserialize, Serialize}; #[derive(Clone, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)] +#[cfg_attr(feature = "serde", derive(Deserialize, Serialize))] pub struct Layout { pub(crate) offset: usize, pub(crate) shape: Shape, diff --git a/tensor/src/store/mod.rs b/tensor/src/store/mod.rs index 9882cbdc..60bfde80 100644 --- a/tensor/src/store/mod.rs +++ b/tensor/src/store/mod.rs @@ -10,13 +10,14 @@ pub use self::{layout::*, storage::*}; pub(crate) mod layout; pub(crate) mod storage; -use std::sync::{Arc, RwLock}; - -pub type ArcTensor = Arc>>; - pub trait TensorStore { type Elem; } +pub enum TensorData { + Scalar(T), + Tensor(Vec>), +} + #[cfg(test)] mod tests {} diff --git a/tensor/src/tensor.rs b/tensor/src/tensor.rs index e4187462..282ef166 100644 --- a/tensor/src/tensor.rs +++ b/tensor/src/tensor.rs @@ -3,12 +3,12 @@ Contrib: FL03 */ // use crate::ops::TrackedOp; -use crate::prelude::{IntoShape, Rank, Shape, TensorId, TensorMode, TensorOp}; +use crate::prelude::{IntoShape, Rank, Shape, TensorId, TensorKind, TensorOp}; use crate::store::Layout; use std::ops::Index; // use std::sync::{Arc, RwLock}; -pub(crate) fn from_vec(kind: TensorMode, shape: impl IntoShape, store: Vec) -> TensorBase { +pub(crate) fn from_vec(kind: TensorKind, shape: impl IntoShape, store: Vec) -> TensorBase { TensorBase { id: TensorId::new(), kind, @@ -19,7 +19,7 @@ pub(crate) fn from_vec(kind: TensorMode, shape: impl IntoShape, store: Vec } pub(crate) fn from_vec_with_op( - kind: impl Into, + kind: impl Into, op: TensorOp, shape: impl IntoShape, store: Vec, @@ -38,14 +38,14 @@ pub(crate) fn from_vec_with_op( // #[derive(Clone, Debug, Eq, Hash, Ord, PartialOrd)] pub struct TensorBase { pub(crate) id: TensorId, - pub(crate) kind: TensorMode, + pub(crate) kind: TensorKind, pub(crate) layout: Layout, pub(crate) op: Option>, pub(crate) store: Vec, } impl TensorBase { - pub fn new(kind: TensorMode, shape: impl IntoShape) -> Self { + pub fn new(kind: TensorKind, shape: impl IntoShape) -> Self { Self { id: TensorId::new(), kind, @@ -55,7 +55,7 @@ impl TensorBase { } } - pub fn from_vec(kind: TensorMode, shape: impl IntoShape, store: Vec) -> Self { + pub fn from_vec(kind: TensorKind, shape: impl IntoShape, store: Vec) -> Self { from_vec(kind, shape, store) } /// Returns the number of elements in the tensor. 
@@ -92,7 +92,7 @@ impl TensorBase { } pub fn variable(mut self) -> Self { - self.kind = TensorMode::Variable; + self.kind = TensorKind::Variable; self } pub fn to_vec(&self) -> Vec diff --git a/tensor/src/types/mode.rs b/tensor/src/types/kinds.rs similarity index 85% rename from tensor/src/types/mode.rs rename to tensor/src/types/kinds.rs index ec8683ac..f7fc1747 100644 --- a/tensor/src/types/mode.rs +++ b/tensor/src/types/kinds.rs @@ -30,13 +30,13 @@ use strum::{Display, EnumCount, EnumIs, EnumIter, EnumString, VariantNames}; )] #[repr(u8)] #[strum(serialize_all = "lowercase")] -pub enum TensorMode { +pub enum TensorKind { #[default] Normal, Variable, } -impl TensorMode { +impl TensorKind { pub fn normal() -> Self { Self::Normal } @@ -46,13 +46,13 @@ impl TensorMode { } } -impl From for usize { - fn from(mode: TensorMode) -> Self { +impl From for usize { + fn from(mode: TensorKind) -> Self { mode as usize } } -impl From for TensorMode { +impl From for TensorKind { fn from(mode: usize) -> Self { match mode % Self::COUNT { 0 => Self::Normal, @@ -61,7 +61,7 @@ impl From for TensorMode { } } -impl From for TensorMode { +impl From for TensorKind { fn from(is_variable: bool) -> Self { if is_variable { Self::Variable diff --git a/tensor/src/types/mod.rs b/tensor/src/types/mod.rs index ec977ea2..54fb2162 100644 --- a/tensor/src/types/mod.rs +++ b/tensor/src/types/mod.rs @@ -4,11 +4,11 @@ */ pub mod id; -pub mod mode; +pub mod kinds; pub mod order; pub(crate) mod prelude { pub use super::id::TensorId; - pub use super::mode::TensorMode; - pub use super::order::MajorOrder; + pub use super::kinds::TensorKind; + pub use super::order::Order; } diff --git a/tensor/src/types/order.rs b/tensor/src/types/order.rs index a8f35c28..9180ab95 100644 --- a/tensor/src/types/order.rs +++ b/tensor/src/types/order.rs @@ -9,7 +9,7 @@ use strum::{Display, EnumCount, EnumIs, EnumIter, EnumString, VariantNames}; #[cfg_attr( feature = "serde", derive(Deserialize, Serialize,), - serde(rename_all = "lowercase", untagged) + serde(rename_all = "snake_case", untagged) )] #[derive( Clone, @@ -29,14 +29,14 @@ use strum::{Display, EnumCount, EnumIs, EnumIter, EnumString, VariantNames}; VariantNames, )] #[repr(u8)] -#[strum(serialize_all = "lowercase")] -pub enum MajorOrder { +#[strum(serialize_all = "snake_case")] +pub enum Order { Column, #[default] Row, } -impl MajorOrder { +impl Order { pub fn column() -> Self { Self::Column } @@ -46,13 +46,13 @@ impl MajorOrder { } } -impl From for usize { - fn from(order: MajorOrder) -> Self { +impl From for usize { + fn from(order: Order) -> Self { order as usize } } -impl From for MajorOrder { +impl From for Order { fn from(order: usize) -> Self { match order % Self::COUNT { 0 => Self::Column, diff --git a/tensor/tests/backward.rs b/tensor/tests/backward.rs index fb50a5e8..0b429d80 100644 --- a/tensor/tests/backward.rs +++ b/tensor/tests/backward.rs @@ -29,7 +29,7 @@ fn test_backward() { } #[test] -// #[ignore = "Needs to be fixed"] +#[ignore = "Needs to be fixed"] fn test_add_mul() { let shape = (2, 2); let a = Tensor::::ones(shape).variable(); From ea41045696cccadf7c0b1e751fbc59fd9aae154b Mon Sep 17 00:00:00 2001 From: Joe McCain III Date: Sat, 23 Mar 2024 11:01:15 -0500 Subject: [PATCH 48/87] update Signed-off-by: Joe McCain III --- Cargo.toml | 2 +- acme/Cargo.toml | 10 +++++----- graphs/Cargo.toml | 2 +- tensor/Cargo.toml | 2 +- 4 files changed, 8 insertions(+), 8 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index a344ac64..7648cdb7 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ 
-7,7 +7,7 @@ homepage = "https://github.com/FL03/acme/wikis" license = "Apache-2.0" repository = "https://github.com/FL03/acme" readme = "README.md" -version = "0.3.0" # TODO - Update cargo package version +version = "0.3.0-nightly.1" # TODO - Update cargo package version [workspace] default-members = [ diff --git a/acme/Cargo.toml b/acme/Cargo.toml index 1b8ddf7d..ce34cfc2 100644 --- a/acme/Cargo.toml +++ b/acme/Cargo.toml @@ -78,11 +78,11 @@ required-features = ["macros"] [build-dependencies] [dependencies] -acme-core = { path = "../core", version = "0.3.0" } -acme-derive = { optional = true, path = "../derive", version = "0.3.0" } -acme-graphs = { optional = true, path = "../graphs", version = "0.3.0" } -acme-macros = { optional = true, path = "../macros", version = "0.3.0" } -acme-tensor = { optional = true, path = "../tensor", version = "0.3.0" } +acme-core = { path = "../core", version = "0.3.0-nightly.1" } +acme-derive = { optional = true, path = "../derive", version = "0.3.0-nightly.1" } +acme-graphs = { optional = true, path = "../graphs", version = "0.3.0-nightly.1" } +acme-macros = { optional = true, path = "../macros", version = "0.3.0-nightly.1" } +acme-tensor = { optional = true, path = "../tensor", version = "0.3.0-nightly.1" } [dev-dependencies] approx = "0.5" diff --git a/graphs/Cargo.toml b/graphs/Cargo.toml index 957ea79c..07df229b 100644 --- a/graphs/Cargo.toml +++ b/graphs/Cargo.toml @@ -34,7 +34,7 @@ test = true [dev-dependencies] [dependencies] -acme-core = { path = "../core", version = "0.3.0" } +acme-core = { path = "../core", version = "0.3.0-nightly.1" } anyhow.workspace = true lazy_static = "1" diff --git a/tensor/Cargo.toml b/tensor/Cargo.toml index f99e463a..b9ea3ec0 100644 --- a/tensor/Cargo.toml +++ b/tensor/Cargo.toml @@ -25,7 +25,7 @@ serde-ext = [ [build-dependencies] [dependencies] -acme-core = { path = "../core", version = "0.3.0" } +acme-core = { path = "../core", version = "0.3.0-nightly.1" } num = "0.4" serde = { optional = true, features = ["derive"], version = "1" } From 5bbaa243cc35c404df85de3e70e6b67b74b48363 Mon Sep 17 00:00:00 2001 From: Joe McCain III Date: Sat, 23 Mar 2024 12:14:39 -0500 Subject: [PATCH 49/87] update Signed-off-by: Joe McCain III --- .github/workflows/crates.yml | 3 - Cargo.toml | 2 +- acme/Cargo.toml | 18 ++-- acme/examples/autodiff.rs | 24 +++--- acme/examples/{cgraph.rs => graph.rs} | 0 acme/tests/default.rs | 20 ++++- acme/tests/macros.rs | 74 +++++++++++++++- derive/src/cmp/params/mod.rs | 2 +- graphs/Cargo.toml | 3 +- macros/src/cmp/mod.rs | 7 -- macros/src/cmp/store.rs | 117 -------------------------- macros/src/grad/mod.rs | 2 +- macros/src/gradient.rs | 109 ------------------------ macros/src/lib.rs | 30 ++----- {acme => macros}/tests/autodiff.rs | 58 ++----------- macros/tests/default.rs | 19 +++++ macros/tests/gradient.rs | 75 ----------------- tensor/Cargo.toml | 2 +- tensor/src/actions/mod.rs | 7 +- tensor/src/impls/grad.rs | 1 + tensor/src/tensor.rs | 28 ++++-- tensor/tests/backward.rs | 5 +- 22 files changed, 178 insertions(+), 428 deletions(-) rename acme/examples/{cgraph.rs => graph.rs} (100%) delete mode 100644 macros/src/cmp/mod.rs delete mode 100644 macros/src/cmp/store.rs delete mode 100644 macros/src/gradient.rs rename {acme => macros}/tests/autodiff.rs (74%) create mode 100644 macros/tests/default.rs delete mode 100644 macros/tests/gradient.rs diff --git a/.github/workflows/crates.yml b/.github/workflows/crates.yml index cfe946ca..5697c206 100644 --- a/.github/workflows/crates.yml +++ 
b/.github/workflows/crates.yml @@ -29,9 +29,6 @@ jobs: - name: Publish (${{ env.CARGO_PACKAGE_NAME }}) run: cargo publish --all-features -v -p ${{ env.CARGO_PACKAGE_NAME }} --token ${{ secrets.CARGO_REGISTRY_TOKEN }} features: - concurrency: - group: ${{ github.workflow }}-${{ github.ref }}-sdk - cancel-in-progress: false name: Publish (sdk) needs: core runs-on: ubuntu-latest diff --git a/Cargo.toml b/Cargo.toml index 7648cdb7..a344ac64 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -7,7 +7,7 @@ homepage = "https://github.com/FL03/acme/wikis" license = "Apache-2.0" repository = "https://github.com/FL03/acme" readme = "README.md" -version = "0.3.0-nightly.1" # TODO - Update cargo package version +version = "0.3.0" # TODO - Update cargo package version [workspace] default-members = [ diff --git a/acme/Cargo.toml b/acme/Cargo.toml index ce34cfc2..90315d5b 100644 --- a/acme/Cargo.toml +++ b/acme/Cargo.toml @@ -64,29 +64,27 @@ name = "autodiff" required-features = ["macros"] [[example]] -name = "cgraph" +name = "graph" required-features = ["graph"] -[[test]] -name = "autodiff" -required-features = ["macros"] - [[test]] name = "macros" required-features = ["macros"] + [build-dependencies] [dependencies] -acme-core = { path = "../core", version = "0.3.0-nightly.1" } -acme-derive = { optional = true, path = "../derive", version = "0.3.0-nightly.1" } -acme-graphs = { optional = true, path = "../graphs", version = "0.3.0-nightly.1" } -acme-macros = { optional = true, path = "../macros", version = "0.3.0-nightly.1" } -acme-tensor = { optional = true, path = "../tensor", version = "0.3.0-nightly.1" } +acme-core = { path = "../core", version = "0.3.0" } +acme-derive = { optional = true, path = "../derive", version = "0.3.0" } +acme-graphs = { optional = true, path = "../graphs", version = "0.3.0" } +acme-macros = { optional = true, path = "../macros", version = "0.3.0" } +acme-tensor = { optional = true, path = "../tensor", version = "0.3.0" } [dev-dependencies] approx = "0.5" num = "0.4" +rand = "0.8" [package.metadata.docs.rs] all-features = true diff --git a/acme/examples/autodiff.rs b/acme/examples/autodiff.rs index 53002ebd..d0393451 100644 --- a/acme/examples/autodiff.rs +++ b/acme/examples/autodiff.rs @@ -2,12 +2,11 @@ Appellation: autodiff Contrib: FL03 */ -#![allow(dead_code, unused_variables)] -#![feature(fn_traits)] +// #![cfg(feature = "macros")] + extern crate acme; use acme::autodiff; -use acme::prelude::sigmoid; macro_rules! eval { ($var:ident: $ex:expr) => { @@ -18,22 +17,21 @@ macro_rules! 
eval {
     ($var:ident: $ex:expr) => {
@@ -18,22 +17,21 @@ fn main() -> Result<(), Box<dyn std::error::Error>> {
 
 fn main() -> Result<(), Box<dyn std::error::Error>> {
     let x = 2_f64;
-    // samples(x);
 
-    // let z = sigmoid(x);
-    // show_item!(sigmoid(x));
+    // multiply(x, x);
 
-    multiply(x, x);
+    samples(x);
 
     Ok(())
 }
 
-pub fn multiply<A, B, C>(x: A, y: B) -> C
-where
-    A: std::ops::Mul<B, Output = C>,
-{
-    x * y
-}
+// #[partial]
+// pub fn multiply<A, B, C>(x: A, y: B) -> C
+// where
+//     A: std::ops::Mul<B, Output = C>,
+// {
+//     x * y
+// }
 
 fn samples(x: f64) {
     eval!(x: x.tan());
diff --git a/acme/examples/cgraph.rs b/acme/examples/graph.rs
similarity index 100%
rename from acme/examples/cgraph.rs
rename to acme/examples/graph.rs
diff --git a/acme/tests/default.rs b/acme/tests/default.rs
index ff4c83fe..90cf5178 100644
--- a/acme/tests/default.rs
+++ b/acme/tests/default.rs
@@ -1,7 +1,19 @@
-#[cfg(test)]
+/*
+    Appellation: default
+    Contrib: FL03
+*/
+#![cfg(test)]
+
+pub fn multiply<A, B, C>(x: A, y: B) -> C
+where
+    A: std::ops::Mul<B, Output = C>,
+{
+    x * y
+}
+
 #[test]
 fn compiles() {
-    let add = |a, b| a + b;
-    let result = add(2, 2);
-    assert_eq!(result, 4);
+    assert!(multiply(2, 3) > 0);
+    assert_eq!(multiply(2, 3), 6);
+    assert_ne!(multiply(2, 3), 7);
 }
diff --git a/acme/tests/macros.rs b/acme/tests/macros.rs
index 9c7d0c7f..28d7b8fc 100644
--- a/acme/tests/macros.rs
+++ b/acme/tests/macros.rs
@@ -2,6 +2,76 @@
     Appellation: macros
     Contrib: FL03
 */
-#![cfg(test)]
-#![cfg(feature = "macros")]
+#![cfg(all(test, feature = "macros"))]
+
 extern crate acme;
+
+use acme::prelude::autodiff;
+use num::Float;
+
+pub fn sigmoid<T>(x: T) -> T
+where
+    T: Float,
+{
+    (T::one() + x.neg().exp()).recip()
+}
+
+pub trait Sigmoid {
+    fn sigmoid(self) -> Self;
+}
+
+impl<T> Sigmoid for T
+where
+    T: Float,
+{
+    fn sigmoid(self) -> Self {
+        (T::one() + self.neg().exp()).recip()
+    }
+}
+
+pub fn add<A, B, C>(a: A, b: B) -> C
+where
+    A: std::ops::Add<B, Output = C>,
+{
+    a + b
+}
+
+pub fn sigmoid_prime<T>(x: T) -> T
+where
+    T: Float,
+{
+    x.neg().exp() / (T::one() + x.neg().exp()).powi(2)
+}
+
+trait Square {
+    fn square(self) -> Self;
+}
+
+impl<T> Square for T
+where
+    T: Copy + std::ops::Mul<Output = T>,
+{
+    fn square(self) -> Self {
+        self * self
+    }
+}
+
+#[ignore = "Currently, support for function calls is not fully implemented"]
+#[test]
+fn test_function_call() {
+    let (x, y) = (1_f64, 2_f64);
+    // differentiating a function call w.r.t. x
+    assert_eq!(autodiff!(x: add(x, y)), 1.0);
+    // differentiating a function call w.r.t. some variable
+    assert_eq!(autodiff!(a: add(x, y)), 0.0);
+    assert_eq!(autodiff!(y: sigmoid::<f64>(y)), sigmoid_prime(y));
+}
+
+#[ignore = "Custom trait methods are not yet supported"]
+#[test]
+fn test_method() {
+    let (x, y) = (1_f64, 2_f64);
+    assert_eq!(autodiff!(x: x.mul(y)), 2.0);
+    assert_eq!(autodiff!(x: x.square()), 2.0);
+    assert_eq!(autodiff!(x: x.sigmoid()), sigmoid_prime(x));
+}
diff --git a/derive/src/cmp/params/mod.rs b/derive/src/cmp/params/mod.rs
index 0a7cedbb..743d14aa 100644
--- a/derive/src/cmp/params/mod.rs
+++ b/derive/src/cmp/params/mod.rs
@@ -16,7 +16,7 @@ pub fn generate_keys(fields: &Fields, name: &Ident) -> TokenStream {
 fn handle_named_fields(fields: &FieldsNamed, name: &Ident) -> TokenStream {
     let FieldsNamed { named, ..
} = fields; - let fields_str = named.iter().cloned().map(|field| field.ident.unwrap()); + let _fields_str = named.iter().cloned().map(|field| field.ident.unwrap()); let variants = named.iter().cloned().map(|field| { let ident = field.ident.unwrap(); let variant_ident = format_ident!("{}", capitalize_first(&ident.to_string())); diff --git a/graphs/Cargo.toml b/graphs/Cargo.toml index 07df229b..698db56f 100644 --- a/graphs/Cargo.toml +++ b/graphs/Cargo.toml @@ -34,9 +34,8 @@ test = true [dev-dependencies] [dependencies] -acme-core = { path = "../core", version = "0.3.0-nightly.1" } +acme-core = { path = "../core", version = "0.3.0" } -anyhow.workspace = true lazy_static = "1" num = "0.4" petgraph = "0.6" diff --git a/macros/src/cmp/mod.rs b/macros/src/cmp/mod.rs deleted file mode 100644 index 3996240c..00000000 --- a/macros/src/cmp/mod.rs +++ /dev/null @@ -1,7 +0,0 @@ -/* - Appellation: cmp - Contrib: FL03 -*/ -pub use self::store::*; - -pub(crate) mod store; diff --git a/macros/src/cmp/store.rs b/macros/src/cmp/store.rs deleted file mode 100644 index bb0293a5..00000000 --- a/macros/src/cmp/store.rs +++ /dev/null @@ -1,117 +0,0 @@ -/* - Appellation: store - Contrib: FL03 -*/ -use proc_macro2::TokenStream; -use std::collections::hash_map::Entry; -use std::collections::HashMap; -use syn::Expr; - -pub struct GradientStore { - pub(crate) store: HashMap, -} - -impl GradientStore -where - K: Eq + std::hash::Hash, -{ - pub fn new() -> Self { - Self { - store: HashMap::new(), - } - } - - pub fn entry(&mut self, k: K) -> Entry { - self.store.entry(k) - } - - pub fn get(&self, k: &K) -> Option<&TokenStream> { - self.store.get(k) - } - - pub fn get_mut(&mut self, k: &K) -> Option<&mut TokenStream> { - self.store.get_mut(k) - } - - pub fn insert(&mut self, k: K, v: TokenStream) -> Option { - self.store.insert(k, v) - } - - pub fn or_insert(&mut self, k: K, v: TokenStream) -> &mut TokenStream { - self.entry(k).or_insert(v) - } - - pub fn remove(&mut self, k: &K) -> Option { - self.store.remove(k) - } - - pub fn retain(&mut self, f: F) - where - F: FnMut(&K, &mut TokenStream) -> bool, - { - self.store.retain(f); - } -} - -impl GradientStore { - pub fn retain_vars(&mut self) { - self.retain(|k, _v| matches!(k, Expr::Path(_))); - } -} - -impl std::ops::Deref for GradientStore { - type Target = HashMap; - - fn deref(&self) -> &Self::Target { - &self.store - } -} - -impl std::ops::DerefMut for GradientStore { - fn deref_mut(&mut self) -> &mut Self::Target { - &mut self.store - } -} - -impl std::ops::Index<&K> for GradientStore -where - K: Eq + std::hash::Hash, -{ - type Output = TokenStream; - - fn index(&self, k: &K) -> &Self::Output { - self.get(k).expect("Key not found") - } -} - -impl std::ops::IndexMut<&K> for GradientStore -where - K: Eq + std::hash::Hash, -{ - fn index_mut(&mut self, k: &K) -> &mut Self::Output { - self.get_mut(k).expect("Key not found") - } -} - -impl IntoIterator for GradientStore { - type Item = (K, TokenStream); - type IntoIter = std::collections::hash_map::IntoIter; - - fn into_iter(self) -> Self::IntoIter { - self.store.into_iter() - } -} - -impl FromIterator<(K, TokenStream)> for GradientStore -where - K: Eq + std::hash::Hash, -{ - fn from_iter(iter: I) -> Self - where - I: IntoIterator, - { - Self { - store: HashMap::from_iter(iter), - } - } -} diff --git a/macros/src/grad/mod.rs b/macros/src/grad/mod.rs index 65109759..fbf804d2 100644 --- a/macros/src/grad/mod.rs +++ b/macros/src/grad/mod.rs @@ -16,7 +16,7 @@ pub fn gradient(grad: &GradientAst) -> TokenStream { 
handle_item_fn(&item) } -fn handle_item_fn(item: &ItemFn) -> TokenStream { +pub fn handle_item_fn(item: &ItemFn) -> TokenStream { let ItemFn { block, sig, .. } = item; let Signature { inputs, .. } = sig; diff --git a/macros/src/gradient.rs b/macros/src/gradient.rs deleted file mode 100644 index 72f1a2bb..00000000 --- a/macros/src/gradient.rs +++ /dev/null @@ -1,109 +0,0 @@ -/* - Appellation: gradient - Contrib: FL03 -*/ -use crate::cmp::GradientStore; -use proc_macro2::TokenStream; -use quote::quote; -use syn::{Expr, ExprBinary, ExprUnary}; - -pub fn compute_grad(expr: &Expr) -> TokenStream { - // Initialize an empty HashMap to hold the gradient values - let mut store = GradientStore::new(); - // begin by computing the gradient of the expression w.r.t. itself - // store.insert(expr.clone(), quote! { 1.0 }); - - // Generate code to compute the gradient of the expression w.r.t. each variable - handle_expr(expr, &mut store); - - store.retain_vars(); - - let values = store - .into_iter() - .map(|(k, v)| { - quote! { (#k, #v) } - }) - .collect::>(); - // Convert the gradient values into a token stream - quote! { [#(#values),*] } -} - -pub fn handle_expr(expr: &Expr, store: &mut GradientStore) -> TokenStream { - match expr { - Expr::Binary(inner) => { - let df = binary_grad(inner, store); - df - } - // Handle constants - Expr::Const(_) => quote! { 0.0 }, - // Handle literals - Expr::Lit(_) => quote! { 0.0 }, - Expr::Paren(inner) => handle_expr(&inner.expr, store), - // Handle path variables (identifiers) - Expr::Path(inner) => { - let path = &inner.path; - // Only considers single-segment paths; i.e., x in the expression let x = ___; - if path.segments.len() != 1 { - panic!("Unsupported path!"); - } - let grad = quote! { 1.0 }; - // store.insert(node, grad.clone()); - grad - } - // Handle references (borrowed variables denoted with & or &mut) - Expr::Reference(inner) => handle_expr(&inner.expr, store), - // Handle unary expressions (e.g., negation, natural log, etc.) - Expr::Unary(inner) => { - // Compute the gradient of the expression - let df = handle_unary(inner, store); - - df - } - // Handle other expressions - _ => panic!("Unsupported expression!"), - } -} - -fn binary_grad(expr: &ExprBinary, store: &mut GradientStore) -> TokenStream { - use syn::BinOp; - // create a cloned reference to the expression - let node: Expr = expr.clone().into(); - // let grad = store.entry(node).or_insert(quote! { 0.0 }).clone(); - let grad = store.remove(&node).unwrap_or(quote! { 0.0 }); - let ExprBinary { - left, op, right, .. - } = expr; - - // Recursivley compute the gradient of the left and right children - let dl = handle_expr(left, store); - let dr = handle_expr(right, store); - match op { - BinOp::Add(_) => { - let gl = store.or_insert(*left.clone(), quote! { 0.0 }); - *gl = quote! { #gl + #dl }; - let gr = store.or_insert(*right.clone(), quote! { 0.0 }); - *gr = quote! { #gr + #dr }; - } - BinOp::Mul(_) => { - let gl = store.or_insert(*left.clone(), quote! { 0.0 }); - *gl = quote! { #gl + #right * #dl }; - let gr = store.or_insert(*right.clone(), quote! { 0.0 }); - *gr = quote! { #gr + #left * #dr }; - } - _ => panic!("Unsupported binary operator!"), - }; - grad -} - -fn handle_unary(expr: &ExprUnary, store: &mut GradientStore) -> TokenStream { - use syn::UnOp; - handle_expr(&expr.expr, store); - let dv = &store[&expr.expr.clone()]; - let df = match expr.op { - UnOp::Neg(_) => { - quote! 
{ -#dv } - } - _ => panic!("Unsupported unary operator!"), - }; - df -} diff --git a/macros/src/lib.rs b/macros/src/lib.rs index a8ee3435..6650e94d 100644 --- a/macros/src/lib.rs +++ b/macros/src/lib.rs @@ -5,28 +5,22 @@ //! # acme-macros //! //! -extern crate proc_macro as pm; +extern crate proc_macro; pub(crate) mod ast; -pub(crate) mod cmp; pub(crate) mod diff; pub(crate) mod grad; pub(crate) mod ops; -pub(crate) mod gradient; - -use ast::gradient::GradientAst; +// use ast::gradient::GradientAst; use ast::partials::PartialAst; -use pm::TokenStream; -use syn::{parse_macro_input, Expr}; +use proc_macro::TokenStream; +use syn::parse_macro_input; #[proc_macro_attribute] pub fn partial(_attr: TokenStream, item: TokenStream) -> TokenStream { - // let attr = parse_macro_input!(attr as syn::Attribute); - // let item = parse_macro_input!(item as syn::ItemFn); - // let ast = ast::gradient::GradientAst::new(attr, item); - let ast = parse_macro_input!(item as GradientAst); - let result = grad::gradient(&ast); + let ast = parse_macro_input!(item as syn::ItemFn); + let result = grad::handle_item_fn(&ast); TokenStream::from(result) } @@ -44,18 +38,6 @@ pub fn autodiff(input: TokenStream) -> TokenStream { TokenStream::from(result) } -#[proc_macro] -pub fn gradient(input: TokenStream) -> TokenStream { - // Parse the input expression into a syntax tree - let expr = parse_macro_input!(input as Expr); - - // Generate code to compute the gradient - let result = gradient::compute_grad(&expr); - - // Return the generated code as a token stream - TokenStream::from(result) -} - pub(crate) mod kw { syn::custom_keyword!(eval); syn::custom_keyword!(grad); diff --git a/acme/tests/autodiff.rs b/macros/tests/autodiff.rs similarity index 74% rename from acme/tests/autodiff.rs rename to macros/tests/autodiff.rs index 8fba9119..dcdf09b8 100644 --- a/acme/tests/autodiff.rs +++ b/macros/tests/autodiff.rs @@ -1,42 +1,21 @@ /* - Appellation: gradient + Appellation: autodiff Contrib: FL03 */ -#![cfg(all(test, feature = "macros"))] +#![cfg(test)] +#![allow(unused_variables)] +extern crate acme_macros as macros; -extern crate acme; - -use acme::prelude::autodiff; use approx::assert_abs_diff_eq; -use num::traits::Float; - -pub fn add(a: A, b: B) -> C -where - A: std::ops::Add, -{ - a + b -} +use macros::autodiff; pub fn sigmoid_prime(x: T) -> T where - T: Float, + T: num::Float, { x.neg().exp() / (T::one() + x.neg().exp()).powi(2) } -trait Square { - fn square(self) -> Self; -} - -impl Square for T -where - T: Copy + std::ops::Mul, -{ - fn square(self) -> Self { - self * self - } -} - #[test] fn test_autodiff() { let (x, y) = (1_f64, 2_f64); @@ -133,7 +112,7 @@ fn test_trig() { let x: f64 = 2.0; assert_eq!(autodiff!(x: x.cos()), -x.sin()); assert_eq!(autodiff!(x: x.sin()), x.cos()); - assert_eq!(autodiff!(x: x.tan()), x.cos().square().recip()); + assert_eq!(autodiff!(x: x.tan()), x.cos().powi(2).recip()); } #[test] @@ -149,7 +128,7 @@ fn test_chained() { let x: f64 = 2.0; assert_abs_diff_eq!( autodiff!(x: x.sin() * x.cos()), - 2_f64 * x.cos().square() - 1_f64, + 2_f64 * x.cos().powi(2) - 1_f64, epsilon = 1e-8 ); assert_eq!(autodiff!(x: x.sin().cos()), -x.cos() * x.sin().sin()); @@ -169,24 +148,3 @@ fn test_sigmoid() { sigmoid_prime(x) ); } - -#[ignore = "Currently, support for function calls is not fully implemented"] -#[test] -fn test_function_call() { - use acme::prelude::sigmoid; - let (x, y) = (1_f64, 2_f64); - // differentiating a function call w.r.t. 
x - assert_eq!(autodiff!(x: add(x, y)), 1.0); - // differentiating a function call w.r.t. some variable - assert_eq!(autodiff!(a: add(x, y)), 0.0); - assert_eq!(autodiff!(y: sigmoid::(y)), sigmoid_prime(y)); -} - -#[ignore = "Custom trait methods are not yet supported"] -#[test] -fn test_method() { - let (x, y) = (1_f64, 2_f64); - assert_eq!(autodiff!(x: x.mul(y)), 2.0); - - assert_eq!(autodiff!(x: x.sigmoid()), sigmoid_prime(x)); -} diff --git a/macros/tests/default.rs b/macros/tests/default.rs new file mode 100644 index 00000000..90cf5178 --- /dev/null +++ b/macros/tests/default.rs @@ -0,0 +1,19 @@ +/* + Appellation: default + Contrib: FL03 +*/ +#![cfg(test)] + +pub fn multiply(x: A, y: B) -> C +where + A: std::ops::Mul, +{ + x * y +} + +#[test] +fn compiles() { + assert!(multiply(2, 3) > 0); + assert_eq!(multiply(2, 3), 6); + assert_ne!(multiply(2, 3), 7); +} diff --git a/macros/tests/gradient.rs b/macros/tests/gradient.rs deleted file mode 100644 index a34612f3..00000000 --- a/macros/tests/gradient.rs +++ /dev/null @@ -1,75 +0,0 @@ -/* - Appellation: gradient - Contrib: FL03 -*/ -#[cfg(test)] -extern crate acme_macros as macros; - -use macros::gradient; - -#[test] -fn test_grad_addition() { - let x = 1.0; - let y = 2.0; - let df = gradient!(x + y); - // let df = BTreeMap::from_iter(df); - assert_eq!( - df.into_iter().filter(|(k, _v)| k == &x).collect::>(), - [(x, 1.0)] - ); - assert_eq!( - df.into_iter().filter(|(k, _v)| k == &y).collect::>(), - [(y, 1.0)] - ); - let z = 3.0; - let df = gradient!(x + y + z); - assert_eq!( - df.into_iter().filter(|(k, _v)| k == &x).collect::>(), - [(x, 1.0)] - ); - assert_eq!( - df.into_iter().filter(|(k, _v)| k == &y).collect::>(), - [(y, 1.0)] - ); - assert_eq!( - df.into_iter().filter(|(k, _v)| k == &z).collect::>(), - [(z, 1.0)] - ); -} - -#[test] -fn test_grad_multiply() { - let x = 1.0; - let y = 2.0; - let df = gradient!(x * y); - assert_eq!( - df.into_iter().filter(|(k, _v)| k == &x).collect::>(), - [(x, 2.0)] - ); - assert_eq!( - df.into_iter().filter(|(k, _v)| k == &y).collect::>(), - [(y, 1.0)] - ); - let df = gradient!(x * y + 3.0); - assert_eq!( - df.into_iter().filter(|(k, _v)| k == &x).collect::>(), - [(x, 2.0)] - ); - assert_eq!( - df.into_iter().filter(|(k, _v)| k == &y).collect::>(), - [(y, 1.0)] - ); -} - -#[ignore = "Needs to be fixed"] -#[test] -fn test_grad_mixed() { - let x = 1.0; - let y = 2.0; - let df = gradient!(y * (x + y)); - // assert_eq!(df.into_iter().filter(|(k, _v)| k == &x).collect::>(), [(x, 2.0)]); - assert_eq!( - df.into_iter().filter(|(k, _v)| k == &y).collect::>(), - [(y, 5.0)] - ); -} diff --git a/tensor/Cargo.toml b/tensor/Cargo.toml index b9ea3ec0..f99e463a 100644 --- a/tensor/Cargo.toml +++ b/tensor/Cargo.toml @@ -25,7 +25,7 @@ serde-ext = [ [build-dependencies] [dependencies] -acme-core = { path = "../core", version = "0.3.0-nightly.1" } +acme-core = { path = "../core", version = "0.3.0" } num = "0.4" serde = { optional = true, features = ["derive"], version = "1" } diff --git a/tensor/src/actions/mod.rs b/tensor/src/actions/mod.rs index 171949aa..376a774a 100644 --- a/tensor/src/actions/mod.rs +++ b/tensor/src/actions/mod.rs @@ -4,7 +4,12 @@ */ //! # Actions //! -//! +//! This module contains the implementations of the various actions that can be performed on tensors. +//! The actions include: +//! - Composition +//! - Differentiation +//! - Indexing +//! 
- Iteration pub mod arange; pub mod grad; diff --git a/tensor/src/impls/grad.rs b/tensor/src/impls/grad.rs index a2307c7a..3af2efec 100644 --- a/tensor/src/impls/grad.rs +++ b/tensor/src/impls/grad.rs @@ -73,6 +73,7 @@ where } // get the gradient of the node let grad = store.remove(&node.id()).expect("Gradient not found"); + let grad = grad.detach(); // handle the different types of operations if let Some(op) = &self.op { match op { diff --git a/tensor/src/tensor.rs b/tensor/src/tensor.rs index 282ef166..57e41a66 100644 --- a/tensor/src/tensor.rs +++ b/tensor/src/tensor.rs @@ -58,6 +58,23 @@ impl TensorBase { pub fn from_vec(kind: TensorKind, shape: impl IntoShape, store: Vec) -> Self { from_vec(kind, shape, store) } + + pub fn detach(&self) -> Self + where + T: Clone, + { + if self.op.is_none() && !self.is_variable() { + self.clone() + } else { + Self { + id: TensorId::new(), + kind: TensorKind::Normal, + layout: self.layout.clone(), + op: None, + store: self.store.clone(), + } + } + } /// Returns the number of elements in the tensor. pub fn elements(&self) -> usize { self.layout.elements() @@ -70,15 +87,15 @@ impl TensorBase { pub fn layout(&self) -> &Layout { &self.layout } - + /// Get a reference to the operation of the tensor pub fn op(&self) -> Option<&TensorOp> { self.op.as_ref() } - + /// Get a reference to the rank of the tensor pub fn rank(&self) -> Rank { self.layout.shape().rank() } - + /// Get a reference to the shape of the tensor pub fn shape(&self) -> &Shape { self.layout.shape() } @@ -86,15 +103,16 @@ impl TensorBase { pub fn stride(&self) -> &[usize] { self.layout.stride() } - + /// A function to check if the tensor is a variable pub fn is_variable(&self) -> bool { self.kind.is_variable() } - + /// Changes the kind of tensor to a variable pub fn variable(mut self) -> Self { self.kind = TensorKind::Variable; self } + /// Turn the tensor into a one-dimensional vector pub fn to_vec(&self) -> Vec where T: Clone, diff --git a/tensor/tests/backward.rs b/tensor/tests/backward.rs index 0b429d80..3908bd90 100644 --- a/tensor/tests/backward.rs +++ b/tensor/tests/backward.rs @@ -34,8 +34,9 @@ fn test_add_mul() { let shape = (2, 2); let a = Tensor::::ones(shape).variable(); let b = Tensor::::ones(shape).variable(); - // let c = &a + &b; - let d = &a * (&a + &b); + println!("A({}), B({})", a.id(), b.id()); + let c = &a + &b; + let d = &a * &c; let grad = d.grad(); assert_eq!(grad[&a.id()], Tensor::fill(shape, 3_f64)); From c19daa78447743b0f36fc380d3bd835af7bb5286 Mon Sep 17 00:00:00 2001 From: Joe McCain III Date: Sat, 23 Mar 2024 14:05:19 -0500 Subject: [PATCH 50/87] update Signed-off-by: Joe McCain III --- graphs/src/dcg/edge.rs | 19 +++++-- graphs/src/dcg/graph.rs | 22 ++++---- graphs/src/grad/store.rs | 52 ++++++++++++------ graphs/src/id/entry.rs | 92 ++++++++++++++++++++++++++++++++ graphs/src/id/id.rs | 54 +++++++++++++++++++ graphs/src/id/mod.rs | 20 +++++++ graphs/src/lib.rs | 1 + graphs/tests/dcg.rs | 13 +++++ tensor/src/actions/grad/store.rs | 1 + tensor/src/impls/grad.rs | 2 +- tensor/tests/backward.rs | 19 ++++++- 11 files changed, 262 insertions(+), 33 deletions(-) create mode 100644 graphs/src/id/entry.rs create mode 100644 graphs/src/id/id.rs create mode 100644 graphs/src/id/mod.rs diff --git a/graphs/src/dcg/edge.rs b/graphs/src/dcg/edge.rs index 4715d535..1a87c42e 100644 --- a/graphs/src/dcg/edge.rs +++ b/graphs/src/dcg/edge.rs @@ -2,6 +2,7 @@ Appellation: edge Contrib: FL03 */ +use crate::id::Id; use crate::NodeIndex; #[cfg(feature = "serde")] use 
serde::{Deserialize, Serialize}; @@ -9,19 +10,29 @@ use serde::{Deserialize, Serialize}; #[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)] #[cfg_attr(feature = "serde", derive(Deserialize, Serialize))] pub struct Edge { - source: Idx, + source: Id, } impl Edge { pub fn new(source: Idx) -> Self { - Self { source } + Self { + source: Id::new(source), + } } - pub fn source(&self) -> &Idx { + pub fn get_id(&self) -> usize { + self.source.id() + } + + pub fn get_index(&self) -> &Idx { + self.source.index() + } + + pub fn source(&self) -> &Id { &self.source } - pub fn into_source(self) -> Idx { + pub fn into_source(self) -> Id { self.source } } diff --git a/graphs/src/dcg/graph.rs b/graphs/src/dcg/graph.rs index 5b1947cf..5ba2d31e 100644 --- a/graphs/src/dcg/graph.rs +++ b/graphs/src/dcg/graph.rs @@ -69,22 +69,24 @@ impl Dcg { where T: Copy + Default + Num + NumAssignOps + NumOps, { - let sorted = toposort(&self.store, None)?; - let target = *sorted.last().unwrap(); + let mut sorted = toposort(&self.store, None)?; + sorted.reverse(); + let target = *sorted.first().unwrap(); let mut gradients = HashMap::::new(); gradients.insert(target, T::one()); - for node in sorted.iter().rev() { - let node_grad = gradients[node]; - let node_op = self.get(*node).unwrap(); + for scope in sorted.iter().copied() { + // Get the gradient of the current scope + let grad = gradients[&scope]; + let node = &self[scope]; - if let Node::Op { inputs, op } = node_op { + if let Node::Op { inputs, op } = node { match op { Operations::Binary(inner) => match *inner { BinaryExpr::Add(_) => { - for arg in self.store.neighbors_directed(*node, Direction::Incoming) { - *gradients.entry(arg).or_default() += node_grad; + for arg in self.store.neighbors_directed(scope, Direction::Incoming) { + *gradients.entry(arg).or_default() += grad; } } BinaryExpr::Mul(_) => { @@ -92,8 +94,8 @@ impl Dcg { let rhs = inputs[1]; let lhs_val = self.get(lhs).unwrap().get_value(); let rhs_val = self.get(rhs).unwrap().get_value(); - *gradients.entry(lhs).or_default() += node_grad * rhs_val; - *gradients.entry(rhs).or_default() += node_grad * lhs_val; + *gradients.entry(lhs).or_default() += grad * rhs_val; + *gradients.entry(rhs).or_default() += grad * lhs_val; } _ => {} }, diff --git a/graphs/src/grad/store.rs b/graphs/src/grad/store.rs index 44315386..df561537 100644 --- a/graphs/src/grad/store.rs +++ b/graphs/src/grad/store.rs @@ -5,13 +5,14 @@ use crate::NodeIndex; use acme::prelude::Store; use std::any::Any; -use std::collections::BTreeMap; +use std::collections::btree_map::{BTreeMap, Entry}; +use std::ops::{Index, IndexMut}; -pub struct GradientStore { - store: BTreeMap>, +pub struct GradientStore> { + store: BTreeMap, } -impl GradientStore +impl GradientStore where K: Ord, { @@ -21,35 +22,52 @@ where } } - pub fn or_insert(&mut self, key: K, value: Box) -> &mut dyn Any { + pub fn entry(&mut self, key: K) -> Entry<'_, K, V> { + self.store.entry(key) + } + + pub fn or_insert(&mut self, key: K, value: V) -> &mut V { self.store.entry(key).or_insert(value) } } -impl Store for GradientStore +impl Store for GradientStore where K: Ord, - T: Clone + 'static, { fn get(&self, key: &K) -> Option<&T> { - self.store.get(key).map(|v| v.downcast_ref::().unwrap()) + self.store.get(key) } fn get_mut(&mut self, key: &K) -> Option<&mut T> { - self.store - .get_mut(key) - .map(|v| v.downcast_mut::().unwrap()) + self.store.get_mut(key) } fn insert(&mut self, key: K, value: T) -> Option { - self.store - .insert(key, Box::new(value)) - .map(|v| 
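
The `gradient` routine above seeds the target with one and walks the reversed topological order, applying the sum rule for `Add` and the product rule for `Mul`. A compact standalone version of that accumulation scheme; the node layout and names here are illustrative, not the crate's types:

```rust
// Reverse-mode pass over a topologically ordered expression, mirroring the
// Add/Mul rules in `Dcg::gradient` above.
#[derive(Clone, Copy)]
enum Node {
    Input(f64),
    Add(usize, usize),
    Mul(usize, usize),
}

fn value(nodes: &[Node], i: usize) -> f64 {
    match nodes[i] {
        Node::Input(v) => v,
        Node::Add(l, r) => value(nodes, l) + value(nodes, r),
        Node::Mul(l, r) => value(nodes, l) * value(nodes, r),
    }
}

fn gradient(nodes: &[Node], target: usize) -> Vec<f64> {
    let mut grads = vec![0.0; nodes.len()];
    grads[target] = 1.0; // seed: d(target)/d(target) = 1
    // indices are assumed topologically ordered, so walking them in reverse
    // visits every consumer before its inputs
    for i in (0..=target).rev() {
        let g = grads[i];
        match nodes[i] {
            Node::Input(_) => {}
            Node::Add(l, r) => {
                grads[l] += g;
                grads[r] += g;
            }
            Node::Mul(l, r) => {
                grads[l] += g * value(nodes, r);
                grads[r] += g * value(nodes, l);
            }
        }
    }
    grads
}

fn main() {
    // d = a * (a + b) with a = 2, b = 1
    let nodes = [
        Node::Input(2.0),
        Node::Input(1.0),
        Node::Add(0, 1),
        Node::Mul(0, 2),
    ];
    let g = gradient(&nodes, 3);
    assert_eq!(g[0], 5.0); // dd/da = (a + b) + a
    assert_eq!(g[1], 2.0); // dd/db = a
}
```

For a = 2, b = 1 this yields 5.0 and 2.0, a useful reference point for the `test_simple_chained` assertions later in the series.
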
v.downcast_ref::().unwrap().clone()) + self.store.insert(key, value) } fn remove(&mut self, key: &K) -> Option { - self.store - .remove(key) - .map(|v| v.downcast_ref::().unwrap().clone()) + self.store.remove(key) + } +} + +impl Index for GradientStore +where + K: Ord, +{ + type Output = T; + + fn index(&self, key: K) -> &Self::Output { + self.store.get(&key).expect("Key not found") + } +} + +impl IndexMut for GradientStore +where + K: Ord, +{ + fn index_mut(&mut self, key: K) -> &mut Self::Output { + self.store.get_mut(&key).expect("Key not found") } } diff --git a/graphs/src/id/entry.rs b/graphs/src/id/entry.rs new file mode 100644 index 00000000..b3394377 --- /dev/null +++ b/graphs/src/id/entry.rs @@ -0,0 +1,92 @@ +/* + Appellation: atomic + Contrib: FL03 +*/ +//! # Atomic Id +//! +//! +use super::Identifier; +#[cfg(feature = "serde")] +use serde::{Deserialize, Serialize}; +use std::ops::{Deref, DerefMut}; +use std::sync::atomic::{AtomicUsize, Ordering::Relaxed}; + +#[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)] +#[cfg_attr(feature = "serde", derive(Deserialize, Serialize,))] +#[repr(transparent)] +pub struct EntryId(usize); + +impl EntryId { + pub fn new() -> Self { + static COUNTER: AtomicUsize = AtomicUsize::new(1); + Self(COUNTER.fetch_add(1, Relaxed)) + } + + pub fn next(&self) -> Self { + Self::new() + } + + pub fn set(&mut self, id: usize) { + self.0 = id; + } + + pub const fn get(&self) -> usize { + self.0 + } + + pub fn into_inner(self) -> usize { + self.0 + } +} + +impl AsRef for EntryId { + fn as_ref(&self) -> &usize { + &self.0 + } +} + +impl AsMut for EntryId { + fn as_mut(&mut self) -> &mut usize { + &mut self.0 + } +} + +impl Default for EntryId { + fn default() -> Self { + Self::new() + } +} + +impl Deref for EntryId { + type Target = usize; + + fn deref(&self) -> &Self::Target { + &self.0 + } +} + +impl DerefMut for EntryId { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.0 + } +} + +impl Identifier for EntryId {} + +impl std::fmt::Display for EntryId { + fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { + write!(f, "{}", self.0) + } +} + +impl From for EntryId { + fn from(id: usize) -> Self { + Self(id) + } +} + +impl From for usize { + fn from(id: EntryId) -> Self { + id.0 + } +} diff --git a/graphs/src/id/id.rs b/graphs/src/id/id.rs new file mode 100644 index 00000000..71d4baca --- /dev/null +++ b/graphs/src/id/id.rs @@ -0,0 +1,54 @@ +/* + Appellation: id + Contrib: FL03 +*/ +use super::EntryId; +use crate::NodeIndex; +#[cfg(feature = "serde")] +use serde::{Deserialize, Serialize}; + +#[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)] +#[cfg_attr(feature = "serde", derive(Deserialize, Serialize,))] +pub struct Id { + id: EntryId, + index: Idx, +} + +impl Id { + pub fn new(index: Idx) -> Self { + Self { + id: EntryId::new(), + index, + } + } + + pub fn id(&self) -> usize { + *self.id + } + + pub fn index(&self) -> &Idx { + &self.index + } +} + +impl Default for Id +where + Idx: Default, +{ + fn default() -> Self { + Self::new(Idx::default()) + } +} + +impl std::fmt::Display for Id +where + Idx: std::fmt::Display, +{ + fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { + if f.alternate() { + write!(f, "{}.{}", self.index(), self.id) + } else { + write!(f, "{}", self.index()) + } + } +} diff --git a/graphs/src/id/mod.rs b/graphs/src/id/mod.rs new file mode 100644 index 00000000..b3d7b847 --- /dev/null +++ b/graphs/src/id/mod.rs @@ -0,0 +1,20 @@ +/* + Appellation: ids + Contrib: FL03 +*/ 
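
`EntryId::new` above relies on a single process-wide `AtomicUsize`; `Relaxed` ordering is sufficient because the only guarantee needed is that every `fetch_add` returns a distinct value, not any ordering against other memory operations. The pattern reduced to its essentials:

```rust
use std::sync::atomic::{AtomicUsize, Ordering::Relaxed};

// Same pattern as `EntryId::new` above: a process-wide counter whose only
// invariant is that every call returns a fresh value.
fn next_id() -> usize {
    static COUNTER: AtomicUsize = AtomicUsize::new(1);
    COUNTER.fetch_add(1, Relaxed)
}

fn main() {
    let ids: Vec<usize> = std::iter::repeat_with(next_id).take(4).collect();
    assert_eq!(ids, vec![1, 2, 3, 4]); // strictly increasing on a single thread
}
```
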
+//! # Ids +//! +//! +pub use self::{entry::*, id::*}; + +pub(crate) mod entry; +pub(crate) mod id; + +pub trait Identifier {} + +pub trait Index { + fn next(&self) -> Self; +} + +#[cfg(test)] +mod tests {} diff --git a/graphs/src/lib.rs b/graphs/src/lib.rs index b713ba69..a404f744 100644 --- a/graphs/src/lib.rs +++ b/graphs/src/lib.rs @@ -16,6 +16,7 @@ pub(crate) mod graph; pub mod dcg; pub mod errors; pub mod grad; +pub mod id; pub mod ops; pub mod scg; diff --git a/graphs/tests/dcg.rs b/graphs/tests/dcg.rs index 7b8a41ae..3d9c46cb 100644 --- a/graphs/tests/dcg.rs +++ b/graphs/tests/dcg.rs @@ -26,3 +26,16 @@ fn test_dcg() { assert_eq!(grad[&a], 3.0); assert_eq!(grad[&b], 2.0); } + +#[test] +fn test_simple_chained() { + let mut dcg = Dcg::::new(); + let a = dcg.input(true, 2.0); + let b = dcg.input(true, 1.0); + let c = dcg.add(a, b); + let d = dcg.mul(a, c); + + let grad = dcg.gradient(d).unwrap(); + assert_eq!(grad[&a], 2.0); + assert_eq!(grad[&b], 2.0); +} diff --git a/tensor/src/actions/grad/store.rs b/tensor/src/actions/grad/store.rs index ca729688..56f2fc45 100644 --- a/tensor/src/actions/grad/store.rs +++ b/tensor/src/actions/grad/store.rs @@ -8,6 +8,7 @@ use acme::prelude::Store; use std::collections::btree_map::{BTreeMap, Entry}; use std::ops::{Index, IndexMut}; +#[derive(Clone, Debug)] pub struct GradStore { pub(crate) store: BTreeMap>, } diff --git a/tensor/src/impls/grad.rs b/tensor/src/impls/grad.rs index 3af2efec..d1cc1d88 100644 --- a/tensor/src/impls/grad.rs +++ b/tensor/src/impls/grad.rs @@ -65,7 +65,7 @@ where // initialize a new gradient store let mut store = GradStore::new(); // insert the gradient w.r.t. the current node - store.insert(self.id(), self.ones_like()); + store.insert(sorted[0].id(), sorted[0].ones_like()); for node in sorted { if node.is_variable() { diff --git a/tensor/tests/backward.rs b/tensor/tests/backward.rs index 3908bd90..64f9d5bb 100644 --- a/tensor/tests/backward.rs +++ b/tensor/tests/backward.rs @@ -24,7 +24,24 @@ fn test_backward() { let grad = c.grad(); - assert_eq!(grad[&a.id()], Tensor::::fill(shape, 2_f64)); + assert_eq!(grad[&a.id()], Tensor::fill(shape, 2_f64)); + assert_eq!(grad[&b.id()], Tensor::ones(shape)); +} + +#[test] +#[ignore = "Needs to be fixed"] +fn test_add_chain() { + let shape = (2, 2); + + let a = Tensor::::ones(shape).variable(); + let b = Tensor::::fill(shape, 2_f64).variable(); + let c = &a + &b; + let d = &c + &a; + + let grad = d.grad(); + // println!("Gradient:\n\n{:?}\n\n", &grad); + + assert_eq!(grad[&a.id()], Tensor::fill(shape, 2_f64)); assert_eq!(grad[&b.id()], Tensor::ones(shape)); } From 887dea7fe9f0122c610114ec6ecf21d8b4ddd182 Mon Sep 17 00:00:00 2001 From: Joe McCain III Date: Sat, 23 Mar 2024 14:27:36 -0500 Subject: [PATCH 51/87] update Signed-off-by: Joe McCain III --- tensor/src/lib.rs | 2 ++ tensor/src/linalg/mod.rs | 18 +++++++++++++++- tensor/src/specs/mod.rs | 46 ++++++++++++++++++++++++++++++++++++---- 3 files changed, 61 insertions(+), 5 deletions(-) diff --git a/tensor/src/lib.rs b/tensor/src/lib.rs index 73a2c014..0416dd57 100644 --- a/tensor/src/lib.rs +++ b/tensor/src/lib.rs @@ -48,6 +48,8 @@ pub mod prelude { #[doc(inline)] pub use crate::error::*; #[doc(inline)] + pub use crate::linalg::prelude::*; + #[doc(inline)] pub use crate::ops::*; #[doc(inline)] pub use crate::shape::prelude::*; diff --git a/tensor/src/linalg/mod.rs b/tensor/src/linalg/mod.rs index aa31f9a7..4fae49d5 100644 --- a/tensor/src/linalg/mod.rs +++ b/tensor/src/linalg/mod.rs @@ -9,7 +9,23 @@ pub mod arith; pub mod uplo; pub 
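
Worth cross-checking the expected values in `test_simple_chained` above by hand: with a = 2 and b = 1, c = a + b = 3 and d = a * c, so the product rule gives ∂d/∂a = c + a * ∂c/∂a = 3 + 2 = 5 and ∂d/∂b = a * ∂c/∂b = 2. The asserted `grad[&a] == 2.0` accounts only for the additive path through c, so either the expectation or the accumulation in `gradient` may need revisiting.
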
trait Inverse { - fn inverse(&self) -> Self; + fn inv(self) -> Self; +} + +/// Matrix multiplication +pub trait Matmul { + type Output; + + fn matmul(&self, rhs: &Rhs) -> Self::Output; +} + +pub trait Transpose { + fn transpose(&self) -> Self; +} + +pub(crate) mod prelude { + pub use super::uplo::UPLO; + pub use super::{Inverse, Matmul}; } #[cfg(test)] diff --git a/tensor/src/specs/mod.rs b/tensor/src/specs/mod.rs index cf76149a..3894f4f0 100644 --- a/tensor/src/specs/mod.rs +++ b/tensor/src/specs/mod.rs @@ -6,22 +6,60 @@ pub mod ndtensor; pub mod scalar; +use num::{Complex, Num}; + pub trait Affine { type Output; fn affine(&self, mul: &T, add: &T) -> Self::Output; } -pub trait Matmul { - type Output; +/// +pub trait Conjugate { + type Complex; + type Real; + + fn conj(&self) -> Self::Complex; +} + +macro_rules! impl_conj { + ($t:ty) => { + impl Conjugate for $t { + type Complex = Complex; + type Real = Self; - fn matmul(&self, rhs: &Rhs) -> Self::Output; + fn conj(&self) -> Self::Complex { + Complex::new(*self, <$t>::default()) + } + } + }; + ($($t:ty),*) => { + $( + impl_conj!($t); + )* + }; } +impl Conjugate for Complex +where + T: Clone + Num + std::ops::Neg, +{ + type Complex = Self; + type Real = T; + + fn conj(&self) -> Self::Complex { + Complex::conj(self) + } +} + +impl_conj!(u8, u16, u32, u64, u128, usize); +impl_conj!(i8, i16, i32, i64, i128, isize); +impl_conj!(f32, f64); + pub(crate) mod prelude { pub use super::ndtensor::*; pub use super::scalar::*; - pub use super::{Affine, Matmul}; + pub use super::Affine; } #[cfg(test)] From e03199934d57d70d10860112ef8d0e7d96775a2b Mon Sep 17 00:00:00 2001 From: Joe McCain III Date: Wed, 27 Mar 2024 10:27:16 -0500 Subject: [PATCH 52/87] update Signed-off-by: Joe McCain III --- core/src/errors/error.rs | 53 +++++++---- core/src/errors/kinds.rs | 96 +++++++++++++++++--- graphs/src/dcg/graph.rs | 16 ++-- graphs/tests/dcg.rs | 2 +- macros/src/lib.rs | 3 + tensor/src/actions/arange.rs | 13 +-- tensor/src/actions/grad/store.rs | 2 +- tensor/src/data/elem.rs | 9 ++ tensor/src/data/mod.rs | 50 ++++++---- tensor/src/data/repr/mod.rs | 10 -- tensor/src/data/repr/owned.rs | 25 +++-- tensor/src/impls/grad.rs | 8 +- tensor/src/impls/linalg.rs | 33 +------ tensor/src/lib.rs | 6 +- tensor/src/linalg/{uplo/kinds.rs => uplo.rs} | 0 tensor/src/linalg/uplo/mod.rs | 13 --- tensor/src/ops/kinds.rs | 35 +++++++ tensor/src/seal.rs | 29 ++++++ tensor/src/specs/mod.rs | 1 - tensor/src/specs/ndtensor.rs | 4 +- tensor/src/types/dtype.rs | 58 ++++++++++++ tensor/src/types/mod.rs | 1 + tensor/src/utils.rs | 62 +++++++++++++ tensor/tests/backward.rs | 10 +- 24 files changed, 396 insertions(+), 143 deletions(-) create mode 100644 tensor/src/data/elem.rs delete mode 100644 tensor/src/data/repr/mod.rs rename tensor/src/linalg/{uplo/kinds.rs => uplo.rs} (100%) delete mode 100644 tensor/src/linalg/uplo/mod.rs create mode 100644 tensor/src/seal.rs create mode 100644 tensor/src/types/dtype.rs create mode 100644 tensor/src/utils.rs diff --git a/core/src/errors/error.rs b/core/src/errors/error.rs index ad369b63..67580549 100644 --- a/core/src/errors/error.rs +++ b/core/src/errors/error.rs @@ -2,7 +2,7 @@ Appellation: error Contrib: FL03 */ -use super::kinds::ErrorKind; +use super::kinds::{ErrorKind, ExternalError, SyncError}; #[cfg(feature = "serde")] use serde::{Deserialize, Serialize}; @@ -20,18 +20,32 @@ impl Error { message: msg.to_string(), } } - - pub fn kind(&self) -> ErrorKind { - self.kind + /// Get an owned reference to the error kind + pub fn kind(&self) -> 
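
Implementing the `Matmul` trait above for a minimal row-major matrix shows the intended shape of the contract; this sketch assumes the trait is generic over the right-hand side with `Rhs = Self` as the default, and is not the crate's tensor implementation:

```rust
pub trait Matmul<Rhs = Self> {
    type Output;

    fn matmul(&self, rhs: &Rhs) -> Self::Output;
}

// A toy row-major matrix, just enough to satisfy the trait.
struct Mat {
    rows: usize,
    cols: usize,
    data: Vec<f64>,
}

impl Matmul for Mat {
    type Output = Mat;

    fn matmul(&self, rhs: &Mat) -> Mat {
        assert_eq!(self.cols, rhs.rows, "inner dimensions must agree");
        let mut data = vec![0.0; self.rows * rhs.cols];
        for i in 0..self.rows {
            for k in 0..self.cols {
                let a = self.data[i * self.cols + k];
                for j in 0..rhs.cols {
                    data[i * rhs.cols + j] += a * rhs.data[k * rhs.cols + j];
                }
            }
        }
        Mat {
            rows: self.rows,
            cols: rhs.cols,
            data,
        }
    }
}

fn main() {
    let i2 = Mat { rows: 2, cols: 2, data: vec![1.0, 0.0, 0.0, 1.0] };
    let m = Mat { rows: 2, cols: 2, data: vec![1.0, 2.0, 3.0, 4.0] };
    assert_eq!(i2.matmul(&m).data, m.data); // identity leaves m unchanged
}
```
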
&ErrorKind { + &self.kind } - + /// Get an owned reference to the error message pub fn message(&self) -> &str { &self.message } - + /// Set the error message + pub fn set_message(&mut self, msg: impl ToString) { + self.message = msg.to_string(); + } + /// Consume the error and return the message pub fn into_message(self) -> String { self.message } + /// A functional method for setting the error kind + pub fn with_kind(mut self, kind: ErrorKind) -> Self { + self.kind = kind; + self + } + /// A functional method for setting the error message + pub fn with_message(mut self, msg: impl ToString) -> Self { + self.message = msg.to_string(); + self + } } impl std::fmt::Display for Error { @@ -50,7 +64,7 @@ impl From for Error { impl From> for Error { fn from(err: std::sync::TryLockError) -> Self { - Self::new(ErrorKind::Sync, err.to_string()) + Self::new(ErrorKind::Sync(SyncError::TryLock), err.to_string()) } } @@ -69,14 +83,19 @@ macro_rules! error_from { }; } -// macro_rules! into_error { -// (kind $kind:expr, $t:ty) => { -// impl From<$t> for Error { -// fn from(err: $t) -> Self { -// Self::new($kind, err.to_string()) -// } -// } -// }; -// } +macro_rules! err_variant { + (external $variant:ident, $t:ty) => { + impl From<$t> for Error { + fn from(err: $t) -> Self { + Self::new( + ErrorKind::External(ExternalError::$variant), + err.to_string(), + ) + } + } + }; +} -error_from!(shared ErrorKind::Unknown, (&str, String, Box)); +err_variant!(external Unknown, &str); +err_variant!(external Unknown, String); +err_variant!(external Unknown, Box); diff --git a/core/src/errors/kinds.rs b/core/src/errors/kinds.rs index fa0e0dd1..a3ad03be 100644 --- a/core/src/errors/kinds.rs +++ b/core/src/errors/kinds.rs @@ -4,26 +4,20 @@ */ #[cfg(feature = "serde")] use serde::{Deserialize, Serialize}; +use smart_default::SmartDefault; use strum::{Display, EnumCount, EnumIs, EnumIter, VariantNames}; pub trait ErrorType { type Kind; - fn kind(&self) -> Self::Kind; + fn kind(&self) -> &Self::Kind; fn name(&self) -> &str; } -pub enum Errors { - Specific(Box>), - Unknown, -} - #[derive( Clone, - Copy, Debug, - Default, Display, EnumCount, EnumIs, @@ -33,6 +27,7 @@ pub enum Errors { Ord, PartialEq, PartialOrd, + SmartDefault, VariantNames, )] #[cfg_attr( @@ -42,11 +37,9 @@ pub enum Errors { )] #[strum(serialize_all = "snake_case")] pub enum ErrorKind { - Func, - Graph, - Sync, #[default] - Unknown, + External(ExternalError), + Sync(SyncError), } #[derive( @@ -70,18 +63,91 @@ pub enum ErrorKind { serde(rename_all = "snake_case") )] #[strum(serialize_all = "snake_case")] -pub enum ExternalError { - Known(E), +pub enum ExternalError { + Custom(E), #[default] Unknown, } impl ExternalError { pub fn new(error: E) -> Self { - Self::Known(error) + Self::Custom(error) } pub fn unknown() -> Self { Self::Unknown } } + +impl ErrorType for ExternalError +where + E: ErrorType, +{ + type Kind = ExternalError; + + fn kind(&self) -> &Self::Kind { + &self + } + + fn name(&self) -> &str { + match self { + Self::Custom(inner) => inner.name(), + Self::Unknown => "unknown", + } + } +} + +#[derive( + Clone, + Copy, + Debug, + Default, + Display, + EnumCount, + EnumIs, + Eq, + Hash, + Ord, + PartialEq, + PartialOrd, + VariantNames, +)] +#[cfg_attr( + feature = "serde", + derive(Deserialize, Serialize,), + serde(rename_all = "snake_case") +)] +#[strum(serialize_all = "snake_case")] +pub enum StdError { + #[default] + IO, + Parse, + Sync(SyncError), +} + +#[derive( + Clone, + Copy, + Debug, + Default, + Display, + EnumCount, + EnumIs, + Eq, + Hash, 
+ Ord, + PartialEq, + PartialOrd, + VariantNames, +)] +#[cfg_attr( + feature = "serde", + derive(Deserialize, Serialize,), + serde(rename_all = "snake_case") +)] +#[strum(serialize_all = "snake_case")] +pub enum SyncError { + #[default] + Poison, + TryLock, +} diff --git a/graphs/src/dcg/graph.rs b/graphs/src/dcg/graph.rs index 5ba2d31e..d7e5f2d9 100644 --- a/graphs/src/dcg/graph.rs +++ b/graphs/src/dcg/graph.rs @@ -117,15 +117,15 @@ impl Dcg { let topo = toposort(&self.store, None)?; - for node in topo.iter().rev() { - let node_grad = gradients[node]; - let node_op = self.get(*node).unwrap(); + for scope in topo.iter().rev() { + let grad = gradients[scope]; + let node = self.get(*scope).unwrap(); - if let Node::Op { inputs, op } = node_op { + if let Node::Op { inputs, op } = node { match op { Operations::Binary(BinaryExpr::Add(_)) => { - for arg in self.store.neighbors_directed(*node, Direction::Incoming) { - *gradients.entry(arg).or_default() += node_grad; + for arg in self.store.neighbors_directed(*scope, Direction::Incoming) { + *gradients.entry(arg).or_default() += grad; } } Operations::Binary(BinaryExpr::Mul(_)) => { @@ -133,8 +133,8 @@ impl Dcg { let rhs = inputs[1]; let lhs_val = self[lhs].get_value(); let rhs_val = self[rhs].get_value(); - *gradients.entry(lhs).or_default() += node_grad * rhs_val; - *gradients.entry(rhs).or_default() += node_grad * lhs_val; + *gradients.entry(lhs).or_default() += grad * rhs_val; + *gradients.entry(rhs).or_default() += grad * lhs_val; } // Handle other operations as needed _ => {} diff --git a/graphs/tests/dcg.rs b/graphs/tests/dcg.rs index 3d9c46cb..a243907d 100644 --- a/graphs/tests/dcg.rs +++ b/graphs/tests/dcg.rs @@ -28,7 +28,7 @@ fn test_dcg() { } #[test] -fn test_simple_chained() { +fn test_composite_expr() { let mut dcg = Dcg::::new(); let a = dcg.input(true, 2.0); let b = dcg.input(true, 1.0); diff --git a/macros/src/lib.rs b/macros/src/lib.rs index 6650e94d..a4eb21d8 100644 --- a/macros/src/lib.rs +++ b/macros/src/lib.rs @@ -26,6 +26,9 @@ pub fn partial(_attr: TokenStream, item: TokenStream) -> TokenStream { /// Compute the gradient of an expression /// +/// # Examples +/// +/// #[proc_macro] pub fn autodiff(input: TokenStream) -> TokenStream { // Parse the input expression into a syntax tree diff --git a/tensor/src/actions/arange.rs b/tensor/src/actions/arange.rs index 825679a9..e0ddb501 100644 --- a/tensor/src/actions/arange.rs +++ b/tensor/src/actions/arange.rs @@ -2,7 +2,7 @@ Appellation: arange Contrib: FL03 */ -use num::traits::{FromPrimitive, Num, ToPrimitive, Zero}; +use num::traits::{FromPrimitive, Num, ToPrimitive}; use std::ops; pub struct Arange { @@ -21,7 +21,7 @@ impl Arange { } impl Arange where - T: Copy + Num, + T: Copy + Default + Num, { pub fn start(&self) -> T { self.range.start() @@ -56,17 +56,14 @@ pub enum Aranged { impl Aranged where - T: Copy, + T: Copy + Default, { /// Returns the start value of the range. - pub fn start(&self) -> T - where - T: Zero, - { + pub fn start(&self) -> T { match self { Aranged::Range { start, .. } => *start, Aranged::Inclusive { start, .. } => *start, - Aranged::Until { .. } => T::zero(), + Aranged::Until { .. } => T::default(), } } /// Returns the stop value of the range. diff --git a/tensor/src/actions/grad/store.rs b/tensor/src/actions/grad/store.rs index 56f2fc45..93cc7f91 100644 --- a/tensor/src/actions/grad/store.rs +++ b/tensor/src/actions/grad/store.rs @@ -33,7 +33,7 @@ impl GradStore { } /// Inserts a tensor into the store. 
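
The `Aranged` variants above behave like the standard range types, with `Until` implicitly starting at `T::default()`. A minimal standalone generator with the same exclusive-stop semantics; the real `Arange` iterator is not shown in this patch, so the signature here is an assumption:

```rust
// Hypothetical stepped-range generator in the spirit of `Arange` above.
fn arange(start: f64, stop: f64, step: f64) -> Vec<f64> {
    assert!(step > 0.0, "this sketch only walks forward");
    let n = ((stop - start) / step).ceil() as usize;
    (0..n).map(|i| start + step * i as f64).collect()
}

fn main() {
    // stop is exclusive, matching `start..stop`
    assert_eq!(arange(0.0, 1.0, 0.25), vec![0.0, 0.25, 0.5, 0.75]);
}
```
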
pub fn insert_tensor(&mut self, tensor: TensorBase) -> Option> { - self.insert(tensor.id, tensor) + self.insert(tensor.id(), tensor) } /// Returns true if the store contains no elements. pub fn is_empty(&self) -> bool { diff --git a/tensor/src/data/elem.rs b/tensor/src/data/elem.rs new file mode 100644 index 00000000..5290cc4f --- /dev/null +++ b/tensor/src/data/elem.rs @@ -0,0 +1,9 @@ +/* + Appellation: elem + Contrib: FL03 +*/ +//! # Elements +//! +//! + +pub trait Element {} diff --git a/tensor/src/data/mod.rs b/tensor/src/data/mod.rs index ef3b9deb..0a49cfff 100644 --- a/tensor/src/data/mod.rs +++ b/tensor/src/data/mod.rs @@ -5,35 +5,47 @@ //! # Data //! //! -#![allow(dead_code, unused_imports)] +pub mod elem; -pub mod repr; +pub mod repr { + pub mod owned; +} +#[allow(clippy::missing_safety_doc)] pub unsafe trait RawData { type Elem; + + #[doc(hidden)] + fn _is_pointer_inbounds(&self, ptr: *const Self::Elem) -> bool; + + private_decl! {} } pub trait Data: RawData {} -#[cfg(not(feature = "std"))] -use alloc::vec::Vec; -use core::ptr::NonNull; +pub(crate) mod utils { + #[cfg(not(feature = "std"))] + #[allow(unused_imports)] + use alloc::vec::Vec; + use core::ptr::NonNull; -/// Return a NonNull pointer to the vector's data -pub(crate) fn nonnull_from_vec_data(v: &mut Vec) -> NonNull { - // this pointer is guaranteed to be non-null - unsafe { NonNull::new_unchecked(v.as_mut_ptr()) } -} + /// Return a NonNull pointer to the vector's data + pub(crate) fn nonnull_from_vec_data(v: &mut Vec) -> NonNull { + // this pointer is guaranteed to be non-null + unsafe { NonNull::new_unchecked(v.as_mut_ptr()) } + } -/// Converts `ptr` to `NonNull` -/// -/// Safety: `ptr` *must* be non-null. -/// This is checked with a debug assertion, and will panic if this is not true, -/// but treat this as an unconditional conversion. -#[inline] -pub(crate) unsafe fn nonnull_debug_checked_from_ptr(ptr: *mut T) -> NonNull { - debug_assert!(!ptr.is_null()); - NonNull::new_unchecked(ptr) + /// Converts `ptr` to `NonNull` + /// + /// Safety: `ptr` *must* be non-null. + /// This is checked with a debug assertion, and will panic if this is not true, + /// but treat this as an unconditional conversion. 
+ #[allow(dead_code)] + #[inline] + pub(crate) unsafe fn nonnull_debug_checked_from_ptr(ptr: *mut T) -> NonNull { + debug_assert!(!ptr.is_null()); + NonNull::new_unchecked(ptr) + } } #[cfg(test)] diff --git a/tensor/src/data/repr/mod.rs b/tensor/src/data/repr/mod.rs deleted file mode 100644 index 15723f77..00000000 --- a/tensor/src/data/repr/mod.rs +++ /dev/null @@ -1,10 +0,0 @@ -/* - Appellation: repr - Contrib: FL03 -*/ -pub use self::owned::*; - -pub(crate) mod owned; - -#[cfg(test)] -mod tests {} diff --git a/tensor/src/data/repr/owned.rs b/tensor/src/data/repr/owned.rs index d0d954b8..77fb9223 100644 --- a/tensor/src/data/repr/owned.rs +++ b/tensor/src/data/repr/owned.rs @@ -2,7 +2,8 @@ Appellation: owned Contrib: FL03 */ -use crate::data::nonnull_from_vec_data; +use crate::data::utils::nonnull_from_vec_data; +use crate::data::RawData; use core::mem::{self, ManuallyDrop}; use core::ptr::NonNull; use core::slice; @@ -25,6 +26,10 @@ impl OwnedRepr { Self { capacity, len, ptr } } + pub(crate) fn as_slice(&self) -> &[A] { + unsafe { slice::from_raw_parts(self.ptr.as_ptr(), self.len) } + } + pub fn capacity(&self) -> usize { self.capacity } @@ -36,7 +41,6 @@ impl OwnedRepr { pub fn ptr(&self) -> NonNull { self.ptr } - /// Set the valid length of the data /// /// ## Safety @@ -60,10 +64,6 @@ impl OwnedRepr { pub(crate) fn into_vec(self) -> Vec { ManuallyDrop::new(self).take_as_vec() } - - pub(crate) fn as_slice(&self) -> &[A] { - unsafe { slice::from_raw_parts(self.ptr.as_ptr(), self.len) } - } } impl Clone for OwnedRepr @@ -118,3 +118,16 @@ impl From> for OwnedRepr { Self::from_vec(vec) } } + +unsafe impl RawData for OwnedRepr { + type Elem = A; + + fn _is_pointer_inbounds(&self, self_ptr: *const Self::Elem) -> bool { + let slc = self.as_slice(); + let ptr = slc.as_ptr() as *mut A; + let end = unsafe { ptr.add(slc.len()) }; + self_ptr >= ptr && self_ptr <= end + } + + private_impl! {} +} diff --git a/tensor/src/impls/grad.rs b/tensor/src/impls/grad.rs index d1cc1d88..7a00bc29 100644 --- a/tensor/src/impls/grad.rs +++ b/tensor/src/impls/grad.rs @@ -3,7 +3,7 @@ Contrib: FL03 */ use crate::actions::grad::GradStore; -use crate::prelude::{Scalar, TensorId, TensorOp}; +use crate::prelude::{Scalar, TensorId, TensorOp, TensorResult}; use crate::TensorBase; use acme::ops::binary::BinaryOp; use acme::prelude::Store; @@ -56,7 +56,7 @@ where nodes } - pub fn grad(&self) -> GradStore + pub fn grad(&self) -> TensorResult> where T: std::fmt::Debug, { @@ -65,7 +65,7 @@ where // initialize a new gradient store let mut store = GradStore::new(); // insert the gradient w.r.t. the current node - store.insert(sorted[0].id(), sorted[0].ones_like()); + store.insert(self.id(), self.ones_like()); for node in sorted { if node.is_variable() { @@ -98,6 +98,6 @@ where } } - store + Ok(store) } } diff --git a/tensor/src/impls/linalg.rs b/tensor/src/impls/linalg.rs index 59f3d62c..08e8b06a 100644 --- a/tensor/src/impls/linalg.rs +++ b/tensor/src/impls/linalg.rs @@ -5,40 +5,9 @@ //! Implementations for linear algebra operations. //! //! 
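
`OwnedRepr::from_vec` above takes ownership of a `Vec`'s buffer through `ManuallyDrop`, and `into_vec`/`take_as_vec` rebuild the `Vec` from the same (pointer, length, capacity) triple. The contract, demonstrated standalone:

```rust
use std::mem::ManuallyDrop;

// The invariant `OwnedRepr` above maintains: a triple taken from a live Vec
// can always be reassembled into the same Vec, provided the triple stays
// consistent and ownership of the buffer is never duplicated.
fn main() {
    let v = vec![1_u8, 2, 3];
    let mut v = ManuallyDrop::new(v); // suppress Vec's drop; the buffer is ours now
    let (ptr, len, cap) = (v.as_mut_ptr(), v.len(), v.capacity());

    // Safety: ptr/len/cap came from a live Vec, and `v` is never used again.
    let back = unsafe { Vec::from_raw_parts(ptr, len, cap) };
    assert_eq!(back, [1, 2, 3]);
}
```
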
-use crate::prelude::{Matmul, Scalar, TensorOp, TensorResult}; -use crate::shape::ShapeError; +use crate::prelude::{Matmul, Scalar, TensorOp}; use crate::tensor::*; -pub(crate) fn matmul(lhs: &TensorBase, rhs: &TensorBase) -> TensorResult> -where - T: Scalar, -{ - if lhs.shape().rank() != rhs.shape().rank() { - return Err(ShapeError::IncompatibleShapes.into()); - } - - let lhs_shape = lhs.shape().clone(); - let lhs_m = lhs_shape.rows(); - let lhs_n = lhs_shape.columns(); - let rhs_shape = rhs.shape().clone(); - - let shape = lhs_shape.matmul_shape(rhs.shape()).unwrap(); - let mut result = vec![T::zero(); shape.elements()]; - - for i in 0..lhs_m { - for j in 0..rhs_shape.columns() { - for k in 0..lhs_n { - let pos = i * rhs_shape[1] + j; - let left = i * lhs_n + k; - let right = k * rhs_shape[1] + j; - result[pos] += lhs.store[left] * rhs.store[right]; - } - } - } - let op = TensorOp::Matmul(Box::new(lhs.clone()), Box::new(rhs.clone())); - Ok(from_vec_with_op(false, op, shape, result)) -} - impl Matmul> for TensorBase where T: Scalar, diff --git a/tensor/src/lib.rs b/tensor/src/lib.rs index 0416dd57..20ba2ca1 100644 --- a/tensor/src/lib.rs +++ b/tensor/src/lib.rs @@ -12,10 +12,13 @@ extern crate alloc; extern crate acme_core as acme; #[doc(inline)] -pub use self::{error::*, tensor::*}; +pub use self::{error::*, seal::*, tensor::*, utils::*}; pub(crate) mod error; +#[macro_use] +pub(crate) mod seal; pub(crate) mod tensor; +pub(crate) mod utils; pub mod actions; pub mod data; @@ -59,6 +62,7 @@ pub mod prelude { pub use crate::store::*; #[doc(inline)] pub use crate::types::prelude::*; + pub use crate::utils::*; #[doc(inline)] pub use crate::Tensor; } diff --git a/tensor/src/linalg/uplo/kinds.rs b/tensor/src/linalg/uplo.rs similarity index 100% rename from tensor/src/linalg/uplo/kinds.rs rename to tensor/src/linalg/uplo.rs diff --git a/tensor/src/linalg/uplo/mod.rs b/tensor/src/linalg/uplo/mod.rs deleted file mode 100644 index 2e23235d..00000000 --- a/tensor/src/linalg/uplo/mod.rs +++ /dev/null @@ -1,13 +0,0 @@ -/* - Appellation: uplo - Contrib: FL03 -*/ -//! # Upper Lower -//! -//! -pub use self::kinds::UPLO; - -pub(crate) mod kinds; - -#[cfg(test)] -mod tests {} diff --git a/tensor/src/ops/kinds.rs b/tensor/src/ops/kinds.rs index d24a57fd..9bd5905e 100644 --- a/tensor/src/ops/kinds.rs +++ b/tensor/src/ops/kinds.rs @@ -14,6 +14,41 @@ pub enum TensorOp { Matmul(Box>, Box>), } +impl TensorOp { + pub fn binary(lhs: TensorBase, rhs: TensorBase, op: BinaryOp) -> Self { + TensorOp::Binary(Box::new(lhs), Box::new(rhs), op) + } + + pub fn binary_scalar(lhs: TensorBase, rhs: T, op: BinaryOp) -> Self { + TensorOp::BinaryScalar(Box::new(lhs), rhs, op) + } + + pub fn unary(tensor: TensorBase, op: UnaryOp) -> Self { + TensorOp::Unary(Box::new(tensor), op) + } + + pub fn matmul(lhs: TensorBase, rhs: TensorBase) -> Self { + TensorOp::Matmul(Box::new(lhs), Box::new(rhs)) + } + + pub fn lhs(&self) -> &TensorBase { + match self { + TensorOp::Binary(lhs, _, _) => lhs, + TensorOp::BinaryScalar(lhs, _, _) => lhs, + TensorOp::Unary(lhs, _) => lhs, + TensorOp::Matmul(lhs, _) => lhs, + } + } + + pub fn rhs(&self) -> Option<&TensorBase> { + match self { + TensorOp::Binary(_, rhs, _) => Some(rhs), + TensorOp::Matmul(_, rhs) => Some(rhs), + _ => None, + } + } +} + pub enum Inputs { Scalar(T), Tensor(TensorBase), diff --git a/tensor/src/seal.rs b/tensor/src/seal.rs new file mode 100644 index 00000000..1b6ae830 --- /dev/null +++ b/tensor/src/seal.rs @@ -0,0 +1,29 @@ +/* + Appellation: seal + Contrib: FL03 +*/ +//! 
The public parts of this private module are used to create traits +//! that cannot be implemented outside of our own crate. This way we +//! can feel free to extend those traits without worrying about it +//! being a breaking change for other implementations. + +/// If this type is pub but not publicly reachable, third parties +/// can't name it and can't implement traits using it. +pub struct PrivateMarker; + +macro_rules! private_decl { + () => { + /// This trait is private to implement; this method exists to make it + /// impossible to implement outside the crate. + #[doc(hidden)] + fn __private__(&self) -> $crate::seal::PrivateMarker; + }; +} + +macro_rules! private_impl { + () => { + fn __private__(&self) -> $crate::seal::PrivateMarker { + $crate::seal::PrivateMarker + } + }; +} diff --git a/tensor/src/specs/mod.rs b/tensor/src/specs/mod.rs index 3894f4f0..c3a91b1d 100644 --- a/tensor/src/specs/mod.rs +++ b/tensor/src/specs/mod.rs @@ -52,7 +52,6 @@ where } } -impl_conj!(u8, u16, u32, u64, u128, usize); impl_conj!(i8, i16, i32, i64, i128, isize); impl_conj!(f32, f64); diff --git a/tensor/src/specs/ndtensor.rs b/tensor/src/specs/ndtensor.rs index ce62659d..693a5492 100644 --- a/tensor/src/specs/ndtensor.rs +++ b/tensor/src/specs/ndtensor.rs @@ -2,16 +2,16 @@ Appellation: ndtensor Contrib: FL03 */ +use crate::prelude::TensorId; use crate::shape::prelude::{Rank, Shape}; use crate::store::Layout; -use acme::prelude::AtomicId; pub trait NdTensor { fn elements(&self) -> usize { self.layout().elements() } - fn id(&self) -> AtomicId; + fn id(&self) -> TensorId; fn layout(&self) -> &Layout; diff --git a/tensor/src/types/dtype.rs b/tensor/src/types/dtype.rs new file mode 100644 index 00000000..2b0e8a18 --- /dev/null +++ b/tensor/src/types/dtype.rs @@ -0,0 +1,58 @@ +/* + Appellation: dtype + Contrib: FL03 +*/ + +use std::any::TypeId; + +pub enum DType { + Float(Float), + Integer(Integer), +} + +pub enum Float { + F32, + F64, +} + +impl Float { + pub fn from_type(_value: &T) -> Result + where + T: 'static, + { + if TypeId::of::() == TypeId::of::() { + Ok(Float::F32) + } else if TypeId::of::() == TypeId::of::() { + Ok(Float::F64) + } else { + Err(()) + } + } +} + +impl From for Float { + fn from(_: f32) -> Self { + Float::F32 + } +} + +impl From for Float { + fn from(_: f64) -> Self { + Float::F64 + } +} + +pub struct Integer { + pub bits: NumBits, + pub signed: bool, +} + +#[repr(u8)] +pub enum NumBits { + B8 = 8, + B16 = 16, + B32 = 32, + B64 = 64, + B128 = 128, + BSize, +} diff --git a/tensor/src/types/mod.rs b/tensor/src/types/mod.rs index 54fb2162..1c5b8970 100644 --- a/tensor/src/types/mod.rs +++ b/tensor/src/types/mod.rs @@ -3,6 +3,7 @@ Contrib: FL03 */ +pub mod dtype; pub mod id; pub mod kinds; pub mod order; diff --git a/tensor/src/utils.rs b/tensor/src/utils.rs new file mode 100644 index 00000000..93c7e8fd --- /dev/null +++ b/tensor/src/utils.rs @@ -0,0 +1,62 @@ +/* + Appellation: utils + Contrib: FL03 +*/ +//! # Utilities +//! +//! 
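
What `private_decl!`/`private_impl!` above expand to, written out by hand for a toy trait: the marker type is public but unreachable from outside the crate, so downstream code cannot satisfy the trait:

```rust
mod sealing {
    pub struct PrivateMarker;
}

pub trait Sealed {
    // Callers outside this crate cannot name `sealing::PrivateMarker`,
    // so they cannot write an impl of this trait.
    #[doc(hidden)]
    fn __private__(&self) -> sealing::PrivateMarker;
}

pub struct Inside;

impl Sealed for Inside {
    fn __private__(&self) -> sealing::PrivateMarker {
        sealing::PrivateMarker
    }
}

fn main() {
    let _marker = Inside.__private__(); // fine within the crate, impossible downstream
}
```

This is why `RawData` above can gain new required methods later without that being a breaking change: no external impls can exist.
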
+use crate::prelude::{Scalar, TensorOp, TensorResult}; +use crate::shape::ShapeError; +use crate::tensor::{from_vec_with_op, TensorBase}; + +pub fn matmul(lhs: &TensorBase, rhs: &TensorBase) -> TensorResult> +where + T: Scalar, +{ + if lhs.shape().rank() != rhs.shape().rank() { + return Err(ShapeError::IncompatibleShapes.into()); + } + + let shape = lhs.shape().matmul_shape(rhs.shape()).unwrap(); + let mut result = vec![T::zero(); shape.elements()]; + + for i in 0..lhs.shape().rows() { + for j in 0..rhs.shape().columns() { + for k in 0..lhs.shape().columns() { + let pos = i * rhs.shape().columns() + j; + let left = i * lhs.shape().columns() + k; + let right = k * rhs.shape().columns() + j; + result[pos] += lhs.store[left] * rhs.store[right]; + } + } + } + let op = TensorOp::Matmul(Box::new(lhs.clone()), Box::new(rhs.clone())); + let tensor = from_vec_with_op(false, op, shape, result); + Ok(tensor) +} + +pub fn dot_product(lhs: &TensorBase, rhs: &TensorBase) -> TensorResult> +where + T: Scalar, +{ + if lhs.shape().rank() != rhs.shape().rank() { + return Err(ShapeError::IncompatibleShapes.into()); + } + + let shape = lhs.shape().matmul_shape(rhs.shape()).unwrap(); + let mut result = vec![T::zero(); shape.elements()]; + + for i in 0..lhs.shape().rows() { + for j in 0..rhs.shape().columns() { + for k in 0..lhs.shape().columns() { + let pos = i * rhs.shape().columns() + j; + let left = i * lhs.shape().columns() + k; + let right = k * rhs.shape().columns() + j; + result[pos] += lhs.store[left] * rhs.store[right]; + } + } + } + let op = TensorOp::Matmul(Box::new(lhs.clone()), Box::new(rhs.clone())); + let tensor = from_vec_with_op(false, op, shape, result); + Ok(tensor) +} diff --git a/tensor/tests/backward.rs b/tensor/tests/backward.rs index 64f9d5bb..7bfc1d45 100644 --- a/tensor/tests/backward.rs +++ b/tensor/tests/backward.rs @@ -13,7 +13,7 @@ fn test_backward() { let a = Tensor::::ones(shape).variable(); let b = Tensor::::ones(shape).variable(); let c = &a + &b; - let grad = c.grad(); + let grad = c.grad().unwrap(); assert_eq!(grad[&a.id()], Tensor::ones(shape),); assert_eq!(grad[&b.id()], Tensor::ones(shape)); @@ -22,7 +22,7 @@ fn test_backward() { let b = Tensor::::fill(shape, 2_f64).variable(); let c = &a * &b; - let grad = c.grad(); + let grad = c.grad().unwrap(); assert_eq!(grad[&a.id()], Tensor::fill(shape, 2_f64)); assert_eq!(grad[&b.id()], Tensor::ones(shape)); @@ -38,7 +38,7 @@ fn test_add_chain() { let c = &a + &b; let d = &c + &a; - let grad = d.grad(); + let grad = d.grad().unwrap(); // println!("Gradient:\n\n{:?}\n\n", &grad); assert_eq!(grad[&a.id()], Tensor::fill(shape, 2_f64)); @@ -51,10 +51,10 @@ fn test_add_mul() { let shape = (2, 2); let a = Tensor::::ones(shape).variable(); let b = Tensor::::ones(shape).variable(); - println!("A({}), B({})", a.id(), b.id()); + println!("*** Variables ***\nA: {}\nB: {}", a.id(), b.id()); let c = &a + &b; let d = &a * &c; - let grad = d.grad(); + let grad = d.grad().unwrap(); assert_eq!(grad[&a.id()], Tensor::fill(shape, 3_f64)); assert_eq!(grad[&b.id()], Tensor::ones(shape)); From c1532944d1109244f03efd9220bda340be9057a5 Mon Sep 17 00:00:00 2001 From: Joe McCain III Date: Wed, 27 Mar 2024 11:22:00 -0500 Subject: [PATCH 53/87] update Signed-off-by: Joe McCain III --- core/src/lib.rs | 2 +- core/src/ops/mod.rs | 7 +++++++ tensor/src/impls/grad.rs | 10 ++++++---- tensor/src/ops/backprop.rs | 22 +++++++++++----------- tensor/src/specs/mod.rs | 6 ++++++ tensor/src/tensor.rs | 2 +- tensor/tests/arith.rs | 2 +- tensor/tests/backward.rs | 16 
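
Note that `dot_product` above lands with the same kernel and `TensorOp::Matmul` tag as `matmul`, which reads like a copy-paste placeholder. For reference, a scalar dot product over two equal-length buffers is just a zipped sum; this sketch is not the crate's final signature:

```rust
// Hypothetical element-wise dot product; the crate's `dot_product` above
// currently reuses the matmul kernel verbatim.
fn dot<T>(lhs: &[T], rhs: &[T]) -> T
where
    T: Copy + std::iter::Sum<T> + std::ops::Mul<Output = T>,
{
    assert_eq!(lhs.len(), rhs.len(), "dot product needs equal lengths");
    lhs.iter().zip(rhs).map(|(&a, &b)| a * b).sum()
}

fn main() {
    assert_eq!(dot(&[1.0, 2.0, 3.0], &[4.0, 5.0, 6.0]), 32.0);
}
```
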
+++++++++++++++- 8 files changed, 48 insertions(+), 19 deletions(-) diff --git a/core/src/lib.rs b/core/src/lib.rs index a7104cdc..5b6ca527 100644 --- a/core/src/lib.rs +++ b/core/src/lib.rs @@ -25,7 +25,7 @@ pub mod prelude { pub use crate::errors::*; pub use crate::eval::*; pub use crate::id::*; - pub use crate::ops::*; + pub use crate::ops::prelude::*; pub use crate::specs::prelude::*; pub use crate::types::*; } diff --git a/core/src/ops/mod.rs b/core/src/ops/mod.rs index 1e31fca0..01f599f1 100644 --- a/core/src/ops/mod.rs +++ b/core/src/ops/mod.rs @@ -17,3 +17,10 @@ pub trait Operation { fn kind(&self) -> String; } + +pub(crate) mod prelude { + pub use super::binary::*; + pub use super::kinds::Op; + pub use super::unary::*; + pub use super::Operation; +} diff --git a/tensor/src/impls/grad.rs b/tensor/src/impls/grad.rs index 7a00bc29..47ccca7c 100644 --- a/tensor/src/impls/grad.rs +++ b/tensor/src/impls/grad.rs @@ -5,9 +5,9 @@ use crate::actions::grad::GradStore; use crate::prelude::{Scalar, TensorId, TensorOp, TensorResult}; use crate::TensorBase; -use acme::ops::binary::BinaryOp; -use acme::prelude::Store; +use acme::prelude::{BinaryOp, Store}; use std::collections::HashMap; +use std::ops::{Add, Mul}; // The vec of sorted nodes is passed as an owned value rather than a mutable reference // to get around some lifetime limitations. @@ -79,8 +79,10 @@ where match op { TensorOp::Binary(lhs, rhs, kind) => match kind { BinaryOp::Add => { - *store.entry(lhs.id()).or_insert(lhs.zeros_like()) += &grad; - *store.entry(rhs.id()).or_insert(rhs.zeros_like()) += &grad; + let a = store.entry(lhs.id()).or_insert(lhs.zeros_like()); + *a = &*a + &grad; + let b = store.entry(rhs.id()).or_insert(rhs.zeros_like()); + *b = &*b + &grad; } BinaryOp::Mul => { *store.entry(lhs.id()).or_insert(lhs.zeros_like()) += diff --git a/tensor/src/ops/backprop.rs b/tensor/src/ops/backprop.rs index 81dd7b43..e20c4972 100644 --- a/tensor/src/ops/backprop.rs +++ b/tensor/src/ops/backprop.rs @@ -5,15 +5,15 @@ use super::TensorOp; #[derive(Clone, Debug)] -pub struct TrackedOp(Option>); +pub struct BackpropOp(Option>); -impl TrackedOp { +impl BackpropOp { pub fn new(op: TensorOp) -> Self { - TrackedOp(Some(op)) + BackpropOp(Some(op)) } pub fn none() -> Self { - TrackedOp(None) + BackpropOp(None) } pub fn op(&self) -> Option<&TensorOp> { @@ -29,26 +29,26 @@ impl TrackedOp { } } -impl Default for TrackedOp { +impl Default for BackpropOp { fn default() -> Self { Self::none() } } -impl From>> for TrackedOp { +impl From>> for BackpropOp { fn from(op: Option>) -> Self { - TrackedOp(op) + BackpropOp(op) } } -impl From> for TrackedOp { +impl From> for BackpropOp { fn from(op: TensorOp) -> Self { - TrackedOp(Some(op)) + BackpropOp(Some(op)) } } -impl From> for Option> { - fn from(op: TrackedOp) -> Option> { +impl From> for Option> { + fn from(op: BackpropOp) -> Option> { op.into_inner() } } diff --git a/tensor/src/specs/mod.rs b/tensor/src/specs/mod.rs index c3a91b1d..5c5d92e1 100644 --- a/tensor/src/specs/mod.rs +++ b/tensor/src/specs/mod.rs @@ -55,6 +55,12 @@ where impl_conj!(i8, i16, i32, i64, i128, isize); impl_conj!(f32, f64); +pub trait Pow { + type Output; + + fn pow(&self, exp: T) -> Self::Output; +} + pub(crate) mod prelude { pub use super::ndtensor::*; pub use super::scalar::*; diff --git a/tensor/src/tensor.rs b/tensor/src/tensor.rs index 57e41a66..b9884974 100644 --- a/tensor/src/tensor.rs +++ b/tensor/src/tensor.rs @@ -36,7 +36,7 @@ pub(crate) fn from_vec_with_op( #[derive(Clone, Debug)] // #[derive(Clone, Debug, Eq, Hash, 
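
The switch from `+=` to `*a = &*a + &grad` in the gradient accumulation above leans on `Add` being implemented for tensor references, so neither the stored entry nor the incoming gradient is moved. The same shape on a toy type, as a minimal sketch:

```rust
use std::collections::BTreeMap;
use std::ops::Add;

#[derive(Clone, Debug, PartialEq)]
struct T(Vec<f64>);

// Add on references, mirroring how the tensor ops avoid consuming operands.
impl Add for &T {
    type Output = T;

    fn add(self, rhs: &T) -> T {
        T(self.0.iter().zip(&rhs.0).map(|(a, b)| a + b).collect())
    }
}

fn main() {
    let mut store = BTreeMap::<u8, T>::new();
    let grad = T(vec![1.0, 1.0]);
    // Same accumulate-in-place shape as the hunk above:
    let a = store.entry(0).or_insert(T(vec![0.0, 0.0]));
    *a = &*a + &grad; // reborrow the entry, add by reference, write back
    assert_eq!(store[&0], T(vec![1.0, 1.0]));
}
```
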
Ord, PartialOrd)] -pub struct TensorBase { +pub struct TensorBase { pub(crate) id: TensorId, pub(crate) kind: TensorKind, pub(crate) layout: Layout, diff --git a/tensor/tests/arith.rs b/tensor/tests/arith.rs index 1c4bb313..9386f4a1 100644 --- a/tensor/tests/arith.rs +++ b/tensor/tests/arith.rs @@ -25,7 +25,7 @@ fn test_add() { fn test_div() { let shape = (2, 2); let a = Tensor::::ones(shape); - let b = Tensor::::ones(shape) * 2.0; + let b = Tensor::::fill(shape, 2_f64); let c = a / b; assert_eq!(c, Tensor::::fill(shape, 0.5)); diff --git a/tensor/tests/backward.rs b/tensor/tests/backward.rs index 7bfc1d45..9182ac8f 100644 --- a/tensor/tests/backward.rs +++ b/tensor/tests/backward.rs @@ -9,14 +9,28 @@ use acme::prelude::Tensor; #[test] fn test_backward() { + let shape = (2, 2); + let a = Tensor::::ones(shape).variable(); + let grad = a.grad().unwrap(); + + assert_eq!(grad[&a.id()], Tensor::ones(shape),); +} + +#[test] +fn test_addition() { let shape = (2, 2); let a = Tensor::::ones(shape).variable(); let b = Tensor::::ones(shape).variable(); let c = &a + &b; let grad = c.grad().unwrap(); - assert_eq!(grad[&a.id()], Tensor::ones(shape),); + assert_eq!(grad[&a.id()], Tensor::ones(shape)); assert_eq!(grad[&b.id()], Tensor::ones(shape)); +} + +#[test] +fn test_multiplication() { + let shape = (2, 2); let a = Tensor::::ones(shape).variable(); let b = Tensor::::fill(shape, 2_f64).variable(); From 97d54c047d8c9b3c26fcc0cd1e85ed0a2b6e8705 Mon Sep 17 00:00:00 2001 From: Joe McCain III Date: Thu, 28 Mar 2024 03:31:58 -0500 Subject: [PATCH 54/87] update Signed-off-by: Joe McCain III --- core/src/errors/error.rs | 55 ++++------ core/src/errors/kinds.rs | 153 --------------------------- core/src/errors/kinds/external.rs | 63 +++++++++++ core/src/errors/kinds/mod.rs | 50 +++++++++ core/src/errors/kinds/propagation.rs | 94 ++++++++++++++++ core/src/errors/kinds/standard.rs | 64 +++++++++++ core/src/lib.rs | 1 + core/src/math/linalg/fields/mod.rs | 12 +++ core/src/math/linalg/mod.rs | 16 +++ core/src/math/mod.rs | 12 +++ core/src/ops/mod.rs | 21 ++++ core/src/ops/unary/mod.rs | 48 ++++++++- core/src/ops/unary/operator.rs | 29 +++++ core/src/specs/func/structural.rs | 9 ++ core/src/specs/mod.rs | 33 +++++- core/src/specs/operand.rs | 14 --- core/src/specs/store.rs | 28 +++-- graphs/src/dcg/graph.rs | 17 +++ graphs/src/dcg/node.rs | 16 ++- tensor/src/actions/grad/store.rs | 6 +- tensor/src/data/elem.rs | 7 +- tensor/src/data/mod.rs | 4 +- tensor/src/data/repr/owned.rs | 26 +++-- tensor/src/impls/create.rs | 3 +- tensor/src/impls/grad.rs | 78 +++++++------- tensor/src/impls/ops/unary.rs | 10 ++ tensor/src/specs/mod.rs | 47 +------- tensor/src/specs/ndtensor.rs | 15 +++ tensor/src/store/layout.rs | 6 +- tensor/src/store/mod.rs | 5 + tensor/src/tensor.rs | 119 ++++++++++++++++----- tensor/src/types/dtype.rs | 80 ++++++++++++++ tensor/src/types/kinds.rs | 6 +- tensor/src/types/mod.rs | 1 + tensor/tests/backward.rs | 26 +++++ 35 files changed, 830 insertions(+), 344 deletions(-) delete mode 100644 core/src/errors/kinds.rs create mode 100644 core/src/errors/kinds/external.rs create mode 100644 core/src/errors/kinds/mod.rs create mode 100644 core/src/errors/kinds/propagation.rs create mode 100644 core/src/errors/kinds/standard.rs create mode 100644 core/src/math/linalg/fields/mod.rs create mode 100644 core/src/math/linalg/mod.rs create mode 100644 core/src/math/mod.rs create mode 100644 core/src/ops/unary/operator.rs delete mode 100644 core/src/specs/operand.rs diff --git a/core/src/errors/error.rs 
b/core/src/errors/error.rs index 67580549..964b3bd6 100644 --- a/core/src/errors/error.rs +++ b/core/src/errors/error.rs @@ -5,23 +5,24 @@ use super::kinds::{ErrorKind, ExternalError, SyncError}; #[cfg(feature = "serde")] use serde::{Deserialize, Serialize}; +use std::fmt::{Debug, Display}; #[cfg_attr(feature = "serde", derive(Deserialize, Serialize,))] #[derive(Clone, Debug, Default, Eq, Hash, Ord, PartialEq, PartialOrd)] -pub struct Error { - kind: ErrorKind, +pub struct Error { + kind: ErrorKind, message: String, } -impl Error { - pub fn new(kind: ErrorKind, msg: impl ToString) -> Self { +impl Error { + pub fn new(kind: ErrorKind, msg: impl ToString) -> Self { Self { kind, message: msg.to_string(), } } /// Get an owned reference to the error kind - pub fn kind(&self) -> &ErrorKind { + pub fn kind(&self) -> &ErrorKind { &self.kind } /// Get an owned reference to the error message @@ -37,7 +38,7 @@ impl Error { self.message } /// A functional method for setting the error kind - pub fn with_kind(mut self, kind: ErrorKind) -> Self { + pub fn with_kind(mut self, kind: ErrorKind) -> Self { self.kind = kind; self } @@ -48,54 +49,40 @@ impl Error { } } -impl std::fmt::Display for Error { +impl Display for Error +where + K: ToString, +{ fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { - write!(f, "{}: {}", self.kind, self.message) + write!(f, "{}: {}", self.kind.to_string(), self.message) } } -impl std::error::Error for Error {} +impl std::error::Error for Error where K: Debug + Display {} -impl From for Error { - fn from(kind: ErrorKind) -> Self { +impl From> for Error { + fn from(kind: ErrorKind) -> Self { Self::new(kind, "") } } -impl From> for Error { +impl From> for Error { fn from(err: std::sync::TryLockError) -> Self { Self::new(ErrorKind::Sync(SyncError::TryLock), err.to_string()) } } -macro_rules! error_from { - (shared $kind:expr, ($($t:ty),*)) => { - $( - error_from!($kind, $t); - )* - }; +macro_rules! err_from { ($kind:expr, $t:ty) => { - impl From<$t> for Error { + impl From<$t> for Error { fn from(err: $t) -> Self { Self::new($kind, err.to_string()) } } }; -} - -macro_rules! 
err_variant { - (external $variant:ident, $t:ty) => { - impl From<$t> for Error { - fn from(err: $t) -> Self { - Self::new( - ErrorKind::External(ExternalError::$variant), - err.to_string(), - ) - } - } + ($kind:expr => ($($t:ty),*)) => { + $(err_from!($kind, $t);)* }; } -err_variant!(external Unknown, &str); -err_variant!(external Unknown, String); -err_variant!(external Unknown, Box); +err_from!(ErrorKind::External(ExternalError::Unknown) => (&str, String, Box)); diff --git a/core/src/errors/kinds.rs b/core/src/errors/kinds.rs deleted file mode 100644 index a3ad03be..00000000 --- a/core/src/errors/kinds.rs +++ /dev/null @@ -1,153 +0,0 @@ -/* - Appellation: error - Contrib: FL03 -*/ -#[cfg(feature = "serde")] -use serde::{Deserialize, Serialize}; -use smart_default::SmartDefault; -use strum::{Display, EnumCount, EnumIs, EnumIter, VariantNames}; - -pub trait ErrorType { - type Kind; - - fn kind(&self) -> &Self::Kind; - - fn name(&self) -> &str; -} - -#[derive( - Clone, - Debug, - Display, - EnumCount, - EnumIs, - EnumIter, - Eq, - Hash, - Ord, - PartialEq, - PartialOrd, - SmartDefault, - VariantNames, -)] -#[cfg_attr( - feature = "serde", - derive(Deserialize, Serialize,), - serde(rename_all = "snake_case") -)] -#[strum(serialize_all = "snake_case")] -pub enum ErrorKind { - #[default] - External(ExternalError), - Sync(SyncError), -} - -#[derive( - Clone, - Copy, - Debug, - Default, - Display, - EnumCount, - EnumIs, - Eq, - Hash, - Ord, - PartialEq, - PartialOrd, - VariantNames, -)] -#[cfg_attr( - feature = "serde", - derive(Deserialize, Serialize,), - serde(rename_all = "snake_case") -)] -#[strum(serialize_all = "snake_case")] -pub enum ExternalError { - Custom(E), - #[default] - Unknown, -} - -impl ExternalError { - pub fn new(error: E) -> Self { - Self::Custom(error) - } - - pub fn unknown() -> Self { - Self::Unknown - } -} - -impl ErrorType for ExternalError -where - E: ErrorType, -{ - type Kind = ExternalError; - - fn kind(&self) -> &Self::Kind { - &self - } - - fn name(&self) -> &str { - match self { - Self::Custom(inner) => inner.name(), - Self::Unknown => "unknown", - } - } -} - -#[derive( - Clone, - Copy, - Debug, - Default, - Display, - EnumCount, - EnumIs, - Eq, - Hash, - Ord, - PartialEq, - PartialOrd, - VariantNames, -)] -#[cfg_attr( - feature = "serde", - derive(Deserialize, Serialize,), - serde(rename_all = "snake_case") -)] -#[strum(serialize_all = "snake_case")] -pub enum StdError { - #[default] - IO, - Parse, - Sync(SyncError), -} - -#[derive( - Clone, - Copy, - Debug, - Default, - Display, - EnumCount, - EnumIs, - Eq, - Hash, - Ord, - PartialEq, - PartialOrd, - VariantNames, -)] -#[cfg_attr( - feature = "serde", - derive(Deserialize, Serialize,), - serde(rename_all = "snake_case") -)] -#[strum(serialize_all = "snake_case")] -pub enum SyncError { - #[default] - Poison, - TryLock, -} diff --git a/core/src/errors/kinds/external.rs b/core/src/errors/kinds/external.rs new file mode 100644 index 00000000..2d5b7294 --- /dev/null +++ b/core/src/errors/kinds/external.rs @@ -0,0 +1,63 @@ +/* + Appellation: external + Contrib: FL03 +*/ +use super::ErrorType; +#[cfg(feature = "serde")] +use serde::{Deserialize, Serialize}; +use strum::{Display, EnumCount, EnumIs, VariantNames}; + +#[derive( + Clone, + Copy, + Debug, + Default, + Display, + EnumCount, + EnumIs, + Eq, + Hash, + Ord, + PartialEq, + PartialOrd, + VariantNames, +)] +#[cfg_attr( + feature = "serde", + derive(Deserialize, Serialize,), + serde(rename_all = "snake_case") +)] +#[strum(serialize_all = "snake_case")] +pub enum 
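
A self-contained miniature of the `err_from!` fan-out above: the repetition arm expands one `From` conversion per listed source type, so a single line covers every stringly error source:

```rust
#[derive(Debug)]
struct Error(String);

macro_rules! err_from {
    ($t:ty) => {
        impl From<$t> for Error {
            fn from(err: $t) -> Self {
                Error(err.to_string())
            }
        }
    };
    ($($t:ty),*) => {
        $(err_from!($t);)*
    };
}

err_from!(&str, String);

fn main() {
    let a: Error = "boom".into();
    let b: Error = String::from("boom").into();
    assert_eq!(a.0, b.0);
}
```
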
ExternalError { + Custom(E), + #[default] + Unknown, +} + +impl ExternalError { + pub fn new(error: E) -> Self { + Self::Custom(error) + } + + pub fn unknown() -> Self { + Self::Unknown + } +} + +impl ErrorType for ExternalError +where + E: ToString, +{ + type Kind = ExternalError; + + fn kind(&self) -> &Self::Kind { + &self + } + + fn name(&self) -> String { + match self { + Self::Custom(inner) => inner.to_string(), + _ => self.to_string(), + } + } +} diff --git a/core/src/errors/kinds/mod.rs b/core/src/errors/kinds/mod.rs new file mode 100644 index 00000000..5af05b81 --- /dev/null +++ b/core/src/errors/kinds/mod.rs @@ -0,0 +1,50 @@ +/* + Appellation: kinds + Contrib: FL03 +*/ +pub use self::{external::*, propagation::*, standard::*}; + +pub(crate) mod external; +pub(crate) mod propagation; +pub(crate) mod standard; + +#[cfg(feature = "serde")] +use serde::{Deserialize, Serialize}; +use smart_default::SmartDefault; +use strum::{Display, EnumCount, EnumIs, EnumIter, VariantNames}; + +pub trait ErrorType { + type Kind: std::fmt::Display; + + fn kind(&self) -> &Self::Kind; + + fn name(&self) -> String; +} + +#[derive( + Clone, + Copy, + Debug, + Display, + EnumCount, + EnumIs, + EnumIter, + Eq, + Hash, + Ord, + PartialEq, + PartialOrd, + SmartDefault, + VariantNames, +)] +#[cfg_attr( + feature = "serde", + derive(Deserialize, Serialize,), + serde(rename_all = "snake_case") +)] +#[strum(serialize_all = "snake_case")] +pub enum ErrorKind { + #[default] + External(ExternalError), + Sync(SyncError), +} diff --git a/core/src/errors/kinds/propagation.rs b/core/src/errors/kinds/propagation.rs new file mode 100644 index 00000000..3cf48b0c --- /dev/null +++ b/core/src/errors/kinds/propagation.rs @@ -0,0 +1,94 @@ +/* + Appellation: propagation + Contrib: FL03 +*/ +use super::ErrorType; +#[cfg(feature = "serde")] +use serde::{Deserialize, Serialize}; +use strum::{Display, EnumCount, EnumIs, VariantNames}; + +#[derive( + Clone, + Copy, + Debug, + Display, + EnumCount, + EnumIs, + Eq, + Hash, + Ord, + PartialEq, + PartialOrd, + VariantNames, +)] +#[cfg_attr( + feature = "serde", + derive(Deserialize, Serialize,), + serde(rename_all = "snake_case") +)] +#[strum(serialize_all = "snake_case")] +pub enum ModuleError { + Predict(PredictError), +} + +impl ErrorType for ModuleError { + type Kind = ModuleError; + + fn kind(&self) -> &Self::Kind { + self + } + + fn name(&self) -> String { + self.to_string() + } +} + +#[derive( + Clone, + Copy, + Debug, + Display, + EnumCount, + EnumIs, + Eq, + Hash, + Ord, + PartialEq, + PartialOrd, + VariantNames, +)] +#[cfg_attr( + feature = "serde", + derive(Deserialize, Serialize,), + serde(rename_all = "snake_case") +)] +#[strum(serialize_all = "snake_case")] +pub enum PredictError { + Arithmetic, + NumericalError, +} + +#[derive( + Clone, + Copy, + Debug, + Display, + EnumCount, + EnumIs, + Eq, + Hash, + Ord, + PartialEq, + PartialOrd, + VariantNames, +)] +#[cfg_attr( + feature = "serde", + derive(Deserialize, Serialize,), + serde(rename_all = "snake_case") +)] +#[strum(serialize_all = "snake_case")] +pub enum GradientError { + Backward, + Forward, +} diff --git a/core/src/errors/kinds/standard.rs b/core/src/errors/kinds/standard.rs new file mode 100644 index 00000000..e0159354 --- /dev/null +++ b/core/src/errors/kinds/standard.rs @@ -0,0 +1,64 @@ +/* + Appellation: standard + Contrib: FL03 +*/ +#[cfg(feature = "serde")] +use serde::{Deserialize, Serialize}; +use strum::{Display, EnumCount, EnumIs, EnumIter, VariantNames}; + +#[derive( + Clone, + Copy, + Debug, + Default, + 
Display, + EnumCount, + EnumIs, + EnumIter, + Eq, + Hash, + Ord, + PartialEq, + PartialOrd, + VariantNames, +)] +#[cfg_attr( + feature = "serde", + derive(Deserialize, Serialize,), + serde(rename_all = "snake_case") +)] +#[strum(serialize_all = "snake_case")] +pub enum StdError { + #[default] + IO, + Parse, + Sync(SyncError), +} + +#[derive( + Clone, + Copy, + Debug, + Default, + Display, + EnumCount, + EnumIs, + EnumIter, + Eq, + Hash, + Ord, + PartialEq, + PartialOrd, + VariantNames, +)] +#[cfg_attr( + feature = "serde", + derive(Deserialize, Serialize,), + serde(rename_all = "snake_case") +)] +#[strum(serialize_all = "snake_case")] +pub enum SyncError { + #[default] + Poison, + TryLock, +} diff --git a/core/src/lib.rs b/core/src/lib.rs index 5b6ca527..3d6ab0a0 100644 --- a/core/src/lib.rs +++ b/core/src/lib.rs @@ -14,6 +14,7 @@ pub(crate) mod utils; pub mod errors; pub mod eval; pub mod id; +pub mod math; pub mod ops; pub mod specs; pub mod types; diff --git a/core/src/math/linalg/fields/mod.rs b/core/src/math/linalg/fields/mod.rs new file mode 100644 index 00000000..d821f6a3 --- /dev/null +++ b/core/src/math/linalg/fields/mod.rs @@ -0,0 +1,12 @@ +/* + Appellation: fields + Contrib: FL03 +*/ +//! # Fields +//! +//! + +pub trait Field {} + +#[cfg(test)] +mod tests {} diff --git a/core/src/math/linalg/mod.rs b/core/src/math/linalg/mod.rs new file mode 100644 index 00000000..e7951389 --- /dev/null +++ b/core/src/math/linalg/mod.rs @@ -0,0 +1,16 @@ +/* + Appellation: linalg + Contrib: FL03 +*/ +//! # Linear Algebra +//! +//! This module implements fundamental linear algebra concepts and operations. +//! +pub mod fields; + +pub trait VectorSpace {} + +pub trait Subspace: VectorSpace {} + +#[cfg(test)] +mod tests {} diff --git a/core/src/math/mod.rs b/core/src/math/mod.rs new file mode 100644 index 00000000..701d84f2 --- /dev/null +++ b/core/src/math/mod.rs @@ -0,0 +1,12 @@ +/* + Appellation: math + Contrib: FL03 +*/ +//! # Linear Algebra +//! +//! This module implements fundamental linear algebra concepts and operations. +//! +pub mod linalg; + +#[cfg(test)] +mod tests {} diff --git a/core/src/ops/mod.rs b/core/src/ops/mod.rs index 01f599f1..3074f1d1 100644 --- a/core/src/ops/mod.rs +++ b/core/src/ops/mod.rs @@ -18,6 +18,27 @@ pub trait Operation { fn kind(&self) -> String; } +pub trait Pow { + type Output; + + fn pow(&self, exp: T) -> Self::Output; +} + +pub trait Powc: Pow { + fn powc(&self, exp: T) -> Self::Output; +} + +pub trait Powi: Pow { + fn powi(&self, exp: T) -> Self::Output; +} + +pub trait Powf: Pow +where + T: num::Float, +{ + fn powf(&self, exp: T) -> Self::Output; +} + pub(crate) mod prelude { pub use super::binary::*; pub use super::kinds::Op; diff --git a/core/src/ops/unary/mod.rs b/core/src/ops/unary/mod.rs index f18d6df7..c9fa1d86 100644 --- a/core/src/ops/unary/mod.rs +++ b/core/src/ops/unary/mod.rs @@ -5,16 +5,60 @@ //! # Unary Operations //! //! -pub use self::{kinds::*, specs::*}; +pub use self::{kinds::*, operator::*, specs::*}; pub(crate) mod kinds; +pub(crate) mod operator; pub(crate) mod specs; +use num::{Complex, Num}; + pub trait UnaryOperation { type Output; - fn eval(self) -> Self::Output; + fn unary(self, expr: UnaryOp) -> Self::Output; +} + +/// +pub trait Conjugate { + type Complex; + type Real; + + fn conj(&self) -> Self::Complex; } +macro_rules! 
impl_conj { + ($t:ty) => { + impl Conjugate for $t { + type Complex = Complex; + type Real = Self; + + fn conj(&self) -> Self::Complex { + Complex::new(*self, <$t>::default()) + } + } + }; + ($($t:ty),*) => { + $( + impl_conj!($t); + )* + }; +} + +impl Conjugate for Complex +where + T: Clone + Num + std::ops::Neg, +{ + type Complex = Self; + type Real = T; + + fn conj(&self) -> Self::Complex { + Complex::conj(self) + } +} + +impl_conj!(i8, i16, i32, i64, i128, isize); +impl_conj!(f32, f64); + #[cfg(test)] mod tests {} diff --git a/core/src/ops/unary/operator.rs b/core/src/ops/unary/operator.rs new file mode 100644 index 00000000..13592a99 --- /dev/null +++ b/core/src/ops/unary/operator.rs @@ -0,0 +1,29 @@ +/* + Appellation: operator + Contrib: FL03 +*/ +use super::{UnaryOp, UnaryOperation}; +// use std::marker::PhantomData; + +pub struct UnaryOperator { + pub args: A, + pub differentiable: bool, + pub op: UnaryOp, +} + +impl UnaryOperator { + pub fn new(args: A, op: UnaryOp) -> Self { + Self { + args, + differentiable: op.differentiable(), + op, + } + } + + pub fn eval(self) -> A::Output + where + A: UnaryOperation, + { + self.args.unary(self.op) + } +} diff --git a/core/src/specs/func/structural.rs b/core/src/specs/func/structural.rs index ea80def6..c3a499cc 100644 --- a/core/src/specs/func/structural.rs +++ b/core/src/specs/func/structural.rs @@ -11,3 +11,12 @@ pub trait StructuralFn { } pub trait StructuredArgs {} + +pub struct StructFunc +where + F: StructuralFn, + A: StructuredArgs, +{ + args: A, + func: F, +} diff --git a/core/src/specs/mod.rs b/core/src/specs/mod.rs index b91a59a0..dc71076d 100644 --- a/core/src/specs/mod.rs +++ b/core/src/specs/mod.rs @@ -3,18 +3,45 @@ Contrib: FL03 */ -pub use self::{gradient::*, operand::*, store::*}; +pub use self::{gradient::*, store::*}; pub(crate) mod gradient; -pub(crate) mod operand; pub(crate) mod store; pub mod func; +use crate::errors::PredictError; + +pub trait Backward { + type Output; + + fn backward(&self) -> Self::Output; +} + +pub trait Forward { + type Output; + + fn forward(&self, args: &T) -> Result; +} + +impl Forward for Option +where + S: Forward, + T: Clone, +{ + type Output = T; + + fn forward(&self, args: &T) -> Result { + match self { + Some(s) => s.forward(args), + None => Ok(args.clone()), + } + } +} + pub(crate) mod prelude { pub use super::func::*; pub use super::gradient::*; - pub use super::operand::Operand; pub use super::store::*; } diff --git a/core/src/specs/operand.rs b/core/src/specs/operand.rs deleted file mode 100644 index 7ef43d05..00000000 --- a/core/src/specs/operand.rs +++ /dev/null @@ -1,14 +0,0 @@ -/* - Appellation: operator - Contrib: FL03 -*/ - -pub trait Operand { - type Output; - - fn name(&self) -> &str; - - fn eval(&self, args: Args) -> Self::Output; - - fn grad(&self, args: Args) -> Vec; -} diff --git a/core/src/specs/store.rs b/core/src/specs/store.rs index c55e1c43..75245ae4 100644 --- a/core/src/specs/store.rs +++ b/core/src/specs/store.rs @@ -5,22 +5,30 @@ use std::borrow::Borrow; use std::collections::{BTreeMap, HashMap}; -pub trait Get -where - K: Borrow, -{ - fn get(&self, key: &Q) -> Option<&V>; +pub trait Get { + type Key: Borrow; + type Value; + + fn get(&self, key: &Q) -> Option<&Self::Value>; +} + +pub trait GetMut: Get { + fn get_mut(&mut self, key: &Q) -> Option<&mut Self::Value>; } -impl Get for BTreeMap +impl Get for BTreeMap where K: Borrow + Ord, Q: Ord, { - fn get(&self, key: &Q) -> Option<&V> { + type Key = K; + type Value = V; + + fn get(&self, key: &Q) -> 
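
The blanket `Forward` impl for `Option<S>` above makes an absent module act as the identity. A standalone sketch of that behaviour, using `String` as the error type in place of `PredictError`:

```rust
trait Forward<T> {
    type Output;
    fn forward(&self, args: &T) -> Result<Self::Output, String>;
}

struct Scale(f64);

impl Forward<f64> for Scale {
    type Output = f64;
    fn forward(&self, args: &f64) -> Result<f64, String> {
        Ok(args * self.0)
    }
}

impl<S> Forward<f64> for Option<S>
where
    S: Forward<f64, Output = f64>,
{
    type Output = f64;
    fn forward(&self, args: &f64) -> Result<f64, String> {
        match self {
            Some(s) => s.forward(args), // delegate when a module exists
            None => Ok(*args),          // otherwise behave as the identity
        }
    }
}

fn main() {
    let some: Option<Scale> = Some(Scale(2.0));
    let none: Option<Scale> = None;
    assert_eq!(some.forward(&3.0), Ok(6.0));
    assert_eq!(none.forward(&3.0), Ok(3.0));
}
```
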
Option<&Self::Value> { BTreeMap::get(self, key) } } + pub trait Store { fn get(&self, key: &K) -> Option<&V>; @@ -31,6 +39,12 @@ pub trait Store { fn remove(&mut self, key: &K) -> Option; } +pub trait Cache { + fn get_or_insert_with(&mut self, key: K, f: F) -> &mut V + where + F: FnOnce() -> V; +} + pub trait OrInsert { fn or_insert(&mut self, key: K, value: V) -> &mut V; } diff --git a/graphs/src/dcg/graph.rs b/graphs/src/dcg/graph.rs index d7e5f2d9..af54f91f 100644 --- a/graphs/src/dcg/graph.rs +++ b/graphs/src/dcg/graph.rs @@ -121,6 +121,23 @@ impl Dcg { let grad = gradients[scope]; let node = self.get(*scope).unwrap(); + match node { + Node::Binary { lhs, rhs, op } => match op { + BinaryExpr::Add(_) => { + *gradients.entry(*lhs).or_default() += grad; + *gradients.entry(*rhs).or_default() += grad; + } + BinaryExpr::Mul(_) => { + let lhs_val = self.get(*lhs).unwrap().get_value(); + let rhs_val = self.get(*rhs).unwrap().get_value(); + *gradients.entry(*lhs).or_default() += grad * rhs_val; + *gradients.entry(*rhs).or_default() += grad * lhs_val; + } + _ => {} + }, + _ => {} + } + if let Node::Op { inputs, op } = node { match op { Operations::Binary(BinaryExpr::Add(_)) => { diff --git a/graphs/src/dcg/node.rs b/graphs/src/dcg/node.rs index da5e6257..033d6e08 100644 --- a/graphs/src/dcg/node.rs +++ b/graphs/src/dcg/node.rs @@ -2,11 +2,16 @@ Appellation: node Contrib: FL03 */ -use crate::ops::Operations; -use petgraph::prelude::NodeIndex; +use crate::ops::{BinaryExpr, Operations}; +use crate::NodeIndex; #[derive(Clone, Debug)] pub enum Node { + Binary { + lhs: NodeIndex, + rhs: NodeIndex, + op: BinaryExpr, + }, Op { inputs: Vec, op: Operations, @@ -18,6 +23,13 @@ pub enum Node { } impl Node { + pub fn binary(lhs: NodeIndex, rhs: NodeIndex, op: impl Into) -> Self { + Node::Binary { + lhs, + rhs, + op: op.into(), + } + } pub fn op(inputs: impl IntoIterator, op: impl Into) -> Self { Node::Op { inputs: Vec::from_iter(inputs), diff --git a/tensor/src/actions/grad/store.rs b/tensor/src/actions/grad/store.rs index 93cc7f91..655c24e1 100644 --- a/tensor/src/actions/grad/store.rs +++ b/tensor/src/actions/grad/store.rs @@ -5,7 +5,7 @@ use crate::prelude::TensorId; use crate::TensorBase; use acme::prelude::Store; -use std::collections::btree_map::{BTreeMap, Entry}; +use std::collections::btree_map::{BTreeMap, Entry, Keys}; use std::ops::{Index, IndexMut}; #[derive(Clone, Debug)] @@ -39,6 +39,10 @@ impl GradStore { pub fn is_empty(&self) -> bool { self.store.is_empty() } + + pub fn keys(&self) -> Keys<'_, TensorId, TensorBase> { + self.store.keys() + } /// Returns the number of elements in the store. pub fn len(&self) -> usize { self.store.len() diff --git a/tensor/src/data/elem.rs b/tensor/src/data/elem.rs index 5290cc4f..b19a4a5b 100644 --- a/tensor/src/data/elem.rs +++ b/tensor/src/data/elem.rs @@ -5,5 +5,10 @@ //! # Elements //! //! +use crate::prelude::DType; -pub trait Element {} +pub trait Element { + type Elem; + + fn dtype(&self) -> DType; +} diff --git a/tensor/src/data/mod.rs b/tensor/src/data/mod.rs index 0a49cfff..53525b72 100644 --- a/tensor/src/data/mod.rs +++ b/tensor/src/data/mod.rs @@ -11,6 +11,8 @@ pub mod repr { pub mod owned; } +pub trait Data: RawData {} + #[allow(clippy::missing_safety_doc)] pub unsafe trait RawData { type Elem; @@ -21,8 +23,6 @@ pub unsafe trait RawData { private_decl! 
{} } -pub trait Data: RawData {} - pub(crate) mod utils { #[cfg(not(feature = "std"))] #[allow(unused_imports)] diff --git a/tensor/src/data/repr/owned.rs b/tensor/src/data/repr/owned.rs index 77fb9223..b6413e48 100644 --- a/tensor/src/data/repr/owned.rs +++ b/tensor/src/data/repr/owned.rs @@ -17,30 +17,36 @@ pub struct OwnedRepr { } impl OwnedRepr { + /// Create an [OwnedRepr] from a [Vec] pub fn from_vec(vec: Vec) -> Self { let mut v = ManuallyDrop::new(vec); - let capacity = v.capacity(); - let len = v.len(); - let ptr = nonnull_from_vec_data(&mut v); - Self { capacity, len, ptr } + Self { + capacity: v.capacity(), + len: v.len(), + ptr: nonnull_from_vec_data(&mut v), + } + } + + pub fn as_ptr(&self) -> *const A { + self.ptr.as_ptr() + } + + pub fn as_ptr_mut(&mut self) -> *mut A { + self.ptr.as_ptr() } pub(crate) fn as_slice(&self) -> &[A] { unsafe { slice::from_raw_parts(self.ptr.as_ptr(), self.len) } } - pub fn capacity(&self) -> usize { + pub const fn capacity(&self) -> usize { self.capacity } - pub fn len(&self) -> usize { + pub const fn len(&self) -> usize { self.len } - - pub fn ptr(&self) -> NonNull { - self.ptr - } /// Set the valid length of the data /// /// ## Safety diff --git a/tensor/src/impls/create.rs b/tensor/src/impls/create.rs index 80e37dc5..38b29f2f 100644 --- a/tensor/src/impls/create.rs +++ b/tensor/src/impls/create.rs @@ -3,7 +3,7 @@ Contrib: FL03 */ use crate::prelude::IntoShape; -use crate::tensor::*; +use crate::tensor::{from_vec, TensorBase}; use num::traits::{FromPrimitive, NumAssign, One, Zero}; impl TensorBase @@ -99,6 +99,7 @@ where from_vec(false.into(), (store.len(),), store) } } + impl TensorBase where T: Clone + One, diff --git a/tensor/src/impls/grad.rs b/tensor/src/impls/grad.rs index 47ccca7c..bb0f8d31 100644 --- a/tensor/src/impls/grad.rs +++ b/tensor/src/impls/grad.rs @@ -6,52 +6,52 @@ use crate::actions::grad::GradStore; use crate::prelude::{Scalar, TensorId, TensorOp, TensorResult}; use crate::TensorBase; use acme::prelude::{BinaryOp, Store}; -use std::collections::HashMap; -use std::ops::{Add, Mul}; -// The vec of sorted nodes is passed as an owned value rather than a mutable reference -// to get around some lifetime limitations. -fn walk<'a, T>( - node: &'a TensorBase, - nodes: Vec<&'a TensorBase>, - visited: &mut HashMap, -) -> (bool, Vec<&'a TensorBase>) { - if let Some(&tg) = visited.get(&node.id()) { - return (tg, nodes); - } - // track the gradient of the current node - let mut track = false; - let mut nodes = if node.is_variable() { - // Do not call recursively on the "leaf" nodes. - track = true; - nodes - } else if let Some(op) = node.op() { - match op { - TensorOp::Binary(lhs, rhs, _kind) => { - let (tg, nodes) = walk(lhs, nodes, visited); - track |= tg; - let (tg, nodes) = walk(rhs, nodes, visited); - track |= tg; - nodes - } - _ => nodes, - } - } else { - nodes - }; - visited.insert(node.id(), track); - if track { - nodes.push(node); - } - (track, nodes) -} +pub(crate) type Visited = std::collections::HashMap; impl TensorBase where T: Scalar, { fn sorted_nodes(&self) -> Vec<&TensorBase> { - let (_tg, mut nodes) = walk(self, vec![], &mut HashMap::new()); + // The vec of sorted nodes is passed as an owned value rather than a mutable reference + // to get around some lifetime limitations. 
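        // [editor's aside, not part of this patch] Passing the Vec by value avoids
        // holding a `&mut Vec<&'a TensorBase<T>>` alongside the `&'a` references
        // the helper hands back. The same pattern in miniature:
        //
        //     fn collect<'a>(node: &'a str, mut acc: Vec<&'a str>) -> Vec<&'a str> {
        //         acc.push(node);
        //         acc // ownership flows back to the caller instead of `&mut` threading
        //     }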
+ fn walk<'a, T>( + node: &'a TensorBase, + nodes: Vec<&'a TensorBase>, + visited: &mut Visited, + ) -> (bool, Vec<&'a TensorBase>) { + if let Some(&tg) = visited.get(&node.id()) { + return (tg, nodes); + } + // track the gradient of the current node + let mut track = false; + let mut nodes = if node.is_variable() { + // Do not call recursively on the "leaf" nodes. + track = true; + nodes + } else if let Some(op) = node.op() { + match op { + TensorOp::Binary(lhs, rhs, _kind) => { + let (tg, nodes) = walk(lhs, nodes, visited); + track |= tg; + let (tg, nodes) = walk(rhs, nodes, visited); + track |= tg; + nodes + } + _ => nodes, + } + } else { + nodes + }; + visited.insert(node.id(), track); + if track { + nodes.push(node); + } + (track, nodes) + } + + let (_tg, mut nodes) = walk(self, Vec::new(), &mut Visited::new()); nodes.reverse(); nodes } diff --git a/tensor/src/impls/ops/unary.rs b/tensor/src/impls/ops/unary.rs index 80c827cf..1e23d951 100644 --- a/tensor/src/impls/ops/unary.rs +++ b/tensor/src/impls/ops/unary.rs @@ -57,6 +57,16 @@ impl TensorBase where T: Scalar, { + pub fn abs(self) -> TensorBase<::Real> + where + T: Scalar, + { + let shape = self.shape().clone(); + let store = self.store.iter().copied().map(|v| v.abs()).collect(); + let op = TensorOp::Unary(Box::new(self), UnaryOp::Abs); + from_vec_with_op(false, op, shape, store) + } + impl_unary_op!(Cos, cos); impl_unary_op!(Cosh, cosh); impl_unary_op!(Exp, exp); diff --git a/tensor/src/specs/mod.rs b/tensor/src/specs/mod.rs index 5c5d92e1..a41f9cec 100644 --- a/tensor/src/specs/mod.rs +++ b/tensor/src/specs/mod.rs @@ -6,59 +6,16 @@ pub mod ndtensor; pub mod scalar; -use num::{Complex, Num}; - pub trait Affine { type Output; fn affine(&self, mul: &T, add: &T) -> Self::Output; } -/// -pub trait Conjugate { - type Complex; - type Real; - - fn conj(&self) -> Self::Complex; -} - -macro_rules! 
impl_conj { - ($t:ty) => { - impl Conjugate for $t { - type Complex = Complex; - type Real = Self; - - fn conj(&self) -> Self::Complex { - Complex::new(*self, <$t>::default()) - } - } - }; - ($($t:ty),*) => { - $( - impl_conj!($t); - )* - }; -} - -impl Conjugate for Complex -where - T: Clone + Num + std::ops::Neg, -{ - type Complex = Self; - type Real = T; - - fn conj(&self) -> Self::Complex { - Complex::conj(self) - } -} - -impl_conj!(i8, i16, i32, i64, i128, isize); -impl_conj!(f32, f64); - -pub trait Pow { +pub trait Vstack { type Output; - fn pow(&self, exp: T) -> Self::Output; + fn vstack(&self, other: &T) -> Self::Output; } pub(crate) mod prelude { diff --git a/tensor/src/specs/ndtensor.rs b/tensor/src/specs/ndtensor.rs index 693a5492..c3dc536a 100644 --- a/tensor/src/specs/ndtensor.rs +++ b/tensor/src/specs/ndtensor.rs @@ -7,6 +7,8 @@ use crate::shape::prelude::{Rank, Shape}; use crate::store::Layout; pub trait NdTensor { + type Elem; + fn elements(&self) -> usize { self.layout().elements() } @@ -27,3 +29,16 @@ pub trait NdTensor { self.layout().stride() } } + +pub trait NdStore { + type Container; + type Elem; +} + +pub trait NdIterator { + type Item; +} + +pub trait NdIndex { + type Output; +} diff --git a/tensor/src/store/layout.rs b/tensor/src/store/layout.rs index 5d7fa48f..580d3a39 100644 --- a/tensor/src/store/layout.rs +++ b/tensor/src/store/layout.rs @@ -46,9 +46,9 @@ impl Layout { } } - pub(crate) fn position(&self, coords: &[usize]) -> usize { + pub(crate) fn position(&self, coords: impl AsRef<[usize]>) -> usize { let mut index = self.offset; - for (i, &coord) in coords.iter().enumerate() { + for (i, &coord) in coords.as_ref().iter().enumerate() { index += coord * self.stride[i]; } index @@ -66,7 +66,7 @@ impl Layout { &self.shape } - pub fn stride(&self) -> &Vec { + pub fn stride(&self) -> &[usize] { &self.stride } } diff --git a/tensor/src/store/mod.rs b/tensor/src/store/mod.rs index 60bfde80..3b06467f 100644 --- a/tensor/src/store/mod.rs +++ b/tensor/src/store/mod.rs @@ -19,5 +19,10 @@ pub enum TensorData { Tensor(Vec>), } +pub enum TensorBackend { + Scalar, + Tensor, +} + #[cfg(test)] mod tests {} diff --git a/tensor/src/tensor.rs b/tensor/src/tensor.rs index b9884974..76739605 100644 --- a/tensor/src/tensor.rs +++ b/tensor/src/tensor.rs @@ -5,33 +5,36 @@ // use crate::ops::TrackedOp; use crate::prelude::{IntoShape, Rank, Shape, TensorId, TensorKind, TensorOp}; use crate::store::Layout; +use acme::prelude::BinaryOp; use std::ops::Index; // use std::sync::{Arc, RwLock}; -pub(crate) fn from_vec(kind: TensorKind, shape: impl IntoShape, store: Vec) -> TensorBase { +pub(crate) fn new( + kind: TensorKind, + op: Option>, + shape: impl IntoShape, + store: Vec, +) -> TensorBase { TensorBase { id: TensorId::new(), kind, layout: Layout::contiguous(shape), - op: None, + op, store, } } +pub(crate) fn from_vec(kind: TensorKind, shape: impl IntoShape, store: Vec) -> TensorBase { + new(kind, None, shape, store) +} + pub(crate) fn from_vec_with_op( kind: impl Into, op: TensorOp, shape: impl IntoShape, store: Vec, ) -> TensorBase { - let layout = Layout::contiguous(shape); - TensorBase { - id: TensorId::new(), - kind: kind.into(), - layout, - op: Some(op), - store, - } + new(kind.into(), Some(op), shape, store) } #[derive(Clone, Debug)] @@ -46,17 +49,30 @@ pub struct TensorBase { impl TensorBase { pub fn new(kind: TensorKind, shape: impl IntoShape) -> Self { + let shape = shape.into_shape(); + let store = Vec::with_capacity(shape.elements()); Self { id: TensorId::new(), kind, layout: 
Layout::contiguous(shape), op: None, - store: Vec::new(), + store, } } - pub fn from_vec(kind: TensorKind, shape: impl IntoShape, store: Vec) -> Self { - from_vec(kind, shape, store) + pub fn from_vec( + kind: TensorKind, + op: Option>, + shape: impl IntoShape, + store: Vec, + ) -> Self { + Self { + id: TensorId::new(), + kind, + layout: Layout::contiguous(shape), + op, + store, + } } pub fn detach(&self) -> Self @@ -80,10 +96,10 @@ impl TensorBase { self.layout.elements() } /// Returns the unique identifier of the tensor. - pub fn id(&self) -> TensorId { + pub const fn id(&self) -> TensorId { self.id } - /// Get a reference to the layout of the tensor + /// Get a reference to the [Layout] of the tensor pub fn layout(&self) -> &Layout { &self.layout } @@ -91,20 +107,24 @@ impl TensorBase { pub fn op(&self) -> Option<&TensorOp> { self.op.as_ref() } - /// Get a reference to the rank of the tensor + /// Get an owned reference to the [Rank] of the tensor pub fn rank(&self) -> Rank { self.layout.shape().rank() } - /// Get a reference to the shape of the tensor + /// An owned reference of the tensors [Shape] pub fn shape(&self) -> &Shape { self.layout.shape() } - + /// Get a reference to the stride of the tensor pub fn stride(&self) -> &[usize] { self.layout.stride() } + /// A function to check if the tensor is a scalar + pub fn is_scalar(&self) -> bool { + self.shape().len() == 0 + } /// A function to check if the tensor is a variable - pub fn is_variable(&self) -> bool { + pub const fn is_variable(&self) -> bool { self.kind.is_variable() } /// Changes the kind of tensor to a variable @@ -119,23 +139,74 @@ impl TensorBase { { self.store.clone() } + + pub fn apply_binary(&self, op: BinaryOp, other: &Self, f: F) -> Self + where + F: Fn(&T, &T) -> T, + T: Clone, + { + let store = self + .store + .iter() + .zip(other.store.iter()) + .map(|(a, b)| f(a, b)) + .collect(); + TensorBase { + id: TensorId::new(), + kind: self.kind, + layout: self.layout.clone(), + op: Some(TensorOp::Binary( + Box::new(self.clone()), + Box::new(other.clone()), + op, + )), + store, + } + } + + pub fn map<'a, F>(&'a self, f: F) -> TensorBase + where + F: FnMut(&'a T) -> T, + T: 'a + Clone, + { + let store = self.store.iter().map(f).collect(); + TensorBase { + id: TensorId::new(), + kind: self.kind, + layout: self.layout.clone(), + op: self.op.clone(), + store, + } + } + + pub fn mapv(&self, f: F) -> TensorBase + where + F: Fn(T) -> T, + T: Copy, + { + let store = self.store.iter().copied().map(f).collect(); + TensorBase { + id: TensorId::new(), + kind: self.kind, + layout: self.layout.clone(), + op: self.op.clone(), + store, + } + } } impl TensorBase { pub(crate) fn data(&self) -> &Vec { &self.store } - // An internal function to get the index of the data based on coordinates - pub(crate) fn position(&self, coords: impl AsRef<[usize]>) -> usize { - self.layout.position(coords.as_ref()) - } } impl Index<&[usize]> for TensorBase { type Output = T; fn index(&self, index: &[usize]) -> &Self::Output { - &self.store[self.position(index)] + let i = self.layout().position(index); + &self.store[i] } } diff --git a/tensor/src/types/dtype.rs b/tensor/src/types/dtype.rs index 2b0e8a18..8c881206 100644 --- a/tensor/src/types/dtype.rs +++ b/tensor/src/types/dtype.rs @@ -5,11 +5,30 @@ use std::any::TypeId; +pub enum TypeError { + ConversionError, +} + pub enum DType { Float(Float), Integer(Integer), } +impl DType { + pub fn from_type(_value: &T) -> Result + where + T: 'static, + { + if let Ok(float) = Float::from_type(_value) { + 
Ok(DType::Float(float)) + } else if let Ok(integer) = Integer::from_type(_value) { + Ok(DType::Integer(integer)) + } else { + Err(()) + } + } +} + pub enum Float { F32, F64, @@ -47,6 +66,67 @@ pub struct Integer { pub signed: bool, } +impl Integer { + pub fn from_type(_value: &T) -> Result + where + T: 'static, + { + if TypeId::of::() == TypeId::of::() { + Ok(Integer { + bits: NumBits::B8, + signed: true, + }) + } else if TypeId::of::() == TypeId::of::() { + Ok(Integer { + bits: NumBits::B16, + signed: true, + }) + } else if TypeId::of::() == TypeId::of::() { + Ok(Integer { + bits: NumBits::B32, + signed: true, + }) + } else if TypeId::of::() == TypeId::of::() { + Ok(Integer { + bits: NumBits::B64, + signed: true, + }) + } else if TypeId::of::() == TypeId::of::() { + Ok(Integer { + bits: NumBits::B128, + signed: true, + }) + } else if TypeId::of::() == TypeId::of::() { + Ok(Integer { + bits: NumBits::B8, + signed: false, + }) + } else if TypeId::of::() == TypeId::of::() { + Ok(Integer { + bits: NumBits::B16, + signed: false, + }) + } else if TypeId::of::() == TypeId::of::() { + Ok(Integer { + bits: NumBits::B32, + signed: false, + }) + } else if TypeId::of::() == TypeId::of::() { + Ok(Integer { + bits: NumBits::B64, + signed: false, + }) + } else if TypeId::of::() == TypeId::of::() { + Ok(Integer { + bits: NumBits::B128, + signed: false, + }) + } else { + Err(()) + } + } +} + #[repr(u8)] pub enum NumBits { B8 = 8, diff --git a/tensor/src/types/kinds.rs b/tensor/src/types/kinds.rs index f7fc1747..275a63d2 100644 --- a/tensor/src/types/kinds.rs +++ b/tensor/src/types/kinds.rs @@ -28,12 +28,12 @@ use strum::{Display, EnumCount, EnumIs, EnumIter, EnumString, VariantNames}; PartialOrd, VariantNames, )] -#[repr(u8)] +#[repr(C)] #[strum(serialize_all = "lowercase")] pub enum TensorKind { #[default] - Normal, - Variable, + Normal = 0, + Variable = 1, } impl TensorKind { diff --git a/tensor/src/types/mod.rs b/tensor/src/types/mod.rs index 1c5b8970..b84c7a2f 100644 --- a/tensor/src/types/mod.rs +++ b/tensor/src/types/mod.rs @@ -9,6 +9,7 @@ pub mod kinds; pub mod order; pub(crate) mod prelude { + pub use super::dtype::DType; pub use super::id::TensorId; pub use super::kinds::TensorKind; pub use super::order::Order; diff --git a/tensor/tests/backward.rs b/tensor/tests/backward.rs index 9182ac8f..08e24a05 100644 --- a/tensor/tests/backward.rs +++ b/tensor/tests/backward.rs @@ -28,6 +28,32 @@ fn test_addition() { assert_eq!(grad[&b.id()], Tensor::ones(shape)); } +#[test] +fn test_addition_2() { + let shape = (2, 2); + let a = Tensor::::ones(shape).variable(); + let b = Tensor::::ones(shape).variable(); + let c = Tensor::::ones(shape).variable(); + let tmp = &a + &b; + println!("Tmp: {}", &tmp.id()); + let d = tmp + &c; + + assert_eq!(&d, &Tensor::fill(shape, 3_f64)); + println!( + "*** Variables ***\nA: {}\nB: {}\nC: {}\n\n", + &a.id(), + &b.id(), + &c.id() + ); + println!("*** Outcomes ***\nD: {}", &d.id()); + let grad = d.grad().unwrap(); + println!("{:?}", &grad.keys()); + + for i in [a.id(), b.id(), c.id()].iter() { + assert_eq!(grad[i], Tensor::ones(shape)); + } +} + #[test] fn test_multiplication() { let shape = (2, 2); From 029fe253c41da1fe5dd1fd2eedc596f6f0274387 Mon Sep 17 00:00:00 2001 From: Joe McCain III Date: Thu, 28 Mar 2024 03:42:53 -0500 Subject: [PATCH 55/87] update Signed-off-by: Joe McCain III --- tensor/src/shape/dim/axis.rs | 26 ++++++++++++++++++++++++++ tensor/src/shape/dim/mod.rs | 10 ++++++++-- tensor/src/shape/shape.rs | 11 +++++++++-- tensor/src/utils.rs | 24 
++++++++++++------------ 4 files changed, 55 insertions(+), 16 deletions(-) diff --git a/tensor/src/shape/dim/axis.rs b/tensor/src/shape/dim/axis.rs index df582df5..d54f84e3 100644 --- a/tensor/src/shape/dim/axis.rs +++ b/tensor/src/shape/dim/axis.rs @@ -4,7 +4,13 @@ */ //! # Axis //! +//! +#[cfg(feature = "serde")] +use serde::{Deserialize, Serialize}; +use std::ops::Deref; +#[derive(Clone, Copy, Debug, Default, Eq, Hash, Ord, PartialEq, PartialOrd)] +#[cfg_attr(feature = "serde", derive(Deserialize, Serialize))] pub struct Axis(pub(crate) usize); impl Axis { @@ -16,3 +22,23 @@ impl Axis { self.0 } } + +impl AsRef for Axis { + fn as_ref(&self) -> &usize { + &self.0 + } +} + +impl Deref for Axis { + type Target = usize; + + fn deref(&self) -> &Self::Target { + &self.0 + } +} + +impl std::fmt::Display for Axis { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "{}", self.0) + } +} \ No newline at end of file diff --git a/tensor/src/shape/dim/mod.rs b/tensor/src/shape/dim/mod.rs index 56d57a01..543deb7b 100644 --- a/tensor/src/shape/dim/mod.rs +++ b/tensor/src/shape/dim/mod.rs @@ -11,6 +11,14 @@ pub(crate) mod axis; pub(crate) mod dimension; pub(crate) mod rank; +pub trait Dimension { + type Pattern; + + fn elements(&self) -> usize; + + fn ndim(&self) -> usize; +} + pub trait IntoAxis { fn into_axis(self) -> Axis; } @@ -30,5 +38,3 @@ impl IntoRank for usize { Rank::new(self) } } - -pub trait Dimension {} diff --git a/tensor/src/shape/shape.rs b/tensor/src/shape/shape.rs index 1c4ff155..3ccf3ad2 100644 --- a/tensor/src/shape/shape.rs +++ b/tensor/src/shape/shape.rs @@ -83,7 +83,7 @@ impl Shape { self.0.remove(index) } - pub fn columns(&self) -> usize { + pub fn ncols(&self) -> usize { if self.len() >= 2 { self.0[1] } else if self.len() == 1 { @@ -93,7 +93,7 @@ impl Shape { } } - pub fn rows(&self) -> usize { + pub fn nrows(&self) -> usize { if self.len() >= 1 { *self.0.first().unwrap() } else { @@ -311,6 +311,13 @@ unsafe impl Send for Shape {} unsafe impl Sync for Shape {} +// macro_rules! tuple_vec { +// ($($n:tt),*) => { +// vec![$($n,)*] +// }; + +// } + // macro_rules! 
impl_from_tuple { // ($($n:tt: $name:ident),+) => { // impl<$($name),+> From<($($name,)+)> for Shape diff --git a/tensor/src/utils.rs b/tensor/src/utils.rs index 93c7e8fd..f2afe09d 100644 --- a/tensor/src/utils.rs +++ b/tensor/src/utils.rs @@ -20,12 +20,12 @@ where let shape = lhs.shape().matmul_shape(rhs.shape()).unwrap(); let mut result = vec![T::zero(); shape.elements()]; - for i in 0..lhs.shape().rows() { - for j in 0..rhs.shape().columns() { - for k in 0..lhs.shape().columns() { - let pos = i * rhs.shape().columns() + j; - let left = i * lhs.shape().columns() + k; - let right = k * rhs.shape().columns() + j; + for i in 0..lhs.shape().nrows() { + for j in 0..rhs.shape().ncols() { + for k in 0..lhs.shape().ncols() { + let pos = i * rhs.shape().ncols() + j; + let left = i * lhs.shape().ncols() + k; + let right = k * rhs.shape().ncols() + j; result[pos] += lhs.store[left] * rhs.store[right]; } } @@ -46,12 +46,12 @@ where let shape = lhs.shape().matmul_shape(rhs.shape()).unwrap(); let mut result = vec![T::zero(); shape.elements()]; - for i in 0..lhs.shape().rows() { - for j in 0..rhs.shape().columns() { - for k in 0..lhs.shape().columns() { - let pos = i * rhs.shape().columns() + j; - let left = i * lhs.shape().columns() + k; - let right = k * rhs.shape().columns() + j; + for i in 0..lhs.shape().nrows() { + for j in 0..rhs.shape().ncols() { + for k in 0..lhs.shape().ncols() { + let pos = i * rhs.shape().ncols() + j; + let left = i * lhs.shape().ncols() + k; + let right = k * rhs.shape().ncols() + j; result[pos] += lhs.store[left] * rhs.store[right]; } } From 282d4863118492168fa9a2049ad1bd6d4259c23f Mon Sep 17 00:00:00 2001 From: Joe McCain III Date: Thu, 28 Mar 2024 11:26:31 -0500 Subject: [PATCH 56/87] update Signed-off-by: Joe McCain III --- Cargo.toml | 7 +- acme/Cargo.toml | 20 ++-- core/src/math/mod.rs | 7 +- core/src/ops/mod.rs | 6 ++ core/src/specs/gradient.rs | 16 +-- derive/Cargo.toml | 1 + graphs/Cargo.toml | 10 +- graphs/src/dcg/graph.rs | 177 ++++++++++++++++--------------- graphs/src/dcg/node.rs | 16 ++- graphs/src/scg/graph.rs | 2 +- graphs/tests/dcg.rs | 32 ++++-- macros/Cargo.toml | 1 + macros/src/grad/mod.rs | 23 ++++ tensor/Cargo.toml | 17 ++- tensor/src/actions/grad/store.rs | 55 +++++++--- tensor/src/actions/index/mod.rs | 2 + tensor/src/data/mod.rs | 116 ++++++++++++++++++-- tensor/src/data/repr/owned.rs | 150 +++++++++++++++++++++++--- tensor/src/data/repr/shared.rs | 138 ++++++++++++++++++++++++ tensor/src/data/repr/view.rs | 39 +++++++ tensor/src/data/specs.rs | 154 +++++++++++++++++++++++++++ tensor/src/impls/create.rs | 2 +- tensor/src/impls/grad.rs | 56 +++++++--- tensor/src/impls/linalg.rs | 4 +- tensor/src/impls/ops/binary.rs | 45 ++++++-- tensor/src/impls/ops/unary.rs | 10 +- tensor/src/impls/reshape.rs | 10 +- tensor/src/lib.rs | 2 +- tensor/src/ops/backprop.rs | 32 ++++++ tensor/src/shape/dim/axis.rs | 20 +++- tensor/src/shape/mod.rs | 2 +- tensor/src/shape/shape.rs | 37 ++++--- tensor/src/shape/stride.rs | 58 ++++++++++ tensor/src/specs/ndtensor.rs | 8 +- tensor/src/specs/scalar.rs | 2 +- tensor/src/store/layout.rs | 28 +++-- tensor/src/store/storage.rs | 93 ++++++++++++---- tensor/src/tensor.rs | 30 +++--- tensor/src/utils.rs | 8 +- tensor/tests/backward.rs | 59 ++++++----- 40 files changed, 1179 insertions(+), 316 deletions(-) create mode 100644 tensor/src/data/repr/shared.rs create mode 100644 tensor/src/data/repr/view.rs create mode 100644 tensor/src/data/specs.rs diff --git a/Cargo.toml b/Cargo.toml index a344ac64..cdd20fc1 100644 --- a/Cargo.toml +++ 
b/Cargo.toml @@ -1,13 +1,14 @@ [workspace.package] authors = ["FL03 (https://github.com/FL03)"] -categories = [] -description = "Acme is an autodiff library for Rust." +categories = ["mathematics", "science"] +description = "Acme aims to provide a solid foundation for developing robust machine-learning systems in Rust." edition = "2021" homepage = "https://github.com/FL03/acme/wikis" +keywords = ["acme", "autodiff", "mathematics", "tensor"] license = "Apache-2.0" repository = "https://github.com/FL03/acme" readme = "README.md" -version = "0.3.0" # TODO - Update cargo package version +version = "0.3.0-nightly.2" # TODO - Update cargo package version [workspace] default-members = [ diff --git a/acme/Cargo.toml b/acme/Cargo.toml index 90315d5b..0b7329ba 100644 --- a/acme/Cargo.toml +++ b/acme/Cargo.toml @@ -1,13 +1,12 @@ [package] authors.workspace = true -categories = [] +categories.workspace = true description.workspace = true edition.workspace = true homepage.workspace = true -keywords = ["async", "scsys"] +keywords.workspace = true license.workspace = true name = "acme" -publish = true readme.workspace = true repository.workspace = true version.workspace = true @@ -75,11 +74,16 @@ required-features = ["macros"] [build-dependencies] [dependencies] -acme-core = { path = "../core", version = "0.3.0" } -acme-derive = { optional = true, path = "../derive", version = "0.3.0" } -acme-graphs = { optional = true, path = "../graphs", version = "0.3.0" } -acme-macros = { optional = true, path = "../macros", version = "0.3.0" } -acme-tensor = { optional = true, path = "../tensor", version = "0.3.0" } +# acme-core = { path = "../core", version = "0.3.0" } +# acme-derive = { optional = true, path = "../derive", version = "0.3.0" } +# acme-graphs = { optional = true, path = "../graphs", version = "0.3.0" } +# acme-macros = { optional = true, path = "../macros", version = "0.3.0" } +# acme-tensor = { optional = true, path = "../tensor", version = "0.3.0" } +acme-core = { path = "../core", version = "0.3.0-nightly.2" } +acme-derive = { optional = true, path = "../derive", version = "0.3.0-nightly.2" } +acme-graphs = { optional = true, path = "../graphs", version = "0.3.0-nightly.2" } +acme-macros = { optional = true, path = "../macros", version = "0.3.0-nightly.2" } +acme-tensor = { optional = true, path = "../tensor", version = "0.3.0-nightly.2" } [dev-dependencies] approx = "0.5" diff --git a/core/src/math/mod.rs b/core/src/math/mod.rs index 701d84f2..6a31e9b6 100644 --- a/core/src/math/mod.rs +++ b/core/src/math/mod.rs @@ -2,10 +2,11 @@ Appellation: math Contrib: FL03 */ -//! # Linear Algebra -//! -//! This module implements fundamental linear algebra concepts and operations. +//! # Mathematics //! +//! This module contains the core mathematical operations and structures used +//! throughout the library. It is divided into submodules for each mathematical +//! operation or structure. 
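// [editor's sketch] `VectorSpace` (in `linalg`) and `Field` (in `linalg::fields`)
// are empty marker traits at this stage, so adopting them is a one-liner.
// Hypothetical usage, not part of this patch:
//
//     use acme_core::math::linalg::{fields::Field, VectorSpace};
//
//     struct R2 { x: f64, y: f64 }
//     impl VectorSpace for R2 {}
//     impl Field for f64 {}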
pub mod linalg; #[cfg(test)] diff --git a/core/src/ops/mod.rs b/core/src/ops/mod.rs index 3074f1d1..fdfd0be0 100644 --- a/core/src/ops/mod.rs +++ b/core/src/ops/mod.rs @@ -39,6 +39,12 @@ where fn powf(&self, exp: T) -> Self::Output; } +pub trait Squared { + type Output; + + fn squared(&self) -> Self::Output; +} + pub(crate) mod prelude { pub use super::binary::*; pub use super::kinds::Op; diff --git a/core/src/specs/gradient.rs b/core/src/specs/gradient.rs index 8be0df67..e63405d9 100644 --- a/core/src/specs/gradient.rs +++ b/core/src/specs/gradient.rs @@ -27,21 +27,11 @@ pub trait Grad { fn grad(&self) -> Self::Gradient; } -pub trait Partial { - type Args; - type Output; - - fn partial(&self) -> fn(Self::Args) -> Self::Output; - - fn partial_at(&self, args: Self::Args) -> Self::Output { - (self.partial())(args) - } -} - pub trait Parameter { type Key; type Value; - fn key(&self) -> Self::Key; - fn value(&self) -> Self::Value; + fn key(&self) -> &Self::Key; + + fn value(&self) -> &Self::Value; } diff --git a/derive/Cargo.toml b/derive/Cargo.toml index c5d7bb9c..ec74f8a7 100644 --- a/derive/Cargo.toml +++ b/derive/Cargo.toml @@ -4,6 +4,7 @@ categories.workspace = true description.workspace = true edition.workspace = true homepage.workspace = true +keywords.workspace = true license.workspace = true name = "acme-derive" readme.workspace = true diff --git a/graphs/Cargo.toml b/graphs/Cargo.toml index 698db56f..2e9ec24e 100644 --- a/graphs/Cargo.toml +++ b/graphs/Cargo.toml @@ -1,6 +1,7 @@ [package] authors.workspace = true -description = "This module implements several computational graphs" +categories.workspace = true +description = "Computational graphs for Rust" edition.workspace = true homepage.workspace = true license.workspace = true @@ -34,9 +35,6 @@ test = true [dev-dependencies] [dependencies] -acme-core = { path = "../core", version = "0.3.0" } - -lazy_static = "1" num = "0.4" petgraph = "0.6" serde = { optional = true, features = ["derive"], version = "1" } @@ -44,6 +42,10 @@ serde_json = { optional = true, version = "1" } smart-default.workspace = true strum.workspace = true +[dependencies.acme-core] +path = "../core" +version = "0.3.0-nightly.2" # "0.3.0" + [package.metadata.docs.rs] all-features = true rustc-args = ["--cfg", "docsrs"] diff --git a/graphs/src/dcg/graph.rs b/graphs/src/dcg/graph.rs index af54f91f..e8a9385c 100644 --- a/graphs/src/dcg/graph.rs +++ b/graphs/src/dcg/graph.rs @@ -8,11 +8,10 @@ use super::DynamicGraph; use crate::ops::*; use crate::prelude::GraphResult as Result; use crate::NodeIndex; -use num::traits::{Num, NumAssignOps, NumOps}; +use num::traits::NumAssign; use petgraph::algo::toposort; -use petgraph::prelude::Direction; use std::collections::HashMap; -use std::ops::Index; +use std::ops::{Index, Neg}; pub struct Dcg { store: DynamicGraph, @@ -25,6 +24,22 @@ impl Dcg { } } + pub fn binary( + &mut self, + lhs: NodeIndex, + rhs: NodeIndex, + op: impl Into, + ) -> NodeIndex { + let c = self.store.add_node(Node::binary(lhs, rhs, op)); + self.store.add_edge(lhs, c, Edge::new(lhs)); + self.store.add_edge(rhs, c, Edge::new(rhs)); + c + } + + pub fn constant(&mut self, value: T) -> NodeIndex { + self.input(false, value) + } + pub fn get(&self, index: NodeIndex) -> Option<&Node> { self.store.node_weight(index) } @@ -33,10 +48,6 @@ impl Dcg { self.store.add_node(node.into()) } - pub fn remove(&mut self, index: NodeIndex) -> Option> { - self.store.remove_node(index) - } - pub fn input(&mut self, param: bool, value: T) -> NodeIndex { 
self.store.add_node(Node::input(param, value)) } @@ -54,112 +65,104 @@ impl Dcg { } c } + + pub fn remove(&mut self, index: NodeIndex) -> Option> { + self.store.remove_node(index) + } + + pub fn unary(&mut self, input: NodeIndex, op: impl Into) -> NodeIndex { + let c = self.store.add_node(Node::unary(input, op)); + self.store.add_edge(input, c, Edge::new(input)); + c + } + + pub fn variable(&mut self, value: T) -> NodeIndex { + self.input(true, value) + } } impl Dcg { pub fn add(&mut self, lhs: NodeIndex, rhs: NodeIndex) -> NodeIndex { - self.op([lhs, rhs], BinaryExpr::add()) + self.binary(lhs, rhs, BinaryExpr::add()) } - pub fn mul(&mut self, lhs: NodeIndex, rhs: NodeIndex) -> NodeIndex { - self.op([lhs, rhs], BinaryExpr::mul()) - } - - pub fn backward(&self) -> Result> - where - T: Copy + Default + Num + NumAssignOps + NumOps, - { - let mut sorted = toposort(&self.store, None)?; - sorted.reverse(); - let target = *sorted.first().unwrap(); - - let mut gradients = HashMap::::new(); - gradients.insert(target, T::one()); - - for scope in sorted.iter().copied() { - // Get the gradient of the current scope - let grad = gradients[&scope]; - let node = &self[scope]; - - if let Node::Op { inputs, op } = node { - match op { - Operations::Binary(inner) => match *inner { - BinaryExpr::Add(_) => { - for arg in self.store.neighbors_directed(scope, Direction::Incoming) { - *gradients.entry(arg).or_default() += grad; - } - } - BinaryExpr::Mul(_) => { - let lhs = inputs[0]; - let rhs = inputs[1]; - let lhs_val = self.get(lhs).unwrap().get_value(); - let rhs_val = self.get(rhs).unwrap().get_value(); - *gradients.entry(lhs).or_default() += grad * rhs_val; - *gradients.entry(rhs).or_default() += grad * lhs_val; - } - _ => {} - }, - // Handle other operations as needed - _ => {} - } - } - } + pub fn div(&mut self, lhs: NodeIndex, rhs: NodeIndex) -> NodeIndex { + self.binary(lhs, rhs, BinaryExpr::div()) + } - Ok(gradients) + pub fn mul(&mut self, lhs: NodeIndex, rhs: NodeIndex) -> NodeIndex { + self.binary(lhs, rhs, BinaryExpr::mul()) } - pub fn gradient(&self, output: NodeIndex) -> Result> - where - T: Copy + Default + Num + NumAssignOps + NumOps, - { - let mut gradients = HashMap::::new(); - gradients.insert(output, T::one()); // Initialize output gradient to 1.0 + pub fn sub(&mut self, lhs: NodeIndex, rhs: NodeIndex) -> NodeIndex { + self.binary(lhs, rhs, BinaryExpr::sub()) + } +} - let topo = toposort(&self.store, None)?; +impl Dcg +where + T: Copy + Default + Neg + NumAssign, +{ + pub fn backward(&self) -> Result> { + let sorted = toposort(&self.store, None)?; + let target = sorted.last().unwrap(); + self.gradient(*target) + } + pub fn gradient(&self, target: NodeIndex) -> Result> { + let mut store = HashMap::::new(); + // initialize the stack + let mut stack = Vec::<(NodeIndex, T)>::new(); + // start by computing the gradient of the target w.r.t. 
itself + stack.push((target, T::one())); + store.insert(target, T::one()); - for scope in topo.iter().rev() { - let grad = gradients[scope]; - let node = self.get(*scope).unwrap(); + while let Some((i, grad)) = stack.pop() { + let node = &self[i]; match node { Node::Binary { lhs, rhs, op } => match op { BinaryExpr::Add(_) => { - *gradients.entry(*lhs).or_default() += grad; - *gradients.entry(*rhs).or_default() += grad; + *store.entry(*lhs).or_default() += grad; + *store.entry(*rhs).or_default() += grad; + + stack.push((*lhs, grad)); + stack.push((*rhs, grad)); } BinaryExpr::Mul(_) => { - let lhs_val = self.get(*lhs).unwrap().get_value(); - let rhs_val = self.get(*rhs).unwrap().get_value(); - *gradients.entry(*lhs).or_default() += grad * rhs_val; - *gradients.entry(*rhs).or_default() += grad * lhs_val; + let lhs_grad = grad * self[*rhs].value(); + let rhs_grad = grad * self[*lhs].value(); + *store.entry(*lhs).or_default() += lhs_grad; + *store.entry(*rhs).or_default() += rhs_grad; + + stack.push((*lhs, lhs_grad)); + stack.push((*rhs, rhs_grad)); + } + BinaryExpr::Sub(_) => { + *store.entry(*lhs).or_default() += grad; + *store.entry(*rhs).or_default() -= grad; + + stack.push((*lhs, grad)); + stack.push((*rhs, grad.neg())); } _ => {} }, - _ => {} - } - - if let Node::Op { inputs, op } = node { - match op { - Operations::Binary(BinaryExpr::Add(_)) => { - for arg in self.store.neighbors_directed(*scope, Direction::Incoming) { - *gradients.entry(arg).or_default() += grad; - } + Node::Unary { op, .. } => match op { + _ => { + unimplemented!(); } - Operations::Binary(BinaryExpr::Mul(_)) => { - let lhs = inputs[0]; - let rhs = inputs[1]; - let lhs_val = self[lhs].get_value(); - let rhs_val = self[rhs].get_value(); - *gradients.entry(lhs).or_default() += grad * rhs_val; - *gradients.entry(rhs).or_default() += grad * lhs_val; + }, + Node::Input { param, .. 
} => { + if *param { + continue; } - // Handle other operations as needed - _ => {} + *store.entry(i).or_default() += grad; + stack.push((i, grad)); } + _ => {} } } - Ok(gradients) + Ok(store) } } diff --git a/graphs/src/dcg/node.rs b/graphs/src/dcg/node.rs index 033d6e08..536af67d 100644 --- a/graphs/src/dcg/node.rs +++ b/graphs/src/dcg/node.rs @@ -2,7 +2,7 @@ Appellation: node Contrib: FL03 */ -use crate::ops::{BinaryExpr, Operations}; +use crate::ops::{BinaryExpr, Operations, UnaryExpr}; use crate::NodeIndex; #[derive(Clone, Debug)] @@ -12,6 +12,10 @@ pub enum Node { rhs: NodeIndex, op: BinaryExpr, }, + Unary { + input: NodeIndex, + op: UnaryExpr, + }, Op { inputs: Vec, op: Operations, @@ -30,6 +34,14 @@ impl Node { op: op.into(), } } + + pub fn unary(input: NodeIndex, op: impl Into) -> Self { + Node::Unary { + input, + op: op.into(), + } + } + pub fn op(inputs: impl IntoIterator, op: impl Into) -> Self { Node::Op { inputs: Vec::from_iter(inputs), @@ -41,7 +53,7 @@ impl Node { Node::Input { param, value } } - pub fn get_value(&self) -> T + pub fn value(&self) -> T where T: Copy + Default, { diff --git a/graphs/src/scg/graph.rs b/graphs/src/scg/graph.rs index 206c2c6d..280ecb12 100644 --- a/graphs/src/scg/graph.rs +++ b/graphs/src/scg/graph.rs @@ -125,7 +125,7 @@ where T::default() }; // add or insert the gradient of the input - *gradients.entry(*input).or_insert(T::default()) += dt; + *gradients.entry(*input).or_default() += dt; // push the input and its gradient onto the stack stack.push((*input, dt)); } diff --git a/graphs/tests/dcg.rs b/graphs/tests/dcg.rs index a243907d..9a8a5336 100644 --- a/graphs/tests/dcg.rs +++ b/graphs/tests/dcg.rs @@ -10,16 +10,16 @@ use graphs::dcg::Dcg; #[test] fn test_dcg() { let mut dcg = Dcg::::new(); - let a = dcg.input(true, 2.0); - let b = dcg.input(true, 3.0); + let a = dcg.variable(2.0); + let b = dcg.variable(3.0); let c = dcg.add(a, b); let grad = dcg.gradient(c).unwrap(); assert_eq!(grad[&a], 1.0); let mut dcg = Dcg::::new(); - let a = dcg.input(true, 2.0); - let b = dcg.input(true, 3.0); + let a = dcg.variable(2.0); + let b = dcg.variable(3.0); let c = dcg.mul(a, b); let grad = dcg.gradient(c).unwrap(); @@ -28,14 +28,26 @@ fn test_dcg() { } #[test] +fn test_backward() { + let mut dcg = Dcg::::new(); + let a = dcg.variable(2.0); + let b = dcg.variable(3.0); + let c = dcg.add(a, b); + + let grad = dcg.backward().unwrap(); + assert_eq!(grad, dcg.gradient(c).unwrap()); +} + +#[test] +#[ignore = "Not yet implemented"] fn test_composite_expr() { let mut dcg = Dcg::::new(); - let a = dcg.input(true, 2.0); - let b = dcg.input(true, 1.0); + let a = dcg.variable(1_f64); + let b = dcg.variable(2_f64); let c = dcg.add(a, b); - let d = dcg.mul(a, c); + let _d = dcg.mul(c, b); - let grad = dcg.gradient(d).unwrap(); - assert_eq!(grad[&a], 2.0); - assert_eq!(grad[&b], 2.0); + let grad = dcg.backward().unwrap(); + assert_eq!(grad[&a], 2_f64); + assert_eq!(grad[&b], 5_f64); } diff --git a/macros/Cargo.toml b/macros/Cargo.toml index 05591129..9eab7382 100644 --- a/macros/Cargo.toml +++ b/macros/Cargo.toml @@ -4,6 +4,7 @@ categories.workspace = true description.workspace = true edition.workspace = true homepage.workspace = true +keywords.workspace = true license.workspace = true name = "acme-macros" readme.workspace = true diff --git a/macros/src/grad/mod.rs b/macros/src/grad/mod.rs index fbf804d2..49b43b58 100644 --- a/macros/src/grad/mod.rs +++ b/macros/src/grad/mod.rs @@ -38,3 +38,26 @@ pub fn handle_item_fn(item: &ItemFn) -> TokenStream { [#(#grad)*] } } + +pub 
fn item_fn_partial(item: &ItemFn) -> TokenStream { + let ItemFn { block, sig, .. } = item; + let Signature { inputs, .. } = sig; + + let mut vars = Vec::new(); + for input in inputs { + if let syn::FnArg::Typed(typed) = input { + if let syn::Pat::Ident(ident) = &*typed.pat { + vars.push(ident.ident.clone()); + } + } + } + + let grad = vars + .iter() + .map(|var| handle_block(&block, &var)) + .collect::>(); + + quote! { + [#(#grad)*] + } +} diff --git a/tensor/Cargo.toml b/tensor/Cargo.toml index f99e463a..b669ac68 100644 --- a/tensor/Cargo.toml +++ b/tensor/Cargo.toml @@ -1,9 +1,10 @@ [package] authors.workspace = true categories.workspace = true -description.workspace = true +description = "A comprehensive tensor library for Rust with support for automatic-differentiation." edition.workspace = true homepage.workspace = true +keywords = ["acme", "autodiff", "data-structure", "tensor"] license.workspace = true name = "acme-tensor" repository.workspace = true @@ -11,7 +12,11 @@ readme.workspace = true version.workspace = true [features] -default = [] +default = [ + "std" +] + +std = [] serde = [ "dep:serde", @@ -25,13 +30,17 @@ serde-ext = [ [build-dependencies] [dependencies] -acme-core = { path = "../core", version = "0.3.0" } - num = "0.4" +rawpointer = "0.2" serde = { optional = true, features = ["derive"], version = "1" } strum = { features = ["derive"], version = "0.26" } +[dependencies.acme-core] +path = "../core" +version = "0.3.0-nightly.2" # "0.3.0" + [dev-dependencies] +approx = "0.5" [package.metadata.docs.rs] all-features = true diff --git a/tensor/src/actions/grad/store.rs b/tensor/src/actions/grad/store.rs index 655c24e1..bc2707c2 100644 --- a/tensor/src/actions/grad/store.rs +++ b/tensor/src/actions/grad/store.rs @@ -52,14 +52,16 @@ impl GradStore { pub fn or_insert(&mut self, tensor: TensorBase) -> &mut TensorBase { self.entry(tensor.id()).or_insert(tensor) } - + /// If the store does not have a tensor with the given id, insert a tensor with the same shape + /// and dtype as the given tensor, where all elements are default. pub fn or_insert_default(&mut self, tensor: &TensorBase) -> &mut TensorBase where T: Clone + Default, { self.entry(tensor.id()).or_insert(tensor.default_like()) } - + /// If the store does not have a tensor with the given id, insert a tensor with the same shape + /// and dtype as the given tensor, where all elements are zeros. 
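    // [editor's sketch] Typical call-site during backpropagation, assuming the
    // `Tensor<f64>` alias used in this series' tests and a hypothetical
    // `upstream` gradient tensor:
    //
    //     let mut grads = GradStore::new();
    //     let w = Tensor::<f64>::ones((2, 2)).variable();
    //     *grads.or_insert_zeros(&w) += &upstream; // fetch-or-init, then accumulate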
pub fn or_insert_zeros(&mut self, tensor: &TensorBase) -> &mut TensorBase where T: Clone + num::Zero, @@ -68,21 +70,17 @@ impl GradStore { } } -impl Store> for GradStore { - fn get(&self, key: &TensorId) -> Option<&TensorBase> { - self.store.get(key) - } - - fn get_mut(&mut self, key: &TensorId) -> Option<&mut TensorBase> { - self.store.get_mut(key) - } - - fn insert(&mut self, key: TensorId, value: TensorBase) -> Option> { - self.store.insert(key, value) +impl Extend<(TensorId, TensorBase)> for GradStore { + fn extend)>>(&mut self, iter: I) { + self.store.extend(iter) } +} - fn remove(&mut self, key: &TensorId) -> Option> { - self.store.remove(key) +impl FromIterator<(TensorId, TensorBase)> for GradStore { + fn from_iter)>>(iter: I) -> Self { + Self { + store: BTreeMap::from_iter(iter), + } } } @@ -99,3 +97,30 @@ impl IndexMut<&TensorId> for GradStore { self.get_mut(index).expect("Tensor not found") } } + +impl IntoIterator for GradStore { + type Item = (TensorId, TensorBase); + type IntoIter = std::collections::btree_map::IntoIter>; + + fn into_iter(self) -> Self::IntoIter { + self.store.into_iter() + } +} + +impl Store> for GradStore { + fn get(&self, key: &TensorId) -> Option<&TensorBase> { + self.store.get(key) + } + + fn get_mut(&mut self, key: &TensorId) -> Option<&mut TensorBase> { + self.store.get_mut(key) + } + + fn insert(&mut self, key: TensorId, value: TensorBase) -> Option> { + self.store.insert(key, value) + } + + fn remove(&mut self, key: &TensorId) -> Option> { + self.store.remove(key) + } +} diff --git a/tensor/src/actions/index/mod.rs b/tensor/src/actions/index/mod.rs index f00c1a5b..a0bf39ff 100644 --- a/tensor/src/actions/index/mod.rs +++ b/tensor/src/actions/index/mod.rs @@ -9,5 +9,7 @@ pub use self::slice::*; pub(crate) mod slice; +pub trait TensorIdx {} + #[cfg(test)] mod tests {} diff --git a/tensor/src/data/mod.rs b/tensor/src/data/mod.rs index 53525b72..4d46824f 100644 --- a/tensor/src/data/mod.rs +++ b/tensor/src/data/mod.rs @@ -5,27 +5,120 @@ //! # Data //! //! +pub use self::specs::*; + +pub(crate) mod specs; + pub mod elem; pub mod repr { - pub mod owned; + pub use self::{owned::OwnedRepr, shared::OwnedArcRepr, view::*}; + + pub(crate) mod owned; + pub(crate) mod shared; + pub(crate) mod view; +} + +use crate::prelude::{BackpropOp, Layout, TensorId, TensorKind}; +use core::ptr::NonNull; + +pub type Tensor = BaseTensor>; + +pub type ArcTensor = BaseTensor>; + +#[derive(Clone)] +pub struct BaseTensor +where + S: RawData, +{ + id: TensorId, + data: S, + kind: TensorKind, + layout: Layout, + op: BackpropOp, + ptr: NonNull, } -pub trait Data: RawData {} +impl BaseTensor +where + S: RawData, +{ + #[inline(always)] + pub fn as_ptr(&self) -> *const A { + self.ptr.as_ptr() as *const A + } + + /// Return a mutable pointer to the first element in the array. + /// + /// This method attempts to unshare the data. If `S: DataMut`, then the + /// data is guaranteed to be uniquely held on return. + /// + /// # Warning + /// + /// When accessing elements through this pointer, make sure to use strides + /// obtained *after* calling this method, since the process of unsharing + /// the data may change the strides. + #[inline(always)] + pub fn as_mut_ptr(&mut self) -> *mut A + where + S: RawDataMut, + { + // self.try_ensure_unique(); // for ArcArray + self.ptr.as_ptr() + } + + /// Without any coping, turn the tensor into a shared tensor. 
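    // [editor's note] "coping" above is presumably a typo for "copying".
    // Illustrative use, with the `Tensor` / `ArcTensor` aliases introduced in
    // this patch:
    //
    //     let owned: Tensor<f64> = /* ... */;
    //     let shared: ArcTensor<f64> = owned.into_shared(); // moves, no element copies
    //     let view2 = shared.clone(); // cheap Arc refcount bump, data stays shared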
+ pub fn into_shared(self) -> ArcTensor + where + S: DataOwned, + { + let data = self.data.into_shared(); + // safe because: equivalent unmoved data, ptr and dims remain valid + // unsafe { Self::from_data_ptr(data, self.ptr).with_strides_dim(self.strides, self.dim) } + unsafe { BaseTensor::from_data_ptr(data, self.ptr) } + } + + pub fn size(&self) -> usize { + self.layout.size() + } +} -#[allow(clippy::missing_safety_doc)] -pub unsafe trait RawData { - type Elem; +// Internal methods +impl BaseTensor +where + S: RawData, +{ + pub(crate) unsafe fn from_data_ptr(data: S, ptr: NonNull) -> Self { + let tensor = Self { + id: TensorId::new(), + data, + kind: TensorKind::Normal, + layout: Layout::contiguous(0), + op: BackpropOp::none(), + ptr, + }; + debug_assert!(tensor.pointer_is_inbounds()); + tensor + } - #[doc(hidden)] - fn _is_pointer_inbounds(&self, ptr: *const Self::Elem) -> bool; + pub(crate) fn pointer_is_inbounds(&self) -> bool { + self.data._is_pointer_inbounds(self.as_ptr()) + } - private_decl! {} + pub(crate) unsafe fn with_layout(self, layout: Layout) -> BaseTensor { + Self { + id: self.id, + data: self.data, + kind: self.kind, + layout, + op: self.op, + ptr: self.ptr, + } + } } pub(crate) mod utils { #[cfg(not(feature = "std"))] - #[allow(unused_imports)] use alloc::vec::Vec; use core::ptr::NonNull; @@ -48,5 +141,10 @@ pub(crate) mod utils { } } +pub(crate) mod prelude { + pub use super::repr::*; + pub use super::specs::*; +} + #[cfg(test)] mod tests {} diff --git a/tensor/src/data/repr/owned.rs b/tensor/src/data/repr/owned.rs index b6413e48..af500ca2 100644 --- a/tensor/src/data/repr/owned.rs +++ b/tensor/src/data/repr/owned.rs @@ -2,11 +2,15 @@ Appellation: owned Contrib: FL03 */ +use crate::data::repr::OwnedArcRepr; use crate::data::utils::nonnull_from_vec_data; -use crate::data::RawData; -use core::mem::{self, ManuallyDrop}; +use crate::data::{ArcTensor, BaseTensor, Tensor}; +use crate::data::{Data, DataMut, DataOwned, RawData, RawDataClone, RawDataMut, RawDataSubst}; +use core::mem::{self, ManuallyDrop, MaybeUninit}; use core::ptr::NonNull; use core::slice; +use rawpointer::PointerExt; +use std::sync::Arc; #[derive(Debug)] #[repr(C)] @@ -36,10 +40,6 @@ impl OwnedRepr { self.ptr.as_ptr() } - pub(crate) fn as_slice(&self) -> &[A] { - unsafe { slice::from_raw_parts(self.ptr.as_ptr(), self.len) } - } - pub const fn capacity(&self) -> usize { self.capacity } @@ -47,17 +47,50 @@ impl OwnedRepr { pub const fn len(&self) -> usize { self.len } +} + +// Internal methods +impl OwnedRepr { + pub(crate) fn as_nonnull_mut(&mut self) -> NonNull { + self.ptr + } + + pub(crate) fn as_slice(&self) -> &[A] { + unsafe { slice::from_raw_parts(self.ptr.as_ptr(), self.len) } + } + + /// Cast self into equivalent repr of other element type + /// + /// ## Safety + /// + /// Caller must ensure the two types have the same representation. + /// **Panics** if sizes don't match (which is not a sufficient check). + pub(crate) unsafe fn data_subst(self) -> OwnedRepr { + // necessary but not sufficient check + assert_eq!(mem::size_of::(), mem::size_of::()); + let self_ = ManuallyDrop::new(self); + OwnedRepr { + ptr: self_.ptr.cast::(), + len: self_.len, + capacity: self_.capacity, + } + } + #[allow(dead_code)] + pub(crate) fn into_vec(self) -> Vec { + ManuallyDrop::new(self).take_as_vec() + } /// Set the valid length of the data /// /// ## Safety /// /// The first `new_len` elements of the data should be valid. 
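    // [editor's sketch] The contract mirrors `Vec::set_len`: shrink freely, or
    // grow only after the new elements have been initialized. For example:
    //
    //     let mut repr = OwnedRepr::from(vec![1, 2, 3, 4]);
    //     unsafe { repr.set_len(2) }; // ok: the first two elements are valid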
+ #[allow(dead_code)] pub(crate) unsafe fn set_len(&mut self, new_len: usize) { debug_assert!(new_len <= self.capacity); self.len = new_len; } - fn take_as_vec(&mut self) -> Vec { + pub(crate) fn take_as_vec(&mut self) -> Vec { let capacity = self.capacity; let len = self.len; @@ -66,10 +99,6 @@ impl OwnedRepr { unsafe { Vec::from_raw_parts(self.ptr.as_ptr(), len, capacity) } } - - pub(crate) fn into_vec(self) -> Vec { - ManuallyDrop::new(self).take_as_vec() - } } impl Clone for OwnedRepr @@ -115,13 +144,42 @@ impl Drop for OwnedRepr { } } -unsafe impl Send for OwnedRepr {} +unsafe impl Data for OwnedRepr { + #[inline] + fn into_owned(self_: BaseTensor) -> Tensor + where + A: Clone, + { + self_ + } -unsafe impl Sync for OwnedRepr {} + #[inline] + fn try_into_owned_nocopy( + self_: BaseTensor, + ) -> Result, BaseTensor> { + Ok(self_) + } -impl From> for OwnedRepr { - fn from(vec: Vec) -> Self { - Self::from_vec(vec) + fn to_shared(self_: &BaseTensor) -> ArcTensor + where + Self::Elem: Clone, + { + // clone to shared + self_.to_owned().into_shared() + } +} + +unsafe impl DataMut for OwnedRepr {} + +unsafe impl DataOwned for OwnedRepr { + type MaybeUninit = OwnedRepr>; + + fn new(elements: Vec) -> Self { + OwnedRepr::from(elements) + } + + fn into_shared(self) -> OwnedArcRepr { + OwnedArcRepr(Arc::new(self)) } } @@ -137,3 +195,63 @@ unsafe impl RawData for OwnedRepr { private_impl! {} } + +unsafe impl RawDataMut for OwnedRepr { + fn try_ensure_unique(_: &mut BaseTensor) + where + Self: Sized, + { + } + + fn try_is_unique(&mut self) -> Option { + Some(true) + } +} + +unsafe impl RawDataClone for OwnedRepr +where + A: Clone, +{ + unsafe fn clone_with_ptr(&self, ptr: NonNull) -> (Self, NonNull) { + let mut u = self.clone(); + let mut new_ptr = u.as_nonnull_mut(); + if mem::size_of::() != 0 { + let our_off = + (ptr.as_ptr() as isize - self.as_ptr() as isize) / mem::size_of::() as isize; + new_ptr = PointerExt::offset(new_ptr, our_off); + } + (u, new_ptr) + } + + unsafe fn clone_from_with_ptr( + &mut self, + other: &Self, + ptr: NonNull, + ) -> NonNull { + let our_off = if mem::size_of::() != 0 { + (ptr.as_ptr() as isize - other.as_ptr() as isize) / mem::size_of::() as isize + } else { + 0 + }; + self.clone_from(other); + PointerExt::offset(self.as_nonnull_mut(), our_off) + } +} + +impl RawDataSubst for OwnedRepr { + type Output = OwnedRepr; + + unsafe fn data_subst(self) -> Self::Output { + self.data_subst() + } +} + +unsafe impl Send for OwnedRepr {} + +unsafe impl Sync for OwnedRepr {} + +impl From> for OwnedRepr { + fn from(vec: Vec) -> Self { + Self::from_vec(vec) + } +} diff --git a/tensor/src/data/repr/shared.rs b/tensor/src/data/repr/shared.rs new file mode 100644 index 00000000..60a2a332 --- /dev/null +++ b/tensor/src/data/repr/shared.rs @@ -0,0 +1,138 @@ +/* + Appellation: shared + Contrib: FL03 +*/ +use crate::data::repr::OwnedRepr; +use crate::data::specs::*; +use crate::data::{ArcTensor, BaseTensor, Tensor}; +#[cfg(not(feature = "std"))] +use alloc::sync::Arc; +use core::mem::MaybeUninit; +use core::ptr::NonNull; +use rawpointer::PointerExt; +#[cfg(feature = "std")] +use std::sync::Arc; + +#[derive(Debug)] +pub struct OwnedArcRepr(pub(crate) Arc>); + +impl Clone for OwnedArcRepr { + fn clone(&self) -> Self { + OwnedArcRepr(self.0.clone()) + } +} + +unsafe impl Data for OwnedArcRepr { + fn into_owned(self_: BaseTensor) -> crate::data::Tensor + where + Self::Elem: Clone, + { + // Self::ensure_unique(&mut self_); + let data = Arc::try_unwrap(self_.data.0).ok().unwrap(); + // safe because 
data is equivalent + unsafe { BaseTensor::from_data_ptr(data, self_.ptr).with_layout(self_.layout) } + } + + fn try_into_owned_nocopy( + self_: BaseTensor, + ) -> Result, BaseTensor> { + match Arc::try_unwrap(self_.data.0) { + Ok(owned_data) => unsafe { + // Safe because the data is equivalent. + Ok(BaseTensor::from_data_ptr(owned_data, self_.ptr).with_layout(self_.layout)) + }, + Err(arc_data) => unsafe { + // Safe because the data is equivalent; we're just + // reconstructing `self_`. + Err(BaseTensor::from_data_ptr(OwnedArcRepr(arc_data), self_.ptr) + .with_layout(self_.layout)) + }, + } + } + + #[allow(clippy::wrong_self_convention)] + fn to_shared(self_: &BaseTensor) -> ArcTensor + where + Self::Elem: Clone, + { + // to shared using clone of OwnedArcRepr without clone of raw data. + self_.clone() + } +} + +unsafe impl DataMut for OwnedArcRepr where A: Clone {} + +unsafe impl DataOwned for OwnedArcRepr { + type MaybeUninit = OwnedArcRepr>; + + fn new(elements: Vec) -> Self { + OwnedArcRepr(Arc::new(OwnedRepr::from(elements))) + } + + fn into_shared(self) -> OwnedArcRepr { + self + } +} + +unsafe impl DataShared for OwnedArcRepr {} + +unsafe impl RawData for OwnedArcRepr { + type Elem = A; + + fn _is_pointer_inbounds(&self, self_ptr: *const Self::Elem) -> bool { + self.0._is_pointer_inbounds(self_ptr) + } + + private_impl! {} +} + +unsafe impl RawDataClone for OwnedArcRepr { + unsafe fn clone_with_ptr(&self, ptr: NonNull) -> (Self, NonNull) { + // pointer is preserved + (self.clone(), ptr) + } +} + +// NOTE: Copy on write +unsafe impl RawDataMut for OwnedArcRepr +where + A: Clone, +{ + fn try_ensure_unique(self_: &mut BaseTensor) + where + Self: Sized, + { + if Arc::get_mut(&mut self_.data.0).is_some() { + return; + } + if self_.size() <= self_.data.0.len() / 2 { + // Clone only the visible elements if the current view is less than + // half of backing data. + *self_ = self_.to_owned().into_shared(); + return; + } + let rcvec = &mut self_.data.0; + let a_size = core::mem::size_of::() as isize; + let our_off = if a_size != 0 { + (self_.ptr.as_ptr() as isize - rcvec.as_ptr() as isize) / a_size + } else { + 0 + }; + let rvec = Arc::make_mut(rcvec); + unsafe { + self_.ptr = PointerExt::offset(rvec.as_nonnull_mut(), our_off); + } + } + + fn try_is_unique(&mut self) -> Option { + Some(Arc::get_mut(&mut self.0).is_some()) + } +} + +impl RawDataSubst for OwnedArcRepr { + type Output = OwnedArcRepr; + + unsafe fn data_subst(self) -> Self::Output { + OwnedArcRepr(Arc::from_raw(Arc::into_raw(self.0) as *const OwnedRepr)) + } +} diff --git a/tensor/src/data/repr/view.rs b/tensor/src/data/repr/view.rs new file mode 100644 index 00000000..b39c6a06 --- /dev/null +++ b/tensor/src/data/repr/view.rs @@ -0,0 +1,39 @@ +/* + Appellation: view + Contrib: FL03 +*/ +use core::marker::PhantomData; + +/// Array pointer’s representation. +/// +/// *Don’t use this type directly—use the type aliases +/// [`RawArrayView`] / [`RawArrayViewMut`] for the array type!* +#[derive(Copy, Clone)] +// This is just a marker type, to carry the mutability and element type. +pub struct RawViewRepr { + ptr: PhantomData, +} + +impl RawViewRepr { + #[inline(always)] + const fn new() -> Self { + RawViewRepr { ptr: PhantomData } + } +} + +/// Array view’s representation. +/// +/// *Don’t use this type directly—use the type aliases +/// [`ArrayView`] / [`ArrayViewMut`] for the array type!* +#[derive(Copy, Clone)] +// This is just a marker type, to carry the lifetime parameter. 
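// [editor's note] As in ndarray, the type-parameter slot carries both lifetime
// and mutability, so a pair of hypothetical aliases (not defined in this patch)
// would distinguish shared from exclusive views at zero runtime cost:
//
//     type TensorView<'a, A> = BaseTensor<ViewRepr<&'a A>>;
//     type TensorViewMut<'a, A> = BaseTensor<ViewRepr<&'a mut A>>;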
+pub struct ViewRepr { + life: PhantomData, +} + +impl ViewRepr { + #[inline(always)] + const fn new() -> Self { + ViewRepr { life: PhantomData } + } +} diff --git a/tensor/src/data/specs.rs b/tensor/src/data/specs.rs new file mode 100644 index 00000000..100cec36 --- /dev/null +++ b/tensor/src/data/specs.rs @@ -0,0 +1,154 @@ +/* + Appellation: specs + Contrib: FL03 +*/ +use crate::data::repr::OwnedArcRepr; +use crate::data::{ArcTensor, BaseTensor, Tensor}; +use core::mem::MaybeUninit; +use core::ptr::NonNull; + +/// Array representation trait. +/// +/// For an array with elements that can be accessed with safe code. +/// +/// ***Internal trait, see `RawData`.*** +#[allow(clippy::missing_safety_doc)] // not implementable downstream +pub unsafe trait Data: RawData { + /// Converts the array to a uniquely owned array, cloning elements if necessary. + #[doc(hidden)] + #[allow(clippy::wrong_self_convention)] + fn into_owned(self_: BaseTensor) -> Tensor + where + Self::Elem: Clone; + + /// Converts the array into `Array` if this is possible without + /// cloning the array elements. Otherwise, returns `self_` unchanged. + #[doc(hidden)] + fn try_into_owned_nocopy( + self_: BaseTensor, + ) -> Result, BaseTensor>; + + /// Return a shared ownership (copy on write) array based on the existing one, + /// cloning elements if necessary. + #[doc(hidden)] + #[allow(clippy::wrong_self_convention)] + fn to_shared(self_: &BaseTensor) -> ArcTensor + where + Self::Elem: Clone; +} + +#[allow(clippy::missing_safety_doc)] // not implementable downstream +pub unsafe trait DataMut: Data + RawDataMut { + /// Ensures that the array has unique access to its data. + #[doc(hidden)] + #[inline] + fn ensure_unique(self_: &mut BaseTensor) + where + Self: Sized, + { + Self::try_ensure_unique(self_) + } + + /// Returns whether the array has unique access to its data. + #[doc(hidden)] + #[inline] + #[allow(clippy::wrong_self_convention)] // mut needed for Arc types + fn is_unique(&mut self) -> bool { + self.try_is_unique().unwrap() + } +} + +#[allow(clippy::missing_safety_doc)] // not implementable downstream +pub unsafe trait DataOwned: Data { + /// Corresponding owned data with MaybeUninit elements + type MaybeUninit: DataOwned> + + RawDataSubst; + #[doc(hidden)] + fn new(elements: Vec) -> Self; + + /// Converts the data representation to a shared (copy on write) + /// representation, without any copying. + #[doc(hidden)] + fn into_shared(self) -> OwnedArcRepr; +} + +/// Array representation trait. +/// +/// A representation that is a lightweight view. +/// +/// ***Internal trait, see `Data`.*** +#[allow(clippy::missing_safety_doc)] // not implementable downstream +pub unsafe trait DataShared: Clone + Data + RawDataClone {} + +#[allow(clippy::missing_safety_doc)] +pub unsafe trait RawData: Sized { + type Elem; + + #[doc(hidden)] + fn _is_pointer_inbounds(&self, ptr: *const Self::Elem) -> bool; + + private_decl! {} +} + +/// Array representation trait. +/// +/// For an array with writable elements. +/// +/// ***Internal trait, see `RawData`.*** +#[allow(clippy::missing_safety_doc)] // not implementable downstream +pub unsafe trait RawDataMut: RawData { + /// If possible, ensures that the array has unique access to its data. + /// + /// The implementer must ensure that if the input is contiguous, then the + /// output has the same strides as input. + /// + /// Additionally, if `Self` provides safe mutable access to array elements, + /// then this method **must** panic or ensure that the data is unique. 
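    // [editor's sketch] For the Arc-backed repr this amounts to copy-on-write;
    // roughly:
    //
    //     if Arc::get_mut(&mut data).is_none() {
    //         data = Arc::new((*data).clone()); // clone only when actually shared
    //     }
    //
    // which is what the `OwnedArcRepr` impl earlier in this patch does via
    // `Arc::make_mut` (plus a cheaper partial-view copy path).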
+    #[doc(hidden)]
+    fn try_ensure_unique(_: &mut BaseTensor<Self>)
+    where
+        Self: Sized;
+
+    /// If possible, returns whether the array has unique access to its data.
+    ///
+    /// If `Self` provides safe mutable access to array elements, then it
+    /// **must** return `Some(_)`.
+    #[doc(hidden)]
+    fn try_is_unique(&mut self) -> Option<bool>;
+}
+
+/// Array representation trait.
+///
+/// An array representation that can be cloned.
+///
+/// ***Internal trait, see `RawData`.***
+#[allow(clippy::missing_safety_doc)] // not implementable downstream
+pub unsafe trait RawDataClone: RawData {
+    #[doc(hidden)]
+    /// Unsafe because `ptr` must point inside the current storage.
+    unsafe fn clone_with_ptr(&self, ptr: NonNull<Self::Elem>) -> (Self, NonNull<Self::Elem>);
+
+    #[doc(hidden)]
+    unsafe fn clone_from_with_ptr(
+        &mut self,
+        other: &Self,
+        ptr: NonNull<Self::Elem>,
+    ) -> NonNull<Self::Elem> {
+        let (data, ptr) = other.clone_with_ptr(ptr);
+        *self = data;
+        ptr
+    }
+}
+
+pub trait RawDataSubst<A>: RawData {
+    /// The resulting array storage of the same kind but substituted element type
+    type Output: RawData<Elem = A>;
+
+    /// Unsafely translate the data representation from one element
+    /// representation to another.
+    ///
+    /// ## Safety
+    ///
+    /// Caller must ensure the two types have the same representation.
+    unsafe fn data_subst(self) -> Self::Output;
+}
diff --git a/tensor/src/impls/create.rs b/tensor/src/impls/create.rs
index 38b29f2f..50e53658 100644
--- a/tensor/src/impls/create.rs
+++ b/tensor/src/impls/create.rs
@@ -20,7 +20,7 @@ where
     /// Create a tensor, from the given shape, filled with the given value
     pub fn fill(shape: impl IntoShape, value: T) -> Self {
         let shape = shape.into_shape();
-        let store = vec![value; shape.elements()];
+        let store = vec![value; shape.size()];
         from_vec(false.into(), shape, store)
     }
diff --git a/tensor/src/impls/grad.rs b/tensor/src/impls/grad.rs
index bb0f8d31..37bef4fa 100644
--- a/tensor/src/impls/grad.rs
+++ b/tensor/src/impls/grad.rs
@@ -5,7 +5,7 @@
 use crate::actions::grad::GradStore;
 use crate::prelude::{Scalar, TensorId, TensorOp, TensorResult};
 use crate::TensorBase;
-use acme::prelude::{BinaryOp, Store};
+use acme::prelude::{BinaryOp, Store, UnaryOp};

 pub(crate) type Visited = std::collections::HashMap<TensorId, bool>;

 impl<T> TensorBase<T>
 where
     T: Scalar,
 {
-    fn sorted_nodes(&self) -> Vec<&TensorBase<T>> {
-        // The vec of sorted nodes is passed as an owned value rather than a mutable reference
-        // to get around some lifetime limitations.
+    /// [TensorBase::toposort] returns a topologically sorted list of the nodes in the graph.
+    fn toposort(&self) -> Vec<&TensorBase<T>> {
+        // Here, the sorted nodes are passed as an owned value rather than as a mutable reference to work around some lifetime limitations.
         fn walk<'a, T>(
             node: &'a TensorBase<T>,
             nodes: Vec<&'a TensorBase<T>>,
@@ -26,11 +26,12 @@
             }
             // track the gradient of the current node
             let mut track = false;
+            // recursively call on the children nodes
            let mut nodes = if node.is_variable() {
                // Do not call recursively on the "leaf" nodes.
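                // A variable is a leaf of the op graph: there are no inputs
                // to recurse into, but its gradient must still be tracked.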
track = true; nodes - } else if let Some(op) = node.op() { + } else if let Some(op) = node.op().op() { match op { TensorOp::Binary(lhs, rhs, _kind) => { let (tg, nodes) = walk(lhs, nodes, visited); @@ -39,6 +40,11 @@ where track |= tg; nodes } + TensorOp::Unary(a, _kind) => { + let (tg, nodes) = walk(a, nodes, visited); + track |= tg; + nodes + } _ => nodes, } } else { @@ -56,18 +62,15 @@ where nodes } - pub fn grad(&self) -> TensorResult> - where - T: std::fmt::Debug, - { + pub fn grad(&self) -> TensorResult> { // get the sorted nodes - let sorted = self.sorted_nodes(); + let sorted = self.toposort(); // initialize a new gradient store let mut store = GradStore::new(); // insert the gradient w.r.t. the current node store.insert(self.id(), self.ones_like()); - for node in sorted { + for node in sorted.iter() { if node.is_variable() { continue; } @@ -75,14 +78,18 @@ where let grad = store.remove(&node.id()).expect("Gradient not found"); let grad = grad.detach(); // handle the different types of operations - if let Some(op) = &self.op { + if let Some(op) = &*node.op { match op { TensorOp::Binary(lhs, rhs, kind) => match kind { BinaryOp::Add => { - let a = store.entry(lhs.id()).or_insert(lhs.zeros_like()); - *a = &*a + &grad; - let b = store.entry(rhs.id()).or_insert(rhs.zeros_like()); - *b = &*b + &grad; + *store.entry(lhs.id()).or_insert(lhs.zeros_like()) += &grad; + *store.entry(rhs.id()).or_insert(rhs.zeros_like()) += &grad; + } + BinaryOp::Div => { + *store.entry(lhs.id()).or_insert(lhs.zeros_like()) += + &grad / rhs.as_ref(); + *store.entry(rhs.id()).or_insert(rhs.zeros_like()) -= + &grad * lhs.as_ref() / (rhs.as_ref() * rhs.as_ref()); } BinaryOp::Mul => { *store.entry(lhs.id()).or_insert(lhs.zeros_like()) += @@ -90,9 +97,24 @@ where *store.entry(rhs.id()).or_insert(rhs.zeros_like()) += &grad * lhs.as_ref(); } + BinaryOp::Sub => { + *store.entry(lhs.id()).or_insert(lhs.zeros_like()) += &grad; + *store.entry(rhs.id()).or_insert(rhs.zeros_like()) -= &grad; + } _ => todo!(), }, - TensorOp::Unary(_a, kind) => match kind { + TensorOp::Unary(val, kind) => match kind { + UnaryOp::Cos => { + *store.entry(val.id()).or_insert(val.zeros_like()) -= + &grad * val.clone().sin(); + } + UnaryOp::Neg => { + *store.entry(val.id()).or_insert(val.zeros_like()) -= &grad; + } + UnaryOp::Sin => { + *store.entry(val.id()).or_insert(val.zeros_like()) += + &grad * val.clone().cos(); + } _ => todo!(), }, _ => {} diff --git a/tensor/src/impls/linalg.rs b/tensor/src/impls/linalg.rs index 08e8b06a..cedfbc97 100644 --- a/tensor/src/impls/linalg.rs +++ b/tensor/src/impls/linalg.rs @@ -16,7 +16,7 @@ where fn matmul(&self, other: &Self) -> Self { let shape = self.shape().matmul_shape(other.shape()).unwrap(); - let mut result = vec![T::zero(); shape.elements()]; + let mut result = vec![T::zero(); shape.size()]; for i in 0..self.shape()[0] { for j in 0..other.shape()[1] { @@ -26,7 +26,7 @@ where } } } - let op = TensorOp::Matmul(Box::new(self.clone()), Box::new(other.clone())); + let op = TensorOp::matmul(self.clone(), other.clone()); from_vec_with_op(false, op, shape, result) } } diff --git a/tensor/src/impls/ops/binary.rs b/tensor/src/impls/ops/binary.rs index 02587756..5b10d5a8 100644 --- a/tensor/src/impls/ops/binary.rs +++ b/tensor/src/impls/ops/binary.rs @@ -5,6 +5,7 @@ use crate::prelude::TensorOp; use crate::tensor::{from_vec_with_op, TensorBase}; use acme::ops::binary::BinaryOp; +use num::traits::Pow; macro_rules! cmp { (ne: $lhs:expr, $rhs:expr) => { @@ -14,6 +15,34 @@ macro_rules! 
cmp { }; } +impl Pow for TensorBase +where + T: Copy + Pow, +{ + type Output = Self; + + fn pow(self, exp: T) -> Self::Output { + let shape = self.shape().clone(); + let store = self.data().iter().map(|a| a.pow(exp)).collect(); + let op = TensorOp::binary_scalar(self, exp, BinaryOp::Pow); + from_vec_with_op(false, op, shape, store) + } +} + +impl<'a, T> Pow for &'a TensorBase +where + T: Copy + Pow, +{ + type Output = TensorBase; + + fn pow(self, exp: T) -> Self::Output { + let shape = self.shape().clone(); + let store = self.data().iter().map(|a| a.pow(exp)).collect(); + let op = TensorOp::binary_scalar(self.clone(), exp, BinaryOp::Pow); + from_vec_with_op(false, op, shape, store) + } +} + macro_rules! impl_arithmetic { ($trait:ident, $method:ident, $op:tt) => { impl_scalar_arith!($trait, $method, $op); @@ -28,7 +57,7 @@ macro_rules! impl_arithmetic { cmp!(ne: self.shape(), other.shape()); let shape = self.shape().clone(); let store = self.data().iter().zip(other.data().iter()).map(|(a, b)| *a $op *b).collect(); - let op = TensorOp::Binary(Box::new(self), Box::new(other), BinaryOp::$trait); + let op = TensorOp::binary(self, other, BinaryOp::$trait); from_vec_with_op(false, op, shape, store) } } @@ -45,7 +74,7 @@ macro_rules! impl_arithmetic { } let shape = self.shape().clone(); let store = self.data().iter().zip(other.data().iter()).map(|(a, b)| *a $op *b).collect(); - let op = TensorOp::Binary(Box::new(self), Box::new(other.clone()), BinaryOp::$trait); + let op = TensorOp::binary(self, other.clone(), BinaryOp::$trait); from_vec_with_op(false, op, shape, store) } } @@ -62,7 +91,7 @@ macro_rules! impl_arithmetic { } let shape = self.shape().clone(); let store = self.data().iter().zip(other.data().iter()).map(|(a, b)| *a $op *b).collect(); - let op = TensorOp::Binary(Box::new(self.clone()), Box::new(other), BinaryOp::$trait); + let op = TensorOp::binary(self.clone(), other, BinaryOp::$trait); from_vec_with_op(false, op, shape, store) } } @@ -79,7 +108,7 @@ macro_rules! impl_arithmetic { } let shape = self.shape().clone(); let store = self.data().iter().zip(other.data().iter()).map(|(a, b)| *a $op *b).collect(); - let op = TensorOp::Binary(Box::new(self.clone()), Box::new(other.clone()), BinaryOp::$trait); + let op = TensorOp::binary(self.clone(), other.clone(), BinaryOp::$trait); from_vec_with_op(false, op, shape, store) } } @@ -98,7 +127,7 @@ macro_rules! impl_scalar_arith { fn $method(self, other: T) -> Self::Output { let shape = self.shape().clone(); let store = self.data().iter().map(|a| *a $op other).collect(); - let op = TensorOp::BinaryScalar(Box::new(self), other, BinaryOp::$trait); + let op = TensorOp::binary_scalar(self, other, BinaryOp::$trait); from_vec_with_op(false, op, shape, store) } } @@ -112,7 +141,7 @@ macro_rules! impl_scalar_arith { fn $method(self, other: T) -> Self::Output { let shape = self.shape().clone(); let store = self.data().iter().map(|a| *a $op other).collect(); - let op = TensorOp::BinaryScalar(Box::new(self.clone()), other, BinaryOp::$trait); + let op = TensorOp::binary_scalar(self.clone(), other, BinaryOp::$trait); from_vec_with_op(false, op, shape, store) } } @@ -129,7 +158,7 @@ macro_rules! 
impl_assign_op { cmp!(ne: self.shape(), other.shape()); let shape = self.shape().clone(); let store = self.data().iter().zip(other.data().iter()).map(|(a, b)| *a $op *b).collect(); - let op = TensorOp::Binary(Box::new(self.clone()), Box::new(other), BinaryOp::$inner); + let op = TensorOp::binary(self.clone(), other, BinaryOp::$inner); *self = from_vec_with_op(false, op, shape, store); } @@ -143,7 +172,7 @@ macro_rules! impl_assign_op { cmp!(ne: self.shape(), other.shape()); let shape = self.shape().clone(); let store = self.data().iter().zip(other.data().iter()).map(|(a, b)| *a $op *b).collect(); - let op = TensorOp::Binary(Box::new(self.clone()), Box::new(other.clone()), BinaryOp::$inner); + let op = TensorOp::binary(self.clone(), other.clone(), BinaryOp::$inner); *self = from_vec_with_op(false, op, shape, store); } diff --git a/tensor/src/impls/ops/unary.rs b/tensor/src/impls/ops/unary.rs index 1e23d951..22abce89 100644 --- a/tensor/src/impls/ops/unary.rs +++ b/tensor/src/impls/ops/unary.rs @@ -15,7 +15,7 @@ where fn neg(self) -> Self::Output { let shape = self.shape().clone(); let store = self.data().iter().copied().map(|a| -a).collect(); - let op = TensorOp::Unary(Box::new(self), UnaryOp::Neg); + let op = TensorOp::unary(self, UnaryOp::Neg); from_vec_with_op(false, op, shape, store) } } @@ -29,7 +29,7 @@ where fn neg(self) -> Self::Output { let shape = self.shape().clone(); let store = self.data().iter().copied().map(|a| -a).collect(); - let op = TensorOp::Unary(Box::new(self.clone()), UnaryOp::Neg); + let op = TensorOp::unary(self.clone(), UnaryOp::Neg); from_vec_with_op(false, op, shape, store) } } @@ -39,7 +39,7 @@ macro_rules! impl_unary_op { pub fn $method(self) -> Self { let shape = self.shape().clone(); let store = self.store.iter().copied().map(|v| v.$method()).collect(); - let op = TensorOp::::Unary(Box::new(self), UnaryOp::$variant); + let op = TensorOp::unary(self, UnaryOp::$variant); from_vec_with_op(false, op, shape, store) } }; @@ -47,7 +47,7 @@ macro_rules! 
impl_unary_op { pub fn $method(self) -> Self { let shape = self.shape().clone(); let store = self.store.iter().copied().map($f).collect(); - let op = TensorOp::::Unary(Box::new(self), UnaryOp::$variant); + let op = TensorOp::unary(self, UnaryOp::$variant); from_vec_with_op(false, op, shape, store) } }; @@ -63,7 +63,7 @@ where { let shape = self.shape().clone(); let store = self.store.iter().copied().map(|v| v.abs()).collect(); - let op = TensorOp::Unary(Box::new(self), UnaryOp::Abs); + let op = TensorOp::unary(self, UnaryOp::Abs); from_vec_with_op(false, op, shape, store) } diff --git a/tensor/src/impls/reshape.rs b/tensor/src/impls/reshape.rs index df6b4d78..009200f5 100644 --- a/tensor/src/impls/reshape.rs +++ b/tensor/src/impls/reshape.rs @@ -17,10 +17,18 @@ where unimplemented!() } + pub fn pad(&self, shape: impl IntoShape, _with: T) -> Self { + let shape = shape.into_shape(); + + let _diff = *self.shape().rank() - *shape.rank(); + + unimplemented!() + } + pub fn reshape(self, shape: impl IntoShape) -> TensorResult { let mut tensor = self; let shape = shape.into_shape(); - if tensor.elements() != shape.elements() { + if tensor.elements() != shape.size() { return Err(ShapeError::MismatchedElements.into()); } diff --git a/tensor/src/lib.rs b/tensor/src/lib.rs index 20ba2ca1..d52fca9e 100644 --- a/tensor/src/lib.rs +++ b/tensor/src/lib.rs @@ -47,7 +47,7 @@ pub mod prelude { #[doc(inline)] pub use crate::actions::prelude::*; #[doc(inline)] - pub use crate::data::*; + pub use crate::data::prelude::*; #[doc(inline)] pub use crate::error::*; #[doc(inline)] diff --git a/tensor/src/ops/backprop.rs b/tensor/src/ops/backprop.rs index e20c4972..dea5aa23 100644 --- a/tensor/src/ops/backprop.rs +++ b/tensor/src/ops/backprop.rs @@ -3,6 +3,10 @@ Contrib: FL03 */ use super::TensorOp; +use crate::TensorBase; +use acme::prelude::BinaryOp; +use core::borrow::Borrow; +use core::ops::{Deref, DerefMut}; #[derive(Clone, Debug)] pub struct BackpropOp(Option>); @@ -16,6 +20,14 @@ impl BackpropOp { BackpropOp(None) } + pub fn binary(lhs: TensorBase, rhs: TensorBase, kind: BinaryOp) -> Self { + BackpropOp(Some(TensorOp::binary(lhs, rhs, kind))) + } + + pub fn is_none(&self) -> bool { + self.0.is_none() + } + pub fn op(&self) -> Option<&TensorOp> { self.0.as_ref() } @@ -29,12 +41,32 @@ impl BackpropOp { } } +impl Borrow>> for BackpropOp { + fn borrow(&self) -> &Option> { + &self.0 + } +} + impl Default for BackpropOp { fn default() -> Self { Self::none() } } +impl Deref for BackpropOp { + type Target = Option>; + + fn deref(&self) -> &Self::Target { + &self.0 + } +} + +impl DerefMut for BackpropOp { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.0 + } +} + impl From>> for BackpropOp { fn from(op: Option>) -> Self { BackpropOp(op) diff --git a/tensor/src/shape/dim/axis.rs b/tensor/src/shape/dim/axis.rs index d54f84e3..2be8ca9b 100644 --- a/tensor/src/shape/dim/axis.rs +++ b/tensor/src/shape/dim/axis.rs @@ -4,7 +4,7 @@ */ //! # Axis //! -//! +//! An [Axis] is used to represent a dimension in a tensor. 
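+//!
+//! A minimal usage sketch (paths assumed via the crate prelude):
+//!
+//! ```ignore
+//! use acme_tensor::prelude::Axis;
+//!
+//! let axis = Axis::new(1); // the second dimension of a tensor
+//! assert_eq!(axis.axis(), 1);
+//! assert_eq!(usize::from(axis), 1); // round-trip through the `From` impls below
+//! ```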
#[cfg(feature = "serde")] use serde::{Deserialize, Serialize}; use std::ops::Deref; @@ -18,6 +18,10 @@ impl Axis { Axis(axis) } + pub fn into_inner(self) -> usize { + self.0 + } + pub fn axis(&self) -> usize { self.0 } @@ -41,4 +45,16 @@ impl std::fmt::Display for Axis { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { write!(f, "{}", self.0) } -} \ No newline at end of file +} + +impl From for Axis { + fn from(axis: usize) -> Self { + Axis(axis) + } +} + +impl From for usize { + fn from(axis: Axis) -> Self { + axis.0 + } +} diff --git a/tensor/src/shape/mod.rs b/tensor/src/shape/mod.rs index ee55c46a..680bf994 100644 --- a/tensor/src/shape/mod.rs +++ b/tensor/src/shape/mod.rs @@ -43,7 +43,7 @@ mod tests { let mut shape = Shape::default(); shape.extend([1, 1, 1]); assert_eq!(shape, Shape::new(vec![1, 1, 1])); - assert_eq!(shape.elements(), 1); + assert_eq!(shape.size(), 1); assert_eq!(*shape.rank(), 3); } } diff --git a/tensor/src/shape/shape.rs b/tensor/src/shape/shape.rs index 3ccf3ad2..c38e6d0e 100644 --- a/tensor/src/shape/shape.rs +++ b/tensor/src/shape/shape.rs @@ -48,10 +48,6 @@ impl Shape { &self.0 } - pub fn elements(&self) -> usize { - self.0.iter().product() - } - pub fn insert(&mut self, index: usize, dim: usize) { self.0.insert(index, dim) } @@ -70,19 +66,6 @@ impl Shape { } true } - - pub fn push(&mut self, dim: usize) { - self.0.push(dim) - } - - pub fn rank(&self) -> Rank { - self.0.len().into() - } - - pub fn remove(&mut self, index: usize) -> usize { - self.0.remove(index) - } - pub fn ncols(&self) -> usize { if self.len() >= 2 { self.0[1] @@ -93,6 +76,10 @@ impl Shape { } } + pub fn ndim(&self) -> usize { + self.0.len() + } + pub fn nrows(&self) -> usize { if self.len() >= 1 { *self.0.first().unwrap() @@ -101,10 +88,26 @@ impl Shape { } } + pub fn push(&mut self, dim: usize) { + self.0.push(dim) + } + + pub fn rank(&self) -> Rank { + self.0.len().into() + } + + pub fn remove(&mut self, index: usize) -> usize { + self.0.remove(index) + } + pub fn set(&mut self, index: usize, dim: usize) { self.0[index] = dim } + pub fn size(&self) -> usize { + self.0.iter().product() + } + pub(crate) fn stride_contiguous(&self) -> Vec { let mut stride: Vec<_> = self .0 diff --git a/tensor/src/shape/stride.rs b/tensor/src/shape/stride.rs index f0a00e4a..8a298dfe 100644 --- a/tensor/src/shape/stride.rs +++ b/tensor/src/shape/stride.rs @@ -2,5 +2,63 @@ Appellation: stride Contrib: FL03 */ +use core::ops::{Deref, DerefMut}; + +pub trait IntoStride { + fn into_stride(self) -> Stride; +} pub struct Stride(pub Vec); + +impl Stride { + pub fn ndim(&self) -> usize { + self.0.len() + } +} + +impl AsRef<[usize]> for Stride { + fn as_ref(&self) -> &[usize] { + &self.0 + } +} + +impl AsMut<[usize]> for Stride { + fn as_mut(&mut self) -> &mut [usize] { + &mut self.0 + } +} + +impl Deref for Stride { + type Target = [usize]; + + fn deref(&self) -> &Self::Target { + &self.0 + } +} + +impl DerefMut for Stride { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.0 + } +} + +impl Extend for Stride { + fn extend>(&mut self, iter: I) { + self.0.extend(iter) + } +} + +impl FromIterator for Stride { + fn from_iter>(iter: I) -> Self { + Stride(Vec::from_iter(iter)) + } +} + +impl IntoIterator for Stride { + type Item = usize; + type IntoIter = std::vec::IntoIter; + + fn into_iter(self) -> Self::IntoIter { + self.0.into_iter() + } +} diff --git a/tensor/src/specs/ndtensor.rs b/tensor/src/specs/ndtensor.rs index c3dc536a..03bba7f0 100644 --- a/tensor/src/specs/ndtensor.rs +++ 
b/tensor/src/specs/ndtensor.rs
@@ -9,10 +9,6 @@ use crate::store::Layout;
 pub trait NdTensor {
     type Elem;

-    fn elements(&self) -> usize {
-        self.layout().elements()
-    }
-
     fn id(&self) -> TensorId;

     fn layout(&self) -> &Layout;
@@ -25,6 +21,10 @@ pub trait NdTensor {
         self.layout().shape()
     }

+    fn size(&self) -> usize {
+        self.shape().size()
+    }
+
     fn stride(&self) -> &[usize] {
         self.layout().stride()
     }
diff --git a/tensor/src/specs/scalar.rs b/tensor/src/specs/scalar.rs
index bce4b6e6..d7584629 100644
--- a/tensor/src/specs/scalar.rs
+++ b/tensor/src/specs/scalar.rs
@@ -4,7 +4,7 @@
 */
 use num::complex::Complex;
 use num::traits::real::Real;
-use num::traits::{Float, FromPrimitive, NumAssign, NumCast, NumOps};
+use num::traits::{Float, FromPrimitive, NumAssign, NumCast, NumOps, Pow};
 use std::iter::{Product, Sum};
 use std::ops::Neg;
diff --git a/tensor/src/store/layout.rs b/tensor/src/store/layout.rs
index 580d3a39..b406581e 100644
--- a/tensor/src/store/layout.rs
+++ b/tensor/src/store/layout.rs
@@ -46,16 +46,9 @@ impl Layout {
         }
     }

-    pub(crate) fn position(&self, coords: impl AsRef<[usize]>) -> usize {
-        let mut index = self.offset;
-        for (i, &coord) in coords.as_ref().iter().enumerate() {
-            index += coord * self.stride[i];
-        }
-        index
-    }
-
-    pub fn elements(&self) -> usize {
-        self.shape.elements()
+    pub fn ndim(&self) -> usize {
+        debug_assert_eq!(self.stride.len(), self.shape.ndim());
+        self.shape.ndim()
     }

     pub fn offset(&self) -> usize {
@@ -66,7 +59,22 @@
         &self.shape
     }

+    pub fn size(&self) -> usize {
+        self.shape.size()
+    }
+
     pub fn stride(&self) -> &[usize] {
         &self.stride
     }
 }
+
+// Internal methods
+impl Layout {
+    pub(crate) fn position(&self, coords: impl AsRef<[usize]>) -> usize {
+        let mut index = self.offset;
+        for (i, &coord) in coords.as_ref().iter().enumerate() {
+            index += coord * self.stride[i];
+        }
+        index
+    }
+}
diff --git a/tensor/src/store/storage.rs b/tensor/src/store/storage.rs
index feb823bc..3a5b435a 100644
--- a/tensor/src/store/storage.rs
+++ b/tensor/src/store/storage.rs
@@ -2,40 +2,89 @@
     Appellation: storage
     Contrib: FL03
 */
+use crate::prelude::{DataOwned, Layout, OwnedArcRepr, RawData, RawDataMut};
+use core::ptr::NonNull;

-#[derive(Clone, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)]
-pub struct Storage<T> {
-    pub(crate) data: Vec<T>,
+pub type ArcStore<A> = StoreBase<OwnedArcRepr<A>>;
+
+#[derive(Clone)]
+pub struct StoreBase<S>
+where
+    S: RawData,
+{
+    data: S,
+    layout: Layout,
+    ptr: NonNull<S::Elem>,
 }

-impl<T> Storage<T> {
-    pub fn new() -> Self {
-        Self { data: Vec::new() }
+impl<A, S> StoreBase<S>
+where
+    S: RawData<Elem = A>,
+{
+    #[inline(always)]
+    pub fn as_ptr(&self) -> *const A {
+        self.ptr.as_ptr() as *const A
     }

-    pub fn insert(&mut self, index: usize, value: T) {
-        self.data.insert(index, value);
+    /// Return a mutable pointer to the first element in the array.
+    ///
+    /// This method attempts to unshare the data. If `S: DataMut`, then the
+    /// data is guaranteed to be uniquely held on return.
+    ///
+    /// # Warning
+    ///
+    /// When accessing elements through this pointer, make sure to use strides
+    /// obtained *after* calling this method, since the process of unsharing
+    /// the data may change the strides.
+    #[inline(always)]
+    pub fn as_mut_ptr(&mut self) -> *mut A
+    where
+        S: RawDataMut,
+    {
+        // self.try_ensure_unique(); // for ArcArray
+        self.ptr.as_ptr()
     }

-    pub fn push(&mut self, value: T) {
-        self.data.push(value);
+    /// Without any copying, turn the tensor into a shared tensor.
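+    /// Only the `Arc` in [OwnedArcRepr] is allocated here; the element
+    /// buffer itself is moved, never cloned.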
+ pub fn into_shared(self) -> ArcStore + where + S: DataOwned, + { + let data = self.data.into_shared(); + // safe because: equivalent unmoved data, ptr and dims remain valid + // unsafe { Self::from_data_ptr(data, self.ptr).with_strides_dim(self.strides, self.dim) } + unsafe { StoreBase::from_data_ptr(data, self.ptr) } } - - pub fn get(&self, index: usize) -> Option<&T> { - self.data.get(index) + /// Return the number of elements in the tensor. + pub fn size(&self) -> usize { + self.layout.size() } +} - pub fn get_mut(&mut self, index: usize) -> Option<&mut T> { - self.data.get_mut(index) +// Internal methods +impl StoreBase +where + S: RawData, +{ + pub(crate) unsafe fn from_data_ptr(data: S, ptr: NonNull) -> Self { + let tensor = Self { + data, + layout: Layout::contiguous(0), + ptr, + }; + debug_assert!(tensor.pointer_is_inbounds()); + tensor } - pub fn remove(&mut self, index: usize) -> T { - self.data.remove(index) + pub(crate) fn pointer_is_inbounds(&self) -> bool { + self.data._is_pointer_inbounds(self.as_ptr()) } -} - -impl Default for Storage { - fn default() -> Self { - Self::new() + #[allow(dead_code)] + pub(crate) unsafe fn with_layout(self, layout: Layout) -> Self { + Self { + data: self.data, + layout, + ptr: self.ptr, + } } } diff --git a/tensor/src/tensor.rs b/tensor/src/tensor.rs index 76739605..513b5c1f 100644 --- a/tensor/src/tensor.rs +++ b/tensor/src/tensor.rs @@ -3,7 +3,7 @@ Contrib: FL03 */ // use crate::ops::TrackedOp; -use crate::prelude::{IntoShape, Rank, Shape, TensorId, TensorKind, TensorOp}; +use crate::prelude::{BackpropOp, IntoShape, Rank, Shape, TensorId, TensorKind, TensorOp}; use crate::store::Layout; use acme::prelude::BinaryOp; use std::ops::Index; @@ -11,7 +11,7 @@ use std::ops::Index; pub(crate) fn new( kind: TensorKind, - op: Option>, + op: BackpropOp, shape: impl IntoShape, store: Vec, ) -> TensorBase { @@ -25,7 +25,7 @@ pub(crate) fn new( } pub(crate) fn from_vec(kind: TensorKind, shape: impl IntoShape, store: Vec) -> TensorBase { - new(kind, None, shape, store) + new(kind, BackpropOp::none(), shape, store) } pub(crate) fn from_vec_with_op( @@ -34,7 +34,7 @@ pub(crate) fn from_vec_with_op( shape: impl IntoShape, store: Vec, ) -> TensorBase { - new(kind.into(), Some(op), shape, store) + new(kind.into(), BackpropOp::new(op), shape, store) } #[derive(Clone, Debug)] @@ -43,26 +43,26 @@ pub struct TensorBase { pub(crate) id: TensorId, pub(crate) kind: TensorKind, pub(crate) layout: Layout, - pub(crate) op: Option>, + pub(crate) op: BackpropOp, pub(crate) store: Vec, } impl TensorBase { pub fn new(kind: TensorKind, shape: impl IntoShape) -> Self { let shape = shape.into_shape(); - let store = Vec::with_capacity(shape.elements()); + let store = Vec::with_capacity(shape.size()); Self { id: TensorId::new(), kind, layout: Layout::contiguous(shape), - op: None, + op: BackpropOp::none(), store, } } pub fn from_vec( kind: TensorKind, - op: Option>, + op: BackpropOp, shape: impl IntoShape, store: Vec, ) -> Self { @@ -86,14 +86,14 @@ impl TensorBase { id: TensorId::new(), kind: TensorKind::Normal, layout: self.layout.clone(), - op: None, + op: BackpropOp::none(), store: self.store.clone(), } } } /// Returns the number of elements in the tensor. pub fn elements(&self) -> usize { - self.layout.elements() + self.layout.size() } /// Returns the unique identifier of the tensor. 
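     /// Constructors mint a fresh [TensorId]; the backward pass keys its
     /// gradient store entries on this id.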
pub const fn id(&self) -> TensorId { @@ -104,8 +104,8 @@ impl TensorBase { &self.layout } /// Get a reference to the operation of the tensor - pub fn op(&self) -> Option<&TensorOp> { - self.op.as_ref() + pub fn op(&self) -> &BackpropOp { + &self.op } /// Get an owned reference to the [Rank] of the tensor pub fn rank(&self) -> Rank { @@ -155,11 +155,7 @@ impl TensorBase { id: TensorId::new(), kind: self.kind, layout: self.layout.clone(), - op: Some(TensorOp::Binary( - Box::new(self.clone()), - Box::new(other.clone()), - op, - )), + op: BackpropOp::binary(self.clone(), other.clone(), op), store, } } diff --git a/tensor/src/utils.rs b/tensor/src/utils.rs index f2afe09d..d81ad2f4 100644 --- a/tensor/src/utils.rs +++ b/tensor/src/utils.rs @@ -18,7 +18,7 @@ where } let shape = lhs.shape().matmul_shape(rhs.shape()).unwrap(); - let mut result = vec![T::zero(); shape.elements()]; + let mut result = vec![T::zero(); shape.size()]; for i in 0..lhs.shape().nrows() { for j in 0..rhs.shape().ncols() { @@ -30,7 +30,7 @@ where } } } - let op = TensorOp::Matmul(Box::new(lhs.clone()), Box::new(rhs.clone())); + let op = TensorOp::matmul(lhs.clone(), rhs.clone()); let tensor = from_vec_with_op(false, op, shape, result); Ok(tensor) } @@ -44,7 +44,7 @@ where } let shape = lhs.shape().matmul_shape(rhs.shape()).unwrap(); - let mut result = vec![T::zero(); shape.elements()]; + let mut result = vec![T::zero(); shape.size()]; for i in 0..lhs.shape().nrows() { for j in 0..rhs.shape().ncols() { @@ -56,7 +56,7 @@ where } } } - let op = TensorOp::Matmul(Box::new(lhs.clone()), Box::new(rhs.clone())); + let op = TensorOp::matmul(lhs.clone(), rhs.clone()); let tensor = from_vec_with_op(false, op, shape, result); Ok(tensor) } diff --git a/tensor/tests/backward.rs b/tensor/tests/backward.rs index 08e24a05..5692f70a 100644 --- a/tensor/tests/backward.rs +++ b/tensor/tests/backward.rs @@ -6,6 +6,7 @@ extern crate acme_tensor as acme; use acme::prelude::Tensor; +use core::ops::Neg; #[test] fn test_backward() { @@ -34,26 +35,31 @@ fn test_addition_2() { let a = Tensor::::ones(shape).variable(); let b = Tensor::::ones(shape).variable(); let c = Tensor::::ones(shape).variable(); - let tmp = &a + &b; - println!("Tmp: {}", &tmp.id()); - let d = tmp + &c; + let d = &a + &b + &c; assert_eq!(&d, &Tensor::fill(shape, 3_f64)); - println!( - "*** Variables ***\nA: {}\nB: {}\nC: {}\n\n", - &a.id(), - &b.id(), - &c.id() - ); - println!("*** Outcomes ***\nD: {}", &d.id()); + let grad = d.grad().unwrap(); - println!("{:?}", &grad.keys()); for i in [a.id(), b.id(), c.id()].iter() { assert_eq!(grad[i], Tensor::ones(shape)); } } +#[test] +fn test_division() { + let shape = (2, 2); + + let a = Tensor::::ones(shape).variable(); + let b = Tensor::::fill(shape, 2_f64).variable(); + let c = &a / &b; + + let grad = c.grad().unwrap(); + + assert_eq!(grad[&a.id()], Tensor::fill(shape, 0.5)); + assert_eq!(grad[&b.id()], Tensor::fill(shape, -0.25)); +} + #[test] fn test_multiplication() { let shape = (2, 2); @@ -69,33 +75,30 @@ fn test_multiplication() { } #[test] -#[ignore = "Needs to be fixed"] -fn test_add_chain() { +fn test_subtraction() { let shape = (2, 2); let a = Tensor::::ones(shape).variable(); let b = Tensor::::fill(shape, 2_f64).variable(); - let c = &a + &b; - let d = &c + &a; + let c = &a - &b; - let grad = d.grad().unwrap(); - // println!("Gradient:\n\n{:?}\n\n", &grad); + let grad = c.grad().unwrap(); - assert_eq!(grad[&a.id()], Tensor::fill(shape, 2_f64)); - assert_eq!(grad[&b.id()], Tensor::ones(shape)); + assert_eq!(grad[&a.id()], 
Tensor::ones(shape)); + assert_eq!(grad[&b.id()], Tensor::ones(shape).neg()); } #[test] -#[ignore = "Needs to be fixed"] -fn test_add_mul() { +fn test_mixed() { let shape = (2, 2); + let a = Tensor::::ones(shape).variable(); - let b = Tensor::::ones(shape).variable(); - println!("*** Variables ***\nA: {}\nB: {}", a.id(), b.id()); - let c = &a + &b; - let d = &a * &c; - let grad = d.grad().unwrap(); + let b = Tensor::::fill(shape, 2_f64).variable(); - assert_eq!(grad[&a.id()], Tensor::fill(shape, 3_f64)); - assert_eq!(grad[&b.id()], Tensor::ones(shape)); + let res = &b * (&a + &b); + + let grad = res.grad().unwrap(); + + assert_eq!(grad[&a.id()], Tensor::fill(shape, 2_f64)); + assert_eq!(grad[&b.id()], Tensor::fill(shape, 5_f64)); } From 2c63441cf7e5edb6928bbed9a780946da1d7bee3 Mon Sep 17 00:00:00 2001 From: Joe McCain III Date: Thu, 28 Mar 2024 11:29:20 -0500 Subject: [PATCH 57/87] update Signed-off-by: Joe McCain III --- .github/workflows/crates.yml | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/.github/workflows/crates.yml b/.github/workflows/crates.yml index 5697c206..f9a2b41f 100644 --- a/.github/workflows/crates.yml +++ b/.github/workflows/crates.yml @@ -5,7 +5,7 @@ concurrency: cancel-in-progress: false env: - CARGO_PREFIX: ${{ github.repository.name }} + CARGO_PREFIX: ${{ github.event.repository.name }} CARGO_TERM_COLOR: always on: @@ -19,11 +19,8 @@ jobs: core: name: Publish (core) runs-on: ubuntu-latest - strategy: - matrix: - features: [ core ] env: - CARGO_PACKAGE_NAME: ${{ github.event.repository.name }}-${{ matrix.features }} + CARGO_PACKAGE_NAME: ${{ github.event.repository.name }}-core steps: - uses: actions/checkout@v4 - name: Publish (${{ env.CARGO_PACKAGE_NAME }}) From 8772a95d23b374197240bfa583e455f1f0255e84 Mon Sep 17 00:00:00 2001 From: Joe McCain III Date: Thu, 28 Mar 2024 16:30:44 -0500 Subject: [PATCH 58/87] update Signed-off-by: Joe McCain III --- .github/workflows/clippy.yml | 6 +- .github/workflows/rust.yml | 1 - Cargo.toml | 2 +- acme/Cargo.toml | 15 ++--- acme/examples/tensor.rs | 7 +- graphs/Cargo.toml | 2 +- tensor/Cargo.toml | 2 +- tensor/src/actions/iter/iterator.rs | 8 +-- tensor/src/actions/iter/mod.rs | 8 +-- tensor/src/backend/mod.rs | 15 +++++ tensor/src/impls/create.rs | 10 +-- tensor/src/impls/linalg.rs | 4 +- tensor/src/impls/reshape.rs | 58 +++++++++++++++-- tensor/src/lib.rs | 5 +- tensor/src/linalg/mod.rs | 4 +- tensor/src/ops/kinds.rs | 36 +++++++---- tensor/src/shape/{dim => }/axis.rs | 15 +++++ tensor/src/shape/dim/mod.rs | 24 +------ tensor/src/shape/error.rs | 2 + tensor/src/shape/mod.rs | 9 ++- tensor/src/shape/{dim => }/rank.rs | 10 +++ tensor/src/shape/shape.rs | 48 ++++++++++---- tensor/src/specs/ndtensor.rs | 2 +- tensor/src/specs/scalar.rs | 2 +- tensor/src/store/layout.rs | 99 ++++++++++++++++++++++++----- tensor/src/store/mod.rs | 10 --- tensor/src/tensor.rs | 63 +++++++++++++----- tensor/src/utils.rs | 4 +- tensor/tests/composition.rs | 26 ++++---- tensor/tests/reshape.rs | 35 ++++++++++ tensor/tests/tensor.rs | 20 ++++++ 31 files changed, 401 insertions(+), 151 deletions(-) create mode 100644 tensor/src/backend/mod.rs rename tensor/src/shape/{dim => }/axis.rs (84%) rename tensor/src/shape/{dim => }/rank.rs (90%) create mode 100644 tensor/tests/reshape.rs create mode 100644 tensor/tests/tensor.rs diff --git a/.github/workflows/clippy.yml b/.github/workflows/clippy.yml index db9201de..6e4c0107 100644 --- a/.github/workflows/clippy.yml +++ b/.github/workflows/clippy.yml @@ -29,16 +29,16 @@ jobs: runs-on: 
ubuntu-latest steps: - uses: actions/checkout@v4 - - name: setup (rust) + - name: Setup the toolchain uses: actions-rs/toolchain@v1 with: profile: minimal toolchain: stable components: clippy override: true - - name: setup (clippy) + - name: Installation run: cargo install clippy-sarif sarif-fmt - - name: analyze + - name: Analyze run: cargo clippy --all-features diff --git a/.github/workflows/rust.yml b/.github/workflows/rust.yml index ff6535a6..0e4c8bc3 100644 --- a/.github/workflows/rust.yml +++ b/.github/workflows/rust.yml @@ -10,7 +10,6 @@ env: on: push: branches: [ main ] - tags: [ nightly*, v*.*.* ] release: types: [ created ] repository_dispatch: diff --git a/Cargo.toml b/Cargo.toml index cdd20fc1..930fcab2 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -8,7 +8,7 @@ keywords = ["acme", "autodiff", "mathematics", "tensor"] license = "Apache-2.0" repository = "https://github.com/FL03/acme" readme = "README.md" -version = "0.3.0-nightly.2" # TODO - Update cargo package version +version = "0.3.0" # TODO - Update cargo package version [workspace] default-members = [ diff --git a/acme/Cargo.toml b/acme/Cargo.toml index 0b7329ba..8508f379 100644 --- a/acme/Cargo.toml +++ b/acme/Cargo.toml @@ -74,16 +74,11 @@ required-features = ["macros"] [build-dependencies] [dependencies] -# acme-core = { path = "../core", version = "0.3.0" } -# acme-derive = { optional = true, path = "../derive", version = "0.3.0" } -# acme-graphs = { optional = true, path = "../graphs", version = "0.3.0" } -# acme-macros = { optional = true, path = "../macros", version = "0.3.0" } -# acme-tensor = { optional = true, path = "../tensor", version = "0.3.0" } -acme-core = { path = "../core", version = "0.3.0-nightly.2" } -acme-derive = { optional = true, path = "../derive", version = "0.3.0-nightly.2" } -acme-graphs = { optional = true, path = "../graphs", version = "0.3.0-nightly.2" } -acme-macros = { optional = true, path = "../macros", version = "0.3.0-nightly.2" } -acme-tensor = { optional = true, path = "../tensor", version = "0.3.0-nightly.2" } +acme-core = { path = "../core", version = "0.3.0" } +acme-derive = { optional = true, path = "../derive", version = "0.3.0" } +acme-graphs = { optional = true, path = "../graphs", version = "0.3.0" } +acme-macros = { optional = true, path = "../macros", version = "0.3.0" } +acme-tensor = { optional = true, path = "../tensor", version = "0.3.0" } [dev-dependencies] approx = "0.5" diff --git a/acme/examples/tensor.rs b/acme/examples/tensor.rs index 909e6a34..d6d76ed6 100644 --- a/acme/examples/tensor.rs +++ b/acme/examples/tensor.rs @@ -6,8 +6,13 @@ extern crate acme; -use acme::prelude::BoxResult; +use acme::prelude::{BoxResult, Tensor}; fn main() -> BoxResult { + let shape = (2, 3); + let tensor: Tensor = Tensor::linspace(1.0, 7.0, 6).reshape(shape)?; + let b = tensor.t(); + println!("{:?}", &tensor[&[1, 1]]); + println!("{:?}", &b[&[1, 1]]); Ok(()) } diff --git a/graphs/Cargo.toml b/graphs/Cargo.toml index 2e9ec24e..e03aa8ca 100644 --- a/graphs/Cargo.toml +++ b/graphs/Cargo.toml @@ -44,7 +44,7 @@ strum.workspace = true [dependencies.acme-core] path = "../core" -version = "0.3.0-nightly.2" # "0.3.0" +version = "0.3.0" [package.metadata.docs.rs] all-features = true diff --git a/tensor/Cargo.toml b/tensor/Cargo.toml index b669ac68..e07fb306 100644 --- a/tensor/Cargo.toml +++ b/tensor/Cargo.toml @@ -37,7 +37,7 @@ strum = { features = ["derive"], version = "0.26" } [dependencies.acme-core] path = "../core" -version = "0.3.0-nightly.2" # "0.3.0" +version = "0.3.0" [dev-dependencies] 
approx = "0.5" diff --git a/tensor/src/actions/iter/iterator.rs b/tensor/src/actions/iter/iterator.rs index e5088402..109c8d81 100644 --- a/tensor/src/actions/iter/iterator.rs +++ b/tensor/src/actions/iter/iterator.rs @@ -2,8 +2,8 @@ Appellation: iterator Contrib: FL03 */ -//! # Iterator -//! -//! +use crate::prelude::Order; -pub struct Iterator; +pub struct Iter { + order: Order, +} diff --git a/tensor/src/actions/iter/mod.rs b/tensor/src/actions/iter/mod.rs index db635fcd..4516f820 100644 --- a/tensor/src/actions/iter/mod.rs +++ b/tensor/src/actions/iter/mod.rs @@ -1,15 +1,15 @@ /* - Appellation: grad + Appellation: iter Contrib: FL03 */ -//! # Gradient +//! # Iter //! //! -pub use self::iterator::Iterator; +pub use self::iterator::*; pub(crate) mod iterator; -pub trait TensorIter { +pub trait IterTensor { type Item; } diff --git a/tensor/src/backend/mod.rs b/tensor/src/backend/mod.rs new file mode 100644 index 00000000..3bbf3dd6 --- /dev/null +++ b/tensor/src/backend/mod.rs @@ -0,0 +1,15 @@ +/* + Appellation: backend + Contrib: FL03 +*/ +//! # Backend +//! +//! + +pub enum TensorType { + Scalar(T), + Tensor(Vec>), +} + +#[cfg(test)] +mod tests {} diff --git a/tensor/src/impls/create.rs b/tensor/src/impls/create.rs index 50e53658..bcafdda3 100644 --- a/tensor/src/impls/create.rs +++ b/tensor/src/impls/create.rs @@ -21,7 +21,7 @@ where pub fn fill(shape: impl IntoShape, value: T) -> Self { let shape = shape.into_shape(); let store = vec![value; shape.size()]; - from_vec(false.into(), shape, store) + from_vec(false, shape, store) } pub fn default_like(&self) -> Self @@ -48,7 +48,7 @@ where store.push(value); value += step; } - from_vec(false.into(), (store.len(),), store) + from_vec(false, store.len(), store) } /// Create a tensor within a range of values @@ -64,7 +64,7 @@ where store.push(value); value += step; } - from_vec(false.into(), (store.len(),), store) + from_vec(false, store.len(), store) } pub fn logspace(start: T, end: T, steps: usize) -> Self @@ -80,7 +80,7 @@ where store.push(value.exp2()); value += step; } - from_vec(false.into(), (store.len(),), store) + from_vec(false, (store.len(),), store) } pub fn geomspace(start: T, end: T, steps: usize) -> Self @@ -96,7 +96,7 @@ where store.push(value.exp()); value += step; } - from_vec(false.into(), (store.len(),), store) + from_vec(false, (store.len(),), store) } } diff --git a/tensor/src/impls/linalg.rs b/tensor/src/impls/linalg.rs index cedfbc97..4418bda6 100644 --- a/tensor/src/impls/linalg.rs +++ b/tensor/src/impls/linalg.rs @@ -8,6 +8,8 @@ use crate::prelude::{Matmul, Scalar, TensorOp}; use crate::tensor::*; +impl TensorBase where T: Scalar {} + impl Matmul> for TensorBase where T: Scalar, @@ -15,7 +17,7 @@ where type Output = Self; fn matmul(&self, other: &Self) -> Self { - let shape = self.shape().matmul_shape(other.shape()).unwrap(); + let shape = self.shape().matmul_shape(&other.shape()).unwrap(); let mut result = vec![T::zero(); shape.size()]; for i in 0..self.shape()[0] { diff --git a/tensor/src/impls/reshape.rs b/tensor/src/impls/reshape.rs index 009200f5..ddfa32ee 100644 --- a/tensor/src/impls/reshape.rs +++ b/tensor/src/impls/reshape.rs @@ -2,8 +2,9 @@ Appellation: reshape Contrib: FL03 */ -use crate::prelude::{IntoShape, ShapeError, TensorResult}; -use crate::tensor::TensorBase; +use crate::prelude::{BackpropOp, TensorId, TensorOp, TensorResult}; +use crate::shape::{Axis, IntoShape, ShapeError}; +use crate::tensor::{from_vec, TensorBase}; impl TensorBase where @@ -25,14 +26,61 @@ where unimplemented!() } + /// + pub 
fn swap_axes(&self, swap: Axis, with: Axis) -> TensorResult { + let layout = self.layout().swap_axes(swap, with); + + let shape = self.shape(); + let mut res = self.data().clone(); + + for i in 0..shape[swap] { + for j in 0..shape[with] { + let target = self.layout.position(&[i, j])?; + let dest = layout.position(&[j, i])?; + res[dest] = self.data()[target].clone(); + } + } + + let tensor = crate::new(false, None, layout.shape(), res); + Ok(tensor) + } + /// Transpose the tensor. + pub fn t(&self) -> TensorBase { + let (a, b) = (Axis(0), Axis(1)); + let op = TensorOp::transpose(self.clone(), a, b); + + let layout = self.layout().clone().transpose(a, b); + let shape = self.layout.shape(); + let mut data = self.store.to_vec(); + + for i in 0..shape[a] { + for j in 0..shape[b] { + let scope = self.layout.select([i, j]); + let target = layout.select([j, i]); + println!("Swapping {:?} with {:?}", scope, target); + data[target] = self[&[i, j]].clone(); + } + } + + TensorBase { + id: TensorId::new(), + kind: self.kind.clone(), + layout, + op: op.into(), + store: data.clone(), + } + } + pub fn reshape(self, shape: impl IntoShape) -> TensorResult { - let mut tensor = self; let shape = shape.into_shape(); - if tensor.elements() != shape.size() { + if self.size() != shape.size() { return Err(ShapeError::MismatchedElements.into()); } - tensor.layout.shape = shape; + let mut tensor = self; + + tensor.layout.reshape(shape); + Ok(tensor) } } diff --git a/tensor/src/lib.rs b/tensor/src/lib.rs index d52fca9e..6956ae1f 100644 --- a/tensor/src/lib.rs +++ b/tensor/src/lib.rs @@ -12,16 +12,17 @@ extern crate alloc; extern crate acme_core as acme; #[doc(inline)] -pub use self::{error::*, seal::*, tensor::*, utils::*}; +pub use self::{seal::*, tensor::*, utils::*}; -pub(crate) mod error; #[macro_use] pub(crate) mod seal; pub(crate) mod tensor; pub(crate) mod utils; pub mod actions; +pub mod backend; pub mod data; +pub mod error; pub mod linalg; pub mod ops; pub mod shape; diff --git a/tensor/src/linalg/mod.rs b/tensor/src/linalg/mod.rs index 4fae49d5..22a58f91 100644 --- a/tensor/src/linalg/mod.rs +++ b/tensor/src/linalg/mod.rs @@ -8,6 +8,8 @@ pub mod arith; pub mod uplo; +use crate::shape::Axis; + pub trait Inverse { fn inv(self) -> Self; } @@ -20,7 +22,7 @@ pub trait Matmul { } pub trait Transpose { - fn transpose(&self) -> Self; + fn transpose(&self, swap: Axis, with: Axis) -> Self; } pub(crate) mod prelude { diff --git a/tensor/src/ops/kinds.rs b/tensor/src/ops/kinds.rs index 9bd5905e..090f5495 100644 --- a/tensor/src/ops/kinds.rs +++ b/tensor/src/ops/kinds.rs @@ -2,16 +2,23 @@ Appellation: kinds Contrib: FL03 */ +use crate::shape::Axis; use crate::TensorBase; use acme::ops::binary::BinaryOp; use acme::ops::unary::UnaryOp; +pub type BoxTensor = Box>; + #[derive(Clone, Debug)] pub enum TensorOp { - Binary(Box>, Box>, BinaryOp), - BinaryScalar(Box>, T, BinaryOp), - Unary(Box>, UnaryOp), - Matmul(Box>, Box>), + Binary(BoxTensor, BoxTensor, BinaryOp), + BinaryScalar(BoxTensor, T, BinaryOp), + Unary(BoxTensor, UnaryOp), + Matmul(BoxTensor, BoxTensor), + Transpose { + tensor: BoxTensor, + axes: (Axis, Axis), + }, } impl TensorOp { @@ -23,20 +30,28 @@ impl TensorOp { TensorOp::BinaryScalar(Box::new(lhs), rhs, op) } - pub fn unary(tensor: TensorBase, op: UnaryOp) -> Self { - TensorOp::Unary(Box::new(tensor), op) - } - pub fn matmul(lhs: TensorBase, rhs: TensorBase) -> Self { TensorOp::Matmul(Box::new(lhs), Box::new(rhs)) } + pub fn transpose(tensor: TensorBase, swap: Axis, with: Axis) -> Self { + TensorOp::Transpose 
{ + tensor: Box::new(tensor), + axes: (swap, with), + } + } + + pub fn unary(tensor: TensorBase, op: UnaryOp) -> Self { + TensorOp::Unary(Box::new(tensor), op) + } + pub fn lhs(&self) -> &TensorBase { match self { TensorOp::Binary(lhs, _, _) => lhs, TensorOp::BinaryScalar(lhs, _, _) => lhs, TensorOp::Unary(lhs, _) => lhs, TensorOp::Matmul(lhs, _) => lhs, + TensorOp::Transpose { tensor, .. } => tensor, } } @@ -48,8 +63,3 @@ impl TensorOp { } } } - -pub enum Inputs { - Scalar(T), - Tensor(TensorBase), -} diff --git a/tensor/src/shape/dim/axis.rs b/tensor/src/shape/axis.rs similarity index 84% rename from tensor/src/shape/dim/axis.rs rename to tensor/src/shape/axis.rs index 2be8ca9b..b6fd4fd6 100644 --- a/tensor/src/shape/dim/axis.rs +++ b/tensor/src/shape/axis.rs @@ -9,6 +9,21 @@ use serde::{Deserialize, Serialize}; use std::ops::Deref; +pub trait IntoAxis { + fn into_axis(self) -> Axis; +} + +impl IntoAxis for usize { + fn into_axis(self) -> Axis { + Axis::new(self) + } +} + +pub struct Switch { + pub swap: Axis, + pub with: Axis, +} + #[derive(Clone, Copy, Debug, Default, Eq, Hash, Ord, PartialEq, PartialOrd)] #[cfg_attr(feature = "serde", derive(Deserialize, Serialize))] pub struct Axis(pub(crate) usize); diff --git a/tensor/src/shape/dim/mod.rs b/tensor/src/shape/dim/mod.rs index 543deb7b..c95dbd09 100644 --- a/tensor/src/shape/dim/mod.rs +++ b/tensor/src/shape/dim/mod.rs @@ -5,11 +5,9 @@ //! # Dimension //! -pub use self::{axis::Axis, dimension::*, rank::Rank}; +pub use self::dimension::Dim; -pub(crate) mod axis; pub(crate) mod dimension; -pub(crate) mod rank; pub trait Dimension { type Pattern; @@ -18,23 +16,3 @@ pub trait Dimension { fn ndim(&self) -> usize; } - -pub trait IntoAxis { - fn into_axis(self) -> Axis; -} - -impl IntoAxis for usize { - fn into_axis(self) -> Axis { - Axis::new(self) - } -} - -pub trait IntoRank { - fn into_rank(self) -> Rank; -} - -impl IntoRank for usize { - fn into_rank(self) -> Rank { - Rank::new(self) - } -} diff --git a/tensor/src/shape/error.rs b/tensor/src/shape/error.rs index b18f41d2..281aa30a 100644 --- a/tensor/src/shape/error.rs +++ b/tensor/src/shape/error.rs @@ -32,7 +32,9 @@ pub type ShapeResult = std::result::Result; #[repr(usize)] #[strum(serialize_all = "snake_case")] pub enum ShapeError { + DimensionMismatch, IncompatibleShapes, + InvalidAxis, InvalidShape, MismatchedElements, } diff --git a/tensor/src/shape/mod.rs b/tensor/src/shape/mod.rs index 680bf994..014a6575 100644 --- a/tensor/src/shape/mod.rs +++ b/tensor/src/shape/mod.rs @@ -5,9 +5,11 @@ //! # Shapes //! //! 
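 //! A [Shape] is an ordered list of dimension sizes; its [Rank] is the number
 //! of dimensions and its size is the product of them.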
-pub use self::{error::*, shape::*, stride::*}; +pub use self::{axis::*, error::*, rank::*, shape::Shape, stride::*}; +pub(crate) mod axis; pub(crate) mod error; +pub(crate) mod rank; pub(crate) mod shape; pub(crate) mod stride; @@ -27,11 +29,14 @@ where } pub(crate) mod prelude { + pub use super::IntoShape; + + pub use super::axis::{Axis, IntoAxis}; pub use super::dim::*; pub use super::error::*; + pub use super::rank::{IntoRank, Rank}; pub use super::shape::*; pub use super::stride::*; - pub use super::IntoShape; } #[cfg(test)] diff --git a/tensor/src/shape/dim/rank.rs b/tensor/src/shape/rank.rs similarity index 90% rename from tensor/src/shape/dim/rank.rs rename to tensor/src/shape/rank.rs index d48a1fbd..4640e52f 100644 --- a/tensor/src/shape/dim/rank.rs +++ b/tensor/src/shape/rank.rs @@ -10,6 +10,16 @@ use serde::{Deserialize, Serialize}; use std::borrow::Borrow; use std::ops::{Deref, DerefMut}; +pub trait IntoRank { + fn into_rank(self) -> Rank; +} + +impl IntoRank for usize { + fn into_rank(self) -> Rank { + Rank::new(self) + } +} + #[cfg_attr(feature = "serde", derive(Deserialize, Serialize,))] #[derive(Clone, Copy, Debug, Default, Eq, Hash, Ord, PartialEq, PartialOrd)] pub struct Rank(pub usize); diff --git a/tensor/src/shape/shape.rs b/tensor/src/shape/shape.rs index c38e6d0e..e4fd9f0a 100644 --- a/tensor/src/shape/shape.rs +++ b/tensor/src/shape/shape.rs @@ -2,12 +2,13 @@ Appellation: shape Contrib: FL03 */ -use super::dim::Rank; use super::error::ShapeError; +use super::{Axis, Rank}; use crate::prelude::TensorResult; + +use core::ops::{self, Deref}; #[cfg(feature = "serde")] use serde::{Deserialize, Serialize}; -use std::ops::{self, Deref}; #[cfg_attr(feature = "serde", derive(Deserialize, Serialize,))] #[derive(Clone, Debug, Default, Eq, Hash, Ord, PartialEq, PartialOrd)] @@ -26,14 +27,14 @@ impl Shape { Self(Vec::with_capacity(capacity)) } - pub fn zero() -> Self { - Self::default() - } - pub fn zeros(rank: usize) -> Self { Self(vec![0; rank]) } + pub fn as_slice(&self) -> &[usize] { + &self.0 + } + pub(crate) fn matmul_shape(&self, other: &Self) -> TensorResult { if *self.rank() != 2 || *other.rank() != 2 { return Err(ShapeError::IncompatibleShapes.into()); @@ -66,6 +67,7 @@ impl Shape { } true } + pub fn ncols(&self) -> usize { if self.len() >= 2 { self.0[1] @@ -100,14 +102,36 @@ impl Shape { self.0.remove(index) } - pub fn set(&mut self, index: usize, dim: usize) { - self.0[index] = dim + pub fn reverse(&mut self) { + self.0.reverse() + } + + pub fn set(&mut self, index: Axis, dim: usize) { + self[index] = dim } pub fn size(&self) -> usize { self.0.iter().product() } + pub fn slice(&self) -> &[usize] { + &self.0 + } + + pub fn slice_mut(&mut self) -> &mut [usize] { + &mut self.0 + } + + pub(crate) fn swap(&mut self, a: Axis, b: Axis) { + self.0.swap(a.axis(), b.axis()) + } + + pub fn swap_axes(&self, swap: Axis, with: Axis) -> Self { + let mut shape = self.clone(); + shape.swap(swap, with); + shape + } + pub(crate) fn stride_contiguous(&self) -> Vec { let mut stride: Vec<_> = self .0 @@ -242,10 +266,10 @@ impl ops::Index for Shape { } } -impl ops::Index for Shape { +impl ops::Index for Shape { type Output = usize; - fn index(&self, index: Rank) -> &Self::Output { + fn index(&self, index: Axis) -> &Self::Output { &self.0[*index] } } @@ -256,8 +280,8 @@ impl ops::IndexMut for Shape { } } -impl ops::IndexMut for Shape { - fn index_mut(&mut self, index: Rank) -> &mut Self::Output { +impl ops::IndexMut for Shape { + fn index_mut(&mut self, index: Axis) -> &mut Self::Output { 
&mut self.0[*index] } } diff --git a/tensor/src/specs/ndtensor.rs b/tensor/src/specs/ndtensor.rs index 03bba7f0..9e7e6f7c 100644 --- a/tensor/src/specs/ndtensor.rs +++ b/tensor/src/specs/ndtensor.rs @@ -17,7 +17,7 @@ pub trait NdTensor { self.layout().shape().rank() } - fn shape(&self) -> &Shape { + fn shape(&self) -> Shape { self.layout().shape() } diff --git a/tensor/src/specs/scalar.rs b/tensor/src/specs/scalar.rs index d7584629..bce4b6e6 100644 --- a/tensor/src/specs/scalar.rs +++ b/tensor/src/specs/scalar.rs @@ -4,7 +4,7 @@ */ use num::complex::Complex; use num::traits::real::Real; -use num::traits::{Float, FromPrimitive, NumAssign, NumCast, NumOps, Pow}; +use num::traits::{Float, FromPrimitive, NumAssign, NumCast, NumOps}; use std::iter::{Product, Sum}; use std::ops::Neg; diff --git a/tensor/src/store/layout.rs b/tensor/src/store/layout.rs index b406581e..c9ad62df 100644 --- a/tensor/src/store/layout.rs +++ b/tensor/src/store/layout.rs @@ -5,7 +5,7 @@ //! # Layout //! //! -use crate::shape::{IntoShape, Shape}; +use crate::shape::{Axis, IntoShape, Shape, ShapeError, ShapeResult}; #[cfg(feature = "serde")] use serde::{Deserialize, Serialize}; @@ -25,7 +25,7 @@ impl Layout { stride, } } - + /// Create a new layout with a contiguous stride. pub fn contiguous(shape: impl IntoShape) -> Self { let shape = shape.into_shape(); let stride = shape.stride_contiguous(); @@ -36,16 +36,6 @@ impl Layout { } } - pub fn contiguous_with_offset(shape: impl IntoShape, offset: usize) -> Self { - let shape = shape.into_shape(); - let stride = shape.stride_contiguous(); - Self { - offset, - shape, - stride, - } - } - pub fn ndim(&self) -> usize { debug_assert_eq!(self.stride.len(), self.shape.ndim()); self.shape.ndim() @@ -55,8 +45,19 @@ impl Layout { self.offset } - pub fn shape(&self) -> &Shape { - &self.shape + pub fn reshape(&mut self, shape: impl IntoShape) { + self.shape = shape.into_shape(); + self.stride = self.shape.stride_contiguous(); + } + + pub fn reverse_axes(mut self) -> Layout { + self.shape.slice_mut().reverse(); + self.stride.reverse(); + self + } + + pub fn shape(&self) -> Shape { + self.shape.clone() } pub fn size(&self) -> usize { @@ -66,15 +67,81 @@ impl Layout { pub fn stride(&self) -> &[usize] { &self.stride } + + pub fn swap_axes(&self, a: Axis, b: Axis) -> Layout { + let mut stride = self.stride.to_vec(); + stride.swap(a.axis(), b.axis()); + Layout { + offset: self.offset, + shape: self.shape.swap_axes(a, b), + stride, + } + } + + pub fn transpose(&self, a: Axis, b: Axis) -> Layout { + let shape = self.shape.swap_axes(a, b); + let stride = shape.stride_contiguous(); + Layout { + offset: self.offset, + shape, + stride, + } + } + + pub fn with_offset(mut self, offset: usize) -> Self { + self.offset = offset; + self + } + + pub fn with_shape(mut self, shape: impl IntoShape) -> Self { + self.shape = shape.into_shape(); + self.stride = self.shape.stride_contiguous(); + self + } } // Internal methods impl Layout { - pub(crate) fn position(&self, coords: impl AsRef<[usize]>) -> usize { + pub fn position(&self, coords: impl AsRef<[usize]>) -> ShapeResult { + let coords = coords.as_ref(); + if coords.len() != self.shape.ndim() { + return Err(ShapeError::DimensionMismatch.into()); + } + for (&coord, &dim) in coords.iter().zip(self.shape.slice().iter()) { + if coord >= dim { + return Err(ShapeError::MismatchedElements.into()); + } + } let mut index = self.offset; - for (i, &coord) in coords.as_ref().iter().enumerate() { + for (i, &coord) in coords.iter().enumerate() { index += coord * 
self.stride[i]; } + Ok(index) + } + + pub fn select(&self, coords: impl AsRef<[usize]>) -> usize { + let coords = coords.as_ref(); + if coords.len() != self.shape.ndim() { + panic!("Dimension mismatch"); + } + let index = coords + .iter() + .zip(self.stride.iter()) + .fold(self.offset, |acc, (&coord, &stride)| acc + coord * stride); index } } + +#[cfg(test)] +mod tests { + use super::Layout; + + #[test] + fn test_position() { + let shape = (3, 3); + let layout = Layout::contiguous(shape); + assert_eq!(layout.select(&[0, 0]), 0); + assert_eq!(layout.select(&[0, 1]), 1); + assert_eq!(layout.select(&[2, 2]), 8); + } +} diff --git a/tensor/src/store/mod.rs b/tensor/src/store/mod.rs index 3b06467f..dbfcf16d 100644 --- a/tensor/src/store/mod.rs +++ b/tensor/src/store/mod.rs @@ -14,15 +14,5 @@ pub trait TensorStore { type Elem; } -pub enum TensorData { - Scalar(T), - Tensor(Vec>), -} - -pub enum TensorBackend { - Scalar, - Tensor, -} - #[cfg(test)] mod tests {} diff --git a/tensor/src/tensor.rs b/tensor/src/tensor.rs index 513b5c1f..08a485b4 100644 --- a/tensor/src/tensor.rs +++ b/tensor/src/tensor.rs @@ -10,21 +10,25 @@ use std::ops::Index; // use std::sync::{Arc, RwLock}; pub(crate) fn new( - kind: TensorKind, - op: BackpropOp, + kind: impl Into, + op: impl Into>, shape: impl IntoShape, store: Vec, ) -> TensorBase { TensorBase { id: TensorId::new(), - kind, + kind: kind.into(), layout: Layout::contiguous(shape), - op, + op: op.into(), store, } } -pub(crate) fn from_vec(kind: TensorKind, shape: impl IntoShape, store: Vec) -> TensorBase { +pub(crate) fn from_vec( + kind: impl Into, + shape: impl IntoShape, + store: Vec, +) -> TensorBase { new(kind, BackpropOp::none(), shape, store) } @@ -61,16 +65,16 @@ impl TensorBase { } pub fn from_vec( - kind: TensorKind, - op: BackpropOp, + kind: impl Into, + op: impl Into>, shape: impl IntoShape, store: Vec, ) -> Self { Self { id: TensorId::new(), - kind, + kind: kind.into(), layout: Layout::contiguous(shape), - op, + op: op.into(), store, } } @@ -91,10 +95,6 @@ impl TensorBase { } } } - /// Returns the number of elements in the tensor. - pub fn elements(&self) -> usize { - self.layout.size() - } /// Returns the unique identifier of the tensor. pub const fn id(&self) -> TensorId { self.id @@ -112,9 +112,13 @@ impl TensorBase { self.layout.shape().rank() } /// An owned reference of the tensors [Shape] - pub fn shape(&self) -> &Shape { + pub fn shape(&self) -> Shape { self.layout.shape() } + /// Returns the number of elements in the tensor. 
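+    /// (the product of the dimensions, as reported by [Layout::size])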
+ pub fn size(&self) -> usize { + self.layout.size() + } /// Get a reference to the stride of the tensor pub fn stride(&self) -> &[usize] { self.layout.stride() @@ -189,19 +193,38 @@ impl TensorBase { store, } } + + pub fn with_layout(mut self, layout: Layout) -> Self { + self.layout = layout; + self + } + + pub fn with_op(mut self, op: BackpropOp) -> Self { + self.op = op; + self + } + + pub fn with_shape(mut self, shape: impl IntoShape) -> Self { + self.layout = Layout::contiguous(shape); + self + } } impl TensorBase { pub(crate) fn data(&self) -> &Vec { &self.store } + + pub(crate) fn data_mut(&mut self) -> &mut Vec { + &mut self.store + } } impl Index<&[usize]> for TensorBase { type Output = T; fn index(&self, index: &[usize]) -> &Self::Output { - let i = self.layout().position(index); + let i = self.layout().position(index).unwrap(); &self.store[i] } } @@ -219,6 +242,14 @@ where T: PartialEq, { fn eq(&self, other: &Self) -> bool { - self.store == other.store + self.layout == other.layout && self.store == other.store + } +} + +impl FromIterator for TensorBase { + fn from_iter>(iter: I) -> Self { + let store = Vec::from_iter(iter); + let shape = Shape::from(store.len()); + from_vec(TensorKind::Normal, shape, store) } } diff --git a/tensor/src/utils.rs b/tensor/src/utils.rs index d81ad2f4..018b6ec7 100644 --- a/tensor/src/utils.rs +++ b/tensor/src/utils.rs @@ -17,7 +17,7 @@ where return Err(ShapeError::IncompatibleShapes.into()); } - let shape = lhs.shape().matmul_shape(rhs.shape()).unwrap(); + let shape = lhs.shape().matmul_shape(&rhs.shape()).unwrap(); let mut result = vec![T::zero(); shape.size()]; for i in 0..lhs.shape().nrows() { @@ -43,7 +43,7 @@ where return Err(ShapeError::IncompatibleShapes.into()); } - let shape = lhs.shape().matmul_shape(rhs.shape()).unwrap(); + let shape = lhs.shape().matmul_shape(&rhs.shape()).unwrap(); let mut result = vec![T::zero(); shape.size()]; for i in 0..lhs.shape().nrows() { diff --git a/tensor/tests/composition.rs b/tensor/tests/composition.rs index d7e7c265..123ebec0 100644 --- a/tensor/tests/composition.rs +++ b/tensor/tests/composition.rs @@ -1,5 +1,5 @@ /* - Appellation: tensor + Appellation: composition Contrib: FL03 */ #![cfg(test)] @@ -8,29 +8,25 @@ extern crate acme_tensor as acme; use acme::prelude::{Shape, Tensor}; #[test] -fn test_tensor() { +fn test_ones_and_zeros() { let shape = (2, 2); let a = Tensor::::ones(shape); - let b = Tensor::zeros(shape); + let b = a.zeros_like(); assert_ne!(&a, &b); -} - -#[test] -fn test_reshape() { - let shape = (2, 2); - let a = Tensor::::ones(shape); - let b = a.clone().reshape((4,)).unwrap(); - - assert_ne!(&a.shape(), &b.shape()); - assert_eq!(&a.elements(), &b.elements()); + assert_ne!(a.id(), b.id()); + assert_eq!(a.shape(), b.shape()); + assert_eq!(a.size(), b.size()); + assert_eq!(a.stride(), b.stride()); + assert_eq!(a, Tensor::ones(shape)); + assert_eq!(b, Tensor::zeros(shape)); } #[test] fn test_arange() { let exp = Shape::from(10); let a = Tensor::arange(0_f64, 10_f64, 1_f64); - assert_eq!(a.shape(), &exp); + assert_eq!(a.shape(), exp); for i in 0..10 { assert_eq!(a[&[i]], i as f64); @@ -41,7 +37,7 @@ fn test_arange() { fn test_linstep() { let exp = Shape::from(10); let a = Tensor::linspace(0_f64, 10_f64, 10); - assert_eq!(a.shape(), &exp); + assert_eq!(a.shape(), exp); let b = Tensor::arange(0_f64, 10_f64, 1_f64); for i in 0..10 { assert_eq!(a[&[i]], b[&[i]]); diff --git a/tensor/tests/reshape.rs b/tensor/tests/reshape.rs new file mode 100644 index 00000000..056fbdda --- /dev/null +++ 
b/tensor/tests/reshape.rs @@ -0,0 +1,35 @@ +/* + Appellation: reshape + Contrib: FL03 +*/ +#![cfg(test)] +extern crate acme_tensor as acme; + +use acme::prelude::Tensor; + +#[test] +fn test_reshape() { + let shape = (2, 2); + let a = Tensor::::ones(shape); + let b = a.clone().reshape((4,)).unwrap(); + + assert_ne!(&a.shape(), &b.shape()); + assert_eq!(&a.size(), &b.size()); +} + +#[test] +fn test_transpose() { + let shape = (2, 3); + let a = Tensor::::linspace(0.0, 6.0, 6).with_shape(shape); + let at = a.t(); + println!("Transposed Shape: {:?}", &at.shape()); + + let exp = Tensor::from_vec(false, None, (3, 2), vec![0.0, 3.0, 1.0, 4.0, 2.0, 5.0]); + assert_ne!(&a, &at); + assert_eq!(at.shape(), (3, 2).into()); + for i in 0..shape.0 { + for j in 0..shape.1 { + assert_eq!(a[&[i, j]], exp[&[j, i]]); + } + } +} diff --git a/tensor/tests/tensor.rs b/tensor/tests/tensor.rs new file mode 100644 index 00000000..055ea804 --- /dev/null +++ b/tensor/tests/tensor.rs @@ -0,0 +1,20 @@ +/* + Appellation: tensor + Contrib: FL03 +*/ +#![cfg(test)] +extern crate acme_tensor as acme; + +use acme::prelude::Tensor; + +#[test] +fn test_tensor() { + let shape = (2, 2); + let a = Tensor::::ones(shape); + let b = a.zeros_like(); + + assert_ne!(a.id(), b.id()); + assert_eq!(a.shape(), b.shape()); + assert_eq!(a.size(), b.size()); + assert_eq!(a.stride(), b.stride()); +} From f48167f5e2de7d8f5785f6ad623c60ce17bbedaa Mon Sep 17 00:00:00 2001 From: Joe McCain III Date: Thu, 28 Mar 2024 17:54:53 -0500 Subject: [PATCH 59/87] update Signed-off-by: Joe McCain III --- tensor/src/impls/num.rs | 13 ++++- tensor/src/impls/ops/binary.rs | 11 ++-- tensor/src/impls/reshape.rs | 49 +++++++++++------- tensor/src/ops/kinds.rs | 10 ++-- tensor/src/shape/rank.rs | 51 ++++++++++++++++++ tensor/src/shape/shape.rs | 58 +++++++++++++++------ tensor/src/shape/stride.rs | 95 ++++++++++++++++++++++++++++++++-- tensor/src/store/layout.rs | 90 +++++++++++++++++--------------- tensor/src/tensor.rs | 32 ++++++++---- tensor/tests/reshape.rs | 11 ++++ tensor/tests/tensor.rs | 13 +++++ 11 files changed, 330 insertions(+), 103 deletions(-) diff --git a/tensor/src/impls/num.rs b/tensor/src/impls/num.rs index a92aa13f..9667e829 100644 --- a/tensor/src/impls/num.rs +++ b/tensor/src/impls/num.rs @@ -4,7 +4,18 @@ */ use crate::prelude::Scalar; use crate::tensor::TensorBase; -use num::traits::{One, Zero}; +use num::traits::{Num, One, Zero}; + +impl Num for TensorBase +where + T: Scalar + Num, +{ + type FromStrRadixErr = T::FromStrRadixErr; + + fn from_str_radix(str: &str, radix: u32) -> Result { + T::from_str_radix(str, radix).map(Self::from_scalar) + } +} impl One for TensorBase where diff --git a/tensor/src/impls/ops/binary.rs b/tensor/src/impls/ops/binary.rs index 5b10d5a8..7b1ebba5 100644 --- a/tensor/src/impls/ops/binary.rs +++ b/tensor/src/impls/ops/binary.rs @@ -44,7 +44,7 @@ where } macro_rules! impl_arithmetic { - ($trait:ident, $method:ident, $op:tt) => { + (op: $trait:ident, $method:ident, $op:tt) => { impl_scalar_arith!($trait, $method, $op); impl std::ops::$trait for TensorBase @@ -113,6 +113,9 @@ macro_rules! impl_arithmetic { } } }; + ($(($trait:ident, $method:ident, $op:tt)),*) => { + $( impl_arithmetic!(op: $trait, $method, $op); )* + }; } macro_rules! impl_scalar_arith { @@ -181,12 +184,10 @@ macro_rules! 
impl_assign_op { } -impl_arithmetic!(Add, add, +); -impl_arithmetic!(Div, div, /); -impl_arithmetic!(Mul, mul, *); -impl_arithmetic!(Sub, sub, -); +impl_arithmetic!((Add, add, +), (Div, div, /), (Mul, mul, *), (Rem, rem, %), (Sub, sub, -)); impl_assign_op!(AddAssign, add_assign, Add, +); impl_assign_op!(DivAssign, div_assign, Div, /); impl_assign_op!(MulAssign, mul_assign, Mul, *); +impl_assign_op!(RemAssign, rem_assign, Rem, %); impl_assign_op!(SubAssign, sub_assign, Sub, -); diff --git a/tensor/src/impls/reshape.rs b/tensor/src/impls/reshape.rs index ddfa32ee..241c8ef1 100644 --- a/tensor/src/impls/reshape.rs +++ b/tensor/src/impls/reshape.rs @@ -2,20 +2,24 @@ Appellation: reshape Contrib: FL03 */ -use crate::prelude::{BackpropOp, TensorId, TensorOp, TensorResult}; +use crate::prelude::{TensorId, TensorOp, TensorResult}; use crate::shape::{Axis, IntoShape, ShapeError}; -use crate::tensor::{from_vec, TensorBase}; +use crate::tensor::TensorBase; impl TensorBase where T: Clone + Default, { pub fn broadcast(&self, shape: impl IntoShape) -> Self { - let shape = shape.into_shape(); - - let _diff = *self.shape().rank() - *shape.rank(); + let layout = self.layout.broadcast_as(shape).unwrap(); - unimplemented!() + Self { + id: TensorId::new(), + kind: self.kind.clone(), + layout, + op: self.op.clone(), + store: self.store.clone(), + } } pub fn pad(&self, shape: impl IntoShape, _with: T) -> Self { @@ -27,25 +31,31 @@ where } /// - pub fn swap_axes(&self, swap: Axis, with: Axis) -> TensorResult { - let layout = self.layout().swap_axes(swap, with); + pub fn swap_axes(&self, swap: Axis, with: Axis) -> Self { + let op = TensorOp::transpose(self.clone(), swap, with); - let shape = self.shape(); - let mut res = self.data().clone(); + let layout = self.layout().clone().transpose(swap, with); + let shape = self.layout.shape(); + let mut data = self.store.to_vec(); for i in 0..shape[swap] { for j in 0..shape[with] { - let target = self.layout.position(&[i, j])?; - let dest = layout.position(&[j, i])?; - res[dest] = self.data()[target].clone(); + let scope = self.layout.index([i, j]); + let target = layout.index([j, i]); + data[target] = self.data()[scope].clone(); } } - let tensor = crate::new(false, None, layout.shape(), res); - Ok(tensor) + TensorBase { + id: TensorId::new(), + kind: self.kind.clone(), + layout, + op: op.into(), + store: data.clone(), + } } /// Transpose the tensor. 
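/// Note that the transpose below is materialized: it allocates a copy of the
/// data and writes element (i, j) of the source to (j, i) of the result.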
- pub fn t(&self) -> TensorBase { + pub fn t(&self) -> Self { let (a, b) = (Axis(0), Axis(1)); let op = TensorOp::transpose(self.clone(), a, b); @@ -55,10 +65,9 @@ where for i in 0..shape[a] { for j in 0..shape[b] { - let scope = self.layout.select([i, j]); - let target = layout.select([j, i]); - println!("Swapping {:?} with {:?}", scope, target); - data[target] = self[&[i, j]].clone(); + let scope = self.layout.index([i, j]); + let target = layout.index([j, i]); + data[target] = self.data()[scope].clone(); } } diff --git a/tensor/src/ops/kinds.rs b/tensor/src/ops/kinds.rs index 090f5495..c635ebdb 100644 --- a/tensor/src/ops/kinds.rs +++ b/tensor/src/ops/kinds.rs @@ -2,18 +2,19 @@ Appellation: kinds Contrib: FL03 */ -use crate::shape::Axis; +use crate::shape::{Axis, Shape}; use crate::TensorBase; -use acme::ops::binary::BinaryOp; -use acme::ops::unary::UnaryOp; +use acme::prelude::{BinaryOp, UnaryOp}; -pub type BoxTensor = Box>; +pub type BoxTensor = Box>; #[derive(Clone, Debug)] +#[non_exhaustive] pub enum TensorOp { Binary(BoxTensor, BoxTensor, BinaryOp), BinaryScalar(BoxTensor, T, BinaryOp), Unary(BoxTensor, UnaryOp), + Broadcast(BoxTensor, Shape), Matmul(BoxTensor, BoxTensor), Transpose { tensor: BoxTensor, @@ -50,6 +51,7 @@ impl TensorOp { TensorOp::Binary(lhs, _, _) => lhs, TensorOp::BinaryScalar(lhs, _, _) => lhs, TensorOp::Unary(lhs, _) => lhs, + TensorOp::Broadcast(tensor, _) => tensor, TensorOp::Matmul(lhs, _) => lhs, TensorOp::Transpose { tensor, .. } => tensor, } diff --git a/tensor/src/shape/rank.rs b/tensor/src/shape/rank.rs index 4640e52f..eade68f9 100644 --- a/tensor/src/shape/rank.rs +++ b/tensor/src/shape/rank.rs @@ -87,3 +87,54 @@ impl From for usize { unsafe impl Send for Rank {} unsafe impl Sync for Rank {} + +macro_rules! impl_std_ops { + ($trait:tt, $method:ident, $e:tt) => { + impl std::ops::$trait for Rank { + type Output = usize; + + fn $method(self, rhs: usize) -> Self::Output { + self.0 $e rhs + } + } + + impl std::ops::$trait for Rank { + type Output = usize; + + fn $method(self, rhs: Rank) -> Self::Output { + self.0 $e rhs.0 + } + } + + impl<'a> std::ops::$trait for &'a Rank { + type Output = usize; + + fn $method(self, rhs: Rank) -> Self::Output { + self.0 $e rhs.0 + } + } + + impl<'a> std::ops::$trait<&'a Rank> for Rank { + type Output = usize; + + fn $method(self, rhs: &'a Rank) -> Self::Output { + self.0 $e rhs.0 + } + } + + impl<'a> std::ops::$trait<&'a Rank> for &'a Rank { + type Output = usize; + + fn $method(self, rhs: &'a Rank) -> Self::Output { + self.0 $e rhs.0 + } + } + }; + (many: $(($trait:tt, $method:ident, $e:tt)),*) => { + $( + impl_std_ops!($trait, $method, $e); + )* + }; +} + +impl_std_ops!(many: (Add, add, +), (Sub, sub, -), (Mul, mul, *), (Div, div, /), (Rem, rem, %)); diff --git a/tensor/src/shape/shape.rs b/tensor/src/shape/shape.rs index e4fd9f0a..bf108891 100644 --- a/tensor/src/shape/shape.rs +++ b/tensor/src/shape/shape.rs @@ -3,7 +3,7 @@ Contrib: FL03 */ use super::error::ShapeError; -use super::{Axis, Rank}; +use super::{Axis, Rank, Stride}; use crate::prelude::TensorResult; use core::ops::{self, Deref}; @@ -31,15 +31,8 @@ impl Shape { Self(vec![0; rank]) } - pub fn as_slice(&self) -> &[usize] { - &self.0 - } - pub(crate) fn matmul_shape(&self, other: &Self) -> TensorResult { - if *self.rank() != 2 || *other.rank() != 2 { - return Err(ShapeError::IncompatibleShapes.into()); - } - if self[1] != other[0] { + if *self.rank() != 2 || *other.rank() != 2 || self[1] != other[0] { return Err(ShapeError::IncompatibleShapes.into()); } 
Ok(Self::from((self[0], other[1]))) @@ -54,7 +47,7 @@ impl Shape { } /// Returns true if the strides are C contiguous (aka row major). - pub fn is_contiguous(&self, stride: &[usize]) -> bool { + pub fn is_contiguous(&self, stride: &Stride) -> bool { if self.0.len() != stride.len() { return false; } @@ -78,10 +71,6 @@ impl Shape { } } - pub fn ndim(&self) -> usize { - self.0.len() - } - pub fn nrows(&self) -> usize { if self.len() >= 1 { *self.0.first().unwrap() @@ -132,7 +121,7 @@ impl Shape { shape } - pub(crate) fn stride_contiguous(&self) -> Vec { + pub(crate) fn stride_contiguous(&self) -> Stride { let mut stride: Vec<_> = self .0 .iter() @@ -144,7 +133,44 @@ impl Shape { }) .collect(); stride.reverse(); - stride + stride.into() + } + + pub fn upcast(&self, to: &Shape, stride: &Stride) -> Option { + let mut new_stride = to.slice().to_vec(); + // begin at the back (the least significant dimension) + // size of the axis has to either agree or `from` has to be 1 + if to.rank() < self.rank() { + return None; + } + + { + let mut new_stride_iter = new_stride.as_mut_slice().iter_mut().rev(); + for ((er, es), dr) in self + .slice() + .iter() + .rev() + .zip(stride.slice().iter().rev()) + .zip(new_stride_iter.by_ref()) + { + /* update strides */ + if *dr == *er { + /* keep stride */ + *dr = *es; + } else if *er == 1 { + /* dead dimension, zero stride */ + *dr = 0 + } else { + return None; + } + } + + /* set remaining strides to zero */ + for dr in new_stride_iter { + *dr = 0; + } + } + Some(new_stride.into()) } } diff --git a/tensor/src/shape/stride.rs b/tensor/src/shape/stride.rs index 8a298dfe..7bacd32d 100644 --- a/tensor/src/shape/stride.rs +++ b/tensor/src/shape/stride.rs @@ -2,17 +2,74 @@ Appellation: stride Contrib: FL03 */ +use super::{Axis, Rank}; +use core::borrow::{Borrow, BorrowMut}; use core::ops::{Deref, DerefMut}; +#[cfg(feature = "serde")] +use serde::{Deserialize, Serialize}; pub trait IntoStride { fn into_stride(self) -> Stride; } -pub struct Stride(pub Vec); +impl IntoStride for S +where + S: Into, +{ + fn into_stride(self) -> Stride { + self.into() + } +} + +#[derive(Clone, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)] +#[cfg_attr(feature = "serde", derive(Deserialize, Serialize))] +pub struct Stride(Vec); impl Stride { - pub fn ndim(&self) -> usize { - self.0.len() + pub fn new(stride: Vec) -> Self { + Self(stride) + } + + pub fn with_capacity(capacity: usize) -> Self { + Self(Vec::with_capacity(capacity)) + } + + pub fn get(&self, index: usize) -> Option<&usize> { + self.0.get(index) + } + + pub fn iter(&self) -> std::slice::Iter { + self.0.iter() + } + + pub fn iter_mut(&mut self) -> std::slice::IterMut { + self.0.iter_mut() + } + /// Returns the rank of the stride; i.e., the number of dimensions. + pub fn rank(&self) -> Rank { + self.0.len().into() + } + /// Reverses the stride. + pub fn reverse(&mut self) { + self.0.reverse() + } + /// Returns a reference to the stride. + pub fn slice(&self) -> &[usize] { + &self.0 + } + /// Returns a mutable reference to the stride. + pub fn slice_mut(&mut self) -> &mut [usize] { + &mut self.0 + } + /// Swaps two elements in the stride. 
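+ /// (illustrative) swapping the outermost and innermost entries of a stride:
+ /// ```
+ /// let mut stride = Stride::from(vec![6, 3, 1]);
+ /// stride.swap(0, 2);
+ /// assert_eq!(stride.slice(), &[1, 3, 6]);
+ /// ```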
+ pub fn swap(&mut self, a: usize, b: usize) { + self.0.swap(a, b) + } + + pub fn swap_axes(&self, a: Axis, b: Axis) -> Self { + let mut stride = self.clone(); + stride.swap(a.axis(), b.axis()); + stride } } @@ -28,6 +85,18 @@ impl AsMut<[usize]> for Stride { } } +impl Borrow<[usize]> for Stride { + fn borrow(&self) -> &[usize] { + &self.0 + } +} + +impl BorrowMut<[usize]> for Stride { + fn borrow_mut(&mut self) -> &mut [usize] { + &mut self.0 + } +} + impl Deref for Stride { type Target = [usize]; @@ -54,6 +123,14 @@ impl FromIterator for Stride { } } +// impl Iterator for Stride { +// type Item = usize; + +// fn next(&mut self) -> Option { +// self.0.next() +// } +// } + impl IntoIterator for Stride { type Item = usize; type IntoIter = std::vec::IntoIter; @@ -62,3 +139,15 @@ impl IntoIterator for Stride { self.0.into_iter() } } + +impl From> for Stride { + fn from(v: Vec) -> Self { + Stride(v) + } +} + +impl From<&[usize]> for Stride { + fn from(v: &[usize]) -> Self { + Stride(v.to_vec()) + } +} diff --git a/tensor/src/store/layout.rs b/tensor/src/store/layout.rs index c9ad62df..9bb07f9c 100644 --- a/tensor/src/store/layout.rs +++ b/tensor/src/store/layout.rs @@ -2,28 +2,51 @@ Appellation: layout Contrib: FL03 */ -//! # Layout -//! -//! -use crate::shape::{Axis, IntoShape, Shape, ShapeError, ShapeResult}; +use crate::shape::{Axis, IntoShape, IntoStride, Rank, Shape, ShapeError, ShapeResult, Stride}; #[cfg(feature = "serde")] use serde::{Deserialize, Serialize}; +/// A layout is a description of how data is stored in memory. #[derive(Clone, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)] #[cfg_attr(feature = "serde", derive(Deserialize, Serialize))] pub struct Layout { pub(crate) offset: usize, pub(crate) shape: Shape, - pub(crate) stride: Vec, + pub(crate) stride: Stride, } impl Layout { - pub fn new(offset: usize, shape: Shape, stride: Vec) -> Self { + pub fn new(offset: usize, shape: impl IntoShape, stride: impl IntoStride) -> Self { Self { offset, - shape, - stride, + shape: shape.into_shape(), + stride: stride.into_stride(), + } + } + /// Broadcast the layout to a new shape. + /// + /// The new shape must have the same or higher rank than the current shape. + pub fn broadcast_as(&self, shape: impl IntoShape) -> ShapeResult { + let shape = shape.into_shape(); + if shape.rank() < self.shape().rank() { + return Err(ShapeError::IncompatibleShapes); } + let added_dims = shape.rank() - self.shape().rank(); + let mut stride = vec![0; added_dims]; + for (&dst_dim, (&src_dim, &src_stride)) in shape[added_dims..] + .iter() + .zip(self.shape().iter().zip(self.stride().iter())) + { + let s = if dst_dim == src_dim { + src_stride + } else if src_dim != 1 { + return Err(ShapeError::IncompatibleShapes); + } else { + 0 + }; + stride.push(s) + } + Ok(Self::new(self.offset, shape, stride)) } /// Create a new layout with a contiguous stride. pub fn contiguous(shape: impl IntoShape) -> Self { @@ -35,23 +58,23 @@ impl Layout { stride, } } - - pub fn ndim(&self) -> usize { - debug_assert_eq!(self.stride.len(), self.shape.ndim()); - self.shape.ndim() - } - + /// Get a peek at the offset of the layout. pub fn offset(&self) -> usize { self.offset } - + /// Return the rank (number of dimensions) of the layout. + pub fn rank(&self) -> Rank { + debug_assert_eq!(self.stride.len(), *self.shape.rank()); + self.shape.rank() + } + /// Reshape the layout to a new shape. 
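+ /// Note that the stride is recomputed as if the data were contiguous
+ /// (row-major); e.g. reshaping any layout to (2, 3) yields the stride [3, 1]
+ /// (illustrative).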
pub fn reshape(&mut self, shape: impl IntoShape) { self.shape = shape.into_shape(); self.stride = self.shape.stride_contiguous(); } - + /// Reverse the order of the axes. pub fn reverse_axes(mut self) -> Layout { - self.shape.slice_mut().reverse(); + self.shape.reverse(); self.stride.reverse(); self } @@ -64,17 +87,15 @@ impl Layout { self.shape.size() } - pub fn stride(&self) -> &[usize] { + pub fn stride(&self) -> &Stride { &self.stride } pub fn swap_axes(&self, a: Axis, b: Axis) -> Layout { - let mut stride = self.stride.to_vec(); - stride.swap(a.axis(), b.axis()); Layout { offset: self.offset, shape: self.shape.swap_axes(a, b), - stride, + stride: self.stride.swap_axes(a, b), } } @@ -102,26 +123,9 @@ impl Layout { // Internal methods impl Layout { - pub fn position(&self, coords: impl AsRef<[usize]>) -> ShapeResult { - let coords = coords.as_ref(); - if coords.len() != self.shape.ndim() { - return Err(ShapeError::DimensionMismatch.into()); - } - for (&coord, &dim) in coords.iter().zip(self.shape.slice().iter()) { - if coord >= dim { - return Err(ShapeError::MismatchedElements.into()); - } - } - let mut index = self.offset; - for (i, &coord) in coords.iter().enumerate() { - index += coord * self.stride[i]; - } - Ok(index) - } - - pub fn select(&self, coords: impl AsRef<[usize]>) -> usize { + pub(crate) fn index(&self, coords: impl AsRef<[usize]>) -> usize { let coords = coords.as_ref(); - if coords.len() != self.shape.ndim() { + if coords.len() != *self.shape.rank() { panic!("Dimension mismatch"); } let index = coords @@ -140,8 +144,8 @@ mod tests { fn test_position() { let shape = (3, 3); let layout = Layout::contiguous(shape); - assert_eq!(layout.select(&[0, 0]), 0); - assert_eq!(layout.select(&[0, 1]), 1); - assert_eq!(layout.select(&[2, 2]), 8); + assert_eq!(layout.index(&[0, 0]), 0); + assert_eq!(layout.index(&[0, 1]), 1); + assert_eq!(layout.index(&[2, 2]), 8); } } diff --git a/tensor/src/tensor.rs b/tensor/src/tensor.rs index 08a485b4..05bb2c85 100644 --- a/tensor/src/tensor.rs +++ b/tensor/src/tensor.rs @@ -2,12 +2,11 @@ Appellation: tensor Contrib: FL03 */ -// use crate::ops::TrackedOp; -use crate::prelude::{BackpropOp, IntoShape, Rank, Shape, TensorId, TensorKind, TensorOp}; +use crate::ops::{BackpropOp, TensorOp}; +use crate::prelude::{IntoShape, Rank, Shape, TensorId, TensorKind}; use crate::store::Layout; use acme::prelude::BinaryOp; -use std::ops::Index; -// use std::sync::{Arc, RwLock}; +use std::ops::{Index, IndexMut}; pub(crate) fn new( kind: impl Into, @@ -64,6 +63,16 @@ impl TensorBase { } } + pub fn from_scalar(value: T) -> Self { + Self { + id: TensorId::new(), + kind: TensorKind::default(), + layout: Layout::contiguous(()), + op: None.into(), + store: vec![value], + } + } + pub fn from_vec( kind: impl Into, op: impl Into>, @@ -214,7 +223,7 @@ impl TensorBase { pub(crate) fn data(&self) -> &Vec { &self.store } - + #[allow(dead_code)] pub(crate) fn data_mut(&mut self) -> &mut Vec { &mut self.store } @@ -224,16 +233,17 @@ impl Index<&[usize]> for TensorBase { type Output = T; fn index(&self, index: &[usize]) -> &Self::Output { - let i = self.layout().position(index).unwrap(); + let i = self.layout().index(index); &self.store[i] } } -// impl IndexMut<&[usize]> for Tensor { -// fn index_mut(&mut self, index: &[usize]) -> &mut Self::Output { -// self.get_mut(index).unwrap() -// } -// } +impl IndexMut<&[usize]> for TensorBase { + fn index_mut(&mut self, index: &[usize]) -> &mut Self::Output { + let i = self.layout().index(index); + &mut self.store[i] + } +} impl Eq for 
TensorBase where T: Eq {} diff --git a/tensor/tests/reshape.rs b/tensor/tests/reshape.rs index 056fbdda..0be2a507 100644 --- a/tensor/tests/reshape.rs +++ b/tensor/tests/reshape.rs @@ -7,6 +7,17 @@ extern crate acme_tensor as acme; use acme::prelude::Tensor; +#[test] +#[ignore = "Not implemented"] +fn test_broadcast() { + let shape = (4, 1); + let a = Tensor::::ones(shape); + let b = a.clone().broadcast((4, 1, 1)); + + assert_ne!(&a.shape(), &b.shape()); + assert_eq!(&a.size(), &b.size()); +} + #[test] fn test_reshape() { let shape = (2, 2); diff --git a/tensor/tests/tensor.rs b/tensor/tests/tensor.rs index 055ea804..97221e5f 100644 --- a/tensor/tests/tensor.rs +++ b/tensor/tests/tensor.rs @@ -18,3 +18,16 @@ fn test_tensor() { assert_eq!(a.size(), b.size()); assert_eq!(a.stride(), b.stride()); } + +#[test] +fn test_higher_dim() { + let shape = (2, 2, 2, 2); + let a = Tensor::::ones(shape); + let b = a.zeros_like(); + + assert_ne!(a.id(), b.id()); + assert_eq!(a.shape(), b.shape()); + assert_eq!(a.size(), b.size()); + assert_eq!(a.stride(), b.stride()); + assert_eq!(a.stride().len(), 4); +} From 9a0cbff19369a114463c6c3f34cde73e50ead7d6 Mon Sep 17 00:00:00 2001 From: Joe McCain III Date: Thu, 28 Mar 2024 18:03:54 -0500 Subject: [PATCH 60/87] update Signed-off-by: Joe McCain III --- tensor/tests/reshape.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tensor/tests/reshape.rs b/tensor/tests/reshape.rs index 0be2a507..0152c91a 100644 --- a/tensor/tests/reshape.rs +++ b/tensor/tests/reshape.rs @@ -31,7 +31,7 @@ fn test_reshape() { #[test] fn test_transpose() { let shape = (2, 3); - let a = Tensor::::linspace(0.0, 6.0, 6).with_shape(shape); + let a = Tensor::::linspace(0f64, 6f64, 6).with_shape(shape); let at = a.t(); println!("Transposed Shape: {:?}", &at.shape()); From 9d70a4526c072bf55527e20b582432721ba055d0 Mon Sep 17 00:00:00 2001 From: Joe McCain III Date: Thu, 28 Mar 2024 18:05:39 -0500 Subject: [PATCH 61/87] update Signed-off-by: Joe McCain III --- tensor/tests/reshape.rs | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/tensor/tests/reshape.rs b/tensor/tests/reshape.rs index 0152c91a..5c6dcaeb 100644 --- a/tensor/tests/reshape.rs +++ b/tensor/tests/reshape.rs @@ -24,8 +24,9 @@ fn test_reshape() { let a = Tensor::::ones(shape); let b = a.clone().reshape((4,)).unwrap(); - assert_ne!(&a.shape(), &b.shape()); - assert_eq!(&a.size(), &b.size()); + assert_ne!(a.rank(), b.rank()); + assert_ne!(a.shape(), b.shape()); + assert_eq!(a.size(), b.size()); } #[test] From dfc599163016140d10349f565c2a3612c35286f2 Mon Sep 17 00:00:00 2001 From: Joe McCain III Date: Thu, 28 Mar 2024 18:15:23 -0500 Subject: [PATCH 62/87] update Signed-off-by: Joe McCain III --- tensor/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tensor/src/lib.rs b/tensor/src/lib.rs index 6956ae1f..67d8e982 100644 --- a/tensor/src/lib.rs +++ b/tensor/src/lib.rs @@ -12,7 +12,7 @@ extern crate alloc; extern crate acme_core as acme; #[doc(inline)] -pub use self::{seal::*, tensor::*, utils::*}; +pub use self::{tensor::*, utils::*}; #[macro_use] pub(crate) mod seal; From 9de312dce81cd398c11569a3054640803a394f8a Mon Sep 17 00:00:00 2001 From: Joe McCain III Date: Fri, 29 Mar 2024 08:49:46 -0500 Subject: [PATCH 63/87] update Signed-off-by: Joe McCain III --- acme/benches/default.rs | 74 ++++++++++ acme/tests/macros.rs | 1 + core/src/eval/mod.rs | 16 ++- core/src/id/id.rs | 24 ++-- core/src/id/{ => kinds}/atomic.rs | 2 +- core/src/id/mod.rs | 15 ++- core/src/lib.rs | 9 -- 
core/src/ops/kinds.rs | 12 ++ core/src/ops/mod.rs | 13 ++ core/src/ops/unary/kinds.rs | 1 + core/src/primitives.rs | 16 --- core/src/specs/func/structural.rs | 15 ++- core/src/specs/mod.rs | 6 + core/src/types/constants.rs | 6 +- core/src/types/dual.rs | 210 +++++++++++++++++------------ core/src/types/mod.rs | 29 ++-- core/src/types/variables.rs | 172 ++++++++++++++++++++--- core/src/utils.rs | 25 ---- derive/src/lib.rs | 16 +-- macros/src/lib.rs | 33 +++-- tensor/src/backend/cpu/mod.rs | 10 ++ tensor/src/backend/devices.rs | 12 ++ tensor/src/backend/mod.rs | 11 ++ tensor/src/data/specs.rs | 1 + tensor/src/linalg/arith.rs | 4 - tensor/src/linalg/mod.rs | 7 +- tensor/src/ops/kinds/reshape.rs | 37 +++++ tensor/src/ops/mod.rs | 10 +- tensor/src/ops/{kinds.rs => op.rs} | 0 tensor/src/shape/axis.rs | 11 +- tensor/src/shape/mod.rs | 3 +- tensor/src/shape/rank.rs | 39 +++--- tensor/src/shape/shape.rs | 57 ++++---- tensor/src/shape/stride.rs | 2 +- tensor/src/specs/mod.rs | 89 ++++++++---- tensor/src/specs/ndtensor.rs | 4 +- tensor/src/store/layout.rs | 6 +- tensor/src/tensor.rs | 25 ++-- tensor/tests/reshape.rs | 1 - 39 files changed, 704 insertions(+), 320 deletions(-) create mode 100644 acme/benches/default.rs rename core/src/id/{ => kinds}/atomic.rs (98%) delete mode 100644 core/src/primitives.rs delete mode 100644 core/src/utils.rs create mode 100644 tensor/src/backend/cpu/mod.rs create mode 100644 tensor/src/backend/devices.rs delete mode 100644 tensor/src/linalg/arith.rs create mode 100644 tensor/src/ops/kinds/reshape.rs rename tensor/src/ops/{kinds.rs => op.rs} (100%) diff --git a/acme/benches/default.rs b/acme/benches/default.rs new file mode 100644 index 00000000..318d3e77 --- /dev/null +++ b/acme/benches/default.rs @@ -0,0 +1,74 @@ +/* + Appellation: default + Contrib: FL03 +*/ +#![feature(test)] +extern crate test; + +use test::Bencher; + +// bench: find the `BENCH_SIZE` first terms of the fibonacci sequence +static BENCH_SIZE: usize = 20; + +// function to benchmark must be annotated with `#[bench]` +#[bench] +fn recursive_fibonacci(b: &mut Bencher) { + // exact code to benchmark must be passed as a closure to the iter + // method of Bencher + b.iter(|| (0..BENCH_SIZE).map(fib::fibonacci).collect::>()) +} + +#[bench] +fn iterative_fibonacci(b: &mut Bencher) { + b.iter(|| fib::Fibonacci::seq().take(BENCH_SIZE).collect::>()) +} + +pub mod fib { + // recursive fibonacci + pub fn fibonacci(n: usize) -> u32 { + if n < 2 { + 1 + } else { + fibonacci(n - 1) + fibonacci(n - 2) + } + } + + // iterative fibonacci + pub struct Fibonacci { + pub curr: u32, + next: u32, + } + + impl Fibonacci { + pub fn new(curr: u32, next: u32) -> Self { + Self { curr, next } + } + + pub fn seq() -> Self { + Self::new(1, 1) + } + + pub fn value(&self) -> u32 { + self.curr + } + } + + impl Default for Fibonacci { + fn default() -> Self { + Self { curr: 1, next: 1 } + } + } + + impl Iterator for Fibonacci { + type Item = u32; + + fn next(&mut self) -> Option { + use std::mem::replace; + + let next = self.curr + self.next; + let prev = replace(&mut self.next, next); + + Some(replace(&mut self.curr, prev)) + } + } +} diff --git a/acme/tests/macros.rs b/acme/tests/macros.rs index 28d7b8fc..5f4afd15 100644 --- a/acme/tests/macros.rs +++ b/acme/tests/macros.rs @@ -2,6 +2,7 @@ Appellation: macros Contrib: FL03 */ +#![allow(unused)] #![cfg(all(test, feature = "macros"))] extern crate acme; diff --git a/core/src/eval/mod.rs b/core/src/eval/mod.rs index 9e14cc16..97a4e088 100644 --- a/core/src/eval/mod.rs +++ 
b/core/src/eval/mod.rs @@ -6,16 +6,24 @@ pub use self::evaluator::*; pub(crate) mod evaluator; -pub trait Evaluate { +pub trait EvaluateOnce { type Output; - fn eval(self) -> Self::Output; + fn eval_once(self) -> Self::Output; } -impl Evaluate for f64 { +pub trait EvaluateMut: EvaluateOnce { + fn eval_mut(&mut self) -> Self::Output; +} + +pub trait Evaluate: EvaluateMut { + fn eval(&self) -> Self::Output; +} + +impl EvaluateOnce for f64 { type Output = f64; - fn eval(self) -> Self::Output { + fn eval_once(self) -> Self::Output { self } } diff --git a/core/src/id/id.rs b/core/src/id/id.rs index 92067e15..dd787b42 100644 --- a/core/src/id/id.rs +++ b/core/src/id/id.rs @@ -8,13 +8,13 @@ use serde::{Deserialize, Serialize}; #[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)] #[cfg_attr(feature = "serde", derive(Deserialize, Serialize,))] -pub struct Id { +pub struct Id { id: AtomicId, - index: usize, + index: Idx, } -impl Id { - pub fn new(index: usize) -> Self { +impl Id { + pub fn new(index: Idx) -> Self { Self { id: AtomicId::new(), index, @@ -25,19 +25,15 @@ impl Id { *self.id } - pub fn index(&self) -> usize { - self.index - } - - pub(crate) fn next_index(&self) -> Self { - Self { - id: self.id, - index: self.index() + 1, - } + pub fn index(&self) -> &Idx { + &self.index } } -impl std::fmt::Display for Id { +impl std::fmt::Display for Id +where + Idx: std::fmt::Display, +{ fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { if f.alternate() { write!(f, "{}.{}", self.index(), self.id) diff --git a/core/src/id/atomic.rs b/core/src/id/kinds/atomic.rs similarity index 98% rename from core/src/id/atomic.rs rename to core/src/id/kinds/atomic.rs index 62e65386..aaa210bb 100644 --- a/core/src/id/atomic.rs +++ b/core/src/id/kinds/atomic.rs @@ -13,7 +13,7 @@ use std::sync::atomic::{AtomicUsize, Ordering::Relaxed}; #[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)] #[cfg_attr(feature = "serde", derive(Deserialize, Serialize,))] -#[repr(transparent)] +#[repr(C)] pub struct AtomicId(usize); impl AtomicId { diff --git a/core/src/id/mod.rs b/core/src/id/mod.rs index f1df797a..3869f3d9 100644 --- a/core/src/id/mod.rs +++ b/core/src/id/mod.rs @@ -5,12 +5,23 @@ //! # Ids //! //! -pub use self::{atomic::*, id::*}; +pub use self::{id::Id, kinds::*}; -pub(crate) mod atomic; pub(crate) mod id; +pub(crate) mod kinds { + pub use self::atomic::AtomicId; + + pub(crate) mod atomic; +} + pub trait Identifier {} +pub trait Identifiable { + type Id: Identifier; + + fn id(&self) -> Self::Id; +} + #[cfg(test)] mod tests {} diff --git a/core/src/lib.rs b/core/src/lib.rs index 3d6ab0a0..de6ee071 100644 --- a/core/src/lib.rs +++ b/core/src/lib.rs @@ -5,12 +5,6 @@ //! # Core //! //! 
- -pub use self::{primitives::*, utils::*}; - -pub(crate) mod primitives; -pub(crate) mod utils; - pub mod errors; pub mod eval; pub mod id; @@ -20,9 +14,6 @@ pub mod specs; pub mod types; pub mod prelude { - pub use crate::primitives::*; - pub use crate::utils::*; - pub use crate::errors::*; pub use crate::eval::*; pub use crate::id::*; diff --git a/core/src/ops/kinds.rs b/core/src/ops/kinds.rs index cca99083..edf910b4 100644 --- a/core/src/ops/kinds.rs +++ b/core/src/ops/kinds.rs @@ -34,6 +34,18 @@ pub enum Op { Unary(UnaryOp), } +impl From for Op { + fn from(op: BinaryOp) -> Self { + Self::Binary(op) + } +} + +impl From for Op { + fn from(op: UnaryOp) -> Self { + Self::Unary(op) + } +} + pub enum Expr { Binary(BinaryOperator>), } diff --git a/core/src/ops/mod.rs b/core/src/ops/mod.rs index fdfd0be0..1b6eb04d 100644 --- a/core/src/ops/mod.rs +++ b/core/src/ops/mod.rs @@ -12,6 +12,19 @@ pub(crate) mod kinds; pub mod binary; pub mod unary; +pub trait IntoOp { + fn into_op(self) -> Op; +} + +impl IntoOp for S +where + S: Into, +{ + fn into_op(self) -> Op { + self.into() + } +} + pub trait Operation { type Output; diff --git a/core/src/ops/unary/kinds.rs b/core/src/ops/unary/kinds.rs index 8230b14c..ebf24c22 100644 --- a/core/src/ops/unary/kinds.rs +++ b/core/src/ops/unary/kinds.rs @@ -43,6 +43,7 @@ pub enum UnaryOp { Inv, Ln, Neg, + Not, Sin, Sinh, Sqrt, diff --git a/core/src/primitives.rs b/core/src/primitives.rs deleted file mode 100644 index 2422d0e4..00000000 --- a/core/src/primitives.rs +++ /dev/null @@ -1,16 +0,0 @@ -/* - Appellation: primitives - Contrib: FL03 -*/ -pub use self::types::*; - -mod constants {} - -mod statics {} - -mod types { - /// A boxed error type for use in the library. - pub type BoxError = Box; - /// A boxed result type for use in the library. - pub type BoxResult = std::result::Result; -} diff --git a/core/src/specs/func/structural.rs b/core/src/specs/func/structural.rs index c3a499cc..2c81f3c9 100644 --- a/core/src/specs/func/structural.rs +++ b/core/src/specs/func/structural.rs @@ -12,11 +12,12 @@ pub trait StructuralFn { pub trait StructuredArgs {} -pub struct StructFunc -where - F: StructuralFn, - A: StructuredArgs, -{ - args: A, - func: F, +pub struct Sigmoid { + x: T, +} + +impl Sigmoid { + pub fn new(x: T) -> Self { + Self { x } + } } diff --git a/core/src/specs/mod.rs b/core/src/specs/mod.rs index dc71076d..2f683d85 100644 --- a/core/src/specs/mod.rs +++ b/core/src/specs/mod.rs @@ -12,6 +12,12 @@ pub mod func; use crate::errors::PredictError; +pub trait Idx { + type Index; + + fn index(&self) -> Self::Index; +} + pub trait Backward { type Output; diff --git a/core/src/types/constants.rs b/core/src/types/constants.rs index 4afee709..f218b6d9 100644 --- a/core/src/types/constants.rs +++ b/core/src/types/constants.rs @@ -3,7 +3,7 @@ Contrib: FL03 */ -use crate::prelude::{Evaluate, Gradient}; +use crate::prelude::{EvaluateOnce, Gradient}; use num::{Num, One, Zero}; #[cfg(feature = "serde")] use serde::{Deserialize, Serialize}; @@ -68,10 +68,10 @@ where } } -impl Evaluate for Constant { +impl EvaluateOnce for Constant { type Output = T; - fn eval(self) -> Self::Output { + fn eval_once(self) -> Self::Output { self.0 } } diff --git a/core/src/types/dual.rs b/core/src/types/dual.rs index 10382020..868a60b1 100644 --- a/core/src/types/dual.rs +++ b/core/src/types/dual.rs @@ -12,7 +12,7 @@ //! e != 0 //! 
e^2 = 0 -use crate::prelude::{Evaluate, Gradient}; +use crate::prelude::{EvaluateOnce, Gradient}; use num::{Num, One, Zero}; #[cfg(feature = "serde")] use serde::{Deserialize, Serialize}; @@ -58,10 +58,10 @@ where } } -impl Evaluate for Dual { +impl EvaluateOnce for Dual { type Output = T; - fn eval(self) -> Self::Output { + fn eval_once(self) -> Self::Output { self.real } } @@ -77,15 +77,6 @@ where } } -impl From for Dual -where - T: Default, -{ - fn from(value: T) -> Self { - Self::real(value) - } -} - impl Neg for Dual where T: Neg, @@ -112,6 +103,15 @@ unsafe impl Send for Dual {} unsafe impl Sync for Dual {} +impl From for Dual +where + T: Default, +{ + fn from(value: T) -> Self { + Self::real(value) + } +} + impl ops::Div for Dual where T: Copy + ops::Div + ops::Mul + ops::Sub, @@ -128,7 +128,7 @@ where impl ops::Div for Dual where - T: Copy + ops::Div + ops::Mul + ops::Sub, + T: Copy + ops::Div, { type Output = Dual; @@ -137,72 +137,23 @@ where } } -impl ops::Mul for Dual +impl ops::DivAssign for Dual where - T: ops::Mul + ops::Add + Copy, + T: Copy + ops::DivAssign + num::traits::NumOps, { - type Output = Dual; - - fn mul(self, rhs: Self) -> Self::Output { - Dual::new( - self.real * rhs.real, - self.dual * rhs.real + rhs.dual * self.real, - ) + fn div_assign(&mut self, rhs: Self) { + // quotient rule: update the dual part before the real part is overwritten + self.dual = (self.dual * rhs.real - self.real * rhs.dual) / (rhs.real * rhs.real); + self.real /= rhs.real; } } -impl ops::Mul for Dual +impl ops::DivAssign for Dual where - T: ops::Mul, + T: Copy + ops::DivAssign, { - type Output = Dual; - - fn mul(self, rhs: T) -> Self::Output { - Dual::new(self.real * rhs, self.dual) - } -} - -impl ops::Rem for Dual -where - T: ops::Rem, -{ - type Output = Dual; - - fn rem(self, rhs: Self) -> Self::Output { - Dual::new(self.real % rhs.real, self.dual % rhs.dual) - } -} - -impl ops::Rem for Dual -where - T: ops::Rem, -{ - type Output = Dual; - - fn rem(self, rhs: T) -> Self::Output { - Dual::new(self.real % rhs, self.dual) - } -} - -impl ops::Sub for Dual -where - T: ops::Sub, -{ - type Output = Dual; - - fn sub(self, rhs: Self) -> Self::Output { - Dual::new(self.real - rhs.real, self.dual - rhs.dual) - } -} - -impl ops::Sub for Dual -where - T: ops::Sub, -{ - type Output = Dual; - - fn sub(self, rhs: T) -> Self::Output { - Dual::new(self.real - rhs, self.dual) + fn div_assign(&mut self, rhs: T) { + self.real /= rhs; + self.dual /= rhs; } } @@ -232,7 +183,7 @@ where impl Zero for Dual where - T: Zero, + T: Copy + Zero, { fn zero() -> Self { Dual::new(T::zero(), T::zero()) @@ -243,31 +194,124 @@ where } } -macro_rules! 
impl_binary_op { + ($(($op:ident, $method:ident, $e:tt)),*) => { + $(impl_binary_op!($op, $method, $e);)* + }; + ($trait:ident, $method:ident, $e:tt) => { + impl std::ops::$trait> for Dual where - T: $trait, + T: Copy + std::ops::$trait, { type Output = Dual; fn $method(self, rhs: Self) -> Self::Output { - Dual::new(self.real.$method(rhs.real), self.dual.$method(rhs.dual)) + let real = self.real $e rhs.real; + let dual = self.dual $e rhs.dual; + Dual::new(real, dual) + } + } + + impl<'a, T> std::ops::$trait<&'a Dual> for Dual + where + T: Copy + std::ops::$trait, + { + type Output = Dual; + + fn $method(self, rhs: &'a Dual) -> Self::Output { + let real = self.real $e rhs.real; + let dual = self.dual $e rhs.dual; + Dual::new(real, dual) + } + } + + impl<'a, T> std::ops::$trait> for &'a Dual + where + T: Copy + std::ops::$trait, + { + type Output = Dual; + + fn $method(self, rhs: Dual) -> Self::Output { + let real = self.real $e rhs.real; + let dual = self.dual $e rhs.dual; + Dual::new(real, dual) + } + } + + impl<'a, T> std::ops::$trait<&'a Dual> for &'a Dual + where + T: Copy + std::ops::$trait, + { + type Output = Dual; + + fn $method(self, rhs: &'a Dual) -> Self::Output { + let real = self.real $e rhs.real; + let dual = self.dual $e rhs.dual; + Dual::new(real, dual) + } + } + + impl std::ops::$trait for Dual + where + T: Copy + std::ops::$trait, + { + type Output = Dual; + + fn $method(self, rhs: T) -> Self::Output { + let real = self.real $e rhs; + Dual::new(real, self.dual) } } - impl $trait for Dual + impl<'a, T> std::ops::$trait for &'a Dual where - T: Copy + $trait, + T: Copy + std::ops::$trait, { type Output = Dual; fn $method(self, rhs: T) -> Self::Output { - Dual::new(self.real.$method(rhs), self.dual.$method(rhs)) + let real = self.real $e rhs; + Dual::new(real, self.dual) + } + } + }; +} + +macro_rules! impl_assign_op { + ($(($op:ident, $method:ident, $e:tt)),*) => { + $(impl_assign_op!($op, $method, $e);)* + }; + ($trait:ident, $method:ident, $e:tt) => { + impl std::ops::$trait> for Dual + where + T: Copy + std::ops::$trait, + { + fn $method(&mut self, rhs: Self) { + self.real $e rhs.real; + self.dual $e rhs.dual; + } + } + + impl<'a, T> std::ops::$trait<&'a Dual> for Dual + where + T: Copy + std::ops::$trait, + { + fn $method(&mut self, rhs: &'a Dual) { + self.real $e rhs.real; + self.dual $e rhs.dual; + } + } + + impl std::ops::$trait for Dual + where + T: Copy + std::ops::$trait, + { + fn $method(&mut self, rhs: T) { + self.real $e rhs; } } }; } -use std::ops::Add; -impl_dual_op!(Add, add); +impl_binary_op!((Add, add, +), (Mul, mul, *), (Rem, rem, %), (Sub, sub, -)); +impl_assign_op!((AddAssign, add_assign, +=), (MulAssign, mul_assign, *=), (RemAssign, rem_assign, %=), (SubAssign, sub_assign, -=)); diff --git a/core/src/types/mod.rs b/core/src/types/mod.rs index b13852d9..6425b6e5 100644 --- a/core/src/types/mod.rs +++ b/core/src/types/mod.rs @@ -12,11 +12,16 @@ pub(crate) mod dual; pub(crate) mod operators; pub(crate) mod variables; +/// A boxed error type for use in the library. +pub type BoxError = Box; +/// A boxed result type for use in the library. +pub type BoxResult = std::result::Result; + macro_rules! impl_op { ($name:ident, $bound:ident, $fn:ident, $val:tt, $e:expr) => { - impl $bound for $name + impl std::ops::$bound for $name where - T: $bound, + T: std::ops::$bound, { type Output = Self; @@ -25,9 +30,9 @@ macro_rules! 
impl_op { } } - impl $bound for $name + impl std::ops::$bound for $name where - T: $bound, + T: std::ops::$bound, { type Output = Self; @@ -39,18 +44,16 @@ macro_rules! impl_op { } macro_rules! impl_const_op { - ($name:ident, $bound:ident, $fn:ident, $e:expr) => { - impl_op!($name, $bound, $fn, 0, |a, b| $name::new($e(a, b))); + ($bound:ident, $fn:ident, $e:expr) => { + impl_op!(Constant, $bound, $fn, 0, |a, b| Constant::new($e(a, b))); }; } -use std::ops::{Add, Div, Mul, Rem, Sub}; - -impl_const_op!(Constant, Add, add, |a, b| a + b); -impl_const_op!(Constant, Div, div, |a, b| a / b); -impl_const_op!(Constant, Mul, mul, |a, b| a * b); -impl_const_op!(Constant, Rem, rem, |a, b| a % b); -impl_const_op!(Constant, Sub, sub, |a, b| a - b); +impl_const_op!(Add, add, |a, b| a + b); +impl_const_op!(Div, div, |a, b| a / b); +impl_const_op!(Mul, mul, |a, b| a * b); +impl_const_op!(Rem, rem, |a, b| a % b); +impl_const_op!(Sub, sub, |a, b| a - b); #[cfg(test)] mod tests { diff --git a/core/src/types/variables.rs b/core/src/types/variables.rs index 009d95ce..69d32ae4 100644 --- a/core/src/types/variables.rs +++ b/core/src/types/variables.rs @@ -2,17 +2,20 @@ Appellation: variables Contrib: FL03 */ -use crate::prelude::{Evaluate, Gradient}; +use crate::eval::{Evaluate, EvaluateMut, EvaluateOnce}; +use crate::prelude::{BinaryOp, Gradient, Op, UnaryOp}; +use core::borrow::{Borrow, BorrowMut}; +use core::ops::{Neg, Not}; use num::{Num, One, Zero}; #[cfg(feature = "serde")] use serde::{Deserialize, Serialize}; -use std::borrow::{Borrow, BorrowMut}; #[cfg_attr(feature = "serde", derive(Deserialize, Serialize,))] #[derive(Clone, Debug, Default, Eq, Hash, Ord, PartialEq, PartialOrd)] #[repr(C)] pub struct Variable { name: String, + operation: Option, pub(crate) value: Option, } @@ -20,10 +23,15 @@ impl Variable { pub fn new(name: impl ToString) -> Self { Self { name: name.to_string(), + operation: None, value: None, } } + pub const fn is_expression(&self) -> bool { + self.operation.is_some() + } + pub const fn is_initialized(&self) -> bool { self.value.is_some() } @@ -32,6 +40,14 @@ impl Variable { &self.name } + pub fn op(&self) -> Option<&Op> { + self.operation.as_ref() + } + + pub fn op_mut(&mut self) -> Option<&mut Op> { + self.operation.as_mut() + } + pub fn value(&self) -> Option<&T> { self.value.as_ref() } @@ -45,6 +61,11 @@ impl Variable { self } + pub fn with_op(mut self, op: impl Into) -> Self { + self.operation = Some(op.into()); + self + } + pub fn with_value(mut self, value: T) -> Self { self.value = Some(value); self @@ -70,12 +91,29 @@ impl std::fmt::Display for Variable { } impl Evaluate for Variable +where + T: Copy + Default, +{ + fn eval(&self) -> Self::Output { + self.value.as_ref().copied().unwrap_or_default() + } +} +impl EvaluateMut for Variable +where + T: Default, +{ + fn eval_mut(&mut self) -> Self::Output { + self.value.take().unwrap_or_default() + } +} + +impl EvaluateOnce for Variable where T: Default, { type Output = T; - fn eval(self) -> Self::Output { + fn eval_once(self) -> Self::Output { self.value.unwrap_or_default() } } @@ -98,9 +136,59 @@ unsafe impl Send for Variable {} unsafe impl Sync for Variable {} +impl Neg for Variable +where + T: Copy + Default + Neg, +{ + type Output = Variable; + + fn neg(self) -> Self::Output { + let name = format!("-{}", self.name()); + let value = self.eval_once().neg(); + Variable::new(name).with_op(UnaryOp::Neg).with_value(value) + } +} + +impl<'a, T> Neg for &'a Variable +where + T: Copy + Default + Neg, +{ + type Output = Variable; + + fn 
neg(self) -> Self::Output { + let name = format!("-{}", self.name()); + let value = self.eval().neg(); + Variable::new(name).with_op(UnaryOp::Neg).with_value(value) + } +} + +impl Not for Variable +where + T: Copy + Default + Not, +{ + type Output = Variable; + + fn not(self) -> Self::Output { + let name = format!("!{}", self.name()); + let value = self.eval_once().not(); + Variable::new(name).with_op(UnaryOp::Not).with_value(value) + } +} + +impl Num for Variable +where + T: Copy + Default + Num, +{ + type FromStrRadixErr = T::FromStrRadixErr; + + fn from_str_radix(str: &str, radix: u32) -> Result { + T::from_str_radix(str, radix).map(|value| Variable::new(str).with_value(value)) + } +} + impl One for Variable where - T: Clone + Default + One, + T: Copy + Default + One, { fn one() -> Self { Variable::new("one").with_value(T::one()) @@ -109,47 +197,89 @@ where impl Zero for Variable where - T: Clone + Default + Zero, + T: Copy + Default + Zero, { fn zero() -> Self { Variable::new("0").with_value(T::zero()) } fn is_zero(&self) -> bool { - self.clone().eval().is_zero() + self.clone().eval_once().is_zero() } } macro_rules! impl_std_op { - ($parent:ident: $trait:ident, $method:ident) => { - impl std::ops::$trait for $parent + ($(($trait:ident, $method:ident, $e:tt)),*) => { + $( + impl_std_op!($trait, $method, $e); + )* + }; + ($trait:ident, $method:ident, $e:tt) => { + impl std::ops::$trait for Variable where - T: Clone + Default + std::ops::$trait, + T: Copy + Default + std::ops::$trait, { - type Output = Self; + type Output = Variable; - fn $method(self, rhs: Self) -> Self::Output { + fn $method(self, rhs: Variable) -> Self::Output { let name = format!("{}", stringify!($method)); - let value = self.eval().$method(rhs.eval()); - $parent::new(name).with_value(value) + let value = self.eval_once() $e rhs.eval_once(); + Variable::new(name).with_op(BinaryOp::$trait).with_value(value) } } - impl std::ops::$trait for $parent + impl<'a, T> std::ops::$trait<&'a Variable> for Variable where - T: Clone + Default + std::ops::$trait, + T: Copy + Default + std::ops::$trait, + { + type Output = Variable; + + fn $method(self, rhs: &'a Variable) -> Self::Output { + let name = format!("{}", stringify!($method)); + let value = self.eval_once() $e rhs.eval(); + Variable::new(name).with_op(BinaryOp::$trait).with_value(value) + } + } + + impl<'a, T> std::ops::$trait> for &'a Variable + where + T: Copy + Default + std::ops::$trait, + { + type Output = Variable; + + fn $method(self, rhs: Variable) -> Self::Output { + let name = format!("{}", stringify!($method)); + let value = self.eval() $e rhs.eval_once(); + Variable::new(name).with_op(BinaryOp::$trait).with_value(value) + } + } + + impl<'a, T> std::ops::$trait<&'a Variable> for &'a Variable + where + T: Copy + Default + std::ops::$trait, + { + type Output = Variable; + + fn $method(self, rhs: &'a Variable) -> Self::Output { + let name = format!("{}", stringify!($method)); + let value = self.eval() $e rhs.eval(); + Variable::new(name).with_op(BinaryOp::$trait).with_value(value) + } + } + + impl std::ops::$trait for Variable + where + T: Copy + Default + std::ops::$trait, { type Output = Self; fn $method(self, rhs: T) -> Self::Output { let name = format!("{}", stringify!($method)); - let value = self.eval().$method(rhs); - $parent::new(name).with_value(value) + let value = self.eval_once() $e rhs; + Variable::new(name).with_op(BinaryOp::$trait).with_value(value) } } }; } -impl_std_op!(Variable: Add, add); -impl_std_op!(Variable: Div, div); -impl_std_op!(Variable: 
Mul, mul); -impl_std_op!(Variable: Sub, sub); + +impl_std_op!((Add, add, +), (Div, div, /), (Mul, mul, *), (Rem, rem, %), (Sub, sub, -)); diff --git a/core/src/utils.rs b/core/src/utils.rs deleted file mode 100644 index 61b4fef4..00000000 --- a/core/src/utils.rs +++ /dev/null @@ -1,25 +0,0 @@ -/* - Appellation: utils - Contrib: FL03 -*/ -use num::Float; - -pub fn sigmoid(x: T) -> T -where - T: Float, -{ - (T::one() + x.neg().exp()).recip() -} - -pub trait Sigmoid { - fn sigmoid(self) -> Self; -} - -impl Sigmoid for T -where - T: Float, -{ - fn sigmoid(self) -> Self { - (T::one() + self.neg().exp()).recip() - } -} diff --git a/derive/src/lib.rs b/derive/src/lib.rs index 638d0a51..ebd7bf80 100644 --- a/derive/src/lib.rs +++ b/derive/src/lib.rs @@ -6,24 +6,16 @@ //! //! extern crate proc_macro; -use proc_macro::TokenStream; -use quote::{format_ident, quote}; -use syn::{parse_macro_input, Data, DataStruct, DeriveInput}; pub(crate) mod ast; pub(crate) mod cmp; pub(crate) mod utils; -#[proc_macro_derive(AnswerFn)] -pub fn derive_answer_fn(_item: TokenStream) -> TokenStream { - "fn answer() -> u32 { 42 }".parse().unwrap() -} - -#[proc_macro_derive(HelperAttr, attributes(helper))] -pub fn derive_helper_attr(_item: TokenStream) -> TokenStream { - TokenStream::new() -} +use proc_macro::TokenStream; +use quote::{format_ident, quote}; +use syn::{parse_macro_input, Data, DataStruct, DeriveInput}; +/// This macro generates a parameter struct and an enum of parameter keys. #[proc_macro_derive(Params, attributes(param))] pub fn params(input: TokenStream) -> TokenStream { // Parse the input tokens into a syntax tree diff --git a/macros/src/lib.rs b/macros/src/lib.rs index a4eb21d8..d63e2a18 100644 --- a/macros/src/lib.rs +++ b/macros/src/lib.rs @@ -17,18 +17,28 @@ use ast::partials::PartialAst; use proc_macro::TokenStream; use syn::parse_macro_input; -#[proc_macro_attribute] -pub fn partial(_attr: TokenStream, item: TokenStream) -> TokenStream { - let ast = parse_macro_input!(item as syn::ItemFn); - let result = grad::handle_item_fn(&ast); - TokenStream::from(result) -} - -/// Compute the gradient of an expression +/// Compute the partial derivative of a given expression w.r.t a particular variable. +/// At the moment, the macro only supports expressions defined within the same scope. /// /// # Examples /// +/// ## Compute the gradient of a simple expression +/// +/// ``` +/// extern crate acme_macros as macros; +/// +/// use macros::autodiff; /// +/// fn main() { +/// let x = 3f64; +/// let y = 4f64; +/// let dx = autodiff!(x: x * y); +/// let dy = autodiff!(y: x * y); +/// +/// assert_eq!(dx, y); +/// assert_eq!(dy, x); +/// } +/// ``` #[proc_macro] pub fn autodiff(input: TokenStream) -> TokenStream { // Parse the input expression into a syntax tree @@ -41,6 +51,13 @@ pub fn autodiff(input: TokenStream) -> TokenStream { TokenStream::from(result) } +#[proc_macro_attribute] +pub fn partial(_attr: TokenStream, item: TokenStream) -> TokenStream { + let ast = parse_macro_input!(item as syn::ItemFn); + let result = grad::handle_item_fn(&ast); + TokenStream::from(result) +} + pub(crate) mod kw { syn::custom_keyword!(eval); syn::custom_keyword!(grad); diff --git a/tensor/src/backend/cpu/mod.rs b/tensor/src/backend/cpu/mod.rs new file mode 100644 index 00000000..cfc95f37 --- /dev/null +++ b/tensor/src/backend/cpu/mod.rs @@ -0,0 +1,10 @@ +/* + Appellation: cpu + Contrib: FL03 +*/ +//! # CPU +//! +//! 
+ +#[cfg(test)] +mod tests {} diff --git a/tensor/src/backend/devices.rs b/tensor/src/backend/devices.rs new file mode 100644 index 00000000..92dd35e1 --- /dev/null +++ b/tensor/src/backend/devices.rs @@ -0,0 +1,12 @@ +/* + Appellation: devices + Contrib: FL03 +*/ + +pub enum Device { + CPU, + Cuda, +} + +#[cfg(test)] +mod tests {} diff --git a/tensor/src/backend/mod.rs b/tensor/src/backend/mod.rs index 3bbf3dd6..d0af749b 100644 --- a/tensor/src/backend/mod.rs +++ b/tensor/src/backend/mod.rs @@ -5,11 +5,22 @@ //! # Backend //! //! +pub use self::devices::Device; + +pub(crate) mod devices; + +pub mod cpu; pub enum TensorType { Scalar(T), Tensor(Vec>), } +pub trait Backend {} + +pub trait BackendStorage { + type Backend: Backend; +} + #[cfg(test)] mod tests {} diff --git a/tensor/src/data/specs.rs b/tensor/src/data/specs.rs index 100cec36..83f7dd4b 100644 --- a/tensor/src/data/specs.rs +++ b/tensor/src/data/specs.rs @@ -140,6 +140,7 @@ pub unsafe trait RawDataClone: RawData { } } +/// Raw Data Substitution pub trait RawDataSubst: RawData { /// The resulting array storage of the same kind but substituted element type type Output: RawData; diff --git a/tensor/src/linalg/arith.rs b/tensor/src/linalg/arith.rs deleted file mode 100644 index c311b924..00000000 --- a/tensor/src/linalg/arith.rs +++ /dev/null @@ -1,4 +0,0 @@ -/* - Appellation: arith - Contrib: FL03 -*/ diff --git a/tensor/src/linalg/mod.rs b/tensor/src/linalg/mod.rs index 22a58f91..bffd3f69 100644 --- a/tensor/src/linalg/mod.rs +++ b/tensor/src/linalg/mod.rs @@ -5,7 +5,6 @@ //! # Linear Algebra //! //! -pub mod arith; pub mod uplo; use crate::shape::Axis; @@ -21,13 +20,13 @@ pub trait Matmul { fn matmul(&self, rhs: &Rhs) -> Self::Output; } -pub trait Transpose { - fn transpose(&self, swap: Axis, with: Axis) -> Self; +pub trait SwapAxes { + fn swap_axes(&self, swap: Axis, with: Axis) -> Self; } pub(crate) mod prelude { pub use super::uplo::UPLO; - pub use super::{Inverse, Matmul}; + pub use super::{Inverse, Matmul, SwapAxes}; } #[cfg(test)] diff --git a/tensor/src/ops/kinds/reshape.rs b/tensor/src/ops/kinds/reshape.rs new file mode 100644 index 00000000..cf68c388 --- /dev/null +++ b/tensor/src/ops/kinds/reshape.rs @@ -0,0 +1,37 @@ +/* + Appellation: reshape + Contrib: FL03 +*/ +#[cfg(feature = "serde")] +use serde::{Deserialize, Serialize}; +use strum::{Display, EnumCount, EnumIs, EnumIter, EnumString, VariantNames}; + +#[cfg_attr( + feature = "serde", + derive(Deserialize, Serialize), + serde(rename_all = "snake_case", untagged) +)] +#[derive( + Clone, + Copy, + Debug, + Display, + EnumCount, + EnumIs, + EnumIter, + EnumString, + Eq, + Hash, + Ord, + PartialEq, + PartialOrd, + VariantNames, +)] +#[repr(u8)] +#[strum(serialize_all = "snake_case")] +pub enum ReshapeOp { + Broadcast, + Reshape, + Swap, + Transpose, +} diff --git a/tensor/src/ops/mod.rs b/tensor/src/ops/mod.rs index 93663896..c9dce4d5 100644 --- a/tensor/src/ops/mod.rs +++ b/tensor/src/ops/mod.rs @@ -2,10 +2,16 @@ Appellation: ops Contrib: FL03 */ -pub use self::{backprop::*, kinds::*}; +pub use self::{backprop::*, kinds::*, op::*}; pub(crate) mod backprop; -pub(crate) mod kinds; +pub(crate) mod op; + +pub(crate) mod kinds { + pub use self::reshape::*; + + pub(crate) mod reshape; +} pub trait TensorExpr {} diff --git a/tensor/src/ops/kinds.rs b/tensor/src/ops/op.rs similarity index 100% rename from tensor/src/ops/kinds.rs rename to tensor/src/ops/op.rs diff --git a/tensor/src/shape/axis.rs b/tensor/src/shape/axis.rs index b6fd4fd6..f28f2a40 100644 --- 
a/tensor/src/shape/axis.rs +++ b/tensor/src/shape/axis.rs @@ -2,12 +2,9 @@ Appellation: axis Contrib: FL03 */ -//! # Axis -//! -//! An [Axis] is used to represent a dimension in a tensor. +use core::ops::Deref; #[cfg(feature = "serde")] use serde::{Deserialize, Serialize}; -use std::ops::Deref; pub trait IntoAxis { fn into_axis(self) -> Axis; @@ -19,11 +16,7 @@ impl IntoAxis for usize { } } -pub struct Switch { - pub swap: Axis, - pub with: Axis, -} - +/// An [Axis] is used to represent a dimension in a tensor. #[derive(Clone, Copy, Debug, Default, Eq, Hash, Ord, PartialEq, PartialOrd)] #[cfg_attr(feature = "serde", derive(Deserialize, Serialize))] pub struct Axis(pub(crate) usize); diff --git a/tensor/src/shape/mod.rs b/tensor/src/shape/mod.rs index 014a6575..f2d2b382 100644 --- a/tensor/src/shape/mod.rs +++ b/tensor/src/shape/mod.rs @@ -4,7 +4,8 @@ */ //! # Shapes //! -//! +//! This module implements several useful primitives for working with +//! the shape of a [Tensor](crate::tensor::TensorBase). pub use self::{axis::*, error::*, rank::*, shape::Shape, stride::*}; pub(crate) mod axis; diff --git a/tensor/src/shape/rank.rs b/tensor/src/shape/rank.rs index eade68f9..ea3e982a 100644 --- a/tensor/src/shape/rank.rs +++ b/tensor/src/shape/rank.rs @@ -5,10 +5,10 @@ //! # Rank //! //! The rank of an n-dimensional array describes the number of dimensions +use core::borrow::Borrow; +use core::ops::{Deref, DerefMut}; #[cfg(feature = "serde")] use serde::{Deserialize, Serialize}; -use std::borrow::Borrow; -use std::ops::{Deref, DerefMut}; pub trait IntoRank { fn into_rank(self) -> Rank; @@ -22,13 +22,17 @@ impl IntoRank for usize { #[cfg_attr(feature = "serde", derive(Deserialize, Serialize,))] #[derive(Clone, Copy, Debug, Default, Eq, Hash, Ord, PartialEq, PartialOrd)] -pub struct Rank(pub usize); +pub struct Rank(pub(crate) usize); impl Rank { pub fn new(rank: usize) -> Self { Self(rank) } + pub fn into_inner(self) -> usize { + self.0 + } + pub fn rank(&self) -> usize { self.0 } @@ -91,42 +95,47 @@ unsafe impl Sync for Rank {} macro_rules! 
impl_std_ops { ($trait:tt, $method:ident, $e:tt) => { impl std::ops::$trait for Rank { - type Output = usize; + type Output = Rank; - fn $method(self, rhs: usize) -> Self::Output { - self.0 $e rhs - } + fn $method(self, rhs: usize) -> Self::Output { + let rank = self.0 $e rhs; + Rank(rank) } + } impl std::ops::$trait for Rank { - type Output = usize; + type Output = Rank; fn $method(self, rhs: Rank) -> Self::Output { - self.0 $e rhs.0 + let rank = self.0 $e rhs.0; + Rank(rank) } } impl<'a> std::ops::$trait for &'a Rank { - type Output = usize; + type Output = Rank; fn $method(self, rhs: Rank) -> Self::Output { - self.0 $e rhs.0 + let rank = self.0 $e rhs.0; + Rank(rank) } } impl<'a> std::ops::$trait<&'a Rank> for Rank { - type Output = usize; + type Output = Rank; fn $method(self, rhs: &'a Rank) -> Self::Output { - self.0 $e rhs.0 + let rank = self.0 $e rhs.0; + Rank(rank) } } impl<'a> std::ops::$trait<&'a Rank> for &'a Rank { - type Output = usize; + type Output = Rank; fn $method(self, rhs: &'a Rank) -> Self::Output { - self.0 $e rhs.0 + let rank = self.0 $e rhs.0; + Rank(rank) } } }; diff --git a/tensor/src/shape/shape.rs b/tensor/src/shape/shape.rs index bf108891..e044d1dc 100644 --- a/tensor/src/shape/shape.rs +++ b/tensor/src/shape/shape.rs @@ -2,14 +2,14 @@ Appellation: shape Contrib: FL03 */ -use super::error::ShapeError; use super::{Axis, Rank, Stride}; -use crate::prelude::TensorResult; +use crate::prelude::{ShapeError, SwapAxes, TensorResult}; use core::ops::{self, Deref}; #[cfg(feature = "serde")] use serde::{Deserialize, Serialize}; +/// A shape is a description of the number of elements in each dimension. #[cfg_attr(feature = "serde", derive(Deserialize, Serialize,))] #[derive(Clone, Debug, Default, Eq, Hash, Ord, PartialEq, PartialOrd)] pub struct Shape(Vec); @@ -18,7 +18,7 @@ impl Shape { pub fn new(shape: Vec) -> Self { Self(shape) } - + /// Creates a new shape of rank 0. pub fn scalar() -> Self { Self(Vec::new()) } @@ -26,7 +26,7 @@ impl Shape { pub fn with_capacity(capacity: usize) -> Self { Self(Vec::with_capacity(capacity)) } - + /// Creates a new shape of the given rank with all dimensions set to 0. pub fn zeros(rank: usize) -> Self { Self(vec![0; rank]) } @@ -37,15 +37,10 @@ impl Shape { } Ok(Self::from((self[0], other[1]))) } - - pub fn dims(&self) -> &[usize] { - &self.0 - } - - pub fn insert(&mut self, index: usize, dim: usize) { - self.0.insert(index, dim) + /// Inserts a new dimension along the given [Axis]. + pub fn insert(&mut self, index: Axis, dim: usize) { + self.0.insert(*index, dim) } - /// Returns true if the strides are C contiguous (aka row major). pub fn is_contiguous(&self, stride: &Stride) -> bool { if self.0.len() != stride.len() { @@ -60,7 +55,7 @@ impl Shape { } true } - + /// The number of columns in the shape. pub fn ncols(&self) -> usize { if self.len() >= 2 { self.0[1] @@ -70,7 +65,7 @@ impl Shape { 0 } } - + /// The number of rows in the shape. pub fn nrows(&self) -> usize { if self.len() >= 1 { *self.0.first().unwrap() @@ -78,43 +73,43 @@ impl Shape { 0 } } - + /// Add a new dimension to the shape. pub fn push(&mut self, dim: usize) { self.0.push(dim) } - + /// Get the number of dimensions, or [Rank], of the shape pub fn rank(&self) -> Rank { self.0.len().into() } - - pub fn remove(&mut self, index: usize) -> usize { - self.0.remove(index) + /// Remove the dimension at the given [Axis]. + pub fn remove(&mut self, index: Axis) -> usize { + self.0.remove(*index) } - + /// Reverse the dimensions of the shape. 
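+ /// (illustrative) reversing the shape (2, 3, 4) in place yields (4, 3, 2);
+ /// a layout's `reverse_axes` pairs this with a reversed stride.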
pub fn reverse(&mut self) { self.0.reverse() } - + /// Set the dimension at the given [Axis]. pub fn set(&mut self, index: Axis, dim: usize) { self[index] = dim } - + /// The number of elements in the shape. pub fn size(&self) -> usize { self.0.iter().product() } - + /// Get a reference to the shape as a slice. pub fn slice(&self) -> &[usize] { &self.0 } - + /// Get a mutable reference to the shape as a slice. pub fn slice_mut(&mut self) -> &mut [usize] { &mut self.0 } - - pub(crate) fn swap(&mut self, a: Axis, b: Axis) { + /// Swap the dimensions of the current [Shape] at the given [Axis]. + pub fn swap(&mut self, a: Axis, b: Axis) { self.0.swap(a.axis(), b.axis()) } - + /// Swap the dimensions at the given [Axis], creating a new [Shape] pub fn swap_axes(&self, swap: Axis, with: Axis) -> Self { let mut shape = self.clone(); shape.swap(swap, with); @@ -200,6 +195,14 @@ impl Extend<usize> for Shape { } } +impl SwapAxes for Shape { + fn swap_axes(&self, a: Axis, b: Axis) -> Self { + let mut shape = self.clone(); + shape.swap(a, b); + shape + } +} + impl From<()> for Shape { fn from(_: ()) -> Self { Self::default() diff --git a/tensor/src/shape/stride.rs b/tensor/src/shape/stride.rs index 7bacd32d..4baab8ba 100644 --- a/tensor/src/shape/stride.rs +++ b/tensor/src/shape/stride.rs @@ -23,7 +23,7 @@ where #[derive(Clone, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)] #[cfg_attr(feature = "serde", derive(Deserialize, Serialize))] -pub struct Stride(Vec<usize>); +pub struct Stride(pub(crate) Vec<usize>); impl Stride { pub fn new(stride: Vec<usize>) -> Self { diff --git a/tensor/src/specs/mod.rs b/tensor/src/specs/mod.rs index a41f9cec..8dd4553b 100644 --- a/tensor/src/specs/mod.rs +++ b/tensor/src/specs/mod.rs @@ -6,10 +6,34 @@ pub mod ndtensor; pub mod scalar; +/// [Affine] describes a type of geometric transformation which preserves +/// lines and parallelism. +/// +/// ### General Formula +/// f(x) = A * x + b pub trait Affine<T> { type Output; - fn affine(&self, mul: &T, add: &T) -> Self::Output; + fn affine(&self, mul: T, add: T) -> Self::Output; +} + +impl<A, B, C, D> Affine<B> for A +where + A: std::ops::Mul<B, Output = C>, + C: std::ops::Add<B, Output = D>, + Self: Clone, +{ + type Output = D; + + fn affine(&self, mul: B, add: B) -> Self::Output { + self.clone() * mul + add + } +} + +pub trait Hstack<T> { + type Output; + + fn hstack(&self, other: &T) -> Self::Output; } pub trait Vstack<T> { @@ -18,6 +42,20 @@ pub trait Vstack<T> { fn vstack(&self, other: &T) -> Self::Output; } +pub trait Swap { + type Key; + + fn swap(&mut self, swap: Self::Key, with: Self::Key); +} + +impl<T> Swap for [T] { + type Key = usize; + + fn swap(&mut self, swap: Self::Key, with: Self::Key) { + self.swap(swap, with); + } +} + pub(crate) mod prelude { pub use super::ndtensor::*; pub use super::scalar::*; @@ -26,34 +64,35 @@ #[cfg(test)] mod tests { - // use super::*; - - macro_rules! Scalar { - (complex) => { - Scalar!(cf64) - }; - (float) => { - Scalar!(f64) - }; - (cf64) => { - Complex<f64> - }; - (cf32) => { - Complex<f32> - }; - (f64) => { - f64 - }; - (f32) => { - f32 - }; + use super::scalar::Scalar; + use super::Affine; + use num::Complex; + + #[test] + fn test_affine() { + let a = 3f64; + let b = 4f64; + let c = 5f64; + let exp = 17f64; + + assert_eq!(a.affine(b, c), exp); + + let a = Complex::<f64>::new(3.0, 0.0); + let b = 4f64; + let c = 5f64; + + let exp = Complex::<f64>::new(17.0, 0.0); + + assert_eq!(a.affine(b, c), exp); } #[test] fn test_scalar() { - let a: Scalar!(f64); - a = 3.0; - assert_eq!(a, 3_f64); + let a = 3f64; + let b = 4f64; + + assert_eq!(Scalar::square(a), 9f64); + assert_eq!(Scalar::sqrt(b), 2f64); } } diff --git a/tensor/src/specs/ndtensor.rs b/tensor/src/specs/ndtensor.rs index 9e7e6f7c..d0615638 100644 --- a/tensor/src/specs/ndtensor.rs +++ b/tensor/src/specs/ndtensor.rs @@ -3,7 +3,7 @@ Contrib: FL03 */ use crate::prelude::TensorId; -use crate::shape::prelude::{Rank, Shape}; +use crate::shape::{Rank, Shape, Stride}; use crate::store::Layout; pub trait NdTensor { @@ -25,7 +25,7 @@ pub trait NdTensor { self.shape().size() } - fn stride(&self) -> &[usize] { + fn stride(&self) -> &Stride { self.layout().stride() } } diff --git a/tensor/src/store/layout.rs b/tensor/src/store/layout.rs index 9bb07f9c..4b3558fb 100644 --- a/tensor/src/store/layout.rs +++ b/tensor/src/store/layout.rs @@ -31,9 +31,9 @@ impl Layout { if shape.rank() < self.shape().rank() { return Err(ShapeError::IncompatibleShapes); } - let added_dims = shape.rank() - self.shape().rank(); - let mut stride = vec![0; added_dims]; - for (&dst_dim, (&src_dim, &src_stride)) in shape[added_dims..] + let diff = shape.rank() - self.shape().rank(); + let mut stride = vec![0; *diff]; + for (&dst_dim, (&src_dim, &src_stride)) in shape[*diff..] .iter() .zip(self.shape().iter().zip(self.stride().iter())) { diff --git a/tensor/src/tensor.rs b/tensor/src/tensor.rs index 05bb2c85..c9878468 100644 --- a/tensor/src/tensor.rs +++ b/tensor/src/tensor.rs @@ -62,7 +62,7 @@ impl<T> TensorBase<T> { store, } } - + /// Create a new tensor from a scalar value. pub fn from_scalar(value: T) -> Self { Self { id: TensorId::new(), @@ -87,7 +87,7 @@ impl<T> TensorBase<T> { store, } } - + /// Detach the computational graph from the tensor pub fn detach(&self) -> Self where T: Clone, @@ -96,8 +96,8 @@ self.clone() } else { Self { - id: TensorId::new(), - kind: TensorKind::Normal, + id: self.id, + kind: self.kind, layout: self.layout.clone(), op: BackpropOp::none(), store: self.store.clone(), @@ -109,11 +109,11 @@ self.id } /// Get a reference to the [Layout] of the tensor - pub fn layout(&self) -> &Layout { + pub const fn layout(&self) -> &Layout { &self.layout } /// Get a reference to the operation of the tensor - pub fn op(&self) -> &BackpropOp<T> { + pub const fn op(&self) -> &BackpropOp<T> { &self.op } /// Get an owned reference to the [Rank] of the tensor @@ -218,12 +218,21 @@ impl<T> TensorBase<T> { self } } - +// Internal Methods +#[allow(dead_code)] impl<T> TensorBase<T> { + pub(crate) fn as_slice(&self) -> &[T] { + &self.store + } + + pub(crate) fn as_mut_slice(&mut self) -> &mut [T] { + &mut self.store + } + pub(crate) fn data(&self) -> &Vec<T> { &self.store } - #[allow(dead_code)] + pub(crate) fn data_mut(&mut self) -> &mut Vec<T> { &mut self.store } diff --git a/tensor/tests/reshape.rs b/tensor/tests/reshape.rs index 5c6dcaeb..de1417ec 100644 --- a/tensor/tests/reshape.rs +++ b/tensor/tests/reshape.rs @@ -34,7 +34,6 @@ fn test_transpose() { let shape = (2, 3); let a = Tensor::<f64>::linspace(0f64, 6f64, 6).with_shape(shape); let at = a.t(); - println!("Transposed Shape: {:?}", &at.shape()); let exp = Tensor::from_vec(false, None, (3, 2), vec![0.0, 3.0, 1.0, 4.0, 2.0, 5.0]); assert_ne!(&a, &at); From 0f8b274639d6de3796fcaa0a03fe43ab5fa8b70b Mon Sep 17 00:00:00 2001 From: Joe McCain III Date: Fri, 29 Mar 2024 08:52:19 -0500 Subject: [PATCH 64/87] update Signed-off-by: Joe McCain III --- Cargo.toml | 3 ++- acme/Cargo.toml | 15 ++++++++++----- graphs/Cargo.toml | 2 +- tensor/Cargo.toml | 2 +- 4 files changed, 14 insertions(+), 8 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 930fcab2..1a4761cd 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -8,7 +8,8 @@ keywords = ["acme", "autodiff", "mathematics", "tensor"] license = "Apache-2.0" repository = "https://github.com/FL03/acme" readme = "README.md" -version = "0.3.0" # TODO - Update cargo package version +# version = "0.3.0" +version = "0.3.0-nightly.3" [workspace] default-members = [ diff --git a/acme/Cargo.toml b/acme/Cargo.toml index 8508f379..64ceb870 100644 --- a/acme/Cargo.toml +++ b/acme/Cargo.toml @@ -74,11 +74,16 @@ required-features = ["macros"] [build-dependencies] [dependencies] -acme-core = { path = "../core", version = "0.3.0" } -acme-derive = { optional = true, path = "../derive", version = "0.3.0" } -acme-graphs = { optional = true, path = "../graphs", version = "0.3.0" } -acme-macros = { optional = true, path = "../macros", version = "0.3.0" } -acme-tensor = { optional = true, path = "../tensor", version = "0.3.0" } +acme-core = { path = "../core", version = "0.3.0-nightly.3" } +acme-derive = { optional = true, path = "../derive", version = "0.3.0-nightly.3" } +acme-graphs = { optional = true, path = "../graphs", version = 
"0.3.0-nightly.3" } +acme-macros = { optional = true, path = "../macros", version = "0.3.0-nightly.3" } +acme-tensor = { optional = true, path = "../tensor", version = "0.3.0-nightly.3" } [dev-dependencies] approx = "0.5" diff --git a/graphs/Cargo.toml b/graphs/Cargo.toml index e03aa8ca..385c2a14 100644 --- a/graphs/Cargo.toml +++ b/graphs/Cargo.toml @@ -44,7 +44,7 @@ strum.workspace = true [dependencies.acme-core] path = "../core" -version = "0.3.0" +version = "0.3.0-nightly.3" [package.metadata.docs.rs] all-features = true diff --git a/tensor/Cargo.toml b/tensor/Cargo.toml index e07fb306..ab84a819 100644 --- a/tensor/Cargo.toml +++ b/tensor/Cargo.toml @@ -37,7 +37,7 @@ strum = { features = ["derive"], version = "0.26" } [dependencies.acme-core] path = "../core" -version = "0.3.0" +version = "0.3.0-nightly.3" [dev-dependencies] approx = "0.5" From 1dd1ab20c8e8e9a9bc66b3f396b85c70331cdf6e Mon Sep 17 00:00:00 2001 From: Joe McCain III Date: Fri, 29 Mar 2024 09:31:28 -0500 Subject: [PATCH 65/87] update Signed-off-by: Joe McCain III --- core/src/ops/unary/specs.rs | 35 +++++++++------ tensor/src/actions/mod.rs | 1 + tensor/src/impls/grad.rs | 80 +++++++++++++++++++++++------------ tensor/src/impls/ops/unary.rs | 29 +++++++++++++ tensor/src/specs/mod.rs | 2 +- tensor/src/specs/scalar.rs | 28 ++++++------ 6 files changed, 118 insertions(+), 57 deletions(-) diff --git a/core/src/ops/unary/specs.rs b/core/src/ops/unary/specs.rs index 4e9ceb00..10d0112a 100644 --- a/core/src/ops/unary/specs.rs +++ b/core/src/ops/unary/specs.rs @@ -6,6 +6,9 @@ use num::traits::Inv; use num::Complex; macro_rules! unary_op_trait { + ($(($trait:ident, $method:ident)),*) => { + $(unary_op_trait!($trait, $method);)* + }; ($trait:ident, $method:ident) => { pub trait $trait { type Output; @@ -62,18 +65,22 @@ macro_rules! 
impl_unary_op { }; } -unary_op_trait!(Abs, abs); -unary_op_trait!(Cos, cos); -unary_op_trait!(Cosh, cosh); -unary_op_trait!(Exp, exp); -unary_op_trait!(Ln, ln); -unary_op_trait!(Recip, recip); -unary_op_trait!(Sin, sin); -unary_op_trait!(Sinh, sinh); -unary_op_trait!(Sqrt, sqrt); -unary_op_trait!(Square, square); -unary_op_trait!(Tan, tan); -unary_op_trait!(Tanh, tanh); +unary_op_trait!( + (Abs, abs), + (Exp, exp), + (Ln, ln), + (Recip, recip), + (SquareRoot, sqrt), + (Square, sqr) +); +unary_op_trait!( + (Cos, cos), + (Cosh, cosh), + (Sin, sin), + (Sinh, sinh), + (Tan, tan), + (Tanh, tanh) +); impl<T> Abs for Complex<T> where @@ -103,7 +110,7 @@ where { type Output = T; - fn square(self) -> Self::Output { + fn sqr(self) -> Self::Output { self * self } } @@ -115,6 +122,6 @@ impl_unary_op!(Exp, exp; [f64, f32, Complex<f64>, Complex<f32>]); impl_unary_op!(Ln, ln; [f64, f32, Complex<f64>, Complex<f32>]); impl_unary_op!(Sin, sin; [f64, f32, Complex<f64>, Complex<f32>]); impl_unary_op!(Sinh, sinh; [f64, f32, Complex<f64>, Complex<f32>]); -impl_unary_op!(Sqrt, sqrt; [f64, f32, Complex<f64>, Complex<f32>]); +impl_unary_op!(SquareRoot, sqrt; [f64, f32, Complex<f64>, Complex<f32>]); impl_unary_op!(Tan, tan; [f64, f32, Complex<f64>, Complex<f32>]); impl_unary_op!(Tanh, tanh; [f64, f32, Complex<f64>, Complex<f32>]); diff --git a/tensor/src/actions/mod.rs b/tensor/src/actions/mod.rs index 376a774a..e7750ddb 100644 --- a/tensor/src/actions/mod.rs +++ b/tensor/src/actions/mod.rs @@ -18,6 +18,7 @@ pub mod iter; pub(crate) mod prelude { pub use super::arange::*; + pub use super::grad::*; } #[cfg(test)] diff --git a/tensor/src/impls/grad.rs b/tensor/src/impls/grad.rs index 37bef4fa..cfc34d3c 100644 --- a/tensor/src/impls/grad.rs +++ b/tensor/src/impls/grad.rs @@ -9,29 +9,38 @@ use acme::prelude::{BinaryOp, Store, UnaryOp}; pub(crate) type Visited<K> = std::collections::HashMap<K, bool>; +macro_rules! entry { + ($ctx:expr, $entry:expr) => { + entry!($ctx, $entry, $entry.zeros_like()) + }; + ($ctx:expr, $entry:expr, $default:expr) => { + $ctx.entry($entry.id()).or_insert($default) + }; +} + impl<T> TensorBase<T> where T: Scalar, { - /// [TensorBase::toposort] returns a topologically sorted list of nodes in the graph. - fn toposort(&self) -> Vec<&TensorBase<T>> { + /// [toposort](TensorBase::toposort) is a utility function that returns a topologically sorted list of nodes. + fn toposort(&self, reverse: bool) -> Vec<&TensorBase<T>> { // Here, the sorted nodes are passed as an owned value rather than as a mutable reference to workaround some lifetime limitations. fn walk<'a, T>( - node: &'a TensorBase<T>, + scope: &'a TensorBase<T>, nodes: Vec<&'a TensorBase<T>>, visited: &mut Visited<TensorId>, ) -> (bool, Vec<&'a TensorBase<T>>) { - if let Some(&tg) = visited.get(&node.id()) { + if let Some(&tg) = visited.get(&scope.id()) { return (tg, nodes); } // track the gradient of the current node let mut track = false; // recursively call on the children nodes - let mut nodes = if node.is_variable() { + let mut nodes = if scope.is_variable() { // Do not call recursively on the "leaf" nodes.
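// (A variable marks a leaf of the computation graph: it has no inputs of
// its own, so the walk only records that its gradient must be tracked.)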
track = true; nodes - } else if let Some(op) = node.op().op() { + } else if let Some(op) = scope.op().op() { match op { TensorOp::Binary(lhs, rhs, _kind) => { let (tg, nodes) = walk(lhs, nodes, visited); @@ -50,21 +59,25 @@ } else { nodes }; - visited.insert(node.id(), track); + visited.insert(scope.id(), track); if track { - nodes.push(node); + nodes.push(scope); } (track, nodes) } - + // walk through the dag let (_tg, mut nodes) = walk(self, Vec::new(), &mut Visited::new()); - nodes.reverse(); + // reverse the nodes, if needed + if reverse { + nodes.reverse(); + } + // return the sorted nodes nodes } pub fn grad(&self) -> TensorResult<GradStore<T>> { // get the sorted nodes - let sorted = self.toposort(); + let sorted = self.toposort(true); // initialize a new gradient store let mut store = GradStore::new(); // insert the gradient w.r.t. the current node @@ -76,45 +89,58 @@ } // get the gradient of the node let grad = store.remove(&node.id()).expect("Gradient not found"); + // detach the gradient let grad = grad.detach(); // handle the different types of operations if let Some(op) = &*node.op { match op { TensorOp::Binary(lhs, rhs, kind) => match kind { BinaryOp::Add => { - *store.entry(lhs.id()).or_insert(lhs.zeros_like()) += &grad; - *store.entry(rhs.id()).or_insert(rhs.zeros_like()) += &grad; + *entry!(store, lhs) += &grad; + *entry!(store, rhs) += &grad; } BinaryOp::Div => { - *store.entry(lhs.id()).or_insert(lhs.zeros_like()) += - &grad / rhs.as_ref(); - *store.entry(rhs.id()).or_insert(rhs.zeros_like()) -= + *entry!(store, lhs) += &grad / rhs.as_ref(); + *entry!(store, rhs) -= &grad * lhs.as_ref() / (rhs.as_ref() * rhs.as_ref()); } BinaryOp::Mul => { - *store.entry(lhs.id()).or_insert(lhs.zeros_like()) += - &grad * rhs.as_ref(); - *store.entry(rhs.id()).or_insert(rhs.zeros_like()) += - &grad * lhs.as_ref(); + *entry!(store, lhs) += &grad * rhs.as_ref(); + *entry!(store, rhs) += &grad * lhs.as_ref(); } BinaryOp::Sub => { - *store.entry(lhs.id()).or_insert(lhs.zeros_like()) += &grad; - *store.entry(rhs.id()).or_insert(rhs.zeros_like()) -= &grad; + *entry!(store, lhs) += &grad; + *entry!(store, rhs) -= &grad; } _ => todo!(), }, TensorOp::Unary(val, kind) => match kind { UnaryOp::Cos => { - *store.entry(val.id()).or_insert(val.zeros_like()) -= - &grad * val.clone().sin(); + *entry!(store, val) -= &grad * val.clone().sin(); } + UnaryOp::Cosh => { + *entry!(store, val) += &grad * val.clone().sinh(); + } + UnaryOp::Exp => { + *entry!(store, val) += &grad * val.clone().exp(); + } UnaryOp::Neg => { - *store.entry(val.id()).or_insert(val.zeros_like()) -= &grad; + *entry!(store, val) -= &grad; } UnaryOp::Sin => { - *store.entry(val.id()).or_insert(val.zeros_like()) += - &grad * val.clone().cos(); + *entry!(store, val) += &grad * val.clone().cos(); } + UnaryOp::Sinh => { + *entry!(store, val) += &grad * val.clone().cosh(); + } + UnaryOp::Sqrt => { + *entry!(store, val) += + &grad / (val.clone().sqrt() * T::from(2).unwrap()); } + UnaryOp::Tan => { + *entry!(store, val) += &grad / val.clone().cos().sqr(); + } + _ => todo!(), }, _ => {} diff --git a/tensor/src/impls/ops/unary.rs b/tensor/src/impls/ops/unary.rs index 22abce89..8786a6f9 100644 --- a/tensor/src/impls/ops/unary.rs +++ b/tensor/src/impls/ops/unary.rs @@ -34,6 +34,34 @@ where } } +impl<T> std::ops::Not for TensorBase<T> +where + T: Copy + std::ops::Not<Output = T>, +{ + type Output = Self; + + fn not(self) -> Self::Output { + let shape = self.shape().clone(); + let store = self.data().iter().copied().map(|a| !a).collect(); + let op = TensorOp::unary(self, UnaryOp::Not); + from_vec_with_op(false, op, shape, store) + } +} + +impl<'a, T> std::ops::Not for &'a TensorBase<T> +where + T: Copy + std::ops::Not<Output = T>, +{ + type Output = TensorBase<T>; + + fn not(self) -> Self::Output { + let shape = self.shape().clone(); + let store = self.data().iter().copied().map(|a| !a).collect(); + let op = TensorOp::unary(self.clone(), UnaryOp::Not); + from_vec_with_op(false, op, shape, store) + } +} + macro_rules! impl_unary_op { ($variant:ident, $method:ident) => { pub fn $method(self) -> Self { @@ -73,6 +101,7 @@ where impl_unary_op!(Ln, ln); impl_unary_op!(Sin, sin); impl_unary_op!(Sinh, sinh); + impl_unary_op!(Square, sqr); impl_unary_op!(Sqrt, sqrt); impl_unary_op!(Tan, tan); impl_unary_op!(Tanh, tanh); diff --git a/tensor/src/specs/mod.rs b/tensor/src/specs/mod.rs index 8dd4553b..2c229a85 100644 --- a/tensor/src/specs/mod.rs +++ b/tensor/src/specs/mod.rs @@ -92,7 +92,7 @@ mod tests { let a = 3f64; let b = 4f64; - assert_eq!(Scalar::square(a), 9f64); + assert_eq!(Scalar::sqr(a), 9f64); assert_eq!(Scalar::sqrt(b), 2f64); } } diff --git a/tensor/src/specs/scalar.rs b/tensor/src/specs/scalar.rs index bce4b6e6..fec47dc5 100644 --- a/tensor/src/specs/scalar.rs +++ b/tensor/src/specs/scalar.rs @@ -25,6 +25,8 @@ pub trait Scalar: + NumOps + Real; + fn abs(self) -> Self::Real; + fn conj(&self) -> Self::Complex; fn im(&self) -> Self::Real { @@ -33,12 +35,6 @@ fn re(&self) -> Self::Real; - fn abs(self) -> Self::Real { - let re = self.re(); - let im = self.im(); - <Self::Real as Real>::sqrt(re * re + im * im) - } - fn cos(self) -> Self; fn cosh(self) -> Self; @@ -68,7 +64,9 @@ fn sqrt(self) -> Self; - fn square(self) -> Self::Real; + fn sqr(self) -> Self { + self.powi(2) + } fn tan(self) -> Self; @@ -83,6 +81,10 @@ where type Complex = Self; type Real = T; + fn abs(self) -> Self::Real { + Complex::norm(self) + } + fn conj(&self) -> Self::Complex { Complex::conj(self) } @@ -139,10 +141,6 @@ where Complex::sqrt(self) } - fn square(self) -> Self::Real { - Complex::norm_sqr(&self) - } - fn tan(self) -> Self { Complex::tan(self) } @@ -158,6 +156,10 @@ macro_rules!
impl_scalar { <$re>::sqrt(self) } - fn square(self) -> Self::Real { - self * self - } - fn tan(self) -> Self { <$re>::tan(self) } From 15bb4e4cb51912ecc6ed99962355e4b347d4ea96 Mon Sep 17 00:00:00 2001 From: Joe McCain III Date: Fri, 29 Mar 2024 12:25:22 -0500 Subject: [PATCH 66/87] update Signed-off-by: Joe McCain III --- Cargo.toml | 4 +- acme/Cargo.toml | 25 ++++--- core/Cargo.toml | 6 +- core/src/lib.rs | 4 ++ core/src/seal.rs | 34 +++++++++ core/src/specs/func/mod.rs | 48 ++++++++++++- core/src/specs/func/structural.rs | 2 +- core/src/specs/mod.rs | 31 ++------ core/src/specs/prop.rs | 76 ++++++++++++++++++++ core/src/specs/store.rs | 2 +- graphs/Cargo.toml | 3 +- tensor/Cargo.toml | 9 ++- tensor/src/actions/index/mod.rs | 33 ++++++++- tensor/src/actions/index/slice.rs | 5 +- tensor/src/actions/index/strides.rs | 107 ++++++++++++++++++++++++++++ tensor/src/backend/mod.rs | 53 +++++++++++++- tensor/src/ops/backprop.rs | 10 +++ tensor/src/ops/op.rs | 25 +++++++ tensor/src/shape/rank.rs | 8 +++ tensor/src/specs/scalar.rs | 13 ++-- tensor/src/store/layout.rs | 16 +++-- tensor/src/tensor.rs | 26 +++++++ 22 files changed, 474 insertions(+), 66 deletions(-) create mode 100644 core/src/seal.rs create mode 100644 core/src/specs/prop.rs create mode 100644 tensor/src/actions/index/strides.rs diff --git a/Cargo.toml b/Cargo.toml index 1a4761cd..7a35a49e 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -8,8 +8,8 @@ keywords = ["acme", "autodiff", "mathematics", "tensor"] license = "Apache-2.0" repository = "https://github.com/FL03/acme" readme = "README.md" -# version = "0.3.0" -version = "0.3.0-nightly.3" +version = "0.3.0" +# version = "0.3.0-nightly.4" [workspace] default-members = [ diff --git a/acme/Cargo.toml b/acme/Cargo.toml index 64ceb870..7d2a5f03 100644 --- a/acme/Cargo.toml +++ b/acme/Cargo.toml @@ -50,6 +50,11 @@ serde = [ "acme-tensor/serde" ] +std = [ + "acme-core/std", + "acme-tensor/std" +] + [lib] bench = true @@ -74,16 +79,16 @@ required-features = ["macros"] [build-dependencies] [dependencies] -# acme-core = { path = "../core", version = "0.3.0" } -# acme-derive = { optional = true, path = "../derive", version = "0.3.0" } -# acme-graphs = { optional = true, path = "../graphs", version = "0.3.0" } -# acme-macros = { optional = true, path = "../macros", version = "0.3.0" } -# acme-tensor = { optional = true, path = "../tensor", version = "0.3.0" } -acme-core = { path = "../core", version = "0.3.0-nightly.3" } -acme-derive = { optional = true, path = "../derive", version = "0.3.0-nightly.3" } -acme-graphs = { optional = true, path = "../graphs", version = "0.3.0-nightly.3" } -acme-macros = { optional = true, path = "../macros", version = "0.3.0-nightly.3" } -acme-tensor = { optional = true, path = "../tensor", version = "0.3.0-nightly.3" } +acme-core = { path = "../core", version = "0.3.0" } +acme-derive = { optional = true, path = "../derive", version = "0.3.0" } +acme-graphs = { optional = true, path = "../graphs", version = "0.3.0" } +acme-macros = { optional = true, path = "../macros", version = "0.3.0" } +acme-tensor = { optional = true, path = "../tensor", version = "0.3.0" } +# acme-core = { path = "../core", version = "0.3.0-nightly.4" } +# acme-derive = { optional = true, path = "../derive", version = "0.3.0-nightly.4" } +# acme-graphs = { optional = true, path = "../graphs", version = "0.3.0-nightly.4" } +# acme-macros = { optional = true, path = "../macros", version = "0.3.0-nightly.4" } +# acme-tensor = { optional = true, path = "../tensor", version = "0.3.0-nightly.4" } 
[dev-dependencies] approx = "0.5" diff --git a/core/Cargo.toml b/core/Cargo.toml index 342abd37..1ee48fe5 100644 --- a/core/Cargo.toml +++ b/core/Cargo.toml @@ -10,7 +10,9 @@ repository.workspace = true version.workspace = true [features] -default = [] +default = [ + "std", +] serde = [ "dep:serde", @@ -21,6 +23,8 @@ serde-ext = [ "dep:serde_json", ] +std = [] + [lib] bench = false crate-type = ["cdylib", "rlib"] diff --git a/core/src/lib.rs b/core/src/lib.rs index de6ee071..8fc4d40a 100644 --- a/core/src/lib.rs +++ b/core/src/lib.rs @@ -5,6 +5,10 @@ //! # Core //! //! + +#[macro_use] +pub(crate) mod seal; + pub mod errors; pub mod eval; pub mod id; diff --git a/core/src/seal.rs b/core/src/seal.rs new file mode 100644 index 00000000..b53ba76e --- /dev/null +++ b/core/src/seal.rs @@ -0,0 +1,34 @@ +/* + Appellation: seal + Contrib: FL03 +*/ +#![allow(unused)] +//! The public parts of this private module are used to create traits +//! that cannot be implemented outside of our own crate. This way we +//! can feel free to extend those traits without worrying about it +//! being a breaking change for other implementations. + +/// If this type is pub but not publicly reachable, third parties +/// can't name it and can't implement traits using it. +pub struct PrivateMarker; + +macro_rules! private_decl { + () => { + /// This trait is private to implement; this method exists to make it + /// impossible to implement outside the crate. + #[doc(hidden)] + fn __private__(&self) -> $crate::seal::PrivateMarker; + }; +} + +macro_rules! private_impl { + () => { + fn __private__(&self) -> $crate::seal::PrivateMarker { + $crate::seal::PrivateMarker + } + }; +} + +pub trait Sealed { + private_decl!(); +} diff --git a/core/src/specs/func/mod.rs b/core/src/specs/func/mod.rs index 400f9e13..02a98ff3 100644 --- a/core/src/specs/func/mod.rs +++ b/core/src/specs/func/mod.rs @@ -6,5 +6,51 @@ pub use self::structural::*; pub(crate) mod structural; +pub trait FnHandler { + type Output; + + fn item_fn(&self) -> fn(Args) -> Self::Output; +} + +#[allow(unused)] #[cfg(test)] -mod tests {} +mod tests { + use super::FnHandler; + use core::ops::Mul; + + pub struct Sample; + + impl Sample { + pub fn sqr(x: T) -> T + where + T: Copy + Mul, + { + x * x + } + + pub fn blahblah() -> fn(T) -> T + where + T: Copy + Mul, + { + Sample::sqr + } + } + + impl FnHandler for Sample + where + T: Copy + Mul, + { + type Output = T; + + fn item_fn(&self) -> fn(T) -> T { + Self::sqr + } + } + + #[test] + fn test_fn_handler() { + let sample = Sample; + let item_fn = sample.item_fn(); + assert_eq!(item_fn(2), 4); + } +} diff --git a/core/src/specs/func/structural.rs b/core/src/specs/func/structural.rs index 2c81f3c9..79191141 100644 --- a/core/src/specs/func/structural.rs +++ b/core/src/specs/func/structural.rs @@ -13,7 +13,7 @@ pub trait StructuralFn { pub trait StructuredArgs {} pub struct Sigmoid { - x: T, + pub x: T, } impl Sigmoid { diff --git a/core/src/specs/mod.rs b/core/src/specs/mod.rs index 2f683d85..345e9789 100644 --- a/core/src/specs/mod.rs +++ b/core/src/specs/mod.rs @@ -3,14 +3,15 @@ Contrib: FL03 */ -pub use self::{gradient::*, store::*}; +pub use self::{gradient::*, prop::*, store::*}; pub(crate) mod gradient; +pub(crate) mod prop; pub(crate) mod store; pub mod func; -use crate::errors::PredictError; +use core::borrow::Borrow; pub trait Idx { type Index; @@ -18,36 +19,16 @@ pub trait Idx { fn index(&self) -> Self::Index; } -pub trait Backward { - type Output; - - fn backward(&self) -> Self::Output; -} - -pub trait Forward { - 
type Output; - - fn forward(&self, args: &T) -> Result; -} - -impl Forward for Option +pub trait IdxExt: Idx where - S: Forward, - T: Clone, + Self: Borrow + Copy, { - type Output = T; - - fn forward(&self, args: &T) -> Result { - match self { - Some(s) => s.forward(args), - None => Ok(args.clone()), - } - } } pub(crate) mod prelude { pub use super::func::*; pub use super::gradient::*; + pub use super::prop::*; pub use super::store::*; } diff --git a/core/src/specs/prop.rs b/core/src/specs/prop.rs new file mode 100644 index 00000000..26421931 --- /dev/null +++ b/core/src/specs/prop.rs @@ -0,0 +1,76 @@ +/* + Appellation: prop + Contrib: FL03 +*/ +use crate::errors::PredictError; + +/// [Backward] describes an object capable of backward propagation. +/// +/// +pub trait Backward { + type Output; + + fn backward(&self) -> Self::Output; +} + +/// [Forward] describes an object capable of forward propagation. +pub trait Forward { + type Output; + + fn forward(&self, args: &T) -> Result; +} + +pub trait ForwardIter { + type Item: Forward; + + fn forward_iter(self, args: &T) -> Result<>::Output, PredictError>; +} + +// Trait implementations +mod impls { + use super::*; + + impl ForwardIter for I + where + I: Iterator, + M: Forward, + T: Clone, + { + type Item = M; + + fn forward_iter(self, args: &T) -> Result { + let mut result = args.clone(); + for i in self { + result = i.forward(&result)?; + } + Ok(result) + } + } + + impl Forward for Option + where + S: Forward, + T: Clone, + { + type Output = T; + + fn forward(&self, args: &T) -> Result { + match self { + Some(s) => s.forward(args), + None => Ok(args.clone()), + } + } + } + + impl Forward for S + where + S: AsRef>, + T: Clone, + { + type Output = T; + + fn forward(&self, args: &T) -> Result { + self.as_ref().forward(args) + } + } +} diff --git a/core/src/specs/store.rs b/core/src/specs/store.rs index 75245ae4..c5c0eac7 100644 --- a/core/src/specs/store.rs +++ b/core/src/specs/store.rs @@ -2,7 +2,7 @@ Appellation: stores Contrib: FL03 */ -use std::borrow::Borrow; +use core::borrow::Borrow; use std::collections::{BTreeMap, HashMap}; pub trait Get { diff --git a/graphs/Cargo.toml b/graphs/Cargo.toml index 385c2a14..41b606ed 100644 --- a/graphs/Cargo.toml +++ b/graphs/Cargo.toml @@ -44,7 +44,8 @@ strum.workspace = true [dependencies.acme-core] path = "../core" -version = "0.3.0-nightly.3" +version = "0.3.0" +# version = "0.3.0-nightly.4" [package.metadata.docs.rs] all-features = true diff --git a/tensor/Cargo.toml b/tensor/Cargo.toml index ab84a819..0021f627 100644 --- a/tensor/Cargo.toml +++ b/tensor/Cargo.toml @@ -16,8 +16,6 @@ default = [ "std" ] -std = [] - serde = [ "dep:serde", "serde-ext", @@ -27,6 +25,10 @@ serde-ext = [ "acme-core/serde" ] +std = [ + "acme-core/std", +] + [build-dependencies] [dependencies] @@ -37,7 +39,8 @@ strum = { features = ["derive"], version = "0.26" } [dependencies.acme-core] path = "../core" -version = "0.3.0-nightly.3" +version = "0.3.0" +# version = "0.3.0-nightly.4" [dev-dependencies] approx = "0.5" diff --git a/tensor/src/actions/index/mod.rs b/tensor/src/actions/index/mod.rs index a0bf39ff..96e95a9e 100644 --- a/tensor/src/actions/index/mod.rs +++ b/tensor/src/actions/index/mod.rs @@ -5,11 +5,38 @@ //! # Index //! //! 
-pub use self::slice::*; +pub use self::{strides::*, slice::*}; +pub(crate) mod strides; pub(crate) mod slice; -pub trait TensorIdx {} +use crate::tensor::TensorBase; + +pub enum IndexItem<T> { + Scalar(T), + Strides(TensorBase<T>), +} #[cfg(test)] -mod tests {} +mod tests { + use super::Strides; + use crate::prelude::Shape; + use crate::tensor::TensorBase; + + #[test] + fn test() { + let shape = Shape::from_iter([2, 2]); + let n = shape.size(); + let tensor = TensorBase::linspace(0f64, n as f64, n) + .reshape(shape) + .unwrap(); + let indexer = Strides::from(tensor.layout()); + for (i, idx) in indexer.enumerate() { + let elem = *tensor.get_by_index(idx).unwrap(); + println!("{:?}", &elem); + + assert_eq!(i as f64, elem); + + } + } +} diff --git a/tensor/src/actions/index/slice.rs b/tensor/src/actions/index/slice.rs index c6ffdbcf..6eb3cab3 100644 --- a/tensor/src/actions/index/slice.rs +++ b/tensor/src/actions/index/slice.rs @@ -9,5 +9,8 @@ pub struct Slice { pub start: usize, pub end: usize, - pub step: usize, } + +impl Slice {} + +pub enum Index {} diff --git a/tensor/src/actions/index/strides.rs b/tensor/src/actions/index/strides.rs new file mode 100644 index 00000000..01772dc8 --- /dev/null +++ b/tensor/src/actions/index/strides.rs @@ -0,0 +1,107 @@ +/* + Appellation: stride + Contrib: FL03 +*/ +use crate::shape::{Shape, Stride}; +use crate::store::Layout; +use crate::tensor::TensorBase; + +pub struct StrideIter<'a, T> { + scope: Option<&'a T>, + strides: Strides<'a>, + tensor: &'a TensorBase<T>, +} + +impl<'a, T> StrideIter<'a, T> { + pub fn new(tensor: &'a TensorBase<T>) -> Self { + let strides = Strides::from(tensor.layout()); + Self { + scope: None, + strides, + tensor, + } + } +} + + +impl<'a, T> Iterator for StrideIter<'a, T> { + type Item = &'a T; + + fn next(&mut self) -> Option<Self::Item> { + let idx = self.strides.next()?; + self.scope = self.tensor.get_by_index(idx); + self.scope + } +} + +pub struct Strides<'a> { + next: Option<usize>, + position: Vec<usize>, + pub(crate) shape: &'a Shape, + pub(crate) stride: &'a Stride, +} + +impl<'a> Strides<'a> { + pub fn new(offset: usize, shape: &'a Shape, stride: &'a Stride) -> Self { + let elem_count: usize = shape.iter().product(); + let next = if elem_count == 0 { + None + } else { + // This applies to the scalar case.
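+ // (The product over a rank-0 shape is 1, so a scalar tensor still
+ // yields exactly one index, namely the base offset.)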
+ Some(offset) + }; + Self { + next, + position: vec![0; *shape.rank()], + shape, + stride, + } + } + + pub fn index(&self, index: &[usize]) -> usize { + index + .iter() + .zip(self.stride.iter()) + .map(|(i, s)| i * s) + .sum() + } +} + +impl<'a> Iterator for Strides<'a> { + type Item = usize; + + fn next(&mut self) -> Option<Self::Item> { + let scope = match self.next { + None => return None, + Some(storage_index) => storage_index, + }; + let mut updated = false; + let mut next = scope; + for ((multi_i, max_i), stride_i) in self + .position + .iter_mut() + .zip(self.shape.iter()) + .zip(self.stride.iter()) + .rev() + { + let next_i = *multi_i + 1; + if next_i < *max_i { + *multi_i = next_i; + updated = true; + next += stride_i; + break; + } else { + next -= *multi_i * stride_i; + *multi_i = 0 + } + } + self.next = if updated { Some(next) } else { None }; + Some(scope) + } +} + +impl<'a> From<&'a Layout> for Strides<'a> { + fn from(layout: &'a Layout) -> Self { + Self::new(layout.offset, &layout.shape, &layout.stride) + } +} diff --git a/tensor/src/backend/mod.rs b/tensor/src/backend/mod.rs index d0af749b..3e216b4a 100644 --- a/tensor/src/backend/mod.rs +++ b/tensor/src/backend/mod.rs @@ -11,9 +11,47 @@ pub(crate) mod devices; pub mod cpu; +use crate::shape::Rank; +use crate::tensor::TensorBase; + +#[derive(Clone, Debug, Eq, PartialEq)] pub enum TensorType<T> { Scalar(T), - Tensor(Vec<TensorBase<T>>), + Tensor(TensorBase<T>), +} + +impl<T> TensorType<T> { + pub fn scalar(scalar: T) -> Self { + Self::Scalar(scalar) + } + + pub fn tensor(tensor: TensorBase<T>) -> Self { + Self::Tensor(tensor) + } + + pub fn is_scalar(&self) -> bool { + match self { + Self::Scalar(_) => true, + _ => false, + } + } + + pub fn rank(&self) -> Rank { + match self { + Self::Scalar(_) => Rank::scalar(), + Self::Tensor(tensor) => tensor.rank(), + } + } +} + +impl<T> From<TensorBase<T>> for TensorType<T> where T: Clone { + fn from(tensor: TensorBase<T>) -> Self { + if tensor.rank().is_scalar() { + Self::Scalar(tensor.data()[0].clone()) + } else { + Self::Tensor(tensor) + } + } } pub trait Backend {} @@ -23,4 +61,15 @@ pub trait BackendStorage { } #[cfg(test)] -mod tests {} +mod tests { + use super::*; + + #[test] + fn test_tensor_type() { + let shape = (2, 3); + let tensor = TensorBase::<f64>::ones(shape); + let item = TensorType::tensor(tensor); + + assert_eq!(item.rank(), Rank::from(2)); + } +} diff --git a/tensor/src/ops/backprop.rs b/tensor/src/ops/backprop.rs index dea5aa23..35680b18 100644 --- a/tensor/src/ops/backprop.rs +++ b/tensor/src/ops/backprop.rs @@ -39,6 +39,16 @@ impl<T> BackpropOp<T> { pub fn into_inner(self) -> Option<TensorOp<T>> { self.0 } + + pub fn take(&mut self) -> Option<TensorOp<T>> { + self.0.take() + } +} + +impl<T> BackpropOp<T> where T: Clone { + pub fn view(&self) -> BackpropOp<&T> { + BackpropOp(self.0.as_ref().map(|op| op.view())) + } } impl<T> Borrow<Option<TensorOp<T>>> for BackpropOp<T> { diff --git a/tensor/src/ops/op.rs b/tensor/src/ops/op.rs index c635ebdb..da3602ce 100644 --- a/tensor/src/ops/op.rs +++ b/tensor/src/ops/op.rs @@ -31,6 +31,10 @@ impl<T> TensorOp<T> { TensorOp::BinaryScalar(Box::new(lhs), rhs, op) } + pub fn broadcast(tensor: TensorBase<T>, shape: Shape) -> Self { + TensorOp::Broadcast(Box::new(tensor), shape) + } + pub fn matmul(lhs: TensorBase<T>, rhs: TensorBase<T>) -> Self { TensorOp::Matmul(Box::new(lhs), Box::new(rhs)) } @@ -65,3 +69,24 @@ impl<T> TensorOp<T> { } } } + +impl<T> TensorOp<T> where T: Clone { + pub fn view<'a>(&'a self) -> TensorOp<&'a T> { + match self { + TensorOp::Binary(lhs, rhs, op) => { + TensorOp::binary(lhs.view(), rhs.view(), *op) + } + TensorOp::BinaryScalar(lhs, rhs, op) => {
TensorOp::binary_scalar(lhs.view(), rhs, *op) + } + TensorOp::Unary(tensor, op) => TensorOp::unary(tensor.view(), *op), + TensorOp::Broadcast(tensor, shape) => { + TensorOp::broadcast(tensor.view(), shape.clone()) + } + TensorOp::Matmul(lhs, rhs) => TensorOp::matmul(lhs.view(), rhs.view()), + TensorOp::Transpose { tensor, axes } => { + TensorOp::transpose(tensor.view(), axes.0, axes.1) + } + } + } +} diff --git a/tensor/src/shape/rank.rs b/tensor/src/shape/rank.rs index ea3e982a..73591c7e 100644 --- a/tensor/src/shape/rank.rs +++ b/tensor/src/shape/rank.rs @@ -29,10 +29,18 @@ impl Rank { Self(rank) } + pub const fn scalar() -> Self { + Self(0) + } + pub fn into_inner(self) -> usize { self.0 } + pub fn is_scalar(&self) -> bool { + self.0 == 0 + } + pub fn rank(&self) -> usize { self.0 } diff --git a/tensor/src/specs/scalar.rs b/tensor/src/specs/scalar.rs index fec47dc5..8df5cb64 100644 --- a/tensor/src/specs/scalar.rs +++ b/tensor/src/specs/scalar.rs @@ -3,7 +3,6 @@ Contrib: FL03 */ use num::complex::Complex; -use num::traits::real::Real; use num::traits::{Float, FromPrimitive, NumAssign, NumCast, NumOps}; use std::iter::{Product, Sum}; use std::ops::Neg; @@ -20,10 +19,10 @@ pub trait Scalar: + Sum + 'static { - type Complex: Scalar + NumOps; + type Complex: Scalar + + NumOps; type Real: Scalar - + NumOps - + Real; + + NumOps; fn abs(self) -> Self::Real; @@ -221,11 +220,7 @@ macro_rules! impl_scalar { } } }; - ($($t:ty),*) => { - $( - impl_scalar!($t); - )* - }; } + impl_scalar!(f32); impl_scalar!(f64); diff --git a/tensor/src/store/layout.rs b/tensor/src/store/layout.rs index 4b3558fb..daa25e01 100644 --- a/tensor/src/store/layout.rs +++ b/tensor/src/store/layout.rs @@ -58,6 +58,10 @@ impl Layout { stride, } } + /// Determine if the current layout is contiguous or not. + pub fn is_contiguous(&self) -> bool { + self.shape.is_contiguous(&self.stride) + } /// Get a peek at the offset of the layout. 
pub fn offset(&self) -> usize { self.offset @@ -123,16 +127,16 @@ impl Layout { // Internal methods impl Layout { - pub(crate) fn index(&self, coords: impl AsRef<[usize]>) -> usize { - let coords = coords.as_ref(); - if coords.len() != *self.shape.rank() { + pub(crate) fn index(&self, idx: impl AsRef<[usize]>) -> usize { + let idx = idx.as_ref(); + if idx.len() != *self.shape.rank() { panic!("Dimension mismatch"); } - let index = coords + idx .iter() .zip(self.stride.iter()) - .fold(self.offset, |acc, (&coord, &stride)| acc + coord * stride); - index + .map(|(i, s)| i * s) + .sum() } } diff --git a/tensor/src/tensor.rs b/tensor/src/tensor.rs index c9878468..2fdc43ba 100644 --- a/tensor/src/tensor.rs +++ b/tensor/src/tensor.rs @@ -2,6 +2,7 @@ Appellation: tensor Contrib: FL03 */ +use crate::actions::index::Strides; use crate::ops::{BackpropOp, TensorOp}; use crate::prelude::{IntoShape, Rank, Shape, TensorId, TensorKind}; use crate::store::Layout; @@ -108,6 +109,10 @@ impl TensorBase { pub const fn id(&self) -> TensorId { self.id } + + pub fn is_contiguous(&self) -> bool { + self.layout().is_contiguous() + } /// Get a reference to the [Layout] of the tensor pub const fn layout(&self) -> &Layout { &self.layout @@ -132,6 +137,7 @@ impl TensorBase { pub fn stride(&self) -> &[usize] { self.layout.stride() } + /// A function to check if the tensor is a scalar pub fn is_scalar(&self) -> bool { self.shape().len() == 0 @@ -218,6 +224,16 @@ impl TensorBase { self } } + +impl TensorBase where T: Clone { + pub fn to_owned(&self) -> TensorBase { + self.clone() + } + + pub fn view<'a>(&'a self) -> TensorBase<&'a T> { + unimplemented!("view") + } +} // Inernal Methods #[allow(dead_code)] impl TensorBase { @@ -236,6 +252,14 @@ impl TensorBase { pub(crate) fn data_mut(&mut self) -> &mut Vec { &mut self.store } + + pub(crate) fn get_by_index(&self, index: usize) -> Option<&T> { + self.store.get(index) + } + /// Create an iterator over the strides of the tensor + pub(crate) fn strides(&self) -> Strides<'_> { + self.layout().into() + } } impl Index<&[usize]> for TensorBase { @@ -272,3 +296,5 @@ impl FromIterator for TensorBase { from_vec(TensorKind::Normal, shape, store) } } + + From f24808d6755b85a8fd0adef044ea9545a65f43be Mon Sep 17 00:00:00 2001 From: Joe McCain III Date: Fri, 29 Mar 2024 14:29:54 -0500 Subject: [PATCH 67/87] update Signed-off-by: Joe McCain III --- core/src/types/mod.rs | 4 +- macros/src/ast/gradient.rs | 1 + macros/src/grad/mod.rs | 2 + tensor/src/actions/index/mod.rs | 5 +- tensor/src/actions/index/strides.rs | 1 - tensor/src/actions/iter/iterator.rs | 10 +++ tensor/src/actions/iter/mod.rs | 28 ++++++- tensor/src/backend/mod.rs | 5 +- tensor/src/data/mod.rs | 115 ++++++++++++++++++++++++++ tensor/src/lib.rs | 1 + tensor/src/ops/backprop.rs | 5 +- tensor/src/ops/kinds/reshape.rs | 37 ++++----- tensor/src/ops/op.rs | 62 +++++++------- tensor/src/shape/dim/mod.rs | 122 +++++++++++++++++++++++++++- tensor/src/shape/error.rs | 4 + tensor/src/shape/mod.rs | 6 ++ tensor/src/shape/shape.rs | 21 ++++- tensor/src/shape/stride.rs | 16 ++++ tensor/src/specs/ndtensor.rs | 8 +- tensor/src/specs/scalar.rs | 9 +- tensor/src/store/layout.rs | 50 ++++++++++-- tensor/src/tensor.rs | 25 +++--- tensor/src/utils.rs | 36 ++++++++ tensor/tests/backward.rs | 22 ++++- tensor/tests/composition.rs | 4 +- tensor/tests/reshape.rs | 4 +- 26 files changed, 511 insertions(+), 92 deletions(-) diff --git a/core/src/types/mod.rs b/core/src/types/mod.rs index 6425b6e5..266de08a 100644 --- a/core/src/types/mod.rs +++ 
b/core/src/types/mod.rs @@ -13,9 +13,11 @@ pub(crate) mod operators; pub(crate) mod variables; /// A boxed error type for use in the library. +#[cfg(feature = "std")] pub type BoxError = Box<dyn std::error::Error>; /// A boxed result type for use in the library. -pub type BoxResult<T> = std::result::Result<T, BoxError>; +#[cfg(feature = "std")] +pub type BoxResult<T> = core::result::Result<T, BoxError>; macro_rules! impl_op { ($name:ident, $bound:ident, $fn:ident, $val:tt, $e:expr) => { diff --git a/macros/src/ast/gradient.rs b/macros/src/ast/gradient.rs index b618b4e4..d80542b7 100644 --- a/macros/src/ast/gradient.rs +++ b/macros/src/ast/gradient.rs @@ -10,6 +10,7 @@ pub struct GradientAst { pub item: ItemFn, } +#[allow(dead_code)] impl GradientAst { pub fn new(attrs: Vec<Attribute>, item: ItemFn) -> Self { Self { attrs, item } diff --git a/macros/src/grad/mod.rs b/macros/src/grad/mod.rs index 49b43b58..fd86c1ae 100644 --- a/macros/src/grad/mod.rs +++ b/macros/src/grad/mod.rs @@ -9,6 +9,7 @@ use proc_macro2::TokenStream; use quote::quote; use syn::{ItemFn, Signature}; +#[allow(dead_code)] pub fn gradient(grad: &GradientAst) -> TokenStream { let GradientAst { attrs, item } = grad; let _attrs = attrs; @@ -39,6 +40,7 @@ pub fn handle_item_fn(item: &ItemFn) -> TokenStream { } } +#[allow(dead_code)] pub fn item_fn_partial(item: &ItemFn) -> TokenStream { let ItemFn { block, sig, .. } = item; let Signature { inputs, .. } = sig; diff --git a/tensor/src/actions/index/mod.rs b/tensor/src/actions/index/mod.rs index 96e95a9e..5e52fb51 100644 --- a/tensor/src/actions/index/mod.rs +++ b/tensor/src/actions/index/mod.rs @@ -5,10 +5,10 @@ //! # Index //! //! -pub use self::{strides::*, slice::*}; +pub use self::{slice::*, strides::*}; -pub(crate) mod strides; pub(crate) mod slice; +pub(crate) mod strides; use crate::tensor::TensorBase; @@ -36,7 +36,6 @@ mod tests { println!("{:?}", &elem); assert_eq!(i as f64, elem); - } } } diff --git a/tensor/src/actions/index/strides.rs b/tensor/src/actions/index/strides.rs index 01772dc8..4780359e 100644 --- a/tensor/src/actions/index/strides.rs +++ b/tensor/src/actions/index/strides.rs @@ -23,7 +23,6 @@ impl<'a, T> StrideIter<'a, T> { } } - impl<'a, T> Iterator for StrideIter<'a, T> { type Item = &'a T; diff --git a/tensor/src/actions/iter/iterator.rs b/tensor/src/actions/iter/iterator.rs index 109c8d81..9b2765f7 100644 --- a/tensor/src/actions/iter/iterator.rs +++ b/tensor/src/actions/iter/iterator.rs @@ -7,3 +7,13 @@ use crate::prelude::Order; pub struct Iter { order: Order, } + +impl Iter { + pub fn new(order: Order) -> Self { + Self { order } + } + + pub fn order(&self) -> Order { + self.order + } +} diff --git a/tensor/src/actions/iter/mod.rs b/tensor/src/actions/iter/mod.rs index 4516f820..7c8f93b1 100644 --- a/tensor/src/actions/iter/mod.rs +++ b/tensor/src/actions/iter/mod.rs @@ -5,7 +5,7 @@ //! # Iter //! //! -pub use self::iterator::*; +pub use self::{iterator::*, utils::*}; pub(crate) mod iterator; @@ -13,5 +13,31 @@ pub trait IterTensor { type Item; } +pub(crate) mod utils { + use core::ptr; + + pub fn to_vec_mapped<I, B, F>(iter: I, mut f: F) -> Vec<B> + where + I: ExactSizeIterator, // + TrustedIterator + F: FnMut(I::Item) -> B, + { + // Use an `unsafe` block to do this efficiently. + // We know that iter will produce exactly .size() elements, + // and the loop can vectorize if it's clean (without branch to grow the vector).
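+ // (The safety argument leans on `ExactSizeIterator`: the reported
+ // `size_hint` must be exact, and `set_len` only ever exposes slots
+ // that were just initialized with `ptr::write`.)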
+ let (size, _) = iter.size_hint(); + let mut result = Vec::with_capacity(size); + let mut out_ptr = result.as_mut_ptr(); + let mut len = 0; + iter.fold((), |(), elt| unsafe { + ptr::write(out_ptr, f(elt)); + len += 1; + result.set_len(len); + out_ptr = out_ptr.offset(1); + }); + debug_assert_eq!(size, result.len()); + result + } +} + #[cfg(test)] mod tests {} diff --git a/tensor/src/backend/mod.rs b/tensor/src/backend/mod.rs index 3e216b4a..9adfba3f 100644 --- a/tensor/src/backend/mod.rs +++ b/tensor/src/backend/mod.rs @@ -44,7 +44,10 @@ impl TensorType { } } -impl From> for TensorType where T: Clone { +impl From> for TensorType +where + T: Clone, +{ fn from(tensor: TensorBase) -> Self { if tensor.rank().is_scalar() { Self::Scalar(tensor.data()[0].clone()) diff --git a/tensor/src/data/mod.rs b/tensor/src/data/mod.rs index 4d46824f..3823627d 100644 --- a/tensor/src/data/mod.rs +++ b/tensor/src/data/mod.rs @@ -6,6 +6,7 @@ //! //! pub use self::specs::*; +pub(crate) use self::utils::*; pub(crate) mod specs; @@ -19,8 +20,13 @@ pub mod repr { pub(crate) mod view; } +use crate::actions::iter::to_vec_mapped; use crate::prelude::{BackpropOp, Layout, TensorId, TensorKind}; +use crate::shape::dim::can_index_slice; +use crate::shape::{IntoShape, IntoStride, Shape, Stride}; use core::ptr::NonNull; +use core::slice; +use rawpointer::PointerExt; pub type Tensor = BaseTensor>; @@ -66,6 +72,31 @@ where // self.try_ensure_unique(); // for ArcArray self.ptr.as_ptr() } + pub fn as_slice_memory_order(&self) -> Option<&[A]> + where + S: Data, + { + if self.is_contiguous() { + let offset = self.layout.offset_from_low_addr_ptr_to_logical_ptr(); + unsafe { + Some(slice::from_raw_parts( + PointerExt::sub(self.ptr, offset).as_ptr(), + self.size(), + )) + } + } else { + None + } + } + + /// Return true if the array is known to be contiguous. + pub fn is_contiguous(&self) -> bool { + self.layout.is_contiguous() + } + + pub fn is_standard_layout(&self) -> bool { + self.layout.is_layout_c() + } /// Without any coping, turn the tensor into a shared tensor. pub fn into_shared(self) -> ArcTensor @@ -78,12 +109,83 @@ where unsafe { BaseTensor::from_data_ptr(data, self.ptr) } } + pub fn layout(&self) -> &Layout { + &self.layout + } + + pub fn map<'a, B, F>(&'a self, f: F) -> Tensor + where + F: FnMut(&'a A) -> B, + A: 'a, + S: Data, + { + unsafe { + if let Some(slc) = self.as_slice_memory_order() { + BaseTensor::from_shape_trusted_iter_unchecked(self.shape().slice(), slc.iter(), f) + } else { + unimplemented!() + // BaseTensor::from_shape_trusted_iter_unchecked(self.shape(), self.iter(), f) + } + } + } + + pub fn shape(&self) -> &Shape { + self.layout().shape() + } + + pub fn stride(&self) -> &Stride { + self.layout().stride() + } + pub fn size(&self) -> usize { self.layout.size() } } // Internal methods +impl BaseTensor +where + S: DataOwned + RawData, +{ + unsafe fn from_vec_dim_stride_unchecked( + dim: impl IntoShape, + strides: impl IntoStride, + mut v: Vec, + ) -> Self { + let layout = Layout::new(0, dim, strides); + // debug check for issues that indicates wrong use of this constructor + debug_assert!(can_index_slice(&v, &layout.shape(), &layout.stride()).is_ok()); + + let ptr = { + let tmp = nonnull_from_vec_data(&mut v); + PointerExt::add(tmp, layout.offset_from_low_addr_ptr_to_logical_ptr()) + }; + BaseTensor::from_data_ptr(DataOwned::new(v), ptr).with_layout(layout) + } + + /// Creates an array from an iterator, mapped by `map` and interpret it according to the + /// provided shape and strides. 
+ /// + /// # Safety + /// + /// See from_shape_vec_unchecked + pub(crate) unsafe fn from_shape_trusted_iter_unchecked( + shape: Sh, + iter: I, + map: F, + ) -> Self + where + Sh: IntoShape, + I: ExactSizeIterator, + F: FnMut(I::Item) -> A, + { + let shape = shape.into_shape(); + let strides = shape.default_strides(); // shape.stride().strides_for_dim(&dim); + let v = to_vec_mapped(iter, map); + Self::from_vec_dim_stride_unchecked(shape, strides, v) + } +} + impl BaseTensor where S: RawData, @@ -115,6 +217,19 @@ where ptr: self.ptr, } } + + pub(crate) unsafe fn with_strides_dim( + self, + stride: impl IntoStride, + dim: impl IntoShape, + ) -> BaseTensor { + let shape = dim.into_shape(); + let stride = stride.into_stride(); + debug_assert_eq!(shape.rank(), stride.rank()); + + let layout = Layout::new(0, shape, stride); + self.with_layout(layout) + } } pub(crate) mod utils { diff --git a/tensor/src/lib.rs b/tensor/src/lib.rs index 67d8e982..4361e7c6 100644 --- a/tensor/src/lib.rs +++ b/tensor/src/lib.rs @@ -17,6 +17,7 @@ pub use self::{tensor::*, utils::*}; #[macro_use] pub(crate) mod seal; pub(crate) mod tensor; +#[macro_use] pub(crate) mod utils; pub mod actions; diff --git a/tensor/src/ops/backprop.rs b/tensor/src/ops/backprop.rs index 35680b18..1dff4486 100644 --- a/tensor/src/ops/backprop.rs +++ b/tensor/src/ops/backprop.rs @@ -45,7 +45,10 @@ impl BackpropOp { } } -impl BackpropOp where T: Clone { +impl BackpropOp +where + T: Clone, +{ pub fn view(&self) -> BackpropOp<&T> { BackpropOp(self.0.as_ref().map(|op| op.view())) } diff --git a/tensor/src/ops/kinds/reshape.rs b/tensor/src/ops/kinds/reshape.rs index cf68c388..34e2da73 100644 --- a/tensor/src/ops/kinds/reshape.rs +++ b/tensor/src/ops/kinds/reshape.rs @@ -2,36 +2,33 @@ Appellation: reshape Contrib: FL03 */ +use crate::ops::BoxTensor; +use crate::shape::Shape; #[cfg(feature = "serde")] use serde::{Deserialize, Serialize}; -use strum::{Display, EnumCount, EnumIs, EnumIter, EnumString, VariantNames}; +use strum::{Display, EnumCount, EnumDiscriminants, EnumIs, EnumIter, EnumString, VariantNames}; -#[cfg_attr( - feature = "serde", - derive(Deserialize, Serialize), - serde(rename_all = "snake_case", untagged) -)] #[derive( Clone, - Copy, Debug, - Display, - EnumCount, - EnumIs, - EnumIter, - EnumString, + EnumDiscriminants, Eq, - Hash, - Ord, - PartialEq, - PartialOrd, - VariantNames, + PartialEq )] #[repr(u8)] #[strum(serialize_all = "snake_case")] -pub enum ReshapeOp { - Broadcast, - Reshape, +#[strum_discriminants(derive(Display, EnumCount, EnumIs, EnumIter, EnumString, Hash, Ord, PartialOrd, VariantNames))] +#[cfg_attr(feature = "serde", strum_discriminants(derive(Deserialize, Serialize)))] +#[strum_discriminants(name(ReshapeOp))] +pub enum ReshapeExpr { + Broadcast { + scope: BoxTensor, + shape: Shape, + }, + Reshape { + scope: BoxTensor, + shape: Shape, + }, Swap, Transpose, } diff --git a/tensor/src/ops/op.rs b/tensor/src/ops/op.rs index da3602ce..5fa4c5c0 100644 --- a/tensor/src/ops/op.rs +++ b/tensor/src/ops/op.rs @@ -2,6 +2,7 @@ Appellation: kinds Contrib: FL03 */ +use crate::ops::kinds::reshape::*; use crate::shape::{Axis, Shape}; use crate::TensorBase; use acme::prelude::{BinaryOp, UnaryOp}; @@ -16,9 +17,10 @@ pub enum TensorOp { Unary(BoxTensor, UnaryOp), Broadcast(BoxTensor, Shape), Matmul(BoxTensor, BoxTensor), + Reshape(ReshapeExpr), Transpose { - tensor: BoxTensor, - axes: (Axis, Axis), + scope: BoxTensor, + target: (Axis, Axis), }, } @@ -39,54 +41,56 @@ impl TensorOp { TensorOp::Matmul(Box::new(lhs), Box::new(rhs)) } - 
pub fn transpose(tensor: TensorBase, swap: Axis, with: Axis) -> Self { + pub fn transpose(scope: TensorBase, swap: Axis, with: Axis) -> Self { TensorOp::Transpose { - tensor: Box::new(tensor), - axes: (swap, with), + scope: Box::new(scope), + target: (swap, with), } } pub fn unary(tensor: TensorBase, op: UnaryOp) -> Self { TensorOp::Unary(Box::new(tensor), op) } - - pub fn lhs(&self) -> &TensorBase { +} +impl TensorOp { + pub fn lhs(self) -> Option> { match self { - TensorOp::Binary(lhs, _, _) => lhs, - TensorOp::BinaryScalar(lhs, _, _) => lhs, - TensorOp::Unary(lhs, _) => lhs, - TensorOp::Broadcast(tensor, _) => tensor, - TensorOp::Matmul(lhs, _) => lhs, - TensorOp::Transpose { tensor, .. } => tensor, + TensorOp::Binary(lhs, _, _) => Some(*lhs), + TensorOp::BinaryScalar(lhs, _, _) => Some(*lhs), + TensorOp::Unary(lhs, _) => Some(*lhs), + TensorOp::Broadcast(tensor, _) => Some(*tensor), + TensorOp::Matmul(lhs, _) => Some(*lhs), + TensorOp::Transpose { scope, .. } => Some(*scope), + _ => None, } } - pub fn rhs(&self) -> Option<&TensorBase> { + pub fn rhs(self) -> Option> { match self { - TensorOp::Binary(_, rhs, _) => Some(rhs), - TensorOp::Matmul(_, rhs) => Some(rhs), + TensorOp::Binary(_, rhs, _) => Some(*rhs), + TensorOp::BinaryScalar(_, scalar, _) => Some(TensorBase::from_scalar(scalar)), + TensorOp::Matmul(_, rhs) => Some(*rhs), _ => None, } } } -impl TensorOp where T: Clone { +impl TensorOp +where + T: Clone, +{ pub fn view<'a>(&'a self) -> TensorOp<&'a T> { match self { - TensorOp::Binary(lhs, rhs, op) => { - TensorOp::binary(lhs.view(), rhs.view(), *op) - } - TensorOp::BinaryScalar(lhs, rhs, op) => { - TensorOp::binary_scalar(lhs.view(), rhs, *op) - } + TensorOp::Binary(lhs, rhs, op) => TensorOp::binary(lhs.view(), rhs.view(), *op), + TensorOp::BinaryScalar(lhs, rhs, op) => TensorOp::binary_scalar(lhs.view(), rhs, *op), TensorOp::Unary(tensor, op) => TensorOp::unary(tensor.view(), *op), - TensorOp::Broadcast(tensor, shape) => { - TensorOp::broadcast(tensor.view(), shape.clone()) - } + TensorOp::Broadcast(tensor, shape) => TensorOp::broadcast(tensor.view(), shape.clone()), TensorOp::Matmul(lhs, rhs) => TensorOp::matmul(lhs.view(), rhs.view()), - TensorOp::Transpose { tensor, axes } => { - TensorOp::transpose(tensor.view(), axes.0, axes.1) - } + TensorOp::Transpose { + scope: tensor, + target: axes, + } => TensorOp::transpose(tensor.view(), axes.0, axes.1), + _ => unimplemented!() } } } diff --git a/tensor/src/shape/dim/mod.rs b/tensor/src/shape/dim/mod.rs index c95dbd09..5eae009b 100644 --- a/tensor/src/shape/dim/mod.rs +++ b/tensor/src/shape/dim/mod.rs @@ -5,7 +5,7 @@ //! # Dimension //! -pub use self::dimension::Dim; +pub use self::{dimension::Dim, utils::*}; pub(crate) mod dimension; @@ -16,3 +16,123 @@ pub trait Dimension { fn ndim(&self) -> usize; } + +pub(crate) mod utils { + use crate::shape::{Shape, ShapeError, Stride}; + use core::mem; + + pub(crate) fn can_index_slice( + data: &[A], + shape: &Shape, + stride: &Stride, + ) -> Result<(), ShapeError> { + // Check conditions 1 and 2 and calculate `max_offset`. + let max_offset = max_abs_offset_check_overflow::(shape, stride)?; + can_index_slice_impl(max_offset, data.len(), shape, stride) + } + + fn can_index_slice_impl( + max_offset: usize, + data_len: usize, + dim: &Shape, + strides: &Stride, + ) -> Result<(), ShapeError> { + // Check condition 3. 
+ let is_empty = dim.slice().iter().any(|&d| d == 0); + if is_empty && max_offset > data_len { + return Err(ShapeError::OutOfBounds); + } + if !is_empty && max_offset >= data_len { + return Err(ShapeError::OutOfBounds); + } + + // Check condition 4. + if !is_empty && dim_stride_overlap(dim, strides) { + return Err(ShapeError::Unsupported); + } + + Ok(()) + } + + pub fn dim_stride_overlap(dim: &Shape, strides: &Stride) -> bool { + let order = strides._fastest_varying_stride_order(); + let mut sum_prev_offsets = 0; + for &index in order.slice() { + let d = dim[index]; + let s = (strides[index] as isize).abs(); + match d { + 0 => return false, + 1 => {} + _ => { + if s <= sum_prev_offsets { + return true; + } + sum_prev_offsets += (d - 1) as isize * s; + } + } + } + false + } + + pub fn max_abs_offset_check_overflow<A>( + dim: &Shape, + strides: &Stride, + ) -> Result<usize, ShapeError> { + max_abs_offset_check_overflow_impl(mem::size_of::<A>(), dim, strides) + } + + fn max_abs_offset_check_overflow_impl( + elem_size: usize, + dim: &Shape, + strides: &Stride, + ) -> Result<usize, ShapeError> { + // Condition 1. + if dim.rank() != strides.rank() { + return Err(ShapeError::IncompatibleLayout); + } + + // Condition 3. + let _ = size_of_shape_checked(dim)?; + + // Determine absolute difference in units of `A` between least and greatest + // address accessible by moving along all axes. + let max_offset: usize = izip!(dim.slice(), strides.slice()) + .try_fold(0usize, |acc, (&d, &s)| { + let s = s as isize; + // Calculate maximum possible absolute movement along this axis. + let off = d.saturating_sub(1).checked_mul(s.unsigned_abs())?; + acc.checked_add(off) + }) + .ok_or_else(|| ShapeError::Overflow)?; + // Condition 2a. + if max_offset > isize::MAX as usize { + return Err(ShapeError::Overflow); + } + + // Determine absolute difference in units of bytes between least and + // greatest address accessible by moving along all axes + let max_offset_bytes = max_offset + .checked_mul(elem_size) + .ok_or_else(|| ShapeError::Overflow)?; + // Condition 2b.
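+ // (Condition 2b repeats 2a in bytes rather than elements, so that raw
+ // pointer arithmetic over the buffer can never overflow an `isize`.)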
+ if max_offset_bytes > isize::MAX as usize { + return Err(ShapeError::Overflow); + } + + Ok(max_offset) + } + + pub fn size_of_shape_checked(dim: &Shape) -> Result { + let size_nonzero = dim + .slice() + .iter() + .filter(|&&d| d != 0) + .try_fold(1usize, |acc, &d| acc.checked_mul(d)) + .ok_or_else(|| ShapeError::Overflow)?; + if size_nonzero > ::std::isize::MAX as usize { + Err(ShapeError::Overflow) + } else { + Ok(dim.size()) + } + } +} diff --git a/tensor/src/shape/error.rs b/tensor/src/shape/error.rs index 281aa30a..e39beb9b 100644 --- a/tensor/src/shape/error.rs +++ b/tensor/src/shape/error.rs @@ -34,9 +34,13 @@ pub type ShapeResult = std::result::Result; pub enum ShapeError { DimensionMismatch, IncompatibleShapes, + IncompatibleLayout, InvalidAxis, InvalidShape, MismatchedElements, + OutOfBounds, + Overflow, + Unsupported, } unsafe impl Send for ShapeError {} diff --git a/tensor/src/shape/mod.rs b/tensor/src/shape/mod.rs index f2d2b382..cf5123f4 100644 --- a/tensor/src/shape/mod.rs +++ b/tensor/src/shape/mod.rs @@ -29,6 +29,12 @@ where } } +impl<'a> IntoShape for &'a Shape { + fn into_shape(self) -> Shape { + self.clone() + } +} + pub(crate) mod prelude { pub use super::IntoShape; diff --git a/tensor/src/shape/shape.rs b/tensor/src/shape/shape.rs index e044d1dc..3ed987ff 100644 --- a/tensor/src/shape/shape.rs +++ b/tensor/src/shape/shape.rs @@ -30,7 +30,26 @@ impl Shape { pub fn zeros(rank: usize) -> Self { Self(vec![0; rank]) } - + #[doc(hidden)] + pub(crate) fn default_strides(&self) -> Stride { + // Compute default array strides + // Shape (a, b, c) => Give strides (b * c, c, 1) + let mut strides = Stride::zeros(self.rank()); + // For empty arrays, use all zero strides. + if self.slice().iter().all(|&d| d != 0) { + let mut it = strides.slice_mut().iter_mut().rev(); + // Set first element to 1 + if let Some(rs) = it.next() { + *rs = 1; + } + let mut cum_prod = 1; + for (rs, dim) in it.zip(self.slice().iter().rev()) { + cum_prod *= *dim; + *rs = cum_prod; + } + } + strides + } pub(crate) fn matmul_shape(&self, other: &Self) -> TensorResult { if *self.rank() != 2 || *other.rank() != 2 || self[1] != other[0] { return Err(ShapeError::IncompatibleShapes.into()); diff --git a/tensor/src/shape/stride.rs b/tensor/src/shape/stride.rs index 4baab8ba..ca0a3455 100644 --- a/tensor/src/shape/stride.rs +++ b/tensor/src/shape/stride.rs @@ -34,6 +34,22 @@ impl Stride { Self(Vec::with_capacity(capacity)) } + pub fn zeros(rank: Rank) -> Self { + Self(vec![0; *rank]) + } + + pub(crate) fn _fastest_varying_stride_order(&self) -> Self { + let mut indices = self.clone(); + for (i, elt) in indices.slice_mut().into_iter().enumerate() { + *elt = i; + } + let strides = self.slice(); + indices + .slice_mut() + .sort_by_key(|&i| (strides[i] as isize).abs()); + indices + } + pub fn get(&self, index: usize) -> Option<&usize> { self.0.get(index) } diff --git a/tensor/src/specs/ndtensor.rs b/tensor/src/specs/ndtensor.rs index d0615638..ee3ac044 100644 --- a/tensor/src/specs/ndtensor.rs +++ b/tensor/src/specs/ndtensor.rs @@ -7,7 +7,7 @@ use crate::shape::{Rank, Shape, Stride}; use crate::store::Layout; pub trait NdTensor { - type Elem; + type Data: TensorData; fn id(&self) -> TensorId; @@ -17,7 +17,7 @@ pub trait NdTensor { self.layout().shape().rank() } - fn shape(&self) -> Shape { + fn shape(&self) -> &Shape { self.layout().shape() } @@ -39,6 +39,6 @@ pub trait NdIterator { type Item; } -pub trait NdIndex { - type Output; +pub trait TensorData { + type Elem; } diff --git a/tensor/src/specs/scalar.rs 
b/tensor/src/specs/scalar.rs index 8df5cb64..0f4e2bb3 100644 --- a/tensor/src/specs/scalar.rs +++ b/tensor/src/specs/scalar.rs @@ -2,10 +2,11 @@ Appellation: scalar Contrib: FL03 */ +use crate::tensor::TensorBase; +use core::iter::{Product, Sum}; +use core::ops::Neg; use num::complex::Complex; use num::traits::{Float, FromPrimitive, NumAssign, NumCast, NumOps}; -use std::iter::{Product, Sum}; -use std::ops::Neg; pub trait Scalar: Copy @@ -70,6 +71,10 @@ pub trait Scalar: fn tan(self) -> Self; fn tanh(self) -> Self; + + fn into_tensor(self) -> TensorBase { + TensorBase::from_scalar(self) + } } impl Scalar for Complex diff --git a/tensor/src/store/layout.rs b/tensor/src/store/layout.rs index daa25e01..70f4e2b3 100644 --- a/tensor/src/store/layout.rs +++ b/tensor/src/store/layout.rs @@ -62,10 +62,50 @@ impl Layout { pub fn is_contiguous(&self) -> bool { self.shape.is_contiguous(&self.stride) } + pub fn is_layout_c(&self) -> bool { + if let 1 = *self.shape.rank() { + return self.stride[0] == 1 || self.shape[0] <= 1; + } + + for d in self.shape().iter() { + if *d == 0 { + return true; + } + } + + let mut contig_stride = 1_isize; + // check all dimensions -- a dimension of length 1 can have unequal strides + for (dim, s) in izip!(self.shape().iter().rev(), self.stride().iter().rev()) { + if *dim != 1 { + let s = *s as isize; + if s != contig_stride { + return false; + } + contig_stride *= *dim as isize; + } + } + true + } /// Get a peek at the offset of the layout. pub fn offset(&self) -> usize { self.offset } + /// Returns the offset from the lowest-address element to the logically first + /// element. + pub fn offset_from_low_addr_ptr_to_logical_ptr(&self) -> usize { + let offset = + izip!(self.shape().slice(), self.stride().slice()).fold(0, |_offset, (d, s)| { + let d = *d as isize; + let s = *s as isize; + if s < 0 && d > 1 { + _offset - s * (d - 1) + } else { + _offset + } + }); + debug_assert!(offset >= 0); + offset as usize + } /// Return the rank (number of dimensions) of the layout. pub fn rank(&self) -> Rank { debug_assert_eq!(self.stride.len(), *self.shape.rank()); @@ -83,8 +123,8 @@ impl Layout { self } - pub fn shape(&self) -> Shape { - self.shape.clone() + pub fn shape(&self) -> &Shape { + &self.shape } pub fn size(&self) -> usize { @@ -132,11 +172,7 @@ impl Layout { if idx.len() != *self.shape.rank() { panic!("Dimension mismatch"); } - idx - .iter() - .zip(self.stride.iter()) - .map(|(i, s)| i * s) - .sum() + idx.iter().zip(self.stride.iter()).map(|(i, s)| i * s).sum() } } diff --git a/tensor/src/tensor.rs b/tensor/src/tensor.rs index 2fdc43ba..0e2062f3 100644 --- a/tensor/src/tensor.rs +++ b/tensor/src/tensor.rs @@ -88,6 +88,13 @@ impl TensorBase { store, } } + pub fn as_slice(&self) -> &[T] { + &self.store + } + /// + pub fn as_mut_slice(&mut self) -> &mut [T] { + &mut self.store + } /// Detach the computational graph from the tensor pub fn detach(&self) -> Self where @@ -126,7 +133,7 @@ impl TensorBase { self.layout.shape().rank() } /// An owned reference of the tensors [Shape] - pub fn shape(&self) -> Shape { + pub fn shape(&self) -> &Shape { self.layout.shape() } /// Returns the number of elements in the tensor. 
@@ -137,7 +144,6 @@ impl TensorBase { pub fn stride(&self) -> &[usize] { self.layout.stride() } - /// A function to check if the tensor is a scalar pub fn is_scalar(&self) -> bool { self.shape().len() == 0 @@ -225,7 +231,10 @@ impl TensorBase { } } -impl TensorBase where T: Clone { +impl TensorBase +where + T: Clone, +{ pub fn to_owned(&self) -> TensorBase { self.clone() } @@ -237,14 +246,6 @@ impl TensorBase where T: Clone { // Inernal Methods #[allow(dead_code)] impl TensorBase { - pub(crate) fn as_slice(&self) -> &[T] { - &self.store - } - - pub(crate) fn as_mut_slice(&mut self) -> &mut [T] { - &mut self.store - } - pub(crate) fn data(&self) -> &Vec { &self.store } @@ -296,5 +297,3 @@ impl FromIterator for TensorBase { from_vec(TensorKind::Normal, shape, store) } } - - diff --git a/tensor/src/utils.rs b/tensor/src/utils.rs index 018b6ec7..2bcdae6b 100644 --- a/tensor/src/utils.rs +++ b/tensor/src/utils.rs @@ -60,3 +60,39 @@ where let tensor = from_vec_with_op(false, op, shape, result); Ok(tensor) } + +macro_rules! izip { + // @closure creates a tuple-flattening closure for .map() call. usage: + // @closure partial_pattern => partial_tuple , rest , of , iterators + // eg. izip!( @closure ((a, b), c) => (a, b, c) , dd , ee ) + ( @closure $p:pat => $tup:expr ) => { + |$p| $tup + }; + + // The "b" identifier is a different identifier on each recursion level thanks to hygiene. + ( @closure $p:pat => ( $($tup:tt)* ) , $_iter:expr $( , $tail:expr )* ) => { + izip!(@closure ($p, b) => ( $($tup)*, b ) $( , $tail )*) + }; + + // unary + ($first:expr $(,)*) => { + IntoIterator::into_iter($first) + }; + + // binary + ($first:expr, $second:expr $(,)*) => { + izip!($first) + .zip($second) + }; + + // n-ary where n > 2 + ( $first:expr $( , $rest:expr )* $(,)* ) => { + izip!($first) + $( + .zip($rest) + )* + .map( + izip!(@closure a => (a) $( , $rest )*) + ) + }; +} diff --git a/tensor/tests/backward.rs b/tensor/tests/backward.rs index 5692f70a..75169ab1 100644 --- a/tensor/tests/backward.rs +++ b/tensor/tests/backward.rs @@ -93,12 +93,28 @@ fn test_mixed() { let shape = (2, 2); let a = Tensor::::ones(shape).variable(); - let b = Tensor::::fill(shape, 2_f64).variable(); + let b = Tensor::::fill(shape, 2f64).variable(); let res = &b * (&a + &b); let grad = res.grad().unwrap(); - assert_eq!(grad[&a.id()], Tensor::fill(shape, 2_f64)); - assert_eq!(grad[&b.id()], Tensor::fill(shape, 5_f64)); + assert_eq!(grad[&a.id()], Tensor::fill(shape, 2f64)); + assert_eq!(grad[&b.id()], Tensor::fill(shape, 5f64)); +} + +#[test] +fn test_complex_expr() { + let shape = (2, 2); + + let a = Tensor::::ones(shape).variable(); + let b = Tensor::fill(shape, 2f64).variable(); + let c = Tensor::fill(shape, 3f64).variable(); + let res = (&a + &b) * c.clone().sin() + &b; + + let grad = res.grad().unwrap(); + + assert_eq!(grad[&a.id()], c.clone().sin()); + assert_eq!(grad[&b.id()], c.clone().sin() + 1f64); + assert_eq!(grad[&c.id()], (&a + &b) * c.cos()); } diff --git a/tensor/tests/composition.rs b/tensor/tests/composition.rs index 123ebec0..fdf5ec98 100644 --- a/tensor/tests/composition.rs +++ b/tensor/tests/composition.rs @@ -26,7 +26,7 @@ fn test_ones_and_zeros() { fn test_arange() { let exp = Shape::from(10); let a = Tensor::arange(0_f64, 10_f64, 1_f64); - assert_eq!(a.shape(), exp); + assert_eq!(a.shape(), &exp); for i in 0..10 { assert_eq!(a[&[i]], i as f64); @@ -37,7 +37,7 @@ fn test_arange() { fn test_linstep() { let exp = Shape::from(10); let a = Tensor::linspace(0_f64, 10_f64, 10); - assert_eq!(a.shape(), exp); + 
assert_eq!(a.shape(), &exp);
 
     let b = Tensor::arange(0_f64, 10_f64, 1_f64);
     for i in 0..10 {
         assert_eq!(a[&[i]], b[&[i]]);
diff --git a/tensor/tests/reshape.rs b/tensor/tests/reshape.rs
index de1417ec..0255c25f 100644
--- a/tensor/tests/reshape.rs
+++ b/tensor/tests/reshape.rs
@@ -5,7 +5,7 @@
 #![cfg(test)]
 extern crate acme_tensor as acme;
 
-use acme::prelude::Tensor;
+use acme::prelude::{Shape, Tensor};
 
 #[test]
 #[ignore = "Not implemented"]
@@ -37,7 +37,7 @@ fn test_transpose() {
     let exp = Tensor::from_vec(false, None, (3, 2), vec![0.0, 3.0, 1.0, 4.0, 2.0, 5.0]);
 
     assert_ne!(&a, &at);
-    assert_eq!(at.shape(), (3, 2).into());
+    assert_eq!(at.shape(), &Shape::new(vec![3, 2]));
     for i in 0..shape.0 {
         for j in 0..shape.1 {
             assert_eq!(a[&[i, j]], exp[&[j, i]]);

From a514384ace30b1c1741e17ef6b2fe260f53972c4 Mon Sep 17 00:00:00 2001
From: Joe McCain III
Date: Fri, 29 Mar 2024 15:28:37 -0500
Subject: [PATCH 68/87] update

Signed-off-by: Joe McCain III
---
 core/src/ops/mod.rs             |  18 ++++--
 core/src/ops/unary/mod.rs       |   4 +-
 core/src/ops/unary/operator.rs  |   4 +-
 tensor/src/actions/arange.rs    | 102 ++++++++++++++++++++++----------
 tensor/src/actions/mod.rs       |   2 +
 tensor/src/data/mod.rs          |   2 +-
 tensor/src/impls/grad.rs        |  10 ++--
 tensor/src/impls/linalg.rs      |   4 +-
 tensor/src/impls/ops/binary.rs  |  22 +++----
 tensor/src/impls/ops/unary.rs   |  16 ++---
 tensor/src/impls/reshape.rs     |   6 +-
 tensor/src/ops/backprop.rs      |  34 +++++------
 tensor/src/ops/kinds/reshape.rs |  30 +++++-----
 tensor/src/ops/mod.rs           |   6 +-
 tensor/src/ops/op.rs            |  63 +++++++++++---------
 tensor/src/specs/affine.rs      |  26 ++++++++
 tensor/src/specs/mod.rs         |  54 ++---------------
 tensor/src/tensor.rs            |   4 +-
 tensor/src/utils.rs             |   6 +-
 19 files changed, 228 insertions(+), 185 deletions(-)
 create mode 100644 tensor/src/specs/affine.rs

diff --git a/core/src/ops/mod.rs b/core/src/ops/mod.rs
index 1b6eb04d..c049fe40 100644
--- a/core/src/ops/mod.rs
+++ b/core/src/ops/mod.rs
@@ -12,6 +12,19 @@ pub(crate) mod kinds;
 pub mod binary;
 pub mod unary;
 
+pub trait ApplyTo<T> {
+    type Output;
+
+    fn apply_to(&self, other: T) -> Self::Output;
+}
+
+pub trait ApplyWith<T> {
+    type Output;
+    type With;
+
+    fn apply_with(&self, other: T, with: Self::With) -> Self::Output;
+}
+
 pub trait IntoOp {
     fn into_op(self) -> Op;
 }
@@ -45,10 +58,7 @@ pub trait Powi<T>: Pow<T> {
     fn powi(&self, exp: T) -> Self::Output;
 }
 
-pub trait Powf<T>: Pow<T>
-where
-    T: num::Float,
-{
+pub trait Powf<T>: Pow<T> {
     fn powf(&self, exp: T) -> Self::Output;
 }
diff --git a/core/src/ops/unary/mod.rs b/core/src/ops/unary/mod.rs
index c9fa1d86..e16ea09d 100644
--- a/core/src/ops/unary/mod.rs
+++ b/core/src/ops/unary/mod.rs
@@ -13,9 +13,11 @@ pub(crate) mod specs;
 
 use num::{Complex, Num};
 
-pub trait UnaryOperation {
+pub trait Unary {
     type Output;
 
+    fn name(&self) -> &str;
+
     fn unary(self, expr: UnaryOp) -> Self::Output;
 }
diff --git a/core/src/ops/unary/operator.rs b/core/src/ops/unary/operator.rs
index 13592a99..336675d6 100644
--- a/core/src/ops/unary/operator.rs
+++ b/core/src/ops/unary/operator.rs
@@ -2,7 +2,7 @@
     Appellation: operator
     Contrib: FL03
 */
-use super::{UnaryOp, UnaryOperation};
+use super::{Unary, UnaryOp};
 // use std::marker::PhantomData;
 
 pub struct UnaryOperator<A> {
@@ -22,7 +22,7 @@ impl<A> UnaryOperator<A> {
 
     pub fn eval(self) -> A::Output
     where
-        A: UnaryOperation,
+        A: Unary,
    {
        self.args.unary(self.op)
    }
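To make the renamed `Unary` trait concrete, here is a minimal sketch of how a scalar type could dispatch on a unary-operation enum; the trait and enum below are reduced stand-ins for the crate's versions, so the variant set is an assumption.

// Reduced stand-ins for the crate's UnaryOp / Unary pair (illustrative only).
#[derive(Clone, Copy, Debug)]
enum UnaryOp {
    Cos,
    Exp,
    Neg,
}

trait Unary {
    type Output;

    fn name(&self) -> &str;
    fn unary(self, expr: UnaryOp) -> Self::Output;
}

impl Unary for f64 {
    type Output = f64;

    fn name(&self) -> &str {
        "f64"
    }

    fn unary(self, expr: UnaryOp) -> Self::Output {
        match expr {
            UnaryOp::Cos => self.cos(),
            UnaryOp::Exp => self.exp(),
            UnaryOp::Neg => -self,
        }
    }
}

fn main() {
    assert_eq!(0f64.unary(UnaryOp::Cos), 1.0);
    assert_eq!(0f64.unary(UnaryOp::Exp), 1.0);
}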
diff --git a/tensor/src/actions/arange.rs b/tensor/src/actions/arange.rs
index e0ddb501..fbe10c20 100644
--- a/tensor/src/actions/arange.rs
+++ b/tensor/src/actions/arange.rs
@@ -2,26 +2,33 @@
     Appellation: arange
     Contrib: FL03
 */
-use num::traits::{FromPrimitive, Num, ToPrimitive};
-use std::ops;
+use core::ops::{self, Range};
+use num::traits::{Bounded, FromPrimitive, Num, ToPrimitive};
+
+pub fn step_size<T>(start: T, stop: T, steps: usize) -> T
+where
+    T: FromPrimitive + Num,
+{
+    (stop - start) / T::from_usize(steps).unwrap()
+}
 
 pub struct Arange<T> {
     range: Boundary<T>,
     step: T,
 }
 
 impl<T> Arange<T> {
     pub fn new(range: Boundary<T>, step: T) -> Self {
         Self { range, step }
     }
 
     pub fn range(start: T, stop: T, step: T) -> Self {
-        Self::new(Aranged::Range { start, stop }, step)
+        Self::new(Boundary::Range { start, stop }, step)
     }
 }
 
 impl<T> Arange<T>
 where
-    T: Copy + Default + Num,
+    T: Copy + Default + FromPrimitive + Num,
 {
     pub fn start(&self) -> T {
         self.range.start()
@@ -29,10 +36,10 @@ where
 
     pub fn steps(&self) -> usize
     where
-        T: FromPrimitive + ToPrimitive,
+        T: ToPrimitive,
     {
-        let start = self.range.start();
-        let stop = self.range.stop();
+        let start = self.start();
+        let stop = self.stop();
         let step = self.step;
         let steps = (stop - start) / step;
         steps.to_usize().unwrap()
@@ -43,89 +50,120 @@ where
     }
 
     pub fn stop(&self) -> T {
-        self.range.stop()
+        self.range.stop_or_linear()
     }
 }
 
 #[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)]
-pub enum Aranged<T> {
+pub enum Boundary<T> {
     Range { start: T, stop: T },
+    From { start: T },
     Inclusive { start: T, stop: T },
     Until { stop: T },
 }
 
-impl<T> Aranged<T>
+impl<T> Boundary<T>
 where
     T: Copy + Default,
 {
     /// Returns the start value of the range.
     pub fn start(&self) -> T {
         match self {
-            Aranged::Range { start, .. } => *start,
-            Aranged::Inclusive { start, .. } => *start,
-            Aranged::Until { .. } => T::default(),
+            Boundary::Range { start, .. } => *start,
+            Boundary::From { start } => *start,
+            Boundary::Inclusive { start, .. } => *start,
+            Boundary::Until { .. } => T::default(),
         }
     }
     /// Returns the stop value of the range.
-    pub fn stop(&self) -> T {
+    pub fn stop(&self) -> Option<T> {
         match self {
-            Aranged::Range { stop, .. } => *stop,
-            Aranged::Inclusive { stop, .. } => *stop,
-            Aranged::Until { stop } => *stop,
+            Boundary::Range { stop, .. }
+            | Boundary::Inclusive { stop, .. }
+            | Boundary::Until { stop } => Some(*stop),
+            _ => None,
         }
     }
 
+    pub fn stop_or(&self, default: T) -> T {
+        self.stop().unwrap_or(default)
+    }
+
+    pub fn stop_or_linear(&self) -> T
+    where
+        T: FromPrimitive + Num,
+    {
+        self.stop_or(self.start() * T::from_usize(2).unwrap())
+    }
+
+    pub fn stop_or_default(&self) -> T {
+        self.stop_or(T::default())
+    }
+
+    pub fn stop_or_max(&self) -> T
+    where
+        T: Bounded,
+    {
+        self.stop_or(T::max_value())
+    }
+
     pub fn step_size(&self, steps: usize) -> T
     where
         T: FromPrimitive + Num,
     {
         let steps = T::from_usize(steps).unwrap();
         let start = self.start();
-        let stop = self.stop();
+        let stop = self.stop_or_default();
         let step = (stop - start) / steps;
         step
     }
 }
 
-impl<T> From<ops::Range<T>> for Aranged<T> {
-    fn from(args: ops::Range<T>) -> Self {
-        Aranged::Range {
+impl<T> From<Range<T>> for Boundary<T> {
+    fn from(args: Range<T>) -> Self {
+        Boundary::Range {
             start: args.start,
             stop: args.end,
         }
     }
 }
 
+impl<T> From<ops::RangeFrom<T>> for Boundary<T> {
+    fn from(args: ops::RangeFrom<T>) -> Self {
+        Boundary::From { start: args.start }
+    }
+}
+
 impl<T> From<ops::RangeTo<T>> for Boundary<T> {
     fn from(args: ops::RangeTo<T>) -> Self {
-        Aranged::Until { stop: args.end }
+        Boundary::Until { stop: args.end }
     }
 }
 
-impl<T> From<[T; 2]> for Aranged<T>
+impl<T> From<[T; 2]> for Boundary<T>
 where
     T: Copy,
 {
     fn from(args: [T; 2]) -> Self {
-        Aranged::Range {
+        Boundary::Range {
             start: args[0],
             stop: args[1],
         }
     }
 }
 
-impl<T> From<(T, T)> for Aranged<T> {
+impl<T> From<(T, T)> for Boundary<T> {
     fn from(args: (T, T)) -> Self {
-        Aranged::Inclusive {
+        Boundary::Inclusive {
            start: args.0,
            stop: args.1,
        }
    }
 }
 
-impl<T> From<T> for Aranged<T> {
+impl<T> From<T> for Boundary<T> {
     fn from(stop: T) -> Self {
-        Aranged::Until { stop }
+        Boundary::Until { stop }
     }
 }
@@ -135,7 +173,7 @@ mod tests {
 
     #[test]
     fn test_arange() {
-        let setup = Aranged::Range { start: 0, stop: 10 };
+        let setup = Boundary::Range { start: 0, stop: 10 };
         let arange = Arange::new(setup, 1);
         assert_eq!(arange.start(), 0);
         assert_eq!(arange.stop(), 10);
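The `From` conversions above let callers describe a boundary with ordinary range syntax. Here is a rough usage sketch against a reduced stand-in for the `Boundary` enum (the real type is generic; the concrete `i32` specialization below is an assumption for brevity):

// Illustrative use of the range-to-Boundary conversions introduced above.
#[derive(Debug, PartialEq)]
enum Boundary {
    Range { start: i32, stop: i32 },
    Until { stop: i32 },
}

impl From<std::ops::Range<i32>> for Boundary {
    fn from(args: std::ops::Range<i32>) -> Self {
        Boundary::Range { start: args.start, stop: args.end }
    }
}

impl From<std::ops::RangeTo<i32>> for Boundary {
    fn from(args: std::ops::RangeTo<i32>) -> Self {
        Boundary::Until { stop: args.end }
    }
}

fn main() {
    assert_eq!(Boundary::from(0..10), Boundary::Range { start: 0, stop: 10 });
    assert_eq!(Boundary::from(..5), Boundary::Until { stop: 5 });
}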
diff --git a/tensor/src/actions/mod.rs b/tensor/src/actions/mod.rs
index e7750ddb..a6692204 100644
--- a/tensor/src/actions/mod.rs
+++ b/tensor/src/actions/mod.rs
@@ -19,6 +19,8 @@ pub mod iter;
 pub(crate) mod prelude {
     pub use super::arange::*;
     pub use super::grad::*;
+    pub use super::index::*;
+    pub use super::iter::*;
 }
 
 #[cfg(test)]
diff --git a/tensor/src/data/mod.rs b/tensor/src/data/mod.rs
index 3823627d..cc082c9c 100644
--- a/tensor/src/data/mod.rs
+++ b/tensor/src/data/mod.rs
@@ -196,7 +196,7 @@ where
             data,
             kind: TensorKind::Normal,
             layout: Layout::contiguous(0),
-            op: BackpropOp::none(),
+            op: BackpropOp::<T>::none(),
             ptr,
         };
         debug_assert!(tensor.pointer_is_inbounds());
diff --git a/tensor/src/impls/grad.rs b/tensor/src/impls/grad.rs
index cfc34d3c..1b57c57f 100644
--- a/tensor/src/impls/grad.rs
+++ b/tensor/src/impls/grad.rs
@@ -3,7 +3,7 @@
     Contrib: FL03
 */
 use crate::actions::grad::GradStore;
-use crate::prelude::{Scalar, TensorId, TensorOp, TensorResult};
+use crate::prelude::{Scalar, TensorExpr, TensorId, TensorResult};
 use crate::TensorBase;
 use acme::prelude::{BinaryOp, Store, UnaryOp};
 
@@ -42,14 +42,14 @@ where
             nodes
         } else if let Some(op) = scope.op().op() {
             match op {
-                TensorOp::Binary(lhs, rhs, _kind) => {
+                TensorExpr::Binary(lhs, rhs, _kind) => {
                     let (tg, nodes) = walk(lhs, nodes, visited);
                     track |= tg;
                     let (tg, nodes) = walk(rhs, nodes, visited);
                     track |= tg;
                     nodes
                 }
-                TensorOp::Unary(a, _kind) => {
+                TensorExpr::Unary(a, _kind) => {
                     let (tg, nodes) = walk(a, nodes, visited);
                     track |= tg;
                     nodes
@@ -94,7 +94,7 @@ where
             // handle the different types
of operations if let Some(op) = &*node.op { match op { - TensorOp::Binary(lhs, rhs, kind) => match kind { + TensorExpr::Binary(lhs, rhs, kind) => match kind { BinaryOp::Add => { *entry!(store, lhs) += &grad; *entry!(store, rhs) += &grad; @@ -114,7 +114,7 @@ where } _ => todo!(), }, - TensorOp::Unary(val, kind) => match kind { + TensorExpr::Unary(val, kind) => match kind { UnaryOp::Cos => { *entry!(store, val) -= &grad * val.clone().sin(); } diff --git a/tensor/src/impls/linalg.rs b/tensor/src/impls/linalg.rs index 4418bda6..36f52dc5 100644 --- a/tensor/src/impls/linalg.rs +++ b/tensor/src/impls/linalg.rs @@ -5,7 +5,7 @@ //! Implementations for linear algebra operations. //! //! -use crate::prelude::{Matmul, Scalar, TensorOp}; +use crate::prelude::{Matmul, Scalar, TensorExpr}; use crate::tensor::*; impl TensorBase where T: Scalar {} @@ -28,7 +28,7 @@ where } } } - let op = TensorOp::matmul(self.clone(), other.clone()); + let op = TensorExpr::matmul(self.clone(), other.clone()); from_vec_with_op(false, op, shape, result) } } diff --git a/tensor/src/impls/ops/binary.rs b/tensor/src/impls/ops/binary.rs index 7b1ebba5..ddd8e3bb 100644 --- a/tensor/src/impls/ops/binary.rs +++ b/tensor/src/impls/ops/binary.rs @@ -2,7 +2,7 @@ Appellation: arith Contrib: FL03 */ -use crate::prelude::TensorOp; +use crate::prelude::TensorExpr; use crate::tensor::{from_vec_with_op, TensorBase}; use acme::ops::binary::BinaryOp; use num::traits::Pow; @@ -24,7 +24,7 @@ where fn pow(self, exp: T) -> Self::Output { let shape = self.shape().clone(); let store = self.data().iter().map(|a| a.pow(exp)).collect(); - let op = TensorOp::binary_scalar(self, exp, BinaryOp::Pow); + let op = TensorExpr::binary_scalar(self, exp, BinaryOp::Pow); from_vec_with_op(false, op, shape, store) } } @@ -38,7 +38,7 @@ where fn pow(self, exp: T) -> Self::Output { let shape = self.shape().clone(); let store = self.data().iter().map(|a| a.pow(exp)).collect(); - let op = TensorOp::binary_scalar(self.clone(), exp, BinaryOp::Pow); + let op = TensorExpr::binary_scalar(self.clone(), exp, BinaryOp::Pow); from_vec_with_op(false, op, shape, store) } } @@ -57,7 +57,7 @@ macro_rules! impl_arithmetic { cmp!(ne: self.shape(), other.shape()); let shape = self.shape().clone(); let store = self.data().iter().zip(other.data().iter()).map(|(a, b)| *a $op *b).collect(); - let op = TensorOp::binary(self, other, BinaryOp::$trait); + let op = TensorExpr::binary(self, other, BinaryOp::$trait); from_vec_with_op(false, op, shape, store) } } @@ -74,7 +74,7 @@ macro_rules! impl_arithmetic { } let shape = self.shape().clone(); let store = self.data().iter().zip(other.data().iter()).map(|(a, b)| *a $op *b).collect(); - let op = TensorOp::binary(self, other.clone(), BinaryOp::$trait); + let op = TensorExpr::binary(self, other.clone(), BinaryOp::$trait); from_vec_with_op(false, op, shape, store) } } @@ -91,7 +91,7 @@ macro_rules! impl_arithmetic { } let shape = self.shape().clone(); let store = self.data().iter().zip(other.data().iter()).map(|(a, b)| *a $op *b).collect(); - let op = TensorOp::binary(self.clone(), other, BinaryOp::$trait); + let op = TensorExpr::binary(self.clone(), other, BinaryOp::$trait); from_vec_with_op(false, op, shape, store) } } @@ -108,7 +108,7 @@ macro_rules! 
impl_arithmetic { } let shape = self.shape().clone(); let store = self.data().iter().zip(other.data().iter()).map(|(a, b)| *a $op *b).collect(); - let op = TensorOp::binary(self.clone(), other.clone(), BinaryOp::$trait); + let op = TensorExpr::binary(self.clone(), other.clone(), BinaryOp::$trait); from_vec_with_op(false, op, shape, store) } } @@ -130,7 +130,7 @@ macro_rules! impl_scalar_arith { fn $method(self, other: T) -> Self::Output { let shape = self.shape().clone(); let store = self.data().iter().map(|a| *a $op other).collect(); - let op = TensorOp::binary_scalar(self, other, BinaryOp::$trait); + let op = TensorExpr::binary_scalar(self, other, BinaryOp::$trait); from_vec_with_op(false, op, shape, store) } } @@ -144,7 +144,7 @@ macro_rules! impl_scalar_arith { fn $method(self, other: T) -> Self::Output { let shape = self.shape().clone(); let store = self.data().iter().map(|a| *a $op other).collect(); - let op = TensorOp::binary_scalar(self.clone(), other, BinaryOp::$trait); + let op = TensorExpr::binary_scalar(self.clone(), other, BinaryOp::$trait); from_vec_with_op(false, op, shape, store) } } @@ -161,7 +161,7 @@ macro_rules! impl_assign_op { cmp!(ne: self.shape(), other.shape()); let shape = self.shape().clone(); let store = self.data().iter().zip(other.data().iter()).map(|(a, b)| *a $op *b).collect(); - let op = TensorOp::binary(self.clone(), other, BinaryOp::$inner); + let op = TensorExpr::binary(self.clone(), other, BinaryOp::$inner); *self = from_vec_with_op(false, op, shape, store); } @@ -175,7 +175,7 @@ macro_rules! impl_assign_op { cmp!(ne: self.shape(), other.shape()); let shape = self.shape().clone(); let store = self.data().iter().zip(other.data().iter()).map(|(a, b)| *a $op *b).collect(); - let op = TensorOp::binary(self.clone(), other.clone(), BinaryOp::$inner); + let op = TensorExpr::binary(self.clone(), other.clone(), BinaryOp::$inner); *self = from_vec_with_op(false, op, shape, store); } diff --git a/tensor/src/impls/ops/unary.rs b/tensor/src/impls/ops/unary.rs index 8786a6f9..44516dbd 100644 --- a/tensor/src/impls/ops/unary.rs +++ b/tensor/src/impls/ops/unary.rs @@ -2,7 +2,7 @@ Appellation: arith Contrib: FL03 */ -use crate::prelude::{Scalar, TensorOp}; +use crate::prelude::{Scalar, TensorExpr}; use crate::tensor::*; use acme::ops::unary::UnaryOp; @@ -15,7 +15,7 @@ where fn neg(self) -> Self::Output { let shape = self.shape().clone(); let store = self.data().iter().copied().map(|a| -a).collect(); - let op = TensorOp::unary(self, UnaryOp::Neg); + let op = TensorExpr::unary(self, UnaryOp::Neg); from_vec_with_op(false, op, shape, store) } } @@ -29,7 +29,7 @@ where fn neg(self) -> Self::Output { let shape = self.shape().clone(); let store = self.data().iter().copied().map(|a| -a).collect(); - let op = TensorOp::unary(self.clone(), UnaryOp::Neg); + let op = TensorExpr::unary(self.clone(), UnaryOp::Neg); from_vec_with_op(false, op, shape, store) } } @@ -43,7 +43,7 @@ where fn not(self) -> Self::Output { let shape = self.shape().clone(); let store = self.data().iter().copied().map(|a| !a).collect(); - let op = TensorOp::unary(self, UnaryOp::Not); + let op = TensorExpr::unary(self, UnaryOp::Not); from_vec_with_op(false, op, shape, store) } } @@ -57,7 +57,7 @@ where fn not(self) -> Self::Output { let shape = self.shape().clone(); let store = self.data().iter().copied().map(|a| !a).collect(); - let op = TensorOp::unary(self.clone(), UnaryOp::Not); + let op = TensorExpr::unary(self.clone(), UnaryOp::Not); from_vec_with_op(false, op, shape, store) } } @@ -67,7 +67,7 @@ 
macro_rules! impl_unary_op { pub fn $method(self) -> Self { let shape = self.shape().clone(); let store = self.store.iter().copied().map(|v| v.$method()).collect(); - let op = TensorOp::unary(self, UnaryOp::$variant); + let op = TensorExpr::unary(self, UnaryOp::$variant); from_vec_with_op(false, op, shape, store) } }; @@ -75,7 +75,7 @@ macro_rules! impl_unary_op { pub fn $method(self) -> Self { let shape = self.shape().clone(); let store = self.store.iter().copied().map($f).collect(); - let op = TensorOp::unary(self, UnaryOp::$variant); + let op = TensorExpr::unary(self, UnaryOp::$variant); from_vec_with_op(false, op, shape, store) } }; @@ -91,7 +91,7 @@ where { let shape = self.shape().clone(); let store = self.store.iter().copied().map(|v| v.abs()).collect(); - let op = TensorOp::unary(self, UnaryOp::Abs); + let op = TensorExpr::unary(self, UnaryOp::Abs); from_vec_with_op(false, op, shape, store) } diff --git a/tensor/src/impls/reshape.rs b/tensor/src/impls/reshape.rs index 241c8ef1..0ab1bba7 100644 --- a/tensor/src/impls/reshape.rs +++ b/tensor/src/impls/reshape.rs @@ -2,7 +2,7 @@ Appellation: reshape Contrib: FL03 */ -use crate::prelude::{TensorId, TensorOp, TensorResult}; +use crate::prelude::{TensorExpr, TensorId, TensorResult}; use crate::shape::{Axis, IntoShape, ShapeError}; use crate::tensor::TensorBase; @@ -32,7 +32,7 @@ where /// pub fn swap_axes(&self, swap: Axis, with: Axis) -> Self { - let op = TensorOp::transpose(self.clone(), swap, with); + let op = TensorExpr::transpose(self.clone(), swap, with); let layout = self.layout().clone().transpose(swap, with); let shape = self.layout.shape(); @@ -57,7 +57,7 @@ where /// Transpose the tensor. pub fn t(&self) -> Self { let (a, b) = (Axis(0), Axis(1)); - let op = TensorOp::transpose(self.clone(), a, b); + let op = TensorExpr::transpose(self.clone(), a, b); let layout = self.layout().clone().transpose(a, b); let shape = self.layout.shape(); diff --git a/tensor/src/ops/backprop.rs b/tensor/src/ops/backprop.rs index 1dff4486..29d0533c 100644 --- a/tensor/src/ops/backprop.rs +++ b/tensor/src/ops/backprop.rs @@ -2,17 +2,17 @@ Appellation: backprop Contrib: FL03 */ -use super::TensorOp; +use super::TensorExpr; use crate::TensorBase; use acme::prelude::BinaryOp; use core::borrow::Borrow; use core::ops::{Deref, DerefMut}; #[derive(Clone, Debug)] -pub struct BackpropOp(Option>); +pub struct BackpropOp(Option>); impl BackpropOp { - pub fn new(op: TensorOp) -> Self { + pub fn new(op: TensorExpr) -> Self { BackpropOp(Some(op)) } @@ -21,26 +21,26 @@ impl BackpropOp { } pub fn binary(lhs: TensorBase, rhs: TensorBase, kind: BinaryOp) -> Self { - BackpropOp(Some(TensorOp::binary(lhs, rhs, kind))) + BackpropOp(Some(TensorExpr::binary(lhs, rhs, kind))) } pub fn is_none(&self) -> bool { self.0.is_none() } - pub fn op(&self) -> Option<&TensorOp> { + pub fn op(&self) -> Option<&TensorExpr> { self.0.as_ref() } - pub fn op_mut(&mut self) -> Option<&mut TensorOp> { + pub fn op_mut(&mut self) -> Option<&mut TensorExpr> { self.0.as_mut() } - pub fn into_inner(self) -> Option> { + pub fn into_inner(self) -> Option> { self.0 } - pub fn take(&mut self) -> Option> { + pub fn take(&mut self) -> Option> { self.0.take() } } @@ -54,8 +54,8 @@ where } } -impl Borrow>> for BackpropOp { - fn borrow(&self) -> &Option> { +impl Borrow>> for BackpropOp { + fn borrow(&self) -> &Option> { &self.0 } } @@ -67,7 +67,7 @@ impl Default for BackpropOp { } impl Deref for BackpropOp { - type Target = Option>; + type Target = Option>; fn deref(&self) -> &Self::Target { &self.0 @@ 
-80,20 +80,20 @@ impl<T> DerefMut for BackpropOp<T> {
     }
 }
 
-impl<T> From<Option<TensorOp<T>>> for BackpropOp<T> {
-    fn from(op: Option<TensorOp<T>>) -> Self {
+impl<T> From<Option<TensorExpr<T>>> for BackpropOp<T> {
+    fn from(op: Option<TensorExpr<T>>) -> Self {
         BackpropOp(op)
     }
 }
 
-impl<T> From<TensorOp<T>> for BackpropOp<T> {
-    fn from(op: TensorOp<T>) -> Self {
+impl<T> From<TensorExpr<T>> for BackpropOp<T> {
+    fn from(op: TensorExpr<T>) -> Self {
         BackpropOp(Some(op))
     }
 }
 
-impl<T> From<BackpropOp<T>> for Option<TensorOp<T>> {
-    fn from(op: BackpropOp<T>) -> Option<TensorOp<T>> {
+impl<T> From<BackpropOp<T>> for Option<TensorExpr<T>> {
+    fn from(op: BackpropOp<T>) -> Option<TensorExpr<T>> {
         op.into_inner()
     }
 }
diff --git a/tensor/src/ops/kinds/reshape.rs b/tensor/src/ops/kinds/reshape.rs
index 34e2da73..62eb55ca 100644
--- a/tensor/src/ops/kinds/reshape.rs
+++ b/tensor/src/ops/kinds/reshape.rs
@@ -8,27 +8,25 @@ use crate::shape::Shape;
 use serde::{Deserialize, Serialize};
 use strum::{Display, EnumCount, EnumDiscriminants, EnumIs, EnumIter, EnumString, VariantNames};
 
-#[derive(
-    Clone,
-    Debug,
-    EnumDiscriminants,
-    Eq,
-    PartialEq
-)]
+#[derive(Clone, Debug, EnumDiscriminants, Eq, PartialEq)]
 #[repr(u8)]
 #[strum(serialize_all = "snake_case")]
-#[strum_discriminants(derive(Display, EnumCount, EnumIs, EnumIter, EnumString, Hash, Ord, PartialOrd, VariantNames))]
+#[strum_discriminants(derive(
+    Display,
+    EnumCount,
+    EnumIs,
+    EnumIter,
+    EnumString,
+    Hash,
+    Ord,
+    PartialOrd,
+    VariantNames
+))]
 #[cfg_attr(feature = "serde", strum_discriminants(derive(Deserialize, Serialize)))]
 #[strum_discriminants(name(ReshapeOp))]
 pub enum ReshapeExpr<T> {
-    Broadcast {
-        scope: BoxTensor<T>,
-        shape: Shape,
-    },
-    Reshape {
-        scope: BoxTensor<T>,
-        shape: Shape,
-    },
+    Broadcast { scope: BoxTensor<T>, shape: Shape },
+    Reshape { scope: BoxTensor<T>, shape: Shape },
     Swap,
     Transpose,
 }
diff --git a/tensor/src/ops/mod.rs b/tensor/src/ops/mod.rs
index c9dce4d5..bb0297ab 100644
--- a/tensor/src/ops/mod.rs
+++ b/tensor/src/ops/mod.rs
@@ -13,7 +13,11 @@ pub(crate) mod kinds {
     pub(crate) mod reshape;
 }
 
-pub trait TensorExpr {}
+pub trait BaseOperation {
+    type Output;
+
+    fn name(&self) -> &str;
+}
 
 #[cfg(test)]
 mod tests {}
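The expression nodes above box their operand tensors so that the graph can own arbitrarily nested sub-expressions. As a rough illustration of the pattern (not the crate's actual types), here is a minimal boxed expression tree with an evaluator:

// Simplified stand-in for a boxed expression graph like TensorExpr.
#[derive(Clone, Debug)]
enum Expr {
    Value(f64),
    Add(Box<Expr>, Box<Expr>),
    Mul(Box<Expr>, Box<Expr>),
}

impl Expr {
    fn eval(&self) -> f64 {
        match self {
            Expr::Value(v) => *v,
            Expr::Add(a, b) => a.eval() + b.eval(),
            Expr::Mul(a, b) => a.eval() * b.eval(),
        }
    }
}

fn main() {
    // (2 + 3) * 4
    let expr = Expr::Mul(
        Box::new(Expr::Add(
            Box::new(Expr::Value(2.0)),
            Box::new(Expr::Value(3.0)),
        )),
        Box::new(Expr::Value(4.0)),
    );
    assert_eq!(expr.eval(), 20.0);
}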
diff --git a/tensor/src/ops/op.rs b/tensor/src/ops/op.rs
index 5fa4c5c0..0d3fd5ed 100644
--- a/tensor/src/ops/op.rs
+++ b/tensor/src/ops/op.rs
@@ -11,86 +11,91 @@ pub type BoxTensor<T> = Box<TensorBase<T>>;
 
 #[derive(Clone, Debug)]
 #[non_exhaustive]
-pub enum TensorOp<T> {
+pub enum TensorExpr<T> {
     Binary(BoxTensor<T>, BoxTensor<T>, BinaryOp),
     BinaryScalar(BoxTensor<T>, T, BinaryOp),
     Unary(BoxTensor<T>, UnaryOp),
     Broadcast(BoxTensor<T>, Shape),
     Matmul(BoxTensor<T>, BoxTensor<T>),
-    Reshape(ReshapeExpr<T>),
+    Reshape(BoxTensor<T>, ReshapeExpr<T>),
+    Shape(ReshapeExpr<T>),
     Transpose {
         scope: BoxTensor<T>,
         target: (Axis, Axis),
     },
 }
 
-impl<T> TensorOp<T> {
+impl<T> TensorExpr<T> {
     pub fn binary(lhs: TensorBase<T>, rhs: TensorBase<T>, op: BinaryOp) -> Self {
-        TensorOp::Binary(Box::new(lhs), Box::new(rhs), op)
+        TensorExpr::Binary(Box::new(lhs), Box::new(rhs), op)
     }
 
     pub fn binary_scalar(lhs: TensorBase<T>, rhs: T, op: BinaryOp) -> Self {
-        TensorOp::BinaryScalar(Box::new(lhs), rhs, op)
+        TensorExpr::BinaryScalar(Box::new(lhs), rhs, op)
     }
 
     pub fn broadcast(tensor: TensorBase<T>, shape: Shape) -> Self {
-        TensorOp::Broadcast(Box::new(tensor), shape)
+        TensorExpr::Broadcast(Box::new(tensor), shape)
     }
 
     pub fn matmul(lhs: TensorBase<T>, rhs: TensorBase<T>) -> Self {
-        TensorOp::Matmul(Box::new(lhs), Box::new(rhs))
+        TensorExpr::Matmul(Box::new(lhs), Box::new(rhs))
     }
 
     pub fn transpose(scope: TensorBase<T>, swap: Axis, with: Axis) -> Self {
-        TensorOp::Transpose {
+        TensorExpr::Transpose {
             scope: Box::new(scope),
             target: (swap, with),
         }
     }
 
     pub fn unary(tensor: TensorBase<T>, op: UnaryOp) -> Self {
-        TensorOp::Unary(Box::new(tensor), op)
+        TensorExpr::Unary(Box::new(tensor), op)
     }
 }
 
-impl<T> TensorOp<T> {
+impl<T> TensorExpr<T> {
     pub fn lhs(self) -> Option<TensorBase<T>> {
         match self {
-            TensorOp::Binary(lhs, _, _) => Some(*lhs),
-            TensorOp::BinaryScalar(lhs, _, _) => Some(*lhs),
-            TensorOp::Unary(lhs, _) => Some(*lhs),
-            TensorOp::Broadcast(tensor, _) => Some(*tensor),
-            TensorOp::Matmul(lhs, _) => Some(*lhs),
-            TensorOp::Transpose { scope, .. } => Some(*scope),
+            TensorExpr::Binary(lhs, _, _) => Some(*lhs),
+            TensorExpr::BinaryScalar(lhs, _, _) => Some(*lhs),
+            TensorExpr::Unary(lhs, _) => Some(*lhs),
+            TensorExpr::Broadcast(tensor, _) => Some(*tensor),
+            TensorExpr::Matmul(lhs, _) => Some(*lhs),
+            TensorExpr::Transpose { scope, .. } => Some(*scope),
             _ => None,
         }
     }
 
     pub fn rhs(self) -> Option<TensorBase<T>> {
         match self {
-            TensorOp::Binary(_, rhs, _) => Some(*rhs),
-            TensorOp::BinaryScalar(_, scalar, _) => Some(TensorBase::from_scalar(scalar)),
-            TensorOp::Matmul(_, rhs) => Some(*rhs),
+            TensorExpr::Binary(_, rhs, _) => Some(*rhs),
+            TensorExpr::BinaryScalar(_, scalar, _) => Some(TensorBase::from_scalar(scalar)),
+            TensorExpr::Matmul(_, rhs) => Some(*rhs),
             _ => None,
         }
     }
 }
 
-impl<T> TensorOp<T>
+impl<T> TensorExpr<T>
 where
     T: Clone,
 {
-    pub fn view<'a>(&'a self) -> TensorOp<&'a T> {
+    pub fn view<'a>(&'a self) -> TensorExpr<&'a T> {
         match self {
-            TensorOp::Binary(lhs, rhs, op) => TensorOp::binary(lhs.view(), rhs.view(), *op),
-            TensorOp::BinaryScalar(lhs, rhs, op) => TensorOp::binary_scalar(lhs.view(), rhs, *op),
-            TensorOp::Unary(tensor, op) => TensorOp::unary(tensor.view(), *op),
-            TensorOp::Broadcast(tensor, shape) => TensorOp::broadcast(tensor.view(), shape.clone()),
-            TensorOp::Matmul(lhs, rhs) => TensorOp::matmul(lhs.view(), rhs.view()),
-            TensorOp::Transpose {
+            TensorExpr::Binary(lhs, rhs, op) => TensorExpr::binary(lhs.view(), rhs.view(), *op),
+            TensorExpr::BinaryScalar(lhs, rhs, op) => {
+                TensorExpr::binary_scalar(lhs.view(), rhs, *op)
+            }
+            TensorExpr::Unary(tensor, op) => TensorExpr::unary(tensor.view(), *op),
+            TensorExpr::Broadcast(tensor, shape) => {
+                TensorExpr::broadcast(tensor.view(), shape.clone())
+            }
+            TensorExpr::Matmul(lhs, rhs) => TensorExpr::matmul(lhs.view(), rhs.view()),
+            TensorExpr::Transpose {
                 scope: tensor,
                 target: axes,
-            } => TensorOp::transpose(tensor.view(), axes.0, axes.1),
-            _ => unimplemented!()
+            } => TensorExpr::transpose(tensor.view(), axes.0, axes.1),
+            _ => unimplemented!(),
         }
     }
 }
diff --git a/tensor/src/specs/affine.rs b/tensor/src/specs/affine.rs
new file mode 100644
index 00000000..7cfb6cf1
--- /dev/null
+++ b/tensor/src/specs/affine.rs
@@ -0,0 +1,26 @@
+/*
+    Appellation: affine
+    Contrib: FL03
+*/
+/// [Affine] describes a type of geometric transformation which preserves
+/// lines and parallelisms.
+///
+/// ### General Formula
+/// f(x) = A * x + b
+pub trait Affine<A, B> {
+    type Output;
+
+    fn affine(&self, mul: A, add: B) -> Self::Output;
+}
+
+impl<S, A, B, C> Affine<A, B> for S
+where
+    S: Clone + std::ops::Mul<A, Output = C>,
+    C: std::ops::Add<B, Output = C>,
+{
+    type Output = C;
+
+    fn affine(&self, mul: A, add: B) -> Self::Output {
+        self.clone() * mul + add
+    }
+}
diff --git a/tensor/src/specs/mod.rs b/tensor/src/specs/mod.rs
index 2c229a85..a98b166b 100644
--- a/tensor/src/specs/mod.rs
+++ b/tensor/src/specs/mod.rs
@@ -2,33 +2,11 @@
     Appellation: specs
     Contrib: FL03
 */
+pub use self::{affine::*, ndtensor::*, scalar::*};
 
-pub mod ndtensor;
-pub mod scalar;
-
-/// [Affine] describes a type of geometric transformation which preserves
-/// lines and parallelisms.
-/// -/// ### General Formula -/// f(x) = A * x + b -pub trait Affine { - type Output; - - fn affine(&self, mul: T, add: T) -> Self::Output; -} - -impl Affine for A -where - A: std::ops::Mul, - C: std::ops::Add, - Self: Clone, -{ - type Output = D; - - fn affine(&self, mul: B, add: B) -> Self::Output { - self.clone() * mul + add - } -} +pub(crate) mod affine; +pub(crate) mod ndtensor; +pub(crate) mod scalar; pub trait Hstack { type Output; @@ -65,34 +43,14 @@ pub(crate) mod prelude { #[cfg(test)] mod tests { use super::scalar::Scalar; - use super::Affine; use num::Complex; - #[test] - fn test_affine() { - let a = 3f64; - let b = 4f64; - let c = 5f64; - - let exp = 17f64; - - assert_eq!(a.affine(b, c), exp); - - let a = Complex::::new(3.0, 0.0); - let b = 4f64; - let c = 5f64; - - let exp = Complex::::new(17.0, 0.0); - - assert_eq!(a.affine(b, c), exp); - } - #[test] fn test_scalar() { let a = 3f64; - let b = 4f64; + let b = Complex::new(4f64, 0f64); assert_eq!(Scalar::sqr(a), 9f64); - assert_eq!(Scalar::sqrt(b), 2f64); + assert_eq!(Scalar::sqrt(b), 2f64.into()); } } diff --git a/tensor/src/tensor.rs b/tensor/src/tensor.rs index 0e2062f3..44638fe6 100644 --- a/tensor/src/tensor.rs +++ b/tensor/src/tensor.rs @@ -3,7 +3,7 @@ Contrib: FL03 */ use crate::actions::index::Strides; -use crate::ops::{BackpropOp, TensorOp}; +use crate::ops::{BackpropOp, TensorExpr}; use crate::prelude::{IntoShape, Rank, Shape, TensorId, TensorKind}; use crate::store::Layout; use acme::prelude::BinaryOp; @@ -34,7 +34,7 @@ pub(crate) fn from_vec( pub(crate) fn from_vec_with_op( kind: impl Into, - op: TensorOp, + op: TensorExpr, shape: impl IntoShape, store: Vec, ) -> TensorBase { diff --git a/tensor/src/utils.rs b/tensor/src/utils.rs index 2bcdae6b..4741c24c 100644 --- a/tensor/src/utils.rs +++ b/tensor/src/utils.rs @@ -5,7 +5,7 @@ //! # Utilities //! //! 
-use crate::prelude::{Scalar, TensorOp, TensorResult}; +use crate::prelude::{Scalar, TensorExpr, TensorResult}; use crate::shape::ShapeError; use crate::tensor::{from_vec_with_op, TensorBase}; @@ -30,7 +30,7 @@ where } } } - let op = TensorOp::matmul(lhs.clone(), rhs.clone()); + let op = TensorExpr::matmul(lhs.clone(), rhs.clone()); let tensor = from_vec_with_op(false, op, shape, result); Ok(tensor) } @@ -56,7 +56,7 @@ where } } } - let op = TensorOp::matmul(lhs.clone(), rhs.clone()); + let op = TensorExpr::matmul(lhs.clone(), rhs.clone()); let tensor = from_vec_with_op(false, op, shape, result); Ok(tensor) } From da51f2d28e442b6042e8ba957a2463df3ecba4f7 Mon Sep 17 00:00:00 2001 From: Joe McCain III Date: Sun, 31 Mar 2024 10:27:59 -0500 Subject: [PATCH 69/87] update Signed-off-by: Joe McCain III --- core/src/errors/kinds/external.rs | 2 ++ core/src/ops/kinds.rs | 3 +-- core/src/ops/unary/kinds.rs | 3 +-- core/src/ops/unary/mod.rs | 43 ----------------------------- core/src/ops/unary/specs.rs | 44 +++++++++++++++++++++++++++++- core/src/specs/gradient.rs | 7 +---- core/src/specs/mod.rs | 2 -- macros/src/lib.rs | 2 +- macros/src/ops/unary.rs | 8 +++--- macros/tests/autodiff.rs | 2 +- tensor/src/data/mod.rs | 18 +++---------- tensor/src/data/specs.rs | 2 +- tensor/src/impls/ops/unary.rs | 19 ++++++------- tensor/src/lib.rs | 1 + tensor/src/shape/shape.rs | 45 +++++++++++++++---------------- tensor/src/specs/ndtensor.rs | 4 +++ tensor/src/stats/mod.rs | 30 +++++++++++++++++++++ tensor/src/store/layout.rs | 2 +- tensor/src/tensor.rs | 4 +++ 19 files changed, 130 insertions(+), 111 deletions(-) create mode 100644 tensor/src/stats/mod.rs diff --git a/core/src/errors/kinds/external.rs b/core/src/errors/kinds/external.rs index 2d5b7294..0251874e 100644 --- a/core/src/errors/kinds/external.rs +++ b/core/src/errors/kinds/external.rs @@ -44,6 +44,8 @@ impl ExternalError { } } +impl std::error::Error for ExternalError where E: std::fmt::Debug {} + impl ErrorType for ExternalError where E: ToString, diff --git a/core/src/ops/kinds.rs b/core/src/ops/kinds.rs index edf910b4..04d9afce 100644 --- a/core/src/ops/kinds.rs +++ b/core/src/ops/kinds.rs @@ -6,7 +6,7 @@ use super::binary::{BinaryOp, BinaryOperator}; use super::unary::UnaryOp; #[cfg(feature = "serde")] use serde::{Deserialize, Serialize}; -use strum::{Display, EnumCount, EnumIs, EnumIter, VariantNames}; +use strum::{Display, EnumCount, EnumIs, VariantNames}; #[cfg_attr( feature = "serde", @@ -20,7 +20,6 @@ use strum::{Display, EnumCount, EnumIs, EnumIter, VariantNames}; Display, EnumCount, EnumIs, - EnumIter, Eq, Hash, Ord, diff --git a/core/src/ops/unary/kinds.rs b/core/src/ops/unary/kinds.rs index ebf24c22..bfd1bd48 100644 --- a/core/src/ops/unary/kinds.rs +++ b/core/src/ops/unary/kinds.rs @@ -15,7 +15,6 @@ use strum::{Display, EnumCount, EnumIs, EnumIter, VariantNames}; Clone, Copy, Debug, - Default, Display, EnumCount, EnumIs, @@ -30,7 +29,6 @@ use strum::{Display, EnumCount, EnumIs, EnumIter, VariantNames}; #[repr(u8)] #[strum(serialize_all = "lowercase")] pub enum UnaryOp { - #[default] Abs, Cos, Cosh, @@ -46,6 +44,7 @@ pub enum UnaryOp { Not, Sin, Sinh, + #[cfg_attr(feature = "serde", serde(alias = "square_root"))] Sqrt, Square, Tan, diff --git a/core/src/ops/unary/mod.rs b/core/src/ops/unary/mod.rs index e16ea09d..d38f08af 100644 --- a/core/src/ops/unary/mod.rs +++ b/core/src/ops/unary/mod.rs @@ -11,8 +11,6 @@ pub(crate) mod kinds; pub(crate) mod operator; pub(crate) mod specs; -use num::{Complex, Num}; - pub trait Unary { type Output; @@ 
-21,46 +19,5 @@ pub trait Unary {
     fn unary(self, expr: UnaryOp) -> Self::Output;
 }
 
-///
-pub trait Conjugate {
-    type Complex;
-    type Real;
-
-    fn conj(&self) -> Self::Complex;
-}
-
-macro_rules! impl_conj {
-    ($t:ty) => {
-        impl Conjugate for $t {
-            type Complex = Complex<$t>;
-            type Real = Self;
-
-            fn conj(&self) -> Self::Complex {
-                Complex::new(*self, <$t>::default())
-            }
-        }
-    };
-    ($($t:ty),*) => {
-        $(
-            impl_conj!($t);
-        )*
-    };
-}
-
-impl<T> Conjugate for Complex<T>
-where
-    T: Clone + Num + std::ops::Neg<Output = T>,
-{
-    type Complex = Self;
-    type Real = T;
-
-    fn conj(&self) -> Self::Complex {
-        Complex::conj(self)
-    }
-}
-
-impl_conj!(i8, i16, i32, i64, i128, isize);
-impl_conj!(f32, f64);
-
 #[cfg(test)]
 mod tests {}
diff --git a/core/src/ops/unary/specs.rs b/core/src/ops/unary/specs.rs
index 10d0112a..cb8c2240 100644
--- a/core/src/ops/unary/specs.rs
+++ b/core/src/ops/unary/specs.rs
@@ -2,9 +2,51 @@
     Appellation: specs
     Contrib: FL03
 */
-use num::traits::Inv;
+use core::ops;
+use num::traits::{Inv, Num};
 use num::Complex;
 
+///
+pub trait Conjugate {
+    type Complex;
+    type Real;
+
+    fn conj(&self) -> Self::Complex;
+}
+
+macro_rules! impl_conj {
+    ($t:ty) => {
+        impl Conjugate for $t {
+            type Complex = Complex<$t>;
+            type Real = Self;
+
+            fn conj(&self) -> Self::Complex {
+                Complex::new(*self, <$t>::default())
+            }
+        }
+    };
+    ($($t:ty),*) => {
+        $(
+            impl_conj!($t);
+        )*
+    };
+}
+
+impl<T> Conjugate for Complex<T>
+where
+    T: Clone + Num + ops::Neg<Output = T>,
+{
+    type Complex = Self;
+    type Real = T;
+
+    fn conj(&self) -> Self::Complex {
+        Complex::conj(self)
+    }
+}
+
+impl_conj!(i8, i16, i32, i64, i128, isize);
+impl_conj!(f32, f64);
+
 macro_rules! unary_op_trait {
     ($(($trait:ident, $method:ident)),*) => {
         $(unary_op_trait!($trait, $method);)*
diff --git a/core/src/specs/gradient.rs b/core/src/specs/gradient.rs
index e63405d9..f70325ec 100644
--- a/core/src/specs/gradient.rs
+++ b/core/src/specs/gradient.rs
@@ -6,15 +6,10 @@ use super::store::Store;
 
 pub trait IsDifferentiable {
+    /// Returns true if the function is differentiable.
fn differentiable(&self) -> bool; } -pub trait Differentiable { - type Derivative; - - fn diff(&self, args: T) -> Self::Derivative; -} - pub trait Gradient { type Gradient; diff --git a/core/src/specs/mod.rs b/core/src/specs/mod.rs index 345e9789..19597ade 100644 --- a/core/src/specs/mod.rs +++ b/core/src/specs/mod.rs @@ -15,8 +15,6 @@ use core::borrow::Borrow; pub trait Idx { type Index; - - fn index(&self) -> Self::Index; } pub trait IdxExt: Idx diff --git a/macros/src/lib.rs b/macros/src/lib.rs index d63e2a18..d4755fe9 100644 --- a/macros/src/lib.rs +++ b/macros/src/lib.rs @@ -63,7 +63,7 @@ pub(crate) mod kw { syn::custom_keyword!(grad); syn::custom_keyword!(cos); - syn::custom_keyword!(e); + syn::custom_keyword!(exp); syn::custom_keyword!(ln); syn::custom_keyword!(sin); syn::custom_keyword!(tan); diff --git a/macros/src/ops/unary.rs b/macros/src/ops/unary.rs index 7176cd16..c7c1cefa 100644 --- a/macros/src/ops/unary.rs +++ b/macros/src/ops/unary.rs @@ -57,7 +57,7 @@ impl Parse for UnaryMethod { pub enum UnaryOps { Cosine(kw::cos), - Exp(kw::e), + Exp(kw::exp), Ln(kw::ln), Sine(kw::sin), Tan(kw::tan), @@ -72,7 +72,7 @@ impl Parse for UnaryOps { let span = Span::call_site(); match method.to_string().as_str() { "cos" => return Ok(UnaryOps::Cosine(kw::cos(span))), - "exp" => return Ok(UnaryOps::Exp(kw::e(span))), + "exp" => return Ok(UnaryOps::Exp(kw::exp(span))), "ln" => return Ok(UnaryOps::Ln(kw::ln(span))), "sin" => return Ok(UnaryOps::Sine(kw::sin(span))), "tan" => return Ok(UnaryOps::Tan(kw::tan(span))), @@ -87,8 +87,8 @@ impl Parse for UnaryOps { input.parse::().map(UnaryOps::Tan) } else if input.peek2(kw::ln) { input.parse::().map(UnaryOps::Ln) - } else if input.peek2(kw::e) { - input.parse::().map(UnaryOps::Exp) + } else if input.peek2(kw::exp) { + input.parse::().map(UnaryOps::Exp) } else { Err(input.error("Expected a method call")) } diff --git a/macros/tests/autodiff.rs b/macros/tests/autodiff.rs index dcdf09b8..8690b313 100644 --- a/macros/tests/autodiff.rs +++ b/macros/tests/autodiff.rs @@ -69,7 +69,7 @@ fn test_div() { fn test_mul() { let x = 1.0; let y = 2.0; - assert_eq!(autodiff!(x: x * y), 2.0); + assert_eq!(autodiff!(x: x * y + 10.0), 2.0); assert_eq!(autodiff!(y: x * y), 1.0); assert_eq!(autodiff!(x: x *= y), 2.0); assert_eq!(autodiff!(y: x *= y), 1.0); diff --git a/tensor/src/data/mod.rs b/tensor/src/data/mod.rs index cc082c9c..0d7d13c4 100644 --- a/tensor/src/data/mod.rs +++ b/tensor/src/data/mod.rs @@ -105,8 +105,7 @@ where { let data = self.data.into_shared(); // safe because: equivalent unmoved data, ptr and dims remain valid - // unsafe { Self::from_data_ptr(data, self.ptr).with_strides_dim(self.strides, self.dim) } - unsafe { BaseTensor::from_data_ptr(data, self.ptr) } + unsafe { BaseTensor::from_data_ptr(data, self.ptr).with_layout(self.layout) } } pub fn layout(&self) -> &Layout { @@ -208,6 +207,8 @@ where } pub(crate) unsafe fn with_layout(self, layout: Layout) -> BaseTensor { + debug_assert_eq!(self.layout().rank(), layout.rank()); + Self { id: self.id, data: self.data, @@ -217,19 +218,6 @@ where ptr: self.ptr, } } - - pub(crate) unsafe fn with_strides_dim( - self, - stride: impl IntoStride, - dim: impl IntoShape, - ) -> BaseTensor { - let shape = dim.into_shape(); - let stride = stride.into_stride(); - debug_assert_eq!(shape.rank(), stride.rank()); - - let layout = Layout::new(0, shape, stride); - self.with_layout(layout) - } } pub(crate) mod utils { diff --git a/tensor/src/data/specs.rs b/tensor/src/data/specs.rs index 83f7dd4b..1d90bd72 100644 --- 
a/tensor/src/data/specs.rs +++ b/tensor/src/data/specs.rs @@ -42,7 +42,7 @@ pub unsafe trait DataMut: Data + RawDataMut { /// Ensures that the array has unique access to its data. #[doc(hidden)] #[inline] - fn ensure_unique(self_: &mut BaseTensor) + fn ensure_unique(self_: &mut BaseTensor) where Self: Sized, { diff --git a/tensor/src/impls/ops/unary.rs b/tensor/src/impls/ops/unary.rs index 44516dbd..22226d93 100644 --- a/tensor/src/impls/ops/unary.rs +++ b/tensor/src/impls/ops/unary.rs @@ -5,10 +5,11 @@ use crate::prelude::{Scalar, TensorExpr}; use crate::tensor::*; use acme::ops::unary::UnaryOp; +use core::ops; -impl std::ops::Neg for TensorBase +impl ops::Neg for TensorBase where - T: Copy + std::ops::Neg, + T: Copy + ops::Neg, { type Output = Self; @@ -20,9 +21,9 @@ where } } -impl<'a, T> std::ops::Neg for &'a TensorBase +impl<'a, T> ops::Neg for &'a TensorBase where - T: Copy + std::ops::Neg, + T: Copy + ops::Neg, { type Output = TensorBase; @@ -34,9 +35,9 @@ where } } -impl std::ops::Not for TensorBase +impl ops::Not for TensorBase where - T: Copy + std::ops::Not, + T: Copy + ops::Not, { type Output = Self; @@ -48,9 +49,9 @@ where } } -impl<'a, T> std::ops::Not for &'a TensorBase +impl<'a, T> ops::Not for &'a TensorBase where - T: Copy + std::ops::Not, + T: Copy + ops::Not, { type Output = TensorBase; @@ -66,7 +67,7 @@ macro_rules! impl_unary_op { ($variant:ident, $method:ident) => { pub fn $method(self) -> Self { let shape = self.shape().clone(); - let store = self.store.iter().copied().map(|v| v.$method()).collect(); + let store = self.store.iter().map(|v| v.$method()).collect(); let op = TensorExpr::unary(self, UnaryOp::$variant); from_vec_with_op(false, op, shape, store) } diff --git a/tensor/src/lib.rs b/tensor/src/lib.rs index 4361e7c6..e7079995 100644 --- a/tensor/src/lib.rs +++ b/tensor/src/lib.rs @@ -28,6 +28,7 @@ pub mod linalg; pub mod ops; pub mod shape; pub mod specs; +pub mod stats; pub mod store; pub mod types; diff --git a/tensor/src/shape/shape.rs b/tensor/src/shape/shape.rs index 3ed987ff..073d0fb2 100644 --- a/tensor/src/shape/shape.rs +++ b/tensor/src/shape/shape.rs @@ -66,7 +66,7 @@ impl Shape { return false; } let mut acc = 1; - for (&stride, &dim) in stride.iter().zip(self.0.iter()).rev() { + for (&stride, &dim) in stride.iter().zip(self.iter()).rev() { if stride != acc { return false; } @@ -158,32 +158,31 @@ impl Shape { return None; } + let mut iter = new_stride.as_mut_slice().iter_mut().rev(); + for ((er, es), dr) in self + .slice() + .iter() + .rev() + .zip(stride.slice().iter().rev()) + .zip(iter.by_ref()) { - let mut new_stride_iter = new_stride.as_mut_slice().iter_mut().rev(); - for ((er, es), dr) in self - .slice() - .iter() - .rev() - .zip(stride.slice().iter().rev()) - .zip(new_stride_iter.by_ref()) - { - /* update strides */ - if *dr == *er { - /* keep stride */ - *dr = *es; - } else if *er == 1 { - /* dead dimension, zero stride */ - *dr = 0 - } else { - return None; - } + /* update strides */ + if *dr == *er { + /* keep stride */ + *dr = *es; + } else if *er == 1 { + /* dead dimension, zero stride */ + *dr = 0 + } else { + return None; } + } - /* set remaining strides to zero */ - for dr in new_stride_iter { - *dr = 0; - } + /* set remaining strides to zero */ + for dr in iter { + *dr = 0; } + Some(new_stride.into()) } } diff --git a/tensor/src/specs/ndtensor.rs b/tensor/src/specs/ndtensor.rs index ee3ac044..34f8d656 100644 --- a/tensor/src/specs/ndtensor.rs +++ b/tensor/src/specs/ndtensor.rs @@ -42,3 +42,7 @@ pub trait NdIterator { pub trait 
TensorData {
     type Elem;
 }
+
+pub trait TensorDataMut: TensorData {
+    fn as_mut_ptr(&mut self) -> *mut Self::Elem;
+}
diff --git a/tensor/src/stats/mod.rs b/tensor/src/stats/mod.rs
new file mode 100644
index 00000000..ca891a1c
--- /dev/null
+++ b/tensor/src/stats/mod.rs
@@ -0,0 +1,30 @@
+/*
+    Appellation: stats
+    Contrib: FL03
+*/
+
+use crate::shape::Axis;
+
+pub trait SummaryStatistics<T> {
+    /// Returns the maximum value in the collection.
+    fn max(&self) -> T;
+    /// Returns the mean (average) value of the collection.
+    fn mean(&self) -> T;
+    /// Returns the median value in the collection.
+    fn median(&self) -> T;
+    /// Returns the minimum value in the collection.
+    fn min(&self) -> T;
+
+    fn mode(&self) -> T;
+    /// Compute the standard deviation
+    fn std(&self) -> T;
+    /// Compute the variance
+    fn variance(&self) -> T;
+}
+
+pub trait TensorStats<T>: SummaryStatistics<T> {
+    fn mean_axis(&self, axis: Axis) -> T;
+}
+
+#[cfg(test)]
+mod tests {}
diff --git a/tensor/src/store/layout.rs b/tensor/src/store/layout.rs
index 70f4e2b3..57654d0a 100644
--- a/tensor/src/store/layout.rs
+++ b/tensor/src/store/layout.rs
@@ -6,7 +6,7 @@ use crate::shape::{Axis, IntoShape, IntoStride, Rank, Shape, ShapeError, ShapeResult};
 #[cfg(feature = "serde")]
 use serde::{Deserialize, Serialize};
 
-/// A layout is a description of how data is stored in memory.
+/// The layout describes the memory layout of a tensor.
 #[derive(Clone, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)]
 #[cfg_attr(feature = "serde", derive(Deserialize, Serialize))]
 pub struct Layout {
diff --git a/tensor/src/tensor.rs b/tensor/src/tensor.rs
index 44638fe6..e1e83b02 100644
--- a/tensor/src/tensor.rs
+++ b/tensor/src/tensor.rs
@@ -120,6 +120,10 @@ impl<T> TensorBase<T> {
     pub fn is_contiguous(&self) -> bool {
         self.layout().is_contiguous()
     }
+
+    pub fn is_empty(&self) -> bool {
+        self.store.is_empty()
+    }
     /// Get a reference to the [Layout] of the tensor
     pub const fn layout(&self) -> &Layout {
         &self.layout

From 3545180d847cdfd84f4570dae9ceec501627cec9 Mon Sep 17 00:00:00 2001
From: Joe McCain III
Date: Sun, 31 Mar 2024 13:04:48 -0500
Subject: [PATCH 70/87] update

Signed-off-by: Joe McCain III
---
 core/src/lib.rs                               |   2 +
 core/src/types/constants.rs                   |   7 +-
 core/src/types/mod.rs                         |   4 +-
 core/src/types/variables.rs                   |  20 +-
 scripts/rustup.sh                             |   4 -
 scripts/setup.sh                              |   4 -
 scripts/win/setup.cmd                         |   2 -
 tensor/src/actions/arange.rs                  |  57 +++--
 tensor/src/actions/grad/store.rs              |  59 ++++-
 tensor/src/actions/index/mod.rs               |  27 +--
 tensor/src/actions/iter/mod.rs                |  20 +-
 tensor/src/actions/{index => iter}/strides.rs |  12 +-
 tensor/src/actions/mod.rs                     |  35 ++-
 tensor/src/backend/mod.rs                     |  10 +-
 tensor/src/data/mod.rs                        | 199 +----------------
 tensor/src/data/tensor.rs                     | 201 ++++++++++++++++++
 tensor/src/impls/num.rs                       |   4 +-
 tensor/src/impls/ops/binary.rs                |  33 +--
 tensor/src/impls/reshape.rs                   |  30 +--
 tensor/src/ops/op.rs                          |  37 ++--
 tensor/src/specs/mod.rs                       |  20 +-
 tensor/src/specs/reshape.rs                   |  18 ++
 tensor/src/tensor.rs                          | 121 ++++++-----
 tensor/src/utils.rs                           |   4 +-
 24 files changed, 508 insertions(+), 422 deletions(-)
 delete mode 100644 scripts/rustup.sh
 delete mode 100644 scripts/setup.sh
 delete mode 100644 scripts/win/setup.cmd
 rename tensor/src/actions/{index => iter}/strides.rs (92%)
 create mode 100644 tensor/src/data/tensor.rs
 create mode 100644 tensor/src/specs/reshape.rs
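For the new stats module above, here is a minimal sketch of what an implementation of the SummaryStatistics-style interface could look like for a plain Vec<f64>; the trait below is a reduced stand-in, and the population-variance convention is an assumption.

// Reduced stand-in for the SummaryStatistics trait (illustrative only).
trait Summary {
    fn mean(&self) -> f64;
    fn variance(&self) -> f64;
    fn std(&self) -> f64;
}

impl Summary for Vec<f64> {
    fn mean(&self) -> f64 {
        self.iter().sum::<f64>() / self.len() as f64
    }

    // Population variance: mean of squared deviations from the mean.
    fn variance(&self) -> f64 {
        let mean = self.mean();
        self.iter().map(|x| (x - mean).powi(2)).sum::<f64>() / self.len() as f64
    }

    fn std(&self) -> f64 {
        self.variance().sqrt()
    }
}

fn main() {
    let data = vec![1.0, 2.0, 3.0, 4.0];
    assert_eq!(data.mean(), 2.5);
    assert_eq!(data.variance(), 1.25);
}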
diff --git a/core/src/lib.rs b/core/src/lib.rs
index 8fc4d40a..c03e52ac 100644
--- a/core/src/lib.rs
+++ b/core/src/lib.rs
@@ -5,6 +5,8 @@
 //! # Core
 //!
 //!
+#[cfg(not(feature = "std"))]
+extern crate alloc;
 
 #[macro_use]
 pub(crate) mod seal;
diff --git a/core/src/types/constants.rs b/core/src/types/constants.rs
index f218b6d9..1102f3cc 100644
--- a/core/src/types/constants.rs
+++ b/core/src/types/constants.rs
@@ -2,18 +2,17 @@
     Appellation: constants
     Contrib: FL03
 */
-
 use crate::prelude::{EvaluateOnce, Gradient};
+use core::borrow::{Borrow, BorrowMut};
+use core::ops::{Deref, DerefMut, Neg, Not};
 use num::{Num, One, Zero};
 #[cfg(feature = "serde")]
 use serde::{Deserialize, Serialize};
-use std::borrow::{Borrow, BorrowMut};
-use std::ops::{Deref, DerefMut, Neg, Not};
 
 #[derive(Clone, Copy, Debug, Default, Eq, Hash, Ord, PartialEq, PartialOrd)]
 #[cfg_attr(feature = "serde", derive(Deserialize, Serialize,))]
 #[repr(C)]
-pub struct Constant<T>(pub T);
+pub struct Constant<T>(pub(crate) T);
 
 impl<T> Constant<T> {
     pub fn new(value: T) -> Self {
diff --git a/core/src/types/mod.rs b/core/src/types/mod.rs
index 266de08a..35d59780 100644
--- a/core/src/types/mod.rs
+++ b/core/src/types/mod.rs
@@ -1,8 +1,8 @@
 /*
-    Appellation: cmp
+    Appellation: types
     Contrib: FL03
 */
-//! # Components
+//! # Types
 //!
 //!
 pub use self::{constants::*, dual::*, operators::*, variables::*};
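The variables.rs change below swaps std::ops for core::ops inside a forwarding macro. As a rough sketch of that macro pattern (a simplified stand-in, not the crate's impl_std_op), here is how a thin wrapper type can implement operator traits via a macro:

// Sketch of the operator-forwarding macro pattern (illustrative only).
#[derive(Clone, Copy, Debug, PartialEq)]
struct Wrap<T>(T);

macro_rules! impl_wrap_op {
    ($trait:ident, $method:ident) => {
        impl<T> core::ops::$trait for Wrap<T>
        where
            T: core::ops::$trait<Output = T>,
        {
            type Output = Wrap<T>;

            fn $method(self, rhs: Self) -> Self::Output {
                Wrap(self.0.$method(rhs.0))
            }
        }
    };
}

impl_wrap_op!(Add, add);
impl_wrap_op!(Mul, mul);

fn main() {
    assert_eq!(Wrap(2) + Wrap(3), Wrap(5));
    assert_eq!(Wrap(2) * Wrap(3), Wrap(6));
}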
diff --git a/core/src/types/variables.rs b/core/src/types/variables.rs
index 69d32ae4..18a82a76 100644
--- a/core/src/types/variables.rs
+++ b/core/src/types/variables.rs
@@ -215,9 +215,9 @@ macro_rules! impl_std_op {
         )*
     };
     ($trait:ident, $method:ident, $e:tt) => {
-        impl<T> std::ops::$trait for Variable<T>
+        impl<T> core::ops::$trait for Variable<T>
         where
-            T: Copy + Default + std::ops::$trait<Output = T>,
+            T: Copy + Default + core::ops::$trait<Output = T>,
         {
             type Output = Variable<T>;
 
@@ -228,9 +228,9 @@ macro_rules! impl_std_op {
             }
         }
 
-        impl<'a, T> std::ops::$trait<&'a Variable<T>> for Variable<T>
+        impl<'a, T> core::ops::$trait<&'a Variable<T>> for Variable<T>
         where
-            T: Copy + Default + std::ops::$trait<Output = T>,
+            T: Copy + Default + core::ops::$trait<Output = T>,
         {
             type Output = Variable<T>;
 
@@ -241,9 +241,9 @@ macro_rules! impl_std_op {
             }
         }
 
-        impl<'a, T> std::ops::$trait<Variable<T>> for &'a Variable<T>
+        impl<'a, T> core::ops::$trait<Variable<T>> for &'a Variable<T>
         where
-            T: Copy + Default + std::ops::$trait<Output = T>,
+            T: Copy + Default + core::ops::$trait<Output = T>,
         {
             type Output = Variable<T>;
 
@@ -254,9 +254,9 @@ macro_rules! impl_std_op {
             }
         }
 
-        impl<'a, T> std::ops::$trait<&'a Variable<T>> for &'a Variable<T>
+        impl<'a, T> core::ops::$trait<&'a Variable<T>> for &'a Variable<T>
        where
-            T: Copy + Default + std::ops::$trait<Output = T>,
+            T: Copy + Default + core::ops::$trait<Output = T>,
         {
             type Output = Variable<T>;
 
@@ -267,9 +267,9 @@ macro_rules! impl_std_op {
             }
         }
 
-        impl<T> std::ops::$trait<T> for Variable<T>
+        impl<T> core::ops::$trait<T> for Variable<T>
         where
-            T: Copy + Default + std::ops::$trait<Output = T>,
+            T: Copy + Default + core::ops::$trait<Output = T>,
         {
             type Output = Self;
diff --git a/scripts/rustup.sh b/scripts/rustup.sh
deleted file mode 100644
index 7fb2cbc0..00000000
--- a/scripts/rustup.sh
+++ /dev/null
@@ -1,4 +0,0 @@
-#!/usr/bin/env bash
-rustup default nightly
-rustup target add wasm32-unknown-unknown wasm32-wasi --toolchain nightly
-rustup component add clippy rustfmt --toolchain nightly
\ No newline at end of file
diff --git a/scripts/setup.sh b/scripts/setup.sh
deleted file mode 100644
index 242a4275..00000000
--- a/scripts/setup.sh
+++ /dev/null
@@ -1,4 +0,0 @@
-#!/usr/bin/env bash
-sudo apt update -y && sudo apt upgrade -y && sudo apt autoremove -y
-sudo apt install -y protobuf-compiler
-sh scripts/rustup.sh
\ No newline at end of file
diff --git a/scripts/win/setup.cmd b/scripts/win/setup.cmd
deleted file mode 100644
index 0be7df19..00000000
--- a/scripts/win/setup.cmd
+++ /dev/null
@@ -1,2 +0,0 @@
-rustup default nightly
-set RUSTFLAGS="--cfg procmacro2_semver_exempt"
\ No newline at end of file
diff --git a/tensor/src/actions/arange.rs b/tensor/src/actions/arange.rs
index fbe10c20..e17399dd 100644
--- a/tensor/src/actions/arange.rs
+++ b/tensor/src/actions/arange.rs
@@ -7,11 +7,19 @@ use num::traits::{Bounded, FromPrimitive, Num, ToPrimitive};
 
 pub fn step_size<T>(start: T, stop: T, steps: usize) -> T
 where
-    T: FromPrimitive + Num,
+    T: FromPrimitive + ops::Div<Output = T> + ops::Sub<Output = T>,
 {
     (stop - start) / T::from_usize(steps).unwrap()
 }
 
+pub fn steps<T>(start: T, stop: T, step: T) -> usize
+where
+    T: ToPrimitive + ops::Div<Output = T> + ops::Sub<Output = T>,
+{
+    let steps = (stop - start) / step;
+    steps.to_usize().unwrap()
+}
+
 pub struct Arange<T> {
     range: Boundary<T>,
     step: T,
@@ -28,7 +36,7 @@ impl<T> Arange<T> {
 }
 
 impl<T> Arange<T>
 where
-    T: Copy + Default + FromPrimitive + Num,
+    T: Copy + Default + Num + PartialOrd,
 {
     pub fn start(&self) -> T {
         self.range.start()
@@ -36,26 +44,25 @@ where
 
     pub fn steps(&self) -> usize
     where
-        T: ToPrimitive,
+        T: FromPrimitive + ToPrimitive,
     {
-        let start = self.start();
-        let stop = self.stop();
-        let step = self.step;
-        let steps = (stop - start) / step;
-        steps.to_usize().unwrap()
+        steps(self.start(), self.stop(), self.step)
     }
 
     pub fn step(&self) -> T {
         self.step
     }
 
-    pub fn stop(&self) -> T {
+    pub fn stop(&self) -> T
+    where
+        T: FromPrimitive + PartialOrd,
+    {
         self.range.stop_or_linear()
     }
 }
 
 #[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)]
-pub enum Boundary<T> {
+pub enum Boundary<T> {
     Range { start: T, stop: T },
     From { start: T },
     Inclusive { start: T, stop: T },
     Until { stop: T },
 }
@@ -85,7 +92,25 @@ where
+    pub fn step_size(&self, steps: usize) -> T
+    where
+        T: FromPrimitive + Num + PartialOrd,
+    {
+        let steps = T::from_usize(steps).unwrap();
+        let start = self.start();
+        let stop = self.stop_or_default();
+        let step = (stop - start) / steps;
+        step
+    }
+}
+
+impl<T> Boundary<T>
+where
+    T: Copy + Default + PartialOrd,
+{
     pub fn stop_or(&self, default: T) -> T {
+        debug_assert!(default >= self.start());
+
         self.stop().unwrap_or(default)
     }
 
@@ -106,19 +131,7 @@ where
     {
         self.stop_or(T::max_value())
     }
-
-    pub fn step_size(&self, steps: usize) -> T
-    where
-        T: FromPrimitive + Num,
-    {
-        let steps = T::from_usize(steps).unwrap();
-        let start = self.start();
-        let stop = self.stop_or_default();
-        let step = (stop - start) / steps;
-        step
-    }
 }
-
 impl<T> From<Range<T>> for Boundary<T> {
     fn from(args: Range<T>) -> Self {
         Boundary::Range {
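As a quick sanity check of the free `steps` helper introduced above, here is a standalone sketch of the same arithmetic specialized to f64 (a simplified copy for illustration, not the crate's export):

// Simplified copy of the steps arithmetic, specialized to f64.
fn steps(start: f64, stop: f64, step: f64) -> usize {
    ((stop - start) / step) as usize
}

fn main() {
    // Ten units covered half a unit at a time yields twenty steps.
    assert_eq!(steps(0.0, 10.0, 0.5), 20);
    assert_eq!(steps(0.0, 10.0, 1.0), 10);
}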
b/tensor/src/actions/grad/store.rs index bc2707c2..5db2d0fb 100644 --- a/tensor/src/actions/grad/store.rs +++ b/tensor/src/actions/grad/store.rs @@ -5,8 +5,9 @@ use crate::prelude::TensorId; use crate::TensorBase; use acme::prelude::Store; -use std::collections::btree_map::{BTreeMap, Entry, Keys}; -use std::ops::{Index, IndexMut}; +use core::borrow::{Borrow, BorrowMut}; +use core::ops::{Deref, DerefMut, Index, IndexMut}; +use std::collections::btree_map::{BTreeMap, Entry, Keys, Values}; #[derive(Clone, Debug)] pub struct GradStore { @@ -39,7 +40,7 @@ impl GradStore { pub fn is_empty(&self) -> bool { self.store.is_empty() } - + /// Returns an iterator over the store's keys pub fn keys(&self) -> Keys<'_, TensorId, TensorBase> { self.store.keys() } @@ -68,6 +69,56 @@ impl GradStore { { self.entry(tensor.id()).or_insert(tensor.zeros_like()) } + /// Remove an element from the store. + pub fn remove(&mut self, key: &TensorId) -> Option> { + self.store.remove(key) + } + /// Remove a tensor from the store. + pub fn remove_tensor(&mut self, tensor: &TensorBase) -> Option> { + self.remove(&tensor.id()) + } + + pub fn values(&self) -> Values<'_, TensorId, TensorBase> { + self.store.values() + } +} + +impl AsRef>> for GradStore { + fn as_ref(&self) -> &BTreeMap> { + &self.store + } +} + +impl AsMut>> for GradStore { + fn as_mut(&mut self) -> &mut BTreeMap> { + &mut self.store + } +} + +impl Borrow>> for GradStore { + fn borrow(&self) -> &BTreeMap> { + &self.store + } +} + +impl BorrowMut>> for GradStore { + fn borrow_mut(&mut self) -> &mut BTreeMap> { + &mut self.store + } +} + +impl Deref for GradStore { + type Target = BTreeMap>; + + fn deref(&self) -> &Self::Target { + &self.store + } +} + +impl DerefMut for GradStore { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.store + } } impl Extend<(TensorId, TensorBase)> for GradStore { @@ -121,6 +172,6 @@ impl Store> for GradStore { } fn remove(&mut self, key: &TensorId) -> Option> { - self.store.remove(key) + self.remove(key) } } diff --git a/tensor/src/actions/index/mod.rs b/tensor/src/actions/index/mod.rs index 5e52fb51..51638c91 100644 --- a/tensor/src/actions/index/mod.rs +++ b/tensor/src/actions/index/mod.rs @@ -5,37 +5,16 @@ //! # Index //! //! -pub use self::{slice::*, strides::*}; +pub use self::slice::*; pub(crate) mod slice; -pub(crate) mod strides; use crate::tensor::TensorBase; pub enum IndexItem { Scalar(T), - Strides(TensorBase), + Tensor(TensorBase), } #[cfg(test)] -mod tests { - use super::Strides; - use crate::prelude::Shape; - use crate::tensor::TensorBase; - - #[test] - fn test() { - let shape = Shape::from_iter([2, 2]); - let n = shape.size(); - let tensor = TensorBase::linspace(0f64, n as f64, n) - .reshape(shape) - .unwrap(); - let indexer = Strides::from(tensor.layout()); - for (i, idx) in indexer.enumerate() { - let elem = *tensor.get_by_index(idx).unwrap(); - println!("{:?}", &elem); - - assert_eq!(i as f64, elem); - } - } -} +mod tests {} diff --git a/tensor/src/actions/iter/mod.rs b/tensor/src/actions/iter/mod.rs index 7c8f93b1..37977362 100644 --- a/tensor/src/actions/iter/mod.rs +++ b/tensor/src/actions/iter/mod.rs @@ -5,9 +5,10 @@ //! # Iter //! //! 
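The hunks below move the stride-walking iterator out of `index` and into this module, renaming it `Strided`. As a rough illustration of the traversal it performs, here is a self-contained sketch (all names are local to the example, not the crate's API): logical indices are stepped odometer-style, last axis fastest, and each position maps to a flat offset via the strides.

fn flat_index(position: &[usize], stride: &[usize]) -> usize {
    // a flat offset is the dot product of the logical index with the strides
    position.iter().zip(stride).map(|(p, s)| p * s).sum()
}

fn main() {
    let (shape, stride) = (vec![2, 2], vec![2, 1]); // row-major 2x2
    let mut position = vec![0; shape.len()];
    let mut visited = Vec::new();
    'walk: loop {
        visited.push(flat_index(&position, &stride));
        // odometer-style increment over the logical indices
        let mut axis = shape.len();
        loop {
            if axis == 0 {
                break 'walk; // every position has been visited
            }
            axis -= 1;
            position[axis] += 1;
            if position[axis] < shape[axis] {
                break;
            }
            position[axis] = 0; // carry into the next-slower axis
        }
    }
    assert_eq!(visited, vec![0, 1, 2, 3]);
}

For a contiguous layout the walk is simply 0..size, which is why the new `test_strided` case below can compare the iterator directly against a plain linspace vector.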
-pub use self::{iterator::*, utils::*}; +pub use self::{iterator::*, strides::*, utils::*}; pub(crate) mod iterator; +pub(crate) mod strides; pub trait IterTensor { type Item; @@ -40,4 +41,19 @@ pub(crate) mod utils { } #[cfg(test)] -mod tests {} +mod tests { + use crate::actions::Linspace; + use crate::prelude::{Shape, Tensor}; + + #[test] + fn test_strided() { + let shape = Shape::from_iter([2, 2]); + let n = shape.size(); + let exp = Vec::linspace(0f64, n as f64, n); + let tensor = Tensor::linspace(0f64, n as f64, n).reshape(shape).unwrap(); + let iter = tensor.strided(); + for (i, idx) in iter.enumerate() { + assert_eq!(idx, &exp[i]); + } + } +} diff --git a/tensor/src/actions/index/strides.rs b/tensor/src/actions/iter/strides.rs similarity index 92% rename from tensor/src/actions/index/strides.rs rename to tensor/src/actions/iter/strides.rs index 4780359e..b115a9c1 100644 --- a/tensor/src/actions/index/strides.rs +++ b/tensor/src/actions/iter/strides.rs @@ -8,13 +8,13 @@ use crate::tensor::TensorBase; pub struct StrideIter<'a, T> { scope: Option<&'a T>, - strides: Strides<'a>, + strides: Strided<'a>, tensor: &'a TensorBase, } impl<'a, T> StrideIter<'a, T> { pub fn new(tensor: &'a TensorBase) -> Self { - let strides = Strides::from(tensor.layout()); + let strides = Strided::from(tensor.layout()); Self { scope: None, strides, @@ -33,14 +33,14 @@ impl<'a, T> Iterator for StrideIter<'a, T> { } } -pub struct Strides<'a> { +pub struct Strided<'a> { next: Option, position: Vec, pub(crate) shape: &'a Shape, pub(crate) stride: &'a Stride, } -impl<'a> Strides<'a> { +impl<'a> Strided<'a> { pub fn new(offset: usize, shape: &'a Shape, stride: &'a Stride) -> Self { let elem_count: usize = shape.iter().product(); let next = if elem_count == 0 { @@ -66,7 +66,7 @@ impl<'a> Strides<'a> { } } -impl<'a> Iterator for Strides<'a> { +impl<'a> Iterator for Strided<'a> { type Item = usize; fn next(&mut self) -> Option { @@ -99,7 +99,7 @@ impl<'a> Iterator for Strides<'a> { } } -impl<'a> From<&'a Layout> for Strides<'a> { +impl<'a> From<&'a Layout> for Strided<'a> { fn from(layout: &'a Layout) -> Self { Self::new(layout.offset, &layout.shape, &layout.stride) } diff --git a/tensor/src/actions/mod.rs b/tensor/src/actions/mod.rs index a6692204..e60621ce 100644 --- a/tensor/src/actions/mod.rs +++ b/tensor/src/actions/mod.rs @@ -4,18 +4,41 @@ */ //! # Actions //! -//! This module contains the implementations of the various actions that can be performed on tensors. -//! The actions include: -//! - Composition -//! - Differentiation -//! - Indexing -//! - Iteration +//! This module describes the actions that may be taken on or by a tensor. +//! +//! The actions include:
+//! * Automatic Differentiation +//! * Creation Routines +//! * Indexing +//! * Iteration pub mod arange; pub mod grad; pub mod index; pub mod iter; +use num::traits::{FromPrimitive, Num}; + +pub trait Linspace { + fn linspace(start: T, stop: T, steps: usize) -> Self; +} + +impl Linspace for Vec +where + T: Copy + Default + FromPrimitive + Num + PartialOrd, +{ + fn linspace(start: T, stop: T, steps: usize) -> Self { + let step = arange::step_size(start, stop, steps); + let mut vec = Vec::with_capacity(steps); + let mut value = start; + for _ in 0..steps { + vec.push(value); + value = value + step; + } + vec + } +} + pub(crate) mod prelude { pub use super::arange::*; pub use super::grad::*; diff --git a/tensor/src/backend/mod.rs b/tensor/src/backend/mod.rs index 9adfba3f..c275f8a9 100644 --- a/tensor/src/backend/mod.rs +++ b/tensor/src/backend/mod.rs @@ -15,12 +15,12 @@ use crate::shape::Rank; use crate::tensor::TensorBase; #[derive(Clone, Debug, Eq, PartialEq)] -pub enum TensorType { +pub enum Tensors { Scalar(T), Tensor(TensorBase), } -impl TensorType { +impl Tensors { pub fn scalar(scalar: T) -> Self { Self::Scalar(scalar) } @@ -38,13 +38,13 @@ impl TensorType { pub fn rank(&self) -> Rank { match self { - Self::Scalar(_) => Rank::scalar(), Self::Tensor(tensor) => tensor.rank(), + _ => Rank::scalar(), } } } -impl From> for TensorType +impl From> for Tensors where T: Clone, { @@ -71,7 +71,7 @@ mod tests { fn test_tensor_type() { let shape = (2, 3); let tensor = TensorBase::::ones(shape); - let item = TensorType::tensor(tensor); + let item = Tensors::tensor(tensor); assert_eq!(item.rank(), Rank::from(2)); } diff --git a/tensor/src/data/mod.rs b/tensor/src/data/mod.rs index 0d7d13c4..b89a1978 100644 --- a/tensor/src/data/mod.rs +++ b/tensor/src/data/mod.rs @@ -5,10 +5,11 @@ //! # Data //! //! -pub use self::specs::*; pub(crate) use self::utils::*; +pub use self::{specs::*, tensor::*}; pub(crate) mod specs; +pub(crate) mod tensor; pub mod elem; @@ -20,206 +21,10 @@ pub mod repr { pub(crate) mod view; } -use crate::actions::iter::to_vec_mapped; -use crate::prelude::{BackpropOp, Layout, TensorId, TensorKind}; -use crate::shape::dim::can_index_slice; -use crate::shape::{IntoShape, IntoStride, Shape, Stride}; -use core::ptr::NonNull; -use core::slice; -use rawpointer::PointerExt; - pub type Tensor
= BaseTensor>; pub type ArcTensor = BaseTensor>; -#[derive(Clone)] -pub struct BaseTensor -where - S: RawData, -{ - id: TensorId, - data: S, - kind: TensorKind, - layout: Layout, - op: BackpropOp, - ptr: NonNull, -} - -impl BaseTensor -where - S: RawData, -{ - #[inline(always)] - pub fn as_ptr(&self) -> *const A { - self.ptr.as_ptr() as *const A - } - - /// Return a mutable pointer to the first element in the array. - /// - /// This method attempts to unshare the data. If `S: DataMut`, then the - /// data is guaranteed to be uniquely held on return. - /// - /// # Warning - /// - /// When accessing elements through this pointer, make sure to use strides - /// obtained *after* calling this method, since the process of unsharing - /// the data may change the strides. - #[inline(always)] - pub fn as_mut_ptr(&mut self) -> *mut A - where - S: RawDataMut, - { - // self.try_ensure_unique(); // for ArcArray - self.ptr.as_ptr() - } - pub fn as_slice_memory_order(&self) -> Option<&[A]> - where - S: Data, - { - if self.is_contiguous() { - let offset = self.layout.offset_from_low_addr_ptr_to_logical_ptr(); - unsafe { - Some(slice::from_raw_parts( - PointerExt::sub(self.ptr, offset).as_ptr(), - self.size(), - )) - } - } else { - None - } - } - - /// Return true if the array is known to be contiguous. - pub fn is_contiguous(&self) -> bool { - self.layout.is_contiguous() - } - - pub fn is_standard_layout(&self) -> bool { - self.layout.is_layout_c() - } - - /// Without any coping, turn the tensor into a shared tensor. - pub fn into_shared(self) -> ArcTensor - where - S: DataOwned, - { - let data = self.data.into_shared(); - // safe because: equivalent unmoved data, ptr and dims remain valid - unsafe { BaseTensor::from_data_ptr(data, self.ptr).with_layout(self.layout) } - } - - pub fn layout(&self) -> &Layout { - &self.layout - } - - pub fn map<'a, B, F>(&'a self, f: F) -> Tensor - where - F: FnMut(&'a A) -> B, - A: 'a, - S: Data, - { - unsafe { - if let Some(slc) = self.as_slice_memory_order() { - BaseTensor::from_shape_trusted_iter_unchecked(self.shape().slice(), slc.iter(), f) - } else { - unimplemented!() - // BaseTensor::from_shape_trusted_iter_unchecked(self.shape(), self.iter(), f) - } - } - } - - pub fn shape(&self) -> &Shape { - self.layout().shape() - } - - pub fn stride(&self) -> &Stride { - self.layout().stride() - } - - pub fn size(&self) -> usize { - self.layout.size() - } -} - -// Internal methods -impl BaseTensor -where - S: DataOwned + RawData, -{ - unsafe fn from_vec_dim_stride_unchecked( - dim: impl IntoShape, - strides: impl IntoStride, - mut v: Vec, - ) -> Self { - let layout = Layout::new(0, dim, strides); - // debug check for issues that indicates wrong use of this constructor - debug_assert!(can_index_slice(&v, &layout.shape(), &layout.stride()).is_ok()); - - let ptr = { - let tmp = nonnull_from_vec_data(&mut v); - PointerExt::add(tmp, layout.offset_from_low_addr_ptr_to_logical_ptr()) - }; - BaseTensor::from_data_ptr(DataOwned::new(v), ptr).with_layout(layout) - } - - /// Creates an array from an iterator, mapped by `map` and interpret it according to the - /// provided shape and strides. 
- /// - /// # Safety - /// - /// See from_shape_vec_unchecked - pub(crate) unsafe fn from_shape_trusted_iter_unchecked( - shape: Sh, - iter: I, - map: F, - ) -> Self - where - Sh: IntoShape, - I: ExactSizeIterator, - F: FnMut(I::Item) -> A, - { - let shape = shape.into_shape(); - let strides = shape.default_strides(); // shape.stride().strides_for_dim(&dim); - let v = to_vec_mapped(iter, map); - Self::from_vec_dim_stride_unchecked(shape, strides, v) - } -} - -impl BaseTensor -where - S: RawData, -{ - pub(crate) unsafe fn from_data_ptr(data: S, ptr: NonNull) -> Self { - let tensor = Self { - id: TensorId::new(), - data, - kind: TensorKind::Normal, - layout: Layout::contiguous(0), - op: BackpropOp::::none(), - ptr, - }; - debug_assert!(tensor.pointer_is_inbounds()); - tensor - } - - pub(crate) fn pointer_is_inbounds(&self) -> bool { - self.data._is_pointer_inbounds(self.as_ptr()) - } - - pub(crate) unsafe fn with_layout(self, layout: Layout) -> BaseTensor { - debug_assert_eq!(self.layout().rank(), layout.rank()); - - Self { - id: self.id, - data: self.data, - kind: self.kind, - layout, - op: self.op, - ptr: self.ptr, - } - } -} - pub(crate) mod utils { #[cfg(not(feature = "std"))] use alloc::vec::Vec; diff --git a/tensor/src/data/tensor.rs b/tensor/src/data/tensor.rs new file mode 100644 index 00000000..8e17d657 --- /dev/null +++ b/tensor/src/data/tensor.rs @@ -0,0 +1,201 @@ +/* + Appellation: tensor + Contrib: FL03 +*/ +use super::specs::{Data, DataOwned, RawData, RawDataMut}; +use super::{nonnull_from_vec_data, ArcTensor, Tensor}; +use crate::actions::iter::to_vec_mapped; +use crate::prelude::{BackpropOp, Layout, TensorId, TensorKind}; +use crate::shape::dim::can_index_slice; +use crate::shape::{IntoShape, IntoStride, Shape, Stride}; +use core::ptr::NonNull; +use core::slice; +use rawpointer::PointerExt; + +#[derive(Clone)] +pub struct BaseTensor +where + S: RawData, +{ + pub(crate) id: TensorId, + pub(crate) data: S, + pub(crate) kind: TensorKind, + pub(crate) layout: Layout, + pub(crate) op: BackpropOp, + pub(crate) ptr: NonNull, +} + +impl BaseTensor +where + S: RawData, +{ + #[inline(always)] + pub fn as_ptr(&self) -> *const A { + self.ptr.as_ptr() as *const A + } + + /// Return a mutable pointer to the first element in the array. + /// + /// This method attempts to unshare the data. If `S: DataMut`, then the + /// data is guaranteed to be uniquely held on return. + /// + /// # Warning + /// + /// When accessing elements through this pointer, make sure to use strides + /// obtained *after* calling this method, since the process of unsharing + /// the data may change the strides. + #[inline(always)] + pub fn as_mut_ptr(&mut self) -> *mut A + where + S: RawDataMut, + { + // self.try_ensure_unique(); // for ArcArray + self.ptr.as_ptr() + } + pub fn as_slice_memory_order(&self) -> Option<&[A]> + where + S: Data, + { + if self.is_contiguous() { + let offset = self.layout.offset_from_low_addr_ptr_to_logical_ptr(); + unsafe { + Some(slice::from_raw_parts( + PointerExt::sub(self.ptr, offset).as_ptr(), + self.size(), + )) + } + } else { + None + } + } + + /// Return true if the array is known to be contiguous. + pub fn is_contiguous(&self) -> bool { + self.layout.is_contiguous() + } + + pub fn is_standard_layout(&self) -> bool { + self.layout.is_layout_c() + } + + /// Without any coping, turn the tensor into a shared tensor. 
+ pub fn into_shared(self) -> ArcTensor + where + S: DataOwned, + { + let data = self.data.into_shared(); + // safe because: equivalent unmoved data, ptr and dims remain valid + unsafe { BaseTensor::from_data_ptr(data, self.ptr).with_layout(self.layout) } + } + + pub fn layout(&self) -> &Layout { + &self.layout + } + + pub fn map<'a, B, F>(&'a self, f: F) -> Tensor + where + F: FnMut(&'a A) -> B, + A: 'a, + S: Data, + { + unsafe { + if let Some(slc) = self.as_slice_memory_order() { + BaseTensor::from_shape_trusted_iter_unchecked(self.shape().slice(), slc.iter(), f) + } else { + unimplemented!() + // BaseTensor::from_shape_trusted_iter_unchecked(self.shape(), self.iter(), f) + } + } + } + + pub fn shape(&self) -> &Shape { + self.layout().shape() + } + + pub fn stride(&self) -> &Stride { + self.layout().stride() + } + + pub fn size(&self) -> usize { + self.layout.size() + } +} + +// Internal methods +impl BaseTensor +where + S: DataOwned + RawData, +{ + unsafe fn from_vec_dim_stride_unchecked( + dim: impl IntoShape, + strides: impl IntoStride, + mut v: Vec, + ) -> Self { + let layout = Layout::new(0, dim, strides); + // debug check for issues that indicates wrong use of this constructor + debug_assert!(can_index_slice(&v, &layout.shape(), &layout.stride()).is_ok()); + + let ptr = { + let tmp = nonnull_from_vec_data(&mut v); + PointerExt::add(tmp, layout.offset_from_low_addr_ptr_to_logical_ptr()) + }; + BaseTensor::from_data_ptr(DataOwned::new(v), ptr).with_layout(layout) + } + + /// Creates an array from an iterator, mapped by `map` and interpret it according to the + /// provided shape and strides. + /// + /// # Safety + /// + /// See from_shape_vec_unchecked + pub(crate) unsafe fn from_shape_trusted_iter_unchecked( + shape: Sh, + iter: I, + map: F, + ) -> Self + where + Sh: IntoShape, + I: ExactSizeIterator, + F: FnMut(I::Item) -> A, + { + let shape = shape.into_shape(); + let strides = shape.default_strides(); // shape.stride().strides_for_dim(&dim); + let v = to_vec_mapped(iter, map); + Self::from_vec_dim_stride_unchecked(shape, strides, v) + } +} + +impl BaseTensor +where + S: RawData, +{ + pub(crate) unsafe fn from_data_ptr(data: S, ptr: NonNull) -> Self { + let tensor = Self { + id: TensorId::new(), + data, + kind: TensorKind::Normal, + layout: Layout::contiguous(0), + op: BackpropOp::::none(), + ptr, + }; + debug_assert!(tensor.pointer_is_inbounds()); + tensor + } + + pub(crate) fn pointer_is_inbounds(&self) -> bool { + self.data._is_pointer_inbounds(self.as_ptr()) + } + + pub(crate) unsafe fn with_layout(self, layout: Layout) -> BaseTensor { + debug_assert_eq!(self.layout().rank(), layout.rank()); + + Self { + id: self.id, + data: self.data, + kind: self.kind, + layout, + op: self.op, + ptr: self.ptr, + } + } +} diff --git a/tensor/src/impls/num.rs b/tensor/src/impls/num.rs index 9667e829..8566d375 100644 --- a/tensor/src/impls/num.rs +++ b/tensor/src/impls/num.rs @@ -22,7 +22,7 @@ where T: Scalar, { fn one() -> Self { - Self::fill(1, T::one()) + Self::from_scalar(T::one()) } } @@ -31,7 +31,7 @@ where T: Scalar, { fn zero() -> Self { - Self::fill(1, T::zero()) + Self::from_scalar(T::zero()) } fn is_zero(&self) -> bool { diff --git a/tensor/src/impls/ops/binary.rs b/tensor/src/impls/ops/binary.rs index ddd8e3bb..3c2c82c3 100644 --- a/tensor/src/impls/ops/binary.rs +++ b/tensor/src/impls/ops/binary.rs @@ -5,6 +5,7 @@ use crate::prelude::TensorExpr; use crate::tensor::{from_vec_with_op, TensorBase}; use acme::ops::binary::BinaryOp; +use core::ops; use num::traits::Pow; macro_rules! 
cmp { @@ -47,9 +48,9 @@ macro_rules! impl_arithmetic { (op: $trait:ident, $method:ident, $op:tt) => { impl_scalar_arith!($trait, $method, $op); - impl std::ops::$trait for TensorBase + impl ops::$trait for TensorBase where - T: Copy + std::ops::$trait, + T: Copy + ops::$trait, { type Output = Self; @@ -62,9 +63,9 @@ macro_rules! impl_arithmetic { } } - impl<'a, T> std::ops::$trait<&'a TensorBase> for TensorBase + impl<'a, T> ops::$trait<&'a TensorBase> for TensorBase where - T: Copy + std::ops::$trait, + T: Copy + ops::$trait, { type Output = TensorBase; @@ -79,9 +80,9 @@ macro_rules! impl_arithmetic { } } - impl<'a, T> std::ops::$trait> for &'a TensorBase + impl<'a, T> ops::$trait> for &'a TensorBase where - T: Copy + std::ops::$trait, + T: Copy + ops::$trait, { type Output = TensorBase; @@ -96,9 +97,9 @@ macro_rules! impl_arithmetic { } } - impl<'a, 'b, T> std::ops::$trait<&'b TensorBase> for &'a TensorBase + impl<'a, 'b, T> ops::$trait<&'b TensorBase> for &'a TensorBase where - T: Copy + std::ops::$trait, + T: Copy + ops::$trait, { type Output = TensorBase; @@ -121,9 +122,9 @@ macro_rules! impl_arithmetic { macro_rules! impl_scalar_arith { ($trait:ident, $method:ident, $op:tt) => { - impl std::ops::$trait for TensorBase + impl ops::$trait for TensorBase where - T: Copy + std::ops::$trait, + T: Copy + ops::$trait, { type Output = Self; @@ -135,9 +136,9 @@ macro_rules! impl_scalar_arith { } } - impl<'a, T> std::ops::$trait for &'a TensorBase + impl<'a, T> ops::$trait for &'a TensorBase where - T: Copy + std::ops::$trait, + T: Copy + ops::$trait, { type Output = TensorBase; @@ -153,9 +154,9 @@ macro_rules! impl_scalar_arith { macro_rules! impl_assign_op { ($trait:ident, $method:ident, $inner:ident, $op:tt) => { - impl std::ops::$trait for TensorBase + impl ops::$trait for TensorBase where - T: Copy + std::ops::$inner, + T: Copy + ops::$inner, { fn $method(&mut self, other: Self) { cmp!(ne: self.shape(), other.shape()); @@ -167,9 +168,9 @@ macro_rules! impl_assign_op { } } - impl<'a, T> std::ops::$trait<&'a TensorBase> for TensorBase + impl<'a, T> ops::$trait<&'a TensorBase> for TensorBase where - T: Copy + std::ops::$inner, + T: Copy + ops::$inner, { fn $method(&mut self, other: &'a TensorBase) { cmp!(ne: self.shape(), other.shape()); diff --git a/tensor/src/impls/reshape.rs b/tensor/src/impls/reshape.rs index 0ab1bba7..08835396 100644 --- a/tensor/src/impls/reshape.rs +++ b/tensor/src/impls/reshape.rs @@ -12,12 +12,12 @@ where { pub fn broadcast(&self, shape: impl IntoShape) -> Self { let layout = self.layout.broadcast_as(shape).unwrap(); - + let op = TensorExpr::broadcast(self.clone(), layout.shape().clone()); Self { id: TensorId::new(), - kind: self.kind.clone(), + kind: self.kind(), layout, - op: self.op.clone(), + op: op.into(), store: self.store.clone(), } } @@ -32,9 +32,9 @@ where /// pub fn swap_axes(&self, swap: Axis, with: Axis) -> Self { - let op = TensorExpr::transpose(self.clone(), swap, with); + let op = TensorExpr::swap_axes(self.clone(), swap, with); - let layout = self.layout().clone().transpose(swap, with); + let layout = self.layout().clone().swap_axes(swap, with); let shape = self.layout.shape(); let mut data = self.store.to_vec(); @@ -56,27 +56,15 @@ where } /// Transpose the tensor. 
pub fn t(&self) -> Self { - let (a, b) = (Axis(0), Axis(1)); - let op = TensorExpr::transpose(self.clone(), a, b); - - let layout = self.layout().clone().transpose(a, b); - let shape = self.layout.shape(); - let mut data = self.store.to_vec(); - - for i in 0..shape[a] { - for j in 0..shape[b] { - let scope = self.layout.index([i, j]); - let target = layout.index([j, i]); - data[target] = self.data()[scope].clone(); - } - } + let op = TensorExpr::transpose(self.clone()); + let layout = self.layout().clone().reverse_axes(); TensorBase { id: TensorId::new(), - kind: self.kind.clone(), + kind: self.kind(), layout, op: op.into(), - store: data.clone(), + store: self.data().clone(), } } diff --git a/tensor/src/ops/op.rs b/tensor/src/ops/op.rs index 0d3fd5ed..ca85c23b 100644 --- a/tensor/src/ops/op.rs +++ b/tensor/src/ops/op.rs @@ -9,7 +9,7 @@ use acme::prelude::{BinaryOp, UnaryOp}; pub type BoxTensor = Box>; -#[derive(Clone, Debug)] +#[derive(Clone, Debug, Eq, PartialEq)] #[non_exhaustive] pub enum TensorExpr { Binary(BoxTensor, BoxTensor, BinaryOp), @@ -17,12 +17,10 @@ pub enum TensorExpr { Unary(BoxTensor, UnaryOp), Broadcast(BoxTensor, Shape), Matmul(BoxTensor, BoxTensor), - Reshape(BoxTensor, ReshapeExpr), + Reshape(BoxTensor, Shape), Shape(ReshapeExpr), - Transpose { - scope: BoxTensor, - target: (Axis, Axis), - }, + SwapAxes(BoxTensor, Axis, Axis), + Transpose(BoxTensor), } impl TensorExpr { @@ -42,11 +40,20 @@ impl TensorExpr { TensorExpr::Matmul(Box::new(lhs), Box::new(rhs)) } - pub fn transpose(scope: TensorBase, swap: Axis, with: Axis) -> Self { - TensorExpr::Transpose { - scope: Box::new(scope), - target: (swap, with), - } + pub fn reshape(tensor: TensorBase, shape: Shape) -> Self { + TensorExpr::Reshape(Box::new(tensor), shape) + } + + pub fn shape(expr: ReshapeExpr) -> Self { + TensorExpr::Shape(expr) + } + + pub fn swap_axes(tensor: TensorBase, swap: Axis, with: Axis) -> Self { + TensorExpr::SwapAxes(Box::new(tensor), swap, with) + } + + pub fn transpose(scope: TensorBase) -> Self { + TensorExpr::Transpose(Box::new(scope)) } pub fn unary(tensor: TensorBase, op: UnaryOp) -> Self { @@ -61,7 +68,7 @@ impl TensorExpr { TensorExpr::Unary(lhs, _) => Some(*lhs), TensorExpr::Broadcast(tensor, _) => Some(*tensor), TensorExpr::Matmul(lhs, _) => Some(*lhs), - TensorExpr::Transpose { scope, .. 
} => Some(*scope), + TensorExpr::Transpose(lhs) => Some(*lhs), _ => None, } } @@ -91,10 +98,8 @@ where TensorExpr::broadcast(tensor.view(), shape.clone()) } TensorExpr::Matmul(lhs, rhs) => TensorExpr::matmul(lhs.view(), rhs.view()), - TensorExpr::Transpose { - scope: tensor, - target: axes, - } => TensorExpr::transpose(tensor.view(), axes.0, axes.1), + TensorExpr::Reshape(tensor, shape) => TensorExpr::reshape(tensor.view(), shape.clone()), + TensorExpr::Transpose(tensor) => TensorExpr::transpose(tensor.view()), _ => unimplemented!(), } } diff --git a/tensor/src/specs/mod.rs b/tensor/src/specs/mod.rs index a98b166b..38c04bec 100644 --- a/tensor/src/specs/mod.rs +++ b/tensor/src/specs/mod.rs @@ -2,10 +2,11 @@ Appellation: specs Contrib: FL03 */ -pub use self::{affine::*, ndtensor::*, scalar::*}; +pub use self::{affine::*, ndtensor::*, reshape::*, scalar::*}; pub(crate) mod affine; pub(crate) mod ndtensor; +pub(crate) mod reshape; pub(crate) mod scalar; pub trait Hstack<T> { @@ -20,24 +21,11 @@ pub trait Vstack<T> { fn vstack(&self, other: &T) -> Self::Output; } -pub trait Swap { - type Key; - - fn swap(&mut self, swap: Self::Key, with: Self::Key); -} - -impl<T> Swap for [T] { - type Key = usize; - - fn swap(&mut self, swap: Self::Key, with: Self::Key) { - self.swap(swap, with); - } -} - pub(crate) mod prelude { + pub use super::affine::*; pub use super::ndtensor::*; + pub use super::reshape::*; pub use super::scalar::*; - pub use super::Affine; } #[cfg(test)] diff --git a/tensor/src/specs/reshape.rs b/tensor/src/specs/reshape.rs new file mode 100644 index 00000000..6b7efe21 --- /dev/null +++ b/tensor/src/specs/reshape.rs @@ -0,0 +1,18 @@ +/* + Appellation: reshape + Contrib: FL03 +*/ + +pub trait Swap { + type Key; + + fn swap(&mut self, swap: Self::Key, with: Self::Key); +} + +impl<T> Swap for [T] { + type Key = usize; + + fn swap(&mut self, swap: Self::Key, with: Self::Key) { + self.swap(swap, with); + } +} diff --git a/tensor/src/tensor.rs b/tensor/src/tensor.rs index e1e83b02..92ddd03a 100644 --- a/tensor/src/tensor.rs +++ b/tensor/src/tensor.rs @@ -2,12 +2,14 @@ Appellation: tensor Contrib: FL03 */ -use crate::actions::index::Strides; +use crate::actions::iter::StrideIter; use crate::ops::{BackpropOp, TensorExpr}; use crate::prelude::{IntoShape, Rank, Shape, TensorId, TensorKind}; use crate::store::Layout; use acme::prelude::BinaryOp; -use std::ops::{Index, IndexMut}; +use core::iter::Map; +use core::ops::{Index, IndexMut}; +use core::slice::Iter as SliceIter; pub(crate) fn new( kind: impl Into<TensorKind>, @@ -88,6 +90,7 @@ impl<T> TensorBase<T> { store, } } + /// Returns a reference to the underlying data as a slice. pub fn as_slice(&self) -> &[T] { &self.store } @@ -116,14 +119,26 @@ impl<T> TensorBase<T> { pub const fn id(&self) -> TensorId { self.id } - + /// Returns true if the tensor is contiguous. pub fn is_contiguous(&self) -> bool { self.layout().is_contiguous() } - + /// Returns true if the tensor is empty.
pub fn is_empty(&self) -> bool { self.store.is_empty() } + /// A function to check if the tensor is a scalar + pub fn is_scalar(&self) -> bool { + self.shape().len() == 0 + } + /// A function to check if the tensor is a variable + pub const fn is_variable(&self) -> bool { + self.kind.is_variable() + } + /// Get the kind of the tensor + pub fn kind(&self) -> TensorKind { + self.kind + } /// Get a reference to the [Layout] of the tensor pub const fn layout(&self) -> &Layout { &self.layout @@ -148,77 +163,43 @@ impl<T> TensorBase<T> { pub fn stride(&self) -> &[usize] { self.layout.stride() } - /// A function to check if the tensor is a scalar - pub fn is_scalar(&self) -> bool { - self.shape().len() == 0 - } - /// A function to check if the tensor is a variable - pub const fn is_variable(&self) -> bool { - self.kind.is_variable() - } - /// Changes the kind of tensor to a variable - pub fn variable(mut self) -> Self { - self.kind = TensorKind::Variable; - self + /// Create an iterator over the tensor + pub fn strided(&self) -> StrideIter<'_, T> { + StrideIter::new(self) } /// Turn the tensor into a one-dimensional vector pub fn to_vec(&self) -> Vec<T> where T: Clone, { - self.store.clone() + self.store.to_vec() + } + /// Changes the kind of tensor to a variable + pub fn variable(mut self) -> Self { + self.kind = TensorKind::Variable; + self } - pub fn apply_binary<F>(&self, op: BinaryOp, other: &Self, f: F) -> Self + pub fn apply_binary<F>(&self, other: &Self, op: BinaryOp, f: F) -> Self where F: Fn(&T, &T) -> T, T: Clone, { let store = self - .store + .data() .iter() - .zip(other.store.iter()) + .zip(other.data().iter()) .map(|(a, b)| f(a, b)) .collect(); TensorBase { id: TensorId::new(), - kind: self.kind, - layout: self.layout.clone(), + kind: self.kind(), + layout: self.layout().clone(), op: BackpropOp::binary(self.clone(), other.clone(), op), store, } } - - pub fn map<'a, F>(&'a self, f: F) -> TensorBase<T> - where - F: FnMut(&'a T) -> T, - T: 'a + Clone, - { - let store = self.store.iter().map(f).collect(); - TensorBase { - id: TensorId::new(), - kind: self.kind, - layout: self.layout.clone(), - op: self.op.clone(), - store, - } - } - - pub fn mapv<F>(&self, f: F) -> TensorBase<T> - where - F: Fn(T) -> T, - T: Copy, - { - let store = self.store.iter().copied().map(f).collect(); - TensorBase { - id: TensorId::new(), - kind: self.kind, - layout: self.layout.clone(), - op: self.op.clone(), - store, - } - } - + /// Returns the tensor with the given [Layout] pub fn with_layout(mut self, layout: Layout) -> Self { self.layout = layout; self @@ -244,7 +225,14 @@ where } pub fn view<'a>(&'a self) -> TensorBase<&'a T> { - unimplemented!("view") + let store = self.store.iter().collect(); + TensorBase { + id: self.id, + kind: self.kind, + layout: self.layout.clone(), + op: self.op.view(), + store, + } } } // Internal Methods impl<T> TensorBase<T> { @@ -261,9 +249,28 @@ impl<T> TensorBase<T> { pub(crate) fn get_by_index(&self, index: usize) -> Option<&T> { self.store.get(index) } - /// Create an iterator over the strides of the tensor - pub(crate) fn strides(&self) -> Strides<'_> { - self.layout().into() + + pub(crate) fn map<'a, F>(&'a self, f: F) -> Map<SliceIter<'a, T>, F> + where + F: FnMut(&'a T) -> T, + T: 'a + Clone, + { + self.store.iter().map(f) + } + + pub(crate) fn mapv<F>(&self, f: F) -> TensorBase<T> + where + F: Fn(T) -> T, + T: Copy, + { + let store = self.store.iter().copied().map(f).collect(); + TensorBase { + id: TensorId::new(), + kind: self.kind, + layout: self.layout.clone(), + op: self.op.clone(), + store, + } } } diff --git a/tensor/src/utils.rs b/tensor/src/utils.rs index 4741c24c..4be2199e
100644 --- a/tensor/src/utils.rs +++ b/tensor/src/utils.rs @@ -14,10 +14,10 @@ where T: Scalar, { if lhs.shape().rank() != rhs.shape().rank() { - return Err(ShapeError::IncompatibleShapes.into()); + return Err(ShapeError::DimensionMismatch.into()); } - let shape = lhs.shape().matmul_shape(&rhs.shape()).unwrap(); + let shape = lhs.shape().matmul_shape(rhs.shape()).unwrap(); let mut result = vec![T::zero(); shape.size()]; for i in 0..lhs.shape().nrows() { From 95595eccbec0282c4c74b416ef531933c75014f7 Mon Sep 17 00:00:00 2001 From: Joe McCain III Date: Sun, 31 Mar 2024 15:27:29 -0500 Subject: [PATCH 71/87] update Signed-off-by: Joe McCain III --- acme/Cargo.toml | 4 + core/src/ops/binary/kinds.rs | 2 + core/src/ops/binary/mod.rs | 6 ++ core/src/ops/mod.rs | 28 +------ core/src/specs/arith.rs | 25 ++++++ core/src/specs/mod.rs | 3 +- graphs/src/ops/arithmetic.rs | 24 +++--- tensor/Cargo.toml | 4 + tensor/src/actions/{ => create}/arange.rs | 16 +--- tensor/src/actions/create/linspace.rs | 30 +++++++ tensor/src/actions/create/mod.rs | 32 +++++++ tensor/src/actions/create/stack.rs | 23 ++++++ tensor/src/actions/iter/mod.rs | 2 +- tensor/src/actions/mod.rs | 28 +------ tensor/src/data/{tensor.rs => container.rs} | 41 ++++----- tensor/src/data/mod.rs | 9 +- tensor/src/data/repr/owned.rs | 12 +-- tensor/src/data/repr/shared.rs | 22 ++--- tensor/src/data/specs.rs | 18 ++-- tensor/src/error.rs | 1 + tensor/src/impls/create.rs | 37 +++++---- tensor/src/impls/linalg.rs | 92 ++++++++++++++++++++- tensor/src/io/mod.rs | 4 + tensor/src/lib.rs | 2 + tensor/src/linalg/mod.rs | 2 +- tensor/src/shape/axis.rs | 2 +- tensor/src/shape/error.rs | 5 +- tensor/src/shape/shape.rs | 19 +++++ tensor/src/specs/create.rs | 6 ++ tensor/src/specs/mod.rs | 18 +--- tensor/src/specs/{reshape.rs => moves.rs} | 0 tensor/src/store/layout.rs | 18 +++- tensor/src/store/mod.rs | 7 +- tensor/src/store/storage.rs | 90 -------------------- tensor/src/tensor.rs | 15 +++- tensor/tests/arith.rs | 11 +-- tensor/tests/composition.rs | 5 ++ tensor/tests/linalg.rs | 56 +++++++++++++ tensor/tests/tensor.rs | 15 +++- 39 files changed, 450 insertions(+), 284 deletions(-) create mode 100644 core/src/specs/arith.rs rename tensor/src/actions/{ => create}/arange.rs (91%) create mode 100644 tensor/src/actions/create/linspace.rs create mode 100644 tensor/src/actions/create/mod.rs create mode 100644 tensor/src/actions/create/stack.rs rename tensor/src/data/{tensor.rs => container.rs} (81%) create mode 100644 tensor/src/io/mod.rs create mode 100644 tensor/src/specs/create.rs rename tensor/src/specs/{reshape.rs => moves.rs} (100%) delete mode 100644 tensor/src/store/storage.rs create mode 100644 tensor/tests/linalg.rs diff --git a/acme/Cargo.toml b/acme/Cargo.toml index 7d2a5f03..47ff1aa2 100644 --- a/acme/Cargo.toml +++ b/acme/Cargo.toml @@ -36,6 +36,10 @@ graph = [ "dep:acme-graphs" ] +io = [ + "acme-tensor/io" +] + macros = [ "dep:acme-macros" ] diff --git a/core/src/ops/binary/kinds.rs b/core/src/ops/binary/kinds.rs index 84c83e64..2bd277b2 100644 --- a/core/src/ops/binary/kinds.rs +++ b/core/src/ops/binary/kinds.rs @@ -30,6 +30,7 @@ use strum::{Display, EnumCount, EnumIs, EnumIter, VariantNames}; #[repr(u8)] #[strum(serialize_all = "lowercase")] pub enum BinaryOp { + // { #[default] Add, Sub, @@ -44,6 +45,7 @@ pub enum BinaryOp { Xor, Shl, Shr, + // Custom(Kind), } impl BinaryOp { diff --git a/core/src/ops/binary/mod.rs b/core/src/ops/binary/mod.rs index b4bde322..f4f64848 100644 --- a/core/src/ops/binary/mod.rs +++ b/core/src/ops/binary/mod.rs @@ 
-8,5 +8,11 @@ pub(crate) mod kinds; pub(crate) mod operator; pub(crate) mod specs; +pub trait BinOp { + type Output; + + fn bin_op(&self, other: &T) -> Self::Output; +} + #[cfg(test)] mod tests {} diff --git a/core/src/ops/mod.rs b/core/src/ops/mod.rs index c049fe40..6d0d76a5 100644 --- a/core/src/ops/mod.rs +++ b/core/src/ops/mod.rs @@ -39,33 +39,9 @@ where } pub trait Operation { - type Output; - - fn kind(&self) -> String; -} - -pub trait Pow { - type Output; - - fn pow(&self, exp: T) -> Self::Output; -} - -pub trait Powc: Pow { - fn powc(&self, exp: T) -> Self::Output; -} - -pub trait Powi: Pow { - fn powi(&self, exp: T) -> Self::Output; -} - -pub trait Powf: Pow { - fn powf(&self, exp: T) -> Self::Output; -} - -pub trait Squared { - type Output; + type Kind; - fn squared(&self) -> Self::Output; + fn kind(&self) -> Self::Kind; } pub(crate) mod prelude { diff --git a/core/src/specs/arith.rs b/core/src/specs/arith.rs new file mode 100644 index 00000000..7762f3a7 --- /dev/null +++ b/core/src/specs/arith.rs @@ -0,0 +1,25 @@ +/* + Appellation: arith + Contrib: FL03 +*/ + +pub trait Pow { + type Output; + + fn pow(&self, exp: T) -> Self::Output; +} + +pub trait Powc: Pow { + type Complex; + type Real: Pow; + + fn powc(&self, exp: Self::Complex) -> Self::Output; +} + +pub trait Powi: Pow { + fn powi(&self, exp: T) -> Self::Output; +} + +pub trait Powf: Pow { + fn powf(&self, exp: T) -> Self::Output; +} diff --git a/core/src/specs/mod.rs b/core/src/specs/mod.rs index 19597ade..5df9b8e9 100644 --- a/core/src/specs/mod.rs +++ b/core/src/specs/mod.rs @@ -3,8 +3,9 @@ Contrib: FL03 */ -pub use self::{gradient::*, prop::*, store::*}; +pub use self::{arith::*, gradient::*, prop::*, store::*}; +pub(crate) mod arith; pub(crate) mod gradient; pub(crate) mod prop; pub(crate) mod store; diff --git a/graphs/src/ops/arithmetic.rs b/graphs/src/ops/arithmetic.rs index 8b94768e..4dffc381 100644 --- a/graphs/src/ops/arithmetic.rs +++ b/graphs/src/ops/arithmetic.rs @@ -3,6 +3,7 @@ Contrib: FL03 */ use super::BinaryOperation; +use num::traits::NumOps; #[cfg(feature = "serde")] use serde::{Deserialize, Serialize}; use strum::{Display, EnumCount, EnumIs, EnumIter, VariantNames}; @@ -71,7 +72,7 @@ macro_rules! impl_binary_op { ($op:ident, $bound:ident, $operator:tt) => { impl BinaryOperation for $op where - A: $bound, + A: core::ops::$bound, { type Output = C; @@ -83,7 +84,7 @@ macro_rules! impl_binary_op { (expr $op:ident, $bound:ident, $exp:expr) => { impl BinaryOperation for $op where - A: $bound, + A: core::ops::$bound, { type Output = C; @@ -95,9 +96,7 @@ macro_rules! 
impl_binary_op { } // operator!(Addition, Division, Multiplication, Subtraction); -operators!(class Arithmetic; {Addition: Add, Division: Div, Multiplication: Mul, Subtraction: Sub}); - -use std::ops::{Add, Div, Mul, Sub}; +operators!(class Arithmetic; {Addition: Add, Division: Div, Multiplication: Mul, Remainder: Rem, Subtraction: Sub}); impl_binary_op!(Addition, Add, +); @@ -105,6 +104,8 @@ impl_binary_op!(Division, Div, /); impl_binary_op!(Multiplication, Mul, *); +impl_binary_op!(Remainder, Rem, %); + impl_binary_op!(Subtraction, Sub, -); impl Arithmetic { @@ -130,12 +131,13 @@ impl Arithmetic { pub fn op(&self) -> Box> where - A: Add + Div + Mul + Sub, + A: NumOps, { match self.clone() { Arithmetic::Add(op) => Box::new(op), Arithmetic::Div(op) => Box::new(op), Arithmetic::Mul(op) => Box::new(op), + Arithmetic::Rem(op) => Box::new(op), Arithmetic::Sub(op) => Box::new(op), } } @@ -145,19 +147,15 @@ impl Arithmetic { Arithmetic::Add(op) => op.name(), Arithmetic::Div(op) => op.name(), Arithmetic::Mul(op) => op.name(), + Arithmetic::Rem(op) => op.name(), Arithmetic::Sub(op) => op.name(), } } pub fn eval(&self, lhs: A, rhs: B) -> C where - A: Add + Div + Mul + Sub, + A: NumOps, { - match self { - Arithmetic::Add(op) => op.eval(lhs, rhs), - Arithmetic::Div(op) => op.eval(lhs, rhs), - Arithmetic::Mul(op) => op.eval(lhs, rhs), - Arithmetic::Sub(op) => op.eval(lhs, rhs), - } + self.op().eval(lhs, rhs) } } diff --git a/tensor/Cargo.toml b/tensor/Cargo.toml index 0021f627..14d71818 100644 --- a/tensor/Cargo.toml +++ b/tensor/Cargo.toml @@ -16,6 +16,10 @@ default = [ "std" ] +io = [ + +] + serde = [ "dep:serde", "serde-ext", diff --git a/tensor/src/actions/arange.rs b/tensor/src/actions/create/arange.rs similarity index 91% rename from tensor/src/actions/arange.rs rename to tensor/src/actions/create/arange.rs index e17399dd..f74ffef8 100644 --- a/tensor/src/actions/arange.rs +++ b/tensor/src/actions/create/arange.rs @@ -2,24 +2,10 @@ Appellation: arange Contrib: FL03 */ +use super::utils::steps; use core::ops::{self, Range}; use num::traits::{Bounded, FromPrimitive, Num, ToPrimitive}; -pub fn step_size(start: T, stop: T, steps: usize) -> T -where - T: FromPrimitive + ops::Div + ops::Sub, -{ - (stop - start) / T::from_usize(steps).unwrap() -} - -pub fn steps(start: T, stop: T, step: T) -> usize -where - T: ToPrimitive + ops::Div + ops::Sub, -{ - let steps = (stop - start) / step; - steps.to_usize().unwrap() -} - pub struct Arange { range: Boundary, step: T, diff --git a/tensor/src/actions/create/linspace.rs b/tensor/src/actions/create/linspace.rs new file mode 100644 index 00000000..c0edde68 --- /dev/null +++ b/tensor/src/actions/create/linspace.rs @@ -0,0 +1,30 @@ +/* + Appellation: linspace + Contrib: FL03 +*/ +use super::step_size; +use num::traits::{FromPrimitive, Num}; + +pub trait Linspace { + fn linspace(start: T, stop: T, steps: usize) -> Self; +} + +pub trait LinspaceExt: Linspace { + fn linspace_until(&self, stop: T, steps: usize) -> Self; +} + +impl Linspace for Vec +where + T: Copy + Default + FromPrimitive + Num + PartialOrd, +{ + fn linspace(start: T, stop: T, steps: usize) -> Self { + let step = step_size(start, stop, steps); + let mut vec = Vec::with_capacity(steps); + let mut value = start; + for _ in 0..steps { + vec.push(value); + value = value + step; + } + vec + } +} diff --git a/tensor/src/actions/create/mod.rs b/tensor/src/actions/create/mod.rs new file mode 100644 index 00000000..b4cbcf20 --- /dev/null +++ b/tensor/src/actions/create/mod.rs @@ -0,0 +1,32 @@ +/* + 
Appellation: create + Contrib: FL03 +*/ +pub use self::{arange::*, linspace::*, stack::*, utils::*}; + +pub(crate) mod arange; +pub(crate) mod linspace; +pub(crate) mod stack; + +pub(crate) mod utils { + use core::ops::{Div, Sub}; + use num::traits::{FromPrimitive, ToPrimitive}; + + pub fn step_size(start: T, stop: T, steps: usize) -> T + where + T: FromPrimitive + Div + Sub, + { + (stop - start) / T::from_usize(steps).unwrap() + } + + pub fn steps(start: T, stop: T, step: T) -> usize + where + T: ToPrimitive + Div + Sub, + { + let steps = (stop - start) / step; + steps.to_usize().unwrap() + } +} + +#[cfg(test)] +mod tests {} diff --git a/tensor/src/actions/create/stack.rs b/tensor/src/actions/create/stack.rs new file mode 100644 index 00000000..b07790a6 --- /dev/null +++ b/tensor/src/actions/create/stack.rs @@ -0,0 +1,23 @@ +/* + Appellation: stack + Contrib: FL03 +*/ +use crate::shape::Axis; + +pub trait Stack { + type Output; + + fn stack(&self, other: &T, along: Axis) -> Self::Output; +} + +pub trait Hstack { + type Output; + + fn hstack(&self, other: &T) -> Self::Output; +} + +pub trait Vstack { + type Output; + + fn vstack(&self, other: &T) -> Self::Output; +} diff --git a/tensor/src/actions/iter/mod.rs b/tensor/src/actions/iter/mod.rs index 37977362..0772e215 100644 --- a/tensor/src/actions/iter/mod.rs +++ b/tensor/src/actions/iter/mod.rs @@ -42,7 +42,7 @@ pub(crate) mod utils { #[cfg(test)] mod tests { - use crate::actions::Linspace; + use crate::actions::create::Linspace; use crate::prelude::{Shape, Tensor}; #[test] diff --git a/tensor/src/actions/mod.rs b/tensor/src/actions/mod.rs index e60621ce..4cb5d9b6 100644 --- a/tensor/src/actions/mod.rs +++ b/tensor/src/actions/mod.rs @@ -8,39 +8,17 @@ //! //! The actions include:
//! * Automatic Differentiation -//! * Creation Routines +//! * Creation Routines (``) //! * Indexing //! * Iteration -pub mod arange; +pub mod create; pub mod grad; pub mod index; pub mod iter; -use num::traits::{FromPrimitive, Num}; - -pub trait Linspace { - fn linspace(start: T, stop: T, steps: usize) -> Self; -} - -impl Linspace for Vec -where - T: Copy + Default + FromPrimitive + Num + PartialOrd, -{ - fn linspace(start: T, stop: T, steps: usize) -> Self { - let step = arange::step_size(start, stop, steps); - let mut vec = Vec::with_capacity(steps); - let mut value = start; - for _ in 0..steps { - vec.push(value); - value = value + step; - } - vec - } -} - pub(crate) mod prelude { - pub use super::arange::*; + pub use super::create::*; pub use super::grad::*; pub use super::index::*; pub use super::iter::*; diff --git a/tensor/src/data/tensor.rs b/tensor/src/data/container.rs similarity index 81% rename from tensor/src/data/tensor.rs rename to tensor/src/data/container.rs index 8e17d657..a5dd70d1 100644 --- a/tensor/src/data/tensor.rs +++ b/tensor/src/data/container.rs @@ -1,11 +1,11 @@ /* - Appellation: tensor + Appellation: container Contrib: FL03 */ use super::specs::{Data, DataOwned, RawData, RawDataMut}; -use super::{nonnull_from_vec_data, ArcTensor, Tensor}; +use super::{nonnull_from_vec_data, Container, SharedContainer}; use crate::actions::iter::to_vec_mapped; -use crate::prelude::{BackpropOp, Layout, TensorId, TensorKind}; +use crate::prelude::Layout; use crate::shape::dim::can_index_slice; use crate::shape::{IntoShape, IntoStride, Shape, Stride}; use core::ptr::NonNull; @@ -13,19 +13,16 @@ use core::slice; use rawpointer::PointerExt; #[derive(Clone)] -pub struct BaseTensor +pub struct ContainerBase where S: RawData, { - pub(crate) id: TensorId, pub(crate) data: S, - pub(crate) kind: TensorKind, pub(crate) layout: Layout, - pub(crate) op: BackpropOp, pub(crate) ptr: NonNull, } -impl BaseTensor +impl ContainerBase where S: RawData, { @@ -49,7 +46,7 @@ where where S: RawDataMut, { - // self.try_ensure_unique(); // for ArcArray + RawDataMut::try_ensure_unique(self); // for ArcArray self.ptr.as_ptr() } pub fn as_slice_memory_order(&self) -> Option<&[A]> @@ -79,20 +76,20 @@ where } /// Without any coping, turn the tensor into a shared tensor. - pub fn into_shared(self) -> ArcTensor
+ pub fn into_shared(self) -> SharedContainer where S: DataOwned, { let data = self.data.into_shared(); // safe because: equivalent unmoved data, ptr and dims remain valid - unsafe { BaseTensor::from_data_ptr(data, self.ptr).with_layout(self.layout) } + unsafe { ContainerBase::from_data_ptr(data, self.ptr).with_layout(self.layout) } } pub fn layout(&self) -> &Layout { &self.layout } - pub fn map<'a, B, F>(&'a self, f: F) -> Tensor + pub fn map<'a, B, F>(&'a self, f: F) -> Container where F: FnMut(&'a A) -> B, A: 'a, @@ -100,7 +97,11 @@ where { unsafe { if let Some(slc) = self.as_slice_memory_order() { - BaseTensor::from_shape_trusted_iter_unchecked(self.shape().slice(), slc.iter(), f) + ContainerBase::from_shape_trusted_iter_unchecked( + self.shape().slice(), + slc.iter(), + f, + ) } else { unimplemented!() // BaseTensor::from_shape_trusted_iter_unchecked(self.shape(), self.iter(), f) @@ -122,7 +123,7 @@ where } // Internal methods -impl BaseTensor +impl ContainerBase where S: DataOwned + RawData, { @@ -139,7 +140,7 @@ where let tmp = nonnull_from_vec_data(&mut v); PointerExt::add(tmp, layout.offset_from_low_addr_ptr_to_logical_ptr()) }; - BaseTensor::from_data_ptr(DataOwned::new(v), ptr).with_layout(layout) + ContainerBase::from_data_ptr(DataOwned::new(v), ptr).with_layout(layout) } /// Creates an array from an iterator, mapped by `map` and interpret it according to the @@ -165,17 +166,14 @@ where } } -impl BaseTensor +impl ContainerBase where S: RawData, { pub(crate) unsafe fn from_data_ptr(data: S, ptr: NonNull) -> Self { let tensor = Self { - id: TensorId::new(), data, - kind: TensorKind::Normal, layout: Layout::contiguous(0), - op: BackpropOp::::none(), ptr, }; debug_assert!(tensor.pointer_is_inbounds()); @@ -186,15 +184,12 @@ where self.data._is_pointer_inbounds(self.as_ptr()) } - pub(crate) unsafe fn with_layout(self, layout: Layout) -> BaseTensor { + pub(crate) unsafe fn with_layout(self, layout: Layout) -> ContainerBase { debug_assert_eq!(self.layout().rank(), layout.rank()); Self { - id: self.id, data: self.data, - kind: self.kind, layout, - op: self.op, ptr: self.ptr, } } diff --git a/tensor/src/data/mod.rs b/tensor/src/data/mod.rs index b89a1978..59c86795 100644 --- a/tensor/src/data/mod.rs +++ b/tensor/src/data/mod.rs @@ -6,10 +6,10 @@ //! //! 
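The two containers exported here differ only in ownership: `Container` owns its buffer outright, while `SharedContainer` keeps it behind an `Arc` and must unshare before mutation. A rough std-only sketch of the copy-on-write contract that `into_shared` and `try_ensure_unique` describe, using a plain `Vec` behind an `Arc` in place of the crate's `RawData` machinery (names below are illustrative only):

use std::sync::Arc;

fn main() {
    // the `into_shared` step: move the owned buffer behind an Arc
    let shared: Arc<Vec<i32>> = Arc::new(vec![1, 2, 3]);
    let alias = Arc::clone(&shared);

    // the `try_ensure_unique` step: unwrap if this is the only handle,
    // otherwise clone the data so the writer gets a buffer of its own
    let mut unique: Vec<i32> = Arc::try_unwrap(alias).unwrap_or_else(|arc| (*arc).clone());
    unique[0] = 99;

    assert_eq!(shared[0], 1); // the shared handle still sees the old data
    assert_eq!(unique[0], 99); // the writer mutated its private copy
}

The clone happens at most once, at the first write after sharing; later writes through the unique handle touch the buffer directly.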
pub(crate) use self::utils::*; -pub use self::{specs::*, tensor::*}; +pub use self::{container::*, specs::*}; +pub(crate) mod container; pub(crate) mod specs; -pub(crate) mod tensor; pub mod elem; @@ -18,12 +18,13 @@ pub mod repr { pub(crate) mod owned; pub(crate) mod shared; + #[allow(dead_code)] pub(crate) mod view; } -pub type Tensor = BaseTensor>; +pub type Container = ContainerBase>; -pub type ArcTensor = BaseTensor>; +pub type SharedContainer = ContainerBase>; pub(crate) mod utils { #[cfg(not(feature = "std"))] diff --git a/tensor/src/data/repr/owned.rs b/tensor/src/data/repr/owned.rs index af500ca2..e1f3d8e7 100644 --- a/tensor/src/data/repr/owned.rs +++ b/tensor/src/data/repr/owned.rs @@ -4,7 +4,7 @@ */ use crate::data::repr::OwnedArcRepr; use crate::data::utils::nonnull_from_vec_data; -use crate::data::{ArcTensor, BaseTensor, Tensor}; +use crate::data::{Container, ContainerBase, SharedContainer}; use crate::data::{Data, DataMut, DataOwned, RawData, RawDataClone, RawDataMut, RawDataSubst}; use core::mem::{self, ManuallyDrop, MaybeUninit}; use core::ptr::NonNull; @@ -146,7 +146,7 @@ impl Drop for OwnedRepr { unsafe impl Data for OwnedRepr { #[inline] - fn into_owned(self_: BaseTensor) -> Tensor + fn into_owned(self_: ContainerBase) -> Container where A: Clone, { @@ -155,12 +155,12 @@ unsafe impl Data for OwnedRepr { #[inline] fn try_into_owned_nocopy( - self_: BaseTensor, - ) -> Result, BaseTensor> { + self_: ContainerBase, + ) -> Result, ContainerBase> { Ok(self_) } - fn to_shared(self_: &BaseTensor) -> ArcTensor + fn to_shared(self_: &ContainerBase) -> SharedContainer where Self::Elem: Clone, { @@ -197,7 +197,7 @@ unsafe impl RawData for OwnedRepr { } unsafe impl RawDataMut for OwnedRepr { - fn try_ensure_unique(_: &mut BaseTensor) + fn try_ensure_unique(_: &mut ContainerBase) where Self: Sized, { diff --git a/tensor/src/data/repr/shared.rs b/tensor/src/data/repr/shared.rs index 60a2a332..0f6b658a 100644 --- a/tensor/src/data/repr/shared.rs +++ b/tensor/src/data/repr/shared.rs @@ -4,7 +4,7 @@ */ use crate::data::repr::OwnedRepr; use crate::data::specs::*; -use crate::data::{ArcTensor, BaseTensor, Tensor}; +use crate::data::{Container, ContainerBase, SharedContainer}; #[cfg(not(feature = "std"))] use alloc::sync::Arc; use core::mem::MaybeUninit; @@ -23,35 +23,37 @@ impl Clone for OwnedArcRepr { } unsafe impl Data for OwnedArcRepr { - fn into_owned(self_: BaseTensor) -> crate::data::Tensor + fn into_owned(self_: ContainerBase) -> crate::data::Container where Self::Elem: Clone, { // Self::ensure_unique(&mut self_); let data = Arc::try_unwrap(self_.data.0).ok().unwrap(); // safe because data is equivalent - unsafe { BaseTensor::from_data_ptr(data, self_.ptr).with_layout(self_.layout) } + unsafe { ContainerBase::from_data_ptr(data, self_.ptr).with_layout(self_.layout) } } fn try_into_owned_nocopy( - self_: BaseTensor, - ) -> Result, BaseTensor> { + self_: ContainerBase, + ) -> Result, ContainerBase> { match Arc::try_unwrap(self_.data.0) { Ok(owned_data) => unsafe { // Safe because the data is equivalent. - Ok(BaseTensor::from_data_ptr(owned_data, self_.ptr).with_layout(self_.layout)) + Ok(ContainerBase::from_data_ptr(owned_data, self_.ptr).with_layout(self_.layout)) }, Err(arc_data) => unsafe { // Safe because the data is equivalent; we're just // reconstructing `self_`. 
- Err(BaseTensor::from_data_ptr(OwnedArcRepr(arc_data), self_.ptr) - .with_layout(self_.layout)) + Err( + ContainerBase::from_data_ptr(OwnedArcRepr(arc_data), self_.ptr) + .with_layout(self_.layout), + ) }, } } #[allow(clippy::wrong_self_convention)] - fn to_shared(self_: &BaseTensor) -> ArcTensor + fn to_shared(self_: &ContainerBase) -> SharedContainer where Self::Elem: Clone, { @@ -98,7 +100,7 @@ unsafe impl RawDataMut for OwnedArcRepr where A: Clone, { - fn try_ensure_unique(self_: &mut BaseTensor) + fn try_ensure_unique(self_: &mut ContainerBase) where Self: Sized, { diff --git a/tensor/src/data/specs.rs b/tensor/src/data/specs.rs index 1d90bd72..cc6669cd 100644 --- a/tensor/src/data/specs.rs +++ b/tensor/src/data/specs.rs @@ -3,13 +3,13 @@ Contrib: FL03 */ use crate::data::repr::OwnedArcRepr; -use crate::data::{ArcTensor, BaseTensor, Tensor}; +use crate::data::{Container, ContainerBase, SharedContainer}; use core::mem::MaybeUninit; use core::ptr::NonNull; -/// Array representation trait. +/// Container representation trait. /// -/// For an array with elements that can be accessed with safe code. +/// For a container with elements that can be accessed with safe code. /// /// ***Internal trait, see `RawData`.*** #[allow(clippy::missing_safety_doc)] // not implementable downstream @@ -17,7 +17,7 @@ pub unsafe trait Data: RawData { /// Converts the array to a uniquely owned array, cloning elements if necessary. #[doc(hidden)] #[allow(clippy::wrong_self_convention)] - fn into_owned(self_: BaseTensor) -> Tensor + fn into_owned(self_: ContainerBase) -> Container where Self::Elem: Clone; @@ -25,14 +25,14 @@ pub unsafe trait Data: RawData { /// cloning the array elements. Otherwise, returns `self_` unchanged. #[doc(hidden)] fn try_into_owned_nocopy( - self_: BaseTensor, - ) -> Result, BaseTensor>; + self_: ContainerBase, + ) -> Result, ContainerBase>; /// Return a shared ownership (copy on write) array based on the existing one, /// cloning elements if necessary. #[doc(hidden)] #[allow(clippy::wrong_self_convention)] - fn to_shared(self_: &BaseTensor) -> ArcTensor + fn to_shared(self_: &ContainerBase) -> SharedContainer where Self::Elem: Clone; } @@ -42,7 +42,7 @@ pub unsafe trait DataMut: Data + RawDataMut { /// Ensures that the array has unique access to its data. #[doc(hidden)] #[inline] - fn ensure_unique(self_: &mut BaseTensor) + fn ensure_unique(self_: &mut ContainerBase) where Self: Sized, { @@ -105,7 +105,7 @@ pub unsafe trait RawDataMut: RawData { /// Additionally, if `Self` provides safe mutable access to array elements, /// then this method **must** panic or ensure that the data is unique. 
#[doc(hidden)] - fn try_ensure_unique(_: &mut BaseTensor) + fn try_ensure_unique(_: &mut ContainerBase) where Self: Sized; diff --git a/tensor/src/error.rs b/tensor/src/error.rs index 553d13d6..4ab00580 100644 --- a/tensor/src/error.rs +++ b/tensor/src/error.rs @@ -23,6 +23,7 @@ pub enum TensorError { Arithmetic(ArithmeticError), Indexing(String), Shape(ShapeError), + Singular, } unsafe impl Send for TensorError {} diff --git a/tensor/src/impls/create.rs b/tensor/src/impls/create.rs index bcafdda3..acedd2d0 100644 --- a/tensor/src/impls/create.rs +++ b/tensor/src/impls/create.rs @@ -4,6 +4,7 @@ */ use crate::prelude::IntoShape; use crate::tensor::{from_vec, TensorBase}; +use num::traits::real::Real; use num::traits::{FromPrimitive, NumAssign, One, Zero}; impl TensorBase @@ -41,7 +42,6 @@ where if T::is_zero(&step) { panic!("step must be non-zero"); } - // let steps = ((end - start) / step).ceil() as usize; let mut store = Vec::new(); let mut value = start; while value < end { @@ -50,26 +50,29 @@ where } from_vec(false, store.len(), store) } - - /// Create a tensor within a range of values + /// Create an identity matrix of a certain size + pub fn eye(size: usize) -> Self { + let mut store = Vec::with_capacity(size * size); + for i in 0..size { + for j in 0..size { + store.push(if i == j { T::one() } else { T::zero() }); + } + } + from_vec(false, (size, size), store) + } + /// Create a tensor with a certain number of elements, evenly spaced + /// between the provided start and end values pub fn linspace(start: T, end: T, steps: usize) -> Self where T: FromPrimitive, { - // let steps = ((end - start) / step).ceil() as usize; let step = (end - start) / T::from_usize(steps).unwrap(); - let mut store = Vec::with_capacity(steps); - let mut value: T = start; - for _ in 0..steps { - store.push(value); - value += step; - } - from_vec(false, store.len(), store) + Self::arange(start, end, step) } pub fn logspace(start: T, end: T, steps: usize) -> Self where - T: num::traits::real::Real, + T: Real, { let start = start.log2(); let end = end.log2(); @@ -85,7 +88,7 @@ where pub fn geomspace(start: T, end: T, steps: usize) -> Self where - T: num::Float, + T: Real, { let start = start.log10(); let end = end.log10(); @@ -110,11 +113,11 @@ where } /// Create a tensor, filled with ones, from the shape of another tensor pub fn ones_from(tensor: &TensorBase) -> Self { - Self::ones(tensor.shape().clone()) + Self::ones(tensor.shape()) } /// Create a tensor, filled with ones, from the shape of the tensor pub fn ones_like(&self) -> Self { - Self::ones(self.shape().clone()) + Self::ones(self.shape()) } } @@ -128,10 +131,10 @@ where } /// Create a tensor, filled with zeros, from the shape of another tensor pub fn zeros_from(tensor: &TensorBase) -> Self { - Self::zeros(tensor.shape().clone()) + Self::zeros(tensor.shape()) } /// Create a tensor, filled with zeros, from the shape of the tensor pub fn zeros_like(&self) -> Self { - Self::zeros(self.shape().clone()) + Self::zeros(self.shape()) } } diff --git a/tensor/src/impls/linalg.rs b/tensor/src/impls/linalg.rs index 36f52dc5..998c7d78 100644 --- a/tensor/src/impls/linalg.rs +++ b/tensor/src/impls/linalg.rs @@ -5,10 +5,98 @@ //! Implementations for linear algebra operations. //! //! 
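The `inverse` routine added below is a Gauss-Jordan elimination with partial pivoting. As a compact `f64`-only sketch of the same idea, with error handling reduced to `Option` and the permutation bookkeeping elided (this is not the crate's API, just the algorithm in miniature):

fn invert(a: &[f64], n: usize) -> Option<Vec<f64>> {
    let mut m = a.to_vec();
    // augment with the identity; row operations are mirrored onto it
    let mut inv = vec![0.0; n * n];
    for i in 0..n {
        inv[i * n + i] = 1.0;
    }
    for i in 0..n {
        // partial pivoting: move the largest |entry| in column i up to row i
        let pivot_row = (i..n).max_by(|&r, &s| {
            m[r * n + i].abs().partial_cmp(&m[s * n + i].abs()).unwrap()
        })?;
        if m[pivot_row * n + i] == 0.0 {
            return None; // singular matrix
        }
        for j in 0..n {
            m.swap(pivot_row * n + j, i * n + j);
            inv.swap(pivot_row * n + j, i * n + j);
        }
        // normalize the pivot row, then eliminate the column everywhere else
        let pivot = m[i * n + i];
        for j in 0..n {
            m[i * n + j] /= pivot;
            inv[i * n + j] /= pivot;
        }
        for r in (0..n).filter(|&r| r != i) {
            let factor = m[r * n + i];
            for j in 0..n {
                m[r * n + j] -= m[i * n + j] * factor;
                inv[r * n + j] -= inv[i * n + j] * factor;
            }
        }
    }
    Some(inv)
}

fn main() {
    // [[4, 7], [2, 6]] has determinant 10; its inverse is [[0.6, -0.7], [-0.2, 0.4]]
    let inv = invert(&[4.0, 7.0, 2.0, 6.0], 2).unwrap();
    assert!(inv
        .iter()
        .zip([0.6, -0.7, -0.2, 0.4])
        .all(|(a, b)| (a - b).abs() < 1e-12));
}

The full version below additionally tracks the row permutation and emits the result through a `TensorExpr::unary(..., UnaryOp::Inv)` backprop op.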
-use crate::prelude::{Matmul, Scalar, TensorExpr}; +use crate::prelude::{Matmul, Scalar, ShapeError, TensorError, TensorExpr, TensorResult}; use crate::tensor::*; +use acme::prelude::UnaryOp; +use num::traits::{Num, Signed}; -impl TensorBase where T: Scalar {} +pub fn inverse(tensor: &TensorBase) -> TensorResult> +where + T: Copy + Num + PartialOrd + Signed, +{ + if !tensor.shape().is_square() { + return Err(ShapeError::InvalidShape.into()); + } + let shape = tensor.shape(); + let n = *shape.first().unwrap(); + + let mut data = tensor.data().to_vec(); + let mut inverse = vec![T::zero(); n * n]; + + for i in 0..n { + inverse[(i * n) + i] = T::one(); + } + + let mut permutation = vec![0; n]; + for i in 0..n { + permutation[i] = i; + } + + for i in 0..n { + let mut max_row = i; + for j in i + 1..n { + if data[(j * n) + i].abs() > data[(max_row * n) + i].abs() { + max_row = j; + } + } + + if data[(max_row * n) + i].is_zero() { + return Err(TensorError::Singular); // Matrix is singular + } + + if max_row != i { + for j in 0..n { + data.swap((max_row * n) + j, (i * n) + j); + inverse.swap((max_row * n) + j, (i * n) + j); + } + permutation.swap(max_row, i); + } + + let pivot = data[(i * n) + i]; + for j in 0..n { + data[(i * n) + j] = data[(i * n) + j] / pivot; + inverse[(i * n) + j] = inverse[(i * n) + j] / pivot; + } + + for j in 0..n { + if j != i { + let factor = data[(j * n) + i]; + for k in 0..n { + data[(j * n) + k] = data[(j * n) + k] - data[(i * n) + k] * factor; + inverse[(j * n) + k] = inverse[(j * n) + k] - inverse[(i * n) + k] * factor; + } + } + } + } + + let mut res = vec![T::zero(); n * n]; + for i in 0..n { + for j in 0..n { + res[(i * n) + permutation[j]] = inverse[(i * n) + j]; + } + } + let op = TensorExpr::unary(tensor.clone(), UnaryOp::Inv); + let tensor = from_vec_with_op(false, op, shape, res); + Ok(tensor) +} + +impl TensorBase +where + T: Copy + Num + PartialOrd + Signed, +{ + pub fn diag(&self) -> Self + where + T: Clone, + { + let rank = *self.rank(); + + let store = (0..rank).map(|i| self[vec![i; rank]]).collect::>(); + from_vec(false, self.shape().diagonalize(), store) + } + pub fn inv(&self) -> TensorResult { + inverse(self) + } +} impl Matmul> for TensorBase where diff --git a/tensor/src/io/mod.rs b/tensor/src/io/mod.rs new file mode 100644 index 00000000..9a895a5b --- /dev/null +++ b/tensor/src/io/mod.rs @@ -0,0 +1,4 @@ +/* + Appellation: io + Contrib: FL03 +*/ diff --git a/tensor/src/lib.rs b/tensor/src/lib.rs index e7079995..0d199bda 100644 --- a/tensor/src/lib.rs +++ b/tensor/src/lib.rs @@ -24,6 +24,8 @@ pub mod actions; pub mod backend; pub mod data; pub mod error; +#[cfg(feature = "io")] +pub mod io; pub mod linalg; pub mod ops; pub mod shape; diff --git a/tensor/src/linalg/mod.rs b/tensor/src/linalg/mod.rs index bffd3f69..c36758ae 100644 --- a/tensor/src/linalg/mod.rs +++ b/tensor/src/linalg/mod.rs @@ -10,7 +10,7 @@ pub mod uplo; use crate::shape::Axis; pub trait Inverse { - fn inv(self) -> Self; + fn inv(&self) -> Self; } /// Matrix multiplication diff --git a/tensor/src/shape/axis.rs b/tensor/src/shape/axis.rs index f28f2a40..2aa4fa49 100644 --- a/tensor/src/shape/axis.rs +++ b/tensor/src/shape/axis.rs @@ -23,7 +23,7 @@ pub struct Axis(pub(crate) usize); impl Axis { pub fn new(axis: usize) -> Self { - Axis(axis) + Self(axis) } pub fn into_inner(self) -> usize { diff --git a/tensor/src/shape/error.rs b/tensor/src/shape/error.rs index e39beb9b..bfd16f1b 100644 --- a/tensor/src/shape/error.rs +++ b/tensor/src/shape/error.rs @@ -4,7 +4,7 @@ */ #[cfg(feature = 
"serde")] use serde::{Deserialize, Serialize}; -use strum::{Display, EnumCount, EnumIs, EnumIter, EnumString, VariantNames}; +use strum::{Display, EnumCount, EnumIs, EnumIter, EnumProperty, EnumString, VariantNames}; pub type ShapeResult = std::result::Result; @@ -21,6 +21,7 @@ pub type ShapeResult = std::result::Result; EnumCount, EnumIs, EnumIter, + EnumProperty, EnumString, Eq, Hash, @@ -32,7 +33,9 @@ pub type ShapeResult = std::result::Result; #[repr(usize)] #[strum(serialize_all = "snake_case")] pub enum ShapeError { + #[strum(props(desc = "Dimension mismatch"))] DimensionMismatch, + #[strum(props(desc = "incompatible shapes"))] IncompatibleShapes, IncompatibleLayout, InvalidAxis, diff --git a/tensor/src/shape/shape.rs b/tensor/src/shape/shape.rs index 073d0fb2..2685387b 100644 --- a/tensor/src/shape/shape.rs +++ b/tensor/src/shape/shape.rs @@ -50,6 +50,11 @@ impl Shape { } strides } + + pub fn diagonalize(&self) -> Shape { + Self::new(vec![self.size()]) + } + pub(crate) fn matmul_shape(&self, other: &Self) -> TensorResult { if *self.rank() != 2 || *other.rank() != 2 || self[1] != other[0] { return Err(ShapeError::IncompatibleShapes.into()); @@ -60,6 +65,20 @@ impl Shape { pub fn insert(&mut self, index: Axis, dim: usize) { self.0.insert(*index, dim) } + + pub fn insert_axis(&self, index: Axis) -> Self { + let mut shape = self.clone(); + shape.insert(index, 1); + shape + } + /// Returns true if the shape is empty. + pub fn is_empty(&self) -> bool { + self.0.is_empty() + } + /// Checks to see if the shape is square + pub fn is_square(&self) -> bool { + self.iter().all(|&dim| dim == self[0]) + } /// Returns true if the strides are C contiguous (aka row major). pub fn is_contiguous(&self, stride: &Stride) -> bool { if self.0.len() != stride.len() { diff --git a/tensor/src/specs/create.rs b/tensor/src/specs/create.rs new file mode 100644 index 00000000..0b60d59e --- /dev/null +++ b/tensor/src/specs/create.rs @@ -0,0 +1,6 @@ +/* + Appellation: reshape + Contrib: FL03 +*/ + + diff --git a/tensor/src/specs/mod.rs b/tensor/src/specs/mod.rs index 38c04bec..ea2ecb09 100644 --- a/tensor/src/specs/mod.rs +++ b/tensor/src/specs/mod.rs @@ -2,29 +2,17 @@ Appellation: specs Contrib: FL03 */ -pub use self::{affine::*, ndtensor::*, reshape::*, scalar::*}; +pub use self::{affine::*, moves::*, ndtensor::*, scalar::*}; pub(crate) mod affine; +pub(crate) mod moves; pub(crate) mod ndtensor; -pub(crate) mod reshape; pub(crate) mod scalar; -pub trait Hstack { - type Output; - - fn hstack(&self, other: &T) -> Self::Output; -} - -pub trait Vstack { - type Output; - - fn vstack(&self, other: &T) -> Self::Output; -} - pub(crate) mod prelude { pub use super::affine::*; + pub use super::moves::*; pub use super::ndtensor::*; - pub use super::reshape::*; pub use super::scalar::*; } diff --git a/tensor/src/specs/reshape.rs b/tensor/src/specs/moves.rs similarity index 100% rename from tensor/src/specs/reshape.rs rename to tensor/src/specs/moves.rs diff --git a/tensor/src/store/layout.rs b/tensor/src/store/layout.rs index 57654d0a..3e38e2af 100644 --- a/tensor/src/store/layout.rs +++ b/tensor/src/store/layout.rs @@ -86,6 +86,9 @@ impl Layout { } true } + pub fn is_square(&self) -> bool { + self.shape.is_square() + } /// Get a peek at the offset of the layout. 
pub fn offset(&self) -> usize { self.offset @@ -166,6 +169,7 @@ impl Layout { } // Internal methods +#[allow(dead_code)] impl Layout { pub(crate) fn index(&self, idx: impl AsRef<[usize]>) -> usize { let idx = idx.as_ref(); @@ -174,6 +178,14 @@ impl Layout { } idx.iter().zip(self.stride.iter()).map(|(i, s)| i * s).sum() } + + pub(crate) fn index_unchecked(&self, idx: impl AsRef<[usize]>) -> usize { + idx.as_ref() + .iter() + .zip(self.stride.iter()) + .map(|(i, s)| i * s) + .sum() + } } #[cfg(test)] @@ -184,8 +196,8 @@ mod tests { fn test_position() { let shape = (3, 3); let layout = Layout::contiguous(shape); - assert_eq!(layout.index(&[0, 0]), 0); - assert_eq!(layout.index(&[0, 1]), 1); - assert_eq!(layout.index(&[2, 2]), 8); + assert_eq!(layout.index_unchecked([0, 0]), 0); + assert_eq!(layout.index([0, 1]), 1); + assert_eq!(layout.index([2, 2]), 8); } } diff --git a/tensor/src/store/mod.rs b/tensor/src/store/mod.rs index dbfcf16d..bd561ef2 100644 --- a/tensor/src/store/mod.rs +++ b/tensor/src/store/mod.rs @@ -5,14 +5,9 @@ //! # Store //! //! This module provides the storage and layout for the tensor data structure. -pub use self::{layout::*, storage::*}; +pub use self::layout::Layout; pub(crate) mod layout; -pub(crate) mod storage; - -pub trait TensorStore { - type Elem; -} #[cfg(test)] mod tests {} diff --git a/tensor/src/store/storage.rs b/tensor/src/store/storage.rs deleted file mode 100644 index 3a5b435a..00000000 --- a/tensor/src/store/storage.rs +++ /dev/null @@ -1,90 +0,0 @@ -/* - Appellation: storage - Contrib: FL03 -*/ -use crate::prelude::{DataOwned, Layout, OwnedArcRepr, RawData, RawDataMut}; -use core::ptr::NonNull; - -pub type ArcStore = StoreBase>; - -#[derive(Clone)] -pub struct StoreBase -where - S: RawData, -{ - data: S, - layout: Layout, - ptr: NonNull, -} - -impl StoreBase -where - S: RawData, -{ - #[inline(always)] - pub fn as_ptr(&self) -> *const A { - self.ptr.as_ptr() as *const A - } - - /// Return a mutable pointer to the first element in the array. - /// - /// This method attempts to unshare the data. If `S: DataMut`, then the - /// data is guaranteed to be uniquely held on return. - /// - /// # Warning - /// - /// When accessing elements through this pointer, make sure to use strides - /// obtained *after* calling this method, since the process of unsharing - /// the data may change the strides. - #[inline(always)] - pub fn as_mut_ptr(&mut self) -> *mut A - where - S: RawDataMut, - { - // self.try_ensure_unique(); // for ArcArray - self.ptr.as_ptr() - } - - /// Without any coping, turn the tensor into a shared tensor. - pub fn into_shared(self) -> ArcStore - where - S: DataOwned, - { - let data = self.data.into_shared(); - // safe because: equivalent unmoved data, ptr and dims remain valid - // unsafe { Self::from_data_ptr(data, self.ptr).with_strides_dim(self.strides, self.dim) } - unsafe { StoreBase::from_data_ptr(data, self.ptr) } - } - /// Return the number of elements in the tensor. 
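// The flat offset computed by `Layout::index` above is the dot product of a
// multi-index with the strides; `index_unchecked` is the same rule without
// the bounds assertion. A standalone sketch (illustrative, not crate code):
fn flat_index(idx: &[usize], stride: &[usize]) -> usize {
    idx.iter().zip(stride).map(|(i, s)| i * s).sum()
}

fn main() {
    let stride = [3, 1]; // contiguous (row-major) strides for a (3, 3) shape
    assert_eq!(flat_index(&[0, 0], &stride), 0);
    assert_eq!(flat_index(&[0, 1], &stride), 1);
    assert_eq!(flat_index(&[2, 2], &stride), 8); // matches `test_position`
}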
- pub fn size(&self) -> usize { - self.layout.size() - } -} - -// Internal methods -impl StoreBase -where - S: RawData, -{ - pub(crate) unsafe fn from_data_ptr(data: S, ptr: NonNull) -> Self { - let tensor = Self { - data, - layout: Layout::contiguous(0), - ptr, - }; - debug_assert!(tensor.pointer_is_inbounds()); - tensor - } - - pub(crate) fn pointer_is_inbounds(&self) -> bool { - self.data._is_pointer_inbounds(self.as_ptr()) - } - #[allow(dead_code)] - pub(crate) unsafe fn with_layout(self, layout: Layout) -> Self { - Self { - data: self.data, - layout, - ptr: self.ptr, - } - } -} diff --git a/tensor/src/tensor.rs b/tensor/src/tensor.rs index 92ddd03a..5f08141f 100644 --- a/tensor/src/tensor.rs +++ b/tensor/src/tensor.rs @@ -10,6 +10,7 @@ use acme::prelude::BinaryOp; use core::iter::Map; use core::ops::{Index, IndexMut}; use core::slice::Iter as SliceIter; +use std::vec; pub(crate) fn new( kind: impl Into, @@ -274,17 +275,23 @@ impl TensorBase { } } -impl Index<&[usize]> for TensorBase { +impl Index for TensorBase +where + Idx: AsRef<[usize]>, +{ type Output = T; - fn index(&self, index: &[usize]) -> &Self::Output { + fn index(&self, index: Idx) -> &Self::Output { let i = self.layout().index(index); &self.store[i] } } -impl IndexMut<&[usize]> for TensorBase { - fn index_mut(&mut self, index: &[usize]) -> &mut Self::Output { +impl IndexMut for TensorBase +where + Idx: AsRef<[usize]>, +{ + fn index_mut(&mut self, index: Idx) -> &mut Self::Output { let i = self.layout().index(index); &mut self.store[i] } diff --git a/tensor/tests/arith.rs b/tensor/tests/arith.rs index 9386f4a1..d4b07f14 100644 --- a/tensor/tests/arith.rs +++ b/tensor/tests/arith.rs @@ -5,7 +5,7 @@ #![cfg(test)] extern crate acme_tensor as acme; -use acme::prelude::{Matmul, Tensor}; +use acme::prelude::Tensor; #[test] fn test_add() { @@ -51,15 +51,6 @@ fn test_sub() { assert_eq!(c, Tensor::::zeros(shape)); } -#[test] -fn test_matmul() { - let a = Tensor::::fill((3, 2), 2_f64); - let b = Tensor::::ones((2, 3)); - let c = a.matmul(&b); - - assert_eq!(c, Tensor::::fill((3, 3), 4.0)); -} - #[test] fn test_trig() { let a = Tensor::::ones((2, 2)); diff --git a/tensor/tests/composition.rs b/tensor/tests/composition.rs index fdf5ec98..9e917e8d 100644 --- a/tensor/tests/composition.rs +++ b/tensor/tests/composition.rs @@ -20,6 +20,11 @@ fn test_ones_and_zeros() { assert_eq!(a.stride(), b.stride()); assert_eq!(a, Tensor::ones(shape)); assert_eq!(b, Tensor::zeros(shape)); + + use num::traits::{One, Zero}; + + assert!(Tensor::::one().is_scalar()); + assert!(Tensor::::zero().is_scalar()); } #[test] diff --git a/tensor/tests/linalg.rs b/tensor/tests/linalg.rs new file mode 100644 index 00000000..af3982d8 --- /dev/null +++ b/tensor/tests/linalg.rs @@ -0,0 +1,56 @@ +/* + Appellation: linalg + Contrib: FL03 +*/ +#![cfg(test)] +extern crate acme_tensor as acme; + +use acme::prelude::{Matmul, Shape, Tensor}; + +macro_rules! adiff { + ($a:expr, $b:expr) => { + ($a - $b).abs() + }; +} +macro_rules! 
assert_diff { + ($a:expr, $b:expr, $tol:expr) => { + let diff = adiff!($a, $b); + assert!( + diff < $tol, + "the difference ({}) between {} and {} exceeds the allowed tolerance", + diff, + $a, + $b + ); + }; + ($a:expr, $b:expr) => { + assert_diff!($a, $b, 1e-10); + }; +} + +#[ignore = "not implemented"] +#[test] +fn test_inverse() { + let shape = Shape::from((2, 2)); + let arr: Vec = vec![1.0, 4.0, 3.0, 2.0]; + let tensor = Tensor::from_vec(false, None, shape.clone(), arr); + let inv_arr = vec![-0.2, 0.4, 0.3, -0.1]; + let exp = Tensor::from_vec(false, None, shape.clone(), inv_arr); + + let inverse = tensor.inv().unwrap(); + + for i in 0..shape.nrows() { + for j in 0..shape.ncols() { + assert_diff!(inverse[[i, j]], exp[[i, j]]); + } + } +} + +#[test] +fn test_matmul() { + let a = Tensor::::fill((3, 2), 2_f64); + let b = Tensor::::ones((2, 3)); + let c = a.matmul(&b); + + assert_eq!(c, Tensor::::fill((3, 3), 4.0)); +} diff --git a/tensor/tests/tensor.rs b/tensor/tests/tensor.rs index 97221e5f..c73be853 100644 --- a/tensor/tests/tensor.rs +++ b/tensor/tests/tensor.rs @@ -5,7 +5,7 @@ #![cfg(test)] extern crate acme_tensor as acme; -use acme::prelude::Tensor; +use acme::prelude::{IntoShape, Tensor}; #[test] fn test_tensor() { @@ -19,6 +19,19 @@ fn test_tensor() { assert_eq!(a.stride(), b.stride()); } +#[test] +fn test_index() { + let shape = (2, 3).into_shape(); + let n = shape.size(); + let a = Tensor::::linspace(0f64, n as f64, n) + .reshape(shape) + .unwrap(); + + assert_eq!(a[[0, 0]], 0f64); + assert_eq!(a[[0, 1]], 1f64); + assert_eq!(a[[1, 2]], 5f64); +} + #[test] fn test_higher_dim() { let shape = (2, 2, 2, 2); From 04c4ffebfd4dde174a81456d8dabc30fc8c3a7a4 Mon Sep 17 00:00:00 2001 From: Joe McCain III Date: Sun, 31 Mar 2024 16:03:17 -0500 Subject: [PATCH 72/87] update Signed-off-by: Joe McCain III --- core/src/{errors/error.rs => error/err.rs} | 0 core/src/{errors => error}/kinds/external.rs | 0 core/src/{errors => error}/kinds/mod.rs | 3 +- .../{errors => error}/kinds/propagation.rs | 12 ++++- core/src/{errors => error}/kinds/standard.rs | 4 +- core/src/error/kinds/types.rs | 50 +++++++++++++++++++ core/src/{errors => error}/mod.rs | 4 +- core/src/lib.rs | 4 +- core/src/specs/prop.rs | 2 +- tensor/src/actions/iter/strides.rs | 3 +- tensor/src/backend/devices.rs | 28 +++++++++++ tensor/src/{store => data}/layout.rs | 0 tensor/src/data/mod.rs | 4 +- tensor/src/impls/create.rs | 17 ++++--- tensor/src/lib.rs | 3 -- tensor/src/linalg/mod.rs | 8 +-- tensor/src/specs/moves.rs | 5 ++ tensor/src/specs/ndtensor.rs | 3 +- tensor/src/store/mod.rs | 13 ----- tensor/src/tensor.rs | 3 +- tensor/src/types/dtype.rs | 47 +++++++++++++---- 21 files changed, 159 insertions(+), 54 deletions(-) rename core/src/{errors/error.rs => error/err.rs} (100%) rename core/src/{errors => error}/kinds/external.rs (100%) rename core/src/{errors => error}/kinds/mod.rs (90%) rename core/src/{errors => error}/kinds/propagation.rs (84%) rename core/src/{errors => error}/kinds/standard.rs (89%) create mode 100644 core/src/error/kinds/types.rs rename core/src/{errors => error}/mod.rs (78%) rename tensor/src/{store => data}/layout.rs (100%) delete mode 100644 tensor/src/store/mod.rs diff --git a/core/src/errors/error.rs b/core/src/error/err.rs similarity index 100% rename from core/src/errors/error.rs rename to core/src/error/err.rs diff --git a/core/src/errors/kinds/external.rs b/core/src/error/kinds/external.rs similarity index 100% rename from core/src/errors/kinds/external.rs rename to core/src/error/kinds/external.rs diff 
--git a/core/src/errors/kinds/mod.rs b/core/src/error/kinds/mod.rs similarity index 90% rename from core/src/errors/kinds/mod.rs rename to core/src/error/kinds/mod.rs index 5af05b81..4417ddcc 100644 --- a/core/src/errors/kinds/mod.rs +++ b/core/src/error/kinds/mod.rs @@ -2,11 +2,12 @@ Appellation: kinds Contrib: FL03 */ -pub use self::{external::*, propagation::*, standard::*}; +pub use self::{external::*, propagation::*, standard::*, types::*}; pub(crate) mod external; pub(crate) mod propagation; pub(crate) mod standard; +pub(crate) mod types; #[cfg(feature = "serde")] use serde::{Deserialize, Serialize}; diff --git a/core/src/errors/kinds/propagation.rs b/core/src/error/kinds/propagation.rs similarity index 84% rename from core/src/errors/kinds/propagation.rs rename to core/src/error/kinds/propagation.rs index 3cf48b0c..4adfab27 100644 --- a/core/src/errors/kinds/propagation.rs +++ b/core/src/error/kinds/propagation.rs @@ -5,7 +5,7 @@ use super::ErrorType; #[cfg(feature = "serde")] use serde::{Deserialize, Serialize}; -use strum::{Display, EnumCount, EnumIs, VariantNames}; +use strum::{Display, EnumCount, EnumIs, EnumIter, EnumString, VariantNames}; #[derive( Clone, @@ -31,6 +31,8 @@ pub enum ModuleError { Predict(PredictError), } +impl std::error::Error for ModuleError {} + impl ErrorType for ModuleError { type Kind = ModuleError; @@ -50,6 +52,8 @@ impl ErrorType for ModuleError { Display, EnumCount, EnumIs, + EnumIter, + EnumString, Eq, Hash, Ord, @@ -68,6 +72,8 @@ pub enum PredictError { NumericalError, } +impl std::error::Error for PredictError {} + #[derive( Clone, Copy, @@ -75,6 +81,8 @@ pub enum PredictError { Display, EnumCount, EnumIs, + EnumIter, + EnumString, Eq, Hash, Ord, @@ -92,3 +100,5 @@ pub enum GradientError { Backward, Forward, } + +impl std::error::Error for GradientError {} diff --git a/core/src/errors/kinds/standard.rs b/core/src/error/kinds/standard.rs similarity index 89% rename from core/src/errors/kinds/standard.rs rename to core/src/error/kinds/standard.rs index e0159354..6fba6060 100644 --- a/core/src/errors/kinds/standard.rs +++ b/core/src/error/kinds/standard.rs @@ -4,7 +4,7 @@ */ #[cfg(feature = "serde")] use serde::{Deserialize, Serialize}; -use strum::{Display, EnumCount, EnumIs, EnumIter, VariantNames}; +use strum::{Display, EnumCount, EnumIs, EnumIter, EnumString, VariantNames}; #[derive( Clone, @@ -15,6 +15,7 @@ use strum::{Display, EnumCount, EnumIs, EnumIter, VariantNames}; EnumCount, EnumIs, EnumIter, + EnumString, Eq, Hash, Ord, @@ -44,6 +45,7 @@ pub enum StdError { EnumCount, EnumIs, EnumIter, + EnumString, Eq, Hash, Ord, diff --git a/core/src/error/kinds/types.rs b/core/src/error/kinds/types.rs new file mode 100644 index 00000000..ff98a04d --- /dev/null +++ b/core/src/error/kinds/types.rs @@ -0,0 +1,50 @@ +/* + Appellation: types + Contrib: FL03 +*/ +use crate::error::ErrorType; +#[cfg(feature = "serde")] +use serde::{Deserialize, Serialize}; +use strum::{Display, EnumCount, EnumIs, EnumIter, EnumString, VariantNames}; + +#[derive( + Clone, + Copy, + Debug, + Display, + EnumCount, + EnumIs, + EnumString, + EnumIter, + Eq, + Hash, + Ord, + PartialEq, + PartialOrd, + VariantNames, +)] +#[cfg_attr( + feature = "serde", + derive(Deserialize, Serialize,), + serde(rename_all = "snake_case") +)] +#[strum(serialize_all = "snake_case")] +pub enum TypeError { + ConversionError, + InferenceError, + InvalidType, +} + +impl std::error::Error for TypeError {} + +impl ErrorType for TypeError { + type Kind = TypeError; + + fn kind(&self) -> &Self::Kind { + self 
+ } + + fn name(&self) -> String { + self.to_string() + } +} diff --git a/core/src/errors/mod.rs b/core/src/error/mod.rs similarity index 78% rename from core/src/errors/mod.rs rename to core/src/error/mod.rs index 6899a4c1..cb285938 100644 --- a/core/src/errors/mod.rs +++ b/core/src/error/mod.rs @@ -5,9 +5,9 @@ //! # Errors //! //! -pub use self::{error::*, kinds::*}; +pub use self::{err::*, kinds::*}; -pub(crate) mod error; +pub(crate) mod err; pub(crate) mod kinds; pub type Result = std::result::Result; diff --git a/core/src/lib.rs b/core/src/lib.rs index c03e52ac..6d6e4bde 100644 --- a/core/src/lib.rs +++ b/core/src/lib.rs @@ -11,7 +11,7 @@ extern crate alloc; #[macro_use] pub(crate) mod seal; -pub mod errors; +pub mod error; pub mod eval; pub mod id; pub mod math; @@ -20,7 +20,7 @@ pub mod specs; pub mod types; pub mod prelude { - pub use crate::errors::*; + pub use crate::error::*; pub use crate::eval::*; pub use crate::id::*; pub use crate::ops::prelude::*; diff --git a/core/src/specs/prop.rs b/core/src/specs/prop.rs index 26421931..2a44787f 100644 --- a/core/src/specs/prop.rs +++ b/core/src/specs/prop.rs @@ -2,7 +2,7 @@ Appellation: prop Contrib: FL03 */ -use crate::errors::PredictError; +use crate::error::PredictError; /// [Backward] describes an object capable of backward propagation. /// diff --git a/tensor/src/actions/iter/strides.rs b/tensor/src/actions/iter/strides.rs index b115a9c1..38100c20 100644 --- a/tensor/src/actions/iter/strides.rs +++ b/tensor/src/actions/iter/strides.rs @@ -2,8 +2,7 @@ Appellation: stride Contrib: FL03 */ -use crate::shape::{Shape, Stride}; -use crate::store::Layout; +use crate::prelude::{Layout, Shape, Stride}; use crate::tensor::TensorBase; pub struct StrideIter<'a, T> { diff --git a/tensor/src/backend/devices.rs b/tensor/src/backend/devices.rs index 92dd35e1..79b28a63 100644 --- a/tensor/src/backend/devices.rs +++ b/tensor/src/backend/devices.rs @@ -2,8 +2,36 @@ Appellation: devices Contrib: FL03 */ +#[cfg(feature = "serde")] +use serde::{Deserialize, Serialize}; +use strum::{Display, EnumCount, EnumIs, EnumIter, EnumString, VariantNames}; +#[cfg_attr( + feature = "serde", + derive(Deserialize, Serialize,), + serde(rename_all = "lowercase", untagged) +)] +#[derive( + Clone, + Copy, + Debug, + Default, + Display, + EnumCount, + EnumIs, + EnumIter, + EnumString, + Eq, + Hash, + Ord, + PartialEq, + PartialOrd, + VariantNames, +)] +#[repr(C)] +#[strum(serialize_all = "lowercase")] pub enum Device { + #[default] CPU, Cuda, } diff --git a/tensor/src/store/layout.rs b/tensor/src/data/layout.rs similarity index 100% rename from tensor/src/store/layout.rs rename to tensor/src/data/layout.rs diff --git a/tensor/src/data/mod.rs b/tensor/src/data/mod.rs index 59c86795..ee308166 100644 --- a/tensor/src/data/mod.rs +++ b/tensor/src/data/mod.rs @@ -6,9 +6,10 @@ //! //! pub(crate) use self::utils::*; -pub use self::{container::*, specs::*}; +pub use self::{container::*, layout::*, specs::*}; pub(crate) mod container; +pub(crate) mod layout; pub(crate) mod specs; pub mod elem; @@ -51,6 +52,7 @@ pub(crate) mod utils { } pub(crate) mod prelude { + pub use super::layout::Layout; pub use super::repr::*; pub use super::specs::*; } diff --git a/tensor/src/impls/create.rs b/tensor/src/impls/create.rs index acedd2d0..a8aa220c 100644 --- a/tensor/src/impls/create.rs +++ b/tensor/src/impls/create.rs @@ -11,6 +11,14 @@ impl TensorBase where T: Clone, { + /// Create a new tensor, whose elements are set to their default value + /// from the current shape. 
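// What the strum derives added in this patch (on `TypeError`, `Device`, and
// the propagation errors) provide: `Display` renders the configured
// snake_case name and `EnumString` parses it back. A self-contained sketch,
// assuming strum with its "derive" feature; `Demo` is a stand-in enum:
use std::str::FromStr;
use strum::{Display, EnumString};

#[derive(Debug, Display, EnumString, PartialEq)]
#[strum(serialize_all = "snake_case")]
enum Demo {
    InvalidType,
    ConversionError,
}

fn main() {
    assert_eq!(Demo::InvalidType.to_string(), "invalid_type");
    assert_eq!(Demo::from_str("conversion_error"), Ok(Demo::ConversionError));
}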
+ pub fn default_like(&self) -> Self + where + T: Default, + { + Self::fill(self.shape(), T::default()) + } /// Create an empty tensor from the given shape pub fn empty(shape: impl IntoShape) -> Self where @@ -24,12 +32,9 @@ where let store = vec![value; shape.size()]; from_vec(false, shape, store) } - - pub fn default_like(&self) -> Self - where - T: Default, - { - Self::fill(self.shape().clone(), T::default()) + /// Create a tensor, filled with some value, from the current shape + pub fn fill_like(&self, value: T) -> Self { + Self::fill(self.shape(), value) } } diff --git a/tensor/src/lib.rs b/tensor/src/lib.rs index 0d199bda..a404639c 100644 --- a/tensor/src/lib.rs +++ b/tensor/src/lib.rs @@ -31,7 +31,6 @@ pub mod ops; pub mod shape; pub mod specs; pub mod stats; -pub mod store; pub mod types; mod impls { @@ -64,8 +63,6 @@ pub mod prelude { #[doc(inline)] pub use crate::specs::prelude::*; #[doc(inline)] - pub use crate::store::*; - #[doc(inline)] pub use crate::types::prelude::*; pub use crate::utils::*; #[doc(inline)] diff --git a/tensor/src/linalg/mod.rs b/tensor/src/linalg/mod.rs index c36758ae..edb3352e 100644 --- a/tensor/src/linalg/mod.rs +++ b/tensor/src/linalg/mod.rs @@ -7,8 +7,6 @@ //! pub mod uplo; -use crate::shape::Axis; - pub trait Inverse { fn inv(&self) -> Self; } @@ -20,13 +18,9 @@ pub trait Matmul { fn matmul(&self, rhs: &Rhs) -> Self::Output; } -pub trait SwapAxes { - fn swap_axes(&self, swap: Axis, with: Axis) -> Self; -} - pub(crate) mod prelude { pub use super::uplo::UPLO; - pub use super::{Inverse, Matmul, SwapAxes}; + pub use super::{Inverse, Matmul}; } #[cfg(test)] diff --git a/tensor/src/specs/moves.rs b/tensor/src/specs/moves.rs index 6b7efe21..efa028d0 100644 --- a/tensor/src/specs/moves.rs +++ b/tensor/src/specs/moves.rs @@ -2,6 +2,7 @@ Appellation: reshape Contrib: FL03 */ +use crate::shape::Axis; pub trait Swap { type Key; @@ -16,3 +17,7 @@ impl Swap for [T] { self.swap(swap, with); } } + +pub trait SwapAxes { + fn swap_axes(&self, swap: Axis, with: Axis) -> Self; +} diff --git a/tensor/src/specs/ndtensor.rs b/tensor/src/specs/ndtensor.rs index 34f8d656..16860150 100644 --- a/tensor/src/specs/ndtensor.rs +++ b/tensor/src/specs/ndtensor.rs @@ -2,9 +2,8 @@ Appellation: ndtensor Contrib: FL03 */ -use crate::prelude::TensorId; +use crate::prelude::{Layout, TensorId}; use crate::shape::{Rank, Shape, Stride}; -use crate::store::Layout; pub trait NdTensor { type Data: TensorData; diff --git a/tensor/src/store/mod.rs b/tensor/src/store/mod.rs deleted file mode 100644 index bd561ef2..00000000 --- a/tensor/src/store/mod.rs +++ /dev/null @@ -1,13 +0,0 @@ -/* - Appellation: store - Contrib: FL03 -*/ -//! # Store -//! -//! This module provides the storage and layout for the tensor data structure. 
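// Usage sketch for the reorganized `*_like` constructors above; assumes the
// crate's `Tensor` alias plus the `ones`/`zeros` constructors used elsewhere
// in this series:
fn like_demo() {
    let a = Tensor::<f64>::ones((2, 3));
    let b = a.fill_like(4f64); // same shape as `a`, every element 4.0
    let c = a.default_like(); // same shape as `a`, every element f64::default()
    assert_eq!(b.shape(), a.shape());
    assert_eq!(c, Tensor::<f64>::zeros((2, 3)));
}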
-pub use self::layout::Layout; - -pub(crate) mod layout; - -#[cfg(test)] -mod tests {} diff --git a/tensor/src/tensor.rs b/tensor/src/tensor.rs index 5f08141f..8ded67f3 100644 --- a/tensor/src/tensor.rs +++ b/tensor/src/tensor.rs @@ -3,9 +3,10 @@ Contrib: FL03 */ use crate::actions::iter::StrideIter; +use crate::data::Layout; use crate::ops::{BackpropOp, TensorExpr}; use crate::prelude::{IntoShape, Rank, Shape, TensorId, TensorKind}; -use crate::store::Layout; + use acme::prelude::BinaryOp; use core::iter::Map; use core::ops::{Index, IndexMut}; diff --git a/tensor/src/types/dtype.rs b/tensor/src/types/dtype.rs index 8c881206..878b6c6f 100644 --- a/tensor/src/types/dtype.rs +++ b/tensor/src/types/dtype.rs @@ -2,11 +2,14 @@ Appellation: dtype Contrib: FL03 */ - +use acme::prelude::TypeError; use std::any::TypeId; -pub enum TypeError { - ConversionError, +pub trait TypeOf { + fn of(_value: &T) -> Result + where + T: 'static, + Self: Sized; } pub enum DType { @@ -15,16 +18,16 @@ pub enum DType { } impl DType { - pub fn from_type(_value: &T) -> Result + pub fn of(val: &T) -> Result where T: 'static, { - if let Ok(float) = Float::from_type(_value) { + if let Ok(float) = Float::from_type(val) { Ok(DType::Float(float)) - } else if let Ok(integer) = Integer::from_type(_value) { + } else if let Ok(integer) = Integer::from_type(val) { Ok(DType::Integer(integer)) } else { - Err(()) + Err(TypeError::InvalidType) } } } @@ -35,7 +38,7 @@ pub enum Float { } impl Float { - pub fn from_type(_value: &T) -> Result + pub fn from_type(_value: &T) -> Result where T: 'static, { @@ -44,7 +47,7 @@ impl Float { } else if TypeId::of::() == TypeId::of::() { Ok(Float::F64) } else { - Err(()) + Err(TypeError::InvalidType) } } } @@ -67,7 +70,7 @@ pub struct Integer { } impl Integer { - pub fn from_type(_value: &T) -> Result + pub fn from_type(_value: &T) -> Result where T: 'static, { @@ -122,7 +125,7 @@ impl Integer { signed: false, }) } else { - Err(()) + Err(TypeError::InvalidType) } } } @@ -136,3 +139,25 @@ pub enum NumBits { B128 = 128, BSize, } + +macro_rules! impl_from_bits { + ($v:ident, $t:ty) => { + impl From<$t> for NumBits { + fn from(_: $t) -> Self { + NumBits::$v + } + } + }; + ($v:ident: [$($t:ty),*]) => { + $( + impl_from_bits!($v, $t); + )* + }; +} + +impl_from_bits!(B8: [u8, i8]); +impl_from_bits!(B16: [u16, i16]); +impl_from_bits!(B32: [u32, i32]); +impl_from_bits!(B64: [u64, i64]); +impl_from_bits!(B128: [u128, i128]); +impl_from_bits!(BSize: [usize, isize]); From 49ca0df6ab767bb0da78085a22d07bc5a0250635 Mon Sep 17 00:00:00 2001 From: Joe McCain III Date: Sun, 31 Mar 2024 16:20:11 -0500 Subject: [PATCH 73/87] update Signed-off-by: Joe McCain III --- tensor/src/backend/mod.rs | 63 ++++----------------------------- tensor/src/types/mod.rs | 21 ++++++----- tensor/src/types/tensors.rs | 69 +++++++++++++++++++++++++++++++++++++ 3 files changed, 88 insertions(+), 65 deletions(-) create mode 100644 tensor/src/types/tensors.rs diff --git a/tensor/src/backend/mod.rs b/tensor/src/backend/mod.rs index c275f8a9..f1182dbe 100644 --- a/tensor/src/backend/mod.rs +++ b/tensor/src/backend/mod.rs @@ -5,74 +5,25 @@ //! # Backend //! //! 
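// The rewritten `DType::of` dispatches on `std::any::TypeId`, which needs
// only a `T: 'static` bound; the same pattern in isolation:
use std::any::TypeId;

fn is_f64<T: 'static>(_value: &T) -> bool {
    TypeId::of::<T>() == TypeId::of::<f64>()
}

fn main() {
    assert!(is_f64(&1.0_f64));
    assert!(!is_f64(&1_u8));
}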
-pub use self::devices::Device; +pub use self::devices::*; pub(crate) mod devices; pub mod cpu; -use crate::shape::Rank; -use crate::tensor::TensorBase; - -#[derive(Clone, Debug, Eq, PartialEq)] -pub enum Tensors { - Scalar(T), - Tensor(TensorBase), -} - -impl Tensors { - pub fn scalar(scalar: T) -> Self { - Self::Scalar(scalar) - } - - pub fn tensor(tensor: TensorBase) -> Self { - Self::Tensor(tensor) - } - - pub fn is_scalar(&self) -> bool { - match self { - Self::Scalar(_) => true, - _ => false, - } - } - - pub fn rank(&self) -> Rank { - match self { - Self::Tensor(tensor) => tensor.rank(), - _ => Rank::scalar(), - } - } -} - -impl From> for Tensors -where - T: Clone, -{ - fn from(tensor: TensorBase) -> Self { - if tensor.rank().is_scalar() { - Self::Scalar(tensor.data()[0].clone()) - } else { - Self::Tensor(tensor) - } - } -} - pub trait Backend {} pub trait BackendStorage { type Backend: Backend; } +#[allow(unused_imports)] +pub(crate) mod prelude { + pub use super::{Backend, BackendStorage}; + pub use super::devices::Device; +} + #[cfg(test)] mod tests { - use super::*; - - #[test] - fn test_tensor_type() { - let shape = (2, 3); - let tensor = TensorBase::::ones(shape); - let item = Tensors::tensor(tensor); - assert_eq!(item.rank(), Rank::from(2)); - } } diff --git a/tensor/src/types/mod.rs b/tensor/src/types/mod.rs index b84c7a2f..9852fcfe 100644 --- a/tensor/src/types/mod.rs +++ b/tensor/src/types/mod.rs @@ -2,15 +2,18 @@ Appellation: types Contrib: FL03 */ +pub use self::{dtype::*, id::*, kinds::*, order::*, tensors::*}; -pub mod dtype; -pub mod id; -pub mod kinds; -pub mod order; +pub(crate) mod dtype; +pub(crate) mod id; +pub(crate) mod kinds; +pub(crate) mod order; +pub(crate) mod tensors; pub(crate) mod prelude { - pub use super::dtype::DType; - pub use super::id::TensorId; - pub use super::kinds::TensorKind; - pub use super::order::Order; -} + pub use super::dtype::*; + pub use super::id::*; + pub use super::kinds::*; + pub use super::order::*; + pub use super::tensors::*; +} \ No newline at end of file diff --git a/tensor/src/types/tensors.rs b/tensor/src/types/tensors.rs new file mode 100644 index 00000000..0a762ab7 --- /dev/null +++ b/tensor/src/types/tensors.rs @@ -0,0 +1,69 @@ +/* + Appellation: tensors + Contrib: FL03 +*/ +use crate::shape::Rank; +use crate::tensor::TensorBase; +#[cfg(feature = "serde")] +use serde::{Deserialize, Serialize}; +use strum::{Display, EnumCount, EnumDiscriminants, EnumIs, EnumIter, EnumString, VariantNames}; + +#[derive(Clone, Debug, EnumDiscriminants, Eq, PartialEq)] +#[strum_discriminants(derive(Display, EnumCount, EnumIs, EnumIter, EnumString, Hash, Ord, PartialOrd, VariantNames))] +#[strum_discriminants(name(TensorType))] +#[cfg_attr(feature = "serde", strum_discriminants(derive(Deserialize, Serialize)))] +pub enum Tensors { + Scalar(T), + Tensor(TensorBase), +} + +impl Tensors { + pub fn scalar(scalar: T) -> Self { + Self::Scalar(scalar) + } + + pub fn tensor(tensor: TensorBase) -> Self { + Self::Tensor(tensor) + } + + pub fn is_scalar(&self) -> bool { + match self { + Self::Scalar(_) => true, + _ => false, + } + } + + pub fn rank(&self) -> Rank { + match self { + Self::Tensor(tensor) => tensor.rank(), + _ => Rank::scalar(), + } + } +} + +impl From> for Tensors +where + T: Clone, +{ + fn from(tensor: TensorBase) -> Self { + if tensor.rank().is_scalar() { + Self::Scalar(tensor.data()[0].clone()) + } else { + Self::Tensor(tensor) + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_tensor_type() { + let shape = 
(2, 3); + let tensor = TensorBase::::ones(shape); + let item = Tensors::tensor(tensor); + + assert_eq!(item.rank(), Rank::from(2)); + } +} From eb44562ea8d3b7708b4acd0813b6083baf12257a Mon Sep 17 00:00:00 2001 From: Joe McCain III Date: Sun, 31 Mar 2024 18:26:50 -0500 Subject: [PATCH 74/87] update Signed-off-by: Joe McCain III --- tensor/src/actions/index/mod.rs | 4 +++ tensor/src/actions/iter/mod.rs | 9 ++++-- tensor/src/actions/iter/strides.rs | 49 ++++++++++++++++++++++++++++ tensor/src/impls/linalg.rs | 48 +++++++++++++++++++++++----- tensor/src/shape/dim/mod.rs | 33 +++++++++++++++++-- tensor/src/shape/shape.rs | 51 +++++++++++++++++------------- tensor/src/tensor.rs | 23 ++++++++++++-- 7 files changed, 178 insertions(+), 39 deletions(-) diff --git a/tensor/src/actions/index/mod.rs b/tensor/src/actions/index/mod.rs index 51638c91..88457c25 100644 --- a/tensor/src/actions/index/mod.rs +++ b/tensor/src/actions/index/mod.rs @@ -11,6 +11,10 @@ pub(crate) mod slice; use crate::tensor::TensorBase; +pub type Ix = usize; + +pub type Ixs = isize; + pub enum IndexItem { Scalar(T), Tensor(TensorBase), diff --git a/tensor/src/actions/iter/mod.rs b/tensor/src/actions/iter/mod.rs index 0772e215..6ed570ff 100644 --- a/tensor/src/actions/iter/mod.rs +++ b/tensor/src/actions/iter/mod.rs @@ -50,10 +50,13 @@ mod tests { let shape = Shape::from_iter([2, 2]); let n = shape.size(); let exp = Vec::linspace(0f64, n as f64, n); + let _rev = exp.iter().rev().copied().collect::>(); let tensor = Tensor::linspace(0f64, n as f64, n).reshape(shape).unwrap(); - let iter = tensor.strided(); - for (i, idx) in iter.enumerate() { - assert_eq!(idx, &exp[i]); + for (elem, val) in tensor.strided().zip(exp.iter()) { + assert_eq!(elem, val); } + // for (i, elem) in tensor.strided().rev().enumerate() { + // assert_eq!(elem, &rev[i]); + // } } } diff --git a/tensor/src/actions/iter/strides.rs b/tensor/src/actions/iter/strides.rs index 38100c20..48a6bb5b 100644 --- a/tensor/src/actions/iter/strides.rs +++ b/tensor/src/actions/iter/strides.rs @@ -32,6 +32,21 @@ impl<'a, T> Iterator for StrideIter<'a, T> { } } +impl<'a, T> DoubleEndedIterator for StrideIter<'a, T> { + fn next_back(&mut self) -> Option { + // let idx = self.strides.next_back()?; + // self.scope = self.tensor.get_by_index(idx); + // self.scope + unimplemented!() + } +} + +impl<'a, T> From<&'a TensorBase> for StrideIter<'a, T> { + fn from(tensor: &'a TensorBase) -> Self { + Self::new(tensor) + } +} + pub struct Strided<'a> { next: Option, position: Vec, @@ -65,6 +80,40 @@ impl<'a> Strided<'a> { } } +impl<'a> DoubleEndedIterator for Strided<'a> { + fn next_back(&mut self) -> Option { + + let scope = match self.next { + None => return None, + Some(storage_index) => storage_index, + }; + self.position = self.shape.iter().map(|i| i - 1).collect(); + let mut updated = false; + let mut next = scope; + for ((multi_i, max_i), stride_i) in self + .position + .iter_mut() + .zip(self.shape.iter()) + .zip(self.stride.iter()) + .rev() + { + let next_i = *multi_i - 1; + if next_i < *max_i { + *multi_i = next_i; + updated = true; + next -= stride_i; + break; + } else { + next += *multi_i * stride_i; + *multi_i = 0 + } + } + self.next = if updated { Some(next) } else { None }; + // Some(scope) + unimplemented!() + } +} + impl<'a> Iterator for Strided<'a> { type Item = usize; diff --git a/tensor/src/impls/linalg.rs b/tensor/src/impls/linalg.rs index 998c7d78..134cc591 100644 --- a/tensor/src/impls/linalg.rs +++ b/tensor/src/impls/linalg.rs @@ -6,7 +6,7 @@ //! //! 
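// The `Strided` iterator walks the multi-index like an odometer: bump the
// last axis, carry leftward when it overflows, and emit the dot product with
// the strides. A standalone sketch of the forward traversal (the backward
// variant is still being worked out above):
fn strided_offsets(shape: &[usize], stride: &[usize]) -> Vec<usize> {
    let mut out = Vec::new();
    let mut pos = vec![0usize; shape.len()];
    loop {
        out.push(pos.iter().zip(stride).map(|(p, s)| p * s).sum());
        let mut axis = shape.len();
        loop {
            if axis == 0 {
                return out;
            }
            axis -= 1;
            pos[axis] += 1;
            if pos[axis] < shape[axis] {
                break;
            }
            pos[axis] = 0; // exhausted: reset and carry into the next axis
        }
    }
}

fn main() {
    // a transposed (2, 3) view: shape (3, 2) with strides (1, 3)
    assert_eq!(strided_offsets(&[3, 2], &[1, 3]), vec![0, 3, 1, 4, 2, 5]);
}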
use crate::prelude::{Matmul, Scalar, ShapeError, TensorError, TensorExpr, TensorResult}; -use crate::tensor::*; +use crate::tensor::{self, TensorBase}; use acme::prelude::UnaryOp; use num::traits::{Num, Signed}; @@ -27,10 +27,7 @@ where inverse[(i * n) + i] = T::one(); } - let mut permutation = vec![0; n]; - for i in 0..n { - permutation[i] = i; - } + let mut permutation = Vec::::from_iter(0..n); for i in 0..n { let mut max_row = i; @@ -76,7 +73,7 @@ where } } let op = TensorExpr::unary(tensor.clone(), UnaryOp::Inv); - let tensor = from_vec_with_op(false, op, shape, res); + let tensor = tensor::from_vec_with_op(false, op, shape, res); Ok(tensor) } @@ -91,7 +88,42 @@ where let rank = *self.rank(); let store = (0..rank).map(|i| self[vec![i; rank]]).collect::>(); - from_vec(false, self.shape().diagonalize(), store) + tensor::from_vec(false, self.shape().diagonalize(), store) + } + + pub fn det(&self) -> Result { + if !self.shape().is_square() { + return Err(ShapeError::InvalidShape.into()); + } + let shape = self.shape(); + let n = *shape.first().unwrap(); + if n == 1 { + return Ok(T::zero()); + } + if n == 2 { + let res = self[vec![0, 0]] * self[vec![1, 1]] - self[vec![0, 1]] * self[vec![1, 0]]; + return Ok(res); + } + let mut det = T::zero(); + let mut cur_shape = shape.clone(); + for i in 0..n { + let _ = cur_shape.pop(); + let mut sub = vec![T::zero(); (n - 1).pow(2)]; + let mut k = 0; + for j in 0..n { + if j == i { + continue; + } + for l in 1..n { + sub[k] = self[vec![l, j]]; + k += 1; + } + } + let sub_tensor = tensor::from_vec(false, cur_shape.clone(), sub); + let sign = if i % 2 == 0 { T::one() } else { -T::one() }; + det = det + sign * self[vec![0, i]] * sub_tensor.det()?; + } + Ok(det) } pub fn inv(&self) -> TensorResult { inverse(self) @@ -117,6 +149,6 @@ where } } let op = TensorExpr::matmul(self.clone(), other.clone()); - from_vec_with_op(false, op, shape, result) + tensor::from_vec_with_op(false, op, shape, result) } } diff --git a/tensor/src/shape/dim/mod.rs b/tensor/src/shape/dim/mod.rs index 5eae009b..59c80f19 100644 --- a/tensor/src/shape/dim/mod.rs +++ b/tensor/src/shape/dim/mod.rs @@ -9,18 +9,45 @@ pub use self::{dimension::Dim, utils::*}; pub(crate) mod dimension; -pub trait Dimension { +use core::ops::IndexMut; + +pub trait IntoDimension { + type Dim: Dimension; + + fn into_dimension(self) -> Self::Dim; +} + +pub trait Dimension: IndexMut { type Pattern; - fn elements(&self) -> usize; + fn as_slice(&self) -> &[usize]; + + fn rank(&self) -> usize; - fn ndim(&self) -> usize; + fn size(&self) -> usize; + + #[doc(hidden)] + /// Return stride offset for index. 
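// `det` above is a Laplace (cofactor) expansion along the first row; a
// flat-buffer sketch with the 1x1 base case returning the lone element
// (O(n!), so only suitable for small matrices; illustrative names):
fn det(a: &[f64], n: usize) -> f64 {
    if n == 1 {
        return a[0];
    }
    if n == 2 {
        return a[0] * a[3] - a[1] * a[2];
    }
    let mut acc = 0.0;
    for col in 0..n {
        // minor: drop row 0 and column `col`
        let mut sub = Vec::with_capacity((n - 1) * (n - 1));
        for r in 1..n {
            for c in 0..n {
                if c != col {
                    sub.push(a[r * n + c]);
                }
            }
        }
        let sign = if col % 2 == 0 { 1.0 } else { -1.0 };
        acc += sign * a[col] * det(&sub, n - 1);
    }
    acc
}

fn main() {
    assert_eq!(det(&[1.0, 4.0, 3.0, 2.0], 2), -10.0);
    assert_eq!(det(&[2.0, 0.0, 0.0, 0.0, 3.0, 0.0, 0.0, 0.0, 4.0], 3), 24.0);
}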
+ fn stride_offset(index: &Self, strides: &Self) -> isize { + let mut offset = 0; + for (&i, &s) in izip!(index.as_slice(), strides.as_slice()) { + offset += stride_offset(i, s); + } + offset + } } pub(crate) mod utils { + use crate::actions::index::{Ix, Ixs}; use crate::shape::{Shape, ShapeError, Stride}; use core::mem; + /// Calculate offset from `Ix` stride converting sign properly + #[inline(always)] + pub fn stride_offset(n: Ix, stride: Ix) -> isize { + (n as isize) * (stride as Ixs) + } + pub(crate) fn can_index_slice( data: &[A], shape: &Shape, diff --git a/tensor/src/shape/shape.rs b/tensor/src/shape/shape.rs index 2685387b..75a97bf1 100644 --- a/tensor/src/shape/shape.rs +++ b/tensor/src/shape/shape.rs @@ -54,13 +54,6 @@ impl Shape { pub fn diagonalize(&self) -> Shape { Self::new(vec![self.size()]) } - - pub(crate) fn matmul_shape(&self, other: &Self) -> TensorResult { - if *self.rank() != 2 || *other.rank() != 2 || self[1] != other[0] { - return Err(ShapeError::IncompatibleShapes.into()); - } - Ok(Self::from((self[0], other[1]))) - } /// Inserts a new dimension along the given [Axis]. pub fn insert(&mut self, index: Axis, dim: usize) { self.0.insert(*index, dim) @@ -111,6 +104,10 @@ impl Shape { 0 } } + + pub fn pop(&mut self) -> Option { + self.0.pop() + } /// Add a new dimension to the shape. pub fn push(&mut self, dim: usize) { self.0.push(dim) @@ -154,21 +151,6 @@ impl Shape { shape } - pub(crate) fn stride_contiguous(&self) -> Stride { - let mut stride: Vec<_> = self - .0 - .iter() - .rev() - .scan(1, |prod, u| { - let prod_pre_mult = *prod; - *prod *= u; - Some(prod_pre_mult) - }) - .collect(); - stride.reverse(); - stride.into() - } - pub fn upcast(&self, to: &Shape, stride: &Stride) -> Option { let mut new_stride = to.slice().to_vec(); // begin at the back (the least significant dimension) @@ -206,6 +188,31 @@ impl Shape { } } +// Internal methods +impl Shape { + pub(crate) fn matmul_shape(&self, other: &Self) -> TensorResult { + if *self.rank() != 2 || *other.rank() != 2 || self[1] != other[0] { + return Err(ShapeError::IncompatibleShapes.into()); + } + Ok(Self::from((self[0], other[1]))) + } + + pub(crate) fn stride_contiguous(&self) -> Stride { + let mut stride: Vec<_> = self + .0 + .iter() + .rev() + .scan(1, |prod, u| { + let prod_pre_mult = *prod; + *prod *= u; + Some(prod_pre_mult) + }) + .collect(); + stride.reverse(); + stride.into() + } +} + impl AsRef<[usize]> for Shape { fn as_ref(&self) -> &[usize] { &self.0 diff --git a/tensor/src/tensor.rs b/tensor/src/tensor.rs index 8ded67f3..54223fb8 100644 --- a/tensor/src/tensor.rs +++ b/tensor/src/tensor.rs @@ -5,12 +5,16 @@ use crate::actions::iter::StrideIter; use crate::data::Layout; use crate::ops::{BackpropOp, TensorExpr}; -use crate::prelude::{IntoShape, Rank, Shape, TensorId, TensorKind}; +use crate::shape::{IntoShape, Rank, Shape, Stride}; +use crate::prelude::{TensorId, TensorKind}; use acme::prelude::BinaryOp; +#[cfg(not(feature = "std"))] +use alloc::vec::{self, Vec}; use core::iter::Map; use core::ops::{Index, IndexMut}; use core::slice::Iter as SliceIter; +#[cfg(feature = "std")] use std::vec; pub(crate) fn new( @@ -92,7 +96,20 @@ impl TensorBase { store, } } - /// Returns a + + pub fn from_shape_vec( + shape: impl IntoShape, + store: Vec, + ) -> Self { + Self { + id: TensorId::new(), + kind: TensorKind::default(), + layout: Layout::contiguous(shape), + op: BackpropOp::none(), + store, + } + } + /// Get pub fn as_slice(&self) -> &[T] { &self.store } @@ -162,7 +179,7 @@ impl TensorBase { self.layout.size() 
} /// Get a reference to the stride of the tensor - pub fn stride(&self) -> &[usize] { + pub fn stride(&self) -> &Stride { self.layout.stride() } /// Create an iterator over the tensor From 6817069e3d157da2d9949589ef5667ff20eee696 Mon Sep 17 00:00:00 2001 From: Joe McCain III Date: Sun, 31 Mar 2024 18:58:47 -0500 Subject: [PATCH 75/87] update Signed-off-by: Joe McCain III --- acme/Cargo.toml | 4 ++++ acme/benches/default.rs | 4 ++-- acme/benches/tensor.rs | 20 ++++++++++++++++++ core/src/specs/arith.rs | 15 -------------- core/src/specs/gradient.rs | 8 +++----- tensor/src/data/container.rs | 39 +++++++++++++++++++++++++----------- 6 files changed, 56 insertions(+), 34 deletions(-) create mode 100644 acme/benches/tensor.rs diff --git a/acme/Cargo.toml b/acme/Cargo.toml index 47ff1aa2..0e1b20f4 100644 --- a/acme/Cargo.toml +++ b/acme/Cargo.toml @@ -66,6 +66,10 @@ crate-type = ["cdylib", "rlib"] doctest = true test = true +[[bench]] +name = "tensor" +required-features = ["tensor"] + [[example]] doc = true name = "autodiff" diff --git a/acme/benches/default.rs b/acme/benches/default.rs index 318d3e77..b4152bf8 100644 --- a/acme/benches/default.rs +++ b/acme/benches/default.rs @@ -8,7 +8,7 @@ extern crate test; use test::Bencher; // bench: find the `BENCH_SIZE` first terms of the fibonacci sequence -static BENCH_SIZE: usize = 20; +const BENCH_SIZE: usize = 20; // function to benchmark must be annotated with `#[bench]` #[bench] @@ -20,7 +20,7 @@ fn recursive_fibonacci(b: &mut Bencher) { #[bench] fn iterative_fibonacci(b: &mut Bencher) { - b.iter(|| fib::Fibonacci::seq().take(BENCH_SIZE).collect::>()) + b.iter(|| fib::Fibonacci::seq().take(BENCH_SIZE)) } pub mod fib { diff --git a/acme/benches/tensor.rs b/acme/benches/tensor.rs new file mode 100644 index 00000000..ae189667 --- /dev/null +++ b/acme/benches/tensor.rs @@ -0,0 +1,20 @@ +/* + Appellation: tensor + Contrib: FL03 +*/ +#![feature(test)] +extern crate acme; +extern crate test; + +use acme::prelude::{IntoShape, Tensor}; +use test::Bencher; + + + +#[bench] +fn tensor_iter(b: &mut Bencher) { + let shape = (20, 20, 20).into_shape(); + let n = shape.size(); + let tensor = Tensor::linspace(0f64, n as f64, n); + b.iter(|| tensor.strided().take(n)) +} diff --git a/core/src/specs/arith.rs b/core/src/specs/arith.rs index 7762f3a7..9960cfa6 100644 --- a/core/src/specs/arith.rs +++ b/core/src/specs/arith.rs @@ -8,18 +8,3 @@ pub trait Pow { fn pow(&self, exp: T) -> Self::Output; } - -pub trait Powc: Pow { - type Complex; - type Real: Pow; - - fn powc(&self, exp: Self::Complex) -> Self::Output; -} - -pub trait Powi: Pow { - fn powi(&self, exp: T) -> Self::Output; -} - -pub trait Powf: Pow { - fn powf(&self, exp: T) -> Self::Output; -} diff --git a/core/src/specs/gradient.rs b/core/src/specs/gradient.rs index f70325ec..104d35d8 100644 --- a/core/src/specs/gradient.rs +++ b/core/src/specs/gradient.rs @@ -3,8 +3,6 @@ Contrib: FL03 */ -use super::store::Store; - pub trait IsDifferentiable { /// Returns true if the function is differentiable. 
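// Note on the benches above: `Bencher::iter` only black-boxes the closure's
// return value, so handing back a lazy iterator (`.take(n)` without driving
// it) times construction rather than traversal. A sketch that forces the
// walk, against the same harness and imports as `tensor_iter`:
#[bench]
fn tensor_iter_sum(b: &mut Bencher) {
    let shape = (20, 20, 20).into_shape();
    let n = shape.size();
    let tensor = Tensor::linspace(0f64, n as f64, n);
    b.iter(|| tensor.strided().copied().sum::<f64>())
}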
fn differentiable(&self) -> bool; @@ -16,10 +14,10 @@ pub trait Gradient { fn grad(&self, args: T) -> Self::Gradient; } -pub trait Grad { - type Gradient: Store; +pub trait Grad { + type Output; - fn grad(&self) -> Self::Gradient; + fn grad(&self) -> Self::Output; } pub trait Parameter { diff --git a/tensor/src/data/container.rs b/tensor/src/data/container.rs index a5dd70d1..31e12e53 100644 --- a/tensor/src/data/container.rs +++ b/tensor/src/data/container.rs @@ -65,16 +65,6 @@ where None } } - - /// Return true if the array is known to be contiguous. - pub fn is_contiguous(&self) -> bool { - self.layout.is_contiguous() - } - - pub fn is_standard_layout(&self) -> bool { - self.layout.is_layout_c() - } - /// Without any coping, turn the tensor into a shared tensor. pub fn into_shared(self) -> SharedContainer where @@ -84,6 +74,23 @@ where // safe because: equivalent unmoved data, ptr and dims remain valid unsafe { ContainerBase::from_data_ptr(data, self.ptr).with_layout(self.layout) } } + /// Return true if the array is known to be contiguous. + pub fn is_contiguous(&self) -> bool { + self.layout().is_contiguous() + } + /// Return true if the array is known to be c-contiguous (Row Major) + pub fn is_standard_layout(&self) -> bool { + self.layout().is_layout_c() + } + /// + pub fn iter(&self) -> slice::Iter<'_, A> + where + S: Data, + { + dbg!("Implement a custom iter for ContainerBase"); + self.as_slice_memory_order().unwrap().iter() + } + pub fn layout(&self) -> &Layout { &self.layout @@ -103,12 +110,20 @@ where f, ) } else { - unimplemented!() - // BaseTensor::from_shape_trusted_iter_unchecked(self.shape(), self.iter(), f) + ContainerBase::from_shape_trusted_iter_unchecked(self.shape(), self.iter(), f) } } } + pub fn mapv(&self, mut f: F) -> Container + where + F: FnMut(A) -> B, + A: Clone, + S: Data, + { + self.map(move |x| f(x.clone())) + } + pub fn shape(&self) -> &Shape { self.layout().shape() } From 083fe2807518833fc6303d904a6361ced4b06725 Mon Sep 17 00:00:00 2001 From: Joe McCain III Date: Tue, 2 Apr 2024 10:31:56 -0500 Subject: [PATCH 76/87] update Signed-off-by: Joe McCain III --- tensor/src/actions/iter/indexed.rs | 39 ++++++++++++ tensor/src/actions/iter/mod.rs | 20 +++++-- tensor/src/actions/iter/strides.rs | 34 +++++------ tensor/src/data/layout.rs | 24 ++++---- tensor/src/error.rs | 1 + tensor/src/impls/ops/unary.rs | 18 +++--- tensor/src/ops/backprop.rs | 2 +- tensor/src/ops/kinds/reshape.rs | 2 +- tensor/src/ops/op.rs | 2 +- tensor/src/stats/mod.rs | 4 +- tensor/src/tensor.rs | 96 ++++++++++++++++++++++-------- tensor/tests/arith.rs | 6 +- tensor/tests/backward.rs | 6 +- tensor/tests/linalg.rs | 4 +- tensor/tests/reshape.rs | 2 +- tensor/tests/tensor.rs | 19 ++++++ 16 files changed, 196 insertions(+), 83 deletions(-) create mode 100644 tensor/src/actions/iter/indexed.rs diff --git a/tensor/src/actions/iter/indexed.rs b/tensor/src/actions/iter/indexed.rs new file mode 100644 index 00000000..0a360a0b --- /dev/null +++ b/tensor/src/actions/iter/indexed.rs @@ -0,0 +1,39 @@ +/* + Appellation: stride + Contrib: FL03 +*/ +use super::Strided; +use crate::tensor::TensorBase; + +pub struct IndexedIter<'a, T> { + scope: Option<&'a T>, + strides: Strided<'a>, + tensor: &'a TensorBase, +} + +impl<'a, T> IndexedIter<'a, T> { + pub fn new(tensor: &'a TensorBase) -> Self { + let strides = Strided::from(tensor.layout()); + Self { + scope: None, + strides, + tensor, + } + } +} + +impl<'a, T> Iterator for IndexedIter<'a, T> { + type Item = &'a T; + + fn next(&mut self) -> Option { + let 
(_pos, idx) = self.strides.next()?; + self.scope = self.tensor.get_by_index(idx); + self.scope + } +} + +impl<'a, T> From<&'a TensorBase> for IndexedIter<'a, T> { + fn from(tensor: &'a TensorBase) -> Self { + Self::new(tensor) + } +} diff --git a/tensor/src/actions/iter/mod.rs b/tensor/src/actions/iter/mod.rs index 6ed570ff..2447409c 100644 --- a/tensor/src/actions/iter/mod.rs +++ b/tensor/src/actions/iter/mod.rs @@ -5,8 +5,9 @@ //! # Iter //! //! -pub use self::{iterator::*, strides::*, utils::*}; +pub use self::{indexed::*, iterator::*, strides::*, utils::*}; +pub(crate) mod indexed; pub(crate) mod iterator; pub(crate) mod strides; @@ -50,13 +51,22 @@ mod tests { let shape = Shape::from_iter([2, 2]); let n = shape.size(); let exp = Vec::linspace(0f64, n as f64, n); - let _rev = exp.iter().rev().copied().collect::>(); let tensor = Tensor::linspace(0f64, n as f64, n).reshape(shape).unwrap(); for (elem, val) in tensor.strided().zip(exp.iter()) { assert_eq!(elem, val); } - // for (i, elem) in tensor.strided().rev().enumerate() { - // assert_eq!(elem, &rev[i]); - // } + } + + #[test] + #[ignore = "not implemented"] + fn test_strided_rev() { + let shape = Shape::from_iter([2, 2]); + let n = shape.size(); + let exp = Vec::linspace(0f64, n as f64, n); + let tensor = Tensor::linspace(0f64, n as f64, n).reshape(shape).unwrap(); + + for (i, j) in tensor.strided().rev().zip(exp.iter().rev()) { + assert_eq!(i, j); + } } } diff --git a/tensor/src/actions/iter/strides.rs b/tensor/src/actions/iter/strides.rs index 48a6bb5b..1b4ebf4a 100644 --- a/tensor/src/actions/iter/strides.rs +++ b/tensor/src/actions/iter/strides.rs @@ -26,7 +26,7 @@ impl<'a, T> Iterator for StrideIter<'a, T> { type Item = &'a T; fn next(&mut self) -> Option { - let idx = self.strides.next()?; + let (_pos, idx) = self.strides.next()?; self.scope = self.tensor.get_by_index(idx); self.scope } @@ -34,10 +34,9 @@ impl<'a, T> Iterator for StrideIter<'a, T> { impl<'a, T> DoubleEndedIterator for StrideIter<'a, T> { fn next_back(&mut self) -> Option { - // let idx = self.strides.next_back()?; - // self.scope = self.tensor.get_by_index(idx); - // self.scope - unimplemented!() + let (_pos, idx) = self.strides.next_back()?; + self.scope = self.tensor.get_by_index(idx); + self.scope } } @@ -87,35 +86,34 @@ impl<'a> DoubleEndedIterator for Strided<'a> { None => return None, Some(storage_index) => storage_index, }; - self.position = self.shape.iter().map(|i| i - 1).collect(); let mut updated = false; let mut next = scope; - for ((multi_i, max_i), stride_i) in self + for ((pos, max_i), stride) in self .position .iter_mut() .zip(self.shape.iter()) .zip(self.stride.iter()) - .rev() { - let next_i = *multi_i - 1; - if next_i < *max_i { - *multi_i = next_i; + let next_i = *pos - 1; + if next_i > *max_i { + *pos = next_i; updated = true; - next -= stride_i; + next -= stride; break; } else { - next += *multi_i * stride_i; - *multi_i = 0 + next += *pos * stride; + *pos = 0 } } self.next = if updated { Some(next) } else { None }; - // Some(scope) - unimplemented!() + println!("{:?}", &self.position); + Some((self.position.clone(), scope)) + // unimplemented!() } } impl<'a> Iterator for Strided<'a> { - type Item = usize; + type Item = (Vec, usize); fn next(&mut self) -> Option { let scope = match self.next { @@ -143,7 +141,7 @@ impl<'a> Iterator for Strided<'a> { } } self.next = if updated { Some(next) } else { None }; - Some(scope) + Some((self.position.clone(), scope)) } } diff --git a/tensor/src/data/layout.rs b/tensor/src/data/layout.rs index 
3e38e2af..792231dd 100644 --- a/tensor/src/data/layout.rs +++ b/tensor/src/data/layout.rs @@ -86,6 +86,7 @@ impl Layout { } true } + /// Determine if the current layout is square or not. pub fn is_square(&self) -> bool { self.shape.is_square() } @@ -125,16 +126,16 @@ impl Layout { self.stride.reverse(); self } - - pub fn shape(&self) -> &Shape { + /// Get a reference to the shape of the layout. + pub const fn shape(&self) -> &Shape { &self.shape } - + /// Get a reference to the number of elements in the layout. pub fn size(&self) -> usize { self.shape.size() } - - pub fn stride(&self) -> &Stride { + /// Get a reference to the stride of the layout. + pub const fn stride(&self) -> &Stride { &self.stride } @@ -146,14 +147,11 @@ impl Layout { } } - pub fn transpose(&self, a: Axis, b: Axis) -> Layout { - let shape = self.shape.swap_axes(a, b); - let stride = shape.stride_contiguous(); - Layout { - offset: self.offset, - shape, - stride, - } + pub fn transpose(&self) -> Layout { + let mut layout = self.clone(); + layout.shape.reverse(); + layout.stride.reverse(); + layout } pub fn with_offset(mut self, offset: usize) -> Self { diff --git a/tensor/src/error.rs b/tensor/src/error.rs index 4ab00580..f323e036 100644 --- a/tensor/src/error.rs +++ b/tensor/src/error.rs @@ -24,6 +24,7 @@ pub enum TensorError { Indexing(String), Shape(ShapeError), Singular, + NotScalar, } unsafe impl Send for TensorError {} diff --git a/tensor/src/impls/ops/unary.rs b/tensor/src/impls/ops/unary.rs index 22226d93..5216fe64 100644 --- a/tensor/src/impls/ops/unary.rs +++ b/tensor/src/impls/ops/unary.rs @@ -56,8 +56,8 @@ where type Output = TensorBase; fn not(self) -> Self::Output { - let shape = self.shape().clone(); - let store = self.data().iter().copied().map(|a| !a).collect(); + let shape = self.shape(); + let store = self.store.iter().copied().map(|a| !a).collect(); let op = TensorExpr::unary(self.clone(), UnaryOp::Not); from_vec_with_op(false, op, shape, store) } @@ -65,10 +65,10 @@ where macro_rules! 
impl_unary_op { ($variant:ident, $method:ident) => { - pub fn $method(self) -> Self { - let shape = self.shape().clone(); - let store = self.store.iter().map(|v| v.$method()).collect(); - let op = TensorExpr::unary(self, UnaryOp::$variant); + pub fn $method(&self) -> Self { + let shape = self.shape(); + let store = self.store.iter().copied().map(|v| v.$method()).collect(); + let op = TensorExpr::unary(self.clone(), UnaryOp::$variant); from_vec_with_op(false, op, shape, store) } }; @@ -86,13 +86,13 @@ impl TensorBase where T: Scalar, { - pub fn abs(self) -> TensorBase<::Real> + pub fn abs(&self) -> TensorBase<::Real> where T: Scalar, { - let shape = self.shape().clone(); + let shape = self.shape(); let store = self.store.iter().copied().map(|v| v.abs()).collect(); - let op = TensorExpr::unary(self, UnaryOp::Abs); + let op = TensorExpr::unary(self.clone(), UnaryOp::Abs); from_vec_with_op(false, op, shape, store) } diff --git a/tensor/src/ops/backprop.rs b/tensor/src/ops/backprop.rs index 29d0533c..28048b6c 100644 --- a/tensor/src/ops/backprop.rs +++ b/tensor/src/ops/backprop.rs @@ -8,7 +8,7 @@ use acme::prelude::BinaryOp; use core::borrow::Borrow; use core::ops::{Deref, DerefMut}; -#[derive(Clone, Debug)] +#[derive(Clone, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)] pub struct BackpropOp(Option>); impl BackpropOp { diff --git a/tensor/src/ops/kinds/reshape.rs b/tensor/src/ops/kinds/reshape.rs index 62eb55ca..3f31caba 100644 --- a/tensor/src/ops/kinds/reshape.rs +++ b/tensor/src/ops/kinds/reshape.rs @@ -8,7 +8,7 @@ use crate::shape::Shape; use serde::{Deserialize, Serialize}; use strum::{Display, EnumCount, EnumDiscriminants, EnumIs, EnumIter, EnumString, VariantNames}; -#[derive(Clone, Debug, EnumDiscriminants, Eq, PartialEq)] +#[derive(Clone, Debug, EnumDiscriminants, Eq, Hash, Ord, PartialEq, PartialOrd)] #[repr(u8)] #[strum(serialize_all = "snake_case")] #[strum_discriminants(derive( diff --git a/tensor/src/ops/op.rs b/tensor/src/ops/op.rs index ca85c23b..4c678489 100644 --- a/tensor/src/ops/op.rs +++ b/tensor/src/ops/op.rs @@ -9,7 +9,7 @@ use acme::prelude::{BinaryOp, UnaryOp}; pub type BoxTensor = Box>; -#[derive(Clone, Debug, Eq, PartialEq)] +#[derive(Clone, Debug, Eq, Hash, Ord, PartialEq, PartialOrd,)] #[non_exhaustive] pub enum TensorExpr { Binary(BoxTensor, BoxTensor, BinaryOp), diff --git a/tensor/src/stats/mod.rs b/tensor/src/stats/mod.rs index ca891a1c..4a5407b1 100644 --- a/tensor/src/stats/mod.rs +++ b/tensor/src/stats/mod.rs @@ -14,7 +14,7 @@ pub trait SummaryStatistics { fn median(&self) -> T; /// Returns the minimum value in the collection. fn min(&self) -> T; - + /// Get the mode of the collection. fn mode(&self) -> T; /// Compute the standard deviation fn std(&self) -> T; @@ -23,6 +23,8 @@ pub trait SummaryStatistics { } pub trait TensorStats: SummaryStatistics { + + /// Compute the mean along the specified axis. 
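// Reference semantics for two `SummaryStatistics` items over the flattened
// data, assuming population (1/N) denominators; whether the crate uses 1/N
// or 1/(N-1) is an implementation choice:
fn mean(data: &[f64]) -> f64 {
    data.iter().sum::<f64>() / data.len() as f64
}

fn variance(data: &[f64]) -> f64 {
    let m = mean(data);
    data.iter().map(|x| (x - m).powi(2)).sum::<f64>() / data.len() as f64
}

fn main() {
    let data = [1.0, 2.0, 3.0, 4.0];
    assert_eq!(mean(&data), 2.5);
    assert_eq!(variance(&data), 1.25);
    assert_eq!(variance(&data).sqrt(), 1.25f64.sqrt()); // std
}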
fn mean_axis(&self, axis: Axis) -> T; } diff --git a/tensor/src/tensor.rs b/tensor/src/tensor.rs index 54223fb8..1f0b06ce 100644 --- a/tensor/src/tensor.rs +++ b/tensor/src/tensor.rs @@ -4,9 +4,10 @@ */ use crate::actions::iter::StrideIter; use crate::data::Layout; +use crate::error::{TensorError, TensorResult}; use crate::ops::{BackpropOp, TensorExpr}; -use crate::shape::{IntoShape, Rank, Shape, Stride}; use crate::prelude::{TensorId, TensorKind}; +use crate::shape::{IntoShape, Rank, Shape, Stride}; use acme::prelude::BinaryOp; #[cfg(not(feature = "std"))] @@ -49,8 +50,7 @@ pub(crate) fn from_vec_with_op( new(kind.into(), BackpropOp::new(op), shape, store) } -#[derive(Clone, Debug)] -// #[derive(Clone, Debug, Eq, Hash, Ord, PartialOrd)] +#[derive(Clone, Debug, Hash, Ord, PartialOrd)] pub struct TensorBase { pub(crate) id: TensorId, pub(crate) kind: TensorKind, @@ -71,6 +71,12 @@ impl TensorBase { store, } } + pub fn from_iter(iter: I) -> Self + where + I: IntoIterator, + { + Self::from_vec(Vec::from_iter(iter)) + } /// Create a new tensor from a scalar value. pub fn from_scalar(value: T) -> Self { Self { @@ -81,26 +87,25 @@ impl TensorBase { store: vec![value], } } - - pub fn from_vec( - kind: impl Into, - op: impl Into>, - shape: impl IntoShape, - store: Vec, - ) -> Self { + pub fn from_shape_iter(shape: impl IntoShape, iter: I) -> Self + where + I: IntoIterator, + { + Self::from_shape_vec(shape, Vec::from_iter(iter)) + } + /// Create a new tensor from a [Vec], with a specified [shape](Shape). + pub fn from_shape_vec(shape: impl IntoShape, store: Vec) -> Self { Self { id: TensorId::new(), - kind: kind.into(), + kind: TensorKind::default(), layout: Layout::contiguous(shape), - op: op.into(), + op: BackpropOp::none(), store, } } - - pub fn from_shape_vec( - shape: impl IntoShape, - store: Vec, - ) -> Self { + /// Create a new, one-dimensional tensor from a [Vec]. + pub fn from_vec(store: Vec) -> Self { + let shape = Shape::from(store.len()); Self { id: TensorId::new(), kind: TensorKind::default(), @@ -109,11 +114,11 @@ impl TensorBase { store, } } - /// Get + /// Return a reference to the tensor's data. pub fn as_slice(&self) -> &[T] { &self.store } - /// + /// Return a mutable reference to the tensor's data. pub fn as_mut_slice(&mut self) -> &mut [T] { &mut self.store } @@ -134,6 +139,26 @@ impl TensorBase { } } } + /// Returns a reference to the first element of the tensor. + pub fn first(&self) -> Option<&T> { + let pos = vec![0; *self.rank()]; + self.get(pos) + } + /// Returns a mutable reference to the first element of the tensor. + pub fn first_mut(&mut self) -> Option<&mut T> { + let pos = vec![0; *self.rank()]; + self.get_mut(pos) + } + /// Returns the data at the specified index. + pub fn get(&self, index: impl AsRef<[usize]>) -> Option<&T> { + let i = self.layout.index(index); + self.store.get(i) + } + /// Returns a mutable reference to the data at the specified index. + pub fn get_mut(&mut self, index: impl AsRef<[usize]>) -> Option<&mut T> { + let i = self.layout.index(index); + self.store.get_mut(i) + } /// Returns the unique identifier of the tensor. 
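// Usage sketch for the constructors and accessors introduced above (method
// names per this diff; row-major layout as elsewhere in the crate):
fn accessor_demo() {
    let t = Tensor::<f64>::from_shape_vec((2, 2), vec![1.0, 2.0, 3.0, 4.0]);
    assert_eq!(t.first(), Some(&1.0));
    assert_eq!(t.get([1, 0]), Some(&3.0)); // offset 1 * 2 + 0 * 1 == 2
    assert_eq!(t.last(), Some(&4.0));
}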
pub const fn id(&self) -> TensorId { self.id @@ -148,16 +173,30 @@ impl TensorBase { } /// A function to check if the tensor is a scalar pub fn is_scalar(&self) -> bool { - self.shape().len() == 0 + *self.rank() == 0 } /// A function to check if the tensor is a variable pub const fn is_variable(&self) -> bool { self.kind.is_variable() } + /// Return an iterator over the tensor + pub fn iter(&self) -> StrideIter<'_, T> { + StrideIter::new(self) + } /// Get the kind of the tensor - pub fn kind(&self) -> TensorKind { + pub const fn kind(&self) -> TensorKind { self.kind } + /// Get a reference to the last element of the tensor + pub fn last(&self) -> Option<&T> { + let pos = self.layout.shape().iter().map(|d| d - 1).collect::>(); + self.get(pos) + } + /// Get a mutable reference to the last element of the tensor + pub fn last_mut(&mut self) -> Option<&mut T> { + let pos = self.layout.shape().iter().map(|d| d - 1).collect::>(); + self.get_mut(pos) + } /// Get a reference to the [Layout] of the tensor pub const fn layout(&self) -> &Layout { &self.layout @@ -186,6 +225,15 @@ impl TensorBase { pub fn strided(&self) -> StrideIter<'_, T> { StrideIter::new(self) } + /// Turn the tensor into a scalar + /// If the tensor has a rank greater than 0, this will return an error + pub fn to_scalar(&self) -> TensorResult<&T> { + if self.is_scalar() { + Ok(self.first().unwrap()) + } else { + Err(TensorError::NotScalar) + } + } /// Turn the tensor into a one-dimensional vector pub fn to_vec(&self) -> Vec where @@ -219,7 +267,7 @@ impl TensorBase { } } /// - pub fn with_layout(mut self, layout: Layout) -> Self { + pub unsafe fn with_layout(mut self, layout: Layout) -> Self { self.layout = layout; self } @@ -328,8 +376,6 @@ where impl FromIterator for TensorBase { fn from_iter>(iter: I) -> Self { - let store = Vec::from_iter(iter); - let shape = Shape::from(store.len()); - from_vec(TensorKind::Normal, shape, store) + Self::from_vec(Vec::from_iter(iter)) } } diff --git a/tensor/tests/arith.rs b/tensor/tests/arith.rs index d4b07f14..29b93913 100644 --- a/tensor/tests/arith.rs +++ b/tensor/tests/arith.rs @@ -54,9 +54,9 @@ fn test_sub() { #[test] fn test_trig() { let a = Tensor::::ones((2, 2)); - let b = a.clone().sin(); + let b = a.sin(); let c = a.cos(); - assert_eq!(b[&[0, 0]], 1_f64.sin()); - assert_eq!(c[&[0, 0]], 1_f64.cos()); + assert_eq!(b[[0, 0]], 1_f64.sin()); + assert_eq!(c[[0, 0]], 1_f64.cos()); } diff --git a/tensor/tests/backward.rs b/tensor/tests/backward.rs index 75169ab1..c232921c 100644 --- a/tensor/tests/backward.rs +++ b/tensor/tests/backward.rs @@ -110,11 +110,11 @@ fn test_complex_expr() { let a = Tensor::::ones(shape).variable(); let b = Tensor::fill(shape, 2f64).variable(); let c = Tensor::fill(shape, 3f64).variable(); - let res = (&a + &b) * c.clone().sin() + &b; + let res = (&a + &b) * c.sin() + &b; let grad = res.grad().unwrap(); - assert_eq!(grad[&a.id()], c.clone().sin()); - assert_eq!(grad[&b.id()], c.clone().sin() + 1f64); + assert_eq!(grad[&a.id()], c.sin()); + assert_eq!(grad[&b.id()], c.sin() + 1f64); assert_eq!(grad[&c.id()], (&a + &b) * c.cos()); } diff --git a/tensor/tests/linalg.rs b/tensor/tests/linalg.rs index af3982d8..e64d73c9 100644 --- a/tensor/tests/linalg.rs +++ b/tensor/tests/linalg.rs @@ -33,9 +33,9 @@ macro_rules! 
assert_diff { fn test_inverse() { let shape = Shape::from((2, 2)); let arr: Vec = vec![1.0, 4.0, 3.0, 2.0]; - let tensor = Tensor::from_vec(false, None, shape.clone(), arr); + let tensor = Tensor::from_shape_vec(shape.clone(), arr); let inv_arr = vec![-0.2, 0.4, 0.3, -0.1]; - let exp = Tensor::from_vec(false, None, shape.clone(), inv_arr); + let exp = Tensor::from_shape_vec(shape.clone(), inv_arr); let inverse = tensor.inv().unwrap(); diff --git a/tensor/tests/reshape.rs b/tensor/tests/reshape.rs index 0255c25f..4c565d86 100644 --- a/tensor/tests/reshape.rs +++ b/tensor/tests/reshape.rs @@ -35,7 +35,7 @@ fn test_transpose() { let a = Tensor::::linspace(0f64, 6f64, 6).with_shape(shape); let at = a.t(); - let exp = Tensor::from_vec(false, None, (3, 2), vec![0.0, 3.0, 1.0, 4.0, 2.0, 5.0]); + let exp = Tensor::from_shape_vec((3, 2), vec![0.0, 3.0, 1.0, 4.0, 2.0, 5.0]); assert_ne!(&a, &at); assert_eq!(at.shape(), &Shape::new(vec![3, 2])); for i in 0..shape.0 { diff --git a/tensor/tests/tensor.rs b/tensor/tests/tensor.rs index c73be853..f8b8ab84 100644 --- a/tensor/tests/tensor.rs +++ b/tensor/tests/tensor.rs @@ -6,6 +6,7 @@ extern crate acme_tensor as acme; use acme::prelude::{IntoShape, Tensor}; +use num::One; #[test] fn test_tensor() { @@ -17,6 +18,24 @@ fn test_tensor() { assert_eq!(a.shape(), b.shape()); assert_eq!(a.size(), b.size()); assert_eq!(a.stride(), b.stride()); + + let tensor = Tensor::::one(); + assert!(tensor.is_scalar()); +} + +#[test] +fn test_first_and_last() { + let shape = (3, 3); + let tensor = Tensor::linspace(0f64, 9f64, 9).reshape(shape).unwrap(); + + assert_eq!(tensor.first(), Some(&0f64)); + assert_eq!(tensor.last(), Some(&8f64)); + + let shape = (3, 3, 1); + let tensor = Tensor::linspace(0f64, 9f64, 9).reshape(shape).unwrap(); + + assert_eq!(tensor.first(), Some(&0f64)); + assert_eq!(tensor.last(), Some(&8f64)); } #[test] From 46eaab8c0e2494d4c0fbf2b34ea4ebbb2b90728b Mon Sep 17 00:00:00 2001 From: Joe McCain III Date: Tue, 2 Apr 2024 13:54:50 -0500 Subject: [PATCH 77/87] update Signed-off-by: Joe McCain III --- acme/Cargo.toml | 1 + acme/benches/tensor.rs | 20 +++- core/src/ops/binary/kinds.rs | 2 + graphs/src/ops/arithmetic.rs | 84 +++++++------- graphs/src/ops/mod.rs | 36 +++++- tensor/src/actions/iter/iterator.rs | 7 ++ tensor/src/actions/iter/mod.rs | 3 +- tensor/src/actions/iter/strides.rs | 41 ++----- tensor/src/impls/create.rs | 12 +- tensor/src/impls/grad.rs | 28 ++++- tensor/src/impls/iter.rs | 22 ++++ tensor/src/impls/linalg.rs | 14 +-- tensor/src/impls/ops/binary.rs | 167 ++++++++++++++++++++-------- tensor/src/impls/ops/unary.rs | 14 +-- tensor/src/impls/reshape.rs | 8 +- tensor/src/lib.rs | 1 + tensor/src/shape/shape.rs | 133 +++++++++++----------- tensor/src/tensor.rs | 120 ++++++++------------ tensor/src/utils.rs | 4 +- tensor/tests/iter.rs | 22 ++++ tensor/tests/tensor.rs | 29 ++++- 21 files changed, 472 insertions(+), 296 deletions(-) create mode 100644 tensor/src/impls/iter.rs create mode 100644 tensor/tests/iter.rs diff --git a/acme/Cargo.toml b/acme/Cargo.toml index 0e1b20f4..1c40aeff 100644 --- a/acme/Cargo.toml +++ b/acme/Cargo.toml @@ -100,6 +100,7 @@ acme-tensor = { optional = true, path = "../tensor", version = "0.3.0" } [dev-dependencies] approx = "0.5" +lazy_static = "1" num = "0.4" rand = "0.8" diff --git a/acme/benches/tensor.rs b/acme/benches/tensor.rs index ae189667..fb084100 100644 --- a/acme/benches/tensor.rs +++ b/acme/benches/tensor.rs @@ -6,15 +6,29 @@ extern crate acme; extern crate test; -use acme::prelude::{IntoShape, Tensor}; 
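This benchmark hunk introduces a shared, lazily-initialized shape. The pattern in miniature, using the real `lazy_static` crate that the patch adds as a dev-dependency (a `Vec<usize>` stands in here for the crate's `Shape`):

use lazy_static::lazy_static;

lazy_static! {
    // built once on first access, then shared by every bench
    static ref SHAPE_3D: Vec<usize> = vec![100, 10, 1];
}

fn main() {
    let n: usize = SHAPE_3D.iter().product();
    assert_eq!(n, 1000); // 100 * 10 * 1 elements
}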
+use acme::prelude::{IntoShape, Shape, Tensor}; +use lazy_static::lazy_static; use test::Bencher; +lazy_static! { + static ref SHAPE_3D: Shape = SHAPE_3D_PATTERN.into_shape(); +} +const SHAPE_3D_PATTERN: (usize, usize, usize) = (100, 10, 1); #[bench] -fn tensor_iter(b: &mut Bencher) { - let shape = (20, 20, 20).into_shape(); +fn bench_iter(b: &mut Bencher) { + let shape = SHAPE_3D.clone(); let n = shape.size(); let tensor = Tensor::linspace(0f64, n as f64, n); b.iter(|| tensor.strided().take(n)) } + +#[bench] +fn bench_iter_rev(b: &mut Bencher) { + let shape = SHAPE_3D.clone(); + let n = shape.size(); + let tensor = Tensor::linspace(0f64, n as f64, n); + b.iter(|| tensor.strided().rev().take(n)) +} + diff --git a/core/src/ops/binary/kinds.rs b/core/src/ops/binary/kinds.rs index 2bd277b2..7ae562e6 100644 --- a/core/src/ops/binary/kinds.rs +++ b/core/src/ops/binary/kinds.rs @@ -56,3 +56,5 @@ impl BinaryOp { } } } + + diff --git a/graphs/src/ops/arithmetic.rs b/graphs/src/ops/arithmetic.rs index 4dffc381..b66ee0bd 100644 --- a/graphs/src/ops/arithmetic.rs +++ b/graphs/src/ops/arithmetic.rs @@ -2,7 +2,7 @@ Appellation: arithmetic Contrib: FL03 */ -use super::BinaryOperation; +use super::{BinaryOperation, Operator}; use num::traits::NumOps; #[cfg(feature = "serde")] use serde::{Deserialize, Serialize}; @@ -24,6 +24,12 @@ macro_rules! operator { stringify!($op).to_lowercase() } } + + impl Operator for $op { + fn name(&self) -> String { + self.name() + } + } }; ($($op:ident),*) => { $( @@ -34,10 +40,7 @@ macro_rules! operator { } macro_rules! operators { - (class $group:ident; {$($op:ident: $variant:ident),*}) => { - $( - operator!($op); - )* + ($group:ident; {$($variant:ident: $op:ident => $method:ident),*}) => { #[derive( Clone, Copy, @@ -65,11 +68,35 @@ macro_rules! operators { $variant($op), )* } + + impl $group { + $( + pub fn $method() -> Self { + Self::$variant($op::new()) + } + )* + + pub fn name(&self) -> String { + match self { + $( + $group::$variant(op) => op.name(), + )* + } + } + } }; } macro_rules! impl_binary_op { + ($(($op:ident, $bound:ident, $operator:tt)),*) => { + $( + impl_binary_op!($op, $bound, $operator); + )* + + }; ($op:ident, $bound:ident, $operator:tt) => { + operator!($op); + impl BinaryOperation for $op where A: core::ops::$bound, @@ -82,6 +109,8 @@ macro_rules! impl_binary_op { } }; (expr $op:ident, $bound:ident, $exp:expr) => { + operator!($op); + impl BinaryOperation for $op where A: core::ops::$bound, @@ -95,45 +124,21 @@ macro_rules! 
impl_binary_op { }; } -// operator!(Addition, Division, Multiplication, Subtraction); -operators!(class Arithmetic; {Addition: Add, Division: Div, Multiplication: Mul, Remainder: Rem, Subtraction: Sub}); - -impl_binary_op!(Addition, Add, +); +operators!(Arithmetic; {Add: Addition => add, Div: Division => div, Mul: Multiplication => mul, Rem: Remainder => rem, Sub: Subtraction => sub}); -impl_binary_op!(Division, Div, /); +impl_binary_op!((Addition, Add, +), (Division, Div, /), (Multiplication, Mul, *), (Remainder, Rem, %), (Subtraction, Sub, -)); -impl_binary_op!(Multiplication, Mul, *); - -impl_binary_op!(Remainder, Rem, %); - -impl_binary_op!(Subtraction, Sub, -); impl Arithmetic { pub fn new(op: Arithmetic) -> Self { op } - pub fn add() -> Self { - Self::Add(Addition::new()) - } - - pub fn div() -> Self { - Self::Div(Division::new()) - } - - pub fn mul() -> Self { - Self::Mul(Multiplication::new()) - } - - pub fn sub() -> Self { - Self::Sub(Subtraction::new()) - } - - pub fn op(&self) -> Box> + pub fn into_op(self) -> Box> where A: NumOps, { - match self.clone() { + match self { Arithmetic::Add(op) => Box::new(op), Arithmetic::Div(op) => Box::new(op), Arithmetic::Mul(op) => Box::new(op), @@ -142,14 +147,11 @@ impl Arithmetic { } } - pub fn name(&self) -> String { - match self { - Arithmetic::Add(op) => op.name(), - Arithmetic::Div(op) => op.name(), - Arithmetic::Mul(op) => op.name(), - Arithmetic::Rem(op) => op.name(), - Arithmetic::Sub(op) => op.name(), - } + pub fn op(&self) -> Box> + where + A: NumOps, + { + self.into_op() } pub fn eval(&self, lhs: A, rhs: B) -> C diff --git a/graphs/src/ops/mod.rs b/graphs/src/ops/mod.rs index ea089ec6..969c3831 100644 --- a/graphs/src/ops/mod.rs +++ b/graphs/src/ops/mod.rs @@ -10,14 +10,44 @@ pub use self::{arithmetic::*, kinds::*}; pub(crate) mod arithmetic; pub(crate) mod kinds; -pub trait BinaryOperation { +pub trait BinaryOperation { type Output; fn eval(&self, lhs: A, rhs: B) -> Self::Output; } +impl BinaryOperation for S +where + S: Fn(A, B) -> C, +{ + type Output = C; + + fn eval(&self, lhs: A, rhs: B) -> Self::Output { + self(lhs, rhs) + } +} + +impl BinaryOperation for Box> { + type Output = C; + + fn eval(&self, lhs: A, rhs: B) -> Self::Output { + self.as_ref().eval(lhs, rhs) + } +} + pub trait Operator { - type Output; + fn boxed(self) -> Box + where + Self: Sized + 'static, + { + Box::new(self) + } + fn name(&self) -> String; +} + +impl Operator for Box { - fn kind(&self) -> String; + fn name(&self) -> String { + self.as_ref().name() + } } diff --git a/tensor/src/actions/iter/iterator.rs b/tensor/src/actions/iter/iterator.rs index 9b2765f7..842e03a6 100644 --- a/tensor/src/actions/iter/iterator.rs +++ b/tensor/src/actions/iter/iterator.rs @@ -17,3 +17,10 @@ impl Iter { self.order } } + +pub struct BaseIter<'a, T> { + iter: &'a Iter, + data: &'a [T], + index: usize, +} + diff --git a/tensor/src/actions/iter/mod.rs b/tensor/src/actions/iter/mod.rs index 2447409c..29bc0385 100644 --- a/tensor/src/actions/iter/mod.rs +++ b/tensor/src/actions/iter/mod.rs @@ -48,7 +48,7 @@ mod tests { #[test] fn test_strided() { - let shape = Shape::from_iter([2, 2]); + let shape = Shape::from_iter([2, 2, 2, 2]); let n = shape.size(); let exp = Vec::linspace(0f64, n as f64, n); let tensor = Tensor::linspace(0f64, n as f64, n).reshape(shape).unwrap(); @@ -58,7 +58,6 @@ mod tests { } #[test] - #[ignore = "not implemented"] fn test_strided_rev() { let shape = Shape::from_iter([2, 2]); let n = shape.size(); diff --git a/tensor/src/actions/iter/strides.rs 
b/tensor/src/actions/iter/strides.rs index 1b4ebf4a..0d0ab1e0 100644 --- a/tensor/src/actions/iter/strides.rs +++ b/tensor/src/actions/iter/strides.rs @@ -49,8 +49,8 @@ impl<'a, T> From<&'a TensorBase> for StrideIter<'a, T> { pub struct Strided<'a> { next: Option, position: Vec, - pub(crate) shape: &'a Shape, - pub(crate) stride: &'a Stride, + shape: &'a Shape, + stride: &'a Stride, } impl<'a> Strided<'a> { @@ -70,8 +70,9 @@ impl<'a> Strided<'a> { } } - pub fn index(&self, index: &[usize]) -> usize { + pub(crate) fn index(&self, index: impl AsRef<[usize]>) -> usize { index + .as_ref() .iter() .zip(self.stride.iter()) .map(|(i, s)| i * s) @@ -81,33 +82,15 @@ impl<'a> Strided<'a> { impl<'a> DoubleEndedIterator for Strided<'a> { fn next_back(&mut self) -> Option { - - let scope = match self.next { - None => return None, - Some(storage_index) => storage_index, + let (pos, _idx) = if let Some(item) = self.next() { + item + } else { + return None; }; - let mut updated = false; - let mut next = scope; - for ((pos, max_i), stride) in self - .position - .iter_mut() - .zip(self.shape.iter()) - .zip(self.stride.iter()) - { - let next_i = *pos - 1; - if next_i > *max_i { - *pos = next_i; - updated = true; - next -= stride; - break; - } else { - next += *pos * stride; - *pos = 0 - } - } - self.next = if updated { Some(next) } else { None }; - println!("{:?}", &self.position); - Some((self.position.clone(), scope)) + let position = self.shape.iter().zip(pos.iter()).map(|(s, p)| s - p).collect(); + let scope = self.index(&position); + println!("{:?}", &position); + Some((position, scope)) // unimplemented!() } } diff --git a/tensor/src/impls/create.rs b/tensor/src/impls/create.rs index a8aa220c..6e9c22b1 100644 --- a/tensor/src/impls/create.rs +++ b/tensor/src/impls/create.rs @@ -3,7 +3,7 @@ Contrib: FL03 */ use crate::prelude::IntoShape; -use crate::tensor::{from_vec, TensorBase}; +use crate::tensor::{from_vec_with_kind, TensorBase}; use num::traits::real::Real; use num::traits::{FromPrimitive, NumAssign, One, Zero}; @@ -30,7 +30,7 @@ where pub fn fill(shape: impl IntoShape, value: T) -> Self { let shape = shape.into_shape(); let store = vec![value; shape.size()]; - from_vec(false, shape, store) + from_vec_with_kind(false, shape, store) } /// Create a tensor, filled with some value, from the current shape pub fn fill_like(&self, value: T) -> Self { @@ -53,7 +53,7 @@ where store.push(value); value += step; } - from_vec(false, store.len(), store) + Self::from_vec(store) } /// Create an identity matrix of a certain size pub fn eye(size: usize) -> Self { @@ -63,7 +63,7 @@ where store.push(if i == j { T::one() } else { T::zero() }); } } - from_vec(false, (size, size), store) + Self::from_shape_vec((size, size), store) } /// Create a tensor with a certain number of elements, evenly spaced /// between the provided start and end values @@ -88,7 +88,7 @@ where store.push(value.exp2()); value += step; } - from_vec(false, (store.len(),), store) + from_vec_with_kind(false, (store.len(),), store) } pub fn geomspace(start: T, end: T, steps: usize) -> Self @@ -104,7 +104,7 @@ where store.push(value.exp()); value += step; } - from_vec(false, (store.len(),), store) + from_vec_with_kind(false, (store.len(),), store) } } diff --git a/tensor/src/impls/grad.rs b/tensor/src/impls/grad.rs index 1b57c57f..ad4e913a 100644 --- a/tensor/src/impls/grad.rs +++ b/tensor/src/impls/grad.rs @@ -114,24 +114,42 @@ where } _ => todo!(), }, + TensorExpr::BinaryScalar(lhs, rhs, kind) => match kind { + BinaryOp::Add => { + *entry!(store, 
lhs) += &grad; + } + BinaryOp::Div => { + *entry!(store, lhs) += &grad / *rhs; + } + BinaryOp::Mul => { + *entry!(store, lhs) += &grad * *rhs; + } + BinaryOp::Pow => { + *entry!(store, lhs) += &grad * *rhs * lhs.pow(*rhs - T::one()); + } + BinaryOp::Sub => { + *entry!(store, lhs) += &grad; + } + _ => todo!(), + }, TensorExpr::Unary(val, kind) => match kind { UnaryOp::Cos => { - *entry!(store, val) -= &grad * val.clone().sin(); + *entry!(store, val) -= &grad * val.sin(); } UnaryOp::Cosh => { - *entry!(store, val) += &grad * val.clone().sinh(); + *entry!(store, val) += &grad * val.sinh(); } UnaryOp::Exp => { - *entry!(store, val) += &grad * val.clone().exp(); + *entry!(store, val) += &grad * val.exp(); } UnaryOp::Neg => { *entry!(store, val) -= &grad; } UnaryOp::Sin => { - *entry!(store, val) += &grad * val.clone().cos(); + *entry!(store, val) += &grad * val.cos(); } UnaryOp::Sinh => { - *entry!(store, val) += &grad * val.clone().cosh(); + *entry!(store, val) += &grad * val.cosh(); } UnaryOp::Sqrt => { *entry!(store, val) += diff --git a/tensor/src/impls/iter.rs b/tensor/src/impls/iter.rs new file mode 100644 index 00000000..46f8c69d --- /dev/null +++ b/tensor/src/impls/iter.rs @@ -0,0 +1,22 @@ +/* + Appellation: iter + Contrib: FL03 +*/ +use crate::prelude::Scalar; +use crate::tensor::TensorBase; + +impl TensorBase where T: Scalar { + pub fn sum(&self) -> T { + self.data().iter().copied().sum() + } + + pub fn product(&self) -> T { + self.data().iter().copied().product() + } +} + +impl FromIterator for TensorBase { + fn from_iter>(iter: I) -> Self { + Self::from_vec(Vec::from_iter(iter)) + } +} diff --git a/tensor/src/impls/linalg.rs b/tensor/src/impls/linalg.rs index 134cc591..2ec903a3 100644 --- a/tensor/src/impls/linalg.rs +++ b/tensor/src/impls/linalg.rs @@ -10,7 +10,7 @@ use crate::tensor::{self, TensorBase}; use acme::prelude::UnaryOp; use num::traits::{Num, Signed}; -pub fn inverse(tensor: &TensorBase) -> TensorResult> +fn inverse_impl(tensor: &TensorBase) -> TensorResult> where T: Copy + Num + PartialOrd + Signed, { @@ -88,7 +88,7 @@ where let rank = *self.rank(); let store = (0..rank).map(|i| self[vec![i; rank]]).collect::>(); - tensor::from_vec(false, self.shape().diagonalize(), store) + tensor::from_vec_with_kind(false, self.shape().diagonalize(), store) } pub fn det(&self) -> Result { @@ -96,12 +96,12 @@ where return Err(ShapeError::InvalidShape.into()); } let shape = self.shape(); - let n = *shape.first().unwrap(); + let n = shape.nrows(); if n == 1 { return Ok(T::zero()); } if n == 2 { - let res = self[vec![0, 0]] * self[vec![1, 1]] - self[vec![0, 1]] * self[vec![1, 0]]; + let res = self[vec![0, 0]] * self[vec![1, 1]] - self[vec![0, 1]] * self[vec![1, 0]]; return Ok(res); } let mut det = T::zero(); @@ -119,14 +119,14 @@ where k += 1; } } - let sub_tensor = tensor::from_vec(false, cur_shape.clone(), sub); + let sub_tensor = tensor::from_vec_with_kind(false, cur_shape.clone(), sub); let sign = if i % 2 == 0 { T::one() } else { -T::one() }; det = det + sign * self[vec![0, i]] * sub_tensor.det()?; } Ok(det) } pub fn inv(&self) -> TensorResult { - inverse(self) + inverse_impl(self) } } @@ -144,7 +144,7 @@ where for j in 0..other.shape()[1] { for k in 0..self.shape()[1] { result[i * other.shape()[1] + j] += - self.store[i * self.shape()[1] + k] * other.store[k * other.shape()[1] + j]; + self.data[i * self.shape()[1] + k] * other.data[k * other.shape()[1] + j]; } } } diff --git a/tensor/src/impls/ops/binary.rs b/tensor/src/impls/ops/binary.rs index 3c2c82c3..a8373521 100644 --- 
a/tensor/src/impls/ops/binary.rs +++ b/tensor/src/impls/ops/binary.rs @@ -2,13 +2,36 @@ Appellation: arith Contrib: FL03 */ -use crate::prelude::TensorExpr; +use crate::prelude::{Scalar, TensorExpr}; use crate::tensor::{from_vec_with_op, TensorBase}; use acme::ops::binary::BinaryOp; use core::ops; use num::traits::Pow; -macro_rules! cmp { + +pub(crate) fn broadcast_scalar_op(lhs: &TensorBase, rhs: &TensorBase, op: BinaryOp, f: F) -> TensorBase where F: Fn(T, T) -> T, T: Copy + Default { + let mut lhs = lhs.clone(); + let mut rhs = rhs.clone(); + if lhs.is_scalar() { + lhs = lhs.broadcast(rhs.shape()); + } + if rhs.is_scalar() { + rhs = rhs.broadcast(lhs.shape()); + } + let shape = lhs.shape().clone(); + let store = lhs.data().iter().zip(rhs.data().iter()).map(|(a, b)| f(*a, *b)).collect(); + let op = TensorExpr::binary(lhs, rhs, op); + from_vec_with_op(false, op, shape, store) +} + +fn check_shapes_or_scalar(lhs: &TensorBase, rhs: &TensorBase) where T: Clone + Default { + let is_scalar = lhs.is_scalar() || rhs.is_scalar(); + debug_assert!(is_scalar || lhs.shape() == rhs.shape(), "Shape Mismatch: {:?} != {:?}", lhs.shape(), rhs.shape()); + + +} + +macro_rules! check { (ne: $lhs:expr, $rhs:expr) => { if $lhs != $rhs { panic!("Shape Mismatch: {:?} != {:?}", $lhs, $rhs); @@ -16,6 +39,34 @@ macro_rules! cmp { }; } +impl TensorBase where T: Scalar { + pub fn apply_binary(&self, other: &Self, op: BinaryOp) -> Self { + check_shapes_or_scalar(self, other); + let shape = self.shape(); + let store = self.data().iter().zip(other.data().iter()).map(|(a, b)| *a + *b).collect(); + let op = TensorExpr::binary(self.clone(), other.clone(), op); + from_vec_with_op(false, op, shape, store) + } + + pub fn apply_binaryf(&self, other: &Self, op: BinaryOp, f: F) -> Self where F: Fn(T, T) -> T { + check_shapes_or_scalar(self, other); + let shape = self.shape(); + let store = self.data().iter().zip(other.data().iter()).map(|(a, b)| f(*a, *b)).collect(); + let op = TensorExpr::binary(self.clone(), other.clone(), op); + from_vec_with_op(false, op, shape, store) + } +} + +impl TensorBase where T: Scalar { + pub fn pow(&self, exp: T) -> Self { + let shape = self.shape(); + let store = self.data().iter().copied().map(|a| a.pow(exp)).collect(); + let op = TensorExpr::binary_scalar(self.clone(), exp, BinaryOp::Pow); + from_vec_with_op(false, op, shape, store) + } + +} + impl Pow for TensorBase where T: Copy + Pow, @@ -44,10 +95,44 @@ where } } -macro_rules! impl_arithmetic { - (op: $trait:ident, $method:ident, $op:tt) => { - impl_scalar_arith!($trait, $method, $op); +macro_rules! 
impl_binary_op { + ($(($trait:ident, $method:ident, $op:tt)),*) => { + $( impl_binary_op!($trait, $method, $op); )* + }; + ($trait:ident, $method:ident, $op:tt) => { + impl_binary_op!(scalar: $trait, $method, $op); + impl_binary_op!(tensor: $trait, $method, $op); + }; + (scalar: $trait:ident, $method:ident, $op:tt) => { + impl ops::$trait for TensorBase + where + T: Copy + ops::$trait, + { + type Output = Self; + fn $method(self, other: T) -> Self::Output { + let shape = self.shape().clone(); + let store = self.data().iter().map(|a| *a $op other).collect(); + let op = TensorExpr::binary_scalar(self, other, BinaryOp::$trait); + from_vec_with_op(false, op, shape, store) + } + } + + impl<'a, T> ops::$trait for &'a TensorBase + where + T: Copy + ops::$trait, + { + type Output = TensorBase; + + fn $method(self, other: T) -> Self::Output { + let shape = self.shape().clone(); + let store = self.data().iter().map(|a| *a $op other).collect(); + let op = TensorExpr::binary_scalar(self.clone(), other, BinaryOp::$trait); + from_vec_with_op(false, op, shape, store) + } + } + }; + (tensor: $trait:ident, $method:ident, $op:tt) => { impl ops::$trait for TensorBase where T: Copy + ops::$trait, @@ -55,7 +140,7 @@ macro_rules! impl_arithmetic { type Output = Self; fn $method(self, other: Self) -> Self::Output { - cmp!(ne: self.shape(), other.shape()); + check!(ne: self.shape(), other.shape()); let shape = self.shape().clone(); let store = self.data().iter().zip(other.data().iter()).map(|(a, b)| *a $op *b).collect(); let op = TensorExpr::binary(self, other, BinaryOp::$trait); @@ -114,42 +199,7 @@ macro_rules! impl_arithmetic { } } }; - ($(($trait:ident, $method:ident, $op:tt)),*) => { - $( impl_arithmetic!(op: $trait, $method, $op); )* - }; -} - -macro_rules! impl_scalar_arith { - ($trait:ident, $method:ident, $op:tt) => { - - impl ops::$trait for TensorBase - where - T: Copy + ops::$trait, - { - type Output = Self; - - fn $method(self, other: T) -> Self::Output { - let shape = self.shape().clone(); - let store = self.data().iter().map(|a| *a $op other).collect(); - let op = TensorExpr::binary_scalar(self, other, BinaryOp::$trait); - from_vec_with_op(false, op, shape, store) - } - } - - impl<'a, T> ops::$trait for &'a TensorBase - where - T: Copy + ops::$trait, - { - type Output = TensorBase; - - fn $method(self, other: T) -> Self::Output { - let shape = self.shape().clone(); - let store = self.data().iter().map(|a| *a $op other).collect(); - let op = TensorExpr::binary_scalar(self.clone(), other, BinaryOp::$trait); - from_vec_with_op(false, op, shape, store) - } - } - }; + } macro_rules! impl_assign_op { @@ -159,7 +209,7 @@ macro_rules! impl_assign_op { T: Copy + ops::$inner, { fn $method(&mut self, other: Self) { - cmp!(ne: self.shape(), other.shape()); + check!(ne: self.shape(), other.shape()); let shape = self.shape().clone(); let store = self.data().iter().zip(other.data().iter()).map(|(a, b)| *a $op *b).collect(); let op = TensorExpr::binary(self.clone(), other, BinaryOp::$inner); @@ -173,7 +223,7 @@ macro_rules! impl_assign_op { T: Copy + ops::$inner, { fn $method(&mut self, other: &'a TensorBase) { - cmp!(ne: self.shape(), other.shape()); + check!(ne: self.shape(), other.shape()); let shape = self.shape().clone(); let store = self.data().iter().zip(other.data().iter()).map(|(a, b)| *a $op *b).collect(); let op = TensorExpr::binary(self.clone(), other.clone(), BinaryOp::$inner); @@ -185,10 +235,37 @@ macro_rules! 
impl_assign_op { } -impl_arithmetic!((Add, add, +), (Div, div, /), (Mul, mul, *), (Rem, rem, %), (Sub, sub, -)); +macro_rules! impl_binary_method { + (scalar: $variant:ident, $method:ident, $op:tt) => { + pub fn $method(&self, other: T) -> Self { + let shape = self.shape(); + let store = self.data().iter().map(| elem | *elem $op other).collect(); + let op = TensorExpr::binary_scalar(self.clone(), other.clone(), BinaryOp::$variant); + from_vec_with_op(false, op, shape, store) + } + + }; + (tensor: $variant:ident, $method:ident, $op:tt) => { + pub fn $method(&self, other: &Self) -> Self { + check!(ne: self.shape(), other.shape()); + let shape = self.shape(); + let store = self.data().iter().zip(other.data().iter()).map(|(a, b)| *a $op *b).collect(); + let op = TensorExpr::binary(self.clone(), other.clone(), BinaryOp::$variant); + from_vec_with_op(false, op, shape, store) + } + + }; +} + +impl_binary_op!((Add, add, +), (Div, div, /), (Mul, mul, *), (Rem, rem, %), (Sub, sub, -)); impl_assign_op!(AddAssign, add_assign, Add, +); impl_assign_op!(DivAssign, div_assign, Div, /); impl_assign_op!(MulAssign, mul_assign, Mul, *); impl_assign_op!(RemAssign, rem_assign, Rem, %); impl_assign_op!(SubAssign, sub_assign, Sub, -); + +impl TensorBase where T: Scalar { + impl_binary_method!(tensor: Add, add, +); + impl_binary_method!(scalar: Add, add_scalar, +); +} \ No newline at end of file diff --git a/tensor/src/impls/ops/unary.rs b/tensor/src/impls/ops/unary.rs index 5216fe64..c70cf363 100644 --- a/tensor/src/impls/ops/unary.rs +++ b/tensor/src/impls/ops/unary.rs @@ -15,7 +15,7 @@ where fn neg(self) -> Self::Output { let shape = self.shape().clone(); - let store = self.data().iter().copied().map(|a| -a).collect(); + let store = self.data().iter().map(|a| (*a).neg()).collect(); let op = TensorExpr::unary(self, UnaryOp::Neg); from_vec_with_op(false, op, shape, store) } @@ -28,8 +28,8 @@ where type Output = TensorBase; fn neg(self) -> Self::Output { - let shape = self.shape().clone(); - let store = self.data().iter().copied().map(|a| -a).collect(); + let shape = self.shape(); + let store = self.data().iter().map(|a| (*a).neg()).collect(); let op = TensorExpr::unary(self.clone(), UnaryOp::Neg); from_vec_with_op(false, op, shape, store) } @@ -43,7 +43,7 @@ where fn not(self) -> Self::Output { let shape = self.shape().clone(); - let store = self.data().iter().copied().map(|a| !a).collect(); + let store = self.data().iter().map(|a| (*a).not()).collect(); let op = TensorExpr::unary(self, UnaryOp::Not); from_vec_with_op(false, op, shape, store) } @@ -57,7 +57,7 @@ where fn not(self) -> Self::Output { let shape = self.shape(); - let store = self.store.iter().copied().map(|a| !a).collect(); + let store = self.data.iter().copied().map(|a| !a).collect(); let op = TensorExpr::unary(self.clone(), UnaryOp::Not); from_vec_with_op(false, op, shape, store) } @@ -67,7 +67,7 @@ macro_rules! 
impl_unary_op { ($variant:ident, $method:ident) => { pub fn $method(&self) -> Self { let shape = self.shape(); - let store = self.store.iter().copied().map(|v| v.$method()).collect(); + let store = self.data().iter().copied().map(|v| v.$method()).collect(); let op = TensorExpr::unary(self.clone(), UnaryOp::$variant); from_vec_with_op(false, op, shape, store) } @@ -91,7 +91,7 @@ where T: Scalar, { let shape = self.shape(); - let store = self.store.iter().copied().map(|v| v.abs()).collect(); + let store = self.data.iter().copied().map(|v| v.abs()).collect(); let op = TensorExpr::unary(self.clone(), UnaryOp::Abs); from_vec_with_op(false, op, shape, store) } diff --git a/tensor/src/impls/reshape.rs b/tensor/src/impls/reshape.rs index 08835396..86891e01 100644 --- a/tensor/src/impls/reshape.rs +++ b/tensor/src/impls/reshape.rs @@ -18,7 +18,7 @@ where kind: self.kind(), layout, op: op.into(), - store: self.store.clone(), + data: self.data.clone(), } } @@ -36,7 +36,7 @@ where let layout = self.layout().clone().swap_axes(swap, with); let shape = self.layout.shape(); - let mut data = self.store.to_vec(); + let mut data = self.data.to_vec(); for i in 0..shape[swap] { for j in 0..shape[with] { @@ -51,7 +51,7 @@ where kind: self.kind.clone(), layout, op: op.into(), - store: data.clone(), + data: data.clone(), } } /// Transpose the tensor. @@ -64,7 +64,7 @@ where kind: self.kind(), layout, op: op.into(), - store: self.data().clone(), + data: self.data().clone(), } } diff --git a/tensor/src/lib.rs b/tensor/src/lib.rs index a404639c..6c661939 100644 --- a/tensor/src/lib.rs +++ b/tensor/src/lib.rs @@ -40,6 +40,7 @@ mod impls { } mod create; mod grad; + mod iter; mod linalg; mod num; mod reshape; diff --git a/tensor/src/shape/shape.rs b/tensor/src/shape/shape.rs index 75a97bf1..66c64e70 100644 --- a/tensor/src/shape/shape.rs +++ b/tensor/src/shape/shape.rs @@ -54,11 +54,14 @@ impl Shape { pub fn diagonalize(&self) -> Shape { Self::new(vec![self.size()]) } + pub fn get_final_position(&self) -> Vec { + self.iter().map(|&dim| dim - 1).collect() + } /// Inserts a new dimension along the given [Axis]. pub fn insert(&mut self, index: Axis, dim: usize) { self.0.insert(*index, dim) } - + /// Inserts a new dimension along the given [Axis]. pub fn insert_axis(&self, index: Axis) -> Self { let mut shape = self.clone(); shape.insert(index, 1); @@ -89,7 +92,7 @@ impl Shape { /// The number of columns in the shape. pub fn ncols(&self) -> usize { if self.len() >= 2 { - self.0[1] + self[1] } else if self.len() == 1 { 1 } else { @@ -99,12 +102,12 @@ impl Shape { /// The number of rows in the shape. pub fn nrows(&self) -> usize { if self.len() >= 1 { - *self.0.first().unwrap() + self[0] } else { 0 } } - + /// Removes and returns the last dimension of the shape. 
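`nrows` and `ncols` above encode a deliberate convention: a rank-1 shape is read as a single column of `n` rows, and the empty (scalar) shape has neither rows nor columns. The same rules in isolation, with a plain newtype standing in for `Shape`:

struct Shape(Vec<usize>);

impl Shape {
    fn nrows(&self) -> usize {
        // the leading dimension, or zero for a scalar shape
        if self.0.is_empty() { 0 } else { self.0[0] }
    }
    fn ncols(&self) -> usize {
        match self.0.len() {
            0 => 0, // scalar: no columns
            1 => 1, // vector: a single column
            _ => self.0[1],
        }
    }
}

fn main() {
    assert_eq!(Shape(vec![3, 4]).ncols(), 4);
    assert_eq!(Shape(vec![5]).ncols(), 1);
    assert_eq!(Shape(vec![]).nrows(), 0);
}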
pub fn pop(&mut self) -> Option { self.0.pop() } @@ -247,66 +250,6 @@ impl SwapAxes for Shape { } } -impl From<()> for Shape { - fn from(_: ()) -> Self { - Self::default() - } -} - -impl From for Shape { - fn from(dim: usize) -> Self { - Self(vec![dim]) - } -} - -impl From> for Shape { - fn from(shape: Vec) -> Self { - Self(shape) - } -} - -impl From<&[usize]> for Shape { - fn from(shape: &[usize]) -> Self { - Self(shape.to_vec()) - } -} - -impl From<(usize,)> for Shape { - fn from(shape: (usize,)) -> Self { - Self(vec![shape.0]) - } -} - -impl From<(usize, usize)> for Shape { - fn from(shape: (usize, usize)) -> Self { - Self(vec![shape.0, shape.1]) - } -} - -impl From<(usize, usize, usize)> for Shape { - fn from(shape: (usize, usize, usize)) -> Self { - Self(vec![shape.0, shape.1, shape.2]) - } -} - -impl From<(usize, usize, usize, usize)> for Shape { - fn from(shape: (usize, usize, usize, usize)) -> Self { - Self(vec![shape.0, shape.1, shape.2, shape.3]) - } -} - -impl From<(usize, usize, usize, usize, usize)> for Shape { - fn from(shape: (usize, usize, usize, usize, usize)) -> Self { - Self(vec![shape.0, shape.1, shape.2, shape.3, shape.4]) - } -} - -impl From<(usize, usize, usize, usize, usize, usize)> for Shape { - fn from(shape: (usize, usize, usize, usize, usize, usize)) -> Self { - Self(vec![shape.0, shape.1, shape.2, shape.3, shape.4, shape.5]) - } -} - impl FromIterator for Shape { fn from_iter>(iter: I) -> Self { Self(Vec::from_iter(iter)) @@ -411,6 +354,68 @@ unsafe impl Send for Shape {} unsafe impl Sync for Shape {} + +impl From<()> for Shape { + fn from(_: ()) -> Self { + Self::default() + } +} + +impl From for Shape { + fn from(dim: usize) -> Self { + Self(vec![dim]) + } +} + +impl From> for Shape { + fn from(shape: Vec) -> Self { + Self(shape) + } +} + +impl From<&[usize]> for Shape { + fn from(shape: &[usize]) -> Self { + Self(shape.to_vec()) + } +} + +impl From<(usize,)> for Shape { + fn from(shape: (usize,)) -> Self { + Self(vec![shape.0]) + } +} + +impl From<(usize, usize)> for Shape { + fn from(shape: (usize, usize)) -> Self { + Self(vec![shape.0, shape.1]) + } +} + +impl From<(usize, usize, usize)> for Shape { + fn from(shape: (usize, usize, usize)) -> Self { + Self(vec![shape.0, shape.1, shape.2]) + } +} + +impl From<(usize, usize, usize, usize)> for Shape { + fn from(shape: (usize, usize, usize, usize)) -> Self { + Self(vec![shape.0, shape.1, shape.2, shape.3]) + } +} + +impl From<(usize, usize, usize, usize, usize)> for Shape { + fn from(shape: (usize, usize, usize, usize, usize)) -> Self { + Self(vec![shape.0, shape.1, shape.2, shape.3, shape.4]) + } +} + +impl From<(usize, usize, usize, usize, usize, usize)> for Shape { + fn from(shape: (usize, usize, usize, usize, usize, usize)) -> Self { + Self(vec![shape.0, shape.1, shape.2, shape.3, shape.4, shape.5]) + } +} + + // macro_rules! 
tuple_vec { // ($($n:tt),*) => { // vec![$($n,)*] diff --git a/tensor/src/tensor.rs b/tensor/src/tensor.rs index 1f0b06ce..e7d57112 100644 --- a/tensor/src/tensor.rs +++ b/tensor/src/tensor.rs @@ -9,7 +9,6 @@ use crate::ops::{BackpropOp, TensorExpr}; use crate::prelude::{TensorId, TensorKind}; use crate::shape::{IntoShape, Rank, Shape, Stride}; -use acme::prelude::BinaryOp; #[cfg(not(feature = "std"))] use alloc::vec::{self, Vec}; use core::iter::Map; @@ -18,59 +17,60 @@ use core::slice::Iter as SliceIter; #[cfg(feature = "std")] use std::vec; -pub(crate) fn new( +pub(crate) fn create_with( kind: impl Into, op: impl Into>, shape: impl IntoShape, - store: Vec, + data: Vec, ) -> TensorBase { TensorBase { id: TensorId::new(), + data, kind: kind.into(), layout: Layout::contiguous(shape), op: op.into(), - store, } } -pub(crate) fn from_vec( +pub(crate) fn from_vec_with_kind( kind: impl Into, shape: impl IntoShape, - store: Vec, + data: Vec, ) -> TensorBase { - new(kind, BackpropOp::none(), shape, store) + create_with(kind, BackpropOp::none(), shape, data) } pub(crate) fn from_vec_with_op( kind: impl Into, op: TensorExpr, shape: impl IntoShape, - store: Vec, + data: Vec, ) -> TensorBase { - new(kind.into(), BackpropOp::new(op), shape, store) + create_with(kind.into(), BackpropOp::new(op), shape, data) } #[derive(Clone, Debug, Hash, Ord, PartialOrd)] pub struct TensorBase { pub(crate) id: TensorId, + pub(crate) data: Vec, pub(crate) kind: TensorKind, pub(crate) layout: Layout, pub(crate) op: BackpropOp, - pub(crate) store: Vec, } impl TensorBase { pub fn new(kind: TensorKind, shape: impl IntoShape) -> Self { let shape = shape.into_shape(); - let store = Vec::with_capacity(shape.size()); + let data = Vec::with_capacity(shape.size()); Self { id: TensorId::new(), + data, kind, layout: Layout::contiguous(shape), op: BackpropOp::none(), - store, } } + /// Create a new tensor from an iterator. pub fn from_iter(iter: I) -> Self where I: IntoIterator, @@ -81,12 +81,13 @@ impl TensorBase { pub fn from_scalar(value: T) -> Self { Self { id: TensorId::new(), + data: vec![value], kind: TensorKind::default(), layout: Layout::contiguous(()), op: None.into(), - store: vec![value], } } + /// Create a new tensor from an iterator, with a particular shape. pub fn from_shape_iter(shape: impl IntoShape, iter: I) -> Self where I: IntoIterator, @@ -94,33 +95,33 @@ impl TensorBase { Self::from_shape_vec(shape, Vec::from_iter(iter)) } /// Create a new tensor from a [Vec], with a specified [shape](Shape). - pub fn from_shape_vec(shape: impl IntoShape, store: Vec) -> Self { + pub fn from_shape_vec(shape: impl IntoShape, data: Vec) -> Self { Self { id: TensorId::new(), + data, kind: TensorKind::default(), layout: Layout::contiguous(shape), op: BackpropOp::none(), - store, } } /// Create a new, one-dimensional tensor from a [Vec]. - pub fn from_vec(store: Vec) -> Self { - let shape = Shape::from(store.len()); + pub fn from_vec(data: Vec) -> Self { + let shape = Shape::from(data.len()); Self { id: TensorId::new(), + data, kind: TensorKind::default(), layout: Layout::contiguous(shape), op: BackpropOp::none(), - store, } } /// Return a reference to the tensor's data. pub fn as_slice(&self) -> &[T] { - &self.store + &self.data } /// Return a mutable reference to the tensor's data. 
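The `tensor.rs` hunk here renames the backing field from `store` to `data` and funnels every constructor through a single `create_with`-style helper, so the id, kind, layout, and op defaults live in one place. A condensed sketch of that funneling pattern with simplified fields — `Op` is a hypothetical stand-in for `BackpropOp`, not the crate's type:

#[derive(Debug, Default)]
struct Op(Option<&'static str>); // hypothetical stand-in for BackpropOp

#[derive(Debug)]
struct Tensor {
    data: Vec<f64>,
    shape: Vec<usize>,
    op: Op,
}

// the one place that actually assembles a tensor
fn create_with(op: Op, shape: Vec<usize>, data: Vec<f64>) -> Tensor {
    Tensor { data, shape, op }
}

// every public constructor delegates, filling in its own defaults
fn from_vec(data: Vec<f64>) -> Tensor {
    let shape = vec![data.len()]; // one-dimensional by construction
    create_with(Op::default(), shape, data)
}

fn main() {
    let t = from_vec(vec![1.0, 2.0, 3.0]);
    assert_eq!(t.shape, vec![3]);
    assert_eq!(t.data.len(), 3);
}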
pub fn as_mut_slice(&mut self) -> &mut [T] { - &mut self.store + &mut self.data } /// Detach the computational graph from the tensor pub fn detach(&self) -> Self @@ -135,7 +136,7 @@ impl TensorBase { kind: self.kind, layout: self.layout.clone(), op: BackpropOp::none(), - store: self.store.clone(), + data: self.data.clone(), } } } @@ -152,12 +153,12 @@ impl TensorBase { /// Returns the data at the specified index. pub fn get(&self, index: impl AsRef<[usize]>) -> Option<&T> { let i = self.layout.index(index); - self.store.get(i) + self.data().get(i) } /// Returns a mutable reference to the data at the specified index. pub fn get_mut(&mut self, index: impl AsRef<[usize]>) -> Option<&mut T> { let i = self.layout.index(index); - self.store.get_mut(i) + self.data_mut().get_mut(i) } /// Returns the unique identifier of the tensor. pub const fn id(&self) -> TensorId { @@ -169,7 +170,7 @@ impl TensorBase { } /// Returns true if the tensor is empty. pub fn is_empty(&self) -> bool { - self.store.is_empty() + self.data().is_empty() } /// A function to check if the tensor is a scalar pub fn is_scalar(&self) -> bool { @@ -177,7 +178,7 @@ impl TensorBase { } /// A function to check if the tensor is a variable pub const fn is_variable(&self) -> bool { - self.kind.is_variable() + self.kind().is_variable() } /// Return an iterator over the tensor pub fn iter(&self) -> StrideIter<'_, T> { @@ -189,12 +190,12 @@ impl TensorBase { } /// Get a reference to the last element of the tensor pub fn last(&self) -> Option<&T> { - let pos = self.layout.shape().iter().map(|d| d - 1).collect::>(); + let pos = self.shape().iter().map(|d| d - 1).collect::>(); self.get(pos) } /// Get a mutable reference to the last element of the tensor pub fn last_mut(&mut self) -> Option<&mut T> { - let pos = self.layout.shape().iter().map(|d| d - 1).collect::>(); + let pos = self.shape().iter().map(|d| d - 1).collect::>(); self.get_mut(pos) } /// Get a reference to the [Layout] of the tensor @@ -207,19 +208,19 @@ impl TensorBase { } /// Get an owned reference to the [Rank] of the tensor pub fn rank(&self) -> Rank { - self.layout.shape().rank() + self.shape().rank() } /// An owned reference of the tensors [Shape] pub fn shape(&self) -> &Shape { - self.layout.shape() + self.layout().shape() } /// Returns the number of elements in the tensor. 
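`last` and `last_mut` above step every dimension back by one to locate the final element, matching the `get_final_position` helper added to `Shape` earlier in this patch. The position arithmetic in isolation — note it assumes every dimension is non-zero, since a zero-sized axis would underflow:

fn final_position(shape: &[usize]) -> Vec<usize> {
    // one step back from the end along every axis
    shape.iter().map(|d| d - 1).collect()
}

fn main() {
    assert_eq!(final_position(&[3, 3]), vec![2, 2]);
    assert_eq!(final_position(&[3, 3, 1]), vec![2, 2, 0]);
    // an empty shape (a scalar) yields the empty position: the lone element
    assert!(final_position(&[]).is_empty());
}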
pub fn size(&self) -> usize { - self.layout.size() + self.layout().size() } /// Get a reference to the stride of the tensor pub fn stride(&self) -> &Stride { - self.layout.stride() + self.layout().stride() } /// Create an iterator over the tensor pub fn strided(&self) -> StrideIter<'_, T> { @@ -228,44 +229,23 @@ impl TensorBase { /// Turn the tensor into a scalar /// If the tensor has a rank greater than 0, this will return an error pub fn to_scalar(&self) -> TensorResult<&T> { - if self.is_scalar() { - Ok(self.first().unwrap()) - } else { - Err(TensorError::NotScalar) + if !self.is_scalar() { + return Err(TensorError::NotScalar); } + Ok(self.first().unwrap()) } /// Turn the tensor into a one-dimensional vector pub fn to_vec(&self) -> Vec where T: Clone, { - self.store.to_vec() + self.data.to_vec() } /// Changes the kind of tensor to a variable pub fn variable(mut self) -> Self { self.kind = TensorKind::Variable; self } - - pub fn apply_binary(&self, other: &Self, op: BinaryOp, f: F) -> Self - where - F: Fn(&T, &T) -> T, - T: Clone, - { - let store = self - .data() - .iter() - .zip(other.data().iter()) - .map(|(a, b)| f(a, b)) - .collect(); - TensorBase { - id: TensorId::new(), - kind: self.kind(), - layout: self.layout().clone(), - op: BackpropOp::binary(self.clone(), other.clone(), op), - store, - } - } /// pub unsafe fn with_layout(mut self, layout: Layout) -> Self { self.layout = layout; @@ -292,13 +272,13 @@ where } pub fn view<'a>(&'a self) -> TensorBase<&'a T> { - let store = self.store.iter().collect(); + let store = self.data.iter().collect(); TensorBase { id: self.id, kind: self.kind, layout: self.layout.clone(), op: self.op.view(), - store, + data: store, } } } @@ -306,15 +286,15 @@ where #[allow(dead_code)] impl TensorBase { pub(crate) fn data(&self) -> &Vec { - &self.store + &self.data } pub(crate) fn data_mut(&mut self) -> &mut Vec { - &mut self.store + &mut self.data } pub(crate) fn get_by_index(&self, index: usize) -> Option<&T> { - self.store.get(index) + self.data.get(index) } pub(crate) fn map<'a, F>(&'a self, f: F) -> Map, F> @@ -322,7 +302,7 @@ impl TensorBase { F: FnMut(&'a T) -> T, T: 'a + Clone, { - self.store.iter().map(f) + self.data.iter().map(f) } pub(crate) fn mapv(&self, f: F) -> TensorBase @@ -330,13 +310,13 @@ impl TensorBase { F: Fn(T) -> T, T: Copy, { - let store = self.store.iter().copied().map(f).collect(); + let store = self.data.iter().copied().map(f).collect(); TensorBase { id: TensorId::new(), kind: self.kind, layout: self.layout.clone(), op: self.op.clone(), - store, + data: store, } } } @@ -349,7 +329,7 @@ where fn index(&self, index: Idx) -> &Self::Output { let i = self.layout().index(index); - &self.store[i] + &self.data[i] } } @@ -359,7 +339,7 @@ where { fn index_mut(&mut self, index: Idx) -> &mut Self::Output { let i = self.layout().index(index); - &mut self.store[i] + &mut self.data[i] } } @@ -370,12 +350,6 @@ where T: PartialEq, { fn eq(&self, other: &Self) -> bool { - self.layout == other.layout && self.store == other.store - } -} - -impl FromIterator for TensorBase { - fn from_iter>(iter: I) -> Self { - Self::from_vec(Vec::from_iter(iter)) + self.layout == other.layout && self.data == other.data } } diff --git a/tensor/src/utils.rs b/tensor/src/utils.rs index 4be2199e..f4872f6e 100644 --- a/tensor/src/utils.rs +++ b/tensor/src/utils.rs @@ -26,7 +26,7 @@ where let pos = i * rhs.shape().ncols() + j; let left = i * lhs.shape().ncols() + k; let right = k * rhs.shape().ncols() + j; - result[pos] += lhs.store[left] * rhs.store[right]; + 
result[pos] += lhs.data[left] * rhs.data[right]; } } } @@ -52,7 +52,7 @@ where let pos = i * rhs.shape().ncols() + j; let left = i * lhs.shape().ncols() + k; let right = k * rhs.shape().ncols() + j; - result[pos] += lhs.store[left] * rhs.store[right]; + result[pos] += lhs.data[left] * rhs.data[right]; } } } diff --git a/tensor/tests/iter.rs b/tensor/tests/iter.rs new file mode 100644 index 00000000..bd2dccc5 --- /dev/null +++ b/tensor/tests/iter.rs @@ -0,0 +1,22 @@ +/* + Appellation: iter + Contrib: FL03 +*/ +#![cfg(test)] +extern crate acme_tensor as acme; + +use acme::prelude::{IntoShape, Tensor}; + +#[test] +fn test_sum() { + let shape = (2, 2).into_shape(); + let a = Tensor::fill(shape, 1f64); + assert_eq!(a.sum(), 4.0); +} + +#[test] +fn test_product() { + let shape = (2, 2).into_shape(); + let a = Tensor::fill(shape, 2f64); + assert_eq!(a.product(), 16.0); +} \ No newline at end of file diff --git a/tensor/tests/tensor.rs b/tensor/tests/tensor.rs index f8b8ab84..096c0411 100644 --- a/tensor/tests/tensor.rs +++ b/tensor/tests/tensor.rs @@ -6,7 +6,6 @@ extern crate acme_tensor as acme; use acme::prelude::{IntoShape, Tensor}; -use num::One; #[test] fn test_tensor() { @@ -18,9 +17,15 @@ fn test_tensor() { assert_eq!(a.shape(), b.shape()); assert_eq!(a.size(), b.size()); assert_eq!(a.stride(), b.stride()); +} - let tensor = Tensor::::one(); - assert!(tensor.is_scalar()); +#[test] +fn test_scalar_tensor() { + use num::{One, Zero}; + let one = Tensor::::one(); + let zero = Tensor::::zero(); + assert!(one.is_scalar()); + assert!(zero.is_scalar()); } #[test] @@ -47,8 +52,8 @@ fn test_index() { .unwrap(); assert_eq!(a[[0, 0]], 0f64); - assert_eq!(a[[0, 1]], 1f64); - assert_eq!(a[[1, 2]], 5f64); + assert_eq!(a[&[0, 1]], 1f64); + assert_eq!(a[vec![1, 2]], 5f64); } #[test] @@ -63,3 +68,17 @@ fn test_higher_dim() { assert_eq!(a.stride(), b.stride()); assert_eq!(a.stride().len(), 4); } + +#[test] +fn test_sum() { + let shape = (2, 2).into_shape(); + let a = Tensor::fill(shape, 1f64); + assert_eq!(a.sum(), 4.0); +} + +#[test] +fn test_product() { + let shape = (2, 2).into_shape(); + let a = Tensor::fill(shape, 2f64); + assert_eq!(a.product(), 16.0); +} \ No newline at end of file From deed2a759025c6ee415fddc76bcbbf7b3d7a77f6 Mon Sep 17 00:00:00 2001 From: Joe McCain III Date: Tue, 2 Apr 2024 14:01:08 -0500 Subject: [PATCH 78/87] update Signed-off-by: Joe McCain III --- Cargo.toml | 4 +- acme/Cargo.toml | 20 ++++---- acme/benches/tensor.rs | 1 - core/src/ops/binary/kinds.rs | 2 - graphs/Cargo.toml | 4 +- graphs/src/ops/arithmetic.rs | 3 +- graphs/src/ops/mod.rs | 1 - tensor/Cargo.toml | 4 +- tensor/src/actions/iter/iterator.rs | 1 - tensor/src/actions/iter/strides.rs | 7 ++- tensor/src/backend/mod.rs | 6 +-- tensor/src/data/container.rs | 1 - tensor/src/impls/iter.rs | 5 +- tensor/src/impls/ops/binary.rs | 76 ++++++++++++++++++++++------- tensor/src/ops/op.rs | 2 +- tensor/src/shape/shape.rs | 2 - tensor/src/stats/mod.rs | 1 - tensor/src/types/mod.rs | 2 +- tensor/src/types/tensors.rs | 12 ++++- tensor/tests/iter.rs | 2 +- tensor/tests/tensor.rs | 2 +- 21 files changed, 102 insertions(+), 56 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 7a35a49e..df87298e 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -8,8 +8,8 @@ keywords = ["acme", "autodiff", "mathematics", "tensor"] license = "Apache-2.0" repository = "https://github.com/FL03/acme" readme = "README.md" -version = "0.3.0" -# version = "0.3.0-nightly.4" +# version = "0.3.0" +version = "0.3.0-nightly.4" [workspace] default-members = [ diff 
--git a/acme/Cargo.toml b/acme/Cargo.toml index 1c40aeff..b3e61155 100644 --- a/acme/Cargo.toml +++ b/acme/Cargo.toml @@ -87,16 +87,16 @@ required-features = ["macros"] [build-dependencies] [dependencies] -acme-core = { path = "../core", version = "0.3.0" } -acme-derive = { optional = true, path = "../derive", version = "0.3.0" } -acme-graphs = { optional = true, path = "../graphs", version = "0.3.0" } -acme-macros = { optional = true, path = "../macros", version = "0.3.0" } -acme-tensor = { optional = true, path = "../tensor", version = "0.3.0" } -# acme-core = { path = "../core", version = "0.3.0-nightly.4" } -# acme-derive = { optional = true, path = "../derive", version = "0.3.0-nightly.4" } -# acme-graphs = { optional = true, path = "../graphs", version = "0.3.0-nightly.4" } -# acme-macros = { optional = true, path = "../macros", version = "0.3.0-nightly.4" } -# acme-tensor = { optional = true, path = "../tensor", version = "0.3.0-nightly.4" } +# acme-core = { path = "../core", version = "0.3.0" } +# acme-derive = { optional = true, path = "../derive", version = "0.3.0" } +# acme-graphs = { optional = true, path = "../graphs", version = "0.3.0" } +# acme-macros = { optional = true, path = "../macros", version = "0.3.0" } +# acme-tensor = { optional = true, path = "../tensor", version = "0.3.0" } +acme-core = { path = "../core", version = "0.3.0-nightly.4" } +acme-derive = { optional = true, path = "../derive", version = "0.3.0-nightly.4" } +acme-graphs = { optional = true, path = "../graphs", version = "0.3.0-nightly.4" } +acme-macros = { optional = true, path = "../macros", version = "0.3.0-nightly.4" } +acme-tensor = { optional = true, path = "../tensor", version = "0.3.0-nightly.4" } [dev-dependencies] approx = "0.5" diff --git a/acme/benches/tensor.rs b/acme/benches/tensor.rs index fb084100..c5164d13 100644 --- a/acme/benches/tensor.rs +++ b/acme/benches/tensor.rs @@ -31,4 +31,3 @@ fn bench_iter_rev(b: &mut Bencher) { let tensor = Tensor::linspace(0f64, n as f64, n); b.iter(|| tensor.strided().rev().take(n)) } - diff --git a/core/src/ops/binary/kinds.rs b/core/src/ops/binary/kinds.rs index 7ae562e6..2bd277b2 100644 --- a/core/src/ops/binary/kinds.rs +++ b/core/src/ops/binary/kinds.rs @@ -56,5 +56,3 @@ impl BinaryOp { } } } - - diff --git a/graphs/Cargo.toml b/graphs/Cargo.toml index 41b606ed..0fcc3415 100644 --- a/graphs/Cargo.toml +++ b/graphs/Cargo.toml @@ -44,8 +44,8 @@ strum.workspace = true [dependencies.acme-core] path = "../core" -version = "0.3.0" -# version = "0.3.0-nightly.4" +# version = "0.3.0" +version = "0.3.0-nightly.4" [package.metadata.docs.rs] all-features = true diff --git a/graphs/src/ops/arithmetic.rs b/graphs/src/ops/arithmetic.rs index b66ee0bd..aee592c2 100644 --- a/graphs/src/ops/arithmetic.rs +++ b/graphs/src/ops/arithmetic.rs @@ -92,7 +92,7 @@ macro_rules! 
impl_binary_op { $( impl_binary_op!($op, $bound, $operator); )* - + }; ($op:ident, $bound:ident, $operator:tt) => { operator!($op); @@ -128,7 +128,6 @@ operators!(Arithmetic; {Add: Addition => add, Div: Division => div, Mul: Multipl impl_binary_op!((Addition, Add, +), (Division, Div, /), (Multiplication, Mul, *), (Remainder, Rem, %), (Subtraction, Sub, -)); - impl Arithmetic { pub fn new(op: Arithmetic) -> Self { op diff --git a/graphs/src/ops/mod.rs b/graphs/src/ops/mod.rs index 969c3831..f776e7a7 100644 --- a/graphs/src/ops/mod.rs +++ b/graphs/src/ops/mod.rs @@ -46,7 +46,6 @@ pub trait Operator { } impl Operator for Box { - fn name(&self) -> String { self.as_ref().name() } diff --git a/tensor/Cargo.toml b/tensor/Cargo.toml index 14d71818..7bbf9995 100644 --- a/tensor/Cargo.toml +++ b/tensor/Cargo.toml @@ -43,8 +43,8 @@ strum = { features = ["derive"], version = "0.26" } [dependencies.acme-core] path = "../core" -version = "0.3.0" -# version = "0.3.0-nightly.4" +# version = "0.3.0" +version = "0.3.0-nightly.4" [dev-dependencies] approx = "0.5" diff --git a/tensor/src/actions/iter/iterator.rs b/tensor/src/actions/iter/iterator.rs index 842e03a6..1d8c5338 100644 --- a/tensor/src/actions/iter/iterator.rs +++ b/tensor/src/actions/iter/iterator.rs @@ -23,4 +23,3 @@ pub struct BaseIter<'a, T> { data: &'a [T], index: usize, } - diff --git a/tensor/src/actions/iter/strides.rs b/tensor/src/actions/iter/strides.rs index 0d0ab1e0..9562aac9 100644 --- a/tensor/src/actions/iter/strides.rs +++ b/tensor/src/actions/iter/strides.rs @@ -87,7 +87,12 @@ impl<'a> DoubleEndedIterator for Strided<'a> { } else { return None; }; - let position = self.shape.iter().zip(pos.iter()).map(|(s, p)| s - p).collect(); + let position = self + .shape + .iter() + .zip(pos.iter()) + .map(|(s, p)| s - p) + .collect(); let scope = self.index(&position); println!("{:?}", &position); Some((position, scope)) diff --git a/tensor/src/backend/mod.rs b/tensor/src/backend/mod.rs index f1182dbe..75148ca8 100644 --- a/tensor/src/backend/mod.rs +++ b/tensor/src/backend/mod.rs @@ -19,11 +19,9 @@ pub trait BackendStorage { #[allow(unused_imports)] pub(crate) mod prelude { - pub use super::{Backend, BackendStorage}; pub use super::devices::Device; + pub use super::{Backend, BackendStorage}; } #[cfg(test)] -mod tests { - -} +mod tests {} diff --git a/tensor/src/data/container.rs b/tensor/src/data/container.rs index 31e12e53..a9b89ce5 100644 --- a/tensor/src/data/container.rs +++ b/tensor/src/data/container.rs @@ -90,7 +90,6 @@ where dbg!("Implement a custom iter for ContainerBase"); self.as_slice_memory_order().unwrap().iter() } - pub fn layout(&self) -> &Layout { &self.layout diff --git a/tensor/src/impls/iter.rs b/tensor/src/impls/iter.rs index 46f8c69d..ea5612db 100644 --- a/tensor/src/impls/iter.rs +++ b/tensor/src/impls/iter.rs @@ -5,7 +5,10 @@ use crate::prelude::Scalar; use crate::tensor::TensorBase; -impl TensorBase where T: Scalar { +impl TensorBase +where + T: Scalar, +{ pub fn sum(&self) -> T { self.data().iter().copied().sum() } diff --git a/tensor/src/impls/ops/binary.rs b/tensor/src/impls/ops/binary.rs index a8373521..08e11a9c 100644 --- a/tensor/src/impls/ops/binary.rs +++ b/tensor/src/impls/ops/binary.rs @@ -8,8 +8,16 @@ use acme::ops::binary::BinaryOp; use core::ops; use num::traits::Pow; - -pub(crate) fn broadcast_scalar_op(lhs: &TensorBase, rhs: &TensorBase, op: BinaryOp, f: F) -> TensorBase where F: Fn(T, T) -> T, T: Copy + Default { +pub(crate) fn broadcast_scalar_op( + lhs: &TensorBase, + rhs: &TensorBase, + op: 
BinaryOp, + f: F, +) -> TensorBase +where + F: Fn(T, T) -> T, + T: Copy + Default, +{ let mut lhs = lhs.clone(); let mut rhs = rhs.clone(); if lhs.is_scalar() { @@ -19,16 +27,27 @@ pub(crate) fn broadcast_scalar_op(lhs: &TensorBase, rhs: &TensorBase rhs = rhs.broadcast(lhs.shape()); } let shape = lhs.shape().clone(); - let store = lhs.data().iter().zip(rhs.data().iter()).map(|(a, b)| f(*a, *b)).collect(); + let store = lhs + .data() + .iter() + .zip(rhs.data().iter()) + .map(|(a, b)| f(*a, *b)) + .collect(); let op = TensorExpr::binary(lhs, rhs, op); from_vec_with_op(false, op, shape, store) } -fn check_shapes_or_scalar(lhs: &TensorBase, rhs: &TensorBase) where T: Clone + Default { +fn check_shapes_or_scalar(lhs: &TensorBase, rhs: &TensorBase) +where + T: Clone + Default, +{ let is_scalar = lhs.is_scalar() || rhs.is_scalar(); - debug_assert!(is_scalar || lhs.shape() == rhs.shape(), "Shape Mismatch: {:?} != {:?}", lhs.shape(), rhs.shape()); - - + debug_assert!( + is_scalar || lhs.shape() == rhs.shape(), + "Shape Mismatch: {:?} != {:?}", + lhs.shape(), + rhs.shape() + ); } macro_rules! check { @@ -39,32 +58,50 @@ macro_rules! check { }; } -impl TensorBase where T: Scalar { +impl TensorBase +where + T: Scalar, +{ pub fn apply_binary(&self, other: &Self, op: BinaryOp) -> Self { check_shapes_or_scalar(self, other); let shape = self.shape(); - let store = self.data().iter().zip(other.data().iter()).map(|(a, b)| *a + *b).collect(); + let store = self + .data() + .iter() + .zip(other.data().iter()) + .map(|(a, b)| *a + *b) + .collect(); let op = TensorExpr::binary(self.clone(), other.clone(), op); from_vec_with_op(false, op, shape, store) } - pub fn apply_binaryf(&self, other: &Self, op: BinaryOp, f: F) -> Self where F: Fn(T, T) -> T { + pub fn apply_binaryf(&self, other: &Self, op: BinaryOp, f: F) -> Self + where + F: Fn(T, T) -> T, + { check_shapes_or_scalar(self, other); let shape = self.shape(); - let store = self.data().iter().zip(other.data().iter()).map(|(a, b)| f(*a, *b)).collect(); + let store = self + .data() + .iter() + .zip(other.data().iter()) + .map(|(a, b)| f(*a, *b)) + .collect(); let op = TensorExpr::binary(self.clone(), other.clone(), op); from_vec_with_op(false, op, shape, store) } } -impl TensorBase where T: Scalar { +impl TensorBase +where + T: Scalar, +{ pub fn pow(&self, exp: T) -> Self { let shape = self.shape(); let store = self.data().iter().copied().map(|a| a.pow(exp)).collect(); let op = TensorExpr::binary_scalar(self.clone(), exp, BinaryOp::Pow); from_vec_with_op(false, op, shape, store) } - } impl Pow for TensorBase @@ -199,7 +236,7 @@ macro_rules! impl_binary_op { } } }; - + } macro_rules! impl_assign_op { @@ -243,7 +280,7 @@ macro_rules! impl_binary_method { let op = TensorExpr::binary_scalar(self.clone(), other.clone(), BinaryOp::$variant); from_vec_with_op(false, op, shape, store) } - + }; (tensor: $variant:ident, $method:ident, $op:tt) => { pub fn $method(&self, other: &Self) -> Self { @@ -253,7 +290,7 @@ macro_rules! 
impl_binary_method { let op = TensorExpr::binary(self.clone(), other.clone(), BinaryOp::$variant); from_vec_with_op(false, op, shape, store) } - + }; } @@ -265,7 +302,10 @@ impl_assign_op!(MulAssign, mul_assign, Mul, *); impl_assign_op!(RemAssign, rem_assign, Rem, %); impl_assign_op!(SubAssign, sub_assign, Sub, -); -impl TensorBase where T: Scalar { +impl TensorBase +where + T: Scalar, +{ impl_binary_method!(tensor: Add, add, +); impl_binary_method!(scalar: Add, add_scalar, +); -} \ No newline at end of file +} diff --git a/tensor/src/ops/op.rs b/tensor/src/ops/op.rs index 4c678489..f967491e 100644 --- a/tensor/src/ops/op.rs +++ b/tensor/src/ops/op.rs @@ -9,7 +9,7 @@ use acme::prelude::{BinaryOp, UnaryOp}; pub type BoxTensor = Box>; -#[derive(Clone, Debug, Eq, Hash, Ord, PartialEq, PartialOrd,)] +#[derive(Clone, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)] #[non_exhaustive] pub enum TensorExpr { Binary(BoxTensor, BoxTensor, BinaryOp), diff --git a/tensor/src/shape/shape.rs b/tensor/src/shape/shape.rs index 66c64e70..d11c91b9 100644 --- a/tensor/src/shape/shape.rs +++ b/tensor/src/shape/shape.rs @@ -354,7 +354,6 @@ unsafe impl Send for Shape {} unsafe impl Sync for Shape {} - impl From<()> for Shape { fn from(_: ()) -> Self { Self::default() @@ -415,7 +414,6 @@ impl From<(usize, usize, usize, usize, usize, usize)> for Shape { } } - // macro_rules! tuple_vec { // ($($n:tt),*) => { // vec![$($n,)*] diff --git a/tensor/src/stats/mod.rs b/tensor/src/stats/mod.rs index 4a5407b1..b00acd97 100644 --- a/tensor/src/stats/mod.rs +++ b/tensor/src/stats/mod.rs @@ -23,7 +23,6 @@ pub trait SummaryStatistics { } pub trait TensorStats: SummaryStatistics { - /// Compute the mean along the specified axis. fn mean_axis(&self, axis: Axis) -> T; } diff --git a/tensor/src/types/mod.rs b/tensor/src/types/mod.rs index 9852fcfe..a94222a6 100644 --- a/tensor/src/types/mod.rs +++ b/tensor/src/types/mod.rs @@ -16,4 +16,4 @@ pub(crate) mod prelude { pub use super::kinds::*; pub use super::order::*; pub use super::tensors::*; -} \ No newline at end of file +} diff --git a/tensor/src/types/tensors.rs b/tensor/src/types/tensors.rs index 0a762ab7..466c22c9 100644 --- a/tensor/src/types/tensors.rs +++ b/tensor/src/types/tensors.rs @@ -9,7 +9,17 @@ use serde::{Deserialize, Serialize}; use strum::{Display, EnumCount, EnumDiscriminants, EnumIs, EnumIter, EnumString, VariantNames}; #[derive(Clone, Debug, EnumDiscriminants, Eq, PartialEq)] -#[strum_discriminants(derive(Display, EnumCount, EnumIs, EnumIter, EnumString, Hash, Ord, PartialOrd, VariantNames))] +#[strum_discriminants(derive( + Display, + EnumCount, + EnumIs, + EnumIter, + EnumString, + Hash, + Ord, + PartialOrd, + VariantNames +))] #[strum_discriminants(name(TensorType))] #[cfg_attr(feature = "serde", strum_discriminants(derive(Deserialize, Serialize)))] pub enum Tensors { diff --git a/tensor/tests/iter.rs b/tensor/tests/iter.rs index bd2dccc5..4d6207ac 100644 --- a/tensor/tests/iter.rs +++ b/tensor/tests/iter.rs @@ -19,4 +19,4 @@ fn test_product() { let shape = (2, 2).into_shape(); let a = Tensor::fill(shape, 2f64); assert_eq!(a.product(), 16.0); -} \ No newline at end of file +} diff --git a/tensor/tests/tensor.rs b/tensor/tests/tensor.rs index 096c0411..5cafa885 100644 --- a/tensor/tests/tensor.rs +++ b/tensor/tests/tensor.rs @@ -81,4 +81,4 @@ fn test_product() { let shape = (2, 2).into_shape(); let a = Tensor::fill(shape, 2f64); assert_eq!(a.product(), 16.0); -} \ No newline at end of file +} From 778b2bf11b235d713b6ad3ff33da08b8aa677fd3 Mon Sep 17 00:00:00 2001 
From: Joe McCain III Date: Tue, 2 Apr 2024 15:53:21 -0500 Subject: [PATCH 79/87] update Signed-off-by: Joe McCain III --- Cargo.toml | 5 +- acme/Cargo.toml | 30 ++-- core/Cargo.toml | 1 - .../ops => core/src/ops/binary}/arithmetic.rs | 80 +++++---- core/src/ops/binary/mod.rs | 26 ++- core/src/ops/mod.rs | 3 +- core/src/ops/operator.rs | 17 ++ core/src/types/constants.rs | 101 ++++++++++-- core/src/types/mod.rs | 41 +---- core/src/types/operators.rs | 32 ---- graphs/Cargo.toml | 4 +- graphs/src/ops/kinds.rs | 3 +- graphs/src/ops/mod.rs | 28 +--- graphs/src/scg/graph.rs | 3 +- tensor/Cargo.toml | 4 +- tensor/src/actions/index/slice.rs | 39 ++++- tensor/src/impls/linalg.rs | 156 ++++++++---------- tensor/src/tensor.rs | 25 +++ tensor/tests/linalg.rs | 2 +- 19 files changed, 337 insertions(+), 263 deletions(-) rename {graphs/src/ops => core/src/ops/binary}/arithmetic.rs (71%) create mode 100644 core/src/ops/operator.rs delete mode 100644 core/src/types/operators.rs diff --git a/Cargo.toml b/Cargo.toml index df87298e..72597fff 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -8,8 +8,8 @@ keywords = ["acme", "autodiff", "mathematics", "tensor"] license = "Apache-2.0" repository = "https://github.com/FL03/acme" readme = "README.md" -# version = "0.3.0" -version = "0.3.0-nightly.4" +version = "0.3.0" +# version = "0.3.0-nightly.5" [workspace] default-members = [ @@ -27,7 +27,6 @@ members = [ resolver = "2" [workspace.dependencies] -anyhow = "1" strum = { features = ["derive"], version = "0.26" } smart-default = "0.7" diff --git a/acme/Cargo.toml b/acme/Cargo.toml index b3e61155..c6b1c630 100644 --- a/acme/Cargo.toml +++ b/acme/Cargo.toml @@ -44,10 +44,6 @@ macros = [ "dep:acme-macros" ] -tensor = [ - "dep:acme-tensor" -] - serde = [ "acme-core/serde", "acme-graphs/serde", @@ -59,6 +55,12 @@ std = [ "acme-tensor/std" ] +tensor = [ + "dep:acme-tensor" +] + + + [lib] bench = true @@ -87,16 +89,16 @@ required-features = ["macros"] [build-dependencies] [dependencies] -# acme-core = { path = "../core", version = "0.3.0" } -# acme-derive = { optional = true, path = "../derive", version = "0.3.0" } -# acme-graphs = { optional = true, path = "../graphs", version = "0.3.0" } -# acme-macros = { optional = true, path = "../macros", version = "0.3.0" } -# acme-tensor = { optional = true, path = "../tensor", version = "0.3.0" } -acme-core = { path = "../core", version = "0.3.0-nightly.4" } -acme-derive = { optional = true, path = "../derive", version = "0.3.0-nightly.4" } -acme-graphs = { optional = true, path = "../graphs", version = "0.3.0-nightly.4" } -acme-macros = { optional = true, path = "../macros", version = "0.3.0-nightly.4" } -acme-tensor = { optional = true, path = "../tensor", version = "0.3.0-nightly.4" } +acme-core = { path = "../core", version = "0.3.0" } +acme-derive = { optional = true, path = "../derive", version = "0.3.0" } +acme-graphs = { optional = true, path = "../graphs", version = "0.3.0" } +acme-macros = { optional = true, path = "../macros", version = "0.3.0" } +acme-tensor = { optional = true, path = "../tensor", version = "0.3.0" } +# acme-core = { path = "../core", version = "0.3.0-nightly.5" } +# acme-derive = { optional = true, path = "../derive", version = "0.3.0-nightly.5" } +# acme-graphs = { optional = true, path = "../graphs", version = "0.3.0-nightly.5" } +# acme-macros = { optional = true, path = "../macros", version = "0.3.0-nightly.5" } +# acme-tensor = { optional = true, path = "../tensor", version = "0.3.0-nightly.5" } [dev-dependencies] approx = "0.5" diff --git 
a/core/Cargo.toml b/core/Cargo.toml index 1ee48fe5..6fc46161 100644 --- a/core/Cargo.toml +++ b/core/Cargo.toml @@ -36,7 +36,6 @@ test = true [dev-dependencies] [dependencies] -anyhow.workspace = true lazy_static = "1" num = "0.4" serde = { optional = true, features = ["derive"], version = "1" } diff --git a/graphs/src/ops/arithmetic.rs b/core/src/ops/binary/arithmetic.rs similarity index 71% rename from graphs/src/ops/arithmetic.rs rename to core/src/ops/binary/arithmetic.rs index aee592c2..04aaf8a1 100644 --- a/graphs/src/ops/arithmetic.rs +++ b/core/src/ops/binary/arithmetic.rs @@ -2,14 +2,15 @@ Appellation: arithmetic Contrib: FL03 */ -use super::{BinaryOperation, Operator}; +use super::BinaryOperation; +use crate::ops::{Operator, OperatorKind}; use num::traits::NumOps; #[cfg(feature = "serde")] use serde::{Deserialize, Serialize}; use strum::{Display, EnumCount, EnumIs, EnumIter, VariantNames}; macro_rules! operator { - ($op:ident) => { + ($op:ident, $kind:ident) => { #[derive(Clone, Copy, Debug, Default, Eq, Hash, Ord, PartialEq, PartialOrd)] #[cfg_attr(feature = "serde", derive(Deserialize, Serialize,))] @@ -25,15 +26,26 @@ macro_rules! operator { } } + impl core::fmt::Display for $op { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "{}", self.name()) + } + } + impl Operator for $op { + + fn kind(&self) -> OperatorKind { + OperatorKind::$kind + } + fn name(&self) -> String { self.name() } } }; - ($($op:ident),*) => { + ($kind:ident: $($op:ident),*) => { $( - operator!($op); + operator!($op, $kind); )* }; @@ -76,6 +88,25 @@ macro_rules! operators { } )* + + pub fn eval(&self, lhs: A, rhs: B) -> C + where + A: NumOps, + { + self.op().eval(lhs, rhs) + } + + pub fn op(self) -> Box> + where + A: NumOps, + { + match self { + $( + $group::$variant(op) => Box::new(op), + )* + } + } + pub fn name(&self) -> String { match self { $( @@ -84,6 +115,16 @@ macro_rules! operators { } } } + + impl Operator for $group { + fn kind(&self) -> OperatorKind { + OperatorKind::Binary + } + + fn name(&self) -> String { + self.name() + } + } }; } @@ -95,7 +136,7 @@ macro_rules! impl_binary_op { }; ($op:ident, $bound:ident, $operator:tt) => { - operator!($op); + operator!($op, Binary); impl BinaryOperation for $op where @@ -109,7 +150,7 @@ macro_rules! 
impl_binary_op { } }; (expr $op:ident, $bound:ident, $exp:expr) => { - operator!($op); + operator!($op, Binary); impl BinaryOperation for $op where @@ -132,31 +173,4 @@ impl Arithmetic { pub fn new(op: Arithmetic) -> Self { op } - - pub fn into_op(self) -> Box> - where - A: NumOps, - { - match self { - Arithmetic::Add(op) => Box::new(op), - Arithmetic::Div(op) => Box::new(op), - Arithmetic::Mul(op) => Box::new(op), - Arithmetic::Rem(op) => Box::new(op), - Arithmetic::Sub(op) => Box::new(op), - } - } - - pub fn op(&self) -> Box> - where - A: NumOps, - { - self.into_op() - } - - pub fn eval(&self, lhs: A, rhs: B) -> C - where - A: NumOps, - { - self.op().eval(lhs, rhs) - } } diff --git a/core/src/ops/binary/mod.rs b/core/src/ops/binary/mod.rs index f4f64848..566a7fec 100644 --- a/core/src/ops/binary/mod.rs +++ b/core/src/ops/binary/mod.rs @@ -2,16 +2,36 @@ Appellation: binary Contrib: FL03 */ -pub use self::{kinds::*, operator::*, specs::*}; +pub use self::{arithmetic::*, kinds::*, operator::*, specs::*}; +pub(crate) mod arithmetic; pub(crate) mod kinds; pub(crate) mod operator; pub(crate) mod specs; -pub trait BinOp { +pub trait BinaryOperation { type Output; - fn bin_op(&self, other: &T) -> Self::Output; + fn eval(&self, lhs: A, rhs: B) -> Self::Output; +} + +impl BinaryOperation for S +where + S: Fn(A, B) -> C, +{ + type Output = C; + + fn eval(&self, lhs: A, rhs: B) -> Self::Output { + self(lhs, rhs) + } +} + +impl BinaryOperation for Box> { + type Output = C; + + fn eval(&self, lhs: A, rhs: B) -> Self::Output { + self.as_ref().eval(lhs, rhs) + } } #[cfg(test)] diff --git a/core/src/ops/mod.rs b/core/src/ops/mod.rs index 6d0d76a5..307d3de9 100644 --- a/core/src/ops/mod.rs +++ b/core/src/ops/mod.rs @@ -5,9 +5,10 @@ //! # Operations //! //! -pub use self::kinds::*; +pub use self::{kinds::*, operator::*}; pub(crate) mod kinds; +pub(crate) mod operator; pub mod binary; pub mod unary; diff --git a/core/src/ops/operator.rs b/core/src/ops/operator.rs new file mode 100644 index 00000000..f3b8128d --- /dev/null +++ b/core/src/ops/operator.rs @@ -0,0 +1,17 @@ +/* + Appellation: operator + Contrib: FL03 +*/ + +pub enum OperatorKind { + Binary, + Unary, + Ternary, + Nary, +} + +pub trait Operator { + fn kind(&self) -> OperatorKind; + + fn name(&self) -> String; +} diff --git a/core/src/types/constants.rs b/core/src/types/constants.rs index 1102f3cc..a36b522f 100644 --- a/core/src/types/constants.rs +++ b/core/src/types/constants.rs @@ -44,6 +44,18 @@ impl AsMut for Constant { } } +impl Borrow for Constant { + fn borrow(&self) -> &T { + &self.0 + } +} + +impl BorrowMut for Constant { + fn borrow_mut(&mut self) -> &mut T { + &mut self.0 + } +} + impl Deref for Constant { type Target = T; @@ -118,6 +130,83 @@ unsafe impl Send for Constant {} unsafe impl Sync for Constant {} +macro_rules! 
impl_binary_op { + ($(($bound:ident, $method:ident, $e:tt)),*) => { + $( + impl_binary_op!($bound, $method, $e); + )* + }; + ($bound:ident, $fn:ident, $e:tt) => { + impl core::ops::$bound for Constant + where + T: core::ops::$bound, + { + type Output = Constant; + + fn $fn(self, rhs: Constant) -> Self::Output { + Constant(self.0 $e rhs.0) + } + } + + impl<'a, T> core::ops::$bound<&'a Constant> for Constant + where + T: Copy + core::ops::$bound, + { + type Output = Constant; + + fn $fn(self, rhs: &'a Constant) -> Self::Output { + Constant(self.0 $e rhs.0) + } + } + + impl<'a, T> core::ops::$bound> for &'a Constant + where + T: Copy + core::ops::$bound, + { + type Output = Constant; + + fn $fn(self, rhs: Constant) -> Self::Output { + Constant(self.0 $e rhs.0) + } + } + + impl<'a, T> core::ops::$bound<&'a Constant> for &'a Constant + where + T: Copy + core::ops::$bound, + { + type Output = Constant; + + fn $fn(self, rhs: &'a Constant) -> Self::Output { + Constant(self.0 $e rhs.0) + } + } + + impl core::ops::$bound for Constant + where + T: core::ops::$bound, + { + type Output = Self; + + fn $fn(self, rhs: T) -> Self::Output { + Constant(self.0 $e rhs) + } + } + + impl<'a, T> core::ops::$bound for &'a Constant + where + T: Copy + core::ops::$bound, + { + type Output = Constant; + + fn $fn(self, rhs: T) -> Self::Output { + Constant(self.0 $e rhs) + } + } + }; +} + +impl_binary_op!((Add, add, +), (Div, div, /), (Mul, mul, *), (Rem, rem, %), (Sub, sub, -)); + impl Num for Constant where T: Num, @@ -154,15 +243,3 @@ where self.0.is_zero() } } - -impl Borrow for Constant { - fn borrow(&self) -> &T { - &self.0 - } -} - -impl BorrowMut for Constant { - fn borrow_mut(&mut self) -> &mut T { - &mut self.0 - } -} diff --git a/core/src/types/mod.rs b/core/src/types/mod.rs index 35d59780..31a781de 100644 --- a/core/src/types/mod.rs +++ b/core/src/types/mod.rs @@ -5,11 +5,10 @@ //! # Types //! //! -pub use self::{constants::*, dual::*, operators::*, variables::*}; +pub use self::{constants::*, dual::*, variables::*}; pub(crate) mod constants; pub(crate) mod dual; -pub(crate) mod operators; pub(crate) mod variables; /// A boxed error type for use in the library. @@ -19,44 +18,6 @@ pub type BoxError = Box; #[cfg(feature = "std")] pub type BoxResult = core::result::Result; -macro_rules! impl_op { - ($name:ident, $bound:ident, $fn:ident, $val:tt, $e:expr) => { - impl std::ops::$bound for $name - where - T: std::ops::$bound, - { - type Output = Self; - - fn $fn(self, rhs: $name) -> Self::Output { - $e(self.$val, rhs.$val) - } - } - - impl std::ops::$bound for $name - where - T: std::ops::$bound, - { - type Output = Self; - - fn $fn(self, rhs: T) -> Self::Output { - $e(self.$val, rhs) - } - } - }; -} - -macro_rules! 
impl_const_op { - ($bound:ident, $fn:ident, $e:expr) => { - impl_op!(Constant, $bound, $fn, 0, |a, b| Constant::new($e(a, b))); - }; -} - -impl_const_op!(Add, add, |a, b| a + b); -impl_const_op!(Div, div, |a, b| a / b); -impl_const_op!(Mul, mul, |a, b| a * b); -impl_const_op!(Rem, rem, |a, b| a % b); -impl_const_op!(Sub, sub, |a, b| a - b); - #[cfg(test)] mod tests { use super::*; diff --git a/core/src/types/operators.rs b/core/src/types/operators.rs deleted file mode 100644 index 2b3bfbcc..00000000 --- a/core/src/types/operators.rs +++ /dev/null @@ -1,32 +0,0 @@ -/* - Appellation: operators - Contrib: FL03 -*/ -use crate::id::Id; - -pub struct Operator { - inputs: Vec, - name: String, -} - -impl Operator { - pub fn new() -> Self { - Self { - inputs: Vec::new(), - name: String::new(), - } - } - - pub fn with_name(mut self, name: impl ToString) -> Self { - self.name = name.to_string(); - self - } - - pub fn inputs(&self) -> &[Id] { - &self.inputs - } - - pub fn name(&self) -> &str { - &self.name - } -} diff --git a/graphs/Cargo.toml b/graphs/Cargo.toml index 0fcc3415..7051d2f9 100644 --- a/graphs/Cargo.toml +++ b/graphs/Cargo.toml @@ -44,8 +44,8 @@ strum.workspace = true [dependencies.acme-core] path = "../core" -# version = "0.3.0" -version = "0.3.0-nightly.4" +version = "0.3.0" +# version = "0.3.0-nightly.5" [package.metadata.docs.rs] all-features = true diff --git a/graphs/src/ops/kinds.rs b/graphs/src/ops/kinds.rs index 8deba2ce..0ce1f60d 100644 --- a/graphs/src/ops/kinds.rs +++ b/graphs/src/ops/kinds.rs @@ -2,8 +2,7 @@ Appellation: kinds Contrib: FL03 */ -use super::arithmetic::*; -use super::BinaryOperation; +use acme::ops::binary::*; #[cfg(feature = "serde")] use serde::{Deserialize, Serialize}; use smart_default::SmartDefault; diff --git a/graphs/src/ops/mod.rs b/graphs/src/ops/mod.rs index f776e7a7..2cfc736d 100644 --- a/graphs/src/ops/mod.rs +++ b/graphs/src/ops/mod.rs @@ -5,36 +5,10 @@ //! # Operations //! //! 
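//!
//! ### Example
//! A minimal sketch of the `BinaryOperation` trait, which this patch moves into
//! `acme-core`: closures of the form `Fn(A, B) -> C` implement it via a blanket
//! impl, and the import path below is the one this crate now uses.
//! ```
//! use acme::ops::binary::BinaryOperation;
//!
//! let add = |a: i32, b: i32| a + b;
//! assert_eq!(add.eval(2, 3), 5);
//! ```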
-pub use self::{arithmetic::*, kinds::*}; +pub use self::kinds::*; -pub(crate) mod arithmetic; pub(crate) mod kinds; -pub trait BinaryOperation { - type Output; - - fn eval(&self, lhs: A, rhs: B) -> Self::Output; -} - -impl BinaryOperation for S -where - S: Fn(A, B) -> C, -{ - type Output = C; - - fn eval(&self, lhs: A, rhs: B) -> Self::Output { - self(lhs, rhs) - } -} - -impl BinaryOperation for Box> { - type Output = C; - - fn eval(&self, lhs: A, rhs: B) -> Self::Output { - self.as_ref().eval(lhs, rhs) - } -} - pub trait Operator { fn boxed(self) -> Box where diff --git a/graphs/src/scg/graph.rs b/graphs/src/scg/graph.rs index 280ecb12..4dc4df20 100644 --- a/graphs/src/scg/graph.rs +++ b/graphs/src/scg/graph.rs @@ -3,8 +3,9 @@ Contrib: FL03 */ use super::Node; -use crate::ops::{BinaryExpr, BinaryOperation, Operations}; +use crate::ops::{BinaryExpr, Operations}; use crate::prelude::GraphResult as Result; +use acme::ops::binary::BinaryOperation; use num::traits::{NumAssign, NumOps, Signed}; use petgraph::algo::toposort; use petgraph::prelude::{DiGraph, NodeIndex}; diff --git a/tensor/Cargo.toml b/tensor/Cargo.toml index 7bbf9995..34001a54 100644 --- a/tensor/Cargo.toml +++ b/tensor/Cargo.toml @@ -43,8 +43,8 @@ strum = { features = ["derive"], version = "0.26" } [dependencies.acme-core] path = "../core" -# version = "0.3.0" -version = "0.3.0-nightly.4" +version = "0.3.0" +# version = "0.3.0-nightly.5" [dev-dependencies] approx = "0.5" diff --git a/tensor/src/actions/index/slice.rs b/tensor/src/actions/index/slice.rs index 6eb3cab3..81a5fe5d 100644 --- a/tensor/src/actions/index/slice.rs +++ b/tensor/src/actions/index/slice.rs @@ -5,12 +5,41 @@ //! # Slice //! //! - +use core::ops::{Range, RangeFrom}; pub struct Slice { - pub start: usize, - pub end: usize, + pub start: isize, + pub end: Option, + pub step: isize, +} + +impl Slice { + pub fn new(start: isize, end: Option, step: isize) -> Self { + Self { start, end, step } + } } -impl Slice {} +impl From> for Slice { + fn from(range: Range) -> Self { + Self { + start: range.start, + end: Some(range.end), + step: 1, + } + } +} -pub enum Index {} +impl From> for Slice { + fn from(range: RangeFrom) -> Self { + Self { + start: range.start, + end: None, + step: 1, + } + } +} + +pub enum Slices { + Index(isize), + Slice(Slice), + NewAxis, +} diff --git a/tensor/src/impls/linalg.rs b/tensor/src/impls/linalg.rs index 2ec903a3..33b7a5e8 100644 --- a/tensor/src/impls/linalg.rs +++ b/tensor/src/impls/linalg.rs @@ -8,123 +8,111 @@ use crate::prelude::{Matmul, Scalar, ShapeError, TensorError, TensorExpr, TensorResult}; use crate::tensor::{self, TensorBase}; use acme::prelude::UnaryOp; -use num::traits::{Num, Signed}; +use num::traits::{Num, NumAssign, Zero}; -fn inverse_impl(tensor: &TensorBase) -> TensorResult> +fn inverse_impl(matrix: &TensorBase) -> TensorResult> where - T: Copy + Num + PartialOrd + Signed, + T: Copy + Num + NumAssign + PartialOrd, { - if !tensor.shape().is_square() { - return Err(ShapeError::InvalidShape.into()); + let op = TensorExpr::unary(matrix.clone(), UnaryOp::Inv); + let rows = matrix.nrows(); + let cols = matrix.ncols(); + + if !matrix.is_square() { + return Err(ShapeError::IncompatibleShapes.into()); // Matrix must be square for inversion } - let shape = tensor.shape(); - let n = *shape.first().unwrap(); - let mut data = tensor.data().to_vec(); - let mut inverse = vec![T::zero(); n * n]; + let identity = TensorBase::eye(rows); - for i in 0..n { - inverse[(i * n) + i] = T::one(); + // Construct an augmented matrix by 
concatenating the original matrix with an identity matrix + let mut aug = TensorBase::zeros((rows, 2 * cols)); + let acols = 2 * cols; + // aug.slice_mut(s![.., ..cols]).assign(matrix); + for i in 0..rows { + for j in 0..cols { + aug[[i, j]] = matrix[[i, j]]; + } + for j in cols..acols { + aug[[i, j]] = identity[[i, j - cols]]; + } } - let mut permutation = Vec::::from_iter(0..n); + // Perform Gaussian elimination to reduce the left half to the identity matrix + for i in 0..rows { + let pivot = aug[[i, i]]; - for i in 0..n { - let mut max_row = i; - for j in i + 1..n { - if data[(j * n) + i].abs() > data[(max_row * n) + i].abs() { - max_row = j; - } + if pivot == T::zero() { + return Err(TensorError::Singular); // Matrix is singular } - if data[(max_row * n) + i].is_zero() { - return Err(TensorError::Singular); // Matrix is singular + for j in 0..(2 * cols) { + aug[[i, j]] = aug[[i, j]] / pivot; } - if max_row != i { - for j in 0..n { - data.swap((max_row * n) + j, (i * n) + j); - inverse.swap((max_row * n) + j, (i * n) + j); + for j in 0..rows { + if i != j { + let am = aug.clone(); + let factor = aug[[j, i]]; + for k in 0..(2 * cols) { + aug[[j, k]] -= factor * am[[i, k]]; + } } - permutation.swap(max_row, i); } + } - let pivot = data[(i * n) + i]; - for j in 0..n { - data[(i * n) + j] = data[(i * n) + j] / pivot; - inverse[(i * n) + j] = inverse[(i * n) + j] / pivot; + // Extract the inverted matrix from the augmented matrix + let mut inverted = matrix.zeros_like().with_op(op.into()); + for i in 0..rows { + for j in 0..cols { + inverted[[i, j]] = aug[[i, j + cols]]; } + } - for j in 0..n { - if j != i { - let factor = data[(j * n) + i]; - for k in 0..n { - data[(j * n) + k] = data[(j * n) + k] - data[(i * n) + k] * factor; - inverse[(j * n) + k] = inverse[(j * n) + k] - inverse[(i * n) + k] * factor; - } - } + Ok(inverted.to_owned()) +} +/// Returns the lower triangular portion of a matrix. +pub fn tril(a: &TensorBase) -> TensorBase +where + T: Clone + Zero, +{ + let mut out = a.clone(); + for i in 0..a.shape()[0] { + for j in i + 1..a.shape()[1] { + out[[i, j]] = T::zero(); } } - - let mut res = vec![T::zero(); n * n]; - for i in 0..n { - for j in 0..n { - res[(i * n) + permutation[j]] = inverse[(i * n) + j]; + out +} +/// Returns the upper triangular portion of a matrix. 
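+/// For example (values illustrative): `[[1, 2], [3, 4]]` maps to `[[1, 2], [0, 4]]`.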
+pub fn triu(a: &TensorBase) -> TensorBase +where + T: Clone + Zero, +{ + let mut out = a.clone(); + for i in 0..a.shape()[0] { + for j in 0..i { + out[[i, j]] = T::zero(); } } - let op = TensorExpr::unary(tensor.clone(), UnaryOp::Inv); - let tensor = tensor::from_vec_with_op(false, op, shape, res); - Ok(tensor) + out } impl TensorBase where - T: Copy + Num + PartialOrd + Signed, + T: Copy, { - pub fn diag(&self) -> Self - where - T: Clone, - { + pub fn diag(&self) -> Self { let rank = *self.rank(); let store = (0..rank).map(|i| self[vec![i; rank]]).collect::>(); tensor::from_vec_with_kind(false, self.shape().diagonalize(), store) } +} - pub fn det(&self) -> Result { - if !self.shape().is_square() { - return Err(ShapeError::InvalidShape.into()); - } - let shape = self.shape(); - let n = shape.nrows(); - if n == 1 { - return Ok(T::zero()); - } - if n == 2 { - let res = self[vec![0, 0]] * self[vec![1, 1]] - self[vec![0, 1]] * self[vec![1, 0]]; - return Ok(res); - } - let mut det = T::zero(); - let mut cur_shape = shape.clone(); - for i in 0..n { - let _ = cur_shape.pop(); - let mut sub = vec![T::zero(); (n - 1).pow(2)]; - let mut k = 0; - for j in 0..n { - if j == i { - continue; - } - for l in 1..n { - sub[k] = self[vec![l, j]]; - k += 1; - } - } - let sub_tensor = tensor::from_vec_with_kind(false, cur_shape.clone(), sub); - let sign = if i % 2 == 0 { T::one() } else { -T::one() }; - det = det + sign * self[vec![0, i]] * sub_tensor.det()?; - } - Ok(det) - } +impl TensorBase +where + T: Copy + Num + NumAssign + PartialOrd, +{ pub fn inv(&self) -> TensorResult { inverse_impl(self) } diff --git a/tensor/src/tensor.rs b/tensor/src/tensor.rs index e7d57112..90642d73 100644 --- a/tensor/src/tensor.rs +++ b/tensor/src/tensor.rs @@ -32,6 +32,19 @@ pub(crate) fn create_with( } } +pub(crate) fn from_scalar_with_op( + kind: impl Into, + op: TensorExpr, + data: T, +) -> TensorBase { + create_with( + kind.into(), + BackpropOp::new(op), + Shape::scalar(), + vec![data], + ) +} + pub(crate) fn from_vec_with_kind( kind: impl Into, shape: impl IntoShape, @@ -176,6 +189,10 @@ impl TensorBase { pub fn is_scalar(&self) -> bool { *self.rank() == 0 } + /// Returns true if the tensor is a square matrix. + pub fn is_square(&self) -> bool { + self.shape().is_square() + } /// A function to check if the tensor is a variable pub const fn is_variable(&self) -> bool { self.kind().is_variable() @@ -202,6 +219,14 @@ impl TensorBase { pub const fn layout(&self) -> &Layout { &self.layout } + /// Get the number of columns in the tensor + pub fn ncols(&self) -> usize { + self.shape().ncols() + } + /// Get the number of rows in the tensor + pub fn nrows(&self) -> usize { + self.shape().nrows() + } /// Get a reference to the operation of the tensor pub const fn op(&self) -> &BackpropOp { &self.op diff --git a/tensor/tests/linalg.rs b/tensor/tests/linalg.rs index e64d73c9..6b16c7ac 100644 --- a/tensor/tests/linalg.rs +++ b/tensor/tests/linalg.rs @@ -28,7 +28,6 @@ macro_rules! 
assert_diff { }; } -#[ignore = "not implemented"] #[test] fn test_inverse() { let shape = Shape::from((2, 2)); @@ -38,6 +37,7 @@ fn test_inverse() { let exp = Tensor::from_shape_vec(shape.clone(), inv_arr); let inverse = tensor.inv().unwrap(); + println!("{:?}", &inverse.to_vec()); for i in 0..shape.nrows() { for j in 0..shape.ncols() { From 97a5fe8678266249c3c1e9921f708cb1848ef6e7 Mon Sep 17 00:00:00 2001 From: Joe McCain III Date: Wed, 3 Apr 2024 10:32:12 -0500 Subject: [PATCH 80/87] update Signed-off-by: Joe McCain III --- README.md | 2 +- acme/benches/tensor.rs | 4 +- acme/examples/tensor.rs | 6 +- tensor/src/actions/iter/axis.rs | 6 ++ tensor/src/actions/iter/indexed.rs | 39 ------------- tensor/src/actions/iter/iterator.rs | 46 ++++++++++++---- tensor/src/actions/iter/mod.rs | 14 ++--- .../actions/iter/{strides.rs => position.rs} | 55 +++---------------- tensor/src/impls/linalg.rs | 40 +++----------- tensor/src/impls/ops/binary.rs | 48 ++++++++++++++-- tensor/src/ops/backprop.rs | 28 +++++----- tensor/src/ops/kinds/reshape.rs | 2 +- tensor/src/ops/op.rs | 51 ++++++++++------- tensor/src/specs/scalar.rs | 3 +- tensor/src/tensor.rs | 8 +-- tensor/src/utils.rs | 28 ++++++++++ 16 files changed, 188 insertions(+), 192 deletions(-) create mode 100644 tensor/src/actions/iter/axis.rs delete mode 100644 tensor/src/actions/iter/indexed.rs rename tensor/src/actions/iter/{strides.rs => position.rs} (65%) diff --git a/README.md b/README.md index 7d9bb3d1..fba37c17 100644 --- a/README.md +++ b/README.md @@ -7,7 +7,7 @@ *** -Acme is an application toolkit for building cloud-native applications in Rust designed for complete integration with the scsys ecosystem. +Acme aims to be a complete auto-differentiation library for Rust. The library is designed to be simple to use and easy to integrate into existing projects. The library is still in its early stages and is not yet ready for production use. 
## Getting Started diff --git a/acme/benches/tensor.rs b/acme/benches/tensor.rs index c5164d13..4c996a40 100644 --- a/acme/benches/tensor.rs +++ b/acme/benches/tensor.rs @@ -21,7 +21,7 @@ fn bench_iter(b: &mut Bencher) { let shape = SHAPE_3D.clone(); let n = shape.size(); let tensor = Tensor::linspace(0f64, n as f64, n); - b.iter(|| tensor.strided().take(n)) + b.iter(|| tensor.iter().take(n)) } #[bench] @@ -29,5 +29,5 @@ fn bench_iter_rev(b: &mut Bencher) { let shape = SHAPE_3D.clone(); let n = shape.size(); let tensor = Tensor::linspace(0f64, n as f64, n); - b.iter(|| tensor.strided().rev().take(n)) + b.iter(|| tensor.iter().rev().take(n)) } diff --git a/acme/examples/tensor.rs b/acme/examples/tensor.rs index d6d76ed6..e8c540c0 100644 --- a/acme/examples/tensor.rs +++ b/acme/examples/tensor.rs @@ -6,13 +6,13 @@ extern crate acme; -use acme::prelude::{BoxResult, Tensor}; +use acme::prelude::{BoxResult, Matmul, Tensor}; fn main() -> BoxResult { let shape = (2, 3); let tensor: Tensor = Tensor::linspace(1.0, 7.0, 6).reshape(shape)?; let b = tensor.t(); - println!("{:?}", &tensor[&[1, 1]]); - println!("{:?}", &b[&[1, 1]]); + let c = tensor.matmul(&b); + println!("{:?}", &c); Ok(()) } diff --git a/tensor/src/actions/iter/axis.rs b/tensor/src/actions/iter/axis.rs new file mode 100644 index 00000000..e543db32 --- /dev/null +++ b/tensor/src/actions/iter/axis.rs @@ -0,0 +1,6 @@ +/* + Appellation: axis + Contrib: FL03 +*/ + +pub struct AxisIter; diff --git a/tensor/src/actions/iter/indexed.rs b/tensor/src/actions/iter/indexed.rs deleted file mode 100644 index 0a360a0b..00000000 --- a/tensor/src/actions/iter/indexed.rs +++ /dev/null @@ -1,39 +0,0 @@ -/* - Appellation: stride - Contrib: FL03 -*/ -use super::Strided; -use crate::tensor::TensorBase; - -pub struct IndexedIter<'a, T> { - scope: Option<&'a T>, - strides: Strided<'a>, - tensor: &'a TensorBase, -} - -impl<'a, T> IndexedIter<'a, T> { - pub fn new(tensor: &'a TensorBase) -> Self { - let strides = Strided::from(tensor.layout()); - Self { - scope: None, - strides, - tensor, - } - } -} - -impl<'a, T> Iterator for IndexedIter<'a, T> { - type Item = &'a T; - - fn next(&mut self) -> Option { - let (_pos, idx) = self.strides.next()?; - self.scope = self.tensor.get_by_index(idx); - self.scope - } -} - -impl<'a, T> From<&'a TensorBase> for IndexedIter<'a, T> { - fn from(tensor: &'a TensorBase) -> Self { - Self::new(tensor) - } -} diff --git a/tensor/src/actions/iter/iterator.rs b/tensor/src/actions/iter/iterator.rs index 1d8c5338..73e1a7dd 100644 --- a/tensor/src/actions/iter/iterator.rs +++ b/tensor/src/actions/iter/iterator.rs @@ -2,24 +2,46 @@ Appellation: iterator Contrib: FL03 */ -use crate::prelude::Order; +use super::IndexIter; +use crate::TensorBase; -pub struct Iter { - order: Order, +pub struct StrideIter<'a, T> { + scope: Option<&'a T>, + strides: IndexIter<'a>, + tensor: &'a TensorBase, } -impl Iter { - pub fn new(order: Order) -> Self { - Self { order } +impl<'a, T> StrideIter<'a, T> { + pub fn new(tensor: &'a TensorBase) -> Self { + let strides = IndexIter::from(tensor.layout()); + Self { + scope: None, + strides, + tensor, + } } +} + +impl<'a, T> Iterator for StrideIter<'a, T> { + type Item = &'a T; + + fn next(&mut self) -> Option { + let (_pos, idx) = self.strides.next()?; + self.scope = self.tensor.get_by_index(idx); + self.scope + } +} - pub fn order(&self) -> Order { - self.order +impl<'a, T> DoubleEndedIterator for StrideIter<'a, T> { + fn next_back(&mut self) -> Option { + let (_pos, idx) = self.strides.next_back()?; + self.scope 
= self.tensor.get_by_index(idx); + self.scope } } -pub struct BaseIter<'a, T> { - iter: &'a Iter, - data: &'a [T], - index: usize, +impl<'a, T> From<&'a TensorBase> for StrideIter<'a, T> { + fn from(tensor: &'a TensorBase) -> Self { + Self::new(tensor) + } } diff --git a/tensor/src/actions/iter/mod.rs b/tensor/src/actions/iter/mod.rs index 29bc0385..1bbc2191 100644 --- a/tensor/src/actions/iter/mod.rs +++ b/tensor/src/actions/iter/mod.rs @@ -5,11 +5,11 @@ //! # Iter //! //! -pub use self::{indexed::*, iterator::*, strides::*, utils::*}; +pub use self::{axis::*, iterator::*, position::*, utils::*}; -pub(crate) mod indexed; +pub(crate) mod axis; pub(crate) mod iterator; -pub(crate) mod strides; +pub(crate) mod position; pub trait IterTensor { type Item; @@ -47,24 +47,24 @@ mod tests { use crate::prelude::{Shape, Tensor}; #[test] - fn test_strided() { + fn test_iter() { let shape = Shape::from_iter([2, 2, 2, 2]); let n = shape.size(); let exp = Vec::linspace(0f64, n as f64, n); let tensor = Tensor::linspace(0f64, n as f64, n).reshape(shape).unwrap(); - for (elem, val) in tensor.strided().zip(exp.iter()) { + for (elem, val) in tensor.iter().zip(exp.iter()) { assert_eq!(elem, val); } } #[test] - fn test_strided_rev() { + fn test_iter_rev() { let shape = Shape::from_iter([2, 2]); let n = shape.size(); let exp = Vec::linspace(0f64, n as f64, n); let tensor = Tensor::linspace(0f64, n as f64, n).reshape(shape).unwrap(); - for (i, j) in tensor.strided().rev().zip(exp.iter().rev()) { + for (i, j) in tensor.iter().rev().zip(exp.iter().rev()) { assert_eq!(i, j); } } diff --git a/tensor/src/actions/iter/strides.rs b/tensor/src/actions/iter/position.rs similarity index 65% rename from tensor/src/actions/iter/strides.rs rename to tensor/src/actions/iter/position.rs index 9562aac9..2e2c63a4 100644 --- a/tensor/src/actions/iter/strides.rs +++ b/tensor/src/actions/iter/position.rs @@ -1,59 +1,18 @@ /* - Appellation: stride + Appellation: position Contrib: FL03 */ use crate::prelude::{Layout, Shape, Stride}; -use crate::tensor::TensorBase; -pub struct StrideIter<'a, T> { - scope: Option<&'a T>, - strides: Strided<'a>, - tensor: &'a TensorBase, -} - -impl<'a, T> StrideIter<'a, T> { - pub fn new(tensor: &'a TensorBase) -> Self { - let strides = Strided::from(tensor.layout()); - Self { - scope: None, - strides, - tensor, - } - } -} - -impl<'a, T> Iterator for StrideIter<'a, T> { - type Item = &'a T; - - fn next(&mut self) -> Option { - let (_pos, idx) = self.strides.next()?; - self.scope = self.tensor.get_by_index(idx); - self.scope - } -} - -impl<'a, T> DoubleEndedIterator for StrideIter<'a, T> { - fn next_back(&mut self) -> Option { - let (_pos, idx) = self.strides.next_back()?; - self.scope = self.tensor.get_by_index(idx); - self.scope - } -} - -impl<'a, T> From<&'a TensorBase> for StrideIter<'a, T> { - fn from(tensor: &'a TensorBase) -> Self { - Self::new(tensor) - } -} - -pub struct Strided<'a> { +/// +pub struct IndexIter<'a> { next: Option, position: Vec, shape: &'a Shape, stride: &'a Stride, } -impl<'a> Strided<'a> { +impl<'a> IndexIter<'a> { pub fn new(offset: usize, shape: &'a Shape, stride: &'a Stride) -> Self { let elem_count: usize = shape.iter().product(); let next = if elem_count == 0 { @@ -80,7 +39,7 @@ impl<'a> Strided<'a> { } } -impl<'a> DoubleEndedIterator for Strided<'a> { +impl<'a> DoubleEndedIterator for IndexIter<'a> { fn next_back(&mut self) -> Option { let (pos, _idx) = if let Some(item) = self.next() { item @@ -100,7 +59,7 @@ impl<'a> DoubleEndedIterator for Strided<'a> { } } -impl<'a> 
Iterator for Strided<'a> { +impl<'a> Iterator for IndexIter<'a> { type Item = (Vec, usize); fn next(&mut self) -> Option { @@ -133,7 +92,7 @@ impl<'a> Iterator for Strided<'a> { } } -impl<'a> From<&'a Layout> for Strided<'a> { +impl<'a> From<&'a Layout> for IndexIter<'a> { fn from(layout: &'a Layout) -> Self { Self::new(layout.offset, &layout.shape, &layout.stride) } diff --git a/tensor/src/impls/linalg.rs b/tensor/src/impls/linalg.rs index 33b7a5e8..a29c589a 100644 --- a/tensor/src/impls/linalg.rs +++ b/tensor/src/impls/linalg.rs @@ -8,7 +8,7 @@ use crate::prelude::{Matmul, Scalar, ShapeError, TensorError, TensorExpr, TensorResult}; use crate::tensor::{self, TensorBase}; use acme::prelude::UnaryOp; -use num::traits::{Num, NumAssign, Zero}; +use num::traits::{Num, NumAssign}; fn inverse_impl(matrix: &TensorBase) -> TensorResult> where @@ -70,32 +70,6 @@ where Ok(inverted.to_owned()) } -/// Returns the lower triangular portion of a matrix. -pub fn tril(a: &TensorBase) -> TensorBase -where - T: Clone + Zero, -{ - let mut out = a.clone(); - for i in 0..a.shape()[0] { - for j in i + 1..a.shape()[1] { - out[[i, j]] = T::zero(); - } - } - out -} -/// Returns the upper triangular portion of a matrix. -pub fn triu(a: &TensorBase) -> TensorBase -where - T: Clone + Zero, -{ - let mut out = a.clone(); - for i in 0..a.shape()[0] { - for j in 0..i { - out[[i, j]] = T::zero(); - } - } - out -} impl TensorBase where @@ -128,11 +102,13 @@ where let shape = self.shape().matmul_shape(&other.shape()).unwrap(); let mut result = vec![T::zero(); shape.size()]; - for i in 0..self.shape()[0] { - for j in 0..other.shape()[1] { - for k in 0..self.shape()[1] { - result[i * other.shape()[1] + j] += - self.data[i * self.shape()[1] + k] * other.data[k * other.shape()[1] + j]; + for i in 0..self.nrows() { + for j in 0..other.ncols() { + for k in 0..self.ncols() { + let scope = i * other.ncols() + j; + let xi = i * self.ncols() + k; + let yi = k * other.ncols() + j; + result[scope] += self.data[xi] * other.data[yi]; } } } diff --git a/tensor/src/impls/ops/binary.rs b/tensor/src/impls/ops/binary.rs index 08e11a9c..afa735ea 100644 --- a/tensor/src/impls/ops/binary.rs +++ b/tensor/src/impls/ops/binary.rs @@ -6,8 +6,10 @@ use crate::prelude::{Scalar, TensorExpr}; use crate::tensor::{from_vec_with_op, TensorBase}; use acme::ops::binary::BinaryOp; use core::ops; +use num::traits::float::{Float, FloatCore}; use num::traits::Pow; +#[allow(dead_code)] pub(crate) fn broadcast_scalar_op( lhs: &TensorBase, rhs: &TensorBase, @@ -92,18 +94,54 @@ where } } -impl TensorBase -where - T: Scalar, -{ - pub fn pow(&self, exp: T) -> Self { +impl TensorBase { + pub fn pow(&self, exp: T) -> Self + where + T: Copy + Pow, + { let shape = self.shape(); let store = self.data().iter().copied().map(|a| a.pow(exp)).collect(); let op = TensorExpr::binary_scalar(self.clone(), exp, BinaryOp::Pow); from_vec_with_op(false, op, shape, store) } + + pub fn powf(&self, exp: T) -> Self + where + T: Float, + { + let shape = self.shape(); + let store = self.data().iter().copied().map(|a| a.powf(exp)).collect(); + let op = TensorExpr::binary_scalar(self.clone(), exp, BinaryOp::Pow); + from_vec_with_op(false, op, shape, store) + } + + pub fn powi(&self, exp: i32) -> Self + where + T: FloatCore, + { + let shape = self.shape(); + let store = self.data().iter().copied().map(|a| a.powi(exp)).collect(); + let op = TensorExpr::binary_scalar(self.clone(), T::from(exp).unwrap(), BinaryOp::Pow); + from_vec_with_op(false, op, shape, store) + } } +// impl TensorBase where T: 
ComplexFloat + Scalar, Real = T> { + +// pub fn powc(&self, exp: ::Complex) -> TensorBase<::Complex> { +// let shape = self.shape(); +// let store = self.data().iter().copied().map(|a| Scalar::powc(a, exp)).collect(); +// let op = TensorExpr::binary_scalar_c(self.clone(), exp, BinaryOp::Pow); +// TensorBase { +// id: TensorId::new(), +// data: store, +// kind: TensorKind::default(), +// layout: Layout::contiguous(shape), +// op: BackpropOp::new(op) +// } +// } +// } + impl Pow for TensorBase where T: Copy + Pow, diff --git a/tensor/src/ops/backprop.rs b/tensor/src/ops/backprop.rs index 28048b6c..59dcdf92 100644 --- a/tensor/src/ops/backprop.rs +++ b/tensor/src/ops/backprop.rs @@ -8,11 +8,11 @@ use acme::prelude::BinaryOp; use core::borrow::Borrow; use core::ops::{Deref, DerefMut}; -#[derive(Clone, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)] -pub struct BackpropOp(Option>); +#[derive(Clone, Debug, Eq, Hash, PartialEq)] +pub struct BackpropOp(Option>); -impl BackpropOp { - pub fn new(op: TensorExpr) -> Self { +impl BackpropOp { + pub fn new(op: TensorExpr) -> Self { BackpropOp(Some(op)) } @@ -20,7 +20,7 @@ impl BackpropOp { BackpropOp(None) } - pub fn binary(lhs: TensorBase, rhs: TensorBase, kind: BinaryOp) -> Self { + pub fn binary(lhs: TensorBase, rhs: TensorBase, kind: BinaryOp) -> Self { BackpropOp(Some(TensorExpr::binary(lhs, rhs, kind))) } @@ -28,34 +28,34 @@ impl BackpropOp { self.0.is_none() } - pub fn op(&self) -> Option<&TensorExpr> { + pub fn op(&self) -> Option<&TensorExpr> { self.0.as_ref() } - pub fn op_mut(&mut self) -> Option<&mut TensorExpr> { + pub fn op_mut(&mut self) -> Option<&mut TensorExpr> { self.0.as_mut() } - pub fn into_inner(self) -> Option> { + pub fn into_inner(self) -> Option> { self.0 } - pub fn take(&mut self) -> Option> { + pub fn take(&mut self) -> Option> { self.0.take() } } -impl BackpropOp +impl BackpropOp where - T: Clone, + A: Clone, { - pub fn view(&self) -> BackpropOp<&T> { + pub fn view(&self) -> BackpropOp<&A> { BackpropOp(self.0.as_ref().map(|op| op.view())) } } -impl Borrow>> for BackpropOp { - fn borrow(&self) -> &Option> { +impl Borrow>> for BackpropOp { + fn borrow(&self) -> &Option> { &self.0 } } diff --git a/tensor/src/ops/kinds/reshape.rs b/tensor/src/ops/kinds/reshape.rs index 3f31caba..7f2fc3a9 100644 --- a/tensor/src/ops/kinds/reshape.rs +++ b/tensor/src/ops/kinds/reshape.rs @@ -8,7 +8,7 @@ use crate::shape::Shape; use serde::{Deserialize, Serialize}; use strum::{Display, EnumCount, EnumDiscriminants, EnumIs, EnumIter, EnumString, VariantNames}; -#[derive(Clone, Debug, EnumDiscriminants, Eq, Hash, Ord, PartialEq, PartialOrd)] +#[derive(Clone, Debug, EnumDiscriminants, Eq, Hash, PartialEq)] #[repr(u8)] #[strum(serialize_all = "snake_case")] #[strum_discriminants(derive( diff --git a/tensor/src/ops/op.rs b/tensor/src/ops/op.rs index f967491e..86376ff0 100644 --- a/tensor/src/ops/op.rs +++ b/tensor/src/ops/op.rs @@ -6,57 +6,66 @@ use crate::ops::kinds::reshape::*; use crate::shape::{Axis, Shape}; use crate::TensorBase; use acme::prelude::{BinaryOp, UnaryOp}; +use num::Complex; pub type BoxTensor = Box>; -#[derive(Clone, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)] +#[derive(Clone, Debug, Eq, Hash, PartialEq)] #[non_exhaustive] -pub enum TensorExpr { - Binary(BoxTensor, BoxTensor, BinaryOp), - BinaryScalar(BoxTensor, T, BinaryOp), - Unary(BoxTensor, UnaryOp), - Broadcast(BoxTensor, Shape), - Matmul(BoxTensor, BoxTensor), - Reshape(BoxTensor, Shape), - Shape(ReshapeExpr), - SwapAxes(BoxTensor, Axis, Axis), - Transpose(BoxTensor), +pub enum 
TensorExpr { + Binary(BoxTensor, BoxTensor, BinaryOp), + BinaryScalar(BoxTensor, B, BinaryOp), + Unary(BoxTensor, UnaryOp), + Broadcast(BoxTensor, Shape), + Matmul(BoxTensor, BoxTensor), + Reshape(BoxTensor, Shape), + Shape(ReshapeExpr), + SwapAxes(BoxTensor, Axis, Axis), + Transpose(BoxTensor), } -impl TensorExpr { - pub fn binary(lhs: TensorBase, rhs: TensorBase, op: BinaryOp) -> Self { +impl TensorExpr { + pub fn binary(lhs: TensorBase, rhs: TensorBase, op: BinaryOp) -> Self { TensorExpr::Binary(Box::new(lhs), Box::new(rhs), op) } - pub fn binary_scalar(lhs: TensorBase, rhs: T, op: BinaryOp) -> Self { + pub fn binary_scalar(lhs: TensorBase, rhs: B, op: BinaryOp) -> Self { + TensorExpr::BinaryScalar(Box::new(lhs), rhs, op) + } + + pub fn binary_scalar_c( + lhs: TensorBase, + rhs: Complex, + op: BinaryOp, + ) -> TensorExpr> { TensorExpr::BinaryScalar(Box::new(lhs), rhs, op) } - pub fn broadcast(tensor: TensorBase, shape: Shape) -> Self { + pub fn broadcast(tensor: TensorBase, shape: Shape) -> Self { TensorExpr::Broadcast(Box::new(tensor), shape) } - pub fn matmul(lhs: TensorBase, rhs: TensorBase) -> Self { + pub fn matmul(lhs: TensorBase, rhs: TensorBase) -> Self { TensorExpr::Matmul(Box::new(lhs), Box::new(rhs)) } - pub fn reshape(tensor: TensorBase, shape: Shape) -> Self { + pub fn reshape(tensor: TensorBase, shape: Shape) -> Self { TensorExpr::Reshape(Box::new(tensor), shape) } - pub fn shape(expr: ReshapeExpr) -> Self { + pub fn shape(expr: ReshapeExpr) -> Self { TensorExpr::Shape(expr) } - pub fn swap_axes(tensor: TensorBase, swap: Axis, with: Axis) -> Self { + pub fn swap_axes(tensor: TensorBase, swap: Axis, with: Axis) -> Self { TensorExpr::SwapAxes(Box::new(tensor), swap, with) } - pub fn transpose(scope: TensorBase) -> Self { + pub fn transpose(scope: TensorBase) -> Self { TensorExpr::Transpose(Box::new(scope)) } - pub fn unary(tensor: TensorBase, op: UnaryOp) -> Self { + pub fn unary(tensor: TensorBase, op: UnaryOp) -> Self { TensorExpr::Unary(Box::new(tensor), op) } } diff --git a/tensor/src/specs/scalar.rs b/tensor/src/specs/scalar.rs index 0f4e2bb3..99158603 100644 --- a/tensor/src/specs/scalar.rs +++ b/tensor/src/specs/scalar.rs @@ -6,7 +6,7 @@ use crate::tensor::TensorBase; use core::iter::{Product, Sum}; use core::ops::Neg; use num::complex::Complex; -use num::traits::{Float, FromPrimitive, NumAssign, NumCast, NumOps}; +use num::traits::{Float, FromPrimitive, NumAssign, NumCast, NumOps, Pow}; pub trait Scalar: Copy @@ -16,6 +16,7 @@ pub trait Scalar: + NumAssign + NumCast + NumOps + + Pow + Product + Sum + 'static diff --git a/tensor/src/tensor.rs b/tensor/src/tensor.rs index 90642d73..d2d9c73b 100644 --- a/tensor/src/tensor.rs +++ b/tensor/src/tensor.rs @@ -31,7 +31,7 @@ pub(crate) fn create_with( op: op.into(), } } - +#[allow(dead_code)] pub(crate) fn from_scalar_with_op( kind: impl Into, op: TensorExpr, @@ -62,7 +62,7 @@ pub(crate) fn from_vec_with_op( create_with(kind.into(), BackpropOp::new(op), shape, data) } -#[derive(Clone, Debug, Hash, Ord, PartialOrd)] +#[derive(Clone, Debug, Hash)] pub struct TensorBase { pub(crate) id: TensorId, pub(crate) data: Vec, @@ -247,10 +247,6 @@ impl TensorBase { pub fn stride(&self) -> &Stride { self.layout().stride() } - /// Create an iterator over the tensor - pub fn strided(&self) -> StrideIter<'_, T> { - StrideIter::new(self) - } /// Turn the tensor into a scalar /// If the tensor has a rank greater than 0, this will return an error pub fn to_scalar(&self) -> TensorResult<&T> { diff --git a/tensor/src/utils.rs 
b/tensor/src/utils.rs index f4872f6e..31f28689 100644 --- a/tensor/src/utils.rs +++ b/tensor/src/utils.rs @@ -8,6 +8,7 @@ use crate::prelude::{Scalar, TensorExpr, TensorResult}; use crate::shape::ShapeError; use crate::tensor::{from_vec_with_op, TensorBase}; +use num::Zero; pub fn matmul(lhs: &TensorBase, rhs: &TensorBase) -> TensorResult> where @@ -35,6 +36,33 @@ where Ok(tensor) } +/// Returns the lower triangular portion of a matrix. +pub fn tril(a: &TensorBase) -> TensorBase +where + T: Clone + Zero, +{ + let mut out = a.clone(); + for i in 0..a.shape()[0] { + for j in i + 1..a.shape()[1] { + out[[i, j]] = T::zero(); + } + } + out +} +/// Returns the upper triangular portion of a matrix. +pub fn triu(a: &TensorBase) -> TensorBase +where + T: Clone + Zero, +{ + let mut out = a.clone(); + for i in 0..a.shape()[0] { + for j in 0..i { + out[[i, j]] = T::zero(); + } + } + out +} + pub fn dot_product(lhs: &TensorBase, rhs: &TensorBase) -> TensorResult> where T: Scalar, From 13f73ebf8fc7d16693b5b64e180c1db52f2418c7 Mon Sep 17 00:00:00 2001 From: Joe McCain III Date: Thu, 4 Apr 2024 16:36:57 -0500 Subject: [PATCH 81/87] update Signed-off-by: Joe McCain III --- core/src/lib.rs | 4 + .../math/linalg/{fields/mod.rs => fields.rs} | 0 core/src/math/linalg/mod.rs | 5 +- core/src/math/linalg/vs/mod.rs | 8 ++ core/src/math/mod.rs | 4 + core/src/math/props/mod.rs | 24 +++++ core/src/ops/binary/arithmetic.rs | 18 ++-- core/src/ops/binary/mod.rs | 8 +- core/src/ops/mod.rs | 9 +- core/src/ops/operator.rs | 2 +- core/src/ops/unary/kinds.rs | 18 ++++ core/src/utils.rs | 17 +++ graphs/src/ops/kinds.rs | 2 +- graphs/src/scg/graph.rs | 2 +- macros/src/lib.rs | 24 ++++- tensor/src/data/container.rs | 2 +- tensor/src/data/layout.rs | 46 ++------ tensor/src/data/mod.rs | 26 +++++ tensor/src/error.rs | 32 +++++- tensor/src/impls/iter.rs | 14 ++- tensor/src/impls/linalg.rs | 22 ++-- tensor/src/impls/num.rs | 4 +- tensor/src/impls/reshape.rs | 14 +-- tensor/src/ops/backprop.rs | 7 +- tensor/src/ops/op.rs | 51 ++++----- tensor/src/shape/dim/mod.rs | 12 +-- tensor/src/shape/rank.rs | 66 +++++++++--- tensor/src/shape/shape.rs | 62 +++++------ tensor/src/shape/stride.rs | 102 ++++++++++++------ tensor/src/tensor.rs | 30 ++++-- tensor/src/utils.rs | 29 ----- tensor/tests/reshape.rs | 10 +- tensor/tests/tensor.rs | 4 +- 33 files changed, 420 insertions(+), 258 deletions(-) rename core/src/math/linalg/{fields/mod.rs => fields.rs} (100%) create mode 100644 core/src/math/linalg/vs/mod.rs create mode 100644 core/src/math/props/mod.rs create mode 100644 core/src/utils.rs diff --git a/core/src/lib.rs b/core/src/lib.rs index 6d6e4bde..0ccd9f24 100644 --- a/core/src/lib.rs +++ b/core/src/lib.rs @@ -8,8 +8,12 @@ #[cfg(not(feature = "std"))] extern crate alloc; +// pub use self::utils::*; + #[macro_use] pub(crate) mod seal; +#[macro_use] +pub(crate) mod utils; pub mod error; pub mod eval; diff --git a/core/src/math/linalg/fields/mod.rs b/core/src/math/linalg/fields.rs similarity index 100% rename from core/src/math/linalg/fields/mod.rs rename to core/src/math/linalg/fields.rs diff --git a/core/src/math/linalg/mod.rs b/core/src/math/linalg/mod.rs index e7951389..7a314105 100644 --- a/core/src/math/linalg/mod.rs +++ b/core/src/math/linalg/mod.rs @@ -7,10 +7,7 @@ //! This module implements fundamental linear algebra concepts and operations. //! 
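//! ### Example
//! `VectorSpace` and `Subspace` are plain marker traits for now; a minimal
//! sketch of opting a type in (type name and import path assumed):
//! ```
//! use acme::math::linalg::vs::VectorSpace;
//!
//! struct R2(f64, f64);
//! impl VectorSpace for R2 {}
//! ```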
pub mod fields; - -pub trait VectorSpace {} - -pub trait Subspace: VectorSpace {} +pub mod vs; #[cfg(test)] mod tests {} diff --git a/core/src/math/linalg/vs/mod.rs b/core/src/math/linalg/vs/mod.rs new file mode 100644 index 00000000..3fd3465d --- /dev/null +++ b/core/src/math/linalg/vs/mod.rs @@ -0,0 +1,8 @@ +/* + Appellation: vs + Contrib: FL03 +*/ + +pub trait VectorSpace {} + +pub trait Subspace: VectorSpace {} diff --git a/core/src/math/mod.rs b/core/src/math/mod.rs index 6a31e9b6..6ab9ce32 100644 --- a/core/src/math/mod.rs +++ b/core/src/math/mod.rs @@ -7,6 +7,10 @@ //! This module contains the core mathematical operations and structures used //! throughout the library. It is divided into submodules for each mathematical //! operation or structure. +pub use self::props::*; + +pub(crate) mod props; + pub mod linalg; #[cfg(test)] diff --git a/core/src/math/props/mod.rs b/core/src/math/props/mod.rs new file mode 100644 index 00000000..23ab7235 --- /dev/null +++ b/core/src/math/props/mod.rs @@ -0,0 +1,24 @@ +/* + Appellation: props + Contrib: FL03 +*/ + +/// Associative Property describes an operation whose result is invariant to the grouping of its operands. +pub trait Associative {} + +/// Commutative Property describes an operation that is invariant under the exchange of its operands. +pub trait Commutative {} + +#[cfg(test)] +mod tests { + + #[test] + fn test_associative() { + assert_eq!((1 + 2) + 3, 1 + (2 + 3)); + } + + #[test] + fn test_commutative() { + assert_eq!(3 * 4, 4 * 3); + } +} diff --git a/core/src/ops/binary/arithmetic.rs b/core/src/ops/binary/arithmetic.rs index 04aaf8a1..300257d8 100644 --- a/core/src/ops/binary/arithmetic.rs +++ b/core/src/ops/binary/arithmetic.rs @@ -2,7 +2,7 @@ Appellation: arithmetic Contrib: FL03 */ -use super::BinaryOperation; +use super::{BinOp, BoxedBinOp}; use crate::ops::{Operator, OperatorKind}; use num::traits::NumOps; #[cfg(feature = "serde")] @@ -21,8 +21,8 @@ macro_rules! operator { Self } - pub fn name(&self) -> String { - stringify!($op).to_lowercase() + pub fn name(&self) -> &str { + stringify!($op) } } @@ -38,7 +38,7 @@ macro_rules! operator { OperatorKind::$kind } - fn name(&self) -> String { + fn name(&self) -> &str { self.name() } } @@ -96,7 +96,7 @@ macro_rules! operators { self.op().eval(lhs, rhs) } - pub fn op(self) -> Box> + pub fn op(self) -> BoxedBinOp where A: NumOps, { @@ -107,7 +107,7 @@ macro_rules! operators { } } - pub fn name(&self) -> String { + pub fn name(&self) -> &str { match self { $( $group::$variant(op) => op.name(), @@ -121,7 +121,7 @@ macro_rules! operators { OperatorKind::Binary } - fn name(&self) -> String { + fn name(&self) -> &str { self.name() } } @@ -138,7 +138,7 @@ macro_rules! impl_binary_op { ($op:ident, $bound:ident, $operator:tt) => { operator!($op, Binary); - impl BinaryOperation for $op + impl BinOp for $op where A: core::ops::$bound, { @@ -152,7 +152,7 @@ macro_rules!
impl_binary_op { (expr $op:ident, $bound:ident, $exp:expr) => { operator!($op, Binary); - impl BinaryOperation for $op + impl BinOp for $op where A: core::ops::$bound, { diff --git a/core/src/ops/binary/mod.rs b/core/src/ops/binary/mod.rs index 566a7fec..9ce3d3ca 100644 --- a/core/src/ops/binary/mod.rs +++ b/core/src/ops/binary/mod.rs @@ -9,13 +9,15 @@ pub(crate) mod kinds; pub(crate) mod operator; pub(crate) mod specs; -pub trait BinaryOperation { +pub type BoxedBinOp = Box>; + +pub trait BinOp { type Output; fn eval(&self, lhs: A, rhs: B) -> Self::Output; } -impl BinaryOperation for S +impl BinOp for S where S: Fn(A, B) -> C, { @@ -26,7 +28,7 @@ where } } -impl BinaryOperation for Box> { +impl BinOp for Box> { type Output = C; fn eval(&self, lhs: A, rhs: B) -> Self::Output { diff --git a/core/src/ops/mod.rs b/core/src/ops/mod.rs index 307d3de9..90e2b62f 100644 --- a/core/src/ops/mod.rs +++ b/core/src/ops/mod.rs @@ -39,15 +39,10 @@ where } } -pub trait Operation { - type Kind; - - fn kind(&self) -> Self::Kind; -} - pub(crate) mod prelude { + pub use super::{ApplyTo, ApplyWith, IntoOp}; + pub use super::binary::*; pub use super::kinds::Op; pub use super::unary::*; - pub use super::Operation; } diff --git a/core/src/ops/operator.rs b/core/src/ops/operator.rs index f3b8128d..485c0abf 100644 --- a/core/src/ops/operator.rs +++ b/core/src/ops/operator.rs @@ -13,5 +13,5 @@ pub enum OperatorKind { pub trait Operator { fn kind(&self) -> OperatorKind; - fn name(&self) -> String; + fn name(&self) -> &str; } diff --git a/core/src/ops/unary/kinds.rs b/core/src/ops/unary/kinds.rs index bfd1bd48..13e18b82 100644 --- a/core/src/ops/unary/kinds.rs +++ b/core/src/ops/unary/kinds.rs @@ -58,4 +58,22 @@ impl UnaryOp { _ => true, } } + + enum_fn_constructor!( + (Abs, abs), + (Cos, cos), + (Cosh, cosh), + (Exp, exp), + (Floor, floor), + (Inv, inv), + (Ln, ln), + (Neg, neg), + (Not, not), + (Sin, sin), + (Sinh, sinh), + (Sqrt, sqrt), + (Square, square), + (Tan, tan), + (Tanh, tanh) + ); } diff --git a/core/src/utils.rs b/core/src/utils.rs new file mode 100644 index 00000000..67bee454 --- /dev/null +++ b/core/src/utils.rs @@ -0,0 +1,17 @@ +/* + Appellation: utils + Contrib: FL03 +*/ + +macro_rules! 
enum_fn_constructor { + ($(($variant:ident, $method:ident)),*) => { + $( + enum_fn_constructor!($variant, $method); + )* + }; + ($variant:ident, $method:ident) => { + pub fn $method() -> Self { + Self::$variant + } + }; +} diff --git a/graphs/src/ops/kinds.rs b/graphs/src/ops/kinds.rs index 0ce1f60d..43d8bfd9 100644 --- a/graphs/src/ops/kinds.rs +++ b/graphs/src/ops/kinds.rs @@ -107,7 +107,7 @@ impl BinaryExpr { } } -impl BinaryOperation for BinaryExpr +impl BinOp for BinaryExpr where T: Copy + Default + PartialOrd + num::traits::NumOps, { diff --git a/graphs/src/scg/graph.rs b/graphs/src/scg/graph.rs index 4dc4df20..b6429d59 100644 --- a/graphs/src/scg/graph.rs +++ b/graphs/src/scg/graph.rs @@ -5,7 +5,7 @@ use super::Node; use crate::ops::{BinaryExpr, Operations}; use crate::prelude::GraphResult as Result; -use acme::ops::binary::BinaryOperation; +use acme::ops::binary::BinOp; use num::traits::{NumAssign, NumOps, Signed}; use petgraph::algo::toposort; use petgraph::prelude::{DiGraph, NodeIndex}; diff --git a/macros/src/lib.rs b/macros/src/lib.rs index d4755fe9..f5eb11bb 100644 --- a/macros/src/lib.rs +++ b/macros/src/lib.rs @@ -22,7 +22,7 @@ use syn::parse_macro_input; /// /// # Examples /// -/// ## Compute the gradient of a simple expression +/// ### Basic arithmetic /// /// ``` /// extern crate acme_macros as macros; @@ -32,11 +32,25 @@ use syn::parse_macro_input; /// fn main() { /// let x = 3f64; /// let y = 4f64; -/// let dx = autodiff!(x: x * y); -/// let dy = autodiff!(y: x * y); /// -/// assert_eq!(dx, y); -/// assert_eq!(dy, x); +/// assert_eq!(y, autodiff!(x: x * y)); +/// assert_eq!(x, autodiff!(y: x * y)); +/// assert_eq!(1f64, autodiff!(x: x + y)); +/// } +/// ``` +/// +/// ### Trigonometric functions +/// +/// ``` +/// extern crate acme_macros as macros; +/// +/// use macros::autodiff; +/// +/// fn main() { +/// let x = 2f64; +/// assert_eq!(autodiff!(x: x.cos()), -x.sin()); +/// assert_eq!(autodiff!(x: x.sin()), x.cos()); +/// assert_eq!(autodiff!(x: x.tan()), x.cos().powi(2).recip()); /// } /// ``` #[proc_macro] diff --git a/tensor/src/data/container.rs b/tensor/src/data/container.rs index a9b89ce5..54f71684 100644 --- a/tensor/src/data/container.rs +++ b/tensor/src/data/container.rs @@ -104,7 +104,7 @@ where unsafe { if let Some(slc) = self.as_slice_memory_order() { ContainerBase::from_shape_trusted_iter_unchecked( - self.shape().slice(), + self.shape().as_slice(), slc.iter(), f, ) diff --git a/tensor/src/data/layout.rs b/tensor/src/data/layout.rs index 792231dd..ffb7907b 100644 --- a/tensor/src/data/layout.rs +++ b/tensor/src/data/layout.rs @@ -58,37 +58,17 @@ impl Layout { stride, } } + /// Determine if the current layout is contiguous or not. pub fn is_contiguous(&self) -> bool { - self.shape.is_contiguous(&self.stride) + self.shape().is_contiguous(&self.stride) } pub fn is_layout_c(&self) -> bool { - if let 1 = *self.shape.rank() { - return self.stride[0] == 1 || self.shape[0] <= 1; - } - - for d in self.shape().iter() { - if *d == 0 { - return true; - } - } - - let mut contig_stride = 1_isize; - // check all dimensions -- a dimension of length 1 can have unequal strides - for (dim, s) in izip!(self.shape().iter().rev(), self.stride().iter().rev()) { - if *dim != 1 { - let s = *s as isize; - if s != contig_stride { - return false; - } - contig_stride *= *dim as isize; - } - } - true + super::is_layout_c(self) } /// Determine if the current layout is square or not. 
pub fn is_square(&self) -> bool { - self.shape.is_square() + self.shape().is_square() } /// Get a peek at the offset of the layout. pub fn offset(&self) -> usize { @@ -98,7 +78,7 @@ impl Layout { /// element. pub fn offset_from_low_addr_ptr_to_logical_ptr(&self) -> usize { let offset = - izip!(self.shape().slice(), self.stride().slice()).fold(0, |_offset, (d, s)| { + izip!(self.shape().as_slice(), self.stride().as_slice()).fold(0, |_offset, (d, s)| { let d = *d as isize; let s = *s as isize; if s < 0 && d > 1 { @@ -132,13 +112,13 @@ impl Layout { } /// Get a reference to the number of elements in the layout. pub fn size(&self) -> usize { - self.shape.size() + self.shape().size() } /// Get a reference to the stride of the layout. pub const fn stride(&self) -> &Stride { &self.stride } - + /// Swap the axes of the layout. pub fn swap_axes(&self, a: Axis, b: Axis) -> Layout { Layout { offset: self.offset, @@ -148,10 +128,7 @@ impl Layout { } pub fn transpose(&self) -> Layout { - let mut layout = self.clone(); - layout.shape.reverse(); - layout.stride.reverse(); - layout + self.clone().reverse_axes() } pub fn with_offset(mut self, offset: usize) -> Self { @@ -167,20 +144,19 @@ impl Layout { } // Internal methods -#[allow(dead_code)] impl Layout { pub(crate) fn index(&self, idx: impl AsRef<[usize]>) -> usize { let idx = idx.as_ref(); - if idx.len() != *self.shape.rank() { + if idx.len() != *self.rank() { panic!("Dimension mismatch"); } - idx.iter().zip(self.stride.iter()).map(|(i, s)| i * s).sum() + self.index_unchecked(idx) } pub(crate) fn index_unchecked(&self, idx: impl AsRef<[usize]>) -> usize { idx.as_ref() .iter() - .zip(self.stride.iter()) + .zip(self.stride().iter()) .map(|(i, s)| i * s) .sum() } diff --git a/tensor/src/data/mod.rs b/tensor/src/data/mod.rs index ee308166..86d4264c 100644 --- a/tensor/src/data/mod.rs +++ b/tensor/src/data/mod.rs @@ -28,10 +28,36 @@ pub type Container = ContainerBase>; pub type SharedContainer = ContainerBase>; pub(crate) mod utils { + use super::Layout; #[cfg(not(feature = "std"))] use alloc::vec::Vec; use core::ptr::NonNull; + pub(crate) fn is_layout_c(layout: &Layout) -> bool { + if let 1 = *layout.shape().rank() { + return layout.stride[0] == 1 || layout.shape[0] <= 1; + } + + for d in layout.shape().iter() { + if *d == 0 { + return true; + } + } + + let mut contig_stride = 1_isize; + // check all dimensions -- a dimension of length 1 can have unequal strides + for (dim, s) in izip!(layout.shape().iter().rev(), layout.stride().iter().rev()) { + if *dim != 1 { + let s = *s as isize; + if s != contig_stride { + return false; + } + contig_stride *= *dim as isize; + } + } + true + } + /// Return a NonNull pointer to the vector's data pub(crate) fn nonnull_from_vec_data(v: &mut Vec) -> NonNull { // this pointer is guaranteed to be non-null diff --git a/tensor/src/error.rs b/tensor/src/error.rs index f323e036..7ff3817e 100644 --- a/tensor/src/error.rs +++ b/tensor/src/error.rs @@ -21,10 +21,10 @@ pub type TensorResult = std::result::Result; #[strum(serialize_all = "snake_case")] pub enum TensorError { Arithmetic(ArithmeticError), - Indexing(String), Shape(ShapeError), Singular, NotScalar, + Unknown(String), } unsafe impl Send for TensorError {} @@ -35,7 +35,13 @@ impl std::error::Error for TensorError {} impl From<&str> for TensorError { fn from(error: &str) -> Self { - TensorError::Indexing(error.to_string()) + TensorError::Unknown(error.to_string()) + } +} + +impl From for TensorError { + fn from(error: String) -> Self { + TensorError::Unknown(error) } } 
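// A quick sketch of the conversions defined above (usage assumed, not part of this patch):
// let e: TensorError = "out of bounds".into(); // via From<&str>
// assert!(matches!(e, TensorError::Unknown(_)));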
@@ -69,14 +75,30 @@ pub enum ArithmeticError { } macro_rules! into_tensor_error { - ($error:ident, $kind:ident) => { + ($(($error:ident => $kind:ident)),*) => { + $(into_tensor_error!($error => $kind);)* + }; + ($error:ident => $kind:ident) => { impl From<$error> for TensorError { fn from(error: $error) -> Self { TensorError::$kind(error) } } + + impl TryFrom for $error { + type Error = TensorError; + + fn try_from(error: TensorError) -> TensorResult<$error> { + match error { + TensorError::$kind(error) => Ok(error), + error => Err(error), + } + } + } }; } -into_tensor_error!(ArithmeticError, Arithmetic); -into_tensor_error!(ShapeError, Shape); +into_tensor_error!( + (ArithmeticError => Arithmetic), + (ShapeError => Shape) +); diff --git a/tensor/src/impls/iter.rs b/tensor/src/impls/iter.rs index ea5612db..65fae7b6 100644 --- a/tensor/src/impls/iter.rs +++ b/tensor/src/impls/iter.rs @@ -2,19 +2,27 @@ Appellation: iter Contrib: FL03 */ -use crate::prelude::Scalar; +use crate::prelude::{Axis, Scalar}; use crate::tensor::TensorBase; impl TensorBase where T: Scalar, { + pub fn product(&self) -> T { + self.data().iter().copied().product() + } + + pub fn product_axis(&self, _axis: Axis) -> T { + unimplemented!("product_axis") + } + pub fn sum(&self) -> T { self.data().iter().copied().sum() } - pub fn product(&self) -> T { - self.data().iter().copied().product() + pub fn sum_axis(&self, _axis: Axis) -> T { + unimplemented!("sum_axis") } } diff --git a/tensor/src/impls/linalg.rs b/tensor/src/impls/linalg.rs index a29c589a..39327534 100644 --- a/tensor/src/impls/linalg.rs +++ b/tensor/src/impls/linalg.rs @@ -10,19 +10,19 @@ use crate::tensor::{self, TensorBase}; use acme::prelude::UnaryOp; use num::traits::{Num, NumAssign}; -fn inverse_impl(matrix: &TensorBase) -> TensorResult> +fn inverse_impl(tensor: &TensorBase) -> TensorResult> where T: Copy + Num + NumAssign + PartialOrd, { - let op = TensorExpr::unary(matrix.clone(), UnaryOp::Inv); - let rows = matrix.nrows(); - let cols = matrix.ncols(); + let op = TensorExpr::unary(tensor.clone(), UnaryOp::Inv); + let rows = tensor.nrows(); + let cols = tensor.ncols(); - if !matrix.is_square() { + if !tensor.is_square() { return Err(ShapeError::IncompatibleShapes.into()); // Matrix must be square for inversion } - let identity = TensorBase::eye(rows); + let eye = TensorBase::eye(rows); // Construct an augmented matrix by concatenating the original matrix with an identity matrix let mut aug = TensorBase::zeros((rows, 2 * cols)); @@ -30,10 +30,10 @@ where // aug.slice_mut(s![.., ..cols]).assign(matrix); for i in 0..rows { for j in 0..cols { - aug[[i, j]] = matrix[[i, j]]; + aug[[i, j]] = tensor[[i, j]]; } for j in cols..acols { - aug[[i, j]] = identity[[i, j - cols]]; + aug[[i, j]] = eye[[i, j - cols]]; } } @@ -61,14 +61,14 @@ where } // Extract the inverted matrix from the augmented matrix - let mut inverted = matrix.zeros_like().with_op(op.into()); + let mut inv = tensor.zeros_like().with_op(op.into()); for i in 0..rows { for j in 0..cols { - inverted[[i, j]] = aug[[i, j + cols]]; + inv[[i, j]] = aug[[i, j + cols]]; } } - Ok(inverted.to_owned()) + Ok(inv.to_owned()) } impl TensorBase diff --git a/tensor/src/impls/num.rs b/tensor/src/impls/num.rs index 8566d375..c38ab5c2 100644 --- a/tensor/src/impls/num.rs +++ b/tensor/src/impls/num.rs @@ -19,7 +19,7 @@ where impl One for TensorBase where - T: Scalar, + T: Copy + One, { fn one() -> Self { Self::from_scalar(T::one()) @@ -28,7 +28,7 @@ where impl Zero for TensorBase where - T: Scalar, + T: Copy + Zero, { fn 
zero() -> Self {
         Self::from_scalar(T::zero())
diff --git a/tensor/src/impls/reshape.rs b/tensor/src/impls/reshape.rs
index 86891e01..acadb276 100644
--- a/tensor/src/impls/reshape.rs
+++ b/tensor/src/impls/reshape.rs
@@ -8,17 +8,19 @@ use crate::tensor::TensorBase;
 
 impl<T> TensorBase<T>
 where
-    T: Clone + Default,
+    T: Clone,
 {
+    /// Coerce the tensor to act like a larger shape.
+    /// This method doesn't change the underlying data, but it does change the layout.
     pub fn broadcast(&self, shape: impl IntoShape) -> Self {
-        let layout = self.layout.broadcast_as(shape).unwrap();
+        let layout = self.layout().broadcast_as(shape).unwrap();
         let op = TensorExpr::broadcast(self.clone(), layout.shape().clone());
         Self {
             id: TensorId::new(),
             kind: self.kind(),
             layout,
             op: op.into(),
-            data: self.data.clone(),
+            data: self.data().clone(),
         }
     }
@@ -29,8 +31,7 @@ where
         unimplemented!()
     }
-
-    ///
+    /// Swap two axes in the tensor.
     pub fn swap_axes(&self, swap: Axis, with: Axis) -> Self {
         let op = TensorExpr::swap_axes(self.clone(), swap, with);
@@ -67,7 +68,8 @@ where
             data: self.data().clone(),
         }
     }
-
+    /// Reshape the tensor;
+    /// returns an error if the new shape specifies a different number of elements.
     pub fn reshape(self, shape: impl IntoShape) -> TensorResult<Self> {
         let shape = shape.into_shape();
         if self.size() != shape.size() {
diff --git a/tensor/src/ops/backprop.rs b/tensor/src/ops/backprop.rs
index 59dcdf92..111e80c6 100644
--- a/tensor/src/ops/backprop.rs
+++ b/tensor/src/ops/backprop.rs
@@ -43,13 +43,8 @@ impl<A, B> BackpropOp<A, B> {
     pub fn take(&mut self) -> Option<TensorExpr<A, B>> {
         self.0.take()
     }
-}
-impl<A> BackpropOp<A>
-where
-    A: Clone,
-{
-    pub fn view(&self) -> BackpropOp<&A> {
+    pub fn view(&self) -> BackpropOp<&A, &B> {
         BackpropOp(self.0.as_ref().map(|op| op.view()))
     }
 }
diff --git a/tensor/src/ops/op.rs b/tensor/src/ops/op.rs
index 86376ff0..a88dc5f4 100644
--- a/tensor/src/ops/op.rs
+++ b/tensor/src/ops/op.rs
@@ -26,11 +26,11 @@ pub enum TensorExpr<A, B = A> {
 
 impl<A, B> TensorExpr<A, B> {
     pub fn binary(lhs: TensorBase<A>, rhs: TensorBase<A>, op: BinaryOp) -> Self {
-        TensorExpr::Binary(Box::new(lhs), Box::new(rhs), op)
+        Self::Binary(Box::new(lhs), Box::new(rhs), op)
     }
 
     pub fn binary_scalar(lhs: TensorBase<A>, rhs: B, op: BinaryOp) -> Self {
-        TensorExpr::BinaryScalar(Box::new(lhs), rhs, op)
+        Self::BinaryScalar(Box::new(lhs), rhs, op)
     }
 
     pub fn binary_scalar_c(
@@ -42,61 +42,54 @@ impl<A, B> TensorExpr<A, B> {
     }
 
     pub fn broadcast(tensor: TensorBase<A>, shape: Shape) -> Self {
-        TensorExpr::Broadcast(Box::new(tensor), shape)
+        Self::Broadcast(Box::new(tensor), shape)
     }
 
     pub fn matmul(lhs: TensorBase<A>, rhs: TensorBase<A>) -> Self {
-        TensorExpr::Matmul(Box::new(lhs), Box::new(rhs))
+        Self::Matmul(Box::new(lhs), Box::new(rhs))
     }
 
     pub fn reshape(tensor: TensorBase<A>, shape: Shape) -> Self {
-        TensorExpr::Reshape(Box::new(tensor), shape)
+        Self::Reshape(Box::new(tensor), shape)
     }
 
     pub fn shape(expr: ReshapeExpr<A, B>) -> Self {
-        TensorExpr::Shape(expr)
+        Self::Shape(expr)
     }
 
     pub fn swap_axes(tensor: TensorBase<A>, swap: Axis, with: Axis) -> Self {
-        TensorExpr::SwapAxes(Box::new(tensor), swap, with)
+        Self::SwapAxes(Box::new(tensor), swap, with)
     }
 
     pub fn transpose(scope: TensorBase<A>) -> Self {
-        TensorExpr::Transpose(Box::new(scope))
+        Self::Transpose(Box::new(scope))
     }
 
     pub fn unary(tensor: TensorBase<A>, op: UnaryOp) -> Self {
-        TensorExpr::Unary(Box::new(tensor), op)
+        Self::Unary(Box::new(tensor), op)
     }
-}
-impl<A, B> TensorExpr<A, B> {
-    pub fn lhs(self) -> Option<TensorBase<A>> {
+
+    pub fn lhs(self) -> Option<TensorBase<A>> {
         match self {
-            TensorExpr::Binary(lhs, _, _) => Some(*lhs),
-            TensorExpr::BinaryScalar(lhs, _, _) =>
Some(*lhs), - TensorExpr::Unary(lhs, _) => Some(*lhs), - TensorExpr::Broadcast(tensor, _) => Some(*tensor), - TensorExpr::Matmul(lhs, _) => Some(*lhs), - TensorExpr::Transpose(lhs) => Some(*lhs), + Self::Binary(lhs, _, _) => Some(*lhs), + Self::BinaryScalar(lhs, _, _) => Some(*lhs), + Self::Unary(lhs, _) => Some(*lhs), + Self::Broadcast(tensor, _) => Some(*tensor), + Self::Matmul(lhs, _) => Some(*lhs), + Self::Transpose(lhs) => Some(*lhs), _ => None, } } - pub fn rhs(self) -> Option> { + pub fn rhs(self) -> Option> { match self { - TensorExpr::Binary(_, rhs, _) => Some(*rhs), - TensorExpr::BinaryScalar(_, scalar, _) => Some(TensorBase::from_scalar(scalar)), - TensorExpr::Matmul(_, rhs) => Some(*rhs), + Self::Binary(_, rhs, _) => Some(*rhs), + Self::BinaryScalar(_, scalar, _) => Some(TensorBase::from_scalar(scalar)), + Self::Matmul(_, rhs) => Some(*rhs), _ => None, } } -} - -impl TensorExpr -where - T: Clone, -{ - pub fn view<'a>(&'a self) -> TensorExpr<&'a T> { + pub fn view<'a>(&'a self) -> TensorExpr<&'a A, &'a B> { match self { TensorExpr::Binary(lhs, rhs, op) => TensorExpr::binary(lhs.view(), rhs.view(), *op), TensorExpr::BinaryScalar(lhs, rhs, op) => { diff --git a/tensor/src/shape/dim/mod.rs b/tensor/src/shape/dim/mod.rs index 59c80f19..e7122897 100644 --- a/tensor/src/shape/dim/mod.rs +++ b/tensor/src/shape/dim/mod.rs @@ -21,9 +21,9 @@ pub trait Dimension: IndexMut { type Pattern; fn as_slice(&self) -> &[usize]; - + /// Return the rank of the dimension; i.e. the number of axes. fn rank(&self) -> usize; - + /// Return the size of the dimension; i.e. the number of elements. fn size(&self) -> usize; #[doc(hidden)] @@ -65,7 +65,7 @@ pub(crate) mod utils { strides: &Stride, ) -> Result<(), ShapeError> { // Check condition 3. - let is_empty = dim.slice().iter().any(|&d| d == 0); + let is_empty = dim.as_slice().iter().any(|&d| d == 0); if is_empty && max_offset > data_len { return Err(ShapeError::OutOfBounds); } @@ -84,7 +84,7 @@ pub(crate) mod utils { pub fn dim_stride_overlap(dim: &Shape, strides: &Stride) -> bool { let order = strides._fastest_varying_stride_order(); let mut sum_prev_offsets = 0; - for &index in order.slice() { + for &index in order.as_slice() { let d = dim[index]; let s = (strides[index] as isize).abs(); match d { @@ -123,7 +123,7 @@ pub(crate) mod utils { // Determine absolute difference in units of `A` between least and greatest // address accessible by moving along all axes. - let max_offset: usize = izip!(dim.slice(), strides.slice()) + let max_offset: usize = izip!(dim.as_slice(), strides.as_slice()) .try_fold(0usize, |acc, (&d, &s)| { let s = s as isize; // Calculate maximum possible absolute movement along this axis. @@ -151,7 +151,7 @@ pub(crate) mod utils { pub fn size_of_shape_checked(dim: &Shape) -> Result { let size_nonzero = dim - .slice() + .as_slice() .iter() .filter(|&&d| d != 0) .try_fold(1usize, |acc, &d| acc.checked_mul(d)) diff --git a/tensor/src/shape/rank.rs b/tensor/src/shape/rank.rs index 73591c7e..b786dbaf 100644 --- a/tensor/src/shape/rank.rs +++ b/tensor/src/shape/rank.rs @@ -6,7 +6,8 @@ //! //! The rank of a n-dimensional array describes the number of dimensions use core::borrow::Borrow; -use core::ops::{Deref, DerefMut}; +use core::ops::{Deref, DerefMut, Not}; +use num::traits::{Num, One, Zero}; #[cfg(feature = "serde")] use serde::{Deserialize, Serialize}; @@ -101,8 +102,13 @@ unsafe impl Send for Rank {} unsafe impl Sync for Rank {} macro_rules! 
impl_std_ops { + ($(($trait:tt, $method:ident, $e:tt)),*) => { + $( + impl_std_ops!($trait, $method, $e); + )* + }; ($trait:tt, $method:ident, $e:tt) => { - impl std::ops::$trait for Rank { + impl core::ops::$trait for Rank { type Output = Rank; fn $method(self, rhs: usize) -> Self::Output { @@ -111,7 +117,16 @@ macro_rules! impl_std_ops { } } - impl std::ops::$trait for Rank { + impl<'a> core::ops::$trait for &'a Rank { + type Output = Rank; + + fn $method(self, rhs: usize) -> Self::Output { + let rank = self.0 $e rhs; + Rank(rank) + } + } + + impl core::ops::$trait for Rank { type Output = Rank; fn $method(self, rhs: Rank) -> Self::Output { @@ -120,7 +135,7 @@ macro_rules! impl_std_ops { } } - impl<'a> std::ops::$trait for &'a Rank { + impl<'a> core::ops::$trait for &'a Rank { type Output = Rank; fn $method(self, rhs: Rank) -> Self::Output { @@ -129,7 +144,7 @@ macro_rules! impl_std_ops { } } - impl<'a> std::ops::$trait<&'a Rank> for Rank { + impl<'a> core::ops::$trait<&'a Rank> for Rank { type Output = Rank; fn $method(self, rhs: &'a Rank) -> Self::Output { @@ -138,7 +153,7 @@ macro_rules! impl_std_ops { } } - impl<'a> std::ops::$trait<&'a Rank> for &'a Rank { + impl<'a> core::ops::$trait<&'a Rank> for &'a Rank { type Output = Rank; fn $method(self, rhs: &'a Rank) -> Self::Output { @@ -147,11 +162,38 @@ macro_rules! impl_std_ops { } } }; - (many: $(($trait:tt, $method:ident, $e:tt)),*) => { - $( - impl_std_ops!($trait, $method, $e); - )* - }; } -impl_std_ops!(many: (Add, add, +), (Sub, sub, -), (Mul, mul, *), (Div, div, /), (Rem, rem, %)); +impl_std_ops!((Add, add, +), (Sub, sub, -), (Mul, mul, *), (Div, div, /), (Rem, rem, %)); + +impl Not for Rank { + type Output = Rank; + + fn not(self) -> Self::Output { + Rank(!self.0) + } +} + +impl Num for Rank { + type FromStrRadixErr = ::FromStrRadixErr; + + fn from_str_radix(str: &str, radix: u32) -> Result { + usize::from_str_radix(str, radix).map(Rank) + } +} + +impl One for Rank { + fn one() -> Self { + Self(1) + } +} + +impl Zero for Rank { + fn zero() -> Self { + Self(0) + } + + fn is_zero(&self) -> bool { + self.0 == 0 + } +} diff --git a/tensor/src/shape/shape.rs b/tensor/src/shape/shape.rs index d11c91b9..baae7d0a 100644 --- a/tensor/src/shape/shape.rs +++ b/tensor/src/shape/shape.rs @@ -30,25 +30,13 @@ impl Shape { pub fn zeros(rank: usize) -> Self { Self(vec![0; rank]) } - #[doc(hidden)] - pub(crate) fn default_strides(&self) -> Stride { - // Compute default array strides - // Shape (a, b, c) => Give strides (b * c, c, 1) - let mut strides = Stride::zeros(self.rank()); - // For empty arrays, use all zero strides. - if self.slice().iter().all(|&d| d != 0) { - let mut it = strides.slice_mut().iter_mut().rev(); - // Set first element to 1 - if let Some(rs) = it.next() { - *rs = 1; - } - let mut cum_prod = 1; - for (rs, dim) in it.zip(self.slice().iter().rev()) { - cum_prod *= *dim; - *rs = cum_prod; - } - } - strides + /// Get a reference to the shape as a slice. + pub fn as_slice(&self) -> &[usize] { + &self.0 + } + /// Get a mutable reference to the shape as a slice. + pub fn as_slice_mut(&mut self) -> &mut [usize] { + &mut self.0 } pub fn diagonalize(&self) -> Shape { @@ -135,14 +123,7 @@ impl Shape { pub fn size(&self) -> usize { self.0.iter().product() } - /// Get a reference to the shape as a slice. - pub fn slice(&self) -> &[usize] { - &self.0 - } - /// Get a mutable reference to the shape as a slice. 
- pub fn slice_mut(&mut self) -> &mut [usize] { - &mut self.0 - } + /// Swap the dimensions of the current [Shape] at the given [Axis]. pub fn swap(&mut self, a: Axis, b: Axis) { self.0.swap(a.axis(), b.axis()) @@ -155,7 +136,7 @@ impl Shape { } pub fn upcast(&self, to: &Shape, stride: &Stride) -> Option { - let mut new_stride = to.slice().to_vec(); + let mut new_stride = to.as_slice().to_vec(); // begin at the back (the least significant dimension) // size of the axis has to either agree or `from` has to be 1 if to.rank() < self.rank() { @@ -164,10 +145,10 @@ impl Shape { let mut iter = new_stride.as_mut_slice().iter_mut().rev(); for ((er, es), dr) in self - .slice() + .as_slice() .iter() .rev() - .zip(stride.slice().iter().rev()) + .zip(stride.as_slice().iter().rev()) .zip(iter.by_ref()) { /* update strides */ @@ -193,6 +174,27 @@ impl Shape { // Internal methods impl Shape { + #[doc(hidden)] + pub(crate) fn default_strides(&self) -> Stride { + // Compute default array strides + // Shape (a, b, c) => Give strides (b * c, c, 1) + let mut strides = Stride::zeros(self.rank()); + // For empty arrays, use all zero strides. + if self.as_slice().iter().all(|&d| d != 0) { + let mut it = strides.as_slice_mut().iter_mut().rev(); + // Set first element to 1 + if let Some(rs) = it.next() { + *rs = 1; + } + let mut cum_prod = 1; + for (rs, dim) in it.zip(self.as_slice().iter().rev()) { + cum_prod *= *dim; + *rs = cum_prod; + } + } + strides + } + pub(crate) fn matmul_shape(&self, other: &Self) -> TensorResult { if *self.rank() != 2 || *other.rank() != 2 || self[1] != other[0] { return Err(ShapeError::IncompatibleShapes.into()); diff --git a/tensor/src/shape/stride.rs b/tensor/src/shape/stride.rs index ca0a3455..da71a21a 100644 --- a/tensor/src/shape/stride.rs +++ b/tensor/src/shape/stride.rs @@ -4,7 +4,8 @@ */ use super::{Axis, Rank}; use core::borrow::{Borrow, BorrowMut}; -use core::ops::{Deref, DerefMut}; +use core::ops::{Deref, DerefMut, Index, IndexMut}; +use core::slice::{Iter as SliceIter, IterMut as SliceIterMut}; #[cfg(feature = "serde")] use serde::{Deserialize, Serialize}; @@ -37,28 +38,32 @@ impl Stride { pub fn zeros(rank: Rank) -> Self { Self(vec![0; *rank]) } - - pub(crate) fn _fastest_varying_stride_order(&self) -> Self { - let mut indices = self.clone(); - for (i, elt) in indices.slice_mut().into_iter().enumerate() { - *elt = i; - } - let strides = self.slice(); - indices - .slice_mut() - .sort_by_key(|&i| (strides[i] as isize).abs()); - indices + /// Returns a reference to the stride. + pub fn as_slice(&self) -> &[usize] { + &self.0 } - - pub fn get(&self, index: usize) -> Option<&usize> { - self.0.get(index) + /// Returns a mutable reference to the stride. + pub fn as_slice_mut(&mut self) -> &mut [usize] { + &mut self.0 } - - pub fn iter(&self) -> std::slice::Iter { + /// Returns the capacity of the stride. + pub fn capacity(&self) -> usize { + self.0.capacity() + } + /// Clears the stride, removing all elements. + pub fn clear(&mut self) { + self.0.clear() + } + /// Gets the element at the specified axis, returning None if the axis is out of bounds. + pub fn get(&self, axis: Axis) -> Option<&usize> { + self.0.get(*axis) + } + /// Returns an iterator over references to the elements of the stride. + pub fn iter(&self) -> SliceIter { self.0.iter() } - - pub fn iter_mut(&mut self) -> std::slice::IterMut { + /// Returns an iterator over mutable references to the elements of the stride. 
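+    ///
+    /// Illustrative usage, assuming the `FromIterator<usize>` impl below:
+    ///
+    /// ```ignore
+    /// let mut stride: Stride = [6, 3, 1].into_iter().collect();
+    /// for s in stride.iter_mut() {
+    ///     *s *= 2;
+    /// }
+    /// assert_eq!(stride.as_slice(), &[12, 6, 2]);
+    /// ```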
+    pub fn iter_mut(&mut self) -> SliceIterMut<usize> {
         self.0.iter_mut()
     }
     /// Returns the rank of the stride; i.e., the number of dimensions.
@@ -69,19 +74,11 @@ impl Stride {
     pub fn reverse(&mut self) {
         self.0.reverse()
     }
-    /// Returns a reference to the stride.
-    pub fn slice(&self) -> &[usize] {
-        &self.0
-    }
-    /// Returns a mutable reference to the stride.
-    pub fn slice_mut(&mut self) -> &mut [usize] {
-        &mut self.0
-    }
-    /// Swaps two elements in the stride.
+    /// Swaps two elements in the stride, in place.
     pub fn swap(&mut self, a: usize, b: usize) {
         self.0.swap(a, b)
     }
-
+    /// Returns a new stride with the two axes swapped.
     pub fn swap_axes(&self, a: Axis, b: Axis) -> Self {
         let mut stride = self.clone();
         stride.swap(a.axis(), b.axis());
@@ -89,6 +86,21 @@ impl Stride {
     }
 }
 
+// Internal methods
+impl Stride {
+    pub(crate) fn _fastest_varying_stride_order(&self) -> Self {
+        let mut indices = self.clone();
+        for (i, elt) in indices.as_slice_mut().into_iter().enumerate() {
+            *elt = i;
+        }
+        let strides = self.as_slice();
+        indices
+            .as_slice_mut()
+            .sort_by_key(|&i| (strides[i] as isize).abs());
+        indices
+    }
+}
+
 impl AsRef<[usize]> for Stride {
     fn as_ref(&self) -> &[usize] {
         &self.0
@@ -139,13 +151,33 @@ impl FromIterator<usize> for Stride {
     }
 }
 
-// impl Iterator for Stride {
-//     type Item = usize;
+impl Index<usize> for Stride {
+    type Output = usize;
 
-//     fn next(&mut self) -> Option<Self::Item> {
-//         self.0.next()
-//     }
-// }
+    fn index(&self, index: usize) -> &Self::Output {
+        &self.0[index]
+    }
+}
+
+impl IndexMut<usize> for Stride {
+    fn index_mut(&mut self, index: usize) -> &mut Self::Output {
+        &mut self.0[index]
+    }
+}
+
+impl Index<Axis> for Stride {
+    type Output = usize;
+
+    fn index(&self, index: Axis) -> &Self::Output {
+        &self.0[*index]
+    }
+}
+
+impl IndexMut<Axis> for Stride {
+    fn index_mut(&mut self, index: Axis) -> &mut Self::Output {
+        &mut self.0[*index]
+    }
+}
 
 impl IntoIterator for Stride {
     type Item = usize;
diff --git a/tensor/src/tensor.rs b/tensor/src/tensor.rs
index d2d9c73b..2f66f957 100644
--- a/tensor/src/tensor.rs
+++ b/tensor/src/tensor.rs
@@ -207,12 +207,12 @@ impl<T> TensorBase<T> {
     }
     /// Get a reference to the last element of the tensor
     pub fn last(&self) -> Option<&T> {
-        let pos = self.shape().iter().map(|d| d - 1).collect::<Vec<_>>();
+        let pos = self.shape().get_final_position();
         self.get(pos)
     }
     /// Get a mutable reference to the last element of the tensor
     pub fn last_mut(&mut self) -> Option<&mut T> {
-        let pos = self.shape().iter().map(|d| d - 1).collect::<Vec<_>>();
+        let pos = self.shape().get_final_position();
         self.get_mut(pos)
     }
     /// Get a reference to the [Layout] of the tensor
@@ -231,6 +231,10 @@ impl<T> TensorBase<T> {
     pub const fn op(&self) -> &BackpropOp<T> {
         &self.op
     }
+    /// Get a view of the tensor's operation
+    pub fn op_view(&self) -> BackpropOp<&T> {
+        self.op().view()
+    }
     /// Get an owned reference to the [Rank] of the tensor
     pub fn rank(&self) -> Rank {
         self.shape().rank()
@@ -260,7 +264,7 @@ impl<T> TensorBase<T> {
     where
         T: Clone,
     {
-        self.data.to_vec()
+        self.data().to_vec()
     }
     /// Changes the kind of tensor to a variable
     pub fn variable(mut self) -> Self {
@@ -268,7 +272,7 @@ impl<T> TensorBase<T> {
         self
     }
     ///
-    pub unsafe fn with_layout(mut self, layout: Layout) -> Self {
+    pub unsafe fn with_layout_unchecked(mut self, layout: Layout) -> Self {
         self.layout = layout;
         self
     }
@@ -278,17 +282,17 @@ impl<T> TensorBase<T> {
         self
     }
 
-    pub fn with_shape(mut self, shape: impl IntoShape) -> Self {
+    pub unsafe fn with_shape_unchecked(mut self, shape: impl IntoShape) -> Self {
         self.layout = Layout::contiguous(shape);
         self
} } -impl TensorBase -where - T: Clone, -{ - pub fn to_owned(&self) -> TensorBase { +impl TensorBase { + pub fn to_owned(&self) -> TensorBase + where + T: Clone, + { self.clone() } @@ -342,6 +346,12 @@ impl TensorBase { } } +impl<'a, T> AsRef> for TensorBase<&'a T> { + fn as_ref(&self) -> &TensorBase { + unsafe { &*(self as *const TensorBase<&'a T> as *const TensorBase) } + } +} + impl Index for TensorBase where Idx: AsRef<[usize]>, diff --git a/tensor/src/utils.rs b/tensor/src/utils.rs index 31f28689..35334ea0 100644 --- a/tensor/src/utils.rs +++ b/tensor/src/utils.rs @@ -2,9 +2,6 @@ Appellation: utils Contrib: FL03 */ -//! # Utilities -//! -//! use crate::prelude::{Scalar, TensorExpr, TensorResult}; use crate::shape::ShapeError; use crate::tensor::{from_vec_with_op, TensorBase}; @@ -63,32 +60,6 @@ where out } -pub fn dot_product(lhs: &TensorBase, rhs: &TensorBase) -> TensorResult> -where - T: Scalar, -{ - if lhs.shape().rank() != rhs.shape().rank() { - return Err(ShapeError::IncompatibleShapes.into()); - } - - let shape = lhs.shape().matmul_shape(&rhs.shape()).unwrap(); - let mut result = vec![T::zero(); shape.size()]; - - for i in 0..lhs.shape().nrows() { - for j in 0..rhs.shape().ncols() { - for k in 0..lhs.shape().ncols() { - let pos = i * rhs.shape().ncols() + j; - let left = i * lhs.shape().ncols() + k; - let right = k * rhs.shape().ncols() + j; - result[pos] += lhs.data[left] * rhs.data[right]; - } - } - } - let op = TensorExpr::matmul(lhs.clone(), rhs.clone()); - let tensor = from_vec_with_op(false, op, shape, result); - Ok(tensor) -} - macro_rules! izip { // @closure creates a tuple-flattening closure for .map() call. usage: // @closure partial_pattern => partial_tuple , rest , of , iterators diff --git a/tensor/tests/reshape.rs b/tensor/tests/reshape.rs index 4c565d86..c2f129ee 100644 --- a/tensor/tests/reshape.rs +++ b/tensor/tests/reshape.rs @@ -32,15 +32,15 @@ fn test_reshape() { #[test] fn test_transpose() { let shape = (2, 3); - let a = Tensor::::linspace(0f64, 6f64, 6).with_shape(shape); + let a = Tensor::::linspace(0f64, 6f64, 6) + .reshape(shape) + .unwrap(); let at = a.t(); let exp = Tensor::from_shape_vec((3, 2), vec![0.0, 3.0, 1.0, 4.0, 2.0, 5.0]); assert_ne!(&a, &at); assert_eq!(at.shape(), &Shape::new(vec![3, 2])); - for i in 0..shape.0 { - for j in 0..shape.1 { - assert_eq!(a[&[i, j]], exp[&[j, i]]); - } + for (i, j) in at.iter().zip(exp.iter()) { + assert_eq!(i, j); } } diff --git a/tensor/tests/tensor.rs b/tensor/tests/tensor.rs index 5cafa885..f892cb11 100644 --- a/tensor/tests/tensor.rs +++ b/tensor/tests/tensor.rs @@ -48,12 +48,12 @@ fn test_index() { let shape = (2, 3).into_shape(); let n = shape.size(); let a = Tensor::::linspace(0f64, n as f64, n) - .reshape(shape) + .reshape(shape.clone()) .unwrap(); assert_eq!(a[[0, 0]], 0f64); assert_eq!(a[&[0, 1]], 1f64); - assert_eq!(a[vec![1, 2]], 5f64); + assert_eq!(a[shape.get_final_position()], 5f64); } #[test] From 8829ff3b5c3d1b390d59f2e128935e204b49fe0b Mon Sep 17 00:00:00 2001 From: Joe McCain III Date: Thu, 4 Apr 2024 19:04:12 -0500 Subject: [PATCH 82/87] update Signed-off-by: Joe McCain III --- core/src/math/props/mod.rs | 5 +- core/src/ops/binary/arithmetic.rs | 11 +++-- core/src/ops/binary/kinds.rs | 45 ++++++++++++++---- core/src/ops/binary/mod.rs | 2 +- core/src/ops/binary/operator.rs | 12 ++++- core/src/ops/unary/kinds.rs | 2 +- core/src/ops/unary/specs.rs | 4 +- core/src/specs/mod.rs | 24 ++++++++-- core/src/types/variables.rs | 10 ++-- core/src/utils.rs | 17 ++++++- tensor/src/actions/iter/axis.rs | 29 
+++++++++++- tensor/src/actions/iter/iterator.rs | 36 +++++++++++++-- tensor/src/actions/mod.rs | 28 ----------- tensor/src/data/mod.rs | 30 +----------- tensor/src/data/repr/owned.rs | 4 +- tensor/src/impls/grad.rs | 16 +++---- tensor/src/impls/ops/binary.rs | 54 ++++++++++++---------- tensor/src/lib.rs | 13 ++++-- tensor/src/linalg/mod.rs | 1 + tensor/src/linalg/tri/mod.rs | 4 ++ tensor/src/{data => shape}/layout.rs | 36 +++++++++++++-- tensor/src/shape/mod.rs | 4 +- tensor/src/shape/shape.rs | 8 +++- tensor/src/shape/stride.rs | 10 ++++ tensor/src/tensor.rs | 69 +++++++++++++++++++--------- 25 files changed, 318 insertions(+), 156 deletions(-) delete mode 100644 tensor/src/actions/mod.rs create mode 100644 tensor/src/linalg/tri/mod.rs rename tensor/src/{data => shape}/layout.rs (84%) diff --git a/core/src/math/props/mod.rs b/core/src/math/props/mod.rs index 23ab7235..335f1ec6 100644 --- a/core/src/math/props/mod.rs +++ b/core/src/math/props/mod.rs @@ -2,8 +2,11 @@ Appellation: props Contrib: FL03 */ +//! # Properties +//! +//! -/// +/// Associative Property describes an operation that is invariant under the grouping of its operands. pub trait Associative {} /// Commutative Property describes an operation that is invariant under the exchange of its operands. diff --git a/core/src/ops/binary/arithmetic.rs b/core/src/ops/binary/arithmetic.rs index 300257d8..a9c96898 100644 --- a/core/src/ops/binary/arithmetic.rs +++ b/core/src/ops/binary/arithmetic.rs @@ -48,7 +48,6 @@ macro_rules! operator { operator!($op, $kind); )* }; - } macro_rules! operators { @@ -149,17 +148,17 @@ macro_rules! impl_binary_op { } } }; - (expr $op:ident, $bound:ident, $exp:expr) => { + (other: $op:ident, $bound:tt, $call:ident) => { operator!($op, Binary); impl BinOp for $op where - A: core::ops::$bound, + A: $bound, { type Output = C; fn eval(&self, lhs: A, rhs: B) -> Self::Output { - $exp(lhs, rhs) + $bound::$call(lhs, rhs) } } }; @@ -169,6 +168,10 @@ operators!(Arithmetic; {Add: Addition => add, Div: Division => div, Mul: Multipl impl_binary_op!((Addition, Add, +), (Division, Div, /), (Multiplication, Mul, *), (Remainder, Rem, %), (Subtraction, Sub, -)); +use num::traits::Pow; + +impl_binary_op!(other: Power, Pow, pow); + impl Arithmetic { pub fn new(op: Arithmetic) -> Self { op diff --git a/core/src/ops/binary/kinds.rs b/core/src/ops/binary/kinds.rs index 2bd277b2..ddef1c3e 100644 --- a/core/src/ops/binary/kinds.rs +++ b/core/src/ops/binary/kinds.rs @@ -2,8 +2,10 @@ Appellation: kinds Contrib: FL03 */ +use super::arithmetic::*; #[cfg(feature = "serde")] use serde::{Deserialize, Serialize}; +use smart_default::SmartDefault; use strum::{Display, EnumCount, EnumIs, EnumIter, VariantNames}; #[cfg_attr( @@ -15,7 +17,6 @@ use strum::{Display, EnumCount, EnumIs, EnumIter, VariantNames}; Clone, Copy, Debug, - Default, Display, EnumCount, EnumIs, @@ -25,6 +26,7 @@ use strum::{Display, EnumCount, EnumIs, EnumIter, VariantNames}; Ord, PartialEq, PartialOrd, + SmartDefault, VariantNames, )] #[repr(u8)] @@ -32,12 +34,12 @@ use strum::{Display, EnumCount, EnumIs, EnumIter, VariantNames}; pub enum BinaryOp { // { #[default] - Add, - Sub, - Mul, - Div, + Add(Addition), + Div(Division), + Mul(Multiplication), + Sub(Subtraction), Pow, - Rem, + Rem(Remainder), Max, Min, And, @@ -45,14 +47,41 @@ pub enum BinaryOp { Xor, Shl, Shr, - // Custom(Kind), + Custom(), } impl BinaryOp { pub fn differentiable(&self) -> bool { match self { - BinaryOp::Add | BinaryOp::Sub | BinaryOp::Mul | BinaryOp::Div | BinaryOp::Pow => true, + 
BinaryOp::Add(_) | BinaryOp::Div(_) | Self::Mul(_) | Self::Sub(_) | BinaryOp::Pow => { + true + } _ => false, } } + + pub fn is_commutative(&self) -> bool { + match self { + BinaryOp::Add(_) | Self::Mul(_) | BinaryOp::And | BinaryOp::Or | BinaryOp::Xor => true, + _ => false, + } + } + + simple_enum_constructor!( + (Add, add, Addition), + (Div, div, Division), + (Mul, mul, Multiplication), + (Rem, rem, Remainder), + (Sub, sub, Subtraction) + ); + unit_enum_constructor!( + (Pow, pow), + (Max, max), + (Min, min), + (And, bitand), + (Or, bitor), + (Xor, bitxor), + (Shl, shl), + (Shr, shr) + ); } diff --git a/core/src/ops/binary/mod.rs b/core/src/ops/binary/mod.rs index 9ce3d3ca..aa66f634 100644 --- a/core/src/ops/binary/mod.rs +++ b/core/src/ops/binary/mod.rs @@ -11,7 +11,7 @@ pub(crate) mod specs; pub type BoxedBinOp = Box>; -pub trait BinOp { +pub trait BinOp { type Output; fn eval(&self, lhs: A, rhs: B) -> Self::Output; diff --git a/core/src/ops/binary/operator.rs b/core/src/ops/binary/operator.rs index 72fb146d..668525f9 100644 --- a/core/src/ops/binary/operator.rs +++ b/core/src/ops/binary/operator.rs @@ -14,7 +14,11 @@ impl BinaryArgs { Self { lhs, rhs } } - pub fn swap(self) -> BinaryArgs { + pub fn into_args(self) -> (A, B) { + (self.lhs, self.rhs) + } + + pub fn reverse(self) -> BinaryArgs { BinaryArgs::new(self.rhs, self.lhs) } @@ -27,6 +31,12 @@ impl BinaryArgs { } } +impl BinaryArgs { + pub fn swap(&mut self) { + std::mem::swap(&mut self.lhs, &mut self.rhs); + } +} + impl From> for (A, B) { fn from(args: BinaryArgs) -> Self { (args.lhs, args.rhs) diff --git a/core/src/ops/unary/kinds.rs b/core/src/ops/unary/kinds.rs index 13e18b82..0fc2b2f7 100644 --- a/core/src/ops/unary/kinds.rs +++ b/core/src/ops/unary/kinds.rs @@ -59,7 +59,7 @@ impl UnaryOp { } } - enum_fn_constructor!( + unit_enum_constructor!( (Abs, abs), (Cos, cos), (Cosh, cosh), diff --git a/core/src/ops/unary/specs.rs b/core/src/ops/unary/specs.rs index cb8c2240..e1ee5c25 100644 --- a/core/src/ops/unary/specs.rs +++ b/core/src/ops/unary/specs.rs @@ -2,7 +2,7 @@ Appellation: specs Contrib: FL03 */ -use core::ops; +use core::ops::Neg; use num::traits::{Inv, Num}; use num::Complex; @@ -34,7 +34,7 @@ macro_rules! impl_conj { impl Conjugate for Complex where - T: Clone + Num + ops::Neg, + T: Clone + Neg + Num, { type Complex = Self; type Real = T; diff --git a/core/src/specs/mod.rs b/core/src/specs/mod.rs index 5df9b8e9..9d20c80b 100644 --- a/core/src/specs/mod.rs +++ b/core/src/specs/mod.rs @@ -12,16 +12,30 @@ pub(crate) mod store; pub mod func; -use core::borrow::Borrow; +pub trait AsSlice { + fn as_slice(&self) -> &[T]; +} + +impl AsSlice for S +where + S: AsRef<[T]>, +{ + fn as_slice(&self) -> &[T] { + self.as_ref() + } +} -pub trait Idx { - type Index; +pub trait AsSliceMut { + fn as_slice_mut(&mut self) -> &mut [T]; } -pub trait IdxExt: Idx +impl AsSliceMut for S where - Self: Borrow + Copy, + S: AsMut<[T]>, { + fn as_slice_mut(&mut self) -> &mut [T] { + self.as_mut() + } } pub(crate) mod prelude { diff --git a/core/src/types/variables.rs b/core/src/types/variables.rs index 18a82a76..373b08c0 100644 --- a/core/src/types/variables.rs +++ b/core/src/types/variables.rs @@ -224,7 +224,7 @@ macro_rules! 
impl_std_op { fn $method(self, rhs: Variable) -> Self::Output { let name = format!("{}", stringify!($method)); let value = self.eval_once() $e rhs.eval_once(); - Variable::new(name).with_op(BinaryOp::$trait).with_value(value) + Variable::new(name).with_op(BinaryOp::$method()).with_value(value) } } @@ -237,7 +237,7 @@ macro_rules! impl_std_op { fn $method(self, rhs: &'a Variable) -> Self::Output { let name = format!("{}", stringify!($method)); let value = self.eval_once() $e rhs.eval(); - Variable::new(name).with_op(BinaryOp::$trait).with_value(value) + Variable::new(name).with_op(BinaryOp::$method()).with_value(value) } } @@ -250,7 +250,7 @@ macro_rules! impl_std_op { fn $method(self, rhs: Variable) -> Self::Output { let name = format!("{}", stringify!($method)); let value = self.eval() $e rhs.eval_once(); - Variable::new(name).with_op(BinaryOp::$trait).with_value(value) + Variable::new(name).with_op(BinaryOp::$method()).with_value(value) } } @@ -263,7 +263,7 @@ macro_rules! impl_std_op { fn $method(self, rhs: &'a Variable) -> Self::Output { let name = format!("{}", stringify!($method)); let value = self.eval() $e rhs.eval(); - Variable::new(name).with_op(BinaryOp::$trait).with_value(value) + Variable::new(name).with_op(BinaryOp::$method()).with_value(value) } } @@ -276,7 +276,7 @@ macro_rules! impl_std_op { fn $method(self, rhs: T) -> Self::Output { let name = format!("{}", stringify!($method)); let value = self.eval_once() $e rhs; - Variable::new(name).with_op(BinaryOp::$trait).with_value(value) + Variable::new(name).with_op(BinaryOp::$method()).with_value(value) } } }; diff --git a/core/src/utils.rs b/core/src/utils.rs index 67bee454..952eaa72 100644 --- a/core/src/utils.rs +++ b/core/src/utils.rs @@ -3,10 +3,10 @@ Contrib: FL03 */ -macro_rules! enum_fn_constructor { +macro_rules! unit_enum_constructor { ($(($variant:ident, $method:ident)),*) => { $( - enum_fn_constructor!($variant, $method); + unit_enum_constructor!($variant, $method); )* }; ($variant:ident, $method:ident) => { @@ -15,3 +15,16 @@ macro_rules! enum_fn_constructor { } }; } + +macro_rules! 
simple_enum_constructor { + ($(($variant:ident, $method:ident, $new:expr)),*) => { + $( + simple_enum_constructor!($variant, $method, $new); + )* + }; + ($variant:ident, $method:ident, $new:expr) => { + pub fn $method() -> Self { + Self::$variant($new) + } + }; +} diff --git a/tensor/src/actions/iter/axis.rs b/tensor/src/actions/iter/axis.rs index e543db32..aee73000 100644 --- a/tensor/src/actions/iter/axis.rs +++ b/tensor/src/actions/iter/axis.rs @@ -2,5 +2,32 @@ Appellation: axis Contrib: FL03 */ +use crate::data::{ContainerBase, RawData}; +use crate::index::{Ix, Ixs}; +use crate::shape::{Axis, Layout}; -pub struct AxisIter; +pub struct AxisIter { + index: Ix, + end: Ix, + stride: Ixs, + inner_layout: Layout, + ptr: *mut A, +} + +impl AxisIter { + pub fn new(v: ContainerBase, axis: Axis) -> Self + where + S: RawData, + { + let stride = v.stride()[axis]; + let end = v.shape()[axis]; + // Self { + // index: 0, + // end, + // stride, + // inner_layout: layout.remove_axis(axis), + // ptr: v.as_mut_ptr(), + // } + unimplemented!() + } +} diff --git a/tensor/src/actions/iter/iterator.rs b/tensor/src/actions/iter/iterator.rs index 73e1a7dd..831b79a7 100644 --- a/tensor/src/actions/iter/iterator.rs +++ b/tensor/src/actions/iter/iterator.rs @@ -5,13 +5,13 @@ use super::IndexIter; use crate::TensorBase; -pub struct StrideIter<'a, T> { +pub struct Iter<'a, T> { scope: Option<&'a T>, strides: IndexIter<'a>, tensor: &'a TensorBase, } -impl<'a, T> StrideIter<'a, T> { +impl<'a, T> Iter<'a, T> { pub fn new(tensor: &'a TensorBase) -> Self { let strides = IndexIter::from(tensor.layout()); Self { @@ -22,7 +22,7 @@ impl<'a, T> StrideIter<'a, T> { } } -impl<'a, T> Iterator for StrideIter<'a, T> { +impl<'a, T> Iterator for Iter<'a, T> { type Item = &'a T; fn next(&mut self) -> Option { @@ -32,7 +32,7 @@ impl<'a, T> Iterator for StrideIter<'a, T> { } } -impl<'a, T> DoubleEndedIterator for StrideIter<'a, T> { +impl<'a, T> DoubleEndedIterator for Iter<'a, T> { fn next_back(&mut self) -> Option { let (_pos, idx) = self.strides.next_back()?; self.scope = self.tensor.get_by_index(idx); @@ -40,8 +40,34 @@ impl<'a, T> DoubleEndedIterator for StrideIter<'a, T> { } } -impl<'a, T> From<&'a TensorBase> for StrideIter<'a, T> { +impl<'a, T> From<&'a TensorBase> for Iter<'a, T> { fn from(tensor: &'a TensorBase) -> Self { Self::new(tensor) } } + +#[allow(dead_code)] +pub struct IterMut<'a, T> { + scope: Option<&'a mut T>, + strides: IndexIter<'a>, + tensor: &'a mut TensorBase, +} + +impl<'a, T> IterMut<'a, T> { + pub fn new(strides: IndexIter<'a>, tensor: &'a mut TensorBase) -> Self { + Self { + scope: None, + strides, + tensor, + } + } +} + +impl<'a, T> Iterator for IterMut<'a, T> { + type Item = &'a mut T; + + fn next(&mut self) -> Option { + let (_pos, idx) = self.strides.next()?; + unimplemented!() + } +} diff --git a/tensor/src/actions/mod.rs b/tensor/src/actions/mod.rs deleted file mode 100644 index 4cb5d9b6..00000000 --- a/tensor/src/actions/mod.rs +++ /dev/null @@ -1,28 +0,0 @@ -/* - Appellation: actions - Contrib: FL03 -*/ -//! # Actions -//! -//! This module describes the actions that may be taken on or by a tensor. -//! -//! The actions include:
-//! * Automatic Differentiation -//! * Creation Routines (``) -//! * Indexing -//! * Iteration - -pub mod create; -pub mod grad; -pub mod index; -pub mod iter; - -pub(crate) mod prelude { - pub use super::create::*; - pub use super::grad::*; - pub use super::index::*; - pub use super::iter::*; -} - -#[cfg(test)] -mod tests {} diff --git a/tensor/src/data/mod.rs b/tensor/src/data/mod.rs index 86d4264c..59c86795 100644 --- a/tensor/src/data/mod.rs +++ b/tensor/src/data/mod.rs @@ -6,10 +6,9 @@ //! //! pub(crate) use self::utils::*; -pub use self::{container::*, layout::*, specs::*}; +pub use self::{container::*, specs::*}; pub(crate) mod container; -pub(crate) mod layout; pub(crate) mod specs; pub mod elem; @@ -28,36 +27,10 @@ pub type Container
= ContainerBase>; pub type SharedContainer = ContainerBase>; pub(crate) mod utils { - use super::Layout; #[cfg(not(feature = "std"))] use alloc::vec::Vec; use core::ptr::NonNull; - pub(crate) fn is_layout_c(layout: &Layout) -> bool { - if let 1 = *layout.shape().rank() { - return layout.stride[0] == 1 || layout.shape[0] <= 1; - } - - for d in layout.shape().iter() { - if *d == 0 { - return true; - } - } - - let mut contig_stride = 1_isize; - // check all dimensions -- a dimension of length 1 can have unequal strides - for (dim, s) in izip!(layout.shape().iter().rev(), layout.stride().iter().rev()) { - if *dim != 1 { - let s = *s as isize; - if s != contig_stride { - return false; - } - contig_stride *= *dim as isize; - } - } - true - } - /// Return a NonNull pointer to the vector's data pub(crate) fn nonnull_from_vec_data(v: &mut Vec) -> NonNull { // this pointer is guaranteed to be non-null @@ -78,7 +51,6 @@ pub(crate) mod utils { } pub(crate) mod prelude { - pub use super::layout::Layout; pub use super::repr::*; pub use super::specs::*; } diff --git a/tensor/src/data/repr/owned.rs b/tensor/src/data/repr/owned.rs index e1f3d8e7..4cf3d116 100644 --- a/tensor/src/data/repr/owned.rs +++ b/tensor/src/data/repr/owned.rs @@ -50,6 +50,7 @@ impl OwnedRepr { } // Internal methods +#[allow(dead_code)] impl OwnedRepr { pub(crate) fn as_nonnull_mut(&mut self) -> NonNull { self.ptr @@ -75,7 +76,7 @@ impl OwnedRepr { capacity: self_.capacity, } } - #[allow(dead_code)] + pub(crate) fn into_vec(self) -> Vec { ManuallyDrop::new(self).take_as_vec() } @@ -84,7 +85,6 @@ impl OwnedRepr { /// ## Safety /// /// The first `new_len` elements of the data should be valid. - #[allow(dead_code)] pub(crate) unsafe fn set_len(&mut self, new_len: usize) { debug_assert!(new_len <= self.capacity); self.len = new_len; diff --git a/tensor/src/impls/grad.rs b/tensor/src/impls/grad.rs index ad4e913a..72b33880 100644 --- a/tensor/src/impls/grad.rs +++ b/tensor/src/impls/grad.rs @@ -95,39 +95,39 @@ where if let Some(op) = &*node.op { match op { TensorExpr::Binary(lhs, rhs, kind) => match kind { - BinaryOp::Add => { + BinaryOp::Add(_) => { *entry!(store, lhs) += &grad; *entry!(store, rhs) += &grad; } - BinaryOp::Div => { + BinaryOp::Div(_) => { *entry!(store, lhs) += &grad / rhs.as_ref(); *entry!(store, rhs) -= &grad * lhs.as_ref() / (rhs.as_ref() * rhs.as_ref()); } - BinaryOp::Mul => { + BinaryOp::Mul(_) => { *entry!(store, lhs) += &grad * rhs.as_ref(); *entry!(store, rhs) += &grad * lhs.as_ref(); } - BinaryOp::Sub => { + BinaryOp::Sub(_) => { *entry!(store, lhs) += &grad; *entry!(store, rhs) -= &grad; } _ => todo!(), }, TensorExpr::BinaryScalar(lhs, rhs, kind) => match kind { - BinaryOp::Add => { + BinaryOp::Add(_) => { *entry!(store, lhs) += &grad; } - BinaryOp::Div => { + BinaryOp::Div(_) => { *entry!(store, lhs) += &grad / *rhs; } - BinaryOp::Mul => { + BinaryOp::Mul(_) => { *entry!(store, lhs) += &grad * *rhs; } BinaryOp::Pow => { *entry!(store, lhs) += &grad * *rhs * lhs.pow(*rhs - T::one()); } - BinaryOp::Sub => { + BinaryOp::Sub(_) => { *entry!(store, lhs) += &grad; } _ => todo!(), diff --git a/tensor/src/impls/ops/binary.rs b/tensor/src/impls/ops/binary.rs index afa735ea..2ff3a00e 100644 --- a/tensor/src/impls/ops/binary.rs +++ b/tensor/src/impls/ops/binary.rs @@ -188,7 +188,7 @@ macro_rules! 
impl_binary_op { fn $method(self, other: T) -> Self::Output { let shape = self.shape().clone(); let store = self.data().iter().map(|a| *a $op other).collect(); - let op = TensorExpr::binary_scalar(self, other, BinaryOp::$trait); + let op = TensorExpr::binary_scalar(self, other, BinaryOp::$method()); from_vec_with_op(false, op, shape, store) } } @@ -202,7 +202,7 @@ macro_rules! impl_binary_op { fn $method(self, other: T) -> Self::Output { let shape = self.shape().clone(); let store = self.data().iter().map(|a| *a $op other).collect(); - let op = TensorExpr::binary_scalar(self.clone(), other, BinaryOp::$trait); + let op = TensorExpr::binary_scalar(self.clone(), other, BinaryOp::$method()); from_vec_with_op(false, op, shape, store) } } @@ -218,7 +218,7 @@ macro_rules! impl_binary_op { check!(ne: self.shape(), other.shape()); let shape = self.shape().clone(); let store = self.data().iter().zip(other.data().iter()).map(|(a, b)| *a $op *b).collect(); - let op = TensorExpr::binary(self, other, BinaryOp::$trait); + let op = TensorExpr::binary(self, other, BinaryOp::$method()); from_vec_with_op(false, op, shape, store) } } @@ -235,7 +235,7 @@ macro_rules! impl_binary_op { } let shape = self.shape().clone(); let store = self.data().iter().zip(other.data().iter()).map(|(a, b)| *a $op *b).collect(); - let op = TensorExpr::binary(self, other.clone(), BinaryOp::$trait); + let op = TensorExpr::binary(self, other.clone(), BinaryOp::$method()); from_vec_with_op(false, op, shape, store) } } @@ -252,7 +252,7 @@ macro_rules! impl_binary_op { } let shape = self.shape().clone(); let store = self.data().iter().zip(other.data().iter()).map(|(a, b)| *a $op *b).collect(); - let op = TensorExpr::binary(self.clone(), other, BinaryOp::$trait); + let op = TensorExpr::binary(self.clone(), other, BinaryOp::$method()); from_vec_with_op(false, op, shape, store) } } @@ -269,7 +269,7 @@ macro_rules! impl_binary_op { } let shape = self.shape().clone(); let store = self.data().iter().zip(other.data().iter()).map(|(a, b)| *a $op *b).collect(); - let op = TensorExpr::binary(self.clone(), other.clone(), BinaryOp::$trait); + let op = TensorExpr::binary(self.clone(), other.clone(), BinaryOp::$method()); from_vec_with_op(false, op, shape, store) } } @@ -278,30 +278,30 @@ macro_rules! impl_binary_op { } macro_rules! 
impl_assign_op { - ($trait:ident, $method:ident, $inner:ident, $op:tt) => { - impl ops::$trait for TensorBase + ($trait:ident, $method:ident, $constructor:ident, $inner:ident, $op:tt) => { + impl core::ops::$trait for TensorBase where - T: Copy + ops::$inner, + T: Copy + core::ops::$inner, { fn $method(&mut self, other: Self) { check!(ne: self.shape(), other.shape()); let shape = self.shape().clone(); let store = self.data().iter().zip(other.data().iter()).map(|(a, b)| *a $op *b).collect(); - let op = TensorExpr::binary(self.clone(), other, BinaryOp::$inner); + let op = TensorExpr::binary(self.clone(), other, BinaryOp::$constructor()); *self = from_vec_with_op(false, op, shape, store); } } - impl<'a, T> ops::$trait<&'a TensorBase> for TensorBase + impl<'a, T> core::ops::$trait<&'a TensorBase> for TensorBase where - T: Copy + ops::$inner, + T: Copy + core::ops::$inner, { fn $method(&mut self, other: &'a TensorBase) { check!(ne: self.shape(), other.shape()); let shape = self.shape().clone(); let store = self.data().iter().zip(other.data().iter()).map(|(a, b)| *a $op *b).collect(); - let op = TensorExpr::binary(self.clone(), other.clone(), BinaryOp::$inner); + let op = TensorExpr::binary(self.clone(), other.clone(), BinaryOp::$constructor()); *self = from_vec_with_op(false, op, shape, store); } @@ -311,21 +311,27 @@ macro_rules! impl_assign_op { } macro_rules! impl_binary_method { - (scalar: $variant:ident, $method:ident, $op:tt) => { + ($method:ident, $f:expr) => { + pub fn $method(&self, other: &Self) -> Self { + $f(self, other) + } + + }; + (scalar: $variant:tt, $method:ident, $op:tt) => { pub fn $method(&self, other: T) -> Self { let shape = self.shape(); let store = self.data().iter().map(| elem | *elem $op other).collect(); - let op = TensorExpr::binary_scalar(self.clone(), other.clone(), BinaryOp::$variant); + let op = TensorExpr::binary_scalar(self.clone(), other, BinaryOp::$variant()); from_vec_with_op(false, op, shape, store) } }; - (tensor: $variant:ident, $method:ident, $op:tt) => { + (tensor: $method:ident, $op:tt) => { pub fn $method(&self, other: &Self) -> Self { check!(ne: self.shape(), other.shape()); let shape = self.shape(); let store = self.data().iter().zip(other.data().iter()).map(|(a, b)| *a $op *b).collect(); - let op = TensorExpr::binary(self.clone(), other.clone(), BinaryOp::$variant); + let op = TensorExpr::binary(self.clone(), other.clone(), BinaryOp::$method()); from_vec_with_op(false, op, shape, store) } @@ -334,16 +340,16 @@ macro_rules! 
impl_binary_method { impl_binary_op!((Add, add, +), (Div, div, /), (Mul, mul, *), (Rem, rem, %), (Sub, sub, -)); -impl_assign_op!(AddAssign, add_assign, Add, +); -impl_assign_op!(DivAssign, div_assign, Div, /); -impl_assign_op!(MulAssign, mul_assign, Mul, *); -impl_assign_op!(RemAssign, rem_assign, Rem, %); -impl_assign_op!(SubAssign, sub_assign, Sub, -); +impl_assign_op!(AddAssign, add_assign, add, Add, +); +impl_assign_op!(DivAssign, div_assign, div, Div, /); +impl_assign_op!(MulAssign, mul_assign, mul, Mul, *); +impl_assign_op!(RemAssign, rem_assign, rem, Rem, %); +impl_assign_op!(SubAssign, sub_assign, sub, Sub, -); impl TensorBase where T: Scalar, { - impl_binary_method!(tensor: Add, add, +); - impl_binary_method!(scalar: Add, add_scalar, +); + impl_binary_method!(tensor: add, +); + impl_binary_method!(scalar: add, add_scalar, +); } diff --git a/tensor/src/lib.rs b/tensor/src/lib.rs index 6c661939..5c8821f5 100644 --- a/tensor/src/lib.rs +++ b/tensor/src/lib.rs @@ -12,7 +12,7 @@ extern crate alloc; extern crate acme_core as acme; #[doc(inline)] -pub use self::{tensor::*, utils::*}; +pub use self::{actions::*, tensor::*, utils::*}; #[macro_use] pub(crate) mod seal; @@ -20,7 +20,6 @@ pub(crate) mod tensor; #[macro_use] pub(crate) mod utils; -pub mod actions; pub mod backend; pub mod data; pub mod error; @@ -33,6 +32,14 @@ pub mod specs; pub mod stats; pub mod types; +pub(crate) mod actions { + + pub mod create; + pub mod grad; + pub mod index; + pub mod iter; +} + mod impls { mod ops { mod binary; @@ -50,7 +57,7 @@ pub type Tensor = tensor::TensorBase; pub mod prelude { #[doc(inline)] - pub use crate::actions::prelude::*; + pub use crate::actions::{create::*, grad::*, index::*, iter::*}; #[doc(inline)] pub use crate::data::prelude::*; #[doc(inline)] diff --git a/tensor/src/linalg/mod.rs b/tensor/src/linalg/mod.rs index edb3352e..bb715919 100644 --- a/tensor/src/linalg/mod.rs +++ b/tensor/src/linalg/mod.rs @@ -5,6 +5,7 @@ //! # Linear Algebra //! //! +pub mod tri; pub mod uplo; pub trait Inverse { diff --git a/tensor/src/linalg/tri/mod.rs b/tensor/src/linalg/tri/mod.rs new file mode 100644 index 00000000..24f83564 --- /dev/null +++ b/tensor/src/linalg/tri/mod.rs @@ -0,0 +1,4 @@ +/* + Appellation: tri + Contrib: FL03 +*/ diff --git a/tensor/src/data/layout.rs b/tensor/src/shape/layout.rs similarity index 84% rename from tensor/src/data/layout.rs rename to tensor/src/shape/layout.rs index ffb7907b..bff25e04 100644 --- a/tensor/src/data/layout.rs +++ b/tensor/src/shape/layout.rs @@ -63,9 +63,7 @@ impl Layout { pub fn is_contiguous(&self) -> bool { self.shape().is_contiguous(&self.stride) } - pub fn is_layout_c(&self) -> bool { - super::is_layout_c(self) - } + /// Determine if the current layout is square or not. pub fn is_square(&self) -> bool { self.shape().is_square() @@ -95,6 +93,13 @@ impl Layout { debug_assert_eq!(self.stride.len(), *self.shape.rank()); self.shape.rank() } + pub fn remove_axis(&self, axis: Axis) -> Self { + Self { + offset: self.offset, + shape: self.shape().remove_axis(axis), + stride: self.stride().remove_axis(axis), + } + } /// Reshape the layout to a new shape. 
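 ///
 /// For example, a layout over shape `(2, 3)` can be reshaped to `(3, 2)`
 /// or `(6,)`; each of these describes the same six elements.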
pub fn reshape(&mut self, shape: impl IntoShape) { self.shape = shape.into_shape(); @@ -160,6 +165,31 @@ impl Layout { .map(|(i, s)| i * s) .sum() } + + pub(crate) fn is_layout_c(&self) -> bool { + if let 1 = *self.rank() { + return self.stride[0] == 1 || self.shape[0] <= 1; + } + + for d in self.shape().iter() { + if *d == 0 { + return true; + } + } + + let mut contig_stride = 1_isize; + // check all dimensions -- a dimension of length 1 can have unequal strides + for (dim, s) in izip!(self.shape().iter().rev(), self.stride().iter().rev()) { + if *dim != 1 { + let s = *s as isize; + if s != contig_stride { + return false; + } + contig_stride *= *dim as isize; + } + } + true + } } #[cfg(test)] diff --git a/tensor/src/shape/mod.rs b/tensor/src/shape/mod.rs index cf5123f4..4277fb4c 100644 --- a/tensor/src/shape/mod.rs +++ b/tensor/src/shape/mod.rs @@ -6,10 +6,11 @@ //! //! This modules provides implements several useful primitives for working with //! the shape of a [Tensor](crate::tensor::TensorBase). -pub use self::{axis::*, error::*, rank::*, shape::Shape, stride::*}; +pub use self::{axis::*, error::*, layout::Layout, rank::*, shape::Shape, stride::*}; pub(crate) mod axis; pub(crate) mod error; +pub(crate) mod layout; pub(crate) mod rank; pub(crate) mod shape; pub(crate) mod stride; @@ -41,6 +42,7 @@ pub(crate) mod prelude { pub use super::axis::{Axis, IntoAxis}; pub use super::dim::*; pub use super::error::*; + pub use super::layout::Layout; pub use super::rank::{IntoRank, Rank}; pub use super::shape::*; pub use super::stride::*; diff --git a/tensor/src/shape/shape.rs b/tensor/src/shape/shape.rs index baae7d0a..1c590035 100644 --- a/tensor/src/shape/shape.rs +++ b/tensor/src/shape/shape.rs @@ -107,10 +107,16 @@ impl Shape { pub fn rank(&self) -> Rank { self.0.len().into() } - /// Remove the dimension at the given [Axis]. + /// Remove the dimension at the given [Axis], pub fn remove(&mut self, index: Axis) -> usize { self.0.remove(*index) } + /// Remove the dimension at the given [Axis]. + pub fn remove_axis(&self, index: Axis) -> Shape { + let mut shape = self.clone(); + shape.remove(index); + shape + } /// Reverse the dimensions of the shape. pub fn reverse(&mut self) { self.0.reverse() diff --git a/tensor/src/shape/stride.rs b/tensor/src/shape/stride.rs index da71a21a..28de7caf 100644 --- a/tensor/src/shape/stride.rs +++ b/tensor/src/shape/stride.rs @@ -70,6 +70,16 @@ impl Stride { pub fn rank(&self) -> Rank { self.0.len().into() } + /// Removes and returns the stride of the axis. + pub fn remove(&mut self, axis: Axis) -> usize { + self.0.remove(*axis) + } + /// Returns a new stride with the axis removed. + pub fn remove_axis(&self, axis: Axis) -> Self { + let mut stride = self.clone(); + stride.remove(axis); + stride + } /// Reverses the stride. 
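 ///
 /// Reversing strides `[6, 3, 1]` yields `[1, 3, 6]`, mirroring the shape
 /// reversal performed by transposition.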
pub fn reverse(&mut self) { self.0.reverse() diff --git a/tensor/src/tensor.rs b/tensor/src/tensor.rs index 2f66f957..e6ad2a29 100644 --- a/tensor/src/tensor.rs +++ b/tensor/src/tensor.rs @@ -2,12 +2,11 @@ Appellation: tensor Contrib: FL03 */ -use crate::actions::iter::StrideIter; -use crate::data::Layout; +use crate::actions::iter::Iter; use crate::error::{TensorError, TensorResult}; use crate::ops::{BackpropOp, TensorExpr}; use crate::prelude::{TensorId, TensorKind}; -use crate::shape::{IntoShape, Rank, Shape, Stride}; +use crate::shape::{IntoShape, Layout, Rank, Shape, Stride}; #[cfg(not(feature = "std"))] use alloc::vec::{self, Vec}; @@ -17,7 +16,7 @@ use core::slice::Iter as SliceIter; #[cfg(feature = "std")] use std::vec; -pub(crate) fn create_with( +pub(crate) fn create( kind: impl Into, op: impl Into>, shape: impl IntoShape, @@ -37,7 +36,7 @@ pub(crate) fn from_scalar_with_op( op: TensorExpr, data: T, ) -> TensorBase { - create_with( + create( kind.into(), BackpropOp::new(op), Shape::scalar(), @@ -50,7 +49,7 @@ pub(crate) fn from_vec_with_kind( shape: impl IntoShape, data: Vec, ) -> TensorBase { - create_with(kind, BackpropOp::none(), shape, data) + create(kind, BackpropOp::none(), shape, data) } pub(crate) fn from_vec_with_op( @@ -59,7 +58,7 @@ pub(crate) fn from_vec_with_op( shape: impl IntoShape, data: Vec, ) -> TensorBase { - create_with(kind.into(), BackpropOp::new(op), shape, data) + create(kind.into(), BackpropOp::new(op), shape, data) } #[derive(Clone, Debug, Hash)] @@ -72,17 +71,6 @@ pub struct TensorBase { } impl TensorBase { - pub fn new(kind: TensorKind, shape: impl IntoShape) -> Self { - let shape = shape.into_shape(); - let data = Vec::with_capacity(shape.size()); - Self { - id: TensorId::new(), - data, - kind, - layout: Layout::contiguous(shape), - op: BackpropOp::none(), - } - } /// Create a new tensor from an iterator. 
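 ///
 /// Illustrative usage (a sketch with hypothetical values; the result is
 /// assumed to be one-dimensional):
 ///
 /// ```ignore
 /// let tensor = TensorBase::<f64>::from_iter((0..6).map(|i| i as f64));
 /// assert_eq!(tensor.size(), 6);
 /// ```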
pub fn from_iter(iter: I) -> Self where @@ -198,8 +186,8 @@ impl TensorBase { self.kind().is_variable() } /// Return an iterator over the tensor - pub fn iter(&self) -> StrideIter<'_, T> { - StrideIter::new(self) + pub fn iter(&self) -> Iter<'_, T> { + Iter::new(self) } /// Get the kind of the tensor pub const fn kind(&self) -> TensorKind { @@ -271,7 +259,14 @@ impl TensorBase { self.kind = TensorKind::Variable; self } - /// + /// Set the layout of the tensor + pub fn with_layout(self, layout: Layout) -> Self { + if layout.size() != self.size() { + panic!("Size mismatch"); + } + unsafe { self.with_layout_unchecked(layout) } + } + /// Set the layout of the tensor without checking for compatibility pub unsafe fn with_layout_unchecked(mut self, layout: Layout) -> Self { self.layout = layout; self @@ -322,6 +317,10 @@ impl TensorBase { self.data.get(index) } + pub(crate) fn get_by_index_mut(&mut self, index: usize) -> Option<&mut T> { + self.data.get_mut(index) + } + pub(crate) fn map<'a, F>(&'a self, f: F) -> Map, F> where F: FnMut(&'a T) -> T, @@ -344,6 +343,25 @@ impl TensorBase { data: store, } } + + pub(crate) fn map_binary(&self, other: &TensorBase, op: F) -> TensorBase + where + F: acme::prelude::BinOp, + T: Copy, + { + let store = self + .iter() + .zip(other.iter()) + .map(|(a, b)| op.eval(*a, *b)) + .collect(); + TensorBase { + id: TensorId::new(), + kind: self.kind, + layout: self.layout.clone(), + op: self.op.clone(), + data: store, + } + } } impl<'a, T> AsRef> for TensorBase<&'a T> { @@ -384,3 +402,12 @@ where self.layout == other.layout && self.data == other.data } } + +impl PartialOrd for TensorBase +where + T: PartialOrd, +{ + fn partial_cmp(&self, other: &Self) -> Option { + self.data.partial_cmp(&other.data) + } +} From d18d05ab3b58d649e2f1ce9546b9b34b89475096 Mon Sep 17 00:00:00 2001 From: Joe McCain III Date: Thu, 4 Apr 2024 20:53:41 -0500 Subject: [PATCH 83/87] update Signed-off-by: Joe McCain III --- core/src/types/constants.rs | 28 +++++++- core/src/types/dual.rs | 2 +- tensor/src/actions/iter/iterator.rs | 4 +- tensor/src/actions/iter/mod.rs | 8 +-- tensor/src/data/container.rs | 12 ++-- tensor/src/impls/linalg.rs | 14 ++-- tensor/src/lib.rs | 2 + tensor/src/shape/error.rs | 1 + tensor/src/shape/layout.rs | 14 ++-- tensor/src/shape/shape.rs | 104 +++++++++++++++------------- tensor/src/specs/ndtensor.rs | 4 +- tensor/src/specs/scalar.rs | 3 +- tensor/src/stats/impl_stats.rs | 58 ++++++++++++++++ tensor/src/stats/mod.rs | 76 +++++++++++++++++++- tensor/src/tensor.rs | 23 +++++- tensor/src/utils.rs | 7 ++ tensor/tests/composition.rs | 2 +- tensor/tests/tensor.rs | 6 +- 18 files changed, 279 insertions(+), 89 deletions(-) create mode 100644 tensor/src/stats/impl_stats.rs diff --git a/core/src/types/constants.rs b/core/src/types/constants.rs index a36b522f..f7693eab 100644 --- a/core/src/types/constants.rs +++ b/core/src/types/constants.rs @@ -4,11 +4,37 @@ */ use crate::prelude::{EvaluateOnce, Gradient}; use core::borrow::{Borrow, BorrowMut}; -use core::ops::{Deref, DerefMut, Neg, Not}; +use core::ops::{self, Deref, DerefMut, Neg, Not}; use num::{Num, One, Zero}; #[cfg(feature = "serde")] use serde::{Deserialize, Serialize}; +pub struct ConstantPtr { + ptr: *const T, +} + +impl ConstantPtr { + pub fn new(ptr: *const T) -> Self { + Self { ptr } + } + + pub fn as_ptr(&self) -> *const T { + self.ptr + } + + pub fn as_mut_ptr(&self) -> *mut T { + self.ptr as *mut T + } +} + +impl ops::Add for ConstantPtr { + type Output = ConstantPtr; + + fn add(self, rhs: usize) -> 
Self::Output { + ConstantPtr::new(self.ptr.wrapping_add(rhs)) + } +} + #[derive(Clone, Copy, Debug, Default, Eq, Hash, Ord, PartialEq, PartialOrd)] #[cfg_attr(feature = "serde", derive(Deserialize, Serialize,))] #[repr(C)] diff --git a/core/src/types/dual.rs b/core/src/types/dual.rs index 868a60b1..a77b8fbb 100644 --- a/core/src/types/dual.rs +++ b/core/src/types/dual.rs @@ -13,10 +13,10 @@ //! e^2 = 0 use crate::prelude::{EvaluateOnce, Gradient}; +use core::ops::{self, Neg, Not}; use num::{Num, One, Zero}; #[cfg(feature = "serde")] use serde::{Deserialize, Serialize}; -use std::ops::{self, Neg, Not}; #[derive(Clone, Copy, Debug, Default, Eq, Hash, Ord, PartialEq, PartialOrd)] #[cfg_attr(feature = "serde", derive(Deserialize, Serialize,))] diff --git a/tensor/src/actions/iter/iterator.rs b/tensor/src/actions/iter/iterator.rs index 831b79a7..a8851d7e 100644 --- a/tensor/src/actions/iter/iterator.rs +++ b/tensor/src/actions/iter/iterator.rs @@ -52,7 +52,7 @@ pub struct IterMut<'a, T> { strides: IndexIter<'a>, tensor: &'a mut TensorBase, } - +#[allow(dead_code)] impl<'a, T> IterMut<'a, T> { pub fn new(strides: IndexIter<'a>, tensor: &'a mut TensorBase) -> Self { Self { @@ -67,7 +67,7 @@ impl<'a, T> Iterator for IterMut<'a, T> { type Item = &'a mut T; fn next(&mut self) -> Option { - let (_pos, idx) = self.strides.next()?; + let (_pos, _idx) = self.strides.next()?; unimplemented!() } } diff --git a/tensor/src/actions/iter/mod.rs b/tensor/src/actions/iter/mod.rs index 1bbc2191..81499594 100644 --- a/tensor/src/actions/iter/mod.rs +++ b/tensor/src/actions/iter/mod.rs @@ -5,16 +5,14 @@ //! # Iter //! //! -pub use self::{axis::*, iterator::*, position::*, utils::*}; +// pub use self::{axis::*, iterator::*, position::*, utils::*}; +pub use self::{iterator::Iter, position::IndexIter, utils::*}; +#[allow(dead_code, unused)] pub(crate) mod axis; pub(crate) mod iterator; pub(crate) mod position; -pub trait IterTensor { - type Item; -} - pub(crate) mod utils { use core::ptr; diff --git a/tensor/src/data/container.rs b/tensor/src/data/container.rs index 54f71684..4e106d25 100644 --- a/tensor/src/data/container.rs +++ b/tensor/src/data/container.rs @@ -128,7 +128,7 @@ where } pub fn stride(&self) -> &Stride { - self.layout().stride() + self.layout().strides() } pub fn size(&self) -> usize { @@ -148,7 +148,7 @@ where ) -> Self { let layout = Layout::new(0, dim, strides); // debug check for issues that indicates wrong use of this constructor - debug_assert!(can_index_slice(&v, &layout.shape(), &layout.stride()).is_ok()); + debug_assert!(can_index_slice(&v, &layout.shape(), &layout.strides()).is_ok()); let ptr = { let tmp = nonnull_from_vec_data(&mut v); @@ -198,9 +198,13 @@ where self.data._is_pointer_inbounds(self.as_ptr()) } - pub(crate) unsafe fn with_layout(self, layout: Layout) -> ContainerBase { - debug_assert_eq!(self.layout().rank(), layout.rank()); + pub(crate) fn with_layout(self, layout: Layout) -> ContainerBase { + debug_assert_eq!(self.layout().size(), layout.size()); + unsafe { self.with_layout_unchecked(layout) } + } + + pub(crate) unsafe fn with_layout_unchecked(self, layout: Layout) -> ContainerBase { Self { data: self.data, layout, diff --git a/tensor/src/impls/linalg.rs b/tensor/src/impls/linalg.rs index 39327534..eb15108d 100644 --- a/tensor/src/impls/linalg.rs +++ b/tensor/src/impls/linalg.rs @@ -76,10 +76,8 @@ where T: Copy, { pub fn diag(&self) -> Self { - let rank = *self.rank(); - - let store = (0..rank).map(|i| self[vec![i; rank]]).collect::>(); - tensor::from_vec_with_kind(false, 
self.shape().diagonalize(), store) + let n = self.nrows(); + Self::from_shape_iter(self.shape().diag(), (0..n).map(|i| self[vec![i; n]])) } } @@ -99,16 +97,16 @@ where type Output = Self; fn matmul(&self, other: &Self) -> Self { + let sc = |m: usize, n: usize| m * self.ncols() + n; + let oc = |m: usize, n: usize| m * other.ncols() + n; + let shape = self.shape().matmul_shape(&other.shape()).unwrap(); let mut result = vec![T::zero(); shape.size()]; for i in 0..self.nrows() { for j in 0..other.ncols() { for k in 0..self.ncols() { - let scope = i * other.ncols() + j; - let xi = i * self.ncols() + k; - let yi = k * other.ncols() + j; - result[scope] += self.data[xi] * other.data[yi]; + result[oc(i, j)] += self.data[sc(i, k)] * other.data[oc(k, j)]; } } } diff --git a/tensor/src/lib.rs b/tensor/src/lib.rs index 5c8821f5..5ff66c97 100644 --- a/tensor/src/lib.rs +++ b/tensor/src/lib.rs @@ -20,7 +20,9 @@ pub(crate) mod tensor; #[macro_use] pub(crate) mod utils; +#[doc(hidden)] pub mod backend; +#[doc(hidden)] pub mod data; pub mod error; #[cfg(feature = "io")] diff --git a/tensor/src/shape/error.rs b/tensor/src/shape/error.rs index bfd16f1b..de97ba0c 100644 --- a/tensor/src/shape/error.rs +++ b/tensor/src/shape/error.rs @@ -41,6 +41,7 @@ pub enum ShapeError { InvalidAxis, InvalidShape, MismatchedElements, + NotSquare, OutOfBounds, Overflow, Unsupported, diff --git a/tensor/src/shape/layout.rs b/tensor/src/shape/layout.rs index bff25e04..1357ba1d 100644 --- a/tensor/src/shape/layout.rs +++ b/tensor/src/shape/layout.rs @@ -35,7 +35,7 @@ impl Layout { let mut stride = vec![0; *diff]; for (&dst_dim, (&src_dim, &src_stride)) in shape[*diff..] .iter() - .zip(self.shape().iter().zip(self.stride().iter())) + .zip(self.shape().iter().zip(self.strides().iter())) { let s = if dst_dim == src_dim { src_stride @@ -76,7 +76,7 @@ impl Layout { /// element. pub fn offset_from_low_addr_ptr_to_logical_ptr(&self) -> usize { let offset = - izip!(self.shape().as_slice(), self.stride().as_slice()).fold(0, |_offset, (d, s)| { + izip!(self.shape().as_slice(), self.strides().as_slice()).fold(0, |_offset, (d, s)| { let d = *d as isize; let s = *s as isize; if s < 0 && d > 1 { @@ -97,7 +97,7 @@ impl Layout { Self { offset: self.offset, shape: self.shape().remove_axis(axis), - stride: self.stride().remove_axis(axis), + stride: self.strides().remove_axis(axis), } } /// Reshape the layout to a new shape. @@ -120,7 +120,7 @@ impl Layout { self.shape().size() } /// Get a reference to the stride of the layout. - pub const fn stride(&self) -> &Stride { + pub const fn strides(&self) -> &Stride { &self.stride } /// Swap the axes of the layout. @@ -131,7 +131,7 @@ impl Layout { stride: self.stride.swap_axes(a, b), } } - + /// Transpose the layout. 
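// --- a short, free-standing sketch (not part of the patch): the row-major
// indexing that the `matmul` rewrite above encodes in its `sc`/`oc` closures.
// An element (i, j) of an m x n matrix stored in a flat slice lives at offset
// i * n + j; the dimension names below are assumptions for illustration only.
fn naive_matmul(lhs: &[f64], rhs: &[f64], m: usize, k: usize, n: usize) -> Vec<f64> {
    let sc = |i: usize, j: usize| i * k + j; // lhs is m x k
    let oc = |i: usize, j: usize| i * n + j; // rhs is k x n; the output is m x n
    let mut out = vec![0.0; m * n];
    for i in 0..m {
        for j in 0..n {
            for p in 0..k {
                out[oc(i, j)] += lhs[sc(i, p)] * rhs[oc(p, j)];
            }
        }
    }
    out
}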
pub fn transpose(&self) -> Layout { self.clone().reverse_axes() } @@ -161,7 +161,7 @@ impl Layout { pub(crate) fn index_unchecked(&self, idx: impl AsRef<[usize]>) -> usize { idx.as_ref() .iter() - .zip(self.stride().iter()) + .zip(self.strides().iter()) .map(|(i, s)| i * s) .sum() } @@ -179,7 +179,7 @@ impl Layout { let mut contig_stride = 1_isize; // check all dimensions -- a dimension of length 1 can have unequal strides - for (dim, s) in izip!(self.shape().iter().rev(), self.stride().iter().rev()) { + for (dim, s) in izip!(self.shape().iter().rev(), self.strides().iter().rev()) { if *dim != 1 { let s = *s as isize; if s != contig_stride { diff --git a/tensor/src/shape/shape.rs b/tensor/src/shape/shape.rs index 1c590035..3e882187 100644 --- a/tensor/src/shape/shape.rs +++ b/tensor/src/shape/shape.rs @@ -2,13 +2,15 @@ Appellation: shape Contrib: FL03 */ -use super::{Axis, Rank, Stride}; -use crate::prelude::{ShapeError, SwapAxes, TensorResult}; - +use super::{Axis, Rank, ShapeError, Stride}; +use crate::prelude::{SwapAxes, TensorResult}; +#[cfg(not(feature = "std"))] +use alloc::vec; use core::ops::{self, Deref}; #[cfg(feature = "serde")] use serde::{Deserialize, Serialize}; - +#[cfg(feature = "std")] +use std::vec; /// A shape is a description of the number of elements in each dimension. #[cfg_attr(feature = "serde", derive(Deserialize, Serialize,))] #[derive(Clone, Debug, Default, Eq, Hash, Ord, PartialEq, PartialOrd)] @@ -38,9 +40,10 @@ impl Shape { pub fn as_slice_mut(&mut self) -> &mut [usize] { &mut self.0 } - - pub fn diagonalize(&self) -> Shape { - Self::new(vec![self.size()]) + /// Attempts to create a one-dimensional shape that describes the + /// diagonal of the current shape. + pub fn diag(&self) -> Shape { + Self::new(i![self.nrows()]) } pub fn get_final_position(&self) -> Vec { self.iter().map(|&dim| dim - 1).collect() @@ -59,6 +62,10 @@ impl Shape { pub fn is_empty(&self) -> bool { self.0.is_empty() } + /// Returns true if the shape is a scalar. 
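// --- illustration only: the stride arithmetic behind `index_unchecked` above.
// A logical index maps to a flat offset via a dot product with the strides;
// the sample values are assumptions, not taken from the crate.
fn flat_offset(index: &[usize], strides: &[usize]) -> usize {
    index.iter().zip(strides.iter()).map(|(i, s)| i * s).sum()
}
// For a 2 x 3 row-major layout the strides are [3, 1], so index [1, 2]
// resolves to 1 * 3 + 2 = 5.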
+ pub fn is_scalar(&self) -> bool { + self.is_empty() + } /// Checks to see if the shape is square pub fn is_square(&self) -> bool { self.iter().all(|&dim| dim == self[0]) @@ -140,47 +147,12 @@ impl Shape { shape.swap(swap, with); shape } - - pub fn upcast(&self, to: &Shape, stride: &Stride) -> Option { - let mut new_stride = to.as_slice().to_vec(); - // begin at the back (the least significant dimension) - // size of the axis has to either agree or `from` has to be 1 - if to.rank() < self.rank() { - return None; - } - - let mut iter = new_stride.as_mut_slice().iter_mut().rev(); - for ((er, es), dr) in self - .as_slice() - .iter() - .rev() - .zip(stride.as_slice().iter().rev()) - .zip(iter.by_ref()) - { - /* update strides */ - if *dr == *er { - /* keep stride */ - *dr = *es; - } else if *er == 1 { - /* dead dimension, zero stride */ - *dr = 0 - } else { - return None; - } - } - - /* set remaining strides to zero */ - for dr in iter { - *dr = 0; - } - - Some(new_stride.into()) - } } // Internal methods +#[allow(dead_code)] +#[doc(hidden)] impl Shape { - #[doc(hidden)] pub(crate) fn default_strides(&self) -> Stride { // Compute default array strides // Shape (a, b, c) => Give strides (b * c, c, 1) @@ -222,6 +194,42 @@ impl Shape { stride.reverse(); stride.into() } + + pub(crate) fn upcast(&self, to: &Shape, stride: &Stride) -> Option { + let mut new_stride = to.as_slice().to_vec(); + // begin at the back (the least significant dimension) + // size of the axis has to either agree or `from` has to be 1 + if to.rank() < self.rank() { + return None; + } + + let mut iter = new_stride.as_mut_slice().iter_mut().rev(); + for ((er, es), dr) in self + .as_slice() + .iter() + .rev() + .zip(stride.as_slice().iter().rev()) + .zip(iter.by_ref()) + { + /* update strides */ + if *dr == *er { + /* keep stride */ + *dr = *es; + } else if *er == 1 { + /* dead dimension, zero stride */ + *dr = 0 + } else { + return None; + } + } + + /* set remaining strides to zero */ + for dr in iter { + *dr = 0; + } + + Some(new_stride.into()) + } } impl AsRef<[usize]> for Shape { @@ -252,9 +260,7 @@ impl Extend for Shape { impl SwapAxes for Shape { fn swap_axes(&self, a: Axis, b: Axis) -> Self { - let mut shape = self.clone(); - shape.swap(a, b); - shape + self.swap_axes(a, b) } } @@ -266,7 +272,7 @@ impl FromIterator for Shape { impl IntoIterator for Shape { type Item = usize; - type IntoIter = std::vec::IntoIter; + type IntoIter = vec::IntoIter; fn into_iter(self) -> Self::IntoIter { self.0.into_iter() @@ -275,7 +281,7 @@ impl IntoIterator for Shape { impl<'a> IntoIterator for &'a mut Shape { type Item = &'a mut usize; - type IntoIter = std::slice::IterMut<'a, usize>; + type IntoIter = core::slice::IterMut<'a, usize>; fn into_iter(self) -> Self::IntoIter { self.0.iter_mut() diff --git a/tensor/src/specs/ndtensor.rs b/tensor/src/specs/ndtensor.rs index 16860150..55326595 100644 --- a/tensor/src/specs/ndtensor.rs +++ b/tensor/src/specs/ndtensor.rs @@ -24,8 +24,8 @@ pub trait NdTensor { self.shape().size() } - fn stride(&self) -> &Stride { - self.layout().stride() + fn strides(&self) -> &Stride { + self.layout().strides() } } diff --git a/tensor/src/specs/scalar.rs b/tensor/src/specs/scalar.rs index 99158603..81d78513 100644 --- a/tensor/src/specs/scalar.rs +++ b/tensor/src/specs/scalar.rs @@ -6,12 +6,13 @@ use crate::tensor::TensorBase; use core::iter::{Product, Sum}; use core::ops::Neg; use num::complex::Complex; -use num::traits::{Float, FromPrimitive, NumAssign, NumCast, NumOps, Pow}; +use num::traits::{Float, 
FromPrimitive, Inv, NumAssign, NumCast, NumOps, Pow};
 
 pub trait Scalar:
     Copy
     + Default
     + FromPrimitive
+    + Inv
     + Neg
     + NumAssign
     + NumCast
diff --git a/tensor/src/stats/impl_stats.rs b/tensor/src/stats/impl_stats.rs
new file mode 100644
index 00000000..89bb8d7b
--- /dev/null
+++ b/tensor/src/stats/impl_stats.rs
@@ -0,0 +1,58 @@
+use super::Statistics;
+use crate::prelude::Scalar;
+use crate::TensorBase;
+
+impl<T> TensorBase<T>
+where
+    T: Ord,
+{
+    pub fn max(&self) -> &T {
+        self.iter().max().unwrap()
+    }
+
+    pub fn min(&self) -> &T {
+        self.iter().min().unwrap()
+    }
+
+    pub fn sort(&mut self) {
+        self.data_mut().sort();
+    }
+}
+
+impl<T> Statistics<T> for TensorBase<T>
+where
+    T: Ord + Scalar,
+{
+    fn max(&self) -> T {
+        *self.max()
+    }
+
+    fn mean(&self) -> T {
+        self.sum() / T::from_usize(self.size()).unwrap()
+    }
+
+    fn median(&self) -> T {
+        self.data().median()
+    }
+
+    fn min(&self) -> T {
+        *self.min()
+    }
+
+    fn mode(&self) -> T {
+        self.data().mode()
+    }
+
+    fn sum(&self) -> T {
+        self.iter().copied().sum()
+    }
+
+    fn std(&self) -> T {
+        self.variance().sqrt()
+    }
+
+    fn variance(&self) -> T {
+        let mean = self.mean();
+        self.iter().map(|x| (*x - mean).powi(2)).sum::<T>() / T::from_usize(self.size()).unwrap()
+    }
+}
diff --git a/tensor/src/stats/mod.rs b/tensor/src/stats/mod.rs
index b00acd97..aa1f2892 100644
--- a/tensor/src/stats/mod.rs
+++ b/tensor/src/stats/mod.rs
@@ -3,9 +3,19 @@
     Contrib: FL03
 */
-use crate::shape::Axis;
+mod impl_stats;
 
-pub trait SummaryStatistics<T> {
+use crate::prelude::{Axis, Scalar};
+#[cfg(not(feature = "std"))]
+use alloc::collections::BTreeMap;
+#[cfg(feature = "std")]
+use std::collections::BTreeMap;
+
+// pub trait StatElem: Copy + FromPrimitive + Product + Num + NumAssign + Ord + Sum {}
+
+// impl StatElem for S where S: Copy + FromPrimitive + Product + Num + NumAssign + Ord + Sum {}
+
+pub trait Statistics<T> {
     /// Returns the maximum value in the collection.
     fn max(&self) -> T;
     /// Returns the mean (average) value of the collection.
     fn mean(&self) -> T;
@@ -16,13 +26,73 @@
     fn min(&self) -> T;
     /// Get the mode of the collection.
     fn mode(&self) -> T;
+
+    fn sum(&self) -> T;
     /// Compute the standard deviation
     fn std(&self) -> T;
     /// Compute the variance
     fn variance(&self) -> T;
 }
 
-pub trait TensorStats<T>: SummaryStatistics<T> {
+macro_rules! 
impl_stats { + ($container:ty, $size:ident) => { + impl Statistics for $container + where + Self: Clone, + T: Ord + Scalar, + { + fn max(&self) -> T { + self.iter().max().unwrap().clone() + } + + fn mean(&self) -> T { + self.sum() / T::from_usize(self.$size()).unwrap() + } + + fn median(&self) -> T { + let mut sorted = self.clone(); + sorted.sort(); + let mid = sorted.$size() / 2; + if sorted.$size() % 2 == 0 { + (sorted[mid - 1] + sorted[mid]) / T::from_usize(2).unwrap() + } else { + sorted[mid] + } + } + + fn min(&self) -> T { + self.iter().min().unwrap().clone() + } + + fn mode(&self) -> T { + let mut freqs = BTreeMap::new(); + for &val in self.iter() { + *freqs.entry(val).or_insert(0) += 1; + } + let max_freq = freqs.values().max().unwrap(); + *freqs.iter().find(|(_, &freq)| freq == *max_freq).unwrap().0 + } + + fn sum(&self) -> T { + self.iter().copied().sum() + } + + fn std(&self) -> T { + self.variance().sqrt() + } + + fn variance(&self) -> T { + let sqr = |x| x * x; + let mean = self.mean(); + self.iter().map(|x| sqr(*x - mean)).sum::() + / T::from_usize(self.$size()).unwrap() + } + } + }; +} +impl_stats!(Vec, len); +impl_stats!([T], len); +pub trait StatisticsExt: Statistics { /// Compute the mean along the specified axis. fn mean_axis(&self, axis: Axis) -> T; } diff --git a/tensor/src/tensor.rs b/tensor/src/tensor.rs index e6ad2a29..2b772e0a 100644 --- a/tensor/src/tensor.rs +++ b/tensor/src/tensor.rs @@ -236,8 +236,8 @@ impl TensorBase { self.layout().size() } /// Get a reference to the stride of the tensor - pub fn stride(&self) -> &Stride { - self.layout().stride() + pub fn strides(&self) -> &Stride { + self.layout().strides() } /// Turn the tensor into a scalar /// If the tensor has a rank greater than 0, this will return an error @@ -394,6 +394,15 @@ where impl Eq for TensorBase where T: Eq {} +impl Ord for TensorBase +where + T: Ord, +{ + fn cmp(&self, other: &Self) -> core::cmp::Ordering { + self.data.cmp(&other.data) + } +} + impl PartialEq for TensorBase where T: PartialEq, @@ -403,6 +412,16 @@ where } } +impl PartialEq for TensorBase +where + S: AsRef<[T]>, + T: PartialEq, +{ + fn eq(&self, other: &S) -> bool { + &self.data == other.as_ref() + } +} + impl PartialOrd for TensorBase where T: PartialOrd, diff --git a/tensor/src/utils.rs b/tensor/src/utils.rs index 35334ea0..da686a39 100644 --- a/tensor/src/utils.rs +++ b/tensor/src/utils.rs @@ -60,6 +60,13 @@ where out } +macro_rules! i { + ($($x:expr),*) => { + vec![$($x),*] + }; + +} + macro_rules! izip { // @closure creates a tuple-flattening closure for .map() call. 
usage: // @closure partial_pattern => partial_tuple , rest , of , iterators diff --git a/tensor/tests/composition.rs b/tensor/tests/composition.rs index 9e917e8d..24f4dec5 100644 --- a/tensor/tests/composition.rs +++ b/tensor/tests/composition.rs @@ -17,7 +17,7 @@ fn test_ones_and_zeros() { assert_ne!(a.id(), b.id()); assert_eq!(a.shape(), b.shape()); assert_eq!(a.size(), b.size()); - assert_eq!(a.stride(), b.stride()); + assert_eq!(a.strides(), b.strides()); assert_eq!(a, Tensor::ones(shape)); assert_eq!(b, Tensor::zeros(shape)); diff --git a/tensor/tests/tensor.rs b/tensor/tests/tensor.rs index f892cb11..2ac97aee 100644 --- a/tensor/tests/tensor.rs +++ b/tensor/tests/tensor.rs @@ -16,7 +16,7 @@ fn test_tensor() { assert_ne!(a.id(), b.id()); assert_eq!(a.shape(), b.shape()); assert_eq!(a.size(), b.size()); - assert_eq!(a.stride(), b.stride()); + assert_eq!(a.strides(), b.strides()); } #[test] @@ -65,8 +65,8 @@ fn test_higher_dim() { assert_ne!(a.id(), b.id()); assert_eq!(a.shape(), b.shape()); assert_eq!(a.size(), b.size()); - assert_eq!(a.stride(), b.stride()); - assert_eq!(a.stride().len(), 4); + assert_eq!(a.strides(), b.strides()); + assert_eq!(a.strides().len(), 4); } #[test] From 443881c52cc06bd56bf170ddf74b66473bed0a6d Mon Sep 17 00:00:00 2001 From: Joe McCain III Date: Thu, 4 Apr 2024 20:55:48 -0500 Subject: [PATCH 84/87] update Signed-off-by: Joe McCain III --- core/src/types/constants.rs | 28 +--------------------------- 1 file changed, 1 insertion(+), 27 deletions(-) diff --git a/core/src/types/constants.rs b/core/src/types/constants.rs index f7693eab..a36b522f 100644 --- a/core/src/types/constants.rs +++ b/core/src/types/constants.rs @@ -4,37 +4,11 @@ */ use crate::prelude::{EvaluateOnce, Gradient}; use core::borrow::{Borrow, BorrowMut}; -use core::ops::{self, Deref, DerefMut, Neg, Not}; +use core::ops::{Deref, DerefMut, Neg, Not}; use num::{Num, One, Zero}; #[cfg(feature = "serde")] use serde::{Deserialize, Serialize}; -pub struct ConstantPtr { - ptr: *const T, -} - -impl ConstantPtr { - pub fn new(ptr: *const T) -> Self { - Self { ptr } - } - - pub fn as_ptr(&self) -> *const T { - self.ptr - } - - pub fn as_mut_ptr(&self) -> *mut T { - self.ptr as *mut T - } -} - -impl ops::Add for ConstantPtr { - type Output = ConstantPtr; - - fn add(self, rhs: usize) -> Self::Output { - ConstantPtr::new(self.ptr.wrapping_add(rhs)) - } -} - #[derive(Clone, Copy, Debug, Default, Eq, Hash, Ord, PartialEq, PartialOrd)] #[cfg_attr(feature = "serde", derive(Deserialize, Serialize,))] #[repr(C)] From f83f705ba0af72644e6cb38f8349fc3ed922cf0e Mon Sep 17 00:00:00 2001 From: Joe McCain III Date: Fri, 5 Apr 2024 06:04:51 -0500 Subject: [PATCH 85/87] update Signed-off-by: Joe McCain III --- Cargo.toml | 5 +- core/src/eval/evaluator.rs | 6 - core/src/lib.rs | 2 - core/src/{eval/mod.rs => specs/eval.rs} | 3 - core/src/specs/mod.rs | 5 +- core/src/types/variables.rs | 2 +- exp/ndtensor/Cargo.toml | 25 +++ .../ndtensor}/src/data/container.rs | 11 +- {tensor => exp/ndtensor}/src/data/elem.rs | 2 +- {tensor => exp/ndtensor}/src/data/mod.rs | 0 .../ndtensor}/src/data/repr/owned.rs | 0 .../ndtensor}/src/data/repr/shared.rs | 0 .../ndtensor}/src/data/repr/view.rs | 0 {tensor => exp/ndtensor}/src/data/specs.rs | 0 exp/ndtensor/src/dim/dimension.rs | 6 + exp/ndtensor/src/dim/mod.rs | 210 ++++++++++++++++++ exp/ndtensor/src/index/mod.rs | 17 ++ exp/ndtensor/src/index/slice.rs | 45 ++++ exp/ndtensor/src/iter/axis.rs | 33 +++ exp/ndtensor/src/iter/iterator.rs | 7 + exp/ndtensor/src/iter/mod.rs | 43 ++++ 
exp/ndtensor/src/iter/position.rs | 99 +++++++++ exp/ndtensor/src/lib.rs | 22 ++ exp/ndtensor/src/seal.rs | 29 +++ exp/ndtensor/src/utils.rs | 53 +++++ tensor/src/actions/create/arange.rs | 180 +++------------ tensor/src/actions/create/linspace.rs | 10 + tensor/src/actions/create/mod.rs | 2 +- tensor/src/actions/iter/axis.rs | 9 +- tensor/src/actions/iter/position.rs | 2 - tensor/src/lib.rs | 5 +- tensor/src/shape/dim/mod.rs | 3 +- tensor/src/shape/layout.rs | 25 --- tensor/src/shape/shape.rs | 18 +- tensor/src/specs/create.rs | 6 - tensor/src/specs/ndtensor.rs | 9 - 36 files changed, 673 insertions(+), 221 deletions(-) delete mode 100644 core/src/eval/evaluator.rs rename core/src/{eval/mod.rs => specs/eval.rs} (88%) create mode 100644 exp/ndtensor/Cargo.toml rename {tensor => exp/ndtensor}/src/data/container.rs (95%) rename {tensor => exp/ndtensor}/src/data/elem.rs (86%) rename {tensor => exp/ndtensor}/src/data/mod.rs (100%) rename {tensor => exp/ndtensor}/src/data/repr/owned.rs (100%) rename {tensor => exp/ndtensor}/src/data/repr/shared.rs (100%) rename {tensor => exp/ndtensor}/src/data/repr/view.rs (100%) rename {tensor => exp/ndtensor}/src/data/specs.rs (100%) create mode 100644 exp/ndtensor/src/dim/dimension.rs create mode 100644 exp/ndtensor/src/dim/mod.rs create mode 100644 exp/ndtensor/src/index/mod.rs create mode 100644 exp/ndtensor/src/index/slice.rs create mode 100644 exp/ndtensor/src/iter/axis.rs create mode 100644 exp/ndtensor/src/iter/iterator.rs create mode 100644 exp/ndtensor/src/iter/mod.rs create mode 100644 exp/ndtensor/src/iter/position.rs create mode 100644 exp/ndtensor/src/lib.rs create mode 100644 exp/ndtensor/src/seal.rs create mode 100644 exp/ndtensor/src/utils.rs delete mode 100644 tensor/src/specs/create.rs diff --git a/Cargo.toml b/Cargo.toml index 72597fff..e401a0a7 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -21,8 +21,9 @@ members = [ "core", "derive", "graphs", - "macros", - "tensor" + "macros", + "tensor", + "exp/ndtensor" ] resolver = "2" diff --git a/core/src/eval/evaluator.rs b/core/src/eval/evaluator.rs deleted file mode 100644 index f7e20587..00000000 --- a/core/src/eval/evaluator.rs +++ /dev/null @@ -1,6 +0,0 @@ -/* - Appellation: evaluator - Contrib: FL03 -*/ - -pub struct Evaluator; diff --git a/core/src/lib.rs b/core/src/lib.rs index 0ccd9f24..08d4b316 100644 --- a/core/src/lib.rs +++ b/core/src/lib.rs @@ -16,7 +16,6 @@ pub(crate) mod seal; pub(crate) mod utils; pub mod error; -pub mod eval; pub mod id; pub mod math; pub mod ops; @@ -25,7 +24,6 @@ pub mod types; pub mod prelude { pub use crate::error::*; - pub use crate::eval::*; pub use crate::id::*; pub use crate::ops::prelude::*; pub use crate::specs::prelude::*; diff --git a/core/src/eval/mod.rs b/core/src/specs/eval.rs similarity index 88% rename from core/src/eval/mod.rs rename to core/src/specs/eval.rs index 97a4e088..e3bfc22a 100644 --- a/core/src/eval/mod.rs +++ b/core/src/specs/eval.rs @@ -2,9 +2,6 @@ Appellation: eval Contrib: FL03 */ -pub use self::evaluator::*; - -pub(crate) mod evaluator; pub trait EvaluateOnce { type Output; diff --git a/core/src/specs/mod.rs b/core/src/specs/mod.rs index 9d20c80b..ab66afcc 100644 --- a/core/src/specs/mod.rs +++ b/core/src/specs/mod.rs @@ -3,9 +3,10 @@ Contrib: FL03 */ -pub use self::{arith::*, gradient::*, prop::*, store::*}; +pub use self::{arith::*, eval::*, gradient::*, prop::*, store::*}; pub(crate) mod arith; +pub(crate) mod eval; pub(crate) mod gradient; pub(crate) mod prop; pub(crate) mod store; @@ -39,6 +40,8 @@ where } pub(crate) mod prelude 
{ + pub use super::arith::*; + pub use super::eval::*; pub use super::func::*; pub use super::gradient::*; pub use super::prop::*; diff --git a/core/src/types/variables.rs b/core/src/types/variables.rs index 373b08c0..4e7d5d1c 100644 --- a/core/src/types/variables.rs +++ b/core/src/types/variables.rs @@ -2,8 +2,8 @@ Appellation: variables Contrib: FL03 */ -use crate::eval::{Evaluate, EvaluateMut, EvaluateOnce}; use crate::prelude::{BinaryOp, Gradient, Op, UnaryOp}; +use crate::specs::{Evaluate, EvaluateMut, EvaluateOnce}; use core::borrow::{Borrow, BorrowMut}; use core::ops::{Neg, Not}; use num::{Num, One, Zero}; diff --git a/exp/ndtensor/Cargo.toml b/exp/ndtensor/Cargo.toml new file mode 100644 index 00000000..13e2ddce --- /dev/null +++ b/exp/ndtensor/Cargo.toml @@ -0,0 +1,25 @@ +[package] +authors.workspace = true +categories.workspace = true +description.workspace = true +edition.workspace = true +homepage.workspace = true +keywords.workspace = true +license.workspace = true +name = "ndtensor" +repository.workspace = true +readme.workspace = true +version.workspace = true + +[features] +default = [ + "std" +] + +std = [ + "acme/std", +] + +[dependencies] +acme = { path = "../../acme" } +rawpointer = "0.2.1" \ No newline at end of file diff --git a/tensor/src/data/container.rs b/exp/ndtensor/src/data/container.rs similarity index 95% rename from tensor/src/data/container.rs rename to exp/ndtensor/src/data/container.rs index 4e106d25..09dbe08e 100644 --- a/tensor/src/data/container.rs +++ b/exp/ndtensor/src/data/container.rs @@ -4,10 +4,9 @@ */ use super::specs::{Data, DataOwned, RawData, RawDataMut}; use super::{nonnull_from_vec_data, Container, SharedContainer}; -use crate::actions::iter::to_vec_mapped; -use crate::prelude::Layout; -use crate::shape::dim::can_index_slice; -use crate::shape::{IntoShape, IntoStride, Shape, Stride}; +use crate::dim::can_index_slice; +use crate::iter::to_vec_mapped; +use acme::prelude::{IntoShape, IntoStride, Layout, Shape, Stride}; use core::ptr::NonNull; use core::slice; use rawpointer::PointerExt; @@ -80,7 +79,7 @@ where } /// Return true if the array is known to be c-contiguous (Row Major) pub fn is_standard_layout(&self) -> bool { - self.layout().is_layout_c() + crate::dim::is_layout_c(self.layout()) } /// pub fn iter(&self) -> slice::Iter<'_, A> @@ -174,7 +173,7 @@ where F: FnMut(I::Item) -> A, { let shape = shape.into_shape(); - let strides = shape.default_strides(); // shape.stride().strides_for_dim(&dim); + let strides = crate::dim::default_strides(&shape); // shape.stride().strides_for_dim(&dim); let v = to_vec_mapped(iter, map); Self::from_vec_dim_stride_unchecked(shape, strides, v) } diff --git a/tensor/src/data/elem.rs b/exp/ndtensor/src/data/elem.rs similarity index 86% rename from tensor/src/data/elem.rs rename to exp/ndtensor/src/data/elem.rs index b19a4a5b..21a0d325 100644 --- a/tensor/src/data/elem.rs +++ b/exp/ndtensor/src/data/elem.rs @@ -5,7 +5,7 @@ //! # Elements //! //! 
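// --- sketch of the checked/unchecked constructor split these hunks lean on
// (compare `with_layout` vs. `with_layout_unchecked` earlier in the series);
// the type and field names here are invented for illustration.
struct Flat<T> {
    data: Vec<T>,
    len: usize,
}

impl<T> Flat<T> {
    // safe entry point: validate in debug builds, then defer to the unsafe sibling
    fn with_len(self, len: usize) -> Self {
        debug_assert_eq!(self.data.len(), len);
        unsafe { self.with_len_unchecked(len) }
    }

    // caller guarantees that `len` matches the backing buffer
    unsafe fn with_len_unchecked(self, len: usize) -> Self {
        Flat {
            data: self.data,
            len,
        }
    }
}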
-use crate::prelude::DType; +use acme::prelude::DType; pub trait Element { type Elem; diff --git a/tensor/src/data/mod.rs b/exp/ndtensor/src/data/mod.rs similarity index 100% rename from tensor/src/data/mod.rs rename to exp/ndtensor/src/data/mod.rs diff --git a/tensor/src/data/repr/owned.rs b/exp/ndtensor/src/data/repr/owned.rs similarity index 100% rename from tensor/src/data/repr/owned.rs rename to exp/ndtensor/src/data/repr/owned.rs diff --git a/tensor/src/data/repr/shared.rs b/exp/ndtensor/src/data/repr/shared.rs similarity index 100% rename from tensor/src/data/repr/shared.rs rename to exp/ndtensor/src/data/repr/shared.rs diff --git a/tensor/src/data/repr/view.rs b/exp/ndtensor/src/data/repr/view.rs similarity index 100% rename from tensor/src/data/repr/view.rs rename to exp/ndtensor/src/data/repr/view.rs diff --git a/tensor/src/data/specs.rs b/exp/ndtensor/src/data/specs.rs similarity index 100% rename from tensor/src/data/specs.rs rename to exp/ndtensor/src/data/specs.rs diff --git a/exp/ndtensor/src/dim/dimension.rs b/exp/ndtensor/src/dim/dimension.rs new file mode 100644 index 00000000..8912ec08 --- /dev/null +++ b/exp/ndtensor/src/dim/dimension.rs @@ -0,0 +1,6 @@ +/* + Appellation: dimension + Contrib: FL03 +*/ + +pub struct Dim; diff --git a/exp/ndtensor/src/dim/mod.rs b/exp/ndtensor/src/dim/mod.rs new file mode 100644 index 00000000..0e946dd6 --- /dev/null +++ b/exp/ndtensor/src/dim/mod.rs @@ -0,0 +1,210 @@ +/* + Appellation: dim + Contrib: FL03 +*/ +//! # Dimension +//! + +pub use self::{dimension::Dim, utils::*}; + +pub(crate) mod dimension; + +use core::ops::IndexMut; + +pub trait IntoDimension { + type Dim: Dimension; + + fn into_dimension(self) -> Self::Dim; +} + +pub trait Dimension: IndexMut { + type Pattern; + + fn as_slice(&self) -> &[usize]; + /// Return the rank of the dimension; i.e. the number of axes. + fn rank(&self) -> usize; + /// Return the size of the dimension; i.e. the number of elements. + fn size(&self) -> usize; + + #[doc(hidden)] + /// Return stride offset for index. + fn stride_offset(index: &Self, strides: &Self) -> isize { + let mut offset = 0; + for (&i, &s) in izip!(index.as_slice(), strides.as_slice()) { + offset += stride_offset(i, s); + } + offset + } +} + +pub(crate) mod utils { + use crate::index::{Ix, Ixs}; + use acme::prelude::{Layout, Shape, ShapeError, Stride}; + use core::mem; + + /// Calculate offset from `Ix` stride converting sign properly + #[inline(always)] + pub fn stride_offset(n: Ix, stride: Ix) -> isize { + (n as isize) * (stride as Ixs) + } + + pub(crate) fn default_strides(shape: &Shape) -> Stride { + // Compute default array strides + // Shape (a, b, c) => Give strides (b * c, c, 1) + let mut strides = Stride::zeros(shape.rank()); + // For empty arrays, use all zero strides. 
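// --- the same computation as `default_strides` above in a free-standing,
// runnable form: shape (a, b, c) receives row-major strides (b * c, c, 1),
// and any zero dimension short-circuits to all-zero strides.
fn default_strides(shape: &[usize]) -> Vec<usize> {
    let mut strides = vec![0; shape.len()];
    if shape.iter().all(|&d| d != 0) {
        let mut cum_prod = 1;
        for (s, d) in strides.iter_mut().zip(shape.iter()).rev() {
            *s = cum_prod;
            cum_prod *= d;
        }
    }
    strides
}
// default_strides(&[2, 3, 4]) == [12, 4, 1]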
+ if shape.iter().all(|&d| d != 0) { + let mut it = strides.as_slice_mut().iter_mut().rev(); + // Set first element to 1 + if let Some(rs) = it.next() { + *rs = 1; + } + let mut cum_prod = 1; + for (rs, dim) in it.zip(shape.iter().rev()) { + cum_prod *= *dim; + *rs = cum_prod; + } + } + strides + } + + pub(crate) fn is_layout_c(layout: &Layout) -> bool { + if let 1 = *layout.rank() { + return layout.strides()[0] == 1 || layout.shape()[0] <= 1; + } + + for d in layout.shape().iter() { + if *d == 0 { + return true; + } + } + + let mut contig_stride = 1_isize; + // check all dimensions -- a dimension of length 1 can have unequal strides + for (dim, s) in izip!(layout.shape().iter().rev(), layout.strides().iter().rev()) { + if *dim != 1 { + let s = *s as isize; + if s != contig_stride { + return false; + } + contig_stride *= *dim as isize; + } + } + true + } + + pub(crate) fn can_index_slice( + data: &[A], + shape: &Shape, + stride: &Stride, + ) -> Result<(), ShapeError> { + // Check conditions 1 and 2 and calculate `max_offset`. + let max_offset = max_abs_offset_check_overflow::(shape, stride)?; + can_index_slice_impl(max_offset, data.len(), shape, stride) + } + + fn can_index_slice_impl( + max_offset: usize, + data_len: usize, + dim: &Shape, + strides: &Stride, + ) -> Result<(), ShapeError> { + // Check condition 3. + let is_empty = dim.as_slice().iter().any(|&d| d == 0); + if is_empty && max_offset > data_len { + return Err(ShapeError::OutOfBounds); + } + if !is_empty && max_offset >= data_len { + return Err(ShapeError::OutOfBounds); + } + + // Check condition 4. + if !is_empty && dim_stride_overlap(dim, strides) { + return Err(ShapeError::Unsupported); + } + + Ok(()) + } + + pub fn dim_stride_overlap(dim: &Shape, strides: &Stride) -> bool { + let order = crate::_fastest_varying_stride_order(strides); + let mut sum_prev_offsets = 0; + for &index in order.as_slice() { + let d = dim[index]; + let s = (strides[index] as isize).abs(); + match d { + 0 => return false, + 1 => {} + _ => { + if s <= sum_prev_offsets { + return true; + } + sum_prev_offsets += (d - 1) as isize * s; + } + } + } + false + } + + pub fn max_abs_offset_check_overflow( + dim: &Shape, + strides: &Stride, + ) -> Result { + max_abs_offset_check_overflow_impl(mem::size_of::(), dim, strides) + } + + fn max_abs_offset_check_overflow_impl( + elem_size: usize, + dim: &Shape, + strides: &Stride, + ) -> Result { + // Condition 1. + if dim.rank() != strides.rank() { + return Err(ShapeError::IncompatibleLayout); + } + + // Condition 3. + let _ = size_of_shape_checked(dim)?; + + // Determine absolute difference in units of `A` between least and greatest + // address accessible by moving along all axes. + let max_offset: usize = izip!(dim.as_slice(), strides.as_slice()) + .try_fold(0usize, |acc, (&d, &s)| { + let s = s as isize; + // Calculate maximum possible absolute movement along this axis. + let off = d.saturating_sub(1).checked_mul(s.unsigned_abs())?; + acc.checked_add(off) + }) + .ok_or_else(|| ShapeError::Overflow)?; + // Condition 2a. + if max_offset > isize::MAX as usize { + return Err(ShapeError::Overflow); + } + + // Determine absolute difference in units of bytes between least and + // greatest address accessible by moving along all axes + let max_offset_bytes = max_offset + .checked_mul(elem_size) + .ok_or_else(|| ShapeError::Overflow)?; + // Condition 2b. 
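// --- simplified restatement of `max_abs_offset_check_overflow` below, assuming
// non-negative strides so plain `usize` math suffices; `izip!` over two
// sequences is just `zip`. Returns None where the original reports Overflow.
fn max_abs_offset(shape: &[usize], strides: &[usize]) -> Option<usize> {
    shape.iter().zip(strides.iter()).try_fold(0usize, |acc, (&d, &s)| {
        // furthest movement along this axis: (d - 1) * s, saturating at d == 0
        let off = d.saturating_sub(1).checked_mul(s)?;
        acc.checked_add(off)
    })
}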
+ if max_offset_bytes > isize::MAX as usize { + return Err(ShapeError::Overflow); + } + + Ok(max_offset) + } + + pub fn size_of_shape_checked(dim: &Shape) -> Result { + let size_nonzero = dim + .as_slice() + .iter() + .filter(|&&d| d != 0) + .try_fold(1usize, |acc, &d| acc.checked_mul(d)) + .ok_or_else(|| ShapeError::Overflow)?; + if size_nonzero > ::std::isize::MAX as usize { + Err(ShapeError::Overflow) + } else { + Ok(dim.size()) + } + } +} diff --git a/exp/ndtensor/src/index/mod.rs b/exp/ndtensor/src/index/mod.rs new file mode 100644 index 00000000..d2c48ca3 --- /dev/null +++ b/exp/ndtensor/src/index/mod.rs @@ -0,0 +1,17 @@ +/* + Appellation: index + Contrib: FL03 +*/ +//! # Index +//! +//! +pub use self::slice::*; + +pub(crate) mod slice; + +pub type Ix = usize; + +pub type Ixs = isize; + +#[cfg(test)] +mod tests {} diff --git a/exp/ndtensor/src/index/slice.rs b/exp/ndtensor/src/index/slice.rs new file mode 100644 index 00000000..81a5fe5d --- /dev/null +++ b/exp/ndtensor/src/index/slice.rs @@ -0,0 +1,45 @@ +/* + Appellation: slice + Contrib: FL03 +*/ +//! # Slice +//! +//! +use core::ops::{Range, RangeFrom}; +pub struct Slice { + pub start: isize, + pub end: Option, + pub step: isize, +} + +impl Slice { + pub fn new(start: isize, end: Option, step: isize) -> Self { + Self { start, end, step } + } +} + +impl From> for Slice { + fn from(range: Range) -> Self { + Self { + start: range.start, + end: Some(range.end), + step: 1, + } + } +} + +impl From> for Slice { + fn from(range: RangeFrom) -> Self { + Self { + start: range.start, + end: None, + step: 1, + } + } +} + +pub enum Slices { + Index(isize), + Slice(Slice), + NewAxis, +} diff --git a/exp/ndtensor/src/iter/axis.rs b/exp/ndtensor/src/iter/axis.rs new file mode 100644 index 00000000..1c8a56f5 --- /dev/null +++ b/exp/ndtensor/src/iter/axis.rs @@ -0,0 +1,33 @@ +/* + Appellation: axis + Contrib: FL03 +*/ +use crate::data::{ContainerBase, RawData}; +use crate::index::{Ix, Ixs}; +use acme::tensor::shape::{Axis, Layout}; + +pub struct AxisIter { + index: Ix, + end: Ix, + stride: Ixs, + inner_layout: Layout, + ptr: *mut A, +} + +impl AxisIter { + pub fn new(v: ContainerBase, axis: Axis) -> Self + where + S: RawData, + { + let stride = v.stride()[axis]; + let end = v.shape()[axis]; + // Self { + // index: 0, + // end, + // stride, + // inner_layout: layout.remove_axis(axis), + // ptr: v.as_mut_ptr(), + // } + unimplemented!() + } +} diff --git a/exp/ndtensor/src/iter/iterator.rs b/exp/ndtensor/src/iter/iterator.rs new file mode 100644 index 00000000..a3d1975c --- /dev/null +++ b/exp/ndtensor/src/iter/iterator.rs @@ -0,0 +1,7 @@ +/* + Appellation: iterator + Contrib: FL03 +*/ +use super::IndexIter; + +// pub struct diff --git a/exp/ndtensor/src/iter/mod.rs b/exp/ndtensor/src/iter/mod.rs new file mode 100644 index 00000000..b4691270 --- /dev/null +++ b/exp/ndtensor/src/iter/mod.rs @@ -0,0 +1,43 @@ +/* + Appellation: iter + Contrib: FL03 +*/ +//! # Iter +//! +//! +// pub use self::{axis::*, iterator::*, position::*, utils::*}; +pub use self::{iterator::*, position::IndexIter, utils::*}; + +#[allow(dead_code, unused)] +pub(crate) mod axis; +pub(crate) mod iterator; +pub(crate) mod position; + +pub(crate) mod utils { + use core::ptr; + + pub fn to_vec_mapped(iter: I, mut f: F) -> Vec + where + I: ExactSizeIterator, // + TrustedIterator + F: FnMut(I::Item) -> B, + { + // Use an `unsafe` block to do this efficiently. 
+ // We know that iter will produce exactly .size() elements, + // and the loop can vectorize if it's clean (without branch to grow the vector). + let (size, _) = iter.size_hint(); + let mut result = Vec::with_capacity(size); + let mut out_ptr = result.as_mut_ptr(); + let mut len = 0; + iter.fold((), |(), elt| unsafe { + ptr::write(out_ptr, f(elt)); + len += 1; + result.set_len(len); + out_ptr = out_ptr.offset(1); + }); + debug_assert_eq!(size, result.len()); + result + } +} + +#[cfg(test)] +mod tests {} diff --git a/exp/ndtensor/src/iter/position.rs b/exp/ndtensor/src/iter/position.rs new file mode 100644 index 00000000..a4a18607 --- /dev/null +++ b/exp/ndtensor/src/iter/position.rs @@ -0,0 +1,99 @@ +/* + Appellation: position + Contrib: FL03 +*/ +use acme::prelude::{Layout, Shape, Stride}; + +/// +pub struct IndexIter<'a> { + next: Option, + position: Vec, + shape: &'a Shape, + stride: &'a Stride, +} + +impl<'a> IndexIter<'a> { + pub fn new(offset: usize, shape: &'a Shape, stride: &'a Stride) -> Self { + let elem_count: usize = shape.iter().product(); + let next = if elem_count == 0 { + None + } else { + // This applies to the scalar case. + Some(offset) + }; + Self { + next, + position: vec![0; *shape.rank()], + shape, + stride, + } + } + + pub(crate) fn index(&self, index: impl AsRef<[usize]>) -> usize { + index + .as_ref() + .iter() + .zip(self.stride.iter()) + .map(|(i, s)| i * s) + .sum() + } +} + +impl<'a> DoubleEndedIterator for IndexIter<'a> { + fn next_back(&mut self) -> Option { + let (pos, _idx) = if let Some(item) = self.next() { + item + } else { + return None; + }; + let position = self + .shape + .iter() + .zip(pos.iter()) + .map(|(s, p)| s - p) + .collect(); + let scope = self.index(&position); + println!("{:?}", &position); + Some((position, scope)) + // unimplemented!() + } +} + +impl<'a> Iterator for IndexIter<'a> { + type Item = (Vec, usize); + + fn next(&mut self) -> Option { + let scope = match self.next { + None => return None, + Some(storage_index) => storage_index, + }; + let mut updated = false; + let mut next = scope; + for ((multi_i, max_i), stride_i) in self + .position + .iter_mut() + .zip(self.shape.iter()) + .zip(self.stride.iter()) + .rev() + { + let next_i = *multi_i + 1; + if next_i < *max_i { + *multi_i = next_i; + updated = true; + next += stride_i; + break; + } else { + next -= *multi_i * stride_i; + *multi_i = 0 + } + } + self.next = if updated { Some(next) } else { None }; + Some((self.position.clone(), scope)) + } +} + +impl<'a> From<&'a Layout> for IndexIter<'a> { + fn from(layout: &'a Layout) -> Self { + Self::new(layout.offset(), layout.shape(), layout.strides()) + } +} diff --git a/exp/ndtensor/src/lib.rs b/exp/ndtensor/src/lib.rs new file mode 100644 index 00000000..766689c3 --- /dev/null +++ b/exp/ndtensor/src/lib.rs @@ -0,0 +1,22 @@ +extern crate acme; +#[cfg(not(feature = "std"))] +extern crate alloc; + +pub use self::utils::*; + +#[macro_use] +pub(crate) mod seal; +#[macro_use] +pub(crate) mod utils; + +pub mod data; +pub mod dim; +pub mod index; +pub mod iter; + +pub mod prelude { + #[doc(inline)] + pub use crate::data::prelude::*; + #[doc(inline)] + pub use crate::iter::*; +} diff --git a/exp/ndtensor/src/seal.rs b/exp/ndtensor/src/seal.rs new file mode 100644 index 00000000..1b6ae830 --- /dev/null +++ b/exp/ndtensor/src/seal.rs @@ -0,0 +1,29 @@ +/* + Appellation: seal + Contrib: FL03 +*/ +//! The public parts of this private module are used to create traits +//! that cannot be implemented outside of our own crate. This way we +//! 
can feel free to extend those traits without worrying about it +//! being a breaking change for other implementations. + +/// If this type is pub but not publicly reachable, third parties +/// can't name it and can't implement traits using it. +pub struct PrivateMarker; + +macro_rules! private_decl { + () => { + /// This trait is private to implement; this method exists to make it + /// impossible to implement outside the crate. + #[doc(hidden)] + fn __private__(&self) -> $crate::seal::PrivateMarker; + }; +} + +macro_rules! private_impl { + () => { + fn __private__(&self) -> $crate::seal::PrivateMarker { + $crate::seal::PrivateMarker + } + }; +} diff --git a/exp/ndtensor/src/utils.rs b/exp/ndtensor/src/utils.rs new file mode 100644 index 00000000..67435fa3 --- /dev/null +++ b/exp/ndtensor/src/utils.rs @@ -0,0 +1,53 @@ +/* + Appellation: utils + Contrib: FL03 +*/ +use acme::prelude::Stride; + +pub(crate) fn _fastest_varying_stride_order(strides: &Stride) -> Stride { + let mut indices = strides.clone(); + for (i, elt) in indices.as_slice_mut().into_iter().enumerate() { + *elt = i; + } + let strides = strides.as_slice(); + indices + .as_slice_mut() + .sort_by_key(|&i| (strides[i] as isize).abs()); + indices +} + +macro_rules! izip { + // @closure creates a tuple-flattening closure for .map() call. usage: + // @closure partial_pattern => partial_tuple , rest , of , iterators + // eg. izip!( @closure ((a, b), c) => (a, b, c) , dd , ee ) + ( @closure $p:pat => $tup:expr ) => { + |$p| $tup + }; + + // The "b" identifier is a different identifier on each recursion level thanks to hygiene. + ( @closure $p:pat => ( $($tup:tt)* ) , $_iter:expr $( , $tail:expr )* ) => { + izip!(@closure ($p, b) => ( $($tup)*, b ) $( , $tail )*) + }; + + // unary + ($first:expr $(,)*) => { + IntoIterator::into_iter($first) + }; + + // binary + ($first:expr, $second:expr $(,)*) => { + izip!($first) + .zip($second) + }; + + // n-ary where n > 2 + ( $first:expr $( , $rest:expr )* $(,)* ) => { + izip!($first) + $( + .zip($rest) + )* + .map( + izip!(@closure a => (a) $( , $rest )*) + ) + }; +} diff --git a/tensor/src/actions/create/arange.rs b/tensor/src/actions/create/arange.rs index f74ffef8..1bc7973d 100644 --- a/tensor/src/actions/create/arange.rs +++ b/tensor/src/actions/create/arange.rs @@ -3,180 +3,74 @@ Contrib: FL03 */ use super::utils::steps; -use core::ops::{self, Range}; -use num::traits::{Bounded, FromPrimitive, Num, ToPrimitive}; +use num::traits::{FromPrimitive, Num, ToPrimitive}; pub struct Arange { - range: Boundary, + scope: usize, + start: T, + stop: T, step: T, } impl Arange { - pub fn new(range: Boundary, step: T) -> Self { - Self { range, step } - } - - pub fn range(start: T, stop: T, step: T) -> Self { - Self::new(Boundary::Range { start, stop }, step) - } -} -impl Arange -where - T: Copy + Default + Num + PartialOrd, -{ - pub fn start(&self) -> T { - self.range.start() - } - - pub fn steps(&self) -> usize - where - T: FromPrimitive + ToPrimitive, - { - steps(self.start(), self.stop(), self.step) - } - - pub fn step(&self) -> T { - self.step - } - - pub fn stop(&self) -> T - where - T: FromPrimitive + PartialOrd, - { - self.range.stop_or_linear() - } -} - -#[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)] -pub enum Boundary { - Range { start: T, stop: T }, - From { start: T }, - Inclusive { start: T, stop: T }, - Until { stop: T }, -} - -impl Boundary -where - T: Copy + Default, -{ - /// Returns the start value of the range. 
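// --- usage sketch for the iterator-based `Arange` introduced in this hunk,
// mirroring the updated test at the bottom of the file; a std-only equivalent
// is inlined so the sketch runs on its own.
fn arange_demo() -> Vec<i64> {
    let (start, step, steps) = (0i64, 2i64, 5);
    // value = start + step * scope, exactly as `Arange::next` computes it
    (0..steps).map(|i| start + step * i).collect() // [0, 2, 4, 6, 8]
}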
- pub fn start(&self) -> T { - match self { - Boundary::Range { start, .. } => *start, - Boundary::From { start } => *start, - Boundary::Inclusive { start, .. } => *start, - Boundary::Until { .. } => T::default(), - } - } - /// Returns the stop value of the range. - pub fn stop(&self) -> Option { - match self { - Boundary::Range { stop, .. } - | Boundary::Inclusive { stop, .. } - | Boundary::Until { stop } => Some(*stop), - _ => None, + pub fn new(start: T, stop: T, step: T) -> Self { + Self { + scope: 0, + start, + stop, + step, } } - pub fn step_size(&self, steps: usize) -> T - where - T: FromPrimitive + Num + PartialOrd, - { - let steps = T::from_usize(steps).unwrap(); - let start = self.start(); - let stop = self.stop_or_default(); - let step = (stop - start) / steps; - step + pub fn start(&self) -> &T { + &self.start } -} -impl Boundary -where - T: Copy + Default + PartialOrd, -{ - pub fn stop_or(&self, default: T) -> T { - debug_assert!(default >= self.start()); - - self.stop().unwrap_or(default) + pub fn stop(&self) -> &T { + &self.stop } - pub fn stop_or_linear(&self) -> T - where - T: FromPrimitive + Num, - { - self.stop_or(self.start() * T::from_usize(2).unwrap()) + pub fn step(&self) -> &T { + &self.step } - pub fn stop_or_default(&self) -> T { - self.stop_or(T::default()) - } - - pub fn stop_or_max(&self) -> T + pub fn steps(&self) -> usize where - T: Bounded, + T: Copy + Num + ToPrimitive, { - self.stop_or(T::max_value()) - } -} -impl From> for Boundary { - fn from(args: Range) -> Self { - Boundary::Range { - start: args.start, - stop: args.end, - } + steps(self.start, self.stop, self.step) } } -impl From> for Boundary { - fn from(args: ops::RangeFrom) -> Self { - Boundary::From { start: args.start } - } -} - -impl From> for Boundary { - fn from(args: ops::RangeTo) -> Self { - Boundary::Until { stop: args.end } - } -} - -impl From<[T; 2]> for Boundary +impl Iterator for Arange where - T: Copy, + T: Copy + FromPrimitive + Num + ToPrimitive, { - fn from(args: [T; 2]) -> Self { - Boundary::Range { - start: args[0], - stop: args[1], + type Item = T; + + fn next(&mut self) -> Option { + if self.scope < self.steps() { + let value = self.start + self.step * T::from_usize(self.scope).unwrap(); + self.scope += 1; + Some(value) + } else { + None } } } -impl From<(T, T)> for Boundary { - fn from(args: (T, T)) -> Self { - Boundary::Inclusive { - start: args.0, - stop: args.1, - } - } -} - -impl From for Boundary { - fn from(stop: T) -> Self { - Boundary::Until { stop } - } -} - #[cfg(test)] mod tests { use super::*; #[test] fn test_arange() { - let setup = Boundary::Range { start: 0, stop: 10 }; - let arange = Arange::new(setup, 1); - assert_eq!(arange.start(), 0); - assert_eq!(arange.stop(), 10); - assert_eq!(arange.step(), 1); - assert_eq!(setup, (0..10).into()); + let mut arange = Arange::new(0, 10, 2); + assert_eq!(arange.next(), Some(0)); + assert_eq!(arange.next(), Some(2)); + assert_eq!(arange.next(), Some(4)); + assert_eq!(arange.next(), Some(6)); + assert_eq!(arange.next(), Some(8)); + assert_eq!(arange.next(), None); } } diff --git a/tensor/src/actions/create/linspace.rs b/tensor/src/actions/create/linspace.rs index c0edde68..2753552d 100644 --- a/tensor/src/actions/create/linspace.rs +++ b/tensor/src/actions/create/linspace.rs @@ -13,6 +13,16 @@ pub trait LinspaceExt: Linspace { fn linspace_until(&self, stop: T, steps: usize) -> Self; } +impl LinspaceExt for S +where + S: Linspace, + T: Default, +{ + fn linspace_until(&self, stop: T, steps: usize) -> Self { + 
S::linspace(T::default(), stop, steps) + } +} + impl Linspace for Vec where T: Copy + Default + FromPrimitive + Num + PartialOrd, diff --git a/tensor/src/actions/create/mod.rs b/tensor/src/actions/create/mod.rs index b4cbcf20..b09dbcf0 100644 --- a/tensor/src/actions/create/mod.rs +++ b/tensor/src/actions/create/mod.rs @@ -11,7 +11,7 @@ pub(crate) mod stack; pub(crate) mod utils { use core::ops::{Div, Sub}; use num::traits::{FromPrimitive, ToPrimitive}; - + /// Calculate the step size for a given range and number of steps. pub fn step_size(start: T, stop: T, steps: usize) -> T where T: FromPrimitive + Div + Sub, diff --git a/tensor/src/actions/iter/axis.rs b/tensor/src/actions/iter/axis.rs index aee73000..4535016a 100644 --- a/tensor/src/actions/iter/axis.rs +++ b/tensor/src/actions/iter/axis.rs @@ -2,9 +2,9 @@ Appellation: axis Contrib: FL03 */ -use crate::data::{ContainerBase, RawData}; use crate::index::{Ix, Ixs}; use crate::shape::{Axis, Layout}; +use crate::TensorBase; pub struct AxisIter { index: Ix, @@ -15,11 +15,8 @@ pub struct AxisIter { } impl AxisIter { - pub fn new(v: ContainerBase, axis: Axis) -> Self - where - S: RawData, - { - let stride = v.stride()[axis]; + pub fn new(v: TensorBase, axis: Axis) -> Self { + let stride = v.strides()[axis]; let end = v.shape()[axis]; // Self { // index: 0, diff --git a/tensor/src/actions/iter/position.rs b/tensor/src/actions/iter/position.rs index 2e2c63a4..32c4306f 100644 --- a/tensor/src/actions/iter/position.rs +++ b/tensor/src/actions/iter/position.rs @@ -53,9 +53,7 @@ impl<'a> DoubleEndedIterator for IndexIter<'a> { .map(|(s, p)| s - p) .collect(); let scope = self.index(&position); - println!("{:?}", &position); Some((position, scope)) - // unimplemented!() } } diff --git a/tensor/src/lib.rs b/tensor/src/lib.rs index 5ff66c97..4528c707 100644 --- a/tensor/src/lib.rs +++ b/tensor/src/lib.rs @@ -14,6 +14,7 @@ extern crate acme_core as acme; #[doc(inline)] pub use self::{actions::*, tensor::*, utils::*}; +#[allow(unused)] #[macro_use] pub(crate) mod seal; pub(crate) mod tensor; @@ -22,8 +23,6 @@ pub(crate) mod utils; #[doc(hidden)] pub mod backend; -#[doc(hidden)] -pub mod data; pub mod error; #[cfg(feature = "io")] pub mod io; @@ -61,8 +60,6 @@ pub mod prelude { #[doc(inline)] pub use crate::actions::{create::*, grad::*, index::*, iter::*}; #[doc(inline)] - pub use crate::data::prelude::*; - #[doc(inline)] pub use crate::error::*; #[doc(inline)] pub use crate::linalg::prelude::*; diff --git a/tensor/src/shape/dim/mod.rs b/tensor/src/shape/dim/mod.rs index e7122897..a36de856 100644 --- a/tensor/src/shape/dim/mod.rs +++ b/tensor/src/shape/dim/mod.rs @@ -37,8 +37,9 @@ pub trait Dimension: IndexMut { } } +#[allow(dead_code)] pub(crate) mod utils { - use crate::actions::index::{Ix, Ixs}; + use crate::index::{Ix, Ixs}; use crate::shape::{Shape, ShapeError, Stride}; use core::mem; diff --git a/tensor/src/shape/layout.rs b/tensor/src/shape/layout.rs index 1357ba1d..70393a54 100644 --- a/tensor/src/shape/layout.rs +++ b/tensor/src/shape/layout.rs @@ -165,31 +165,6 @@ impl Layout { .map(|(i, s)| i * s) .sum() } - - pub(crate) fn is_layout_c(&self) -> bool { - if let 1 = *self.rank() { - return self.stride[0] == 1 || self.shape[0] <= 1; - } - - for d in self.shape().iter() { - if *d == 0 { - return true; - } - } - - let mut contig_stride = 1_isize; - // check all dimensions -- a dimension of length 1 can have unequal strides - for (dim, s) in izip!(self.shape().iter().rev(), self.strides().iter().rev()) { - if *dim != 1 { - let s = *s as isize; - if 
s != contig_stride { - return false; - } - contig_stride *= *dim as isize; - } - } - true - } } #[cfg(test)] diff --git a/tensor/src/shape/shape.rs b/tensor/src/shape/shape.rs index 3e882187..00494a0a 100644 --- a/tensor/src/shape/shape.rs +++ b/tensor/src/shape/shape.rs @@ -40,6 +40,20 @@ impl Shape { pub fn as_slice_mut(&mut self) -> &mut [usize] { &mut self.0 } + + pub fn check_size(&self) -> Result { + let size_nonzero = self + .as_slice() + .iter() + .filter(|&&d| d != 0) + .try_fold(1usize, |acc, &d| acc.checked_mul(d)) + .ok_or_else(|| ShapeError::Overflow)?; + if size_nonzero > ::std::isize::MAX as usize { + Err(ShapeError::Overflow) + } else { + Ok(self.size()) + } + } /// Attempts to create a one-dimensional shape that describes the /// diagonal of the current shape. pub fn diag(&self) -> Shape { @@ -158,14 +172,14 @@ impl Shape { // Shape (a, b, c) => Give strides (b * c, c, 1) let mut strides = Stride::zeros(self.rank()); // For empty arrays, use all zero strides. - if self.as_slice().iter().all(|&d| d != 0) { + if self.iter().all(|&d| d != 0) { let mut it = strides.as_slice_mut().iter_mut().rev(); // Set first element to 1 if let Some(rs) = it.next() { *rs = 1; } let mut cum_prod = 1; - for (rs, dim) in it.zip(self.as_slice().iter().rev()) { + for (rs, dim) in it.zip(self.iter().rev()) { cum_prod *= *dim; *rs = cum_prod; } diff --git a/tensor/src/specs/create.rs b/tensor/src/specs/create.rs deleted file mode 100644 index 0b60d59e..00000000 --- a/tensor/src/specs/create.rs +++ /dev/null @@ -1,6 +0,0 @@ -/* - Appellation: reshape - Contrib: FL03 -*/ - - diff --git a/tensor/src/specs/ndtensor.rs b/tensor/src/specs/ndtensor.rs index 55326595..173301bd 100644 --- a/tensor/src/specs/ndtensor.rs +++ b/tensor/src/specs/ndtensor.rs @@ -29,15 +29,6 @@ pub trait NdTensor { } } -pub trait NdStore { - type Container; - type Elem; -} - -pub trait NdIterator { - type Item; -} - pub trait TensorData { type Elem; } From f1eb761ac16a4f530dc574835bb103548612d086 Mon Sep 17 00:00:00 2001 From: Joe McCain III Date: Fri, 5 Apr 2024 06:09:49 -0500 Subject: [PATCH 86/87] update Signed-off-by: Joe McCain III --- tensor/src/actions/create/linspace.rs | 4 +-- tensor/src/stats/impl_stats.rs | 51 ++++++++++++++++++++------- 2 files changed, 41 insertions(+), 14 deletions(-) diff --git a/tensor/src/actions/create/linspace.rs b/tensor/src/actions/create/linspace.rs index 2753552d..8f245227 100644 --- a/tensor/src/actions/create/linspace.rs +++ b/tensor/src/actions/create/linspace.rs @@ -10,7 +10,7 @@ pub trait Linspace { } pub trait LinspaceExt: Linspace { - fn linspace_until(&self, stop: T, steps: usize) -> Self; + fn linspace_until(stop: T, steps: usize) -> Self; } impl LinspaceExt for S @@ -18,7 +18,7 @@ where S: Linspace, T: Default, { - fn linspace_until(&self, stop: T, steps: usize) -> Self { + fn linspace_until(stop: T, steps: usize) -> Self { S::linspace(T::default(), stop, steps) } } diff --git a/tensor/src/stats/impl_stats.rs b/tensor/src/stats/impl_stats.rs index 89bb8d7b..87956552 100644 --- a/tensor/src/stats/impl_stats.rs +++ b/tensor/src/stats/impl_stats.rs @@ -2,21 +2,49 @@ use super::Statistics; use crate::prelude::Scalar; use crate::TensorBase; -impl TensorBase -where - T: Ord, -{ - pub fn max(&self) -> &T { +impl TensorBase { + pub fn max(&self) -> &T + where + T: Ord, + { self.iter().max().unwrap() } - pub fn min(&self) -> &T { + pub fn mean(&self) -> T + where + T: Scalar, + { + self.sum() / T::from_usize(self.size()).unwrap() + } + + pub fn min(&self) -> &T + where + T: Ord, + { 
self.iter().min().unwrap() } - pub fn sort(&mut self) { + pub fn sort(&mut self) + where + T: Ord, + { self.data_mut().sort(); } + + pub fn std(&self) -> T + where + T: Scalar, + { + self.variance().sqrt() + } + + pub fn variance(&self) -> T + where + T: Scalar, + { + let mean = self.mean(); + self.iter().map(|x| (*x - mean).powi(2)).sum::() / T::from_usize(self.size()).unwrap() + } } impl Statistics for TensorBase @@ -28,7 +56,7 @@ where } fn mean(&self) -> T { - self.sum() / T::from_usize(self.size()).unwrap() + self.mean() } fn median(&self) -> T { @@ -44,15 +72,14 @@ where } fn sum(&self) -> T { - self.iter().copied().sum() + self.sum() } fn std(&self) -> T { - self.variance().sqrt() + self.std() } fn variance(&self) -> T { - let mean = self.mean(); - self.iter().map(|x| (*x - mean).powi(2)).sum::() / T::from_usize(self.size()).unwrap() + self.variance() } } From 1c020eaf3b9201e0888599bea456937544243ab2 Mon Sep 17 00:00:00 2001 From: Joe McCain III Date: Fri, 5 Apr 2024 10:23:52 -0500 Subject: [PATCH 87/87] update Signed-off-by: Joe McCain III --- acme/src/lib.rs | 6 +- core/src/lib.rs | 2 - core/src/math/linalg/fields.rs | 4 +- core/src/ops/binary/arithmetic.rs | 27 ++++--- core/src/ops/binary/mod.rs | 23 +++++- core/src/ops/binary/operator.rs | 46 +++++++++++- core/src/ops/kinds.rs | 4 +- core/src/ops/mod.rs | 9 +-- core/src/ops/operator.rs | 34 +++++++++ core/src/seal.rs | 34 --------- core/src/types/dual.rs | 109 +++++++++++++++------------- derive/examples/params.rs | 21 ------ derive/src/ast/mod.rs | 4 - derive/src/cmp/mod.rs | 6 -- derive/src/cmp/params/mod.rs | 37 ---------- derive/src/lib.rs | 46 ------------ derive/src/utils.rs | 13 ---- exp/ndtensor/src/lib.rs | 1 + exp/ndtensor/tests/default.rs | 18 +++++ graphs/src/dcg/graph.rs | 3 +- graphs/src/grad/id.rs | 13 ++-- graphs/src/id/id.rs | 7 +- graphs/src/scg/edge.rs | 2 +- graphs/src/scg/node.rs | 6 +- tensor/src/actions/iter/iterator.rs | 12 +-- tensor/src/actions/iter/mod.rs | 2 +- tensor/src/actions/iter/position.rs | 2 +- tensor/src/impls/linalg.rs | 69 ++++++++++++------ tensor/src/lib.rs | 2 + tensor/src/linalg/mod.rs | 17 ++--- tensor/src/linalg/specs.rs | 57 +++++++++++++++ tensor/src/shape/dim/mod.rs | 89 +++++++++++------------ tensor/src/shape/layout.rs | 74 +++++++++++-------- tensor/src/shape/mod.rs | 8 +- tensor/src/specs/affine.rs | 26 ------- tensor/src/specs/mod.rs | 4 +- tensor/src/specs/ndtensor.rs | 4 + tensor/src/specs/scalar.rs | 40 ++++++++++ tensor/src/stats/mod.rs | 4 + tensor/src/tensor.rs | 49 +++++++++++-- tensor/tests/stats.rs | 40 ++++++++++ 41 files changed, 559 insertions(+), 415 deletions(-) delete mode 100644 core/src/seal.rs delete mode 100644 derive/examples/params.rs delete mode 100644 derive/src/ast/mod.rs delete mode 100644 derive/src/cmp/mod.rs delete mode 100644 derive/src/cmp/params/mod.rs delete mode 100644 derive/src/utils.rs create mode 100644 exp/ndtensor/tests/default.rs create mode 100644 tensor/src/linalg/specs.rs delete mode 100644 tensor/src/specs/affine.rs create mode 100644 tensor/tests/stats.rs diff --git a/acme/src/lib.rs b/acme/src/lib.rs index 7b65292e..94120d85 100644 --- a/acme/src/lib.rs +++ b/acme/src/lib.rs @@ -23,6 +23,9 @@ pub use acme_macros::*; pub use acme_tensor as tensor; pub mod prelude { + #[cfg(feature = "tensor")] + #[doc(inline)] + pub use crate::tensor::prelude::*; #[doc(inline)] pub use acme_core::prelude::*; #[cfg(feature = "derive")] @@ -32,7 +35,4 @@ pub mod prelude { pub use acme_graphs::prelude::*; #[cfg(feature = "macros")] pub use 
acme_macros::*; - #[cfg(feature = "tensor")] - #[doc(inline)] - pub use acme_tensor::prelude::*; } diff --git a/core/src/lib.rs b/core/src/lib.rs index 08d4b316..e09264d3 100644 --- a/core/src/lib.rs +++ b/core/src/lib.rs @@ -10,8 +10,6 @@ extern crate alloc; // pub use self::utils::*; -#[macro_use] -pub(crate) mod seal; #[macro_use] pub(crate) mod utils; diff --git a/core/src/math/linalg/fields.rs b/core/src/math/linalg/fields.rs index d821f6a3..3debe6d0 100644 --- a/core/src/math/linalg/fields.rs +++ b/core/src/math/linalg/fields.rs @@ -6,7 +6,9 @@ //! //! -pub trait Field {} +pub trait Field { + type Elem; +} #[cfg(test)] mod tests {} diff --git a/core/src/ops/binary/arithmetic.rs b/core/src/ops/binary/arithmetic.rs index a9c96898..5519061e 100644 --- a/core/src/ops/binary/arithmetic.rs +++ b/core/src/ops/binary/arithmetic.rs @@ -10,6 +10,11 @@ use serde::{Deserialize, Serialize}; use strum::{Display, EnumCount, EnumIs, EnumIter, VariantNames}; macro_rules! operator { + ($kind:ident: $($op:ident),*) => { + $( + operator!($op, $kind); + )* + }; ($op:ident, $kind:ident) => { #[derive(Clone, Copy, Debug, Default, Eq, Hash, Ord, PartialEq, PartialOrd)] @@ -43,15 +48,11 @@ macro_rules! operator { } } }; - ($kind:ident: $($op:ident),*) => { - $( - operator!($op, $kind); - )* - }; + } macro_rules! operators { - ($group:ident; {$($variant:ident: $op:ident => $method:ident),*}) => { + ($group:ident: [$(($variant:ident, $op:ident, $method:ident)),*]) => { #[derive( Clone, Copy, @@ -148,7 +149,7 @@ macro_rules! impl_binary_op { } } }; - (other: $op:ident, $bound:tt, $call:ident) => { + (other: $op:ident, $bound:ident, $call:ident) => { operator!($op, Binary); impl BinOp for $op @@ -164,14 +165,22 @@ macro_rules! impl_binary_op { }; } -operators!(Arithmetic; {Add: Addition => add, Div: Division => div, Mul: Multiplication => mul, Rem: Remainder => rem, Sub: Subtraction => sub}); - impl_binary_op!((Addition, Add, +), (Division, Div, /), (Multiplication, Mul, *), (Remainder, Rem, %), (Subtraction, Sub, -)); use num::traits::Pow; impl_binary_op!(other: Power, Pow, pow); +operators!( + Arithmetic: [ + (Add, Addition, add), + (Div, Division, div), + (Mul, Multiplication, mul), + (Rem, Remainder, rem), + (Sub, Subtraction, sub) + ] +); + impl Arithmetic { pub fn new(op: Arithmetic) -> Self { op diff --git a/core/src/ops/binary/mod.rs b/core/src/ops/binary/mod.rs index aa66f634..1806928f 100644 --- a/core/src/ops/binary/mod.rs +++ b/core/src/ops/binary/mod.rs @@ -9,7 +9,28 @@ pub(crate) mod kinds; pub(crate) mod operator; pub(crate) mod specs; -pub type BoxedBinOp = Box>; +pub type BoxedBinOp = Box>; + +#[derive(Clone, Debug)] +#[allow(dead_code)] +enum Bop +where + Kind: BinaryOperand, +{ + Custom { name: String, op: Kind }, +} + +#[allow(dead_code)] +pub(crate) trait BinaryOperand { + type Args: BinArgs; + type Output; + + fn eval( + &self, + lhs: ::Lhs, + rhs: ::Rhs, + ) -> Self::Output; +} pub trait BinOp { type Output; diff --git a/core/src/ops/binary/operator.rs b/core/src/ops/binary/operator.rs index 668525f9..110ebb89 100644 --- a/core/src/ops/binary/operator.rs +++ b/core/src/ops/binary/operator.rs @@ -3,7 +3,43 @@ Contrib: FL03 */ use super::BinaryOp; +use core::marker::PhantomData; +use core::mem; +pub trait BinArgs { + type Lhs; + type Rhs; + + fn lhs(&self) -> &Self::Lhs; + + fn rhs(&self) -> &Self::Rhs; +} + +impl BinArgs for (A, B) { + type Lhs = A; + type Rhs = B; + + fn lhs(&self) -> &Self::Lhs { + &self.0 + } + + fn rhs(&self) -> &Self::Rhs { + &self.1 + } +} + +impl BinArgs for 
BinaryArgs { + type Lhs = A; + type Rhs = B; + + fn lhs(&self) -> &Self::Lhs { + self.lhs() + } + + fn rhs(&self) -> &Self::Rhs { + self.rhs() + } +} pub struct BinaryArgs { pub lhs: A, pub rhs: B, @@ -33,7 +69,7 @@ impl BinaryArgs { impl BinaryArgs { pub fn swap(&mut self) { - std::mem::swap(&mut self.lhs, &mut self.rhs); + mem::swap(&mut self.lhs, &mut self.rhs); } } @@ -69,8 +105,12 @@ where } } -pub struct BinaryOperator { - pub args: BinaryArgs, +pub struct BinaryOperator +where + Args: BinArgs, +{ + pub args: Args, pub communitative: bool, pub op: BinaryOp, + pub output: PhantomData, } diff --git a/core/src/ops/kinds.rs b/core/src/ops/kinds.rs index 04d9afce..d5973e75 100644 --- a/core/src/ops/kinds.rs +++ b/core/src/ops/kinds.rs @@ -45,6 +45,6 @@ impl From for Op { } } -pub enum Expr { - Binary(BinaryOperator>), +pub enum Expr { + Binary(BinaryOperator<(A, B), C>), } diff --git a/core/src/ops/mod.rs b/core/src/ops/mod.rs index 90e2b62f..4c18e9a8 100644 --- a/core/src/ops/mod.rs +++ b/core/src/ops/mod.rs @@ -19,13 +19,6 @@ pub trait ApplyTo { fn apply_to(&self, other: T) -> Self::Output; } -pub trait ApplyWith { - type Output; - type With; - - fn apply_with(&self, other: T, with: Self::With) -> Self::Output; -} - pub trait IntoOp { fn into_op(self) -> Op; } @@ -40,7 +33,7 @@ where } pub(crate) mod prelude { - pub use super::{ApplyTo, ApplyWith, IntoOp}; + pub use super::{ApplyTo, IntoOp}; pub use super::binary::*; pub use super::kinds::Op; diff --git a/core/src/ops/operator.rs b/core/src/ops/operator.rs index 485c0abf..93abde2c 100644 --- a/core/src/ops/operator.rs +++ b/core/src/ops/operator.rs @@ -15,3 +15,37 @@ pub trait Operator { fn name(&self) -> &str; } + +#[allow(dead_code)] +pub(crate) struct Operand { + kind: OperatorKind, + name: String, +} + +pub trait Args { + type Pattern; + + fn args(self) -> Self::Pattern; +} + +impl Args for () { + type Pattern = (); + + fn args(self) -> Self::Pattern { + () + } +} + +impl Args for (A, B) { + type Pattern = (A, B); + + fn args(self) -> Self::Pattern { + self + } +} + +pub trait Evaluator { + type Output; + + fn eval(&self, args: Args) -> Self::Output; +} diff --git a/core/src/seal.rs b/core/src/seal.rs deleted file mode 100644 index b53ba76e..00000000 --- a/core/src/seal.rs +++ /dev/null @@ -1,34 +0,0 @@ -/* - Appellation: seal - Contrib: FL03 -*/ -#![allow(unused)] -//! The public parts of this private module are used to create traits -//! that cannot be implemented outside of our own crate. This way we -//! can feel free to extend those traits without worrying about it -//! being a breaking change for other implementations. - -/// If this type is pub but not publicly reachable, third parties -/// can't name it and can't implement traits using it. -pub struct PrivateMarker; - -macro_rules! private_decl { - () => { - /// This trait is private to implement; this method exists to make it - /// impossible to implement outside the crate. - #[doc(hidden)] - fn __private__(&self) -> $crate::seal::PrivateMarker; - }; -} - -macro_rules! 
private_impl { - () => { - fn __private__(&self) -> $crate::seal::PrivateMarker { - $crate::seal::PrivateMarker - } - }; -} - -pub trait Sealed { - private_decl!(); -} diff --git a/core/src/types/dual.rs b/core/src/types/dual.rs index a77b8fbb..768862ec 100644 --- a/core/src/types/dual.rs +++ b/core/src/types/dual.rs @@ -22,30 +22,35 @@ use serde::{Deserialize, Serialize}; #[cfg_attr(feature = "serde", derive(Deserialize, Serialize,))] pub struct Dual { dual: T, - real: T, + value: T, } impl Dual { - pub fn new(real: T, dual: T) -> Self { - Self { dual, real } + pub fn new(value: T, dual: T) -> Self { + Self { dual, value } } - pub fn real(real: T) -> Self + pub fn from_real(value: T) -> Self where T: Default, { - Self { - dual: T::default(), - real, - } + Self::new(value, T::default()) + } + + pub fn dual(&self) -> &T { + &self.dual + } + + pub fn dual_mut(&mut self) -> &mut T { + &mut self.dual } pub fn value(&self) -> &T { - &self.real + &self.value } pub fn value_mut(&mut self) -> &mut T { - &mut self.real + &mut self.value } } @@ -54,7 +59,7 @@ where T: std::fmt::Display, { fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { - write!(f, "({}, {})", self.real, self.dual) + write!(f, "({}, {})", self.value, self.dual) } } @@ -62,7 +67,7 @@ impl EvaluateOnce for Dual { type Output = T; fn eval_once(self) -> Self::Output { - self.real + self.value } } @@ -73,7 +78,7 @@ where type Gradient = Dual; fn grad(&self, _: T) -> Self::Gradient { - Dual::real(T::default()) + Dual::from_real(T::default()) } } @@ -84,7 +89,7 @@ where type Output = Dual; fn neg(self) -> Self::Output { - Dual::new(-self.real, -self.dual) + Dual::new(-self.value, -self.dual) } } @@ -95,7 +100,7 @@ where type Output = Dual; fn not(self) -> Self::Output { - Dual::new(!self.real, !self.dual) + Dual::new(!self.value, !self.dual) } } @@ -108,7 +113,7 @@ where T: Default, { fn from(value: T) -> Self { - Self::real(value) + Self::from_real(value) } } @@ -120,8 +125,8 @@ where fn div(self, rhs: Self) -> Self::Output { Dual::new( - self.real / rhs.real, - (self.dual * rhs.real - self.real * rhs.dual) / (rhs.real * rhs.real), + self.value / rhs.value, + (self.dual * rhs.value - self.value * rhs.dual) / (rhs.value * rhs.value), ) } } @@ -133,7 +138,7 @@ where type Output = Dual; fn div(self, rhs: T) -> Self::Output { - Dual::new(self.real / rhs, self.dual / rhs) + Dual::new(self.value / rhs, self.dual / rhs) } } @@ -142,8 +147,8 @@ where T: Copy + ops::DivAssign + num::traits::NumOps, { fn div_assign(&mut self, rhs: Self) { - self.real /= rhs.real; - self.dual = (self.dual * rhs.real - self.real * rhs.dual) / (rhs.real * rhs.real); + self.value /= rhs.value; + self.dual = (self.dual * rhs.value - self.value * rhs.dual) / (rhs.value * rhs.value); } } @@ -152,7 +157,7 @@ where T: Copy + ops::DivAssign, { fn div_assign(&mut self, rhs: T) { - self.real /= rhs; + self.value /= rhs; self.dual /= rhs; } } @@ -164,7 +169,7 @@ where type FromStrRadixErr = T::FromStrRadixErr; fn from_str_radix(str: &str, radix: u32) -> Result { - T::from_str_radix(str, radix).map(Dual::real) + T::from_str_radix(str, radix).map(Dual::from_real) } } @@ -177,7 +182,7 @@ where } fn is_one(&self) -> bool { - self.real.is_one() + self.value.is_one() } } @@ -190,7 +195,7 @@ where } fn is_zero(&self) -> bool { - self.real.is_zero() + self.value.is_zero() } } @@ -199,78 +204,78 @@ macro_rules! 
impl_binary_op { $(impl_binary_op!($op, $method, $e);)* }; ($trait:ident, $method:ident, $e:tt) => { - impl std::ops::$trait> for Dual + impl ops::$trait> for Dual where - T: Copy + std::ops::$trait, + T: Copy + ops::$trait, { type Output = Dual; fn $method(self, rhs: Self) -> Self::Output { - let real = self.real $e rhs.real; + let real = self.value $e rhs.value; let dual = self.dual $e rhs.dual; Dual::new(real, dual) } } - impl<'a, T> std::ops::$trait<&'a Dual> for Dual + impl<'a, T> ops::$trait<&'a Dual> for Dual where - T: Copy + std::ops::$trait, + T: Copy + ops::$trait, { type Output = Dual; fn $method(self, rhs: &'a Dual) -> Self::Output { - let real = self.real $e rhs.real; + let real = self.value $e rhs.value; let dual = self.dual $e rhs.dual; Dual::new(real, dual) } } - impl<'a, T> std::ops::$trait> for &'a Dual + impl<'a, T> ops::$trait> for &'a Dual where - T: Copy + std::ops::$trait, + T: Copy + ops::$trait, { type Output = Dual; fn $method(self, rhs: Dual) -> Self::Output { - let real = self.real $e rhs.real; + let real = self.value $e rhs.value; let dual = self.dual $e rhs.dual; Dual::new(real, dual) } } - impl<'a, T> std::ops::$trait<&'a Dual> for &'a Dual + impl<'a, T> ops::$trait<&'a Dual> for &'a Dual where - T: Copy + std::ops::$trait, + T: Copy + ops::$trait, { type Output = Dual; fn $method(self, rhs: &'a Dual) -> Self::Output { - let real = self.real $e rhs.real; + let real = self.value $e rhs.value; let dual = self.dual $e rhs.dual; Dual::new(real, dual) } } - impl std::ops::$trait for Dual + impl ops::$trait for Dual where - T: Copy + std::ops::$trait, + T: Copy + ops::$trait, { type Output = Dual; fn $method(self, rhs: T) -> Self::Output { - let real = self.real $e rhs; + let real = self.value $e rhs; Dual::new(real, self.dual) } } - impl<'a, T> std::ops::$trait for &'a Dual + impl<'a, T> ops::$trait for &'a Dual where - T: Copy + std::ops::$trait, + T: Copy + ops::$trait, { type Output = Dual; fn $method(self, rhs: T) -> Self::Output { - let real = self.real $e rhs; + let real = self.value $e rhs; Dual::new(real, self.dual) } } @@ -282,32 +287,32 @@ macro_rules! 
impl_assign_op { $(impl_assign_op!($op, $method, $e);)* }; ($trait:ident, $method:ident, $e:tt) => { - impl std::ops::$trait> for Dual + impl ops::$trait> for Dual where - T: Copy + std::ops::$trait, + T: Copy + ops::$trait, { fn $method(&mut self, rhs: Self) { - self.real $e rhs.real; + self.value $e rhs.value; self.dual $e rhs.dual; } } - impl<'a, T> std::ops::$trait<&'a Dual> for Dual + impl<'a, T> ops::$trait<&'a Dual> for Dual where - T: Copy + std::ops::$trait, + T: Copy + ops::$trait, { fn $method(&mut self, rhs: &'a Dual) { - self.real $e rhs.real; + self.value $e rhs.value; self.dual $e rhs.dual; } } - impl std::ops::$trait for Dual + impl ops::$trait for Dual where - T: Copy + std::ops::$trait, + T: Copy + ops::$trait, { fn $method(&mut self, rhs: T) { - self.real $e rhs; + self.value $e rhs; } } }; diff --git a/derive/examples/params.rs b/derive/examples/params.rs deleted file mode 100644 index 37305ad6..00000000 --- a/derive/examples/params.rs +++ /dev/null @@ -1,21 +0,0 @@ -/* - Appellation: params - Contrib: FL03 -*/ -extern crate acme_derive as acme; - -use acme::Params; - -fn main() -> Result<(), Box> { - let _params = LinearParams { weight: 1.0 }; - let wk = LinearParamsKey::Weight; - println!("{:?}", &wk); - // let _key = wk.key(); - Ok(()) -} - -#[derive(Params)] -pub struct LinearParams { - #[param] - pub weight: T, -} diff --git a/derive/src/ast/mod.rs b/derive/src/ast/mod.rs deleted file mode 100644 index ff6ef7ff..00000000 --- a/derive/src/ast/mod.rs +++ /dev/null @@ -1,4 +0,0 @@ -/* - Appellation: ast - Contrib: FL03 -*/ diff --git a/derive/src/cmp/mod.rs b/derive/src/cmp/mod.rs deleted file mode 100644 index eb18459f..00000000 --- a/derive/src/cmp/mod.rs +++ /dev/null @@ -1,6 +0,0 @@ -/* - Appellation: cmp - Contrib: FL03 -*/ - -pub mod params; diff --git a/derive/src/cmp/params/mod.rs b/derive/src/cmp/params/mod.rs deleted file mode 100644 index 743d14aa..00000000 --- a/derive/src/cmp/params/mod.rs +++ /dev/null @@ -1,37 +0,0 @@ -/* - Appellation: params - Contrib: FL03 -*/ -use crate::utils::capitalize_first; -use proc_macro2::TokenStream; -use quote::{format_ident, quote}; -use syn::{Fields, FieldsNamed, Ident, Variant}; - -pub fn generate_keys(fields: &Fields, name: &Ident) -> TokenStream { - match fields { - Fields::Named(inner) => handle_named_fields(inner, name), - _ => panic!("Only named fields are supported"), - } -} - -fn handle_named_fields(fields: &FieldsNamed, name: &Ident) -> TokenStream { - let FieldsNamed { named, .. } = fields; - let _fields_str = named.iter().cloned().map(|field| field.ident.unwrap()); - let variants = named.iter().cloned().map(|field| { - let ident = field.ident.unwrap(); - let variant_ident = format_ident!("{}", capitalize_first(&ident.to_string())); - Variant { - attrs: vec![], - ident: variant_ident, - fields: Fields::Unit, - discriminant: None, - } - }); - - quote! { - #[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)] - pub enum #name { - #(#variants),* - } - } -} diff --git a/derive/src/lib.rs b/derive/src/lib.rs index ebd7bf80..d246b84b 100644 --- a/derive/src/lib.rs +++ b/derive/src/lib.rs @@ -6,49 +6,3 @@ //! //! extern crate proc_macro; - -pub(crate) mod ast; -pub(crate) mod cmp; -pub(crate) mod utils; - -use proc_macro::TokenStream; -use quote::{format_ident, quote}; -use syn::{parse_macro_input, Data, DataStruct, DeriveInput}; - -/// This macro generates a parameter struct and an enum of parameter keys. 
-#[proc_macro_derive(Params, attributes(param))] -pub fn params(input: TokenStream) -> TokenStream { - // Parse the input tokens into a syntax tree - let input = parse_macro_input!(input as DeriveInput); - - // Get the name of the struct - let struct_name = &input.ident; - let store_name = format_ident!("{}Key", struct_name); - - // Generate the parameter struct definition - let _param_struct = match &input.data { - Data::Struct(s) => match &s.fields { - _ => {} - }, - _ => panic!("Only structs are supported"), - }; - - // Generate the parameter keys enum - let param_keys_enum = match &input.data { - Data::Struct(s) => { - let DataStruct { fields, .. } = s; - - crate::cmp::params::generate_keys(fields, &store_name) - } - _ => panic!("Only structs are supported"), - }; - - // Combine the generated code - let generated_code = quote! { - - #param_keys_enum - }; - - // Return the generated code as a TokenStream - generated_code.into() -} diff --git a/derive/src/utils.rs b/derive/src/utils.rs deleted file mode 100644 index 76255e20..00000000 --- a/derive/src/utils.rs +++ /dev/null @@ -1,13 +0,0 @@ -/* - Appellation: utils - Contrib: FL03 -*/ - -/// A function for capitalizing the first letter of a string. -pub fn capitalize_first(s: &str) -> String { - s.chars() - .take(1) - .flat_map(|f| f.to_uppercase()) - .chain(s.chars().skip(1)) - .collect() -} diff --git a/exp/ndtensor/src/lib.rs b/exp/ndtensor/src/lib.rs index 766689c3..28fc3332 100644 --- a/exp/ndtensor/src/lib.rs +++ b/exp/ndtensor/src/lib.rs @@ -2,6 +2,7 @@ extern crate acme; #[cfg(not(feature = "std"))] extern crate alloc; +#[allow(unused_imports)] pub use self::utils::*; #[macro_use] diff --git a/exp/ndtensor/tests/default.rs b/exp/ndtensor/tests/default.rs new file mode 100644 index 00000000..d1c044d2 --- /dev/null +++ b/exp/ndtensor/tests/default.rs @@ -0,0 +1,18 @@ +/* + Appellation: default + Contrib: FL03 +*/ +#![cfg(test)] + +fn addition(a: A, b: B) -> C +where + A: std::ops::Add, +{ + a + b +} + +#[test] +fn compiles() { + let result = addition(2, 2); + assert_eq!(result, 4); +} diff --git a/graphs/src/dcg/graph.rs b/graphs/src/dcg/graph.rs index e8a9385c..becfdc62 100644 --- a/graphs/src/dcg/graph.rs +++ b/graphs/src/dcg/graph.rs @@ -8,10 +8,11 @@ use super::DynamicGraph; use crate::ops::*; use crate::prelude::GraphResult as Result; use crate::NodeIndex; + +use core::ops::{Index, Neg}; use num::traits::NumAssign; use petgraph::algo::toposort; use std::collections::HashMap; -use std::ops::{Index, Neg}; pub struct Dcg { store: DynamicGraph, diff --git a/graphs/src/grad/id.rs b/graphs/src/grad/id.rs index f3711953..99943d9d 100644 --- a/graphs/src/grad/id.rs +++ b/graphs/src/grad/id.rs @@ -2,11 +2,12 @@ Appellation: id Contrib: FL03 */ -use acme::id::Id; +use crate::id::Id; +use core::fmt; +use core::marker::PhantomData; +use core::ops::Deref; #[cfg(feature = "serde")] use serde::{Deserialize, Serialize}; -use std::marker::PhantomData; -use std::ops::Deref; #[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)] #[cfg_attr(feature = "serde", derive(Deserialize, Serialize,))] @@ -28,9 +29,9 @@ impl GradientId { } } -impl std::fmt::Display for GradientId { - fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { - write!(f, "{}", self.inner) +impl fmt::Display for GradientId { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{:?}", self.inner) } } diff --git a/graphs/src/id/id.rs b/graphs/src/id/id.rs index 71d4baca..cc722033 100644 --- a/graphs/src/id/id.rs +++ b/graphs/src/id/id.rs @@ 
-4,6 +4,7 @@ */ use super::EntryId; use crate::NodeIndex; +use core::fmt; #[cfg(feature = "serde")] use serde::{Deserialize, Serialize}; @@ -40,11 +41,11 @@ where } } -impl std::fmt::Display for Id +impl fmt::Display for Id where - Idx: std::fmt::Display, + Idx: fmt::Display, { - fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { if f.alternate() { write!(f, "{}.{}", self.index(), self.id) } else { diff --git a/graphs/src/scg/edge.rs b/graphs/src/scg/edge.rs index 391906f3..548f6e6a 100644 --- a/graphs/src/scg/edge.rs +++ b/graphs/src/scg/edge.rs @@ -3,7 +3,7 @@ Contrib: FL03 */ use crate::grad::GradientId; -use acme::prelude::Id; +use crate::id::Id; #[cfg(feature = "serde")] use serde::{Deserialize, Serialize}; diff --git a/graphs/src/scg/node.rs b/graphs/src/scg/node.rs index 0d7ec53b..147a3067 100644 --- a/graphs/src/scg/node.rs +++ b/graphs/src/scg/node.rs @@ -6,8 +6,8 @@ //! //! A computational graph relies on weighted nodes to represent constants, operations, and variables. //! The edges connecting to any given node are considered to be inputs and help to determine the flow of information +use crate::id::EntryId; use crate::ops::Operations; -use acme::id::AtomicId; use petgraph::prelude::NodeIndex; #[cfg(feature = "serde")] use serde::{Deserialize, Serialize}; @@ -15,7 +15,7 @@ use serde::{Deserialize, Serialize}; #[cfg_attr(feature = "serde", derive(Deserialize, Serialize,))] #[derive(Clone, Debug, Default, Eq, Hash, Ord, PartialEq, PartialOrd)] pub struct Node { - id: AtomicId, + id: EntryId, inputs: Vec, name: String, op: Option, @@ -24,7 +24,7 @@ pub struct Node { impl Node { pub fn new(name: impl ToString) -> Self { Self { - id: AtomicId::new(), + id: EntryId::new(), inputs: Vec::new(), name: name.to_string(), op: None, diff --git a/tensor/src/actions/iter/iterator.rs b/tensor/src/actions/iter/iterator.rs index a8851d7e..82228b9d 100644 --- a/tensor/src/actions/iter/iterator.rs +++ b/tensor/src/actions/iter/iterator.rs @@ -46,20 +46,13 @@ impl<'a, T> From<&'a TensorBase> for Iter<'a, T> { } } -#[allow(dead_code)] -pub struct IterMut<'a, T> { - scope: Option<&'a mut T>, +pub struct IterMut<'a, T: 'a> { strides: IndexIter<'a>, tensor: &'a mut TensorBase, } -#[allow(dead_code)] impl<'a, T> IterMut<'a, T> { pub fn new(strides: IndexIter<'a>, tensor: &'a mut TensorBase) -> Self { - Self { - scope: None, - strides, - tensor, - } + Self { strides, tensor } } } @@ -68,6 +61,7 @@ impl<'a, T> Iterator for IterMut<'a, T> { fn next(&mut self) -> Option { let (_pos, _idx) = self.strides.next()?; + let _scope = self.tensor.get_by_index_mut(_idx); unimplemented!() } } diff --git a/tensor/src/actions/iter/mod.rs b/tensor/src/actions/iter/mod.rs index 81499594..ebbab833 100644 --- a/tensor/src/actions/iter/mod.rs +++ b/tensor/src/actions/iter/mod.rs @@ -6,7 +6,7 @@ //! //! 
 // pub use self::{axis::*, iterator::*, position::*, utils::*};
-pub use self::{iterator::Iter, position::IndexIter, utils::*};
+pub use self::{iterator::*, position::IndexIter, utils::*};
 
 #[allow(dead_code, unused)]
 pub(crate) mod axis;
diff --git a/tensor/src/actions/iter/position.rs b/tensor/src/actions/iter/position.rs
index 32c4306f..137d7356 100644
--- a/tensor/src/actions/iter/position.rs
+++ b/tensor/src/actions/iter/position.rs
@@ -92,6 +92,6 @@ impl<'a> Iterator for IndexIter<'a> {
 
 impl<'a> From<&'a Layout> for IndexIter<'a> {
     fn from(layout: &'a Layout) -> Self {
-        Self::new(layout.offset, &layout.shape, &layout.stride)
+        Self::new(layout.offset, &layout.shape, &layout.strides)
     }
 }
diff --git a/tensor/src/impls/linalg.rs b/tensor/src/impls/linalg.rs
index eb15108d..4b7d7cf9 100644
--- a/tensor/src/impls/linalg.rs
+++ b/tensor/src/impls/linalg.rs
@@ -5,55 +5,54 @@
 //! Implementations for linear algebra operations.
 //!
 //!
-use crate::prelude::{Matmul, Scalar, ShapeError, TensorError, TensorExpr, TensorResult};
+use crate::linalg::{Inverse, Matmul};
+use crate::prelude::{Scalar, ShapeError, TensorError, TensorExpr, TensorResult};
 use crate::tensor::{self, TensorBase};
 use acme::prelude::UnaryOp;
 use num::traits::{Num, NumAssign};
 
 fn inverse_impl<T>(tensor: &TensorBase<T>) -> TensorResult<TensorBase<T>>
 where
-    T: Copy + Num + NumAssign + PartialOrd,
+    T: Copy + NumAssign + PartialOrd,
 {
     let op = TensorExpr::unary(tensor.clone(), UnaryOp::Inv);
-    let rows = tensor.nrows();
-    let cols = tensor.ncols();
+    let n = tensor.nrows();
 
     if !tensor.is_square() {
-        return Err(ShapeError::IncompatibleShapes.into()); // Matrix must be square for inversion
+        return Err(ShapeError::NotSquare.into()); // Matrix must be square for inversion
     }
 
-    let eye = TensorBase::eye(rows);
+    let eye = TensorBase::eye(n);
 
     // Construct an augmented matrix by concatenating the original matrix with an identity matrix
-    let mut aug = TensorBase::zeros((rows, 2 * cols));
-    let acols = 2 * cols;
+    let mut aug = TensorBase::zeros((n, 2 * n));
 
     // aug.slice_mut(s![.., ..cols]).assign(matrix);
-    for i in 0..rows {
-        for j in 0..cols {
+    for i in 0..n {
+        for j in 0..n {
             aug[[i, j]] = tensor[[i, j]];
         }
 
-        for j in cols..acols {
-            aug[[i, j]] = eye[[i, j - cols]];
+        for j in n..(2 * n) {
+            aug[[i, j]] = eye[[i, j - n]];
         }
     }
 
     // Perform Gaussian elimination to reduce the left half to the identity matrix
-    for i in 0..rows {
+    for i in 0..n {
         let pivot = aug[[i, i]];
 
         if pivot == T::zero() {
            return Err(TensorError::Singular); // Matrix is singular
        }
 
-        for j in 0..(2 * cols) {
+        for j in 0..(2 * n) {
             aug[[i, j]] = aug[[i, j]] / pivot;
         }
 
-        for j in 0..rows {
+        for j in 0..n {
             if i != j {
                 let am = aug.clone();
                 let factor = aug[[j, i]];
-                for k in 0..(2 * cols) {
+                for k in 0..(2 * n) {
                     aug[[j, k]] -= factor * am[[i, k]];
                 }
             }
@@ -62,9 +61,9 @@ where
     // Extract the inverted matrix from the augmented matrix
     let mut inv = tensor.zeros_like().with_op(op.into());
 
-    for i in 0..rows {
-        for j in 0..cols {
-            inv[[i, j]] = aug[[i, j + cols]];
+    for i in 0..n {
+        for j in 0..n {
+            inv[[i, j]] = aug[[i, j + n]];
         }
     }
 
@@ -75,17 +74,45 @@ impl<T> TensorBase<T>
 where
     T: Copy,
 {
+    /// Creates a new tensor containing only the diagonal elements of the original tensor.
     pub fn diag(&self) -> Self {
         let n = self.nrows();
         Self::from_shape_iter(self.shape().diag(), (0..n).map(|i| self[vec![i; n]]))
     }
+    /// Find the inverse of the tensor
+    ///
+    /// # Errors
+    ///
+    /// Returns an error if the matrix is not square or if the matrix is singular.
+    ///
+    pub fn inv(&self) -> TensorResult<Self>
+    where
+        T: NumAssign + PartialOrd,
+    {
+        inverse_impl(self)
+    }
+    /// Compute the trace of the matrix.
+    /// The trace of a matrix is the sum of the diagonal elements.
+    pub fn trace(&self) -> TensorResult<T>
+    where
+        T: Num,
+    {
+        if !self.is_square() {
+            return Err(ShapeError::NotSquare.into());
+        }
+        let n = self.nrows();
+        let trace = (0..n).fold(T::zero(), |acc, i| acc + self[[i, i]]);
+        Ok(trace)
+    }
 }
 
-impl<T> TensorBase<T>
+impl<T> Inverse for TensorBase<T>
 where
     T: Copy + Num + NumAssign + PartialOrd,
 {
-    pub fn inv(&self) -> TensorResult<Self> {
+    type Output = TensorResult<Self>;
+
+    fn inv(&self) -> Self::Output {
         inverse_impl(self)
     }
 }
diff --git a/tensor/src/lib.rs b/tensor/src/lib.rs
index 4528c707..dbc4e5e7 100644
--- a/tensor/src/lib.rs
+++ b/tensor/src/lib.rs
@@ -70,6 +70,8 @@ pub mod prelude {
     #[doc(inline)]
     pub use crate::specs::prelude::*;
     #[doc(inline)]
+    pub use crate::stats::prelude::*;
+    #[doc(inline)]
     pub use crate::types::prelude::*;
     pub use crate::utils::*;
     #[doc(inline)]
diff --git a/tensor/src/linalg/mod.rs b/tensor/src/linalg/mod.rs
index bb715919..6b990ba7 100644
--- a/tensor/src/linalg/mod.rs
+++ b/tensor/src/linalg/mod.rs
@@ -5,23 +5,16 @@
 //! # Linear Algebra
 //!
 //!
-pub mod tri;
-pub mod uplo;
-
-pub trait Inverse {
-    fn inv(&self) -> Self;
-}
+pub use self::specs::*;
 
-/// Matrix multiplication
-pub trait Matmul<Rhs = Self> {
-    type Output;
+pub(crate) mod specs;
 
-    fn matmul(&self, rhs: &Rhs) -> Self::Output;
-}
+pub mod tri;
+pub mod uplo;
 
 pub(crate) mod prelude {
+    pub use super::specs::*;
     pub use super::uplo::UPLO;
-    pub use super::{Inverse, Matmul};
 }
 
 #[cfg(test)]
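
The new `TensorBase::inv` and the `Inverse` trait both funnel into `inverse_impl`, which row-reduces the augmented matrix [A | I] until the left half becomes the identity; the right half is then the inverse. As a standalone sanity check of that result, the 2x2 case can be compared against its closed form. The sketch below is independent of the crate and all names in it are illustrative, not part of the patch.

// Verify the Gauss-Jordan result in the 2x2 case against the closed-form
// inverse; a hedged, standalone sketch rather than the crate's own code.
fn inverse_2x2(m: [[f64; 2]; 2]) -> Option<[[f64; 2]; 2]> {
    let det = m[0][0] * m[1][1] - m[0][1] * m[1][0];
    if det == 0.0 {
        return None; // singular input, analogous to TensorError::Singular
    }
    Some([
        [m[1][1] / det, -m[0][1] / det],
        [-m[1][0] / det, m[0][0] / det],
    ])
}

fn main() {
    let a = [[4.0, 7.0], [2.0, 6.0]];
    let inv = inverse_2x2(a).unwrap();
    // A * A^-1 must reproduce the identity matrix
    for i in 0..2 {
        for j in 0..2 {
            let entry: f64 = (0..2).map(|k| a[i][k] * inv[k][j]).sum();
            let expected = if i == j { 1.0 } else { 0.0 };
            assert!((entry - expected).abs() < 1e-12);
        }
    }
}
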
+pub trait Inverse { + type Output; + + fn inv(&self) -> Self::Output; +} + +/// Matrix multiplication +pub trait Matmul { + type Output; + + fn matmul(&self, rhs: &Rhs) -> Self::Output; +} + +impl Matmul for Vec +where + T: Copy + ops::Mul + Sum, +{ + type Output = T; + + fn matmul(&self, rhs: &Self) -> Self::Output { + self.iter().zip(rhs.iter()).map(|(a, b)| *a * *b).sum() + } +} diff --git a/tensor/src/shape/dim/mod.rs b/tensor/src/shape/dim/mod.rs index a36de856..076e4ff9 100644 --- a/tensor/src/shape/dim/mod.rs +++ b/tensor/src/shape/dim/mod.rs @@ -37,19 +37,12 @@ pub trait Dimension: IndexMut { } } -#[allow(dead_code)] pub(crate) mod utils { use crate::index::{Ix, Ixs}; use crate::shape::{Shape, ShapeError, Stride}; use core::mem; - /// Calculate offset from `Ix` stride converting sign properly - #[inline(always)] - pub fn stride_offset(n: Ix, stride: Ix) -> isize { - (n as isize) * (stride as Ixs) - } - - pub(crate) fn can_index_slice( + pub fn can_index_slice( data: &[A], shape: &Shape, stride: &Stride, @@ -59,29 +52,6 @@ pub(crate) mod utils { can_index_slice_impl(max_offset, data.len(), shape, stride) } - fn can_index_slice_impl( - max_offset: usize, - data_len: usize, - dim: &Shape, - strides: &Stride, - ) -> Result<(), ShapeError> { - // Check condition 3. - let is_empty = dim.as_slice().iter().any(|&d| d == 0); - if is_empty && max_offset > data_len { - return Err(ShapeError::OutOfBounds); - } - if !is_empty && max_offset >= data_len { - return Err(ShapeError::OutOfBounds); - } - - // Check condition 4. - if !is_empty && dim_stride_overlap(dim, strides) { - return Err(ShapeError::Unsupported); - } - - Ok(()) - } - pub fn dim_stride_overlap(dim: &Shape, strides: &Stride) -> bool { let order = strides._fastest_varying_stride_order(); let mut sum_prev_offsets = 0; @@ -109,6 +79,49 @@ pub(crate) mod utils { max_abs_offset_check_overflow_impl(mem::size_of::(), dim, strides) } + pub fn size_of_shape_checked(dim: &Shape) -> Result { + let size_nonzero = dim + .as_slice() + .iter() + .filter(|&&d| d != 0) + .try_fold(1usize, |acc, &d| acc.checked_mul(d)) + .ok_or_else(|| ShapeError::Overflow)?; + if size_nonzero > ::std::isize::MAX as usize { + Err(ShapeError::Overflow) + } else { + Ok(dim.size()) + } + } + + /// Calculate offset from `Ix` stride converting sign properly + #[inline(always)] + pub fn stride_offset(n: Ix, stride: Ix) -> isize { + (n as isize) * (stride as Ixs) + } + + fn can_index_slice_impl( + max_offset: usize, + data_len: usize, + dim: &Shape, + strides: &Stride, + ) -> Result<(), ShapeError> { + // Check condition 3. + let is_empty = dim.as_slice().iter().any(|&d| d == 0); + if is_empty && max_offset > data_len { + return Err(ShapeError::OutOfBounds); + } + if !is_empty && max_offset >= data_len { + return Err(ShapeError::OutOfBounds); + } + + // Check condition 4. 
+ if !is_empty && dim_stride_overlap(dim, strides) { + return Err(ShapeError::Unsupported); + } + + Ok(()) + } + fn max_abs_offset_check_overflow_impl( elem_size: usize, dim: &Shape, @@ -149,18 +162,4 @@ pub(crate) mod utils { Ok(max_offset) } - - pub fn size_of_shape_checked(dim: &Shape) -> Result { - let size_nonzero = dim - .as_slice() - .iter() - .filter(|&&d| d != 0) - .try_fold(1usize, |acc, &d| acc.checked_mul(d)) - .ok_or_else(|| ShapeError::Overflow)?; - if size_nonzero > ::std::isize::MAX as usize { - Err(ShapeError::Overflow) - } else { - Ok(dim.size()) - } - } } diff --git a/tensor/src/shape/layout.rs b/tensor/src/shape/layout.rs index 70393a54..76fd88eb 100644 --- a/tensor/src/shape/layout.rs +++ b/tensor/src/shape/layout.rs @@ -12,15 +12,25 @@ use serde::{Deserialize, Serialize}; pub struct Layout { pub(crate) offset: usize, pub(crate) shape: Shape, - pub(crate) stride: Stride, + pub(crate) strides: Stride, } impl Layout { - pub fn new(offset: usize, shape: impl IntoShape, stride: impl IntoStride) -> Self { + pub fn new(offset: usize, shape: impl IntoShape, strides: impl IntoStride) -> Self { Self { offset, shape: shape.into_shape(), - stride: stride.into_stride(), + strides: strides.into_stride(), + } + } + /// Create a new layout with a contiguous stride. + pub fn contiguous(shape: impl IntoShape) -> Self { + let shape = shape.into_shape(); + let stride = shape.stride_contiguous(); + Self { + offset: 0, + shape, + strides: stride, } } /// Broadcast the layout to a new shape. @@ -48,27 +58,16 @@ impl Layout { } Ok(Self::new(self.offset, shape, stride)) } - /// Create a new layout with a contiguous stride. - pub fn contiguous(shape: impl IntoShape) -> Self { - let shape = shape.into_shape(); - let stride = shape.stride_contiguous(); - Self { - offset: 0, - shape, - stride, - } - } - /// Determine if the current layout is contiguous or not. pub fn is_contiguous(&self) -> bool { - self.shape().is_contiguous(&self.stride) + self.shape().is_contiguous(&self.strides) } - - /// Determine if the current layout is square or not. + /// A function for determining if the layout is square. + /// An n-dimensional object is square if all of its dimensions are equal. pub fn is_square(&self) -> bool { self.shape().is_square() } - /// Get a peek at the offset of the layout. + /// Peek the offset of the layout. pub fn offset(&self) -> usize { self.offset } @@ -76,13 +75,13 @@ impl Layout { /// element. pub fn offset_from_low_addr_ptr_to_logical_ptr(&self) -> usize { let offset = - izip!(self.shape().as_slice(), self.strides().as_slice()).fold(0, |_offset, (d, s)| { + izip!(self.shape().as_slice(), self.strides().as_slice()).fold(0, |acc, (d, s)| { let d = *d as isize; let s = *s as isize; if s < 0 && d > 1 { - _offset - s * (d - 1) + acc - s * (d - 1) } else { - _offset + acc } }); debug_assert!(offset >= 0); @@ -90,25 +89,30 @@ impl Layout { } /// Return the rank (number of dimensions) of the layout. pub fn rank(&self) -> Rank { - debug_assert_eq!(self.stride.len(), *self.shape.rank()); + debug_assert_eq!(self.strides.len(), *self.shape.rank()); self.shape.rank() } + /// Remove an axis from the current layout, returning the new layout. pub fn remove_axis(&self, axis: Axis) -> Self { Self { offset: self.offset, shape: self.shape().remove_axis(axis), - stride: self.strides().remove_axis(axis), + strides: self.strides().remove_axis(axis), } } /// Reshape the layout to a new shape. 
pub fn reshape(&mut self, shape: impl IntoShape) { self.shape = shape.into_shape(); - self.stride = self.shape.stride_contiguous(); + self.strides = self.shape.stride_contiguous(); } /// Reverse the order of the axes. - pub fn reverse_axes(mut self) -> Layout { + pub fn reverse(&mut self) { self.shape.reverse(); - self.stride.reverse(); + self.strides.reverse(); + } + /// Reverse the order of the axes. + pub fn reverse_axes(mut self) -> Layout { + self.reverse(); self } /// Get a reference to the shape of the layout. @@ -121,14 +125,14 @@ impl Layout { } /// Get a reference to the stride of the layout. pub const fn strides(&self) -> &Stride { - &self.stride + &self.strides } /// Swap the axes of the layout. pub fn swap_axes(&self, a: Axis, b: Axis) -> Layout { Layout { offset: self.offset, shape: self.shape.swap_axes(a, b), - stride: self.stride.swap_axes(a, b), + strides: self.strides.swap_axes(a, b), } } /// Transpose the layout. @@ -141,9 +145,19 @@ impl Layout { self } - pub fn with_shape(mut self, shape: impl IntoShape) -> Self { + pub fn with_shape_c(mut self, shape: impl IntoShape) -> Self { self.shape = shape.into_shape(); - self.stride = self.shape.stride_contiguous(); + self.strides = self.shape.stride_contiguous(); + self + } + + pub unsafe fn with_shape_unchecked(mut self, shape: impl IntoShape) -> Self { + self.shape = shape.into_shape(); + self + } + + pub unsafe fn with_strides_unchecked(mut self, stride: impl IntoStride) -> Self { + self.strides = stride.into_stride(); self } } diff --git a/tensor/src/shape/mod.rs b/tensor/src/shape/mod.rs index 4277fb4c..f2fe00b2 100644 --- a/tensor/src/shape/mod.rs +++ b/tensor/src/shape/mod.rs @@ -15,6 +15,7 @@ pub(crate) mod rank; pub(crate) mod shape; pub(crate) mod stride; +#[doc(hidden)] pub mod dim; pub trait IntoShape { @@ -40,12 +41,11 @@ pub(crate) mod prelude { pub use super::IntoShape; pub use super::axis::{Axis, IntoAxis}; - pub use super::dim::*; - pub use super::error::*; + pub use super::error::{ShapeError, ShapeResult}; pub use super::layout::Layout; pub use super::rank::{IntoRank, Rank}; - pub use super::shape::*; - pub use super::stride::*; + pub use super::shape::Shape; + pub use super::stride::{IntoStride, Stride}; } #[cfg(test)] diff --git a/tensor/src/specs/affine.rs b/tensor/src/specs/affine.rs deleted file mode 100644 index 7cfb6cf1..00000000 --- a/tensor/src/specs/affine.rs +++ /dev/null @@ -1,26 +0,0 @@ -/* - Appellation: affine - Contrib: FL03 -*/ -/// [Affine] describes a type of geometric transformation which preserves -/// lines and parallelisms. 
-/// -/// ### General Formula -/// f(x) = A * x + b -pub trait Affine { - type Output; - - fn affine(&self, mul: A, add: B) -> Self::Output; -} - -impl Affine for S -where - S: Clone + std::ops::Mul, - C: std::ops::Add, -{ - type Output = C; - - fn affine(&self, mul: A, add: B) -> Self::Output { - self.clone() * mul + add - } -} diff --git a/tensor/src/specs/mod.rs b/tensor/src/specs/mod.rs index ea2ecb09..b9fa3676 100644 --- a/tensor/src/specs/mod.rs +++ b/tensor/src/specs/mod.rs @@ -2,15 +2,13 @@ Appellation: specs Contrib: FL03 */ -pub use self::{affine::*, moves::*, ndtensor::*, scalar::*}; +pub use self::{moves::*, ndtensor::*, scalar::*}; -pub(crate) mod affine; pub(crate) mod moves; pub(crate) mod ndtensor; pub(crate) mod scalar; pub(crate) mod prelude { - pub use super::affine::*; pub use super::moves::*; pub use super::ndtensor::*; pub use super::scalar::*; diff --git a/tensor/src/specs/ndtensor.rs b/tensor/src/specs/ndtensor.rs index 173301bd..37a03afb 100644 --- a/tensor/src/specs/ndtensor.rs +++ b/tensor/src/specs/ndtensor.rs @@ -8,6 +8,10 @@ use crate::shape::{Rank, Shape, Stride}; pub trait NdTensor { type Data: TensorData; + fn as_mut_ptr(&mut self) -> *mut ::Elem; + + fn as_ptr(&self) -> *const ::Elem; + fn id(&self) -> TensorId; fn layout(&self) -> &Layout; diff --git a/tensor/src/specs/scalar.rs b/tensor/src/specs/scalar.rs index 81d78513..d841101f 100644 --- a/tensor/src/specs/scalar.rs +++ b/tensor/src/specs/scalar.rs @@ -27,8 +27,28 @@ pub trait Scalar: type Real: Scalar + NumOps; + fn from_real(re: Self::Real) -> Self; + fn abs(self) -> Self::Real; + fn add_complex(&self, other: Self::Complex) -> Self::Complex { + self.as_complex() + other + } + + fn div_complex(&self, other: Self::Complex) -> Self::Complex { + self.as_complex() / other + } + + fn mul_complex(&self, other: Self::Complex) -> Self::Complex { + self.as_complex() * other + } + + fn sub_complex(&self, other: Self::Complex) -> Self::Complex { + self.as_complex() - other + } + + fn as_complex(&self) -> Self::Complex; + fn conj(&self) -> Self::Complex; fn im(&self) -> Self::Real { @@ -43,6 +63,10 @@ pub trait Scalar: fn exp(self) -> Self; + fn inv(self) -> Self { + Inv::inv(self) + } + fn ln(self) -> Self; fn pow(self, exp: Self) -> Self; @@ -87,10 +111,18 @@ where type Complex = Self; type Real = T; + fn from_real(re: Self::Real) -> Self { + Complex::new(re, Default::default()) + } + fn abs(self) -> Self::Real { Complex::norm(self) } + fn as_complex(&self) -> Self::Complex { + *self + } + fn conj(&self) -> Self::Complex { Complex::conj(self) } @@ -162,10 +194,18 @@ macro_rules! impl_scalar { type Complex = Complex<$re>; type Real = $re; + fn from_real(re: Self::Real) -> Self { + re + } + fn abs(self) -> Self::Real { <$re>::abs(self) } + fn as_complex(&self) -> Self::Complex { + Complex::new(*self, <$re>::default()) + } + fn conj(&self) -> Self::Complex { Complex::new(*self, -<$re>::default()) } diff --git a/tensor/src/stats/mod.rs b/tensor/src/stats/mod.rs index aa1f2892..41ba775a 100644 --- a/tensor/src/stats/mod.rs +++ b/tensor/src/stats/mod.rs @@ -97,5 +97,9 @@ pub trait StatisticsExt: Statistics { fn mean_axis(&self, axis: Axis) -> T; } +pub(crate) mod prelude { + pub use super::{Statistics, StatisticsExt}; +} + #[cfg(test)] mod tests {} diff --git a/tensor/src/tensor.rs b/tensor/src/tensor.rs index 2b772e0a..338227b7 100644 --- a/tensor/src/tensor.rs +++ b/tensor/src/tensor.rs @@ -116,6 +116,14 @@ impl TensorBase { op: BackpropOp::none(), } } + /// Return a mutable pointer to the tensor's data. 
+ pub fn as_mut_ptr(&mut self) -> *mut T { + self.data_mut().as_mut_ptr() + } + /// Return a pointer to the tensor's data. + pub fn as_ptr(&self) -> *const T { + self.data().as_ptr() + } /// Return a reference to the tensor's data. pub fn as_slice(&self) -> &[T] { &self.data @@ -124,6 +132,16 @@ impl TensorBase { pub fn as_mut_slice(&mut self) -> &mut [T] { &mut self.data } + /// Assign the values of another tensor to this tensor. + pub fn assign(&mut self, other: &Self) + where + T: Clone, + { + self.data_mut() + .iter_mut() + .zip(other.data()) + .for_each(|(a, b)| *a = b.clone()); + } /// Detach the computational graph from the tensor pub fn detach(&self) -> Self where @@ -227,6 +245,11 @@ impl TensorBase { pub fn rank(&self) -> Rank { self.shape().rank() } + /// Set the value of the tensor at the specified index + pub fn set(&mut self, index: impl AsRef<[usize]>, value: T) { + let i = self.layout().index(index); + self.data_mut()[i] = value; + } /// An owned reference of the tensors [Shape] pub fn shape(&self) -> &Shape { self.layout().shape() @@ -278,11 +301,24 @@ impl TensorBase { } pub unsafe fn with_shape_unchecked(mut self, shape: impl IntoShape) -> Self { - self.layout = Layout::contiguous(shape); + self.layout = self.layout.with_shape_c(shape); self } } +impl<'a, T> TensorBase<&'a T> { + // pub fn as_tensor(&self) -> TensorBase where T: Copy { + // let store = self.data.iter().copied().collect(); + // TensorBase { + // id: self.id, + // kind: self.kind, + // layout: self.layout.clone(), + // op: self.op.clone(), + // data: store, + // } + // } +} + impl TensorBase { pub fn to_owned(&self) -> TensorBase where @@ -292,13 +328,12 @@ impl TensorBase { } pub fn view<'a>(&'a self) -> TensorBase<&'a T> { - let store = self.data.iter().collect(); TensorBase { - id: self.id, - kind: self.kind, - layout: self.layout.clone(), - op: self.op.view(), - data: store, + id: self.id(), + kind: self.kind(), + layout: self.layout().clone(), + op: self.op().view(), + data: self.data().iter().collect(), } } } diff --git a/tensor/tests/stats.rs b/tensor/tests/stats.rs new file mode 100644 index 00000000..ee351d29 --- /dev/null +++ b/tensor/tests/stats.rs @@ -0,0 +1,40 @@ +/* + Appellation: stats + Contrib: FL03 +*/ +#![cfg(test)] +extern crate acme_tensor as acme; + +use acme::prelude::{Shape, Tensor}; + +macro_rules! adiff { + ($a:expr, $b:expr) => { + ($a - $b).abs() + }; +} +macro_rules! assert_diff { + ($a:expr, $b:expr, $tol:expr) => { + let diff = adiff!($a, $b); + assert!( + diff < $tol, + "the difference ({}) between {} and {} exceeds the allowed tolerance", + diff, + $a, + $b + ); + }; + ($a:expr, $b:expr) => { + assert_diff!($a, $b, 1e-10); + }; +} + +#[test] +fn test_std() { + let shape = Shape::from((2, 2)); + let tensor = Tensor::linspace(0f64, shape.size() as f64, shape.size()) + .reshape(shape) + .unwrap(); + let exp = 1.118033988749895; + assert_diff!(tensor.std(), exp); + assert_diff!(tensor.variance(), exp.powi(2)); +}
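
The expected standard deviation in `test_std` is sqrt(5)/2, which is the population statistic of the data [0, 1, 2, 3]; this assumes `Tensor::linspace(0f64, 4f64, 4)` steps by one rather than including the endpoint, as the test's expected value implies. A plain-slice cross-check of the same formulas used by `variance` and `std` above:

// Population mean/variance/std over a plain slice; a hedged cross-check of
// the tensor methods, assuming linspace(0, 4, 4) yields [0, 1, 2, 3].
fn mean(data: &[f64]) -> f64 {
    data.iter().sum::<f64>() / data.len() as f64
}

fn variance(data: &[f64]) -> f64 {
    let m = mean(data);
    data.iter().map(|x| (x - m).powi(2)).sum::<f64>() / data.len() as f64
}

fn main() {
    let data = [0.0, 1.0, 2.0, 3.0];
    assert_eq!(mean(&data), 1.5);
    assert_eq!(variance(&data), 1.25);
    assert!((variance(&data).sqrt() - 1.118033988749895).abs() < 1e-10);
}
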
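
Patch 87 also renames the real component of `Dual` to `value` in core/src/types/dual.rs. For context, the (value, dual) pair is what makes forward-mode differentiation work: seeding the dual part with 1 propagates a derivative through ordinary arithmetic. A minimal self-contained sketch of that idea, deliberately independent of the crate's own `Dual` type:

// Forward-mode AD with a (value, dual) pair, mirroring the layout used by
// the crate's Dual; this `Dual64` is a hypothetical stand-in for illustration.
#[derive(Clone, Copy, Debug, PartialEq)]
struct Dual64 {
    value: f64, // f(x)
    dual: f64,  // f'(x)
}

impl core::ops::Add for Dual64 {
    type Output = Self;
    // sum rule: (f + g)' = f' + g'
    fn add(self, rhs: Self) -> Self {
        Self { value: self.value + rhs.value, dual: self.dual + rhs.dual }
    }
}

impl core::ops::Mul for Dual64 {
    type Output = Self;
    // product rule: (f * g)' = f' * g + f * g'
    fn mul(self, rhs: Self) -> Self {
        Self {
            value: self.value * rhs.value,
            dual: self.dual * rhs.value + self.value * rhs.dual,
        }
    }
}

fn main() {
    // seed x with dual = 1 to differentiate with respect to x
    let x = Dual64 { value: 3.0, dual: 1.0 };
    let y = x * x + x; // y = x^2 + x, so y' = 2x + 1
    assert_eq!(y.value, 12.0);
    assert_eq!(y.dual, 7.0);
}
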