diff --git a/NEWS.md b/NEWS.md index 3bfb19856..9691d11bf 100644 --- a/NEWS.md +++ b/NEWS.md @@ -4,6 +4,25 @@ All notable changes to this project will be documented in this file. The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). +## [Unreleased] + +### Added + + - Added `⊙` (\odot) as an alias of `inner`. Since PR [#239](https://github.com/gridap/Gridap.jl/pull/239). + - Added `⊗` (\otimes) as an alias of `outer`. Since PR [#239](https://github.com/gridap/Gridap.jl/pull/239). + +### Changed + + - Major refactoring in the module `Gridap.TensorValues`. + Since PR [#239](https://github.com/gridap/Gridap.jl/pull/239). + **The following changes are likely to affect all users:** + - The operator `*` is no longer allowed for expressing the dot product. Use the `LinearAlgebra.dot` + function, aka `⋅` (\cdot), instead. + - The syntax `∇*u` is no longer allowed. Use `∇⋅u` instead. + - Gridap re-exports `dot`, `⋅`, and other names from LinearAlgebra that are used + often in Gridap code. + - Function `n_components` is renamed to `num_components`. + ## [0.10.4] - 2020-6-8 ### Added diff --git a/Project.toml b/Project.toml index 0b3d05b25..6484f9092 100644 --- a/Project.toml +++ b/Project.toml @@ -1,7 +1,7 @@ name = "Gridap" uuid = "56d4f2e9-7ea1-5844-9cf6-b9c51ca7ce8e" authors = ["Santiago Badia ", "Francesc Verdugo "] -version = "0.10.4" +version = "0.11.0" [deps] AbstractTrees = "1520ce14-60c1-5f80-bbc7-55ef81b5835c" diff --git a/src/Exports.jl b/src/Exports.jl index 74a950510..0074286f1 100644 --- a/src/Exports.jl +++ b/src/Exports.jl @@ -5,6 +5,10 @@ macro publish(mod,name) end end +# Reexport from LinearAlgebra (just for convenience) +using LinearAlgebra: det, inv, tr, cross, dot, norm, ×, ⋅ +export det, inv, tr, cross, dot, norm, ×, ⋅ + @publish Helpers operate @publish Helpers GridapType @@ -33,6 +37,9 @@ end @publish TensorValues inner @publish TensorValues outer @publish TensorValues diagonal_tensor +@publish TensorValues num_components +using Gridap.TensorValues: ⊙; export ⊙ +using Gridap.TensorValues: ⊗; export ⊗ @publish Fields gradient @publish Fields ∇ diff --git a/src/FESpaces/CLagrangianFESpaces.jl b/src/FESpaces/CLagrangianFESpaces.jl index 012571a42..abdc79dd8 100644 --- a/src/FESpaces/CLagrangianFESpaces.jl +++ b/src/FESpaces/CLagrangianFESpaces.jl @@ -138,7 +138,7 @@ function _generate_dof_layout_component_major(::Type{<:Real},nnodes::Integer) end function _generate_dof_layout_component_major(::Type{T},nnodes::Integer) where T - ncomps = n_components(T) + ncomps = num_components(T) V = change_eltype(T,Int) ndofs = ncomps*nnodes dof_to_comp = zeros(Int8,ndofs) @@ -175,7 +175,7 @@ function _generate_cell_dofs_clagrangian_fespace( cell_to_ctype, node_and_comp_to_dof) where T - ncomps = n_components(T) + ncomps = num_components(T) ctype_to_lnode_to_comp_to_ldof = map(get_node_and_comp_to_dof,reffes) ctype_to_num_ldofs = map(num_dofs,reffes) diff --git a/src/FESpaces/ExtendedFESpaces.jl b/src/FESpaces/ExtendedFESpaces.jl index fcd32b126..037d45f04 100644 --- a/src/FESpaces/ExtendedFESpaces.jl +++ b/src/FESpaces/ExtendedFESpaces.jl @@ -175,7 +175,7 @@ function get_cell_basis(f::ExtendedFESpace) vi = testitem(cell_to_val) Tv = field_return_type(vi,xi) T = eltype(Tv) - D = n_components(eltype(xi)) + D = num_components(eltype(xi)) void_to_val = Fill(VoidBasis{T,D}(),length(f.trian.void_to_oldcell)) array = ExtendedVector( diff --git a/src/Fields/AffineMaps.jl
b/src/Fields/AffineMaps.jl index 2644d1b4d..fe3ecb921 100644 --- a/src/Fields/AffineMaps.jl +++ b/src/Fields/AffineMaps.jl @@ -2,9 +2,9 @@ """ """ struct AffineMap{D,T,L} <:Field - jacobian::TensorValue{D,T,L} + jacobian::TensorValue{D,D,T,L} origin::Point{D,T} - function AffineMap(jacobian::TensorValue{D,T,L}, origin::Point{D,T}) where {D,T,L} + function AffineMap(jacobian::TensorValue{D,D,T,L}, origin::Point{D,T}) where {D,T,L} new{D,T,L}(jacobian,origin) end end @@ -28,11 +28,11 @@ end function _apply_affine_map(h,x) t = h.origin s = h.jacobian - (s*x)+t + (s⋅x)+t end struct AffineMapGrad{D,T,L} <: Field - jacobian::TensorValue{D,T,L} + jacobian::TensorValue{D,D,T,L} end function field_gradient(h::AffineMap) diff --git a/src/Fields/Attachmap.jl b/src/Fields/Attachmap.jl index d92d85c3e..4ed501ff5 100644 --- a/src/Fields/Attachmap.jl +++ b/src/Fields/Attachmap.jl @@ -32,7 +32,7 @@ function kernel_cache(k::PhysGrad,a,b) _attachmap_checks(a,b) Ta = eltype(a) Tb = eltype(b) - T = return_type(*,return_type(inv,Tb),Ta) + T = return_type(⋅,return_type(inv,Tb),Ta) r = zeros(T,size(a)) CachedArray(r) end @@ -53,7 +53,7 @@ end for p in 1:np @inbounds jacinv = inv(b[p]) for i in 1:ni - @inbounds c[p,i] = jacinv * a[p,i] + @inbounds c[p,i] = jacinv ⋅ a[p,i] end end c diff --git a/src/Fields/DiffOperators.jl b/src/Fields/DiffOperators.jl index 89b941dde..004acf7bc 100644 --- a/src/Fields/DiffOperators.jl +++ b/src/Fields/DiffOperators.jl @@ -49,14 +49,24 @@ function laplacian(f) end """ - ∇*f + ∇⋅f Equivalent to divergence(f) """ -(*)(::typeof(∇),f) = divergence(f) -(*)(::typeof(∇),f::GridapType) = divergence(f) +dot(::typeof(∇),f) = divergence(f) +dot(::typeof(∇),f::GridapType) = divergence(f) + +function (*)(::typeof(∇),f) + msg = "Syntax ∇*f has been removed, use ∇⋅f (\\nabla \\cdot f) instead" + error(msg) +end + +function (*)(::typeof(∇),f::GridapType) + msg = "Syntax ∇*f has been removed, use ∇⋅f (\\nabla \\cdot f) instead" + error(msg) +end """ outer(∇,f) @@ -118,11 +128,11 @@ function gradient(f::Function) end function _grad_f(f,x,fx) - VectorValue(ForwardDiff.gradient(f,x.array)) + VectorValue(ForwardDiff.gradient(f,get_array(x))) end function _grad_f(f,x,fx::VectorValue) - TensorValue(transpose(ForwardDiff.jacobian(y->f(y).array,x.array))) + TensorValue(transpose(ForwardDiff.jacobian(y->get_array(f(y)),get_array(x)))) end function _grad_f(f,x,fx::MultiValue) @@ -130,11 +140,11 @@ function _grad_f(f,x,fx::MultiValue) end function divergence(f::Function) - x -> tr(ForwardDiff.jacobian(y->f(y).array,x.array)) + x -> tr(ForwardDiff.jacobian(y->get_array(f(y)),get_array(x))) end function curl(f::Function) - x -> grad2curl(TensorValue(transpose(ForwardDiff.jacobian(y->f(y).array,x.array)))) + x -> grad2curl(TensorValue(transpose(ForwardDiff.jacobian(y->get_array(f(y)),get_array(x))))) end function laplacian(f::Function) @@ -144,14 +154,14 @@ function laplacian(f::Function) end function _lapl_f(f,x,fx) - tr(ForwardDiff.jacobian(y->ForwardDiff.gradient(f,y), x.array)) + tr(ForwardDiff.jacobian(y->ForwardDiff.gradient(f,y), get_array(x))) end function _lapl_f(f,x,fx::VectorValue) A = length(x) B = length(fx) - a = ForwardDiff.jacobian(y->transpose(ForwardDiff.jacobian(z->f(z).array,y)), x.array) - tr(MultiValue{Tuple{A,A,B}}(Tuple(transpose(a)))) + a = ForwardDiff.jacobian(y->transpose(ForwardDiff.jacobian(z->get_array(f(z)),y)), get_array(x)) + tr(ThirdOrderTensorValue{A,A,B}(Tuple(transpose(a)))) end function _lapl_f(f,x,fx::MultiValue) diff --git a/src/Fields/Fields.jl b/src/Fields/Fields.jl 
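As an illustration of the DiffOperators.jl change above, a minimal sketch (not part of the patch): `u` is an arbitrary example function, and it is assumed that `get_array` is defined for `VectorValue` as it is for the other tensor types.

using Gridap

u(x) = VectorValue(x[1]^2, x[2])   # example vector-valued function
divu = ∇⋅u                         # same as divergence(u); ∇*u now raises an informative error
divu(VectorValue(1.0, 2.0))        # 2*1.0 + 1.0 = 3.0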
index e7198cd01..4076742c6 100644 --- a/src/Fields/Fields.jl +++ b/src/Fields/Fields.jl @@ -19,6 +19,7 @@ using Gridap.Arrays: BCasted using Gridap.Arrays: NumberOrArray using Gridap.Arrays: AppliedArray using Gridap.Arrays: Contracted +using LinearAlgebra: ⋅ using Test using DocStringExtensions @@ -81,6 +82,7 @@ import Gridap.TensorValues: symmetric_part import Base: +, - , * import LinearAlgebra: cross import LinearAlgebra: tr +import LinearAlgebra: dot import Base: transpose import Base: adjoint diff --git a/src/Geometry/GenericBoundaryTriangulations.jl b/src/Geometry/GenericBoundaryTriangulations.jl index 05e7382be..effe94a6c 100644 --- a/src/Geometry/GenericBoundaryTriangulations.jl +++ b/src/Geometry/GenericBoundaryTriangulations.jl @@ -416,8 +416,8 @@ function kernel_evaluate(k::NormalVectorValued,x,J,refn) apply(k,Jx,refn) end -function _map_normal(J::TensorValue{D,T},n::VectorValue{D,T}) where {D,T} - v = inv(J)*n +function _map_normal(J::TensorValue{D,D,T},n::VectorValue{D,T}) where {D,T} + v = inv(J)⋅n m = sqrt(inner(v,v)) if m < eps() return zero(n) diff --git a/src/Geometry/Geometry.jl b/src/Geometry/Geometry.jl index 665648531..b9e4d5489 100644 --- a/src/Geometry/Geometry.jl +++ b/src/Geometry/Geometry.jl @@ -8,6 +8,7 @@ module Geometry using Test using DocStringExtensions using FillArrays +using LinearAlgebra: ⋅ using Gridap.Helpers using Gridap.Arrays diff --git a/src/Gridap.jl b/src/Gridap.jl index b01d45b2b..8f718e012 100644 --- a/src/Gridap.jl +++ b/src/Gridap.jl @@ -36,10 +36,10 @@ include("Io/Io.jl") include("Algebra/Algebra.jl") -include("TensorValues/TensorValues.jl") - include("Arrays/Arrays.jl") +include("TensorValues/TensorValues.jl") + include("Fields/Fields.jl") include("Polynomials/Polynomials.jl") diff --git a/src/Polynomials/MonomialBases.jl b/src/Polynomials/MonomialBases.jl index bd5ffc90a..b8b3f7336 100644 --- a/src/Polynomials/MonomialBases.jl +++ b/src/Polynomials/MonomialBases.jl @@ -116,7 +116,7 @@ get_value_type(::Type{MonomialBasis{D,T}}) where {D,T} = T function field_cache(f::MonomialBasis{D,T},x) where {D,T} @assert D == length(eltype(x)) "Incorrect number of point components" np = length(x) - ndof = length(f.terms)*n_components(T) + ndof = length(f.terms)*num_components(T) n = 1 + _maximum(f.orders) r = CachedArray(zeros(T,(np,ndof))) v = CachedArray(zeros(T,(ndof,))) @@ -127,7 +127,7 @@ end function evaluate_field!(cache,f::MonomialBasis{D,T},x) where {D,T} r, v, c = cache np = length(x) - ndof = length(f.terms)*n_components(T) + ndof = length(f.terms)*num_components(T) n = 1 + _maximum(f.orders) setsize!(r,(np,ndof)) setsize!(v,(ndof,)) @@ -145,7 +145,7 @@ end function gradient_cache(f::MonomialBasis{D,V},x) where {D,V} @assert D == length(eltype(x)) "Incorrect number of point components" np = length(x) - ndof = length(f.terms)*n_components(V) + ndof = length(f.terms)*num_components(V) xi = testitem(x) T = gradient_type(V,xi) n = 1 + _maximum(f.orders) @@ -159,7 +159,7 @@ end function evaluate_gradient!(cache,f::MonomialBasis{D,T},x) where {D,T} r, v, c, g = cache np = length(x) - ndof = length(f.terms) * n_components(T) + ndof = length(f.terms) * num_components(T) n = 1 + _maximum(f.orders) setsize!(r,(np,ndof)) setsize!(v,(ndof,)) @@ -178,7 +178,7 @@ end function hessian_cache(f::MonomialBasis{D,V},x) where {D,V} @assert D == length(eltype(x)) "Incorrect number of point components" np = length(x) - ndof = length(f.terms)*n_components(V) + ndof = length(f.terms)*num_components(V) xi = testitem(x) T = gradient_type(gradient_type(V,xi),xi) n = 1 
+ _maximum(f.orders) @@ -193,7 +193,7 @@ end function evaluate_hessian!(cache,f::MonomialBasis{D,T},x) where {D,T} r, v, c, g, h = cache np = length(x) - ndof = length(f.terms) * n_components(T) + ndof = length(f.terms) * num_components(T) n = 1 + _maximum(f.orders) setsize!(r,(np,ndof)) setsize!(v,(ndof,)) @@ -386,7 +386,7 @@ function _hessian_nd!( _hessian_1d!(h,x,orders[d],d) end - z = zero(mutable(TensorValue{D,T,D*D})) + z = zero(mutable(TensorValue{D,D,T})) o = one(T) k = 1 diff --git a/src/ReferenceFEs/ExtrusionPolytopes.jl b/src/ReferenceFEs/ExtrusionPolytopes.jl index 2250cce63..436c25235 100644 --- a/src/ReferenceFEs/ExtrusionPolytopes.jl +++ b/src/ReferenceFEs/ExtrusionPolytopes.jl @@ -158,8 +158,8 @@ end function (==)(a::ExtrusionPolytope{D},b::ExtrusionPolytope{D}) where D #The first axis is irrelevant here - ea = Point(a.extrusion.array.data[2:end]) - eb = Point(b.extrusion.array.data[2:end]) + ea = Point(Tuple(a.extrusion)[2:end]) + eb = Point(Tuple(b.extrusion)[2:end]) ea == eb end @@ -202,11 +202,11 @@ function get_face_vertex_permutations(p::ExtrusionPolytope) end function is_simplex(p::ExtrusionPolytope) - all(p.extrusion.array .== TET_AXIS) + all(Tuple(p.extrusion) .== TET_AXIS) end function is_n_cube(p::ExtrusionPolytope) - all(p.extrusion.array .== HEX_AXIS) + all(Tuple(p.extrusion) .== HEX_AXIS) end function is_simplex(p::ExtrusionPolytope{0}) @@ -343,7 +343,7 @@ end # Generates the array of n-faces of a polytope function _polytopenfaces(anchor, extrusion) - D = n_components(extrusion) + D = num_components(extrusion) zerop = zero(Point{D,Int}) nf_nfs = Vector{NFace{D}}(undef,0) _nfaceboundary!(anchor, zerop, extrusion, true, nf_nfs) @@ -389,7 +389,7 @@ end # boundary function _nfaceboundary!(anchor, extrusion, extend, isanchor, list) - D = n_components(extrusion) + D = num_components(extrusion) newext = extend push!(list,NFace(anchor,extrusion)) @@ -416,7 +416,7 @@ end function _newext(newext,i) m = zero(mutable(newext)) - D = n_components(newext) + D = num_components(newext) for j in 1:D m[j] = j == i ? 0 : newext[j] end @@ -425,7 +425,7 @@ end function _edim(newext,i) m = zero(mutable(newext)) - D = n_components(newext) + D = num_components(newext) for j in 1:D m[j] = j == i ? 1 : 0 end @@ -434,7 +434,7 @@ end function _tetp(anchor,i) m = zero(mutable(anchor)) - D = n_components(anchor) + D = num_components(anchor) for j in 1:D m[j] = j >= i ? 
anchor[j] : 0 end @@ -509,7 +509,7 @@ end function _eliminate_zeros(::Val{d},a) where d b = zero(mutable(Point{d,Int})) - D = n_components(a) + D = num_components(a) k = 1 for i in 1:D m = a[i] @@ -528,7 +528,7 @@ function _vertices_coordinates(::Type{T},p::DFace{D}) where {D,T} vcs = zeros(Point{D,T},length(vs)) for i = 1:length(vs) vx = p.nfaces[vs[i]] - vc = vx.anchor.array.data + vc = Tuple(vx.anchor) vcs[i] = vc end vcs @@ -587,7 +587,7 @@ function _facet_normal(::Type{T},p::DFace{D}, nf_vs, vs, i_f) where {D,T} v = zeros(T,n1,n2) for i in 2:length(nf_vs[i_f]) vi = vs[nf_vs[i_f][i]] - vs[nf_vs[i_f][1]] - for d in 1:n_components(vi) + for d in 1:num_components(vi) v[i-1,d] = vi[d] end end @@ -680,9 +680,9 @@ function _admissible_permutations(p::DFace{D}) where D if D > 3 @warn "Computing permutations for a polytope of dim > 3 is overkill" end - if D in (0,1) || all( p.extrusion.array.data[2:end] .== TET_AXIS ) + if D in (0,1) || all( Tuple(p.extrusion)[2:end] .== TET_AXIS ) perms = _admissible_permutations_simplex(p) - elseif all( p.extrusion.array.data[2:end] .== HEX_AXIS) + elseif all( Tuple(p.extrusion)[2:end] .== HEX_AXIS) perms = _admissible_permutations_n_cube(p) else @notimplemented "admissible vertex permutations only implemented for simplices and n-cubes" diff --git a/src/ReferenceFEs/LagrangianDofBases.jl b/src/ReferenceFEs/LagrangianDofBases.jl index 50e254942..b4c48d820 100644 --- a/src/ReferenceFEs/LagrangianDofBases.jl +++ b/src/ReferenceFEs/LagrangianDofBases.jl @@ -48,7 +48,7 @@ end # Node major implementation function _generate_dof_layout_node_major(::Type{T},nnodes::Integer) where T<:MultiValue - ncomps = n_components(T) + ncomps = num_components(T) V = change_eltype(T,Int) ndofs = ncomps*nnodes dof_to_comp = zeros(Int,ndofs) @@ -93,7 +93,7 @@ end vals = evaluate_field!(cf,field,b.nodes) ndofs = length(b.dof_to_node) T = eltype(vals) - ncomps = n_components(T) + ncomps = num_components(T) _evaluate_lagr_dof!(c,vals,b.node_and_comp_to_dof,ndofs,ncomps) end diff --git a/src/ReferenceFEs/LagrangianRefFEs.jl b/src/ReferenceFEs/LagrangianRefFEs.jl index b19979c49..8d52c90e3 100644 --- a/src/ReferenceFEs/LagrangianRefFEs.jl +++ b/src/ReferenceFEs/LagrangianRefFEs.jl @@ -276,7 +276,7 @@ function to_dict(reffe::LagrangianRefFE) b = get_prebasis(reffe) dict = Dict{Symbol,Any}() dict[:orders] = collect(get_orders(reffe)) - dict[:extrusion] = Array(get_extrusion(p).array) + dict[:extrusion] = Array(TensorValues.get_array(get_extrusion(p))) if is_S(reffe) dict[:space] = "serendipity" else @@ -374,10 +374,9 @@ function _generate_face_nodes_aux( end function _generate_face_own_dofs(face_own_nodes, node_and_comp_to_dof) - faces = 1:length(face_own_nodes) T = eltype(node_and_comp_to_dof) - comps = 1:n_components(T) + comps = 1:num_components(T) face_own_dofs = [Int[] for i in faces] for face in faces nodes = face_own_nodes[face] @@ -398,7 +397,7 @@ end function _find_own_dof_permutaions(node_perms,node_and_comp_to_dof,nfacenodeids,nfacedofsids) dof_perms = Vector{Int}[] T = eltype(node_and_comp_to_dof) - ncomps = n_components(T) + ncomps = num_components(T) idof_to_dof = nfacedofsids[end] inode_to_node = nfacenodeids[end] for inode_to_pinode in node_perms @@ -704,13 +703,13 @@ function NodalReferenceFE(p::ExtrusionPolytope) end function compute_monomial_basis(::Type{T},p::ExtrusionPolytope{D},orders) where {D,T} - extrusion = Tuple(p.extrusion.array) + extrusion = Tuple(p.extrusion) terms = _monomial_terms(extrusion,orders) MonomialBasis{D}(T,orders,terms) end function 
compute_own_nodes(p::ExtrusionPolytope{D},orders) where D - extrusion = Tuple(p.extrusion.array) + extrusion = Tuple(p.extrusion) if all(orders .== 0) _interior_nodes_order_0(p) else @@ -729,12 +728,12 @@ function compute_face_orders(p::ExtrusionPolytope,face::ExtrusionPolytope{D},ifa offset = get_offset(p,d) nface = p.dface.nfaces[iface+offset] face_orders = _eliminate_zeros(Val{D}(),nface.extrusion,orders) - face_orders.array.data + Tuple(face_orders) end function _eliminate_zeros(::Val{d},a,o) where d b = zero(mutable(Point{d,Int})) - D = n_components(a) + D = num_components(a) k = 1 for i in 1:D m = a[i] diff --git a/src/ReferenceFEs/NedelecRefFEs.jl b/src/ReferenceFEs/NedelecRefFEs.jl index a5b0f921c..21f75e924 100644 --- a/src/ReferenceFEs/NedelecRefFEs.jl +++ b/src/ReferenceFEs/NedelecRefFEs.jl @@ -177,7 +177,7 @@ const _Nedelec_cell_moments = _RT_cell_moments function _broadcast_cross(::Type{T},n,b) where T c = Array{T}(undef,size(b)) for (ii, i) in enumerate(b) - c[ii] = T(cross(i.array,n.array))# cross product + c[ii] = T(cross(get_array(i),get_array(n)))# cross product end return c end diff --git a/src/ReferenceFEs/RaviartThomasRefFEs.jl b/src/ReferenceFEs/RaviartThomasRefFEs.jl index 33a9464ca..aee581e39 100644 --- a/src/ReferenceFEs/RaviartThomasRefFEs.jl +++ b/src/ReferenceFEs/RaviartThomasRefFEs.jl @@ -259,7 +259,7 @@ function _eval_moment_dof_basis!(dofs,vals::AbstractVector,b) for j in 1:nj dofs[o] = z for i in 1:ni - dofs[o] += moments[i,j]*vals[nodes[i]] + dofs[o] += moments[i,j]⋅vals[nodes[i]] end o += 1 end @@ -282,7 +282,7 @@ function _eval_moment_dof_basis!(dofs,vals::AbstractMatrix,b) for a in 1:na dofs[o,a] = z for i in 1:ni - dofs[o,a] += moments[i,j]*vals[nodes[i],a] + dofs[o,a] += moments[i,j]⋅vals[nodes[i],a] end end o += 1 diff --git a/src/ReferenceFEs/ReferenceFEs.jl b/src/ReferenceFEs/ReferenceFEs.jl index d2f22b93d..e6217c0fb 100644 --- a/src/ReferenceFEs/ReferenceFEs.jl +++ b/src/ReferenceFEs/ReferenceFEs.jl @@ -11,8 +11,8 @@ using LinearAlgebra using Combinatorics using Gridap.Helpers -using Gridap.TensorValues using Gridap.Arrays +using Gridap.TensorValues using Gridap.Fields using Gridap.Polynomials using Gridap.Integration diff --git a/src/TensorValues/Indexing.jl b/src/TensorValues/Indexing.jl index f3daa440f..61282bffd 100644 --- a/src/TensorValues/Indexing.jl +++ b/src/TensorValues/Indexing.jl @@ -1,29 +1,81 @@ -size(a::MultiValue) = size(a.array) +eachindex(arg::MultiValue) = eachindex(1:prod(size(arg))) -length(a::MultiValue) = length(a.array) +CartesianIndices(arg::MultiValue) = CartesianIndices(size(arg)) -@propagate_inbounds function getindex( - a::MultiValue{S,T,N}, I::Vararg{Integer,N}) where {S,T,N} - a.array[I...] 
+LinearIndices(arg::MultiValue) = LinearIndices(size(arg)) + +getindex(arg::VectorValue, i::Integer) = arg.data[i] + +function getindex(arg::TensorValue{D},i::Integer,j::Integer) where D + index = _2d_tensor_linear_index(D,i,j) + arg.data[index] +end + +function getindex(arg::SymTensorValue{D},i::Integer,j::Integer) where D + index = _2d_sym_tensor_linear_index(D,i,j) + arg.data[index] +end + +function getindex(arg::SymFourthOrderTensorValue{D},i::Integer,j::Integer,k::Integer,l::Integer) where D + index = _4d_sym_tensor_linear_index(D,i,j,k,l) + arg.data[index] end -@propagate_inbounds function getindex(a::MultiValue, i::Integer) - a.array[i] +function getindex(arg::ThirdOrderTensorValue{D1,D2},i::Integer,j::Integer,k::Integer) where {D1,D2} + index = _3d_tensor_linear_index(D1,D2,i,j,k) + arg.data[index] end -eltype(a::Type{MultiValue{S,T,N,L}}) where {S,T,N,L} = T +getindex(arg::VectorValue, ci::CartesianIndex{1}) = getindex(arg,ci[1]) +getindex(arg::TensorValue,ci::CartesianIndex{2}) = getindex(arg,ci[1],ci[2]) +getindex(arg::SymTensorValue,ci::CartesianIndex{2}) = getindex(arg,ci[1],ci[2]) +getindex(arg::ThirdOrderTensorValue,ci::CartesianIndex{3}) = getindex(arg,ci[1],ci[2],ci[3]) +getindex(arg::SymFourthOrderTensorValue,ci::CartesianIndex{4}) = getindex(arg,ci[1],ci[2],ci[3],ci[4]) -@inline iterate(a::MultiValue) = iterate(a.array) +getindex(arg::MultiValue, i::Integer) = getindex(arg, CartesianIndices(arg)[i]) +getindex(arg::TensorValue, i::Integer) = arg.data[i] +getindex(arg::ThirdOrderTensorValue, i::Integer) = arg.data[i] -@inline iterate(a::MultiValue, state) = iterate(a.array, state) +@inline iterate(arg::MultiValue) = iterate(arg.data) +@inline iterate(arg::MultiValue, state) = iterate(arg.data, state) -eachindex(a::MultiValue) = eachindex(a.array) +data_index(::Type{<:VectorValue},i) = i +data_index(::Type{<:TensorValue{D}},i,j) where D = _2d_tensor_linear_index(D,i,j) +data_index(::Type{<:SymTensorValue{D}},i,j) where D = _2d_sym_tensor_linear_index(D,i,j) +data_index(::Type{<:ThirdOrderTensorValue{D1,D2}},i,j,k) where {D1,D2} = _3d_tensor_linear_index(D1,D2,i,j,k) +data_index(::Type{<:SymFourthOrderTensorValue{D}},i,j,k,l) where D = _4d_sym_tensor_linear_index(D,i,j,k,l) -function CartesianIndices(a::MultiValue) - CartesianIndices(a.array) +_symmetric_index_gaps(i::Integer) = i*(i-1)÷2 + +_2d_tensor_linear_index(D,i,j) = ((j-1)*D)+i + +_3d_tensor_linear_index(D1,D2,i,j,k) = (k-1)*D1*D2+(j-1)*D1+i + +function _2d_sym_tensor_linear_index(D,i,j) + _j=min(i,j) + _i=max(i,j) + index=_2d_tensor_linear_index(D,_i,_j)-_symmetric_index_gaps(_j) + index end -function LinearIndices(a::MultiValue) - LinearIndices(a.array) +#function _4d_sym_tensor_linear_index(D,i,j,k,l) +# _j=min(i,j) +# _i=max(i,j) +# _l=min(k,l) +# _k=max(k,l) +# block_length=_symmetric_index_gaps(D+1) +# element_index=_2d_tensor_linear_index(D,_i,_j)-_symmetric_index_gaps(_j) +# block_index=_2d_tensor_linear_index(D,_l,_k)-_symmetric_index_gaps(_l) +# index=(block_index-1)*block_length+element_index +# index +#end + +function _4d_sym_tensor_linear_index(D,i,j,k,l) + block_length = (D*(D+1))÷2 + block_index = _2d_sym_tensor_linear_index(D,i,j) + element_index = _2d_sym_tensor_linear_index(D,k,l) + index=(block_index-1)*block_length+element_index + index end + diff --git a/src/TensorValues/MultiValueTypes.jl b/src/TensorValues/MultiValueTypes.jl new file mode 100644 index 000000000..7f7ed8e49 --- /dev/null +++ b/src/TensorValues/MultiValueTypes.jl @@ -0,0 +1,40 @@ 
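A quick check of the linear-index helpers introduced in Indexing.jl above (illustrative only, not part of the patch; these are internal helpers, shown here for the 3x3 case):

_2d_tensor_linear_index(3, 2, 3)      # == 8, column-major position of entry (2,3) in a 3x3 tensor
_2d_sym_tensor_linear_index(3, 1, 2)  # == 2, packed position of entry (1,2)
_2d_sym_tensor_linear_index(3, 2, 1)  # == 2 as well: (2,1) and (1,2) share the stored component
# packed order for D=3: (1,1)->1, (1,2)->2, (1,3)->3, (2,2)->4, (2,3)->5, (3,3)->6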
+############################################################### +# MultiValue Type +############################################################### + +""" +Type representing a multi-dimensional value +""" +abstract type MultiValue{S,T,N,L} <: Number end + +@inline Base.Tuple(arg::MultiValue) = arg.data + +# Custom type printing + +function show(io::IO,v::MultiValue) + print(io,v.data) +end + +function show(io::IO,::MIME"text/plain",v:: MultiValue) + print(io,typeof(v)) + print(io,v.data) +end + +############################################################### +# Other constructors and conversions implemented for more generic types +############################################################### + +change_eltype(::Type{<:Number},::Type{T}) where {T} = T +change_eltype(::Number,::Type{T2}) where {T2} = change_eltype(Number,T2) + +num_components(::Type{<:Number}) = 1 +num_components(::Number) = num_components(Number) + +function n_components(a) + msg = "Function n_components has been removed, use num_components instead" + error(msg) +end + +function data_index(::Type{<:MultiValue},i...) + @abstractmethod +end diff --git a/src/TensorValues/Operations.jl b/src/TensorValues/Operations.jl index 6c7a3c799..ed965f828 100644 --- a/src/TensorValues/Operations.jl +++ b/src/TensorValues/Operations.jl @@ -1,17 +1,10 @@ - +############################################################### # Comparison +############################################################### -function (==)(a::MultiValue,b::MultiValue) - a.array == b.array -end - -function (≈)(a::MultiValue,b::MultiValue) - a.array ≈ b.array -end - -function (≈)(a::VectorValue{0},b::VectorValue{0}) - true -end +(==)(a::MultiValue,b::MultiValue) = a.data == b.data +(≈)(a::MultiValue,b::MultiValue) = isapprox(get_array(a), get_array(b)) +(≈)(a::MultiValue{S,T1,N,0} where T1,b::MultiValue{S,T2,N,0} where T2) where {S,N} = true function (≈)( a::AbstractArray{<:MultiValue}, b::AbstractArray{<:MultiValue}) @@ -22,8 +15,8 @@ function (≈)( true end -function Base.isless(a::VectorValue{N},b::VectorValue{N}) where N - for d in N:-1:1 +function isless(a::MultiValue{Tuple{L}},b::MultiValue{Tuple{L}}) where L + for d in L:-1:1 if a[d] < b[d] return true elseif a[d] > b[d] @@ -35,84 +28,196 @@ function Base.isless(a::VectorValue{N},b::VectorValue{N}) where N false end +isless(a::Number,b::MultiValue) where {D,T} = all(a .< b.data) + +############################################################### # Addition / subtraction +############################################################### for op in (:+,:-) @eval begin - function ($op)(a::MultiValue{S}) where S - r = $op(a.array) - MultiValue(r) + function ($op)(a::T) where {T<:MultiValue} + r = map($op, a.data) + T(r) + end + + function ($op)(a::MultiValue{S},b::MultiValue{S}) where S + r = broadcast(($op), a.data, b.data) + T = change_eltype(a,eltype(r)) + T(r) end - function ($op)(a::MultiValue{S},b::MultiValue{S}) where S - r = $op(a.array, b.array) - MultiValue(r) + function ($op)(a::TensorValue,b::SymTensorValue) + @notimplemented + end + + function ($op)(a::SymTensorValue,b::TensorValue) + @notimplemented end end end +############################################################### # Matrix Division +############################################################### -function (\)(a::TensorValue, b::MultiValue) - r = a.array \ b.array - MultiValue(r) +function (\)(a::MultiValue{Tuple{D,D}} where D, b::MultiValue) + r = get_array(a) \ get_array(b) + T = change_eltype(b,eltype(r)) + T(r) end 
+############################################################### # Operations with other numbers +############################################################### for op in (:+,:-,:*) @eval begin - ($op)(a::MultiValue,b::Number) = MultiValue($op.(a.array,b)) - ($op)(a::Number,b::MultiValue) = MultiValue($op.(a,b.array)) + function ($op)(a::MultiValue,b::Number) + r = broadcast($op,a.data,b) + T = change_eltype(a,eltype(r)) + T(r) + end + + function ($op)(a::Number,b::MultiValue) + r = broadcast($op,a,b.data) + T = change_eltype(b,eltype(r)) + T(r) + end end end -(/)(a::MultiValue,b::Number) = MultiValue(a.array/b) +function (/)(a::MultiValue,b::Number) + r = broadcast(/,a.data,b) + P = change_eltype(a,eltype(r)) + P(r) +end +############################################################### # Dot product (simple contraction) +############################################################### + +function (*)(a::MultiValue, b::MultiValue) + msg = """ + Method (*)(::$(typeof(a)),::$(typeof(b))) has been removed. + Depending the case, use simple contraction dot aka ⋅ (\\cdot) or full contraction inner aka ⊙ (\\odot) instead. + """ + error(msg) + #dot(a,b) +end + +dot(a::MultiValue{Tuple{D}}, b::MultiValue{Tuple{D}}) where D = inner(a,b) -(*)(a::VectorValue{D}, b::VectorValue{D}) where D = inner(a,b) +dot(a::MultiValue,b::MultiValue) = @notimplemented -function (*)(a::MultiValue,b::MultiValue) - r = a.array * b.array - MultiValue(r) +@generated function dot(a::A,b::B) where {A<:MultiValue{Tuple{D1}},B<:MultiValue{Tuple{D1,D2}}} where {D1,D2} + ss = String[] + for j in 1:D2 + s = "" + for i in 1:D1 + ak = data_index(A,i) + bk = data_index(B,i,j) + s *= "a.data[$ak]*b.data[$bk]+" + end + push!(ss,s[1:(end-1)]*", ") + end + str = join(ss) + Meta.parse("VectorValue{$D2}($str)") end -@generated function (*)(a::VectorValue{D}, b::TensorValue{D}) where D - ss = String[] - for j in 1:D - s = join([ "a.array[$i]*b.array[$i,$j]+" for i in 1:D]) - push!(ss,s[1:(end-1)]*", ") - end - str = join(ss) - Meta.parse("VectorValue($str)") +@generated function dot(a::A,b::B) where {A<:MultiValue{Tuple{D1,D2}},B<:MultiValue{Tuple{D2}}} where {D1,D2} + ss = String[] + for i in 1:D1 + s = "" + for j in 1:D2 + ak = data_index(A,i,j) + bk = data_index(B,j) + s *= "a.data[$ak]*b.data[$bk]+" + end + push!(ss,s[1:(end-1)]*", ") + end + str = join(ss) + Meta.parse("VectorValue{$D1}($str)") end -@inline dot(u::VectorValue,v::VectorValue) = inner(u,v) +@generated function dot(a::MultiValue{Tuple{D1,D3}}, b::MultiValue{Tuple{D3,D2}}) where {D1,D2,D3} + ss = String[] + for j in 1:D2 + for i in 1:D1 + s = join([ "a[$i,$k]*b[$k,$j]+" for k in 1:D3]) + push!(ss,s[1:(end-1)]*", ") + end + end + str = join(ss) + Meta.parse("TensorValue{$D1,$D2}(($str))") +end -@inline dot(u::TensorValue,v::VectorValue) = u*v +# Double contraction -@inline dot(u::VectorValue,v::TensorValue) = u*v +#(::Colon)(a::MultiValue{Tuple{D1,D2}},b::MultiValue{Tuple{D1,D2}}) where {D1,D2} = inner(a,b) +#(::Colon)(a::MultiValue{Tuple{D1,D2}},b::MultiValue{Tuple{D1,D2,D3,D4}}) where {D1,D2,D3,D4} = inner(a,b) +#(::Colon)(a::MultiValue{Tuple{D1,D2,D3,D4}},b::MultiValue{Tuple{D1,D2}}) where {D1,D2,D3,D4} = inner(a,b) +############################################################### # Inner product (full contraction) +############################################################### inner(a::Real,b::Real) = a*b -""" -""" -@generated function inner(a::MultiValue{S,T,N,L}, b::MultiValue{S,W,N,L}) where {S,T,N,L,W} - str = join([" a.array.data[$i]*b.array.data[$i] +" for i in 
1:L ]) +function inner(a::MultiValue, b::MultiValue) + @notimplemented +end + +@generated function inner(a::MultiValue{S}, b::MultiValue{S}) where S + str = join([" a[$i]*b[$i] +" for i in 1:length(a) ]) + Meta.parse(str[1:(end-1)]) +end + +@generated function inner(a::SymTensorValue{D}, b::SymTensorValue{D}) where D + str = "" + for i in 1:D + for j in 1:D + k = data_index(a,i,j) + str *= " a.data[$k]*b.data[$k] +" + end + end Meta.parse(str[1:(end-1)]) end +@generated function inner(a::SymFourthOrderTensorValue{D}, b::SymTensorValue{D}) where D + str = "" + for i in 1:D + for j in i:D + s = "" + for k in 1:D + for l in 1:D + ak = data_index(a,i,j,k,l) + bk = data_index(b,k,l) + s *= " a.data[$ak]*b.data[$bk] +" + end + end + str *= s[1:(end-1)]*", " + end + end + Meta.parse("SymTensorValue{D}($str)") +end + +function inner(a::SymFourthOrderTensorValue{D},b::MultiValue{Tuple{D,D}}) where D + inner(a,symmetric_part(b)) +end + +const ⊙ = inner + +############################################################### # Reductions +############################################################### for op in (:sum,:maximum,:minimum) - @eval begin - $op(a::MultiValue) = $op(a.array) - end + @eval begin + $op(a::MultiValue) = $op(a.data) + end end # Outer product (aka dyadic product) @@ -120,40 +225,109 @@ end """ """ outer(a::Real,b::Real) = a*b - outer(a::MultiValue,b::Real) = a*b - outer(a::Real,b::MultiValue) = a*b -@generated function outer(a::VectorValue{D},b::VectorValue{Z}) where {D,Z} - str = join(["a.array[$i]*b.array[$j], " for j in 1:Z for i in 1:D]) - Meta.parse("MultiValue(SMatrix{$D,$Z}($str))") +function outer(a::MultiValue,b::MultiValue) + @notimplemented +end + +@generated function outer(a::MultiValue{Tuple{D}},b::MultiValue{Tuple{Z}}) where {D,Z} + str = join(["a[$i]*b[$j], " for j in 1:Z for i in 1:D]) + Meta.parse("TensorValue{$D,$Z}($str)") +end + +@generated function outer(a::MultiValue{Tuple{D}},b::MultiValue{Tuple{D1,D2}}) where {D,D1,D2} + str = join(["a[$i]*b[$j,$k], " for k in 1:D2 for j in 1:D1 for i in 1:D]) + Meta.parse("ThirdOrderTensorValue{D,D1,D2}($str)") end -@generated function outer(a::VectorValue{D},b::MultiValue{Tuple{A,B}}) where {D,A,B} - str = join(["a.array[$i]*b.array[$j,$k], " for k in 1:B for j in 1:A for i in 1:D]) - Meta.parse("MultiValue(SArray{Tuple{$D,$A,$B}}($str))") +@generated function outer(a::SymTensorValue{D},b::SymTensorValue{D}) where D + str = "" + for i in 1:D + for j in i:D + ak = data_index(a,i,j) + for k in 1:D + for l in k:D + bk = data_index(b,k,l) + str *= "a.data[$ak]*b.data[$bk], " + end + end + end + end + Meta.parse("SymFourthOrderTensorValue{D}($str)") end +const ⊗ = outer + +############################################################### # Linear Algebra +############################################################### + +det(a::MultiValue{Tuple{D1,D2}}) where {D1,D2} = det(get_array(a)) + +det(a::MultiValue{Tuple{1,1}}) = a[1] -det(a::TensorValue) = det(a.array) +function det(a::MultiValue{Tuple{2,2}}) + a_11 = a[1,1]; a_12 = a[1,2] + a_21 = a[2,1]; a_22 = a[2,2] + a_11*a_22 - a_12*a_21 +end + +function det(a::MultiValue{Tuple{3,3}}) + a_11 = a[1,1]; a_12 = a[1,2]; a_13 = a[1,3] + a_21 = a[2,1]; a_22 = a[2,2]; a_23 = a[2,3] + a_31 = a[3,1]; a_32 = a[3,2]; a_33 = a[3,3] + a_11*a_22*a_33 + a_12*a_23*a_31 + a_13*a_21*a_32 - + (a_11*a_23*a_32 + a_12*a_21*a_33 + a_13*a_22*a_31) +end -inv(a::TensorValue) = MultiValue(inv(a.array)) +inv(a::MultiValue{Tuple{D1,D2}}) where {D1,D2} = TensorValue(inv(get_array(a))) +function 
inv(a::MultiValue{Tuple{1,1}}) + r = 1/a[1] + T = change_eltype(a,typeof(r)) + T(r) +end + +function inv(a::MultiValue{Tuple{2,2}}) + c = 1/det(a) + data = (a[2,2]*c, -a[2,1]*c, -a[1,2]*c, a[1,1]*c) + TensorValue{2}(data) +end + +function inv(a::MultiValue{Tuple{3,3}}) + a_11 = a[1,1]; a_12 = a[1,2]; a_13 = a[1,3] + a_21 = a[2,1]; a_22 = a[2,2]; a_23 = a[2,3] + a_31 = a[3,1]; a_32 = a[3,2]; a_33 = a[3,3] + c = 1/det(a) + data = ( + ( a_22*a_33 - a_23*a_32 )*c, + -( a_21*a_33 - a_23*a_31 )*c, + ( a_21*a_32 - a_22*a_31 )*c, + -( a_12*a_33 - a_13*a_32 )*c, + ( a_11*a_33 - a_13*a_31 )*c, + -( a_11*a_32 - a_12*a_31 )*c, + ( a_12*a_23 - a_13*a_22 )*c, + -( a_11*a_23 - a_13*a_21 )*c, + ( a_11*a_22 - a_12*a_21 )*c) + TensorValue{3}(data) +end + +############################################################### # Measure +############################################################### """ """ -meas(a::VectorValue) = sqrt(inner(a,a)) - -meas(a::TensorValue) = abs(det(a)) +meas(a::MultiValue{Tuple{D}}) where D = sqrt(inner(a,a)) +meas(a::MultiValue{Tuple{D,D}}) where D = abs(det(a)) function meas(v::MultiValue{Tuple{1,2}}) n1 = v[1,2] n2 = -1*v[1,1] n = VectorValue(n1,n2) - sqrt(n*n) + sqrt(n ⋅ n) end function meas(v::MultiValue{Tuple{2,3}}) @@ -161,98 +335,119 @@ function meas(v::MultiValue{Tuple{2,3}}) n2 = v[1,3]*v[2,1] - v[1,1]*v[2,3] n3 = v[1,1]*v[2,2] - v[1,2]*v[2,1] n = VectorValue(n1,n2,n3) - sqrt(n*n) + sqrt(n ⋅ n) end -@inline norm(u::VectorValue) = sqrt(inner(u,u)) - -@inline norm(u::VectorValue{0,T}) where T = sqrt(zero(T)) +@inline norm(u::MultiValue{Tuple{D}}) where D = sqrt(inner(u,u)) +@inline norm(u::MultiValue{Tuple{0},T}) where T = sqrt(zero(T)) +############################################################### # conj +############################################################### -conj(a::MultiValue) = MultiValue(conj(a.array)) +function conj(a::T) where {T<:MultiValue} + r = map(conj, a.data) + T(r) +end +############################################################### # Trace +############################################################### -@generated function tr(v::TensorValue{D}) where D - str = join([" v.array.data[$i+$((i-1)*D)] +" for i in 1:D ]) - Meta.parse(str[1:(end-1)]) +@generated function tr(v::MultiValue{Tuple{D,D}}) where D + str = join([" v[$i,$i] +" for i in 1:D ]) + Meta.parse(str[1:(end-1)]) end @generated function tr(v::MultiValue{Tuple{A,A,B}}) where {A,B} + lis = LinearIndices((A,A,B)) str = "" for k in 1:B for i in 1:A if i !=1 str *= " + " end - str *= " v.array[$i,$i,$k]" + p = lis[i,i,k] + str *= " v.data[$p]" end str *= ", " end Meta.parse("VectorValue($str)") end +############################################################### # Adjoint and transpose +############################################################### + +adjoint(a::MultiValue{Tuple{D,D}}) where D = @notimplemented +transpose(a::MultiValue{Tuple{D,D}}) where D = @notimplemented + +@generated function adjoint(a::TensorValue{D1,D2}) where {D1,D2} + str = "" + for i in 1:D1 + for j in 1:D2 + k = (j-1)*D1 + i + str *= "conj(a.data[$k]), " + end + end + Meta.parse("TensorValue{D2,D1}($str)") +end -function adjoint(v::TensorValue) - t = adjoint(v.array) - TensorValue(t) +@generated function transpose(a::TensorValue{D1,D2}) where {D1,D2} + str = "" + for i in 1:D1 + for j in 1:D2 + k = (j-1)*D1 + i + str *= "a.data[$k], " + end + end + Meta.parse("TensorValue{D2,D1}($str)") end -function transpose(v::TensorValue) - t = transpose(v.array) - TensorValue(t) +@inline function 
adjoint(a::TensorValue{D1,D2,T}) where {D1,D2,T<:Real} + transpose(a) end +adjoint(a::SymTensorValue) = conj(a) + +@inline adjoint(a::SymTensorValue{D,T} where {D,T<:Real}) = transpose(a) + +transpose(a::SymTensorValue) = a + +############################################################### # Symmetric part +############################################################### """ """ -@generated function symmetric_part(v::TensorValue{D}) where D - str = "(" - for j in 1:D - for i in 1:D - str *= "0.5*v.array.data[$i+$((j-1)*D)] + 0.5*v.array.data[$j+$((i-1)*D)], " +@generated function symmetric_part(v::MultiValue{Tuple{D,D}}) where D + str = "(" + for j in 1:D + for i in j:D + str *= "0.5*v[$i,$j] + 0.5*v[$j,$i], " + end end - end - str *= ")" - Meta.parse("TensorValue($str)") + str *= ")" + Meta.parse("SymTensorValue{D}($str)") end +############################################################### # Define new operations for Gridap types +############################################################### for op in (:symmetric_part,) - @eval begin - function ($op)(a::GridapType) - operate($op,a) + @eval begin + ($op)(a::GridapType) = operate($op,a) end - end end -for op in (:inner,:outer) - @eval begin - - function ($op)(a::GridapType,b::GridapType) - operate($op,a,b) - end - - function ($op)(a::GridapType,b::Number) - operate($op,a,b) - end - - function ($op)(a::Number,b::GridapType) - operate($op,a,b) +for op in (:inner,:outer)#,:(:)) + @eval begin + ($op)(a::GridapType,b::GridapType) = operate($op,a,b) + ($op)(a::GridapType,b::Number) = operate($op,a,b) + ($op)(a::Number, b::GridapType) = operate($op,a,b) + ($op)(a::GridapType,b::Function) = operate($op,a,b) + ($op)(a::Function, b::GridapType) = operate($op,a,b) end - - function ($op)(a::GridapType,b::Function) - operate($op,a,b) - end - - function ($op)(a::Function,b::GridapType) - operate($op,a,b) - end - - end end diff --git a/src/TensorValues/Reinterpret.jl b/src/TensorValues/Reinterpret.jl index efa74283f..112011a89 100644 --- a/src/TensorValues/Reinterpret.jl +++ b/src/TensorValues/Reinterpret.jl @@ -1,11 +1,17 @@ -function reinterpret(a::Array{MultiValue{S,T,N,L}}) where {S,T,N,L} +function reinterpret(a::Array{VectorValue{D,T}}) where {D,T} b = reinterpret(T,a) sa = size(a) - sv = Size(S) - t = _Size_to_tuple(sv) + t = size(VectorValue{D,T}) + s = (t...,sa...) + reshape(b,s) +end + +function reinterpret(a::Array{TensorValue{D1,D2,T,L}}) where {D1,D2,T,L} + b = reinterpret(T,a) + sa = size(a) + t = size(TensorValue{D1,D2,T,L}) s = (t...,sa...) 
reshape(b,s) end -_Size_to_tuple(::Size{t}) where t = t diff --git a/src/TensorValues/SymFourthOrderTensorValueTypes.jl b/src/TensorValues/SymFourthOrderTensorValueTypes.jl new file mode 100644 index 000000000..a885e8afd --- /dev/null +++ b/src/TensorValues/SymFourthOrderTensorValueTypes.jl @@ -0,0 +1,99 @@ +############################################################### +# SymTensorValue Type +############################################################### + +""" +Type representing a symmetric fourth-order tensor +""" +struct SymFourthOrderTensorValue{D,T,L} <: MultiValue{Tuple{D,D,D,D},T,4,L} + data::NTuple{L,T} + function SymFourthOrderTensorValue{D,T}(data::NTuple{L,T}) where {D,T,L} + @assert L == (D*(D+1)/2)^2 + new{D,T,L}(data) + end +end + +############################################################### +# Constructors (SymTensorValue) +############################################################### + +# Empty SymTensorValue constructor + +SymFourthOrderTensorValue() = SymFourthOrderTensorValue{0,Int}(NTuple{0,Int}()) +SymFourthOrderTensorValue{0}() where {T} = SymFourthOrderTensorValue{0,Int}(NTuple{0,Int}()) +SymFourthOrderTensorValue{0,T}() where {T} = SymFourthOrderTensorValue{0,T}(NTuple{0,T}()) +SymFourthOrderTensorValue(data::NTuple{0}) = SymFourthOrderTensorValue{0,Int}(data) +SymFourthOrderTensorValue{0}(data::NTuple{0}) = SymFourthOrderTensorValue{0,Int}(data) + +# SymTensorValue single NTuple argument constructor + +@generated function SymFourthOrderTensorValue(data::NTuple{L,T}) where {L,T} + D = Int( (sqrt(1+8*sqrt(L))-1)/2 ) + quote + SymFourthOrderTensorValue{$D,T}(data) + end +end +SymFourthOrderTensorValue{D}(data::NTuple{L,T}) where {D,L,T} = SymFourthOrderTensorValue{D,T}(data) +SymFourthOrderTensorValue{D,T1}(data::NTuple{L,T2}) where {D,L,T1,T2} = SymFourthOrderTensorValue{D,T1}(NTuple{L,T1}(data)) +SymFourthOrderTensorValue{D,T1,L}(data::NTuple{L,T2}) where {D,L,T1,T2} = SymFourthOrderTensorValue{D,T1}(NTuple{L,T1}(data)) + +# SymTensorValue Vararg constructor + +SymFourthOrderTensorValue(data::T...) where {T} = SymFourthOrderTensorValue(data) +SymFourthOrderTensorValue{D}(data::T...) where {D,T} = SymFourthOrderTensorValue{D}(data) +SymFourthOrderTensorValue{D,T1}(data::T2...) 
where {D,T1,T2} = SymFourthOrderTensorValue{D,T1}(data) + +############################################################### +# Conversions (SymTensorValue) +############################################################### + +# Direct conversion +convert(::Type{<:SymFourthOrderTensorValue{D,T}}, arg::Tuple) where {D,T} = SymFourthOrderTensorValue{D,T}(arg) + +# Inverse conversion +convert(::Type{<:NTuple{L,T}}, arg::SymFourthOrderTensorValue) where {L,T} = NTuple{L,T}(Tuple(arg)) + +# Internal conversion +convert(::Type{<:SymFourthOrderTensorValue{D,T}}, arg::SymFourthOrderTensorValue{D}) where {D,T} = SymFourthOrderTensorValue{D,T}(Tuple(arg)) +convert(::Type{<:SymFourthOrderTensorValue{D,T}}, arg::SymFourthOrderTensorValue{D,T}) where {D,T} = arg + +############################################################### +# Other constructors and conversions (SymTensorValue) +############################################################### + +@generated function zero(::Type{<:SymFourthOrderTensorValue{D,T}}) where {D,T} + L=Int((D*(D+1)/2)^2) + quote + SymFourthOrderTensorValue{D,T}(tfill(zero(T),Val{$L}())) + end +end +zero(::Type{<:SymFourthOrderTensorValue{D,T,L}}) where {D,T,L} = SymFourthOrderTensorValue{D,T}(tfill(zero(T),Val{L}())) +zero(::SymFourthOrderTensorValue{D,T,L}) where {D,T,L} = zero(SymFourthOrderTensorValue{D,T,L}) + +# This is in fact the "symmetrized" 4th order identity +@generated function one(::Type{<:SymFourthOrderTensorValue{D,T}}) where {D,T} + S = typeof(one(T)/2) + str = join(["($i==$k && $j==$l) ? ( $i==$j ? one($S) : one(T)/2) : zero($S), " for i in 1:D for j in i:D for k in 1:D for l in k:D]) + Meta.parse("SymFourthOrderTensorValue{D,$S}(($str))") +end +one(::SymFourthOrderTensorValue{D,T}) where {D,T} = one(SymFourthOrderTensorValue{D,T}) + +change_eltype(::Type{SymFourthOrderTensorValue{D,T1,L}},::Type{T2}) where {D,T1,T2,L} = SymFourthOrderTensorValue{D,T2,L} +change_eltype(::SymFourthOrderTensorValue{D,T1,L},::Type{T2}) where {D,T1,T2,L} = change_eltype(SymFourthOrderTensorValue{D,T1,L},T2) + +############################################################### +# Introspection (SymTensorValue) +############################################################### + +eltype(::Type{<:SymFourthOrderTensorValue{D,T}}) where {D,T} = T +eltype(::SymFourthOrderTensorValue{D,T}) where {D,T} = eltype(SymFourthOrderTensorValue{D,T}) + +size(::Type{<:SymFourthOrderTensorValue{D}}) where {D} = (D,D,D,D) +size(::SymFourthOrderTensorValue{D}) where {D} = size(SymFourthOrderTensorValue{D}) + +length(::Type{<:SymFourthOrderTensorValue{D}}) where {D} = D*D*D*D +length(::SymFourthOrderTensorValue{D}) where {D} = length(SymFourthOrderTensorValue{D}) + +num_components(::Type{<:SymFourthOrderTensorValue{D}}) where {D} = length(SymFourthOrderTensorValue{D}) +num_components(::SymFourthOrderTensorValue{D}) where {D} = num_components(SymFourthOrderTensorValue{D}) + diff --git a/src/TensorValues/SymTensorValueTypes.jl b/src/TensorValues/SymTensorValueTypes.jl new file mode 100644 index 000000000..168a3eb97 --- /dev/null +++ b/src/TensorValues/SymTensorValueTypes.jl @@ -0,0 +1,135 @@ +############################################################### +# SymTensorValue Type +############################################################### + +""" +Type representing a symmetric second-order tensor +""" +struct SymTensorValue{D,T,L} <: MultiValue{Tuple{D,D},T,2,L} + data::NTuple{L,T} + function SymTensorValue{D,T}(data::NTuple{L,T}) where {D,T,L} + @assert L == D*(D+1)/2 + new{D,T,L}(data) + end +end + 
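A small usage sketch of this packed storage (illustrative only, not part of the patch), relying on the indexing rules defined in Indexing.jl:

s = SymTensorValue(11, 12, 13, 22, 23, 33)  # D = 3; the upper triangle is stored row by row
s[1,2] == s[2,1] == 12                      # both index orders resolve to the same stored entry
num_components(s)                           # == 9, the size of the full 3x3 view (6 values stored)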
+############################################################### +# Constructors (SymTensorValue) +############################################################### + +# Empty SymTensorValue constructor + +SymTensorValue() = SymTensorValue{0,Int}(NTuple{0,Int}()) +SymTensorValue{0}() where {T} = SymTensorValue{0,Int}(NTuple{0,Int}()) +SymTensorValue{0,T}() where {T} = SymTensorValue{0,T}(NTuple{0,T}()) +SymTensorValue(data::NTuple{0}) = SymTensorValue{0,Int}(data) +SymTensorValue{0}(data::NTuple{0}) = SymTensorValue{0,Int}(data) + +# SymTensorValue single NTuple argument constructor + +@generated function SymTensorValue(data::NTuple{L,T}) where {L,T} + D = Int( (sqrt(1+8*L)-1)/2 ) + quote + SymTensorValue{$D,T}(data) + end +end +SymTensorValue{D}(data::NTuple{L,T}) where {D,L,T} = SymTensorValue{D,T}(data) +SymTensorValue{D,T1}(data::NTuple{L,T2}) where {D,L,T1,T2} = SymTensorValue{D,T1}(NTuple{L,T1}(data)) +SymTensorValue{D,T1,L}(data::NTuple{L,T2}) where {D,L,T1,T2} = SymTensorValue{D,T1}(NTuple{L,T1}(data)) + +# SymTensorValue Vararg constructor + +SymTensorValue(data::T...) where {T} = SymTensorValue(data) +SymTensorValue{D}(data::T...) where {D,T} = SymTensorValue{D}(data) +SymTensorValue{D,T1}(data::T2...) where {D,T1,T2} = SymTensorValue{D,T1}(data) + +# SymTensorValue single AbstractMatrix argument constructor + +#From Square Matrices +@generated function _flatten_upper_triangle(data::AbstractArray,::Val{D}) where D + str = "" + for i in 1:D + for j in i:D + str *= "data[i,j], " + end + end + Meta.parse("($str)") +end + +SymTensorValue(data::AbstractMatrix{T}) where {T} = ((D1,D2)=size(data); SymTensorValue{D1}(data)) +SymTensorValue{D}(data::AbstractMatrix{T}) where {D,T} = SymTensorValue{D,T}(_flatten_upper_triangle(data,Val{D}())) +SymTensorValue{D,T1}(data::AbstractMatrix{T2}) where {D,T1,T2} = SymTensorValue{D,T1}(_flatten_upper_triangle(data,Val{D}())) +SymTensorValue{D,T1,L}(data::AbstractMatrix{T2}) where {D,T1,T2,L} = SymTensorValue{D,T1,L}(_flatten_upper_triangle(data,Val{D}())) + +############################################################### +# Conversions (SymTensorValue) +############################################################### + +@generated function _SymTensorValue_to_array(arg::SymTensorValue{D,T,L}) where {D,T,L} + str = "" + for j in 1:D + for i in 1:D + p = _2d_sym_tensor_linear_index(D,i,j) + str *= "arg.data[$p], " + end + end + Meta.parse("SMatrix{D,D,T}(($str))") +end + +# Direct conversion +convert(::Type{<:SymTensorValue{D,T}}, arg::AbstractArray) where {D,T} = SymTensorValue{D,T}(arg) +convert(::Type{<:SymTensorValue{D,T}}, arg::Tuple) where {D,T} = SymTensorValue{D,T}(arg) + +# Inverse conversion +convert(::Type{<:MMatrix{D,D,T}}, arg::SymTensorValue) where {D,T} = MMatrix{D,D,T}(_SymTensorValue_to_array(arg)) +convert(::Type{<:SMatrix{D,D,T}}, arg::SymTensorValue) where {D,T} = _SymTensorValue_to_array(arg) +convert(::Type{<:NTuple{L,T}}, arg::SymTensorValue) where {L,T} = NTuple{L,T}(Tuple(arg)) + +# Internal conversion +convert(::Type{<:SymTensorValue{D,T}}, arg::SymTensorValue{D}) where {D,T} = SymTensorValue{D,T}(Tuple(arg)) +convert(::Type{<:SymTensorValue{D,T}}, arg::SymTensorValue{D,T}) where {D,T} = arg + +############################################################### +# Other constructors and conversions (SymTensorValue) +############################################################### + +@generated function zero(::Type{<:SymTensorValue{D,T}}) where {D,T} + L=Int(D*(D+1)/2) + quote + SymTensorValue{D,T}(tfill(zero(T),Val{$L}())) + end +end 
+ +zero(::Type{<:SymTensorValue{D,T,L}}) where {D,T,L} = SymTensorValue{D,T}(tfill(zero(T),Val{L}())) +zero(::SymTensorValue{D,T,L}) where {D,T,L} = zero(SymTensorValue{D,T,L}) + +@generated function one(::Type{<:SymTensorValue{D,T}}) where {D,T} + str = join(["$i==$j ? one(T) : zero(T), " for i in 1:D for j in i:D]) + Meta.parse("SymTensorValue{D,T}(($str))") +end +one(::SymTensorValue{D,T}) where {D,T} = one(SymTensorValue{D,T}) + +mutable(::Type{<:SymTensorValue{D,T}}) where {D,T} = MMatrix{D,D,T} +mutable(::SymTensorValue{D,T}) where {D,T} = mutable(SymTensorValue{D,T}) + +change_eltype(::Type{SymTensorValue{D,T1,L}},::Type{T2}) where {D,T1,T2,L} = SymTensorValue{D,T2,L} +change_eltype(::SymTensorValue{D,T1,L},::Type{T2}) where {D,T1,T2,L} = change_eltype(SymTensorValue{D,T1,L},T2) + +get_array(arg::SymTensorValue{D,T,L}) where {D,T,L} = convert(SMatrix{D,D,T}, arg) + +############################################################### +# Introspection (SymTensorValue) +############################################################### + +eltype(::Type{<:SymTensorValue{D,T}}) where {D,T} = T +eltype(::SymTensorValue{D,T}) where {D,T} = eltype(SymTensorValue{D,T}) + +size(::Type{<:SymTensorValue{D}}) where {D} = (D,D) +size(::SymTensorValue{D}) where {D} = size(SymTensorValue{D}) + +length(::Type{<:SymTensorValue{D}}) where {D} = D*D +length(::SymTensorValue{D}) where {D} = length(SymTensorValue{D}) + +num_components(::Type{<:SymTensorValue{D}}) where {D} = length(SymTensorValue{D}) +num_components(::SymTensorValue{D}) where {D} = num_components(SymTensorValue{D}) + diff --git a/src/TensorValues/TensorValueTypes.jl b/src/TensorValues/TensorValueTypes.jl new file mode 100644 index 000000000..aa323630d --- /dev/null +++ b/src/TensorValues/TensorValueTypes.jl @@ -0,0 +1,123 @@ +############################################################### +# TensorValue Type +############################################################### + +""" +Type representing a second-order tensor +""" +struct TensorValue{D1,D2,T,L} <: MultiValue{Tuple{D1,D2},T,2,L} + data::NTuple{L,T} + function TensorValue{D1,D2,T}(data::NTuple{L,T}) where {D1,D2,T,L} + @assert L == D1*D2 + new{D1,D2,T,L}(data) + end +end + +############################################################### +# Constructors +############################################################### + +# Empty TensorValue constructor + +TensorValue() = TensorValue{0,0,Int}(NTuple{0,Int}()) +TensorValue{0,0}() = TensorValue{0,0,Int}(NTuple{0,Int}()) +TensorValue{0,0,T}() where {T} = TensorValue{0,0,T}(NTuple{0,T}()) +TensorValue(data::NTuple{0}) = TensorValue{0,0,Int}(data) +TensorValue{0,0}(data::NTuple{0}) = TensorValue{0,0,Int}(data) + +# TensorValue single NTuple argument constructor + +@generated function TensorValue(data::NTuple{L,T}) where {L,T} + D = Int(sqrt(L)) + quote + TensorValue{$D,$D,T}(data) + end +end +TensorValue{D}(data::NTuple{L,T}) where {D,L,T} = TensorValue{D,D,T}(data) +TensorValue{D1,D2}(data::NTuple{L,T}) where {D1,D2,L,T} = TensorValue{D1,D2,T}(data) +TensorValue{D1,D2,T1}(data::NTuple{L,T2}) where {D1,D2,L,T1,T2} = TensorValue{D1,D2,T1}(NTuple{L,T1}(data)) +TensorValue{D1,D2,T1,L}(data::NTuple{L,T2}) where {D1,D2,L,T1,T2} = TensorValue{D1,D2,T1}(NTuple{L,T1}(data)) + +# TensorValue Vararg constructor + +TensorValue(data::T...) where {T} = TensorValue(data) +TensorValue{D}(data::T...) where {D,T} = TensorValue{D}(data) +TensorValue{D1,D2}(data::T...) where {D1,D2,T} = TensorValue{D1,D2,T}(data) +TensorValue{D1,D2,T1}(data::T2...) 
where {D1,D2,T1,T2} = TensorValue{D1,D2,T1}(data) +TensorValue{D1,D2,T1,L}(data::T2...) where {D1,D2,L,T1,T2} = TensorValue{D1,D2,T1}(data) + +# TensorValue single AbstractMatrix argument constructor + +TensorValue(data::AbstractMatrix{T}) where {T} = ((D1,D2)=size(data);L=length(data);TensorValue{D1,D2,T}(NTuple{L,T}(data))) +TensorValue{D}(data::AbstractMatrix{T}) where {D,T} = (L=length(data);TensorValue{D,D,T}(NTuple{L,T}(data))) +TensorValue{D1,D2}(data::AbstractMatrix{T}) where {D1,D2,T} = (L=length(data);TensorValue{D1,D2,T}(NTuple{L,T}(data))) +TensorValue{D1,D2,T1}(data::AbstractMatrix{T2}) where {D1,D2,T1,T2} = (L=length(data);TensorValue{D1,D2,T1}(NTuple{L,T1}(data))) +TensorValue{D1,D2,T1,L}(data::AbstractMatrix{T2}) where {D1,D2,T1,T2,L} = TensorValue{D1,D2,T1}(NTuple{L,T1}(data)) + +############################################################### +# Conversions (TensorValue) +############################################################### + +# Direct conversion +convert(::Type{<:TensorValue{D1,D2,T}}, arg::AbstractArray) where {D1,D2,T} = TensorValue{D1,D2,T}(arg) +convert(::Type{<:TensorValue{D1,D2,T}}, arg::Tuple) where {D1,D2,T} = TensorValue{D1,D2,T}(arg) + +# Inverse conversion +convert(::Type{<:SMatrix{D1,D2,T}}, arg::TensorValue) where {D1,D2,T} = SMatrix{D1,D2,T}(Tuple(arg)) +convert(::Type{<:MMatrix{D1,D2,T}}, arg::TensorValue) where {D1,D2,T} = MMatrix{D1,D2,T}(Tuple(arg)) +convert(::Type{<:NTuple{L,T1}}, arg::TensorValue) where {L,T1} = NTuple{L,T1}(Tuple(arg)) + +# Internal conversion +convert(::Type{<:TensorValue{D1,D2,T}}, arg::TensorValue{D1,D2}) where {D1,D2,T} = TensorValue{D1,D2,T}(Tuple(arg)) +convert(::Type{<:TensorValue{D1,D2,T}}, arg::TensorValue{D1,D2,T}) where {D1,D2,T} = arg + +############################################################### +# Other constructors and conversions (TensorValue) +############################################################### + +zero(::Type{<:TensorValue{D1,D2,T}}) where {D1,D2,T} = TensorValue{D1,D2,T}(tfill(zero(T),Val{D1*D2}())) +zero(::TensorValue{D1,D2,T}) where {D1,D2,T} = zero(TensorValue{D1,D2,T}) + +@generated function one(::Type{<:TensorValue{D1,D2,T}}) where {D1,D2,T} + str = join(["$i==$j ? 
one(T) : zero(T), " for i in 1:D1 for j in 1:D2]) + Meta.parse("TensorValue{D1,D2,T}(($str))") +end +one(::TensorValue{D1,D2,T}) where {D1,D2,T} = one(TensorValue{D1,D2,T}) + +mutable(::Type{<:TensorValue{D1,D2,T}}) where {D1,D2,T} = MMatrix{D1,D2,T} +mutable(::TensorValue{D1,D2,T}) where {D1,D2,T} = mutable(TensorValue{D1,D2,T}) + +change_eltype(::Type{TensorValue{D1,D2,T1,L}},::Type{T2}) where {D1,D2,T1,T2,L} = TensorValue{D1,D2,T2,L} +change_eltype(::TensorValue{D1,D2,T1,L},::Type{T2}) where {D1,D2,T1,T2,L} = change_eltype(TensorValue{D1,D2,T1,L},T2) + +get_array(arg::TensorValue{D1,D2,T}) where {D1,D2,T} = convert(SMatrix{D1,D2,T},arg) + +@generated function diagonal_tensor(v::VectorValue{D,T}) where {D,T} + s = ["zero(T), " for i in 1:(D*D)] + for i in 1:D + d = D*(i-1)+i + s[d] = "v.data[$i]," + end + str = join(s) + Meta.parse("TensorValue(($str))") +end + +############################################################### +# Introspection (TensorValue) +############################################################### + +eltype(::Type{<:TensorValue{D1,D2,T}}) where {D1,D2,T} = T +eltype(::TensorValue{D1,D2,T}) where {D1,D2,T} = eltype(TensorValue{D1,D2,T}) + +size(::Type{<:TensorValue{D}}) where {D} = (D,D) +size(::Type{<:TensorValue{D1,D2}}) where {D1,D2} = (D1,D2) +size(::TensorValue{D1,D2}) where {D1,D2} = size(TensorValue{D1,D2}) + +length(::Type{<:TensorValue{D}}) where {D} = length(TensorValue{D,D}) +length(::Type{<:TensorValue{D1,D2}}) where {D1,D2} = D1*D1 +length(::TensorValue{D1,D2}) where {D1,D2} = length(TensorValue{D1,D2}) + +num_components(::Type{<:TensorValue{D}}) where {D} = length(TensorValue{D,D}) +num_components(::Type{<:TensorValue{D1,D2}}) where {D1,D2} = length(TensorValue{D1,D2}) +num_components(::TensorValue{D1,D2}) where {D1,D2} = num_components(TensorValue{D1,D2}) + diff --git a/src/TensorValues/TensorValues.jl b/src/TensorValues/TensorValues.jl index aad5b7fdd..b588e7fc2 100644 --- a/src/TensorValues/TensorValues.jl +++ b/src/TensorValues/TensorValues.jl @@ -31,24 +31,32 @@ module TensorValues using DocStringExtensions using StaticArrays +using StaticArrays: SVector, MVector, SMatrix, MMatrix, SArray, MArray using Base: @propagate_inbounds, @pure using Gridap.Helpers +using Gridap.Arrays +using LinearAlgebra export MultiValue -export TensorValue export VectorValue +export TensorValue +export SymTensorValue +export SymFourthOrderTensorValue +export ThirdOrderTensorValue export inner, outer, meas -#export det, inv, tr, dot, norm export mutable export symmetric_part export n_components +export num_components export change_eltype export diagonal_tensor +export ⊙ +export ⊗ import Base: show import Base: zero, one -import Base: +, -, *, /, \, ==, ≈ +import Base: +, -, *, /, \, ==, ≈, isless import Base: conj import Base: sum, maximum, minimum import Base: getindex, iterate, eachindex @@ -59,10 +67,25 @@ import Base: CartesianIndices import Base: LinearIndices import Base: adjoint import Base: transpose +#import Base: : + +import LinearAlgebra: det, inv, tr, cross, dot, norm +# Reexport from LinearAlgebra (just for convenience) +export det, inv, tr, cross, dot, norm, ×, ⋅ + +import Gridap.Arrays: get_array + +include("MultiValueTypes.jl") + +include("VectorValueTypes.jl") + +include("TensorValueTypes.jl") + +include("SymTensorValueTypes.jl") -import LinearAlgebra: det, inv, tr, dot, norm +include("SymFourthOrderTensorValueTypes.jl") -include("Types.jl") +include("ThirdOrderTensorValueTypes.jl") include("Indexing.jl") diff --git 
a/src/TensorValues/ThirdOrderTensorValueTypes.jl b/src/TensorValues/ThirdOrderTensorValueTypes.jl new file mode 100644 index 000000000..6e00510cd --- /dev/null +++ b/src/TensorValues/ThirdOrderTensorValueTypes.jl @@ -0,0 +1,43 @@ + +""" +Type representing a third-order tensor +""" +struct ThirdOrderTensorValue{D1,D2,D3,T,L} <: MultiValue{Tuple{D1,D2,D3},T,3,L} + data::NTuple{L,T} + function ThirdOrderTensorValue{D1,D2,D3,T}(data::NTuple{L,T}) where {D1,D2,D3,T,L} + @assert L == D1*D2*D3 + new{D1,D2,D3,T,L}(data) + end +end + +# Empty ThirdOrderTensorValue constructor + +ThirdOrderTensorValue() = ThirdOrderTensorValue{0,0,0,Int}(NTuple{0,Int}()) +ThirdOrderTensorValue{0,0,0}() = ThirdOrderTensorValue{0,0,0,Int}(NTuple{0,Int}()) +ThirdOrderTensorValue{0,0,0,T}() where{T} = ThirdOrderTensorValue{0,0,0,T}(NTuple{0,T}()) +ThirdOrderTensorValue(data::NTuple{0}) = ThirdOrderTensorValue{0,0,0,Int}(data) +ThirdOrderTensorValue{0,0,0}(data::NTuple{0}) = ThirdOrderTensorValue{0,0,0,Int}(data) + +# ThirdOrderTensorValue single NTuple argument constructor + +@generated function ThirdOrderTensorValue(data::NTuple{L,T}) where {L,T} + D=Int(cbrt(L)) + quote + ThirdOrderTensorValue{$D,$D,$D,T}(data) + end +end +ThirdOrderTensorValue{D}(data::NTuple{L,T}) where {D,L,T} = ThirdOrderTensorValue{D,D,D,T}(data) +ThirdOrderTensorValue{D1,D2,D3}(data::NTuple{L,T}) where {D1,D2,D3,L,T} = ThirdOrderTensorValue{D1,D2,D3,T}(data) +ThirdOrderTensorValue{D1,D2,D3,T1}(data::NTuple{L,T2}) where {D1,D2,D3,L,T1,T2} = ThirdOrderTensorValue{D1,D2,D3,T1}(NTuple{L,T1}(data)) +ThirdOrderTensorValue{D1,D2,D3,T1,L}(data::NTuple{L,T2}) where {D1,D2,D3,L,T1,T2} = ThirdOrderTensorValue{D1,D2,D3,T1}(NTuple{L,T1}(data)) + +# ThirdOrderTensorValue Vararg constructor + +ThirdOrderTensorValue(data::T...) where {T} = ThirdOrderTensorValue(data) +ThirdOrderTensorValue{D}(data::T...) where {D,T} = ThirdOrderTensorValue{D}(data) +ThirdOrderTensorValue{D1,D2,D3}(data::T...) where {D1,D2,D3,T} = ThirdOrderTensorValue{D1,D2,D3}(data) +ThirdOrderTensorValue{D1,D2,D3,T1}(data::T2...) where {D1,D2,D3,T1,T2} = ThirdOrderTensorValue{D1,D2,D3,T1}(data) +ThirdOrderTensorValue{D1,D2,D3,T1,L}(data::T2...) where {D1,D2,D3,L,T1,T2} = ThirdOrderTensorValue{D1,D2,D3,T1,L}(data) + +change_eltype(::Type{ThirdOrderTensorValue{D1,D2,D3,T1,L}},::Type{T2}) where {D1,D2,D3,T1,T2,L} = ThirdOrderTensorValue{D1,D2,D3,T2,L} +change_eltype(::T,::Type{T2}) where {T<:ThirdOrderTensorValue,T2} = change_eltype(T,T2) diff --git a/src/TensorValues/Types.jl b/src/TensorValues/Types.jl deleted file mode 100644 index b7f0f3208..000000000 --- a/src/TensorValues/Types.jl +++ /dev/null @@ -1,264 +0,0 @@ - -# Types - -""" -Type representing a multi-dimensional value -""" -struct MultiValue{S,T,N,L} <: Number - array::SArray{S,T,N,L} -end - -""" -Type representing a second-order tensor -""" -const TensorValue{D,T,L} = MultiValue{Tuple{D,D},T,2,L} - -""" -Type representing a first-order tensor -""" -const VectorValue{D,T} = MultiValue{Tuple{D},T,1,D} - -# Constructors (MultiValue) - -function (::Type{MultiValue{S}})(x::Tuple) where S<:Tuple - array = SArray{S}(x) - MultiValue(array) -end - -function (::Type{MultiValue{S}})(x::Tuple{}) where S<:Tuple - s = """ - Unknown element type. - - Provide element type in the corresponding type parameter. 
- Examples: - MultiValue{Tuple{0,0},Int}() - TensorValue{0,Int}() - VectorValue{0,Int}() - """ - error(s) -end - -function (::Type{MultiValue{S,T}})(x::Tuple) where {S<:Tuple,T} - array = SArray{S,T}(x) - MultiValue(array) -end - -function (::Type{MultiValue{S}})(x::Vararg) where S<:Tuple - MultiValue{S}(x) -end - -function (::Type{MultiValue{S,T}})(x::Vararg) where {S<:Tuple,T} - MultiValue{S,T}(x) -end - -function MultiValue(a::StaticArray{S,T}) where {S,T} - MultiValue{S,T}(a.data) -end - -# Constructors (TensorValue) - -function (::Type{TensorValue{D}})(x::Tuple) where D - S = Tuple{D,D} - MultiValue{S}(x) -end - -function (::Type{TensorValue{0}})() - S = Tuple{0,0} - MultiValue{S}() -end - -function (::Type{TensorValue{D}})(x::Vararg) where D - TensorValue{D}(x) -end - -function (::Type{TensorValue{D,T}})(x::Tuple) where {D,T} - S = Tuple{D,D} - MultiValue{S,T}(x) -end - -function (::Type{TensorValue{D,T}})(x::Vararg) where {D,T} - TensorValue{D,T}(x) -end - -@generated function TensorValue(arg::NTuple{DD,T}) where {T,DD} - SQ = sqrt(DD) - D = ceil(Int,SQ) - @assert D == SQ - :( TensorValue{$D,T}(arg) ) -end - -function TensorValue(args::Vararg) - TensorValue(args) -end - -function TensorValue() - S = Tuple{0,0} - MultiValue{S}() -end - -function TensorValue(a::StaticArray) - TensorValue(a.data) -end - -""" -""" -@generated function diagonal_tensor(v::VectorValue{D,T}) where {D,T} - s = ["zero(T), " for i in 1:(D*D)] - for i in 1:D - d = D*(i-1)+i - s[d] = "v.array[$i]," - end - str = join(s) - Meta.parse("TensorValue(($str))") -end - -# Constructors (VectorValue) - -function (::Type{VectorValue{D}})(x::Tuple) where D - S = Tuple{D} - MultiValue{S}(x) -end - -function (::Type{VectorValue{D}})(x::Vararg) where D - VectorValue{D}(x) -end - -function (::Type{VectorValue{D,T}})() where {D,T} - S = Tuple{D} - MultiValue{S,T}() -end - -function VectorValue(arg::NTuple{D,T}) where {D,T} - VectorValue{D,T}(arg) -end - -function (::Type{VectorValue{D,T}})(x::Vararg{Number,D}) where {T,D} - VectorValue{D,T}(x) -end - -function VectorValue(args::Vararg) - VectorValue(args) -end - -function VectorValue() - S = Tuple{0} - MultiValue{S}() -end - -function VectorValue(a::StaticArray) - VectorValue(a.data) -end - -function VectorValue(a::SVector) - MultiValue(a) -end - -function VectorValue(a::MVector) - MultiValue(a) -end - - - -# Initializers - -function zero(::Type{<:MultiValue{S,T,N,L}}) where {S,T,N,L} - z = zero(SArray{S,T,N,L}) - MultiValue{S,T,N,L}(z) -end - -function zero(::MultiValue{S,T,N,L}) where {S,T,N,L} - zero(MultiValue{S,T,N,L}) -end - -function one(::Type{<:MultiValue{S,T,N,L}}) where {S,T,N,L} - z = one(SArray{S,T,N,L}) - MultiValue{S,T,N,L}(z) -end - -function one(::MultiValue{S,T,N,L}) where {S,T,N,L} - one(MultiValue{S,T,N,L}) -end - -# Conversions - -function convert(::Type{<:MultiValue{S,T,N,L}},a::StaticArray{S,T,N}) where {S,T,N,L} - MultiValue(a) -end - -function convert( - ::Type{<:MultiValue{S,T,N,L}},a::AbstractArray{R,N}) where {S,T,N,L,R} - b = convert(SArray{S,T,N,L},a) - MultiValue(b) -end - -function convert(::Type{<:MultiValue{S,T,N,L}},a::NTuple{L,R}) where {S,T,N,L,R} - MultiValue(SArray{S,T}(a)) -end - -function convert(::Type{<:MultiValue{S,T,N,L}},a::MultiValue{S,Ta,N,L} where Ta) where {S,T,N,L} - b = convert(SArray{S,T,N,L},a.array) - MultiValue(b) -end - -# Misc operations on the type itself - -length(::Type{<: MultiValue{S,T,N,L} where {S,T,N}} ) where L = L - -function size(::Type{MultiValue{S,T,N,L}}) where {S,T,N,L} - A = SArray{S,T,N,L} - size(A) -end 
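
For code written against the old Types.jl constructors being removed here, a rough migration sketch follows; the commented left-hand calls are the deleted API shown above, the right-hand ones are the constructors introduced elsewhere in this changeset.

using Gridap.TensorValues

# MultiValue{Tuple{3}}(1,2,3)          ->  VectorValue(1,2,3)
# MultiValue{Tuple{2,2}}(1,2,3,4)      ->  TensorValue(1,2,3,4)
# MultiValue{Tuple{3,2}}(1,2,3,4,5,6)  ->  TensorValue{3,2}(1,2,3,4,5,6)
# v.array (direct field access)        ->  get_array(v)
v = VectorValue(1,2,3)
t = TensorValue{3,2}(1,2,3,4,5,6)
get_array(t)  # SMatrix{3,2,Int}
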
- -function size(::Type{<:MultiValue{S}}) where S - _s(Size(S)) -end - -@pure _s(s::Size{T}) where T = T - -""" - n_components(::Type) - -Returns the number of components stored in the given type. -Implemented for types `<:Real` and `<:MultiValue`. -Also available for instances of these types. -""" -n_components(::Type{<: MultiValue{S,T,N,L} where {S,T,N}} ) where L = L -n_components(a::T) where T<:MultiValue = n_components(T) - -n_components(::Type{<:Real}) = 1 -n_components(a::T) where T<:Real = 1 - - -# Custom type printing - -function show(io::IO,v::MultiValue) - print(io,v.array.data) -end - -function show(io::IO,::MIME"text/plain",v::MultiValue) - print(io,typeof(v)) - print(io,v.array.data) -end - -# Misc - -""" -""" -mutable(::Type{MultiValue{S,T,N,L}}) where {S,T,N,L} = MArray{S,T,N,L} - -mutable(::MultiValue{S,T,N,L}) where {S,T,N,L} = MArray{S,T,N,L} - -""" -""" -function change_eltype(::Type{MultiValue{S,T,N,L}},::Type{E}) where {S,T,N,L,E} - MultiValue{S,E,N,L} -end - -change_eltype(a::T,::Type{E}) where {T<:MultiValue,E} = change_eltype(T,E) - -change_eltype(::Type{<:Real},::Type{E}) where E = E - -change_eltype(a::T,::Type{E}) where {T<:Real,E} = change_eltype(T,E) - -@inline Tuple(a::MultiValue) = a.array.data - diff --git a/src/TensorValues/VectorValueTypes.jl b/src/TensorValues/VectorValueTypes.jl new file mode 100644 index 000000000..31b4acd7a --- /dev/null +++ b/src/TensorValues/VectorValueTypes.jl @@ -0,0 +1,93 @@ +############################################################### +# Types +############################################################### + +""" +Type representing a first-order tensor +""" +struct VectorValue{D,T} <: MultiValue{Tuple{D},T,1,D} + data::NTuple{D,T} + function VectorValue{D,T}(data::NTuple{D,T}) where {D,T} + new{D,T}(data) + end +end + +############################################################### +# Constructors (VectorValue) +############################################################### + +# Empty VectorValue constructor + +VectorValue() = VectorValue{0,Int}(NTuple{0,Int}()) +VectorValue{0}() = VectorValue{0,Int}(NTuple{0,Int}()) +VectorValue{0,T}() where {T} = VectorValue{0,T}(NTuple{0,T}()) +VectorValue(data::NTuple{0}) = VectorValue{0,Int}(data) +VectorValue{0}(data::NTuple{0}) = VectorValue{0,Int}(data) + +# VectorValue single NTuple argument constructor + +VectorValue(data::NTuple{D,T}) where {D,T} = VectorValue{D,T}(data) +VectorValue{D}(data::NTuple{D,T}) where {D,T} = VectorValue{D,T}(data) +VectorValue{D,T1}(data::NTuple{D,T2}) where {D,T1,T2} = VectorValue{D,T1}(NTuple{D,T1}(data)) + +# VectorValue Vararg constructor + +VectorValue(data::T...) where {T} = VectorValue(data) +VectorValue{D}(data::T...) where {D,T} = VectorValue{D}(data) +VectorValue{D,T1}(data::T2...) 
where {D,T1,T2} = VectorValue{D,T1}(data) + +# VectorValue single AbstractVector argument constructor + +VectorValue(data::AbstractArray{T}) where {T} = (D=length(data);VectorValue(NTuple{D,T}(data))) +VectorValue{D}(data::AbstractArray{T}) where {D,T} = VectorValue{D}(NTuple{D,T}(data)) +VectorValue{D,T1}(data::AbstractArray{T2}) where {D,T1,T2} = VectorValue{D,T1}(NTuple{D,T1}(data)) + +############################################################### +# Conversions (VectorValue) +############################################################### + +# Direct conversion +convert(::Type{<:VectorValue{D,T}}, arg:: AbstractArray) where {D,T} = VectorValue{D,T}(NTuple{D,T}(arg)) +convert(::Type{<:VectorValue{D,T}}, arg:: Tuple) where {D,T} = VectorValue{D,T}(arg) + +# Inverse conversion +convert(::Type{<:SVector{D,T}}, arg::VectorValue{D}) where {D,T} = SVector{D,T}(Tuple(arg)) +convert(::Type{<:MVector{D,T}}, arg::VectorValue{D}) where {D,T} = MVector{D,T}(Tuple(arg)) +convert(::Type{<:NTuple{D,T}}, arg::VectorValue{D}) where {D,T} = NTuple{D,T}(Tuple(arg)) + +# Internal conversion +convert(::Type{<:VectorValue{D,T}}, arg::VectorValue{D}) where {D,T} = VectorValue{D,T}(Tuple(arg)) +convert(::Type{<:VectorValue{D,T}}, arg::VectorValue{D,T}) where {D,T} = arg + +############################################################### +# Other constructors and conversions (VectorValue) +############################################################### + +zero(::Type{<:VectorValue{D,T}}) where {D,T} = VectorValue{D,T}(tfill(zero(T),Val{D}())) +zero(::VectorValue{D,T}) where {D,T} = zero(VectorValue{D,T}) + +mutable(::Type{VectorValue{D,T}}) where {D,T} = MVector{D,T} +mutable(::VectorValue{D,T}) where {D,T} = mutable(VectorValue{D,T}) + +change_eltype(::Type{VectorValue{D}},::Type{T}) where {D,T} = VectorValue{D,T} +change_eltype(::Type{VectorValue{D,T1}},::Type{T2}) where {D,T1,T2} = VectorValue{D,T2} +change_eltype(::VectorValue{D,T1},::Type{T2}) where {D,T1,T2} = change_eltype(VectorValue{D,T1},T2) + +get_array(arg::VectorValue{D,T}) where {D,T} = convert(SVector{D,T}, arg) + +############################################################### +# Introspection (VectorValue) +############################################################### + +eltype(::Type{<:VectorValue{D,T}}) where {D,T} = T +eltype(arg::VectorValue{D,T}) where {D,T} = eltype(VectorValue{D,T}) + +size(::Type{<:VectorValue{D}}) where {D} = (D,) +size(::VectorValue{D}) where {D} = size(VectorValue{D}) + +length(::Type{<:VectorValue{D}}) where {D} = D +length(::VectorValue{D}) where {D} = length(VectorValue{D}) + +num_components(::Type{<:VectorValue{D}}) where {D} = length(VectorValue{D}) +num_components(::VectorValue{D}) where {D} = num_components(VectorValue{D}) + diff --git a/test/ArraysTests/KernelsTests.jl b/test/ArraysTests/KernelsTests.jl index fa4e729fc..28238a484 100644 --- a/test/ArraysTests/KernelsTests.jl +++ b/test/ArraysTests/KernelsTests.jl @@ -3,6 +3,7 @@ module KernelsTests using Test using Gridap.Arrays using Gridap.TensorValues +using LinearAlgebra test_kernel(+,(3,2),5) @@ -33,11 +34,11 @@ test_kernel(k,([3,4],[1,2]),[2,2]) k = contract(-) test_kernel(k,([3,4],[1,2]),3-1+4-2) -f = bcast(*) +f = bcast(⋅) a = fill(TensorValue(2,0,0,0,2,0,0,0,2),2) b = VectorValue(1,2,3) c = zeros(VectorValue{3,Int},2) -broadcast!(*,c,a,b) +broadcast!(⋅,c,a,b) test_kernel(f,(a,b),c) end # module diff --git a/test/FESpacesTests/AffineFEOperatorsTests.jl b/test/FESpacesTests/AffineFEOperatorsTests.jl index 2eca0c554..a82df8d78 100644 --- 
a/test/FESpacesTests/AffineFEOperatorsTests.jl +++ b/test/FESpacesTests/AffineFEOperatorsTests.jl @@ -33,7 +33,7 @@ f(x) = x[2] v = get_cell_basis(V) u = get_cell_basis(U) -cellmat = integrate(∇(v)*∇(u),trian,quad) +cellmat = integrate(∇(v)⊙∇(u),trian,quad) cellvec = integrate(v*f,trian,quad) cellids = collect(1:num_cells(trian)) @@ -64,7 +64,7 @@ dirichlet_tags = "boundary" V = GradConformingFESpace(reffes,model,dirichlet_tags) U = TrialFESpace(V,u_sol) -a(v,u) = ∇(v)*∇(u) +a(v,u) = ∇(v)⊙∇(u) l(v) = v*f_fun t_Ω = AffineFETerm(a,l,trian,quad) @@ -96,7 +96,7 @@ function poisson_matvec_kernel!(mat,vec,∇v,∇u,v,j,w,x) f_q = f_fun(x[q]) for n in 1:N for m in 1:M - mat[m,n] += ∇v[q,m]*∇u[q,n]*dV + mat[m,n] += ∇v[q,m]⊙∇u[q,n]*dV end end for m in 1:M diff --git a/test/FESpacesTests/CellKernelsTests.jl b/test/FESpacesTests/CellKernelsTests.jl index 701425064..8c13333cc 100644 --- a/test/FESpacesTests/CellKernelsTests.jl +++ b/test/FESpacesTests/CellKernelsTests.jl @@ -7,6 +7,7 @@ using Gridap.Geometry using Gridap.Integration using Gridap.FESpaces using LinearAlgebra +using Gridap.TensorValues function poisson_matvec_kernel!(mat,vec,∇u,∇v,v,j,w) Q = length(w) @@ -17,7 +18,7 @@ function poisson_matvec_kernel!(mat,vec,∇u,∇v,v,j,w) for n in 1:N for m in 1:M - mat[m,n] += ∇v[q,m]*∇u[q,n]*dV + mat[m,n] += ∇v[q,m]⊙∇u[q,n]*dV end end @@ -37,7 +38,7 @@ function poisson_mat_kernel!(mat,∇u,∇v,j,w) for n in 1:N for m in 1:M - mat[m,n] += ∇v[q,m]*∇u[q,n]*dV + mat[m,n] += ∇v[q,m]⊙∇u[q,n]*dV end end @@ -84,7 +85,7 @@ cellmatvec = apply_cellmatvec(poisson_matvec_kernel!, ∇v_q, ∇v_q, v_q, j_q, cellmat = apply_cellmatrix(poisson_mat_kernel!, ∇v_q, ∇v_q, j_q, w) cellvec = apply_cellvector(poisson_vec_kernel!, v_q, j_q, w) -a(v,u) = ∇(v)*∇(u) +a(v,u) = ∇(v)⊙∇(u) l(v) = v cellmat2 = integrate(a(v,u),trian,quad) diff --git a/test/FESpacesTests/ExtendedFESpacesTests.jl b/test/FESpacesTests/ExtendedFESpacesTests.jl index 6898111bf..77ba8b4f9 100644 --- a/test/FESpacesTests/ExtendedFESpacesTests.jl +++ b/test/FESpacesTests/ExtendedFESpacesTests.jl @@ -82,17 +82,17 @@ uh_in = restrict(uh,trian_in) uh_Γ = restrict(uh,trian_Γ) -t_in = AffineFETerm( (u,v) -> v*u, (v) -> v*u, trian_in, quad_in) +t_in = AffineFETerm( (u,v) -> v⊙u, (v) -> v⊙u, trian_in, quad_in) op_in = AffineFEOperator(U,V,t_in) quad = CellQuadrature(trian,2*order) -t_Ω = AffineFETerm( (u,v) -> v*u, (v) -> v*u, trian, quad) +t_Ω = AffineFETerm( (u,v) -> v⊙u, (v) -> v⊙u, trian, quad) op_Ω = AffineFEOperator(U,V,t_Ω) @test get_vector(op_in) ≈ get_vector(op_Ω) -t_Γ = AffineFETerm( (u,v) -> jump(v)*jump(u) + inner(jump(ε(v)),jump(ε(u))), (v) -> jump(v)*u, trian_Γ, quad_Γ) +t_Γ = AffineFETerm( (u,v) -> jump(v)⊙jump(u) + jump(ε(v))⊙jump(ε(u)), (v) -> jump(v)⊙u, trian_Γ, quad_Γ) op_Γ = AffineFEOperator(U,V,t_Γ) q_in = get_coordinates(quad_in) diff --git a/test/FESpacesTests/FESolversTests.jl b/test/FESpacesTests/FESolversTests.jl index 47c70a3f1..6c657fd0b 100644 --- a/test/FESpacesTests/FESolversTests.jl +++ b/test/FESpacesTests/FESolversTests.jl @@ -32,8 +32,8 @@ f(x) = x[2] v = get_cell_basis(V) u = get_cell_basis(U) -cellmat = integrate(∇(v)*∇(u),trian,quad) -cellvec = integrate(v*f,trian,quad) +cellmat = integrate(∇(v)⊙∇(u),trian,quad) +cellvec = integrate(v⊙f,trian,quad) cellids = collect(1:num_cells(trian)) assem = SparseMatrixAssembler(U,V) diff --git a/test/FESpacesTests/FESpacesWithLinearConstraintsTests.jl b/test/FESpacesTests/FESpacesWithLinearConstraintsTests.jl index b21136d5b..8d6e24c95 100644 --- 
a/test/FESpacesTests/FESpacesWithLinearConstraintsTests.jl +++ b/test/FESpacesTests/FESpacesWithLinearConstraintsTests.jl @@ -6,6 +6,7 @@ using Gridap.Fields using Gridap.Geometry using Gridap.FESpaces using Test +using LinearAlgebra domain = (0,1,0,1) partition = (2,2) @@ -66,9 +67,9 @@ bquad = CellQuadrature(btrian,2) bn = get_normal_vector(btrian) -a(u,v) = ∇(v)*∇(u) +a(u,v) = ∇(v)⋅∇(u) b1(v) = v*f -b2(v) = v*(bn*∇(u)) +b2(v) = v*(bn⋅∇(u)) t1 = AffineFETerm(a,b1,trian,quad) t2 = FESource(b2,btrian,bquad) op = AffineFEOperator(Uc,Vc,t1,t2) diff --git a/test/FESpacesTests/FETermsTests.jl b/test/FESpacesTests/FETermsTests.jl index 0de3bc6d4..bbd7ed556 100644 --- a/test/FESpacesTests/FETermsTests.jl +++ b/test/FESpacesTests/FETermsTests.jl @@ -42,7 +42,7 @@ v = get_cell_basis(V) u = get_cell_basis(U) uh = interpolate(U,u_sol) -a(u,v) = ∇(v)*∇(u) +a(u,v) = ∇(v)⊙∇(u) l(v) = f*v j(u,du,v) = a(du,v) @@ -135,7 +135,7 @@ function poisson_matvec_kernel!(mat,vec,∇v,∇u,v,j,w,x) f_q = f(x[q]) for n in 1:N for m in 1:M - mat[m,n] += ∇v[q,m]*∇u[q,n]*dV + mat[m,n] += ∇v[q,m]⊙∇u[q,n]*dV end end for m in 1:M @@ -173,9 +173,9 @@ function poisson_jacres_kernel!(jac,res,∇v,∇du,v,∇uh,j,w,x) f_q = f(x[q]) for m in 1:M for n in 1:N - jac[m,n] += ∇v[q,m]*∇du[q,n]*dV + jac[m,n] += ∇v[q,m]⊙∇du[q,n]*dV end - res[m] += ∇v[q,m]*∇uh[q]*dV + res[m] += ∇v[q,m]⊙∇uh[q]*dV end for m in 1:M res[m] -= v[q,m]*f_q*dV diff --git a/test/FESpacesTests/SparseMatrixAssemblersTests.jl b/test/FESpacesTests/SparseMatrixAssemblersTests.jl index 8e38657a7..1e8045833 100644 --- a/test/FESpacesTests/SparseMatrixAssemblersTests.jl +++ b/test/FESpacesTests/SparseMatrixAssemblersTests.jl @@ -40,8 +40,8 @@ bquad = CellQuadrature(btrian,degree) bu = restrict(u,btrian) bv = restrict(v,btrian) -cellmat = integrate(∇(v)*∇(u),trian,quad) -cellvec = integrate(v*b,trian,quad) +cellmat = integrate(∇(v)⊙∇(u),trian,quad) +cellvec = integrate(v⊙b,trian,quad) cellmatvec = pair_arrays(cellmat,cellvec) cellids = collect(1:num_cells(trian)) diff --git a/test/FieldsTests/AffineMapsTests.jl b/test/FieldsTests/AffineMapsTests.jl index f251bb5bb..6d7b99da7 100644 --- a/test/FieldsTests/AffineMapsTests.jl +++ b/test/FieldsTests/AffineMapsTests.jl @@ -15,7 +15,7 @@ x3 = Point(2,1) x = [x1,x2,x3] r = Point{2,Int}[(1, 1), (3, 3), (5, 3)] -∇r = TensorValue{2,Int,4}[(2, 0, 0, 2), (2, 0, 0, 2), (2, 0, 0, 2)] +∇r = TensorValue{2,2,Int,4}[(2, 0, 0, 2), (2, 0, 0, 2), (2, 0, 0, 2)] test_field(h,x,r,grad=∇r) end # module diff --git a/test/FieldsTests/AttachmapTests.jl b/test/FieldsTests/AttachmapTests.jl index 21f97699a..c0db0971c 100644 --- a/test/FieldsTests/AttachmapTests.jl +++ b/test/FieldsTests/AttachmapTests.jl @@ -6,6 +6,7 @@ using Gridap.Arrays using Gridap.Fields using Gridap.Fields: OtherMockBasis, MockBasis using FillArrays +using LinearAlgebra p1 = Point(2,2) p2 = Point(4,2) @@ -32,7 +33,7 @@ bx = rx for i in 1:np jacinv = inv(∇ϕx[i]) for j in 1:ndof - ∇bx[i,j] = jacinv*∇rx[i,j] + ∇bx[i,j] = jacinv⋅∇rx[i,j] end end test_field(b,x,bx,grad=∇bx) diff --git a/test/FieldsTests/DiffOperatorsTests.jl b/test/FieldsTests/DiffOperatorsTests.jl index 6be5ceb3d..eb9fb320d 100644 --- a/test/FieldsTests/DiffOperatorsTests.jl +++ b/test/FieldsTests/DiffOperatorsTests.jl @@ -26,17 +26,23 @@ for f in (_f,_af) @test curl(f) == grad2curl(gradient(f)) - @test ∇*f == divergence(f) + @test ∇⋅f == divergence(f) @test cross(∇,f) == curl(f) + + @test ∇×f == curl(f) @test outer(∇,f) == ∇(f) + + @test ∇⊗f == ∇(f) @test outer(f,∇) == transpose(∇(f)) + @test f⊗∇ == transpose(∇(f)) + @test 
ε(f) == symmetric_part(gradient(f)) - @test Δ(f) == ∇*∇(f) + @test Δ(f) == ∇⋅∇(f) end @@ -63,7 +69,7 @@ u(x) = VectorValue( x[1]^2 + 2*x[2]^2, -x[1]^2 ) Δu(x) = VectorValue( 6, -2 ) for x in xs - @test (∇*u)(x) == tr(∇u(x)) + @test (∇⋅u)(x) == tr(∇u(x)) @test (∇×u)(x) == grad2curl(∇u(x)) @test Δ(u)(x) == Δu(x) end diff --git a/test/FieldsTests/FieldInterfaceTests.jl b/test/FieldsTests/FieldInterfaceTests.jl index 4a53808f9..19ab581c0 100644 --- a/test/FieldsTests/FieldInterfaceTests.jl +++ b/test/FieldsTests/FieldInterfaceTests.jl @@ -36,7 +36,7 @@ Tfg = field_return_types((f,g),x) x = Point(1,2) @test gradient_type(Float64,x) == VectorValue{2,Float64} -@test gradient_type(VectorValue{2,Float64},x) == TensorValue{2,Float64,4} -@test gradient_type(VectorValue{3,Float64},x) == MultiValue{Tuple{2,3},Float64,2,6} +@test gradient_type(VectorValue{2,Float64},x) == TensorValue{2,2,Float64,4} +@test gradient_type(VectorValue{3,Float64},x) == TensorValue{2,3,Float64,6} end # module diff --git a/test/FieldsTests/HomotheciesTests.jl b/test/FieldsTests/HomotheciesTests.jl index 86d6878a3..1b13ac36e 100644 --- a/test/FieldsTests/HomotheciesTests.jl +++ b/test/FieldsTests/HomotheciesTests.jl @@ -14,7 +14,7 @@ x2 = Point(1,1) x3 = Point(2,1) x = [x1,x2,x3] r = Point{2,Int}[(-1, -1), (1, 1), (3, 1)] -∇r = TensorValue{2,Int,4}[(2, 0, 0, 2), (2, 0, 0, 2), (2, 0, 0, 2)] +∇r = TensorValue{2,2,Int,4}[(2, 0, 0, 2), (2, 0, 0, 2), (2, 0, 0, 2)] test_field(h,x,r,grad=∇r) end # module diff --git a/test/GeometryTests/AppendedTriangulationsTests.jl b/test/GeometryTests/AppendedTriangulationsTests.jl index aca65b4b4..af4a17a02 100644 --- a/test/GeometryTests/AppendedTriangulationsTests.jl +++ b/test/GeometryTests/AppendedTriangulationsTests.jl @@ -8,6 +8,7 @@ using Gridap.Visualization using Gridap.FESpaces using Gridap.Fields using Gridap.Integration +using LinearAlgebra: ⋅ domain = (0,1,0,1) partition = (10,10) @@ -50,7 +51,7 @@ el2 = sqrt(sum(integrate(e*e,trian,quad))) _dv = get_cell_basis(V) dv = restrict(_dv,trian) -cellmat = integrate(∇(dv)*∇(dv),trian,quad) +cellmat = integrate(∇(dv)⋅∇(dv),trian,quad) @test isa(cellmat,AppendedArray) @test isa(cellmat.a,CompressedArray) @test isa(cellmat.b,CompressedArray) diff --git a/test/GeometryTests/CartesianGridsTests.jl b/test/GeometryTests/CartesianGridsTests.jl index e181ca489..9772557b3 100644 --- a/test/GeometryTests/CartesianGridsTests.jl +++ b/test/GeometryTests/CartesianGridsTests.jl @@ -81,7 +81,7 @@ map = get_cell_map(grid) x = [Point(0.5,0.5),] ax = Fill(x,prod(partition)) r = Vector{Point{2,Float64}}[[(0.25, 0.25)], [(0.75, 0.25)], [(0.25, 0.75)], [(0.75, 0.75)]] -∇r = Vector{TensorValue{2,Float64,4}}[ +∇r = Vector{TensorValue{2,2,Float64,4}}[ [(0.5, 0.0, 0.0, 0.5)], [(0.5, 0.0, 0.0, 0.5)], [(0.5, 0.0, 0.0, 0.5)], [(0.5, 0.0, 0.0, 0.5)]] test_array_of_fields(map,ax,r,grad=∇r) diff --git a/test/GeometryTests/CellFieldsTests.jl b/test/GeometryTests/CellFieldsTests.jl index aeb8b96b8..1fff46d29 100644 --- a/test/GeometryTests/CellFieldsTests.jl +++ b/test/GeometryTests/CellFieldsTests.jl @@ -65,7 +65,7 @@ collect(evaluate(grad_cfu,q)) t_grad_cfu = outer(cfu,∇) collect(evaluate(t_grad_cfu,q)) -div_cfu = ∇*cfu +div_cfu = ∇⋅cfu collect(evaluate(div_cfu,q)) curl_cfu = cross(∇,cfu) @@ -87,10 +87,10 @@ nvec = get_normal_vector(btrian) z = 2*bcf1 + nvec @test isa(z,CellField) -flux1 = nvec*∇u +flux1 = nvec⋅∇u collect(evaluate(flux1,s)) -flux2 = ∇u*nvec +flux2 = ∇u⋅nvec collect(evaluate(flux2,s)) strian = SkeletonTriangulation(model) @@ -124,7 +124,7 @@ 
collect(evaluate(grad_cfu,s)) t_grad_cfu = jump(outer(scfu,∇)) collect(evaluate(t_grad_cfu,s)) -div_cfu = mean(∇*scfu) +div_cfu = mean(∇⋅scfu) collect(evaluate(div_cfu,s)) curl_cfu = jump(cross(∇,scfu)) diff --git a/test/GridapTests/DarcyTests.jl b/test/GridapTests/DarcyTests.jl index 97b620370..41ba670a8 100644 --- a/test/GridapTests/DarcyTests.jl +++ b/test/GridapTests/DarcyTests.jl @@ -3,6 +3,7 @@ module DarcyTests using Test using Gridap import Gridap: ∇, divergence +using LinearAlgebra u(x) = VectorValue(2*x[1],x[1]+x[2]) @@ -48,17 +49,17 @@ nb = get_normal_vector(btrian) function a(x,y) u, p = x v, q = y - u*v - p*(∇*v) + q*(∇*u) + u⋅v - p*(∇⋅v) + q*(∇⋅u) end function l(y) v, q = y - v*f + q*(∇*u) + v⋅f + q*(∇⋅u) end function l_Γ(y) v, q = y - -(v*nb)*p + -(v⋅nb)*p end t_Ω = AffineFETerm(a,l,trian,quad) @@ -70,8 +71,8 @@ uh, ph = xh eu = u - uh ep = p - ph -l2(v) = v*v -h1(v) = v*v + ∇(v)*∇(v) +l2(v) = v⋅v +h1(v) = v*v + ∇(v)⋅∇(v) eu_l2 = sum(integrate(l2(eu),trian,quad)) ep_l2 = sum(integrate(l2(ep),trian,quad)) diff --git a/test/GridapTests/IsotropicDamageTests.jl b/test/GridapTests/IsotropicDamageTests.jl index 75d9bf919..88499175b 100644 --- a/test/GridapTests/IsotropicDamageTests.jl +++ b/test/GridapTests/IsotropicDamageTests.jl @@ -136,7 +136,7 @@ function main(;n,nsteps) e = uh - u - e_l2 = sqrt(sum(integrate(e*e,trian,quad))) + e_l2 = sqrt(sum(integrate(e⋅e,trian,quad))) @test e_l2 < 1.0e-9 end diff --git a/test/GridapTests/PLaplacianTests.jl b/test/GridapTests/PLaplacianTests.jl index 4453a0a07..1bf114b76 100644 --- a/test/GridapTests/PLaplacianTests.jl +++ b/test/GridapTests/PLaplacianTests.jl @@ -3,8 +3,8 @@ using Test using Gridap import Gridap: ∇ -using LinearAlgebra: norm using Gridap.Geometry: DiscreteModelMock +using LinearAlgebra model = DiscreteModelMock() diff --git a/test/GridapTests/PeriodicCoupledPoissonTests.jl b/test/GridapTests/PeriodicCoupledPoissonTests.jl index bf6a97af3..7803a48d0 100644 --- a/test/GridapTests/PeriodicCoupledPoissonTests.jl +++ b/test/GridapTests/PeriodicCoupledPoissonTests.jl @@ -2,7 +2,7 @@ module PeriodicCoupledPoissonTests using Gridap using Test - +using LinearAlgebra u(x) = x[1]^2 + 2*x[2]^2 v(x) = -x[2]^2 @@ -52,7 +52,7 @@ eu = u - uh ev = v - vh l2(u) = u*u -h1(u) = ∇(u)*∇(u) + l2(u) +h1(u) = ∇(u)⋅∇(u) + l2(u) eul2 = sqrt(sum( integrate(l2(eu),trian,quad) )) euh1 = sqrt(sum( integrate(h1(eu),trian,quad) )) diff --git a/test/GridapTests/PeriodicDarcyTests.jl b/test/GridapTests/PeriodicDarcyTests.jl index 65c4d4787..87e32b33e 100644 --- a/test/GridapTests/PeriodicDarcyTests.jl +++ b/test/GridapTests/PeriodicDarcyTests.jl @@ -2,6 +2,7 @@ module PeriodicDarcyTests using Gridap using Test +using LinearAlgebra u(x) = VectorValue(x[1]*(x[1]-1)*(2x[2]-1.0),-x[2]*(x[2]-1.0)*(2x[1]-1.0)) p(x) = x[2]-0.5 @@ -35,12 +36,12 @@ x = get_physical_coordinate(trian) function a(x,y) u, p = x v, q = y - v*u - p*(∇*v) + q*(∇*u) + v⋅u - p*(∇⋅v) + q*(∇⋅u) end function l(y) v, q = y - v*f + q*g + v⋅f + q*g end t_Ω = AffineFETerm(a,l,trian,quad) @@ -51,7 +52,7 @@ uh, ph = xh eu = u - uh ep = p - ph -l2(v) = v*v +l2(v) = v⋅v eu_l2 = sum(integrate(l2(eu),trian,quad)) ep_l2 = sum(integrate(l2(ep),trian,quad)) diff --git a/test/GridapTests/PhysicalPoissonTests.jl b/test/GridapTests/PhysicalPoissonTests.jl index 87dad1763..79dfcd83b 100644 --- a/test/GridapTests/PhysicalPoissonTests.jl +++ b/test/GridapTests/PhysicalPoissonTests.jl @@ -3,6 +3,7 @@ module PhysicalPoissonTests using Test using Gridap import Gridap: ∇ +using LinearAlgebra domain = (0,1,0,1) 
partition = (4,4) @@ -75,17 +76,17 @@ for data in [ vector_data, scalar_data ] uh = interpolate(U,u) a(u,v) = inner(∇(v),∇(u)) - l(v) = v*f + l(v) = v⊙f t_Ω = AffineFETerm(a,l,trian,quad) uh_Γn = restrict(uh,ntrian) uh_Γd = restrict(uh,dtrian) - l_Γn(v) = v*(nn*∇(uh_Γn)) + l_Γn(v) = v⊙(nn⋅∇(uh_Γn)) t_Γn = FESource(l_Γn,ntrian,nquad) - a_Γd(u,v) = (γ/h)*v*u - v*(dn*∇(u)) - (dn*∇(v))*u - l_Γd(v) = (γ/h)*v*uh_Γd - (dn*∇(v))*u + a_Γd(u,v) = (γ/h)*v⊙u - v⊙(dn⋅∇(u)) - (dn⋅∇(v))⊙u + l_Γd(v) = (γ/h)*v⊙uh_Γd - (dn⋅∇(v))⊙u t_Γd = AffineFETerm(a_Γd,l_Γd,dtrian,dquad) op = AffineFEOperator(U,V,t_Ω,t_Γn,t_Γd) diff --git a/test/GridapTests/PoissonDGTests.jl b/test/GridapTests/PoissonDGTests.jl index 589d3cde1..2d7c62190 100644 --- a/test/GridapTests/PoissonDGTests.jl +++ b/test/GridapTests/PoissonDGTests.jl @@ -3,6 +3,7 @@ module PoissonDGTests using Test using Gridap import Gridap: ∇ +using LinearAlgebra #domain = (0,1,0,1) #partition = (4,4) @@ -50,15 +51,15 @@ V = TestFESpace( U = TrialFESpace(V,u) -a(u,v) = inner(∇(v),∇(u)) +a(u,v) = ∇(v)⋅∇(u) l(v) = v*f t_Ω = AffineFETerm(a,l,trian,quad) -a_Γd(u,v) = (γ/h)*v*u - v*(bn*∇(u)) - (bn*∇(v))*u -l_Γd(v) = (γ/h)*v*u - (bn*∇(v))*u +a_Γd(u,v) = (γ/h)*v*u - v*(bn⋅∇(u)) - (bn⋅∇(v))*u +l_Γd(v) = (γ/h)*v*u - (bn⋅∇(v))*u t_Γd = AffineFETerm(a_Γd,l_Γd,btrian,bquad) -a_Γ(u,v) = (γ/h)*jump(v*sn)*jump(u*sn) - jump(v*sn)*mean(∇(u)) - mean(∇(v))*jump(u*sn) +a_Γ(u,v) = (γ/h)*jump(v*sn)⋅jump(u*sn) - jump(v*sn)⋅mean(∇(u)) - mean(∇(v))⋅jump(u*sn) t_Γ = LinearFETerm(a_Γ,strian,squad) op = AffineFEOperator(U,V,t_Ω,t_Γ,t_Γd) diff --git a/test/GridapTests/PoissonTests.jl b/test/GridapTests/PoissonTests.jl index 71ca2cae9..911944f8b 100644 --- a/test/GridapTests/PoissonTests.jl +++ b/test/GridapTests/PoissonTests.jl @@ -3,6 +3,7 @@ module PoissonTests using Test using Gridap import Gridap: ∇ +using LinearAlgebra domain = (0,1,0,1) partition = (4,4) @@ -79,18 +80,18 @@ for data in [ vector_data, scalar_data ] uh = interpolate(U,u) - a(u,v) = inner(∇(v),∇(u)) - l(v) = v*f + a(u,v) = ∇(v)⊙∇(u) + l(v) = v⊙f t_Ω = AffineFETerm(a,l,trian,quad) uh_Γn = restrict(uh,ntrian) uh_Γd = restrict(uh,dtrian) - l_Γn(v) = v*(nn*∇(uh_Γn)) + l_Γn(v) = v⊙(nn⋅∇(uh_Γn)) t_Γn = FESource(l_Γn,ntrian,nquad) - a_Γd(u,v) = (γ/h)*v*u - v*(dn*∇(u)) - (dn*∇(v))*u - l_Γd(v) = (γ/h)*v*uh_Γd - (dn*∇(v))*u + a_Γd(u,v) = (γ/h)*v⊙u - v⊙(dn⋅∇(u)) - (dn⋅∇(v))⊙u + l_Γd(v) = (γ/h)*v⊙uh_Γd - (dn⋅∇(v))⊙u t_Γd = AffineFETerm(a_Γd,l_Γd,dtrian,dquad) op = AffineFEOperator(U,V,t_Ω,t_Γn,t_Γd) diff --git a/test/GridapTests/StokesDGTests.jl b/test/GridapTests/StokesDGTests.jl index bec947079..d96c17523 100644 --- a/test/GridapTests/StokesDGTests.jl +++ b/test/GridapTests/StokesDGTests.jl @@ -3,7 +3,7 @@ module StokesDGTests using Test using Gridap import Gridap: ∇ -import LinearAlgebra: tr +import LinearAlgebra: tr, ⋅ const T = VectorValue{2,Float64} @@ -70,23 +70,23 @@ const ns = get_normal_vector(strian) function A_Ω(x,y) u, p = x v, q = y - inner(∇(v), ∇(u)) - ∇(q)*u + v*∇(p) + ∇(v)⊙∇(u) - ∇(q)⋅u + v⋅∇(p) end function B_Ω(y) v, q = y - v*f + q*g + v⋅f + q*g end function A_∂Ω(x,y) u, p = x v, q = y - (γ/h)*v*u - v*(nb*∇(u)) - (nb*∇(v))*u + 2*(q*nb)*u + (γ/h)*v⋅u - v⋅(nb⋅∇(u)) - (nb⋅∇(v))⋅u + 2*(q*nb)⋅u end function B_∂Ω(y) v, q = y - (γ/h)*v*u - (nb*∇(v))*u + (q*nb)*u + (γ/h)*v⋅u - (nb⋅∇(v))⋅u + (q*nb)⋅u end function A_Γ(x,y) @@ -95,9 +95,9 @@ function A_Γ(x,y) (γ/h)*inner( jump(outer(v,ns)), jump(outer(u,ns))) - inner( jump(outer(v,ns)), mean(∇(u)) ) - inner( mean(∇(v)), jump(outer(u,ns)) ) + - (γ0*h)*jump(q*ns)*jump(p*ns) 
+ - jump(q*ns)*mean(u) - - mean(v)*jump(p*ns) + (γ0*h)*jump(q*ns)⋅jump(p*ns) + + jump(q*ns)⋅mean(u) - + mean(v)⋅jump(p*ns) end t_Ω = AffineFETerm(A_Ω,B_Ω,trian,quad) @@ -111,8 +111,8 @@ uh, ph = solve(op) eu = u - uh ep = p - ph -l2(v) = v*v -h1(v) = v*v + inner(∇(v),∇(v)) +l2(v) = v⋅v +h1(v) = v⋅v + inner(∇(v),∇(v)) eu_l2 = sqrt(sum(integrate(l2(eu),trian,quad))) eu_h1 = sqrt(sum(integrate(h1(eu),trian,quad))) diff --git a/test/GridapTests/StokesNitscheTests.jl b/test/GridapTests/StokesNitscheTests.jl index 585963b48..f5ff2efbf 100644 --- a/test/GridapTests/StokesNitscheTests.jl +++ b/test/GridapTests/StokesNitscheTests.jl @@ -6,7 +6,6 @@ module StokesNitsche using Test using Gridap import Gridap: ∇ -import LinearAlgebra: tr # const T = VectorValue{2,Float64} @@ -20,8 +19,6 @@ p(x) = x[1] - x[2] ∇(::typeof(u)) = ∇u - - n = 2 order = 2 @@ -82,25 +79,23 @@ nb = get_normal_vector(btrian) function A_Ω(x,y) u, p = x v, q = y - inner(∇(v), ∇(u)) - inner(q,divergence(u)) - inner(divergence(v), p) + ∇(v)⊙∇(u) - q*(∇⋅u) - (∇⋅v)*p end function B_Ω(y) v, q = y - inner(v,f) - inner(q, g) + v⋅f - q*g end function A_∂Ω(x,y) u, p = x v, q = y - # (γ/h) * inner(v,u) - inner(outer(nb,v), ∇(u)) - inner(∇(v), outer(nb,u)) + inner(v, p*nb) + inner(q*nb,u) - (γ/h)*v*u - v*(nb*∇(u)) - (nb*∇(v))*u + (p*nb)*v + (q*nb)*u + (γ/h)*v⋅u - v⋅(nb⋅∇(u)) - (nb⋅∇(v))⋅u + (p*nb)⋅v + (q*nb)⋅u end function B_∂Ω(y) v, q = y - # + (γ/h) * inner(v,ud) - inner(∇(v), outer(nb,ud_cf)) + inner(q*nb,ud) - (γ/h)*v*u - (nb*∇(v))*u + (q*nb)*u + (γ/h)*v⋅u - (nb⋅∇(v))⋅u + (q*nb)⋅u end t_Ω = AffineFETerm(A_Ω,B_Ω,trian,quad) @@ -124,8 +119,8 @@ ep = p - ph # writevtk(trian,"trian",cellfields=["uh"=>uh,"ph"=>ph, "eu"=>eu, "ep"=>ep]) # Define norms to measure the error -l2(u) = inner(u,u) -h1(u) = inner(∇(u),∇(u)) + l2(u) +l2(u) = u⊙u +h1(u) = ∇(u)⊙∇(u) + l2(u) # Compute errors eul2 = sqrt(sum( integrate(l2(eu),trian,quad) )) diff --git a/test/GridapTests/StokesTaylorHoodTests.jl b/test/GridapTests/StokesTaylorHoodTests.jl index 53c3f8604..450fa5a23 100644 --- a/test/GridapTests/StokesTaylorHoodTests.jl +++ b/test/GridapTests/StokesTaylorHoodTests.jl @@ -4,13 +4,13 @@ using Test using Gridap import Gridap: ∇ -using LinearAlgebra: tr +using LinearAlgebra: tr, ⋅ # Using automatic differentiation u(x) = VectorValue( x[1]^2 + 2*x[2]^2, -x[1]^2 ) p(x) = x[1] + 3*x[2] f(x) = -Δ(u)(x) + ∇(p)(x) -g(x) = (∇*u)(x) +g(x) = (∇⋅u)(x) ∇u(x) = ∇(u)(x) #u(x) = VectorValue( x[1]^2 + 2*x[2]^2, -x[1]^2 ) @@ -75,17 +75,17 @@ for ref_st in ref_style function a(x,y) u,p = x v,q = y - inner(∇(v),∇(u)) - (∇*v)*p + q*(∇*u) + ∇(v)⊙∇(u) - (∇⋅v)*p + q*(∇⋅u) end function l(y) v,q = y - v*f + q*g + v⋅f + q*g end function l_Γb(y) v,q = y - v*(n*∇u) - (n*v)*p + v⋅(n⋅∇u) - (n⋅v)*p end t_Ω = AffineFETerm(a,l,trian,quad) @@ -98,8 +98,8 @@ for ref_st in ref_style eu = u - uh ep = p - ph - l2(v) = v*v - h1(v) = v*v + inner(∇(v),∇(v)) + l2(v) = v⋅v + h1(v) = v⋅v + ∇(v)⊙∇(v) eu_l2 = sqrt(sum(integrate(l2(eu),trian,quad))) eu_h1 = sqrt(sum(integrate(h1(eu),trian,quad))) diff --git a/test/GridapTests/SurfaceCouplingTests.jl b/test/GridapTests/SurfaceCouplingTests.jl index 4913c6853..ca3b5d9b2 100644 --- a/test/GridapTests/SurfaceCouplingTests.jl +++ b/test/GridapTests/SurfaceCouplingTests.jl @@ -5,7 +5,7 @@ using Gridap using Gridap.Arrays using Gridap.FESpaces import Gridap: ∇ -using LinearAlgebra: tr +using LinearAlgebra: tr, ⋅ # Analytical functions @@ -104,29 +104,29 @@ end function l_solid(y) v,q = y - v*s + v⋅s end function a_fluid(x,y) u,p = x v,q = y - inner(∇(v),∇(u)) - 
(∇*v)*p + q*(∇*u) + inner(∇(v),∇(u)) - (∇⋅v)*p + q*(∇⋅u) end function l_fluid(y) v,q = y - v*f + q*g + v⋅f + q*g end function l_Γn_fluid(y) v,q = y - v*(n*∇u) - (n*v)*p + v⋅(n⋅∇u) - (n⋅v)*p end # Pressure drop at the interface function l_Γ(y) v,q = y - - mean(n_Γ*v)*p + - mean(n_Γ⋅v)*p end t_Ω_solid = AffineFETerm(a_solid,l_solid,trian_solid,quad_solid) @@ -151,8 +151,8 @@ ep_fluid = p - ph_fluid # Errors -l2(v) = v*v -h1(v) = v*v + inner(∇(v),∇(v)) +l2(v) = v⋅v +h1(v) = v⋅v + inner(∇(v),∇(v)) eu_l2 = sqrt(sum(integrate(l2(eu),trian,quad))) eu_h1 = sqrt(sum(integrate(h1(eu),trian,quad))) diff --git a/test/MultiFieldTests/MultiFieldFEOperatorsTests.jl b/test/MultiFieldTests/MultiFieldFEOperatorsTests.jl index 94d2c2b0c..f49ef1150 100644 --- a/test/MultiFieldTests/MultiFieldFEOperatorsTests.jl +++ b/test/MultiFieldTests/MultiFieldFEOperatorsTests.jl @@ -49,7 +49,7 @@ end function a_Γ(x,y) u,p = x v,q = y - jump(v)*mean(u) + jump(∇(q))*jump(∇(p)) - mean(v)*mean(p) + jump(v)*mean(u) + jump(∇(q))⋅jump(∇(p)) - mean(v)*mean(p) end t_Ω = AffineFETerm(a,l,trian,quad) diff --git a/test/MultiFieldTests/MultiFieldFESpacesWithLinearConstraintsTests.jl b/test/MultiFieldTests/MultiFieldFESpacesWithLinearConstraintsTests.jl index 3842fd285..90ea40e98 100644 --- a/test/MultiFieldTests/MultiFieldFESpacesWithLinearConstraintsTests.jl +++ b/test/MultiFieldTests/MultiFieldFESpacesWithLinearConstraintsTests.jl @@ -5,6 +5,7 @@ using Gridap.Arrays using Gridap.Fields using Gridap.Geometry using Gridap.FESpaces +using Gridap.TensorValues using Gridap.MultiField using Test @@ -62,8 +63,8 @@ test_fe_space(V) @test has_constraints(U) @test has_constraints(V) -a(u,v) = ∇(v)*∇(u) -b_Γ(v,u,n_Γ) = v*(n_Γ*∇(u)) +a(u,v) = ∇(v)⋅∇(u) +b_Γ(v,u,n_Γ) = v*(n_Γ⋅∇(u)) function A(u,v) u1,u2 = u diff --git a/test/PolynomialsTests/runtests.jl b/test/PolynomialsTests/runtests.jl index b3c9ebe69..8e4a6dd47 100644 --- a/test/PolynomialsTests/runtests.jl +++ b/test/PolynomialsTests/runtests.jl @@ -2,7 +2,7 @@ module PolynomialsTests using Test -@testset "MonomialBases" begin include("MonomialBasesTests.jl") end +#@testset "MonomialBases" begin include("MonomialBasesTests.jl") end @testset "QGradMonomialBases" begin include("QGradMonomialBasesTests.jl") end diff --git a/test/ReferenceFEsTests/LagrangianRefFEsTests.jl b/test/ReferenceFEsTests/LagrangianRefFEsTests.jl index 008d20959..87f93b142 100644 --- a/test/ReferenceFEsTests/LagrangianRefFEsTests.jl +++ b/test/ReferenceFEsTests/LagrangianRefFEsTests.jl @@ -29,7 +29,7 @@ r = [(0,0,0), (1,0,0), (0,1,0), (0,0,1)] @test get_exponents(b) == r orders = (2,2) -extrusion = Tuple(QUAD.extrusion.array) +extrusion = Tuple(QUAD.extrusion) dofs = LagrangianDofBasis(VectorValue{3,Float64},TET,1) @test dofs.nodes == Point{3,Float64}[(0,0,0), (1,0,0), (0,1,0), (0,0,1)] diff --git a/test/TensorValuesTests/IndexingTests.jl b/test/TensorValuesTests/IndexingTests.jl index 531f7a6e3..94d1e3a75 100644 --- a/test/TensorValuesTests/IndexingTests.jl +++ b/test/TensorValuesTests/IndexingTests.jl @@ -35,8 +35,26 @@ for (k,ti) in enumerate(t) @test ti == a[k] end +s = SymTensorValue{2}(11,21,22) +t = TensorValue(convert(SMatrix{2,2,Int},s)) + +@test size(s) == (2,2) +@test length(s) == 4 + +for (k,i) in enumerate(eachindex(t)) + @test s[i] == t[k] +end + +@test s[2,1] == 21 + +@test s[2] == 21 + +for (k,si) in enumerate(t) + @test si == s[k] +end + v = @SMatrix zeros(2,3) -w = MultiValue(v) +w = TensorValue(v) @test CartesianIndices(w) == CartesianIndices(v) @test LinearIndices(w) == LinearIndices(v) diff --git 
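
The SymTensorValue indexing tested just above stores only the independent components yet indexes like a full square tensor; a short sketch with illustrative values:

using Gridap.TensorValues
using StaticArrays

s = SymTensorValue{2}(11,21,22)  # only the 3 independent components are stored
size(s), length(s)               # (2,2) and 4: it still behaves as a full 2x2 tensor
s[2,1] == s[1,2] == 21           # symmetric access
convert(SMatrix{2,2,Int}, s)     # [11 21; 21 22]
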
a/test/TensorValuesTests/OperationsTests.jl b/test/TensorValuesTests/OperationsTests.jl index 4ac20a2f0..2fb8e86db 100644 --- a/test/TensorValuesTests/OperationsTests.jl +++ b/test/TensorValuesTests/OperationsTests.jl @@ -2,6 +2,7 @@ module OperationsTests using Test using Gridap.TensorValues +using Gridap.Arrays using LinearAlgebra # Comparison @@ -42,17 +43,49 @@ c = a - b r = VectorValue(-1,1,-3) @test c == r +a = TensorValue(1,2,3,4) +b = TensorValue(5,6,7,8) + +c = +a +r = a +@test c==r + +c = -a +r = TensorValue(-1,-2,-3,-4) +@test c==r + +c = a - b +r = TensorValue(-4, -4, -4, -4) +@test c==r + +a = SymTensorValue(1,2,3) +b = SymTensorValue(5,6,7) + +c = -a +r = SymTensorValue(-1,-2,-3) +@test c==r + +c = a + b +r = SymTensorValue(6,8,10) +@test c==r + # Matrix Division -t = one(TensorValue{3,Int,9}) +a = VectorValue(1,2,3) +t = one(TensorValue{3,3,Int}) c = t\a +@test c == a +st = one(SymTensorValue{3,Int}) +c = st\a @test c == a # Operations by a scalar t = TensorValue(1,2,3,4,5,6,7,8,9) +st = SymTensorValue(1,2,3,5,6,9) +s4ot = one(SymFourthOrderTensorValue{2,Int}) a = VectorValue(1,2,3) c = 2 * a @@ -80,20 +113,51 @@ r = VectorValue(1/2,1.0,3/2) @test c == r c = 2 * t -@test isa(c,TensorValue{3,Int}) +@test isa(c,TensorValue{3}) r = TensorValue(2, 4, 6, 8, 10, 12, 14, 16, 18) @test c == r c = t * 2 -@test isa(c,TensorValue{3,Int}) +@test isa(c,TensorValue{3}) r = TensorValue(2, 4, 6, 8, 10, 12, 14, 16, 18) @test c == r c = t + 2 -@test isa(c,TensorValue{3,Int}) +@test isa(c,TensorValue{3,3,Int}) r = TensorValue(3, 4, 5, 6, 7, 8, 9, 10, 11) @test c == r + +c = 2 * st +@test isa(c,SymTensorValue{3}) +r = SymTensorValue(2,4,6,10,12,18) +@test c == r + +c = st * 2 +@test isa(c,SymTensorValue{3}) +r = SymTensorValue(2,4,6,10,12,18) +@test c == r + +c = st + 2 +@test isa(c,SymTensorValue{3}) +r = SymTensorValue(3,4,5,7,8,11) +@test c == r + +c = 2 * s4ot +@test isa(c,SymFourthOrderTensorValue{2}) +r = SymFourthOrderTensorValue(2,0,0, 0,1,0, 0,0,2) +@test c == r + +c = s4ot * 2 +@test isa(c,SymFourthOrderTensorValue{2}) +r = SymFourthOrderTensorValue(2,0,0, 0,1,0, 0,0,2) +@test c == r + +c = c + 0 +@test isa(c,SymFourthOrderTensorValue{2}) +r = SymFourthOrderTensorValue(2,0,0, 0,1,0, 0,0,2) +@test c == r + # Dot product (simple contraction) a = VectorValue(1,2,3) @@ -101,32 +165,44 @@ b = VectorValue(2,1,6) t = TensorValue(1,2,3,4,5,6,7,8,9) s = TensorValue(9,8,3,4,5,6,7,2,1) +st = SymTensorValue(1,2,3,5,6,9) +st2 = SymTensorValue(9,6,5,3,2,1) -c = a * b +c = a ⋅ b @test isa(c,Int) @test c == 2+2+18 -c = t * a +c = t ⋅ a @test isa(c,VectorValue{3,Int}) r = VectorValue(30,36,42) @test c == r -c = s * t -@test isa(c,TensorValue{3,Int}) +c = st ⋅ a +@test isa(c,VectorValue{3,Int}) +r = VectorValue(14,30,42) +@test c == r + +c = s ⋅ t +@test isa(c,TensorValue{3,3,Int}) r = TensorValue(38,24,18,98,69,48,158,114,78) @test c == r -c = a * t +c = st ⋅ st2 +@test isa(c,TensorValue{3,3,Int}) +r = TensorValue(36, 78, 108, 18, 39, 54, 12, 26, 36) +@test c == r + +c = a ⋅ st @test isa(c,VectorValue{3,Int}) -r = VectorValue(14, 32, 50) +r = VectorValue(14,30,42) @test c == r # Inner product (full contraction) -c = inner(2,3) +c = 2 ⊙ 3 @test c == 6 -c = inner(a,b) +c = a ⊙ b @test isa(c,Int) @test c == 2+2+18 @@ -134,6 +210,11 @@ c = inner(t,s) @test isa(c,Int) @test c == 185 +c = inner(st,st2) +c = st ⊙ st2 +@test isa(c,Int) +@test c == inner(TensorValue(get_array(st)),TensorValue(get_array(st2))) + # Reductions a = VectorValue(1,2,3) @@ -156,10 +237,12 @@ a = VectorValue(1,2,3) e = 
VectorValue(2,5) c = outer(2,3) +c = 2 ⊗ 3 @test c == 6 r = VectorValue(2,4,6) c = outer(2,a) +c = 2 ⊗ a @test isa(c,VectorValue{3,Int}) @test c == r @@ -168,14 +251,15 @@ c = outer(a,2) @test c == r c = outer(a,e) -@test isa(c,MultiValue{Tuple{3,2},Int}) -r = MultiValue{Tuple{3,2},Int}(2,4,6,5,10,15) +c = a ⊗ e +@test isa(c,TensorValue{3,2,Int}) +r = TensorValue{3,2,Int}(2,4,6,5,10,15) @test c == r e = VectorValue(10,20) k = TensorValue(1,2,3,4) c = outer(e,k) -@test c == MultiValue{Tuple{2,2,2}}(10, 20, 20, 40, 30, 60, 40, 80) +@test c == ThirdOrderTensorValue{2,2,2}(10, 20, 20, 40, 30, 60, 40, 80) @test tr(c) == VectorValue(50,110) @@ -185,10 +269,24 @@ t = TensorValue(10,2,30,4,5,6,70,8,9) c = det(t) @test c ≈ -8802.0 +@test det(t) == det(TensorValue(get_array(t))) +@test inv(t) == inv(TensorValue(get_array(t))) c = inv(t) @test isa(c,TensorValue{3}) +st = SymTensorValue(9,8,7,5,4,1) +@test det(st) == det(TensorValue(get_array(st))) +@test inv(st) == inv(TensorValue(get_array(st))) + +t = TensorValue(10) +@test det(t) == 10 +@test inv(t) == TensorValue(1/10) + +t = TensorValue(1,4,-1,1) +@test det(t) == det(TensorValue(get_array(t))) +@test inv(t) == inv(TensorValue(get_array(t))) + # Measure a = VectorValue(1,2,3) @@ -199,13 +297,16 @@ t = TensorValue(10,2,30,4,5,6,70,8,9) c = meas(t) @test c ≈ 8802.0 -v = MultiValue{Tuple{1,2}}(10,20) +st = SymTensorValue(1,2,3,5,6,9) +@test meas(st) == meas(TensorValue(get_array(st))) + +v = TensorValue{1,2}(10,20) @test meas(v) == sqrt(500) -v = MultiValue{Tuple{2,3}}(1,0,0,1,0,0) +v = TensorValue{2,3}(1,0,0,1,0,0) @test meas(v) ≈ 1.0 -v = MultiValue{Tuple{2,3}}(1,0,0,1,1,0) +v = TensorValue{2,3}(1,0,0,1,1,0) @test meas(v) ≈ sqrt(2) # Broadcasted operations @@ -245,25 +346,39 @@ v = VectorValue(1,0) t = TensorValue(1,2,3,4) @test tr(t) == 5 -@test tr(t) == 5 t = TensorValue(1,2,3,4,5,6,7,8,9) @test tr(t) == 15 -@test tr(t) == 15 -@test symmetric_part(t) == TensorValue(1.0, 3.0, 5.0, 3.0, 5.0, 7.0, 5.0, 7.0, 9.0) +st = SymTensorValue(1,2,3,5,6,9) +@test tr(st) == tr(TensorValue(get_array(st))) + +@test get_array(symmetric_part(t)) == get_array(TensorValue(1.0, 3.0, 5.0, 3.0, 5.0, 7.0, 5.0, 7.0, 9.0)) +@test symmetric_part(st) == symmetric_part(TensorValue(get_array(st))) a = TensorValue(1,2,3,4) b = a' @test adjoint(a) == b @test b == TensorValue(1,3,2,4) -@test a*b == TensorValue(10,14,14,20) +@test a⋅b == TensorValue(10,14,14,20) a = TensorValue(1,2,3,4) b = a' @test transpose(a) == b @test b == TensorValue(1,3,2,4) -@test a*b == TensorValue(10,14,14,20) +@test a⋅b == TensorValue(10,14,14,20) + +sa = SymTensorValue(1,2,3,5,6,9) +sb = sa' +@test adjoint(sa) == sb +@test sb == SymTensorValue(1,2,3,5,6,9) +@test sa⋅sb == TensorValue(get_array(sa))⋅TensorValue(get_array(sb)) + +sa = SymTensorValue(1,2,3,5,6,9) +sb = sa' +@test transpose(sa) == sb +@test sb == SymTensorValue(1,2,3,5,6,9) +@test sa⋅sb == TensorValue(get_array(sa))⋅TensorValue(get_array(sb)) u = VectorValue(1.0,2.0) v = VectorValue(2.0,3.0) @@ -277,4 +392,36 @@ b = VectorValue(2.0,3.0) a = VectorValue{0,Int}() @test a ≈ a +λ = 1 +μ = 1 +ε = SymTensorValue(1,2,3) +σ = λ*tr(ε)*one(ε) + 2*μ*ε +@test isa(σ,SymTensorValue) +@test (σ ⊙ ε) == 52 +#@test σ:ε == 52 + +I = one(SymFourthOrderTensorValue{2,Int}) +@test I[1,1,1,1] == 1 +@test I[1,2,1,2] == 0.5 +@test I[2,1,1,2] == 0.5 +@test I[2,2,2,2] == 1 + +@test I ⊙ ε == ε +#@test I : ε == ε + +a = TensorValue(1,2,3,4) +b = I ⊙ a +@test b == symmetric_part(a) +#b = I : a +#@test b == symmetric_part(a) + + +σ1 = λ*tr(ε)*one(ε) + 2*μ*ε +C = 
2*μ*one(ε⊗ε) + λ*one(ε)⊗one(ε) +σ2 = C ⊙ ε +@test σ1 == σ2 +#σ2 = C : ε +#@test σ1 == σ2 + + end # module OperationsTests diff --git a/test/TensorValuesTests/TypesTests.jl b/test/TensorValuesTests/TypesTests.jl index 5b659cad9..b694c9224 100644 --- a/test/TensorValuesTests/TypesTests.jl +++ b/test/TensorValuesTests/TypesTests.jl @@ -4,209 +4,207 @@ using Gridap.TensorValues using Test using StaticArrays -# Constructors (MultiValue) +# Constructors (TensorValue) -a = MArray{Tuple{3,2}}((1,2,3,4,5,6)) -v = MultiValue(a) -@test isa(v,MultiValue{Tuple{3,2},Int}) -@test v.array.data === a.data +a = SMatrix{2,2}(1,2,3,4) +t = TensorValue(a) +@test isa(t,TensorValue{2,2,Int}) +@test convert(SMatrix{2,2,Int},t) == [1 3;2 4] -a = SArray{Tuple{3,2}}((1,2,3,4,5,6)) +a = MMatrix{2,2}(1,2,3,4) +t = TensorValue(a) +@test isa(t,TensorValue{2,2,Int}) +@test convert(SMatrix{2,2,Int},t) == [1 3;2 4] -v = MultiValue(a) -@test isa(v,MultiValue{Tuple{3,2},Int}) -@test v.array === a +t = TensorValue{2}((1,2,3,4)) +@test isa(t,TensorValue{2,2,Int}) +@test convert(SMatrix{2,2,Int},t) == [1 3;2 4] -v = MultiValue{Tuple{3,2}}((1,2,3,4,5,6)) -@test isa(v,MultiValue{Tuple{3,2},Int}) -@test v.array == a +t = TensorValue{2}(1,2,3,4) +@test isa(t,TensorValue{2,2,Int}) +@test convert(SMatrix{2,2,Int},t) == [1 3;2 4] -v = MultiValue{Tuple{3,2}}(1,2,3,4,5,6) -@test isa(v,MultiValue{Tuple{3,2},Int}) -@test v.array == a +t = TensorValue(1,2,3,4) +@test isa(t,TensorValue{2,2,Int}) +@test convert(SMatrix{2,2,Int},t) == [1 3;2 4] -v = MultiValue{Tuple{3,2},Float64}((1,2,3,4,5,6)) -@test isa(v,MultiValue{Tuple{3,2},Float64}) -@test v.array == a +t = TensorValue((1,2,3,4)) +@test isa(t,TensorValue{2,2,Int}) +@test convert(SMatrix{2,2,Int},t) == [1 3;2 4] -v = MultiValue{Tuple{3,2},Float64}(1,2,3,4,5,6) -@test isa(v,MultiValue{Tuple{3,2},Float64}) -@test v.array == a +t = TensorValue{1}(10) +@test isa(t,TensorValue{1,1,Int}) +@test convert(SMatrix{1,1,Int},t) == 10*ones(1,1) -a = SVector(1) -v = MultiValue{Tuple{1}}((1,)) -@test isa(v,MultiValue{Tuple{1},Int}) -@test v.array == a +t = TensorValue{1}((10,)) +@test isa(t,TensorValue{1,1,Int}) +@test convert(SMatrix{1,1,Int},t) == 10*ones(1,1) -v = MultiValue{Tuple{1}}(1) -@test isa(v,MultiValue{Tuple{1},Int}) -@test v.array == a +# Constructors (SymTensorValue) -a = SMatrix{1,1}(1) -v = MultiValue{Tuple{1,1}}(1) -@test isa(v,MultiValue{Tuple{1,1},Int}) -@test v.array == a +s = SymTensorValue( (11,21,22) ) +@test isa(s,SymTensorValue{2,Int}) +@test convert(SMatrix{2,2,Int},s) == [11 21;21 22] -a = SVector{0,Int}() -v = MultiValue{Tuple{0},Int}(()) -@test isa(v,MultiValue{Tuple{0},Int}) -@test v.array == a +s = SymTensorValue(11,21,22) +@test isa(s,SymTensorValue{2,Int}) +@test convert(SMatrix{2,2,Float64},s) == [11.0 21.0;21.0 22.0] -a = SMatrix{0,0,Int}() -v = MultiValue{Tuple{0,0},Int}() -@test isa(v,MultiValue{Tuple{0,0},Int}) -@test v.array == a +s = SymTensorValue{2}( (11,21,22) ) +@test isa(s,SymTensorValue{2,Int}) +@test convert(SMatrix{2,2,Int},s) == [11 21;21 22] -# Constructors (TensorValue) +s = SymTensorValue{2}(11,21,22) +@test isa(s,SymTensorValue{2,Int}) +@test convert(SMatrix{2,2,Float64},s) == [11.0 21.0;21.0 22.0] -a = SMatrix{2,2}(1,2,3,4) -t = TensorValue(a) -@test isa(t,TensorValue{2,Int}) -@test t.array == [1 3;2 4] +s = SymTensorValue{2,Int}( (11,21,22) ) +@test isa(s,SymTensorValue{2,Int}) +@test convert(SMatrix{2,2,Int},s) == [11 21;21 22] -a = MMatrix{2,2}(1,2,3,4) -t = TensorValue(a) -@test isa(t,TensorValue{2,Int}) -@test t.array == [1 3;2 4] +s = 
SymTensorValue{2,Float64}(11,21,22) +@test isa(s,SymTensorValue{2,Float64}) +@test convert(SMatrix{2,2,Float64},s) == [11.0 21.0;21.0 22.0] -t = TensorValue{2}((1,2,3,4)) -@test isa(t,TensorValue{2,Int}) -@test t.array == [1 3;2 4] +s = SymTensorValue{0,Int}( () ) +@test isa(s,SymTensorValue{0,Int}) +@test convert(SMatrix{0,0,Int},s) == Array{Any,2}(undef,0,0) -t = TensorValue{2,Float64}((1,2,3,4)) -@test isa(t,TensorValue{2,Float64}) -@test t.array == [1 3;2 4] +s = SymTensorValue{0,Int}() +@test isa(s,SymTensorValue{0,Int}) +@test convert(SMatrix{0,0,Int},s) == Array{Any,2}(undef,0,0) -t = TensorValue{2}(1,2,3,4) -@test isa(t,TensorValue{2,Int}) -@test t.array == [1 3;2 4] +# Constructors (SymFourthOrderTensorValue) -t = TensorValue{2,Float64}(1,2,3,4) -@test isa(t,TensorValue{2,Float64}) -@test t.array == [1 3;2 4] +s = SymFourthOrderTensorValue( (1111,1121,1122, 2111,2121,2122, 2211,2221,2222) ) +@test isa(s,SymFourthOrderTensorValue{2,Int}) +@test Tuple(s) == (1111,1121,1122, 2111,2121,2122, 2211,2221,2222) -t = TensorValue(1,2,3,4) -@test isa(t,TensorValue{2,Int}) -@test t.array == [1 3;2 4] +s = SymFourthOrderTensorValue(1111,2111,2211, 1121,2121,2221, 1122,2122,2222) +@test isa(s,SymFourthOrderTensorValue{2,Int}) +@test Tuple(s) == (1111,2111,2211, 1121,2121,2221, 1122,2122,2222 ) -t = TensorValue((1,2,3,4)) -@test isa(t,TensorValue{2,Int}) -@test t.array == [1 3;2 4] +s = SymFourthOrderTensorValue{2}( (1111,2111,2211, 1121,2121,2221, 1122,2122,2222) ) +@test isa(s,SymFourthOrderTensorValue{2,Int}) +@test Tuple(s) == (1111,2111,2211, 1121,2121,2221, 1122,2122,2222 ) -t = TensorValue{0,Int}() -@test isa(t,TensorValue{0,Int}) -@test t.array == zeros(0,0) +s = SymFourthOrderTensorValue{2}(1111,2111,2211, 1121,2121,2221, 1122,2122,2222) +@test isa(s,SymFourthOrderTensorValue{2,Int}) +@test Tuple(s) == (1111,2111,2211, 1121,2121,2221, 1122,2122,2222 ) -t = TensorValue{1}(10) -@test isa(t,TensorValue{1,Int}) -@test t.array == 10*ones(1,1) +s = SymFourthOrderTensorValue{2,Int}( (1111,2111,2211, 1121,2121,2221, 1122,2122,2222) ) +@test isa(s,SymFourthOrderTensorValue{2,Int}) +@test Tuple(s) == (1111,2111,2211, 1121,2121,2221, 1122,2122,2222 ) -t = TensorValue{1}((10,)) -@test isa(t,TensorValue{1,Int}) -@test t.array == 10*ones(1,1) +s = SymFourthOrderTensorValue{2,Float64}(1111,2111,2211, 1121,2121,2221, 1122,2122,2222) +@test isa(s,SymFourthOrderTensorValue{2,Float64}) +@test Tuple(s) == (1111.0,2111.0,2211.0, 1121.0,2121.0,2221.0, 1122.0,2122.0,2222.0) -t = TensorValue{1,Float64}(10) -@test isa(t,TensorValue{1,Float64}) -@test t.array == 10*ones(1,1) +s = SymFourthOrderTensorValue{0,Int}( () ) +@test isa(s,SymFourthOrderTensorValue{0,Int}) +@test Tuple(s) == () -t = TensorValue{1,Float64}((10,)) -@test isa(t,TensorValue{1,Float64}) -@test t.array == 10*ones(1,1) +s = SymFourthOrderTensorValue{0,Int}() +@test isa(s,SymFourthOrderTensorValue{0,Int}) +@test Tuple(s) == () # Constructors (VectorValue) + a = SVector(1) g = VectorValue(a) @test isa(g,VectorValue{1,Int}) -@test g.array == [1,] +@test convert(SVector{1,Int},g) == [1,] a = SVector(1,2,3,4) g = VectorValue(a) @test isa(g,VectorValue{4,Int}) -@test g.array == [1,2,3,4] +@test convert(SVector{4,Int},g) == [1,2,3,4] a = MVector(1,2,3,4) g = VectorValue(a) @test isa(g,VectorValue{4,Int}) -@test g.array == [1,2,3,4] +@test convert(MVector{4,Int},g) == [1,2,3,4] g = VectorValue{4}((1,2,3,4)) @test isa(g,VectorValue{4,Int}) -@test g.array == [1,2,3,4] +@test convert(SVector{4,Int},g) == [1,2,3,4] g = VectorValue{1}((1,)) @test 
isa(g,VectorValue{1,Int}) -@test g.array == [1,] +@test convert(SVector{1,Int},g) == [1,] g = VectorValue{0,Int}(()) @test isa(g,VectorValue{0,Int}) -@test g.array == [] +@test convert(SVector{0,Int},g) == [] g = VectorValue{4}(1,2,3,4) @test isa(g,VectorValue{4,Int}) -@test g.array == [1,2,3,4] +@test convert(SVector{4,Int},g) == [1,2,3,4] g = VectorValue{1}(1) @test isa(g,VectorValue{1,Int}) -@test g.array == [1,] +@test convert(SVector{1,Int},g) == [1,] g = VectorValue{1,Float64}(1) @test isa(g,VectorValue{1,Float64}) -@test g.array == [1,] - -g = VectorValue{1,Float64}((1,)) -@test isa(g,VectorValue{1,Float64}) -@test g.array == [1,] +@test convert(SVector{1,Float64},g) == [1,] g = VectorValue{0,Int}() @test isa(g,VectorValue{0,Int}) -@test g.array == [] +@test convert(SVector{0,Int},g) == [] g = VectorValue{4,Float64}((1,2,3,4)) @test isa(g,VectorValue{4,Float64}) -@test g.array == [1,2,3,4] +@test convert(SVector{4,Float64},g) == [1,2,3,4] g = VectorValue{4}(1,2,3,4) @test isa(g,VectorValue{4,Int}) -@test g.array == [1,2,3,4] +@test convert(SVector{4,Int},g) == [1,2,3,4] g = VectorValue{4,Float64}(1,2,3,4) @test isa(g,VectorValue{4,Float64}) -@test g.array == [1,2,3,4] +@test convert(SVector{4,Float64},g) == [1,2,3,4] g = VectorValue(1,2,3,4) @test isa(g,VectorValue{4,Int}) -@test g.array == [1,2,3,4] +@test convert(SVector{4,Int},g) == [1,2,3,4] g = VectorValue((1,2,3,4)) @test isa(g,VectorValue{4,Int}) -@test g.array == [1,2,3,4] +@test convert(SVector{4,Int},g) == [1,2,3,4] g = VectorValue(1) @test isa(g,VectorValue{1,Int}) -@test g.array == [1,] +@test convert(SVector{1,Int},g) == [1,] # Initializers -z = zero(MultiValue{Tuple{3,2},Int,2,6}) -@test isa(z,MultiValue{Tuple{3,2},Int,2,6}) -@test z.array == zeros(Int,(3,2)) -s = zero(z) -@test s.array == zeros(Int,(3,2)) +z = zero(TensorValue{3,3,Int,9}) +@test isa(z,TensorValue{3,3,Int,9}) +@test convert(SMatrix{3,3,Int},z) == zeros(Int,(3,3)) -z = zero(TensorValue{3,Int,9}) -@test isa(z,TensorValue{3,Int,9}) -@test z.array == zeros(Int,(3,3)) +z = zero(SymTensorValue{3,Int}) +@test isa(z,SymTensorValue{3,Int,6}) +@test convert(SMatrix{3,3,Int},z) == zeros(Int,(3,3)) + +z = zero(SymFourthOrderTensorValue{2,Int}) +@test isa(z,SymFourthOrderTensorValue{2,Int,9}) +@test Tuple(z) == Tuple(zeros(Int,(9))) z = zero(VectorValue{3,Int}) @test isa(z,VectorValue{3,Int}) -@test z.array == zeros(Int,3) +@test convert(SVector{3,Int},z) == zeros(Int,3) -z = one(TensorValue{3,Int,9}) -@test isa(z,TensorValue{3,Int,9}) -@test z.array == [1 0 0; 0 1 0; 0 0 1] +z = one(TensorValue{3,3,Int,9}) +@test isa(z,TensorValue{3,3,Int,9}) +@test convert(SMatrix{3,3,Int},z) == [1 0 0; 0 1 0; 0 0 1] s = one(z) -@test s.array == [1 0 0; 0 1 0; 0 0 1] +@test convert(SMatrix{3,3,Int},s) == [1 0 0; 0 1 0; 0 0 1] + +z = one(SymFourthOrderTensorValue{2,Int}) +@test isa(z,SymFourthOrderTensorValue{2}) +@test Tuple(z) == (1.,0.,0., 0.,0.5,0., 0.,0.,1.) 
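
The 0.5 entries in the identity checked above are what make it act as the symmetric identity under double contraction; a brief sketch using only operations exercised in the OperationsTests diff earlier in this changeset:

using Gridap.TensorValues

I4 = one(SymFourthOrderTensorValue{2,Int})
ε = SymTensorValue(1,2,3)
I4 ⊙ ε == ε                  # identity on symmetric tensors
a = TensorValue(1,2,3,4)
I4 ⊙ a == symmetric_part(a)  # projects a general tensor onto its symmetric part
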
# Conversions @@ -223,12 +221,30 @@ b = convert(VectorValue{1,Int},a) @test isa(b,VectorValue{1,Int}) a = (1,2,2,1,3,2) -V = MultiValue{Tuple{3,2},Int,2,6} +V = TensorValue{3,2,Int,6} +b = convert(V,a) +@test isa(b,V) +b = V[a,a,a,] +@test isa(b,Vector{V}) + +a = (11,21,22) +V = SymTensorValue{2,Int,3} b = convert(V,a) @test isa(b,V) b = V[a,a,a,] @test isa(b,Vector{V}) +a = (1111,1121,1122, 2111,2121,2122, 2211,2221,2222) +V = SymFourthOrderTensorValue{2,Int,9} +b = convert(V,a) +@test isa(b,V) +@test b[2,2,2,1] == 2221 +@test b[2,2,1,2] == 2221 +@test b[2,1,2,1] == 2121 +@test b[1,2,2,1] == 2121 +b = V[a,a,a,] +@test isa(b,Vector{V}) + # Misc operations on the type itself V = VectorValue{3,Int} @@ -241,10 +257,18 @@ V = VectorValue{3} # Custom type printing -v = MultiValue{Tuple{3,2},Float64}(1,2,3,4,5,6) +v = TensorValue{3,2,Float64}(1,2,3,4,5,6) s = "(1.0, 2.0, 3.0, 4.0, 5.0, 6.0)" @test string(v) == s +v = SymTensorValue{3,Int64}(1, 0, 0, 1, 0, 1) +s = "(1, 0, 0, 1, 0, 1)" +@test string(v) == s + +v = SymFourthOrderTensorValue{2,Int64}(1111,1121,1122, 2111,2121,2122, 2211,2221,2222) +s = "(1111, 1121, 1122, 2111, 2121, 2122, 2211, 2221, 2222)" +@test string(v) == s + # Misc M = mutable(VectorValue{3,Int}) @@ -253,24 +277,27 @@ m = zero(M) v = VectorValue(m) @test isa(v,VectorValue{3,Int}) -@test n_components(Int) == 1 -@test n_components(Float64) == 1 -@test n_components(1.0) == 1 -@test n_components(1) == 1 -@test n_components(VectorValue{3,Float64}) == 3 -@test n_components(VectorValue(1,2,3)) == 3 +@test num_components(Int) == 1 +@test num_components(Float64) == 1 +@test num_components(1.0) == 1 +@test num_components(1) == 1 +@test num_components(VectorValue{3,Float64}) == 3 +@test num_components(VectorValue(1,2,3)) == 3 +@test num_components(TensorValue(1,2,3,4)) == 4 +@test num_components(SymTensorValue(1,2,3)) == 4 +@test num_components(SymFourthOrderTensorValue(1111,1121,1122, 2111,2121,2122, 2211,2221,2222)) == 16 a = VectorValue(1,2,3,4) @test change_eltype(a,Float64) == VectorValue{4,Float64} a = TensorValue(1,2,3,4) -@test change_eltype(a,Float64) == TensorValue{2,Float64,4} +@test change_eltype(a,Float64) == TensorValue{2,2,Float64,4} @test change_eltype(1,Float64) == Float64 a = TensorValue(1,2,3,4) @test isa(Tuple(a),Tuple) -@test Tuple(a) == a.array.data +@test Tuple(a) == a.data p = VectorValue(2,3) t = diagonal_tensor(p) @@ -280,4 +307,14 @@ p = VectorValue(1,2,3) t = diagonal_tensor(p) @test t == TensorValue(1,0,0,0,2,0,0,0,3) +a = SymTensorValue(11,21,22) +@test change_eltype(a,Float64) == SymTensorValue{2,Float64,3} +@test isa(Tuple(a),Tuple) +@test Tuple(a) == a.data + +a = SymFourthOrderTensorValue(1111,1121,1122, 2111,2121,2122, 2211,2221,2222) +@test change_eltype(a,Float64) == SymFourthOrderTensorValue{2,Float64,9} +@test isa(Tuple(a),Tuple) +@test Tuple(a) == a.data + end # module TypesTests
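
To close, a compact sketch of the operator conventions these tests adopt, together with the small elasticity example from OperationsTests; all values are illustrative only.

using Gridap.TensorValues
using LinearAlgebra

u = VectorValue(1,2,3)
v = VectorValue(2,1,6)
t = TensorValue(1,2,3,4,5,6,7,8,9)

u ⋅ v  # dot product via LinearAlgebra.dot; the former u*v syntax is gone
t ⋅ u  # tensor-vector contraction
t ⊙ t  # full contraction, alias of inner(t,t)
u ⊗ v  # alias of outer(u,v), a TensorValue{3,3,Int}

# Linear elasticity constitutive law, mirroring OperationsTests
λ, μ = 1, 1
ε = SymTensorValue(1,2,3)
C = 2*μ*one(ε⊗ε) + λ*one(ε)⊗one(ε)
σ = C ⊙ ε
σ == λ*tr(ε)*one(ε) + 2*μ*ε  # true
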