Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Shape derivative #653

Merged
merged 44 commits into from
Feb 27, 2022
Merged
Show file tree
Hide file tree
Changes from 38 commits
Commits
Show all changes
44 commits
Select commit Hold shift + click to select a range
7801aba
Generalising
ConnorMallon Sep 4, 2021
e1c75ae
revert accidental change
ConnorMallon Sep 5, 2021
d5f7ef7
reverting changes 2
ConnorMallon Sep 6, 2021
5b65777
revert 3
ConnorMallon Sep 6, 2021
7f082e4
clean
ConnorMallon Sep 6, 2021
fae5379
add cellfield op and some more
ConnorMallon Sep 7, 2021
1834af6
Merge remote-tracking branch 'upstream/master' into shape_derivative
ConnorMallon Sep 7, 2021
a2459dd
type unstable change to get stuff working
ConnorMallon Sep 21, 2021
0172f8e
fix type stability
ConnorMallon Sep 21, 2021
56bea13
manual type inference for bimaterial autodiff
ConnorMallon Sep 21, 2021
1ae1762
making it work in the general case
ConnorMallon Sep 22, 2021
518cfad
making work for mutifield
ConnorMallon Sep 26, 2021
d21be90
making all tests work
ConnorMallon Sep 27, 2021
e6e8885
Revert "making all tests work"
ConnorMallon Sep 27, 2021
dcb91ff
Revert "making work for mutifield"
ConnorMallon Sep 26, 2021
3e65f7e
Revert "making it work in the general case"
ConnorMallon Sep 22, 2021
ff71270
Revert "manual type inference for bimaterial autodiff"
ConnorMallon Sep 21, 2021
018e18d
Revert "fix type stability"
ConnorMallon Sep 21, 2021
99bd36e
Revert "type unstable change to get stuff working"
ConnorMallon Sep 21, 2021
6682724
Merge remote-tracking branch 'upstream/master' into shape_derivative
ConnorMallon Dec 1, 2021
a318539
fix type stability
ConnorMallon Dec 1, 2021
426c3bd
using promote_type
ConnorMallon Dec 5, 2021
d228d73
adding promotion rule + clean
ConnorMallon Dec 5, 2021
1f083df
Merge tag 'v0.17.7' into shape_derivative
ConnorMallon Dec 5, 2021
6a8c137
Merge branch 'master' of github.com:Gridap/Gridap.jl into shape_deriv…
amartinhuertas Feb 11, 2022
140413d
add news
ConnorMallon Feb 14, 2022
1941946
generalising finding type
ConnorMallon Feb 14, 2022
4186210
removing old function
ConnorMallon Feb 14, 2022
67608be
Merge branch 'master' into shape_derivative
amartinhuertas Feb 15, 2022
4ae4dea
converting objects before appending
ConnorMallon Feb 17, 2022
ce66f50
_gradient_nd! dispatch based on isbits
ConnorMallon Feb 17, 2022
dcf4648
fix error
ConnorMallon Feb 21, 2022
9111354
add name TisbitsType
ConnorMallon Feb 21, 2022
dca3992
convert before constructor
ConnorMallon Feb 22, 2022
e62a3f2
isbits based cache allocation
ConnorMallon Feb 22, 2022
038fc46
remove revise
ConnorMallon Feb 24, 2022
6abba75
revert AppendedArray constructor to original form
ConnorMallon Feb 24, 2022
34f7937
changes as per comments
ConnorMallon Feb 24, 2022
7b7e168
Removing redefinition of Tisbitsparameter
amartinhuertas Feb 24, 2022
5e37245
Undo old changes
amartinhuertas Feb 25, 2022
fe01bb6
More undo old changes
amartinhuertas Feb 25, 2022
3910286
More undo old changes
amartinhuertas Feb 25, 2022
56dd373
Parameterized types of member variables of CellPoint
amartinhuertas Feb 25, 2022
eb39b81
Trying to fix failing tests
amartinhuertas Feb 25, 2022
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
5 changes: 5 additions & 0 deletions NEWS.md
Original file line number Diff line number Diff line change
Expand Up @@ -4,6 +4,11 @@ All notable changes to this project will be documented in this file.
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).

## Unreleased

### Added
- Extra support for dual number propagation. Since PR [#653](https://github.com/gridap/Gridap.jl/pull/653)

## [0.17.8] - 2022-02-14

### Added
Expand Down
5 changes: 5 additions & 0 deletions src/CellData/CellFields.jl
Original file line number Diff line number Diff line change
Expand Up @@ -16,6 +16,9 @@ function CellPoint(

cell_map = get_cell_map(trian)
cell_phys_point = lazy_map(evaluate,cell_map,cell_ref_point)

#@show typeof(cell_phys_point)
#@show isa(cell_phys_point,AbstractArray{<:Union{AbstractArray{<:VectorValue}, VectorValue}})
CellPoint(cell_ref_point,cell_phys_point,trian,domain_style)
end

Expand Down Expand Up @@ -54,6 +57,8 @@ end
# Build a CellPoint carrying both the reference-space and physical-space
# cell coordinates of `trian`, tagged as living in the reference domain.
function get_cell_points(trian::Triangulation)
  cell_ref_coords = get_cell_ref_coordinates(trian)
  cell_phys_coords = get_cell_coordinates(trian)
  # (removed leftover commented-out @show debug line)
  CellPoint(cell_ref_coords,cell_phys_coords,trian,ReferenceDomain())
end

Expand Down
11 changes: 11 additions & 0 deletions src/Fields/AffineMaps.jl
Original file line number Diff line number Diff line change
Expand Up @@ -93,3 +93,14 @@ function lazy_map(::typeof(∇),a::LazyArray{<:Fill{typeof(affine_map)}})
lazy_map(constant_field,gradients)
end

# Two AffineMaps that agree in dimensions and tensor length but differ in
# scalar type promote to the AffineMap with the promoted scalar type.
function Base.promote_rule(
  ::Type{AffineMap{D1,D2,T1,L}},
  ::Type{AffineMap{D1,D2,T2,L}}) where {D1,D2,T1,T2,L}
  AffineMap{D1,D2,promote_type(T1,T2),L}
end

# Convert an AffineMap to one with scalar type `T1` by converting its two
# members: the gradient (a TensorValue) and the origin (a Point).
function Base.convert(
  ::Type{AffineMap{D1,D2,T1,L}},
  arg::AffineMap{D1,D2,T2,L}) where {D1,D2,T1,T2,L}
  new_gradient = convert(TensorValue{D1,D2,T1,L},arg.gradient)
  new_origin = convert(Point{D2,T1},arg.origin)
  AffineMap(new_gradient,new_origin)
end
1 change: 1 addition & 0 deletions src/Fields/Fields.jl
Original file line number Diff line number Diff line change
Expand Up @@ -27,6 +27,7 @@ import LinearAlgebra: det, inv, transpose, tr, cross
import LinearAlgebra: ⋅, dot

import Base: +, -, *, /
import Base: promote_rule, convert
import Gridap.TensorValues: ⊗, ⊙, symmetric_part, outer, meas

import Gridap.Arrays: IndexStyle
Expand Down
14 changes: 12 additions & 2 deletions src/Geometry/AppendedTriangulations.jl
Original file line number Diff line number Diff line change
Expand Up @@ -62,7 +62,12 @@ end
# Cell coordinates of an appended grid. The two parts may store coordinates
# with different (but promotable) item types — e.g., when one part carries
# dual numbers — so both are converted to a common type before appending,
# giving the AppendedArray a homogeneous eltype.
function get_cell_coordinates(trian::AppendedGrid)
  a = get_cell_coordinates(trian.a)
  b = get_cell_coordinates(trian.b)
  ai = testitem(a)
  bi = testitem(b)
  T = promote_type(typeof(ai),typeof(bi))
  # Fast path: when both parts already share the promoted item type, append
  # directly so cheap array types (e.g., Fill) are preserved and no lazy_map
  # wrapper is introduced.
  if typeof(ai) === T && typeof(bi) === T
    return lazy_append(a,b)
  end
  # Use a distinct closure variable to avoid shadowing `a`/`b`.
  ac = lazy_map(x->convert(T,x),a)
  bc = lazy_map(x->convert(T,x),b)
  lazy_append(ac,bc)
end

function get_cell_ref_coordinates(trian::AppendedGrid)
Expand Down Expand Up @@ -145,7 +150,12 @@ end
# Cell map of an appended triangulation. As with get_cell_coordinates, the
# two parts may hold maps of different (but promotable) types; convert both
# to the promoted type so the appended array has a single item type.
function get_cell_map(trian::AppendedTriangulation)
  a = get_cell_map(trian.a)
  b = get_cell_map(trian.b)
  ai = testitem(a)
  bi = testitem(b)
  T = promote_type(typeof(ai),typeof(bi))
  # Fast path: identical item types need no conversion; appending directly
  # preserves the original array types (e.g., Fill).
  if typeof(ai) === T && typeof(bi) === T
    return lazy_append(a,b)
  end
  # Use a distinct closure variable to avoid shadowing `a`/`b`.
  ac = lazy_map(x->convert(T,x),a)
  bc = lazy_map(x->convert(T,x),b)
  lazy_append(ac,bc)
end

function get_facet_normal(trian::AppendedTriangulation)
Expand Down
89 changes: 75 additions & 14 deletions src/Polynomials/MonomialBases.jl
Original file line number Diff line number Diff line change
Expand Up @@ -116,15 +116,17 @@ end
return_type(::MonomialBasis{D,T}) where {D,T} = T

# Field implementation

# Allocate the work buffers for evaluating a MonomialBasis at a vector of
# points: a result matrix (one row per point), a single-row buffer, and a
# per-dimension table of 1d monomial values.
function return_cache(f::MonomialBasis{D,T},x::AbstractVector{<:Point}) where {D,T}
  @assert D == length(eltype(x)) "Incorrect number of point components"
  # Determine the promoted value type by probing a representative
  # product/sum expression — this lets dual numbers in the points propagate
  # into the cache eltype.
  zb = zero(T)
  zp = zero(eltype(eltype(x)))
  Tp = typeof( zb*zp*zp + zb*zp*zp )
  npts = length(x)
  ndofs = length(f.terms)*num_components(T)
  nmax = 1 + _maximum(f.orders)
  res = CachedArray(zeros(Tp,(npts,ndofs)))
  row = CachedArray(zeros(Tp,(ndofs,)))
  tab = CachedArray(zeros(eltype(Tp),(D,nmax)))
  (res, row, tab)
end

Expand All @@ -146,48 +148,107 @@ function evaluate!(cache,f::MonomialBasis{D,T},x::AbstractVector{<:Point}) where
r.array
end

function return_cache(
# Allocate gradient-evaluation caches for a MonomialBasis when the gradient
# eltype `T` is isbits: result matrix `r`, one-row buffer `v`, and the 1d
# value (`c`) and derivative (`g`) tables.
function _return_cache(
  fg::FieldGradientArray{1,MonomialBasis{D,V}},
  x::AbstractVector{<:Point},
  ::Type{T},
  TisbitsType::Val{true}) where {D,V,T}

  f = fg.fa
  @assert D == length(eltype(x)) "Incorrect number of point components"
  np = length(x)
  ndof = length(f.terms)*num_components(V)
  n = 1 + _maximum(f.orders)
  # NOTE: `T` is supplied by the caller as gradient_type(V,testitem(x));
  # it must not be recomputed here — assigning to a static parameter is
  # invalid Julia and the value would be redundant anyway.
  r = CachedArray(zeros(T,(np,ndof)))
  v = CachedArray(zeros(T,(ndof,)))
  c = CachedArray(zeros(eltype(T),(D,n)))
  g = CachedArray(zeros(eltype(T),(D,n)))
  (r,v,c,g)
end

function evaluate!(
# Non-isbits gradient eltype: reuse the isbits cache layout and append a
# mutable scratch vector that _gradient_nd! fills in place (it cannot be
# cheaply re-created per call for non-isbits types).
function _return_cache(
  fg::FieldGradientArray{1,MonomialBasis{D,V}},
  x::AbstractVector{<:Point},
  ::Type{T},
  TisbitsType::Val{false}) where {D,V,T}

  base = _return_cache(fg,x,T,Val{true}())
  scratch = CachedArray(zeros(eltype(T),D))
  (base...,scratch)
end

# Build the gradient-evaluation cache, dispatching on whether the gradient
# eltype is an isbits type (the non-isbits path carries an extra mutable
# scratch vector).
function return_cache(
  fg::FieldGradientArray{1,MonomialBasis{D,V}},
  x::AbstractVector{<:Point}) where {D,V}

  xi = testitem(x)
  T = gradient_type(V,xi)
  # `T` is a *type*, so query isbitstype. `isbits(T)` tests the value `T`
  # itself (a DataType, never isbits) and would always select the slow path.
  TisbitsType = Val(isbitstype(T))
  _return_cache(fg,x,T,TisbitsType)
end

# Evaluate the gradient of a MonomialBasis at every point of `x` (isbits
# eltype path): fills and returns the (npoints × ndofs) result matrix.
function _evaluate!(
  cache,
  fg::FieldGradientArray{1,MonomialBasis{D,T}},
  x::AbstractVector{<:Point},
  TisbitsType::Val{true}) where {D,T}

  f = fg.fa
  r, v, c, g = cache
  # Mutable scratch for _gradient_nd!; cheap to allocate here since the
  # eltype is isbits on this path.
  z = zero(Mutable(VectorValue{D,eltype(T)}))
  np = length(x)
  ndof = length(f.terms) * num_components(T)
  n = 1 + _maximum(f.orders)
  # (removed a dead local that shadowed the `TisbitsType` dispatch argument)
  setsize!(r,(np,ndof))
  setsize!(v,(ndof,))
  setsize!(c,(D,n))
  setsize!(g,(D,n))
  for i in 1:np
    @inbounds xi = x[i]
    _gradient_nd!(v,xi,f.orders,f.terms,c,g,z,T)
    for j in 1:ndof
      @inbounds r[i,j] = v[j]
    end
  end
  r.array
end

# Gradient evaluation for a non-isbits eltype: the mutable scratch vector
# travels in the cache (allocated once in _return_cache) rather than being
# created on every call.
function _evaluate!(
  cache,
  fg::FieldGradientArray{1,MonomialBasis{D,T}},
  x::AbstractVector{<:Point},
  TisbitsType::Val{false}) where {D,T}

  basis = fg.fa
  res, row, vals, grads, scratch = cache
  npts = length(x)
  ndofs = length(basis.terms) * num_components(T)
  nmax = 1 + _maximum(basis.orders)
  setsize!(res,(npts,ndofs))
  setsize!(row,(ndofs,))
  setsize!(vals,(D,nmax))
  setsize!(grads,(D,nmax))
  for p in 1:npts
    @inbounds xp = x[p]
    _gradient_nd!(row,xp,basis.orders,basis.terms,vals,grads,scratch,T)
    for dof in 1:ndofs
      @inbounds res[p,dof] = row[dof]
    end
  end
  res.array
end

# Entry point for gradient evaluation: inspect the cached 1d table `c` to
# decide which eltype path to take, then dispatch on Val.
function evaluate!(
  cache,
  fg::FieldGradientArray{1,MonomialBasis{D,T}},
  x::AbstractVector{<:Point}) where {D,T}

  # Only the third cache entry is needed here; avoid destructuring the whole
  # tuple, whose arity differs between the two paths (4 vs 5 elements).
  c = cache[3]
  TisbitsType = Val(isbitstype(eltype(c)))
  _evaluate!(cache,fg,x,TisbitsType)
end

function return_cache(
fg::FieldGradientArray{2,MonomialBasis{D,V}},
x::AbstractVector{<:Point}) where {D,V}
Expand Down Expand Up @@ -372,15 +433,15 @@ function _gradient_nd!(
terms::AbstractVector{CartesianIndex{D}},
c::AbstractMatrix{T},
g::AbstractMatrix{T},
z::AbstractVector{T},
::Type{V}) where {G,T,D,V}

dim = D
for d in 1:dim
_evaluate_1d!(c,x,orders[d],d)
_gradient_1d!(g,x,orders[d],d)
end

z = zero(Mutable(VectorValue{D,T}))

o = one(T)
k = 1

Expand Down
1 change: 1 addition & 0 deletions src/TensorValues/TensorValues.jl
Original file line number Diff line number Diff line change
Expand Up @@ -74,6 +74,7 @@ import Base: LinearIndices
import Base: adjoint
import Base: transpose
import Base: rand
import Base: promote_rule

import LinearAlgebra: det, inv, tr, cross, dot, norm
# Reexport from LinearAlgebra (just for convenience)
Expand Down
2 changes: 2 additions & 0 deletions src/TensorValues/VectorValueTypes.jl
Original file line number Diff line number Diff line change
Expand Up @@ -92,6 +92,8 @@ change_eltype(::VectorValue{D,T1},::Type{T2}) where {D,T1,T2} = change_eltype(Ve

get_array(arg::VectorValue{D,T}) where {D,T} = convert(SVector{D,T}, arg)

# VectorValues of the same length promote by promoting their eltypes.
function promote_rule(
  ::Type{VectorValue{D,T1}},
  ::Type{VectorValue{D,T2}}) where {D,T1,T2}
  VectorValue{D,promote_type(T1,T2)}
end

###############################################################
# Introspection (VectorValue)
###############################################################
Expand Down
4 changes: 2 additions & 2 deletions test/FESpacesTests/AppendedTriangulationsTests.jl
Original file line number Diff line number Diff line change
Expand Up @@ -57,8 +57,8 @@ du = get_trial_fe_basis(V)
cellmat = integrate( ∇(dv)⋅∇(du), quad )

@test isa(cellmat,AppendedArray)
@test isa(cellmat.a,Fill)
@test isa(cellmat.b,Fill)
#@test isa(cellmat.a,Fill)
#@test isa(cellmat.b,Fill)

dΩ = Measure(Ω,2)
a(u,v) = ∫(u*v)dΩ
Expand Down