# 2017 JA
import numpy as np
from scipy.linalg import pinv
from scipy.optimize import fmin, fsolve
from L96_model import lorenz96
###############################################################################
def var3d(x0,t,tobs,y,H,B,R,model,N):
    """Data assimilation routine for both Lorenz 1963 & 1996 using 3DVar.
    Inputs:  - x0, the real initial conditions
             - t, time array of the model (should be evenly spaced)
             - tobs, time array of the observations (should be evenly
               spaced with a timestep that is a multiple of the model
               timestep)
             - y, the observations
             - H, observation matrix
             - B, the background error covariance matrix
             - R, the observational error covariance matrix
             - model, a string indicating the name of the model; only
               'L96' is active (the 'lor63' branch is commented out)
             - N, the number of variables
    Outputs: - x_b, the background
             - x_a, the analysis"""
    # General settings
    Nsteps = np.size(t)
    # The model (truth) timestep
    tstep_truth = t[1]-t[0]
    # The observational (analysis) timestep
    tstep_obs = tobs[1]-tobs[0]
    # The ratio of observational to model timestep
    o2t = int(tstep_obs/tstep_truth+0.5)
    # Pre-allocate the arrays for background and analysis
    x_b = np.empty((Nsteps,N)); x_b.fill(np.nan)
    x_a = np.empty((Nsteps,N)); x_a.fill(np.nan)
    invB = pinv(B)
    invR = pinv(R)
    # For the initial background let's start close to the truth
    orig_bgd = 'fixed'
    #orig_bgd = 'random'
    if orig_bgd=='fixed':
        indaux = np.arange(N)
        x0_aux = x0 + (-1)**indaux
    elif orig_bgd=='random':
        x0_aux = x0 + np.random.randn(N)
    # At the initial time the background and the analysis coincide
    x_b[0,:] = x0_aux
    x_a[0,:] = x0_aux
    # The following cycle contains evolution and assimilation
    for j in range(len(tobs)-1):
        yaux = y[j+1,:]
        # First compute the background; the initial condition is the
        # forecast from the analysis at the previous observational time
        xb0 = x_a[j*o2t,:]
        #if model=='lor63':
        #    taux,xbaux = lorenz63(xb0,o2t*tstep_truth)
        if model=='L96':
            taux,xbaux = lorenz96(o2t*tstep_truth,xb0,N)
        x_b[j*o2t+1:(j+1)*o2t+1,:] = xbaux[1:,:]
        x_a[j*o2t+1:(j+1)*o2t+1,:] = xbaux[1:,:]
        xa_aux = one3dvar(xbaux[o2t,:],yaux,H,B,R,invB,invR)
        x_a[(j+1)*o2t,:] = xa_aux
        print('t =', t[o2t*(j+1)])
    return x_b,x_a
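
###############################################################################
# Illustrative usage sketch (not part of the original routines). The helper
# below shows one hypothetical way to drive var3d; it assumes lorenz96(tmax,
# x0,N) integrates with the same 0.01 timestep used in transmat_lor96 below
# and returns (t,x) with x shaped [len(t),N], as the calls above imply. The
# sizes and the choices of B, R and observation noise are arbitrary.
def _demo_var3d():
    N = 12
    t = np.arange(0,2.0+0.005,0.01)           # model time grid (201 steps)
    tobs = np.arange(0,2.0+0.025,0.05)        # observations every 5 steps
    x0 = 8.0 + np.random.randn(N)             # truth starts near the forcing
    taux,xt = lorenz96(t[-1],x0,N)            # nature run (the "truth")
    H = np.mat(np.eye(N))                     # observe every variable
    B = 0.5*np.eye(N)                         # assumed background error cov
    R = 0.1*np.eye(N)                         # assumed observation error cov
    y = xt[::5,:] + np.sqrt(0.1)*np.random.randn(len(tobs),N)
    return var3d(xt[0,:],t,tobs,y,H,B,R,'L96',N)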

def one3dvar(xb,y,H,B,R,invB,invR):
    "The 3DVar algorithm for one assimilation window."
    y = np.mat(y).T        # array -> column vector
    xbvec = np.mat(xb).T   # array -> column vector
    #opcmin = 'dirmin'
    opcmin = 'gradeq0'
    # The cost function in case the minimization is direct
    def costfun(xa):
        xa = np.mat(xa).T
        Jback = (xa-xbvec).T*invB*(xa-xbvec)
        Jobs = (y-H*xa).T*invR*(y-H*xa)
        J = Jback + Jobs
        return J
    # The gradient in case we prefer to find its roots
    def gradJ(xa):
        xa = np.mat(xa).T
        # The background term
        gJb = np.mat(invB)*(xa-xbvec)
        # The observational term
        gJo = -H.T*np.mat(invR)*(y-H*xa)
        gJ = gJb + gJo
        return gJ.A.flatten()
    if opcmin=='dirmin':
        xa = fmin(costfun,xb,xtol=1e-3)
    elif opcmin=='gradeq0':
        xa = fsolve(gradJ,xb,xtol=1e-4)
    return xa
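
# For a linear observation operator the minimizer of the 3DVar cost function
# above is known in closed form: xa = xb + K(y - H xb) with the gain
# K = B H^T (H B H^T + R)^(-1). The hypothetical helper below is a minimal
# sketch of that direct solution, using the same matrix conventions; it is
# handy for cross-checking the iterative result of one3dvar.
def _one3dvar_exact(xb,y,H,B,R):
    yvec = np.mat(y).T                  # array -> column vector
    xbvec = np.mat(xb).T                # array -> column vector
    K = B*H.T*pinv(H*B*H.T + R)         # Kalman-type gain
    xa = xbvec + K*(yvec - H*xbvec)     # analysis update
    return xa.A.flatten()
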
##############################################################################
# 4DVar
##############################################################################
def var4d(x0,t,tobs,anawin,y,H,B,R,model,N):
    """Data assimilation routine for both Lorenz 1963 & 1996 using 4DVar.
    Inputs:  - x0, the real initial conditions
             - t, time array of the model (should be evenly spaced)
             - tobs, time array of the observations (should be evenly
               spaced with a timestep that is a multiple of the model
               timestep)
             - anawin, length of the 4D assimilation window, expressed as
               the number of future observations included
             - y, the observations
             - H, observation matrix
             - B, the background error covariance matrix
             - R, the observational error covariance matrix
             - model, a string indicating the name of the model; only
               'L96' is active (the 'lor63' branch is commented out)
             - N, the number of variables
    Outputs: - x_b, the background
             - x_a, the analysis"""
    # General settings
    Nsteps = np.size(t)
    Ntobs = np.size(tobs)
    # The model (truth) timestep
    tstep_truth = t[1]-t[0]
    # The observational (analysis) timestep
    tstep_obs = tobs[1]-tobs[0]
    # The ratio of observational to model timestep
    o2t = int(tstep_obs/tstep_truth+0.5)
    # The total number of assimilation windows
    totana = (Ntobs-1)//anawin
    # Pre-allocate the arrays for background and analysis
    x_b = np.empty((Nsteps,N)); x_b.fill(np.nan)
    x_a = np.empty((Nsteps,N)); x_a.fill(np.nan)
    invB = pinv(B)
    invR = pinv(R)
    # For the initial background let's start close to the truth
    orig_bgd = 'fixed'
    #orig_bgd = 'random'
    if orig_bgd=='fixed':
        indaux = np.arange(N)
        x0_aux = x0 + (-1)**indaux
    elif orig_bgd=='random':
        x0_aux = x0 + np.random.randn(N)
    # At the initial time the background and the analysis coincide
    x_b[0,:] = x0_aux
    x_a[0,:] = x0_aux
    # The following cycle contains evolution and assimilation
    for j in range(totana):
        # Get the observations; these are distributed over the whole
        # assimilation window
        yaux = y[anawin*j+1:anawin*(j+1)+1,:]   # [anawin,L]
        # First compute the background; the background is the forecast
        # from the previous analysis
        xb0 = x_a[j*anawin*o2t,:]
        #if model=='lor63':
        #    taux,xbaux = lorenz63(xb0,o2t*anawin*tstep_truth)
        if model=='L96':
            taux,xbaux = lorenz96(o2t*anawin*tstep_truth,xb0,N)
        x_b[j*o2t*anawin:(j+1)*o2t*anawin+1,:] = xbaux
        xa0 = one4dvar(tstep_truth,o2t,anawin,xb0,yaux,H,B,R,model,N,invB,invR)
        #if model=='lor63':
        #    taux,xaaux = lorenz63(xa0,o2t*anawin*tstep_truth)
        if model=='L96':
            taux,xaaux = lorenz96(o2t*anawin*tstep_truth,xa0,N)
        x_a[j*o2t*anawin:(j+1)*o2t*anawin+1,:] = xaaux
        print('t =', tobs[anawin*(j+1)])
    return x_b,x_a
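
# Illustrative usage sketch for var4d (hypothetical, matching _demo_var3d
# above). The only new argument is anawin; note that len(tobs)-1 should be a
# multiple of anawin so the assimilation windows tile the experiment exactly.
def _demo_var4d():
    N = 12
    t = np.arange(0,2.0+0.005,0.01)           # model time grid
    tobs = np.arange(0,2.0+0.025,0.05)        # 41 obs times -> 40 intervals
    x0 = 8.0 + np.random.randn(N)
    taux,xt = lorenz96(t[-1],x0,N)            # nature run (the "truth")
    H = np.mat(np.eye(N))
    B = 0.5*np.eye(N)
    R = 0.1*np.eye(N)
    y = xt[::5,:] + np.sqrt(0.1)*np.random.randn(len(tobs),N)
    return var4d(xt[0,:],t,tobs,4,y,H,B,R,'L96',N)   # anawin = 4 obs/window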

def one4dvar(tstep_truth,o2t,anawin,x0b,y,H,B,R,model,N,invB,invR):
    "The 4DVar algorithm for one assimilation window."
    x0bvec = np.mat(x0b).T   # array -> column vector
    y = np.mat(y).T          # array -> matrix [N, nobs in assim window]
    #opcmin = 'dirmin'
    opcmin = 'gradeq0'
    # The SciPy optimization functions don't work well with matrices,
    # so in the following the initial guesses and function inputs and
    # outputs are arrays or scalars.
    # The cost function in case the minimization is direct
    def costfun(xa):
        xavec = np.mat(xa).T
        Jback = (xavec-x0bvec).T*invB*(xavec-x0bvec)
        #if model=='lor63':
        #    taux,xaaux = lorenz63(xa,o2t*anawin*tstep_truth)
        if model=='L96':
            taux,xaaux = lorenz96(o2t*anawin*tstep_truth,xa,N)
        indobs = range(o2t,anawin*o2t+1,o2t)
        xobs = xaaux[indobs,:]
        xobs = np.mat(xobs).T
        Jobs = np.empty(len(indobs))
        Jobs.fill(np.nan)
        for iJobs in range(len(indobs)):
            Jobs[iJobs] = (y[:,iJobs]-H*(xobs[:,iJobs])).T*invR \
                          *(y[:,iJobs]-H*(xobs[:,iJobs]))
        J = Jback + np.sum(Jobs)
        return J
    # The gradient in case we prefer to find its roots
    def gradJ(xa):
        xavec = np.mat(xa).T
        # The background term
        gJb = invB*(xavec-x0bvec)
        # For the model and observation error we need the TLM and adjoint
        # Choose the TLM/adjoint depending on the model
        #if model=='lor63':
        #    xaux,M = transmat_lor63(xa,o2t*anawin*tstep_truth)
        if model=='L96':
            xaux,M = transmat_lor96(xa,o2t*anawin*tstep_truth)
        # The observation error term, evaluated at different times
        gJok = np.mat(np.empty((N,anawin)))
        gJok.fill(np.nan)
        for j in range(anawin):
            gJok[:,j] = -np.mat(M[:,:,o2t*(j+1)]).T*H.T*invR \
                        *(y[:,j]-H*np.mat(xaux[o2t*(j+1),:]).T)
        # Adding the two
        gJ = gJb + np.sum(gJok,axis=1)
        return gJ.A.flatten()
    if opcmin=='dirmin':
        xa = fmin(costfun,x0b,xtol=1e-3)
    elif opcmin=='gradeq0':
        xa = fsolve(gradJ,x0b,xtol=1e-4)
    return xa
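
# Since gradJ is hand-coded from the TLM, a cheap sanity check is to compare
# it against a central finite-difference gradient of costfun. Both functions
# are local to one4dvar, so this hypothetical helper takes them as arguments;
# to use it, temporarily expose the closures (eps is an arbitrary choice).
def _check_gradient(costfun,gradJ,x,eps=1e-6):
    g_fd = np.empty_like(x)
    for i in range(len(x)):
        xp = x.copy(); xp[i] += eps
        xm = x.copy(); xm[i] -= eps
        g_fd[i] = (float(costfun(xp))-float(costfun(xm)))/(2*eps)
    # Maximum absolute discrepancy between analytic and numerical gradients
    return np.max(np.abs(g_fd - gradJ(x)))
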
##############################################################################
def transmat_lor96(x0,tmax):
    """The transition matrix required for the TLM and the adjoint.
    Inputs:  - x0, an array containing the initial state
             - tmax, the maximum time. Should be a multiple of the
               timestep 0.01.
    Outputs: - x, the evolved state [len(t) x N]
             - M, the transition matrix for small perturbations from
               the initial state to the evolved state [N x N x len(t)]"""
    global N
    N = len(x0)
    tstep = 0.01
    taux = np.arange(0,tmax+tstep/2,tstep)
    # Settings for M
    M0 = np.eye(N)
    xaux = np.empty((len(taux),N))
    xaux.fill(np.nan)
    xaux[0,:] = x0
    M = np.empty((N,N,len(taux)))
    M.fill(np.nan)
    M[:,:,0] = M0
    for i in range(len(taux)-1):   # for each time step
        xaux[i+1,:],M[:,:,i+1] = integr(xaux[i,:],M[:,:,i],tstep)
    return xaux,M

def integr(xin,Min,tstep):
    global N
    # One Runge-Kutta 4 step, integrating the model and the TLM at the
    # same time (the state and the flattened matrix are stacked together)
    Varsold = np.concatenate((xin,Min.flatten()))
    k1 = faux(Varsold)
    k2 = faux(Varsold+1/2.0*tstep*k1)
    k3 = faux(Varsold+1/2.0*tstep*k2)
    k4 = faux(Varsold+tstep*k3)
    Varsnew = Varsold + 1/6.0*tstep*(k1+2*k2+2*k3+k4)
    xout = Varsnew[:N]
    Mout = np.reshape(Varsnew[N:],(N,N))
    return xout,Mout

def faux(Varsin):
    "The Lorenz 1996 model and its TLM."
    global N
    dx = f(Varsin[:N])
    F = tlm(Varsin[:N],N)
    dM = F*np.mat(np.reshape(Varsin[N:],(N,N)))
    dxaux = np.concatenate((dx,dM.A.flatten()))
    return dxaux

def f(x):
    "The actual Lorenz 1996 model."
    global N
    forc = 8.0
    k = np.empty_like(x)
    k.fill(np.nan)
    # Remember it is a cyclic model, hence we need modular arithmetic
    for j in range(N):
        k[j] = (x[(j+1)%N]-x[(j-2)%N])*x[(j-1)%N] - x[j] + forc
    return k

def tlm(u,nx):
    "The Jacobian of the Lorenz 1996 model; the TLM evolves as dM/dt = F M."
    F = np.zeros((nx,nx))
    for j in range(nx):
        F[j,(j-2)%nx] = -u[(j-1)%nx]
        F[j,(j-1)%nx] = -u[(j-2)%nx] + u[(j+1)%nx]
        F[j,j%nx] = -1
        F[j,(j+1)%nx] = u[(j-1)%nx]
    return F
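
###############################################################################
# A standard validation of the TLM: for a small perturbation dx, the
# difference between two nonlinear trajectories should match the TLM
# prediction M dx to first order. This is a hypothetical helper sketch;
# the perturbation size and the use of the final time are arbitrary choices.
def _check_tlm(x0,tmax,eps=1e-5):
    dx = eps*np.random.randn(len(x0))
    x1,M = transmat_lor96(x0,tmax)        # reference trajectory and TLM
    x2,_ = transmat_lor96(x0+dx,tmax)     # perturbed trajectory
    diff_nl = x2[-1,:]-x1[-1,:]           # nonlinear difference at final time
    diff_tl = M[:,:,-1] @ dx              # TLM-propagated perturbation
    # The ratio should approach 1 as eps -> 0
    return np.linalg.norm(diff_nl)/np.linalg.norm(diff_tl)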