CTDAS · Commit 2fc93b50

Authored 14 years ago by Peters, Wouter

put all functionality of CtOptimizer into the optimizer base class now

Parent: 56c60253
Changes: 1 changed file

da/ct/optimizer.py: +7 additions, −161 deletions
@@ -13,173 +13,19 @@ import os
 import sys
 import logging
 import datetime
+from da.baseclasses.optimizer import Optimizer
 
 identifier = 'CarbonTracker CO2'
 
 ################### Begin Class CtOptimizer ###################
 
-class CtOptimizer(object):
-    """
-    This creates an instance of a CarbonTracker optimization object. It handles the minimum least squares optimization
-    of the CT state vector given a set of CT sample objects. Two routines will be implemented: one where the optimization
-    is sequential and one where it is the equivalent matrix solution. The choice can be made based on considerations of speed
-    and efficiency.
-    """
+class CtOptimizer(Optimizer):
+    """
+    This creates an instance of a CarbonTracker optimization object. The base class it derives from is the optimezer object.
+    Additionally, this CtOptimizer implements a special localization option following the CT2007 method.
 
-    def __init__(self, dims):
-        self.nlag = dims[0]
-        self.nmembers = dims[1]
-        self.nparams = dims[2]
-        self.nobs = dims[3]
-        self.localization = False
-        self.localization = False
-        self.CreateMatrices()
-        return None
-
-    def CreateMatrices(self):
-        """ Create Matrix space needed in optimization routine """
-        import numpy as np
-
-        # mean state [X]
-        self.x = np.zeros((self.nlag * self.nparams,), float)
-        # deviations from mean state [X']
-        self.X_prime = np.zeros((self.nlag * self.nparams, self.nmembers,), float)
-        # mean state, transported to observation space [ H(X) ]
-        self.Hx = np.zeros((self.nobs,), float)
-        # deviations from mean state, transported to observation space [ H(X') ]
-        self.HX_prime = np.zeros((self.nobs, self.nmembers), float)
-        # observations
-        self.obs = np.zeros((self.nobs,), float)
-        # covariance of observations
-        self.R = np.zeros((self.nobs, self.nobs,), float)
-        # Total covariance of fluxes and obs in units of obs [H P H^t + R]
-        self.HPHR = np.zeros((self.nobs, self.nobs,), float)
-        # Kalman Gain matrix
-        self.KG = np.zeros((self.nlag * self.nparams, self.nobs,), float)
-        # flags of observations
-        self.flags = np.zeros((self.nobs,), int)
-
-    def StateToMatrix(self, StateVector):
-        import numpy as np
-
-        for n in range(self.nlag):
-            members = StateVector.EnsembleMembers[n]
-            self.x[n * self.nparams:(n + 1) * self.nparams] = members[0].ParameterValues[n]
-            self.X_prime[n * self.nparams:(n + 1) * self.nparams, :] = np.transpose(np.array([m.ParameterValues for m in members]))
-
-        self.X_prime = self.X_prime - self.x[:, np.newaxis]  # make into a deviation matrix
-
-        self.obs[:] = StateVector.EnsembleMembers[-1][0].ModelSample.MrData.getvalues('obs')
-        self.Hx[:] = StateVector.EnsembleMembers[-1][0].ModelSample.MrData.getvalues('simulated')
-
-        for m, mem in enumerate(StateVector.EnsembleMembers[-1]):
-            #self.HX_prime[:,m] = mem.ModelSample.MrData.getvalues('simulated')
-            self.HX_prime[:, m] = self.Hx + np.random.randn(self.nobs) * 3.0
-
-        self.HX_prime = self.HX_prime - self.Hx[:, np.newaxis]  # make a deviation matrix
-
-        self.R[:, :] = np.identity(self.nobs)
-
-        return None
-
-    def MatrixToState(self, StateVector):
-        import numpy as np
-
-        for n in range(self.nlag):
-            members = StateVector.EnsembleMembers[n]
-            for m, mem in enumerate(members):
-                members[m].ParameterValues[:] = self.X_prime[n * self.nparams:(n + 1) * self.nparams, m] + self.x[n * self.nparams:(n + 1) * self.nparams]
-
-        return None
-
-    def SerialMinimumLeastSquares(self):
-        """ Make minimum least squares solution by looping over obs """
-        import numpy as np
-        import numpy.linalg as la
-
-        tvalue = 1.97591
-
-        for n in range(self.nobs):
-            if self.flags[n] == 1:
-                continue
-
-            PHt = 1. / (self.nmembers - 1) * np.dot(self.X_prime, self.HX_prime[n, :])
-            self.HPHR[n, n] = 1. / (self.nmembers - 1) * (self.HX_prime[n, :] * self.HX_prime[n, :]).sum() + self.R[n, n]
-
-            self.KG[:, n] = PHt / self.HPHR[n, n]
-
-            dummy = self.Localize(n)
-
-            alpha = np.double(1.0) / (np.double(1.0) + np.sqrt((self.R[n, n]) / self.HPHR[n, n]))
-
-            res = self.obs[n] - self.Hx[n]
-
-            self.x[:] = self.x + self.KG[:, n] * res
-
-            for r in range(self.nmembers):
-                self.X_prime[:, r] = self.X_prime[:, r] - alpha * self.KG[:, n] * (self.HX_prime[n, r])
-
-            #WP !!!! Very important to first do all obervations from n=1 through the end, and only then update 1,...,n. The current observation
-            #WP should always be updated last because it features in the loop of the adjustments !!!!
-
-            for m in range(n + 1, self.nobs):
-                res = self.obs[n] - self.Hx[n]
-                fac = 1.0 / (self.nmembers - 1) * (self.HX_prime[n, :] * self.HX_prime[m, :]).sum() / self.HPHR[n, n]
-                self.Hx[m] = self.Hx[m] + fac * res
-                self.HX_prime[m, :] = self.HX_prime[m, :] - alpha * fac * self.HX_prime[n, :]
-
-            for m in range(1, n + 1):
-                res = self.obs[n] - self.Hx[n]
-                fac = 1.0 / (self.nmembers - 1) * (self.HX_prime[n, :] * self.HX_prime[m, :]).sum() / self.HPHR[n, n]
-                self.Hx[m] = self.Hx[m] + fac * res
-                self.HX_prime[m, :] = self.HX_prime[m, :] - alpha * fac * self.HX_prime[n, :]
-
-    def BulkMinimumLeastSquares(self):
-        """ Make minimum least squares solution by solving matrix equations """
-        import numpy as np
-        import numpy.linalg as la
-
-        # Create full solution, first calculate the mean of the posterior analysis
-
-        HPH = np.dot(self.HX_prime, np.transpose(self.HX_prime)) / (self.nmembers - 1)   # HPH = 1/N * HX' * (HX')^T
-        self.HPHR[:, :] = HPH + self.R                                                   # HPHR = HPH + R
-        HPb = np.dot(self.X_prime, np.transpose(self.HX_prime)) / (self.nmembers - 1)    # HP = 1/N X' * (HX')^T
-        self.KG[:, :] = np.dot(HPb, la.inv(self.HPHR))                                   # K = HP/(HPH+R)
-
-        for n in range(self.nobs):
-            dummy = self.Localize(n)
-
-        self.x[:] = self.x + np.dot(self.KG, self.obs - self.Hx)                         # xa = xp + K (y-Hx)
-
-        # And next make the updated ensemble deviations. Note that we calculate P by using the full equation (10) at once, and
-        # not in a serial update fashion as described in Whitaker and Hamill.
-        # For the current problem with limited N_obs this is easier, or at least more straightforward to do.
-
-        I = np.identity(self.nlag * self.nparams)
-        sHPHR = la.cholesky(self.HPHR)                                                   # square root of HPH+R
-        part1 = np.dot(HPb, np.transpose(la.inv(sHPHR)))                                 # HP(sqrt(HPH+R))^-1
-        part2 = la.inv(sHPHR + np.sqrt(self.R))                                          # (sqrt(HPH+R)+sqrt(R))^-1
-        Kw = np.dot(part1, part2)                                                        # K~
-        self.X_prime[:, :] = np.dot(I, self.X_prime) - np.dot(Kw, self.HX_prime)         # HX' = I - K~ * HX'
-
-        P_opt = np.dot(self.X_prime, np.transpose(self.X_prime)) / (self.nmembers - 1)
-
-        # Now do the adjustments of the modeled mixing ratios using the linearized ensemble. These are not strictly needed but can be used
-        # for diagnosis.
-
-        part3 = np.dot(HPH, np.transpose(la.inv(sHPHR)))                                 # HPH(sqrt(HPH+R))^-1
-        Kw = np.dot(part3, part2)                                                        # K~
-        self.Hx[:] = self.Hx + np.dot(np.dot(HPH, la.inv(self.HPHR)), self.obs - self.Hx)  # Hx = Hx+ HPH/HPH+R (y-Hx)
-        self.HX_prime[:, :] = self.HX_prime - np.dot(Kw, self.HX_prime)                  # HX' = HX'- K~ * HX'
-
-        msg = 'Minimum Least Squares solution was calculated, returning'; logging.info(msg)
-
-        return None
+
+    All other methods are inherited from the base class Optimizer.
     """
 
     def SetLocalization(self, type='None'):
        """ determine which localization to use """
...
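Note on the removed code: SerialMinimumLeastSquares is the serial ensemble square-root update of Whitaker and Hamill (2002), processing one observation at a time. Below is a minimal, self-contained sketch of that update on random test data; the array names mirror the class attributes above, the dimensions and data are illustrative assumptions, and the original's second bookkeeping loop over already-processed observations is omitted for brevity.

# Minimal sketch of the serial square-root update implemented by the removed
# SerialMinimumLeastSquares (Whitaker and Hamill, 2002). Dimensions and data
# are illustrative only; the per-member deviation loop of the original is
# vectorized here with an outer product.
import numpy as np

rng = np.random.default_rng(0)
nstate, nmembers, nobs = 12, 30, 5

x = rng.standard_normal(nstate)                      # mean state [X]
X_prime = rng.standard_normal((nstate, nmembers))    # deviations [X']
X_prime -= X_prime.mean(axis=1, keepdims=True)
Hx = rng.standard_normal(nobs)                       # mean state in obs space [H(X)]
HX_prime = rng.standard_normal((nobs, nmembers))     # deviations in obs space [H(X')]
HX_prime -= HX_prime.mean(axis=1, keepdims=True)
obs = rng.standard_normal(nobs)                      # observations
R = np.identity(nobs)                                # observation error covariance

for n in range(nobs):
    PHt = np.dot(X_prime, HX_prime[n, :]) / (nmembers - 1)
    HPHR_nn = (HX_prime[n, :] ** 2).sum() / (nmembers - 1) + R[n, n]
    KG = PHt / HPHR_nn                               # Kalman gain for this obs
    # square-root filter factor so deviations are not over-corrected
    alpha = 1.0 / (1.0 + np.sqrt(R[n, n] / HPHR_nn))
    res = obs[n] - Hx[n]

    x += KG * res                                    # update the mean state
    X_prime -= alpha * np.outer(KG, HX_prime[n, :])  # update the deviations

    # propagate the update to the not-yet-processed simulated observations;
    # the WP comment stresses that the current observation is adjusted last
    for m in range(n + 1, nobs):
        fac = (HX_prime[n, :] * HX_prime[m, :]).sum() / ((nmembers - 1) * HPHR_nn)
        Hx[m] += fac * res
        HX_prime[m, :] -= alpha * fac * HX_prime[n, :]

print('analysis mean:', x)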
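The inline comments in the removed BulkMinimumLeastSquares compress the matrix form of the same solution. Written out, with N ensemble members, deviation matrices X' and HX', observations y, and sqrt(HPH^T + R) denoting the Cholesky factor computed by la.cholesky, the code evaluates (per its own comment, equation 10 of Whitaker and Hamill):

\begin{align*}
  HPH^{T} &= \tfrac{1}{N-1}\, HX'\,(HX')^{T}, \qquad
  PH^{T}   = \tfrac{1}{N-1}\, X'\,(HX')^{T} \\
  K       &= PH^{T}\,\bigl(HPH^{T} + R\bigr)^{-1} \\
  x^{a}   &= x^{p} + K\,(y - Hx) \\
  \tilde{K} &= PH^{T}\,\bigl(\sqrt{HPH^{T} + R}\bigr)^{-T}\,
               \bigl(\sqrt{HPH^{T} + R} + \sqrt{R}\bigr)^{-1} \\
  X'_{a}  &= X' - \tilde{K}\,HX'
\end{align*}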
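The new docstring advertises a localization option following the CT2007 method, and the serial routine calls self.Localize(n) with tvalue = 1.97591 in scope, but the Localize body lies outside the visible hunk. The sketch below shows a significance-test localization of that flavor; the function name localize_ct2007 and its exact form are assumptions for illustration, not the committed code.

# Hypothetical sketch of significance-test localization in the CT2007 spirit:
# zero the gain for state elements whose ensemble correlation with the current
# observation is statistically insignificant. NOT the committed Localize body,
# which is outside the visible hunk.
import numpy as np

def localize_ct2007(KG, X_prime, HX_prime, n, tvalue=1.97591):
    nmembers = X_prime.shape[1]
    s = HX_prime[n, :]
    # sample correlation of each state element's deviations with H(X')_n
    num = (X_prime * s).sum(axis=1)
    den = np.sqrt((X_prime ** 2).sum(axis=1) * (s ** 2).sum())
    r = num / np.where(den == 0.0, 1.0, den)
    # two-sided t-test against the given critical value
    t = r * np.sqrt((nmembers - 2) / np.maximum(1.0 - r ** 2, 1e-12))
    KG[np.abs(t) < tvalue, n] = 0.0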