#!/usr/bin/env python
# optimizer.py

"""
Author : peters 

Revision History:
File created on 28 Jul 2010.

"""

import os
import sys
import logging
import datetime

identifier = 'Optimizer baseclass'
version    = '0.0'

################### Begin Class Optimizer ###################

class Optimizer(object):
    """
24
25
        This creates an instance of an optimization object. It handles the minimum least squares optimization
        of the state vector given a set of sample objects. Two routines will be implemented: one where the optimization
26
27
28
29
        is sequential and one where it is the equivalent matrix solution. The choice can be made based on considerations of speed
        and efficiency.
    """

    def __init__(self):
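        """ Set the identifier and version of this optimizer object """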
        self.Identifier = self.getid()
        self.Version    = self.getversion()

        msg                 = 'Optimizer object initialized: %s'%self.Identifier ; logging.info(msg)

    def getid(self):
        return identifier

    def getversion(self):
        return version

    def Initialize(self, dims):
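        """ Store the problem dimensions (nlag, nmembers, nparams, nobs) from the dims tuple and allocate the matrices """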

        self.nlag               = dims[0]
        self.nmembers           = dims[1]
        self.nparams            = dims[2]
        self.nobs               = dims[3]
        self.localization       = False

        self.CreateMatrices()

        return None

    def CreateMatrices(self):
        """ Create Matrix space needed in optimization routine """
        import numpy as np

        # mean state  [X]
        self.x               = np.zeros( (self.nlag*self.nparams,), float)
        # deviations from mean state  [X']
        self.X_prime         = np.zeros( (self.nlag*self.nparams,self.nmembers,), float)
        # mean state, transported to observation space [ H(X) ]
        self.Hx              = np.zeros( (self.nobs,), float)
        # deviations from mean state, transported to observation space [ H(X') ]
        self.HX_prime        = np.zeros( (self.nobs,self.nmembers), float)
        # observations
        self.obs             =  np.zeros( (self.nobs,), float)
        # covariance of observations
        self.R               =  np.zeros( (self.nobs,self.nobs,), float)
        # Total covariance of fluxes and obs in units of obs [H P H^t + R]
        self.HPHR            =  np.zeros( (self.nobs,self.nobs,), float)
        # Kalman Gain matrix
        self.KG              =  np.zeros( (self.nlag*self.nparams,self.nobs,), float)
        # flags of observations
        self.flags           =  np.zeros( (self.nobs,), int)

    def StateToMatrix(self,StateVector):
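        """ Fill the optimizer arrays from a StateVector object: the mean state x and deviations X_prime per lag,
            and the observed, simulated, and deviation values from the model samples of the last lag """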
        import numpy as np

        for n in range(self.nlag):

            members                                             = StateVector.EnsembleMembers[n]
            self.x[n*self.nparams:(n+1)*self.nparams]           = members[0].ParameterValues    # member 0 is taken to represent the mean state
            self.X_prime[n*self.nparams:(n+1)*self.nparams,:]   = np.transpose(np.array([m.ParameterValues for m in members]))

        self.X_prime                                            = self.X_prime - self.x[:,np.newaxis] # make into a deviation matrix

        self.obs[:]                                             = StateVector.EnsembleMembers[-1][0].ModelSample.Data.getvalues('obs')
        self.Hx[:]                                              = StateVector.EnsembleMembers[-1][0].ModelSample.Data.getvalues('simulated')

        for m,mem in enumerate(StateVector.EnsembleMembers[-1]):

            self.HX_prime[:,m]                                  = mem.ModelSample.Data.getvalues('simulated')

        self.HX_prime                                           = self.HX_prime - self.Hx[:,np.newaxis] # make a deviation matrix

        self.R[:,:]                                             = np.identity(self.nobs)

        return None

    def MatrixToState(self,StateVector):
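        """ Copy the optimized mean state and deviations from the matrices back into the ParameterValues of the StateVector ensemble members """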
        import numpy as np

        for n in range(self.nlag):

            members                              = StateVector.EnsembleMembers[n]
            for m,mem in enumerate(members):
                members[m].ParameterValues[:]    = self.X_prime[n*self.nparams:(n+1)*self.nparams,m] + self.x[n*self.nparams:(n+1)*self.nparams]     

        return None

    def SerialMinimumLeastSquares(self):
        """ Make minimum least squares solution by looping over obs"""
        import numpy as np
        import numpy.linalg as la

        tvalue=1.97591
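
        # Observations are assimilated one at a time: for each observation n, the mean state x is updated with
        # the Kalman gain KG[:,n], while the deviations X_prime are updated with a reduced gain (alpha * KG[:,n]).
        # The factor alpha = 1/(1+sqrt(R/HPHR)) is the scalar square-root form of the ensemble update (as in
        # Whitaker and Hamill, referenced in BulkMinimumLeastSquares below), which avoids perturbing the observations.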
        for n in range(self.nobs):

            if self.flags[n] == 1: continue

            PHt                         = 1./(self.nmembers-1)*np.dot(self.X_prime,self.HX_prime[n,:])
            self.HPHR[n,n]              = 1./(self.nmembers-1)*(self.HX_prime[n,:]*self.HX_prime[n,:]).sum()+self.R[n,n]

            self.KG[:,n]                = PHt/self.HPHR[n,n]

            dummy                       = self.Localize(n)

            alpha                       = np.double(1.0)/(np.double(1.0)+np.sqrt( (self.R[n,n])/self.HPHR[n,n] ) )

            res                         = self.obs[n]-self.Hx[n]

            self.x[:]                   = self.x + self.KG[:,n]*res

            for r in range(self.nmembers):
                self.X_prime[:,r]       = self.X_prime[:,r]-alpha*self.KG[:,n]*(self.HX_prime[n,r])

#WP !!!! Very important: first update all observations m = n+1,...,nobs, and only then update m = 1,...,n. The current
#WP      observation n should always be updated last, because it is used in the adjustment of all the others !!!!
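# In formula form, for each remaining observation m the two loops below apply:
#     fac   = (HX'_n . HX'_m) / ((N-1) * HPHR_nn)
#     Hx_m  = Hx_m  + fac * (y_n - Hx_n)
#     HX'_m = HX'_m - alpha * fac * HX'_n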

            for m in range(n+1,self.nobs):
                res                     = self.obs[n]-self.Hx[n]
                fac                     = 1.0/(self.nmembers-1)*(self.HX_prime[n,:]*self.HX_prime[m,:]).sum()/self.HPHR[n,n]
                self.Hx[m]              = self.Hx[m] + fac * res
                self.HX_prime[m,:]      = self.HX_prime[m,:] - alpha*fac*self.HX_prime[n,:]

            for m in range(1,n+1):
                res                     = self.obs[n]-self.Hx[n]
                fac                     = 1.0/(self.nmembers-1)*(self.HX_prime[n,:]*self.HX_prime[m,:]).sum()/self.HPHR[n,n]
                self.Hx[m]              = self.Hx[m] + fac * res
                self.HX_prime[m,:]      = self.HX_prime[m,:] - alpha*fac*self.HX_prime[n,:]

            
    def BulkMinimumLeastSquares(self):
        """ Make minimum least squares solution by solving matrix equations"""
        import numpy as np
        import numpy.linalg as la

        # Create full solution, first calculate the mean of the posterior analysis

        HPH                 = np.dot(self.HX_prime,np.transpose(self.HX_prime))/(self.nmembers-1)   # HPH  = 1/(N-1) * HX' (HX')^T
        self.HPHR[:,:]      = HPH+self.R                                                            # HPHR = HPH + R
        HPb                 = np.dot(self.X_prime,np.transpose(self.HX_prime))/(self.nmembers-1)    # PH^T = 1/(N-1) * X' (HX')^T
        self.KG[:,:]        = np.dot(HPb,la.inv(self.HPHR))                                         # K    = PH^T (HPH+R)^-1

        for n in range(self.nobs):
            dummy           = self.Localize(n)

        self.x[:]           = self.x + np.dot(self.KG,self.obs-self.Hx)                             # xa = xp + K (y-Hx)

        # And next make the updated ensemble deviations. Note that we calculate P by using the full equation (10) at once, and 
        # not in a serial update fashion as described in Whitaker and Hamill. 
        # For the current problem with limited N_obs this is easier, or at least more straightforward to do.
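        # In matrix form, the deviation update below reads:
        #     X'_a = X' - PH^T [ (HPH+R)^1/2 ]^-T [ (HPH+R)^1/2 + R^1/2 ]^-1 HX'
        # where (HPH+R)^1/2 is the Cholesky factor computed as sHPHR.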

        I                   = np.identity(self.nlag*self.nparams)
        sHPHR               = la.cholesky(self.HPHR)                                  # square root of HPH+R
        part1               = np.dot(HPb,np.transpose(la.inv(sHPHR)))                 # HP(sqrt(HPH+R))^-1
        part2               = la.inv(sHPHR+np.sqrt(self.R))                           # (sqrt(HPH+R)+sqrt(R))^-1
        Kw                  = np.dot(part1,part2)                                     # K~
        self.X_prime[:,:]   = np.dot(I,self.X_prime)-np.dot(Kw,self.HX_prime)         # X' = X' - K~ * HX'

        P_opt               = np.dot(self.X_prime,np.transpose(self.X_prime))/(self.nmembers-1)   # analysis covariance (diagnostic only, not used further here)

        # Now do the adjustments of the modeled mixing ratios using the linearized ensemble. These are not strictly needed but can be used
        # for diagnosis.

        part3               = np.dot(HPH,np.transpose(la.inv(sHPHR)))                           # HPH(sqrt(HPH+R))^-1
        Kw                  = np.dot(part3,part2)                                               # K~
        self.Hx[:]          = self.Hx + np.dot(np.dot(HPH,la.inv(self.HPHR)),self.obs-self.Hx)  # Hx = Hx + HPH (HPH+R)^-1 (y-Hx)
        self.HX_prime[:,:]  = self.HX_prime-np.dot(Kw,self.HX_prime)                            # HX' = HX'- K~ * HX'

        msg = 'Minimum Least Squares solution was calculated, returning' ; logging.info(msg)

        return None

    def SetLocalization(self):
        """ determine which localization to use """

        self.localization = True
        self.localizetype = "None"
    
        msg       = "Current localization option is set to %s"%self.localizetype  ; logging.info(msg)

    def Localize(self,n):
        """ localize the Kalman Gain matrix; this baseclass implementation applies no localization """

        if not self.localization: return

        return

################### End Class Optimizer ###################



if __name__ == "__main__":

    sys.path.append('../../')

    import os
    import sys
    from da.tools.general import StartLogger 
    from da.tools.initexit import CycleControl 
    from da.ct.statevector import CtStateVector, PrepareState
    from da.ct.obs import CtObservations
    import numpy as np
    import datetime
    import da.tools.rc as rc

    opts = ['-v']
    args = {'rc':'../../da.rc','logfile':'da_initexit.log','jobrcfilename':'test.rc'}

    StartLogger()
    DaCycle = CycleControl(opts,args)

    DaCycle.Initialize()
    print(DaCycle)

    StateVector = PrepareState(DaCycle)

    samples = CtObservations(DaCycle.DaSystem,datetime.datetime(2005,3,5))
    dummy = samples.AddObs()
    dummy = samples.AddSimulations('/Users/peters/tmp/test_da/output/20050305/samples.000.nc')


    nobs      = len(samples.Data)
    dims      = ( int(DaCycle['time.nlag']),
                  int(DaCycle['forecast.nmembers']),
                  int(DaCycle.DaSystem['nparameters']),
                  nobs,  )

    opt = Optimizer()
    opt.Initialize(dims)

    opt.StateToMatrix(StateVector)

    opt.SerialMinimumLeastSquares()       # or opt.BulkMinimumLeastSquares()

    opt.MatrixToState(StateVector)