Commit 004a1930 authored by Peters, Wouter

Two algorithms have been tested against each other, and against two external...

Two algorithms have been tested against each other, and against two external inversions now. The result is that they give the same answer as long as no localization is used. Both now also correspond exactly to a full Kalman filter solution. They do, however, *not* correspond to the results from CT2009, because the Fortran algorithm contained an error in the update loop of the simulated mean and deviations.
parent 62be0537
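The equivalence claimed in the commit message can be verified on a toy problem. The sketch below is illustrative and not part of the commit: it assumes a linear observation operator H and a diagonal observation error covariance R, uses made-up names and numbers, and applies no localization. With the scalar square-root factor alpha used in SerialMinimumLeastSquares below, the serially updated mean matches the batch Kalman filter solution computed from the same ensemble, up to floating point error:

# Toy consistency check (illustrative, not part of this commit).
# Serial square-root assimilation with diagonal R should reproduce the
# batch (full Kalman filter) posterior mean computed from the same ensemble.
import numpy as np

rng = np.random.RandomState(0)
nparams, nmembers, nobs = 3, 25, 2
H = rng.randn(nobs, nparams)                      # linear observation operator
R = np.diag([0.5, 0.8])                           # diagonal obs error covariance
y = np.array([1.0, -0.5])                         # observations

x = np.zeros(nparams)                             # prior mean
X_prime = rng.randn(nparams, nmembers)            # prior ensemble deviations
X_prime -= X_prime.mean(axis=1, keepdims=True)

# Batch solution: xa = x + PH^T (HPH^T + R)^-1 (y - Hx)
P = X_prime @ X_prime.T / (nmembers - 1)
xa_batch = x + P @ H.T @ np.linalg.inv(H @ P @ H.T + R) @ (y - H @ x)

# Serial square-root solution, one observation at a time
Hx, HXp, xs, Xp = H @ x, H @ X_prime, x.copy(), X_prime.copy()
for n in range(nobs):
    PHt = Xp @ HXp[n] / (nmembers - 1)            # PH^T column for this obs
    HPHR = (HXp[n] ** 2).sum() / (nmembers - 1) + R[n, n]
    K = PHt / HPHR                                # Kalman gain column
    alpha = 1.0 / (1.0 + np.sqrt(R[n, n] / HPHR))
    res = y[n] - Hx[n]
    xs = xs + K * res
    Xp = Xp - alpha * np.outer(K, HXp[n])
    # Update the simulated observations consistently; computing fac and res
    # from the pre-update values is equivalent to saving the current row
    # for last, which is the ordering fix this commit describes.
    fac = HXp @ HXp[n] / (nmembers - 1) / HPHR
    Hx, HXp = Hx + fac * res, HXp - alpha * np.outer(fac, HXp[n])

print(np.allclose(xs, xa_batch))                  # True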
@@ -31,6 +31,8 @@ class CtOptimizer(object):
         self.nmembers = dims[1]
         self.nparams = dims[2]
         self.nobs = dims[3]
+        self.localization = False
         self.CreateMatrices()
@@ -56,6 +58,8 @@ class CtOptimizer(object):
         self.HPHR = np.zeros( (self.nobs,self.nobs,), float)
         # Kalman Gain matrix
         self.KG = np.zeros( (self.nlag*self.nparams,self.nobs,), float)
+        # flags of observations
+        self.flags = np.zeros( (self.nobs,), int)

     def FillMatrices(self,StateVector):
         import numpy as np
@@ -81,28 +85,44 @@ class CtOptimizer(object):
         import numpy as np
         import numpy.linalg as la
-        tvalue=1.97591
         for n in range(self.nobs):
-            df_row = self.HX_prime[n,:]
-            QHt = 1./(self.nmembers-1)*np.dot(self.X_prime,df_row)
-            HQHR = 1./(self.nmembers-1)*(df_row*df_row).sum()+self.R[n,n]
-            K = QHt/HQHR
-            alpha = 1.0/(1.0+np.sqrt( (self.R[n,n])/HQHR ) )
+            if self.flags[n] == 1: continue
+            PHt = 1./(self.nmembers-1)*np.dot(self.X_prime,self.HX_prime[n,:])
+            self.HPHR[n,n] = 1./(self.nmembers-1)*(self.HX_prime[n,:]*self.HX_prime[n,:]).sum()+self.R[n,n]
+            self.KG[:,n] = PHt/self.HPHR[n,n]
+            dummy = self.Localize(n)
+            alpha = np.double(1.0)/(np.double(1.0)+np.sqrt( (self.R[n,n])/self.HPHR[n,n] ) )
             res = self.obs[n]-self.Hx[n]
-            self.x[:] = self.x + K*res
+            self.x[:] = self.x + self.KG[:,n]*res
             for r in range(self.nmembers):
-                self.X_prime[:,r] = self.X_prime[:,r]-alpha*K*(self.HX_prime[n,r])
+                self.X_prime[:,r] = self.X_prime[:,r]-alpha*self.KG[:,n]*(self.HX_prime[n,r])
+            #WP !!!! Very important to first do all observations from n=1 through the end, and only then update 1,...,n. The current observation
+            #WP should always be updated last because it features in the loop of the adjustments !!!!
+            for m in range(n+1,self.nobs):
+                res = self.obs[n]-self.Hx[n]
+                fac = 1.0/(self.nmembers-1)*(self.HX_prime[n,:]*self.HX_prime[m,:]).sum()/self.HPHR[n,n]
+                self.Hx[m] = self.Hx[m] + fac * res
+                self.HX_prime[m,:] = self.HX_prime[m,:] - alpha*fac*self.HX_prime[n,:]
-            for m in range(self.nobs):
+            for m in range(1,n+1):
                 res = self.obs[n]-self.Hx[n]
-                fac = 1.0/(self.nmembers-1)*(self.HX_prime[n,:]*self.HX_prime[m,:]).sum()/HQHR
+                fac = 1.0/(self.nmembers-1)*(self.HX_prime[n,:]*self.HX_prime[m,:]).sum()/self.HPHR[n,n]
                 self.Hx[m] = self.Hx[m] + fac * res
                 self.HX_prime[m,:] = self.HX_prime[m,:] - alpha*fac*self.HX_prime[n,:]
     def BulkMinimumLeastSquares(self):
         """ Make minimum least squares solution by solving matrix equations"""
         import numpy as np
@@ -112,9 +132,12 @@ class CtOptimizer(object):
         HPH = np.dot(self.HX_prime,np.transpose(self.HX_prime))/(self.nmembers-1)   # HPH = 1/N * HX' * (HX')^T
         self.HPHR[:,:] = HPH+self.R                                                 # HPHR = HPH + R
-        self.HPHR[:,:] = np.identity(self.nobs)
         HPb = np.dot(self.X_prime,np.transpose(self.HX_prime))/(self.nmembers-1)    # HP = 1/N * X' * (HX')^T
         self.KG[:,:] = np.dot(HPb,la.inv(self.HPHR))                                # K = HP/(HPH+R)
+        for n in range(self.nobs):
+            dummy = self.Localize(n)
         self.x[:] = self.x + np.dot(self.KG,self.obs-self.Hx)                       # xa = xp + K (y-Hx)
         # And next make the updated ensemble deviations. Note that we calculate P by using the full equation (10) at once, and
@@ -130,22 +153,45 @@ class CtOptimizer(object):
+        P_opt = np.dot(self.X_prime,np.transpose(self.X_prime))/(self.nmembers-1)
+        print P_opt.shape
         # Now do the adjustments of the modeled mixing ratios using the linearized ensemble. These are not strictly needed but can be used
         # for diagnosis.
         part3 = np.dot(HPH,np.transpose(la.inv(sHPHR)))                             # HPH(sqrt(HPH+R))^-1
         Kw = np.dot(part3,part2)                                                    # K~
-        Hx_opt = self.Hx + np.dot(np.dot(HPH,la.inv(self.HPHR)),self.obs-self.Hx)   # Hx = Hx + HPH/(HPH+R) (y-Hx)
-        Hx_prime_opt = self.HX_prime-np.dot(Kw,self.HX_prime)                       # HX' = HX' - K~ * HX'
-        print Hx_prime_opt.shape
+        self.Hx[:] = self.Hx + np.dot(np.dot(HPH,la.inv(self.HPHR)),self.obs-self.Hx)  # Hx = Hx + HPH/(HPH+R) (y-Hx)
+        self.HX_prime[:,:] = self.HX_prime-np.dot(Kw,self.HX_prime)                 # HX' = HX' - K~ * HX'
         msg = 'Minimum Least Squares solution was calculated, returning' ; logging.info(msg)
         return None

+    def SetLocalization(self,type='None'):
+        """ determine which localization to use """
+        if type == 'CT2007':
+            self.localization = True
+            self.localizetype = 'CT2007'
+        else:
+            self.localization = False
+            self.localizetype = 'None'

+    def Localize(self,n):
+        """ localize the Kalman Gain matrix """
+        import numpy as np
+        if not self.localization: return
+        if self.localizetype == 'CT2007':
+            tvalue = 1.97591
+            if np.sqrt(self.R[n,n]) >= 1.5:
+                for r in range(self.nlag*self.nparams):
+                    corr = np.corrcoef(self.HX_prime[n,:],self.X_prime[r,:].squeeze())[0,1]
+                    prob = corr/np.sqrt((1.0-corr**2)/(self.nmembers-2))
+                    if abs(prob) < tvalue:
+                        self.KG[r,n] = 0.0

################### End Class CtOptimizer ###################
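The #WP warning in SerialMinimumLeastSquares is the heart of the fix described in the commit message: res and fac for every row m are computed from row n of Hx and HX_prime, so row n itself must be overwritten last. A minimal illustration of the failure mode, with made-up numbers rather than CT code:

# Illustrative only: why the current observation must be updated last.
# If row n is overwritten before the other rows, every later fac is
# computed from already-updated values and the result drifts.
import numpy as np

rng = np.random.RandomState(1)
nobs, nmembers = 4, 10
HX_prime = rng.randn(nobs, nmembers)
alpha, HPHR, n = 0.7, 2.0, 1                      # arbitrary scalars, pivot row n

def sweep(HXp, order):
    HXp = HXp.copy()
    for m in order:                               # fac uses row n each time
        fac = (HXp[n] * HXp[m]).sum() / (nmembers - 1) / HPHR
        HXp[m] = HXp[m] - alpha * fac * HXp[n]
    return HXp

last = sweep(HX_prime, [m for m in range(nobs) if m != n] + [n])   # row n last
first = sweep(HX_prime, [n] + [m for m in range(nobs) if m != n])  # row n first
print(np.abs(last - first).max())                 # clearly non-zero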
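The new Localize method screens the Kalman gain with a significance test: a gain element survives only if the ensemble correlation between that parameter's deviations and the simulated observation is significant in a two-sided t-test. The hard-coded tvalue of 1.97591 is the two-sided 95% critical value for roughly 148 degrees of freedom, consistent with an ensemble of about 150 members; that interpretation is an inference, not stated in the diff. A standalone sketch of the same test, with illustrative names:

# Standalone sketch of the CT2007-style significance localization
# (illustrative; mirrors Localize above, but outside the class).
import numpy as np

def localize_gain(KG_col, HX_prime_row, X_prime, tvalue=1.97591):
    """Zero gain elements whose parameter-observation ensemble
    correlation is insignificant under a two-sided t-test."""
    nmembers = HX_prime_row.shape[0]
    KG_col = KG_col.copy()
    for r in range(X_prime.shape[0]):
        corr = np.corrcoef(HX_prime_row, X_prime[r])[0, 1]
        t = corr / np.sqrt((1.0 - corr ** 2) / (nmembers - 2))
        if abs(t) < tvalue:
            KG_col[r] = 0.0
    return KG_col

# Usage: spurious correlations from a finite ensemble get screened out
rng = np.random.RandomState(2)
nparams, nmembers = 6, 150
X_prime = rng.randn(nparams, nmembers)
HX_prime_row = 2.0 * X_prime[0] + 0.1 * rng.randn(nmembers)  # only param 0 matters
KG_col = rng.randn(nparams)
print(localize_gain(KG_col, HX_prime_row, X_prime))  # mostly zeros except index 0

In the class itself this behaviour is enabled by calling SetLocalization('CT2007') before computing the minimum least squares solution; with the default 'None', Localize returns immediately and the gain is left untouched.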