|
| 1 | +"""Stochastic iterative ensemble smoother (IES, i.e. EnRML) with *subspace* implementation.""" |
| 2 | + |
1 | 3 | import numpy as np |
2 | | -from scipy.linalg import solve |
3 | | -import copy as cp |
4 | | -from pipt.misc_tools import analysis_tools as at |
| 4 | +from scipy.linalg import solve, lu_solve, lu_factor, cho_solve |
| 5 | + |
| 6 | +import pipt.misc_tools.analysis_tools as at |
5 | 7 |
|
class margIS_update():
    """
    MargIES update from Stordal et al.

    Implemented with perturbed observations, which means that a prior belief
    is placed on the data uncertainty. The prior is an inverse chi-squared
    distribution, and after scaling the mean variance is 1.
    """

    def update(self, enX, enY, enE, **kwargs):
        """
        Compute one (marginal) IES iteration step in ensemble-weight space.

        Args:
            enX: ensemble of states (currently unused; kept for interface
                compatibility with the other update schemes).
            enY (np.ndarray): ensemble of predicted data, shape (num_data, ne).
            enE (np.ndarray): ensemble of data perturbations, shape (num_data, ne).
            **kwargs: optional tuning parameters:
                s (float): prior scale of the data uncertainty (default 1).
                nu (float): prior degrees of freedom (default self.ne - 1).

        Side effects:
            Sets ``self.W_step`` (the damped Gauss-Newton step for the weight
            matrix) and ``self.S`` (the Hessian approximation). On the first
            iteration also initializes ``self.current_W``, ``self.current_w``
            and the scaled perturbations ``self.D``.
        """
        if self.iteration == 1:  # method requires some initialization
            self.current_W = np.eye(self.ne)
            self.current_w = np.zeros(self.ne)
            # Scale everything so that the data uncertainty is the identity
            self.D = self.scale(enE, self.scale_data)

        # Scaling is the same as with 'known' uncertainty, hence it makes
        # sense to use s = 1 as the default prior scale.
        sY = self.scale(enY, self.scale_data)

        # Predicted-data anomalies mapped through the current weight matrix
        Y = np.linalg.solve(self.current_W.T, sY.T).T
        Y = Y @ self.proj * np.sqrt(self.ne - 1)

        # Number of data; was hard-coded to 70 — use the actual data size so
        # the method works for any number of observations.
        num_data = sY.shape[0]
        M = 1  # number of data per type (each datum currently treated individually)
        s = kwargs.get('s', 1)  # prior scale of data uncertainty
        nu = kwargs.get('nu', self.ne - 1)  # prior degrees of freedom

        self.S = 0
        deltaD = 0
        deltaD_sqrt = 0
        index = np.arange(1)  # one-element index window, advanced each pass
        for _ in range(num_data):
            delta = self.D[index, :] - sY[index, :]
            # Ensemble-mean chi-squared misfit for this datum
            Chi = np.mean(np.sum(delta * delta, axis=0))
            # Posterior-mean precision ratio from the inverse chi-squared prior
            Ratio = (M + nu) / (Chi + nu * s * s)
            # Gradient contribution
            deltaD = deltaD + (Y[index, :] * Ratio).T @ delta
            deltaD_sqrt = deltaD_sqrt + np.mean((Y[index, :] * Ratio).T @ delta, axis=1)
            # (Gauss-Newton) Hessian contribution
            self.S = self.S + (Y[index, :] * Ratio).T @ Y[index, :]
            index += 1

        # Prior (regularization) terms in weight space
        deltaM = (self.ne - 1) * (np.eye(self.ne) - self.current_W)
        deltaM_sqrt = (self.ne - 1) * self.current_w
        self.S = self.S + np.eye(self.ne) * (self.ne - 1)
        Delta = deltaM + deltaD
        Delta_sqrt = deltaM_sqrt + deltaD_sqrt  # kept for the square-root variant below

        # Levenberg-Marquardt damped Gauss-Newton step
        self.W_step = np.linalg.solve(self.S, Delta) / (1 + self.lam)
        # NOTE(review): square-root update currently disabled — confirm before enabling.
        # self.sqrt_w_step = np.linalg.solve(self.S, Delta_sqrt) / (1 + self.lam)

    def scale(self, data, scaling):
        """
        Scale the data perturbations by the data error standard deviation.

        Args:
            data (np.ndarray): data perturbations, shape (num_data, ne)
            scaling (np.ndarray): data error standard deviation, either a 1-D
                vector of standard deviations or a full scaling matrix (in
                which case a linear system is solved).

        Returns:
            np.ndarray: scaled data perturbations
        """
        if len(scaling.shape) == 1:
            return (scaling ** (-1))[:, None] * data
        else:
            return solve(scaling, data)
|
0 commit comments