Commit b695793c authored by Danilo Ferreira de Lima

Draft.

parent 5f1f7bd1
1 merge request: !3 Added kernel approximation with the Nystroem sub-space projection method as an alternative
@@ -8,11 +8,12 @@ from autograd import grad
 from scipy.signal import fftconvolve
 from scipy.signal import find_peaks_cwt
 from scipy.optimize import fmin_l_bfgs_b
-from sklearn.decomposition import KernelPCA, PCA
+from sklearn.decomposition import PCA
 from sklearn.pipeline import Pipeline, FeatureUnion
 from sklearn.base import TransformerMixin, BaseEstimator
 from sklearn.base import RegressorMixin
 from sklearn.kernel_approximation import Nystroem
+from sklearn.linear_model import Ridge
 from itertools import product
 from sklearn.model_selection import train_test_split
@@ -264,8 +265,13 @@ class SelectRelevantLowResolution(TransformerMixin, BaseEstimator):
 class FitModel(RegressorMixin, BaseEstimator):
     """
     Linear regression model with uncertainties.
+
+    Args:
+      l: Regularization coefficient.
     """
-    def __init__(self):
+    def __init__(self, l: float=1e-6):
+        self.l = l
         # model parameter sizes
         self.Nx: int = 0
         self.Ny: int = 0
@@ -305,10 +311,11 @@ class FitModel(RegressorMixin, BaseEstimator):
         # initial parameter values
         A0: np.ndarray = np.eye(self.Nx, self.Ny).reshape(self.Nx*self.Ny)
-        Aeps: np.ndarray = np.zeros(self.Nx)
+        #Aeps: np.ndarray = np.zeros(self.Nx)
         b0: np.ndarray = np.zeros(self.Ny)
-        u0: np.ndarray = np.zeros(self.Ny)
+        u0: np.ndarray = -2*np.ones(self.Ny)
-        x0: np.ndarray = np.concatenate((A0, b0, u0, Aeps), axis=0)
+        #x0: np.ndarray = np.concatenate((A0, b0, u0, Aeps), axis=0)
+        x0: np.ndarray = np.concatenate((A0, b0, u0), axis=0)
         # reset loss monitoring
         self.loss_train: List[float] = list()
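With Aeps dropped, the flat parameter vector handed to L-BFGS now holds only the weight matrix A (Nx*Ny entries), the bias b (Ny entries) and the per-output log-uncertainty (Ny entries, started at -2). A minimal sketch of the corresponding slicing, using a hypothetical helper name that does not exist in the code:

    import numpy as np

    def unpack_params(x: np.ndarray, Nx: int, Ny: int):
        """Split the flat parameter vector into A (Nx, Ny), b (1, Ny) and b_eps (1, Ny)."""
        A = x[:Nx*Ny].reshape((Nx, Ny))
        b = x[Nx*Ny:Nx*Ny + Ny].reshape((1, Ny))
        b_eps = x[Nx*Ny + Ny:Nx*Ny + 2*Ny].reshape((1, Ny))
        return A, b, b_eps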
@@ -330,13 +337,20 @@ class FitModel(RegressorMixin, BaseEstimator):
             b = x[self.Nx*self.Ny:(self.Nx*self.Ny+self.Ny)].reshape((1, self.Ny))
             b_eps = x[(self.Nx*self.Ny+self.Ny):(self.Nx*self.Ny+self.Ny+self.Ny)].reshape((1, self.Ny))
-            A_eps = x[(self.Nx*self.Ny+self.Ny+self.Ny):].reshape((self.Nx, 1))
+            #A_eps = x[(self.Nx*self.Ny+self.Ny+self.Ny):].reshape((self.Nx, 1))
-            log_unc = X @ A_eps + b_eps
+            #log_unc = anp.matmul(X, A_eps) + b_eps
+            log_unc = b_eps
             #log_unc = anp.log(anp.exp(log_unc) + anp.exp(log_eps))
             iunc2 = anp.exp(-2*log_unc)
-            return anp.mean( (0.5*((X@ A + b - Y)**2)*iunc2 + log_unc).sum(axis=1), axis=0 ) # Put RELU on (XX@x) and introduce new matrix W
+            L = anp.mean( (0.5*((anp.matmul(X, A) + b - Y)**2)*iunc2 + log_unc).sum(axis=1), axis=0 )
+            weights2 = (anp.sum(anp.square(A.ravel()))
+                        #+ anp.sum(anp.square(b.ravel()))
+                        #+ anp.sum(anp.square(A_eps.ravel()))
+                        #+ anp.sum(anp.square(b_eps.ravel()))
+                        )
+            return L + self.l/2 * weights2

         def loss_history(x: np.ndarray) -> float:
             """
@@ -372,14 +386,14 @@ class FitModel(RegressorMixin, BaseEstimator):
             grad_loss,
             disp=True,
             factr=1e7,
-            maxiter=1000,
+            maxiter=100,
             iprint=0)
         # Inference
         self.A_inf = sc_op[0][:self.Nx*self.Ny].reshape(self.Nx, self.Ny)
         self.b_inf = sc_op[0][self.Nx*self.Ny:(self.Nx*self.Ny+self.Ny)].reshape(1, self.Ny)
         self.u_inf = sc_op[0][(self.Nx*self.Ny+self.Ny):(self.Nx*self.Ny+self.Ny+self.Ny)].reshape(1, self.Ny) # removed np.exp
-        self.A_eps = sc_op[0][(self.Nx*self.Ny+self.Ny+self.Ny):].reshape(self.Nx, 1)
+        #self.A_eps = sc_op[0][(self.Nx*self.Ny+self.Ny+self.Ny):].reshape(self.Nx, 1)

     def as_dict(self) -> Dict[str, Any]:
         """
@@ -393,7 +407,7 @@ class FitModel(RegressorMixin, BaseEstimator):
             A_inf=self.A_inf,
             b_inf=self.b_inf,
             u_inf=self.u_inf,
-            A_eps=self.A_eps,
+            #A_eps=self.A_eps,
             loss_train=self.loss_train,
             loss_test=self.loss_test,
             Nx=self.Nx,
@@ -409,7 +423,7 @@ class FitModel(RegressorMixin, BaseEstimator):
         self.A_inf = in_dict["A_inf"]
         self.b_inf = in_dict["b_inf"]
         self.u_inf = in_dict["u_inf"]
-        self.A_eps = in_dict["A_eps"]
+        #self.A_eps = in_dict["A_eps"]
         self.loss_train = in_dict["loss_train"]
         self.loss_test = in_dict["loss_test"]
         self.Nx = in_dict["Nx"]
@@ -432,7 +446,8 @@ class FitModel(RegressorMixin, BaseEstimator):
         # flat uncertainty
         result["Y_unc"] = self.u_inf[0,:]
         # input-dependent uncertainty
-        result["Y_eps"] = np.exp(X @ self.A_eps + result["Y_unc"])
+        #result["Y_eps"] = np.exp(X @ self.A_eps + result["Y_unc"])
+        result["Y_eps"] = np.exp(result["Y_unc"])
         return result
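Since the input-dependent term X @ A_eps is gone, the returned uncertainty is now identical for every sample: the exponential of the fitted log-sigma of each output. A tiny sketch of what that evaluates to, with purely illustrative values and shapes:

    import numpy as np

    u_inf = np.array([[-2.0, -1.5, -2.5]])   # illustrative fitted log-sigmas, shape (1, Ny)
    Y_unc = u_inf[0, :]                      # flat log-uncertainty per output
    Y_eps = np.exp(Y_unc)                    # same uncertainty for every input sample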
@@ -460,12 +475,12 @@ class Model(TransformerMixin, BaseEstimator):
                  channels:List[str]=[f"channel_{j}_{k}"
                                      for j, k in product(range(1, 5), ["A", "B", "C", "D"])],
                  n_pca_lr: int=600,
-                 n_pca_hr: int=20,
+                 n_pca_hr: int=100,
                  high_res_sigma: float=0.2,
                  tof_start: Optional[int]=None,
                  delta_tof: Optional[int]=300,
                  validation_size: float=0.05,
-                 n_nonlinear_kernel: int=2000):
+                 n_nonlinear_kernel: int=10000):
         # models
         self.x_model = Pipeline([
             ('select', SelectRelevantLowResolution(channels, tof_start, delta_tof)),
@@ -479,7 +494,8 @@ class Model(TransformerMixin, BaseEstimator):
         ])
         fit_steps = list()
         if n_nonlinear_kernel > 0:
-            fit_steps += [('fex', Nystroem(n_components=n_nonlinear_kernel, kernel='laplacian', gamma=1.0, n_jobs=-1))]
+            fit_steps += [('fex', Nystroem(n_components=n_nonlinear_kernel, kernel='rbf', gamma=0.5, n_jobs=-1))]
+        #fit_steps += [('regression', (Ridge(alpha=0.1)))]
         fit_steps += [('regression', FitModel())]
         self.fit_model = Pipeline(fit_steps)
...
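The non-linear feature step now uses an RBF Nystroem approximation with more components, and Ridge is imported as a simpler, currently commented-out alternative to FitModel for the regression step. A minimal, self-contained sketch of that fitting pipeline on toy data, standing in Ridge for FitModel and using a small n_components so it runs anywhere:

    import numpy as np
    from sklearn.pipeline import Pipeline
    from sklearn.kernel_approximation import Nystroem
    from sklearn.linear_model import Ridge

    rng = np.random.default_rng(0)
    X = rng.normal(size=(200, 50))    # toy low-resolution features
    Y = rng.normal(size=(200, 4))     # toy high-resolution targets
    pipe = Pipeline([
        ('fex', Nystroem(n_components=100, kernel='rbf', gamma=0.5, n_jobs=-1)),
        ('regression', Ridge(alpha=0.1)),
    ])
    pipe.fit(X, Y)
    Y_pred = pipe.predict(X)          # shape (200, 4)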