1+ import numpy as np
2+
def gradientDescent(X, y, y_pred, n_samples, lambda_param, weights):
    """Return the ridge-regression gradient of the weights.

    The L2 penalty is applied to every weight except the first one,
    which is treated as the (un-regularized) bias term.
    """
    residual = y_pred - y
    # Regularization vector with the bias entry zeroed out.
    penalty = (lambda_param / n_samples) * weights
    penalty[0] = 0.0
    return (X.T @ residual) / n_samples + penalty
7+
def normalEquation(X, y, lambda_param):
    """Solve ridge regression in closed form.

    Computes w = (X^T X + lambda * I')^-1 X^T y, where I' is the identity
    with its [0, 0] entry zeroed so the bias term is not regularized.

    Args:
        X: (n_samples, n_features) design matrix (first column = bias ones).
        y: (n_samples,) target vector.
        lambda_param: L2 regularization strength.

    Returns:
        (n_features,) weight vector.
    """
    identity_matrix = lambda_param * np.eye(X.shape[1])
    identity_matrix[0, 0] = 0  # Bias term should not be regularized

    # Solve the linear system directly instead of forming the explicit
    # inverse: np.linalg.solve is faster and numerically more stable
    # than inv(A) @ b.
    return np.linalg.solve(X.T @ X + identity_matrix, X.T @ y)
16+
class RidgeRegression:
    """Ridge (L2-regularized) linear regression.

    The first weight is treated as the bias term (X is expected to carry a
    leading column of ones) and is excluded from regularization by the
    helpers `normalEquation` / `gradientDescent`.
    """

    def __init__(self, lambda_param, weights=None):
        """
        Args:
            lambda_param: L2 regularization strength (lambda >= 0).
            weights: optional pre-computed weight vector; normally set
                by training().
        """
        self.lambda_param = lambda_param
        self.weights = weights
        self.weights_history = []  # weights[1] snapshot per GD iteration
        self.costs_history = []    # cost value per GD iteration

    def training(self, X, y, mode, lr=0.1, n_iters=100):
        """Fit the model with the requested optimizer.

        Args:
            X: (n_samples, n_features) design matrix, first column of ones.
            y: (n_samples,) target vector.
            mode: "normalEq" for the closed-form solution, "gradientDes"
                for batch gradient descent.
            lr: learning rate (gradient descent only).
            n_iters: number of gradient-descent iterations.

        Raises:
            ValueError: if `mode` is not a supported optimizer name.
        """
        if mode == "normalEq":
            self.weights = normalEquation(X, y, self.lambda_param)
        elif mode == "gradientDes":
            n_samples, n_features = X.shape
            self.weights = np.zeros(n_features)
            for _ in range(n_iters):
                # History is recorded before each update; weights[1] is the
                # first non-bias weight (assumes n_features >= 2).
                self.weights_history.append(self.weights[1])
                self.costs_history.append(self.costFunction(X, y))
                y_pred = self.predict(X)
                weights_gradient = gradientDescent(
                    X, y, y_pred, n_samples, self.lambda_param, self.weights
                )
                self.weights -= lr * weights_gradient
        else:
            # ValueError is more precise than a bare Exception and remains
            # backward compatible for callers catching Exception.
            raise ValueError("Sorry, we don't have that type of optimization.")

    def standardization(self, X):
        """Standardize features column-wise: X_std = (X - mean) / std.

        Fix: statistics are now computed per feature (axis=0). The original
        used np.mean(X) / np.std(X) over the whole matrix, which mixes
        features and produces wrong scaling for multi-column X.
        """
        mean_x = np.mean(X, axis=0)
        std_x = np.std(X, axis=0)
        return (X - mean_x) / std_x

    def test(self):
        """Debug helper: print the shape of the current weight vector."""
        print(self.weights.shape)

    def get_Weights_History(self):
        """Return the recorded weights[1] snapshots (gradient descent only)."""
        return self.weights_history

    def get_Costs_History(self):
        """Return the recorded cost values (gradient descent only)."""
        return self.costs_history

    def predict(self, X):
        """Return predictions X @ weights for design matrix X."""
        return np.dot(X, self.weights)

    def costFunction(self, X, y):
        """Return the ridge cost: sum of squared errors plus the L2 penalty.

        Note: this is the un-averaged sum (no 1/n factor), and the bias
        weight (index 0) is excluded from the penalty.
        """
        y_pred = self.predict(X)
        mse = np.sum((y - y_pred) ** 2)
        regularization = self.lambda_param * np.sum(self.weights[1:] ** 2)
        return mse + regularization