Skip to content

Commit ff075ab

Browse files
committed
new hyperparameter outcomes
1 parent 604cb74 commit ff075ab

2 files changed

Lines changed: 236 additions & 87 deletions

File tree

datasets/primordial/surrogates_config.py

Lines changed: 120 additions & 48 deletions
Original file line numberDiff line numberDiff line change
@@ -5,76 +5,148 @@
55

66
@dataclass
class MultiONetConfig:
    """Model config for MultiONet for the primordial dataset.

    Field defaults are the winning hyperparameters of tuning run
    "primordial_final", trial 196.
    """

    # primordial_final, trial 196
    scheduler: str = "schedulefree"
    optimizer: str = "AdamW"
    loss_function: nn.Module = nn.SmoothL1Loss()
    # presumably the SmoothL1Loss beta (transition point) — TODO confirm the
    # trainer actually forwards this to the loss
    beta: float = 0.845
    branch_hidden_layers: int = 3
    hidden_size: int = 160
    output_factor: int = 130
    trunk_hidden_layers: int = 2
    learning_rate: float = 9.6e-04
    # NOTE(review): unusually large for a regularization factor — verify units
    # (weight decay vs. penalty coefficient) against the training loop
    regularization_factor: float = 0.283
    activation: nn.Module = nn.LeakyReLU()
2122

2223

2324
@dataclass
class LatentNeuralODEConfig:
    """Model config for LatentNeuralODE for the primordial dataset.

    Field defaults are the winning hyperparameters of tuning run
    "primordial_final", trial 243.
    """

    # primordial_final, trial 243
    scheduler: str = "schedulefree"
    optimizer: str = "SGD"
    loss_function: nn.Module = nn.MSELoss()
    # SGD momentum — only meaningful because optimizer is "SGD" here
    momentum: float = 0.085
    latent_features: int = 9
    coder_layers: int = 1
    coder_width: int = 580
    ode_tanh_reg: bool = True
    ode_layers: int = 6
    ode_width: int = 480
    learning_rate: float = 9.68e-5
    regularization_factor: float = 0.0544
    activation: nn.Module = nn.SiLU()
4042

4143

4244
@dataclass
class FullyConnectedConfig:
    """Model config for FullyConnected for the primordial dataset.

    Field defaults are the winning hyperparameters of tuning run
    "primordial_final", trial 31.
    """

    # primordial_final, trial 31
    # NOTE(review): the committed version of this class carried LatentPoly-style
    # parameters (degree / latent_features / coder_*) and none of the
    # FullyConnected ones; the trial-31 block (hidden_size / num_hidden_layers),
    # which had been pasted into LatentPolyConfig, belongs here instead —
    # compare the naive and trial-63 FC configs elsewhere in this file.
    scheduler: str = "schedulefree"
    optimizer: str = "SGD"
    loss_function: nn.Module = nn.SmoothL1Loss()
    # presumably the SmoothL1Loss beta — TODO confirm against the trainer
    beta: float = 3.73
    hidden_size: int = 470
    num_hidden_layers: int = 5
    learning_rate: float = 1.27e-03
    regularization_factor: float = 3.23e-05
    activation: nn.Module = nn.ELU()
5660

5761

5862
@dataclass
class LatentPolyConfig:
    """Model config for LatentPoly for the primordial dataset.

    Field defaults are the winning hyperparameters of tuning run
    "primordial_final", trial 174.
    """

    # primordial_final, trial 174
    # NOTE(review): the committed version of this class carried FullyConnected
    # parameters (hidden_size / num_hidden_layers); the trial-174 block
    # (degree / latent_features / coder_*), which had been pasted into
    # FullyConnectedConfig, belongs here instead — compare the naive and
    # trial-176 LatentPoly configs elsewhere in this file.
    scheduler: str = "poly"
    optimizer: str = "adam"
    loss_function: nn.Module = nn.MSELoss()
    # NOTE(review): Adam has no momentum hyperparameter — this is likely an
    # inert leftover from the tuning search space; confirm before removing
    momentum: float = 0.0132
    degree: int = 4
    latent_features: int = 8
    coder_layers: int = 2
    coder_width: int = 470
    learning_rate: float = 1.77e-04
    regularization_factor: float = 9.20e-03
    activation: nn.Module = nn.ReLU()
76+
77+
78+
# @dataclass
79+
# class MultiONetConfig:
80+
# """Model config for MultiONet for the primordial dataset"""
81+
82+
# # Naive configuration
83+
# loss_function: nn.Module = nn.MSELoss()
84+
# optimizer: str = "adamw"
85+
# scheduler: str = "schedulefree"
86+
# branch_hidden_layers: int = 5
87+
# hidden_size: int = 256
88+
# output_factor: int = 200
89+
# trunk_hidden_layers: int = 5
90+
# learning_rate: float = 1e-04
91+
# regularization_factor: float = 1e-04
92+
# activation: nn.Module = nn.ReLU()
93+
94+
95+
# @dataclass
96+
# class LatentNeuralODEConfig:
97+
# """Model config for LatentNeuralODE for the primordial dataset"""
98+
99+
# # Naive configuration
100+
# loss_function: nn.Module = nn.MSELoss()
101+
# optimizer: str = "adamw"
102+
# scheduler: str = "schedulefree"
103+
# ode_tanh_reg: bool = False
104+
# latent_features: int = 10
105+
# coder_layers: int = 3
106+
# coder_width: int = 256
107+
# ode_layers: int = 3
108+
# ode_width: int = 256
109+
# learning_rate: float = 1e-04
110+
# regularization_factor: float = 1e-04
111+
# activation: nn.Module = nn.ReLU()
112+
113+
114+
# @dataclass
115+
# class FullyConnectedConfig:
116+
# """Model config for FullyConnected for the primordial dataset"""
117+
118+
# # Naive configuration
119+
# loss_function: nn.Module = nn.MSELoss()
120+
# optimizer: str = "adamw"
121+
# scheduler: str = "schedulefree"
122+
# # other params from primordial_tuning, trial 63
123+
# hidden_size: int = 256
124+
# num_hidden_layers: int = 5
125+
# learning_rate: float = 1e-04
126+
# regularization_factor: float = 1e-04
127+
# activation: nn.Module = nn.ReLU()
128+
129+
130+
# @dataclass
131+
# class LatentPolyConfig:
132+
# """Model config for LatentPoly for the primordial dataset"""
133+
134+
# # Naive configuration
135+
# loss_function: nn.Module = nn.MSELoss()
136+
# optimizer: str = "adamw"
137+
# scheduler: str = "schedulefree"
138+
# degree: int = 3
139+
# latent_features: int = 10
140+
# coder_layers: int = 3
141+
# coder_width: int = 256
142+
# learning_rate: float = 1e-04
143+
# regularization_factor: float = 1e-04
144+
# activation: nn.Module = nn.ReLU()
73145

74146

75147
# @dataclass
76148
# class MultiONetConfig:
77-
# """Model config for MultiONet for the simple_ode dataset"""
149+
# """Model config for MultiONet for the primordial dataset"""
78150

79151
# # primordial_tuning, trial 18
80152
# scheduler: str = "poly"
@@ -93,7 +165,7 @@ class LatentPolyConfig:
93165

94166
# @dataclass
95167
# class LatentNeuralODEConfig:
96-
# """Model config for LatentNeuralODE for the simple_ode dataset"""
168+
# """Model config for LatentNeuralODE for the primordial dataset"""
97169

98170
# # primordial_tuning, trial 186
99171
# scheduler: str = "poly"
@@ -114,7 +186,7 @@ class LatentPolyConfig:
114186

115187
# @dataclass
116188
# class FullyConnectedConfig:
117-
# """Model config for FullyConnected for the simple_ode dataset"""
189+
# """Model config for FullyConnected for the primordial dataset"""
118190

119191
# # primordial_tuning, trial 63
120192
# scheduler: str = "poly"
@@ -131,7 +203,7 @@ class LatentPolyConfig:
131203

132204
# @dataclass
133205
# class LatentPolyConfig:
134-
# """Model config for LatentPoly for the simple_ode dataset"""
206+
# """Model config for LatentPoly for the primordial dataset"""
135207

136208
# # primordial_tuning, trial 176
137209
# scheduler: str = "poly"

datasets/primordial_parametric/surrogates_config.py

Lines changed: 116 additions & 39 deletions
Original file line numberDiff line numberDiff line change
@@ -5,59 +5,136 @@
55

66
@dataclass
class MultiONetConfig:
    """Model config for MultiONet for the primordial_parametric dataset.

    Field defaults are the winning hyperparameters of tuning run
    "primordial_parametric_final_multionet", trial 18.
    """

    # primordial_parametric_final_multionet, trial 18
    scheduler: str = "poly"
    optimizer: str = "AdamW"
    loss_function: nn.Module = nn.MSELoss()
    activation: nn.Module = nn.PReLU()
    branch_hidden_layers: int = 5
    hidden_size: int = 50
    learning_rate: float = 0.0018
    output_factor: int = 74
    params_branch: bool = True
    poly_power: float = 0.64
    regularization_factor: float = 0.00114
    # Fixed: was annotated `bool = True` — a layer count is an int.
    # True == 1, so the effective value is unchanged.
    trunk_hidden_layers: int = 1
1923

2024

2125
@dataclass
class LatentNeuralODEConfig:
    """Model config for LatentNeuralODE for the primordial_parametric dataset.

    Field defaults are the winning hyperparameters of tuning run
    "primordial_parametric_final_latentneuralode", trial 234.
    """

    # primordial_parametric_final_latentneuralode, trial 234
    scheduler: str = "cosine"
    optimizer: str = "SGD"
    loss_function: nn.Module = nn.MSELoss()
    activation: nn.Module = nn.SiLU()
    # Fixed: was annotated `bool = True` — a layer count is an int.
    # True == 1, so the effective value is unchanged.
    coder_layers: int = 1
    coder_width: int = 360
    encode_params: bool = False
    # presumably the cosine scheduler's minimum LR — TODO confirm
    eta_min: float = 0.00191
    latent_features: int = 10
    learning_rate: float = 2.04e-05
    # SGD momentum — only meaningful because optimizer is "SGD" here
    momentum: float = 0.823
    ode_layers: int = 8
    ode_tanh_reg: bool = True
    ode_width: int = 220
    regularization_factor: float = 1.87e-05
3745

3846

3947
@dataclass
class FullyConnectedConfig:
    """Model config for FullyConnected for the primordial_parametric dataset.

    Field defaults are the winning hyperparameters of tuning run
    "primordial_parametric_final_fullyconnected", trial 1.
    """

    # primordial_parametric_final_fullyconnected, trial 1
    scheduler: str = "poly"
    optimizer: str = "AdamW"
    loss_function: nn.Module = nn.SmoothL1Loss()
    activation: nn.Module = nn.Mish()
    # presumably the SmoothL1Loss beta — TODO confirm the trainer forwards it
    beta: float = 6.69
    hidden_size: int = 470
    learning_rate: float = 0.00129
    num_hidden_layers: int = 3
    # presumably the "poly" scheduler's decay exponent — TODO confirm
    poly_power: float = 0.899
    regularization_factor: float = 0.0144
4962

5063

5164
@dataclass
class LatentPolyConfig:
    """Model config for LatentPoly for the primordial_parametric dataset.

    Field defaults are the winning hyperparameters of tuning run
    "primordial_parametric_final_latentpoly", trial 16.
    """

    # primordial_parametric_final_latentpoly, trial 16
    scheduler: str = "poly"
    optimizer: str = "AdamW"
    loss_function: nn.Module = nn.MSELoss()
    activation: nn.Module = nn.ELU()
    # Fixed: was annotated `bool = True` — a layer count is an int.
    # True == 1, so the effective value is unchanged.
    coder_layers: int = 1
    coder_width: int = 700
    coeff_network: bool = False
    degree: int = 3
    latent_features: int = 7
    learning_rate: float = 1.33e-05
    # presumably the "poly" scheduler's decay exponent — TODO confirm
    poly_power: float = 0.804
    regularization_factor: float = 0.0174
5481

55-
# primordialparams, trial 32
5682

57-
latent_features: int = 10
58-
degree: int = 7
59-
learning_rate: float = 5e-4
60-
coder_layers: int = 2
61-
coder_width: int = 267
62-
activation: nn.Module = nn.LeakyReLU()
63-
coeff_network: bool = False
83+
# @dataclass
84+
# class MultiONetConfig:
85+
# """Model config for MultiONet for the simple_ode dataset"""
86+
87+
# # primordialparams, trial 56
88+
89+
# branch_hidden_layers: int = 2
90+
# trunk_hidden_layers: int = 7
91+
# hidden_size: int = 164
92+
# output_factor: int = 63
93+
# learning_rate: float = 4e-5
94+
# activation: nn.Module = nn.GELU()
95+
# params_branch: bool = True
96+
97+
98+
# @dataclass
99+
# class LatentNeuralODEConfig:
100+
# """Model config for LatentNeuralODE for the simple_ode dataset"""
101+
102+
# # primordialparams, trial 75
103+
104+
# latent_features: int = 5
105+
# coder_layers: int = 4
106+
# coder_width: int = 268
107+
# learning_rate: float = 4e-5
108+
# ode_layers: int = 9
109+
# ode_width: int = 125
110+
# ode_tanh_reg: bool = False
111+
# activation: nn.Module = nn.Tanh()
112+
# model_version: str = "v2"
113+
# encode_params: bool = False
114+
115+
116+
# @dataclass
117+
# class FullyConnectedConfig:
118+
# """Model config for FullyConnected for the simple_ode dataset"""
119+
120+
# # primordialparams, trial 40
121+
122+
# hidden_size: int = 629
123+
# num_hidden_layers: int = 1
124+
# learning_rate: float = 6e-5
125+
# activation: nn.Module = nn.LeakyReLU()
126+
127+
128+
# @dataclass
129+
# class LatentPolyConfig:
130+
# """Model config for LatentPoly for the simple_ode dataset"""
131+
132+
# # primordialparams, trial 32
133+
134+
# latent_features: int = 10
135+
# degree: int = 7
136+
# learning_rate: float = 5e-4
137+
# coder_layers: int = 2
138+
# coder_width: int = 267
139+
# activation: nn.Module = nn.LeakyReLU()
140+
# coeff_network: bool = False

0 commit comments

Comments
 (0)