Skip to content

Commit b0a16a2

Browse files
committed
Added TODO for run_ml_sim. Updated valid_config to match modern configuration files
1 parent e74d326 commit b0a16a2

2 files changed

Lines changed: 25 additions & 16 deletions

File tree

run_ml_sim.py

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -11,7 +11,8 @@
1111
from config_scripts.setup_config import read_config
1212
from helper_scripts.ml_helpers import process_data, plot_confusion
1313
from helper_scripts.ml_helpers import save_model
14-
14+
#TODO: Raise NotImplementedError
15+
#TODO: This node is still in active development
1516

1617
def _train_test_knn(df_processed: pd.DataFrame, sim_dict: dict, erlang: str):
1718
predictor_df = df_processed['num_segments']

tests/fixtures/valid_config.ini

Lines changed: 23 additions & 15 deletions
Original file line numberDiff line numberDiff line change
@@ -48,31 +48,39 @@ xt_noise = False
4848
requested_xt = {"QPSK": -26.19, "16-QAM": -36.69, "64-QAM": -41.69}
4949

5050
[rl_settings]
51+
n_trials = 2
5152
device = cpu
5253
optimize = False
5354
is_training = True
54-
path_algorithm = ucb_bandit
55-
path_model = greedy_bandit/NSFNet/0617/16_47_22_694727/state_vals_e750.0_routes_c4.json
55+
56+
path_algorithm = ppo
57+
path_model = None
5658
core_algorithm = first_fit
57-
core_model = greedy_bandit/NSFNet/0617/16_57_13_315030/state_vals_e750.0_cores_c4.json
59+
core_model = None
5860
spectrum_algorithm = first_fit
59-
spectrum_model = ppo/NSFNet/0512/12_57_55_484293
60-
# Only for DRL
61+
spectrum_model = None
62+
6163
render_mode = None
6264
super_channel_space = 3
63-
# Only for q-learning
64-
learn_rate = 0.01
65-
discount_factor = 0.95
66-
epsilon_start = 0.2
67-
epsilon_end = 0.05
68-
reward = 1
69-
penalty = -100
70-
dynamic_reward = False
71-
# TODO: Sim helpers has not been updated for this! (Only support for 2)
65+
66+
alpha_start = 0.1
67+
alpha_end = 0.01
68+
alpha_update = linear_decay
69+
70+
gamma = 0.1
71+
72+
epsilon_start = 0.0
73+
epsilon_end = 0.0
74+
epsilon_update = linear_decay
75+
7276
path_levels = 2
7377
decay_factor = 0.01
78+
79+
reward = 1
80+
penalty = -10
81+
dynamic_reward = False
7482
core_beta = 0.1
75-
gamma = 0.1
83+
7684

7785
[ml_settings]
7886
deploy_model = False

0 commit comments

Comments
 (0)