Skip to content

Commit c01155f

Browse files
committed
Gray-box implementation
1 parent d5ba91c commit c01155f

1,277 files changed

Lines changed: 1378 additions & 705 deletions

File tree

Some content is hidden

Large commits have some content hidden by default. Use the search box below to find content that may be hidden.

.gitignore

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -10,6 +10,11 @@ __pycache__/
1010
rtac/logs/Cadical_ReACTR
1111
rtac/logs/TSP_Q_ReACTR
1212
rtac/logs/TSP_RT_ReACTR
13+
rtac/logs/Cadical_CPPL
14+
rtac/logs/Cadicalpp_CPPL_gb
15+
rtac/logs/TSP_Qpp_ReACTRpp
16+
rtac/logs/TSP_RT_CPPL
17+
rtac/logs/TSP_RTpp_ReACTRpp
1318
rtac/data/instances/power_law_easy
1419
rtac/data/solvers/cadical
1520

README.md

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,4 @@
11
# RTAC
22
Realtime Algorithm Configuration Methods
33

4-
This Software is a refactored version of the software used in "Pool-Based Realtime Algorithm Configuration" and "Realtime gray-box algorithm configuration using cost-sensitive classification". It has also extended options regarding logging, input, e.g. parameter space via PCS files, and target algorithm calls.
4+
This software is a reimplementation of the realtime algorithm configurators (RAC) described in "ReACTR: Realtime Algorithm Configuration through Tournament Rankings", "Pool-Based Realtime Algorithm Configuration", and "Realtime gray-box algorithm configuration using cost-sensitive classification", combined into a collective RAC suite. It also includes extended options for logging, input (e.g., parameter space via PCS files), and target algorithm calls.
Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,6 @@
1+
{
2+
"cells": [],
3+
"metadata": {},
4+
"nbformat": 4,
5+
"nbformat_minor": 5
6+
}

rtac/ac_functionalities/logs.py

Lines changed: 41 additions & 24 deletions
Original file line numberDiff line numberDiff line change
@@ -48,6 +48,8 @@ def __init__(self, scenario: argparse.Namespace):
4848
self.log_path = scenario.log_folder + '/' \
4949
+ scenario.wrapper_name + '_' \
5050
+ str(scenario.ac).split('.')[1]
51+
if scenario.gray_box:
52+
self.log_path += '_gb'
5153
if not os.path.isdir(self.log_path):
5254
os.makedirs(self.log_path)
5355
self.experimental = scenario.experimental
@@ -63,6 +65,7 @@ def __init__(self, scenario: argparse.Namespace):
6365
for f in filelist:
6466
os.remove(os.path.join(self.log_path, f))
6567
self.objective_min = scenario.objective_min
68+
print('\n')
6669
print(f'Logging to {self.log_path}')
6770

6871
def init_rtac_logs(self) -> None:
@@ -266,21 +269,31 @@ def ranking_log(self, pool: dict[str: Configuration],
266269
os.mkdir(f'{self.log_path}/bandit_models')
267270
self.bm_path = f'{self.log_path}/bandit_models'
268271
bandit_models = kwargs['bandit_models']
269-
joblib.dump(
270-
bandit_models['standard_scaler'],
271-
f'{self.bm_path}/standard_scaler_{tourn_nr}.pkl')
272-
joblib.dump(
273-
bandit_models['min_max_scaler'],
274-
f'{self.bm_path}/min_max_scaler_{tourn_nr}.pkl')
275-
joblib.dump(
276-
bandit_models['one_hot_encoder'],
277-
f'{self.bm_path}/one_hot_encoder_{tourn_nr}.pkl')
278-
joblib.dump(
279-
bandit_models['pca_obj_params'],
280-
f'{self.bm_path}/pca_obj_params_{tourn_nr}.pkl')
281-
joblib.dump(
282-
bandit_models['pca_obj_inst'],
283-
f'{self.bm_path}/pca_obj_inst_{tourn_nr}.pkl')
272+
if tourn_nr == 0:
273+
joblib.dump(
274+
bandit_models['standard_scaler'],
275+
f'{self.bm_path}/standard_scaler_{tourn_nr}.pkl')
276+
joblib.dump(
277+
bandit_models['min_max_scaler'],
278+
f'{self.bm_path}/min_max_scaler_{tourn_nr}.pkl')
279+
joblib.dump(
280+
bandit_models['one_hot_encoder'],
281+
f'{self.bm_path}/one_hot_encoder_{tourn_nr}.pkl')
282+
joblib.dump(
283+
bandit_models['pca_obj_params'],
284+
f'{self.bm_path}/pca_obj_params_{tourn_nr}.pkl')
285+
joblib.dump(
286+
bandit_models['pca_obj_inst'],
287+
f'{self.bm_path}/pca_obj_inst_{tourn_nr}.pkl')
288+
elif self.scenario.online_instance_train and tourn_nr > 0:
289+
joblib.dump(
290+
bandit_models['standard_scaler'],
291+
f'{self.bm_path}/standard_scaler_{tourn_nr}.pkl')
292+
joblib.dump(
293+
bandit_models['pca_obj_inst'],
294+
f'{self.bm_path}/pca_obj_inst_{tourn_nr}.pkl')
295+
elif len(bandit_models) == 0:
296+
pass
284297
self.bandit_log.handlers.clear()
285298
b_fh = logging.FileHandler(
286299
f'{self.log_path}/bandit_tourn_{tourn_nr}.log')
@@ -335,21 +348,23 @@ def load_data(self, tourn_nr: int | None = None) \
335348
assessment = dict(zip(list(pool.keys()), assessment.values()))
336349
elif self.ranking is ACMethod.CPPL:
337350
self.bm_path = f'{self.log_path}/bandit_models'
351+
if not self.scenario.online_instance_train:
352+
tourn_nr = 0
338353
with open(
339354
f'{self.log_path}/bandit_tourn_{tourn_nr}.log', 'r') as f:
340355
assessment = f.read()
341-
#assessment = ast.literal_eval(assessment)
356+
342357
assessment = eval(assessment, {"array": np.array})
343358
assessment = \
344359
{k: self.parse_array(v) for k, v in assessment.items()}
345360
standard_scaler = \
346361
joblib.load(f'{self.bm_path}/standard_scaler_{tourn_nr}.pkl')
347362
min_max_scaler = \
348-
joblib.load(f'{self.bm_path}/min_max_scaler_{tourn_nr}.pkl')
363+
joblib.load(f'{self.bm_path}/min_max_scaler_0.pkl')
349364
one_hot_encoder = \
350-
joblib.load(f'{self.bm_path}/one_hot_encoder_{tourn_nr}.pkl')
365+
joblib.load(f'{self.bm_path}/one_hot_encoder_0.pkl')
351366
pca_obj_params = \
352-
joblib.load(f'{self.bm_path}/pca_obj_params_{tourn_nr}.pkl')
367+
joblib.load(f'{self.bm_path}/pca_obj_params_0.pkl')
353368
pca_obj_inst = \
354369
joblib.load(f'{self.bm_path}/pca_obj_inst_{tourn_nr}.pkl')
355370
bandit_models = {'standard_scaler': standard_scaler,
@@ -367,11 +382,9 @@ def load_data(self, tourn_nr: int | None = None) \
367382
os.remove(f'{self.log_path}/scores_tourn_{tourn_nr}.log')
368383
elif self.ranking is ACMethod.CPPL:
369384
os.remove(f'{self.log_path}/bandit_tourn_{tourn_nr}.log')
370-
os.remove(f'{self.bm_path}/standard_scaler_{tourn_nr}.pkl')
371-
os.remove(f'{self.bm_path}/min_max_scaler_{tourn_nr}.pkl')
372-
os.remove(f'{self.bm_path}/one_hot_encoder_{tourn_nr}.pkl')
373-
os.remove(f'{self.bm_path}/pca_obj_params_{tourn_nr}.pkl')
374-
os.remove(f'{self.bm_path}/pca_obj_inst_{tourn_nr}.pkl')
385+
filelist = [f for f in os.listdir(self.bm_path)]
386+
for f in filelist:
387+
os.remove(os.path.join(self.bm_path, f))
375388
os.remove(f'{self.log_path}/pool_tourn_{tourn_nr}.log')
376389
os.remove(f'{self.log_path}/contender_dict_tourn_{tourn_nr}.log')
377390

@@ -383,3 +396,7 @@ def load_data(self, tourn_nr: int | None = None) \
383396
return pool, assessment, contender_dict, tourn_nr
384397
elif self.ranking is ACMethod.CPPL:
385398
return pool, assessment, contender_dict, tourn_nr, bandit_models
399+
400+
401+
if __name__ == '__main__':
402+
pass

rtac/ac_functionalities/ranking/cppl.py

Lines changed: 30 additions & 34 deletions
Original file line numberDiff line numberDiff line change
@@ -98,6 +98,7 @@ def __init__(self, scenario, pool, contender_dict=None):
9898
self.t = 1
9999
self.Y_t = 0
100100
self.S_t = []
101+
self.S_t_prior = []
101102
self.grad = np.zeros(self.dim, dtype=np.float32)
102103

103104
self.pool_replacement = False
@@ -132,11 +133,23 @@ def record_bandit(self):
132133
'Y_t': self.Y_t,
133134
'S_t': self.S_t,
134135
'grad': np.array2string(self.grad, threshold=np.inf)}
135-
self.bandit_models = {'standard_scaler': self.standard_scaler,
136-
'min_max_scaler': self.min_max_scaler,
137-
'one_hot_encoder': self.one_hot_enc,
138-
'pca_obj_params': self.pca_obj_params,
139-
'pca_obj_inst': self.pca_obj_inst}
136+
137+
if hasattr(self, 'X_t'):
138+
self.bandit['X_t'] = np.array2string(self.X_t, threshold=np.inf)
139+
if hasattr(self, 'hess'):
140+
self.bandit['hess'] = np.array2string(self.hess, threshold=np.inf)
141+
142+
if self.tourn == 0:
143+
self.bandit_models = {'standard_scaler': self.standard_scaler,
144+
'min_max_scaler': self.min_max_scaler,
145+
'one_hot_encoder': self.one_hot_enc,
146+
'pca_obj_params': self.pca_obj_params,
147+
'pca_obj_inst': self.pca_obj_inst}
148+
elif self.scenario.online_instance_train:
149+
self.bandit_models = {'standard_scaler': self.standard_scaler,
150+
'pca_obj_inst': self.pca_obj_inst}
151+
else:
152+
self.bandit_models = {}
140153

141154
def process_results(self, queue1=None, queue2=None):
142155

@@ -207,27 +220,6 @@ def update_data(self):
207220

208221
results = self.results
209222

210-
if self.scenario.verbosity == 2:
211-
if not self.scenario.objective_min:
212-
unit = 'seconds'
213-
else:
214-
unit = 'objective value'
215-
self.time_sum += round(min(results), 3)
216-
len_str = len('Instance nr. ' + str(self.tourn) + ' : ' + str(
217-
round(self.time_sum, 3)) + f' {unit} total')
218-
print('\n')
219-
print('-' * len_str)
220-
if min(results) == self.scenario.timeout:
221-
print('Instance nr.', self.tourn, ':',
222-
round(self.time_sum, 3), f'{unit} total'
223-
' *** TIMEOUT on instance')
224-
else:
225-
print('Instance nr.', self.tourn, ':',
226-
round(self.time_sum, 3), f'{unit} total')
227-
print('-' * len_str)
228-
print('\n')
229-
self.tourn += 1
230-
231223
# Get pool index of winner
232224
self.Y_t = min(range(len(results)), key=results.__getitem__)
233225
contender_ids = list(copy.deepcopy(self.pool).keys())
@@ -305,7 +297,7 @@ def discard_configs(self):
305297

306298
def crossover(self, parents, nr_children, core):
307299

308-
parent_a, parent_b = parents #[core]
300+
parent_a, parent_b = parents
309301

310302
new_candids = []
311303
for child in range(nr_children):
@@ -478,7 +470,10 @@ def insert_in_pool(self, configs):
478470
'by contender generated via', c.gen, '\n')
479471

480472
def skill_and_confidence(self):
481-
with threadpool_limits(limits=1):
473+
with threadpool_limits(limits=1):
474+
475+
self.S_t_prior = copy.deepcopy(self.S_t)
476+
482477
# Estimated skill parameters
483478
self.v_hat = np.zeros(self.pool_size)
484479
for i in range(self.pool_size):
@@ -548,15 +543,16 @@ def skill_and_confidence(self):
548543
-(self.v_hat)
549544
).argsort()[0:self.scenario.number_cores]
550545

551-
if self.scenario.verbosity in (1, 2):
546+
if self.scenario.verbosity in (1, 2) and len(self.S_t_prior) != 0:
552547
print('\nSkill and confidence assessment of the contenders',
553548
'from tournament:\n \nContender',
554-
' ' * 34, ' (v_hat', ' ' * 13, ', c_t)')
549+
' ' * 34, ' ( v_hat', ' ' * 12, ', c_t', + 14 * ' ',
550+
')')
555551
pool_ids = list(self.pool.keys())
556552
for core in range(self.scenario.number_cores):
557-
print(pool_ids[self.S_t[core]], 'assessment is:',
558-
'(', self.v_hat[self.S_t[core]], ',',
559-
self.c_t[self.S_t[core]], ')')
553+
print(pool_ids[self.S_t_prior[core]], 'assessment is:',
554+
'(', self.v_hat[self.S_t_prior[core]], ',',
555+
self.c_t[self.S_t_prior[core]], ')')
560556
print('\n\n')
561557

562558
def assess_children(self, transformed_configs, nd):
@@ -741,7 +737,7 @@ def transform_conf(self, conf):
741737
for k in
742738
self.cat_param_names]])
743739
# One-hot encoding increases impact of the feature in the model
744-
# so we reduce the impact the number of binary features created
740+
# so we reduce the impact by the number of binary features created
745741
normalized = []
746742
start = 0
747743
for cat in self.cat_values:

0 commit comments

Comments
 (0)