Skip to content

Commit b1a582b

Browse files
committed
RENAME:Configs
1 parent 39a6519 commit b1a582b

11 files changed

Lines changed: 87 additions & 44 deletions

File tree

File renamed without changes.

config/CNN/NP/model_conf.json

Lines changed: 6 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -23,17 +23,17 @@
2323
"comment" : "batch_size :: specify the mini batch size while training, default 256",
2424
"batch_size" : 256,
2525

26-
"comment" : "momentum :: Specify the momentum factor while training default 0.5",
27-
"momentum" : 0.5,
26+
"comment" : "finetune_momentum :: Specify the momentum factor while training default 0.5",
27+
"finetune_momentum" : 0.5,
2828

2929
"comment" : "n_outs :: Specify the number of outputs",
3030
"n_outs" : 10,
3131

32-
"comment" : "l_rate_method :: Two method l_rate method are supported C: Constant learning rate and E : Exponential decay",
33-
"l_rate_method" : "C",
32+
"comment" : "finetune_method :: Two finetune methods are supported C: Constant learning rate and E: Exponential decay",
33+
"finetune_method" : "C",
3434

35-
"comment" : "l_rate :: learning rate configuration",
36-
"l_rate" : {
35+
"comment" : "finetune_rate :: learning rate configuration",
36+
"finetune_rate" : {
3737
"learning_rate" : 0.08,
3838
"epoch_num" : 0,
3939

config/CNN/T/model_conf.json

Lines changed: 6 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -23,17 +23,17 @@
2323
"comment" : "batch_size :: specify the mini batch size while training, default 256",
2424
"batch_size" : 8,
2525

26-
"comment" : "momentum :: Specify the momentum factor while training default 0.5",
27-
"momentum" : 0.5,
26+
"comment" : "finetune_momentum :: Specify the momentum factor while training default 0.5",
27+
"finetune_momentum" : 0.5,
2828

2929
"comment" : "n_outs :: Specify the number of outputs",
3030
"n_outs" : 8,
3131

32-
"comment" : "l_rate_method :: Two method l_rate method are supported C: Constant learning rate and E : Exponential decay",
33-
"l_rate_method" : "C",
32+
"comment" : "finetune_method :: Two finetune methods are supported C: Constant learning rate and E: Exponential decay",
33+
"finetune_method" : "C",
3434

35-
"comment" : "l_rate :: learning rate configuration",
36-
"l_rate" : {
35+
"comment" : "finetune_rate :: learning rate configuration",
36+
"finetune_rate" : {
3737
"learning_rate" : 0.08,
3838
"epoch_num" : 10,
3939

config/DBN/README.md

Lines changed: 43 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,43 @@
1+
==Model Config==
2+
nnetType: (Mandatory) specify Type of Network (CNN,RBM)
3+
train_data : (Mandatory) specify the path of the training data relative to the working directory
4+
wdir : (Mandatory) specify the working directory containing the data configuration and output
5+
data_spec: (Mandatory) specify the path of the data specification file relative to the working directory
6+
nnet_spec: (Mandatory) specify the path of RBM/CNN/DNN/SDA network configuration specification relative to working directory
7+
output_file : (Mandatory) specify the path of RBM network output file relative to working directory
8+
input_file : specify the path of the RBM network input file relative to the working directory
9+
batch_size : specify the mini batch size while training, default 128
10+
n_ins:784
11+
n_outs:10
12+
13+
gbrbm_learning_rate: pretraining
14+
pretraining_learning_rate: pretraining
15+
pretraining_epochs:
16+
17+
initial_pretrain_momentum:Specify the momentum factor while training default 0.5
18+
final_pretrain_momentum:Specify the momentum factor while training default 0.9
19+
initial_pretrain_momentum_epoch : Specify the momentum factor while training default 5
20+
21+
22+
finetune_method: Two methods are supported C: Constant learning rate and E: Exponential decay
23+
24+
finetune_rate : learning rate configuration
25+
>learning_rate: 0.08
26+
>epoch_num: 10
27+
28+
>start_rate: 0.08
29+
>scale_by: 0.5
30+
>min_derror_decay_start: 0.05
31+
>min_derror_stop: 0.05
32+
>min_epoch_decay_start: 15
33+
>init_error:100
34+
35+
finetune_momentum : Specify the momentum factor while finetuning
36+
finetune_momentum : 0.5
37+
38+
processes:
39+
>pretraining: true
40+
>finetuning: true
41+
>testing: true
42+
>export_data: false
43+

config/DBN/model_conf.json

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -25,13 +25,13 @@
2525

2626
"comment" :"TODO",
2727
"gbrbm_learning_rate":0.005,
28-
"learning_rate":0.08,
28+
"pretraining_learning_rate":0.08,
2929
"pretraining_epochs":10,
3030

31-
"comment" : "initial_momentum,final_momentum,initial_momentum_epoch :: Specify the momentum factor while training default 0.5,0.9,5",
32-
"initial_momentum":0.5,
33-
"final_momentum":0.9,
34-
"initial_momentum_epoch":5,
31+
"comment" : "initial_pretrain_momentum,final_pretrain_momentum,initial_pretrain_momentum_epoch :: Specify the momentum factor while training default 0.5,0.9,5",
32+
"initial_pretrain_momentum":0.5,
33+
"final_pretrain_momentum":0.9,
34+
"initial_pretrain_momentum_epoch":5,
3535

3636
"comment" : "finetune_method:: Two methods are supported C: Constant learning rate and E : Exponential decay",
3737
"finetune_method":"C",
File renamed without changes.

config/SDA/README

Whitespace-only changes.
File renamed without changes.

run/run_CNN.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -69,8 +69,8 @@ def runCNN(arg):
6969

7070
if model_config['processes']['finetuning']:
7171
#learning rate, batch-size and momentum
72-
lrate = LearningRate.get_instance(model_config['l_rate_method'],model_config['l_rate']);
73-
momentum = model_config['momentum']
72+
lrate = LearningRate.get_instance(model_config['finetune_method'],model_config['finetune_rate']);
73+
momentum = model_config['finetune_momentum']
7474

7575
train_sets, train_xy, train_x, train_y = read_dataset(data_spec['training'])
7676
valid_sets, valid_xy, valid_x, valid_y = read_dataset(data_spec['validation'])

run/run_DBN.py

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -50,9 +50,9 @@ def preTraining(nnetModel,train_sets,train_xy,train_x,train_y,model_config):
5050
pretrainingEpochs = model_config['pretraining_epochs']
5151
keep_layer_num=model_config['keep_layer_num']
5252

53-
initialMomentum = model_config['initial_momentum']
54-
initMomentumEpochs = model_config['initial_momentum_epoch']
55-
finalMomentum = model_config['final_momentum']
53+
initialMomentum = model_config['initial_pretrain_momentum']
54+
initMomentumEpochs = model_config['initial_pretrain_momentum_epoch']
55+
finalMomentum = model_config['final_pretrain_momentum']
5656

5757
logger.info('Pre-training the model ...')
5858
start_time = time.clock()
@@ -63,7 +63,7 @@ def preTraining(nnetModel,train_sets,train_xy,train_x,train_y,model_config):
6363
if (nnetModel.rbm_layers[i].is_gbrbm()):
6464
pretrain_lr = model_config['gbrbm_learning_rate']
6565
else:
66-
pretrain_lr = model_config['learning_rate']
66+
pretrain_lr = model_config['pretraining_learning_rate']
6767
# go through pretraining epochs
6868
momentum = initialMomentum
6969
for epoch in xrange(pretrainingEpochs):

0 commit comments

Comments
 (0)