Commit 7057ba8

Merge branch 'test'
2 parents: b254d19 + 71a4ced

11 files changed

Lines changed: 93 additions & 44 deletions


File renamed without changes.

config/CNN/NP/model_conf.json

Lines changed: 6 additions & 6 deletions
@@ -23,17 +23,17 @@
     "comment" : "batch_size :: specify the mini batch size while training, default 256",
     "batch_size" : 256,

-    "comment" : "momentum :: Specify the momentum factor while training default 0.5",
-    "momentum" : 0.5,
+    "comment" : "finetune_momentum :: Specify the momentum factor while training default 0.5",
+    "finetune_momentum" : 0.5,

     "comment" : "n_outs :: Specify the number of outputs",
     "n_outs" : 10,

-    "comment" : "l_rate_method :: Two method l_rate method are supported C: Constant learning rate and E : Exponential decay",
-    "l_rate_method" : "C",
+    "comment" : "finetune_method :: Two method l_rate method are supported C: Constant learning rate and E : Exponential decay",
+    "finetune_method" : "C",

-    "comment" : "l_rate :: learning rate configuration",
-    "l_rate" : {
+    "comment" : "finetune_rate :: learning rate configuration",
+    "finetune_rate" : {
     "learning_rate" : 0.08,
     "epoch_num" : 0,

config/CNN/T/model_conf.json

Lines changed: 6 additions & 6 deletions
@@ -23,17 +23,17 @@
     "comment" : "batch_size :: specify the mini batch size while training, default 256",
     "batch_size" : 8,

-    "comment" : "momentum :: Specify the momentum factor while training default 0.5",
-    "momentum" : 0.5,
+    "comment" : "finetune_momentum :: Specify the momentum factor while training default 0.5",
+    "finetune_momentum" : 0.5,

     "comment" : "n_outs :: Specify the number of outputs",
     "n_outs" : 8,

-    "comment" : "l_rate_method :: Two method l_rate method are supported C: Constant learning rate and E : Exponential decay",
-    "l_rate_method" : "C",
+    "comment" : "finetune_method :: Two method l_rate method are supported C: Constant learning rate and E : Exponential decay",
+    "finetune_method" : "C",

-    "comment" : "l_rate :: learning rate configuration",
-    "l_rate" : {
+    "comment" : "finetune_rate :: learning rate configuration",
+    "finetune_rate" : {
     "learning_rate" : 0.08,
     "epoch_num" : 10,
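
The same three key renames apply to both CNN configs above: `momentum` becomes `finetune_momentum`, `l_rate_method` becomes `finetune_method`, and `l_rate` becomes `finetune_rate`, so configs written against the old schema will no longer be picked up. Below is a minimal migration sketch; the key map is taken from the diffs above, but the script itself is hypothetical and not part of this commit, and the path is only a placeholder.

```python
import json

# Old-to-new key map taken from the config diffs above; this helper is
# hypothetical and not part of this commit.
KEY_RENAMES = {
    "momentum": "finetune_momentum",
    "l_rate_method": "finetune_method",
    "l_rate": "finetune_rate",
}

def migrate(path):
    with open(path) as f:
        conf = json.load(f)
    for old, new in KEY_RENAMES.items():
        if old in conf:
            conf[new] = conf.pop(old)
    with open(path, "w") as f:
        json.dump(conf, f, indent=4)

migrate("config/CNN/NP/model_conf.json")  # placeholder path
```

Note that these configs repeat the `"comment"` key, which a plain `json.load` collapses to the last occurrence, so a text-level rename may be safer in practice.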

config/DBN/README.md

Lines changed: 49 additions & 0 deletions
@@ -0,0 +1,49 @@
Model Config
------------
* `nnetType` : (Mandatory) specify the type of network (CNN/RBM/SDA/DNN)
* `train_data` : (Mandatory) specify the working directory containing data configuration and output
* `wdir` : (Mandatory) working directory.
* `data_spec` : (Mandatory) specify the path of the data specification relative to `model_config.json`
* `nnet_spec` : (Mandatory) specify the path of the network configuration specification relative to `model_config.json`

* `output_file` : (Mandatory) specify the path of the RBM network output file relative to the working directory
* `input_file` : specify the path of the RBM network input file relative to the working directory

* `batch_size` : specify the mini batch size while training, default 128

* `n_ins` : 784
* `n_outs` : 10

* `gbrbm_learning_rate` : pretraining learning rate for Gaussian-Bernoulli RBM layers
* `pretraining_learning_rate` : pretraining learning rate for the remaining RBM layers
* `pretraining_epochs` : number of pretraining epochs

* `initial_pretrain_momentum` : specify the momentum factor used at the start of pretraining, default 0.5
* `final_pretrain_momentum` : specify the momentum factor used for the rest of pretraining, default 0.9
* `initial_pretrain_momentum_epoch` : number of epochs trained with the initial momentum before switching to the final momentum, default 5

* `finetune_method` : two methods are supported, C: constant learning rate and E: exponential decay
* `finetune_rate` : learning rate configuration (see the example after this file)

> param | description | default value |learning method
> :-----|:------------|:--------------:|:---------------:
> `learning_rate` ||0.08 | C
> `epoch_num` ||10 | C
> `start_rate` ||0.08 | E
> `scale_by` ||0.5 | E
> `min_derror_decay_start` ||0.05 | E
> `min_derror_stop` ||0.05 | E
> `min_epoch_decay_start` || 15 | E
> `init_error` || 100 | E

* `finetune_momentum` : specify the momentum factor while finetuning

* `processes` :

>> * `pretraining` : default: false
>> * `finetuning` : default: false
>> * `testing` : default: false
>> * `export_data` : default: false

* `export_path` : path (relative to wdir) for writing (bottleneck) features.
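
For reference, the keys documented above fit together roughly as follows. This is an illustrative sketch in Python dict form, not a file shipped in this commit; values are the defaults listed in the README and in the model_conf.json diff below, and the mandatory path keys (`nnetType`, `train_data`, `wdir`, `data_spec`, `nnet_spec`, `output_file`) are omitted.

```python
# Illustrative DBN model config assembled from the documented keys; values are
# the README/diff defaults, not a file from this commit.
dbn_model_conf = {
    "batch_size": 128,
    "n_ins": 784,
    "n_outs": 10,

    # pretraining
    "gbrbm_learning_rate": 0.005,
    "pretraining_learning_rate": 0.08,
    "pretraining_epochs": 10,
    "initial_pretrain_momentum": 0.5,
    "final_pretrain_momentum": 0.9,
    "initial_pretrain_momentum_epoch": 5,

    # finetuning with a constant learning rate ("C")
    "finetune_method": "C",
    "finetune_rate": {"learning_rate": 0.08, "epoch_num": 10},
    "finetune_momentum": 0.5,

    # which stages of the pipeline to run
    "processes": {
        "pretraining": True,
        "finetuning": True,
        "testing": False,
        "export_data": False,
    },
}
```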

config/DBN/model_conf.json

Lines changed: 5 additions & 5 deletions
@@ -25,13 +25,13 @@

     "comment" :"TODO",
     "gbrbm_learning_rate":0.005,
-    "learning_rate":0.08,
+    "pretraining_learning_rate":0.08,
     "pretraining_epochs":10,

-    "comment" : "initial_momentum,final_momentum,initial_momentum_epoch :: Specify the momentum factor while training default 0.5,0.9,5",
-    "initial_momentum":0.5,
-    "final_momentum":0.9,
-    "initial_momentum_epoch":5,
+    "comment" : "initial_pretrain_momentum,final_pretrain_momentum,initial_pretrain_momentum_epoch :: Specify the momentum factor while training default 0.5,0.9,5",
+    "initial_pretrain_momentum":0.5,
+    "final_pretrain_momentum":0.9,
+    "initial_pretrain_momentum_epoch":5,

     "comment" : "finetune_method:: Two methods are supported C: Constant learning rate and E : Exponential decay",
     "finetune_method":"C",
File renamed without changes.

config/SDA/README

Whitespace-only changes.
File renamed without changes.

run/run_CNN.py

Lines changed: 2 additions & 2 deletions
@@ -69,8 +69,8 @@ def runCNN(arg):

     if model_config['processes']['finetuning']:
         #learning rate, batch-size and momentum
-        lrate = LearningRate.get_instance(model_config['l_rate_method'],model_config['l_rate']);
-        momentum = model_config['momentum']
+        lrate = LearningRate.get_instance(model_config['finetune_method'],model_config['finetune_rate']);
+        momentum = model_config['finetune_momentum']

         train_sets, train_xy, train_x, train_y = read_dataset(data_spec['training'])
         valid_sets, valid_xy, valid_x, valid_y = read_dataset(data_spec['validation'])
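
Configs that still use the old key names will now fail with a KeyError at this point. If backward compatibility were wanted, a small fallback lookup could bridge the rename; this is a hypothetical addition, not something this commit does.

```python
# Hypothetical backward-compatible lookup; the commit itself reads only the
# new key names. Old names fall back so pre-rename configs keep working.
def cfg(model_config, new_key, old_key):
    return model_config[new_key] if new_key in model_config else model_config[old_key]

lrate = LearningRate.get_instance(cfg(model_config, 'finetune_method', 'l_rate_method'),
                                  cfg(model_config, 'finetune_rate', 'l_rate'))
momentum = cfg(model_config, 'finetune_momentum', 'momentum')
```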

run/run_DBN.py

Lines changed: 4 additions & 4 deletions
@@ -50,9 +50,9 @@ def preTraining(nnetModel,train_sets,train_xy,train_x,train_y,model_config):
     pretrainingEpochs = model_config['pretraining_epochs']
     keep_layer_num=model_config['keep_layer_num']

-    initialMomentum = model_config['initial_momentum']
-    initMomentumEpochs = model_config['initial_momentum_epoch']
-    finalMomentum = model_config['final_momentum']
+    initialMomentum = model_config['initial_pretrain_momentum']
+    initMomentumEpochs = model_config['initial_pretrain_momentum_epoch']
+    finalMomentum = model_config['final_pretrain_momentum']

     logger.info('Pre-training the model ...')
     start_time = time.clock()
@@ -63,7 +63,7 @@ def preTraining(nnetModel,train_sets,train_xy,train_x,train_y,model_config):
         if (nnetModel.rbm_layers[i].is_gbrbm()):
             pretrain_lr = model_config['gbrbm_learning_rate']
         else:
-            pretrain_lr = model_config['learning_rate']
+            pretrain_lr = model_config['pretraining_learning_rate']
         # go through pretraining epochs
         momentum = initialMomentum
         for epoch in xrange(pretrainingEpochs):
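
The renamed momentum keys describe a two-stage schedule: pretraining starts at `initial_pretrain_momentum` and, after `initial_pretrain_momentum_epoch` epochs, switches to `final_pretrain_momentum` (defaults 0.5, 5, and 0.9 per the config comment). A minimal sketch of that schedule, assuming the switch happens at the epoch boundary; the actual update loop lives in run/run_DBN.py.

```python
# Sketch of the pretraining momentum schedule implied by the renamed keys.
# Defaults follow config/DBN/model_conf.json; the epoch-boundary switch is an
# assumption, the real loop is in run/run_DBN.py.
def pretrain_momentum(epoch,
                      initial_momentum=0.5,
                      final_momentum=0.9,
                      initial_momentum_epochs=5):
    # Use the initial momentum for the first few epochs, then the final one.
    return initial_momentum if epoch < initial_momentum_epochs else final_momentum

for epoch in range(10):  # pretraining_epochs = 10 in config/DBN/model_conf.json
    momentum = pretrain_momentum(epoch)
```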
