 import time

 import numpy
+from collections import OrderedDict

 import theano
 import theano.tensor as T

 from models import nnet

+
 class DNN_Dropout(nnet):

     def __init__(self, numpy_rng, theano_rng=None, n_ins=784,
@@ -127,6 +129,9 @@ def __init__(self, numpy_rng, theano_rng=None, n_ins=784,
         self.finetune_cost = self.dropout_logLayer.negative_log_likelihood(self.y)
         self.errors = self.logLayer.errors(self.y)

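+        # expose the classifier's prediction op and the second-to-last
+        # sigmoid layer's output, so they can be reused as learned features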
+        self.output = self.logLayer.prediction()
+        self.features = self.sigmoid_layers[-2].output
+
         if self.l1_reg is not None:
             for i in xrange(self.n_layers):
                 W = self.params[i * 2]
@@ -138,19 +143,43 @@ def __init__(self, numpy_rng, theano_rng=None, n_ins=784,
                 self.finetune_cost += self.l2_reg * T.sqr(W).sum()

     def build_finetune_functions(self, train_shared_xy, valid_shared_xy, batch_size):
+        """
+        Generates a function `train` that implements one step of
+        finetuning and a function `validate` that computes the error on
+        a batch from the validation set.
+
+        :type train_shared_xy: pair of theano.tensor.TensorType
+        :param train_shared_xy: the training set, given as a pair of two
+            Theano variables: one for the datapoints, the other for the labels
+
+        :type valid_shared_xy: pair of theano.tensor.TensorType
+        :param valid_shared_xy: the validation set, given as a pair of two
+            Theano variables: one for the datapoints, the other for the labels
+
+        :type batch_size: int
+        :param batch_size: size of a minibatch
+
+        :returns: (theano.function, theano.function)
+            * a training function that takes minibatch_index, learning_rate
+              and momentum, updates the weights, and returns the error rate
+            * a validation function that takes minibatch_index and returns
+              the error rate
+        """

         (train_set_x, train_set_y) = train_shared_xy
         (valid_set_x, valid_set_y) = valid_shared_xy

         index = T.lscalar('index')  # index to a [mini]batch
-        learning_rate = T.fscalar('learning_rate')
-        momentum = T.fscalar('momentum')
+        learning_rate = T.scalar('learning_rate', dtype=theano.config.floatX)
+        momentum = T.scalar('momentum', dtype=theano.config.floatX)
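+        # typing the scalars as theano.config.floatX keeps their precision
+        # consistent with the Theano configuration (e.g. float32 on GPU)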

         # compute the gradients with respect to the model parameters
         gparams = T.grad(self.finetune_cost, self.params)

         # compute list of fine-tuning updates
-        updates = {}
+        updates = OrderedDict()
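+        # an OrderedDict makes the update order deterministic; the updates
+        # implement classical momentum: delta := momentum * delta
+        # - learning_rate * gradient, then param := param + delta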
         for dparam, gparam in zip(self.delta_params, gparams):
             updates[dparam] = momentum * dparam - gparam * learning_rate
         for dparam, param in zip(self.delta_params, self.params):
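
For reference, a minimal sketch of how the returned functions would typically be driven, following the docstring above. The instance name `dnn`, the batch counts, and the hyperparameter values are hypothetical, not part of this change:

train_fn, valid_fn = dnn.build_finetune_functions(
    (train_set_x, train_set_y), (valid_set_x, valid_set_y), batch_size=256)

for epoch in xrange(20):
    for minibatch_index in xrange(n_train_batches):
        # arguments: minibatch index, learning rate, momentum
        train_error = train_fn(minibatch_index, 0.08, 0.5)
    valid_errors = [valid_fn(i) for i in xrange(n_valid_batches)]
    print 'epoch %d, validation error %f' % (epoch, numpy.mean(valid_errors))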