Skip to content

Commit 326b262

Browse files
committed
Fix: CNN
1 parent ff5a0d0 commit 326b262

2 files changed

Lines changed: 1 addition & 138 deletions

File tree

models/__init__.py

Lines changed: 1 addition & 95 deletions
Original file line numberDiff line numberDiff line change
@@ -2,7 +2,6 @@
22
from collections import OrderedDict
33
import theano
44
import theano.tensor as T
5-
import time
65

76
class nnet(object):
87
"""Abstract class for all Network Models"""
@@ -133,97 +132,4 @@ def getFeaturesFunction(self):
133132
in_x = x.type('in_x');
134133
fn = theano.function(inputs=[in_x],outputs=[self.features],
135134
givens={self.x: in_x},name='features')#,on_unused_input='warn')
136-
return fn
137-
138-
139-
import logging
140-
logger = logging.getLogger(__name__)
141-
142-
def testing(nnetModel, test_sets, test_xy, test_x, test_y, batch_size):
    """Evaluate a trained network over every partition of the test set.

    Iterates the partitioned test reader to exhaustion, running the model's
    compiled test function on each full mini-batch, and accumulates both the
    per-batch error and the raw predictions.

    :param nnetModel: trained model exposing ``build_test_function`` (project type).
    :param test_sets: partitioned dataset reader (project type) — assumed to
        expose ``is_finish``/``make_partition_shared``/``cur_frame_num``/
        ``read_next_partition_data``/``initialize_read``; TODO confirm API.
    :param test_xy: shared (x, y) pair the reader fills per partition.
    :param test_x: shared variable holding test inputs.
    :param test_y: shared variable holding test labels.
    :param batch_size: mini-batch size; frames beyond the last full batch of a
        partition are dropped.
    :returns: ``(test_output, test_loss)`` — concatenated predictions and the
        mean per-batch error.
    """
    # get the testing function for the model
    logger.info('Getting the Test function')
    test_fn = nnetModel.build_test_function((test_x, test_y), batch_size=batch_size)

    logger.info('Starting Testing')

    test_error = []
    test_output = numpy.array([], int)
    while not test_sets.is_finish():
        test_sets.make_partition_shared(test_xy)
        # Explicit floor division: identical for Py2 ints, and safe if
        # ``from __future__ import division`` is ever added to this module.
        n_test_batches = test_sets.cur_frame_num // batch_size
        for i in xrange(n_test_batches):
            pred, err = test_fn(i)
            test_error.append(err)
            test_output = numpy.append(test_output, pred)
        test_sets.read_next_partition_data()
        logger.debug("Test Error (upto curr part) = %f", numpy.mean(test_error))
    test_sets.initialize_read()

    test_loss = numpy.mean(test_error)
    logger.info('Optimization complete with best Test score of %f %%', test_loss * 100)

    # NOTE(review): ``numpy`` is used but its import is not visible in this
    # module's shown header — confirm it is imported at file top.
    return test_output, test_loss
167-
168-
def fineTunning(nnetModel, train_sets, train_xy, train_x, train_y,
                valid_sets, valid_xy, valid_x, valid_y, lrate, momentum, batch_size):
    """Fine-tune a pretrained network with mini-batch SGD plus momentum.

    Runs epochs until the learning-rate schedule ``lrate`` returns 0; each
    epoch trains over all training partitions, then scores the validation set
    and feeds the validation error back into the schedule.

    :param nnetModel: model exposing ``build_finetune_functions`` (project type).
    :param train_sets/valid_sets: partitioned dataset readers (project type).
    :param train_xy/valid_xy: shared (x, y) pairs filled per partition.
    :param train_x, train_y, valid_x, valid_y: shared data variables.
    :param lrate: learning-rate schedule object — assumed to expose
        ``get_rate``/``get_next_rate``/``epoch``; TODO confirm API.
    :param momentum: momentum coefficient passed to the training function.
    :param batch_size: mini-batch size.
    :returns: best (lowest) validation error seen across epochs.
    """

    def valid_score():
        # Mean validation error across every partition of the validation set.
        valid_error = []
        while not valid_sets.is_finish():
            valid_sets.make_partition_shared(valid_xy)
            n_valid_batches = valid_sets.cur_frame_num // batch_size
            validation_losses = [validate_fn(i) for i in xrange(n_valid_batches)]
            # NOTE(review): appends a per-partition *list*; numpy.mean of the
            # resulting nested sequence assumes equal-sized partitions — confirm.
            valid_error.append(validation_losses)
            valid_sets.read_next_partition_data()
            logger.debug("Valid Error (upto curr part) = %f", numpy.mean(valid_error))
        valid_sets.initialize_read()
        return numpy.mean(valid_error)

    # get the training, validation function for the model
    logger.info('Getting the finetuning functions')
    train_fn, validate_fn = nnetModel.build_finetune_functions(
        (train_x, train_y), (valid_x, valid_y), batch_size=batch_size)

    best_validation_loss = float('Inf')

    logger.info('Finetuning the model..')
    # time.clock() measures CPU time on Unix, wall time on Windows (Py2);
    # kept for compatibility with this Python 2 codebase.
    start_time = time.clock()

    while lrate.get_rate() != 0:
        train_error = []
        while not train_sets.is_finish():
            train_sets.make_partition_shared(train_xy)
            # Loop over full mini-batches of the current partition.
            for batch_index in xrange(train_sets.cur_frame_num // batch_size):
                train_error.append(train_fn(index=batch_index,
                                            learning_rate=lrate.get_rate(),
                                            momentum=momentum))
                logger.debug('Training batch %d error %f', batch_index,
                             numpy.mean(train_error))
            train_sets.read_next_partition_data()
        logger.info('Fine Tuning:epoch %d, training error %f',
                    lrate.epoch, numpy.mean(train_error))
        train_sets.initialize_read()

        valid_error = valid_score()
        if valid_error < best_validation_loss:
            best_validation_loss = valid_error
        logger.info('Fine Tuning:epoch %d, validation error %f',
                    lrate.epoch, valid_error)
        # The schedule decides the next rate (and eventually termination)
        # from the validation error expressed as a percentage.
        lrate.get_next_rate(current_error=100 * valid_error)

    end_time = time.clock()

    logger.info('Best validation error %f', best_validation_loss)
    logger.info('The fine tuning ran for %.2fm' % ((end_time - start_time) / 60.))
    logger.info('Optimization complete with best validation score of %f %%',
                best_validation_loss * 100)

    return best_validation_loss
219-
220-
221-
def getFeatures(nnetModel, data_spec_testing):
    """Run the model's feature-extraction function over a test dataset.

    :param nnetModel: model exposing ``getFeaturesFunction`` (project type).
    :param data_spec_testing: dataset specification understood by
        ``read_dataset``.
    :returns: None — extracted features are currently discarded (see TODO).
    """
    out_function = nnetModel.getFeaturesFunction()
    # NOTE(review): ``read_dataset`` is neither defined nor imported in the
    # visible part of this module — confirm it is in scope at call time.
    test_sets, test_xy, test_x, test_y = read_dataset(data_spec_testing)
    while not test_sets.is_finish():
        data = out_function(test_sets.feat)
        test_sets.read_next_partition_data()
        # TODO write data
228-
229-
135+
return fn

models/cnn.py

Lines changed: 0 additions & 43 deletions
Original file line numberDiff line numberDiff line change
@@ -84,46 +84,3 @@ def __init__(self, numpy_rng, theano_rng, batch_size, n_outs,conv_layer_configs,
8484

8585
self.errors = self.logLayer.errors(self.y)
8686
self.output = self.logLayer.prediction();
87-
88-
89-
"Getting CNN Feats Outputs"
90-
def build_out_function(self):
91-
feat = T.tensor4('feat', dtype=theano.config.floatX)
92-
out_da = theano.function([feat], self.conv_layers[-1].output, \
93-
updates = None, givens={self.x:feat}, on_unused_input='warn')
94-
return out_da
95-
96-
"Building fine tuning operation "
97-
def build_finetune_functions(self, train_shared_xy, valid_shared_xy, batch_size):
98-
99-
(train_set_x, train_set_y) = train_shared_xy
100-
(valid_set_x, valid_set_y) = valid_shared_xy
101-
102-
index = T.lscalar('index') # index to a [mini]batch
103-
learning_rate = T.fscalar('learning_rate')
104-
momentum = T.fscalar('momentum')
105-
106-
# compute the gradients with respect to the model parameters
107-
gparams = T.grad(self.finetune_cost, self.params)
108-
109-
# compute list of fine-tuning updates
110-
updates = OrderedDict()
111-
112-
for dparam, gparam in zip(self.delta_params, gparams):
113-
updates[dparam] = momentum * dparam - gparam*learning_rate
114-
115-
for dparam, param in zip(self.delta_params, self.params):
116-
updates[param] = param + updates[dparam]
117-
118-
train_fn = theano.function(inputs=[index, theano.Param(learning_rate, default = 0.001),
119-
theano.Param(momentum, default = 0.5)],outputs=self.errors, updates=updates,
120-
givens={self.x: train_set_x[index * batch_size:(index + 1) * batch_size],
121-
self.y: train_set_y[index * batch_size:(index + 1) * batch_size]})
122-
123-
valid_fn = theano.function(inputs=[index, theano.Param(learning_rate, default = 0.001),
124-
theano.Param(momentum, default = 0.5)],outputs=self.errors, updates=updates,
125-
givens={self.x: valid_set_x[index * batch_size:(index + 1) * batch_size],
126-
self.y: valid_set_y[index * batch_size:(index + 1) * batch_size]})
127-
128-
return train_fn, valid_fn
129-

0 commit comments

Comments
 (0)