    Systems 19, 2007

 """
+
+from __future__ import print_function
+
 import os
 import sys
 import timeit
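The new `from __future__ import print_function` line is what lets the rest of this port use Python 3's function-call `print` while the file still runs under Python 2. A minimal sketch of the effect:

```python
from __future__ import print_function  # no-op on Python 3; changes Python 2
import sys

# With the future import in place, the same syntax works on both
# interpreters, including the file= keyword used later in this diff
# to route timing output to stderr.
print('... building the model')
print('timing info', file=sys.stderr)
```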
@@ -116,7 +119,7 @@ def __init__(
         # stochastic gradient descent on the MLP

         # start-snippet-2
-        for i in xrange(self.n_layers):
+        for i in range(self.n_layers):
             # construct the sigmoidal layer

             # the size of the input is either the number of hidden units of
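`xrange` was removed in Python 3, where plain `range` is itself a lazy sequence, so the swap above is the standard fix. Under Python 2 this makes `range` build a real list, which costs nothing here since `self.n_layers` is small; for very large ranges on Python 2 one would reach for something like `six.moves.range` instead.

```python
# Python 2: range(n) builds a list, xrange(n) is lazy.
# Python 3: xrange is gone; range(n) is a lazy range object.
for i in range(3):  # valid and cheap on both interpreters for small n
    print(i)
```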
@@ -254,9 +257,9 @@ def build_finetune_functions(self, datasets, batch_size, learning_rate):

         # compute number of minibatches for training, validation and testing
         n_valid_batches = valid_set_x.get_value(borrow=True).shape[0]
-        n_valid_batches /= batch_size
+        n_valid_batches //= batch_size
         n_test_batches = test_set_x.get_value(borrow=True).shape[0]
-        n_test_batches /= batch_size
+        n_test_batches //= batch_size

         index = T.lscalar('index')  # index to a [mini]batch

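The `/=` → `//=` changes are not cosmetic: under Python 3, `/` on two integers is true division and returns a float, which would later break `range(n_valid_batches)` and the minibatch index arithmetic. Floor division preserves the integer batch counts on both interpreters:

```python
n_examples, batch_size = 10000, 32
print(n_examples // batch_size)  # 312 on both Python 2 and 3
print(n_examples / batch_size)   # 312.5 on Python 3; 312 on Python 2
```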
@@ -314,11 +317,11 @@ def build_finetune_functions(self, datasets, batch_size, learning_rate):

         # Create a function that scans the entire validation set
         def valid_score():
-            return [valid_score_i(i) for i in xrange(n_valid_batches)]
+            return [valid_score_i(i) for i in range(n_valid_batches)]

         # Create a function that scans the entire test set
         def test_score():
-            return [test_score_i(i) for i in xrange(n_test_batches)]
+            return [test_score_i(i) for i in range(n_test_batches)]

         return train_fn, valid_score, test_score

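For context, `valid_score` and `test_score` each return a list of per-minibatch zero-one losses, so a caller (such as the `test_SdA` function in the next hunk) reduces them to a mean. A usage sketch under that assumption:

```python
# Hypothetical caller: average the per-minibatch losses into one error rate.
train_fn, valid_score, test_score = sda.build_finetune_functions(
    datasets=datasets, batch_size=batch_size, learning_rate=finetune_lr)
this_validation_loss = numpy.mean(valid_score())
print('validation error %f %%' % (this_validation_loss * 100.))
```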
@@ -357,12 +360,12 @@ def test_SdA(finetune_lr=0.1, pretraining_epochs=15,

     # compute number of minibatches for training, validation and testing
     n_train_batches = train_set_x.get_value(borrow=True).shape[0]
-    n_train_batches /= batch_size
+    n_train_batches //= batch_size

     # numpy random generator
     # start-snippet-3
     numpy_rng = numpy.random.RandomState(89677)
-    print '... building the model'
+    print('... building the model')
     # construct the stacked denoising autoencoder class
     sda = SdA(
         numpy_rng=numpy_rng,
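The fixed seed passed to `numpy.random.RandomState` keeps the weight initialization, and hence the printed costs, reproducible across runs; the port does not change this, but it is useful when comparing output before and after the conversion:

```python
import numpy

rng = numpy.random.RandomState(89677)  # same seed -> same initial weights
print(rng.uniform(size=3))             # identical values on every run
```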
@@ -374,52 +377,51 @@ def test_SdA(finetune_lr=0.1, pretraining_epochs=15,
     #########################
     # PRETRAINING THE MODEL #
     #########################
-    print '... getting the pretraining functions'
+    print('... getting the pretraining functions')
     pretraining_fns = sda.pretraining_functions(train_set_x=train_set_x,
                                                 batch_size=batch_size)

-    print '... pre-training the model'
+    print('... pre-training the model')
     start_time = timeit.default_timer()
     ## Pre-train layer-wise
     corruption_levels = [.1, .2, .3]
-    for i in xrange(sda.n_layers):
+    for i in range(sda.n_layers):
         # go through pretraining epochs
-        for epoch in xrange(pretraining_epochs):
+        for epoch in range(pretraining_epochs):
             # go through the training set
             c = []
-            for batch_index in xrange(n_train_batches):
+            for batch_index in range(n_train_batches):
                 c.append(pretraining_fns[i](index=batch_index,
                          corruption=corruption_levels[i],
                          lr=pretrain_lr))
-            print 'Pre-training layer %i, epoch %d, cost ' % (i, epoch),
-            print numpy.mean(c)
+            print('Pre-training layer %i, epoch %d, cost %f' % (i, epoch, numpy.mean(c)))

     end_time = timeit.default_timer()

-    print >> sys.stderr, ('The pretraining code for file ' +
-                          os.path.split(__file__)[1] +
-                          ' ran for %.2fm' % ((end_time - start_time) / 60.))
+    print(('The pretraining code for file ' +
+           os.path.split(__file__)[1] +
+           ' ran for %.2fm' % ((end_time - start_time) / 60.)), file=sys.stderr)
     # end-snippet-4
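Two Python 2 idioms disappear in this hunk: the trailing-comma `print` (which suppressed the newline so the cost landed on the same line) is folded into a single formatted call, and the chevron redirect `print >> sys.stderr, ...` becomes the `file=` keyword. With the `__future__` import at the top of the file, the new forms run on both interpreters:

```python
import sys

# old: print >> sys.stderr, 'message'     (Python 2 only)
# new: works everywhere once print is a function
print('The pretraining code ran for %.2fm' % (90.0 / 60.), file=sys.stderr)
```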
     ########################
     # FINETUNING THE MODEL #
     ########################

     # get the training, validation and testing function for the model
-    print '... getting the finetuning functions'
+    print('... getting the finetuning functions')
     train_fn, validate_model, test_model = sda.build_finetune_functions(
         datasets=datasets,
         batch_size=batch_size,
         learning_rate=finetune_lr
     )

-    print '... finetunning the model'
+    print('... finetuning the model')
     # early-stopping parameters
     patience = 10 * n_train_batches  # look at this many examples regardless
     patience_increase = 2.  # wait this much longer when a new best is
                             # found
     improvement_threshold = 0.995  # a relative improvement of this much is
                                    # considered significant
-    validation_frequency = min(n_train_batches, patience / 2)
+    validation_frequency = min(n_train_batches, patience // 2)
                                   # go through this many
                                   # minibatches before checking the network
                                   # on the validation set; in this case we
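Only the floor division changes here; the comments around it describe the early-stopping policy that the training loop below implements. As a reference, a minimal sketch of that bookkeeping (names mirror the script; this is illustrative, not the full loop):

```python
# Sketch of the early-stopping rule described above: significant
# improvements extend the patience budget; exhausting it stops training.
if this_validation_loss < best_validation_loss * improvement_threshold:
    patience = max(patience, iter * patience_increase)  # wait longer
    best_validation_loss = this_validation_loss
if patience <= iter:
    done_looping = True
```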
@@ -434,7 +436,7 @@ def test_SdA(finetune_lr=0.1, pretraining_epochs=15,

     while (epoch < training_epochs) and (not done_looping):
         epoch = epoch + 1
-        for minibatch_index in xrange(n_train_batches):
+        for minibatch_index in range(n_train_batches):
             minibatch_avg_cost = train_fn(minibatch_index)
             iter = (epoch - 1) * n_train_batches + minibatch_index

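`iter` flattens `(epoch, minibatch_index)` into one running counter that the patience logic compares against. Note that it shadows the built-in `iter()`; harmless in this scope, though a name like `n_iter` would be cleaner. A worked value:

```python
epoch, minibatch_index, n_train_batches = 3, 25, 100
iter = (epoch - 1) * n_train_batches + minibatch_index  # 2*100 + 25 == 225
```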
@@ -480,9 +482,9 @@ def test_SdA(finetune_lr=0.1, pretraining_epochs=15,
         )
         % (best_validation_loss * 100., best_iter + 1, test_score * 100.)
     )
-    print >> sys.stderr, ('The training code for file ' +
-                          os.path.split(__file__)[1] +
-                          ' ran for %.2fm' % ((end_time - start_time) / 60.))
+    print(('The training code for file ' +
+           os.path.split(__file__)[1] +
+           ' ran for %.2fm' % ((end_time - start_time) / 60.)), file=sys.stderr)


 if __name__ == '__main__':