@@ -94,14 +94,6 @@ def __init__(self, params: Parameters):
         # initialize the parent class
         super(Network, self).__init__()

-        # Mappings for parsing of the activation layers.
-        self._activation_mappings = {
-            "Sigmoid": nn.Sigmoid,
-            "ReLU": nn.ReLU,
-            "LeakyReLU": nn.LeakyReLU,
-            "Tanh": nn.Tanh,
-        }
-
         # initialize the layers
         self.number_of_layers = len(self.params.layer_sizes) - 1

@@ -316,9 +308,13 @@ def _append_activation_function(self, activation_function):
         if activation_function is None:
             pass
         elif isinstance(activation_function, str):
-            self.layers.append(
-                self._activation_mappings[activation_function]()
-            )
+            try:
+                self.layers.append(getattr(torch.nn, activation_function)())
+            except AttributeError:
+                raise Exception(
+                    "Torch does not contain the specified "
+                    "activation function: " + activation_function
+                )
         elif isinstance(activation_function, nn.Module):
             self.layers.append(activation_function)
         elif issubclass(activation_function, nn.Module):
@@ -367,9 +363,7 @@ def __init__(self, params):
             self.params.num_hidden_layers,
             batch_first=True,
         )
-        self.activation = self._activation_mappings[
-            self.params.layer_activations[0]
-        ]()
+        self.activation = getattr(torch.nn, self.params.layer_activations[0])()

         self.batch_size = None
         # Once everything is done, we can move the Network on the target
@@ -505,9 +499,7 @@ def __init__(self, params):
             self.params.num_hidden_layers,
             batch_first=True,
         )
-        self.activation = self._activation_mappings[
-            self.params.layer_activations[0]
-        ]()
+        self.activation = getattr(torch.nn, self.params.layer_activations[0])()

         if params.use_gpu:
             self.to("cuda")
0 commit comments