python-biopython 1.60
Layer.py

00001 """Model a single layer in a nueral network.
00002 
00003 These classes deal with a layers in the neural network (ie. the input layer,
00004 hidden layers and the output layer).
00005 """
00006 # standard library
00007 import math
00008 import random
00009 
00010 def logistic_function(value):
00011     """Transform the value with the logistic function.
00012 
00013     XXX This is in the wrong place -- I need to find a place to put it
00014     that makes sense.
00015     """
00016     return 1.0 / (1.0 + math.exp(-value))
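
# A hedged sanity check (added illustration, not part of the original
# module): the logistic function maps 0 to exactly 0.5 and saturates
# toward 0 and 1 for large negative and positive inputs.
assert logistic_function(0.0) == 0.5
assert logistic_function(-5.0) < 0.01
assert logistic_function(5.0) > 0.99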

class AbstractLayer(object):
    """Abstract base class for all layers.
    """
    def __init__(self, num_nodes, has_bias_node):
        """Initialize the layer.

        Arguments:

        o num_nodes -- The number of nodes that are contained in this layer.

        o has_bias_node -- Specify whether or not this layer has a bias
        node. The bias node is not included in the number of nodes in the
        layer, but is used in constructing and dealing with the network.
        """
        # specify all of the nodes in the layer; the bias node, when
        # present, is node 0
        if has_bias_node:
            lower_range = 0
        else:
            lower_range = 1

        self.nodes = range(lower_range, num_nodes + 1)

        self.weights = {}

    def __str__(self):
        """Debugging output.
        """
        return "weights: %s" % self.weights

    def set_weight(self, this_node, next_node, value):
        """Set a weight value from one node to the next.

        If weights are not explicitly set, they will be initialized to
        random values to start with.
        """
        if (this_node, next_node) not in self.weights:
            raise ValueError("Invalid node values passed.")

        self.weights[(this_node, next_node)] = value

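# Added commentary (not in the original module): weights are stored in
# dictionaries keyed by (this_node, next_node) tuples, so, for example,
# layer.weights[(0, 1)] holds the weight from this layer's bias node
# (node 0) to node 1 of the next layer.
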
class InputLayer(AbstractLayer):
    def __init__(self, num_nodes, next_layer):
        """Initialize the input layer.

        Arguments:

        o num_nodes -- The number of nodes in the input layer.

        o next_layer -- The next layer in the neural network this is
        connected to.
        """
        AbstractLayer.__init__(self, num_nodes, 1)

        self._next_layer = next_layer

        # set up the weights, initialized to random values
        self.weights = {}
        for own_node in self.nodes:
            for other_node in self._next_layer.nodes:
                self.weights[(own_node, other_node)] = \
                                        random.uniform(-2.0, 2.0)

        # set up the weight changes
        self.weight_changes = {}
        for own_node in self.nodes:
            for other_node in self._next_layer.nodes:
                self.weight_changes[(own_node, other_node)] = 0.0

        # set up the calculated values for each node -- these will
        # actually just be set from inputs into the network.
        self.values = {}
        for node in self.nodes:
            # the bias node always has a value of 1
            if node == 0:
                self.values[0] = 1
            else:
                self.values[node] = 0

    def update(self, inputs):
        """Update the values of the nodes using the given inputs.

        Arguments:

        o inputs -- A list of inputs into the network -- its length must
        equal the number of non-bias nodes in the layer.
        """
        if len(inputs) != len(self.values) - 1:
            raise ValueError("Inputs do not match input layer nodes.")

        # set the node values from the inputs
        for input_num in range(len(inputs)):
            self.values[input_num + 1] = inputs[input_num]

        # propagate the update to the next layer
        self._next_layer.update(self)

    def backpropagate(self, outputs, learning_rate, momentum):
        """Recalculate all weights based on the last round of prediction.

        Arguments:

        o outputs -- The output info we are using to calculate error.

        o learning_rate -- The learning rate of the network.

        o momentum -- The amount of weight to place on the previous weight
        change.
        """
        # first backpropagate to the next layers
        next_errors = self._next_layer.backpropagate(outputs, learning_rate,
                                                     momentum)

        for this_node in self.nodes:
            for next_node in self._next_layer.nodes:
                error_deriv = (next_errors[next_node] *
                               self.values[this_node])

                delta = (learning_rate * error_deriv +
                        momentum * self.weight_changes[(this_node, next_node)])

                # apply the change to the weight
                self.weights[(this_node, next_node)] += delta

                # remember the weight change for next time
                self.weight_changes[(this_node, next_node)] = delta

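# Added commentary (not in the original module): a single call to
# InputLayer.update() runs the whole forward pass -- each layer fills in
# its values and then calls update() on its successor. backpropagate()
# chains through the layers in the same way, starting from the input.
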
class HiddenLayer(AbstractLayer):
    def __init__(self, num_nodes, next_layer, activation=logistic_function):
        """Initialize a hidden layer.

        Arguments:

        o num_nodes -- The number of nodes in this hidden layer.

        o next_layer -- The next layer in the neural network that this
        is connected to.

        o activation -- The transformation function used to transform
        predicted values.
        """
        AbstractLayer.__init__(self, num_nodes, 1)

        self._next_layer = next_layer
        self._activation = activation

        # set up the weights, initialized to random values
        self.weights = {}
        for own_node in self.nodes:
            for other_node in self._next_layer.nodes:
                self.weights[(own_node, other_node)] = \
                                        random.uniform(-2.0, 2.0)

        # set up the weight changes
        self.weight_changes = {}
        for own_node in self.nodes:
            for other_node in self._next_layer.nodes:
                self.weight_changes[(own_node, other_node)] = 0.0

        # set up the calculated values for each node
        self.values = {}
        for node in self.nodes:
            # the bias node always has a value of 1
            if node == 0:
                self.values[node] = 1
            else:
                self.values[node] = 0

    def update(self, previous_layer):
        """Update the values of nodes from the previous layer info.

        Arguments:

        o previous_layer -- The previous layer in the network.
        """
        # update each node in this layer, skipping the bias node
        for update_node in self.nodes[1:]:
            # sum up the weighted inputs from the previous layer
            total = 0.0
            for node in previous_layer.nodes:
                total += (previous_layer.values[node] *
                          previous_layer.weights[(node, update_node)])

            self.values[update_node] = self._activation(total)

        # propagate the update to the next layer
        self._next_layer.update(self)

    def backpropagate(self, outputs, learning_rate, momentum):
        """Recalculate all weights based on the last round of prediction.

        Arguments:

        o outputs -- The output values we are using to see how good our
        network is at predicting things.

        o learning_rate -- The learning rate of the network.

        o momentum -- The amount of weight to place on the previous weight
        change.
        """
        # first backpropagate to the next layers
        next_errors = self._next_layer.backpropagate(outputs, learning_rate,
                                                     momentum)

        # --- update the weights
        for this_node in self.nodes:
            for next_node in self._next_layer.nodes:
                error_deriv = (next_errors[next_node] *
                               self.values[this_node])

                delta = (learning_rate * error_deriv +
                        momentum * self.weight_changes[(this_node, next_node)])

                # apply the change to the weight
                self.weights[(this_node, next_node)] += delta

                # remember the weight change for next time
                self.weight_changes[(this_node, next_node)] = delta

        # --- calculate error terms
        errors = {}
        for error_node in self.nodes:
            # get the error info propagated from the next layer
            previous_error = 0.0
            for next_node in self._next_layer.nodes:
                previous_error += (next_errors[next_node] *
                                   self.weights[(error_node, next_node)])

            # get the correction factor
            corr_factor = (self.values[error_node] *
                           (1 - self.values[error_node]))

            # calculate the error
            errors[error_node] = previous_error * corr_factor

        return errors

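# Added commentary (not in the original module): the correction factor
# above is the derivative of the logistic function, z * (1 - z) where
# z = logistic(x), so each hidden node's error is the weighted sum of
# downstream errors times that derivative -- the standard backpropagation
# chain rule. Note the derivative is hardcoded, so a non-default
# activation function would need a matching change here.
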
class OutputLayer(AbstractLayer):
    def __init__(self, num_nodes, activation=logistic_function):
        """Initialize the Output Layer.

        Arguments:

        o num_nodes -- The number of nodes in this layer. This corresponds
        to the number of outputs in the neural network.

        o activation -- The transformation function used to transform
        predicted values.
        """
        AbstractLayer.__init__(self, num_nodes, 0)

        self._activation = activation

        self.values = {}
        for node in self.nodes:
            self.values[node] = 0

    def update(self, previous_layer):
        """Update the value of output nodes from the previous layers.

        Arguments:

        o previous_layer -- The hidden layer preceding this one.
        """
        # update all of the nodes in this layer
        for update_node in self.nodes:
            # sum up the contribution from all of the previous inputs
            total = 0.0
            for node in previous_layer.nodes:
                total += (previous_layer.values[node] *
                          previous_layer.weights[(node, update_node)])

            self.values[update_node] = self._activation(total)

    def backpropagate(self, outputs, learning_rate, momentum):
        """Calculate the backpropagation error at a given node.

        This calculates the error term using the formula:

        p = (t - z) z (1 - z)

        where z is the calculated value for the node, and t is the
        real (expected) value.

        Arguments:

        o outputs -- The list of output values we use to calculate the
        errors in our predictions.
        """
        errors = {}
        for node in self.nodes:
            calculated_value = self.values[node]
            real_value = outputs[node - 1]

            errors[node] = ((real_value - calculated_value) *
                            calculated_value *
                            (1 - calculated_value))

        return errors

    def get_error(self, real_value, node_number):
        """Return the squared error value at a particular node.
        """
        predicted_value = self.values[node_number]
        return 0.5 * math.pow((real_value - predicted_value), 2)
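
    # A hedged worked example (added commentary, not in the original
    # module): if self.values[1] is 0.8 and the real value is 1.0,
    # get_error(1.0, 1) returns 0.5 * (1.0 - 0.8) ** 2 == 0.02.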

    def set_weight(self, this_node, next_node, value):
        raise NotImplementedError("Can't set weights for the output layer")
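
if __name__ == "__main__":
    # A minimal wiring sketch (added illustration, not part of the
    # original module). Layers are built from the output backwards,
    # since each layer needs a reference to its successor.
    output_layer = OutputLayer(1)
    hidden_layer = HiddenLayer(2, output_layer)
    input_layer = InputLayer(2, hidden_layer)

    # one forward pass with two inputs, then one backpropagation pass
    # against a single expected output
    input_layer.update([0.5, 0.9])
    input_layer.backpropagate([1.0], learning_rate=0.5, momentum=0.1)
    print("output values: %s" % output_layer.values)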