1 """Model a single layer in a nueral network.
2
3 These classes deal with a layers in the neural network (ie. the input layer,
4 hidden layers and the output layer).
5 """
6
7 import math
8 import random
9
11 """Transform the value with the logistic function.
12
13 XXX This is in the wrong place -- I need to find a place to put it
14 that makes sense.
15 """
16 return 1.0 / (1.0 + math.exp(-value))
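
# Quick sanity check of the logistic function (illustrative note, not part
# of the original module):
#
#     >>> logistic_function(0.0)
#     0.5
#     >>> 0.99 < logistic_function(5.0) < 1.0
#     True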

class AbstractLayer:
    """Abstract base class for all layers.
    """
    def __init__(self, num_nodes, has_bias_node):
        """Initialize the layer.

        Arguments:

        o num_nodes -- The number of nodes that are contained in this layer.

        o has_bias_node -- Specify whether or not this layer has a bias
        node. The bias node is not included in the number of nodes in the
        layer, but is used in constructing and dealing with the network.
        """
        if has_bias_node:
            lower_range = 0
        else:
            lower_range = 1

        # node 0, when present, is the bias node
        self.nodes = list(range(lower_range, num_nodes + 1))

        self.weights = {}
43 """Debugging output.
44 """
45 return "weights: %s" % self.weights
46
    def set_weight(self, this_node, next_node, value):
        """Set a weight value from one node to the next.

        If weights are not explicitly set, they will be initialized to
        random values to start with.
        """
        if (this_node, next_node) not in self.weights:
            raise ValueError("Invalid node values passed.")

        self.weights[(this_node, next_node)] = value

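# Illustrative usage of set_weight (added note, not part of the original
# module): for a HiddenLayer whose next layer contains node 1,
# hidden.set_weight(0, 1, 0.5) replaces the random initial weight on the
# bias -> node 1 connection, while a pair that was never created, e.g.
# hidden.set_weight(99, 1, 0.5), raises ValueError.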

class HiddenLayer(AbstractLayer):
    def __init__(self, num_nodes, next_layer, activation=logistic_function):
        """Initialize a hidden layer.

        Arguments:

        o num_nodes -- The number of nodes in this hidden layer.

        o next_layer -- The next layer in the neural network that this
        is connected to.

        o activation -- The transformation function used to transform
        predicted values.
        """
        AbstractLayer.__init__(self, num_nodes, 1)

        self._next_layer = next_layer
        self._activation = activation

        # initialize each weight to the next layer with a random value
        self.weights = {}
        for own_node in self.nodes:
            for other_node in self._next_layer.nodes:
                self.weights[(own_node, other_node)] = \
                    random.uniform(-2.0, 2.0)

        # remember the previous weight changes, for use with momentum
        self.weight_changes = {}
        for own_node in self.nodes:
            for other_node in self._next_layer.nodes:
                self.weight_changes[(own_node, other_node)] = 0.0

        # set the initial node values; the bias node (node 0) is always 1
        self.values = {}
        for node in self.nodes:
            if node == 0:
                self.values[node] = 1
            else:
                self.values[node] = 0
    def update(self, previous_layer):
        """Update the values of nodes from the previous layer info.

        Arguments:

        o previous_layer -- The previous layer in the network.
        """
        # update each node in this layer, except the bias node
        for update_node in self.nodes[1:]:
            # the new value is the weighted sum of the previous layer's
            # values, passed through the activation function
            total = 0.0
            for node in previous_layer.nodes:
                total += (previous_layer.values[node] *
                          previous_layer.weights[(node, update_node)])

            self.values[update_node] = self._activation(total)

        # propagate the new values on through the network
        self._next_layer.update(self)
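
    # Added note: in equation form, update computes for each non-bias node j
    #     value_j = activation( sum_i value_i * weight_ij )
    # where i ranges over the previous layer's nodes, including its bias node.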

    def backpropagate(self, outputs, learning_rate, momentum):
        """Recalculate all weights based on the last round of prediction.

        Arguments:

        o learning_rate -- The learning rate of the network.

        o momentum -- The amount of weight to place on the previous weight
        change.

        o outputs -- The output values we are using to see how good our
        network is at predicting things.
        """
        # first, get the error terms from the next layer
        next_errors = self._next_layer.backpropagate(outputs, learning_rate,
                                                     momentum)

        # update the weights between this layer and the next
        for this_node in self.nodes:
            for next_node in self._next_layer.nodes:
                error_deriv = (next_errors[next_node] *
                               self.values[this_node])

                delta = (learning_rate * error_deriv +
                         momentum * self.weight_changes[(this_node, next_node)])

                self.weights[(this_node, next_node)] += delta

                # remember this change for the next momentum calculation
                self.weight_changes[(this_node, next_node)] = delta

        # calculate the error terms for this layer to pass back
        errors = {}
        for error_node in self.nodes:
            # the error is the weighted sum of the next layer's errors
            previous_error = 0.0
            for next_node in self._next_layer.nodes:
                previous_error += (next_errors[next_node] *
                                   self.weights[(error_node, next_node)])

            # correct by the derivative of the activation function
            corr_factor = (self.values[error_node] *
                           (1 - self.values[error_node]))

            errors[error_node] = previous_error * corr_factor

        return errors
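
    # Added note: the error term returned for node i is, in equation form,
    #     error_i = ( sum_j next_error_j * weight_ij ) * value_i * (1 - value_i)
    # where value_i * (1 - value_i) is the derivative of the logistic function.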


class OutputLayer(AbstractLayer):
    def __init__(self, num_nodes, activation=logistic_function):
        """Initialize the output layer.

        Arguments:

        o num_nodes -- The number of nodes in this layer. This corresponds
        to the number of outputs in the neural network.

        o activation -- The transformation function used to transform
        predicted values.
        """
        AbstractLayer.__init__(self, num_nodes, 0)

        self._activation = activation

        self.values = {}
        for node in self.nodes:
            self.values[node] = 0

    def update(self, previous_layer):
        """Update the value of output nodes from the previous layer.

        Arguments:

        o previous_layer -- The hidden layer preceding this one.
        """
        # each output value is the weighted sum of the previous layer's
        # values, passed through the activation function
        for update_node in self.nodes:
            total = 0.0
            for node in previous_layer.nodes:
                total += (previous_layer.values[node] *
                          previous_layer.weights[(node, update_node)])

            self.values[update_node] = self._activation(total)

293 """Calculate the backpropagation error at a given node.
294
295 This calculates the error term using the formula:
296
297 p = (z - t) z (1 - z)
298
299 where z is the calculated value for the node, and t is the
300 real value.
301
302 Arguments:
303
304 o outputs - The list of output values we use to calculate the
305 errors in our predictions.
306 """
307 errors = {}
308 for node in self.nodes:
309 calculated_value = self.values[node]
310 real_value = outputs[node - 1]
311
312 errors[node] = ((real_value - calculated_value) *
313 calculated_value *
314 (1 - calculated_value))
315
316 return errors

    def get_error(self, real_value, node_number):
        """Return the error value at a particular node.
        """
        predicted_value = self.values[node_number]
        return 0.5 * math.pow((real_value - predicted_value), 2)

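    # Added note: get_error computes the half squared error
    #     E = 0.5 * (real_value - predicted_value) ** 2
    # halved so that its derivative with respect to the prediction is just
    # (predicted_value - real_value).
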
    def set_weight(self, this_node, next_node, value):
        """Setting weights is not supported for the output layer."""
        raise NotImplementedError("Can't set weights for the output layer")
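
# A minimal smoke test (illustrative sketch, not part of the original module).
# It wires a HiddenLayer to an OutputLayer and fakes the network's input layer
# with a second HiddenLayer, since the real input layer class is defined
# elsewhere in this package.
if __name__ == "__main__":
    out_layer = OutputLayer(1)
    hidden_layer = HiddenLayer(2, out_layer)

    # hypothetical stand-in for an input layer: anything with nodes, values
    # and weights into hidden_layer works for this demonstration
    in_layer = HiddenLayer(2, hidden_layer)
    in_layer.values = {0: 1, 1: 0.5, 2: 0.9}

    # one feedforward pass, then one round of backpropagation
    hidden_layer.update(in_layer)
    print("prediction:", out_layer.values)
    hidden_layer.backpropagate([1.0], learning_rate=0.5, momentum=0.1)
    print("updated weights:", hidden_layer)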