Multiple objects somehow interfering with each other

Posted 2019-03-03 20:26

Question:

I have a neural network (NN) which works perfectly when applied to a single data set. However, if I run the NN on one set of data and then create a new instance of the NN to run on a different set of data (or even the same set again), the new instance produces completely incorrect predictions.

For example, training on the following pattern (note that the targets encode AND rather than XOR):

    test=[[0,0],[0,1],[1,0],[1,1]]
    data = [[[0,0], [0]],[[0,1], [0]],[[1,0], [0]],[[1,1], [1]]]

    n = NN(2, 3, 1) # Create a neural network with 2 input, 3 hidden and 1 output nodes
    n.train(data,500,0.5,0) # Train it for 500 iterations with learning rate 0.5 and momentum 0

    prediction = np.zeros((len(test)))
    for row in range(len(test)):
        prediction[row] = n.runNetwork(test[row])[0]

    print(prediction)

    #
    # Now do the same thing again but with a new instance and new version of the data.
    #

    test2=[[0,0],[0,1],[1,0],[1,1]]
    data2 = [[[0,0], [0]],[[0,1], [0]],[[1,0], [0]],[[1,1], [1]]]

    p = NN(2, 3, 1)
    p.train(data2,500,0.5,0)

    prediction2 = np.zeros((len(test2)))
    for row in range(len(test2)):
        prediction2[row] = p.runNetwork(test2[row])[0]

    print(prediction2)

Will output:

    [-0.01 -0.   -0.06  0.97]
    [ 0.  0.  1.  1.]

Notice that the first prediction is quite good, whereas the second is completely wrong, and I can't see anything wrong with the class:

    import math
    import random
    import itertools
    import numpy as np

    random.seed(0)

    def rand(a, b):
        return (b-a)*random.random() + a

    def sigmoid(x):
        # despite the name, this is tanh, which squashes values into (-1, 1)
        return math.tanh(x)

    def dsigmoid(y):
        # derivative of tanh written in terms of its output: d/dx tanh(x) = 1 - tanh(x)**2
        return 1.0 - y**2

    class NN:
        def __init__(self, ni, nh, no):
            # number of input, hidden, and output nodes
            self.ni = ni + 1 # +1 for bias node
            self.nh = nh + 1
            self.no = no

            # activations for nodes
            self.ai = [1.0]*self.ni
            self.ah = [1.0]*self.nh
            self.ao = [1.0]*self.no

            # create weights (rows=number of features, columns=number of processing nodes)
            self.wi = np.zeros((self.ni, self.nh))
            self.wo = np.zeros((self.nh, self.no))
            # set them to random values
            for i in range(self.ni):
                for j in range(self.nh):
                    self.wi[i][j] = rand(-5, 5)
            for j in range(self.nh):
                for k in range(self.no):
                    self.wo[j][k] = rand(-5, 5)

            # last change in weights for momentum   
            self.ci = np.zeros((self.ni, self.nh))
            self.co = np.zeros((self.nh, self.no))


        def runNetwork(self, inputs):
            if len(inputs) != self.ni-1:
                raise ValueError('wrong number of inputs')

            # input activations
            for i in range(self.ni-1):
                #self.ai[i] = sigmoid(inputs[i])
                self.ai[i] = inputs[i]

            # hidden activations   
            for j in range(self.nh-1):
                sum = 0.0
                for i in range(self.ni):
                    sum = sum + self.ai[i] * self.wi[i][j]
                self.ah[j] = sigmoid(sum)

            # output activations
            for k in range(self.no):
                sum = 0.0
                for j in range(self.nh):
                    sum = sum + self.ah[j] * self.wo[j][k]
                self.ao[k] = sigmoid(sum)

            ao_simplified = [round(a,2) for a in self.ao[:]]
            return ao_simplified  


        def backPropagate(self, targets, N, M):
            if len(targets) != self.no:
                raise ValueError('wrong number of target values')

            # calculate error terms for output
            output_deltas = [0.0] * self.no
            for k in range(self.no):
                error = targets[k]-self.ao[k]
                output_deltas[k] = dsigmoid(self.ao[k]) * error

            # calculate error terms for hidden
            hidden_deltas = [0.0] * self.nh
            for j in range(self.nh):
                error = 0.0
                for k in range(self.no):
                    error = error + output_deltas[k]*self.wo[j][k]
                hidden_deltas[j] = dsigmoid(self.ah[j]) * error

            # update output weights
            for j in range(self.nh):
                for k in range(self.no):
                    change = output_deltas[k]*self.ah[j]
                    self.wo[j][k] = self.wo[j][k] + N*change + M*self.co[j][k]
                    self.co[j][k] = change
                    #print N*change, M*self.co[j][k]

            # update input weights
            for i in range(self.ni):
                for j in range(self.nh):
                    change = hidden_deltas[j]*self.ai[i]
                    self.wi[i][j] = self.wi[i][j] + N*change + M*self.ci[i][j]
                    self.ci[i][j] = change

            # calculate error
            error = 0.0
            for k in range(len(targets)):
                error = error + 0.5*(targets[k]-self.ao[k])**2
            return error

        def train(self, patterns, iterations=1000, N=0.5, M=0.1):
            # N: learning rate
            # M: momentum factor
            for i in range(iterations):
                error = 0.0
                for p in patterns:
                    inputs = p[0]
                    targets = p[1]
                    self.runNetwork(inputs)
                    error = error + self.backPropagate(targets, N, M)
                if i % 100 == 0: # Prints error every 100 iterations
                    print('error %-.5f' % error)

Any help would be greatly appreciated!

Answer 1:

Your error, if there is one, doesn't have anything to do with the class itself. As @Daniel Roseman suggested, the natural guess would be a class/instance variable issue, a mutable default argument, or list multiplication: the most common causes of this kind of mysterious shared-state behaviour.
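As a minimal illustration of that kind of shared-state pitfall (a hypothetical Counter class, not taken from your code):

    class Counter:
        calls = []                  # class attribute: one list shared by every instance

        def bump(self, log=[]):     # mutable default: one list shared across all calls
            self.calls.append(1)
            log.append(1)
            return len(self.calls), len(log)

    a, b = Counter(), Counter()
    a.bump()
    print(b.bump())                 # (2, 2): b "sees" a's state through the shared lists

None of that is happening in your NN class, though, since all of its state is created on self inside __init__.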

Here, though, you're getting different results only because you're using different random numbers each time. The module-level random.seed(0) runs once, so the second NN(2, 3, 1) draws its initial weights from a later point in the random stream. If you call random.seed(0) again before each NN(2, 3, 1), you get exactly the same results:

error 2.68110
error 0.44049
error 0.39256
error 0.26315
error 0.00584
[ 0.01  0.01  0.07  0.97]
error 2.68110
error 0.44049
error 0.39256
error 0.26315
error 0.00584
[ 0.01  0.01  0.07  0.97]
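A minimal sketch of that fix, assuming the NN class and the data/data2 definitions from the question:

    random.seed(0)               # reset the RNG so both networks start from identical weights
    n = NN(2, 3, 1)
    n.train(data, 500, 0.5, 0)

    random.seed(0)               # re-seed before building the second instance as well
    p = NN(2, 3, 1)
    p.train(data2, 500, 0.5, 0)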

I can't judge whether your algorithm is right. Incidentally, I think your rand function is reinventing random.uniform.
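For example, the weight-initialisation loops could draw from the standard library directly (same distribution as rand(-5, 5)):

    import random

    # random.uniform(a, b) computes (b - a) * random.random() + a, i.e. rand(a, b)
    w = random.uniform(-5, 5)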