I am trying to write a stacked autoencoder. Since this is a stacked autoencoder, we need to train the first autoencoder and pass its weights to the second autoencoder, so during training we need to define train_data_for_next_layer. Here I am getting the error:
InvalidType:
Invalid operation is performed in: LinearFunction (Forward)
Expect: x.shape[1] == W.shape[1]
Actual: 784 != 250
I am having an issue with the last line. Is this problem due to an incorrect model layer? I want to know what the issue is here. I have faced this problem several times before, and any detailed explanation is welcome. The code is as follows:
import numpy as np
import chainer
import chainer.functions as F
import chainer.links as L
from chainer import cuda
from sklearn.datasets import fetch_mldata

class AutoEncoder(chainer.Chain):
    def __init__(self, n_in, n_out, activation='relu', tied=True):
        if tied:
            super(AutoEncoder, self).__init__(
                l1=L.Linear(n_in, n_out)
            )
            self.add_param('decoder_bias', n_in)
            self.decoder_bias.data[...] = 0
        else:
            super(AutoEncoder, self).__init__(
                l1=L.Linear(n_in, n_out),
                l2=L.Linear(n_out, n_in)
            )
        self.tied = tied
        self.n_in = n_in
        self.n_out = n_out
        self.activation = {'relu': F.relu, 'sigmoid': F.sigmoid,
                           'identity': F.identity}[activation]

    def __call__(self, x, train=True):
        h1 = F.dropout(self.activation(self.l1(x)), train=train)
        if self.tied:
            return self.activation(F.linear(h1, F.transpose(self.l1.W),
                                            self.decoder_bias))
        else:
            return self.activation(self.l2(h1))

    def encode(self, x, train=True):
        return F.dropout(self.activation(self.l1(x)), train=train)

    def decode(self, x, train=True):
        if self.tied:
            return self.activation(F.linear(x, F.transpose(self.l1.W),
                                            self.decoder_bias))
        else:
            return self.activation(self.l2(x))
class StackedAutoEncoder(chainer.ChainList):
    def __init__(self, autoencoders):
        super(StackedAutoEncoder, self).__init__()
        for ae in autoencoders:
            self.add_link(ae)

    def __call__(self, x, train=True, depth=0):
        if depth == 0:
            depth = len(self)
        h = x
        for i in range(depth):
            h = self[i].encode(h, train=train)
        for i in range(depth):
            if i == depth - 1:  # do not use dropout in the output layer
                train = False
            h = self[depth - 1 - i].decode(h, train=train)
        return h

    def encode(self, x, train=True, depth=0):
        if depth == 0:
            depth = len(self)
        h = x
        for i in range(depth):
            h = self[i].encode(h, train=train)
        return h

    def decode(self, x, train=True, depth=0):
        if depth == 0:
            depth = len(self)
        h = x
        for i in range(depth):
            if i == depth - 1:  # do not use dropout in the output layer
                train = False
            h = self[depth - 1 - i].decode(h, train=train)
        return h
class Regression(chainer.Chain):
    def __init__(self, predictor):
        super(Regression, self).__init__(predictor=predictor)

    def __call__(self, x, t):
        y = self.predictor(x, True)
        self.loss = F.mean_squared_error(y, t)
        return self.loss

    def dump(self, x):
        return self.predictor(x, False)
initmodel = ''
resume = ''
gpu = -1
epoch_pre = 20
epoch_fine = 20
batchsize = 100
noise = 0
optimizer = 'adam'
learningrate = 0.01
alpha = 0.001
unit = '1000, 500, 250, 2'
activation = 'sigmoid'
untied = False
batchsize = batchsize
n_epoch = epoch_pre
n_epoch_fine = epoch_fine
n_units = list(map(int, unit.split(',')))
activation = activation
mnist = fetch_mldata('MNIST original', data_home='.')
perm = np.random.permutation(len(mnist.data))
mnist.data = mnist.data.astype(np.float32) / 255
train_data = mnist.data[perm][:60000]
test_data = mnist.data[perm][60000:]
# prepare layers
aes = []
for idx in range(len(n_units)):
    n_in = n_units[idx - 1] if idx > 0 else 28 * 28
    n_out = n_units[idx]
    ae = AutoEncoder(n_in, n_out, activation, tied=True)
    aes.append(ae)

# prepare train data for next layer
x = chainer.Variable(np.array(train_data))
train_data_for_next_layer = cuda.to_cpu(ae.encode(x, train=False))
The InvalidType error indicates that the input shape of the array given to F.linear is wrong. In this case, for the given input x and W, F.linear expects that x.shape[1] is the same as W.shape[1], but here 784 != 250: after the layer-building loop finishes, ae refers to the last AutoEncoder you constructed (n_in=250, n_out=2), while x wraps the raw 784-dimensional MNIST rows.
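The shape contract is easy to reproduce in isolation. Here is a tiny example using the same shapes as in the question (the array names are only for illustration):

import numpy as np
import chainer.functions as F

x = np.zeros((100, 784), dtype=np.float32)  # batch of raw MNIST rows
W = np.zeros((2, 250), dtype=np.float32)    # weight of an n_in=250, n_out=2 Linear layer
F.linear(x, W)  # raises InvalidType: x.shape[1] is 784, W.shape[1] is 250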
For a more detailed description of the error message and how to interpret it, see https://docs.chainer.org/en/stable/tips.html#how-do-i-fix-invalidtype-error. The fix is to build each layer's training data from the encoded output of the previous layer instead of from the raw images, as sketched below.
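Here is a minimal sketch of how that data preparation could look, assuming the AutoEncoder class above and the old-style Chainer API used in the question; the actual pretraining of each layer (e.g. wrapping ae in Regression and running an optimizer) is elided. The encoding now happens inside the loop on the previous layer's output, and .data is taken from the returned Variable before calling cuda.to_cpu:

layer_input = np.array(train_data)  # 784-dim rows feed the first layer
aes = []
for idx in range(len(n_units)):
    n_in = n_units[idx - 1] if idx > 0 else 28 * 28
    n_out = n_units[idx]
    ae = AutoEncoder(n_in, n_out, activation, tied=True)
    aes.append(ae)
    # ... pretrain `ae` on `layer_input` here ...
    # Encode with the layer just trained: its n_in matches the width of
    # layer_input, so x.shape[1] == ae.l1.W.shape[1] holds.
    x = chainer.Variable(layer_input)
    layer_input = cuda.to_cpu(ae.encode(x, train=False).data)
train_data_for_next_layer = layer_input

With this ordering, each AutoEncoder only ever sees inputs of the dimensionality it was constructed for, which is exactly the invariant the type check enforces.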