
I ran this code in a Jupyter notebook earlier; it showed no error, but the accuracy was very low. Then I tried the same code in Google Colab and it shows the error below. Please suggest some ways to improve the accuracy. I am trying to build a multi-level CNN with image downsampling to detect leaves.

import torch
import torch.nn as nn
import torch.nn.functional as F

class Net(nn.Module):
    def __init__(self):
        super().__init__()
        # Conv/BN layers, shared across all four input scales
        self.conv1 = nn.Conv2d(1, 32, 2)
        self.conv2 = nn.Conv2d(32, 64, 2)
        self.conv2_bn = nn.BatchNorm2d(64)

        # Dummy 256x256 input, passed through convs() once to infer the flattened size for fc1
        x = torch.randn(256, 256).view(-1, 1, 256, 256)
        self._to_linear = None
        self.convs(x)

        self.fc1 = nn.Linear(self._to_linear, 512)
        self.fc2 = nn.Linear(512, 6)

    def convs(self, x):
        # Downsampled copies of the input at 128x128, 64x64 and 32x32
        y = F.interpolate(x, size=(128, 128), mode='nearest')
        z = F.interpolate(x, size=(64, 64), mode='nearest')
        w = F.interpolate(x, size=(32, 32), mode='nearest')

        # print(x[0].shape)

        # Branch 1: full-resolution (256x256) input
        x = F.relu(self.conv1(x))
        m = nn.ConstantPad2d(1, 0)
        x = m(x)
        x = F.relu(F.max_pool2d(self.conv2_bn(self.conv2(x)), 2))
#         print(x[0].shape)


        # Branch 2: 128x128 input
        y = F.relu(self.conv1(y))
        m = nn.ConstantPad2d(1, 0)
        y = m(y)
        y = F.relu(self.conv2_bn(self.conv2(y)))
#         print(y[0].shape)

        # Fuse the full-resolution and 128x128 branches
        CAT_1 = torch.cat((x, y), 1)
        CAT_1 = F.max_pool2d(CAT_1, (2, 2))
#         print(CAT_1[0].shape)

        # Branch 3: 64x64 input
        z = F.relu(self.conv1(z))
        m = nn.ConstantPad2d(1, 0)
        z = m(z)
        z = F.relu(self.conv2_bn(self.conv2(z)))
#         print(z[0].shape)

        # Fuse with the 64x64 branch
        CAT_2 = torch.cat((CAT_1, z), 1)
        CAT_2 = F.max_pool2d(CAT_2, (2, 2))
#         print(CAT_2[0].shape)

        # Branch 4: 32x32 input
        w = F.relu(self.conv1(w))
        m = nn.ConstantPad2d(1, 0)
        w = m(w)
        w = F.relu(self.conv2_bn(self.conv2(w)))
#         print(w[0].shape)

        # Fuse with the 32x32 branch, then pool down to the final feature map
        x = torch.cat((CAT_2, w), 1)
        x = F.max_pool2d(x, (2, 2))
#         print(x[0].shape)
        x = F.avg_pool2d(x, (2, 2))
#         print(x[0].shape)


        # Record the flattened feature size on the first (dummy) pass
        if self._to_linear is None:
            self._to_linear = x[0].shape[0] * x[0].shape[1] * x[0].shape[2]
        return x



    def forward(self, x):
        x = self.convs(x)
        x = x.view(-1, self._to_linear)

        x = F.relu(self.fc1(x))
        x = self.fc2(x)
        return F.softmax(x, dim=1)
net = Net()
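
For reference, a quick shape check on the constructed network (using a dummy single-channel 256x256 batch, like the one in the constructor) shows it outputs one row of six class scores per image:

# Sanity check: a (N, 1, 256, 256) batch should give (N, 6) softmax scores
dummy = torch.randn(2, 1, 256, 256)
print(net(dummy).shape)   # torch.Size([2, 6])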



import torch.optim as optim

optimizer = optim.Adam(net.parameters(), lr=0.001)
loss_function = nn.MSELoss()

# Build input and label tensors from training_data and scale pixel values to [0, 1]
X = torch.Tensor([i[0] for i in training_data]).view(-1, 256, 256)
X = X / 255.0
y = torch.Tensor([i[1] for i in training_data])


# Hold out 10% of the data for validation
VAL_PCT = 0.1
val_size = int(len(X) * VAL_PCT)
print(val_size)


train_X= X[:-val_size]
train_y= y[:-val_size]

test_X=X[-val_size:]
test_y = y[-val_size:]

print(len(train_X))
print(len(test_X))


BATCH_SIZE =10
EPOCHS = 1


for epoch in range(EPOCHS):
    for i in range(0, len(train_X), BATCH_SIZE):
        #print(i, i+BATCH_SIZE)
        batch_X = train_X[i:i+BATCH_SIZE].view(-1,1,256,256)
        # print(batch_X.shape)

        batch_y = train_y[i:i+BATCH_SIZE]
        #print(batch_y.shape)
        net.zero_grad()

        outputs = net(batch_X)
        #print (outputs.shape)
        loss = loss_function(outputs, batch_y) 
        loss.backward()
        optimizer.step()
        #print(loss)
        #print(f"Epoch: {epoch}. Loss: {loss}")

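For reference, a minimal sketch of how accuracy could be measured on the held-out split, assuming each row of test_y is a one-hot vector over the six classes (that encoding is an assumption, not taken from the data pipeline above):

# Rough accuracy estimate on the held-out split (assumes one-hot rows in test_y)
correct = 0
total = 0
with torch.no_grad():
    for i in range(len(test_X)):
        out = net(test_X[i].view(-1, 1, 256, 256))[0]
        if torch.argmax(out) == torch.argmax(test_y[i]):
            correct += 1
        total += 1
print("Accuracy:", round(correct / total, 3))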

This is the error shown in Google Colab:

/usr/local/lib/python3.6/dist-packages/torch/nn/modules/loss.py:432: UserWarning: Using a target size (torch.Size([10, 256, 256, 3])) that is different to the input size (torch.Size([10, 6])). This will likely lead to incorrect results due to broadcasting. Please ensure they have the same size.
  return F.mse_loss(input, target, reduction=self.reduction)
---------------------------------------------------------------------------
RuntimeError                              Traceback (most recent call last)
<ipython-input-38-a154b102127f> in <module>()
     15         outputs = net(batch_X)
     16         #print (outputs.shape)
---> 17         loss = loss_function(outputs, batch_y)
     18         loss.backward()
     19         optimizer.step()

3 frames
/usr/local/lib/python3.6/dist-packages/torch/functional.py in broadcast_tensors(*tensors)
     60         if any(type(t) is not Tensor for t in tensors) and has_torch_function(tensors):
     61             return handle_torch_function(broadcast_tensors, tensors, *tensors)
---> 62     return _VF.broadcast_tensors(tensors)
     63 
     64 

RuntimeError: The size of tensor a (10) must match the size of tensor b (9) at non-singleton dimension 3
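
The warning above says each target batch has shape [10, 256, 256, 3] while the network output is [10, 6], so batch_y appears to hold image-sized data rather than label vectors. For illustration only, a minimal sketch (the one-hot encoding here is an assumption, not something produced by the data pipeline above) of target shapes that MSELoss would accept against the (N, 6) output:

# Hypothetical one-hot targets: with nn.MSELoss the target must match the (N, 6) output
outputs = torch.rand(10, 6)                # stand-in for net(batch_X)
labels = torch.randint(0, 6, (10,))        # fake class indices for 6 classes
one_hot_y = torch.zeros(10, 6)
one_hot_y[torch.arange(10), labels] = 1.0  # shape (10, 6), matches the output
loss = nn.MSELoss()(outputs, one_hot_y)    # shapes agree, no broadcasting warning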