Fixing TypeError: conv2d() received an invalid combination of arguments
1. The problem

While following a Bilibili video on handwritten-digit recognition with a convolutional network, I wrote the source code below:
```python
# Input: 28*28*1. After the first convolution: 28*28*16; after pooling: 14*14*16.
# Second convolution: 14*14*32; after pooling: 7*7*32, then a fully connected layer (10 for 10 classes).
# Forward pass: run conv1 and conv2, flatten the result into a vector, then apply the fully connected layer (w*x+b).
import torch
import torch.nn as nn
import torch.optim as optim
from torchvision import datasets, transforms

# Hyperparameters
input_size = 28   # total image size: 28*28
num_classes = 10  # number of label classes
num_epochs = 3    # number of training epochs
batch_size = 64   # batch size: 64 images

# Datasets
train_dataset = datasets.MNIST("./data", train=True, transform=transforms.ToTensor(), download=True)
test_dataset = datasets.MNIST("./data", train=False, transform=transforms.ToTensor())
# Build batch loaders
train_loader = torch.utils.data.DataLoader(dataset=train_dataset, batch_size=batch_size, shuffle=True)
test_loader = torch.utils.data.DataLoader(dataset=test_dataset, batch_size=batch_size, shuffle=True)

class CNN(nn.Module):
    def __init__(self):
        super(CNN, self).__init__()
        self.conv1 = nn.Sequential(       # input size (1, 28, 28)
            nn.Conv2d(
                in_channels=1,            # grayscale input
                out_channels=16,          # how many feature maps to produce
                kernel_size=5,            # kernel size
                stride=1,                 # stride
                padding=2,                # to keep the output the same size, set padding=(kernel_size-1)/2 when stride=1
            ),                            # output feature maps: (16, 28, 28)
            nn.ReLU(),                    # ReLU layer
            nn.MaxPool2d(kernel_size=2),  # 2x2 max pooling, output: (16, 14, 14)
        )
        self.conv2 = nn.Sequential(       # input of the next block: (16, 14, 14)
            nn.Conv2d(16, 32, 5, 1, 2),   # output (32, 14, 14); the 16 here is the 16 feature maps from above
            nn.ReLU(),                    # ReLU layer
            nn.MaxPool2d(2),              # output (32, 7, 7)
        )
        self.out = nn.Linear(32 * 7 * 7, 10)  # fully connected layer (input 32*7*7, output 10)

    def forward(self, x):                 # forward pass
        x = self.conv1(x),                # <-- the stray trailing comma discussed below
        x = self.conv2(x)
        x = x.view(x.size(0), -1)         # flatten, result: (batch_size, 32*7*7)
        output = self.out(x)              # multiply the flattened vector by the fully connected layer
        return output

def accuracy(predictions, labels):
    pred = torch.max(predictions.data, 1)[1]
    rights = pred.eq(labels.data.view_as(pred)).sum()
    return rights, len(labels)

# Instantiate the model
net = CNN()
# Loss function
criterion = nn.CrossEntropyLoss()
# Optimizer
optimizer = optim.Adam(net.parameters(), lr=0.001)

# Training loop
for epoch in range(num_epochs):
    # keep this epoch's results
    train_rights = []
    for batch_idx, (data, target) in enumerate(train_loader):  # loop over each batch in the loader
        # enumerate wraps an iterable (e.g., a list or string) into an indexed
        # sequence, yielding the index and the value together
        net.train()
        output = net(data)
        loss = criterion(output, target)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        right = accuracy(output, target)
        train_rights.append(right)

        if batch_idx % 100 == 0:
            net.eval()
            val_rights = []  # keep accuracies
            for (data, target) in test_loader:
                output = net(data)
                right = accuracy(output, target)
                val_rights.append(right)

            # accuracy computation
            train_r = (sum([tup[0] for tup in train_rights]), sum([tup[1] for tup in train_rights]))
            val_r = (sum([tup[0] for tup in val_rights]), sum([tup[1] for tup in val_rights]))

            print('Epoch: {} [{}/{} ({:.0f}%)]\t Loss: {:.6f}\t Train accuracy: {:.2f}%\t Test accuracy: {:.2f}%'.format(
                epoch, batch_idx * batch_size, len(train_loader.dataset),
                100. * batch_idx / len(train_loader),
                loss.data,
                100. * train_r[0].numpy() / train_r[1],
                100. * val_r[0].numpy() / val_r[1]))
```

Running this code raised the error from the title: `TypeError: conv2d() received an invalid combination of arguments`.
Reading the message, it roughly says that the arguments passed into conv2 have the wrong format. Checking the code, I found a careless mistake: a stray comma at the end of `x = self.conv1(x),` in `forward` (line 54 in my editor). After removing it, the code ran normally.
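As a minimal sketch of why that comma triggers the error (this reproduction is mine, not from the original post): the trailing comma turns the assignment into a one-element tuple, so conv2 receives a tuple where `conv2d()` expects a Tensor.

```python
import torch
import torch.nn as nn

conv1 = nn.Sequential(nn.Conv2d(1, 16, 5, 1, 2), nn.ReLU(), nn.MaxPool2d(2))
conv2 = nn.Sequential(nn.Conv2d(16, 32, 5, 1, 2), nn.ReLU(), nn.MaxPool2d(2))

x = torch.randn(64, 1, 28, 28)
x = conv1(x),     # trailing comma: x is now (Tensor,), a one-element tuple
print(type(x))    # <class 'tuple'>
x = conv2(x)      # TypeError: conv2d() received an invalid combination of arguments
```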
2. A similar case

After fixing it, I searched around to see whether anyone else had been as careless. I found another student reporting the same error, though his was not caused by an extra comma. His source code is below:
```python
import torch
import torch.nn as nn
from torchvision import datasets, transforms
from torchvision.datasets import MNIST
import torch.optim as optim
import numpy as np
import torch.nn.functional as F
import matplotlib.pyplot as plt
from torch.utils.data import DataLoader

# -- Hyperparameters
input_size = 28  # -- image size: 28*28
num_class = 10   # -- 10 classes, digits 0-9
num_epochs = 3   # -- 3 epochs
batch_size = 64  # -- batch size 64

# -- Training set
train_dataset = MNIST(root="./data/", train=True, transform=transforms.ToTensor(), download=True)
# -- Test set
test_dataset = MNIST(root="./data/", train=False, transform=transforms.ToTensor())

# -- Build batch loaders
train_loader = DataLoader(dataset=train_dataset, batch_size=batch_size, shuffle=True)
test_loader = DataLoader(dataset=test_dataset, batch_size=batch_size, shuffle=True)

# -- Define the model
class CNN_Model(nn.Module):
    def __init__(self):
        super(CNN_Model, self).__init__()
        # -- First conv unit: conv, relu, pool; input 28*28*1, output 16*14*14
        self.conv1 = nn.Sequential(
            nn.Conv2d(in_channels=1,    # -- 1 channel for grayscale input
                      out_channels=16,  # -- produce 16 feature maps
                      kernel_size=5,    # -- 5*5 kernel
                      stride=1,         # -- stride 1
                      padding=2         # -- zero padding
                      ),
            nn.ReLU(),                  # -- relu layer
            nn.MaxPool2d(kernel_size=2) # -- pooling, output 16*14*14
        )
        # -- Second conv unit: input 16*14*14
        self.conv2 = nn.Sequential(
            nn.Conv2d(16, 32, 5, 1, 2), # -- input 16*14*14
            nn.ReLU(),
            nn.MaxPool2d(2)             # -- output 32*7*7
        )
        self.out = nn.Linear(32 * 7 * 7, 10)  # -- fully connected layer, 10-way output

    # -- Forward pass
    def forward(self, x):
        x = self.conv1(x)
        x = self.conv2(x)
        x = x.view(x.size(0), -1)  # -- flatten into a vector so the fully connected layer can be applied
        output = self.out(x)
        return output

# -- Define an evaluation function
def pinggu(predictions, labels):
    pred = torch.max(predictions.data, 1)[1]
    right = pred.eq(labels.data.view_as(pred)).sum()
    return right, len(labels)

# -- Train the model
# -- Instantiate
mymodel = CNN_Model()
# -- Loss function
loss_fn = nn.CrossEntropyLoss()
# -- Optimizer (plain stochastic gradient descent would also work)
optimizer = optim.Adam(mymodel.parameters(), lr=0.001)

# -- Start training
for epoch in range(num_epochs):
    # -- keep this epoch's results
    train_right = []
    for batch_idx, (data, target) in enumerate(train_loader):
        mymodel.train()
        output = mymodel(data)
        loss = loss_fn(output, target)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        right = pinggu(output, target)
        train_right.append(right)

        if batch_idx % 100 == 0:
            mymodel.eval()
            val_right = []
            for (data, target) in enumerate(test_loader):  # <-- the line discussed below
                output = mymodel(data)
                right = pinggu(output, target)
                val_right.append(right)

            # -- accuracy computation
            train_r = (sum([tup[0] for tup in train_right]), sum([tup[1] for tup in train_right]))
            val_r = (sum([tup[0] for tup in val_right]), sum([tup[1] for tup in val_right]))

            print('Epoch: {} [{}/{} ({:.0f}%)]\t Loss: {:.6f}\t Train accuracy: {:.2f}%\t Test accuracy: {:.2f}%'.format(
                epoch, batch_idx * batch_size, len(train_loader.dataset),
                100. * batch_idx / len(train_loader),
                loss.data,
                100. * train_r[0].numpy() / train_r[1],
                100. * val_r[0].numpy() / val_r[1]))
```

He got the same `TypeError: conv2d() received an invalid combination of arguments`.
Here, too, the arguments being passed in have the wrong format; only the test loop (line 95 in his code) needs to change:
Change

```python
for (data, target) in enumerate(test_loader):
```

to

```python
for (data, target) in test_loader:
```

Before the change, `data` comes out as 0, 1, 2, ... and its type is int. After the change, the output is

```
tensor([[[[0., 0., 0., ..., 0., 0., 0.],
          [0., 0., 0., ..., 0., 0., 0.],
          [0., 0., 0., ..., 0., 0., 0.],
```

and its type is Tensor. Looking back at the error message, the cause is clear:
conv2d() wants a Tensor, and what was passed in was an int.
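A standalone sketch (mine, not from either post) of what `enumerate` changes about the loop variables:

```python
# stand-in list playing the role of a DataLoader that yields (images, labels) batches
loader = [("batch0_images", "batch0_labels"), ("batch1_images", "batch1_labels")]

for data, target in enumerate(loader):
    # enumerate yields (index, item), so data is bound to the int index
    print(type(data), data)   # <class 'int'> 0, then <class 'int'> 1

for data, target in loader:
    # without enumerate, each batch tuple is unpacked directly
    print(type(data), data)   # <class 'str'> batch0_images, ...
```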
3. Summary

This error means the arguments passed in have the wrong type. conv2d() only accepts a Tensor, so a tuple (case 1) and an int (case 2) both trigger the same TypeError. When it appears, inspect the data actually being handed to the failing call, as in the sketch below.
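As a hedged suggestion that goes beyond the original post, a quick sanity check before the first convolution makes either mistake obvious (`check_conv_input` is a hypothetical helper name):

```python
import torch

def check_conv_input(x):
    # conv2d only accepts a 4-D Tensor of shape (N, C, H, W); a tuple or an int
    # both produce "conv2d() received an invalid combination of arguments"
    assert isinstance(x, torch.Tensor), f"expected Tensor, got {type(x)}"
    assert x.dim() == 4, f"expected 4-D (N, C, H, W) input, got shape {tuple(x.shape)}"

check_conv_input(torch.randn(64, 1, 28, 28))     # passes silently
check_conv_input((torch.randn(64, 1, 28, 28),))  # AssertionError: expected Tensor, got <class 'tuple'>
```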