How to Implement VGG16 with PyTorch

發(fā)布時(shí)間:2020-06-24 18:01:33 來(lái)源:億速云 閱讀:945 作者:清晨 欄目:開(kāi)發(fā)技術(shù)

This article explains how to implement VGG16 with PyTorch. The sample code is walked through in detail and should be a useful reference, so do read it to the end!

Without further ado, let's go straight to the code:

import torch
import torch.nn as nn
import torch.nn.functional as F
 
class VGG16(nn.Module):
  
  def __init__(self):
    super(VGG16, self).__init__()
    
    # 3 * 224 * 224
    self.conv1_1 = nn.Conv2d(3, 64, 3) # 64 * 222 * 222
    self.conv1_2 = nn.Conv2d(64, 64, 3, padding=(1, 1)) # 64 * 222 * 222
    self.maxpool1 = nn.MaxPool2d((2, 2), padding=(1, 1)) # pooling 64 * 112 * 112
    
    self.conv2_1 = nn.Conv2d(64, 128, 3) # 128 * 110 * 110
    self.conv2_2 = nn.Conv2d(128, 128, 3, padding=(1, 1)) # 128 * 110 * 110
    self.maxpool2 = nn.MaxPool2d((2, 2), padding=(1, 1)) # pooling 128 * 56 * 56
    
    self.conv3_1 = nn.Conv2d(128, 256, 3) # 256 * 54 * 54
    self.conv3_2 = nn.Conv2d(256, 256, 3, padding=(1, 1)) # 256 * 54 * 54
    self.conv3_3 = nn.Conv2d(256, 256, 3, padding=(1, 1)) # 256 * 54 * 54
    self.maxpool3 = nn.MaxPool2d((2, 2), padding=(1, 1)) # pooling 256 * 28 * 28
    
    self.conv4_1 = nn.Conv2d(256, 512, 3) # 512 * 26 * 26
    self.conv4_2 = nn.Conv2d(512, 512, 3, padding=(1, 1)) # 512 * 26 * 26
    self.conv4_3 = nn.Conv2d(512, 512, 3, padding=(1, 1)) # 512 * 26 * 26
    self.maxpool4 = nn.MaxPool2d((2, 2), padding=(1, 1)) # pooling 512 * 14 * 14
    
    self.conv5_1 = nn.Conv2d(512, 512, 3) # 512 * 12 * 12
    self.conv5_2 = nn.Conv2d(512, 512, 3, padding=(1, 1)) # 512 * 12 * 12
    self.conv5_3 = nn.Conv2d(512, 512, 3, padding=(1, 1)) # 512 * 12 * 12
    self.maxpool5 = nn.MaxPool2d((2, 2), padding=(1, 1)) # pooling 512 * 7 * 7
    
    # view
    
    self.fc1 = nn.Linear(512 * 7 * 7, 4096)
    self.fc2 = nn.Linear(4096, 4096)
    self.fc3 = nn.Linear(4096, 1000)
    # softmax 1 * 1 * 1000
    
  def forward(self, x):
    
    # x.size(0) is the batch size
    in_size = x.size(0)
    
    out = self.conv1_1(x) # 222
    out = F.relu(out)
    out = self.conv1_2(out) # 222
    out = F.relu(out)
    out = self.maxpool1(out) # 112
    
    out = self.conv2_1(out) # 110
    out = F.relu(out)
    out = self.conv2_2(out) # 110
    out = F.relu(out)
    out = self.maxpool2(out) # 56
    
    out = self.conv3_1(out) # 54
    out = F.relu(out)
    out = self.conv3_2(out) # 54
    out = F.relu(out)
    out = self.conv3_3(out) # 54
    out = F.relu(out)
    out = self.maxpool3(out) # 28
    
    out = self.conv4_1(out) # 26
    out = F.relu(out)
    out = self.conv4_2(out) # 26
    out = F.relu(out)
    out = self.conv4_3(out) # 26
    out = F.relu(out)
    out = self.maxpool4(out) # 14
    
    out = self.conv5_1(out) # 12
    out = F.relu(out)
    out = self.conv5_2(out) # 12
    out = F.relu(out)
    out = self.conv5_3(out) # 12
    out = F.relu(out)
    out = self.maxpool5(out) # 7
    
    # flatten to (batch_size, 512*7*7)
    out = out.view(in_size, -1)
    
    out = self.fc1(out)
    out = F.relu(out)
    out = self.fc2(out)
    out = F.relu(out)
    out = self.fc3(out)
    
    out = F.log_softmax(out, dim=1)
    return out
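
The layer comments above trace the spatial size through the network: this variant leaves the first convolution of each block unpadded and pads the pooling layers instead, which differs slightly from the original VGG16 configuration but still arrives at a 512 * 7 * 7 feature map before the classifier. A minimal sketch of how the class could be exercised (the dummy input and shape check below are illustrative additions, not part of the original code):

model = VGG16()
dummy = torch.randn(1, 3, 224, 224)   # one random RGB image at 224 x 224
logits = model(dummy)                 # log-probabilities over 1000 classes
print(logits.shape)                   # expected: torch.Size([1, 1000])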

Bonus: a GPU implementation of VGG in PyTorch

Again, straight to the code:

import torch
from torch import nn
from torch import optim

from PIL import Image
import numpy as np

print(torch.cuda.is_available())
device = torch.device('cuda:0')
path="/content/drive/My Drive/Colab Notebooks/data/dog_vs_cat/"

train_X=np.empty((2000,224,224,3),dtype="float32")
train_Y=np.empty((2000,),dtype="int")
train_XX=np.empty((2000,3,224,224),dtype="float32")

for i in range(1000):
  file_path=path+"cat."+str(i)+".jpg"
  image=Image.open(file_path)
  resized_image = image.resize((224, 224), Image.ANTIALIAS)
  img=np.array(resized_image)
  train_X[i,:,:,:]=img
  train_Y[i]=0

for i in range(1000):
  file_path=path+"dog."+str(i)+".jpg"
  image = Image.open(file_path)
  resized_image = image.resize((224, 224), Image.ANTIALIAS)
  img = np.array(resized_image)
  train_X[i+1000, :, :, :] = img
  train_Y[i+1000] = 1

train_X /= 255

index = np.arange(2000)
np.random.shuffle(index)

train_X = train_X[index, :, :, :]
train_Y = train_Y[index]

for i in range(3):
  train_XX[:,i,:,:]=train_X[:,:,:,i]
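
# Note: the loop above converts the images from HWC to CHW layout.
# An equivalent one-liner (a sketch, not in the original post) would be:
# train_XX = train_X.transpose(0, 3, 1, 2).copy()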

# Build the network

class Net(nn.Module):

  def __init__(self):
    super(Net, self).__init__()
    self.conv1 = nn.Sequential(
      nn.Conv2d(in_channels=3, out_channels=64, kernel_size=3, stride=1, padding=1),
      nn.ReLU(),
      nn.Conv2d(in_channels=64, out_channels=64, kernel_size=3, stride=1, padding=1),
      nn.ReLU(),
      nn.BatchNorm2d(num_features=64, eps=1e-05, momentum=0.1, affine=True),
      nn.MaxPool2d(kernel_size=2,stride=2)
    )
    self.conv2 = nn.Sequential(
      nn.Conv2d(in_channels=64,out_channels=128,kernel_size=3,stride=1,padding=1),
      nn.ReLU(),
      nn.Conv2d(in_channels=128, out_channels=128, kernel_size=3, stride=1, padding=1),
      nn.ReLU(),
      nn.BatchNorm2d(128,eps=1e-5,momentum=0.1,affine=True),
      nn.MaxPool2d(kernel_size=2,stride=2)
    )
    self.conv3 = nn.Sequential(
      nn.Conv2d(in_channels=128, out_channels=256, kernel_size=3, stride=1, padding=1),
      nn.ReLU(),
      nn.Conv2d(in_channels=256, out_channels=256, kernel_size=3, stride=1, padding=1),
      nn.ReLU(),
      nn.Conv2d(in_channels=256, out_channels=256, kernel_size=3, stride=1, padding=1),
      nn.ReLU(),
      nn.BatchNorm2d(256,eps=1e-5, momentum=0.1, affine=True),
      nn.MaxPool2d(kernel_size=2, stride=2)
    )
    self.conv4 = nn.Sequential(
      nn.Conv2d(in_channels=256, out_channels=512, kernel_size=3, stride=1, padding=1),
      nn.ReLU(),
      nn.Conv2d(in_channels=512, out_channels=512, kernel_size=3, stride=1, padding=1),
      nn.ReLU(),
      nn.Conv2d(in_channels=512, out_channels=512, kernel_size=3, stride=1, padding=1),
      nn.ReLU(),
      nn.BatchNorm2d(512, eps=1e-5, momentum=0.1, affine=True),
      nn.MaxPool2d(kernel_size=2, stride=2)
    )
    self.conv5 = nn.Sequential(
      nn.Conv2d(in_channels=512, out_channels=512, kernel_size=3, stride=1, padding=1),
      nn.ReLU(),
      nn.Conv2d(in_channels=512, out_channels=512, kernel_size=3, stride=1, padding=1),
      nn.ReLU(),
      nn.Conv2d(in_channels=512, out_channels=512, kernel_size=3, stride=1, padding=1),
      nn.ReLU(),
      nn.BatchNorm2d(512, eps=1e-5, momentum=0.1, affine=True),
      nn.MaxPool2d(kernel_size=2, stride=2)
    )
    self.dense1 = nn.Sequential(
      nn.Linear(7*7*512,4096),
      nn.ReLU(),
      nn.Linear(4096,4096),
      nn.ReLU(),
      nn.Linear(4096,2)
    )


  def forward(self, x):
    x=self.conv1(x)
    x=self.conv2(x)
    x=self.conv3(x)
    x=self.conv4(x)
    x=self.conv5(x)
    x=x.view(-1,7*7*512)
    x=self.dense1(x)
    return x

batch_size=16
net = Net().to(device)
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(net.parameters(), lr=0.0005)

train_loss = []

for epoch in range(10):

  for i in range(2000//batch_size):
    x=train_XX[i*batch_size:i*batch_size+batch_size]
    y=train_Y[i*batch_size:i*batch_size+batch_size]

    x = torch.from_numpy(x)    # (batch_size, 3, 224, 224)
    y = torch.from_numpy(y)    # (batch_size,) integer class labels (not one-hot)
    x = x.cuda()
    y = y.long().cuda()

    out = net(x)

    loss = criterion(out, y)     # compute the loss between prediction and label
    optimizer.zero_grad()        # clear gradients left over from the previous step
    loss.backward()              # backpropagate to compute new gradients
    optimizer.step()             # apply the update to the parameters of net
    train_loss.append(loss.item())

    print(epoch, i*batch_size, np.mean(train_loss))
    train_loss=[]

total_correct = 0
for i in range(2000):
  x = train_XX[i].reshape(1,3,224,224)
  y = train_Y[i]
  x = torch.from_numpy(x)

  x = x.cuda()
  out = net(x).cpu()
  out = out.detach().numpy()
  pred=np.argmax(out)
  if pred==y:
    total_correct += 1
  print(total_correct)

acc = total_correct / 2000.0
print('test acc:', acc)

torch.cuda.empty_cache()
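
One thing to keep in mind: the accuracy loop above runs on the same 2000 training images and leaves the network in training mode, so the BatchNorm layers keep using per-batch statistics. A sketch of an evaluation pass that switches to eval mode and disables gradient tracking (the evaluate helper below is an illustrative addition, not part of the original post):

def evaluate(net, images, labels, device):
    # images: numpy array of shape (N, 3, 224, 224); labels: (N,) integer classes
    net.eval()                         # use running BatchNorm statistics
    correct = 0
    with torch.no_grad():              # no gradients needed at inference time
        for i in range(len(labels)):
            x = torch.from_numpy(images[i:i+1]).to(device)
            pred = net(x).argmax(dim=1).item()
            correct += int(pred == labels[i])
    net.train()                        # restore training mode afterwards
    return correct / len(labels)

print('accuracy:', evaluate(net, train_XX, train_Y, device))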

Changing batch_size in the code above to 32 and training for 100 epochs gives the accuracy shown below.

[Figure: accuracy after 100 epochs with batch_size = 32]

That's all on how to implement VGG16 with PyTorch. Thanks for reading! I hope this was helpful; for more related content, follow the 億速云 industry news channel!
