This article shares how to implement softmax regression in PyTorch. It is quite practical, so it is shared here for everyone to learn from; hopefully you will take something away after reading it. Without further ado, let's take a look.
If you build the network from scratch, defining the parameters, model, loss, and training loop yourself:
import torch
import torchvision
import torchvision.transforms as transforms
import matplotlib.pyplot as plt
import time
import sys
import numpy as np
import requests

# Download the training and test sets to the local machine
mnist_train = torchvision.datasets.FashionMNIST(root='~/Datasets/FashionMNIST', train=True, download=True, transform=transforms.ToTensor())
mnist_test = torchvision.datasets.FashionMNIST(root='~/Datasets/FashionMNIST', train=False, download=True, transform=transforms.ToTensor())

batch_size = 256
if sys.platform.startswith('win'):
    num_workers = 0  # 0 means no extra worker processes are used to speed up data loading
else:
    num_workers = 4
train_iter = torch.utils.data.DataLoader(mnist_train, batch_size=batch_size, shuffle=True, num_workers=num_workers)
test_iter = torch.utils.data.DataLoader(mnist_test, batch_size=batch_size, shuffle=False, num_workers=num_workers)

num_inputs = 784   # each image is 1*28*28 (single channel), so the input size is 28*28 = 784
num_outputs = 10   # there are 10 classes, so the output size is 10

w = torch.tensor(np.random.normal(0, 0.01, (num_inputs, num_outputs)), dtype=torch.float)
b = torch.zeros(num_outputs, dtype=torch.float)
# enable gradient tracking so that w and b can be updated by backpropagation
w.requires_grad_(requires_grad=True)
b.requires_grad_(requires_grad=True)

def cross_entropy(y_hat, y):
    return - torch.log(y_hat.gather(1, y.view(-1, 1)))

def accuracy(y_hat, y):
    return (y_hat.argmax(dim=1) == y).float().mean().item()
'''
Note how the accuracy is computed:
y_hat is a batch_size*10 matrix; argmax(dim=1) picks the index of the largest value along dimension 1,
so y_hat.argmax(dim=1) returns a vector of length batch_size.
Comparing that vector element-wise with y and taking the mean gives the accuracy for this batch.
'''

def sgd(params, lr, batch_size):  # this function is also provided in the d2lzh_pytorch package
    for param in params:
        param.data -= lr * param.grad / batch_size  # note that param.data is used when updating param

def evalute_accuracy(data_iter, net):
    acc_sum, n = 0.0, 0
    for X, y in data_iter:
        acc_sum += (net(X).argmax(dim=1) == y).float().sum().item()
        n += y.shape[0]
    return acc_sum / n

num_epochs, lr = 30, 0.1

def softmax(X):
    X_exp = X.exp()
    partition = X_exp.sum(dim=1, keepdim=True)
    return X_exp / partition  # broadcasting is applied here

def net(X):
    return softmax(torch.mm(X.view((-1, num_inputs)), w) + b)

def train(net, train_iter, test_iter, loss, num_epochs, batch_size, params=None, lr=None, optimizer=None):
    for epoch in range(num_epochs):
        train_l_sum, train_acc_sum, n = 0.0, 0.0, 0
        for X, y in train_iter:
            y_hat = net(X)
            l = loss(y_hat, y).sum()
            if optimizer is not None:
                optimizer.zero_grad()
            elif params is not None and params[0].grad is not None:
                for param in params:
                    param.grad.data.zero_()
            l.backward()
            if optimizer is None:
                sgd(params, lr, batch_size)
            else:
                optimizer.step()  # used by the concise implementation below
            train_l_sum += l.item()
            train_acc_sum += (y_hat.argmax(dim=1) == y).sum().item()
            n += y.shape[0]
        test_acc = evalute_accuracy(test_iter, net)
        print('epoch %d, loss %.4f, train acc %.3f, test acc %.3f'
              % (epoch + 1, train_l_sum / n, train_acc_sum / n, test_acc))

if __name__ == '__main__':
    print(mnist_train)
    X, y = [], []
    '''
    for i in range(10):
        X.append(mnist_train[i][0])
        y.append(mnist_train[i][1])
    show_fashion_mnist(X, get_fashion_mnist_labels(y))
    '''
    train(net, train_iter, test_iter, cross_entropy, num_epochs, batch_size, [w, b], lr)
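To make the loss and accuracy logic easier to follow, here is a minimal sketch on a toy batch (the tensor values below are made up purely for illustration and are not part of the training script). gather(1, y.view(-1, 1)) picks, for each sample, the probability the model assigned to the true class, and taking -log of that value gives the per-sample cross-entropy.

import torch

# Toy predictions: 2 samples, 3 classes (illustrative values only)
y_hat = torch.tensor([[0.1, 0.3, 0.6],
                      [0.3, 0.2, 0.5]])
y = torch.tensor([2, 0])  # true class labels

picked = y_hat.gather(1, y.view(-1, 1))   # probability of the true class for each sample
print(picked)                             # tensor([[0.6000], [0.3000]])
print(- torch.log(picked))                # per-sample cross-entropy losses
print((y_hat.argmax(dim=1) == y).float().mean().item())  # batch accuracy: 0.5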
If you instead use the standard layer-building modules (nn.Sequential, nn.Linear, and torch.optim), you get a much more concise implementation:
import torch
import torchvision
import torchvision.transforms as transforms
from torch import nn
from torch.nn import init
from collections import OrderedDict
import matplotlib.pyplot as plt
import time
import sys
import numpy as np
import requests

# Download the training and test sets to the local machine
mnist_train = torchvision.datasets.FashionMNIST(root='~/Datasets/FashionMNIST', train=True, download=True, transform=transforms.ToTensor())
mnist_test = torchvision.datasets.FashionMNIST(root='~/Datasets/FashionMNIST', train=False, download=True, transform=transforms.ToTensor())

batch_size = 256
train_iter = torch.utils.data.DataLoader(mnist_train, batch_size=batch_size, shuffle=True)
test_iter = torch.utils.data.DataLoader(mnist_test, batch_size=batch_size, shuffle=False)

num_inputs = 784   # each image is 1*28*28 (single channel), so the input size is 28*28 = 784
num_outputs = 10   # there are 10 classes, so the output size is 10

class LinearNet(nn.Module):
    def __init__(self, num_inputs, num_outputs):
        super(LinearNet, self).__init__()
        self.linear = nn.Linear(num_inputs, num_outputs)

    def forward(self, x):
        y = self.linear(x.view(x.shape[0], -1))
        return y

def evalute_accuracy(data_iter, net):
    acc_sum, n = 0.0, 0
    for X, y in data_iter:
        acc_sum += (net(X).argmax(dim=1) == y).float().sum().item()
        n += y.shape[0]
    return acc_sum / n

class FlattenLayer(nn.Module):
    def __init__(self):
        super(FlattenLayer, self).__init__()

    def forward(self, x):
        return x.view(x.shape[0], -1)  # reshape 1*28*28 into a 784-dim vector

net = LinearNet(num_inputs, num_outputs)

# Build the network with an OrderedDict:
# the first (flatten) layer reshapes 1*28*28 into 1*784,
# the second (linear) layer does the actual computation
net = nn.Sequential(
    OrderedDict([
        ('flatten', FlattenLayer()),
        ('linear', nn.Linear(num_inputs, num_outputs))
    ])
)

init.normal_(net.linear.weight, mean=0, std=0.01)
init.constant_(net.linear.bias, val=0)

loss = nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(net.parameters(), lr=0.1)
num_epochs = 10

def train(net, train_iter, test_iter, loss, num_epochs, batch_size, params=None, lr=None, optimizer=None):
    for epoch in range(num_epochs):
        train_l_sum, train_acc_sum, n = 0.0, 0.0, 0
        for X, y in train_iter:
            y_hat = net(X)
            l = loss(y_hat, y).sum()
            if optimizer is not None:
                optimizer.zero_grad()
            elif params is not None and params[0].grad is not None:
                for param in params:
                    param.grad.data.zero_()
            l.backward()
            optimizer.step()
            train_l_sum += l.item()
            train_acc_sum += (y_hat.argmax(dim=1) == y).sum().item()
            n += y.shape[0]
        test_acc = evalute_accuracy(test_iter, net)
        print('epoch %d, loss %.4f, train acc %.3f, test acc %.3f'
              % (epoch + 1, train_l_sum / n, train_acc_sum / n, test_acc))

if __name__ == '__main__':
    print(mnist_train)
    X, y = [], []
    '''
    for i in range(10):
        X.append(mnist_train[i][0])
        y.append(mnist_train[i][1])
    show_fashion_mnist(X, get_fashion_mnist_labels(y))
    '''
    train(net, train_iter, test_iter, loss, num_epochs, batch_size, None, None, optimizer)
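One point worth noting about the concise version: the Sequential model contains no explicit softmax layer, because nn.CrossEntropyLoss applies log-softmax internally and expects raw scores (logits), so the linear layer's output is passed to the loss directly. A small sketch of this behavior follows; the logit values are made up purely for illustration.

import torch
from torch import nn

# Illustrative logits for one sample with 3 classes (values are made up)
logits = torch.tensor([[2.0, 0.5, -1.0]])
target = torch.tensor([0])

loss_fn = nn.CrossEntropyLoss()
manual = -nn.functional.log_softmax(logits, dim=1)[0, target].item()
print(loss_fn(logits, target).item())  # matches the manual log-softmax computation
print(manual)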
That is how to implement softmax regression in PyTorch. Some of these points may well come up in everyday work, and hopefully this article helps you learn something new. For more details, follow the 億速云 industry news channel.