In this post I'd like to share an example of visualizing a network and its loss function in PyTorch. Many readers may not be very familiar with this yet, so I'm sharing the walkthrough below for reference; I hope you get something useful out of it, so let's take a look together!
Environment:

1. PyTorch
2. Visdom
3. Python 3.5
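Visdom runs as a separate server process: install it with pip install visdom and start it with python -m visdom.server, after which the dashboard is served at http://localhost:8097 by default. Before running the script below, it can help to verify that the client can actually reach the server; here is a minimal sketch using the client's check_connection method:

from visdom import Visdom

viz = Visdom()
# check_connection returns False when no server is listening
assert viz.check_connection(), "start the server first: python -m visdom.server"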
# coding:utf8
import torch
from torch import nn, optim                  # nn: network layers; optim: optimizers
from torch.utils.data import DataLoader
from torch.autograd import Variable
from torchvision import transforms, datasets
from visdom import Visdom                    # visualization client
import time
import numpy as np

# visualization app
viz = Visdom()

# hyperparameters
BATCH_SIZE = 40
LR = 1e-3
EPOCH = 2

# decide whether to use the GPU
USE_GPU = True
if USE_GPU:
    gpu_status = torch.cuda.is_available()
else:
    gpu_status = False

transform = transforms.Compose([transforms.ToTensor(),
                                transforms.Normalize((0.1307,), (0.3081,))])

# load the data
train_dataset = datasets.MNIST('../data', True, transform, download=False)
test_dataset = datasets.MNIST('../data', False, transform)
train_loader = DataLoader(train_dataset, BATCH_SIZE, True)

# to speed up evaluation, shrink the test set from 10000 samples to 1500
test_data = torch.unsqueeze(test_dataset.test_data, 1)[:1500]
test_label = test_dataset.test_labels[:1500]

# show some test images in visdom
viz.images(test_data[:100], nrow=10)
# pause 0.5s so the visualization windows do not overlap
time.sleep(0.5)

if gpu_status:
    test_data = test_data.cuda()
# volatile=True marks the tensor as inference-only (deprecated in PyTorch >= 0.4)
test_data = Variable(test_data, volatile=True).float()

# create the line-plot window
line = viz.line(np.arange(10))

# build the CNN
class CNN(nn.Module):
    def __init__(self, in_dim, n_class):
        super(CNN, self).__init__()
        self.conv = nn.Sequential(
            # channels carry feature depth; padding adds a border; kernel_size is the 5x5 window
            nn.Conv2d(in_channels=in_dim, out_channels=16, kernel_size=5, stride=1, padding=2),
            nn.ReLU(),
            # downsample: 28x28 -> 14x14
            nn.MaxPool2d(kernel_size=2),
            nn.Conv2d(16, 32, 3, 1, 1),
            nn.ReLU(),
            # 14x14 -> 7x7
            nn.MaxPool2d(2)
        )
        self.fc = nn.Sequential(
            nn.Linear(32 * 7 * 7, 120),
            nn.Linear(120, n_class)
        )

    def forward(self, x):
        out = self.conv(x)
        out = out.view(out.size(0), -1)   # flatten to (batch, 32*7*7)
        out = self.fc(out)
        return out

net = CNN(1, 10)
if gpu_status:
    net = net.cuda()

# loss and optimizer
loss_f = nn.CrossEntropyLoss()
optimizer = optim.Adam(net.parameters(), lr=LR)

# start time
start_time = time.time()
# data points for the plots
time_p, tr_acc, ts_acc, loss_p = [], [], [], []
# create the text window (visdom text supports HTML)
text = viz.text("<h2>Convolutional Neural Network</h2>")

for epoch in range(EPOCH):
    # the reported loss is a per-batch average, so accumulate loss/acc over the epoch
    sum_loss, sum_acc, sum_step = 0., 0., 0.
    for i, (tx, ty) in enumerate(train_loader, 1):
        if gpu_status:
            tx, ty = tx.cuda(), ty.cuda()
        tx = Variable(tx)
        ty = Variable(ty)
        out = net(tx)
        loss = loss_f(out, ty)
        sum_loss += loss.item() * len(ty)
        pred_tr = torch.max(out, 1)[1]
        sum_acc += (pred_tr == ty).sum().item()
        sum_step += ty.size(0)

        # backpropagation
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        # visualize every 40 batches
        if i % 40 == 0:
            if gpu_status:
                test_data = test_data.cuda()
            test_out = net(test_data)
            # on the GPU the output is a CUDA tensor; move it to the CPU
            # before comparing it with test_label
            pred_ts = torch.max(test_out, 1)[1].cpu().data.squeeze()
            rightnum = pred_ts.eq(test_label.view_as(pred_ts)).sum().item()
            acc = rightnum / float(test_label.size(0))
            print("epoch: [{}/{}] | Loss: {:.4f} | TR_acc: {:.4f} | TS_acc: {:.4f} | Time: {:.1f}".format(
                epoch + 1, EPOCH, sum_loss / sum_step, sum_acc / sum_step, acc, time.time() - start_time))

            # update the plots
            time_p.append(time.time() - start_time)
            tr_acc.append(sum_acc / sum_step)
            ts_acc.append(acc)
            loss_p.append(sum_loss / sum_step)
            viz.line(X=np.column_stack((np.array(time_p), np.array(time_p), np.array(time_p))),
                     Y=np.column_stack((np.array(loss_p), np.array(tr_acc), np.array(ts_acc))),
                     win=line,
                     opts=dict(legend=["Loss", "TRAIN_acc", "TEST_acc"]))
            # visdom text windows accept HTML
            viz.text("<p style='color:red'>epoch:{}</p><br><p style='color:blue'>Loss:{:.4f}</p><br>"
                     "<p style='color:BlueViolet'>TRAIN_acc:{:.4f}</p><br><p style='color:orange'>TEST_acc:{:.4f}</p><br>"
                     "<p style='color:green'>Time:{:.2f}</p>".format(epoch, sum_loss / sum_step,
                                                                     sum_acc / sum_step, acc,
                                                                     time.time() - start_time),
                     win=text)
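The loop above redraws the whole curve on every update by passing the full loss/accuracy history to viz.line with win=line. Once the history grows, an alternative is to append points instead of resending everything; below is a minimal sketch of that pattern, assuming a running Visdom server (the window title and the loss values here are made up for illustration):

import numpy as np
from visdom import Visdom

viz = Visdom()
# create a line window with a first point, then append one point per step
win = viz.line(X=np.array([0]), Y=np.array([1.0]), opts=dict(title="loss"))
for step in range(1, 11):
    fake_loss = 1.0 / step  # placeholder standing in for a real batch loss
    viz.line(X=np.array([step]), Y=np.array([fake_loss]), win=win, update='append')

Either style works: redrawing keeps the bookkeeping trivial, while update='append' sends less data per call as training runs long.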
That's everything in "An example of visualizing networks and loss functions in PyTorch". Thanks for reading! I hope the walkthrough gave you a solid picture of how it works; to learn more, follow the 億速云 (Yisu Cloud) industry news channel!
免責(zé)聲明:本站發(fā)布的內(nèi)容(圖片、視頻和文字)以原創(chuàng)、轉(zhuǎn)載和分享為主,文章觀點不代表本網(wǎng)站立場,如果涉及侵權(quán)請聯(lián)系站長郵箱:is@yisu.com進(jìn)行舉報,并提供相關(guān)證據(jù),一經(jīng)查實,將立刻刪除涉嫌侵權(quán)內(nèi)容。