import?torch
import?torch.nn?as?nn
import?torch.nn.functional?as?F
import?torch.optim?as?optim
import?torchvision
import?torchvision.transforms?as?transforms
import?torchvision.datasets?as?datasets
import?time
# Normalization pipeline for CIFAR-10 images.
# Compose chains the transforms applied to every loaded image.
transform = transforms.Compose([
    transforms.ToTensor(),  # PIL image -> FloatTensor scaled to [0, 1]
    # Per-channel normalization: subtract mean 0.5 and divide by std 0.5,
    # mapping values to roughly [-1, 1]. Removing the constant average
    # offset means the network does not have to learn it.
    transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
])
# Training data: CIFAR-10 downloaded into ./data, normalized on load.
train_data = datasets.CIFAR10('./data', train=True, transform=transform, download=True)
# Loader yields shuffled mini-batches of 4 using 2 worker processes.
train_loader = torch.utils.data.DataLoader(dataset=train_data, batch_size=4,
                                           shuffle=True, num_workers=2)

# Test data: note it is downloaded to a separate ./test directory.
test_data = datasets.CIFAR10('./test', train=False, transform=transform, download=True)
test_loader = torch.utils.data.DataLoader(dataset=test_data, batch_size=4,
                                          shuffle=True, num_workers=2)
# Mini-batch image display helpers.
import matplotlib.pyplot as plt
import numpy as np


# Images are generally displayed from NumPy arrays.
def imgsshow(img):
    """Display a normalized (C, H, W) image tensor with matplotlib.

    Undoes the Normalize((0.5, ...), (0.5, ...)) transform, converts the
    tensor to a NumPy array, and reorders the axes for display.
    """
    img = img / 2 + 0.5  # invert the mean/std normalization back to [0, 1]
    img = img.numpy()    # matplotlib works on NumPy arrays
    img = np.transpose(img, (1, 2, 0))  # (c, h, w) -> (h, w, c) display layout
    # Fix: plt.show(img) does not render data — imshow draws it, show displays it.
    plt.imshow(img)
    plt.show()
data_iter = iter(train_loader)      # pull one random mini-batch
# Fix: iterator.next() was removed in Python 3 — use the next() builtin.
images, labels = next(data_iter)    # split the batch into images and labels
# imgsshow(torchvision.utils.make_grid(images))
class Net(nn.Module):
    """LeNet-style CNN for 10-class 32x32 RGB classification (CIFAR-10).

    Output-size rule for each conv layer:
        N = (input_size - kernel_size + 2 * padding) / stride + 1
    """

    def __init__(self):
        super(Net, self).__init__()
        # 3x32x32 -> conv5x5 -> 6x28x28 -> maxpool2 -> 6x14x14
        self.conv1 = nn.Sequential(
            nn.Conv2d(3, 6, 5, 1),  # (32 - 5 + 2*0)/1 + 1 = 28
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=2, stride=2),  # 28 / 2 = 14
        )
        # 6x14x14 -> conv5x5 -> 16x10x10 -> maxpool2 -> 16x5x5
        self.conv2 = nn.Sequential(
            nn.Conv2d(6, 16, 5),  # (14 - 5 + 0)/1 + 1 = 10
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=2, stride=2),  # 10 / 2 = 5
        )
        self.fc1 = nn.Sequential(
            nn.Linear(16 * 5 * 5, 120),
            nn.ReLU(),
        )
        self.fc2 = nn.Sequential(
            nn.Linear(120, 84),
            nn.ReLU(),
        )
        # Raw logits out; nn.CrossEntropyLoss applies log-softmax itself.
        self.fc3 = nn.Linear(84, 10)

    def forward(self, x):
        """Return class logits of shape (batch, 10) for input (batch, 3, 32, 32)."""
        x = self.conv1(x)
        x = self.conv2(x)
        # Flatten the 16x5x5 feature maps for the fully connected layers.
        # (Fix: removed debug prints that ran on every forward pass.)
        x = x.view(-1, 16 * 5 * 5)
        x = self.fc1(x)
        x = self.fc2(x)
        x = self.fc3(x)
        return x
# Instantiate the model and print its layer structure for inspection.
net = Net()
print("current net is ", net)

# Loss: cross entropy over the 10 class logits.
criterion = nn.CrossEntropyLoss()

# Optimizer: defines how (and whose) weights are updated once the loss
# has been backpropagated.
tunner = optim.SGD(net.parameters(), lr=0.0001, momentum=0.9)

# Loss curves, appended to every 250 training batches.
traning_loss_history = []
test_loss_history = []
for epoch in range(1000):
    net.train()
    running_loss = 0.0
    print("training.... epoch{0}".format(epoch))
    start_epoc = time.time()
    # Each iteration yields exactly one mini-batch until the dataset is exhausted.
    for i, data in enumerate(train_loader, 0):
        batch_time = time.time()
        # Fix: the original unpacked into `imges` but then trained on the
        # stale module-level `images` (every step used the same batch), and
        # wrapped tensors in the undefined/deprecated `Variable`.
        images, labels = data
        tunner.zero_grad()  # clear gradients accumulated by the previous step
        outs = net(images)
        loss = criterion(outs, labels)
        loss.backward()
        tunner.step()
        if i % 1000 == 0:
            print("i {0} : loss {1} : duration {2}".format(
                i, loss.item(), (time.time() - batch_time)))
        running_loss += loss.item()
        if i % 250 == 0:
            # Periodic evaluation on the test set, gradients disabled.
            net.eval()
            test_loss_sum = 0.0
            test_batches = 0
            with torch.no_grad():
                for test_images, test_label in test_loader:
                    test_outs = net(test_images)
                    test_loss_sum += criterion(test_outs, test_label).item()
                    test_batches += 1
            traning_loss_history.append(running_loss / 250)
            # Fix: record the average test loss over the whole test set
            # instead of only the last batch's loss.
            test_loss_history.append(test_loss_sum / max(test_batches, 1))
            running_loss = 0.0
            # Fix: return to training mode — the original kept training in
            # eval mode for the rest of the epoch after each evaluation.
            net.train()
    print("epoch {0} :: loss {1} :: duration {2}".format(
        epoch, loss.item(), time.time() - start_epoc))
    # Why the loss can regress: 1) mini-batch sampling noise (e.g. a run of
    # batches dominated by one class), 2) the learning rate.
# Plot the recorded training/test loss curves.
plt.figure()
# Fix: plt.legend('training loss', 'test loss') misparses the positional
# strings — attach labels to the plots and call legend() with no arguments.
plt.plot(traning_loss_history, label='training loss')
plt.plot(test_loss_history, label='test loss')
plt.legend()
plt.title("Training/Test loss")  # fix: plt.tile does not exist
plt.xlabel('#mini_batch *250')
plt.ylabel('Loss')
plt.show()