Without further ado, let's go straight to the code:
import torch
import torch.nn as nn
import torch.nn.functional as F


class VGG16(nn.Module):
    def __init__(self):
        super(VGG16, self).__init__()
        # input: 3 * 224 * 224
        self.conv1_1 = nn.Conv2d(3, 64, 3)                     # 64 * 222 * 222
        self.conv1_2 = nn.Conv2d(64, 64, 3, padding=(1, 1))    # 64 * 222 * 222
        self.maxpool1 = nn.MaxPool2d((2, 2), padding=(1, 1))   # pooling -> 64 * 112 * 112

        self.conv2_1 = nn.Conv2d(64, 128, 3)                   # 128 * 110 * 110
        self.conv2_2 = nn.Conv2d(128, 128, 3, padding=(1, 1))  # 128 * 110 * 110
        self.maxpool2 = nn.MaxPool2d((2, 2), padding=(1, 1))   # pooling -> 128 * 56 * 56

        self.conv3_1 = nn.Conv2d(128, 256, 3)                  # 256 * 54 * 54
        self.conv3_2 = nn.Conv2d(256, 256, 3, padding=(1, 1))  # 256 * 54 * 54
        self.conv3_3 = nn.Conv2d(256, 256, 3, padding=(1, 1))  # 256 * 54 * 54
        self.maxpool3 = nn.MaxPool2d((2, 2), padding=(1, 1))   # pooling -> 256 * 28 * 28

        self.conv4_1 = nn.Conv2d(256, 512, 3)                  # 512 * 26 * 26
        self.conv4_2 = nn.Conv2d(512, 512, 3, padding=(1, 1))  # 512 * 26 * 26
        self.conv4_3 = nn.Conv2d(512, 512, 3, padding=(1, 1))  # 512 * 26 * 26
        self.maxpool4 = nn.MaxPool2d((2, 2), padding=(1, 1))   # pooling -> 512 * 14 * 14

        self.conv5_1 = nn.Conv2d(512, 512, 3)                  # 512 * 12 * 12
        self.conv5_2 = nn.Conv2d(512, 512, 3, padding=(1, 1))  # 512 * 12 * 12
        self.conv5_3 = nn.Conv2d(512, 512, 3, padding=(1, 1))  # 512 * 12 * 12
        self.maxpool5 = nn.MaxPool2d((2, 2), padding=(1, 1))   # pooling -> 512 * 7 * 7

        # fully connected layers (after flattening with view)
        self.fc1 = nn.Linear(512 * 7 * 7, 4096)
        self.fc2 = nn.Linear(4096, 4096)
        self.fc3 = nn.Linear(4096, 1000)
        # softmax over 1 * 1 * 1000

    def forward(self, x):
        # x.size(0) is the batch size
        in_size = x.size(0)

        out = self.conv1_1(x)     # 222
        out = F.relu(out)
        out = self.conv1_2(out)   # 222
        out = F.relu(out)
        out = self.maxpool1(out)  # 112

        out = self.conv2_1(out)   # 110
        out = F.relu(out)
        out = self.conv2_2(out)   # 110
        out = F.relu(out)
        out = self.maxpool2(out)  # 56

        out = self.conv3_1(out)   # 54
        out = F.relu(out)
        out = self.conv3_2(out)   # 54
        out = F.relu(out)
        out = self.conv3_3(out)   # 54
        out = F.relu(out)
        out = self.maxpool3(out)  # 28

        out = self.conv4_1(out)   # 26
        out = F.relu(out)
        out = self.conv4_2(out)   # 26
        out = F.relu(out)
        out = self.conv4_3(out)   # 26
        out = F.relu(out)
        out = self.maxpool4(out)  # 14

        out = self.conv5_1(out)   # 12
        out = F.relu(out)
        out = self.conv5_2(out)   # 12
        out = F.relu(out)
        out = self.conv5_3(out)   # 12
        out = F.relu(out)
        out = self.maxpool5(out)  # 7

        # flatten
        out = out.view(in_size, -1)

        out = self.fc1(out)
        out = F.relu(out)
        out = self.fc2(out)
        out = F.relu(out)
        out = self.fc3(out)
        out = F.log_softmax(out, dim=1)
        return out
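A quick way to verify the shape bookkeeping in the layer comments above is a dummy forward pass. The snippet below is my own sanity-check sketch, not part of the original post; it assumes the VGG16 class above is already defined in scope.

import torch

model = VGG16()
dummy = torch.randn(2, 3, 224, 224)  # a fake batch of two 3 x 224 x 224 images
with torch.no_grad():                # no gradients needed for a shape check
    logits = model(dummy)
print(logits.shape)                  # expect torch.Size([2, 1000])

If the sizes in the comments ever drift out of sync with the layers, this check fails with a shape mismatch at fc1 instead of surprising you mid-training.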
Bonus: a PyTorch implementation of VGG (GPU version)
Here is the code:
import torch
from torch import nn
from torch import optim
from PIL import Image
import numpy as np

print(torch.cuda.is_available())
device = torch.device('cuda:0')

path = "/content/drive/My Drive/Colab Notebooks/data/dog_vs_cat/"

train_X = np.empty((2000, 224, 224, 3), dtype="float32")
train_Y = np.empty((2000,), dtype="int")
train_XX = np.empty((2000, 3, 224, 224), dtype="float32")

# load 1000 cat images (label 0)
for i in range(1000):
    file_path = path + "cat." + str(i) + ".jpg"
    image = Image.open(file_path)
    # Image.LANCZOS replaces Image.ANTIALIAS, which was removed in Pillow 10
    resized_image = image.resize((224, 224), Image.LANCZOS)
    img = np.array(resized_image)
    train_X[i, :, :, :] = img
    train_Y[i] = 0

# load 1000 dog images (label 1)
for i in range(1000):
    file_path = path + "dog." + str(i) + ".jpg"
    image = Image.open(file_path)
    resized_image = image.resize((224, 224), Image.LANCZOS)
    img = np.array(resized_image)
    train_X[i + 1000, :, :, :] = img
    train_Y[i + 1000] = 1

# scale to [0, 1] and shuffle
train_X /= 255
index = np.arange(2000)
np.random.shuffle(index)
train_X = train_X[index, :, :, :]
train_Y = train_Y[index]

# convert from NHWC to the NCHW layout PyTorch expects
for i in range(3):
    train_XX[:, i, :, :] = train_X[:, :, :, i]


# build the network
class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = nn.Sequential(
            nn.Conv2d(in_channels=3, out_channels=64, kernel_size=3, stride=1, padding=1),
            nn.ReLU(),
            nn.Conv2d(in_channels=64, out_channels=64, kernel_size=3, stride=1, padding=1),
            nn.ReLU(),
            nn.BatchNorm2d(num_features=64, eps=1e-05, momentum=0.1, affine=True),
            nn.MaxPool2d(kernel_size=2, stride=2)
        )
        self.conv2 = nn.Sequential(
            nn.Conv2d(in_channels=64, out_channels=128, kernel_size=3, stride=1, padding=1),
            nn.ReLU(),
            nn.Conv2d(in_channels=128, out_channels=128, kernel_size=3, stride=1, padding=1),
            nn.ReLU(),
            nn.BatchNorm2d(128, eps=1e-5, momentum=0.1, affine=True),
            nn.MaxPool2d(kernel_size=2, stride=2)
        )
        self.conv3 = nn.Sequential(
            nn.Conv2d(in_channels=128, out_channels=256, kernel_size=3, stride=1, padding=1),
            nn.ReLU(),
            nn.Conv2d(in_channels=256, out_channels=256, kernel_size=3, stride=1, padding=1),
            nn.ReLU(),
            nn.Conv2d(in_channels=256, out_channels=256, kernel_size=3, stride=1, padding=1),
            nn.ReLU(),
            nn.BatchNorm2d(256, eps=1e-5, momentum=0.1, affine=True),
            nn.MaxPool2d(kernel_size=2, stride=2)
        )
        self.conv4 = nn.Sequential(
            nn.Conv2d(in_channels=256, out_channels=512, kernel_size=3, stride=1, padding=1),
            nn.ReLU(),
            nn.Conv2d(in_channels=512, out_channels=512, kernel_size=3, stride=1, padding=1),
            nn.ReLU(),
            nn.Conv2d(in_channels=512, out_channels=512, kernel_size=3, stride=1, padding=1),
            nn.ReLU(),
            nn.BatchNorm2d(512, eps=1e-5, momentum=0.1, affine=True),
            nn.MaxPool2d(kernel_size=2, stride=2)
        )
        self.conv5 = nn.Sequential(
            nn.Conv2d(in_channels=512, out_channels=512, kernel_size=3, stride=1, padding=1),
            nn.ReLU(),
            nn.Conv2d(in_channels=512, out_channels=512, kernel_size=3, stride=1, padding=1),
            nn.ReLU(),
            nn.Conv2d(in_channels=512, out_channels=512, kernel_size=3, stride=1, padding=1),
            nn.ReLU(),
            nn.BatchNorm2d(512, eps=1e-5, momentum=0.1, affine=True),
            nn.MaxPool2d(kernel_size=2, stride=2)
        )
        self.dense1 = nn.Sequential(
            nn.Linear(7 * 7 * 512, 4096),
            nn.ReLU(),
            nn.Linear(4096, 4096),
            nn.ReLU(),
            nn.Linear(4096, 2)
        )

    def forward(self, x):
        x = self.conv1(x)
        x = self.conv2(x)
        x = self.conv3(x)
        x = self.conv4(x)
        x = self.conv5(x)
        x = x.view(-1, 7 * 7 * 512)
        x = self.dense1(x)
        return x


batch_size = 16
net = Net().to(device)
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(net.parameters(), lr=0.0005)

train_loss = []
for epoch in range(10):
    for i in range(2000 // batch_size):
        x = train_XX[i * batch_size:i * batch_size + batch_size]  # (batch_size, 3, 224, 224)
        y = train_Y[i * batch_size:i * batch_size + batch_size]   # (batch_size,) integer class labels
        x = torch.from_numpy(x)
        y = torch.from_numpy(y)
        x = x.cuda()
        y = y.long().cuda()

        out = net(x)
        loss = criterion(out, y)  # compute the loss
        optimizer.zero_grad()     # clear gradients left over from the previous step
        loss.backward()           # backpropagate to compute parameter gradients
        optimizer.step()          # apply the updates to the net's parameters

        train_loss.append(loss.item())
    print(epoch, i * batch_size, np.mean(train_loss))
    train_loss = []

# check accuracy over all 2000 images (note: these are the training images)
total_correct = 0
for i in range(2000):
    x = train_XX[i].reshape(1, 3, 224, 224)
    y = train_Y[i]
    x = torch.from_numpy(x)
    x = x.cuda()
    out = net(x).cpu()
    out = out.detach().numpy()
    pred = np.argmax(out)
    if pred == y:
        total_correct += 1
print(total_correct)

acc = total_correct / 2000.0
print('test acc:', acc)

torch.cuda.empty_cache()
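The hand-rolled batching above works, but slicing the numpy arrays by hand never reshuffles the data between epochs. As an alternative sketch (my addition, reusing train_XX, train_Y, net, criterion, optimizer, and device from the code above), the same loop can be written with TensorDataset and DataLoader, which reshuffle every epoch:

import numpy as np
import torch
from torch.utils.data import TensorDataset, DataLoader

dataset = TensorDataset(torch.from_numpy(train_XX),
                        torch.from_numpy(train_Y).long())
loader = DataLoader(dataset, batch_size=16, shuffle=True)  # reshuffles each epoch

for epoch in range(10):
    epoch_loss = []
    for x, y in loader:
        x, y = x.to(device), y.to(device)
        out = net(x)
        loss = criterion(out, y)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        epoch_loss.append(loss.item())
    print(epoch, np.mean(epoch_loss))

Note also that the evaluation loop above runs over the same 2000 images the network was trained on, so the printed "test acc" is really a training accuracy.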
Change batch_size in the code above to 32 and train for 100 epochs, and the resulting accuracy makes one thing clear:
the model overfits.
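One likely contributor (my observation, not from the original post) is that this Net omits the dropout that the original VGG paper applies, with p=0.5, after the first two fully connected layers. A minimal sketch of the classifier head with dropout restored, which would replace self.dense1 in Net:

import torch.nn as nn

# Sketch only: VGG as published uses dropout (p=0.5) after the first two
# fully connected layers; the Net above leaves it out entirely.
dense1 = nn.Sequential(
    nn.Linear(7 * 7 * 512, 4096),
    nn.ReLU(),
    nn.Dropout(p=0.5),  # randomly zeroes activations during training
    nn.Linear(4096, 4096),
    nn.ReLU(),
    nn.Dropout(p=0.5),
    nn.Linear(4096, 2),
)

With only 2000 training images, data augmentation (random flips and crops) or early stopping would also be worth trying.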
That concludes this tutorial on implementing VGG16 with PyTorch. I hope it serves as a useful reference.
Tags: PyTorch, VGG16