• Ubuntu 16.04 + MS-Celeb-1M + AlexNet + PyTorch


    Grad-school recommendation season has been eating my brain, so this dragged on for quite a while before I could call it done. I went from getting familiar with the Ubuntu 16.04 environment (setting up a proxy alone took a whole morning, ugh) to setting up Python and PyTorch, then spent one morning on basic Python syntax and just started hacking. I'll fill in the detailed process once the recommendation stuff is over; for now, here's the code.

    The dataset is a cleaned version of MS-Celeb-1M (em... I'll write up how it was cleaned later).
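    The loader below expects a plain txt annotation file where each line is an image path followed by an integer identity label, separated by whitespace. A hypothetical example (these paths and labels are made up, just to show the layout):

    /path/to/aligned/id0001_face01.jpg 0
    /path/to/aligned/id0001_face02.jpg 0
    /path/to/aligned/id0002_face01.jpg 1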

    I'm not fluent in Python and stepped on a lot of landmines, same again with PyTorch. I swore I would never read the Chinese PyTorch docs (http://pytorch-cn.readthedocs.io/zh/latest/package_references/functional/ ... turns out they're great).

    I'll keep filling things in over time; a lot of details are still being polished....

    
    
    import os
    import torch
    import torch.utils.data as data
    import torchvision.transforms as transforms
    import torch.optim as optim
    import torch.nn as nn
    from torch.autograd import Variable
    from torch.utils.data import DataLoader
    from torch.optim import lr_scheduler
    from PIL import Image
    # ------------------ ready for the dataset ------------------
    # AlexNet expects 227x227 inputs; the mean/std are the ImageNet statistics.
    # transforms.Scale was renamed to transforms.Resize in newer torchvision.
    transform = transforms.Compose([
        transforms.Resize(227),
        transforms.CenterCrop(227),
        transforms.ToTensor(),
        transforms.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225))])

    class MyDataset(data.Dataset):
        """Reads "<image path> <integer label>" lines from a txt annotation file."""

        def __init__(self, data_txt, transform):
            imgs = []
            with open(data_txt, 'r') as f:
                for line in f:
                    words = line.strip().split()
                    image_path = words[0]
                    label = int(words[1])
                    imgs.append((image_path, label))

            self.transform = transform
            self.imgs = imgs

        def __getitem__(self, index):
            image_dir, target = self.imgs[index]
            # convert('RGB') guards against grayscale images in the dataset
            image = Image.open(image_dir).convert('RGB')
            # use self.transform, not the global transform
            image = self.transform(image)

            return image, target

        def __len__(self):
            return len(self.imgs)

    train_data = MyDataset("/home/fuckman/FaceImage/Traindata.txt", transform)
    train_loader = DataLoader(train_data, batch_size=128, shuffle=True, num_workers=8, drop_last=False)

    # for img, label in train_data:
    #     print(img.size(), label)

    test_data = MyDataset("/home/fuckman/FaceImage/Testdata.txt", transform)
    test_loader = DataLoader(dataset=test_data, batch_size=128, shuffle=False, num_workers=8, drop_last=False)

    # print(len(train_data))

    # --------------- create the net and train --------------------

    class Net(torch.nn.Module):
        def __init__(self):
            super(Net, self).__init__()
            # classic AlexNet layout:
            # Conv2d(in_channels, out_channels, kernel_size, stride, padding)
            self.conv1 = torch.nn.Sequential(
                torch.nn.Conv2d(3, 96, 11, 4, 0),
                torch.nn.ReLU(),
                torch.nn.MaxPool2d(3, 2)
            )
            self.conv2 = torch.nn.Sequential(
                torch.nn.Conv2d(96, 256, 5, 1, 2),
                torch.nn.ReLU(),
                torch.nn.MaxPool2d(3, 2)
            )
            self.conv3 = torch.nn.Sequential(
                torch.nn.Conv2d(256, 384, 3, 1, 1),
                torch.nn.ReLU(),
            )
            self.conv4 = torch.nn.Sequential(
                torch.nn.Conv2d(384, 384, 3, 1, 1),
                torch.nn.ReLU(),
            )
            self.conv5 = torch.nn.Sequential(
                torch.nn.Conv2d(384, 256, 3, 1, 1),
                torch.nn.ReLU(),
                torch.nn.MaxPool2d(3, 2)
            )
            self.dense = torch.nn.Sequential(
                torch.nn.Dropout(0.5),
                torch.nn.Linear(9216, 4096),   # 9216 = 256 * 6 * 6
                torch.nn.ReLU(),
                torch.nn.Dropout(0.5),
                torch.nn.Linear(4096, 4096),
                torch.nn.ReLU(),
                torch.nn.Linear(4096, 1000)    # output layer: 1000 classes
            )

        def forward(self, x):
            conv1_out = self.conv1(x)
            conv2_out = self.conv2(conv1_out)
            conv3_out = self.conv3(conv2_out)
            conv4_out = self.conv4(conv3_out)
            conv5_out = self.conv5(conv4_out)
            res = conv5_out.view(conv5_out.size(0), -1)   # flatten to N x 9216
            out = self.dense(res)
            return out

    alexnet = Net()
    # resume from a previous checkpoint if one exists (first run starts from scratch)
    if os.path.exists('net_params.pkl'):
        alexnet.load_state_dict(torch.load('net_params.pkl'))
    alexnet.cuda()

    # print( alexnet )
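    # Optional sanity check on the flattened feature size the first Linear
    # layer expects: 227 -> conv1 -> 55 -> pool -> 27 -> conv2 -> 27 -> pool
    # -> 13 -> conv3/4/5 -> 13 -> pool -> 6, i.e. 256 * 6 * 6 = 9216.
    # dummy = Variable(torch.randn(1, 3, 227, 227).cuda())
    # feat = alexnet.conv5(alexnet.conv4(alexnet.conv3(alexnet.conv2(alexnet.conv1(dummy)))))
    # print(feat.view(1, -1).size())   # expect (1, 9216)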

    #----------------- training ----------------

    # cross-entropy loss (combines LogSoftmax + NLLLoss, so the net outputs raw logits)
    criterion = nn.CrossEntropyLoss()
    criterion.cuda()

    # SGD with momentum
    optimizer = optim.SGD(alexnet.parameters(), lr=0.01, momentum=0.9)

    # learning rate decay: lr starts at 0.01, then *0.1 at epoch 10 and again at epoch 60
    scheduler = lr_scheduler.MultiStepLR(optimizer, milestones=[10, 60], gamma=0.1)

    # training
    for epoch in range(100):
        running_loss = 0.0
        for i, data in enumerate(train_loader, 0):
            inputs, labels = data
            # inputs must be N x C x H x W; labels must be a LongTensor of size N
            inputs, labels = Variable(inputs.cuda()), Variable(labels.cuda()).long()

            optimizer.zero_grad()
            outputs = alexnet(inputs)

            # outputs is N x C (C = num classes), labels is N; if the sizes
            # don't line up, printing both shapes is the quickest diagnosis
            loss = criterion(outputs, labels)
            loss.backward()
            optimizer.step()

            # loss.item() replaces the old loss.data[0] (removed in PyTorch >= 0.4)
            running_loss += loss.item()
            if i % 100 == 99:
                print('[%d, %5d] loss : %.3f' % (epoch + 1, i + 1, running_loss / 100))
                running_loss = 0.0

        # step the scheduler once per epoch, after the optimizer updates
        scheduler.step()

        if epoch % 10 == 9:
            torch.save(alexnet.state_dict(), 'net_params.pkl')
            print("success")

    print("Finished Training")
    torch.save(alexnet.state_dict(), 'net_params.pkl')
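    # Only the model weights are saved above. To resume training exactly, a
    # fuller checkpoint would also keep the optimizer state and epoch count
    # (a sketch; the dict keys and filename here are arbitrary):
    # torch.save({'epoch': epoch,
    #             'model': alexnet.state_dict(),
    #             'optim': optimizer.state_dict()}, 'checkpoint.pkl')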

    # ----------------- Test -------------------

    # switch off dropout for evaluation
    alexnet.eval()

    correct = 0
    total = 0
    for i, data in enumerate(test_loader, 0):
        images, labels = data
        labels = labels.cuda()
        outputs = alexnet(Variable(images.cuda()))
        _, predicted = torch.max(outputs.data, 1)   # max value and the index of the max
        total += labels.size(0)
        correct += (predicted.int() == labels.int()).sum().item()

    print('Accuracy of the network on the %d test images: %d %%' % (total, 100 * correct / total))
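    To poke at the trained model on a single image, here is a minimal sketch (the image path is hypothetical; it reuses the same `transform` and the old-style `Variable` wrapper as above):

    img = Image.open('/path/to/some/face.jpg').convert('RGB')   # hypothetical path
    x = Variable(transform(img).unsqueeze(0).cuda())   # unsqueeze adds the batch dimension
    out = alexnet(x)
    _, pred = torch.max(out.data, 1)
    print('predicted class id: %d' % int(pred[0]))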
     
  • Original post: https://www.cnblogs.com/z1141000271/p/9394738.html