First Open-Source Codebase: PointNet


Here we analyze the PyTorch implementation of PointNet rather than the TensorFlow one.

In total we walk through four code files: model.py, dataset.py, train_classification.py and train_segmentation.py.

(1) The model.py file

from __future__ import print_function
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.utils.data
from torch.autograd import Variable
import numpy as np
import torch.nn.functional as F


class STN3d(nn.Module):
    def __init__(self):
        super(STN3d, self).__init__()
        self.conv1 = torch.nn.Conv1d(3, 64, 1)
        self.conv2 = torch.nn.Conv1d(64, 128, 1)
        self.conv3 = torch.nn.Conv1d(128, 1024, 1)
        self.fc1 = nn.Linear(1024, 512)
        self.fc2 = nn.Linear(512, 256)
        self.fc3 = nn.Linear(256, 9)
        self.relu = nn.ReLU()

        self.bn1 = nn.BatchNorm1d(64)
        self.bn2 = nn.BatchNorm1d(128)
        self.bn3 = nn.BatchNorm1d(1024)
        self.bn4 = nn.BatchNorm1d(512)
        self.bn5 = nn.BatchNorm1d(256)

    def forward(self, x):
        batchsize = x.size()[0]
        x = F.relu(self.bn1(self.conv1(x)))
        x = F.relu(self.bn2(self.conv2(x)))
        x = F.relu(self.bn3(self.conv3(x)))
        x = torch.max(x, 2, keepdim=True)[0]
        x = x.view(-1, 1024)

        x = F.relu(self.bn4(self.fc1(x)))
        x = F.relu(self.bn5(self.fc2(x)))
        x = self.fc3(x)

        # add the flattened 3x3 identity so the predicted transform starts near identity
        iden = Variable(torch.from_numpy(np.array([1, 0, 0, 0, 1, 0, 0, 0, 1]).astype(np.float32))).view(1, 9).repeat(batchsize, 1)
        if x.is_cuda:
            iden = iden.cuda()
        x = x + iden
        x = x.view(-1, 3, 3)
        return x


class STNkd(nn.Module):
    def __init__(self, k=64):
        super(STNkd, self).__init__()
        self.conv1 = torch.nn.Conv1d(k, 64, 1)
        self.conv2 = torch.nn.Conv1d(64, 128, 1)
        self.conv3 = torch.nn.Conv1d(128, 1024, 1)
        self.fc1 = nn.Linear(1024, 512)
        self.fc2 = nn.Linear(512, 256)
        self.fc3 = nn.Linear(256, k * k)
        self.relu = nn.ReLU()

        self.bn1 = nn.BatchNorm1d(64)
        self.bn2 = nn.BatchNorm1d(128)
        self.bn3 = nn.BatchNorm1d(1024)
        self.bn4 = nn.BatchNorm1d(512)
        self.bn5 = nn.BatchNorm1d(256)

        self.k = k

    def forward(self, x):
        batchsize = x.size()[0]
        x = F.relu(self.bn1(self.conv1(x)))
        x = F.relu(self.bn2(self.conv2(x)))
        x = F.relu(self.bn3(self.conv3(x)))
        x = torch.max(x, 2, keepdim=True)[0]
        x = x.view(-1, 1024)

        x = F.relu(self.bn4(self.fc1(x)))
        x = F.relu(self.bn5(self.fc2(x)))
        x = self.fc3(x)

        # same identity trick, but for a k x k feature transform
        iden = Variable(torch.from_numpy(np.eye(self.k).flatten().astype(np.float32))).view(1, self.k * self.k).repeat(batchsize, 1)
        if x.is_cuda:
            iden = iden.cuda()
        x = x + iden
        x = x.view(-1, self.k, self.k)
        return x

# Definition of PointNet's feature-extraction module
class PointNetfeat(nn.Module):
    def __init__(self, global_feat=True, feature_transform=False):
        super(PointNetfeat, self).__init__()
        self.stn = STN3d()
        self.conv1 = torch.nn.Conv1d(3, 64, 1)
        self.conv2 = torch.nn.Conv1d(64, 128, 1)
        self.conv3 = torch.nn.Conv1d(128, 1024, 1)
        self.bn1 = nn.BatchNorm1d(64)
        self.bn2 = nn.BatchNorm1d(128)
        self.bn3 = nn.BatchNorm1d(1024)
        self.global_feat = global_feat
        self.feature_transform = feature_transform
        if self.feature_transform:
            self.fstn = STNkd(k=64)

    def forward(self, x):
        n_pts = x.size()[2]   # first get the number of points n
        trans = self.stn(x)   # predict the 3x3 input transform with the STN
        # transpose x to batch * n_points * attributes, apply the transform via
        # batch matrix multiplication, then transpose back to batch * attributes * n_points
        x = x.transpose(2, 1)
        x = torch.bmm(x, trans)
        x = x.transpose(2, 1)
        x = F.relu(self.bn1(self.conv1(x)))

        if self.feature_transform:  # optionally transform the 64-dim point features as well
            trans_feat = self.fstn(x)
            x = x.transpose(2, 1)
            x = torch.bmm(x, trans_feat)
            x = x.transpose(2, 1)
        else:
            trans_feat = None

        pointfeat = x
        x = F.relu(self.bn2(self.conv2(x)))
        x = self.bn3(self.conv3(x))
        x = torch.max(x, 2, keepdim=True)[0]
        x = x.view(-1, 1024)
        # return the pooled feature together with the trans and trans_feat matrices
        if self.global_feat:
            return x, trans, trans_feat
        else:
            x = x.view(-1, 1024, 1).repeat(1, 1, n_pts)
            return torch.cat([x, pointfeat], 1), trans, trans_feat
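The transpose/bmm/transpose pattern in forward is worth a quick sanity check. A minimal shape sketch (the random tensors below are stand-ins for real data; shapes are borrowed from the self-test at the bottom of the file):

# minimal shape check for the input transform applied via torch.bmm
import torch

x = torch.rand(32, 3, 2500)   # batch x attributes x n_points
trans = torch.rand(32, 3, 3)  # one 3x3 transform per sample, as STN3d would return
y = torch.bmm(x.transpose(2, 1), trans).transpose(2, 1)
print(y.shape)                # torch.Size([32, 3, 2500])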
# A k-way classification network on top of the global feature
class PointNetCls(nn.Module):
    def __init__(self, k=2, feature_transform=False):
        super(PointNetCls, self).__init__()
        self.feature_transform = feature_transform
        self.feat = PointNetfeat(global_feat=True, feature_transform=feature_transform)
        self.fc1 = nn.Linear(1024, 512)
        self.fc2 = nn.Linear(512, 256)
        self.fc3 = nn.Linear(256, k)
        self.dropout = nn.Dropout(p=0.3)
        self.bn1 = nn.BatchNorm1d(512)
        self.bn2 = nn.BatchNorm1d(256)
        self.relu = nn.ReLU()

    def forward(self, x):
        x, trans, trans_feat = self.feat(x)
        x = F.relu(self.bn1(self.fc1(x)))
        x = F.relu(self.bn2(self.dropout(self.fc2(x))))
        x = self.fc3(x)
        return F.log_softmax(x, dim=1), trans, trans_feat

# The dense counterpart of PointNetCls: it has more conv layers and, instead of one
# label per cloud, predicts a label for every point -- i.e. segmentation
class PointNetDenseCls(nn.Module):
    def __init__(self, k=2, feature_transform=False):
        super(PointNetDenseCls, self).__init__()
        self.k = k
        self.feature_transform = feature_transform
        self.feat = PointNetfeat(global_feat=False, feature_transform=feature_transform)
        self.conv1 = torch.nn.Conv1d(1088, 512, 1)
        self.conv2 = torch.nn.Conv1d(512, 256, 1)
        self.conv3 = torch.nn.Conv1d(256, 128, 1)
        self.conv4 = torch.nn.Conv1d(128, self.k, 1)
        self.bn1 = nn.BatchNorm1d(512)
        self.bn2 = nn.BatchNorm1d(256)
        self.bn3 = nn.BatchNorm1d(128)

    def forward(self, x):
        batchsize = x.size()[0]
        n_pts = x.size()[2]
        x, trans, trans_feat = self.feat(x)
        x = F.relu(self.bn1(self.conv1(x)))
        x = F.relu(self.bn2(self.conv2(x)))
        x = F.relu(self.bn3(self.conv3(x)))
        x = self.conv4(x)
        x = x.transpose(2, 1).contiguous()
        x = F.log_softmax(x.view(-1, self.k), dim=-1)
        x = x.view(batchsize, n_pts, self.k)
        return x, trans, trans_feat
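A note on the 1088 input channels of conv1: they come from concatenating the 64-dim per-point features of PointNetfeat with the 1024-dim global feature repeated for every point (64 + 1024 = 1088). A quick stand-alone check with random tensors:

# verify that repeated-global + per-point features give 1088 channels
import torch

pointfeat = torch.rand(32, 64, 2500)                     # per-point features
globalfeat = torch.rand(32, 1024, 1).repeat(1, 1, 2500)  # global feature copied to every point
print(torch.cat([globalfeat, pointfeat], 1).shape)       # torch.Size([32, 1088, 2500])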
# Definition of feature_transform_regularizer: the regularizer pushes the predicted
# transform matrix to stay close to the identity (i.e. close to an orthogonal map)
def feature_transform_regularizer(trans):
    d = trans.size()[1]
    batchsize = trans.size()[0]
    I = torch.eye(d)[None, :, :]
    if trans.is_cuda:
        I = I.cuda()
    loss = torch.mean(torch.norm(torch.bmm(trans, trans.transpose(2, 1)) - I, dim=(1, 2)))
    return loss
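This corresponds to the regularization term in the PointNet paper, which encourages the predicted feature transform A to be close to an orthogonal matrix:

    L_{reg} = \left\lVert I - A A^{\mathsf{T}} \right\rVert_F^2

Note that the code averages the unsquared Frobenius norm over the batch rather than squaring it as the paper writes; either way, the training scripts weight this term by 0.001.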
# The __main__ block instantiates every model on fake data as a smoke test
if __name__ == '__main__':
    # Create a 32 x 3 x 2500 tensor to simulate a single batch -- a handy habit for
    # catching shape bugs early. Here 3 is the attribute dimension (x, y, z) and
    # 2500 is n_points.
    sim_data = Variable(torch.rand(32, 3, 2500))
    trans = STN3d()
    out = trans(sim_data)  # run it through the spatial transformer first
    print('stn', out.size())
    print('loss', feature_transform_regularizer(out))

    # this time with 64 attributes, simulating the intermediate 64-dim point features
    sim_data_64d = Variable(torch.rand(32, 64, 2500))
    trans = STNkd(k=64)
    out = trans(sim_data_64d)
    print('stn64d', out.size())
    print('loss', feature_transform_regularizer(out))

    # check both configurations of PointNetfeat
    pointfeat = PointNetfeat(global_feat=True)
    out, _, _ = pointfeat(sim_data)
    print('global feat', out.size())

    pointfeat = PointNetfeat(global_feat=False)
    out, _, _ = pointfeat(sim_data)
    print('point feat', out.size())

    # check the classification and segmentation heads
    cls = PointNetCls(k=5)        # 5-way classification
    out, _, _ = cls(sim_data)
    print('class', out.size())

    seg = PointNetDenseCls(k=3)   # 3-part segmentation
    out, _, _ = seg(sim_data)
    print('seg', out.size())

(2) The dataset.py file

from __future__ import print_function
import torch.utils.data as data
import os
import os.path
import torch
import numpy as np
import sys
from tqdm import tqdm 
import json
from plyfile import PlyData, PlyElement


def get_segmentation_classes(root):
    catfile = os.path.join(root, 'synsetoffset2category.txt')
    cat = {}
    meta = {}

    with open(catfile, 'r') as f:
        for line in f:
            ls = line.strip().split()
            cat[ls[0]] = ls[1]

    for item in cat:
        dir_seg = os.path.join(root, cat[item], 'points_label')
        dir_point = os.path.join(root, cat[item], 'points')
        fns = sorted(os.listdir(dir_point))
        meta[item] = []
        for fn in fns:
            token = (os.path.splitext(os.path.basename(fn))[0])
            meta[item].append((os.path.join(dir_point, token + '.pts'), os.path.join(dir_seg, token + '.seg')))

    with open(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../misc/num_seg_classes.txt'), 'w') as f:
        for item in cat:
            datapath = []
            num_seg_classes = 0
            for fn in meta[item]:
                datapath.append((item, fn[0], fn[1]))
            for i in tqdm(range(len(datapath))):
                l = len(np.unique(np.loadtxt(datapath[i][-1]).astype(np.uint8)))
                if l > num_seg_classes:
                    num_seg_classes = l
            print("category {} num segmentation classes {}".format(item, num_seg_classes))
            f.write("{}\t{}\n".format(item, num_seg_classes))


def gen_modelnet_id(root):
    classes = []
    with open(os.path.join(root, 'train.txt'), 'r') as f:
        for line in f:
            classes.append(line.strip().split('/')[0])
    classes = np.unique(classes)
    with open(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../misc/modelnet_id.txt'), 'w') as f:
        for i in range(len(classes)):
            f.write('{}\t{}\n'.format(classes[i], i))
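For orientation, synsetoffset2category.txt maps a human-readable category name to the synset-offset folder that holds its data, one tab-separated pair per line. It looks roughly like this (entries illustrative):

Airplane	02691156
Chair	03001627
Table	04379243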
# In total: the two helper functions above, plus two Dataset definitions -- one for
# ShapeNet and one for ModelNet
class ShapeNetDataset(data.Dataset):
    def __init__(self,
                 root,
                 npoints=2500,
                 classification=False,
                 class_choice=None,
                 split='train',
                 data_augmentation=True):
        self.npoints = npoints
        self.root = root
        self.catfile = os.path.join(self.root, 'synsetoffset2category.txt')
        self.cat = {}
        self.data_augmentation = data_augmentation
        self.classification = classification
        self.seg_classes = {}

        with open(self.catfile, 'r') as f:
            for line in f:
                ls = line.strip().split()
                self.cat[ls[0]] = ls[1]
        #print(self.cat)
        if not class_choice is None:
            self.cat = {k: v for k, v in self.cat.items() if k in class_choice}

        self.id2cat = {v: k for k, v in self.cat.items()}

        self.meta = {}
        splitfile = os.path.join(self.root, 'train_test_split', 'shuffled_{}_file_list.json'.format(split))
        #from IPython import embed; embed()
        filelist = json.load(open(splitfile, 'r'))
        for item in self.cat:
            self.meta[item] = []

        for file in filelist:
            _, category, uuid = file.split('/')
            if category in self.cat.values():
                self.meta[self.id2cat[category]].append((os.path.join(self.root, category, 'points', uuid + '.pts'),
                                                         os.path.join(self.root, category, 'points_label', uuid + '.seg')))

        self.datapath = []
        for item in self.cat:
            for fn in self.meta[item]:
                self.datapath.append((item, fn[0], fn[1]))

        self.classes = dict(zip(sorted(self.cat), range(len(self.cat))))
        print(self.classes)
        with open(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../misc/num_seg_classes.txt'), 'r') as f:
            for line in f:
                ls = line.strip().split()
                self.seg_classes[ls[0]] = int(ls[1])
        self.num_seg_classes = self.seg_classes[list(self.cat.keys())[0]]
        print(self.seg_classes, self.num_seg_classes)

    def __getitem__(self, index):
        fn = self.datapath[index]
        cls = self.classes[self.datapath[index][0]]
        point_set = np.loadtxt(fn[1]).astype(np.float32)
        seg = np.loadtxt(fn[2]).astype(np.int64)
        #print(point_set.shape, seg.shape)

        choice = np.random.choice(len(seg), self.npoints, replace=True)  # resample
        point_set = point_set[choice, :]

        point_set = point_set - np.expand_dims(np.mean(point_set, axis=0), 0)  # center
        dist = np.max(np.sqrt(np.sum(point_set ** 2, axis=1)), 0)
        point_set = point_set / dist  # scale

        if self.data_augmentation:
            theta = np.random.uniform(0, np.pi * 2)
            rotation_matrix = np.array([[np.cos(theta), -np.sin(theta)], [np.sin(theta), np.cos(theta)]])
            point_set[:, [0, 2]] = point_set[:, [0, 2]].dot(rotation_matrix)  # random rotation
            point_set += np.random.normal(0, 0.02, size=point_set.shape)  # random jitter

        seg = seg[choice]
        point_set = torch.from_numpy(point_set)
        seg = torch.from_numpy(seg)
        cls = torch.from_numpy(np.array([cls]).astype(np.int64))

        if self.classification:
            return point_set, cls
        else:
            return point_set, seg

    def __len__(self):
        return len(self.datapath)


class ModelNetDataset(data.Dataset):
    def __init__(self,
                 root,
                 npoints=2500,
                 split='train',
                 data_augmentation=True):
        self.npoints = npoints
        self.root = root
        self.split = split
        self.data_augmentation = data_augmentation
        self.fns = []
        with open(os.path.join(root, '{}.txt'.format(self.split)), 'r') as f:
            for line in f:
                self.fns.append(line.strip())

        self.cat = {}
        with open(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../misc/modelnet_id.txt'), 'r') as f:
            for line in f:
                ls = line.strip().split()
                self.cat[ls[0]] = int(ls[1])

        print(self.cat)
        self.classes = list(self.cat.keys())

    def __getitem__(self, index):
        fn = self.fns[index]
        cls = self.cat[fn.split('/')[0]]
        with open(os.path.join(self.root, fn), 'rb') as f:
            plydata = PlyData.read(f)
        pts = np.vstack([plydata['vertex']['x'], plydata['vertex']['y'], plydata['vertex']['z']]).T
        choice = np.random.choice(len(pts), self.npoints, replace=True)
        point_set = pts[choice, :]

        point_set = point_set - np.expand_dims(np.mean(point_set, axis=0), 0)  # center
        dist = np.max(np.sqrt(np.sum(point_set ** 2, axis=1)), 0)
        point_set = point_set / dist  # scale

        if self.data_augmentation:
            theta = np.random.uniform(0, np.pi * 2)
            rotation_matrix = np.array([[np.cos(theta), -np.sin(theta)], [np.sin(theta), np.cos(theta)]])
            point_set[:, [0, 2]] = point_set[:, [0, 2]].dot(rotation_matrix)  # random rotation
            point_set += np.random.normal(0, 0.02, size=point_set.shape)  # random jitter

        point_set = torch.from_numpy(point_set.astype(np.float32))
        cls = torch.from_numpy(np.array([cls]).astype(np.int64))
        return point_set, cls

    def __len__(self):
        return len(self.fns)
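Both __getitem__ methods share the same preprocessing recipe: resample to npoints, center, scale into the unit sphere, then (during training) rotate randomly about the vertical axis and add Gaussian jitter. A self-contained sketch of just that recipe on a random cloud:

# stand-alone sketch of the normalization + augmentation applied in __getitem__
import numpy as np

point_set = np.random.rand(2500, 3).astype(np.float32)

point_set = point_set - np.mean(point_set, axis=0)                       # center
point_set = point_set / np.max(np.sqrt(np.sum(point_set ** 2, axis=1)))  # scale to unit sphere

theta = np.random.uniform(0, 2 * np.pi)
rot = np.array([[np.cos(theta), -np.sin(theta)],
                [np.sin(theta), np.cos(theta)]])
point_set[:, [0, 2]] = point_set[:, [0, 2]].dot(rot)                     # rotate in the xz-plane (about y)
point_set += np.random.normal(0, 0.02, size=point_set.shape)             # jitter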
# Self-test for the dataset classes; hopefully bug-free
if __name__ == '__main__':
    dataset = sys.argv[1]
    datapath = sys.argv[2]

    if dataset == 'shapenet':
        d = ShapeNetDataset(root=datapath, class_choice=['Chair'])
        print(len(d))
        ps, seg = d[0]
        print(ps.size(), ps.type(), seg.size(), seg.type())

        d = ShapeNetDataset(root=datapath, classification=True)
        print(len(d))
        ps, cls = d[0]
        print(ps.size(), ps.type(), cls.size(), cls.type())
        # get_segmentation_classes(datapath)

    if dataset == 'modelnet':
        gen_modelnet_id(datapath)
        d = ModelNetDataset(root=datapath)
        print(len(d))
        print(d[0])
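Assuming the datasets have already been downloaded locally, the self-test can be invoked roughly like this (the paths are placeholders):

python dataset.py shapenet /path/to/shapenet_part_dataset
python dataset.py modelnet /path/to/modelnet40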

(3) The train_classification.py file

from __future__ import print_function
import argparse
import os
import random
import torch
import torch.nn.parallel
import torch.optim as optim
import torch.utils.data
from pointnet.dataset import ShapeNetDataset, ModelNetDataset
from pointnet.model import PointNetCls, feature_transform_regularizer
import torch.nn.functional as F
from tqdm import tqdm

# There is actually not much code in the classification training script.
# Part 1: parse the needed global options -- batchSize, num_points, nepoch, outf,
# model, dataset, dataset_type and feature_transform
parser = argparse.ArgumentParser()
parser.add_argument('--batchSize', type=int, default=32, help='input batch size')
parser.add_argument('--num_points', type=int, default=2500, help='number of input points')
parser.add_argument('--workers', type=int, help='number of data loading workers', default=4)
parser.add_argument('--nepoch', type=int, default=250, help='number of epochs to train for')
parser.add_argument('--outf', type=str, default='cls', help='output folder')
parser.add_argument('--model', type=str, default='', help='model path')
parser.add_argument('--dataset', type=str, required=True, help="dataset path")
parser.add_argument('--dataset_type', type=str, default='shapenet', help="dataset type shapenet|modelnet40")
parser.add_argument('--feature_transform', action='store_true', help="use feature transform")

opt = parser.parse_args()
print(opt)

# Part 2: a helper that wraps a string in ANSI codes so it prints in blue
blue = lambda x: '\033[94m' + x + '\033[0m'

# Draw a random integer and use it to seed both Python's random module and PyTorch's torch.manual_seed
opt.manualSeed = random.randint(1, 10000)  # fix seed
print("Random Seed: ", opt.manualSeed)
random.seed(opt.manualSeed)
torch.manual_seed(opt.manualSeed)

# Build the matching dataset and dataloader according to dataset_type
if opt.dataset_type == 'shapenet':
    dataset = ShapeNetDataset(
        root=opt.dataset,
        classification=True,
        npoints=opt.num_points)

    test_dataset = ShapeNetDataset(
        root=opt.dataset,
        classification=True,
        split='test',
        npoints=opt.num_points,
        data_augmentation=False)
elif opt.dataset_type == 'modelnet40':
    dataset = ModelNetDataset(
        root=opt.dataset,
        npoints=opt.num_points,
        split='trainval')

    test_dataset = ModelNetDataset(
        root=opt.dataset,
        split='test',
        npoints=opt.num_points,
        data_augmentation=False)
else:
    exit('wrong dataset type')

dataloader = torch.utils.data.DataLoader(
    dataset,
    batch_size=opt.batchSize,
    shuffle=True,
    num_workers=int(opt.workers))

testdataloader = torch.utils.data.DataLoader(
    test_dataset,
    batch_size=opt.batchSize,
    shuffle=True,
    num_workers=int(opt.workers))

print(len(dataset), len(test_dataset))
num_classes = len(dataset.classes)
print('classes', num_classes)

try:
    os.makedirs(opt.outf)
except OSError:
    pass

# Instantiate the classifier: num_classes becomes the final k, and the flag in opt
# decides whether the feature transform is used
classifier = PointNetCls(k=num_classes, feature_transform=opt.feature_transform)

if opt.model != '':
    classifier.load_state_dict(torch.load(opt.model))  # if opt.model is non-empty, load pretrained weights into the model

# Set up the optimizer and scheduler, and move the model onto the GPU with classifier.cuda()
optimizer = optim.Adam(classifier.parameters(), lr=0.001, betas=(0.9, 0.999))
scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=20, gamma=0.5)   # the scheduler wraps the optimizer and halves the lr every 20 epochs
classifier.cuda()

num_batch = len(dataset) / opt.batchSize

# Start training the model
for epoch in range(opt.nepoch):
    scheduler.step()                          # advance the learning-rate schedule once per epoch
    for i, data in enumerate(dataloader, 0):  # enumerating the dataloader yields one batch at a time
        points, target = data
        target = target[:, 0]
        points = points.transpose(2, 1)
        points, target = points.cuda(), target.cuda()
        optimizer.zero_grad()
        classifier = classifier.train()
        pred, trans, trans_feat = classifier(points)
        loss = F.nll_loss(pred, target)
        if opt.feature_transform:
            loss += feature_transform_regularizer(trans_feat) * 0.001
        loss.backward()
        optimizer.step()
        pred_choice = pred.data.max(1)[1]
        correct = pred_choice.eq(target.data).cpu().sum()
        print('[%d: %d/%d] train loss: %f accuracy: %f' % (epoch, i, num_batch, loss.item(), correct.item() / float(opt.batchSize)))

        # every 10 batches, evaluate one test batch and print its loss and accuracy
        if i % 10 == 0:
            j, data = next(enumerate(testdataloader, 0))
            points, target = data
            target = target[:, 0]
            points = points.transpose(2, 1)
            points, target = points.cuda(), target.cuda()
            classifier = classifier.eval()
            pred, _, _ = classifier(points)
            loss = F.nll_loss(pred, target)
            pred_choice = pred.data.max(1)[1]
            correct = pred_choice.eq(target.data).cpu().sum()
            print('[%d: %d/%d] %s loss: %f accuracy: %f' % (epoch, i, num_batch, blue('test'), loss.item(), correct.item() / float(opt.batchSize)))

    # save a .pth checkpoint after every training epoch
    torch.save(classifier.state_dict(), '%s/cls_model_%d.pth' % (opt.outf, epoch))
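One caveat: the script calls scheduler.step() at the top of the epoch, the old pre-1.1 convention; newer PyTorch versions warn and expect it after the epoch's optimizer steps. A minimal sketch of that ordering, with a stand-in model rather than the author's code:

# sketch of the loop ordering newer PyTorch expects; nn.Linear stands in for PointNetCls
import torch
import torch.nn as nn
import torch.optim as optim

model = nn.Linear(3, 5)
optimizer = optim.Adam(model.parameters(), lr=0.001, betas=(0.9, 0.999))
scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=20, gamma=0.5)

for epoch in range(3):
    for _ in range(4):  # stand-in for iterating the dataloader
        x, target = torch.rand(8, 3), torch.randint(0, 5, (8,))
        optimizer.zero_grad()
        loss = nn.functional.cross_entropy(model(x), target)
        loss.backward()
        optimizer.step()
    scheduler.step()    # after the optimizer steps, once per epoch
    print(epoch, scheduler.get_last_lr())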
# Evaluate the trained model on the full test dataloader
total_correct = 0
total_testset = 0
for i, data in tqdm(enumerate(testdataloader, 0)):
    points, target = data
    target = target[:, 0]
    points = points.transpose(2, 1)
    points, target = points.cuda(), target.cuda()
    classifier = classifier.eval()
    pred, _, _ = classifier(points)
    pred_choice = pred.data.max(1)[1]
    correct = pred_choice.eq(target.data).cpu().sum()
    total_correct += correct.item()
    total_testset += points.size()[0]

print("final accuracy {}".format(total_correct / float(total_testset)))
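The accuracy bookkeeping used throughout (max, eq, sum) is easy to check in isolation; a tiny worked example with fake log-probabilities:

# tiny worked example of the pred_choice / correct bookkeeping
import torch

pred = torch.log_softmax(torch.rand(4, 5), dim=1)  # fake log-probabilities: 4 samples, 5 classes
target = torch.tensor([0, 2, 2, 4])
pred_choice = pred.max(1)[1]                       # argmax class per sample
correct = pred_choice.eq(target).sum().item()
print(correct / float(target.size(0)))             # fraction of correct predictions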

(4) The train_segmentation.py file

from __future__ import print_function
import argparse
import os
import random
import torch
import torch.nn.parallel
import torch.optim as optim
import torch.utils.data
from pointnet.dataset import ShapeNetDataset
from pointnet.model import PointNetDenseCls, feature_transform_regularizer
import torch.nn.functional as F
from tqdm import tqdm
import numpy as np

# As before, start by parsing the extra hyperparameters: batchSize, nepoch, outf,
# model, dataset, class_choice and feature_transform
parser = argparse.ArgumentParser()
parser.add_argument('--batchSize', type=int, default=32, help='input batch size')
parser.add_argument('--workers', type=int, help='number of data loading workers', default=4)
parser.add_argument('--nepoch', type=int, default=25, help='number of epochs to train for')
parser.add_argument('--outf', type=str, default='seg', help='output folder')
parser.add_argument('--model', type=str, default='', help='model path')
parser.add_argument('--dataset', type=str, required=True, help="dataset path")
parser.add_argument('--class_choice', type=str, default='Chair', help="class_choice")
parser.add_argument('--feature_transform', action='store_true', help="use feature transform")

opt = parser.parse_args()
print(opt)

# Fix the random seed
opt.manualSeed = random.randint(1, 10000)  # fix seed
print("Random Seed: ", opt.manualSeed)
random.seed(opt.manualSeed)
torch.manual_seed(opt.manualSeed)

# Set up the dataset and dataloader
dataset = ShapeNetDataset(
    root=opt.dataset,
    classification=False,
    class_choice=[opt.class_choice])
dataloader = torch.utils.data.DataLoader(
    dataset,
    batch_size=opt.batchSize,
    shuffle=True,
    num_workers=int(opt.workers))

test_dataset = ShapeNetDataset(
    root=opt.dataset,
    classification=False,
    class_choice=[opt.class_choice],
    split='test',
    data_augmentation=False)
testdataloader = torch.utils.data.DataLoader(
    test_dataset,
    batch_size=opt.batchSize,
    shuffle=True,
    num_workers=int(opt.workers))

print(len(dataset), len(test_dataset))
num_classes = dataset.num_seg_classes
print('classes', num_classes)
try:
    os.makedirs(opt.outf)
except OSError:
    pass

blue = lambda x: '\033[94m' + x + '\033[0m'

# Instantiate the dense classifier used for segmentation
classifier = PointNetDenseCls(k=num_classes, feature_transform=opt.feature_transform)

if opt.model != '':
    classifier.load_state_dict(torch.load(opt.model))

optimizer = optim.Adam(classifier.parameters(), lr=0.001, betas=(0.9, 0.999))
scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=20, gamma=0.5)
classifier.cuda()

num_batch = len(dataset) / opt.batchSize

# Compared with the plain classifier's training loop, the segmentation version of
# the dense classifier differs mainly in how pred and target are reshaped; the
# final benchmark loop below simply skips the loss computation
for epoch in range(opt.nepoch):
    scheduler.step()
    for i, data in enumerate(dataloader, 0):  # only the handling of target differs from classification
        points, target = data
        points = points.transpose(2, 1)
        points, target = points.cuda(), target.cuda()
        optimizer.zero_grad()
        classifier = classifier.train()
        pred, trans, trans_feat = classifier(points)
        pred = pred.view(-1, num_classes)
        target = target.view(-1, 1)[:, 0] - 1
        #print(pred.size(), target.size())
        loss = F.nll_loss(pred, target)
        if opt.feature_transform:
            loss += feature_transform_regularizer(trans_feat) * 0.001
        loss.backward()
        optimizer.step()
        pred_choice = pred.data.max(1)[1]
        correct = pred_choice.eq(target.data).cpu().sum()
        print('[%d: %d/%d] train loss: %f accuracy: %f' % (epoch, i, num_batch, loss.item(), correct.item() / float(opt.batchSize * 2500)))

        if i % 10 == 0:
            j, data = next(enumerate(testdataloader, 0))
            points, target = data
            points = points.transpose(2, 1)
            points, target = points.cuda(), target.cuda()
            classifier = classifier.eval()
            pred, _, _ = classifier(points)
            pred = pred.view(-1, num_classes)
            target = target.view(-1, 1)[:, 0] - 1
            loss = F.nll_loss(pred, target)
            pred_choice = pred.data.max(1)[1]
            correct = pred_choice.eq(target.data).cpu().sum()
            print('[%d: %d/%d] %s loss: %f accuracy: %f' % (epoch, i, num_batch, blue('test'), loss.item(), correct.item() / float(opt.batchSize * 2500)))

    torch.save(classifier.state_dict(), '%s/seg_model_%s_%d.pth' % (opt.outf, opt.class_choice, epoch))
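The `- 1` on target is there because the per-point part labels in the ShapeNet .seg files are evidently 1-based (the benchmark loop below subtracts 1 as well), while F.nll_loss expects 0-based class indices. A one-liner makes the shift concrete:

# illustration of the 1-based -> 0-based label shift applied to target
import torch

target = torch.tensor([[1, 2, 3], [1, 1, 2]])  # fake per-point part labels for 2 shapes
print(target.view(-1, 1)[:, 0] - 1)            # tensor([0, 1, 2, 0, 0, 1])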
## benchmark mIOU
shape_ious = []
for i, data in tqdm(enumerate(testdataloader, 0)):
    points, target = data
    points = points.transpose(2, 1)
    points, target = points.cuda(), target.cuda()
    classifier = classifier.eval()
    pred, _, _ = classifier(points)
    pred_choice = pred.data.max(2)[1]

    pred_np = pred_choice.cpu().data.numpy()
    target_np = target.cpu().data.numpy() - 1

    for shape_idx in range(target_np.shape[0]):
        parts = range(num_classes)  # np.unique(target_np[shape_idx])
        part_ious = []
        for part in parts:
            I = np.sum(np.logical_and(pred_np[shape_idx] == part, target_np[shape_idx] == part))
            U = np.sum(np.logical_or(pred_np[shape_idx] == part, target_np[shape_idx] == part))
            if U == 0:
                iou = 1  # if the union of ground-truth and predicted points is empty, count the part IoU as 1
            else:
                iou = I / float(U)
            part_ious.append(iou)
        shape_ious.append(np.mean(part_ious))

print("mIOU for class {}: {}".format(opt.class_choice, np.mean(shape_ious)))
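The per-part IoU is just intersection over union of two boolean masks; a toy example with 5 points and 3 parts:

# toy example of the per-part IoU computation above
import numpy as np

pred   = np.array([0, 0, 1, 1, 2])  # predicted part per point
target = np.array([0, 1, 1, 1, 2])  # ground-truth part per point
for part in range(3):
    I = np.sum(np.logical_and(pred == part, target == part))
    U = np.sum(np.logical_or(pred == part, target == part))
    print(part, 1.0 if U == 0 else I / float(U))
# part 0: I=1, U=2 -> 0.50; part 1: I=2, U=3 -> 0.67; part 2: I=1, U=1 -> 1.00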

