dl----yolov4深度解析

news/2024/11/6 11:08:47/

在这里插入图片描述
yolov4关键参数:

[net]
batch=64    # 所有的图片分成all_num/batch个批次,每batch个样本(64)更新一次参数,尽量保证一个batch里面各个类别都能取到样本
subdivisions=64   # 决定每次送入显卡的图片数目 batch/subdivisions
width=608   # 图片宽度
height=608   # 图片高度
channels=3   # 图片通道数
momentum=0.949   # 动量参数,表示梯度下降到最优值的速度
decay=0.0005  # 权重衰减正则项,防止过拟合.
angle=0      #  图片角度变化
saturation = 1.5   # 图片饱和度
exposure = 1.5   #  图片曝光量
hue=.1     # 图片色调
learning_rate=0.0013  # 学习率,影响权值更新的速度
burn_in=1000   
max_batches = 500500  # 训练最大次数
policy=steps   # 学习率调整的策略
steps=400000,450000  # 学习率调整时间
scales=.1,.1     # 学习率调整倍数
#cutmix=1  # 数据增强cutmix
mosaic=0   # 数据增强mosaic

# :104x104 54:52x52 85:26x26 104:13x13 for 416

[convolutional]   # 利用32个大小为3*3*3的滤波器步长为1,填充值为1进行过滤然后用mish函数进行激活
batch_normalize=1  # 是否做BN
filters=32         # 输出特征图的数量
size=3             # 卷积核的尺寸
stride=1           # 做卷积运算的步长
pad=1              # 是否做pad
activation=mish    # 激活函数类型     #  (608 + 2 * 1 - 3)/ 1 + 1 = 608   # 608 * 608 * 32

# Downsample
[convolutional]  # 利用64个大小为3*3*3的滤波器步长为2,填充值为1进行过滤然后用mish函数进行激活
batch_normalize=1
filters=64
size=3
stride=2
pad=1
activation=mish     #  (608 + 2 * 1 - 3)/ 2 + 1 = 304   # 304 * 304 * 64

[convolutional]  # 利用64个大小为1*1*3的滤波器步长为1,填充值为1进行过滤然后用mish函数进行激活
batch_normalize=1
filters=64
size=1
stride=1
pad=1
activation=mish   # size=1时实际padding=size/2=0: (304 + 2 * 0 - 1) / 1 + 1 = 304   # 304 * 304 * 64

[route]
layers = -2   # 当属性layers只有一个值时,它会输出由该值索引的网络层的特征图  # 304 * 304 * 64

[convolutional]   # 利用64个大小为1*1*3的滤波器步长为1,填充值为1进行过滤然后用mish函数进行激活
batch_normalize=1
filters=64
size=1
stride=1
pad=1
activation=mish    # (304 + 2 * 0 - 1) / 1 + 1 = 304   # 304 * 304 * 64

[convolutional]   # 利用32个大小为1*1*3的滤波器步长为1,填充值为1进行过滤然后用mish函数进行激活
batch_normalize=1
filters=32
size=1
stride=1
pad=1
activation=mish   # (304 + 2 * 0 - 1) / 1 + 1 = 304   # 304 * 304 * 32

[convolutional]   # 利用64个大小为3*3*3的滤波器步长为1,填充值为1进行过滤然后用mish函数进行激活
batch_normalize=1
filters=64
size=3
stride=1
pad=1
activation=mish   # (304 + 2 * 1 - 3) / 1 + 1 = 304   # 304 * 304 * 64

[shortcut]   # shortcut 操作是类似 ResNet 的跨层连接
from=-3  # 参数 from 是 −3,意思是 shortcut 的输出是当前层与先前的倒数第三层相加而得到,通俗来讲就是 add 操作
activation=linear   # 304 * 304 * 64

[convolutional]    # 利用64个大小为1*1*3的滤波器步长为1,填充值为1进行过滤然后用mish函数进行激活
batch_normalize=1
filters=64
size=1
stride=1
pad=1
activation=mish   # (304 + 2 * 0 - 1) / 1 + 1 = 304   # 304 * 304 * 64

[route]    # 304 * 304 * 128
layers = -1,-7   # 当属性layers有两个值,就是将上一层和从当前层倒数第7层进行融合,大于两个值同理

[convolutional]       # 利用64个大小为1*1*3的滤波器步长为1,填充值为1进行过滤然后用mish函数进行激活
batch_normalize=1
filters=64
size=1
stride=1
pad=1
activation=mish    # (304 + 2 * 0 - 1) / 1 + 1 = 304   # 304 * 304 * 64

# Downsample
[convolutional]   # 利用128个大小为3*3*3的滤波器步长为2,填充值为1进行过滤然后用mish函数进行激活
batch_normalize=1
filters=128
size=3
stride=2
pad=1
activation=mish   # (304 + 2 * 1 - 3) / 2 + 1 = 152   # 152 * 152 * 128

[convolutional]
batch_normalize=1
filters=64
size=1
stride=1
pad=1
activation=mish

[route]
layers = -2

[convolutional]
batch_normalize=1
filters=64
size=1
stride=1
pad=1
activation=mish[convolutional]
batch_normalize=1
filters=64
size=1
stride=1
pad=1
activation=mish[convolutional]
batch_normalize=1
filters=64
size=3
stride=1
pad=1
activation=mish[shortcut]
from=-3
activation=linear[convolutional]
batch_normalize=1
filters=64
size=1
stride=1
pad=1
activation=mish[convolutional]
batch_normalize=1
filters=64
size=3
stride=1
pad=1
activation=mish[shortcut]
from=-3
activation=linear[convolutional]
batch_normalize=1
filters=64
size=1
stride=1
pad=1
activation=mish[route]
layers = -1,-10[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=mish

# Downsample
[convolutional]
batch_normalize=1
filters=256
size=3
stride=2
pad=1
activation=mish[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=mish[route]
layers = -2[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=mish[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=mish[convolutional]
batch_normalize=1
filters=128
size=3
stride=1
pad=1
activation=mish[shortcut]
from=-3
activation=linear[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=mish[convolutional]
batch_normalize=1
filters=128
size=3
stride=1
pad=1
activation=mish[shortcut]
from=-3
activation=linear[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=mish[convolutional]
batch_normalize=1
filters=128
size=3
stride=1
pad=1
activation=mish[shortcut]
from=-3
activation=linear[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=mish[convolutional]
batch_normalize=1
filters=128
size=3
stride=1
pad=1
activation=mish[shortcut]
from=-3
activation=linear[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=mish[convolutional]
batch_normalize=1
filters=128
size=3
stride=1
pad=1
activation=mish[shortcut]
from=-3
activation=linear[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=mish[convolutional]
batch_normalize=1
filters=128
size=3
stride=1
pad=1
activation=mish[shortcut]
from=-3
activation=linear[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=mish[convolutional]
batch_normalize=1
filters=128
size=3
stride=1
pad=1
activation=mish[shortcut]
from=-3
activation=linear[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=mish[convolutional]
batch_normalize=1
filters=128
size=3
stride=1
pad=1
activation=mish[shortcut]
from=-3
activation=linear[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=mish[route]
layers = -1,-28[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=mish

# Downsample
[convolutional]
batch_normalize=1
filters=512
size=3
stride=2
pad=1
activation=mish[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=mish[route]
layers = -2[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=mish[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=mish[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=mish[shortcut]
from=-3
activation=linear[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=mish[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=mish[shortcut]
from=-3
activation=linear[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=mish[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=mish[shortcut]
from=-3
activation=linear[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=mish[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=mish[shortcut]
from=-3
activation=linear[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=mish[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=mish[shortcut]
from=-3
activation=linear[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=mish[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=mish[shortcut]
from=-3
activation=linear[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=mish[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=mish[shortcut]
from=-3
activation=linear[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=mish[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=mish[shortcut]
from=-3
activation=linear[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=mish[route]
layers = -1,-28[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=mish

# Downsample
[convolutional]
batch_normalize=1
filters=1024
size=3
stride=2
pad=1
activation=mish[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=mish[route]
layers = -2[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=mish[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=mish[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=mish[shortcut]
from=-3
activation=linear[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=mish[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=mish[shortcut]
from=-3
activation=linear[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=mish[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=mish[shortcut]
from=-3
activation=linear[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=mish[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=mish[shortcut]
from=-3
activation=linear[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=mish[route]
layers = -1,-16[convolutional]
batch_normalize=1
filters=1024
size=1
stride=1
pad=1
activation=mish

##########################

[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=1024
activation=leaky[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky

### SPP ###
[maxpool]
stride=1
size=5

[route]
layers=-2

[maxpool]
stride=1
size=9

[route]
layers=-4

[maxpool]
stride=1
size=13

[route]
layers=-1,-3,-5,-6
### End SPP ###
# 每个maxpool的padding=size/2

[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=1024
activation=leaky[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky

[upsample]
stride=2

[route]
layers = 85

[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky

[route]
layers = -1, -3

[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=512
activation=leaky[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=512
activation=leaky[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky

[upsample]
stride=2

[route]
layers = 54

[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky

[route]
layers = -1, -3

[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=256
activation=leaky[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=256
activation=leaky[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky

##########################

[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=256
activation=leaky

[convolutional]
size=1
stride=1
pad=1
filters=33   # (classes + 5) * 3 = (6 + 5) * 3 = 33
activation=linear

[yolo]
mask = 0,1,2  #对应的anchors索引值
anchors =  23, 40,  25, 79,  44, 77,  30,124,  54,121,  42,197,  87,112,  76,193, 126,255
classes=6
num=9
jitter=.3
ignore_thresh = .7
truth_thresh = 1
scale_x_y = 1.2
iou_thresh=0.213
cls_normalizer=1.0
iou_normalizer=0.07
iou_loss=ciou
nms_kind=greedynms
beta_nms=0.6
max_delta=5

[route]
layers = -4

[convolutional]
batch_normalize=1
size=3
stride=2
pad=1
filters=256
activation=leaky

[route]
layers = -1, -16

[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=512
activation=leaky[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=512
activation=leaky[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=512
activation=leaky

[convolutional]
size=1
stride=1
pad=1
filters=33
activation=linear

[yolo]
mask = 3,4,5
#anchors = 12, 16, 19, 36, 40, 28, 36, 75, 76, 55, 72, 146, 142, 110, 192, 243, 459, 401
anchors =  23, 40,  25, 79,  44, 77,  30,124,  54,121,  42,197,  87,112,  76,193, 126,255
classes=6
num=9
jitter=.3
ignore_thresh = .7
truth_thresh = 1
scale_x_y = 1.1
iou_thresh=0.213
cls_normalizer=1.0
iou_normalizer=0.07
iou_loss=ciou
nms_kind=greedynms
beta_nms=0.6
max_delta=5

[route]
layers = -4

[convolutional]
batch_normalize=1
size=3
stride=2
pad=1
filters=512
activation=leaky

[route]
layers = -1, -37

[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=1024
activation=leaky[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=1024
activation=leaky[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=1024
activation=leaky

[convolutional]
size=1
stride=1
pad=1
filters=33
activation=linear

[yolo]
mask = 6,7,8
#anchors = 12, 16, 19, 36, 40, 28, 36, 75, 76, 55, 72, 146, 142, 110, 192, 243, 459, 401
anchors =  23, 40,  25, 79,  44, 77,  30,124,  54,121,  42,197,  87,112,  76,193, 126,255
classes=6
num=9
jitter=.3
ignore_thresh = .7
truth_thresh = 1
random=0
scale_x_y = 1.05
iou_thresh=0.213
cls_normalizer=1.0
iou_normalizer=0.07
iou_loss=ciou
nms_kind=greedynms
beta_nms=0.6
max_delta=5

http://www.ppmy.cn/news/169300.html

相关文章

大数据时代——生活、工作与思维的重大变革

最近读了维克托迈尔 – 舍恩伯格的《大数据时代》,觉得有不少收获,让我这个大数据的小白第一次理解了大数据。 作者是大数据的元老级先驱。 放一张帅照,膜拜下。 不过这本书我本人不推荐从头读一遍,因为书中的核心理念并不是特…

赶紧收藏!2023年成人高考【复习大纲】

▶高起点考试题型 高起点-语文 试卷满分为150分。考试用时120分钟。 >>>考试内容: 语言知识及运用、现代文阅读、古代诗文阅读和鉴赏、写作四个方面,考试以测试阅读和写作能力为重点。 >>>备考分析: 语文学习要注意平…

入职字节跳动那一天,我哭了(蘑菇街被裁,奋战7个月拿下offer)

前言 先说一下自己的个人情况,20届应届生,通过校招进入到了蘑菇街,然后一待就待了差不多2年多的时间,可惜的是去年8月份受疫情影响遇到了大裁员,而我也是其中一员。好在之前一直想去字节跳动,年尾就已经在…

买个24的显示器

我真正想要的是28的瀚视奇,但液晶电视与显示器合成的效果大打折扣 换成一个24的吧,BENQ 24宽屏幕,效果还可以,颜色太刺眼睛了。 在中关会转了N久也没能找到心仪的电纸书 sony rps 505 不过看了汉王的 电纸书,失望大于期望。 大大打…

显示屏种类(未完)

嵌入式之路,贵在日常点滴 ---阿杰在线送代码 常见的显示设备:LCD OLED 数码管 点阵屏 目录 屏幕分辨率(screen resolution) LCD、LED、IPS、OLED、AMOLED屏幕的区别 常用显示屏 LCD(Liquid Crystal Display,液晶显示器) OLED(Organic Light Emitti…

液晶显示器点距大全

液晶实际上只有一种分辨率,其他的分辨率都是差值计算出来的,无论文字还是图形,效果都打了折扣。 点距的大小直接决定了文档文字的大小,决定了图形的精细程度。专注于办公及文档网页浏览的用户,适宜选择点距较大的液晶…

session.upload_progress文件包含漏洞

session.upload_progress文件包含漏洞 前言 之前学习了该漏洞,但是没有做笔记,导致容易遗忘。在此用一个题目来理解session.upload_progress漏洞 基础知识 session存储 我们在phpinfo可以看到session的存储路径: 以下是一些session在lin…

项目上线出Bug:我踩过的4个大坑及事后反思

最近参与的拥有7大模块的系统项目,从去年11月开始开发,共5个月左右。 该项目是用JavaNode.js开发,开发人员含外包将近10位,测试人员A从头跟到尾,其他测试人员都是紧急时刻从其他项目临时调来,包含兄弟部门…