1. Introduction
This post collects some of the existing 2D and 1D implementations of the deep residual shrinkage network (DRSN) and of residual neural networks (ResNet), and provides 1D ResNet and 1D DRSN code for the TensorFlow & Keras environment, together with usage examples.
2. Collected Resources
Deep residual shrinkage network (DRSN):
- Introduction: http://t.csdn.cn/DvnBL
- (PyTorch, 2D) https://cloud.tencent.com/developer/article/1813376
- (PyTorch, 2D) https://zhuanlan.zhihu.com/p/337346575
- (PyTorch, 2D) https://blog.csdn.net/weixin_47174159/article/details/115409058
- (TensorFlow 2.0 & Keras, 2D) https://blog.csdn.net/qq_36758914/article/details/109452735
- (PyTorch, 1D) https://github.com/liguge/Deep-Residual-Shrinkage-Networks-for-intelligent-fault-diagnosis-DRSN-
- (Official: TFLearn 1D, TensorFlow & Keras 2D, TFLearn 2D) https://github.com/zhao62/Deep-Residual-Shrinkage-Networks
Residual neural network (ResNet):
- (PyTorch, 1D) https://github.com/StChenHaoGitHub/1D-deeplearning-model-LeNet-AlexNet-ZFNet-VGG-GoogLeNet-ResNet-DenseNet
- (TensorFlow, 1D) https://github.com/ralasun/tensorflow-resnet-1d/blob/master/Resnet.ipynb
- (Keras, 2D) https://keras.io/api/applications/resnet/#resnet50v2-function
3. ResNet-1D
Modifications: following the style of the DRSN code, keras.applications.ResNet50V2[1] is taken as the base and its 2D operations are replaced with their 1D counterparts.
Code snippet: copy the code below, or download it from GitHub: https://github.com/M73ACat/ResNet-1D-DRSN-1D
Fig. 1 Example network structure (see reference [2])
from keras.layers import (Activation, Add, BatchNormalization, Conv1D, Dense,
                          GlobalAveragePooling1D, Input)
from keras.models import Model
from keras.optimizers import Nadam


def res_block(x, filters, block_nums, kernel_size=3, stride=1):
    """A residual block.
    Arguments:
        x: input tensor.
        filters: integer, filters of the bottleneck layer.
        block_nums: integer, number of stacked blocks.
        kernel_size: default 3, kernel size of the bottleneck layer.
        stride: default 1, stride of the first layer.
    Returns:
        Output tensor for the residual block.
    """
    for _ in range(block_nums):
        # Pre-activation (ResNet v2 style): BN + ReLU before the convolutions
        preact = BatchNormalization(epsilon=1.001e-5)(x)
        preact = Activation('relu')(preact)
        # Projection shortcut that matches the output channels and stride
        shortcut = Conv1D(4 * filters, 1, strides=stride, padding='same')(preact)
        # Bottleneck: 1x1 reduce -> kernel_size conv -> 1x1 expand
        x = Conv1D(filters, 1, strides=1, use_bias=False)(preact)
        x = BatchNormalization(epsilon=1.001e-5)(x)
        x = Activation('relu')(x)
        x = Conv1D(filters, kernel_size, strides=stride, use_bias=False, padding='same')(x)
        x = BatchNormalization(epsilon=1.001e-5)(x)
        x = Activation('relu')(x)
        x = Conv1D(4 * filters, 1)(x)
        x = Add()([shortcut, x])
    return x
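As a quick check of the 1D conversion, the short sketch below (an illustrative assumption, not part of the original code) builds a single res_block on a toy 128-point input; with stride=2 the sequence length is halved and the output has 4*filters channels. Input and Model are already imported above.

x_in = Input(shape=(128, 1))                           # toy input: length 128, one channel
y = res_block(x_in, filters=4, block_nums=1, stride=2)
Model(x_in, y).summary()                               # expected output shape: (None, 64, 16)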
Usage example (building the ResNet model shown in Fig. 1):
inputs = 2048
outputs = 8

x_input = Input(shape=(inputs, 1))
x = Conv1D(4, 3, 2, padding='same')(x_input)
x = res_block(x, filters=4, block_nums=1, stride=2)
x = res_block(x, filters=4, block_nums=3, stride=1)
x = res_block(x, filters=8, block_nums=1, stride=2)
x = res_block(x, filters=8, block_nums=3, stride=1)
x = res_block(x, filters=16, block_nums=1, stride=2)
x = res_block(x, filters=16, block_nums=3, stride=1)
x = BatchNormalization()(x)
x = Activation('relu')(x)
x = GlobalAveragePooling1D()(x)
x = Dense(outputs, activation='softmax')(x)

model = Model(inputs=x_input, outputs=x)
optimizers = Nadam(lr=1e-5)
model.compile(optimizer=optimizers, loss='categorical_crossentropy', metrics=['accuracy'])
model.summary()
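The data pipeline is outside the scope of this post, so the following is only a minimal training sketch on random placeholder arrays (the array shapes, batch size, and epoch count are assumptions; replace them with your own signals and one-hot labels):

import numpy as np
from keras.utils import to_categorical

# Placeholder data: 100 random signals of length 2048 with one channel,
# and random integer labels for 8 classes converted to one-hot vectors.
x_train = np.random.randn(100, inputs, 1)
y_train = to_categorical(np.random.randint(0, outputs, size=100), num_classes=outputs)

model.fit(x_train, y_train, batch_size=16, epochs=5, validation_split=0.2)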
4. DRSN-1D
Modifications: the official DRSN Keras 2D implementation[3] is taken as the base and its 2D operations are replaced with their 1D counterparts.
Code snippet: copy the code below, or download it from GitHub: https://github.com/M73ACat/ResNet-1D-DRSN-1D
import keras
from keras import backend as K
from keras.layers import (Activation, AveragePooling1D, BatchNormalization,
                          Conv1D, Dense, GlobalAveragePooling1D, Input)
from keras.layers.core import Lambda
from keras.models import Model
from keras.optimizers import Nadam
from keras.regularizers import l2


def abs_backend(inputs):
    return K.abs(inputs)

def expand_dim_backend(inputs):
    return K.expand_dims(inputs, 1)

def sign_backend(inputs):
    return K.sign(inputs)

def pad_backend(inputs, in_channels, out_channels):
    pad_dim = (out_channels - in_channels) // 2
    inputs = K.expand_dims(inputs)
    inputs = K.spatial_2d_padding(inputs, padding=((0, 0), (pad_dim, pad_dim)))
    return K.squeeze(inputs, -1)

def residual_shrinkage_block(incoming, nb_blocks, out_channels, downsample=False, downsample_strides=2):
    """A residual shrinkage block.
    Arguments:
        incoming: input tensor.
        nb_blocks: integer, number of stacked blocks.
        out_channels: integer, filters of the Conv1D layers.
        downsample: default False, whether to downsample.
        downsample_strides: default 2, stride of the first layer.
    Returns:
        Output tensor for the residual shrinkage block.
    """
    residual = incoming
    in_channels = incoming.get_shape().as_list()[-1]
    for _ in range(nb_blocks):
        identity = residual
        if not downsample:
            downsample_strides = 1
        residual = BatchNormalization()(residual)
        residual = Activation('relu')(residual)
        residual = Conv1D(out_channels, 3, strides=downsample_strides, padding='same',
                          kernel_initializer='he_normal', kernel_regularizer=l2(1e-4))(residual)
        residual = BatchNormalization()(residual)
        residual = Activation('relu')(residual)
        residual = Conv1D(out_channels, 3, padding='same',
                          kernel_initializer='he_normal', kernel_regularizer=l2(1e-4))(residual)
        # Calculate global means
        residual_abs = Lambda(abs_backend)(residual)
        abs_mean = GlobalAveragePooling1D()(residual_abs)
        # Calculate scaling coefficients
        scales = Dense(out_channels, activation=None, kernel_initializer='he_normal',
                       kernel_regularizer=l2(1e-4))(abs_mean)
        scales = BatchNormalization()(scales)
        scales = Activation('relu')(scales)
        scales = Dense(out_channels, activation='sigmoid', kernel_regularizer=l2(1e-4))(scales)
        scales = Lambda(expand_dim_backend)(scales)
        # Calculate thresholds
        thres = keras.layers.multiply([abs_mean, scales])
        # Soft thresholding
        sub = keras.layers.subtract([residual_abs, thres])
        zeros = keras.layers.subtract([sub, sub])
        n_sub = keras.layers.maximum([sub, zeros])
        residual = keras.layers.multiply([Lambda(sign_backend)(residual), n_sub])
        # Downsampling using a pool size of 1
        if downsample_strides > 1:
            identity = AveragePooling1D(pool_size=1, strides=2)(identity)
        # Zero-padding or Conv1D to match the number of channels
        if in_channels != out_channels:
            """ padding """
            identity = Lambda(pad_backend, arguments={'in_channels': in_channels,
                                                      'out_channels': out_channels})(identity)
            """ Conv1D """
            # identity = Conv1D(out_channels, 1, strides=1, padding='same')(identity)
        residual = keras.layers.add([residual, identity])
    return residual
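The core of each residual_shrinkage_block is channel-wise soft thresholding, where the threshold is the absolute mean of the feature map multiplied by a learned sigmoid scale. A minimal NumPy sketch of the soft-thresholding step itself (the values are made up purely for illustration):

import numpy as np

def soft_threshold(x, thres):
    # sign(x) * max(|x| - thres, 0): small-magnitude features are zeroed and
    # larger ones are shrunk towards zero, matching the Lambda/maximum ops above.
    return np.sign(x) * np.maximum(np.abs(x) - thres, 0.0)

features = np.array([-1.5, -0.2, 0.1, 0.8, 2.0])
print(soft_threshold(features, 0.5))   # -> [-1.  -0.   0.   0.3  1.5]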
Usage example (building the DRSN model shown in Fig. 1):
inputs = 2048
outputs = 8

x_input = Input(shape=(inputs, 1))
x = Conv1D(4, 3, 2, padding='same')(x_input)
x = residual_shrinkage_block(x, 1, 4, downsample=True)
x = residual_shrinkage_block(x, 3, 4, downsample=False)
x = residual_shrinkage_block(x, 1, 8, downsample=True)
x = residual_shrinkage_block(x, 3, 8, downsample=False)
x = residual_shrinkage_block(x, 1, 16, downsample=True)
x = residual_shrinkage_block(x, 3, 16, downsample=False)
x = BatchNormalization()(x)
x = Activation('relu')(x)
x = GlobalAveragePooling1D()(x)
x = Dense(outputs, activation='softmax')(x)

model = Model(inputs=x_input, outputs=x)
optimizers = Nadam(lr=1e-5)
model.compile(optimizer=optimizers, loss='categorical_crossentropy', metrics=['accuracy'])
model.summary()
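For completeness, a minimal inference sketch on random placeholder signals (the batch size and data are assumptions):

import numpy as np

x_test = np.random.randn(4, inputs, 1)   # 4 random signals of shape (length, channels)
probs = model.predict(x_test)            # (4, 8) softmax class probabilities
print(probs.argmax(axis=-1))             # predicted class index for each signal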
References
[1] https://keras.io/api/applications/resnet/#resnet50v2-function
[2] Zhao M., Zhong S., Fu X., et al. Deep Residual Shrinkage Networks for Fault Diagnosis[J]. IEEE Transactions on Industrial Informatics, 2020, 16(7): 4681-4690.
[3] https://github.com/zhao62/Deep-Residual-Shrinkage-Networks/blob/master/DRSN_Keras.py