This article introduces SE_DenseNet + efficient memory (gradient checkpointing); hopefully it provides a useful reference for developers working on the same problem.
The SE_DenseNet design follows http://www.zhouyuangan.cn/2018/11/se_densenet-modify-densenet-with-champion-network-of-the-2017-classification-task-named-squeeze-and-excitation-network/
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.checkpoint as cp
from collections import OrderedDict
class SELayer(nn.Module):
    """Squeeze-and-Excitation block: learns one scalar gate per channel."""

    def __init__(self, channel, reduction=16):
        assert channel > reduction, \
            "Make sure your input channel is bigger than reduction, which equals {}".format(reduction)
        super(SELayer, self).__init__()
        self.avg_pool = nn.AdaptiveAvgPool2d(1)
        self.fc = nn.Sequential(
            nn.Linear(channel, channel // reduction),
            nn.ReLU(inplace=True),
            nn.Linear(channel // reduction, channel),
            nn.Sigmoid()
        )

    def forward(self, x):
        b, c, _, _ = x.size()
        # Squeeze: global average pool; excite: FC -> ReLU -> FC -> sigmoid.
        y = self.avg_pool(x).view(b, c)
        y = self.fc(y).view(b, c, 1, 1)
        return x * y


def _bn_function_factory(norm, relu, conv):
    def bn_function(*inputs):
        # Concatenate all preceding feature maps, then run the 1x1 bottleneck.
        concated_features = torch.cat(inputs, 1)
        bottleneck_output = conv(relu(norm(concated_features)))
        return bottleneck_output
    return bn_function


class _DenseLayer(nn.Module):
    def __init__(self, num_input_features, growth_rate, bn_size, drop_rate, efficient=False):
        super(_DenseLayer, self).__init__()
        self.add_module('norm1', nn.BatchNorm2d(num_input_features))
        self.add_module('relu1', nn.ReLU(inplace=True))
        self.add_module('conv1', nn.Conv2d(num_input_features, bn_size * growth_rate,
                                           kernel_size=1, stride=1, bias=False))
        self.add_module('norm2', nn.BatchNorm2d(bn_size * growth_rate))
        self.add_module('relu2', nn.ReLU(inplace=True))
        self.add_module('conv2', nn.Conv2d(bn_size * growth_rate, growth_rate,
                                           kernel_size=3, stride=1, padding=1, bias=False))
        self.drop_rate = drop_rate
        self.efficient = efficient

    def forward(self, *prev_features):
        bn_function = _bn_function_factory(self.norm1, self.relu1, self.conv1)
        if self.efficient and any(prev_feature.requires_grad for prev_feature in prev_features):
            # Recompute the bottleneck during backward instead of storing its activations.
            bottleneck_output = cp.checkpoint(bn_function, *prev_features)
        else:
            bottleneck_output = bn_function(*prev_features)
        new_features = self.conv2(self.relu2(self.norm2(bottleneck_output)))
        if self.drop_rate > 0:
            new_features = F.dropout(new_features, p=self.drop_rate, training=self.training)
        return new_features


class _Transition(nn.Sequential):
    def __init__(self, num_input_features, num_output_features):
        super(_Transition, self).__init__()
        self.add_module('norm', nn.BatchNorm2d(num_input_features))
        self.add_module('relu', nn.ReLU(inplace=True))
        self.add_module('conv', nn.Conv2d(num_input_features, num_output_features,
                                          kernel_size=1, stride=1, bias=False))
        self.add_module('pool', nn.AvgPool2d(kernel_size=2, stride=2))


class _DenseBlock(nn.Module):
    def __init__(self, num_layers, num_input_features, bn_size, growth_rate, drop_rate, efficient=True):
        super(_DenseBlock, self).__init__()
        for i in range(num_layers):
            layer = _DenseLayer(
                num_input_features + i * growth_rate,
                growth_rate=growth_rate,
                bn_size=bn_size,
                drop_rate=drop_rate,
                efficient=efficient,
            )
            self.add_module('denselayer%d' % (i + 1), layer)

    def forward(self, init_features):
        # Each layer receives the list of all previous feature maps.
        features = [init_features]
        for name, layer in self.named_children():
            new_features = layer(*features)
            features.append(new_features)
        return torch.cat(features, 1)


class DenseNet(nn.Module):
    r"""Densenet-BC model class, based on
    `"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`

    Args:
        growth_rate (int) - how many filters to add each layer (`k` in paper)
        block_config (list of 3 or 4 ints) - how many layers in each pooling block
        num_init_features (int) - the number of filters to learn in the first convolution layer
        bn_size (int) - multiplicative factor for number of bottleneck layers
            (i.e. bn_size * k features in the bottleneck layer)
        drop_rate (float) - dropout rate after each dense layer
        num_classes (int) - number of classification classes
        small_inputs (bool) - set to True if images are 32x32. Otherwise assumes images are larger.
        efficient (bool) - set to True to use checkpointing. Much more memory efficient, but slower.
    """

    def __init__(self, growth_rate=8, block_config=(2, 3, 4, 2), compression=0.5,
                 num_init_features=24, bn_size=4, drop_rate=0,
                 num_classes=10, small_inputs=True, efficient=False):
        super(DenseNet, self).__init__()
        assert 0 < compression <= 1, 'compression of densenet should be between 0 and 1'
        self.avgpool_size = 4  # 32x32 inputs shrink to 4x4 before this final pooling

        # First convolution (note: 4 input channels, matching the test input below)
        if small_inputs:
            self.features = nn.Sequential(OrderedDict([
                ('conv0', nn.Conv2d(4, num_init_features, kernel_size=3, stride=1, padding=1, bias=False)),
            ]))
        else:
            self.features = nn.Sequential(OrderedDict([
                ('conv0', nn.Conv2d(4, num_init_features, kernel_size=7, stride=2, padding=3, bias=False)),
            ]))
            self.features.add_module('norm0', nn.BatchNorm2d(num_init_features))
            self.features.add_module('relu0', nn.ReLU(inplace=True))
            self.features.add_module('pool0', nn.MaxPool2d(kernel_size=3, stride=2, padding=1,
                                                           ceil_mode=False))
        self.features.add_module('SELayer0a', SELayer(channel=num_init_features))

        # Each denseblock
        num_features = num_init_features
        for i, num_layers in enumerate(block_config):
            # Add a SELayer in front of each dense block
            self.features.add_module('SELayer%da' % (i + 1), SELayer(channel=num_features))
            block = _DenseBlock(
                num_layers=num_layers,
                num_input_features=num_features,
                bn_size=bn_size,
                growth_rate=growth_rate,
                drop_rate=drop_rate,
                efficient=efficient,
            )
            self.features.add_module('denseblock%d' % (i + 1), block)
            num_features = num_features + num_layers * growth_rate
            if i != len(block_config) - 1:
                # Add a SELayer behind each dense block, before its transition
                self.features.add_module('SELayer%db' % (i + 1), SELayer(channel=num_features))
                trans = _Transition(num_input_features=num_features,
                                    num_output_features=int(num_features * compression))
                self.features.add_module('transition%d' % (i + 1), trans)
                num_features = int(num_features * compression)

        # Final batch norm
        self.features.add_module('norm_final', nn.BatchNorm2d(num_features))
        # Add a SELayer after the final batch norm
        self.features.add_module('SELayer0b', SELayer(channel=num_features))

        # Linear layer
        self.classifier = nn.Linear(num_features, num_classes)

        # Initialization
        for name, param in self.named_parameters():
            if 'conv' in name and 'weight' in name:
                n = param.size(0) * param.size(2) * param.size(3)
                param.data.normal_().mul_(math.sqrt(2. / n))
            elif 'norm' in name and 'weight' in name:
                param.data.fill_(1)
            elif 'norm' in name and 'bias' in name:
                param.data.fill_(0)
            elif 'classifier' in name and 'bias' in name:
                param.data.fill_(0)

    def forward(self, x):
        features = self.features(x)
        out = F.relu(features, inplace=True)
        out = F.avg_pool2d(out, kernel_size=self.avgpool_size).view(features.size(0), -1)
        out = self.classifier(out)
        return out
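A quick smoke test: build the model with checkpointing enabled and push a random 4-channel batch through it. Since small_inputs=True expects 32x32 images (see the docstring above) and the final average pool uses a 4x4 kernel, the test input is 32x32.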
if __name__ == "__main__":
    model = DenseNet(num_classes=4, efficient=True)
    print(model)
    # small_inputs=True expects 32x32 inputs; anything that shrinks below
    # 4x4 would make the final 4x4 average pool fail.
    input = torch.randn(8, 4, 32, 32)
    outputs = model(input)
    print(outputs.size())  # torch.Size([8, 4])
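To see the squeeze-and-excitation step in isolation, here is a minimal sketch (the channel count and spatial size are illustrative choices of this write-up, not from the post above) showing that SELayer preserves the input shape and applies exactly one scalar gate per channel:

    # Minimal SELayer demo; sizes are illustrative.
    se = SELayer(channel=24, reduction=16)
    feat = torch.randn(2, 24, 8, 8)   # (batch, channels, H, W)
    out = se(feat)
    print(out.shape)                  # torch.Size([2, 24, 8, 8]): shape is preserved
    # out = feat * y with y of shape (b, c, 1, 1), so the scale is constant per channel:
    scale = (out / feat)[0, 0]
    print(torch.allclose(scale, torch.full_like(scale, scale[0, 0].item())))  # True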
Efficient memory

The memory savings come from the checkpointed bottleneck in _DenseLayer: when efficient=True, torch.utils.checkpoint does not keep the activations of the concatenation + BN + ReLU + 1x1 convolution alive between the forward and backward passes; it recomputes them during backward, trading extra compute for a smaller peak memory footprint. The layer is shown again below with that branch commented:
class _DenseLayer(nn.Module):
    def __init__(self, num_input_features, growth_rate, bn_size, drop_rate, efficient=False):
        super(_DenseLayer, self).__init__()
        self.add_module('norm1', nn.BatchNorm2d(num_input_features))
        self.add_module('relu1', nn.ReLU(inplace=True))
        self.add_module('conv1', nn.Conv2d(num_input_features, bn_size * growth_rate,
                                           kernel_size=1, stride=1, bias=False))
        self.add_module('norm2', nn.BatchNorm2d(bn_size * growth_rate))
        self.add_module('relu2', nn.ReLU(inplace=True))
        self.add_module('conv2', nn.Conv2d(bn_size * growth_rate, growth_rate,
                                           kernel_size=3, stride=1, padding=1, bias=False))
        self.drop_rate = drop_rate
        self.efficient = efficient

    def forward(self, *prev_features):
        bn_function = _bn_function_factory(self.norm1, self.relu1, self.conv1)
        if self.efficient and any(prev_feature.requires_grad for prev_feature in prev_features):
            # The memory-efficient path: the bottleneck's intermediate
            # activations are dropped after forward and recomputed in backward.
            bottleneck_output = cp.checkpoint(bn_function, *prev_features)
        else:
            bottleneck_output = bn_function(*prev_features)
        new_features = self.conv2(self.relu2(self.norm2(bottleneck_output)))
        if self.drop_rate > 0:
            new_features = F.dropout(new_features, p=self.drop_rate, training=self.training)
        return new_features
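To verify what checkpointing buys in practice, a rough comparison like the following can be run. This measurement harness is a sketch added by this write-up, not part of the original code, and it needs a CUDA device for the allocator statistics:

    def peak_memory_mib(efficient):
        # Uses the DenseNet class defined above; one forward/backward pass.
        model = DenseNet(num_classes=4, efficient=efficient).cuda()
        x = torch.randn(8, 4, 32, 32, device='cuda')
        torch.cuda.reset_peak_memory_stats()
        model(x).sum().backward()   # checkpointed bottlenecks are recomputed here
        return torch.cuda.max_memory_allocated() / 1024 ** 2

    if torch.cuda.is_available():
        print('efficient=False: %.1f MiB' % peak_memory_mib(False))
        print('efficient=True:  %.1f MiB' % peak_memory_mib(True))

Because the bottleneck activations are not stored, the efficient=True run should report a lower peak; the cost is one extra forward pass through each checkpointed bottleneck.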
Full code download link: https://download.csdn.net/download/shouhan6396/11165319
That concludes this article on SE_DenseNet + efficient memory; hopefully it is of some help.