This article describes how to add the ASFF attention mechanism to FCOS; hopefully it serves as a useful reference for developers tackling the same problem.
Adding the ASFF weighted-fusion structure to FCOS
Here I add the ASFF attention mechanism to the FCOS architecture: simply replace fpn.py in FCOS with the code below.
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import torch
import torch.nn.functional as F
from torch import nn


class ASFF(nn.Module):
    def __init__(self, level, rfb=False, vis=False):
        super(ASFF, self).__init__()
        self.level = level
        # Original ASFF uses [512, 256, 256]; FCOS's FPN outputs 256 channels at every level.
        # self.dim = [512, 256, 256]
        self.dim = [256, 256, 256]
        self.inter_dim = self.dim[self.level]
        if level == 0:
            self.stride_level_1 = add_conv(256, self.inter_dim, 3, 2)
            self.stride_level_2 = add_conv(256, self.inter_dim, 3, 2)
            # self.expand = add_conv(self.inter_dim, 1024, 3, 1)
            self.expand = add_conv(self.inter_dim, 256, 3, 1)
        elif level == 1:
            # self.compress_level_0 = add_conv(512, self.inter_dim, 1, 1)
            self.compress_level_0 = add_conv(256, self.inter_dim, 1, 1)
            self.stride_level_2 = add_conv(256, self.inter_dim, 3, 2)
            # self.expand = add_conv(self.inter_dim, 512, 3, 1)
            self.expand = add_conv(self.inter_dim, 256, 3, 1)
        elif level == 2:
            # self.compress_level_0 = add_conv(512, self.inter_dim, 1, 1)
            self.compress_level_0 = add_conv(256, self.inter_dim, 1, 1)
            self.expand = add_conv(self.inter_dim, 256, 3, 1)

        # when adding rfb, we use half number of channels to save memory
        compress_c = 8 if rfb else 16

        self.weight_level_0 = add_conv(self.inter_dim, compress_c, 1, 1)
        self.weight_level_1 = add_conv(self.inter_dim, compress_c, 1, 1)
        self.weight_level_2 = add_conv(self.inter_dim, compress_c, 1, 1)

        self.weight_levels = nn.Conv2d(compress_c * 3, 3, kernel_size=1, stride=1, padding=0)
        self.vis = vis

    def forward(self, x_level_0, x_level_1, x_level_2):
        # Resize all three inputs to this level's resolution before fusing them.
        if self.level == 0:
            level_0_resized = x_level_0
            level_1_resized = self.stride_level_1(x_level_1)
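            # The original listing is cut off at this point; the remainder below follows the
            # reference ASFF implementation ("Learning Spatial Fusion for Single-Shot Object
            # Detection"), kept at 256 channels to match FCOS's FPN.
            level_2_downsampled_inter = F.max_pool2d(x_level_2, 3, stride=2, padding=1)
            level_2_resized = self.stride_level_2(level_2_downsampled_inter)
        elif self.level == 1:
            level_0_compressed = self.compress_level_0(x_level_0)
            level_0_resized = F.interpolate(level_0_compressed, scale_factor=2, mode='nearest')
            level_1_resized = x_level_1
            level_2_resized = self.stride_level_2(x_level_2)
        elif self.level == 2:
            level_0_compressed = self.compress_level_0(x_level_0)
            level_0_resized = F.interpolate(level_0_compressed, scale_factor=4, mode='nearest')
            level_1_resized = F.interpolate(x_level_1, scale_factor=2, mode='nearest')
            level_2_resized = x_level_2

        # Predict per-pixel fusion weights and normalize them across the three levels.
        level_0_weight_v = self.weight_level_0(level_0_resized)
        level_1_weight_v = self.weight_level_1(level_1_resized)
        level_2_weight_v = self.weight_level_2(level_2_resized)
        levels_weight_v = torch.cat((level_0_weight_v, level_1_weight_v, level_2_weight_v), 1)
        levels_weight = self.weight_levels(levels_weight_v)
        levels_weight = F.softmax(levels_weight, dim=1)

        # Weighted sum of the three resized feature maps, then expand back to 256 channels.
        fused_out_reduced = level_0_resized * levels_weight[:, 0:1, :, :] + \
                            level_1_resized * levels_weight[:, 1:2, :, :] + \
                            level_2_resized * levels_weight[:, 2:, :, :]
        out = self.expand(fused_out_reduced)

        if self.vis:
            return out, levels_weight, fused_out_reduced.sum(dim=1)
        else:
            return out


def add_conv(in_ch, out_ch, ksize, stride, leaky=True):
    # Conv-BN-activation helper used by ASFF above. The snippet calls it but does not show it;
    # this is the standard helper from the reference ASFF code.
    stage = nn.Sequential()
    pad = (ksize - 1) // 2
    stage.add_module('conv', nn.Conv2d(in_channels=in_ch, out_channels=out_ch,
                                       kernel_size=ksize, stride=stride,
                                       padding=pad, bias=False))
    stage.add_module('batch_norm', nn.BatchNorm2d(out_ch))
    if leaky:
        stage.add_module('leaky', nn.LeakyReLU(0.1))
    else:
        stage.add_module('relu6', nn.ReLU6(inplace=True))
    return stage

The original listing ends before showing how the three ASFF modules are attached to FCOS's FPN. Below is a minimal sketch of one way to wire them in; it assumes the existing FPN forward already returns the 256-channel pyramid features [P3, P4, P5, P6, P7] in that order, and the names FPNWithASFF and asff_level0/1/2 are my own illustration, not from the original post.

# A minimal wiring sketch (assumption, not from the original post): apply ASFF to the
# three main pyramid levels after the usual FPN forward. level 0 = coarsest (P5),
# level 2 = finest (P3), matching the resizing logic in ASFF.forward above.
class FPNWithASFF(nn.Module):
    def __init__(self, fpn):
        super(FPNWithASFF, self).__init__()
        self.fpn = fpn  # the existing FCOS FPN module
        self.asff_level0 = ASFF(level=0)
        self.asff_level1 = ASFF(level=1)
        self.asff_level2 = ASFF(level=2)

    def forward(self, x):
        # Assumed ordering: [P3, P4, P5, P6, P7], finest first, all with 256 channels.
        p3, p4, p5, p6, p7 = self.fpn(x)
        out_p5 = self.asff_level0(p5, p4, p3)
        out_p4 = self.asff_level1(p5, p4, p3)
        out_p3 = self.asff_level2(p5, p4, p3)
        return [out_p3, out_p4, out_p5, p6, p7]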
That concludes this article on adding the ASFF attention mechanism to FCOS. I hope it is helpful to other developers!