# RecResNetvd.py
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import torch
from torch import nn

from torchocr.networks.CommonModules import HSwish


class ConvBNACT(nn.Module):
    """Conv2d + BatchNorm + optional activation ('relu', 'hard_swish', or None)."""

    def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, groups=1, act=None):
        super().__init__()
        self.conv = nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size,
                              stride=stride, padding=padding, groups=groups,
                              bias=False)
        self.bn = nn.BatchNorm2d(out_channels)
        if act == 'relu':
            self.act = nn.ReLU()
        elif act == 'hard_swish':
            self.act = HSwish()
        elif act is None:
            self.act = None
        else:
            raise NotImplementedError(f'activation "{act}" is not supported')

    def forward(self, x):
        x = self.conv(x)
        x = self.bn(x)
        if self.act is not None:
            x = self.act(x)
        return x


class ConvBNACTWithPool(nn.Module):
    """AvgPool + Conv2d + BatchNorm + optional ReLU.

    Average pooling before the 1x1 projection is the ResNet-vd downsampling
    trick: it avoids the information loss of a strided 1x1 conv.
    """

    def __init__(self, in_channels, out_channels, kernel_size, stride=1, groups=1, act=None):
        super().__init__()
        self.pool = nn.AvgPool2d(kernel_size=stride, stride=stride, padding=0, ceil_mode=True)
        self.conv = nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, stride=1,
                              padding=(kernel_size - 1) // 2,
                              groups=groups,
                              bias=False)
        self.bn = nn.BatchNorm2d(out_channels)
        self.act = None if act is None else nn.ReLU()

    def forward(self, x):
        x = self.pool(x)
        x = self.conv(x)
        x = self.bn(x)
        if self.act is not None:
            x = self.act(x)
        return x


class ShortCut(nn.Module):
    """Identity or projection shortcut.

    A 1x1 projection is used when the channel count or stride changes; the
    very first block uses a plain strided conv, later blocks use the pooled
    (ResNet-vd) variant.
    """

    def __init__(self, in_channels, out_channels, stride, name, if_first=False):
        super().__init__()
        assert name is not None, 'shortcut must have name'
        self.name = name
        if in_channels != out_channels or stride[0] != 1:
            if if_first:
                self.conv = ConvBNACT(in_channels=in_channels, out_channels=out_channels, kernel_size=1, stride=stride,
                                      padding=0, groups=1, act=None)
            else:
                self.conv = ConvBNACTWithPool(in_channels=in_channels, out_channels=out_channels, kernel_size=1,
                                              stride=stride, groups=1, act=None)
        elif if_first:
            self.conv = ConvBNACT(in_channels=in_channels, out_channels=out_channels, kernel_size=1, stride=stride,
                                  padding=0, groups=1, act=None)
        else:
            self.conv = None

    def forward(self, x):
        return x if self.conv is None else self.conv(x)


class BasicBlock(nn.Module):
    """Residual block with two 3x3 convs, as in ResNet-18/34."""

    def __init__(self, in_channels, out_channels, stride, if_first, name):
        super().__init__()
        assert name is not None, 'block must have name'
        self.name = name
        self.conv0 = ConvBNACT(in_channels=in_channels, out_channels=out_channels, kernel_size=3, stride=stride,
                               padding=1, groups=1, act='relu')
        self.conv1 = ConvBNACT(in_channels=out_channels, out_channels=out_channels, kernel_size=3, stride=1, padding=1,
                               groups=1, act=None)
        self.shortcut = ShortCut(in_channels=in_channels, out_channels=out_channels, stride=stride,
                                 name=f'{name}_branch1', if_first=if_first)
        self.relu = nn.ReLU()
        self.output_channels = out_channels

    def forward(self, x):
        y = self.conv0(x)
        y = self.conv1(y)
        y = y + self.shortcut(x)
        return self.relu(y)


class BottleneckBlock(nn.Module):
    """1x1 -> 3x3 -> 1x1 bottleneck with 4x channel expansion, as in ResNet-50+."""

    def __init__(self, in_channels, out_channels, stride, if_first, name):
        super().__init__()
        assert name is not None, 'bottleneck must have name'
        self.name = name
        self.conv0 = ConvBNACT(in_channels=in_channels, out_channels=out_channels, kernel_size=1, stride=1, padding=0,
                               groups=1, act='relu')
        self.conv1 = ConvBNACT(in_channels=out_channels, out_channels=out_channels, kernel_size=3, stride=stride,
                               padding=1, groups=1, act='relu')
        self.conv2 = ConvBNACT(in_channels=out_channels, out_channels=out_channels * 4, kernel_size=1, stride=1,
                               padding=0, groups=1, act=None)
        self.shortcut = ShortCut(in_channels=in_channels, out_channels=out_channels * 4, stride=stride,
                                 if_first=if_first, name=f'{name}_branch1')
        self.relu = nn.ReLU()
        self.output_channels = out_channels * 4

    def forward(self, x):
        y = self.conv0(x)
        y = self.conv1(y)
        y = self.conv2(y)
        y = y + self.shortcut(x)
        return self.relu(y)


class ResNet(nn.Module):
    """ResNet-vd backbone for text recognition.

    A deep 3x3 stem replaces the usual 7x7 conv, and stages downsample with
    stride (2, 1) so the feature map keeps its width (the reading axis) while
    the height shrinks.
    """

    def __init__(self, in_channels, layers, **kwargs):
        super().__init__()
        supported_layers = {
            18: {'depth': [2, 2, 2, 2], 'block_class': BasicBlock},
            34: {'depth': [3, 4, 6, 3], 'block_class': BasicBlock},
            50: {'depth': [3, 4, 6, 3], 'block_class': BottleneckBlock},
            101: {'depth': [3, 4, 23, 3], 'block_class': BottleneckBlock},
            152: {'depth': [3, 8, 36, 3], 'block_class': BottleneckBlock},
            200: {'depth': [3, 12, 48, 3], 'block_class': BottleneckBlock}
        }
        assert layers in supported_layers, \
            'supported layers are {} but input layer is {}'.format(list(supported_layers), layers)
        depth = supported_layers[layers]['depth']
        block_class = supported_layers[layers]['block_class']
        num_filters = [64, 128, 256, 512]
        # ResNet-vd stem: three 3x3 convs instead of a single 7x7 conv.
        self.conv1 = nn.Sequential(
            ConvBNACT(in_channels=in_channels, out_channels=32, kernel_size=3, stride=1, padding=1, act='relu'),
            ConvBNACT(in_channels=32, out_channels=32, kernel_size=3, stride=1, padding=1, act='relu'),
            ConvBNACT(in_channels=32, out_channels=64, kernel_size=3, stride=1, padding=1, act='relu')
        )
        self.pool1 = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.stages = nn.ModuleList()
        in_ch = 64
        for block_index in range(len(depth)):
            block_list = []
            for i in range(depth[block_index]):
                # Paddle-style layer names (e.g. res2a, res3b1), kept for
                # compatibility when mapping pretrained-weight keys.
                if layers >= 50:
                    if layers in [101, 152, 200] and block_index == 2:
                        if i == 0:
                            conv_name = 'res' + str(block_index + 2) + 'a'
                        else:
                            conv_name = 'res' + str(block_index + 2) + 'b' + str(i)
                    else:
                        conv_name = 'res' + str(block_index + 2) + chr(97 + i)
                else:
                    conv_name = f'res{block_index + 2}{chr(97 + i)}'
                # The first block of every stage after the first halves the
                # height only; the width is preserved for the sequence axis.
                if i == 0 and block_index != 0:
                    stride = (2, 1)
                else:
                    stride = (1, 1)
                block_list.append(block_class(in_channels=in_ch, out_channels=num_filters[block_index],
                                              stride=stride,
                                              if_first=block_index == i == 0, name=conv_name))
                in_ch = block_list[-1].output_channels
            self.stages.append(nn.Sequential(*block_list))
        self.out_channels = in_ch
        self.out = nn.MaxPool2d(kernel_size=2, stride=2, padding=0)

    def forward(self, x):
        x = self.conv1(x)
        x = self.pool1(x)
        for stage in self.stages:
            x = stage(x)
        x = self.out(x)
        return x
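

if __name__ == '__main__':
    # Minimal smoke test (a sketch, not part of the original file; assumes the
    # torchocr HSwish import resolves). With the (2, 1) stage strides plus the
    # two max-pools, a 3 x 32 x 320 text crop is reduced 32x in height but
    # only 4x in width, giving (N, 512, 1, 80) for layers=18/34 and
    # (N, 2048, 1, 80) for layers=50 and deeper.
    net = ResNet(in_channels=3, layers=18)
    dummy = torch.randn(1, 3, 32, 320)
    print(net(dummy).shape)  # expected: torch.Size([1, 512, 1, 80])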