Merge pull request #9 from ai-med/oct-conv-modules
new build with octave modules
jyotirmay123 authored Feb 2, 2021
2 parents 9cbe2e8 + 17ad404 commit cf512d2
Showing 5 changed files with 382 additions and 5 deletions.
12 changes: 9 additions & 3 deletions README.md
@@ -9,14 +9,19 @@ This project has 3 modules
3. CrossEntropyLoss2d
4. CombinedLoss
5. FocalLoss

* Modules (modules.py) -> It has all the commonly used building blocks of an FCN
1. DenseBlock
2. EncoderBlock
3. DecoderBlock
4. ClassifierBlock
5. GenericBlock
6. SDNetEncoderBlock
7. SDNetDecoderBlock
8. OctaveDenseBlock
9. OctaveEncoderBlock
10. OctaveDecoderBlock

* Bayesian Modules (bayesian_modules.py) -> It has all the commonly used building blocks of a Bayesian FCN
1. BayesianConvolutionBlock
2. BayesianEncoderBlock
@@ -34,15 +39,16 @@ You need to have the following in order for this library to work as expected

### Installing

Always use the latest release. Use the following command with the appropriate version number (v1.4 in this particular case) to install. You can find the link to the latest release in the releases section of this GitHub repo.

```
pip install https://github.com/ai-med/nn-common-modules/releases/download/v1.1/nn_common_modules-1.4-py3-none-any.whl
```
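
For a quick sanity check after installation, a block can be constructed and run roughly as below. This is a minimal sketch: the parameter names follow the docstrings in modules.py, and the concrete values are only illustrative.

```
import torch
from nn_common_modules import modules as sm

params = {
    'num_channels': 1,
    'num_filters': 64,
    'kernel_h': 5,
    'kernel_w': 5,
    'stride_conv': 1,
    'pool': 2,
    'stride_pool': 2,
    'num_classes': 28,
    'se_block': None,
    'drop_out': 0.2,
}

block = sm.EncoderBlock(params)
x = torch.rand(1, 1, 64, 64)
# returns the pooled output, the pre-pool output and the pooling indices
out_encoder, out_block, indices = block(x)
```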

## Authors

* **Shayan Ahmad Siddiqui** - [shayansiddiqui](https://github.com/shayansiddiqui)
* **Jyotirmay Senapati** - [jyotirmays](https://www.linkedin.com/in/jyotirmay-senapati-30615421)
* **Abhijit Guha Roy** - [abhi4ssj](https://github.com/abhi4ssj)


233 changes: 232 additions & 1 deletion build/lib/nn_common_modules/modules.py
@@ -17,7 +17,7 @@
import torch.nn as nn
from squeeze_and_excitation import squeeze_and_excitation as se
import torch.nn.functional as F

from octave_convolution_block import *

class DenseBlock(nn.Module):
"""Block with dense connections
@@ -489,3 +489,234 @@ def forward(self, input, out_block=None, indices=None):
x1 = self.conv(concat)
x2 = self.relu(x1)
return x2


class OctaveDenseBlock(nn.Module):
"""Block with dense connections
:param params: {
'num_channels':1,
'num_filters':64,
'kernel_h':5,
'kernel_w':5,
'stride_conv':1,
'pool':2,
'stride_pool':2,
'num_classes':28,
'se_block': se.SELayer.None,
'drop_out':0.2}
:type params: dict
:param se_block_type: Squeeze and Excite block type to be included, defaults to None
:type se_block_type: str, valid options are {'NONE', 'CSE', 'SSE', 'CSSE'}, optional
:return: forward passed tensor
:rtype: torch.tensor [FloatTensor]
"""

def __init__(self, params, se_block_type=None, is_decoder=False):
super(OctaveDenseBlock, self).__init__()
print(se_block_type)
if se_block_type == se.SELayer.CSE.value:
self.SELayer = se.ChannelSELayer(params['num_filters'])

elif se_block_type == se.SELayer.SSE.value:
self.SELayer = se.SpatialSELayer(params['num_filters'])

elif se_block_type == se.SELayer.CSSE.value:
self.SELayer = se.ChannelSpatialSELayer(params['num_filters'])
else:
self.SELayer = None

padding_h = int((params['kernel_h'] - 1) / 2)
padding_w = int((params['kernel_w'] - 1) / 2)

conv1_out_size = int(params['num_channels'] + params['num_filters'])
conv2_out_size = int(
params['num_channels'] + params['num_filters'] + params['num_filters'])

self.conv1 = OctConv2d('first', in_channels=params['num_channels'], out_channels=params['num_filters'], kernel_size=(
params['kernel_h'], params['kernel_w']), padding=(padding_h, padding_w),
stride=params['stride_conv'])


self.conv2 = OctConv2d('regular', in_channels=conv1_out_size, out_channels=params['num_filters'], kernel_size=(
params['kernel_h'], params['kernel_w']), padding=(padding_h, padding_w),
stride=params['stride_conv'])


self.conv3 = OctConv2d('last', in_channels=conv2_out_size, out_channels=params['num_filters'], kernel_size=(
params['kernel_h'], params['kernel_w']), padding=(padding_h, padding_w),
stride=params['stride_conv'])
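
# As used in forward() below: the 'first' OctConv2d takes a single tensor and returns a
# (high, low) frequency pair, 'regular' maps a (high, low) pair to a (high, low) pair,
# and 'last' merges the pair back into a single output tensor.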

self.avgpool1 = nn.AvgPool2d(2)

alpha_in, alpha_out = 0.5, 0.5
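# alpha_in / alpha_out above are the intended low-frequency channel fractions of the
# octave convolutions; at present they are referenced only in the commented-out
# channel-size expressions below.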

if is_decoder:
# Channel sizes for decoder blocks
self.batchnorm1 = nn.BatchNorm2d(params['num_channels'])
oct_unit_decoder_channel_size = params['num_channels'] // 4 #128
self.batchnorm2_h = nn.BatchNorm2d(int(oct_unit_decoder_channel_size*3)) #conv1_out_size * (1 - alpha_out))+1)
self.batchnorm2_l = nn.BatchNorm2d(int(oct_unit_decoder_channel_size*3)) #conv1_out_size-1))

self.batchnorm3_h = nn.BatchNorm2d(int(oct_unit_decoder_channel_size*4)) #conv2_out_size * (1 - alpha_out))+1)
self.batchnorm3_l = nn.BatchNorm2d(int(oct_unit_decoder_channel_size*4)) #conv2_out_size-1))
else:
# channel sizes for encoder blocks
self.batchnorm1 = nn.BatchNorm2d(params['num_channels'])
oct_unit_encoder_channel_size = params['num_channels'] // 2 #64
self.batchnorm2_h = nn.BatchNorm2d(int(oct_unit_encoder_channel_size*2)) #conv1_out_size * (1 - alpha_out)
self.batchnorm2_l = nn.BatchNorm2d(int(oct_unit_encoder_channel_size*2)) #(conv1_out_size * alpha_out))

self.batchnorm3_h = nn.BatchNorm2d(int(oct_unit_encoder_channel_size*3)) #conv2_out_size * (1 - alpha_out)))
self.batchnorm3_l = nn.BatchNorm2d(int(oct_unit_encoder_channel_size*3)) #conv2_out_size * alpha_out))

self.prelu = nn.PReLU()
self.prelu_h = nn.PReLU()
self.prelu_l = nn.PReLU()

if params['drop_out'] > 0:
self.drop_out_needed = True
self.drop_out = nn.Dropout2d(params['drop_out'])
else:
self.drop_out_needed = False

def forward(self, input):
"""Forward pass
:param input: Input tensor, shape = (N x C x H x W)
:type input: torch.tensor [FloatTensor]
:return: Forward passed tensor
:rtype: torch.tensor [FloatTensor]
"""

o1 = self.batchnorm1(input)
o2 = self.prelu(o1)

ch = input.shape[1] // 2
inp_h, inp_l = input[:, :ch, :, :], input[:, ch:, :, :]
inp_ll = self.avgpool1(inp_l)
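# inp_h / inp_l above split the raw input channel-wise into high- and low-frequency halves;
# the low half is average-pooled to half resolution so it matches the spatial size of the
# low-frequency octave outputs it is concatenated with below.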

o3_h, o3_l = self.conv1(o2)
o4_h = torch.cat((inp_h, o3_h), dim=1)
o4_l = torch.cat((inp_ll, o3_l), dim=1)
o5_h = self.batchnorm2_h(o4_h)
o5_l = self.batchnorm2_l(o4_l)
o6_h = self.prelu_h(o5_h)
o6_l = self.prelu_l(o5_l)
o7_h, o7_l = self.conv2((o6_h, o6_l))

o8_h = torch.cat((inp_h, o3_h, o7_h), dim=1)
o8_l = torch.cat((inp_ll, o3_l, o7_l), dim=1)
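# Dense connectivity: the (split) input and the outputs of conv1 and conv2 are
# concatenated on both the high- and low-frequency branches before the final convolution.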

o9_h = self.batchnorm3_h(o8_h)
o9_l = self.batchnorm3_l(o8_l)
o10_h = self.prelu_h(o9_h)
o10_l = self.prelu_l(o9_l)
out = self.conv3((o10_h, o10_l))

return out


class OctaveEncoderBlock(OctaveDenseBlock):
"""Dense encoder block with maxpool and an optional SE block
:param params: {
'num_channels':1,
'num_filters':64,
'kernel_h':5,
'kernel_w':5,
'stride_conv':1,
'pool':2,
'stride_pool':2,
'num_classes':28,
'se_block': se.SELayer.None,
'drop_out':0.2}
:type params: dict
:param se_block_type: Squeeze and Excite block type to be included, defaults to None
:type se_block_type: str, valid options are {'NONE', 'CSE', 'SSE', 'CSSE'}, optional
:return: output tensor with maxpool, output tensor without maxpool, indices for unpooling
:rtype: torch.tensor [FloatTensor], torch.tensor [FloatTensor], torch.tensor [LongTensor]
"""

def __init__(self, params, se_block_type=None):
super(OctaveEncoderBlock, self).__init__(params, se_block_type=se_block_type, is_decoder=False)
self.maxpool = nn.MaxPool2d(
kernel_size=params['pool'], stride=params['stride_pool'], return_indices=True)

def forward(self, input, weights=None):
"""Forward pass
:param input: Input tensor, shape = (N x C x H x W)
:type input: torch.tensor [FloatTensor]
:param weights: Weights used for squeeze and excitation, shape depends on the type of SE block, defaults to None
:type weights: torch.tensor, optional
:return: output tensor with maxpool, output tensor without maxpool, indices for unpooling
:rtype: torch.tensor [FloatTensor], torch.tensor [FloatTensor], torch.tensor [LongTensor]
"""

out_block = super(OctaveEncoderBlock, self).forward(input)
if self.SELayer:
out_block = self.SELayer(out_block)

if self.drop_out_needed:
out_block = self.drop_out(out_block)

out_encoder, indices = self.maxpool(out_block)
return out_encoder, out_block, indices


class OctaveDecoderBlock(OctaveDenseBlock):
"""Dense decoder block with maxunpool and an optional skip connections and SE block
:param params: {
'num_channels':1,
'num_filters':64,
'kernel_h':5,
'kernel_w':5,
'stride_conv':1,
'pool':2,
'stride_pool':2,
'num_classes':28,
'se_block': se.SELayer.None,
'drop_out':0.2}
:type params: dict
:param se_block_type: Squeeze and Excite block type to be included, defaults to None
:type se_block_type: str, valid options are {'NONE', 'CSE', 'SSE', 'CSSE'}, optional
:return: forward passed tensor
:rtype: torch.tensor [FloatTensor]
"""

def __init__(self, params, se_block_type=None):
super(OctaveDecoderBlock, self).__init__(params, se_block_type=se_block_type, is_decoder=True)
self.unpool = nn.MaxUnpool2d(
kernel_size=params['pool'], stride=params['stride_pool'])

def forward(self, input, out_block=None, indices=None, weights=None):
"""Forward pass
:param input: Input tensor, shape = (N x C x H x W)
:type input: torch.tensor [FloatTensor]
:param out_block: Tensor for skip connection, shape = (N x C x H x W), defaults to None
:type out_block: torch.tensor [FloatTensor], optional
:param indices: Indices used for unpooling operation, defaults to None
:type indices: torch.tensor, optional
:param weights: Weights used for squeeze and excitation, shape depends on the type of SE block, defaults to None
:type weights: torch.tensor, optional
:return: Forward passed tensor
:rtype: torch.tensor [FloatTensor]
"""
if indices is not None:
unpool = self.unpool(input, indices)
else:
# TODO: Implement Conv Transpose
print("You have to use Conv Transpose")

if out_block is not None:
concat = torch.cat((out_block, unpool), dim=1)
else:
concat = unpool
out_block = super(OctaveDecoderBlock, self).forward(concat)

if self.SELayer:
out_block = self.SELayer(out_block)

if self.drop_out_needed:
out_block = self.drop_out(out_block)
return out_block