Commit 30c1d40b authored by Alessia Marcolini

Remove unused networks

parent 4486ce8d
@@ -10,109 +10,6 @@ import torch
import resnet
class Ciompi(nn.Module):
    def __init__(self, n_classes=2, n_channels=2):
        super(Ciompi, self).__init__()
        self.n_classes = n_classes
        self.n_channels = n_channels
        # dropout = 0.5

        self.CT_branch = nn.Sequential(      # input: 64x64x64
            nn.BatchNorm3d(1),
            nn.Conv3d(1, 32, 5),             # 64-5+1 = 60x60x60
            nn.BatchNorm3d(32),
            nn.ReLU(),
            nn.MaxPool3d(2),                 # 30x30x30
            nn.Conv3d(32, 64, 3),            # 30-3+1 = 28x28x28
            nn.BatchNorm3d(64),
            nn.Conv3d(64, 64, 3),            # 28-3+1 = 26x26x26
            nn.BatchNorm3d(64),
            nn.ReLU(),
            nn.MaxPool3d(2),                 # 13x13x13
            nn.Conv3d(64, 128, 3),           # 13-3+1 = 11x11x11
            nn.BatchNorm3d(128),
            nn.ReLU(),
            nn.MaxPool3d(2),                 # 5x5x5
            nn.Conv3d(128, 256, 3),          # 5-3+1 = 3x3x3
            nn.BatchNorm3d(256),
            nn.ReLU(),
            nn.MaxPool3d(2),                 # 1x1x1
            nn.AdaptiveAvgPool3d(1)          # redundant for 64^3 inputs; worth keeping if moving to 128^3, otherwise the first linear layer must change
        )
        self.PT_branch = nn.Sequential(
            nn.BatchNorm3d(1),
            nn.Conv3d(1, 32, 5),             # 64-5+1 = 60x60x60
            nn.BatchNorm3d(32),
            nn.ReLU(),
            nn.MaxPool3d(2),                 # 30x30x30
            nn.Conv3d(32, 64, 3),            # 30-3+1 = 28x28x28
            nn.BatchNorm3d(64),
            nn.Conv3d(64, 64, 3),            # 28-3+1 = 26x26x26
            nn.BatchNorm3d(64),
            nn.ReLU(),
            nn.MaxPool3d(2),                 # 13x13x13
            nn.Conv3d(64, 128, 3),           # 13-3+1 = 11x11x11
            nn.BatchNorm3d(128),
            nn.ReLU(),
            nn.MaxPool3d(2),                 # 5x5x5
            nn.Conv3d(128, 256, 3),          # 5-3+1 = 3x3x3
            nn.BatchNorm3d(256),
            nn.ReLU(),
            nn.MaxPool3d(2),                 # 1x1x1
            nn.AdaptiveAvgPool3d(1)          # redundant for 64^3 inputs; worth keeping if moving to 128^3, otherwise the first linear layer must change
        )
        self.linear = nn.Sequential(
            nn.Linear(256 * 2, 50),
            nn.ReLU(),
            nn.Linear(50, self.n_classes),
            nn.Softmax(1)
        )
    def forward(self, x):  # x: (N_batch, n_channels, s, s, s)
        x_CT = x[:, 0, :, :, :].unsqueeze(1)  # (N_batch, 1, s, s, s)
        features_CT = self.CT_branch(x_CT)
        features_CT = features_CT.view(x.shape[0], -1)  # (N_batch, 256)
        x_PT = x[:, 1, :, :, :].unsqueeze(1)
        features_PT = self.PT_branch(x_PT)
        features_PT = features_PT.view(x.shape[0], -1)
        out_merged = torch.cat([features_CT, features_PT], dim=1)
        out = self.linear(out_merged)
        return out
    def extract_features(self, x):
        x_CT = x[:, 0, :, :, :].unsqueeze(1)
        features_CT = self.CT_branch(x_CT)
        features_CT = features_CT.view(x.shape[0], -1)
        x_PT = x[:, 1, :, :, :].unsqueeze(1)
        features_PT = self.PT_branch(x_PT)
        features_PT = features_PT.view(x.shape[0], -1)
        # concatenate the per-branch feature vectors
        out_merged = torch.cat([features_CT, features_PT], dim=1)
        return out_merged
    def initialize_weights(self):
        for m in self.modules():
            # if isinstance(m, nn.Linear):
            #     nn.init.xavier_uniform_(m.weight)
            #     nn.init.constant_(m.bias, 0)
            if isinstance(m, nn.Conv3d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            if isinstance(m, nn.BatchNorm3d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
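# Usage sketch (not in the original file): forward() above assumes paired
# CT/PET patches stacked on the channel axis, i.e. an input of shape
# (N, 2, s, s, s) with s = 64. A quick shape check under that assumption:
#
#     model = Ciompi()
#     model.initialize_weights()
#     x = torch.randn(4, 2, 64, 64, 64)   # 4 dual-modality 64^3 patches
#     probs = model(x)                    # -> (4, 2) class probabilities
#     feats = model.extract_features(x)   # -> (4, 512) concatenated features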
class CiompiDO(nn.Module):
    def __init__(self, n_classes=2, n_channels=2, modality='CT/PET', dropout=0.5):
        assert modality in ['CT', 'PET', 'CT/PET']
@@ -284,103 +181,3 @@ class ResNet50_3d(resnet.ResNet):
        x = x[:, 1, :, :, :]  # only PET volumes
        return super(ResNet50_3d, self).forward(x)
class Dummy(nn.Module):
    def __init__(self):
        super(Dummy, self).__init__()
        self.n_classes = 2
        self.n_channels = 2
        dropout = 0.5

        self.CT_branch = nn.Sequential(
            nn.Conv3d(1, 20, 3),
            nn.BatchNorm3d(20),
            nn.ReLU(True),
            nn.MaxPool3d(2, stride=2),
            nn.Dropout3d(dropout),
            nn.Conv3d(20, 40, 3),
            nn.BatchNorm3d(40),
            nn.ReLU(True),
            nn.MaxPool3d(2, stride=2),
            nn.Dropout3d(dropout / 2),
            nn.Conv3d(40, 80, 3),
            nn.BatchNorm3d(80),
            nn.ReLU(True),
            nn.MaxPool3d(2, stride=2),
            nn.Dropout3d(dropout / 4),
            nn.Conv3d(80, 160, 3),
            nn.BatchNorm3d(160),
            nn.ReLU(True),
            nn.MaxPool3d(2, stride=2),
            nn.Dropout3d(dropout / 4),
            nn.AdaptiveAvgPool3d(1)
        )
        self.PT_branch = nn.Sequential(
            nn.Conv3d(1, 20, 3),
            nn.BatchNorm3d(20),
            nn.ReLU(True),
            nn.MaxPool3d(2, stride=2),
            nn.Dropout3d(dropout),
            nn.Conv3d(20, 40, 3),
            nn.BatchNorm3d(40),
            nn.ReLU(True),
            nn.MaxPool3d(2, stride=2),
            nn.Dropout3d(dropout / 2),
            nn.Conv3d(40, 80, 3),
            nn.BatchNorm3d(80),
            nn.ReLU(True),
            nn.MaxPool3d(2, stride=2),
            nn.Dropout3d(dropout / 4),
            nn.Conv3d(80, 160, 3),
            nn.BatchNorm3d(160),
            nn.ReLU(True),
            nn.MaxPool3d(2, stride=2),
            nn.Dropout3d(dropout / 4),
            nn.AdaptiveAvgPool3d(1)
        )
        self.linear = nn.Sequential(
            nn.Linear(160 * 2, 50),
            nn.ReLU(),
            nn.Dropout(0.1),
            nn.Linear(50, self.n_classes),
            nn.Softmax(1)
        )
    def forward(self, x):  # x: (N_batch, n_channels, s, s, s)
        x_CT = x[:, 0, :, :, :].unsqueeze(1)  # (N_batch, 1, s, s, s)
        features_CT = self.CT_branch(x_CT)
        features_CT = features_CT.view(x.shape[0], -1)  # (N_batch, 160)
        x_PT = x[:, 1, :, :, :].unsqueeze(1)
        features_PT = self.PT_branch(x_PT)
        features_PT = features_PT.view(x.shape[0], -1)
        out_merged = torch.cat([features_CT, features_PT], dim=1)
        out = self.linear(out_merged)
        return out
    def extract_features(self, x):
        x_CT = x[:, 0, :, :, :].unsqueeze(1)
        features_CT = self.CT_branch(x_CT)
        features_CT = features_CT.view(x.shape[0], -1)
        x_PT = x[:, 1, :, :, :].unsqueeze(1)
        features_PT = self.PT_branch(x_PT)
        features_PT = features_PT.view(x.shape[0], -1)
        # concatenate the per-branch feature vectors
        out_merged = torch.cat([features_CT, features_PT], dim=1)
        return out_merged
    def initialize_weights(self):
        for m in self.modules():
            # if isinstance(m, nn.Linear):
            #     nn.init.xavier_uniform_(m.weight)
            #     nn.init.constant_(m.bias, 0)
            if isinstance(m, nn.Conv3d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            if isinstance(m, nn.BatchNorm3d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
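# Minimal smoke test (not in the original file), assuming the same
# (N, 2, 64, 64, 64) dual-modality input layout as the classes above:
if __name__ == "__main__":
    model = Dummy()
    model.initialize_weights()
    x = torch.randn(2, 2, 64, 64, 64)   # 2 samples, CT + PET channels
    out = model(x)                      # -> (2, 2) softmax probabilities
    feats = model.extract_features(x)   # -> (2, 320) concatenated features
    print(out.shape, feats.shape)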