MPBA Radiomics / RADLER · Commits

Commit 798aa7bd
authored Mar 11, 2020 by Alessia Marcolini

Rename training file

parent eb6c83a5
Changes 1
training_Tstage.py → training.py
@@ -12,6 +12,7 @@
 # get_ipython().run_line_magic('autoreload', '2')
 import os
+
 PATH = os.getcwd()
 print(PATH)
@@ -30,21 +31,27 @@ import time
 import numpy as np
 import pandas as pd
 import matplotlib.pyplot as plt
-from sklearn.metrics import matthews_corrcoef as mcor, accuracy_score as acc, recall_score as recall, precision_score as precision, confusion_matrix
+from sklearn.metrics import (
+    matthews_corrcoef as mcor,
+    accuracy_score as acc,
+    recall_score as recall,
+    precision_score as precision,
+    confusion_matrix,
+)
 import torch
 import torch.nn as nn
 from torch.utils.data import DataLoader
 from torch.utils.tensorboard import SummaryWriter
 from networks import CiompiDO, ResNet50_3d
 from dataset import NumpyCSVDataset, augment_3D_HN
 from split import train_test_indexes_patient_wise

 # In[ ]:

-#os.environ["CUDA_VISIBLE_DEVICES"] = "0,1"
+# os.environ["CUDA_VISIBLE_DEVICES"] = "0,1"
 device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
 multigpu = True
@@ -52,11 +59,15 @@ multigpu = True
 # In[ ]:

-DATASET_DIR = f"/thunderdisk/HN/processed/bbox_64_augmented/"  #Not augmented but already 64**3 (for faster loading)
+DATASET_DIR = (
+    f"/thunderdisk/HN/processed/bbox_fixed2_64"
+)  # Not augmented but already 64**3 (for faster loading)
 EXPERIMENT_DIR = f"{PATH}/experiments"
-PRETRAINED_MED3D_WEIGHTS = '/thunderdisk/HN/MedicalNet_pytorch_files/pretrain/resnet_50.pth'
-PRETRAINED_T_STAGE = f'{EXPERIMENT_DIR}/Tstage_binary_augmented_noTx_branch_wise_20191028-104101/checkpoint_40.pth'
+PRETRAINED_MED3D_WEIGHTS = (
+    "/thunderdisk/HN/MedicalNet_pytorch_files/pretrain/resnet_50.pth"
+)
+PRETRAINED_T_STAGE = f"{EXPERIMENT_DIR}/Tstage_4_noTx_CT_20191114-163418/weights.pth"

 # ### Settings
@@ -64,44 +75,46 @@ PRETRAINED_T_STAGE = f'{EXPERIMENT_DIR}/Tstage_binary_augmented_noTx_branch_wise
 # In[ ]:

-EXPERIMENT_NAME = 'Tstage_binary_augmented_noTx_branch_wise_valieres_' + datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
+EXPERIMENT_NAME = "prova" + datetime.datetime.now().strftime("%Y%m%d-%H%M%S")

 settings = {
-    'model': CiompiDO,
-    'batch_size': 32,
-    'lr': 1e-5,
-    'epochs': 50,
-    'optim': torch.optim.Adam,
-    'K': 0.2,
-    'n_classes': 2,  #TSTAGE
-    'seed': 1234,
-    'dropout': 0.5,
-    'split': 'valieres',
-    'size': 64,
-    'pretrained': 'branch-wise',
-}
-
-assert settings['split'] in ['valieres', '8020']
-assert settings['pretrained'] in ['Med3D', 'branch-wise', 'T-stage', '']
-
-os.makedirs(f'{EXPERIMENT_DIR}/{EXPERIMENT_NAME}', exist_ok=False)
+    "model": CiompiDO,
+    "batch_size": 16,
+    "lr": 1e-5,
+    "epochs": 300,
+    "optim": torch.optim.Adam,
+    "K": 0.2,
+    "n_classes": 4,  # TSTAGE
+    "seed": 1234,
+    "dropout": 0.5,
+    "split": "8020",
+    "size": 64,
+    "pretrained": "",
+}
+
+assert settings["split"] in ["valieres", "8020"]
+assert settings["pretrained"] in ["Med3D", "branch-wise", "T-stage", ""]
+
+os.makedirs(f"{EXPERIMENT_DIR}/{EXPERIMENT_NAME}", exist_ok=False)
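The experiment folder is created with exist_ok=False, so re-running with an EXPERIMENT_NAME that already exists fails fast instead of silently mixing checkpoints from two runs. A minimal sketch of that guard (temporary directory and run name invented for illustration):

import os
import tempfile

root = tempfile.mkdtemp()
run = f"{root}/prova_20200311-120000"
os.makedirs(run, exist_ok=False)  # first run: folder is created
try:
    os.makedirs(run, exist_ok=False)  # second run with the same name
except FileExistsError:
    print("experiment folder already exists, refusing to overwrite")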
 # In[ ]:

-MODEL = settings['model']
-BATCH_SIZE = settings['batch_size']
-LR = settings['lr']
-EPOCHS = settings['epochs']
-OPTIMIZER = settings['optim']
-K = settings['K']
-N_CLASSES = settings['n_classes']
-SEED = settings['seed']
-DROPOUT = settings['dropout']
-SPLIT = settings['split']
-SIZE = settings['size']
-PRETRAINED = settings['pretrained']
+MODEL = settings["model"]
+BATCH_SIZE = settings["batch_size"]
+LR = settings["lr"]
+EPOCHS = settings["epochs"]
+OPTIMIZER = settings["optim"]
+K = settings["K"]
+N_CLASSES = settings["n_classes"]
+SEED = settings["seed"]
+DROPOUT = settings["dropout"]
+SPLIT = settings["split"]
+SIZE = settings["size"]
+PRETRAINED = settings["pretrained"]

 # ### Tensorboard settings
@@ -109,15 +122,16 @@ PRETRAINED = settings['pretrained']
 # In[ ]:

-def new_run_log_dir(experiment_name):
-    log_dir = os.path.join(PATH, 'tb-runs')
-    if not os.path.exists(log_dir):
-        os.makedirs(log_dir)
+def new_run_log_dir(experiment_name):
+    log_dir = os.path.join(PATH, "tb-runs")
+    if not os.path.exists(log_dir):
+        os.makedirs(log_dir)
     run_log_dir = os.path.join(log_dir, experiment_name)
     return run_log_dir

 log_dir = new_run_log_dir(EXPERIMENT_NAME)
-print(f'Tensorboard folder: {log_dir}')
+print(f"Tensorboard folder: {log_dir}")
 writer = SummaryWriter(log_dir)
@@ -127,8 +141,8 @@ writer = SummaryWriter(log_dir)
 # In[ ]:

-clinical_data = f'{PATH}/data/clinical_data_noTx.csv'
-target_column = 'T-stage_binary'
+clinical_data = f"{PATH}/data/clinical_data_noTx.csv"
+target_column = "T-stage_grouped"

 # In[ ]:
@@ -144,27 +158,65 @@ dataset = NumpyCSVDataset(DATASET_DIR, clinical_data, target_column, SIZE, seed=
 # In[ ]:

-if SPLIT == 'valieres':
-    dataset_train = NumpyCSVDataset(DATASET_DIR, clinical_data, target_column, SIZE, mode='train', transforms=augment_3D_HN)
+if SPLIT == "valieres":
+    dataset_train = NumpyCSVDataset(
+        DATASET_DIR,
+        clinical_data,
+        target_column,
+        SIZE,
+        mode="train",
+        transforms=augment_3D_HN,
+    )
     # in this particular case getting `dataset_train._files_full` or `dataset_train.get_files()` is the same
-    idx_train = [i for i, f in enumerate(dataset_train.get_files()) if f.split('-')[1] in ['CHUS', 'HGJ']]
+    idx_train = [
+        i
+        for i, f in enumerate(dataset_train.get_files())
+        if f.split("-")[1] in ["CHUS", "HGJ"]
+    ]
     dataset_train.indexes = np.array(idx_train)

-    dataset_test = NumpyCSVDataset(DATASET_DIR, clinical_data, target_column, SIZE, mode='test', transforms=augment_3D_HN)
+    dataset_test = NumpyCSVDataset(
+        DATASET_DIR,
+        clinical_data,
+        target_column,
+        SIZE,
+        mode="test",
+        transforms=augment_3D_HN,
+    )
     # in this particular case getting `dataset_train._files_full` or `dataset_train.get_files()` is the same
-    idx_test = [i for i, f in enumerate(dataset_test.get_files()) if f.split('-')[1] in ['HMR', 'CHUM']]
+    idx_test = [
+        i
+        for i, f in enumerate(dataset_test.get_files())
+        if f.split("-")[1] in ["HMR", "CHUM"]
+    ]
     dataset_test.indexes = np.array(idx_test)

 else:
-    idx_train, idx_test = train_test_indexes_patient_wise(dataset, test_size=K, stratify=True)
-    dataset_test = NumpyCSVDataset(DATASET_DIR, clinical_data, target_column, SIZE, mode='test', transforms=augment_3D_HN)
+    idx_train, idx_test = train_test_indexes_patient_wise(
+        dataset, test_size=K, stratify=True
+    )
+    dataset_test = NumpyCSVDataset(
+        DATASET_DIR,
+        clinical_data,
+        target_column,
+        SIZE,
+        mode="test",
+        transforms=augment_3D_HN,
+    )
     dataset_test.indexes = np.array(idx_test)

-    dataset_train = NumpyCSVDataset(DATASET_DIR, clinical_data, target_column, SIZE, mode='train', transforms=augment_3D_HN)
+    dataset_train = NumpyCSVDataset(
+        DATASET_DIR,
+        clinical_data,
+        target_column,
+        SIZE,
+        mode="train",
+        transforms=augment_3D_HN,
+    )
     dataset_train.indexes = np.array(idx_train)
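The "valieres" branch is an inter-institution split (the center codes suggest the Vallières et al. head-and-neck cohort): CHUS and HGJ patients go to training, HMR and CHUM to test. It assumes the institution code is the second dash-separated field of each file name, so that f.split("-")[1] extracts it. A sketch with hypothetical file names:

# hypothetical file names of the form <cohort>-<institution>-<patient>.npy
files = ["HN-CHUS-001.npy", "HN-HGJ-014.npy", "HN-HMR-007.npy", "HN-CHUM-023.npy"]
idx_train = [i for i, f in enumerate(files) if f.split("-")[1] in ["CHUS", "HGJ"]]
idx_test = [i for i, f in enumerate(files) if f.split("-")[1] in ["HMR", "CHUM"]]
print(idx_train, idx_test)  # [0, 1] [2, 3]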
@@ -176,11 +228,11 @@ else:
 labels_test = dataset_test.get_labels()
 labels_train = dataset_train.get_labels()

-c, n = np.unique(labels_test, return_counts=True)
-print(np.c_[c,n/len(labels_test)])
+c, n = np.unique(labels_test, return_counts=True)
+print(np.c_[c, n / len(labels_test)])

-c, n = np.unique(labels_train, return_counts=True)
-print(np.c_[c,n/len(labels_train)])
+c, n = np.unique(labels_train, return_counts=True)
+print(np.c_[c, n / len(labels_train)])

 # Create loaders
@@ -188,8 +240,12 @@ print(np.c_[c,n/len(labels_train)])
 # In[ ]:

-loader_test = DataLoader(dataset_test, batch_size=BATCH_SIZE//2, num_workers=12, shuffle=True)
-loader_train = DataLoader(dataset_train, batch_size=BATCH_SIZE, num_workers=12, pin_memory=True, shuffle=True)
+loader_test = DataLoader(
+    dataset_test, batch_size=BATCH_SIZE // 2, num_workers=12, shuffle=True
+)
+loader_train = DataLoader(
+    dataset_train, batch_size=BATCH_SIZE, num_workers=12, pin_memory=True, shuffle=True
+)

 # Compute weights
@@ -199,10 +255,12 @@ loader_train = DataLoader(dataset_train, batch_size=BATCH_SIZE, num_workers=12,
 labels = dataset_train.get_labels()
-#class_sample_count = np.array([len(np.where( labels == t )[0]) for t in np.unique( labels )])
+# class_sample_count = np.array([len(np.where( labels == t )[0]) for t in np.unique( labels )])
 _, class_sample_count = np.unique(labels, return_counts=True)
 n_min = np.min(class_sample_count)
-weights = n_min/class_sample_count  # proportional version, use n_min instead of 1 to get weights ~1
+weights = (
+    n_min / class_sample_count
+)  # proportional version, use n_min instead of 1 to get weights ~1
 weights = torch.Tensor(weights).to(device)
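The weights are inversely proportional to class frequency, scaled by n_min so the rarest class gets weight 1.0 and more frequent classes proportionally less (the point of the comment above). A worked toy example with invented counts:

import numpy as np

labels = np.array([0] * 60 + [1] * 20 + [2] * 20)  # class counts 60 / 20 / 20
_, class_sample_count = np.unique(labels, return_counts=True)
n_min = np.min(class_sample_count)  # 20
weights = n_min / class_sample_count
print(weights)  # [0.33333333 1.         1.        ] -- majority class down-weighted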
@@ -211,7 +269,7 @@ weights = torch.Tensor(weights).to(device)
 # In[ ]:

-model = MODEL(n_classes=N_CLASSES, n_channels=2, modality='CT/PET', dropout=DROPOUT)
+model = MODEL(n_classes=N_CLASSES, n_channels=2, modality="CT/PET", dropout=DROPOUT)

 if multigpu:
     model = nn.DataParallel(model.to(device))
@@ -221,62 +279,77 @@ if multigpu:
 # In[ ]:

-model.initialize_weights()
+# model.initialize_weights()

-if PRETRAINED == 'Med3D':
-    pretrained_dict = torch.load(PRETRAINED_MED3D_WEIGHTS)['state_dict']
+if PRETRAINED == "Med3D":
+    pretrained_dict = torch.load(PRETRAINED_MED3D_WEIGHTS)["state_dict"]
     model_dict = model.state_dict()

     # discard layers not present in destination network or with different shape
-    pretrained_dict = {k: v for k, v in pretrained_dict.items() if (k in model_dict) and (model_dict[k].shape == pretrained_dict[k].shape)}
+    pretrained_dict = {
+        k: v
+        for k, v in pretrained_dict.items()
+        if (k in model_dict) and (model_dict[k].shape == pretrained_dict[k].shape)
+    }

     for name in model.state_dict().keys():
         if name in pretrained_dict.keys():
-            #print(name)
+            # print(name)
             model.state_dict()[name].copy_(pretrained_dict[name])

-elif PRETRAINED == 'branch-wise':
-    pretrained_CT_dict = torch.load(f'{EXPERIMENT_DIR}/Tstage_grouped_noTx_CT_valieres_20191029-173736/checkpoint_290.pth')
-    pretrained_PT_dict = torch.load(f'{EXPERIMENT_DIR}/Tstage_grouped_noTx_PET_valieres_20191029-195338/checkpoint_290.pth')
+elif PRETRAINED == "branch-wise":
+    pretrained_CT_dict = torch.load(
+        f"{EXPERIMENT_DIR}/Tstage_grouped_noTx_CT_valieres_20191029-173736/checkpoint_290.pth"
+    )
+    pretrained_PT_dict = torch.load(
+        f"{EXPERIMENT_DIR}/Tstage_grouped_noTx_PET_valieres_20191029-195338/checkpoint_290.pth"
+    )

     model_dict = model.state_dict()

-    pretrained_CT_dict = {k: v for k, v in pretrained_CT_dict.items() if (k in model_dict) and (model_dict[k].shape == pretrained_CT_dict[k].shape)}
-    pretrained_PT_dict = {k: v for k, v in pretrained_PT_dict.items() if (k in model_dict) and (model_dict[k].shape == pretrained_PT_dict[k].shape)}
-    to_add = 'module.' if multigpu else ''
+    pretrained_CT_dict = {
+        k: v
+        for k, v in pretrained_CT_dict.items()
+        if (k in model_dict) and (model_dict[k].shape == pretrained_CT_dict[k].shape)
+    }
+    pretrained_PT_dict = {
+        k: v
+        for k, v in pretrained_PT_dict.items()
+        if (k in model_dict) and (model_dict[k].shape == pretrained_PT_dict[k].shape)
+    }
+    to_add = "module." if multigpu else ""

     for name in model.CT_branch.state_dict().keys():
-        name_complete = to_add + 'CT_branch.' + name
-        #print(name_complete)
+        name_complete = to_add + "CT_branch." + name
+        # print(name_complete)
         if name_complete in pretrained_CT_dict.keys():
             print(name)
             model.CT_branch.state_dict()[name].copy_(pretrained_CT_dict[name_complete])

     for name in model.PT_branch.state_dict().keys():
-        name_complete = to_add + 'PT_branch.' + name
-        #print(name_complete)
+        name_complete = to_add + "PT_branch." + name
+        # print(name_complete)
         if name_complete in pretrained_PT_dict.keys():
             print(name)
             model.PT_branch.state_dict()[name].copy_(pretrained_PT_dict[name_complete])

-elif PRETRAINED == 'T-stage':
+elif PRETRAINED == "T-stage":
     pretrained_dict = torch.load(PRETRAINED_T_STAGE)
     model_dict = model.state_dict()

     # discard layers not present in destination network or with different shape
-    pretrained_dict = {k: v for k, v in pretrained_dict.items() if (k in model_dict) and (model_dict[k].shape == pretrained_dict[k].shape)}
+    pretrained_dict = {
+        k: v
+        for k, v in pretrained_dict.items()
+        if (k in model_dict) and (model_dict[k].shape == pretrained_dict[k].shape)
+    }

     for name in model.state_dict().keys():
         if name in pretrained_dict.keys():
-            #print(name)
+            # print(name)
             model.state_dict()[name].copy_(pretrained_dict[name])
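All three pretraining modes share the same partial-loading pattern: keep only checkpoint entries whose key exists in the destination state_dict with an identical tensor shape, then copy_ them in place; the "module." prefix accounts for keys saved from an nn.DataParallel-wrapped model. A stripped-down sketch, with a toy module and an invented checkpoint standing in for the real networks:

import torch
import torch.nn as nn

dst = nn.Sequential(nn.Linear(8, 4))  # destination network, keys "0.weight"/"0.bias"
ckpt = {
    "0.weight": torch.randn(4, 8),     # matching key and shape -> kept
    "0.bias": torch.randn(4),          # matching key and shape -> kept
    "head.weight": torch.randn(2, 4),  # no such key in dst -> discarded
}

model_dict = dst.state_dict()
# discard entries missing from the destination or with a different shape
ckpt = {k: v for k, v in ckpt.items() if k in model_dict and model_dict[k].shape == v.shape}
for name in model_dict.keys():
    if name in ckpt:
        dst.state_dict()[name].copy_(ckpt[name])  # in-place copy into the parameters
print(sorted(ckpt.keys()))  # ['0.bias', '0.weight'] -- 'head.weight' was dropped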
# Optimizer
@@ -290,7 +363,7 @@ optimizer = torch.optim.Adam(model.parameters(), lr=LR)
 # In[ ]:

-#[x.shape for x in model.parameters()]
+# [x.shape for x in model.parameters()]

 # Loss
@@ -306,7 +379,7 @@ criterion = nn.CrossEntropyLoss(weight=weights)
 NEW_LABELS = list(range(len(list(np.unique(labels_train)))))
 dictionary = dict(zip(list(np.unique(labels_train)), NEW_LABELS))
 dictionary
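dictionary re-indexes whatever raw target values occur in the training labels to contiguous integers starting at 0, which nn.CrossEntropyLoss expects. With hypothetical raw T-stage values:

import numpy as np

labels_train = np.array(["T1", "T2", "T3", "T4", "T2"])  # hypothetical raw targets
NEW_LABELS = list(range(len(list(np.unique(labels_train)))))
dictionary = dict(zip(list(np.unique(labels_train)), NEW_LABELS))
print(dictionary)  # {'T1': 0, 'T2': 1, 'T3': 2, 'T4': 3}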
# ### Train
@@ -326,90 +399,99 @@ iteration = 0
 start_time = time.time()

 for epoch in range(EPOCHS):
-    #print(epoch)
-    if epoch % 10 == 0:
-        #save checkpoint
-        torch.save(model.state_dict(), f'{EXPERIMENT_DIR}/{EXPERIMENT_NAME}/checkpoint_{epoch}.pth')
+    # print(epoch)
+    if epoch % 10 == 0:
+        # save checkpoint
+        torch.save(
+            model.state_dict(),
+            f"{EXPERIMENT_DIR}/{EXPERIMENT_NAME}/checkpoint_{epoch}.pth",
+        )

     for j, data in enumerate(loader_train):
         global_i += 1

         if j % 10 == 0:
             print(time.time() - start_time)
             start_time = time.time()

         optimizer.zero_grad()

-        images_tr = data['data'].to(device)
-        labels_tr = torch.LongTensor([dictionary[i] for i in data['target']]).to(device)
+        images_tr = data["data"].to(device)
+        labels_tr = torch.LongTensor([dictionary[i] for i in data["target"]]).to(
+            device
+        )
         outputs_tr = model(images_tr).to(device)

         # backward
         loss = criterion(outputs_tr, labels_tr)
         loss.backward()
         optimizer.step()

         # check test set
-        if j % int(len(loader_train)/2) == 0 and j != 0:
+        if j % int(len(loader_train) / 2) == 0 and j != 0:
             model.eval()
             with torch.no_grad():
                 losses_sum = 0
                 num_samples_test = 0

                 for data_test in loader_test:
-                    images_ts = data_test['data'].to(device)
-                    labels_ts = torch.LongTensor([dictionary[i] for i in data_test['target']]).to(device)
+                    images_ts = data_test["data"].to(device)
+                    labels_ts = torch.LongTensor(
+                        [dictionary[i] for i in data_test["target"]]
+                    ).to(device)
                     outputs_ts = model.forward(images_ts)

                     loss_test_sum = criterion(outputs_ts, labels_ts).item()
                     losses_sum += loss_test_sum
                     num_samples_test += 1

                 loss_test_avg = losses_sum / num_samples_test

-                writer.add_scalar(f'{EXPERIMENT_NAME}/test_loss', loss_test_avg, global_i)
+                writer.add_scalar(f"{EXPERIMENT_NAME}/test_loss", loss_test_avg, global_i)
                 writer.flush()

-                #is_best = loss_val_avg < last_loss_val
-                #if is_best:
-                #    torch.save(model.state_dict(),
-                #               f'{EXPERIMENT_DIR}/{EXPERIMENT_NAME}/checkpoint_best_{epoch}.pth')
+                # is_best = loss_val_avg < last_loss_val
+                # if is_best:
+                #    torch.save(model.state_dict(),
+                #               f'{EXPERIMENT_DIR}/{EXPERIMENT_NAME}/checkpoint_best_{epoch}.pth')

                 last_loss_test = loss_test_avg

             losses_tr.append(loss.item())
             losses_ts.append(loss_test_avg)

             del images_ts, labels_ts

         iteration += 1
         del images_tr, labels_tr
         gc.collect()
         model.train()

         # sys.stdout.write
-        writer.add_scalar(f'{EXPERIMENT_NAME}/train_loss', loss.item(), global_i)
+        writer.add_scalar(f"{EXPERIMENT_NAME}/train_loss", loss.item(), global_i)
         writer.flush()

-        sys.stdout.write('\rEpoch {} of {} [{:.2f}%] - loss TR/TS: {:.4f} / {:.4f} - {}'.format(
-            epoch + 1, EPOCHS, 100 * j / len(loader_train), loss.item(), last_loss_test, optimizer.param_groups[0]['lr']))
+        sys.stdout.write(
+            "\rEpoch {} of {} [{:.2f}%] - loss TR/TS: {:.4f} / {:.4f} - {}".format(
+                epoch + 1,
+                EPOCHS,
+                100 * j / len(loader_train),
+                loss.item(),
+                last_loss_test,
+                optimizer.param_groups[0]["lr"],
+            )
+        )
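The test-set pass runs once per epoch, at the mid-epoch batch: int(len(loader_train) / 2) is the trigger index, j != 0 skips the very first batch, and j == len(loader_train) is never reached. A quick check with an invented batch count:

n_batches = 10  # hypothetical len(loader_train)
eval_at = [j for j in range(n_batches) if j % int(n_batches / 2) == 0 and j != 0]
print(eval_at)  # [5] -- a single mid-epoch evaluation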
 # ### Predict on Train

 # In[ ]:

 model.eval()
-dataset_train.mode = 'test'  #no augmentation
+dataset_train.mode = "test"  # no augmentation
 preds_tr = []
 trues_tr = []
@@ -420,14 +502,14 @@ with torch.no_grad():
     for data in dataset_train:
         image = data["data"].unsqueeze(0).to(device)
         label = data["target"]

-        output = model(image)  #forward
+        output = model(image)  # forward
         _, pred = torch.max(output, 1)

         preds_tr.append(pred.data.cpu().numpy())
         # trues.append(label)
         trues_tr.append(dictionary[label])
         probs_tr.append(output.data.cpu().numpy())
-        filenames_tr.append(data['filename'])
+        filenames_tr.append(data["filename"])

 probs_tr = np.concatenate(probs_tr)
 preds_tr = np.concatenate(preds_tr)
@@ -436,13 +518,18 @@ filenames_tr = np.array(filenames_tr)
 MCC_tr = mcor(trues_tr, preds_tr)
 ACC_tr = acc(trues_tr, preds_tr)
-prec_tr = precision(trues_tr, preds_tr, average='weighted')
-rec_tr = recall(trues_tr, preds_tr, average='weighted')
+prec_tr = precision(trues_tr, preds_tr, average="weighted")
+rec_tr = recall(trues_tr, preds_tr, average="weighted")

 print("MCC train", round(MCC_tr, 3), "ACC train", round(ACC_tr, 3))
 print("precision train", round(prec_tr, 3), "recall train", round(rec_tr, 3))
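With average="weighted", precision and recall are computed per class and then averaged using class supports as weights, a reasonable summary for the imbalanced T-stage classes. A toy check with invented labels:

from sklearn.metrics import precision_score, recall_score

trues = [0, 0, 0, 1, 1, 2]
preds = [0, 0, 1, 1, 1, 2]
print(round(precision_score(trues, preds, average="weighted"), 3))  # 0.889
print(round(recall_score(trues, preds, average="weighted"), 3))  # 0.833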