Commit 149b70ac authored by Alessia Marcolini

Fix typos

parent ced520e8
@@ -33,12 +33,12 @@ print(PATH)
 #%%
 device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
 multigpu = True
-# In[ ]:
+#
 DATASET = 'HN_val'
 BBOX_SUBDATASET = 'bbox_64'
 DATASET_DIR = PATH / 'data' / DATASET / 'processed' / 'bbox' / BBOX_SUBDATASET
-EXPERIMENT_DIR = PATH / 'experiment'
+EXPERIMENT_DIR = PATH / 'experiments'
 PRETRAINED_MED3D_WEIGHTS = PATH / 'pretrained_weights' / 'resnet_50.pth'
 PRETRAINED_T_STAGE = EXPERIMENT_DIR / 'Tstage_4_noTx_CT_20191114-163418' / 'weights.pth'
@@ -62,7 +62,7 @@ settings = {
 }
 assert settings["split"] in ["vallieres", "8020"]
-assert not settings["splits"] == "vallieres" or DATASET == 'HN_val'
+assert not settings["split"] == "vallieres" or DATASET == 'HN_val'
 assert settings["pretrained"] in ["Med3D", "branch-wise", "T-stage", ""]
 os.makedirs(EXPERIMENT_DIR / EXPERIMENT_NAME, exist_ok=False)
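
The corrected assertion is an implication guard: "assert not A or B" enforces "if A then B", i.e. the vallieres split may only be used with the HN_val dataset. The old line indexed settings["splits"], a key that presumably does not exist (the preceding assert uses "split"), so it would have raised a KeyError instead of performing the intended check. A minimal sketch of the equivalent logic, with stand-in values for settings and DATASET:

# Stand-ins for the script's settings dict and DATASET constant.
settings = {"split": "vallieres", "pretrained": ""}
DATASET = 'HN_val'

# "assert not A or B" is the same check as "if A: assert B".
assert not settings["split"] == "vallieres" or DATASET == 'HN_val'
if settings["split"] == "vallieres":
    assert DATASET == 'HN_val', "the vallieres split is only defined for HN_val"
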
@@ -107,7 +107,7 @@ np.random.seed(SEED)
 dataset_train = NumpyCSVDataset(
     data_dir=DATASET_DIR,
-    clinical_file=clinical_data,
+    clinical_file=clinical_file,
     label_col=target_column,
     size=SIZE,
     mode='train',
@@ -116,7 +116,7 @@ dataset_train = NumpyCSVDataset(
 dataset_test = NumpyCSVDataset(
     data_dir=DATASET_DIR,
-    clinical_file=clinical_data,
+    clinical_file=clinical_file,
     label_col=target_column,
     size=SIZE,
     mode='test',
@@ -145,15 +145,6 @@ else:
 dataset_train.indices = np.array(idx_train)
 dataset_test.indices = np.array(idx_test)
-# %%
-# Check class balance
-labels_train = dataset_train.labels
-labels_test = dataset_test.labels
-c, n = np.unique(labels_train, return_counts=True)
-print(np.c_[c, n / len(labels_train)])
-c, n = np.unique(labels_test, return_counts=True)
-print(np.c_[c, n / len(labels_test)])
 # %%
 # Create loaders
 loader_train = DataLoader(
     dataset_train, batch_size=BATCH_SIZE, num_workers=12, pin_memory=True, shuffle=True
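
The training loader keeps plain shuffle=True; class imbalance is instead handled below through per-class loss weights. For contrast, a sampling-based alternative (not what this script does) would replace shuffling with torch.utils.data.WeightedRandomSampler; a self-contained sketch with toy data:

import numpy as np
import torch
from torch.utils.data import DataLoader, TensorDataset, WeightedRandomSampler

# Toy imbalanced labels standing in for dataset_train.labels.
labels = np.array([0] * 8 + [1] * 2)
_, counts = np.unique(labels, return_counts=True)
sample_weights = (1.0 / counts)[labels]  # per-sample weight = 1 / size of its class

sampler = WeightedRandomSampler(
    weights=torch.as_tensor(sample_weights, dtype=torch.double),
    num_samples=len(labels),
    replacement=True,
)
dataset = TensorDataset(torch.randn(len(labels), 3), torch.as_tensor(labels))
# sampler and shuffle are mutually exclusive, so shuffle=True is dropped here.
loader = DataLoader(dataset, batch_size=4, sampler=sampler, num_workers=0)
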
@@ -164,15 +155,12 @@ loader_test = DataLoader(
 )
 # %%
-# Compute weights
+# Compute weights only on training set
 labels_train = dataset_train.labels
-# class_sample_count = np.array([len(np.where( labels == t )[0]) for t in np.unique( labels )])
 _, class_sample_count = np.unique(labels_train, return_counts=True)
-n_min = np.min(class_sample_count)
-weights = (
-    n_min / class_sample_count
-)  # proportional version: use n_min instead of 1 so the weights are ~1
+n_max = np.max(class_sample_count)
+weights = n_max / class_sample_count
 weights = torch.Tensor(weights).to(device)
 # %%
 # ### Initialize Model
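
The weight change flips the normalization rather than the ratios: the old n_min / class_sample_count gives the minority class weight 1 and the majority class a weight below 1, while the new n_max / class_sample_count gives the majority class weight 1 and boosts minority classes above 1. A small sketch on toy labels; the final line, passing the weights to a loss, is an assumption, since the hunk does not show how weights is consumed:

import numpy as np
import torch

# Toy training labels: 8 samples of class 0, 2 of class 1.
labels_train = np.array([0] * 8 + [1] * 2)
_, class_sample_count = np.unique(labels_train, return_counts=True)  # [8, 2]

old_weights = np.min(class_sample_count) / class_sample_count  # [0.25, 1.0]
new_weights = np.max(class_sample_count) / class_sample_count  # [1.0, 4.0]

# Assumed usage: per-class weights for a weighted cross-entropy loss.
criterion = torch.nn.CrossEntropyLoss(weight=torch.Tensor(new_weights))
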
@@ -487,3 +475,4 @@ metrics_out.to_csv(EXPERIMENT_DIR / EXPERIMENT_NAME / 'metrics_out.csv')
 # Save model weights
 torch.save(model.state_dict(), EXPERIMENT_DIR / EXPERIMENT_NAME / 'weights.pth')
 # %%
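
torch.save(model.state_dict(), ...) stores only the parameter tensors, so restoring a run requires rebuilding the same architecture first. A hedged sketch of the round trip with a stand-in module (the real script's model and experiment paths are not reproduced here); note that with multigpu = True and nn.DataParallel the saved keys may carry a 'module.' prefix that has to be stripped before loading:

import torch
import torch.nn as nn

# Stand-in module; in the real script this would be the trained 3D network.
model = nn.Linear(4, 2)
torch.save(model.state_dict(), 'weights.pth')  # same call pattern as above

# Later: rebuild the architecture, then load the saved parameters on CPU.
restored = nn.Linear(4, 2)
state_dict = torch.load('weights.pth', map_location='cpu')
# Strip a possible DataParallel prefix (no-op if it is not present).
state_dict = {k.replace('module.', '', 1): v for k, v in state_dict.items()}
restored.load_state_dict(state_dict)
restored.eval()
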