Commit acca89d5 authored by Gabriele Franch
Browse files

add print statement

parent 83625e31
......@@ -5,7 +5,7 @@ import argparse
import numpy as np
from tqdm import tqdm
import os
import glob
import sys
import core.trainer as trainer
from cikm_inter_dst_predrnn_run_taasss_utils import get_batcher, padding_taasss, change_taasss_dims
......@@ -139,7 +139,7 @@ def wrapper_train(model: Model):
if file.endswith(".pth"):
maxiter = max([int(file[:-4]), maxiter])
args.pretrained_model = f'{args.pretrained_model}/{maxiter}.pth'
print(f"Loading pretrained model {args.pretrained_model}")
print(f"Loading pretrained model {args.pretrained_model}", file=sys.stderr)
itr, eta = model.load(args.pretrained_model)
iterator = get_batcher(args)
......@@ -157,6 +157,6 @@ def wrapper_train(model: Model):
model.save(itr, eta)
print("Initializing models")
print("Initializing models", file=sys.stderr)
model = Model(args)
wrapper_train(model)
......@@ -4,6 +4,7 @@ import torch.nn as nn
from torch.optim import Adam
from core.models import predict
from pathlib import Path
import sys
class Model(object):
......@@ -64,7 +65,7 @@ class Model(object):
else:
self.network.load_state_dict(stats["model_state_dict"])
self.optimizer.load_state_dict(stats["optimizer_state_dict"])
print("Model loaded")
print("Model loaded", file=sys.stderr)
return stats['iter'], stats['eta']
def train(self, frames, mask):
......
Supports Markdown
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment