조현아

classifier tensorboard

@@ -6,7 +6,7 @@ from pprint import pprint
 import torch
 import torch.nn as nn
 import torchvision.transforms as transforms
-#from torch.utils.tensorboard import SummaryWriter
+from torch.utils.tensorboard import SummaryWriter
 from utils import *
@@ -44,8 +44,9 @@ def eval(model_path):
     test_loader = iter(get_dataloader(args, test_dataset)) ###
-    # print('\n[+] Start testing')
-    # writer = SummaryWriter(log_dir=model_path)
+    print('\n[+] Start testing')
+    os.makedirs(os.path.join(model_path, 'test'))
+    writer = SummaryWriter(log_dir=os.path.join(model_path, 'test'))
     _test_res = validate(args, model, criterion, test_loader, step=0)
     print('\n[+] Valid results')
@@ -53,7 +54,10 @@ def eval(model_path):
     print(' Loss : {:.3f}'.format(_test_res[1].data))
     print(' Infer Time(per image) : {:.3f}ms'.format(_test_res[2]*1000 / len(test_dataset)))
-    #writer.close()
+    writer.add_scalar('test/acc1', _test_res[0])
+    writer.add_scalar('test/loss', _test_res[1])
+    writer.close()

 if __name__ == '__main__':
     fire.Fire(eval)
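Note: the eval-side change boils down to creating a writer in a per-run 'test' subdirectory and logging the final metrics once. A minimal self-contained sketch of that pattern (the function and argument names here are illustrative, not from the repo):

    import os
    from torch.utils.tensorboard import SummaryWriter

    def log_test_metrics(model_path, acc1, loss):
        # exist_ok=True avoids the crash the bare os.makedirs() in the diff
        # would raise when eval is re-run on the same model_path
        log_dir = os.path.join(model_path, 'test')
        os.makedirs(log_dir, exist_ok=True)
        writer = SummaryWriter(log_dir=log_dir)
        # a single evaluation yields one point per tag; pinning it to step 0
        # keeps the scalars at a fixed x position in the dashboard
        writer.add_scalar('test/acc1', float(acc1), global_step=0)
        writer.add_scalar('test/loss', float(loss), global_step=0)
        writer.close()  # flush the event file to disk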
@@ -7,7 +7,7 @@ from pprint import pprint
 import torch.nn as nn
 import torch.backends.cudnn as cudnn
-#from torch.utils.tensorboard import SummaryWriter
+from torch.utils.tensorboard import SummaryWriter
 from networks import *
 from utils import *
@@ -27,7 +27,9 @@ def train(**kwargs):
     log_dir = os.path.join('/content/drive/My Drive/CD2 Project/runs/classify/', model_name)
     os.makedirs(os.path.join(log_dir, 'model'))
     json.dump(kwargs, open(os.path.join(log_dir, 'kwargs.json'), 'w'))
-    #writer = SummaryWriter(log_dir=log_dir)
+    os.makedirs(os.path.join(log_dir, 'train'))
+    writer = SummaryWriter(log_dir=os.path.join(log_dir, 'train'))

     if args.seed is not None:
         random.seed(args.seed)
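TensorBoard treats every subdirectory under its --logdir as a separate run, which is why this commit gives train events their own 'train' folder (and eval its own 'test' folder) rather than writing into log_dir directly. A sketch of that layout, using a hypothetical run directory in place of the Drive path:

    import os
    from torch.utils.tensorboard import SummaryWriter

    log_dir = './runs/classify/my_model'  # hypothetical run directory
    writer = SummaryWriter(log_dir=os.path.join(log_dir, 'train'))
    # SummaryWriter creates its log_dir if it is missing, so the explicit
    # os.makedirs() in the diff is not strictly required

Note that the valid/* scalars further down are written through this same writer, so they land in the 'train' run; a second writer pointed at os.path.join(log_dir, 'valid') would make the train and valid curves appear as separate runs instead.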
@@ -69,11 +71,11 @@ def train(**kwargs):
         if step % args.print_step == 0:
             print('\n[+] Training step: {}/{}\tTraining epoch: {}/{}\tElapsed time: {:.2f}min\tLearning rate: {}'.format(
                 step, args.max_step, current_epoch, max_epoch, (time.time()-start_t)/60, optimizer.param_groups[0]['lr']))
-            # writer.add_scalar('train/learning_rate', optimizer.param_groups[0]['lr'], global_step=step)
-            # writer.add_scalar('train/acc1', _train_res[0], global_step=step)
-            # writer.add_scalar('train/loss', _train_res[1], global_step=step)
-            # writer.add_scalar('train/forward_time', _train_res[2], global_step=step)
-            # writer.add_scalar('train/backward_time', _train_res[3], global_step=step)
+            writer.add_scalar('train/learning_rate', optimizer.param_groups[0]['lr'], global_step=step)
+            writer.add_scalar('train/acc1', _train_res[0], global_step=step)
+            writer.add_scalar('train/loss', _train_res[1], global_step=step)
+            writer.add_scalar('train/forward_time', _train_res[2], global_step=step)
+            writer.add_scalar('train/backward_time', _train_res[3], global_step=step)
             print(' Acc@1 : {:.3f}%'.format(_train_res[0].data.cpu().numpy()[0]*100))
             print(' Loss : {}'.format(_train_res[1].data))
             print(' FW Time : {:.3f}ms'.format(_train_res[2]*1000))
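The accuracy and loss entries of _train_res are tensors (the prints go through .data.cpu().numpy()[0]); add_scalar accepts one-element tensors, but converting with .item() makes the scalar intent explicit. A sketch of the same block under that assumption (the (acc1, loss, fw_time, bw_time) layout of _train_res is inferred from the tags; the timing entries appear to be plain floats, since the prints multiply them by 1000 directly):

    if step % args.print_step == 0:
        # log once per print interval, keyed by the global training step
        writer.add_scalar('train/learning_rate',
                          optimizer.param_groups[0]['lr'], global_step=step)
        writer.add_scalar('train/acc1', _train_res[0].item(), global_step=step)
        writer.add_scalar('train/loss', _train_res[1].item(), global_step=step)
        writer.add_scalar('train/forward_time', _train_res[2], global_step=step)
        writer.add_scalar('train/backward_time', _train_res[3], global_step=step)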
@@ -84,8 +86,8 @@ def train(**kwargs):
             valid_loader = iter(get_dataloader(args, valid_dataset))
             _valid_res = validate(args, model, criterion, valid_loader, step)
             print('\n[+] (Valid results) Valid step: {}/{}'.format(step, args.max_step))
-            # writer.add_scalar('valid/acc1', _valid_res[0], global_step=step)
-            # writer.add_scalar('valid/loss', _valid_res[1], global_step=step)
+            writer.add_scalar('valid/acc1', _valid_res[0], global_step=step)
+            writer.add_scalar('valid/loss', _valid_res[1], global_step=step)
             print(' Acc@1 : {:.3f}%'.format(_valid_res[0].data.cpu().numpy()[0]*100))
             print(' Loss : {}'.format(_valid_res[1].data))
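With both scripts writing event files, the curves can be inspected by pointing TensorBoard at the run root, e.g.

    tensorboard --logdir "/content/drive/My Drive/CD2 Project/runs/classify/"

or, inside the Colab notebook the /content/drive paths suggest, with the %load_ext tensorboard and %tensorboard --logdir ... magics. Each <model_name>/train and <model_path>/test subdirectory then shows up as its own run in the scalar dashboard.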