Showing 2 changed files with 19 additions and 13 deletions
First changed file (the evaluation script):

@@ -6,7 +6,7 @@ from pprint import pprint
 import torch
 import torch.nn as nn
 import torchvision.transforms as transforms
-#from torch.utils.tensorboard import SummaryWriter
+from torch.utils.tensorboard import SummaryWriter
 
 from utils import *
 
@@ -44,8 +44,9 @@ def eval(model_path):
 
     test_loader = iter(get_dataloader(args, test_dataset)) ###
 
-    # print('\n[+] Start testing')
-    # writer = SummaryWriter(log_dir=model_path)
+    print('\n[+] Start testing')
+    os.makedirs(os.path.join(model_path, 'test'))
+    writer = SummaryWriter(log_dir=os.path.join(model_path, 'test'))
     _test_res = validate(args, model, criterion, test_loader, step=0)
 
     print('\n[+] Valid results')
@@ -53,7 +54,10 @@ def eval(model_path):
     print(' Loss : {:.3f}'.format(_test_res[1].data))
     print(' Infer Time(per image) : {:.3f}ms'.format(_test_res[2]*1000 / len(test_dataset)))
 
-    #writer.close()
+    writer.add_scalar('test/acc1', _test_res[0])
+    writer.add_scalar('test/loss', _test_res[1])
+
+    writer.close()
 
 if __name__ == '__main__':
     fire.Fire(eval)
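For reference, below is a minimal, self-contained sketch of the one-shot logging pattern this evaluation change enables. The path and metric values are placeholders, not taken from the repo; global_step is optional in add_scalar, and exist_ok=True is used here so a re-run does not crash on an already-existing directory:

import os
from torch.utils.tensorboard import SummaryWriter

# Placeholder metrics standing in for _test_res; in the real script
# they come from validate(...).
test_acc1, test_loss = 0.873, 0.412

log_dir = os.path.join('runs', 'example', 'test')  # illustrative path
os.makedirs(log_dir, exist_ok=True)                # tolerate an existing dir
writer = SummaryWriter(log_dir=log_dir)

writer.add_scalar('test/acc1', test_acc1)          # global_step omitted: one-off value
writer.add_scalar('test/loss', test_loss)
writer.close()                                     # flush event files to disk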
Second changed file (the training script):

@@ -7,7 +7,7 @@ from pprint import pprint
 
 import torch.nn as nn
 import torch.backends.cudnn as cudnn
-#from torch.utils.tensorboard import SummaryWriter
+from torch.utils.tensorboard import SummaryWriter
 
 from networks import *
 from utils import *
@@ -27,7 +27,9 @@ def train(**kwargs):
     log_dir = os.path.join('/content/drive/My Drive/CD2 Project/runs/classify/', model_name)
     os.makedirs(os.path.join(log_dir, 'model'))
     json.dump(kwargs, open(os.path.join(log_dir, 'kwargs.json'), 'w'))
-    #writer = SummaryWriter(log_dir=log_dir)
+
+    os.makedirs(os.path.join(log_dir, 'train'))
+    writer = SummaryWriter(log_dir=os.path.join(log_dir, 'train'))
 
     if args.seed is not None:
         random.seed(args.seed)
@@ -69,11 +71,11 @@ def train(**kwargs):
         if step % args.print_step == 0:
             print('\n[+] Training step: {}/{}\tTraining epoch: {}/{}\tElapsed time: {:.2f}min\tLearning rate: {}'.format(
                 step, args.max_step, current_epoch, max_epoch, (time.time()-start_t)/60, optimizer.param_groups[0]['lr']))
-            # writer.add_scalar('train/learning_rate', optimizer.param_groups[0]['lr'], global_step=step)
-            # writer.add_scalar('train/acc1', _train_res[0], global_step=step)
-            # writer.add_scalar('train/loss', _train_res[1], global_step=step)
-            # writer.add_scalar('train/forward_time', _train_res[2], global_step=step)
-            # writer.add_scalar('train/backward_time', _train_res[3], global_step=step)
+            writer.add_scalar('train/learning_rate', optimizer.param_groups[0]['lr'], global_step=step)
+            writer.add_scalar('train/acc1', _train_res[0], global_step=step)
+            writer.add_scalar('train/loss', _train_res[1], global_step=step)
+            writer.add_scalar('train/forward_time', _train_res[2], global_step=step)
+            writer.add_scalar('train/backward_time', _train_res[3], global_step=step)
             print(' Acc@1 : {:.3f}%'.format(_train_res[0].data.cpu().numpy()[0]*100))
             print(' Loss : {}'.format(_train_res[1].data))
             print(' FW Time : {:.3f}ms'.format(_train_res[2]*1000))
@@ -84,8 +86,8 @@ def train(**kwargs):
             valid_loader = iter(get_dataloader(args, valid_dataset))
             _valid_res = validate(args, model, criterion, valid_loader, step)
             print('\n[+] (Valid results) Valid step: {}/{}'.format(step, args.max_step))
-            # writer.add_scalar('valid/acc1', _valid_res[0], global_step=step)
-            # writer.add_scalar('valid/loss', _valid_res[1], global_step=step)
+            writer.add_scalar('valid/acc1', _valid_res[0], global_step=step)
+            writer.add_scalar('valid/loss', _valid_res[1], global_step=step)
             print(' Acc@1 : {:.3f}%'.format(_valid_res[0].data.cpu().numpy()[0]*100))
             print(' Loss : {}'.format(_valid_res[1].data))
 
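The training-side changes follow the periodic-logging pattern sketched below; the loop bounds, metric, and print_step value are placeholders rather than values from the repo. Passing global_step ties each scalar to its training step, which is what lets TensorBoard draw curves over time:

import os
from torch.utils.tensorboard import SummaryWriter

log_dir = os.path.join('runs', 'example', 'train')  # illustrative path
os.makedirs(log_dir, exist_ok=True)
writer = SummaryWriter(log_dir=log_dir)

print_step = 10                                     # stand-in for args.print_step
for step in range(100):                             # stand-in for the real training loop
    loss = 1.0 / (step + 1)                         # fake metric for illustration
    if step % print_step == 0:
        # global_step places the point on the x-axis of the TensorBoard chart
        writer.add_scalar('train/loss', loss, global_step=step)
writer.close()

The resulting event files can be viewed by pointing tensorboard --logdir at the parent run directory; because the writers use separate 'train' and 'test' subdirectories, TensorBoard treats them as distinct runs and overlays their curves on shared charts.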