조현아

backup acc100 prob

@@ -12,13 +12,15 @@ from utils import *
 # command
 # python eval.py --model_path='logs/April_16_00:26:10__resnet50__None/'
 
-def eval(model_path):
+def eval(model_path, num_data):
     print('\n[+] Parse arguments')
     kwargs_path = os.path.join(model_path, 'kwargs.json')
     kwargs = json.loads(open(kwargs_path).read())
     args, kwargs = parse_args(kwargs)
+    args.batch_size = num_data
     pprint(args)
     device = torch.device('cuda' if args.use_cuda else 'cpu')
+
 
     print('\n[+] Create network')
     model = select_model(args)
@@ -45,8 +47,9 @@ def eval(model_path):
     print('\n[+] Valid results')
     print(' Acc@1 : {:.3f}%'.format(_test_res[0].data.cpu().numpy()[0]*100))
     print(' Acc@5 : {:.3f}%'.format(_test_res[1].data.cpu().numpy()[0]*100))
-    print(' Loss : {:.3f}'.format(_test_res[2].data))
-    print(' Infer Time(per image) : {:.3f}ms'.format(_test_res[3]*1000 / len(test_dataset)))
+    print(' Acc_all : {:.3f}%'.format(_test_res[2].data.cpu().numpy()[0]*100))
+    print(' Loss : {:.3f}'.format(_test_res[3].data))
+    print(' Infer Time(per image) : {:.3f}ms'.format(_test_res[4]*1000 / len(test_dataset)))
 
     writer.close()
 
...
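For context on the eval.py change above: eval() now takes a num_data argument and writes it into args.batch_size, so the test DataLoader can return the whole split as a single batch and the new Acc_all entry in _test_res presumably covers all test images at once. A minimal sketch of that assumption (not part of the commit), reusing the get_dataloader signature shown in utils.py further below; it presumes num_data equals the test-set size and that the full split fits in memory:

# Sketch only; variable names are illustrative.
test_loader = get_dataloader(args, test_dataset)    # args.batch_size == num_data
images, targets = next(iter(test_loader))
assert images.size(0) == len(test_dataset)          # one batch covers the whole split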
@@ -69,15 +69,13 @@ def train(**kwargs):
             step, args.max_step, current_epoch, max_epoch, (time.time()-start_t)/60, optimizer.param_groups[0]['lr']))
         writer.add_scalar('train/learning_rate', optimizer.param_groups[0]['lr'], global_step=step)
         writer.add_scalar('train/acc1', _train_res[0], global_step=step)
-        writer.add_scalar('train/acc5', _train_res[1], global_step=step)
-        writer.add_scalar('train/loss', _train_res[2], global_step=step)
-        writer.add_scalar('train/forward_time', _train_res[3], global_step=step)
-        writer.add_scalar('train/backward_time', _train_res[4], global_step=step)
+        writer.add_scalar('train/loss', _train_res[1], global_step=step)
+        writer.add_scalar('train/forward_time', _train_res[2], global_step=step)
+        writer.add_scalar('train/backward_time', _train_res[3], global_step=step)
         print(' Acc@1 : {:.3f}%'.format(_train_res[0].data.cpu().numpy()[0]*100))
-        print(' Acc@5 : {:.3f}%'.format(_train_res[1].data.cpu().numpy()[0]*100))
-        print(' Loss : {}'.format(_train_res[2].data))
-        print(' FW Time : {:.3f}ms'.format(_train_res[3]*1000))
-        print(' BW Time : {:.3f}ms'.format(_train_res[4]*1000))
+        print(' Loss : {}'.format(_train_res[1].data))
+        print(' FW Time : {:.3f}ms'.format(_train_res[2]*1000))
+        print(' BW Time : {:.3f}ms'.format(_train_res[3]*1000))
 
         if step % args.val_step == args.val_step-1:
             # print("\nstep, args.val_step: ", step, args.val_step)
@@ -85,11 +83,9 @@ def train(**kwargs):
             _valid_res = validate(args, model, criterion, valid_loader, step, writer)
             print('\n[+] Valid results')
             writer.add_scalar('valid/acc1', _valid_res[0], global_step=step)
-            writer.add_scalar('valid/acc5', _valid_res[1], global_step=step)
-            writer.add_scalar('valid/loss', _valid_res[2], global_step=step)
+            writer.add_scalar('valid/loss', _valid_res[1], global_step=step)
             print(' Acc@1 : {:.3f}%'.format(_valid_res[0].data.cpu().numpy()[0]*100))
-            print(' Acc@5 : {:.3f}%'.format(_valid_res[1].data.cpu().numpy()[0]*100))
-            print(' Loss : {}'.format(_valid_res[2].data))
+            print(' Loss : {}'.format(_valid_res[1].data))
 
             if _valid_res[0] >= best_acc:
                 best_acc = _valid_res[0]
...
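Note on the train() edits above: with acc5 dropped, train_step() returns (acc1, loss, forward_t, backward_t) and validate() returns (acc1, loss, infer_t), so every _train_res / _valid_res index shifts down by one and the writer.add_scalar and print calls were updated to match. An illustrative alternative that avoids the positional bookkeeping by unpacking the tuple (a refactor sketch, not part of the commit):

# Hypothetical sketch; mirrors the calls above.
acc1, loss, infer_t = validate(args, model, criterion, valid_loader, step, writer)
writer.add_scalar('valid/acc1', acc1, global_step=step)
writer.add_scalar('valid/loss', loss, global_step=step)
print(' Acc@1 : {:.3f}%'.format(acc1.data.cpu().numpy()[0]*100))
print(' Loss : {}'.format(loss.data))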
@@ -33,6 +33,26 @@ TEST_TARGET_PATH = '/content/drive/My Drive/CD2 Project/data/test_nonaug_classif
 
 current_epoch = 0
 
+
+def split_dataset(args, dataset, k):
+    # load dataset
+    X = list(range(len(dataset)))
+    Y = dataset.targets
+
+    # split to k-fold
+    assert len(X) == len(Y)
+
+    def _it_to_list(_it):
+        return list(zip(*list(_it)))
+
+    sss = StratifiedShuffleSplit(n_splits=k, random_state=args.seed, test_size=0.1)
+    Dm_indexes, Da_indexes = _it_to_list(sss.split(X, Y))
+
+    return Dm_indexes, Da_indexes
+
+
+
+
 def concat_image_features(image, features, max_features=3):
     _, h, w = image.shape
     #print("\nfsize: ", features.size()) # (1, 240, 240)
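The new split_dataset() helper relies on sklearn's StratifiedShuffleSplit (utils.py needs from sklearn.model_selection import StratifiedShuffleSplit if it is not already imported) and on the dataset exposing a .targets attribute, as torchvision classification datasets do. Note that despite the "# split to k-fold" comment, StratifiedShuffleSplit draws k independent stratified 90/10 splits rather than a disjoint k-fold partition. A hypothetical usage sketch; the Subset/DataLoader wiring and names are illustrative, not from this commit:

from torch.utils.data import DataLoader, Subset

Dm_indexes, Da_indexes = split_dataset(args, train_dataset, k=5)
for fold, (dm_idx, da_idx) in enumerate(zip(Dm_indexes, Da_indexes)):
    # dm_idx holds ~90% of the indices, da_idx the held-out ~10%, stratified by class
    dm_loader = DataLoader(Subset(train_dataset, dm_idx), batch_size=args.batch_size, shuffle=True)
    da_loader = DataLoader(Subset(train_dataset, da_idx), batch_size=args.batch_size, shuffle=False)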
@@ -222,14 +242,6 @@ def get_dataloader(args, dataset, shuffle=False, pin_memory=True):
     return data_loader
 
 
-def get_aug_dataloader(args, dataset, shuffle=False, pin_memory=True):
-    data_loader = torch.utils.data.DataLoader(dataset,
-                                               batch_size=args.batch_size,
-                                               shuffle=shuffle,
-                                               num_workers=args.num_workers,
-                                               pin_memory=pin_memory)
-    return data_loader
-
 
 def get_inf_dataloader(args, dataset):
     global current_epoch
@@ -268,9 +280,9 @@ def train_step(args, model, optimizer, scheduler, criterion, batch, step, writer
     loss = criterion(output, target)
 
     # measure accuracy and record loss
-    acc1, acc5 = accuracy(output, target, topk=(1, 5))
+    acc1 = accuracy(output, target, topk=(1, ))[0]
     acc1 /= images.size(0)
-    acc5 /= images.size(0)
+
 
     # compute gradient and do SGD step
     optimizer.zero_grad()
@@ -287,10 +299,10 @@ def train_step(args, model, optimizer, scheduler, criterion, batch, step, writer
     # writer.add_image(tag,
     #     concat_image_features(images[j], first[j]), global_step=step)
 
-    return acc1, acc5, loss, forward_t, backward_t
+    return acc1, loss, forward_t, backward_t
 
 
-#_acc1, _acc5 = accuracy(output, target, topk=(1, 5))
+#_acc1= accuracy(output, target, topk=(1,))
 def accuracy(output, target, topk=(1,)):
     """Computes the accuracy over the k top predictions for the specified values of k"""
     with torch.no_grad():
@@ -301,7 +313,9 @@ def accuracy(output, target, topk=(1,)):
         pred = pred.t()
         correct = pred.eq(target.view(1, -1).expand_as(pred))
 
-
+        # print("\noutout: ", output.size()) #(32, 1000)
+        # print("\npred: ", pred.size()) #(5, 32)
+        # print("\ncorrect: ", correct.size()) #(5, 32)
 
         res = []
         for k in topk:
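For reference, the accuracy(output, target, topk=(1, ))[0] pattern used in train_step() and validate() follows the standard PyTorch top-k recipe: the function returns a one-element tensor with the number of correct predictions, which callers then divide by images.size(0). Since only part of accuracy() appears in this diff, the following is a reconstruction of that recipe, not the repo's exact implementation:

import torch

def topk_correct(output, target, topk=(1,)):
    # output: (batch, num_classes) logits; target: (batch,) class indices
    with torch.no_grad():
        maxk = max(topk)
        _, pred = output.topk(maxk, dim=1, largest=True, sorted=True)  # (batch, maxk)
        pred = pred.t()                                                # (maxk, batch)
        correct = pred.eq(target.view(1, -1).expand_as(pred))          # (maxk, batch)
        return [correct[:k].reshape(-1).float().sum(0, keepdim=True) for k in topk]

# e.g. acc1 = topk_correct(output, target, topk=(1,))[0] / images.size(0)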
@@ -313,7 +327,7 @@ def validate(args, model, criterion, valid_loader, step, writer, device=None):
     # switch to evaluate mode
     model.eval()
 
-    acc1, acc5 = 0, 0
+    acc1 = 0
     samples = 0
     infer_t = 0
 
@@ -335,13 +349,12 @@ def validate(args, model, criterion, valid_loader, step, writer, device=None):
         infer_t += time.time() - start_t
 
         # measure accuracy and record loss
-        _acc1, _acc5 = accuracy(output, target, topk=(1, 5))
+        _acc1 = accuracy(output, target, topk=(1, ))[0]
         acc1 += _acc1
-        acc5 += _acc5
         samples += images.size(0)
 
+    #print("\nsamples: ", samples) 4640
     acc1 /= samples
-    acc5 /= samples
 
     # if writer:
     #     n_imgs = min(images.size(0), 10)
@@ -349,4 +362,4 @@ def validate(args, model, criterion, valid_loader, step, writer, device=None):
     #         writer.add_image('valid/input_image',
     #             concat_image_features(images[j], first[j]), global_step=step)
 
-    return acc1, acc5, loss, infer_t
+    return acc1, loss, infer_t
...
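Sanity check on the metric flow after these edits: accuracy() yields a per-batch correct count, validate() accumulates the counts and divides by the total number of samples, and the callers multiply by 100 for display. A worked example with made-up numbers:

# Illustrative numbers only, not measured values from this run.
correct_counts = [30.0, 30.0]                   # correct predictions in two batches of 32
samples = 64
acc1 = sum(correct_counts) / samples            # 0.9375
print(' Acc@1 : {:.3f}%'.format(acc1 * 100))    # Acc@1 : 93.750%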