Authored by 조현아 on 2020-04-17 02:57:19 +0900
Commit d3bf0a480ab044155100b9b6338ee67fdd69bacd (d3bf0a48)
1 parent: d96c0c41

vm get augmented data
Showing 7 changed files with 153 additions and 37 deletions.
code/FAA2_VM/eval.py
code/FAA2_VM/fast_auto_augment.py
code/FAA2_VM/getAugmented.py
code/FAA2_VM/read_cp.py
code/FAA2_VM/requirements.txt
code/FAA2_VM/transforms.py
code/FAA2_VM/utils.py
code/FAA2_VM/eval.py

@@ -10,7 +10,7 @@ from torch.utils.tensorboard import SummaryWriter
 from utils import *

 # command
-# python "eval.py" --model_path='logs/'
+# python eval.py --model_path='logs/April_16_00:26:10__resnet50__None/'

 def eval(model_path):
     print('\n[+] Parse arguments')
@@ -34,9 +34,17 @@ def eval(model_path):
     print('\n[+] Load dataset')
     test_transform = get_valid_transform(args, model)
-    test_dataset = get_dataset(args, test_transform, 'test')
+    #print("len(dataset): ", len(test_dataset), type(test_dataset)) # 590
+    #print('\nTEST Transform\n', test_transform)
+    test_dataset = get_dataset(args, test_transform, 'test')
+    """
+    test_transform
+    Compose(
+        Resize(size=[224, 224], interpolation=PIL.Image.BILINEAR)
+        ToTensor()
+    )
+    """
     test_loader = iter(get_dataloader(args, test_dataset)) ###

     print('\n[+] Start testing')
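For reference, the docstring added above records the validation transform in effect. A minimal runnable sketch of the same pipeline, assuming get_valid_transform (whose body is not shown in this diff) returns something equivalent for this model:

    from PIL import Image
    from torchvision import transforms

    # Sketch of the transform printed in the docstring above.
    test_transform = transforms.Compose([
        transforms.Resize([224, 224], interpolation=Image.BILINEAR),
        transforms.ToTensor(),
    ])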
code/FAA2_VM/fast_auto_augment.py

@@ -17,15 +17,15 @@ from utils import *
 DEFALUT_CANDIDATES = [
     ShearXY,
     TranslateXY,
-    # Rotate,
+    Rotate,
     # AutoContrast,
     # Invert,
-    Equalize,
-    Solarize,
+    Equalize, # Histogram Equalize --> white tumor
+    # Solarize,
     Posterize,
-    Contrast,
+    # Contrast,
     # Color,
-    Brightness,
+    # Brightness,
     Sharpness,
     Cutout,
     # SamplePairing,
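The net effect is a mostly geometric candidate set: Rotate is re-enabled while Solarize, Contrast, and Brightness are switched off, and the inline comment warns that histogram equalization pushes tumor tissue toward white on this data. For a concrete look at the flagged operation, a self-contained PIL sketch (a synthetic gradient standing in for a scan):

    from PIL import Image, ImageOps

    img = Image.linear_gradient('L')   # 256x256 synthetic grayscale ramp
    eq = ImageOps.equalize(img)        # histogram equalization, the op the comment flags
    print(img.getextrema(), eq.getextrema())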
@@ -154,8 +154,9 @@ def search_subpolicies_hyperopt(args, transform_candidates, child_model, dataset
     subpolicy = transforms.Compose([
         ## baseline augmentation
         transforms.Pad(4),
-        transforms.RandomCrop(32),
+        transforms.RandomCrop(32), # transforms.RandomCrop(240), #32 ->240
         transforms.RandomHorizontalFlip(),
+        transforms.Resize([240, 240]),
         ## policy
         *subpolicy,
         ## to tensor
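Note the ordering: Pad(4) and RandomCrop(32) still operate at the original 32x32 scale, and the new Resize([240, 240]) upsamples before the searched sub-policy and the tensor conversion run. A runnable sketch of the composed pipeline, with an empty placeholder standing in for the sub-policy that the hyperopt search supplies:

    from torchvision import transforms

    subpolicy = []  # placeholder; the search fills this with candidate transforms
    pipeline = transforms.Compose([
        transforms.Pad(4),                 # baseline augmentation at 32x32
        transforms.RandomCrop(32),
        transforms.RandomHorizontalFlip(),
        transforms.Resize([240, 240]),     # upsample before the policy runs
        *subpolicy,                        # searched sub-policy
        transforms.ToTensor(),             # the "## to tensor" step in the diff
    ])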
@@ -191,8 +192,8 @@ def process_fn(args_str, model, dataset, Dm_indx, Da_indx, T, transform_candidat
     return _transform

-#fast_auto_augment(args, model, K=4, B=1, num_process=4)
-def fast_auto_augment(args, model, transform_candidates=None, K=5, B=100, T=2, N=10, num_process=5):
+#fast_auto_augment(args, model, K=4, B=100, num_process=4)
+def fast_auto_augment(args, model, transform_candidates=None, K=5, B=100, T=2, N=2, num_process=5):
     args_str = json.dumps(args._asdict())
     dataset = get_dataset(args, None, 'trainval')
     num_process = min(torch.cuda.device_count(), num_process)
@@ -215,6 +216,6 @@ def fast_auto_augment(args, model, transform_candidates=None, K=5, B=100, T=2, N
     for future in futures:
         transform.extend(future.result())

-    transform = transforms.RandomChoice(transform)
+    # transform = transforms.RandomChoice(transform)

     return transform
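Commenting out the RandomChoice wrap changes the return value from a single random-dispatch transform to a plain Python list, which is what lets the new getAugmented.py below unpickle augmentation.cp and iterate the policies one at a time. A sketch of the difference, using two stand-in policies:

    from torchvision import transforms

    policies = [transforms.RandomHorizontalFlip(), transforms.RandomVerticalFlip()]

    chooser = transforms.RandomChoice(policies)  # old behaviour: ONE policy picked per call
    as_list = policies                           # new behaviour: callers enumerate every policy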
code/FAA2_VM/getAugmented.py
0 → 100644

import os
import fire
import json
from pprint import pprint
import pickle

import torch
import torch.nn as nn
from torch.utils.tensorboard import SummaryWriter

from utils import *

# command
# python getAugmented.py --model_path='logs/April_16_21:50:17__resnet50__None/'

def eval(model_path):
    print('\n[+] Parse arguments')
    kwargs_path = os.path.join(model_path, 'kwargs.json')
    kwargs = json.loads(open(kwargs_path).read())
    args, kwargs = parse_args(kwargs)
    pprint(args)
    device = torch.device('cuda' if args.use_cuda else 'cpu')

    cp_path = os.path.join(model_path, 'augmentation.cp')
    writer = SummaryWriter(log_dir=model_path)

    print('\n[+] Load transform')
    # list
    with open(cp_path, 'rb') as f:
        aug_transform_list = pickle.load(f)

    augmented_image_list = [torch.Tensor(240, 0)] * len(get_dataset(args, None, 'test'))

    print('\n[+] Load dataset')
    for aug_idx, aug_transform in enumerate(aug_transform_list):
        dataset = get_dataset(args, aug_transform, 'test')
        loader = iter(get_aug_dataloader(args, dataset))

        for i, (images, target) in enumerate(loader):
            images = images.view(240, 240)
            # concat image
            augmented_image_list[i] = torch.cat([augmented_image_list[i], images], dim=1)
            if i % 1000 == 0:
                print("\nimages size: ", augmented_image_list[i].size())  # [240, 240]
            break
        # break

    # print(augmented_image_list)
    print('\n[+] Write on tensorboard')
    if writer:
        for i, data in enumerate(augmented_image_list):
            tag = 'img/' + str(i)
            writer.add_image(tag, data.view(1, 240, -1), global_step=0)
            break
        writer.close()

    # if writer:
    #     for j in range():
    #         tag = 'img/' + str(img_count) + '_' + str(j)
    #         # writer.add_image(tag,
    #         #     concat_image_features(images[j], first[j]), global_step=step)
    #         # if j > 0:
    #         #     fore = concat_image_features(fore, images[j])
    #         writer.add_image(tag, fore, global_step=0)
    #         img_count = img_count + 1
    #     writer.close()

if __name__ == '__main__':
    fire.Fire(eval)
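The torch.Tensor(240, 0) seed above is an empty zero-width tensor, so each torch.cat(..., dim=1) appends one augmented view and builds a horizontal strip per test image. A standalone sketch of the same trick, with random data standing in for the 240x240 grayscale images:

    import torch

    strip = torch.Tensor(240, 0)             # empty (240, 0) seed
    for _ in range(3):                       # e.g. three augmentation policies
        image = torch.rand(240, 240)         # stand-in for one augmented image
        strip = torch.cat([strip, image], dim=1)
    print(strip.size())                      # torch.Size([240, 720])
    # strip.view(1, 240, -1) is then a CHW tensor that SummaryWriter.add_image accepts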
code/FAA2_VM/read_cp.py
0 → 100644

import pickle

with open('logs/April_16_21:50:17__resnet50__None/augmentation.cp', 'rb') as f:
    data = pickle.load(f)

print(data)
print(type(data))
\ No newline at end of file
code/FAA2_VM/requirements.txt

@@ -4,7 +4,7 @@ hyperopt
 pillow==6.2.1
 natsort
 fire
-torchvision==0.4.1
+torchvision==0.2.2
 torch==1.1.0
 pandas
 sklearn
\ No newline at end of file
code/FAA2_VM/transforms.py

@@ -40,12 +40,12 @@ class TranslateXY(BaseTransform):
         return t(img)

-# class Rotate(BaseTransform):
-#     def transform(self, img):
-#         degrees = self.mag * 360
-#         t = transforms.RandomRotation(degrees, Image.BILINEAR)
-#         return t(img)
+class Rotate(BaseTransform):
+    def transform(self, img):
+        degrees = self.mag * 360
+        t = transforms.RandomRotation(degrees, Image.BILINEAR)
+        return t(img)

 class AutoContrast(BaseTransform):
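The re-enabled Rotate scales its magnitude into a degree range. A standalone sketch of the same mapping, assuming mag lies in [0, 1] as for the other transforms in this file:

    from PIL import Image
    from torchvision import transforms

    mag = 0.25
    degrees = mag * 360                              # 90: rotations drawn from [-90, 90]
    t = transforms.RandomRotation(degrees, Image.BILINEAR)
    # rotated = t(pil_image)                         # apply to a PIL image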
@@ -55,10 +55,10 @@ class AutoContrast(BaseTransform):
         return ImageOps.autocontrast(img, cutoff=cutoff)

-# class Invert(BaseTransform):
-#     def transform(self, img):
-#         return ImageOps.invert(img)
+class Invert(BaseTransform):
+    def transform(self, img):
+        return ImageOps.invert(img)

 class Equalize(BaseTransform):
@@ -88,11 +88,11 @@ class Contrast(BaseTransform):
         return ImageEnhance.Contrast(img).enhance(factor)

-# class Color(BaseTransform):
-#     def transform(self, img):
-#         factor = self.mag * 10
-#         return ImageEnhance.Color(img).enhance(factor)
+class Color(BaseTransform):
+    def transform(self, img):
+        factor = self.mag * 10
+        return ImageEnhance.Color(img).enhance(factor)

 class Brightness(BaseTransform):
@@ -159,7 +159,10 @@ class CutoutOp(object):
             # print("\nnp.asarray(img) max: \n", np.amax(np.asarray(img)), np.asarray(img).shape) #(32, 32, 32)
             # img = Image.fromarray(mask*np.asarray(img)) #(32, 32, 32)

-            mask = np.reshape(mask, (32, 32))
+            #mask = np.reshape(mask, (32, 32)) # (32, 32) -> (240, 240)
+            # getAugmented.py
+            mask = np.reshape(mask, (240, 240))
+            #print("\n(img) max: \n", np.amax(np.asarray(img)), np.asarray(img).shape) #[0, 255] (32, 32)

             # print("\nmask: ", mask.shape) #(32, 32)
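The reshape change hard-codes the cutout mask to the new 240x240 working resolution. A self-contained sketch of how such a mask multiplies into a PIL image, mirroring the commented Image.fromarray line above (toy mask, hypothetical cutout region):

    import numpy as np
    from PIL import Image

    img = Image.new('L', (240, 240), color=200)    # stand-in grayscale image
    mask = np.ones(240 * 240, dtype=np.uint8)
    mask[:40 * 240] = 0                            # zero out a 40-row band (toy cutout)
    mask = np.reshape(mask, (240, 240))
    out = Image.fromarray(mask * np.asarray(img))  # masked region goes black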
code/FAA2_VM/utils.py

@@ -59,11 +59,15 @@ def split_dataset(args, dataset, k):
     return Dm_indexes, Da_indexes

 # (images[j], first[j]), global_step=step)
 # concat_image_features(images[j], first[j])
 def concat_image_features(image, features, max_features=3):
     _, h, w = image.shape
+    #print("\nfsize: ", features.size()) # (1, 240, 240)
+    # features.size(0) = 64
+    #print(features.size(0))
     #max_features = min(features.size(0), max_features)
     max_features = min(features.size(0), max_features)
+    max_features = features.size(0)
     image_feature = image.clone()

     for i in range(max_features):
@@ -139,12 +143,12 @@ def parse_args(kwargs):
     kwargs['use_cuda'] = kwargs['use_cuda'] if 'use_cuda' in kwargs else True
     kwargs['use_cuda'] = kwargs['use_cuda'] and torch.cuda.is_available()
     kwargs['num_workers'] = kwargs['num_workers'] if 'num_workers' in kwargs else 4
-    kwargs['print_step'] = kwargs['print_step'] if 'print_step' in kwargs else 500
-    kwargs['val_step'] = kwargs['val_step'] if 'val_step' in kwargs else 500
+    kwargs['print_step'] = kwargs['print_step'] if 'print_step' in kwargs else 50
+    kwargs['val_step'] = kwargs['val_step'] if 'val_step' in kwargs else 50
     kwargs['scheduler'] = kwargs['scheduler'] if 'scheduler' in kwargs else 'exp'
-    kwargs['batch_size'] = kwargs['batch_size'] if 'batch_size' in kwargs else 64
+    kwargs['batch_size'] = kwargs['batch_size'] if 'batch_size' in kwargs else 8
     kwargs['start_step'] = kwargs['start_step'] if 'start_step' in kwargs else 0
-    kwargs['max_step'] = kwargs['max_step'] if 'max_step' in kwargs else 5000
+    kwargs['max_step'] = kwargs['max_step'] if 'max_step' in kwargs else 100
     kwargs['fast_auto_augment'] = kwargs['fast_auto_augment'] if 'fast_auto_augment' in kwargs else False
     kwargs['augment_path'] = kwargs['augment_path'] if 'augment_path' in kwargs else None
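Each of these defaults repeats the same `kwargs[k] if k in kwargs else default` pattern; an equivalent, more compact sketch using dict.get (not the repo's code, just an illustration of the pattern):

    kwargs = {'batch_size': 16}                      # hypothetical parsed arguments
    defaults = {'print_step': 50, 'val_step': 50, 'batch_size': 8, 'max_step': 100}
    for key, value in defaults.items():
        kwargs[key] = kwargs.get(key, value)         # keep caller's value, else default
    print(kwargs['batch_size'], kwargs['max_step'])  # 16 100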
@@ -225,6 +229,7 @@ class CustomDataset(Dataset):
         if self.transform is not None:
-            tensor_image = self.transform(image)
+            tensor_image = self.transform(image) ##

         return tensor_image, targets

 def get_dataset(args, transform, split='train'):
@@ -276,6 +281,14 @@ def get_dataloader(args, dataset, shuffle=False, pin_memory=True):
                                               pin_memory=pin_memory)
     return data_loader

+def get_aug_dataloader(args, dataset, shuffle=False, pin_memory=True):
+    data_loader = torch.utils.data.DataLoader(dataset,
+                                              # batch_size=args.batch_size,
+                                              shuffle=shuffle,
+                                              num_workers=args.num_workers,
+                                              pin_memory=pin_memory)
+    return data_loader
+
 def get_inf_dataloader(args, dataset):
     global current_epoch
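With batch_size commented out, DataLoader falls back to its default of 1, so each batch holds exactly one image; that is why getAugmented.py can flatten a batch straight to (240, 240). A minimal sketch of that reshape:

    import torch

    batch = torch.rand(1, 1, 240, 240)   # one grayscale image per batch (batch_size=1)
    image = batch.view(240, 240)         # the reshape getAugmented.py performs
    print(image.size())                  # torch.Size([240, 240])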
@@ -304,7 +317,7 @@ def get_train_transform(args, model, log_dir=None):
         os.system('cp {} {}'.format(args.augment_path, os.path.join(log_dir, 'augmentation.cp')))
     else:
-        transform = fast_auto_augment(args, model, K=4, B=1, num_process=4) ##
+        transform = fast_auto_augment(args, model, K=4, B=100, num_process=4) ##

         if log_dir:
             cp.dump(transform, open(os.path.join(log_dir, 'augmentation.cp'), 'wb'))
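Here `cp` is the module alias utils.py uses for checkpoint pickling (its import lies outside this hunk). Assuming it is pickle-compatible, the augmentation.cp round trip consumed by read_cp.py and getAugmented.py looks like this sketch (hypothetical path and payload):

    import os
    import pickle

    log_dir = 'logs/example_run'                     # hypothetical log directory
    os.makedirs(log_dir, exist_ok=True)
    transform = ['policy-0', 'policy-1']             # stand-in for the searched transform list
    path = os.path.join(log_dir, 'augmentation.cp')
    with open(path, 'wb') as f:
        pickle.dump(transform, f)                    # what get_train_transform writes
    with open(path, 'rb') as f:
        assert pickle.load(f) == transform           # what read_cp.py / getAugmented.py load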
@@ -436,7 +449,7 @@ def validate(args, model, criterion, valid_loader, step, writer, device=None):
         samples += images.size(0)

         if writer:
-            # print("\n3 images.size(0): ", images.size(0))
+            # print("\n images.size(0): ", images.size(0)) # batch size (last = n(imgs)%batch_size)
             n_imgs = min(images.size(0), 10)
             for j in range(n_imgs):
                 tag = 'valid/' + str(img_count)