신은섭(Shin Eun Seop)

t2

@@ -84,10 +84,10 @@ if __name__ == '__main__':
     # User options
     args.add_argument('--output', type=int, default=1)
-    args.add_argument('--epochs', type=int, default=100)
+    args.add_argument('--epochs', type=int, default=200)
     args.add_argument('--batch', type=int, default=3000)
     args.add_argument('--strmaxlen', type=int, default=400)
-    args.add_argument('--embedding', type=int, default=30)
+    args.add_argument('--embedding', type=int, default=50)
     args.add_argument('--threshold', type=float, default=0.5)
     config = args.parse_args()
@@ -97,18 +97,17 @@ if __name__ == '__main__':
     # Model specification
     input_size = config.embedding*config.strmaxlen
     output_size = 1
-    learning_rate = 0.001
+    learning_rate = 0.0003
     character_size = 251
     x = tf.placeholder(tf.int32, [None, config.strmaxlen])
     y_ = tf.placeholder(tf.float32, [None, output_size])
     keep_probs = tf.placeholder(tf.float32)
     # Embedding
-    with tf.name_scope('embedding'):
-        char_embedding = tf.get_variable('char_embedding', [character_size, config.embedding])
-        embedded_chars_base = tf.nn.embedding_lookup(char_embedding, x)
-        embedded = tf.expand_dims(embedded_chars_base, -1)
-        print("emb", embedded.shape)
+    char_embedding = tf.get_variable('char_embedding', [character_size, config.embedding])
+    embedded_chars_base = tf.nn.embedding_lookup(char_embedding, x)
+    embedded = tf.expand_dims(embedded_chars_base, -1)
+    print("emb", embedded.shape)
     # MODEL
     l2_conv = tf.layers.conv2d(embedded, 256, [2, config.embedding], activation=tf.nn.relu)
# -*- coding: utf-8 -*-
"""
Copyright 2018 NAVER Corp.
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and
associated documentation files (the "Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial
portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import os
import numpy as np
from kor_char_parser import decompose_str_as_one_hot
class MovieReviewDataset():
    """
    A Python object that reads the movie review data and returns it as (data, label) tuples.
    """
    def __init__(self, dataset_path: str, max_length: int):
        """
        initializer
        :param dataset_path: dataset root path
        :param max_length: maximum string length
        """
        # Paths to the data and the labels
        data_review = os.path.join(dataset_path, 'train', 'train_data')
        data_label = os.path.join(dataset_path, 'train', 'train_label')

        # Read the movie review data and run preprocessing
        with open(data_review, 'rt', encoding='utf-8') as f:
            self.reviews = preprocess(f.readlines(), max_length)
        # Read the movie review labels and preprocess them as well
        with open(data_label) as f:
            self.labels = [[np.float32(x)] for x in f.readlines()]

    def __len__(self):
        """
        :return: the total number of data points
        """
        return len(self.reviews)

    def __getitem__(self, idx):
        """
        :param idx: index of the requested data
        :return: the (data, label) pair for that index
        """
        return self.reviews[idx], self.labels[idx]
def preprocess(data: list, max_length: int):
    """
    Converts raw input into a format the deep learning model can train on.
    The provided algorithm is char2vec, and because the default model is an MLP,
    it returns vectors whose size is fixed for every input. Strings longer than
    the fixed length are truncated; shorter ones are zero-padded.
    :param data: list of strings ([string1, string2, ...])
    :param max_length: maximum string length
    :return: list of vectors ([[0, 1, 5, 6], [5, 4, 10, 200], ...]) when max_length is 4
    """
    vectorized_data = [decompose_str_as_one_hot(datum, warning=False) for datum in data]
    zero_padding = np.zeros((len(data), max_length), dtype=np.int32)
    for idx, seq in enumerate(vectorized_data):
        length = len(seq)
        if length >= max_length:
            length = max_length
            zero_padding[idx, :length] = np.array(seq)[:length]
        else:
            zero_padding[idx, :length] = np.array(seq)
    return zero_padding
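
# Usage sketch (not part of the original file): a minimal smoke test for
# preprocess(), assuming kor_char_parser.py from this repo is importable.
# Decomposed reviews shorter than max_length are zero-padded; longer ones are truncated.
if __name__ == '__main__':
    sample = ['재미있어요', '별로였어요']  # hypothetical sample reviews
    vectors = preprocess(sample, max_length=8)
    print(vectors.shape)  # (2, 8): one fixed-length int32 row per review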
# -*- coding: utf-8 -*-
import argparse
import os
import numpy as np
import tensorflow as tf
import nsml
from nsml import DATASET_PATH, HAS_DATASET, IS_ON_NSML
from dataset import MovieReviewDataset, preprocess
# DONOTCHANGE: They are reserved for nsml
# This is for nsml leaderboard
def bind_model(sess, config):
    # Saves the trained model.
    def save(dir_name, *args):
        # directory
        os.makedirs(dir_name, exist_ok=True)
        saver = tf.train.Saver()
        saver.save(sess, os.path.join(dir_name, 'model'))

    # Loads a previously saved model.
    def load(dir_name, *args):
        saver = tf.train.Saver()
        # find checkpoint
        ckpt = tf.train.get_checkpoint_state(dir_name)
        if ckpt and ckpt.model_checkpoint_path:
            checkpoint = os.path.basename(ckpt.model_checkpoint_path)
            saver.restore(sess, os.path.join(dir_name, checkpoint))
        else:
            # NotImplementedError (not the NotImplemented constant) is the raisable class
            raise NotImplementedError('No checkpoint!')
        print('Model loaded')

    def infer(raw_data, **kwargs):
        """
        :param raw_data: receives the raw input (here, strings)
        :param kwargs:
        :return:
        """
        # Call the preprocess function written in dataset.py to turn the strings into vectors
        preprocessed_data = preprocess(raw_data, config.strmaxlen)
        # Feed the input to the saved model and get the prediction back
        pred = sess.run(output_sigmoid, feed_dict={x: preprocessed_data})
        clipped = np.array(pred > config.threshold, dtype=np.int)
        # DONOTCHANGE: They are reserved for nsml
        # The result must be sent as [(probability, 0 or 1)] to appear on the leaderboard;
        # the probability values themselves do not affect the leaderboard result
        return list(zip(pred.flatten(), clipped.flatten()))

    # DONOTCHANGE: They are reserved for nsml
    # Exposes the functions above under the names nsml expects.
    nsml.bind(save=save, load=load, infer=infer)
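
# How the bound functions are used in this file: nsml.save(epoch) in the training
# loop triggers save(), resuming from --pause triggers load(), and nsml.infer()
# in test_local mode (as well as leaderboard submission) triggers infer().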
def _batch_loader(iterable, n=1):
    """
    Slices the data into batch-size chunks. Plays the same role as PyTorch's DataLoader.
    :param iterable: a data list, or another format
    :param n: batch size
    :return:
    """
    length = len(iterable)
    for n_idx in range(0, length, n):
        yield iterable[n_idx:min(n_idx + n, length)]
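
# Example (illustrative values): list(_batch_loader([1, 2, 3, 4, 5], n=2))
# yields [1, 2], [3, 4], [5]; the final batch may be smaller than n.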
# Helper initializers (not used by the tf.layers model below).
def weight_variable(shape):
    initial = tf.truncated_normal(shape, stddev=0.1)
    return tf.Variable(initial)

def bias_variable(shape):
    initial = tf.constant(0.1, shape=shape)
    return tf.Variable(initial)
if __name__ == '__main__':
    args = argparse.ArgumentParser()
    # DONOTCHANGE: They are reserved for nsml
    args.add_argument('--mode', type=str, default='train')
    args.add_argument('--pause', type=int, default=0)
    args.add_argument('--iteration', type=str, default='0')
    # User options
    args.add_argument('--output', type=int, default=1)
    args.add_argument('--epochs', type=int, default=10)
    args.add_argument('--batch', type=int, default=3000)
    args.add_argument('--strmaxlen', type=int, default=300)
    args.add_argument('--embedding', type=int, default=50)
    args.add_argument('--threshold', type=float, default=0.5)
    config = args.parse_args()

    if not HAS_DATASET and not IS_ON_NSML:  # It is not running on nsml
        DATASET_PATH = '../sample_data/movie/'

    # Model specification
    input_size = config.embedding * config.strmaxlen
    output_size = 1
    learning_rate = 0.001
    character_size = 251

    x = tf.placeholder(tf.int32, [None, config.strmaxlen])
    y_ = tf.placeholder(tf.float32, [None, output_size])
    keep_probs = tf.placeholder(tf.float32)

    # Embedding: map each character id to a dense vector, then add a channel
    # dimension so the result can feed 2-D convolutions.
    char_embedding = tf.get_variable('char_embedding', [character_size, config.embedding])
    embedded_chars_base = tf.nn.embedding_lookup(char_embedding, x)
    embedded = tf.expand_dims(embedded_chars_base, -1)
    print("emb", embedded.shape)
    # MODEL: two parallel convolution branches (filter heights 2 and 3) over the
    # character embeddings, each max-pooled over time and concatenated, TextCNN-style.
    l2_conv = tf.layers.conv2d(embedded, 256, [2, config.embedding], activation=tf.nn.relu)
    print("l2", l2_conv.shape)
    # The conv output height is strmaxlen-2+1, so pool over all of it; the original
    # pooled with character_size (the vocabulary size), which is not a sequence dimension.
    l2_pool = tf.layers.max_pooling2d(l2_conv, [config.strmaxlen - 2 + 1, 1], strides=(1, 1))
    print("l2 pool", l2_pool.shape)
    l3_conv = tf.layers.conv2d(embedded, 256, [3, config.embedding], activation=tf.nn.relu)
    print("l3", l3_conv.shape)
    l3_pool = tf.layers.max_pooling2d(l3_conv, [config.strmaxlen - 3 + 1, 1], strides=(1, 1))
    print("l3 pool", l3_pool.shape)
    concat = tf.concat([l2_pool, l3_pool], 3)
    print('concat', concat.shape)
    flatten = tf.contrib.layers.flatten(concat)
    print('flatten', flatten.shape)
    dense = tf.layers.dense(flatten, 256, activation=tf.nn.relu)
    # tf.layers.dropout takes a drop rate, not a keep probability; with the training
    # flag left at its default (False) the layer is the identity either way.
    drop = tf.layers.dropout(dense, rate=1 - keep_probs)
    output_logits = tf.layers.dense(drop, output_size)
    output_sigmoid = tf.nn.sigmoid(output_logits)

    # Loss and optimizer. The original wrapped tf.nn.log_softmax of the single-unit
    # output in tf.log, which is identically zero and gives no gradient; the
    # numerically stable sigmoid cross-entropy on the logits replaces it, and the
    # 10x output scaling is dropped so predictions stay in [0, 1], matching the
    # 0.5 threshold used in infer().
    binary_cross_entropy = tf.reduce_mean(
        tf.nn.sigmoid_cross_entropy_with_logits(labels=y_, logits=output_logits))
    train_step = tf.train.AdamOptimizer(learning_rate).minimize(binary_cross_entropy)
    sess = tf.InteractiveSession()
    tf.global_variables_initializer().run()

    # DONOTCHANGE: Reserved for nsml
    bind_model(sess=sess, config=config)

    # DONOTCHANGE: Reserved for nsml
    if config.pause:
        nsml.paused(scope=locals())
    if config.mode == 'train':
        # Load the data.
        dataset = MovieReviewDataset(DATASET_PATH, config.strmaxlen)
        dataset_len = len(dataset)
        # Number of batches per epoch.
        one_batch_size = dataset_len // config.batch
        if dataset_len % config.batch != 0:
            one_batch_size += 1
        # Run training epoch by epoch.
        for epoch in range(config.epochs):
            avg_loss = 0.0
            for i, (data, labels) in enumerate(_batch_loader(dataset, config.batch)):
                _, loss = sess.run([train_step, binary_cross_entropy],
                                   feed_dict={x: data, y_: labels, keep_probs: 1.})
                print('Batch : ', i + 1, '/', one_batch_size,
                      ', BCE in this minibatch: ', float(loss))
                avg_loss += float(loss)
            print('epoch:', epoch, ' train_loss:', float(avg_loss / one_batch_size))
            nsml.report(summary=True, scope=locals(), epoch=epoch, epoch_total=config.epochs,
                        train__loss=float(avg_loss / one_batch_size), step=epoch)
            # DONOTCHANGE (You can decide how often you want to save the model)
            nsml.save(epoch)
    # Used in local test mode.
    # If the output looks like the following, it can be submitted with nsml submit:
    # [(0.3, 0), (0.7, 1), ... ]
    elif config.mode == 'test_local':
        with open(os.path.join(DATASET_PATH, 'train/train_data'), 'rt', encoding='utf-8') as f:
            queries = f.readlines()
        res = []
        for batch in _batch_loader(queries, config.batch):
            temp_res = nsml.infer(batch)
            res += temp_res
        print(res)
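
# Usage sketch (not part of the original file; assumes the nsml client library is
# installed and sample data exists at ../sample_data/movie/). The two modes above
# can then be run locally with the argparse options defined in this file:
#   python main.py --mode train --epochs 10
#   python main.py --mode test_local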