JeungEunLee

add DCGAN

{
"cells": [
{
"cell_type": "code",
"execution_count": 1,
"metadata": {
"scrolled": true
},
"outputs": [],
"source": [
"%matplotlib inline\n",
"import os\n",
"from glob import glob\n",
"import numpy as np\n",
"from matplotlib import pyplot\n",
"from PIL import Image\n",
"import tensorflow as tf\n",
"\n",
"##README : IF output folder already existed in same route, it makes error. change past output folder's name ##"
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {},
"outputs": [],
"source": [
"class Dataset(object):\n",
" def __init__(self, data_files):\n",
" IMAGE_WIDTH = 25\n",
" IMAGE_HEIGHT = 25\n",
" self.image_mode = 'RGB'\n",
" image_channels = 3\n",
" self.data_files = data_files\n",
" self.shape = len(data_files), IMAGE_WIDTH, IMAGE_HEIGHT, image_channels\n",
"\n",
" def get_batches(self, batch_size):\n",
" IMAGE_MAX_VALUE = 255\n",
" current_index = 0\n",
" while current_index + batch_size <= self.shape[0]:\n",
" data_batch = get_batch(\n",
" self.data_files[current_index:current_index + batch_size],\n",
" self.shape[1],self.shape[2],\n",
" self.image_mode)\n",
" \n",
" current_index += batch_size\n",
" \n",
" yield data_batch / IMAGE_MAX_VALUE - 0.5\n",
"\n",
"\n",
"def model_inputs(image_width, image_height, image_channels, z_dim):\n",
" real_input_images = tf.placeholder(tf.float32, [None, image_width, image_height, image_channels], 'real_input_images')\n",
" input_z = tf.placeholder(tf.float32, [None, z_dim], 'input_z')\n",
" learning_rate = tf.placeholder(tf.float32, [], 'learning_rate')\n",
" return real_input_images, input_z, learning_rate\n"
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {},
"outputs": [],
"source": [
"def discriminator(images, reuse=False, alpha=0.2, keep_prob=0.5):\n",
" with tf.variable_scope('discriminator', reuse=reuse):\n",
" # Input layer is 25x25xn\n",
" # Convolutional layer, 13x13x64\n",
" conv1 = tf.layers.conv2d(images, 64, 5, 2, padding='same', kernel_initializer=tf.contrib.layers.xavier_initializer())\n",
" lrelu1 = tf.maximum(alpha * conv1, conv1)\n",
" drop1 = tf.layers.dropout(lrelu1, keep_prob)\n",
" \n",
" # Strided convolutional layer, 7x7x128\n",
" conv2 = tf.layers.conv2d(drop1, 128, 5, 2, 'same', use_bias=False)\n",
" bn2 = tf.layers.batch_normalization(conv2)\n",
" lrelu2 = tf.maximum(alpha * bn2, bn2)\n",
" drop2 = tf.layers.dropout(lrelu2, keep_prob)\n",
" \n",
" # Strided convolutional layer, 4x4x256\n",
" conv3 = tf.layers.conv2d(drop2, 256, 5, 2, 'same', use_bias=False)\n",
" bn3 = tf.layers.batch_normalization(conv3)\n",
" lrelu3 = tf.maximum(alpha * bn3, bn3)\n",
" drop3 = tf.layers.dropout(lrelu3, keep_prob)\n",
" \n",
" # fully connected\n",
" flat = tf.reshape(drop3, (-1, 4*4*256))\n",
" logits = tf.layers.dense(flat, 1)\n",
" out = tf.sigmoid(logits)\n",
" \n",
" return out, logits"
]
},
{
"cell_type": "code",
"execution_count": 4,
"metadata": {},
"outputs": [],
"source": [
"def generator(z, out_channel_dim, is_train=True, alpha=0.2, keep_prob=0.5):\n",
" # TODO: Implement Function\n",
" with tf.variable_scope('generator', reuse=(not is_train)):\n",
" # First fully connected layer, 8x4x512\n",
" fc = tf.layers.dense(z, 4*4*1024, use_bias=False)\n",
" fc = tf.reshape(fc, (-1, 4, 4, 1024))\n",
" bn0 = tf.layers.batch_normalization(fc, training=is_train)\n",
" lrelu0 = tf.maximum(alpha * bn0, bn0)\n",
" drop0 = tf.layers.dropout(lrelu0, keep_prob, training=is_train)\n",
" \n",
" # Deconvolution, 16x8x256\n",
" conv1 = tf.layers.conv2d_transpose(drop0, 512,3, 1, 'valid', use_bias=False)\n",
" bn1 = tf.layers.batch_normalization(conv1, training=is_train)\n",
" lrelu1 = tf.maximum(alpha * bn1, bn1)\n",
" drop1 = tf.layers.dropout(lrelu1, keep_prob, training=is_train)\n",
" \n",
" # Deconvolution, 32x 128\n",
" conv2 = tf.layers.conv2d_transpose(drop1, 256, 3, 2, 'same', use_bias=False)\n",
" bn2 = tf.layers.batch_normalization(conv2, training=is_train)\n",
" lrelu2 = tf.maximum(alpha * bn2, bn2)\n",
" drop2 = tf.layers.dropout(lrelu2, keep_prob, training=is_train)\n",
" \n",
" # Output layer, 28x28xn\n",
" logits = tf.layers.conv2d_transpose(drop2, out_channel_dim, 3, 2, 'valid')\n",
" \n",
" out = tf.tanh(logits)\n",
" \n",
" print(fc.shape)\n",
" print(drop1.shape)\n",
" print(drop2.shape)\n",
" print(logits.shape)\n",
" \n",
" return out"
]
},
{
"cell_type": "code",
"execution_count": 5,
"metadata": {},
"outputs": [],
"source": [
"def model_loss(input_real, input_z, out_channel_dim, alpha=0.2, smooth_factor=0.1):\n",
" d_model_real, d_logits_real = discriminator(input_real, alpha=alpha)\n",
" \n",
" d_loss_real = tf.reduce_mean(\n",
" tf.nn.sigmoid_cross_entropy_with_logits(logits=d_logits_real,\n",
" labels=tf.ones_like(d_model_real) * (1 - smooth_factor)))\n",
" \n",
" input_fake = generator(input_z, out_channel_dim, alpha=alpha)\n",
" d_model_fake, d_logits_fake = discriminator(input_fake, reuse=True, alpha=alpha)\n",
" \n",
" d_loss_fake = tf.reduce_mean(\n",
" tf.nn.sigmoid_cross_entropy_with_logits(logits=d_logits_fake, labels=tf.zeros_like(d_model_fake)))\n",
" \n",
" g_loss = tf.reduce_mean(\n",
" tf.nn.sigmoid_cross_entropy_with_logits(logits=d_logits_fake, labels=tf.ones_like(d_model_fake)))\n",
"\n",
" return d_loss_real + d_loss_fake, g_loss\n"
]
},
{
"cell_type": "code",
"execution_count": 6,
"metadata": {},
"outputs": [],
"source": [
"def model_opt(d_loss, g_loss, learning_rate, beta1):\n",
" # Get weights and bias to update\n",
" t_vars = tf.trainable_variables()\n",
" d_vars = [var for var in t_vars if var.name.startswith('discriminator')]\n",
" g_vars = [var for var in t_vars if var.name.startswith('generator')]\n",
"\n",
" # Optimize\n",
" with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)):\n",
" d_train_opt = tf.train.AdamOptimizer(learning_rate, beta1=beta1).minimize(d_loss, var_list=d_vars)\n",
" g_train_opt = tf.train.AdamOptimizer(learning_rate, beta1=beta1).minimize(g_loss, var_list=g_vars)\n",
"\n",
" return d_train_opt, g_train_opt\n"
]
},
{
"cell_type": "code",
"execution_count": 7,
"metadata": {},
"outputs": [],
"source": [
"def show_generator_output(sess, n_images, input_z, out_channel_dim, image_mode):\n",
" cmap = None if image_mode == 'RGB' else 'gray'\n",
" z_dim = input_z.get_shape().as_list()[-1]\n",
" example_z = np.random.uniform(-1, 1, size=[n_images, z_dim])\n",
"\n",
" samples = sess.run(\n",
" generator(input_z, out_channel_dim, False),\n",
" feed_dict={input_z: example_z})\n",
" \n",
" # pyplot.show()\n",
" return samples"
]
},
{
"cell_type": "code",
"execution_count": 8,
"metadata": {},
"outputs": [],
"source": [
"def train(epoch_count, batch_size, z_dim, learning_rate, beta1, get_batches, data_shape, data_image_mode,\n",
" print_every=10, show_every=10):\n",
" # TODO: Build Model\n",
" input_real, input_z, _ = model_inputs(data_shape[2], data_shape[1], data_shape[3], z_dim)\n",
" d_loss, g_loss = model_loss(input_real, input_z, data_shape[3], alpha=0.2)\n",
" d_train_opt, g_train_opt = model_opt(d_loss, g_loss, learning_rate, beta1)\n",
" \n",
" saver = tf.train.Saver()\n",
" sample_z = np.random.uniform(-1, 1, size=(72, z_dim))\n",
" \n",
" samples, losses = [], []\n",
" \n",
" steps = 0\n",
" count = 0\n",
" \n",
" with tf.Session() as sess:\n",
" saver = tf.train.Saver()\n",
" sess.run(tf.global_variables_initializer())\n",
" \n",
" # continue training\n",
" save_path = saver.save(sess, \"/tmp/model.ckpt\")\n",
" ckpt = tf.train.latest_checkpoint('./model/')\n",
" saver.restore(sess, save_path)\n",
" coord = tf.train.Coordinator()\n",
" threads = tf.train.start_queue_runners(sess=sess, coord=coord)\n",
"\n",
" os.mkdir('output')\n",
" for epoch_i in range(epoch_count):\n",
" for batch_images in get_batches(batch_size):\n",
" # Train Model\n",
" steps += 1\n",
" batch_images *= 2.0\n",
" \n",
" # Sample random noise for G\n",
" batch_z = np.random.uniform(-1, 1, size=(batch_size, z_dim))\n",
" \n",
" # Run optimizers\n",
" sess.run(d_train_opt, feed_dict={input_real: batch_images, input_z: batch_z})\n",
" sess.run(g_train_opt, feed_dict={input_z: batch_z})\n",
" \n",
" if steps % print_every == 0:\n",
" os.mkdir('output/'+ str(steps))\n",
" # At the end of each epoch, get the losses and print them out\n",
" train_loss_d = d_loss.eval({input_real: batch_images, input_z: batch_z})\n",
" train_loss_g = g_loss.eval({input_z: batch_z})\n",
" print(\"Epoch {}/{} Step {}...\".format(epoch_i+1, epoch_count, steps),\n",
" \"Discriminator Loss: {:.4f}...\".format(train_loss_d),\n",
" \"Generator Loss: {:.4f}\".format(train_loss_g))\n",
" # Save losses for viewing after training\n",
" #losses.append((train_loss_d, train_loss_g))\n",
" \n",
" if steps % show_every == 0:\n",
" count = count +1\n",
" iterr = count*show_every\n",
" # Show example output for the generator # 25 number for 1 time\n",
" images_grid = show_generator_output(sess, 25, input_z, data_shape[3], data_image_mode)\n",
" x = 0\n",
" for image_grid in images_grid : \n",
" x = x+1\n",
" dst = os.path.join(\"output\", str(steps),str(iterr)+str(x)+\".png\")\n",
" pyplot.imsave(dst, image_grid)\n",
" \n",
" # saving the model \n",
" if epoch_i % 10 == 0:\n",
" if not os.path.exists('./model/'):\n",
" os.makedirs('./model')\n",
" saver.save(sess, './model/' + str(epoch_i)) "
]
},
{
"cell_type": "code",
"execution_count": 10,
"metadata": {
"scrolled": true
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"140\n",
"(?, 4, 4, 1024)\n",
"(?, 6, 6, 512)\n",
"(?, 12, 12, 256)\n",
"(?, 25, 25, 3)\n",
"INFO:tensorflow:Restoring parameters from /tmp/model.ckpt\n"
]
},
{
"ename": "FileExistsError",
"evalue": "[Errno 17] File exists: 'output'",
"output_type": "error",
"traceback": [
"\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
"\u001b[0;31mFileExistsError\u001b[0m Traceback (most recent call last)",
"\u001b[0;32m<ipython-input-10-3cf64f8b526a>\u001b[0m in \u001b[0;36m<module>\u001b[0;34m\u001b[0m\n\u001b[1;32m 8\u001b[0m \u001b[0mceleba_dataset\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mDataset\u001b[0m\u001b[0;34m(\u001b[0m \u001b[0mglob\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m'./motionpatch/*.png'\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 9\u001b[0m \u001b[0;32mwith\u001b[0m \u001b[0mtf\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mGraph\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mas_default\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 10\u001b[0;31m \u001b[0mtrain\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mepochs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mbatch_size\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mz_dim\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mlearning_rate\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mbeta1\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mceleba_dataset\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mget_batches\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mceleba_dataset\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mshape\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mceleba_dataset\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mimage_mode\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m",
"\u001b[0;32m<ipython-input-8-4eafe8fdaf6d>\u001b[0m in \u001b[0;36mtrain\u001b[0;34m(epoch_count, batch_size, z_dim, learning_rate, beta1, get_batches, data_shape, data_image_mode, print_every, show_every)\u001b[0m\n\u001b[1;32m 25\u001b[0m \u001b[0mthreads\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mtf\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mtrain\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mstart_queue_runners\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0msess\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0msess\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mcoord\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mcoord\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 26\u001b[0m \u001b[0;31m#sess.run(tf.global_variables_initializer())\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 27\u001b[0;31m \u001b[0mos\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mmkdir\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m'output'\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 28\u001b[0m \u001b[0;32mfor\u001b[0m \u001b[0mepoch_i\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mrange\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mepoch_count\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 29\u001b[0m \u001b[0;32mfor\u001b[0m \u001b[0mbatch_images\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mget_batches\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mbatch_size\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;31mFileExistsError\u001b[0m: [Errno 17] File exists: 'output'"
]
}
],
"source": [
"batch_size = 50\n",
"z_dim = 100\n",
"learning_rate = 0.00025\n",
"beta1 = 0.45\n",
"\n",
"epochs = 500\n",
"print(len(glob('./motionpatch/*.png')))\n",
"celeba_dataset = Dataset( glob('./motionpatch/*.png'))\n",
"with tf.Graph().as_default():\n",
" train(epochs, batch_size, z_dim, learning_rate, beta1, celeba_dataset.get_batches, celeba_dataset.shape, celeba_dataset.image_mode)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.5.0"
}
},
"nbformat": 4,
"nbformat_minor": 1
}
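As the README comment and the FileExistsError traceback above show, train() creates its folders with os.mkdir and stops when an 'output' directory is left over from an earlier run. A minimal sketch of a more forgiving setup, assuming the same output/<step> layout that train() writes into:

import os

def prepare_output_dir(step=None, root='output'):
    # Create (or reuse) the root output folder and, optionally,
    # the per-step subfolder that samples are written into.
    path = root if step is None else os.path.join(root, str(step))
    os.makedirs(path, exist_ok=True)
    return path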
{
"cells": [
{
"cell_type": "code",
"execution_count": 1,
"metadata": {
"scrolled": true
},
"outputs": [],
"source": [
"%matplotlib inline\n",
"import os\n",
"from glob import glob\n",
"import numpy as np\n",
"from matplotlib import pyplot\n",
"from PIL import Image\n",
"import tensorflow as tf\n",
"\n",
"##README : IF output folder already existed in same route, it makes error. change past output folder's name ##"
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {},
"outputs": [],
"source": [
"class Dataset(object):\n",
" def __init__(self, data_files):\n",
" IMAGE_WIDTH = 25\n",
" IMAGE_HEIGHT = 25\n",
" self.image_mode = 'RGB'\n",
" image_channels = 3\n",
" self.data_files = data_files\n",
" self.shape = len(data_files), IMAGE_WIDTH, IMAGE_HEIGHT, image_channels\n",
" \n",
" def get_image(iself,mage_path, width, height, mode):\n",
" image = Image.open(image_path)\n",
" image = image.resize((width,height))\n",
" return np.array(image)\n",
"\n",
"\n",
" def get_batch(self,image_files, width, height, mode):\n",
" data_batch = np.array(\n",
" [self.get_image(sample_file, width, height, mode) for sample_file in image_files]).astype(np.float32)\n",
" \n",
" # Make sure the images are in 4 dimensions\n",
" if len(data_batch.shape) < 4:\n",
" data_batch = data_batch.reshape(data_batch.shape + (1,))\n",
" return data_batch\n",
"\n",
" def get_batches(self, batch_size):\n",
" IMAGE_MAX_VALUE = 255\n",
" current_index = 0\n",
" while current_index + batch_size <= self.shape[0]:\n",
" data_batch = self.get_batch(\n",
" self.data_files[current_index:current_index + batch_size],\n",
" self.shape[1],self.shape[2],\n",
" self.image_mode)\n",
" \n",
" current_index += batch_size\n",
" \n",
" yield data_batch / IMAGE_MAX_VALUE - 0.5\n",
"\n",
"\n",
"def model_inputs(image_width, image_height, image_channels, z_dim):\n",
" real_input_images = tf.placeholder(tf.float32, [None, image_width, image_height, image_channels], 'real_input_images')\n",
" input_z = tf.placeholder(tf.float32, [None, z_dim], 'input_z')\n",
" learning_rate = tf.placeholder(tf.float32, [], 'learning_rate')\n",
" return real_input_images, input_z, learning_rate\n"
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {},
"outputs": [],
"source": [
"def discriminator(images, reuse=False, alpha=0.2, keep_prob=0.5):\n",
" with tf.variable_scope('discriminator', reuse=reuse):\n",
" # Input layer is 25x25xn\n",
" # Convolutional layer, 13x13x64\n",
" conv1 = tf.layers.conv2d(images, 64, 5, 2, padding='same', kernel_initializer=tf.contrib.layers.xavier_initializer())\n",
" lrelu1 = tf.maximum(alpha * conv1, conv1)\n",
" drop1 = tf.layers.dropout(lrelu1, keep_prob)\n",
" \n",
" # Strided convolutional layer, 7x7x128\n",
" conv2 = tf.layers.conv2d(drop1, 128, 5, 2, 'same', use_bias=False)\n",
" bn2 = tf.layers.batch_normalization(conv2)\n",
" lrelu2 = tf.maximum(alpha * bn2, bn2)\n",
" drop2 = tf.layers.dropout(lrelu2, keep_prob)\n",
" \n",
" # Strided convolutional layer, 4x4x256\n",
" conv3 = tf.layers.conv2d(drop2, 256, 5, 2, 'same', use_bias=False)\n",
" bn3 = tf.layers.batch_normalization(conv3)\n",
" lrelu3 = tf.maximum(alpha * bn3, bn3)\n",
" drop3 = tf.layers.dropout(lrelu3, keep_prob)\n",
" \n",
" # fully connected\n",
" flat = tf.reshape(drop3, (-1, 4*4*256))\n",
" logits = tf.layers.dense(flat, 1)\n",
" out = tf.sigmoid(logits)\n",
" \n",
" return out, logits"
]
},
{
"cell_type": "code",
"execution_count": 4,
"metadata": {},
"outputs": [],
"source": [
"def generator(z, out_channel_dim, is_train=True, alpha=0.2, keep_prob=0.5):\n",
" # TODO: Implement Function\n",
" with tf.variable_scope('generator', reuse=(not is_train)):\n",
" # First fully connected layer, 8x4x512\n",
" fc = tf.layers.dense(z, 4*4*1024, use_bias=False)\n",
" fc = tf.reshape(fc, (-1, 4, 4, 1024))\n",
" bn0 = tf.layers.batch_normalization(fc, training=is_train)\n",
" lrelu0 = tf.maximum(alpha * bn0, bn0)\n",
" drop0 = tf.layers.dropout(lrelu0, keep_prob, training=is_train)\n",
" \n",
" # Deconvolution, 16x8x256\n",
" conv1 = tf.layers.conv2d_transpose(drop0, 512,3, 1, 'valid', use_bias=False)\n",
" bn1 = tf.layers.batch_normalization(conv1, training=is_train)\n",
" lrelu1 = tf.maximum(alpha * bn1, bn1)\n",
" drop1 = tf.layers.dropout(lrelu1, keep_prob, training=is_train)\n",
" \n",
" # Deconvolution, 32x 128\n",
" conv2 = tf.layers.conv2d_transpose(drop1, 256, 3, 2, 'same', use_bias=False)\n",
" bn2 = tf.layers.batch_normalization(conv2, training=is_train)\n",
" lrelu2 = tf.maximum(alpha * bn2, bn2)\n",
" drop2 = tf.layers.dropout(lrelu2, keep_prob, training=is_train)\n",
" \n",
" # Output layer, 28x28xn\n",
" logits = tf.layers.conv2d_transpose(drop2, out_channel_dim, 3, 2, 'valid')\n",
" \n",
" out = tf.tanh(logits)\n",
" \n",
" print(fc.shape)\n",
" print(drop1.shape)\n",
" print(drop2.shape)\n",
" print(logits.shape)\n",
" \n",
" return out"
]
},
{
"cell_type": "code",
"execution_count": 5,
"metadata": {},
"outputs": [],
"source": [
"def model_loss(input_real, input_z, out_channel_dim, alpha=0.2, smooth_factor=0.1):\n",
" d_model_real, d_logits_real = discriminator(input_real, alpha=alpha)\n",
" \n",
" d_loss_real = tf.reduce_mean(\n",
" tf.nn.sigmoid_cross_entropy_with_logits(logits=d_logits_real,\n",
" labels=tf.ones_like(d_model_real) * (1 - smooth_factor)))\n",
" \n",
" input_fake = generator(input_z, out_channel_dim, alpha=alpha)\n",
" d_model_fake, d_logits_fake = discriminator(input_fake, reuse=True, alpha=alpha)\n",
" \n",
" d_loss_fake = tf.reduce_mean(\n",
" tf.nn.sigmoid_cross_entropy_with_logits(logits=d_logits_fake, labels=tf.zeros_like(d_model_fake)))\n",
" \n",
" g_loss = tf.reduce_mean(\n",
" tf.nn.sigmoid_cross_entropy_with_logits(logits=d_logits_fake, labels=tf.ones_like(d_model_fake)))\n",
"\n",
" return d_loss_real + d_loss_fake, g_loss\n"
]
},
{
"cell_type": "code",
"execution_count": 6,
"metadata": {},
"outputs": [],
"source": [
"def model_opt(d_loss, g_loss, learning_rate, beta1):\n",
" # Get weights and bias to update\n",
" t_vars = tf.trainable_variables()\n",
" d_vars = [var for var in t_vars if var.name.startswith('discriminator')]\n",
" g_vars = [var for var in t_vars if var.name.startswith('generator')]\n",
"\n",
" # Optimize\n",
" with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)):\n",
" d_train_opt = tf.train.AdamOptimizer(learning_rate, beta1=beta1).minimize(d_loss, var_list=d_vars)\n",
" g_train_opt = tf.train.AdamOptimizer(learning_rate, beta1=beta1).minimize(g_loss, var_list=g_vars)\n",
"\n",
" return d_train_opt, g_train_opt\n"
]
},
{
"cell_type": "code",
"execution_count": 7,
"metadata": {},
"outputs": [],
"source": [
"def show_generator_output(sess, n_images, input_z, out_channel_dim, image_mode):\n",
" cmap = None if image_mode == 'RGB' else 'gray'\n",
" z_dim = input_z.get_shape().as_list()[-1]\n",
" example_z = np.random.uniform(-1, 1, size=[n_images, z_dim])\n",
"\n",
" samples = sess.run(\n",
" generator(input_z, out_channel_dim, False),\n",
" feed_dict={input_z: example_z})\n",
" \n",
" # pyplot.show()\n",
" return samples"
]
},
{
"cell_type": "code",
"execution_count": 8,
"metadata": {},
"outputs": [],
"source": [
"def train(epoch_count, batch_size, z_dim, learning_rate, beta1, get_batches, data_shape, data_image_mode,\n",
" print_every=10, show_every=10):\n",
" # TODO: Build Model\n",
" input_real, input_z, _ = model_inputs(data_shape[2], data_shape[1], data_shape[3], z_dim)\n",
" d_loss, g_loss = model_loss(input_real, input_z, data_shape[3], alpha=0.2)\n",
" d_train_opt, g_train_opt = model_opt(d_loss, g_loss, learning_rate, beta1)\n",
" \n",
" saver = tf.train.Saver()\n",
" sample_z = np.random.uniform(-1, 1, size=(72, z_dim))\n",
" \n",
" samples, losses = [], []\n",
" \n",
" steps = 0\n",
" count = 0\n",
" \n",
" with tf.Session() as sess:\n",
" saver = tf.train.Saver()\n",
" sess.run(tf.global_variables_initializer())\n",
" \n",
" # continue training\n",
" save_path = saver.save(sess, \"/tmp/model.ckpt\")\n",
" ckpt = tf.train.latest_checkpoint('./model/')\n",
" saver.restore(sess, save_path)\n",
" coord = tf.train.Coordinator()\n",
" threads = tf.train.start_queue_runners(sess=sess, coord=coord)\n",
"\n",
" os.mkdir('output')\n",
" for epoch_i in range(epoch_count):\n",
" for batch_images in get_batches(batch_size):\n",
" # Train Model\n",
" steps += 1\n",
" batch_images *= 2.0\n",
" \n",
" # Sample random noise for G\n",
" batch_z = np.random.uniform(-1, 1, size=(batch_size, z_dim))\n",
" \n",
" # Run optimizers\n",
" sess.run(d_train_opt, feed_dict={input_real: batch_images, input_z: batch_z})\n",
" sess.run(g_train_opt, feed_dict={input_z: batch_z})\n",
" \n",
" if steps % print_every == 0:\n",
" os.mkdir('output/'+ str(steps))\n",
" # At the end of each epoch, get the losses and print them out\n",
" train_loss_d = d_loss.eval({input_real: batch_images, input_z: batch_z})\n",
" train_loss_g = g_loss.eval({input_z: batch_z})\n",
" print(\"Epoch {}/{} Step {}...\".format(epoch_i+1, epoch_count, steps),\n",
" \"Discriminator Loss: {:.4f}...\".format(train_loss_d),\n",
" \"Generator Loss: {:.4f}\".format(train_loss_g))\n",
" # Save losses for viewing after training\n",
" #losses.append((train_loss_d, train_loss_g))\n",
" \n",
" if steps % show_every == 0:\n",
" count = count +1\n",
" iterr = count*show_every\n",
" # Show example output for the generator # 25 number for 1 time\n",
" images_grid = show_generator_output(sess, 25, input_z, data_shape[3], data_image_mode)\n",
" x = 0\n",
" for image_grid in images_grid : \n",
" x = x+1\n",
" dst = os.path.join(\"output\", str(steps),str(iterr)+str(x)+\".png\")\n",
" pyplot.imsave(dst, image_grid)\n",
" \n",
" # saving the model \n",
" if epoch_i % 10 == 0:\n",
" if not os.path.exists('./model/'):\n",
" os.makedirs('./model')\n",
" saver.save(sess, './model/' + str(epoch_i)) "
]
},
{
"cell_type": "code",
"execution_count": 9,
"metadata": {
"scrolled": true
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"140\n",
"(?, 4, 4, 1024)\n",
"(?, 6, 6, 512)\n",
"(?, 12, 12, 256)\n",
"(?, 25, 25, 3)\n",
"INFO:tensorflow:Restoring parameters from /tmp/model.ckpt\n"
]
},
{
"ename": "NameError",
"evalue": "name 'image_path' is not defined",
"output_type": "error",
"traceback": [
"\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
"\u001b[0;31mNameError\u001b[0m Traceback (most recent call last)",
"\u001b[0;32m<ipython-input-9-3cf64f8b526a>\u001b[0m in \u001b[0;36m<module>\u001b[0;34m\u001b[0m\n\u001b[1;32m 8\u001b[0m \u001b[0mceleba_dataset\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mDataset\u001b[0m\u001b[0;34m(\u001b[0m \u001b[0mglob\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m'./motionpatch/*.png'\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 9\u001b[0m \u001b[0;32mwith\u001b[0m \u001b[0mtf\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mGraph\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mas_default\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 10\u001b[0;31m \u001b[0mtrain\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mepochs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mbatch_size\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mz_dim\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mlearning_rate\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mbeta1\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mceleba_dataset\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mget_batches\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mceleba_dataset\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mshape\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mceleba_dataset\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mimage_mode\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m",
"\u001b[0;32m<ipython-input-8-14a3faf19639>\u001b[0m in \u001b[0;36mtrain\u001b[0;34m(epoch_count, batch_size, z_dim, learning_rate, beta1, get_batches, data_shape, data_image_mode, print_every, show_every)\u001b[0m\n\u001b[1;32m 27\u001b[0m \u001b[0mos\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mmkdir\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m'output'\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 28\u001b[0m \u001b[0;32mfor\u001b[0m \u001b[0mepoch_i\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mrange\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mepoch_count\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 29\u001b[0;31m \u001b[0;32mfor\u001b[0m \u001b[0mbatch_images\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mget_batches\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mbatch_size\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 30\u001b[0m \u001b[0;31m# Train Model\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 31\u001b[0m \u001b[0msteps\u001b[0m \u001b[0;34m+=\u001b[0m \u001b[0;36m1\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;32m<ipython-input-2-77ee1ea74a0f>\u001b[0m in \u001b[0;36mget_batches\u001b[0;34m(self, batch_size)\u001b[0m\n\u001b[1;32m 30\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mdata_files\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mcurrent_index\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0mcurrent_index\u001b[0m \u001b[0;34m+\u001b[0m \u001b[0mbatch_size\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 31\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mshape\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;36m1\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mshape\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;36m2\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 32\u001b[0;31m self.image_mode)\n\u001b[0m\u001b[1;32m 33\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 34\u001b[0m \u001b[0mcurrent_index\u001b[0m \u001b[0;34m+=\u001b[0m \u001b[0mbatch_size\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;32m<ipython-input-2-77ee1ea74a0f>\u001b[0m in \u001b[0;36mget_batch\u001b[0;34m(self, image_files, width, height, mode)\u001b[0m\n\u001b[1;32m 16\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0mget_batch\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0mimage_files\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mwidth\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mheight\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mmode\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 17\u001b[0m data_batch = np.array(\n\u001b[0;32m---> 18\u001b[0;31m [self.get_image(sample_file, width, height, mode) for sample_file in image_files]).astype(np.float32)\n\u001b[0m\u001b[1;32m 19\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 20\u001b[0m \u001b[0;31m# Make sure the images are in 4 dimensions\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;32m<ipython-input-2-77ee1ea74a0f>\u001b[0m in \u001b[0;36m<listcomp>\u001b[0;34m(.0)\u001b[0m\n\u001b[1;32m 16\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0mget_batch\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0mimage_files\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mwidth\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mheight\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mmode\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 17\u001b[0m data_batch = np.array(\n\u001b[0;32m---> 18\u001b[0;31m [self.get_image(sample_file, width, height, mode) for sample_file in image_files]).astype(np.float32)\n\u001b[0m\u001b[1;32m 19\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 20\u001b[0m \u001b[0;31m# Make sure the images are in 4 dimensions\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;32m<ipython-input-2-77ee1ea74a0f>\u001b[0m in \u001b[0;36mget_image\u001b[0;34m(iself, mage_path, width, height, mode)\u001b[0m\n\u001b[1;32m 9\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 10\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0mget_image\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0miself\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0mmage_path\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mwidth\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mheight\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mmode\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 11\u001b[0;31m \u001b[0mimage\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mImage\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mopen\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mimage_path\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 12\u001b[0m \u001b[0mimage\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mimage\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mresize\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mwidth\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0mheight\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 13\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0mnp\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0marray\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mimage\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;31mNameError\u001b[0m: name 'image_path' is not defined"
]
}
],
"source": [
"batch_size = 50\n",
"z_dim = 100\n",
"learning_rate = 0.00025\n",
"beta1 = 0.45\n",
"\n",
"epochs = 500\n",
"print(len(glob('./motionpatch/*.png')))\n",
"celeba_dataset = Dataset( glob('./motionpatch/*.png'))\n",
"with tf.Graph().as_default():\n",
" train(epochs, batch_size, z_dim, learning_rate, beta1, celeba_dataset.get_batches, celeba_dataset.shape, celeba_dataset.image_mode)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.5.0"
}
},
"nbformat": 4,
"nbformat_minor": 1
}
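The generator ends in tf.tanh, so the arrays returned by show_generator_output lie in [-1, 1], while pyplot.imsave expects float RGB images in the [0, 1] range. A small sketch of rescaling one sample before saving; the helper name is illustrative, not part of the notebook:

import numpy as np
from matplotlib import pyplot

def save_sample(sample, path):
    # Map tanh output from [-1, 1] to [0, 1] and clip any
    # numerical overshoot before writing the PNG.
    img = np.clip((sample + 1.0) / 2.0, 0.0, 1.0)
    pyplot.imsave(path, img)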
{
"cells": [
{
"cell_type": "code",
"execution_count": 3,
"metadata": {},
"outputs": [],
"source": [
"import cv2\n",
"from glob import glob\n",
"import os \n",
"\n",
"motionpatch_location = './motionpatch/*.png'\n",
"output_location = './smallone/'\n",
"count = 0\n",
"for f in glob(motionpatch_location):\n",
" count += 1\n",
" image = cv2.imread(f)\n",
" small = cv2.resize(image,dsize=(25,25))\n",
" dst = os.path.join(output_location +str(count)+\".png\")\n",
" cv2.imwrite(dst,small)"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.5.0"
}
},
"nbformat": 4,
"nbformat_minor": 2
}
% Remove missing files: copy only the motion patches listed in the index file
% LOCATION : raw skeleton files
path_name = '/home/rfj/바탕화면/actionGAN/all_motionpatch/';
fileID = fopen('/home/rfj/바탕화면/actionGAN/skeletone_INDEX/stand_2.txt','r');
formatSpec = '%s';
sizeA = [20 Inf];
perfect_list = fscanf(fileID,formatSpec,sizeA);
perfect_list = perfect_list.';
fclose(fileID);
L = length(perfect_list);
for K = 1:L
    file_name = char(perfect_list(K,:));
    file_location = strcat(path_name,file_name,'.png')
    ori = imread(file_location);
    new_file_name = strcat('/home/rfj/바탕화면/actionGAN/DCGAN/new_motionpatch/',file_name(1:20),'.png');
    imwrite(ori,new_file_name);
end
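The MATLAB snippet above copies only the motion patches whose IDs appear in the index file. A rough Python equivalent under the same assumptions (same paths, whitespace-separated 20-character sample IDs):

import os
import shutil

src_dir = '/home/rfj/바탕화면/actionGAN/all_motionpatch/'
dst_dir = '/home/rfj/바탕화면/actionGAN/DCGAN/new_motionpatch/'
index_file = '/home/rfj/바탕화면/actionGAN/skeletone_INDEX/stand_2.txt'

with open(index_file) as f:
    ids = f.read().split()  # one sample ID per whitespace-separated token

for sample_id in ids:
    src = os.path.join(src_dir, sample_id + '.png')
    if os.path.exists(src):
        # Copy the listed patch; skip IDs whose PNG is missing.
        shutil.copyfile(src, os.path.join(dst_dir, sample_id[:20] + '.png'))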
if people sit & 1
S001C001P008R001A020
S010C001P008R002A020
S010C001P013R001A020
S010C001P016R001A020
S010C001P016R002A020
S013C001P025R002A020
S013C002P008R002A020
S013C002P017R002A020
S017C003P015R001A020
S010C002P016R002A020_a
S010C002P007R002A020_a
S010C001P015R002A020_a
S011C001P038R001A020_a
S012C002P019R002A020_a
S013C001P015R002A020_a
sit & 2
S001C001P003R002A020
S001C001P004R001A020
S001C001P004R002A020
S001C002P004R002A020
S001C002P004R001A020
S010C001P008R001A020
S010C001P013R002A020
S010C001P015R001A020
S010C002P008R002A020
S010C002P013R002A020
S010C002P019R002A020
S010C003P008R001A020
S010C003P015R001A020
S011C001P015R002A020
S011C001P038R002A020
S011C002P015R002A020
S011C002P038R002A020
S011C003P001R001A020
S011C003P007R002A020
S011C003P015R001A020
S011C003P028R001A020
S013C001P007R001A020
S013C001P007R002A020
S013C001P015R001A020
S013C001P016R001A020
S013C001P017R001A020
S013C001P017R002A020
S014C003P019R001A020_a
S014C001P015R002A020_a
S013C002P007R002A020_a
S012C002P007R002A020_a
S001C002P001R001A020_a
S011C001P016R001A020_a
S011C001P028R001A020_a
S011C002P002R002A020_a
stand & 1
S014C003P019R002A020_a
S001C002P006R001A020_a
S001C002P006R002A020_a
S014C001P007R001A020_a
S014C001P007R002A020_a
S014C001P027R002A020_a
S017C002P020R001A020_a
S010C001P021R002A020
S011C001P016R002A020
S011C002P016R002A020
S012C001P037R002A020
S013C001P037R002A020
S012C002P037R002A020
S014C003P039R001A020
S014C003P037R001A020
S014C002P025R002A020
S014C001P025R002A020
S014C002P037R002A020
S014C001P037R001A020
S014C002P039R002A020
S014C001P037R002A020
S017C003P016R001A020
S014C001P039R002A020_a
S014C002P027R001A020_a
stand & 2
S001C001P003R001A020
S001C001P005R001A020
S001C001P005R002A020
S001C001P007R001A020
S001C001P008R002A020
S001C002P002R002A020
S001C001P001R001A020
S001C002P003R002A020
S001C002P005R001A020
S001C002P005R002A020
S009C003P017R001A020
S009C003P019R001A020
S009C003P019R002A020
S009C003P025R001A020
S010C001P019R002A020
S010C001P025R001A020
S010C001P025R002A020
S010C002P025R002A020
S010C003P019R001A020
S010C003P025R001A020
S011C001P007R002A020
S011C001P019R001A020
S011C001P019R002A020
S011C001P027R001A020
S011C001P027R002A020
S011C001P028R002A020
S011C002P018R002A020
S011C003P018R001A020
S011C003P019R001A020
S012C001P015R001A020
S012C001P018R001A020
S012C001P019R001A020
S012C001P025R001A020
S012C001P025R002A020
S012C001P028R001A020
S012C001P028R002A020
S012C001P037R001A020
S012C002P025R002A020
S012C002P028R002A020
S012C003P015R001A020
S012C003P025R001A020
S012C003P027R001A020
S012C003P037R001A020
S013C001P019R001A020
S013C001P019R002A020
S013C001P027R001A020
S013C001P028R001A020
S013C002P018R002A020
S013C003P019R001A020
S014C001P019R002A020
S014C001P025R001A020
S015C001P019R002A020
S015C002P019R002A020
S015C003P019R001A020
S016C001P007R001A020
S016C001P019R001A020
S016C001P019R002A020
S016C001P040R001A020
S016C002P007R001A020
S016C002P019R001A020
S016C002P019R002A020
S016C003P040R001A020
S017C001P003R001A020
S017C001P003R002A020
S017C001P007R001A020
S017C001P007R002A020
S017C001P008R001A020
S017C001P016R001A020
S017C001P020R001A020
S017C001P020R002A020
S017C002P003R002A020
S017C002P007R002A020
S017C002P020R002A020
S017C003P003R001A020
S017C003P008R001A020
S017C003P007R002A020_a
S017C003P007R001A020_a
S015C003P037R001A020_a
S016C001P007R002A020_a
S015C002P016R002A020_a
S015C001P019R001A020_a
S015C002P019R001A020_a
S016C002P039R002A020_a
S015C001P017R001A020_a
S016C002P040R001A020_a
S014C002P019R002A020_a
S014C002P007R002A020_a
S013C003P027R001A020_a
S013C001P037R001A020_a
S016C002P007R002A020_a
S012C003P028R001A020_a
S012C003P018R001A020_a
S012C002P019R001A020_a
S012C002P018R002A020_a
S012C002P016R002A020_a
S012C003P025R002A020_a
S011C002P027R002A020_a
S011C001P018R002A020_a
S010C002P025R001A020_a
S012C001P018R002A020_a
S009C003P017R002A020_a
S012C001P027R002A020_a
S001C002P001R002A020_a
S001C001P001R002A020_a
S001C001P002R001A020_a
S013C002P019R002A020_a
S014C003P027R001A020_a
S001C001P006R001A020_a // raises the leg
S016C003P019R002A020
S017C002P003R001A020
S017C003P015R002A020
S008C002P036R001A020
S008C003P001R002A020
S008C003P031R002A020
S009C002P015R001A020
S009C002P025R001A020
S009C003P015R002A020
S009C003P016R001A020
S008C002P034R001A020
S008C002P033R001A020
S008C002P032R001A020
S008C002P007R002A020
S008C002P007R001A020
S008C002P001R001A020
S007C003P027R002A020
S007C002P015R002A020
S007C002P026R001A020
S007C002P028R001A020
S007C003P008R002A020
S007C003P025R001A020
S007C003P026R002A020
S008C001P007R001A020
S008C001P029R001A020
S008C002P029R001A020
S008C002P031R001A020
othercase // arm and leg motion
S017C003P020R001A020
S017C003P017R001A020
S010C001P021R001A020
S011C002P017R002A020
S011C001P017R002A020
S010C002P018R002A020
S007C003P018R001A020 // time dif
S008C001P030R001A020 // sit time dif
S008C001P034R001A020 // twist hand bad?
S008C002P025R001A020 //twist hand
S009C001P025R002A020 //leg
S008C003P032R001A020 //what?
if people sit & 1
S001C001P008R001A020
S010C001P008R002A020
S010C001P013R001A020
S010C001P016R001A020
S010C001P016R002A020
S013C001P025R002A020
S013C002P008R002A020
S013C002P017R002A020
S017C003P015R001A020
S002C002P007R002A020
S002C003P007R002A020
S002C003P013R001A020
S003C002P007R002A020
S003C003P002R002A020
S004C001P008R001A020
S004C003P003R001A020
S005C002P004R002A020
S005C002P015R002A020
S005C002P016R002A020
S006C001P007R001A020
S006C002P015R002A020
S007C002P016R002A020_a
S007C003P016R001A020_a //bad?
S008C001P001R001A020_a
S006C002P007R001A020_a
S010C002P016R002A020_a
S010C002P007R002A020_a
S010C001P015R002A020_a
S011C001P038R001A020_a
S012C002P019R002A020_a
S013C001P015R002A020_a
sit & 2
S001C001P003R002A020
S001C001P004R001A020
S001C001P004R002A020
S001C002P004R002A020
S001C002P004R001A020
S010C001P008R001A020
S010C001P013R002A020
S010C001P015R001A020
S010C002P008R002A020
S010C002P013R002A020
S010C002P019R002A020
S010C003P008R001A020
S010C003P015R001A020
S011C001P015R002A020
S011C001P038R002A020
S011C002P015R002A020
S011C002P038R002A020
S011C003P001R001A020
S011C003P007R002A020
S011C003P015R001A020
S011C003P028R001A020
S013C001P007R001A020
S013C001P007R002A020
S013C001P015R001A020
S013C001P016R001A020
S013C001P017R001A020
S013C001P017R002A020
S010C002P008R002A020
S010C002P013R002A020
S001C001P007R001A020
S001C003P007R001A020
S002C001P009R002A020
S002C002P003R002A020
S002C002P008R001A020
S002C002P009R002A020
S002C003P010R001A020
S003C001P001R001A020
S003C001P002R001A020
S003C001P019R001A020
S003C003P002R001A020
S006C001P019R001A020
S006C001P023R001A020
S006C001P023R002A020
S006C002P023R002A020
S007C001P007R001A020
S007C002P008R002A020
S007C003P001R001A020
S007C003P007R001A020
S007C003P015R001A020
S008C001P015R001A020
S008C001P019R001A020
S009C001P007R002A020
S008C003P019R001A020
S007C002P019R002A020_a // amb...
S007C003P019R002A020_a
S008C001P007R002A020_a
S008C001P015R002A020_a //a?
S006C003P008R001A020_a
S006C001P001R001A020_a
S014C003P019R001A020_a
S014C001P015R002A020_a
S013C002P007R002A020_a
S012C002P007R002A020_a
S001C002P001R001A020_a
S011C001P016R001A020_a
S011C001P028R001A020_a
S011C002P002R002A020_a
stand & 1
S008C003P036R001A020
S007C001P018R002A020
S007C001P027R001A020
S007C002P008R001A020
S007C002P018R002A020
S007C003P008R001A020
S007C003P027R001A020
S008C001P029R002A020
S008C001P032R002A020
S008C001P033R001A020
S008C001P033R002A020
S008C001P034R002A020
S008C002P033R002A020
S008C002P034R002A020
S009C003P015R001A020
S009C001P015R001A020
S008C003P033R001A020
S010C001P021R002A020
S011C001P016R002A020
S011C002P016R002A020
S012C001P037R002A020
S013C001P037R002A020
S012C002P037R002A020
S014C003P039R001A020
S014C003P037R001A020
S014C002P025R002A020
S014C001P025R002A020
S014C002P037R002A020
S014C001P037R001A020
S014C002P039R002A020
S014C001P037R002A020
S017C003P016R001A020
S002C003P007R001A020
S003C002P002R002A020
S003C002P008R002A020
S005C001P015R001A020
S005C001P018R002A020
S005C003P015R001A020
S006C001P016R002A020
S006C001P017R002A020
S006C001P022R001A020
S006C002P007R002A020
S006C002P016R002A020
S006C003P022R001A020
S014C003P019R002A020_a
S001C002P006R001A020_a
S001C002P006R002A020_a
S014C001P007R001A020_a
S014C001P007R002A020_a
S014C001P027R002A020_a
S017C002P020R001A020_a
S005C002P017R002A020_a
S005C002P018R002A020_a
S014C001P039R002A020_a
S014C002P027R001A020_a
S005C001P017R002A020_a
S003C001P015R001A020_a
S006C003P007R001A020_a
S007C001P018R001A020_a
S007C002P027R002A020_a
S008C001P036R001A020_a
S008C002P032R002A020_a//bad?
stand & 2
S001C001P003R001A020
S001C001P005R001A020
S001C001P005R002A020
S001C001P007R001A020
S001C001P008R002A020
S001C002P002R002A020
S001C001P001R001A020
S001C002P003R002A020
S001C002P005R001A020
S001C002P005R002A020
S009C003P017R001A020
S009C003P019R001A020
S009C003P019R002A020
S009C003P025R001A020
S010C001P019R002A020
S010C001P025R001A020
S010C001P025R002A020
S010C002P025R002A020
S010C003P019R001A020
S010C003P025R001A020
S011C001P007R002A020
S011C001P019R001A020
S011C001P019R002A020
S011C001P027R001A020
S011C001P027R002A020
S011C001P028R002A020
S011C002P018R002A020
S011C003P018R001A020
S011C003P019R001A020
S012C001P015R001A020
S012C001P018R001A020
S012C001P019R001A020
S012C001P025R001A020
S012C001P025R002A020
S012C001P028R001A020
S012C001P028R002A020
S012C001P037R001A020
S012C002P025R002A020
S012C002P028R002A020
S012C003P015R001A020
S012C003P025R001A020
S012C003P027R001A020
S012C003P037R001A020
S013C001P019R001A020
S013C001P019R002A020
S013C001P027R001A020
S013C001P028R001A020
S013C002P018R002A020
S013C003P019R001A020
S014C001P019R002A020
S014C001P025R001A020
S015C001P019R002A020
S015C002P019R002A020
S015C003P019R001A020
S016C001P007R001A020
S016C001P019R001A020
S016C001P019R002A020
S016C001P040R001A020
S016C002P007R001A020
S016C002P019R001A020
S016C002P019R002A020
S016C003P040R001A020
S017C001P003R001A020
S017C001P003R002A020
S017C001P007R001A020
S017C001P007R002A020
S017C001P008R001A020
S017C001P016R001A020
S017C001P020R001A020
S017C001P020R002A020
S017C002P003R002A020
S017C002P007R002A020
S017C002P020R002A020
S017C003P003R001A020
S017C003P008R001A020
S001C001P001R002A020
S003C003P007R001A020
S001C001P005R001A020
S001C001P005R002A020
S001C002P002R002A020
S001C002P005R002A020
S001C002P008R002A020
S001C003P002R002A020
S001C003P005R001A020
S002C001P009R001A020
S002C001P010R002A020
S002C001P013R002A020
S002C002P009R001A020
S002C002P010R002A020
S002C002P013R002A020
S002C003P009R001A020
S003C001P001R002A020
S003C001P007R001A020
S003C001P016R001A020
S004C001P003R002A020
S004C001P008R002A020
S004C001P020R001A020
S004C001P020R002A020
S004C002P003R002A020
S004C002P008R002A020
S004C002P020R002A020
S005C001P021R002A020
S005C002P021R002A020
S005C003P018R001A020
S006C001P001R002A020
S006C001P007R002A020
S006C001P019R002A020
S006C001P022R002A020
S006C002P001R002A020
S006C002P008R002A020
S006C002P022R002A020
S006C003P016R001A020
S006C003P017R001A020
S007C003P019R001A020
S007C001P019R001A020
S007C002P017R002A020
S008C001P019R002A020
S008C001P025R001A020
S008C001P025R002A020
S008C001P030R002A020
S008C001P031R002A020
S008C001P035R001A020
S008C001P035R002A020
S008C001P036R002A020
S008C002P019R002A020
S008C002P025R002A020
S008C002P031R002A020
S009C002P019R001A020 //bad..?
S009C002P019R002A020
S009C002P017R002A020
S009C002P016R002A020
S009C001P025R001A020
S009C001P019R002A020
S009C001P019R001A020
S009C001P017R002A020
S009C001P017R001A020
S009C001P016R002A020
S008C003P035R001A020
S008C003P031R001A020 //bad?
S008C003P025R001A020
S008C002P036R002A020
S008C001P031R001A020_a//leg
S008C001P032R001A020_a // twist hand
S008C002P001R002A020_a
S008C002P030R002A020_a //late
S009C002P025R002A020_a //good?
S008C003P019R002A020_a //wall
S008C002P035R002A020_a //noisy?
S007C002P018R001A020_a
S007C002P019R001A020_a
S007C003P017R002A020_a
S007C003P026R001A020_a
S007C003P028R001A020_a //time dif
S008C001P001R002A020_a //bad?
S006C001P008R002A020_a
S006C001P017R001A020_a
S006C003P024R001A020_a
S004C003P020R001A020_a
S017C003P007R002A020_a
S006C002P019R002A020_a
S017C003P007R001A020_a
S015C003P037R001A020_a
S016C001P007R002A020_a
S015C002P016R002A020_a
S015C001P019R001A020_a
S015C002P019R001A020_a
S016C002P039R002A020_a
S015C001P017R001A020_a
S016C002P040R001A020_a
S014C002P019R002A020_a
S014C002P007R002A020_a
S013C003P027R001A020_a
S013C001P037R001A020_a
S016C002P007R002A020_a
S001C002P001R002A020_a
S001C001P002R001A020_a
S001C002P003R001A020_a
S002C001P003R001A020_a
S002C002P003R001A020_a
S002C003P003R001A020_a
S003C001P017R001A020_a
S003C001P019R002A020_a
S003C002P001R002A020_a
S003C002P017R002A020_a
S003C002P019R002A020_a
S003C003P016R001A020_a
S003C003P017R001A020_a
S003C003P018R001A020_a
S012C003P028R001A020_a
S012C003P018R001A020_a
S012C002P019R001A020_a
S012C002P018R002A020_a
S012C002P016R002A020_a
S012C003P025R002A020_a
S011C002P027R002A020_a
S011C001P018R002A020_a
S010C002P025R001A020_a
S012C001P018R002A020_a
S009C003P017R002A020_a
S012C001P027R002A020_a
S001C002P001R002A020_a
S001C001P001R002A020_a
S001C001P002R001A020_a
S013C002P019R002A020_a
S014C003P027R001A020_a
S001C001P006R001A020_a//leg
S001C001P003R001A020
S001C001P005R001A020
S001C001P005R002A020
S001C001P007R001A020
S001C001P008R002A020
S001C002P002R002A020
S001C001P001R001A020
S001C002P003R002A020
S001C002P005R001A020
S001C002P005R002A020
S009C003P017R001A020
S009C003P019R001A020
S009C003P019R002A020
S009C003P025R001A020
S010C001P019R002A020
S010C001P025R001A020
S010C001P025R002A020
S010C002P025R002A020
S010C003P019R001A020
S010C003P025R001A020
S011C001P007R002A020
S011C001P019R001A020
S011C001P019R002A020
S011C001P027R001A020
S011C001P027R002A020
S011C001P028R002A020
S011C002P018R002A020
S011C003P018R001A020
S011C003P019R001A020
S012C001P015R001A020
S012C001P018R001A020
S012C001P019R001A020
S012C001P025R001A020
S012C001P025R002A020
S012C001P028R001A020
S012C001P028R002A020
S012C001P037R001A020
S012C002P025R002A020
S012C002P028R002A020
S012C003P015R001A020
S012C003P025R001A020
S012C003P027R001A020
S012C003P037R001A020
S013C001P019R001A020
S013C001P019R002A020
S013C001P027R001A020
S013C001P028R001A020
S013C002P018R002A020
S013C003P019R001A020
S014C001P019R002A020
S014C001P025R001A020
S015C001P019R002A020
S015C002P019R002A020
S015C003P019R001A020
S016C001P007R001A020
S016C001P019R001A020
S016C001P019R002A020
S016C001P040R001A020
S016C002P007R001A020
S016C002P019R001A020
S016C002P019R002A020
S016C003P040R001A020
S017C001P003R001A020
S017C001P003R002A020
S017C001P007R001A020
S017C001P007R002A020
S017C001P008R001A020
S017C001P016R001A020
S017C001P020R001A020
S017C001P020R002A020
S017C002P003R002A020
S017C002P007R002A020
S017C002P020R002A020
S017C003P003R001A020
S017C003P008R001A020
S001C001P001R002A020
S003C003P007R001A020
S001C001P005R001A020
S001C001P005R002A020
S001C002P002R002A020
S001C002P005R002A020
S001C002P008R002A020
S001C003P002R002A020
S001C003P005R001A020
S002C001P009R001A020
S002C001P010R002A020
S002C001P013R002A020
S002C002P009R001A020
S002C002P010R002A020
S002C002P013R002A020
S002C003P009R001A020
S003C001P001R002A020
S003C001P007R001A020
S003C001P016R001A020
S004C001P003R002A020
S004C001P008R002A020
S004C001P020R001A020
S004C001P020R002A020
S004C002P003R002A020
S004C002P008R002A020
S004C002P020R002A020
S005C001P021R002A020
S005C002P021R002A020
S005C003P018R001A020
S006C001P001R002A020
S006C001P007R002A020
S006C001P019R002A020
S006C001P022R002A020
S006C002P001R002A020
S006C002P008R002A020
S006C002P022R002A020
S006C003P016R001A020
S006C003P017R001A020
S007C003P019R001A020
S007C001P019R001A020
S007C002P017R002A020
S008C001P019R002A020
S008C001P025R001A020
S008C001P025R002A020
S008C001P030R002A020
S008C001P031R002A020
S008C001P035R001A020
S008C001P035R002A020
S008C001P036R002A020
S008C002P019R002A020
S008C002P025R002A020
S008C002P031R002A020
S009C002P019R002A020
S009C002P017R002A020
S009C002P016R002A020
S009C001P025R001A020
S009C001P019R002A020
S009C001P019R001A020
S009C001P017R002A020
S009C001P017R001A020
S009C001P016R002A020
S008C003P035R001A020
S008C003P025R001A020
S008C002P036R002A020
S009C002P025R002A020
S008C002P035R002A020
S009C002P019R001A020
S008C003P019R002A020
S008C003P031R001A020