JeungEunLee

add DCGAN

1 +{
2 + "cells": [
3 + {
4 + "cell_type": "code",
5 + "execution_count": 1,
6 + "metadata": {
7 + "scrolled": true
8 + },
9 + "outputs": [],
10 + "source": [
11 + "%matplotlib inline\n",
12 + "import os\n",
13 + "from glob import glob\n",
14 + "import numpy as np\n",
15 + "from matplotlib import pyplot\n",
16 + "from PIL import Image\n",
17 + "import tensorflow as tf\n",
18 + "\n",
19 + "##README : IF output folder already existed in same route, it makes error. change past output folder's name ##"
20 + ]
21 + },
22 + {
23 + "cell_type": "code",
24 + "execution_count": 2,
25 + "metadata": {},
26 + "outputs": [],
27 + "source": [
28 + "class Dataset(object):\n",
29 + " def __init__(self, data_files):\n",
30 + " IMAGE_WIDTH = 25\n",
31 + " IMAGE_HEIGHT = 25\n",
32 + " self.image_mode = 'RGB'\n",
33 + " image_channels = 3\n",
34 + " self.data_files = data_files\n",
35 + " self.shape = len(data_files), IMAGE_WIDTH, IMAGE_HEIGHT, image_channels\n",
36 + "\n",
37 + " def get_batches(self, batch_size):\n",
38 + " IMAGE_MAX_VALUE = 255\n",
39 + " current_index = 0\n",
40 + " while current_index + batch_size <= self.shape[0]:\n",
41 + " data_batch = get_batch(\n",
42 + " self.data_files[current_index:current_index + batch_size],\n",
43 + " self.shape[1],self.shape[2],\n",
44 + " self.image_mode)\n",
45 + " \n",
46 + " current_index += batch_size\n",
47 + " \n",
48 + " yield data_batch / IMAGE_MAX_VALUE - 0.5\n",
49 + "\n",
50 + "\n",
51 + "def model_inputs(image_width, image_height, image_channels, z_dim):\n",
52 + " real_input_images = tf.placeholder(tf.float32, [None, image_width, image_height, image_channels], 'real_input_images')\n",
53 + " input_z = tf.placeholder(tf.float32, [None, z_dim], 'input_z')\n",
54 + " learning_rate = tf.placeholder(tf.float32, [], 'learning_rate')\n",
55 + " return real_input_images, input_z, learning_rate\n"
56 + ]
57 + },
58 + {
59 + "cell_type": "code",
60 + "execution_count": 3,
61 + "metadata": {},
62 + "outputs": [],
63 + "source": [
64 + "def discriminator(images, reuse=False, alpha=0.2, keep_prob=0.5):\n",
65 + " with tf.variable_scope('discriminator', reuse=reuse):\n",
66 + " # Input layer is 25x25xn\n",
67 + " # Convolutional layer, 13x13x64\n",
68 + " conv1 = tf.layers.conv2d(images, 64, 5, 2, padding='same', kernel_initializer=tf.contrib.layers.xavier_initializer())\n",
69 + " lrelu1 = tf.maximum(alpha * conv1, conv1)\n",
70 + " drop1 = tf.layers.dropout(lrelu1, keep_prob)\n",
71 + " \n",
72 + " # Strided convolutional layer, 7x7x128\n",
73 + " conv2 = tf.layers.conv2d(drop1, 128, 5, 2, 'same', use_bias=False)\n",
74 + " bn2 = tf.layers.batch_normalization(conv2)\n",
75 + " lrelu2 = tf.maximum(alpha * bn2, bn2)\n",
76 + " drop2 = tf.layers.dropout(lrelu2, keep_prob)\n",
77 + " \n",
78 + " # Strided convolutional layer, 4x4x256\n",
79 + " conv3 = tf.layers.conv2d(drop2, 256, 5, 2, 'same', use_bias=False)\n",
80 + " bn3 = tf.layers.batch_normalization(conv3)\n",
81 + " lrelu3 = tf.maximum(alpha * bn3, bn3)\n",
82 + " drop3 = tf.layers.dropout(lrelu3, keep_prob)\n",
83 + " \n",
84 + " # fully connected\n",
85 + " flat = tf.reshape(drop3, (-1, 4*4*256))\n",
86 + " logits = tf.layers.dense(flat, 1)\n",
87 + " out = tf.sigmoid(logits)\n",
88 + " \n",
89 + " return out, logits"
90 + ]
91 + },
92 + {
93 + "cell_type": "code",
94 + "execution_count": 4,
95 + "metadata": {},
96 + "outputs": [],
97 + "source": [
98 + "def generator(z, out_channel_dim, is_train=True, alpha=0.2, keep_prob=0.5):\n",
99 + " # TODO: Implement Function\n",
100 + " with tf.variable_scope('generator', reuse=(not is_train)):\n",
101 + " # First fully connected layer, 8x4x512\n",
102 + " fc = tf.layers.dense(z, 4*4*1024, use_bias=False)\n",
103 + " fc = tf.reshape(fc, (-1, 4, 4, 1024))\n",
104 + " bn0 = tf.layers.batch_normalization(fc, training=is_train)\n",
105 + " lrelu0 = tf.maximum(alpha * bn0, bn0)\n",
106 + " drop0 = tf.layers.dropout(lrelu0, keep_prob, training=is_train)\n",
107 + " \n",
108 + " # Deconvolution, 16x8x256\n",
109 + " conv1 = tf.layers.conv2d_transpose(drop0, 512,3, 1, 'valid', use_bias=False)\n",
110 + " bn1 = tf.layers.batch_normalization(conv1, training=is_train)\n",
111 + " lrelu1 = tf.maximum(alpha * bn1, bn1)\n",
112 + " drop1 = tf.layers.dropout(lrelu1, keep_prob, training=is_train)\n",
113 + " \n",
114 + " # Deconvolution, 32x 128\n",
115 + " conv2 = tf.layers.conv2d_transpose(drop1, 256, 3, 2, 'same', use_bias=False)\n",
116 + " bn2 = tf.layers.batch_normalization(conv2, training=is_train)\n",
117 + " lrelu2 = tf.maximum(alpha * bn2, bn2)\n",
118 + " drop2 = tf.layers.dropout(lrelu2, keep_prob, training=is_train)\n",
119 + " \n",
120 + " # Output layer, 28x28xn\n",
121 + " logits = tf.layers.conv2d_transpose(drop2, out_channel_dim, 3, 2, 'valid')\n",
122 + " \n",
123 + " out = tf.tanh(logits)\n",
124 + " \n",
125 + " print(fc.shape)\n",
126 + " print(drop1.shape)\n",
127 + " print(drop2.shape)\n",
128 + " print(logits.shape)\n",
129 + " \n",
130 + " return out"
131 + ]
132 + },
133 + {
134 + "cell_type": "code",
135 + "execution_count": 5,
136 + "metadata": {},
137 + "outputs": [],
138 + "source": [
139 + "def model_loss(input_real, input_z, out_channel_dim, alpha=0.2, smooth_factor=0.1):\n",
140 + " d_model_real, d_logits_real = discriminator(input_real, alpha=alpha)\n",
141 + " \n",
142 + " d_loss_real = tf.reduce_mean(\n",
143 + " tf.nn.sigmoid_cross_entropy_with_logits(logits=d_logits_real,\n",
144 + " labels=tf.ones_like(d_model_real) * (1 - smooth_factor)))\n",
145 + " \n",
146 + " input_fake = generator(input_z, out_channel_dim, alpha=alpha)\n",
147 + " d_model_fake, d_logits_fake = discriminator(input_fake, reuse=True, alpha=alpha)\n",
148 + " \n",
149 + " d_loss_fake = tf.reduce_mean(\n",
150 + " tf.nn.sigmoid_cross_entropy_with_logits(logits=d_logits_fake, labels=tf.zeros_like(d_model_fake)))\n",
151 + " \n",
152 + " g_loss = tf.reduce_mean(\n",
153 + " tf.nn.sigmoid_cross_entropy_with_logits(logits=d_logits_fake, labels=tf.ones_like(d_model_fake)))\n",
154 + "\n",
155 + " return d_loss_real + d_loss_fake, g_loss\n"
156 + ]
157 + },
158 + {
159 + "cell_type": "code",
160 + "execution_count": 6,
161 + "metadata": {},
162 + "outputs": [],
163 + "source": [
164 + "def model_opt(d_loss, g_loss, learning_rate, beta1):\n",
165 + " # Get weights and bias to update\n",
166 + " t_vars = tf.trainable_variables()\n",
167 + " d_vars = [var for var in t_vars if var.name.startswith('discriminator')]\n",
168 + " g_vars = [var for var in t_vars if var.name.startswith('generator')]\n",
169 + "\n",
170 + " # Optimize\n",
171 + " with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)):\n",
172 + " d_train_opt = tf.train.AdamOptimizer(learning_rate, beta1=beta1).minimize(d_loss, var_list=d_vars)\n",
173 + " g_train_opt = tf.train.AdamOptimizer(learning_rate, beta1=beta1).minimize(g_loss, var_list=g_vars)\n",
174 + "\n",
175 + " return d_train_opt, g_train_opt\n"
176 + ]
177 + },
178 + {
179 + "cell_type": "code",
180 + "execution_count": 7,
181 + "metadata": {},
182 + "outputs": [],
183 + "source": [
184 + "def show_generator_output(sess, n_images, input_z, out_channel_dim, image_mode):\n",
185 + " cmap = None if image_mode == 'RGB' else 'gray'\n",
186 + " z_dim = input_z.get_shape().as_list()[-1]\n",
187 + " example_z = np.random.uniform(-1, 1, size=[n_images, z_dim])\n",
188 + "\n",
189 + " samples = sess.run(\n",
190 + " generator(input_z, out_channel_dim, False),\n",
191 + " feed_dict={input_z: example_z})\n",
192 + " \n",
193 + " # pyplot.show()\n",
194 + " return samples"
195 + ]
196 + },
197 + {
198 + "cell_type": "code",
199 + "execution_count": 8,
200 + "metadata": {},
201 + "outputs": [],
202 + "source": [
203 + "def train(epoch_count, batch_size, z_dim, learning_rate, beta1, get_batches, data_shape, data_image_mode,\n",
204 + " print_every=10, show_every=10):\n",
205 + " # TODO: Build Model\n",
206 + " input_real, input_z, _ = model_inputs(data_shape[2], data_shape[1], data_shape[3], z_dim)\n",
207 + " d_loss, g_loss = model_loss(input_real, input_z, data_shape[3], alpha=0.2)\n",
208 + " d_train_opt, g_train_opt = model_opt(d_loss, g_loss, learning_rate, beta1)\n",
209 + " \n",
210 + " saver = tf.train.Saver()\n",
211 + " sample_z = np.random.uniform(-1, 1, size=(72, z_dim))\n",
212 + " \n",
213 + " samples, losses = [], []\n",
214 + " \n",
215 + " steps = 0\n",
216 + " count = 0\n",
217 + " \n",
218 + " with tf.Session() as sess:\n",
219 + " saver = tf.train.Saver()\n",
220 + " sess.run(tf.global_variables_initializer())\n",
221 + " \n",
222 + " # continue training\n",
223 + " save_path = saver.save(sess, \"/tmp/model.ckpt\")\n",
224 + " ckpt = tf.train.latest_checkpoint('./model/')\n",
225 + " saver.restore(sess, save_path)\n",
226 + " coord = tf.train.Coordinator()\n",
227 + " threads = tf.train.start_queue_runners(sess=sess, coord=coord)\n",
228 + "\n",
229 + " os.mkdir('output')\n",
230 + " for epoch_i in range(epoch_count):\n",
231 + " for batch_images in get_batches(batch_size):\n",
232 + " # Train Model\n",
233 + " steps += 1\n",
234 + " batch_images *= 2.0\n",
235 + " \n",
236 + " # Sample random noise for G\n",
237 + " batch_z = np.random.uniform(-1, 1, size=(batch_size, z_dim))\n",
238 + " \n",
239 + " # Run optimizers\n",
240 + " sess.run(d_train_opt, feed_dict={input_real: batch_images, input_z: batch_z})\n",
241 + " sess.run(g_train_opt, feed_dict={input_z: batch_z})\n",
242 + " \n",
243 + " if steps % print_every == 0:\n",
244 + " os.mkdir('output/'+ str(steps))\n",
245 + " # At the end of each epoch, get the losses and print them out\n",
246 + " train_loss_d = d_loss.eval({input_real: batch_images, input_z: batch_z})\n",
247 + " train_loss_g = g_loss.eval({input_z: batch_z})\n",
248 + " print(\"Epoch {}/{} Step {}...\".format(epoch_i+1, epoch_count, steps),\n",
249 + " \"Discriminator Loss: {:.4f}...\".format(train_loss_d),\n",
250 + " \"Generator Loss: {:.4f}\".format(train_loss_g))\n",
251 + " # Save losses for viewing after training\n",
252 + " #losses.append((train_loss_d, train_loss_g))\n",
253 + " \n",
254 + " if steps % show_every == 0:\n",
255 + " count = count +1\n",
256 + " iterr = count*show_every\n",
257 + " # Show example output for the generator # 25 number for 1 time\n",
258 + " images_grid = show_generator_output(sess, 25, input_z, data_shape[3], data_image_mode)\n",
259 + " x = 0\n",
260 + " for image_grid in images_grid : \n",
261 + " x = x+1\n",
262 + " dst = os.path.join(\"output\", str(steps),str(iterr)+str(x)+\".png\")\n",
263 + " pyplot.imsave(dst, image_grid)\n",
264 + " \n",
265 + " # saving the model \n",
266 + " if epoch_i % 10 == 0:\n",
267 + " if not os.path.exists('./model/'):\n",
268 + " os.makedirs('./model')\n",
269 + " saver.save(sess, './model/' + str(epoch_i)) "
270 + ]
271 + },
272 + {
273 + "cell_type": "code",
274 + "execution_count": 10,
275 + "metadata": {
276 + "scrolled": true
277 + },
278 + "outputs": [
279 + {
280 + "name": "stdout",
281 + "output_type": "stream",
282 + "text": [
283 + "140\n",
284 + "(?, 4, 4, 1024)\n",
285 + "(?, 6, 6, 512)\n",
286 + "(?, 12, 12, 256)\n",
287 + "(?, 25, 25, 3)\n",
288 + "INFO:tensorflow:Restoring parameters from /tmp/model.ckpt\n"
289 + ]
290 + },
291 + {
292 + "ename": "FileExistsError",
293 + "evalue": "[Errno 17] File exists: 'output'",
294 + "output_type": "error",
295 + "traceback": [
296 + "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
297 + "\u001b[0;31mFileExistsError\u001b[0m Traceback (most recent call last)",
298 + "\u001b[0;32m<ipython-input-10-3cf64f8b526a>\u001b[0m in \u001b[0;36m<module>\u001b[0;34m\u001b[0m\n\u001b[1;32m 8\u001b[0m \u001b[0mceleba_dataset\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mDataset\u001b[0m\u001b[0;34m(\u001b[0m \u001b[0mglob\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m'./motionpatch/*.png'\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 9\u001b[0m \u001b[0;32mwith\u001b[0m \u001b[0mtf\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mGraph\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mas_default\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 10\u001b[0;31m \u001b[0mtrain\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mepochs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mbatch_size\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mz_dim\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mlearning_rate\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mbeta1\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mceleba_dataset\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mget_batches\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mceleba_dataset\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mshape\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mceleba_dataset\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mimage_mode\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m",
299 + "\u001b[0;32m<ipython-input-8-4eafe8fdaf6d>\u001b[0m in \u001b[0;36mtrain\u001b[0;34m(epoch_count, batch_size, z_dim, learning_rate, beta1, get_batches, data_shape, data_image_mode, print_every, show_every)\u001b[0m\n\u001b[1;32m 25\u001b[0m \u001b[0mthreads\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mtf\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mtrain\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mstart_queue_runners\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0msess\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0msess\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mcoord\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mcoord\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 26\u001b[0m \u001b[0;31m#sess.run(tf.global_variables_initializer())\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 27\u001b[0;31m \u001b[0mos\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mmkdir\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m'output'\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 28\u001b[0m \u001b[0;32mfor\u001b[0m \u001b[0mepoch_i\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mrange\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mepoch_count\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 29\u001b[0m \u001b[0;32mfor\u001b[0m \u001b[0mbatch_images\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mget_batches\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mbatch_size\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
300 + "\u001b[0;31mFileExistsError\u001b[0m: [Errno 17] File exists: 'output'"
301 + ]
302 + }
303 + ],
304 + "source": [
305 + "batch_size = 50\n",
306 + "z_dim = 100\n",
307 + "learning_rate = 0.00025\n",
308 + "beta1 = 0.45\n",
309 + "\n",
310 + "epochs = 500\n",
311 + "print(len(glob('./motionpatch/*.png')))\n",
312 + "celeba_dataset = Dataset( glob('./motionpatch/*.png'))\n",
313 + "with tf.Graph().as_default():\n",
314 + " train(epochs, batch_size, z_dim, learning_rate, beta1, celeba_dataset.get_batches, celeba_dataset.shape, celeba_dataset.image_mode)"
315 + ]
316 + },
317 + {
318 + "cell_type": "code",
319 + "execution_count": null,
320 + "metadata": {},
321 + "outputs": [],
322 + "source": []
323 + },
324 + {
325 + "cell_type": "code",
326 + "execution_count": null,
327 + "metadata": {},
328 + "outputs": [],
329 + "source": []
330 + },
331 + {
332 + "cell_type": "code",
333 + "execution_count": null,
334 + "metadata": {},
335 + "outputs": [],
336 + "source": []
337 + }
338 + ],
339 + "metadata": {
340 + "kernelspec": {
341 + "display_name": "Python 3",
342 + "language": "python",
343 + "name": "python3"
344 + },
345 + "language_info": {
346 + "codemirror_mode": {
347 + "name": "ipython",
348 + "version": 3
349 + },
350 + "file_extension": ".py",
351 + "mimetype": "text/x-python",
352 + "name": "python",
353 + "nbconvert_exporter": "python",
354 + "pygments_lexer": "ipython3",
355 + "version": "3.5.0"
356 + }
357 + },
358 + "nbformat": 4,
359 + "nbformat_minor": 1
360 +}
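
The Dataset.get_batches generator above calls a get_batch helper that this copy never defines; the fuller notebook below adds get_image and get_batch as Dataset methods. A minimal module-level sketch that satisfies the call above, mirroring that later definition:

import numpy as np
from PIL import Image

def get_image(image_path, width, height, mode):
    # Load one motion patch and resize it to the target resolution
    image = Image.open(image_path)
    image = image.resize((width, height))
    return np.array(image)

def get_batch(image_files, width, height, mode):
    data_batch = np.array(
        [get_image(f, width, height, mode) for f in image_files]).astype(np.float32)
    # Make sure the images are in 4 dimensions (append a channel axis for grayscale)
    if len(data_batch.shape) < 4:
        data_batch = data_batch.reshape(data_batch.shape + (1,))
    return data_batch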
1 +{
2 + "cells": [
3 + {
4 + "cell_type": "code",
5 + "execution_count": 1,
6 + "metadata": {
7 + "scrolled": true
8 + },
9 + "outputs": [],
10 + "source": [
11 + "%matplotlib inline\n",
12 + "import os\n",
13 + "from glob import glob\n",
14 + "import numpy as np\n",
15 + "from matplotlib import pyplot\n",
16 + "from PIL import Image\n",
17 + "import tensorflow as tf\n",
18 + "\n",
19 + "##README : IF output folder already existed in same route, it makes error. change past output folder's name ##"
20 + ]
21 + },
22 + {
23 + "cell_type": "code",
24 + "execution_count": 2,
25 + "metadata": {},
26 + "outputs": [],
27 + "source": [
28 + "class Dataset(object):\n",
29 + " def __init__(self, data_files):\n",
30 + " IMAGE_WIDTH = 25\n",
31 + " IMAGE_HEIGHT = 25\n",
32 + " self.image_mode = 'RGB'\n",
33 + " image_channels = 3\n",
34 + " self.data_files = data_files\n",
35 + " self.shape = len(data_files), IMAGE_WIDTH, IMAGE_HEIGHT, image_channels\n",
36 + " \n",
37 + " def get_image(iself,mage_path, width, height, mode):\n",
38 + " image = Image.open(image_path)\n",
39 + " image = image.resize((width,height))\n",
40 + " return np.array(image)\n",
41 + "\n",
42 + "\n",
43 + " def get_batch(self,image_files, width, height, mode):\n",
44 + " data_batch = np.array(\n",
45 + " [self.get_image(sample_file, width, height, mode) for sample_file in image_files]).astype(np.float32)\n",
46 + " \n",
47 + " # Make sure the images are in 4 dimensions\n",
48 + " if len(data_batch.shape) < 4:\n",
49 + " data_batch = data_batch.reshape(data_batch.shape + (1,))\n",
50 + " return data_batch\n",
51 + "\n",
52 + " def get_batches(self, batch_size):\n",
53 + " IMAGE_MAX_VALUE = 255\n",
54 + " current_index = 0\n",
55 + " while current_index + batch_size <= self.shape[0]:\n",
56 + " data_batch = self.get_batch(\n",
57 + " self.data_files[current_index:current_index + batch_size],\n",
58 + " self.shape[1],self.shape[2],\n",
59 + " self.image_mode)\n",
60 + " \n",
61 + " current_index += batch_size\n",
62 + " \n",
63 + " yield data_batch / IMAGE_MAX_VALUE - 0.5\n",
64 + "\n",
65 + "\n",
66 + "def model_inputs(image_width, image_height, image_channels, z_dim):\n",
67 + " real_input_images = tf.placeholder(tf.float32, [None, image_width, image_height, image_channels], 'real_input_images')\n",
68 + " input_z = tf.placeholder(tf.float32, [None, z_dim], 'input_z')\n",
69 + " learning_rate = tf.placeholder(tf.float32, [], 'learning_rate')\n",
70 + " return real_input_images, input_z, learning_rate\n"
71 + ]
72 + },
73 + {
74 + "cell_type": "code",
75 + "execution_count": 3,
76 + "metadata": {},
77 + "outputs": [],
78 + "source": [
79 + "def discriminator(images, reuse=False, alpha=0.2, keep_prob=0.5):\n",
80 + " with tf.variable_scope('discriminator', reuse=reuse):\n",
81 + " # Input layer is 25x25xn\n",
82 + " # Convolutional layer, 13x13x64\n",
83 + " conv1 = tf.layers.conv2d(images, 64, 5, 2, padding='same', kernel_initializer=tf.contrib.layers.xavier_initializer())\n",
84 + " lrelu1 = tf.maximum(alpha * conv1, conv1)\n",
85 + " drop1 = tf.layers.dropout(lrelu1, keep_prob)\n",
86 + " \n",
87 + " # Strided convolutional layer, 7x7x128\n",
88 + " conv2 = tf.layers.conv2d(drop1, 128, 5, 2, 'same', use_bias=False)\n",
89 + " bn2 = tf.layers.batch_normalization(conv2)\n",
90 + " lrelu2 = tf.maximum(alpha * bn2, bn2)\n",
91 + " drop2 = tf.layers.dropout(lrelu2, keep_prob)\n",
92 + " \n",
93 + " # Strided convolutional layer, 4x4x256\n",
94 + " conv3 = tf.layers.conv2d(drop2, 256, 5, 2, 'same', use_bias=False)\n",
95 + " bn3 = tf.layers.batch_normalization(conv3)\n",
96 + " lrelu3 = tf.maximum(alpha * bn3, bn3)\n",
97 + " drop3 = tf.layers.dropout(lrelu3, keep_prob)\n",
98 + " \n",
99 + " # fully connected\n",
100 + " flat = tf.reshape(drop3, (-1, 4*4*256))\n",
101 + " logits = tf.layers.dense(flat, 1)\n",
102 + " out = tf.sigmoid(logits)\n",
103 + " \n",
104 + " return out, logits"
105 + ]
106 + },
107 + {
108 + "cell_type": "code",
109 + "execution_count": 4,
110 + "metadata": {},
111 + "outputs": [],
112 + "source": [
113 + "def generator(z, out_channel_dim, is_train=True, alpha=0.2, keep_prob=0.5):\n",
114 + " # TODO: Implement Function\n",
115 + " with tf.variable_scope('generator', reuse=(not is_train)):\n",
116 + " # First fully connected layer, 8x4x512\n",
117 + " fc = tf.layers.dense(z, 4*4*1024, use_bias=False)\n",
118 + " fc = tf.reshape(fc, (-1, 4, 4, 1024))\n",
119 + " bn0 = tf.layers.batch_normalization(fc, training=is_train)\n",
120 + " lrelu0 = tf.maximum(alpha * bn0, bn0)\n",
121 + " drop0 = tf.layers.dropout(lrelu0, keep_prob, training=is_train)\n",
122 + " \n",
123 + " # Deconvolution, 16x8x256\n",
124 + " conv1 = tf.layers.conv2d_transpose(drop0, 512,3, 1, 'valid', use_bias=False)\n",
125 + " bn1 = tf.layers.batch_normalization(conv1, training=is_train)\n",
126 + " lrelu1 = tf.maximum(alpha * bn1, bn1)\n",
127 + " drop1 = tf.layers.dropout(lrelu1, keep_prob, training=is_train)\n",
128 + " \n",
129 + " # Deconvolution, 32x 128\n",
130 + " conv2 = tf.layers.conv2d_transpose(drop1, 256, 3, 2, 'same', use_bias=False)\n",
131 + " bn2 = tf.layers.batch_normalization(conv2, training=is_train)\n",
132 + " lrelu2 = tf.maximum(alpha * bn2, bn2)\n",
133 + " drop2 = tf.layers.dropout(lrelu2, keep_prob, training=is_train)\n",
134 + " \n",
135 + " # Output layer, 28x28xn\n",
136 + " logits = tf.layers.conv2d_transpose(drop2, out_channel_dim, 3, 2, 'valid')\n",
137 + " \n",
138 + " out = tf.tanh(logits)\n",
139 + " \n",
140 + " print(fc.shape)\n",
141 + " print(drop1.shape)\n",
142 + " print(drop2.shape)\n",
143 + " print(logits.shape)\n",
144 + " \n",
145 + " return out"
146 + ]
147 + },
148 + {
149 + "cell_type": "code",
150 + "execution_count": 5,
151 + "metadata": {},
152 + "outputs": [],
153 + "source": [
154 + "def model_loss(input_real, input_z, out_channel_dim, alpha=0.2, smooth_factor=0.1):\n",
155 + " d_model_real, d_logits_real = discriminator(input_real, alpha=alpha)\n",
156 + " \n",
157 + " d_loss_real = tf.reduce_mean(\n",
158 + " tf.nn.sigmoid_cross_entropy_with_logits(logits=d_logits_real,\n",
159 + " labels=tf.ones_like(d_model_real) * (1 - smooth_factor)))\n",
160 + " \n",
161 + " input_fake = generator(input_z, out_channel_dim, alpha=alpha)\n",
162 + " d_model_fake, d_logits_fake = discriminator(input_fake, reuse=True, alpha=alpha)\n",
163 + " \n",
164 + " d_loss_fake = tf.reduce_mean(\n",
165 + " tf.nn.sigmoid_cross_entropy_with_logits(logits=d_logits_fake, labels=tf.zeros_like(d_model_fake)))\n",
166 + " \n",
167 + " g_loss = tf.reduce_mean(\n",
168 + " tf.nn.sigmoid_cross_entropy_with_logits(logits=d_logits_fake, labels=tf.ones_like(d_model_fake)))\n",
169 + "\n",
170 + " return d_loss_real + d_loss_fake, g_loss\n"
171 + ]
172 + },
173 + {
174 + "cell_type": "code",
175 + "execution_count": 6,
176 + "metadata": {},
177 + "outputs": [],
178 + "source": [
179 + "def model_opt(d_loss, g_loss, learning_rate, beta1):\n",
180 + " # Get weights and bias to update\n",
181 + " t_vars = tf.trainable_variables()\n",
182 + " d_vars = [var for var in t_vars if var.name.startswith('discriminator')]\n",
183 + " g_vars = [var for var in t_vars if var.name.startswith('generator')]\n",
184 + "\n",
185 + " # Optimize\n",
186 + " with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)):\n",
187 + " d_train_opt = tf.train.AdamOptimizer(learning_rate, beta1=beta1).minimize(d_loss, var_list=d_vars)\n",
188 + " g_train_opt = tf.train.AdamOptimizer(learning_rate, beta1=beta1).minimize(g_loss, var_list=g_vars)\n",
189 + "\n",
190 + " return d_train_opt, g_train_opt\n"
191 + ]
192 + },
193 + {
194 + "cell_type": "code",
195 + "execution_count": 7,
196 + "metadata": {},
197 + "outputs": [],
198 + "source": [
199 + "def show_generator_output(sess, n_images, input_z, out_channel_dim, image_mode):\n",
200 + " cmap = None if image_mode == 'RGB' else 'gray'\n",
201 + " z_dim = input_z.get_shape().as_list()[-1]\n",
202 + " example_z = np.random.uniform(-1, 1, size=[n_images, z_dim])\n",
203 + "\n",
204 + " samples = sess.run(\n",
205 + " generator(input_z, out_channel_dim, False),\n",
206 + " feed_dict={input_z: example_z})\n",
207 + " \n",
208 + " # pyplot.show()\n",
209 + " return samples"
210 + ]
211 + },
212 + {
213 + "cell_type": "code",
214 + "execution_count": 8,
215 + "metadata": {},
216 + "outputs": [],
217 + "source": [
218 + "def train(epoch_count, batch_size, z_dim, learning_rate, beta1, get_batches, data_shape, data_image_mode,\n",
219 + " print_every=10, show_every=10):\n",
220 + " # TODO: Build Model\n",
221 + " input_real, input_z, _ = model_inputs(data_shape[2], data_shape[1], data_shape[3], z_dim)\n",
222 + " d_loss, g_loss = model_loss(input_real, input_z, data_shape[3], alpha=0.2)\n",
223 + " d_train_opt, g_train_opt = model_opt(d_loss, g_loss, learning_rate, beta1)\n",
224 + " \n",
225 + " saver = tf.train.Saver()\n",
226 + " sample_z = np.random.uniform(-1, 1, size=(72, z_dim))\n",
227 + " \n",
228 + " samples, losses = [], []\n",
229 + " \n",
230 + " steps = 0\n",
231 + " count = 0\n",
232 + " \n",
233 + " with tf.Session() as sess:\n",
234 + " saver = tf.train.Saver()\n",
235 + " sess.run(tf.global_variables_initializer())\n",
236 + " \n",
237 + " # continue training\n",
238 + " save_path = saver.save(sess, \"/tmp/model.ckpt\")\n",
239 + " ckpt = tf.train.latest_checkpoint('./model/')\n",
240 + " saver.restore(sess, save_path)\n",
241 + " coord = tf.train.Coordinator()\n",
242 + " threads = tf.train.start_queue_runners(sess=sess, coord=coord)\n",
243 + "\n",
244 + " os.mkdir('output')\n",
245 + " for epoch_i in range(epoch_count):\n",
246 + " for batch_images in get_batches(batch_size):\n",
247 + " # Train Model\n",
248 + " steps += 1\n",
249 + " batch_images *= 2.0\n",
250 + " \n",
251 + " # Sample random noise for G\n",
252 + " batch_z = np.random.uniform(-1, 1, size=(batch_size, z_dim))\n",
253 + " \n",
254 + " # Run optimizers\n",
255 + " sess.run(d_train_opt, feed_dict={input_real: batch_images, input_z: batch_z})\n",
256 + " sess.run(g_train_opt, feed_dict={input_z: batch_z})\n",
257 + " \n",
258 + " if steps % print_every == 0:\n",
259 + " os.mkdir('output/'+ str(steps))\n",
260 + " # At the end of each epoch, get the losses and print them out\n",
261 + " train_loss_d = d_loss.eval({input_real: batch_images, input_z: batch_z})\n",
262 + " train_loss_g = g_loss.eval({input_z: batch_z})\n",
263 + " print(\"Epoch {}/{} Step {}...\".format(epoch_i+1, epoch_count, steps),\n",
264 + " \"Discriminator Loss: {:.4f}...\".format(train_loss_d),\n",
265 + " \"Generator Loss: {:.4f}\".format(train_loss_g))\n",
266 + " # Save losses for viewing after training\n",
267 + " #losses.append((train_loss_d, train_loss_g))\n",
268 + " \n",
269 + " if steps % show_every == 0:\n",
270 + " count = count +1\n",
271 + " iterr = count*show_every\n",
272 + " # Show example output for the generator # 25 number for 1 time\n",
273 + " images_grid = show_generator_output(sess, 25, input_z, data_shape[3], data_image_mode)\n",
274 + " x = 0\n",
275 + " for image_grid in images_grid : \n",
276 + " x = x+1\n",
277 + " dst = os.path.join(\"output\", str(steps),str(iterr)+str(x)+\".png\")\n",
278 + " pyplot.imsave(dst, image_grid)\n",
279 + " \n",
280 + " # saving the model \n",
281 + " if epoch_i % 10 == 0:\n",
282 + " if not os.path.exists('./model/'):\n",
283 + " os.makedirs('./model')\n",
284 + " saver.save(sess, './model/' + str(epoch_i)) "
285 + ]
286 + },
287 + {
288 + "cell_type": "code",
289 + "execution_count": 9,
290 + "metadata": {
291 + "scrolled": true
292 + },
293 + "outputs": [
294 + {
295 + "name": "stdout",
296 + "output_type": "stream",
297 + "text": [
298 + "140\n",
299 + "(?, 4, 4, 1024)\n",
300 + "(?, 6, 6, 512)\n",
301 + "(?, 12, 12, 256)\n",
302 + "(?, 25, 25, 3)\n",
303 + "INFO:tensorflow:Restoring parameters from /tmp/model.ckpt\n"
304 + ]
305 + },
306 + {
307 + "ename": "NameError",
308 + "evalue": "name 'image_path' is not defined",
309 + "output_type": "error",
310 + "traceback": [
311 + "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
312 + "\u001b[0;31mNameError\u001b[0m Traceback (most recent call last)",
313 + "\u001b[0;32m<ipython-input-9-3cf64f8b526a>\u001b[0m in \u001b[0;36m<module>\u001b[0;34m\u001b[0m\n\u001b[1;32m 8\u001b[0m \u001b[0mceleba_dataset\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mDataset\u001b[0m\u001b[0;34m(\u001b[0m \u001b[0mglob\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m'./motionpatch/*.png'\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 9\u001b[0m \u001b[0;32mwith\u001b[0m \u001b[0mtf\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mGraph\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mas_default\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 10\u001b[0;31m \u001b[0mtrain\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mepochs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mbatch_size\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mz_dim\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mlearning_rate\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mbeta1\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mceleba_dataset\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mget_batches\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mceleba_dataset\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mshape\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mceleba_dataset\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mimage_mode\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m",
314 + "\u001b[0;32m<ipython-input-8-14a3faf19639>\u001b[0m in \u001b[0;36mtrain\u001b[0;34m(epoch_count, batch_size, z_dim, learning_rate, beta1, get_batches, data_shape, data_image_mode, print_every, show_every)\u001b[0m\n\u001b[1;32m 27\u001b[0m \u001b[0mos\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mmkdir\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m'output'\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 28\u001b[0m \u001b[0;32mfor\u001b[0m \u001b[0mepoch_i\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mrange\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mepoch_count\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 29\u001b[0;31m \u001b[0;32mfor\u001b[0m \u001b[0mbatch_images\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mget_batches\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mbatch_size\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 30\u001b[0m \u001b[0;31m# Train Model\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 31\u001b[0m \u001b[0msteps\u001b[0m \u001b[0;34m+=\u001b[0m \u001b[0;36m1\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
315 + "\u001b[0;32m<ipython-input-2-77ee1ea74a0f>\u001b[0m in \u001b[0;36mget_batches\u001b[0;34m(self, batch_size)\u001b[0m\n\u001b[1;32m 30\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mdata_files\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mcurrent_index\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0mcurrent_index\u001b[0m \u001b[0;34m+\u001b[0m \u001b[0mbatch_size\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 31\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mshape\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;36m1\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mshape\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;36m2\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 32\u001b[0;31m self.image_mode)\n\u001b[0m\u001b[1;32m 33\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 34\u001b[0m \u001b[0mcurrent_index\u001b[0m \u001b[0;34m+=\u001b[0m \u001b[0mbatch_size\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
316 + "\u001b[0;32m<ipython-input-2-77ee1ea74a0f>\u001b[0m in \u001b[0;36mget_batch\u001b[0;34m(self, image_files, width, height, mode)\u001b[0m\n\u001b[1;32m 16\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0mget_batch\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0mimage_files\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mwidth\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mheight\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mmode\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 17\u001b[0m data_batch = np.array(\n\u001b[0;32m---> 18\u001b[0;31m [self.get_image(sample_file, width, height, mode) for sample_file in image_files]).astype(np.float32)\n\u001b[0m\u001b[1;32m 19\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 20\u001b[0m \u001b[0;31m# Make sure the images are in 4 dimensions\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
317 + "\u001b[0;32m<ipython-input-2-77ee1ea74a0f>\u001b[0m in \u001b[0;36m<listcomp>\u001b[0;34m(.0)\u001b[0m\n\u001b[1;32m 16\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0mget_batch\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0mimage_files\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mwidth\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mheight\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mmode\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 17\u001b[0m data_batch = np.array(\n\u001b[0;32m---> 18\u001b[0;31m [self.get_image(sample_file, width, height, mode) for sample_file in image_files]).astype(np.float32)\n\u001b[0m\u001b[1;32m 19\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 20\u001b[0m \u001b[0;31m# Make sure the images are in 4 dimensions\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
318 + "\u001b[0;32m<ipython-input-2-77ee1ea74a0f>\u001b[0m in \u001b[0;36mget_image\u001b[0;34m(iself, mage_path, width, height, mode)\u001b[0m\n\u001b[1;32m 9\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 10\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0mget_image\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0miself\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0mmage_path\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mwidth\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mheight\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mmode\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 11\u001b[0;31m \u001b[0mimage\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mImage\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mopen\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mimage_path\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 12\u001b[0m \u001b[0mimage\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mimage\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mresize\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mwidth\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0mheight\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 13\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0mnp\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0marray\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mimage\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
319 + "\u001b[0;31mNameError\u001b[0m: name 'image_path' is not defined"
320 + ]
321 + }
322 + ],
323 + "source": [
324 + "batch_size = 50\n",
325 + "z_dim = 100\n",
326 + "learning_rate = 0.00025\n",
327 + "beta1 = 0.45\n",
328 + "\n",
329 + "epochs = 500\n",
330 + "print(len(glob('./motionpatch/*.png')))\n",
331 + "celeba_dataset = Dataset( glob('./motionpatch/*.png'))\n",
332 + "with tf.Graph().as_default():\n",
333 + " train(epochs, batch_size, z_dim, learning_rate, beta1, celeba_dataset.get_batches, celeba_dataset.shape, celeba_dataset.image_mode)"
334 + ]
335 + },
336 + {
337 + "cell_type": "code",
338 + "execution_count": null,
339 + "metadata": {},
340 + "outputs": [],
341 + "source": []
342 + },
343 + {
344 + "cell_type": "code",
345 + "execution_count": null,
346 + "metadata": {},
347 + "outputs": [],
348 + "source": []
349 + },
350 + {
351 + "cell_type": "code",
352 + "execution_count": null,
353 + "metadata": {},
354 + "outputs": [],
355 + "source": []
356 + }
357 + ],
358 + "metadata": {
359 + "kernelspec": {
360 + "display_name": "Python 3",
361 + "language": "python",
362 + "name": "python3"
363 + },
364 + "language_info": {
365 + "codemirror_mode": {
366 + "name": "ipython",
367 + "version": 3
368 + },
369 + "file_extension": ".py",
370 + "mimetype": "text/x-python",
371 + "name": "python",
372 + "nbconvert_exporter": "python",
373 + "pygments_lexer": "ipython3",
374 + "version": "3.5.0"
375 + }
376 + },
377 + "nbformat": 4,
378 + "nbformat_minor": 1
379 +}
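
The notebooks only ever write checkpoints; sampling from a saved model is left implicit. A minimal sampling sketch, assuming the generator() cell above has been run and ./model/ holds a checkpoint (the first generator call creates the variables in train mode, the second reuses them in inference mode, matching how show_generator_output calls it):

import numpy as np
import tensorflow as tf

z_dim, n_samples, channels = 100, 25, 3

with tf.Graph().as_default():
    input_z = tf.placeholder(tf.float32, [None, z_dim], 'input_z')
    _ = generator(input_z, channels)               # is_train=True: creates the variables
    samples = generator(input_z, channels, False)  # is_train=False: reuse in inference mode
    saver = tf.train.Saver()
    with tf.Session() as sess:
        saver.restore(sess, tf.train.latest_checkpoint('./model/'))
        z = np.random.uniform(-1, 1, size=(n_samples, z_dim))
        images = sess.run(samples, feed_dict={input_z: z})
        images = (images + 1.0) / 2.0  # map tanh output from [-1, 1] to [0, 1] for imsave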
1 +{
2 + "cells": [
3 + {
4 + "cell_type": "code",
5 + "execution_count": 3,
6 + "metadata": {},
7 + "outputs": [],
8 + "source": [
9 + "import cv2\n",
10 + "from glob import glob\n",
11 + "import os \n",
12 + "\n",
13 + "motionpatch_location = './motionpatch/*.png'\n",
14 + "output_location = './smallone/'\n",
15 + "count = 0\n",
16 + "for f in glob(motionpatch_location):\n",
17 + " count += 1\n",
18 + " image = cv2.imread(f)\n",
19 + " small = cv2.resize(image,dsize=(25,25))\n",
20 + " dst = os.path.join(output_location +str(count)+\".png\")\n",
21 + " cv2.imwrite(dst,small)"
22 + ]
23 + }
24 + ],
25 + "metadata": {
26 + "kernelspec": {
27 + "display_name": "Python 3",
28 + "language": "python",
29 + "name": "python3"
30 + },
31 + "language_info": {
32 + "codemirror_mode": {
33 + "name": "ipython",
34 + "version": 3
35 + },
36 + "file_extension": ".py",
37 + "mimetype": "text/x-python",
38 + "name": "python",
39 + "nbconvert_exporter": "python",
40 + "pygments_lexer": "ipython3",
41 + "version": "3.5.0"
42 + }
43 + },
44 + "nbformat": 4,
45 + "nbformat_minor": 2
46 +}
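
A quick sanity check for the resize step (a sketch; assumes the cell above has populated ./smallone/). OpenCV loads images as BGR, so convert before plotting:

import cv2
from glob import glob
from matplotlib import pyplot

files = sorted(glob('./smallone/*.png'))
print(len(files), 'resized patches')
for i, f in enumerate(files[:5]):
    img = cv2.imread(f)
    assert img.shape[:2] == (25, 25)  # dsize=(25,25) above gives 25x25 output
    pyplot.subplot(1, 5, i + 1)
    pyplot.imshow(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
    pyplot.axis('off')
pyplot.show()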
1 +
2 +% Filter out missing files: copy only the motion patches listed in the index
3 +% LOCATION: raw skeleton files
4 +path_name = '/home/rfj/바탕화면/actionGAN/all_motionpatch/';
5 +fileID = fopen('/home/rfj/바탕화면/actionGAN/skeletone_INDEX/stand_2.txt','r');
6 +formatSpec = '%s';
7 +sizeA = [20 Inf];
8 +perfect_list = fscanf(fileID,formatSpec,sizeA);
9 +perfect_list = perfect_list.';
10 +fclose(fileID);
11 +
12 +L = length(perfect_list);
13 +
14 +for K = 1:L
15 + file_name = char(perfect_list(K,:));
16 + file_location = strcat(path_name,file_name,'.png')
17 + ori = imread(file_location);
18 +
19 + new_file_name = strcat('/home/rfj/바탕화면/actionGAN/DCGAN/new_motionpatch/',file_name(1:20),'.png');
20 + imwrite(ori,new_file_name);
21 +
22 +end
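
A rough Python equivalent of this MATLAB copy step (a sketch; paths and the 20-character ID width are taken verbatim from the script above):

import shutil

path_name = '/home/rfj/바탕화면/actionGAN/all_motionpatch/'
out_dir = '/home/rfj/바탕화면/actionGAN/DCGAN/new_motionpatch/'

with open('/home/rfj/바탕화면/actionGAN/skeletone_INDEX/stand_2.txt') as fid:
    perfect_list = fid.read().split()  # whitespace-separated skeleton IDs

for file_name in perfect_list:
    shutil.copy(path_name + file_name + '.png',
                out_dir + file_name[:20] + '.png')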
1 -if people sit & 1
2 -S001C001P008R001A020
3 -S010C001P008R002A020
4 -S010C001P013R001A020
5 -S010C001P016R001A020
6 -S010C001P016R002A020
7 -S013C001P025R002A020
8 -S013C002P008R002A020
9 -S013C002P017R002A020
10 -S017C003P015R001A020
11 -
12 -
13 -S010C002P016R002A020_a
14 -S010C002P007R002A020_a
15 -S010C001P015R002A020_a
16 -S011C001P038R001A020_a
17 -S012C002P019R002A020_a
18 -S013C001P015R002A020_a
19 -
20 -sit & 2
21 -S001C001P003R002A020
22 -S001C001P004R001A020
23 -S001C001P004R002A020
24 -S001C002P004R002A020
25 -S001C002P004R001A020
26 -S010C001P008R001A020
27 -S010C001P013R002A020
28 -S010C001P015R001A020
29 -S010C002P008R002A020
30 -S010C002P013R002A020
31 -S010C002P019R002A020
32 -S010C003P008R001A020
33 -S010C003P015R001A020
34 -S011C001P015R002A020
35 -S011C001P038R002A020
36 -S011C002P015R002A020
37 -S011C002P038R002A020
38 -S011C003P001R001A020
39 -S011C003P007R002A020
40 -S011C003P015R001A020
41 -S011C003P028R001A020
42 -S013C001P007R001A020
43 -S013C001P007R002A020
44 -S013C001P015R001A020
45 -S013C001P016R001A020
46 -S013C001P017R001A020
47 -S013C001P017R002A020
48 -
49 -S014C003P019R001A020_a
50 -S014C001P015R002A020_a
51 -S013C002P007R002A020_a
52 -S012C002P007R002A020_a
53 -S001C002P001R001A020_a
54 -S011C001P016R001A020_a
55 -S011C001P028R001A020_a
56 -S011C002P002R002A020_a
57 -
58 -
59 -
60 -
61 -
62 -stand & 1
63 -
64 -S014C003P019R002A020_a
65 -S001C002P006R001A020_a
66 -S001C002P006R002A020_a
67 -S014C001P007R001A020_a
68 -S014C001P007R002A020_a
69 -S014C001P027R002A020_a
70 -S017C002P020R001A020_a
71 -S010C001P021R002A020
72 -S011C001P016R002A020
73 -S011C002P016R002A020
74 -S012C001P037R002A020
75 -S013C001P037R002A020
76 -S012C002P037R002A020
77 -S014C003P039R001A020
78 -S014C003P037R001A020
79 -S014C002P025R002A020
80 -S014C001P025R002A020
81 -S014C002P037R002A020
82 -S014C001P037R001A020
83 -S014C002P039R002A020
84 -S014C001P037R002A020
85 -S017C003P016R001A020
86 -S014C001P039R002A020_a
87 -S014C002P027R001A020_a
88 -
89 -stand & 2
90 -S001C001P003R001A020
91 -S001C001P005R001A020
92 -S001C001P005R002A020
93 -S001C001P007R001A020
94 -S001C001P008R002A020
95 -S001C002P002R002A020
96 -S001C001P001R001A020
97 -S001C002P003R002A020
98 -S001C002P005R001A020
99 -S001C002P005R002A020
100 -S009C003P017R001A020
101 -S009C003P019R001A020
102 -S009C003P019R002A020
103 -S009C003P025R001A020
104 -S010C001P019R002A020
105 -S010C001P025R001A020
106 -S010C001P025R002A020
107 -S010C002P025R002A020
108 -S010C003P019R001A020
109 -S010C003P025R001A020
110 -S011C001P007R002A020
111 -S011C001P019R001A020
112 -S011C001P019R002A020
113 -S011C001P027R001A020
114 -S011C001P027R002A020
115 -S011C001P028R002A020
116 -S011C002P018R002A020
117 -S011C003P018R001A020
118 -S011C003P019R001A020
119 -S012C001P015R001A020
120 -S012C001P018R001A020
121 -S012C001P019R001A020
122 -S012C001P025R001A020
123 -S012C001P025R002A020
124 -S012C001P028R001A020
125 -S012C001P028R002A020
126 -S012C001P037R001A020
127 -S012C002P025R002A020
128 -S012C002P028R002A020
129 -S012C003P015R001A020
130 -S012C003P025R001A020
131 -S012C003P027R001A020
132 -S012C003P037R001A020
133 -S013C001P019R001A020
134 -S013C001P019R002A020
135 -S013C001P027R001A020
136 -S013C001P028R001A020
137 -S013C002P018R002A020
138 -S013C003P019R001A020
139 -S014C001P019R002A020
140 -S014C001P025R001A020
141 -S015C001P019R002A020
142 -S015C002P019R002A020
143 -S015C003P019R001A020
144 -S016C001P007R001A020
145 -S016C001P019R001A020
146 -S016C001P019R002A020
147 -S016C001P040R001A020
148 -S016C002P007R001A020
149 -S016C002P019R001A020
150 -S016C002P019R002A020
151 -S016C003P040R001A020
152 -S017C001P003R001A020
153 -S017C001P003R002A020
154 -S017C001P007R001A020
155 -S017C001P007R002A020
156 -S017C001P008R001A020
157 -S017C001P016R001A020
158 -S017C001P020R001A020
159 -S017C001P020R002A020
160 -S017C002P003R002A020
161 -S017C002P007R002A020
162 -S017C002P020R002A020
163 -S017C003P003R001A020
164 -S017C003P008R001A020
165 -
166 -S017C003P007R002A020_a
167 -
168 -S017C003P007R001A020_a
169 -S015C003P037R001A020_a
170 -S016C001P007R002A020_a
171 -
172 -S015C002P016R002A020_a
173 -S015C001P019R001A020_a
174 -S015C002P019R001A020_a
175 -S016C002P039R002A020_a
176 -S015C001P017R001A020_a
177 -S016C002P040R001A020_a
178 -S014C002P019R002A020_a
179 -S014C002P007R002A020_a
180 -S013C003P027R001A020_a
181 -S013C001P037R001A020_a
182 -S016C002P007R002A020_a
183 -
184 -S012C003P028R001A020_a
185 -
186 -S012C003P018R001A020_a
187 -S012C002P019R001A020_a
188 -S012C002P018R002A020_a
189 -
190 -S012C002P016R002A020_a
191 -S012C003P025R002A020_a
192 -
193 -S011C002P027R002A020_a
194 -S011C001P018R002A020_a
195 -
196 -S010C002P025R001A020_a
197 -S012C001P018R002A020_a
198 -
199 -S009C003P017R002A020_a
200 -S012C001P027R002A020_a
201 -S001C002P001R002A020_a
202 -S001C001P001R002A020_a
203 -S001C001P002R001A020_a
204 -
205 -S013C002P019R002A020_a
206 -S014C003P027R001A020_a
207 -S001C001P006R001A020_a // lifts a leg
@@ -53,6 +53,31 @@ S016C003P019R002A020
53 S017C002P003R001A020
54 S017C003P015R002A020
55
56 +S008C002P036R001A020
57 +S008C003P001R002A020
58 +S008C003P031R002A020
59 +S009C002P015R001A020
60 +S009C002P025R001A020
61 +S009C003P015R002A020
62 +S009C003P016R001A020
63 +S008C002P034R001A020
64 +S008C002P033R001A020
65 +S008C002P032R001A020
66 +S008C002P007R002A020
67 +S008C002P007R001A020
68 +S008C002P001R001A020
69 +S007C003P027R002A020
70 +S007C002P015R002A020
71 +S007C002P026R001A020
72 +S007C002P028R001A020
73 +S007C003P008R002A020
74 +S007C003P025R001A020
75 +S007C003P026R002A020
76 +S008C001P007R001A020
77 +S008C001P029R001A020
78 +S008C002P029R001A020
79 +S008C002P031R001A020
80 +
81 othercase // arm & leg motion
82 S017C003P020R001A020
83 S017C003P017R001A020
@@ -71,3 +96,9 @@ S010C001P021R001A020
96 S011C002P017R002A020
97 S011C001P017R002A020
98 S010C002P018R002A020
99 +S007C003P018R001A020 // time dif
100 +S008C001P030R001A020 // sit time dif
101 +S008C001P034R001A020 // twist hand bad?
102 +S008C002P025R001A020 //twist hand
103 +S009C001P025R002A020 //leg
104 +S008C003P032R001A020 //what?
...
1 +if people sit & 1
2 +S001C001P008R001A020
3 +S010C001P008R002A020
4 +S010C001P013R001A020
5 +S010C001P016R001A020
6 +S010C001P016R002A020
7 +S013C001P025R002A020
8 +S013C002P008R002A020
9 +S013C002P017R002A020
10 +S017C003P015R001A020
11 +S002C002P007R002A020
12 +S002C003P007R002A020
13 +S002C003P013R001A020
14 +S003C002P007R002A020
15 +S003C003P002R002A020
16 +S004C001P008R001A020
17 +S004C003P003R001A020
18 +S005C002P004R002A020
19 +S005C002P015R002A020
20 +S005C002P016R002A020
21 +S006C001P007R001A020
22 +S006C002P015R002A020
23 +S007C002P016R002A020_a
24 +S007C003P016R001A020_a //bad?
25 +S008C001P001R001A020_a
26 +S006C002P007R001A020_a
27 +S010C002P016R002A020_a
28 +S010C002P007R002A020_a
29 +S010C001P015R002A020_a
30 +S011C001P038R001A020_a
31 +S012C002P019R002A020_a
32 +S013C001P015R002A020_a
33 +
34 +sit & 2
35 +S001C001P003R002A020
36 +S001C001P004R001A020
37 +S001C001P004R002A020
38 +S001C002P004R002A020
39 +S001C002P004R001A020
40 +S010C001P008R001A020
41 +S010C001P013R002A020
42 +S010C001P015R001A020
43 +S010C002P008R002A020
44 +S010C002P013R002A020
45 +S010C002P019R002A020
46 +S010C003P008R001A020
47 +S010C003P015R001A020
48 +S011C001P015R002A020
49 +S011C001P038R002A020
50 +S011C002P015R002A020
51 +S011C002P038R002A020
52 +S011C003P001R001A020
53 +S011C003P007R002A020
54 +S011C003P015R001A020
55 +S011C003P028R001A020
56 +S013C001P007R001A020
57 +S013C001P007R002A020
58 +S013C001P015R001A020
59 +S013C001P016R001A020
60 +S013C001P017R001A020
61 +S013C001P017R002A020
62 +S010C002P008R002A020
63 +S010C002P013R002A020
64 +S001C001P007R001A020
65 +S001C003P007R001A020
66 +S002C001P009R002A020
67 +S002C002P003R002A020
68 +S002C002P008R001A020
69 +S002C002P009R002A020
70 +S002C003P010R001A020
71 +S003C001P001R001A020
72 +S003C001P002R001A020
73 +S003C001P019R001A020
74 +S003C003P002R001A020
75 +S006C001P019R001A020
76 +S006C001P023R001A020
77 +S006C001P023R002A020
78 +S006C002P023R002A020
79 +S007C001P007R001A020
80 +S007C002P008R002A020
81 +S007C003P001R001A020
82 +S007C003P007R001A020
83 +S007C003P015R001A020
84 +S008C001P015R001A020
85 +S008C001P019R001A020
86 +S009C001P007R002A020
87 +S008C003P019R001A020
88 +
89 +S007C002P019R002A020_a // amb...
90 +S007C003P019R002A020_a
91 +S008C001P007R002A020_a
92 +S008C001P015R002A020_a //a?
93 +S006C003P008R001A020_a
94 +S006C001P001R001A020_a
95 +S014C003P019R001A020_a
96 +S014C001P015R002A020_a
97 +S013C002P007R002A020_a
98 +S012C002P007R002A020_a
99 +S001C002P001R001A020_a
100 +S011C001P016R001A020_a
101 +S011C001P028R001A020_a
102 +S011C002P002R002A020_a
103 +
104 +
105 +
106 +
107 +
108 +stand & 1
109 +
110 +S008C003P036R001A020
111 +S007C001P018R002A020
112 +S007C001P027R001A020
113 +S007C002P008R001A020
114 +S007C002P018R002A020
115 +S007C003P008R001A020
116 +S007C003P027R001A020
117 +S008C001P029R002A020
118 +S008C001P032R002A020
119 +S008C001P033R001A020
120 +S008C001P033R002A020
121 +S008C001P034R002A020
122 +S008C002P033R002A020
123 +S008C002P034R002A020
124 +S009C003P015R001A020
125 +S009C001P015R001A020
126 +S008C003P033R001A020
127 +S010C001P021R002A020
128 +S011C001P016R002A020
129 +S011C002P016R002A020
130 +S012C001P037R002A020
131 +S013C001P037R002A020
132 +S012C002P037R002A020
133 +S014C003P039R001A020
134 +S014C003P037R001A020
135 +S014C002P025R002A020
136 +S014C001P025R002A020
137 +S014C002P037R002A020
138 +S014C001P037R001A020
139 +S014C002P039R002A020
140 +S014C001P037R002A020
141 +S017C003P016R001A020
142 +S002C003P007R001A020
143 +S003C002P002R002A020
144 +S003C002P008R002A020
145 +S005C001P015R001A020
146 +S005C001P018R002A020
147 +S005C003P015R001A020
148 +S006C001P016R002A020
149 +S006C001P017R002A020
150 +S006C001P022R001A020
151 +S006C002P007R002A020
152 +S006C002P016R002A020
153 +S006C003P022R001A020
154 +
155 +S014C003P019R002A020_a
156 +S001C002P006R001A020_a
157 +S001C002P006R002A020_a
158 +S014C001P007R001A020_a
159 +S014C001P007R002A020_a
160 +S014C001P027R002A020_a
161 +S017C002P020R001A020_a
162 +S005C002P017R002A020_a
163 +S005C002P018R002A020_a
164 +S014C001P039R002A020_a
165 +S014C002P027R001A020_a
166 +S005C001P017R002A020_a
167 +S003C001P015R001A020_a
168 +S006C003P007R001A020_a
169 +S007C001P018R001A020_a
170 +S007C002P027R002A020_a
171 +S008C001P036R001A020_a
172 +S008C002P032R002A020_a//bad?
173 +
174 +stand & 2
175 +S001C001P003R001A020
176 +S001C001P005R001A020
177 +S001C001P005R002A020
178 +S001C001P007R001A020
179 +S001C001P008R002A020
180 +S001C002P002R002A020
181 +S001C001P001R001A020
182 +S001C002P003R002A020
183 +S001C002P005R001A020
184 +S001C002P005R002A020
185 +S009C003P017R001A020
186 +S009C003P019R001A020
187 +S009C003P019R002A020
188 +S009C003P025R001A020
189 +S010C001P019R002A020
190 +S010C001P025R001A020
191 +S010C001P025R002A020
192 +S010C002P025R002A020
193 +S010C003P019R001A020
194 +S010C003P025R001A020
195 +S011C001P007R002A020
196 +S011C001P019R001A020
197 +S011C001P019R002A020
198 +S011C001P027R001A020
199 +S011C001P027R002A020
200 +S011C001P028R002A020
201 +S011C002P018R002A020
202 +S011C003P018R001A020
203 +S011C003P019R001A020
204 +S012C001P015R001A020
205 +S012C001P018R001A020
206 +S012C001P019R001A020
207 +S012C001P025R001A020
208 +S012C001P025R002A020
209 +S012C001P028R001A020
210 +S012C001P028R002A020
211 +S012C001P037R001A020
212 +S012C002P025R002A020
213 +S012C002P028R002A020
214 +S012C003P015R001A020
215 +S012C003P025R001A020
216 +S012C003P027R001A020
217 +S012C003P037R001A020
218 +S013C001P019R001A020
219 +S013C001P019R002A020
220 +S013C001P027R001A020
221 +S013C001P028R001A020
222 +S013C002P018R002A020
223 +S013C003P019R001A020
224 +S014C001P019R002A020
225 +S014C001P025R001A020
226 +S015C001P019R002A020
227 +S015C002P019R002A020
228 +S015C003P019R001A020
229 +S016C001P007R001A020
230 +S016C001P019R001A020
231 +S016C001P019R002A020
232 +S016C001P040R001A020
233 +S016C002P007R001A020
234 +S016C002P019R001A020
235 +S016C002P019R002A020
236 +S016C003P040R001A020
237 +S017C001P003R001A020
238 +S017C001P003R002A020
239 +S017C001P007R001A020
240 +S017C001P007R002A020
241 +S017C001P008R001A020
242 +S017C001P016R001A020
243 +S017C001P020R001A020
244 +S017C001P020R002A020
245 +S017C002P003R002A020
246 +S017C002P007R002A020
247 +S017C002P020R002A020
248 +S017C003P003R001A020
249 +S017C003P008R001A020
250 +S001C001P001R002A020
251 +S003C003P007R001A020
252 +S001C001P005R001A020
253 +S001C001P005R002A020
254 +S001C002P002R002A020
255 +S001C002P005R002A020
256 +S001C002P008R002A020
257 +S001C003P002R002A020
258 +S001C003P005R001A020
259 +S002C001P009R001A020
260 +S002C001P010R002A020
261 +S002C001P013R002A020
262 +S002C002P009R001A020
263 +S002C002P010R002A020
264 +S002C002P013R002A020
265 +S002C003P009R001A020
266 +S003C001P001R002A020
267 +S003C001P007R001A020
268 +S003C001P016R001A020
269 +S004C001P003R002A020
270 +S004C001P008R002A020
271 +S004C001P020R001A020
272 +S004C001P020R002A020
273 +S004C002P003R002A020
274 +S004C002P008R002A020
275 +S004C002P020R002A020
276 +S005C001P021R002A020
277 +S005C002P021R002A020
278 +S005C003P018R001A020
279 +S006C001P001R002A020
280 +S006C001P007R002A020
281 +S006C001P019R002A020
282 +S006C001P022R002A020
283 +S006C002P001R002A020
284 +S006C002P008R002A020
285 +S006C002P022R002A020
286 +S006C003P016R001A020
287 +S006C003P017R001A020
288 +S007C003P019R001A020
289 +S007C001P019R001A020
290 +S007C002P017R002A020
291 +S008C001P019R002A020
292 +S008C001P025R001A020
293 +S008C001P025R002A020
294 +S008C001P030R002A020
295 +S008C001P031R002A020
296 +S008C001P035R001A020
297 +S008C001P035R002A020
298 +S008C001P036R002A020
299 +S008C002P019R002A020
300 +S008C002P025R002A020
301 +S008C002P031R002A020
302 +S009C002P019R001A020 //bad..?
303 +S009C002P019R002A020
304 +S009C002P017R002A020
305 +S009C002P016R002A020
306 +S009C001P025R001A020
307 +S009C001P019R002A020
308 +S009C001P019R001A020
309 +S009C001P017R002A020
310 +S009C001P017R001A020
311 +S009C001P016R002A020
312 +S008C003P035R001A020
313 +S008C003P031R001A020 //bad?
314 +S008C003P025R001A020
315 +S008C002P036R002A020
316 +
317 +S008C001P031R001A020_a//leg
318 +S008C001P032R001A020_a // twist hand
319 +S008C002P001R002A020_a
320 +S008C002P030R002A020_a //late
321 +S009C002P025R002A020_a //good?
322 +S008C003P019R002A020_a //wall
323 +S008C002P035R002A020_a //noisy?
324 +S007C002P018R001A020_a
325 +S007C002P019R001A020_a
326 +S007C003P017R002A020_a
327 +S007C003P026R001A020_a
328 +S007C003P028R001A020_a //time dif
329 +S008C001P001R002A020_a //bad?
330 +S006C001P008R002A020_a
331 +S006C001P017R001A020_a
332 +S006C003P024R001A020_a
333 +S004C003P020R001A020_a
334 +S017C003P007R002A020_a
335 +S006C002P019R002A020_a
336 +S017C003P007R001A020_a
337 +S015C003P037R001A020_a
338 +S016C001P007R002A020_a
339 +S015C002P016R002A020_a
340 +S015C001P019R001A020_a
341 +S015C002P019R001A020_a
342 +S016C002P039R002A020_a
343 +S015C001P017R001A020_a
344 +S016C002P040R001A020_a
345 +S014C002P019R002A020_a
346 +S014C002P007R002A020_a
347 +S013C003P027R001A020_a
348 +S013C001P037R001A020_a
349 +S016C002P007R002A020_a
350 +S001C002P001R002A020_a
351 +S001C001P002R001A020_a
352 +S001C002P003R001A020_a
353 +S002C001P003R001A020_a
354 +S002C002P003R001A020_a
355 +S002C003P003R001A020_a
356 +S003C001P017R001A020_a
357 +S003C001P019R002A020_a
358 +S003C002P001R002A020_a
359 +S003C002P017R002A020_a
360 +S003C002P019R002A020_a
361 +S003C003P016R001A020_a
362 +S003C003P017R001A020_a
363 +S003C003P018R001A020_a
364 +S012C003P028R001A020_a
365 +S012C003P018R001A020_a
366 +S012C002P019R001A020_a
367 +S012C002P018R002A020_a
368 +S012C002P016R002A020_a
369 +S012C003P025R002A020_a
370 +S011C002P027R002A020_a
371 +S011C001P018R002A020_a
372 +S010C002P025R001A020_a
373 +S012C001P018R002A020_a
374 +S009C003P017R002A020_a
375 +S012C001P027R002A020_a
376 +S001C002P001R002A020_a
377 +S001C001P001R002A020_a
378 +S001C001P002R001A020_a
379 +S013C002P019R002A020_a
380 +S014C003P027R001A020_a
381 +S001C001P006R001A020_a//leg
1 +S001C001P003R001A020
2 +S001C001P005R001A020
3 +S001C001P005R002A020
4 +S001C001P007R001A020
5 +S001C001P008R002A020
6 +S001C002P002R002A020
7 +S001C001P001R001A020
8 +S001C002P003R002A020
9 +S001C002P005R001A020
10 +S001C002P005R002A020
11 +S009C003P017R001A020
12 +S009C003P019R001A020
13 +S009C003P019R002A020
14 +S009C003P025R001A020
15 +S010C001P019R002A020
16 +S010C001P025R001A020
17 +S010C001P025R002A020
18 +S010C002P025R002A020
19 +S010C003P019R001A020
20 +S010C003P025R001A020
21 +S011C001P007R002A020
22 +S011C001P019R001A020
23 +S011C001P019R002A020
24 +S011C001P027R001A020
25 +S011C001P027R002A020
26 +S011C001P028R002A020
27 +S011C002P018R002A020
28 +S011C003P018R001A020
29 +S011C003P019R001A020
30 +S012C001P015R001A020
31 +S012C001P018R001A020
32 +S012C001P019R001A020
33 +S012C001P025R001A020
34 +S012C001P025R002A020
35 +S012C001P028R001A020
36 +S012C001P028R002A020
37 +S012C001P037R001A020
38 +S012C002P025R002A020
39 +S012C002P028R002A020
40 +S012C003P015R001A020
41 +S012C003P025R001A020
42 +S012C003P027R001A020
43 +S012C003P037R001A020
44 +S013C001P019R001A020
45 +S013C001P019R002A020
46 +S013C001P027R001A020
47 +S013C001P028R001A020
48 +S013C002P018R002A020
49 +S013C003P019R001A020
50 +S014C001P019R002A020
51 +S014C001P025R001A020
52 +S015C001P019R002A020
53 +S015C002P019R002A020
54 +S015C003P019R001A020
55 +S016C001P007R001A020
56 +S016C001P019R001A020
57 +S016C001P019R002A020
58 +S016C001P040R001A020
59 +S016C002P007R001A020
60 +S016C002P019R001A020
61 +S016C002P019R002A020
62 +S016C003P040R001A020
63 +S017C001P003R001A020
64 +S017C001P003R002A020
65 +S017C001P007R001A020
66 +S017C001P007R002A020
67 +S017C001P008R001A020
68 +S017C001P016R001A020
69 +S017C001P020R001A020
70 +S017C001P020R002A020
71 +S017C002P003R002A020
72 +S017C002P007R002A020
73 +S017C002P020R002A020
74 +S017C003P003R001A020
75 +S017C003P008R001A020
76 +S001C001P001R002A020
77 +S003C003P007R001A020
78 +S001C001P005R001A020
79 +S001C001P005R002A020
80 +S001C002P002R002A020
81 +S001C002P005R002A020
82 +S001C002P008R002A020
83 +S001C003P002R002A020
84 +S001C003P005R001A020
85 +S002C001P009R001A020
86 +S002C001P010R002A020
87 +S002C001P013R002A020
88 +S002C002P009R001A020
89 +S002C002P010R002A020
90 +S002C002P013R002A020
91 +S002C003P009R001A020
92 +S003C001P001R002A020
93 +S003C001P007R001A020
94 +S003C001P016R001A020
95 +S004C001P003R002A020
96 +S004C001P008R002A020
97 +S004C001P020R001A020
98 +S004C001P020R002A020
99 +S004C002P003R002A020
100 +S004C002P008R002A020
101 +S004C002P020R002A020
102 +S005C001P021R002A020
103 +S005C002P021R002A020
104 +S005C003P018R001A020
105 +S006C001P001R002A020
106 +S006C001P007R002A020
107 +S006C001P019R002A020
108 +S006C001P022R002A020
109 +S006C002P001R002A020
110 +S006C002P008R002A020
111 +S006C002P022R002A020
112 +S006C003P016R001A020
113 +S006C003P017R001A020
114 +S007C003P019R001A020
115 +S007C001P019R001A020
116 +S007C002P017R002A020
117 +S008C001P019R002A020
118 +S008C001P025R001A020
119 +S008C001P025R002A020
120 +S008C001P030R002A020
121 +S008C001P031R002A020
122 +S008C001P035R001A020
123 +S008C001P035R002A020
124 +S008C001P036R002A020
125 +S008C002P019R002A020
126 +S008C002P025R002A020
127 +S008C002P031R002A020
128 +S009C002P019R002A020
129 +S009C002P017R002A020
130 +S009C002P016R002A020
131 +S009C001P025R001A020
132 +S009C001P019R002A020
133 +S009C001P019R001A020
134 +S009C001P017R002A020
135 +S009C001P017R001A020
136 +S009C001P016R002A020
137 +S008C003P035R001A020
138 +S008C003P025R001A020
139 +S008C002P036R002A020
140 +S009C002P025R002A020
141 +S008C002P035R002A020
142 +S009C002P019R001A020
143 +S008C003P019R002A020
144 +S008C003P031R001A020