augmentation.ipynb 5.22 KB
{
 "nbformat": 4,
 "nbformat_minor": 0,
 "metadata": {
  "colab": {
   "name": "augmentation.ipynb",
   "provenance": []
  },
  "kernelspec": {
   "name": "python3",
   "display_name": "Python 3"
  }
 },
 "cells": [
  {
   "cell_type": "code",
   "metadata": {
    "id": "wYGqloqDkEoU"
   },
   "source": [
    "# Mount Google Drive so the dataset under /content/gdrive is reachable.\n",
    "from google.colab import drive\n",
    "drive.mount('/content/gdrive')"
   ],
   "execution_count": null,
   "outputs": []
  },
  {
   "cell_type": "code",
   "metadata": {
    "id": "cPpLXwOPGYkB"
   },
   "source": [
    "# Environment setup: replace any preinstalled imgaug with the latest GitHub\n",
    "# version and install the Pascal VOC XML writer.\n",
    "# NOTE: %pip (not !pip) targets the running kernel's environment, and\n",
    "# `uninstall -y` avoids blocking on an interactive confirmation prompt.\n",
    "# The original `!cd ...` line was a no-op (each ! runs in its own subshell)\n",
    "# and has been removed.\n",
    "import imgaug; print(imgaug.__file__)\n",
    "%pip uninstall -y imgaug\n",
    "%pip install git+https://github.com/aleju/imgaug.git\n",
    "%pip install pascal_voc_writer"
   ],
   "execution_count": null,
   "outputs": []
  },
  {
   "cell_type": "code",
   "metadata": {
    "id": "3WumbxkdSAYo"
   },
   "source": [
    "# Parse a Pascal VOC annotation XML file and return its filename and\n",
    "# bounding-box information.\n",
    "\n",
    "import xml.etree.ElementTree as ET\n",
    "\n",
    "def read_anntation(xml_file: str):\n",
    "    \"\"\"Parse one Pascal VOC XML file.\n",
    "\n",
    "    Returns:\n",
    "        (bounding_box_list, file_name) where bounding_box_list is a list of\n",
    "        [label, x_min, y_min, x_max, y_max] entries (one per <bndbox>) and\n",
    "        file_name is the <filename> text from the XML.\n",
    "    \"\"\"\n",
    "    tree = ET.parse(xml_file)\n",
    "    root = tree.getroot()\n",
    "\n",
    "    bounding_box_list = []\n",
    "    file_name = root.find('filename').text\n",
    "\n",
    "    for obj in root.iter('object'):\n",
    "        object_label = obj.find('name').text\n",
    "        # Append one entry per <bndbox>. The original appended only once per\n",
    "        # <object> (keeping just the last box) and raised NameError when an\n",
    "        # object carried no <bndbox> at all.\n",
    "        for box in obj.findall('bndbox'):\n",
    "            x_min = int(box.find('xmin').text)\n",
    "            y_min = int(box.find('ymin').text)\n",
    "            x_max = int(box.find('xmax').text)\n",
    "            y_max = int(box.find('ymax').text)\n",
    "            bounding_box_list.append([object_label, x_min, y_min, x_max, y_max])\n",
    "\n",
    "    return bounding_box_list, file_name"
   ],
   "execution_count": null,
   "outputs": []
  },
  {
   "cell_type": "code",
   "metadata": {
    "id": "A-982sMgSOCV"
   },
   "source": [
    "# Load every image in a directory (as a numpy array of BGR images) together\n",
    "# with a list of (bounding_box_list, xml_filename, image_filename) tuples.\n",
    "\n",
    "from os import listdir\n",
    "import cv2\n",
    "import numpy as np\n",
    "\n",
    "def read_train_dataset(dir):\n",
    "    \"\"\"Read all jpg/png images plus their sibling .xml annotations from dir.\"\"\"\n",
    "    images = []\n",
    "    annotations = []\n",
    "\n",
    "    for file in listdir(dir):\n",
    "        # endswith() is stricter than the original substring test, which\n",
    "        # also matched names like 'jpg_notes.txt'.\n",
    "        if file.lower().endswith(('.jpg', '.png')):\n",
    "            images.append(cv2.imread(dir + file, 1))\n",
    "            # Swap only the trailing extension; str.replace() would also hit\n",
    "            # earlier occurrences (e.g. 'png_01.png' -> 'xml_01.xml').\n",
    "            annotation_file = file.rsplit('.', 1)[0] + '.xml'\n",
    "            bounding_box_list, file_name = read_anntation(dir + annotation_file)\n",
    "            annotations.append((bounding_box_list, annotation_file, file_name))\n",
    "\n",
    "    images = np.array(images)\n",
    "\n",
    "    return images, annotations"
   ],
   "execution_count": null,
   "outputs": []
  },
  {
   "cell_type": "code",
   "metadata": {
    "id": "5Kz66PqlST6Q"
   },
   "source": [
    "# Augment every image (brightness change), write the augmented image and a\n",
    "# matching Pascal VOC XML with the transformed bounding boxes.\n",
    "\n",
    "import imgaug as ia\n",
    "from imgaug import augmenters as iaa\n",
    "from pascal_voc_writer import Writer\n",
    "\n",
    "ia.seed(1)\n",
    "\n",
    "dir = '/content/gdrive/My Drive/capstone/data/'          # input image directory\n",
    "output_dir = '/content/gdrive/My Drive/capstone/data/'   # output image directory\n",
    "\n",
    "images, annotations = read_train_dataset(dir)\n",
    "\n",
    "for idx in range(len(images)):\n",
    "    image = images[idx]\n",
    "    boxes = annotations[idx][0]\n",
    "\n",
    "    ia_bounding_boxes = [\n",
    "        ia.BoundingBox(x1=box[1], y1=box[2], x2=box[3], y2=box[4])\n",
    "        for box in boxes\n",
    "    ]\n",
    "    bbs = ia.BoundingBoxesOnImage(ia_bounding_boxes, shape=image.shape)\n",
    "\n",
    "    # Darken\n",
    "    seq = iaa.MultiplyAndAddToBrightness(mul=(0.5, 0.5), add=(-10, 10))\n",
    "\n",
    "    # Brighten (alternative)\n",
    "    # seq = iaa.MultiplyAndAddToBrightness(mul=(1, 2.5), add=(-15, 15))\n",
    "\n",
    "    # to_deterministic() ensures the image and its boxes get the SAME\n",
    "    # random transform.\n",
    "    seq_det = seq.to_deterministic()\n",
    "\n",
    "    image_aug = seq_det.augment_images([image])[0]\n",
    "    bbs_aug = seq_det.augment_bounding_boxes([bbs])[0]\n",
    "\n",
    "    new_image_file = output_dir + 'dark_' + annotations[idx][2]\n",
    "    cv2.imwrite(new_image_file, image_aug)\n",
    "\n",
    "    h, w = np.shape(image_aug)[0:2]\n",
    "    voc_writer = Writer(new_image_file, w, h)\n",
    "\n",
    "    for i, bb_box in enumerate(bbs_aug.bounding_boxes):\n",
    "        voc_writer.addObject(boxes[i][0], int(bb_box.x1), int(bb_box.y1), int(bb_box.x2), int(bb_box.y2))\n",
    "\n",
    "    voc_writer.save(output_dir + 'dark_' + annotations[idx][1])\n",
    "    print(output_dir + 'dark_' + annotations[idx][1])"
   ],
   "execution_count": null,
   "outputs": []
  }
 ]
}