Brixia_Regression.ipynb
{"nbformat":4,"nbformat_minor":0,"metadata":{"colab":{"name":"Brixia_Regression.ipynb","provenance":[],"collapsed_sections":[]},"kernelspec":{"name":"python3","display_name":"Python 3.9.2 64-bit"},"language_info":{"name":"python","version":"3.9.2"},"accelerator":"GPU","interpreter":{"hash":"f1c3ad9a411e42bb4c3d015178907b0dc2550e5a0b291e90bb6449c260b3fd43"}},"cells":[{"cell_type":"code","metadata":{"id":"fz9Yjc1AFADo"},"source":["from google.colab import drive\n","drive.mount('/content/gdrive')\n"],"execution_count":null,"outputs":[]},{"cell_type":"code","metadata":{"colab":{"base_uri":"https://localhost:8080/"},"id":"AYSFYt4YLerO","executionInfo":{"status":"ok","timestamp":1624023327040,"user_tz":-540,"elapsed":299,"user":{"displayName":"‍김지현[학생](소프트웨어융합대학 컴퓨터공학과)","photoUrl":"","userId":"10635065983091343623"}},"outputId":"54fb2fa0-1d2f-41f3-ffe1-d08aed45d32d"},"source":["cd /content/gdrive/MyDrive/ColabNotebooks/final_code"],"execution_count":1,"outputs":[{"output_type":"stream","name":"stdout","text":["[WinError 3] 지정된 경로를 찾을 수 없습니다: '/content/gdrive/MyDrive/ColabNotebooks/final_code'\nc:\\Users\\kimjihyun\\Downloads\\brixia-20210618T133921Z-001\\final_code\n"]}]},{"cell_type":"code","metadata":{"id":"I4VCFBxlMVOs"},"source":["#!unzip Brixia_jpg.zip -d /content/gdrive/MyDrive/ColabNotebooks/brixia/DB"],"execution_count":null,"outputs":[]},{"cell_type":"code","metadata":{"colab":{"base_uri":"https://localhost:8080/"},"id":"9D3C2_bhOcHi","executionInfo":{"status":"ok","timestamp":1624006750418,"user_tz":-540,"elapsed":2609,"user":{"displayName":"‍김지현[학생](소프트웨어융합대학 컴퓨터공학과)","photoUrl":"","userId":"10635065983091343623"}},"outputId":"905d15e9-7287-4b79-f0fe-e3f85f036606"},"source":["!pip install tensorboardX\n","import model as M\n","import model_l1 as M_l1"],"execution_count":null,"outputs":[{"output_type":"stream","text":["Requirement already satisfied: tensorboardX in /usr/local/lib/python3.7/dist-packages (2.2)\n","Requirement already satisfied: protobuf>=3.8.0 in /usr/local/lib/python3.7/dist-packages (from tensorboardX) (3.12.4)\n","Requirement already satisfied: numpy in /usr/local/lib/python3.7/dist-packages (from tensorboardX) (1.19.5)\n","Requirement already satisfied: six>=1.9 in /usr/local/lib/python3.7/dist-packages (from protobuf>=3.8.0->tensorboardX) (1.15.0)\n","Requirement already satisfied: setuptools in /usr/local/lib/python3.7/dist-packages (from protobuf>=3.8.0->tensorboardX) (57.0.0)\n"],"name":"stdout"}]},{"cell_type":"code","metadata":{"colab":{"base_uri":"https://localhost:8080/"},"id":"Y1KgCYNEPOtL","executionInfo":{"status":"ok","timestamp":1622447622948,"user_tz":-540,"elapsed":915828,"user":{"displayName":"‍김지현[학생](소프트웨어융합대학 컴퓨터공학과)","photoUrl":"","userId":"10635065983091343623"}},"outputId":"7445d434-c18b-42e9-ad82-5e8f7c4a2c3c"},"source":["# Train from Brixia \n","PATH_TO_IMAGES = \"/content/gdrive/MyDrive/ColabNotebooks/final_code/DB\"\n","WEIGHT_DECAY = 0\n","LEARNING_RATE = 0.00001\n","#MSE\n","M.train_cnn(PATH_TO_IMAGES, LEARNING_RATE, WEIGHT_DECAY, fine_tune=True, regression=True, freeze=True, adam=True, initial_model_path='/content/gdrive/MyDrive/ColabNotebooks/final_code/checkpoints/checkpoint_best')\n"],"execution_count":null,"outputs":[{"output_type":"stream","text":["/usr/local/lib/python3.7/dist-packages/torch/utils/data/dataloader.py:477: UserWarning: This DataLoader will create 8 worker processes in total. Our suggested max number of worker in current system is 2, which is smaller than what this DataLoader is going to create. 
Output (abridged). Every epoch begins by printing the same DataLoader warning; it is shown once here and omitted from the log below:

    /usr/local/lib/python3.7/dist-packages/torch/utils/data/dataloader.py:477: UserWarning: This DataLoader will create 8 worker processes in total. Our suggested max number of worker in current system is 2, which is smaller than what this DataLoader is going to create. Please be aware that excessive worker creation might get DataLoader running slow or even freeze, lower the worker number to avoid potential slowness/freeze if necessary.
      cpuset_checked))
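The warning is easy to silence: the standard Colab runtime exposes two CPUs, so the loader should not hard-code eight workers. A minimal adjustment, assuming the dataset construction that actually lives in model.py (the dataset and batch size below are placeholders):

    import os
    from torch.utils.data import DataLoader

    # Match num_workers to the machine instead of hard-coding 8.
    num_workers = min(8, os.cpu_count() or 2)

    loader = DataLoader(train_dataset,   # placeholder: defined in model.py
                        batch_size=16,   # placeholder batch size
                        shuffle=True,
                        num_workers=num_workers,
                        pin_memory=True)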
The cell then prints the wrapped model. The full printout runs to several hundred near-identical layer entries; condensed:

    DataParallel(
      (module): DenseNet(                      # standard DenseNet-121 layout
        (features): Sequential(
          (conv0): Conv2d(3, 64, 7x7, stride 2) -> norm0 -> relu0 -> pool0 (3x3 max, stride 2)
          (denseblock1):  6 x _DenseLayer -> (transition1): 256 -> 128, 2x2 avg pool
          (denseblock2): 12 x _DenseLayer -> (transition2): 512 -> 256, 2x2 avg pool
          (denseblock3): 24 x _DenseLayer -> (transition3): 1024 -> 512, 2x2 avg pool
          (denseblock4): 16 x _DenseLayer
          (norm5): BatchNorm2d(1024)
        )
        (classifier): Sequential(
          (0): Linear(in_features=1024, out_features=1, bias=True)
          (1): ReLU(inplace=True)
        )
      )
    )

Each _DenseLayer is BatchNorm -> ReLU -> 1x1 Conv2d (to 128 channels) -> BatchNorm -> ReLU -> 3x3 Conv2d (to 32 channels), so every layer appends 32 feature maps (the growth rate) to its block's running input; that is why the norm1 channel counts in the printout climb in steps of 32.
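As a sanity check, the same structure can be reproduced directly from torchvision's densenet121 constructor, whose block sizes (6/12/24/16) match the printout; a single non-negative scalar output suits the Brixia global score, which sums six regional 0-3 scores into a 0-18 total. The 224x224 input below is illustrative only, since the notebook does not show the image size:

    import torch
    import torch.nn as nn
    import torchvision

    model = torchvision.models.densenet121(pretrained=False)
    model.classifier = nn.Sequential(nn.Linear(1024, 1), nn.ReLU(inplace=True))

    # One 3-channel image -> one non-negative Brixia-style score.
    with torch.no_grad():
        out = model(torch.randn(1, 3, 224, 224))
    print(out.shape)  # torch.Size([1, 1])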
Training then runs for up to 100 epochs over 3924 training and 564 validation images. The per-epoch logs are tabulated below (MAE is the value printed between each epoch's train and validation lines; "saved" marks epochs where the validation loss improved and "saving" was printed; elapsed times are cumulative from the start of training):

    epoch | train loss |    MAE | val loss | saved | elapsed
    ------+------------+--------+----------+-------+---------
        1 |    78.9740 | 6.3917 |  56.4126 |  yes  |  3m 59s
        2 |    48.8838 | 5.1142 |  34.4940 |  yes  |  4m 33s
        3 |    31.3369 | 4.3028 |  24.8607 |  yes  |  5m  7s
        4 |    24.1591 | 3.7523 |  19.4918 |  yes  |  5m 40s
        5 |    20.0730 | 3.3619 |  16.1583 |  yes  |  6m 15s
        6 |    17.0742 | 3.3617 |  16.2099 |   no  |  6m 48s
        7 |    15.0869 | 2.9683 |  13.3319 |  yes  |  7m 22s
        8 |    13.1906 | 2.9434 |  13.1773 |  yes  |  7m 56s
        9 |    12.3415 | 2.6698 |  11.0451 |  yes  |  8m 29s
       10 |    10.6797 | 2.6825 |  10.9637 |  yes  |  9m  3s
       11 |     9.9764 | 2.6220 |  10.6327 |  yes  |  9m 36s
       12 |     8.7138 | 2.5152 |   9.7818 |  yes  | 10m 10s
       13 |     7.8231 | 2.2656 |   8.1098 |  yes  | 10m 44s
       14 |     6.9762 | 2.5890 |  10.5454 |   no  | 11m 18s
       15 |     5.8221 | 2.1869 |   7.4830 |  yes  | 11m 51s
       16 |     4.8516 | 2.2675 |   7.9942 |   no  | 12m 25s
       17 |     4.1451 | 2.2500 |   7.9307 |   no  | 12m 58s
       18 |     3.7011 | 2.2458 |   7.9113 |   no  | 13m 32s
       19 |     3.2182 | 2.2385 |   7.9537 |   no  | 14m  5s
       20 |     3.0831 | 2.2766 |   8.0324 |   no  | 14m 38s
       21 |     2.5480 | 2.1298 |   7.3491 |  yes  | 15m 12s
       22 |     2.3368 | 2.1560 |   7.4185 |   no  | 15m 46s
       23 |     2.0660 | 2.1786 |   7.5178 |   no  | 16m 19s
       24 |     1.8959 | 2.1716 |   7.5521 |   no  | 16m 52s
       25 |     1.8483 | 2.1341 |   7.3437 |  yes  | 17m 26s
       26 |     1.7388 | 2.1021 |   7.2985 |  yes  | 17m 59s
       27 |     1.5112 | 2.0911 |   7.1448 |  yes  | 18m 33s
       28 |     1.7120 | 2.1631 |   7.4339 |   no  | 19m  7s
       29 |     1.5569 | 2.0815 |   7.1273 |  yes  | 19m 40s
       30 |     1.2775 | 2.1111 |   7.2146 |   no  | 20m 14s
       31 |     1.4923 | 2.1331 |   7.4849 |   no  | 20m 47s
       32 |     1.3386 | 2.0966 |   7.1237 |  yes  | 21m 22s
       33 |     1.0966 | 2.0782 |   7.0942 |  yes  | 21m 56s
       34 |     1.0943 | 2.0967 |   7.1161 |   no  | 22m 30s
       35 |     1.1186 | 2.0693 |   7.0669 |  yes  | 23m  3s

    (log truncated during epoch 36 of 100)
Please be aware that excessive worker creation might get DataLoader running slow or even freeze, lower the worker number to avoid potential slowness/freeze if necessary.\n","  cpuset_checked))\n"],"name":"stderr"},{"output_type":"stream","text":["train epoch 36:loss 1.2505 with data size 3924\n","MAE:  2.0475410502007665\n","val epoch 36:loss 6.9116 with data size 564\n","saving\n","Training complete in 23m 37s\n","Epoch 37/100\n","----------\n"],"name":"stdout"},{"output_type":"stream","text":["/usr/local/lib/python3.7/dist-packages/torch/utils/data/dataloader.py:477: UserWarning: This DataLoader will create 8 worker processes in total. Our suggested max number of worker in current system is 2, which is smaller than what this DataLoader is going to create. Please be aware that excessive worker creation might get DataLoader running slow or even freeze, lower the worker number to avoid potential slowness/freeze if necessary.\n","  cpuset_checked))\n"],"name":"stderr"},{"output_type":"stream","text":["train epoch 37:loss 1.2215 with data size 3924\n","MAE:  2.0379653488490597\n","val epoch 37:loss 6.7292 with data size 564\n","saving\n","Training complete in 24m 12s\n","Epoch 38/100\n","----------\n"],"name":"stdout"},{"output_type":"stream","text":["/usr/local/lib/python3.7/dist-packages/torch/utils/data/dataloader.py:477: UserWarning: This DataLoader will create 8 worker processes in total. Our suggested max number of worker in current system is 2, which is smaller than what this DataLoader is going to create. Please be aware that excessive worker creation might get DataLoader running slow or even freeze, lower the worker number to avoid potential slowness/freeze if necessary.\n","  cpuset_checked))\n"],"name":"stderr"},{"output_type":"stream","text":["train epoch 38:loss 1.2103 with data size 3924\n","MAE:  2.028071211373552\n","val epoch 38:loss 6.7928 with data size 564\n","Training complete in 24m 45s\n","Epoch 39/100\n","----------\n"],"name":"stdout"},{"output_type":"stream","text":["/usr/local/lib/python3.7/dist-packages/torch/utils/data/dataloader.py:477: UserWarning: This DataLoader will create 8 worker processes in total. Our suggested max number of worker in current system is 2, which is smaller than what this DataLoader is going to create. Please be aware that excessive worker creation might get DataLoader running slow or even freeze, lower the worker number to avoid potential slowness/freeze if necessary.\n","  cpuset_checked))\n"],"name":"stderr"},{"output_type":"stream","text":["train epoch 39:loss 0.9623 with data size 3924\n","MAE:  2.027499905372937\n","val epoch 39:loss 6.7421 with data size 564\n","Training complete in 25m 19s\n","Epoch 40/100\n","----------\n"],"name":"stdout"},{"output_type":"stream","text":["/usr/local/lib/python3.7/dist-packages/torch/utils/data/dataloader.py:477: UserWarning: This DataLoader will create 8 worker processes in total. Our suggested max number of worker in current system is 2, which is smaller than what this DataLoader is going to create. 
Please be aware that excessive worker creation might get DataLoader running slow or even freeze, lower the worker number to avoid potential slowness/freeze if necessary.\n","  cpuset_checked))\n"],"name":"stderr"},{"output_type":"stream","text":["train epoch 40:loss 0.9939 with data size 3924\n","MAE:  2.0907558199837277\n","val epoch 40:loss 7.0561 with data size 564\n","Training complete in 25m 52s\n","Epoch 41/100\n","----------\n"],"name":"stdout"},{"output_type":"stream","text":["/usr/local/lib/python3.7/dist-packages/torch/utils/data/dataloader.py:477: UserWarning: This DataLoader will create 8 worker processes in total. Our suggested max number of worker in current system is 2, which is smaller than what this DataLoader is going to create. Please be aware that excessive worker creation might get DataLoader running slow or even freeze, lower the worker number to avoid potential slowness/freeze if necessary.\n","  cpuset_checked))\n"],"name":"stderr"},{"output_type":"stream","text":["train epoch 41:loss 1.0387 with data size 3924\n","MAE:  2.02050518249789\n","val epoch 41:loss 6.6727 with data size 564\n","saving\n","Training complete in 26m 28s\n","Epoch 42/100\n","----------\n"],"name":"stdout"},{"output_type":"stream","text":["/usr/local/lib/python3.7/dist-packages/torch/utils/data/dataloader.py:477: UserWarning: This DataLoader will create 8 worker processes in total. Our suggested max number of worker in current system is 2, which is smaller than what this DataLoader is going to create. Please be aware that excessive worker creation might get DataLoader running slow or even freeze, lower the worker number to avoid potential slowness/freeze if necessary.\n","  cpuset_checked))\n"],"name":"stderr"},{"output_type":"stream","text":["train epoch 42:loss 0.9148 with data size 3924\n","MAE:  2.0334380846074285\n","val epoch 42:loss 6.7917 with data size 564\n","Training complete in 27m 1s\n","Epoch 43/100\n","----------\n"],"name":"stdout"},{"output_type":"stream","text":["/usr/local/lib/python3.7/dist-packages/torch/utils/data/dataloader.py:477: UserWarning: This DataLoader will create 8 worker processes in total. Our suggested max number of worker in current system is 2, which is smaller than what this DataLoader is going to create. Please be aware that excessive worker creation might get DataLoader running slow or even freeze, lower the worker number to avoid potential slowness/freeze if necessary.\n","  cpuset_checked))\n"],"name":"stderr"},{"output_type":"stream","text":["train epoch 43:loss 0.9719 with data size 3924\n","MAE:  2.026278153973374\n","val epoch 43:loss 6.7548 with data size 564\n","Training complete in 27m 35s\n","Epoch 44/100\n","----------\n"],"name":"stdout"},{"output_type":"stream","text":["/usr/local/lib/python3.7/dist-packages/torch/utils/data/dataloader.py:477: UserWarning: This DataLoader will create 8 worker processes in total. Our suggested max number of worker in current system is 2, which is smaller than what this DataLoader is going to create. 
Please be aware that excessive worker creation might get DataLoader running slow or even freeze, lower the worker number to avoid potential slowness/freeze if necessary.\n","  cpuset_checked))\n"],"name":"stderr"},{"output_type":"stream","text":["train epoch 44:loss 0.8651 with data size 3924\n","MAE:  1.9962530377063346\n","val epoch 44:loss 6.5425 with data size 564\n","saving\n","Training complete in 28m 9s\n","Epoch 45/100\n","----------\n"],"name":"stdout"},{"output_type":"stream","text":["/usr/local/lib/python3.7/dist-packages/torch/utils/data/dataloader.py:477: UserWarning: This DataLoader will create 8 worker processes in total. Our suggested max number of worker in current system is 2, which is smaller than what this DataLoader is going to create. Please be aware that excessive worker creation might get DataLoader running slow or even freeze, lower the worker number to avoid potential slowness/freeze if necessary.\n","  cpuset_checked))\n"],"name":"stderr"},{"output_type":"stream","text":["train epoch 45:loss 0.9037 with data size 3924\n","MAE:  2.027404611946764\n","val epoch 45:loss 6.6469 with data size 564\n","Training complete in 28m 42s\n","Epoch 46/100\n","----------\n"],"name":"stdout"},{"output_type":"stream","text":["/usr/local/lib/python3.7/dist-packages/torch/utils/data/dataloader.py:477: UserWarning: This DataLoader will create 8 worker processes in total. Our suggested max number of worker in current system is 2, which is smaller than what this DataLoader is going to create. Please be aware that excessive worker creation might get DataLoader running slow or even freeze, lower the worker number to avoid potential slowness/freeze if necessary.\n","  cpuset_checked))\n"],"name":"stderr"},{"output_type":"stream","text":["train epoch 46:loss 0.8050 with data size 3924\n","MAE:  2.039917788871214\n","val epoch 46:loss 6.7228 with data size 564\n","Training complete in 29m 15s\n","Epoch 47/100\n","----------\n"],"name":"stdout"},{"output_type":"stream","text":["/usr/local/lib/python3.7/dist-packages/torch/utils/data/dataloader.py:477: UserWarning: This DataLoader will create 8 worker processes in total. Our suggested max number of worker in current system is 2, which is smaller than what this DataLoader is going to create. Please be aware that excessive worker creation might get DataLoader running slow or even freeze, lower the worker number to avoid potential slowness/freeze if necessary.\n","  cpuset_checked))\n"],"name":"stderr"},{"output_type":"stream","text":["train epoch 47:loss 0.9854 with data size 3924\n","MAE:  1.9844852873645986\n","val epoch 47:loss 6.4251 with data size 564\n","saving\n","Training complete in 29m 49s\n","Epoch 48/100\n","----------\n"],"name":"stdout"},{"output_type":"stream","text":["/usr/local/lib/python3.7/dist-packages/torch/utils/data/dataloader.py:477: UserWarning: This DataLoader will create 8 worker processes in total. Our suggested max number of worker in current system is 2, which is smaller than what this DataLoader is going to create. 
Please be aware that excessive worker creation might get DataLoader running slow or even freeze, lower the worker number to avoid potential slowness/freeze if necessary.\n","  cpuset_checked))\n"],"name":"stderr"},{"output_type":"stream","text":["train epoch 48:loss 0.8911 with data size 3924\n","MAE:  2.0149696998881073\n","val epoch 48:loss 6.5605 with data size 564\n","Training complete in 30m 23s\n","Epoch 49/100\n","----------\n"],"name":"stdout"},{"output_type":"stream","text":["/usr/local/lib/python3.7/dist-packages/torch/utils/data/dataloader.py:477: UserWarning: This DataLoader will create 8 worker processes in total. Our suggested max number of worker in current system is 2, which is smaller than what this DataLoader is going to create. Please be aware that excessive worker creation might get DataLoader running slow or even freeze, lower the worker number to avoid potential slowness/freeze if necessary.\n","  cpuset_checked))\n"],"name":"stderr"},{"output_type":"stream","text":["train epoch 49:loss 0.6763 with data size 3924\n","MAE:  1.97749767589865\n","val epoch 49:loss 6.4504 with data size 564\n","Training complete in 30m 57s\n","Epoch 50/100\n","----------\n"],"name":"stdout"},{"output_type":"stream","text":["/usr/local/lib/python3.7/dist-packages/torch/utils/data/dataloader.py:477: UserWarning: This DataLoader will create 8 worker processes in total. Our suggested max number of worker in current system is 2, which is smaller than what this DataLoader is going to create. Please be aware that excessive worker creation might get DataLoader running slow or even freeze, lower the worker number to avoid potential slowness/freeze if necessary.\n","  cpuset_checked))\n"],"name":"stderr"},{"output_type":"stream","text":["train epoch 50:loss 0.7208 with data size 3924\n","MAE:  1.9885041075395353\n","val epoch 50:loss 6.4769 with data size 564\n","Training complete in 31m 30s\n","Epoch 51/100\n","----------\n"],"name":"stdout"},{"output_type":"stream","text":["/usr/local/lib/python3.7/dist-packages/torch/utils/data/dataloader.py:477: UserWarning: This DataLoader will create 8 worker processes in total. Our suggested max number of worker in current system is 2, which is smaller than what this DataLoader is going to create. Please be aware that excessive worker creation might get DataLoader running slow or even freeze, lower the worker number to avoid potential slowness/freeze if necessary.\n","  cpuset_checked))\n"],"name":"stderr"},{"output_type":"stream","text":["train epoch 51:loss 0.7832 with data size 3924\n","MAE:  1.977855291085463\n","val epoch 51:loss 6.4181 with data size 564\n","saving\n","Training complete in 32m 5s\n","Epoch 52/100\n","----------\n"],"name":"stdout"},{"output_type":"stream","text":["/usr/local/lib/python3.7/dist-packages/torch/utils/data/dataloader.py:477: UserWarning: This DataLoader will create 8 worker processes in total. Our suggested max number of worker in current system is 2, which is smaller than what this DataLoader is going to create. 
Please be aware that excessive worker creation might get DataLoader running slow or even freeze, lower the worker number to avoid potential slowness/freeze if necessary.\n","  cpuset_checked))\n"],"name":"stderr"},{"output_type":"stream","text":["train epoch 52:loss 0.8000 with data size 3924\n","MAE:  2.0036901152620077\n","val epoch 52:loss 6.6285 with data size 564\n","Training complete in 32m 39s\n","Epoch 53/100\n","----------\n"],"name":"stdout"},{"output_type":"stream","text":["/usr/local/lib/python3.7/dist-packages/torch/utils/data/dataloader.py:477: UserWarning: This DataLoader will create 8 worker processes in total. Our suggested max number of worker in current system is 2, which is smaller than what this DataLoader is going to create. Please be aware that excessive worker creation might get DataLoader running slow or even freeze, lower the worker number to avoid potential slowness/freeze if necessary.\n","  cpuset_checked))\n"],"name":"stderr"},{"output_type":"stream","text":["train epoch 53:loss 0.6876 with data size 3924\n","MAE:  1.9768221487443076\n","val epoch 53:loss 6.4464 with data size 564\n","Training complete in 33m 12s\n","Epoch 54/100\n","----------\n"],"name":"stdout"},{"output_type":"stream","text":["/usr/local/lib/python3.7/dist-packages/torch/utils/data/dataloader.py:477: UserWarning: This DataLoader will create 8 worker processes in total. Our suggested max number of worker in current system is 2, which is smaller than what this DataLoader is going to create. Please be aware that excessive worker creation might get DataLoader running slow or even freeze, lower the worker number to avoid potential slowness/freeze if necessary.\n","  cpuset_checked))\n"],"name":"stderr"},{"output_type":"stream","text":["train epoch 54:loss 0.8208 with data size 3924\n","MAE:  2.014802950176787\n","val epoch 54:loss 6.6601 with data size 564\n","Training complete in 33m 46s\n","Epoch 55/100\n","----------\n"],"name":"stdout"},{"output_type":"stream","text":["/usr/local/lib/python3.7/dist-packages/torch/utils/data/dataloader.py:477: UserWarning: This DataLoader will create 8 worker processes in total. Our suggested max number of worker in current system is 2, which is smaller than what this DataLoader is going to create. Please be aware that excessive worker creation might get DataLoader running slow or even freeze, lower the worker number to avoid potential slowness/freeze if necessary.\n","  cpuset_checked))\n"],"name":"stderr"},{"output_type":"stream","text":["train epoch 55:loss 0.7676 with data size 3924\n","MAE:  1.9839742137185226\n","val epoch 55:loss 6.4543 with data size 564\n","Training complete in 34m 19s\n","Epoch 56/100\n","----------\n"],"name":"stdout"},{"output_type":"stream","text":["/usr/local/lib/python3.7/dist-packages/torch/utils/data/dataloader.py:477: UserWarning: This DataLoader will create 8 worker processes in total. Our suggested max number of worker in current system is 2, which is smaller than what this DataLoader is going to create. 
Please be aware that excessive worker creation might get DataLoader running slow or even freeze, lower the worker number to avoid potential slowness/freeze if necessary.\n","  cpuset_checked))\n"],"name":"stderr"},{"output_type":"stream","text":["train epoch 56:loss 0.7810 with data size 3924\n","MAE:  1.9679777583120563\n","val epoch 56:loss 6.4086 with data size 564\n","saving\n","Training complete in 34m 53s\n","Epoch 57/100\n","----------\n"],"name":"stdout"},{"output_type":"stream","text":["/usr/local/lib/python3.7/dist-packages/torch/utils/data/dataloader.py:477: UserWarning: This DataLoader will create 8 worker processes in total. Our suggested max number of worker in current system is 2, which is smaller than what this DataLoader is going to create. Please be aware that excessive worker creation might get DataLoader running slow or even freeze, lower the worker number to avoid potential slowness/freeze if necessary.\n","  cpuset_checked))\n"],"name":"stderr"},{"output_type":"stream","text":["train epoch 57:loss 0.7769 with data size 3924\n","MAE:  2.04645995172203\n","val epoch 57:loss 6.9263 with data size 564\n","Training complete in 35m 26s\n","Epoch 58/100\n","----------\n"],"name":"stdout"},{"output_type":"stream","text":["/usr/local/lib/python3.7/dist-packages/torch/utils/data/dataloader.py:477: UserWarning: This DataLoader will create 8 worker processes in total. Our suggested max number of worker in current system is 2, which is smaller than what this DataLoader is going to create. Please be aware that excessive worker creation might get DataLoader running slow or even freeze, lower the worker number to avoid potential slowness/freeze if necessary.\n","  cpuset_checked))\n"],"name":"stderr"},{"output_type":"stream","text":["train epoch 58:loss 0.6726 with data size 3924\n","MAE:  1.972814902504708\n","val epoch 58:loss 6.4050 with data size 564\n","saving\n","Training complete in 35m 60s\n","Epoch 59/100\n","----------\n"],"name":"stdout"},{"output_type":"stream","text":["/usr/local/lib/python3.7/dist-packages/torch/utils/data/dataloader.py:477: UserWarning: This DataLoader will create 8 worker processes in total. Our suggested max number of worker in current system is 2, which is smaller than what this DataLoader is going to create. Please be aware that excessive worker creation might get DataLoader running slow or even freeze, lower the worker number to avoid potential slowness/freeze if necessary.\n","  cpuset_checked))\n"],"name":"stderr"},{"output_type":"stream","text":["train epoch 59:loss 0.7137 with data size 3924\n","MAE:  1.967135833959419\n","val epoch 59:loss 6.4158 with data size 564\n","Training complete in 36m 34s\n","Epoch 60/100\n","----------\n"],"name":"stdout"},{"output_type":"stream","text":["/usr/local/lib/python3.7/dist-packages/torch/utils/data/dataloader.py:477: UserWarning: This DataLoader will create 8 worker processes in total. Our suggested max number of worker in current system is 2, which is smaller than what this DataLoader is going to create. 
Please be aware that excessive worker creation might get DataLoader running slow or even freeze, lower the worker number to avoid potential slowness/freeze if necessary.\n","  cpuset_checked))\n"],"name":"stderr"},{"output_type":"stream","text":["train epoch 60:loss 0.7785 with data size 3924\n","MAE:  1.957633967887848\n","val epoch 60:loss 6.2972 with data size 564\n","saving\n","Training complete in 37m 7s\n","Epoch 61/100\n","----------\n"],"name":"stdout"},{"output_type":"stream","text":["/usr/local/lib/python3.7/dist-packages/torch/utils/data/dataloader.py:477: UserWarning: This DataLoader will create 8 worker processes in total. Our suggested max number of worker in current system is 2, which is smaller than what this DataLoader is going to create. Please be aware that excessive worker creation might get DataLoader running slow or even freeze, lower the worker number to avoid potential slowness/freeze if necessary.\n","  cpuset_checked))\n"],"name":"stderr"},{"output_type":"stream","text":["train epoch 61:loss 0.7278 with data size 3924\n","MAE:  1.947012342961756\n","val epoch 61:loss 6.2218 with data size 564\n","saving\n","Training complete in 37m 41s\n","Epoch 62/100\n","----------\n"],"name":"stdout"},{"output_type":"stream","text":["/usr/local/lib/python3.7/dist-packages/torch/utils/data/dataloader.py:477: UserWarning: This DataLoader will create 8 worker processes in total. Our suggested max number of worker in current system is 2, which is smaller than what this DataLoader is going to create. Please be aware that excessive worker creation might get DataLoader running slow or even freeze, lower the worker number to avoid potential slowness/freeze if necessary.\n","  cpuset_checked))\n"],"name":"stderr"},{"output_type":"stream","text":["train epoch 62:loss 0.7753 with data size 3924\n","MAE:  1.986487770598408\n","val epoch 62:loss 6.5076 with data size 564\n","Training complete in 38m 15s\n","Epoch 63/100\n","----------\n"],"name":"stdout"},{"output_type":"stream","text":["/usr/local/lib/python3.7/dist-packages/torch/utils/data/dataloader.py:477: UserWarning: This DataLoader will create 8 worker processes in total. Our suggested max number of worker in current system is 2, which is smaller than what this DataLoader is going to create. Please be aware that excessive worker creation might get DataLoader running slow or even freeze, lower the worker number to avoid potential slowness/freeze if necessary.\n","  cpuset_checked))\n"],"name":"stderr"},{"output_type":"stream","text":["train epoch 63:loss 0.6235 with data size 3924\n","MAE:  1.953429282755188\n","val epoch 63:loss 6.3174 with data size 564\n","Training complete in 38m 49s\n","Epoch 64/100\n","----------\n"],"name":"stdout"},{"output_type":"stream","text":["/usr/local/lib/python3.7/dist-packages/torch/utils/data/dataloader.py:477: UserWarning: This DataLoader will create 8 worker processes in total. Our suggested max number of worker in current system is 2, which is smaller than what this DataLoader is going to create. 
Please be aware that excessive worker creation might get DataLoader running slow or even freeze, lower the worker number to avoid potential slowness/freeze if necessary.\n","  cpuset_checked))\n"],"name":"stderr"},{"output_type":"stream","text":["train epoch 64:loss 0.7928 with data size 3924\n","MAE:  1.9570293522155877\n","val epoch 64:loss 6.2875 with data size 564\n","Training complete in 39m 22s\n","Epoch 65/100\n","----------\n"],"name":"stdout"},{"output_type":"stream","text":["/usr/local/lib/python3.7/dist-packages/torch/utils/data/dataloader.py:477: UserWarning: This DataLoader will create 8 worker processes in total. Our suggested max number of worker in current system is 2, which is smaller than what this DataLoader is going to create. Please be aware that excessive worker creation might get DataLoader running slow or even freeze, lower the worker number to avoid potential slowness/freeze if necessary.\n","  cpuset_checked))\n"],"name":"stderr"},{"output_type":"stream","text":["train epoch 65:loss 0.7110 with data size 3924\n","MAE:  1.9854452870521986\n","val epoch 65:loss 6.5207 with data size 564\n","Training complete in 39m 56s\n","Epoch 66/100\n","----------\n"],"name":"stdout"},{"output_type":"stream","text":["/usr/local/lib/python3.7/dist-packages/torch/utils/data/dataloader.py:477: UserWarning: This DataLoader will create 8 worker processes in total. Our suggested max number of worker in current system is 2, which is smaller than what this DataLoader is going to create. Please be aware that excessive worker creation might get DataLoader running slow or even freeze, lower the worker number to avoid potential slowness/freeze if necessary.\n","  cpuset_checked))\n"],"name":"stderr"},{"output_type":"stream","text":["train epoch 66:loss 0.7207 with data size 3924\n","MAE:  1.9492702065101752\n","val epoch 66:loss 6.2612 with data size 564\n","Training complete in 40m 29s\n","Epoch 67/100\n","----------\n"],"name":"stdout"},{"output_type":"stream","text":["/usr/local/lib/python3.7/dist-packages/torch/utils/data/dataloader.py:477: UserWarning: This DataLoader will create 8 worker processes in total. Our suggested max number of worker in current system is 2, which is smaller than what this DataLoader is going to create. Please be aware that excessive worker creation might get DataLoader running slow or even freeze, lower the worker number to avoid potential slowness/freeze if necessary.\n","  cpuset_checked))\n"],"name":"stderr"},{"output_type":"stream","text":["train epoch 67:loss 0.7136 with data size 3924\n","MAE:  1.963228888393558\n","val epoch 67:loss 6.2937 with data size 564\n","Training complete in 41m 3s\n","Epoch 68/100\n","----------\n"],"name":"stdout"},{"output_type":"stream","text":["/usr/local/lib/python3.7/dist-packages/torch/utils/data/dataloader.py:477: UserWarning: This DataLoader will create 8 worker processes in total. Our suggested max number of worker in current system is 2, which is smaller than what this DataLoader is going to create. 
Please be aware that excessive worker creation might get DataLoader running slow or even freeze, lower the worker number to avoid potential slowness/freeze if necessary.\n","  cpuset_checked))\n"],"name":"stderr"},{"output_type":"stream","text":["train epoch 68:loss 0.6845 with data size 3924\n","MAE:  1.9672471921005887\n","val epoch 68:loss 6.4008 with data size 564\n","Training complete in 41m 36s\n","Epoch 69/100\n","----------\n"],"name":"stdout"},{"output_type":"stream","text":["/usr/local/lib/python3.7/dist-packages/torch/utils/data/dataloader.py:477: UserWarning: This DataLoader will create 8 worker processes in total. Our suggested max number of worker in current system is 2, which is smaller than what this DataLoader is going to create. Please be aware that excessive worker creation might get DataLoader running slow or even freeze, lower the worker number to avoid potential slowness/freeze if necessary.\n","  cpuset_checked))\n"],"name":"stderr"},{"output_type":"stream","text":["train epoch 69:loss 0.6686 with data size 3924\n","MAE:  1.971293382716517\n","val epoch 69:loss 6.3016 with data size 564\n","Training complete in 42m 10s\n","Epoch 70/100\n","----------\n"],"name":"stdout"},{"output_type":"stream","text":["/usr/local/lib/python3.7/dist-packages/torch/utils/data/dataloader.py:477: UserWarning: This DataLoader will create 8 worker processes in total. Our suggested max number of worker in current system is 2, which is smaller than what this DataLoader is going to create. Please be aware that excessive worker creation might get DataLoader running slow or even freeze, lower the worker number to avoid potential slowness/freeze if necessary.\n","  cpuset_checked))\n"],"name":"stderr"},{"output_type":"stream","text":["train epoch 70:loss 0.7169 with data size 3924\n","MAE:  1.9672766568812918\n","val epoch 70:loss 6.3908 with data size 564\n","Training complete in 42m 44s\n","Epoch 71/100\n","----------\n"],"name":"stdout"},{"output_type":"stream","text":["/usr/local/lib/python3.7/dist-packages/torch/utils/data/dataloader.py:477: UserWarning: This DataLoader will create 8 worker processes in total. Our suggested max number of worker in current system is 2, which is smaller than what this DataLoader is going to create. Please be aware that excessive worker creation might get DataLoader running slow or even freeze, lower the worker number to avoid potential slowness/freeze if necessary.\n","  cpuset_checked))\n"],"name":"stderr"},{"output_type":"stream","text":["train epoch 71:loss 0.6931 with data size 3924\n","MAE:  1.9348847564336256\n","val epoch 71:loss 6.1369 with data size 564\n","saving\n","Training complete in 43m 18s\n","Epoch 72/100\n","----------\n"],"name":"stdout"},{"output_type":"stream","text":["/usr/local/lib/python3.7/dist-packages/torch/utils/data/dataloader.py:477: UserWarning: This DataLoader will create 8 worker processes in total. Our suggested max number of worker in current system is 2, which is smaller than what this DataLoader is going to create. 
Please be aware that excessive worker creation might get DataLoader running slow or even freeze, lower the worker number to avoid potential slowness/freeze if necessary.\n","  cpuset_checked))\n"],"name":"stderr"},{"output_type":"stream","text":["train epoch 72:loss 0.6207 with data size 3924\n","MAE:  1.9226636431635695\n","val epoch 72:loss 6.0415 with data size 564\n","saving\n","Training complete in 43m 51s\n","Epoch 73/100\n","----------\n"],"name":"stdout"},{"output_type":"stream","text":["/usr/local/lib/python3.7/dist-packages/torch/utils/data/dataloader.py:477: UserWarning: This DataLoader will create 8 worker processes in total. Our suggested max number of worker in current system is 2, which is smaller than what this DataLoader is going to create. Please be aware that excessive worker creation might get DataLoader running slow or even freeze, lower the worker number to avoid potential slowness/freeze if necessary.\n","  cpuset_checked))\n"],"name":"stderr"},{"output_type":"stream","text":["train epoch 73:loss 0.7677 with data size 3924\n","MAE:  1.9272308402978782\n","val epoch 73:loss 6.0743 with data size 564\n","Training complete in 44m 25s\n","Epoch 74/100\n","----------\n"],"name":"stdout"},{"output_type":"stream","text":["/usr/local/lib/python3.7/dist-packages/torch/utils/data/dataloader.py:477: UserWarning: This DataLoader will create 8 worker processes in total. Our suggested max number of worker in current system is 2, which is smaller than what this DataLoader is going to create. Please be aware that excessive worker creation might get DataLoader running slow or even freeze, lower the worker number to avoid potential slowness/freeze if necessary.\n","  cpuset_checked))\n"],"name":"stderr"},{"output_type":"stream","text":["train epoch 74:loss 0.5994 with data size 3924\n","MAE:  1.9542351522027177\n","val epoch 74:loss 6.1933 with data size 564\n","Training complete in 44m 59s\n","Epoch 75/100\n","----------\n"],"name":"stdout"},{"output_type":"stream","text":["/usr/local/lib/python3.7/dist-packages/torch/utils/data/dataloader.py:477: UserWarning: This DataLoader will create 8 worker processes in total. Our suggested max number of worker in current system is 2, which is smaller than what this DataLoader is going to create. Please be aware that excessive worker creation might get DataLoader running slow or even freeze, lower the worker number to avoid potential slowness/freeze if necessary.\n","  cpuset_checked))\n"],"name":"stderr"},{"output_type":"stream","text":["train epoch 75:loss 0.5071 with data size 3924\n","MAE:  1.9407615330836452\n","val epoch 75:loss 6.2107 with data size 564\n","Training complete in 45m 32s\n","Epoch 76/100\n","----------\n"],"name":"stdout"},{"output_type":"stream","text":["/usr/local/lib/python3.7/dist-packages/torch/utils/data/dataloader.py:477: UserWarning: This DataLoader will create 8 worker processes in total. Our suggested max number of worker in current system is 2, which is smaller than what this DataLoader is going to create. 
Please be aware that excessive worker creation might get DataLoader running slow or even freeze, lower the worker number to avoid potential slowness/freeze if necessary.\n","  cpuset_checked))\n"],"name":"stderr"},{"output_type":"stream","text":["train epoch 76:loss 0.4950 with data size 3924\n","MAE:  1.9254845019350662\n","val epoch 76:loss 6.1069 with data size 564\n","Training complete in 46m 6s\n","Epoch 77/100\n","----------\n"],"name":"stdout"},{"output_type":"stream","text":["/usr/local/lib/python3.7/dist-packages/torch/utils/data/dataloader.py:477: UserWarning: This DataLoader will create 8 worker processes in total. Our suggested max number of worker in current system is 2, which is smaller than what this DataLoader is going to create. Please be aware that excessive worker creation might get DataLoader running slow or even freeze, lower the worker number to avoid potential slowness/freeze if necessary.\n","  cpuset_checked))\n"],"name":"stderr"},{"output_type":"stream","text":["train epoch 77:loss 0.6736 with data size 3924\n","MAE:  1.947760651033398\n","val epoch 77:loss 6.2468 with data size 564\n","Training complete in 46m 39s\n","Epoch 78/100\n","----------\n"],"name":"stdout"},{"output_type":"stream","text":["/usr/local/lib/python3.7/dist-packages/torch/utils/data/dataloader.py:477: UserWarning: This DataLoader will create 8 worker processes in total. Our suggested max number of worker in current system is 2, which is smaller than what this DataLoader is going to create. Please be aware that excessive worker creation might get DataLoader running slow or even freeze, lower the worker number to avoid potential slowness/freeze if necessary.\n","  cpuset_checked))\n"],"name":"stderr"},{"output_type":"stream","text":["train epoch 78:loss 0.6142 with data size 3924\n","MAE:  1.9756013130676662\n","val epoch 78:loss 6.4077 with data size 564\n","Training complete in 47m 13s\n","Epoch 79/100\n","----------\n"],"name":"stdout"},{"output_type":"stream","text":["/usr/local/lib/python3.7/dist-packages/torch/utils/data/dataloader.py:477: UserWarning: This DataLoader will create 8 worker processes in total. Our suggested max number of worker in current system is 2, which is smaller than what this DataLoader is going to create. Please be aware that excessive worker creation might get DataLoader running slow or even freeze, lower the worker number to avoid potential slowness/freeze if necessary.\n","  cpuset_checked))\n"],"name":"stderr"},{"output_type":"stream","text":["train epoch 79:loss 0.4886 with data size 3924\n","MAE:  1.9357406307830878\n","val epoch 79:loss 6.1683 with data size 564\n","Training complete in 47m 47s\n","Epoch 80/100\n","----------\n"],"name":"stdout"},{"output_type":"stream","text":["/usr/local/lib/python3.7/dist-packages/torch/utils/data/dataloader.py:477: UserWarning: This DataLoader will create 8 worker processes in total. Our suggested max number of worker in current system is 2, which is smaller than what this DataLoader is going to create. 
Please be aware that excessive worker creation might get DataLoader running slow or even freeze, lower the worker number to avoid potential slowness/freeze if necessary.\n","  cpuset_checked))\n"],"name":"stderr"},{"output_type":"stream","text":["train epoch 80:loss 0.6368 with data size 3924\n","MAE:  1.941902331955044\n","val epoch 80:loss 6.1989 with data size 564\n","Training complete in 48m 20s\n","Epoch 81/100\n","----------\n"],"name":"stdout"},{"output_type":"stream","text":["/usr/local/lib/python3.7/dist-packages/torch/utils/data/dataloader.py:477: UserWarning: This DataLoader will create 8 worker processes in total. Our suggested max number of worker in current system is 2, which is smaller than what this DataLoader is going to create. Please be aware that excessive worker creation might get DataLoader running slow or even freeze, lower the worker number to avoid potential slowness/freeze if necessary.\n","  cpuset_checked))\n"],"name":"stderr"},{"output_type":"stream","text":["train epoch 81:loss 0.6218 with data size 3924\n","MAE:  1.9278697210664577\n","val epoch 81:loss 6.1389 with data size 564\n","Training complete in 48m 54s\n","Epoch 82/100\n","----------\n"],"name":"stdout"},{"output_type":"stream","text":["/usr/local/lib/python3.7/dist-packages/torch/utils/data/dataloader.py:477: UserWarning: This DataLoader will create 8 worker processes in total. Our suggested max number of worker in current system is 2, which is smaller than what this DataLoader is going to create. Please be aware that excessive worker creation might get DataLoader running slow or even freeze, lower the worker number to avoid potential slowness/freeze if necessary.\n","  cpuset_checked))\n"],"name":"stderr"},{"output_type":"stream","text":["train epoch 82:loss 0.5765 with data size 3924\n","MAE:  1.9394581127674022\n","val epoch 82:loss 6.1241 with data size 564\n","Training complete in 49m 27s\n","Epoch 83/100\n","----------\n"],"name":"stdout"},{"output_type":"stream","text":["/usr/local/lib/python3.7/dist-packages/torch/utils/data/dataloader.py:477: UserWarning: This DataLoader will create 8 worker processes in total. Our suggested max number of worker in current system is 2, which is smaller than what this DataLoader is going to create. Please be aware that excessive worker creation might get DataLoader running slow or even freeze, lower the worker number to avoid potential slowness/freeze if necessary.\n","  cpuset_checked))\n"],"name":"stderr"},{"output_type":"stream","text":["train epoch 83:loss 0.5390 with data size 3924\n","MAE:  1.9499071314402505\n","val epoch 83:loss 6.2375 with data size 564\n","Training complete in 50m 1s\n","Epoch 84/100\n","----------\n"],"name":"stdout"},{"output_type":"stream","text":["/usr/local/lib/python3.7/dist-packages/torch/utils/data/dataloader.py:477: UserWarning: This DataLoader will create 8 worker processes in total. Our suggested max number of worker in current system is 2, which is smaller than what this DataLoader is going to create. 
Please be aware that excessive worker creation might get DataLoader running slow or even freeze, lower the worker number to avoid potential slowness/freeze if necessary.\n","  cpuset_checked))\n"],"name":"stderr"},{"output_type":"stream","text":["train epoch 84:loss 0.6534 with data size 3924\n","MAE:  1.9455005033536161\n","val epoch 84:loss 6.1937 with data size 564\n","Training complete in 50m 35s\n","Epoch 85/100\n","----------\n"],"name":"stdout"},{"output_type":"stream","text":["/usr/local/lib/python3.7/dist-packages/torch/utils/data/dataloader.py:477: UserWarning: This DataLoader will create 8 worker processes in total. Our suggested max number of worker in current system is 2, which is smaller than what this DataLoader is going to create. Please be aware that excessive worker creation might get DataLoader running slow or even freeze, lower the worker number to avoid potential slowness/freeze if necessary.\n","  cpuset_checked))\n"],"name":"stderr"},{"output_type":"stream","text":["train epoch 85:loss 0.6346 with data size 3924\n","MAE:  1.9341497259042788\n","val epoch 85:loss 6.1032 with data size 564\n","Training complete in 51m 8s\n","Epoch 86/100\n","----------\n"],"name":"stdout"},{"output_type":"stream","text":["/usr/local/lib/python3.7/dist-packages/torch/utils/data/dataloader.py:477: UserWarning: This DataLoader will create 8 worker processes in total. Our suggested max number of worker in current system is 2, which is smaller than what this DataLoader is going to create. Please be aware that excessive worker creation might get DataLoader running slow or even freeze, lower the worker number to avoid potential slowness/freeze if necessary.\n","  cpuset_checked))\n"],"name":"stderr"},{"output_type":"stream","text":["train epoch 86:loss 0.5754 with data size 3924\n","MAE:  1.9405992184864713\n","val epoch 86:loss 6.1836 with data size 564\n","Training complete in 51m 42s\n","Epoch 87/100\n","----------\n"],"name":"stdout"},{"output_type":"stream","text":["/usr/local/lib/python3.7/dist-packages/torch/utils/data/dataloader.py:477: UserWarning: This DataLoader will create 8 worker processes in total. Our suggested max number of worker in current system is 2, which is smaller than what this DataLoader is going to create. Please be aware that excessive worker creation might get DataLoader running slow or even freeze, lower the worker number to avoid potential slowness/freeze if necessary.\n","  cpuset_checked))\n"],"name":"stderr"},{"output_type":"stream","text":["train epoch 87:loss 0.5086 with data size 3924\n","MAE:  1.9163325980622719\n","val epoch 87:loss 6.0332 with data size 564\n","saving\n","Training complete in 52m 15s\n","Epoch 88/100\n","----------\n"],"name":"stdout"},{"output_type":"stream","text":["/usr/local/lib/python3.7/dist-packages/torch/utils/data/dataloader.py:477: UserWarning: This DataLoader will create 8 worker processes in total. Our suggested max number of worker in current system is 2, which is smaller than what this DataLoader is going to create. 
Please be aware that excessive worker creation might get DataLoader running slow or even freeze, lower the worker number to avoid potential slowness/freeze if necessary.\n","  cpuset_checked))\n"],"name":"stderr"},{"output_type":"stream","text":["train epoch 88:loss 0.5676 with data size 3924\n","MAE:  1.9181871482877866\n","val epoch 88:loss 6.0078 with data size 564\n","saving\n","Training complete in 52m 49s\n","Epoch 89/100\n","----------\n"],"name":"stdout"},{"output_type":"stream","text":["/usr/local/lib/python3.7/dist-packages/torch/utils/data/dataloader.py:477: UserWarning: This DataLoader will create 8 worker processes in total. Our suggested max number of worker in current system is 2, which is smaller than what this DataLoader is going to create. Please be aware that excessive worker creation might get DataLoader running slow or even freeze, lower the worker number to avoid potential slowness/freeze if necessary.\n","  cpuset_checked))\n"],"name":"stderr"},{"output_type":"stream","text":["train epoch 89:loss 0.4697 with data size 3924\n","MAE:  1.9318671529808789\n","val epoch 89:loss 6.1175 with data size 564\n","Training complete in 53m 23s\n","Epoch 90/100\n","----------\n"],"name":"stdout"},{"output_type":"stream","text":["/usr/local/lib/python3.7/dist-packages/torch/utils/data/dataloader.py:477: UserWarning: This DataLoader will create 8 worker processes in total. Our suggested max number of worker in current system is 2, which is smaller than what this DataLoader is going to create. Please be aware that excessive worker creation might get DataLoader running slow or even freeze, lower the worker number to avoid potential slowness/freeze if necessary.\n","  cpuset_checked))\n"],"name":"stderr"},{"output_type":"stream","text":["train epoch 90:loss 0.4901 with data size 3924\n","MAE:  1.9291331036716488\n","val epoch 90:loss 6.0398 with data size 564\n","Training complete in 53m 56s\n","Epoch 91/100\n","----------\n"],"name":"stdout"},{"output_type":"stream","text":["/usr/local/lib/python3.7/dist-packages/torch/utils/data/dataloader.py:477: UserWarning: This DataLoader will create 8 worker processes in total. Our suggested max number of worker in current system is 2, which is smaller than what this DataLoader is going to create. Please be aware that excessive worker creation might get DataLoader running slow or even freeze, lower the worker number to avoid potential slowness/freeze if necessary.\n","  cpuset_checked))\n"],"name":"stderr"},{"output_type":"stream","text":["train epoch 91:loss 0.6193 with data size 3924\n","MAE:  1.9193224841365524\n","val epoch 91:loss 6.0759 with data size 564\n","Training complete in 54m 30s\n","Epoch 92/100\n","----------\n"],"name":"stdout"},{"output_type":"stream","text":["/usr/local/lib/python3.7/dist-packages/torch/utils/data/dataloader.py:477: UserWarning: This DataLoader will create 8 worker processes in total. Our suggested max number of worker in current system is 2, which is smaller than what this DataLoader is going to create. 
Please be aware that excessive worker creation might get DataLoader running slow or even freeze, lower the worker number to avoid potential slowness/freeze if necessary.\n","  cpuset_checked))\n"],"name":"stderr"},{"output_type":"stream","text":["train epoch 92:loss 0.5347 with data size 3924\n","MAE:  1.9184506630643885\n","val epoch 92:loss 5.9957 with data size 564\n","saving\n","Training complete in 55m 4s\n","Epoch 93/100\n","----------\n"],"name":"stdout"},{"output_type":"stream","text":["/usr/local/lib/python3.7/dist-packages/torch/utils/data/dataloader.py:477: UserWarning: This DataLoader will create 8 worker processes in total. Our suggested max number of worker in current system is 2, which is smaller than what this DataLoader is going to create. Please be aware that excessive worker creation might get DataLoader running slow or even freeze, lower the worker number to avoid potential slowness/freeze if necessary.\n","  cpuset_checked))\n"],"name":"stderr"},{"output_type":"stream","text":["train epoch 93:loss 0.4192 with data size 3924\n","MAE:  1.924592611683757\n","val epoch 93:loss 6.0710 with data size 564\n","Training complete in 55m 37s\n","Epoch 94/100\n","----------\n"],"name":"stdout"},{"output_type":"stream","text":["/usr/local/lib/python3.7/dist-packages/torch/utils/data/dataloader.py:477: UserWarning: This DataLoader will create 8 worker processes in total. Our suggested max number of worker in current system is 2, which is smaller than what this DataLoader is going to create. Please be aware that excessive worker creation might get DataLoader running slow or even freeze, lower the worker number to avoid potential slowness/freeze if necessary.\n","  cpuset_checked))\n"],"name":"stderr"},{"output_type":"stream","text":["train epoch 94:loss 0.4479 with data size 3924\n","MAE:  1.9219728851994724\n","val epoch 94:loss 6.0336 with data size 564\n","Training complete in 56m 11s\n","Epoch 95/100\n","----------\n"],"name":"stdout"},{"output_type":"stream","text":["/usr/local/lib/python3.7/dist-packages/torch/utils/data/dataloader.py:477: UserWarning: This DataLoader will create 8 worker processes in total. Our suggested max number of worker in current system is 2, which is smaller than what this DataLoader is going to create. Please be aware that excessive worker creation might get DataLoader running slow or even freeze, lower the worker number to avoid potential slowness/freeze if necessary.\n","  cpuset_checked))\n"],"name":"stderr"},{"output_type":"stream","text":["train epoch 95:loss 0.4603 with data size 3924\n","MAE:  1.9462131361959252\n","val epoch 95:loss 6.2197 with data size 564\n","Training complete in 56m 45s\n","Epoch 96/100\n","----------\n"],"name":"stdout"},{"output_type":"stream","text":["/usr/local/lib/python3.7/dist-packages/torch/utils/data/dataloader.py:477: UserWarning: This DataLoader will create 8 worker processes in total. Our suggested max number of worker in current system is 2, which is smaller than what this DataLoader is going to create. 
Please be aware that excessive worker creation might get DataLoader running slow or even freeze, lower the worker number to avoid potential slowness/freeze if necessary.\n","  cpuset_checked))\n"],"name":"stderr"},{"output_type":"stream","text":["train epoch 96:loss 0.5458 with data size 3924\n","MAE:  1.9080847384447748\n","val epoch 96:loss 5.9740 with data size 564\n","saving\n","Training complete in 57m 19s\n","Epoch 97/100\n","----------\n"],"name":"stdout"},{"output_type":"stream","text":["/usr/local/lib/python3.7/dist-packages/torch/utils/data/dataloader.py:477: UserWarning: This DataLoader will create 8 worker processes in total. Our suggested max number of worker in current system is 2, which is smaller than what this DataLoader is going to create. Please be aware that excessive worker creation might get DataLoader running slow or even freeze, lower the worker number to avoid potential slowness/freeze if necessary.\n","  cpuset_checked))\n"],"name":"stderr"},{"output_type":"stream","text":["train epoch 97:loss 0.5248 with data size 3924\n","MAE:  1.9237282102835094\n","val epoch 97:loss 6.0798 with data size 564\n","Training complete in 57m 52s\n","Epoch 98/100\n","----------\n"],"name":"stdout"},{"output_type":"stream","text":["/usr/local/lib/python3.7/dist-packages/torch/utils/data/dataloader.py:477: UserWarning: This DataLoader will create 8 worker processes in total. Our suggested max number of worker in current system is 2, which is smaller than what this DataLoader is going to create. Please be aware that excessive worker creation might get DataLoader running slow or even freeze, lower the worker number to avoid potential slowness/freeze if necessary.\n","  cpuset_checked))\n"],"name":"stderr"},{"output_type":"stream","text":["train epoch 98:loss 0.5887 with data size 3924\n","MAE:  1.9200940617221467\n","val epoch 98:loss 5.9755 with data size 564\n","Training complete in 58m 26s\n","Epoch 99/100\n","----------\n"],"name":"stdout"},{"output_type":"stream","text":["/usr/local/lib/python3.7/dist-packages/torch/utils/data/dataloader.py:477: UserWarning: This DataLoader will create 8 worker processes in total. Our suggested max number of worker in current system is 2, which is smaller than what this DataLoader is going to create. Please be aware that excessive worker creation might get DataLoader running slow or even freeze, lower the worker number to avoid potential slowness/freeze if necessary.\n","  cpuset_checked))\n"],"name":"stderr"},{"output_type":"stream","text":["train epoch 99:loss 0.4959 with data size 3924\n","MAE:  1.926045147239739\n","val epoch 99:loss 6.0238 with data size 564\n","Training complete in 58m 60s\n","Epoch 100/100\n","----------\n"],"name":"stdout"},{"output_type":"stream","text":["/usr/local/lib/python3.7/dist-packages/torch/utils/data/dataloader.py:477: UserWarning: This DataLoader will create 8 worker processes in total. Our suggested max number of worker in current system is 2, which is smaller than what this DataLoader is going to create. 
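{"cell_type":"markdown","metadata":{},"source":["A minimal sketch (illustrative only, independent of `model.py`/`model_l1.py`): `nn.MSELoss` is the criterion optimised by the run above, while `nn.L1Loss` computes exactly the MAE metric printed each epoch, which is the objective `model_l1.train_cnn` in the next cell minimises. It also shows one way to cap `num_workers`, since the 8-worker default triggers the repeated DataLoader warning on a 2-CPU Colab runtime. The tensors and the `dummy` dataset are hypothetical stand-ins."]},{"cell_type":"code","metadata":{},"source":[
"# Illustrative sketch only - not part of model.py / model_l1.py.\n",
"import os\n",
"import torch\n",
"import torch.nn as nn\n",
"from torch.utils.data import DataLoader, TensorDataset\n",
"\n",
"# Dummy Brixia-style global scores (hypothetical values).\n",
"preds = torch.tensor([3.2, 7.5, 0.4, 12.1])\n",
"targets = torch.tensor([4.0, 6.0, 0.0, 13.0])\n",
"\n",
"print('MSE:', nn.MSELoss()(preds, targets).item())  # objective of the first run\n",
"print('L1 :', nn.L1Loss()(preds, targets).item())   # equals the MAE metric in the logs\n",
"\n",
"# Cap num_workers at what the runtime actually offers, to avoid the\n",
"# repeated DataLoader worker warning (this system suggested at most 2).\n",
"dummy = TensorDataset(torch.randn(8, 3, 224, 224), torch.randn(8))\n",
"loader = DataLoader(dummy, batch_size=4, num_workers=min(2, os.cpu_count() or 1))\n"
],"execution_count":null,"outputs":[]},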
{"cell_type":"code","metadata":{"colab":{"base_uri":"https://localhost:8080/"},"id":"RFAXS0saB_68","outputId":"49455258-8a24-46de-dc3b-8391a91967b0"},"source":["# Train from Brixia\n","PATH_TO_IMAGES = \"/content/gdrive/MyDrive/ColabNotebooks/final_code/DB\"\n","WEIGHT_DECAY = 0\n","LEARNING_RATE = 0.00001\n","# L1 loss\n","M_l1.train_cnn(PATH_TO_IMAGES, LEARNING_RATE, WEIGHT_DECAY, fine_tune=True, regression=True, freeze=True, adam=True, initial_model_path='/content/gdrive/MyDrive/ColabNotebooks/final_code/checkpoints/checkpoint_best')"],"execution_count":null,"outputs":[{"output_type":"stream","text":["DataParallel(\n","  (module): DenseNet(\n","    (features): Sequential(\n","      (conv0): Conv2d(3, 64, kernel_size=(7, 7), stride=(2, 2), padding=(3, 3), bias=False)\n","      (norm0): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n","      (relu0): ReLU(inplace=True)\n","      (pool0): MaxPool2d(kernel_size=3, stride=2, padding=1, dilation=1, ceil_mode=False)\n","      (denseblock1): _DenseBlock(\n","        (denselayer1): _DenseLayer(\n","          (norm1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n","          (relu1): ReLU(inplace=True)\n","          (conv1): Conv2d(64, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)\n","          (norm2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n","          (relu2): ReLU(inplace=True)\n","          (conv2): Conv2d(128, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n","        )\n","        (denselayer2): _DenseLayer(\n","          (norm1): BatchNorm2d(96, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n","          (relu1): ReLU(inplace=True)\n","          (conv1): Conv2d(96, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)\n","          (norm2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n","          (relu2): ReLU(inplace=True)\n","          (conv2): Conv2d(128, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n","        )\n","        (denselayer3): _DenseLayer(\n","          (norm1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n","          (relu1): ReLU(inplace=True)\n","          (conv1): Conv2d(128, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)\n","          (norm2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n","          (relu2): ReLU(inplace=True)\n","          (conv2): Conv2d(128, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n","        )\n","        (denselayer4): _DenseLayer(\n","          (norm1): BatchNorm2d(160, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n","          (relu1): ReLU(inplace=True)\n","          (conv1): Conv2d(160, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)\n","          (norm2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n","          (relu2): ReLU(inplace=True)\n","         
 (conv2): Conv2d(128, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n","        )\n","        (denselayer5): _DenseLayer(\n","          (norm1): BatchNorm2d(192, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n","          (relu1): ReLU(inplace=True)\n","          (conv1): Conv2d(192, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)\n","          (norm2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n","          (relu2): ReLU(inplace=True)\n","          (conv2): Conv2d(128, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n","        )\n","        (denselayer6): _DenseLayer(\n","          (norm1): BatchNorm2d(224, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n","          (relu1): ReLU(inplace=True)\n","          (conv1): Conv2d(224, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)\n","          (norm2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n","          (relu2): ReLU(inplace=True)\n","          (conv2): Conv2d(128, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n","        )\n","      )\n","      (transition1): _Transition(\n","        (norm): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n","        (relu): ReLU(inplace=True)\n","        (conv): Conv2d(256, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)\n","        (pool): AvgPool2d(kernel_size=2, stride=2, padding=0)\n","      )\n","      (denseblock2): _DenseBlock(\n","        (denselayer1): _DenseLayer(\n","          (norm1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n","          (relu1): ReLU(inplace=True)\n","          (conv1): Conv2d(128, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)\n","          (norm2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n","          (relu2): ReLU(inplace=True)\n","          (conv2): Conv2d(128, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n","        )\n","        (denselayer2): _DenseLayer(\n","          (norm1): BatchNorm2d(160, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n","          (relu1): ReLU(inplace=True)\n","          (conv1): Conv2d(160, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)\n","          (norm2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n","          (relu2): ReLU(inplace=True)\n","          (conv2): Conv2d(128, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n","        )\n","        (denselayer3): _DenseLayer(\n","          (norm1): BatchNorm2d(192, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n","          (relu1): ReLU(inplace=True)\n","          (conv1): Conv2d(192, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)\n","          (norm2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n","          (relu2): ReLU(inplace=True)\n","          (conv2): Conv2d(128, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n","        )\n","        (denselayer4): _DenseLayer(\n","          (norm1): BatchNorm2d(224, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n","          (relu1): ReLU(inplace=True)\n","          (conv1): Conv2d(224, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)\n","          (norm2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n","      
    (relu2): ReLU(inplace=True)\n","          (conv2): Conv2d(128, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n","        )\n","        (denselayer5): _DenseLayer(\n","          (norm1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n","          (relu1): ReLU(inplace=True)\n","          (conv1): Conv2d(256, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)\n","          (norm2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n","          (relu2): ReLU(inplace=True)\n","          (conv2): Conv2d(128, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n","        )\n","        (denselayer6): _DenseLayer(\n","          (norm1): BatchNorm2d(288, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n","          (relu1): ReLU(inplace=True)\n","          (conv1): Conv2d(288, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)\n","          (norm2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n","          (relu2): ReLU(inplace=True)\n","          (conv2): Conv2d(128, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n","        )\n","        (denselayer7): _DenseLayer(\n","          (norm1): BatchNorm2d(320, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n","          (relu1): ReLU(inplace=True)\n","          (conv1): Conv2d(320, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)\n","          (norm2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n","          (relu2): ReLU(inplace=True)\n","          (conv2): Conv2d(128, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n","        )\n","        (denselayer8): _DenseLayer(\n","          (norm1): BatchNorm2d(352, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n","          (relu1): ReLU(inplace=True)\n","          (conv1): Conv2d(352, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)\n","          (norm2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n","          (relu2): ReLU(inplace=True)\n","          (conv2): Conv2d(128, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n","        )\n","        (denselayer9): _DenseLayer(\n","          (norm1): BatchNorm2d(384, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n","          (relu1): ReLU(inplace=True)\n","          (conv1): Conv2d(384, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)\n","          (norm2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n","          (relu2): ReLU(inplace=True)\n","          (conv2): Conv2d(128, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n","        )\n","        (denselayer10): _DenseLayer(\n","          (norm1): BatchNorm2d(416, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n","          (relu1): ReLU(inplace=True)\n","          (conv1): Conv2d(416, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)\n","          (norm2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n","          (relu2): ReLU(inplace=True)\n","          (conv2): Conv2d(128, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n","        )\n","        (denselayer11): _DenseLayer(\n","          (norm1): BatchNorm2d(448, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n","          (relu1): ReLU(inplace=True)\n","     
     (conv1): Conv2d(448, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)\n","          (norm2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n","          (relu2): ReLU(inplace=True)\n","          (conv2): Conv2d(128, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n","        )\n","        (denselayer12): _DenseLayer(\n","          (norm1): BatchNorm2d(480, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n","          (relu1): ReLU(inplace=True)\n","          (conv1): Conv2d(480, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)\n","          (norm2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n","          (relu2): ReLU(inplace=True)\n","          (conv2): Conv2d(128, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n","        )\n","      )\n","      (transition2): _Transition(\n","        (norm): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n","        (relu): ReLU(inplace=True)\n","        (conv): Conv2d(512, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)\n","        (pool): AvgPool2d(kernel_size=2, stride=2, padding=0)\n","      )\n","      (denseblock3): _DenseBlock(\n","        (denselayer1): _DenseLayer(\n","          (norm1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n","          (relu1): ReLU(inplace=True)\n","          (conv1): Conv2d(256, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)\n","          (norm2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n","          (relu2): ReLU(inplace=True)\n","          (conv2): Conv2d(128, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n","        )\n","        (denselayer2): _DenseLayer(\n","          (norm1): BatchNorm2d(288, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n","          (relu1): ReLU(inplace=True)\n","          (conv1): Conv2d(288, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)\n","          (norm2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n","          (relu2): ReLU(inplace=True)\n","          (conv2): Conv2d(128, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n","        )\n","        (denselayer3): _DenseLayer(\n","          (norm1): BatchNorm2d(320, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n","          (relu1): ReLU(inplace=True)\n","          (conv1): Conv2d(320, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)\n","          (norm2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n","          (relu2): ReLU(inplace=True)\n","          (conv2): Conv2d(128, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n","        )\n","        (denselayer4): _DenseLayer(\n","          (norm1): BatchNorm2d(352, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n","          (relu1): ReLU(inplace=True)\n","          (conv1): Conv2d(352, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)\n","          (norm2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n","          (relu2): ReLU(inplace=True)\n","          (conv2): Conv2d(128, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n","        )\n","        (denselayer5): _DenseLayer(\n","          (norm1): BatchNorm2d(384, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n"," 
         (relu1): ReLU(inplace=True)\n","          (conv1): Conv2d(384, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)\n","          (norm2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n","          (relu2): ReLU(inplace=True)\n","          (conv2): Conv2d(128, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n","        )\n","        (denselayer6): _DenseLayer(\n","          (norm1): BatchNorm2d(416, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n","          (relu1): ReLU(inplace=True)\n","          (conv1): Conv2d(416, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)\n","          (norm2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n","          (relu2): ReLU(inplace=True)\n","          (conv2): Conv2d(128, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n","        )\n","        (denselayer7): _DenseLayer(\n","          (norm1): BatchNorm2d(448, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n","          (relu1): ReLU(inplace=True)\n","          (conv1): Conv2d(448, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)\n","          (norm2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n","          (relu2): ReLU(inplace=True)\n","          (conv2): Conv2d(128, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n","        )\n","        (denselayer8): _DenseLayer(\n","          (norm1): BatchNorm2d(480, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n","          (relu1): ReLU(inplace=True)\n","          (conv1): Conv2d(480, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)\n","          (norm2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n","          (relu2): ReLU(inplace=True)\n","          (conv2): Conv2d(128, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n","        )\n","        (denselayer9): _DenseLayer(\n","          (norm1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n","          (relu1): ReLU(inplace=True)\n","          (conv1): Conv2d(512, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)\n","          (norm2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n","          (relu2): ReLU(inplace=True)\n","          (conv2): Conv2d(128, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n","        )\n","        (denselayer10): _DenseLayer(\n","          (norm1): BatchNorm2d(544, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n","          (relu1): ReLU(inplace=True)\n","          (conv1): Conv2d(544, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)\n","          (norm2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n","          (relu2): ReLU(inplace=True)\n","          (conv2): Conv2d(128, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n","        )\n","        (denselayer11): _DenseLayer(\n","          (norm1): BatchNorm2d(576, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n","          (relu1): ReLU(inplace=True)\n","          (conv1): Conv2d(576, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)\n","          (norm2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n","          (relu2): ReLU(inplace=True)\n","          (conv2): Conv2d(128, 32, kernel_size=(3, 3), stride=(1, 
1), padding=(1, 1), bias=False)\n","        )\n","        (denselayer12): _DenseLayer(\n","          (norm1): BatchNorm2d(608, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n","          (relu1): ReLU(inplace=True)\n","          (conv1): Conv2d(608, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)\n","          (norm2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n","          (relu2): ReLU(inplace=True)\n","          (conv2): Conv2d(128, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n","        )\n","        (denselayer13): _DenseLayer(\n","          (norm1): BatchNorm2d(640, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n","          (relu1): ReLU(inplace=True)\n","          (conv1): Conv2d(640, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)\n","          (norm2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n","          (relu2): ReLU(inplace=True)\n","          (conv2): Conv2d(128, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n","        )\n","        (denselayer14): _DenseLayer(\n","          (norm1): BatchNorm2d(672, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n","          (relu1): ReLU(inplace=True)\n","          (conv1): Conv2d(672, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)\n","          (norm2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n","          (relu2): ReLU(inplace=True)\n","          (conv2): Conv2d(128, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n","        )\n","        (denselayer15): _DenseLayer(\n","          (norm1): BatchNorm2d(704, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n","          (relu1): ReLU(inplace=True)\n","          (conv1): Conv2d(704, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)\n","          (norm2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n","          (relu2): ReLU(inplace=True)\n","          (conv2): Conv2d(128, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n","        )\n","        (denselayer16): _DenseLayer(\n","          (norm1): BatchNorm2d(736, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n","          (relu1): ReLU(inplace=True)\n","          (conv1): Conv2d(736, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)\n","          (norm2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n","          (relu2): ReLU(inplace=True)\n","          (conv2): Conv2d(128, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n","        )\n","        (denselayer17): _DenseLayer(\n","          (norm1): BatchNorm2d(768, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n","          (relu1): ReLU(inplace=True)\n","          (conv1): Conv2d(768, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)\n","          (norm2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n","          (relu2): ReLU(inplace=True)\n","          (conv2): Conv2d(128, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n","        )\n","        (denselayer18): _DenseLayer(\n","          (norm1): BatchNorm2d(800, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n","          (relu1): ReLU(inplace=True)\n","          (conv1): Conv2d(800, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)\n","          
(norm2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n","          (relu2): ReLU(inplace=True)\n","          (conv2): Conv2d(128, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n","        )\n","        (denselayer19): _DenseLayer(\n","          (norm1): BatchNorm2d(832, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n","          (relu1): ReLU(inplace=True)\n","          (conv1): Conv2d(832, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)\n","          (norm2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n","          (relu2): ReLU(inplace=True)\n","          (conv2): Conv2d(128, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n","        )\n","        (denselayer20): _DenseLayer(\n","          (norm1): BatchNorm2d(864, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n","          (relu1): ReLU(inplace=True)\n","          (conv1): Conv2d(864, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)\n","          (norm2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n","          (relu2): ReLU(inplace=True)\n","          (conv2): Conv2d(128, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n","        )\n","        (denselayer21): _DenseLayer(\n","          (norm1): BatchNorm2d(896, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n","          (relu1): ReLU(inplace=True)\n","          (conv1): Conv2d(896, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)\n","          (norm2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n","          (relu2): ReLU(inplace=True)\n","          (conv2): Conv2d(128, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n","        )\n","        (denselayer22): _DenseLayer(\n","          (norm1): BatchNorm2d(928, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n","          (relu1): ReLU(inplace=True)\n","          (conv1): Conv2d(928, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)\n","          (norm2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n","          (relu2): ReLU(inplace=True)\n","          (conv2): Conv2d(128, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n","        )\n","        (denselayer23): _DenseLayer(\n","          (norm1): BatchNorm2d(960, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n","          (relu1): ReLU(inplace=True)\n","          (conv1): Conv2d(960, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)\n","          (norm2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n","          (relu2): ReLU(inplace=True)\n","          (conv2): Conv2d(128, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n","        )\n","        (denselayer24): _DenseLayer(\n","          (norm1): BatchNorm2d(992, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n","          (relu1): ReLU(inplace=True)\n","          (conv1): Conv2d(992, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)\n","          (norm2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n","          (relu2): ReLU(inplace=True)\n","          (conv2): Conv2d(128, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n","        )\n","      )\n","      (transition3): _Transition(\n","        (norm): BatchNorm2d(1024, 
eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n","        (relu): ReLU(inplace=True)\n","        (conv): Conv2d(1024, 512, kernel_size=(1, 1), stride=(1, 1), bias=False)\n","        (pool): AvgPool2d(kernel_size=2, stride=2, padding=0)\n","      )\n","      (denseblock4): _DenseBlock(\n","        (denselayer1): _DenseLayer(\n","          (norm1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n","          (relu1): ReLU(inplace=True)\n","          (conv1): Conv2d(512, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)\n","          (norm2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n","          (relu2): ReLU(inplace=True)\n","          (conv2): Conv2d(128, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n","        )\n","        (denselayer2): _DenseLayer(\n","          (norm1): BatchNorm2d(544, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n","          (relu1): ReLU(inplace=True)\n","          (conv1): Conv2d(544, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)\n","          (norm2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n","          (relu2): ReLU(inplace=True)\n","          (conv2): Conv2d(128, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n","        )\n","        (denselayer3): _DenseLayer(\n","          (norm1): BatchNorm2d(576, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n","          (relu1): ReLU(inplace=True)\n","          (conv1): Conv2d(576, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)\n","          (norm2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n","          (relu2): ReLU(inplace=True)\n","          (conv2): Conv2d(128, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n","        )\n","        (denselayer4): _DenseLayer(\n","          (norm1): BatchNorm2d(608, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n","          (relu1): ReLU(inplace=True)\n","          (conv1): Conv2d(608, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)\n","          (norm2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n","          (relu2): ReLU(inplace=True)\n","          (conv2): Conv2d(128, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n","        )\n","        (denselayer5): _DenseLayer(\n","          (norm1): BatchNorm2d(640, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n","          (relu1): ReLU(inplace=True)\n","          (conv1): Conv2d(640, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)\n","          (norm2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n","          (relu2): ReLU(inplace=True)\n","          (conv2): Conv2d(128, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n","        )\n","        (denselayer6): _DenseLayer(\n","          (norm1): BatchNorm2d(672, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n","          (relu1): ReLU(inplace=True)\n","          (conv1): Conv2d(672, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)\n","          (norm2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n","          (relu2): ReLU(inplace=True)\n","          (conv2): Conv2d(128, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n","        )\n","        (denselayer7): 
_DenseLayer(\n","          (norm1): BatchNorm2d(704, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n","          (relu1): ReLU(inplace=True)\n","          (conv1): Conv2d(704, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)\n","          (norm2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n","          (relu2): ReLU(inplace=True)\n","          (conv2): Conv2d(128, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n","        )\n","        (denselayer8): _DenseLayer(\n","          (norm1): BatchNorm2d(736, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n","          (relu1): ReLU(inplace=True)\n","          (conv1): Conv2d(736, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)\n","          (norm2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n","          (relu2): ReLU(inplace=True)\n","          (conv2): Conv2d(128, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n","        )\n","        (denselayer9): _DenseLayer(\n","          (norm1): BatchNorm2d(768, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n","          (relu1): ReLU(inplace=True)\n","          (conv1): Conv2d(768, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)\n","          (norm2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n","          (relu2): ReLU(inplace=True)\n","          (conv2): Conv2d(128, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n","        )\n","        (denselayer10): _DenseLayer(\n","          (norm1): BatchNorm2d(800, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n","          (relu1): ReLU(inplace=True)\n","          (conv1): Conv2d(800, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)\n","          (norm2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n","          (relu2): ReLU(inplace=True)\n","          (conv2): Conv2d(128, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n","        )\n","        (denselayer11): _DenseLayer(\n","          (norm1): BatchNorm2d(832, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n","          (relu1): ReLU(inplace=True)\n","          (conv1): Conv2d(832, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)\n","          (norm2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n","          (relu2): ReLU(inplace=True)\n","          (conv2): Conv2d(128, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n","        )\n","        (denselayer12): _DenseLayer(\n","          (norm1): BatchNorm2d(864, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n","          (relu1): ReLU(inplace=True)\n","          (conv1): Conv2d(864, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)\n","          (norm2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n","          (relu2): ReLU(inplace=True)\n","          (conv2): Conv2d(128, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n","        )\n","        (denselayer13): _DenseLayer(\n","          (norm1): BatchNorm2d(896, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n","          (relu1): ReLU(inplace=True)\n","          (conv1): Conv2d(896, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)\n","          (norm2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, 
track_running_stats=True)\n","          (relu2): ReLU(inplace=True)\n","          (conv2): Conv2d(128, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n","        )\n","        (denselayer14): _DenseLayer(\n","          (norm1): BatchNorm2d(928, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n","          (relu1): ReLU(inplace=True)\n","          (conv1): Conv2d(928, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)\n","          (norm2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n","          (relu2): ReLU(inplace=True)\n","          (conv2): Conv2d(128, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n","        )\n","        (denselayer15): _DenseLayer(\n","          (norm1): BatchNorm2d(960, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n","          (relu1): ReLU(inplace=True)\n","          (conv1): Conv2d(960, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)\n","          (norm2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n","          (relu2): ReLU(inplace=True)\n","          (conv2): Conv2d(128, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n","        )\n","        (denselayer16): _DenseLayer(\n","          (norm1): BatchNorm2d(992, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n","          (relu1): ReLU(inplace=True)\n","          (conv1): Conv2d(992, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)\n","          (norm2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n","          (relu2): ReLU(inplace=True)\n","          (conv2): Conv2d(128, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n","        )\n","      )\n","      (norm5): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n","    )\n","    (classifier): Sequential(\n","      (0): Linear(in_features=1024, out_features=1, bias=True)\n","      (1): ReLU(inplace=True)\n","    )\n","  )\n",")\n","Epoch 1/2\n","----------\n"],"name":"stdout"},{"output_type":"stream","text":["/usr/local/lib/python3.7/dist-packages/torch/utils/data/dataloader.py:477: UserWarning: This DataLoader will create 8 worker processes in total. Our suggested max number of worker in current system is 2, which is smaller than what this DataLoader is going to create. Please be aware that excessive worker creation might get DataLoader running slow or even freeze, lower the worker number to avoid potential slowness/freeze if necessary.\n","  cpuset_checked))\n"],"name":"stderr"}]}
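,{"cell_type":"markdown","metadata":{},"source":["The two training cells above differ only in the regression criterion: `model.py` optimizes MSE while `model_l1.py` optimizes L1 loss. The cell below is a minimal illustrative sketch (not code from either module; all values are made up) of why that choice matters for the 0-18 Brixia score: MSE squares the residuals, so a few badly mis-scored images dominate the loss, while L1 grows linearly and directly tracks the MAE metric reported during training."]},{"cell_type":"code","metadata":{},"source":["# Minimal sketch (not from model.py / model_l1.py): compare MSE and L1 on\n","# made-up predictions and targets in the Brixia score range (0-18).\n","import torch\n","import torch.nn as nn\n","\n","preds  = torch.tensor([2.6, 2.3, 9.0, 3.9, 15.0])  # hypothetical model outputs\n","target = torch.tensor([2.0, 4.0, 9.0, 3.0, 18.0])  # hypothetical true scores\n","\n","# The single large residual (15.0 vs 18.0) dominates MSE but not L1.\n","print('MSE:', nn.MSELoss()(preds, target).item())  # 2.612\n","print('L1 :', nn.L1Loss()(preds, target).item())   # 1.240 (equals batch MAE)"],"execution_count":null,"outputs":[]}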
,{"cell_type":"code","metadata":{"id":"LjjyEcaETkl_","colab":{"base_uri":"https://localhost:8080/"},"executionInfo":{"status":"ok","timestamp":1624004277152,"user_tz":-540,"elapsed":2952,"user":{"displayName":"‍김지현[학생](소프트웨어융합대학 컴퓨터공학과)","photoUrl":"","userId":"10635065983091343623"}},"outputId":"6e50cfe5-d0f0-4885-c16b-976001b99c97"},"source":["# Eval Brixia\n","import torch\n","from torchvision import transforms\n","import cxr_dataset as CXR\n","\n","import eval_model as E\n","\n","device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n","PATH_TO_IMAGES = \"/content/gdrive/MyDrive/ColabNotebooks/final_code/DB\"\n","MODEL_PATH = '/content/gdrive/MyDrive/ColabNotebooks/final_code/checkpoints/regression_checkpoint_best'\n","\n","# use ImageNet mean/std for normalization\n","mean = [0.485, 0.456, 0.406]\n","std = [0.229, 0.224, 0.225]\n","\n","data_transforms = {\n","    'train': transforms.Compose([\n","        transforms.Resize(224),\n","        transforms.CenterCrop(224),\n","        transforms.ToTensor(),\n","        transforms.Normalize(mean, std)\n","    ]),\n","    'val': transforms.Compose([\n","        transforms.Resize(224),\n","        transforms.CenterCrop(224),\n","        transforms.ToTensor(),\n","        transforms.Normalize(mean, std)\n","    ]),\n","}\n","\n","# create test-fold dataloader\n","dataset = CXR.CXRDataset(\n","    path_to_images=PATH_TO_IMAGES,\n","    fold='test',\n","    transform=data_transforms['val'],\n","    fine_tune=True,\n","    regression=True)\n","dataloader = torch.utils.data.DataLoader(\n","    dataset, batch_size=32, shuffle=False, num_workers=8)  # 8 workers triggers the Colab warning below; lower if loading stalls\n","\n","# load the best regression checkpoint and move it to the GPU if available\n","saved_model = torch.load(MODEL_PATH)\n","model = saved_model['model']\n","model.to(device)\n","del saved_model\n","\n","mae, ground_truths, preds = E.evaluate_mae(dataloader, model)\n","print('MAE:', mae)\n","print(\"ground_truths : \", ground_truths)\n","print(\"preds : \", preds)"],"execution_count":null,"outputs":[{"output_type":"stream","text":["/usr/local/lib/python3.7/dist-packages/torch/utils/data/dataloader.py:477: UserWarning: This DataLoader will create 8 worker processes in total. Our suggested max number of worker in current system is 2, which is smaller than what this DataLoader is going to create. Please be aware that excessive worker creation might get DataLoader running slow or even freeze, lower the worker number to avoid potential slowness/freeze if necessary.\n","  cpuset_checked))\n"],"name":"stderr"}
...\n","202   3928593347333653647.jpg        14.0\n","203  11582034330052535722.jpg        15.0\n","204  14872476706053263416.jpg        15.0\n","205   1158241960099300594.jpg        18.0\n","206  10409101678672828001.jpg         1.0\n","\n","[207 rows x 2 columns]\n","preds :                    Image Index   pred_score\n","0    14564261561865340756.jpg  [2.6033666]\n","1    11736208667372564600.jpg  [2.2611506]\n","2     9239594870393759636.jpg   [9.018191]\n","3    11144427466904541752.jpg   [3.854935]\n","4    16974554766317036034.jpg  [4.5253954]\n","..                        ...          ...\n","202   3928593347333653647.jpg  [10.171592]\n","203  11582034330052535722.jpg  [11.124537]\n","204  14872476706053263416.jpg   [15.35862]\n","205   1158241960099300594.jpg  [15.025976]\n","206  10409101678672828001.jpg   [2.922273]\n","\n","[207 rows x 2 columns]\n"],"name":"stdout"}]},{"cell_type":"code","metadata":{"id":"MkGO19lRT9ip"},"source":[],"execution_count":null,"outputs":[]},{"cell_type":"code","metadata":{"id":"B9wwcXLRco4M"},"source":[],"execution_count":null,"outputs":[]}]}