최인훈

fix

from __future__ import division
from roipool2 import *
from models import *
from utils.utils import *
from utils.datasets import *
from utils.parse_config import *
# from test import evaluate
from terminaltables import AsciiTable
import os
import sys
import time
import datetime
import argparse
import warnings
import numpy as np
import torch
from torch.utils.data import DataLoader
from torchvision import datasets
from torchvision import transforms
from torch.autograd import Variable
import torch.optim as optim

warnings.filterwarnings("ignore", category=UserWarning)
if __name__ == '__main__':
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    print('device: ', device)

    # Data configuration for the distance dataset.
    data_config = parse_data_config('config/cafe_distance.data')
    train_path = data_config["train"]
    valid_path = data_config["valid"]
    class_names = load_classes(data_config["names"])

    # Load the pretrained YOLOv3-tiny detector and freeze it in eval mode;
    # only the distance head below is trained.
    model = Darknet('config/yolov3-tiny.cfg', 416).to(device)
    model.load_state_dict(torch.load('checkpoints_cafe_distance/tiny1_2500.pth', map_location=device))
    model.eval()

    dataset = ListDataset(train_path, augment=True, multiscale=True)
    dataloader = torch.utils.data.DataLoader(
        dataset,
        batch_size=1,
        shuffle=True,
        num_workers=4,
        pin_memory=True,
        collate_fn=dataset.collate_fn,
    )

    # Distance-regression head that pools the detector feature map over each ROI.
    model_distance = ROIPool((3, 3)).to(device)
    model_parameters = filter(lambda p: p.requires_grad, model_distance.parameters())
    params = sum(np.prod(p.size()) for p in model_parameters)
    print('Params: ', params)

    optimizer = torch.optim.Adam(model_distance.parameters())

    os.makedirs('checkpoints_distance11', exist_ok=True)

    for epoch in range(2000):
        for batch_i, (img_path, imgs, targets, targets_distance) in enumerate(dataloader):
            imgs = Variable(imgs.to(device))

            # The detector is frozen; run it without building a graph.
            with torch.no_grad():
                featuremap, detections = model(imgs)

            featuremap = Variable(featuremap.to(device))
            detections = non_max_suppression(detections, 0.8, 0.4)

            targets_distance = torch.tensor(targets_distance[0])
            targets_distance = Variable(targets_distance, requires_grad=True)

            # non_max_suppression returns one entry per image; the entry is None
            # when nothing was detected, so skip those batches.
            if detections[0] is not None:
                detections[0] = Variable(detections[0], requires_grad=True)
                loss, outputs = model_distance(featuremap, detections[0], targets=targets_distance)

                optimizer.zero_grad()
                loss.backward()
                optimizer.step()

        print(epoch)

        # Halve the learning rate and save a checkpoint every 10 epochs.
        if epoch % 10 == 0:
            optimizer.param_groups[0]['lr'] /= 2
            torch.save(model_distance.state_dict(), f'checkpoints_distance11/tiny1_{epoch}.pth')
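
Once the distance head is trained, an inference pass could look like the sketch below. This is illustrative only: the checkpoint name is hypothetical, and it assumes the ROIPool forward pass returns distance predictions directly when no targets keyword is passed, which is not shown in the code above.

# Illustrative sketch: hypothetical checkpoint name; assumes ROIPool.forward
# returns distance outputs when called without targets.
model_distance = ROIPool((3, 3)).to(device)
model_distance.load_state_dict(torch.load('checkpoints_distance11/tiny1_1990.pth', map_location=device))
model_distance.eval()

with torch.no_grad():
    featuremap, detections = model(imgs)
    detections = non_max_suppression(detections, 0.8, 0.4)
    if detections[0] is not None:
        distances = model_distance(featuremap, detections[0])
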
from __future__ import division
from models import *
from roipool import *
# from utils.logger import *
from utils.utils import *
from utils.datasets import *
from utils.parse_config import *
from test import evaluate  # used by the validation block below
from terminaltables import AsciiTable
import os
import sys
import time
import datetime
import argparse
import warnings
import numpy as np
import torch
from torch.utils.data import DataLoader
from torchvision import datasets
from torchvision import transforms
from torch.autograd import Variable
import torch.optim as optim

warnings.filterwarnings("ignore", category=UserWarning)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--epochs", type=int, default=8001, help="number of epochs")
    parser.add_argument("--batch_size", type=int, default=1, help="size of each image batch")
    parser.add_argument("--gradient_accumulations", type=int, default=2, help="number of gradient accums before step")
    parser.add_argument("--model_def", type=str, default="config/yolov3-tiny.cfg", help="path to model definition file")
    parser.add_argument("--data_config", type=str, default="config/testdata.data", help="path to data config file")
    parser.add_argument("--pretrained_weights", type=str, help="if specified starts from checkpoint model")
    parser.add_argument("--n_cpu", type=int, default=4, help="number of cpu threads to use during batch generation")
    parser.add_argument("--img_size", type=int, default=416, help="size of each image dimension")
    parser.add_argument("--checkpoint_interval", type=int, default=50, help="interval between saving model weights")
    parser.add_argument("--evaluation_interval", type=int, default=10000, help="interval between evaluations on the validation set")
    parser.add_argument("--compute_map", default=False, help="if True computes mAP every tenth batch")
    parser.add_argument("--multiscale_training", default=True, help="allow for multi-scale training")
    opt = parser.parse_args()
    print(opt)

    # logger = Logger("logs")

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    print('device: ', device)

    os.makedirs("output", exist_ok=True)
    os.makedirs("checkpoints_fire", exist_ok=True)  # checkpoints are saved here below

    # Get data configuration
    data_config = parse_data_config(opt.data_config)
    train_path = data_config["train"]
    valid_path = data_config["valid"]
    class_names = load_classes(data_config["names"])

    # Initiate model
    model = Darknet(opt.model_def).to(device)
    model.apply(weights_init_normal)
    model_distance = ROIPool((7, 7)).to(device)

    # If specified we start from checkpoint
    if opt.pretrained_weights:
        if opt.pretrained_weights.endswith(".pth"):
            model.load_state_dict(torch.load(opt.pretrained_weights))
        else:
            model.load_darknet_weights(opt.pretrained_weights)

    # Count trainable parameters.
    model_parameters = filter(lambda p: p.requires_grad, model.parameters())
    params = sum(np.prod(p.size()) for p in model_parameters)
    print('Params: ', params)

    # Get dataloader
    dataset = ListDataset(train_path, augment=True, multiscale=opt.multiscale_training)
    dataloader = torch.utils.data.DataLoader(
        dataset,
        batch_size=opt.batch_size,
        shuffle=False,
        num_workers=opt.n_cpu,
        pin_memory=True,
        collate_fn=dataset.collate_fn,
    )

    optimizer = torch.optim.Adam(model.parameters())

    metrics = [
        "grid_size",
        "loss",
        "x",
        "y",
        "w",
        "h",
        "conf",
        "cls",
        "cls_acc",
        "recall50",
        "recall75",
        "precision",
        "conf_obj",
        "conf_noobj",
    ]

    for epoch in range(opt.epochs):
        model.train()
        start_time = time.time()
        for batch_i, (_, imgs, targets) in enumerate(dataloader):
            batches_done = len(dataloader) * epoch + batch_i

            imgs = Variable(imgs.to(device))
            targets = Variable(targets.to(device), requires_grad=False)

            loss, outputs = model(imgs, targets)
            loss.backward()

            if batches_done % opt.gradient_accumulations:
                # Accumulates gradient before each step
                optimizer.step()
                optimizer.zero_grad()

            # ----------------
            #   Log progress
            # ----------------
            log_str = "\n---- [Epoch %d/%d, Batch %d/%d] ----\n" % (epoch, opt.epochs, batch_i, len(dataloader))
            metric_table = [["Metrics", *[f"YOLO Layer {i}" for i in range(len(model.yolo_layers))]]]

            # Log metrics at each YOLO layer
            for i, metric in enumerate(metrics):
                formats = {m: "%.6f" for m in metrics}
                formats["grid_size"] = "%2d"
                formats["cls_acc"] = "%.2f%%"
                row_metrics = [formats[metric] % yolo.metrics.get(metric, 0) for yolo in model.yolo_layers]
                metric_table += [[metric, *row_metrics]]

            # Tensorboard logging
            tensorboard_log = []
            for j, yolo in enumerate(model.yolo_layers):
                for name, metric in yolo.metrics.items():
                    if name != "grid_size":
                        tensorboard_log += [(f"{name}_{j+1}", metric)]
            tensorboard_log += [("loss", loss.item())]
            # logger.list_of_scalars_summary(tensorboard_log, batches_done)

            log_str += AsciiTable(metric_table).table
            log_str += f"\nTotal loss {loss.item()}"

            # Determine approximate time left for epoch
            epoch_batches_left = len(dataloader) - (batch_i + 1)
            time_left = datetime.timedelta(seconds=epoch_batches_left * (time.time() - start_time) / (batch_i + 1))
            log_str += f"\n---- ETA {time_left}"

            print(log_str)

            model.seen += imgs.size(0)

        if epoch % opt.evaluation_interval == 0 and epoch != 0:
            print("\n---- Evaluating Model ----")
            # Evaluate the model on the validation set
            precision, recall, AP, f1, ap_class = evaluate(
                model,
                path=valid_path,
                iou_thres=0.5,
                conf_thres=0.5,
                nms_thres=0.5,
                img_size=opt.img_size,
                batch_size=1,
            )
            evaluation_metrics = [
                ("val_precision", precision.mean()),
                ("val_recall", recall.mean()),
                ("val_mAP", AP.mean()),
                ("val_f1", f1.mean()),
            ]
            # logger.list_of_scalars_summary(evaluation_metrics, epoch)

            # Print class APs and mAP
            ap_table = [["Index", "Class name", "AP"]]
            for i, c in enumerate(ap_class):
                ap_table += [[c, class_names[c], "%.5f" % AP[i]]]
            print(AsciiTable(ap_table).table)
            print(f"---- mAP {AP.mean()}")

        if epoch % opt.checkpoint_interval == 0:
            torch.save(model.state_dict(), f"checkpoints_fire/tiny1_{epoch}.pth")
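
The training script above is configured entirely through its argparse flags, so a typical invocation (assuming it is saved as train.py; the filename is not shown here, and the weights path is only an example) would be:

python train.py --model_def config/yolov3-tiny.cfg --data_config config/testdata.data --pretrained_weights weights/yolov3-tiny.weights

Passing a .pth file resumes from a PyTorch checkpoint, while any other extension is loaded through load_darknet_weights.
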
import cv2
import queue
import threading


class BufferlessVideoCapture:
    '''
    BufferlessVideoCapture is a wrapper around cv2.VideoCapture that keeps no
    frame buffer: a background thread continuously drains the stream, and
    read() always returns the most recent frame.

    @param name: source passed to cv2.VideoCapture (device index, file path, or stream URL)
    '''
    def __init__(self, name):
        self.cap = cv2.VideoCapture(name)
        self.q = queue.Queue()
        self.thr = threading.Thread(target=self._reader)
        self.thr.daemon = True
        self.thr.start()

    def _reader(self):
        '''
        Main loop for the reader thread: grab frames as fast as the source
        delivers them and keep only the newest one in the queue.
        '''
        while True:
            ret, frame = self.cap.read()
            if not ret:
                break
            if not self.q.empty():
                try:
                    self.q.get_nowait()  # discard previous (unprocessed) frame
                except queue.Empty:
                    pass
            self.q.put(frame)

    def isOpened(self):
        return self.cap.isOpened()

    def release(self):
        self.cap.release()

    def read(self):
        '''
        Return the most recent frame (blocks until one is available).
        '''
        return True, self.q.get()

    def close(self):
        pass
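
A minimal usage sketch for the class above, assuming a local webcam at device index 0 (any source accepted by cv2.VideoCapture works):

# Minimal usage sketch: always process the freshest frame from the stream.
cap = BufferlessVideoCapture(0)  # 0 = default webcam; a file path or stream URL also works
while cap.isOpened():
    ret, frame = cap.read()  # blocks until a frame is available
    cv2.imshow('frame', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
cap.release()
cv2.destroyAllWindows()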