haenim
Showing 508 changed files with 875 additions and 0 deletions
#folder
experiments/ecg/dataset/preprocessed/ano0
experiments/ecg/output/beatgan/ecg/model/
/workspace/2Dtest/1234/experiments/ecg/plotUtil.py:213: fixed
Run change.py and change2.py to convert the __samples.npy files to __spectrogram.npy files.
The shape changes from n*2*320 to n*128*128 (change.py), and then to n*1*128*128 (change2.py).
import numpy as np
import matplotlib.pyplot as plt
import librosa
import librosa.display
import cv2
# Load the per-class beat samples (classes N/S/V/F/Q).
n_data = np.load('N_samples.npy')
s_data = np.load('S_samples.npy')
v_data = np.load('V_samples.npy')
f_data = np.load('F_samples.npy')
q_data = np.load('Q_samples.npy')
# STFT parameters
n_fft_n = 256
win_length_n = 64
hp_length_n = 2
sr = 360  # sampling rate (Hz)
# Convert each class to log-magnitude spectrograms and save as <class>_spectrogram.npy.
for name, data in [('n', n_data), ('s', s_data), ('v', v_data), ('f', f_data), ('q', q_data)]:
    lst = []            # spectrograms to save
    length = len(data)  # number of samples to process
    for i in range(length):
        # Plot the original ECG trace (disabled):
        # ax1 = fig1.add_subplot(length, 2, 2*(i+1)-1)
        # ax1.plot(data[i, 0, :])
        # STFT of channel 0
        D_highres = librosa.stft(data[i, 0, :].flatten(), n_fft=n_fft_n, hop_length=hp_length_n, win_length=win_length_n)
        # convert to amplitude
        magnitude = np.abs(D_highres)
        # convert amplitude to the dB scale
        log_spectrogram = librosa.amplitude_to_db(magnitude)
        # trim edge frames (white-noise removal)
        log_spectrogram = log_spectrogram[:, 10:150]
        # resize to 128x128
        log_spectrogram = cv2.resize(log_spectrogram, (128, 128), interpolation=cv2.INTER_AREA)
        # Show the spectrogram (disabled):
        # img = librosa.display.specshow(log_spectrogram, sr=sr, hop_length=hp_length_n, ax=ax2, y_axis="linear", x_axis="time")
        # fig.colorbar(img, ax=ax2)  # format="%+2.f dB"
        lst.append(log_spectrogram)
        if i % 30 == 0:
            print(i, '/', length)
    # save as .npy
    lst = np.array(lst)
    output_filename = name + '_spectrogram'
    print(lst.shape)
    np.save(output_filename, lst)
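With these settings, librosa.stft on a 320-sample beat returns a 129 x 161 matrix (1 + n_fft/2 = 129 frequency bins and, with librosa's default center=True padding, 1 + 320/hop_length = 161 frames); cropping columns 10:150 trims the noisy edge frames to 129 x 140 before the resize to 128 x 128.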
import numpy as np
N_samples = np.load('n_spectrogram.npy')
S_samples = np.load('s_spectrogram.npy')
V_samples = np.load('v_spectrogram.npy')
F_samples = np.load('f_spectrogram.npy')
Q_samples = np.load('q_spectrogram.npy')
##########
# Insert a channel axis: n x 128 x 128  ->  n x 1 x 128 x 128
S_samples = S_samples.reshape(S_samples.shape[0], 1, S_samples.shape[1], S_samples.shape[2])
V_samples = V_samples.reshape(V_samples.shape[0], 1, V_samples.shape[1], V_samples.shape[2])
F_samples = F_samples.reshape(F_samples.shape[0], 1, F_samples.shape[1], F_samples.shape[2])
Q_samples = Q_samples.reshape(Q_samples.shape[0], 1, Q_samples.shape[1], Q_samples.shape[2])
N_samples = N_samples.reshape(N_samples.shape[0], 1, N_samples.shape[1], N_samples.shape[2])
np.save('q_spectrogram', Q_samples)
np.save('v_spectrogram', V_samples)
np.save('s_spectrogram', S_samples)
np.save('f_spectrogram', F_samples)
np.save('n_spectrogram', N_samples)
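A quick sanity check of the saved arrays (a minimal sketch; file names as above):
import numpy as np
for name in ['n', 's', 'v', 'f', 'q']:
    arr = np.load(name + '_spectrogram.npy')
    assert arr.shape[1:] == (1, 128, 128), arr.shape
    print(name, arr.shape)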
import os
import numpy as np
import torch
from torch.utils.data import DataLoader,TensorDataset
from model import BeatGAN
from options import Options
import matplotlib.pyplot as plt
import matplotlib
plt.rcParams["font.family"] = "Times New Roman"
matplotlib.rcParams.update({'font.size': 38})
from plotUtil import save_ts_heatmap
from data import normalize
device = torch.device("cpu")
SAVE_DIR="output/demo/"
def load_case(normal=True):
if normal:
test_samples = np.load(os.path.join("dataset/demo/", "normal_samples.npy"))
else:
test_samples = np.load(os.path.join("dataset/demo/", "abnormal_samples.npy"))
for i in range(test_samples.shape[0]):
for j in range(1):
test_samples[i][j] = normalize(test_samples[i][j][:])
test_samples = test_samples[:, :1, :]
print(test_samples.shape)
if not normal:
test_y=np.ones([test_samples.shape[0],1])
else:
test_y = np.zeros([test_samples.shape[0], 1])
test_dataset = TensorDataset(torch.Tensor(test_samples), torch.Tensor(test_y))
return DataLoader(dataset=test_dataset, # torch TensorDataset format
batch_size=64,
shuffle=False,
num_workers=0,
drop_last=False)
normal_dataloader=load_case(normal=True)
abnormal_dataloader=load_case(normal=False)
opt = Options()
opt.nc=1
opt.nz=50
opt.isize=320
opt.ndf=32
opt.ngf=32
opt.batchsize=64
opt.ngpu=1
opt.istest=True
opt.lr=0.001
opt.beta1=0.5
opt.niter=None
opt.dataset=None
opt.model = None
opt.outf=None
model=BeatGAN(opt,None,device)
model.G.load_state_dict(torch.load('model/beatgan_folder_0_G.pkl',map_location='cpu'))
model.D.load_state_dict(torch.load('model/beatgan_folder_0_D.pkl',map_location='cpu'))
model.G.eval()
model.D.eval()
with torch.no_grad():
abnormal_input=[]
abnormal_output=[]
normal_input=[]
normal_output=[]
for i, data in enumerate(abnormal_dataloader, 0):
test_x=data[0]
fake_x, _ = model.G(test_x)
batch_input = test_x.cpu().numpy()
batch_output = fake_x.cpu().numpy()
abnormal_input.append(batch_input)
abnormal_output.append(batch_output)
abnormal_input=np.concatenate(abnormal_input)
abnormal_output=np.concatenate(abnormal_output)
for i, data in enumerate(normal_dataloader, 0):
test_x=data[0]
fake_x, _ = model.G(test_x)
batch_input = test_x.cpu().numpy()
batch_output = fake_x.cpu().numpy()
normal_input.append(batch_input)
normal_output.append(batch_output)
normal_input=np.concatenate(normal_input)
normal_output=np.concatenate(normal_output)
# print(normal_input.shape)
# print(np.reshape((normal_input-normal_output)**2,(normal_input.shape[0],-1)).shape)
normal_heat= np.reshape((normal_input-normal_output)**2,(normal_input.shape[0],-1))
abnormal_heat = np.reshape((abnormal_input - abnormal_output)**2 , (abnormal_input.shape[0], -1))
# print(normal_heat.shape)
# assert False
max_val = max(np.max(normal_heat), np.max(abnormal_heat))
min_val = min(np.min(normal_heat), np.min(abnormal_heat))
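# A single global min/max is shared by both sets so the heatmap color
# scales of normal and abnormal beats are directly comparable.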
normal_heat_norm = (normal_heat - min_val) / (max_val - min_val)
abnormal_heat_norm = (abnormal_heat - min_val) / (max_val - min_val)
# for fig
dataset=["normal","abnormal"]
for d in dataset:
if not os.path.exists(os.path.join(SAVE_DIR , d)):
os.makedirs(os.path.join(SAVE_DIR , d))
if d=="normal":
data_input=normal_input
data_output=normal_output
data_heat=normal_heat_norm
else:
data_input = abnormal_input
data_output = abnormal_output
data_heat = abnormal_heat_norm
for i in range(50):
input_sig=data_input[i]
output_sig=data_output[i]
heat=data_heat[i]
# print(input_sig.shape)
# print(output_sig.shape)
# print(heat.shape)
# assert False
x_points = np.arange(input_sig.shape[1])
fig, ax = plt.subplots(2, 1, sharex=True, figsize=(6, 6), gridspec_kw={'height_ratios': [7, 1]})
sig_in = input_sig[0, :]
sig_out = output_sig[0, :]
ax[0].plot(x_points, sig_in, 'k-', linewidth=2.5, label="ori")
ax[0].plot(x_points, sig_out, 'k--', linewidth=2.5, label="gen")
ax[0].set_yticks([])
# leg=ax[0].legend(loc="upper right",bbox_to_anchor=(1.06, 1.06))
# leg.get_frame().set_alpha(0.0)
heat_norm = np.reshape(heat, (1, -1))
# heat_norm=np.zeros((1,320))
# if d=="normal":
# heat_norm[0,100:120]=0.0003
# else:
# heat_norm[0,100:120]=0.9997
ax[1].imshow(heat_norm, cmap="jet", aspect="auto",vmin = 0,vmax = 0.2)
ax[1].set_yticks([])
# ax[1].set_xlim((0,len(x_points)))
# fig.subplots_adjust(hspace=0.01)
fig.tight_layout()
# fig.show()
# return
fig.savefig(os.path.join(SAVE_DIR, d, str(i) + "_output.png"))
fig2, ax2 = plt.subplots(1, 1)
ax2.plot(x_points, sig_in, 'k-', linewidth=2.5, label="input signal")
fig2.savefig(os.path.join(SAVE_DIR + d, str(i) + "_input.png"))
plt.close(fig)   # close both figures so 50 iterations do not leak memory
plt.close(fig2)
print("output files are in:{}".format(SAVE_DIR))
import os
os.environ["CUDA_VISIBLE_DEVICES"] = "1"
import torch
from options import Options
from data import load_data
# from dcgan import DCGAN as myModel
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print('device: ',device)
opt = Options().parse()
print(opt)
dataloader=load_data(opt)
print("load data success!!!")
if opt.model == "beatgan":
from model import BeatGAN as MyModel
else:
raise Exception("no such model: {}".format(opt.model))
model=MyModel(opt,dataloader,device)
print('\nmodel_device:',model.device,'\n')
if not opt.istest:
print("################ Train ##################")
model.train()
else:
print("################ Eval ##################")
model.load()
model.test_type()
# model.test_time()
# model.plotTestFig()
# print("threshold:{}\tf1-score:{}\tauc:{}".format( th, f1, auc))
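For reference, a typical pair of invocations under these defaults (the entry script's file name is not shown in this diff; main.py is assumed):
python main.py --model beatgan --dataset ecg --niter 100 --folder 0
python main.py --model beatgan --dataset ecg --istest --threshold 0.05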
import os,pickle
import numpy as np
import torch
import torch.nn as nn
from plotUtil import plot_dist,save_pair_fig,save_plot_sample,print_network,save_plot_pair_sample,loss_plot
def weights_init(mod):
"""
Custom weights initialization called on netG, netD and netE
:param mod: module to initialize
:return:
"""
classname = mod.__class__.__name__
if classname.find('Conv') != -1:
# mod.weight.data.normal_(0.0, 0.02)
nn.init.xavier_normal_(mod.weight.data)
# nn.init.kaiming_uniform_(mod.weight.data)
elif classname.find('BatchNorm') != -1:
mod.weight.data.normal_(1.0, 0.02)
mod.bias.data.fill_(0)
elif classname.find('Linear') !=-1 :
torch.nn.init.xavier_uniform_(mod.weight)
mod.bias.data.fill_(0.01)
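For reference, this initializer is meant to be used with nn.Module.apply, which visits every submodule; a minimal sketch (netG assumed to be an instantiated generator):
netG.apply(weights_init)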
class Encoder(nn.Module):
def __init__(self, ngpu,opt,out_z):
super(Encoder, self).__init__()
self.ngpu = ngpu
self.main = nn.Sequential(
    # input is (nc) x 128 x 128
    #nn.Conv1d(opt.nc,opt.ndf,4,2,1,bias=False),
    nn.Conv2d(opt.nc, opt.ndf, 4, 2, 1, bias=False),
    nn.LeakyReLU(0.2, inplace=True),
    # state size: (ndf) x 64 x 64
    #nn.Conv1d(opt.ndf, opt.ndf * 2, 4, 2, 1, bias=False),
    #nn.BatchNorm1d(opt.ndf * 2),
    nn.Conv2d(opt.ndf, opt.ndf * 2, 4, 2, 1, bias=False),
    nn.BatchNorm2d(opt.ndf * 2),
    nn.LeakyReLU(0.2, inplace=True),
    # state size: (ndf*2) x 32 x 32
    #nn.Conv1d(opt.ndf * 2, opt.ndf * 4, 4, 2, 1, bias=False),
    #nn.BatchNorm1d(opt.ndf * 4),
    nn.Conv2d(opt.ndf * 2, opt.ndf * 4, 4, 2, 1, bias=False),
    nn.BatchNorm2d(opt.ndf * 4),
    nn.LeakyReLU(0.2, inplace=True),
    # state size: (ndf*4) x 16 x 16
    nn.Conv2d(opt.ndf * 4, opt.ndf * 8, 4, 2, 1, bias=False),
    nn.BatchNorm2d(opt.ndf * 8),
    nn.LeakyReLU(0.2, inplace=True),
    # state size: (ndf*8) x 8 x 8
    nn.Conv2d(opt.ndf * 8, opt.ndf * 16, 4, 1, 1, bias=False),
    nn.BatchNorm2d(opt.ndf * 16),
    nn.LeakyReLU(0.2, inplace=True),
    # state size: (ndf*16) x 7 x 7
    #nn.Conv1d(opt.ndf * 16, out_z, 10, 1, 0, bias=False)
    nn.Conv2d(opt.ndf * 16, out_z, 7, 1, 0, bias=False),
    # state size: (out_z) x 1 x 1
)
def forward(self, input):
if input.is_cuda and self.ngpu > 1:
output = nn.parallel.data_parallel(self.main, input, range(self.ngpu))
else:
output = self.main(input)
return output
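With 128x128 spectrogram inputs (as produced by change.py/change2.py) the spatial size shrinks 128 -> 64 -> 32 -> 16 -> 8 -> 7 -> 1, so the encoder emits an (out_z) x 1 x 1 code. A minimal shape check, a sketch assuming nc=1, ndf=32, out_z=50 and using SimpleNamespace in place of the real Options object:
import torch
from types import SimpleNamespace
opt = SimpleNamespace(nc=1, ndf=32)
enc = Encoder(ngpu=1, opt=opt, out_z=50)
print(enc(torch.randn(4, 1, 128, 128)).shape)  # expected: torch.Size([4, 50, 1, 1])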
##
class Decoder(nn.Module):
def __init__(self, ngpu,opt):
super(Decoder, self).__init__()
self.ngpu = ngpu
self.main = nn.Sequential(
    # input is (nz) x 1 x 1
    nn.ConvTranspose2d(opt.nz, opt.ngf * 16, 7, 1, 0, bias=False),
    nn.BatchNorm2d(opt.ngf * 16),
    nn.ReLU(True),
    # state size: (ngf*16) x 7 x 7
    nn.ConvTranspose2d(opt.ngf * 16, opt.ngf * 8, 4, 1, 1, bias=False),
    nn.BatchNorm2d(opt.ngf * 8),
    nn.ReLU(True),
    # state size: (ngf*8) x 8 x 8
    nn.ConvTranspose2d(opt.ngf * 8, opt.ngf * 4, 4, 2, 1, bias=False),
    nn.BatchNorm2d(opt.ngf * 4),
    nn.ReLU(True),
    # state size: (ngf*4) x 16 x 16
    #nn.ConvTranspose1d(opt.ngf * 4, opt.ngf*2, 4, 2, 1, bias=False),
    #nn.BatchNorm1d(opt.ngf*2),
    nn.ConvTranspose2d(opt.ngf * 4, opt.ngf * 2, 4, 2, 1, bias=False),
    nn.BatchNorm2d(opt.ngf * 2),
    nn.ReLU(True),
    # state size: (ngf*2) x 32 x 32
    #nn.ConvTranspose1d(opt.ngf * 2, opt.ngf , 4, 2, 1, bias=False),
    #nn.BatchNorm1d(opt.ngf ),
    nn.ConvTranspose2d(opt.ngf * 2, opt.ngf, 4, 2, 1, bias=False),
    nn.BatchNorm2d(opt.ngf),
    nn.ReLU(True),
    # state size: (ngf) x 64 x 64
    #nn.ConvTranspose1d(opt.ngf , opt.nc, 4, 2, 1, bias=False),
    nn.ConvTranspose2d(opt.ngf, opt.nc, 4, 2, 1, bias=False),
    nn.Tanh()
    # state size: (nc) x 128 x 128
)
def forward(self, input):
if input.is_cuda and self.ngpu > 1:
output = nn.parallel.data_parallel(self.main, input, range(self.ngpu))
else:
output = self.main(input)
return output
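The decoder mirrors the encoder (1 -> 7 -> 8 -> 16 -> 32 -> 64 -> 128), so an encode-decode round trip preserves the input shape. A sketch under the same assumed settings (nz=50, ngf=32, nc=1):
opt = SimpleNamespace(nc=1, ndf=32, ngf=32, nz=50)
x = torch.randn(4, 1, 128, 128)
x_hat = Decoder(ngpu=1, opt=opt)(Encoder(ngpu=1, opt=opt, out_z=opt.nz)(x))
print(x_hat.shape)  # expected: torch.Size([4, 1, 128, 128])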
class AD_MODEL(object):
def __init__(self,opt,dataloader,device):
self.G=None
self.D=None
self.opt=opt
self.niter=opt.niter
self.dataset=opt.dataset
self.model = opt.model
self.outf=opt.outf
def train(self):
raise NotImplementedError
def visualize_results(self, epoch,samples,is_train=True):
if is_train:
sub_folder="train"
else:
sub_folder="test"
save_dir=os.path.join(self.outf,self.model,self.dataset,sub_folder)
if not os.path.exists(save_dir):
os.makedirs(save_dir)
save_plot_sample(samples, epoch, self.dataset, num_epochs=self.niter,
impath=os.path.join(save_dir,'epoch%03d' % epoch + '.png'))
def visualize_pair_results(self,epoch,samples1,samples2,is_train=True):
if is_train:
sub_folder="train"
else:
sub_folder="test"
save_dir=os.path.join(self.outf,self.model,self.dataset,sub_folder)
if not os.path.exists(save_dir):
os.makedirs(save_dir)
save_plot_pair_sample(samples1, samples2, epoch, self.dataset, num_epochs=self.niter, impath=os.path.join(save_dir,'epoch%03d' % epoch + '.png'))
def save(self,train_hist):
save_dir = os.path.join(self.outf, self.model, self.dataset,"model")
if not os.path.exists(save_dir):
os.makedirs(save_dir)
with open(os.path.join(save_dir, self.model + '_history.pkl'), 'wb') as f:
pickle.dump(train_hist, f)
def save_weight_GD(self):
save_dir = os.path.join(self.outf, self.model, self.dataset, "model")
if not os.path.exists(save_dir):
os.makedirs(save_dir)
torch.save(self.G.state_dict(), os.path.join(save_dir, self.model+"_folder_"+str(self.opt.folder) + '_G.pkl'))
torch.save(self.D.state_dict(), os.path.join(save_dir, self.model+"_folder_"+str(self.opt.folder) + '_D.pkl'))
def load(self):
save_dir = os.path.join(self.outf, self.model, self.dataset,"model")
self.G.load_state_dict(torch.load(os.path.join(save_dir, self.model+"_folder_"+str(self.opt.folder) + '_G.pkl')))
self.D.load_state_dict(torch.load(os.path.join(save_dir, self.model+"_folder_"+str(self.opt.folder) + '_D.pkl')))
def save_loss(self,train_hist):
loss_plot(train_hist, os.path.join(self.outf, self.model, self.dataset), self.model)
def saveTestPair(self,pair,save_dir):
'''
:param pair: list of (input,output)
:param save_dir:
:return:
'''
assert save_dir is not None
for idx,p in enumerate(pair):
input=p[0]
output=p[1]
save_pair_fig(input,output,os.path.join(save_dir,str(idx)+".png"))
def analysisRes(self,N_res,A_res,min_score,max_score,threshold,save_dir):
'''
:param N_res: list of normal scores
:param A_res: dict of abnormal scores per type, e.g. { "S": list of S scores, "V": ... }
:param min_score: global minimum raw score (used for min-max scaling)
:param max_score: global maximum raw score (used for min-max scaling)
:param threshold: anomaly decision threshold on the scaled scores
:param save_dir: directory for the distribution plots
:return:
'''
print("############ Analysis #############")
print("############ Threshold:{} #############".format(threshold))
all_abnormal_score=[]
all_normal_score=np.array([])
for a_type in A_res:
a_score=A_res[a_type]
print("********* Type:{} *************".format(a_type))
normal_score=normal(N_res, min_score, max_score)
abnormal_score=normal(a_score, min_score, max_score)
all_abnormal_score=np.concatenate((all_abnormal_score,np.array(abnormal_score)))
all_normal_score=normal_score
plot_dist(normal_score,abnormal_score , str(self.opt.folder)+"_"+"N", a_type,
save_dir)
TP=np.count_nonzero(abnormal_score >= threshold)
FP=np.count_nonzero(normal_score >= threshold)
TN=np.count_nonzero(normal_score < threshold)
FN=np.count_nonzero(abnormal_score<threshold)
print("TP:{}".format(TP))
print("FP:{}".format(FP))
print("TN:{}".format(TN))
print("FN:{}".format(FN))
print("Accuracy:{}".format((TP + TN) * 1.0 / (TP + TN + FP + FN)))
print("Precision/ppv:{}".format(TP * 1.0 / (TP + FP)))
print("sensitivity/Recall:{}".format(TP * 1.0 / (TP + FN)))
print("specificity:{}".format(TN * 1.0 / (TN + FP)))
print("F1:{}".format(2.0 * TP / (2 * TP + FP + FN)))
# all_abnormal_score=np.reshape(np.array(all_abnormal_score),(-1))
# print(all_abnormal_score.shape)
plot_dist(all_normal_score, all_abnormal_score, str(self.opt.folder)+"_"+"N", "A",
save_dir)
def normal(array, min_val, max_val):
    # Min-max scale scores into [0, 1]; accepts lists or ndarrays.
    return (np.asarray(array) - min_val) / (max_val - min_val)
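For instance, with min_score = 2.0 and max_score = 10.0, a raw score of 6.0 maps to (6.0 - 2.0) / (10.0 - 2.0) = 0.5.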
import argparse
import os
import torch
class Options():
"""Options class
Returns:
[argparse]: argparse containing train and test options
"""
def __init__(self):
##
#
self.parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
##
# Base
self.parser.add_argument('--dataset', default='ecg', help='ecg dataset')
self.parser.add_argument('--dataroot', default='', help='path to dataset')
self.parser.add_argument('--batchsize', type=int, default=64, help='input batch size')
self.parser.add_argument('--workers', type=int, help='number of data loading workers', default=1)
self.parser.add_argument('--isize', type=int, default=320, help='input sequence size.')
self.parser.add_argument('--nc', type=int, default=1, help='input sequence channels')
self.parser.add_argument('--nz', type=int, default=50, help='size of the latent z vector')
self.parser.add_argument('--ngf', type=int, default=32)
self.parser.add_argument('--ndf', type=int, default=32)
self.parser.add_argument('--device', type=str, default='gpu', help='Device: gpu | cpu')
self.parser.add_argument('--gpu_ids', type=str, default='0', help='gpu ids, e.g. 0 or 0,1,2 or 0,2; use -1 for CPU')
self.parser.add_argument('--ngpu', type=int, default=1, help='number of GPUs to use')
self.parser.add_argument('--model', type=str, default='beatgan', help='choose model')
self.parser.add_argument('--outf', default='./output', help='output folder')
##
# Train
self.parser.add_argument('--print_freq', type=int, default=100, help='frequency of showing training results on console')
self.parser.add_argument('--niter', type=int, default=100, help='number of epochs to train for')
self.parser.add_argument('--beta1', type=float, default=0.5, help='momentum term of adam')
self.parser.add_argument('--lr', type=float, default=0.0001, help='initial learning rate for adam')
self.parser.add_argument('--w_adv', type=float, default=1, help='weight of the adversarial loss term')
self.parser.add_argument('--folder', type=int, default=0, help='folder index 0-4')
self.parser.add_argument('--n_aug', type=int, default=0, help='number of times to augment the data')
## Test
self.parser.add_argument('--istest', action='store_true', help='run in test mode instead of training')
self.parser.add_argument('--threshold', type=float, default=0.05, help='threshold score for anomaly')
self.opt = None
def parse(self):
""" Parse Arguments.
"""
self.opt = self.parser.parse_args()
str_ids = self.opt.gpu_ids.split(',')
self.opt.gpu_ids = []
for str_id in str_ids:
    gpu_id = int(str_id)
    if gpu_id >= 0:
        self.opt.gpu_ids.append(gpu_id)
# set gpu ids
if self.opt.device == 'gpu':
torch.cuda.set_device(self.opt.gpu_ids[0])
args = vars(self.opt)
# print('------------ Options -------------')
# for k, v in sorted(args.items()):
# print('%s: %s' % (str(k), str(v)))
# print('-------------- End ----------------')
# save to the disk
self.opt.name = "%s/%s" % (self.opt.model, self.opt.dataset)
expr_dir = os.path.join(self.opt.outf, self.opt.name, 'train')
test_dir = os.path.join(self.opt.outf, self.opt.name, 'test')
if not os.path.isdir(expr_dir):
os.makedirs(expr_dir)
if not os.path.isdir(test_dir):
os.makedirs(test_dir)
file_name = os.path.join(expr_dir, 'opt.txt')
with open(file_name, 'wt') as opt_file:
opt_file.write('------------ Options -------------\n')
for k, v in sorted(args.items()):
opt_file.write('%s: %s\n' % (str(k), str(v)))
opt_file.write('-------------- End ----------------\n')
return self.opt