•
In short, this project captures a fingerprint on a Jetson Nano and then uses AI to classify whose it is.
Getting Started
•
Prerequisites: the steps below assume torch + openCV are already installed.
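A quick sanity check before starting (a minimal sketch; the version strings and CUDA availability depend on your JetPack image):

import torch
import cv2

print("torch:", torch.__version__)
print("OpenCV:", cv2.__version__)
print("CUDA available:", torch.cuda.is_available())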
•
The code comes from the link above.
•
Run the commands above to complete the initial setup.
•
A few additional libraries need to be installed. The commands below install each one; the last one takes a very long time, so keep an eye on it while it runs. (If the internet connection drops partway through, the install gets corrupted and you have to wipe it and start over.)
•
pip install imageio
•
pip install imgaug
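Once both commands finish, a quick import check confirms the install survived:

import imageio
import imgaug
print("imageio:", imageio.__version__)
print("imgaug:", imgaug.__version__)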
The file setup is as follows.
•
The workflow runs the 2.py script, and the default paths use the folders that came with the git clone.
•
The contents under the existing images folder, plus the model, processed_images, and test_images folders, were all emptied out; read the code and tune it for the fingerprints you need. (From here on, adjust to taste!)
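For reference, a sketch of the folder layout 2.py expects (the base path and folder names come from the script itself; the per-person subfolder names are just examples, and the pair-labelling step later assumes each person contributes 10 images):

import os

base = "/home/keti/Desktop/glory/fingerprint-recognition-using-siamese-network-with-retraining"
for sub in ["images", "model", "processed_images", "test_images"]:
    os.makedirs(os.path.join(base, sub), exist_ok=True)
# Fingerprint captures then go into one subfolder per person, e.g.
# images/person_01/, images/person_02/, ... with 10 images each;
# the images to identify go into test_images/.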
/home/keti/Desktop/glory/2.py
import os
import numpy as np
import pandas as pd
import time
import matplotlib.pyplot as plt
import cv2
import imageio
import imgaug as ia
import imgaug.augmenters as iaa  # required: augment() below uses the iaa.* augmenters
from PIL import Image
# PyTorch libraries and modules
import torch
from torch.autograd import Variable
from torch.nn import Linear, ReLU, CrossEntropyLoss, Sequential, Conv2d, MaxPool2d, Module, Softmax, BatchNorm2d, Dropout, Flatten
from torch.optim import Adam, SGD
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms
from tqdm import tqdm
import math
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import BCELoss
# function takes in the folder path and the filename to create synthetic images
# using data augmentation techniques
def augment(folder, file):
    filename = folder + "/" + file
    # loading in the image
    image = imageio.imread(filename)
    # vertical flip
    flip_vr = iaa.Flipud(p=1.0)
    flip_vr_image = flip_vr.augment_image(image)
    save_filename = folder + "/" + file.split(".")[0] + "_flipped.png"
    cv2.imwrite(save_filename, flip_vr_image)
    # two random rotations sampled from [-50, 50] degrees
    rotate = iaa.Affine(rotate=(-50, 50))
    rotated_image = rotate.augment_image(image)
    save_filename = folder + "/" + file.split(".")[0] + "_rotated1.png"
    cv2.imwrite(save_filename, rotated_image)
    rotated_image = rotate.augment_image(image)
    save_filename = folder + "/" + file.split(".")[0] + "_rotated2.png"
    cv2.imwrite(save_filename, rotated_image)
    # two random crops removing up to 30% per side
    crop = iaa.Crop(percent=(0, 0.3))
    crop_image = crop.augment_image(image)
    save_filename = folder + "/" + file.split(".")[0] + "_cropped1.png"
    cv2.imwrite(save_filename, crop_image)
    crop_image = crop.augment_image(image)
    save_filename = folder + "/" + file.split(".")[0] + "_cropped2.png"
    cv2.imwrite(save_filename, crop_image)
    # two gamma-contrast variants
    contrast = iaa.GammaContrast(gamma=2.0)
    contrast_image = contrast.augment_image(image)
    save_filename = folder + "/" + file.split(".")[0] + "_bright1.png"
    cv2.imwrite(save_filename, contrast_image)
    contrast = iaa.GammaContrast(gamma=1.4)
    contrast_image = contrast.augment_image(image)
    save_filename = folder + "/" + file.split(".")[0] + "_bright2.png"
    cv2.imwrite(save_filename, contrast_image)
    # two Gaussian-blur variants
    blur = iaa.GaussianBlur(sigma=4.0)
    blur_image = blur.augment_image(image)
    save_filename = folder + "/" + file.split(".")[0] + "_blur1.png"
    cv2.imwrite(save_filename, blur_image)
    blur = iaa.GaussianBlur(sigma=2.0)
    blur_image = blur.augment_image(image)
    save_filename = folder + "/" + file.split(".")[0] + "_blur2.png"
    cv2.imwrite(save_filename, blur_image)
dimen = 224
dir_path = "/home/keti/Desktop/glory/fingerprint-recognition-using-siamese-network-with-retraining/images/"
out_path = "/home/keti/Desktop/glory/fingerprint-recognition-using-siamese-network-with-retraining/processed_images/"
model_path = "/home/keti/Desktop/glory/fingerprint-recognition-using-siamese-network-with-retraining/model/"
# Pre-process images to convert them into numpy arrays and
# store them in individual folders for future processing
sub_dir_list = os.listdir(dir_path)
images = []
for i in range(len(sub_dir_list)):
    # uncomment to augment the first image of each class before processing:
    # image_names = os.listdir(os.path.join(dir_path, sub_dir_list[i]))
    # augment(os.path.join(dir_path, sub_dir_list[i]), image_names[0])
    image_names = os.listdir(os.path.join(dir_path, sub_dir_list[i]))
    sub_dir_images = []
    for image_path in image_names:
        path = os.path.join(dir_path, sub_dir_list[i], image_path)
        try:
            print(path)
            image = Image.open(path)
            resize_image = image.resize((dimen, dimen))
            array_ = list()
            for x in range(dimen):
                sub_array = list()
                for y in range(dimen):
                    sub_array.append(resize_image.load()[x, y])
                array_.append(sub_array)
            image_data = np.array(array_)
            image = np.array(np.reshape(image_data, (dimen, dimen, 3))) / 255
            sub_dir_images.append(image)
            images.append(image)
        except:
            print('WARNING : File {} could not be processed.'.format(path))
    sub_dir_images = np.array(sub_dir_images)
    np.save('{0}/{1}_processed.npy'.format(os.path.join(dir_path, sub_dir_list[i]), str(sub_dir_list[i])), sub_dir_images)
    print("Save Complete")
images = np.array(images)
# Create image pairs and label them 1 if they belong to the same person, 0 otherwise
samples_1 = []
samples_2 = []
labels = []
for i in range(len(images)):
    for j in range(len(images)):
        samples_1.append(images[i])
        samples_2.append(images[j])
        # images arrive in blocks of 10 per person, so a pair is positive
        # exactly when i and j fall inside the same block of 10
        t = i - i % 10 + 10
        if t - 10 <= j < t:
            labels.append(1)
        else:
            labels.append(0)
X1 = np.array(samples_1)
X2 = np.array(samples_2)
Y = np.array(labels)
np.save('{}/images.npy'.format(out_path), images)
np.save('{}/x1.npy'.format(out_path), X1)
np.save('{}/x2.npy'.format(out_path), X2)
np.save('{}/y.npy'.format(out_path), Y)
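# NOTE (added): the all-pairs construction above grows as N^2. With e.g.
# 5 people x 10 images = 50 images, X1 and X2 each hold 2500 pairs of
# 224x224x3 float64 arrays: 2500 * 224*224*3 * 8 bytes ~= 2.8 GiB per array,
# ~5.6 GiB total. That alone can exhaust the Jetson Nano's 4 GB of RAM,
# which is one reason training is best done on a PC (see the note at the end).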
# creating the model class
class Siamese(nn.Module):
    # initializing the model
    def __init__(self):
        super(Siamese, self).__init__()
        self.dimen = 224
        self.conv = nn.Sequential(
            nn.Conv2d(3, 64, (10, 10), stride=1),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=2),
            nn.Conv2d(64, 128, (7, 7), stride=1),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=2),
            nn.Conv2d(128, 256, (5, 5), stride=1),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=2),
            nn.Conv2d(256, 384, (3, 3)),
            nn.ReLU(inplace=True),
        )
        # 169344 = 384 channels * 21 * 21 spatial positions after the conv tower
        self.linear = nn.Sequential(nn.Linear(169344, 2048), nn.ReLU(), nn.Linear(2048, 1024), nn.Sigmoid())
        self.out = nn.Sequential(nn.Linear(1024, 1), nn.Sigmoid())

    # embed a single image with the shared convolutional tower
    def forward_one(self, x):
        x = self.conv(x.float())
        x = x.view(x.size()[0], -1)
        x = self.linear(x.float())
        return x

    # compare two images: embed both, take the absolute difference of the
    # embeddings, and map it to a similarity score in (0, 1)
    def forward(self, x1, x2):
        out1 = self.forward_one(x1)
        out2 = self.forward_one(x2)
        dis = torch.abs(out1 - out2)
        out = self.out(dis)
        return out

    # converts the images in the folder to the required format
    def prepare_images_from_dir(self, dir_path, flatten=True):
        images_names = os.listdir(dir_path)
        f = [name for name in images_names if '.npy' in name]
        if len(f) == 0:
            images = list()
            for imageName in images_names:
                image = Image.open(dir_path + imageName)
                resize_image = image.resize((self.dimen, self.dimen))
                array = list()
                for x in range(self.dimen):
                    sub_array = list()
                    for y in range(self.dimen):
                        sub_array.append(resize_image.load()[x, y])
                    array.append(sub_array)
                image_data = np.array(array)
                image = np.array(np.reshape(image_data, (3, self.dimen, self.dimen))) / 255
                images.append(image)
            if flatten:
                images = np.array(images)
                return images.reshape(images.shape[0], self.dimen ** 2 * 3).astype(np.float32)
            else:
                return torch.from_numpy(np.array(images))
        else:
            # a pre-built .npy already exists in the folder, so load it instead
            images = np.load('{0}{1}'.format(dir_path, f[0]))
            if flatten:
                images = np.array(images)
                return images.reshape((images.shape[0], self.dimen ** 2 * 3)).astype(np.float32)
            else:
                images = np.array(np.reshape(images, (len(images), 3, self.dimen, self.dimen)))
                return torch.from_numpy(images)
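# NOTE (added): where the 169344 in the first nn.Linear comes from. For a
# 224x224 input, each conv shrinks the side by (kernel - 1) and each 2x2
# max-pool halves it (floor division):
#   224 -conv10-> 215 -pool-> 107 -conv7-> 101 -pool-> 50
#       -conv5->   46 -pool->  23 -conv3->  21
# Flattened: 384 channels * 21 * 21 = 169344.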
class TrainData(Dataset):
    def __init__(self):
        super(TrainData, self).__init__()
        X1 = np.load('{}/x1.npy'.format(out_path))
        X2 = np.load('{}/x2.npy'.format(out_path))
        Y = np.load('{}/y.npy'.format(out_path))
        self.len = Y.shape[0]
        X1 = X1.reshape(self.len, 3, 224, 224)
        X2 = X2.reshape(self.len, 3, 224, 224)
        Y = Y.reshape(self.len, 1)
        self.X1 = torch.from_numpy(X1)
        self.X2 = torch.from_numpy(X2)
        self.Y = torch.from_numpy(Y)

    def __getitem__(self, index):
        return self.X1[index], self.X2[index], self.Y[index]

    def __len__(self):
        return self.len

train_data = TrainData()
train_loader = DataLoader(dataset=train_data,
                          batch_size=25,
                          num_workers=5)
# instantiating the model
net = Siamese()
# setting optimizer and loss function
optimizer = Adam(net.parameters(), lr=0.00003)
criterion = BCELoss()
# if a GPU is available, put the model and loss on it for faster processing
if torch.cuda.is_available():
    net = net.cuda()
    criterion = criterion.cuda()
print(net)
# train the model for one epoch
def train(epoch, batch_size):
    net.train()
    tr_loss = 0
    for i, data in enumerate(tqdm(train_loader)):
        # clearing the gradients of the model parameters
        optimizer.zero_grad()
        # load the data batch-wise
        x1, x2, y = data
        x1, x2, y = Variable(x1), Variable(x2), Variable(y)
        # if a GPU is available, move only the current batch onto it to save GPU RAM
        if torch.cuda.is_available():
            x1, x2, y = x1.cuda(), x2.cuda(), y.cuda()
        # get model output
        output = net(x1, x2)
        # calculate loss
        loss_train = criterion(output.float(), y.float())
        # store a detached scalar so the computation graph is not kept alive
        train_losses.append(loss_train.item())
        # backpropagate
        loss_train.backward()
        optimizer.step()
        tr_loss = loss_train.item()
    print('Epoch : ', epoch + 1, '\t', 'loss :', tr_loss)
# defining the number of epochs
n_epochs = 50
# empty list to store training losses
train_losses = []
batch_size = 25
# training the model
for epoch in range(n_epochs):
    train(epoch, batch_size)
# save PyTorch model weights
torch.save(net.state_dict(), "/home/keti/Desktop/glory/fingerprint-recognition-using-siamese-network-with-retraining/model/torch2.model")
# Load saved model
# do not execute if the trained model is already in memory
net = Siamese()
net.load_state_dict(torch.load("/home/keti/Desktop/glory/fingerprint-recognition-using-siamese-network-with-retraining/model/torch2.model"))
# put the model on the CPU if the test images are loaded on the CPU
net = net.cpu()
net.eval()
# preparing the test images and class images for prediction
test_images = net.prepare_images_from_dir("/home/keti/Desktop/glory/fingerprint-recognition-using-siamese-network-with-retraining/test_images/", flatten=False)
class_images = "/home/keti/Desktop/glory/fingerprint-recognition-using-siamese-network-with-retraining/images/"
samples = {}
for class_name in os.listdir(class_images):
    samples[class_name] = net.prepare_images_from_dir(class_images + class_name + "/", flatten=False)
test_images_names = os.listdir("/home/keti/Desktop/glory/fingerprint-recognition-using-siamese-network-with-retraining/test_images/")
# setting a threshold of 0.9 for prediction confidence
i = 0
threshold = 0.9
with torch.no_grad():  # inference only: no gradients needed
    for j in range(0, len(test_images)):
        for class_name in os.listdir(class_images):
            for sample in range(len(samples[class_name])):
                prediction_score = net(test_images[j:j + 1], samples[class_name][sample:sample + 1])[0]
                if prediction_score > threshold:
                    print('IMAGE {} is {} with confidence of {}'.format(test_images_names[i], class_name, prediction_score[0]))
                    break
            if prediction_score > threshold:
                break
        i += 1
The code above runs without issues.
•
However, the Jetson Nano frequently locks up during training, so run the training step on a PC whenever possible! (A sketch for loading the PC-trained weights back onto the Nano follows below.)
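If you train on a PC (typically on a GPU) and copy torch2.model back to the Nano, load it with map_location so GPU-saved tensors are remapped to the CPU; a minimal sketch, reusing the path from 2.py:

import torch

net = Siamese()
# map_location remaps CUDA tensors onto the CPU so the load works on the Nano
state = torch.load(
    "/home/keti/Desktop/glory/fingerprint-recognition-using-siamese-network-with-retraining/model/torch2.model",
    map_location=torch.device("cpu"),
)
net.load_state_dict(state)
net.eval()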
Hello
•
I'm Yeonggwang Kim from the Korea Electronics Research Institute (KETI).
•
For technical inquiries and joint R&D project inquiries, please contact me at "glory@keti.re.kr".