# NOTE: reduced the batch size to 2 (to work around GPU out-of-memory errors).
import numpy as np
#import matplotlib.pyplot as plt
import os
import math
import shutil
# Source dataset: one folder per identity (named "pins_<name>"), images inside.
ROOT_DIR = '/home/test4611/Desktop/105_classes_pins_dataset'
# Working copy of the selected classes, used later to build train/val/test splits.
DEST_DIR = './Data'
# Number of target identities to classify (a NON-TARGET class is added later).
n = 5 #int(input("Enter Number of Targets: "))
#shutil.rmtree("./Data")
import random
# Identities already copied — never picked twice, never used for NON-TARGET.
exclude=[]
# Names of the chosen target classes (filled in after counting).
target=[]
# NOTE(review): `count` is never used anywhere visible — candidate for removal.
count=[]
# Per-class image counts, keyed by folder name under DEST_DIR.
no_of_images = {}
# Pick n distinct random identities from ROOT_DIR and copy each identity's
# whole folder into ./Data. (Indentation was lost in the paste; reconstructed.)
if not os.path.exists("./Data"):
    os.mkdir("./Data")
for i in range(n):
    personfolder = os.listdir(ROOT_DIR)
    # Drop identities already chosen in a previous iteration.
    for item in exclude:
        if item in personfolder:
            personfolder.remove(item)
    person = random.choice(personfolder)
    src = ROOT_DIR + '/' + person
    # person[5:] strips the leading prefix from the folder name
    # (assumes names start with 'pins_' — TODO confirm).
    dest = './Data/' + person[5:]
    exclude.append(person)
    shutil.copytree(src, dest)
# Count images per chosen target, then build a NON-TARGET class with two more
# images than the largest target class. (Indentation reconstructed.)
for class_dir in os.listdir(DEST_DIR):
    no_of_images[class_dir] = len(os.listdir(os.path.join(DEST_DIR, class_dir)))
othersno = max(no_of_images.values())
if not os.path.exists("./Data/NON-TARGET"):
    os.mkdir("./Data/NON-TARGET")
for i in range(othersno + 2):
    # BUG FIX: re-list ROOT_DIR and filter with `exclude` — the old code reused
    # a stale `personfolder` that still contained the last chosen target, so
    # NON-TARGET could include images of a target identity.
    person = random.choice([p for p in os.listdir(ROOT_DIR) if p not in exclude])
    files = os.listdir(ROOT_DIR + '/' + person)
    face = random.choice(files)
    # NOTE(review): same-named files overwrite each other in NON-TARGET, so the
    # final count may be below othersno+2 — confirm acceptable.
    shutil.copy(ROOT_DIR + '/' + person + '/' + face, './Data/NON-TARGET/')
# Recount images per class (now including NON-TARGET) and print a summary
# table. (Indentation reconstructed; removed the no-op bare expression
# `no_of_images.items()` whose result was discarded.)
no_of_images = {}
for class_dir in os.listdir(DEST_DIR):
    no_of_images[class_dir] = len(os.listdir(os.path.join(DEST_DIR, class_dir)))
print('\n')
# Every class folder except NON-TARGET is a target class.
for item in no_of_images:
    if item != 'NON-TARGET':
        target.append(item)
print('Target_Name\t\tNo_of_Images')
for item in target:
    print(item, '\t\t', no_of_images[item])
print('NON-TARGET', '\t\t', no_of_images['NON-TARGET'])
###
def dataFolder(p, split):
    """Create ./Model_Data/<p>/<class> for every class under DEST_DIR and copy
    a random sample of roughly `split * class_size - 5` images into it.

    Args:
        p: split name ("Train", "Val", "Test") — becomes the folder name.
        split: fraction of each class's images to sample.

    Skips everything (just prints) if ./Model_Data/<p> already exists.
    Reads module globals DEST_DIR and no_of_images.
    """
    if not os.path.exists("./Model_Data"):
        os.mkdir("./Model_Data")
    if not os.path.exists("./Model_Data/" + p):
        os.mkdir("./Model_Data/" + p)
        for class_dir in os.listdir(DEST_DIR):
            os.makedirs("./Model_Data/" + p + "/" + class_dir)
            available = os.listdir(os.path.join(DEST_DIR, class_dir))
            # BUG FIX: the original `floor(split*count) - 5` could be negative
            # for small classes, which crashes np.random.choice; clamp to
            # [0, len(available)].
            sample_size = max(0, min(len(available),
                                     math.floor(split * no_of_images[class_dir]) - 5))
            # NOTE(review): each call samples independently, so Train/Val/Test
            # can overlap — TODO: partition the images instead.
            for img in np.random.choice(a=available, size=sample_size, replace=False):
                O = os.path.join(DEST_DIR, class_dir, img)
                D = os.path.join("./Model_Data/" + p, class_dir)
                shutil.copy(O, D)
    else:
        print(f"{p} Exists")
# Build the three splits.
# NOTE(review): each call samples independently from ./Data, so the splits can
# overlap, and the fractions 0.7 + 0.3 + 0.5 sum to more than 1 — confirm intended.
dataFolder("Train",0.7)
dataFolder("Val",0.3)
dataFolder("Test",0.5)
####
from keras.preprocessing.image import ImageDataGenerator
def preprocessingTrain(path):
    """Return a 256x256 categorical batch generator over `path` with
    training-time augmentation (rotation, shift, zoom, shear, flip) and
    1/255 rescaling. Batch size is 2 (kept small for GPU memory)."""
    # NOTE(review): featurewise_center=True normally requires calling .fit()
    # on sample data first — confirm this is intended.
    augmenter = ImageDataGenerator(
        featurewise_center=True,
        rotation_range=0.4,
        width_shift_range=0.3,
        zoom_range=0.2,
        shear_range=0.2,
        rescale=1. / 255,
        horizontal_flip=True,
    )
    return augmenter.flow_from_directory(
        directory=path,
        target_size=(256, 256),
        batch_size=2,
        class_mode='categorical',
    )
def preprocessingVal(path):
    """Return a 256x256 categorical batch generator over `path` with 1/255
    rescaling only (no augmentation) — used for validation and test data."""
    rescaler = ImageDataGenerator(rescale=1. / 255)
    return rescaler.flow_from_directory(
        directory=path,
        target_size=(256, 256),
        batch_size=2,
        class_mode='categorical',
    )
###
# Build one generator per split produced by dataFolder().
train_path="./Model_Data/Train"
train_data = preprocessingTrain(train_path)
val_path="./Model_Data/Val"
val_data = preprocessingVal(val_path)
test_path="./Model_Data/Test"
# Test data reuses the validation pipeline (rescale only, no augmentation).
test_data = preprocessingVal(test_path)
from keras.models import Model
from keras.layers import Flatten, Dense
import keras.losses
from keras.applications.inception_resnet_v2 import InceptionResNetV2 as IncRes
###
# Transfer learning: pretrained InceptionResNetV2 backbone (ImageNet weights,
# no top), frozen, with a new softmax classification head.
# (Loop indentation was lost in the paste; reconstructed. Removed the
# commented-out duplicate summary() line.)
base_model = IncRes(input_shape=(256, 256, 3), weights='imagenet', include_top=False)
# Freeze the backbone so only the new head is trained.
for layer in base_model.layers:
    layer.trainable = False
X = Flatten()(base_model.output)
# n target classes + 1 NON-TARGET class.
X = Dense(units=n + 1, activation='softmax')(X)
model_IncRes = Model(base_model.input, X)
model_IncRes.compile(optimizer='adam',
                     loss=keras.losses.categorical_crossentropy,
                     metrics=['accuracy'])
model_IncRes.summary()
###
# ---------------------------------------------------------------------------
# 2.py — second script. (The stray "Python / 복사" lines here were copy-button
# residue from the note-taking app, not code.)
# ---------------------------------------------------------------------------
import h5py
import numpy as np
#import matplotlib.pyplot as plt
#%matplotlib inline
#
# used to supress display of warnings
import warnings
#from sklearn.metrics import precision_recall_curve,accuracy_score,f1_score,precision_score,recall_score
# suppress display of warnings
warnings.filterwarnings('ignore')
import os
# Root of the dataset: one sub-folder per identity.
source_dir=os.path.join('/home/test4611/Desktop/105_classes_pins_dataset')
class IdentityMetadata():
    """A single dataset image, identified by (base dir, identity, file name)."""

    def __init__(self, base, name, file):
        # dataset base directory
        self.base = base
        # identity (person) name — also the sub-folder name
        self.name = name
        # image file name within the identity's folder
        self.file = file

    def image_path(self):
        """Full on-disk path of this image."""
        return os.path.join(self.base, self.name, self.file)

    def __repr__(self):
        # Represent the record by its path for readable debugging output.
        return self.image_path()
def load_metadata(path):
    """Scan `path` (one sub-folder per identity) and return a numpy object
    array of IdentityMetadata entries, one per JPEG image.

    Fix: the extension check is now case-insensitive, so '.JPG' / '.JPEG'
    files are no longer silently skipped. (Indentation reconstructed.)
    """
    metadata = []
    for identity in os.listdir(path):
        for fname in os.listdir(os.path.join(path, identity)):
            # Allow only jpg/jpeg files, ignoring extension case.
            ext = os.path.splitext(fname)[1].lower()
            if ext in ('.jpg', '.jpeg'):
                metadata.append(IdentityMetadata(path, identity, fname))
    return np.array(metadata)
# metadata = load_metadata('images')
metadata = load_metadata(source_dir)
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import ZeroPadding2D, Convolution2D, MaxPooling2D, Dropout, Flatten, Activation
def vgg_face():
    """Build the VGG-Face convolutional network as a Keras Sequential model.

    Five conv stages (3x3 convs with ReLU, each preceded by 1-pixel zero
    padding, each stage ending in a 2x2 max-pool), followed by the
    fully-connected layers expressed as convolutions and a 2622-way softmax.
    Input shape is (224, 224, 3). Returns the uncompiled model.
    """
    model = Sequential()
    # (filters, number of convs) for each of the five stages.
    stages = [(64, 2), (128, 2), (256, 3), (512, 3), (512, 3)]
    first_layer = True
    for filters, conv_count in stages:
        for _ in range(conv_count):
            if first_layer:
                # Only the very first layer declares the input shape.
                model.add(ZeroPadding2D((1, 1), input_shape=(224, 224, 3)))
                first_layer = False
            else:
                model.add(ZeroPadding2D((1, 1)))
            model.add(Convolution2D(filters, (3, 3), activation='relu'))
        model.add(MaxPooling2D((2, 2), strides=(2, 2)))
    # Classifier head: FC layers as convolutions, then flatten + softmax.
    model.add(Convolution2D(4096, (7, 7), activation='relu'))
    model.add(Dropout(0.5))
    model.add(Convolution2D(4096, (1, 1), activation='relu'))
    model.add(Dropout(0.5))
    model.add(Convolution2D(2622, (1, 1)))
    model.add(Flatten())
    model.add(Activation('softmax'))
    return model
# Build the VGG-Face network and load pretrained weights.
model = vgg_face()
# NOTE(review): hard-coded local absolute path — the weights file must exist
# on this machine for the script to run.
model.load_weights('/home/test4611/Desktop/asd/vgg_face_weights.h5')
# (end of 2.py — trailing "Python / 복사" copy-button residue removed)
#
# Notes (translated from Korean):
# - Still failed even after drastically reducing the data.
# - Plan: shrink the photo dataset first; if that still fails, install CUDA.
# - Result: both attempts failed.
# From now on, when TensorFlow raises a GPU memory error:
#   1. reduce the batch size, or
#   2. reduce the size of the input data.