Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
69 changes: 69 additions & 0 deletions api/face_rec.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,69 @@
# Face recognition function for api

# Reference: Muhammad Ardi
# from https://medium.com/@muhammad_ardi/a-simple-python-code-for-face-recognition-3266e9fad3be
import os
import cv2
import face_recognition
import numpy as np
from tqdm import tqdm

class Face:
    """Record for one known face: where it was found, its crop, label, and encoding."""

    def __init__(self, bounding_box, cropped_face, name, feature_vector):
        # (top, right, bottom, left) tuple as produced by face_recognition
        self.bounding_box = bounding_box
        # RGB sub-image containing only the face region
        self.cropped_face = cropped_face
        # label derived from the template file name
        self.name = name
        # 128-d face encoding used for distance comparisons
        self.feature_vector = feature_vector

def load_image(path):
    """Load the image at *path* and return it as an RGB numpy array.

    Raises:
        FileNotFoundError: if the file is missing or cannot be decoded.
            (cv2.imread signals failure by returning None, not by raising.)
    """
    image = cv2.imread(path)
    if image is None:
        # Fail with a clear message instead of letting cvtColor raise a
        # cryptic cv2.error on a None input.
        raise FileNotFoundError(f'could not read image: {path}')
    # OpenCV loads BGR; face_recognition expects RGB.
    return cv2.cvtColor(image, cv2.COLOR_BGR2RGB)

def create_database(filenames, images):
    """Build the list of known Face records from template images.

    Args:
        filenames: sequence of template file names; the part before the
            first '.' becomes the face's name.
        images: sequence of RGB images, parallel to *filenames*.

    Returns:
        list[Face], one entry per template image.

    Raises:
        ValueError: if no face can be detected in a template image.
    """
    faces = []
    for filename, image in tqdm(zip(filenames, images), total=len(filenames)):
        locations = face_recognition.face_locations(image, model='hog')
        if not locations:
            # The original indexed [0] unconditionally and crashed with a
            # bare IndexError; name the offending template instead.
            raise ValueError(f'no face found in template image: {filename}')
        loc = locations[0]
        # High num_jitters: templates are encoded once, so spend the time here.
        vec = face_recognition.face_encodings(image, [loc], num_jitters=20)[0]

        top, right, bottom, left = loc
        cropped_face = image[top:bottom, left:right]

        faces.append(Face(bounding_box=loc,
                          cropped_face=cropped_face,
                          name=filename.split('.')[0],
                          feature_vector=vec))

    return faces

def detect_faces(image_test, faces, threshold=0.5):
    """Return "true" if any face in *image_test* matches a known face.

    Args:
        image_test: RGB image to scan.
        faces: list of known Face records (from create_database).
        threshold: maximum face_distance still considered a match.

    Returns:
        "true" if at least one detected face is within *threshold* of a
        known face, otherwise "false". (String values kept for backward
        compatibility with the API response format.)
    """
    locs_test = face_recognition.face_locations(image_test, model='hog')
    vecs_test = face_recognition.face_encodings(image_test, locs_test, num_jitters=1)

    if not faces:
        # Empty database: nothing can match (also avoids np.min on an
        # empty distance list, which raises).
        return "false"

    # face_distance compares one encoding against the whole database in a
    # single vectorized call, instead of one call per known face.
    known_vectors = [face.feature_vector for face in faces]
    for vec_test in vecs_test:
        distances = face_recognition.face_distance(known_vectors, vec_test)
        if np.min(distances) < threshold:
            return "true"

    return "false"

# --- Script entry: build the known-face database from ./templates ---
filenames = os.listdir('templates')
# BUG FIX: interpolate the loop variable into the path; the original used a
# fixed literal, so every template resolved to the same (invalid) path.
images = [load_image(f'templates/{filename}') for filename in filenames]
faces = create_database(filenames, images)

# Run every test image in ./tests through the matcher.
tests = os.listdir('tests')
for test in tests:
    # endswith is stricter than the original substring test, which also
    # matched names like 'x.jpg.bak'.
    if not test.endswith(('.jpg', '.png')):
        continue
    print(f'test: {test}')
    image_test = load_image(f'tests/{test}')

    name = detect_faces(image_test, faces, threshold=0.5)
    print(f'Predict name: {name}\n')
39 changes: 39 additions & 0 deletions api/run.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,39 @@
# API for face recognition

# Reference: 10程式中
# from https://ithelp.ithome.com.tw/articles/10280422

import numpy as np
from face_rec import create_database, detect_faces
import base64

from flask import Flask, request, jsonify
# from flask_cors import CORS

# Flask application instance; the routes below are registered on it.
app = Flask(__name__)
# CORS(app)  # enable if the frontend is served from a different origin

@app.route('/')
def index():
    """Landing route for the API root; doubles as a simple health check."""
    message = 'This is the API for face recognition'
    return message

@app.route('/predict', methods=['POST'])
def postInput():
    """Compare two base64-encoded images and report whether the faces match.

    Expects a JSON body with keys 'picture1' (the reference face) and
    'picture2' (the image to test).  Returns {'result': 'true'|'false'}.
    """
    import cv2  # local import: only this route needs raw image decoding

    def _decode_image(b64_string):
        # BUG FIX: the original passed raw base64-decoded *bytes* straight
        # into the recognition functions, which expect RGB numpy images.
        raw = base64.b64decode(b64_string)
        image = cv2.imdecode(np.frombuffer(raw, dtype=np.uint8), cv2.IMREAD_COLOR)
        if image is None:
            raise ValueError('payload is not a decodable image')
        return cv2.cvtColor(image, cv2.COLOR_BGR2RGB)

    # 取得前端傳過來的數值 (read the values sent by the frontend)
    insertValues = request.get_json()
    image1 = _decode_image(insertValues['picture1'])
    test_image = _decode_image(insertValues['picture2'])

    # Single-entry database built from the reference picture.
    faces = create_database(["image1"], [image1])

    result = detect_faces(test_image, faces, threshold=0.4)

    return jsonify({'result': str(result)})

if __name__ == '__main__':
    # NOTE(review): debug=True must not ship to production — it enables the
    # Werkzeug interactive debugger (arbitrary code execution).
    app.run(port=5000, debug=True)
5 changes: 5 additions & 0 deletions face_reconition/Readme.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,5 @@
# This folder is for face recognition.

## Usage
* Put the authorized (reference) images in `/templates`.
* Put the images to be checked in `/tests`.
132 changes: 132 additions & 0 deletions face_reconition/face_rec.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,132 @@
# Testcode for face recognition

# Reference: Muhammad Ardi
# from https://medium.com/@muhammad_ardi/a-simple-python-code-for-face-recognition-3266e9fad3be
import os
import cv2
import face_recognition
import numpy as np
from tqdm import tqdm
# for showing image
import matplotlib.pyplot as plt

class Face:
    """Holds everything known about one recognized face."""

    def __init__(self, bounding_box, cropped_face, name, feature_vector):
        # location tuple (top, right, bottom, left) from face_recognition
        self.bounding_box = bounding_box
        # RGB crop of the face region
        self.cropped_face = cropped_face
        # label taken from the source file name
        self.name = name
        # 128-d encoding vector for distance comparison
        self.feature_vector = feature_vector

def load_image(path):
    """Load the image at *path* and return it as an RGB numpy array.

    Raises:
        FileNotFoundError: if the file is missing or cannot be decoded.
            (cv2.imread signals failure by returning None, not by raising.)
    """
    image = cv2.imread(path)
    if image is None:
        # Fail with a clear message instead of letting cvtColor raise a
        # cryptic cv2.error on a None input.
        raise FileNotFoundError(f'could not read image: {path}')
    # OpenCV loads BGR; face_recognition expects RGB.
    return cv2.cvtColor(image, cv2.COLOR_BGR2RGB)

def show_image(image):
    """Display *image* in a matplotlib window with axis ticks hidden."""
    plt.imshow(image)
    plt.xticks([])
    plt.yticks([])
    plt.show()

def create_database(filenames, images):
    """Build the list of known Face records from template images.

    Args:
        filenames: sequence of template file names; the part before the
            first '.' becomes the face's name.
        images: sequence of RGB images, parallel to *filenames*.

    Returns:
        list[Face], one entry per template image.

    Raises:
        ValueError: if no face can be detected in a template image.
    """
    faces = []
    for filename, image in tqdm(zip(filenames, images), total=len(filenames)):
        locations = face_recognition.face_locations(image, model='hog')
        if not locations:
            # The original indexed [0] unconditionally and crashed with a
            # bare IndexError; name the offending template instead.
            raise ValueError(f'no face found in template image: {filename}')
        loc = locations[0]
        # High num_jitters: templates are encoded once, so spend the time here.
        vec = face_recognition.face_encodings(image, [loc], num_jitters=20)[0]

        top, right, bottom, left = loc
        cropped_face = image[top:bottom, left:right]

        faces.append(Face(bounding_box=loc,
                          cropped_face=cropped_face,
                          name=filename.split('.')[0],
                          feature_vector=vec))

    return faces

def detect_faces(image_test, faces, threshold=0.6):
    """Identify face(s) in *image_test* against the known database.

    Args:
        image_test: RGB image to scan.
        faces: list of known Face records (from create_database).
        threshold: maximum face_distance still considered a match.

    Returns:
        The name of the matched face for the last face processed,
        'unknown' when the closest known face is farther than *threshold*,
        and 'unknown' when no face is detected at all.
    """
    locs_test = face_recognition.face_locations(image_test, model='hog')
    vecs_test = face_recognition.face_encodings(image_test, locs_test, num_jitters=1)

    # BUG FIX: the original left pred_name unassigned when no face was
    # detected, raising UnboundLocalError at the return statement.
    pred_name = 'unknown'

    if not faces:
        # Empty database: nothing can match (also avoids np.min/argmin on
        # an empty distance array, which raises).
        return pred_name

    # face_distance compares one encoding against the whole database in a
    # single vectorized call, instead of one call per known face.
    known_vectors = [face.feature_vector for face in faces]
    for loc_test, vec_test in zip(locs_test, vecs_test):
        distances = face_recognition.face_distance(known_vectors, vec_test)

        if np.min(distances) > threshold:
            pred_name = 'unknown'
            print('Not match')
        else:
            pred_name = faces[int(np.argmin(distances))].name
            print('Match')
        print('min distance:', np.min(distances))

        # image_test = draw_bounding_box(image_test, loc_test)
        # image_test = draw_name(image_test, loc_test, pred_name)

    return pred_name

# NOTE(review): the triple-quoted block below is disabled code (drawing
# helpers) kept for reference; it evaluates as a bare string literal with no
# runtime effect. Consider deleting it or moving it to version history.
'''
def draw_bounding_box(image_test, loc_test):
    top, right, bottom, left = loc_test

    line_color = (0, 255, 0)
    line_thickness = 2

    cv2.rectangle(image_test, (left, top), (right, bottom), line_color, line_thickness)
    return image_test

def draw_name(image_test, loc_test, pred_name):
    top, right, bottom, left = loc_test

    font = cv2.FONT_HERSHEY_SIMPLEX
    font_scale = 1.5
    font_color = (0, 0, 255)
    line_thickness = 3

    text_size, _ = cv2.getTextSize(pred_name, font, font_scale, line_thickness)

    bg_top_left = (left, top-text_size[1])
    bg_bottom_right = (left+text_size[0], top)
    cv2.rectangle(image_test, bg_top_left, bg_bottom_right, (0, 255, 0), -1)

    cv2.putText(image_test, pred_name, (left, top), font, font_scale, font_color, line_thickness)

    return image_test
'''
# --- Script entry: build the known-face database from ./templates ---
filenames = os.listdir('templates')
# BUG FIX: interpolate the loop variable into the path; the original used a
# fixed literal, so every template resolved to the same (invalid) path.
images = [load_image(f'templates/{filename}') for filename in filenames]
# show_image(images[0])
faces = create_database(filenames, images)

# Run every test image in ./tests through the matcher.
tests = os.listdir('tests')
for test in tests:
    # endswith is stricter than the original substring test, which also
    # matched names like 'x.jpg.bak'.
    if not test.endswith(('.jpg', '.png')):
        continue
    print(f'test: {test}')
    image_test = load_image(f'tests/{test}')
    # show_image(image_test)
    name = detect_faces(image_test, faces, threshold=0.5)
    print(f'Predict name: {name}\n')

# cap = cv2.VideoCapture(0)
# cap.set(3, 640)
# cap.set(4, 480)

# while True:
# _, image, = cap.read()
# image = cv2.flip(image, flipCode=1)
# image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)

# image = detect_faces(image, faces, threshold=0.6)

# image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)

# if cv2.waitKey(1) & 0xFF == ord('q'):
# break

# cv2.imshow('image', image)