A comparison of face detection with OpenCV, dlib, and PaddleHub. dlib and PaddleHub give somewhat better results.
Note: this article covers face detection only, not recognition. If you are looking for face recognition, this article is not for you.
## Deployment

1. Install Python or conda.
2. Install the dependencies: pip install -r requirements.txt
3. Replace 192.168.1.41 with the IP of your deployment machine.
4. Start the app: python app_dlib.py
5. Try it in a browser: http://192.168.1.41:7049
6. API endpoint: http://192.168.1.41:7049/run/predict/
API parameters: send a POST request whose body is a JSON payload containing a base64-encoded image, as shown below; just swap in your own image. Note that the keys must be quoted for the body to be valid JSON. A Python client sketch follows the payload.
```json
{
  "fn_index": 0,
  "data": ["data:image/jpeg;base64,/9j/4AAQSkZJtXlnut7A8QOeSpiTO/DNIrhn3HpugKCATj590EhqShGP8VInOz6TrioYTyGR0oyiMh/dnEpkQ0Pu+Yy+QWamDMkbve9U6MyWdEa+MqHDn1zUtpCT4f/AC//2Q=="],
  "session_hash": "s1oy98lial"
}
```
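For reference, a minimal Python client sketch for this endpoint (the requests library, the file name 1.png, and the port are assumptions based on the deployment notes above; session_hash can be any client-chosen string):

```python
# Minimal client sketch: send a local image to the Gradio /run/predict route.
# "1.png" and the host/port are placeholders; adjust to your deployment.
import base64
import requests

with open("1.png", "rb") as f:
    b64 = base64.b64encode(f.read()).decode("utf-8")

payload = {
    "fn_index": 0,
    "data": ["data:image/png;base64," + b64],
    "session_hash": "s1oy98lial",  # any client-chosen string
}
resp = requests.post("http://192.168.1.41:7049/run/predict/", json=payload)
print(resp.json())
```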
Dependencies (you only need one of the three detectors):
dlib requires a C++ compiler (gcc or Visual Studio).
```
gradio
opencv-python
dlib
paddlehub
```
## OpenCV detection
```python
import gradio as gr
import cv2

# Load the Haar cascade face detector
face_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')
# face_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_eye_tree_eyeglasses.xml')

# UGC: Define the inference fn() for your models
def model_inference(image):
    # image = cv2.imread(image)  # not needed: Gradio already passes a numpy array
    # Convert the image to grayscale
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    # Detect faces
    faces = face_cascade.detectMultiScale(gray, scaleFactor=1.2, minNeighbors=3, minSize=(32, 32))
    # Draw a rectangle around each detected face
    for (x, y, w, h) in faces:
        cv2.rectangle(image, (x, y), (x + w, y + h), (0, 255, 0), 3)
    # Show the result in a local window (debugging only)
    # cv2.imshow('Face Detection', image)
    # cv2.waitKey(0)
    # cv2.destroyAllWindows()
    json_out = {"result": len(faces)}
    return image, json_out

def clear_all():
    return None, None, None

with gr.Blocks() as demo:
    gr.Markdown("Face detection")
    with gr.Column(scale=1, min_width=100):
        img_in = gr.Image(value="1.png", label="Input")
        with gr.Row():
            btn1 = gr.Button("Clear")
            btn2 = gr.Button("Submit")
        img_out = gr.Image(label="Output").style(height=400)
        json_out = gr.JSON(label="jsonOutput")
    btn2.click(fn=model_inference, inputs=img_in, outputs=[img_out, json_out])
    btn1.click(fn=clear_all, inputs=None, outputs=[img_in, img_out, json_out])

demo.launch(server_name='192.168.1.41', share=True, server_port=7048)
```
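The detectMultiScale parameters trade recall against false positives: a smaller scaleFactor scans more image scales and a lower minNeighbors accepts weaker candidate clusters, so both tend to find more (and more spurious) faces. A minimal sketch for comparing settings on a local test image (test.jpg is a placeholder):

```python
# Quick parameter sweep for detectMultiScale; "test.jpg" is a placeholder.
import cv2

cascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')
gray = cv2.cvtColor(cv2.imread("test.jpg"), cv2.COLOR_BGR2GRAY)

for scale in (1.05, 1.2, 1.4):
    for neighbors in (3, 5):
        faces = cascade.detectMultiScale(gray, scaleFactor=scale,
                                         minNeighbors=neighbors, minSize=(32, 32))
        print(f"scaleFactor={scale}, minNeighbors={neighbors}: {len(faces)} face(s)")
```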
## dlib detection
```python
import gradio as gr
import cv2
import dlib

detector = dlib.get_frontal_face_detector()
# predictor = dlib.shape_predictor(
#     "dlib_model/shape_predictor_68_face_landmarks.dat"
# )

# UGC: Define the inference fn() for your models
def model_inference(image):
    # image = cv2.imread(image)  # not needed: Gradio already passes a numpy array
    # Convert the image to grayscale
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    # Detect faces (the second argument upsamples the image once)
    faces = detector(gray, 1)
    for face in faces:
        # Draw a rectangle around each detected face
        left = face.left()
        top = face.top()
        right = face.right()
        bottom = face.bottom()
        cv2.rectangle(image, (left, top), (right, bottom), (0, 255, 0), 2)
        # shape = predictor(image, face)  # locate the 68 facial landmarks
        # # Iterate over all landmarks and mark each one
        # for pt in shape.parts():
        #     pt_pos = (pt.x, pt.y)
        #     cv2.circle(image, pt_pos, 1, (0, 255, 0), 2)
    json_out = {"result": len(faces)}
    return image, json_out

def clear_all():
    return None, None, None

with gr.Blocks() as demo:
    gr.Markdown("Face detection")
    with gr.Column(scale=1, min_width=100):
        img_in = gr.Image(value="1.png", label="Input")
        with gr.Row():
            btn1 = gr.Button("Clear")
            btn2 = gr.Button("Submit")
        img_out = gr.Image(label="Output").style(height=400)
        json_out = gr.JSON(label="jsonOutput")
    btn2.click(fn=model_inference, inputs=img_in, outputs=[img_out, json_out])
    btn1.click(fn=clear_all, inputs=None, outputs=[img_in, img_out, json_out])

demo.launch(server_name='192.168.1.41', share=True, server_port=7049)
```
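If you also need a confidence score per detection, the dlib detector exposes run(), which returns the rectangles together with their scores. A minimal sketch (test.jpg is a placeholder):

```python
# Sketch: per-detection confidence scores from the dlib detector.
# detector.run() returns (rectangles, scores, sub-detector indices).
import cv2
import dlib

detector = dlib.get_frontal_face_detector()
gray = cv2.cvtColor(cv2.imread("test.jpg"), cv2.COLOR_BGR2GRAY)

rects, scores, idx = detector.run(gray, 1, 0)  # upsample once, default threshold
for rect, score in zip(rects, scores):
    print(f"face at ({rect.left()}, {rect.top()}) score={score:.2f}")
```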
## PaddleHub detection
```python
import gradio as gr
import paddlehub as hub
import cv2

# Load PaddleHub's built-in face detector
module = hub.Module(name="ultra_light_fast_generic_face_detector_1mb_640")

def model_inference(image):
    # face_detection() arguments:
    #   images (list[numpy.ndarray]): image data, ndarray.shape [H, W, C], BGR format
    #   paths (list[str]): image file paths
    #   batch_size (int): batch size
    #   use_gpu (bool): whether to use the GPU
    #   visualization (bool): whether to save the result as an image file
    #   output_dir (str): save path; defaults to face_detector_640_predict_output when None
    #   confs_threshold (float): confidence threshold
    faces = module.face_detection([image], visualization=False)[0]["data"]
    for face in faces:
        # Draw a rectangle around each detected face
        left = int(face["left"])
        top = int(face["top"])
        right = int(face["right"])
        bottom = int(face["bottom"])
        cv2.rectangle(image, (left, top), (right, bottom), (0, 255, 0), 2)
    json_out = {"result": len(faces)}
    return image, json_out

def clear_all():
    return None, None, None

with gr.Blocks() as demo:
    gr.Markdown("Face detection")
    with gr.Column(scale=1, min_width=100):
        img_in = gr.Image(value="1.png", label="Input")
        with gr.Row():
            btn1 = gr.Button("Clear")
            btn2 = gr.Button("Submit")
        img_out = gr.Image(label="Output").style(height=400)
        json_out = gr.JSON(label="jsonOutput")
    btn2.click(fn=model_inference, inputs=img_in, outputs=[img_out, json_out])
    btn1.click(fn=clear_all, inputs=None, outputs=[img_in, img_out, json_out])

demo.launch(server_name='192.168.1.41', share=True, server_port=7050)
```
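Each detection dict from this module should also carry a confidence field next to the box coordinates, which lets you drop weak detections before drawing. A minimal sketch (verify the output format against your paddlehub version; test.jpg is a placeholder):

```python
# Sketch: filter PaddleHub detections by confidence before drawing boxes.
# Assumes each result dict carries a "confidence" field next to the box
# coordinates; check your paddlehub version's output format.
import cv2
import paddlehub as hub

module = hub.Module(name="ultra_light_fast_generic_face_detector_1mb_640")
image = cv2.imread("test.jpg")

results = module.face_detection([image], visualization=False)[0]["data"]
faces = [f for f in results if f.get("confidence", 1.0) >= 0.8]
print(f"kept {len(faces)} of {len(results)} detections")
```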
## Testing the endpoint with APIPOST
axios example:
```javascript
var axios = require("axios").default;

var options = {
  method: 'POST',
  url: 'http://192.168.1.41:7050/run/predict/',
  headers: {'content-type': 'application/json'},
  // Keys must be quoted for the body to be valid JSON
  data: '{"fn_index": 0, "data": ["data:image/jpeg;base64,/9j/4gM5jj4ihEoiOUxSpDKSBjsPFBYRtXlnut7A8QOeSpiTO/DNIrhn3HpugKCATj590EhqShGP8VInOz6TrioYTyGR0oyiMh/dnEpkQ0Pu+Yy+QWamDMkbve9U6MyWdEa+MqHDn1zUtpCT4f/AC//2Q=="], "session_hash": "s1oy98lial"}'
};

axios.request(options).then(function (response) {
  console.log(response.data);
}).catch(function (error) {
  console.error(error);
});
```
jQuery example:
```javascript
const settings = {
  "async": true,
  "crossDomain": true,
  "url": "http://192.168.1.41:7050/run/predict/",
  "method": "POST",
  "headers": {"content-type": "application/json"},
  // Keys must be quoted for the body to be valid JSON
  "data": "{\"fn_index\": 0, \"data\": [\"data:image/jpeg;base64,/9j/4AAQSkZJUWYgM5jj4ihEoiOUxSpDKSBjsPFBYRtXlnut7A8QOeSpiTO/DNIrhn3HpugKCATj590EhqShGP8VInOz6TrioYTyGR0oyiMh/dnEpkQ0Pu+Yy+QWamDMkbve9U6MyWdEa+MqHDn1zUtpCT4f/AC//2Q==\"], \"session_hash\": \"s1oy98lial\"}"
};

$.ajax(settings).done(function (response) {
  console.log(response);
});
```
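To consume the response in Python: with this Blocks layout, data[0] should be the annotated image as a base64 data URI and data[1] the face-count JSON, though the exact response shape depends on the gradio version, so verify it before relying on this. A minimal sketch:

```python
# Sketch: decode the /run/predict response. With the Blocks layout above,
# resp["data"][0] should be the annotated image as a base64 data URI and
# resp["data"][1] the face-count JSON; verify for your gradio version.
import base64
import requests

payload = {"fn_index": 0,
           "data": ["data:image/png;base64,REPLACE_WITH_YOUR_IMAGE"],  # placeholder
           "session_hash": "s1oy98lial"}
resp = requests.post("http://192.168.1.41:7050/run/predict/", json=payload).json()

img_data_uri, counts = resp["data"]
b64 = img_data_uri.split(",", 1)[1]  # strip the "data:image/...;base64," header
with open("result.png", "wb") as f:
    f.write(base64.b64decode(b64))
print(counts)  # e.g. {"result": 1}
```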