Face Rank: a dlib-based facial attractiveness scoring service

__author__ = 'pangzhiwei'

import cv2
import dlib
import numpy as np
import math
import itertools
from sklearn.externals import joblib  # on scikit-learn >= 0.23 this moved: use `import joblib` instead
from sklearn import decomposition
import bottle
from bottle import request
import urllib.request
import json


# Ratio of two segment lengths: dist(p1, p2) / dist(p3, p4), with the four
# points passed flat as [x1, y1, x2, y2, x3, y3, x4, y4].
def facialRatio(points):
	x1 = points[0]
	y1 = points[1]
	x2 = points[2]
	y2 = points[3]
	x3 = points[4]
	y3 = points[5]
	x4 = points[6]
	y4 = points[7]
	dist1 = math.sqrt((x1-x2)**2 + (y1-y2)**2)
	dist2 = math.sqrt((x3-x4)**2 + (y3-y4)**2)
	ratio = dist1/dist2
	return ratio


# For each row of flattened landmark coordinates (x1, y1, ..., x68, y68),
# compute one distance-ratio feature per quadruple of landmark indices.
# The indices are 1-based, hence the -1 offsets below.
def generateFeatures(pointIndices1, pointIndices2, pointIndices3, pointIndices4, allLandmarkCoordinates):
	size = allLandmarkCoordinates.shape
	if len(size) > 1:
		allFeatures = np.zeros((size[0], len(pointIndices1)))
		for x in range(0, size[0]):
			landmarkCoordinates = allLandmarkCoordinates[x, :]
			ratios = []
			for i in range(0, len(pointIndices1)):
				x1 = landmarkCoordinates[2*(pointIndices1[i]-1)]
				y1 = landmarkCoordinates[2*pointIndices1[i] - 1]
				x2 = landmarkCoordinates[2*(pointIndices2[i]-1)]
				y2 = landmarkCoordinates[2*pointIndices2[i] - 1]
				x3 = landmarkCoordinates[2*(pointIndices3[i]-1)]
				y3 = landmarkCoordinates[2*pointIndices3[i] - 1]
				x4 = landmarkCoordinates[2*(pointIndices4[i]-1)]
				y4 = landmarkCoordinates[2*pointIndices4[i] - 1]
				points = [x1, y1, x2, y2, x3, y3, x4, y4]
				ratios.append(facialRatio(points))
			allFeatures[x, :] = np.asarray(ratios)
	else:
		allFeatures = np.zeros((1, len(pointIndices1)))
		landmarkCoordinates = allLandmarkCoordinates
		ratios = []
		for i in range(0, len(pointIndices1)):
			x1 = landmarkCoordinates[2*(pointIndices1[i]-1)]
			y1 = landmarkCoordinates[2*pointIndices1[i] - 1]
			x2 = landmarkCoordinates[2*(pointIndices2[i]-1)]
			y2 = landmarkCoordinates[2*pointIndices2[i] - 1]
			x3 = landmarkCoordinates[2*(pointIndices3[i]-1)]
			y3 = landmarkCoordinates[2*pointIndices3[i] - 1]
			x4 = landmarkCoordinates[2*(pointIndices4[i]-1)]
			y4 = landmarkCoordinates[2*pointIndices4[i] - 1]
			points = [x1, y1, x2, y2, x3, y3, x4, y4]
			ratios.append(facialRatio(points))
		allFeatures[0, :] = np.asarray(ratios)
	return allFeatures


# Pick 19 salient landmark indices (brows, eyes, nose, jaw, mouth), enumerate
# every 4-point combination, and emit the 3 distinct ways of pairing each
# combination into two segments: C(19,4) * 3 = 11628 ratio features per face.
def generateAllFeatures(allLandmarkCoordinates):
	a = [18, 22, 23, 27, 37, 40, 43, 46, 28, 32, 34, 36, 5, 9, 13, 49, 55, 52, 58]
	combinations = itertools.combinations(a, 4)
	i = 0
	pointIndices1 = []
	pointIndices2 = []
	pointIndices3 = []
	pointIndices4 = []
	for combination in combinations:
		pointIndices1.append(combination[0])
		pointIndices2.append(combination[1])
		pointIndices3.append(combination[2])
		pointIndices4.append(combination[3])
		i = i+1
		pointIndices1.append(combination[0])
		pointIndices2.append(combination[2])
		pointIndices3.append(combination[1])
		pointIndices4.append(combination[3])
		i = i+1
		pointIndices1.append(combination[0])
		pointIndices2.append(combination[3])
		pointIndices3.append(combination[1])
		pointIndices4.append(combination[2])
		i = i+1
	return generateFeatures(pointIndices1, pointIndices2, pointIndices3, pointIndices4, allLandmarkCoordinates)


def fetch_face_pic(face, predictor):
    # detect faces, then run the 68-point landmark predictor on each one
    rects = detector(face, 1)
    arrs = []
    face_arr = []
    for faces in range(len(rects)):
        landmarks = np.matrix([[p.x, p.y] for p in predictor(face, rects[faces]).parts()])
        # flatten the landmarks into [x1, y1, x2, y2, ...]
        arr = []
        for idx, point in enumerate(landmarks):
            arr = np.append(arr, point[0, 0])
            arr = np.append(arr, point[0, 1])
        if len(arrs) == 0:
            arrs = [arr]
        else:
            arrs = np.concatenate((arrs, [arr]), axis=0)
        # record this face's bounding box
        f = rects[faces]
        [x1, x2, y1, y2] = [f.left(), f.right(), f.top(), f.bottom()]
        a = [[x1, x2, y1, y2]]
        if len(face_arr) == 0:
            face_arr = a
        else:
            face_arr = np.concatenate((face_arr, a), axis=0)
    # return arrays so downstream .shape / .tolist() work even for a single face
    return np.asarray(arrs), np.asarray(face_arr)

def predict(my_features):
    # project each face's ratio features through the fitted PCA, then score
    predictions = []
    for i in range(len(my_features)):
        feature = my_features[i, :]
        feature_transfer = pca.transform(feature.reshape(1, -1))
        predictions.append(pre_model.predict(feature_transfer).tolist())
    return predictions

PREDICTOR_PATH = './model/shape_predictor_68_face_landmarks.dat'
detector = dlib.get_frontal_face_detector()
# build the landmark extractor from the official dlib model
predictor = dlib.shape_predictor(PREDICTOR_PATH)
pre_model = joblib.load('./model/face_rating.pkl')
# fit PCA on the training features so incoming faces are projected the same way
features = np.loadtxt('./data/features_ALL.txt', delimiter=',')
pca = decomposition.PCA(n_components=20)
pca.fit(features)


@bottle.route('/find', method='GET')
def do_find():
    w = request.query.get("url")
    # fetch the image from the given URL and decode it into an OpenCV BGR array
    resp = urllib.request.urlopen(w)
    image = np.asarray(bytearray(resp.read()), dtype="uint8")
    image = cv2.imdecode(image, cv2.IMREAD_COLOR)
    arrs, faces = fetch_face_pic(image, predictor)
    if len(arrs) == 0:
        return json.dumps([])  # no face detected
    my_features = generateAllFeatures(arrs)
    predictions = predict(my_features)
    result = [
        faces.tolist(), predictions
    ]
    return json.dumps(result)


bottle.run(host='0.0.0.0', port=8888)
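
A minimal client sketch (the face-image URL is a placeholder; assumes the service is running locally on port 8888):

import json
import urllib.parse
import urllib.request

query = urllib.parse.urlencode({"url": "http://example.com/face.jpg"})
with urllib.request.urlopen("http://127.0.0.1:8888/find?" + query) as resp:
    result = json.loads(resp.read())
if result:
    boxes, scores = result  # [[left, right, top, bottom], ...] and [[score], ...]
    print(boxes, scores)
else:
    print("no face detected")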

The latest code and model files are at https://github.com/endpang/xindong

An OpenCV face-detection service that returns a confidence score

import cv2
import numpy as np
import os
import bottle
def fetch_face_pic(img, face_cascade):
    # convert to grayscale
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    # detect faces; detectMultiScale3 also returns reject levels and level
    # weights, the latter serving as a confidence score per detection
    faces, rl, wl = face_cascade.detectMultiScale3(gray, scaleFactor=1.1,
            minNeighbors=3,
            minSize=(30, 30),
            flags=cv2.CASCADE_SCALE_IMAGE,
            outputRejectLevels=True
            )
    # keep the detection with the highest score
    crop = None
    ol = 0
    for i, (x, y, w, h) in enumerate(faces):
        if wl[i][0] > ol:
            crop = img[y:y+h, x:x+w]  # slice out the region of interest
            ol = wl[i][0]
    return crop


face_cascade = cv2.CascadeClassifier('/root/girl/opencv-master/data/haarcascades/haarcascade_frontalface_default.xml')

@bottle.route('/find/<w>', method='GET')
def do_find(w):
    jaffe_pic = '/web/maps.cc/public/girl/img/' + w
    img = cv2.imread(jaffe_pic)
    crop = fetch_face_pic(img,face_cascade)
    if crop is not None:
        cv2.imwrite("/web/maps.cc/public/girl/thumb/"+w,crop)
        return w
    return ""

bottle.run(host='0.0.0.0', port=8080)
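
A quick test of the service (hypothetical filename; the image must already exist under /web/maps.cc/public/girl/img/):

import requests

r = requests.get("http://127.0.0.1:8080/find/test.jpg")
print(r.text or "no face found")  # the service echoes the filename on success, an empty string otherwise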

A web service for segmentation and recognition

Turning Mask R-CNN into a web server.

Since the production server has no GPU, an autossh reverse tunnel is used to expose the GPU box as a service the server can call, as sketched below.
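
A typical invocation might look like this (user, host, and ports are placeholders; it makes the GPU box's local port 8080 reachable as port 8080 on the server):

autossh -M 20000 -f -N -R 8080:localhost:8080 user@production-server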

The server side is wrapped in a thin PHP shell.

Locally, Mask R-CNN is wrapped into a web server with Python's bottle.

Code on GitHub: https://github.com/endpang/mask-rcnn-webserver

References

Deploying a web app with Docker: https://zhuanlan.zhihu.com/p/26418829?utm_medium=social&utm_source=weibo

OpenCV iOS development notes

GITHUB:https://github.com/endpang/opencvdemo

Implements edge detection and custom colorization.

  • Compile error "Expected identifier" after adding opencv.framework

Fix

OpenCV's headers declare an enum value named NO, which collides with Objective-C's NO macro; change NO to
NO_EXPOSURE_COMPENSATOR = 0
  • Libraries opencv depends on
libc++.tbd
AVFoundation.framework
CoreImage.framework
CoreGraphics.framework
QuartzCore.framework
Accelerate.framework

// camera support
CoreVideo.framework
CoreMedia.framework
AssetsLibrary.framework
#import "ViewController.h"
#import <opencv2/opencv.hpp>
#import <opencv2/videoio/cap_ios.h>



@interface ViewController ()<CvVideoCameraDelegate>{
    UIImageView *cameraView;
    CvVideoCamera *videoCamera;
}
@end

@implementation ViewController

- (void)viewDidLoad {
    [super viewDidLoad];
    // Do any additional setup after loading the view, typically from a nib.
    cameraView = [[UIImageView alloc] initWithFrame:self.view.frame];
    [self.view addSubview:cameraView];
    
    videoCamera = [[CvVideoCamera alloc] initWithParentView:cameraView];
    videoCamera.defaultAVCaptureDevicePosition = AVCaptureDevicePositionFront;
    videoCamera.defaultAVCaptureSessionPreset = AVCaptureSessionPreset1920x1080;
    videoCamera.defaultAVCaptureVideoOrientation = AVCaptureVideoOrientationPortrait;
    videoCamera.defaultFPS = 60;
    videoCamera.grayscaleMode = NO;
    videoCamera.delegate = self;
}
- (void)viewDidAppear:(BOOL)animated {
    [super viewDidAppear:animated];
    [videoCamera start];
}

- (void)viewWillDisappear:(BOOL)animated {
    [super viewWillDisappear:animated];
    [videoCamera stop];
}

#pragma mark -  CvVideoCameraDelegate
- (void)processImage:(cv::Mat&)image {
    // image processing goes here
    cv::Mat image_copy;
    // convert to grayscale
    cv::cvtColor(image, image_copy, cv::COLOR_BGR2GRAY);
    // invert the image
    cv::bitwise_not(image_copy, image_copy);
    // convert back and write into image so the result is displayed
    cv::cvtColor(image_copy, image, cv::COLOR_GRAY2BGR);
}


- (void)didReceiveMemoryWarning {
    [super didReceiveMemoryWarning];
    // Dispose of any resources that can be recreated.
}

Add "Privacy – Camera Usage Description" to the Info.plist.

Server-side code

The service on port 8000 is https://github.com/lllyasviel/style2paints

k.png is a plain white PNG.

<?php
/**/
//print_R($_FILES);
$imgname = $_FILES['upload1']['name'];
$tmp = $_FILES['upload1']['tmp_name'];
$filepath = 'photo/';
if(move_uploaded_file($tmp,$filepath.$imgname)){
    //echo "upload succeeded";
    my_post($filepath.$imgname);
}else{
    echo "upload failed";
}
//*/
//my_post("photo/20180118172008.png");
function my_post($filename){
    $url = "http://172.18.100.205:8000/paint";

    $headers[] = 'Origin: http://172.18.100.205:8000';
    $headers[] = 'Accept-Encoding: gzip, deflate';
    $headers[] = 'Accept-Language: zh-CN,zh;q=0.9,en;q=0.8';
    $headers[] = 'User-Agent: Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36';
    $headers[] = 'Content-Type: application/x-www-form-urlencoded;';
    $headers[] = 'Accept: */*';
    $headers[] = 'Referer: http://172.18.100.205:8000/';
    $headers[] = 'Cookie: scale=2; style=photo; noise=2';
    $headers[] = 'Connection: keep-alive';
    //$post_data = array ("username" => "bob","key" => "12345");
    $ch = curl_init();
    $post_data = [];
    $post_data["sketchDenoise"] = "true";
    $post_data["resultDenoise"] = "true";
    $post_data["algrithom"] = "quality";
    $post_data["method"] = "colorize";
    $post_data["sketchID"] = "new";
    $post_data["referenceID"] = "no";
    //$b6 = "data%3Aimage%2Fpng%3Bbase64%2C";
    $b5 = "data:image/jpeg;base64,";
    $b6 = "data:image/png;base64,";
    //echo base64_encode($b6);
    $post_data["hint"] = $b6.base64_encode(file_get_contents("k.png"));
    $post_data["reference"] ="null" ;//$b6.base64_encode(file_get_contents("108_copy.png"));
    //$post_data["reference"] = null;
    $post_data["sketch"] = $b6.base64_encode(file_get_contents($filename));
    curl_setopt($ch, CURLOPT_HTTPHEADER, $headers);
    curl_setopt($ch, CURLOPT_URL, $url);
    curl_setopt($ch, CURLOPT_RETURNTRANSFER, 1);
    // send as a POST request
    curl_setopt($ch, CURLOPT_POST, 1);
    // POST variables
    //echo $post_data["sketch"];
    //print_R(http_build_query($post_data));
    curl_setopt($ch, CURLOPT_POSTFIELDS, http_build_query($post_data));

    $output = curl_exec($ch);
    $out_array = explode('*',$output);

    curl_close($ch);
    echo "http://172.18.100.205:8000/results/".$out_array[0].".jpg";
    //print_R($output);
}
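
Assuming the script above is saved as upload.php, a test upload could look like this (filename is a placeholder):

curl -F "upload1=@sketch.png" http://yourserver/upload.php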

No module named 'skimage' (python3)

apt install python3-skimage

OpenCV dehazing
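
For context: the code below is the classic dark-channel-prior dehaze, which inverts the standard haze imaging model

I(x) = J(x)*t(x) + A*(1 - t(x))

where I is the hazy input, J the haze-free scene, t the transmission, and A the global airlight. getV1 estimates the atmospheric veil V1 = A*(1 - t); the recovery loop then computes J = (I - V1)/(1 - V1/A).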

import cv2
import numpy as np
  
def zmMinFilterGray(src, r=7):
    '''Minimum-value filter; r is the filter radius'''
    # erosion with a (2r+1)x(2r+1) kernel is an efficient minimum filter
    return cv2.erode(src, np.ones((2*r+1, 2*r+1)))
def guidedfilter(I, p, r, eps):
    '''Guided filter, ported from a reference MATLAB implementation'''
    height, width = I.shape
    m_I = cv2.boxFilter(I, -1, (r,r))
    m_p = cv2.boxFilter(p, -1, (r,r))
    m_Ip = cv2.boxFilter(I*p, -1, (r,r))
    cov_Ip = m_Ip-m_I*m_p
  
    m_II = cv2.boxFilter(I*I, -1, (r,r))
    var_I = m_II-m_I*m_I
  
    a = cov_Ip/(var_I+eps)
    b = m_p-a*m_I
  
    m_a = cv2.boxFilter(a, -1, (r,r))
    m_b = cv2.boxFilter(b, -1, (r,r))
    return m_a*I+m_b
  
def getV1(m, r, eps, w, maxV1):  # input: RGB image with values in [0,1]
    '''Estimate the atmospheric veil V1 = A*(1-t) and the airlight A'''
    V1 = np.min(m, 2)                                        # dark channel
    V1 = guidedfilter(V1, zmMinFilterGray(V1, 7), r, eps)    # refine with the guided filter
    bins = 2000
    ht = np.histogram(V1, bins)                              # estimate the airlight A
    d = np.cumsum(ht[0])/float(V1.size)
    for lmax in range(bins-1, 0, -1):
        if d[lmax] <= 0.999:
            break
    A = np.mean(m, 2)[V1 >= ht[1][lmax]].max()               # brightest pixels within the top 0.1% of the veil

    V1 = np.minimum(V1*w, maxV1)                             # cap the veil

    return V1, A
  
def deHaze(m, r=81, eps=0.001, w=0.95, maxV1=0.80, bGamma=False):
    Y = np.zeros(m.shape)
    V1, A = getV1(m, r, eps, w, maxV1)              # estimate the veil and airlight
    for k in range(3):
        Y[:,:,k] = (m[:,:,k]-V1)/(1-V1/A)           # recover each color channel
    Y = np.clip(Y, 0, 1)
    if bGamma:
        Y = Y**(np.log(0.5)/np.log(Y.mean()))       # optional gamma correction (off by default)
    return Y
  
if __name__ == '__main__':
    m = deHaze(cv2.imread('land.jpg')/255.0)*255
    cv2.imwrite('defog.jpg', m)

Adapted from: https://www.cnblogs.com/zmshy2128/p/6128033.html

Video dehazing

if __name__ == '__main__':
    cap = cv2.VideoCapture("1.mp4")
    while True:
        ret, frame = cap.read()
        if not ret:              # stop when the video ends
            break
        m = deHaze(frame/255.0)  # note: do NOT multiply by 255 here; imshow renders floats in [0,1]
        cv2.imshow("original", frame)
        cv2.imshow("dehazed", m)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    cap.release()
    cv2.destroyAllWindows()
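
To save the output instead of just displaying it, convert the float result back to 8-bit first. A minimal sketch under the same imports (codec, fps, and output name are placeholder choices):

if __name__ == '__main__':
    cap = cv2.VideoCapture("1.mp4")
    out = None
    while True:
        ret, frame = cap.read()
        if not ret:
            break
        m = deHaze(frame/255.0)
        frame8 = (np.clip(m, 0, 1) * 255).astype(np.uint8)  # VideoWriter wants uint8
        if out is None:
            h, w = frame8.shape[:2]
            out = cv2.VideoWriter("defog.mp4", cv2.VideoWriter_fourcc(*'mp4v'), 25.0, (w, h))
        out.write(frame8)
    cap.release()
    if out is not None:
        out.release()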

OpenCV real-time depth (stereo)

#coding=utf-8
import cv2
import numpy as np
import usb.core
import usb.backend.libusb1
import requests
import time
from matplotlib import pyplot as plt

cap = cv2.VideoCapture(1)
backend = usb.backend.libusb1.get_backend(find_library=lambda x: "/usr/lib/libusb-1.0.so")
 # 
dev = usb.core.find(idVendor=0x18e3, idProduct=0x5031, backend=backend)
dev.ctrl_transfer(0x21,0x01,0x0800,0x0600,[0x50,0xff])
dev.ctrl_transfer(0x21,0x01,0x0f00,0x0600,[0x00,0xf6])
dev.ctrl_transfer(0x21,0x01,0x0800,0x0600,[0x25,0x00])
dev.ctrl_transfer(0x21,0x01,0x0800,0x0600,[0x5f,0xfe])
dev.ctrl_transfer(0x21,0x01,0x0f00,0x0600,[0x00,0x03])
dev.ctrl_transfer(0x21,0x01,0x0f00,0x0600,[0x00,0x02])
dev.ctrl_transfer(0x21,0x01,0x0f00,0x0600,[0x00,0x12])
dev.ctrl_transfer(0x21,0x01,0x0f00,0x0600,[0x00,0x04])
dev.ctrl_transfer(0x21,0x01,0x0800,0x0600,[0x76,0xc3])
dev.ctrl_transfer(0x21,0x01,0x0a00,0x0600,[4,0x00])   

firstFrame = None
window_size = 0 
while(1):
        ret, frame = cap.read()
        frame = cv2.resize(frame, (1280, 480), interpolation=cv2.INTER_CUBIC)
        # the camera outputs both views side by side: left half, right half
        frame_left_old = frame[0:480, 0:640]
        frame_left = cv2.cvtColor(frame_left_old, cv2.COLOR_BGR2GRAY)
        frame_right_old = frame[0:480, 640:1280]
        frame_right = cv2.cvtColor(frame_right_old, cv2.COLOR_BGR2GRAY)
        #stereo = cv2.StereoBM_create(numDisparities=32, blockSize=15)
        # (creating the matcher once, outside the loop, would be cheaper)
        stereo = cv2.StereoSGBM_create(minDisparity = 16,
            numDisparities = 64,
            blockSize = 16,
            P1 = 8*3*window_size**2,
            P2 = 32*3*window_size**2,
            disp12MaxDiff = 1,
            uniquenessRatio = 10,
            speckleWindowSize = 100,
            speckleRange = 32
        )
        disparity = stereo.compute(frame_left,frame_right)
        disp = cv2.normalize(disparity, disparity, alpha=0, beta=255, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_8U)
        cv2.imshow("h",disp)
        #plt.imshow(disparity,'gray')
        #plt.show()
        if cv2.waitKey(1) & 0xFF == ord('q'):
                break
cap.release()
cv2.destroyAllWindows()

The latest files are at: https://github.com/endpang/driverless

disparity.py         parameter tuning on still images
disparity_video.py   parameter tuning on frames grabbed from video

These two scripts are for tuning the cameras. Strictly speaking you should do a full stereo calibration; if you can't be bothered, they give you a rough manual calibration.

left_or_right.py     test script

About the SGBM parameters: minDisparity sets where the disparity search starts; numDisparities is how many disparity values are searched; uniquenessRatio is the margin by which the best match cost must beat the second best before a match is accepted.
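
Disparity only becomes metric depth once the rig is calibrated. A minimal conversion sketch, assuming a known focal length f (in pixels) and baseline B (in meters); the numbers below are placeholders, not this camera's calibration:

import numpy as np

def disparity_to_depth(disparity, f=700.0, B=0.06):
    # StereoSGBM returns fixed-point disparities scaled by 16
    d = disparity.astype(np.float32) / 16.0
    depth = np.zeros_like(d)
    valid = d > 0
    depth[valid] = f * B / d[valid]  # Z = f * B / d
    return depth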

OpenCV motion detection

The 10000 below is the sensitivity threshold (minimum contour area); adjust it to taste.

#coding=utf-8
import cv2
import numpy as np
import usb.core
import usb.backend.libusb1
import requests
import time

cap = cv2.VideoCapture(1)
backend = usb.backend.libusb1.get_backend(find_library=lambda x: "/usr/lib/libusb-1.0.so")
 # 
dev = usb.core.find(idVendor=0x18e3, idProduct=0x5031, backend=backend)
dev.ctrl_transfer(0x21,0x01,0x0800,0x0600,[0x50,0xff])
dev.ctrl_transfer(0x21,0x01,0x0f00,0x0600,[0x00,0xf6])
dev.ctrl_transfer(0x21,0x01,0x0800,0x0600,[0x25,0x00])
dev.ctrl_transfer(0x21,0x01,0x0800,0x0600,[0x5f,0xfe])
dev.ctrl_transfer(0x21,0x01,0x0f00,0x0600,[0x00,0x03])
dev.ctrl_transfer(0x21,0x01,0x0f00,0x0600,[0x00,0x02])
dev.ctrl_transfer(0x21,0x01,0x0f00,0x0600,[0x00,0x12])
dev.ctrl_transfer(0x21,0x01,0x0f00,0x0600,[0x00,0x04])
dev.ctrl_transfer(0x21,0x01,0x0800,0x0600,[0x76,0xc3])
dev.ctrl_transfer(0x21,0x01,0x0a00,0x0600,[4,0x00])   

firstFrame = None

while(1):
        ret, frame = cap.read()
        frame = cv2.resize(frame, (1280, 480), interpolation=cv2.INTER_CUBIC)
        frame_left = frame[0:480,0:640]
        frame_right = frame[0:480,640:1280]
        gray = cv2.cvtColor(frame_left, cv2.COLOR_BGR2GRAY)         
        gray = cv2.GaussianBlur(gray, (21, 21), 0)
        if firstFrame is None:  
            firstFrame = gray
            continue
    
        frameDelta = cv2.absdiff(firstFrame, gray)
        thresh = cv2.threshold(frameDelta, 25, 255, cv2.THRESH_BINARY)[1] 
        thresh = cv2.dilate(thresh, None, iterations=2)
        # OpenCV 3 signature; in OpenCV 4, findContours returns only (contours, hierarchy)
        (_, cnts, _) = cv2.findContours(thresh.copy(), cv2.RETR_CCOMP, cv2.CHAIN_APPROX_SIMPLE)
       
        g = frame.copy()
        ii = 0
        for c in cnts:
            if cv2.contourArea(c) < 10000:
                continue
            ii = 1
            #(x, y, w, h) = cv2.boundingRect(c)
            #cv2.rectangle(g, (x, y), (x + w, y + h), (0, 255, 0), 2)
        if ii == 1:
            # motion detected: save a snapshot and upload it
            url = "http://br.bj.oupeng.com:10080"
            t = time.time()
            cv2.imwrite("./"+str(t)+".jpg", g)
            files = {'file123': (str(t)+'.png', open("./"+str(t)+".jpg", 'rb'))}
            data = {'time': str(t)}
            r = requests.post(url, files=files)  # , data=data)
            print(r.text)
        cv2.imshow("br",g)
        firstFrame = gray.copy() 
        ## (commented out) drop the green channel to produce a red-blue image
        #zeros = np.zeros(frame_2.shape[:2], dtype = "uint8")
        #merged = cv2.merge([b,zeros,r2])
        #cv2.imshow("br",merged)
        
        if cv2.waitKey(1) & 0xFF == ord('q'):
                break
cap.release()
cv2.destroyAllWindows()

TOF in Python

I looked into how Lighthouse and the 图漾 (Percipio) depth cameras work; they appear to combine TOF (time of flight) with optical flow.

Simulating the optical-flow part:

import cv2
import numpy as np
cap = cv2.VideoCapture(0)

ret, frame1 = cap.read()
prvs = cv2.cvtColor(frame1,cv2.COLOR_BGR2GRAY)
hsv = np.zeros_like(frame1)
hsv[...,1] = 255

while(1):
    ret, frame2 = cap.read()
    next = cv2.cvtColor(frame2,cv2.COLOR_BGR2GRAY)

    flow = cv2.calcOpticalFlowFarneback(prvs,next, None, 0.5, 3, 15, 3, 5, 1.2, 0)

    mag, ang = cv2.cartToPolar(flow[...,0], flow[...,1])
    hsv[...,0] = ang*180/np.pi/2
    hsv[...,2] = cv2.normalize(mag,None,0,255,cv2.NORM_MINMAX)
    rgb = cv2.cvtColor(hsv,cv2.COLOR_HSV2BGR)

    cv2.imshow('frame2',rgb)
    k = cv2.waitKey(30) & 0xff
    if k == 27:
        break
    elif k == ord('s'):
        cv2.imwrite('opticalfb.png',frame2)
        cv2.imwrite('opticalhsv.png',rgb)
    prvs = next

cap.release()
cv2.destroyAllWindows()

Result:

Video: https://m.youtube.com/watch?v=5jur04bJDkM&feature=youtu.be

OpenCV: stereo camera to red-blue 3D (Python)

The cheap board I bought does red-cyan in its "red-blue" mode... and I don't own red-cyan glasses.

In the spirit of building rather than buying (a pair of glasses is only about ten RMB, really), I rolled my own.

Splitting the camera image into left and right views

I always assumed there would be some split-like function for this, and searched for a long time without finding one...

It turns out to be as simple as slicing. Surprise!

import cv2
import numpy as np

cap = cv2.VideoCapture(0)
while(1):
        ret, frame = cap.read()
        frame_new = frame[0:400,0:400]   # plain NumPy slicing: frame[rows, cols]
        frame_2 = frame[400:800,0:400]
        cv2.imshow("capture",frame_new)
        cv2.imshow("capture2",frame_2)
        if cv2.waitKey(1) & 0xFF == ord('q'):
                break
cap.release()
cv2.destroyAllWindows()

The final code

The idea: take the red channel from one camera and the blue channel from the other, and drop green entirely.

My camera is too low-resolution to really show the effect.

import cv2
import numpy as np
import usb.core
import usb.backend.libusb1
cap = cv2.VideoCapture(1)
backend = usb.backend.libusb1.get_backend(find_library=lambda x: "/usr/lib/libusb-1.0.so")
 # 
dev = usb.core.find(idVendor=0x18e3, idProduct=0x5031, backend=backend)
dev.ctrl_transfer(0x21,0x01,0x0800,0x0600,[0x50,0xff])
dev.ctrl_transfer(0x21,0x01,0x0f00,0x0600,[0x00,0xf6])
dev.ctrl_transfer(0x21,0x01,0x0800,0x0600,[0x25,0x00])
dev.ctrl_transfer(0x21,0x01,0x0800,0x0600,[0x5f,0xfe])
dev.ctrl_transfer(0x21,0x01,0x0f00,0x0600,[0x00,0x03])
dev.ctrl_transfer(0x21,0x01,0x0f00,0x0600,[0x00,0x02])
dev.ctrl_transfer(0x21,0x01,0x0f00,0x0600,[0x00,0x12])
dev.ctrl_transfer(0x21,0x01,0x0f00,0x0600,[0x00,0x04])
dev.ctrl_transfer(0x21,0x01,0x0800,0x0600,[0x76,0xc3])
dev.ctrl_transfer(0x21,0x01,0x0a00,0x0600,[4,0x00])   

while(1):
        ret, frame = cap.read()
        frame = cv2.resize(frame, (1280, 480), interpolation=cv2.INTER_CUBIC)
        frame_new = frame[0:480,0:640]
        b, g, r = cv2.split(frame_new)     # channels of the left view
        frame_2 = frame[0:480,640:1280]
        b2, g2, r2 = cv2.split(frame_2)    # channels of the right view
        zeros = np.zeros(frame_2.shape[:2], dtype = "uint8")
        merged = cv2.merge([b, zeros, r2]) # BGR: blue from the left eye, no green, red from the right
        cv2.imshow("br",merged)  
        if cv2.waitKey(1) & 0xFF == ord('q'):
                break
cap.release()
cv2.destroyAllWindows()

OpenPose: connecting a 3D network camera over a video stream

First, you need a 3D camera. Something like this.

Mine was 28 RMB... 640 x 480, colors a bit off, so not exactly a recommendation, but worth a look if you are short on cash. The version where you solder the USB cable yourself is 18.

If you only want to view it locally, see this article.

The code below streams the camera's video over the network (in practice an MJPEG stream served over HTTP by Flask, which OpenPose's --ip_camera flag can read):

import cv2
import usb.core
import usb.backend.libusb1
from flask import Flask, render_template, Response


class VideoCamera(object):
    def __init__(self):
        # Using OpenCV to capture from device 0. If you have trouble capturing
        # from a webcam, comment the line below out and use a video file
        # instead.
        self.video = cv2.VideoCapture(1)
        # If you decide to use video.mp4, you must have this file in the folder
        # as the main.py.
        # self.video = cv2.VideoCapture('video.mp4')
        backend = usb.backend.libusb1.get_backend(find_library=lambda x: "/usr/lib/libusb-1.0.so")
        dev = usb.core.find(idVendor=0x18e3, idProduct=0x5031, backend=backend)
        # simulate the SET_CUR sequence that switches the camera's output mode
        dev.ctrl_transfer(0x21,0x01,0x0800,0x0600,[0x50,0xff])
        dev.ctrl_transfer(0x21,0x01,0x0f00,0x0600,[0x00,0xf6])
        dev.ctrl_transfer(0x21,0x01,0x0800,0x0600,[0x25,0x00])
        dev.ctrl_transfer(0x21,0x01,0x0800,0x0600,[0x5f,0xfe])
        dev.ctrl_transfer(0x21,0x01,0x0f00,0x0600,[0x00,0x03])
        dev.ctrl_transfer(0x21,0x01,0x0f00,0x0600,[0x00,0x02])
        dev.ctrl_transfer(0x21,0x01,0x0f00,0x0600,[0x00,0x12])
        dev.ctrl_transfer(0x21,0x01,0x0f00,0x0600,[0x00,0x04])
        dev.ctrl_transfer(0x21,0x01,0x0800,0x0600,[0x76,0xc3])
        dev.ctrl_transfer(0x21,0x01,0x0a00,0x0600,[2,0x00])
        # output mode: 1 = left, 2 = right, 3 = red & blue, 4 = side by side
        self.dev = dev

    def __del__(self):
        self.video.release()

    def get_frame(self):
        success, image = self.video.read()
        image = cv2.resize(image, (640, 480), interpolation=cv2.INTER_CUBIC)
        # We are using Motion JPEG, but OpenCV defaults to capture raw images,
        # so we must encode it into JPEG in order to correctly display the
        # video stream.
        ret, jpeg = cv2.imencode('.jpg', image)
        return jpeg.tobytes()


app = Flask(__name__)

@app.route('/')
def index():
    return render_template('index.html')

def gen(camera):
    while True:
        frame = camera.get_frame()
        yield (b'--frame\r\n'
               b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n\r\n')

@app.route('/video_feed')
def video_feed():
    return Response(gen(VideoCamera()),
                    mimetype='multipart/x-mixed-replace; boundary=frame')

if __name__ == '__main__':
    app.run(host='0.0.0.0', debug=True)

Then try visiting http://127.0.0.1:5000/video_feed

If that works, point OpenPose at the stream:

./build/examples/openpose/openpose.bin --ip_camera http://xxx.xxx.xxx.xxx:5000/video_feed

OK

Viewing http://127.0.0.1:5000/video_feed directly in a browser also works, but the streaming side does not yet support multiple concurrent clients; I will get to that when there is time.

How OpenPose handles the 3D video stream is a story for another day.