Road Damage Detector 调试笔记

git clone git@github.com:sekilab/RoadDamageDetector.git

wget https://s3-ap-northeast-1.amazonaws.com/mycityreport/RoadDamageDataset.tar.gz
wget https://s3-ap-northeast-1.amazonaws.com/mycityreport/trainedModels.tar.gz

tar -zxf ./RoadDamageDataset.tar.gz
tar -zxf ./trainedModels.tar.gz

cd RoadDamageDataset

#删除掉 "Adachi", "Chiba", "Ichihara", "Muroran", "Nagakute", "Numazu", "Sumida"  子文件夹  /Annotations/ 中  ._ 开头的文件。否则执行会报错。

改写了用cv.imshow 展示的 py代码。

from xml.etree import ElementTree
from xml.dom import minidom
import collections

import os

import matplotlib.pyplot as plt
import matplotlib as matplot
import seaborn as sns
#%matplotlib inline

# Dataset root and the fixed set of road-damage class labels (D00..D44).
base_path = os.getcwd() + '/RoadDamageDataset/'
print(base_path)
damageTypes = ["D00", "D01", "D10", "D11", "D20", "D40", "D43", "D44"]

# govs corresponds to municipality name.
govs = ["Adachi", "Chiba", "Ichihara", "Muroran", "Nagakute", "Numazu", "Sumida"]

# Count the total number of annotated images and object labels across all
# municipalities.
cls_names = []
total_images = 0
for gov in govs:

    annotation_dir = base_path + gov + '/Annotations/'

    for file in os.listdir(annotation_dir):

        # Skip macOS metadata files: they are not annotations, so they must
        # not be counted as images either (the original counted them).
        if file == '.DS_Store':
            continue
        total_images = total_images + 1
        # Parse the Pascal-VOC style XML; 'with' closes the handle, which the
        # original version leaked.
        with open(annotation_dir + file) as infile_xml:
            tree = ElementTree.parse(infile_xml)
        for obj in tree.getroot().iter('object'):
            # Each <object><name> is one damage-type label.
            cls_names.append(obj.find('name').text)
print("total")
print("# of images:" + str(total_images))
print("# of labels:" + str(len(cls_names)))

# The number of labels per class over the whole dataset.
# ('collections' is already imported at the top of the file; the duplicate
# import that used to sit here was removed.)
count_dict = collections.Counter(cls_names)
cls_count = []
for damageType in damageTypes:
    print(str(damageType) + ' : ' + str(count_dict[damageType]))
    cls_count.append(count_dict[damageType])

# Bar chart of the per-class counts.
sns.set_palette("winter", 8)
sns.barplot(damageTypes, cls_count)

# The number of images / labels / per-class labels for each municipality.
for gov in govs:
    cls_names = []
    total_images = 0
    annotation_dir = base_path + gov + '/Annotations/'

    for file in os.listdir(annotation_dir):

        # macOS metadata is neither an image nor an annotation — skip it
        # before counting (the original counted it as an image).
        if file == '.DS_Store':
            continue
        total_images = total_images + 1
        # 'with' closes the XML file, which the original version leaked.
        with open(annotation_dir + file) as infile_xml:
            tree = ElementTree.parse(infile_xml)
        for obj in tree.getroot().iter('object'):
            cls_names.append(obj.find('name').text)
    print(gov)
    print("# of images:" + str(total_images))
    print("# of labels:" + str(len(cls_names)))

    count_dict = collections.Counter(cls_names)
    cls_count = []
    for damageType in damageTypes:
        print(str(damageType) + ' : ' + str(count_dict[damageType]))
        cls_count.append(count_dict[damageType])

    print('**************************************************')


import cv2
import random

def draw_images(image_file):
    """Load the JPEG matching annotation file *image_file*, draw every
    labelled bounding box plus its class name on it, and return the image.

    The municipality is encoded as the filename prefix before the first '_',
    e.g. 'Adachi_20170906093835.xml' -> Adachi.
    """
    gov = image_file.split('_')[0]
    img_path = base_path + gov + '/JPEGImages/' + image_file.split('.')[0] + '.jpg'
    img = cv2.imread(img_path)
    print(img_path)
    # Parse the annotation straight from its path so no file handle is
    # leaked (the original opened the file and never closed it).
    tree = ElementTree.parse(base_path + gov + '/Annotations/' + image_file)
    root = tree.getroot()

    font = cv2.FONT_HERSHEY_SIMPLEX
    for obj in root.iter('object'):
        cls_name = obj.find('name').text
        xmlbox = obj.find('bndbox')
        xmin = int(xmlbox.find('xmin').text)
        xmax = int(xmlbox.find('xmax').text)
        ymin = int(xmlbox.find('ymin').text)
        ymax = int(xmlbox.find('ymax').text)

        # Class label just above the box, then the green bounding box itself.
        cv2.putText(img, cls_name, (xmin, ymin - 10), font, 1, (0, 255, 0), 2, cv2.LINE_AA)
        cv2.rectangle(img, (xmin, ymin), (xmax, ymax), (0, 255, 0), 3)
    return img

# For each damage type, pick one random trainval image that contains it,
# draw its annotations via draw_images() and show it in an OpenCV window.
for damageType in damageTypes:
    tmp = []
    for gov in govs:
        file = open(base_path + gov + '/ImageSets/Main/%s_trainval.txt' %damageType, 'r')

        # Each line looks like "<image_id>  <flag>"; flag '1' means the image
        # contains this damage type.  Index [2] relies on the double space
        # between id and flag (split(' ') yields ['id', '', '1']) —
        # NOTE(review): would break on a single-space file; confirm format.
        for line in file:
            line = line.rstrip('\n').split('/')[-1]
            #print(line)
            if line.split(' ')[2] == '1':
                tmp.append(line.split(' ')[0]+'.xml')
        
    #print(tmp)   
    random.shuffle(tmp)
    fig = plt.figure(figsize=(6,6))
    # tmp[0:1]: show only the first (random) image per damage type.
    for number, image in enumerate(tmp[0:1]):
      #if(number > 0):
        print('number & image :' + str(number) + image)
        print('The image including ' + damageType)
        img = draw_images(image)
        cv2.imshow(damageType,img)


# Keep all windows open until 'q' is pressed (replaces the original
# matplotlib display, kept commented out below).
while(1):
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break 
	#plt.subplot(1,1,number)
        #plt.axis('off')
        #plt.title('The image including ' + damageType)
        #plt.imshow(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))

Mask R-CNN 安装笔记

apt-get install docker.io

地址在  :https://github.com/NVIDIA/nvidia-docker

更新源巨慢。。。

直接下载安装 ,ubuntu 需要用 alien

wget  https://github.com/NVIDIA/nvidia-docker/releases/download/v1.0.1/nvidia-docker-1.0.1-1.x86_64.rpm 

apt-get install alien
alien -i nvidia-docker-1.0.1-1.x86_64.rpm
nvidia-docker run caffe2ai/caffe2:c2v0.8.1.cuda8.cudnn7.ubuntu16.04

报错

docker: Error response from daemon: create nvidia_driver_387.26: create nvidia_driver_387.26: Error looking up volume plugin nvidia-docker: legacy plugin: plugin not found.

sudo nvidia-docker-plugin

nvidia-docker-plugin | 2018/01/25 18:30:47 Loading NVIDIA unified memory
nvidia-docker-plugin | 2018/01/25 18:30:47 Loading NVIDIA management library
nvidia-docker-plugin | 2018/01/25 18:30:47 Discovering GPU devices
nvidia-docker-plugin | 2018/01/25 18:30:48 Error: cuda: out of memory

sudo systemctl start nvidia-docker 报错

Jan 26 11:51:34 bj-s-19 systemd[1]: nvidia-docker.service: Main process exited, code=exited, status=217/USER
Jan 26 11:51:34 bj-s-19 systemd[1]: nvidia-docker.service: Control process exited, code=exited status=217
Jan 26 11:51:34 bj-s-19 systemd[1]: nvidia-docker.service: Control process exited, code=exited status=217

vim /usr/lib/systemd/system/nvidia-docker.service

把 USER (nvidia-docker) 换成 root

sudo systemctl start nvidia-docker   成功。

继续  nvidia-docker run caffe2ai/caffe2:c2v0.8.1.cuda8.cudnn7.ubuntu16.04

报错,感觉是caffe docker image 下错了

Detectron ops lib not found at '/usr/local/lib/libcaffe2_detectron_ops_gpu.so'; make sure that your Caffe2 version includes Detectron module

重新下

nvidia-docker pull caffe2ai/caffe2

nvidia-docker run  -d -it caffe2ai/caffe2  #退出 ctrl + p + q (pq按顺序点)
nvidia-docker run -it caffe2ai/caffe2:latest python -m caffe2.python.operator_test.relu_op_test  #测试新docker image

问题依旧,搜了很久也没搞清楚 libcaffe2_detectron_ops_gpu.so 这个东西是怎么来的。

============  重新开始的分割线 ==============

自己编一下 caffe2 试试。。。

准备环境

https://caffe2.ai/docs/getting-started.html?platform=ubuntu&configuration=compile

http://blog.csdn.net/zziahgf/article/details/79141879

git clone --recursive https://github.com/caffe2/caffe2.git && cd caffe2
cd docker/ubuntu-16.04-cuda8-cudnn6-all-options
sed -i -e 's/ --branch v0.8.1//g' Dockerfile 
docker build -t caffe2:cuda8-cudnn6-all-options .
cd $DETECTRON/docker
docker build -t detectron:c2-cuda8-cudnn6 .

run 成功。

nvidia-docker run --rm -it detectron:c2-cuda8-cudnn6 python2 tests/test_batch_permutation_op.py
E0131 17:08:40.230015     1 init_intrinsics_check.cc:54] CPU feature avx is present on your machine, but the Caffe2 binary is not compiled with it. It means you may not get the full speed of your CPU.
E0131 17:08:40.230031     1 init_intrinsics_check.cc:54] CPU feature avx2 is present on your machine, but the Caffe2 binary is not compiled with it. It means you may not get the full speed of your CPU.
E0131 17:08:40.230036     1 init_intrinsics_check.cc:54] CPU feature fma is present on your machine, but the Caffe2 binary is not compiled with it. It means you may not get the full speed of your CPU.
..
----------------------------------------------------------------------
Ran 2 tests in 0.745s

OK
nvidia-docker run -d -it detectron:c2-cuda8-cudnn6
nvidia-docker ps
nvidia-docker attach xxxx #上一步查出来的sha码。

python2 tools/infer_simple.py \
    --cfg configs/12_2017_baselines/e2e_mask_rcnn_R-101-FPN_2x.yaml \
    --output-dir /tmp/detectron-visualizations \
    --image-ext jpg \
    --wts https://s3-us-west-2.amazonaws.com/detectron/35861858/12_2017_baselines/e2e_mask_rcnn_R-101-FPN_2x.yaml.02_32_51.SgT4y1cO/output/train/coco_2014_train:coco_2014_valminusminival/generalized_rcnn/model_final.pkl \
    demo

测试效果。

OpenCV iOS 开发笔记

GITHUB:https://github.com/endpang/opencvdemo

实现了寻找边缘和自定义上色。

  • 添加opencv.framework后编译报错
Expected identifier

解决办法

把 NO 改为
NO_EXPOSURE_COMPENSATOR = 0
  • opencv 依赖的库
libc++.tbd
AVFoundation.framework
CoreImage.framework
CoreGraphics.framework
QuartzCore.framework
Accelerate.framework

//摄像头
CoreVideo.framework
CoreMedia.framework
AssetsLibrary.framework
#import "ViewController.h"
#import <opencv2/opencv.hpp>
#import <opencv2/videoio/cap_ios.h>

//@interface ViewController ()


// View controller that shows a live camera feed processed frame-by-frame by
// OpenCV.  Conforms to CvVideoCameraDelegate to receive each captured frame.
@interface ViewController ()<CvVideoCameraDelegate>{
    UIImageView *cameraView;    // full-screen view the camera renders into
    CvVideoCamera *videoCamera; // OpenCV wrapper around AVFoundation capture
}
@end

@implementation ViewController

- (void)viewDidLoad {
    [super viewDidLoad];
    // Do any additional setup after loading the view, typically from a nib.
    // Full-screen image view that CvVideoCamera draws processed frames into.
    cameraView = [[UIImageView alloc] initWithFrame:self.view.frame];
    [self.view addSubview:cameraView];
    
    // Configure capture: front camera, 1080p, portrait, 60 FPS, colour.
    videoCamera = [[CvVideoCamera alloc] initWithParentView:cameraView];
    videoCamera.defaultAVCaptureDevicePosition = AVCaptureDevicePositionFront;
    videoCamera.defaultAVCaptureSessionPreset = AVCaptureSessionPreset1920x1080;
    videoCamera.defaultAVCaptureVideoOrientation = AVCaptureVideoOrientationPortrait;
    videoCamera.defaultFPS = 60;
    videoCamera.grayscaleMode = NO;
    videoCamera.delegate = self; // processImage: is called for every frame
}
// Start capturing only once the view is actually on screen.
- (void)viewDidAppear:(BOOL)animated {
    [super viewDidAppear:animated];
    [videoCamera start];
}

// Stop the camera before the view goes away to release the capture session.
- (void)viewWillDisappear:(BOOL)animated {
    [super viewWillDisappear:animated];
    [videoCamera stop];
}

#pragma mark -  CvVideoCameraDelegate
// Called by CvVideoCamera for every captured frame; edit `image` in place to
// change what is displayed.
- (void)processImage:(cv::Mat&)image {
    // Per-frame processing: produce a grayscale negative of the feed.
    cv::Mat image_copy;
    // Convert the frame to grayscale (COLOR_BGR2GRAY on the buffer
    // CvVideoCamera delivers).
    cv::cvtColor(image, image_copy, cv::COLOR_BGR2GRAY);
    // Invert the image (negative effect).
    cv::bitwise_not(image_copy, image_copy);
    // Convert back to 3 channels and write into `image` for display.
    cv::cvtColor(image_copy, image, cv::COLOR_GRAY2BGR);
}


// Standard UIKit memory-pressure hook; nothing extra to release here.
- (void)didReceiveMemoryWarning {
    [super didReceiveMemoryWarning];
    // Dispose of any resources that can be recreated.
}

plist 里加入  Privacy - Camera Usage Description

服务器端代码

8000 端口的服务是 https://github.com/lllyasviel/style2paints

k.png 是一张纯白色 png

<?php
/**/
//print_R($_FILES);
// Save the uploaded file (form field "upload1") into photo/ and, on success,
// forward it to the style2paints colorization service via my_post().
$imgname = $_FILES['upload1']['name'];
$tmp = $_FILES['upload1']['tmp_name'];
$filepath = 'photo/';
if(move_uploaded_file($tmp,$filepath.$imgname)){
    //echo "上传成功";
    my_post($filepath.$imgname);
}else{
    // Upload failed (echoed in Chinese for the original UI).
    echo "上传失败";
}
//*/
//my_post("photo/20180118172008.png");
//my_post("photo/20180118172008.png");
// Forward the image at $filename to a local style2paints (v1) server and echo
// the URL of the colorized result.
//
// The /paint endpoint expects URL-encoded form fields:
//   sketch / hint / reference : data-URI base64 PNGs (hint is a blank white
//                               k.png, reference is the string "null")
//   method / algrithom / ...  : options.  NOTE: "algrithom" is the server's
//                               own field spelling — do not "fix" it.
// Response format (presumably "<result_id>*..."): the id before the first '*'
// names the result image — TODO confirm against the server code.
function my_post($filename){
    $url = "http://172.18.100.205:8000/paint";

    // Headers copied from a browser session against the same server; the
    // Cookie carries the scale/style/noise options.
    $headers[] = 'Origin: http://172.18.100.205:8000';
    $headers[] = 'Accept-Encoding: gzip, deflate';
    $headers[] = 'Accept-Language: zh-CN,zh;q=0.9,en;q=0.8';
    $headers[] = 'User-Agent: Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36';
    $headers[] = 'Content-Type: application/x-www-form-urlencoded;';
    $headers[] = 'Accept: */*';
    $headers[] = 'Referer: http://172.18.100.205:8000/';
    $headers[] = 'Cookie: scale=2; style=photo; noise=2';
    $headers[] = 'Connection: keep-alive';
    //$post_data = array ("username" => "bob","key" => "12345");
    $ch = curl_init();
    $post_data = [];
    $post_data["sketchDenoise"] = "true";
    $post_data["resultDenoise"] = "true";
    $post_data["algrithom"] = "quality";
    $post_data["method"] = "colorize";
    $post_data["sketchID"] = "new";
    $post_data["referenceID"] = "no";
    //$b6 = "data%3Aimage%2Fpng%3Bbase64%2C";
    $b5 = "data:image/jpeg;base64,";
    $b6 = "data:image/png;base64,";
    //echo base64_encode($b6);
    // k.png is a plain white PNG used as an empty colour hint.
    $post_data["hint"] = $b6.base64_encode(file_get_contents("k.png"));
    $post_data["reference"] ="null" ;//$b6.base64_encode(file_get_contents("108_copy.png"));
    //$post_data["reference"] = null;
    $post_data["sketch"] = $b6.base64_encode(file_get_contents($filename));
    curl_setopt($ch, CURLOPT_HTTPHEADER, $headers);
    curl_setopt($ch, CURLOPT_URL, $url);
    curl_setopt($ch, CURLOPT_RETURNTRANSFER, 1);
    // Send as POST.
    curl_setopt($ch, CURLOPT_POST, 1);
    // POST body variables.
    //echo $post_data["sketch"];
    //print_R(http_build_query($post_data));
    curl_setopt($ch, CURLOPT_POSTFIELDS, http_build_query($post_data));

    $output = curl_exec($ch);
    $out_array = explode('*',$output);

    curl_close($ch);
    echo "http://172.18.100.205:8000/results/".$out_array[0].".jpg";
    //print_R($output);
}

No module named ‘skimage’   (python3)

apt install python3-skimage

视频物体分割–One-Shot Video Object Segmentation

git clone git@github.com:fperazzi/davis-2017.git

./configure.sh && make -C build/release

pip install virtualenv virtualenvwrapper
source /usr/local/bin/virtualenvwrapper.sh
mkvirtualenv davis
pip install -r python/requirements.txt
export PYTHONPATH=$(pwd)/python/lib

example

cd python/experiments
python read_write_segmentation.py

报错

Traceback (most recent call last):
  File "read_write_segmentation.py", line 20, in <module>
    from   davis import cfg,io,DAVISLoader
  File "/root/game/davis-2017/python/lib/davis/__init__.py", line 13, in <module>
    from misc import log     # Logger
ImportError: No module named 'misc'

./python/lib/davis/misc/config.py

./python/lib/davis/__init__.py

有一些文件引用错了,改之。。。又报错

[WARNING][11-01-2018 17:27:26] Temporal stability not available
Traceback (most recent call last):
  File "./python/experiments/read_write_segmentation.py", line 23, in <module>
    db = DAVISLoader(year=cfg.YEAR,phase=cfg.PHASE)
  File "/root/game/davis-2017/python/lib/davis/dataset/loader.py", line 73, in __init__
    for s in self._db_sequences]
  File "/root/game/davis-2017/python/lib/davis/dataset/base.py", line 97, in __init__
    osp.join(cfg.PATH.SEQUENCES,name),regex)
  File "/root/game/davis-2017/python/lib/davis/dataset/base.py", line 78, in __init__
    self.name,len(self),cfg.SEQUENCES[self.name].num_frames))
Exception: Incorrect frames number for sequence 'bike-packing': found 0, expected 69.

原因是找不到文件

到data 目录下执行两个 sh 文件 1G 左右大小的文件。

get_davis_results.sh
get_davis.sh
python python/tools/visualize.py

按住空格或者回车键。

tensorflow 版本:https://github.com/scaelles/OSVOS-TensorFlow

OpenCV 去雾

import cv2
import numpy as np
  
def zmMinFilterGray(src, r=7):
    """Minimum filter over a (2r+1)x(2r+1) window; r is the filter radius.

    Used to compute the dark channel.  cv2.erode with an all-ones kernel is
    exactly a sliding-window minimum and is far faster than the recursive
    pure-numpy implementation this replaces (removed dead commented-out code).
    """
    return cv2.erode(src, np.ones((2 * r + 1, 2 * r + 1)))
def guidedfilter(I, p, r, eps):
    """Guided filter (He et al.), ported from the reference MATLAB code.

    I   -- single-channel float guidance image
    p   -- filtering input image (same shape as I)
    r   -- box-filter window size
    eps -- regularization; prevents division by zero in flat regions

    Returns the filtered image m_a*I + m_b.  (The unused height/width
    unpacking from the original was removed.)
    """
    m_I = cv2.boxFilter(I, -1, (r, r))
    m_p = cv2.boxFilter(p, -1, (r, r))
    m_Ip = cv2.boxFilter(I * p, -1, (r, r))
    cov_Ip = m_Ip - m_I * m_p

    m_II = cv2.boxFilter(I * I, -1, (r, r))
    var_I = m_II - m_I * m_I

    # Per-window linear coefficients, then box-filtered for smoothness.
    a = cov_Ip / (var_I + eps)
    b = m_p - a * m_I

    m_a = cv2.boxFilter(a, -1, (r, r))
    m_b = cv2.boxFilter(b, -1, (r, r))
    return m_a * I + m_b
  
def getV1(m, r, eps, w, maxV1):  # input: RGB image, values in [0, 1]
    '''Estimate the atmospheric veil V1 and atmospheric light A, V1 = 1-t/A.

    m is an HxWx3 float image in [0, 1]; r/eps parameterize the guided
    filter; w < 1 keeps a little haze for depth perception; maxV1 caps the
    veil so bright regions are not over-corrected.
    '''
    V1 = np.min(m,2)                                         # dark channel image
    V1 = guidedfilter(V1, zmMinFilterGray(V1,7), r, eps)     # refine with the guided filter
    bins = 2000
    # Estimate atmospheric light A: find the dark-channel value at the
    # 99.9th percentile via a cumulative histogram, then take the max mean
    # brightness among pixels at least that hazy.
    ht = np.histogram(V1, bins)
    d = np.cumsum(ht[0])/float(V1.size)
    for lmax in range(bins-1, 0, -1):
        if d[lmax]<=0.999:
            break
    A  = np.mean(m,2)[V1>=ht[1][lmax]].max()
          
    V1 = np.minimum(V1*w, maxV1)                   # clamp the value range
      
    return V1,A
  
def deHaze(m, r=81, eps=0.001, w=0.95, maxV1=0.80, bGamma=False):
    """Dehaze an HxWx3 float image in [0, 1] using the dark-channel prior.

    r/eps feed the guided filter, w controls how much haze is removed,
    maxV1 caps the veil, and bGamma optionally applies gamma correction
    to re-center the mean brightness at 0.5.  Returns a float image in [0, 1].
    """
    veil, airlight = getV1(m, r, eps, w, maxV1)  # atmospheric veil + light
    restored = np.zeros(m.shape)
    # Colour correction, channel by channel: invert the haze model.
    for channel in range(3):
        restored[:, :, channel] = (m[:, :, channel] - veil) / (1 - veil / airlight)
    restored = np.clip(restored, 0, 1)
    if bGamma:
        # Optional gamma correction (off by default).
        restored = restored ** (np.log(0.5) / np.log(restored.mean()))
    return restored
  
if __name__ == '__main__':
    # Dehaze one image: scale to [0,1] for deHaze, back to [0,255] to save.
    m = deHaze(cv2.imread('land.jpg')/255.0)*255
    cv2.imwrite('defog.jpg', m)

转自:https://www.cnblogs.com/zmshy2128/p/6128033.html

视频去雾

if __name__ == '__main__':
    # Dehaze a video stream frame by frame and show original vs. result.
    cap = cv2.VideoCapture("1.mp4")
    while(1):
        ret, frame = cap.read()
        # Stop cleanly at end of video: cap.read() returns (False, None),
        # and frame/255.0 on None would raise a TypeError.
        if not ret:
            break
        m = deHaze(frame/255.0)  # note: do NOT multiply by 255 here; imshow
                                 # displays float images in the [0,1] range
        cv2.imshow("yuan",frame)
        cv2.imshow("this",m)
        if cv2.waitKey(1) & 0xFF == ord('q'):
             break
cap.release()
cv2.destroyAllWindows()
安吉
感谢 @静静静姐姐 的照片

以太币私有链搭建教程

sudo apt-get update

sudo apt-get install software-properties-common

sudo add-apt-repository -y ppa:ethereum/ethereum

sudo add-apt-repository -y ppa:ethereum/ethereum-dev

sudo apt-get update

sudo apt-get install ethereum

geth #启动

创始块配置文件 piccgenesis.json

{
  "config": {
        "chainId": 10,
        "homesteadBlock": 0,
        "eip155Block": 0,
        "eip158Block": 0
    },
  "alloc"      : {},
  "coinbase"   : "0x0000000000000000000000000000000000000000",
  "difficulty" : "0x02000000",
  "extraData"  : "",
  "gasLimit"   : "0x2fefd8",
  "nonce"      : "0x0000000000000042",
  "mixhash"    : "0x0000000000000000000000000000000000000000000000000000000000000000",
  "parentHash" : "0x0000000000000000000000000000000000000000000000000000000000000000",
  "timestamp"  : "0x00"
}

mixhash

与nonce配合用于挖矿,由上一个区块的一部分生成的hash。注意他和nonce的设置需要满足以太坊的Yellow paper, 4.3.4. Block Header Validity, (44)章节所描述的条件。.
nonce nonce就是一个64位随机数,用于挖矿,注意他和mixhash的设置需要满足以太坊的Yellow paper, 4.3.4. Block Header Validity, (44)章节所描述的条件。
difficulty 设置当前区块的难度,如果难度过大,cpu挖矿就很难,这里设置较小难度
alloc 用来预置账号以及账号的以太币数量,因为私有链挖矿比较容易,所以我们不需要预置有币的账号,需要的时候自己创建即可以。
coinbase 矿工的账号,随便填
timestamp 设置创世块的时间戳
parentHash 上一个区块的hash值,因为是创世块,所以这个值是0
extraData 附加信息,随便填,可以填你的个性信息
gasLimit 该值设置对GAS的消耗总量限制,用来限制区块能包含的交易信息总和,因为我们是私有链,所以填最大。
geth  --datadir "Your/Path" init piccgenesis.json

创建数据存放地址并初始化创世块

geth --identity "YourName"  --rpc  --rpccorsdomain "*" --datadir "Your/Path" --port "30303"  --rpcapi "db,eth,net,web3" --networkid  95518  console

#如果nohup 请去掉 console

从第二个节点连接。创建目录并将piccgenesis.json拷贝进去。

geth --datadir . init ./piccgenesis.json  #初始化目录
geth --datadir . --networkid 95518 --ipcdisable --port 61911 --rpcport 8101 --bootnodes "enode://8a0127669dc890530d868aa2a08efaff12933c885da21882ed935f563b7cdb7aa3497aa24807bc348c143017dba4c4433d97e64571a1962e257726a7c0725ed8@172.18.100.205:8888" console

创建账号

personal.newAccount()

#启动挖矿
miner.start()

#查看账户余额,不为零即已经在挖矿了
eth.getBalance(eth.accounts[0])

显卡挖矿

下载二进制安装包

https://github.com/ethereum-mining/ethminer/releases

或(注意两种安装方式的命令不一样,请  -h 查看)

apt-get install ethminer
ethminer -G

解锁账户

personal.unlockAccount("0x00530bc8552639fd8479c22f403e84fe5e98896f","===pwd===")

转账3以太币

eth.sendTransaction({from:"0x00530bc8552639fd8479c22f403e84fe5e98896f",to:"0xa6993059b5a31e53f4df0e84a59ccf13abf86b86",value:web3.toWei(3,"ether")})

矿池挖矿

./ethminer --farm-recheck 2000 -G -S us2.ethpool.org:3333 -FS us1.ethpool.org:3333 -O 0x550ce8f9cacae8d5137feb704fbfa45e213bcd30

 

CBB(抄币币)的创世节点:enode://3251729f9c8622826f9a94c40c6f8566505ffd2f34771f77468797858cae8f16b7e7b923a694431f14815b7460c910349d981801a2637a30a8bc503eedee781d@13.115.164.167:30303

配置文件:

 
{
        "config": {
                "chainId": 10,
                        "homesteadBlock": 0,
                        "eip155Block": 0,
                        "eip158Block": 0
        },
                "alloc"      : {},
                "coinbase"   : "0x0000000000000000000000000000000000000000",
                "difficulty" : "0x020000",
                "extraData"  : "",
                "gasLimit"   : "0x2fefd8",
                "nonce"      : "0x0000000000000042",
                "mixhash"    : "0x0000000000000000000000000000000000000000000000000000000000000000",
                "parentHash" : "0x0000000000000000000000000000000000000000000000000000000000000000",
                "timestamp"  : "0x00"
}

OpenCV 实时景深

#coding=utf-8
import cv2
import numpy as np
import usb.core
import usb.backend.libusb1
import requests
import time
from matplotlib import pyplot as plt

cap = cv2.VideoCapture(1)
# libusb backend so we can send the vendor-specific control transfers below.
backend = usb.backend.libusb1.get_backend(find_library=lambda x: "/usr/lib/libusb-1.0.so")
# Stereo USB camera (vendor 0x18e3); the transfer values are opaque vendor
# init commands that put it into side-by-side (left|right) dual-image mode.
dev = usb.core.find(idVendor=0x18e3, idProduct=0x5031, backend=backend)
dev.ctrl_transfer(0x21,0x01,0x0800,0x0600,[0x50,0xff])
dev.ctrl_transfer(0x21,0x01,0x0f00,0x0600,[0x00,0xf6])
dev.ctrl_transfer(0x21,0x01,0x0800,0x0600,[0x25,0x00])
dev.ctrl_transfer(0x21,0x01,0x0800,0x0600,[0x5f,0xfe])
dev.ctrl_transfer(0x21,0x01,0x0f00,0x0600,[0x00,0x03])
dev.ctrl_transfer(0x21,0x01,0x0f00,0x0600,[0x00,0x02])
dev.ctrl_transfer(0x21,0x01,0x0f00,0x0600,[0x00,0x12])
dev.ctrl_transfer(0x21,0x01,0x0f00,0x0600,[0x00,0x04])
dev.ctrl_transfer(0x21,0x01,0x0800,0x0600,[0x76,0xc3])
dev.ctrl_transfer(0x21,0x01,0x0a00,0x0600,[4,0x00])

firstFrame = None
window_size = 0  # SGBM smoothness window; 0 makes P1 = P2 = 0 (no smoothness penalty)

# Build the SGBM matcher ONCE; the original recreated it on every frame even
# though all parameters are loop-invariant.
stereo = cv2.StereoSGBM_create(minDisparity = 16,
    numDisparities = 64,
    blockSize = 16,
    P1 = 8*3*window_size**2,
    P2 = 32*3*window_size**2,
    disp12MaxDiff = 1,
    uniquenessRatio = 10,
    speckleWindowSize = 100,
    speckleRange = 32
)

while(1):
        ret, frame = cap.read()
        # cv2.CV_8SC1 is a pixel-type constant, not an interpolation mode; its
        # numeric value happens to equal INTER_LINEAR, so name that explicitly.
        frame = cv2.resize(frame, (1280, 480), interpolation=cv2.INTER_LINEAR)
        # The camera delivers both views side by side: left half | right half.
        frame_left = cv2.cvtColor(frame[0:480,0:640], cv2.COLOR_BGR2GRAY)
        frame_right = cv2.cvtColor(frame[0:480,640:1280], cv2.COLOR_BGR2GRAY)
        disparity = stereo.compute(frame_left,frame_right)
        # Scale the fixed-point disparity map into a displayable 8-bit image.
        disp = cv2.normalize(disparity, disparity, alpha=0, beta=255, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_8U)
        cv2.imshow("h",disp)
        if cv2.waitKey(1) & 0xFF == ord('q'):
                break
cap.release()
cv2.destroyAllWindows()

最新文件在 : https://github.com/endpang/driverless

disparity.py   图片调参
disparity_video.py   视频取图调参

这两个文件是调节摄像头的。其实应该用严格标定,懒得麻烦的,可以用这个工具简单表定下。

left_or_right.py  测试

其中 minDisparity 是控制匹配搜索的第一个参数,代表匹配搜索从哪里开始;numberOfDisparities 表示最大搜索视差数;uniquenessRatio 表示匹配唯一性比率阈值。

机器找不到 libcudnn.so.6

Traceback (most recent call last):
  File "/usr/local/lib/python3.5/dist-packages/tensorflow/python/pywrap_tensorflow.py", line 58, in <module>
    from tensorflow.python.pywrap_tensorflow_internal import *
  File "/usr/local/lib/python3.5/dist-packages/tensorflow/python/pywrap_tensorflow_internal.py", line 28, in <module>
    _pywrap_tensorflow_internal = swig_import_helper()
  File "/usr/local/lib/python3.5/dist-packages/tensorflow/python/pywrap_tensorflow_internal.py", line 24, in swig_import_helper
    _mod = imp.load_module('_pywrap_tensorflow_internal', fp, pathname, description)
  File "/usr/lib/python3.5/imp.py", line 242, in load_module
    return load_dynamic(name, filename, file)
  File "/usr/lib/python3.5/imp.py", line 342, in load_dynamic
    return _load(spec)
ImportError: libcudnn.so.6: cannot open shared object file: No such file or directory

During handling of the above exception, another exception occurred:

Traceback (most recent call last):
  File "<stdin>", line 1, in <module>
  File "/usr/local/lib/python3.5/dist-packages/tensorflow/__init__.py", line 24, in <module>
    from tensorflow.python import *
  File "/usr/local/lib/python3.5/dist-packages/tensorflow/python/__init__.py", line 49, in <module>
    from tensorflow.python import pywrap_tensorflow
  File "/usr/local/lib/python3.5/dist-packages/tensorflow/python/pywrap_tensorflow.py", line 72, in <module>
    raise ImportError(msg)
ImportError: Traceback (most recent call last):
  File "/usr/local/lib/python3.5/dist-packages/tensorflow/python/pywrap_tensorflow.py", line 58, in <module>
    from tensorflow.python.pywrap_tensorflow_internal import *
  File "/usr/local/lib/python3.5/dist-packages/tensorflow/python/pywrap_tensorflow_internal.py", line 28, in <module>
    _pywrap_tensorflow_internal = swig_import_helper()
  File "/usr/local/lib/python3.5/dist-packages/tensorflow/python/pywrap_tensorflow_internal.py", line 24, in swig_import_helper
    _mod = imp.load_module('_pywrap_tensorflow_internal', fp, pathname, description)
  File "/usr/lib/python3.5/imp.py", line 242, in load_module
    return load_dynamic(name, filename, file)
  File "/usr/lib/python3.5/imp.py", line 342, in load_dynamic
    return _load(spec)
ImportError: libcudnn.so.6: cannot open shared object file: No such file or directory


Failed to load the native TensorFlow runtime.

See https://www.tensorflow.org/install/install_sources#common_installation_problems

.bashrc 中加入

export LD_LIBRARY_PATH=/usr/local/cuda/lib64:$LD_LIBRARY_PATH

srez – super-resolution

https://github.com/david-gpu/srez

python3 srez_main.py --run demo

报错

Traceback (most recent call last):
  File "/usr/local/lib/python3.5/dist-packages/imageio/plugins/ffmpeg.py", line 82, in get_exe
    auto=False)
  File "/usr/local/lib/python3.5/dist-packages/imageio/core/fetching.py", line 102, in get_remote_file
    raise NeedDownloadError()
imageio.core.fetching.NeedDownloadError

During handling of the above exception, another exception occurred:

Traceback (most recent call last):
  File "srez_main.py", line 1, in <module>
    import srez_demo
  File "/root/srez/srez_demo.py", line 1, in <module>
    import moviepy.editor as mpe
  File "/usr/local/lib/python3.5/dist-packages/moviepy/editor.py", line 22, in <module>
    from .video.io.VideoFileClip import VideoFileClip
  File "/usr/local/lib/python3.5/dist-packages/moviepy/video/io/VideoFileClip.py", line 3, in <module>
    from moviepy.video.VideoClip import VideoClip
  File "/usr/local/lib/python3.5/dist-packages/moviepy/video/VideoClip.py", line 21, in <module>
    from .io.ffmpeg_writer import ffmpeg_write_image, ffmpeg_write_video
  File "/usr/local/lib/python3.5/dist-packages/moviepy/video/io/ffmpeg_writer.py", line 11, in <module>
    from moviepy.config import get_setting
  File "/usr/local/lib/python3.5/dist-packages/moviepy/config.py", line 35, in <module>
    FFMPEG_BINARY = get_exe()
  File "/usr/local/lib/python3.5/dist-packages/imageio/plugins/ffmpeg.py", line 86, in get_exe
    raise NeedDownloadError('Need ffmpeg exe. '
imageio.core.fetching.NeedDownloadError: Need ffmpeg exe. You can download it by calling:
  imageio.plugins.ffmpeg.download()

修改

srez_demo.py

最上面加两行

import imageio
imageio.plugins.ffmpeg.download()

新建 checkpoint 文件夹,重新运行。

报错

Traceback (most recent call last):
 File "srez_main.py", line 190, in <module>
 tf.app.run()
 File "/usr/local/lib/python3.5/dist-packages/tensorflow/python/platform/app.py", line 48, in run
 _sys.exit(main(_sys.argv[:1] + flags_passthrough))
 File "srez_main.py", line 185, in main
 _demo()
 File "srez_main.py", line 113, in _demo
 sess, summary_writer = setup_tensorflow()
 File "srez_main.py", line 103, in setup_tensorflow
 summary_writer = tf.train.SummaryWriter(FLAGS.train_dir, sess.graph)
AttributeError: module 'tensorflow.python.training.training' has no attribute 'SummaryWriter'

升级 tensorflow-gpu
/usr/bin/pip3 install --upgrade tensorflow-gpu

又报错

ImportError: libcudnn.so.6: cannot open shared object file: No such file or directory

参考 这篇文章

 

OpenCV 动态检测

10000 是检测灵敏度阈值,根据需要调整灵敏度。

#coding=utf-8
import cv2
import numpy as np
import usb.core
import usb.backend.libusb1
import requests
import time

cap = cv2.VideoCapture(1)
# libusb backend so we can send the vendor-specific control transfers below.
backend = usb.backend.libusb1.get_backend(find_library=lambda x: "/usr/lib/libusb-1.0.so")
# Stereo USB camera (vendor 0x18e3); opaque vendor init sequence that puts it
# into side-by-side (left|right) dual-image mode.
dev = usb.core.find(idVendor=0x18e3, idProduct=0x5031, backend=backend)
dev.ctrl_transfer(0x21,0x01,0x0800,0x0600,[0x50,0xff])
dev.ctrl_transfer(0x21,0x01,0x0f00,0x0600,[0x00,0xf6])
dev.ctrl_transfer(0x21,0x01,0x0800,0x0600,[0x25,0x00])
dev.ctrl_transfer(0x21,0x01,0x0800,0x0600,[0x5f,0xfe])
dev.ctrl_transfer(0x21,0x01,0x0f00,0x0600,[0x00,0x03])
dev.ctrl_transfer(0x21,0x01,0x0f00,0x0600,[0x00,0x02])
dev.ctrl_transfer(0x21,0x01,0x0f00,0x0600,[0x00,0x12])
dev.ctrl_transfer(0x21,0x01,0x0f00,0x0600,[0x00,0x04])
dev.ctrl_transfer(0x21,0x01,0x0800,0x0600,[0x76,0xc3])
dev.ctrl_transfer(0x21,0x01,0x0a00,0x0600,[4,0x00])

firstFrame = None

while(1):
        ret, frame = cap.read()
        frame = cv2.resize(frame, (1280, 480), interpolation=cv2.INTER_CUBIC)
        # Side-by-side stereo frame: only the left view is used for detection.
        frame_left = frame[0:480,0:640]
        frame_right = frame[0:480,640:1280]
        gray = cv2.cvtColor(frame_left, cv2.COLOR_BGR2GRAY)
        gray = cv2.GaussianBlur(gray, (21, 21), 0)
        if firstFrame is None:
            firstFrame = gray
            continue

        # Difference against the previous frame, threshold and dilate to get
        # solid motion blobs, then extract their contours (OpenCV 3 API).
        frameDelta = cv2.absdiff(firstFrame, gray)
        thresh = cv2.threshold(frameDelta, 25, 255, cv2.THRESH_BINARY)[1]
        thresh = cv2.dilate(thresh, None, iterations=2)
        (_, cnts, _) = cv2.findContours(thresh.copy(), cv2.RETR_CCOMP, cv2.CHAIN_APPROX_SIMPLE)

        g = frame.copy()
        ii = 0
        for c in cnts:
            # 10000 px^2 is the sensitivity threshold; tune as needed.
            if cv2.contourArea(c) < 10000:
                continue
            ii = 1
            #(x, y, w, h) = cv2.boundingRect(c)
            #print (cv2.contourArea(c))
            #cv2.rectangle(g, (x, y), (x + w, y + h), (0, 255, 0), 2)
        if ii == 1:
            # Motion detected: save the frame and upload it to the collector.
            url = "http://br.bj.oupeng.com:10080"
            t = time.time()
            img_path = "./" + str(t) + ".jpg"
            cv2.imwrite(img_path, g)
            # 'with' closes the upload handle, which the original leaked; the
            # advertised filename now matches the actual JPEG content
            # (the original sent a .jpg body under a .png name).
            with open(img_path, 'rb') as fh:
                files = {'file123': (str(t) + '.jpg', fh)}
                # The original built a SET literal {'time',str(t)} here; a
                # requests `data` payload must be a dict.
                data = {'time': str(t)}
                r = requests.post(url, files=files )#, data = data)
            print(r.text)
        cv2.imshow("br",g)
        firstFrame = gray.copy()

        if cv2.waitKey(1) & 0xFF == ord('q'):
                break
cap.release()
cv2.destroyAllWindows()