hans

hans

【Python】【Caffe】图像特征与特征库匹配的物体识别方法


前言#

Github 代码地址: https://github.com/HansRen1024/Object-Classification-of-Mapping-Features

后来做了一些改动, 最新代码看 Github 吧,我就不在这里更新代码了。

Save_Feature_cam.py 通过摄像头拍摄,按空格键保存特征入库。

Contrast_Feature_cam.py 通过摄像头拍摄,按空格键比对特征,输出结果。

Save_image.py 保存大量图片用作特征库资源。每个类别保存三张图片。

Save_Feature_image.py 从上面收集的图片中保存特征入库。

Test_All.py 通过 test.txt
文件中图片路径和类别索引进行测试。test.txt 格式和训练 caffe 模型中生成 LMDB 文件的 txt 文件内容格式一致。[图片路径 + 空格 + 类别索引]

这个方法其实和人脸识别很像,只不过是用在了大众物品检测上面。优点是当你所要检测的物体数据集特别少或者不方便收集数据集的时候,效果很突出。同时也可以用作在线学习。具体原理也没什么难的,就是通过卷积神经网络提取特征,将某一层特征入库。重新再拍一张照片,和库中所有特征求欧氏距离(Euclidean distance)。不过选什么卷积网络,选哪一层特征要看情况而论。后来又完成了 C++ 分别调用 caffe 和 ncnn 实现这个功能的代码。就不放出来了,感兴趣的话自己研究研究也不难。

下面代码关于调用 caffe 模块的内容不懂的话,可以看看我之前写的博文。包括怎样转换均值到.npy 文件的代码都有。

一、保存特征代码:#

#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Mon Aug 28 13:09:05 2017

@author: hans

http://blog.csdn.net/renhanchi
"""

import caffe
import cv2
import os 
import skimage
import numpy as np
import matplotlib.pyplot as plt
        
# Paths to the SqueezeNet deploy definition, trained weights, and the
# dataset mean (pre-converted to .npy format).
prototxt='doc/deploy_squeezenet.prototxt'
caffe_model='doc/squeezenet.caffemodel'
mean_file='doc/mean_squeezenet.npy'
caffe.set_mode_gpu()
net = caffe.Net(prototxt,caffe_model,caffe.TEST)
for name,feature in net.blobs.items(): # inspect the shape of each layer's feature blob
    print name + '\t' + str(feature.data.shape)

def show(data, padsize=1, padval=0):
    """Tile the feature maps in `data` into a square grid and display them.

    data: array of shape (n_maps, height, width), optionally with a
          trailing channel axis. Values are min-max normalized to [0, 1]
          for display.
    padsize: pixels of padding between adjacent tiles.
    padval: fill value used for the padding pixels.
    """
    # Work on a copy: the caller passes a live Caffe blob, and the
    # original in-place normalization silently corrupted it.
    data = data.copy()
    data -= data.min()
    spread = data.max()
    if spread > 0:  # guard against an all-constant map (division by zero)
        data /= spread

    # Smallest square grid that fits all maps; pad the count up to n*n
    # and add `padsize` pixels on the bottom/right of every tile.
    n = int(np.ceil(np.sqrt(data.shape[0])))
    padding = ((0, n ** 2 - data.shape[0]), (0, padsize), (0, padsize)) + ((0, 0),) * (data.ndim - 3)
    data = np.pad(data, padding, mode='constant', constant_values=(padval, padval))

    # Interleave tile rows/columns, then collapse into one big image.
    data = data.reshape((n, n) + data.shape[1:]).transpose((0, 2, 1, 3) + tuple(range(4, data.ndim + 1)))
    data = data.reshape((n * data.shape[1], n * data.shape[3]) + data.shape[4:])
    plt.imshow(data)
    plt.axis('off')

def saveFeat(image):
    """Run `image` through the net and dump its conv10 features to disk.

    image: float RGB image (H, W, 3) with values in [0, 1].
    Side effects: writes raw float32 bytes to <claPath>feat.bin and
    displays the feature maps via show(). Relies on the module-level
    `net`, `mean_file` and `claPath`.
    """
    im = caffe.io.resize_image(image, (227, 227, 3))
    # The data blob layout is (n, k, h, w).
    transformer = caffe.io.Transformer({'data': net.blobs['data'].data.shape})
    transformer.set_transpose('data', (2, 0, 1))  # reorder (h, w, k) -> (k, h, w)
    transformer.set_mean('data', np.load(mean_file).mean(1).mean(1))
    transformer.set_raw_scale('data', 255)

    net.blobs['data'].data[...] = transformer.preprocess('data', im)
    net.forward()

    conv1_data = net.blobs['conv10'].data[0]  # extract the feature maps
    conv1_data.tofile(claPath + 'feat.bin')   # raw dump; shape is implicit
    show(conv1_data)

c = cv2.VideoCapture(0)
while 1:
    ret, image = c.read()
    cv2.rectangle(image,(117,37),(522,442),(0,255,0),2)
    cv2.imshow("aaa", image)
    key = cv2.waitKey(10)
    if key == ord(' '):
        cla = str(raw_input("Please enter class name: "))
        claPath = os.path.join(r'features/%s/' %cla)
        if not os.path.exists(claPath):
            os.makedirs(claPath)
        else:
            print "This class has been saved before"
            os._exit(1)
        img = image[40:440, 120:520]
        img = skimage.img_as_float(image[40:440, 120:520]).astype(np.float32)
        saveFeat(img)
    elif key == 27:
        cv2.destroyAllWindows()
        break

二、对比特征代码:#

#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Mon Aug 28 15:39:58 2017

@author: hans

http://blog.csdn.net/renhanchi
"""

import caffe
import cv2
import os 
import skimage
import numpy as np
from math import sqrt

# Root directory holding one sub-directory of saved features per class.
dirpath = 'features/'
# Model definition, weights and mean file (same setup as the save script).
prototxt='doc/deploy_squeezenet.prototxt'
caffe_model='doc/squeezenet.caffemodel'
mean_file='doc/mean_squeezenet.npy'
caffe.set_mode_gpu()
net = caffe.Net(prototxt,caffe_model,caffe.TEST)

def contrastFeat(image):
    global similarity
    similarity = []
    cla = []
    im = caffe.io.resize_image(image,(227,227,3))
    transformer = caffe.io.Transformer({'data': net.blobs['data'].data.shape}) #data blob 结构(n, k, h, w)
    transformer.set_transpose('data', (2, 0, 1)) #改变图片维度顺序,(h, w, k) -> (k, h, w)
    transformer.set_mean('data', np.load(mean_file).mean(1).mean(1))
    transformer.set_raw_scale('data', 255)
    net.blobs['data'].data[...] = transformer.preprocess('data', im)
    net.forward()
    conv1_data = net.blobs['conv10'].data[0] #提取特征
    
    for dirname in os.listdir(dirpath):
        if os.path.isdir(r'%s%s/' %(dirpath, dirname)):
            claPath = os.path.join(r'%s%s/' %(dirpath, dirname))
            feat = np.fromfile(claPath+'feat.bin', dtype = np.float32)
            feat = feat.reshape(conv1_data.shape[0],conv1_data.shape[1],conv1_data.shape[2])
            dis = 0
            for n in range(feat.shape[0]):
                for h in range(feat.shape[1]):
                    for w in range(feat.shape[2]):
                        dis += pow(conv1_data[n,h,w]-feat[n,h,w],2)
            L2 = sqrt(dis)
            similarity.append(1/(1+L2))
            cla.append(dirname)
    similarity = np.array(similarity)
    print similarity
    order = similarity.argsort()[-1]
    print 'clss:', cla[order], 'prob:', similarity[order]

# Capture frames from the default camera; SPACE compares the current crop
# against the feature library, ESC quits.
c = cv2.VideoCapture(0)
while 1:
    ret, image = c.read()
    # Green guide rectangle marking the crop region shown to the user.
    cv2.rectangle(image,(117,37),(522,442),(0,255,0),2)
    cv2.imshow("aaa", image)
    key = cv2.waitKey(10)
    if key == ord(' '):
        # Crop the region inside the rectangle and convert to float [0, 1].
        # (The original assigned the raw crop first and immediately
        # overwrote it — that dead assignment is removed.)
        img = skimage.img_as_float(image[40:440, 120:520]).astype(np.float32)
        contrastFeat(img)
    elif key == 27:
        c.release()  # free the camera before exiting
        cv2.destroyAllWindows()
        break
Loading...
Ownership of this post data is guaranteed by blockchain and smart contracts to the creator alone.