Home
// Load a grayscale image with OpenCV, resize it to 60x60, normalize it to
// the range -1 ~ 1, run the ncnn network and extract the "output" blob.
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include "net.h"

int main()
{
    cv::Mat img = cv::imread("image.ppm", cv::IMREAD_GRAYSCALE);
    if (img.empty())
        return -1;

    int w = img.cols;
    int h = img.rows;

    // convert pixel data to ncnn::Mat and resize to the network input size 60x60
    ncnn::Mat in = ncnn::Mat::from_pixels_resize(img.data, ncnn::Mat::PIXEL_GRAY, w, h, 60, 60);

    // subtract 128 and scale by 1/128, i.e. normalize to -1 ~ 1
    float mean[1] = { 128.f };
    float norm[1] = { 1/128.f };
    in.substract_mean_normalize(mean, norm);

    // load the model and run a forward pass
    ncnn::Net net;
    net.load_param("model.param");
    net.load_model("model.bin");

    ncnn::Extractor ex = net.create_extractor();
    ex.input("data", in);

    ncnn::Mat feat;
    ex.extract("output", feat);

    return 0;
}
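After extraction, the values of the output blob can be read directly from the ncnn::Mat. A minimal sketch, assuming the "output" blob from the example above is a flat 1xN vector (for example class scores) and that &lt;vector&gt; is included; the scores buffer and loop are illustrative, only operator[] is ncnn API:

// read the extracted blob as a flat array of floats
// assumes feat comes from ex.extract("output", feat) above, with feat.h == 1 and feat.c == 1
std::vector<float> scores(feat.w);
for (int i = 0; i < feat.w; i++)
{
    scores[i] = feat[i]; // ncnn::Mat provides flat float access via operator[]
}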
// Print the content of an ncnn::Mat channel by channel.
// Requires <stdio.h> and ncnn's mat.h (already pulled in by net.h).
void pretty_print(const ncnn::Mat& m)
{
    for (int q=0; q<m.c; q++)
    {
        const float* ptr = m.channel(q);
        for (int z=0; z<m.d; z++)
        {
            for (int y=0; y<m.h; y++)
            {
                for (int x=0; x<m.w; x++)
                {
                    printf("%f ", ptr[x]);
                }
                ptr += m.w;
                printf("\n");
            }
            printf("\n");
        }
        printf("------------------------\n");
    }
}
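A possible call site, printing the blob extracted in the first example (feat is the variable used above):

pretty_print(feat);

Each channel is printed as h rows of w values, with a separator line between channels.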
// Visualize each channel of an ncnn::Mat as a tiled grayscale image.
// NaN values are highlighted in red. Requires <vector> and OpenCV
// (core for normalize, imgproc for cvtColor/resize, highgui for imshow).
void visualize(const char* title, const ncnn::Mat& m)
{
    std::vector<cv::Mat> normed_feats(m.c);

    for (int i=0; i<m.c; i++)
    {
        cv::Mat tmp(m.h, m.w, CV_32FC1, (void*)(const float*)m.channel(i));

        // stretch each channel to 0~255 for display
        cv::normalize(tmp, normed_feats[i], 0, 255, cv::NORM_MINMAX, CV_8U);

        cv::cvtColor(normed_feats[i], normed_feats[i], cv::COLOR_GRAY2BGR);

        // check NaN and mark it red
        for (int y=0; y<m.h; y++)
        {
            const float* tp = tmp.ptr<float>(y);
            uchar* sp = normed_feats[i].ptr<uchar>(y);
            for (int x=0; x<m.w; x++)
            {
                float v = tp[x];
                if (v != v)
                {
                    sp[0] = 0;
                    sp[1] = 0;
                    sp[2] = 255;
                }

                sp += 3;
            }
        }
    }

    // pick a tile width so that wide feature maps use fewer columns
    int tw = m.w < 10 ? 32 : m.w < 20 ? 16 : m.w < 40 ? 8 : m.w < 80 ? 4 : m.w < 160 ? 2 : 1;
    int th = (m.c - 1) / tw + 1;

    // gray background canvas holding all channel tiles
    cv::Mat show_map(m.h * th, m.w * tw, CV_8UC3);
    show_map = cv::Scalar(127, 127, 127);

    // tile all channels into one big image
    for (int i=0; i<m.c; i++)
    {
        int ty = i / tw;
        int tx = i % tw;

        normed_feats[i].copyTo(show_map(cv::Rect(tx * m.w, ty * m.h, m.w, m.h)));
    }

    cv::resize(show_map, show_map, cv::Size(0,0), 2, 2, cv::INTER_NEAREST);
    cv::imshow(title, show_map);
}
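A possible call site, showing the blob extracted in the first example; cv::waitKey is needed so the highgui window actually stays on screen, and the title string is arbitrary:

visualize("output", feat);
cv::waitKey(0);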
Q The origin of ncnn
A Deep learning algorithms had to be deployed on mobile phones, but Caffe has too many dependencies and phones have no CUDA, so a fast and small forward-inference implementation was needed.
Q Where does the name ncnn come from?
A "cnn" is short for convolutional neural network; the leading "n" is a pun with several meanings: new/next (a brand-new implementation), naive (ncnn is a naive implementation), neon (ncnn was originally optimized for mobile phones), and the author's nickname (←_←).
Q Which platforms are supported?
A Cross-platform: android / ios / linux / windows / macos are supported, and it can also run on bare metal.
Q How accurate is the computation?
A armv7 neon float does not follow the IEEE 754 standard; some functions (such as exp and sin) use fast approximations, which are fast while still keeping precision high enough.
Q The logo?
A The author is a Minecraft player, hence the soulful hand-drawn pixel cat; you can also find ncnn...