================================================
FILE: MobileNetSSD_demo/app/.gitignore
================================================
/build
================================================
FILE: MobileNetSSD_demo/app/CMakeLists.txt
================================================
# For more information about using CMake with Android Studio, read the
# documentation: https://d.android.com/studio/projects/add-native-code.html

# Sets the minimum version of CMake required to build the native library.
cmake_minimum_required(VERSION 3.4.1)

# --- Prebuilt ncnn static library -------------------------------------------
# Path to libncnn.a produced by ncnn's `make install`. ${ANDROID_ABI} (set by
# the NDK toolchain) replaces a hardcoded "armeabi-v7a" so the path stays in
# sync with the abiFilters configured in build.gradle. The variable is named
# differently from the target below to avoid the variable/target name clash
# the original script had.
set(ncnn_lib_path ${CMAKE_SOURCE_DIR}/src/main/jniLibs/${ANDROID_ABI}/libncnn.a)

# Wrap the prebuilt archive in an IMPORTED target so it can be linked by name.
add_library(ncnn_lib STATIC IMPORTED)
set_target_properties(ncnn_lib PROPERTIES IMPORTED_LOCATION ${ncnn_lib_path})

# --- JNI shared library ------------------------------------------------------
# The output name (libMobileNetssd.so) must match the name loaded from Java
# via System.loadLibrary("MobileNetssd").
add_library(MobileNetssd
            SHARED
            src/main/cpp/MobileNetssd.cpp)

# Locate the NDK log library (liblog) used by __android_log_print.
find_library(log-lib
             log)

# Link the ncnn archive, the NDK bitmap helpers (jnigraphics) and liblog.
# PRIVATE: none of these are usage requirements for consumers of the .so.
target_link_libraries(MobileNetssd
                      PRIVATE
                      ncnn_lib
                      jnigraphics
                      ${log-lib})
================================================
FILE: MobileNetSSD_demo/app/build.gradle
================================================
// Android application module for the MobileNetSSD ncnn demo.
apply plugin: 'com.android.application'
android {
compileSdkVersion 28
defaultConfig {
applicationId "com.example.che.mobilenetssd_demo"
minSdkVersion 15
targetSdkVersion 28
versionCode 1
versionName "1.0"
testInstrumentationRunner "android.support.test.runner.AndroidJUnitRunner"
externalNativeBuild {
cmake {
cppFlags "-std=c++11 -fopenmp" // C++11 plus OpenMP (ncnn's multithreading) -- required
abiFilters "armeabi-v7a" // target ABI; armeabi-v7a runs on nearly all handsets
}
}
}
buildTypes {
release {
minifyEnabled false
proguardFiles getDefaultProguardFile('proguard-android.txt'), 'proguard-rules.pro'
}
}
externalNativeBuild {
cmake {
path "CMakeLists.txt" // native build script that links the prebuilt libncnn.a
}
}
// Pick up prebuilt native libs (libncnn.a from ncnn's `make install`) under jniLibs.
sourceSets {
main {
jniLibs.srcDirs = ["src/main/jniLibs"]
jni.srcDirs = ['src/cpp'] // NOTE(review): points at a non-existent dir, which effectively disables the legacy ndk-build JNI task -- confirm intentional
}
}
}
dependencies {
implementation fileTree(dir: 'libs', include: ['*.jar'])
implementation 'com.android.support:appcompat-v7:28.0.0'
implementation 'com.android.support.constraint:constraint-layout:1.1.3'
testImplementation 'junit:junit:4.12'
implementation 'com.github.bumptech.glide:glide:4.3.1' // Glide image-loading library used by the demo UI
androidTestImplementation 'com.android.support.test:runner:1.0.2'
androidTestImplementation 'com.android.support.test.espresso:espresso-core:3.0.2'
}
================================================
FILE: MobileNetSSD_demo/app/proguard-rules.pro
================================================
# Add project specific ProGuard rules here.
# You can control the set of applied configuration files using the
# proguardFiles setting in build.gradle.
#
# For more details, see
# http://developer.android.com/guide/developing/tools/proguard.html
# If your project uses WebView with JS, uncomment the following
# and specify the fully qualified class name to the JavaScript interface
# class:
#-keepclassmembers class fqcn.of.javascript.interface.for.webview {
# public *;
#}
# Uncomment this to preserve the line number information for
# debugging stack traces.
#-keepattributes SourceFile,LineNumberTable
# If you keep the line number information, uncomment this to
# hide the original source file name.
#-renamesourcefileattribute SourceFile
================================================
FILE: MobileNetSSD_demo/app/src/androidTest/java/com/example/che/mobilenetssd_demo/ExampleInstrumentedTest.java
================================================
package com.example.che.mobilenetssd_demo;
import android.content.Context;
import android.support.test.InstrumentationRegistry;
import android.support.test.runner.AndroidJUnit4;
import org.junit.Test;
import org.junit.runner.RunWith;
import static org.junit.Assert.*;
/**
 * Instrumented test, which will execute on an Android device.
 *
 * @see <a href="http://d.android.com/tools/testing">Testing documentation</a>
 */
@RunWith(AndroidJUnit4.class)
public class ExampleInstrumentedTest {
    /** Verifies the instrumentation target context reports the expected package name. */
    @Test
    public void useAppContext() {
        // Context of the application under test, supplied by the instrumentation.
        final Context targetContext = InstrumentationRegistry.getTargetContext();
        assertEquals("com.example.che.mobilenetssd_demo", targetContext.getPackageName());
    }
}
================================================
FILE: MobileNetSSD_demo/app/src/main/AndroidManifest.xml
================================================
================================================
FILE: MobileNetSSD_demo/app/src/main/assets/words.txt
================================================
background
aeroplane
bicycle
bird
boat
bottle
bus
car
cat
chair
cow
diningtable
dog
horse
motorbike
person
pottedplant
sheep
sofa
train
tvmonitor
================================================
FILE: MobileNetSSD_demo/app/src/main/cpp/MobileNetSSD_deploy.id.h
================================================
// Auto-generated id table for the encrypted MobileNet-SSD ncnn model
// (output of ncnn's ncnn2mem tool applied to MobileNetSSD_deploy.param).
// LAYER_* constants are layer indices and BLOB_* constants are blob indices;
// they replace the string names ("data", "detection_out", ...) used by the
// plain-text .param file. NOTE(review): do not edit by hand -- regenerate
// with ncnn2mem if the model changes.
#ifndef NCNN_INCLUDE_GUARD_MobileNetSSD_deploy_id_h
#define NCNN_INCLUDE_GUARD_MobileNetSSD_deploy_id_h
namespace MobileNetSSD_deploy_param_id {
// Network input blob and the Split layer fanning "data" out to the branches.
const int LAYER_input = 0;
const int BLOB_data = 0;
const int LAYER_splitncnn_0 = 1;
const int BLOB_data_splitncnn_0 = 1;
const int BLOB_data_splitncnn_1 = 2;
const int BLOB_data_splitncnn_2 = 3;
const int BLOB_data_splitncnn_3 = 4;
const int BLOB_data_splitncnn_4 = 5;
const int BLOB_data_splitncnn_5 = 6;
const int BLOB_data_splitncnn_6 = 7;
// MobileNet backbone: conv0 .. conv13 (depthwise/pointwise pairs with ReLU).
const int LAYER_conv0 = 2;
const int BLOB_conv0 = 8;
const int LAYER_conv0_relu = 3;
const int BLOB_conv0_conv0_relu = 9;
const int LAYER_conv1_dw = 4;
const int BLOB_conv1_dw = 10;
const int LAYER_conv1_dw_relu = 5;
const int BLOB_conv1_dw_conv1_dw_relu = 11;
const int LAYER_conv1 = 6;
const int BLOB_conv1 = 12;
const int LAYER_conv1_relu = 7;
const int BLOB_conv1_conv1_relu = 13;
const int LAYER_conv2_dw = 8;
const int BLOB_conv2_dw = 14;
const int LAYER_conv2_dw_relu = 9;
const int BLOB_conv2_dw_conv2_dw_relu = 15;
const int LAYER_conv2 = 10;
const int BLOB_conv2 = 16;
const int LAYER_conv2_relu = 11;
const int BLOB_conv2_conv2_relu = 17;
const int LAYER_conv3_dw = 12;
const int BLOB_conv3_dw = 18;
const int LAYER_conv3_dw_relu = 13;
const int BLOB_conv3_dw_conv3_dw_relu = 19;
const int LAYER_conv3 = 14;
const int BLOB_conv3 = 20;
const int LAYER_conv3_relu = 15;
const int BLOB_conv3_conv3_relu = 21;
const int LAYER_conv4_dw = 16;
const int BLOB_conv4_dw = 22;
const int LAYER_conv4_dw_relu = 17;
const int BLOB_conv4_dw_conv4_dw_relu = 23;
const int LAYER_conv4 = 18;
const int BLOB_conv4 = 24;
const int LAYER_conv4_relu = 19;
const int BLOB_conv4_conv4_relu = 25;
const int LAYER_conv5_dw = 20;
const int BLOB_conv5_dw = 26;
const int LAYER_conv5_dw_relu = 21;
const int BLOB_conv5_dw_conv5_dw_relu = 27;
const int LAYER_conv5 = 22;
const int BLOB_conv5 = 28;
const int LAYER_conv5_relu = 23;
const int BLOB_conv5_conv5_relu = 29;
const int LAYER_conv6_dw = 24;
const int BLOB_conv6_dw = 30;
const int LAYER_conv6_dw_relu = 25;
const int BLOB_conv6_dw_conv6_dw_relu = 31;
const int LAYER_conv6 = 26;
const int BLOB_conv6 = 32;
const int LAYER_conv6_relu = 27;
const int BLOB_conv6_conv6_relu = 33;
const int LAYER_conv7_dw = 28;
const int BLOB_conv7_dw = 34;
const int LAYER_conv7_dw_relu = 29;
const int BLOB_conv7_dw_conv7_dw_relu = 35;
const int LAYER_conv7 = 30;
const int BLOB_conv7 = 36;
const int LAYER_conv7_relu = 31;
const int BLOB_conv7_conv7_relu = 37;
const int LAYER_conv8_dw = 32;
const int BLOB_conv8_dw = 38;
const int LAYER_conv8_dw_relu = 33;
const int BLOB_conv8_dw_conv8_dw_relu = 39;
const int LAYER_conv8 = 34;
const int BLOB_conv8 = 40;
const int LAYER_conv8_relu = 35;
const int BLOB_conv8_conv8_relu = 41;
const int LAYER_conv9_dw = 36;
const int BLOB_conv9_dw = 42;
const int LAYER_conv9_dw_relu = 37;
const int BLOB_conv9_dw_conv9_dw_relu = 43;
const int LAYER_conv9 = 38;
const int BLOB_conv9 = 44;
const int LAYER_conv9_relu = 39;
const int BLOB_conv9_conv9_relu = 45;
const int LAYER_conv10_dw = 40;
const int BLOB_conv10_dw = 46;
const int LAYER_conv10_dw_relu = 41;
const int BLOB_conv10_dw_conv10_dw_relu = 47;
const int LAYER_conv10 = 42;
const int BLOB_conv10 = 48;
const int LAYER_conv10_relu = 43;
const int BLOB_conv10_conv10_relu = 49;
const int LAYER_conv11_dw = 44;
const int BLOB_conv11_dw = 50;
const int LAYER_conv11_dw_relu = 45;
const int BLOB_conv11_dw_conv11_dw_relu = 51;
const int LAYER_conv11 = 46;
const int BLOB_conv11 = 52;
const int LAYER_conv11_relu = 47;
const int BLOB_conv11_conv11_relu = 53;
const int LAYER_splitncnn_1 = 48;
const int BLOB_conv11_conv11_relu_splitncnn_0 = 54;
const int BLOB_conv11_conv11_relu_splitncnn_1 = 55;
const int BLOB_conv11_conv11_relu_splitncnn_2 = 56;
const int BLOB_conv11_conv11_relu_splitncnn_3 = 57;
const int LAYER_conv12_dw = 49;
const int BLOB_conv12_dw = 58;
const int LAYER_conv12_dw_relu = 50;
const int BLOB_conv12_dw_conv12_dw_relu = 59;
const int LAYER_conv12 = 51;
const int BLOB_conv12 = 60;
const int LAYER_conv12_relu = 52;
const int BLOB_conv12_conv12_relu = 61;
const int LAYER_conv13_dw = 53;
const int BLOB_conv13_dw = 62;
const int LAYER_conv13_dw_relu = 54;
const int BLOB_conv13_dw_conv13_dw_relu = 63;
const int LAYER_conv13 = 55;
const int BLOB_conv13 = 64;
const int LAYER_conv13_relu = 56;
const int BLOB_conv13_conv13_relu = 65;
const int LAYER_splitncnn_2 = 57;
const int BLOB_conv13_conv13_relu_splitncnn_0 = 66;
const int BLOB_conv13_conv13_relu_splitncnn_1 = 67;
const int BLOB_conv13_conv13_relu_splitncnn_2 = 68;
const int BLOB_conv13_conv13_relu_splitncnn_3 = 69;
// Extra SSD feature layers conv14 .. conv17 (1x1 reduce + 3x3 stride-2).
const int LAYER_conv14_1 = 58;
const int BLOB_conv14_1 = 70;
const int LAYER_conv14_1_relu = 59;
const int BLOB_conv14_1_conv14_1_relu = 71;
const int LAYER_conv14_2 = 60;
const int BLOB_conv14_2 = 72;
const int LAYER_conv14_2_relu = 61;
const int BLOB_conv14_2_conv14_2_relu = 73;
const int LAYER_splitncnn_3 = 62;
const int BLOB_conv14_2_conv14_2_relu_splitncnn_0 = 74;
const int BLOB_conv14_2_conv14_2_relu_splitncnn_1 = 75;
const int BLOB_conv14_2_conv14_2_relu_splitncnn_2 = 76;
const int BLOB_conv14_2_conv14_2_relu_splitncnn_3 = 77;
const int LAYER_conv15_1 = 63;
const int BLOB_conv15_1 = 78;
const int LAYER_conv15_1_relu = 64;
const int BLOB_conv15_1_conv15_1_relu = 79;
const int LAYER_conv15_2 = 65;
const int BLOB_conv15_2 = 80;
const int LAYER_conv15_2_relu = 66;
const int BLOB_conv15_2_conv15_2_relu = 81;
const int LAYER_splitncnn_4 = 67;
const int BLOB_conv15_2_conv15_2_relu_splitncnn_0 = 82;
const int BLOB_conv15_2_conv15_2_relu_splitncnn_1 = 83;
const int BLOB_conv15_2_conv15_2_relu_splitncnn_2 = 84;
const int BLOB_conv15_2_conv15_2_relu_splitncnn_3 = 85;
const int LAYER_conv16_1 = 68;
const int BLOB_conv16_1 = 86;
const int LAYER_conv16_1_relu = 69;
const int BLOB_conv16_1_conv16_1_relu = 87;
const int LAYER_conv16_2 = 70;
const int BLOB_conv16_2 = 88;
const int LAYER_conv16_2_relu = 71;
const int BLOB_conv16_2_conv16_2_relu = 89;
const int LAYER_splitncnn_5 = 72;
const int BLOB_conv16_2_conv16_2_relu_splitncnn_0 = 90;
const int BLOB_conv16_2_conv16_2_relu_splitncnn_1 = 91;
const int BLOB_conv16_2_conv16_2_relu_splitncnn_2 = 92;
const int BLOB_conv16_2_conv16_2_relu_splitncnn_3 = 93;
const int LAYER_conv17_1 = 73;
const int BLOB_conv17_1 = 94;
const int LAYER_conv17_1_relu = 74;
const int BLOB_conv17_1_conv17_1_relu = 95;
const int LAYER_conv17_2 = 75;
const int BLOB_conv17_2 = 96;
const int LAYER_conv17_2_relu = 76;
const int BLOB_conv17_2_conv17_2_relu = 97;
const int LAYER_splitncnn_6 = 77;
const int BLOB_conv17_2_conv17_2_relu_splitncnn_0 = 98;
const int BLOB_conv17_2_conv17_2_relu_splitncnn_1 = 99;
const int BLOB_conv17_2_conv17_2_relu_splitncnn_2 = 100;
// Per-scale detection heads: location, confidence and priorbox branches.
const int LAYER_conv11_mbox_loc = 78;
const int BLOB_conv11_mbox_loc = 101;
const int LAYER_conv11_mbox_loc_perm = 79;
const int BLOB_conv11_mbox_loc_perm = 102;
const int LAYER_conv11_mbox_loc_flat = 80;
const int BLOB_conv11_mbox_loc_flat = 103;
const int LAYER_conv11_mbox_conf = 81;
const int BLOB_conv11_mbox_conf = 104;
const int LAYER_conv11_mbox_conf_perm = 82;
const int BLOB_conv11_mbox_conf_perm = 105;
const int LAYER_conv11_mbox_conf_flat = 83;
const int BLOB_conv11_mbox_conf_flat = 106;
const int LAYER_conv11_mbox_priorbox = 84;
const int BLOB_conv11_mbox_priorbox = 107;
const int LAYER_conv13_mbox_loc = 85;
const int BLOB_conv13_mbox_loc = 108;
const int LAYER_conv13_mbox_loc_perm = 86;
const int BLOB_conv13_mbox_loc_perm = 109;
const int LAYER_conv13_mbox_loc_flat = 87;
const int BLOB_conv13_mbox_loc_flat = 110;
const int LAYER_conv13_mbox_conf = 88;
const int BLOB_conv13_mbox_conf = 111;
const int LAYER_conv13_mbox_conf_perm = 89;
const int BLOB_conv13_mbox_conf_perm = 112;
const int LAYER_conv13_mbox_conf_flat = 90;
const int BLOB_conv13_mbox_conf_flat = 113;
const int LAYER_conv13_mbox_priorbox = 91;
const int BLOB_conv13_mbox_priorbox = 114;
const int LAYER_conv14_2_mbox_loc = 92;
const int BLOB_conv14_2_mbox_loc = 115;
const int LAYER_conv14_2_mbox_loc_perm = 93;
const int BLOB_conv14_2_mbox_loc_perm = 116;
const int LAYER_conv14_2_mbox_loc_flat = 94;
const int BLOB_conv14_2_mbox_loc_flat = 117;
const int LAYER_conv14_2_mbox_conf = 95;
const int BLOB_conv14_2_mbox_conf = 118;
const int LAYER_conv14_2_mbox_conf_perm = 96;
const int BLOB_conv14_2_mbox_conf_perm = 119;
const int LAYER_conv14_2_mbox_conf_flat = 97;
const int BLOB_conv14_2_mbox_conf_flat = 120;
const int LAYER_conv14_2_mbox_priorbox = 98;
const int BLOB_conv14_2_mbox_priorbox = 121;
const int LAYER_conv15_2_mbox_loc = 99;
const int BLOB_conv15_2_mbox_loc = 122;
const int LAYER_conv15_2_mbox_loc_perm = 100;
const int BLOB_conv15_2_mbox_loc_perm = 123;
const int LAYER_conv15_2_mbox_loc_flat = 101;
const int BLOB_conv15_2_mbox_loc_flat = 124;
const int LAYER_conv15_2_mbox_conf = 102;
const int BLOB_conv15_2_mbox_conf = 125;
const int LAYER_conv15_2_mbox_conf_perm = 103;
const int BLOB_conv15_2_mbox_conf_perm = 126;
const int LAYER_conv15_2_mbox_conf_flat = 104;
const int BLOB_conv15_2_mbox_conf_flat = 127;
const int LAYER_conv15_2_mbox_priorbox = 105;
const int BLOB_conv15_2_mbox_priorbox = 128;
const int LAYER_conv16_2_mbox_loc = 106;
const int BLOB_conv16_2_mbox_loc = 129;
const int LAYER_conv16_2_mbox_loc_perm = 107;
const int BLOB_conv16_2_mbox_loc_perm = 130;
const int LAYER_conv16_2_mbox_loc_flat = 108;
const int BLOB_conv16_2_mbox_loc_flat = 131;
const int LAYER_conv16_2_mbox_conf = 109;
const int BLOB_conv16_2_mbox_conf = 132;
const int LAYER_conv16_2_mbox_conf_perm = 110;
const int BLOB_conv16_2_mbox_conf_perm = 133;
const int LAYER_conv16_2_mbox_conf_flat = 111;
const int BLOB_conv16_2_mbox_conf_flat = 134;
const int LAYER_conv16_2_mbox_priorbox = 112;
const int BLOB_conv16_2_mbox_priorbox = 135;
const int LAYER_conv17_2_mbox_loc = 113;
const int BLOB_conv17_2_mbox_loc = 136;
const int LAYER_conv17_2_mbox_loc_perm = 114;
const int BLOB_conv17_2_mbox_loc_perm = 137;
const int LAYER_conv17_2_mbox_loc_flat = 115;
const int BLOB_conv17_2_mbox_loc_flat = 138;
const int LAYER_conv17_2_mbox_conf = 116;
const int BLOB_conv17_2_mbox_conf = 139;
const int LAYER_conv17_2_mbox_conf_perm = 117;
const int BLOB_conv17_2_mbox_conf_perm = 140;
const int LAYER_conv17_2_mbox_conf_flat = 118;
const int BLOB_conv17_2_mbox_conf_flat = 141;
const int LAYER_conv17_2_mbox_priorbox = 119;
const int BLOB_conv17_2_mbox_priorbox = 142;
// Concatenated head outputs and the final DetectionOutput layer.
const int LAYER_mbox_loc = 120;
const int BLOB_mbox_loc = 143;
const int LAYER_mbox_conf = 121;
const int BLOB_mbox_conf = 144;
const int LAYER_mbox_priorbox = 122;
const int BLOB_mbox_priorbox = 145;
const int LAYER_mbox_conf_reshape = 123;
const int BLOB_mbox_conf_reshape = 146;
const int LAYER_mbox_conf_softmax = 124;
const int BLOB_mbox_conf_softmax = 147;
const int LAYER_mbox_conf_flatten = 125;
const int BLOB_mbox_conf_flatten = 148;
const int LAYER_detection_out = 126;
const int BLOB_detection_out = 149;
} // namespace MobileNetSSD_deploy_param_id
#endif // NCNN_INCLUDE_GUARD_MobileNetSSD_deploy_id_h
================================================
FILE: MobileNetSSD_demo/app/src/main/cpp/MobileNetssd.cpp
================================================
#include <android/bitmap.h>
#include <android/log.h>
#include <jni.h>
#include <string>
#include <vector>
#include <sys/time.h>
#include <unistd.h>
// ncnn
#include "include/opencv.h"
#include "include/net.h"
#include "MobileNetSSD_deploy.id.h" // generated numeric ids for the encrypted model
// Process-wide ncnn state shared by Init() and Detect().
static ncnn::UnlockedPoolAllocator g_blob_pool_allocator; // blob memory pool (no locking)
static ncnn::PoolAllocator g_workspace_pool_allocator;    // workspace memory pool
static ncnn::Mat ncnn_param; // raw bytes of the .param file; must outlive the net
static ncnn::Mat ncnn_bin;   // raw bytes of the .bin weights; must outlive the net
static ncnn::Net ncnn_net;   // the loaded MobileNet-SSD network
extern "C" {
// Java-side declaration: public native boolean Init(byte[] param, byte[] bin);
// below is the equivalent NDK/JNI C++ form (the mangled name encodes package + class).
// Loads the ncnn MobileNet-SSD network from in-memory copies of the .param
// and .bin model files and installs the default inference options.
// Always returns JNI_TRUE; load failures are only visible via the logged
// return codes (0 = success for load_param/load_model).
JNIEXPORT jboolean JNICALL
Java_com_example_che_mobilenetssd_1demo_MobileNetssd_Init(JNIEnv *env, jobject obj, jbyteArray param, jbyteArray bin) {
__android_log_print(ANDROID_LOG_DEBUG, "MobileNetssd", "enter the jni func");
// init param
{
int len = env->GetArrayLength(param);
// Copy the Java byte[] into the long-lived static ncnn_param buffer;
// load_param parses from this memory, so it must outlive this call.
ncnn_param.create(len, (size_t) 1u);
env->GetByteArrayRegion(param, 0, len, (jbyte *) ncnn_param);
int ret = ncnn_net.load_param((const unsigned char *) ncnn_param);
__android_log_print(ANDROID_LOG_DEBUG, "MobileNetssd", "load_param %d %d", ret, len);
}
// init bin
{
int len = env->GetArrayLength(bin);
// Same pattern for the weights: keep the bytes alive in the static ncnn_bin.
ncnn_bin.create(len, (size_t) 1u);
env->GetByteArrayRegion(bin, 0, len, (jbyte *) ncnn_bin);
int ret = ncnn_net.load_model((const unsigned char *) ncnn_bin);
__android_log_print(ANDROID_LOG_DEBUG, "MobileNetssd", "load_model %d %d", ret, len);
}
// Default options inherited by every Extractor created from ncnn_net.
ncnn::Option opt;
opt.lightmode = true;
opt.num_threads = 4; // OpenMP thread count -- tune per device
opt.blob_allocator = &g_blob_pool_allocator;
opt.workspace_allocator = &g_workspace_pool_allocator;
ncnn::set_default_option(opt);
return JNI_TRUE;
}
// Java-side declaration: public native float[] Detect(Bitmap bitmap);
// Runs MobileNet-SSD on an RGBA_8888 bitmap and returns the detection matrix
// flattened row-major; each row of the ncnn output is
// [class_id, score, xmin, ymin, xmax, ymax] (coordinates normalized 0..1 --
// TODO confirm against the DetectionOutput layer config).
// Returns NULL for unsupported bitmap formats or on allocation failure.
JNIEXPORT jfloatArray JNICALL Java_com_example_che_mobilenetssd_1demo_MobileNetssd_Detect(JNIEnv* env, jobject thiz, jobject bitmap)
{
    // Build the ncnn input from the bitmap pixels.
    ncnn::Mat in;
    {
        AndroidBitmapInfo info;
        AndroidBitmap_getInfo(env, bitmap, &info);
        int width = info.width;
        int height = info.height;
        if (info.format != ANDROID_BITMAP_FORMAT_RGBA_8888)
            return NULL; // only RGBA_8888 bitmaps are supported
        void* indata;
        AndroidBitmap_lockPixels(env, bitmap, &indata);
        // Convert RGBA pixels to a packed RGB ncnn::Mat. The Java caller is
        // expected to resize the bitmap to the network input size (e.g.
        // 300x300) beforehand; from_pixels_resize could do it here instead.
        in = ncnn::Mat::from_pixels((const unsigned char*)indata, ncnn::Mat::PIXEL_RGBA2RGB, width, height);
        AndroidBitmap_unlockPixels(env, bitmap);
    }

    // Normalize exactly as the model was trained: (x - 127.5) * 0.007843.
    const float mean_vals[3] = {127.5f, 127.5f, 127.5f};
    const float scale[3] = {0.007843f, 0.007843f, 0.007843f};
    in.substract_mean_normalize(mean_vals, scale);

    ncnn::Extractor ex = ncnn_net.create_extractor();
    // Encrypted model: layers/blobs are addressed by generated numeric ids
    // from MobileNetSSD_deploy.id.h (a plain-text model would use
    // ex.input("data", in) / ex.extract("detection_out", out)).
    ex.input(MobileNetSSD_deploy_param_id::BLOB_data, in);

    ncnn::Mat out;
    ex.extract(MobileNetSSD_deploy_param_id::BLOB_detection_out, out);

    const int output_wsize = out.w;
    const int output_hsize = out.h;

    // Copy the detection matrix into one contiguous buffer. The previous code
    // built a VLA of jfloat* holding element addresses and reinterpret_cast
    // the first pointer for SetFloatArrayRegion, which is undefined behavior
    // and only "worked" when ncnn rows happened to be contiguous; copying the
    // values row by row is well-defined regardless of row padding.
    std::vector<jfloat> output;
    output.reserve((size_t)output_wsize * output_hsize);
    for (int i = 0; i < output_hsize; i++) {
        const float* row = out.row(i);
        for (int j = 0; j < output_wsize; j++) {
            output.push_back(row[j]);
        }
    }

    // One float[] of length w*h; each group of w floats is one detection.
    jfloatArray jOutputData = env->NewFloatArray(output_wsize * output_hsize);
    if (jOutputData == nullptr) return nullptr;
    env->SetFloatArrayRegion(jOutputData, 0, output_wsize * output_hsize, output.data());
    return jOutputData;
}
}
================================================
FILE: MobileNetSSD_demo/app/src/main/cpp/include/allocator.h
================================================
// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2018 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
// NOTE(review): this vendored ncnn header was mangled during extraction --
// template parameter lists and container type arguments were stripped.
// Restored here to match the upstream ncnn allocator.h of the same vintage.
#ifndef NCNN_ALLOCATOR_H
#define NCNN_ALLOCATOR_H

#ifdef _WIN32
#define WIN32_LEAN_AND_MEAN
#include <windows.h>
#else
#include <pthread.h>
#endif

#include <stdlib.h>
#include <list>
#include <utility>

namespace ncnn {

// the alignment of all the allocated buffers
#define MALLOC_ALIGN 16

// Aligns a pointer to the specified number of bytes
// ptr Aligned pointer
// n Alignment size that must be a power of two
template<typename _Tp> static inline _Tp* alignPtr(_Tp* ptr, int n=(int)sizeof(_Tp))
{
    return (_Tp*)(((size_t)ptr + n-1) & -n);
}

// Aligns a buffer size to the specified number of bytes
// The function returns the minimum number that is greater or equal to sz and is divisible by n
// sz Buffer size to align
// n Alignment size that must be a power of two
static inline size_t alignSize(size_t sz, int n)
{
    return (sz + n-1) & -n;
}

// malloc with MALLOC_ALIGN alignment; the raw pointer is stashed just before
// the aligned block so fastFree can recover and free it.
static inline void* fastMalloc(size_t size)
{
    unsigned char* udata = (unsigned char*)malloc(size + sizeof(void*) + MALLOC_ALIGN);
    if (!udata)
        return 0;
    unsigned char** adata = alignPtr((unsigned char**)udata + 1, MALLOC_ALIGN);
    adata[-1] = udata;
    return adata;
}

static inline void fastFree(void* ptr)
{
    if (ptr)
    {
        unsigned char* udata = ((unsigned char**)ptr)[-1];
        free(udata);
    }
}

// exchange-add operation for atomic operations on reference counters
#if defined __INTEL_COMPILER && !(defined WIN32 || defined _WIN32)
// atomic increment on the linux version of the Intel(tm) compiler
#  define NCNN_XADD(addr, delta) (int)_InterlockedExchangeAdd(const_cast<void*>(reinterpret_cast<volatile void*>(addr)), delta)
#elif defined __GNUC__
#  if defined __clang__ && __clang_major__ >= 3 && !defined __ANDROID__ && !defined __EMSCRIPTEN__ && !defined(__CUDACC__)
#    ifdef __ATOMIC_ACQ_REL
#      define NCNN_XADD(addr, delta) __c11_atomic_fetch_add((_Atomic(int)*)(addr), delta, __ATOMIC_ACQ_REL)
#    else
#      define NCNN_XADD(addr, delta) __atomic_fetch_add((_Atomic(int)*)(addr), delta, 4)
#    endif
#  else
#    if defined __ATOMIC_ACQ_REL && !defined __clang__
// version for gcc >= 4.7
#      define NCNN_XADD(addr, delta) (int)__atomic_fetch_add((unsigned*)(addr), (unsigned)(delta), __ATOMIC_ACQ_REL)
#    else
#      define NCNN_XADD(addr, delta) (int)__sync_fetch_and_add((unsigned*)(addr), (unsigned)(delta))
#    endif
#  endif
#elif defined _MSC_VER && !defined RC_INVOKED
#  include <intrin.h>
#  define NCNN_XADD(addr, delta) (int)_InterlockedExchangeAdd((long volatile*)addr, delta)
#else
// thread-unsafe branch
static inline int NCNN_XADD(int* addr, int delta) { int tmp = *addr; *addr += delta; return tmp; }
#endif

#ifdef _WIN32
class Mutex
{
public:
    Mutex() { InitializeSRWLock(&srwlock); }
    ~Mutex() {}
    void lock() { AcquireSRWLockExclusive(&srwlock); }
    void unlock() { ReleaseSRWLockExclusive(&srwlock); }
private:
    // NOTE SRWLock is available from windows vista
    SRWLOCK srwlock;
};
#else // _WIN32
class Mutex
{
public:
    Mutex() { pthread_mutex_init(&mutex, 0); }
    ~Mutex() { pthread_mutex_destroy(&mutex); }
    void lock() { pthread_mutex_lock(&mutex); }
    void unlock() { pthread_mutex_unlock(&mutex); }
private:
    pthread_mutex_t mutex;
};
#endif // _WIN32

// Abstract allocator interface; implementations are in allocator.cpp.
class Allocator
{
public:
    virtual ~Allocator() = 0;
    virtual void* fastMalloc(size_t size) = 0;
    virtual void fastFree(void* ptr) = 0;
};

// Thread-safe pooling allocator: freed blocks are kept as "budgets" and
// reused for later requests of a similar size.
class PoolAllocator : public Allocator
{
public:
    PoolAllocator();
    ~PoolAllocator();

    // ratio range 0 ~ 1
    // default cr = 0.75
    void set_size_compare_ratio(float scr);

    // release all budgets immediately
    void clear();

    virtual void* fastMalloc(size_t size);
    virtual void fastFree(void* ptr);

private:
    Mutex budgets_lock;
    Mutex payouts_lock;
    unsigned int size_compare_ratio;// 0~256
    std::list< std::pair<size_t, void*> > budgets;
    std::list< std::pair<size_t, void*> > payouts;
};

// Same pooling strategy as PoolAllocator but without locking; for use from a
// single thread only.
class UnlockedPoolAllocator : public Allocator
{
public:
    UnlockedPoolAllocator();
    ~UnlockedPoolAllocator();

    // ratio range 0 ~ 1
    // default cr = 0.75
    void set_size_compare_ratio(float scr);

    // release all budgets immediately
    void clear();

    virtual void* fastMalloc(size_t size);
    virtual void fastFree(void* ptr);

private:
    unsigned int size_compare_ratio;// 0~256
    std::list< std::pair<size_t, void*> > budgets;
    std::list< std::pair<size_t, void*> > payouts;
};

} // namespace ncnn

#endif // NCNN_ALLOCATOR_H
================================================
FILE: MobileNetSSD_demo/app/src/main/cpp/include/benchmark.h
================================================
// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
#ifndef NCNN_BENCHMARK_H
#define NCNN_BENCHMARK_H
#include "platform.h"
#include "mat.h"
#include "layer.h"
namespace ncnn {
// get now timestamp in ms
double get_current_time();
#if NCNN_BENCHMARK
// Log per-layer timing; the four-blob overload also reports the bottom/top
// blob shapes. Only compiled in when NCNN_BENCHMARK is enabled in platform.h.
void benchmark(const Layer* layer, double start, double end);
void benchmark(const Layer* layer, const Mat& bottom_blob, Mat& top_blob, double start, double end);
#endif // NCNN_BENCHMARK
} // namespace ncnn
#endif // NCNN_BENCHMARK_H
================================================
FILE: MobileNetSSD_demo/app/src/main/cpp/include/blob.h
================================================
// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
// NOTE(review): include targets and the std::vector element type were
// stripped during extraction; restored to match upstream ncnn blob.h.
#ifndef NCNN_BLOB_H
#define NCNN_BLOB_H

#include <string>
#include <vector>

#include "platform.h"

namespace ncnn {

// One edge of the network graph: records which layer produces this blob and
// which layers consume it.
class Blob
{
public:
    // empty
    Blob();

public:
#if NCNN_STRING
    // blob name
    std::string name;
#endif // NCNN_STRING
    // layer index which produce this blob as output
    int producer;
    // layer index which need this blob as input
    std::vector<int> consumers;
};

} // namespace ncnn

#endif // NCNN_BLOB_H
================================================
FILE: MobileNetSSD_demo/app/src/main/cpp/include/cpu.h
================================================
// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
#ifndef NCNN_CPU_H
#define NCNN_CPU_H
// CPU feature probes and thread/affinity helpers (implemented in cpu.cpp).
namespace ncnn {
// test optional cpu features
// neon = armv7 neon or aarch64 asimd
int cpu_support_arm_neon();
// vfpv4 = armv7 fp16 + fma
int cpu_support_arm_vfpv4();
// asimdhp = aarch64 asimd half precision
int cpu_support_arm_asimdhp();
// cpu info
int get_cpu_count();
// bind all threads on little clusters if powersave enabled
// affacts HMP arch cpu like ARM big.LITTLE
// only implemented on android at the moment
// switching powersave is expensive and not thread-safe
// 0 = all cores enabled(default)
// 1 = only little clusters enabled
// 2 = only big clusters enabled
// return 0 if success for setter function
int get_cpu_powersave();
int set_cpu_powersave(int powersave);
// misc function wrapper for openmp routines
int get_omp_num_threads();
void set_omp_num_threads(int num_threads);
int get_omp_dynamic();
void set_omp_dynamic(int dynamic);
} // namespace ncnn
#endif // NCNN_CPU_H
================================================
FILE: MobileNetSSD_demo/app/src/main/cpp/include/layer.h
================================================
// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
#ifndef NCNN_LAYER_H
#define NCNN_LAYER_H
#include
#include
#include
#include "mat.h"
#include "modelbin.h"
#include "paramdict.h"
#include "platform.h"
namespace ncnn {
class Allocator;
class Option
{
public:
// default option
Option();
public:
// light mode
// intermediate blob will be recycled when enabled
// enabled by default
bool lightmode;
// thread count
// default value is the one returned by get_cpu_count()
int num_threads;
// blob memory allocator
Allocator* blob_allocator;
// workspace memory allocator
Allocator* workspace_allocator;
};
// the global default option
const Option& get_default_option();
int set_default_option(const Option& opt);
class Layer
{
public:
// empty
Layer();
// virtual destructor
virtual ~Layer();
// load layer specific parameter from parsed dict
// return 0 if success
virtual int load_param(const ParamDict& pd);
// load layer specific weight data from model binary
// return 0 if success
virtual int load_model(const ModelBin& mb);
public:
// one input and one output blob
bool one_blob_only;
// support inplace inference
bool support_inplace;
public:
// implement inference
// return 0 if success
virtual int forward(const std::vector& bottom_blobs, std::vector& top_blobs, const Option& opt = get_default_option()) const;
virtual int forward(const Mat& bottom_blob, Mat& top_blob, const Option& opt = get_default_option()) const;
// implement inplace inference
// return 0 if success
virtual int forward_inplace(std::vector& bottom_top_blobs, const Option& opt = get_default_option()) const;
virtual int forward_inplace(Mat& bottom_top_blob, const Option& opt = get_default_option()) const;
public:
#if NCNN_STRING
// layer type name
std::string type;
// layer name
std::string name;
#endif // NCNN_STRING
// blob index which this layer needs as input
std::vector bottoms;
// blob index which this layer produces as output
std::vector tops;
};
// layer factory function: returns a heap-allocated instance of a concrete Layer
typedef Layer* (*layer_creator_func)();

// one registry record mapping a layer type (and, with strings enabled, its
// human-readable name) to its factory function
struct layer_registry_entry
{
#if NCNN_STRING
    // layer type name
    const char* name;
#endif // NCNN_STRING
    // layer factory entry
    layer_creator_func creator;
};

#if NCNN_STRING
// get layer type from type name
int layer_to_index(const char* type);
// create layer from type name
Layer* create_layer(const char* type);
#endif // NCNN_STRING
// create layer from layer type
Layer* create_layer(int index);

// declares the factory function used with the registry above, e.g.
// DEFINE_LAYER_CREATOR(ReLU) defines ReLU_layer_creator()
#define DEFINE_LAYER_CREATOR(name) \
::ncnn::Layer* name##_layer_creator() { return new name; }
} // namespace ncnn
#endif // NCNN_LAYER_H
================================================
FILE: MobileNetSSD_demo/app/src/main/cpp/include/layer_type.h
================================================
// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
#ifndef NCNN_LAYER_TYPE_H
#define NCNN_LAYER_TYPE_H
namespace ncnn {
namespace LayerType {

// Built-in layer type ids; the enumerator list is generated by cmake
// into layer_type_enum.h and textually included here.
enum
{
#include "layer_type_enum.h"
    // ids >= CustomBit are reserved for user-registered custom layers
    CustomBit = (1<<8),
};

} // namespace LayerType
} // namespace ncnn
#endif // NCNN_LAYER_TYPE_H
================================================
FILE: MobileNetSSD_demo/app/src/main/cpp/include/layer_type_enum.h
================================================
// Layer Type Enum header
//
// This file is auto-generated by cmake, don't edit it.
AbsVal = 0,
ArgMax = 1,
BatchNorm = 2,
Bias = 3,
BNLL = 4,
Concat = 5,
Convolution = 6,
Crop = 7,
Deconvolution = 8,
Dropout = 9,
Eltwise = 10,
ELU = 11,
Embed = 12,
Exp = 13,
Flatten = 14,
InnerProduct = 15,
Input = 16,
Log = 17,
LRN = 18,
MemoryData = 19,
MVN = 20,
Pooling = 21,
Power = 22,
PReLU = 23,
Proposal = 24,
Reduction = 25,
ReLU = 26,
Reshape = 27,
ROIPooling = 28,
Scale = 29,
Sigmoid = 30,
Slice = 31,
Softmax = 32,
Split = 33,
SPP = 34,
TanH = 35,
Threshold = 36,
Tile = 37,
RNN = 38,
LSTM = 39,
BinaryOp = 40,
UnaryOp = 41,
ConvolutionDepthWise = 42,
Padding = 43,
Squeeze = 44,
ExpandDims = 45,
Normalize = 46,
Permute = 47,
PriorBox = 48,
DetectionOutput = 49,
Interp = 50,
DeconvolutionDepthWise = 51,
ShuffleChannel = 52,
InstanceNorm = 53,
Clip = 54,
Reorg = 55,
YoloDetectionOutput = 56,
Quantize = 57,
Dequantize = 58,
Yolov3DetectionOutput = 59,
PSROIPooling = 60,
ROIAlign = 61,
================================================
FILE: MobileNetSSD_demo/app/src/main/cpp/include/mat.h
================================================
// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
#ifndef NCNN_MAT_H
#define NCNN_MAT_H
#include <stdlib.h>
#include <string.h>
#if __ARM_NEON
#include <arm_neon.h>
#endif
#include "allocator.h"
#include "platform.h"
namespace ncnn {
// the three dimension matrix
// Reference-counted container for 1-D/2-D/3-D float/int8/fp16 data.
// Copies are shallow (shared buffer); clone() makes a deep copy.
// NOTE(review): the bare `template` tokens in the dump lost their parameter
// lists to <...>-stripping; they are restored here from upstream ncnn.
class Mat
{
public:
    // empty
    Mat();
    // vec
    Mat(int w, size_t elemsize = 4u, Allocator* allocator = 0);
    // image
    Mat(int w, int h, size_t elemsize = 4u, Allocator* allocator = 0);
    // dim
    Mat(int w, int h, int c, size_t elemsize = 4u, Allocator* allocator = 0);
    // copy
    Mat(const Mat& m);
    // external vec (data not owned, never freed)
    Mat(int w, void* data, size_t elemsize = 4u, Allocator* allocator = 0);
    // external image
    Mat(int w, int h, void* data, size_t elemsize = 4u, Allocator* allocator = 0);
    // external dim
    Mat(int w, int h, int c, void* data, size_t elemsize = 4u, Allocator* allocator = 0);
    // release
    ~Mat();
    // assign
    Mat& operator=(const Mat& m);
    // set all
    void fill(float v);
    void fill(int v);
    template <typename T> void fill(T v);
    // deep copy
    Mat clone(Allocator* allocator = 0) const;
    // reshape vec
    Mat reshape(int w, Allocator* allocator = 0) const;
    // reshape image
    Mat reshape(int w, int h, Allocator* allocator = 0) const;
    // reshape dim
    Mat reshape(int w, int h, int c, Allocator* allocator = 0) const;
    // allocate vec
    void create(int w, size_t elemsize = 4u, Allocator* allocator = 0);
    // allocate image
    void create(int w, int h, size_t elemsize = 4u, Allocator* allocator = 0);
    // allocate dim
    void create(int w, int h, int c, size_t elemsize = 4u, Allocator* allocator = 0);
    // refcount++
    void addref();
    // refcount--
    void release();

    bool empty() const;
    size_t total() const;

    // data reference
    Mat channel(int c);
    const Mat channel(int c) const;
    float* row(int y);
    const float* row(int y) const;
    template <typename T> T* row(int y);
    template <typename T> const T* row(int y) const;

    // range reference
    Mat channel_range(int c, int channels);
    const Mat channel_range(int c, int channels) const;
    Mat row_range(int y, int rows);
    const Mat row_range(int y, int rows) const;
    Mat range(int x, int n);
    const Mat range(int x, int n) const;

    // access raw data
    template <typename T> operator T*();
    template <typename T> operator const T*() const;

    // convenient access float vec element
    float& operator[](int i);
    const float& operator[](int i) const;

#if NCNN_PIXEL
    enum
    {
        PIXEL_CONVERT_SHIFT = 16,
        PIXEL_FORMAT_MASK = 0x0000ffff,
        PIXEL_CONVERT_MASK = 0xffff0000,

        PIXEL_RGB = 1,
        PIXEL_BGR = (1 << 1),
        PIXEL_GRAY = (1 << 2),
        PIXEL_RGBA = (1 << 3),

        // low 16 bits: source format, high 16 bits: destination format
        PIXEL_RGB2BGR = PIXEL_RGB | (PIXEL_BGR << PIXEL_CONVERT_SHIFT),
        PIXEL_RGB2GRAY = PIXEL_RGB | (PIXEL_GRAY << PIXEL_CONVERT_SHIFT),

        PIXEL_BGR2RGB = PIXEL_BGR | (PIXEL_RGB << PIXEL_CONVERT_SHIFT),
        PIXEL_BGR2GRAY = PIXEL_BGR | (PIXEL_GRAY << PIXEL_CONVERT_SHIFT),

        PIXEL_GRAY2RGB = PIXEL_GRAY | (PIXEL_RGB << PIXEL_CONVERT_SHIFT),
        PIXEL_GRAY2BGR = PIXEL_GRAY | (PIXEL_BGR << PIXEL_CONVERT_SHIFT),

        PIXEL_RGBA2RGB = PIXEL_RGBA | (PIXEL_RGB << PIXEL_CONVERT_SHIFT),
        PIXEL_RGBA2BGR = PIXEL_RGBA | (PIXEL_BGR << PIXEL_CONVERT_SHIFT),
        PIXEL_RGBA2GRAY = PIXEL_RGBA | (PIXEL_GRAY << PIXEL_CONVERT_SHIFT),
    };
    // convenient construct from pixel data
    static Mat from_pixels(const unsigned char* pixels, int type, int w, int h, Allocator* allocator = 0);
    // convenient construct from pixel data and resize to specific size
    static Mat from_pixels_resize(const unsigned char* pixels, int type, int w, int h, int target_width, int target_height, Allocator* allocator = 0);

    // convenient export to pixel data
    void to_pixels(unsigned char* pixels, int type) const;
    // convenient export to pixel data and resize to specific size
    void to_pixels_resize(unsigned char* pixels, int type, int target_width, int target_height) const;
#endif // NCNN_PIXEL

    // substract channel-wise mean values, then multiply by normalize values, pass 0 to skip
    void substract_mean_normalize(const float* mean_vals, const float* norm_vals);

    // convenient construct from half precisoin floating point data
    static Mat from_float16(const unsigned short* data, int size);

    // pointer to the data
    void* data;

    // pointer to the reference counter
    // when points to user-allocated data, the pointer is NULL
    int* refcount;

    // element size in bytes
    // 4 = float32/int32
    // 2 = float16
    // 1 = int8/uint8
    // 0 = empty
    size_t elemsize;

    // the allocator
    Allocator* allocator;

    // the dimensionality
    int dims;

    int w;
    int h;
    int c;

    // per-channel element stride (includes 16-byte alignment padding)
    size_t cstep;
};
// misc function
#if NCNN_PIXEL
// convert yuv420sp(nv21) to rgb, the fast approximate version
void yuv420sp2rgb(const unsigned char* yuv420sp, int w, int h, unsigned char* rgb);
// image pixel bilinear resize; c1..c4 denote channels per packed pixel
void resize_bilinear_c1(const unsigned char* src, int srcw, int srch, unsigned char* dst, int w, int h);
void resize_bilinear_c2(const unsigned char* src, int srcw, int srch, unsigned char* dst, int w, int h);
void resize_bilinear_c3(const unsigned char* src, int srcw, int srch, unsigned char* dst, int w, int h);
void resize_bilinear_c4(const unsigned char* src, int srcw, int srch, unsigned char* dst, int w, int h);
// image pixel bilinear resize, convenient wrapper for yuv420sp(nv21)
void resize_bilinear_yuv420sp(const unsigned char* src, int srcw, int srch, unsigned char* dst, int w, int h);
#endif // NCNN_PIXEL

// mat process
// border handling mode for copy_make_border
enum
{
    BORDER_CONSTANT = 0,
    BORDER_REPLICATE = 1,
};
// pad src on each side; constant borders are filled with v
void copy_make_border(const Mat& src, Mat& dst, int top, int bottom, int left, int right, int type, float v, Allocator* allocator = 0, int num_threads = 1);
// crop the given number of elements from each side
void copy_cut_border(const Mat& src, Mat& dst, int top, int bottom, int left, int right, Allocator* allocator = 0, int num_threads = 1);
// channel-wise bilinear resize to w x h
void resize_bilinear(const Mat& src, Mat& dst, int w, int h, Allocator* allocator = 0, int num_threads = 1);
// Empty Mat: no data, zero dimensions.
inline Mat::Mat()
    : data(0), refcount(0), elemsize(0), allocator(0), dims(0), w(0), h(0), c(0), cstep(0)
{
}

// 1-D owning constructor; create() fills in the remaining members.
inline Mat::Mat(int _w, size_t _elemsize, Allocator* allocator)
    : data(0), refcount(0), dims(0)
{
    create(_w, _elemsize, allocator);
}

// 2-D owning constructor.
inline Mat::Mat(int _w, int _h, size_t _elemsize, Allocator* allocator)
    : data(0), refcount(0), dims(0)
{
    create(_w, _h, _elemsize, allocator);
}

// 3-D owning constructor.
inline Mat::Mat(int _w, int _h, int _c, size_t _elemsize, Allocator* allocator)
    : data(0), refcount(0), dims(0)
{
    create(_w, _h, _c, _elemsize, allocator);
}

// Shallow copy: shares the buffer and bumps the reference count.
inline Mat::Mat(const Mat& m)
    : data(m.data), refcount(m.refcount), elemsize(m.elemsize), allocator(m.allocator), dims(m.dims)
{
    if (refcount)
        NCNN_XADD(refcount, 1);

    w = m.w;
    h = m.h;
    c = m.c;

    cstep = m.cstep;
}
// 1-D view over caller-owned memory; refcount stays NULL so release() never
// frees _data.
inline Mat::Mat(int _w, void* _data, size_t _elemsize, Allocator* _allocator)
    : data(_data), refcount(0), elemsize(_elemsize), allocator(_allocator), dims(1)
{
    w = _w;
    h = 1;
    c = 1;

    cstep = w;
}

// 2-D view over caller-owned memory.
inline Mat::Mat(int _w, int _h, void* _data, size_t _elemsize, Allocator* _allocator)
    : data(_data), refcount(0), elemsize(_elemsize), allocator(_allocator), dims(2)
{
    w = _w;
    h = _h;
    c = 1;

    cstep = w * h;
}

// 3-D view over caller-owned memory; channels are padded to 16-byte
// alignment, matching the owning create() layout.
inline Mat::Mat(int _w, int _h, int _c, void* _data, size_t _elemsize, Allocator* _allocator)
    : data(_data), refcount(0), elemsize(_elemsize), allocator(_allocator), dims(3)
{
    w = _w;
    h = _h;
    c = _c;

    cstep = alignSize(w * h * elemsize, 16) / elemsize;
}

// Destructor: drops this Mat's reference (frees the buffer if last owner).
inline Mat::~Mat()
{
    release();
}
// Assignment shares m's buffer (shallow copy, reference counted).
inline Mat& Mat::operator=(const Mat& m)
{
    // self-assignment guard
    if (this == &m)
        return *this;

    // addref m's buffer BEFORE releasing ours, in case the two share storage
    if (m.refcount)
        NCNN_XADD(m.refcount, 1);

    release();

    data = m.data;
    refcount = m.refcount;
    elemsize = m.elemsize;
    allocator = m.allocator;

    dims = m.dims;
    w = m.w;
    h = m.h;
    c = m.c;

    cstep = m.cstep;

    return *this;
}
// Fill every element (including channel padding, since total() = cstep*c)
// with the float value _v. On ARM the bulk is done 4 floats at a time with
// a NEON store loop in inline asm; the scalar tail loop handles the rest.
inline void Mat::fill(float _v)
{
    int size = total();
    float* ptr = (float*)data;

#if __ARM_NEON
    int nn = size >> 2;               // number of 4-float vector stores
    int remain = size - (nn << 2);    // scalar leftover
#else
    int remain = size;
#endif // __ARM_NEON

#if __ARM_NEON
    float32x4_t _c = vdupq_n_f32(_v); // broadcast _v into all 4 lanes
#if __aarch64__
    if (nn > 0)
    {
    asm volatile (
        "0:                             \n"
        "subs       %w0, %w0, #1        \n"
        "st1        {%4.4s}, [%1], #16  \n"
        "bne        0b                  \n"
        : "=r"(nn),     // %0
          "=r"(ptr)     // %1
        : "0"(nn),
          "1"(ptr),
          "w"(_c)       // %4
        : "cc", "memory"
    );
    }
#else
    if (nn > 0)
    {
    asm volatile(
        "0:                             \n"
        "subs       %0, #1              \n"
        "vst1.f32   {%e4-%f4}, [%1 :128]!\n"
        "bne        0b                  \n"
        : "=r"(nn),     // %0
          "=r"(ptr)     // %1
        : "0"(nn),
          "1"(ptr),
          "w"(_c)       // %4
        : "cc", "memory"
    );
    }
#endif // __aarch64__
#endif // __ARM_NEON
    for (; remain>0; remain--)
    {
        *ptr++ = _v;
    }
}
// Integer counterpart of fill(float): same NEON 4-lane store loop plus a
// scalar tail, writing the int32 value _v over total() elements.
inline void Mat::fill(int _v)
{
    int size = total();
    int* ptr = (int*)data;

#if __ARM_NEON
    int nn = size >> 2;               // number of 4-int vector stores
    int remain = size - (nn << 2);    // scalar leftover
#else
    int remain = size;
#endif // __ARM_NEON

#if __ARM_NEON
    int32x4_t _c = vdupq_n_s32(_v);   // broadcast _v into all 4 lanes
#if __aarch64__
    if (nn > 0)
    {
    asm volatile (
        "0:                             \n"
        "subs       %w0, %w0, #1        \n"
        "st1        {%4.4s}, [%1], #16  \n"
        "bne        0b                  \n"
        : "=r"(nn),     // %0
          "=r"(ptr)     // %1
        : "0"(nn),
          "1"(ptr),
          "w"(_c)       // %4
        : "cc", "memory"
    );
    }
#else
    if (nn > 0)
    {
    asm volatile(
        "0:                             \n"
        "subs       %0, #1              \n"
        "vst1.s32   {%e4-%f4}, [%1 :128]!\n"
        "bne        0b                  \n"
        : "=r"(nn),     // %0
          "=r"(ptr)     // %1
        : "0"(nn),
          "1"(ptr),
          "w"(_c)       // %4
        : "cc", "memory"
    );
    }
#endif // __aarch64__
#endif // __ARM_NEON
    for (; remain>0; remain--)
    {
        *ptr++ = _v;
    }
}
template
inline void Mat::fill(T _v)
{
int size = total();
T* ptr = (T*)data;
for (int i=0; i 0)
{
memcpy(m.data, data, total() * elemsize);
}
return m;
}
inline Mat Mat::reshape(int _w, Allocator* allocator) const
{
if (w * h * c != _w)
return Mat();
if (dims == 3 && cstep != (size_t)w * h)
{
Mat m;
m.create(_w, elemsize, allocator);
// flatten
for (int i=0; i 0)
{
size_t totalsize = alignSize(total() * elemsize, 4);
if (allocator)
data = allocator->fastMalloc(totalsize + (int)sizeof(*refcount));
else
data = fastMalloc(totalsize + (int)sizeof(*refcount));
refcount = (int*)(((unsigned char*)data) + totalsize);
*refcount = 1;
}
}
// Allocate a 2-D image of _w x _h elements; no-op if the current shape,
// element size and allocator already match. The refcount lives immediately
// after the 4-byte aligned payload in the same allocation.
inline void Mat::create(int _w, int _h, size_t _elemsize, Allocator* _allocator)
{
    if (dims == 2 && w == _w && h == _h && elemsize == _elemsize && allocator == _allocator)
        return;

    release();

    elemsize = _elemsize;
    allocator = _allocator;

    dims = 2;
    w = _w;
    h = _h;
    c = 1;

    cstep = w * h;

    if (total() > 0)
    {
        size_t totalsize = alignSize(total() * elemsize, 4);
        if (allocator)
            data = allocator->fastMalloc(totalsize + (int)sizeof(*refcount));
        else
            data = fastMalloc(totalsize + (int)sizeof(*refcount));

        refcount = (int*)(((unsigned char*)data) + totalsize);
        *refcount = 1;
    }
}
// Allocate a 3-D blob of _w x _h x _c elements. Each channel is padded so
// its byte size is a multiple of 16 (cstep), enabling aligned SIMD access.
inline void Mat::create(int _w, int _h, int _c, size_t _elemsize, Allocator* _allocator)
{
    if (dims == 3 && w == _w && h == _h && c == _c && elemsize == _elemsize && allocator == _allocator)
        return;

    release();

    elemsize = _elemsize;
    allocator = _allocator;

    dims = 3;
    w = _w;
    h = _h;
    c = _c;

    // per-channel stride in elements, rounded up to a 16-byte boundary
    cstep = alignSize(w * h * elemsize, 16) / elemsize;

    if (total() > 0)
    {
        size_t totalsize = alignSize(total() * elemsize, 4);
        if (allocator)
            data = allocator->fastMalloc(totalsize + (int)sizeof(*refcount));
        else
            data = fastMalloc(totalsize + (int)sizeof(*refcount));

        refcount = (int*)(((unsigned char*)data) + totalsize);
        *refcount = 1;
    }
}
// Bump the shared-buffer reference count. No-op for external (user-owned)
// data, where refcount is NULL.
inline void Mat::addref()
{
    if (refcount)
        NCNN_XADD(refcount, 1);
}

// Drop one reference; frees the buffer when this was the last owner, then
// resets this Mat to the empty state. External data is never freed here.
inline void Mat::release()
{
    // NCNN_XADD returns the PREVIOUS value, so 1 means we held the last ref
    if (refcount && NCNN_XADD(refcount, -1) == 1)
    {
        if (allocator)
            allocator->fastFree(data);
        else
            fastFree(data);
    }

    data = 0;
    elemsize = 0;

    dims = 0;
    w = 0;
    h = 0;
    c = 0;

    cstep = 0;

    refcount = 0;
}
// A Mat is empty when it holds no buffer or spans zero elements.
inline bool Mat::empty() const
{
    if (!data)
        return true;

    return total() == 0;
}

// Total element count including per-channel alignment padding
// (cstep is the padded per-channel stride).
inline size_t Mat::total() const
{
    return c * cstep;
}
inline Mat Mat::channel(int c)
{
return Mat(w, h, (unsigned char*)data + cstep * c * elemsize, elemsize, allocator);
}
inline const Mat Mat::channel(int c) const
{
return Mat(w, h, (unsigned char*)data + cstep * c * elemsize, elemsize, allocator);
}
inline float* Mat::row(int y)
{
return (float*)data + w * y;
}
inline const float* Mat::row(int y) const
{
return (const float*)data + w * y;
}
template
inline T* Mat::row(int y)
{
return (T*)data + w * y;
}
template
inline const T* Mat::row(int y) const
{
return (const T*)data + w * y;
}
// Zero-copy view of `channels` consecutive channels starting at _c.
inline Mat Mat::channel_range(int _c, int channels)
{
    return Mat(w, h, channels, (unsigned char*)data + cstep * _c * elemsize, elemsize, allocator);
}

inline const Mat Mat::channel_range(int _c, int channels) const
{
    return Mat(w, h, channels, (unsigned char*)data + cstep * _c * elemsize, elemsize, allocator);
}

// Zero-copy view of `rows` consecutive rows starting at y.
inline Mat Mat::row_range(int y, int rows)
{
    return Mat(w, rows, (unsigned char*)data + w * y * elemsize, elemsize, allocator);
}

inline const Mat Mat::row_range(int y, int rows) const
{
    return Mat(w, rows, (unsigned char*)data + w * y * elemsize, elemsize, allocator);
}

// Zero-copy 1-D view of n consecutive elements starting at x.
inline Mat Mat::range(int x, int n)
{
    return Mat(n, (unsigned char*)data + x * elemsize, elemsize, allocator);
}

inline const Mat Mat::range(int x, int n) const
{
    return Mat(n, (unsigned char*)data + x * elemsize, elemsize, allocator);
}
template
inline Mat::operator T*()
{
return (T*)data;
}
template
inline Mat::operator const T*() const
{
return (const T*)data;
}
// Convenient float element access by flat index (no bounds checking).
inline float& Mat::operator[](int i)
{
    return ((float*)data)[i];
}

inline const float& Mat::operator[](int i) const
{
    return ((const float*)data)[i];
}
} // namespace ncnn
#endif // NCNN_MAT_H
================================================
FILE: MobileNetSSD_demo/app/src/main/cpp/include/modelbin.h
================================================
// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
#ifndef NCNN_MODELBIN_H
#define NCNN_MODELBIN_H
#include
#include "mat.h"
#include "platform.h"
namespace ncnn {
class Net;
// Abstract source of layer weight blobs; concrete subclasses read from a
// stdio FILE, raw memory, or a pre-built Mat array.
class ModelBin
{
public:
    // element type
    // 0 = auto
    // 1 = float32
    // 2 = float16
    // 3 = int8
    // load vec
    virtual Mat load(int w, int type) const = 0;
    // load image (default implementation loads w*h then reshapes)
    virtual Mat load(int w, int h, int type) const;
    // load dim
    virtual Mat load(int w, int h, int c, int type) const;
};

#if NCNN_STDIO
// Reads weights sequentially from an open stdio stream.
class ModelBinFromStdio : public ModelBin
{
public:
    // construct from file
    ModelBinFromStdio(FILE* binfp);

    virtual Mat load(int w, int type) const;

protected:
    // borrowed stream; not closed by this class
    FILE* binfp;
};
#endif // NCNN_STDIO

// Reads weights from an in-memory blob, advancing the caller's pointer.
class ModelBinFromMemory : public ModelBin
{
public:
    // construct from external memory
    ModelBinFromMemory(const unsigned char*& mem);

    virtual Mat load(int w, int type) const;

protected:
    // reference to the caller's cursor, advanced as data is consumed
    const unsigned char*& mem;
};

// Serves weights from a pre-loaded Mat array, one Mat per load() call.
class ModelBinFromMatArray : public ModelBin
{
public:
    // construct from weight blob array
    ModelBinFromMatArray(const Mat* weights);

    virtual Mat load(int w, int type) const;

protected:
    // mutable: the cursor advances inside const load()
    mutable const Mat* weights;
};
} // namespace ncnn
#endif // NCNN_MODELBIN_H
================================================
FILE: MobileNetSSD_demo/app/src/main/cpp/include/net.h
================================================
// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
#ifndef NCNN_NET_H
#define NCNN_NET_H
#include
#include
#include "blob.h"
#include "layer.h"
#include "mat.h"
#include "platform.h"
namespace ncnn {
class Extractor;
class Net
{
public:
// empty init
Net();
// clear and destroy
~Net();
#if NCNN_STRING
// register custom layer by layer type name
// return 0 if success
int register_custom_layer(const char* type, layer_creator_func creator);
#endif // NCNN_STRING
// register custom layer by layer type
// return 0 if success
int register_custom_layer(int index, layer_creator_func creator);
#if NCNN_STDIO
#if NCNN_STRING
// load network structure from plain param file
// return 0 if success
int load_param(FILE* fp);
int load_param(const char* protopath);
int load_param_mem(const char* mem);
#endif // NCNN_STRING
// load network structure from binary param file
// return 0 if success
int load_param_bin(FILE* fp);
int load_param_bin(const char* protopath);
// load network weight data from model file
// return 0 if success
int load_model(FILE* fp);
int load_model(const char* modelpath);
#endif // NCNN_STDIO
// load network structure from external memory
// memory pointer must be 32-bit aligned
// return bytes consumed
int load_param(const unsigned char* mem);
// reference network weight data from external memory
// weight data is not copied but referenced
// so external memory should be retained when used
// memory pointer must be 32-bit aligned
// return bytes consumed
int load_model(const unsigned char* mem);
// unload network structure and weight data
void clear();
// construct an Extractor from network
Extractor create_extractor() const;
public:
// enable winograd convolution optimization
// improve convolution 3x3 stride1 performace, may consume more memory
// changes should be applied before loading network structure and weight
// enabled by default
int use_winograd_convolution;
// enable sgemm convolution optimization
// improve convolution 1x1 stride1 performace, may consume more memory
// changes should be applied before loading network structure and weight
// enabled by default
int use_sgemm_convolution;
// enable quantized int8 inference
// use low-precision int8 path for quantized model
// changes should be applied before loading network structure and weight
// enabled by default
int use_int8_inference;
protected:
friend class Extractor;
#if NCNN_STRING
int find_blob_index_by_name(const char* name) const;
int find_layer_index_by_name(const char* name) const;
int custom_layer_to_index(const char* type);
Layer* create_custom_layer(const char* type);
#endif // NCNN_STRING
Layer* create_custom_layer(int index);
int forward_layer(int layer_index, std::vector& blob_mats, Option& opt) const;
protected:
std::vector blobs;
std::vector layers;
std::vector custom_layer_registry;
};
class Extractor
{
public:
// enable light mode
// intermediate blob will be recycled when enabled
// enabled by default
void set_light_mode(bool enable);
// set thread count for this extractor
// this will overwrite the global setting
// default count is system depended
void set_num_threads(int num_threads);
// set blob memory allocator
void set_blob_allocator(Allocator* allocator);
// set workspace memory allocator
void set_workspace_allocator(Allocator* allocator);
#if NCNN_STRING
// set input by blob name
// return 0 if success
int input(const char* blob_name, const Mat& in);
// get result by blob name
// return 0 if success
int extract(const char* blob_name, Mat& feat);
#endif // NCNN_STRING
// set input by blob index
// return 0 if success
int input(int blob_index, const Mat& in);
// get result by blob index
// return 0 if success
int extract(int blob_index, Mat& feat);
protected:
friend Extractor Net::create_extractor() const;
Extractor(const Net* net, int blob_count);
private:
const Net* net;
std::vector blob_mats;
Option opt;
};
} // namespace ncnn
#endif // NCNN_NET_H
================================================
FILE: MobileNetSSD_demo/app/src/main/cpp/include/opencv.h
================================================
// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
#ifndef NCNN_OPENCV_H
#define NCNN_OPENCV_H
#include "platform.h"
#if NCNN_OPENCV
#include
#include
#include "mat.h"
// minimal opencv style data structure implementation
namespace cv
{
// Minimal stand-in for cv::Size: a width/height pair.
struct Size
{
    Size() : width(0), height(0) {}
    Size(int _w, int _h) : width(_w), height(_h) {}

    int width;
    int height;
};
// Minimal stand-in for cv::Rect_: axis-aligned rectangle with origin (x, y).
// NOTE(review): the bare `template` token lost its <typename _Tp> parameter
// list to extraction stripping; restored below.
template<typename _Tp>
struct Rect_
{
    Rect_() : x(0), y(0), width(0), height(0) {}
    Rect_(_Tp _x, _Tp _y, _Tp _w, _Tp _h) : x(_x), y(_y), width(_w), height(_h) {}

    _Tp x;
    _Tp y;
    _Tp width;
    _Tp height;

    // area
    _Tp area() const
    {
        return width * height;
    }
};
// Rectangle set operations, OpenCV-style.
// NOTE(review): bare `template` tokens and bare `Rect_` typedefs lost their
// <...> arguments to extraction stripping; restored below.

// a &= b : intersection; yields the empty rect when they do not overlap
template<typename _Tp> static inline Rect_<_Tp>& operator &= ( Rect_<_Tp>& a, const Rect_<_Tp>& b )
{
    _Tp x1 = std::max(a.x, b.x), y1 = std::max(a.y, b.y);
    a.width = std::min(a.x + a.width, b.x + b.width) - x1;
    a.height = std::min(a.y + a.height, b.y + b.height) - y1;
    a.x = x1; a.y = y1;
    if( a.width <= 0 || a.height <= 0 )
        a = Rect_<_Tp>();
    return a;
}

// a |= b : smallest rect covering both
template<typename _Tp> static inline Rect_<_Tp>& operator |= ( Rect_<_Tp>& a, const Rect_<_Tp>& b )
{
    _Tp x1 = std::min(a.x, b.x), y1 = std::min(a.y, b.y);
    a.width = std::max(a.x + a.width, b.x + b.width) - x1;
    a.height = std::max(a.y + a.height, b.y + b.height) - y1;
    a.x = x1; a.y = y1;
    return a;
}

template<typename _Tp> static inline Rect_<_Tp> operator & (const Rect_<_Tp>& a, const Rect_<_Tp>& b)
{
    Rect_<_Tp> c = a;
    return c &= b;
}

template<typename _Tp> static inline Rect_<_Tp> operator | (const Rect_<_Tp>& a, const Rect_<_Tp>& b)
{
    Rect_<_Tp> c = a;
    return c |= b;
}

typedef Rect_<int> Rect;
typedef Rect_<float> Rect2f;
// Minimal stand-in for cv::Point_: a 2-D coordinate pair.
// NOTE(review): template parameter list and typedef arguments restored after
// extraction stripping.
template<typename _Tp>
struct Point_
{
    Point_() : x(0), y(0) {}
    Point_(_Tp _x, _Tp _y) : x(_x), y(_y) {}

    _Tp x;
    _Tp y;
};

typedef Point_<int> Point;
typedef Point_<float> Point2f;
#define CV_8UC1 1
#define CV_8UC3 3
#define CV_8UC4 4
#define CV_32FC1 4
// Minimal stand-in for cv::Mat: reference-counted, row-major, interleaved
// 8-bit image (c = channels per pixel). Copies are shallow; clone() is deep.
struct Mat
{
    Mat() : data(0), refcount(0), rows(0), cols(0), c(0) {}

    Mat(int _rows, int _cols, int flags) : data(0), refcount(0)
    {
        create(_rows, _cols, flags);
    }

    // copy (shallow, shares buffer and bumps refcount)
    Mat(const Mat& m) : data(m.data), refcount(m.refcount)
    {
        if (refcount)
            NCNN_XADD(refcount, 1);

        rows = m.rows;
        cols = m.cols;
        c = m.c;
    }

    // wrap caller-owned memory; refcount stays NULL so it is never freed here
    Mat(int _rows, int _cols, int flags, void* _data) : data((unsigned char*)_data), refcount(0)
    {
        rows = _rows;
        cols = _cols;
        c = flags;
    }

    ~Mat()
    {
        release();
    }

    // assign (shallow); addref before release to survive aliasing
    Mat& operator=(const Mat& m)
    {
        if (this == &m)
            return *this;

        if (m.refcount)
            NCNN_XADD(m.refcount, 1);

        release();

        data = m.data;
        refcount = m.refcount;

        rows = m.rows;
        cols = m.cols;
        c = m.c;

        return *this;
    }

    // allocate rows*cols*flags bytes; flags doubles as the channel count
    void create(int _rows, int _cols, int flags)
    {
        release();

        rows = _rows;
        cols = _cols;
        c = flags;

        if (total() > 0)
        {
            // refcount address must be aligned, so we expand totalsize here
            size_t totalsize = (total() + 3) >> 2 << 2;

            data = (unsigned char*)ncnn::fastMalloc(totalsize + (int)sizeof(*refcount));
            refcount = (int*)(((unsigned char*)data) + totalsize);
            *refcount = 1;
        }
    }

    // drop one reference; free when last owner, then reset to empty
    void release()
    {
        // NCNN_XADD returns the previous value: 1 means we held the last ref
        if (refcount && NCNN_XADD(refcount, -1) == 1)
            ncnn::fastFree(data);

        data = 0;

        rows = 0;
        cols = 0;
        c = 0;

        refcount = 0;
    }

    // deep copy
    Mat clone() const
    {
        if (empty())
            return Mat();

        Mat m(rows, cols, c);

        if (total() > 0)
        {
            memcpy(m.data, data, total());
        }

        return m;
    }

    bool empty() const { return data == 0 || total() == 0; }

    int channels() const { return c; }

    // total size in bytes (1 byte per channel sample)
    size_t total() const { return cols * rows * c; }

    // pointer to the start of row y
    const unsigned char* ptr(int y) const { return data + y * cols * c; }

    unsigned char* ptr(int y) { return data + y * cols * c; }

    // roi: deep copy of the sub-rectangle (no bounds checking)
    Mat operator()( const Rect& roi ) const
    {
        if (empty())
            return Mat();

        Mat m(roi.height, roi.width, c);

        int sy = roi.y;
        for (int y = 0; y < roi.height; y++)
        {
            const unsigned char* sptr = ptr(sy) + roi.x * c;
            unsigned char* dptr = m.ptr(y);
            memcpy(dptr, sptr, roi.width * c);
            sy++;
        }

        return m;
    }

    unsigned char* data;

    // pointer to the reference counter;
    // when points to user-allocated data, the pointer is NULL
    int* refcount;

    int rows;
    int cols;
    int c;
};
#define CV_LOAD_IMAGE_GRAYSCALE 1
#define CV_LOAD_IMAGE_COLOR 3
Mat imread(const std::string& path, int flags);
void imwrite(const std::string& path, const Mat& m);
#if NCNN_PIXEL
void resize(const Mat& src, Mat& dst, const Size& size, float sw = 0.f, float sh = 0.f, int flags = 0);
#endif // NCNN_PIXEL
} // namespace cv
#endif // NCNN_OPENCV
#endif // NCNN_OPENCV_H
================================================
FILE: MobileNetSSD_demo/app/src/main/cpp/include/paramdict.h
================================================
// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
#ifndef NCNN_PARAMDICT_H
#define NCNN_PARAMDICT_H
#include
#include "mat.h"
#include "platform.h"
// at most 20 parameters
#define NCNN_MAX_PARAM_COUNT 20
namespace ncnn {
class Net;
// Key-value store for per-layer parameters parsed from a param file.
// Keys are small integer ids; values are int, float or Mat arrays.
class ParamDict
{
public:
    // empty
    ParamDict();

    // get int (returns def when id was not loaded)
    int get(int id, int def) const;
    // get float
    float get(int id, float def) const;
    // get array
    Mat get(int id, const Mat& def) const;

    // set int
    void set(int id, int i);
    // set float
    void set(int id, float f);
    // set array
    void set(int id, const Mat& v);

public:
    int use_winograd_convolution;
    int use_sgemm_convolution;
    int use_int8_inference;

protected:
    friend class Net;

    void clear();

#if NCNN_STDIO
#if NCNN_STRING
    int load_param(FILE* fp);
    int load_param_mem(const char*& mem);
#endif // NCNN_STRING
    int load_param_bin(FILE* fp);
#endif // NCNN_STDIO
    int load_param(const unsigned char*& mem);

protected:
    // one fixed-size slot per possible parameter id
    struct
    {
        // nonzero when this id was present in the param source
        int loaded;
        // scalar payload (int or float share storage)
        union { int i; float f; };
        // array payload
        Mat v;
    } params[NCNN_MAX_PARAM_COUNT];
};
} // namespace ncnn
#endif // NCNN_PARAMDICT_H
================================================
FILE: MobileNetSSD_demo/app/src/main/cpp/include/platform.h
================================================
// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
#ifndef NCNN_PLATFORM_H
#define NCNN_PLATFORM_H
#define NCNN_STDIO 1          // enable FILE*-based model/param loading
#define NCNN_STRING 1         // enable string layer/blob names
#define NCNN_OPENCV 0         // minimal cv:: compatibility layer disabled
#define NCNN_BENCHMARK 0      // per-layer timing disabled
#define NCNN_PIXEL 1          // enable pixel conversion/resize helpers
#define NCNN_PIXEL_ROTATE 0   // pixel rotation helpers disabled
#endif // NCNN_PLATFORM_H
================================================
FILE: MobileNetSSD_demo/app/src/main/java/com/example/che/mobilenetssd_demo/MainActivity.java
================================================
package com.example.che.mobilenetssd_demo;
import android.Manifest;
import android.app.Activity;
import android.content.Intent;
import android.content.pm.PackageManager;
import android.content.res.AssetManager;
import android.graphics.Bitmap;
import android.graphics.Canvas;
import android.graphics.Color;
import android.graphics.Paint;
import android.net.Uri;
import android.support.annotation.NonNull;
import android.support.annotation.Nullable;
import android.support.v4.app.ActivityCompat;
import android.support.v4.content.ContextCompat;
import android.support.v7.app.AppCompatActivity;
import android.os.Bundle;
import android.text.method.ScrollingMovementMethod;
import android.util.Log;
import android.view.View;
import android.widget.Button;
import android.widget.ImageView;
import android.widget.TextView;
import android.widget.Toast;
import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import com.bumptech.glide.Glide;
import com.bumptech.glide.load.engine.DiskCacheStrategy;
import com.bumptech.glide.request.RequestOptions;
public class MainActivity extends AppCompatActivity {
private static final String TAG = MainActivity.class.getName();
private static final int USE_PHOTO = 1001;
private String camera_image_path;
private ImageView show_image;
private TextView result_text;
private boolean load_result = false;
private int[] ddims = {1, 3, 300, 300}; //这里的维度的值要和train model的input 一一对应
private int model_index = 1;
private List resultLabel = new ArrayList<>();
private MobileNetssd mobileNetssd = new MobileNetssd(); //java接口实例化 下面直接利用java函数调用NDK c++函数
@Override
protected void onCreate(Bundle savedInstanceState) {
super.onCreate(savedInstanceState);
setContentView(R.layout.activity_main);
try
{
initMobileNetSSD();
} catch (IOException e) {
Log.e("MainActivity", "initMobileNetSSD error");
}
init_view();
readCacheLabelFromLocalFile();
}
/**
*
* MobileNetssd初始化,也就是把model文件进行加载
*/
private void initMobileNetSSD() throws IOException {
byte[] param = null;
byte[] bin = null;
{
//用io流读取二进制文件,最后存入到byte[]数组中
InputStream assetsInputStream = getAssets().open("MobileNetSSD_deploy.param.bin");// param: 网络结构文件
int available = assetsInputStream.available();
param = new byte[available];
int byteCode = assetsInputStream.read(param);
assetsInputStream.close();
}
{
//用io流读取二进制文件,最后存入到byte上,转换为int型
InputStream assetsInputStream = getAssets().open("MobileNetSSD_deploy.bin");//bin: model文件
int available = assetsInputStream.available();
bin = new byte[available];
int byteCode = assetsInputStream.read(bin);
assetsInputStream.close();
}
load_result = mobileNetssd.Init(param, bin);// 再将文件传入java的NDK接口(c++ 代码中的init接口 )
Log.d("load model", "MobileNetSSD_load_model_result:" + load_result);
}
// initialize view
private void init_view() {
request_permissions();
show_image = (ImageView) findViewById(R.id.show_image);
result_text = (TextView) findViewById(R.id.result_text);
result_text.setMovementMethod(ScrollingMovementMethod.getInstance());
Button use_photo = (Button) findViewById(R.id.use_photo);
// use photo click
use_photo.setOnClickListener(new View.OnClickListener() {
@Override
public void onClick(View view) {
if (!load_result) {
Toast.makeText(MainActivity.this, "never load model", Toast.LENGTH_SHORT).show();
return;
}
PhotoUtil.use_photo(MainActivity.this, USE_PHOTO);
}
});
}
// load label's name
private void readCacheLabelFromLocalFile() {
try {
AssetManager assetManager = getApplicationContext().getAssets();
BufferedReader reader = new BufferedReader(new InputStreamReader(assetManager.open("words.txt")));//这里是label的文件
String readLine = null;
while ((readLine = reader.readLine()) != null) {
resultLabel.add(readLine);
}
reader.close();
} catch (Exception e) {
Log.e("labelCache", "error " + e);
}
}
protected void onActivityResult(int requestCode, int resultCode, @Nullable Intent data) {
String image_path;
RequestOptions options = new RequestOptions().skipMemoryCache(true).diskCacheStrategy(DiskCacheStrategy.NONE);
if (resultCode == Activity.RESULT_OK) {
switch (requestCode) {
case USE_PHOTO:
if (data == null) {
Log.w(TAG, "user photo data is null");
return;
}
Uri image_uri = data.getData();
//Glide.with(MainActivity.this).load(image_uri).apply(options).into(show_image);
// get image path from uri
image_path = PhotoUtil.get_path_from_URI(MainActivity.this, image_uri);
// predict image
predict_image(image_path);
break;
}
}
}
// predict image
private void predict_image(String image_path) {
// picture to float array
Bitmap bmp = PhotoUtil.getScaleBitmap(image_path);
Bitmap rgba = bmp.copy(Bitmap.Config.ARGB_8888, true);
// resize
Bitmap input_bmp = Bitmap.createScaledBitmap(rgba, ddims[2], ddims[3], false);
try {
// Data format conversion takes too long
// Log.d("inputData", Arrays.toString(inputData));
long start = System.currentTimeMillis();
// get predict result
float[] result = mobileNetssd.Detect(input_bmp);
// time end
long end = System.currentTimeMillis();
Log.d(TAG, "origin predict result:" + Arrays.toString(result));
long time = end - start;
Log.d("result length", "length of result: " + String.valueOf(result.length));
// show predict result and time
// float[] r = get_max_result(result);
String show_text = "result:" + Arrays.toString(result) + "\nname:" + resultLabel.get((int) result[0]) + "\nprobability:" + result[1] + "\ntime:" + time + "ms" ;
result_text.setText(show_text);
// 画布配置
Canvas canvas = new Canvas(rgba);
//图像上画矩形
Paint paint = new Paint();
paint.setColor(Color.RED);
paint.setStyle(Paint.Style.STROKE);//不填充
paint.setStrokeWidth(5); //线的宽度
float get_finalresult[][] = TwoArry(result);
Log.d("zhuanhuan",get_finalresult+"");
int object_num = 0;
int num = result.length/6;// number of object
//continue to draw rect
for(object_num = 0; object_num < num; object_num++){
Log.d(TAG, "haha :" + Arrays.toString(get_finalresult));
// 画框
paint.setColor(Color.RED);
paint.setStyle(Paint.Style.STROKE);//不填充
paint.setStrokeWidth(5); //线的宽度
canvas.drawRect(get_finalresult[object_num][2] * rgba.getWidth(), get_finalresult[object_num][3] * rgba.getHeight(),
get_finalresult[object_num][4] * rgba.getWidth(), get_finalresult[object_num][5] * rgba.getHeight(), paint);
paint.setColor(Color.YELLOW);
paint.setStyle(Paint.Style.FILL);//不填充
paint.setStrokeWidth(1); //线的宽度
canvas.drawText(resultLabel.get((int) get_finalresult[object_num][0]) + "\n" + get_finalresult[object_num][1],
get_finalresult[object_num][2]*rgba.getWidth(),get_finalresult[object_num][3]*rgba.getHeight(),paint);
}
show_image.setImageBitmap(rgba);
} catch (Exception e) {
e.printStackTrace();
}
}
//一维数组转化为二维数组
public static float[][] TwoArry(float[] inputfloat){
int n = inputfloat.length;
int num = inputfloat.length/6;
float[][] outputfloat = new float[num][6];
int k = 0;
for(int i = 0; i < num ; i++)
{
int j = 0;
while(j<6)
{
outputfloat[i][j] = inputfloat[k];
k++;
j++;
}
}
return outputfloat;
}
/*
// get max probability label
private float[] get_max_result(float[] result) {
int num_rs = result.length / 6;
float maxProp = result[1];
int maxI = 0;
for(int i = 1; i permissionList = new ArrayList<>();
if (ContextCompat.checkSelfPermission(this, Manifest.permission.CAMERA) != PackageManager.PERMISSION_GRANTED) {
permissionList.add(Manifest.permission.CAMERA);
}
if (ContextCompat.checkSelfPermission(this, Manifest.permission.WRITE_EXTERNAL_STORAGE) != PackageManager.PERMISSION_GRANTED) {
permissionList.add(Manifest.permission.WRITE_EXTERNAL_STORAGE);
}
if (ContextCompat.checkSelfPermission(this, Manifest.permission.READ_EXTERNAL_STORAGE) != PackageManager.PERMISSION_GRANTED) {
permissionList.add(Manifest.permission.READ_EXTERNAL_STORAGE);
}
// if list is not empty will request permissions
if (!permissionList.isEmpty()) {
ActivityCompat.requestPermissions(this, permissionList.toArray(new String[permissionList.size()]), 1);
}
}
@Override
public void onRequestPermissionsResult(int requestCode, @NonNull String[] permissions, @NonNull int[] grantResults) {
super.onRequestPermissionsResult(requestCode, permissions, grantResults);
switch (requestCode) {
case 1:
if (grantResults.length > 0) {
for (int i = 0; i < grantResults.length; i++) {
int grantResult = grantResults[i];
if (grantResult == PackageManager.PERMISSION_DENIED) {
String s = permissions[i];
Toast.makeText(this, s + "permission was denied", Toast.LENGTH_SHORT).show();
}
}
}
break;
}
}
}
================================================
FILE: MobileNetSSD_demo/app/src/main/java/com/example/che/mobilenetssd_demo/MobileNetssd.java
================================================
package com.example.che.mobilenetssd_demo;
import android.graphics.Bitmap;
/**
 * Java interface to the native MobileNet-SSD detector. Mirrors the JNI entry
 * points implemented in MobileNetssd.cpp; the native library is loaded once
 * when this class is first used.
 */
public class MobileNetssd {
public native boolean Init(byte[] param, byte[] bin); // loads the model: param = network structure bytes, bin = weight bytes; returns true on success
public native float[] Detect(Bitmap bitmap); // runs detection; returns a flat float array of results (consumed 6 values per detection by MainActivity)
// Used to load the 'native-lib' library on application startup.
static {
System.loadLibrary("MobileNetssd");
}
}
================================================
FILE: MobileNetSSD_demo/app/src/main/java/com/example/che/mobilenetssd_demo/PhotoUtil.java
================================================
package com.example.che.mobilenetssd_demo;
import android.app.Activity;
import android.content.Context;
import android.content.Intent;
import android.database.Cursor;
import android.graphics.Bitmap;
import android.graphics.BitmapFactory;
import android.net.Uri;
import android.provider.MediaStore;
/** Helpers for picking an image from the gallery and decoding it at a bounded size. */
public class PhotoUtil {

    /**
     * Launches the system image picker; the result arrives in the caller's
     * onActivityResult under {@code requestCode}.
     */
    public static void use_photo(Activity activity, int requestCode) {
        Intent intent = new Intent(Intent.ACTION_PICK);
        intent.setType("image/*");
        activity.startActivityForResult(intent, requestCode);
    }

    /**
     * Resolves a content Uri to a file-system path via the MediaStore DATA column.
     * Fixes: the cursor is now closed in finally (the original leaked it when
     * getString threw), and an empty cursor or missing DATA column falls back to
     * uri.getPath() instead of crashing.
     */
    public static String get_path_from_URI(Context context, Uri uri) {
        String result = null;
        Cursor cursor = context.getContentResolver().query(uri, null, null, null, null);
        if (cursor != null) {
            try {
                if (cursor.moveToFirst()) {
                    int idx = cursor.getColumnIndex(MediaStore.Images.ImageColumns.DATA);
                    if (idx >= 0) {
                        result = cursor.getString(idx);
                    }
                }
            } finally {
                cursor.close();
            }
        }
        if (result == null) {
            result = uri.getPath();
        }
        return result;
    }

    /**
     * Decodes the image at {@code filePath}, downsampling by powers of two until
     * its width or height drops below 500px, to bound memory use.
     */
    public static Bitmap getScaleBitmap(String filePath) {
        BitmapFactory.Options opt = new BitmapFactory.Options();
        // First pass: read only the dimensions, no pixel allocation.
        opt.inJustDecodeBounds = true;
        BitmapFactory.decodeFile(filePath, opt);
        int bmpWidth = opt.outWidth;
        int bmpHeight = opt.outHeight;
        int maxSize = 500;
        // Double inSampleSize while both dimensions still exceed maxSize.
        opt.inSampleSize = 1;
        while (bmpWidth / opt.inSampleSize >= maxSize && bmpHeight / opt.inSampleSize >= maxSize) {
            opt.inSampleSize *= 2;
        }
        // Second pass: actually decode at the chosen sample size.
        opt.inJustDecodeBounds = false;
        return BitmapFactory.decodeFile(filePath, opt);
    }
}
================================================
FILE: MobileNetSSD_demo/app/src/main/res/drawable/ic_launcher_background.xml
================================================
================================================
FILE: MobileNetSSD_demo/app/src/main/res/drawable-v24/ic_launcher_foreground.xml
================================================
================================================
FILE: MobileNetSSD_demo/app/src/main/res/layout/activity_main.xml
================================================
================================================
FILE: MobileNetSSD_demo/app/src/main/res/mipmap-anydpi-v26/ic_launcher.xml
================================================
================================================
FILE: MobileNetSSD_demo/app/src/main/res/mipmap-anydpi-v26/ic_launcher_round.xml
================================================
================================================
FILE: MobileNetSSD_demo/app/src/main/res/values/colors.xml
================================================
#008577#00574B#D81B60
================================================
FILE: MobileNetSSD_demo/app/src/main/res/values/strings.xml
================================================
MobileNetSSD_demo
================================================
FILE: MobileNetSSD_demo/app/src/main/res/values/styles.xml
================================================
================================================
FILE: MobileNetSSD_demo/app/src/test/java/com/example/che/mobilenetssd_demo/ExampleUnitTest.java
================================================
package com.example.che.mobilenetssd_demo;
import org.junit.Test;
import static org.junit.Assert.*;
/**
* Example local unit test, which will execute on the development machine (host).
*
* @see Testing documentation
*/
public class ExampleUnitTest {
    /** Sanity check that basic integer arithmetic behaves as expected on the host JVM. */
    @Test
    public void addition_isCorrect() {
        final int expected = 4;
        final int actual = 2 + 2;
        assertEquals(expected, actual);
    }
}
================================================
FILE: MobileNetSSD_demo/build.gradle
================================================
// Top-level build file where you can add configuration options common to all sub-projects/modules.
buildscript {
    repositories {
        google()
        // Fix: JCenter has been sunset (read-only); mavenCentral() is added as the
        // primary fallback while jcenter() is kept for any legacy-only artifacts.
        mavenCentral()
        jcenter()
    }
    dependencies {
        classpath 'com.android.tools.build:gradle:3.2.1'
        // NOTE: Do not place your application dependencies here; they belong
        // in the individual module build.gradle files
    }
}
allprojects {
    repositories {
        google()
        mavenCentral()
        jcenter()
    }
}
// Removes the root build directory when running `gradlew clean`.
task clean(type: Delete) {
    delete rootProject.buildDir
}
================================================
FILE: MobileNetSSD_demo/gradle/wrapper/gradle-wrapper.properties
================================================
# Gradle wrapper settings: where downloaded distributions are cached and which
# Gradle version the gradlew scripts download and run.
distributionBase=GRADLE_USER_HOME
distributionPath=wrapper/dists
# Gradle 4.6 — presumably chosen to match the Android Gradle Plugin 3.2.1 used
# by this project; verify before upgrading either side.
distributionUrl=https\://services.gradle.org/distributions/gradle-4.6-all.zip
zipStoreBase=GRADLE_USER_HOME
zipStorePath=wrapper/dists
================================================
FILE: MobileNetSSD_demo/gradle.properties
================================================
# Project-wide Gradle settings.
# IDE (e.g. Android Studio) users:
# Gradle settings configured through the IDE *will override*
# any settings specified in this file.
# For more details on how to configure your build environment visit
# http://www.gradle.org/docs/current/userguide/build_environment.html
# Specifies the JVM arguments used for the daemon process.
# The setting is particularly useful for tweaking memory settings.
org.gradle.jvmargs=-Xmx1536m
# When configured, Gradle will run in incubating parallel mode.
# This option should only be used with decoupled projects. More details, visit
# http://www.gradle.org/docs/current/userguide/multi_project_builds.html#sec:decoupled_projects
# org.gradle.parallel=true
================================================
FILE: MobileNetSSD_demo/gradlew
================================================
#!/usr/bin/env sh
##############################################################################
##
## Gradle start up script for UN*X
##
##############################################################################
# Attempt to set APP_HOME
# Resolve links: $0 may be a link
PRG="$0"
# Need this for relative symlinks.
while [ -h "$PRG" ] ; do
ls=`ls -ld "$PRG"`
link=`expr "$ls" : '.*-> \(.*\)$'`
if expr "$link" : '/.*' > /dev/null; then
PRG="$link"
else
PRG=`dirname "$PRG"`"/$link"
fi
done
SAVED="`pwd`"
cd "`dirname \"$PRG\"`/" >/dev/null
APP_HOME="`pwd -P`"
cd "$SAVED" >/dev/null
APP_NAME="Gradle"
APP_BASE_NAME=`basename "$0"`
# Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script.
DEFAULT_JVM_OPTS=""
# Use the maximum available, or set MAX_FD != -1 to use that value.
MAX_FD="maximum"
warn () {
    # Print a warning message (all arguments joined by spaces) to stdout.
    printf '%s\n' "$*"
}
die () {
    # Print the error message framed by blank lines, then abort with status 1.
    printf '\n%s\n\n' "$*"
    exit 1
}
# OS specific support (must be 'true' or 'false').
cygwin=false
msys=false
darwin=false
nonstop=false
case "`uname`" in
CYGWIN* )
cygwin=true
;;
Darwin* )
darwin=true
;;
MINGW* )
msys=true
;;
NONSTOP* )
nonstop=true
;;
esac
CLASSPATH=$APP_HOME/gradle/wrapper/gradle-wrapper.jar
# Determine the Java command to use to start the JVM.
if [ -n "$JAVA_HOME" ] ; then
if [ -x "$JAVA_HOME/jre/sh/java" ] ; then
# IBM's JDK on AIX uses strange locations for the executables
JAVACMD="$JAVA_HOME/jre/sh/java"
else
JAVACMD="$JAVA_HOME/bin/java"
fi
if [ ! -x "$JAVACMD" ] ; then
die "ERROR: JAVA_HOME is set to an invalid directory: $JAVA_HOME
Please set the JAVA_HOME variable in your environment to match the
location of your Java installation."
fi
else
JAVACMD="java"
which java >/dev/null 2>&1 || die "ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH.
Please set the JAVA_HOME variable in your environment to match the
location of your Java installation."
fi
# Increase the maximum file descriptors if we can.
if [ "$cygwin" = "false" -a "$darwin" = "false" -a "$nonstop" = "false" ] ; then
MAX_FD_LIMIT=`ulimit -H -n`
if [ $? -eq 0 ] ; then
if [ "$MAX_FD" = "maximum" -o "$MAX_FD" = "max" ] ; then
MAX_FD="$MAX_FD_LIMIT"
fi
ulimit -n $MAX_FD
if [ $? -ne 0 ] ; then
warn "Could not set maximum file descriptor limit: $MAX_FD"
fi
else
warn "Could not query maximum file descriptor limit: $MAX_FD_LIMIT"
fi
fi
# For Darwin, add options to specify how the application appears in the dock
if $darwin; then
GRADLE_OPTS="$GRADLE_OPTS \"-Xdock:name=$APP_NAME\" \"-Xdock:icon=$APP_HOME/media/gradle.icns\""
fi
# For Cygwin, switch paths to Windows format before running java
if $cygwin ; then
APP_HOME=`cygpath --path --mixed "$APP_HOME"`
CLASSPATH=`cygpath --path --mixed "$CLASSPATH"`
JAVACMD=`cygpath --unix "$JAVACMD"`
# We build the pattern for arguments to be converted via cygpath
ROOTDIRSRAW=`find -L / -maxdepth 1 -mindepth 1 -type d 2>/dev/null`
SEP=""
for dir in $ROOTDIRSRAW ; do
ROOTDIRS="$ROOTDIRS$SEP$dir"
SEP="|"
done
OURCYGPATTERN="(^($ROOTDIRS))"
# Add a user-defined pattern to the cygpath arguments
if [ "$GRADLE_CYGPATTERN" != "" ] ; then
OURCYGPATTERN="$OURCYGPATTERN|($GRADLE_CYGPATTERN)"
fi
# Now convert the arguments - kludge to limit ourselves to /bin/sh
i=0
for arg in "$@" ; do
CHECK=`echo "$arg"|egrep -c "$OURCYGPATTERN" -`
CHECK2=`echo "$arg"|egrep -c "^-"` ### Determine if an option
if [ $CHECK -ne 0 ] && [ $CHECK2 -eq 0 ] ; then ### Added a condition
eval `echo args$i`=`cygpath --path --ignore --mixed "$arg"`
else
eval `echo args$i`="\"$arg\""
fi
i=$((i+1))
done
case $i in
(0) set -- ;;
(1) set -- "$args0" ;;
(2) set -- "$args0" "$args1" ;;
(3) set -- "$args0" "$args1" "$args2" ;;
(4) set -- "$args0" "$args1" "$args2" "$args3" ;;
(5) set -- "$args0" "$args1" "$args2" "$args3" "$args4" ;;
(6) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" ;;
(7) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" ;;
(8) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" "$args7" ;;
(9) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" "$args7" "$args8" ;;
esac
fi
# Escape application args
save () {
# Emit each argument single-quoted (embedded quotes escaped as '\'') followed by
# " \" on its own line, so the collected output can later be spliced back into
# the argument list via `eval set --`. The sed quoting is deliberate; do not
# reformat.
for i do printf %s\\n "$i" | sed "s/'/'\\\\''/g;1s/^/'/;\$s/\$/' \\\\/" ; done
echo " "
}
APP_ARGS=$(save "$@")
# Collect all arguments for the java command, following the shell quoting and substitution rules
eval set -- $DEFAULT_JVM_OPTS $JAVA_OPTS $GRADLE_OPTS "\"-Dorg.gradle.appname=$APP_BASE_NAME\"" -classpath "\"$CLASSPATH\"" org.gradle.wrapper.GradleWrapperMain "$APP_ARGS"
# by default we should be in the correct project dir, but when run from Finder on Mac, the cwd is wrong
if [ "$(uname)" = "Darwin" ] && [ "$HOME" = "$PWD" ]; then
cd "$(dirname "$0")"
fi
exec "$JAVACMD" "$@"
================================================
FILE: MobileNetSSD_demo/gradlew.bat
================================================
@if "%DEBUG%" == "" @echo off
@rem ##########################################################################
@rem
@rem Gradle startup script for Windows
@rem
@rem ##########################################################################
@rem Set local scope for the variables with windows NT shell
if "%OS%"=="Windows_NT" setlocal
set DIRNAME=%~dp0
if "%DIRNAME%" == "" set DIRNAME=.
set APP_BASE_NAME=%~n0
set APP_HOME=%DIRNAME%
@rem Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script.
set DEFAULT_JVM_OPTS=
@rem Find java.exe
if defined JAVA_HOME goto findJavaFromJavaHome
set JAVA_EXE=java.exe
%JAVA_EXE% -version >NUL 2>&1
if "%ERRORLEVEL%" == "0" goto init
echo.
echo ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH.
echo.
echo Please set the JAVA_HOME variable in your environment to match the
echo location of your Java installation.
goto fail
:findJavaFromJavaHome
set JAVA_HOME=%JAVA_HOME:"=%
set JAVA_EXE=%JAVA_HOME%/bin/java.exe
if exist "%JAVA_EXE%" goto init
echo.
echo ERROR: JAVA_HOME is set to an invalid directory: %JAVA_HOME%
echo.
echo Please set the JAVA_HOME variable in your environment to match the
echo location of your Java installation.
goto fail
:init
@rem Get command-line arguments, handling Windows variants
if not "%OS%" == "Windows_NT" goto win9xME_args
:win9xME_args
@rem Slurp the command line arguments.
set CMD_LINE_ARGS=
set _SKIP=2
:win9xME_args_slurp
if "x%~1" == "x" goto execute
set CMD_LINE_ARGS=%*
:execute
@rem Setup the command line
set CLASSPATH=%APP_HOME%\gradle\wrapper\gradle-wrapper.jar
@rem Execute Gradle
"%JAVA_EXE%" %DEFAULT_JVM_OPTS% %JAVA_OPTS% %GRADLE_OPTS% "-Dorg.gradle.appname=%APP_BASE_NAME%" -classpath "%CLASSPATH%" org.gradle.wrapper.GradleWrapperMain %CMD_LINE_ARGS%
:end
@rem End local scope for the variables with windows NT shell
if "%ERRORLEVEL%"=="0" goto mainEnd
:fail
rem Set variable GRADLE_EXIT_CONSOLE if you need the _script_ return code instead of
rem the _cmd.exe /c_ return code!
if not "" == "%GRADLE_EXIT_CONSOLE%" exit 1
exit /b 1
:mainEnd
if "%OS%"=="Windows_NT" endlocal
:omega
================================================
FILE: MobileNetSSD_demo/settings.gradle
================================================
include ':app'
================================================
FILE: MobileNetSSD_demo_single/.gitignore
================================================
*.iml
.gradle
/local.properties
/.idea/caches/build_file_checksums.ser
/.idea/libraries
/.idea/modules.xml
/.idea/workspace.xml
.DS_Store
/build
/captures
.externalNativeBuild
================================================
FILE: MobileNetSSD_demo_single/.idea/codeStyles/Project.xml
================================================
================================================
FILE: MobileNetSSD_demo_single/app/.gitignore
================================================
/build
================================================
FILE: MobileNetSSD_demo_single/app/CMakeLists.txt
================================================
# For more information about using CMake with Android Studio, read the
# documentation: https://d.android.com/studio/projects/add-native-code.html

# Sets the minimum version of CMake required to build the native library.
cmake_minimum_required(VERSION 3.4.1)

# Import the prebuilt static ncnn library shipped under jniLibs.
# Fixes: use CMAKE_CURRENT_SOURCE_DIR (robust if this file is ever included as a
# subproject), quote the path, and keep the path variable's name distinct from
# the imported target's name (the original reused `ncnn_lib` for both).
set(ncnn_lib_path "${CMAKE_CURRENT_SOURCE_DIR}/src/main/jniLibs/armeabi-v7a/libncnn.a")
add_library(ncnn_lib STATIC IMPORTED)
set_target_properties(ncnn_lib PROPERTIES IMPORTED_LOCATION "${ncnn_lib_path}")

# The JNI shared library; its name must match System.loadLibrary("MobileNetssd")
# in MobileNetssd.java.
add_library(MobileNetssd
    SHARED
    src/main/cpp/MobileNetssd.cpp)

# Locate the NDK log library (provides __android_log_print).
find_library(log-lib log)

# Link the JNI library against its implementation dependencies.
# PRIVATE: these are implementation details, not usage requirements of consumers.
target_link_libraries(MobileNetssd
    PRIVATE
        ncnn_lib      # prebuilt ncnn inference engine (imported above)
        jnigraphics   # NDK bitmap access helpers (android/bitmap.h)
        ${log-lib})   # NDK logging
================================================
FILE: MobileNetSSD_demo_single/app/build.gradle
================================================
apply plugin: 'com.android.application'
android {
compileSdkVersion 28
defaultConfig {
applicationId "com.example.che.mobilenetssd_demo"
minSdkVersion 15
targetSdkVersion 28
versionCode 1
versionName "1.0"
testInstrumentationRunner "android.support.test.runner.AndroidJUnitRunner"
externalNativeBuild {
cmake {
cppFlags "-std=c++11 -fopenmp"// C++11 plus OpenMP multi-threading; required for the ncnn native build
abiFilters "armeabi-v7a" // target ABI; must match the jniLibs/armeabi-v7a/libncnn.a prebuilt
}
}
}
buildTypes {
release {
minifyEnabled false
proguardFiles getDefaultProguardFile('proguard-android.txt'), 'proguard-rules.pro'
}
}
externalNativeBuild {
cmake {
path "CMakeLists.txt"
}
}
// Required addition: pulls in the prebuilt .a produced by ncnn's `make install`.
// NOTE(review): jni.srcDirs points at 'src/cpp' while the sources live under
// src/main/cpp per CMakeLists.txt — presumably vestigial; verify before relying on it.
sourceSets {
main {
jniLibs.srcDirs = ["src/main/jniLibs"]
jni.srcDirs = ['src/cpp']
}
}
}
dependencies {
implementation fileTree(dir: 'libs', include: ['*.jar'])
implementation 'com.android.support:appcompat-v7:28.0.0'
implementation 'com.android.support.constraint:constraint-layout:1.1.3'
testImplementation 'junit:junit:4.12'
implementation 'com.github.bumptech.glide:glide:4.3.1' // Glide image loading, referenced by MainActivity
androidTestImplementation 'com.android.support.test:runner:1.0.2'
androidTestImplementation 'com.android.support.test.espresso:espresso-core:3.0.2'
}
================================================
FILE: MobileNetSSD_demo_single/app/proguard-rules.pro
================================================
# Add project specific ProGuard rules here.
# You can control the set of applied configuration files using the
# proguardFiles setting in build.gradle.
#
# For more details, see
# http://developer.android.com/guide/developing/tools/proguard.html
# If your project uses WebView with JS, uncomment the following
# and specify the fully qualified class name to the JavaScript interface
# class:
#-keepclassmembers class fqcn.of.javascript.interface.for.webview {
# public *;
#}
# Uncomment this to preserve the line number information for
# debugging stack traces.
#-keepattributes SourceFile,LineNumberTable
# If you keep the line number information, uncomment this to
# hide the original source file name.
#-renamesourcefileattribute SourceFile
================================================
FILE: MobileNetSSD_demo_single/app/src/androidTest/java/com/example/che/mobilenetssd_demo/ExampleInstrumentedTest.java
================================================
package com.example.che.mobilenetssd_demo;
import android.content.Context;
import android.support.test.InstrumentationRegistry;
import android.support.test.runner.AndroidJUnit4;
import org.junit.Test;
import org.junit.runner.RunWith;
import static org.junit.Assert.*;
/**
* Instrumented test, which will execute on an Android device.
*
* @see Testing documentation
*/
@RunWith(AndroidJUnit4.class)
public class ExampleInstrumentedTest {
    /** Verifies the instrumentation targets the expected application package. */
    @Test
    public void useAppContext() {
        // Context of the app under test.
        final Context appContext = InstrumentationRegistry.getTargetContext();
        final String packageName = appContext.getPackageName();
        assertEquals("com.example.che.mobilenetssd_demo", packageName);
    }
}
================================================
FILE: MobileNetSSD_demo_single/app/src/main/AndroidManifest.xml
================================================
================================================
FILE: MobileNetSSD_demo_single/app/src/main/assets/words.txt
================================================
background
aeroplane
bicycle
bird
boat
bottle
bus
car
cat
chair
cow
diningtable
dog
horse
motorbike
person
pottedplant
sheep
sofa
train
tvmonitor
================================================
FILE: MobileNetSSD_demo_single/app/src/main/cpp/MobileNetSSD_deploy.id.h
================================================
#ifndef NCNN_INCLUDE_GUARD_MobileNetSSD_deploy_id_h
#define NCNN_INCLUDE_GUARD_MobileNetSSD_deploy_id_h
namespace MobileNetSSD_deploy_param_id {
const int LAYER_input = 0;
const int BLOB_data = 0;
const int LAYER_splitncnn_0 = 1;
const int BLOB_data_splitncnn_0 = 1;
const int BLOB_data_splitncnn_1 = 2;
const int BLOB_data_splitncnn_2 = 3;
const int BLOB_data_splitncnn_3 = 4;
const int BLOB_data_splitncnn_4 = 5;
const int BLOB_data_splitncnn_5 = 6;
const int BLOB_data_splitncnn_6 = 7;
const int LAYER_conv0 = 2;
const int BLOB_conv0 = 8;
const int LAYER_conv0_relu = 3;
const int BLOB_conv0_conv0_relu = 9;
const int LAYER_conv1_dw = 4;
const int BLOB_conv1_dw = 10;
const int LAYER_conv1_dw_relu = 5;
const int BLOB_conv1_dw_conv1_dw_relu = 11;
const int LAYER_conv1 = 6;
const int BLOB_conv1 = 12;
const int LAYER_conv1_relu = 7;
const int BLOB_conv1_conv1_relu = 13;
const int LAYER_conv2_dw = 8;
const int BLOB_conv2_dw = 14;
const int LAYER_conv2_dw_relu = 9;
const int BLOB_conv2_dw_conv2_dw_relu = 15;
const int LAYER_conv2 = 10;
const int BLOB_conv2 = 16;
const int LAYER_conv2_relu = 11;
const int BLOB_conv2_conv2_relu = 17;
const int LAYER_conv3_dw = 12;
const int BLOB_conv3_dw = 18;
const int LAYER_conv3_dw_relu = 13;
const int BLOB_conv3_dw_conv3_dw_relu = 19;
const int LAYER_conv3 = 14;
const int BLOB_conv3 = 20;
const int LAYER_conv3_relu = 15;
const int BLOB_conv3_conv3_relu = 21;
const int LAYER_conv4_dw = 16;
const int BLOB_conv4_dw = 22;
const int LAYER_conv4_dw_relu = 17;
const int BLOB_conv4_dw_conv4_dw_relu = 23;
const int LAYER_conv4 = 18;
const int BLOB_conv4 = 24;
const int LAYER_conv4_relu = 19;
const int BLOB_conv4_conv4_relu = 25;
const int LAYER_conv5_dw = 20;
const int BLOB_conv5_dw = 26;
const int LAYER_conv5_dw_relu = 21;
const int BLOB_conv5_dw_conv5_dw_relu = 27;
const int LAYER_conv5 = 22;
const int BLOB_conv5 = 28;
const int LAYER_conv5_relu = 23;
const int BLOB_conv5_conv5_relu = 29;
const int LAYER_conv6_dw = 24;
const int BLOB_conv6_dw = 30;
const int LAYER_conv6_dw_relu = 25;
const int BLOB_conv6_dw_conv6_dw_relu = 31;
const int LAYER_conv6 = 26;
const int BLOB_conv6 = 32;
const int LAYER_conv6_relu = 27;
const int BLOB_conv6_conv6_relu = 33;
const int LAYER_conv7_dw = 28;
const int BLOB_conv7_dw = 34;
const int LAYER_conv7_dw_relu = 29;
const int BLOB_conv7_dw_conv7_dw_relu = 35;
const int LAYER_conv7 = 30;
const int BLOB_conv7 = 36;
const int LAYER_conv7_relu = 31;
const int BLOB_conv7_conv7_relu = 37;
const int LAYER_conv8_dw = 32;
const int BLOB_conv8_dw = 38;
const int LAYER_conv8_dw_relu = 33;
const int BLOB_conv8_dw_conv8_dw_relu = 39;
const int LAYER_conv8 = 34;
const int BLOB_conv8 = 40;
const int LAYER_conv8_relu = 35;
const int BLOB_conv8_conv8_relu = 41;
const int LAYER_conv9_dw = 36;
const int BLOB_conv9_dw = 42;
const int LAYER_conv9_dw_relu = 37;
const int BLOB_conv9_dw_conv9_dw_relu = 43;
const int LAYER_conv9 = 38;
const int BLOB_conv9 = 44;
const int LAYER_conv9_relu = 39;
const int BLOB_conv9_conv9_relu = 45;
const int LAYER_conv10_dw = 40;
const int BLOB_conv10_dw = 46;
const int LAYER_conv10_dw_relu = 41;
const int BLOB_conv10_dw_conv10_dw_relu = 47;
const int LAYER_conv10 = 42;
const int BLOB_conv10 = 48;
const int LAYER_conv10_relu = 43;
const int BLOB_conv10_conv10_relu = 49;
const int LAYER_conv11_dw = 44;
const int BLOB_conv11_dw = 50;
const int LAYER_conv11_dw_relu = 45;
const int BLOB_conv11_dw_conv11_dw_relu = 51;
const int LAYER_conv11 = 46;
const int BLOB_conv11 = 52;
const int LAYER_conv11_relu = 47;
const int BLOB_conv11_conv11_relu = 53;
const int LAYER_splitncnn_1 = 48;
const int BLOB_conv11_conv11_relu_splitncnn_0 = 54;
const int BLOB_conv11_conv11_relu_splitncnn_1 = 55;
const int BLOB_conv11_conv11_relu_splitncnn_2 = 56;
const int BLOB_conv11_conv11_relu_splitncnn_3 = 57;
const int LAYER_conv12_dw = 49;
const int BLOB_conv12_dw = 58;
const int LAYER_conv12_dw_relu = 50;
const int BLOB_conv12_dw_conv12_dw_relu = 59;
const int LAYER_conv12 = 51;
const int BLOB_conv12 = 60;
const int LAYER_conv12_relu = 52;
const int BLOB_conv12_conv12_relu = 61;
const int LAYER_conv13_dw = 53;
const int BLOB_conv13_dw = 62;
const int LAYER_conv13_dw_relu = 54;
const int BLOB_conv13_dw_conv13_dw_relu = 63;
const int LAYER_conv13 = 55;
const int BLOB_conv13 = 64;
const int LAYER_conv13_relu = 56;
const int BLOB_conv13_conv13_relu = 65;
const int LAYER_splitncnn_2 = 57;
const int BLOB_conv13_conv13_relu_splitncnn_0 = 66;
const int BLOB_conv13_conv13_relu_splitncnn_1 = 67;
const int BLOB_conv13_conv13_relu_splitncnn_2 = 68;
const int BLOB_conv13_conv13_relu_splitncnn_3 = 69;
const int LAYER_conv14_1 = 58;
const int BLOB_conv14_1 = 70;
const int LAYER_conv14_1_relu = 59;
const int BLOB_conv14_1_conv14_1_relu = 71;
const int LAYER_conv14_2 = 60;
const int BLOB_conv14_2 = 72;
const int LAYER_conv14_2_relu = 61;
const int BLOB_conv14_2_conv14_2_relu = 73;
const int LAYER_splitncnn_3 = 62;
const int BLOB_conv14_2_conv14_2_relu_splitncnn_0 = 74;
const int BLOB_conv14_2_conv14_2_relu_splitncnn_1 = 75;
const int BLOB_conv14_2_conv14_2_relu_splitncnn_2 = 76;
const int BLOB_conv14_2_conv14_2_relu_splitncnn_3 = 77;
const int LAYER_conv15_1 = 63;
const int BLOB_conv15_1 = 78;
const int LAYER_conv15_1_relu = 64;
const int BLOB_conv15_1_conv15_1_relu = 79;
const int LAYER_conv15_2 = 65;
const int BLOB_conv15_2 = 80;
const int LAYER_conv15_2_relu = 66;
const int BLOB_conv15_2_conv15_2_relu = 81;
const int LAYER_splitncnn_4 = 67;
const int BLOB_conv15_2_conv15_2_relu_splitncnn_0 = 82;
const int BLOB_conv15_2_conv15_2_relu_splitncnn_1 = 83;
const int BLOB_conv15_2_conv15_2_relu_splitncnn_2 = 84;
const int BLOB_conv15_2_conv15_2_relu_splitncnn_3 = 85;
const int LAYER_conv16_1 = 68;
const int BLOB_conv16_1 = 86;
const int LAYER_conv16_1_relu = 69;
const int BLOB_conv16_1_conv16_1_relu = 87;
const int LAYER_conv16_2 = 70;
const int BLOB_conv16_2 = 88;
const int LAYER_conv16_2_relu = 71;
const int BLOB_conv16_2_conv16_2_relu = 89;
const int LAYER_splitncnn_5 = 72;
const int BLOB_conv16_2_conv16_2_relu_splitncnn_0 = 90;
const int BLOB_conv16_2_conv16_2_relu_splitncnn_1 = 91;
const int BLOB_conv16_2_conv16_2_relu_splitncnn_2 = 92;
const int BLOB_conv16_2_conv16_2_relu_splitncnn_3 = 93;
const int LAYER_conv17_1 = 73;
const int BLOB_conv17_1 = 94;
const int LAYER_conv17_1_relu = 74;
const int BLOB_conv17_1_conv17_1_relu = 95;
const int LAYER_conv17_2 = 75;
const int BLOB_conv17_2 = 96;
const int LAYER_conv17_2_relu = 76;
const int BLOB_conv17_2_conv17_2_relu = 97;
const int LAYER_splitncnn_6 = 77;
const int BLOB_conv17_2_conv17_2_relu_splitncnn_0 = 98;
const int BLOB_conv17_2_conv17_2_relu_splitncnn_1 = 99;
const int BLOB_conv17_2_conv17_2_relu_splitncnn_2 = 100;
const int LAYER_conv11_mbox_loc = 78;
const int BLOB_conv11_mbox_loc = 101;
const int LAYER_conv11_mbox_loc_perm = 79;
const int BLOB_conv11_mbox_loc_perm = 102;
const int LAYER_conv11_mbox_loc_flat = 80;
const int BLOB_conv11_mbox_loc_flat = 103;
const int LAYER_conv11_mbox_conf = 81;
const int BLOB_conv11_mbox_conf = 104;
const int LAYER_conv11_mbox_conf_perm = 82;
const int BLOB_conv11_mbox_conf_perm = 105;
const int LAYER_conv11_mbox_conf_flat = 83;
const int BLOB_conv11_mbox_conf_flat = 106;
const int LAYER_conv11_mbox_priorbox = 84;
const int BLOB_conv11_mbox_priorbox = 107;
const int LAYER_conv13_mbox_loc = 85;
const int BLOB_conv13_mbox_loc = 108;
const int LAYER_conv13_mbox_loc_perm = 86;
const int BLOB_conv13_mbox_loc_perm = 109;
const int LAYER_conv13_mbox_loc_flat = 87;
const int BLOB_conv13_mbox_loc_flat = 110;
const int LAYER_conv13_mbox_conf = 88;
const int BLOB_conv13_mbox_conf = 111;
const int LAYER_conv13_mbox_conf_perm = 89;
const int BLOB_conv13_mbox_conf_perm = 112;
const int LAYER_conv13_mbox_conf_flat = 90;
const int BLOB_conv13_mbox_conf_flat = 113;
const int LAYER_conv13_mbox_priorbox = 91;
const int BLOB_conv13_mbox_priorbox = 114;
const int LAYER_conv14_2_mbox_loc = 92;
const int BLOB_conv14_2_mbox_loc = 115;
const int LAYER_conv14_2_mbox_loc_perm = 93;
const int BLOB_conv14_2_mbox_loc_perm = 116;
const int LAYER_conv14_2_mbox_loc_flat = 94;
const int BLOB_conv14_2_mbox_loc_flat = 117;
const int LAYER_conv14_2_mbox_conf = 95;
const int BLOB_conv14_2_mbox_conf = 118;
const int LAYER_conv14_2_mbox_conf_perm = 96;
const int BLOB_conv14_2_mbox_conf_perm = 119;
const int LAYER_conv14_2_mbox_conf_flat = 97;
const int BLOB_conv14_2_mbox_conf_flat = 120;
const int LAYER_conv14_2_mbox_priorbox = 98;
const int BLOB_conv14_2_mbox_priorbox = 121;
const int LAYER_conv15_2_mbox_loc = 99;
const int BLOB_conv15_2_mbox_loc = 122;
const int LAYER_conv15_2_mbox_loc_perm = 100;
const int BLOB_conv15_2_mbox_loc_perm = 123;
const int LAYER_conv15_2_mbox_loc_flat = 101;
const int BLOB_conv15_2_mbox_loc_flat = 124;
const int LAYER_conv15_2_mbox_conf = 102;
const int BLOB_conv15_2_mbox_conf = 125;
const int LAYER_conv15_2_mbox_conf_perm = 103;
const int BLOB_conv15_2_mbox_conf_perm = 126;
const int LAYER_conv15_2_mbox_conf_flat = 104;
const int BLOB_conv15_2_mbox_conf_flat = 127;
const int LAYER_conv15_2_mbox_priorbox = 105;
const int BLOB_conv15_2_mbox_priorbox = 128;
const int LAYER_conv16_2_mbox_loc = 106;
const int BLOB_conv16_2_mbox_loc = 129;
const int LAYER_conv16_2_mbox_loc_perm = 107;
const int BLOB_conv16_2_mbox_loc_perm = 130;
const int LAYER_conv16_2_mbox_loc_flat = 108;
const int BLOB_conv16_2_mbox_loc_flat = 131;
const int LAYER_conv16_2_mbox_conf = 109;
const int BLOB_conv16_2_mbox_conf = 132;
const int LAYER_conv16_2_mbox_conf_perm = 110;
const int BLOB_conv16_2_mbox_conf_perm = 133;
const int LAYER_conv16_2_mbox_conf_flat = 111;
const int BLOB_conv16_2_mbox_conf_flat = 134;
const int LAYER_conv16_2_mbox_priorbox = 112;
const int BLOB_conv16_2_mbox_priorbox = 135;
const int LAYER_conv17_2_mbox_loc = 113;
const int BLOB_conv17_2_mbox_loc = 136;
const int LAYER_conv17_2_mbox_loc_perm = 114;
const int BLOB_conv17_2_mbox_loc_perm = 137;
const int LAYER_conv17_2_mbox_loc_flat = 115;
const int BLOB_conv17_2_mbox_loc_flat = 138;
const int LAYER_conv17_2_mbox_conf = 116;
const int BLOB_conv17_2_mbox_conf = 139;
const int LAYER_conv17_2_mbox_conf_perm = 117;
const int BLOB_conv17_2_mbox_conf_perm = 140;
const int LAYER_conv17_2_mbox_conf_flat = 118;
const int BLOB_conv17_2_mbox_conf_flat = 141;
const int LAYER_conv17_2_mbox_priorbox = 119;
const int BLOB_conv17_2_mbox_priorbox = 142;
const int LAYER_mbox_loc = 120;
const int BLOB_mbox_loc = 143;
const int LAYER_mbox_conf = 121;
const int BLOB_mbox_conf = 144;
const int LAYER_mbox_priorbox = 122;
const int BLOB_mbox_priorbox = 145;
const int LAYER_mbox_conf_reshape = 123;
const int BLOB_mbox_conf_reshape = 146;
const int LAYER_mbox_conf_softmax = 124;
const int BLOB_mbox_conf_softmax = 147;
const int LAYER_mbox_conf_flatten = 125;
const int BLOB_mbox_conf_flatten = 148;
const int LAYER_detection_out = 126;
const int BLOB_detection_out = 149;
} // namespace MobileNetSSD_deploy_param_id
#endif // NCNN_INCLUDE_GUARD_MobileNetSSD_deploy_id_h
================================================
FILE: MobileNetSSD_demo_single/app/src/main/cpp/MobileNetssd.cpp
================================================
// JNI glue for the MobileNet-SSD ncnn Android demo.
// NOTE(review): the original angle-bracket include names were stripped by the
// text extraction; they are restored here from the APIs used below
// (JNIEnv/jbyteArray -> <jni.h>, __android_log_print -> <android/log.h>,
// AndroidBitmap_* -> <android/bitmap.h>, std::vector -> <vector>).
#include <android/bitmap.h>
#include <android/log.h>
#include <jni.h>
#include <string>
#include <vector>
// ncnn
#include "include/opencv.h"
#include "MobileNetSSD_deploy.id.h" // generated layer/blob id header for this model
#include <sys/time.h> // assumed from the upstream demo -- TODO confirm
#include <unistd.h>   // assumed from the upstream demo -- TODO confirm
#include "include/net.h"
// Global ncnn state shared by Init()/Detect(); the pool allocators are reused
// across inference calls to avoid repeated malloc/free.
static ncnn::UnlockedPoolAllocator g_blob_pool_allocator;
static ncnn::PoolAllocator g_workspace_pool_allocator;
static ncnn::Mat ncnn_param; // raw bytes of the .param file
static ncnn::Mat ncnn_bin;   // raw bytes of the .bin weight file
static ncnn::Net ncnn_net;
extern "C" {
// public native boolean Init(byte[] param, byte[] bin);  (Java-side signature)
// Copies the .param / .bin byte arrays from Java into ncnn Mats, loads them
// into the global ncnn_net, then installs the default ncnn options.
// Returns JNI_FALSE when either load step fails (the original returned
// JNI_TRUE unconditionally, silently hiding load errors).
JNIEXPORT jboolean JNICALL
Java_com_example_che_mobilenetssd_1demo_MobileNetssd_Init(JNIEnv *env, jobject obj, jbyteArray param, jbyteArray bin) {
    __android_log_print(ANDROID_LOG_DEBUG, "MobileNetssd", "enter the jni func");
    // init param
    {
        int len = env->GetArrayLength(param);
        ncnn_param.create(len, (size_t) 1u);
        env->GetByteArrayRegion(param, 0, len, (jbyte *) ncnn_param);
        int ret = ncnn_net.load_param((const unsigned char *) ncnn_param);
        __android_log_print(ANDROID_LOG_DEBUG, "MobileNetssd", "load_param %d %d", ret, len);
        // ncnn's in-memory load_param returns the number of bytes consumed;
        // 0 means the data could not be parsed -- TODO confirm against the
        // vendored ncnn version.
        if (ret == 0)
            return JNI_FALSE;
    }
    // init bin
    {
        int len = env->GetArrayLength(bin);
        ncnn_bin.create(len, (size_t) 1u);
        env->GetByteArrayRegion(bin, 0, len, (jbyte *) ncnn_bin);
        int ret = ncnn_net.load_model((const unsigned char *) ncnn_bin);
        __android_log_print(ANDROID_LOG_DEBUG, "MobileNetssd", "load_model %d %d", ret, len);
        if (ret == 0)
            return JNI_FALSE;
    }
    ncnn::Option opt;
    opt.lightmode = true;
    opt.num_threads = 4; // worker thread count; tune per device
    opt.blob_allocator = &g_blob_pool_allocator;
    opt.workspace_allocator = &g_workspace_pool_allocator;
    ncnn::set_default_option(opt);
    return JNI_TRUE;
}
// public native float[] Detect(Bitmap bitmap);  (Java-side signature)
// Runs the SSD network on an RGBA_8888 bitmap and returns the detection
// matrix flattened row-major (out.h rows of out.w floats each).
// Returns NULL for unsupported bitmap formats or on allocation failure.
JNIEXPORT jfloatArray JNICALL Java_com_example_che_mobilenetssd_1demo_MobileNetssd_Detect(JNIEnv* env, jobject thiz, jobject bitmap)
{
    // ncnn from bitmap
    ncnn::Mat in;
    {
        AndroidBitmapInfo info;
        AndroidBitmap_getInfo(env, bitmap, &info);
        int width = info.width;
        int height = info.height;
        // Only RGBA_8888 bitmaps are supported; anything else is rejected.
        if (info.format != ANDROID_BITMAP_FORMAT_RGBA_8888)
            return NULL;
        void* indata;
        AndroidBitmap_lockPixels(env, bitmap, &indata);
        // Convert pixels to an ncnn Mat, dropping the alpha channel. The
        // caller is expected to have resized the bitmap to the network input
        // size already (use from_pixels_resize here to resize on the C++ side).
        in = ncnn::Mat::from_pixels((const unsigned char*)indata, ncnn::Mat::PIXEL_RGBA2RGB, width, height);
        AndroidBitmap_unlockPixels(env, bitmap);
    }
    // run the network
    {
        // Mean subtraction and scaling matching the training preprocessing
        // (x - 127.5) * 0.007843 maps [0,255] roughly to [-1,1].
        const float mean_vals[3] = {127.5f, 127.5f, 127.5f};
        const float scale[3] = {0.007843f, 0.007843f, 0.007843f};
        in.substract_mean_normalize(mean_vals, scale);
        ncnn::Extractor ex = ncnn_net.create_extractor();
        // The param file is the id-encoded variant, so blobs are addressed by
        // the integer ids from MobileNetSSD_deploy.id.h instead of by name.
        ex.input(MobileNetSSD_deploy_param_id::BLOB_data, in);
        ncnn::Mat out;
        ex.extract(MobileNetSSD_deploy_param_id::BLOB_detection_out, out);
        int output_wsize = out.w;
        int output_hsize = out.h;
        // Copy the detection rows into one contiguous float buffer.
        // (The original kept an array of *pointers* into `out` and allocated a
        // Java array of only output_wsize elements while writing
        // output_wsize * output_hsize floats -- an out-of-bounds write.)
        std::vector<float> output((size_t)output_wsize * output_hsize);
        for (int i = 0; i < output_hsize; i++) {
            const float* row = out.row(i);
            for (int j = 0; j < output_wsize; j++) {
                output[(size_t)i * output_wsize + j] = row[j];
            }
        }
        jfloatArray jOutputData = env->NewFloatArray(output_wsize * output_hsize);
        if (jOutputData == nullptr) return nullptr;
        env->SetFloatArrayRegion(jOutputData, 0, output_wsize * output_hsize,
                                 output.data());
        return jOutputData;
    }
}
}
================================================
FILE: MobileNetSSD_demo_single/app/src/main/cpp/include/allocator.h
================================================
// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2018 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
#ifndef NCNN_ALLOCATOR_H
#define NCNN_ALLOCATOR_H
// NOTE(review): stripped include names restored from usage below
// (SRWLOCK -> <windows.h>, pthread_mutex_t -> <pthread.h>,
// malloc/free -> <stdlib.h>, std::list -> <list>).
#ifdef _WIN32
#define WIN32_LEAN_AND_MEAN
#include <windows.h>
#else
#include <pthread.h>
#endif
#include <stdlib.h>
#include <list>
namespace ncnn {
// the alignment of all the allocated buffers
#define MALLOC_ALIGN 16
// Aligns a pointer upward to the next multiple of n.
// ptr  pointer to align
// n    alignment size; must be a power of two
// (The stripped "template" line is restored to the upstream ncnn signature.)
template<typename _Tp> static inline _Tp* alignPtr(_Tp* ptr, int n=(int)sizeof(_Tp))
{
    return (_Tp*)(((size_t)ptr + n-1) & -n);
}
// Round sz up to the smallest multiple of n that is >= sz.
// sz  buffer size to align
// n   alignment size; must be a power of two
static inline size_t alignSize(size_t sz, int n)
{
    const size_t mask = (size_t)n - 1;
    return (sz + mask) & ~mask;
}
// Allocate `size` bytes whose returned address is MALLOC_ALIGN-aligned.
// Over-allocates by sizeof(void*) + MALLOC_ALIGN so the raw malloc() pointer
// can be stashed just before the aligned address for fastFree() to find.
// Returns 0 on allocation failure.
static inline void* fastMalloc(size_t size)
{
unsigned char* udata = (unsigned char*)malloc(size + sizeof(void*) + MALLOC_ALIGN);
if (!udata)
return 0;
// Skip one pointer slot, then align; adata[-1] records the raw block.
unsigned char** adata = alignPtr((unsigned char**)udata + 1, MALLOC_ALIGN);
adata[-1] = udata;
return adata;
}
// Release a pointer obtained from fastMalloc(); safe to call with NULL.
static inline void fastFree(void* ptr)
{
    if (!ptr)
        return;
    // The raw malloc() block was stored immediately before the aligned address.
    free(((unsigned char**)ptr)[-1]);
}
// exchange-add operation for atomic operations on reference counters
// NCNN_XADD(addr, delta): atomically adds delta to *addr and returns the
// previous value. NOTE(review): the cast type arguments in the Intel branch
// and the <intrin.h> header name in the MSVC branch were stripped by the text
// extraction; they are restored from the upstream ncnn/OpenCV definitions.
#if defined __INTEL_COMPILER && !(defined WIN32 || defined _WIN32)
// atomic increment on the linux version of the Intel(tm) compiler
# define NCNN_XADD(addr, delta) (int)_InterlockedExchangeAdd(const_cast<void*>(reinterpret_cast<volatile void*>(addr)), delta)
#elif defined __GNUC__
# if defined __clang__ && __clang_major__ >= 3 && !defined __ANDROID__ && !defined __EMSCRIPTEN__ && !defined(__CUDACC__)
# ifdef __ATOMIC_ACQ_REL
# define NCNN_XADD(addr, delta) __c11_atomic_fetch_add((_Atomic(int)*)(addr), delta, __ATOMIC_ACQ_REL)
# else
# define NCNN_XADD(addr, delta) __atomic_fetch_add((_Atomic(int)*)(addr), delta, 4)
# endif
# else
# if defined __ATOMIC_ACQ_REL && !defined __clang__
// version for gcc >= 4.7
# define NCNN_XADD(addr, delta) (int)__atomic_fetch_add((unsigned*)(addr), (unsigned)(delta), __ATOMIC_ACQ_REL)
# else
# define NCNN_XADD(addr, delta) (int)__sync_fetch_and_add((unsigned*)(addr), (unsigned)(delta))
# endif
# endif
#elif defined _MSC_VER && !defined RC_INVOKED
# include <intrin.h>
# define NCNN_XADD(addr, delta) (int)_InterlockedExchangeAdd((long volatile*)addr, delta)
#else
// thread-unsafe branch
static inline int NCNN_XADD(int* addr, int delta) { int tmp = *addr; *addr += delta; return tmp; }
#endif
#ifdef _WIN32
// Minimal mutex wrapper over a Windows slim reader/writer lock
// (exclusive mode only).
class Mutex
{
public:
Mutex() { InitializeSRWLock(&srwlock); }
~Mutex() {}
void lock() { AcquireSRWLockExclusive(&srwlock); }
void unlock() { ReleaseSRWLockExclusive(&srwlock); }
private:
// NOTE SRWLock is available from windows vista
SRWLOCK srwlock;
};
#else // _WIN32
// Minimal mutex wrapper over a POSIX pthread mutex.
class Mutex
{
public:
Mutex() { pthread_mutex_init(&mutex, 0); }
~Mutex() { pthread_mutex_destroy(&mutex); }
void lock() { pthread_mutex_lock(&mutex); }
void unlock() { pthread_mutex_unlock(&mutex); }
private:
pthread_mutex_t mutex;
};
#endif // _WIN32
// Abstract allocator interface used for blob and workspace memory;
// implemented by the pool allocators below.
class Allocator
{
public:
// pure virtual destructor: the class is abstract (a definition must exist elsewhere)
virtual ~Allocator() = 0;
virtual void* fastMalloc(size_t size) = 0;
virtual void fastFree(void* ptr) = 0;
};
// Thread-safe memory pool: freed buffers are kept as "budgets" and re-used
// for later requests of similar size instead of hitting malloc each time.
// NOTE(review): the stripped std::list element types are restored to the
// upstream ncnn declaration std::pair<size_t, void*> (block size, pointer).
class PoolAllocator : public Allocator
{
public:
    PoolAllocator();
    ~PoolAllocator();
    // ratio range 0 ~ 1
    // default cr = 0.75
    void set_size_compare_ratio(float scr);
    // release all budgets immediately
    void clear();
    virtual void* fastMalloc(size_t size);
    virtual void fastFree(void* ptr);
private:
    Mutex budgets_lock;
    Mutex payouts_lock;
    unsigned int size_compare_ratio;// 0~256
    std::list< std::pair<size_t, void*> > budgets;
    std::list< std::pair<size_t, void*> > payouts;
};
// Same pooling strategy as PoolAllocator but without any locking; only safe
// when used from a single thread at a time.
// NOTE(review): the stripped std::list element types are restored to the
// upstream ncnn declaration std::pair<size_t, void*> (block size, pointer).
class UnlockedPoolAllocator : public Allocator
{
public:
    UnlockedPoolAllocator();
    ~UnlockedPoolAllocator();
    // ratio range 0 ~ 1
    // default cr = 0.75
    void set_size_compare_ratio(float scr);
    // release all budgets immediately
    void clear();
    virtual void* fastMalloc(size_t size);
    virtual void fastFree(void* ptr);
private:
    unsigned int size_compare_ratio;// 0~256
    std::list< std::pair<size_t, void*> > budgets;
    std::list< std::pair<size_t, void*> > payouts;
};
} // namespace ncnn
#endif // NCNN_ALLOCATOR_H
================================================
FILE: MobileNetSSD_demo_single/app/src/main/cpp/include/benchmark.h
================================================
// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
#ifndef NCNN_BENCHMARK_H
#define NCNN_BENCHMARK_H
#include "platform.h"
#include "mat.h"
#include "layer.h"
namespace ncnn {
// get now timestamp in ms
double get_current_time();
#if NCNN_BENCHMARK
// log per-layer timing; only compiled in when NCNN_BENCHMARK is enabled
void benchmark(const Layer* layer, double start, double end);
void benchmark(const Layer* layer, const Mat& bottom_blob, Mat& top_blob, double start, double end);
#endif // NCNN_BENCHMARK
} // namespace ncnn
#endif // NCNN_BENCHMARK_H
================================================
FILE: MobileNetSSD_demo_single/app/src/main/cpp/include/blob.h
================================================
// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
#ifndef NCNN_BLOB_H
#define NCNN_BLOB_H
// NOTE(review): stripped include names restored from usage below
// (std::string and std::vector are used by class Blob).
#include <string>
#include <vector>
#include "platform.h"
namespace ncnn {
// Connectivity record for one blob (edge) of the network graph.
// NOTE(review): the stripped std::vector element type is restored to the
// upstream ncnn declaration std::vector<int>.
class Blob
{
public:
    // empty
    Blob();
public:
#if NCNN_STRING
    // blob name
    std::string name;
#endif // NCNN_STRING
    // layer index which produce this blob as output
    int producer;
    // layer indices which need this blob as input
    std::vector<int> consumers;
};
} // namespace ncnn
#endif // NCNN_BLOB_H
================================================
FILE: MobileNetSSD_demo_single/app/src/main/cpp/include/cpu.h
================================================
// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
#ifndef NCNN_CPU_H
#define NCNN_CPU_H
namespace ncnn {
// test optional cpu features
// neon = armv7 neon or aarch64 asimd
int cpu_support_arm_neon();
// vfpv4 = armv7 fp16 + fma
int cpu_support_arm_vfpv4();
// asimdhp = aarch64 asimd half precision
int cpu_support_arm_asimdhp();
// cpu info
int get_cpu_count();
// bind all threads on little clusters if powersave enabled
// affects HMP arch cpu like ARM big.LITTLE
// only implemented on android at the moment
// switching powersave is expensive and not thread-safe
// 0 = all cores enabled(default)
// 1 = only little clusters enabled
// 2 = only big clusters enabled
// return 0 if success for setter function
int get_cpu_powersave();
int set_cpu_powersave(int powersave);
// misc function wrapper for openmp routines
int get_omp_num_threads();
void set_omp_num_threads(int num_threads);
int get_omp_dynamic();
void set_omp_dynamic(int dynamic);
} // namespace ncnn
#endif // NCNN_CPU_H
================================================
FILE: MobileNetSSD_demo_single/app/src/main/cpp/include/layer.h
================================================
// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
#ifndef NCNN_LAYER_H
#define NCNN_LAYER_H
// NOTE(review): stripped angle-bracket include names restored from the
// upstream ncnn layer.h.
#include <stdio.h>
#include <string>
#include <vector>
#include "mat.h"
#include "modelbin.h"
#include "paramdict.h"
#include "platform.h"
namespace ncnn {
class Allocator;
// Execution options shared by the network and extractors
// (threading, allocators, light mode).
class Option
{
public:
// default option
Option();
public:
// light mode
// intermediate blob will be recycled when enabled
// enabled by default
bool lightmode;
// thread count
// default value is the one returned by get_cpu_count()
int num_threads;
// blob memory allocator
Allocator* blob_allocator;
// workspace memory allocator
Allocator* workspace_allocator;
};
// the global default option
const Option& get_default_option();
int set_default_option(const Option& opt);
class Layer
{
public:
// empty
Layer();
// virtual destructor
virtual ~Layer();
// load layer specific parameter from parsed dict
// return 0 if success
virtual int load_param(const ParamDict& pd);
// load layer specific weight data from model binary
// return 0 if success
virtual int load_model(const ModelBin& mb);
public:
// one input and one output blob
bool one_blob_only;
// support inplace inference
bool support_inplace;
public:
// implement inference
// return 0 if success
virtual int forward(const std::vector& bottom_blobs, std::vector& top_blobs, const Option& opt = get_default_option()) const;
virtual int forward(const Mat& bottom_blob, Mat& top_blob, const Option& opt = get_default_option()) const;
// implement inplace inference
// return 0 if success
virtual int forward_inplace(std::vector& bottom_top_blobs, const Option& opt = get_default_option()) const;
virtual int forward_inplace(Mat& bottom_top_blob, const Option& opt = get_default_option()) const;
public:
#if NCNN_STRING
// layer type name
std::string type;
// layer name
std::string name;
#endif // NCNN_STRING
// blob index which this layer needs as input
std::vector bottoms;
// blob index which this layer produces as output
std::vector tops;
};
// layer factory function
typedef Layer* (*layer_creator_func)();
// one registry row: optional type name plus its factory function
struct layer_registry_entry
{
#if NCNN_STRING
// layer type name
const char* name;
#endif // NCNN_STRING
// layer factory entry
layer_creator_func creator;
};
#if NCNN_STRING
// get layer type from type name
int layer_to_index(const char* type);
// create layer from type name
Layer* create_layer(const char* type);
#endif // NCNN_STRING
// create layer from layer type
Layer* create_layer(int index);
// defines a creator function `<name>_layer_creator` returning `new name`
#define DEFINE_LAYER_CREATOR(name) \
::ncnn::Layer* name##_layer_creator() { return new name; }
} // namespace ncnn
#endif // NCNN_LAYER_H
================================================
FILE: MobileNetSSD_demo_single/app/src/main/cpp/include/layer_type.h
================================================
// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
#ifndef NCNN_LAYER_TYPE_H
#define NCNN_LAYER_TYPE_H
namespace ncnn {
namespace LayerType {
// Built-in layer type ids; the values come from the generated enum fragment.
enum
{
#include "layer_type_enum.h"
// custom (user-registered) layer indices carry this flag bit
CustomBit = (1<<8),
};
} // namespace LayerType
} // namespace ncnn
#endif // NCNN_LAYER_TYPE_H
================================================
FILE: MobileNetSSD_demo_single/app/src/main/cpp/include/layer_type_enum.h
================================================
// Layer Type Enum header
//
// This file is auto-generated by cmake, don't edit it.
AbsVal = 0,
ArgMax = 1,
BatchNorm = 2,
Bias = 3,
BNLL = 4,
Concat = 5,
Convolution = 6,
Crop = 7,
Deconvolution = 8,
Dropout = 9,
Eltwise = 10,
ELU = 11,
Embed = 12,
Exp = 13,
Flatten = 14,
InnerProduct = 15,
Input = 16,
Log = 17,
LRN = 18,
MemoryData = 19,
MVN = 20,
Pooling = 21,
Power = 22,
PReLU = 23,
Proposal = 24,
Reduction = 25,
ReLU = 26,
Reshape = 27,
ROIPooling = 28,
Scale = 29,
Sigmoid = 30,
Slice = 31,
Softmax = 32,
Split = 33,
SPP = 34,
TanH = 35,
Threshold = 36,
Tile = 37,
RNN = 38,
LSTM = 39,
BinaryOp = 40,
UnaryOp = 41,
ConvolutionDepthWise = 42,
Padding = 43,
Squeeze = 44,
ExpandDims = 45,
Normalize = 46,
Permute = 47,
PriorBox = 48,
DetectionOutput = 49,
Interp = 50,
DeconvolutionDepthWise = 51,
ShuffleChannel = 52,
InstanceNorm = 53,
Clip = 54,
Reorg = 55,
YoloDetectionOutput = 56,
Quantize = 57,
Dequantize = 58,
Yolov3DetectionOutput = 59,
PSROIPooling = 60,
ROIAlign = 61,
================================================
FILE: MobileNetSSD_demo_single/app/src/main/cpp/include/mat.h
================================================
// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
#ifndef NCNN_MAT_H
#define NCNN_MAT_H
// NOTE(review): stripped angle-bracket include names restored from the
// upstream ncnn mat.h.
#include <stdlib.h>
#include <string.h>
#if __ARM_NEON
#include <arm_neon.h>
#endif
#include "allocator.h"
#include "platform.h"
namespace ncnn {
// the three dimension matrix
// the three dimension matrix
// NOTE(review): the five stripped "template" member declarations are restored
// to the upstream ncnn signatures (template<typename T>).
class Mat
{
public:
    // empty
    Mat();
    // vec
    Mat(int w, size_t elemsize = 4u, Allocator* allocator = 0);
    // image
    Mat(int w, int h, size_t elemsize = 4u, Allocator* allocator = 0);
    // dim
    Mat(int w, int h, int c, size_t elemsize = 4u, Allocator* allocator = 0);
    // copy
    Mat(const Mat& m);
    // external vec
    Mat(int w, void* data, size_t elemsize = 4u, Allocator* allocator = 0);
    // external image
    Mat(int w, int h, void* data, size_t elemsize = 4u, Allocator* allocator = 0);
    // external dim
    Mat(int w, int h, int c, void* data, size_t elemsize = 4u, Allocator* allocator = 0);
    // release
    ~Mat();
    // assign
    Mat& operator=(const Mat& m);
    // set all
    void fill(float v);
    void fill(int v);
    template<typename T> void fill(T v);
    // deep copy
    Mat clone(Allocator* allocator = 0) const;
    // reshape vec
    Mat reshape(int w, Allocator* allocator = 0) const;
    // reshape image
    Mat reshape(int w, int h, Allocator* allocator = 0) const;
    // reshape dim
    Mat reshape(int w, int h, int c, Allocator* allocator = 0) const;
    // allocate vec
    void create(int w, size_t elemsize = 4u, Allocator* allocator = 0);
    // allocate image
    void create(int w, int h, size_t elemsize = 4u, Allocator* allocator = 0);
    // allocate dim
    void create(int w, int h, int c, size_t elemsize = 4u, Allocator* allocator = 0);
    // refcount++
    void addref();
    // refcount--
    void release();
    bool empty() const;
    size_t total() const;
    // data reference
    Mat channel(int c);
    const Mat channel(int c) const;
    float* row(int y);
    const float* row(int y) const;
    template<typename T> T* row(int y);
    template<typename T> const T* row(int y) const;
    // range reference
    Mat channel_range(int c, int channels);
    const Mat channel_range(int c, int channels) const;
    Mat row_range(int y, int rows);
    const Mat row_range(int y, int rows) const;
    Mat range(int x, int n);
    const Mat range(int x, int n) const;
    // access raw data
    template<typename T> operator T*();
    template<typename T> operator const T*() const;
    // convenient access float vec element
    float& operator[](int i);
    const float& operator[](int i) const;
#if NCNN_PIXEL
    // pixel format codes; a conversion packs source format in the low half
    // and destination format in the high half
    enum
    {
        PIXEL_CONVERT_SHIFT = 16,
        PIXEL_FORMAT_MASK = 0x0000ffff,
        PIXEL_CONVERT_MASK = 0xffff0000,
        PIXEL_RGB = 1,
        PIXEL_BGR = (1 << 1),
        PIXEL_GRAY = (1 << 2),
        PIXEL_RGBA = (1 << 3),
        PIXEL_RGB2BGR = PIXEL_RGB | (PIXEL_BGR << PIXEL_CONVERT_SHIFT),
        PIXEL_RGB2GRAY = PIXEL_RGB | (PIXEL_GRAY << PIXEL_CONVERT_SHIFT),
        PIXEL_BGR2RGB = PIXEL_BGR | (PIXEL_RGB << PIXEL_CONVERT_SHIFT),
        PIXEL_BGR2GRAY = PIXEL_BGR | (PIXEL_GRAY << PIXEL_CONVERT_SHIFT),
        PIXEL_GRAY2RGB = PIXEL_GRAY | (PIXEL_RGB << PIXEL_CONVERT_SHIFT),
        PIXEL_GRAY2BGR = PIXEL_GRAY | (PIXEL_BGR << PIXEL_CONVERT_SHIFT),
        PIXEL_RGBA2RGB = PIXEL_RGBA | (PIXEL_RGB << PIXEL_CONVERT_SHIFT),
        PIXEL_RGBA2BGR = PIXEL_RGBA | (PIXEL_BGR << PIXEL_CONVERT_SHIFT),
        PIXEL_RGBA2GRAY = PIXEL_RGBA | (PIXEL_GRAY << PIXEL_CONVERT_SHIFT),
    };
    // convenient construct from pixel data
    static Mat from_pixels(const unsigned char* pixels, int type, int w, int h, Allocator* allocator = 0);
    // convenient construct from pixel data and resize to specific size
    static Mat from_pixels_resize(const unsigned char* pixels, int type, int w, int h, int target_width, int target_height, Allocator* allocator = 0);
    // convenient export to pixel data
    void to_pixels(unsigned char* pixels, int type) const;
    // convenient export to pixel data and resize to specific size
    void to_pixels_resize(unsigned char* pixels, int type, int target_width, int target_height) const;
#endif // NCNN_PIXEL
    // substract channel-wise mean values, then multiply by normalize values, pass 0 to skip
    void substract_mean_normalize(const float* mean_vals, const float* norm_vals);
    // convenient construct from half precision floating point data
    static Mat from_float16(const unsigned short* data, int size);
    // pointer to the data
    void* data;
    // pointer to the reference counter
    // when points to user-allocated data, the pointer is NULL
    int* refcount;
    // element size in bytes
    // 4 = float32/int32
    // 2 = float16
    // 1 = int8/uint8
    // 0 = empty
    size_t elemsize;
    // the allocator
    Allocator* allocator;
    // the dimensionality
    int dims;
    int w;
    int h;
    int c;
    size_t cstep;
};
// misc function
#if NCNN_PIXEL
// convert yuv420sp(nv21) to rgb, the fast approximate version
void yuv420sp2rgb(const unsigned char* yuv420sp, int w, int h, unsigned char* rgb);
// image pixel bilinear resize (c1..c4 = number of interleaved channels)
void resize_bilinear_c1(const unsigned char* src, int srcw, int srch, unsigned char* dst, int w, int h);
void resize_bilinear_c2(const unsigned char* src, int srcw, int srch, unsigned char* dst, int w, int h);
void resize_bilinear_c3(const unsigned char* src, int srcw, int srch, unsigned char* dst, int w, int h);
void resize_bilinear_c4(const unsigned char* src, int srcw, int srch, unsigned char* dst, int w, int h);
// image pixel bilinear resize, convenient wrapper for yuv420sp(nv21)
void resize_bilinear_yuv420sp(const unsigned char* src, int srcw, int srch, unsigned char* dst, int w, int h);
#endif // NCNN_PIXEL
// mat process
// border handling modes for copy_make_border()
enum
{
BORDER_CONSTANT = 0,
BORDER_REPLICATE = 1,
};
void copy_make_border(const Mat& src, Mat& dst, int top, int bottom, int left, int right, int type, float v, Allocator* allocator = 0, int num_threads = 1);
void copy_cut_border(const Mat& src, Mat& dst, int top, int bottom, int left, int right, Allocator* allocator = 0, int num_threads = 1);
void resize_bilinear(const Mat& src, Mat& dst, int w, int h, Allocator* allocator = 0, int num_threads = 1);
// default constructor: empty Mat, no storage, refcount NULL
inline Mat::Mat()
: data(0), refcount(0), elemsize(0), allocator(0), dims(0), w(0), h(0), c(0), cstep(0)
{
}
// 1-dim owning constructor; allocates through create()
inline Mat::Mat(int _w, size_t _elemsize, Allocator* allocator)
: data(0), refcount(0), dims(0)
{
create(_w, _elemsize, allocator);
}
// 2-dim owning constructor
inline Mat::Mat(int _w, int _h, size_t _elemsize, Allocator* allocator)
: data(0), refcount(0), dims(0)
{
create(_w, _h, _elemsize, allocator);
}
// 3-dim owning constructor
inline Mat::Mat(int _w, int _h, int _c, size_t _elemsize, Allocator* allocator)
: data(0), refcount(0), dims(0)
{
create(_w, _h, _c, _elemsize, allocator);
}
// copy constructor: shallow copy sharing the same buffer, bumps the refcount
inline Mat::Mat(const Mat& m)
: data(m.data), refcount(m.refcount), elemsize(m.elemsize), allocator(m.allocator), dims(m.dims)
{
// refcount is NULL when m wraps user-allocated data; nothing to count then
if (refcount)
NCNN_XADD(refcount, 1);
w = m.w;
h = m.h;
c = m.c;
cstep = m.cstep;
}
// 1-dim non-owning wrapper around external data (refcount stays NULL)
inline Mat::Mat(int _w, void* _data, size_t _elemsize, Allocator* _allocator)
: data(_data), refcount(0), elemsize(_elemsize), allocator(_allocator), dims(1)
{
w = _w;
h = 1;
c = 1;
cstep = w;
}
// 2-dim non-owning wrapper around external data
inline Mat::Mat(int _w, int _h, void* _data, size_t _elemsize, Allocator* _allocator)
: data(_data), refcount(0), elemsize(_elemsize), allocator(_allocator), dims(2)
{
w = _w;
h = _h;
c = 1;
cstep = w * h;
}
// 3-dim non-owning wrapper; cstep pads each channel to a 16-byte boundary
inline Mat::Mat(int _w, int _h, int _c, void* _data, size_t _elemsize, Allocator* _allocator)
: data(_data), refcount(0), elemsize(_elemsize), allocator(_allocator), dims(3)
{
w = _w;
h = _h;
c = _c;
cstep = alignSize(w * h * elemsize, 16) / elemsize;
}
// destructor: drops one reference, frees storage when it was the last
inline Mat::~Mat()
{
release();
}
// assignment: addref the source BEFORE release(), so self-overlapping
// buffers are not freed prematurely
inline Mat& Mat::operator=(const Mat& m)
{
if (this == &m)
return *this;
if (m.refcount)
NCNN_XADD(m.refcount, 1);
release();
data = m.data;
refcount = m.refcount;
elemsize = m.elemsize;
allocator = m.allocator;
dims = m.dims;
w = m.w;
h = m.h;
c = m.c;
cstep = m.cstep;
return *this;
}
// fill every element (including cstep padding) with a float value.
// NEON path stores 4 lanes per iteration; the scalar loop handles the tail.
inline void Mat::fill(float _v)
{
int size = total();
float* ptr = (float*)data;
#if __ARM_NEON
int nn = size >> 2;
int remain = size - (nn << 2);
#else
int remain = size;
#endif // __ARM_NEON
#if __ARM_NEON
float32x4_t _c = vdupq_n_f32(_v);
#if __aarch64__
if (nn > 0)
{
// store 4 floats per loop, post-incrementing ptr by 16 bytes
asm volatile (
"0: \n"
"subs %w0, %w0, #1 \n"
"st1 {%4.4s}, [%1], #16 \n"
"bne 0b \n"
: "=r"(nn), // %0
"=r"(ptr) // %1
: "0"(nn),
"1"(ptr),
"w"(_c) // %4
: "cc", "memory"
);
}
#else
if (nn > 0)
{
// armv7 variant of the same 4-lane store loop (128-bit aligned store)
asm volatile(
"0: \n"
"subs %0, #1 \n"
"vst1.f32 {%e4-%f4}, [%1 :128]!\n"
"bne 0b \n"
: "=r"(nn), // %0
"=r"(ptr) // %1
: "0"(nn),
"1"(ptr),
"w"(_c) // %4
: "cc", "memory"
);
}
#endif // __aarch64__
#endif // __ARM_NEON
// scalar tail (or the whole fill on non-NEON builds)
for (; remain>0; remain--)
{
*ptr++ = _v;
}
}
// int variant of fill(); identical structure to the float version
inline void Mat::fill(int _v)
{
int size = total();
int* ptr = (int*)data;
#if __ARM_NEON
int nn = size >> 2;
int remain = size - (nn << 2);
#else
int remain = size;
#endif // __ARM_NEON
#if __ARM_NEON
int32x4_t _c = vdupq_n_s32(_v);
#if __aarch64__
if (nn > 0)
{
asm volatile (
"0: \n"
"subs %w0, %w0, #1 \n"
"st1 {%4.4s}, [%1], #16 \n"
"bne 0b \n"
: "=r"(nn), // %0
"=r"(ptr) // %1
: "0"(nn),
"1"(ptr),
"w"(_c) // %4
: "cc", "memory"
);
}
#else
if (nn > 0)
{
asm volatile(
"0: \n"
"subs %0, #1 \n"
"vst1.s32 {%e4-%f4}, [%1 :128]!\n"
"bne 0b \n"
: "=r"(nn), // %0
"=r"(ptr) // %1
: "0"(nn),
"1"(ptr),
"w"(_c) // %4
: "cc", "memory"
);
}
#endif // __aarch64__
#endif // __ARM_NEON
for (; remain>0; remain--)
{
*ptr++ = _v;
}
}
template
inline void Mat::fill(T _v)
{
int size = total();
T* ptr = (T*)data;
for (int i=0; i 0)
{
memcpy(m.data, data, total() * elemsize);
}
return m;
}
inline Mat Mat::reshape(int _w, Allocator* allocator) const
{
if (w * h * c != _w)
return Mat();
if (dims == 3 && cstep != (size_t)w * h)
{
Mat m;
m.create(_w, elemsize, allocator);
// flatten
for (int i=0; i 0)
{
size_t totalsize = alignSize(total() * elemsize, 4);
if (allocator)
data = allocator->fastMalloc(totalsize + (int)sizeof(*refcount));
else
data = fastMalloc(totalsize + (int)sizeof(*refcount));
refcount = (int*)(((unsigned char*)data) + totalsize);
*refcount = 1;
}
}
// allocate storage for a 2-dim Mat; no-op when the shape already matches
inline void Mat::create(int _w, int _h, size_t _elemsize, Allocator* _allocator)
{
if (dims == 2 && w == _w && h == _h && elemsize == _elemsize && allocator == _allocator)
return;
release();
elemsize = _elemsize;
allocator = _allocator;
dims = 2;
w = _w;
h = _h;
c = 1;
cstep = w * h;
if (total() > 0)
{
// payload is padded to 4 bytes so the trailing refcount int is aligned
size_t totalsize = alignSize(total() * elemsize, 4);
if (allocator)
data = allocator->fastMalloc(totalsize + (int)sizeof(*refcount));
else
data = fastMalloc(totalsize + (int)sizeof(*refcount));
refcount = (int*)(((unsigned char*)data) + totalsize);
*refcount = 1;
}
}
// allocate storage for a 3-dim Mat; each channel is padded to a 16-byte
// boundary via cstep
inline void Mat::create(int _w, int _h, int _c, size_t _elemsize, Allocator* _allocator)
{
if (dims == 3 && w == _w && h == _h && c == _c && elemsize == _elemsize && allocator == _allocator)
return;
release();
elemsize = _elemsize;
allocator = _allocator;
dims = 3;
w = _w;
h = _h;
c = _c;
cstep = alignSize(w * h * elemsize, 16) / elemsize;
if (total() > 0)
{
size_t totalsize = alignSize(total() * elemsize, 4);
if (allocator)
data = allocator->fastMalloc(totalsize + (int)sizeof(*refcount));
else
data = fastMalloc(totalsize + (int)sizeof(*refcount));
refcount = (int*)(((unsigned char*)data) + totalsize);
*refcount = 1;
}
}
// manually take an extra reference (no-op for non-owning wrappers)
inline void Mat::addref()
{
if (refcount)
NCNN_XADD(refcount, 1);
}
// drop one reference; free the buffer when this was the last owner.
// always resets the shape fields, even for non-owning wrappers.
inline void Mat::release()
{
// NCNN_XADD returns the PREVIOUS value, so 1 means we held the last ref
if (refcount && NCNN_XADD(refcount, -1) == 1)
{
if (allocator)
allocator->fastFree(data);
else
fastFree(data);
}
data = 0;
elemsize = 0;
dims = 0;
w = 0;
h = 0;
c = 0;
cstep = 0;
refcount = 0;
}
inline bool Mat::empty() const
{
return data == 0 || total() == 0;
}
// element count including the per-channel cstep padding
inline size_t Mat::total() const
{
return cstep * c;
}
// non-owning 2-dim view of one channel (parameter c shadows the member)
inline Mat Mat::channel(int c)
{
return Mat(w, h, (unsigned char*)data + cstep * c * elemsize, elemsize, allocator);
}
inline const Mat Mat::channel(int c) const
{
return Mat(w, h, (unsigned char*)data + cstep * c * elemsize, elemsize, allocator);
}
inline float* Mat::row(int y)
{
return (float*)data + w * y;
}
inline const float* Mat::row(int y) const
{
return (const float*)data + w * y;
}
template
inline T* Mat::row(int y)
{
return (T*)data + w * y;
}
template
inline const T* Mat::row(int y) const
{
return (const T*)data + w * y;
}
// non-owning 3-dim view over [_c, _c + channels) channels
inline Mat Mat::channel_range(int _c, int channels)
{
return Mat(w, h, channels, (unsigned char*)data + cstep * _c * elemsize, elemsize, allocator);
}
inline const Mat Mat::channel_range(int _c, int channels) const
{
return Mat(w, h, channels, (unsigned char*)data + cstep * _c * elemsize, elemsize, allocator);
}
// non-owning 2-dim view over [y, y + rows) rows (assumes tightly packed rows)
inline Mat Mat::row_range(int y, int rows)
{
return Mat(w, rows, (unsigned char*)data + w * y * elemsize, elemsize, allocator);
}
inline const Mat Mat::row_range(int y, int rows) const
{
return Mat(w, rows, (unsigned char*)data + w * y * elemsize, elemsize, allocator);
}
// non-owning 1-dim view over [x, x + n) elements
inline Mat Mat::range(int x, int n)
{
return Mat(n, (unsigned char*)data + x * elemsize, elemsize, allocator);
}
inline const Mat Mat::range(int x, int n) const
{
return Mat(n, (unsigned char*)data + x * elemsize, elemsize, allocator);
}
template
inline Mat::operator T*()
{
return (T*)data;
}
template
inline Mat::operator const T*() const
{
return (const T*)data;
}
inline float& Mat::operator[](int i)
{
return ((float*)data)[i];
}
inline const float& Mat::operator[](int i) const
{
return ((const float*)data)[i];
}
} // namespace ncnn
#endif // NCNN_MAT_H
================================================
FILE: MobileNetSSD_demo_single/app/src/main/cpp/include/modelbin.h
================================================
// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
#ifndef NCNN_MODELBIN_H
#define NCNN_MODELBIN_H
#include
#include "mat.h"
#include "platform.h"
namespace ncnn {
class Net;
// abstract source of layer weight data; concrete subclasses read from
// stdio, raw memory, or a prepared Mat array
class ModelBin
{
public:
// element type
// 0 = auto
// 1 = float32
// 2 = float16
// 3 = int8
// load vec
virtual Mat load(int w, int type) const = 0;
// load image
virtual Mat load(int w, int h, int type) const;
// load dim
virtual Mat load(int w, int h, int c, int type) const;
};
#if NCNN_STDIO
// weight source backed by an open FILE*; reads advance the file position
class ModelBinFromStdio : public ModelBin
{
public:
// construct from file
ModelBinFromStdio(FILE* binfp);
virtual Mat load(int w, int type) const;
protected:
FILE* binfp;
};
#endif // NCNN_STDIO
// weight source backed by a caller-owned memory block; the referenced
// pointer is advanced as data is consumed
class ModelBinFromMemory : public ModelBin
{
public:
// construct from external memory
ModelBinFromMemory(const unsigned char*& mem);
virtual Mat load(int w, int type) const;
protected:
const unsigned char*& mem;
};
// weight source backed by an array of pre-built Mat weight blobs
class ModelBinFromMatArray : public ModelBin
{
public:
// construct from weight blob array
ModelBinFromMatArray(const Mat* weights);
virtual Mat load(int w, int type) const;
protected:
// mutable: load() advances the cursor even though it is const
mutable const Mat* weights;
};
} // namespace ncnn
#endif // NCNN_MODELBIN_H
================================================
FILE: MobileNetSSD_demo_single/app/src/main/cpp/include/net.h
================================================
// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
#ifndef NCNN_NET_H
#define NCNN_NET_H
#include
#include
#include "blob.h"
#include "layer.h"
#include "mat.h"
#include "platform.h"
namespace ncnn {
class Extractor;
class Net
{
public:
// empty init
Net();
// clear and destroy
~Net();
#if NCNN_STRING
// register custom layer by layer type name
// return 0 if success
int register_custom_layer(const char* type, layer_creator_func creator);
#endif // NCNN_STRING
// register custom layer by layer type
// return 0 if success
int register_custom_layer(int index, layer_creator_func creator);
#if NCNN_STDIO
#if NCNN_STRING
// load network structure from plain param file
// return 0 if success
int load_param(FILE* fp);
int load_param(const char* protopath);
int load_param_mem(const char* mem);
#endif // NCNN_STRING
// load network structure from binary param file
// return 0 if success
int load_param_bin(FILE* fp);
int load_param_bin(const char* protopath);
// load network weight data from model file
// return 0 if success
int load_model(FILE* fp);
int load_model(const char* modelpath);
#endif // NCNN_STDIO
// load network structure from external memory
// memory pointer must be 32-bit aligned
// return bytes consumed
int load_param(const unsigned char* mem);
// reference network weight data from external memory
// weight data is not copied but referenced
// so external memory should be retained when used
// memory pointer must be 32-bit aligned
// return bytes consumed
int load_model(const unsigned char* mem);
// unload network structure and weight data
void clear();
// construct an Extractor from network
Extractor create_extractor() const;
public:
// enable winograd convolution optimization
// improve convolution 3x3 stride1 performace, may consume more memory
// changes should be applied before loading network structure and weight
// enabled by default
int use_winograd_convolution;
// enable sgemm convolution optimization
// improve convolution 1x1 stride1 performace, may consume more memory
// changes should be applied before loading network structure and weight
// enabled by default
int use_sgemm_convolution;
// enable quantized int8 inference
// use low-precision int8 path for quantized model
// changes should be applied before loading network structure and weight
// enabled by default
int use_int8_inference;
protected:
friend class Extractor;
#if NCNN_STRING
int find_blob_index_by_name(const char* name) const;
int find_layer_index_by_name(const char* name) const;
int custom_layer_to_index(const char* type);
Layer* create_custom_layer(const char* type);
#endif // NCNN_STRING
Layer* create_custom_layer(int index);
int forward_layer(int layer_index, std::vector& blob_mats, Option& opt) const;
protected:
std::vector blobs;
std::vector layers;
std::vector custom_layer_registry;
};
class Extractor
{
public:
// enable light mode
// intermediate blob will be recycled when enabled
// enabled by default
void set_light_mode(bool enable);
// set thread count for this extractor
// this will overwrite the global setting
// default count is system depended
void set_num_threads(int num_threads);
// set blob memory allocator
void set_blob_allocator(Allocator* allocator);
// set workspace memory allocator
void set_workspace_allocator(Allocator* allocator);
#if NCNN_STRING
// set input by blob name
// return 0 if success
int input(const char* blob_name, const Mat& in);
// get result by blob name
// return 0 if success
int extract(const char* blob_name, Mat& feat);
#endif // NCNN_STRING
// set input by blob index
// return 0 if success
int input(int blob_index, const Mat& in);
// get result by blob index
// return 0 if success
int extract(int blob_index, Mat& feat);
protected:
friend Extractor Net::create_extractor() const;
Extractor(const Net* net, int blob_count);
private:
const Net* net;
std::vector blob_mats;
Option opt;
};
} // namespace ncnn
#endif // NCNN_NET_H
================================================
FILE: MobileNetSSD_demo_single/app/src/main/cpp/include/opencv.h
================================================
// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
#ifndef NCNN_OPENCV_H
#define NCNN_OPENCV_H
#include "platform.h"
#if NCNN_OPENCV
#include
#include
#include "mat.h"
// minimal opencv style data structure implementation
namespace cv
{
// minimal stand-in for cv::Size: a width/height pair
struct Size
{
Size() : width(0), height(0) {}
Size(int _w, int _h) : width(_w), height(_h) {}
int width;
int height;
};
// minimal stand-in for cv::Rect_: an axis-aligned rectangle.
// NOTE(review): the template parameter list '<typename _Tp>' was stripped by
// extraction in the original text; restored here.
template<typename _Tp>
struct Rect_
{
    Rect_() : x(0), y(0), width(0), height(0) {}
    Rect_(_Tp _x, _Tp _y, _Tp _w, _Tp _h) : x(_x), y(_y), width(_w), height(_h) {}

    _Tp x;
    _Tp y;
    _Tp width;
    _Tp height;

    // area
    _Tp area() const
    {
        return width * height;
    }
};
// Rect_ set operations, mirroring OpenCV semantics.
// NOTE(review): each 'template<typename _Tp>' header and the typedef
// template arguments below were stripped by extraction; restored here.

// intersection-assign; becomes the empty rect when there is no overlap
template<typename _Tp> static inline Rect_<_Tp>& operator &= ( Rect_<_Tp>& a, const Rect_<_Tp>& b )
{
    _Tp x1 = std::max(a.x, b.x), y1 = std::max(a.y, b.y);
    a.width = std::min(a.x + a.width, b.x + b.width) - x1;
    a.height = std::min(a.y + a.height, b.y + b.height) - y1;
    a.x = x1; a.y = y1;
    if( a.width <= 0 || a.height <= 0 )
        a = Rect_<_Tp>();
    return a;
}

// bounding-box union-assign
template<typename _Tp> static inline Rect_<_Tp>& operator |= ( Rect_<_Tp>& a, const Rect_<_Tp>& b )
{
    _Tp x1 = std::min(a.x, b.x), y1 = std::min(a.y, b.y);
    a.width = std::max(a.x + a.width, b.x + b.width) - x1;
    a.height = std::max(a.y + a.height, b.y + b.height) - y1;
    a.x = x1; a.y = y1;
    return a;
}

template<typename _Tp> static inline Rect_<_Tp> operator & (const Rect_<_Tp>& a, const Rect_<_Tp>& b)
{
    Rect_<_Tp> c = a;
    return c &= b;
}

template<typename _Tp> static inline Rect_<_Tp> operator | (const Rect_<_Tp>& a, const Rect_<_Tp>& b)
{
    Rect_<_Tp> c = a;
    return c |= b;
}

typedef Rect_<int> Rect;
typedef Rect_<float> Rect2f;
// minimal stand-in for cv::Point_: an x/y pair.
// NOTE(review): '<typename _Tp>' and the typedef template arguments were
// stripped by extraction in the original text; restored here.
template<typename _Tp>
struct Point_
{
    Point_() : x(0), y(0) {}
    Point_(_Tp _x, _Tp _y) : x(_x), y(_y) {}

    _Tp x;
    _Tp y;
};

typedef Point_<int> Point;
typedef Point_<float> Point2f;
#define CV_8UC1 1
#define CV_8UC3 3
#define CV_8UC4 4
#define CV_32FC1 4
// minimal stand-in for cv::Mat: a refcounted rows x cols x c byte image
struct Mat
{
// empty image
Mat() : data(0), refcount(0), rows(0), cols(0), c(0) {}
// owning constructor; flags is the channel count (CV_8UCn macros)
Mat(int _rows, int _cols, int flags) : data(0), refcount(0)
{
create(_rows, _cols, flags);
}
// copy
// shallow copy sharing the buffer; bumps the refcount when owning
Mat(const Mat& m) : data(m.data), refcount(m.refcount)
{
if (refcount)
NCNN_XADD(refcount, 1);
rows = m.rows;
cols = m.cols;
c = m.c;
}
// non-owning wrapper around external pixel data (refcount stays NULL)
Mat(int _rows, int _cols, int flags, void* _data) : data((unsigned char*)_data), refcount(0)
{
rows = _rows;
cols = _cols;
c = flags;
}
~Mat()
{
release();
}
// assign
// addref the source before release() so overlapping buffers survive
Mat& operator=(const Mat& m)
{
if (this == &m)
return *this;
if (m.refcount)
NCNN_XADD(m.refcount, 1);
release();
data = m.data;
refcount = m.refcount;
rows = m.rows;
cols = m.cols;
c = m.c;
return *this;
}
// allocate rows*cols*c bytes plus a trailing refcount int
void create(int _rows, int _cols, int flags)
{
release();
rows = _rows;
cols = _cols;
c = flags;
if (total() > 0)
{
// refcount address must be aligned, so we expand totalsize here
size_t totalsize = (total() + 3) >> 2 << 2;
data = (unsigned char*)ncnn::fastMalloc(totalsize + (int)sizeof(*refcount));
refcount = (int*)(((unsigned char*)data) + totalsize);
*refcount = 1;
}
}
// drop one reference; frees when this was the last owner
void release()
{
// NCNN_XADD returns the previous value: 1 means last reference
if (refcount && NCNN_XADD(refcount, -1) == 1)
ncnn::fastFree(data);
data = 0;
rows = 0;
cols = 0;
c = 0;
refcount = 0;
}
// deep copy of the pixel data
Mat clone() const
{
if (empty())
return Mat();
Mat m(rows, cols, c);
if (total() > 0)
{
memcpy(m.data, data, total());
}
return m;
}
bool empty() const { return data == 0 || total() == 0; }
int channels() const { return c; }
// total byte count (1 byte per channel sample)
size_t total() const { return cols * rows * c; }
// pointer to the start of row y (rows are tightly packed)
const unsigned char* ptr(int y) const { return data + y * cols * c; }
unsigned char* ptr(int y) { return data + y * cols * c; }
// roi
// deep-copy crop of the given rectangle
Mat operator()( const Rect& roi ) const
{
if (empty())
return Mat();
Mat m(roi.height, roi.width, c);
int sy = roi.y;
for (int y = 0; y < roi.height; y++)
{
const unsigned char* sptr = ptr(sy) + roi.x * c;
unsigned char* dptr = m.ptr(y);
memcpy(dptr, sptr, roi.width * c);
sy++;
}
return m;
}
unsigned char* data;
// pointer to the reference counter;
// when points to user-allocated data, the pointer is NULL
int* refcount;
int rows;
int cols;
int c;
};
// imread flags: value doubles as the channel count of the decoded image
#define CV_LOAD_IMAGE_GRAYSCALE 1
#define CV_LOAD_IMAGE_COLOR 3
// decode an image file into a Mat (flags selects gray vs color)
Mat imread(const std::string& path, int flags);
// encode and write a Mat to an image file
void imwrite(const std::string& path, const Mat& m);
#if NCNN_PIXEL
// bilinear resize; when size is 0x0, sw/sh are used as scale factors
void resize(const Mat& src, Mat& dst, const Size& size, float sw = 0.f, float sh = 0.f, int flags = 0);
#endif // NCNN_PIXEL
} // namespace cv
#endif // NCNN_OPENCV
#endif // NCNN_OPENCV_H
================================================
FILE: MobileNetSSD_demo_single/app/src/main/cpp/include/paramdict.h
================================================
// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
#ifndef NCNN_PARAMDICT_H
#define NCNN_PARAMDICT_H
#include
#include "mat.h"
#include "platform.h"
// at most 20 parameters
#define NCNN_MAX_PARAM_COUNT 20
namespace ncnn {
class Net;
// id -> value dictionary holding a layer's parameters; each slot can be an
// int, a float, or a Mat array, up to NCNN_MAX_PARAM_COUNT entries
class ParamDict
{
public:
// empty
ParamDict();
// get int
int get(int id, int def) const;
// get float
float get(int id, float def) const;
// get array
Mat get(int id, const Mat& def) const;
// set int
void set(int id, int i);
// set float
void set(int id, float f);
// set array
void set(int id, const Mat& v);
public:
// global option flags carried alongside the per-layer parameters
int use_winograd_convolution;
int use_sgemm_convolution;
int use_int8_inference;
protected:
// only Net drives (re)loading of the dictionary
friend class Net;
void clear();
#if NCNN_STDIO
#if NCNN_STRING
int load_param(FILE* fp);
int load_param_mem(const char*& mem);
#endif // NCNN_STRING
int load_param_bin(FILE* fp);
#endif // NCNN_STDIO
int load_param(const unsigned char*& mem);
protected:
// one slot per parameter id; 'loaded' marks whether the slot was set
struct
{
int loaded;
union { int i; float f; };
Mat v;
} params[NCNN_MAX_PARAM_COUNT];
};
} // namespace ncnn
#endif // NCNN_PARAMDICT_H
================================================
FILE: MobileNetSSD_demo_single/app/src/main/cpp/include/platform.h
================================================
// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
#ifndef NCNN_PLATFORM_H
#define NCNN_PLATFORM_H
// compile-time feature switches for this ncnn build
#define NCNN_STDIO 1
#define NCNN_STRING 1
#define NCNN_OPENCV 0
#define NCNN_BENCHMARK 0
#define NCNN_PIXEL 1
#define NCNN_PIXEL_ROTATE 0
#endif // NCNN_PLATFORM_H
================================================
FILE: MobileNetSSD_demo_single/app/src/main/java/com/example/che/mobilenetssd_demo/MainActivity.java
================================================
package com.example.che.mobilenetssd_demo;
import android.Manifest;
import android.app.Activity;
import android.content.Intent;
import android.content.pm.PackageManager;
import android.content.res.AssetManager;
import android.graphics.Bitmap;
import android.graphics.Canvas;
import android.graphics.Color;
import android.graphics.Paint;
import android.net.Uri;
import android.support.annotation.NonNull;
import android.support.annotation.Nullable;
import android.support.v4.app.ActivityCompat;
import android.support.v4.content.ContextCompat;
import android.support.v7.app.AppCompatActivity;
import android.os.Bundle;
import android.text.method.ScrollingMovementMethod;
import android.util.Log;
import android.view.View;
import android.widget.Button;
import android.widget.ImageView;
import android.widget.TextView;
import android.widget.Toast;
import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import com.bumptech.glide.Glide;
import com.bumptech.glide.load.engine.DiskCacheStrategy;
import com.bumptech.glide.request.RequestOptions;
public class MainActivity extends AppCompatActivity {
private static final String TAG = MainActivity.class.getName();
private static final int USE_PHOTO = 1001;
private String camera_image_path;
private ImageView show_image;
private TextView result_text;
private boolean load_result = false;
private int[] ddims = {1, 3, 300, 300}; //这里的维度的值要和train model的input 一一对应
private int model_index = 1;
private List resultLabel = new ArrayList<>();
private MobileNetssd mobileNetssd = new MobileNetssd(); //java接口实例化 下面直接利用java函数调用NDK c++函数
@Override
protected void onCreate(Bundle savedInstanceState) {
super.onCreate(savedInstanceState);
setContentView(R.layout.activity_main);
try
{
initMobileNetSSD();
} catch (IOException e) {
Log.e("MainActivity", "initMobileNetSSD error");
}
init_view();
readCacheLabelFromLocalFile();
}
/**
*
* MobileNetssd初始化,也就是把model文件进行加载
*/
private void initMobileNetSSD() throws IOException {
byte[] param = null;
byte[] bin = null;
{
//用io流读取二进制文件,最后存入到byte[]数组中
InputStream assetsInputStream = getAssets().open("MobileNetSSD_deploy.param.bin");// param: 网络结构文件
int available = assetsInputStream.available();
param = new byte[available];
int byteCode = assetsInputStream.read(param);
assetsInputStream.close();
}
{
//用io流读取二进制文件,最后存入到byte上,转换为int型
InputStream assetsInputStream = getAssets().open("MobileNetSSD_deploy.bin");//bin: model文件
int available = assetsInputStream.available();
bin = new byte[available];
int byteCode = assetsInputStream.read(bin);
assetsInputStream.close();
}
load_result = mobileNetssd.Init(param, bin);// 再将文件传入java的NDK接口(c++ 代码中的init接口 )
Log.d("load model", "MobileNetSSD_load_model_result:" + load_result);
}
// initialize view
private void init_view() {
request_permissions();
show_image = (ImageView) findViewById(R.id.show_image);
result_text = (TextView) findViewById(R.id.result_text);
result_text.setMovementMethod(ScrollingMovementMethod.getInstance());
Button use_photo = (Button) findViewById(R.id.use_photo);
// use photo click
use_photo.setOnClickListener(new View.OnClickListener() {
@Override
public void onClick(View view) {
if (!load_result) {
Toast.makeText(MainActivity.this, "never load model", Toast.LENGTH_SHORT).show();
return;
}
PhotoUtil.use_photo(MainActivity.this, USE_PHOTO);
}
});
}
// load label's name
private void readCacheLabelFromLocalFile() {
try {
AssetManager assetManager = getApplicationContext().getAssets();
BufferedReader reader = new BufferedReader(new InputStreamReader(assetManager.open("words.txt")));//这里是label的文件
String readLine = null;
while ((readLine = reader.readLine()) != null) {
resultLabel.add(readLine);
}
reader.close();
} catch (Exception e) {
Log.e("labelCache", "error " + e);
}
}
protected void onActivityResult(int requestCode, int resultCode, @Nullable Intent data) {
String image_path;
RequestOptions options = new RequestOptions().skipMemoryCache(true).diskCacheStrategy(DiskCacheStrategy.NONE);
if (resultCode == Activity.RESULT_OK) {
switch (requestCode) {
case USE_PHOTO:
if (data == null) {
Log.w(TAG, "user photo data is null");
return;
}
Uri image_uri = data.getData();
//Glide.with(MainActivity.this).load(image_uri).apply(options).into(show_image);
// get image path from uri
image_path = PhotoUtil.get_path_from_URI(MainActivity.this, image_uri);
// predict image
predict_image(image_path);
break;
}
}
}
// predict image
private void predict_image(String image_path) {
// picture to float array
Bitmap bmp = PhotoUtil.getScaleBitmap(image_path);
Bitmap rgba = bmp.copy(Bitmap.Config.ARGB_8888, true);
// resize to 300*300
Bitmap input_bmp = Bitmap.createScaledBitmap(rgba, ddims[2], ddims[3], false);
try {
// Data format conversion takes too long
// Log.d("inputData", Arrays.toString(inputData));
long start = System.currentTimeMillis();
// get predict result
float[] result = mobileNetssd.Detect(input_bmp);
// time end
long end = System.currentTimeMillis();
Log.d(TAG, "origin predict result:" + Arrays.toString(result));
long time = end - start;
Log.d("result length", "length of result: " + String.valueOf(result.length));
// show predict result and time
float[] r = get_max_result(result);
String show_text = "result:" + Arrays.toString(r) + "\nname:" + resultLabel.get((int) r[0]) + "\nprobability:" + r[1] + "\ntime:" + time + "ms" ;
result_text.setText(show_text);
Canvas canvas = new Canvas(rgba);
//图像上画矩形
Paint paint = new Paint();
paint.setColor(Color.RED);
paint.setStyle(Paint.Style.STROKE);//不填充
paint.setStrokeWidth(10); //线的宽度
canvas.drawRect(r[2]*rgba.getWidth(), r[3]*rgba.getHeight(), r[4]*rgba.getWidth(), r[5]*rgba.getHeight(), paint);
show_image.setImageBitmap(rgba);
} catch (Exception e) {
e.printStackTrace();
}
}
// get max probability label
private float[] get_max_result(float[] result) {
int num_rs = result.length / 6;
float maxProp = result[1];
int maxI = 0;
for(int i = 1; i permissionList = new ArrayList<>();
if (ContextCompat.checkSelfPermission(this, Manifest.permission.CAMERA) != PackageManager.PERMISSION_GRANTED) {
permissionList.add(Manifest.permission.CAMERA);
}
if (ContextCompat.checkSelfPermission(this, Manifest.permission.WRITE_EXTERNAL_STORAGE) != PackageManager.PERMISSION_GRANTED) {
permissionList.add(Manifest.permission.WRITE_EXTERNAL_STORAGE);
}
if (ContextCompat.checkSelfPermission(this, Manifest.permission.READ_EXTERNAL_STORAGE) != PackageManager.PERMISSION_GRANTED) {
permissionList.add(Manifest.permission.READ_EXTERNAL_STORAGE);
}
// if list is not empty will request permissions
if (!permissionList.isEmpty()) {
ActivityCompat.requestPermissions(this, permissionList.toArray(new String[permissionList.size()]), 1);
}
}
@Override
public void onRequestPermissionsResult(int requestCode, @NonNull String[] permissions, @NonNull int[] grantResults) {
super.onRequestPermissionsResult(requestCode, permissions, grantResults);
switch (requestCode) {
case 1:
if (grantResults.length > 0) {
for (int i = 0; i < grantResults.length; i++) {
int grantResult = grantResults[i];
if (grantResult == PackageManager.PERMISSION_DENIED) {
String s = permissions[i];
Toast.makeText(this, s + "permission was denied", Toast.LENGTH_SHORT).show();
}
}
}
break;
}
}
}
================================================
FILE: MobileNetSSD_demo_single/app/src/main/java/com/example/che/mobilenetssd_demo/MobileNetssd.java
================================================
package com.example.che.mobilenetssd_demo;
import android.graphics.Bitmap;
/**
 * Java-side interface to the native C++ code; 'native' here means implemented
 * in the NDK library. This file mirrors MobileNetssd.cpp.
 */
public class MobileNetssd {
public native boolean Init(byte[] param, byte[] bin); // initialize: param = network structure, bin = weights
public native float[] Detect(Bitmap bitmap); // run detection on a bitmap, returns the raw result array
// Used to load the 'native-lib' library on application startup.
static {
System.loadLibrary("MobileNetssd");
}
}
================================================
FILE: MobileNetSSD_demo_single/app/src/main/java/com/example/che/mobilenetssd_demo/PhotoUtil.java
================================================
package com.example.che.mobilenetssd_demo;
import android.app.Activity;
import android.content.Context;
import android.content.Intent;
import android.database.Cursor;
import android.graphics.Bitmap;
import android.graphics.BitmapFactory;
import android.net.Uri;
import android.provider.MediaStore;
public class PhotoUtil {

    /** Launch the system gallery picker; the result arrives via onActivityResult. */
    public static void use_photo(Activity activity, int requestCode) {
        Intent pickIntent = new Intent(Intent.ACTION_PICK);
        pickIntent.setType("image/*");
        activity.startActivityForResult(pickIntent, requestCode);
    }

    /**
     * Resolve a content Uri to a filesystem path via the MediaStore DATA
     * column, falling back to Uri.getPath() when no cursor is available.
     */
    public static String get_path_from_URI(Context context, Uri uri) {
        Cursor cursor = context.getContentResolver().query(uri, null, null, null, null);
        if (cursor == null) {
            return uri.getPath();
        }
        cursor.moveToFirst();
        int dataColumn = cursor.getColumnIndex(MediaStore.Images.ImageColumns.DATA);
        String path = cursor.getString(dataColumn);
        cursor.close();
        return path;
    }

    /**
     * Decode a bitmap from filePath, downsampled by powers of two until either
     * side would drop below 500px.
     */
    public static Bitmap getScaleBitmap(String filePath) {
        BitmapFactory.Options decodeOptions = new BitmapFactory.Options();

        // first pass: bounds only, to learn the raw dimensions
        decodeOptions.inJustDecodeBounds = true;
        BitmapFactory.decodeFile(filePath, decodeOptions);
        int rawWidth = decodeOptions.outWidth;
        int rawHeight = decodeOptions.outHeight;

        // pick the largest power-of-two sample keeping both sides >= maxSize
        final int maxSize = 500;
        int sample = 1;
        while (rawWidth / sample >= maxSize && rawHeight / sample >= maxSize) {
            sample *= 2;
        }
        decodeOptions.inSampleSize = sample;

        // second pass: actually decode the (possibly downsampled) pixels
        decodeOptions.inJustDecodeBounds = false;
        return BitmapFactory.decodeFile(filePath, decodeOptions);
    }
}
================================================
FILE: MobileNetSSD_demo_single/app/src/main/res/drawable/ic_launcher_background.xml
================================================
================================================
FILE: MobileNetSSD_demo_single/app/src/main/res/drawable-v24/ic_launcher_foreground.xml
================================================
================================================
FILE: MobileNetSSD_demo_single/app/src/main/res/layout/activity_main.xml
================================================
================================================
FILE: MobileNetSSD_demo_single/app/src/main/res/mipmap-anydpi-v26/ic_launcher.xml
================================================
================================================
FILE: MobileNetSSD_demo_single/app/src/main/res/mipmap-anydpi-v26/ic_launcher_round.xml
================================================
================================================
FILE: MobileNetSSD_demo_single/app/src/main/res/values/colors.xml
================================================
#008577#00574B#D81B60
================================================
FILE: MobileNetSSD_demo_single/app/src/main/res/values/strings.xml
================================================
MobileNetSSD_demo
================================================
FILE: MobileNetSSD_demo_single/app/src/main/res/values/styles.xml
================================================
================================================
FILE: MobileNetSSD_demo_single/app/src/test/java/com/example/che/mobilenetssd_demo/ExampleUnitTest.java
================================================
package com.example.che.mobilenetssd_demo;
import org.junit.Test;
import static org.junit.Assert.*;
/**
 * Example local unit test; runs on the development machine (host JVM),
 * not on a device or emulator.
 */
public class ExampleUnitTest {
    @Test
    public void addition_isCorrect() {
        final int expected = 4;
        final int actual = 2 + 2;
        assertEquals(expected, actual);
    }
}
================================================
FILE: MobileNetSSD_demo_single/build.gradle
================================================
// Top-level build file where you can add configuration options common to all sub-projects/modules.
buildscript {
repositories {
google()
jcenter()
}
dependencies {
// Android Gradle plugin that builds the :app module.
classpath 'com.android.tools.build:gradle:3.2.1'
// NOTE: Do not place your application dependencies here; they belong
// in the individual module build.gradle files
}
}
// Repositories used to resolve dependencies of every module in the build.
allprojects {
repositories {
google()
jcenter()
}
}
// `gradlew clean` removes the root project's build directory.
task clean(type: Delete) {
delete rootProject.buildDir
}
================================================
FILE: MobileNetSSD_demo_single/gradle/wrapper/gradle-wrapper.properties
================================================
# Gradle wrapper configuration: which Gradle distribution to download and
# where to cache it (paths below are relative to GRADLE_USER_HOME).
distributionBase=GRADLE_USER_HOME
distributionPath=wrapper/dists
# Gradle 4.6, "-all" variant (bundles sources/docs for IDE support).
distributionUrl=https\://services.gradle.org/distributions/gradle-4.6-all.zip
zipStoreBase=GRADLE_USER_HOME
zipStorePath=wrapper/dists
================================================
FILE: MobileNetSSD_demo_single/gradle.properties
================================================
# Project-wide Gradle settings.
# IDE (e.g. Android Studio) users:
# Gradle settings configured through the IDE *will override*
# any settings specified in this file.
# For more details on how to configure your build environment visit
# http://www.gradle.org/docs/current/userguide/build_environment.html
# Specifies the JVM arguments used for the daemon process.
# The setting is particularly useful for tweaking memory settings.
# 1.5 GB heap for the Gradle daemon (Android builds are memory-hungry).
org.gradle.jvmargs=-Xmx1536m
# When configured, Gradle will run in incubating parallel mode.
# This option should only be used with decoupled projects. More details, visit
# http://www.gradle.org/docs/current/userguide/multi_project_builds.html#sec:decoupled_projects
# org.gradle.parallel=true
================================================
FILE: MobileNetSSD_demo_single/gradlew
================================================
#!/usr/bin/env sh
##############################################################################
##
## Gradle start up script for UN*X
##
##############################################################################
# Attempt to set APP_HOME
# Resolve links: $0 may be a link
PRG="$0"
# Need this for relative symlinks.
# Follow the chain of symlinks until PRG names the real script file.
while [ -h "$PRG" ] ; do
ls=`ls -ld "$PRG"`
link=`expr "$ls" : '.*-> \(.*\)$'`
if expr "$link" : '/.*' > /dev/null; then
PRG="$link"
else
PRG=`dirname "$PRG"`"/$link"
fi
done
# APP_HOME = physical (symlink-free) directory containing this script.
SAVED="`pwd`"
cd "`dirname \"$PRG\"`/" >/dev/null
APP_HOME="`pwd -P`"
cd "$SAVED" >/dev/null
APP_NAME="Gradle"
APP_BASE_NAME=`basename "$0"`
# Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script.
DEFAULT_JVM_OPTS=""
# Use the maximum available, or set MAX_FD != -1 to use that value.
MAX_FD="maximum"
# Print a warning message (all arguments joined) to stdout.
warn () {
echo "$*"
}
# Print an error message (all arguments joined) and abort with exit code 1.
die () {
echo
echo "$*"
echo
exit 1
}
# OS specific support (must be 'true' or 'false').
cygwin=false
msys=false
darwin=false
nonstop=false
case "`uname`" in
CYGWIN* )
cygwin=true
;;
Darwin* )
darwin=true
;;
MINGW* )
msys=true
;;
NONSTOP* )
nonstop=true
;;
esac
# Only the wrapper jar is needed; it bootstraps the real Gradle distribution.
CLASSPATH=$APP_HOME/gradle/wrapper/gradle-wrapper.jar
# Determine the Java command to use to start the JVM.
if [ -n "$JAVA_HOME" ] ; then
if [ -x "$JAVA_HOME/jre/sh/java" ] ; then
# IBM's JDK on AIX uses strange locations for the executables
JAVACMD="$JAVA_HOME/jre/sh/java"
else
JAVACMD="$JAVA_HOME/bin/java"
fi
if [ ! -x "$JAVACMD" ] ; then
die "ERROR: JAVA_HOME is set to an invalid directory: $JAVA_HOME
Please set the JAVA_HOME variable in your environment to match the
location of your Java installation."
fi
else
# No JAVA_HOME: fall back to whatever `java` is on PATH, or abort.
JAVACMD="java"
which java >/dev/null 2>&1 || die "ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH.
Please set the JAVA_HOME variable in your environment to match the
location of your Java installation."
fi
# Increase the maximum file descriptors if we can.
if [ "$cygwin" = "false" -a "$darwin" = "false" -a "$nonstop" = "false" ] ; then
MAX_FD_LIMIT=`ulimit -H -n`
if [ $? -eq 0 ] ; then
if [ "$MAX_FD" = "maximum" -o "$MAX_FD" = "max" ] ; then
# Raise the soft limit to the hard limit queried above.
MAX_FD="$MAX_FD_LIMIT"
fi
ulimit -n $MAX_FD
if [ $? -ne 0 ] ; then
warn "Could not set maximum file descriptor limit: $MAX_FD"
fi
else
warn "Could not query maximum file descriptor limit: $MAX_FD_LIMIT"
fi
fi
# For Darwin, add options to specify how the application appears in the dock
if $darwin; then
GRADLE_OPTS="$GRADLE_OPTS \"-Xdock:name=$APP_NAME\" \"-Xdock:icon=$APP_HOME/media/gradle.icns\""
fi
# For Cygwin, switch paths to Windows format before running java
if $cygwin ; then
APP_HOME=`cygpath --path --mixed "$APP_HOME"`
CLASSPATH=`cygpath --path --mixed "$CLASSPATH"`
JAVACMD=`cygpath --unix "$JAVACMD"`
# We build the pattern for arguments to be converted via cygpath
# (an alternation of every top-level directory on the filesystem).
ROOTDIRSRAW=`find -L / -maxdepth 1 -mindepth 1 -type d 2>/dev/null`
SEP=""
for dir in $ROOTDIRSRAW ; do
ROOTDIRS="$ROOTDIRS$SEP$dir"
SEP="|"
done
OURCYGPATTERN="(^($ROOTDIRS))"
# Add a user-defined pattern to the cygpath arguments
if [ "$GRADLE_CYGPATTERN" != "" ] ; then
OURCYGPATTERN="$OURCYGPATTERN|($GRADLE_CYGPATTERN)"
fi
# Now convert the arguments - kludge to limit ourselves to /bin/sh
# Each positional arg is copied into argsN; path-like, non-option args
# are converted to mixed Windows form with cygpath.
i=0
for arg in "$@" ; do
CHECK=`echo "$arg"|egrep -c "$OURCYGPATTERN" -`
CHECK2=`echo "$arg"|egrep -c "^-"` ### Determine if an option
if [ $CHECK -ne 0 ] && [ $CHECK2 -eq 0 ] ; then ### Added a condition
eval `echo args$i`=`cygpath --path --ignore --mixed "$arg"`
else
eval `echo args$i`="\"$arg\""
fi
i=$((i+1))
done
# Reassemble the (up to 9) converted arguments as the new positional params.
case $i in
(0) set -- ;;
(1) set -- "$args0" ;;
(2) set -- "$args0" "$args1" ;;
(3) set -- "$args0" "$args1" "$args2" ;;
(4) set -- "$args0" "$args1" "$args2" "$args3" ;;
(5) set -- "$args0" "$args1" "$args2" "$args3" "$args4" ;;
(6) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" ;;
(7) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" ;;
(8) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" "$args7" ;;
(9) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" "$args7" "$args8" ;;
esac
fi
# Escape application args
# Emit each argument single-quoted (embedded quotes escaped as '\''),
# one per line, each line ending in a backslash so the whole output can
# later be re-parsed by `eval set --` as a single continued command.
save () {
for i do printf %s\\n "$i" | sed "s/'/'\\\\''/g;1s/^/'/;\$s/\$/' \\\\/" ; done
echo " "
}
# Quote-preserving snapshot of the original command-line arguments.
APP_ARGS=$(save "$@")
# Collect all arguments for the java command, following the shell quoting and substitution rules
eval set -- $DEFAULT_JVM_OPTS $JAVA_OPTS $GRADLE_OPTS "\"-Dorg.gradle.appname=$APP_BASE_NAME\"" -classpath "\"$CLASSPATH\"" org.gradle.wrapper.GradleWrapperMain "$APP_ARGS"
# by default we should be in the correct project dir, but when run from Finder on Mac, the cwd is wrong
if [ "$(uname)" = "Darwin" ] && [ "$HOME" = "$PWD" ]; then
cd "$(dirname "$0")"
fi
# Replace this shell with the JVM running the Gradle wrapper main class.
exec "$JAVACMD" "$@"
================================================
FILE: MobileNetSSD_demo_single/gradlew.bat
================================================
@if "%DEBUG%" == "" @echo off
@rem ##########################################################################
@rem
@rem Gradle startup script for Windows
@rem
@rem ##########################################################################
@rem Set local scope for the variables with windows NT shell
if "%OS%"=="Windows_NT" setlocal
@rem APP_HOME = directory containing this script (defaults to current dir).
set DIRNAME=%~dp0
if "%DIRNAME%" == "" set DIRNAME=.
set APP_BASE_NAME=%~n0
set APP_HOME=%DIRNAME%
@rem Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script.
set DEFAULT_JVM_OPTS=
@rem Find java.exe
if defined JAVA_HOME goto findJavaFromJavaHome
set JAVA_EXE=java.exe
@rem Probe whether a plain `java` on PATH works; ERRORLEVEL 0 means it does.
%JAVA_EXE% -version >NUL 2>&1
if "%ERRORLEVEL%" == "0" goto init
echo.
echo ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH.
echo.
echo Please set the JAVA_HOME variable in your environment to match the
echo location of your Java installation.
goto fail
:findJavaFromJavaHome
@rem Strip any quotes out of JAVA_HOME before composing the java.exe path.
set JAVA_HOME=%JAVA_HOME:"=%
set JAVA_EXE=%JAVA_HOME%/bin/java.exe
if exist "%JAVA_EXE%" goto init
echo.
echo ERROR: JAVA_HOME is set to an invalid directory: %JAVA_HOME%
echo.
echo Please set the JAVA_HOME variable in your environment to match the
echo location of your Java installation.
goto fail
:init
@rem Get command-line arguments, handling Windows variants
if not "%OS%" == "Windows_NT" goto win9xME_args
:win9xME_args
@rem Slurp the command line arguments.
set CMD_LINE_ARGS=
set _SKIP=2
:win9xME_args_slurp
if "x%~1" == "x" goto execute
set CMD_LINE_ARGS=%*
:execute
@rem Setup the command line
set CLASSPATH=%APP_HOME%\gradle\wrapper\gradle-wrapper.jar
@rem Execute Gradle
"%JAVA_EXE%" %DEFAULT_JVM_OPTS% %JAVA_OPTS% %GRADLE_OPTS% "-Dorg.gradle.appname=%APP_BASE_NAME%" -classpath "%CLASSPATH%" org.gradle.wrapper.GradleWrapperMain %CMD_LINE_ARGS%
:end
@rem End local scope for the variables with windows NT shell
if "%ERRORLEVEL%"=="0" goto mainEnd
:fail
rem Set variable GRADLE_EXIT_CONSOLE if you need the _script_ return code instead of
rem the _cmd.exe /c_ return code!
if not "" == "%GRADLE_EXIT_CONSOLE%" exit 1
exit /b 1
:mainEnd
if "%OS%"=="Windows_NT" endlocal
:omega
================================================
FILE: MobileNetSSD_demo_single/settings.gradle
================================================
// Modules included in this Gradle build: the single application module.
include ':app'
================================================
FILE: README.md
================================================
# ncnnforandroid_objectiondetection_Mobilenetssd
利用Mobilenetssd目标检测框架,ncnn前向推理,android项目
请看我的个人blog配套教程
- [MobileNetSSD通过Ncnn前向推理框架在PC端的使用(目标检测 object detection)](https://blog.csdn.net/qq_33431368/article/details/84990390)
- [MobileNetSSD通过Ncnn前向推理框架在Android端的使用--Cmake编译(目标检测 object detection)](https://blog.csdn.net/qq_33431368/article/details/85009758)
- [MobileNetSSD通过Ncnn前向推理框架在Android端的使用--Cmake编译(目标检测 object detection)补充篇章(多目标也可以显示)](https://blog.csdn.net/qq_33431368/article/details/85019234)