Copy disabled (too large)
Download .txt
Showing preview only (24,345K chars total). Download the full file to get everything.
Repository: yzfzzz/Stereo-Detection
Branch: main
Commit: fed1abb589a1
Files: 74
Total size: 98.9 MB
Directory structure:
gitextract_kq4i821q/
├── BM(C++)/
│ └── BM.cpp
├── BM(Python)/
│ ├── BM.py
│ └── camera_configs.py
├── Jeston nano_tensorrt/
│ ├── CMakeLists.txt
│ ├── calibrator.cpp
│ ├── calibrator.h
│ ├── common.hpp
│ ├── cuda_utils.h
│ ├── gen_wts.py
│ ├── logging.h
│ ├── macros.h
│ ├── preprocess.cu
│ ├── preprocess.h
│ ├── samples
│ ├── utils.h
│ ├── yololayer.cu
│ ├── yololayer.h
│ ├── yolov5.cpp
│ └── 部署/
│ └── python_trt.py
├── LICENSE
├── README.md
├── SGBM(C++)/
│ └── SGBM.cpp
├── SGBM(Python)/
│ └── sgbm-video.py
├── Win_tensorrt/
│ ├── python_trt.py
│ ├── yolov5_640.engine
│ └── yolov5s_480.engine
├── distance_measurement_cpp/
│ ├── camera_config.json
│ ├── json.hpp
│ ├── main.cpp
│ ├── mouse_controller.cpp
│ ├── mouse_controller.h
│ ├── stereo_match_algorithm.cpp
│ └── stereo_match_algorithm.h
├── stereo_introduce/
│ ├── USB摄像头使用说明书.docx
│ └── VideoCap.ini
├── stereo_shot.py
├── yolov5-v6.1-pytorch-master/
│ ├── .gitignore
│ ├── LICENSE
│ ├── get_map.py
│ ├── kmeans_for_anchors.py
│ ├── nets/
│ │ ├── CSPdarknet.py
│ │ ├── __init__.py
│ │ ├── yolo.py
│ │ └── yolo_training.py
│ ├── predict.py
│ ├── requirements.txt
│ ├── sgbm-video.py
│ ├── summary.py
│ ├── train.py
│ ├── utils/
│ │ ├── __init__.py
│ │ ├── callbacks.py
│ │ ├── dataloader.py
│ │ ├── utils.py
│ │ ├── utils_bbox.py
│ │ ├── utils_fit.py
│ │ └── utils_map.py
│ ├── utils_coco/
│ │ ├── coco_annotation.py
│ │ └── get_map_coco.py
│ ├── voc_annotation.py
│ └── yolo.py
├── 双目视觉资料/
│ ├── opencv 双目摄像头拍照(分别左右镜头拍照).txt
│ ├── opencv 双目摄像头拍照(分别左右镜头拍照)_一颗小树x的博客-CSDN博客_双目摄像头拍照.mhtml
│ ├── 一篇文章认识《双目立体视觉》_一颗小树x的博客-CSDN博客(1).mhtml
│ ├── 制作标定板——matlab编程实现_一颗小树x的博客-CSDN博客_matlab 标定板.mhtml
│ ├── 双目 机器视觉-- 测距.txt
│ ├── 双目 机器视觉-- 测距_一颗小树x的博客-CSDN博客_机器视觉测距.mhtml
│ ├── 双目摄像头内参如何使用? 如何转化数据?_一颗小树x的博客-CSDN博客_摄像头内参.mhtml
│ ├── 双目测距 SGBM算法 Python版.txt
│ ├── 双目测距 SGBM算法 Python版_一颗小树x的博客-CSDN博客_双目相机测距python.mhtml
│ └── 立体匹配算法(局部立体匹配 、全局立体匹配 、深度学习立体匹配 )_一颗小树x的博客-CSDN博客_深度学习立体匹配.mhtml
├── 常见问题答疑.md
└── 数据处理/
├── README.md
├── clean-xml-jpg.py
└── coco2voc.py
================================================
FILE CONTENTS
================================================
================================================
FILE: BM(C++)/BM.cpp
================================================
/* Stereo (binocular) BM matching demo: rectifies a side-by-side image pair,
 * computes a disparity map with StereoBM, and measures distance on click. */
#include <opencv2/opencv.hpp>
#include <iostream>
#include <math.h>
using namespace std;
using namespace cv;
const int imageWidth = 640; // per-camera capture resolution
const int imageHeight = 480;
Vec3f point3;               // world coordinates (mm) of the last clicked pixel
float d;                    // straight-line distance from the camera to that point
Size imageSize = Size(imageWidth, imageHeight);
Mat img;                          // full side-by-side input image
Mat rgbImageL, grayImageL;        // left view, color and grayscale
Mat rgbImageR, grayImageR;        // right view, color and grayscale
Mat rectifyImageL, rectifyImageR; // rectified grayscale views
Rect m_l_select;                  // crop rectangle for the left half
Rect m_r_select;                  // crop rectangle for the right half
Rect validROIL; // valid (non-black) region of the left image after rectification
Rect validROIR;
Mat mapLx, mapLy, mapRx, mapRy; // remap lookup tables for rectification
Mat Rl, Rr, Pl, Pr, Q;          // rectification rotations Rl/Rr, projections Pl/Pr, reprojection matrix Q
Mat xyz;                        // 3-D point cloud reprojected from the disparity map
Point origin;                   // mouse-down start position
Rect selection;                 // rubber-band selection rectangle
bool selectObject = false;      // true while a selection drag is active
int blockSize = 8, uniquenessRatio = 0, numDisparities = 3; // StereoBM tuning knobs
Ptr<StereoBM> bm = StereoBM::create(16, 9);
/* Left camera intrinsic matrix from calibration:
   fx 0 cx
   0 fy cy
   0 0 1
*/
Mat cameraMatrixL = (Mat_<double>(3, 3) << 516.5066236, -1.444673028, 320.2950423, 0, 516.5816117, 270.7881873, 0, 0, 1.);
// Result of an earlier calibration run (kept for reference):
/*418.523322187048 0 0
-1.26842201390676 421.222568242056 0
344.758267538961 243.318992284899 1 */
// Left distortion coefficients [k1, k2, p1, p2, k3] (k3 zeroed here).
Mat distCoeffL = (Mat_<double>(5, 1) << -0.046645194, 0.077595167, 0.012476819, -0.000711358, 0);
//[0.006636837611004,0.050240447649195] [0.006681263320267,0.003130367429418]
/* Right camera intrinsic matrix from calibration:
   fx 0 cx
   0 fy cy
   0 0 1
*/
Mat cameraMatrixR = (Mat_<double>(3, 3) << 511.8428182, 1.295112628, 317.310253, 0, 513.0748795, 269.5885026,0, 0, 1);
/* earlier calibration run (reference):
417.417985082506 0 0
0.498638151824367 419.795432389420 0
309.903372309072 236.256106972796 1
*/
// Right distortion coefficients [k1, k2, p1, p2, k3] (k3 zeroed here).
Mat distCoeffR = (Mat_<double>(5, 1) << -0.061588946, 0.122384376, 0.011081232, -0.000750439, 0);
//[-0.038407383078874,0.236392800301615] [0.004121779274885,0.002296129959664]
Mat T = (Mat_<double>(3, 1) <<-120.3559901,-0.188953775,-0.662073075);// translation vector T (corresponds to MATLAB's T)
//[-1.210187345641146e+02,0.519235426836325,-0.425535566316217]
// earlier MATLAB T value, kept for reference
//Mat rec = (Mat_<double>(3, 1) << -0.00306, -0.03207, 0.00206);// rotation VECTOR form (MATLAB om)
Mat rec = (Mat_<double>(3, 3) << 0.999911333, -0.004351508, 0.012585312,
    0.004184066, 0.999902792, 0.013300386,
    -0.012641965, -0.013246549, 0.999832341); // rotation MATRIX (corresponds to MATLAB om)
/* 0.999341122700880 0.000660748031451783 -0.0362888948713456
-0.00206388651740061 0.999250989651683 -0.0386419468010579
0.0362361815232777 0.0386913826603732 0.998593969567432 */
//Mat T = (Mat_<double>(3, 1) << -48.4, 0.241, -0.0344);// alternative translation vector (unused)
//[-1.210187345641146e+02,0.519235426836325,-0.425535566316217]
// earlier MATLAB T value, kept for reference
Mat R;// rotation matrix, produced from rec by Rodrigues() in main()
/*****ƥ*****/
void stereo_match(int, void*)
{
bm->setBlockSize(2 * blockSize + 5); //SADڴС5~21֮Ϊ
bm->setROI1(validROIL);
bm->setROI2(validROIR);
bm->setPreFilterCap(31);
bm->setMinDisparity(0); //СӲĬֵΪ0, Ǹֵint
bm->setNumDisparities(numDisparities * 16 + 16);//ӲڣӲֵСӲֵ֮,ڴС16int
bm->setTextureThreshold(10);
bm->setUniquenessRatio(uniquenessRatio);//uniquenessRatioҪԷֹƥ
bm->setSpeckleWindowSize(100);
bm->setSpeckleRange(32);
bm->setDisp12MaxDiff(-1);
Mat disp, disp8;
bm->compute(rectifyImageL, rectifyImageR, disp);//ͼΪҶͼ
disp.convertTo(disp8, CV_8U, 255 / ((numDisparities * 16 + 16)*16.));//ӲCV_16Sʽ
reprojectImageTo3D(disp, xyz, Q, true); //ʵʱReprojectTo3DX / W, Y / W, Z / WҪ16(ҲW16)ܵõȷάϢ
xyz = xyz * 16;
imshow("disparity", disp8);
}
/*****ص*****/
/* Mouse callback on the "disparity" window: on left-button press, looks up
 * the clicked pixel in the reprojected 3-D map (xyz) and prints its world
 * coordinates plus the straight-line distance from the camera.
 * Fix: removed the dead no-op statement `point3[0];` and stale commented-out
 * logging left in the original. */
static void onMouse(int event, int x, int y, int, void*)
{
    if (selectObject)
    {
        // Track the rubber-band rectangle while the button is held down.
        selection.x = MIN(x, origin.x);
        selection.y = MIN(y, origin.y);
        selection.width = std::abs(x - origin.x);
        selection.height = std::abs(y - origin.y);
    }
    switch (event)
    {
    case EVENT_LBUTTONDOWN: // left button pressed
        origin = Point(x, y);
        selection = Rect(x, y, 0, 0);
        selectObject = true;
        // World coordinates (mm) of the clicked pixel from the 3-D reprojection.
        point3 = xyz.at<Vec3f>(origin);
        cout << "꣺" << endl;
        cout << "x: " << point3[0] << " y: " << point3[1] << " z: " << point3[2] << endl;
        // Euclidean distance from the camera origin: mm, then converted to cm.
        d = point3[0] * point3[0] + point3[1] * point3[1] + point3[2] * point3[2];
        d = sqrt(d);   // mm
        d = d / 10.0;  // cm
        cout << ":" << d << "cm" << endl;
        break;
    case EVENT_LBUTTONUP:   // left button released
        selectObject = false;
        if (selection.width > 0 && selection.height > 0)
            break;
    }
}
/**********/
int main()
{
/*
У
*/
Rodrigues(rec, R); //Rodrigues任
stereoRectify(cameraMatrixL, distCoeffL, cameraMatrixR, distCoeffR, imageSize, R, T, Rl, Rr, Pl, Pr, Q, CALIB_ZERO_DISPARITY,
0, imageSize, &validROIL, &validROIR);
initUndistortRectifyMap(cameraMatrixL, distCoeffL, Rl, Pr, imageSize, CV_32FC1, mapLx, mapLy);
initUndistortRectifyMap(cameraMatrixR, distCoeffR, Rr, Pr, imageSize, CV_32FC1, mapRx, mapRy);
/*
ȡͼƬ
*/
m_l_select = Rect(0, 0, 640, 480);
img = imread("car.jpg", IMREAD_COLOR);
//imshow("Image", img);
rgbImageL = img(m_l_select);
cvtColor(rgbImageL, grayImageL, COLOR_BGR2GRAY);
m_r_select = Rect(640, 0, 640, 480);
rgbImageR = img(m_r_select);
cvtColor(rgbImageR, grayImageR, COLOR_BGR2GRAY);
//imshow("ImageL", rgbImageL);
//imshow("ImageR", rgbImageR);
/*
remap֮ͼѾ沢ж
*/
remap(grayImageL, rectifyImageL, mapLx, mapLy, INTER_LINEAR);
remap(grayImageR, rectifyImageR, mapRx, mapRy, INTER_LINEAR);
/*
Уʾ
*/
Mat rgbRectifyImageL, rgbRectifyImageR;
cvtColor(rectifyImageL, rgbRectifyImageL, COLOR_GRAY2BGR); //αɫͼ
cvtColor(rectifyImageR, rgbRectifyImageR, COLOR_GRAY2BGR);
//ʾ
//rectangle(rgbRectifyImageL, validROIL, Scalar(0, 0, 255), 3, 8);
//rectangle(rgbRectifyImageR, validROIR, Scalar(0, 0, 255), 3, 8);
//imshow("ImageL After Rectify", rgbRectifyImageL);
//imshow("ImageR After Rectify", rgbRectifyImageR);
//ʾͬһͼ
Mat canvas;
double sf;
int w, h;
sf = 600. / MAX(imageSize.width, imageSize.height);
w = cvRound(imageSize.width * sf);
h = cvRound(imageSize.height * sf);
canvas.create(h, w * 2, CV_8UC3); //עͨ
//ͼ
Mat canvasPart = canvas(Rect(w * 0, 0, w, h)); //õһ
resize(rgbRectifyImageL, canvasPart, canvasPart.size(), 0, 0, INTER_AREA); //ͼŵcanvasPartһС
Rect vroiL(cvRound(validROIL.x*sf), cvRound(validROIL.y*sf), //ñȡ
cvRound(validROIL.width*sf), cvRound(validROIL.height*sf));
//rectangle(canvasPart, vroiL, Scalar(0, 0, 255), 3, 8); //һ
cout << "Painted ImageL" << endl;
//ͼ
canvasPart = canvas(Rect(w, 0, w, h)); //ûһ
resize(rgbRectifyImageR, canvasPart, canvasPart.size(), 0, 0, INTER_LINEAR);
Rect vroiR(cvRound(validROIR.x * sf), cvRound(validROIR.y*sf),
cvRound(validROIR.width * sf), cvRound(validROIR.height * sf));
//rectangle(canvasPart, vroiR, Scalar(0, 0, 255), 3, 8);
cout << "Painted ImageR" << endl;
//϶Ӧ
for (int i = 0; i < canvas.rows; i += 16)
line(canvas, Point(0, i), Point(canvas.cols, i), Scalar(0, 255, 0), 1, 8);
imshow("rectified", canvas);
/*
ƥ
*/
namedWindow("disparity", WINDOW_AUTOSIZE);
/*************************οӻ**********************************************/
// SAD Trackbar
//createTrackbar("BlockSize:\n", "disparity", &blockSize, 8, stereo_match);
// ӲΨһٷֱȴ Trackbar
//createTrackbar("UniquenessRatio:\n", "disparity", &uniquenessRatio, 50, stereo_match);
// Ӳ Trackbar
//createTrackbar("NumDisparities:\n", "disparity", &numDisparities, 16, stereo_match);
//ӦsetMouseCallback(, ص, صIJһȡ0)
setMouseCallback("disparity", onMouse, 0);
stereo_match(0, 0);
waitKey();
return 0;
}
================================================
FILE: BM(Python)/BM.py
================================================
import cv2
import camera_configs
import math
import os
import datetime as dt

# Snapshot output directory. Fixes the original script, which assigned to the
# misspelled name "floder" but later read "folder" -> NameError when pressing 's'.
folder = os.getcwd()

cap = cv2.VideoCapture(0)
cap.set(3, 2560)  # frame width: two 1280-px views side by side
cap.set(4, 720)   # frame height


def onmouse_pick_points(event, x, y, flags, param):
    """Mouse callback: print the world coordinates (m) and straight-line
    distance of the clicked pixel, using the 3-D map passed via *param*."""
    if event == cv2.EVENT_LBUTTONDOWN:
        threeD = param
        print('\n像素坐标 x = %d, y = %d' % (x, y))
        print("世界坐标xyz 是:", threeD[y][x][0] / 1000.0, threeD[y][x][1] / 1000.0, threeD[y][x][2] / 1000.0, "m")
        distance = math.sqrt(threeD[y][x][0] ** 2 + threeD[y][x][1] ** 2 + threeD[y][x][2] ** 2)
        distance = distance / 1000.0  # mm -> m
        print("距离是:", distance, "m")


WIN_NAME = 'Deep disp'
cv2.namedWindow(WIN_NAME, cv2.WINDOW_AUTOSIZE)

# Build the BM matcher once, instead of re-creating it on every frame as the
# original loop did (identical settings, much less per-frame work).
numberOfDisparities = ((1280 // 8) + 15) & -16  # multiple of 16, scaled to the 1280-px view width
stereo = cv2.StereoBM_create(numDisparities=16, blockSize=9)
stereo.setROI1(camera_configs.validPixROI1)
stereo.setROI2(camera_configs.validPixROI2)
stereo.setPreFilterCap(31)
stereo.setBlockSize(15)
stereo.setMinDisparity(0)
stereo.setNumDisparities(numberOfDisparities)
stereo.setTextureThreshold(10)
stereo.setUniquenessRatio(15)
stereo.setSpeckleWindowSize(100)
stereo.setSpeckleRange(32)
stereo.setDisp12MaxDiff(1)

counter = 0  # snapshot index (was never defined in the original -> NameError)
while True:
    ret, frame = cap.read()
    if not ret:
        break  # camera unplugged / no frame
    frame1 = frame[0:720, 0:1280]
    frame2 = frame[0:720, 1280:2560]  # split the side-by-side stereo frame
    imgL = cv2.cvtColor(frame1, cv2.COLOR_BGR2GRAY)  # BGR -> grayscale
    imgR = cv2.cvtColor(frame2, cv2.COLOR_BGR2GRAY)

    # Remap with the calibration tables to get undistorted, row-aligned views.
    img1_rectified = cv2.remap(imgL, camera_configs.left_map1, camera_configs.left_map2, cv2.INTER_LINEAR)
    img2_rectified = cv2.remap(imgR, camera_configs.right_map1, camera_configs.right_map2, cv2.INTER_LINEAR)

    disparity = stereo.compute(img1_rectified, img2_rectified)
    disp = cv2.normalize(disparity, disparity, alpha=0, beta=255, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_8U)
    # reprojectImageTo3D returns X/W, Y/W, Z/W; the raw disparities carry an
    # extra fixed-point factor of 16, hence the final scaling.
    threeD = cv2.reprojectImageTo3D(disparity, camera_configs.Q, handleMissingValues=True)
    threeD = threeD * 16
    # threeD[y][x] — y: 0..719, x: 0..1279
    cv2.setMouseCallback(WIN_NAME, onmouse_pick_points, threeD)

    cv2.imshow("left", frame1)
    cv2.imshow(WIN_NAME, disp)  # depth/disparity view

    key = cv2.waitKey(1)
    if key == ord("s"):
        # Save the raw side-by-side frame with an incrementing index.
        path = os.path.join(folder, "img_%d.jpg" % counter)
        counter += 1
        cv2.imwrite(path, frame)
        print("snapshot saved into: " + path)
    if key == ord("q"):
        break

cap.release()
cv2.destroyAllWindows()
================================================
FILE: BM(Python)/camera_configs.py
================================================
# author: young
import cv2
import numpy as np

# Calibration parameter set that gives good results.
# Left camera intrinsics [[fx, skew, cx], [0, fy, cy], [0, 0, 1]].
left_camera_matrix = np.array([[986.4572391,1.673607456,651.0717611],[0,1001.238398,535.8195077],[0.,0.,1.]])
# left_distortion = np.array([[-0.154511565,0.325173292, 0.006934081,0.017466934, -0.340007548]])
# Distortion coefficients [k1, k2, p1, p2, k3], with k3 zeroed out.
left_distortion = np.array([[-0.154511565,0.325173292, 0.006934081,0.017466934, 0]])
right_camera_matrix = np.array([[998.5848065,7.37746018,667.3698587],[0,1006.305891,528.9731771],[0.,0.,1.]])
# right_distortion = np.array([[-0.192887524,0.706728768, 0.004233541,0.021340116,-1.175486913]])
right_distortion = np.array([[-0.192887524,0.706728768, 0.004233541,0.021340116,0]])
# Rotation R and translation T between the two cameras.
R = np.array([[0.999925137,-0.003616734,-0.01168927],
              [0.003742452,0.999935202,0.010751105],
              [0.011649629,-0.010794046,0.999873879]])
T = np.array([-117.3364039,0.277054571,-3.7672413])
# Alternative set: covers more viewing angles but with mediocre results.
# right_camera_matrix = np.array([[1011.630992,6.392290621,667.5686089],
# [ 0,1013.460245,503.4011875],
# [0., 0,1.0000]])
# right_distortion = np.array([[-0.078598405,0.181429846, 0.005991071,0.011485758,-0.216528642]])
#
# left_camera_matrix = np.array([[999.2148594,1.517083305,664.099101],[ 0.,1004.928519,509.320943,], [0., 0,1.0000]])
# left_distortion = np.array([[-0.068650958,0.329526482,0.009413124,0.007593334, -0.762189196]])
#
# R = np.matrix([[ 0.999995654,-0.001219991,-0.002683778],[0.001249395,0.999938918,0.010981796],[ 0.002670216,-0.010985102,0.999936097],])
# T = np.array([-115.3587822,-0.643409169,1.336821271])
# "Wall translation" calibration set (kept for reference).
# left_camera_matrix = np.array([[1023.60598,2.050151356,602.5534506],[0,976.0768203,398.6863484],[0.,0.,1.]])
#
# # left_distortion = np.array([[-0.154511565,0.325173292, 0.006934081,0.017466934, -0.340007548]])
# left_distortion = np.array([[-0.001241482, 0.372245099, -0.02528585, 0.00214508,-1.007572978]])
#
# right_camera_matrix = np.array([[1117.411713,-7.109583921,648.9929444],[0,1069.494628,404.4101094],[0.,0.,1.]])
#
# # right_distortion = np.array([[-0.192887524,0.706728768, 0.004233541,0.021340116,-1.175486913]])
# right_distortion = np.array([[0.064401477,0.162144856,-0.021187394, 0.019547213,-0.331058671]])
#
# R = np.array([[0.999965088,0.007987609,0.002453816], [-0.008017785,0.999889171,0.012544361],[-0.002353345,-0.012563598,0.999918306]])
#
# T = np.array([-181.8512309, -19.42425901,125.5877407])
size = (1280, 720)  # open windows size
# R1: left rectification rotation, P1: left projection matrix, Q: reprojection matrix.
R1, R2, P1, P2, Q, validPixROI1, validPixROI2 = cv2.stereoRectify(left_camera_matrix, left_distortion,
                                                                  right_camera_matrix, right_distortion, size, R, T)
# Rectification lookup maps: pair each raw-image pixel with its rectified position.
left_map1, left_map2 = cv2.initUndistortRectifyMap(left_camera_matrix, left_distortion, R1, P1, size, cv2.CV_16SC2)
right_map1, right_map2 = cv2.initUndistortRectifyMap(right_camera_matrix, right_distortion, R2, P2, size, cv2.CV_16SC2)
print(Q)
import numpy as np
#################### example only ####################
# Bundles this module's stereo calibration results into one object.
class stereoCamera(object):
    def __init__(self):
        # Left camera intrinsic matrix.
        self.cam_matrix_left = left_camera_matrix
        # Right camera intrinsic matrix.
        self.cam_matrix_right = right_camera_matrix
        # Left/right distortion coefficients [k1, k2, p1, p2, k3].
        self.distortion_l = left_distortion
        self.distortion_r = right_distortion
        # Rotation matrix between the cameras.
        self.R = R
        # Translation vector between the cameras.
        self.T = T
        # Focal length; typically taken from Q[2, 3] of the rectified
        # reprojection matrix.
        self.focal_length = 859.367
        # Baseline in mm: first component of the translation vector (absolute value).
        self.baseline = 119.9578
FILE: Jeston nano_tensorrt/CMakeLists.txt
================================================
cmake_minimum_required(VERSION 2.6)
project(yolov5)
# C++11 build; API_EXPORTS marks the plugin symbols for export.
add_definitions(-std=c++11)
add_definitions(-DAPI_EXPORTS)
option(CUDA_USE_STATIC_CUDA_RUNTIME OFF)
set(CMAKE_CXX_STANDARD 11)
set(CMAKE_BUILD_TYPE Debug)
find_package(CUDA REQUIRED)
# On Windows, CUDA must be enabled as a first-class language.
if(WIN32)
enable_language(CUDA)
endif(WIN32)
include_directories(${PROJECT_SOURCE_DIR}/include)
# include and link dirs of cuda and tensorrt, you need adapt them if yours are different
# cuda
include_directories(/usr/local/cuda/include)
link_directories(/usr/local/cuda/lib64)
# tensorrt
include_directories(/usr/include/x86_64-linux-gnu/)
link_directories(/usr/lib/x86_64-linux-gnu/)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11 -Wall -Ofast -g -Wfatal-errors -D_MWAITXINTRIN_H_INCLUDED")
# Standalone plugin library holding the custom YOLO output-decode CUDA layer.
cuda_add_library(myplugins SHARED yololayer.cu)
target_link_libraries(myplugins nvinfer cudart)
find_package(OpenCV)
include_directories(${OpenCV_INCLUDE_DIRS})
#cuda_add_executable(yolov5 calibrator.cpp yolov5.cpp preprocess.cu)
# Built as a shared library (not an executable) so it can be loaded externally.
cuda_add_library(yolov5 SHARED ${PROJECT_SOURCE_DIR}/yolov5.cpp ${PROJECT_SOURCE_DIR}/yololayer.cu ${PROJECT_SOURCE_DIR}/yololayer.h ${PROJECT_SOURCE_DIR}/preprocess.cu)
target_link_libraries(yolov5 nvinfer)
target_link_libraries(yolov5 cudart)
target_link_libraries(yolov5 myplugins)
target_link_libraries(yolov5 ${OpenCV_LIBS})
if(UNIX)
add_definitions(-O2 -pthread)
endif(UNIX)
================================================
FILE: Jeston nano_tensorrt/calibrator.cpp
================================================
#include <iostream>
#include <iterator>
#include <fstream>
#include <opencv2/dnn/dnn.hpp>
#include "calibrator.h"
#include "cuda_utils.h"
#include "utils.h"
// Builds an INT8 entropy calibrator that feeds batches of preprocessed images
// from `img_dir` to TensorRT during calibration.
//  - calib_table_name: file used to cache/restore the calibration table
//  - input_blob_name:  name of the network input binding
//  - read_cache:       when true, reuse a previously written calibration table
Int8EntropyCalibrator2::Int8EntropyCalibrator2(int batchsize, int input_w, int input_h, const char* img_dir, const char* calib_table_name, const char* input_blob_name, bool read_cache)
    : batchsize_(batchsize)
    , input_w_(input_w)
    , input_h_(input_h)
    , img_idx_(0)
    , img_dir_(img_dir)
    , calib_table_name_(calib_table_name)
    , input_blob_name_(input_blob_name)
    , read_cache_(read_cache)
{
    // Device buffer sized for one batch of 3-channel float images.
    input_count_ = 3 * input_w * input_h * batchsize;
    CUDA_CHECK(cudaMalloc(&device_input_, input_count_ * sizeof(float)));
    // Collect the calibration image file names up front.
    read_files_in_dir(img_dir, img_files_);
}
// Releases the device-side batch buffer allocated in the constructor.
Int8EntropyCalibrator2::~Int8EntropyCalibrator2()
{
    CUDA_CHECK(cudaFree(device_input_));
}
// Number of images TensorRT should expect per getBatch() call.
int Int8EntropyCalibrator2::getBatchSize() const TRT_NOEXCEPT
{
    return batchsize_;
}
// Loads the next batch of calibration images, preprocesses them into an NCHW
// float blob, uploads it to the device buffer, and exposes it via bindings[0].
// Returns false when fewer than a full batch of images remain (calibration done)
// or when an image fails to load.
bool Int8EntropyCalibrator2::getBatch(void* bindings[], const char* names[], int nbBindings) TRT_NOEXCEPT
{
    if (img_idx_ + batchsize_ > (int)img_files_.size()) {
        return false;
    }
    std::vector<cv::Mat> input_imgs_;
    for (int i = img_idx_; i < img_idx_ + batchsize_; i++) {
        std::cout << img_files_[i] << "  " << i << std::endl;
        cv::Mat temp = cv::imread(img_dir_ + img_files_[i]);
        if (temp.empty()){
            std::cerr << "Fatal error: image cannot open!" << std::endl;
            return false;
        }
        // Resize/letterbox to the network input size (see utils.h helper).
        cv::Mat pr_img = preprocess_img(temp, input_w_, input_h_);
        input_imgs_.push_back(pr_img);
    }
    img_idx_ += batchsize_;
    // NCHW float blob: 1/255 scaling, BGR->RGB swap (swapRB=true), no crop.
    cv::Mat blob = cv::dnn::blobFromImages(input_imgs_, 1.0 / 255.0, cv::Size(input_w_, input_h_), cv::Scalar(0, 0, 0), true, false);
    CUDA_CHECK(cudaMemcpy(device_input_, blob.ptr<float>(0), input_count_ * sizeof(float), cudaMemcpyHostToDevice));
    // The single binding must match the configured input blob name.
    assert(!strcmp(names[0], input_blob_name_));
    bindings[0] = device_input_;
    return true;
}
// Loads a previously written calibration table from disk into calib_cache_.
// Returns nullptr (and length 0) when caching is disabled or the file is
// missing, which tells TensorRT to run a fresh calibration pass.
const void* Int8EntropyCalibrator2::readCalibrationCache(size_t& length) TRT_NOEXCEPT
{
    std::cout << "reading calib cache: " << calib_table_name_ << std::endl;
    calib_cache_.clear();
    std::ifstream input(calib_table_name_, std::ios::binary);
    input >> std::noskipws;
    if (read_cache_ && input.good()) {
        // Slurp the whole file byte-for-byte into the in-memory cache.
        calib_cache_.assign(std::istreambuf_iterator<char>(input),
                            std::istreambuf_iterator<char>());
    }
    length = calib_cache_.size();
    return calib_cache_.empty() ? nullptr : calib_cache_.data();
}
// Persists the calibration table produced by TensorRT to calib_table_name_,
// so later runs can skip calibration via readCalibrationCache().
void Int8EntropyCalibrator2::writeCalibrationCache(const void* cache, size_t length) TRT_NOEXCEPT
{
    std::cout << "writing calib cache: " << calib_table_name_ << " size: " << length << std::endl;
    std::ofstream output(calib_table_name_, std::ios::binary);
    output.write(reinterpret_cast<const char*>(cache), length);
}
================================================
FILE: Jeston nano_tensorrt/calibrator.h
================================================
#ifndef ENTROPY_CALIBRATOR_H
#define ENTROPY_CALIBRATOR_H
#include <NvInfer.h>
#include <string>
#include <vector>
#include "macros.h"
//! \class Int8EntropyCalibrator2
//!
//! \brief Implements Entropy calibrator 2.
//! CalibrationAlgoType is kENTROPY_CALIBRATION_2.
//!
class Int8EntropyCalibrator2 : public nvinfer1::IInt8EntropyCalibrator2
{
public:
    //! \param batchsize        images per calibration batch
    //! \param input_w,input_h  network input resolution
    //! \param img_dir          directory holding the calibration images
    //! \param calib_table_name file used to cache the calibration table
    //! \param input_blob_name  name of the network input binding
    //! \param read_cache       reuse a previously written calibration table
    Int8EntropyCalibrator2(int batchsize, int input_w, int input_h, const char* img_dir, const char* calib_table_name, const char* input_blob_name, bool read_cache = true);
    virtual ~Int8EntropyCalibrator2();
    int getBatchSize() const TRT_NOEXCEPT override;
    //! Uploads the next preprocessed batch to the GPU; returns false when done.
    bool getBatch(void* bindings[], const char* names[], int nbBindings) TRT_NOEXCEPT override;
    //! Returns the cached calibration table (nullptr if absent or disabled).
    const void* readCalibrationCache(size_t& length) TRT_NOEXCEPT override;
    //! Persists the calibration table produced by TensorRT.
    void writeCalibrationCache(const void* cache, size_t length) TRT_NOEXCEPT override;
private:
    int batchsize_;
    int input_w_;
    int input_h_;
    int img_idx_;                        // index of the next image to consume
    std::string img_dir_;
    std::vector<std::string> img_files_; // calibration image file names
    size_t input_count_;                 // floats per batch: 3 * W * H * batchsize
    std::string calib_table_name_;
    const char* input_blob_name_;
    bool read_cache_;
    void* device_input_;                 // device buffer for one batch
    std::vector<char> calib_cache_;      // in-memory copy of the calibration table
};
#endif // ENTROPY_CALIBRATOR_H
================================================
FILE: Jeston nano_tensorrt/common.hpp
================================================
#ifndef YOLOV5_COMMON_H_
#define YOLOV5_COMMON_H_
#include <fstream>
#include <map>
#include <sstream>
#include <vector>
#include <opencv2/opencv.hpp>
#include "NvInfer.h"
#include "yololayer.h"
using namespace nvinfer1;
// Maps a YOLO box (center-x, center-y, w, h) from letterboxed network-input
// coordinates back to a cv::Rect in the original image's pixel coordinates.
// The branch selects which axis received the letterbox padding.
cv::Rect get_rect(cv::Mat& img, float bbox[4]) {
    float l, r, t, b;
    float r_w = Yolo::INPUT_W / (img.cols * 1.0);
    float r_h = Yolo::INPUT_H / (img.rows * 1.0);
    if (r_h > r_w) {
        // Width-bound: bars were added top/bottom — subtract the vertical
        // padding from y, then divide everything by the width scale.
        l = bbox[0] - bbox[2] / 2.f;
        r = bbox[0] + bbox[2] / 2.f;
        t = bbox[1] - bbox[3] / 2.f - (Yolo::INPUT_H - r_w * img.rows) / 2;
        b = bbox[1] + bbox[3] / 2.f - (Yolo::INPUT_H - r_w * img.rows) / 2;
        l = l / r_w;
        r = r / r_w;
        t = t / r_w;
        b = b / r_w;
    } else {
        // Height-bound: bars were added left/right — subtract the horizontal
        // padding from x, then divide everything by the height scale.
        l = bbox[0] - bbox[2] / 2.f - (Yolo::INPUT_W - r_h * img.cols) / 2;
        r = bbox[0] + bbox[2] / 2.f - (Yolo::INPUT_W - r_h * img.cols) / 2;
        t = bbox[1] - bbox[3] / 2.f;
        b = bbox[1] + bbox[3] / 2.f;
        l = l / r_h;
        r = r / r_h;
        t = t / r_h;
        b = b / r_h;
    }
    return cv::Rect(round(l), round(t), round(r - l), round(b - t));
}
// Intersection-over-union of two boxes given as (center-x, center-y, w, h).
// Returns 0 when the boxes do not overlap.
float iou(float lbox[4], float rbox[4]) {
    // Convert center/size form to corner form and intersect the two boxes.
    float xl = (std::max)(lbox[0] - lbox[2] / 2.f, rbox[0] - rbox[2] / 2.f); // left
    float xr = (std::min)(lbox[0] + lbox[2] / 2.f, rbox[0] + rbox[2] / 2.f); // right
    float yt = (std::max)(lbox[1] - lbox[3] / 2.f, rbox[1] - rbox[3] / 2.f); // top
    float yb = (std::min)(lbox[1] + lbox[3] / 2.f, rbox[1] + rbox[3] / 2.f); // bottom
    if (yt > yb || xl > xr)
        return 0.0f; // empty intersection
    float inter = (xr - xl) * (yb - yt);
    // union = areaL + areaR - intersection
    return inter / (lbox[2] * lbox[3] + rbox[2] * rbox[3] - inter);
}
// Sort predicate: orders detections by descending confidence (used by nms).
bool cmp(const Yolo::Detection& a, const Yolo::Detection& b) {
    return a.conf > b.conf;
}
// Greedy per-class non-maximum suppression over the raw network output.
// output[0] holds the detection count; each detection is det_size floats with
// the confidence at offset 4. Detections above conf_thresh are bucketed by
// class, sorted by descending confidence, and any box overlapping a kept box
// by more than nms_thresh (IoU) is discarded. Survivors are appended to res.
// Fix: the class map was named `m` and shadowed by the inner loop variable
// `size_t m` (bugprone-shadow); both identifiers renamed for safety.
void nms(std::vector<Yolo::Detection>& res, float *output, float conf_thresh, float nms_thresh = 0.5) {
    int det_size = sizeof(Yolo::Detection) / sizeof(float);
    std::map<float, std::vector<Yolo::Detection>> class_buckets;
    for (int i = 0; i < output[0] && i < Yolo::MAX_OUTPUT_BBOX_COUNT; i++) {
        if (output[1 + det_size * i + 4] <= conf_thresh) continue;
        Yolo::Detection det;
        memcpy(&det, &output[1 + det_size * i], det_size * sizeof(float));
        // operator[] default-constructs the bucket on first use.
        class_buckets[det.class_id].push_back(det);
    }
    for (auto it = class_buckets.begin(); it != class_buckets.end(); it++) {
        auto& dets = it->second;
        std::sort(dets.begin(), dets.end(), cmp);
        for (size_t i = 0; i < dets.size(); ++i) {
            auto& item = dets[i];
            res.push_back(item);
            // Drop every later (lower-confidence) box that overlaps too much.
            for (size_t j = i + 1; j < dets.size(); ++j) {
                if (iou(item.bbox, dets[j].bbox) > nms_thresh) {
                    dets.erase(dets.begin() + j);
                    --j;
                }
            }
        }
    }
}
// TensorRT weight files have a simple space delimited format:
// [type] [size] <data x size in hex>
// Parses such a .wts file into a name -> Weights map. The raw buffers are
// intentionally never freed here: they must outlive engine construction.
std::map<std::string, Weights> loadWeights(const std::string file) {
    std::cout << "Loading weights: " << file << std::endl;
    std::map<std::string, Weights> weightMap;
    // Open weights file
    std::ifstream input(file);
    assert(input.is_open() && "Unable to load weight file. please check if the .wts file path is right!!!!!!");
    // Read number of weight blobs
    int32_t count;
    input >> count;
    assert(count > 0 && "Invalid weight map file.");
    while (count--)
    {
        Weights wt{ DataType::kFLOAT, nullptr, 0 };
        uint32_t size;
        // Read name and type of blob
        std::string name;
        input >> name >> std::dec >> size;
        wt.type = DataType::kFLOAT;
        // Load blob. FIX: allocate `size` 32-bit words — the original used
        // sizeof(val), i.e. the size of a POINTER (8 bytes on 64-bit), which
        // silently doubled every allocation.
        uint32_t* val = reinterpret_cast<uint32_t*>(malloc(sizeof(uint32_t) * size));
        assert(val != nullptr && "Out of memory while loading weights.");
        for (uint32_t x = 0, y = size; x < y; ++x)
        {
            input >> std::hex >> val[x];
        }
        wt.values = val;
        wt.count = size;
        weightMap[name] = wt;
    }
    return weightMap;
}
// Emulates inference-time BatchNorm2d with a TensorRT IScaleLayer:
//   y = (x - mean) / sqrt(var + eps) * gamma + beta  =  x * scale + shift
// with the power term fixed at 1. The fused scale/shift/power buffers are
// stashed back into weightMap so they stay alive while the engine is built.
IScaleLayer* addBatchNorm2d(INetworkDefinition *network, std::map<std::string, Weights>& weightMap, ITensor& input, std::string lname, float eps) {
    float *gamma = (float*)weightMap[lname + ".weight"].values;
    float *beta = (float*)weightMap[lname + ".bias"].values;
    float *mean = (float*)weightMap[lname + ".running_mean"].values;
    float *var = (float*)weightMap[lname + ".running_var"].values;
    int len = weightMap[lname + ".running_var"].count;
    // scale = gamma / sqrt(var + eps)
    float *scval = reinterpret_cast<float*>(malloc(sizeof(float) * len));
    for (int i = 0; i < len; i++) {
        scval[i] = gamma[i] / sqrt(var[i] + eps);
    }
    Weights scale{ DataType::kFLOAT, scval, len };
    // shift = beta - mean * gamma / sqrt(var + eps)
    float *shval = reinterpret_cast<float*>(malloc(sizeof(float) * len));
    for (int i = 0; i < len; i++) {
        shval[i] = beta[i] - mean[i] * gamma[i] / sqrt(var[i] + eps);
    }
    Weights shift{ DataType::kFLOAT, shval, len };
    // power = 1 (identity exponent)
    float *pval = reinterpret_cast<float*>(malloc(sizeof(float) * len));
    for (int i = 0; i < len; i++) {
        pval[i] = 1.0;
    }
    Weights power{ DataType::kFLOAT, pval, len };
    weightMap[lname + ".scale"] = scale;
    weightMap[lname + ".shift"] = shift;
    weightMap[lname + ".power"] = power;
    IScaleLayer* scale_1 = network->addScale(input, ScaleMode::kCHANNEL, shift, scale, power);
    assert(scale_1);
    return scale_1;
}
// Conv -> BatchNorm -> SiLU block (YOLOv5 "Conv" module).
// Padding p = ksize / 3 reproduces the weights' padding scheme:
// k=1 -> p=0, k=3 -> p=1, k=6 -> p=2.
ILayer* convBlock(INetworkDefinition *network, std::map<std::string, Weights>& weightMap, ITensor& input, int outch, int ksize, int s, int g, std::string lname) {
    Weights emptywts{ DataType::kFLOAT, nullptr, 0 }; // no conv bias; BN supplies the shift
    int p = ksize / 3;
    IConvolutionLayer* conv1 = network->addConvolutionNd(input, outch, DimsHW{ ksize, ksize }, weightMap[lname + ".conv.weight"], emptywts);
    assert(conv1);
    conv1->setStrideNd(DimsHW{ s, s });
    conv1->setPaddingNd(DimsHW{ p, p });
    conv1->setNbGroups(g);
    IScaleLayer* bn1 = addBatchNorm2d(network, weightMap, *conv1->getOutput(0), lname + ".bn", 1e-3);
    // SiLU activation built from primitives: silu(x) = x * sigmoid(x)
    auto sig = network->addActivation(*bn1->getOutput(0), ActivationType::kSIGMOID);
    assert(sig);
    auto ew = network->addElementWise(*bn1->getOutput(0), *sig->getOutput(0), ElementWiseOperation::kPROD);
    assert(ew);
    return ew;
}
// Focus layer: slices the input into four half-resolution views
// (even/odd rows x even/odd columns via stride-2 slicing), concatenates them
// along the channel axis, then applies a convBlock.
ILayer* focus(INetworkDefinition *network, std::map<std::string, Weights>& weightMap, ITensor& input, int inch, int outch, int ksize, std::string lname) {
    ISliceLayer *s1 = network->addSlice(input, Dims3{ 0, 0, 0 }, Dims3{ inch, Yolo::INPUT_H / 2, Yolo::INPUT_W / 2 }, Dims3{ 1, 2, 2 });
    ISliceLayer *s2 = network->addSlice(input, Dims3{ 0, 1, 0 }, Dims3{ inch, Yolo::INPUT_H / 2, Yolo::INPUT_W / 2 }, Dims3{ 1, 2, 2 });
    ISliceLayer *s3 = network->addSlice(input, Dims3{ 0, 0, 1 }, Dims3{ inch, Yolo::INPUT_H / 2, Yolo::INPUT_W / 2 }, Dims3{ 1, 2, 2 });
    ISliceLayer *s4 = network->addSlice(input, Dims3{ 0, 1, 1 }, Dims3{ inch, Yolo::INPUT_H / 2, Yolo::INPUT_W / 2 }, Dims3{ 1, 2, 2 });
    ITensor* inputTensors[] = { s1->getOutput(0), s2->getOutput(0), s3->getOutput(0), s4->getOutput(0) };
    auto cat = network->addConcatenation(inputTensors, 4);
    auto conv = convBlock(network, weightMap, *cat->getOutput(0), outch, ksize, 1, 1, lname + ".conv");
    return conv;
}
// Standard bottleneck: 1x1 conv (channel reduce by factor e) followed by a
// 3x3 conv, with an optional residual add when the shortcut is requested and
// the input/output channel counts match.
ILayer* bottleneck(INetworkDefinition *network, std::map<std::string, Weights>& weightMap, ITensor& input, int c1, int c2, bool shortcut, int g, float e, std::string lname) {
    auto cv1 = convBlock(network, weightMap, input, (int)((float)c2 * e), 1, 1, 1, lname + ".cv1");
    auto cv2 = convBlock(network, weightMap, *cv1->getOutput(0), c2, 3, 1, g, lname + ".cv2");
    const bool add_residual = shortcut && c1 == c2;
    if (!add_residual) {
        return cv2;
    }
    // Residual path: input + cv2(cv1(input))
    return network->addElementWise(input, *cv2->getOutput(0), ElementWiseOperation::kSUM);
}
// CSP bottleneck block: the input is split into two paths —
// cv1 -> n stacked bottlenecks -> cv3 on one side, and a bare 1x1 conv (cv2)
// on the other — then concatenated, batch-normed, LeakyReLU'd and fused by cv4.
ILayer* bottleneckCSP(INetworkDefinition *network, std::map<std::string, Weights>& weightMap, ITensor& input, int c1, int c2, int n, bool shortcut, int g, float e, std::string lname) {
    Weights emptywts{ DataType::kFLOAT, nullptr, 0 };
    int c_ = (int)((float)c2 * e); // hidden channel count
    auto cv1 = convBlock(network, weightMap, input, c_, 1, 1, 1, lname + ".cv1");
    auto cv2 = network->addConvolutionNd(input, c_, DimsHW{ 1, 1 }, weightMap[lname + ".cv2.weight"], emptywts);
    // Chain n bottlenecks on the cv1 path.
    ITensor *y1 = cv1->getOutput(0);
    for (int i = 0; i < n; i++) {
        auto b = bottleneck(network, weightMap, *y1, c_, c_, shortcut, g, 1.0, lname + ".m." + std::to_string(i));
        y1 = b->getOutput(0);
    }
    auto cv3 = network->addConvolutionNd(*y1, c_, DimsHW{ 1, 1 }, weightMap[lname + ".cv3.weight"], emptywts);
    ITensor* inputTensors[] = { cv3->getOutput(0), cv2->getOutput(0) };
    auto cat = network->addConcatenation(inputTensors, 2);
    IScaleLayer* bn = addBatchNorm2d(network, weightMap, *cat->getOutput(0), lname + ".bn", 1e-4);
    auto lr = network->addActivation(*bn->getOutput(0), ActivationType::kLEAKY_RELU);
    lr->setAlpha(0.1); // LeakyReLU negative slope
    auto cv4 = convBlock(network, weightMap, *lr->getOutput(0), c2, 1, 1, 1, lname + ".cv4");
    return cv4;
}
// C3 block: simplified CSP bottleneck with three convBlocks — cv1 feeds the
// n stacked bottlenecks, cv2 bypasses them, and cv3 fuses the concatenation.
ILayer* C3(INetworkDefinition *network, std::map<std::string, Weights>& weightMap, ITensor& input, int c1, int c2, int n, bool shortcut, int g, float e, std::string lname) {
    int c_ = (int)((float)c2 * e); // hidden channel count
    auto cv1 = convBlock(network, weightMap, input, c_, 1, 1, 1, lname + ".cv1");
    auto cv2 = convBlock(network, weightMap, input, c_, 1, 1, 1, lname + ".cv2");
    // Chain n bottlenecks on the cv1 path.
    ITensor *y1 = cv1->getOutput(0);
    for (int i = 0; i < n; i++) {
        auto b = bottleneck(network, weightMap, *y1, c_, c_, shortcut, g, 1.0, lname + ".m." + std::to_string(i));
        y1 = b->getOutput(0);
    }
    ITensor* inputTensors[] = { y1, cv2->getOutput(0) };
    auto cat = network->addConcatenation(inputTensors, 2);
    auto cv3 = convBlock(network, weightMap, *cat->getOutput(0), c2, 1, 1, 1, lname + ".cv3");
    return cv3;
}
// Spatial Pyramid Pooling: 1x1 channel reduce, then three parallel max-pools
// with kernels k1/k2/k3 (stride 1, "same" padding so sizes are preserved),
// concatenate all four branches along channels, and fuse with a 1x1 convBlock.
ILayer* SPP(INetworkDefinition *network, std::map<std::string, Weights>& weightMap, ITensor& input, int c1, int c2, int k1, int k2, int k3, std::string lname) {
    int c_ = c1 / 2; // hidden channel count
    auto cv1 = convBlock(network, weightMap, input, c_, 1, 1, 1, lname + ".cv1");
    auto pool1 = network->addPoolingNd(*cv1->getOutput(0), PoolingType::kMAX, DimsHW{ k1, k1 });
    pool1->setPaddingNd(DimsHW{ k1 / 2, k1 / 2 });
    pool1->setStrideNd(DimsHW{ 1, 1 });
    auto pool2 = network->addPoolingNd(*cv1->getOutput(0), PoolingType::kMAX, DimsHW{ k2, k2 });
    pool2->setPaddingNd(DimsHW{ k2 / 2, k2 / 2 });
    pool2->setStrideNd(DimsHW{ 1, 1 });
    auto pool3 = network->addPoolingNd(*cv1->getOutput(0), PoolingType::kMAX, DimsHW{ k3, k3 });
    pool3->setPaddingNd(DimsHW{ k3 / 2, k3 / 2 });
    pool3->setStrideNd(DimsHW{ 1, 1 });
    ITensor* inputTensors[] = { cv1->getOutput(0), pool1->getOutput(0), pool2->getOutput(0), pool3->getOutput(0) };
    auto cat = network->addConcatenation(inputTensors, 4);
    auto cv2 = convBlock(network, weightMap, *cat->getOutput(0), c2, 1, 1, 1, lname + ".cv2");
    return cv2;
}
// SPP-Fast: like SPP but the three max-pools share one kernel size k and are
// CHAINED (pool3(pool2(pool1(x)))) rather than parallel — stacking same-size
// pools emulates the larger SPP kernels with less computation.
ILayer* SPPF(INetworkDefinition *network, std::map<std::string, Weights>& weightMap, ITensor& input, int c1, int c2, int k, std::string lname) {
    int c_ = c1 / 2; // hidden channel count
    auto cv1 = convBlock(network, weightMap, input, c_, 1, 1, 1, lname + ".cv1");
    auto pool1 = network->addPoolingNd(*cv1->getOutput(0), PoolingType::kMAX, DimsHW{ k, k });
    pool1->setPaddingNd(DimsHW{ k / 2, k / 2 });
    pool1->setStrideNd(DimsHW{ 1, 1 });
    auto pool2 = network->addPoolingNd(*pool1->getOutput(0), PoolingType::kMAX, DimsHW{ k, k });
    pool2->setPaddingNd(DimsHW{ k / 2, k / 2 });
    pool2->setStrideNd(DimsHW{ 1, 1 });
    auto pool3 = network->addPoolingNd(*pool2->getOutput(0), PoolingType::kMAX, DimsHW{ k, k });
    pool3->setPaddingNd(DimsHW{ k / 2, k / 2 });
    pool3->setStrideNd(DimsHW{ 1, 1 });
    ITensor* inputTensors[] = { cv1->getOutput(0), pool1->getOutput(0), pool2->getOutput(0), pool3->getOutput(0) };
    auto cat = network->addConcatenation(inputTensors, 4);
    auto cv2 = convBlock(network, weightMap, *cat->getOutput(0), c2, 1, 1, 1, lname + ".cv2");
    return cv2;
}
std::vector<std::vector<float>> getAnchors(std::map<std::string, Weights>& weightMap, std::string lname) {
std::vector<std::vector<float>> anchors;
Weights wts = weightMap[lname + ".anchor_grid"];
int anchor_len = Yolo::CHECK_COUNT * 2;
for (int i = 0; i < wts.count / anchor_len; i++) {
auto *p = (const float*)wts.values + i * anchor_len;
std::vector<float> anchor(p, p + anchor_len);
anchors.push_back(anchor);
}
return anchors;
}
// Build the custom "YoloLayer_TRT" decode plugin and wire it to the outputs of
// the per-scale detection convolutions in `dets`.  Two PluginFields are passed
// to the creator: "netinfo" (class count, input W/H, max boxes) and "kernels"
// (per-head feature-map size + anchors), which the creator consumes to
// construct the plugin.
IPluginV2Layer* addYoLoLayer(INetworkDefinition *network, std::map<std::string, Weights>& weightMap, std::string lname, std::vector<IConvolutionLayer*> dets) {
    auto creator = getPluginRegistry()->getPluginCreator("YoloLayer_TRT", "1");
    auto anchors = getAnchors(weightMap, lname);
    PluginField plugin_fields[2];
    int netinfo[4] = {Yolo::CLASS_NUM, Yolo::INPUT_W, Yolo::INPUT_H, Yolo::MAX_OUTPUT_BBOX_COUNT};
    plugin_fields[0].data = netinfo;
    plugin_fields[0].length = 4;
    plugin_fields[0].name = "netinfo";
    // NOTE(review): data is int[4] but the field is tagged kFLOAT32; the plugin
    // creator presumably reads the raw bytes — confirm before "fixing" the tag.
    plugin_fields[0].type = PluginFieldType::kFLOAT32;
    int scale = 8;  // stride of the first (finest) detection head; doubled per head below
    std::vector<Yolo::YoloKernel> kernels;
    for (size_t i = 0; i < anchors.size(); i++) {
        Yolo::YoloKernel kernel;
        // Feature-map size of head i at the current stride.
        kernel.width = Yolo::INPUT_W / scale;
        kernel.height = Yolo::INPUT_H / scale;
        memcpy(kernel.anchors, &anchors[i][0], anchors[i].size() * sizeof(float));
        kernels.push_back(kernel);
        scale *= 2;
    }
    plugin_fields[1].data = &kernels[0];
    plugin_fields[1].length = kernels.size();
    plugin_fields[1].name = "kernels";
    plugin_fields[1].type = PluginFieldType::kFLOAT32;
    PluginFieldCollection plugin_data;
    plugin_data.nbFields = 2;
    plugin_data.fields = plugin_fields;
    // netinfo/kernels are stack/locals: valid because createPlugin is called
    // here, in the same scope.
    IPluginV2 *plugin_obj = creator->createPlugin("yololayer", &plugin_data);
    std::vector<ITensor*> input_tensors;
    for (auto det: dets) {
        input_tensors.push_back(det->getOutput(0));
    }
    auto yolo = network->addPluginV2(&input_tensors[0], input_tensors.size(), *plugin_obj);
    return yolo;
}
#endif
================================================
FILE: Jeston nano_tensorrt/cuda_utils.h
================================================
#ifndef TRTX_CUDA_UTILS_H_
#define TRTX_CUDA_UTILS_H_

#include <cuda_runtime_api.h>
#include <cassert>   // assert() used by CUDA_CHECK
#include <iostream>  // std::cerr used by CUDA_CHECK
// Fix: the macro below uses std::cerr and assert(), but this header previously
// included neither <iostream> nor <cassert> and compiled only by luck of
// transitive includes in each translation unit.

// CUDA_CHECK(call): evaluate a CUDA runtime call, and on failure log the
// numeric error code with the call site, then abort via assert(0).
#ifndef CUDA_CHECK
#define CUDA_CHECK(callstr)\
    {\
        cudaError_t error_code = callstr;\
        if (error_code != cudaSuccess) {\
            std::cerr << "CUDA error " << error_code << " at " << __FILE__ << ":" << __LINE__;\
            assert(0);\
        }\
    }
#endif  // CUDA_CHECK

#endif  // TRTX_CUDA_UTILS_H_
================================================
FILE: Jeston nano_tensorrt/gen_wts.py
================================================
import sys
import argparse
import os
import struct
import torch
from utils.torch_utils import select_device
def parse_args():
    """Parse command-line arguments and resolve the output .wts path.

    Returns:
        tuple[str, str]: (input .pt path, resolved output .wts path).

    Raises:
        SystemExit: if the input weights file does not exist.
    """
    parser = argparse.ArgumentParser(description='Convert .pt file to .wts')
    parser.add_argument('-w', '--weights', required=True, help='Input weights (.pt) file path (required)')
    parser.add_argument('-o', '--output', help='Output (.wts) file path (optional)')
    opts = parser.parse_args()

    if not os.path.isfile(opts.weights):
        raise SystemExit('Invalid input file')

    if not opts.output:
        # Default: same directory and base name as the input, .wts extension.
        opts.output = os.path.splitext(opts.weights)[0] + '.wts'
    elif os.path.isdir(opts.output):
        # A directory was given: keep the input's base name inside it.
        base = os.path.splitext(os.path.basename(opts.weights))[0]
        opts.output = os.path.join(opts.output, base + '.wts')
    return opts.weights, opts.output
# ---- Script entry: convert a YOLOv5 .pt checkpoint into a .wts text dump ----
pt_file, wts_file = parse_args()

# Initialize
device = select_device('cpu')

# Load model checkpoint; prefer EMA weights when the checkpoint carries them.
model = torch.load(pt_file, map_location=device) # load to FP32
model = model['ema' if model.get('ema') else 'model'].float()

# update anchor_grid info
# Recompute absolute-pixel anchors (anchors are stored stride-relative).
anchor_grid = model.model[-1].anchors * model.model[-1].stride[...,None,None]
# model.model[-1].anchor_grid = anchor_grid
delattr(model.model[-1], 'anchor_grid') # model.model[-1] is detect layer
# The parameters are saved in the OrderDict through the "register_buffer"
# method, and then saved to the weight — this makes anchor_grid appear in
# state_dict() below so the C++ loader can read it.
model.model[-1].register_buffer("anchor_grid",anchor_grid)

model.to(device).eval()

# .wts format: first line is the tensor count; each following line is
# "<name> <num_floats>  <hex> <hex> ..." with big-endian ('>f') IEEE-754 hex.
with open(wts_file, 'w') as f:
    f.write('{}\n'.format(len(model.state_dict().keys())))
    for k, v in model.state_dict().items():
        vr = v.reshape(-1).cpu().numpy()
        f.write('{} {} '.format(k, len(vr)))
        for vv in vr:
            f.write(' ')
            f.write(struct.pack('>f' ,float(vv)).hex())
        f.write('\n')
================================================
FILE: Jeston nano_tensorrt/logging.h
================================================
/*
* Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef TENSORRT_LOGGING_H
#define TENSORRT_LOGGING_H
#include "NvInferRuntimeCommon.h"
#include <cassert>
#include <ctime>
#include <iomanip>
#include <iostream>
#include <ostream>
#include <sstream>
#include <string>
#include "macros.h"
using Severity = nvinfer1::ILogger::Severity;
//! Stream buffer that accumulates a log line and, on sync/destruction, writes
//! a timestamp to stdout followed by "<prefix><message>" to the target stream.
//! When shouldLog is false the buffered text is silently discarded.
class LogStreamConsumerBuffer : public std::stringbuf
{
public:
    //! \param stream    destination stream for flushed log lines
    //! \param prefix    severity tag (e.g. "[W] ") prepended to every message
    //! \param shouldLog when false, buffered text is discarded instead of emitted
    LogStreamConsumerBuffer(std::ostream& stream, const std::string& prefix, bool shouldLog)
        : mOutput(stream)
        , mPrefix(prefix)
        , mShouldLog(shouldLog)
    {
    }

    //! Move constructor.
    //! Fix: carries over mPrefix and mShouldLog as well; the previous version
    //! initialized only mOutput, leaving mPrefix empty and mShouldLog
    //! uninitialized (undefined behavior when the moved-to buffer logged).
    LogStreamConsumerBuffer(LogStreamConsumerBuffer&& other)
        : mOutput(other.mOutput)
        , mPrefix(other.mPrefix)
        , mShouldLog(other.mShouldLog)
    {
    }

    ~LogStreamConsumerBuffer()
    {
        // std::streambuf::pbase() gives a pointer to the beginning of the buffered part of the output sequence
        // std::streambuf::pptr() gives a pointer to the current position of the output sequence
        // if the pointer to the beginning is not equal to the pointer to the current position,
        // call putOutput() to log the output to the stream
        if (pbase() != pptr())
        {
            putOutput();
        }
    }

    //! synchronizes the stream buffer and returns 0 on success
    //! synchronizing the stream buffer consists of inserting the buffer contents into the stream,
    //! resetting the buffer and flushing the stream
    virtual int sync()
    {
        putOutput();
        return 0;
    }

    void putOutput()
    {
        if (mShouldLog)
        {
            // prepend timestamp (written to stdout, ahead of the prefixed message)
            // Fix: "&timestamp" had been corrupted to "×tamp" and did not compile.
            std::time_t timestamp = std::time(nullptr);
            tm* tm_local = std::localtime(&timestamp);
            std::cout << "[";
            std::cout << std::setw(2) << std::setfill('0') << 1 + tm_local->tm_mon << "/";
            std::cout << std::setw(2) << std::setfill('0') << tm_local->tm_mday << "/";
            std::cout << std::setw(4) << std::setfill('0') << 1900 + tm_local->tm_year << "-";
            std::cout << std::setw(2) << std::setfill('0') << tm_local->tm_hour << ":";
            std::cout << std::setw(2) << std::setfill('0') << tm_local->tm_min << ":";
            std::cout << std::setw(2) << std::setfill('0') << tm_local->tm_sec << "] ";
            // std::stringbuf::str() gets the string contents of the buffer
            // insert the buffer contents pre-appended by the appropriate prefix into the stream
            mOutput << mPrefix << str();
            // set the buffer to empty
            str("");
            // flush the stream
            mOutput.flush();
        }
    }

    void setShouldLog(bool shouldLog)
    {
        mShouldLog = shouldLog;
    }

private:
    std::ostream& mOutput;  // destination stream
    std::string mPrefix;    // severity tag
    bool mShouldLog;        // gate for emitting vs. discarding
};
//!
//! \class LogStreamConsumerBase
//! \brief Convenience object used to initialize LogStreamConsumerBuffer before std::ostream in LogStreamConsumer
//!
class LogStreamConsumerBase
{
public:
    //! Constructs the wrapped buffer.  This class exists solely so a derived
    //! class can have the buffer fully built before its std::ostream base is
    //! initialized with the buffer's address.
    LogStreamConsumerBase(std::ostream& sink, const std::string& tag, bool enabled)
        : mBuffer(sink, tag, enabled)
    {
    }

protected:
    LogStreamConsumerBuffer mBuffer;  //!< buffer handed to the derived class's std::ostream base
};
//!
//! \class LogStreamConsumer
//! \brief Convenience object used to facilitate use of C++ stream syntax when logging messages.
//! Order of base classes is LogStreamConsumerBase and then std::ostream.
//! This is because the LogStreamConsumerBase class is used to initialize the LogStreamConsumerBuffer member field
//! in LogStreamConsumer and then the address of the buffer is passed to std::ostream.
//! This is necessary to prevent the address of an uninitialized buffer from being passed to std::ostream.
//! Please do not change the order of the parent classes.
//!
class LogStreamConsumer : protected LogStreamConsumerBase, public std::ostream
{
public:
    //! \brief Creates a LogStreamConsumer which logs messages with level severity.
    //! Reportable severity determines if the messages are severe enough to be logged.
    //! LogStreamConsumerBase MUST be initialized first so that mBuffer exists
    //! before its address is handed to the std::ostream base.
    LogStreamConsumer(Severity reportableSeverity, Severity severity)
        : LogStreamConsumerBase(severityOstream(severity), severityPrefix(severity), severity <= reportableSeverity)
        , std::ostream(&mBuffer) // links the stream buffer with the stream
        , mShouldLog(severity <= reportableSeverity)
        , mSeverity(severity)
    {
    }

    //! Move constructor: rebuilds the buffer from the source's severity settings
    //! (the std::ostream base must be re-linked to this object's own mBuffer).
    LogStreamConsumer(LogStreamConsumer&& other)
        : LogStreamConsumerBase(severityOstream(other.mSeverity), severityPrefix(other.mSeverity), other.mShouldLog)
        , std::ostream(&mBuffer) // links the stream buffer with the stream
        , mShouldLog(other.mShouldLog)
        , mSeverity(other.mSeverity)
    {
    }

    //! Re-evaluate whether this consumer should emit, given a new threshold.
    void setReportableSeverity(Severity reportableSeverity)
    {
        mShouldLog = mSeverity <= reportableSeverity;
        mBuffer.setShouldLog(mShouldLog);
    }

private:
    //! Messages at least as mild as kINFO go to stdout; more severe to stderr.
    static std::ostream& severityOstream(Severity severity)
    {
        return severity >= Severity::kINFO ? std::cout : std::cerr;
    }

    //! Short tag prepended to each message, e.g. "[W] " for warnings.
    static std::string severityPrefix(Severity severity)
    {
        switch (severity)
        {
        case Severity::kINTERNAL_ERROR: return "[F] ";
        case Severity::kERROR: return "[E] ";
        case Severity::kWARNING: return "[W] ";
        case Severity::kINFO: return "[I] ";
        case Severity::kVERBOSE: return "[V] ";
        default: assert(0); return "";
        }
    }

    bool mShouldLog;     // true when mSeverity passes the reportable threshold
    Severity mSeverity;  // severity this consumer was created for
};
//! \class Logger
//!
//! \brief Class which manages logging of TensorRT tools and samples
//!
//! \details This class provides a common interface for TensorRT tools and samples to log information to the console,
//! and supports logging two types of messages:
//!
//! - Debugging messages with an associated severity (info, warning, error, or internal error/fatal)
//! - Test pass/fail messages
//!
//! The advantage of having all samples use this class for logging as opposed to emitting directly to stdout/stderr is
//! that the logic for controlling the verbosity and formatting of sample output is centralized in one location.
//!
//! In the future, this class could be extended to support dumping test results to a file in some standard format
//! (for example, JUnit XML), and providing additional metadata (e.g. timing the duration of a test run).
//!
//! TODO: For backwards compatibility with existing samples, this class inherits directly from the nvinfer1::ILogger
//! interface, which is problematic since there isn't a clean separation between messages coming from the TensorRT
//! library and messages coming from the sample.
//!
//! In the future (once all samples are updated to use Logger::getTRTLogger() to access the ILogger) we can refactor the
//! class to eliminate the inheritance and instead make the nvinfer1::ILogger implementation a member of the Logger
//! object.
class Logger : public nvinfer1::ILogger
{
public:
    //! \param severity minimum severity a message needs in order to be emitted
    Logger(Severity severity = Severity::kWARNING)
        : mReportableSeverity(severity)
    {
    }

    //!
    //! \enum TestResult
    //! \brief Represents the state of a given test
    //!
    enum class TestResult
    {
        kRUNNING, //!< The test is running
        kPASSED,  //!< The test passed
        kFAILED,  //!< The test failed
        kWAIVED   //!< The test was waived
    };

    //!
    //! \brief Forward-compatible method for retrieving the nvinfer::ILogger associated with this Logger
    //! \return The nvinfer1::ILogger associated with this Logger
    //!
    //! TODO Once all samples are updated to use this method to register the logger with TensorRT,
    //! we can eliminate the inheritance of Logger from ILogger
    //!
    nvinfer1::ILogger& getTRTLogger()
    {
        return *this;
    }

    //!
    //! \brief Implementation of the nvinfer1::ILogger::log() virtual method
    //!
    //! Note samples should not be calling this function directly; it will eventually go away once we eliminate the
    //! inheritance from nvinfer1::ILogger
    //!
    void log(Severity severity, const char* msg) TRT_NOEXCEPT override
    {
        LogStreamConsumer(mReportableSeverity, severity) << "[TRT] " << std::string(msg) << std::endl;
    }

    //!
    //! \brief Method for controlling the verbosity of logging output
    //!
    //! \param severity The logger will only emit messages that have severity of this level or higher.
    //!
    void setReportableSeverity(Severity severity)
    {
        mReportableSeverity = severity;
    }

    //!
    //! \brief Opaque handle that holds logging information for a particular test
    //!
    //! This object is an opaque handle to information used by the Logger to print test results.
    //! The sample must call Logger::defineTest() in order to obtain a TestAtom that can be used
    //! with Logger::reportTest{Start,End}().
    //!
    class TestAtom
    {
    public:
        TestAtom(TestAtom&&) = default;

    private:
        friend class Logger;

        TestAtom(bool started, const std::string& name, const std::string& cmdline)
            : mStarted(started)
            , mName(name)
            , mCmdline(cmdline)
        {
        }

        bool mStarted;        // set once reportTestStart() has been called
        std::string mName;    // test name, e.g. "TensorRT.sample_googlenet"
        std::string mCmdline; // command line used to reproduce the test
    };

    //!
    //! \brief Define a test for logging
    //!
    //! \param[in] name The name of the test. This should be a string starting with
    //!                 "TensorRT" and containing dot-separated strings containing
    //!                 the characters [A-Za-z0-9_].
    //!                 For example, "TensorRT.sample_googlenet"
    //! \param[in] cmdline The command line used to reproduce the test
    //
    //! \return a TestAtom that can be used in Logger::reportTest{Start,End}().
    //!
    static TestAtom defineTest(const std::string& name, const std::string& cmdline)
    {
        return TestAtom(false, name, cmdline);
    }

    //!
    //! \brief A convenience overloaded version of defineTest() that accepts an array of command-line arguments
    //!        as input
    //!
    //! \param[in] name The name of the test
    //! \param[in] argc The number of command-line arguments
    //! \param[in] argv The array of command-line arguments (given as C strings)
    //!
    //! \return a TestAtom that can be used in Logger::reportTest{Start,End}().
    static TestAtom defineTest(const std::string& name, int argc, char const* const* argv)
    {
        auto cmdline = genCmdlineString(argc, argv);
        return defineTest(name, cmdline);
    }

    //!
    //! \brief Report that a test has started.
    //!
    //! \pre reportTestStart() has not been called yet for the given testAtom
    //!
    //! \param[in] testAtom The handle to the test that has started
    //!
    static void reportTestStart(TestAtom& testAtom)
    {
        reportTestResult(testAtom, TestResult::kRUNNING);
        assert(!testAtom.mStarted);
        testAtom.mStarted = true;
    }

    //!
    //! \brief Report that a test has ended.
    //!
    //! \pre reportTestStart() has been called for the given testAtom
    //!
    //! \param[in] testAtom The handle to the test that has ended
    //! \param[in] result The result of the test. Should be one of TestResult::kPASSED,
    //!                   TestResult::kFAILED, TestResult::kWAIVED
    //!
    static void reportTestEnd(const TestAtom& testAtom, TestResult result)
    {
        assert(result != TestResult::kRUNNING);
        assert(testAtom.mStarted);
        reportTestResult(testAtom, result);
    }

    //! Convenience wrappers: report a final result and return the matching
    //! process exit code so callers can `return Logger::reportPass(atom);`.
    static int reportPass(const TestAtom& testAtom)
    {
        reportTestEnd(testAtom, TestResult::kPASSED);
        return EXIT_SUCCESS;
    }

    static int reportFail(const TestAtom& testAtom)
    {
        reportTestEnd(testAtom, TestResult::kFAILED);
        return EXIT_FAILURE;
    }

    static int reportWaive(const TestAtom& testAtom)
    {
        reportTestEnd(testAtom, TestResult::kWAIVED);
        return EXIT_SUCCESS;
    }

    static int reportTest(const TestAtom& testAtom, bool pass)
    {
        return pass ? reportPass(testAtom) : reportFail(testAtom);
    }

    Severity getReportableSeverity() const
    {
        return mReportableSeverity;
    }

private:
    //!
    //! \brief returns an appropriate string for prefixing a log message with the given severity
    //!
    static const char* severityPrefix(Severity severity)
    {
        switch (severity)
        {
        case Severity::kINTERNAL_ERROR: return "[F] ";
        case Severity::kERROR: return "[E] ";
        case Severity::kWARNING: return "[W] ";
        case Severity::kINFO: return "[I] ";
        case Severity::kVERBOSE: return "[V] ";
        default: assert(0); return "";
        }
    }

    //!
    //! \brief returns an appropriate string for prefixing a test result message with the given result
    //!
    static const char* testResultString(TestResult result)
    {
        switch (result)
        {
        case TestResult::kRUNNING: return "RUNNING";
        case TestResult::kPASSED: return "PASSED";
        case TestResult::kFAILED: return "FAILED";
        case TestResult::kWAIVED: return "WAIVED";
        default: assert(0); return "";
        }
    }

    //!
    //! \brief returns an appropriate output stream (cout or cerr) to use with the given severity
    //!
    static std::ostream& severityOstream(Severity severity)
    {
        return severity >= Severity::kINFO ? std::cout : std::cerr;
    }

    //!
    //! \brief method that implements logging test results
    //!
    static void reportTestResult(const TestAtom& testAtom, TestResult result)
    {
        severityOstream(Severity::kINFO) << "&&&& " << testResultString(result) << " " << testAtom.mName << " # "
                                         << testAtom.mCmdline << std::endl;
    }

    //!
    //! \brief generate a command line string from the given (argc, argv) values
    //!
    static std::string genCmdlineString(int argc, char const* const* argv)
    {
        std::stringstream ss;
        for (int i = 0; i < argc; i++)
        {
            if (i > 0)
                ss << " ";
            ss << argv[i];
        }
        return ss.str();
    }

    Severity mReportableSeverity;  // minimum severity that log() will actually emit
};
// NOTE(review): an anonymous namespace in a header gives each translation unit
// its own copies of these helpers; harmless here, but `inline` alone would
// suffice and avoid per-TU duplication.
namespace
{
//!
//! \brief produces a LogStreamConsumer object that can be used to log messages of severity kVERBOSE
//!
//! Example usage:
//!
//!     LOG_VERBOSE(logger) << "hello world" << std::endl;
//!
inline LogStreamConsumer LOG_VERBOSE(const Logger& logger)
{
    return LogStreamConsumer(logger.getReportableSeverity(), Severity::kVERBOSE);
}

//!
//! \brief produces a LogStreamConsumer object that can be used to log messages of severity kINFO
//!
//! Example usage:
//!
//!     LOG_INFO(logger) << "hello world" << std::endl;
//!
inline LogStreamConsumer LOG_INFO(const Logger& logger)
{
    return LogStreamConsumer(logger.getReportableSeverity(), Severity::kINFO);
}

//!
//! \brief produces a LogStreamConsumer object that can be used to log messages of severity kWARNING
//!
//! Example usage:
//!
//!     LOG_WARN(logger) << "hello world" << std::endl;
//!
inline LogStreamConsumer LOG_WARN(const Logger& logger)
{
    return LogStreamConsumer(logger.getReportableSeverity(), Severity::kWARNING);
}

//!
//! \brief produces a LogStreamConsumer object that can be used to log messages of severity kERROR
//!
//! Example usage:
//!
//!     LOG_ERROR(logger) << "hello world" << std::endl;
//!
inline LogStreamConsumer LOG_ERROR(const Logger& logger)
{
    return LogStreamConsumer(logger.getReportableSeverity(), Severity::kERROR);
}

//!
//! \brief produces a LogStreamConsumer object that can be used to log messages of severity kINTERNAL_ERROR
//!        ("fatal" severity)
//!
//! Example usage:
//!
//!     LOG_FATAL(logger) << "hello world" << std::endl;
//!
inline LogStreamConsumer LOG_FATAL(const Logger& logger)
{
    return LogStreamConsumer(logger.getReportableSeverity(), Severity::kINTERNAL_ERROR);
}
} // anonymous namespace
#endif // TENSORRT_LOGGING_H
================================================
FILE: Jeston nano_tensorrt/macros.h
================================================
#ifndef __MACROS_H
#define __MACROS_H

// API: shared-library symbol visibility annotation.
// When building the library (API_EXPORTS defined) symbols are exported;
// when consuming it they are imported (MSVC) or left plain (other toolchains).
#ifdef API_EXPORTS
#if defined(_MSC_VER)
#define API __declspec(dllexport)
#else
#define API __attribute__((visibility("default")))
#endif
#else
#if defined(_MSC_VER)
#define API __declspec(dllimport)
#else
#define API
#endif
#endif  // API_EXPORTS

// TensorRT 8 requires noexcept on plugin overrides and const buffers in
// enqueue(); these macros let the same source build against TRT 7 and 8+.
#if NV_TENSORRT_MAJOR >= 8
#define TRT_NOEXCEPT noexcept
#define TRT_CONST_ENQUEUE const
#else
#define TRT_NOEXCEPT
#define TRT_CONST_ENQUEUE
#endif

#endif  // __MACROS_H
================================================
FILE: Jeston nano_tensorrt/preprocess.cu
================================================
#include "preprocess.h"
#include <opencv2/opencv.hpp>
// One thread per destination pixel: applies the dst->src affine map (d2s),
// bilinearly samples the 3-channel uint8 source image, swaps BGR->RGB,
// normalizes to [0,1] and writes the pixel to dst in planar (CHW) layout.
// Samples outside the source are filled with const_value_st.
__global__ void warpaffine_kernel(
    uint8_t* src, int src_line_size, int src_width,
    int src_height, float* dst, int dst_width,
    int dst_height, uint8_t const_value_st,
    AffineMatrix d2s, int edge) {
    int position = blockDim.x * blockIdx.x + threadIdx.x;
    if (position >= edge) return;  // edge == dst_width * dst_height

    // Affine rows: src_x = [m_x1 m_y1 m_z1] . [dx dy 1], likewise for src_y.
    float m_x1 = d2s.value[0];
    float m_y1 = d2s.value[1];
    float m_z1 = d2s.value[2];
    float m_x2 = d2s.value[3];
    float m_y2 = d2s.value[4];
    float m_z2 = d2s.value[5];

    // Destination pixel coordinates for this thread.
    int dx = position % dst_width;
    int dy = position / dst_width;
    // +0.5f: presumably a half-pixel center offset — TODO confirm convention.
    float src_x = m_x1 * dx + m_y1 * dy + m_z1 + 0.5f;
    float src_y = m_x2 * dx + m_y2 * dy + m_z2 + 0.5f;
    float c0, c1, c2;

    if (src_x <= -1 || src_x >= src_width || src_y <= -1 || src_y >= src_height) {
        // out of range: constant border fill
        c0 = const_value_st;
        c1 = const_value_st;
        c2 = const_value_st;
    } else {
        // Bilinear interpolation over the 2x2 neighborhood of (src_x, src_y).
        int y_low = floorf(src_y);
        int x_low = floorf(src_x);
        int y_high = y_low + 1;
        int x_high = x_low + 1;

        uint8_t const_value[] = {const_value_st, const_value_st, const_value_st};
        float ly = src_y - y_low;
        float lx = src_x - x_low;
        float hy = 1 - ly;
        float hx = 1 - lx;
        float w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx;
        // Each corner defaults to the constant border; replaced below when the
        // corner actually lies inside the source image.
        uint8_t* v1 = const_value;
        uint8_t* v2 = const_value;
        uint8_t* v3 = const_value;
        uint8_t* v4 = const_value;

        if (y_low >= 0) {
            if (x_low >= 0)
                v1 = src + y_low * src_line_size + x_low * 3;
            if (x_high < src_width)
                v2 = src + y_low * src_line_size + x_high * 3;
        }
        if (y_high < src_height) {
            if (x_low >= 0)
                v3 = src + y_high * src_line_size + x_low * 3;
            if (x_high < src_width)
                v4 = src + y_high * src_line_size + x_high * 3;
        }

        // Weighted sum per channel (source is packed 3-byte pixels).
        c0 = w1 * v1[0] + w2 * v2[0] + w3 * v3[0] + w4 * v4[0];
        c1 = w1 * v1[1] + w2 * v2[1] + w3 * v3[1] + w4 * v4[1];
        c2 = w1 * v1[2] + w2 * v2[2] + w3 * v3[2] + w4 * v4[2];
    }

    //bgr to rgb
    float t = c2;
    c2 = c0;
    c0 = t;

    //normalization
    c0 = c0 / 255.0f;
    c1 = c1 / 255.0f;
    c2 = c2 / 255.0f;

    //rgbrgbrgb to rrrgggbbb (interleaved HWC -> planar CHW)
    int area = dst_width * dst_height;
    float* pdst_c0 = dst + dy * dst_width + dx;
    float* pdst_c1 = pdst_c0 + area;
    float* pdst_c2 = pdst_c1 + area;
    *pdst_c0 = c0;
    *pdst_c1 = c1;
    *pdst_c2 = c2;
}
// Host-side launcher: builds the letterbox (aspect-preserving, centered)
// src->dst affine transform, inverts it to dst->src with OpenCV, and launches
// warpaffine_kernel with one thread per destination pixel on `stream`.
void preprocess_kernel_img(
    uint8_t* src, int src_width, int src_height,
    float* dst, int dst_width, int dst_height,
    cudaStream_t stream) {
    AffineMatrix s2d,d2s;
    // Uniform scale so the whole source fits inside the destination.
    float scale = std::min(dst_height / (float)src_height, dst_width / (float)src_width);

    // Forward (src->dst) transform: scale about the image center, then center
    // the result in the destination (letterbox offsets in value[2]/value[5]).
    s2d.value[0] = scale;
    s2d.value[1] = 0;
    s2d.value[2] = -scale * src_width * 0.5 + dst_width * 0.5;
    s2d.value[3] = 0;
    s2d.value[4] = scale;
    s2d.value[5] = -scale * src_height * 0.5 + dst_height * 0.5;

    // The cv::Mats wrap the structs' storage directly; invertAffineTransform
    // writes the inverse into d2s.value (memcpy below is a belt-and-braces
    // copy in case OpenCV reallocated the destination Mat).
    cv::Mat m2x3_s2d(2, 3, CV_32F, s2d.value);
    cv::Mat m2x3_d2s(2, 3, CV_32F, d2s.value);
    cv::invertAffineTransform(m2x3_s2d, m2x3_d2s);
    memcpy(d2s.value, m2x3_d2s.ptr<float>(0), sizeof(d2s.value));

    int jobs = dst_height * dst_width;  // one thread per destination pixel
    int threads = 256;
    int blocks = ceil(jobs / (float)threads);
    warpaffine_kernel<<<blocks, threads, 0, stream>>>(
        src, src_width*3, src_width,
        src_height, dst, dst_width,
        dst_height, 128, d2s, jobs);  // 128: gray letterbox padding value
}
================================================
FILE: Jeston nano_tensorrt/preprocess.h
================================================
#ifndef __PREPROCESS_H
#define __PREPROCESS_H

#include <cuda_runtime.h>
#include <cstdint>

// 2x3 affine transform, row-major: x' = v0*x + v1*y + v2, y' = v3*x + v4*y + v5.
struct AffineMatrix{
    float value[6];
};

// GPU letterbox preprocessing: warps a packed 3-channel uint8 image (src,
// device memory) into a normalized planar float tensor (dst) of size
// dst_width x dst_height, with BGR->RGB swap and gray padding.  The work is
// enqueued asynchronously on `stream`.
void preprocess_kernel_img(uint8_t* src, int src_width, int src_height,
                           float* dst, int dst_width, int dst_height,
                           cudaStream_t stream);

#endif  // __PREPROCESS_H
================================================
FILE: Jeston nano_tensorrt/samples
================================================
../yolov3-spp/samples/
================================================
FILE: Jeston nano_tensorrt/utils.h
================================================
#ifndef TRTX_YOLOV5_UTILS_H_
#define TRTX_YOLOV5_UTILS_H_
#include <dirent.h>
#include <opencv2/opencv.hpp>
// CPU letterbox: scale `img` to fit inside input_w x input_h while keeping its
// aspect ratio, center it, and pad the borders with gray (128,128,128).
static inline cv::Mat preprocess_img(cv::Mat& img, int input_w, int input_h) {
    float ratio_w = input_w / (img.cols*1.0);
    float ratio_h = input_h / (img.rows*1.0);

    int new_w, new_h, off_x, off_y;
    if (ratio_h > ratio_w) {
        // Width is the limiting dimension: fill it, center vertically.
        new_w = input_w;
        new_h = ratio_w * img.rows;
        off_x = 0;
        off_y = (input_h - new_h) / 2;
    } else {
        // Height is the limiting dimension: fill it, center horizontally.
        new_w = ratio_h * img.cols;
        new_h = input_h;
        off_x = (input_w - new_w) / 2;
        off_y = 0;
    }

    cv::Mat resized(new_h, new_w, CV_8UC3);
    cv::resize(img, resized, resized.size(), 0, 0, cv::INTER_LINEAR);
    cv::Mat canvas(input_h, input_w, CV_8UC3, cv::Scalar(128, 128, 128));
    resized.copyTo(canvas(cv::Rect(off_x, off_y, resized.cols, resized.rows)));
    return canvas;
}
// Append the names (not full paths) of all entries in p_dir_name to
// file_names, skipping "." and "..".
// Returns 0 on success, -1 if the directory cannot be opened.
static inline int read_files_in_dir(const char *p_dir_name, std::vector<std::string> &file_names) {
    DIR* dir = opendir(p_dir_name);
    if (dir == nullptr) {
        return -1;
    }
    for (struct dirent* entry = readdir(dir); entry != nullptr; entry = readdir(dir)) {
        const std::string name(entry->d_name);
        if (name != "." && name != "..") {
            file_names.push_back(name);
        }
    }
    closedir(dir);
    return 0;
}
#endif // TRTX_YOLOV5_UTILS_H_
================================================
FILE: Jeston nano_tensorrt/yololayer.cu
================================================
#include <assert.h>

#include <cstring>   // std::memcpy for the (de)serialization helpers
#include <iostream>
#include <vector>

#include "yololayer.h"
#include "cuda_utils.h"
namespace Tn
{
    // Serialize a trivially-copyable value into a raw byte buffer, advancing
    // the cursor past the bytes written.
    // Fix: the previous reinterpret_cast store/load assumed the buffer was
    // suitably aligned for T and violated strict aliasing; std::memcpy is
    // well-defined for any alignment and compiles to the same code.
    template<typename T>
    void write(char*& buffer, const T& val)
    {
        std::memcpy(buffer, &val, sizeof(T));
        buffer += sizeof(T);
    }

    // Deserialize a trivially-copyable value from a raw byte buffer, advancing
    // the cursor past the bytes read.
    template<typename T>
    void read(const char*& buffer, T& val)
    {
        std::memcpy(&val, buffer, sizeof(T));
        buffer += sizeof(T);
    }
}
using namespace Yolo;
namespace nvinfer1
{
// Build-time constructor: record network geometry / decode limits and upload
// each detection head's anchors to the GPU.  mAnchor is a pinned host array
// holding one device pointer per head.
YoloLayerPlugin::YoloLayerPlugin(int classCount, int netWidth, int netHeight, int maxOut, const std::vector<Yolo::YoloKernel>& vYoloKernel)
{
    mClassCount = classCount;
    mYoloV5NetWidth = netWidth;
    mYoloV5NetHeight = netHeight;
    mMaxOutObject = maxOut;
    mYoloKernel = vYoloKernel;
    mKernelCount = vYoloKernel.size();

    CUDA_CHECK(cudaMallocHost(&mAnchor, mKernelCount * sizeof(void*)));
    size_t AnchorLen = sizeof(float)* CHECK_COUNT * 2;  // CHECK_COUNT anchors x (w, h)
    for (int ii = 0; ii < mKernelCount; ii++)
    {
        CUDA_CHECK(cudaMalloc(&mAnchor[ii], AnchorLen));
        const auto& yolo = mYoloKernel[ii];
        CUDA_CHECK(cudaMemcpy(mAnchor[ii], yolo.anchors, AnchorLen, cudaMemcpyHostToDevice));
    }
}

// Release the per-head device anchor buffers and the pinned pointer array.
YoloLayerPlugin::~YoloLayerPlugin()
{
    for (int ii = 0; ii < mKernelCount; ii++)
    {
        CUDA_CHECK(cudaFree(mAnchor[ii]));
    }
    CUDA_CHECK(cudaFreeHost(mAnchor));
}

// create the plugin at runtime from a byte stream
// Read order must mirror serialize(): six scalars, then the raw YoloKernel
// array; anchors are then re-uploaded to the device.
YoloLayerPlugin::YoloLayerPlugin(const void* data, size_t length)
{
    using namespace Tn;
    const char *d = reinterpret_cast<const char *>(data), *a = d;
    read(d, mClassCount);
    read(d, mThreadCount);
    read(d, mKernelCount);
    read(d, mYoloV5NetWidth);
    read(d, mYoloV5NetHeight);
    read(d, mMaxOutObject);
    mYoloKernel.resize(mKernelCount);
    auto kernelSize = mKernelCount * sizeof(YoloKernel);
    memcpy(mYoloKernel.data(), d, kernelSize);
    d += kernelSize;

    CUDA_CHECK(cudaMallocHost(&mAnchor, mKernelCount * sizeof(void*)));
    size_t AnchorLen = sizeof(float)* CHECK_COUNT * 2;
    for (int ii = 0; ii < mKernelCount; ii++)
    {
        CUDA_CHECK(cudaMalloc(&mAnchor[ii], AnchorLen));
        const auto& yolo = mYoloKernel[ii];
        CUDA_CHECK(cudaMemcpy(mAnchor[ii], yolo.anchors, AnchorLen, cudaMemcpyHostToDevice));
    }
    assert(d == a + length);  // consumed exactly the serialized payload
}
// Serialize all plugin state; byte layout must match the deserializing
// constructor above.
void YoloLayerPlugin::serialize(void* buffer) const TRT_NOEXCEPT
{
    using namespace Tn;
    char* d = static_cast<char*>(buffer), *a = d;
    write(d, mClassCount);
    write(d, mThreadCount);
    write(d, mKernelCount);
    write(d, mYoloV5NetWidth);
    write(d, mYoloV5NetHeight);
    write(d, mMaxOutObject);
    auto kernelSize = mKernelCount * sizeof(YoloKernel);
    memcpy(d, mYoloKernel.data(), kernelSize);
    d += kernelSize;

    assert(d == a + getSerializationSize());
}

// Total byte count of the payload written by serialize().
size_t YoloLayerPlugin::getSerializationSize() const TRT_NOEXCEPT
{
    return sizeof(mClassCount) + sizeof(mThreadCount) + sizeof(mKernelCount) + sizeof(Yolo::YoloKernel) * mYoloKernel.size() + sizeof(mYoloV5NetWidth) + sizeof(mYoloV5NetHeight) + sizeof(mMaxOutObject);
}

// Nothing to do: all resources are set up in the constructors.
int YoloLayerPlugin::initialize() TRT_NOEXCEPT
{
    return 0;
}

Dims YoloLayerPlugin::getOutputDimensions(int index, const Dims* inputs, int nbInputDims) TRT_NOEXCEPT
{
    //output the result to channel
    // One float for the detection count, followed by mMaxOutObject Detection
    // structs flattened to floats.
    int totalsize = mMaxOutObject * sizeof(Detection) / sizeof(float);
    return Dims3(totalsize + 1, 1, 1);
}
// Set plugin namespace
void YoloLayerPlugin::setPluginNamespace(const char* pluginNamespace) TRT_NOEXCEPT
{
    mPluginNamespace = pluginNamespace;
}

const char* YoloLayerPlugin::getPluginNamespace() const TRT_NOEXCEPT
{
    return mPluginNamespace;
}

// Return the DataType of the plugin output at the requested index
DataType YoloLayerPlugin::getOutputDataType(int index, const nvinfer1::DataType* inputTypes, int nbInputs) const TRT_NOEXCEPT
{
    return DataType::kFLOAT;
}

// Return true if output tensor is broadcast across a batch.
bool YoloLayerPlugin::isOutputBroadcastAcrossBatch(int outputIndex, const bool* inputIsBroadcasted, int nbInputs) const TRT_NOEXCEPT
{
    return false;
}

// Return true if plugin can use input that is broadcast across batch without replication.
bool YoloLayerPlugin::canBroadcastInputAcrossBatch(int inputIndex) const TRT_NOEXCEPT
{
    return false;
}

// No reconfiguration needed: behavior is fixed at construction time.
void YoloLayerPlugin::configurePlugin(const PluginTensorDesc* in, int nbInput, const PluginTensorDesc* out, int nbOutput) TRT_NOEXCEPT
{
}

// Attach the plugin object to an execution context and grant the plugin the access to some context resource.
void YoloLayerPlugin::attachToContext(cudnnContext* cudnnContext, cublasContext* cublasContext, IGpuAllocator* gpuAllocator) TRT_NOEXCEPT
{
}

// Detach the plugin object from its execution context.
void YoloLayerPlugin::detachFromContext() TRT_NOEXCEPT {}

// Type/version must match YoloPluginCreator and registry lookups.
const char* YoloLayerPlugin::getPluginType() const TRT_NOEXCEPT
{
    return "YoloLayer_TRT";
}

const char* YoloLayerPlugin::getPluginVersion() const TRT_NOEXCEPT
{
    return "1";
}

void YoloLayerPlugin::destroy() TRT_NOEXCEPT
{
    delete this;
}

// Clone the plugin
// Uses the build-time constructor, so the clone re-uploads its own anchors.
IPluginV2IOExt* YoloLayerPlugin::clone() const TRT_NOEXCEPT
{
    YoloLayerPlugin* p = new YoloLayerPlugin(mClassCount, mYoloV5NetWidth, mYoloV5NetHeight, mMaxOutObject, mYoloKernel);
    p->setPluginNamespace(mPluginNamespace);
    return p;
}
// Sigmoid activation, evaluated per element on the device.
__device__ float Logist(float data) { return 1.0f / (1.0f + expf(-data)); };

// One thread per grid cell (times batch): decodes up to CHECK_COUNT anchor
// boxes for that cell and appends those whose objectness passes IGNORE_THRESH
// to the output buffer.  Output layout per batch: output[0] is a running
// detection count, followed by Detection structs.  Input layout per batch
// (from the indexing below): CHECK_COUNT blocks of (5 + classes) planes, each
// plane total_grid floats.
__global__ void CalDetection(const float *input, float *output, int noElements,
        const int netwidth, const int netheight, int maxoutobject, int yoloWidth, int yoloHeight, const float anchors[CHECK_COUNT * 2], int classes, int outputElem)
{
    int idx = threadIdx.x + blockDim.x * blockIdx.x;
    if (idx >= noElements) return;

    int total_grid = yoloWidth * yoloHeight;
    int bnIdx = idx / total_grid;       // batch index
    idx = idx - total_grid * bnIdx;     // cell index within this feature map
    int info_len_i = 5 + classes;       // x, y, w, h, objectness + class scores
    const float* curInput = input + bnIdx * (info_len_i * total_grid * CHECK_COUNT);

    for (int k = 0; k < CHECK_COUNT; ++k) {
        // Objectness gate for anchor k at this cell.
        float box_prob = Logist(curInput[idx + k * info_len_i * total_grid + 4 * total_grid]);
        if (box_prob < IGNORE_THRESH) continue;

        // Arg-max over the class score planes.
        int class_id = 0;
        float max_cls_prob = 0.0;
        for (int i = 5; i < info_len_i; ++i) {
            float p = Logist(curInput[idx + k * info_len_i * total_grid + i * total_grid]);
            if (p > max_cls_prob) {
                max_cls_prob = p;
                class_id = i - 5;
            }
        }

        // Atomically claim an output slot; slot 0 holds the running count.
        float *res_count = output + bnIdx * outputElem;
        int count = (int)atomicAdd(res_count, 1);
        // NOTE(review): when full, this also skips the remaining anchors of
        // this cell, and the stored count can exceed maxoutobject — the
        // consumer must clamp it.
        if (count >= maxoutobject) return;
        char *data = (char*)res_count + sizeof(float) + count * sizeof(Detection);
        Detection *det = (Detection*)(data);

        int row = idx / yoloWidth;
        int col = idx % yoloWidth;

        //Location
        // pytorch:
        //  y = x[i].sigmoid()
        //  y[..., 0:2] = (y[..., 0:2] * 2. - 0.5 + self.grid[i].to(x[i].device)) * self.stride[i]  # xy
        //  y[..., 2:4] = (y[..., 2:4] * 2) ** 2 * self.anchor_grid[i]  # wh
        // X: (sigmoid(tx) + cx)/FeaturemapW * netwidth
        det->bbox[0] = (col - 0.5f + 2.0f * Logist(curInput[idx + k * info_len_i * total_grid + 0 * total_grid])) * netwidth / yoloWidth;
        det->bbox[1] = (row - 0.5f + 2.0f * Logist(curInput[idx + k * info_len_i * total_grid + 1 * total_grid])) * netheight / yoloHeight;

        // W: (Pw * e^tw) / FeaturemapW * netwidth
        // v5: https://github.com/ultralytics/yolov5/issues/471
        det->bbox[2] = 2.0f * Logist(curInput[idx + k * info_len_i * total_grid + 2 * total_grid]);
        det->bbox[2] = det->bbox[2] * det->bbox[2] * anchors[2 * k];
        det->bbox[3] = 2.0f * Logist(curInput[idx + k * info_len_i * total_grid + 3 * total_grid]);
        det->bbox[3] = det->bbox[3] * det->bbox[3] * anchors[2 * k + 1];
        det->conf = box_prob * max_cls_prob;
        det->class_id = class_id;
    }
}
// Launch CalDetection once per YOLO head: zero each image's detection
// counter, then decode every head into the shared output buffer on `stream`.
void YoloLayerPlugin::forwardGpu(const float* const* inputs, float *output, cudaStream_t stream, int batchSize)
{
    // Floats per image: 1 count slot + mMaxOutObject Detection structs.
    int outputElem = 1 + mMaxOutObject * sizeof(Detection) / sizeof(float);
    for (int idx = 0; idx < batchSize; ++idx) {
        // Only the count (first float) needs resetting; stale Detection bytes
        // beyond the new count are never read.
        CUDA_CHECK(cudaMemsetAsync(output + idx * outputElem, 0, sizeof(float), stream));
    }
    int numElem = 0;
    for (unsigned int i = 0; i < mYoloKernel.size(); ++i) {
        const auto& yolo = mYoloKernel[i];
        numElem = yolo.width * yolo.height * batchSize;
        // NOTE(review): mThreadCount is a member, so this shrink is sticky
        // across calls — a small head permanently lowers the block size.
        if (numElem < mThreadCount) mThreadCount = numElem;
        //printf("Net: %d %d \n", mYoloV5NetWidth, mYoloV5NetHeight);
        CalDetection << < (numElem + mThreadCount - 1) / mThreadCount, mThreadCount, 0, stream >> >
            (inputs[i], output, numElem, mYoloV5NetWidth, mYoloV5NetHeight, mMaxOutObject, yolo.width, yolo.height, (float*)mAnchor[i], mClassCount, outputElem);
    }
}
// IPluginV2 entry point: forwards to the CUDA implementation. inputs[i] is
// the i-th head's feature map; outputs[0] receives the decoded detections.
int YoloLayerPlugin::enqueue(int batchSize, const void* const* inputs, void* TRT_CONST_ENQUEUE* outputs, void* workspace, cudaStream_t stream) TRT_NOEXCEPT
{
    forwardGpu((const float* const*)inputs, (float*)outputs[0], stream, batchSize);
    return 0;
}
// Static creator state. This plugin advertises no PluginFields, so the
// collection is deliberately registered empty.
PluginFieldCollection YoloPluginCreator::mFC{};
std::vector<PluginField> YoloPluginCreator::mPluginAttributes;
YoloPluginCreator::YoloPluginCreator()
{
    mPluginAttributes.clear();
    mFC.nbFields = mPluginAttributes.size();
    mFC.fields = mPluginAttributes.data();
}
// Plugin type name used by the TensorRT plugin registry to match
// serialized engines back to this creator.
const char* YoloPluginCreator::getPluginName() const TRT_NOEXCEPT
{
    return "YoloLayer_TRT";
}
const char* YoloPluginCreator::getPluginVersion() const TRT_NOEXCEPT
{
    return "1";
}
// Field schema advertised to callers of createPlugin (empty here).
const PluginFieldCollection* YoloPluginCreator::getFieldNames() TRT_NOEXCEPT
{
    return &mFC;
}
// Build a YoloLayerPlugin from caller-supplied fields:
//   "netinfo" - int[4]: class count, input width, input height, max boxes
//   "kernels" - array of Yolo::YoloKernel; `length` is the element count
IPluginV2IOExt* YoloPluginCreator::createPlugin(const char* name, const PluginFieldCollection* fc) TRT_NOEXCEPT
{
    assert(fc->nbFields == 2);
    assert(strcmp(fc->fields[0].name, "netinfo") == 0);
    assert(strcmp(fc->fields[1].name, "kernels") == 0);
    int *p_netinfo = (int*)(fc->fields[0].data);
    int class_count = p_netinfo[0];
    int input_w = p_netinfo[1];
    int input_h = p_netinfo[2];
    int max_output_object_count = p_netinfo[3];
    std::vector<Yolo::YoloKernel> kernels(fc->fields[1].length);
    // data() instead of &kernels[0]: dereferencing element 0 of an empty
    // vector is undefined behavior; also skip the memcpy entirely when empty.
    if (!kernels.empty()) {
        memcpy(kernels.data(), fc->fields[1].data, kernels.size() * sizeof(Yolo::YoloKernel));
    }
    YoloLayerPlugin* obj = new YoloLayerPlugin(class_count, input_w, input_h, max_output_object_count, kernels);
    obj->setPluginNamespace(mNamespace.c_str());
    return obj;
}
// Recreate a plugin instance from bytes produced by
// YoloLayerPlugin::serialize() when an engine is deserialized.
IPluginV2IOExt* YoloPluginCreator::deserializePlugin(const char* name, const void* serialData, size_t serialLength) TRT_NOEXCEPT
{
    // This object will be deleted when the network is destroyed, which will
    // call YoloLayerPlugin::destroy()
    YoloLayerPlugin* obj = new YoloLayerPlugin(serialData, serialLength);
    obj->setPluginNamespace(mNamespace.c_str());
    return obj;
}
}
================================================
FILE: Jeston nano_tensorrt/yololayer.h
================================================
#ifndef _YOLO_LAYER_H
#define _YOLO_LAYER_H
#include <vector>
#include <string>
#include <NvInfer.h>
#include "macros.h"
namespace Yolo
{
    // Anchors per detection head.
    static constexpr int CHECK_COUNT = 3;
    // Objectness threshold below which the CUDA decode kernel drops a box.
    static constexpr float IGNORE_THRESH = 0.1f;
    // Per-head metadata: feature-map size plus its (w, h) anchor pairs.
    // NOTE: this struct is memcpy-serialized into the engine — do not reorder.
    struct YoloKernel
    {
        int width;
        int height;
        float anchors[CHECK_COUNT * 2];
    };
    // Capacity of the plugin's output buffer, in detections per image.
    static constexpr int MAX_OUTPUT_BBOX_COUNT = 1000;
    static constexpr int CLASS_NUM = 80;
    static constexpr int INPUT_H = 480;  // yolov5's input height and width must be divisible by 32.
    static constexpr int INPUT_W = 640;
    static constexpr int LOCATIONS = 4;
    // One decoded detection as written by the plugin into the output blob.
    struct alignas(float) Detection {
        //center_x center_y w h
        float bbox[LOCATIONS];
        float conf; // bbox_conf * cls_conf
        float class_id;
    };
}
namespace nvinfer1
{
    // TensorRT plugin implementing the yolov5 "Detect" layer: decodes the raw
    // head feature maps into Yolo::Detection records on the GPU.
    class API YoloLayerPlugin : public IPluginV2IOExt
    {
    public:
        // Build from explicit network parameters and per-head kernel metadata.
        YoloLayerPlugin(int classCount, int netWidth, int netHeight, int maxOut, const std::vector<Yolo::YoloKernel>& vYoloKernel);
        // Rebuild from bytes produced by serialize().
        YoloLayerPlugin(const void* data, size_t length);
        ~YoloLayerPlugin();
        int getNbOutputs() const TRT_NOEXCEPT override
        {
            return 1;
        }
        Dims getOutputDimensions(int index, const Dims* inputs, int nbInputDims) TRT_NOEXCEPT override;
        int initialize() TRT_NOEXCEPT override;
        virtual void terminate() TRT_NOEXCEPT override {};
        virtual size_t getWorkspaceSize(int maxBatchSize) const TRT_NOEXCEPT override { return 0; }
        virtual int enqueue(int batchSize, const void* const* inputs, void*TRT_CONST_ENQUEUE* outputs, void* workspace, cudaStream_t stream) TRT_NOEXCEPT override;
        virtual size_t getSerializationSize() const TRT_NOEXCEPT override;
        virtual void serialize(void* buffer) const TRT_NOEXCEPT override;
        // Only linear FP32 tensors are supported by the decode kernel.
        bool supportsFormatCombination(int pos, const PluginTensorDesc* inOut, int nbInputs, int nbOutputs) const TRT_NOEXCEPT override {
            return inOut[pos].format == TensorFormat::kLINEAR && inOut[pos].type == DataType::kFLOAT;
        }
        const char* getPluginType() const TRT_NOEXCEPT override;
        const char* getPluginVersion() const TRT_NOEXCEPT override;
        void destroy() TRT_NOEXCEPT override;
        IPluginV2IOExt* clone() const TRT_NOEXCEPT override;
        void setPluginNamespace(const char* pluginNamespace) TRT_NOEXCEPT override;
        const char* getPluginNamespace() const TRT_NOEXCEPT override;
        DataType getOutputDataType(int index, const nvinfer1::DataType* inputTypes, int nbInputs) const TRT_NOEXCEPT override;
        bool isOutputBroadcastAcrossBatch(int outputIndex, const bool* inputIsBroadcasted, int nbInputs) const TRT_NOEXCEPT override;
        bool canBroadcastInputAcrossBatch(int inputIndex) const TRT_NOEXCEPT override;
        void attachToContext(
            cudnnContext* cudnnContext, cublasContext* cublasContext, IGpuAllocator* gpuAllocator) TRT_NOEXCEPT override;
        void configurePlugin(const PluginTensorDesc* in, int nbInput, const PluginTensorDesc* out, int nbOutput) TRT_NOEXCEPT override;
        void detachFromContext() TRT_NOEXCEPT override;
    private:
        // CUDA implementation behind enqueue().
        void forwardGpu(const float* const* inputs, float *output, cudaStream_t stream, int batchSize = 1);
        int mThreadCount = 256;        // CUDA block size (may shrink at runtime)
        const char* mPluginNamespace;
        int mKernelCount;              // number of detection heads
        int mClassCount;
        int mYoloV5NetWidth;
        int mYoloV5NetHeight;
        int mMaxOutObject;             // output capacity per image
        std::vector<Yolo::YoloKernel> mYoloKernel;
        void** mAnchor;                // per-head anchor arrays in device memory
    };
    // Factory registered with TensorRT; creates/deserializes YoloLayerPlugin.
    class API YoloPluginCreator : public IPluginCreator
    {
    public:
        YoloPluginCreator();
        ~YoloPluginCreator() override = default;
        const char* getPluginName() const TRT_NOEXCEPT override;
        const char* getPluginVersion() const TRT_NOEXCEPT override;
        const PluginFieldCollection* getFieldNames() TRT_NOEXCEPT override;
        IPluginV2IOExt* createPlugin(const char* name, const PluginFieldCollection* fc) TRT_NOEXCEPT override;
        IPluginV2IOExt* deserializePlugin(const char* name, const void* serialData, size_t serialLength) TRT_NOEXCEPT override;
        void setPluginNamespace(const char* libNamespace) TRT_NOEXCEPT override
        {
            mNamespace = libNamespace;
        }
        const char* getPluginNamespace() const TRT_NOEXCEPT override
        {
            return mNamespace.c_str();
        }
    private:
        std::string mNamespace;
        static PluginFieldCollection mFC;
        static std::vector<PluginField> mPluginAttributes;
    };
    REGISTER_TENSORRT_PLUGIN(YoloPluginCreator);
};
#endif // _YOLO_LAYER_H
================================================
FILE: Jeston nano_tensorrt/yolov5.cpp
================================================
#include <iostream>
#include <chrono>
#include <cmath>
#include "cuda_utils.h"
#include "logging.h"
#include "common.hpp"
#include "utils.h"
#include "calibrator.h"
#include "preprocess.h"
#include "macros.h"
#define USE_FP16  // set USE_INT8 or USE_FP16 or USE_FP32
#define DEVICE 0  // GPU id
#define NMS_THRESH 0.4
#define CONF_THRESH 0.5
#define BATCH_SIZE 1
#define MAX_IMAGE_INPUT_SIZE_THRESH 3000 * 3000 // ensure it exceed the maximum size in the input images !
// stuff we know about the network and the input/output blobs
static const int INPUT_H = Yolo::INPUT_H;
static const int INPUT_W = Yolo::INPUT_W;
static const int CLASS_NUM = Yolo::CLASS_NUM;
// Per-image output layout: 1 count float + MAX_OUTPUT_BBOX_COUNT Detections.
static const int OUTPUT_SIZE = Yolo::MAX_OUTPUT_BBOX_COUNT * sizeof(Yolo::Detection) / sizeof(float) + 1;  // we assume the yololayer outputs no more than MAX_OUTPUT_BBOX_COUNT boxes that conf >= 0.1
const char* INPUT_BLOB_NAME = "data";
const char* OUTPUT_BLOB_NAME = "prob";
static Logger gLogger;
// Scale a channel count by the width multiple `gw`, rounded UP to the next
// multiple of `divisor` (yolov5 keeps channel counts divisible by 8).
static int get_width(int x, float gw, int divisor = 8) {
    const float scaled = x * gw;
    const int units = static_cast<int>(std::ceil(scaled / divisor));
    return units * divisor;
}
// Scale a block-repeat count by the depth multiple `gd`, rounding exact
// halves to the even neighbour (mirrors Python's round()) and clamping to
// at least 1.
static int get_depth(int x, float gd) {
    if (x == 1) return 1;
    const float scaled = x * gd;
    int repeats = std::round(scaled);
    const bool exact_half = (scaled - int(scaled) == 0.5);
    const bool lower_is_even = (int(scaled) % 2 == 0);
    if (exact_half && lower_is_even) {
        // std::round rounds halves away from zero; step back to the even value.
        --repeats;
    }
    return std::max<int>(repeats, 1);
}
// Construct the yolov5 P5 network (backbone + PANet head + Detect plugin)
// with the TensorRT layer API, loading weights from `wts_name`.
// gd/gw are the depth/width multiples selecting the model size (n/s/m/l/x).
// Returns a built engine; the caller owns it (and must serialize/destroy it).
ICudaEngine* build_engine(unsigned int maxBatchSize, IBuilder* builder, IBuilderConfig* config, DataType dt, float& gd, float& gw, std::string& wts_name) {
    INetworkDefinition* network = builder->createNetworkV2(0U);
    // Create input tensor of shape {3, INPUT_H, INPUT_W} with name INPUT_BLOB_NAME
    ITensor* data = network->addInput(INPUT_BLOB_NAME, dt, Dims3{ 3, INPUT_H, INPUT_W });
    assert(data);
    std::map<std::string, Weights> weightMap = loadWeights(wts_name);
    /* ------ yolov5 backbone------ */
    auto conv0 = convBlock(network, weightMap, *data, get_width(64, gw), 6, 2, 1, "model.0");
    assert(conv0);
    auto conv1 = convBlock(network, weightMap, *conv0->getOutput(0), get_width(128, gw), 3, 2, 1, "model.1");
    auto bottleneck_CSP2 = C3(network, weightMap, *conv1->getOutput(0), get_width(128, gw), get_width(128, gw), get_depth(3, gd), true, 1, 0.5, "model.2");
    auto conv3 = convBlock(network, weightMap, *bottleneck_CSP2->getOutput(0), get_width(256, gw), 3, 2, 1, "model.3");
    auto bottleneck_csp4 = C3(network, weightMap, *conv3->getOutput(0), get_width(256, gw), get_width(256, gw), get_depth(6, gd), true, 1, 0.5, "model.4");
    auto conv5 = convBlock(network, weightMap, *bottleneck_csp4->getOutput(0), get_width(512, gw), 3, 2, 1, "model.5");
    auto bottleneck_csp6 = C3(network, weightMap, *conv5->getOutput(0), get_width(512, gw), get_width(512, gw), get_depth(9, gd), true, 1, 0.5, "model.6");
    auto conv7 = convBlock(network, weightMap, *bottleneck_csp6->getOutput(0), get_width(1024, gw), 3, 2, 1, "model.7");
    auto bottleneck_csp8 = C3(network, weightMap, *conv7->getOutput(0), get_width(1024, gw), get_width(1024, gw), get_depth(3, gd), true, 1, 0.5, "model.8");
    auto spp9 = SPPF(network, weightMap, *bottleneck_csp8->getOutput(0), get_width(1024, gw), get_width(1024, gw), 5, "model.9");
    /* ------ yolov5 head ------ */
    auto conv10 = convBlock(network, weightMap, *spp9->getOutput(0), get_width(512, gw), 1, 1, 1, "model.10");
    // Upsample + concat with the matching backbone stage (FPN top-down path).
    auto upsample11 = network->addResize(*conv10->getOutput(0));
    assert(upsample11);
    upsample11->setResizeMode(ResizeMode::kNEAREST);
    upsample11->setOutputDimensions(bottleneck_csp6->getOutput(0)->getDimensions());
    ITensor* inputTensors12[] = { upsample11->getOutput(0), bottleneck_csp6->getOutput(0) };
    auto cat12 = network->addConcatenation(inputTensors12, 2);
    auto bottleneck_csp13 = C3(network, weightMap, *cat12->getOutput(0), get_width(1024, gw), get_width(512, gw), get_depth(3, gd), false, 1, 0.5, "model.13");
    auto conv14 = convBlock(network, weightMap, *bottleneck_csp13->getOutput(0), get_width(256, gw), 1, 1, 1, "model.14");
    auto upsample15 = network->addResize(*conv14->getOutput(0));
    assert(upsample15);
    upsample15->setResizeMode(ResizeMode::kNEAREST);
    upsample15->setOutputDimensions(bottleneck_csp4->getOutput(0)->getDimensions());
    ITensor* inputTensors16[] = { upsample15->getOutput(0), bottleneck_csp4->getOutput(0) };
    auto cat16 = network->addConcatenation(inputTensors16, 2);
    auto bottleneck_csp17 = C3(network, weightMap, *cat16->getOutput(0), get_width(512, gw), get_width(256, gw), get_depth(3, gd), false, 1, 0.5, "model.17");
    /* ------ detect ------ */
    // Three 1x1 detection convs (one per scale), then the custom YOLO plugin
    // that decodes them into the "prob" output blob.
    IConvolutionLayer* det0 = network->addConvolutionNd(*bottleneck_csp17->getOutput(0), 3 * (Yolo::CLASS_NUM + 5), DimsHW{ 1, 1 }, weightMap["model.24.m.0.weight"], weightMap["model.24.m.0.bias"]);
    auto conv18 = convBlock(network, weightMap, *bottleneck_csp17->getOutput(0), get_width(256, gw), 3, 2, 1, "model.18");
    ITensor* inputTensors19[] = { conv18->getOutput(0), conv14->getOutput(0) };
    auto cat19 = network->addConcatenation(inputTensors19, 2);
    auto bottleneck_csp20 = C3(network, weightMap, *cat19->getOutput(0), get_width(512, gw), get_width(512, gw), get_depth(3, gd), false, 1, 0.5, "model.20");
    IConvolutionLayer* det1 = network->addConvolutionNd(*bottleneck_csp20->getOutput(0), 3 * (Yolo::CLASS_NUM + 5), DimsHW{ 1, 1 }, weightMap["model.24.m.1.weight"], weightMap["model.24.m.1.bias"]);
    auto conv21 = convBlock(network, weightMap, *bottleneck_csp20->getOutput(0), get_width(512, gw), 3, 2, 1, "model.21");
    ITensor* inputTensors22[] = { conv21->getOutput(0), conv10->getOutput(0) };
    auto cat22 = network->addConcatenation(inputTensors22, 2);
    auto bottleneck_csp23 = C3(network, weightMap, *cat22->getOutput(0), get_width(1024, gw), get_width(1024, gw), get_depth(3, gd), false, 1, 0.5, "model.23");
    IConvolutionLayer* det2 = network->addConvolutionNd(*bottleneck_csp23->getOutput(0), 3 * (Yolo::CLASS_NUM + 5), DimsHW{ 1, 1 }, weightMap["model.24.m.2.weight"], weightMap["model.24.m.2.bias"]);
    auto yolo = addYoLoLayer(network, weightMap, "model.24", std::vector<IConvolutionLayer*>{det0, det1, det2});
    yolo->getOutput(0)->setName(OUTPUT_BLOB_NAME);
    network->markOutput(*yolo->getOutput(0));
    // Build engine
    builder->setMaxBatchSize(maxBatchSize);
    config->setMaxWorkspaceSize(16 * (1 << 20)); // 16MB
#if defined(USE_FP16)
    config->setFlag(BuilderFlag::kFP16);
#elif defined(USE_INT8)
    std::cout << "Your platform support int8: " << (builder->platformHasFastInt8() ? "true" : "false") << std::endl;
    assert(builder->platformHasFastInt8());
    config->setFlag(BuilderFlag::kINT8);
    Int8EntropyCalibrator2* calibrator = new Int8EntropyCalibrator2(1, INPUT_W, INPUT_H, "./coco_calib/", "int8calib.table", INPUT_BLOB_NAME);
    config->setInt8Calibrator(calibrator);
#endif
    std::cout << "Building engine, please wait for a while..." << std::endl;
    ICudaEngine* engine = builder->buildEngineWithConfig(*network, *config);
    std::cout << "Build engine successfully!" << std::endl;
    // Don't need the network any more
    network->destroy();
    // Release host memory
    for (auto& mem : weightMap)
    {
        free((void*)(mem.second.values));
    }
    return engine;
}
// Same as build_engine() but for the P6 variants (n6/s6/m6/l6/x6): an extra
// backbone stage and a fourth detection head ("model.33" weights).
ICudaEngine* build_engine_p6(unsigned int maxBatchSize, IBuilder* builder, IBuilderConfig* config, DataType dt, float& gd, float& gw, std::string& wts_name) {
    INetworkDefinition* network = builder->createNetworkV2(0U);
    // Create input tensor of shape {3, INPUT_H, INPUT_W} with name INPUT_BLOB_NAME
    ITensor* data = network->addInput(INPUT_BLOB_NAME, dt, Dims3{ 3, INPUT_H, INPUT_W });
    assert(data);
    std::map<std::string, Weights> weightMap = loadWeights(wts_name);
    /* ------ yolov5 backbone------ */
    auto conv0 = convBlock(network, weightMap, *data, get_width(64, gw), 6, 2, 1, "model.0");
    auto conv1 = convBlock(network, weightMap, *conv0->getOutput(0), get_width(128, gw), 3, 2, 1, "model.1");
    auto c3_2 = C3(network, weightMap, *conv1->getOutput(0), get_width(128, gw), get_width(128, gw), get_depth(3, gd), true, 1, 0.5, "model.2");
    auto conv3 = convBlock(network, weightMap, *c3_2->getOutput(0), get_width(256, gw), 3, 2, 1, "model.3");
    auto c3_4 = C3(network, weightMap, *conv3->getOutput(0), get_width(256, gw), get_width(256, gw), get_depth(6, gd), true, 1, 0.5, "model.4");
    auto conv5 = convBlock(network, weightMap, *c3_4->getOutput(0), get_width(512, gw), 3, 2, 1, "model.5");
    auto c3_6 = C3(network, weightMap, *conv5->getOutput(0), get_width(512, gw), get_width(512, gw), get_depth(9, gd), true, 1, 0.5, "model.6");
    auto conv7 = convBlock(network, weightMap, *c3_6->getOutput(0), get_width(768, gw), 3, 2, 1, "model.7");
    auto c3_8 = C3(network, weightMap, *conv7->getOutput(0), get_width(768, gw), get_width(768, gw), get_depth(3, gd), true, 1, 0.5, "model.8");
    auto conv9 = convBlock(network, weightMap, *c3_8->getOutput(0), get_width(1024, gw), 3, 2, 1, "model.9");
    auto c3_10 = C3(network, weightMap, *conv9->getOutput(0), get_width(1024, gw), get_width(1024, gw), get_depth(3, gd), true, 1, 0.5, "model.10");
    auto sppf11 = SPPF(network, weightMap, *c3_10->getOutput(0), get_width(1024, gw), get_width(1024, gw), 5, "model.11");
    /* ------ yolov5 head ------ */
    // Top-down path: upsample + concat with successively earlier stages.
    auto conv12 = convBlock(network, weightMap, *sppf11->getOutput(0), get_width(768, gw), 1, 1, 1, "model.12");
    auto upsample13 = network->addResize(*conv12->getOutput(0));
    assert(upsample13);
    upsample13->setResizeMode(ResizeMode::kNEAREST);
    upsample13->setOutputDimensions(c3_8->getOutput(0)->getDimensions());
    ITensor* inputTensors14[] = { upsample13->getOutput(0), c3_8->getOutput(0) };
    auto cat14 = network->addConcatenation(inputTensors14, 2);
    auto c3_15 = C3(network, weightMap, *cat14->getOutput(0), get_width(1536, gw), get_width(768, gw), get_depth(3, gd), false, 1, 0.5, "model.15");
    auto conv16 = convBlock(network, weightMap, *c3_15->getOutput(0), get_width(512, gw), 1, 1, 1, "model.16");
    auto upsample17 = network->addResize(*conv16->getOutput(0));
    assert(upsample17);
    upsample17->setResizeMode(ResizeMode::kNEAREST);
    upsample17->setOutputDimensions(c3_6->getOutput(0)->getDimensions());
    ITensor* inputTensors18[] = { upsample17->getOutput(0), c3_6->getOutput(0) };
    auto cat18 = network->addConcatenation(inputTensors18, 2);
    auto c3_19 = C3(network, weightMap, *cat18->getOutput(0), get_width(1024, gw), get_width(512, gw), get_depth(3, gd), false, 1, 0.5, "model.19");
    auto conv20 = convBlock(network, weightMap, *c3_19->getOutput(0), get_width(256, gw), 1, 1, 1, "model.20");
    auto upsample21 = network->addResize(*conv20->getOutput(0));
    assert(upsample21);
    upsample21->setResizeMode(ResizeMode::kNEAREST);
    upsample21->setOutputDimensions(c3_4->getOutput(0)->getDimensions());
    ITensor* inputTensors21[] = { upsample21->getOutput(0), c3_4->getOutput(0) };
    auto cat22 = network->addConcatenation(inputTensors21, 2);
    auto c3_23 = C3(network, weightMap, *cat22->getOutput(0), get_width(512, gw), get_width(256, gw), get_depth(3, gd), false, 1, 0.5, "model.23");
    // Bottom-up path back down through the scales.
    auto conv24 = convBlock(network, weightMap, *c3_23->getOutput(0), get_width(256, gw), 3, 2, 1, "model.24");
    ITensor* inputTensors25[] = { conv24->getOutput(0), conv20->getOutput(0) };
    auto cat25 = network->addConcatenation(inputTensors25, 2);
    auto c3_26 = C3(network, weightMap, *cat25->getOutput(0), get_width(1024, gw), get_width(512, gw), get_depth(3, gd), false, 1, 0.5, "model.26");
    auto conv27 = convBlock(network, weightMap, *c3_26->getOutput(0), get_width(512, gw), 3, 2, 1, "model.27");
    ITensor* inputTensors28[] = { conv27->getOutput(0), conv16->getOutput(0) };
    auto cat28 = network->addConcatenation(inputTensors28, 2);
    auto c3_29 = C3(network, weightMap, *cat28->getOutput(0), get_width(1536, gw), get_width(768, gw), get_depth(3, gd), false, 1, 0.5, "model.29");
    auto conv30 = convBlock(network, weightMap, *c3_29->getOutput(0), get_width(768, gw), 3, 2, 1, "model.30");
    ITensor* inputTensors31[] = { conv30->getOutput(0), conv12->getOutput(0) };
    auto cat31 = network->addConcatenation(inputTensors31, 2);
    auto c3_32 = C3(network, weightMap, *cat31->getOutput(0), get_width(2048, gw), get_width(1024, gw), get_depth(3, gd), false, 1, 0.5, "model.32");
    /* ------ detect ------ */
    // Four 1x1 detection convs (one per scale) feeding the YOLO decode plugin.
    IConvolutionLayer* det0 = network->addConvolutionNd(*c3_23->getOutput(0), 3 * (Yolo::CLASS_NUM + 5), DimsHW{ 1, 1 }, weightMap["model.33.m.0.weight"], weightMap["model.33.m.0.bias"]);
    IConvolutionLayer* det1 = network->addConvolutionNd(*c3_26->getOutput(0), 3 * (Yolo::CLASS_NUM + 5), DimsHW{ 1, 1 }, weightMap["model.33.m.1.weight"], weightMap["model.33.m.1.bias"]);
    IConvolutionLayer* det2 = network->addConvolutionNd(*c3_29->getOutput(0), 3 * (Yolo::CLASS_NUM + 5), DimsHW{ 1, 1 }, weightMap["model.33.m.2.weight"], weightMap["model.33.m.2.bias"]);
    IConvolutionLayer* det3 = network->addConvolutionNd(*c3_32->getOutput(0), 3 * (Yolo::CLASS_NUM + 5), DimsHW{ 1, 1 }, weightMap["model.33.m.3.weight"], weightMap["model.33.m.3.bias"]);
    auto yolo = addYoLoLayer(network, weightMap, "model.33", std::vector<IConvolutionLayer*>{det0, det1, det2, det3});
    yolo->getOutput(0)->setName(OUTPUT_BLOB_NAME);
    network->markOutput(*yolo->getOutput(0));
    // Build engine
    builder->setMaxBatchSize(maxBatchSize);
    config->setMaxWorkspaceSize(16 * (1 << 20)); // 16MB
#if defined(USE_FP16)
    config->setFlag(BuilderFlag::kFP16);
#elif defined(USE_INT8)
    std::cout << "Your platform support int8: " << (builder->platformHasFastInt8() ? "true" : "false") << std::endl;
    assert(builder->platformHasFastInt8());
    config->setFlag(BuilderFlag::kINT8);
    Int8EntropyCalibrator2* calibrator = new Int8EntropyCalibrator2(1, INPUT_W, INPUT_H, "./coco_calib/", "int8calib.table", INPUT_BLOB_NAME);
    config->setInt8Calibrator(calibrator);
#endif
    std::cout << "Building engine, please wait for a while..." << std::endl;
    ICudaEngine* engine = builder->buildEngineWithConfig(*network, *config);
    std::cout << "Build engine successfully!" << std::endl;
    // Don't need the network any more
    network->destroy();
    // Release host memory
    for (auto& mem : weightMap)
    {
        free((void*)(mem.second.values));
    }
    return engine;
}
// Build the P5 or P6 engine from .wts weights and serialize it into
// `*modelStream` for writing to disk. Caller owns (and must destroy) the
// returned IHostMemory.
void APIToModel(unsigned int maxBatchSize, IHostMemory** modelStream, bool& is_p6, float& gd, float& gw, std::string& wts_name) {
    // Create builder
    IBuilder* builder = createInferBuilder(gLogger);
    IBuilderConfig* config = builder->createBuilderConfig();
    // Create model to populate the network, then set the outputs and create an engine
    ICudaEngine *engine = nullptr;
    if (is_p6) {
        engine = build_engine_p6(maxBatchSize, builder, config, DataType::kFLOAT, gd, gw, wts_name);
    } else {
        engine = build_engine(maxBatchSize, builder, config, DataType::kFLOAT, gd, gw, wts_name);
    }
    assert(engine != nullptr);
    // Serialize the engine
    (*modelStream) = engine->serialize();
    // Close everything down
    engine->destroy();
    builder->destroy();
    config->destroy();
}
//void doInference(IExecutionContext& context, cudaStream_t& stream, void **buffers, float* output, int batchSize) {
// // infer on the batch asynchronously, and DMA output back to host
// context.enqueue(batchSize, buffers, stream, nullptr);
// CUDA_CHECK(cudaMemcpyAsync(output, buffers[1], batchSize * OUTPUT_SIZE * sizeof(float), cudaMemcpyDeviceToHost, stream));
// cudaStreamSynchronize(stream);
//}
// Copy one batch to the GPU, run the engine asynchronously on `stream`,
// copy the decoded output blob back to `output`, and block until done.
// buffers[0]/buffers[1] are the pre-allocated device input/output bindings.
void doInference(IExecutionContext& context, cudaStream_t& stream, void **buffers, float* input, float* output, int batchSize) {
    // DMA input batch data to device, infer on the batch asynchronously, and DMA output back to host
    CUDA_CHECK(cudaMemcpyAsync(buffers[0], input, batchSize * 3 * INPUT_H * INPUT_W * sizeof(float), cudaMemcpyHostToDevice, stream));
    context.enqueue(batchSize, buffers, stream, nullptr);
    CUDA_CHECK(cudaMemcpyAsync(output, buffers[1], batchSize * OUTPUT_SIZE * sizeof(float), cudaMemcpyDeviceToHost, stream));
    // Wrap the synchronize in CUDA_CHECK like every other CUDA call here:
    // async copy/kernel errors surface at this point and were silently
    // discarded before.
    CUDA_CHECK(cudaStreamSynchronize(stream));
}
// Parse command-line arguments.
//   -s <wts> <engine> <model>          serialize mode; <model> is one of
//                                      n/s/m/l/x (optionally suffixed with 6)
//   -s <wts> <engine> c[6] <gd> <gw>   custom depth/width multiples
//   -d <engine> <image_dir>            inference mode
// Returns false on any malformed invocation; outputs are set on success.
bool parse_args(int argc, char** argv, std::string& wts, std::string& engine, bool& is_p6, float& gd, float& gw, std::string& img_dir) {
    if (argc < 4) return false;
    const std::string mode(argv[1]);
    if (mode == "-d" && argc == 4) {
        engine = argv[2];
        img_dir = argv[3];
        return true;
    }
    if (mode != "-s" || (argc != 5 && argc != 7)) return false;
    wts = argv[2];
    engine = argv[3];
    const std::string net(argv[4]);
    switch (net[0]) {
        case 'n': gd = 0.33; gw = 0.25; break;
        case 's': gd = 0.33; gw = 0.50; break;
        case 'm': gd = 0.67; gw = 0.75; break;
        case 'l': gd = 1.0;  gw = 1.0;  break;
        case 'x': gd = 1.33; gw = 1.25; break;
        case 'c':
            // Custom multiples require the two extra positional arguments.
            if (argc != 7) return false;
            gd = atof(argv[5]);
            gw = atof(argv[6]);
            break;
        default:
            return false;
    }
    // A trailing '6' (e.g. "s6") selects the P6 architecture.
    if (net.size() == 2 && net[1] == '6') {
        is_p6 = true;
    }
    return true;
}
// Entry point. Two modes (see parse_args): "-s" builds and serializes an
// engine from .wts weights; "-d" deserializes an engine and runs detection
// over every image in a directory, writing annotated copies to "_<name>".
int main(int argc, char** argv) {
    cudaSetDevice(DEVICE);
    std::string wts_name = "";
    std::string engine_name = "";
    bool is_p6 = false;
    float gd = 0.0f, gw = 0.0f;
    std::string img_dir;
    if (!parse_args(argc, argv, wts_name, engine_name, is_p6, gd, gw, img_dir)) {
        std::cerr << "arguments not right!" << std::endl;
        std::cerr << "./yolov5 -s [.wts] [.engine] [n/s/m/l/x/n6/s6/m6/l6/x6 or c/c6 gd gw] // serialize model to plan file" << std::endl;
        std::cerr << "./yolov5 -d [.engine] ../samples // deserialize plan file and run inference" << std::endl;
        return -1;
    }
    // create a model using the API directly and serialize it to a stream
    if (!wts_name.empty()) {
        IHostMemory* modelStream{ nullptr };
        APIToModel(BATCH_SIZE, &modelStream, is_p6, gd, gw, wts_name);
        assert(modelStream != nullptr);
        std::ofstream p(engine_name, std::ios::binary);
        if (!p) {
            std::cerr << "could not open plan output file" << std::endl;
            return -1;
        }
        p.write(reinterpret_cast<const char*>(modelStream->data()), modelStream->size());
        modelStream->destroy();
        return 0;
    }
    // deserialize the .engine and run inference
    std::ifstream file(engine_name, std::ios::binary);
    if (!file.good()) {
        std::cerr << "read " << engine_name << " error!" << std::endl;
        return -1;
    }
    char *trtModelStream = nullptr;
    size_t size = 0;
    // Read the whole serialized engine into memory.
    file.seekg(0, file.end);
    size = file.tellg();
    file.seekg(0, file.beg);
    trtModelStream = new char[size];
    assert(trtModelStream);
    file.read(trtModelStream, size);
    file.close();
    std::vector<std::string> file_names;
    if (read_files_in_dir(img_dir.c_str(), file_names) < 0) {
        std::cerr << "read_files_in_dir failed." << std::endl;
        return -1;
    }
    // prepare input data ---------------------------
    // Host-side staging buffers (static: too large for the stack).
    static float data[BATCH_SIZE * 3 * INPUT_H * INPUT_W];
    //for (int i = 0; i < 3 * INPUT_H * INPUT_W; i++)
    //    data[i] = 1.0;
    static float prob[BATCH_SIZE * OUTPUT_SIZE];
    IRuntime* runtime = createInferRuntime(gLogger);
    assert(runtime != nullptr);
    ICudaEngine* engine = runtime->deserializeCudaEngine(trtModelStream, size);
    assert(engine != nullptr);
    IExecutionContext* context = engine->createExecutionContext();
    assert(context != nullptr);
    delete[] trtModelStream;
    assert(engine->getNbBindings() == 2);
    void* buffers[2];
    // In order to bind the buffers, we need to know the names of the input and output tensors.
    // Note that indices are guaranteed to be less than IEngine::getNbBindings()
    const int inputIndex = engine->getBindingIndex(INPUT_BLOB_NAME);
    const int outputIndex = engine->getBindingIndex(OUTPUT_BLOB_NAME);
    assert(inputIndex == 0);
    assert(outputIndex == 1);
    // Create GPU buffers on device
    CUDA_CHECK(cudaMalloc(&buffers[inputIndex], BATCH_SIZE * 3 * INPUT_H * INPUT_W * sizeof(float)));
    CUDA_CHECK(cudaMalloc(&buffers[outputIndex], BATCH_SIZE * OUTPUT_SIZE * sizeof(float)));
    // Create stream
    cudaStream_t stream;
    CUDA_CHECK(cudaStreamCreate(&stream));
    // Process images in groups of BATCH_SIZE (last group may be smaller).
    int fcount = 0;
    for (int f = 0; f < (int)file_names.size(); f++) {
        fcount++;
        if (fcount < BATCH_SIZE && f + 1 != (int)file_names.size()) continue;
        for (int b = 0; b < fcount; b++) {
            cv::Mat img = cv::imread(img_dir + "/" + file_names[f - fcount + 1 + b]);
            if (img.empty()) continue;
            cv::Mat pr_img = preprocess_img(img, INPUT_W, INPUT_H); // letterbox BGR to RGB
            int i = 0;
            // HWC uint8 BGR -> CHW float RGB in [0, 1].
            for (int row = 0; row < INPUT_H; ++row) {
                uchar* uc_pixel = pr_img.data + row * pr_img.step;
                for (int col = 0; col < INPUT_W; ++col) {
                    data[b * 3 * INPUT_H * INPUT_W + i] = (float)uc_pixel[2] / 255.0;
                    data[b * 3 * INPUT_H * INPUT_W + i + INPUT_H * INPUT_W] = (float)uc_pixel[1] / 255.0;
                    data[b * 3 * INPUT_H * INPUT_W + i + 2 * INPUT_H * INPUT_W] = (float)uc_pixel[0] / 255.0;
                    uc_pixel += 3;
                    ++i;
                }
            }
        }
        // Run inference
        auto start = std::chrono::system_clock::now();
        doInference(*context, stream, buffers, data, prob, BATCH_SIZE);
        auto end = std::chrono::system_clock::now();
        std::cout << std::chrono::duration_cast<std::chrono::milliseconds>(end - start).count() << "ms" << std::endl;
        // NMS on the host, per image in the batch.
        std::vector<std::vector<Yolo::Detection>> batch_res(fcount);
        for (int b = 0; b < fcount; b++) {
            auto& res = batch_res[b];
            nms(res, &prob[b * OUTPUT_SIZE], CONF_THRESH, NMS_THRESH);
        }
        // Draw boxes (mapped back to original image coords) and save.
        for (int b = 0; b < fcount; b++) {
            auto& res = batch_res[b];
            //std::cout << res.size() << std::endl;
            cv::Mat img = cv::imread(img_dir + "/" + file_names[f - fcount + 1 + b]);
            for (size_t j = 0; j < res.size(); j++) {
                cv::Rect r = get_rect(img, res[j].bbox);
                cv::rectangle(img, r, cv::Scalar(0x27, 0xC1, 0x36), 2);
                cv::putText(img, std::to_string((int)res[j].class_id), cv::Point(r.x, r.y - 1), cv::FONT_HERSHEY_PLAIN, 1.2, cv::Scalar(0xFF, 0xFF, 0xFF), 2);
            }
            cv::imwrite("_" + file_names[f - fcount + 1 + b], img);
        }
        fcount = 0;
    }
    // Release stream and buffers
    cudaStreamDestroy(stream);
    CUDA_CHECK(cudaFree(buffers[inputIndex]));
    CUDA_CHECK(cudaFree(buffers[outputIndex]));
    // Destroy the engine
    context->destroy();
    engine->destroy();
    runtime->destroy();
    // Print histogram of the output distribution
    //std::cout << "\nOutput:\n\n";
    //for (unsigned int i = 0; i < OUTPUT_SIZE; i++)
    //{
    //    std::cout << prob[i] << ", ";
    //    if (i % 10 == 0) std::cout << std::endl;
    //}
    //std::cout << std::endl;
    return 0;
}
// Opaque state handed across the C ABI: everything Init() allocates and
// Detect()/cuda_free() consume.
typedef struct
{
    float *data;               // host-side CHW input staging buffer
    float *prob;               // host-side output buffer (count + Detections)
    IRuntime *runtime;
    ICudaEngine *engine;
    IExecutionContext *context;
    void* buffers[2];          // device input/output bindings
    cudaStream_t stream;
    int inputIndex;
    int outputIndex;
}Yolov5TRTContext;
// C-ABI entry: deserialize a prebuilt TensorRT engine from `model_path` and
// allocate all host/device buffers Detect() needs. Returns an opaque handle
// to pass to Detect()/cuda_free().
extern "C" API void* Init(char *model_path)
{
    cudaSetDevice(DEVICE);
    // create a model using the API directly and serialize it to a stream
    char *trtModelStream{ nullptr };
    size_t size_e{ 0 };
    std::string engine_name = model_path;
    std::ifstream file(engine_name, std::ios::binary);
    Yolov5TRTContext * trt = new Yolov5TRTContext();
    if (file.good()) {
        file.seekg(0, file.end);
        size_e = file.tellg();
        file.seekg(0, file.beg);
        trtModelStream = new char[size_e];
        assert(trtModelStream);
        file.read(trtModelStream, size_e);
        file.close();
    } else {
        // Previously this fell through silently and crashed on the
        // deserialize assert; at least report which file failed.
        std::cerr << "read " << engine_name << " error!" << std::endl;
    }
    trt->runtime = createInferRuntime(gLogger);
    assert(trt->runtime != nullptr);
    trt->engine = trt->runtime->deserializeCudaEngine(trtModelStream, size_e);
    assert(trt->engine != nullptr);
    trt->context = trt->engine->createExecutionContext();
    assert(trt->context != nullptr);
    // The serialized copy is no longer needed once deserialized; the original
    // leaked it (the delete[] was commented out).
    delete[] trtModelStream;
    assert(trt->engine->getNbBindings() == 2);
    trt->data = new float[BATCH_SIZE * 3 * INPUT_H * INPUT_W];
    trt->prob = new float[BATCH_SIZE * OUTPUT_SIZE];
    trt->inputIndex = trt->engine->getBindingIndex(INPUT_BLOB_NAME);
    trt->outputIndex = trt->engine->getBindingIndex(OUTPUT_BLOB_NAME);
    assert(trt->inputIndex == 0);
    assert(trt->outputIndex == 1);
    // Create GPU buffers on device
    CUDA_CHECK(cudaMalloc(&trt->buffers[trt->inputIndex], BATCH_SIZE * 3 * INPUT_H * INPUT_W * sizeof(float)));
    CUDA_CHECK(cudaMalloc(&trt->buffers[trt->outputIndex], BATCH_SIZE * OUTPUT_SIZE * sizeof(float)));
    // Create stream
    CUDA_CHECK(cudaStreamCreate(&trt->stream));
    // In order to bind the buffers, we need to know the names of the input and output tensors.
    // Note that indices are guaranteed to be less than IEngine::getNbBindings()
    return (void*)trt;
}
// C-ABI entry: run detection on a BGR image given as raw bytes
// (rows x cols x 3). Each result row in res_array is
// [x, y, w, h, class_id, conf] in the source image's pixel coordinates.
// NOTE(review): res_array is assumed large enough for every detection that
// survives NMS (bounded by Yolo::MAX_OUTPUT_BBOX_COUNT) — confirm callers
// allocate accordingly.
extern "C" API void Detect(void *h, int rows, int cols, unsigned char *src_data, float(*res_array)[6])
{
    Yolov5TRTContext *trt = (Yolov5TRTContext *)h;
    cv::Mat img = cv::Mat(rows, cols, CV_8UC3, src_data);
    // prepare input data ---------------------------
    cv::Mat pr_img = preprocess_img(img, INPUT_W, INPUT_H); // letterbox BGR to RGB
    int i = 0;
    // HWC uint8 BGR -> CHW float RGB in [0, 1], into the staging buffer.
    for (int row = 0; row < INPUT_H; ++row) {
        uchar* uc_pixel = pr_img.data + row * pr_img.step;
        for (int col = 0; col < INPUT_W; ++col)
        {
            trt->data[0 * 3 * INPUT_H * INPUT_W + i] = (float)uc_pixel[2] / 255.0;
            trt->data[0 * 3 * INPUT_H * INPUT_W + i + INPUT_H * INPUT_W] = (float)uc_pixel[1] / 255.0;
            trt->data[0 * 3 * INPUT_H * INPUT_W + i + 2 * INPUT_H * INPUT_W] = (float)uc_pixel[0] / 255.0;
            uc_pixel += 3;
            ++i;
        }
    }
    // Run inference
    doInference(*trt->context, trt->stream, trt->buffers, trt->data, trt->prob, BATCH_SIZE);
    std::vector<std::vector<Yolo::Detection>> batch_res(1);
    auto& res = batch_res[0];
    nms(res, &trt->prob[0 * OUTPUT_SIZE], CONF_THRESH, NMS_THRESH);
    // (dropped an unused local `len` that shadowed res.size())
    for (size_t j = 0; j < res.size(); j++) {
        cv::Rect r = get_rect(img, res[j].bbox);
        res_array[j][0] = r.x;
        res_array[j][1] = r.y;
        res_array[j][2] = r.width;
        res_array[j][3] = r.height;
        res_array[j][4] = res[j].class_id;
        res_array[j][5] = res[j].conf;
    }
}
extern "C" API void cuda_free(void*h) {
Yolov5TRTContext *trt = (Yolov5TRTContext *)h;
cudaStreamDestroy(trt->stream);
CUDA_CHECK(cudaFree(trt->buffers[trt->inputIndex]));
CUDA_CHECK(cudaFree(trt->buffers[trt->outputIndex]));
trt->context->destroy();
trt->engine->destroy();
trt->runtime->destroy();
}
================================================
FILE: Jeston nano_tensorrt/部署/python_trt.py
================================================
from ctypes import *
import cv2
import numpy as np
import numpy.ctypeslib as npct
import time
import math
from PIL import Image
# -----------------------------------Stereo camera calibration parameters-------------------------------------
# left_camera_matrix   intrinsic matrix of the left camera
# right_camera_matrix  intrinsic matrix of the right camera
#
# left_distortion      distortion coefficients of the left camera, format (K1, K2, P1, P2, 0)
# right_distortion     distortion coefficients of the right camera
# -------------------------------------------------------------------------------------------------------------
# Left lens intrinsics (focal lengths / principal point)
left_camera_matrix = np.array([[516.5066236,-1.444673028,320.2950423],[0,516.5816117,270.7881873],[0.,0.,1.]])
right_camera_matrix = np.array([[511.8428182,1.295112628,317.310253],[0,513.0748795,269.5885026],[0.,0.,1.]])
# Distortion coefficients: K1, K2, K3 radial; P1, P2 tangential
left_distortion = np.array([[-0.046645194,0.077595167, 0.012476819,-0.000711358,0]])
right_distortion = np.array([[-0.061588946,0.122384376,0.011081232,-0.000750439,0]])
# Rotation matrix between the two cameras
R = np.array([[0.999911333,-0.004351508,0.012585312],
[0.004184066,0.999902792,0.013300386],
[-0.012641965,-0.013246549,0.999832341]])
# Translation vector (presumably millimetres, from the MATLAB calibration — TODO confirm)
T = np.array([-120.3559901,-0.188953775,-0.662073075])
size = (640, 480)
R1, R2, P1, P2, Q, validPixROI1, validPixROI2 = cv2.stereoRectify(left_camera_matrix, left_distortion,
right_camera_matrix, right_distortion, size, R,
T)
# Rectification lookup maps: pair each raw-image pixel with its rectified position
left_map1, left_map2 = cv2.initUndistortRectifyMap(left_camera_matrix, left_distortion, R1, P1, size, cv2.CV_16SC2)
right_map1, right_map2 = cv2.initUndistortRectifyMap(right_camera_matrix, right_distortion, R2, P2, size, cv2.CV_16SC2)
print(Q)
# ---------------------------------------------------------------------------------------------------------
# classes  COCO class names; a network class id of 0 maps to 'person', and so on
# ---------------------------------------------------------------------------------------------------------
classes = ('person','bicycle','car','motorbike','aeroplane','bus','train','truck','boat','traffic light',
'fire hydrant','stop sign','parking meter','bench','bird','cat','dog','horse','sheep','cow','elephant',
'bear','zebra','giraffe','backpack','umbrella','handbag','tie','suitcase','frisbee','skis','snowboard',
'sports ball','kite','baseball bat','baseball glove','skateboard','surfboard','tennis racket','bottle',
'wine glass','cup','fork','knife','spoon','bowl','banana','apple','sandwich','orange','broccoli','carrot',
'hot dog','pizza','donut','cake','chair','sofa','pottedplant','bed','diningtable','toilet','tvmonitor',
'laptop','mouse','remote','keyboard','cell phone','microwave','oven','toaster','sink','refrigerator',
'book','clock','vase','scissors','teddy bear','hair drier','toothbrush')
# ---------------------------------------------------------------------------------------------------------
# Detector()  ctypes wrapper around the TensorRT-accelerated yolov5 shared library
# ---------------------------------------------------------------------------------------------------------
class Detector():
    def __init__(self, model_path, dll_path):
        lib = CDLL(dll_path)
        # Declare the C signatures before any call is made.
        lib.Init.restype = c_void_p
        lib.Init.argtypes = [c_void_p]
        lib.Detect.argtypes = [
            c_void_p, c_int, c_int, POINTER(c_ubyte),
            npct.ndpointer(dtype=np.float32, ndim=2, shape=(50, 6), flags="C_CONTIGUOUS"),
        ]
        lib.cuda_free.argtypes = [c_void_p]
        self.yolov5 = lib
        self.c_point = lib.Init(model_path)

    def predict(self, img):
        # Returns rows of [x, y, w, h, class_id, conf]; all-zero rows are dropped.
        height, width = img.shape[0], img.shape[1]
        res_arr = np.zeros((50, 6), dtype=np.float32)
        self.yolov5.Detect(self.c_point, c_int(height), c_int(width),
                           img.ctypes.data_as(POINTER(c_ubyte)), res_arr)
        self.bbox_array = res_arr[~(res_arr == 0).all(1)]
        return self.bbox_array

    def free(self):
        # Release the GPU/TensorRT resources held on the C side.
        self.yolov5.cuda_free(self.c_point)
# ------------------------------------visualize--------------------------------------------------------------
# img                  frame to draw on (modified in place and returned)
# bbox_array           rows of [x, y, w, h, class_id, conf] from Detector.predict
# middle_x, middle_y   centre pixel of each box, used to sample the module-level
#                      `threeD` point cloud (mm) for the distance readout
# -------------------------------------------------------------------------------------------------------------
def visualize(img, bbox_array):
    for temp in bbox_array:
        # Fix: removed the unused local `bbox`.
        clas = int(temp[4])
        score = temp[5]
        middle_x = int(np.floor(temp[0]+temp[2]*0.5))
        middle_y = int(np.floor(temp[1]+temp[3]*0.5))
        distance = math.sqrt(threeD[middle_y][middle_x][0] ** 2 +
        threeD[middle_y][middle_x][1] ** 2 + threeD[middle_y][middle_x][2] ** 2)
        distance = distance / 1000.0  # mm -> m
        cv2.rectangle(img,(int(temp[0]),int(temp[1])),(int(temp[0]+temp[2]),int(temp[1]+temp[3])), (0, 0, 225), 2)
        img = cv2.putText(img, str(classes[int(clas)])+" "+str(round(score,2))+" dis="+str(round(distance,2)),
        (int(temp[0]),int(temp[1])-5), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 225), 2)
    return img
#---------------------------------------------------#
#   Letterbox-resize a PIL image to `size` without
#   distortion, padding with grey (128, 128, 128)
#---------------------------------------------------#
def resize_image(image, size):
    src_w, src_h = image.size
    dst_w, dst_h = size
    ratio = min(dst_w / src_w, dst_h / src_h)
    scaled_w = int(src_w * ratio)
    scaled_h = int(src_h * ratio)
    scaled = image.resize((scaled_w, scaled_h), Image.BICUBIC)
    canvas = Image.new('RGB', size, (128,128,128))
    canvas.paste(scaled, ((dst_w - scaled_w) // 2, (dst_h - scaled_h) // 2))
    return canvas
#----------------------Load the engine / shared-library files used for inference------#
# yolov5s_480  network input is 640x480
# yolov5       network input is 640x640; if used, enable the resize_image call below
#------------------------------------------------------------------------------------#
det = Detector(model_path=b"./yolov5s.engine",dll_path="./libyolov5.so") # b'' is needed
# Open the video file
capture = cv2.VideoCapture("car.avi")
WIN_NAME = 'Deep disp'
cv2.namedWindow(WIN_NAME, cv2.WINDOW_AUTOSIZE)
# ------------------------------------SGBM parameters---------------------------------------------------
# blockSize       lower values give a more fragmented disparity map, 0 < blockSize < 10
# img_channels    colour channels of a BGR image; must stay 3
# numDisparities  disparity search range; larger = better precision but slower, must be divisible by 16
# mode            fastest to slowest: SGBM_3WAY, HH4, SGBM, HH (accuracy in reverse order)
# ------------------------------------------------------------------------------------------------------
# Fix: the matcher is configured with constants, so build it once up front
# instead of re-creating an identical StereoSGBM object on every frame.
blockSize = 8
img_channels = 3
stereo = cv2.StereoSGBM_create(minDisparity=1,
                               numDisparities=32,
                               blockSize=blockSize,
                               P1=8 * img_channels * blockSize * blockSize,
                               P2=32 * img_channels * blockSize * blockSize,
                               disp12MaxDiff=-1,
                               preFilterCap=1,
                               uniquenessRatio=10,
                               speckleWindowSize=100,
                               speckleRange=100,
                               mode=cv2.STEREO_SGBM_MODE_SGBM_3WAY)
# 3 Read the video
fps = 0.0
ret, frame = capture.read()
while ret:
    # Start timing for the FPS estimate
    t1 = time.time()
    img_color = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
    frame1 = frame[0:480, 0:640]
    frame2 = frame[0:480, 640:1280]  # split the side-by-side stereo image
    imgL = cv2.cvtColor(frame1, cv2.COLOR_BGR2GRAY)  # BGR -> greyscale
    imgR = cv2.cvtColor(frame2, cv2.COLOR_BGR2GRAY)
    # cv2.remap places each source pixel at its rectified location, rebuilding
    # an undistorted image from the MATLAB calibration data.
    img1_rectified = cv2.remap(imgL, left_map1, left_map2, cv2.INTER_LINEAR)
    img2_rectified = cv2.remap(imgR, right_map1, right_map2, cv2.INTER_LINEAR)
    imageL = cv2.cvtColor(img1_rectified, cv2.COLOR_GRAY2BGR)
    imageR = cv2.cvtColor(img2_rectified, cv2.COLOR_GRAY2BGR)
    # Compute the disparity map
    disparity = stereo.compute(img1_rectified, img2_rectified)
    # Normalise for the greyscale depth display
    disp = cv2.normalize(disparity, disparity, alpha=0, beta=255, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_8U)
    # Reproject to 3-D; SGBM output is fixed-point, scale by 16 for real distances
    threeD = cv2.reprojectImageTo3D(disparity, Q, handleMissingValues=True)
    threeD = threeD * 16
    # BGR -> RGB, then to a PIL Image
    frame1 = cv2.cvtColor(frame1, cv2.COLOR_BGR2RGB)
    frame1 = Image.fromarray(np.uint8(frame1))
    frame1_shape = np.array(np.shape(frame1)[0:2])
    # Adjust size / channels for the YOLO input format (only for 640x640 engines)
    # frame1 = resize_image(frame1,(640,480))
    frame1 = cv2.cvtColor(np.array(frame1), cv2.COLOR_RGB2BGR)
    # Run inference
    result = det.predict(frame1)
    # Draw boxes annotated with class, distance and confidence
    frame1 = visualize(frame1, result)
    # FPS estimate
    fps = (fps + (1. / (time.time() - t1))) / 2
    print("fps= %.2f" % (fps))
    frame1 = cv2.putText(frame1, "fps= %.2f" % (fps), (0, 40), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)
    cv2.imshow("frame1", frame1)
    cv2.imshow(WIN_NAME, disp)  # disparity display
    # Press q to quit
    if cv2.waitKey(1) & 0xff == ord('q'):
        break
    # Fix: advance to the next frame here and stop cleanly at end-of-stream.
    # The original read at the top of the loop and then used `frame` even when
    # the read failed, crashing on frame=None at the last iteration.
    ret, frame = capture.read()
# 4 Release resources
det.free()
capture.release()
# 5 Close all windows
cv2.destroyAllWindows()
================================================
FILE: LICENSE
================================================
MIT License
Copyright (c) 2024 yzfzzz
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
================================================
FILE: README.md
================================================
<img src="https://yzfzzz.oss-cn-shenzhen.aliyuncs.com/image/dafafa.drawio%20(5)%20(1).png" alt="dafafa.drawio (5) (1)" style="zoom:80%;" />
## 项目日志
- [x] 双目相机的标定和初始化(2022.7.3)
- [x] 运行BM、SGBM算法(2022.7.6)
- [x] 研究SGBM算法并得出良好的open3d模型(2022.7.15)
- [x] 实现双目测距(2022.7.27)
- [x] 双目相机测出Yolov5检测物体的距离(2022.7.29)
- [x] 视频帧率提高至6FPS(2022.7.30)
- [x] 使用C++重构BM算法(2022.8.1)
- [x] 使用C++重构SGBM算法(2022.8.1)
- [x] 使用TensorRT、C++部署yolov5模型(2022.8.3)
- [x] 完成项目,帧率至少达到20FPS(2022.8.3)
- [x] 新增`Jeston nano`部署文件
## 环境说明
- 🔥Tensorrt 8.4
- 🚀Cuda 11.6.1 Cudnn 8.4.1
- Opencv 4.5.1
- Cmake 3.23.3
- Visual Studio 2017
- MX350,Windows10
## 文件说明
- 💼**BM、SGBM**算法均有C++和Python两个版本
- 📂**tensorrt**:模型部署文件,帧率为23fps
- 📁**yolov5-v6.1-pytorch-master**:未部署前的python代码文件,帧率为5fps
- **stereo_introduce**:双目摄像头基本资料
- 📒**双目视觉资料**:从双目相机的标定(Matlab)到sgbm生成深度图的图文教程
- **stereo_shot.py**:摄像头拍摄代码
- 🎁**Jeston nano_tensorrt**:Jeston nano(Linux)部署资料
## 怎么用?
### 双目相机的标定:https://www.bilibili.com/video/BV1GP41157Ti
### SGBM算法应用(Python版):https://www.bilibili.com/video/BV1zT411w7oZ
### 在YOLOv5中加入双目测距,实现目标测距:https://www.bilibili.com/video/BV1qG41147ZW
### Jeston nano部署yolov5,并实现双目测距:https://www.bilibili.com/video/BV15g411Q7ZV
## 参考资料
1. 🍔YOLOv5 Tensorrt Python/C++部署:https://www.bilibili.com/video/BV113411J7nk/?spm_id_from=333.788.recommend_more_video.-1&vd_source=97aec9e652524c83bb4f4b9481ee059e
2. 🍞Pytorch 搭建自己的YoloV5目标检测平台Bubbliiiing:https://www.bilibili.com/video/BV1FZ4y1m777?spm_id_from=333.999.0.0
3. 🍟双目摄像头-立体视觉:https://blog.csdn.net/qq_41204464/category_10766478.html?spm=1001.2014.3001.5482)
4. CUDA的正确安装/升级/重装/使用方式:https://zhuanlan.zhihu.com/p/520536351
5. 报错【Could not locate zlibwapi.dll. Please make sure it is in your library path】:https://blog.csdn.net/qq_44224801/article/details/125525721
6. 🍿windows下 C++ openCV配置及x86编译(傻瓜式教程):https://blog.csdn.net/qq_37059136/article/details/124165080
7. 树莓派安装pytorch:https://blog.csdn.net/weixin_53798505/article/details/125235377
8. 树莓派开机自启动:https://blog.csdn.net/TohkaQAQ/article/details/121056564
================================================
FILE: SGBM(C++)/SGBM.cpp
================================================
/* Stereo (binocular) SGBM distance-measurement demo.
   NOTE(review): the original comments were GBK mojibake; they have been
   reconstructed in English from the code itself. */
#include <opencv2/opencv.hpp>
#include <iostream>
#include <math.h>
using namespace std;
using namespace cv;
const int imageWidth = 640; // resolution of each camera view
const int imageHeight = 480;
Vec3f point3;
float d;
Size imageSize = Size(imageWidth, imageHeight);
Mat img;
Mat rgbImageL, grayImageL;
Mat rgbImageR, grayImageR;
Mat rectifyImageL, rectifyImageR;
Rect m_l_select;
Rect m_r_select;
Rect validROIL;// after rectification the image is cropped; validROI is the valid cropped region
Rect validROIR;
Mat mapLx, mapLy, mapRx, mapRy; // remap lookup tables
Mat Rl, Rr, Pl, Pr, Q; // rectification rotations Rl/Rr, projections Pl/Pr, reprojection matrix Q
Mat xyz; // 3-D point cloud from reprojectImageTo3D
Point origin; // point where the mouse press started
Rect selection; // selection rectangle dragged with the mouse
bool selectObject = false; // whether a selection drag is in progress
int blockSize = 8, mindisparity = 1, ndisparities = 64, img_channels = 3;
Ptr<cv::StereoSGBM> sgbm = cv::StereoSGBM::create(mindisparity, ndisparities, blockSize);
/* Left-camera intrinsic matrix from calibration:
fx 0 cx
0 fy cy
0 0 1
*/
Mat cameraMatrixL = (Mat_<double>(3, 3) << 516.5066236, -1.444673028, 320.2950423, 0, 516.5816117, 270.7881873, 0, 0, 1.);
// earlier calibration result kept for reference:
/*418.523322187048 0 0
-1.26842201390676 421.222568242056 0
344.758267538961 243.318992284899 1 */
Mat distCoeffL = (Mat_<double>(5, 1) << -0.046645194, 0.077595167, 0.012476819, -0.000711358, 0);
//[0.006636837611004,0.050240447649195] [0.006681263320267,0.003130367429418]
/* Right-camera intrinsic matrix from calibration:
fx 0 cx
0 fy cy
0 0 1
*/
Mat cameraMatrixR = (Mat_<double>(3, 3) << 511.8428182, 1.295112628, 317.310253, 0, 513.0748795, 269.5885026, 0, 0, 1);
/* earlier calibration result kept for reference:
417.417985082506 0 0
0.498638151824367 419.795432389420 0
309.903372309072 236.256106972796 1
*/
Mat distCoeffR = (Mat_<double>(5, 1) << -0.061588946, 0.122384376, 0.011081232, -0.000750439, 0);
//[-0.038407383078874,0.236392800301615] [0.004121779274885,0.002296129959664]
Mat T = (Mat_<double>(3, 1) << -120.3559901, -0.188953775, -0.662073075);// translation vector (matches MATLAB's T)
//[-1.210187345641146e+02,0.519235426836325,-0.425535566316217]
// corresponds to MATLAB's T
//Mat rec = (Mat_<double>(3, 1) << -0.00306, -0.03207, 0.00206);// rotation vector form (matches matlab om)
Mat rec = (Mat_<double>(3, 3) << 0.999911333, -0.004351508, 0.012585312,
0.004184066, 0.999902792, 0.013300386,
-0.012641965, -0.013246549, 0.999832341); // rotation matrix (matches matlab om)
/* earlier value kept for reference:
0.999341122700880 0.000660748031451783 -0.0362888948713456
-0.00206388651740061 0.999250989651683 -0.0386419468010579
0.0362361815232777 0.0386913826603732 0.998593969567432 */
//Mat T = (Mat_<double>(3, 1) << -48.4, 0.241, -0.0344);// older translation vector kept for reference
//[-1.210187345641146e+02,0.519235426836325,-0.425535566316217]
// corresponds to MATLAB's T
Mat R;// rotation matrix produced from `rec` via Rodrigues in main()
/***** Stereo matching: run SGBM on the rectified pair, display the disparity
       map, and reproject it to the 3-D point cloud used by the mouse callback *****/
void stereo_match(int, void*)
{
/* BM-based alternative kept for reference:
bm->setBlockSize(2 * blockSize + 5); // SAD window size, 5~21 is reasonable
bm->setROI1(validROIL);
bm->setROI2(validROIR);
bm->setPreFilterCap(31);
bm->setMinDisparity(0); // minimum disparity, default 0; may be negative (int)
bm->setNumDisparities(numDisparities * 16 + 16);// disparity range (max - min), multiple of 16 (int)
bm->setTextureThreshold(10);
bm->setUniquenessRatio(uniquenessRatio);// uniqueness check to suppress false matches
bm->setSpeckleWindowSize(100);
bm->setSpeckleRange(32);
bm->setDisp12MaxDiff(-1);
*/
int P1 = 8 * img_channels * blockSize * blockSize;
int P2 = 32 * img_channels * blockSize * blockSize;
sgbm->setP1(P1);
sgbm->setP2(P2);
sgbm->setPreFilterCap(1);
sgbm->setUniquenessRatio(10);
sgbm->setSpeckleRange(100);
sgbm->setSpeckleWindowSize(100);
sgbm->setDisp12MaxDiff(-1);
//sgbm->setNumDisparities(1);
sgbm->setMode(cv::StereoSGBM::MODE_HH);
Mat disp, disp8;
sgbm->compute(rectifyImageL, rectifyImageR, disp);// inputs must be the rectified greyscale images
disp8 = Mat(disp.rows, disp.cols, CV_8UC1);
normalize(disp, disp8, 0, 255, NORM_MINMAX, CV_8UC1);
reprojectImageTo3D(disp, xyz, Q, true); // SGBM disparity is fixed-point (x16), so X/W, Y/W, Z/W must be scaled by 16 for metric values
xyz = xyz * 16;
imshow("disparity", disp8);
}
/***** Mouse callback: on left-button press, print the 3-D position and
       distance of the clicked pixel using the reprojected point cloud. *****/
static void onMouse(int event, int x, int y, int, void*)
{
    if (selectObject)
    {
        selection.x = MIN(x, origin.x);
        selection.y = MIN(y, origin.y);
        selection.width = std::abs(x - origin.x);
        selection.height = std::abs(y - origin.y);
    }
    switch (event)
    {
    case EVENT_LBUTTONDOWN: // left button pressed
        origin = Point(x, y);
        selection = Rect(x, y, 0, 0);
        selectObject = true;
        point3 = xyz.at<Vec3f>(origin);
        // Fix: removed the no-op statement `point3[0];` and restored the
        // output strings, which were mojibake in the original source.
        cout << "World coordinates:" << endl;
        cout << "x: " << point3[0] << " y: " << point3[1] << " z: " << point3[2] << endl;
        d = point3[0] * point3[0] + point3[1] * point3[1] + point3[2] * point3[2];
        d = sqrt(d);   // mm
        d = d / 10.0;  // cm
        cout << "distance: " << d << "cm" << endl;
        // d = d/1000.0; // m
        break;
    case EVENT_LBUTTONUP: // left button released
        selectObject = false;
        if (selection.width > 0 && selection.height > 0)
            break;
    }
}
/***** Main: rectify the stereo pair, display it, then run SGBM matching *****/
int main()
{
    /*
    Compute the rectification transforms from the calibration data.
    */
    Rodrigues(rec, R); // rotation matrix from the Rodrigues representation
    stereoRectify(cameraMatrixL, distCoeffL, cameraMatrixR, distCoeffR, imageSize, R, T, Rl, Rr, Pl, Pr, Q, CALIB_ZERO_DISPARITY,
        0, imageSize, &validROIL, &validROIR);
    // Fix: the left remap table was built with the RIGHT projection matrix (Pr);
    // it must use the left one (Pl), otherwise rectification is wrong.
    initUndistortRectifyMap(cameraMatrixL, distCoeffL, Rl, Pl, imageSize, CV_32FC1, mapLx, mapLy);
    initUndistortRectifyMap(cameraMatrixR, distCoeffR, Rr, Pr, imageSize, CV_32FC1, mapRx, mapRy);
    /*
    Load the side-by-side stereo image and split it into left/right halves.
    */
    m_l_select = Rect(0, 0, 640, 480);
    img = imread("car.jpg", IMREAD_COLOR);
    //imshow("Image", img);
    rgbImageL = img(m_l_select);
    cvtColor(rgbImageL, grayImageL, COLOR_BGR2GRAY);
    m_r_select = Rect(640, 0, 640, 480);
    rgbImageR = img(m_r_select);
    cvtColor(rgbImageR, grayImageR, COLOR_BGR2GRAY);
    /*
    After remap the two images are coplanar and row-aligned.
    */
    remap(grayImageL, rectifyImageL, mapLx, mapLy, INTER_LINEAR);
    remap(grayImageR, rectifyImageR, mapRx, mapRy, INTER_LINEAR);
    /*
    Display the rectified pair side by side with epipolar lines.
    */
    Mat rgbRectifyImageL, rgbRectifyImageR;
    cvtColor(rectifyImageL, rgbRectifyImageL, COLOR_GRAY2BGR); // pseudo-colour for display
    cvtColor(rectifyImageR, rgbRectifyImageR, COLOR_GRAY2BGR);
    // Both views on one canvas, scaled so the longer edge is 600 px.
    Mat canvas;
    double sf;
    int w, h;
    sf = 600. / MAX(imageSize.width, imageSize.height);
    w = cvRound(imageSize.width * sf);
    h = cvRound(imageSize.height * sf);
    canvas.create(h, w * 2, CV_8UC3); // note: 3 channels
    // Left view into the left half of the canvas
    Mat canvasPart = canvas(Rect(w * 0, 0, w, h));
    resize(rgbRectifyImageL, canvasPart, canvasPart.size(), 0, 0, INTER_AREA);
    Rect vroiL(cvRound(validROIL.x*sf), cvRound(validROIL.y*sf), // scaled valid ROI
        cvRound(validROIL.width*sf), cvRound(validROIL.height*sf));
    cout << "Painted ImageL" << endl;
    // Right view into the right half
    canvasPart = canvas(Rect(w, 0, w, h));
    resize(rgbRectifyImageR, canvasPart, canvasPart.size(), 0, 0, INTER_LINEAR);
    Rect vroiR(cvRound(validROIR.x * sf), cvRound(validROIR.y*sf),
        cvRound(validROIR.width * sf), cvRound(validROIR.height * sf));
    cout << "Painted ImageR" << endl;
    // Horizontal lines to visually verify the epipolar (row) alignment
    for (int i = 0; i < canvas.rows; i += 16)
        line(canvas, Point(0, i), Point(canvas.cols, i), Scalar(0, 255, 0), 1, 8);
    imshow("rectified", canvas);
    /*
    Stereo matching
    */
    namedWindow("disparity", WINDOW_AUTOSIZE);
    // Click the disparity window to print the 3-D position/distance of a pixel
    setMouseCallback("disparity", onMouse, 0);
    stereo_match(0, 0);
    waitKey();
    return 0;
}
================================================
FILE: SGBM(Python)/sgbm-video.py
================================================
import cv2
import numpy as np
import time
import random
import math
# -----------------------------------Stereo camera calibration parameters-------------------------------------
# left_camera_matrix   intrinsic matrix of the left camera
# right_camera_matrix  intrinsic matrix of the right camera
#
# left_distortion      distortion coefficients of the left camera, format (K1, K2, P1, P2, 0)
# right_distortion     distortion coefficients of the right camera
# -------------------------------------------------------------------------------------------------------------
# Left lens intrinsics (focal lengths / principal point)
left_camera_matrix = np.array([[516.5066236,-1.444673028,320.2950423],[0,516.5816117,270.7881873],[0.,0.,1.]])
right_camera_matrix = np.array([[511.8428182,1.295112628,317.310253],[0,513.0748795,269.5885026],[0.,0.,1.]])
# Distortion coefficients: K1, K2, K3 radial; P1, P2 tangential
left_distortion = np.array([[-0.046645194,0.077595167, 0.012476819,-0.000711358,0]])
right_distortion = np.array([[-0.061588946,0.122384376,0.011081232,-0.000750439,0]])
# Rotation matrix between the two cameras
R = np.array([[0.999911333,-0.004351508,0.012585312],
[0.004184066,0.999902792,0.013300386],
[-0.012641965,-0.013246549,0.999832341]])
# Translation vector (presumably millimetres, from the MATLAB calibration — TODO confirm)
T = np.array([-120.3559901,-0.188953775,-0.662073075])
size = (640, 480)
R1, R2, P1, P2, Q, validPixROI1, validPixROI2 = cv2.stereoRectify(left_camera_matrix, left_distortion,
right_camera_matrix, right_distortion, size, R,
T)
# Rectification lookup maps: pair each raw-image pixel with its rectified position
left_map1, left_map2 = cv2.initUndistortRectifyMap(left_camera_matrix, left_distortion, R1, P1, size, cv2.CV_16SC2)
right_map1, right_map2 = cv2.initUndistortRectifyMap(right_camera_matrix, right_distortion, R2, P2, size, cv2.CV_16SC2)
print(Q)
# --------------------------Mouse callback-------------------------------------------------------
# event  OpenCV mouse event code
# param  the reprojected 3-D point cloud (mm) passed via cv2.setMouseCallback
# -----------------------------------------------------------------------------------------------
def onmouse_pick_points(event, x, y, flags, param):
    # Only react to a left-button press; ignore every other event.
    if event != cv2.EVENT_LBUTTONDOWN:
        return
    threeD = param
    print('\n像素坐标 x = %d, y = %d' % (x, y))
    # print("世界坐标是:", threeD[y][x][0], threeD[y][x][1], threeD[y][x][2], "mm")
    print("世界坐标xyz 是:", threeD[y][x][0] / 1000.0, threeD[y][x][1] / 1000.0, threeD[y][x][2] / 1000.0, "m")
    distance = math.sqrt(threeD[y][x][0] ** 2 + threeD[y][x][1] ** 2 + threeD[y][x][2] ** 2)
    distance = distance / 1000.0  # mm -> m
    print("距离是:", distance, "m")
# Open the video file
capture = cv2.VideoCapture("./car.avi")
WIN_NAME = 'Deep disp'
cv2.namedWindow(WIN_NAME, cv2.WINDOW_AUTOSIZE)
# Fix: create the "depth" window up front — the original attached the mouse
# callback to it before it had ever been shown.
cv2.namedWindow("depth")
# ------------------------------------SGBM parameters---------------------------------------------------
# blockSize       lower values give a more fragmented disparity map, 0 < blockSize < 10
# img_channels    colour channels of a BGR image; must stay 3
# numDisparities  disparity search range; larger = better precision but slower, must be divisible by 16
# mode            fastest to slowest: SGBM_3WAY, HH4, SGBM, HH (accuracy in reverse order)
# ------------------------------------------------------------------------------------------------------
# Fix: the matcher uses constant parameters, so build it once up front instead
# of re-creating an identical StereoSGBM object on every frame.
blockSize = 3
img_channels = 3
stereo = cv2.StereoSGBM_create(minDisparity=1,
                               numDisparities=64,
                               blockSize=blockSize,
                               P1=8 * img_channels * blockSize * blockSize,
                               P2=32 * img_channels * blockSize * blockSize,
                               disp12MaxDiff=-1,
                               preFilterCap=1,
                               uniquenessRatio=10,
                               speckleWindowSize=100,
                               speckleRange=100,
                               mode=cv2.STEREO_SGBM_MODE_HH)
# Read the video
fps = 0.0
ret, frame = capture.read()
while ret:
    # Start timing for the FPS estimate
    t1 = time.time()
    # Split into the left and right images
    frame1 = frame[0:480, 0:640]
    frame2 = frame[0:480, 640:1280]
    # BGR -> greyscale for the distortion correction
    imgL = cv2.cvtColor(frame1, cv2.COLOR_BGR2GRAY)
    imgR = cv2.cvtColor(frame2, cv2.COLOR_BGR2GRAY)
    # Remap to undistorted, rectified images (greyscale input required),
    # using the MATLAB calibration data.
    img1_rectified = cv2.remap(imgL, left_map1, left_map2, cv2.INTER_LINEAR)
    img2_rectified = cv2.remap(imgR, right_map1, right_map2, cv2.INTER_LINEAR)
    # Back to BGR for display
    imageL = cv2.cvtColor(img1_rectified, cv2.COLOR_GRAY2BGR)
    imageR = cv2.cvtColor(img2_rectified, cv2.COLOR_GRAY2BGR)
    # Compute the disparity map
    disparity = stereo.compute(img1_rectified, img2_rectified)
    # Normalised greyscale depth display
    disp = cv2.normalize(disparity, disparity, alpha=0, beta=255, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_8U)
    # Colour-mapped depth display
    dis_color = disparity
    dis_color = cv2.normalize(dis_color, None, alpha=0, beta=255, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_8U)
    dis_color = cv2.applyColorMap(dis_color, 2)
    # 3-D point cloud; SGBM output is fixed-point, so scale by 16 for real distances
    threeD = cv2.reprojectImageTo3D(disparity, Q, handleMissingValues=True)
    threeD = threeD * 16
    # Re-attach the mouse callback so it sees this frame's point cloud
    cv2.setMouseCallback("depth", onmouse_pick_points, threeD)
    # Finish timing, compute the FPS estimate
    fps = (fps + (1. / (time.time() - t1))) / 2
    frame = cv2.putText(frame, "fps= %.2f" % (fps), (0, 40), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)
    cv2.imshow("depth", dis_color)
    cv2.imshow("left", frame1)
    cv2.imshow(WIN_NAME, disp)  # disparity display
    # Press q to quit
    if cv2.waitKey(1) & 0xff == ord('q'):
        break
    # Fix: advance to the next frame here and stop cleanly at end-of-stream.
    # The original read at the top of the loop and then used `frame` even when
    # the read failed, crashing on frame=None at the last iteration.
    ret, frame = capture.read()
# Release resources
capture.release()
# Close all windows
cv2.destroyAllWindows()
================================================
FILE: Win_tensorrt/python_trt.py
================================================
from ctypes import *
import cv2
import numpy as np
import numpy.ctypeslib as npct
import time
import math
from PIL import Image
# -----------------------------------Stereo camera calibration parameters-------------------------------------
# left_camera_matrix   intrinsic matrix of the left camera
# right_camera_matrix  intrinsic matrix of the right camera
#
# left_distortion      distortion coefficients of the left camera, format (K1, K2, P1, P2, 0)
# right_distortion     distortion coefficients of the right camera
# -------------------------------------------------------------------------------------------------------------
# Left lens intrinsics (focal lengths / principal point)
left_camera_matrix = np.array([[516.5066236,-1.444673028,320.2950423],[0,516.5816117,270.7881873],[0.,0.,1.]])
right_camera_matrix = np.array([[511.8428182,1.295112628,317.310253],[0,513.0748795,269.5885026],[0.,0.,1.]])
# Distortion coefficients: K1, K2, K3 radial; P1, P2 tangential
left_distortion = np.array([[-0.046645194,0.077595167, 0.012476819,-0.000711358,0]])
right_distortion = np.array([[-0.061588946,0.122384376,0.011081232,-0.000750439,0]])
# Rotation matrix between the two cameras
R = np.array([[0.999911333,-0.004351508,0.012585312],
[0.004184066,0.999902792,0.013300386],
[-0.012641965,-0.013246549,0.999832341]])
# Translation vector (presumably millimetres, from the MATLAB calibration — TODO confirm)
T = np.array([-120.3559901,-0.188953775,-0.662073075])
size = (640, 480)
R1, R2, P1, P2, Q, validPixROI1, validPixROI2 = cv2.stereoRectify(left_camera_matrix, left_distortion,
right_camera_matrix, right_distortion, size, R,
T)
# Rectification lookup maps: pair each raw-image pixel with its rectified position
left_map1, left_map2 = cv2.initUndistortRectifyMap(left_camera_matrix, left_distortion, R1, P1, size, cv2.CV_16SC2)
right_map1, right_map2 = cv2.initUndistortRectifyMap(right_camera_matrix, right_distortion, R2, P2, size, cv2.CV_16SC2)
print(Q)
# ---------------------------------------------------------------------------------------------------------
# classes  COCO class names; the tuple is 0-indexed and is looked up via
#          classes[int(class_id)], so class id 0 maps to 'person', and so on
#          (the original comment incorrectly said id 1)
# ---------------------------------------------------------------------------------------------------------
classes = ('person','bicycle','car','motorbike','aeroplane','bus','train','truck','boat','traffic light',
'fire hydrant','stop sign','parking meter','bench','bird','cat','dog','horse','sheep','cow','elephant',
'bear','zebra','giraffe','backpack','umbrella','handbag','tie','suitcase','frisbee','skis','snowboard',
'sports ball','kite','baseball bat','baseball glove','skateboard','surfboard','tennis racket','bottle',
'wine glass','cup','fork','knife','spoon','bowl','banana','apple','sandwich','orange','broccoli','carrot',
'hot dog','pizza','donut','cake','chair','sofa','pottedplant','bed','diningtable','toilet','tvmonitor',
'laptop','mouse','remote','keyboard','cell phone','microwave','oven','toaster','sink','refrigerator',
'book','clock','vase','scissors','teddy bear','hair drier','toothbrush')
# ---------------------------------------------------------------------------------------------------------
# Detector()  ctypes wrapper around the TensorRT-accelerated yolov5 DLL
# ---------------------------------------------------------------------------------------------------------
class Detector():
    def __init__(self, model_path, dll_path):
        lib = CDLL(dll_path)
        # Declare the C signatures before any call is made.
        lib.Init.restype = c_void_p
        lib.Init.argtypes = [c_void_p]
        lib.Detect.argtypes = [
            c_void_p, c_int, c_int, POINTER(c_ubyte),
            npct.ndpointer(dtype=np.float32, ndim=2, shape=(50, 6), flags="C_CONTIGUOUS"),
        ]
        lib.cuda_free.argtypes = [c_void_p]
        self.yolov5 = lib
        self.c_point = lib.Init(model_path)

    def predict(self, img):
        # Returns rows of [x, y, w, h, class_id, conf]; all-zero rows are dropped.
        height, width = img.shape[0], img.shape[1]
        res_arr = np.zeros((50, 6), dtype=np.float32)
        self.yolov5.Detect(self.c_point, c_int(height), c_int(width),
                           img.ctypes.data_as(POINTER(c_ubyte)), res_arr)
        self.bbox_array = res_arr[~(res_arr == 0).all(1)]
        return self.bbox_array

    def free(self):
        # Release the GPU/TensorRT resources held on the C side.
        self.yolov5.cuda_free(self.c_point)
# ------------------------------------visualize--------------------------------------------------------------
# img                  frame to draw on (modified in place and returned)
# bbox_array           rows of [x, y, w, h, class_id, conf] from Detector.predict
# middle_x, middle_y   centre pixel of each box, used to sample the module-level
#                      `threeD` point cloud (mm) for the distance readout
# -------------------------------------------------------------------------------------------------------------
def visualize(img, bbox_array):
    for temp in bbox_array:
        # Fix: removed the unused local `bbox`.
        clas = int(temp[4])
        score = temp[5]
        middle_x = int(np.floor(temp[0]+temp[2]*0.5))
        middle_y = int(np.floor(temp[1]+temp[3]*0.5))
        distance = math.sqrt(threeD[middle_y][middle_x][0] ** 2 +
        threeD[middle_y][middle_x][1] ** 2 + threeD[middle_y][middle_x][2] ** 2)
        distance = distance / 1000.0  # mm -> m
        cv2.rectangle(img,(int(temp[0]),int(temp[1])),(int(temp[0]+temp[2]),int(temp[1]+temp[3])), (0, 0, 225), 2)
        img = cv2.putText(img, str(classes[int(clas)])+" "+str(round(score,2))+" dis="+str(round(distance,2)),
        (int(temp[0]),int(temp[1])-5), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 225), 2)
    return img
#---------------------------------------------------#
#   Letterbox-resize a PIL image to `size` without
#   distortion, padding with grey (128, 128, 128)
#---------------------------------------------------#
def resize_image(image, size):
    src_w, src_h = image.size
    dst_w, dst_h = size
    ratio = min(dst_w / src_w, dst_h / src_h)
    scaled_w = int(src_w * ratio)
    scaled_h = int(src_h * ratio)
    scaled = image.resize((scaled_w, scaled_h), Image.BICUBIC)
    canvas = Image.new('RGB', size, (128,128,128))
    canvas.paste(scaled, ((dst_w - scaled_w) // 2, (dst_h - scaled_h) // 2))
    return canvas
#---------------------- Load the engine and DLL needed for inference -----------------#
# yolov5s_480: the network input is 640x480
# yolov5:      the network input is 640x640; to use it, enable the
#              resize_image() call inside the loop below
#------------------------------------------------------------------------------------#
det = Detector(model_path=b"D:\GDEE\Project\Release\yolov5s_480.engine",dll_path="D:\GDEE\Project\Release\yolov5s_480.dll") # b'' is needed
# Open the recorded side-by-side stereo video
capture = cv2.VideoCapture("output.avi")
WIN_NAME = 'Deep disp'
cv2.namedWindow(WIN_NAME, cv2.WINDOW_AUTOSIZE)
# ------------------------------------ SGBM parameters -------------------------------------------------------
# blockSize        matching window size; smaller values give a noisier, more
#                  fragmented depth map (0 < blockSize < 10)
# img_channels     colour channels of a BGR image; always 3
# numDisparities   disparity search range; larger = better precision but slower,
#                  must be divisible by 16 (e.g. 16, 32, 48, 64)
# mode             SGBM variant, fastest to slowest: STEREO_SGBM_MODE_SGBM_3WAY,
#                  STEREO_SGBM_MODE_HH4, STEREO_SGBM_MODE_SGBM, STEREO_SGBM_MODE_HH
#                  (accuracy goes the other way)
# -------------------------------------------------------------------------------------------------------------
# Fix: create the matcher ONCE instead of rebuilding it on every frame.
blockSize = 8
img_channels = 3
stereo = cv2.StereoSGBM_create(minDisparity=1,
                               numDisparities=32,
                               blockSize=blockSize,
                               P1=8 * img_channels * blockSize * blockSize,
                               P2=32 * img_channels * blockSize * blockSize,
                               disp12MaxDiff=-1,
                               preFilterCap=1,
                               uniquenessRatio=10,
                               speckleWindowSize=100,
                               speckleRange=100,
                               mode=cv2.STEREO_SGBM_MODE_SGBM_3WAY)
# Read and process the video frame by frame
fps = 0.0
while True:
    ret, frame = capture.read()
    # Fix: the original read one frame before the loop (silently dropping it)
    # and crashed with cvtColor(None) when the last read failed; stop cleanly
    # as soon as no frame is returned.
    if not ret:
        break
    # Start timing this frame for the FPS estimate
    t1 = time.time()
    frame1 = frame[0:480, 0:640]        # left view
    frame2 = frame[0:480, 640:1280]     # right view (split the side-by-side frame)
    imgL = cv2.cvtColor(frame1, cv2.COLOR_BGR2GRAY)  # BGR -> grayscale
    imgR = cv2.cvtColor(frame2, cv2.COLOR_BGR2GRAY)
    # cv2.remap relocates pixels using the rectification maps built from the
    # MATLAB calibration data, producing undistorted, row-aligned images.
    img1_rectified = cv2.remap(imgL, left_map1, left_map2, cv2.INTER_LINEAR)
    img2_rectified = cv2.remap(imgR, right_map1, right_map2, cv2.INTER_LINEAR)
    # Compute the disparity map
    disparity = stereo.compute(img1_rectified, img2_rectified)
    # Normalise to 8 bit for display
    disp = cv2.normalize(disparity, disparity, alpha=0, beta=255, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_8U)
    # Reproject to 3-D coordinates; StereoSGBM returns fixed-point disparity
    # scaled by 16, hence the * 16 correction of the point cloud.
    threeD = cv2.reprojectImageTo3D(disparity, Q, handleMissingValues=True)
    threeD = threeD * 16
    # BGR -> RGB and wrap in a PIL Image (kept so the optional letterbox
    # resize for the 640x640 engine can be enabled below)
    frame1 = cv2.cvtColor(frame1, cv2.COLOR_BGR2RGB)
    frame1 = Image.fromarray(np.uint8(frame1))
    frame1_shape = np.array(np.shape(frame1)[0:2])
    # frame1 = resize_image(frame1,(640,480))   # enable when using the 640x640 engine
    frame1 = cv2.cvtColor(np.array(frame1), cv2.COLOR_RGB2BGR)
    # Run YOLO inference
    result = det.predict(frame1)
    # Draw boxes with class name, distance and confidence
    frame1 = visualize(frame1, result)
    # Running-average FPS estimate
    fps = (fps + (1. / (time.time() - t1))) / 2
    print("fps= %.2f" % (fps))
    frame1 = cv2.putText(frame1, "fps= %.2f" % (fps), (0, 40), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)
    cv2.imshow("frame1", frame1)
    cv2.imshow(WIN_NAME, disp)  # disparity (depth) view
    # Press 'q' to quit playback
    if cv2.waitKey(1) & 0xff == ord('q'):
        break
# Release resources
det.free()
capture.release()
# Close all windows
cv2.destroyAllWindows()
================================================
FILE: Win_tensorrt/yolov5_640.engine
================================================
[File too large to display: 31.2 MB]
================================================
FILE: Win_tensorrt/yolov5s_480.engine
================================================
[File too large to display: 28.8 MB]
================================================
FILE: distance_measurement_cpp/camera_config.json
================================================
{
"left_camera_matrix": [
[ 516.5066236, -1.444673028, 320.2950423 ],
[ 0, 516.5816117, 270.7881873 ],
[ 0, 0, 1.0 ]
],
"left_camera_distcoeff": [
[ -0.046645194 ],
[ 0.077595167 ],
[ 0.012476819 ],
[ -0.000711358 ],
[ 0 ]
],
"right_camera_matrix": [
[ 511.8428182, 1.295112628, 317.310253 ],
[ 0, 513.0748795, 269.5885026 ],
[ 0, 0, 1.0 ]
],
"right_camera_distcoeff": [
[ -0.061588946 ],
[ 0.122384376 ],
[ 0.011081232 ],
[ -0.000750439 ],
[ 0 ]
],
"translation_vector": [
[ -120.3559901 ],
[ -0.188953775 ],
[ -0.662073075 ]
],
"rotation_vector": [
[ 0.999911333, -0.004351508, 0.012585312 ],
[ 0.004184066, 0.999902792, 0.013300386 ],
[ -0.012641965, -0.013246549, 0.999832341 ]
]
}
================================================
FILE: distance_measurement_cpp/json.hpp
================================================
// __ _____ _____ _____
// __| | __| | | | JSON for Modern C++
// | | |__ | | | | | | version 3.12.0
// |_____|_____|_____|_|___| https://github.com/nlohmann/json
//
// SPDX-FileCopyrightText: 2013 - 2025 Niels Lohmann <https://nlohmann.me>
// SPDX-License-Identifier: MIT
/****************************************************************************\
* Note on documentation: The source files contain links to the online *
* documentation of the public API at https://json.nlohmann.me. This URL *
* contains the most recent documentation and should also be applicable to *
* previous versions; documentation for deprecated functions is not *
* removed, but marked deprecated. See "Generate documentation" section in *
* file docs/README.md. *
\****************************************************************************/
#ifndef INCLUDE_NLOHMANN_JSON_HPP_
#define INCLUDE_NLOHMANN_JSON_HPP_
#include <algorithm> // all_of, find, for_each
#include <cstddef> // nullptr_t, ptrdiff_t, size_t
#include <functional> // hash, less
#include <initializer_list> // initializer_list
#ifndef JSON_NO_IO
#include <iosfwd> // istream, ostream
#endif // JSON_NO_IO
#include <iterator> // random_access_iterator_tag
#include <memory> // unique_ptr
#include <string> // string, stoi, to_string
#include <utility> // declval, forward, move, pair, swap
#include <vector> // vector
// #include <nlohmann/adl_serializer.hpp>
// __ _____ _____ _____
// __| | __| | | | JSON for Modern C++
// | | |__ | | | | | | version 3.12.0
// |_____|_____|_____|_|___| https://github.com/nlohmann/json
//
// SPDX-FileCopyrightText: 2013 - 2025 Niels Lohmann <https://nlohmann.me>
// SPDX-License-Identifier: MIT
#include <utility>
// #include <nlohmann/detail/abi_macros.hpp>
// __ _____ _____ _____
// __| | __| | | | JSON for Modern C++
// | | |__ | | | | | | version 3.12.0
// |_____|_____|_____|_|___| https://github.com/nlohmann/json
//
// SPDX-FileCopyrightText: 2013 - 2025 Niels Lohmann <https://nlohmann.me>
// SPDX-License-Identifier: MIT
// This file contains all macro definitions affecting or depending on the ABI
// Warn (at preprocessing time) if a different version of nlohmann/json was
// already included in this translation unit; opt out with
// JSON_SKIP_LIBRARY_VERSION_CHECK.
#ifndef JSON_SKIP_LIBRARY_VERSION_CHECK
    #if defined(NLOHMANN_JSON_VERSION_MAJOR) && defined(NLOHMANN_JSON_VERSION_MINOR) && defined(NLOHMANN_JSON_VERSION_PATCH)
        #if NLOHMANN_JSON_VERSION_MAJOR != 3 || NLOHMANN_JSON_VERSION_MINOR != 12 || NLOHMANN_JSON_VERSION_PATCH != 0
            #warning "Already included a different version of the library!"
        #endif
    #endif
#endif

// Library version; also embedded into the inline namespace name built below.
#define NLOHMANN_JSON_VERSION_MAJOR 3 // NOLINT(modernize-macro-to-enum)
#define NLOHMANN_JSON_VERSION_MINOR 12 // NOLINT(modernize-macro-to-enum)
#define NLOHMANN_JSON_VERSION_PATCH 0 // NOLINT(modernize-macro-to-enum)

// ABI-affecting feature toggles; each defaults to off (0).
#ifndef JSON_DIAGNOSTICS
    #define JSON_DIAGNOSTICS 0
#endif

#ifndef JSON_DIAGNOSTIC_POSITIONS
    #define JSON_DIAGNOSTIC_POSITIONS 0
#endif

#ifndef JSON_USE_LEGACY_DISCARDED_VALUE_COMPARISON
    #define JSON_USE_LEGACY_DISCARDED_VALUE_COMPARISON 0
#endif

// Each enabled toggle contributes a short tag to the inline-namespace name,
// so that translation units compiled with different (ABI-incompatible)
// settings do not link against each other's symbols.
#if JSON_DIAGNOSTICS
    #define NLOHMANN_JSON_ABI_TAG_DIAGNOSTICS _diag
#else
    #define NLOHMANN_JSON_ABI_TAG_DIAGNOSTICS
#endif

#if JSON_DIAGNOSTIC_POSITIONS
    #define NLOHMANN_JSON_ABI_TAG_DIAGNOSTIC_POSITIONS _dp
#else
    #define NLOHMANN_JSON_ABI_TAG_DIAGNOSTIC_POSITIONS
#endif

#if JSON_USE_LEGACY_DISCARDED_VALUE_COMPARISON
    #define NLOHMANN_JSON_ABI_TAG_LEGACY_DISCARDED_VALUE_COMPARISON _ldvcmp
#else
    #define NLOHMANN_JSON_ABI_TAG_LEGACY_DISCARDED_VALUE_COMPARISON
#endif

// When set, the version component is omitted from the namespace name.
#ifndef NLOHMANN_JSON_NAMESPACE_NO_VERSION
    #define NLOHMANN_JSON_NAMESPACE_NO_VERSION 0
#endif

// Construct the namespace ABI tags component (e.g. json_abi_diag).
// The _EX indirection forces macro-argument expansion before token pasting.
#define NLOHMANN_JSON_ABI_TAGS_CONCAT_EX(a, b, c) json_abi ## a ## b ## c
#define NLOHMANN_JSON_ABI_TAGS_CONCAT(a, b, c) \
    NLOHMANN_JSON_ABI_TAGS_CONCAT_EX(a, b, c)

#define NLOHMANN_JSON_ABI_TAGS                                       \
    NLOHMANN_JSON_ABI_TAGS_CONCAT(                                   \
            NLOHMANN_JSON_ABI_TAG_DIAGNOSTICS,                       \
            NLOHMANN_JSON_ABI_TAG_LEGACY_DISCARDED_VALUE_COMPARISON, \
            NLOHMANN_JSON_ABI_TAG_DIAGNOSTIC_POSITIONS)

// Construct the namespace version component (e.g. _v3_12_0).
#define NLOHMANN_JSON_NAMESPACE_VERSION_CONCAT_EX(major, minor, patch) \
    _v ## major ## _ ## minor ## _ ## patch
#define NLOHMANN_JSON_NAMESPACE_VERSION_CONCAT(major, minor, patch) \
    NLOHMANN_JSON_NAMESPACE_VERSION_CONCAT_EX(major, minor, patch)

#if NLOHMANN_JSON_NAMESPACE_NO_VERSION
    #define NLOHMANN_JSON_NAMESPACE_VERSION
#else
    #define NLOHMANN_JSON_NAMESPACE_VERSION                                 \
        NLOHMANN_JSON_NAMESPACE_VERSION_CONCAT(NLOHMANN_JSON_VERSION_MAJOR, \
                                               NLOHMANN_JSON_VERSION_MINOR, \
                                               NLOHMANN_JSON_VERSION_PATCH)
#endif

// Combine namespace components
#define NLOHMANN_JSON_NAMESPACE_CONCAT_EX(a, b) a ## b
#define NLOHMANN_JSON_NAMESPACE_CONCAT(a, b) \
    NLOHMANN_JSON_NAMESPACE_CONCAT_EX(a, b)

// Fully qualified name of the versioned + tagged inline namespace,
// e.g. nlohmann::json_abi_v3_12_0.
#ifndef NLOHMANN_JSON_NAMESPACE
    #define NLOHMANN_JSON_NAMESPACE               \
        nlohmann::NLOHMANN_JSON_NAMESPACE_CONCAT( \
                NLOHMANN_JSON_ABI_TAGS,           \
                NLOHMANN_JSON_NAMESPACE_VERSION)
#endif

// Open/close the nlohmann namespace together with its inline ABI namespace;
// all library code is wrapped in these two macros.
#ifndef NLOHMANN_JSON_NAMESPACE_BEGIN
    #define NLOHMANN_JSON_NAMESPACE_BEGIN                \
        namespace nlohmann                               \
        {                                                \
        inline namespace NLOHMANN_JSON_NAMESPACE_CONCAT( \
                    NLOHMANN_JSON_ABI_TAGS,              \
                    NLOHMANN_JSON_NAMESPACE_VERSION)     \
        {
#endif

#ifndef NLOHMANN_JSON_NAMESPACE_END
    #define NLOHMANN_JSON_NAMESPACE_END                                     \
        }  /* namespace (inline namespace) NOLINT(readability/namespace) */ \
        }  // namespace nlohmann
#endif
// #include <nlohmann/detail/conversions/from_json.hpp>
// __ _____ _____ _____
// __| | __| | | | JSON for Modern C++
// | | |__ | | | | | | version 3.12.0
// |_____|_____|_____|_|___| https://github.com/nlohmann/json
//
// SPDX-FileCopyrightText: 2013 - 2025 Niels Lohmann <https://nlohmann.me>
// SPDX-License-Identifier: MIT
#include <algorithm> // transform
#include <array> // array
#include <forward_list> // forward_list
#include <iterator> // inserter, front_inserter, end
#include <map> // map
#ifdef JSON_HAS_CPP_17
#include <optional> // optional
#endif
#include <string> // string
#include <tuple> // tuple, make_tuple
#include <type_traits> // is_arithmetic, is_same, is_enum, underlying_type, is_convertible
#include <unordered_map> // unordered_map
#include <utility> // pair, declval
#include <valarray> // valarray
// #include <nlohmann/detail/exceptions.hpp>
// __ _____ _____ _____
// __| | __| | | | JSON for Modern C++
// | | |__ | | | | | | version 3.12.0
// |_____|_____|_____|_|___| https://github.com/nlohmann/json
//
// SPDX-FileCopyrightText: 2013 - 2025 Niels Lohmann <https://nlohmann.me>
// SPDX-License-Identifier: MIT
#include <cstddef> // nullptr_t
#include <exception> // exception
#if JSON_DIAGNOSTICS
#include <numeric> // accumulate
#endif
#include <stdexcept> // runtime_error
#include <string> // to_string
#include <vector> // vector
// #include <nlohmann/detail/value_t.hpp>
// __ _____ _____ _____
// __| | __| | | | JSON for Modern C++
// | | |__ | | | | | | version 3.12.0
// |_____|_____|_____|_|___| https://github.com/nlohmann/json
//
// SPDX-FileCopyrightText: 2013 - 2025 Niels Lohmann <https://nlohmann.me>
// SPDX-License-Identifier: MIT
#include <array> // array
#include <cstddef> // size_t
#include <cstdint> // uint8_t
#include <string> // string
// #include <nlohmann/detail/macro_scope.hpp>
// __ _____ _____ _____
// __| | __| | | | JSON for Modern C++
// | | |__ | | | | | | version 3.12.0
// |_____|_____|_____|_|___| https://github.com/nlohmann/json
//
// SPDX-FileCopyrightText: 2013 - 2025 Niels Lohmann <https://nlohmann.me>
// SPDX-License-Identifier: MIT
#include <utility> // declval, pair
// #include <nlohmann/detail/meta/detected.hpp>
// __ _____ _____ _____
// __| | __| | | | JSON for Modern C++
// | | |__ | | | | | | version 3.12.0
// |_____|_____|_____|_|___| https://github.com/nlohmann/json
//
// SPDX-FileCopyrightText: 2013 - 2025 Niels Lohmann <https://nlohmann.me>
// SPDX-License-Identifier: MIT
#include <type_traits>
// #include <nlohmann/detail/meta/void_t.hpp>
// __ _____ _____ _____
// __| | __| | | | JSON for Modern C++
// | | |__ | | | | | | version 3.12.0
// |_____|_____|_____|_|___| https://github.com/nlohmann/json
//
// SPDX-FileCopyrightText: 2013 - 2025 Niels Lohmann <https://nlohmann.me>
// SPDX-License-Identifier: MIT
// #include <nlohmann/detail/abi_macros.hpp>
NLOHMANN_JSON_NAMESPACE_BEGIN
namespace detail
{

// void_t: maps any well-formed pack of types to `void` (the equivalent of
// C++17's std::void_t). The extra make_void struct indirection — rather than
// a direct `using void_t = void;` — is presumably a workaround for older
// compilers where unused alias-template parameters did not reliably
// participate in SFINAE.
template<typename ...Ts> struct make_void
{
    using type = void;
};
template<typename ...Ts> using void_t = typename make_void<Ts...>::type;

}  // namespace detail
NLOHMANN_JSON_NAMESPACE_END
NLOHMANN_JSON_NAMESPACE_BEGIN
namespace detail
{

// Implementation of the "detection idiom":
// https://en.cppreference.com/w/cpp/experimental/is_detected
// is_detected<Op, Args...> tells at compile time whether the type
// Op<Args...> is well-formed.

// Placeholder type yielded by detected_t when detection fails. All special
// member functions are deleted so it cannot be created or used by accident.
struct nonesuch
{
    nonesuch() = delete;
    ~nonesuch() = delete;
    nonesuch(nonesuch const&) = delete;
    nonesuch(nonesuch const&&) = delete;
    void operator=(nonesuch const&) = delete;
    void operator=(nonesuch&&) = delete;
};

// Primary template: selected when Op<Args...> is ill-formed.
template<class Default,
         class AlwaysVoid,
         template<class...> class Op,
         class... Args>
struct detector
{
    using value_t = std::false_type;
    using type = Default;
};

// Partial specialization: selected (via SFINAE on void_t) when Op<Args...>
// is well-formed.
template<class Default, template<class...> class Op, class... Args>
struct detector<Default, void_t<Op<Args...>>, Op, Args...>
{
    using value_t = std::true_type;
    using type = Op<Args...>;
};

// std::true_type iff Op<Args...> is well-formed.
template<template<class...> class Op, class... Args>
using is_detected = typename detector<nonesuch, void, Op, Args...>::value_t;

// Class (non-alias) form of is_detected, for contexts that need deferred
// instantiation.
template<template<class...> class Op, class... Args>
struct is_detected_lazy : is_detected<Op, Args...> { };

// Op<Args...> when well-formed, otherwise nonesuch.
template<template<class...> class Op, class... Args>
using detected_t = typename detector<nonesuch, void, Op, Args...>::type;

// Like detected_t, but falls back to the caller-supplied Default.
template<class Default, template<class...> class Op, class... Args>
using detected_or = detector<Default, void, Op, Args...>;

template<class Default, template<class...> class Op, class... Args>
using detected_or_t = typename detected_or<Default, Op, Args...>::type;

// Is the detected type exactly Expected / convertible to To?
template<class Expected, template<class...> class Op, class... Args>
using is_detected_exact = std::is_same<Expected, detected_t<Op, Args...>>;

template<class To, template<class...> class Op, class... Args>
using is_detected_convertible =
    std::is_convertible<detected_t<Op, Args...>, To>;

}  // namespace detail
NLOHMANN_JSON_NAMESPACE_END
// #include <nlohmann/thirdparty/hedley/hedley.hpp>
// __ _____ _____ _____
// __| | __| | | | JSON for Modern C++
// | | |__ | | | | | | version 3.12.0
// |_____|_____|_____|_|___| https://github.com/nlohmann/json
//
// SPDX-FileCopyrightText: 2013 - 2025 Niels Lohmann <https://nlohmann.me>
// SPDX-FileCopyrightText: 2016 - 2021 Evan Nemerson <evan@nemerson.com>
// SPDX-License-Identifier: MIT
/* Hedley - https://nemequ.github.io/hedley
* Created by Evan Nemerson <evan@nemerson.com>
*/
#if !defined(JSON_HEDLEY_VERSION) || (JSON_HEDLEY_VERSION < 15)
#if defined(JSON_HEDLEY_VERSION)
#undef JSON_HEDLEY_VERSION
#endif
#define JSON_HEDLEY_VERSION 15
#if defined(JSON_HEDLEY_STRINGIFY_EX)
#undef JSON_HEDLEY_STRINGIFY_EX
#endif
#define JSON_HEDLEY_STRINGIFY_EX(x) #x
#if defined(JSON_HEDLEY_STRINGIFY)
#undef JSON_HEDLEY_STRINGIFY
#endif
#define JSON_HEDLEY_STRINGIFY(x) JSON_HEDLEY_STRINGIFY_EX(x)
#if defined(JSON_HEDLEY_CONCAT_EX)
#undef JSON_HEDLEY_CONCAT_EX
#endif
#define JSON_HEDLEY_CONCAT_EX(a,b) a##b
#if defined(JSON_HEDLEY_CONCAT)
#undef JSON_HEDLEY_CONCAT
#endif
#define JSON_HEDLEY_CONCAT(a,b) JSON_HEDLEY_CONCAT_EX(a,b)
#if defined(JSON_HEDLEY_CONCAT3_EX)
#undef JSON_HEDLEY_CONCAT3_EX
#endif
#define JSON_HEDLEY_CONCAT3_EX(a,b,c) a##b##c
#if defined(JSON_HEDLEY_CONCAT3)
#undef JSON_HEDLEY_CONCAT3
#endif
#define JSON_HEDLEY_CONCAT3(a,b,c) JSON_HEDLEY_CONCAT3_EX(a,b,c)
#if defined(JSON_HEDLEY_VERSION_ENCODE)
#undef JSON_HEDLEY_VERSION_ENCODE
#endif
#define JSON_HEDLEY_VERSION_ENCODE(major,minor,revision) (((major) * 1000000) + ((minor) * 1000) + (revision))
#if defined(JSON_HEDLEY_VERSION_DECODE_MAJOR)
#undef JSON_HEDLEY_VERSION_DECODE_MAJOR
#endif
#define JSON_HEDLEY_VERSION_DECODE_MAJOR(version) ((version) / 1000000)
#if defined(JSON_HEDLEY_VERSION_DECODE_MINOR)
#undef JSON_HEDLEY_VERSION_DECODE_MINOR
#endif
#define JSON_HEDLEY_VERSION_DECODE_MINOR(version) (((version) % 1000000) / 1000)
#if defined(JSON_HEDLEY_VERSION_DECODE_REVISION)
#undef JSON_HEDLEY_VERSION_DECODE_REVISION
#endif
#define JSON_HEDLEY_VERSION_DECODE_REVISION(version) ((version) % 1000)
#if defined(JSON_HEDLEY_GNUC_VERSION)
#undef JSON_HEDLEY_GNUC_VERSION
#endif
#if defined(__GNUC__) && defined(__GNUC_PATCHLEVEL__)
#define JSON_HEDLEY_GNUC_VERSION JSON_HEDLEY_VERSION_ENCODE(__GNUC__, __GNUC_MINOR__, __GNUC_PATCHLEVEL__)
#elif defined(__GNUC__)
#define JSON_HEDLEY_GNUC_VERSION JSON_HEDLEY_VERSION_ENCODE(__GNUC__, __GNUC_MINOR__, 0)
#endif
#if defined(JSON_HEDLEY_GNUC_VERSION_CHECK)
#undef JSON_HEDLEY_GNUC_VERSION_CHECK
#endif
#if defined(JSON_HEDLEY_GNUC_VERSION)
#define JSON_HEDLEY_GNUC_VERSION_CHECK(major,minor,patch) (JSON_HEDLEY_GNUC_VERSION >= JSON_HEDLEY_VERSION_ENCODE(major, minor, patch))
#else
#define JSON_HEDLEY_GNUC_VERSION_CHECK(major,minor,patch) (0)
#endif
#if defined(JSON_HEDLEY_MSVC_VERSION)
#undef JSON_HEDLEY_MSVC_VERSION
#endif
#if defined(_MSC_FULL_VER) && (_MSC_FULL_VER >= 140000000) && !defined(__ICL)
#define JSON_HEDLEY_MSVC_VERSION JSON_HEDLEY_VERSION_ENCODE(_MSC_FULL_VER / 10000000, (_MSC_FULL_VER % 10000000) / 100000, (_MSC_FULL_VER % 100000) / 100)
#elif defined(_MSC_FULL_VER) && !defined(__ICL)
#define JSON_HEDLEY_MSVC_VERSION JSON_HEDLEY_VERSION_ENCODE(_MSC_FULL_VER / 1000000, (_MSC_FULL_VER % 1000000) / 10000, (_MSC_FULL_VER % 10000) / 10)
#elif defined(_MSC_VER) && !defined(__ICL)
#define JSON_HEDLEY_MSVC_VERSION JSON_HEDLEY_VERSION_ENCODE(_MSC_VER / 100, _MSC_VER % 100, 0)
#endif
#if defined(JSON_HEDLEY_MSVC_VERSION_CHECK)
#undef JSON_HEDLEY_MSVC_VERSION_CHECK
#endif
#if !defined(JSON_HEDLEY_MSVC_VERSION)
#define JSON_HEDLEY_MSVC_VERSION_CHECK(major,minor,patch) (0)
#elif defined(_MSC_VER) && (_MSC_VER >= 1400)
#define JSON_HEDLEY_MSVC_VERSION_CHECK(major,minor,patch) (_MSC_FULL_VER >= ((major * 10000000) + (minor * 100000) + (patch)))
#elif defined(_MSC_VER) && (_MSC_VER >= 1200)
#define JSON_HEDLEY_MSVC_VERSION_CHECK(major,minor,patch) (_MSC_FULL_VER >= ((major * 1000000) + (minor * 10000) + (patch)))
#else
#define JSON_HEDLEY_MSVC_VERSION_CHECK(major,minor,patch) (_MSC_VER >= ((major * 100) + (minor)))
#endif
#if defined(JSON_HEDLEY_INTEL_VERSION)
#undef JSON_HEDLEY_INTEL_VERSION
#endif
#if defined(__INTEL_COMPILER) && defined(__INTEL_COMPILER_UPDATE) && !defined(__ICL)
#define JSON_HEDLEY_INTEL_VERSION JSON_HEDLEY_VERSION_ENCODE(__INTEL_COMPILER / 100, __INTEL_COMPILER % 100, __INTEL_COMPILER_UPDATE)
#elif defined(__INTEL_COMPILER) && !defined(__ICL)
#define JSON_HEDLEY_INTEL_VERSION JSON_HEDLEY_VERSION_ENCODE(__INTEL_COMPILER / 100, __INTEL_COMPILER % 100, 0)
#endif
#if defined(JSON_HEDLEY_INTEL_VERSION_CHECK)
#undef JSON_HEDLEY_INTEL_VERSION_CHECK
#endif
#if defined(JSON_HEDLEY_INTEL_VERSION)
#define JSON_HEDLEY_INTEL_VERSION_CHECK(major,minor,patch) (JSON_HEDLEY_INTEL_VERSION >= JSON_HEDLEY_VERSION_ENCODE(major, minor, patch))
#else
#define JSON_HEDLEY_INTEL_VERSION_CHECK(major,minor,patch) (0)
#endif
#if defined(JSON_HEDLEY_INTEL_CL_VERSION)
#undef JSON_HEDLEY_INTEL_CL_VERSION
#endif
#if defined(__INTEL_COMPILER) && defined(__INTEL_COMPILER_UPDATE) && defined(__ICL)
#define JSON_HEDLEY_INTEL_CL_VERSION JSON_HEDLEY_VERSION_ENCODE(__INTEL_COMPILER, __INTEL_COMPILER_UPDATE, 0)
#endif
#if defined(JSON_HEDLEY_INTEL_CL_VERSION_CHECK)
#undef JSON_HEDLEY_INTEL_CL_VERSION_CHECK
#endif
#if defined(JSON_HEDLEY_INTEL_CL_VERSION)
#define JSON_HEDLEY_INTEL_CL_VERSION_CHECK(major,minor,patch) (JSON_HEDLEY_INTEL_CL_VERSION >= JSON_HEDLEY_VERSION_ENCODE(major, minor, patch))
#else
#define JSON_HEDLEY_INTEL_CL_VERSION_CHECK(major,minor,patch) (0)
#endif
#if defined(JSON_HEDLEY_PGI_VERSION)
#undef JSON_HEDLEY_PGI_VERSION
#endif
#if defined(__PGI) && defined(__PGIC__) && defined(__PGIC_MINOR__) && defined(__PGIC_PATCHLEVEL__)
#define JSON_HEDLEY_PGI_VERSION JSON_HEDLEY_VERSION_ENCODE(__PGIC__, __PGIC_MINOR__, __PGIC_PATCHLEVEL__)
#endif
#if defined(JSON_HEDLEY_PGI_VERSION_CHECK)
#undef JSON_HEDLEY_PGI_VERSION_CHECK
#endif
#if defined(JSON_HEDLEY_PGI_VERSION)
#define JSON_HEDLEY_PGI_VERSION_CHECK(major,minor,patch) (JSON_HEDLEY_PGI_VERSION >= JSON_HEDLEY_VERSION_ENCODE(major, minor, patch))
#else
#define JSON_HEDLEY_PGI_VERSION_CHECK(major,minor,patch) (0)
#endif
#if defined(JSON_HEDLEY_SUNPRO_VERSION)
#undef JSON_HEDLEY_SUNPRO_VERSION
#endif
#if defined(__SUNPRO_C) && (__SUNPRO_C > 0x1000)
#define JSON_HEDLEY_SUNPRO_VERSION JSON_HEDLEY_VERSION_ENCODE((((__SUNPRO_C >> 16) & 0xf) * 10) + ((__SUNPRO_C >> 12) & 0xf), (((__SUNPRO_C >> 8) & 0xf) * 10) + ((__SUNPRO_C >> 4) & 0xf), (__SUNPRO_C & 0xf) * 10)
#elif defined(__SUNPRO_C)
#define JSON_HEDLEY_SUNPRO_VERSION JSON_HEDLEY_VERSION_ENCODE((__SUNPRO_C >> 8) & 0xf, (__SUNPRO_C >> 4) & 0xf, (__SUNPRO_C) & 0xf)
#elif defined(__SUNPRO_CC) && (__SUNPRO_CC > 0x1000)
#define JSON_HEDLEY_SUNPRO_VERSION JSON_HEDLEY_VERSION_ENCODE((((__SUNPRO_CC >> 16) & 0xf) * 10) + ((__SUNPRO_CC >> 12) & 0xf), (((__SUNPRO_CC >> 8) & 0xf) * 10) + ((__SUNPRO_CC >> 4) & 0xf), (__SUNPRO_CC & 0xf) * 10)
#elif defined(__SUNPRO_CC)
#define JSON_HEDLEY_SUNPRO_VERSION JSON_HEDLEY_VERSION_ENCODE((__SUNPRO_CC >> 8) & 0xf, (__SUNPRO_CC >> 4) & 0xf, (__SUNPRO_CC) & 0xf)
#endif
#if defined(JSON_HEDLEY_SUNPRO_VERSION_CHECK)
#undef JSON_HEDLEY_SUNPRO_VERSION_CHECK
#endif
#if defined(JSON_HEDLEY_SUNPRO_VERSION)
#define JSON_HEDLEY_SUNPRO_VERSION_CHECK(major,minor,patch) (JSON_HEDLEY_SUNPRO_VERSION >= JSON_HEDLEY_VERSION_ENCODE(major, minor, patch))
#else
#define JSON_HEDLEY_SUNPRO_VERSION_CHECK(major,minor,patch) (0)
#endif
#if defined(JSON_HEDLEY_EMSCRIPTEN_VERSION)
#undef JSON_HEDLEY_EMSCRIPTEN_VERSION
#endif
#if defined(__EMSCRIPTEN__)
#define JSON_HEDLEY_EMSCRIPTEN_VERSION JSON_HEDLEY_VERSION_ENCODE(__EMSCRIPTEN_major__, __EMSCRIPTEN_minor__, __EMSCRIPTEN_tiny__)
#endif
#if defined(JSON_HEDLEY_EMSCRIPTEN_VERSION_CHECK)
#undef JSON_HEDLEY_EMSCRIPTEN_VERSION_CHECK
#endif
#if defined(JSON_HEDLEY_EMSCRIPTEN_VERSION)
#define JSON_HEDLEY_EMSCRIPTEN_VERSION_CHECK(major,minor,patch) (JSON_HEDLEY_EMSCRIPTEN_VERSION >= JSON_HEDLEY_VERSION_ENCODE(major, minor, patch))
#else
#define JSON_HEDLEY_EMSCRIPTEN_VERSION_CHECK(major,minor,patch) (0)
#endif
#if defined(JSON_HEDLEY_ARM_VERSION)
#undef JSON_HEDLEY_ARM_VERSION
#endif
#if defined(__CC_ARM) && defined(__ARMCOMPILER_VERSION)
#define JSON_HEDLEY_ARM_VERSION JSON_HEDLEY_VERSION_ENCODE(__ARMCOMPILER_VERSION / 1000000, (__ARMCOMPILER_VERSION % 1000000) / 10000, (__ARMCOMPILER_VERSION % 10000) / 100)
#elif defined(__CC_ARM) && defined(__ARMCC_VERSION)
#define JSON_HEDLEY_ARM_VERSION JSON_HEDLEY_VERSION_ENCODE(__ARMCC_VERSION / 1000000, (__ARMCC_VERSION % 1000000) / 10000, (__ARMCC_VERSION % 10000) / 100)
#endif
#if defined(JSON_HEDLEY_ARM_VERSION_CHECK)
#undef JSON_HEDLEY_ARM_VERSION_CHECK
#endif
#if defined(JSON_HEDLEY_ARM_VERSION)
#define JSON_HEDLEY_ARM_VERSION_CHECK(major,minor,patch) (JSON_HEDLEY_ARM_VERSION >= JSON_HEDLEY_VERSION_ENCODE(major, minor, patch))
#else
#define JSON_HEDLEY_ARM_VERSION_CHECK(major,minor,patch) (0)
#endif
#if defined(JSON_HEDLEY_IBM_VERSION)
#undef JSON_HEDLEY_IBM_VERSION
#endif
#if defined(__ibmxl__)
#define JSON_HEDLEY_IBM_VERSION JSON_HEDLEY_VERSION_ENCODE(__ibmxl_version__, __ibmxl_release__, __ibmxl_modification__)
#elif defined(__xlC__) && defined(__xlC_ver__)
#define JSON_HEDLEY_IBM_VERSION JSON_HEDLEY_VERSION_ENCODE(__xlC__ >> 8, __xlC__ & 0xff, (__xlC_ver__ >> 8) & 0xff)
#elif defined(__xlC__)
#define JSON_HEDLEY_IBM_VERSION JSON_HEDLEY_VERSION_ENCODE(__xlC__ >> 8, __xlC__ & 0xff, 0)
#endif
#if defined(JSON_HEDLEY_IBM_VERSION_CHECK)
#undef JSON_HEDLEY_IBM_VERSION_CHECK
#endif
#if defined(JSON_HEDLEY_IBM_VERSION)
#define JSON_HEDLEY_IBM_VERSION_CHECK(major,minor,patch) (JSON_HEDLEY_IBM_VERSION >= JSON_HEDLEY_VERSION_ENCODE(major, minor, patch))
#else
#define JSON_HEDLEY_IBM_VERSION_CHECK(major,minor,patch) (0)
#endif
#if defined(JSON_HEDLEY_TI_VERSION)
#undef JSON_HEDLEY_TI_VERSION
#endif
#if \
defined(__TI_COMPILER_VERSION__) && \
( \
defined(__TMS470__) || defined(__TI_ARM__) || \
defined(__MSP430__) || \
defined(__TMS320C2000__) \
)
#if (__TI_COMPILER_VERSION__ >= 16000000)
#define JSON_HEDLEY_TI_VERSION JSON_HEDLEY_VERSION_ENCODE(__TI_COMPILER_VERSION__ / 1000000, (__TI_COMPILER_VERSION__ % 1000000) / 1000, (__TI_COMPILER_VERSION__ % 1000))
#endif
#endif
#if defined(JSON_HEDLEY_TI_VERSION_CHECK)
#undef JSON_HEDLEY_TI_VERSION_CHECK
#endif
#if defined(JSON_HEDLEY_TI_VERSION)
#define JSON_HEDLEY_TI_VERSION_CHECK(major,minor,patch) (JSON_HEDLEY_TI_VERSION >= JSON_HEDLEY_VERSION_ENCODE(major, minor, patch))
#else
#define JSON_HEDLEY_TI_VERSION_CHECK(major,minor,patch) (0)
#endif
#if defined(JSON_HEDLEY_TI_CL2000_VERSION)
#undef JSON_HEDLEY_TI_CL2000_VERSION
#endif
#if defined(__TI_COMPILER_VERSION__) && defined(__TMS320C2000__)
#define JSON_HEDLEY_TI_CL2000_VERSION JSON_HEDLEY_VERSION_ENCODE(__TI_COMPILER_VERSION__ / 1000000, (__TI_COMPILER_VERSION__ % 1000000) / 1000, (__TI_COMPILER_VERSION__ % 1000))
#endif
#if defined(JSON_HEDLEY_TI_CL2000_VERSION_CHECK)
#undef JSON_HEDLEY_TI_CL2000_VERSION_CHECK
#endif
#if defined(JSON_HEDLEY_TI_CL2000_VERSION)
#define JSON_HEDLEY_TI_CL2000_VERSION_CHECK(major,minor,patch) (JSON_HEDLEY_TI_CL2000_VERSION >= JSON_HEDLEY_VERSION_ENCODE(major, minor, patch))
#else
#define JSON_HEDLEY_TI_CL2000_VERSION_CHECK(major,minor,patch) (0)
#endif
#if defined(JSON_HEDLEY_TI_CL430_VERSION)
#undef JSON_HEDLEY_TI_CL430_VERSION
#endif
#if defined(__TI_COMPILER_VERSION__) && defined(__MSP430__)
#define JSON_HEDLEY_TI_CL430_VERSION JSON_HEDLEY_VERSION_ENCODE(__TI_COMPILER_VERSION__ / 1000000, (__TI_COMPILER_VERSION__ % 1000000) / 1000, (__TI_COMPILER_VERSION__ % 1000))
#endif
#if defined(JSON_HEDLEY_TI_CL430_VERSION_CHECK)
#undef JSON_HEDLEY_TI_CL430_VERSION_CHECK
#endif
#if defined(JSON_HEDLEY_TI_CL430_VERSION)
#define JSON_HEDLEY_TI_CL430_VERSION_CHECK(major,minor,patch) (JSON_HEDLEY_TI_CL430_VERSION >= JSON_HEDLEY_VERSION_ENCODE(major, minor, patch))
#else
#define JSON_HEDLEY_TI_CL430_VERSION_CHECK(major,minor,patch) (0)
#endif
#if defined(JSON_HEDLEY_TI_ARMCL_VERSION)
#undef JSON_HEDLEY_TI_ARMCL_VERSION
#endif
#if defined(__TI_COMPILER_VERSION__) && (defined(__TMS470__) || defined(__TI_ARM__))
#define JSON_HEDLEY_TI_ARMCL_VERSION JSON_HEDLEY_VERSION_ENCODE(__TI_COMPILER_VERSION__ / 1000000, (__TI_COMPILER_VERSION__ % 1000000) / 1000, (__TI_COMPILER_VERSION__ % 1000))
#endif
#if defined(JSON_HEDLEY_TI_ARMCL_VERSION_CHECK)
#undef JSON_HEDLEY_TI_ARMCL_VERSION_CHECK
#endif
#if defined(JSON_HEDLEY_TI_ARMCL_VERSION)
#define JSON_HEDLEY_TI_ARMCL_VERSION_CHECK(major,minor,patch) (JSON_HEDLEY_TI_ARMCL_VERSION >= JSON_HEDLEY_VERSION_ENCODE(major, minor, patch))
#else
#define JSON_HEDLEY_TI_ARMCL_VERSION_CHECK(major,minor,patch) (0)
#endif
#if defined(JSON_HEDLEY_TI_CL6X_VERSION)
#undef JSON_HEDLEY_TI_CL6X_VERSION
#endif
#if defined(__TI_COMPILER_VERSION__) && defined(__TMS320C6X__)
#define JSON_HEDLEY_TI_CL6X_VERSION JSON_HEDLEY_VERSION_ENCODE(__TI_COMPILER_VERSION__ / 1000000, (__TI_COMPILER_VERSION__ % 1000000) / 1000, (__TI_COMPILER_VERSION__ % 1000))
#endif
#if defined(JSON_HEDLEY_TI_CL6X_VERSION_CHECK)
#undef JSON_HEDLEY_TI_CL6X_VERSION_CHECK
#endif
#if defined(JSON_HEDLEY_TI_CL6X_VERSION)
#define JSON_HEDLEY_TI_CL6X_VERSION_CHECK(major,minor,patch) (JSON_HEDLEY_TI_CL6X_VERSION >= JSON_HEDLEY_VERSION_ENCODE(major, minor, patch))
#else
#define JSON_HEDLEY_TI_CL6X_VERSION_CHECK(major,minor,patch) (0)
#endif
#if defined(JSON_HEDLEY_TI_CL7X_VERSION)
#undef JSON_HEDLEY_TI_CL7X_VERSION
#endif
#if defined(__TI_COMPILER_VERSION__) && defined(__C7000__)
#define JSON_HEDLEY_TI_CL7X_VERSION JSON_HEDLEY_VERSION_ENCODE(__TI_COMPILER_VERSION__ / 1000000, (__TI_COMPILER_VERSION__ % 1000000) / 1000, (__TI_COMPILER_VERSION__ % 1000))
#endif
#if defined(JSON_HEDLEY_TI_CL7X_VERSION_CHECK)
#undef JSON_HEDLEY_TI_CL7X_VERSION_CHECK
#endif
#if defined(JSON_HEDLEY_TI_CL7X_VERSION)
#define JSON_HEDLEY_TI_CL7X_VERSION_CHECK(major,minor,patch) (JSON_HEDLEY_TI_CL7X_VERSION >= JSON_HEDLEY_VERSION_ENCODE(major, minor, patch))
#else
#define JSON_HEDLEY_TI_CL7X_VERSION_CHECK(major,minor,patch) (0)
#endif
#if defined(JSON_HEDLEY_TI_CLPRU_VERSION)
#undef JSON_HEDLEY_TI_CLPRU_VERSION
#endif
#if defined(__TI_COMPILER_VERSION__) && defined(__PRU__)
#define JSON_HEDLEY_TI_CLPRU_VERSION JSON_HEDLEY_VERSION_ENCODE(__TI_COMPILER_VERSION__ / 1000000, (__TI_COMPILER_VERSION__ % 1000000) / 1000, (__TI_COMPILER_VERSION__ % 1000))
#endif
#if defined(JSON_HEDLEY_TI_CLPRU_VERSION_CHECK)
#undef JSON_HEDLEY_TI_CLPRU_VERSION_CHECK
#endif
#if defined(JSON_HEDLEY_TI_CLPRU_VERSION)
#define JSON_HEDLEY_TI_CLPRU_VERSION_CHECK(major,minor,patch) (JSON_HEDLEY_TI_CLPRU_VERSION >= JSON_HEDLEY_VERSION_ENCODE(major, minor, patch))
#else
#define JSON_HEDLEY_TI_CLPRU_VERSION_CHECK(major,minor,patch) (0)
#endif
#if defined(JSON_HEDLEY_CRAY_VERSION)
#undef JSON_HEDLEY_CRAY_VERSION
#endif
#if defined(_CRAYC)
#if defined(_RELEASE_PATCHLEVEL)
#define JSON_HEDLEY_CRAY_VERSION JSON_HEDLEY_VERSION_ENCODE(_RELEASE_MAJOR, _RELEASE_MINOR, _RELEASE_PATCHLEVEL)
#else
#define JSON_HEDLEY_CRAY_VERSION JSON_HEDLEY_VERSION_ENCODE(_RELEASE_MAJOR, _RELEASE_MINOR, 0)
#endif
#endif
#if defined(JSON_HEDLEY_CRAY_VERSION_CHECK)
#undef JSON_HEDLEY_CRAY_VERSION_CHECK
#endif
#if defined(JSON_HEDLEY_CRAY_VERSION)
#define JSON_HEDLEY_CRAY_VERSION_CHECK(major,minor,patch) (JSON_HEDLEY_CRAY_VERSION >= JSON_HEDLEY_VERSION_ENCODE(major, minor, patch))
#else
#define JSON_HEDLEY_CRAY_VERSION_CHECK(major,minor,patch) (0)
#endif
/* Per-compiler version detection (vendored Hedley, via nlohmann/json).
   For each toolchain the pattern is identical: #undef any previous
   definition (so the header is safe to re-include), encode the
   compiler's native version macro with JSON_HEDLEY_VERSION_ENCODE,
   and define a _VERSION_CHECK(major,minor,patch) predicate that is a
   constant (0) when that compiler is not in use.
   NOTE(review): third-party vendored code - fix issues by re-vendoring
   upstream Hedley rather than editing here. */
/* IAR Embedded Workbench: __VER__ packing changed at 1000
   (newer: MMMmmmppp; older: MMmm). */
#if defined(JSON_HEDLEY_IAR_VERSION)
#undef JSON_HEDLEY_IAR_VERSION
#endif
#if defined(__IAR_SYSTEMS_ICC__)
#if __VER__ > 1000
#define JSON_HEDLEY_IAR_VERSION JSON_HEDLEY_VERSION_ENCODE((__VER__ / 1000000), ((__VER__ / 1000) % 1000), (__VER__ % 1000))
#else
#define JSON_HEDLEY_IAR_VERSION JSON_HEDLEY_VERSION_ENCODE(__VER__ / 100, __VER__ % 100, 0)
#endif
#endif
#if defined(JSON_HEDLEY_IAR_VERSION_CHECK)
#undef JSON_HEDLEY_IAR_VERSION_CHECK
#endif
#if defined(JSON_HEDLEY_IAR_VERSION)
#define JSON_HEDLEY_IAR_VERSION_CHECK(major,minor,patch) (JSON_HEDLEY_IAR_VERSION >= JSON_HEDLEY_VERSION_ENCODE(major, minor, patch))
#else
#define JSON_HEDLEY_IAR_VERSION_CHECK(major,minor,patch) (0)
#endif
/* TinyCC: __TINYC__ is decimal Mmmpp. */
#if defined(JSON_HEDLEY_TINYC_VERSION)
#undef JSON_HEDLEY_TINYC_VERSION
#endif
#if defined(__TINYC__)
#define JSON_HEDLEY_TINYC_VERSION JSON_HEDLEY_VERSION_ENCODE(__TINYC__ / 1000, (__TINYC__ / 100) % 10, __TINYC__ % 100)
#endif
#if defined(JSON_HEDLEY_TINYC_VERSION_CHECK)
#undef JSON_HEDLEY_TINYC_VERSION_CHECK
#endif
#if defined(JSON_HEDLEY_TINYC_VERSION)
#define JSON_HEDLEY_TINYC_VERSION_CHECK(major,minor,patch) (JSON_HEDLEY_TINYC_VERSION >= JSON_HEDLEY_VERSION_ENCODE(major, minor, patch))
#else
#define JSON_HEDLEY_TINYC_VERSION_CHECK(major,minor,patch) (0)
#endif
/* Digital Mars: __DMC__ packs version as nibbles (0xMmp). */
#if defined(JSON_HEDLEY_DMC_VERSION)
#undef JSON_HEDLEY_DMC_VERSION
#endif
#if defined(__DMC__)
#define JSON_HEDLEY_DMC_VERSION JSON_HEDLEY_VERSION_ENCODE(__DMC__ >> 8, (__DMC__ >> 4) & 0xf, __DMC__ & 0xf)
#endif
#if defined(JSON_HEDLEY_DMC_VERSION_CHECK)
#undef JSON_HEDLEY_DMC_VERSION_CHECK
#endif
#if defined(JSON_HEDLEY_DMC_VERSION)
#define JSON_HEDLEY_DMC_VERSION_CHECK(major,minor,patch) (JSON_HEDLEY_DMC_VERSION >= JSON_HEDLEY_VERSION_ENCODE(major, minor, patch))
#else
#define JSON_HEDLEY_DMC_VERSION_CHECK(major,minor,patch) (0)
#endif
/* CompCert: __COMPCERT_VERSION__ is decimal MMmmpp. */
#if defined(JSON_HEDLEY_COMPCERT_VERSION)
#undef JSON_HEDLEY_COMPCERT_VERSION
#endif
#if defined(__COMPCERT_VERSION__)
#define JSON_HEDLEY_COMPCERT_VERSION JSON_HEDLEY_VERSION_ENCODE(__COMPCERT_VERSION__ / 10000, (__COMPCERT_VERSION__ / 100) % 100, __COMPCERT_VERSION__ % 100)
#endif
#if defined(JSON_HEDLEY_COMPCERT_VERSION_CHECK)
#undef JSON_HEDLEY_COMPCERT_VERSION_CHECK
#endif
#if defined(JSON_HEDLEY_COMPCERT_VERSION)
#define JSON_HEDLEY_COMPCERT_VERSION_CHECK(major,minor,patch) (JSON_HEDLEY_COMPCERT_VERSION >= JSON_HEDLEY_VERSION_ENCODE(major, minor, patch))
#else
#define JSON_HEDLEY_COMPCERT_VERSION_CHECK(major,minor,patch) (0)
#endif
/* Pelles C: __POCC__ is decimal Mmm; no patch component. */
#if defined(JSON_HEDLEY_PELLES_VERSION)
#undef JSON_HEDLEY_PELLES_VERSION
#endif
#if defined(__POCC__)
#define JSON_HEDLEY_PELLES_VERSION JSON_HEDLEY_VERSION_ENCODE(__POCC__ / 100, __POCC__ % 100, 0)
#endif
#if defined(JSON_HEDLEY_PELLES_VERSION_CHECK)
#undef JSON_HEDLEY_PELLES_VERSION_CHECK
#endif
#if defined(JSON_HEDLEY_PELLES_VERSION)
#define JSON_HEDLEY_PELLES_VERSION_CHECK(major,minor,patch) (JSON_HEDLEY_PELLES_VERSION >= JSON_HEDLEY_VERSION_ENCODE(major, minor, patch))
#else
#define JSON_HEDLEY_PELLES_VERSION_CHECK(major,minor,patch) (0)
#endif
/* MCST LCC (Elbrus): requires both __LCC__ (decimal Mmm) and
   __LCC_MINOR__ (used as the patch field) to be defined. */
#if defined(JSON_HEDLEY_MCST_LCC_VERSION)
#undef JSON_HEDLEY_MCST_LCC_VERSION
#endif
#if defined(__LCC__) && defined(__LCC_MINOR__)
#define JSON_HEDLEY_MCST_LCC_VERSION JSON_HEDLEY_VERSION_ENCODE(__LCC__ / 100, __LCC__ % 100, __LCC_MINOR__)
#endif
#if defined(JSON_HEDLEY_MCST_LCC_VERSION_CHECK)
#undef JSON_HEDLEY_MCST_LCC_VERSION_CHECK
#endif
#if defined(JSON_HEDLEY_MCST_LCC_VERSION)
#define JSON_HEDLEY_MCST_LCC_VERSION_CHECK(major,minor,patch) (JSON_HEDLEY_MCST_LCC_VERSION >= JSON_HEDLEY_VERSION_ENCODE(major, minor, patch))
#else
#define JSON_HEDLEY_MCST_LCC_VERSION_CHECK(major,minor,patch) (0)
#endif
/* Real-GCC detection: many compilers (clang, ICC, PGI, ARM, Cray, TI,
   CompCert, MCST LCC) impersonate GCC by defining __GNUC__, so
   JSON_HEDLEY_GNUC_VERSION alone does not prove the compiler is GCC.
   JSON_HEDLEY_GCC_VERSION is defined only when a GNUC version exists
   AND none of the known impersonators has been detected. */
#if defined(JSON_HEDLEY_GCC_VERSION)
#undef JSON_HEDLEY_GCC_VERSION
#endif
#if \
defined(JSON_HEDLEY_GNUC_VERSION) && \
!defined(__clang__) && \
!defined(JSON_HEDLEY_INTEL_VERSION) && \
!defined(JSON_HEDLEY_PGI_VERSION) && \
!defined(JSON_HEDLEY_ARM_VERSION) && \
!defined(JSON_HEDLEY_CRAY_VERSION) && \
!defined(JSON_HEDLEY_TI_VERSION) && \
!defined(JSON_HEDLEY_TI_ARMCL_VERSION) && \
!defined(JSON_HEDLEY_TI_CL430_VERSION) && \
!defined(JSON_HEDLEY_TI_CL2000_VERSION) && \
!defined(JSON_HEDLEY_TI_CL6X_VERSION) && \
!defined(JSON_HEDLEY_TI_CL7X_VERSION) && \
!defined(JSON_HEDLEY_TI_CLPRU_VERSION) && \
!defined(__COMPCERT__) && \
!defined(JSON_HEDLEY_MCST_LCC_VERSION)
#define JSON_HEDLEY_GCC_VERSION JSON_HEDLEY_GNUC_VERSION
#endif
#if defined(JSON_HEDLEY_GCC_VERSION_CHECK)
#undef JSON_HEDLEY_GCC_VERSION_CHECK
#endif
#if defined(JSON_HEDLEY_GCC_VERSION)
#define JSON_HEDLEY_GCC_VERSION_CHECK(major,minor,patch) (JSON_HEDLEY_GCC_VERSION >= JSON_HEDLEY_VERSION_ENCODE(major, minor, patch))
#else
#define JSON_HEDLEY_GCC_VERSION_CHECK(major,minor,patch) (0)
#endif
/* Attribute-probe wrappers. JSON_HEDLEY_HAS_ATTRIBUTE(x) is a safe
   __has_attribute(x): it expands to (0) when the compiler lacks the
   probe (IAR is additionally required to be >= 8.5.9, where the probe
   is trustworthy). The *_GNUC_HAS_* / *_GCC_HAS_* variants take a
   fallback version triple used when __has_attribute is unavailable. */
#if defined(JSON_HEDLEY_HAS_ATTRIBUTE)
#undef JSON_HEDLEY_HAS_ATTRIBUTE
#endif
#if \
defined(__has_attribute) && \
( \
(!defined(JSON_HEDLEY_IAR_VERSION) || JSON_HEDLEY_IAR_VERSION_CHECK(8,5,9)) \
)
# define JSON_HEDLEY_HAS_ATTRIBUTE(attribute) __has_attribute(attribute)
#else
# define JSON_HEDLEY_HAS_ATTRIBUTE(attribute) (0)
#endif
#if defined(JSON_HEDLEY_GNUC_HAS_ATTRIBUTE)
#undef JSON_HEDLEY_GNUC_HAS_ATTRIBUTE
#endif
#if defined(__has_attribute)
#define JSON_HEDLEY_GNUC_HAS_ATTRIBUTE(attribute,major,minor,patch) JSON_HEDLEY_HAS_ATTRIBUTE(attribute)
#else
#define JSON_HEDLEY_GNUC_HAS_ATTRIBUTE(attribute,major,minor,patch) JSON_HEDLEY_GNUC_VERSION_CHECK(major,minor,patch)
#endif
#if defined(JSON_HEDLEY_GCC_HAS_ATTRIBUTE)
#undef JSON_HEDLEY_GCC_HAS_ATTRIBUTE
#endif
#if defined(__has_attribute)
#define JSON_HEDLEY_GCC_HAS_ATTRIBUTE(attribute,major,minor,patch) JSON_HEDLEY_HAS_ATTRIBUTE(attribute)
#else
#define JSON_HEDLEY_GCC_HAS_ATTRIBUTE(attribute,major,minor,patch) JSON_HEDLEY_GCC_VERSION_CHECK(major,minor,patch)
#endif
/* C++ attribute probes: __has_cpp_attribute only exists in C++ mode,
   and is unreliable on old SunPro (< 5.15). The _NS variant probes a
   namespaced attribute (ns::attribute), which additionally breaks on
   PGI, IAR, old SunPro, and MSVC before 19.20, so those report 0. */
#if defined(JSON_HEDLEY_HAS_CPP_ATTRIBUTE)
#undef JSON_HEDLEY_HAS_CPP_ATTRIBUTE
#endif
#if \
defined(__has_cpp_attribute) && \
defined(__cplusplus) && \
(!defined(JSON_HEDLEY_SUNPRO_VERSION) || JSON_HEDLEY_SUNPRO_VERSION_CHECK(5,15,0))
#define JSON_HEDLEY_HAS_CPP_ATTRIBUTE(attribute) __has_cpp_attribute(attribute)
#else
#define JSON_HEDLEY_HAS_CPP_ATTRIBUTE(attribute) (0)
#endif
#if defined(JSON_HEDLEY_HAS_CPP_ATTRIBUTE_NS)
#undef JSON_HEDLEY_HAS_CPP_ATTRIBUTE_NS
#endif
#if !defined(__cplusplus) || !defined(__has_cpp_attribute)
#define JSON_HEDLEY_HAS_CPP_ATTRIBUTE_NS(ns,attribute) (0)
#elif \
!defined(JSON_HEDLEY_PGI_VERSION) && \
!defined(JSON_HEDLEY_IAR_VERSION) && \
(!defined(JSON_HEDLEY_SUNPRO_VERSION) || JSON_HEDLEY_SUNPRO_VERSION_CHECK(5,15,0)) && \
(!defined(JSON_HEDLEY_MSVC_VERSION) || JSON_HEDLEY_MSVC_VERSION_CHECK(19,20,0))
#define JSON_HEDLEY_HAS_CPP_ATTRIBUTE_NS(ns,attribute) JSON_HEDLEY_HAS_CPP_ATTRIBUTE(ns::attribute)
#else
#define JSON_HEDLEY_HAS_CPP_ATTRIBUTE_NS(ns,attribute) (0)
#endif
#if defined(JSON_HEDLEY_GNUC_HAS_CPP_ATTRIBUTE)
#undef JSON_HEDLEY_GNUC_HAS_CPP_ATTRIBUTE
#endif
#if defined(__has_cpp_attribute) && defined(__cplusplus)
#define JSON_HEDLEY_GNUC_HAS_CPP_ATTRIBUTE(attribute,major,minor,patch) __has_cpp_attribute(attribute)
#else
#define JSON_HEDLEY_GNUC_HAS_CPP_ATTRIBUTE(attribute,major,minor,patch) JSON_HEDLEY_GNUC_VERSION_CHECK(major,minor,patch)
#endif
#if defined(JSON_HEDLEY_GCC_HAS_CPP_ATTRIBUTE)
#undef JSON_HEDLEY_GCC_HAS_CPP_ATTRIBUTE
#endif
#if defined(__has_cpp_attribute) && defined(__cplusplus)
#define JSON_HEDLEY_GCC_HAS_CPP_ATTRIBUTE(attribute,major,minor,patch) __has_cpp_attribute(attribute)
#else
#define JSON_HEDLEY_GCC_HAS_CPP_ATTRIBUTE(attribute,major,minor,patch) JSON_HEDLEY_GCC_VERSION_CHECK(major,minor,patch)
#endif
/* Remaining clang-style feature probes, each following the same
   three-macro pattern as HAS_ATTRIBUTE above:
     JSON_HEDLEY_HAS_X(x)                -> __has_X(x), or (0) if the
                                            probe does not exist;
     JSON_HEDLEY_GNUC_HAS_X(x,maj,mi,pa) -> probe, falling back to a
                                            GNUC version comparison;
     JSON_HEDLEY_GCC_HAS_X(x,maj,mi,pa)  -> probe, falling back to a
                                            real-GCC version comparison.
   Covered probes: __has_builtin, __has_feature, __has_extension,
   __has_declspec_attribute, __has_warning. */
#if defined(JSON_HEDLEY_HAS_BUILTIN)
#undef JSON_HEDLEY_HAS_BUILTIN
#endif
#if defined(__has_builtin)
#define JSON_HEDLEY_HAS_BUILTIN(builtin) __has_builtin(builtin)
#else
#define JSON_HEDLEY_HAS_BUILTIN(builtin) (0)
#endif
#if defined(JSON_HEDLEY_GNUC_HAS_BUILTIN)
#undef JSON_HEDLEY_GNUC_HAS_BUILTIN
#endif
#if defined(__has_builtin)
#define JSON_HEDLEY_GNUC_HAS_BUILTIN(builtin,major,minor,patch) __has_builtin(builtin)
#else
#define JSON_HEDLEY_GNUC_HAS_BUILTIN(builtin,major,minor,patch) JSON_HEDLEY_GNUC_VERSION_CHECK(major,minor,patch)
#endif
#if defined(JSON_HEDLEY_GCC_HAS_BUILTIN)
#undef JSON_HEDLEY_GCC_HAS_BUILTIN
#endif
#if defined(__has_builtin)
#define JSON_HEDLEY_GCC_HAS_BUILTIN(builtin,major,minor,patch) __has_builtin(builtin)
#else
#define JSON_HEDLEY_GCC_HAS_BUILTIN(builtin,major,minor,patch) JSON_HEDLEY_GCC_VERSION_CHECK(major,minor,patch)
#endif
#if defined(JSON_HEDLEY_HAS_FEATURE)
#undef JSON_HEDLEY_HAS_FEATURE
#endif
#if defined(__has_feature)
#define JSON_HEDLEY_HAS_FEATURE(feature) __has_feature(feature)
#else
#define JSON_HEDLEY_HAS_FEATURE(feature) (0)
#endif
#if defined(JSON_HEDLEY_GNUC_HAS_FEATURE)
#undef JSON_HEDLEY_GNUC_HAS_FEATURE
#endif
#if defined(__has_feature)
#define JSON_HEDLEY_GNUC_HAS_FEATURE(feature,major,minor,patch) __has_feature(feature)
#else
#define JSON_HEDLEY_GNUC_HAS_FEATURE(feature,major,minor,patch) JSON_HEDLEY_GNUC_VERSION_CHECK(major,minor,patch)
#endif
#if defined(JSON_HEDLEY_GCC_HAS_FEATURE)
#undef JSON_HEDLEY_GCC_HAS_FEATURE
#endif
#if defined(__has_feature)
#define JSON_HEDLEY_GCC_HAS_FEATURE(feature,major,minor,patch) __has_feature(feature)
#else
#define JSON_HEDLEY_GCC_HAS_FEATURE(feature,major,minor,patch) JSON_HEDLEY_GCC_VERSION_CHECK(major,minor,patch)
#endif
#if defined(JSON_HEDLEY_HAS_EXTENSION)
#undef JSON_HEDLEY_HAS_EXTENSION
#endif
#if defined(__has_extension)
#define JSON_HEDLEY_HAS_EXTENSION(extension) __has_extension(extension)
#else
#define JSON_HEDLEY_HAS_EXTENSION(extension) (0)
#endif
#if defined(JSON_HEDLEY_GNUC_HAS_EXTENSION)
#undef JSON_HEDLEY_GNUC_HAS_EXTENSION
#endif
#if defined(__has_extension)
#define JSON_HEDLEY_GNUC_HAS_EXTENSION(extension,major,minor,patch) __has_extension(extension)
#else
#define JSON_HEDLEY_GNUC_HAS_EXTENSION(extension,major,minor,patch) JSON_HEDLEY_GNUC_VERSION_CHECK(major,minor,patch)
#endif
#if defined(JSON_HEDLEY_GCC_HAS_EXTENSION)
#undef JSON_HEDLEY_GCC_HAS_EXTENSION
#endif
#if defined(__has_extension)
#define JSON_HEDLEY_GCC_HAS_EXTENSION(extension,major,minor,patch) __has_extension(extension)
#else
#define JSON_HEDLEY_GCC_HAS_EXTENSION(extension,major,minor,patch) JSON_HEDLEY_GCC_VERSION_CHECK(major,minor,patch)
#endif
#if defined(JSON_HEDLEY_HAS_DECLSPEC_ATTRIBUTE)
#undef JSON_HEDLEY_HAS_DECLSPEC_ATTRIBUTE
#endif
#if defined(__has_declspec_attribute)
#define JSON_HEDLEY_HAS_DECLSPEC_ATTRIBUTE(attribute) __has_declspec_attribute(attribute)
#else
#define JSON_HEDLEY_HAS_DECLSPEC_ATTRIBUTE(attribute) (0)
#endif
#if defined(JSON_HEDLEY_GNUC_HAS_DECLSPEC_ATTRIBUTE)
#undef JSON_HEDLEY_GNUC_HAS_DECLSPEC_ATTRIBUTE
#endif
#if defined(__has_declspec_attribute)
#define JSON_HEDLEY_GNUC_HAS_DECLSPEC_ATTRIBUTE(attribute,major,minor,patch) __has_declspec_attribute(attribute)
#else
#define JSON_HEDLEY_GNUC_HAS_DECLSPEC_ATTRIBUTE(attribute,major,minor,patch) JSON_HEDLEY_GNUC_VERSION_CHECK(major,minor,patch)
#endif
#if defined(JSON_HEDLEY_GCC_HAS_DECLSPEC_ATTRIBUTE)
#undef JSON_HEDLEY_GCC_HAS_DECLSPEC_ATTRIBUTE
#endif
#if defined(__has_declspec_attribute)
#define JSON_HEDLEY_GCC_HAS_DECLSPEC_ATTRIBUTE(attribute,major,minor,patch) __has_declspec_attribute(attribute)
#else
#define JSON_HEDLEY_GCC_HAS_DECLSPEC_ATTRIBUTE(attribute,major,minor,patch) JSON_HEDLEY_GCC_VERSION_CHECK(major,minor,patch)
#endif
#if defined(JSON_HEDLEY_HAS_WARNING)
#undef JSON_HEDLEY_HAS_WARNING
#endif
#if defined(__has_warning)
#define JSON_HEDLEY_HAS_WARNING(warning) __has_warning(warning)
#else
#define JSON_HEDLEY_HAS_WARNING(warning) (0)
#endif
#if defined(JSON_HEDLEY_GNUC_HAS_WARNING)
#undef JSON_HEDLEY_GNUC_HAS_WARNING
#endif
#if defined(__has_warning)
#define JSON_HEDLEY_GNUC_HAS_WARNING(warning,major,minor,patch) __has_warning(warning)
#else
#define JSON_HEDLEY_GNUC_HAS_WARNING(warning,major,minor,patch) JSON_HEDLEY_GNUC_VERSION_CHECK(major,minor,patch)
#endif
#if defined(JSON_HEDLEY_GCC_HAS_WARNING)
#undef JSON_HEDLEY_GCC_HAS_WARNING
#endif
#if defined(__has_warning)
#define JSON_HEDLEY_GCC_HAS_WARNING(warning,major,minor,patch) __has_warning(warning)
#else
#define JSON_HEDLEY_GCC_HAS_WARNING(warning,major,minor,patch) JSON_HEDLEY_GCC_VERSION_CHECK(major,minor,patch)
#endif
/* JSON_HEDLEY_PRAGMA(value): portable pragma emission from a macro.
   Uses the C99 _Pragma operator (stringified via #value) on any
   compiler known to support it, MSVC's non-standard __pragma keyword
   as a fallback, and expands to nothing on everything else. */
#if \
(defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L)) || \
defined(__clang__) || \
JSON_HEDLEY_GCC_VERSION_CHECK(3,0,0) || \
JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0) || \
JSON_HEDLEY_IAR_VERSION_CHECK(8,0,0) || \
JSON_HEDLEY_PGI_VERSION_CHECK(18,4,0) || \
JSON_HEDLEY_ARM_VERSION_CHECK(4,1,0) || \
JSON_HEDLEY_TI_VERSION_CHECK(15,12,0) || \
JSON_HEDLEY_TI_ARMCL_VERSION_CHECK(4,7,0) || \
JSON_HEDLEY_TI_CL430_VERSION_CHECK(2,0,1) || \
JSON_HEDLEY_TI_CL2000_VERSION_CHECK(6,1,0) || \
JSON_HEDLEY_TI_CL6X_VERSION_CHECK(7,0,0) || \
JSON_HEDLEY_TI_CL7X_VERSION_CHECK(1,2,0) || \
JSON_HEDLEY_TI_CLPRU_VERSION_CHECK(2,1,0) || \
JSON_HEDLEY_CRAY_VERSION_CHECK(5,0,0) || \
JSON_HEDLEY_TINYC_VERSION_CHECK(0,9,17) || \
JSON_HEDLEY_SUNPRO_VERSION_CHECK(8,0,0) || \
(JSON_HEDLEY_IBM_VERSION_CHECK(10,1,0) && defined(__C99_PRAGMA_OPERATOR))
#define JSON_HEDLEY_PRAGMA(value) _Pragma(#value)
#elif JSON_HEDLEY_MSVC_VERSION_CHECK(15,0,0)
#define JSON_HEDLEY_PRAGMA(value) __pragma(value)
#else
#define JSON_HEDLEY_PRAGMA(value)
#endif
/* JSON_HEDLEY_DIAGNOSTIC_PUSH / _POP: save and restore the compiler's
   diagnostic (warning) state around a region of code, mapped to each
   toolchain's own pragma pair. Clang is tested before GCC because it
   also defines GCC's version macros. Expands to nothing on compilers
   with no such mechanism. */
#if defined(JSON_HEDLEY_DIAGNOSTIC_PUSH)
#undef JSON_HEDLEY_DIAGNOSTIC_PUSH
#endif
#if defined(JSON_HEDLEY_DIAGNOSTIC_POP)
#undef JSON_HEDLEY_DIAGNOSTIC_POP
#endif
#if defined(__clang__)
#define JSON_HEDLEY_DIAGNOSTIC_PUSH _Pragma("clang diagnostic push")
#define JSON_HEDLEY_DIAGNOSTIC_POP _Pragma("clang diagnostic pop")
#elif JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0)
#define JSON_HEDLEY_DIAGNOSTIC_PUSH _Pragma("warning(push)")
#define JSON_HEDLEY_DIAGNOSTIC_POP _Pragma("warning(pop)")
#elif JSON_HEDLEY_GCC_VERSION_CHECK(4,6,0)
#define JSON_HEDLEY_DIAGNOSTIC_PUSH _Pragma("GCC diagnostic push")
#define JSON_HEDLEY_DIAGNOSTIC_POP _Pragma("GCC diagnostic pop")
#elif \
JSON_HEDLEY_MSVC_VERSION_CHECK(15,0,0) || \
JSON_HEDLEY_INTEL_CL_VERSION_CHECK(2021,1,0)
#define JSON_HEDLEY_DIAGNOSTIC_PUSH __pragma(warning(push))
#define JSON_HEDLEY_DIAGNOSTIC_POP __pragma(warning(pop))
#elif JSON_HEDLEY_ARM_VERSION_CHECK(5,6,0)
#define JSON_HEDLEY_DIAGNOSTIC_PUSH _Pragma("push")
#define JSON_HEDLEY_DIAGNOSTIC_POP _Pragma("pop")
#elif \
JSON_HEDLEY_TI_VERSION_CHECK(15,12,0) || \
JSON_HEDLEY_TI_ARMCL_VERSION_CHECK(5,2,0) || \
JSON_HEDLEY_TI_CL430_VERSION_CHECK(4,4,0) || \
JSON_HEDLEY_TI_CL6X_VERSION_CHECK(8,1,0) || \
JSON_HEDLEY_TI_CL7X_VERSION_CHECK(1,2,0) || \
JSON_HEDLEY_TI_CLPRU_VERSION_CHECK(2,1,0)
#define JSON_HEDLEY_DIAGNOSTIC_PUSH _Pragma("diag_push")
#define JSON_HEDLEY_DIAGNOSTIC_POP _Pragma("diag_pop")
#elif JSON_HEDLEY_PELLES_VERSION_CHECK(2,90,0)
#define JSON_HEDLEY_DIAGNOSTIC_PUSH _Pragma("warning(push)")
#define JSON_HEDLEY_DIAGNOSTIC_POP _Pragma("warning(pop)")
#else
#define JSON_HEDLEY_DIAGNOSTIC_PUSH
#define JSON_HEDLEY_DIAGNOSTIC_POP
#endif
/* JSON_HEDLEY_DIAGNOSTIC_DISABLE_CPP98_COMPAT_WRAP_ is for
   HEDLEY INTERNAL USE ONLY.  API subject to change without notice. */
/* Wraps an expression so clang's -Wc++98-compat (and, when those
   warning flags exist, -Wc++17-extensions / -Wc++1z-extensions) are
   suppressed just for that expression; the nesting handles older
   clang versions that only know a subset of the flags. On every other
   compiler the wrapper is the identity (fallback at the end). */
#if defined(JSON_HEDLEY_DIAGNOSTIC_DISABLE_CPP98_COMPAT_WRAP_)
#undef JSON_HEDLEY_DIAGNOSTIC_DISABLE_CPP98_COMPAT_WRAP_
#endif
#if defined(__cplusplus)
# if JSON_HEDLEY_HAS_WARNING("-Wc++98-compat")
# if JSON_HEDLEY_HAS_WARNING("-Wc++17-extensions")
# if JSON_HEDLEY_HAS_WARNING("-Wc++1z-extensions")
# define JSON_HEDLEY_DIAGNOSTIC_DISABLE_CPP98_COMPAT_WRAP_(xpr) \
JSON_HEDLEY_DIAGNOSTIC_PUSH \
_Pragma("clang diagnostic ignored \"-Wc++98-compat\"") \
_Pragma("clang diagnostic ignored \"-Wc++17-extensions\"") \
_Pragma("clang diagnostic ignored \"-Wc++1z-extensions\"") \
xpr \
JSON_HEDLEY_DIAGNOSTIC_POP
# else
# define JSON_HEDLEY_DIAGNOSTIC_DISABLE_CPP98_COMPAT_WRAP_(xpr) \
JSON_HEDLEY_DIAGNOSTIC_PUSH \
_Pragma("clang diagnostic ignored \"-Wc++98-compat\"") \
_Pragma("clang diagnostic ignored \"-Wc++17-extensions\"") \
xpr \
JSON_HEDLEY_DIAGNOSTIC_POP
# endif
# else
# define JSON_HEDLEY_DIAGNOSTIC_DISABLE_CPP98_COMPAT_WRAP_(xpr) \
JSON_HEDLEY_DIAGNOSTIC_PUSH \
_Pragma("clang diagnostic ignored \"-Wc++98-compat\"") \
xpr \
JSON_HEDLEY_DIAGNOSTIC_POP
# endif
# endif
#endif
#if !defined(JSON_HEDLEY_DIAGNOSTIC_DISABLE_CPP98_COMPAT_WRAP_)
#define JSON_HEDLEY_DIAGNOSTIC_DISABLE_CPP98_COMPAT_WRAP_(x) x
#endif
/* Portable cast helpers usable from both C and C++.
   - CONST_CAST: const_cast in C++; in C, a plain cast wrapped in a
     statement expression that suppresses -Wcast-qual where possible.
   - REINTERPRET_CAST / STATIC_CAST: the named C++ cast, or a C-style
     cast in C.
   - CPP_CAST: a C-style cast with -Wold-style-cast (clang) or the
     equivalent IAR diagnostic suppressed; in C it is the identity. */
#if defined(JSON_HEDLEY_CONST_CAST)
#undef JSON_HEDLEY_CONST_CAST
#endif
#if defined(__cplusplus)
# define JSON_HEDLEY_CONST_CAST(T, expr) (const_cast<T>(expr))
#elif \
JSON_HEDLEY_HAS_WARNING("-Wcast-qual") || \
JSON_HEDLEY_GCC_VERSION_CHECK(4,6,0) || \
JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0)
# define JSON_HEDLEY_CONST_CAST(T, expr) (__extension__ ({ \
JSON_HEDLEY_DIAGNOSTIC_PUSH \
JSON_HEDLEY_DIAGNOSTIC_DISABLE_CAST_QUAL \
((T) (expr)); \
JSON_HEDLEY_DIAGNOSTIC_POP \
}))
#else
# define JSON_HEDLEY_CONST_CAST(T, expr) ((T) (expr))
#endif
#if defined(JSON_HEDLEY_REINTERPRET_CAST)
#undef JSON_HEDLEY_REINTERPRET_CAST
#endif
#if defined(__cplusplus)
#define JSON_HEDLEY_REINTERPRET_CAST(T, expr) (reinterpret_cast<T>(expr))
#else
#define JSON_HEDLEY_REINTERPRET_CAST(T, expr) ((T) (expr))
#endif
#if defined(JSON_HEDLEY_STATIC_CAST)
#undef JSON_HEDLEY_STATIC_CAST
#endif
#if defined(__cplusplus)
#define JSON_HEDLEY_STATIC_CAST(T, expr) (static_cast<T>(expr))
#else
#define JSON_HEDLEY_STATIC_CAST(T, expr) ((T) (expr))
#endif
#if defined(JSON_HEDLEY_CPP_CAST)
#undef JSON_HEDLEY_CPP_CAST
#endif
#if defined(__cplusplus)
# if JSON_HEDLEY_HAS_WARNING("-Wold-style-cast")
# define JSON_HEDLEY_CPP_CAST(T, expr) \
JSON_HEDLEY_DIAGNOSTIC_PUSH \
_Pragma("clang diagnostic ignored \"-Wold-style-cast\"") \
((T) (expr)) \
JSON_HEDLEY_DIAGNOSTIC_POP
# elif JSON_HEDLEY_IAR_VERSION_CHECK(8,3,0)
# define JSON_HEDLEY_CPP_CAST(T, expr) \
JSON_HEDLEY_DIAGNOSTIC_PUSH \
_Pragma("diag_suppress=Pe137") \
JSON_HEDLEY_DIAGNOSTIC_POP
# else
# define JSON_HEDLEY_CPP_CAST(T, expr) ((T) (expr))
# endif
#else
# define JSON_HEDLEY_CPP_CAST(T, expr) (expr)
#endif
/* Targeted diagnostic suppressions. Each macro expands to the pragma
   that silences one warning class on the detected compiler, or to
   nothing when no equivalent exists. Intended to be used between a
   DIAGNOSTIC_PUSH / DIAGNOSTIC_POP pair so the suppression stays
   local. Numeric codes (e.g. MSVC 4996, Intel 1478/1786, TI
   1291/1718) are that vendor's IDs for the same warning class. */
/* Deprecated-declaration warnings. */
#if defined(JSON_HEDLEY_DIAGNOSTIC_DISABLE_DEPRECATED)
#undef JSON_HEDLEY_DIAGNOSTIC_DISABLE_DEPRECATED
#endif
#if JSON_HEDLEY_HAS_WARNING("-Wdeprecated-declarations")
#define JSON_HEDLEY_DIAGNOSTIC_DISABLE_DEPRECATED _Pragma("clang diagnostic ignored \"-Wdeprecated-declarations\"")
#elif JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0)
#define JSON_HEDLEY_DIAGNOSTIC_DISABLE_DEPRECATED _Pragma("warning(disable:1478 1786)")
#elif JSON_HEDLEY_INTEL_CL_VERSION_CHECK(2021,1,0)
#define JSON_HEDLEY_DIAGNOSTIC_DISABLE_DEPRECATED __pragma(warning(disable:1478 1786))
#elif JSON_HEDLEY_PGI_VERSION_CHECK(20,7,0)
#define JSON_HEDLEY_DIAGNOSTIC_DISABLE_DEPRECATED _Pragma("diag_suppress 1215,1216,1444,1445")
#elif JSON_HEDLEY_PGI_VERSION_CHECK(17,10,0)
#define JSON_HEDLEY_DIAGNOSTIC_DISABLE_DEPRECATED _Pragma("diag_suppress 1215,1444")
#elif JSON_HEDLEY_GCC_VERSION_CHECK(4,3,0)
#define JSON_HEDLEY_DIAGNOSTIC_DISABLE_DEPRECATED _Pragma("GCC diagnostic ignored \"-Wdeprecated-declarations\"")
#elif JSON_HEDLEY_MSVC_VERSION_CHECK(15,0,0)
#define JSON_HEDLEY_DIAGNOSTIC_DISABLE_DEPRECATED __pragma(warning(disable:4996))
#elif JSON_HEDLEY_MCST_LCC_VERSION_CHECK(1,25,10)
#define JSON_HEDLEY_DIAGNOSTIC_DISABLE_DEPRECATED _Pragma("diag_suppress 1215,1444")
#elif \
JSON_HEDLEY_TI_VERSION_CHECK(15,12,0) || \
(JSON_HEDLEY_TI_ARMCL_VERSION_CHECK(4,8,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \
JSON_HEDLEY_TI_ARMCL_VERSION_CHECK(5,2,0) || \
(JSON_HEDLEY_TI_CL2000_VERSION_CHECK(6,0,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \
JSON_HEDLEY_TI_CL2000_VERSION_CHECK(6,4,0) || \
(JSON_HEDLEY_TI_CL430_VERSION_CHECK(4,0,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \
JSON_HEDLEY_TI_CL430_VERSION_CHECK(4,3,0) || \
(JSON_HEDLEY_TI_CL6X_VERSION_CHECK(7,2,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \
JSON_HEDLEY_TI_CL6X_VERSION_CHECK(7,5,0) || \
JSON_HEDLEY_TI_CL7X_VERSION_CHECK(1,2,0) || \
JSON_HEDLEY_TI_CLPRU_VERSION_CHECK(2,1,0)
#define JSON_HEDLEY_DIAGNOSTIC_DISABLE_DEPRECATED _Pragma("diag_suppress 1291,1718")
#elif JSON_HEDLEY_SUNPRO_VERSION_CHECK(5,13,0) && !defined(__cplusplus)
#define JSON_HEDLEY_DIAGNOSTIC_DISABLE_DEPRECATED _Pragma("error_messages(off,E_DEPRECATED_ATT,E_DEPRECATED_ATT_MESS)")
#elif JSON_HEDLEY_SUNPRO_VERSION_CHECK(5,13,0) && defined(__cplusplus)
#define JSON_HEDLEY_DIAGNOSTIC_DISABLE_DEPRECATED _Pragma("error_messages(off,symdeprecated,symdeprecated2)")
#elif JSON_HEDLEY_IAR_VERSION_CHECK(8,0,0)
#define JSON_HEDLEY_DIAGNOSTIC_DISABLE_DEPRECATED _Pragma("diag_suppress=Pe1444,Pe1215")
#elif JSON_HEDLEY_PELLES_VERSION_CHECK(2,90,0)
#define JSON_HEDLEY_DIAGNOSTIC_DISABLE_DEPRECATED _Pragma("warn(disable:2241)")
#else
#define JSON_HEDLEY_DIAGNOSTIC_DISABLE_DEPRECATED
#endif
/* Unknown-pragma warnings. */
#if defined(JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_PRAGMAS)
#undef JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_PRAGMAS
#endif
#if JSON_HEDLEY_HAS_WARNING("-Wunknown-pragmas")
#define JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_PRAGMAS _Pragma("clang diagnostic ignored \"-Wunknown-pragmas\"")
#elif JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0)
#define JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_PRAGMAS _Pragma("warning(disable:161)")
#elif JSON_HEDLEY_INTEL_CL_VERSION_CHECK(2021,1,0)
#define JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_PRAGMAS __pragma(warning(disable:161))
#elif JSON_HEDLEY_PGI_VERSION_CHECK(17,10,0)
#define JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_PRAGMAS _Pragma("diag_suppress 1675")
#elif JSON_HEDLEY_GCC_VERSION_CHECK(4,3,0)
#define JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_PRAGMAS _Pragma("GCC diagnostic ignored \"-Wunknown-pragmas\"")
#elif JSON_HEDLEY_MSVC_VERSION_CHECK(15,0,0)
#define JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_PRAGMAS __pragma(warning(disable:4068))
#elif \
JSON_HEDLEY_TI_VERSION_CHECK(16,9,0) || \
JSON_HEDLEY_TI_CL6X_VERSION_CHECK(8,0,0) || \
JSON_HEDLEY_TI_CL7X_VERSION_CHECK(1,2,0) || \
JSON_HEDLEY_TI_CLPRU_VERSION_CHECK(2,3,0)
#define JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_PRAGMAS _Pragma("diag_suppress 163")
#elif JSON_HEDLEY_TI_CL6X_VERSION_CHECK(8,0,0)
#define JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_PRAGMAS _Pragma("diag_suppress 163")
#elif JSON_HEDLEY_IAR_VERSION_CHECK(8,0,0)
#define JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_PRAGMAS _Pragma("diag_suppress=Pe161")
#elif JSON_HEDLEY_MCST_LCC_VERSION_CHECK(1,25,10)
#define JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_PRAGMAS _Pragma("diag_suppress 161")
#else
#define JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_PRAGMAS
#endif
/* Unknown-[[attribute]] warnings. NOTE(review): the GCC branch using
   -Wdeprecated-declarations matches upstream Hedley - do not "fix"
   it locally. */
#if defined(JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_CPP_ATTRIBUTES)
#undef JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_CPP_ATTRIBUTES
#endif
#if JSON_HEDLEY_HAS_WARNING("-Wunknown-attributes")
#define JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_CPP_ATTRIBUTES _Pragma("clang diagnostic ignored \"-Wunknown-attributes\"")
#elif JSON_HEDLEY_GCC_VERSION_CHECK(4,6,0)
#define JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_CPP_ATTRIBUTES _Pragma("GCC diagnostic ignored \"-Wdeprecated-declarations\"")
#elif JSON_HEDLEY_INTEL_VERSION_CHECK(17,0,0)
#define JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_CPP_ATTRIBUTES _Pragma("warning(disable:1292)")
#elif JSON_HEDLEY_INTEL_CL_VERSION_CHECK(2021,1,0)
#define JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_CPP_ATTRIBUTES __pragma(warning(disable:1292))
#elif JSON_HEDLEY_MSVC_VERSION_CHECK(19,0,0)
#define JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_CPP_ATTRIBUTES __pragma(warning(disable:5030))
#elif JSON_HEDLEY_PGI_VERSION_CHECK(20,7,0)
#define JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_CPP_ATTRIBUTES _Pragma("diag_suppress 1097,1098")
#elif JSON_HEDLEY_PGI_VERSION_CHECK(17,10,0)
#define JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_CPP_ATTRIBUTES _Pragma("diag_suppress 1097")
#elif JSON_HEDLEY_SUNPRO_VERSION_CHECK(5,14,0) && defined(__cplusplus)
#define JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_CPP_ATTRIBUTES _Pragma("error_messages(off,attrskipunsup)")
#elif \
JSON_HEDLEY_TI_VERSION_CHECK(18,1,0) || \
JSON_HEDLEY_TI_CL6X_VERSION_CHECK(8,3,0) || \
JSON_HEDLEY_TI_CL7X_VERSION_CHECK(1,2,0)
#define JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_CPP_ATTRIBUTES _Pragma("diag_suppress 1173")
#elif JSON_HEDLEY_IAR_VERSION_CHECK(8,0,0)
#define JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_CPP_ATTRIBUTES _Pragma("diag_suppress=Pe1097")
#elif JSON_HEDLEY_MCST_LCC_VERSION_CHECK(1,25,10)
#define JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_CPP_ATTRIBUTES _Pragma("diag_suppress 1097")
#else
#define JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_CPP_ATTRIBUTES
#endif
/* Cast-drops-qualifier warnings (used by JSON_HEDLEY_CONST_CAST). */
#if defined(JSON_HEDLEY_DIAGNOSTIC_DISABLE_CAST_QUAL)
#undef JSON_HEDLEY_DIAGNOSTIC_DISABLE_CAST_QUAL
#endif
#if JSON_HEDLEY_HAS_WARNING("-Wcast-qual")
#define JSON_HEDLEY_DIAGNOSTIC_DISABLE_CAST_QUAL _Pragma("clang diagnostic ignored \"-Wcast-qual\"")
#elif JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0)
#define JSON_HEDLEY_DIAGNOSTIC_DISABLE_CAST_QUAL _Pragma("warning(disable:2203 2331)")
#elif JSON_HEDLEY_GCC_VERSION_CHECK(3,0,0)
#define JSON_HEDLEY_DIAGNOSTIC_DISABLE_CAST_QUAL _Pragma("GCC diagnostic ignored \"-Wcast-qual\"")
#else
#define JSON_HEDLEY_DIAGNOSTIC_DISABLE_CAST_QUAL
#endif
/* Unused-function warnings. */
#if defined(JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNUSED_FUNCTION)
#undef JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNUSED_FUNCTION
#endif
#if JSON_HEDLEY_HAS_WARNING("-Wunused-function")
#define JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNUSED_FUNCTION _Pragma("clang diagnostic ignored \"-Wunused-function\"")
#elif JSON_HEDLEY_GCC_VERSION_CHECK(3,4,0)
#define JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNUSED_FUNCTION _Pragma("GCC diagnostic ignored \"-Wunused-function\"")
#elif JSON_HEDLEY_MSVC_VERSION_CHECK(1,0,0)
#define JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNUSED_FUNCTION __pragma(warning(disable:4505))
#elif JSON_HEDLEY_MCST_LCC_VERSION_CHECK(1,25,10)
#define JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNUSED_FUNCTION _Pragma("diag_suppress 3142")
#else
#define JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNUSED_FUNCTION
#endif
/* Re-include guards for JSON_HEDLEY_DEPRECATED / _DEPRECATED_FOR;
   their per-compiler definitions follow immediately after. */
#if defined(JSON_HEDLEY_DEPRECATED)
#undef JSON_HEDLEY_DEPRECATED
#endif
#if defined(JSON_HEDLEY_DEPRECATED_FOR)
#undef JSON_HEDLEY_DEPRECATED_FOR
#endif
#if \
JSON_HEDLEY_MSVC_VERSION_CHECK(14,0,0) || \
JSON_HEDLEY_INTEL_CL_VERSION_CHECK(2021,1,0)
#define JSON_HEDLEY_DEPRECATED(since) __declspec(deprecated("Since " # since))
#define JSON_HEDLEY_DEPRECATED_FOR(since, replacement) __declspec(deprecated("Since " #since "; use " #replacement))
#elif \
(JSON_HEDLEY_HAS_EXTENSION(attribute_deprecated_with_message) && !defined(JSON_HEDLEY_IAR_VERSION)) || \
JSON_HEDLEY_GCC_VERSION_CHECK(4,5,0) || \
JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0) || \
JSON_HEDLEY_ARM_VERSION_CHECK(5,6,0) || \
JSON_HEDLEY_SUNPRO_VERSION_CHECK(5,13,0) || \
JSON_HEDLEY_PGI_VERSION_CHECK(17,10,0) || \
JSON_HEDLEY_TI_VERSION_CHECK(18,1,0) || \
JSON_HEDLEY_TI_ARMCL_VERSION_CHECK(18,1,0) || \
JSON_HEDLEY_TI_CL6X_VERSION_CHECK(8,3,0) || \
JSON_HEDLEY_TI_CL7X_VERSION_CHECK(1,2,0) || \
JSON_HEDLEY_TI_CLPRU_VERSION_CHECK(2,3,0) || \
JSON_HEDLEY_MCST_LCC_VERSION_CHECK(1,25,10)
#define JSON_HEDLEY_DEPRECATED(since) __attribute__((__deprecated__("Since " #since)))
#define JSON_HEDLEY_DEPRECATED_FOR(since, replacement) __attribute__((__deprecated__("Since " #since "; use " #replacement)))
#elif defined(__cplusplus) && (__cplusplus >= 201402L)
#define JSON_HEDLEY_DEPRECATED(since) JSON_HEDLEY_DIAGNOSTIC_DISABLE_CPP98_COMPAT_WRAP_([[deprecated("Since " #since)]])
#define JSON_HEDLEY_DEPRECATED_FOR(since, replacement) JSON_HEDLEY_DIAGNOSTIC_DISABLE_CPP98_COMPAT_WRAP_([[deprecated("Since " #since "; use " #replacement)]])
#elif \
JSON_HEDLEY_HAS_ATTRIBUTE(deprecated) || \
JSON_HEDLEY_GCC_VERSION_CHECK(3,1,0) || \
JSON_HEDLEY_ARM_VERSION_CHECK(4,1,0) || \
JSON_HEDLEY_TI_VERSION_CHECK(15,12,0) || \
(JSON_HEDLEY_TI_ARMCL_VERSION_CHECK(4,8,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \
JSON_HEDLEY_TI_ARMCL_VERSION_CHECK(5,2,0) || \
(JSON_HEDLEY_TI_CL2000_VERSION_CHECK(6,0,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \
JSON_HEDLEY_TI_CL2000_VERSION_CHECK(6,4,0) || \
(JSON_HEDLEY_TI_CL430_VERSION_CHECK(4,0,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \
JSON_HEDLEY_TI_CL430_VERSION_CHECK(4,3,0) || \
(JSON_HEDLEY_TI_CL6X_VERSION_CHECK(7,2,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \
JSON_HEDLEY_TI_CL6X_VERSION_CHECK(7,5,0) || \
JSON_HEDLEY_TI_CL7X_VERSION_CHECK(1,2,0) || \
JSON_HEDLEY_TI_CLPRU_VERSION_CHECK(2,1,0) || \
JSON_HEDLEY_MCST_LCC_VERSION_CHECK(1,25,10) || \
JSON_HEDLEY_IAR_VERSION_CHECK(8,10,0)
#define JSON_HEDLEY_DEPRECATED(since) __attribute__((__deprecated__))
#define JSON_HEDLEY_DEPRECATED_FOR(since, replacement) __attribute__((__deprecated__))
#elif \
JSON_HEDLEY_MSVC_VERSION_CHECK(13,10,0) || \
JSON_HEDLEY_PELLES_VERSION_CHECK(6,50,0) || \
JSON_HEDLEY_INTEL_CL_VERSION_CHECK(2021,1,0)
#define JSON_HEDLEY_DEPRECATED(since) __declspec(deprecated)
#de
gitextract_kq4i821q/
├── BM(C++)/
│ └── BM.cpp
├── BM(Python)/
│ ├── BM.py
│ └── camera_configs.py
├── Jeston nano_tensorrt/
│ ├── CMakeLists.txt
│ ├── calibrator.cpp
│ ├── calibrator.h
│ ├── common.hpp
│ ├── cuda_utils.h
│ ├── gen_wts.py
│ ├── logging.h
│ ├── macros.h
│ ├── preprocess.cu
│ ├── preprocess.h
│ ├── samples
│ ├── utils.h
│ ├── yololayer.cu
│ ├── yololayer.h
│ ├── yolov5.cpp
│ └── 部署/
│ └── python_trt.py
├── LICENSE
├── README.md
├── SGBM(C++)/
│ └── SGBM.cpp
├── SGBM(Python)/
│ └── sgbm-video.py
├── Win_tensorrt/
│ ├── python_trt.py
│ ├── yolov5_640.engine
│ └── yolov5s_480.engine
├── distance_measurement_cpp/
│ ├── camera_config.json
│ ├── json.hpp
│ ├── main.cpp
│ ├── mouse_controller.cpp
│ ├── mouse_controller.h
│ ├── stereo_match_algorithm.cpp
│ └── stereo_match_algorithm.h
├── stereo_introduce/
│ ├── USB摄像头使用说明书.docx
│ └── VideoCap.ini
├── stereo_shot.py
├── yolov5-v6.1-pytorch-master/
│ ├── .gitignore
│ ├── LICENSE
│ ├── get_map.py
│ ├── kmeans_for_anchors.py
│ ├── nets/
│ │ ├── CSPdarknet.py
│ │ ├── __init__.py
│ │ ├── yolo.py
│ │ └── yolo_training.py
│ ├── predict.py
│ ├── requirements.txt
│ ├── sgbm-video.py
│ ├── summary.py
│ ├── train.py
│ ├── utils/
│ │ ├── __init__.py
│ │ ├── callbacks.py
│ │ ├── dataloader.py
│ │ ├── utils.py
│ │ ├── utils_bbox.py
│ │ ├── utils_fit.py
│ │ └── utils_map.py
│ ├── utils_coco/
│ │ ├── coco_annotation.py
│ │ └── get_map_coco.py
│ ├── voc_annotation.py
│ └── yolo.py
├── 双目视觉资料/
│ ├── opencv 双目摄像头拍照(分别左右镜头拍照).txt
│ ├── opencv 双目摄像头拍照(分别左右镜头拍照)_一颗小树x的博客-CSDN博客_双目摄像头拍照.mhtml
│ ├── 一篇文章认识《双目立体视觉》_一颗小树x的博客-CSDN博客(1).mhtml
│ ├── 制作标定板——matlab编程实现_一颗小树x的博客-CSDN博客_matlab 标定板.mhtml
│ ├── 双目 机器视觉-- 测距.txt
│ ├── 双目 机器视觉-- 测距_一颗小树x的博客-CSDN博客_机器视觉测距.mhtml
│ ├── 双目摄像头内参如何使用? 如何转化数据?_一颗小树x的博客-CSDN博客_摄像头内参.mhtml
│ ├── 双目测距 SGBM算法 Python版.txt
│ ├── 双目测距 SGBM算法 Python版_一颗小树x的博客-CSDN博客_双目相机测距python.mhtml
│ └── 立体匹配算法(局部立体匹配 、全局立体匹配 、深度学习立体匹配 )_一颗小树x的博客-CSDN博客_深度学习立体匹配.mhtml
├── 常见问题答疑.md
└── 数据处理/
├── README.md
├── clean-xml-jpg.py
└── coco2voc.py
SYMBOL INDEX (621 symbols across 36 files)
FILE: BM(C++)/BM.cpp
function stereo_match (line 98) | void stereo_match(int, void*)
function onMouse (line 120) | static void onMouse(int event, int x, int y, int, void*)
function main (line 162) | int main()
FILE: BM(Python)/BM.py
function onmouse_pick_points (line 13) | def onmouse_pick_points(event, x, y, flags, param):
FILE: BM(Python)/camera_configs.py
class stereoCamera (line 71) | class stereoCamera(object):
method __init__ (line 72) | def __init__(self):
FILE: Jeston nano_tensorrt/calibrator.h
function class (line 14) | class Int8EntropyCalibrator2 : public nvinfer1::IInt8EntropyCalibrator2
FILE: Jeston nano_tensorrt/common.hpp
function get_rect (line 14) | cv::Rect get_rect(cv::Mat& img, float bbox[4]) {
function iou (line 40) | float iou(float lbox[4], float rbox[4]) {
function cmp (line 55) | bool cmp(const Yolo::Detection& a, const Yolo::Detection& b) {
function nms (line 59) | void nms(std::vector<Yolo::Detection>& res, float *output, float conf_th...
function loadWeights (line 88) | std::map<std::string, Weights> loadWeights(const std::string file) {
function IScaleLayer (line 126) | IScaleLayer* addBatchNorm2d(INetworkDefinition *network, std::map<std::s...
function ILayer (line 161) | ILayer* convBlock(INetworkDefinition *network, std::map<std::string, Wei...
function ILayer (line 179) | ILayer* focus(INetworkDefinition *network, std::map<std::string, Weights...
function ILayer (line 190) | ILayer* bottleneck(INetworkDefinition *network, std::map<std::string, We...
function ILayer (line 200) | ILayer* bottleneckCSP(INetworkDefinition *network, std::map<std::string,...
function ILayer (line 223) | ILayer* C3(INetworkDefinition *network, std::map<std::string, Weights>& ...
function ILayer (line 240) | ILayer* SPP(INetworkDefinition *network, std::map<std::string, Weights>&...
function ILayer (line 261) | ILayer* SPPF(INetworkDefinition *network, std::map<std::string, Weights>...
function getAnchors (line 280) | std::vector<std::vector<float>> getAnchors(std::map<std::string, Weights...
function IPluginV2Layer (line 292) | IPluginV2Layer* addYoLoLayer(INetworkDefinition *network, std::map<std::...
FILE: Jeston nano_tensorrt/gen_wts.py
function parse_args (line 9) | def parse_args():
FILE: Jeston nano_tensorrt/logging.h
function class (line 32) | class LogStreamConsumerBuffer : public std::stringbuf
function class (line 107) | class LogStreamConsumerBase
function std (line 161) | static std::string severityPrefix(Severity severity)
function TestResult (line 214) | enum class TestResult
function LogStreamConsumer (line 448) | inline LogStreamConsumer LOG_VERBOSE(const Logger& logger)
function LogStreamConsumer (line 460) | inline LogStreamConsumer LOG_INFO(const Logger& logger)
function LogStreamConsumer (line 472) | inline LogStreamConsumer LOG_WARN(const Logger& logger)
function LogStreamConsumer (line 484) | inline LogStreamConsumer LOG_ERROR(const Logger& logger)
function LogStreamConsumer (line 497) | inline LogStreamConsumer LOG_FATAL(const Logger& logger)
FILE: Jeston nano_tensorrt/preprocess.h
type AffineMatrix (line 8) | struct AffineMatrix{
FILE: Jeston nano_tensorrt/utils.h
function cv (line 7) | static inline cv::Mat preprocess_img(cv::Mat& img, int input_w, int inpu...
function read_files_in_dir (line 29) | static inline int read_files_in_dir(const char *p_dir_name, std::vector<...
FILE: Jeston nano_tensorrt/yololayer.h
function namespace (line 9) | namespace Yolo
function initialize (line 49) | int initialize() TRT_NOEXCEPT override;
function virtual (line 53) | virtual size_t getWorkspaceSize(int maxBatchSize) const TRT_NOEXCEPT ove...
function virtual (line 57) | virtual size_t getSerializationSize() const TRT_NOEXCEPT override;
FILE: Jeston nano_tensorrt/yolov5.cpp
function get_width (line 28) | static int get_width(int x, float gw, int divisor = 8) {
function get_depth (line 32) | static int get_depth(int x, float gd) {
function ICudaEngine (line 41) | ICudaEngine* build_engine(unsigned int maxBatchSize, IBuilder* builder, ...
function ICudaEngine (line 128) | ICudaEngine* build_engine_p6(unsigned int maxBatchSize, IBuilder* builde...
function APIToModel (line 232) | void APIToModel(unsigned int maxBatchSize, IHostMemory** modelStream, bo...
function doInference (line 262) | void doInference(IExecutionContext& context, cudaStream_t& stream, void ...
function parse_args (line 270) | bool parse_args(int argc, char** argv, std::string& wts, std::string& en...
function main (line 309) | int main(int argc, char** argv) {
function API (line 468) | API void* Init(char *model_path)
function API (line 513) | API void Detect(void *h, int rows, int cols, unsigned char *src_data, fl...
function API (line 550) | API void cuda_free(void*h) {
FILE: Jeston nano_tensorrt/部署/python_trt.py
class Detector (line 56) | class Detector():
method __init__ (line 57) | def __init__(self,model_path,dll_path):
method predict (line 65) | def predict(self,img):
method free (line 72) | def free(self):
function visualize (line 80) | def visualize(img,bbox_array):
function resize_image (line 102) | def resize_image(image, size):
FILE: SGBM(C++)/SGBM.cpp
function stereo_match (line 97) | void stereo_match(int, void*)
function onMouse (line 135) | static void onMouse(int event, int x, int y, int, void*)
function main (line 177) | int main()
FILE: SGBM(Python)/sgbm-video.py
function onmouse_pick_points (line 44) | def onmouse_pick_points(event, x, y, flags, param):
FILE: Win_tensorrt/python_trt.py
class Detector (line 56) | class Detector():
method __init__ (line 57) | def __init__(self,model_path,dll_path):
method predict (line 65) | def predict(self,img):
method free (line 72) | def free(self):
function visualize (line 80) | def visualize(img,bbox_array):
function resize_image (line 102) | def resize_image(image, size):
FILE: distance_measurement_cpp/json.hpp
function NLOHMANN_JSON_NAMESPACE_BEGIN (line 261) | NLOHMANN_JSON_NAMESPACE_BEGIN
function NLOHMANN_JSON_NAMESPACE_END (line 272) | NLOHMANN_JSON_NAMESPACE_END
function from_json (line 2602) | inline void from_json(const BasicJsonType& j, ENUM_TYPE& e) ...
type would_call_std_ (line 2941) | struct would_call_std_
type value_t (line 2999) | enum class value_t : std::uint8_t
function NLOHMANN_JSON_NAMESPACE_END (line 3064) | NLOHMANN_JSON_NAMESPACE_END
function NLOHMANN_JSON_NAMESPACE_BEGIN (line 3157) | NLOHMANN_JSON_NAMESPACE_BEGIN
function NLOHMANN_JSON_NAMESPACE_BEGIN (line 3203) | NLOHMANN_JSON_NAMESPACE_BEGIN
function NLOHMANN_JSON_NAMESPACE_BEGIN (line 3394) | NLOHMANN_JSON_NAMESPACE_BEGIN
class json_pointer (line 3543) | class json_pointer
type ordered_map (line 3554) | struct ordered_map
function NLOHMANN_JSON_NAMESPACE_BEGIN (line 3565) | NLOHMANN_JSON_NAMESPACE_BEGIN
function NLOHMANN_JSON_NAMESPACE_BEGIN (line 4357) | NLOHMANN_JSON_NAMESPACE_BEGIN
function NLOHMANN_JSON_NAMESPACE_BEGIN (line 4500) | NLOHMANN_JSON_NAMESPACE_BEGIN
class parse_error (line 4626) | class parse_error : public exception
method parse_error (line 4639) | static parse_error create(int id_, const position_t& pos, const std::s...
method parse_error (line 4647) | static parse_error create(int id_, std::size_t byte_, const std::strin...
method parse_error (line 4667) | parse_error(int id_, std::size_t byte_, const char* what_arg)
method position_string (line 4670) | static std::string position_string(const position_t& pos)
class invalid_iterator (line 4679) | class invalid_iterator : public exception
method invalid_iterator (line 4683) | static invalid_iterator create(int id_, const std::string& what_arg, B...
method JSON_HEDLEY_NON_NULL (line 4690) | JSON_HEDLEY_NON_NULL(3)
class type_error (line 4697) | class type_error : public exception
method type_error (line 4701) | static type_error create(int id_, const std::string& what_arg, BasicJs...
method JSON_HEDLEY_NON_NULL (line 4708) | JSON_HEDLEY_NON_NULL(3)
class out_of_range (line 4714) | class out_of_range : public exception
method out_of_range (line 4718) | static out_of_range create(int id_, const std::string& what_arg, Basic...
method JSON_HEDLEY_NON_NULL (line 4725) | JSON_HEDLEY_NON_NULL(3)
class other_error (line 4731) | class other_error : public exception
method other_error (line 4735) | static other_error create(int id_, const std::string& what_arg, BasicJ...
method JSON_HEDLEY_NON_NULL (line 4742) | JSON_HEDLEY_NON_NULL(3)
function NLOHMANN_JSON_NAMESPACE_BEGIN (line 4771) | NLOHMANN_JSON_NAMESPACE_BEGIN
function NLOHMANN_JSON_NAMESPACE_BEGIN (line 4797) | NLOHMANN_JSON_NAMESPACE_BEGIN
function NLOHMANN_JSON_NAMESPACE_BEGIN (line 4805) | NLOHMANN_JSON_NAMESPACE_BEGIN
function NLOHMANN_JSON_NAMESPACE_BEGIN (line 4820) | NLOHMANN_JSON_NAMESPACE_BEGIN
function NLOHMANN_JSON_NAMESPACE_BEGIN (line 5431) | NLOHMANN_JSON_NAMESPACE_BEGIN
function NLOHMANN_JSON_NAMESPACE_END (line 5452) | NLOHMANN_JSON_NAMESPACE_END
function NLOHMANN_JSON_NAMESPACE_END (line 5632) | NLOHMANN_JSON_NAMESPACE_END
function NLOHMANN_JSON_NAMESPACE_BEGIN (line 5678) | NLOHMANN_JSON_NAMESPACE_BEGIN
type adl_serializer (line 6135) | struct adl_serializer
method from_json (line 6140) | static auto from_json(BasicJsonType && j, TargetType& val) noexcept(
method from_json (line 6150) | static auto from_json(BasicJsonType && j) noexcept(
method to_json (line 6160) | static auto to_json(BasicJsonType& j, TargetType && val) noexcept(
function set_subtype (line 6241) | void set_subtype(subtype_type subtype_) noexcept
function subtype_type (line 6249) | constexpr subtype_type subtype() const noexcept
function has_subtype (line 6256) | constexpr bool has_subtype() const noexcept
function clear_subtype (line 6263) | void clear_subtype() noexcept
function NLOHMANN_JSON_NAMESPACE_BEGIN (line 6302) | NLOHMANN_JSON_NAMESPACE_BEGIN
function NLOHMANN_JSON_NAMESPACE_BEGIN (line 6479) | NLOHMANN_JSON_NAMESPACE_BEGIN
function NLOHMANN_JSON_NAMESPACE_BEGIN (line 7048) | NLOHMANN_JSON_NAMESPACE_BEGIN
function reset (line 8345) | void reset() noexcept
function char_int_type (line 8363) | char_int_type get()
function unget (line 8400) | void unget()
function add (line 8427) | void add(char_int_type c)
function number_unsigned_t (line 8444) | constexpr number_unsigned_t get_number_unsigned() const noexcept
function number_float_t (line 8450) | constexpr number_float_t get_number_float() const noexcept
function string_t (line 8456) | string_t& get_string()
function position_t (line 8471) | constexpr position_t get_position() const noexcept
function get_token_string (line 8479) | std::string get_token_string() const
function JSON_HEDLEY_RETURNS_NON_NULL (line 8503) | JSON_HEDLEY_RETURNS_NON_NULL
function skip_bom (line 8517) | bool skip_bom()
function skip_whitespace (line 8531) | void skip_whitespace()
function token_type (line 8540) | token_type scan()
function NLOHMANN_JSON_NAMESPACE_END (line 8665) | NLOHMANN_JSON_NAMESPACE_END
type detail (line 8796) | namespace detail
function unknown_size (line 8798) | constexpr std::size_t unknown_size()
class json_sax_dom_parser (line 8817) | class json_sax_dom_parser
method json_sax_dom_parser (line 8832) | explicit json_sax_dom_parser(BasicJsonType& r, const bool allow_exce...
method json_sax_dom_parser (line 8837) | json_sax_dom_parser(const json_sax_dom_parser&) = delete;
method json_sax_dom_parser (line 8838) | json_sax_dom_parser(json_sax_dom_parser&&) = default;
method json_sax_dom_parser (line 8839) | json_sax_dom_parser& operator=(const json_sax_dom_parser&) = delete;
method json_sax_dom_parser (line 8840) | json_sax_dom_parser& operator=(json_sax_dom_parser&&) = default;
method null (line 8843) | bool null()
method boolean (line 8849) | bool boolean(bool val)
method number_integer (line 8855) | bool number_integer(number_integer_t val)
method number_unsigned (line 8861) | bool number_unsigned(number_unsigned_t val)
method number_float (line 8867) | bool number_float(number_float_t val, const string_t& /*unused*/)
method string (line 8873) | bool string(string_t& val)
method binary (line 8879) | bool binary(binary_t& val)
method start_object (line 8885) | bool start_object(std::size_t len)
method key (line 8908) | bool key(string_t& val)
method end_object (line 8918) | bool end_object()
method start_array (line 8936) | bool start_array(std::size_t len)
method end_array (line 8957) | bool end_array()
method parse_error (line 8976) | bool parse_error(std::size_t /*unused*/, const std::string& /*unused*/,
method is_errored (line 8988) | constexpr bool is_errored() const
method handle_diagnostic_positions_for_json_value (line 8996) | void handle_diagnostic_positions_for_json_value(BasicJsonType& v)
method JSON_HEDLEY_RETURNS_NON_NULL (line 9070) | JSON_HEDLEY_RETURNS_NON_NULL
class json_sax_dom_callback_parser (line 9123) | class json_sax_dom_callback_parser
method json_sax_dom_callback_parser (line 9135) | json_sax_dom_callback_parser(BasicJsonType& r,
method json_sax_dom_callback_parser (line 9145) | json_sax_dom_callback_parser(const json_sax_dom_callback_parser&) = ...
method json_sax_dom_callback_parser (line 9146) | json_sax_dom_callback_parser(json_sax_dom_callback_parser&&) = default;
method json_sax_dom_callback_parser (line 9147) | json_sax_dom_callback_parser& operator=(const json_sax_dom_callback_...
method json_sax_dom_callback_parser (line 9148) | json_sax_dom_callback_parser& operator=(json_sax_dom_callback_parser...
method null (line 9151) | bool null()
method boolean (line 9157) | bool boolean(bool val)
method number_integer (line 9163) | bool number_integer(number_integer_t val)
method number_unsigned (line 9169) | bool number_unsigned(number_unsigned_t val)
method number_float (line 9175) | bool number_float(number_float_t val, const string_t& /*unused*/)
method string (line 9181) | bool string(string_t& val)
method binary (line 9187) | bool binary(binary_t& val)
method start_object (line 9193) | bool start_object(std::size_t len)
method key (line 9225) | bool key(string_t& val)
method end_object (line 9242) | bool end_object()
method start_array (line 9292) | bool start_array(std::size_t len)
method end_array (line 9324) | bool end_array()
method parse_error (line 9371) | bool parse_error(std::size_t /*unused*/, const std::string& /*unused*/,
method is_errored (line 9383) | constexpr bool is_errored() const
method handle_diagnostic_positions_for_json_value (line 9391) | void handle_diagnostic_positions_for_json_value(BasicJsonType& v)
method handle_value (line 9470) | std::pair<bool, BasicJsonType*> handle_value(Value&& v, const bool s...
class json_sax_acceptor (line 9560) | class json_sax_acceptor
method null (line 9569) | bool null()
method boolean (line 9574) | bool boolean(bool /*unused*/)
method number_integer (line 9579) | bool number_integer(number_integer_t /*unused*/)
method number_unsigned (line 9584) | bool number_unsigned(number_unsigned_t /*unused*/)
method number_float (line 9589) | bool number_float(number_float_t /*unused*/, const string_t& /*unuse...
method string (line 9594) | bool string(string_t& /*unused*/)
method binary (line 9599) | bool binary(binary_t& /*unused*/)
method start_object (line 9604) | bool start_object(std::size_t /*unused*/ = detail::unknown_size())
method key (line 9609) | bool key(string_t& /*unused*/)
method end_object (line 9614) | bool end_object()
method start_array (line 9619) | bool start_array(std::size_t /*unused*/ = detail::unknown_size())
method end_array (line 9624) | bool end_array()
method parse_error (line 9629) | bool parse_error(std::size_t /*unused*/, const std::string& /*unused...
function NLOHMANN_JSON_NAMESPACE_BEGIN (line 9664) | NLOHMANN_JSON_NAMESPACE_BEGIN
function NLOHMANN_JSON_NAMESPACE_END (line 9804) | NLOHMANN_JSON_NAMESPACE_END
function NLOHMANN_JSON_NAMESPACE_BEGIN (line 12885) | NLOHMANN_JSON_NAMESPACE_BEGIN
function NLOHMANN_JSON_NAMESPACE_BEGIN (line 13409) | NLOHMANN_JSON_NAMESPACE_BEGIN
function NLOHMANN_JSON_NAMESPACE_END (line 13525) | NLOHMANN_JSON_NAMESPACE_END
function NLOHMANN_JSON_NAMESPACE_BEGIN (line 13580) | NLOHMANN_JSON_NAMESPACE_BEGIN
function pointer (line 13882) | pointer operator->() const
function iter_impl (line 13924) | iter_impl operator++(int)& // NOLINT(cert-dcl21-cpp)
function iter_impl (line 13935) | iter_impl& operator++()
function iter_impl (line 13975) | iter_impl operator--(int)& // NOLINT(cert-dcl21-cpp)
function iter_impl (line 13986) | iter_impl& operator--()
function iter_impl (line 14143) | iter_impl& operator+=(difference_type i)
function iter_impl (line 14180) | iter_impl& operator-=(difference_type i)
function iter_impl (line 14189) | iter_impl operator+(difference_type i) const
function friend (line 14200) | friend iter_impl operator+(difference_type i, const iter_impl& it)
function iter_impl (line 14211) | iter_impl operator-(difference_type i) const
function difference_type (line 14222) | difference_type operator-(const iter_impl& other) const
function reference (line 14251) | reference operator[](difference_type n) const
function reference (line 14305) | reference value() const
function NLOHMANN_JSON_NAMESPACE_BEGIN (line 14340) | NLOHMANN_JSON_NAMESPACE_BEGIN
function NLOHMANN_JSON_NAMESPACE_BEGIN (line 14473) | NLOHMANN_JSON_NAMESPACE_BEGIN
function NLOHMANN_JSON_NAMESPACE_BEGIN (line 14534) | NLOHMANN_JSON_NAMESPACE_BEGIN
function NLOHMANN_BASIC_JSON_TPL_DECLARATION (line 14554) | NLOHMANN_BASIC_JSON_TPL_DECLARATION
function json_pointer (line 14566) | explicit json_pointer(const string_t& s = "")
function string_t (line 14572) | string_t to_string() const
function friend (line 14593) | friend std::ostream& operator<<(std::ostream& o, const json_pointer& ptr)
function json_pointer (line 14602) | json_pointer& operator/=(const json_pointer& ptr)
function json_pointer (line 14612) | json_pointer& operator/=(string_t token)
function json_pointer (line 14620) | json_pointer& operator/=(std::size_t array_idx)
function friend (line 14627) | friend json_pointer operator/(const json_pointer& lhs,
function friend (line 14635) | friend json_pointer operator/(const json_pointer& lhs, string_t token) /...
function friend (line 14642) | friend json_pointer operator/(const json_pointer& lhs, std::size_t array...
function json_pointer (line 14649) | json_pointer parent_pointer() const
function pop_back (line 14663) | void pop_back()
function string_t (line 14675) | const string_t& back() const
function push_back (line 14687) | void push_back(const string_t& token)
function push_back (line 14694) | void push_back(string_t&& token)
function empty (line 14701) | bool empty() const noexcept
function BasicJsonType (line 14778) | BasicJsonType& get_and_create(BasicJsonType& j) const
function BasicJsonType (line 14858) | BasicJsonType& get_unchecked(BasicJsonType* ptr) const
function BasicJsonType (line 14926) | BasicJsonType& get_checked(BasicJsonType* ptr) const
function BasicJsonType (line 14984) | const BasicJsonType& get_unchecked(const BasicJsonType* ptr) const
function BasicJsonType (line 15033) | const BasicJsonType& get_checked(const BasicJsonType* ptr) const
function contains (line 15082) | bool contains(const BasicJsonType* ptr) const
function split (line 15170) | static std::vector<string_t> split(const string_t& reference_string)
function BasicJsonType (line 15310) | static BasicJsonType
function convert (line 15339) | json_pointer<string_t> convert() const&
function convert (line 15346) | json_pointer<string_t> convert()&&
function NLOHMANN_JSON_NAMESPACE_BEGIN (line 15513) | NLOHMANN_JSON_NAMESPACE_BEGIN
function NLOHMANN_JSON_NAMESPACE_BEGIN (line 15640) | NLOHMANN_JSON_NAMESPACE_BEGIN
function NLOHMANN_JSON_NAMESPACE_END (line 15762) | NLOHMANN_JSON_NAMESPACE_END
function NLOHMANN_JSON_NAMESPACE_BEGIN (line 17641) | NLOHMANN_JSON_NAMESPACE_BEGIN
function NLOHMANN_JSON_NAMESPACE_END (line 18738) | NLOHMANN_JSON_NAMESPACE_END
function hex_bytes (line 19398) | static std::string hex_bytes(std::uint8_t byte)
function is_negative_number (line 19409) | bool is_negative_number(NumberType x)
function is_negative_number (line 19415) | bool is_negative_number(NumberType /*unused*/)
function dump_integer (line 19435) | void dump_integer(NumberType x)
function dump_float (line 19520) | void dump_float(number_float_t x)
function dump_float (line 19541) | void dump_float(number_float_t x, std::true_type /*is_ieee_single_or_dou...
function dump_float (line 19549) | void dump_float(number_float_t x, std::false_type /*is_ieee_single_or_do...
function decode (line 19621) | static std::uint8_t decode(std::uint8_t& state, std::uint32_t& codep, co...
function number_unsigned_t (line 19661) | number_unsigned_t remove_sign(number_unsigned_t x)
function number_unsigned_t (line 19676) | number_unsigned_t remove_sign(number_integer_t x) noexcept
function ordered_map (line 19763) | ordered_map() noexcept(noexcept(Container())) : Container{} {}
function ordered_map (line 19764) | explicit ordered_map(const Allocator& alloc) noexcept(noexcept(Container...
function ordered_map (line 19766) | ordered_map(It first, It last, const Allocator& alloc = Allocator())
function ordered_map (line 19768) | ordered_map(std::initializer_list<value_type> init, const Allocator& all...
function emplace (line 19771) | std::pair<iterator, bool> emplace(const key_type& key, T&& t)
function emplace (line 19786) | std::pair<iterator, bool> emplace(KeyType && key, T && t)
function T (line 19799) | T& operator[](const key_type& key)
function T (line 19806) | T & operator[](KeyType && key)
function T (line 19811) | const T& operator[](const key_type& key) const
function T (line 19818) | const T & operator[](KeyType && key) const
function T (line 19823) | T& at(const key_type& key)
function T (line 19838) | T & at(KeyType && key) // NOLINT(cppcoreguidelines-missing-std-forward)
function T (line 19851) | const T& at(const key_type& key) const
function T (line 19866) | const T & at(KeyType && key) const // NOLINT(cppcoreguidelines-missing-s...
function size_type (line 19879) | size_type erase(const key_type& key)
function size_type (line 19900) | size_type erase(KeyType && key) // NOLINT(cppcoreguidelines-missing-std-...
function iterator (line 19919) | iterator erase(iterator pos)
function iterator (line 19924) | iterator erase(iterator first, iterator last)
function size_type (line 19977) | size_type count(const key_type& key) const
function size_type (line 19991) | size_type count(KeyType && key) const // NOLINT(cppcoreguidelines-missin...
function iterator (line 20003) | iterator find(const key_type& key)
function iterator (line 20017) | iterator find(KeyType && key) // NOLINT(cppcoreguidelines-missing-std-fo...
function const_iterator (line 20029) | const_iterator find(const key_type& key) const
function insert (line 20041) | std::pair<iterator, bool> insert( value_type&& value )
function insert (line 20046) | std::pair<iterator, bool> insert( const value_type& value )
function insert (line 20064) | void insert(InputIt first, InputIt last)
function NLOHMANN_JSON_NAMESPACE_BEGIN (line 20091) | NLOHMANN_JSON_NAMESPACE_BEGIN
function set_parents (line 20720) | void set_parents()
function iterator (line 20757) | iterator set_parents(iterator it, typename iterator::difference_type cou...
function reference (line 20770) | reference set_parent(reference j, std::size_t old_capacity = detail::unk...
function basic_json (line 20832) | basic_json(const value_t v)
function basic_json (line 20840) | basic_json(std::nullptr_t = nullptr) noexcept // NOLINT(bugprone-excepti...
function basic_json (line 20852) | basic_json(CompatibleType && val) noexcept(noexcept( // NOLINT(bugprone-...
function basic_json (line 20924) | basic_json(initializer_list_t init,
function JSON_HEDLEY_WARN_UNUSED_RESULT (line 20982) | JSON_HEDLEY_WARN_UNUSED_RESULT
function JSON_HEDLEY_WARN_UNUSED_RESULT (line 20993) | JSON_HEDLEY_WARN_UNUSED_RESULT
function JSON_HEDLEY_WARN_UNUSED_RESULT (line 21004) | JSON_HEDLEY_WARN_UNUSED_RESULT
function JSON_HEDLEY_WARN_UNUSED_RESULT (line 21015) | JSON_HEDLEY_WARN_UNUSED_RESULT
function JSON_HEDLEY_WARN_UNUSED_RESULT (line 21026) | JSON_HEDLEY_WARN_UNUSED_RESULT
function JSON_HEDLEY_WARN_UNUSED_RESULT (line 21034) | JSON_HEDLEY_WARN_UNUSED_RESULT
function basic_json (line 21042) | basic_json(size_type cnt, const basic_json& val):
function basic_json (line 21054) | basic_json(InputIT first, InputIT last) // NOLINT(performance-unnecessar...
function basic_json (line 21163) | basic_json(const JsonRef& ref) : basic_json(ref.moved_or_copied()) {}
function basic_json (line 21167) | basic_json(const basic_json& other)
function basic_json (line 21240) | basic_json(basic_json&& other) noexcept
function basic_json (line 21266) | basic_json& operator=(basic_json other) noexcept (
function value_t (line 21335) | constexpr value_t type() const noexcept
function is_primitive (line 21342) | constexpr bool is_primitive() const noexcept
function is_structured (line 21349) | constexpr bool is_structured() const noexcept
function is_null (line 21356) | constexpr bool is_null() const noexcept
function is_boolean (line 21363) | constexpr bool is_boolean() const noexcept
function is_number (line 21370) | constexpr bool is_number() const noexcept
function is_number_integer (line 21377) | constexpr bool is_number_integer() const noexcept
function is_number_unsigned (line 21384) | constexpr bool is_number_unsigned() const noexcept
function is_number_float (line 21391) | constexpr bool is_number_float() const noexcept
function is_object (line 21398) | constexpr bool is_object() const noexcept
function is_array (line 21405) | constexpr bool is_array() const noexcept
function is_string (line 21412) | constexpr bool is_string() const noexcept
function is_binary (line 21419) | constexpr bool is_binary() const noexcept
function is_discarded (line 21426) | constexpr bool is_discarded() const noexcept
function object_t (line 21457) | object_t* get_impl_ptr(object_t* /*unused*/) noexcept
function object_t (line 21463) | constexpr const object_t* get_impl_ptr(const object_t* /*unused*/) const...
function array_t (line 21469) | array_t* get_impl_ptr(array_t* /*unused*/) noexcept
function array_t (line 21475) | constexpr const array_t* get_impl_ptr(const array_t* /*unused*/) const n...
function string_t (line 21481) | string_t* get_impl_ptr(string_t* /*unused*/) noexcept
function string_t (line 21487) | constexpr const string_t* get_impl_ptr(const string_t* /*unused*/) const...
function boolean_t (line 21493) | boolean_t* get_impl_ptr(boolean_t* /*unused*/) noexcept
function boolean_t (line 21499) | constexpr const boolean_t* get_impl_ptr(const boolean_t* /*unused*/) con...
function number_integer_t (line 21505) | number_integer_t* get_impl_ptr(number_integer_t* /*unused*/) noexcept
function number_integer_t (line 21511) | constexpr const number_integer_t* get_impl_ptr(const number_integer_t* /...
function number_unsigned_t (line 21517) | number_unsigned_t* get_impl_ptr(number_unsigned_t* /*unused*/) noexcept
function number_unsigned_t (line 21523) | constexpr const number_unsigned_t* get_impl_ptr(const number_unsigned_t*...
function number_float_t (line 21529) | number_float_t* get_impl_ptr(number_float_t* /*unused*/) noexcept
function number_float_t (line 21535) | constexpr const number_float_t* get_impl_ptr(const number_float_t* /*unu...
function binary_t (line 21541) | binary_t* get_impl_ptr(binary_t* /*unused*/) noexcept
function binary_t (line 21547) | constexpr const binary_t* get_impl_ptr(const binary_t* /*unused*/) const...
function ReferenceType (line 21564) | static ReferenceType get_ref_impl(ThisType& obj)
function get_ptr (line 21597) | constexpr auto get_ptr() const noexcept -> decltype(std::declval<const b...
function ValueType (line 21689) | ValueType get_impl(detail::priority_tag<1> /*unused*/) const noexcept(no...
function BasicJsonType (line 21714) | BasicJsonType get_impl(detail::priority_tag<2> /*unused*/) const
function basic_json (line 21737) | basic_json get_impl(detail::priority_tag<3> /*unused*/) const
function get_impl (line 21750) | constexpr auto get_impl(detail::priority_tag<4> /*unused*/) const noexcept
function get (line 21826) | auto get() noexcept -> decltype(std::declval<basic_json_t&>().template g...
function ValueType (line 21839) | ValueType & get_to(ValueType& v) const noexcept(noexcept(
function ValueType (line 21852) | ValueType & get_to(ValueType& v) const
function Array (line 21863) | Array get_to(T (&v)[N]) const // NOLINT(cppcoreguidelines-avoid-c-arrays...
function ReferenceType (line 21875) | ReferenceType get_ref()
function ReferenceType (line 21886) | ReferenceType get_ref() const
function binary_t (line 21945) | binary_t& get_binary()
function binary_t (line 21957) | const binary_t& get_binary() const
function reference (line 21979) | reference at(size_type idx)
function const_reference (line 22002) | const_reference at(size_type idx) const
function reference (line 22025) | reference at(const typename object_t::key_type& key)
function reference (line 22045) | reference at(KeyType && key)
function const_reference (line 22063) | const_reference at(const typename object_t::key_type& key) const
function const_reference (line 22083) | const_reference at(KeyType && key) const
function reference (line 22101) | reference operator[](size_type idx)
function const_reference (line 22147) | const_reference operator[](size_type idx) const
function reference (line 22160) | reference operator[](typename object_t::key_type key) // NOLINT(performa...
function const_reference (line 22182) | const_reference operator[](const typename object_t::key_type& key) const
function reference (line 22198) | reference operator[](T* key)
function const_reference (line 22204) | const_reference operator[](T* key) const
function reference (line 22213) | reference operator[](KeyType && key)
function const_reference (line 22237) | const_reference operator[](KeyType && key) const
class ValueType (line 22263) | class ValueType
function ReturnType (line 22292) | ReturnType value(const typename object_t::key_type& key, ValueType && de...
function ValueType (line 22318) | ValueType value(KeyType && key, const ValueType& default_value) const
function ReturnType (line 22345) | ReturnType value(KeyType && key, ValueType && default_value) const
function ValueType (line 22368) | ValueType value(const json_pointer& ptr, const ValueType& default_value)...
function ReturnType (line 22393) | ReturnType value(const json_pointer& ptr, ValueType && default_value) const
function ValueType (line 22417) | ValueType value(const ::nlohmann::json_pointer<BasicJsonType>& ptr, cons...
function ReturnType (line 22428) | ReturnType value(const ::nlohmann::json_pointer<BasicJsonType>& ptr, Val...
function reference (line 22435) | reference front()
function const_reference (line 22442) | const_reference front() const
function reference (line 22449) | reference back()
function const_reference (line 22458) | const_reference back() const
function IteratorType (line 22470) | IteratorType erase(IteratorType pos) // NOLINT(performance-unnecessary-v...
function IteratorType (line 22540) | IteratorType erase(IteratorType first, IteratorType last) // NOLINT(perf...
function erase_internal (line 22608) | private:
function size_type (line 22624) | size_type erase_internal(KeyType && key)
function size_type (line 22656) | size_type erase(KeyType && key)
function erase (line 22663) | void erase(const size_type idx)
function iterator (line 22692) | iterator find(const typename object_t::key_type& key)
function const_iterator (line 22706) | const_iterator find(const typename object_t::key_type& key) const
function iterator (line 22722) | iterator find(KeyType && key)
function const_iterator (line 22738) | const_iterator find(KeyType && key) const
function size_type (line 22752) | size_type count(const typename object_t::key_type& key) const
function size_type (line 22762) | size_type count(KeyType && key) const
function contains (line 22770) | bool contains(const typename object_t::key_type& key) const
function contains (line 22779) | bool contains(KeyType && key) const
function contains (line 22786) | bool contains(const json_pointer& ptr) const
function contains (line 22793) | bool contains(const typename ::nlohmann::json_pointer<BasicJsonType>& pt...
function iterator (line 22809) | iterator begin() noexcept
function const_iterator (line 22818) | const_iterator begin() const noexcept
function const_iterator (line 22825) | const_iterator cbegin() const noexcept
function iterator (line 22834) | iterator end() noexcept
function const_iterator (line 22843) | const_iterator end() const noexcept
function const_iterator (line 22850) | const_iterator cend() const noexcept
function reverse_iterator (line 22859) | reverse_iterator rbegin() noexcept
function const_reverse_iterator (line 22866) | const_reverse_iterator rbegin() const noexcept
function reverse_iterator (line 22873) | reverse_iterator rend() noexcept
function const_reverse_iterator (line 22880) | const_reverse_iterator rend() const noexcept
function const_reverse_iterator (line 22887) | const_reverse_iterator crbegin() const noexcept
function const_reverse_iterator (line 22894) | const_reverse_iterator crend() const noexcept
function iterator_wrapper (line 22906) | static iteration_proxy<iterator> iterator_wrapper(reference ref) noexcept
function iterator_wrapper (line 22917) | static iteration_proxy<const_iterator> iterator_wrapper(const_reference ...
function items (line 22924) | iteration_proxy<iterator> items() noexcept
function items (line 22931) | iteration_proxy<const_iterator> items() const noexcept
function empty (line 22947) | bool empty() const noexcept
function size_type (line 22986) | size_type size() const noexcept
function size_type (line 23025) | size_type max_size() const noexcept
function clear (line 23068) | void clear() noexcept
function push_back (line 23129) | void push_back(basic_json&& val)
function reference (line 23154) | reference operator+=(basic_json&& val)
function push_back (line 23162) | void push_back(const basic_json& val)
function reference (line 23186) | reference operator+=(const basic_json& val)
function push_back (line 23194) | void push_back(const typename object_t::value_type& val)
function reference (line 23217) | reference operator+=(const typename object_t::value_type& val)
function push_back (line 23225) | void push_back(initializer_list_t init)
function reference (line 23241) | reference operator+=(initializer_list_t init)
function reference (line 23250) | reference emplace_back(Args&& ... args)
function emplace (line 23275) | std::pair<iterator, bool> emplace(Args&& ... args)
function iterator (line 23307) | iterator insert_iterator(const_iterator pos, Args&& ... args) // NOLINT(...
function iterator (line 23326) | iterator insert(const_iterator pos, const basic_json& val) // NOLINT(per...
function iterator (line 23346) | iterator insert(const_iterator pos, basic_json&& val) // NOLINT(performa...
function iterator (line 23353) | iterator insert(const_iterator pos, size_type cnt, const basic_json& val...
function iterator (line 23373) | iterator insert(const_iterator pos, const_iterator first, const_iterator...
function iterator (line 23404) | iterator insert(const_iterator pos, initializer_list_t ilist) // NOLINT(...
function insert (line 23424) | void insert(const_iterator first, const_iterator last) // NOLINT(perform...
function update (line 23450) | void update(const_reference j, bool merge_objects = false)
function update (line 23457) | void update(const_iterator first, const_iterator last, bool merge_object...
function swap (line 23504) | void swap(reference other) noexcept (
function friend (line 23521) | friend void swap(reference left, reference right) noexcept (
function swap (line 23533) | void swap(array_t& other) // NOLINT(bugprone-exception-escape,cppcoregui...
function swap (line 23549) | void swap(object_t& other) // NOLINT(bugprone-exception-escape,cppcoregu...
function swap (line 23565) | void swap(string_t& other) // NOLINT(bugprone-exception-escape,cppcoregu...
function swap (line 23581) | void swap(binary_t& other) // NOLINT(bugprone-exception-escape,cppcoregu...
function swap (line 23597) | void swap(typename binary_t::container_type& other) // NOLINT(bugprone-e...
function else (line 23686) | else if(compares_unordered(lhs, rhs))\
function compares_unordered (line 23715) | bool compares_unordered(const_reference rhs, bool inverse = false) const...
function friend (line 23828) | friend bool operator==(const_reference lhs, const_reference rhs) noexcept
function friend (line 23860) | friend bool operator!=(const_reference lhs, const_reference rhs) noexcept
function friend (line 23917) | friend bool operator<=(const_reference lhs, const_reference rhs) noexcept
function friend (line 23946) | friend bool operator>(const_reference lhs, const_reference rhs) noexcept
function friend (line 23976) | friend bool operator>=(const_reference lhs, const_reference rhs) noexcept
function friend (line 24017) | friend std::ostream& operator<<(std::ostream& o, const basic_json& j)
function JSON_HEDLEY_WARN_UNUSED_RESULT (line 24056) | JSON_HEDLEY_WARN_UNUSED_RESULT
function JSON_HEDLEY_WARN_UNUSED_RESULT (line 24070) | JSON_HEDLEY_WARN_UNUSED_RESULT
function basic_json (line 24084) | static basic_json parse(detail::span_input_adapter&& i,
function accept (line 24097) | static bool accept(InputType&& i,
function accept (line 24106) | static bool accept(IteratorType first, IteratorType last,
function accept (line 24114) | static bool accept(detail::span_input_adapter&& i,
function sax_parse (line 24124) | static bool sax_parse(InputType&& i, SAX* sax,
function sax_parse (line 24139) | static bool sax_parse(IteratorType first, IteratorType last, SAX* sax,
function sax_parse (line 24158) | static bool sax_parse(detail::span_input_adapter&& i, SAX* sax,
function JSON_HEDLEY_RETURNS_NON_NULL (line 24199) | JSON_HEDLEY_RETURNS_NON_NULL
type data (line 24231) | struct data
method data (line 24239) | data(const value_t v)
method data (line 24244) | data(size_type cnt, const basic_json& val)
method data (line 24250) | data() noexcept = default;
method data (line 24251) | data(data&&) noexcept = default;
method data (line 24252) | data(const data&) noexcept = delete;
method data (line 24253) | data& operator=(data&&) noexcept = delete;
method data (line 24254) | data& operator=(const data&) noexcept = delete;
function end_pos (line 24280) | constexpr std::size_t end_pos() const noexcept
function to_cbor (line 24305) | static void to_cbor(const basic_json& j, detail::output_adapter<std::uin...
function to_cbor (line 24312) | static void to_cbor(const basic_json& j, detail::output_adapter<char> o)
function to_msgpack (line 24319) | static std::vector<std::uint8_t> to_msgpack(const basic_json& j)
function to_msgpack (line 24328) | static void to_msgpack(const basic_json& j, detail::output_adapter<std::...
function to_msgpack (line 24335) | static void to_msgpack(const basic_json& j, detail::output_adapter<char> o)
function to_ubjson (line 24342) | static std::vector<std::uint8_t> to_ubjson(const basic_json& j,
function to_ubjson (line 24353) | static void to_ubjson(const basic_json& j, detail::output_adapter<std::u...
function to_ubjson (line 24361) | static void to_ubjson(const basic_json& j, detail::output_adapter<char> o,
function to_bjdata (line 24369) | static std::vector<std::uint8_t> to_bjdata(const basic_json& j,
function to_bjdata (line 24381) | static void to_bjdata(const basic_json& j, detail::output_adapter<std::u...
function to_bjdata (line 24390) | static void to_bjdata(const basic_json& j, detail::output_adapter<char> o,
function to_bson (line 24399) | static std::vector<std::uint8_t> to_bson(const basic_json& j)
function to_bson (line 24408) | static void to_bson(const basic_json& j, detail::output_adapter<std::uin...
function to_bson (line 24415) | static void to_bson(const basic_json& j, detail::output_adapter<char> o)
function JSON_HEDLEY_WARN_UNUSED_RESULT (line 24423) | JSON_HEDLEY_WARN_UNUSED_RESULT
function JSON_HEDLEY_WARN_UNUSED_RESULT (line 24439) | JSON_HEDLEY_WARN_UNUSED_RESULT
function basic_json (line 24455) | static basic_json from_cbor(const T* ptr, std::size_t len,
function basic_json (line 24465) | static basic_json from_cbor(detail::span_input_adapter&& i,
function JSON_HEDLEY_WARN_UNUSED_RESULT (line 24481) | JSON_HEDLEY_WARN_UNUSED_RESULT
function JSON_HEDLEY_WARN_UNUSED_RESULT (line 24496) | JSON_HEDLEY_WARN_UNUSED_RESULT
function basic_json (line 24511) | static basic_json from_msgpack(const T* ptr, std::size_t len,
function basic_json (line 24520) | static basic_json from_msgpack(detail::span_input_adapter&& i,
function JSON_HEDLEY_WARN_UNUSED_RESULT (line 24535) | JSON_HEDLEY_WARN_UNUSED_RESULT
function JSON_HEDLEY_WARN_UNUSED_RESULT (line 24550) | JSON_HEDLEY_WARN_UNUSED_RESULT
function basic_json (line 24565) | static basic_json from_ubjson(const T* ptr, std::size_t len,
function basic_json (line 24574) | static basic_json from_ubjson(detail::span_input_adapter&& i,
function JSON_HEDLEY_WARN_UNUSED_RESULT (line 24589) | JSON_HEDLEY_WARN_UNUSED_RESULT
function JSON_HEDLEY_WARN_UNUSED_RESULT (line 24604) | JSON_HEDLEY_WARN_UNUSED_RESULT
function JSON_HEDLEY_WARN_UNUSED_RESULT (line 24619) | JSON_HEDLEY_WARN_UNUSED_RESULT
function JSON_HEDLEY_WARN_UNUSED_RESULT (line 24634) | JSON_HEDLEY_WARN_UNUSED_RESULT
function basic_json (line 24649) | static basic_json from_bson(const T* ptr, std::size_t len,
function basic_json (line 24658) | static basic_json from_bson(detail::span_input_adapter&& i,
function reference (line 24680) | reference operator[](const json_pointer& ptr)
function reference (line 24687) | reference operator[](const ::nlohmann::json_pointer<BasicJsonType>& ptr)
function const_reference (line 24694) | const_reference operator[](const json_pointer& ptr) const
function const_reference (line 24701) | const_reference operator[](const ::nlohmann::json_pointer<BasicJsonType>...
function reference (line 24708) | reference at(const json_pointer& ptr)
function reference (line 24715) | reference at(const ::nlohmann::json_pointer<BasicJsonType>& ptr)
function const_reference (line 24722) | const_reference at(const json_pointer& ptr) const
function const_reference (line 24729) | const_reference at(const ::nlohmann::json_pointer<BasicJsonType>& ptr) c...
function basic_json (line 24736) | basic_json flatten() const
function basic_json (line 24745) | basic_json unflatten() const
function patch_inplace (line 24761) | void patch_inplace(const basic_json& json_patch)
function basic_json (line 25032) | basic_json patch(const basic_json& json_patch) const
function JSON_HEDLEY_WARN_UNUSED_RESULT (line 25041) | JSON_HEDLEY_WARN_UNUSED_RESULT
function merge_patch (line 25184) | void merge_patch(const basic_json& apply_patch)
function NLOHMANN_BASIC_JSON_TPL_DECLARATION (line 25215) | NLOHMANN_BASIC_JSON_TPL_DECLARATION
function NLOHMANN_JSON_NAMESPACE_END (line 25252) | NLOHMANN_JSON_NAMESPACE_END
FILE: distance_measurement_cpp/main.cpp
function main (line 3) | int main() {
FILE: distance_measurement_cpp/mouse_controller.h
function class (line 6) | class MouseController {
FILE: distance_measurement_cpp/stereo_match_algorithm.cpp
type stereo (line 6) | namespace stereo {
function test1 (line 245) | int test1() {
FILE: distance_measurement_cpp/stereo_match_algorithm.h
function namespace (line 9) | namespace stereo {
FILE: stereo_shot.py
function shot (line 13) | def shot( frame):
FILE: yolov5-v6.1-pytorch-master/kmeans_for_anchors.py
function cas_ratio (line 14) | def cas_ratio(box,cluster):
function avg_ratio (line 21) | def avg_ratio(box,cluster):
function kmeans (line 24) | def kmeans(box,k):
function load_data (line 77) | def load_data(path):
FILE: yolov5-v6.1-pytorch-master/nets/CSPdarknet.py
class SiLU (line 7) | class SiLU(nn.Module):
method forward (line 9) | def forward(x):
function autopad (line 12) | def autopad(k, p=None):
class Conv (line 17) | class Conv(nn.Module):
method __init__ (line 18) | def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True):
method forward (line 24) | def forward(self, x):
method forward_fuse (line 27) | def forward_fuse(self, x):
class Bottleneck (line 30) | class Bottleneck(nn.Module):
method __init__ (line 32) | def __init__(self, c1, c2, shortcut=True, g=1, e=0.5): # ch_in, ch_ou...
method forward (line 39) | def forward(self, x):
class C3 (line 42) | class C3(nn.Module):
method __init__ (line 44) | def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): # ch_in, ...
method forward (line 53) | def forward(self, x):
class SPPF (line 61) | class SPPF(nn.Module):
method __init__ (line 63) | def __init__(self, c1, c2, k=5): # equivalent to SPP(k=(5, 9, 13))
method forward (line 70) | def forward(self, x):
class CSPDarknet (line 78) | class CSPDarknet(nn.Module):
method __init__ (line 79) | def __init__(self, base_channels, base_depth, phi, pretrained):
method forward (line 148) | def forward(self, x):
FILE: yolov5-v6.1-pytorch-master/nets/yolo.py
class YoloBody (line 10) | class YoloBody(nn.Module):
method __init__ (line 11) | def __init__(self, anchors_mask, num_classes, phi, pretrained=False):
method forward (line 46) | def forward(self, x):
FILE: yolov5-v6.1-pytorch-master/nets/yolo_training.py
class YOLOLoss (line 10) | class YOLOLoss(nn.Module):
method __init__ (line 11) | def __init__(self, anchors, num_classes, input_shape, cuda, anchors_ma...
method clip_by_tensor (line 33) | def clip_by_tensor(self, t, t_min, t_max):
method MSELoss (line 39) | def MSELoss(self, pred, target):
method BCELoss (line 42) | def BCELoss(self, pred, target):
method box_giou (line 48) | def box_giou(self, b1, b2):
method smooth_labels (line 105) | def smooth_labels(self, y_true, label_smoothing, num_classes):
method forward (line 108) | def forward(self, l, input, targets=None, y_true=None):
method get_near_points (line 208) | def get_near_points(self, x, y, i, j):
method get_target (line 220) | def get_target(self, l, targets, anchors, in_h, in_w):
method get_pred_boxes (line 325) | def get_pred_boxes(self, l, x, y, h, w, targets, scaled_anchors, in_h,...
function is_parallel (line 356) | def is_parallel(model):
function de_parallel (line 360) | def de_parallel(model):
function copy_attr (line 364) | def copy_attr(a, b, include=(), exclude=()):
class ModelEMA (line 372) | class ModelEMA:
method __init__ (line 378) | def __init__(self, model, decay=0.9999, tau=2000, updates=0):
method update (line 388) | def update(self, model):
method update_attr (line 400) | def update_attr(self, model, include=(), exclude=('process_group', 're...
function weights_init (line 404) | def weights_init(net, init_type='normal', init_gain = 0.02):
function get_lr_scheduler (line 424) | def get_lr_scheduler(lr_decay_type, lr, min_lr, total_iters, warmup_iter...
function set_optimizer_lr (line 462) | def set_optimizer_lr(optimizer, lr_scheduler_func, epoch):
FILE: yolov5-v6.1-pytorch-master/sgbm-video.py
function onmouse_pick_points (line 44) | def onmouse_pick_points(event, x, y, flags, param):
FILE: yolov5-v6.1-pytorch-master/utils/callbacks.py
class LossHistory (line 21) | class LossHistory():
method __init__ (line 22) | def __init__(self, log_dir, model, input_shape):
method append_loss (line 35) | def append_loss(self, epoch, loss, val_loss):
method loss_plot (line 53) | def loss_plot(self):
class EvalCallback (line 80) | class EvalCallback():
method __init__ (line 81) | def __init__(self, net, input_shape, anchors, anchors_mask, class_name...
method get_map_txt (line 112) | def get_map_txt(self, image_id, image, class_names, map_out_path):
method on_epoch_end (line 171) | def on_epoch_end(self, epoch, model_eval):
FILE: yolov5-v6.1-pytorch-master/utils/dataloader.py
class YoloDataset (line 12) | class YoloDataset(Dataset):
method __init__ (line 13) | def __init__(self, annotation_lines, input_shape, num_classes, anchors...
method __len__ (line 35) | def __len__(self):
method __getitem__ (line 38) | def __getitem__(self, index):
method rand (line 76) | def rand(self, a=0, b=1):
method get_random_data (line 79) | def get_random_data(self, annotation_line, input_shape, jitter=.3, hue...
method merge_bboxes (line 194) | def merge_bboxes(self, bboxes, cutx, cuty):
method get_random_data_with_Mosaic (line 240) | def get_random_data_with_Mosaic(self, annotation_line, input_shape, ji...
method get_random_data_with_MixUp (line 371) | def get_random_data_with_MixUp(self, image_1, box_1, image_2, box_2):
method get_near_points (line 381) | def get_near_points(self, x, y, i, j):
method get_target (line 393) | def get_target(self, targets):
function yolo_dataset_collate (line 491) | def yolo_dataset_collate(batch):
FILE: yolov5-v6.1-pytorch-master/utils/utils.py
function cvtColor (line 9) | def cvtColor(image):
function resize_image (line 19) | def resize_image(image, size, letterbox_image):
function get_classes (line 37) | def get_classes(classes_path):
function get_anchors (line 46) | def get_anchors(anchors_path):
function get_lr (line 57) | def get_lr(optimizer):
function preprocess_input (line 61) | def preprocess_input(image):
function show_config (line 65) | def show_config(**kwargs):
function download_weights (line 74) | def download_weights(phi, model_dir="./model_data"):
FILE: yolov5-v6.1-pytorch-master/utils/utils_bbox.py
class DecodeBox (line 6) | class DecodeBox():
method __init__ (line 7) | def __init__(self, anchors, num_classes, input_shape, anchors_mask = [...
method decode_box (line 20) | def decode_box(self, inputs):
method yolo_correct_boxes (line 118) | def yolo_correct_boxes(self, box_xy, box_wh, input_shape, image_shape,...
method non_max_suppression (line 145) | def non_max_suppression(self, prediction, num_classes, input_shape, im...
function get_anchors_and_decode (line 243) | def get_anchors_and_decode(input, input_shape, anchors, anchors_mask, nu...
FILE: yolov5-v6.1-pytorch-master/utils/utils_fit.py
function fit_one_epoch (line 8) | def fit_one_epoch(model_train, model, ema, yolo_loss, loss_history, eval...
FILE: yolov5-v6.1-pytorch-master/utils/utils_map.py
function log_average_miss_rate (line 31) | def log_average_miss_rate(precision, fp_cumsum, num_images):
function error (line 72) | def error(msg):
function is_float_between_0_and_1 (line 79) | def is_float_between_0_and_1(value):
function voc_ap (line 95) | def voc_ap(rec, prec):
function file_lines_to_list (line 142) | def file_lines_to_list(path):
function draw_text_in_image (line 153) | def draw_text_in_image(img, text, pos, color, line_width):
function adjust_axes (line 170) | def adjust_axes(r, t, fig, axes):
function draw_plot_func (line 185) | def draw_plot_func(dictionary, n_classes, window_title, plot_title, x_la...
function get_map (line 276) | def get_map(MINOVERLAP, draw_plot, score_threhold=0.5, path = './map_out'):
function preprocess_gt (line 800) | def preprocess_gt(gt_path, class_names):
function preprocess_dr (line 870) | def preprocess_dr(dr_path, class_names):
function get_coco_map (line 894) | def get_coco_map(class_names, path):
FILE: yolov5-v6.1-pytorch-master/utils_coco/get_map_coco.py
class mAP_YOLO (line 31) | class mAP_YOLO(YOLO):
method detect_image (line 35) | def detect_image(self, image_id, image, results, clsid2catid):
FILE: yolov5-v6.1-pytorch-master/voc_annotation.py
function convert_annotation (line 45) | def convert_annotation(year, image_id, list_file):
function printTable (line 126) | def printTable(List1, List2):
FILE: yolov5-v6.1-pytorch-master/yolo.py
class YOLO (line 56) | class YOLO(object):
method get_defaults (line 103) | def get_defaults(cls, n):
method __init__ (line 112) | def __init__(self, **kwargs):
method generate (line 138) | def generate(self, onnx=False):
method detect_image (line 155) | def detect_image(self, image, crop = False, count = False):
method get_FPS (line 341) | def get_FPS(self, image, test_interval):
method detect_heatmap (line 391) | def detect_heatmap(self, image, heatmap_save_path):
method convert_to_onnx (line 442) | def convert_to_onnx(self, simplify, model_path):
method get_map_txt (line 480) | def get_map_txt(self, image_id, image, class_names, map_out_path):
FILE: 数据处理/coco2voc.py
function trans_id (line 11) | def trans_id(category_id):
Copy disabled (too large)
Download .json
Condensed preview — 74 files, each showing path, character count, and a content snippet. Download the .json file for the full structured content (25,117K chars).
[
{
"path": "BM(C++)/BM.cpp",
"chars": 7886,
"preview": "/* ˫Ŀ */\r\n\r\n#include <opencv2/opencv.hpp> \r\n#include <iostream> \r\n#include <math.h> \r\n\r\nusing namespace "
},
{
"path": "BM(Python)/BM.py",
"chars": 2938,
"preview": "import cv2\r\nimport camera_configs\r\nimport math\r\nimport os\r\n# 引入函数库\r\nimport datetime as dt\r\n\r\nfloder = os.getcwd()\r\ncap ="
},
{
"path": "BM(Python)/camera_configs.py",
"chars": 3727,
"preview": "# author: young\r\nimport cv2\r\nimport numpy as np\r\n\r\n# 效果好\r\nleft_camera_matrix = np.array([[986.4572391,1.673607456,651.07"
},
{
"path": "Jeston nano_tensorrt/CMakeLists.txt",
"chars": 1354,
"preview": "cmake_minimum_required(VERSION 2.6)\n\nproject(yolov5)\n\nadd_definitions(-std=c++11)\nadd_definitions(-DAPI_EXPORTS)\noption("
},
{
"path": "Jeston nano_tensorrt/calibrator.cpp",
"chars": 2849,
"preview": "#include <iostream>\n#include <iterator>\n#include <fstream>\n#include <opencv2/dnn/dnn.hpp>\n#include \"calibrator.h\"\n#inclu"
},
{
"path": "Jeston nano_tensorrt/calibrator.h",
"chars": 1245,
"preview": "#ifndef ENTROPY_CALIBRATOR_H\n#define ENTROPY_CALIBRATOR_H\n\n#include <NvInfer.h>\n#include <string>\n#include <vector>\n#inc"
},
{
"path": "Jeston nano_tensorrt/common.hpp",
"chars": 14336,
"preview": "#ifndef YOLOV5_COMMON_H_\n#define YOLOV5_COMMON_H_\n\n#include <fstream>\n#include <map>\n#include <sstream>\n#include <vector"
},
{
"path": "Jeston nano_tensorrt/cuda_utils.h",
"chars": 417,
"preview": "#ifndef TRTX_CUDA_UTILS_H_\n#define TRTX_CUDA_UTILS_H_\n\n#include <cuda_runtime_api.h>\n\n#ifndef CUDA_CHECK\n#define CUDA_CH"
},
{
"path": "Jeston nano_tensorrt/gen_wts.py",
"chars": 1798,
"preview": "import sys\nimport argparse\nimport os\nimport struct\nimport torch\nfrom utils.torch_utils import select_device\n\n\ndef parse_"
},
{
"path": "Jeston nano_tensorrt/logging.h",
"chars": 16584,
"preview": "/*\n * Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.\n *\n * Licensed under the Apache License, Version 2.0 "
},
{
"path": "Jeston nano_tensorrt/macros.h",
"chars": 462,
"preview": "#ifndef __MACROS_H\n#define __MACROS_H\n\n#ifdef API_EXPORTS\n#if defined(_MSC_VER)\n#define API __declspec(dllexport)\n#else\n"
},
{
"path": "Jeston nano_tensorrt/preprocess.cu",
"chars": 3557,
"preview": "#include \"preprocess.h\"\n#include <opencv2/opencv.hpp>\n\n__global__ void warpaffine_kernel( \n uint8_t* src, int src_lin"
},
{
"path": "Jeston nano_tensorrt/preprocess.h",
"chars": 357,
"preview": "#ifndef __PREPROCESS_H\n#define __PREPROCESS_H\n\n#include <cuda_runtime.h>\n#include <cstdint>\n\n\nstruct AffineMatrix{\n f"
},
{
"path": "Jeston nano_tensorrt/samples",
"chars": 22,
"preview": "../yolov3-spp/samples/"
},
{
"path": "Jeston nano_tensorrt/utils.h",
"chars": 1463,
"preview": "#ifndef TRTX_YOLOV5_UTILS_H_\n#define TRTX_YOLOV5_UTILS_H_\n\n#include <dirent.h>\n#include <opencv2/opencv.hpp>\n\nstatic inl"
},
{
"path": "Jeston nano_tensorrt/yololayer.cu",
"chars": 11887,
"preview": "#include <assert.h>\n#include <vector>\n#include <iostream>\n#include \"yololayer.h\"\n#include \"cuda_utils.h\"\n\nnamespace Tn\n{"
},
{
"path": "Jeston nano_tensorrt/yololayer.h",
"chars": 4711,
"preview": "#ifndef _YOLO_LAYER_H\n#define _YOLO_LAYER_H\n\n#include <vector>\n#include <string>\n#include <NvInfer.h>\n#include \"macros.h"
},
{
"path": "Jeston nano_tensorrt/yolov5.cpp",
"chars": 26490,
"preview": "#include <iostream>\n#include <chrono>\n#include <cmath>\n#include \"cuda_utils.h\"\n#include \"logging.h\"\n#include \"common.hpp"
},
{
"path": "Jeston nano_tensorrt/部署/python_trt.py",
"chars": 9438,
"preview": "from ctypes import *\nimport cv2\nimport numpy as np\nimport numpy.ctypeslib as npct\nimport time\nimport math\nfrom PIL impor"
},
{
"path": "LICENSE",
"chars": 1063,
"preview": "MIT License\n\nCopyright (c) 2024 yzfzzz\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof "
},
{
"path": "README.md",
"chars": 2126,
"preview": "<img src=\"https://yzfzzz.oss-cn-shenzhen.aliyuncs.com/image/dafafa.drawio%20(5)%20(1).png\" alt=\"dafafa.drawio (5) (1)\" s"
},
{
"path": "SGBM(C++)/SGBM.cpp",
"chars": 8323,
"preview": "/* ˫Ŀ */\r\n\r\n#include <opencv2/opencv.hpp> \r\n#include <iostream> \r\n#include <math.h> \r\n\r\nusing namespace "
},
{
"path": "SGBM(Python)/sgbm-video.py",
"chars": 5955,
"preview": "import cv2\r\nimport numpy as np\r\nimport time\r\nimport random\r\nimport math\r\n\r\n# -----------------------------------双目相机的基本参"
},
{
"path": "Win_tensorrt/python_trt.py",
"chars": 9492,
"preview": "from ctypes import *\nimport cv2\nimport numpy as np\nimport numpy.ctypeslib as npct\nimport time\nimport math\nfrom PIL impor"
},
{
"path": "distance_measurement_cpp/camera_config.json",
"chars": 799,
"preview": "{\n \"left_camera_matrix\": [\n [ 516.5066236, -1.444673028, 320.2950423 ],\n [ 0, 516.5816117, 270.7881873 ],\n [ 0"
},
{
"path": "distance_measurement_cpp/json.hpp",
"chars": 953435,
"preview": "// __ _____ _____ _____\n// __| | __| | | | JSON for Modern C++\n// | | |__ | | | | | | version 3.12"
},
{
"path": "distance_measurement_cpp/main.cpp",
"chars": 61,
"preview": "#include \"stereo_match_algorithm.h\"\n\nint main() {\n\ttest1();\n}"
},
{
"path": "distance_measurement_cpp/mouse_controller.cpp",
"chars": 1638,
"preview": "#include \"mouse_controller.h\"\n\nMouseController::MouseController(const std::string& windowName)\n\t: m_windowName(windowNam"
},
{
"path": "distance_measurement_cpp/mouse_controller.h",
"chars": 1305,
"preview": "#pragma once\n#include <opencv2/opencv.hpp>\n#include <functional>\n#include <map>\n\nclass MouseController {\npublic:\n\t// ¼ö\n"
},
{
"path": "distance_measurement_cpp/stereo_match_algorithm.cpp",
"chars": 9807,
"preview": "#include \"stereo_match_algorithm.h\"\n#include \"mouse_controller.h\"\n#include <vector>\n#include <math.h>\n\nnamespace stereo "
},
{
"path": "distance_measurement_cpp/stereo_match_algorithm.h",
"chars": 3125,
"preview": "#pragma once\n#include <fstream>\n#include <iostream>\n#include <opencv2/opencv.hpp>\n#include <string>\n#include <direct.h>\n"
},
{
"path": "stereo_introduce/VideoCap.ini",
"chars": 267,
"preview": "[Still]\r\nStiMethod=2\r\nStiFormat=jpg\r\n\r\n[Preview]\r\nImgSaveFolder=\r\nAppName=\r\nStiShow=yes\r\nConnectAudio=yes\r\nShowFrameRate"
},
{
"path": "stereo_shot.py",
"chars": 1037,
"preview": "import cv2\r\nimport time\r\n\r\ncounter = 1\r\nAUTO = True # 自动拍照,或手动按s键拍照\r\nINTERVAL = 2 # 自动拍照间隔\r\ncamera = cv2.VideoCapture(0"
},
{
"path": "yolov5-v6.1-pytorch-master/.gitignore",
"chars": 1925,
"preview": "# ignore map, miou, datasets\nmap_out/\nmiou_out/\nVOCdevkit/\ndatasets/\nMedical_Datasets/\nlfw/\nlogs/\nmodel_data/\n.temp_map_"
},
{
"path": "yolov5-v6.1-pytorch-master/LICENSE",
"chars": 35149,
"preview": " GNU GENERAL PUBLIC LICENSE\n Version 3, 29 June 2007\n\n Copyright (C) 2007 Free "
},
{
"path": "yolov5-v6.1-pytorch-master/get_map.py",
"chars": 6672,
"preview": "import os\nimport xml.etree.ElementTree as ET\n\nfrom PIL import Image\nfrom tqdm import tqdm\n\nfrom utils.utils import get_c"
},
{
"path": "yolov5-v6.1-pytorch-master/kmeans_for_anchors.py",
"chars": 5906,
"preview": "#-------------------------------------------------------------------------------------------------------#\n# kmeans虽然会对"
},
{
"path": "yolov5-v6.1-pytorch-master/nets/CSPdarknet.py",
"chars": 7047,
"preview": "import warnings\n\nimport torch\nimport torch.nn as nn\n\n\nclass SiLU(nn.Module):\n @staticmethod\n def forward(x):\n "
},
{
"path": "yolov5-v6.1-pytorch-master/nets/__init__.py",
"chars": 1,
"preview": "#"
},
{
"path": "yolov5-v6.1-pytorch-master/nets/yolo.py",
"chars": 4446,
"preview": "import torch\nimport torch.nn as nn\n\nfrom nets.CSPdarknet import C3, Conv, CSPDarknet\n\n\n#--------------------------------"
},
{
"path": "yolov5-v6.1-pytorch-master/nets/yolo_training.py",
"chars": 22574,
"preview": "import math\nfrom copy import deepcopy\nfrom functools import partial\n\nimport numpy as np\nimport torch\nimport torch.nn as "
},
{
"path": "yolov5-v6.1-pytorch-master/predict.py",
"chars": 7615,
"preview": "#-----------------------------------------------------------------------#\n# predict.py将单张图片预测、摄像头检测、FPS测试和目录遍历检测等功能\n# "
},
{
"path": "yolov5-v6.1-pytorch-master/requirements.txt",
"chars": 141,
"preview": "scipy==1.2.1\nnumpy==1.17.0\nmatplotlib==3.1.2\nopencv_python==4.1.2.30\ntorch==1.2.0\ntorchvision==0.4.0\ntqdm==4.60.0\nPillow"
},
{
"path": "yolov5-v6.1-pytorch-master/sgbm-video.py",
"chars": 5955,
"preview": "import cv2\r\nimport numpy as np\r\nimport time\r\nimport random\r\nimport math\r\n\r\n# -----------------------------------双目相机的基本参"
},
{
"path": "yolov5-v6.1-pytorch-master/summary.py",
"chars": 1204,
"preview": "#--------------------------------------------#\n# 该部分代码用于看网络结构\n#--------------------------------------------#\nimport to"
},
{
"path": "yolov5-v6.1-pytorch-master/train.py",
"chars": 27478,
"preview": "#-------------------------------------#\n# 对数据集进行训练\n#-------------------------------------#\nimport datetime\nimport "
},
{
"path": "yolov5-v6.1-pytorch-master/utils/__init__.py",
"chars": 1,
"preview": "#"
},
{
"path": "yolov5-v6.1-pytorch-master/utils/callbacks.py",
"chars": 9700,
"preview": "import datetime\nimport os\n\nimport torch\nimport matplotlib\nmatplotlib.use('Agg')\nimport scipy.signal\nfrom matplotlib impo"
},
{
"path": "yolov5-v6.1-pytorch-master/utils/dataloader.py",
"chars": 21355,
"preview": "from random import sample, shuffle\n\nimport cv2\nimport numpy as np\nimport torch\nfrom PIL import Image\nfrom torch.utils.da"
},
{
"path": "yolov5-v6.1-pytorch-master/utils/utils.py",
"chars": 3293,
"preview": "import numpy as np\nfrom PIL import Image\n\n\n#---------------------------------------------------------#\n# 将图像转换成RGB图像,防"
},
{
"path": "yolov5-v6.1-pytorch-master/utils/utils_bbox.py",
"chars": 20515,
"preview": "import numpy as np\nimport torch\nfrom torchvision.ops import nms\n\n\nclass DecodeBox():\n def __init__(self, anchors, num"
},
{
"path": "yolov5-v6.1-pytorch-master/utils/utils_fit.py",
"chars": 5568,
"preview": "import os\n\nimport torch\nfrom tqdm import tqdm\n\nfrom utils.utils import get_lr\n \ndef fit_one_epoch(model_train, mo"
},
{
"path": "yolov5-v6.1-pytorch-master/utils/utils_map.py",
"chars": 36872,
"preview": "import glob\nimport json\nimport math\nimport operator\nimport os\nimport shutil\nimport sys\ntry:\n from pycocotools.coco im"
},
{
"path": "yolov5-v6.1-pytorch-master/utils_coco/coco_annotation.py",
"chars": 3873,
"preview": "#-------------------------------------------------------#\n# 用于处理COCO数据集,根据json文件生成txt文件用于训练\n#-------------------------"
},
{
"path": "yolov5-v6.1-pytorch-master/utils_coco/get_map_coco.py",
"chars": 4909,
"preview": "import json\nimport os\n\nimport numpy as np\nimport torch\nfrom PIL import Image\nfrom pycocotools.coco import COCO\nfrom pyco"
},
{
"path": "yolov5-v6.1-pytorch-master/voc_annotation.py",
"chars": 6585,
"preview": "import os\nimport random\nimport xml.etree.ElementTree as ET\n\nimport numpy as np\n\nfrom utils.utils import get_classes\n\n#--"
},
{
"path": "yolov5-v6.1-pytorch-master/yolo.py",
"chars": 25815,
"preview": "import colorsys\nimport math\nimport os\nimport time\n\nimport cv2\nimport numpy as np\nimport torch\nimport torch.nn as nn\nfrom"
},
{
"path": "双目视觉资料/opencv 双目摄像头拍照(分别左右镜头拍照).txt",
"chars": 2843,
"preview": " \r\n#include\"stdafx.h\"\r\n#include<iostream>\r\n#include<string>\r\n#include<sstream>\r\n#include<opencv2/core.hpp>\r\n#include<ope"
},
{
"path": "双目视觉资料/opencv 双目摄像头拍照(分别左右镜头拍照)_一颗小树x的博客-CSDN博客_双目摄像头拍照.mhtml",
"chars": 4411243,
"preview": "From: <Saved by Blink>\r\nSnapshot-Content-Location: https://guo-pu.blog.csdn.net/article/details/86518807\r\nSubject: =?utf"
},
{
"path": "双目视觉资料/制作标定板——matlab编程实现_一颗小树x的博客-CSDN博客_matlab 标定板.mhtml",
"chars": 3148219,
"preview": "From: <Saved by Blink>\r\nSnapshot-Content-Location: https://guo-pu.blog.csdn.net/article/details/117565629\r\nSubject: =?ut"
},
{
"path": "双目视觉资料/双目 机器视觉-- 测距.txt",
"chars": 17980,
"preview": "/* 双目测距 */\r\n \r\n#include <opencv2/opencv.hpp> \r\n#include <iostream> \r\n#include <math.h> \r\n \r\nusing namesp"
},
{
"path": "双目视觉资料/双目 机器视觉-- 测距_一颗小树x的博客-CSDN博客_机器视觉测距.mhtml",
"chars": 5162018,
"preview": "From: <Saved by Blink>\r\nSnapshot-Content-Location: https://guo-pu.blog.csdn.net/article/details/86744936\r\nSubject: =?utf"
},
{
"path": "双目视觉资料/双目摄像头内参如何使用? 如何转化数据?_一颗小树x的博客-CSDN博客_摄像头内参.mhtml",
"chars": 3622687,
"preview": "From: <Saved by Blink>\r\nSnapshot-Content-Location: https://guo-pu.blog.csdn.net/article/details/86710737\r\nSubject: =?utf"
},
{
"path": "双目视觉资料/双目测距 SGBM算法 Python版.txt",
"chars": 5895,
"preview": "import cv2\r\nimport numpy as np\r\n \r\n# 左相机内参\r\nleft_camera_matrix = np.array([[416.841180253704, 0.0, 338.485167779639],\r\n "
},
{
"path": "双目视觉资料/双目测距 SGBM算法 Python版_一颗小树x的博客-CSDN博客_双目相机测距python.mhtml",
"chars": 3386479,
"preview": "From: <Saved by Blink>\r\nSnapshot-Content-Location: https://guo-pu.blog.csdn.net/article/details/120330050\r\nSubject: =?ut"
},
{
"path": "双目视觉资料/立体匹配算法(局部立体匹配 、全局立体匹配 、深度学习立体匹配 )_一颗小树x的博客-CSDN博客_深度学习立体匹配.mhtml",
"chars": 3164012,
"preview": "From: <Saved by Blink>\r\nSnapshot-Content-Location: https://guo-pu.blog.csdn.net/article/details/121421543\r\nSubject: =?ut"
},
{
"path": "常见问题答疑.md",
"chars": 1354,
"preview": "# 双目检测常见问题答疑\r\n\r\n> **开源链接**:https://github.com/yzfzzz/Stereo-Detection \r\n>\r\n> **CSDN**:https://blog.csdn.net/henghuizan27"
},
{
"path": "数据处理/README.md",
"chars": 88,
"preview": "# 文件说明\r\n\r\n`clean-xml-jpg.py`:清洗没有图片的标签;或者没有标签的图片\r\n\r\n`coco2voc.py`:COCO转VOC格式\r\n\r\n\r\n\r\n\r\n\r\n"
},
{
"path": "数据处理/clean-xml-jpg.py",
"chars": 843,
"preview": "import os\r\n\r\nimages_dir = 'D:\\\\BaiduNetdiskDownload\\\\VOCdevkit\\\\VOC2007\\\\Annotations\\\\'\r\nxml_dir = 'D:\\\\BaiduNetdiskDown"
},
{
"path": "数据处理/coco2voc.py",
"chars": 3533,
"preview": "# translate coco_json to xml\r\n# 使用时仅需修改21、22、24行路径文件\r\nimport os\r\nimport time\r\nimport json\r\nimport pandas as pd\r\nfrom tqd"
}
]
// ... and 4 more files (download for full content)
About this extraction
This page contains the full source code of the yzfzzz/Stereo-Detection GitHub repository, extracted and formatted as plain text for AI agents and large language models (LLMs). The extraction includes 74 files (98.9 MB), approximately 6.1M tokens, and a symbol index with 621 extracted functions, classes, methods, constants, and types. Use this with OpenClaw, Claude, ChatGPT, Cursor, Windsurf, or any other AI tool that accepts text input. You can copy the full output to your clipboard or download it as a .txt file.
Extracted by GitExtract — a free GitHub repo-to-text converter for AI. Built by Nikandr Surkov.