Compare commits


No commits in common. "main" and "onnxDetector" have entirely different histories.

29 changed files with 165 additions and 9997 deletions

View File

@ -12,9 +12,6 @@ EndProject
Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "camAlgoSW", "camAlgoSW\camAlgoSW.vcxproj", "{C280CB49-2B9B-43B9-A99E-5C90B7CEF033}"
EndProject
Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "camAlgoSW_test", "camAlgoSW_test\camAlgoSW_test.vcxproj", "{AA8276D9-77FD-451F-BC95-AC8E77D9CEFC}"
ProjectSection(ProjectDependencies) = postProject
{C280CB49-2B9B-43B9-A99E-5C90B7CEF033} = {C280CB49-2B9B-43B9-A99E-5C90B7CEF033}
EndProjectSection
EndProject
Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "camCalib", "camCalib\camCalib.vcxproj", "{3C3A4670-25E6-4E17-9723-2897316D2437}"
EndProject

View File

@ -97,12 +97,10 @@
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
<LinkIncremental>true</LinkIncremental>
<IncludePath>.\sourceCode;.\sourceCode\CG_frontEnd\inc;$(VC_IncludePath);$(WindowsSDK_IncludePath);</IncludePath>
<OutDir>$(SolutionDir)build\$(Platform)\$(Configuration)\</OutDir>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
<LinkIncremental>false</LinkIncremental>
<IncludePath>.\sourceCode;.\sourceCode\CG_frontEnd\inc;$(VC_IncludePath);$(WindowsSDK_IncludePath);</IncludePath>
<OutDir>$(SolutionDir)build\$(Platform)\$(Configuration)\</OutDir>
</PropertyGroup>
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
<ClCompile>

View File

@ -10,6 +10,17 @@
#include "compute3D.h"
#include "camAlgoSW_Export.h"
typedef struct
{
uint16_t WinRdx;//start coordinate of the window within the image
uint16_t y; //y coordinate
uint16_t Rid; //region ID
uint8_t Flag; //bit0 is the overlap flag; bit1 is the reflective-signal flag
uint8_t PeakRltvRdx; //relative coordinate of the peak within the window; low 4 bits are the start, high 4 bits are the end
uint8_t data[RGN_DATA_WIN_SIZE];
//double offset;
}Luma_rgnData;
#define SV_WIN_SIZE 16
#define ALIGN_RDX ((LAZER_WIN_SIZE-SV_WIN_SIZE)/2)
std::vector<Luma_rgnData> SvPickRgnRslt(
@ -269,15 +280,8 @@ void camAlgoSW(
CamPara& calibPara,
LineCalibK* lineCalibKx,
LineCalibK* lineCalibKy,
#if _ALGO_MODULE_DEBUG
std::vector<Luma_rgnData>& frontendData_debug,
std::vector<RgnPix>& centeringPnt_debug,
std::vector<RgnSubPix>& subpixPnt_debug,
std::vector<RgnSubPixCalib>& calibSubpixPnt_debug,
#endif
std::vector<Pnt3D>& resul3DPnts)
{
PickLaserResult pFrontendResult;
PickLazerWin(
1,
@ -308,17 +312,15 @@ void camAlgoSW(
frontendPara.RgnMeanETh,
&pFrontendResult
);
//convert the front-end result into the input data format
std::vector<Luma_rgnData> frontendData = SvPickRgnRslt(
inData,
inFrmH,
inFrmW,
pFrontendResult
);
#if _ALGO_MODULE_DEBUG
frontendData_debug.insert(frontendData_debug.end(), frontendData.begin(), frontendData.end());
#endif
std::vector<RgnPix> InRgnPnt;
genTestSream(
frontendData,
@ -330,9 +332,6 @@ void camAlgoSW(
InRgnPnt,
OutCenteringPnt
);
#if _ALGO_MODULE_DEBUG
centeringPnt_debug.insert(centeringPnt_debug.end(), OutCenteringPnt.begin(), OutCenteringPnt.end());
#endif
//Convolve
std::vector<RgnPixConvolve> OutConvolvePnt;
@ -345,9 +344,6 @@ void camAlgoSW(
subpix(
OutConvolvePnt,
OutSubpixPnt);
#if _ALGO_MODULE_DEBUG
subpixPnt_debug.insert(subpixPnt_debug.end(), OutSubpixPnt.begin(), OutSubpixPnt.end());
#endif
//convert the subpix output into RgnSubPixCalib format; in the hardware implementation this step is done by hardware
//data format: each row consists of a line sync followed by 8 beats; the line sync carries x and y, and the following 8 beats carry KX0~KX7 and KY0~KY7
@ -361,9 +357,6 @@ void camAlgoSW(
inCalibData,
outSubpixCalib
);
#if _ALGO_MODULE_DEBUG
calibSubpixPnt_debug.insert(calibSubpixPnt_debug.end(), outSubpixCalib.begin(), outSubpixCalib.end());
#endif
// Compute3D
compute3D(
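
The comment above describes a fixed per-row packet for the subpix-to-calibration hand-off. A minimal sketch of that layout follows, with hypothetical field names (the real RgnSubPixCalib definition is not shown in this diff):

#include <cstdint>
// Hypothetical per-row calibration packet: beat 0 (line sync) carries x and y,
// the following 8 beats carry KX0..KX7 and KY0..KY7.
struct CalibStreamRow {
    uint16_t x;            // line-sync payload
    uint16_t y;
    double   kx[8];        // KX0..KX7, one per beat
    double   ky[8];        // KY0..KY7, one per beat
};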

View File

@ -9,18 +9,6 @@
# define SG_APISHARED_EXPORT __declspec(dllimport)
#endif
typedef struct
{
uint16_t WinRdx;//start coordinate of the window within the image
uint16_t y; //y coordinate
uint16_t Rid; //region ID
uint8_t Flag; //bit0 is the overlap flag; bit1 is the reflective-signal flag
uint8_t PeakRltvRdx; //relative coordinate of the peak within the window; low 4 bits are the start, high 4 bits are the end
uint8_t data[RGN_DATA_WIN_SIZE];
//double offset;
}Luma_rgnData;
#define _ALGO_MODULE_DEBUG 1
SG_APISHARED_EXPORT void camAlgoSW(
int inFrmH,
int inFrmW,
@ -30,10 +18,4 @@ SG_APISHARED_EXPORT void camAlgoSW(
CamPara& calibPara,
LineCalibK* lineCalibKx,
LineCalibK* lineCalibKy,
#if _ALGO_MODULE_DEBUG
std::vector<Luma_rgnData>& frontendData,
std::vector<RgnPix>& centeringPnt_debug,
std::vector<RgnSubPix>& subpixPnt_debug,
std::vector<RgnSubPixCalib>& calibSubpixPnt_debug,
#endif
std::vector< Pnt3D>& resul3DPnts);

View File

@ -62,14 +62,10 @@ void compute3D(
double u = (double)InData.x;
double v = (double)InData.y;
double tmp_x = (u - u0) * F_inv;
double tmp_y = (v - v0) * F_inv;
double z_inv = plane_a * tmp_x + plane_b * tmp_y - 1;
double z = 1 / z_inv;
z = -z * plane_c;
double x = tmp_x * z ;
double y = tmp_y * z ;
double z_inv = u * plane_a + v * plane_b + plane_c;
double z = 1/ z_inv;
double x = (u - u0) * z * F_inv;
double y = (v - v0) * z * F_inv;
outData.x = (float)x;
outData.y = (float)y;
outData.z = (float)z;
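
For reference, a minimal standalone sketch of the revised back-projection with made-up intrinsics and plane coefficients (illustrative values only, not taken from this repository): depth comes directly from the pixel-domain plane model 1/z = a*u + b*v + c, and x, y follow from the pinhole model. The removed branch worked in normalized coordinates, so its plane parameters are not interchangeable with the new ones.

#include <cassert>
#include <cmath>

int main()
{
    // assumed intrinsics and laser-plane coefficients (illustrative only)
    const double F = 1000.0, u0 = 640.0, v0 = 512.0;
    const double plane_a = 1e-4, plane_b = 2e-4, plane_c = 1e-3;
    const double F_inv = 1.0 / F;

    const double u = 700.0, v = 600.0;        // a detected laser pixel
    const double z = 1.0 / (u * plane_a + v * plane_b + plane_c);
    const double x = (u - u0) * z * F_inv;    // pinhole back-projection
    const double y = (v - v0) * z * F_inv;

    // the reconstructed point satisfies the inverse-depth plane model by construction
    assert(std::fabs(u * plane_a + v * plane_b + plane_c - 1.0 / z) < 1e-9);
    (void)x; (void)y;
    return 0;
}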

View File

@ -49,7 +49,6 @@ void peakCentering(
RgnPix zeroData = {0,0,0,0,0,0,0};
//ping-pong buffer
RgnPix HSyncData_d1 = {0,0,0,0,0,0,0};
ushort LazerWinX_d1 = 0;
uchar lineBuff_0[RGN_DATA_WIN_SIZE * 2];
//#pragma HLS RESOURCE variable=lineBuff_0 core=RAM_2P
uchar lineBuff_1[RGN_DATA_WIN_SIZE * 2];
@ -83,18 +82,9 @@ void peakCentering(
if(0 == i) //HSync
{
evenFlag = n % 2;
if (0 == linePk.len)
pkCenter = RGN_DATA_WIN_SIZE / 2;
else
pkCenter = linePk.start + (linePk.len >> 1);
HSyncData_d1.LazerWinX = LazerWinX_d1 + pkCenter;
if (HSyncData_d1.LazerWinY == 578)
int kkk = 1;
if ((n > 0) &&( linePk.len>0))
OutCenteringPnt.push_back(HSyncData_d1);
HSyncData_d1 = InData;
LazerWinX_d1 = InData.LazerWinX - RGN_DATA_WIN_SIZE;
pkRng_S = InData.RltvRdx & 0x0f;
pkRng_S += RGN_DATA_WIN_SIZE / 2;
@ -102,6 +92,11 @@ void peakCentering(
pkRng_E = InData.RltvRdx & 0x0f;
pkRng_E += RGN_DATA_WIN_SIZE / 2;
evenFlag = n%2;
if(0 == linePk.len)
pkCenter = RGN_DATA_WIN_SIZE/2;
else
pkCenter = linePk.start + (linePk.len >> 1);
linePk.start = 0;
linePk.len = 0;
linePk.value = 0;
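
A tiny worked example of the window-centering arithmetic kept above (values are made up): a peak run of length len starting at start inside the 16-sample window collapses to start + len/2, and an empty run falls back to the window midpoint.

#include <cstdio>
#include <cstdint>

int main()
{
    const uint16_t winSize = 16;          // assumed to match RGN_DATA_WIN_SIZE above
    const uint16_t start = 5, len = 4;    // hypothetical peak run inside the window

    const int pkCenter = (len == 0) ? (winSize / 2) : (start + (len >> 1));
    std::printf("peak center within the window: %d\n", pkCenter);  // prints 7
    return 0;
}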

View File

@ -84,8 +84,6 @@ void subpix(
InData = InConvolvePnt[readPtr++];
if (InData.LazerWinY == 579)
int kkk = 1;
float fx;
if(InData.nD2 == 0)
fx = 0;
@ -93,7 +91,7 @@ void subpix(
fx = (float)InData.nD1/(float)InData.nD2;
RgnSubPix a_pnt;
if(n == 0)
if(n == 1)
a_pnt.Sync = 0b01;
else
a_pnt.Sync = 0b00;
@ -101,7 +99,7 @@ void subpix(
a_pnt.y = InData.LazerWinY;
a_pnt.rid = InData.LazerWinRid;
a_pnt.value = InData.value;
a_pnt.x = (float)InData.LazerWinX + 8.0f + fx + 0.5; //+0.5: adjust pnt to pixel center
a_pnt.x = (float)InData.LazerWinX + 8.0f + fx;
a_pnt.rsv = 0;
OutSubpixPnt.push_back(a_pnt);
}

View File

@ -2,391 +2,10 @@
//
#include <iostream>
#include <fstream>
#include "..\camAlgoSW\sourceCode\camAlgoSW_Export.h"
#include <opencv2/opencv.hpp>
#include <vector>
#include "..\camAlgoSW\sourceCode\algoGlobals.h"
typedef struct
{
int nMin; //< minimum value
int nMax; //< maximum value
} SWdNLRange;
void saveFrontEndData(char* fileName, std::vector<Luma_rgnData>& frontendData)
{
if (0 == frontendData.size())
return;
std::ofstream TXTFile(fileName);
char TXTData[250];
snprintf(TXTData, sizeof(TXTData), "%3x", (0x800 | 0x10));//MEANSTG_WIN_SIZE);
TXTFile << TXTData << std::endl;
for (int i = 0, i_max = (int)frontendData.size(); i < i_max; i++)
{
Luma_rgnData data = frontendData[i];
snprintf(TXTData, sizeof(TXTData), "%04x %04x %04x %01x %02x", data.WinRdx, data.y, data.Rid, data.Flag, data.PeakRltvRdx);
TXTFile << TXTData << std::endl;
for (int j = 0; j < 16; j++)
{
snprintf(TXTData, sizeof(TXTData), "%02x", data.data[j]);
TXTFile << TXTData << std::endl;
}
}
TXTFile.close();
}
void readFrontEndDataFile(char* fileName, std::vector<Luma_rgnData>& frontendData)
{
std::ifstream inputFile(fileName);
std::string linedata;
if (inputFile.is_open() == false)
return;
std::getline(inputFile, linedata); //first line (header)
int idx = 0;
Luma_rgnData a_pk;
while (std::getline(inputFile, linedata))
{
if (idx == 0)
{
int WinRdx, y, Rid, Flag, PeakRltvRdx;
sscanf_s(linedata.c_str(), "%04x %04x %04x %01x %02x", &WinRdx, &y, &Rid, &Flag, &PeakRltvRdx);
a_pk.WinRdx = WinRdx;
a_pk.y = y;
a_pk.Rid = Rid;
a_pk.Flag = Flag;
a_pk.PeakRltvRdx = PeakRltvRdx;
idx++;
}
else
{
int value;
sscanf_s(linedata.c_str(), "%02x", &value);
a_pk.data[idx - 1] = value;
idx++;
}
if (idx == 17)
{
frontendData.push_back(a_pk);
idx = 0;
}
}
inputFile.close();
return;
}
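
For clarity, a short standalone sketch (with made-up field values) that emits one record in the same text layout the writer above produces and the reader parses: a single header line, then per region a five-field hex header followed by 16 one-byte hex lines.

#include <cstdio>
#include <cstdint>

int main()
{
    std::printf("%3x\n", (0x800 | 0x10));          // header line, as in saveFrontEndData

    // one hypothetical region record
    const unsigned WinRdx = 0x0123, y = 0x0240, Rid = 0x0007, Flag = 0x1, PeakRltvRdx = 0x48;
    const uint8_t data[16] = { 0 };

    std::printf("%04x %04x %04x %01x %02x\n", WinRdx, y, Rid, Flag, PeakRltvRdx);
    for (int j = 0; j < 16; j++)
        std::printf("%02x\n", (unsigned)data[j]);  // 16 window samples
    return 0;
}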
void saveCenteringData(char* filename, std::vector<RgnPix>& centeringPnt)
{
if (centeringPnt.size() < 6)
return;
int vldSize = (int)centeringPnt.size() - 6;
if ( (vldSize % 9) != 0)
return;
int grp = vldSize / 9;
std::ofstream TXTFile(filename);
char TXTData[250];
snprintf(TXTData, sizeof(TXTData), "%3x", (0x800 | 0x10));//MEANSTG_WIN_SIZE);
TXTFile << TXTData << std::endl;
int headOffset = 6;
for (int i = 0; i < grp; i++)
{
RgnPix data = centeringPnt[i * 9 + headOffset];
snprintf(TXTData, sizeof(TXTData), "%04x %04x %04x %01x %02x", data.LazerWinX, data.LazerWinY, data.LazerWinRid, data.LazerWinFlag, data.RltvRdx);
TXTFile << TXTData << std::endl;
for (int j = 1; j <= 8; j++)
{
data = centeringPnt[i * 9 + j + headOffset];
snprintf(TXTData, sizeof(TXTData), "%02x", data.LazerWinX);
TXTFile << TXTData << std::endl;
snprintf(TXTData, sizeof(TXTData), "%02x", data.LazerWinY);
TXTFile << TXTData << std::endl;
}
}
TXTFile.close();
}
void saveSubpixData(char* filename, std::vector<RgnSubPix>& subpixPnt)
{
if (subpixPnt.size() < 6)
return;
std::ofstream TXTFile(filename);
char TXTData[250];
int headOffset = 6;
for (int i = 6, i_max = (int)subpixPnt.size(); i< i_max; i++)
{
RgnSubPix data = subpixPnt[i];
snprintf(TXTData, sizeof(TXTData), "%.5f %04d rid=%04d flag=%01x value=%03d", data.x, data.y, data.rid, data.flag, data.value);
TXTFile << TXTData << std::endl;
}
TXTFile.close();
}
void saveCalibSubpixData(char* filename, std::vector<RgnSubPixCalib>& subpixPnt)
{
if (subpixPnt.size() < 6)
return;
std::ofstream TXTFile(filename);
char TXTData[250];
int headOffset = 6;
for (int i = 6, i_max = (int)subpixPnt.size(); i < i_max; i++)
{
RgnSubPixCalib data = subpixPnt[i];
snprintf(TXTData, sizeof(TXTData), "%.5f %.5f rid=%04d flag=%01x value=%03d", data.x, data.y, data.rid, data.flag, data.value);
TXTFile << TXTData << std::endl;
}
TXTFile.close();
}
void readCalibK(char* paraFilename, LineCalibK* lineCalibK)
{
std::ifstream inputFile(paraFilename);
std::string linedata;
if (inputFile.is_open() == false)
return;
int i = 0;
while (std::getline(inputFile, linedata))
{
LineCalibK a_para;
sscanf_s(linedata.c_str(), "%lf, %lf, %lf, %lf, %lf, %lf, %lf, %lf", &a_para.calibK[0], &a_para.calibK[1], &a_para.calibK[2], &a_para.calibK[3], &a_para.calibK[4], &a_para.calibK[5], &a_para.calibK[6], &a_para.calibK[7]);
lineCalibK[i] = a_para;
i++;
}
inputFile.close();
return;
}
CamPara readLaserPlanePara(char* filename)
{
CamPara para;
memset(&para, 0, sizeof(CamPara));
std::ifstream inputFile(filename);
std::string linedata;
if (inputFile.is_open() == false)
return para;
//read: fx, 0, cx
if (std::getline(inputFile, linedata))
{
double tmp;
sscanf_s(linedata.c_str(), "%lf, %lf, %lf", &para.F_inv, &tmp, &para.u0);
para.F_inv = 1.0 / para.F_inv;
}
else
return para;
//read: 0, fy, cy
if (std::getline(inputFile, linedata))
{
double tmp_1, tmp_2;
sscanf_s(linedata.c_str(), "%lf, %lf, %lf", &tmp_1, &tmp_2, &para.v0);
}
else
return para;
//skip the next two lines
if (!std::getline(inputFile, linedata))
return para;
if (!std::getline(inputFile, linedata))
return para;
//read: a, b, c
if (std::getline(inputFile, linedata))
sscanf_s(linedata.c_str(), "%lf, %lf, %lf", &para.plane_a, &para.plane_b, &para.plane_c);
return para;
}
void saveLineData(char* fileName, std::vector< Pnt3D>& resul3DPnts)
{
std::ofstream sw(fileName);
sw << "LineNum:1" << std::endl;
// sw << "DataType: 0" << std::endl;
// sw << "ScanSpeed: 0" << std::endl;
// sw << "PointAdjust:1" << std::endl;
// sw << "MaxTimeStamp:0_0" << std::endl;
int ptCounter = (int)resul3DPnts.size() - 4;
sw << "Line_0_0_" << ptCounter << std::endl;
for (int i = 4; i < (int)resul3DPnts.size(); i++)
{
Pnt3D a_pt = resul3DPnts[i];
float x = (float)a_pt.x;
float y = (float)a_pt.y;
float z = (float)a_pt.z;
sw << "{" << x << "," << y << "," << z << "}-";
sw << "{0,0}-{0,0}" << std::endl;
}
sw.close();
}
#define CALIB_TEST_GROUP 3
int main()
{
std::cout << "Hello World!\n";
const char* calibDataPath[CALIB_TEST_GROUP] = {
"F:\\ShangGu\\ProductDev\\三角光相机\\相机开发\\CamAlgo_git\\camCalib\\camCalibData\\chessboard\\", //1
"F:\\ShangGu\\ProductDev\\三角光相机\\相机开发\\CamAlgo_git\\camCalib\\camCalibData\\circlePoint\\", //2
"F:\\ShangGu\\ProductDev\\三角光相机\\相机开发\\CamAlgo_git\\camCalib\\camCalibData\\charuCo\\", //3
};
const SWdNLRange fileIdx[CALIB_TEST_GROUP] = {
{1,33},{1,33},{1,10}
};
//algorithm parameters
ProcObj frontendPara;
frontendPara.StartIdx = 0;
frontendPara.EndIdx = 0;
frontendPara.MinStgTh = 5;
frontendPara.LazerWMin = 2;
frontendPara.LazerWMax = 10;
frontendPara.RflctPixTh = 128;
frontendPara.RflctOutEna = 0;//??
frontendPara.OverlapPixTh = 32;
frontendPara.EnhanStep = 2;
frontendPara.PickEna = 1;
frontendPara.RgnFltrTh = 0;
frontendPara.RmvWkEndEna = 1;
frontendPara.RmvWkEndTh = 2;
frontendPara.RmvWkEndMultCoe = 16;
frontendPara.RmvWkEndMinLen = 3;
frontendPara.PickBFEna = 0;
frontendPara.PickBkGrnd = 0;
frontendPara.PickRLenTh = 4;
frontendPara.EnergyPickEna = 1;
frontendPara.PickEnergyType = 0;
frontendPara.RgnEnergyPLen = 32;
frontendPara.RgnMeanETh = 0;
LineCalibK* lineCalibKx = (LineCalibK*)malloc(sizeof(LineCalibK) * MAX_HEIGHT);
LineCalibK* lineCalibKy = (LineCalibK*)malloc(sizeof(LineCalibK) * MAX_HEIGHT);
FramePara inFramePara;
CamPara calibPara;
for (int grp = 0; grp < CALIB_TEST_GROUP; grp++)
{
memset(lineCalibKx, 0, sizeof(LineCalibK) * MAX_HEIGHT);
memset(lineCalibKy, 0, sizeof(LineCalibK) * MAX_HEIGHT);
//read the rectification coefficients
char paraFilename[256];
sprintf_s(paraFilename, "%scalib_param_x.txt", calibDataPath[grp]);
readCalibK(paraFilename, lineCalibKx);
sprintf_s(paraFilename, "%scalib_param_y.txt", calibDataPath[grp]);
readCalibK(paraFilename, lineCalibKy);
//read the laser-plane parameters
sprintf_s(paraFilename, "%scalib_param_K_D.txt", calibDataPath[grp]);
calibPara = readLaserPlanePara(paraFilename);
int startIndex = fileIdx[grp].nMin;
int endIndex = fileIdx[grp].nMax;
int index;
for (index = startIndex; index <= endIndex; index++)
{
char filename[256];
sprintf_s(filename, "%slaser_rotate_mask_%03d.png", calibDataPath[grp], index);
cv::Mat srcImg = cv::imread(filename);
if (srcImg.empty())
break;
inFramePara.FrmNo = index;
inFramePara.timeStamp = index;
inFramePara.encInfo = 0;
inFramePara.frameROI_w = srcImg.cols;
inFramePara.frameROI_h = srcImg.rows;
inFramePara.frameROI_x = 0;
inFramePara.frameROI_y = 0;
cv::Mat gray;
if (srcImg.channels() == 3)
cv::cvtColor(srcImg, gray, cv::COLOR_BGR2GRAY);
else
gray = srcImg.clone();
unsigned char* dataPtr = gray.data;
std::vector< Pnt3D> resul3DPnts;
#if _ALGO_MODULE_DEBUG
std::vector<Luma_rgnData> frontendData;
std::vector<RgnPix> centeringPnt;
std::vector<RgnSubPix> subpixPnt;
std::vector<RgnSubPixCalib> calibSubpixPnt;
#endif
//cv::Mat img;
//inputImg.convertTo(img, CV_64FC1);
//cv::GaussianBlur(gray, gray, cv::Size(5, 5), 1.2, 1.2);
cv::GaussianBlur(gray, gray, cv::Size(5, 5), 1.2, 1.2);
camAlgoSW(
gray.rows,
gray.cols,
dataPtr,
frontendPara,
inFramePara,
calibPara,
lineCalibKx,
lineCalibKy,
#if _ALGO_MODULE_DEBUG
frontendData,
centeringPnt,
subpixPnt,
calibSubpixPnt,
#endif
resul3DPnts);
#if _ALGO_MODULE_DEBUG
//save frontendData
sprintf_s(filename, "%sresult\\%d_verilog.txt", calibDataPath[grp], index);
saveFrontEndData(filename, frontendData);
//save centeringData
sprintf_s(filename, "%sresult\\%d_centeringData.txt", calibDataPath[grp], index);
saveCenteringData(filename, centeringPnt);
//save subpixData
sprintf_s(filename, "%sresult\\%d_subpixData.txt", calibDataPath[grp], index);
saveSubpixData(filename, subpixPnt);
//draw the subpixel points
cv::Mat enlargeImg;
if (srcImg.channels() == 1)
srcImg.convertTo(enlargeImg, cv::COLOR_GRAY2BGR);
else
enlargeImg = srcImg.clone();
cv::Size objSize = srcImg.size();
objSize.width = objSize.width * 5;
cv::resize(enlargeImg, enlargeImg, objSize, 0, 0, cv::INTER_NEAREST);
for (int i = 6, i_max = (int)subpixPnt.size(); i < i_max; i++)
{
RgnSubPix a_subPix = subpixPnt[i];
int row = (int)(a_subPix.y + 0.5);
int col = (int)(a_subPix.x * 5 + 0.5);
enlargeImg.at<cv::Vec3b>(row, col)[0] = 0;
enlargeImg.at<cv::Vec3b>(row, col)[1] = 0;
enlargeImg.at<cv::Vec3b>(row, col)[2] = 255;
}
sprintf_s(filename, "%sresult\\%d_subpix.png", calibDataPath[grp], index);
cv::imwrite(filename, enlargeImg);
//save the calibrated subpixData
sprintf_s(filename, "%sresult\\%d_calibSubpixData.txt", calibDataPath[grp], index);
saveCalibSubpixData(filename, calibSubpixPnt);
#endif
sprintf_s(filename, "%sresult\\%d_3D_data.txt", calibDataPath[grp], index);
saveLineData(filename, resul3DPnts);
}
}
}

View File

@ -78,13 +78,11 @@
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
<LinkIncremental>true</LinkIncremental>
<IncludePath>F:\ShangGu\ProductDev\AlgoDev\thirdParty\opencv\build\include;$(IncludePath);F:\ShangGu\ProductDev\三角光相机\相机开发\camAlgo_git\camAlgoSW\sourceCode</IncludePath>
<OutDir>$(SolutionDir)build\$(Platform)\$(Configuration)\</OutDir>
<IncludePath>F:\ShangGu\ProductDev\AlgoDev\thirdParty\opencv\build\include;$(IncludePath)</IncludePath>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
<LinkIncremental>false</LinkIncremental>
<IncludePath>F:\ShangGu\ProductDev\AlgoDev\thirdParty\opencv\build\include;$(VC_IncludePath);$(WindowsSDK_IncludePath);;F:\ShangGu\ProductDev\三角光相机\相机开发\camAlgo_git\camAlgoSW\sourceCode</IncludePath>
<OutDir>$(SolutionDir)build\$(Platform)\$(Configuration)\</OutDir>
<IncludePath>F:\ShangGu\ProductDev\AlgoDev\thirdParty\opencv\build\include;$(VC_IncludePath);$(WindowsSDK_IncludePath);</IncludePath>
</PropertyGroup>
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
<ClCompile>
@ -124,8 +122,6 @@
<Link>
<SubSystem>Console</SubSystem>
<GenerateDebugInformation>true</GenerateDebugInformation>
<AdditionalLibraryDirectories>F:\ShangGu\ProductDev\AlgoDev\thirdParty\opencv\build\x64\vc16\lib;..\build\x64\Debug;.\sourceCode\CG_frontEnd\lib\debug;%(AdditionalLibraryDirectories)</AdditionalLibraryDirectories>
<AdditionalDependencies>opencv_world480d.lib;camAlgoSW.lib;ImgAccd.lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies)</AdditionalDependencies>
</Link>
</ItemDefinitionGroup>
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
@ -136,15 +132,12 @@
<SDLCheck>true</SDLCheck>
<PreprocessorDefinitions>NDEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<ConformanceMode>true</ConformanceMode>
<Optimization>Disabled</Optimization>
</ClCompile>
<Link>
<SubSystem>Console</SubSystem>
<EnableCOMDATFolding>true</EnableCOMDATFolding>
<OptimizeReferences>true</OptimizeReferences>
<GenerateDebugInformation>true</GenerateDebugInformation>
<AdditionalLibraryDirectories>F:\ShangGu\ProductDev\AlgoDev\thirdParty\opencv\build\x64\vc16\lib;..\build\x64\Release;F:\ShangGu\ProductDev\三角光相机\相机开发\camAlgo_git\camAlgoSW\sourceCode\CG_frontEnd\lib\debug;%(AdditionalLibraryDirectories)</AdditionalLibraryDirectories>
<AdditionalDependencies>opencv_world480.lib;camAlgoSW.lib;ImgAccd.lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies)</AdditionalDependencies>
</Link>
</ItemDefinitionGroup>
<ItemGroup>

View File

@ -29,7 +29,7 @@ void sg_outputCalibK(const char* fileName, cv::Mat& fitMap)
return;
}
void sg_outputCalibKD(const char* fileName, cv::Mat& K, cv::Mat& D, cv::Vec4f& pe)
void sg_outputCalibKD(const char* fileName, cv::Mat& K, cv::Mat& D)
{
std::ofstream sw(fileName);
@ -40,7 +40,7 @@ void sg_outputCalibKD(const char* fileName, cv::Mat& K, cv::Mat& D, cv::Vec4f& p
double temp[3];
for (int _c = 0; _c < 3; _c++)
temp[_c] = K.ptr<double>(i)[_c];
//sprintf_s(dataStr, 250, "%lf, %lf, %lf, %lf, %lf, %lf, %lf, %lf", K[0], K[1], K[2], K[3], K[4], K[5], K[6], K[7]);
sprintf_s(dataStr, 250, "%g, %g, %g", temp[0], temp[1], temp[2]);
sw << dataStr << std::endl;
@ -50,20 +50,6 @@ void sg_outputCalibKD(const char* fileName, cv::Mat& K, cv::Mat& D, cv::Vec4f& p
temp[_c] = D.ptr<double>(0)[_c];
sprintf_s(dataStr, 250, "%g, %g, %g, %g, %g", temp[0], temp[1], temp[2], temp[3], temp[4]);
sw << dataStr << std::endl;
//ax+by+cz+d = 0
float a = (float)pe[0];
float b = (float)pe[1];
float c = (float)pe[2];
float d = (float)pe[3];
//normalize c to -1, converting to the z = ax + by + c form with 3 parameters
a = -a / c;
b = -b / c;
d = -d / c;
c = -1;
sprintf_s(dataStr, 250, "%g, %g, %g", a, b, d);
sw << dataStr << std::endl;
sw.close();
return;
}
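
The plane rewrite above is plain algebra: assuming c != 0, dividing ax + by + cz + d = 0 by -c gives

z = -\frac{a}{c}\,x - \frac{b}{c}\,y - \frac{d}{c}

which is why, after forcing c to -1, only the triple (-a/c, -b/c, -d/c) is written to the file.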
@ -88,7 +74,7 @@ void sg_readCalibKD(const char* fileName, cv::Mat& K, cv::Mat& D)
for (int _c = 0; _c < 3; _c++)
K.ptr<double>(line)[_c] = data[_c];
}
else if(line == 3)
else
{
double data[5];
sscanf_s(linedata.c_str(), "%lf, %lf, %lf, %lf, %lf", &data[0], &data[1], &data[2], &data[3], &data[4]);
@ -101,24 +87,6 @@ void sg_readCalibKD(const char* fileName, cv::Mat& K, cv::Mat& D)
return;
}
void saveSubpixData(char* filename, std::vector<cv::Point2f>& subpixPnt)
{
if (subpixPnt.size() < 6)
return;
std::ofstream TXTFile(filename);
char TXTData[250];
int headOffset = 6;
for (int i = 6, i_max = (int)subpixPnt.size(); i < i_max; i++)
{
cv::Point2f a_subPix = subpixPnt[i];
snprintf(TXTData, sizeof(TXTData), "%.5f %.5f", a_subPix.x, a_subPix.y);
TXTFile << TXTData << std::endl;
}
TXTFile.close();
}
typedef struct
{
int nMin; //< minimum value
@ -129,46 +97,19 @@ typedef struct
#define CALIB_CIRCLE_GRID 2
#define CALIB_CHARUCO 3
void initForwardRectMap(const cv::Mat& K, const cv::Mat& D, const cv::Mat& R,
const cv::Mat& newK, const cv::Size& size, cv::Mat& mapX, cv::Mat& mapY) {
std::vector<cv::Point2f> srcPts;
for (int r = 0; r < size.height; r++) {
for (int c = 0; c < size.width; c++) {
srcPts.push_back(cv::Point2f(c, r));
}
}
std::vector<cv::Point2f> dstPts;
cv::undistortPoints(srcPts, dstPts, K, D, R, newK);
mapX = cv::Mat::zeros(size.height, size.width, CV_32FC1);
mapY = cv::Mat::zeros(size.height, size.width, CV_32FC1);
int idx = 0;
for (int r = 0; r < size.height; r++) {
for (int c = 0; c < size.width; c++) {
mapX.ptr<float>(r)[c] = dstPts[idx].x;
mapY.ptr<float>(r)[c] = dstPts[idx].y;
idx++;
}
}
return;
}
#define CALIB_TEST_GROUP 4
int main()
{
std::cout << "Hello World!\n";
const char* calibDataPath[CALIB_TEST_GROUP] = {
"F:\\ShangGu\\ProductDev\\三角光相机\\相机开发\\CamAlgo_git\\camCalib\\camCalibData\\撕裂原理相机标定图像\\", //0
"F:\\ShangGu\\ProductDev\\三角光相机\\相机开发\\CamAlgo_git\\camCalib\\camCalibData\\chessboard\\", //1
"F:\\ShangGu\\ProductDev\\三角光相机\\相机开发\\CamAlgo_git\\camCalib\\camCalibData\\circlePoint\\", //2
"F:\\ShangGu\\ProductDev\\三角光相机\\相机开发\\CamAlgo_git\\camCalib\\camCalibData\\charuCo\\", //3
"F:\\ShangGu\\ProductDev\\三角光相机\\相机开发\\CamAlgo\\camCalib\\camCalibData\\撕裂原理相机标定图像\\", //0
"F:\\ShangGu\\ProductDev\\三角光相机\\相机开发\\CamAlgo\\camCalib\\camCalibData\\chessboard\\", //1
"F:\\ShangGu\\ProductDev\\三角光相机\\相机开发\\CamAlgo\\camCalib\\camCalibData\\circlePoint\\", //2
"F:\\ShangGu\\ProductDev\\三角光相机\\相机开发\\CamAlgo\\camCalib\\camCalibData\\charuCo\\", //3
};
const SWdNLRange fileIdx[CALIB_TEST_GROUP] = {
{3,39},{1,33},{1,33},{1,10}
{3,39},{1,200},{1,166},{122,141}
};
const int boardType[CALIB_TEST_GROUP] =
{
@ -180,11 +121,10 @@ int main()
for(int grp = 0; grp < CALIB_TEST_GROUP; grp ++)
{
grp = 3;
grp = 2;
int calibType = boardType[grp];
cv::Size cbPattern;
float cbSquareSize;
float markSize;
if (CALIB_CHESS_BOARD == calibType)
{
cbPattern = cv::Size(8, 11); // 10);
@ -197,15 +137,13 @@ int main()
}
else if (CALIB_CHARUCO == calibType)
{
cbPattern = cv::Size(47, 21); // 10);
cbSquareSize = 0.05f;
markSize = 0.037f;
cbPattern = cv::Size(20, 46); // 10);
cbSquareSize = 50.0f; // 10.f;
}
else
continue;
std::vector<std::vector<cv::Point2f>> cbCornersList;
std::vector<std::vector<int>> cbCornersIdList;
cv::Size imageSize;
int startIndex = fileIdx[grp].nMin;
@ -240,9 +178,6 @@ int main()
cv::imwrite(filename, color);
}
#endif
if (corners.empty())
continue;
cbCornersList.push_back(corners);
}
else if (CALIB_CIRCLE_GRID == calibType)
{
@ -257,56 +192,26 @@ int main()
cv::imwrite(filename, color);
}
#endif
if (corners.empty())
continue;
cbCornersList.push_back(corners);
}
else if (CALIB_CHARUCO == calibType)
{
std::vector<int> markerIds;
std::vector<std::vector<cv::Point2f> > markerCorners;
std::vector<int> charucoIds;
detectCharucoCorners(img,
cbPattern,
cbSquareSize,
markSize,
markerIds,
markerCorners,
charucoIds,
corners);
cv::Mat imageCopy = img.clone();
if (markerIds.size() > 0)
{
cv::aruco::drawDetectedMarkers(imageCopy, markerCorners, markerIds);
}
if (charucoIds.size() > 0)
{
cv::aruco::drawDetectedCornersCharuco(imageCopy, corners, charucoIds, cv::Scalar(0, 255, 255));
}
char markFilename[256];
sprintf_s(markFilename, "%scalib_%03d_markers.bmp", calibDataPath[grp], index);
cv::imwrite(markFilename, imageCopy);
if (corners.empty())
continue;
cbCornersList.push_back(corners);
cbCornersIdList.push_back(charucoIds);
}
if (corners.empty())
continue;
imageSize = cv::Size(img.cols, img.rows);
cbCornersList.push_back(corners);
}
cv::Mat K, D;
std::vector<double> reprojectionError;
if (CALIB_CHARUCO == calibType)
monocularCalibration_charuco(cbCornersIdList, cbCornersList, imageSize, cbPattern, cbSquareSize, K, D, reprojectionError);
else
monocularCalibration_chessboard(cbCornersList, imageSize, cbPattern, cbSquareSize, K, D, reprojectionError);
monocularCalibration(cbCornersList, imageSize, cbPattern, cbSquareSize, K, D, reprojectionError);
for(int i = 0; i < reprojectionError.size(); i ++)
std::cout << reprojectionError[i] << std::endl;
// output map type: CV_32FC1 or CV_16SC2 is usually used
cv::Mat backwardMap_x, backwardMap_y;
cv::Mat forwardMap_x, forwardMap_y;
cv::Mat map_x, map_y;
cv::Mat newCamMatrix;
// generate the distortion-rectification maps
#if ENABLE_FISH_EYE
@ -314,13 +219,12 @@ int main()
#else
double alpha = 0.4; // 0.4;
newCamMatrix = cv::getOptimalNewCameraMatrix(K, D, imageSize, alpha, imageSize, 0);
cv::initUndistortRectifyMap(K, D, cv::Mat(), newCamMatrix, imageSize, CV_32FC1, backwardMap_x, backwardMap_y);
initForwardRectMap(K, D, cv::Mat(), newCamMatrix, imageSize, forwardMap_x, forwardMap_y);
cv::initUndistortRectifyMap(K, D, cv::Mat(), newCamMatrix, imageSize, CV_32FC1, map_x, map_y);
#endif
// generate the coefficient tables
cv::Mat fitMap_x = GetFitParamMap(forwardMap_x, 1);
cv::Mat fitMap_y = GetFitParamMap(forwardMap_y, 1);
cv::Mat fitMap_x = GetFitParamMap(map_x, 1);
cv::Mat fitMap_y = GetFitParamMap(map_y, 1);
//write the coefficient files
char calibParamName[256];
sprintf_s(calibParamName, "%scalib_param_x.txt", calibDataPath[grp]);
@ -334,8 +238,8 @@ int main()
//find the maximum and mean error
// compute the absolute difference
cv::Mat diff_x, diff_y;
cv::absdiff(forwardMap_x, mapGen_x, diff_x);
cv::absdiff(forwardMap_y, mapGen_y, diff_y);
cv::absdiff(map_x, mapGen_x, diff_x);
cv::absdiff(map_y, mapGen_y, diff_y);
// find the minimum and maximum values
double minVal_x, maxVal_x;
cv::minMaxLoc(diff_x, &minVal_x, &maxVal_x);
@ -351,7 +255,9 @@ int main()
std::cout << "Y Mean difference: " << meanVal_y[0] << std::endl;
//generate the rectified images
for (index = startIndex; index <= endIndex; index++) {
index = 3;
for (;; index++) {
char filename[256];
sprintf_s(filename, "%scalib_%03d.bmp", calibDataPath[grp], index);
cv::Mat srcImg = cv::imread(filename);
@ -361,10 +267,10 @@ int main()
cv::Mat img;
cv::rotate(srcImg, img, cv::ROTATE_90_COUNTERCLOCKWISE);
cv::Mat calibImg;
cv::remap(img,
remap(img,
calibImg,
backwardMap_x,
backwardMap_y,
mapGen_x,
mapGen_y,
cv::INTER_LINEAR,
cv::BORDER_CONSTANT,
cv::Scalar(0, 0, 0));
@ -372,22 +278,18 @@ int main()
cv::imwrite(filename, calibImg);
}
#else
//output K and D
char calibKDName[256];
sprintf_s(calibKDName, "%scalib_param_K_D.txt", calibDataPath[grp]);
sg_outputCalibKD(calibKDName, K, D);
#else
char calibKDName[256];
sprintf_s(calibKDName, "%scalib_param_K_D.txt", cbImagePath);
cv::Mat K, D;
sg_readCalibKD(calibKDName, K, D);
//generate the OpenCV rectification maps
cv::Mat backwardMap_x, backwardMap_y;
double alpha = 0.4; // 0.4;
imageSize = cv::Size(1200, 2048);
cv::Mat newCamMatrix = cv::getOptimalNewCameraMatrix(K, D, imageSize, alpha, imageSize, 0);
cv::initUndistortRectifyMap(K, D, cv::Mat(), newCamMatrix, imageSize, CV_32FC1, backwardMap_x, backwardMap_y);
#endif
#if ENABLE_GEN_IMAGE
#if ENABLE_GEN_IMAGE
cv::Vec4f laserPE;
generateLaserLine(10.f, 5.f, laserPE);
std::cout << "generateLaserLine pe: " << laserPE << std::endl;
@ -411,79 +313,40 @@ int main()
cv::Mat image = generateVirtualLaserLineImage(laserPE, pe, K, D, imageSize);
#if ENABLE_DEBUG
#if ENABLE_DEBUG
cv::Mat color = image.clone();
cv::resize(color, color, cv::Size(), 0.5, 0.5);
cv::imshow("image", color);
cv::waitKey(10);
#endif
sprintf_s(filename, "%s%d.bmp", laserImagePath, index);
sprintf_s(filename, "%s%d.bmp", laserImagePath, index);
cv::imwrite(filename, image);
}
#endif
std::vector<cv::Point3f> all_pts3d;
for (index = startIndex; index <= endIndex; index++) {
{
std::vector<cv::Point3f> all_pts3d;
char filename[256];
sprintf_s(filename, "%scalib_%03d.bmp", calibDataPath[grp], index);
cv::Mat srcImg = cv::imread(filename);
if (srcImg.empty())
break;
index = 3;
for (;; index++) {
cv::Mat img;
cv::rotate(srcImg, img, cv::ROTATE_90_COUNTERCLOCKWISE);
#if 0
cv::Mat charucoCalibImg;
cv::remap(img,
charucoCalibImg,
backwardMap_x,
backwardMap_y,
cv::INTER_LINEAR,
cv::BORDER_CONSTANT,
cv::Scalar(0, 0, 0));
cv::Size _size = charucoCalibImg.size();
_size.width = _size.width * 5;
//cv::resize(charucoCalibImg, charucoCalibImg, _size, 0, 0, cv::INTER_NEAREST);
sprintf_s(filename, "%scalib_%03d_calib.bmp", calibDataPath[grp], index);
cv::imwrite(filename, charucoCalibImg);
#endif
char filename[256];
sprintf_s(filename, "%scalib_%03d.bmp", calibDataPath[grp], index);
cv::Mat srcImg = cv::imread(filename);
if (srcImg.empty())
break;
std::vector<cv::Point2f> corners;
std::vector<int> charucoIds;
if (CALIB_CHESS_BOARD == calibType)
{
cv::Mat img;
cv::rotate(srcImg, img, cv::ROTATE_90_COUNTERCLOCKWISE);
std::vector<cv::Point2f> corners;
detectCorners(img, cbPattern, corners);
}
else if (CALIB_CIRCLE_GRID == calibType)
{
detectCirclePoints(img, cbPattern, corners);
}
else if (CALIB_CHARUCO == calibType)
{
std::vector<int> markerIds;
std::vector<std::vector<cv::Point2f> > markerCorners;
detectCharucoCorners(img,
cbPattern,
cbSquareSize,
markSize,
markerIds,
markerCorners,
charucoIds,
corners);
}
if (corners.empty())
continue;
// create a mask for the chessboard region
cv::Mat chessMask;
if (CALIB_CHARUCO == calibType)
chessMask = cv::Mat::ones(img.size(), CV_8UC1);
else
{
chessMask = cv::Mat::zeros(img.size(), CV_8UC1);
if (corners.empty())
continue;
// create a mask for the chessboard region
cv::Mat chessMask = cv::Mat::zeros(img.size(), CV_8UC1);
// fill the area between corners using a polygon approximation
// the chessboard region must extend one square beyond the corner region
std::vector<cv::Point2f> contour_line[4];
@ -514,7 +377,7 @@ int main()
pt_1.y = pt_c.y * 2 - pt_2.y;
contour_line[2].push_back(pt_1);
}
for (int i = cbPattern.height - 1; i >= 0; i--)
for (int i = cbPattern.height-1; i >= 0; i--)
{
cv::Point2f pt_c = corners[i * cbPattern.width];
cv::Point2f pt_2 = corners[i * cbPattern.width + 1];
@ -544,85 +407,57 @@ int main()
sprintf_s(filename, "%schessMask_%03d.png", calibDataPath[grp], index);
cv::imwrite(filename, chessMask);
#endif
cv::Vec4f pe;
fitChessboardPlane(corners, K, D, cbPattern, cbSquareSize, pe);
sprintf_s(filename, "%slaser_%03d.bmp", calibDataPath[grp], index);
cv::Mat srcLaserImg = cv::imread(filename);
if (srcLaserImg.empty())
break;
cv::Mat laserImg_unMask;
cv::rotate(srcLaserImg, laserImg_unMask, cv::ROTATE_90_COUNTERCLOCKWISE);
//AND with the mask so that the laser line to be processed lies on the calibration board
cv::Mat laserImg;
cv::bitwise_and(laserImg_unMask, laserImg_unMask, laserImg, chessMask);
#if 1
sprintf_s(filename, "%slaser_rotate_mask_%03d.png", calibDataPath[grp], index);
cv::imwrite(filename, laserImg);
#endif
std::vector<cv::Point2f> pts2d = detectLaserLine(laserImg);
//draw the subpixel points
cv::Mat enlargeImg;
if (laserImg.channels() == 1)
laserImg.convertTo(enlargeImg, cv::COLOR_GRAY2BGR);
else
enlargeImg = laserImg.clone();
cv::Size objSize = laserImg.size();
objSize.width = objSize.width * 5;
cv::resize(enlargeImg, enlargeImg, objSize, 0, 0, cv::INTER_NEAREST);
for (int i = 0, i_max = pts2d.size(); i < i_max; i++)
{
cv::Point2f a_subPix = pts2d[i];
a_subPix.x += 0.5;
int row = (int)(a_subPix.y + 0.5);
int col = (int)(a_subPix.x * 5 + 0.5);
enlargeImg.at<cv::Vec3b>(row, col)[0] = 0;
enlargeImg.at<cv::Vec3b>(row, col)[1] = 0;
enlargeImg.at<cv::Vec3b>(row, col)[2] = 255;
}
sprintf_s(filename, "%slaser_rotate_enlarge_%03d_subpix.png", calibDataPath[grp], index);
cv::imwrite(filename, enlargeImg);
std::vector<cv::Point3f> pts3d = project2DTo3D(pts2d, pe, K, D);
all_pts3d.insert(all_pts3d.end(), pts3d.begin(), pts3d.end());
}
cv::Vec4f pe;
if (CALIB_CHARUCO == calibType)
fitChessboardPlane_charuco(charucoIds,corners, K, D, cbPattern, cbSquareSize, pe);
else
fitChessboardPlane_chessboard(corners, K, D, cbPattern, cbSquareSize, pe);
sprintf_s(filename, "%slaser_%03d.bmp", calibDataPath[grp], index);
cv::Mat srcLaserImg = cv::imread(filename);
if (srcLaserImg.empty())
break;
cv::Mat laserImg_unMask;
cv::rotate(srcLaserImg, laserImg_unMask, cv::ROTATE_90_COUNTERCLOCKWISE);
//AND with the mask so that the laser line to be processed lies on the calibration board
cv::Mat laserImg;
cv::bitwise_and(laserImg_unMask, laserImg_unMask, laserImg, chessMask);
#if 1
sprintf_s(filename, "%slaser_rotate_mask_%03d.png", calibDataPath[grp], index);
cv::imwrite(filename, laserImg);
cv::Mat laserCalibImg;
cv::remap(laserImg,
laserCalibImg,
backwardMap_x,
backwardMap_y,
cv::INTER_LINEAR,
cv::BORDER_CONSTANT,
cv::Scalar(0, 0, 0));
cv::Size laserImgSize = laserCalibImg.size();
laserImgSize.width = laserImgSize.width * 5;
cv::resize(laserCalibImg, laserCalibImg, laserImgSize, 0, 0, cv::INTER_NEAREST);
sprintf_s(filename, "%slaser_%03d_calib.bmp", calibDataPath[grp], index);
cv::imwrite(filename, laserCalibImg);
#endif
std::vector<cv::Point2f> pts2d = detectLaserLine(laserImg);
//draw the subpixel points
cv::Mat enlargeImg;
if (laserImg.channels() == 1)
laserImg.convertTo(enlargeImg, cv::COLOR_GRAY2BGR);
else
enlargeImg = laserImg.clone();
cv::Size objSize = laserImg.size();
objSize.width = objSize.width * 5;
cv::resize(enlargeImg, enlargeImg, objSize, 0, 0, cv::INTER_NEAREST);
if (pts2d.size() > 0)
{
sprintf_s(filename, "%slaser_rotate_enlarge_%03d_subpixData.txt", calibDataPath[grp], index);
saveSubpixData(filename, pts2d);
}
for (int i = 0, i_max = (int)pts2d.size(); i < i_max; i++)
{
cv::Point2f a_subPix = pts2d[i];
int row = (int)(a_subPix.y + 0.5);
int col = (int)(a_subPix.x * 5 + 0.5);
enlargeImg.at<cv::Vec3b>(row, col)[0] = 0;
enlargeImg.at<cv::Vec3b>(row, col)[1] = 0;
enlargeImg.at<cv::Vec3b>(row, col)[2] = 255;
}
sprintf_s(filename, "%slaser_rotate_enlarge_%03d_subpix.png", calibDataPath[grp], index);
cv::imwrite(filename, enlargeImg);
std::vector<cv::Point3f> pts3d = project2DTo3D(pts2d, pe, K, D);
#if 1
//save the 3D points
#endif
all_pts3d.insert(all_pts3d.end(), pts3d.begin(), pts3d.end());
cv::Vec4f pe = fitPlaneToPoints(all_pts3d);
std::cout << "pe: " << pe << std::endl;
}
cv::Vec4f pe = fitPlaneToPoints(all_pts3d);
std::cout << "pe: " << pe << std::endl;
//output K and D
char calibKDPName[256];
sprintf_s(calibKDPName, "%scalib_param_K_D.txt", calibDataPath[grp]);
sg_outputCalibKD(calibKDPName, K, D, pe);
}
return 0;
}

View File

@ -148,9 +148,6 @@
<ClCompile Include="camCalib.cpp" />
<ClCompile Include="lineDetection_steger.cpp" />
<ClCompile Include="onnxDetector.cpp" />
<ClCompile Include="sourceCode\aruco\aruco.cpp" />
<ClCompile Include="sourceCode\aruco\aruco_calib.cpp" />
<ClCompile Include="sourceCode\aruco\charuco.cpp" />
<ClCompile Include="sourceCode\FitMapParam.cpp" />
<ClCompile Include="sourceCode\MonoLaserCalibrate.cpp" />
</ItemGroup>

View File

@ -13,9 +13,6 @@
<UniqueIdentifier>{67DA6AB6-F800-4c08-8B7A-83BB121AAD01}</UniqueIdentifier>
<Extensions>rc;ico;cur;bmp;dlg;rc2;rct;bin;rgs;gif;jpg;jpeg;jpe;resx;tiff;tif;png;wav;mfcribbon-ms</Extensions>
</Filter>
<Filter Include="源文件\aruco">
<UniqueIdentifier>{5709fb07-01a0-47f3-b3c7-11842aaaa85e}</UniqueIdentifier>
</Filter>
</ItemGroup>
<ItemGroup>
<ClCompile Include="camCalib.cpp">
@ -33,15 +30,6 @@
<ClCompile Include="onnxDetector.cpp">
<Filter>源文件</Filter>
</ClCompile>
<ClCompile Include="sourceCode\aruco\aruco.cpp">
<Filter>源文件\aruco</Filter>
</ClCompile>
<ClCompile Include="sourceCode\aruco\aruco_calib.cpp">
<Filter>源文件\aruco</Filter>
</ClCompile>
<ClCompile Include="sourceCode\aruco\charuco.cpp">
<Filter>源文件\aruco</Filter>
</ClCompile>
</ItemGroup>
<ItemGroup>
<ClInclude Include="sourceCode\MonoLaserCalibrate.h">

View File

@ -1,4 +0,0 @@
1004.94, 0, 619.327
0, 1004.94, 1023.07
0, 0, 1
-0.0869453, 0.0919085, -1.61443e-05, -2.84163e-05, -0.0224749

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@ -1,4 +0,0 @@
1005.62, 0, 619.278
0, 1005.62, 1021.59
0, 0, 1
-0.0864591, 0.0914504, -0.000462285, 7.09293e-05, -0.0220272

File diff suppressed because it is too large

File diff suppressed because it is too large

Binary file not shown.

View File

@ -5,7 +5,6 @@
#include <corecrt_math_defines.h>
#include "lineDetection_steger.h"
#define USING_1D_MASK 1
#if 1
using namespace std;
using namespace cv;
@ -266,45 +265,6 @@ void generate_2nd_2Dmask_directSample(double sigma, int radius, cv::Mat& mask_xx
return;
}
double gaussKernel_1D(double x, double sigma)
{
double sigma2 = sigma * sigma;
double norm = 1.0 / (sqrt(2 * M_PI) * sigma);
return (norm * exp(-(x * x ) / (2 * sigma2)));
}
// generate the 1D Gaussian first-derivative mask
void generate_1st_1Dmask_directSample(double sigma, int radius, cv::Mat& mask_x)
{
int size = 2 * radius + 1;
mask_x = cv::Mat::zeros(1, size, CV_64FC1);
double norm = -1.0 / (sigma * sigma);
for (int col = 0; col < size; col++)
{
double x = (double)(col - radius);
double dx = x * norm * gaussKernel_1D(x, sigma);
mask_x.at<double>(0, col) = dx;
}
return;
}
// generate the 1D Gaussian second-derivative mask
void generate_2nd_1Dmask_directSample(double sigma, int radius, cv::Mat& mask_xx)
{
int size = 2 * radius + 1;
mask_xx = cv::Mat::zeros(1, size, CV_64FC1);
double sigma2 = sigma * sigma;
double norm = 1.0 / (sigma2 * sigma2);
double offset = 1.0 / sigma2;
for (int col = 0; col < size; col++)
{
double x = (double)(col - radius);
double dxx = (x * x * norm - offset) * gaussKernel_1D(x, sigma);
mask_xx.at<double>(0, col) = dxx;
}
return;
}
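
The sampled values above are the analytic 1D Gaussian and its derivatives; in LaTeX, with the sign convention carried by norm = -1/sigma^2:

g(x) = \frac{1}{\sqrt{2\pi}\,\sigma} e^{-x^2/(2\sigma^2)}, \qquad
g'(x) = -\frac{x}{\sigma^2}\, g(x), \qquad
g''(x) = \left(\frac{x^2}{\sigma^4} - \frac{1}{\sigma^2}\right) g(x)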
// single-point convolution function
sPtDerivate pointConvolve(
cv::Point ptPos, //point position
@ -342,35 +302,6 @@ sPtDerivate pointConvolve(
return result;
}
// single-point 1D convolution function
sPtDerivate pointConvolve_1D(
cv::Point ptPos, //point position
int radius, //convolution window radius
cv::Mat& img, //single-channel image of type double
cv::Mat& mask_x, //first-derivative mask x
cv::Mat& mask_xx //second-derivative mask xx
)
{
sPtDerivate result = { 0.0, 0.0, 0.0, 0.0, 0.0 };
cv::Size imgSize = img.size(); //
if ((ptPos.x < radius) || (ptPos.x > (imgSize.width - radius - 1)) || (ptPos.y < radius) || (ptPos.y > (imgSize.height - radius - 1)))
return result;
int size = 2 * radius + 1;
for (int j = 0; j < size; j++)
{
int col = j - radius;
int x = ptPos.x + col;
double value = img.at<double>(ptPos.y, x);
result.dx += mask_x.at<double>(0, j) * value;
result.dy += 0;
result.dxx += mask_xx.at<double>(0, j) * value;
result.dyy += 0;
result.dxy += 0;
}
return result;
}
bool isMax(int i, int j, cv::Mat& img, int dx, int dy)//is this an extremum along the normal direction
{
double val = img.at<double>(j, i);
@ -449,30 +380,6 @@ cv::Point2f computeHessianSubpix(
return subPix;
}
//compute the subpixel position from the 1D derivatives
cv::Point2f compute1DSubpix(
cv::Point ptPos, //point position
cv::Mat& img, //single-channel image of type double
sPtDerivate _ptDerivate
)
{
double subTh = 0.7;
cv::Point2f subPix = { -1.0f, -1.0f }; //initialize to an invalid subpixel value
//formula for t: Taylor expansion up to the second derivative
double a = _ptDerivate.dxx;
double b = _ptDerivate.dx;
if (a != 0.0)
{
double t = b / a; // -b / a;
if (fabs(t) <= subTh )//(x + t * Nx, y + t * Ny) is the subpixel coordinate
{
subPix.x = (float)(ptPos.x + t);
subPix.y = (float)(ptPos.y);
}
}
return subPix;
}
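
The refinement above is the standard one-dimensional Newton step along the row: expanding the smoothed profile f to second order,

f(x + t) \approx f(x) + f'(x)\,t + \tfrac{1}{2} f''(x)\,t^2, \qquad \frac{d}{dt} = 0 \;\Rightarrow\; t = -\frac{f'(x)}{f''(x)}

The code uses t = dx/dxx without an explicit minus sign; since correlating with the odd g' kernel above flips the sign of the measured first derivative, this appears to amount to the same step, though the diff itself does not state that.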
// compute subpixel positions from integer point coordinates
void computePointSubpix(
cv::Mat& inputImg, //uchar single-channel input image (grayscale)
@ -484,10 +391,8 @@ void computePointSubpix(
cv::Mat mask_x, mask_y, mask_xx, mask_yy, mask_xy;
double sigma = (double)gaussWin / sqrt(3);
//double sigma = 1.0;
#if USING_1D_MASK
//SetFilterMat(mask_x, mask_y, mask_xx, mask_yy, mask_xy, gaussWin*2+1);
generate_1st_1Dmask_directSample(sigma, gaussWin, mask_x);
generate_2nd_1Dmask_directSample(sigma, gaussWin, mask_xx);
#if 0
SetFilterMat(mask_x, mask_y, mask_xx, mask_yy, mask_xy, gaussWin*2+1);
#else
generate_1st_2Dmask_directSample(sigma, gaussWin, mask_x, mask_y);
generate_2nd_2Dmask_directSample(sigma, gaussWin, mask_xx, mask_yy, mask_xy);
@ -504,7 +409,7 @@ void computePointSubpix(
//Gaussian blur
cv::Mat img;
inputImg.convertTo(img, CV_64FC1);
cv::GaussianBlur(img, img, cv::Size(5, 5), 1.2, 1.2);
cv::GaussianBlur(img, img, cv::Size(3, 3), 0.9, 0.9);
//cv::imwrite("gauss_blur_src.bmp", img);
for (int i = 0, i_max = (int)pos.size(); i < i_max; i++)
@ -513,20 +418,6 @@ void computePointSubpix(
int doSubPix = 0;
while (doSubPix >= 0)
{
#if USING_1D_MASK
sPtDerivate a_ptDerivate = pointConvolve_1D(
ptPos, //point position
gaussWin, //convolution window radius
img, //single-channel image of type double
mask_x, //first-derivative mask x
mask_xx //second-derivative mask xx
);
cv::Point2f subpix = compute1DSubpix(
ptPos, //point position
img, //single-channel image of type double
a_ptDerivate
);
#else
sPtDerivate a_ptDerivate = pointConvolve(
ptPos, //point position
gaussWin, //convolution window radius
@ -542,11 +433,10 @@ void computePointSubpix(
img, //single-channel image of type double
a_ptDerivate
);
#endif
if ((subpix.x >= 0) && (subpix.y >= 0))
{
subpix.x += 0.5; //pixel positions in the image refer to the lower-left corner of the pixel, while this coordinate points to the pixel center, hence the 0.5 pixel difference
subpix.y -= 0.5; //the image Y axis here points opposite to the usual coordinate system
subpix.x -= 0.5; //the image Y axis here points opposite to the usual coordinate system
posSubpix.push_back(subpix);
doSubPix = -1;
}

View File

@ -2,8 +2,6 @@
//
#include <iostream>
#include <opencv2/opencv.hpp>
#include "aruco.hpp"
#include "aruco/charuco.hpp"
#include <vector>
#include <math.h>
#include "MonoLaserCalibrate.h"
@ -160,91 +158,29 @@ void detectCirclePoints(const cv::Mat& img,
return;
}
/*Brief: ChArUco calibration-board corner detection function*/
void detectCharucoCorners(const cv::Mat& img,
const cv::Size& patternSize,
float sqSize,
float mkSize,
std::vector<int>& markerIds,
std::vector<std::vector<cv::Point2f> >& markerCorners,
std::vector<int>& charucoIds,
std::vector<cv::Point2f>& charucoCorners)
{
cv::Mat gray;
if (img.channels() == 3)
cv::cvtColor(img, gray, cv::COLOR_BGR2GRAY);
else
gray = img.clone();
cv::aruco::Dictionary dictionary(cv::aruco::getPredefinedDictionary(cv::aruco::DICT_6X6_1000));
cv::aruco::CharucoBoard board = cv::aruco::CharucoBoard(patternSize, sqSize, mkSize, dictionary);
cv::aruco::DetectorParameters params = cv::aruco::DetectorParameters();
params.cornerRefinementMethod = cv::aruco::CORNER_REFINE_NONE;
cv::Ptr<cv::aruco::Dictionary> ptrDictionary = cv::makePtr<cv::aruco::Dictionary>(dictionary);
cv::Ptr<cv::aruco::DetectorParameters> ptrParams = cv::makePtr<cv::aruco::DetectorParameters>(params);
cv::aruco::detectMarkers(gray, ptrDictionary, markerCorners, markerIds, ptrParams);
// if at least one marker detected
if (markerIds.size() > 0) {
//cv::aruco::drawDetectedMarkers(imageCopy, markerCorners, markerIds);
cv::Ptr<cv::aruco::CharucoBoard> ptrBoard = cv::makePtr<cv::aruco::CharucoBoard>(board);
cv::aruco::interpolateCornersCharuco(markerCorners, markerIds, gray, ptrBoard, charucoCorners, charucoIds);
// if at least one charuco corner detected
#if 1
if (charucoIds.size() > 0)
// subpixel refinement
cv::cornerSubPix(gray, charucoCorners, cv::Size(11, 11), cv::Size(-1, -1),
cv::TermCriteria(cv::TermCriteria::EPS + cv::TermCriteria::MAX_ITER, 50, 0.1));
#endif
}
return;
}
void gen3DCoordinate_chessboard(
const std::vector<cv::Point2f>& corners,
const cv::Size& patternSize,
const float squareSize,
std::vector<cv::Point3f>& objectPoints
)
{
// prepare the 3D world coordinate points (z = 0)
for (int i = 0; i < patternSize.height; ++i)
for (int j = 0; j < patternSize.width; ++j)
objectPoints.emplace_back(j * squareSize, i * squareSize, 0);
return;
}
void gen3DCoordinate_charuco(
std::vector<int>& charucoIds,
std::vector<cv::Point2f>& charucoCorners,
const cv::Size& patternSize,
const float squareSize,
std::vector<cv::Point3f>& objectPoints)
{
float ratio = 1.002;
// prepare the 3D world coordinate points (z = 0)
for (int j = 0, j_max = (int)charucoCorners.size(); j < j_max; j++)
{
int id = charucoIds[j];
int id_row = id / (patternSize.width-1);
int id_col = id % (patternSize.width-1);
objectPoints.emplace_back(id_col * squareSize/ ratio, id_row *squareSize, 0);
}
return;
}
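
A tiny worked example of the corner-id bookkeeping above (board width and square size are hypothetical): interior ChArUco corner ids run row-major over a grid with patternSize.width - 1 columns, and the ratio factor slightly rescales the X spacing.

#include <cstdio>

int main()
{
    const int   patternWidth = 20;      // hypothetical board width in squares
    const float squareSize   = 50.0f;   // hypothetical square size
    const float ratio        = 1.002f;  // X rescaling factor, as in the code above
    const int   id           = 41;      // a detected ChArUco corner id

    const int row = id / (patternWidth - 1);   // 41 / 19 = 2
    const int col = id % (patternWidth - 1);   // 41 % 19 = 3
    std::printf("corner %d -> (col=%d, row=%d) -> object point (%.2f, %.1f, 0)\n",
                id, col, row, col * squareSize / ratio, row * squareSize);
    return 0;
}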
/*Brief: monocular camera calibration function*/
void monocularCalibration(
const std::vector<std::vector<cv::Point2f>>& imagePoints,
const cv::Size& imageSize,
std::vector<std::vector<cv::Point3f>>& objectPoints,
const cv::Size& patternSize,
const float squareSize,
cv::Mat& cameraMatrix,
cv::Mat& distCoeffs,
std::vector<double>& reprojectionError)
{
// generate 3D points from the corners
std::vector<std::vector<cv::Point3f>> objectPoints;
for (const auto& corners : imagePoints) {
// prepare the 3D world coordinate points (z = 0)
std::vector<cv::Point3f> obj;
for (int i = 0; i < patternSize.height; ++i)
for (int j = 0; j < patternSize.width; ++j)
obj.emplace_back(j * squareSize, i * squareSize, 0);
objectPoints.push_back(obj);
}
// run the camera calibration
std::vector<cv::Mat> rvecs, tvecs;
#if ENABLE_FISH_EYE
@ -255,7 +191,7 @@ void monocularCalibration(
flags |= cv::fisheye::CALIB_FIX_SKEW;
cv::fisheye::calibrate(objectPoints, imagePoints, imageSize, cameraMatrix, distCoeffs, rvecs, tvecs);// , flags, cv::TermCriteria(3, 20, 1e-6));
#else
cv::calibrateCamera(objectPoints, imagePoints, imageSize, cameraMatrix, distCoeffs, rvecs, tvecs); // , cv::CALIB_FIX_ASPECT_RATIO);
cv::calibrateCamera(objectPoints, imagePoints, imageSize, cameraMatrix, distCoeffs, rvecs, tvecs);
#endif
// reproject the 3D points to 2D image points
// compute the reprojection error
@ -300,81 +236,23 @@ void monocularCalibration(
return;
}
/*Brief: chessboard monocular camera calibration function*/
void monocularCalibration_chessboard(
const std::vector<std::vector<cv::Point2f>>& imagePoints,
const cv::Size& imageSize,
const cv::Size& patternSize,
const float squareSize,
cv::Mat& cameraMatrix,
cv::Mat& distCoeffs,
std::vector<double>& reprojectionError)
{
// generate 3D points from the corners
std::vector<std::vector<cv::Point3f>> objectPoints;
for (const auto& corners : imagePoints) {
std::vector<cv::Point3f> obj;
gen3DCoordinate_chessboard(
corners,
patternSize,
squareSize,
obj);
objectPoints.push_back(obj);
}
monocularCalibration(
imagePoints,
imageSize,
objectPoints,
cameraMatrix,
distCoeffs,
reprojectionError);
return;
}
/*Brief: ChArUco-board monocular camera calibration function*/
void monocularCalibration_charuco(
std::vector<std::vector<int>>& charucoIds,
std::vector<std::vector<cv::Point2f>>& charucoCorners,
const cv::Size& imageSize,
const cv::Size& patternSize,
const float squareSize,
cv::Mat& cameraMatrix,
cv::Mat& distCoeffs,
std::vector<double>& reprojectionError)
{
// generate 3D points from the corners
std::vector<std::vector<cv::Point3f>> objectPoints;
for (int i = 0, i_max = (int)charucoCorners.size(); i < i_max; i++)
{
std::vector<cv::Point3f> obj;
gen3DCoordinate_charuco(
charucoIds[i],
charucoCorners[i],
patternSize,
squareSize,
obj);
objectPoints.push_back(obj);
}
monocularCalibration(
charucoCorners,
imageSize,
objectPoints,
cameraMatrix,
distCoeffs,
reprojectionError);
return;
}
/*Brief: fit a plane to the chessboard corners*/
void fitChessboardPlane(
std::vector<cv::Point2f>& corners,
std::vector<cv::Point3f>& objectPoints,
const std::vector<cv::Point2f>& corners,
const cv::Mat& cameraMatrix,
const cv::Mat& distCoeffs,
const cv::Size& patternSize,
const float squareSize,
cv::Vec4f& planeEquation)
{
// 1. generate the 3D world coordinate points (z = 0)
std::vector<cv::Point3f> objectPoints;
for (int i = 0; i < patternSize.height; ++i) {
for (int j = 0; j < patternSize.width; ++j) {
objectPoints.emplace_back(j * squareSize, i * squareSize, 0);
}
}
// 2. solve PnP to obtain the chessboard pose
cv::Mat rvec, tvec;
cv::solvePnP(objectPoints, corners, cameraMatrix, distCoeffs, rvec, tvec);
@ -403,58 +281,6 @@ void fitChessboardPlane(
return;
}
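
For context, the standard construction for this step (not necessarily the exact code in the elided body): with the board pose (R, t) from solvePnP, the board plane z = 0 maps into the camera frame as the plane with normal n equal to the third column of R, passing through t:

n^{\top} X - n^{\top} t = 0, \qquad n = R\,(0, 0, 1)^{\top}

which yields the (a, b, c, d) coefficients with (a, b, c) = n and d = -n^{\top} t.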
/*Brief: fit a plane to the chessboard corners*/
void fitChessboardPlane_chessboard(
std::vector<cv::Point2f>& corners,
const cv::Mat& cameraMatrix,
const cv::Mat& distCoeffs,
const cv::Size& patternSize,
const float squareSize,
cv::Vec4f& planeEquation)
{
std::vector<cv::Point3f> objectPoints;
gen3DCoordinate_chessboard(
corners,
patternSize,
squareSize,
objectPoints);
fitChessboardPlane(
corners,
objectPoints,
cameraMatrix,
distCoeffs,
planeEquation);
return;
}
/*Brief: fit a plane to the ChArUco-board corners*/
void fitChessboardPlane_charuco(
std::vector<int>& charucoIds,
std::vector<cv::Point2f>& corners,
const cv::Mat& cameraMatrix,
const cv::Mat& distCoeffs,
const cv::Size& patternSize,
const float squareSize,
cv::Vec4f& planeEquation)
{
std::vector<cv::Point3f> objectPoints;
gen3DCoordinate_charuco(
charucoIds,
corners,
patternSize,
squareSize,
objectPoints);
fitChessboardPlane(
corners,
objectPoints,
cameraMatrix,
distCoeffs,
planeEquation);
return;
}
/*Brief: construct the laser exit-plane equation*/
void generateLaserLine(
const float baseLine,
@ -671,7 +497,7 @@ std::vector<cv::Point2f> detectLaserLine(
//cv::imwrite("gauss_blur_src.bmp", img);
// extract the peak (maximum) points
int scaleWin = 5;
double minPkValue = 20;
double minPkValue = 100;
std::vector< cv::Point> pkPoints;
for (int i = 0; i < img.rows; i++)
{

View File

@ -17,18 +17,8 @@ void detectCirclePoints(const cv::Mat& img,
const cv::Size& patternSize,
std::vector<cv::Point2f>& corners);
/*Brief: ChArUco calibration-board corner detection function*/
void detectCharucoCorners(const cv::Mat& img,
const cv::Size& patternSize,
float sqSize,
float mkSize,
std::vector<int>& markerIds,
std::vector<std::vector<cv::Point2f> >& markerCorners,
std::vector<int>& charucoIds,
std::vector<cv::Point2f>& charucoCorners);
/*Brief: chessboard monocular camera calibration function*/
void monocularCalibration_chessboard(
/*Brief: monocular camera calibration function*/
void monocularCalibration(
const std::vector<std::vector<cv::Point2f>>& imagePoints,
const cv::Size& imageSize,
const cv::Size& patternSize,
@ -37,30 +27,9 @@ void monocularCalibration_chessboard(
cv::Mat& distCoeffs,
std::vector<double>& reprojectionError);
/*Brief: ChArUco-board monocular camera calibration function*/
void monocularCalibration_charuco(
std::vector<std::vector<int>>& charucoIds,
std::vector<std::vector<cv::Point2f>>& charucoCorners,
const cv::Size& imageSize,
const cv::Size& patternSize,
const float squareSize,
cv::Mat& cameraMatrix,
cv::Mat& distCoeffs,
std::vector<double>& reprojectionError);
/*Brief: fit a plane to the chessboard corners*/
void fitChessboardPlane_chessboard(
std::vector<cv::Point2f>& corners,
const cv::Mat& cameraMatrix,
const cv::Mat& distCoeffs,
const cv::Size& patternSize,
const float squareSize,
cv::Vec4f& planeEquation);
/*Brief: fit a plane to the ChArUco-board corners*/
void fitChessboardPlane_charuco(
std::vector<int>& charucoIds,
std::vector<cv::Point2f>& corners,
void fitChessboardPlane(
const std::vector<cv::Point2f>& corners,
const cv::Mat& cameraMatrix,
const cv::Mat& distCoeffs,
const cv::Size& patternSize,

View File

@ -1,103 +0,0 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html
#ifndef OPENCV_ARUCO_HPP
#define OPENCV_ARUCO_HPP
#include "opencv2/objdetect/aruco_detector.hpp"
#include "aruco/aruco_calib.hpp"
namespace cv {
namespace aruco {
/**
* @defgroup aruco Aruco markers, module functionality was moved to objdetect module
* @{
* ArUco Marker Detection, module functionality was moved to objdetect module
* @sa ArucoDetector, CharucoDetector, Board, GridBoard, CharucoBoard
* @}
*/
//! @addtogroup aruco
//! @{
/** @brief detect markers
@deprecated Use class ArucoDetector::detectMarkers
*/
CV_EXPORTS_W void detectMarkers(InputArray image, const Ptr<Dictionary> &dictionary, OutputArrayOfArrays corners,
OutputArray ids, const Ptr<DetectorParameters> &parameters = makePtr<DetectorParameters>(),
OutputArrayOfArrays rejectedImgPoints = noArray());
/** @brief refine detected markers
@deprecated Use class ArucoDetector::refineDetectedMarkers
*/
CV_EXPORTS_W void refineDetectedMarkers(InputArray image,const Ptr<Board> &board,
InputOutputArrayOfArrays detectedCorners,
InputOutputArray detectedIds, InputOutputArrayOfArrays rejectedCorners,
InputArray cameraMatrix = noArray(), InputArray distCoeffs = noArray(),
float minRepDistance = 10.f, float errorCorrectionRate = 3.f,
bool checkAllOrders = true, OutputArray recoveredIdxs = noArray(),
const Ptr<DetectorParameters> &parameters = makePtr<DetectorParameters>());
/** @brief draw planar board
@deprecated Use Board::generateImage
*/
CV_EXPORTS_W void drawPlanarBoard(const Ptr<Board> &board, Size outSize, OutputArray img, int marginSize,
int borderBits);
/** @brief get board object and image points
@deprecated Use Board::matchImagePoints
*/
CV_EXPORTS_W void getBoardObjectAndImagePoints(const Ptr<Board> &board, InputArrayOfArrays detectedCorners,
InputArray detectedIds, OutputArray objPoints, OutputArray imgPoints);
/** @deprecated Use Board::matchImagePoints and cv::solvePnP
*/
CV_EXPORTS_W int estimatePoseBoard(InputArrayOfArrays corners, InputArray ids, const Ptr<Board> &board,
InputArray cameraMatrix, InputArray distCoeffs, InputOutputArray rvec,
InputOutputArray tvec, bool useExtrinsicGuess = false);
/**
* @brief Pose estimation for a ChArUco board given some of their corners
* @param charucoCorners vector of detected charuco corners
* @param charucoIds list of identifiers for each corner in charucoCorners
* @param board layout of ChArUco board.
* @param cameraMatrix input 3x3 floating-point camera matrix
* \f$A = \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\f$
* @param distCoeffs vector of distortion coefficients
* \f$(k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6],[s_1, s_2, s_3, s_4]])\f$ of 4, 5, 8 or 12 elements
* @param rvec Output vector (e.g. cv::Mat) corresponding to the rotation vector of the board
* (see cv::Rodrigues).
* @param tvec Output vector (e.g. cv::Mat) corresponding to the translation vector of the board.
* @param useExtrinsicGuess defines whether initial guess for \b rvec and \b tvec will be used or not.
*
* This function estimates a Charuco board pose from some detected corners.
* The function checks if the input corners are enough and valid to perform pose estimation.
* If pose estimation is valid, returns true, else returns false.
* @deprecated Use CharucoBoard::matchImagePoints and cv::solvePnP
* @sa use cv::drawFrameAxes to get world coordinate system axis for object points
*/
CV_EXPORTS_W bool estimatePoseCharucoBoard(InputArray charucoCorners, InputArray charucoIds,
const Ptr<CharucoBoard> &board, InputArray cameraMatrix,
InputArray distCoeffs, InputOutputArray rvec,
InputOutputArray tvec, bool useExtrinsicGuess = false);
/** @deprecated Use cv::solvePnP
*/
CV_EXPORTS_W void estimatePoseSingleMarkers(InputArrayOfArrays corners, float markerLength,
InputArray cameraMatrix, InputArray distCoeffs,
OutputArray rvecs, OutputArray tvecs, OutputArray objPoints = noArray(),
const Ptr<EstimateParameters>& estimateParameters = makePtr<EstimateParameters>());
/** @deprecated Use CharucoBoard::checkCharucoCornersCollinear
*/
CV_EXPORTS_W bool testCharucoCornersCollinear(const Ptr<CharucoBoard> &board, InputArray charucoIds);
//! @}
}
}
#endif

View File

@ -1,143 +0,0 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html
#include "precomp.hpp"
#include "../aruco.hpp"
#include <opencv2/calib3d.hpp>
#include <opencv2/core/utils/logger.hpp>
namespace cv {
namespace aruco {
using namespace std;
void detectMarkers(InputArray _image, const Ptr<Dictionary> &_dictionary, OutputArrayOfArrays _corners,
OutputArray _ids, const Ptr<DetectorParameters> &_params,
OutputArrayOfArrays _rejectedImgPoints) {
ArucoDetector detector(*_dictionary, *_params);
detector.detectMarkers(_image, _corners, _ids, _rejectedImgPoints);
}
void refineDetectedMarkers(InputArray _image, const Ptr<Board> &_board,
InputOutputArrayOfArrays _detectedCorners, InputOutputArray _detectedIds,
InputOutputArrayOfArrays _rejectedCorners, InputArray _cameraMatrix,
InputArray _distCoeffs, float minRepDistance, float errorCorrectionRate,
bool checkAllOrders, OutputArray _recoveredIdxs,
const Ptr<DetectorParameters> &_params) {
RefineParameters refineParams(minRepDistance, errorCorrectionRate, checkAllOrders);
ArucoDetector detector(_board->getDictionary(), *_params, refineParams);
detector.refineDetectedMarkers(_image, *_board, _detectedCorners, _detectedIds, _rejectedCorners, _cameraMatrix,
_distCoeffs, _recoveredIdxs);
}
void drawPlanarBoard(const Ptr<Board> &board, Size outSize, const _OutputArray &img, int marginSize, int borderBits) {
board->generateImage(outSize, img, marginSize, borderBits);
}
void getBoardObjectAndImagePoints(const Ptr<Board> &board, InputArrayOfArrays detectedCorners, InputArray detectedIds,
OutputArray objPoints, OutputArray imgPoints) {
board->matchImagePoints(detectedCorners, detectedIds, objPoints, imgPoints);
}
int estimatePoseBoard(InputArrayOfArrays corners, InputArray ids, const Ptr<Board> &board,
InputArray cameraMatrix, InputArray distCoeffs, InputOutputArray rvec,
InputOutputArray tvec, bool useExtrinsicGuess) {
CV_Assert(corners.total() == ids.total());
// get object and image points for the solvePnP function
Mat objPoints, imgPoints;
board->matchImagePoints(corners, ids, objPoints, imgPoints);
CV_Assert(imgPoints.total() == objPoints.total());
if(objPoints.total() == 0) // none of the detected markers belongs to the board
return 0;
solvePnP(objPoints, imgPoints, cameraMatrix, distCoeffs, rvec, tvec, useExtrinsicGuess);
// divide by four since all the four corners are concatenated in the array for each marker
return (int)objPoints.total() / 4;
}
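/* Editor's note: a short usage sketch for the wrapper above, not part of the original sources. It
 * assumes `corners` and `ids` come from a prior detectMarkers call on `image`, and that `board`,
 * `cameraMatrix` and `distCoeffs` are available; the return value is the number of markers that
 * were actually used to estimate the pose.
 * @code
 * cv::Vec3d rvec, tvec;
 * int used = cv::aruco::estimatePoseBoard(corners, ids, board, cameraMatrix, distCoeffs, rvec, tvec);
 * if (used > 0)
 *     cv::drawFrameAxes(image, cameraMatrix, distCoeffs, rvec, tvec, 0.1f);
 * @endcode
 */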
bool estimatePoseCharucoBoard(InputArray charucoCorners, InputArray charucoIds,
const Ptr<CharucoBoard> &board, InputArray cameraMatrix,
InputArray distCoeffs, InputOutputArray rvec,
InputOutputArray tvec, bool useExtrinsicGuess) {
CV_Assert((charucoCorners.getMat().total() == charucoIds.getMat().total()));
if(charucoIds.getMat().total() < 4) return false;
// get object and image points for the solvePnP function
Mat objPoints, imgPoints;
board->matchImagePoints(charucoCorners, charucoIds, objPoints, imgPoints);
try {
solvePnP(objPoints, imgPoints, cameraMatrix, distCoeffs, rvec, tvec, useExtrinsicGuess);
}
catch (const cv::Exception& e) {
CV_LOG_WARNING(NULL, "estimatePoseCharucoBoard: " << std::endl << e.what());
return false;
}
return objPoints.total() > 0ull;
}
bool testCharucoCornersCollinear(const Ptr<CharucoBoard> &board, InputArray charucoIds) {
return board->checkCharucoCornersCollinear(charucoIds);
}
/**
 * @brief Return the object points of a single marker, with the coordinate system centered in the middle
 * of the marker (by default) or in its top-left corner, given the marker length
*/
static Mat _getSingleMarkerObjectPoints(float markerLength, const EstimateParameters& estimateParameters) {
CV_Assert(markerLength > 0);
Mat objPoints(4, 1, CV_32FC3);
// set coordinate system in the top-left corner of the marker, with Z pointing out
if (estimateParameters.pattern == ARUCO_CW_TOP_LEFT_CORNER) {
objPoints.ptr<Vec3f>(0)[0] = Vec3f(0.f, 0.f, 0);
objPoints.ptr<Vec3f>(0)[1] = Vec3f(markerLength, 0.f, 0);
objPoints.ptr<Vec3f>(0)[2] = Vec3f(markerLength, markerLength, 0);
objPoints.ptr<Vec3f>(0)[3] = Vec3f(0.f, markerLength, 0);
}
else if (estimateParameters.pattern == ARUCO_CCW_CENTER) {
objPoints.ptr<Vec3f>(0)[0] = Vec3f(-markerLength/2.f, markerLength/2.f, 0);
objPoints.ptr<Vec3f>(0)[1] = Vec3f(markerLength/2.f, markerLength/2.f, 0);
objPoints.ptr<Vec3f>(0)[2] = Vec3f(markerLength/2.f, -markerLength/2.f, 0);
objPoints.ptr<Vec3f>(0)[3] = Vec3f(-markerLength/2.f, -markerLength/2.f, 0);
}
else
CV_Error(Error::StsBadArg, "Unknown estimateParameters pattern");
return objPoints;
}
void estimatePoseSingleMarkers(InputArrayOfArrays _corners, float markerLength,
InputArray _cameraMatrix, InputArray _distCoeffs,
OutputArray _rvecs, OutputArray _tvecs, OutputArray _objPoints,
const Ptr<EstimateParameters>& estimateParameters) {
CV_Assert(markerLength > 0);
Mat markerObjPoints = _getSingleMarkerObjectPoints(markerLength, *estimateParameters);
int nMarkers = (int)_corners.total();
_rvecs.create(nMarkers, 1, CV_64FC3);
_tvecs.create(nMarkers, 1, CV_64FC3);
Mat rvecs = _rvecs.getMat(), tvecs = _tvecs.getMat();
// for each marker, calculate its pose
parallel_for_(Range(0, nMarkers), [&](const Range& range) {
const int begin = range.start;
const int end = range.end;
for (int i = begin; i < end; i++) {
solvePnP(markerObjPoints, _corners.getMat(i), _cameraMatrix, _distCoeffs, rvecs.at<Vec3d>(i),
tvecs.at<Vec3d>(i), estimateParameters->useExtrinsicGuess, estimateParameters->solvePnPMethod);
}
});
if(_objPoints.needed()){
markerObjPoints.convertTo(_objPoints, -1);
}
}
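/* Editor's note: a short usage sketch for the function above, not part of the original sources. It
 * assumes `corners` comes from detectMarkers and that `markerLength` (in the same units as the
 * calibration), `cameraMatrix`, `distCoeffs` and `image` are available.
 * @code
 * std::vector<cv::Vec3d> rvecs, tvecs;
 * cv::aruco::estimatePoseSingleMarkers(corners, markerLength, cameraMatrix, distCoeffs, rvecs, tvecs);
 * for (size_t i = 0; i < rvecs.size(); i++)
 *     cv::drawFrameAxes(image, cameraMatrix, distCoeffs, rvecs[i], tvecs[i], markerLength * 0.5f);
 * @endcode
 */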
}
}

View File

@ -1,99 +0,0 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html
#include "aruco_calib.hpp"
#include <opencv2/calib3d.hpp>
namespace cv {
namespace aruco {
using namespace std;
using namespace cv;
EstimateParameters::EstimateParameters() : pattern(ARUCO_CCW_CENTER), useExtrinsicGuess(false),
solvePnPMethod(SOLVEPNP_ITERATIVE) {}
double calibrateCameraAruco(InputArrayOfArrays _corners, InputArray _ids, InputArray _counter,
const Ptr<Board> &board, Size imageSize, InputOutputArray _cameraMatrix,
InputOutputArray _distCoeffs, OutputArrayOfArrays _rvecs,
OutputArrayOfArrays _tvecs,
OutputArray _stdDeviationsIntrinsics,
OutputArray _stdDeviationsExtrinsics,
OutputArray _perViewErrors,
int flags, const TermCriteria& criteria) {
// for each frame, get properly processed imagePoints and objectPoints for the calibrateCamera
// function
vector<Mat> processedObjectPoints, processedImagePoints;
size_t nFrames = _counter.total();
int markerCounter = 0;
for(size_t frame = 0; frame < nFrames; frame++) {
int nMarkersInThisFrame = _counter.getMat().ptr< int >()[frame];
vector<Mat> thisFrameCorners;
vector<int> thisFrameIds;
CV_Assert(nMarkersInThisFrame > 0);
thisFrameCorners.reserve((size_t) nMarkersInThisFrame);
thisFrameIds.reserve((size_t) nMarkersInThisFrame);
for(int j = markerCounter; j < markerCounter + nMarkersInThisFrame; j++) {
thisFrameCorners.push_back(_corners.getMat(j));
thisFrameIds.push_back(_ids.getMat().ptr< int >()[j]);
}
markerCounter += nMarkersInThisFrame;
Mat currentImgPoints, currentObjPoints;
board->matchImagePoints(thisFrameCorners, thisFrameIds, currentObjPoints,
currentImgPoints);
if(currentImgPoints.total() > 0 && currentObjPoints.total() > 0) {
processedImagePoints.push_back(currentImgPoints);
processedObjectPoints.push_back(currentObjPoints);
}
}
return calibrateCamera(processedObjectPoints, processedImagePoints, imageSize, _cameraMatrix, _distCoeffs, _rvecs,
_tvecs, _stdDeviationsIntrinsics, _stdDeviationsExtrinsics, _perViewErrors, flags, criteria);
}
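/* Editor's note: a short usage sketch for the function above, not part of the original sources.
 * `allCorners` and `allIds` concatenate the detections of every calibration frame, and
 * `markerCounter[f]` stores how many markers were detected in frame f so the flat arrays can be
 * split again; `board` (Ptr<Board>) and `imageSize` are assumed to be known.
 * @code
 * cv::Mat cameraMatrix, distCoeffs;
 * std::vector<cv::Mat> rvecs, tvecs;
 * double rms = cv::aruco::calibrateCameraAruco(allCorners, allIds, markerCounter, board, imageSize,
 *                                              cameraMatrix, distCoeffs, rvecs, tvecs);
 * @endcode
 */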
double calibrateCameraAruco(InputArrayOfArrays _corners, InputArray _ids, InputArray _counter, const Ptr<Board> &board,
Size imageSize, InputOutputArray _cameraMatrix, InputOutputArray _distCoeffs,
OutputArrayOfArrays _rvecs, OutputArrayOfArrays _tvecs, int flags, const TermCriteria& criteria) {
return calibrateCameraAruco(_corners, _ids, _counter, board, imageSize, _cameraMatrix, _distCoeffs,
_rvecs, _tvecs, noArray(), noArray(), noArray(), flags, criteria);
}
double calibrateCameraCharuco(InputArrayOfArrays _charucoCorners, InputArrayOfArrays _charucoIds,
const Ptr<CharucoBoard> &_board, Size imageSize,
InputOutputArray _cameraMatrix, InputOutputArray _distCoeffs,
OutputArrayOfArrays _rvecs, OutputArrayOfArrays _tvecs,
OutputArray _stdDeviationsIntrinsics,
OutputArray _stdDeviationsExtrinsics,
OutputArray _perViewErrors,
int flags, const TermCriteria& criteria) {
CV_Assert(_charucoIds.total() > 0 && (_charucoIds.total() == _charucoCorners.total()));
// Join object points of charuco corners in a single vector for calibrateCamera() function
vector<vector<Point3f> > allObjPoints;
allObjPoints.resize(_charucoIds.total());
for(unsigned int i = 0; i < _charucoIds.total(); i++) {
unsigned int nCorners = (unsigned int)_charucoIds.getMat(i).total();
CV_Assert(nCorners > 0 && nCorners == _charucoCorners.getMat(i).total());
allObjPoints[i].reserve(nCorners);
for(unsigned int j = 0; j < nCorners; j++) {
int pointId = _charucoIds.getMat(i).at< int >(j);
CV_Assert(pointId >= 0 && pointId < (int)_board->getChessboardCorners().size());
allObjPoints[i].push_back(_board->getChessboardCorners()[pointId]);
}
}
return calibrateCamera(allObjPoints, _charucoCorners, imageSize, _cameraMatrix, _distCoeffs, _rvecs, _tvecs,
_stdDeviationsIntrinsics, _stdDeviationsExtrinsics, _perViewErrors, flags, criteria);
}
double calibrateCameraCharuco(InputArrayOfArrays _charucoCorners, InputArrayOfArrays _charucoIds,
const Ptr<CharucoBoard> &_board, Size imageSize, InputOutputArray _cameraMatrix,
InputOutputArray _distCoeffs, OutputArrayOfArrays _rvecs, OutputArrayOfArrays _tvecs,
int flags, const TermCriteria& criteria) {
return calibrateCameraCharuco(_charucoCorners, _charucoIds, _board, imageSize, _cameraMatrix, _distCoeffs, _rvecs,
_tvecs, noArray(), noArray(), noArray(), flags, criteria);
}
}
}

View File

@ -1,180 +0,0 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html
#ifndef OPENCV_ARUCO_CALIB_POSE_HPP
#define OPENCV_ARUCO_CALIB_POSE_HPP
#include <opencv2/objdetect/aruco_board.hpp>
namespace cv {
namespace aruco {
//! @addtogroup aruco
//! @{
/** @brief rvec/tvec define the right-handed coordinate system of the marker.
 *
 * PatternPositionType defines the center of this coordinate system and the direction of its axes.
* Axis X (red color) - first coordinate, axis Y (green color) - second coordinate,
* axis Z (blue color) - third coordinate.
*
* @deprecated Use Board::matchImagePoints and cv::solvePnP
*
* @sa estimatePoseSingleMarkers()
*/
enum PatternPositionType {
/** @brief The marker coordinate system is centered on the middle of the marker.
*
* The coordinates of the four corners (CCW order) of the marker in its own coordinate system are:
* (-markerLength/2, markerLength/2, 0), (markerLength/2, markerLength/2, 0),
* (markerLength/2, -markerLength/2, 0), (-markerLength/2, -markerLength/2, 0).
*/
ARUCO_CCW_CENTER,
/** @brief The marker coordinate system is centered on the top-left corner of the marker.
*
* The coordinates of the four corners (CW order) of the marker in its own coordinate system are:
* (0, 0, 0), (markerLength, 0, 0),
* (markerLength, markerLength, 0), (0, markerLength, 0).
*
 * This pattern is convenient to use with a chessboard/ChArUco board.
*/
ARUCO_CW_TOP_LEFT_CORNER
};
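/* Editor's note: a small worked example, not part of the original sources. For markerLength = 0.04,
 * ARUCO_CCW_CENTER yields the object points
 *     (-0.02, 0.02, 0), (0.02, 0.02, 0), (0.02, -0.02, 0), (-0.02, -0.02, 0),
 * while ARUCO_CW_TOP_LEFT_CORNER yields
 *     (0, 0, 0), (0.04, 0, 0), (0.04, 0.04, 0), (0, 0.04, 0).
 */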
/** @brief Pose estimation parameters
*
 * @param pattern Defines the center of the coordinate system and the axes direction (default PatternPositionType::ARUCO_CCW_CENTER).
* @param useExtrinsicGuess Parameter used for SOLVEPNP_ITERATIVE. If true (1), the function uses the provided
* rvec and tvec values as initial approximations of the rotation and translation vectors, respectively, and further
* optimizes them (default false).
* @param solvePnPMethod Method for solving a PnP problem: see @ref calib3d_solvePnP_flags (default SOLVEPNP_ITERATIVE).
*
* @deprecated Use Board::matchImagePoints and cv::solvePnP
*
* @sa PatternPositionType, solvePnP()
*/
struct CV_EXPORTS_W_SIMPLE EstimateParameters {
CV_PROP_RW aruco::PatternPositionType pattern;
CV_PROP_RW bool useExtrinsicGuess;
CV_PROP_RW int solvePnPMethod;
CV_WRAP EstimateParameters();
};
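/* Editor's note: a short construction sketch, not part of the original sources, showing how these
 * parameters can be customised before being passed to estimatePoseSingleMarkers.
 * @code
 * cv::Ptr<cv::aruco::EstimateParameters> ep = cv::makePtr<cv::aruco::EstimateParameters>();
 * ep->pattern = cv::aruco::ARUCO_CCW_CENTER;      // marker-centred coordinate system (default)
 * ep->solvePnPMethod = cv::SOLVEPNP_IPPE_SQUARE;  // well suited to the 4 coplanar square corners
 * ep->useExtrinsicGuess = false;
 * @endcode
 */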
/**
* @brief Calibrate a camera using aruco markers
*
* @param corners vector of detected marker corners in all frames.
* The corners should have the same format returned by detectMarkers (see #detectMarkers).
* @param ids list of identifiers for each marker in corners
* @param counter number of markers in each frame so that corners and ids can be split
* @param board Marker Board layout
* @param imageSize Size of the image used only to initialize the intrinsic camera matrix.
* @param cameraMatrix Output 3x3 floating-point camera matrix
* \f$A = \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\f$ . If CV\_CALIB\_USE\_INTRINSIC\_GUESS
* and/or CV_CALIB_FIX_ASPECT_RATIO are specified, some or all of fx, fy, cx, cy must be
* initialized before calling the function.
* @param distCoeffs Output vector of distortion coefficients
* \f$(k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6],[s_1, s_2, s_3, s_4]])\f$ of 4, 5, 8 or 12 elements
* @param rvecs Output vector of rotation vectors (see Rodrigues ) estimated for each board view
 * (e.g. std::vector<cv::Mat>). That is, each k-th rotation vector together with the corresponding
* k-th translation vector (see the next output parameter description) brings the board pattern
* from the model coordinate space (in which object points are specified) to the world coordinate
* space, that is, a real position of the board pattern in the k-th pattern view (k=0.. *M* -1).
* @param tvecs Output vector of translation vectors estimated for each pattern view.
* @param stdDeviationsIntrinsics Output vector of standard deviations estimated for intrinsic parameters.
* Order of deviations values:
* \f$(f_x, f_y, c_x, c_y, k_1, k_2, p_1, p_2, k_3, k_4, k_5, k_6 , s_1, s_2, s_3,
 * s_4, \tau_x, \tau_y)\f$. If one of the parameters is not estimated, its deviation is equal to zero.
* @param stdDeviationsExtrinsics Output vector of standard deviations estimated for extrinsic parameters.
* Order of deviations values: \f$(R_1, T_1, \dotsc , R_M, T_M)\f$ where M is number of pattern views,
* \f$R_i, T_i\f$ are concatenated 1x3 vectors.
* @param perViewErrors Output vector of average re-projection errors estimated for each pattern view.
 * @param flags Different flags for the calibration process (see #calibrateCamera for details).
* @param criteria Termination criteria for the iterative optimization algorithm.
*
* This function calibrates a camera using an Aruco Board. The function receives a list of
* detected markers from several views of the Board. The process is similar to the chessboard
* calibration in calibrateCamera(). The function returns the final re-projection error.
*
* @deprecated Use Board::matchImagePoints and cv::solvePnP
*/
CV_EXPORTS_AS(calibrateCameraArucoExtended)
double calibrateCameraAruco(InputArrayOfArrays corners, InputArray ids, InputArray counter, const Ptr<Board> &board,
Size imageSize, InputOutputArray cameraMatrix, InputOutputArray distCoeffs,
OutputArrayOfArrays rvecs, OutputArrayOfArrays tvecs, OutputArray stdDeviationsIntrinsics,
OutputArray stdDeviationsExtrinsics, OutputArray perViewErrors, int flags = 0,
const TermCriteria& criteria = TermCriteria(TermCriteria::COUNT + TermCriteria::EPS, 30, DBL_EPSILON));
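/* Editor's note: a hedged migration sketch, not part of the original sources, reproducing the gist
 * of this function with the current API. It assumes `allCorners[f]` / `allIds[f]` hold the marker
 * detections of frame f (e.g. from ArucoDetector::detectMarkers) and that `board` (Ptr<Board>) and
 * `imageSize` are known.
 * @code
 * std::vector<cv::Mat> objPointsPerFrame, imgPointsPerFrame;
 * for (size_t f = 0; f < allCorners.size(); f++) {
 *     cv::Mat objPoints, imgPoints;
 *     board->matchImagePoints(allCorners[f], allIds[f], objPoints, imgPoints);
 *     if (!objPoints.empty()) {
 *         objPointsPerFrame.push_back(objPoints);
 *         imgPointsPerFrame.push_back(imgPoints);
 *     }
 * }
 * cv::Mat cameraMatrix, distCoeffs;
 * std::vector<cv::Mat> rvecs, tvecs;
 * double rms = cv::calibrateCamera(objPointsPerFrame, imgPointsPerFrame, imageSize,
 *                                  cameraMatrix, distCoeffs, rvecs, tvecs);
 * @endcode
 */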
/** @overload
* @brief It's the same function as #calibrateCameraAruco but without calibration error estimation.
* @deprecated Use Board::matchImagePoints and cv::solvePnP
*/
CV_EXPORTS_W double calibrateCameraAruco(InputArrayOfArrays corners, InputArray ids, InputArray counter,
const Ptr<Board> &board, Size imageSize, InputOutputArray cameraMatrix,
InputOutputArray distCoeffs, OutputArrayOfArrays rvecs = noArray(),
OutputArrayOfArrays tvecs = noArray(), int flags = 0,
const TermCriteria& criteria = TermCriteria(TermCriteria::COUNT + TermCriteria::EPS,
30, DBL_EPSILON));
/**
* @brief Calibrate a camera using Charuco corners
*
* @param charucoCorners vector of detected charuco corners per frame
* @param charucoIds list of identifiers for each corner in charucoCorners per frame
* @param board Marker Board layout
* @param imageSize input image size
* @param cameraMatrix Output 3x3 floating-point camera matrix
* \f$A = \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\f$ . If CV\_CALIB\_USE\_INTRINSIC\_GUESS
* and/or CV_CALIB_FIX_ASPECT_RATIO are specified, some or all of fx, fy, cx, cy must be
* initialized before calling the function.
* @param distCoeffs Output vector of distortion coefficients
* \f$(k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6],[s_1, s_2, s_3, s_4]])\f$ of 4, 5, 8 or 12 elements
* @param rvecs Output vector of rotation vectors (see Rodrigues ) estimated for each board view
 * (e.g. std::vector<cv::Mat>). That is, each k-th rotation vector together with the corresponding
* k-th translation vector (see the next output parameter description) brings the board pattern
* from the model coordinate space (in which object points are specified) to the world coordinate
* space, that is, a real position of the board pattern in the k-th pattern view (k=0.. *M* -1).
* @param tvecs Output vector of translation vectors estimated for each pattern view.
* @param stdDeviationsIntrinsics Output vector of standard deviations estimated for intrinsic parameters.
* Order of deviations values:
* \f$(f_x, f_y, c_x, c_y, k_1, k_2, p_1, p_2, k_3, k_4, k_5, k_6 , s_1, s_2, s_3,
 * s_4, \tau_x, \tau_y)\f$. If one of the parameters is not estimated, its deviation is equal to zero.
* @param stdDeviationsExtrinsics Output vector of standard deviations estimated for extrinsic parameters.
* Order of deviations values: \f$(R_1, T_1, \dotsc , R_M, T_M)\f$ where M is number of pattern views,
* \f$R_i, T_i\f$ are concatenated 1x3 vectors.
* @param perViewErrors Output vector of average re-projection errors estimated for each pattern view.
 * @param flags Different flags for the calibration process (see #calibrateCamera for details).
* @param criteria Termination criteria for the iterative optimization algorithm.
*
* This function calibrates a camera using a set of corners of a Charuco Board. The function
* receives a list of detected corners and its identifiers from several views of the Board.
* The function returns the final re-projection error.
*
* @deprecated Use CharucoBoard::matchImagePoints and cv::solvePnP
*/
CV_EXPORTS_AS(calibrateCameraCharucoExtended)
double calibrateCameraCharuco(InputArrayOfArrays charucoCorners, InputArrayOfArrays charucoIds,
const Ptr<CharucoBoard> &board, Size imageSize, InputOutputArray cameraMatrix,
InputOutputArray distCoeffs, OutputArrayOfArrays rvecs, OutputArrayOfArrays tvecs,
OutputArray stdDeviationsIntrinsics, OutputArray stdDeviationsExtrinsics,
OutputArray perViewErrors, int flags = 0, const TermCriteria& criteria = TermCriteria(
TermCriteria::COUNT + TermCriteria::EPS, 30, DBL_EPSILON));
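/* Editor's note: a hedged migration sketch, not part of the original sources. For each calibration
 * image it detects the ChArUco corners with the current API and converts them into object/image
 * point pairs; `board` (a cv::aruco::CharucoBoard), `images` and `imageSize` are assumed to be
 * available.
 * @code
 * cv::aruco::CharucoDetector detector(board);
 * std::vector<cv::Mat> allObjPoints, allImgPoints;
 * for (const cv::Mat& image : images) {
 *     cv::Mat charucoCorners, charucoIds, objPoints, imgPoints;
 *     detector.detectBoard(image, charucoCorners, charucoIds);
 *     if (charucoIds.total() < 4) continue;  // not enough corners in this view
 *     board.matchImagePoints(charucoCorners, charucoIds, objPoints, imgPoints);
 *     allObjPoints.push_back(objPoints);
 *     allImgPoints.push_back(imgPoints);
 * }
 * cv::Mat cameraMatrix, distCoeffs;
 * std::vector<cv::Mat> rvecs, tvecs;
 * double rms = cv::calibrateCamera(allObjPoints, allImgPoints, imageSize,
 *                                  cameraMatrix, distCoeffs, rvecs, tvecs);
 * @endcode
 */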
/**
* @brief It's the same function as #calibrateCameraCharuco but without calibration error estimation.
*
* @deprecated Use CharucoBoard::matchImagePoints and cv::solvePnP
*/
CV_EXPORTS_W double calibrateCameraCharuco(InputArrayOfArrays charucoCorners, InputArrayOfArrays charucoIds,
const Ptr<CharucoBoard> &board, Size imageSize,
InputOutputArray cameraMatrix, InputOutputArray distCoeffs,
OutputArrayOfArrays rvecs = noArray(),
OutputArrayOfArrays tvecs = noArray(), int flags = 0,
const TermCriteria& criteria=TermCriteria(TermCriteria::COUNT +
TermCriteria::EPS, 30, DBL_EPSILON));
//! @}
}
}
#endif

View File

@ -1,62 +0,0 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html
#include "precomp.hpp"
#include <opencv2/calib3d.hpp>
#include "charuco.hpp"
#include <opencv2/imgproc.hpp>
namespace cv {
namespace aruco {
using namespace std;
int interpolateCornersCharuco(InputArrayOfArrays _markerCorners, InputArray _markerIds,
InputArray _image, const Ptr<CharucoBoard> &_board,
OutputArray _charucoCorners, OutputArray _charucoIds,
InputArray _cameraMatrix, InputArray _distCoeffs, int minMarkers) {
CharucoParameters params;
params.minMarkers = minMarkers;
params.cameraMatrix = _cameraMatrix.getMat();
params.distCoeffs = _distCoeffs.getMat();
CharucoDetector detector(*_board, params);
vector<Mat> markerCorners;
_markerCorners.getMatVector(markerCorners);
detector.detectBoard(_image, _charucoCorners, _charucoIds, markerCorners, _markerIds.getMat());
return (int)_charucoIds.total();
}
void detectCharucoDiamond(InputArray _image, InputArrayOfArrays _markerCorners, InputArray _markerIds,
float squareMarkerLengthRate, OutputArrayOfArrays _diamondCorners, OutputArray _diamondIds,
InputArray _cameraMatrix, InputArray _distCoeffs, Ptr<Dictionary> dictionary) {
CharucoParameters params;
params.cameraMatrix = _cameraMatrix.getMat();
params.distCoeffs = _distCoeffs.getMat();
CharucoBoard board({3, 3}, squareMarkerLengthRate, 1.f, *dictionary);
CharucoDetector detector(board, params);
vector<Mat> markerCorners;
_markerCorners.getMatVector(markerCorners);
detector.detectDiamonds(_image, _diamondCorners, _diamondIds, markerCorners, _markerIds.getMat());
}
void drawCharucoDiamond(const Ptr<Dictionary> &dictionary, Vec4i ids, int squareLength, int markerLength,
OutputArray _img, int marginSize, int borderBits) {
CV_Assert(squareLength > 0 && markerLength > 0 && squareLength > markerLength);
CV_Assert(marginSize >= 0 && borderBits > 0);
// assign the charuco marker ids
vector<int> tmpIds(4);
for(int i = 0; i < 4; i++)
tmpIds[i] = ids[i];
// create a charuco board similar to a charuco marker and print it
CharucoBoard board(Size(3, 3), (float)squareLength, (float)markerLength, *dictionary, tmpIds);
Size outSize(3 * squareLength + 2 * marginSize, 3 * squareLength + 2 * marginSize);
board.generateImage(outSize, _img, marginSize, borderBits);
}
}
}

View File

@ -1,110 +0,0 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html
#ifndef OPENCV_CHARUCO_HPP
#define OPENCV_CHARUCO_HPP
#include <opencv2/core.hpp>
#include <vector>
#include "../aruco.hpp"
#include <opencv2/objdetect/charuco_detector.hpp>
#include "aruco_calib.hpp"
namespace cv {
namespace aruco {
//! @addtogroup aruco
//! @{
/**
* @brief Interpolate position of ChArUco board corners
 * @param markerCorners vector of already detected marker corners. For each marker, its four
 * corners are provided (e.g. std::vector<std::vector<cv::Point2f> >). For N detected markers, the
 * dimensions of this array should be Nx4. The order of the corners should be clockwise.
* @param markerIds list of identifiers for each marker in corners
 * @param image input image necessary for corner refinement. Note that markers are not detected here;
 * they must be passed in through the markerCorners and markerIds parameters.
* @param board layout of ChArUco board.
* @param charucoCorners interpolated chessboard corners
* @param charucoIds interpolated chessboard corners identifiers
* @param cameraMatrix optional 3x3 floating-point camera matrix
* \f$A = \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\f$
* @param distCoeffs optional vector of distortion coefficients
* \f$(k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6],[s_1, s_2, s_3, s_4]])\f$ of 4, 5, 8 or 12 elements
* @param minMarkers number of adjacent markers that must be detected to return a charuco corner
*
* This function receives the detected markers and returns the 2D position of the chessboard corners
* from a ChArUco board using the detected Aruco markers. If camera parameters are provided,
 * the process is based on an approximate pose estimation; otherwise it is based on local homography.
* Only visible corners are returned. For each corner, its corresponding identifier is
* also returned in charucoIds.
* The function returns the number of interpolated corners.
*
* @deprecated Use CharucoDetector::detectBoard
*/
CV_EXPORTS_W int interpolateCornersCharuco(InputArrayOfArrays markerCorners, InputArray markerIds,
InputArray image, const Ptr<CharucoBoard> &board,
OutputArray charucoCorners, OutputArray charucoIds,
InputArray cameraMatrix = noArray(),
InputArray distCoeffs = noArray(), int minMarkers = 2);
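/* Editor's note: a short usage sketch, not part of the original sources, assuming `markerCorners`
 * and `markerIds` come from a prior detectMarkers call on `image` and that `board` is the observed
 * ChArUco board (Ptr<CharucoBoard>).
 * @code
 * std::vector<cv::Point2f> charucoCorners;
 * std::vector<int> charucoIds;
 * int n = cv::aruco::interpolateCornersCharuco(markerCorners, markerIds, image, board,
 *                                              charucoCorners, charucoIds);
 * // n interpolated chessboard corners; corner i has identifier charucoIds[i]
 * @endcode
 */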
/**
* @brief Detect ChArUco Diamond markers
*
 * @param image input image necessary for corner subpixel refinement.
* @param markerCorners list of detected marker corners from detectMarkers function.
* @param markerIds list of marker ids in markerCorners.
* @param squareMarkerLengthRate rate between square and marker length:
* squareMarkerLengthRate = squareLength/markerLength. The real units are not necessary.
* @param diamondCorners output list of detected diamond corners (4 corners per diamond). The order
 * is the same as in markerCorners: top left, top right, bottom right and bottom left. Same
 * format as the corners returned by detectMarkers (e.g. std::vector<std::vector<cv::Point2f> >).
* @param diamondIds ids of the diamonds in diamondCorners. The id of each diamond is in fact of
* type Vec4i, so each diamond has 4 ids, which are the ids of the aruco markers composing the
* diamond.
* @param cameraMatrix Optional camera calibration matrix.
* @param distCoeffs Optional camera distortion coefficients.
* @param dictionary dictionary of markers indicating the type of markers.
*
* This function detects Diamond markers from the previous detected ArUco markers. The diamonds
* are returned in the diamondCorners and diamondIds parameters. If camera calibration parameters
* are provided, the diamond search is based on reprojection. If not, diamond search is based on
* homography. Homography is faster than reprojection, but less accurate.
*
* @deprecated Use CharucoDetector::detectDiamonds
*/
CV_EXPORTS_W void detectCharucoDiamond(InputArray image, InputArrayOfArrays markerCorners,
InputArray markerIds, float squareMarkerLengthRate,
OutputArrayOfArrays diamondCorners, OutputArray diamondIds,
InputArray cameraMatrix = noArray(),
InputArray distCoeffs = noArray(),
Ptr<Dictionary> dictionary = makePtr<Dictionary>
(getPredefinedDictionary(PredefinedDictionaryType::DICT_4X4_50)));
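/* Editor's note: a short usage sketch, not part of the original sources. It assumes markers were
 * already detected in `image`, that the printed diamond has square/marker sizes `squareLength` and
 * `markerLength`, and that `dictionary` (Ptr<Dictionary>) matches the printed markers.
 * @code
 * std::vector<std::vector<cv::Point2f>> diamondCorners;
 * std::vector<cv::Vec4i> diamondIds;
 * cv::aruco::detectCharucoDiamond(image, markerCorners, markerIds,
 *                                 squareLength / markerLength,
 *                                 diamondCorners, diamondIds,
 *                                 cv::noArray(), cv::noArray(), dictionary);
 * @endcode
 */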
/**
* @brief Draw a ChArUco Diamond marker
*
* @param dictionary dictionary of markers indicating the type of markers.
* @param ids list of 4 ids for each ArUco marker in the ChArUco marker.
* @param squareLength size of the chessboard squares in pixels.
* @param markerLength size of the markers in pixels.
* @param img output image with the marker. The size of this image will be
 * 3*squareLength + 2*marginSize.
* @param marginSize minimum margins (in pixels) of the marker in the output image
* @param borderBits width of the marker borders.
*
 * This function returns the image of a ChArUco diamond marker, ready to be printed.
*
* @deprecated Use CharucoBoard::generateImage()
*/
CV_EXPORTS_W void drawCharucoDiamond(const Ptr<Dictionary> &dictionary, Vec4i ids, int squareLength,
int markerLength, OutputArray img, int marginSize = 0,
int borderBits = 1);
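/* Editor's note: a short usage sketch, not part of the original sources; `dictionary` is assumed to
 * be a cv::Ptr<cv::aruco::Dictionary> and the hypothetical ids 45, 68, 28 and 74 select the four
 * ArUco markers placed on the diamond.
 * @code
 * cv::Mat diamondImg;
 * cv::aruco::drawCharucoDiamond(dictionary, cv::Vec4i(45, 68, 28, 74), 200, 120, diamondImg);
 * cv::imwrite("diamond_45_68_28_74.png", diamondImg);  // requires opencv2/imgcodecs.hpp
 * @endcode
 */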
//! @}
}
}
#endif

View File

@ -1,11 +0,0 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html
#ifndef __OPENCV_CCALIB_PRECOMP__
#define __OPENCV_CCALIB_PRECOMP__
#include <opencv2/core.hpp>
#include <vector>
#endif