diff --git a/include/Frame.h b/include/Frame.h
index d143a74e..8ea275c6 100644
--- a/include/Frame.h
+++ b/include/Frame.h
@@ -59,19 +59,19 @@ class Frame
Frame(const Frame &frame);
// Constructor for stereo cameras.
- Frame(const cv::Mat &imLeft, const cv::Mat &imRight, const double &timeStamp, ORBextractor* extractorLeft, ORBextractor* extractorRight, ORBVocabulary* voc, cv::Mat &K, cv::Mat &distCoef, const float &bf, const float &thDepth, GeometricCamera* pCamera,Frame* pPrevF = static_cast<Frame*>(NULL), const IMU::Calib &ImuCalib = IMU::Calib());
+ Frame(const cv::Mat &imLeft, const cv::Mat &imRight, const cv::Mat *mask, const double &timeStamp, ORBextractor* extractorLeft, ORBextractor* extractorRight, ORBVocabulary* voc, cv::Mat &K, cv::Mat &distCoef, const float &bf, const float &thDepth, GeometricCamera* pCamera,Frame* pPrevF = static_cast<Frame*>(NULL), const IMU::Calib &ImuCalib = IMU::Calib());
// Constructor for RGB-D cameras.
- Frame(const cv::Mat &imGray, const cv::Mat &imDepth, const double &timeStamp, ORBextractor* extractor,ORBVocabulary* voc, cv::Mat &K, cv::Mat &distCoef, const float &bf, const float &thDepth, GeometricCamera* pCamera,Frame* pPrevF = static_cast<Frame*>(NULL), const IMU::Calib &ImuCalib = IMU::Calib());
+ Frame(const cv::Mat &imGray, const cv::Mat &imDepth, const cv::Mat *mask, const double &timeStamp, ORBextractor* extractor,ORBVocabulary* voc, cv::Mat &K, cv::Mat &distCoef, const float &bf, const float &thDepth, GeometricCamera* pCamera,Frame* pPrevF = static_cast<Frame*>(NULL), const IMU::Calib &ImuCalib = IMU::Calib());
// Constructor for Monocular cameras.
- Frame(const cv::Mat &imGray, const double &timeStamp, ORBextractor* extractor,ORBVocabulary* voc, GeometricCamera* pCamera, cv::Mat &distCoef, const float &bf, const float &thDepth, Frame* pPrevF = static_cast<Frame*>(NULL), const IMU::Calib &ImuCalib = IMU::Calib());
+ Frame(const cv::Mat &imGray, const cv::Mat *mask, const double &timeStamp, ORBextractor* extractor,ORBVocabulary* voc, GeometricCamera* pCamera, cv::Mat &distCoef, const float &bf, const float &thDepth, Frame* pPrevF = static_cast<Frame*>(NULL), const IMU::Calib &ImuCalib = IMU::Calib());
// Destructor
// ~Frame();
// Extract ORB on the image. 0 for left image and 1 for right image.
- void ExtractORB(int flag, const cv::Mat &im, const int x0, const int x1);
+ void ExtractORB(int flag, const cv::Mat &im, const int x0, const int x1, const cv::Mat * mask = nullptr);
// Compute Bag of Words representation.
void ComputeBoW();
@@ -343,7 +343,7 @@ class Frame
//Grid for the right image
std::vector<std::size_t> mGridRight[FRAME_GRID_COLS][FRAME_GRID_ROWS];
- Frame(const cv::Mat &imLeft, const cv::Mat &imRight, const double &timeStamp, ORBextractor* extractorLeft, ORBextractor* extractorRight, ORBVocabulary* voc, cv::Mat &K, cv::Mat &distCoef, const float &bf, const float &thDepth, GeometricCamera* pCamera, GeometricCamera* pCamera2, Sophus::SE3f& Tlr,Frame* pPrevF = static_cast<Frame*>(NULL), const IMU::Calib &ImuCalib = IMU::Calib());
+ Frame(const cv::Mat &imLeft, const cv::Mat &imRight, const cv::Mat *mask, const double &timeStamp, ORBextractor* extractorLeft, ORBextractor* extractorRight, ORBVocabulary* voc, cv::Mat &K, cv::Mat &distCoef, const float &bf, const float &thDepth, GeometricCamera* pCamera, GeometricCamera* pCamera2, Sophus::SE3f& Tlr,Frame* pPrevF = static_cast<Frame*>(NULL), const IMU::Calib &ImuCalib = IMU::Calib());
//Stereo fisheye
void ComputeStereoFishEyeMatches();
diff --git a/include/ORBextractor.h b/include/ORBextractor.h
index bb7aace3..9e7fc93f 100644
--- a/include/ORBextractor.h
+++ b/include/ORBextractor.h
@@ -53,11 +53,14 @@ class ORBextractor
// Compute the ORB features and descriptors on an image.
// ORB features are dispersed on the image using an octree.
- // Mask is ignored in the current implementation.
int operator()( cv::InputArray _image, cv::InputArray _mask,
std::vector<cv::KeyPoint>& _keypoints,
cv::OutputArray _descriptors, std::vector<int> &vLappingArea);
+ int operator()( cv::InputArray _image,
+ std::vector<cv::KeyPoint>& _keypoints,
+ cv::OutputArray _descriptors, std::vector<int> &vLappingArea);
+
int inline GetLevels(){
return nlevels;}
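
With the second, mask-free overload declared alongside the original one, call sites can pick the variant they need. A minimal sketch of the two intended call patterns (variable names here are illustrative, not part of this patch):

    // Without a mask (new overload): behaves like the previous code path,
    // where the mask argument was ignored.
    std::vector<cv::KeyPoint> vKeys;
    cv::Mat descriptors;
    std::vector<int> vLapping = {0, 0};
    int nMono = (*pORBextractor)(imGray, vKeys, descriptors, vLapping);

    // With a mask (existing overload): mask is CV_8UC1, same size as imGray,
    // 255 on pixels where keypoints may be kept, 0 elsewhere.
    int nMonoMasked = (*pORBextractor)(imGray, mask, vKeys, descriptors, vLapping);
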
diff --git a/include/System.h b/include/System.h
index 872c86e7..f59903ad 100644
--- a/include/System.h
+++ b/include/System.h
@@ -107,18 +107,18 @@ class System
// Process the given stereo frame. Images must be synchronized and rectified.
// Input images: RGB (CV_8UC3) or grayscale (CV_8U). RGB is converted to grayscale.
// Returns the camera pose (empty if tracking fails).
- Sophus::SE3f TrackStereo(const cv::Mat &imLeft, const cv::Mat &imRight, const double &timestamp, const vector<IMU::Point>& vImuMeas = vector<IMU::Point>(), string filename="");
+ Sophus::SE3f TrackStereo(const cv::Mat &imLeft, const cv::Mat &imRight, const cv::Mat *mask, const double &timestamp, const vector<IMU::Point>& vImuMeas = vector<IMU::Point>(), string filename="");
// Process the given rgbd frame. Depthmap must be registered to the RGB frame.
// Input image: RGB (CV_8UC3) or grayscale (CV_8U). RGB is converted to grayscale.
// Input depthmap: Float (CV_32F).
// Returns the camera pose (empty if tracking fails).
- Sophus::SE3f TrackRGBD(const cv::Mat &im, const cv::Mat &depthmap, const double &timestamp, const vector<IMU::Point>& vImuMeas = vector<IMU::Point>(), string filename="");
+ Sophus::SE3f TrackRGBD(const cv::Mat &im, const cv::Mat &depthmap, const cv::Mat *mask, const double &timestamp, const vector<IMU::Point>& vImuMeas = vector<IMU::Point>(), string filename="");
// Process the given monocular frame and optionally IMU data
// Input images: RGB (CV_8UC3) or grayscale (CV_8U). RGB is converted to grayscale.
// Returns the camera pose (empty if tracking fails).
- Sophus::SE3f TrackMonocular(const cv::Mat &im, const double &timestamp, const vector<IMU::Point>& vImuMeas = vector<IMU::Point>(), string filename="");
+ Sophus::SE3f TrackMonocular(const cv::Mat &im, const cv::Mat *mask, const double &timestamp, const vector<IMU::Point>& vImuMeas = vector<IMU::Point>(), string filename="");
// This stops local mapping thread (map building) and performs only camera tracking.
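
At the System level the public tracking calls now take an optional mask pointer; passing nullptr keeps the previous behaviour. A hedged usage sketch for the monocular case, assuming a System instance SLAM, a frame im, its timestamp tframe and IMU measurements vImuMeas on the caller side (the mask construction below is illustrative only, e.g. zeroing out a region occupied by a dynamic object):

    // CV_8UC1 mask with the same size as the input image:
    // 255 = use the pixel for ORB extraction, 0 = ignore it.
    cv::Mat mask(im.rows, im.cols, CV_8UC1, cv::Scalar(255));
    mask(cv::Rect(100, 50, 200, 150)) = 0;   // hypothetical region to suppress

    Sophus::SE3f Tcw = SLAM.TrackMonocular(im, &mask, tframe, vImuMeas);
    // Sophus::SE3f Tcw = SLAM.TrackMonocular(im, nullptr, tframe);  // no mask
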
diff --git a/include/Tracking.h b/include/Tracking.h
index 863c98e4..670a7b75 100644
--- a/include/Tracking.h
+++ b/include/Tracking.h
@@ -69,9 +69,9 @@ class Tracking
bool ParseIMUParamFile(cv::FileStorage &fSettings);
// Preprocess the input and call Track(). Extracts features and performs stereo matching.
- Sophus::SE3f GrabImageStereo(const cv::Mat &imRectLeft,const cv::Mat &imRectRight, const double &timestamp, string filename);
- Sophus::SE3f GrabImageRGBD(const cv::Mat &imRGB,const cv::Mat &imD, const double &timestamp, string filename);
- Sophus::SE3f GrabImageMonocular(const cv::Mat &im, const double &timestamp, string filename);
+ Sophus::SE3f GrabImageStereo(const cv::Mat &imRectLeft,const cv::Mat &imRectRight, const cv::Mat *mask, const double &timestamp, string filename);
+ Sophus::SE3f GrabImageRGBD(const cv::Mat &imRGB,const cv::Mat &imD, const cv::Mat *mask, const double &timestamp, string filename);
+ Sophus::SE3f GrabImageMonocular(const cv::Mat &im, const cv::Mat *mask, const double &timestamp, string filename);
void GrabImuData(const IMU::Point &imuMeasurement);
diff --git a/src/Frame.cc b/src/Frame.cc
index 4d5885b2..77231c8f 100644
--- a/src/Frame.cc
+++ b/src/Frame.cc
@@ -98,7 +98,7 @@ Frame::Frame(const Frame &frame)
}
-Frame::Frame(const cv::Mat &imLeft, const cv::Mat &imRight, const double &timeStamp, ORBextractor* extractorLeft, ORBextractor* extractorRight, ORBVocabulary* voc, cv::Mat &K, cv::Mat &distCoef, const float &bf, const float &thDepth, GeometricCamera* pCamera, Frame* pPrevF, const IMU::Calib &ImuCalib)
+Frame::Frame(const cv::Mat &imLeft, const cv::Mat &imRight, const cv::Mat * mask, const double &timeStamp, ORBextractor* extractorLeft, ORBextractor* extractorRight, ORBVocabulary* voc, cv::Mat &K, cv::Mat &distCoef, const float &bf, const float &thDepth, GeometricCamera* pCamera, Frame* pPrevF, const IMU::Calib &ImuCalib)
:mpcpi(NULL), mpORBvocabulary(voc),mpORBextractorLeft(extractorLeft),mpORBextractorRight(extractorRight), mTimeStamp(timeStamp), mK(K.clone()), mK_(Converter::toMatrix3f(K)), mDistCoef(distCoef.clone()), mbf(bf), mThDepth(thDepth),
mImuCalib(ImuCalib), mpImuPreintegrated(NULL), mpPrevFrame(pPrevF),mpImuPreintegratedFrame(NULL), mpReferenceKF(static_cast<KeyFrame*>(NULL)), mbIsSet(false), mbImuPreintegrated(false),
mpCamera(pCamera) ,mpCamera2(nullptr), mbHasPose(false), mbHasVelocity(false)
@@ -119,8 +119,8 @@ Frame::Frame(const cv::Mat &imLeft, const cv::Mat &imRight, const double &timeSt
#ifdef REGISTER_TIMES
std::chrono::steady_clock::time_point time_StartExtORB = std::chrono::steady_clock::now();
#endif
- thread threadLeft(&Frame::ExtractORB,this,0,imLeft,0,0);
- thread threadRight(&Frame::ExtractORB,this,1,imRight,0,0);
+ thread threadLeft(&Frame::ExtractORB,this,0,imLeft,0,0, mask);
+ thread threadRight(&Frame::ExtractORB,this,1,imRight,0,0, mask);
threadLeft.join();
threadRight.join();
#ifdef REGISTER_TIMES
@@ -197,7 +197,7 @@ Frame::Frame(const cv::Mat &imLeft, const cv::Mat &imRight, const double &timeSt
AssignFeaturesToGrid();
}
-Frame::Frame(const cv::Mat &imGray, const cv::Mat &imDepth, const double &timeStamp, ORBextractor* extractor,ORBVocabulary* voc, cv::Mat &K, cv::Mat &distCoef, const float &bf, const float &thDepth, GeometricCamera* pCamera,Frame* pPrevF, const IMU::Calib &ImuCalib)
+Frame::Frame(const cv::Mat &imGray, const cv::Mat &imDepth, const cv::Mat * mask, const double &timeStamp, ORBextractor* extractor,ORBVocabulary* voc, cv::Mat &K, cv::Mat &distCoef, const float &bf, const float &thDepth, GeometricCamera* pCamera,Frame* pPrevF, const IMU::Calib &ImuCalib)
:mpcpi(NULL),mpORBvocabulary(voc),mpORBextractorLeft(extractor),mpORBextractorRight(static_cast<ORBextractor*>(NULL)),
mTimeStamp(timeStamp), mK(K.clone()), mK_(Converter::toMatrix3f(K)),mDistCoef(distCoef.clone()), mbf(bf), mThDepth(thDepth),
mImuCalib(ImuCalib), mpImuPreintegrated(NULL), mpPrevFrame(pPrevF), mpImuPreintegratedFrame(NULL), mpReferenceKF(static_cast<KeyFrame*>(NULL)), mbIsSet(false), mbImuPreintegrated(false),
@@ -219,7 +219,7 @@ Frame::Frame(const cv::Mat &imGray, const cv::Mat &imDepth, const double &timeSt
#ifdef REGISTER_TIMES
std::chrono::steady_clock::time_point time_StartExtORB = std::chrono::steady_clock::now();
#endif
- ExtractORB(0,imGray,0,0);
+ ExtractORB(0,imGray,0,0, mask);
#ifdef REGISTER_TIMES
std::chrono::steady_clock::time_point time_EndExtORB = std::chrono::steady_clock::now();
@@ -286,7 +286,7 @@ Frame::Frame(const cv::Mat &imGray, const cv::Mat &imDepth, const double &timeSt
}
-Frame::Frame(const cv::Mat &imGray, const double &timeStamp, ORBextractor* extractor,ORBVocabulary* voc, GeometricCamera* pCamera, cv::Mat &distCoef, const float &bf, const float &thDepth, Frame* pPrevF, const IMU::Calib &ImuCalib)
+Frame::Frame(const cv::Mat &imGray, const cv::Mat * mask, const double &timeStamp, ORBextractor* extractor,ORBVocabulary* voc, GeometricCamera* pCamera, cv::Mat &distCoef, const float &bf, const float &thDepth, Frame* pPrevF, const IMU::Calib &ImuCalib)
:mpcpi(NULL),mpORBvocabulary(voc),mpORBextractorLeft(extractor),mpORBextractorRight(static_cast<ORBextractor*>(NULL)),
mTimeStamp(timeStamp), mK(static_cast<Pinhole*>(pCamera)->toK()), mK_(static_cast<Pinhole*>(pCamera)->toK_()), mDistCoef(distCoef.clone()), mbf(bf), mThDepth(thDepth),
mImuCalib(ImuCalib), mpImuPreintegrated(NULL),mpPrevFrame(pPrevF),mpImuPreintegratedFrame(NULL), mpReferenceKF(static_cast<KeyFrame*>(NULL)), mbIsSet(false), mbImuPreintegrated(false), mpCamera(pCamera),
@@ -308,7 +308,7 @@ Frame::Frame(const cv::Mat &imGray, const double &timeStamp, ORBextractor* extra
#ifdef REGISTER_TIMES
std::chrono::steady_clock::time_point time_StartExtORB = std::chrono::steady_clock::now();
#endif
- ExtractORB(0,imGray,0,1000);
+ ExtractORB(0,imGray,0,1000, mask);
#ifdef REGISTER_TIMES
std::chrono::steady_clock::time_point time_EndExtORB = std::chrono::steady_clock::now();
@@ -415,13 +415,20 @@ void Frame::AssignFeaturesToGrid()
}
}
-void Frame::ExtractORB(int flag, const cv::Mat &im, const int x0, const int x1)
+void Frame::ExtractORB(int flag, const cv::Mat &im, const int x0, const int x1, const cv::Mat * mask)
{
vector<int> vLapping = {x0,x1};
- if(flag==0)
- monoLeft = (*mpORBextractorLeft)(im,cv::Mat(),mvKeys,mDescriptors,vLapping);
- else
- monoRight = (*mpORBextractorRight)(im,cv::Mat(),mvKeysRight,mDescriptorsRight,vLapping);
+ if (mask == nullptr){
+ if(flag==0)
+ monoLeft = (*mpORBextractorLeft)(im,mvKeys,mDescriptors,vLapping);
+ else
+ monoRight = (*mpORBextractorRight)(im,mvKeysRight,mDescriptorsRight,vLapping);
+ } else {
+ if(flag==0)
+ monoLeft = (*mpORBextractorLeft)(im,*mask,mvKeys,mDescriptors,vLapping);
+ else
+ monoRight = (*mpORBextractorRight)(im,*mask,mvKeysRight,mDescriptorsRight,vLapping);
+ }
}
bool Frame::isSet() const {
@@ -1031,7 +1038,7 @@ void Frame::setIntegrated()
mbImuPreintegrated = true;
}
-Frame::Frame(const cv::Mat &imLeft, const cv::Mat &imRight, const double &timeStamp, ORBextractor* extractorLeft, ORBextractor* extractorRight, ORBVocabulary* voc, cv::Mat &K, cv::Mat &distCoef, const float &bf, const float &thDepth, GeometricCamera* pCamera, GeometricCamera* pCamera2, Sophus::SE3f& Tlr,Frame* pPrevF, const IMU::Calib &ImuCalib)
+Frame::Frame(const cv::Mat &imLeft, const cv::Mat &imRight, const cv::Mat * mask, const double &timeStamp, ORBextractor* extractorLeft, ORBextractor* extractorRight, ORBVocabulary* voc, cv::Mat &K, cv::Mat &distCoef, const float &bf, const float &thDepth, GeometricCamera* pCamera, GeometricCamera* pCamera2, Sophus::SE3f& Tlr,Frame* pPrevF, const IMU::Calib &ImuCalib)
:mpcpi(NULL), mpORBvocabulary(voc),mpORBextractorLeft(extractorLeft),mpORBextractorRight(extractorRight), mTimeStamp(timeStamp), mK(K.clone()), mK_(Converter::toMatrix3f(K)), mDistCoef(distCoef.clone()), mbf(bf), mThDepth(thDepth),
mImuCalib(ImuCalib), mpImuPreintegrated(NULL), mpPrevFrame(pPrevF),mpImuPreintegratedFrame(NULL), mpReferenceKF(static_cast<KeyFrame*>(NULL)), mbImuPreintegrated(false), mpCamera(pCamera), mpCamera2(pCamera2),
mbHasPose(false), mbHasVelocity(false)
@@ -1056,8 +1063,8 @@ Frame::Frame(const cv::Mat &imLeft, const cv::Mat &imRight, const double &timeSt
#ifdef REGISTER_TIMES
std::chrono::steady_clock::time_point time_StartExtORB = std::chrono::steady_clock::now();
#endif
- thread threadLeft(&Frame::ExtractORB,this,0,imLeft,static_cast<KannalaBrandt8*>(mpCamera)->mvLappingArea[0],static_cast<KannalaBrandt8*>(mpCamera)->mvLappingArea[1]);
- thread threadRight(&Frame::ExtractORB,this,1,imRight,static_cast<KannalaBrandt8*>(mpCamera2)->mvLappingArea[0],static_cast<KannalaBrandt8*>(mpCamera2)->mvLappingArea[1]);
+ thread threadLeft(&Frame::ExtractORB,this,0,imLeft, static_cast<KannalaBrandt8*>(mpCamera)->mvLappingArea[0],static_cast<KannalaBrandt8*>(mpCamera)->mvLappingArea[1], mask);
+ thread threadRight(&Frame::ExtractORB,this,1,imRight, static_cast<KannalaBrandt8*>(mpCamera2)->mvLappingArea[0],static_cast<KannalaBrandt8*>(mpCamera2)->mvLappingArea[1], mask);
threadLeft.join();
threadRight.join();
#ifdef REGISTER_TIMES
diff --git a/src/ORBextractor.cc b/src/ORBextractor.cc
index 970b7cc7..aa891561 100644
--- a/src/ORBextractor.cc
+++ b/src/ORBextractor.cc
@@ -1083,16 +1083,103 @@ namespace ORB_SLAM3
computeOrbDescriptor(keypoints[i], image, &pattern[0], descriptors.ptr((int)i));
}
+ int ORBextractor::operator()( InputArray _image, vector<KeyPoint>& _keypoints,
+ OutputArray _descriptors, std::vector<int> &vLappingArea)
+ {
+ // cout << "[ORBextractor]: Max Features: " << nfeatures << endl;
+ if(_image.empty())
+ return -1;
+
+ Mat image = _image.getMat();
+ assert(image.type() == CV_8UC1 );
+
+ // Pre-compute the scale pyramid
+ ComputePyramid(image);
+
+ vector < vector<KeyPoint> > allKeypoints;
+ ComputeKeyPointsOctTree(allKeypoints);
+ //ComputeKeyPointsOld(allKeypoints);
+
+ Mat descriptors;
+
+ int nkeypoints = 0;
+ for (int level = 0; level < nlevels; ++level)
+ nkeypoints += (int)allKeypoints[level].size();
+ if( nkeypoints == 0 )
+ _descriptors.release();
+ else
+ {
+ _descriptors.create(nkeypoints, 32, CV_8U);
+ descriptors = _descriptors.getMat();
+ }
+
+ //_keypoints.clear();
+ //_keypoints.reserve(nkeypoints);
+ _keypoints = vector<cv::KeyPoint>(nkeypoints);
+
+ int offset = 0;
+ //Modified for speeding up stereo fisheye matching
+ int monoIndex = 0, stereoIndexFromBack = 1;
+ for (int level = 0; level < nlevels; ++level)
+ {
+ vector<KeyPoint>& keypoints = allKeypoints[level];
+ int nkeypointsLevel = (int)keypoints.size();
+
+ if(nkeypointsLevel==0)
+ continue;
+
+ // preprocess the resized image
+ Mat workingMat = mvImagePyramid[level].clone();
+ GaussianBlur(workingMat, workingMat, Size(7, 7), 2, 2, BORDER_REFLECT_101);
+
+ // Compute the descriptors
+ //Mat desc = descriptors.rowRange(offset, offset + nkeypointsLevel);
+ Mat desc = cv::Mat(nkeypointsLevel, 32, CV_8U);
+ computeDescriptors(workingMat, keypoints, desc, pattern);
+
+ offset += nkeypointsLevel;
+
+ float scale = mvScaleFactor[level]; //getScale(level, firstLevel, scaleFactor);
+ int i = 0;
+ for (vector<KeyPoint>::iterator keypoint = keypoints.begin(),
+ keypointEnd = keypoints.end(); keypoint != keypointEnd; ++keypoint){
+
+ // Scale keypoint coordinates
+ if (level != 0){
+ keypoint->pt *= scale;
+ }
+
+ if(keypoint->pt.x >= vLappingArea[0] && keypoint->pt.x <= vLappingArea[1]){
+ _keypoints.at(nkeypoints - stereoIndexFromBack) = (*keypoint);
+ desc.row(i).copyTo(descriptors.row(nkeypoints - stereoIndexFromBack));
+ stereoIndexFromBack++;
+ }
+ else{
+ _keypoints.at(monoIndex) = (*keypoint);
+ desc.row(i).copyTo(descriptors.row(monoIndex));
+ monoIndex++;
+ }
+ i++;
+ }
+ }
+ //cout << "[ORBextractor]: extracted " << _keypoints.size() << " KeyPoints" << endl;
+ return monoIndex;
+ }
+
int ORBextractor::operator()( InputArray _image, InputArray _mask, vector<KeyPoint>& _keypoints,
OutputArray _descriptors, std::vector<int> &vLappingArea)
{
- //cout << "[ORBextractor]: Max Features: " << nfeatures << endl;
+ // cout << "[ORBextractor]: Max Features: " << nfeatures << endl;
if(_image.empty())
return -1;
Mat image = _image.getMat();
assert(image.type() == CV_8UC1 );
+ Mat mask = _mask.getMat();
+ assert(image.size() == mask.size());
+ assert(mask.type() == CV_8UC1 );
+
// Pre-compute the scale pyramid
ComputePyramid(image);
@@ -1119,7 +1206,7 @@ namespace ORB_SLAM3
int offset = 0;
//Modified for speeding up stereo fisheye matching
- int monoIndex = 0, stereoIndex = nkeypoints-1;
+ int monoIndex = 0, stereoIndexFromBack = 1;
for (int level = 0; level < nlevels; ++level)
{
vector<KeyPoint>& keypoints = allKeypoints[level];
@@ -1139,8 +1226,29 @@ namespace ORB_SLAM3
offset += nkeypointsLevel;
- float scale = mvScaleFactor[level]; //getScale(level, firstLevel, scaleFactor);
+ // Filter keypoints with the mask: scale each keypoint back to level-0 (input image)
+ // coordinates and keep it only where the mask value is 255. Rounding can push a
+ // scaled coordinate just outside the mask, so the lookup is clamped to the mask bounds.
+ float scale = mvScaleFactor[level];
+
+ vector<KeyPoint> newPoints;
+ std::copy_if(keypoints.begin(), keypoints.end(), std::back_inserter(newPoints), [&mask, &nkeypoints, scale](const cv::KeyPoint &p){
+ const int x = std::min(cvRound(p.pt.x*scale), mask.cols-1);
+ const int y = std::min(cvRound(p.pt.y*scale), mask.rows-1);
+ const bool keep = mask.at<uchar>(y,x) == 255;
+ if(!keep)
+ --nkeypoints; // keep nkeypoints equal to the number of keypoints that survive the mask
+ return keep;
+ });
+
+ keypoints = newPoints;
+
int i = 0;
for (vector<KeyPoint>::iterator keypoint = keypoints.begin(),
keypointEnd = keypoints.end(); keypoint != keypointEnd; ++keypoint){
@@ -1151,9 +1259,9 @@ namespace ORB_SLAM3
}
if(keypoint->pt.x >= vLappingArea[0] && keypoint->pt.x <= vLappingArea[1]){
- _keypoints.at(stereoIndex) = (*keypoint);
- desc.row(i).copyTo(descriptors.row(stereoIndex));
- stereoIndex--;
+ _keypoints.at(nkeypoints - stereoIndexFromBack) = (*keypoint);
+ desc.row(i).copyTo(descriptors.row(nkeypoints - stereoIndexFromBack));
+ stereoIndexFromBack++;
}
else{
_keypoints.at(monoIndex) = (*keypoint);
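
The masked overload assumes a single-channel mask aligned with the full-resolution input image (the asserts above enforce type and size). Keypoints detected on higher pyramid levels are scaled back to level-0 coordinates before the lookup; a hypothetical helper mirroring that check could look like this (a sketch only, not code from the patch):

    // Returns true if a keypoint detected at pyramid level L (coordinates still in
    // level-L scale) lands on a mask pixel with value 255. scaleFactorL is
    // mvScaleFactor[L]; the lookup is clamped to guard against rounding.
    static bool KeypointPassesMask(const cv::KeyPoint &kp, float scaleFactorL, const cv::Mat &mask)
    {
        const int x = std::min(cvRound(kp.pt.x * scaleFactorL), mask.cols - 1);
        const int y = std::min(cvRound(kp.pt.y * scaleFactorL), mask.rows - 1);
        return mask.at<uchar>(y, x) == 255;
    }
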
diff --git a/src/System.cc b/src/System.cc
index 60d9c518..30fa989f 100644
--- a/src/System.cc
+++ b/src/System.cc
@@ -241,7 +241,7 @@ System::System(const string &strVocFile, const string &strSettingsFile, const eS
}
-Sophus::SE3f System::TrackStereo(const cv::Mat &imLeft, const cv::Mat &imRight, const double &timestamp, const vector<IMU::Point>& vImuMeas, string filename)
+Sophus::SE3f System::TrackStereo(const cv::Mat &imLeft, const cv::Mat &imRight, const cv::Mat *mask, const double &timestamp, const vector<IMU::Point>& vImuMeas, string filename)
{
if(mSensor!=STEREO && mSensor!=IMU_STEREO)
{
@@ -250,6 +250,7 @@ Sophus::SE3f System::TrackStereo(const cv::Mat &imLeft, const cv::Mat &imRight,
}
cv::Mat imLeftToFeed, imRightToFeed;
+ cv::Mat maskToFeed = (mask == nullptr) ? cv::Mat() : mask->clone();
if(settings_ && settings_->needToRectify()){
cv::Mat M1l = settings_->M1l();
cv::Mat M2l = settings_->M2l();
@@ -262,6 +263,9 @@ Sophus::SE3f System::TrackStereo(const cv::Mat &imLeft, const cv::Mat &imRight,
else if(settings_ && settings_->needToResize()){
cv::resize(imLeft,imLeftToFeed,settings_->newImSize());
cv::resize(imRight,imRightToFeed,settings_->newImSize());
+ if (mask != nullptr){
+ cv::resize(*mask, maskToFeed, settings_->newImSize(), 0, 0, cv::INTER_NEAREST); // nearest neighbour keeps the mask strictly binary
+ }
}
else{
imLeftToFeed = imLeft.clone();
@@ -313,7 +317,7 @@ Sophus::SE3f System::TrackStereo(const cv::Mat &imLeft, const cv::Mat &imRight,
mpTracker->GrabImuData(vImuMeas[i_imu]);
// std::cout << "start GrabImageStereo" << std::endl;
- Sophus::SE3f Tcw = mpTracker->GrabImageStereo(imLeftToFeed,imRightToFeed,timestamp,filename);
+ Sophus::SE3f Tcw = mpTracker->GrabImageStereo(imLeftToFeed,imRightToFeed, mask == nullptr ? nullptr : &maskToFeed, timestamp,filename);
// std::cout << "out grabber" << std::endl;
@@ -325,7 +329,7 @@ Sophus::SE3f System::TrackStereo(const cv::Mat &imLeft, const cv::Mat &imRight,
return Tcw;
}
-Sophus::SE3f System::TrackRGBD(const cv::Mat &im, const cv::Mat &depthmap, const double &timestamp, const vector<IMU::Point>& vImuMeas, string filename)
+Sophus::SE3f System::TrackRGBD(const cv::Mat &im, const cv::Mat &depthmap, const cv::Mat *mask, const double &timestamp, const vector<IMU::Point>& vImuMeas, string filename)
{
if(mSensor!=RGBD && mSensor!=IMU_RGBD)
{
@@ -335,12 +339,15 @@ Sophus::SE3f System::TrackRGBD(const cv::Mat &im, const cv::Mat &depthmap, const
cv::Mat imToFeed = im.clone();
cv::Mat imDepthToFeed = depthmap.clone();
+ cv::Mat maskToFeed = (mask == nullptr) ? cv::Mat() : mask->clone();
if(settings_ && settings_->needToResize()){
cv::Mat resizedIm;
cv::resize(im,resizedIm,settings_->newImSize());
imToFeed = resizedIm;
cv::resize(depthmap,imDepthToFeed,settings_->newImSize());
+ if (mask != nullptr)
+ cv::resize(*mask, maskToFeed, settings_->newImSize(), 0, 0, cv::INTER_NEAREST); // nearest neighbour keeps the mask strictly binary
}
// Check mode change
@@ -387,7 +394,7 @@ Sophus::SE3f System::TrackRGBD(const cv::Mat &im, const cv::Mat &depthmap, const
for(size_t i_imu = 0; i_imu < vImuMeas.size(); i_imu++)
mpTracker->GrabImuData(vImuMeas[i_imu]);
- Sophus::SE3f Tcw = mpTracker->GrabImageRGBD(imToFeed,imDepthToFeed,timestamp,filename);
+ Sophus::SE3f Tcw = mpTracker->GrabImageRGBD(imToFeed,imDepthToFeed, mask == nullptr ? nullptr : &maskToFeed, timestamp,filename);
unique_lock<mutex> lock2(mMutexState);
mTrackingState = mpTracker->mState;
@@ -396,7 +403,7 @@ Sophus::SE3f System::TrackRGBD(const cv::Mat &im, const cv::Mat &depthmap, const
return Tcw;
}
-Sophus::SE3f System::TrackMonocular(const cv::Mat &im, const double &timestamp, const vector<IMU::Point>& vImuMeas, string filename)
+Sophus::SE3f System::TrackMonocular(const cv::Mat &im, const cv::Mat *mask, const double &timestamp, const vector<IMU::Point>& vImuMeas, string filename)
{
{
@@ -412,10 +419,15 @@ Sophus::SE3f System::TrackMonocular(const cv::Mat &im, const double &timestamp,
}
cv::Mat imToFeed = im.clone();
+ cv::Mat maskToFeed = (mask == nullptr) ? cv::Mat() : mask->clone();
if(settings_ && settings_->needToResize()){
cv::Mat resizedIm;
cv::resize(im,resizedIm,settings_->newImSize());
imToFeed = resizedIm;
+
+ if (mask != nullptr) {
+ cv::resize(*mask, maskToFeed, settings_->newImSize(), 0, 0, cv::INTER_NEAREST); // nearest neighbour keeps the mask strictly binary
+ }
}
// Check mode change
@@ -463,7 +475,7 @@ Sophus::SE3f System::TrackMonocular(const cv::Mat &im, const double &timestamp,
for(size_t i_imu = 0; i_imu < vImuMeas.size(); i_imu++)
mpTracker->GrabImuData(vImuMeas[i_imu]);
- Sophus::SE3f Tcw = mpTracker->GrabImageMonocular(imToFeed,timestamp,filename);
+ Sophus::SE3f Tcw = mpTracker->GrabImageMonocular(imToFeed, mask == nullptr ? nullptr : &maskToFeed, timestamp,filename);
unique_lock<mutex> lock2(mMutexState);
mTrackingState = mpTracker->mState;
diff --git a/src/Tracking.cc b/src/Tracking.cc
index 5191451a..0cf26fc8 100644
--- a/src/Tracking.cc
+++ b/src/Tracking.cc
@@ -1451,7 +1451,7 @@ bool Tracking::GetStepByStep()
-Sophus::SE3f Tracking::GrabImageStereo(const cv::Mat &imRectLeft, const cv::Mat &imRectRight, const double &timestamp, string filename)
+Sophus::SE3f Tracking::GrabImageStereo(const cv::Mat &imRectLeft, const cv::Mat &imRectRight, const cv::Mat *mask, const double &timestamp, string filename)
{
//cout << "GrabImageStereo" << endl;
@@ -1491,13 +1491,13 @@ Sophus::SE3f Tracking::GrabImageStereo(const cv::Mat &imRectLeft, const cv::Mat
//cout << "Incoming frame creation" << endl;
if (mSensor == System::STEREO && !mpCamera2)
- mCurrentFrame = Frame(mImGray,imGrayRight,timestamp,mpORBextractorLeft,mpORBextractorRight,mpORBVocabulary,mK,mDistCoef,mbf,mThDepth,mpCamera);
+ mCurrentFrame = Frame(mImGray,imGrayRight, mask, timestamp,mpORBextractorLeft,mpORBextractorRight,mpORBVocabulary,mK,mDistCoef,mbf,mThDepth,mpCamera);
else if(mSensor == System::STEREO && mpCamera2)
- mCurrentFrame = Frame(mImGray,imGrayRight,timestamp,mpORBextractorLeft,mpORBextractorRight,mpORBVocabulary,mK,mDistCoef,mbf,mThDepth,mpCamera,mpCamera2,mTlr);
+ mCurrentFrame = Frame(mImGray,imGrayRight, mask, timestamp,mpORBextractorLeft,mpORBextractorRight,mpORBVocabulary,mK,mDistCoef,mbf,mThDepth,mpCamera,mpCamera2,mTlr);
else if(mSensor == System::IMU_STEREO && !mpCamera2)
- mCurrentFrame = Frame(mImGray,imGrayRight,timestamp,mpORBextractorLeft,mpORBextractorRight,mpORBVocabulary,mK,mDistCoef,mbf,mThDepth,mpCamera,&mLastFrame,*mpImuCalib);
+ mCurrentFrame = Frame(mImGray,imGrayRight, mask, timestamp,mpORBextractorLeft,mpORBextractorRight,mpORBVocabulary,mK,mDistCoef,mbf,mThDepth,mpCamera,&mLastFrame,*mpImuCalib);
else if(mSensor == System::IMU_STEREO && mpCamera2)
- mCurrentFrame = Frame(mImGray,imGrayRight,timestamp,mpORBextractorLeft,mpORBextractorRight,mpORBVocabulary,mK,mDistCoef,mbf,mThDepth,mpCamera,mpCamera2,mTlr,&mLastFrame,*mpImuCalib);
+ mCurrentFrame = Frame(mImGray,imGrayRight, mask, timestamp,mpORBextractorLeft,mpORBextractorRight,mpORBVocabulary,mK,mDistCoef,mbf,mThDepth,mpCamera,mpCamera2,mTlr,&mLastFrame,*mpImuCalib);
//cout << "Incoming frame ended" << endl;
@@ -1517,7 +1517,7 @@ Sophus::SE3f Tracking::GrabImageStereo(const cv::Mat &imRectLeft, const cv::Mat
}
-Sophus::SE3f Tracking::GrabImageRGBD(const cv::Mat &imRGB,const cv::Mat &imD, const double &timestamp, string filename)
+Sophus::SE3f Tracking::GrabImageRGBD(const cv::Mat &imRGB, const cv::Mat &imD, const cv::Mat *mask, const double &timestamp, string filename)
{
mImGray = imRGB;
cv::Mat imDepth = imD;
@@ -1541,9 +1541,9 @@ Sophus::SE3f Tracking::GrabImageRGBD(const cv::Mat &imRGB,const cv::Mat &imD, co
imDepth.convertTo(imDepth,CV_32F,mDepthMapFactor);
if (mSensor == System::RGBD)
- mCurrentFrame = Frame(mImGray,imDepth,timestamp,mpORBextractorLeft,mpORBVocabulary,mK,mDistCoef,mbf,mThDepth,mpCamera);
+ mCurrentFrame = Frame(mImGray,imDepth, mask, timestamp,mpORBextractorLeft,mpORBVocabulary,mK,mDistCoef,mbf,mThDepth,mpCamera);
else if(mSensor == System::IMU_RGBD)
- mCurrentFrame = Frame(mImGray,imDepth,timestamp,mpORBextractorLeft,mpORBVocabulary,mK,mDistCoef,mbf,mThDepth,mpCamera,&mLastFrame,*mpImuCalib);
+ mCurrentFrame = Frame(mImGray,imDepth, mask, timestamp,mpORBextractorLeft,mpORBVocabulary,mK,mDistCoef,mbf,mThDepth,mpCamera,&mLastFrame,*mpImuCalib);
@@ -1563,7 +1563,7 @@ Sophus::SE3f Tracking::GrabImageRGBD(const cv::Mat &imRGB,const cv::Mat &imD, co
}
-Sophus::SE3f Tracking::GrabImageMonocular(const cv::Mat &im, const double &timestamp, string filename)
+Sophus::SE3f Tracking::GrabImageMonocular(const cv::Mat &im, const cv::Mat *mask, const double &timestamp, string filename)
{
mImGray = im;
if(mImGray.channels()==3)
@@ -1584,18 +1584,18 @@ Sophus::SE3f Tracking::GrabImageMonocular(const cv::Mat &im, const double &timestamp
if (mSensor == System::MONOCULAR)
{
if(mState==NOT_INITIALIZED || mState==NO_IMAGES_YET ||(lastID - initID) < mMaxFrames)
- mCurrentFrame = Frame(mImGray,timestamp,mpIniORBextractor,mpORBVocabulary,mpCamera,mDistCoef,mbf,mThDepth);
+ mCurrentFrame = Frame(mImGray, mask, timestamp,mpIniORBextractor,mpORBVocabulary,mpCamera,mDistCoef,mbf,mThDepth);
else
- mCurrentFrame = Frame(mImGray,timestamp,mpORBextractorLeft,mpORBVocabulary,mpCamera,mDistCoef,mbf,mThDepth);
+ mCurrentFrame = Frame(mImGray, mask, timestamp,mpORBextractorLeft,mpORBVocabulary,mpCamera,mDistCoef,mbf,mThDepth);
}
else if(mSensor == System::IMU_MONOCULAR)
{
if(mState==NOT_INITIALIZED || mState==NO_IMAGES_YET)
{
- mCurrentFrame = Frame(mImGray,timestamp,mpIniORBextractor,mpORBVocabulary,mpCamera,mDistCoef,mbf,mThDepth,&mLastFrame,*mpImuCalib);
+ mCurrentFrame = Frame(mImGray, mask, timestamp,mpIniORBextractor,mpORBVocabulary,mpCamera,mDistCoef,mbf,mThDepth,&mLastFrame,*mpImuCalib);
}
else
- mCurrentFrame = Frame(mImGray,timestamp,mpORBextractorLeft,mpORBVocabulary,mpCamera,mDistCoef,mbf,mThDepth,&mLastFrame,*mpImuCalib);
+ mCurrentFrame = Frame(mImGray, mask, timestamp,mpORBextractorLeft,mpORBVocabulary,mpCamera,mDistCoef,mbf,mThDepth,&mLastFrame,*mpImuCalib);
}
if (mState==NO_IMAGES_YET)