SLAMflex SE  0.1.0
SLAMflex provides detection and tracking of a dominant plane for smartphone devices. This plane can then be used to show AR content relative to the plane's orientation. The plane is detected in the field of view of the smartphone camera and tracked in subsequent frames. The interface returns the plane's position and orientation.
Tracker.cpp
Go to the documentation of this file.
1 // Copyright 2008 Isis Innovation Limited
2 //#import <OpenGLES/ES1/gl.h>
3 //#import <OpenGLES/ES1/glext.h>
4 #include "Tracker.h"
5 #include "MEstimator.h"
6 #include "ShiTomasi.h"
7 #include "SmallMatrixOpts.h"
8 #include "PatchFinder.h"
9 #include "TrackerData.h"
10 #include "globals.h"
11 
12 #include "utility.h"
13 #include "fast_corner.h"
14 #include "vision.h"
15 #include "wls.h"
16 
17 #include <fstream>
18 #include <fcntl.h>
19 
20 #include <math.h>
21 
22 using namespace CVD;
23 using namespace std;
24 
25 
// The constructor mostly sets up internal reference variables to the other
// classes (map, map-maker, camera, relocaliser)..
// @param irVideoSize  dimensions of the incoming video frames
// @param c            calibrated camera model (copied into mCamera)
// @param m            the shared Map instance
// @param mm           the MapMaker that builds the map (runs its own thread)
Tracker::Tracker(ImageRef irVideoSize, const ATANCamera &c, Map &m, MapMaker &mm) :
mMap(m),
mMapMaker(mm),
mCamera(c),
mRelocaliser(mMap, mCamera),
mirSize(irVideoSize)
{
    // The working keyframe is not a fixed one.
    mCurrentKF.bFixed = false;
    // NOTE(review): one initialisation statement appears truncated from this
    // listing between bFixed and the SBI pointers -- verify against upstream.

    // No small-blurry-image pair yet; they are allocated lazily on the
    // first call to TrackFrame().
    mpSBILastFrame = NULL;
    mpSBIThisFrame = NULL;

    // Most of the initialisation is done in Reset()
    Reset();
}
// Concatenate a label and any streamable value into a single string,
// e.g. tostr("Corners: ", 42) -> "Corners: 42".
// @param w  prefix text placed before the value (taken by const reference to
//           avoid copying the string on every call)
// @param t  any value with an operator<< overload for std::ostream
// @return   w followed by the textual form of t
template <typename T> std::string tostr(const std::string& w, const T& t) {
    std::ostringstream os;
    os << w << t;
    return os.str();
}
50 {
52 }
53 
55 {
57  Reset();
58 }
59 
61 {
62  sendToUnity = sm;
63 }
64 
66 {
67  return currentState;
68 }
69 
70 
71 // Resets the tracker, wipes the map.
72 // This is the main Reset-handler-entry-point of the program! Other classes' resets propagate from here.
73 // It's always called in the Tracker's thread, often as a GUI command.
75 {
76  std::cout << "Tracker resetting..." << std::endl;
77  LOGV(LOG_TAG, "Info: %s", "Tracker resetting...");
78  mbDidCoarse = false;
79  mbUserPressedSpacebar = false;
81  mnLostFrames = 0;
86  mlTrails.clear();
88  mCurrentKF.mMeasurements.clear();
90  mnFrame=0;
93  threshold = 10;
94  thresholdReached = false;
96 
97  // Tell the MapMaker to reset itself..
98  // this may take some time, since the mapmaker thread may have to wait
99  // for an abort-check during calculation, so sleep while waiting.
100  // MapMaker will also clear the map.
102  while(!mMapMaker.ResetDone())
103  usleep(10);
104 
105  std::cout << "Tracker reset." << std::endl;
106  LOGV(LOG_TAG, "Info: %s", "Tracker reset.");
107  sendToUnity.SendStringToUnity("Tracker_Reset");
108 }
109 
// Per-frame entry point: converts the input image into the tracker's keyframe
// struct, updates the small-blurry-image rotation estimator, then either
// tracks the existing map or (if none) works towards building the initial map.
// Finally drains the queued GUI commands.
// @param imFrame  greyscale input frame
// @param hnd      frame handle from the caller (unused in the visible code -- TODO confirm)
// @param bDraw    whether the caller wants this frame drawn
void Tracker::TrackFrame(Image<byte> &imFrame, uint hnd,bool bDraw)
{
    mbDraw = bDraw;
    mMessageForUser.str("");   // wipe the user-facing status text for this frame

    // Take the input video image, and convert it into the tracker's keyframe struct
    // This does things like generate the image pyramid and find FAST corners
    mCurrentKF.mMeasurements.clear();
    // Finding corners
    int numCrns;
    numCrns = mCurrentKF.MakeKeyFrame_Lite(imFrame, threshold);

    // Adaptive FAST threshold: nudge the barrier up or down based on the
    // corner count. NOTE(review): the guard conditions for these braces are
    // missing from this listing -- restore/verify against the full source.
    {
    {
    threshold+=5;
    }
    {
    if(threshold > 14)   // the -5 step keeps the threshold at 10 or above
    threshold -= 5;
    }
    }


    // Update the small images for the rotation estimator
    static double gvdSBIBlur = TrackerRotationEstimatorBlur;
    static int gvnUseSBI = TrackerUseRotationEstimator;
    mbUseSBIInit = gvnUseSBI;
    if(!mpSBIThisFrame)
    {
        // First frame: allocate both small blurry images from the current KF.
        mpSBIThisFrame = new SmallBlurryImage(mCurrentKF, gvdSBIBlur);
        mpSBILastFrame = new SmallBlurryImage(mCurrentKF, gvdSBIBlur);
    }
    else
    {
        delete mpSBILastFrame;
        // NOTE(review): the hand-over assignment (last <- this) appears
        // truncated from this listing; as shown mpSBILastFrame would dangle
        // -- verify against the full source.
        mpSBIThisFrame = new SmallBlurryImage(mCurrentKF, gvdSBIBlur);
    }

    // From now on we only use the keyframe struct!
    mnFrame++;

    // Report corner count and current FAST barrier to the Unity layer.
    std::ostringstream points;
    points << "Corners: " << mCurrentKF.aLevels[0].vCorners.size() << " Barrier: " << threshold << endl;
    sendToUnity.SendStringToUnity(points.str().c_str());

    //Sending corners to Unity3D for rendering
    {
        // NOTE(review): a runtime-sized stack array (VLA) is a compiler
        // extension, not standard C++ -- consider std::vector. The guard
        // condition and the actual send call for this scope are missing
        // from this listing.
        int crnrs [mCurrentKF.aLevels[0].vCorners.size()];
        for (int i=0; i<mCurrentKF.aLevels[0].vCorners.size(); i++)
        {
            // Flatten (x,y) into a single index assuming a 640x480 image,
            // with the y axis flipped.
            crnrs[i]=(-640*(mCurrentKF.aLevels[0].vCorners[i].y-480))+mCurrentKF.aLevels[0].vCorners[i].x;
        }

    }

    // Decide what to do - if there is a map, try to track the map ...
    if(mMap.IsGood())
    {
        if(1)//mnLostFrames < 3) // .. but only if we're not lost!
        {
//            cout << "----Map is good----"<<endl;
            if(mbUseSBIInit)
                CalcSBIRotation();
            ApplyMotionModel(); //
            TrackMap(); // These three lines do the main tracking work.
            UpdateMotionModel(); //

            AssessTrackingQuality(); // Check if we're lost or if tracking is poor.

            { // Provide some feedback for the user:
                mMessageForUser << "Tracking Map, quality ";
                if(mTrackingQuality == GOOD) mMessageForUser << "good.";
                if(mTrackingQuality == DODGY) mMessageForUser << "poor.";
                if(mTrackingQuality == BAD) mMessageForUser << "bad.";
                mMessageForUser << " Found:";
                for(int i=0; i<LEVELS; i++) mMessageForUser << " " << manMeasFound[i] << "/" << manMeasAttempted[i];
//                mMessageForUser << " Found " << mnMeasFound << " of " << mnMeasAttempted <<". (";
                mMessageForUser << " Map: " << mMap.vpPoints.size() << "P, " << mMap.vpKeyFrames.size() << "KF";
            }

            // Heuristics to check if a key-frame should be added to the map:
            // NOTE(review): further conditions of this && chain are missing
            // from this listing (lines elided before the queue-size check).
            if(mTrackingQuality == GOOD &&
               mMapMaker.QueueSize() < 3)
            {
                mMessageForUser << " Adding key-frame.";
                AddNewKeyFrame();
                LOGV(LOG_TAG, "Info_Key-Frame: %s", "Adding key-frame");
            };
        }
        else // what if there is a map, but tracking has been lost?
        {
            mMessageForUser << "** Attempting recovery **.";
            if(AttemptRecovery())
            {
                TrackMap();
            }
        }


        // Convert the camera pose into Euler angles plus translation and
        // push it to the Unity layer.
        Matrix<3,3,double> rotMatrix = mse3CamFromWorld.get_rotation().get_matrix();
        // NOTE(review): only `bank` is initialised here; heading/attitude are
        // assigned by GetEulerAnglesFromRotationMatrix below. The extraction
        // of transVector is not visible in this listing -- verify.
        float heading, attitude, bank = 0;
        GetEulerAnglesFromRotationMatrix(rotMatrix, heading , attitude, bank);
        std::ostringstream data;
        data <<RadiansToDegrees(bank)+180<<";"<<RadiansToDegrees(attitude)<<";"<<RadiansToDegrees(heading)<<":"<<transVector[0]<<";"<<transVector[1]<<";"<<transVector[2]<<endl;

        sendToUnity.SendPoseToUnity(RadiansToDegrees(bank)+180, RadiansToDegrees(attitude), RadiansToDegrees(heading), transVector[0], transVector[1], transVector[2]);

//        LOGV(LOG_TAG, "Pose: %s", data.str().c_str());
    }
    else // If there is no map, try to make one.
    {
        cout << "Track for initial map"<<endl;
        // NOTE(review): the call into the initial-map routine appears
        // truncated from this listing.
    }
    // GUI interface
    while(!mvQueuedCommands.empty())
    {
        GUICommandHandler(mvQueuedCommands.begin()->sCommand, mvQueuedCommands.begin()->sParams);
        mvQueuedCommands.erase(mvQueuedCommands.begin());
    }
};
243 
244 
254 void Tracker::GetEulerAnglesFromRotationMatrix(Matrix<3,3, double> &m, float &heading, float &attitude, float &bank) {
255  // Assuming the angles are in radians.
256  if (m(1,0) > 0.998) { // singularity at north pole
257  heading = atan2(m(0,2),m(2,2));
258  attitude = M_PI_2;
259  bank = 0;
260  return;
261  }
262  if (m(1,0) < -0.998) { // singularity at south pole
263  heading = atan2(m(0,2),m(2,2));
264  attitude = - M_PI_2;
265  bank = 0;
266  return;
267  }
268  heading = atan2(-m(2,0),m(0,0));
269  bank = atan2(-m(1,2),m(1,1));
270  attitude = asin(m(1,0));
271 }
272 
273 // Try to relocalise in case tracking was lost.
274 // Returns success or failure as a bool.
275 // Actually, the SBI relocaliser will almost always return true, even if
276 // it has no idea where it is, so graphics will go a bit
277 // crazy when lost. Could use a tighter SSD threshold and return more false,
278 // but the way it is now gives a snappier response and I prefer it.
280 {
281  bool bRelocGood = mRelocaliser.AttemptRecovery(mCurrentKF);
282  if(!bRelocGood)
283  return false;
284 
285  SE3<> se3Best = mRelocaliser.BestPose();
286  mse3CamFromWorld = mse3StartPos = se3Best;
289  return true;
290 }
291 
292 // GUI interface. Stuff commands onto the back of a queue so the tracker handles
293 // them in its own thread at the end of each frame. Note the charming lack of
294 // any thread safety (no lock on mvQueuedCommands).
295 void Tracker::GUICommandCallBack(void* ptr, string sCommand, string sParams)
296 {
297  Command c;
298  c.sCommand = sCommand;
299  c.sParams = sParams;
300  ((Tracker*) ptr)->mvQueuedCommands.push_back(c);
301 }
302 
303 // This is called in the tracker's own thread.
304 void Tracker::GUICommandHandler(string sCommand, string sParams) // Called by the callback func..
305 {
306  if(sCommand=="Reset")
307  {
308  Reset();
309  return;
310  }
311 
312  // KeyPress commands are issued by GLWindow
313  if(sCommand=="KeyPress")
314  {
315  if(sParams == "Space")
316  {
317  mbUserPressedSpacebar = true;
318  }
319  else if(sParams == "r")
320  {
321  Reset();
322  }
323  else if(sParams == "q" || sParams == "Escape")
324  {
325  //GUI.ParseLine("quit");
326  }
327  return;
328  }
329  if((sCommand=="PokeTracker"))
330  {
331  mbUserPressedSpacebar = true;
332  return;
333  }
334 
335 
336  cout << "! Tracker::GUICommandHandler: unhandled command "<< sCommand << endl;
337  exit(1);
338 };
339 
340 // Routine for establishing the initial map. This requires two spacebar presses from the user
341 // to define the first two key-frames. Salient points are tracked between the two keyframes
342 // using cheap frame-to-frame tracking (which is very brittle - quick camera motion will
343 // break it.) The salient points are stored in a list of `Trail' data structures.
344 // What action TrackForInitialMap() takes depends on the mnInitialStage enum variable..
346 {
347  // MiniPatch tracking threshhold.
348  static int gvnMaxSSD = TrackerMiniPatchMaxSSD;
349  MiniPatch::mnMaxSSD = gvnMaxSSD;
350 
351  // What stage of initial tracking are we at?
353  {
354  if(mbUserPressedSpacebar) // First spacebar = this is the first keyframe
355  {
356  mbUserPressedSpacebar = false;
359 
361  thresholdReached = true;
362  }
363  else
364  mMessageForUser << "Point camera at planar scene and press spacebar to start tracking for initial map." << endl;
365  return;
366  };
367 
369  {
370  int nGoodTrails = TrailTracking_Advance(); // This call actually tracks the trails
371  if(nGoodTrails < 10) // if most trails have been wiped out, no point continuing.
372  {
373  sendToUnity.SendStringToUnity("Tracker_ResetMM");
374  Reset();
375  return;
376  }
377 
378  // If the user pressed spacebar here, use trails to run stereo and make the intial map..
380  {
381  clock_t startMapMaker, stopMapMaker;
382  int numOfCorners;
383  startMapMaker = clock();
384  numOfCorners = mCurrentKF.aLevels[0].vCorners.size();
385 
386  mbUserPressedSpacebar = false;
387  vector<pair<ImageRef, ImageRef> > vMatches; // This is the format the mapmaker wants for the stereo pairs
388  for(list<Trail>::iterator i = mlTrails.begin(); i!=mlTrails.end(); i++)
389  vMatches.push_back(pair<ImageRef, ImageRef>(i->irInitialPos,
390  i->irCurrentPos));
391  bool result = mMapMaker.InitFromStereo(mFirstKF, mCurrentKF, vMatches, mse3CamFromWorld); // This will take some time!
392  if (!result)
393  {
394  Reset();
395  LOGV(LOG_TAG, "MapMaker.InitFromStereo: Failed, Tracker: Reset()");
396  }
397 
398  if(EnableLogging)
399  {
400  stopMapMaker = clock();
401  std::ostringstream message;
402  message <<"MapMaker: number of corners : "<< numOfCorners <<"::Number of trails: "<< vMatches.size()<< ":: Duration of initFromStereo: " << (float)(stopMapMaker - startMapMaker)/CLOCKS_PER_SEC*1000 << endl;
403  sendToUnity.SendLogToUnity(message.str().c_str());
404  }
406  }
407  else
408  mMessageForUser << "Translate the camera slowly sideways, and press spacebar again to perform stereo init." << endl;
409  }
410 }
411 
412 // The current frame is to be the first keyframe!
414 {
415  mCurrentKF.MakeKeyFrame_Rest(); // This populates the Candidates list, which is Shi-Tomasi thresholded.
416  mFirstKF = mCurrentKF;
417  vector<pair<double,ImageRef> > vCornersAndSTScores;
418  for(unsigned int i=0; i<mCurrentKF.aLevels[0].vCandidates.size(); i++) // Copy candidates into a trivially sortable vector
419  { // so that we can choose the image corners with max ST score
422  continue;
423  vCornersAndSTScores.push_back(pair<double,ImageRef>(-1.0 * c.dSTScore, c.irLevelPos)); // negative so highest score first in sorted list
424  };
425  sort(vCornersAndSTScores.begin(), vCornersAndSTScores.end()); // Sort according to Shi-Tomasi score
426  int nToAdd = MaxInitialTrails;
427  for(unsigned int i = 0; i<vCornersAndSTScores.size() && nToAdd > 0; i++)
428  {
429  if(!mCurrentKF.aLevels[0].im.in_image_with_border(vCornersAndSTScores[i].second, MiniPatch::mnHalfPatchSize))
430  continue;
431  Trail t;
432  t.mPatch.SampleFromImage(vCornersAndSTScores[i].second, mCurrentKF.aLevels[0].im);
433  t.irInitialPos = vCornersAndSTScores[i].second;
435  mlTrails.push_back(t);
436  nToAdd--;
437  }
438  mPreviousFrameKF = mFirstKF; // Always store the previous frame so married-matching can work.
439 }
440 
441 // Steady-state trail tracking: Advance from the previous frame, remove duds.
443 {
444  int nGoodTrails = 0;
445  MiniPatch BackwardsPatch;
446  Level &lCurrentFrame = mCurrentKF.aLevels[0];
447  Level &lPreviousFrame = mPreviousFrameKF.aLevels[0];
448  std::cout << "number of trails: " << mlTrails.size() << std::endl;
449  LOGV(LOG_TAG, "number of trails: %i", mlTrails.size());
450 
451  for(list<Trail>::iterator i = mlTrails.begin(); i!=mlTrails.end();)
452  {
453  list<Trail>::iterator next = i; next++;
454 
455  Trail &trail = *i;
456  ImageRef irStart = trail.irCurrentPos;
457  ImageRef irEnd = irStart;
458  bool bFound = trail.mPatch.FindPatch(irEnd, lCurrentFrame.im, 10, lCurrentFrame.vCorners);
459  if(bFound)
460  {
461  // Also find backwards in a married-matches check
462  BackwardsPatch.SampleFromImage(irEnd, lCurrentFrame.im);
463  ImageRef irBackWardsFound = irEnd;
464  bFound = BackwardsPatch.FindPatch(irBackWardsFound, lPreviousFrame.im, 10, lPreviousFrame.vCorners);
465  if((irBackWardsFound - irStart).mag_squared() > 2)
466  bFound = false;
467 
468  trail.irCurrentPos = irEnd;
469  nGoodTrails++;
470  }
471 
472  if(!bFound) // Erase from list of trails if not found this frame.
473  {
474  mlTrails.erase(i);
475  }
476  i = next;
477  }
478 
480  return nGoodTrails;
481 }
482 
483 // TrackMap is the main purpose of the Tracker.
484 // It first projects all map points into the image to find a potentially-visible-set (PVS);
485 // Then it tries to find some points of the PVS in the image;
486 // Then it updates camera pose according to any points found.
487 // Above may happen twice if a coarse tracking stage is performed.
488 // Finally it updates the tracker's current-frame-KeyFrame struct with any
489 // measurements made.
490 // A lot of low-level functionality is split into helper classes:
491 // class TrackerData handles the projection of a MapPoint and stores intermediate results;
492 // class PatchFinder finds a projected MapPoint in the current-frame-KeyFrame.
494 {
495  if (mbUserPressedSpacebar) {
496  StopTracking();
497  }
498  // Some accounting which will be used for tracking quality assessment:
499  for(int i=0; i<LEVELS; i++)
500  manMeasAttempted[i] = manMeasFound[i] = 0;
501 
502  // The Potentially-Visible-Set (PVS) is split into pyramid levels.
503  vector<TrackerData*> avPVS[LEVELS];
504  for(int i=0; i<LEVELS; i++)
505  avPVS[i].reserve(500);
506 
507  // For all points in the map..
508  for(unsigned int i=0; i<mMap.vpPoints.size(); i++)
509  {
510  MapPoint &p= *(mMap.vpPoints[i]);
511  // Ensure that this map point has an associated TrackerData struct.
512  if(!p.pTData) p.pTData = new TrackerData(&p);
513  TrackerData &TData = *p.pTData;
514 
515  // Project according to current view, and if it's not in the image, skip.
517  if(!TData.bInImage)
518  continue;
519 
520  // Calculate camera projection derivatives of this point.
521  TData.GetDerivsUnsafe(mCamera);
522 
523  // And check what the PatchFinder (included in TrackerData) makes of the mappoint in this view..
525  if(TData.nSearchLevel == -1)
526  continue; // a negative search pyramid level indicates an inappropriate warp for this view, so skip.
527 
528  // Otherwise, this point is suitable to be searched in the current image! Add to the PVS.
529  TData.bSearched = false;
530  TData.bFound = false;
531  avPVS[TData.nSearchLevel].push_back(&TData);
532  };
533 
534  // Next: A large degree of faffing about and deciding which points are going to be measured!
535  // First, randomly shuffle the individual levels of the PVS.
536  for(int i=0; i<LEVELS; i++)
537  random_shuffle(avPVS[i].begin(), avPVS[i].end());
538 
539  // The next two data structs contain the list of points which will next
540  // be searched for in the image, and then used in pose update.
541  vector<TrackerData*> vNextToSearch;
542  vector<TrackerData*> vIterationSet;
543 
544  // Tunable parameters to do with the coarse tracking stage:
545  static unsigned int gvnCoarseMin=TrackerCoarseMin; // Min number of large-scale features for coarse stage
546  static unsigned int gvnCoarseMax=TrackerCoarseMax; // Max number of large-scale features for coarse stage
547  static unsigned int gvnCoarseRange=TrackerCoarseRange; // Pixel search radius for coarse features
548  static int gvnCoarseSubPixIts=TrackerCoarseSubPixIts; // Max sub-pixel iterations for coarse features
549  static int gvnCoarseDisabled=TrackerDisableCoarse; // Set this to 1 to disable coarse stage (except after recovery)
550  static double gvdCoarseMinVel=TrackerCoarseMinVelocity; // Speed above which coarse stage is used.
551 
552  unsigned int nCoarseMax = gvnCoarseMax;
553  unsigned int nCoarseRange = gvnCoarseRange;
554 
555  mbDidCoarse = false;
556 
557  // Set of heuristics to check if we should do a coarse tracking stage.
558  bool bTryCoarse = true;
559  if(gvnCoarseDisabled ||
560  mdMSDScaledVelocityMagnitude < gvdCoarseMinVel ||
561  nCoarseMax == 0)
562  bTryCoarse = false;
564  {
565  bTryCoarse = true;
566  nCoarseMax *=2;
567  nCoarseRange *=2;
569  };
570 
571  // If we do want to do a coarse stage, also check that there's enough high-level
572  // PV map points. We use the lowest-res two pyramid levels (LEVELS-1 and LEVELS-2),
573  // with preference to LEVELS-1.
574  if(bTryCoarse && avPVS[LEVELS-1].size() + avPVS[LEVELS-2].size() > gvnCoarseMin )
575  {
576  // Now, fill the vNextToSearch struct with an appropriate number of
577  // TrackerDatas corresponding to coarse map points! This depends on how many
578  // there are in different pyramid levels compared to CoarseMin and CoarseMax.
579 
580  if(avPVS[LEVELS-1].size() <= nCoarseMax)
581  { // Fewer than CoarseMax in LEVELS-1? then take all of them, and remove them from the PVS list.
582  vNextToSearch = avPVS[LEVELS-1];
583  avPVS[LEVELS-1].clear();
584  }
585  else
586  { // ..otherwise choose nCoarseMax at random, again removing from the PVS list.
587  for(unsigned int i=0; i<nCoarseMax; i++)
588  vNextToSearch.push_back(avPVS[LEVELS-1][i]);
589  avPVS[LEVELS-1].erase(avPVS[LEVELS-1].begin(), avPVS[LEVELS-1].begin() + nCoarseMax);
590  }
591 
592  // If didn't source enough from LEVELS-1, get some from LEVELS-2... same as above.
593  if(vNextToSearch.size() < nCoarseMax)
594  {
595  unsigned int nMoreCoarseNeeded = nCoarseMax - vNextToSearch.size();
596  if(avPVS[LEVELS-2].size() <= nMoreCoarseNeeded)
597  {
598  vNextToSearch = avPVS[LEVELS-2];
599  avPVS[LEVELS-2].clear();
600  }
601  else
602  {
603  for(unsigned int i=0; i<nMoreCoarseNeeded; i++)
604  vNextToSearch.push_back(avPVS[LEVELS-2][i]);
605  avPVS[LEVELS-2].erase(avPVS[LEVELS-2].begin(), avPVS[LEVELS-2].begin() + nMoreCoarseNeeded);
606  }
607  }
608  // Now go and attempt to find these points in the image!
609  unsigned int nFound = SearchForPoints(vNextToSearch, nCoarseRange, gvnCoarseSubPixIts);
610  vIterationSet = vNextToSearch; // Copy over into the to-be-optimised list.
611  if(nFound >= gvnCoarseMin) // Were enough found to do any meaningful optimisation?
612  {
613  mbDidCoarse = true;
614  for(int iter = 0; iter<10; iter++) // If so: do ten Gauss-Newton pose updates iterations.
615  {
616  if(iter != 0)
617  { // Re-project the points on all but the first iteration.
618  for(unsigned int i=0; i<vIterationSet.size(); i++)
619  if(vIterationSet[i]->bFound)
620  vIterationSet[i]->ProjectAndDerivs(mse3CamFromWorld, mCamera);
621  }
622  for(unsigned int i=0; i<vIterationSet.size(); i++)
623  if(vIterationSet[i]->bFound)
624  vIterationSet[i]->CalcJacobian();
625  double dOverrideSigma = 0.0;
626  // Hack: force the MEstimator to be pretty brutal
627  // with outliers beyond the fifth iteration.
628  if(iter > 5)
629  dOverrideSigma = 1.0;
630 
631  // Calculate and apply the pose update...
632  Vector<6> v6Update =
633  CalcPoseUpdate(vIterationSet, dOverrideSigma);
635  };
636  }
637  };
638 
639  // So, at this stage, we may or may not have done a coarse tracking stage.
640  // Now do the fine tracking stage. This needs many more points!
641 
642  int nFineRange = 10; // Pixel search range for the fine stage.
643  if(mbDidCoarse) // Can use a tighter search if the coarse stage was already done.
644  nFineRange = 5;
645 
646  // What patches shall we use this time? The high-level ones are quite important,
647  // so do all of these, with sub-pixel refinement.
648  {
649  int l = LEVELS - 1;
650  for(unsigned int i=0; i<avPVS[l].size(); i++)
651  avPVS[l][i]->ProjectAndDerivs(mse3CamFromWorld, mCamera);
652  SearchForPoints(avPVS[l], nFineRange, 8);
653  for(unsigned int i=0; i<avPVS[l].size(); i++)
654  vIterationSet.push_back(avPVS[l][i]); // Again, plonk all searched points onto the (maybe already populate) vIterationSet.
655  };
656 
657  // All the others levels: Initially, put all remaining potentially visible patches onto vNextToSearch.
658  vNextToSearch.clear();
659  for(int l=LEVELS - 2; l>=0; l--)
660  for(unsigned int i=0; i<avPVS[l].size(); i++)
661  vNextToSearch.push_back(avPVS[l][i]);
662 
663  // But we haven't got CPU to track _all_ patches in the map - arbitrarily limit
664  // ourselves to 1000, and choose these randomly.
665  static int gvnMaxPatchesPerFrame=TrackerMaxPatchesPerFrame;
666  int nFinePatchesToUse = gvnMaxPatchesPerFrame - vIterationSet.size();
667  if(nFinePatchesToUse < 0)
668  nFinePatchesToUse = 0;
669  if((int) vNextToSearch.size() > nFinePatchesToUse)
670  {
671  random_shuffle(vNextToSearch.begin(), vNextToSearch.end());
672  vNextToSearch.resize(nFinePatchesToUse); // Chop!
673  };
674 
675  // If we did a coarse tracking stage: re-project and find derivs of fine points
676  if(mbDidCoarse)
677  for(unsigned int i=0; i<vNextToSearch.size(); i++)
678  vNextToSearch[i]->ProjectAndDerivs(mse3CamFromWorld, mCamera);
679 
680  // Find fine points in image:
681  SearchForPoints(vNextToSearch, nFineRange, 0);
682  // And attach them all to the end of the optimisation-set.
683  for(unsigned int i=0; i<vNextToSearch.size(); i++)
684  vIterationSet.push_back(vNextToSearch[i]);
685 
686  // Again, ten gauss-newton pose update iterations.
687  Vector<6> v6LastUpdate;
688  v6LastUpdate = Zeros;
689  for(int iter = 0; iter<10; iter++)
690  {
691  bool bNonLinearIteration; // For a bit of time-saving: don't do full nonlinear
692  // reprojection at every iteration - it really isn't necessary!
693  if(iter == 0 || iter == 4 || iter == 9)
694  bNonLinearIteration = true; // Even this is probably overkill, the reason we do many
695  else // iterations is for M-Estimator convergence rather than
696  bNonLinearIteration = false; // linearisation effects.
697 
698  if(iter != 0) // Either way: first iteration doesn't need projection update.
699  {
700  if(bNonLinearIteration)
701  {
702  for(unsigned int i=0; i<vIterationSet.size(); i++)
703  if(vIterationSet[i]->bFound)
704  vIterationSet[i]->ProjectAndDerivs(mse3CamFromWorld, mCamera);
705  }
706  else
707  {
708  for(unsigned int i=0; i<vIterationSet.size(); i++)
709  if(vIterationSet[i]->bFound)
710  vIterationSet[i]->LinearUpdate(v6LastUpdate);
711  };
712  }
713 
714  if(bNonLinearIteration)
715  for(unsigned int i=0; i<vIterationSet.size(); i++)
716  if(vIterationSet[i]->bFound)
717  vIterationSet[i]->CalcJacobian();
718 
719  // Again, an M-Estimator hack beyond the fifth iteration.
720  double dOverrideSigma = 0.0;
721  if(iter > 5)
722  dOverrideSigma = 16.0;
723 
724  // Calculate and update pose; also store update vector for linear iteration updates.
725  Vector<6> v6Update =
726  CalcPoseUpdate(vIterationSet, dOverrideSigma, iter==9);
728  v6LastUpdate = v6Update;
729  };
730 
731  // Update the current keyframe with info on what was found in the frame.
732  // Strictly speaking this is unnecessary to do every frame, it'll only be
733  // needed if the KF gets added to MapMaker. Do it anyway.
734  // Export pose to current keyframe:
736 
737  // Record successful measurements. Use the KeyFrame-Measurement struct for this.
738  mCurrentKF.mMeasurements.clear();
739  for(vector<TrackerData*>::iterator it = vIterationSet.begin();
740  it!= vIterationSet.end();
741  it++)
742  {
743  if(! (*it)->bFound)
744  continue;
745  Measurement m;
746  m.v2RootPos = (*it)->v2Found;
747  m.nLevel = (*it)->nSearchLevel;
748  m.bSubPix = (*it)->bDidSubPix;
749  mCurrentKF.mMeasurements[& ((*it)->Point)] = m;
750  }
751 
752  // Finally, find the mean scene depth from tracked features
753  {
754  double dSum = 0;
755  double dSumSq = 0;
756  int nNum = 0;
757  for(vector<TrackerData*>::iterator it = vIterationSet.begin();
758  it!= vIterationSet.end();
759  it++)
760  if((*it)->bFound)
761  {
762  double z = (*it)->v3Cam[2];
763  dSum+= z;
764  dSumSq+= z*z;
765  nNum++;
766  };
767  if(nNum > 20)
768  {
769  mCurrentKF.dSceneDepthMean = dSum/nNum;
771  }
772  }
773 }
774 
// Find points in the image. Uses the PatchFinder struct stored in TrackerData.
// For each candidate point: build a warped template, search coarsely at
// FAST-corner locations within nRange pixels of the predicted position, and
// optionally refine the match to sub-pixel accuracy. Per-point search state
// (bSearched/bFound/v2Found/...) is recorded on the TrackerData itself, and
// the per-level attempt/found counters feed AssessTrackingQuality().
// @param vTD        candidate map points to search for
// @param nRange     coarse search radius in pixels
// @param nSubPixIts max sub-pixel iterations (0 = accept the coarse position)
// @return           number of points found (sub-pixel failures are un-counted)
int Tracker::SearchForPoints(vector<TrackerData*> &vTD, int nRange, int nSubPixIts)
{
    int nFound = 0;
    for(unsigned int i=0; i<vTD.size(); i++) // for each point..
    {
        // First, attempt a search at pixel locations which are FAST corners.
        // (PatchFinder::FindPatchCoarse)
        TrackerData &TD = *vTD[i];
        PatchFinder &Finder = TD.Finder;
        Finder.MakeTemplateCoarseCont(TD.Point);
        if(Finder.TemplateBad())
        {
            // Unusable warp for this view: drop the point entirely this frame.
            TD.bInImage = TD.bPotentiallyVisible = TD.bFound = false;
            continue;
        }
        manMeasAttempted[Finder.GetLevel()]++; // Stats for tracking quality assessment

        bool bFound =
            Finder.FindPatchCoarse(ir(TD.v2Image), mCurrentKF, nRange);
        TD.bSearched = true;
        if(!bFound)
        {
            TD.bFound = false;
            continue;
        }

        TD.bFound = true;
        // Measurement noise scales with the pyramid level the patch lives on.
        TD.dSqrtInvNoise = (1.0 / Finder.GetLevelScale());

        nFound++;
        manMeasFound[Finder.GetLevel()]++;

        // Found the patch in coarse search - are Sub-pixel iterations wanted too?
        if(nSubPixIts > 0)
        {
            TD.bDidSubPix = true;
            Finder.MakeSubPixTemplate();
            bool bSubPixConverges=Finder.IterateSubPixToConvergence(mCurrentKF, nSubPixIts);
            if(!bSubPixConverges)
            { // If subpix doesn't converge, the patch location is probably very dubious!
                // Roll back the success bookkeeping done just above.
                TD.bFound = false;
                nFound--;
                manMeasFound[Finder.GetLevel()]--;
                continue;
            }
            TD.v2Found = Finder.GetSubPixPos();
        }
        else
        {
            TD.v2Found = Finder.GetCoarsePosAsVector();
            TD.bDidSubPix = false;
        }
    }
    return nFound;
};
831 
//Calculate a pose update 6-vector from a bunch of image measurements.
//User-selectable M-Estimator.
//Normally this robustly estimates a sigma-squared for all the measurements
//to reduce outlier influence, but this can be overridden if
//dOverrideSigma is positive. Also, bMarkOutliers set to true
//records any instances of a point being marked an outlier measurement
//by the Tukey MEstimator.
// @param vTD            measured points (only entries with bFound are used)
// @param dOverrideSigma if > 0, use this sigma-squared instead of estimating one
// @param bMarkOutliers  if true, record inlier/outlier counts on the points
// @return               SE3 update 6-vector (zero vector if no measurements)
Vector<6> Tracker::CalcPoseUpdate(vector<TrackerData*> vTD, double dOverrideSigma, bool bMarkOutliers)
{
    // Which M-estimator are we using?
    int nEstimator = 0;
    static string gvsEstimator=TrackerMEstimator;
    if(gvsEstimator == "Tukey")
        nEstimator = 0;
    else if(gvsEstimator == "Cauchy")
        nEstimator = 1;
    else if(gvsEstimator == "Huber")
        nEstimator = 2;
    else
    {
        // Unknown setting: warn once per hit and fall back to Tukey.
        cout << "Invalid TrackerMEstimator, choices are Tukey, Cauchy, Huber" << endl;
        nEstimator = 0;
        gvsEstimator = "Tukey";
    };

    // Find the covariance-scaled reprojection error for each measurement.
    // Also, store the square of these quantities for M-Estimator sigma squared estimation.
    vector<double> vdErrorSquared;
    for(unsigned int f=0; f<vTD.size(); f++)
    {
        TrackerData &TD = *vTD[f];
        if(!TD.bFound)
            continue;
        TD.v2Error_CovScaled = TD.dSqrtInvNoise* (TD.v2Found - TD.v2Image);
        vdErrorSquared.push_back(TD.v2Error_CovScaled * TD.v2Error_CovScaled);
    };

    // No valid measurements? Return null update.
    if(vdErrorSquared.size() == 0)
        return makeVector( 0,0,0,0,0,0);

    // What is the distribution of errors?
    double dSigmaSquared;
    if(dOverrideSigma > 0)
        dSigmaSquared = dOverrideSigma; // Bit of a waste having stored the vector of square errors in this case!
    else
    {
        if (nEstimator == 0)
            dSigmaSquared = Tukey::FindSigmaSquared(vdErrorSquared);
        else if(nEstimator == 1)
            dSigmaSquared = Cauchy::FindSigmaSquared(vdErrorSquared);
        else
            dSigmaSquared = Huber::FindSigmaSquared(vdErrorSquared);
    }

    // The TooN WLSCholesky class handles reweighted least squares.
    // It just needs errors and jacobians.
    WLS<6> wls;
    wls.add_prior(100.0); // Stabilising prior
    for(unsigned int f=0; f<vTD.size(); f++)
    {
        TrackerData &TD = *vTD[f];
        if(!TD.bFound)
            continue;
        Vector<2> &v2 = TD.v2Error_CovScaled;
        double dErrorSq = v2 * v2;
        double dWeight;

        if(nEstimator == 0)
            dWeight= Tukey::Weight(dErrorSq, dSigmaSquared);
        else if(nEstimator == 1)
            dWeight= Cauchy::Weight(dErrorSq, dSigmaSquared);
        else
            dWeight= Huber::Weight(dErrorSq, dSigmaSquared);

        // Inlier/outlier accounting, only really works for cut-off estimators such as Tukey.
        // NOTE(review): the outlier/inlier counter increments appear truncated
        // from this listing (both bMarkOutliers branches have no visible
        // statement) -- verify against the full source.
        if(dWeight == 0.0)
        {
            if(bMarkOutliers)

                continue;
        }
        else
            if(bMarkOutliers)

        Matrix<2,6> &m26Jac = TD.m26Jacobian;
        wls.add_mJ(v2[0], TD.dSqrtInvNoise * m26Jac[0], dWeight); // These two lines are currently
        wls.add_mJ(v2[1], TD.dSqrtInvNoise * m26Jac[1], dWeight); // the slowest bit of poseits
    }

    wls.compute();
    return wls.get_mu();
}
926 
927 
928 // Just add the current velocity to the current pose.
929 // N.b. this doesn't actually use time in any way, i.e. it assumes
930 // a one-frame-per-second camera. Skipped frames etc
931 // are not handled properly here.
933 {
935  Vector<6> v6Velocity = mv6CameraVelocity;
936  if(mbUseSBIInit)
937  {
938  v6Velocity.slice<3,3>() = mv6SBIRot.slice<3,3>();
939  v6Velocity[0] = 0.0;
940  v6Velocity[1] = 0.0;
941  }
942  mse3CamFromWorld = SE3<>::exp(v6Velocity) * mse3StartPos;
943 };
944 
945 
946 // The motion model is entirely the tracker's, and is kept as a decaying
947 // constant velocity model.
949 {
950  SE3<> se3NewFromOld = mse3CamFromWorld * mse3StartPos.inverse();
951  Vector<6> v6Motion = SE3<>::ln(se3NewFromOld);
952  Vector<6> v6OldVel = mv6CameraVelocity;
953 
954  mv6CameraVelocity = 0.9 * (0.5 * v6Motion + 0.5 * v6OldVel);
956 
957  // Also make an estimate of this which has been scaled by the mean scene depth.
958  // This is used to decide if we should use a coarse tracking stage.
959  // We can tolerate more translational vel when far away from scene!
961  v6.slice<0,3>() *= 1.0 / mCurrentKF.dSceneDepthMean;
962  mdMSDScaledVelocityMagnitude = sqrt(v6*v6);
963 }
964 
965 // Time to add a new keyframe? The MapMaker handles most of this.
967 {
970 }
971 
972 // Some heuristics to decide if tracking is any good, for this frame.
973 // This influences decisions to add key-frames, and eventually
974 // causes the tracker to attempt relocalisation.
976 {
977  int nTotalAttempted = 0;
978  int nTotalFound = 0;
979  int nLargeAttempted = 0;
980  int nLargeFound = 0;
981 
982  for(int i=0; i<LEVELS; i++)
983  {
984  nTotalAttempted += manMeasAttempted[i];
985  nTotalFound += manMeasFound[i];
986  if(i>=2) nLargeAttempted += manMeasAttempted[i];
987  if(i>=2) nLargeFound += manMeasFound[i];
988  }
989 
990  if(nTotalFound == 0 || nTotalAttempted == 0)
992  else
993  {
994  double dTotalFracFound = (double) nTotalFound / nTotalAttempted;
995  double dLargeFracFound;
996  if(nLargeAttempted > 10)
997  dLargeFracFound = (double) nLargeFound / nLargeAttempted;
998  else
999  dLargeFracFound = dTotalFracFound;
1000 
1001  static double gvdQualityGood=TrackerTrackingQualityGood;
1002  static double gvdQualityLost=TrackerTrackingQualityLost;
1003 
1004 
1005  if(dTotalFracFound > gvdQualityGood)
1007  else if(dLargeFracFound < gvdQualityLost)
1009  else
1011  }
1012 
1013  if(mTrackingQuality == DODGY)
1014  {
1015  // Further heuristics to see if it's actually bad, not just dodgy...
1016  // If the camera pose estimate has run miles away, it's probably bad.
1019  }
1020 
1021  if(mTrackingQuality==BAD)
1022  mnLostFrames++;
1023  else
1024  mnLostFrames = 0;
1025 }
1026 
1028 {
1029  return mMessageForUser.str();
1030 }
1031 
1033 {
1035  pair<SE2<>, double> result_pair;
1037  SE3<> se3Adjust = SmallBlurryImage::SE3fromSE2(result_pair.first, mCamera);
1038  mv6SBIRot = se3Adjust.ln();
1039 }
1040 
1041 ImageRef TrackerData::irImageSize; // Static member of TrackerData lives here
1042 
1044 {
1045  std::ostringstream corners;
1046 // for (int i=0; i<mCurrentKF.aLevels[0].vCorners.size(); i++)//mCurrentKF.aLevels[0].vCorners.size()
1047 // {
1048 // corners << mCurrentKF.aLevels[0].vCorners[i].x <<","<< mCurrentKF.aLevels[0].vCorners[i].y<<";";
1049 // }
1050  //cout << mCurrentKF.aLevels[0].vCorners.size()/2 << endl;
1051  corners << mCurrentKF.aLevels[0].vCorners.size();
1052  return corners.str();
1053 }
1054 
1055 
1056 
1057 
1058 
1059 
Tracker(CVD::ImageRef irVideoSize, const ATANCamera &c, Map &m, MapMaker &mm)
Definition: Tracker.cpp:28
bool bSubPix
Definition: KeyFrame.h:47
Definition: Tracker.h:39
static double Weight(double dErrorSquared, double dSigmaSquared)
Definition: MEstimator.h:129
const double TrackerTrackingQualityLost
Definition: globals.h:42
void GetDerivsUnsafe(ATANCamera &Cam)
Definition: TrackerData.h:71
const int TrackerCoarseMax
Definition: globals.h:33
Vector< 6 > CalcPoseUpdate(std::vector< TrackerData * > vTD, double dOverrideSigma=0.0, bool bMarkOutliers=false)
Definition: Tracker.cpp:839
bool IsDistanceToNearestKeyFrameExcessive(KeyFrame &kCurrent)
Definition: MapMaker.cpp:1046
#define LOGV(LOG_TAG,...)
Definition: globals.h:10
const int TrackerCoarseRange
Definition: globals.h:34
std::pair< SE2<>, double > IteratePosRelToTarget(SmallBlurryImage &other, int nIterations=10)
#define LOG_TAG
Definition: globals.h:9
const double TrackerCoarseMinVelocity
Definition: globals.h:38
SmallBlurryImage * mpSBILastFrame
Definition: Tracker.h:125
const int TrackerMiniPatchMaxSSD
Definition: globals.h:31
Vector< 2 > v2Found
Definition: TrackerData.h:38
int nMEstimatorOutlierCount
Definition: MapPoint.h:70
bool FindPatchCoarse(CVD::ImageRef ir, KeyFrame &kf, unsigned int nRange)
SO3< Precision > & get_rotation()
Returns the rotation part of the transformation as a SO3.
Definition: se3.h:64
bool bDidSubPix
Definition: TrackerData.h:37
int GetLevel()
Definition: PatchFinder.h:63
std::vector< Command > mvQueuedCommands
Definition: Tracker.h:139
static void GUICommandCallBack(void *ptr, std::string sCommand, std::string sParams)
Definition: Tracker.cpp:295
void SampleFromImage(CVD::ImageRef irPos, CVD::BasicImage< CVD::byte > &im)
Definition: MiniPatch.cpp:85
Vector< 6 > mv6CameraVelocity
Definition: Tracker.h:102
MapMaker & mMapMaker
Definition: Tracker.h:69
const std::string TrackerMEstimator
Definition: globals.h:40
#define LEVELS
Definition: KeyFrame.h:33
static double Weight(double dErrorSquared, double dSigmaSquared)
Definition: MEstimator.h:95
MiniPatch mPatch
Definition: Tracker.h:41
CVD::ImageRef irCurrentPos
Definition: Tracker.h:42
void TrackMap()
Definition: Tracker.cpp:493
bool bPotentiallyVisible
Definition: TrackerData.h:32
int QueueSize()
Definition: MapMaker.h:58
Matrix< 2, 6 > m26Jacobian
Definition: TrackerData.h:44
CVD::Image< CVD::byte > im
Definition: KeyFrame.h:59
int SearchForPoints(std::vector< TrackerData * > &vTD, int nRange, int nFineIts)
Definition: Tracker.cpp:776
bool ResetDone()
Definition: MapMaker.cpp:126
bool bFixed
Definition: KeyFrame.h:83
void MakeSubPixTemplate()
double dSceneDepthSigma
Definition: KeyFrame.h:92
bool thresholdReached
Definition: Tracker.h:143
void SendPoseToUnity(float r1, float r2, float r3, double t1, double t2, double t3)
Send pose to Unity3D
Definition: SendMessage.cpp:63
void StopTracking()
Definition: Tracker.cpp:54
static double FindSigmaSquared(std::vector< double > &vdErrorSquared)
Definition: MEstimator.h:77
static double FindSigmaSquared(std::vector< double > &vdErrorSquared)
Definition: MEstimator.h:111
int manMeasFound[LEVELS]
Definition: Tracker.h:116
void add_mJ(Precision m, const Vector< Size, Precision, B2 > &J, Precision weight=1)
Definition: wls.h:100
void TrailTracking_Start()
Definition: Tracker.cpp:413
void Reset()
Definition: Tracker.cpp:74
void ApplyMotionModel()
Definition: Tracker.cpp:932
int CalcSearchLevelAndWarpMatrix(MapPoint &p, SE3<> se3CFromW, Matrix< 2 > &m2CamDerivs)
Definition: PatchFinder.cpp:33
void SendStringToUnity(const char *st)
Send string to Unity3D
Definition: SendMessage.cpp:46
void GetEulerAnglesFromRotationMatrix(Matrix< 3, 3, double > &m, float &heading, float &attitude, float &bank)
Definition: Tracker.cpp:254
double mdVelocityMagnitude
Definition: Tracker.h:103
bool NeedNewKeyFrame(KeyFrame &kCurrent)
Definition: MapMaker.cpp:706
int mnLostFrames
Definition: Tracker.h:118
bool FindPatch(CVD::ImageRef &irPos, CVD::BasicImage< CVD::byte > &im, int nRange, std::vector< CVD::ImageRef > &vCorners, std::vector< int > *pvRowLUT=NULL)
Definition: MiniPatch.cpp:34
SE3 inverse() const
Definition: se3.h:86
std::string getVCorners()
Definition: Tracker.cpp:1043
Definition: abs.h:24
bool mbDidCoarse
Definition: Tracker.h:105
void AddKeyFrame(KeyFrame &k)
Definition: MapMaker.cpp:453
static double Weight(double dErrorSquared, double dSigmaSquared)
Definition: MEstimator.h:52
CVD::ImageRef mirSize
Definition: Tracker.h:73
static int mnMaxSSD
Definition: MiniPatch.h:34
bool AttemptRecovery()
Definition: Tracker.cpp:279
Vector< 2 > GetSubPixPos()
Definition: PatchFinder.h:95
double mdMSDScaledVelocityMagnitude
Definition: Tracker.h:104
std::vector< MapPoint * > vpPoints
Definition: Map.h:33
Vector< 2 > GetCoarsePosAsVector()
Definition: PatchFinder.h:87
enum Tracker::@13 mnInitialStage
void AssessTrackingQuality()
Definition: Tracker.cpp:975
void SetSendMessageToUnity(SendMessage sm)
Definition: Tracker.cpp:60
Vector< Size, Precision > & get_mu()
Returns the update. With no prior, this is the result of .
Definition: wls.h:202
SendMessage sendToUnity
Definition: Tracker.h:141
void SendLogToUnity(const char *st)
Send string to Unity3D
Definition: SendMessage.cpp:34
const int TrackerUseRotationEstimator
Definition: globals.h:30
bool IterateSubPixToConvergence(KeyFrame &kf, int nMaxIts)
bool TemplateBad()
Definition: PatchFinder.h:76
std::string GetMessageForUser()
Definition: Tracker.cpp:1027
Level aLevels[LEVELS]
Definition: KeyFrame.h:84
int MakeKeyFrame_Lite(CVD::BasicImage< CVD::byte > &im, int threshold)
Definition: KeyFrame.cpp:12
const int TrackerDisableCoarse
Definition: globals.h:37
CVD::ImageRef irInitialPos
Definition: Tracker.h:43
std::vector< CVD::ImageRef > vCorners
Definition: KeyFrame.h:62
bool bInImage
Definition: TrackerData.h:31
const int DesiredNumberOfCorners
Definition: globals.h:50
void GUICommandHandler(std::string sCommand, std::string sParams)
Definition: Tracker.cpp:304
MapPoint & Point
Definition: TrackerData.h:21
const bool SendArrayOfPointsForCornersTex
Definition: globals.h:55
DetectionState currentState
Definition: Tracker.h:144
Definition: se3.h:50
void RequestReset()
Definition: MapMaker.cpp:120
void MakeKeyFrame_Rest()
Definition: KeyFrame.cpp:69
std::vector< KeyFrame * > vpKeyFrames
Definition: Map.h:35
ATANCamera mCamera
Definition: Tracker.h:70
SmallBlurryImage * mpSBIThisFrame
Definition: Tracker.h:126
bool InitFromStereo(KeyFrame &kFirst, KeyFrame &kSecond, std::vector< std::pair< CVD::ImageRef, CVD::ImageRef > > &vMatches, SE3<> &se3CameraPos)
Definition: MapMaker.cpp:196
Relocaliser mRelocaliser
Definition: Tracker.h:71
void CalcSBIRotation()
Definition: Tracker.cpp:1032
SE3 se3CfromW
Definition: KeyFrame.h:82
const double TrackerRotationEstimatorBlur
Definition: globals.h:29
const bool UseNumberOfCornersAdjustment
Definition: globals.h:52
Matrix< R, C, P > exp(const Matrix< R, C, P, B > &m)
Definition: helpers.h:284
std::ostringstream mMessageForUser
Definition: Tracker.h:133
int TrailTracking_Advance()
Definition: Tracker.cpp:442
Definition: KeyFrame.h:54
bool IsGood()
Definition: Map.h:27
PatchFinder Finder
Definition: TrackerData.h:24
void Project(const SE3<> &se3CFromW, ATANCamera &Cam)
Definition: TrackerData.h:49
bool mbUserPressedSpacebar
Definition: Tracker.h:132
const int TrackerCoarseSubPixIts
Definition: globals.h:35
void UpdateMotionModel()
Definition: Tracker.cpp:948
bool in_image_with_border(const ImageRef &ir, int border) const
Definition: image.h:271
void StartTracking()
Definition: Tracker.cpp:49
KeyFrame mPreviousFrameKF
Definition: Tracker.h:87
Vector< 2 > v2Image
Definition: TrackerData.h:29
Vector< 2 > v2RootPos
Definition: KeyFrame.h:48
const int TrackerCoarseMin
Definition: globals.h:32
Definition: Map.h:24
std::list< Trail > mlTrails
Definition: Tracker.h:85
const int DesiredNumberOfCornersOffset
Definition: globals.h:51
enum Tracker::@14 mTrackingQuality
int nLevel
Definition: KeyFrame.h:46
ImageRef ir(const TooN::Vector< 2 > &v)
Vector< 6 > mv6SBIRot
Definition: Tracker.h:128
int nMEstimatorInlierCount
Definition: MapPoint.h:71
bool bSearched
Definition: TrackerData.h:35
int mnFrame
Definition: Tracker.h:110
const int MaxInitialTrails
Definition: globals.h:20
bool mbDraw
Definition: Tracker.h:107
void TrackForInitialMap()
Definition: Tracker.cpp:345
Vector< 3, Precision > & get_translation()
Returns the translation part of the transformation as a Vector.
Definition: se3.h:69
Vector< 2 > v2Error_CovScaled
Definition: TrackerData.h:43
SE3 mse3CamFromWorld
Definition: Tracker.h:100
std::map< MapPoint *, Measurement > mMeasurements
Definition: KeyFrame.h:85
std::string sCommand
Definition: Tracker.h:138
void TrackFrame(CVD::Image< CVD::byte > &imFrame, uint hnd, bool bDraw)
Definition: Tracker.cpp:110
int manMeasAttempted[LEVELS]
Definition: Tracker.h:115
Vector< 1 > makeVector(double x1)
Definition: make_vector.hh:4
string tostr(string w, const T &t)
Definition: Tracker.cpp:44
bool AttemptRecovery(KeyFrame &k)
Definition: Relocaliser.cpp:20
bool mbUseSBIInit
Definition: Tracker.h:129
void SetImageSize(Vector< 2 > v2ImageSize)
Definition: ATANCamera.cpp:21
void compute()
Definition: wls.h:180
const int TrackerMaxPatchesPerFrame
Definition: globals.h:39
bool mbJustRecoveredSoUseCoarse
Definition: Tracker.h:122
double dSceneDepthMean
Definition: KeyFrame.h:91
double dSqrtInvNoise
Definition: TrackerData.h:39
KeyFrame mCurrentKF
Definition: Tracker.h:65
static int mnHalfPatchSize
Definition: MiniPatch.h:32
Matrix< 2 > m2CamDerivs
Definition: TrackerData.h:30
static Vector< 6, Precision > ln(const SE3 &se3)
Definition: se3.h:430
void SendArrayOfPoints(int arrayOfPoint[], int size)
Send array of points to Unity3D
Definition: SendMessage.cpp:75
int nSearchLevel
Definition: TrackerData.h:34
SE3 mse3StartPos
Definition: Tracker.h:101
int GetLevelScale()
Definition: PatchFinder.h:64
Map & mMap
Definition: Tracker.h:68
const bool EnableLogging
Definition: globals.h:54
void add_prior(Precision val)
Definition: wls.h:70
DetectionState GetCurrentDetectionState()
Definition: Tracker.cpp:65
TrackerData * pTData
Definition: MapPoint.h:67
void AddNewKeyFrame()
Definition: Tracker.cpp:966
std::string sParams
Definition: Tracker.h:138
double dSTScore
Definition: KeyFrame.h:40
SE3 BestPose()
Definition: Relocaliser.cpp:15
DetectionState
Enum for state of SLAM detection process.
Definition: SLAMflex.h:11
void MakeTemplateCoarseCont(MapPoint &p)
Definition: PatchFinder.cpp:81
std::vector< Candidate > vCandidates
Definition: KeyFrame.h:67
static double FindSigmaSquared(std::vector< double > &vdErrorSquared)
Definition: MEstimator.h:155
KeyFrame mFirstKF
Definition: Tracker.h:86
int mnLastKeyFrameDropped
Definition: Tracker.h:111
static Operator< Internal::Zero > Zeros
Definition: objects.h:727
int threshold
Definition: Tracker.h:142
const double TrackerTrackingQualityGood
Definition: globals.h:41
static CVD::ImageRef irImageSize
Definition: TrackerData.h:107
float RadiansToDegrees(float r)
Definition: Tracker.h:77
static SE3 SE3fromSE2(SE2<> se2, ATANCamera camera)
#define cout
Definition: Bundle.cpp:16
CVD::ImageRef irLevelPos
Definition: KeyFrame.h:38
Definition: wls.h:48