idx int64 0 41.2k | question stringlengths 74 4.04k | target stringlengths 7 750 |
|---|---|---|
27,200 | public static < T extends ImageBase < T > > void boundImage ( T input , double min , double max ) { if ( input instanceof ImageGray ) { if ( GrayU8 . class == input . getClass ( ) ) { PixelMath . boundImage ( ( GrayU8 ) input , ( int ) min , ( int ) max ) ; } else if ( GrayS8 . class == input . getClass ( ) ) { PixelMath . boundImage ( ( GrayS8 ) input , ( int ) min , ( int ) max ) ; } else if ( GrayU16 . class == input . getClass ( ) ) { PixelMath . boundImage ( ( GrayU16 ) input , ( int ) min , ( int ) max ) ; } else if ( GrayS16 . class == input . getClass ( ) ) { PixelMath . boundImage ( ( GrayS16 ) input , ( int ) min , ( int ) max ) ; } else if ( GrayS32 . class == input . getClass ( ) ) { PixelMath . boundImage ( ( GrayS32 ) input , ( int ) min , ( int ) max ) ; } else if ( GrayS64 . class == input . getClass ( ) ) { PixelMath . boundImage ( ( GrayS64 ) input , ( long ) min , ( long ) max ) ; } else if ( GrayF32 . class == input . getClass ( ) ) { PixelMath . boundImage ( ( GrayF32 ) input , ( float ) min , ( float ) max ) ; } else if ( GrayF64 . class == input . getClass ( ) ) { PixelMath . boundImage ( ( GrayF64 ) input , min , max ) ; } else { throw new IllegalArgumentException ( "Unknown image Type: " + input . getClass ( ) . getSimpleName ( ) ) ; } } else if ( input instanceof Planar ) { Planar in = ( Planar ) input ; for ( int i = 0 ; i < in . getNumBands ( ) ; i ++ ) { boundImage ( in . getBand ( i ) , min , max ) ; } } } | Bounds image pixels to be between these two values . |
27,201 | private void updateTargetDescription ( ) { if ( targetPt != null ) { TupleDesc feature = describe . createDescription ( ) ; describe . process ( targetPt . x , targetPt . y , targetOrientation , targetRadius , feature ) ; tuplePanel . setDescription ( feature ) ; } else { tuplePanel . setDescription ( null ) ; } tuplePanel . repaint ( ) ; } | Extracts the target description and updates the panel . Should only be called from a swing thread |
27,202 | public static DMatrixRMaj inducedHomography13 ( TrifocalTensor tensor , Vector3D_F64 line2 , DMatrixRMaj output ) { if ( output == null ) output = new DMatrixRMaj ( 3 , 3 ) ; DMatrixRMaj T = tensor . T1 ; output . data [ 0 ] = T . data [ 0 ] * line2 . x + T . data [ 3 ] * line2 . y + T . data [ 6 ] * line2 . z ; output . data [ 3 ] = T . data [ 1 ] * line2 . x + T . data [ 4 ] * line2 . y + T . data [ 7 ] * line2 . z ; output . data [ 6 ] = T . data [ 2 ] * line2 . x + T . data [ 5 ] * line2 . y + T . data [ 8 ] * line2 . z ; T = tensor . T2 ; output . data [ 1 ] = T . data [ 0 ] * line2 . x + T . data [ 3 ] * line2 . y + T . data [ 6 ] * line2 . z ; output . data [ 4 ] = T . data [ 1 ] * line2 . x + T . data [ 4 ] * line2 . y + T . data [ 7 ] * line2 . z ; output . data [ 7 ] = T . data [ 2 ] * line2 . x + T . data [ 5 ] * line2 . y + T . data [ 8 ] * line2 . z ; T = tensor . T3 ; output . data [ 2 ] = T . data [ 0 ] * line2 . x + T . data [ 3 ] * line2 . y + T . data [ 6 ] * line2 . z ; output . data [ 5 ] = T . data [ 1 ] * line2 . x + T . data [ 4 ] * line2 . y + T . data [ 7 ] * line2 . z ; output . data [ 8 ] = T . data [ 2 ] * line2 . x + T . data [ 5 ] * line2 . y + T . data [ 8 ] * line2 . z ; return output ; } | Computes the homography induced from view 1 to 3 by a line in view 2 . The provided line in view 2 must contain the view 2 observation . |
27,203 | public static DMatrixRMaj inducedHomography12 ( TrifocalTensor tensor , Vector3D_F64 line3 , DMatrixRMaj output ) { if ( output == null ) output = new DMatrixRMaj ( 3 , 3 ) ; DMatrixRMaj T = tensor . T1 ; output . data [ 0 ] = T . data [ 0 ] * line3 . x + T . data [ 1 ] * line3 . y + T . data [ 2 ] * line3 . z ; output . data [ 3 ] = T . data [ 3 ] * line3 . x + T . data [ 4 ] * line3 . y + T . data [ 5 ] * line3 . z ; output . data [ 6 ] = T . data [ 6 ] * line3 . x + T . data [ 7 ] * line3 . y + T . data [ 8 ] * line3 . z ; T = tensor . T2 ; output . data [ 1 ] = T . data [ 0 ] * line3 . x + T . data [ 1 ] * line3 . y + T . data [ 2 ] * line3 . z ; output . data [ 4 ] = T . data [ 3 ] * line3 . x + T . data [ 4 ] * line3 . y + T . data [ 5 ] * line3 . z ; output . data [ 7 ] = T . data [ 6 ] * line3 . x + T . data [ 7 ] * line3 . y + T . data [ 8 ] * line3 . z ; T = tensor . T3 ; output . data [ 2 ] = T . data [ 0 ] * line3 . x + T . data [ 1 ] * line3 . y + T . data [ 2 ] * line3 . z ; output . data [ 5 ] = T . data [ 3 ] * line3 . x + T . data [ 4 ] * line3 . y + T . data [ 5 ] * line3 . z ; output . data [ 8 ] = T . data [ 6 ] * line3 . x + T . data [ 7 ] * line3 . y + T . data [ 8 ] * line3 . z ; return output ; } | Computes the homography induced from view 1 to 2 by a line in view 3 . The provided line in view 3 must contain the view 3 observation . |
27,204 | public static DMatrixRMaj homographyStereo3Pts ( DMatrixRMaj F , AssociatedPair p1 , AssociatedPair p2 , AssociatedPair p3 ) { HomographyInducedStereo3Pts alg = new HomographyInducedStereo3Pts ( ) ; alg . setFundamental ( F , null ) ; if ( ! alg . process ( p1 , p2 , p3 ) ) return null ; return alg . getHomography ( ) ; } | Computes the homography induced from a planar surface when viewed from two views using correspondences of three points . Observations must be on the planar surface . |
27,205 | public static DMatrixRMaj homographyStereoLinePt ( DMatrixRMaj F , PairLineNorm line , AssociatedPair point ) { HomographyInducedStereoLinePt alg = new HomographyInducedStereoLinePt ( ) ; alg . setFundamental ( F , null ) ; alg . process ( line , point ) ; return alg . getHomography ( ) ; } | Computes the homography induced from a planar surface when viewed from two views using correspondences of a line and a point . Observations must be on the planar surface . |
27,206 | public static DMatrixRMaj homographyStereo2Lines ( DMatrixRMaj F , PairLineNorm line0 , PairLineNorm line1 ) { HomographyInducedStereo2Line alg = new HomographyInducedStereo2Line ( ) ; alg . setFundamental ( F , null ) ; if ( ! alg . process ( line0 , line1 ) ) return null ; return alg . getHomography ( ) ; } | Computes the homography induced from a planar surface when viewed from two views using correspondences of two lines . Observations must be on the planar surface . |
27,207 | public static DMatrixRMaj createFundamental ( DMatrixRMaj E , CameraPinhole intrinsic ) { DMatrixRMaj K = PerspectiveOps . pinholeToMatrix ( intrinsic , ( DMatrixRMaj ) null ) ; return createFundamental ( E , K ) ; } | Computes a Fundamental matrix given an Essential matrix and the camera s intrinsic parameters . |
27,208 | public static void projectiveToMetric ( DMatrixRMaj cameraMatrix , DMatrixRMaj H , Se3_F64 worldToView , DMatrixRMaj K ) { DMatrixRMaj tmp = new DMatrixRMaj ( 3 , 4 ) ; CommonOps_DDRM . mult ( cameraMatrix , H , tmp ) ; MultiViewOps . decomposeMetricCamera ( tmp , K , worldToView ) ; } | Elevates a projective camera matrix into a metric one using the rectifying homography . Extracts calibration and Se3 pose . |
27,209 | public static void projectiveToMetricKnownK ( DMatrixRMaj cameraMatrix , DMatrixRMaj H , DMatrixRMaj K , Se3_F64 worldToView ) { DMatrixRMaj tmp = new DMatrixRMaj ( 3 , 4 ) ; CommonOps_DDRM . mult ( cameraMatrix , H , tmp ) ; DMatrixRMaj K_inv = new DMatrixRMaj ( 3 , 3 ) ; CommonOps_DDRM . invert ( K , K_inv ) ; DMatrixRMaj P = new DMatrixRMaj ( 3 , 4 ) ; CommonOps_DDRM . mult ( K_inv , tmp , P ) ; CommonOps_DDRM . extract ( P , 0 , 0 , worldToView . R ) ; worldToView . T . x = P . get ( 0 , 3 ) ; worldToView . T . y = P . get ( 1 , 3 ) ; worldToView . T . z = P . get ( 2 , 3 ) ; SingularValueDecomposition_F64 < DMatrixRMaj > svd = DecompositionFactory_DDRM . svd ( true , true , true ) ; DMatrixRMaj R = worldToView . R ; if ( ! svd . decompose ( R ) ) throw new RuntimeException ( "SVD Failed" ) ; CommonOps_DDRM . multTransB ( svd . getU ( null , false ) , svd . getV ( null , false ) , R ) ; double det = CommonOps_DDRM . det ( R ) ; if ( det < 0 ) { CommonOps_DDRM . scale ( - 1 , R ) ; worldToView . T . scale ( - 1 ) ; } } | Convert the projective camera matrix into a metric transform given the rectifying homography and a known calibration matrix . |
27,210 | public static void rectifyHToAbsoluteQuadratic ( DMatrixRMaj H , DMatrixRMaj Q ) { int indexQ = 0 ; for ( int rowA = 0 ; rowA < 4 ; rowA ++ ) { for ( int colB = 0 ; colB < 4 ; colB ++ ) { int indexA = rowA * 4 ; int indexB = colB * 4 ; double sum = 0 ; for ( int i = 0 ; i < 3 ; i ++ ) { sum += H . data [ indexA ++ ] * H . data [ indexB ++ ] ; } Q . data [ indexQ ++ ] = sum ; } } } | Rectifying homography to dual absolute quadratic . |
27,211 | public static void intrinsicFromAbsoluteQuadratic ( DMatrixRMaj Q , DMatrixRMaj P , CameraPinhole intrinsic ) { DMatrixRMaj tmp = new DMatrixRMaj ( 3 , 4 ) ; DMatrixRMaj tmp2 = new DMatrixRMaj ( 3 , 3 ) ; CommonOps_DDRM . mult ( P , Q , tmp ) ; CommonOps_DDRM . multTransB ( tmp , P , tmp2 ) ; decomposeDiac ( tmp2 , intrinsic ) ; } | Extracts the intrinsic camera matrix from a view given its camera matrix and the dual absolute quadratic . |
27,212 | public static Tuple2 < List < Point2D_F64 > , List < Point2D_F64 > > split2 ( List < AssociatedPair > input ) { List < Point2D_F64 > list1 = new ArrayList < > ( ) ; List < Point2D_F64 > list2 = new ArrayList < > ( ) ; for ( int i = 0 ; i < input . size ( ) ; i ++ ) { list1 . add ( input . get ( i ) . p1 ) ; list2 . add ( input . get ( i ) . p2 ) ; } return new Tuple2 < > ( list1 , list2 ) ; } | Splits the associated pairs into two lists |
27,213 | public static Tuple3 < List < Point2D_F64 > , List < Point2D_F64 > , List < Point2D_F64 > > split3 ( List < AssociatedTriple > input ) { List < Point2D_F64 > list1 = new ArrayList < > ( ) ; List < Point2D_F64 > list2 = new ArrayList < > ( ) ; List < Point2D_F64 > list3 = new ArrayList < > ( ) ; for ( int i = 0 ; i < input . size ( ) ; i ++ ) { list1 . add ( input . get ( i ) . p1 ) ; list2 . add ( input . get ( i ) . p2 ) ; list3 . add ( input . get ( i ) . p3 ) ; } return new Tuple3 < > ( list1 , list2 , list3 ) ; } | Splits the associated triple into three lists |
27,214 | protected void performShrinkage ( I transform , int numLevels ) { for ( int i = 0 ; i < numLevels ; i ++ ) { int w = transform . width ; int h = transform . height ; int ww = w / 2 ; int hh = h / 2 ; Number threshold ; I subband ; subband = transform . subimage ( ww , 0 , w , hh , null ) ; threshold = computeThreshold ( subband ) ; rule . process ( subband , threshold ) ; subband = transform . subimage ( 0 , hh , ww , h , null ) ; threshold = computeThreshold ( subband ) ; rule . process ( subband , threshold ) ; subband = transform . subimage ( ww , hh , w , h , null ) ; threshold = computeThreshold ( subband ) ; rule . process ( subband , threshold ) ; transform = transform . subimage ( 0 , 0 , ww , hh , null ) ; } } | Performs wavelet shrinking using the specified rule and by computing a threshold for each subband . |
27,215 | public void denoise ( GrayF32 transform , int numLevels ) { int scale = UtilWavelet . computeScale ( numLevels ) ; final int h = transform . height ; final int w = transform . width ; final int innerWidth = w / scale ; final int innerHeight = h / scale ; GrayF32 subbandHH = transform . subimage ( w / 2 , h / 2 , w , h , null ) ; float sigma = UtilDenoiseWavelet . estimateNoiseStdDev ( subbandHH , null ) ; float threshold = ( float ) UtilDenoiseWavelet . universalThreshold ( subbandHH , sigma ) ; rule . process ( transform . subimage ( innerWidth , 0 , w , h , null ) , threshold ) ; rule . process ( transform . subimage ( 0 , innerHeight , innerWidth , h , null ) , threshold ) ; } | Applies VisuShrink denoising to the provided multilevel wavelet transform using the provided threshold . |
27,216 | public static WaveletDescription < WlCoef_F32 > generate_F32 ( int I ) { if ( I != 6 ) { throw new IllegalArgumentException ( "Only 6 is currently supported" ) ; } WlCoef_F32 coef = new WlCoef_F32 ( ) ; coef . offsetScaling = - 2 ; coef . offsetWavelet = - 2 ; coef . scaling = new float [ 6 ] ; coef . wavelet = new float [ 6 ] ; double sqrt7 = Math . sqrt ( 7 ) ; double div = 16.0 * Math . sqrt ( 2 ) ; coef . scaling [ 0 ] = ( float ) ( ( 1.0 - sqrt7 ) / div ) ; coef . scaling [ 1 ] = ( float ) ( ( 5.0 + sqrt7 ) / div ) ; coef . scaling [ 2 ] = ( float ) ( ( 14.0 + 2.0 * sqrt7 ) / div ) ; coef . scaling [ 3 ] = ( float ) ( ( 14.0 - 2.0 * sqrt7 ) / div ) ; coef . scaling [ 4 ] = ( float ) ( ( 1.0 - sqrt7 ) / div ) ; coef . scaling [ 5 ] = ( float ) ( ( - 3.0 + sqrt7 ) / div ) ; coef . wavelet [ 0 ] = coef . scaling [ 5 ] ; coef . wavelet [ 1 ] = - coef . scaling [ 4 ] ; coef . wavelet [ 2 ] = coef . scaling [ 3 ] ; coef . wavelet [ 3 ] = - coef . scaling [ 2 ] ; coef . wavelet [ 4 ] = coef . scaling [ 1 ] ; coef . wavelet [ 5 ] = - coef . scaling [ 0 ] ; WlBorderCoefStandard < WlCoef_F32 > inverse = new WlBorderCoefStandard < > ( coef ) ; return new WaveletDescription < > ( new BorderIndex1D_Wrap ( ) , coef , inverse ) ; } | Creates a description of a Coiflet of order I wavelet . |
27,217 | public static FitData < EllipseRotated_F64 > fitEllipse_F64 ( List < Point2D_F64 > points , int iterations , boolean computeError , FitData < EllipseRotated_F64 > outputStorage ) { if ( outputStorage == null ) { outputStorage = new FitData < > ( new EllipseRotated_F64 ( ) ) ; } FitEllipseAlgebraic_F64 algebraic = new FitEllipseAlgebraic_F64 ( ) ; if ( ! algebraic . process ( points ) ) { FitData < Circle2D_F64 > circleData = averageCircle_F64 ( points , null , null ) ; Circle2D_F64 circle = circleData . shape ; outputStorage . shape . set ( circle . center . x , circle . center . y , circle . radius , circle . radius , 0 ) ; } else { UtilEllipse_F64 . convert ( algebraic . getEllipse ( ) , outputStorage . shape ) ; } if ( iterations > 0 ) { RefineEllipseEuclideanLeastSquares_F64 leastSquares = new RefineEllipseEuclideanLeastSquares_F64 ( ) ; leastSquares . setMaxIterations ( iterations ) ; leastSquares . refine ( outputStorage . shape , points ) ; outputStorage . shape . set ( leastSquares . getFound ( ) ) ; } if ( computeError ) { ClosestPointEllipseAngle_F64 closestPoint = new ClosestPointEllipseAngle_F64 ( 1e-8 , 100 ) ; closestPoint . setEllipse ( outputStorage . shape ) ; double total = 0 ; for ( Point2D_F64 p : points ) { closestPoint . process ( p ) ; total += p . distance ( closestPoint . getClosest ( ) ) ; } outputStorage . error = total / points . size ( ) ; } else { outputStorage . error = 0 ; } return outputStorage ; } | Computes the best fit ellipse based on minimizing Euclidean distance . An estimate is initially provided using algebraic algorithm which is then refined using non - linear optimization . The amount of non - linear optimization can be controlled using iterations parameter . Will work with partial and complete contours of objects . |
27,218 | public static List < Point2D_F64 > convert_I32_F64 ( List < Point2D_I32 > points ) { return convert_I32_F64 ( points , null ) . toList ( ) ; } | Converts a list of I32 points into F64 |
27,219 | public static FitData < Circle2D_F64 > averageCircle_I32 ( List < Point2D_I32 > points , GrowQueue_F64 optional , FitData < Circle2D_F64 > outputStorage ) { if ( outputStorage == null ) { outputStorage = new FitData < > ( new Circle2D_F64 ( ) ) ; } if ( optional == null ) { optional = new GrowQueue_F64 ( ) ; } Circle2D_F64 circle = outputStorage . shape ; int N = points . size ( ) ; int sumX = 0 , sumY = 0 ; for ( int i = 0 ; i < N ; i ++ ) { Point2D_I32 p = points . get ( i ) ; sumX += p . x ; sumY += p . y ; } optional . reset ( ) ; double centerX = circle . center . x = sumX / ( double ) N ; double centerY = circle . center . y = sumY / ( double ) N ; double meanR = 0 ; for ( int i = 0 ; i < N ; i ++ ) { Point2D_I32 p = points . get ( i ) ; double dx = p . x - centerX ; double dy = p . y - centerY ; double r = Math . sqrt ( dx * dx + dy * dy ) ; optional . push ( r ) ; meanR += r ; } meanR /= N ; circle . radius = meanR ; double variance = 0 ; for ( int i = 0 ; i < N ; i ++ ) { double diff = optional . get ( i ) - meanR ; variance += diff * diff ; } outputStorage . error = variance / N ; return outputStorage ; } | Computes a circle which has it s center at the mean position of the provided points and radius is equal to the average distance of each point from the center . While fast to compute the provided circle is not a best fit circle by any reasonable metric except for special cases . |
27,220 | public void fixate ( ) { ransac = FactoryMultiViewRobust . trifocalRansac ( configTriRansac , configError , configRansac ) ; sba = FactoryMultiView . bundleSparseProjective ( configSBA ) ; } | Must call if you change configurations . |
27,221 | boolean selectInitialTriplet ( View seed , GrowQueue_I32 motions , int selected [ ] ) { double bestScore = 0 ; for ( int i = 0 ; i < motions . size ; i ++ ) { View viewB = seed . connections . get ( i ) . other ( seed ) ; for ( int j = i + 1 ; j < motions . size ; j ++ ) { View viewC = seed . connections . get ( j ) . other ( seed ) ; double s = scoreTripleView ( seed , viewB , viewC ) ; if ( s > bestScore ) { bestScore = s ; selected [ 0 ] = i ; selected [ 1 ] = j ; } } } return bestScore != 0 ; } | Exhaustively look at all triplets that connect with the seed view |
27,222 | private void triangulateFeatures ( List < AssociatedTriple > inliers , DMatrixRMaj P1 , DMatrixRMaj P2 , DMatrixRMaj P3 ) { List < DMatrixRMaj > cameraMatrices = new ArrayList < > ( ) ; cameraMatrices . add ( P1 ) ; cameraMatrices . add ( P2 ) ; cameraMatrices . add ( P3 ) ; List < Point2D_F64 > triangObs = new ArrayList < > ( ) ; triangObs . add ( null ) ; triangObs . add ( null ) ; triangObs . add ( null ) ; Point4D_F64 X = new Point4D_F64 ( ) ; for ( int i = 0 ; i < inliers . size ( ) ; i ++ ) { AssociatedTriple t = inliers . get ( i ) ; triangObs . set ( 0 , t . p1 ) ; triangObs . set ( 1 , t . p2 ) ; triangObs . set ( 2 , t . p3 ) ; if ( triangulator . triangulate ( triangObs , cameraMatrices , X ) ) { structure . points [ i ] . set ( X . x , X . y , X . z , X . w ) ; } else { throw new RuntimeException ( "Failed to triangulate a point in the inlier set?! Handle if this is common" ) ; } } } | Triangulates the location of each features in homogenous space |
27,223 | private void initializeProjective3 ( FastQueue < AssociatedTriple > associated , FastQueue < AssociatedTripleIndex > associatedIdx , int totalViews , View viewA , View viewB , View viewC , int idxViewB , int idxViewC ) { ransac . process ( associated . toList ( ) ) ; List < AssociatedTriple > inliers = ransac . getMatchSet ( ) ; TrifocalTensor model = ransac . getModelParameters ( ) ; if ( verbose != null ) verbose . println ( "Remaining after RANSAC " + inliers . size ( ) + " / " + associated . size ( ) ) ; DMatrixRMaj P1 = CommonOps_DDRM . identity ( 3 , 4 ) ; DMatrixRMaj P2 = new DMatrixRMaj ( 3 , 4 ) ; DMatrixRMaj P3 = new DMatrixRMaj ( 3 , 4 ) ; MultiViewOps . extractCameraMatrices ( model , P2 , P3 ) ; structure . initialize ( totalViews , inliers . size ( ) ) ; db . lookupShape ( viewA . id , shape ) ; structure . setView ( 0 , true , P1 , shape . width , shape . height ) ; db . lookupShape ( viewB . id , shape ) ; structure . setView ( idxViewB , false , P2 , shape . width , shape . height ) ; db . lookupShape ( viewC . id , shape ) ; structure . setView ( idxViewC , false , P3 , shape . width , shape . height ) ; triangulateFeatures ( inliers , P1 , P2 , P3 ) ; seedToStructure . resize ( viewA . totalFeatures ) ; seedToStructure . fill ( - 1 ) ; inlierToSeed . resize ( inliers . size ( ) ) ; for ( int i = 0 ; i < inliers . size ( ) ; i ++ ) { int inputIdx = ransac . getInputIndex ( i ) ; inlierToSeed . data [ i ] = matchesTripleIdx . get ( inputIdx ) . a ; seedToStructure . data [ inlierToSeed . data [ i ] ] = i ; } } | Initializes projective reconstruction from 3 - views . |
27,224 | boolean findRemainingCameraMatrices ( LookupSimilarImages db , View seed , GrowQueue_I32 motions ) { points3D . reset ( ) ; for ( int i = 0 ; i < structure . points . length ; i ++ ) { structure . points [ i ] . get ( points3D . grow ( ) ) ; } assocPixel . reset ( ) ; for ( int i = 0 ; i < inlierToSeed . size ; i ++ ) { assocPixel . grow ( ) . p1 . set ( matchesTriple . get ( i ) . p1 ) ; } DMatrixRMaj cameraMatrix = new DMatrixRMaj ( 3 , 4 ) ; for ( int motionIdx = 0 ; motionIdx < motions . size ; motionIdx ++ ) { if ( motionIdx == selectedTriple [ 0 ] || motionIdx == selectedTriple [ 1 ] ) continue ; int connectionIdx = motions . get ( motionIdx ) ; Motion edge = seed . connections . get ( connectionIdx ) ; View viewI = edge . other ( seed ) ; db . lookupPixelFeats ( viewI . id , featsB ) ; if ( ! computeCameraMatrix ( seed , edge , featsB , cameraMatrix ) ) { if ( verbose != null ) { verbose . println ( "Pose estimator failed! motionIdx=" + motionIdx ) ; } return false ; } db . lookupShape ( edge . other ( seed ) . id , shape ) ; structure . setView ( motionIdx , false , cameraMatrix , shape . width , shape . height ) ; } return true ; } | Uses the triangulated points and observations in the root view to estimate the camera matrix for all the views which are remaining |
27,225 | private boolean computeCameraMatrix ( View seed , Motion edge , FastQueue < Point2D_F64 > featsB , DMatrixRMaj cameraMatrix ) { boolean seedSrc = edge . src == seed ; int matched = 0 ; for ( int i = 0 ; i < edge . inliers . size ; i ++ ) { AssociatedIndex a = edge . inliers . get ( i ) ; int featId = seedToStructure . data [ seedSrc ? a . src : a . dst ] ; if ( featId == - 1 ) continue ; assocPixel . get ( featId ) . p2 . set ( featsB . get ( seedSrc ? a . dst : a . src ) ) ; matched ++ ; } if ( matched != assocPixel . size ) throw new RuntimeException ( "BUG! Didn't find all features in the view" ) ; if ( poseEstimator . processHomogenous ( assocPixel . toList ( ) , points3D . toList ( ) ) ) { cameraMatrix . set ( poseEstimator . getProjective ( ) ) ; return true ; } else { return false ; } } | Computes camera matrix between the seed view and a connected view |
27,226 | private SceneObservations createObservationsForBundleAdjustment ( LookupSimilarImages db , View seed , GrowQueue_I32 motions ) { SceneObservations observations = new SceneObservations ( motions . size + 1 ) ; SceneObservations . View obsView = observations . getView ( 0 ) ; for ( int i = 0 ; i < inlierToSeed . size ; i ++ ) { int id = inlierToSeed . data [ i ] ; Point2D_F64 o = featsA . get ( id ) ; id = seedToStructure . data [ id ] ; obsView . add ( id , ( float ) o . x , ( float ) o . y ) ; } for ( int i = 0 ; i < motions . size ( ) ; i ++ ) { obsView = observations . getView ( i + 1 ) ; Motion m = seed . connections . get ( motions . get ( i ) ) ; View v = m . other ( seed ) ; boolean seedIsSrc = m . src == seed ; db . lookupPixelFeats ( v . id , featsB ) ; for ( int j = 0 ; j < m . inliers . size ; j ++ ) { AssociatedIndex a = m . inliers . get ( j ) ; int id = seedToStructure . data [ seedIsSrc ? a . src : a . dst ] ; if ( id < 0 ) continue ; Point2D_F64 o = featsB . get ( seedIsSrc ? a . dst : a . src ) ; obsView . add ( id , ( float ) o . x , ( float ) o . y ) ; } } return observations ; } | Convert observations into a format which bundle adjustment will understand |
27,227 | private boolean refineWithBundleAdjustment ( SceneObservations observations ) { if ( scaleSBA ) { scaler . applyScale ( structure , observations ) ; } sba . setVerbose ( verbose , verboseLevel ) ; sba . setParameters ( structure , observations ) ; sba . configure ( converge . ftol , converge . gtol , converge . maxIterations ) ; if ( ! sba . optimize ( structure ) ) { return false ; } if ( scaleSBA ) { for ( int i = 0 ; i < structure . views . length ; i ++ ) { DMatrixRMaj P = structure . views [ i ] . worldToView ; scaler . pixelScaling . get ( i ) . remove ( P , P ) ; } scaler . undoScale ( structure , observations ) ; } return true ; } | Last step is to refine the current initial estimate with bundle adjustment |
27,228 | public static void nv21ToBoof ( byte [ ] data , int width , int height , ImageBase output ) { if ( output instanceof Planar ) { Planar ms = ( Planar ) output ; if ( ms . getBandType ( ) == GrayU8 . class ) { ConvertNV21 . nv21TPlanarRgb_U8 ( data , width , height , ms ) ; } else if ( ms . getBandType ( ) == GrayF32 . class ) { ConvertNV21 . nv21ToPlanarRgb_F32 ( data , width , height , ms ) ; } else { throw new IllegalArgumentException ( "Unsupported output band format" ) ; } } else if ( output instanceof ImageGray ) { if ( output . getClass ( ) == GrayU8 . class ) { nv21ToGray ( data , width , height , ( GrayU8 ) output ) ; } else if ( output . getClass ( ) == GrayF32 . class ) { nv21ToGray ( data , width , height , ( GrayF32 ) output ) ; } else { throw new IllegalArgumentException ( "Unsupported output type" ) ; } } else if ( output instanceof ImageInterleaved ) { if ( output . getClass ( ) == InterleavedU8 . class ) { ConvertNV21 . nv21ToInterleaved ( data , width , height , ( InterleavedU8 ) output ) ; } else if ( output . getClass ( ) == InterleavedF32 . class ) { ConvertNV21 . nv21ToInterleaved ( data , width , height , ( InterleavedF32 ) output ) ; } else { throw new IllegalArgumentException ( "Unsupported output type" ) ; } } else { throw new IllegalArgumentException ( "Boofcv image type not yet supported" ) ; } } | Converts a NV21 encoded byte array into a BoofCV formatted image . |
27,229 | public static < T extends ImageGray < T > > T nv21ToGray ( byte [ ] data , int width , int height , T output , Class < T > outputType ) { if ( outputType == GrayU8 . class ) { return ( T ) nv21ToGray ( data , width , height , ( GrayU8 ) output ) ; } else if ( outputType == GrayF32 . class ) { return ( T ) nv21ToGray ( data , width , height , ( GrayF32 ) output ) ; } else { throw new IllegalArgumentException ( "Unsupported BoofCV Image Type " + outputType . getSimpleName ( ) ) ; } } | Converts an NV21 image into a gray scale image . Image type is determined at runtime . |
27,230 | public static GrayU8 nv21ToGray ( byte [ ] data , int width , int height , GrayU8 output ) { if ( output != null ) { output . reshape ( width , height ) ; } else { output = new GrayU8 ( width , height ) ; } if ( BoofConcurrency . USE_CONCURRENT ) { ImplConvertNV21_MT . nv21ToGray ( data , output ) ; } else { ImplConvertNV21 . nv21ToGray ( data , output ) ; } return output ; } | Converts an NV21 image into a gray scale U8 image . |
27,231 | public void generate ( long value , int gridWidth ) { renderer . init ( ) ; drawBorder ( ) ; double whiteBorder = whiteBorderDoc / markerWidth ; double X0 = whiteBorder + blackBorder ; double Y0 = whiteBorder + blackBorder ; double bw = ( 1.0 - 2 * X0 ) / gridWidth ; square ( X0 , 1.0 - whiteBorder - blackBorder - bw , bw ) ; final int bitCount = gridWidth * gridWidth - 4 ; for ( int j = 0 ; j < bitCount ; j ++ ) { if ( ( value & ( 1L << j ) ) != 0 ) { box ( bw , j , gridWidth ) ; } } } | Renders a binary square fiducial |
27,232 | public void setConfiguration ( Se3_F64 planeToCamera , CameraPinholeBrown intrinsic ) { this . planeToCamera = planeToCamera ; normToPixel = LensDistortionFactory . narrow ( intrinsic ) . distort_F64 ( false , true ) ; pixelToNorm = LensDistortionFactory . narrow ( intrinsic ) . undistort_F64 ( true , false ) ; planeToCamera . invert ( cameraToPlane ) ; } | Configures the camera s intrinsic and extrinsic parameters |
27,233 | public void setIntrinsic ( CameraPinholeBrown intrinsic ) { normToPixel = LensDistortionFactory . narrow ( intrinsic ) . distort_F64 ( false , true ) ; pixelToNorm = LensDistortionFactory . narrow ( intrinsic ) . undistort_F64 ( true , false ) ; } | Configures the camera s intrinsic parameters |
27,234 | public void setPlaneToCamera ( Se3_F64 planeToCamera , boolean computeInverse ) { this . planeToCamera = planeToCamera ; if ( computeInverse ) planeToCamera . invert ( cameraToPlane ) ; } | Specifies camera s extrinsic parameters . |
27,235 | public boolean planeToPixel ( double pointX , double pointY , Point2D_F64 pixel ) { plain3D . set ( - pointY , 0 , pointX ) ; SePointOps_F64 . transform ( planeToCamera , plain3D , camera3D ) ; if ( camera3D . z <= 0 ) return false ; double normX = camera3D . x / camera3D . z ; double normY = camera3D . y / camera3D . z ; normToPixel . compute ( normX , normY , pixel ) ; return true ; } | Given a point on the plane find the pixel in the image . |
27,236 | public boolean planeToNormalized ( double pointX , double pointY , Point2D_F64 normalized ) { plain3D . set ( - pointY , 0 , pointX ) ; SePointOps_F64 . transform ( planeToCamera , plain3D , camera3D ) ; if ( camera3D . z <= 0 ) return false ; normalized . x = camera3D . x / camera3D . z ; normalized . y = camera3D . y / camera3D . z ; return true ; } | Given a point on the plane find the normalized image coordinate |
27,237 | public void convert ( FeatureGraph2D graph ) { graph . nodes . resize ( corners . size ) ; graph . reset ( ) ; for ( int i = 0 ; i < corners . size ; i ++ ) { Node c = corners . get ( i ) ; FeatureGraph2D . Node n = graph . nodes . grow ( ) ; n . reset ( ) ; n . set ( c . x , c . y ) ; n . index = c . index ; } for ( int i = 0 ; i < corners . size ; i ++ ) { Node c = corners . get ( i ) ; for ( int j = 0 ; j < 4 ; j ++ ) { if ( c . edges [ j ] == null ) continue ; graph . connect ( c . index , c . edges [ j ] . index ) ; } } } | Convert into a generic graph . |
27,238 | public boolean process ( I frame ) { keyFrame = false ; tracker . process ( frame ) ; totalFramesProcessed ++ ; List < PointTrack > tracks = tracker . getActiveTracks ( null ) ; if ( tracks . size ( ) == 0 ) return false ; List < AssociatedPair > pairs = new ArrayList < > ( ) ; for ( PointTrack t : tracks ) { pairs . add ( ( AssociatedPair ) t . getCookie ( ) ) ; } if ( ! modelMatcher . process ( ( List ) pairs ) ) { return false ; } if ( modelRefiner != null ) { if ( ! modelRefiner . fitModel ( modelMatcher . getMatchSet ( ) , modelMatcher . getModelParameters ( ) , keyToCurr ) ) return false ; } else { keyToCurr . set ( modelMatcher . getModelParameters ( ) ) ; } for ( AssociatedPair p : modelMatcher . getMatchSet ( ) ) { ( ( AssociatedPairTrack ) p ) . lastUsed = totalFramesProcessed ; } pruneUnusedTracks ( ) ; worldToKey . concat ( keyToCurr , worldToCurr ) ; return true ; } | Processes the next frame in the sequence . |
27,239 | public void changeKeyFrame ( ) { List < PointTrack > inactive = tracker . getInactiveTracks ( null ) ; for ( PointTrack l : inactive ) { tracker . dropTrack ( l ) ; } List < PointTrack > active = tracker . getActiveTracks ( null ) ; for ( PointTrack l : active ) { AssociatedPairTrack p = l . getCookie ( ) ; p . p1 . set ( l ) ; p . lastUsed = totalFramesProcessed ; } tracker . spawnTracks ( ) ; List < PointTrack > spawned = tracker . getNewTracks ( null ) ; for ( PointTrack l : spawned ) { AssociatedPairTrack p = l . getCookie ( ) ; if ( p == null ) { l . cookie = p = new AssociatedPairTrack ( ) ; p . p2 = l ; } p . p1 . set ( l ) ; p . lastUsed = totalFramesProcessed ; } worldToKey . set ( worldToCurr ) ; keyToCurr . reset ( ) ; keyFrame = true ; } | Change the current frame into the keyframe . p1 location of existing tracks is set to their current location and new tracks are spawned . Reference frame transformations are also updated |
27,240 | public static double autoScale ( List < Point3D_F64 > cloud , double target ) { Point3D_F64 mean = new Point3D_F64 ( ) ; Point3D_F64 stdev = new Point3D_F64 ( ) ; statistics ( cloud , mean , stdev ) ; double scale = target / ( Math . max ( Math . max ( stdev . x , stdev . y ) , stdev . z ) ) ; int N = cloud . size ( ) ; for ( int i = 0 ; i < N ; i ++ ) { cloud . get ( i ) . scale ( scale ) ; } return scale ; } | Automatically rescales the point cloud based so that it has a standard deviation of target |
27,241 | public static void statistics ( List < Point3D_F64 > cloud , Point3D_F64 mean , Point3D_F64 stdev ) { final int N = cloud . size ( ) ; for ( int i = 0 ; i < N ; i ++ ) { Point3D_F64 p = cloud . get ( i ) ; mean . x += p . x / N ; mean . y += p . y / N ; mean . z += p . z / N ; } for ( int i = 0 ; i < N ; i ++ ) { Point3D_F64 p = cloud . get ( i ) ; double dx = p . x - mean . x ; double dy = p . y - mean . y ; double dz = p . z - mean . z ; stdev . x += dx * dx / N ; stdev . y += dy * dy / N ; stdev . z += dz * dz / N ; } stdev . x = Math . sqrt ( stdev . x ) ; stdev . y = Math . sqrt ( stdev . y ) ; stdev . z = Math . sqrt ( stdev . z ) ; } | Computes the mean and standard deviation of each axis in the point cloud computed in dependently |
27,242 | public static void prune ( List < Point3D_F64 > cloud , int minNeighbors , double radius ) { if ( minNeighbors < 0 ) throw new IllegalArgumentException ( "minNeighbors must be >= 0" ) ; NearestNeighbor < Point3D_F64 > nn = FactoryNearestNeighbor . kdtree ( new KdTreePoint3D_F64 ( ) ) ; NearestNeighbor . Search < Point3D_F64 > search = nn . createSearch ( ) ; nn . setPoints ( cloud , false ) ; FastQueue < NnData < Point3D_F64 > > results = new FastQueue ( NnData . class , true ) ; minNeighbors += 1 ; radius *= radius ; for ( int i = cloud . size ( ) - 1 ; i >= 0 ; i -- ) { search . findNearest ( cloud . get ( i ) , radius , minNeighbors , results ) ; if ( results . size < minNeighbors ) { cloud . remove ( i ) ; } } } | Prunes points from the point cloud if they have very few neighbors |
27,243 | public static void computeNormalizationLL ( List < List < Point2D_F64 > > points , NormalizationPoint2D normalize ) { double meanX = 0 ; double meanY = 0 ; int count = 0 ; for ( int i = 0 ; i < points . size ( ) ; i ++ ) { List < Point2D_F64 > l = points . get ( i ) ; for ( int j = 0 ; j < l . size ( ) ; j ++ ) { Point2D_F64 p = l . get ( j ) ; meanX += p . x ; meanY += p . y ; } count += l . size ( ) ; } meanX /= count ; meanY /= count ; double stdX = 0 ; double stdY = 0 ; for ( int i = 0 ; i < points . size ( ) ; i ++ ) { List < Point2D_F64 > l = points . get ( i ) ; for ( int j = 0 ; j < l . size ( ) ; j ++ ) { Point2D_F64 p = l . get ( j ) ; double dx = p . x - meanX ; double dy = p . y - meanY ; stdX += dx * dx ; stdY += dy * dy ; } } normalize . meanX = meanX ; normalize . meanY = meanY ; normalize . stdX = Math . sqrt ( stdX / count ) ; normalize . stdY = Math . sqrt ( stdY / count ) ; } | Computes normalization when points are contained in a list of lists |
27,244 | public static void convertFile ( File original ) throws IOException { File outputFile = determineClassName ( original ) ; String classNameOld = className ( original ) ; String classNameNew = className ( outputFile ) ; List < String > inputLines = FileUtils . readLines ( original , "UTF-8" ) ; List < String > outputLines = new ArrayList < > ( ) ; List < Macro > macros = new ArrayList < > ( ) ; boolean foundClassDef = false ; for ( int i = 0 ; i < inputLines . size ( ) ; i ++ ) { String line = inputLines . get ( i ) ; int where = line . indexOf ( prefix ) ; if ( where < 0 ) { if ( ! foundClassDef && line . contains ( "class " + classNameOld ) ) { foundClassDef = true ; line = line . replaceFirst ( "class " + classNameOld , "class " + classNameNew ) ; } else { line = line . replace ( classNameOld + "(" , classNameNew + "(" ) ; } outputLines . add ( line ) ; continue ; } String type = readType ( line , where + prefix . length ( ) ) ; String whitespaces = line . substring ( 0 , where ) ; int frontLength = where + prefix . length ( ) + type . length ( ) ; String message = line . length ( ) > frontLength ? line . substring ( frontLength + 1 ) : "" ; switch ( type ) { case "CLASS_NAME" : continue ; case "INLINE" : outputLines . add ( whitespaces + message ) ; break ; case "ABOVE" : outputLines . remove ( outputLines . size ( ) - 1 ) ; outputLines . add ( whitespaces + message ) ; break ; case "BELOW" : outputLines . add ( whitespaces + message ) ; i += 1 ; break ; case "REMOVE_ABOVE" : outputLines . remove ( outputLines . size ( ) - 1 ) ; break ; case "REMOVE_BELOW" : i += 1 ; break ; case "MACRO" : throw new RuntimeException ( "MACRO not handled yet" ) ; default : throw new RuntimeException ( "Unknown: " + type ) ; } } PrintStream out = new PrintStream ( outputFile ) ; for ( int i = 0 ; i < outputLines . size ( ) ; i ++ ) { out . println ( outputLines . get ( i ) ) ; } out . close ( ) ; createTestIfNotThere ( outputFile ) ; } | Converts the file from single thread into concurrent implementation
27,245 | private static File determineClassName ( File original ) throws IOException { String text = FileUtils . readFileToString ( original , "UTF-8" ) ; if ( ! text . contains ( "//CONCURRENT" ) ) throw new IOException ( "Not a concurrent file" ) ; String pattern = "//CONCURRENT_CLASS_NAME " ; int where = text . indexOf ( pattern ) ; if ( where < 0 ) { String name = className ( original ) ; return new File ( original . getParent ( ) , name + "_MT.java" ) ; } String name = readUntilEndOfLine ( text , where + pattern . length ( ) ) ; return new File ( original . getParent ( ) , name + ".java" ) ; } | Searches the input file for an override . If none is found then _MT is added to the class name . |
27,246 | public void initialize ( int width , int height ) { if ( bottomWidth == width && bottomHeight == height ) return ; this . bottomWidth = width ; this . bottomHeight = height ; layers = imageType . createArray ( getNumLayers ( ) ) ; double scaleFactor = getScale ( 0 ) ; if ( scaleFactor == 1 ) { if ( ! saveOriginalReference ) { layers [ 0 ] = imageType . createImage ( bottomWidth , bottomHeight ) ; } } else { layers [ 0 ] = imageType . createImage ( ( int ) Math . ceil ( bottomWidth / scaleFactor ) , ( int ) Math . ceil ( bottomHeight / scaleFactor ) ) ; } for ( int i = 1 ; i < layers . length ; i ++ ) { scaleFactor = getScale ( i ) ; layers [ i ] = imageType . createImage ( ( int ) Math . ceil ( bottomWidth / scaleFactor ) , ( int ) Math . ceil ( bottomHeight / scaleFactor ) ) ; } } | Initializes internal data structures based on the input image s size . Should be called each time a new image is processed . |
27,247 | protected void checkScales ( ) { if ( getScale ( 0 ) < 0 ) { throw new IllegalArgumentException ( "The first layer must be more than zero." ) ; } double prevScale = 0 ; for ( int i = 0 ; i < getNumLayers ( ) ; i ++ ) { double s = getScale ( i ) ; if ( s < prevScale ) throw new IllegalArgumentException ( "Higher layers must be the same size or larger than previous layers." ) ; prevScale = s ; } } | Used to internally check that the provided scales are valid . |
27,248 | static boolean checkGridSize ( List < List < NodeInfo > > grid , int clusterSize ) { int total = 0 ; int expected = grid . get ( 0 ) . size ( ) ; for ( int i = 0 ; i < grid . size ( ) ; i ++ ) { if ( expected != grid . get ( i ) . size ( ) ) return false ; total += grid . get ( i ) . size ( ) ; } return total == clusterSize ; } | Makes sure the found grid is the same size as the original cluster . If it s not then not all the nodes were used . All lists must have the same size too .
27,249 | public double depthNView ( List < Point2D_F64 > obs , List < Se3_F64 > motion ) { double top = 0 , bottom = 0 ; Point2D_F64 a = obs . get ( 0 ) ; for ( int i = 1 ; i < obs . size ( ) ; i ++ ) { Se3_F64 se = motion . get ( i - 1 ) ; Point2D_F64 b = obs . get ( i ) ; GeometryMath_F64 . multCrossA ( b , se . getR ( ) , temp0 ) ; GeometryMath_F64 . mult ( temp0 , a , temp1 ) ; GeometryMath_F64 . cross ( b , se . getT ( ) , temp2 ) ; top += temp2 . x + temp2 . y + temp2 . z ; bottom += temp1 . x + temp1 . y + temp1 . z ; } return - top / bottom ; } | Computes the pixel depth from N views of the same object . Pixel depth in the first frame . |
27,250 | public double depth2View ( Point2D_F64 a , Point2D_F64 b , Se3_F64 fromAtoB ) { DMatrixRMaj R = fromAtoB . getR ( ) ; Vector3D_F64 T = fromAtoB . getT ( ) ; GeometryMath_F64 . multCrossA ( b , R , temp0 ) ; GeometryMath_F64 . mult ( temp0 , a , temp1 ) ; GeometryMath_F64 . cross ( b , T , temp2 ) ; return - ( temp2 . x + temp2 . y + temp2 . z ) / ( temp1 . x + temp1 . y + temp1 . z ) ; } | Computes pixel depth in image a from two observations . |
27,251 | public void initialize ( int numFeatures , int numViews ) { depths . reshape ( numViews , numFeatures ) ; pixels . reshape ( numViews * 2 , numFeatures ) ; pixelScale = 0 ; } | Initializes internal data structures . Must be called first |
27,252 | public void setPixels ( int view , List < Point2D_F64 > pixelsInView ) { if ( pixelsInView . size ( ) != pixels . numCols ) throw new IllegalArgumentException ( "Pixel count must be constant and match " + pixels . numCols ) ; int row = view * 2 ; for ( int i = 0 ; i < pixelsInView . size ( ) ; i ++ ) { Point2D_F64 p = pixelsInView . get ( i ) ; pixels . set ( row , i , p . x ) ; pixels . set ( row + 1 , i , p . y ) ; pixelScale = Math . max ( Math . abs ( p . x ) , Math . abs ( p . y ) ) ; } } | Sets pixel observations for a particular view
27,253 | public void setDepths ( int view , double featureDepths [ ] ) { if ( featureDepths . length < depths . numCols ) throw new IllegalArgumentException ( "Pixel count must be constant and match " + pixels . numCols ) ; int N = depths . numCols ; for ( int i = 0 ; i < N ; i ++ ) { depths . set ( view , i , featureDepths [ i ] ) ; } } | Sets depths for a particular view to the values in the passed in array
27,254 | public void setDepthsFrom3D ( int view , List < Point3D_F64 > locations ) { if ( locations . size ( ) != pixels . numCols ) throw new IllegalArgumentException ( "Pixel count must be constant and match " + pixels . numCols ) ; int N = depths . numCols ; for ( int i = 0 ; i < N ; i ++ ) { depths . set ( view , i , locations . get ( i ) . z ) ; } } | Assigns depth to the z value of all the features in the list . Features must be in the coordinate system of the view for this to be correct |
27,255 | public boolean process ( ) { int numViews = depths . numRows ; int numFeatures = depths . numCols ; P . reshape ( 3 * numViews , 4 ) ; X . reshape ( 4 , numFeatures ) ; A . reshape ( numViews * 3 , numFeatures ) ; B . reshape ( numViews * 3 , numFeatures ) ; normalizeDepths ( depths ) ; assignValuesToA ( A ) ; for ( int iter = 0 ; iter < maxIterations ; iter ++ ) { if ( ! svd . decompose ( A ) ) return false ; svd . getU ( U , false ) ; svd . getV ( Vt , true ) ; double sv [ ] = svd . getSingularValues ( ) ; SingularOps_DDRM . descendingOrder ( U , false , sv , A . numCols , Vt , true ) ; CommonOps_DDRM . extract ( U , 0 , 0 , P ) ; CommonOps_DDRM . multCols ( P , sv ) ; CommonOps_DDRM . extract ( Vt , 0 , 0 , X ) ; CommonOps_DDRM . mult ( P , X , B ) ; double delta = SpecializedOps_DDRM . diffNormF ( A , B ) / ( A . numCols * A . numRows ) ; DMatrixRMaj tmp = A ; A = B ; B = tmp ; if ( delta <= minimumChangeTol ) break ; } return true ; } | Performs iteration to find camera matrices and feature locations in world frame |
27,256 | public void getCameraMatrix ( int view , DMatrixRMaj cameraMatrix ) { cameraMatrix . reshape ( 3 , 4 ) ; CommonOps_DDRM . extract ( P , view * 3 , 0 , cameraMatrix ) ; for ( int col = 0 ; col < 4 ; col ++ ) { cameraMatrix . data [ cameraMatrix . getIndex ( 0 , col ) ] *= pixelScale ; cameraMatrix . data [ cameraMatrix . getIndex ( 1 , col ) ] *= pixelScale ; } } | Used to get found camera matrix for a view |
27,257 | public void getFeature3D ( int feature , Point4D_F64 out ) { out . x = X . get ( 0 , feature ) ; out . y = X . get ( 1 , feature ) ; out . z = X . get ( 2 , feature ) ; out . w = X . get ( 3 , feature ) ; } | Returns location of 3D feature for a view |
27,258 | protected void computeScoreFive ( int top [ ] , int middle [ ] , int bottom [ ] , int score [ ] , int width ) { for ( int d = minDisparity ; d < maxDisparity ; d ++ ) { int indexSrc = ( d - minDisparity ) * width + ( d - minDisparity ) + radiusX ; int indexDst = ( d - minDisparity ) * width + ( d - minDisparity ) ; int end = indexSrc + ( width - d - 4 * radiusX ) ; while ( indexSrc < end ) { int s = 0 ; int val0 = top [ indexSrc - radiusX ] ; int val1 = top [ indexSrc + radiusX ] ; int val2 = bottom [ indexSrc - radiusX ] ; int val3 = bottom [ indexSrc + radiusX ] ; if ( val1 < val0 ) { int temp = val0 ; val0 = val1 ; val1 = temp ; } if ( val3 < val2 ) { int temp = val2 ; val2 = val3 ; val3 = temp ; } if ( val3 < val0 ) { s += val2 ; s += val3 ; } else if ( val2 < val1 ) { s += val2 ; s += val0 ; } else { s += val0 ; s += val1 ; } score [ indexDst ++ ] = s + middle [ indexSrc ++ ] ; } } } | Compute the final score by sampling the 5 regions . Four regions are sampled around the center region . Out of those four only the two with the smallest score are used . |
27,259 | public void setTrifocal ( TrifocalTensor tensor ) { this . tensor = tensor ; extract . setTensor ( tensor ) ; extract . extractFundmental ( F21 , F31 ) ; } | Specify the trifocal tensor
27,260 | public void transfer_1_to_3 ( double x1 , double y1 , double x2 , double y2 , Point3D_F64 p3 ) { adjuster . process ( F21 , x1 , y1 , x2 , y2 , pa , pb ) ; GeometryMath_F64 . mult ( F21 , pa , la ) ; l . x = la . y ; l . y = - la . x ; l . z = - pb . x * la . y + pb . y * la . x ; MultiViewOps . transfer_1_to_3 ( tensor , pa , l , p3 ) ; } | Transfer a point to third view given its observed location in view one and two .
27,261 | public void transfer_1_to_2 ( double x1 , double y1 , double x3 , double y3 , Point3D_F64 p2 ) { adjuster . process ( F31 , x1 , y1 , x3 , y3 , pa , pb ) ; GeometryMath_F64 . multTran ( F31 , pa , la ) ; l . x = la . y ; l . y = - la . x ; l . z = - pb . x * la . y + pb . y * la . x ; MultiViewOps . transfer_1_to_2 ( tensor , pa , l , p2 ) ; } | Transfer a point to second view given its observed location in view one and three .
27,262 | public void classify ( Planar < GrayF32 > image ) { DataManipulationOps . imageToTensor ( preprocess ( image ) , tensorInput , 0 ) ; innerProcess ( tensorInput ) ; } | The original implementation takes in an image then crops it randomly . This is primarily for training but is replicated here to reduce the number of differences |
27,263 | public static void computeScoreRow ( GrayU8 left , GrayU8 right , int row , int [ ] scores , int minDisparity , int maxDisparity , int regionWidth , int elementScore [ ] ) { for ( int d = minDisparity ; d < maxDisparity ; d ++ ) { int dispFromMin = d - minDisparity ; final int colMax = left . width - d ; final int scoreMax = colMax - regionWidth ; int indexScore = left . width * dispFromMin + dispFromMin ; int indexLeft = left . startIndex + left . stride * row + d ; int indexRight = right . startIndex + right . stride * row ; computeScoreRowSad ( left , right , colMax , indexLeft , indexRight , elementScore ) ; int score = 0 ; for ( int i = 0 ; i < regionWidth ; i ++ ) score += elementScore [ i ] ; scores [ indexScore ++ ] = score ; for ( int col = 0 ; col < scoreMax ; col ++ , indexScore ++ ) { scores [ indexScore ] = score += elementScore [ col + regionWidth ] - elementScore [ col ] ; } } } | Computes disparity score for an entire row . |
27,264 | public static void computeScoreRowSad ( GrayF32 left , GrayF32 right , int elementMax , int indexLeft , int indexRight , float elementScore [ ] ) { for ( int rCol = 0 ; rCol < elementMax ; rCol ++ ) { float diff = ( left . data [ indexLeft ++ ] ) - ( right . data [ indexRight ++ ] ) ; elementScore [ rCol ] = Math . abs ( diff ) ; } } | compute the score for each element all at once to encourage the JVM to optimize this section of code .
27,265 | public Se3_F64 estimateOutliers ( List < Point2D3D > observations ) { ModelMatcherMultiview < Se3_F64 , Point2D3D > ransac = FactoryMultiViewRobust . pnpRansac ( new ConfigPnP ( ) , new ConfigRansac ( 300 , 1.0 ) ) ; ransac . setIntrinsic ( 0 , intrinsic ) ; if ( ! ransac . process ( observations ) ) throw new RuntimeException ( "Probably got bad input data with NaN inside of it" ) ; System . out . println ( "Inlier size " + ransac . getMatchSet ( ) . size ( ) ) ; Se3_F64 worldToCamera = ransac . getModelParameters ( ) ; RefinePnP refine = FactoryMultiView . pnpRefine ( 1e-8 , 200 ) ; Se3_F64 refinedWorldToCamera = new Se3_F64 ( ) ; if ( ! refine . fitModel ( ransac . getMatchSet ( ) , worldToCamera , refinedWorldToCamera ) ) throw new RuntimeException ( "Refined failed! Input probably bad..." ) ; return refinedWorldToCamera ; } | Uses robust techniques to remove outliers |
27,266 | public void addOutliers ( List < Point2D3D > observations , int total ) { int size = observations . size ( ) ; for ( int i = 0 ; i < total ; i ++ ) { Point2D3D p = observations . get ( rand . nextInt ( size ) ) ; Point2D3D o = new Point2D3D ( ) ; o . observation . set ( p . observation ) ; o . location . x = p . location . x + rand . nextGaussian ( ) * 5 ; o . location . y = p . location . y + rand . nextGaussian ( ) * 5 ; o . location . z = p . location . z + rand . nextGaussian ( ) * 5 ; observations . add ( o ) ; } Collections . shuffle ( observations , rand ) ; } | Adds some really bad observations to the mix |
27,267 | public void loadInputData ( String fileName ) { Reader r = media . openFile ( fileName ) ; List < PathLabel > refs = new ArrayList < > ( ) ; try { BufferedReader reader = new BufferedReader ( r ) ; String line ; while ( ( line = reader . readLine ( ) ) != null ) { String [ ] z = line . split ( ":" ) ; String [ ] names = new String [ z . length - 1 ] ; for ( int i = 1 ; i < z . length ; i ++ ) { names [ i - 1 ] = baseDirectory + z [ i ] ; } refs . add ( new PathLabel ( z [ 0 ] , names ) ) ; } setInputList ( refs ) ; } catch ( IOException e ) { throw new RuntimeException ( e ) ; } } | Loads a standardized file for input references |
27,268 | public void addToToolbar ( JComponent comp ) { toolbar . add ( comp , 1 + algBoxes . length ) ; toolbar . revalidate ( ) ; addedComponents . add ( comp ) ; } | Adds a new component into the toolbar . |
27,269 | public void setMainGUI ( final Component gui ) { postAlgorithmEvents = true ; this . gui = gui ; SwingUtilities . invokeLater ( new Runnable ( ) { public void run ( ) { add ( gui , BorderLayout . CENTER ) ; } } ) ; } | Used to add the main GUI to this panel . Must use this function . Algorithm change events will not be posted until this function has been set . |
27,270 | public void setInputImage ( BufferedImage image ) { inputImage = image ; SwingUtilities . invokeLater ( new Runnable ( ) { public void run ( ) { if ( inputImage == null ) { originalCheck . setEnabled ( false ) ; } else { originalCheck . setEnabled ( true ) ; origPanel . setImage ( inputImage ) ; origPanel . setPreferredSize ( new Dimension ( inputImage . getWidth ( ) , inputImage . getHeight ( ) ) ) ; origPanel . repaint ( ) ; } } } ) ; } | Specifies an image which contains the original input image . After this has been called the view input image widget is activated and when selected this image will be displayed instead of the main GUI . This functionality is optional . |
27,271 | public void setInputList ( final List < PathLabel > inputRefs ) { this . inputRefs = inputRefs ; SwingUtilities . invokeLater ( new Runnable ( ) { public void run ( ) { for ( int i = 0 ; i < inputRefs . size ( ) ; i ++ ) { imageBox . addItem ( inputRefs . get ( i ) . getLabel ( ) ) ; } } } ) ; } | Specifies a list of images to use as input and loads them |
27,272 | protected < T > T getAlgorithmCookie ( int indexFamily ) { return ( T ) algCookies [ indexFamily ] . get ( algBoxes [ indexFamily ] . getSelectedIndex ( ) ) ; } | Returns the cookie associated with the specified algorithm family . |
27,273 | private boolean checkSideSize ( Polygon2D_F64 p ) { double max = 0 , min = Double . MAX_VALUE ; for ( int i = 0 ; i < p . size ( ) ; i ++ ) { double l = p . getSideLength ( i ) ; max = Math . max ( max , l ) ; min = Math . min ( min , l ) ; } if ( min < 10 ) return false ; return ! ( min / max < thresholdSideRatio ) ; } | Sanity check the polygon based on the size of its sides to see if it could be a fiducial that can be decoded |
27,274 | protected double computeFractionBoundary ( float pixelThreshold ) { final int w = square . width ; int radius = ( int ) ( w * borderWidthFraction ) ; int innerWidth = w - 2 * radius ; int total = w * w - innerWidth * innerWidth ; int count = 0 ; for ( int y = 0 ; y < radius ; y ++ ) { int indexTop = y * w ; int indexBottom = ( w - radius + y ) * w ; for ( int x = 0 ; x < w ; x ++ ) { if ( square . data [ indexTop ++ ] < pixelThreshold ) count ++ ; if ( square . data [ indexBottom ++ ] < pixelThreshold ) count ++ ; } } for ( int y = radius ; y < w - radius ; y ++ ) { int indexLeft = y * w ; int indexRight = y * w + w - radius ; for ( int x = 0 ; x < radius ; x ++ ) { if ( square . data [ indexLeft ++ ] < pixelThreshold ) count ++ ; if ( square . data [ indexRight ++ ] < pixelThreshold ) count ++ ; } } return count / ( double ) total ; } | Computes the fraction of pixels inside the image border which are black |
27,275 | private void prepareForOutput ( Polygon2D_F64 imageShape , Result result ) { int rotationCCW = ( 4 - result . rotation ) % 4 ; for ( int j = 0 ; j < rotationCCW ; j ++ ) { UtilPolygons2D_F64 . shiftUp ( imageShape ) ; } FoundFiducial f = found . grow ( ) ; f . id = result . which ; for ( int i = 0 ; i < 4 ; i ++ ) { Point2D_F64 a = imageShape . get ( i ) ; undistToDist . compute ( a . x , a . y , f . distortedPixels . get ( i ) ) ; } } | Takes the found quadrilateral and the computed 3D information and prepares it for output |
27,276 | public void process ( GrayS32 pixelToRegion , GrowQueue_I32 regionMemberCount , FastQueue < float [ ] > regionColor , FastQueue < Point2D_I32 > modeLocation ) { stopRequested = false ; initializeMerge ( regionMemberCount . size ) ; markMergeRegions ( regionColor , modeLocation , pixelToRegion ) ; if ( stopRequested ) return ; performMerge ( pixelToRegion , regionMemberCount ) ; } | Merges together similar regions which are in close proximity to each other . After merging most of the input data structures are modified to take into account the changes .
27,277 | protected void markMergeRegions ( FastQueue < float [ ] > regionColor , FastQueue < Point2D_I32 > modeLocation , GrayS32 pixelToRegion ) { for ( int targetId = 0 ; targetId < modeLocation . size && ! stopRequested ; targetId ++ ) { float [ ] color = regionColor . get ( targetId ) ; Point2D_I32 location = modeLocation . get ( targetId ) ; int x0 = location . x - searchRadius ; int x1 = location . x + searchRadius + 1 ; int y0 = location . y - searchRadius ; int y1 = location . y + searchRadius + 1 ; if ( x0 < 0 ) x0 = 0 ; if ( x1 > pixelToRegion . width ) x1 = pixelToRegion . width ; if ( y0 < 0 ) y0 = 0 ; if ( y1 > pixelToRegion . height ) y1 = pixelToRegion . height ; for ( int y = y0 ; y < y1 ; y ++ ) { for ( int x = x0 ; x < x1 ; x ++ ) { int candidateId = pixelToRegion . unsafe_get ( x , y ) ; if ( candidateId == targetId ) continue ; Point2D_I32 p = modeLocation . get ( candidateId ) ; if ( p . distance2 ( location ) <= maxSpacialDistanceSq ) { float [ ] candidateColor = regionColor . get ( candidateId ) ; float colorDistance = SegmentMeanShiftSearch . distanceSq ( color , candidateColor ) ; if ( colorDistance <= maxColorDistanceSq ) { markMerge ( targetId , candidateId ) ; } } } } } } | Takes the mode of a region and searches the local area around it for other regions . If the region s mode is also within the local area its color is checked to see if it s similar enough . If the color is similar enough then the two regions are marked for merger . |
27,278 | public static void convertToBoof ( Picture input , ImageBase output ) { if ( input . getColor ( ) == ColorSpace . RGB ) { ImplConvertJCodecPicture . RGB_to_PLU8 ( input , ( Planar ) output ) ; } else if ( input . getColor ( ) == ColorSpace . YUV420 ) { if ( output instanceof Planar ) { Planar ms = ( Planar ) output ; if ( ms . getImageType ( ) . getDataType ( ) == ImageDataType . U8 ) { ImplConvertJCodecPicture . yuv420_to_PlRgb_U8 ( input , ms ) ; } else if ( ms . getImageType ( ) . getDataType ( ) == ImageDataType . F32 ) { ImplConvertJCodecPicture . yuv420_to_PlRgb_F32 ( input , ms ) ; } } else if ( output instanceof GrayU8 ) { ImplConvertJCodecPicture . yuv420_to_U8 ( input , ( GrayU8 ) output ) ; } else if ( output instanceof GrayF32 ) { ImplConvertJCodecPicture . yuv420_to_F32 ( input , ( GrayF32 ) output ) ; } else { throw new RuntimeException ( "Unexpected output image type" ) ; } } } | Converts an image in JCodec format into one in BoofCV format . |
27,279 | public boolean process ( DMatrixRMaj R , List < Point3D_F64 > worldPts , List < Point2D_F64 > observed ) { if ( worldPts . size ( ) != observed . size ( ) ) throw new IllegalArgumentException ( "Number of worldPts and observed must be the same" ) ; if ( worldPts . size ( ) < 2 ) throw new IllegalArgumentException ( "A minimum of two points are required" ) ; int N = worldPts . size ( ) ; A . reshape ( 3 * N , 3 ) ; b . reshape ( A . numRows , 1 ) ; for ( int i = 0 ; i < N ; i ++ ) { Point3D_F64 X = worldPts . get ( i ) ; Point2D_F64 o = observed . get ( i ) ; int indexA = i * 3 * 3 ; int indexB = i * 3 ; A . data [ indexA + 1 ] = - 1 ; A . data [ indexA + 2 ] = o . y ; A . data [ indexA + 3 ] = 1 ; A . data [ indexA + 5 ] = - o . x ; A . data [ indexA + 6 ] = - o . y ; A . data [ indexA + 7 ] = o . x ; GeometryMath_F64 . mult ( R , X , RX ) ; b . data [ indexB ++ ] = 1 * RX . y - o . y * RX . z ; b . data [ indexB ++ ] = - 1 * RX . x + o . x * RX . z ; b . data [ indexB ] = o . y * RX . x - o . x * RX . y ; } if ( ! solver . setA ( A ) ) return false ; solver . solve ( b , x ) ; T . x = x . data [ 0 ] ; T . y = x . data [ 1 ] ; T . z = x . data [ 2 ] ; return true ; } | Computes the translation given two or more feature observations and the known rotation |
27,280 | public void process ( GrayU8 binary ) { found . reset ( ) ; labeled . reshape ( binary . width , binary . height ) ; contourFinder . process ( binary , labeled ) ; List < ContourPacked > blobs = contourFinder . getContours ( ) ; for ( int i = 0 ; i < blobs . size ( ) ; i ++ ) { ContourPacked c = blobs . get ( i ) ; contourFinder . loadContour ( c . externalIndex , contourTmp ) ; proccessContour ( contourTmp . toList ( ) ) ; if ( internalContour ) { for ( int j = 0 ; j < c . internalIndexes . size ( ) ; j ++ ) { contourFinder . loadContour ( c . internalIndexes . get ( j ) , contourTmp ) ; proccessContour ( contourTmp . toList ( ) ) ; } } } } | Finds all valid ellipses in the binary image |
27,281 | protected void adjustElipseForBinaryBias ( EllipseRotated_F64 ellipse ) { ellipse . center . x += 0.5 ; ellipse . center . y += 0.5 ; ellipse . a += 0.5 ; ellipse . b += 0.5 ; } | In a binary image the contour on the right and bottom is off by one pixel . This is because the block region extends the entire pixel not just the lower extent which is where it is indexed from . |
27,282 | void undistortContour ( List < Point2D_I32 > external , FastQueue < Point2D_F64 > pointsF ) { for ( int j = 0 ; j < external . size ( ) ; j ++ ) { Point2D_I32 p = external . get ( j ) ; if ( distToUndist != null ) { distToUndist . compute ( p . x , p . y , distortedPoint ) ; pointsF . grow ( ) . set ( distortedPoint . x , distortedPoint . y ) ; } else { pointsF . grow ( ) . set ( p . x , p . y ) ; } } } | Undistort the contour points and convert into a floating point format for the fitting operation |
27,283 | boolean isApproximatelyElliptical ( EllipseRotated_F64 ellipse , List < Point2D_F64 > points , int maxSamples ) { closestPoint . setEllipse ( ellipse ) ; double maxDistance2 = maxDistanceFromEllipse * maxDistanceFromEllipse ; if ( points . size ( ) <= maxSamples ) { for ( int i = 0 ; i < points . size ( ) ; i ++ ) { Point2D_F64 p = points . get ( i ) ; closestPoint . process ( p ) ; double d = closestPoint . getClosest ( ) . distance2 ( p ) ; if ( d > maxDistance2 ) { return false ; } } } else { for ( int i = 0 ; i < maxSamples ; i ++ ) { Point2D_F64 p = points . get ( i * points . size ( ) / maxSamples ) ; closestPoint . process ( p ) ; double d = closestPoint . getClosest ( ) . distance2 ( p ) ; if ( d > maxDistance2 ) { return false ; } } } return true ; } | Look at the maximum distance contour points are from the ellipse and see if they exceed a maximum threshold |
27,284 | public boolean filterPixelPolygon ( Polygon2D_F64 undistorted , Polygon2D_F64 distorted , GrowQueue_B touches , boolean touchesBorder ) { if ( touchesBorder ) { if ( distorted . size ( ) < 3 ) return false ; int totalRegular = distorted . size ( ) ; for ( int i = 0 ; i < distorted . size ( ) ; i ++ ) { if ( touches . get ( i ) ) totalRegular -- ; } return totalRegular > 0 ; } else { return distorted . size ( ) == 4 ; } } | If not touching the border then the number of corners must be 4 . If touching the border there must be at least 3 corners not touching the border . 7 corners at most . If there were 8 then all sides of a square would be touching the border . No more than 3 corners since that s the most number of non - border corners a square can have . |
27,285 | public Frame getFrame ( BufferedImage image , double gamma , boolean flipChannels ) { if ( image == null ) { return null ; } SampleModel sm = image . getSampleModel ( ) ; int depth = 0 , numChannels = sm . getNumBands ( ) ; switch ( image . getType ( ) ) { case BufferedImage . TYPE_INT_RGB : case BufferedImage . TYPE_INT_ARGB : case BufferedImage . TYPE_INT_ARGB_PRE : case BufferedImage . TYPE_INT_BGR : depth = Frame . DEPTH_UBYTE ; numChannels = 4 ; break ; } if ( depth == 0 || numChannels == 0 ) { switch ( sm . getDataType ( ) ) { case DataBuffer . TYPE_BYTE : depth = Frame . DEPTH_UBYTE ; break ; case DataBuffer . TYPE_USHORT : depth = Frame . DEPTH_USHORT ; break ; case DataBuffer . TYPE_SHORT : depth = Frame . DEPTH_SHORT ; break ; case DataBuffer . TYPE_INT : depth = Frame . DEPTH_INT ; break ; case DataBuffer . TYPE_FLOAT : depth = Frame . DEPTH_FLOAT ; break ; case DataBuffer . TYPE_DOUBLE : depth = Frame . DEPTH_DOUBLE ; break ; default : assert false ; } } if ( frame == null || frame . imageWidth != image . getWidth ( ) || frame . imageHeight != image . getHeight ( ) || frame . imageDepth != depth || frame . imageChannels != numChannels ) { frame = new Frame ( image . getWidth ( ) , image . getHeight ( ) , depth , numChannels ) ; } copy ( image , frame , gamma , flipChannels , null ) ; return frame ; } | Returns a Frame based on a BufferedImage given gamma and inverted channels flag . |
27,286 | public static int multiply ( int x , int y , int primitive , int domain ) { int r = 0 ; while ( y > 0 ) { if ( ( y & 1 ) != 0 ) { r = r ^ x ; } y = y >> 1 ; x = x << 1 ; if ( x >= domain ) { x ^= primitive ; } } return r ; } | Implementation of multiplication with a primitive polynomial . The result will be a member of the same field as the inputs provided primitive is an appropriate irreducible polynomial for that field . |
27,287 | private static boolean isClockWise ( Grid g ) { EllipseRotated_F64 v00 = g . get ( 0 , 0 ) ; EllipseRotated_F64 v02 = g . columns < 3 ? g . get ( 1 , 1 ) : g . get ( 0 , 2 ) ; EllipseRotated_F64 v20 = g . rows < 3 ? g . get ( 1 , 1 ) : g . get ( 2 , 0 ) ; double a_x = v02 . center . x - v00 . center . x ; double a_y = v02 . center . y - v00 . center . y ; double b_x = v20 . center . x - v00 . center . x ; double b_y = v20 . center . y - v00 . center . y ; return a_x * b_y - a_y * b_x < 0 ; } | Uses the cross product to determine if the grid is in clockwise order |
27,288 | public FDistort init ( ImageBase input , ImageBase output ) { this . input = input ; this . output = output ; inputType = input . getImageType ( ) ; interp ( InterpolationType . BILINEAR ) ; border ( 0 ) ; cached = false ; distorter = null ; outputToInput = null ; return this ; } | Specifies the input and output image and sets interpolation to BILINEAR black image border cache is off . |
27,289 | public FDistort setRefs ( ImageBase input , ImageBase output ) { this . input = input ; this . output = output ; inputType = input . getImageType ( ) ; return this ; } | All this does is set the references to the images . Nothing else is changed and its up to the user to correctly update everything else . |
27,290 | public FDistort input ( ImageBase input ) { if ( this . input == null || this . input . width != input . width || this . input . height != input . height ) { distorter = null ; } this . input = input ; inputType = input . getImageType ( ) ; return this ; } | Changes the input image . The previous distortion is thrown away only if the input image has a different shape |
27,291 | public FDistort output ( ImageBase output ) { if ( this . output == null || this . output . width != output . width || this . output . height != output . height ) { distorter = null ; } this . output = output ; return this ; } | Changes the output image . The previous distortion is thrown away only if the output image has a different shape |
27,292 | public FDistort border ( BorderType type ) { if ( borderType == type ) return this ; borderType = type ; return border ( FactoryImageBorder . generic ( type , inputType ) ) ; } | Sets the border by type . |
/**
 * Sets the border to a fixed gray-scale value.
 *
 * @param value pixel value used outside the image bounds
 * @return this, for call chaining
 */
public FDistort border( double value ) {
    // NOTE(review): borderType is recorded as ZERO regardless of 'value' —
    // presumably ZERO doubles as the marker for "constant value border";
    // confirm against border(BorderType)
    borderType = BorderType.ZERO;
    return border(FactoryImageBorder.genericValue(value, inputType));
}
/**
 * Specifies the interpolation used by type. Resets the cached distorter since
 * it is built on top of the interpolator.
 *
 * @param type interpolation method
 * @return this, for call chaining
 */
public FDistort interp( InterpolationType type ) {
    // interpolator changed, force the distorter to be rebuilt on next apply()
    distorter = null;
    // 0 to 255 is the assumed pixel value range; the EXTENDED border here is
    // presumably a placeholder overridden by the border() setting -- TODO confirm
    this.interp = FactoryInterpolation.createPixel(0, 255, type, BorderType.EXTENDED, inputType);
    return this;
}
27,295 | public FDistort affine ( double a11 , double a12 , double a21 , double a22 , double dx , double dy ) { PixelTransformAffine_F32 transform ; if ( outputToInput != null && outputToInput instanceof PixelTransformAffine_F32 ) { transform = ( PixelTransformAffine_F32 ) outputToInput ; } else { transform = new PixelTransformAffine_F32 ( ) ; } Affine2D_F32 m = new Affine2D_F32 ( ) ; m . a11 = ( float ) a11 ; m . a12 = ( float ) a12 ; m . a21 = ( float ) a21 ; m . a22 = ( float ) a22 ; m . tx = ( float ) dx ; m . ty = ( float ) dy ; m . invert ( transform . getModel ( ) ) ; return transform ( transform ) ; } | Affine transform from input to output |
27,296 | public FDistort rotate ( double angleInputToOutput ) { PixelTransform < Point2D_F32 > outputToInput = DistortSupport . transformRotate ( input . width / 2 , input . height / 2 , output . width / 2 , output . height / 2 , ( float ) angleInputToOutput ) ; return transform ( outputToInput ) ; } | Applies a distortion which will rotate the input image by the specified amount . |
/**
 * Applies the distortion, rendering the input image into the output image.
 * The renderer is constructed lazily on first use and reused until a
 * configuration change (interpolation, image shape, ...) sets it back to null.
 */
public void apply() {
    if( distorter == null ) {
        Class typeOut = output.getImageType().getImageClass();
        // select the renderer implementation matching the input image's family
        switch( input.getImageType().getFamily() ) {
            case GRAY:
                distorter = FactoryDistort.distortSB(cached, (InterpolatePixelS)interp, typeOut);
                break;

            case PLANAR:
                distorter = FactoryDistort.distortPL(cached, (InterpolatePixelS)interp, typeOut);
                break;

            case INTERLEAVED:
                // interleaved images use a multi-band interpolator and the full output type
                distorter = FactoryDistort.distortIL(cached, (InterpolatePixelMB)interp, output.getImageType());
                break;

            default:
                throw new IllegalArgumentException("Unsupported image type");
        }
    }

    distorter.setModel(outputToInput);
    distorter.apply(input, output);
}
/**
 * Computes the projective transform that converts P into identity by forming
 * the pseudo-inverse of P from its SVD.
 *
 * @param P camera/projection matrix being decomposed
 * @return true if the SVD succeeded, false otherwise
 */
public boolean process( DMatrixRMaj P ) {
    if( !svd.decompose(P) )
        return false;

    // U is requested transposed, V in normal orientation
    svd.getU(Ut, true);
    svd.getV(V, false);
    double sv[] = svd.getSingularValues();
    // order singular values largest-to-smallest, permuting U^T and V to match
    SingularOps_DDRM.descendingOrder(Ut, true, sv, 3, V, false);

    // invert the 3 non-zero singular values to build the diagonal of W+
    for( int i = 0; i < 3; i++ ) {
        Wt.unsafe_set(i, i, 1.0/sv[i]);
    }

    // PA = V * W+ * U^T, the pseudo-inverse of P
    CommonOps_DDRM.mult(V, Wt, tmp);
    CommonOps_DDRM.mult(tmp, Ut, PA);

    // ns = column 3 of V, the singular vector of the smallest singular value
    // (presumably the null space of P — confirm against computeH's usage)
    SpecializedOps_DDRM.subvector(V, 0, 3, V.numRows, false, 0, ns);

    return true;
}
27,299 | public void computeH ( DMatrixRMaj H ) { H . reshape ( 4 , 4 ) ; CommonOps_DDRM . insert ( PA , H , 0 , 0 ) ; for ( int i = 0 ; i < 4 ; i ++ ) { H . unsafe_set ( i , 3 , ns . data [ i ] ) ; } } | Retrieve projective transform H |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.