idx
int64
0
41.2k
question
stringlengths
74
4.04k
target
stringlengths
7
750
26,800
/**
 * Applies the per-block thresholds previously computed in {@code stats} to the input
 * image, one block at a time, writing the binary result into {@code output}.
 */
protected void applyThreshold( T input , GrayU8 output ) {
	final int rows = stats.height;
	final int cols = stats.width;
	for (int row = 0; row < rows; row++) {
		for (int col = 0; col < cols; col++) {
			original.thresholdBlock(col, row, input, stats, output);
		}
	}
}
Applies the dynamically computed threshold to each pixel in the image one block at a time
26,801
/**
 * Adds a new image/observation from the named camera. Creates a view node in the pairwise
 * graph, detects features in the image, and stores each feature's descriptor and pixel
 * location in the view. If the camera provides a pixel-to-normalized transform, normalized
 * observations are computed as well; otherwise that step is skipped.
 *
 * @param image image to detect features in
 * @param cameraName name of a camera previously registered with the graph
 * @throws IllegalArgumentException if the camera has not been added first
 */
public void addImage ( T image , String cameraName ) { PairwiseImageGraph . View view = new PairwiseImageGraph . View ( graph . nodes . size ( ) , new FastQueue < TupleDesc > ( TupleDesc . class , true ) { protected TupleDesc createInstance ( ) { return detDesc . createDescription ( ) ; } } ) ; view . camera = graph . cameras . get ( cameraName ) ; if ( view . camera == null ) throw new IllegalArgumentException ( "Must have added the camera first" ) ; view . index = graph . nodes . size ( ) ; graph . nodes . add ( view ) ; detDesc . detect ( image ) ; view . descriptions . growArray ( detDesc . getNumberOfFeatures ( ) ) ; view . observationPixels . growArray ( detDesc . getNumberOfFeatures ( ) ) ; for ( int i = 0 ; i < detDesc . getNumberOfFeatures ( ) ; i ++ ) { Point2D_F64 p = detDesc . getLocation ( i ) ; view . descriptions . grow ( ) . setTo ( detDesc . getDescription ( i ) ) ; view . observationPixels . grow ( ) . set ( p ) ; } if ( view . camera . pixelToNorm == null ) { return ; } view . observationNorm . growArray ( detDesc . getNumberOfFeatures ( ) ) ; for ( int i = 0 ; i < view . observationPixels . size ; i ++ ) { Point2D_F64 p = view . observationPixels . get ( i ) ; view . camera . pixelToNorm . compute ( p . x , p . y , view . observationNorm . grow ( ) ) ; } if ( verbose != null ) { verbose . println ( "Detected Features: " + detDesc . getNumberOfFeatures ( ) ) ; } }
Adds a new observation from a camera. Detects features inside the image and saves them.
26,802
/**
 * Attempts to connect two views with an epipolar model. If both cameras have known pinhole
 * intrinsics, an essential matrix is fit to normalized observations and the edge is marked
 * metric; otherwise a fundamental matrix is fit to pixel observations. The edge is only
 * added to both views and the graph if the inlier count and the inlier fractions of both
 * views pass the minimum thresholds.
 *
 * @return true if the two views were successfully connected
 */
protected boolean connectViews( PairwiseImageGraph.View viewA , PairwiseImageGraph.View viewB , FastQueue<AssociatedIndex> matches ) {
	PairwiseImageGraph.Motion edge = new PairwiseImageGraph.Motion();
	int inliersEpipolar;
	CameraPinhole pinhole0 = viewA.camera.pinhole;
	CameraPinhole pinhole1 = viewB.camera.pinhole;
	if (pinhole0 != null && pinhole1 != null) {
		// Both cameras calibrated: fit an essential matrix in normalized coordinates
		ransacEssential.setIntrinsic(0, pinhole0);
		ransacEssential.setIntrinsic(1, pinhole1);
		if (!fitEpipolar(matches, viewA.observationNorm.toList(), viewB.observationNorm.toList(), ransacEssential, edge)) {
			if (verbose != null && verboseLevel >= 1) {
				verbose.println(" fit essential failed");
			}
			return false;
		}
		edge.metric = true;
		inliersEpipolar = ransacEssential.getMatchSet().size();
		edge.F.set(ransacEssential.getModelParameters());
	} else if (fitEpipolar(matches, viewA.observationPixels.toList(), viewB.observationPixels.toList(), ransacFundamental, edge)) {
		edge.metric = false;
		inliersEpipolar = ransacFundamental.getMatchSet().size();
		edge.F.set(ransacFundamental.getModelParameters());
	} else {
		if (verbose != null && verboseLevel >= 1) {
			verbose.println(" fit fundamental failed");
		}
		return false;
	}
	if (inliersEpipolar < MIN_FEATURE_ASSOCIATED) {
		if (verbose != null && verboseLevel >= 1) {
			// FIX: message previously read " too too few inliers. "
			verbose.println(" too few inliers. " + inliersEpipolar + " min=" + MIN_FEATURE_ASSOCIATED
					+ " obsA=" + viewA.observationNorm.size + " obsB=" + viewB.observationNorm.size);
		}
		return false;
	}
	double fractionA = inliersEpipolar/(double)viewA.descriptions.size;
	double fractionB = inliersEpipolar/(double)viewB.descriptions.size;
	// FIX: use short-circuit || instead of non-short-circuit bitwise | on booleans
	if (fractionA < MIN_ASSOCIATE_FRACTION || fractionB < MIN_ASSOCIATE_FRACTION)
		return false;
	edge.viewSrc = viewA;
	edge.viewDst = viewB;
	edge.index = graph.edges.size();
	viewA.connections.add(edge);
	viewB.connections.add(edge);
	graph.edges.add(edge);
	return true;
}
Associate features between the two views. Then compute a homography and essential matrix using LMedS. Add features to the edge if they are an inlier in essential. Save fit score of homography vs essential.
26,803
/**
 * Uses RANSAC (or any ModelMatcher) to fit an epipolar model to the associated features.
 * Copies the point pairs referenced by {@code matches}, runs the matcher, and on success
 * stores a copy of each inlier association in the edge.
 *
 * @param matches candidate feature associations between the two views
 * @param pointsA observations in view A indexed by {@code AssociatedIndex.src}
 * @param pointsB observations in view B indexed by {@code AssociatedIndex.dst}
 * @param edge edge that receives copies of the inlier associations
 * @return true if the model matcher succeeded
 */
boolean fitEpipolar ( FastQueue < AssociatedIndex > matches , List < Point2D_F64 > pointsA , List < Point2D_F64 > pointsB , ModelMatcher < ? , AssociatedPair > ransac , PairwiseImageGraph . Motion edge ) { pairs . resize ( matches . size ) ; for ( int i = 0 ; i < matches . size ; i ++ ) { AssociatedIndex a = matches . get ( i ) ; pairs . get ( i ) . p1 . set ( pointsA . get ( a . src ) ) ; pairs . get ( i ) . p2 . set ( pointsB . get ( a . dst ) ) ; } if ( ! ransac . process ( pairs . toList ( ) ) ) return false ; int N = ransac . getMatchSet ( ) . size ( ) ; for ( int i = 0 ; i < N ; i ++ ) { AssociatedIndex a = matches . get ( ransac . getInputIndex ( i ) ) ; edge . associated . add ( a . copy ( ) ) ; } return true ; }
Uses ransac to fit an epipolar model to the associated features . Adds list of matched features to the edge .
26,804
/**
 * Examines the gray scale input image for candidate shapes. Validates that the binary
 * image has the expected shape (2 pixels larger in each dimension when a padded contour
 * copy is not being created, otherwise identical), reconfigures internal buffers when the
 * image size changes, runs the contour finder on the binary image, then searches the
 * contours for candidate shapes. Contour and shape times are recorded in milliseconds.
 *
 * @param gray gray scale input image
 * @param binary binarized version of the input, possibly padded
 * @throws IllegalArgumentException if the binary image shape does not match expectations
 */
public void process ( T gray , GrayU8 binary ) { if ( verbose ) System . out . println ( "ENTER DetectPolygonFromContour.process()" ) ; if ( contourPadded != null && ! contourPadded . isCreatePaddedCopy ( ) ) { int padding = 2 ; if ( gray . width + padding != binary . width || gray . height + padding != binary . height ) { throw new IllegalArgumentException ( "Including padding, expected a binary image with shape " + ( gray . width + padding ) + "x" + ( gray . height + padding ) ) ; } } else { InputSanityCheck . checkSameShape ( binary , gray ) ; } if ( imageWidth != gray . width || imageHeight != gray . height ) configure ( gray . width , gray . height ) ; for ( int i = 0 ; i < foundInfo . size ; i ++ ) { foundInfo . get ( i ) . reset ( ) ; } foundInfo . reset ( ) ; if ( contourEdgeIntensity != null ) contourEdgeIntensity . setImage ( gray ) ; long time0 = System . nanoTime ( ) ; contourFinder . process ( binary ) ; long time1 = System . nanoTime ( ) ; findCandidateShapes ( ) ; long time2 = System . nanoTime ( ) ; double a = ( time1 - time0 ) * 1e-6 ; double b = ( time2 - time1 ) * 1e-6 ; milliContour . update ( a ) ; milliShapes . update ( b ) ; if ( verbose ) System . out . println ( "EXIT DetectPolygonFromContour.process()" ) ; }
Examines the undistorted gray scale input image for squares.
26,805
/**
 * Flags which polygon corners lie on (within one pixel of) the image border.
 * One boolean is appended to {@code onImageBorder} per corner, in order.
 */
void determineCornersOnBorder( Polygon2D_F64 polygon , GrowQueue_B onImageBorder ) {
	onImageBorder.reset();
	final int numCorners = polygon.size();
	for (int idx = 0; idx < numCorners; idx++) {
		Point2D_F64 c = polygon.get(idx);
		boolean touches = c.x <= 1 || c.y <= 1 || c.x >= imageWidth - 2 || c.y >= imageHeight - 2;
		onImageBorder.add(touches);
	}
}
Check to see if corners are touching the image border
26,806
/**
 * Returns the contour associated with a shape. The backing storage is recycled, so the
 * returned list is only valid until the next call into this class.
 */
public List<Point2D_I32> getContour( Info info ) {
	contourTmp.reset();
	contourFinder.loadContour(info.contour.externalIndex, contourTmp);
	return contourTmp.toList();
}
Returns the undistorted contour for a shape . Data is potentially recycled the next time any function in this class is invoked .
26,807
/**
 * Removes lens distortion from the found contour, rounding each corrected coordinate to
 * the nearest integer pixel and storing it in {@code undistorted}.
 * NOTE(review): the field named 'distortedPoint' actually receives the *undistorted*
 * coordinate from distToUndist.compute() — the name is misleading; confirm before renaming.
 */
private void removeDistortionFromContour ( List < Point2D_I32 > distorted , FastQueue < Point2D_I32 > undistorted ) { undistorted . reset ( ) ; for ( int j = 0 ; j < distorted . size ( ) ; j ++ ) { Point2D_I32 p = distorted . get ( j ) ; Point2D_I32 q = undistorted . grow ( ) ; distToUndist . compute ( p . x , p . y , distortedPoint ) ; q . x = Math . round ( distortedPoint . x ) ; q . y = Math . round ( distortedPoint . y ) ; } }
Removes lens distortion from the found contour
26,808
/**
 * Checks whether any point of the contour lies exactly on the image border, which most
 * likely indicates the shape was cropped.
 *
 * @return true if at least one contour point touches the border
 */
protected final boolean touchesBorder( List<Point2D_I32> contour ) {
	final int lastX = imageWidth - 1;
	final int lastY = imageHeight - 1;
	for (int idx = 0; idx < contour.size(); idx++) {
		Point2D_I32 c = contour.get(idx);
		if (c.x == 0 || c.y == 0 || c.x == lastX || c.y == lastY)
			return true;
	}
	return false;
}
Checks to see if some part of the contour touches the image border . Most likely cropped
26,809
/**
 * Specify camera intrinsic parameters.
 *
 * @param fx focal length along the x-axis
 * @param fy focal length along the y-axis
 * @param skew camera skew
 */
public void set( double fx , double fy , double skew ) {
	this.skew = skew;
	this.fy = fy;
	this.fx = fx;
}
Specify camera intrinsic parameters
26,810
/**
 * Finds a homography relating two images: detects and describes point features in each
 * image, associates the descriptors, builds point pairs from the matches, and robustly
 * fits the model with the supplied ModelMatcher. Completely model/detector independent.
 *
 * @return a copy of the best-fit homography from image A to image B
 * @throws RuntimeException if the model matcher fails
 */
public static < T extends ImageGray < T > , FD extends TupleDesc > Homography2D_F64 computeTransform ( T imageA , T imageB , DetectDescribePoint < T , FD > detDesc , AssociateDescription < FD > associate , ModelMatcher < Homography2D_F64 , AssociatedPair > modelMatcher ) { List < Point2D_F64 > pointsA = new ArrayList < > ( ) ; FastQueue < FD > descA = UtilFeature . createQueue ( detDesc , 100 ) ; List < Point2D_F64 > pointsB = new ArrayList < > ( ) ; FastQueue < FD > descB = UtilFeature . createQueue ( detDesc , 100 ) ; describeImage ( imageA , detDesc , pointsA , descA ) ; describeImage ( imageB , detDesc , pointsB , descB ) ; associate . setSource ( descA ) ; associate . setDestination ( descB ) ; associate . associate ( ) ; FastQueue < AssociatedIndex > matches = associate . getMatches ( ) ; List < AssociatedPair > pairs = new ArrayList < > ( ) ; for ( int i = 0 ; i < matches . size ( ) ; i ++ ) { AssociatedIndex match = matches . get ( i ) ; Point2D_F64 a = pointsA . get ( match . src ) ; Point2D_F64 b = pointsB . get ( match . dst ) ; pairs . add ( new AssociatedPair ( a , b , false ) ) ; } if ( ! modelMatcher . process ( pairs ) ) throw new RuntimeException ( "Model Matcher failed!" ) ; return modelMatcher . getModelParameters ( ) . copy ( ) ; }
Using abstracted code, find a transform which minimizes the difference between corresponding features in both images. This code is completely model independent and is the core algorithm.
26,811
/**
 * Stitches two images together: converts both to the requested gray image type, detects
 * and describes SURF features, greedily associates them, robustly fits a homography with
 * RANSAC (ConfigRansac(60, 3) — presumably iterations and inlier tolerance; confirm), and
 * renders the stitched result.
 */
public static < T extends ImageGray < T > > void stitch ( BufferedImage imageA , BufferedImage imageB , Class < T > imageType ) { T inputA = ConvertBufferedImage . convertFromSingle ( imageA , null , imageType ) ; T inputB = ConvertBufferedImage . convertFromSingle ( imageB , null , imageType ) ; DetectDescribePoint detDesc = FactoryDetectDescribe . surfStable ( new ConfigFastHessian ( 1 , 2 , 200 , 1 , 9 , 4 , 4 ) , null , null , imageType ) ; ScoreAssociation < BrightFeature > scorer = FactoryAssociation . scoreEuclidean ( BrightFeature . class , true ) ; AssociateDescription < BrightFeature > associate = FactoryAssociation . greedy ( scorer , 2 , true ) ; ModelMatcher < Homography2D_F64 , AssociatedPair > modelMatcher = FactoryMultiViewRobust . homographyRansac ( null , new ConfigRansac ( 60 , 3 ) ) ; Homography2D_F64 H = computeTransform ( inputA , inputB , detDesc , associate , modelMatcher ) ; renderStitching ( imageA , imageB , H ) ; }
Given two input images, create and display an image where the two have been overlaid on top of each other.
26,812
/**
 * Configures stereo geometry and intrinsic camera parameters. Extracts fx, fy, cx, cy
 * from the rectified calibration matrix and stores the disparity range.
 *
 * @param baseline baseline between the two rectified cameras
 * @param K rectified intrinsic calibration matrix
 * @param rectifiedR rotation matrix associated with rectification (converted internally;
 *                   exact frame convention not visible here)
 * @param rectifiedToColor transform from rectified pixels to color-image pixels
 * @param minDisparity minimum disparity value
 * @param maxDisparity maximum disparity value; rangeDisparity = max - min
 */
public void configure ( double baseline , DMatrixRMaj K , DMatrixRMaj rectifiedR , Point2Transform2_F64 rectifiedToColor , int minDisparity , int maxDisparity ) { this . K = K ; ConvertMatrixData . convert ( rectifiedR , this . rectifiedR ) ; this . rectifiedToColor = rectifiedToColor ; this . baseline = ( float ) baseline ; this . focalLengthX = ( float ) K . get ( 0 , 0 ) ; this . focalLengthY = ( float ) K . get ( 1 , 1 ) ; this . centerX = ( float ) K . get ( 0 , 2 ) ; this . centerY = ( float ) K . get ( 1 , 2 ) ; this . minDisparity = minDisparity ; this . rangeDisparity = maxDisparity - minDisparity ; }
Stereo and intrinsic camera parameters
26,813
/**
 * Converts a disparity image plus a color image into point-cloud storage. Resizes the
 * RGB and XYZ cloud buffers to hold one entry per pixel, clears them, then dispatches on
 * the disparity image type (GrayU8 or GrayF32).
 */
public void process( ImageGray disparity , BufferedImage color ) {
	final int numPixels = disparity.width*disparity.height;
	cloudRgb.setMaxSize(numPixels);
	cloudXyz.setMaxSize(numPixels*3);
	cloudRgb.reset();
	cloudXyz.reset();
	if (disparity instanceof GrayU8) {
		process((GrayU8)disparity, color);
	} else {
		process((GrayF32)disparity, color);
	}
}
Given the disparity image compute the 3D location of valid points and save pixel colors at that point
26,814
public < T extends ImageGray < T > > void corruptImage ( T original , T corrupted ) { GGrayImageOps . stretch ( original , valueScale , valueOffset , 255.0 , corrupted ) ; GImageMiscOps . addGaussian ( corrupted , rand , valueNoise , 0 , 255 ) ; GPixelMath . boundImage ( corrupted , 0 , 255 ) ; }
Applies the specified corruption to the image .
26,815
/**
 * Normalizes the scale of H by dividing it by its middle (second smallest) singular value.
 *
 * @return false if the SVD decomposition fails
 */
protected boolean findScaleH( DMatrixRMaj H ) {
	if (!svd.decompose(H))
		return false;
	double[] sv = svd.getSingularValues();
	// after an ascending sort of the first three values, index 1 is the middle one
	Arrays.sort(sv, 0, 3);
	CommonOps_DDRM.divide(H, sv[1]);
	return true;
}
The scale of H is found by computing the second smallest singular value .
26,816
/**
 * Performs a single-level wavelet transform along the vertical axis. For each column the
 * scaling (low-pass) coefficient is written into the top half of the output and the
 * wavelet (high-pass) coefficient into the bottom half. Indices outside the image are
 * mapped by the border handler; when the output is taller than the input, taps that fall
 * past the input height are skipped. Integer results are scaled by 2/denominator.
 *
 * @param border handles indices that fall outside the (possibly odd-height) input
 * @param coefficients integer wavelet coefficients, offsets, and denominators
 * @param input image being transformed
 * @param output transformed image; may be taller than the input
 */
public static void vertical ( BorderIndex1D border , WlCoef_I32 coefficients , GrayI input , GrayI output ) { UtilWavelet . checkShape ( input , output ) ; final int offsetA = coefficients . offsetScaling ; final int offsetB = coefficients . offsetWavelet ; final int [ ] alpha = coefficients . scaling ; final int [ ] beta = coefficients . wavelet ; border . setLength ( input . height + input . height % 2 ) ; boolean isLarger = output . height > input . height ; for ( int x = 0 ; x < input . width ; x ++ ) { for ( int y = 0 ; y < input . height ; y += 2 ) { int scale = 0 ; int wavelet = 0 ; for ( int i = 0 ; i < alpha . length ; i ++ ) { int yy = border . getIndex ( y + i + offsetA ) ; if ( isLarger && yy >= input . height ) continue ; scale += input . get ( x , yy ) * alpha [ i ] ; } for ( int i = 0 ; i < beta . length ; i ++ ) { int yy = border . getIndex ( y + i + offsetB ) ; if ( isLarger && yy >= input . height ) continue ; wavelet += input . get ( x , yy ) * beta [ i ] ; } int outY = y / 2 ; scale = 2 * scale / coefficients . denominatorScaling ; wavelet = 2 * wavelet / coefficients . denominatorWavelet ; output . set ( x , outY , scale ) ; output . set ( x , output . height / 2 + outY , wavelet ) ; } } }
Performs a single level wavelet transform along the vertical axis .
26,817
/**
 * Creates a filter for convolving a 1D kernel along the image, horizontally or vertically.
 * The concrete convolution method is looked up reflectively based on the kernel class,
 * image types, and border handling.
 *
 * @param kernel 1D convolution kernel
 * @param inputType input image type; only single-band (GRAY) images are supported
 * @param outputType output image type; may be null
 * @param border how image borders are handled
 * @param isHorizontal true for horizontal convolution, false for vertical
 * @throws IllegalArgumentException if the image family is unsupported, the border type is
 *         unknown, or no matching convolution method exists
 */
public static < Input extends ImageBase < Input > , Output extends ImageBase < Output > > ConvolveInterface < Input , Output > convolve( Kernel1D kernel , ImageType<Input> inputType , ImageType<Output> outputType , BorderType border , boolean isHorizontal ) {
	if (inputType.getFamily() != ImageType.Family.GRAY)
		throw new IllegalArgumentException("Currently only gray scale image supported");
	Class _inputType = inputType.getImageClass();
	Class _outputType = outputType == null ? null : outputType.getImageClass();
	_outputType = BoofTesting.convertToGenericType(_outputType);
	Class<?> borderClassType = FactoryImageBorder.lookupBorderClassType(_inputType);
	String direction = isHorizontal ? "horizontal" : "vertical";
	Method m;
	try {
		switch (border) {
			case SKIP:
				m = ConvolveImageNoBorder.class.getMethod(direction, kernel.getClass(), _inputType, _outputType);
				break;
			// EXTENDED, REFLECT, and WRAP all resolve to the same bordered-convolution
			// method; the border behavior itself is selected later via 'border'.
			case EXTENDED:
			case REFLECT:
			case WRAP:
				m = BoofTesting.findMethod(ConvolveImage.class, direction, kernel.getClass(), _inputType, _outputType, borderClassType);
				break;
			case NORMALIZED:
				m = ConvolveImageNormalized.class.getMethod(direction, kernel.getClass(), _inputType, _outputType);
				break;
			default:
				throw new IllegalArgumentException("Unknown border type " + border);
		}
	} catch (NoSuchMethodException e) {
		throw new IllegalArgumentException("The specified convolution cannot be found");
	}
	return new GenericConvolve<>(m, kernel, border, inputType, outputType);
}
Creates a filter for convolving 1D kernels along the image .
26,818
/**
 * Creates a distortion that renders the input image, as seen by the 'original' camera
 * model, into the view of the 'desired' camera model. BorderType.SKIP is emulated using
 * EXTENDED interpolation with renderAll disabled. The (possibly adjusted) camera model is
 * written to 'modified'.
 *
 * @param type how the camera model may be adjusted to meet visibility requirements
 * @return the configured ImageDistort
 */
public static < T extends ImageBase < T > , O extends CameraPinhole , D extends CameraPinhole > ImageDistort < T , T > changeCameraModel ( AdjustmentType type , BorderType borderType , O original , D desired , D modified , ImageType < T > imageType ) { Class bandType = imageType . getImageClass ( ) ; boolean skip = borderType == BorderType . SKIP ; if ( skip ) borderType = BorderType . EXTENDED ; InterpolatePixelS interp = FactoryInterpolation . createPixelS ( 0 , 255 , InterpolationType . BILINEAR , borderType , bandType ) ; Point2Transform2_F32 undistToDist = LensDistortionOps_F32 . transformChangeModel ( type , original , desired , true , modified ) ; ImageDistort < T , T > distort = FactoryDistort . distort ( true , interp , imageType ) ; distort . setModel ( new PointToPixelTransform_F32 ( undistToDist ) ) ; distort . setRenderAll ( ! skip ) ; return distort ; }
Creates a distortion for modifying the input image from one camera model into another camera model . If requested the camera model can be further modified to ensure certain visibility requirements are meet and the adjusted camera model will be returned .
26,819
/**
 * Returns whether the pixel at (x, y) is non-zero. Points outside the image are treated
 * as true.
 */
public static boolean getT( GrayU8 image , int x , int y ) {
	if (!image.isInBounds(x, y))
		return true;
	return image.get(x, y) != 0;
}
If a point is inside the image true is returned if its value is not zero otherwise true is returned .
26,820
/**
 * Returns whether the pixel at (x, y) is non-zero. Points outside the image are treated
 * as false.
 */
public static boolean getF( GrayU8 image , int x , int y ) {
	return image.isInBounds(x, y) && image.get(x, y) != 0;
}
If a point is inside the image true is returned if its value is not zero otherwise false is returned .
26,821
/**
 * Converts a corner cluster into an ordered grid. Orders edges and nodes, selects which
 * corner should act as the origin, then rotates the grid CCW until that corner is first.
 *
 * @return false if the cluster does not form a complete rectangular grid
 */
public boolean convert( ChessboardCornerGraph cluster , GridInfo info ) {
	info.reset();
	// ordering edges must happen before nodes can be ordered
	if (!orderEdges(cluster))
		return false;
	if (!orderNodes(cluster.corners, info))
		return false;
	int corner = selectCorner(info);
	if (corner == -1) {
		if (verbose != null)
			verbose.println("Failed to find valid corner.");
		return false;
	}
	// rotate until the selected corner becomes the grid origin
	while (corner-- > 0) {
		rotateCCW(info);
	}
	return true;
}
Puts cluster nodes into grid order and computes the number of rows and columns . If the cluster is not a complete grid this function will fail and return false
26,822
/**
 * Selects which of the grid's four corners should be the origin. Candidates are scored by
 * squared distance (n.normSq()); corners that are valid "corner square" origins are
 * preferred, and non-corner candidates are only considered when allowNoCorner is set and
 * no corner-square candidate has been found yet. The optional shape check rejects
 * candidates whose implied rows/cols orientation is invalid (swapped for odd indices).
 *
 * @return index in cornerList of the selected corner (0=top-left, 1=top-right,
 *         2=bottom-right, 3=bottom-left), or -1 if none qualified
 */
int selectCorner ( GridInfo info ) { info . lookupGridCorners ( cornerList ) ; int bestCorner = - 1 ; double bestScore = Double . MAX_VALUE ; boolean bestIsCornerSquare = false ; for ( int i = 0 ; i < cornerList . size ( ) ; i ++ ) { Node n = cornerList . get ( i ) ; boolean corner = isCornerValidOrigin ( n ) ; if ( corner || ( allowNoCorner && ! bestIsCornerSquare ) ) { if ( checkShape != null ) { if ( i % 2 == 0 ) { if ( ! checkShape . isValidShape ( info . rows , info . cols ) ) { continue ; } } else { if ( ! checkShape . isValidShape ( info . cols , info . rows ) ) { continue ; } } } double distance = n . normSq ( ) ; if ( distance < bestScore || ( ! bestIsCornerSquare && corner ) ) { bestIsCornerSquare |= corner ; bestScore = distance ; bestCorner = i ; } } } info . hasCornerSquare = bestIsCornerSquare ; return bestCorner ; }
Selects a corner to be the grid s origin . 0 = top - left 1 = top - right 2 = bottom - right 3 = bottom - left .
26,823
/**
 * Orders the cluster's corners into a row-major grid. Seeds from a node with exactly two
 * edges (a grid corner), picks row and column traversal directions, swaps them if needed
 * so the right-hand rule holds, then walks the grid row by row appending nodes to
 * info.nodes. The column count is fixed by the first row.
 *
 * @return false if no two-edge corner exists or if rows have differing column counts
 */
boolean orderNodes ( FastQueue < Node > corners , GridInfo info ) { Node seed = null ; for ( int i = 0 ; i < corners . size ; i ++ ) { Node n = corners . get ( i ) ; if ( n . countEdges ( ) == 2 ) { seed = n ; break ; } } if ( seed == null ) { if ( verbose != null ) verbose . println ( "Can't find a corner with just two edges. Aborting" ) ; return false ; } int rowEdge = 0 ; while ( seed . edges [ rowEdge ] == null ) rowEdge = ( rowEdge + 1 ) % 4 ; int colEdge = ( rowEdge + 1 ) % 4 ; while ( seed . edges [ colEdge ] == null ) colEdge = ( colEdge + 2 ) % 4 ; if ( ! isRightHanded ( seed , rowEdge , colEdge ) ) { int tmp = rowEdge ; rowEdge = colEdge ; colEdge = tmp ; } while ( seed != null ) { int before = info . nodes . size ( ) ; Node n = seed ; do { info . nodes . add ( n ) ; n = n . edges [ colEdge ] ; } while ( n != null ) ; seed = seed . edges [ rowEdge ] ; if ( info . cols == - 1 ) { info . cols = info . nodes . size ( ) ; } else { int columnsInRow = info . nodes . size ( ) - before ; if ( columnsInRow != info . cols ) { if ( verbose != null ) verbose . println ( "Number of columns in each row is variable" ) ; return false ; } } } info . rows = info . nodes . size ( ) / info . cols ; return true ; }
Put corners into a proper grid. Make sure it's a rectangular grid, or else return false. Rows and columns are selected to ensure the right-hand rule.
26,824
/**
 * Checks whether the row and column edges of the seed node form a right-handed
 * coordinate system.
 */
static boolean isRightHanded( Node seed , int idxRow , int idxCol ) {
	Node rowNode = seed.edges[idxRow];
	Node colNode = seed.edges[idxCol];
	double angleRow = Math.atan2(rowNode.y - seed.y, rowNode.x - seed.x);
	double angleCol = Math.atan2(colNode.y - seed.y, colNode.x - seed.x);
	// right handed when the column direction is less than 180 degrees clockwise of the row direction
	return UtilAngle.distanceCW(angleRow, angleCol) < Math.PI;
}
Checks to see if the rows and columns form a coordinate system which is right handed
26,825
/**
 * Sorts each node's (up to four) edges into increasing counter-clockwise order, using the
 * first non-null edge's direction as the zero reference; null edges sort last. With
 * exactly two edges, the pair is swapped if they are more than 180 degrees apart CCW.
 * With three edges, the null slot is repositioned so it falls across the largest angular
 * gap between consecutive edges.
 */
void sortEdgesCCW ( FastQueue < Node > corners ) { for ( int nodeIdx = 0 ; nodeIdx < corners . size ; nodeIdx ++ ) { Node na = corners . get ( nodeIdx ) ; double ref = Double . NaN ; int count = 0 ; for ( int i = 0 ; i < 4 ; i ++ ) { order [ i ] = i ; tmpEdges [ i ] = na . edges [ i ] ; if ( na . edges [ i ] == null ) { directions [ i ] = Double . MAX_VALUE ; } else { Node nb = na . edges [ i ] ; double angleB = Math . atan2 ( nb . y - na . y , nb . x - na . x ) ; if ( Double . isNaN ( ref ) ) { ref = angleB ; directions [ i ] = 0 ; } else { directions [ i ] = UtilAngle . distanceCCW ( ref , angleB ) ; } count ++ ; } } sorter . sort ( directions , 0 , 4 , order ) ; for ( int i = 0 ; i < 4 ; i ++ ) { na . edges [ i ] = tmpEdges [ order [ i ] ] ; } if ( count == 2 ) { if ( directions [ order [ 1 ] ] > Math . PI ) { na . edges [ 0 ] = tmpEdges [ order [ 1 ] ] ; na . edges [ 1 ] = tmpEdges [ order [ 0 ] ] ; } else { na . edges [ 0 ] = tmpEdges [ order [ 0 ] ] ; na . edges [ 1 ] = tmpEdges [ order [ 1 ] ] ; } } else if ( count == 3 ) { int selected = - 1 ; double largestAngle = 0 ; for ( int i = 0 , j = 2 ; i < 3 ; j = i , i ++ ) { double ccw = UtilAngle . distanceCCW ( directions [ order [ j ] ] , directions [ order [ i ] ] ) ; if ( ccw > largestAngle ) { largestAngle = ccw ; selected = j ; } } for ( int i = 2 ; i > selected ; i -- ) { na . edges [ i + 1 ] = na . edges [ i ] ; } na . edges [ selected + 1 ] = null ; } } }
Sorts edges so that they point towards nodes in an increasing counter clockwise direction
26,826
/**
 * Rotates the grid 90 degrees in the counter-clockwise direction, swapping the row and
 * column counts and rewriting the node ordering.
 */
public void rotateCCW( GridInfo grid ) {
	cornerList.clear();
	// build the rotated ordering by reading columns from last to first
	for (int col = 0; col < grid.cols; col++) {
		for (int row = 0; row < grid.rows; row++) {
			cornerList.add(grid.get(row, grid.cols - col - 1));
		}
	}
	// swap the grid dimensions
	int swap = grid.rows;
	grid.rows = grid.cols;
	grid.cols = swap;
	grid.nodes.clear();
	grid.nodes.addAll(cornerList);
}
Rotates the grid in the CCW direction
26,827
/**
 * Specifies stereo parameters and precomputes rectification. Reshapes the rectified image
 * buffers, computes rectification homographies and the rectified calibration matrix from
 * the left/right pinhole models and the left-to-right extrinsics, creates the image
 * rectifiers (SKIP border), and caches baseline, fx, fy, cx, cy for 3D reconstruction.
 */
public void setCalibration ( StereoParameters stereoParam ) { CameraPinholeBrown left = stereoParam . getLeft ( ) ; CameraPinholeBrown right = stereoParam . getRight ( ) ; imageLeftRect . reshape ( left . getWidth ( ) , left . getHeight ( ) ) ; imageRightRect . reshape ( right . getWidth ( ) , right . getHeight ( ) ) ; RectifyCalibrated rectifyAlg = RectifyImageOps . createCalibrated ( ) ; Se3_F64 leftToRight = stereoParam . getRightToLeft ( ) . invert ( null ) ; DMatrixRMaj K1 = PerspectiveOps . pinholeToMatrix ( left , ( DMatrixRMaj ) null ) ; DMatrixRMaj K2 = PerspectiveOps . pinholeToMatrix ( right , ( DMatrixRMaj ) null ) ; rectifyAlg . process ( K1 , new Se3_F64 ( ) , K2 , leftToRight ) ; rect1 = rectifyAlg . getRect1 ( ) ; rect2 = rectifyAlg . getRect2 ( ) ; rectK = rectifyAlg . getCalibrationMatrix ( ) ; rectR = rectifyAlg . getRectifiedRotation ( ) ; FMatrixRMaj rect1_F32 = new FMatrixRMaj ( 3 , 3 ) ; FMatrixRMaj rect2_F32 = new FMatrixRMaj ( 3 , 3 ) ; ConvertMatrixData . convert ( rect1 , rect1_F32 ) ; ConvertMatrixData . convert ( rect2 , rect2_F32 ) ; ImageType < T > imageType = imageLeftRect . getImageType ( ) ; distortLeftRect = RectifyImageOps . rectifyImage ( stereoParam . left , rect1_F32 , BorderType . SKIP , imageType ) ; distortRightRect = RectifyImageOps . rectifyImage ( stereoParam . right , rect2_F32 , BorderType . SKIP , imageType ) ; baseline = stereoParam . getBaseline ( ) ; fx = rectK . get ( 0 , 0 ) ; fy = rectK . get ( 1 , 1 ) ; cx = rectK . get ( 0 , 2 ) ; cy = rectK . get ( 1 , 2 ) ; }
Specifies stereo parameters
26,828
/**
 * Given a pixel coordinate in the left rectified frame, computes the point's 3D
 * coordinate in the camera's reference frame in homogeneous coordinates; divide each
 * element by the disparity to obtain regular 3D coordinates.
 */
public void computeHomo3D( double x , double y , Point3D_F64 pointLeft ) {
	// homogeneous depth term shared by all three components
	double z = baseline*fx;
	pointRect.z = z;
	pointRect.x = z*(x - cx)/fx;
	pointRect.y = z*(y - cy)/fy;
	// rotate from the rectified frame into the left camera's frame
	GeometryMath_F64.multTran(rectR, pointRect, pointLeft);
}
Given a coordinate of a point in the left rectified frame compute the point s 3D coordinate in the camera s reference frame in homogeneous coordinates . To convert the coordinate into normal 3D divide each element by the disparity .
26,829
/**
 * Given the input image type, returns the type the integral image should be:
 * GrayF32 stays GrayF32, while GrayU8 and GrayS32 both map to GrayS32.
 *
 * @throws IllegalArgumentException for any other input type
 */
public static <I extends ImageGray<I>, II extends ImageGray<II>> Class<II> getIntegralType( Class<I> inputType ) {
	if (inputType == GrayF32.class)
		return (Class<II>)GrayF32.class;
	if (inputType == GrayU8.class || inputType == GrayS32.class)
		return (Class<II>)GrayS32.class;
	throw new IllegalArgumentException("Unknown input image type: " + inputType.getSimpleName());
}
Given the input image return the type of image the integral image should be .
26,830
/**
 * Estimates the pose stability of the specified fiducial by perturbing its landmarks by
 * 'disturbance' pixels, using the fiducial's current estimated pose and physical size.
 *
 * @return false if the fiducial's pose could not be determined
 */
public boolean computeStability ( int which , double disturbance , FiducialStability results ) { if ( ! getFiducialToCamera ( which , targetToCamera ) ) return false ; stability . setShape ( getSideWidth ( which ) , getSideHeight ( which ) ) ; stability . computeStability ( targetToCamera , disturbance , results ) ; return true ; }
Estimates the stability by perturbing each land mark by the specified number of pixels in the distorted image .
26,831
/**
 * Builds detected2D3D: for each detected pixel, computes the normalized image observation
 * and pairs it with the corresponding known 3D control point.
 * NOTE(review): assumes 'pixels' matches getControl3D(which) index-for-index — confirm.
 */
private void createDetectedList ( int which , List < PointIndex2D_F64 > pixels ) { detected2D3D . clear ( ) ; List < Point2D3D > all = getControl3D ( which ) ; for ( int i = 0 ; i < pixels . size ( ) ; i ++ ) { PointIndex2D_F64 a = pixels . get ( i ) ; Point2D3D b = all . get ( i ) ; pixelToNorm . compute ( a . x , a . y , b . observation ) ; detected2D3D . add ( b ) ; } }
Create the list of observed points in 2D3D
26,832
/**
 * Estimates the fiducial's pose from 2D-3D correspondences (PnP). With more than 6 points
 * an initial estimate is used to compute reprojection errors and reject outliers before
 * re-estimating; with 6 or fewer all points are kept. The result is then refined.
 *
 * NOTE(review): 'stdev' is actually the plain sum of the per-point errors from
 * distance2(), not a standard deviation — confirm the intended statistic behind
 * sigma3 = max(1.5, 4*stdev).
 * NOTE(review): the final refinePnP.fitModel() is passed 'points', not 'filtered', which
 * appears to discard the outlier rejection — verify against the original intent.
 *
 * @return false if the initial or re-run PnP estimation fails
 */
protected boolean estimatePose ( int which , List < Point2D3D > points , Se3_F64 fiducialToCamera ) { if ( ! estimatePnP . process ( points , initialEstimate ) ) { return false ; } filtered . clear ( ) ; if ( points . size ( ) > 6 ) { w2p . configure ( lensDistortion , initialEstimate ) ; errors . reset ( ) ; for ( int idx = 0 ; idx < detectedPixels . size ( ) ; idx ++ ) { PointIndex2D_F64 foo = detectedPixels . get ( idx ) ; w2p . transform ( points . get ( idx ) . location , predicted ) ; errors . add ( predicted . distance2 ( foo ) ) ; } double stdev = 0 ; for ( int i = 0 ; i < errors . size ; i ++ ) { stdev += errors . get ( i ) ; } double sigma3 = Math . max ( 1.5 , 4 * stdev ) ; for ( int i = 0 ; i < points . size ( ) ; i ++ ) { if ( errors . get ( i ) < sigma3 ) { filtered . add ( points . get ( i ) ) ; } } if ( filtered . size ( ) != points . size ( ) ) { if ( ! estimatePnP . process ( filtered , initialEstimate ) ) { return false ; } } } else { filtered . addAll ( points ) ; } return refinePnP . fitModel ( points , initialEstimate , fiducialToCamera ) ; }
Given the mapping of 2D observations to known 3D points estimate the pose of the fiducial . This solves the P - n - P problem .
26,833
/**
 * Computes the Hough transform from the image gradient and a binary edge image. Clears
 * the accumulator and candidate list, sets the parameter-space origin to the image
 * center, then dispatches on the gradient image type (GrayF32, GrayS16, or GrayS32).
 *
 * @throws IllegalArgumentException if the derivative image type is unsupported
 */
public < D extends ImageGray < D > > void transform ( D derivX , D derivY , GrayU8 binary ) { InputSanityCheck . checkSameShape ( derivX , derivY , binary ) ; transform . reshape ( derivX . width , derivY . height ) ; ImageMiscOps . fill ( transform , 0 ) ; originX = derivX . width / 2 ; originY = derivX . height / 2 ; candidates . reset ( ) ; if ( derivX instanceof GrayF32 ) _transform ( ( GrayF32 ) derivX , ( GrayF32 ) derivY , binary ) ; else if ( derivX instanceof GrayS16 ) _transform ( ( GrayS16 ) derivX , ( GrayS16 ) derivY , binary ) ; else if ( derivX instanceof GrayS32 ) _transform ( ( GrayS32 ) derivX , ( GrayS32 ) derivY , binary ) ; else throw new IllegalArgumentException ( "Unsupported derivative image type: " + derivX . getClass ( ) . getSimpleName ( ) ) ; }
Computes the Hough transform using the image gradient and a binary image which flags pixels as being edges or not .
26,834
/**
 * Takes a detected edge point and its gradient and votes in the transform (parameter)
 * space. The point is projected onto the gradient direction to find the line's closest
 * point to the origin, which is the parameterization; the accumulator cell is then
 * incremented if it lies inside the transform image.
 * NOTE(review): a cell is added to 'candidates' when its counter goes from 1 to 2, i.e.
 * on its second vote — confirm single-vote cells are intentionally excluded.
 */
public void parameterize ( int x , int y , float derivX , float derivY ) { x -= originX ; y -= originY ; float v = ( x * derivX + y * derivY ) / ( derivX * derivX + derivY * derivY ) ; int x0 = ( int ) ( v * derivX ) + originX ; int y0 = ( int ) ( v * derivY ) + originY ; if ( transform . isInBounds ( x0 , y0 ) ) { int index = transform . startIndex + y0 * transform . stride + x0 ; if ( transform . data [ index ] ++ == 1 ) candidates . add ( x0 , y0 ) ; } }
Takes the detected point along the line and its gradient and converts it into transform space .
26,835
/**
 * Demonstrates the high-level dense-descriptor interface: computes dense HOG descriptors
 * over the input image, prints the total feature count, then prints the location and the
 * first four descriptor values of the first five features.
 */
public static void HighLevel ( GrayF32 input ) { System . out . println ( "\n------------------- Dense High Level" ) ; DescribeImageDense < GrayF32 , TupleDesc_F64 > describer = FactoryDescribeImageDense . hog ( new ConfigDenseHoG ( ) , input . getImageType ( ) ) ; describer . process ( input ) ; System . out . println ( "Total Features = " + describer . getLocations ( ) . size ( ) ) ; for ( int i = 0 ; i < 5 ; i ++ ) { Point2D_I32 p = describer . getLocations ( ) . get ( i ) ; TupleDesc_F64 d = describer . getDescriptions ( ) . get ( i ) ; System . out . printf ( "%3d %3d = [ %f %f %f %f\n" , p . x , p . y , d . value [ 0 ] , d . value [ 1 ] , d . value [ 2 ] , d . value [ 3 ] ) ; } }
For much larger images you might need to shrink the image down or change the cell size to get good results .
26,836
/**
 * Computes disparity between two rectified stereo images. Iterates over the valid inner
 * region (inset by twice the region radius, plus minDisparity on the left), limits the
 * disparity search range near the left border, and writes the best disparity per pixel.
 *
 * @param left rectified left image
 * @param right rectified right image
 * @param imageDisparity output disparity image, same shape as the inputs
 */
public void process ( I left , I right , GrayF32 imageDisparity ) { InputSanityCheck . checkSameShape ( left , right , imageDisparity ) ; this . imageLeft = left ; this . imageRight = right ; w = left . width ; h = left . height ; for ( int y = radiusY * 2 ; y < h - radiusY * 2 ; y ++ ) { for ( int x = radiusX * 2 + minDisparity ; x < w - radiusX * 2 ; x ++ ) { int max = x - Math . max ( radiusX * 2 - 1 , x - score . length ) ; processPixel ( x , y , max ) ; imageDisparity . set ( x , y , ( float ) selectBest ( max ) ) ; } } }
Computes the disparity for two stereo images along the image s right axis . Both image must be rectified .
26,837
/**
 * Computes the fit score for each candidate disparity at pixel (c_x, c_y).
 * score[d] is the error between the left pixel at c_x and the right pixel at c_x - d.
 */
private void processPixel( int c_x , int c_y , int maxDisparity ) {
	for (int d = minDisparity; d < maxDisparity; d++) {
		score[d] = computeScore(c_x, c_x - d, c_y);
	}
}
Computes fit score for each possible disparity
26,838
/**
 * Selects the best (lowest-score) disparity using the winner-takes-all approach.
 *
 * @return the winning disparity relative to minDisparity
 */
protected double selectBest( int length ) {
	int bestIndex = -1;
	double bestScore = Double.MAX_VALUE;
	for (int d = minDisparity; d < length; d++) {
		double s = score[d];
		if (s < bestScore) {
			bestScore = s;
			bestIndex = d;
		}
	}
	return bestIndex - minDisparity;
}
Select the best disparity using the winner-takes-all approach
26,839
/**
 * Computes the score for five local regions (center plus four corner regions offset by
 * the region radius) and returns the center plus the two best corner scores.
 */
protected double computeScore( int leftX , int rightX , int centerY ) {
	double center = computeScoreRect(leftX, rightX, centerY);
	four[0] = computeScoreRect(leftX - radiusX, rightX - radiusX, centerY - radiusY);
	four[1] = computeScoreRect(leftX + radiusX, rightX + radiusX, centerY - radiusY);
	four[2] = computeScoreRect(leftX - radiusX, rightX - radiusX, centerY + radiusY);
	four[3] = computeScoreRect(leftX + radiusX, rightX + radiusX, centerY + radiusY);
	// after sorting ascending, the first two entries are the two best corner regions
	Arrays.sort(four);
	return four[0] + four[1] + center;
}
Compute the score for five local regions and just use the center + the two best
26,840
/**
 * Renders the QR code into the output. Draws, in order: the three position patterns, the two
 * timing patterns, format information, version information (only for versions that encode it),
 * and the alignment patterns — skipping the three locations that would overlap a position
 * pattern. If renderData is set the raw codeword bits are rendered too, after sanity checking
 * their length against the version's expected codeword count. Finally the marker's axis-aligned
 * bounds are stored in qr.bounds.
 *
 * @param qr the QR code description to render; qr.bounds is modified as a side effect
 */
public void render ( QrCode qr ) { initialize ( qr ) ; render . init ( ) ; positionPattern ( 0 , 0 , qr . ppCorner ) ; positionPattern ( ( numModules - 7 ) * moduleWidth , 0 , qr . ppRight ) ; positionPattern ( 0 , ( numModules - 7 ) * moduleWidth , qr . ppDown ) ; timingPattern ( 7 * moduleWidth , 6 * moduleWidth , moduleWidth , 0 ) ; timingPattern ( 6 * moduleWidth , 7 * moduleWidth , 0 , moduleWidth ) ; formatInformation ( ) ; if ( qr . version >= QrCode . VERSION_ENCODED_AT ) versionInformation ( ) ; int alignment [ ] = QrCode . VERSION_INFO [ qr . version ] . alignment ; for ( int i = 0 ; i < alignment . length ; i ++ ) { int row = alignment [ i ] ; for ( int j = 0 ; j < alignment . length ; j ++ ) { if ( i == 0 & j == 0 ) continue ; if ( i == alignment . length - 1 & j == 0 ) continue ; if ( i == 0 & j == alignment . length - 1 ) continue ; int col = alignment [ j ] ; alignmentPattern ( col , row ) ; } } if ( renderData ) { if ( qr . rawbits . length != QrCode . VERSION_INFO [ qr . version ] . codewords ) throw new RuntimeException ( "Unexpected length of raw data." ) ; bitLocations = new QrCodeCodeWordLocations ( qr . version ) . bits ; int numBytes = bitLocations . size ( ) / 8 ; if ( numBytes != qr . rawbits . length ) throw new RuntimeException ( "Egads. unexpected length of qrcode raw data" ) ; renderData ( ) ; } qr . bounds . set ( 0 , 0 , 0 ) ; qr . bounds . set ( 1 , markerWidth , 0 ) ; qr . bounds . set ( 2 , markerWidth , markerWidth ) ; qr . bounds . set ( 3 , 0 , markerWidth ) ; }
Generates a QR Code with the specified message . An exception is thrown if the message is too long to be encoded .
26,841
/**
 * Renders the raw codeword bits into the marker, applying the QR code's mask pattern.
 * Iterates over whole bytes only (bitLocations trimmed down to a multiple of 8), reading
 * each byte LSB first and drawing a square at each bit location where the masked value is 1.
 */
private void renderData ( ) { QrCodeMaskPattern mask = qr . mask ; int count = 0 ; int length = bitLocations . size ( ) - bitLocations . size ( ) % 8 ; while ( count < length ) { int bits = qr . rawbits [ count / 8 ] & 0xFF ; int N = Math . min ( 8 , bitLocations . size ( ) - count ) ; for ( int i = 0 ; i < N ; i ++ ) { Point2D_I32 coor = bitLocations . get ( count + i ) ; int value = mask . apply ( coor . y , coor . x , ( ( bits >> i ) & 0x01 ) ) ; if ( value > 0 ) { square ( coor . y , coor . x ) ; } } count += 8 ; } }
Renders the raw data bit output while applying the selected mask
26,842
/**
 * Loads a camera calibration stored in OpenCV's FileStorage YAML format and converts it into
 * a BoofCV {@link CameraPinholeBrown}. Reads image size, the 3x3 camera matrix, and the
 * distortion coefficient vector. With 5+ coefficients radial terms k1,k2,k3 and tangential
 * t1,t2 are set; with 2-4 coefficients only k1,k2 are set.
 * NOTE(review): the FileStorage handle is never released here — confirm whether the caller
 * or garbage collection is expected to free the native resource.
 *
 * @param fileName path to the OpenCV calibration file
 * @return equivalent BoofCV camera model
 */
public static CameraPinholeBrown loadPinholeRadial ( String fileName ) { FileStorage fs = new FileStorage ( new File ( fileName ) . getAbsolutePath ( ) , FileStorage . READ ) ; IntPointer width = new IntPointer ( 1 ) ; IntPointer height = new IntPointer ( 1 ) ; read ( fs . get ( "image_width" ) , width , - 1 ) ; read ( fs . get ( "image_height" ) , height , - 1 ) ; Mat K = new Mat ( ) ; read ( fs . get ( "camera_matrix" ) , K ) ; Mat distortion = new Mat ( ) ; read ( fs . get ( "distortion_coefficients" ) , distortion ) ; CameraPinholeBrown boof = new CameraPinholeBrown ( ) ; boof . width = width . get ( ) ; boof . height = height . get ( ) ; DoubleRawIndexer indexerK = K . createIndexer ( ) ; boof . fx = indexerK . get ( 0 , 0 ) ; boof . skew = indexerK . get ( 0 , 1 ) ; boof . fy = indexerK . get ( 1 , 1 ) ; boof . cx = indexerK . get ( 0 , 2 ) ; boof . cy = indexerK . get ( 1 , 2 ) ; DoubleRawIndexer indexerD = distortion . createIndexer ( ) ; if ( distortion . rows ( ) >= 5 ) boof . setRadial ( indexerD . get ( 0 , 0 ) , indexerD . get ( 1 , 0 ) , indexerD . get ( 4 , 0 ) ) ; else if ( distortion . rows ( ) >= 2 ) boof . setRadial ( indexerD . get ( 0 , 0 ) , indexerD . get ( 1 , 0 ) ) ; if ( distortion . rows ( ) >= 5 ) boof . fsetTangental ( indexerD . get ( 2 , 0 ) , indexerD . get ( 3 , 0 ) ) ; return boof ; }
Loads a pinhole camera model with radial and tangential distortion in OpenCV format
26,843
/**
 * Finds the squared Euclidean distance from point p to the closest of the four corners.
 *
 * @param p query point
 * @return squared distance to the nearest corner
 */
public double distanceSqCorner ( Point2D_F64 p ) {
	double closest = Double.MAX_VALUE;
	for (int cornerIdx = 0; cornerIdx < 4; cornerIdx++) {
		closest = Math.min(closest, square.get(cornerIdx).distance2(p));
	}
	return closest;
}
Finds the Euclidean distance squared of the closest corner to point p
26,844
/**
 * Discards previously computed information and returns the node to its initial state.
 * Every edge slot is expected to have been detached already; a non-null edge here indicates
 * an internal bookkeeping error, hence the exception.
 */
public void reset ( ) { square = null ; touch = null ; center . set ( - 1 , - 1 ) ; largestSide = 0 ; smallestSide = Double . MAX_VALUE ; graph = RESET_GRAPH ; for ( int i = 0 ; i < edges . length ; i ++ ) { if ( edges [ i ] != null ) throw new RuntimeException ( "BUG!" ) ; sideLengths [ i ] = 0 ; } }
Discards previous information
26,845
/**
 * Reallocates the edge and side-length arrays so their length matches the number of
 * sides in the polygon. No-op when the length is already correct.
 */
public void updateArrayLength ( ) {
	final int numSides = square.size();
	if (edges.length != numSides) {
		edges = new SquareEdge[numSides];
		sideLengths = new double[numSides];
	}
}
Resizes the internal edge and side-length arrays if the number of polygon sides has changed
26,846
/**
 * Counts how many edges are attached to this node.
 *
 * @return number of non-null edge slots, one per polygon side
 */
public int getNumberOfConnections ( ) {
	int count = 0;
	for (int side = 0; side < square.size(); side++) {
		if (edges[side] != null)
			count++;
	}
	return count;
}
Computes the number of edges attached to this node
26,847
/**
 * Creates a single-band pixel interpolator of the specified type.
 * Note the asymmetry: BILINEAR returns immediately because its factory takes the border type
 * directly, while the other types fall through to setBorder() at the bottom (skipped when
 * borderType is null).
 *
 * @param min minimum possible pixel value (used by bicubic/polynomial clamping)
 * @param max maximum possible pixel value
 * @param type interpolation algorithm to create
 * @param borderType how image borders are handled; may be null for no border handling
 * @param imageType type of gray image being interpolated
 * @return new interpolator instance
 */
public static < T extends ImageGray < T > > InterpolatePixelS < T > createPixelS ( double min , double max , InterpolationType type , BorderType borderType , Class < T > imageType ) { InterpolatePixelS < T > alg ; switch ( type ) { case NEAREST_NEIGHBOR : alg = nearestNeighborPixelS ( imageType ) ; break ; case BILINEAR : return bilinearPixelS ( imageType , borderType ) ; case BICUBIC : alg = bicubicS ( - 0.5f , ( float ) min , ( float ) max , imageType ) ; break ; case POLYNOMIAL4 : alg = polynomialS ( 4 , min , max , imageType ) ; break ; default : throw new IllegalArgumentException ( "Add type: " + type ) ; } if ( borderType != null ) alg . setBorder ( FactoryImageBorder . single ( imageType , borderType ) ) ; return alg ; }
Creates an interpolation class of the specified type for the specified image type .
26,848
/**
 * Creates a multi-band pixel interpolator for the specified image family. Planar images wrap
 * a per-band single-band interpolator; gray images are adapted through a single-band-to-MB
 * wrapper; interleaved images support only nearest-neighbor and bilinear.
 *
 * @param min minimum possible pixel value
 * @param max maximum possible pixel value
 * @param type interpolation algorithm
 * @param borderType border handling; may be null
 * @param imageType full image type including family and band information
 * @return new multi-band interpolator
 */
public static < T extends ImageBase < T > > InterpolatePixelMB < T > createPixelMB ( double min , double max , InterpolationType type , BorderType borderType , ImageType < T > imageType ) { switch ( imageType . getFamily ( ) ) { case PLANAR : return ( InterpolatePixelMB < T > ) createPixelPL ( ( InterpolatePixelS ) createPixelS ( min , max , type , borderType , imageType . getDataType ( ) ) ) ; case GRAY : { InterpolatePixelS interpS = createPixelS ( min , max , type , borderType , imageType . getImageClass ( ) ) ; return new InterpolatePixel_S_to_MB ( interpS ) ; } case INTERLEAVED : switch ( type ) { case NEAREST_NEIGHBOR : return nearestNeighborPixelMB ( ( ImageType ) imageType , borderType ) ; case BILINEAR : return bilinearPixelMB ( ( ImageType ) imageType , borderType ) ; default : throw new IllegalArgumentException ( "Interpolate type not yet support for ImageInterleaved" ) ; } default : throw new IllegalArgumentException ( "Add type: " + type ) ; } }
Pixel based interpolation on multi - band image
26,849
/**
 * Converts a YU12-encoded byte array into the supplied BoofCV image. Dispatches on the output
 * image family (Planar RGB, single-band gray, or interleaved RGB) and on the band type
 * (U8 or F32), selecting a concurrent implementation when BoofConcurrency.USE_CONCURRENT is set.
 * Planar and interleaved outputs are reshaped to width x height with 3 bands.
 * NOTE(review): the Planar/Interleaved paths call yv12To* converters — YU12 and YV12 differ
 * only in U/V plane order, so chroma may be swapped; confirm against the implementations.
 *
 * @param data raw YU12 bytes (Y plane followed by chroma planes)
 * @param width image width in pixels
 * @param height image height in pixels
 * @param output destination image; unsupported types cause IllegalArgumentException
 */
public static void yu12ToBoof ( byte [ ] data , int width , int height , ImageBase output ) { if ( output instanceof Planar ) { Planar ms = ( Planar ) output ; ms . reshape ( width , height , 3 ) ; if ( BoofConcurrency . USE_CONCURRENT ) { if ( ms . getBandType ( ) == GrayU8 . class ) { ImplConvertYV12_MT . yv12ToPlanarRgb_U8 ( data , ms ) ; } else if ( ms . getBandType ( ) == GrayF32 . class ) { ImplConvertYV12_MT . yv12ToPlanarRgb_F32 ( data , ms ) ; } else { throw new IllegalArgumentException ( "Unsupported output band format" ) ; } } else { if ( ms . getBandType ( ) == GrayU8 . class ) { ImplConvertYV12 . yv12ToPlanarRgb_U8 ( data , ms ) ; } else if ( ms . getBandType ( ) == GrayF32 . class ) { ImplConvertYV12 . yv12ToPlanarRgb_F32 ( data , ms ) ; } else { throw new IllegalArgumentException ( "Unsupported output band format" ) ; } } } else if ( output instanceof ImageGray ) { if ( output . getClass ( ) == GrayU8 . class ) { yu12ToGray ( data , width , height , ( GrayU8 ) output ) ; } else if ( output . getClass ( ) == GrayF32 . class ) { yu12ToGray ( data , width , height , ( GrayF32 ) output ) ; } else { throw new IllegalArgumentException ( "Unsupported output type" ) ; } } else if ( output instanceof ImageInterleaved ) { ( ( ImageMultiBand ) output ) . reshape ( width , height , 3 ) ; if ( BoofConcurrency . USE_CONCURRENT ) { if ( output . getClass ( ) == InterleavedU8 . class ) { ImplConvertYV12_MT . yv12ToInterleaved ( data , ( InterleavedU8 ) output ) ; } else if ( output . getClass ( ) == InterleavedF32 . class ) { ImplConvertYV12_MT . yv12ToInterleaved ( data , ( InterleavedF32 ) output ) ; } else { throw new IllegalArgumentException ( "Unsupported output type" ) ; } } else { if ( output . getClass ( ) == InterleavedU8 . class ) { ImplConvertYV12 . yv12ToInterleaved ( data , ( InterleavedU8 ) output ) ; } else if ( output . getClass ( ) == InterleavedF32 . class ) { ImplConvertYV12 . yv12ToInterleaved ( data , ( InterleavedF32 ) output ) ; } else { throw new IllegalArgumentException ( "Unsupported output type" ) ; } } } else { throw new IllegalArgumentException ( "Boofcv image type not yet supported" ) ; } }
Converts a YU12 encoded byte array into a BoofCV formatted image .
26,850
/**
 * Extracts the luminance plane of a YV12/YU12 byte array into a GrayU8 image. If the output is
 * null a new image is declared; otherwise its shape must match exactly.
 * Note: this delegates to the NV21 gray converter — presumably safe because the Y plane layout
 * is the same in NV21 and YV12, and only Y is read for gray output; confirm against the
 * implementation if chroma handling ever changes.
 *
 * @param data encoded image bytes, Y plane first
 * @param width image width
 * @param height image height
 * @param output optional storage for the result; may be null
 * @return the gray image
 */
public static GrayU8 yu12ToGray ( byte [ ] data , int width , int height , GrayU8 output ) { if ( output != null ) { if ( output . width != width || output . height != height ) throw new IllegalArgumentException ( "output width and height must be " + width + " " + height ) ; } else { output = new GrayU8 ( width , height ) ; } if ( BoofConcurrency . USE_CONCURRENT ) { ImplConvertNV21_MT . nv21ToGray ( data , output ) ; } else { ImplConvertNV21 . nv21ToGray ( data , output ) ; } return output ; }
Converts a YV12 image into a gray scale U8 image.
26,851
/**
 * Creates a Euclidean-distance association score for the descriptor type. The squared variant
 * is cheaper since it skips the square root.
 * Note: for TupleDesc_F32 only the squared variant is implemented — requesting the
 * non-squared F32 score falls through to the IllegalArgumentException below.
 *
 * @param tupleType descriptor class; TupleDesc_F64 (and subclasses) or TupleDesc_F32
 * @param squared true for squared Euclidean distance
 * @return score function for the requested type
 */
public static < D > ScoreAssociation < D > scoreEuclidean ( Class < D > tupleType , boolean squared ) { if ( TupleDesc_F64 . class . isAssignableFrom ( tupleType ) ) { if ( squared ) return ( ScoreAssociation ) new ScoreAssociateEuclideanSq_F64 ( ) ; else return ( ScoreAssociation ) new ScoreAssociateEuclidean_F64 ( ) ; } else if ( tupleType == TupleDesc_F32 . class ) { if ( squared ) return ( ScoreAssociation ) new ScoreAssociateEuclideanSq_F32 ( ) ; } throw new IllegalArgumentException ( "Euclidean score not yet supported for type " + tupleType . getSimpleName ( ) ) ; }
Scores features based on the Euclidean distance between them . The square is often used instead of the Euclidean distance since it is much faster to compute .
26,852
/**
 * Creates a Hamming-distance association score for binary descriptors.
 * Only TupleDesc_B is supported; anything else throws IllegalArgumentException.
 *
 * @param tupleType descriptor class, must be TupleDesc_B
 * @return Hamming score function
 */
public static < D > ScoreAssociation < D > scoreHamming ( Class < D > tupleType ) { if ( tupleType == TupleDesc_B . class ) { return ( ScoreAssociation ) new ScoreAssociateHamming_B ( ) ; } throw new IllegalArgumentException ( "Hamming distance not yet supported for type " + tupleType . getSimpleName ( ) ) ; }
Hamming distance between two binary descriptors .
26,853
/**
 * Selects every pixel whose intensity is strictly above the threshold and records its
 * coordinate as a corner. The corner list is cleared first.
 *
 * @param intensity corner intensity image
 * @param corners output list of detected corner pixels
 */
public void process ( GrayF32 intensity , QueueCorner corners ) {
	corners.reset();
	final float[] values = intensity.data;
	for (int y = 0; y < intensity.height; y++) {
		// index of the first pixel in this row within the raw array
		int rowStart = intensity.startIndex + y * intensity.stride;
		for (int x = 0; x < intensity.width; x++) {
			if (values[rowStart + x] > thresh) {
				corners.add(x, y);
			}
		}
	}
}
Selects pixels as corners which are above the threshold .
26,854
/**
 * Exchanges the source and destination feature lists (both positive and negative sets).
 * Avoids re-sorting when stepping through an image sequence.
 */
public void swapLists ( ) {
	// swap the positive-laplacian lists
	FastQueue<Helper> tmpPositive = srcPositive;
	srcPositive = dstPositive;
	dstPositive = tmpPositive;

	// swap the negative-laplacian lists
	FastQueue<Helper> tmpNegative = srcNegative;
	srcNegative = dstNegative;
	dstNegative = tmpNegative;
}
Swaps the source and dest feature list. Useful when processing a sequence of images and you don't want to resort everything.
26,855
/**
 * Associates source and destination features, matching positive-laplacian features only
 * against positive and negative only against negative. Matches and unassociated-source
 * indices are translated back into the original (global) feature indices through the
 * Helper.index field. Returns early, leaving the result lists empty, if either side has no
 * features at all.
 */
public void associate ( ) { matches . reset ( ) ; unassociatedSrc . reset ( ) ; if ( srcPositive . size == 0 && srcNegative . size == 0 ) return ; if ( dstPositive . size == 0 && dstNegative . size == 0 ) return ; assoc . setSource ( ( FastQueue ) srcPositive ) ; assoc . setDestination ( ( FastQueue ) dstPositive ) ; assoc . associate ( ) ; FastQueue < AssociatedIndex > m = assoc . getMatches ( ) ; for ( int i = 0 ; i < m . size ; i ++ ) { AssociatedIndex a = m . data [ i ] ; int globalSrcIndex = srcPositive . data [ a . src ] . index ; int globalDstIndex = dstPositive . data [ a . dst ] . index ; matches . grow ( ) . setAssociation ( globalSrcIndex , globalDstIndex , a . fitScore ) ; } GrowQueue_I32 un = assoc . getUnassociatedSource ( ) ; for ( int i = 0 ; i < un . size ; i ++ ) { unassociatedSrc . add ( srcPositive . data [ un . get ( i ) ] . index ) ; } assoc . setSource ( ( FastQueue ) srcNegative ) ; assoc . setDestination ( ( FastQueue ) dstNegative ) ; assoc . associate ( ) ; m = assoc . getMatches ( ) ; for ( int i = 0 ; i < m . size ; i ++ ) { AssociatedIndex a = m . data [ i ] ; int globalSrcIndex = srcNegative . data [ a . src ] . index ; int globalDstIndex = dstNegative . data [ a . dst ] . index ; matches . grow ( ) . setAssociation ( globalSrcIndex , globalDstIndex , a . fitScore ) ; } un = assoc . getUnassociatedSource ( ) ; for ( int i = 0 ; i < un . size ; i ++ ) { unassociatedSrc . add ( srcNegative . data [ un . get ( i ) ] . index ) ; } }
Associates the features together .
26,856
/**
 * Partitions the input features into positive-laplacian (white) and negative-laplacian
 * lists, recording each feature's original index so results can be mapped back.
 *
 * @param input features to partition
 * @param pos receives features with a positive laplacian
 * @param neg receives features with a negative laplacian
 */
private void sort ( FastQueue < BrightFeature > input , FastQueue < Helper > pos , FastQueue < Helper > neg ) {
	pos.reset();
	neg.reset();
	for (int index = 0; index < input.size; index++) {
		BrightFeature feature = input.get(index);
		// route the feature by the sign of its laplacian, keeping the original index
		FastQueue<Helper> target = feature.white ? pos : neg;
		target.grow().wrap(feature, index);
	}
}
Splits the set of input features into positive and negative laplacian lists. Keeps track of each feature's index in the original input list since that is the index that needs to be returned.
26,857
/**
 * Wraps a multi-band gradient operator so its output is reduced to a single band.
 * Looks up the reduction implementation by reflection using the reduce-type name and the
 * gradient's intermediate image class, then chains gradient and reducer together.
 *
 * @param gradient multi-band gradient operator to wrap
 * @param type reduction strategy; only MAX_F is currently implemented
 * @param outputType single-band type of the reduced derivative
 * @return gradient operator producing single-band output
 */
public static < I extends ImageMultiBand < I > , M extends ImageMultiBand < M > , D extends ImageGray < D > > ImageGradient < I , D > gradientReduce ( ImageGradient < I , M > gradient , DerivativeReduceType type , Class < D > outputType ) { String name ; switch ( type ) { case MAX_F : name = "maxf" ; break ; default : throw new RuntimeException ( "Unknown reduce type " + type ) ; } Class middleType ; switch ( gradient . getDerivativeType ( ) . getFamily ( ) ) { case PLANAR : middleType = Planar . class ; break ; case GRAY : throw new IllegalArgumentException ( "Can't have gradient output be single band" ) ; default : middleType = gradient . getDerivativeType ( ) . getImageClass ( ) ; } Method m = findReduce ( name , middleType , outputType ) ; GradientMultiToSingleBand_Reflection < M , D > reducer = new GradientMultiToSingleBand_Reflection < > ( m , gradient . getDerivativeType ( ) , outputType ) ; return new ImageGradientThenReduce < > ( gradient , reducer ) ; }
Computes the image gradient inside a multi - band image then reduces the output to a single band before returning the results
26,858
/**
 * Creates a single-band image gradient of the requested kernel type, resolving the concrete
 * implementation method via reflection. If derivType is null a default derivative type is
 * selected for the input type.
 *
 * @param type gradient kernel (Prewitt, Sobel, three-point, two-point variants)
 * @param inputType single-band input image class
 * @param derivType single-band derivative image class; null for the default
 * @return reflection-backed gradient operator
 */
public static < I extends ImageGray < I > , D extends ImageGray < D > > ImageGradient < I , D > gradientSB ( DerivativeType type , Class < I > inputType , Class < D > derivType ) { if ( derivType == null ) derivType = GImageDerivativeOps . getDerivativeType ( inputType ) ; Class which ; switch ( type ) { case PREWITT : which = GradientPrewitt . class ; break ; case SOBEL : which = GradientSobel . class ; break ; case THREE : which = GradientThree . class ; break ; case TWO_0 : which = GradientTwo0 . class ; break ; case TWO_1 : which = GradientTwo1 . class ; break ; default : throw new IllegalArgumentException ( "Unknown type " + type ) ; } Method m = findDerivative ( which , inputType , derivType ) ; return new ImageGradient_Reflection < > ( m ) ; }
Returns the gradient for single band images of the specified type
26,859
/**
 * Registers a new fiducial pattern. The binary input (0/1 pixels only) is rescaled to the
 * internal square size if needed — averaged down when shrinking, distorted up when growing —
 * then re-thresholded. The pattern descriptor is stored for all four 90-degree rotations so
 * detection is rotation invariant.
 *
 * @param inputBinary binary image of the pattern; pixels must be 0 or 1
 * @param lengthSide physical length of a side of the fiducial, must be &gt; 0
 * @return index assigned to the new pattern
 * @throws IllegalArgumentException if the image is null, not binary, or lengthSide &lt;= 0
 */
public int addPattern ( GrayU8 inputBinary , double lengthSide ) { if ( inputBinary == null ) { throw new IllegalArgumentException ( "Input image is null." ) ; } else if ( lengthSide <= 0 ) { throw new IllegalArgumentException ( "Parameter lengthSide must be more than zero" ) ; } else if ( ImageStatistics . max ( inputBinary ) > 1 ) throw new IllegalArgumentException ( "A binary image is composed on 0 and 1 pixels. This isn't binary!" ) ; if ( inputBinary . width != squareLength || inputBinary . height != squareLength ) { GrayF32 inputGray = new GrayF32 ( inputBinary . width , inputBinary . height ) ; ConvertImage . convert ( inputBinary , inputGray ) ; PixelMath . multiply ( inputGray , 255 , inputGray ) ; GrayF32 scaled = new GrayF32 ( squareLength , squareLength ) ; if ( inputBinary . width > squareLength && inputBinary . height > squareLength ) { AverageDownSampleOps . down ( inputGray , scaled ) ; } else { new FDistort ( inputGray , scaled ) . scaleExt ( ) . apply ( ) ; } GThresholdImageOps . threshold ( scaled , binary , 255 / 2.0 , false ) ; } else { binary . setTo ( inputBinary ) ; } FiducialDef def = new FiducialDef ( ) ; def . lengthSide = lengthSide ; binaryToDef ( binary , def . desc [ 0 ] ) ; ImageMiscOps . rotateCCW ( binary ) ; binaryToDef ( binary , def . desc [ 1 ] ) ; ImageMiscOps . rotateCCW ( binary ) ; binaryToDef ( binary , def . desc [ 2 ] ) ; ImageMiscOps . rotateCCW ( binary ) ; binaryToDef ( binary , def . desc [ 3 ] ) ; int index = targets . size ( ) ; targets . add ( def ) ; return index ; }
Adds a new image to the detector. Image must be gray-scale and is converted into a binary image using the specified threshold. All input images are rescaled to be square and of the appropriate size, so the original shape of the image doesn't matter. Square shapes are highly recommended since that's what the target looks like.
26,860
/**
 * Packs a binary image into the compressed bit descriptor: each group of 16 pixels becomes
 * one 16-bit word, least significant bit first. Assumes the pixel count is a multiple of 16.
 *
 * @param binary binary image whose pixels are 0 or 1
 * @param desc output descriptor, length = pixels/16
 */
protected static void binaryToDef ( GrayU8 binary , short [ ] desc ) {
	for (int blockStart = 0; blockStart < binary.data.length; blockStart += 16) {
		int word = 0;
		for (int bit = 0; bit < 16; bit++) {
			word |= binary.data[blockStart + bit] << bit;
		}
		desc[blockStart / 16] = (short) word;
	}
}
Converts a binary image into the compressed bit format
26,861
/**
 * Hamming distance between two packed binary descriptors: total number of differing bits.
 * Lower values indicate a better match.
 *
 * @param a first descriptor
 * @param b second descriptor, same length as a
 * @return number of bits that differ
 */
protected int hamming ( short [ ] a , short [ ] b ) {
	int total = 0;
	for (int word = 0; word < a.length; word++) {
		// mask to unsigned before XOR so sign extension doesn't add phantom bits
		int diff = (a[word] & 0xFFFF) ^ (b[word] & 0xFFFF);
		total += DescriptorDistance.hamming(diff);
	}
	return total;
}
Computes the hamming distance between two descriptions. The smaller the number the better the fit
26,862
/**
 * Concatenates a list of tuple descriptors into one large descriptor. If storage is provided
 * it must exactly match the combined length; if null, new storage is declared.
 *
 * @param inputs descriptors to concatenate, in order
 * @param combined optional output storage; may be null
 * @return the combined descriptor
 */
public static TupleDesc_F64 combine ( List < TupleDesc_F64 > inputs , TupleDesc_F64 combined ) {
	// total number of elements across all inputs
	int total = 0;
	for (int i = 0; i < inputs.size(); i++) {
		total += inputs.get(i).size();
	}

	if (combined == null) {
		combined = new TupleDesc_F64(total);
	} else if (total != combined.size()) {
		throw new RuntimeException("The combined feature needs to be " + total + " not " + combined.size());
	}

	// copy each input into its slot of the output
	int offset = 0;
	for (int i = 0; i < inputs.size(); i++) {
		double[] src = inputs.get(i).value;
		System.arraycopy(src, 0, combined.value, offset, src.length);
		offset += src.length;
	}
	return combined;
}
Concats the list of tuples together into one big feature . The combined feature must be large enough to store all the inputs .
26,863
/**
 * Changes the vision algorithm being run. Must be called from the GUI thread — enforced with
 * a RuntimeException. Any previously running processing is stopped first; the new processing
 * is initialized immediately only if the camera is already open.
 *
 * @param processing new processing to run; may be null to run nothing
 */
public void setProcessing ( VideoProcessing processing ) { if ( this . processing != null ) { this . processing . stopProcessing ( ) ; } if ( Looper . getMainLooper ( ) . getThread ( ) != Thread . currentThread ( ) ) { throw new RuntimeException ( "Not called from a GUI thread. Bad stuff could happen" ) ; } this . processing = processing ; if ( processing != null && mCamera != null ) { processing . init ( mDraw , mCamera , mCameraInfo , previewRotation ) ; } }
Changes the CV algorithm running . Should only be called from a GUI thread .
26,864
/**
 * Opens and configures the camera, sets its display orientation, attaches it to the preview
 * surface, and — if a processing algorithm is already selected — initializes it with the
 * newly opened camera.
 */
private void setUpAndConfigureCamera ( ) { mCamera = openConfigureCamera ( mCameraInfo ) ; setCameraDisplayOrientation ( mCameraInfo , mCamera ) ; mPreview . setCamera ( mCamera ) ; if ( processing != null ) { processing . init ( mDraw , mCamera , mCameraInfo , previewRotation ) ; } }
Sets up the camera if it is not already setup .
26,865
/**
 * Shows an indeterminate progress dialog with the given message, or updates the message if a
 * dialog is already showing. The dialog's display is delayed by roughly one second on the UI
 * thread (busy-wait with Thread.yield), and the calling thread blocks until the dialog object
 * has been created.
 * NOTE(review): both waits are busy-loops; progressDialog is read outside the lock in the
 * final loop — relies on it being effectively volatile. Confirm before refactoring.
 *
 * @param message text to display in the dialog
 */
protected void setProgressMessage ( final String message ) { runOnUiThread ( new Runnable ( ) { public void run ( ) { synchronized ( lockProgress ) { if ( progressDialog != null ) { progressDialog . setMessage ( message ) ; return ; } progressDialog = new ProgressDialog ( VideoDisplayActivity . this ) ; progressDialog . setMessage ( message ) ; progressDialog . setIndeterminate ( true ) ; progressDialog . setProgressStyle ( ProgressDialog . STYLE_SPINNER ) ; } long showTime = System . currentTimeMillis ( ) + 1000 ; while ( showTime > System . currentTimeMillis ( ) ) { Thread . yield ( ) ; } synchronized ( lockProgress ) { if ( progressDialog != null ) progressDialog . show ( ) ; } } } ) ; while ( progressDialog == null ) { Thread . yield ( ) ; } }
Displays an indeterminate progress dialog . If the dialog is already open this will change the message being displayed . Function blocks until the dialog has been declared .
26,866
/**
 * Dismisses the progress dialog if one is showing; safe to call when none is. When invoked
 * off the UI thread, the dismissal is posted to the UI thread and this call busy-waits until
 * the dialog reference is cleared.
 */
protected void hideProgressDialog ( ) { synchronized ( lockProgress ) { if ( progressDialog == null ) return ; } if ( Looper . getMainLooper ( ) . getThread ( ) == Thread . currentThread ( ) ) { synchronized ( lockProgress ) { progressDialog . dismiss ( ) ; progressDialog = null ; } } else { runOnUiThread ( new Runnable ( ) { public void run ( ) { synchronized ( lockProgress ) { progressDialog . dismiss ( ) ; progressDialog = null ; } } } ) ; while ( progressDialog != null ) { Thread . yield ( ) ; } } }
Dismisses the progress dialog . Can be called even if there is no progressDialog being shown .
26,867
/**
 * Copies all parameters from the passed-in model into this one, making them identical.
 * The radial coefficient array is reallocated only when its length differs.
 *
 * @param original model to copy from; not modified
 */
public void set ( CameraUniversalOmni original ) { super . set ( original ) ; this . mirrorOffset = original . mirrorOffset ; if ( radial . length != original . radial . length ) radial = new double [ original . radial . length ] ; System . arraycopy ( original . radial , 0 , radial , 0 , radial . length ) ; this . t1 = original . t1 ; this . t2 = original . t2 ; }
Assigns this model to be identical to the passed in model
26,868
/**
 * Computes dense optical flow between two images. Reshapes internal derivative storage to the
 * input size, optionally zeroes the output (resetOutput flag), computes spatial derivatives
 * (x, y) and the temporal derivative, then solves for the flow field.
 *
 * @param image1 first image in the sequence
 * @param image2 second image; must have the same shape as image1
 * @param output computed flow field
 */
public void process ( T image1 , T image2 , ImageFlow output ) { InputSanityCheck . checkSameShape ( image1 , image2 ) ; derivX . reshape ( image1 . width , image1 . height ) ; derivY . reshape ( image1 . width , image1 . height ) ; derivT . reshape ( image1 . width , image1 . height ) ; averageFlow . reshape ( output . width , output . height ) ; if ( resetOutput ) output . fillZero ( ) ; computeDerivX ( image1 , image2 , derivX ) ; computeDerivY ( image1 , image2 , derivY ) ; computeDerivT ( image1 , image2 , derivT ) ; findFlow ( derivX , derivY , derivT , output ) ; }
Computes dense optical flow from the first image's gradient and the difference between the second and the first image.
26,869
/**
 * Computes the 8-connected weighted average of the flow field for all interior pixels
 * (border excluded). The four edge-adjacent neighbors are weighted 1/6 and the four diagonal
 * neighbors 1/12, matching the Horn-Schunck style local average.
 *
 * @param flow input flow field
 * @param averageFlow output; each interior pixel receives the neighborhood average
 */
protected static void innerAverageFlow ( ImageFlow flow , ImageFlow averageFlow ) { int endX = flow . width - 1 ; int endY = flow . height - 1 ; for ( int y = 1 ; y < endY ; y ++ ) { int index = flow . width * y + 1 ; for ( int x = 1 ; x < endX ; x ++ , index ++ ) { ImageFlow . D average = averageFlow . data [ index ] ; ImageFlow . D f0 = flow . data [ index - 1 ] ; ImageFlow . D f1 = flow . data [ index + 1 ] ; ImageFlow . D f2 = flow . data [ index - flow . width ] ; ImageFlow . D f3 = flow . data [ index + flow . width ] ; ImageFlow . D f4 = flow . data [ index - 1 - flow . width ] ; ImageFlow . D f5 = flow . data [ index + 1 - flow . width ] ; ImageFlow . D f6 = flow . data [ index - 1 + flow . width ] ; ImageFlow . D f7 = flow . data [ index + 1 + flow . width ] ; average . x = 0.1666667f * ( f0 . x + f1 . x + f2 . x + f3 . x ) + 0.08333333f * ( f4 . x + f5 . x + f6 . x + f7 . x ) ; average . y = 0.1666667f * ( f0 . y + f1 . y + f2 . y + f3 . y ) + 0.08333333f * ( f4 . y + f5 . y + f6 . y + f7 . y ) ; } } }
Computes average flow using an 8 - connect neighborhood for the inner image
26,870
/**
 * Computes the neighborhood-average flow along the image border: the full left and right
 * columns first, then the top and bottom rows excluding the corner pixels already covered
 * by the column pass.
 *
 * @param flow input flow field
 * @param averageFlow output average flow along the border
 */
protected static void borderAverageFlow ( ImageFlow flow , ImageFlow averageFlow ) { for ( int y = 0 ; y < flow . height ; y ++ ) { computeBorder ( flow , averageFlow , 0 , y ) ; computeBorder ( flow , averageFlow , flow . width - 1 , y ) ; } for ( int x = 1 ; x < flow . width - 1 ; x ++ ) { computeBorder ( flow , averageFlow , x , 0 ) ; computeBorder ( flow , averageFlow , x , flow . height - 1 ) ; } }
Computes average flow using an 8 - connect neighborhood for the image border
26,871
/**
 * Precomputes the 3D pointing vector for every pixel of the simulated camera. Pixels whose
 * back-projection is uncountable (e.g. outside the valid model region) get NaN in the depth
 * map; all others store their normalized direction in the flat pointing array as (x, y, z)
 * triples. The depth map is initialized to -1.
 *
 * @param width output image width
 * @param height output image height
 */
void computeProjectionTable ( int width , int height ) { output . reshape ( width , height ) ; depthMap . reshape ( width , height ) ; ImageMiscOps . fill ( depthMap , - 1 ) ; pointing = new float [ width * height * 3 ] ; for ( int y = 0 ; y < output . height ; y ++ ) { for ( int x = 0 ; x < output . width ; x ++ ) { pixelTo3 . compute ( x , y , p3 ) ; if ( UtilEjml . isUncountable ( p3 . x ) ) { depthMap . unsafe_set ( x , y , Float . NaN ) ; } else { pointing [ ( y * output . width + x ) * 3 ] = ( float ) p3 . x ; pointing [ ( y * output . width + x ) * 3 + 1 ] = ( float ) p3 . y ; pointing [ ( y * output . width + x ) * 3 + 2 ] = ( float ) p3 . z ; } } } }
Computes 3D pointing vector for every pixel in the simulated camera frame
26,872
/**
 * Renders the scene and returns the rendered image. The output is cleared to the background
 * value and the depth map to MAX_VALUE, each surface's camera-frame geometry is updated, then
 * rendering runs multi-threaded or single-threaded depending on BoofConcurrency.
 *
 * @return the rendered image
 */
public GrayF32 render ( ) { ImageMiscOps . fill ( output , background ) ; ImageMiscOps . fill ( depthMap , Float . MAX_VALUE ) ; for ( int i = 0 ; i < scene . size ( ) ; i ++ ) { SurfaceRect r = scene . get ( i ) ; r . rectInCamera ( ) ; } if ( BoofConcurrency . USE_CONCURRENT ) { renderMultiThread ( ) ; } else { renderSingleThread ( ) ; } return getOutput ( ) ; }
Render the scene and returns the rendered image .
26,873
/**
 * Projects a point lying on a scene surface onto the rendered image. The 2D surface
 * coordinate is lifted into the surface's camera frame, normalized onto the unit sphere,
 * then mapped through the sphere-to-pixel model.
 *
 * @param which index of the surface in the scene
 * @param x surface x-coordinate
 * @param y surface y-coordinate
 * @param output projected pixel coordinate
 */
public void computePixel ( int which , double x , double y , Point2D_F64 output ) {
	SurfaceRect surface = scene.get(which);
	// surface coordinates are negated when lifted into 3D
	Point3D_F64 cameraPt = new Point3D_F64(-x, -y, 0);
	SePointOps_F64.transform(surface.rectToCamera, cameraPt, cameraPt);
	// normalize onto the unit sphere before projecting into the image
	cameraPt.scale(1.0 / cameraPt.norm());
	sphereToPixel.compute(cameraPt.x, cameraPt.y, cameraPt.z, output);
}
Projects a point which lies on the 2D planar polygon's surface onto the rendered image
26,874
/**
 * Configures the camera geometry used to convert pixels into 3D points.
 *
 * @param model intrinsic lens model of the visual camera; used to create the
 *              pixel-to-normalized transform
 * @param visualToDepth transform from visual-camera pixels into depth-image pixels
 */
public void configure ( LensDistortionNarrowFOV model , PixelTransform < Point2D_F32 > visualToDepth ) { this . visualToDepth = visualToDepth ; this . p2n = model . undistort_F64 ( true , false ) ; }
Configures intrinsic camera parameters
26,875
/**
 * Computes the 3D coordinate of the visual-camera pixel (x, y). The pixel is mapped into the
 * depth image; if that location is inside the depth image and has a non-zero depth value, the
 * pixel is back-projected using its normalized coordinate scaled by depth and stored in
 * worldPt as a side effect.
 *
 * @param x pixel x-coordinate in the visual camera
 * @param y pixel y-coordinate in the visual camera
 * @return true if a valid 3D point was computed; false if out of bounds or no depth data
 */
public boolean process ( int x , int y ) { visualToDepth . compute ( x , y , distorted ) ; int depthX = ( int ) distorted . x ; int depthY = ( int ) distorted . y ; if ( depthImage . isInBounds ( depthX , depthY ) ) { double value = lookupDepth ( depthX , depthY ) ; if ( value == 0 ) return false ; p2n . compute ( x , y , norm ) ; worldPt . z = value * depthScale ; worldPt . x = worldPt . z * norm . x ; worldPt . y = worldPt . z * norm . y ; return true ; } else { return false ; } }
Given a pixel coordinate in the visual camera compute the 3D coordinate of that point .
26,876
/**
 * Records the image region covered by the detected fiducial, feeds the detected points into
 * the coverage-quality metric, updates the GUI's geometry score, and — once the score has
 * ever reached 1.0 and no magnets remain — enables the finished/save button.
 */
private void captureFiducialPoints ( ) { Polygon2D_F64 p = regions . grow ( ) ; p . vertexes . resize ( sidesCollision . size ( ) ) ; for ( int i = 0 ; i < sidesCollision . size ( ) ; i ++ ) { p . get ( i ) . set ( sidesCollision . get ( i ) ) ; } quality . addObservations ( detector . getDetectedPoints ( ) ) ; gui . getInfoPanel ( ) . updateGeometry ( quality . getScore ( ) ) ; geometryTrigger |= quality . getScore ( ) >= 1.0 ; if ( geometryTrigger && magnets . isEmpty ( ) ) { gui . getInfoPanel ( ) . enabledFinishedButton ( ) ; } }
Record the area covered in the image by the fiducial update the quality calculation and see if it should enable the save button .
26,877
/**
 * Asks each magnet whether it has captured a picture, removing those that have from the list.
 *
 * @return true if at least one magnet captured a picture
 */
private boolean checkMagnetCapturePicture ( ) {
	boolean capturedAny = false;
	// use an explicit iterator so elements can be removed mid-traversal
	for (Iterator<Magnet> it = magnets.iterator(); it.hasNext(); ) {
		Magnet magnet = it.next();
		if (magnet.handlePictureTaken()) {
			it.remove();
			capturedAny = true;
		}
	}
	return capturedAny;
}
Checks each of the magnets along the image border to see if it has captured a picture, removing those that have
26,878
/**
 * Iteratively refines candidate corner locations along the contour. Each corner is locally
 * optimized against its two neighbors (circular indexing); when the contour is not a loop the
 * first and last corners are fixed end points and skipped. Iteration stops after maxIterations
 * or when no corner moves. The search radius adapts to contour length (clamped to [3, 6]).
 *
 * @param contour pixel contour the polygon is fit to
 * @param corners in/out candidate corner indices into the contour; refined in place
 * @return false if fewer than 3 corners were provided, true otherwise
 */
public boolean fit ( List < Point2D_I32 > contour , GrowQueue_I32 corners ) { if ( corners . size ( ) < 3 ) { return false ; } searchRadius = Math . min ( 6 , Math . max ( contour . size ( ) / 12 , 3 ) ) ; int startCorner , endCorner ; if ( looping ) { startCorner = 0 ; endCorner = corners . size ; } else { startCorner = 1 ; endCorner = corners . size - 1 ; } boolean change = true ; for ( int iteration = 0 ; iteration < maxIterations && change ; iteration ++ ) { change = false ; for ( int i = startCorner ; i < endCorner ; i ++ ) { int c0 = CircularIndex . minusPOffset ( i , 1 , corners . size ( ) ) ; int c2 = CircularIndex . plusPOffset ( i , 1 , corners . size ( ) ) ; int improved = optimize ( contour , corners . get ( c0 ) , corners . get ( i ) , corners . get ( c2 ) ) ; if ( improved != corners . get ( i ) ) { corners . set ( i , improved ) ; change = true ; } } } return true ; }
Fits a polygon to the contour given an initial set of candidate corners. If not looping the corners must include the end points still. Minimum of 3 points required, otherwise there's no corner!
26,879
/**
 * Searches offsets within +/- searchRadius of corner c1 for the placement with the lowest
 * two-line fit cost between neighbors c0 and c2. The i==0 case short-circuits the search:
 * once a better offset has been found, re-reaching the zero offset terminates early.
 *
 * @param contour contour points
 * @param c0 preceding corner index
 * @param c1 corner index being optimized
 * @param c2 following corner index
 * @return contour index of the best placement for the middle corner
 */
protected int optimize ( List < Point2D_I32 > contour , int c0 , int c1 , int c2 ) { double bestDistance = computeCost ( contour , c0 , c1 , c2 , 0 ) ; int bestIndex = 0 ; for ( int i = - searchRadius ; i <= searchRadius ; i ++ ) { if ( i == 0 ) { if ( bestIndex != 0 ) break ; } else { double found = computeCost ( contour , c0 , c1 , c2 , i ) ; if ( found < bestDistance ) { bestDistance = found ; bestIndex = i ; } } } return CircularIndex . addOffset ( c1 , bestIndex , contour . size ( ) ) ; }
Searches around the current c1 point for the best place to put the corner
26,880
/**
 * Evaluates the fit cost of placing the middle corner at an offset from c1: the sum of
 * contour-point distances to the two lines (c0 -> shifted c1) and (shifted c1 -> c2).
 *
 * @param contour contour points
 * @param c0 first corner index
 * @param c1 middle corner index before the offset is applied
 * @param c2 last corner index
 * @param offset circular offset applied to c1
 * @return total distance cost for this placement
 */
protected double computeCost ( List < Point2D_I32 > contour , int c0 , int c1 , int c2 , int offset ) {
	int shifted = CircularIndex.addOffset(c1, offset, contour.size());
	createLine(c0, shifted, contour, line0);
	createLine(shifted, c2, contour, line1);
	double costBefore = distanceSum(line0, c0, shifted, contour);
	double costAfter = distanceSum(line1, shifted, c2, contour);
	return costBefore + costAfter;
}
Computes the distance between the two lines defined by corner points in the contour
26,881
/**
 * Sums the distance of sampled contour points to the given line, for the contour segment from
 * c0 to c1 inclusive. At most maxLineSamples points are sampled, spread evenly. When c1 < c0
 * the segment wraps around the end of the contour and the sample budget is split between the
 * two pieces proportionally to their lengths.
 *
 * @param line line in general (normalized) notation
 * @param c0 start index of the segment
 * @param c1 end index of the segment (may be less than c0 when wrapping)
 * @param contour contour points
 * @return sum of sampled point-to-line distances
 */
protected double distanceSum ( LineGeneral2D_F64 line , int c0 , int c1 , List < Point2D_I32 > contour ) { double total = 0 ; if ( c0 < c1 ) { int length = c1 - c0 + 1 ; int samples = Math . min ( maxLineSamples , length ) ; for ( int i = 0 ; i < samples ; i ++ ) { int index = c0 + i * ( length - 1 ) / ( samples - 1 ) ; total += distance ( line , contour . get ( index ) ) ; } } else { int lengthFirst = contour . size ( ) - c0 ; int lengthSecond = c1 + 1 ; int length = lengthFirst + c1 + 1 ; int samples = Math . min ( maxLineSamples , length ) ; int samplesFirst = samples * lengthFirst / length ; int samplesSecond = samples * lengthSecond / length ; for ( int i = 0 ; i < samplesFirst ; i ++ ) { int index = c0 + i * lengthFirst / ( samples - 1 ) ; total += distance ( line , contour . get ( index ) ) ; } for ( int i = 0 ; i < samplesSecond ; i ++ ) { int index = i * lengthSecond / ( samples - 1 ) ; total += distance ( line , contour . get ( index ) ) ; } } return total ; }
Sum of Euclidean distance of contour points along the line
26,882
/**
 * Creates a line in general notation from the two contour points and normalizes it.
 * (Removed a leftover profane debug print on the index1 &lt; 0 path — an invalid index will
 * still fail fast via IndexOutOfBoundsException from contour.get().)
 *
 * @param index0 contour index of the line's first point
 * @param index1 contour index of the line's second point
 * @param contour contour points
 * @param line output line in normalized general notation
 */
private void createLine ( int index0 , int index1 , List < Point2D_I32 > contour , LineGeneral2D_F64 line ) {
	Point2D_I32 p0 = contour.get(index0);
	Point2D_I32 p1 = contour.get(index1);
	work.a.set(p0.x, p0.y);
	work.b.set(p1.x, p1.y);
	UtilLine2D_F64.convert(work, line);
	line.normalize();
}
Given segment information create a line in general notation which has been normalized
26,883
/**
 * Creates a new single-band image of the specified type with the same width and height as
 * this image.
 *
 * @param type class of the single-band image to create
 * @return newly declared image with matching shape
 */
public < B extends ImageGray < B > > B createSameShape ( Class < B > type ) { return GeneralizedImageOps . createSingleBand ( type , width , height ) ; }
Creates a single band image of the specified type that will have the same shape as this image
26,884
/**
 * Changes which visualization is displayed based on the GUI selection. Selections 0-2 are
 * 2D images (disparity, left color, right color); anything else selects the 3D point-cloud
 * viewer, whose cloud is lazily computed the first time it's shown.
 */
private synchronized void changeImageView() {
    JComponent comp;

    if( control.selectedView < 3 ) {
        // a plain 2D image view was selected
        BufferedImage img;
        switch( control.selectedView ) {
            case 0: img = disparityOut; break;
            case 1: img = colorLeft; break;
            case 2: img = colorRight; break;
            default: throw new RuntimeException("Unknown option");
        }
        gui.setImage(img);
        gui.setPreferredSize(new Dimension(origLeft.getWidth(), origLeft.getHeight()));
        comp = gui;
    } else {
        // point-cloud view. Only compute the cloud once per processed image pair
        if( !computedCloud ) {
            computedCloud = true;
            DisparityToColorPointCloud d2c = new DisparityToColorPointCloud();
            // baseline = distance between the two rectified camera centers
            double baseline = calib.getRightToLeft().getT().norm();
            d2c.configure(baseline, rectK, rectR, leftRectToPixel, control.minDisparity, control.maxDisparity);
            d2c.process(activeAlg.getDisparity(), colorLeft);
            CameraPinhole rectifiedPinhole = PerspectiveOps.matrixToPinhole(rectK, colorLeft.getWidth(), colorLeft.getHeight(), null);
            // configure the viewer for the rectified camera's field of view
            pcv.clearPoints();
            pcv.setCameraHFov(PerspectiveOps.computeHFov(rectifiedPinhole));
            pcv.setTranslationStep(5);
            pcv.addCloud(d2c.getCloud(), d2c.getCloudColor());
        }
        comp = pcv.getComponent();
        comp.requestFocusInWindow();
    }

    // swap the selected component in; remove both candidates first so only one is attached
    panel.remove(gui);
    panel.remove(pcv.getComponent());
    panel.add(comp, BorderLayout.CENTER);
    panel.validate();
    comp.repaint();
    processedImage = true;
}
Changes which image is being displayed depending on GUI selection
26,885
/**
 * Rectifies the stereo pair: computes rectification matrices from the calibrated stereo
 * parameters, adjusts them so the entire left image remains visible, then removes lens
 * distortion and applies rectification to both input images.
 */
private void rectifyInputImages() {
    // intrinsic calibration matrix for each camera
    DMatrixRMaj K1 = PerspectiveOps.pinholeToMatrix(calib.left, (DMatrixRMaj)null);
    DMatrixRMaj K2 = PerspectiveOps.pinholeToMatrix(calib.right, (DMatrixRMaj)null);

    // compute rectification. Left camera is the origin, so right-to-left is inverted
    // to get left-to-right
    rectifyAlg.process(K1, new Se3_F64(), K2, calib.getRightToLeft().invert(null));

    // rectification transform for each image, plus the shared rectified intrinsics
    DMatrixRMaj rect1 = rectifyAlg.getRect1();
    DMatrixRMaj rect2 = rectifyAlg.getRect2();
    rectK = rectifyAlg.getCalibrationMatrix();
    rectR = rectifyAlg.getRectifiedRotation();

    // adjust so that every pixel of the left image is inside the rectified view
    RectifyImageOps.allInsideLeft(calib.left, rect1, rect2, rectK);
    leftRectToPixel = transformRectToPixel(calib.left, rect1);

    ImageType<T> imageType = ImageType.single(activeAlg.getInputType());

    // the image distortion operations take 32-bit float matrices
    FMatrixRMaj rect1_F32 = new FMatrixRMaj(3,3);
    FMatrixRMaj rect2_F32 = new FMatrixRMaj(3,3);
    ConvertMatrixData.convert(rect1, rect1_F32);
    ConvertMatrixData.convert(rect2, rect2_F32);

    ImageDistort<T,T> distortRect1 = RectifyImageOps.rectifyImage(calib.left, rect1_F32, BorderType.SKIP, imageType);
    ImageDistort<T,T> distortRect2 = RectifyImageOps.rectifyImage(calib.right, rect2_F32, BorderType.SKIP, imageType);

    // remove lens distortion and rectify both images
    distortRect1.apply(inputLeft, rectLeft);
    distortRect2.apply(inputRight, rectRight);

    rectifiedImages = true;
}
Removes distortion and rectifies images .
26,886
private void changeGuiActive ( final boolean error , final boolean reverse ) { SwingUtilities . invokeLater ( new Runnable ( ) { public void run ( ) { control . setActiveGui ( error , reverse ) ; } } ) ; }
Activates and deactivates different GUI configurations
26,887
/**
 * Attempts to determine the character encoding of a byte message. The specification calls
 * for ISO8859_1 or JIS (depending on the version), but in practice people use whatever
 * encoding they like. Each candidate encoding is eliminated as soon as an invalid byte is
 * seen; UTF-8 wins ties since it is the most common in the wild.
 *
 * @param message raw encoded bytes
 * @return best-guess encoding name: UTF8, ISO8859_1, or JIS
 */
public static String guessEncoding( byte[] message ) {
    boolean couldBeUtf8 = true;
    boolean couldBeJis = true;
    boolean couldBeIso = true;

    for (byte b : message) {
        int value = b & 0xFF;
        // only consult a validator while its encoding is still viable; the validators
        // may track state across bytes, so they must not see bytes after elimination
        if (couldBeUtf8)
            couldBeUtf8 = isValidUTF8(value);
        if (couldBeJis)
            couldBeJis = isValidJIS(value);
        if (couldBeIso)
            couldBeIso = isValidIso8869_1(value);
    }

    if (couldBeUtf8)
        return UTF8;
    if (couldBeIso)
        return ISO8859_1;
    return couldBeJis ? JIS : UTF8;
}
The encoding for byte messages should be ISO8859_1 or JIS depending on which version of the specification you follow . In reality people use whatever they want and expect it to magically work . This attempts to figure out if it s ISO8859_1 JIS or UTF8 . UTF - 8 is the most common and is used if its ambiguous .
26,888
/**
 * Converts an image from one type to another. If an output image is provided its shape
 * must match the input; otherwise a new instance of the destination type is created.
 *
 * @param src input image
 * @param dst (optional) output image. Created if null.
 * @param typeDst class of the destination image type
 * @return the converted image (dst, or a newly created image)
 */
public static <T extends ImageGray<T>> T convert( ImageGray<?> src , T dst , Class<T> typeDst ) {
    if (dst != null) {
        InputSanityCheck.checkSameShape(src, dst);
    } else {
        dst = (T)GeneralizedImageOps.createSingleBand(typeDst, src.width, src.height);
    }
    convert(src, dst);
    return dst;
}
Converts an image from one type to another type . Creates a new image instance if an output is not provided .
26,889
/**
 * Specifies the input tensor. The epipoles are immediately extracted since they are needed
 * to extract all other data structures.
 *
 * Implementation: for each 3x3 tensor slice T_i, both null vectors (v_i and u_i) are found
 * via SVD. The epipoles are then the null vectors of the 3x3 matrices U = [u1|u2|u3] and
 * V = [v1|v2|v3].
 *
 * @param tensor the trifocal tensor to decompose. The reference is saved internally.
 * @throws RuntimeException if the SVD of any slice fails
 */
public void setTensor( TrifocalTensor tensor ) {
    this.tensor = tensor;

    // extract the null vectors of each tensor slice
    // (true/false select which side's null vector — see EJML nullVector docs)
    if( !svd.decompose(tensor.T1) )
        throw new RuntimeException("SVD failed?!");
    SingularOps_DDRM.nullVector(svd, true, v1);
    SingularOps_DDRM.nullVector(svd, false, u1);

    if( !svd.decompose(tensor.T2) )
        throw new RuntimeException("SVD failed?!");
    SingularOps_DDRM.nullVector(svd, true, v2);
    SingularOps_DDRM.nullVector(svd, false, u2);

    if( !svd.decompose(tensor.T3) )
        throw new RuntimeException("SVD failed?!");
    SingularOps_DDRM.nullVector(svd, true, v3);
    SingularOps_DDRM.nullVector(svd, false, u3);

    // stack the null vectors as the columns of U and V
    for (int i = 0; i < 3; i++) {
        U.set(i, 0, u1.get(i));
        U.set(i, 1, u2.get(i));
        U.set(i, 2, u3.get(i));
        V.set(i, 0, v1.get(i));
        V.set(i, 1, v2.get(i));
        V.set(i, 2, v3.get(i));
    }

    // the epipoles are the null vectors of the stacked matrices
    svd.decompose(U);
    SingularOps_DDRM.nullVector(svd, false, tempE);
    e2.set(tempE.get(0), tempE.get(1), tempE.get(2));

    svd.decompose(V);
    SingularOps_DDRM.nullVector(svd, false, tempE);
    e3.set(tempE.get(0), tempE.get(1), tempE.get(2));
}
Specifies the input tensor . The epipoles are immediately extracted since they are needed to extract all other data structures
26,890
/**
 * Copies the epipoles previously extracted from the trifocal tensor into the provided
 * points. Each epipole has a norm of 1, an artifact of using SVD to extract them.
 *
 * @param e2 (output) epipole in the second view
 * @param e3 (output) epipole in the third view
 */
public void extractEpipoles( Point3D_F64 e2 , Point3D_F64 e3 ) {
    e3.set(this.e3);
    e2.set(this.e2);
}
Extracts the epipoles from the trifocal tensor . Extracted epipoles will have a norm of 1 as an artifact of using SVD .
26,891
/**
 * Computes a histogram for a single-band image, dispatching to the implementation that
 * matches the image's concrete type.
 *
 * @param image input single-band image
 * @param minPixelValue minimum possible pixel value (only used for floating-point images)
 * @param maxPixelValue maximum possible pixel value
 * @param histogram (output) computed histogram
 * @throws IllegalArgumentException if the image type is not supported
 */
public static <T extends ImageGray<T>> void histogram( T image , double minPixelValue , double maxPixelValue , TupleDesc_F64 histogram ) {
    Class<?> type = image.getClass();
    if (type == GrayU8.class) {
        HistogramFeatureOps.histogram((GrayU8)image, (int)maxPixelValue, histogram);
    } else if (type == GrayU16.class) {
        HistogramFeatureOps.histogram((GrayU16)image, (int)maxPixelValue, histogram);
    } else if (type == GrayF32.class) {
        HistogramFeatureOps.histogram((GrayF32)image, (float)minPixelValue, (float)maxPixelValue, histogram);
    } else {
        throw new IllegalArgumentException("Unsupported band type");
    }
}
Computes a single - band normalized histogram for any single band image .
26,892
/**
 * Computes a coupled histogram from a flat list of colors, where each consecutive group of
 * D values (D = histogram dimensions) is one color. If the input is integer valued, add one
 * to the maximum value of each dimension, e.g. a 0 to 255 range becomes 0 to 256.
 *
 * @param colors flat array of color values, D values per color
 * @param length number of elements of colors to process. Must be a multiple of D.
 * @param histogram (output) histogram; zeroed before accumulation
 * @throws IllegalArgumentException if length is not a multiple of the histogram dimensions
 */
public static void histogram( double[] colors , int length , Histogram_F64 histogram ) {
    int numBands = histogram.getDimensions();
    if (length % numBands != 0)
        throw new IllegalArgumentException("Length does not match dimensions");

    int[] coordinate = new int[numBands];

    histogram.fill(0);

    int i = 0;
    while (i < length) {
        // map each band of this color to its bin index
        for (int band = 0; band < numBands; band++, i++) {
            coordinate[band] = histogram.getDimensionIndex(band, colors[i]);
        }
        histogram.value[histogram.getIndex(coordinate)] += 1.0;
    }
}
Computes a coupled histogram from a list of colors . If the input is for integer values then add one to the maximum value . For example if the range of values is 0 to 255 then make it 0 to 256 .
26,893
/**
 * Computes robust statistics of the 3D point cloud: the per-axis median point, and the
 * median Euclidean distance of points from that median point. Results are stored in
 * medianPoint and medianDistancePoint.
 *
 * NOTE(review): despite the original summary mentioning variance, this computes a median
 * distance, not a variance.
 *
 * @param points 3D points to compute statistics over. Must not be empty.
 */
void computePointStatistics( Point[] points ) {
    final int length = points.length;
    // scratch array; QuickSelect.select reorders it in place, which is fine since it's
    // refilled before each use
    double v[] = new double[length];

    // median along each axis independently
    for (int axis = 0; axis < 3; axis++) {
        double maxAbs = 0; // NOTE(review): computed but never used afterwards
        for (int i = 0; i < length; i++) {
            v[i] = points[i].coordinate[axis];
            maxAbs = Math.max(maxAbs, Math.abs(v[i]));
        }
        double median = QuickSelect.select(v, length/2, length);
        switch (axis) {
            case 0: medianPoint.x = median; break;
            case 1: medianPoint.y = median; break;
            case 2: medianPoint.z = median; break;
        }
    }

    // median Euclidean distance from the median point (sqrt of the median squared distance)
    for (int i = 0; i < length; i++) {
        v[i] = points[i].distanceSq(medianPoint);
    }
    medianDistancePoint = Math.sqrt(QuickSelect.select(v, length/2, length));
}
For 3D points computes the median along each dimension and the median distance of the points from that median point .
26,894
/**
 * Undoes the scale and translation normalization previously applied to a metric scene.
 * The 3D points are restored by undoNormPoints3D(); each view's translation is then
 * recomputed so its camera center maps back to the original coordinate frame.
 *
 * @param structure scene structure to un-normalize, modified in place
 * @param observations observations associated with the structure (not modified here)
 */
public void undoScale( SceneStructureMetric structure , SceneObservations observations ) {
    // nothing to do for homogenous coordinates since they are scale invariant
    if( structure.homogenous )
        return;

    double scale = desiredDistancePoint/medianDistancePoint;

    undoNormPoints3D(structure, scale);

    Point3D_F64 c = new Point3D_F64();

    for (int i = 0; i < structure.views.length; i++) {
        SceneStructureMetric.View view = structure.views[i];

        // camera center in world coordinates: c = R'*T (sign handled below)
        GeometryMath_F64.multTran(view.worldToView.R, view.worldToView.T, c);

        // undo the normalization of the camera center
        c.x = (-c.x/scale + medianPoint.x);
        c.y = (-c.y/scale + medianPoint.y);
        c.z = (-c.z/scale + medianPoint.z);

        // convert the un-normalized center back into a translation: T = -R*c
        GeometryMath_F64.mult(view.worldToView.R, c, view.worldToView.T);
        view.worldToView.T.scale(-1);
    }
}
Undoes the scale and translation normalization previously applied to a metric scene .
26,895
/**
 * Applies distortion removal to the quadrilateral region in the input image defined by the
 * four corners. The undistorted result is rendered by the internal distortion operation.
 *
 * @param input input image
 * @param corner0 first corner of the region
 * @param corner1 second corner of the region
 * @param corner2 third corner of the region
 * @param corner3 fourth corner of the region
 * @return true if the removal transform could be computed, false otherwise
 */
public boolean apply( T input , Point2D_F64 corner0 , Point2D_F64 corner1 , Point2D_F64 corner2 , Point2D_F64 corner3 ) {
    if (!createTransform(corner0, corner1, corner2, corner3))
        return false;
    distort.input(input).apply();
    return true;
}
Applies distortion removal to the specified region in the input image . Returns true if the removal transform could be computed and was applied .
26,896
/**
 * Computes the distortion removal transform (a homography) from the four corners of the
 * region, ordered top-left, top-right, bottom-right, bottom-left.
 *
 * @param tl top-left corner
 * @param tr top-right corner
 * @param br bottom-right corner
 * @param bl bottom-left corner
 * @return true if the homography could be computed, false otherwise
 */
public boolean createTransform( Point2D_F64 tl , Point2D_F64 tr , Point2D_F64 br , Point2D_F64 bl ) {
    Point2D_F64[] corners = {tl, tr, br, bl};
    for (int i = 0; i < corners.length; i++) {
        associatedPairs.get(i).p2.set(corners[i]);
    }

    if( !computeHomography.process(associatedPairs, H) )
        return false;

    // convert to 32-bit floats for the image transform
    ConvertMatrixData.convert(H, H32);
    transform.set(H32);
    return true;
}
Computes the distortion removal transform
26,897
/**
 * Performs a vertical 1D down-convolution across the image while re-normalizing the kernel
 * depending on how much of it overlaps the image. A naive implementation handles kernels
 * wider than the image; otherwise the interior and border are convolved separately.
 *
 * @param kernel 1D convolution kernel
 * @param image input image
 * @param dest (output) down-sampled convolved image
 * @param skip down-sampling factor
 */
public static void vertical( Kernel1D_F32 kernel , GrayF32 image , GrayF32 dest , int skip ) {
    checkParameters(image, dest, skip);

    if( kernel.width >= image.width ) {
        // kernel never fully fits inside the image; use the naive renormalizing version
        ConvolveDownNormalizedNaive.vertical(kernel, image, dest, skip);
        return;
    }

    // fast interior convolution, then renormalized handling along the border
    ConvolveImageDownNoBorder.vertical(kernel, image, dest, skip);
    ConvolveDownNormalized_JustBorder.vertical(kernel, image, dest, skip);
}
Performs a vertical 1D down convolution across the image while re - normalizing the kernel depending on its overlap with the image .
26,898
/**
 * Removes radial and tangential lens distortion from a point by fixed-point iteration.
 * Starting from the distorted coordinates, the distortion model is repeatedly inverted
 * until the radial polynomial term converges or 500 iterations are reached.
 *
 * @param x distorted x-coordinate
 * @param y distorted y-coordinate
 * @param radial radial distortion coefficients
 * @param t1 tangential distortion coefficient
 * @param t2 tangential distortion coefficient
 * @param out (output) undistorted point
 * @param tol convergence tolerance on the change of the radial term
 */
public static void removeRadial( float x , float y , float[] radial , float t1 , float t2 , Point2D_F32 out , float tol ) {
    final float distortedX = x;
    final float distortedY = y;

    float prevSum = 0;
    for (int iter = 0; iter < 500; iter++) {
        float r2 = x*x + y*y;

        // radial polynomial: sum of radial[i] * r^(2*(i+1))
        float sum = 0;
        float ri2 = r2;
        for (float coef : radial) {
            sum += coef*ri2;
            ri2 *= r2;
        }

        // tangential distortion terms
        float tx = 2.0f*t1*x*y + t2*(r2 + 2.0f*x*x);
        float ty = t1*(r2 + 2.0f*y*y) + 2.0f*t2*x*y;

        // invert the distortion model using the current estimate
        x = (distortedX - tx)/(1.0f + sum);
        y = (distortedY - ty)/(1.0f + sum);

        if ((float)Math.abs(prevSum - sum) <= tol)
            break;
        prevSum = sum;
    }

    out.set(x, y);
}
Static function for removing radial and tangential distortion
26,899
/**
 * Iteratively refines the initial ellipse estimate by snapping it to edges found in the
 * image, stopping when the change between iterations falls below the convergence tolerance
 * or maxIterations is reached.
 *
 * @param input initial ellipse estimate
 * @param refined (output) refined ellipse. Also modified during iteration.
 * @return true if refinement completed (converged or hit max iterations), false if the
 *         ellipse fit failed
 */
public boolean process( EllipseRotated_F64 input , EllipseRotated_F64 refined ) {
    refined.set(input);
    previous.set(input);

    for (int iteration = 0; iteration < maxIterations; iteration++) {
        refined.set(previous);

        // sample edge points and their weights around the current estimate
        computePointsAndWeights(refined);

        if( fitter.process(samplePts.toList(), weights.data) ) {
            UtilEllipse_F64.convert(fitter.getEllipse(), refined);
            // undo the normalization apparently applied to the sampled points: scaled by
            // the previous semi-major axis and centered on the previous center
            // NOTE(review): confirm against computePointsAndWeights
            double scale = previous.a;
            refined.center.x = refined.center.x*scale + previous.center.x;
            refined.center.y = refined.center.y*scale + previous.center.y;
            refined.a *= scale;
            refined.b *= scale;
        } else {
            return false;
        }

        // check for convergence; otherwise use this result as the next starting point
        if( change(previous, refined) <= convergenceTol ) {
            return true;
        } else {
            previous.set(refined);
        }
    }

    return true;
}
Refines the provided ellipse by snapping it to edges found in the image