idx
int64
0
165k
question
stringlengths
73
4.15k
target
stringlengths
5
918
len_question
int64
21
890
len_target
int64
3
255
27,000
/**
 * Randomly selects a point inside a circular region using a Gaussian distribution.
 * Rejection sampling: points are redrawn until one lands strictly inside the radius.
 * Note: the disk test uses Euclidean distance, so this is a circle, not a square.
 *
 * @param rand   random number generator
 * @param sigma  standard deviation of the Gaussian
 * @param radius points are rejected until they fall strictly inside this radius
 * @param pt     (output) the selected point
 */
private static void randomGaussian( Random rand , double sigma , int radius , Point2D_I32 pt ) {
	int x, y;
	// compare squared distances to avoid the unnecessary sqrt call
	int radius2 = radius*radius;
	while( true ) {
		x = (int)(rand.nextGaussian()*sigma);
		y = (int)(rand.nextGaussian()*sigma);
		if( x*x + y*y < radius2 )
			break;
	}
	pt.set(x,y);
}
Randomly selects a point which is inside a circular region of the given radius using a Gaussian distribution .
103
17
27,001
/**
 * Creates an updater for discrete pyramids in which a Gaussian is convolved across the
 * input prior to sub-sampling.
 */
public static <T extends ImageBase<T>>
PyramidDiscrete<T> discreteGaussian( int[] scaleFactors , double sigma , int radius ,
									 boolean saveOriginalReference , ImageType<T> imageType ) {
	// look up the 1D kernel class which matches the image's data type
	Class<Kernel1D> kernelClass = FactoryKernel.getKernelType(imageType.getDataType(),1);
	Kernel1D blurKernel = FactoryKernelGaussian.gaussian(kernelClass,sigma,radius);
	return new PyramidDiscreteSampleBlur<>(blurKernel,sigma,imageType,saveOriginalReference,scaleFactors);
}
Creates an updater for discrete pyramids where a Gaussian is convolved across the input prior to sub - sampling .
131
25
27,002
/**
 * Creates a float pyramid where each layer is blurred with a Gaussian of the specified
 * sigma before bilinear sub-sampling.
 */
public static <T extends ImageGray<T>>
PyramidFloat<T> floatGaussian( double[] scaleFactors , double[] sigmas , Class<T> imageType ) {
	// bilinear interpolation with an extended border handles samples near the image edge
	InterpolatePixelS<T> interpolate =
			FactoryInterpolation.bilinearPixelS(imageType, BorderType.EXTENDED);
	return new PyramidFloatGaussianScale<>(interpolate,scaleFactors,sigmas,imageType);
}
Creates a float pyramid where each layer is blurred using a Gaussian with the specified sigma . Bilinear interpolation is used when sub - sampling .
98
32
27,003
/**
 * Interpolates the value of a function at 'sample' using Lagrange's formula over the
 * discrete samples (x[i],y[i]) for i in [i0,i1]. The span [i0,i1] selects which samples
 * are used and thus the order of the approximation.
 *
 * @param sample point the function is evaluated at
 * @param x      sample locations
 * @param y      function values at the sample locations
 * @param i0     index of the first sample used, inclusive
 * @param i1     index of the last sample used, inclusive
 * @return interpolated function value at 'sample'
 */
public static double process_F64( double sample , double x[] , double y[] , int i0 , int i1 ) {
	double result = 0;
	for( int i = i0; i <= i1; i++ ) {
		double xi = x[i];
		// numerator: product of (sample - x[j]) over all j != i
		double numerator = 1.0;
		for( int j = i0; j <= i1; j++ )
			if( j != i )
				numerator *= sample - x[j];
		// denominator: product of (x[i] - x[j]) over all j != i
		double denominator = 1.0;
		for( int j = i0; j <= i1; j++ )
			if( j != i )
				denominator *= xi - x[j];
		result += (numerator/denominator)*y[i];
	}
	return result;
}
Using Lagrange's formula it interpolates the value of a function at the specified sample point given discrete samples . Which samples are used and the order of the approximation are given by i0 and i1 .
166
43
27,004
/**
 * Adds a polygon to the viewer. GUI thread safe; access to the shared polygon list is
 * synchronized.
 */
public void add( Color color , Point3D_F64... polygon ) {
	int n = polygon.length;
	final Poly poly = new Poly(n,color);
	// defensively copy each vertex so later edits by the caller don't affect the viewer
	int i = 0;
	while( i < n ) {
		poly.pts[i] = polygon[i].copy();
		i++;
	}
	synchronized( polygons ) {
		polygons.add(poly);
	}
}
Adds a polygon to the viewer . GUI Thread safe .
83
12
27,005
/**
 * Returns an image matching the requested width/height/type, reusing 'image' when it
 * already matches and declaring a new {@link BufferedImage} otherwise.
 *
 * @param width  desired image width
 * @param height desired image height
 * @param image  candidate image to reuse. can be null
 * @param type   desired BufferedImage type, e.g. TYPE_INT_RGB
 * @return an image matching the specification
 */
public static BufferedImage checkDeclare( int width , int height , BufferedImage image , int type ) {
	// reuse only when type and shape both match; otherwise declare a new image.
	// collapses the three duplicated 'new BufferedImage' branches of the original
	if( image == null || image.getType() != type
			|| image.getWidth() != width || image.getHeight() != height ) {
		return new BufferedImage(width,height,type);
	}
	return image;
}
If the provided image does not have the same shape and same type a new one is declared and returned .
104
21
27,006
/**
 * Copies the original image into the output image. If the output is null or does not
 * match the original's shape/type, a compatible new image is created and returned
 * instead.
 */
public static BufferedImage checkCopy( BufferedImage original , BufferedImage output ) {
	ColorModel cm = original.getColorModel();
	boolean isAlphaPremultiplied = cm.isAlphaPremultiplied();

	if( output == null ||
			original.getWidth() != output.getWidth() ||
			original.getHeight() != output.getHeight() ||
			original.getType() != output.getType() ) {
		// can't reuse the output. copy the raster into a freshly created compatible image
		WritableRaster raster = original.copyData(
				original.getRaster().createCompatibleWritableRaster());
		return new BufferedImage(cm,raster,isAlphaPremultiplied,null);
	}

	// shapes and types match. copy the pixel data directly into the output's raster
	original.copyData(output.getRaster());
	return output;
}
Copies the original image into the output image . If it can't do a direct copy a new compatible image is created and returned .
170
24
27,007
/**
 * Returns an image without an alpha channel. If the input has no alpha channel it is
 * returned as-is. Otherwise a new TYPE_INT_RGB image is created, the RGB channels are
 * copied into it, and the new image is returned.
 *
 * @param image input image. not modified
 * @return an image with 3 bands. may be the input instance
 */
public static BufferedImage stripAlphaChannel( BufferedImage image ) {
	int numBands = image.getRaster().getNumBands();

	if( numBands == 4 ) {
		BufferedImage output = new BufferedImage(
				image.getWidth(),image.getHeight(),BufferedImage.TYPE_INT_RGB);
		// draw the 4-band input into the RGB output, discarding alpha.
		// dispose() releases the native resources held by the graphics context,
		// which the original code leaked
		Graphics2D g2 = output.createGraphics();
		g2.drawImage(image,0,0,null);
		g2.dispose();
		return output;
	} else {
		return image;
	}
}
Returns an image which doesn't have an alpha channel . If the input image doesn't have an alpha channel to start with then it's returned as is . Otherwise a new image is created , the RGB channels are copied , and the new image is returned .
109
47
27,008
/**
 * For a BufferedImage stored internally as a byte array, extracts an interleaved image
 * that shares the same internal data array. Using this function avoids an unnecessary
 * memory copy.
 *
 * @throws IllegalArgumentException if the image is not backed by an interleaved byte raster
 */
public static InterleavedU8 extractInterleavedU8( BufferedImage img ) {
	DataBuffer buffer = img.getRaster().getDataBuffer();
	if( buffer.getDataType() == DataBuffer.TYPE_BYTE && isKnownByteFormat(img) ) {
		WritableRaster raster = img.getRaster();

		InterleavedU8 ret = new InterleavedU8();
		ret.width = img.getWidth();
		ret.height = img.getHeight();
		ret.startIndex = ConvertRaster.getOffset(raster);
		// the band count is written both on the image and its imageType descriptor
		ret.imageType.numBands = raster.getNumBands();
		ret.numBands = raster.getNumBands();
		ret.stride = ConvertRaster.stride(raster);
		// share the BufferedImage's backing array instead of copying it
		ret.data = ((DataBufferByte)buffer).getData();
		// a non-zero start index means this view does not begin at the array origin
		ret.subImage = ret.startIndex != 0;
		return ret;
	}
	throw new IllegalArgumentException("Buffered image does not have an interleaved byte raster");
}
For BufferedImage stored as a byte array internally it extracts an interleaved image . The input image and the returned image will both share the same internal data array . Using this function allows unnecessary memory copying to be avoided .
242
45
27,009
/**
 * For a BufferedImage stored internally as a byte array, extracts a gray image that
 * shares the same internal data array. Using this function avoids an unnecessary
 * memory copy.
 *
 * @throws IllegalArgumentException if the image has more than one band or is not
 * backed by a gray scale byte raster
 */
public static GrayU8 extractGrayU8( BufferedImage img ) {
	WritableRaster raster = img.getRaster();
	DataBuffer buffer = raster.getDataBuffer();

	if( buffer.getDataType() == DataBuffer.TYPE_BYTE && isKnownByteFormat(img) ) {
		if( raster.getNumBands() != 1 )
			throw new IllegalArgumentException("Input image has more than one channel");
		GrayU8 ret = new GrayU8();

		ret.width = img.getWidth();
		ret.height = img.getHeight();
		ret.startIndex = ConvertRaster.getOffset(img.getRaster());
		ret.stride = ConvertRaster.stride(img.getRaster());
		// share the BufferedImage's backing array instead of copying it
		ret.data = ((DataBufferByte)buffer).getData();

		return ret;
	}
	throw new IllegalArgumentException("Buffered image does not have a gray scale byte raster");
}
For BufferedImage stored as a byte array internally it extracts an image . The input image and the returned image will both share the same internal data array . Using this function allows unnecessary memory copying to be avoided .
223
42
27,010
/**
 * Converts a buffered image into a gray image of the specified type. If a dst image is
 * provided it will be used for output, otherwise a new image will be created.
 *
 * @throws IllegalArgumentException if the requested type is not a supported gray type
 */
public static <T extends ImageGray<T>> T convertFromSingle( BufferedImage src , T dst , Class<T> type ) {
	if( type == GrayU8.class ) {
		return (T)convertFrom(src,(GrayU8)dst);
	} else if( GrayI16.class.isAssignableFrom(type) ) {
		// handles subclasses of GrayI16. the concrete class is forwarded so the
		// converter can declare the correct output type
		return (T)convertFrom(src,(GrayI16)dst,(Class)type);
	} else if( type == GrayF32.class ) {
		return (T)convertFrom(src,(GrayF32)dst);
	} else {
		throw new IllegalArgumentException("Unknown type "+type);
	}
}
Converts a buffered image into an image of the specified type . If a dst image is provided it will be used for output , otherwise a new image will be created .
151
34
27,011
/**
 * Draws the component into a BufferedImage. If 'storage' is null a new TYPE_INT_RGB
 * image matching the component's current size is created.
 *
 * @param comp    the component to render
 * @param storage (optional) image to render into. can be null
 * @return the image the component was drawn into
 */
public static BufferedImage convertTo( JComponent comp , BufferedImage storage ) {
	if( storage == null )
		storage = new BufferedImage(comp.getWidth(),comp.getHeight(),BufferedImage.TYPE_INT_RGB);

	Graphics2D g2 = storage.createGraphics();
	// NOTE(review): paintComponents() paints the component's children; confirm that
	// paint() wasn't intended here
	comp.paintComponents(g2);
	// release the native resources held by the graphics context (leaked previously)
	g2.dispose();
	return storage;
}
Draws the component into a BufferedImage .
83
10
27,012
/**
 * Returns a Planar image with the color bands ordered to match the BufferedImage's
 * internal format. The returned image references the original image's band arrays.
 */
public static Planar orderBandsIntoBuffered( Planar src , BufferedImage dst ) {
	// see if no change is required
	if( dst.getType() == BufferedImage.TYPE_INT_RGB )
		return src;

	// create a shallow wrapper with the same shape that references the original bands
	Planar tmp = new Planar(src.type,src.getNumBands());
	tmp.width = src.width;
	tmp.height = src.height;
	tmp.stride = src.stride;
	tmp.startIndex = src.startIndex;
	for( int i = 0; i < src.getNumBands(); i++ ) {
		tmp.bands[i] = src.bands[i];
	}
	// reorder the band references in place so they match the buffered image's layout
	ConvertRaster.orderBandsBufferedFromRgb(tmp,dst);
	return tmp;
}
Returns a new image with the color bands in the appropriate ordering . The returned image will reference the original image's band arrays .
166
25
27,013
/**
 * Computes the mean absolute per-pixel difference between the two images.
 * TODO push to what ops? Also what is this error called again?
 */
public static double computeError( GrayF32 imgA , GrayF32 imgB ) {
	final int h = imgA.getHeight();
	final int w = imgA.getWidth();

	double total = 0;
	for( int y = 0; y < h; y++ ) {
		for( int x = 0; x < w; x++ ) {
			double difference = Math.abs(imgA.get(x,y) - imgB.get(x,y));
			total += difference;
		}
	}
	// average over the number of pixels
	return total/(w*h);
}
todo push to what ops? Also what is this error called again?
122
15
27,014
/**
 * Computes the weighted mean absolute difference between the two images, where each
 * pixel's contribution is scaled by the matching pixel in imgWeight.
 * TODO push to what ops?
 */
public static double computeWeightedError( GrayF32 imgA , GrayF32 imgB , GrayF32 imgWeight ) {
	final int h = imgA.getHeight();
	final int w = imgA.getWidth();

	double total = 0;
	double totalWeight = 0;
	for( int y = 0; y < h; y++ ) {
		for( int x = 0; x < w; x++ ) {
			float weight = imgWeight.get(x,y);
			double difference = Math.abs(imgA.get(x,y) - imgB.get(x,y));
			total += difference*weight;
			totalWeight += weight;
		}
	}
	// normalize by the total weight instead of the pixel count
	return total/totalWeight;
}
todo push to what ops?
153
7
27,015
/**
 * Checks to see if the point is within 10 pixels of the stitched image's border.
 */
private static boolean nearBorder( Point2D_F64 p , StitchingFromMotion2D<?,?> stitch ) {
	int r = 10;
	// close to the top or left edge
	if( p.x < r || p.y < r )
		return true;
	// close to the right edge
	if( p.x >= stitch.getStitchedImage().width - r )
		return true;
	// close to the bottom edge
	return p.y >= stitch.getStitchedImage().height - r;
}
Checks to see if the point is near the image border
98
12
27,016
/**
 * Specifies the fiducial's size along the two axes. The four 3D corner points are
 * placed on the z=0 plane, centered at the origin.
 */
public void setShape( double width , double height ) {
	double w2 = width/2;
	double h2 = height/2;
	// corners in counter-clockwise order starting from (-w/2,-h/2)
	points2D3D.get(0).location.set(-w2,-h2,0);
	points2D3D.get(1).location.set(-w2, h2,0);
	points2D3D.get(2).location.set( w2, h2,0);
	points2D3D.get(3).location.set( w2,-h2,0);
}
Specifies how big the fiducial is along its two axes .
121
14
27,017
/**
 * Estimates how sensitive this observation is to pixel noise by perturbing each
 * observation and recording the worst-case change in location and orientation.
 */
public void computeStability( Se3_F64 targetToCamera , double disturbance , FiducialStability results ) {
	targetToCamera.invert(referenceCameraToTarget);

	// worst-case errors seen so far. presumably updated by computeDisturbance() via
	// perturb() — confirm
	maxOrientation = 0;
	maxLocation = 0;

	Point3D_F64 cameraPt = new Point3D_F64();

	// compute the nominal observations: project each 3D point into normalized image
	// coordinates and record the matching pixel location
	for( int i = 0; i < points2D3D.size(); i++ ) {
		Point2D3D p23 = points2D3D.get(i);
		targetToCamera.transform(p23.location,cameraPt);
		// pinhole projection into normalized image coordinates
		p23.observation.x = cameraPt.x/cameraPt.z;
		p23.observation.y = cameraPt.y/cameraPt.z;
		refNorm.get(i).set(p23.observation);
		normToPixel.compute(p23.observation.x,p23.observation.y,refPixels.get(i));
	}

	for( int i = 0; i < points2D3D.size(); i++ ) {
		// see what happens if you tweak this observation a little bit
		perturb(disturbance,refPixels.get(i),points2D3D.get(i));
		// set it back to the nominal value
		points2D3D.get(i).observation.set(refNorm.get(i));
	}

	results.location = maxLocation;
	results.orientation = maxOrientation;
}
Estimate how sensitive this observation is to pixel noise
326
10
27,018
/**
 * Perturbs the observation in four different ways: +x, -x, +y, and -y by 'disturbance'
 * pixels, invoking computeDisturbance() for each shifted pixel.
 */
private void perturb( double disturbance , Point2D_F64 pixel , Point2D3D p23 ) {
	// shift along the x-axis in both directions
	computeDisturbance(pixel.x + disturbance, pixel.y, p23);
	computeDisturbance(pixel.x - disturbance, pixel.y, p23);
	// shift along the y-axis in both directions
	computeDisturbance(pixel.x, pixel.y + disturbance, p23);
	computeDisturbance(pixel.x, pixel.y - disturbance, p23);
}
Perturb the observation in 4 different ways
126
9
27,019
/**
 * Detects lines inside the image using a Hough detector and displays the results.
 * Alternative Hough detectors are included, commented out.
 */
public static <T extends ImageGray<T>, D extends ImageGray<D>>
void detectLines( BufferedImage image , Class<T> imageType , Class<D> derivType ) {
	// convert the line into a single band image
	T input = ConvertBufferedImage.convertFromSingle(image,null,imageType);

	// Comment/uncomment to try a different type of line detector
	DetectLineHoughPolar<T,D> detector = FactoryDetectLineAlgs.houghPolar(
			new ConfigHoughPolar(3,30,2,Math.PI/180,edgeThreshold,maxLines),imageType,derivType);
//	DetectLineHoughFoot<T,D> detector = FactoryDetectLineAlgs.houghFoot(
//			new ConfigHoughFoot(3, 8, 5, edgeThreshold,maxLines), imageType, derivType);
//	DetectLineHoughFootSubimage<T,D> detector = FactoryDetectLineAlgs.houghFootSub(
//			new ConfigHoughFootSubimage(3, 8, 5, edgeThreshold,maxLines, 2, 2), imageType, derivType);

	List<LineParametric2D_F32> found = detector.detect(input);

	// display the results
	ImageLinePanel gui = new ImageLinePanel();
	gui.setImage(image);
	gui.setLines(found);
	gui.setPreferredSize(new Dimension(image.getWidth(),image.getHeight()));

	listPanel.addItem(gui,"Found Lines");
}
Detects lines inside the image using different types of Hough detectors
348
13
27,020
/**
 * Detects line segments inside the image using a grid RANSAC detector and displays
 * the results.
 */
public static <T extends ImageGray<T>, D extends ImageGray<D>>
void detectLineSegments( BufferedImage image , Class<T> imageType , Class<D> derivType ) {
	// convert the line into a single band image
	T input = ConvertBufferedImage.convertFromSingle(image,null,imageType);

	// Comment/uncomment to try a different type of line detector
	DetectLineSegmentsGridRansac<T,D> detector =
			FactoryDetectLineAlgs.lineRansac(40,30,2.36,true,imageType,derivType);

	List<LineSegment2D_F32> found = detector.detect(input);

	// display the results
	ImageLinePanel gui = new ImageLinePanel();
	gui.setImage(image);
	gui.setLineSegments(found);
	gui.setPreferredSize(new Dimension(image.getWidth(),image.getHeight()));

	listPanel.addItem(gui,"Found Line Segments");
}
Detects segments inside the image
227
6
27,021
/**
 * Computes a coupled 2D hue-saturation histogram for each image. Intensity lives in
 * Value, so using only hue and saturation makes the descriptor lighting independent.
 *
 * @throws RuntimeException if an image can't be loaded
 */
public static List<double[]> coupledHueSat( List<String> images ) {
	List<double[]> points = new ArrayList<>();

	Planar<GrayF32> rgb = new Planar<>(GrayF32.class,1,1,3);
	Planar<GrayF32> hsv = new Planar<>(GrayF32.class,1,1,3);

	for( String path : images ) {
		BufferedImage buffered = UtilImageIO.loadImage(path);
		if( buffered == null ) throw new RuntimeException("Can't load image!");

		rgb.reshape(buffered.getWidth(),buffered.getHeight());
		hsv.reshape(buffered.getWidth(),buffered.getHeight());

		ConvertBufferedImage.convertFrom(buffered,rgb,true);
		ColorHsv.rgbToHsv(rgb,hsv);

		// use only the hue and saturation bands. value is ignored
		Planar<GrayF32> hs = hsv.partialSpectrum(0,1);

		// The number of bins is an important parameter. Try adjusting it
		Histogram_F64 histogram = new Histogram_F64(12,12);
		histogram.setRange(0,0,2.0*Math.PI); // range of hue is from 0 to 2PI
		histogram.setRange(1,0,1.0);         // range of saturation is from 0 to 1

		// Compute the histogram
		GHistogramFeatureOps.histogram(hs,histogram);

		UtilFeature.normalizeL2(histogram); // normalize so that image size doesn't matter

		points.add(histogram.value);
	}

	return points;
}
HSV stores color information in Hue and Saturation while intensity is in Value . This computes a 2D histogram from hue and saturation only which makes it lighting independent .
382
35
27,022
/**
 * Computes two independent 1D histograms from hue and saturation, then concatenates
 * them into a single descriptor. Less affected by sparsity, but can produce worse
 * results since hue and saturation are usually not actually decoupled.
 *
 * @throws RuntimeException if an image can't be loaded
 */
public static List<double[]> independentHueSat( List<File> images ) {
	List<double[]> points = new ArrayList<>();

	// The number of bins is an important parameter. Try adjusting it
	TupleDesc_F64 histogramHue = new TupleDesc_F64(30);
	// NOTE(review): despite the name, this histogram bins saturation (band 1, range 0..1)
	TupleDesc_F64 histogramValue = new TupleDesc_F64(30);

	List<TupleDesc_F64> histogramList = new ArrayList<>();
	histogramList.add(histogramHue);
	histogramList.add(histogramValue);

	Planar<GrayF32> rgb = new Planar<>(GrayF32.class,1,1,3);
	Planar<GrayF32> hsv = new Planar<>(GrayF32.class,1,1,3);

	for( File f : images ) {
		BufferedImage buffered = UtilImageIO.loadImage(f.getPath());
		if( buffered == null ) throw new RuntimeException("Can't load image!");

		rgb.reshape(buffered.getWidth(),buffered.getHeight());
		hsv.reshape(buffered.getWidth(),buffered.getHeight());
		ConvertBufferedImage.convertFrom(buffered,rgb,true);
		ColorHsv.rgbToHsv(rgb,hsv);

		GHistogramFeatureOps.histogram(hsv.getBand(0),0,2*Math.PI,histogramHue);
		GHistogramFeatureOps.histogram(hsv.getBand(1),0,1,histogramValue);

		// need to combine them into a single descriptor for processing later on
		TupleDesc_F64 imageHist = UtilFeature.combine(histogramList,null);

		UtilFeature.normalizeL2(imageHist); // normalize so that image size doesn't matter

		points.add(imageHist.value);
	}

	return points;
}
Computes two independent 1D histograms from hue and saturation . Less affected by sparsity , but can produce worse results since the basic assumption that hue and saturation are decoupled is most of the time false .
451
42
27,023
/**
 * Constructs a coupled 3D histogram using RGB. RGB is a popular color space, but the
 * resulting histogram depends on lighting conditions and might not produce accurate
 * results.
 *
 * @throws RuntimeException if an image can't be loaded
 */
public static List<double[]> coupledRGB( List<File> images ) {
	List<double[]> points = new ArrayList<>();

	Planar<GrayF32> rgb = new Planar<>(GrayF32.class,1,1,3);

	for( File f : images ) {
		BufferedImage buffered = UtilImageIO.loadImage(f.getPath());
		if( buffered == null ) throw new RuntimeException("Can't load image!");

		rgb.reshape(buffered.getWidth(),buffered.getHeight());
		ConvertBufferedImage.convertFrom(buffered,rgb,true);

		// The number of bins is an important parameter. Try adjusting it.
		// a fresh histogram is needed per image since it accumulates counts
		Histogram_F64 histogram = new Histogram_F64(10,10,10);
		histogram.setRange(0,0,255);
		histogram.setRange(1,0,255);
		histogram.setRange(2,0,255);

		GHistogramFeatureOps.histogram(rgb,histogram);

		UtilFeature.normalizeL2(histogram); // normalize so that image size doesn't matter

		points.add(histogram.value);
	}

	return points;
}
Constructs a 3D histogram using RGB . RGB is a popular color space but the resulting histogram will depend on lighting conditions and might not produce the accurate results .
277
34
27,024
/**
 * Computes a histogram from the gray scale intensity image alone. Probably the least
 * effective descriptor for looking up similar images.
 *
 * @throws RuntimeException if an image can't be loaded
 */
public static List<double[]> histogramGray( List<File> images ) {
	List<double[]> points = new ArrayList<>();

	GrayU8 gray = new GrayU8(1,1);
	for( File f : images ) {
		BufferedImage buffered = UtilImageIO.loadImage(f.getPath());
		if( buffered == null ) throw new RuntimeException("Can't load image!");

		gray.reshape(buffered.getWidth(),buffered.getHeight());
		ConvertBufferedImage.convertFrom(buffered,gray,true);

		// a fresh descriptor is needed per image since histogram() fills it in
		TupleDesc_F64 imageHist = new TupleDesc_F64(150);
		HistogramFeatureOps.histogram(gray,255,imageHist);

		UtilFeature.normalizeL2(imageHist); // normalize so that image size doesn't matter

		points.add(imageHist.value);
	}

	return points;
}
Computes a histogram from the gray scale intensity image alone . Probably the least effective at looking up similar images .
212
23
27,025
public static int [ ] imageOffsets ( double radius , int imgWidth ) { double PI2 = Math . PI * 2.0 ; double circumference = PI2 * radius ; int num = ( int ) Math . ceil ( circumference ) ; num = num - num % 4 ; double angleStep = PI2 / num ; int temp [ ] = new int [ ( int ) Math . ceil ( circumference ) ] ; int i = 0 ; int prev = 0 ; for ( double ang = 0 ; ang < PI2 ; ang += angleStep ) { int x = ( int ) Math . round ( Math . cos ( ang ) * radius ) ; int y = ( int ) Math . round ( Math . sin ( ang ) * radius ) ; int pixel = y * imgWidth + x ; if ( pixel != prev ) { // System.out.println("i = "+i+" x = "+x+" y = "+y); temp [ i ++ ] = pixel ; } prev = pixel ; } if ( i == temp . length ) return temp ; else { int ret [ ] = new int [ i ] ; System . arraycopy ( temp , 0 , ret , 0 , i ) ; return ret ; } }
Computes the offsets for a discretized circle of the specified radius for an image with the specified width .
256
22
27,026
/**
 * For some features there are pre-made implementations of DetectDescribePoint. These
 * exist only where there was a performance advantage or the combination is very common.
 */
public static <T extends ImageGray<T>, TD extends TupleDesc>
DetectDescribePoint<T,TD> createFromPremade( Class<T> imageType ) {
	return (DetectDescribePoint)FactoryDetectDescribe.surfStable(
			new ConfigFastHessian(1,2,200,1,9,4,4),null,null,imageType);
	// alternative pre-made detector, kept for experimentation:
//	return (DetectDescribePoint)FactoryDetectDescribe.sift(new ConfigCompleteSift(-1,5,300));
}
For some features there are pre - made implementations of DetectDescribePoint . This has only been done in situations where there was a performance advantage or that it was a very common combination .
114
37
27,027
/**
 * Any arbitrary implementation of InterestPointDetector / OrientationImage /
 * DescribeRegionPoint can be combined into DetectDescribePoint. The syntax is more
 * complex but the end result is more flexible. This should only be done if there
 * isn't a pre-made DetectDescribePoint.
 */
public static <T extends ImageGray<T>, TD extends TupleDesc>
DetectDescribePoint<T,TD> createFromComponents( Class<T> imageType ) {
	// create a corner detector
	Class derivType = GImageDerivativeOps.getDerivativeType(imageType);
	GeneralFeatureDetector corner = FactoryDetectPoint.createShiTomasi(
			new ConfigGeneralDetector(1000,5,1),null,derivType);
	InterestPointDetector detector = FactoryInterestPoint.wrapPoint(corner,1,imageType,derivType);

	// describe points using BRIEF
	DescribeRegionPoint describe = FactoryDescribeRegionPoint.brief(new ConfigBrief(true),imageType);

	// Combine together.
	// NOTE: orientation will not be estimated
	return FactoryDetectDescribe.fuseTogether(detector,null,describe);
}
Any arbitrary implementation of InterestPointDetector , OrientationImage , and DescribeRegionPoint can be combined into a DetectDescribePoint . The syntax is more complex but the end result is more flexible . This should only be done if there isn't a pre - made DetectDescribePoint .
186
55
27,028
/**
 * Computes the 2D sample weights by convolving a 1D Gaussian kernel with itself: a 1D
 * Gaussian is evaluated at evenly spaced points spanning +/- numSigmas standard
 * deviations and the 2D weight is the outer product.
 */
protected void computeWeights( int numSamples , double numSigmas ) {
	weights = new float[ numSamples*numSamples ];

	// evaluate the 1D Gaussian PDF across the sample span
	float oneD[] = new float[ numSamples ];
	for( int i = 0; i < numSamples; i++ ) {
		float x = i/(float)(numSamples-1);
		oneD[i] = (float)UtilGaussian.computePDF(0,1,2f*numSigmas*(x-0.5f));
	}

	// the separable 2D weight is the product of the row and column 1D weights
	int index = 0;
	for( int y = 0; y < numSamples; y++ ) {
		for( int x = 0; x < numSamples; x++ ) {
			weights[index++] = oneD[y]*oneD[x];
		}
	}
}
compute the weights by convolving 1D gaussian kernel
175
12
27,029
/**
 * Creates the list of sample points in square coordinates, in row-major order. Values
 * range from -0.5 to 0.5 along each axis.
 */
protected void createSamplePoints( int numSamples ) {
	for( int y = 0; y < numSamples; y++ ) {
		float sampleY = (y/(numSamples-1.0f) - 0.5f);
		for( int x = 0; x < numSamples; x++ ) {
			float sampleX = (x/(numSamples-1.0f) - 0.5f);
			samplePts.add( new Point2D_F32(sampleX,sampleY) );
		}
	}
}
Creates the list of points in square coordinates that it will sample . Values will range from -0.5 to 0.5 along each axis .
119
29
27,030
/**
 * Quickly computes the histogram inside the region using the fast interpolation path.
 * Caller is expected to have verified that all samples are inside the fast bounds.
 */
protected void computeHistogramInside( RectangleRotate_F32 region ) {
	for( int i = 0; i < samplePts.size(); i++ ) {
		Point2D_F32 p = samplePts.get(i);

		// convert the sample point from square coordinates into image coordinates
		squareToImageSample(p.x,p.y,region);

		// unchecked/fast interpolation of the pixel value at (imageX,imageY)
		interpolate.get_fast(imageX,imageY,value);

		int indexHistogram = computeHistogramBin(value);

		// record which bin each sample fell into and add its weighted contribution
		sampleHistIndex[i] = indexHistogram;
		histogram[indexHistogram] += weights[i];
	}
}
Computes the histogram quickly inside the image
127
9
27,031
/**
 * Computes the histogram while handling samples that fall outside the image border.
 * Outside samples are skipped and marked with a bin index of -1.
 */
protected void computeHistogramBorder( T image , RectangleRotate_F32 region ) {
	for( int i = 0; i < samplePts.size(); i++ ) {
		Point2D_F32 p = samplePts.get(i);
		squareToImageSample(p.x,p.y,region);

		// make sure its inside the image
		if( !BoofMiscOps.checkInside(image,imageX,imageY) ) {
			sampleHistIndex[i] = -1;
		} else {
			// use the slower interpolation which can handle the border
			interpolate.get(imageX,imageY,value);

			int indexHistogram = computeHistogramBin(value);
			sampleHistIndex[i] = indexHistogram;
			histogram[indexHistogram] += weights[i];
		}
	}
}
Computes the histogram and skips pixels which are outside the image border
182
15
27,032
/**
 * Given the (possibly multi-band) value of a pixel, computes which histogram bin it
 * belongs in. Band bins are combined with a stride of numBins per band.
 */
protected int computeHistogramBin( float value[] ) {
	int index = 0;
	int stride = 1;
	for( int band = 0; band < value.length; band++ ) {
		// bin within this band, scaled by the maximum pixel value
		int bin = (int)(numBins*value[band]/maxPixelValue);
		index += bin*stride;
		stride *= numBins;
	}
	return index;
}
Given the value of a pixel compute which bin in the histogram it belongs in
96
16
27,033
/**
 * Checks that all four corners of the region land inside the interpolator's fast
 * bounds, meaning the region can be sampled with the fast unchecked algorithm.
 */
protected boolean isInFastBounds( RectangleRotate_F32 region ) {
	// the region's corners in square coordinates, in the same order as before
	float[][] corners = {{-0.5f,-0.5f},{-0.5f,0.5f},{0.5f,0.5f},{0.5f,-0.5f}};
	for( float[] c : corners ) {
		squareToImageSample(c[0],c[1],region);
		if( !interpolate.isInFastBounds(imageX,imageY) )
			return false;
	}
	return true;
}
Checks to see if the region can be sampled using the fast algorithm
186
14
27,034
/**
 * Converts a point from square coordinates into image coordinates by scaling to the
 * region's size, rotating, then translating to the region's center. The result is
 * written into the imageX/imageY fields.
 */
protected void squareToImageSample( float x , float y , RectangleRotate_F32 region ) {
	// -1 because it starts counting at 0. otherwise width+1 samples are made
	x *= region.width - 1;
	y *= region.height - 1;

	// rotate then translate. NOTE(review): assumes fields c and s hold the cosine and
	// sine of the region's orientation — confirm
	imageX = x*c - y*s + region.cx;
	imageY = x*s + y*c + region.cy;
}
Converts a point from square coordinates into image coordinates
89
10
27,035
/**
 * Defines sparse image derivative operators: second derivatives xx and yy from a 1D
 * difference kernel convolved with itself, and xy from the 2D convolution of the two
 * 1D kernels. All use an extended image border.
 */
private void createSparseDerivatives() {
	Kernel1D_F32 kernelD = new Kernel1D_F32(new float[]{-1,0,1},3);

	// second-derivative kernels built by convolving the first-derivative kernel
	Kernel1D_F32 kernelDD = KernelMath.convolve1D_F32(kernelD,kernelD);
	Kernel2D_F32 kernelXY = KernelMath.convolve2D(kernelD,kernelD);

	derivXX = FactoryConvolveSparse.horizontal1D(GrayF32.class,kernelDD);
	derivXY = FactoryConvolveSparse.convolve2D(GrayF32.class,kernelXY);
	derivYY = FactoryConvolveSparse.vertical1D(GrayF32.class,kernelDD);

	// all three operators share the same extended border handling
	ImageBorder<GrayF32> border = FactoryImageBorder.single(GrayF32.class,BorderType.EXTENDED);
	derivXX.setImageBorder(border);
	derivXY.setImageBorder(border);
	derivYY.setImageBorder(border);
}
Define sparse image derivative operators .
227
7
27,036
/**
 * Detects SIFT features inside the input image, iterating across every octave and
 * scale of the scale-space.
 */
public void process( GrayF32 input ) {
	scaleSpace.initialize(input);
	detections.reset();

	do {
		// scale from octave to input image
		pixelScaleToInput = scaleSpace.pixelScaleCurrentToInput();

		// detect features in the image
		for( int j = 1; j < scaleSpace.getNumScales()+1; j++ ) {

			// not really sure how to compute the scale for features found at a particular DoG image
			// using the average resulted in less visually appealing circles in a test image
			sigmaLower = scaleSpace.computeSigmaScale(j-1);
			sigmaTarget = scaleSpace.computeSigmaScale(j);
			sigmaUpper = scaleSpace.computeSigmaScale(j+1);

			// grab the local DoG scale space images
			dogLower = scaleSpace.getDifferenceOfGaussian(j-1);
			dogTarget = scaleSpace.getDifferenceOfGaussian(j);
			dogUpper = scaleSpace.getDifferenceOfGaussian(j+1);

			detectFeatures(j);
		}
	} while( scaleSpace.computeNextOctave() );
}
Detects SIFT features inside the input image
248
9
27,037
/**
 * Detects features inside the Difference-of-Gaussian image at the current scale.
 * Local extrema found by the extractor are kept only if they are also extrema in
 * scale-space.
 */
protected void detectFeatures( int scaleIndex ) {
	extractor.process(dogTarget);
	FastQueue<NonMaxLimiter.LocalExtreme> found = extractor.getLocalExtreme();

	derivXX.setImage(dogTarget);
	derivXY.setImage(dogTarget);
	derivYY.setImage(dogTarget);

	for( int i = 0; i < found.size; i++ ) {
		NonMaxLimiter.LocalExtreme e = found.get(i);
		// +1 tests for a maximum, -1 flips the comparison to test for a minimum
		float signAdj = e.max ? 1f : -1f;
		if( isScaleSpaceExtremum(e.location.x,e.location.y,e.getValue(),signAdj) ) {
			processFeatureCandidate(e.location.x,e.location.y,e.getValue(),e.max);
		}
	}
}
Detect features inside the Difference - of - Gaussian image at the current scale
242
15
27,038
/**
 * Sees if the point is a local extremum relative to the 3x3 neighborhoods in the DoG
 * images above and below in scale-space. signAdj is +1 for maximums and -1 for
 * minimums.
 */
boolean isScaleSpaceExtremum( int c_x , int c_y , float value , float signAdj ) {
	// reject points too close to the border for a full 3x3 neighborhood
	if( c_x <= 1 || c_y <= 1 || c_x >= dogLower.width-1 || c_y >= dogLower.height-1 )
		return false;

	float v;

	// multiplying by signAdj converts the minimum test into a maximum test
	value *= signAdj;

	for( int y = -1; y <= 1; y++ ) {
		for( int x = -1; x <= 1; x++ ) {
			v = dogLower.unsafe_get(c_x+x,c_y+y);
			if( v*signAdj >= value )
				return false;
			v = dogUpper.unsafe_get(c_x+x,c_y+y);
			if( v*signAdj >= value )
				return false;
		}
	}

	return true;
}
See if the point is a local extremum in scale - space above and below .
186
17
27,039
/**
 * Computes a transform used to rescale an image. The scale is computed directly from
 * the sizes of the two input images and independently scales the x and y axes.
 */
public static PixelTransformAffine_F32 transformScale( ImageBase from , ImageBase to ,
													   PixelTransformAffine_F32 distort ) {
	if( distort == null )
		distort = new PixelTransformAffine_F32();

	// independent scale factor for each axis
	float scaleX = (float)(to.width)/(float)(from.width);
	float scaleY = (float)(to.height)/(float)(from.height);

	Affine2D_F32 affine = distort.getModel();
	affine.set(scaleX,0,0,scaleY,0,0);
	return distort;
}
Computes a transform which is used to rescale an image . The scale is computed directly from the size of the two input images and independently scales the x and y axises .
134
36
27,040
/**
 * Projects the found estimate of E onto essential space by forcing its singular
 * values to (1,1,0).
 *
 * @return true if the SVD succeeded
 */
protected boolean projectOntoEssential( DMatrixRMaj E ) {
	if( !svdConstraints.decompose(E) ) {
		return false;
	}
	svdV = svdConstraints.getV(svdV,false);
	svdU = svdConstraints.getU(svdU,false);
	svdS = svdConstraints.getW(svdS);

	SingularOps_DDRM.descendingOrder(svdU,false,svdS,svdV,false);

	// project it into essential space
	// the scale factor is arbitrary, but the first two singular values need
	// to be the same. so just set them to one
	svdS.unsafe_set(0,0,1);
	svdS.unsafe_set(1,1,1);
	svdS.unsafe_set(2,2,0);

	// recompute F
	CommonOps_DDRM.mult(svdU,svdS,temp0);
	CommonOps_DDRM.multTransB(temp0,svdV,E);

	return true;
}
Projects the found estimate of E onto essential space .
249
11
27,041
/**
 * Projects the found estimate of F onto Fundamental space by zeroing its smallest
 * singular value.
 *
 * @return true if the SVD succeeded
 */
protected boolean projectOntoFundamentalSpace( DMatrixRMaj F ) {
	if( !svdConstraints.decompose(F) ) {
		return false;
	}
	svdV = svdConstraints.getV(svdV,false);
	svdU = svdConstraints.getU(svdU,false);
	svdS = svdConstraints.getW(svdS);

	SingularOps_DDRM.descendingOrder(svdU,false,svdS,svdV,false);

	// the smallest singular value needs to be set to zero, unlike the essential-space
	// projection the other two singular values are left untouched
	svdS.set(2,2,0);

	// recompute F
	CommonOps_DDRM.mult(svdU,svdS,temp0);
	CommonOps_DDRM.multTransB(temp0,svdV,F);

	return true;
}
Projects the found estimate of F onto Fundamental space .
199
11
27,042
/**
 * Learns a fern from the specified region. No noise is added. See learnFernNoise()
 * for the noisy variant which shares this nominal-sample step.
 */
public void learnFern( boolean positive , ImageRectangle r ) {
	float rectWidth = r.getWidth();
	float rectHeight = r.getHeight();

	// center of the region
	float c_x = r.x0 + (rectWidth-1)/2f;
	float c_y = r.y0 + (rectHeight-1)/2f;

	for( int i = 0; i < ferns.length; i++ ) {
		// first learn it with no noise
		int value = computeFernValue(c_x,c_y,rectWidth,rectHeight,ferns[i]);

		TldFernFeature f = managers[i].lookupFern(value);
		increment(f,positive);
	}
}
Learns a fern from the specified region . No noise is added .
161
15
27,043
public void learnFernNoise ( boolean positive , ImageRectangle r ) { float rectWidth = r . getWidth ( ) ; float rectHeight = r . getHeight ( ) ; float c_x = r . x0 + ( rectWidth - 1 ) / 2.0f ; float c_y = r . y0 + ( rectHeight - 1 ) / 2.0f ; for ( int i = 0 ; i < ferns . length ; i ++ ) { // first learn it with no noise int value = computeFernValue ( c_x , c_y , rectWidth , rectHeight , ferns [ i ] ) ; TldFernFeature f = managers [ i ] . lookupFern ( value ) ; increment ( f , positive ) ; for ( int j = 0 ; j < numLearnRandom ; j ++ ) { value = computeFernValueRand ( c_x , c_y , rectWidth , rectHeight , ferns [ i ] ) ; f = managers [ i ] . lookupFern ( value ) ; increment ( f , positive ) ; } } }
Computes the value for each fern inside the region and updates their P and N values . Noise is added to the image measurements to take into account the variability .
236
34
27,044
private void increment ( TldFernFeature f , boolean positive ) { if ( positive ) { f . incrementP ( ) ; if ( f . numP > maxP ) maxP = f . numP ; } else { f . incrementN ( ) ; if ( f . numN > maxN ) maxN = f . numN ; } }
Increments the P and N value for a fern . Also updates the maxP and maxN statistics so that it knows when to re - normalize data structures .
75
34
27,045
public boolean lookupFernPN ( TldRegionFernInfo info ) { ImageRectangle r = info . r ; float rectWidth = r . getWidth ( ) ; float rectHeight = r . getHeight ( ) ; float c_x = r . x0 + ( rectWidth - 1 ) / 2.0f ; float c_y = r . y0 + ( rectHeight - 1 ) / 2.0f ; int sumP = 0 ; int sumN = 0 ; for ( int i = 0 ; i < ferns . length ; i ++ ) { TldFernDescription fern = ferns [ i ] ; int value = computeFernValue ( c_x , c_y , rectWidth , rectHeight , fern ) ; TldFernFeature f = managers [ i ] . table [ value ] ; if ( f != null ) { sumP += f . numP ; sumN += f . numN ; } } info . sumP = sumP ; info . sumN = sumN ; return sumN != 0 || sumP != 0 ; }
For the specified region computes the values of each fern inside of it and then retrieves their P and N values . The sum of which is stored inside of info .
233
35
27,046
protected int computeFernValue ( float c_x , float c_y , float rectWidth , float rectHeight , TldFernDescription fern ) { rectWidth -= 1 ; rectHeight -= 1 ; int desc = 0 ; for ( int i = 0 ; i < fern . pairs . length ; i ++ ) { Point2D_F32 p_a = fern . pairs [ i ] . a ; Point2D_F32 p_b = fern . pairs [ i ] . b ; float valA = interpolate . get_fast ( c_x + p_a . x * rectWidth , c_y + p_a . y * rectHeight ) ; float valB = interpolate . get_fast ( c_x + p_b . x * rectWidth , c_y + p_b . y * rectHeight ) ; desc *= 2 ; if ( valA < valB ) { desc += 1 ; } } return desc ; }
Computes the value of the specified fern at the specified location in the image .
209
17
27,047
public void renormalizeP ( ) { int targetMax = maxP / 20 ; for ( int i = 0 ; i < managers . length ; i ++ ) { TldFernManager m = managers [ i ] ; for ( int j = 0 ; j < m . table . length ; j ++ ) { TldFernFeature f = m . table [ j ] ; if ( f == null ) continue ; f . numP = targetMax * f . numP / maxP ; } } maxP = targetMax ; }
Renormalizes fern . numP to avoid overflow
113
11
27,048
public void renormalizeN ( ) { int targetMax = maxN / 20 ; for ( int i = 0 ; i < managers . length ; i ++ ) { TldFernManager m = managers [ i ] ; for ( int j = 0 ; j < m . table . length ; j ++ ) { TldFernFeature f = m . table [ j ] ; if ( f == null ) continue ; f . numN = targetMax * f . numN / maxN ; } } maxN = targetMax ; }
Renormalizes fern . numN to avoid overflow
113
11
27,049
public void describe ( double x , double y , double angle , double scale , TupleDesc_F64 ret ) { double c = Math . cos ( angle ) , s = Math . sin ( angle ) ; // By assuming that the entire feature is inside the image faster algorithms can be used // the results are also of dubious value when interacting with the image border. boolean isInBounds = SurfDescribeOps . isInside ( ii , x , y , radiusDescriptor , widthSample , scale , c , s ) ; // declare the feature if needed if ( ret == null ) ret = new BrightFeature ( featureDOF ) ; else if ( ret . value . length != featureDOF ) throw new IllegalArgumentException ( "Provided feature must have " + featureDOF + " values" ) ; gradient . setImage ( ii ) ; gradient . setWidth ( widthSample * scale ) ; // use a safe method if its along the image border SparseImageGradient gradient = isInBounds ? this . gradient : this . gradientSafe ; // extract descriptor features ( x , y , c , s , scale , gradient , ret . value ) ; }
Compute SURF descriptor but without laplacian sign
244
12
27,050
public boolean computeLaplaceSign ( int x , int y , double scale ) { int s = ( int ) Math . ceil ( scale ) ; kerXX = DerivativeIntegralImage . kernelDerivXX ( 9 * s , kerXX ) ; kerYY = DerivativeIntegralImage . kernelDerivYY ( 9 * s , kerYY ) ; double lap = GIntegralImageOps . convolveSparse ( ii , kerXX , x , y ) ; lap += GIntegralImageOps . convolveSparse ( ii , kerYY , x , y ) ; return lap > 0 ; }
Compute the sign of the Laplacian using a sparse convolution .
130
16
27,051
public static < I extends ImageGray < I > , D extends ImageGray < D > > DetectLineHoughPolar < I , D > houghPolar ( ConfigHoughPolar config , Class < I > imageType , Class < D > derivType ) { if ( config == null ) throw new IllegalArgumentException ( "This is no default since minCounts must be specified" ) ; ImageGradient < I , D > gradient = FactoryDerivative . sobel ( imageType , derivType ) ; return new DetectLineHoughPolar <> ( config . localMaxRadius , config . minCounts , config . resolutionRange , config . resolutionAngle , config . thresholdEdge , config . maxLines , gradient ) ; }
Creates a Hough line detector based on polar parametrization .
160
15
27,052
public static BufferedImage renderContours ( List < Contour > contours , int colorExternal , int colorInternal , int width , int height , BufferedImage out ) { if ( out == null ) { out = new BufferedImage ( width , height , BufferedImage . TYPE_INT_RGB ) ; } else { Graphics2D g2 = out . createGraphics ( ) ; g2 . setColor ( Color . BLACK ) ; g2 . fillRect ( 0 , 0 , width , height ) ; } for ( Contour c : contours ) { for ( Point2D_I32 p : c . external ) { out . setRGB ( p . x , p . y , colorExternal ) ; } for ( List < Point2D_I32 > l : c . internal ) { for ( Point2D_I32 p : l ) { out . setRGB ( p . x , p . y , colorInternal ) ; } } } return out ; }
Draws contours . Internal and external contours are drawn in different user - specified colors .
207
16
27,053
public static void render ( List < Contour > contours , int colors [ ] , BufferedImage out ) { colors = checkColors ( colors , contours . size ( ) ) ; for ( int i = 0 ; i < contours . size ( ) ; i ++ ) { Contour c = contours . get ( i ) ; int color = colors [ i ] ; for ( Point2D_I32 p : c . external ) { out . setRGB ( p . x , p . y , color ) ; } } }
Renders only the external contours . Each contour is individually colored as specified by colors
114
18
27,054
public static BufferedImage renderBinary ( GrayU8 binaryImage , boolean invert , BufferedImage out ) { if ( out == null || ( out . getWidth ( ) != binaryImage . width || out . getHeight ( ) != binaryImage . height ) ) { out = new BufferedImage ( binaryImage . getWidth ( ) , binaryImage . getHeight ( ) , BufferedImage . TYPE_BYTE_GRAY ) ; } try { WritableRaster raster = out . getRaster ( ) ; DataBuffer buffer = raster . getDataBuffer ( ) ; if ( buffer . getDataType ( ) == DataBuffer . TYPE_BYTE ) { renderBinary ( binaryImage , invert , ( DataBufferByte ) buffer , raster ) ; } else if ( buffer . getDataType ( ) == DataBuffer . TYPE_INT ) { renderBinary ( binaryImage , invert , ( DataBufferInt ) buffer , raster ) ; } else { _renderBinary ( binaryImage , invert , out ) ; } } catch ( SecurityException e ) { _renderBinary ( binaryImage , invert , out ) ; } // hack so that it knows the buffer has been modified out . setRGB ( 0 , 0 , out . getRGB ( 0 , 0 ) ) ; return out ; }
Renders a binary image . 0 = black and 1 = white .
282
14
27,055
public void process ( PairLineNorm line , AssociatedPair point ) { // t0 = (F*x) cross l' GeometryMath_F64 . mult ( F , point . p1 , Fx ) ; GeometryMath_F64 . cross ( Fx , line . getL2 ( ) , t0 ) ; // t1 = x' cross ((f*x) cross l') GeometryMath_F64 . cross ( point . p2 , t0 , t1 ) ; // t0 = x' cross e' GeometryMath_F64 . cross ( point . p2 , e2 , t0 ) ; double top = GeometryMath_F64 . dot ( t0 , t1 ) ; double bottom = t0 . normSq ( ) * ( line . l1 . x * point . p1 . x + line . l1 . y * point . p1 . y + line . l1 . z ) ; // e' * l^T GeometryMath_F64 . outerProd ( e2 , line . l1 , el ) ; // cross(l')*F GeometryMath_F64 . multCrossA ( line . l2 , F , lf ) ; CommonOps_DDRM . add ( lf , top / bottom , el , H ) ; // pick a good scale and sign for H adjust . adjust ( H , point ) ; }
Computes the homography based on a line and point on the plane
303
14
27,056
public static int downSampleSize ( int length , int squareWidth ) { int ret = length / squareWidth ; if ( length % squareWidth != 0 ) ret ++ ; return ret ; }
Computes the length of a down sampled image based on the original length and the square width
39
18
27,057
public static void reshapeDown ( ImageBase image , int inputWidth , int inputHeight , int squareWidth ) { int w = downSampleSize ( inputWidth , squareWidth ) ; int h = downSampleSize ( inputHeight , squareWidth ) ; image . reshape ( w , h ) ; }
Reshapes an image so that it is the correct size to store the down sampled image
63
18
27,058
public static < T extends ImageGray < T > > void down ( Planar < T > input , int sampleWidth , Planar < T > output ) { for ( int band = 0 ; band < input . getNumBands ( ) ; band ++ ) { down ( input . getBand ( band ) , sampleWidth , output . getBand ( band ) ) ; } }
Down samples a planar image . Type checking is done at runtime .
79
14
27,059
public void setCamera1 ( double fx , double fy , double skew , double cx , double cy ) { PerspectiveOps . pinholeToMatrix ( fx , fy , skew , cx , cy , K1 ) ; }
Specifies known intrinsic parameters for view 1
49
8
27,060
public void setCamera2 ( double fx , double fy , double skew , double cx , double cy ) { PerspectiveOps . pinholeToMatrix ( fx , fy , skew , cx , cy , K2 ) ; PerspectiveOps . invertPinhole ( K2 , K2_inv ) ; }
Specifies known intrinsic parameters for view 2
66
8
27,061
public boolean estimatePlaneAtInfinity ( DMatrixRMaj P2 , Vector3D_F64 v ) { PerspectiveOps . projectionSplit ( P2 , Q2 , q2 ) ; // inv(K2)*(Q2*K1 + q2*v') CommonOps_DDF3 . mult ( K2_inv , q2 , t2 ) ; CommonOps_DDF3 . mult ( K2_inv , Q2 , tmpA ) ; CommonOps_DDF3 . mult ( tmpA , K1 , tmpB ) ; // Find the rotation matrix R*t2 = [||t2||,0,0]^T computeRotation ( t2 , RR ) ; // Compute W CommonOps_DDF3 . mult ( RR , tmpB , W ) ; // Compute v, the plane at infinity // v = (w2 cross w3 / ||w3|| - w1 ) / ||t2|| w2 . set ( W . a21 , W . a22 , W . a23 ) ; w3 . set ( W . a31 , W . a32 , W . a33 ) ; double n3 = w3 . norm ( ) ; v . cross ( w2 , w3 ) ; // approximation here, w2 and w3 might not be orthogonal v . divideIP ( n3 ) ; v . x -= W . a11 ; v . y -= W . a12 ; v . z -= W . a13 ; v . divideIP ( t2 . a1 ) ; // really just a sanity check for bad input return ! ( UtilEjml . isUncountable ( v . x ) || UtilEjml . isUncountable ( v . y ) || UtilEjml . isUncountable ( v . z ) ) ; }
Computes the plane at infinity
395
6
27,062
public void process ( SimpleImageSequence < T > sequence ) { // Figure out how large the GUI window should be T frame = sequence . next ( ) ; gui . setPreferredSize ( new Dimension ( frame . getWidth ( ) , frame . getHeight ( ) ) ) ; ShowImages . showWindow ( gui , "KTL Tracker" , true ) ; // process each frame in the image sequence while ( sequence . hasNext ( ) ) { frame = sequence . next ( ) ; // tell the tracker to process the frame tracker . process ( frame ) ; // if there are too few tracks spawn more if ( tracker . getActiveTracks ( null ) . size ( ) < 130 ) tracker . spawnTracks ( ) ; // visualize tracking results updateGUI ( sequence ) ; // wait for a fraction of a second so it doesn't process to fast BoofMiscOps . pause ( pause ) ; } }
Processes the sequence of images and displays the tracked features in a window
190
14
27,063
private void updateGUI ( SimpleImageSequence < T > sequence ) { BufferedImage orig = sequence . getGuiImage ( ) ; Graphics2D g2 = orig . createGraphics ( ) ; // draw tracks with semi-unique colors so you can track individual points with your eyes for ( PointTrack p : tracker . getActiveTracks ( null ) ) { int red = ( int ) ( 2.5 * ( p . featureId % 100 ) ) ; int green = ( int ) ( ( 255.0 / 150.0 ) * ( p . featureId % 150 ) ) ; int blue = ( int ) ( p . featureId % 255 ) ; VisualizeFeatures . drawPoint ( g2 , ( int ) p . x , ( int ) p . y , new Color ( red , green , blue ) ) ; } // draw tracks which have just been spawned green for ( PointTrack p : tracker . getNewTracks ( null ) ) { VisualizeFeatures . drawPoint ( g2 , ( int ) p . x , ( int ) p . y , Color . green ) ; } // tell the GUI to update gui . setImage ( orig ) ; gui . repaint ( ) ; }
Draw tracked features in blue or red if they were just spawned .
253
13
27,064
public void createSURF ( ) { ConfigFastHessian configDetector = new ConfigFastHessian ( ) ; configDetector . maxFeaturesPerScale = 250 ; configDetector . extractRadius = 3 ; configDetector . initialSampleSize = 2 ; tracker = FactoryPointTracker . dda_FH_SURF_Fast ( configDetector , null , null , imageType ) ; }
Creates a SURF feature tracker .
88
8
27,065
public void configure ( int width , int height , float vfov ) { declareVectors ( width , height ) ; float r = ( float ) Math . tan ( vfov / 2.0f ) ; for ( int pixelY = 0 ; pixelY < height ; pixelY ++ ) { float z = 2 * r * pixelY / ( height - 1 ) - r ; for ( int pixelX = 0 ; pixelX < width ; pixelX ++ ) { float theta = GrlConstants . F_PI2 * pixelX / width - GrlConstants . F_PI ; float x = ( float ) Math . cos ( theta ) ; float y = ( float ) Math . sin ( theta ) ; vectors [ pixelY * width + pixelX ] . set ( x , y , z ) ; } } }
Configures the rendered cylinder
179
5
27,066
public boolean process ( T image ) { configureContourDetector ( image ) ; binary . reshape ( image . width , image . height ) ; inputToBinary . process ( image , binary ) ; detectorSquare . process ( image , binary ) ; detectorSquare . refineAll ( ) ; detectorSquare . getPolygons ( found , null ) ; clusters = s2c . process ( found ) ; c2g . process ( clusters ) ; List < SquareGrid > grids = c2g . getGrids ( ) ; SquareGrid match = null ; double matchSize = 0 ; for ( SquareGrid g : grids ) { if ( g . columns != numCols || g . rows != numRows ) { if ( g . columns == numRows && g . rows == numCols ) { tools . transpose ( g ) ; } else { continue ; } } double size = tools . computeSize ( g ) ; if ( size > matchSize ) { matchSize = size ; match = g ; } } if ( match != null ) { if ( tools . checkFlip ( match ) ) { tools . flipRows ( match ) ; } tools . putIntoCanonical ( match ) ; if ( ! tools . orderSquareCorners ( match ) ) return false ; extractCalibrationPoints ( match ) ; return true ; } return false ; }
Process the image and detect the calibration target
289
8
27,067
void extractCalibrationPoints ( SquareGrid grid ) { calibrationPoints . clear ( ) ; for ( int row = 0 ; row < grid . rows ; row ++ ) { row0 . clear ( ) ; row1 . clear ( ) ; for ( int col = 0 ; col < grid . columns ; col ++ ) { Polygon2D_F64 square = grid . get ( row , col ) . square ; row0 . add ( square . get ( 0 ) ) ; row0 . add ( square . get ( 1 ) ) ; row1 . add ( square . get ( 3 ) ) ; row1 . add ( square . get ( 2 ) ) ; } calibrationPoints . addAll ( row0 ) ; calibrationPoints . addAll ( row1 ) ; } // calibCols = grid.columns*2; // calibRows = grid.rows*2; }
Extracts the calibration points from the corners of a fully ordered grid
185
14
27,068
public static < T extends ImageGray < T > > SparseScaleGradient < T , ? > createGradient ( boolean useHaar , Class < T > imageType ) { if ( useHaar ) return FactorySparseIntegralFilters . haar ( imageType ) ; else return FactorySparseIntegralFilters . gradient ( imageType ) ; }
Creates a class for computing the image gradient from an integral image in a sparse fashion . All these kernels assume that the kernel is entirely contained inside the image!
77
32
27,069
public static < T extends ImageGray < T > > boolean isInside ( T ii , double X , double Y , int radiusRegions , int kernelSize , double scale , double c , double s ) { int c_x = ( int ) Math . round ( X ) ; int c_y = ( int ) Math . round ( Y ) ; kernelSize = ( int ) Math . ceil ( kernelSize * scale ) ; int kernelRadius = kernelSize / 2 + ( kernelSize % 2 ) ; // find the radius of the whole area being sampled int radius = ( int ) Math . ceil ( radiusRegions * scale ) ; // integral image convolutions sample the pixel before the region starts // which is why the extra minus one is there int kernelPaddingMinus = radius + kernelRadius + 1 ; int kernelPaddingPlus = radius + kernelRadius ; // take in account the rotation if ( c != 0 || s != 0 ) { double xx = Math . abs ( c * kernelPaddingMinus - s * kernelPaddingMinus ) ; double yy = Math . abs ( s * kernelPaddingMinus + c * kernelPaddingMinus ) ; double delta = xx > yy ? xx - kernelPaddingMinus : yy - kernelPaddingMinus ; kernelPaddingMinus += ( int ) Math . ceil ( delta ) ; kernelPaddingPlus += ( int ) Math . ceil ( delta ) ; } // compute the new bounds and see if its inside int x0 = c_x - kernelPaddingMinus ; if ( x0 < 0 ) return false ; int x1 = c_x + kernelPaddingPlus ; if ( x1 >= ii . width ) return false ; int y0 = c_y - kernelPaddingMinus ; if ( y0 < 0 ) return false ; int y1 = c_y + kernelPaddingPlus ; if ( y1 >= ii . height ) return false ; return true ; }
Checks to see if the region is contained inside the image . This includes the convolution kernel . Takes into account the orientation of the region .
421
28
27,070
public static double rotatedWidth ( double width , double c , double s ) { return Math . abs ( c ) * width + Math . abs ( s ) * width ; }
Computes the width of a square containment region that contains a rotated rectangle .
36
15
27,071
public void assignIDsToRigidPoints ( ) { // return if it has already been assigned if ( lookupRigid != null ) return ; // Assign a unique ID to each point belonging to a rigid object // at the same time create a look up table that allows for the object that a point belongs to be quickly found lookupRigid = new int [ getTotalRigidPoints ( ) ] ; int pointID = 0 ; for ( int i = 0 ; i < rigids . length ; i ++ ) { Rigid r = rigids [ i ] ; r . indexFirst = pointID ; for ( int j = 0 ; j < r . points . length ; j ++ , pointID ++ ) { lookupRigid [ pointID ] = i ; } } }
Assigns an ID to all rigid points . This function does not need to be called by the user as it will be called by the residual function if needed
165
32
27,072
public void setCamera ( int which , boolean fixed , BundleAdjustmentCamera model ) { cameras [ which ] . known = fixed ; cameras [ which ] . model = model ; }
Specifies the camera model being used .
37
8
27,073
public void setRigid ( int which , boolean fixed , Se3_F64 worldToObject , int totalPoints ) { Rigid r = rigids [ which ] = new Rigid ( ) ; r . known = fixed ; r . objectToWorld . set ( worldToObject ) ; r . points = new Point [ totalPoints ] ; for ( int i = 0 ; i < totalPoints ; i ++ ) { r . points [ i ] = new Point ( pointSize ) ; } }
Declares the data structure for a rigid object . Location of points are set by accessing the object directly . Rigid objects are useful in known scenes with calibration targets .
105
33
27,074
public void connectViewToCamera ( int viewIndex , int cameraIndex ) { if ( views [ viewIndex ] . camera != - 1 ) throw new RuntimeException ( "View has already been assigned a camera" ) ; views [ viewIndex ] . camera = cameraIndex ; }
Specifies that the view uses the specified camera
57
9
27,075
public int getUnknownCameraCount ( ) { int total = 0 ; for ( int i = 0 ; i < cameras . length ; i ++ ) { if ( ! cameras [ i ] . known ) { total ++ ; } } return total ; }
Returns the number of cameras with parameters that are not fixed
51
11
27,076
public int getTotalRigidPoints ( ) { if ( rigids == null ) return 0 ; int total = 0 ; for ( int i = 0 ; i < rigids . length ; i ++ ) { total += rigids [ i ] . points . length ; } return total ; }
Returns total number of points associated with rigid objects .
61
10
27,077
public static < T extends KernelBase > T random ( Class < ? > type , int radius , int min , int max , Random rand ) { int width = radius * 2 + 1 ; return random ( type , width , radius , min , max , rand ) ; }
Creates a random kernel of the specified type where each element is drawn from a uniform distribution .
56
19
27,078
public void detect ( II integral ) { if ( intensity == null ) { intensity = new GrayF32 [ 3 ] ; for ( int i = 0 ; i < intensity . length ; i ++ ) { intensity [ i ] = new GrayF32 ( integral . width , integral . height ) ; } } foundPoints . reset ( ) ; // computes feature intensity every 'skip' pixels int skip = initialSampleRate ; // increment between kernel sizes int sizeStep = scaleStepSize ; // initial size of the kernel in the first octave int octaveSize = initialSize ; for ( int octave = 0 ; octave < numberOfOctaves ; octave ++ ) { for ( int i = 0 ; i < sizes . length ; i ++ ) { sizes [ i ] = octaveSize + i * sizeStep ; } // if the maximum kernel size is larger than the image don't process // the image any more int maxSize = sizes [ sizes . length - 1 ] ; if ( maxSize > integral . width || maxSize > integral . height ) break ; // detect features inside of this octave detectOctave ( integral , skip , sizes ) ; skip += skip ; octaveSize += sizeStep ; sizeStep += sizeStep ; } // todo save previously computed sizes for reuse in higher octaves and reuse it }
Detect interest points inside of the image .
276
8
27,079
protected void detectOctave ( II integral , int skip , int ... featureSize ) { int w = integral . width / skip ; int h = integral . height / skip ; // resize the output intensity image taking in account subsampling for ( int i = 0 ; i < intensity . length ; i ++ ) { intensity [ i ] . reshape ( w , h ) ; } // compute feature intensity in each level for ( int i = 0 ; i < featureSize . length ; i ++ ) { GIntegralImageFeatureIntensity . hessian ( integral , skip , featureSize [ i ] , intensity [ spaceIndex ] ) ; spaceIndex ++ ; if ( spaceIndex >= 3 ) spaceIndex = 0 ; // find maximum in scale space if ( i >= 2 ) { findLocalScaleSpaceMax ( featureSize , i - 1 , skip ) ; } } }
Computes feature intensities for all the specified feature sizes and finds features inside of the middle feature sizes .
181
21
27,080
protected static boolean checkMax ( ImageBorder_F32 inten , float bestScore , int c_x , int c_y ) { for ( int y = c_y - 1 ; y <= c_y + 1 ; y ++ ) { for ( int x = c_x - 1 ; x <= c_x + 1 ; x ++ ) { if ( inten . get ( x , y ) >= bestScore ) { return false ; } } } return true ; }
Sees if the best score in the current layer is greater than all the scores in a 3x3 neighborhood in another layer .
101
26
27,081
public void process ( T gray , GrayU8 binary ) { configureContourDetector ( gray ) ; recycleData ( ) ; positionPatterns . reset ( ) ; interpolate . setImage ( gray ) ; // detect squares squareDetector . process ( gray , binary ) ; long time0 = System . nanoTime ( ) ; squaresToPositionList ( ) ; long time1 = System . nanoTime ( ) ; // Create graph of neighboring squares createPositionPatternGraph ( ) ; // long time2 = System.nanoTime(); // doesn't take very long double milli = ( time1 - time0 ) * 1e-6 ; milliGraph . update ( milli ) ; if ( profiler ) { DetectPolygonFromContour < T > detectorPoly = squareDetector . getDetector ( ) ; System . out . printf ( " contour %5.1f shapes %5.1f adjust_bias %5.2f PosPat %6.2f" , detectorPoly . getMilliContour ( ) , detectorPoly . getMilliShapes ( ) , squareDetector . getMilliAdjustBias ( ) , milliGraph . getAverage ( ) ) ; } }
Detects position patterns inside the image and forms a graph .
257
12
27,082
private void createPositionPatternGraph ( ) { // Add items to NN search nn . setPoints ( ( List ) positionPatterns . toList ( ) , false ) ; for ( int i = 0 ; i < positionPatterns . size ( ) ; i ++ ) { PositionPatternNode f = positionPatterns . get ( i ) ; // The QR code version specifies the number of "modules"/blocks across the marker is // A position pattern is 7 blocks. A version 1 qr code is 21 blocks. Each version past one increments // by 4 blocks. The search is relative to the center of each position pattern, hence the - 7 double maximumQrCodeWidth = f . largestSide * ( 17 + 4 * maxVersionQR - 7.0 ) / 7.0 ; double searchRadius = 1.2 * maximumQrCodeWidth ; // search 1/2 the width + some fudge factor searchRadius *= searchRadius ; // Connect all the finder patterns which are near by each other together in a graph search . findNearest ( f , searchRadius , Integer . MAX_VALUE , searchResults ) ; if ( searchResults . size > 1 ) { for ( int j = 0 ; j < searchResults . size ; j ++ ) { NnData < SquareNode > r = searchResults . get ( j ) ; if ( r . point == f ) continue ; // skip over if it's the square that initiated the search considerConnect ( f , r . point ) ; } } } }
Connects together position patterns . For each square finds all of its neighbors based on center distance . Then considers them for connections
321
24
27,083
void considerConnect ( SquareNode node0 , SquareNode node1 ) { // Find the side on each line which intersects the line connecting the two centers lineA . a = node0 . center ; lineA . b = node1 . center ; int intersection0 = graph . findSideIntersect ( node0 , lineA , intersection , lineB ) ; connectLine . a . set ( intersection ) ; int intersection1 = graph . findSideIntersect ( node1 , lineA , intersection , lineB ) ; connectLine . b . set ( intersection ) ; if ( intersection1 < 0 || intersection0 < 0 ) { return ; } double side0 = node0 . sideLengths [ intersection0 ] ; double side1 = node1 . sideLengths [ intersection1 ] ; // it should intersect about in the middle of the line double sideLoc0 = connectLine . a . distance ( node0 . square . get ( intersection0 ) ) / side0 ; double sideLoc1 = connectLine . b . distance ( node1 . square . get ( intersection1 ) ) / side1 ; if ( Math . abs ( sideLoc0 - 0.5 ) > 0.35 || Math . abs ( sideLoc1 - 0.5 ) > 0.35 ) return ; // see if connecting sides are of similar size if ( Math . abs ( side0 - side1 ) / Math . max ( side0 , side1 ) > 0.25 ) { return ; } // Checks to see if the two sides selected above are closest to being parallel to each other. // Perspective distortion will make the lines not parallel, but will still have a smaller // acute angle than the adjacent sides if ( ! graph . almostParallel ( node0 , intersection0 , node1 , intersection1 ) ) { return ; } double ratio = Math . max ( node0 . smallestSide / node1 . largestSide , node1 . smallestSide / node0 . largestSide ) ; // System.out.println("ratio "+ratio); if ( ratio > 1.3 ) return ; double angle = graph . acuteAngle ( node0 , intersection0 , node1 , intersection1 ) ; double score = lineA . getLength ( ) * ( 1.0 + angle / 0.1 ) ; graph . checkConnect ( node0 , intersection0 , node1 , intersection1 , score ) ; }
Connects the candidate node to node n if they meet several criteria . See code for details .
496
19
27,084
boolean checkPositionPatternAppearance ( Polygon2D_F64 square , float grayThreshold ) { return ( checkLine ( square , grayThreshold , 0 ) || checkLine ( square , grayThreshold , 1 ) ) ; }
Determines if the found polygon looks like a position pattern . A horizontal and vertical line are sampled . At each sample point it is marked if it is above or below the binary threshold for this square . Location of sample points is found by removing perspective distortion .
50
53
27,085
static boolean positionSquareIntensityCheck ( float values [ ] , float threshold ) { if ( values [ 0 ] > threshold || values [ 1 ] < threshold ) return false ; if ( values [ 2 ] > threshold || values [ 3 ] > threshold || values [ 4 ] > threshold ) return false ; if ( values [ 5 ] < threshold || values [ 6 ] > threshold ) return false ; return true ; }
Checks to see if the array of sampled intensity values follows the expected pattern for a position pattern . X . XXX . X where x = black and . = white .
85
34
27,086
public void process ( DMatrixRMaj K1 , Se3_F64 worldToCamera1 , DMatrixRMaj K2 , Se3_F64 worldToCamera2 ) { SimpleMatrix sK1 = SimpleMatrix . wrap ( K1 ) ; SimpleMatrix sK2 = SimpleMatrix . wrap ( K2 ) ; SimpleMatrix R1 = SimpleMatrix . wrap ( worldToCamera1 . getR ( ) ) ; SimpleMatrix R2 = SimpleMatrix . wrap ( worldToCamera2 . getR ( ) ) ; SimpleMatrix T1 = new SimpleMatrix ( 3 , 1 , true , new double [ ] { worldToCamera1 . getT ( ) . x , worldToCamera1 . getT ( ) . y , worldToCamera1 . getT ( ) . z } ) ; SimpleMatrix T2 = new SimpleMatrix ( 3 , 1 , true , new double [ ] { worldToCamera2 . getT ( ) . x , worldToCamera2 . getT ( ) . y , worldToCamera2 . getT ( ) . z } ) ; // P = K*[R|T] SimpleMatrix KR1 = sK1 . mult ( R1 ) ; SimpleMatrix KR2 = sK2 . mult ( R2 ) ; // compute optical centers in world reference frame // c = -R'*T SimpleMatrix c1 = R1 . transpose ( ) . mult ( T1 . scale ( - 1 ) ) ; SimpleMatrix c2 = R2 . transpose ( ) . mult ( T2 . scale ( - 1 ) ) ; // new coordinate system axises selectAxises ( R1 , R2 , c1 , c2 ) ; // new extrinsic parameters, rotation matrix with rows of camera 1's coordinate system in // the world frame SimpleMatrix RR = new SimpleMatrix ( 3 , 3 , true , new double [ ] { v1 . x , v1 . y , v1 . z , v2 . x , v2 . y , v2 . z , v3 . x , v3 . y , v3 . z } ) ; // new calibration matrix that is an average of the original K = sK1 . plus ( sK2 ) . scale ( 0.5 ) ; K . set ( 0 , 1 , 0 ) ; // set skew to zero // new projection rotation matrices SimpleMatrix KRR = K . mult ( RR ) ; // rectification transforms rect1 . set ( KRR . mult ( KR1 . invert ( ) ) . getDDRM ( ) ) ; rect2 . set ( KRR . mult ( KR2 . invert ( ) ) . getDDRM ( ) ) ; rectifiedR = RR . getDDRM ( ) ; }
Computes rectification transforms for both cameras and optionally a single calibration matrix .
583
15
27,087
private void selectAxises ( SimpleMatrix R1 , SimpleMatrix R2 , SimpleMatrix c1 , SimpleMatrix c2 ) { // --------- Compute the new x-axis v1 . set ( c2 . get ( 0 ) - c1 . get ( 0 ) , c2 . get ( 1 ) - c1 . get ( 1 ) , c2 . get ( 2 ) - c1 . get ( 2 ) ) ; v1 . normalize ( ) ; // --------- Compute the new y-axis // cross product of old z axis and new x axis // According to the paper [1] this choice is arbitrary, however it is not. By selecting // the original axis the similarity with the first view is maximized. The other extreme // would be to make it perpendicular, resulting in an unusable rectification. // extract old z-axis from rotation matrix Vector3D_F64 oldZ = new Vector3D_F64 ( R1 . get ( 2 , 0 ) + R2 . get ( 2 , 0 ) , R1 . get ( 2 , 1 ) + R2 . get ( 2 , 1 ) , R1 . get ( 2 , 2 ) + R2 . get ( 2 , 2 ) ) ; GeometryMath_F64 . cross ( oldZ , v1 , v2 ) ; v2 . normalize ( ) ; // ---------- Compute the new z-axis // simply the process product of the first two GeometryMath_F64 . cross ( v1 , v2 , v3 ) ; v3 . normalize ( ) ; }
Selects axes of new coordinate system
332
8
27,088
/**
 * Computes the homography induced by the plane on which two observed lines lie, given each
 * line's observation in both views. On success the result is stored in the class field H
 * (H = A - e2*v^T) and rescaled/resigned by 'adjust'.
 *
 * @param line0 first line pair; l1 is its observation in view 1, l2 in view 2
 * @param line1 second line pair, same convention
 * @return true if the homography could be computed, false if either pair of
 *         back-projected planes failed to intersect in a 3D line
 */
public boolean process( PairLineNorm line0 , PairLineNorm line1 ) {
	// Find plane equations of second lines in the first view
	double a0 = GeometryMath_F64.dot(e2,line0.l2);
	double a1 = GeometryMath_F64.dot(e2,line1.l2);

	GeometryMath_F64.multTran(A,line0.l2,Al0);
	GeometryMath_F64.multTran(A,line1.l2,Al1);

	// find the intersection of the planes created by each view of each line
	// first line
	planeA.set(line0.l1.x,line0.l1.y,line0.l1.z,0);
	planeB.set(Al0.x,Al0.y,Al0.z,a0);
	if( !Intersection3D_F64.intersect(planeA,planeB,intersect0) )
		return false;
	intersect0.slope.normalize(); // maybe this will reduce overflow problems?

	// second line
	planeA.set(line1.l1.x,line1.l1.y,line1.l1.z,0);
	planeB.set(Al1.x,Al1.y,Al1.z,a1);
	if( !Intersection3D_F64.intersect(planeA,planeB,intersect1) )
		return false;
	intersect1.slope.normalize();

	// compute the plane defined by these two lines:
	// vector from a point on the first 3D line to a point on the second
	from0to1.x = intersect1.p.x - intersect0.p.x;
	from0to1.y = intersect1.p.y - intersect0.p.y;
	from0to1.z = intersect1.p.z - intersect0.p.z;

	// the plane's normal will be the cross product of one of the slopes and a line connecting the two lines
	GeometryMath_F64.cross(intersect0.slope,from0to1,pi.n);
	pi.p.set(intersect0.p);

	// convert this plane description into general format (Ax + By + Cz = D)
	UtilPlane3D_F64.convert(pi,pi_gen);
	v.set(pi_gen.A/pi_gen.D,pi_gen.B/pi_gen.D,pi_gen.C/pi_gen.D);

	// H = A - e2*v^T
	GeometryMath_F64.outerProd(e2,v,av);
	CommonOps_DDRM.subtract(A,av,H);

	// pick a good scale and sign for H
	adjust.adjust(H,line0);

	return true;
}
Computes the homography based on two unique lines on the plane
599
13
27,089
/**
 * Extracts the numerical value encoded in the classified binary grid. Bits are packed into
 * an int starting at the least-significant bit. The first and last cells of the top and
 * bottom rows are skipped (presumably orientation/corner markers — see the corner sanity
 * check elsewhere in this class), so those rows contribute gridWidth-2 bits each while
 * every interior row contributes all gridWidth bits.
 *
 * @return the decoded value
 */
protected int extractNumeral() {
	int val = 0;
	// index of the first cell in the row stored at the end of the classified array
	final int topLeft = getTotalGridElements()-gridWidth;
	int shift = 0;

	// -2 because the top and bottom rows have 2 unusable bits (the first and last)
	for (int i = 1; i < gridWidth-1; i++) {
		final int idx = topLeft+i;
		val |= classified[idx] << shift;
		shift++;
	}

	// Don't do the first or last row, handled above and below - special cases
	for (int ii = 1; ii < gridWidth-1; ii++) {
		for (int i = 0; i < gridWidth; i++) {
			// rows are walked from the end of the array towards the start
			final int idx = getTotalGridElements()-(gridWidth*(ii+1))+i;
			val |= classified[idx] << shift;
			shift++;
		}
	}

	// The last row (cells 1 .. gridWidth-2 at the start of the array)
	for (int i = 1; i < gridWidth-1; i++) {
		val |= classified[i] << shift;
		shift++;
	}

	return val;
}
Extracts the numerical value encoded in the grid
307
8
27,090
private boolean rotateUntilInLowerCorner ( Result result ) { // sanity check corners. There should only be one exactly one black final int topLeft = getTotalGridElements ( ) - gridWidth ; final int topRight = getTotalGridElements ( ) - 1 ; final int bottomLeft = 0 ; final int bottomRight = gridWidth - 1 ; if ( classified [ bottomLeft ] + classified [ bottomRight ] + classified [ topRight ] + classified [ topLeft ] != 1 ) return true ; // Rotate until the black corner is in the lower left hand corner on the image. // remember that origin is the top left corner result . rotation = 0 ; while ( classified [ topLeft ] != 1 ) { result . rotation ++ ; rotateClockWise ( ) ; } return false ; }
Rotates the pattern until the black corner is in the lower right. Also sanity checks that there is exactly one black corner
166
26
27,091
protected boolean thresholdBinaryNumber ( ) { int lower = ( int ) ( N * ( ambiguityThreshold / 2.0 ) ) ; int upper = ( int ) ( N * ( 1 - ambiguityThreshold / 2.0 ) ) ; final int totalElements = getTotalGridElements ( ) ; for ( int i = 0 ; i < totalElements ; i ++ ) { if ( counts [ i ] < lower ) { classified [ i ] = 0 ; } else if ( counts [ i ] > upper ) { classified [ i ] = 1 ; } else { // it's ambiguous so just fail return true ; } } return false ; }
Counts how many pixels were positive and negative in each square region, then decides whether each square should be classified as 0, 1, or ambiguous
137
25
27,092
/**
 * Thresholds the gray image into binaryInner, then counts the number of "on" binary pixels
 * inside each grid square, storing the totals in counts (row-major). The outer 2 pixels on
 * every side of each square are skipped; they tend to be incorrectly classified due to
 * distortion.
 *
 * NOTE(review): row bounds (y0/y1) are derived from binaryInner.width, not height — this
 * assumes the inner binary region is square. Confirm against how binaryInner is allocated.
 *
 * @param gray gray scale image of the square region
 * @param threshold global intensity threshold separating dark from light
 */
protected void findBitCounts( GrayF32 gray , double threshold ) {
	// compute the binary image using the supplied global threshold ('true' selects
	// the down-direction comparison)
	ThresholdImageOps.threshold(gray,binaryInner,(float)threshold,true);

	Arrays.fill(counts,0);
	for (int row = 0; row < gridWidth; row++) {
		// vertical extent of this row of squares, trimmed by 2 pixels on each side
		int y0 = row*binaryInner.width/gridWidth+2;
		int y1 = (row+1)*binaryInner.width/gridWidth-2;
		for (int col = 0; col < gridWidth; col++) {
			// horizontal extent of this square, trimmed by 2 pixels on each side
			int x0 = col*binaryInner.width/gridWidth+2;
			int x1 = (col+1)*binaryInner.width/gridWidth-2;

			int total = 0;
			for (int i = y0; i < y1; i++) {
				int index = i*binaryInner.width+x0;
				for (int j = x0; j < x1; j++) {
					total += binaryInner.data[index++];
				}
			}

			counts[row*gridWidth+col] = total;
		}
	}
}
Converts the gray scale image into binary counts per square, skipping the outer pixels of each inner square since those tend to be incorrectly classified due to distortion
248
31
27,093
/**
 * Prints the classified grid to standard out for debugging. A cell classified as 1 is
 * rendered as a blank and anything else as an 'X'. Only renders sensibly in a monospaced
 * font.
 */
public void printClassified() {
	System.out.println();
	System.out.println(" ");
	for (int row = 0; row < gridWidth; row++) {
		// assemble the whole row before printing it
		StringBuilder line = new StringBuilder(" ");
		for (int col = 0; col < gridWidth; col++) {
			line.append(classified[row*gridWidth + col] == 1 ? " " : "X");
		}
		line.append(" ");
		System.out.println(line);
	}
	System.out.println(" ");
}
This only works well as a visual representation if the output font is monospaced.
125
17
27,094
/**
 * Sets up the data structures needed for sparse bundle adjustment (SBA): a projective scene
 * with 3 views and the per-view pixel observations. Each feature is triangulated from its
 * three observations; features that fail to triangulate are left unset and pruned at the end.
 *
 * @param listObs pixel observations of each feature in all three views
 * @param P2 camera matrix of view 2
 * @param P3 camera matrix of view 3
 */
private void initializeStructure( List<AssociatedTriple> listObs , DMatrixRMaj P2 , DMatrixRMaj P3 ) {
	List<DMatrixRMaj> cameraMatrices = new ArrayList<>();
	cameraMatrices.add(P1);
	cameraMatrices.add(P2);
	cameraMatrices.add(P3);

	// reused container for the three observations of the current feature
	List<Point2D_F64> triangObs = new ArrayList<>();
	triangObs.add(null);
	triangObs.add(null);
	triangObs.add(null);

	// 'true' presumably selects homogeneous point coordinates — points are stored as 4-vectors below
	structure = new SceneStructureProjective(true);
	structure.initialize(3,listObs.size());
	observations = new SceneObservations(3);

	// view 0 uses P1 with its flag set true, the other two false — presumably view 0 is held fixed
	structure.setView(0,true,P1,0,0);
	structure.setView(1,false,P2,0,0);
	structure.setView(2,false,P3,0,0);

	boolean needsPruning = false;

	Point4D_F64 X = new Point4D_F64();
	for (int i = 0; i < listObs.size(); i++) {
		AssociatedTriple t = listObs.get(i);

		triangObs.set(0,t.p1);
		triangObs.set(1,t.p2);
		triangObs.set(2,t.p3);

		// triangulation can fail if all 3 views have the same pixel value. This has been observed in
		// simulated 3D scenes
		if( triangulator.triangulate(triangObs,cameraMatrices,X) ) {
			observations.getView(0).add(i,(float)t.p1.x,(float)t.p1.y);
			observations.getView(1).add(i,(float)t.p2.x,(float)t.p2.y);
			observations.getView(2).add(i,(float)t.p3.x,(float)t.p3.y);

			// store the triangulated point in homogeneous coordinates
			structure.points[i].set(X.x,X.y,X.z,X.w);
		} else {
			needsPruning = true;
		}
	}

	if( needsPruning ) {
		// remove features with too few observations (threshold 1) so SBA only sees valid points
		PruneStructureFromSceneProjective pruner = new PruneStructureFromSceneProjective(structure,observations);
		pruner.prunePoints(1);
	}
}
Sets up data structures for SBA
550
8
27,095
private boolean backwardsValidation ( int indexSrc , int bestIndex ) { double bestScoreV = maxError ; int bestIndexV = - 1 ; D d_forward = descDst . get ( bestIndex ) ; setActiveSource ( locationDst . get ( bestIndex ) ) ; for ( int j = 0 ; j < locationSrc . size ( ) ; j ++ ) { // compute distance between the two features double distance = computeDistanceToSource ( locationSrc . get ( j ) ) ; if ( distance > maxDistance ) continue ; D d_v = descSrc . get ( j ) ; double score = scoreAssociation . score ( d_forward , d_v ) ; if ( score < bestScoreV ) { bestScoreV = score ; bestIndexV = j ; } } return bestIndexV == indexSrc ; }
Finds the best match for an index in destination and sees if it matches the source index
182
18
27,096
/**
 * Multiplies each pixel of the input image by a scalar, writing the result to output.
 * The output is reshaped to match the input; input and output may be the same instance.
 *
 * @param input input image
 * @param value scalar multiplier
 * @param output output image, reshaped to match the input
 */
public static void multiply( GrayU8 input , double value , GrayU8 output ) {
	output.reshape(input.width,input.height);

	final int rows = input.height;
	final int cols = input.width;

	// dispatch to the concurrent implementation when enabled
	if( BoofConcurrency.USE_CONCURRENT ) {
		ImplPixelMath_MT.multiplyU_A(input.data,input.startIndex,input.stride,value,
				output.data,output.startIndex,output.stride,rows,cols);
	} else {
		ImplPixelMath.multiplyU_A(input.data,input.startIndex,input.stride,value,
				output.data,output.startIndex,output.stride,rows,cols);
	}
}
Multiply each element by a scalar value . Both input and output images can be the same instance .
149
22
27,097
/**
 * Divides each pixel of the input image by a scalar, writing the result to output.
 * The output is reshaped to match the input; input and output may be the same instance.
 *
 * @param input input image
 * @param denominator scalar divisor
 * @param output output image, reshaped to match the input
 */
public static void divide( GrayU8 input , double denominator , GrayU8 output ) {
	output.reshape(input.width,input.height);

	final int rows = input.height;
	final int cols = input.width;

	// dispatch to the concurrent implementation when enabled
	if( BoofConcurrency.USE_CONCURRENT ) {
		ImplPixelMath_MT.divideU_A(input.data,input.startIndex,input.stride,denominator,
				output.data,output.startIndex,output.stride,rows,cols);
	} else {
		ImplPixelMath.divideU_A(input.data,input.startIndex,input.stride,denominator,
				output.data,output.startIndex,output.stride,rows,cols);
	}
}
Divide each element by a scalar value . Both input and output images can be the same instance .
152
21
27,098
/**
 * Tracks the feature against the latest input. On success the feature's description is
 * refreshed from the new location; on failure the feature is left untouched.
 *
 * @param feature the pyramid KLT feature being tracked
 * @return true if tracking succeeded
 */
public boolean performTracking( PyramidKltFeature feature ) {
	if( tracker.track(feature) == KltTrackFault.SUCCESS ) {
		// update the feature description at its new location
		tracker.setDescription(feature);
		return true;
	}
	return false;
}
Updates the track using the latest inputs. If tracking fails then the feature description in each layer and its global position are left unchanged.
61
26
27,099
/**
 * Shows the image in a modal dialog. The call blocks until the user dismisses the dialog.
 *
 * @param img the image to display
 */
public static void showDialog( BufferedImage img ) {
	// wrap the image in an icon so JOptionPane can render it
	ImageIcon display = new ImageIcon();
	display.setImage(img);
	// modal dialog: does not return until the user clicks OK
	JOptionPane.showMessageDialog(null, display);
}
Creates a dialog window showing the specified image. The function will not return until the user clicks OK.
45
20