idx int64 0 165k | question stringlengths 73 4.15k | target stringlengths 5 918 | len_question int64 21 890 | len_target int64 3 255 |
|---|---|---|---|---|
27,200 | public static void convertFile ( File original ) throws IOException { File outputFile = determineClassName ( original ) ; String classNameOld = className ( original ) ; String classNameNew = className ( outputFile ) ; // Read the file and split it up into lines List < String > inputLines = FileUtils . readLines ( original , "UTF-8" ) ; List < String > outputLines = new ArrayList <> ( ) ; List < Macro > macros = new ArrayList <> ( ) ; // parse each line by line looking for instructions boolean foundClassDef = false ; for ( int i = 0 ; i < inputLines . size ( ) ; i ++ ) { String line = inputLines . get ( i ) ; int where = line . indexOf ( prefix ) ; if ( where < 0 ) { if ( ! foundClassDef && line . contains ( "class " + classNameOld ) ) { foundClassDef = true ; line = line . replaceFirst ( "class " + classNameOld , "class " + classNameNew ) ; } else { line = line . replace ( classNameOld + "(" , classNameNew + "(" ) ; } outputLines . add ( line ) ; continue ; } String type = readType ( line , where + prefix . length ( ) ) ; String whitespaces = line . substring ( 0 , where ) ; int frontLength = where + prefix . length ( ) + type . length ( ) ; String message = line . length ( ) > frontLength ? line . substring ( frontLength + 1 ) : "" ; switch ( type ) { case "CLASS_NAME" : continue ; // ignore. already processed case "INLINE" : outputLines . add ( whitespaces + message ) ; break ; case "ABOVE" : // remove the previous line outputLines . remove ( outputLines . size ( ) - 1 ) ; outputLines . add ( whitespaces + message ) ; break ; case "BELOW" : outputLines . add ( whitespaces + message ) ; i += 1 ; // skip next line break ; case "REMOVE_ABOVE" : outputLines . remove ( outputLines . 
size ( ) - 1 ) ; break ; case "REMOVE_BELOW" : i += 1 ; // skip next line break ; case "MACRO" : throw new RuntimeException ( "MACRO not handled yet" ) ; default : throw new RuntimeException ( "Unknown: " + type ) ; } } PrintStream out = new PrintStream ( outputFile ) ; for ( int i = 0 ; i < outputLines . size ( ) ; i ++ ) { out . println ( outputLines . get ( i ) ) ; } out . close ( ) ; createTestIfNotThere ( outputFile ) ; } | Converts the file from single thread into concurrent implementation | 605 | 10 |
27,201 | private static File determineClassName ( File original ) throws IOException { String text = FileUtils . readFileToString ( original , "UTF-8" ) ; if ( ! text . contains ( "//CONCURRENT" ) ) throw new IOException ( "Not a concurrent file" ) ; String pattern = "//CONCURRENT_CLASS_NAME " ; int where = text . indexOf ( pattern ) ; if ( where < 0 ) { String name = className ( original ) ; return new File ( original . getParent ( ) , name + "_MT.java" ) ; } String name = readUntilEndOfLine ( text , where + pattern . length ( ) ) ; return new File ( original . getParent ( ) , name + ".java" ) ; } | Searches the input file for an override . If none is found then _MT is added to the class name . | 164 | 24 |
27,202 | @ Override public void initialize ( int width , int height ) { // see if it has already been initialized if ( bottomWidth == width && bottomHeight == height ) return ; this . bottomWidth = width ; this . bottomHeight = height ; layers = imageType . createArray ( getNumLayers ( ) ) ; double scaleFactor = getScale ( 0 ) ; if ( scaleFactor == 1 ) { if ( ! saveOriginalReference ) { layers [ 0 ] = imageType . createImage ( bottomWidth , bottomHeight ) ; } } else { layers [ 0 ] = imageType . createImage ( ( int ) Math . ceil ( bottomWidth / scaleFactor ) , ( int ) Math . ceil ( bottomHeight / scaleFactor ) ) ; } for ( int i = 1 ; i < layers . length ; i ++ ) { scaleFactor = getScale ( i ) ; layers [ i ] = imageType . createImage ( ( int ) Math . ceil ( bottomWidth / scaleFactor ) , ( int ) Math . ceil ( bottomHeight / scaleFactor ) ) ; } } | Initializes internal data structures based on the input image s size . Should be called each time a new image is processed . | 227 | 24 |
27,203 | protected void checkScales ( ) { if ( getScale ( 0 ) < 0 ) { throw new IllegalArgumentException ( "The first layer must be more than zero." ) ; } double prevScale = 0 ; for ( int i = 0 ; i < getNumLayers ( ) ; i ++ ) { double s = getScale ( i ) ; if ( s < prevScale ) throw new IllegalArgumentException ( "Higher layers must be the same size or larger than previous layers." ) ; prevScale = s ; } } | Used to internally check that the provided scales are valid . | 111 | 11 |
27,204 | static boolean checkGridSize ( List < List < NodeInfo > > grid , int clusterSize ) { int total = 0 ; int expected = grid . get ( 0 ) . size ( ) ; for ( int i = 0 ; i < grid . size ( ) ; i ++ ) { if ( expected != grid . get ( i ) . size ( ) ) return false ; total += grid . get ( i ) . size ( ) ; } return total == clusterSize ; } | Makes sure the found grid is the same size as the original cluster . If it s not then . not all the nodes were used . All lists must have he same size too . | 98 | 37 |
27,205 | public double depthNView ( List < Point2D_F64 > obs , List < Se3_F64 > motion ) { double top = 0 , bottom = 0 ; Point2D_F64 a = obs . get ( 0 ) ; for ( int i = 1 ; i < obs . size ( ) ; i ++ ) { Se3_F64 se = motion . get ( i - 1 ) ; Point2D_F64 b = obs . get ( i ) ; GeometryMath_F64 . multCrossA ( b , se . getR ( ) , temp0 ) ; GeometryMath_F64 . mult ( temp0 , a , temp1 ) ; GeometryMath_F64 . cross ( b , se . getT ( ) , temp2 ) ; top += temp2 . x + temp2 . y + temp2 . z ; bottom += temp1 . x + temp1 . y + temp1 . z ; } return - top / bottom ; } | Computes the pixel depth from N views of the same object . Pixel depth in the first frame . | 208 | 20 |
27,206 | public double depth2View ( Point2D_F64 a , Point2D_F64 b , Se3_F64 fromAtoB ) { DMatrixRMaj R = fromAtoB . getR ( ) ; Vector3D_F64 T = fromAtoB . getT ( ) ; GeometryMath_F64 . multCrossA ( b , R , temp0 ) ; GeometryMath_F64 . mult ( temp0 , a , temp1 ) ; GeometryMath_F64 . cross ( b , T , temp2 ) ; return - ( temp2 . x + temp2 . y + temp2 . z ) / ( temp1 . x + temp1 . y + temp1 . z ) ; } | Computes pixel depth in image a from two observations . | 159 | 11 |
27,207 | public void initialize ( int numFeatures , int numViews ) { depths . reshape ( numViews , numFeatures ) ; pixels . reshape ( numViews * 2 , numFeatures ) ; pixelScale = 0 ; } | Initializes internal data structures . Must be called first | 48 | 10 |
27,208 | public void setPixels ( int view , List < Point2D_F64 > pixelsInView ) { if ( pixelsInView . size ( ) != pixels . numCols ) throw new IllegalArgumentException ( "Pixel count must be constant and match " + pixels . numCols ) ; int row = view * 2 ; for ( int i = 0 ; i < pixelsInView . size ( ) ; i ++ ) { Point2D_F64 p = pixelsInView . get ( i ) ; pixels . set ( row , i , p . x ) ; pixels . set ( row + 1 , i , p . y ) ; pixelScale = Math . max ( Math . abs ( p . x ) , Math . abs ( p . y ) ) ; } } | Sets pixel observations for a paricular view | 164 | 9 |
27,209 | public void setDepths ( int view , double featureDepths [ ] ) { if ( featureDepths . length < depths . numCols ) throw new IllegalArgumentException ( "Pixel count must be constant and match " + pixels . numCols ) ; int N = depths . numCols ; for ( int i = 0 ; i < N ; i ++ ) { depths . set ( view , i , featureDepths [ i ] ) ; } } | Sets depths for a particular value to the values in the passed in array | 93 | 15 |
27,210 | public void setDepthsFrom3D ( int view , List < Point3D_F64 > locations ) { if ( locations . size ( ) != pixels . numCols ) throw new IllegalArgumentException ( "Pixel count must be constant and match " + pixels . numCols ) ; int N = depths . numCols ; for ( int i = 0 ; i < N ; i ++ ) { depths . set ( view , i , locations . get ( i ) . z ) ; } } | Assigns depth to the z value of all the features in the list . Features must be in the coordinate system of the view for this to be correct | 105 | 31 |
27,211 | public boolean process ( ) { int numViews = depths . numRows ; int numFeatures = depths . numCols ; P . reshape ( 3 * numViews , 4 ) ; X . reshape ( 4 , numFeatures ) ; A . reshape ( numViews * 3 , numFeatures ) ; B . reshape ( numViews * 3 , numFeatures ) ; // Scale depths so that they are close to unity normalizeDepths ( depths ) ; // Compute the initial A matirx assignValuesToA ( A ) ; for ( int iter = 0 ; iter < maxIterations ; iter ++ ) { if ( ! svd . decompose ( A ) ) return false ; svd . getU ( U , false ) ; svd . getV ( Vt , true ) ; double sv [ ] = svd . getSingularValues ( ) ; SingularOps_DDRM . descendingOrder ( U , false , sv , A . numCols , Vt , true ) ; // This is equivalent to forcing the rank to be 4 CommonOps_DDRM . extract ( U , 0 , 0 , P ) ; CommonOps_DDRM . multCols ( P , sv ) ; CommonOps_DDRM . extract ( Vt , 0 , 0 , X ) ; // Compute the new value of A CommonOps_DDRM . mult ( P , X , B ) ; // See how much change there is double delta = SpecializedOps_DDRM . diffNormF ( A , B ) / ( A . numCols * A . numRows ) ; // swap arrays for the next iteration DMatrixRMaj tmp = A ; A = B ; B = tmp ; // exit if converged if ( delta <= minimumChangeTol ) break ; } return true ; } | Performs iteration to find camera matrices and feature locations in world frame | 389 | 14 |
27,212 | public void getCameraMatrix ( int view , DMatrixRMaj cameraMatrix ) { cameraMatrix . reshape ( 3 , 4 ) ; CommonOps_DDRM . extract ( P , view * 3 , 0 , cameraMatrix ) ; for ( int col = 0 ; col < 4 ; col ++ ) { cameraMatrix . data [ cameraMatrix . getIndex ( 0 , col ) ] *= pixelScale ; cameraMatrix . data [ cameraMatrix . getIndex ( 1 , col ) ] *= pixelScale ; } } | Used to get found camera matrix for a view | 109 | 9 |
27,213 | public void getFeature3D ( int feature , Point4D_F64 out ) { out . x = X . get ( 0 , feature ) ; out . y = X . get ( 1 , feature ) ; out . z = X . get ( 2 , feature ) ; out . w = X . get ( 3 , feature ) ; } | Returns location of 3D feature for a view | 72 | 9 |
27,214 | protected void computeScoreFive ( int top [ ] , int middle [ ] , int bottom [ ] , int score [ ] , int width ) { // disparity as the outer loop to maximize common elements in inner loops, reducing redundant calculations for ( int d = minDisparity ; d < maxDisparity ; d ++ ) { // take in account the different in image border between the sub-regions and the effective region int indexSrc = ( d - minDisparity ) * width + ( d - minDisparity ) + radiusX ; int indexDst = ( d - minDisparity ) * width + ( d - minDisparity ) ; int end = indexSrc + ( width - d - 4 * radiusX ) ; while ( indexSrc < end ) { int s = 0 ; // sample four outer regions at the corners around the center region int val0 = top [ indexSrc - radiusX ] ; int val1 = top [ indexSrc + radiusX ] ; int val2 = bottom [ indexSrc - radiusX ] ; int val3 = bottom [ indexSrc + radiusX ] ; // select the two best scores from outer for regions if ( val1 < val0 ) { int temp = val0 ; val0 = val1 ; val1 = temp ; } if ( val3 < val2 ) { int temp = val2 ; val2 = val3 ; val3 = temp ; } if ( val3 < val0 ) { s += val2 ; s += val3 ; } else if ( val2 < val1 ) { s += val2 ; s += val0 ; } else { s += val0 ; s += val1 ; } score [ indexDst ++ ] = s + middle [ indexSrc ++ ] ; } } } | Compute the final score by sampling the 5 regions . Four regions are sampled around the center region . Out of those four only the two with the smallest score are used . | 379 | 34 |
27,215 | public void setTrifocal ( TrifocalTensor tensor ) { this . tensor = tensor ; extract . setTensor ( tensor ) ; extract . extractFundmental ( F21 , F31 ) ; } | Specify the trifocaltensor | 48 | 8 |
27,216 | public void transfer_1_to_3 ( double x1 , double y1 , double x2 , double y2 , Point3D_F64 p3 ) { // Adjust the observations so that they lie on the epipolar lines exactly adjuster . process ( F21 , x1 , y1 , x2 , y2 , pa , pb ) ; GeometryMath_F64 . mult ( F21 , pa , la ) ; // line through pb and perpendicular to la l . x = la . y ; l . y = - la . x ; l . z = - pb . x * la . y + pb . y * la . x ; MultiViewOps . transfer_1_to_3 ( tensor , pa , l , p3 ) ; } | Transfer a point to third view give its observed location in view one and two . | 166 | 16 |
27,217 | public void transfer_1_to_2 ( double x1 , double y1 , double x3 , double y3 , Point3D_F64 p2 ) { // Adjust the observations so that they lie on the epipolar lines exactly adjuster . process ( F31 , x1 , y1 , x3 , y3 , pa , pb ) ; GeometryMath_F64 . multTran ( F31 , pa , la ) ; // line through pb and perpendicular to la l . x = la . y ; l . y = - la . x ; l . z = - pb . x * la . y + pb . y * la . x ; MultiViewOps . transfer_1_to_2 ( tensor , pa , l , p2 ) ; } | Transfer a point to third view give its observed location in view one and three . | 168 | 16 |
27,218 | @ Override public void classify ( Planar < GrayF32 > image ) { DataManipulationOps . imageToTensor ( preprocess ( image ) , tensorInput , 0 ) ; innerProcess ( tensorInput ) ; } | The original implementation takes in an image then crops it randomly . This is primarily for training but is replicated here to reduce the number of differences | 50 | 27 |
27,219 | public static void computeScoreRow ( GrayU8 left , GrayU8 right , int row , int [ ] scores , int minDisparity , int maxDisparity , int regionWidth , int elementScore [ ] ) { // disparity as the outer loop to maximize common elements in inner loops, reducing redundant calculations for ( int d = minDisparity ; d < maxDisparity ; d ++ ) { int dispFromMin = d - minDisparity ; // number of individual columns the error is computed in final int colMax = left . width - d ; // number of regions that a score/error is computed in final int scoreMax = colMax - regionWidth ; // indexes that data is read to/from for different data structures int indexScore = left . width * dispFromMin + dispFromMin ; int indexLeft = left . startIndex + left . stride * row + d ; int indexRight = right . startIndex + right . stride * row ; // Fill elementScore with scores for individual elements for this row at disparity d computeScoreRowSad ( left , right , colMax , indexLeft , indexRight , elementScore ) ; // score at the first column int score = 0 ; for ( int i = 0 ; i < regionWidth ; i ++ ) score += elementScore [ i ] ; scores [ indexScore ++ ] = score ; // scores for the remaining columns for ( int col = 0 ; col < scoreMax ; col ++ , indexScore ++ ) { scores [ indexScore ] = score += elementScore [ col + regionWidth ] - elementScore [ col ] ; } } } | Computes disparity score for an entire row . | 335 | 9 |
27,220 | public static void computeScoreRowSad ( GrayF32 left , GrayF32 right , int elementMax , int indexLeft , int indexRight , float elementScore [ ] ) { for ( int rCol = 0 ; rCol < elementMax ; rCol ++ ) { float diff = ( left . data [ indexLeft ++ ] ) - ( right . data [ indexRight ++ ] ) ; elementScore [ rCol ] = Math . abs ( diff ) ; } } | compute the score for each element all at once to encourage the JVM to optimize and encourage the JVM to optimize this section of code . | 97 | 29 |
27,221 | public Se3_F64 estimateOutliers ( List < Point2D3D > observations ) { // We can no longer trust that each point is a real observation. Let's use RANSAC to separate the points // You will need to tune the number of iterations and inlier threshold!!! ModelMatcherMultiview < Se3_F64 , Point2D3D > ransac = FactoryMultiViewRobust . pnpRansac ( new ConfigPnP ( ) , new ConfigRansac ( 300 , 1.0 ) ) ; ransac . setIntrinsic ( 0 , intrinsic ) ; // Observations must be in normalized image coordinates! See javadoc of pnpRansac if ( ! ransac . process ( observations ) ) throw new RuntimeException ( "Probably got bad input data with NaN inside of it" ) ; System . out . println ( "Inlier size " + ransac . getMatchSet ( ) . size ( ) ) ; Se3_F64 worldToCamera = ransac . getModelParameters ( ) ; // You will most likely want to refine this solution too. Can make a difference with real world data RefinePnP refine = FactoryMultiView . pnpRefine ( 1e-8 , 200 ) ; Se3_F64 refinedWorldToCamera = new Se3_F64 ( ) ; // notice that only the match set was passed in if ( ! refine . fitModel ( ransac . getMatchSet ( ) , worldToCamera , refinedWorldToCamera ) ) throw new RuntimeException ( "Refined failed! Input probably bad..." ) ; return refinedWorldToCamera ; } | Uses robust techniques to remove outliers | 356 | 8 |
27,222 | public void addOutliers ( List < Point2D3D > observations , int total ) { int size = observations . size ( ) ; for ( int i = 0 ; i < total ; i ++ ) { // outliers will be created by adding lots of noise to real observations Point2D3D p = observations . get ( rand . nextInt ( size ) ) ; Point2D3D o = new Point2D3D ( ) ; o . observation . set ( p . observation ) ; o . location . x = p . location . x + rand . nextGaussian ( ) * 5 ; o . location . y = p . location . y + rand . nextGaussian ( ) * 5 ; o . location . z = p . location . z + rand . nextGaussian ( ) * 5 ; observations . add ( o ) ; } // randomize the order Collections . shuffle ( observations , rand ) ; } | Adds some really bad observations to the mix | 195 | 8 |
27,223 | @ Override public void loadInputData ( String fileName ) { Reader r = media . openFile ( fileName ) ; List < PathLabel > refs = new ArrayList <> ( ) ; try { BufferedReader reader = new BufferedReader ( r ) ; String line ; while ( ( line = reader . readLine ( ) ) != null ) { String [ ] z = line . split ( ":" ) ; String [ ] names = new String [ z . length - 1 ] ; for ( int i = 1 ; i < z . length ; i ++ ) { names [ i - 1 ] = baseDirectory + z [ i ] ; } refs . add ( new PathLabel ( z [ 0 ] , names ) ) ; } setInputList ( refs ) ; } catch ( IOException e ) { throw new RuntimeException ( e ) ; } } | Loads a standardized file for input references | 182 | 8 |
27,224 | public void addToToolbar ( JComponent comp ) { toolbar . add ( comp , 1 + algBoxes . length ) ; toolbar . revalidate ( ) ; addedComponents . add ( comp ) ; } | Adds a new component into the toolbar . | 46 | 8 |
27,225 | public void setMainGUI ( final Component gui ) { postAlgorithmEvents = true ; this . gui = gui ; SwingUtilities . invokeLater ( new Runnable ( ) { public void run ( ) { add ( gui , BorderLayout . CENTER ) ; } } ) ; } | Used to add the main GUI to this panel . Must use this function . Algorithm change events will not be posted until this function has been set . | 60 | 30 |
27,226 | public void setInputImage ( BufferedImage image ) { inputImage = image ; SwingUtilities . invokeLater ( new Runnable ( ) { public void run ( ) { if ( inputImage == null ) { originalCheck . setEnabled ( false ) ; } else { originalCheck . setEnabled ( true ) ; origPanel . setImage ( inputImage ) ; origPanel . setPreferredSize ( new Dimension ( inputImage . getWidth ( ) , inputImage . getHeight ( ) ) ) ; origPanel . repaint ( ) ; } } } ) ; } | Specifies an image which contains the original input image . After this has been called the view input image widget is activated and when selected this image will be displayed instead of the main GUI . This functionality is optional . | 119 | 42 |
27,227 | public void setInputList ( final List < PathLabel > inputRefs ) { this . inputRefs = inputRefs ; SwingUtilities . invokeLater ( new Runnable ( ) { public void run ( ) { for ( int i = 0 ; i < inputRefs . size ( ) ; i ++ ) { imageBox . addItem ( inputRefs . get ( i ) . getLabel ( ) ) ; } } } ) ; } | Specifies a list of images to use as input and loads them | 95 | 13 |
27,228 | protected < T > T getAlgorithmCookie ( int indexFamily ) { return ( T ) algCookies [ indexFamily ] . get ( algBoxes [ indexFamily ] . getSelectedIndex ( ) ) ; } | Returns the cookie associated with the specified algorithm family . | 49 | 10 |
27,229 | private boolean checkSideSize ( Polygon2D_F64 p ) { double max = 0 , min = Double . MAX_VALUE ; for ( int i = 0 ; i < p . size ( ) ; i ++ ) { double l = p . getSideLength ( i ) ; max = Math . max ( max , l ) ; min = Math . min ( min , l ) ; } // See if a side is too small to decode if ( min < 10 ) return false ; // see if it's under extreme perspective distortion and unlikely to be readable return ! ( min / max < thresholdSideRatio ) ; } | Sanity check the polygon based on the size of its sides to see if it could be a fiducial that can be decoded | 130 | 28 |
27,230 | protected double computeFractionBoundary ( float pixelThreshold ) { // TODO ignore outer pixels from this computation. Will require 8 regions (4 corners + top/bottom + left/right) final int w = square . width ; int radius = ( int ) ( w * borderWidthFraction ) ; int innerWidth = w - 2 * radius ; int total = w * w - innerWidth * innerWidth ; int count = 0 ; for ( int y = 0 ; y < radius ; y ++ ) { int indexTop = y * w ; int indexBottom = ( w - radius + y ) * w ; for ( int x = 0 ; x < w ; x ++ ) { if ( square . data [ indexTop ++ ] < pixelThreshold ) count ++ ; if ( square . data [ indexBottom ++ ] < pixelThreshold ) count ++ ; } } for ( int y = radius ; y < w - radius ; y ++ ) { int indexLeft = y * w ; int indexRight = y * w + w - radius ; for ( int x = 0 ; x < radius ; x ++ ) { if ( square . data [ indexLeft ++ ] < pixelThreshold ) count ++ ; if ( square . data [ indexRight ++ ] < pixelThreshold ) count ++ ; } } return count / ( double ) total ; } | Computes the fraction of pixels inside the image border which are black | 280 | 13 |
27,231 | private void prepareForOutput ( Polygon2D_F64 imageShape , Result result ) { // the rotation estimate, apply in counter clockwise direction // since result.rotation is a clockwise rotation in the visual sense, which // is CCW on the grid int rotationCCW = ( 4 - result . rotation ) % 4 ; for ( int j = 0 ; j < rotationCCW ; j ++ ) { UtilPolygons2D_F64 . shiftUp ( imageShape ) ; } // save the results for output FoundFiducial f = found . grow ( ) ; f . id = result . which ; for ( int i = 0 ; i < 4 ; i ++ ) { Point2D_F64 a = imageShape . get ( i ) ; undistToDist . compute ( a . x , a . y , f . distortedPixels . get ( i ) ) ; } } | Takes the found quadrilateral and the computed 3D information and prepares it for output | 193 | 18 |
27,232 | public void process ( GrayS32 pixelToRegion , GrowQueue_I32 regionMemberCount , FastQueue < float [ ] > regionColor , FastQueue < Point2D_I32 > modeLocation ) { stopRequested = false ; initializeMerge ( regionMemberCount . size ) ; markMergeRegions ( regionColor , modeLocation , pixelToRegion ) ; if ( stopRequested ) return ; performMerge ( pixelToRegion , regionMemberCount ) ; } | Merges together similar regions which are in close proximity to each other . After merging most of the input data structures are modified to take in account the changes . | 100 | 31 |
27,233 | protected void markMergeRegions ( FastQueue < float [ ] > regionColor , FastQueue < Point2D_I32 > modeLocation , GrayS32 pixelToRegion ) { for ( int targetId = 0 ; targetId < modeLocation . size && ! stopRequested ; targetId ++ ) { float [ ] color = regionColor . get ( targetId ) ; Point2D_I32 location = modeLocation . get ( targetId ) ; int x0 = location . x - searchRadius ; int x1 = location . x + searchRadius + 1 ; int y0 = location . y - searchRadius ; int y1 = location . y + searchRadius + 1 ; // ensure that all pixels it examines are inside the image if ( x0 < 0 ) x0 = 0 ; if ( x1 > pixelToRegion . width ) x1 = pixelToRegion . width ; if ( y0 < 0 ) y0 = 0 ; if ( y1 > pixelToRegion . height ) y1 = pixelToRegion . height ; // look at the local neighborhood for ( int y = y0 ; y < y1 ; y ++ ) { for ( int x = x0 ; x < x1 ; x ++ ) { int candidateId = pixelToRegion . unsafe_get ( x , y ) ; // see if it is the same region if ( candidateId == targetId ) continue ; // see if the mode is near by Point2D_I32 p = modeLocation . get ( candidateId ) ; if ( p . distance2 ( location ) <= maxSpacialDistanceSq ) { // see if the color is similar float [ ] candidateColor = regionColor . get ( candidateId ) ; float colorDistance = SegmentMeanShiftSearch . distanceSq ( color , candidateColor ) ; if ( colorDistance <= maxColorDistanceSq ) { // mark the two regions as merged markMerge ( targetId , candidateId ) ; } } } } } } | Takes the mode of a region and searches the local area around it for other regions . If the region s mode is also within the local area its color is checked to see if it s similar enough . If the color is similar enough then the two regions are marked for merger . | 420 | 56 |
27,234 | public static void convertToBoof ( Picture input , ImageBase output ) { if ( input . getColor ( ) == ColorSpace . RGB ) { ImplConvertJCodecPicture . RGB_to_PLU8 ( input , ( Planar ) output ) ; } else if ( input . getColor ( ) == ColorSpace . YUV420 ) { if ( output instanceof Planar ) { Planar ms = ( Planar ) output ; if ( ms . getImageType ( ) . getDataType ( ) == ImageDataType . U8 ) { ImplConvertJCodecPicture . yuv420_to_PlRgb_U8 ( input , ms ) ; } else if ( ms . getImageType ( ) . getDataType ( ) == ImageDataType . F32 ) { ImplConvertJCodecPicture . yuv420_to_PlRgb_F32 ( input , ms ) ; } } else if ( output instanceof GrayU8 ) { ImplConvertJCodecPicture . yuv420_to_U8 ( input , ( GrayU8 ) output ) ; } else if ( output instanceof GrayF32 ) { ImplConvertJCodecPicture . yuv420_to_F32 ( input , ( GrayF32 ) output ) ; } else { throw new RuntimeException ( "Unexpected output image type" ) ; } } } | Converts an image in JCodec format into one in BoofCV format . | 295 | 17 |
27,235 | public boolean process ( DMatrixRMaj R , List < Point3D_F64 > worldPts , List < Point2D_F64 > observed ) { if ( worldPts . size ( ) != observed . size ( ) ) throw new IllegalArgumentException ( "Number of worldPts and observed must be the same" ) ; if ( worldPts . size ( ) < 2 ) throw new IllegalArgumentException ( "A minimum of two points are required" ) ; int N = worldPts . size ( ) ; A . reshape ( 3 * N , 3 ) ; b . reshape ( A . numRows , 1 ) ; for ( int i = 0 ; i < N ; i ++ ) { Point3D_F64 X = worldPts . get ( i ) ; Point2D_F64 o = observed . get ( i ) ; int indexA = i * 3 * 3 ; int indexB = i * 3 ; A . data [ indexA + 1 ] = - 1 ; A . data [ indexA + 2 ] = o . y ; A . data [ indexA + 3 ] = 1 ; A . data [ indexA + 5 ] = - o . x ; A . data [ indexA + 6 ] = - o . y ; A . data [ indexA + 7 ] = o . x ; GeometryMath_F64 . mult ( R , X , RX ) ; b . data [ indexB ++ ] = 1 * RX . y - o . y * RX . z ; b . data [ indexB ++ ] = - 1 * RX . x + o . x * RX . z ; b . data [ indexB ] = o . y * RX . x - o . x * RX . y ; } if ( ! solver . setA ( A ) ) return false ; solver . solve ( b , x ) ; T . x = x . data [ 0 ] ; T . y = x . data [ 1 ] ; T . z = x . data [ 2 ] ; return true ; } | Computes the translation given two or more feature observations and the known rotation | 439 | 14 |
27,236 | public void process ( GrayU8 binary ) { found . reset ( ) ; labeled . reshape ( binary . width , binary . height ) ; contourFinder . process ( binary , labeled ) ; List < ContourPacked > blobs = contourFinder . getContours ( ) ; for ( int i = 0 ; i < blobs . size ( ) ; i ++ ) { ContourPacked c = blobs . get ( i ) ; contourFinder . loadContour ( c . externalIndex , contourTmp ) ; proccessContour ( contourTmp . toList ( ) ) ; if ( internalContour ) { for ( int j = 0 ; j < c . internalIndexes . size ( ) ; j ++ ) { contourFinder . loadContour ( c . internalIndexes . get ( j ) , contourTmp ) ; proccessContour ( contourTmp . toList ( ) ) ; } } } } | Finds all valid ellipses in the binary image | 209 | 11 |
27,237 | protected void adjustElipseForBinaryBias ( EllipseRotated_F64 ellipse ) { ellipse . center . x += 0.5 ; ellipse . center . y += 0.5 ; ellipse . a += 0.5 ; ellipse . b += 0.5 ; } | In a binary image the contour on the right and bottom is off by one pixel . This is because the block region extends the entire pixel not just the lower extent which is where it is indexed from . | 70 | 41 |
27,238 | void undistortContour ( List < Point2D_I32 > external , FastQueue < Point2D_F64 > pointsF ) { for ( int j = 0 ; j < external . size ( ) ; j ++ ) { Point2D_I32 p = external . get ( j ) ; if ( distToUndist != null ) { distToUndist . compute ( p . x , p . y , distortedPoint ) ; pointsF . grow ( ) . set ( distortedPoint . x , distortedPoint . y ) ; } else { pointsF . grow ( ) . set ( p . x , p . y ) ; } } } | Undistort the contour points and convert into a floating point format for the fitting operation | 139 | 18 |
27,239 | boolean isApproximatelyElliptical ( EllipseRotated_F64 ellipse , List < Point2D_F64 > points , int maxSamples ) { closestPoint . setEllipse ( ellipse ) ; double maxDistance2 = maxDistanceFromEllipse * maxDistanceFromEllipse ; if ( points . size ( ) <= maxSamples ) { for ( int i = 0 ; i < points . size ( ) ; i ++ ) { Point2D_F64 p = points . get ( i ) ; closestPoint . process ( p ) ; double d = closestPoint . getClosest ( ) . distance2 ( p ) ; if ( d > maxDistance2 ) { return false ; } } } else { for ( int i = 0 ; i < maxSamples ; i ++ ) { Point2D_F64 p = points . get ( i * points . size ( ) / maxSamples ) ; closestPoint . process ( p ) ; double d = closestPoint . getClosest ( ) . distance2 ( p ) ; if ( d > maxDistance2 ) { return false ; } } } return true ; } | Look at the maximum distance contour points are from the ellipse and see if they exceed a maximum threshold | 249 | 22 |
27,240 | @ Override public boolean filterPixelPolygon ( Polygon2D_F64 undistorted , Polygon2D_F64 distorted , GrowQueue_B touches , boolean touchesBorder ) { if ( touchesBorder ) { if ( distorted . size ( ) < 3 ) return false ; int totalRegular = distorted . size ( ) ; for ( int i = 0 ; i < distorted . size ( ) ; i ++ ) { if ( touches . get ( i ) ) totalRegular -- ; } return totalRegular > 0 ; // Would be 3 if it was filled in, but local binary can cause external contour to be concave } else { return distorted . size ( ) == 4 ; } } | If not touching the border then the number of corners must be 4 . If touching the border there must be at least 3 corners not touching the border . 7 corners at most . If there were 8 then all sides of a square would be touching the border . No more than 3 corners since that s the most number of non - border corners a square can have . | 145 | 72 |
27,241 | public Frame getFrame ( BufferedImage image , double gamma , boolean flipChannels ) { if ( image == null ) { return null ; } SampleModel sm = image . getSampleModel ( ) ; int depth = 0 , numChannels = sm . getNumBands ( ) ; switch ( image . getType ( ) ) { case BufferedImage . TYPE_INT_RGB : case BufferedImage . TYPE_INT_ARGB : case BufferedImage . TYPE_INT_ARGB_PRE : case BufferedImage . TYPE_INT_BGR : depth = Frame . DEPTH_UBYTE ; numChannels = 4 ; break ; } if ( depth == 0 || numChannels == 0 ) { switch ( sm . getDataType ( ) ) { case DataBuffer . TYPE_BYTE : depth = Frame . DEPTH_UBYTE ; break ; case DataBuffer . TYPE_USHORT : depth = Frame . DEPTH_USHORT ; break ; case DataBuffer . TYPE_SHORT : depth = Frame . DEPTH_SHORT ; break ; case DataBuffer . TYPE_INT : depth = Frame . DEPTH_INT ; break ; case DataBuffer . TYPE_FLOAT : depth = Frame . DEPTH_FLOAT ; break ; case DataBuffer . TYPE_DOUBLE : depth = Frame . DEPTH_DOUBLE ; break ; default : assert false ; } } if ( frame == null || frame . imageWidth != image . getWidth ( ) || frame . imageHeight != image . getHeight ( ) || frame . imageDepth != depth || frame . imageChannels != numChannels ) { frame = new Frame ( image . getWidth ( ) , image . getHeight ( ) , depth , numChannels ) ; } copy ( image , frame , gamma , flipChannels , null ) ; return frame ; } | Returns a Frame based on a BufferedImage given gamma and inverted channels flag . | 400 | 16 |
27,242 | public static int multiply ( int x , int y , int primitive , int domain ) { int r = 0 ; while ( y > 0 ) { if ( ( y & 1 ) != 0 ) { r = r ^ x ; } y = y >> 1 ; x = x << 1 ; if ( x >= domain ) { x ^= primitive ; } } return r ; } | Implementation of multiplication with a primitive polynomial . The result will be a member of the same field as the inputs provided primitive is an appropriate irreducible polynomial for that field . | 78 | 40 |
27,243 | private static boolean isClockWise ( Grid g ) { EllipseRotated_F64 v00 = g . get ( 0 , 0 ) ; EllipseRotated_F64 v02 = g . columns < 3 ? g . get ( 1 , 1 ) : g . get ( 0 , 2 ) ; EllipseRotated_F64 v20 = g . rows < 3 ? g . get ( 1 , 1 ) : g . get ( 2 , 0 ) ; double a_x = v02 . center . x - v00 . center . x ; double a_y = v02 . center . y - v00 . center . y ; double b_x = v20 . center . x - v00 . center . x ; double b_y = v20 . center . y - v00 . center . y ; return a_x * b_y - a_y * b_x < 0 ; } | Uses the cross product to determine if the grid is in clockwise order | 198 | 15 |
27,244 | public FDistort init ( ImageBase input , ImageBase output ) { this . input = input ; this . output = output ; inputType = input . getImageType ( ) ; interp ( InterpolationType . BILINEAR ) ; border ( 0 ) ; cached = false ; distorter = null ; outputToInput = null ; return this ; } | Specifies the input and output image and sets interpolation to BILINEAR black image border cache is off . | 76 | 23 |
27,245 | public FDistort setRefs ( ImageBase input , ImageBase output ) { this . input = input ; this . output = output ; inputType = input . getImageType ( ) ; return this ; } | All this does is set the references to the images . Nothing else is changed and its up to the user to correctly update everything else . | 44 | 27 |
27,246 | public FDistort input ( ImageBase input ) { if ( this . input == null || this . input . width != input . width || this . input . height != input . height ) { distorter = null ; } this . input = input ; inputType = input . getImageType ( ) ; return this ; } | Changes the input image . The previous distortion is thrown away only if the input image has a different shape | 67 | 20 |
27,247 | public FDistort output ( ImageBase output ) { if ( this . output == null || this . output . width != output . width || this . output . height != output . height ) { distorter = null ; } this . output = output ; return this ; } | Changes the output image . The previous distortion is thrown away only if the output image has a different shape | 56 | 20 |
27,248 | public FDistort border ( BorderType type ) { if ( borderType == type ) return this ; borderType = type ; return border ( FactoryImageBorder . generic ( type , inputType ) ) ; } | Sets the border by type . | 43 | 7 |
27,249 | public FDistort border ( double value ) { // to recycle here the value also needs to be saved // if( borderType == BorderType.VALUE ) // return this; borderType = BorderType . ZERO ; return border ( FactoryImageBorder . genericValue ( value , inputType ) ) ; } | Sets the border to a fixed gray - scale value | 63 | 11 |
27,250 | public FDistort interp ( InterpolationType type ) { distorter = null ; this . interp = FactoryInterpolation . createPixel ( 0 , 255 , type , BorderType . EXTENDED , inputType ) ; return this ; } | Specifies the interpolation used by type . | 52 | 9 |
27,251 | public FDistort affine ( double a11 , double a12 , double a21 , double a22 , double dx , double dy ) { PixelTransformAffine_F32 transform ; if ( outputToInput != null && outputToInput instanceof PixelTransformAffine_F32 ) { transform = ( PixelTransformAffine_F32 ) outputToInput ; } else { transform = new PixelTransformAffine_F32 ( ) ; } Affine2D_F32 m = new Affine2D_F32 ( ) ; m . a11 = ( float ) a11 ; m . a12 = ( float ) a12 ; m . a21 = ( float ) a21 ; m . a22 = ( float ) a22 ; m . tx = ( float ) dx ; m . ty = ( float ) dy ; m . invert ( transform . getModel ( ) ) ; return transform ( transform ) ; } | Affine transform from input to output | 195 | 7 |
27,252 | public FDistort rotate ( double angleInputToOutput ) { PixelTransform < Point2D_F32 > outputToInput = DistortSupport . transformRotate ( input . width / 2 , input . height / 2 , output . width / 2 , output . height / 2 , ( float ) angleInputToOutput ) ; return transform ( outputToInput ) ; } | Applies a distortion which will rotate the input image by the specified amount . | 77 | 15 |
27,253 | public void apply ( ) { // see if the distortion class needs to be created again if ( distorter == null ) { Class typeOut = output . getImageType ( ) . getImageClass ( ) ; switch ( input . getImageType ( ) . getFamily ( ) ) { case GRAY : distorter = FactoryDistort . distortSB ( cached , ( InterpolatePixelS ) interp , typeOut ) ; break ; case PLANAR : distorter = FactoryDistort . distortPL ( cached , ( InterpolatePixelS ) interp , typeOut ) ; break ; case INTERLEAVED : distorter = FactoryDistort . distortIL ( cached , ( InterpolatePixelMB ) interp , output . getImageType ( ) ) ; break ; default : throw new IllegalArgumentException ( "Unsupported image type" ) ; } } distorter . setModel ( outputToInput ) ; distorter . apply ( input , output ) ; } | Applies the distortion . | 203 | 5 |
27,254 | public boolean process ( DMatrixRMaj P ) { if ( ! svd . decompose ( P ) ) return false ; svd . getU ( Ut , true ) ; svd . getV ( V , false ) ; double sv [ ] = svd . getSingularValues ( ) ; SingularOps_DDRM . descendingOrder ( Ut , true , sv , 3 , V , false ) ; // compute W+, which is transposed and non-negative inverted for ( int i = 0 ; i < 3 ; i ++ ) { Wt . unsafe_set ( i , i , 1.0 / sv [ i ] ) ; } // get the pseudo inverse // A+ = V*(W+)*U' CommonOps_DDRM . mult ( V , Wt , tmp ) ; CommonOps_DDRM . mult ( tmp , Ut , PA ) ; // Vector U, which is P*U = 0 SpecializedOps_DDRM . subvector ( V , 0 , 3 , V . numRows , false , 0 , ns ) ; return true ; } | Compute projective transform that converts P into identity | 233 | 10 |
27,255 | public void computeH ( DMatrixRMaj H ) { H . reshape ( 4 , 4 ) ; CommonOps_DDRM . insert ( PA , H , 0 , 0 ) ; for ( int i = 0 ; i < 4 ; i ++ ) { H . unsafe_set ( i , 3 , ns . data [ i ] ) ; } } | Retrieve projective transform H | 76 | 6 |
27,256 | public void init ( ) { N = getParameterLength ( ) ; jacR = new DMatrixRMaj [ N ] ; for ( int i = 0 ; i < N ; i ++ ) { jacR [ i ] = new DMatrixRMaj ( 3 , 3 ) ; } jacobian = new DMatrixRMaj ( N , 9 ) ; paramInternal = new double [ N ] ; numericalJac = createNumericalAlgorithm ( function ) ; } | Initializes data structures . Separate function to make it easier to extend the class | 103 | 16 |
27,257 | public void checkOneObservationPerView ( ) { for ( int viewIdx = 0 ; viewIdx < views . length ; viewIdx ++ ) { SceneObservations . View v = views [ viewIdx ] ; for ( int obsIdx = 0 ; obsIdx < v . size ( ) ; obsIdx ++ ) { int a = v . point . get ( obsIdx ) ; for ( int i = obsIdx + 1 ; i < v . size ( ) ; i ++ ) { if ( a == v . point . get ( i ) ) { new RuntimeException ( "Same point is viewed more than once in the same view" ) ; } } } } } | Makes sure that each feature is only observed once in each view | 149 | 13 |
27,258 | public static void profile ( Performer performer , int num ) { long deltaTime = measureTime ( performer , num ) ; System . out . printf ( "%30s time = %8d ms per frame = %8.3f\n" , performer . getName ( ) , deltaTime , ( deltaTime / ( double ) num ) ) ; // System.out.println(performer.getClass().getSimpleName()+ // " time = "+deltaTime+" ms per frame "+(deltaTime/(double)num)); } | See how long it takes to run the process num times and print the results to standard out | 115 | 18 |
27,259 | protected void initialize ( T input , GrayS32 output ) { this . graph = output ; final int N = input . width * input . height ; regionSize . resize ( N ) ; threshold . resize ( N ) ; for ( int i = 0 ; i < N ; i ++ ) { regionSize . data [ i ] = 1 ; threshold . data [ i ] = K ; graph . data [ i ] = i ; // assign a unique label to each pixel since they are all their own region initially } edges . reset ( ) ; edgesNotMatched . reset ( ) ; } | Predeclares all memory required and sets data structures to their initial values | 122 | 15 |
27,260 | protected void mergeSmallRegions ( ) { for ( int i = 0 ; i < edgesNotMatched . size ( ) ; i ++ ) { Edge e = edgesNotMatched . get ( i ) ; int rootA = find ( e . indexA ) ; int rootB = find ( e . indexB ) ; // see if they are already part of the same segment if ( rootA == rootB ) continue ; int sizeA = regionSize . get ( rootA ) ; int sizeB = regionSize . get ( rootB ) ; // merge if one of the regions is too small if ( sizeA < minimumSize || sizeB < minimumSize ) { // Point everything towards rootA graph . data [ e . indexB ] = rootA ; graph . data [ rootB ] = rootA ; // Update the size of regionA regionSize . data [ rootA ] = sizeA + sizeB ; } } } | Look at the remaining regions and if there are any small ones merge them into a larger region | 195 | 19 |
27,261 | protected int find ( int child ) { int root = graph . data [ child ] ; if ( root == graph . data [ root ] ) return root ; int inputChild = child ; while ( root != child ) { child = root ; root = graph . data [ child ] ; } graph . data [ inputChild ] = root ; return root ; } | Finds the root given child . If the child does not point directly to the parent find the parent and make the child point directly towards it . | 73 | 29 |
27,262 | protected void computeOutput ( ) { outputRegionId . reset ( ) ; outputRegionSizes . reset ( ) ; for ( int y = 0 ; y < graph . height ; y ++ ) { int indexGraph = graph . startIndex + y * graph . stride ; for ( int x = 0 ; x < graph . width ; x ++ , indexGraph ++ ) { int parent = graph . data [ indexGraph ] ; if ( parent == indexGraph ) { outputRegionId . add ( indexGraph ) ; outputRegionSizes . add ( regionSize . get ( indexGraph ) ) ; } else { // find the parent and set the child to it int child = indexGraph ; while ( parent != child ) { child = parent ; parent = graph . data [ child ] ; } graph . data [ indexGraph ] = parent ; } } } } | Searches for root nodes in the graph and adds their size to the list of region sizes . Makes sure all other nodes in the graph point directly at their root . | 177 | 34 |
27,263 | public void apply ( DMatrixRMaj H , DMatrixRMaj output ) { output . reshape ( 3 , H . numCols ) ; int stride = H . numCols ; for ( int col = 0 ; col < H . numCols ; col ++ ) { // This column in H double h1 = H . data [ col ] , h2 = H . data [ col + stride ] , h3 = H . data [ col + 2 * stride ] ; output . data [ col ] = h1 / stdX - meanX * h3 / stdX ; output . data [ col + stride ] = h2 / stdY - meanY * h3 / stdY ; output . data [ col + 2 * stride ] = h3 ; } } | Applies normalization to a H = 3xN matrix | 165 | 12 |
27,264 | public void apply ( DMatrix3x3 C , DMatrix3x3 output ) { DMatrix3x3 Hinv = matrixInv3 ( work ) ; PerspectiveOps . multTranA ( Hinv , C , Hinv , output ) ; } | Apply transform to conic in 3x3 matrix format . | 57 | 12 |
27,265 | private static WlCoef_I32 generateInv_I32 ( ) { WlCoef_I32 ret = new WlCoef_I32 ( ) ; ret . scaling = new int [ ] { 1 , 1 } ; ret . wavelet = new int [ ] { ret . scaling [ 0 ] , - ret . scaling [ 0 ] } ; ret . denominatorScaling = 2 ; ret . denominatorWavelet = 2 ; return ret ; } | Create a description for the inverse transform . Note that this will NOT produce an exact copy of the original due to rounding error . | 99 | 25 |
27,266 | public void updateTracks ( I input , PyramidDiscrete < I > pyramid , D [ ] derivX , D [ ] derivY ) { // forget recently dropped or spawned tracks tracksSpawned . clear ( ) ; // save references this . input = input ; trackerKlt . setInputs ( pyramid , derivX , derivY ) ; trackUsingKlt ( tracksPureKlt ) ; trackUsingKlt ( tracksReactivated ) ; } | Updates the location and description of tracks using KLT . Saves a reference to the input image for future processing . | 93 | 24 |
27,267 | private void trackUsingKlt ( List < CombinedTrack < TD > > tracks ) { for ( int i = 0 ; i < tracks . size ( ) ; ) { CombinedTrack < TD > track = tracks . get ( i ) ; if ( ! trackerKlt . performTracking ( track . track ) ) { // handle the dropped track tracks . remove ( i ) ; tracksDormant . add ( track ) ; } else { track . set ( track . track . x , track . track . y ) ; i ++ ; } } } | Tracks features in the list using KLT and update their state | 114 | 13 |
27,268 | public void spawnTracksFromDetected ( ) { // mark detected features with no matches as available FastQueue < AssociatedIndex > matches = associate . getMatches ( ) ; int N = detector . getNumberOfFeatures ( ) ; for ( int i = 0 ; i < N ; i ++ ) associated [ i ] = false ; for ( AssociatedIndex i : matches . toList ( ) ) { associated [ i . dst ] = true ; } // spawn new tracks for unassociated detected features for ( int i = 0 ; i < N ; i ++ ) { if ( associated [ i ] ) continue ; Point2D_F64 p = detector . getLocation ( i ) ; TD d = detectedDesc . get ( i ) ; CombinedTrack < TD > track ; if ( tracksUnused . size ( ) > 0 ) { track = tracksUnused . pop ( ) ; } else { track = new CombinedTrack <> ( ) ; track . desc = detector . createDescription ( ) ; track . track = trackerKlt . createNewTrack ( ) ; } // create the descriptor for tracking trackerKlt . setDescription ( ( float ) p . x , ( float ) p . y , track . track ) ; // set track ID and location track . featureId = totalTracks ++ ; track . desc . setTo ( d ) ; track . set ( p ) ; // update list of active tracks tracksPureKlt . add ( track ) ; tracksSpawned . add ( track ) ; } } | From the found interest points create new tracks . Tracks are only created at points where there are no existing tracks . | 314 | 22 |
27,269 | private void associateToDetected ( List < CombinedTrack < TD > > known ) { // initialize data structures detectedDesc . reset ( ) ; knownDesc . reset ( ) ; // create a list of detected feature descriptions int N = detector . getNumberOfFeatures ( ) ; for ( int i = 0 ; i < N ; i ++ ) { detectedDesc . add ( detector . getDescription ( i ) ) ; } // create a list of previously created track descriptions for ( CombinedTrack < TD > t : known ) { knownDesc . add ( t . desc ) ; } // associate features associate . setSource ( knownDesc ) ; associate . setDestination ( detectedDesc ) ; associate . associate ( ) ; N = Math . max ( known . size ( ) , detector . getNumberOfFeatures ( ) ) ; if ( associated . length < N ) associated = new boolean [ N ] ; } | Associates pre - existing tracks to newly detected features | 185 | 11 |
27,270 | public void associateAllToDetected ( ) { // initialize data structures List < CombinedTrack < TD >> all = new ArrayList <> ( ) ; all . addAll ( tracksReactivated ) ; all . addAll ( tracksDormant ) ; all . addAll ( tracksPureKlt ) ; int numTainted = tracksReactivated . size ( ) + tracksDormant . size ( ) ; tracksReactivated . clear ( ) ; tracksDormant . clear ( ) ; // detect features detector . detect ( input ) ; // associate features associateToDetected ( all ) ; FastQueue < AssociatedIndex > matches = associate . getMatches ( ) ; // See which features got respawned and which ones are made dormant for ( int i = 0 ; i < numTainted ; i ++ ) { associated [ i ] = false ; } for ( AssociatedIndex a : matches . toList ( ) ) { // don't mess with pure-KLT tracks if ( a . src >= numTainted ) continue ; CombinedTrack < TD > t = all . get ( a . src ) ; t . set ( detector . getLocation ( a . dst ) ) ; trackerKlt . setDescription ( ( float ) t . x , ( float ) t . y , t . track ) ; tracksReactivated . add ( t ) ; associated [ a . src ] = true ; } for ( int i = 0 ; i < numTainted ; i ++ ) { if ( ! associated [ i ] ) { tracksDormant . add ( all . get ( i ) ) ; } } } | Associate all tracks in any state to the latest observations . If a dormant track is associated it will be reactivated . If a reactivated track is associated it s state will be updated . PureKLT tracks are left unmodified . | 333 | 47 |
27,271 | public boolean dropTrack ( CombinedTrack < TD > track ) { if ( ! tracksPureKlt . remove ( track ) ) if ( ! tracksReactivated . remove ( track ) ) if ( ! tracksDormant . remove ( track ) ) return false ; tracksUnused . add ( track ) ; return true ; } | Stops tracking the specified track and recycles its data . | 67 | 12 |
27,272 | public void dropAllTracks ( ) { tracksUnused . addAll ( tracksDormant ) ; tracksUnused . addAll ( tracksPureKlt ) ; tracksUnused . addAll ( tracksReactivated ) ; tracksSpawned . clear ( ) ; tracksPureKlt . clear ( ) ; tracksReactivated . clear ( ) ; tracksSpawned . clear ( ) ; tracksDormant . clear ( ) ; } | Drops all tracks and recycles the data | 90 | 9 |
27,273 | private void processImage ( ) { final List < Point2D_F64 > leftPts = new ArrayList <> ( ) ; final List < Point2D_F64 > rightPts = new ArrayList <> ( ) ; final List < TupleDesc > leftDesc = new ArrayList <> ( ) ; final List < TupleDesc > rightDesc = new ArrayList <> ( ) ; final ProgressMonitor progressMonitor = new ProgressMonitor ( this , "Compute Feature Information" , "" , 0 , 4 ) ; extractImageFeatures ( progressMonitor , 0 , imageLeft , leftDesc , leftPts ) ; extractImageFeatures ( progressMonitor , 2 , imageRight , rightDesc , rightPts ) ; SwingUtilities . invokeLater ( new Runnable ( ) { public void run ( ) { progressMonitor . close ( ) ; scorePanel . setScorer ( controlPanel . getSelected ( ) ) ; scorePanel . setLocation ( leftPts , rightPts , leftDesc , rightDesc ) ; repaint ( ) ; } } ) ; } | Extracts image information and then passes that info onto scorePanel for display . Data is not recycled to avoid threading issues . | 228 | 26 |
27,274 | private void extractImageFeatures ( final ProgressMonitor progressMonitor , final int progress , T image , List < TupleDesc > descs , List < Point2D_F64 > locs ) { SwingUtilities . invokeLater ( new Runnable ( ) { public void run ( ) { progressMonitor . setNote ( "Detecting" ) ; } } ) ; detector . detect ( image ) ; SwingUtilities . invokeLater ( new Runnable ( ) { public void run ( ) { progressMonitor . setProgress ( progress + 1 ) ; progressMonitor . setNote ( "Describing" ) ; } } ) ; describe . setImage ( image ) ; orientation . setImage ( image ) ; // See if the detector can detect the feature's scale if ( detector . hasScale ( ) ) { for ( int i = 0 ; i < detector . getNumberOfFeatures ( ) ; i ++ ) { double yaw = 0 ; Point2D_F64 pt = detector . getLocation ( i ) ; double radius = detector . getRadius ( i ) ; if ( describe . requiresOrientation ( ) ) { orientation . setObjectRadius ( radius ) ; yaw = orientation . compute ( pt . x , pt . y ) ; } TupleDesc d = describe . createDescription ( ) ; if ( describe . process ( pt . x , pt . y , yaw , radius , d ) ) { descs . add ( d ) ; locs . add ( pt . copy ( ) ) ; } } } else { // just set the radius to one in this case orientation . setObjectRadius ( 1 ) ; for ( int i = 0 ; i < detector . getNumberOfFeatures ( ) ; i ++ ) { double yaw = 0 ; Point2D_F64 pt = detector . getLocation ( i ) ; if ( describe . requiresOrientation ( ) ) { yaw = orientation . compute ( pt . x , pt . y ) ; } TupleDesc d = describe . createDescription ( ) ; if ( describe . process ( pt . x , pt . y , yaw , 1 , d ) ) { descs . add ( d ) ; locs . add ( pt . copy ( ) ) ; } } } SwingUtilities . invokeLater ( new Runnable ( ) { public void run ( ) { progressMonitor . setProgress ( progress + 2 ) ; } } ) ; } | Detects the locations of the features in the image and extracts descriptions of each of the features . | 510 | 19 |
27,275 | protected void resizeForLayer ( int width , int height ) { deriv1X . reshape ( width , height ) ; deriv1Y . reshape ( width , height ) ; deriv2X . reshape ( width , height ) ; deriv2Y . reshape ( width , height ) ; deriv2XX . reshape ( width , height ) ; deriv2YY . reshape ( width , height ) ; deriv2XY . reshape ( width , height ) ; warpImage2 . reshape ( width , height ) ; warpDeriv2X . reshape ( width , height ) ; warpDeriv2Y . reshape ( width , height ) ; warpDeriv2XX . reshape ( width , height ) ; warpDeriv2YY . reshape ( width , height ) ; warpDeriv2XY . reshape ( width , height ) ; derivFlowUX . reshape ( width , height ) ; derivFlowUY . reshape ( width , height ) ; derivFlowVX . reshape ( width , height ) ; derivFlowVY . reshape ( width , height ) ; psiData . reshape ( width , height ) ; psiGradient . reshape ( width , height ) ; psiSmooth . reshape ( width , height ) ; divU . reshape ( width , height ) ; divV . reshape ( width , height ) ; divD . reshape ( width , height ) ; du . reshape ( width , height ) ; dv . reshape ( width , height ) ; } | Resize images for the current layer being processed | 320 | 9 |
27,276 | private void computePsiSmooth ( GrayF32 ux , GrayF32 uy , GrayF32 vx , GrayF32 vy , GrayF32 psiSmooth ) { int N = derivFlowUX . width * derivFlowUX . height ; for ( int i = 0 ; i < N ; i ++ ) { float vux = ux . data [ i ] ; float vuy = uy . data [ i ] ; float vvx = vx . data [ i ] ; float vvy = vy . data [ i ] ; float mu = vux * vux + vuy * vuy ; float mv = vvx * vvx + vvy * vvy ; psiSmooth . data [ i ] = ( float ) ( 1.0 / ( 2.0 * Math . sqrt ( mu + mv + EPSILON * EPSILON ) ) ) ; } } | Equation 5 . Psi_s | 197 | 8 |
27,277 | protected void computePsiDataPsiGradient ( GrayF32 image1 , GrayF32 image2 , GrayF32 deriv1x , GrayF32 deriv1y , GrayF32 deriv2x , GrayF32 deriv2y , GrayF32 deriv2xx , GrayF32 deriv2yy , GrayF32 deriv2xy , GrayF32 du , GrayF32 dv , GrayF32 psiData , GrayF32 psiGradient ) { int N = image1 . width * image1 . height ; for ( int i = 0 ; i < N ; i ++ ) { float du_ = du . data [ i ] ; float dv_ = dv . data [ i ] ; // compute Psi-data float taylor2 = image2 . data [ i ] + deriv2x . data [ i ] * du_ + deriv2y . data [ i ] * dv_ ; float v = taylor2 - image1 . data [ i ] ; psiData . data [ i ] = ( float ) ( 1.0 / ( 2.0 * Math . sqrt ( v * v + EPSILON * EPSILON ) ) ) ; // compute Psi-gradient float dIx = deriv2x . data [ i ] + deriv2xx . data [ i ] * du_ + deriv2xy . data [ i ] * dv_ - deriv1x . data [ i ] ; float dIy = deriv2y . data [ i ] + deriv2xy . data [ i ] * du_ + deriv2yy . data [ i ] * dv_ - deriv1y . data [ i ] ; float dI2 = dIx * dIx + dIy * dIy ; psiGradient . data [ i ] = ( float ) ( 1.0 / ( 2.0 * Math . sqrt ( dI2 + EPSILON * EPSILON ) ) ) ; } } | Compute Psi - data using equation 6 and approximation in equation 5 | 416 | 14 |
27,278 | private void computeDivUVD ( GrayF32 u , GrayF32 v , GrayF32 psi , GrayF32 divU , GrayF32 divV , GrayF32 divD ) { final int stride = psi . stride ; // compute the inside pixel for ( int y = 1 ; y < psi . height - 1 ; y ++ ) { // index of the current pixel int index = y * stride + 1 ; for ( int x = 1 ; x < psi . width - 1 ; x ++ , index ++ ) { float psi_index = psi . data [ index ] ; float coef0 = 0.5f * ( psi . data [ index + 1 ] + psi_index ) ; float coef1 = 0.5f * ( psi . data [ index - 1 ] + psi_index ) ; float coef2 = 0.5f * ( psi . data [ index + stride ] + psi_index ) ; float coef3 = 0.5f * ( psi . data [ index - stride ] + psi_index ) ; float u_index = u . data [ index ] ; divU . data [ index ] = coef0 * ( u . data [ index + 1 ] - u_index ) + coef1 * ( u . data [ index - 1 ] - u_index ) + coef2 * ( u . data [ index + stride ] - u_index ) + coef3 * ( u . data [ index - stride ] - u_index ) ; float v_index = v . data [ index ] ; divV . data [ index ] = coef0 * ( v . data [ index + 1 ] - v_index ) + coef1 * ( v . data [ index - 1 ] - v_index ) + coef2 * ( v . data [ index + stride ] - v_index ) + coef3 * ( v . data [ index - stride ] - v_index ) ; divD . data [ index ] = coef0 + coef1 + coef2 + coef3 ; } } // handle the image borders for ( int x = 0 ; x < psi . width ; x ++ ) { computeDivUVD_safe ( x , 0 , u , v , psi , divU , divV , divD ) ; computeDivUVD_safe ( x , psi . height - 1 , u , v , psi , divU , divV , divD ) ; } for ( int y = 1 ; y < psi . height - 1 ; y ++ ) { computeDivUVD_safe ( 0 , y , u , v , psi , divU , divV , divD ) ; computeDivUVD_safe ( psi . width - 1 , y , u , v , psi , divU , divV , divD ) ; } } | Computes the divergence for u v and d . Equation 8 and Equation 10 . | 600 | 18 |
27,279 | public void reset ( ) { unused . addAll ( templateNegative ) ; unused . addAll ( templatePositive ) ; templateNegative . clear ( ) ; templatePositive . clear ( ) ; } | Discard previous results and puts it back into its initial state | 43 | 12 |
27,280 | public void addDescriptor ( boolean positive , ImageRectangle rect ) { addDescriptor ( positive , rect . x0 , rect . y0 , rect . x1 , rect . y1 ) ; } | Creates a new descriptor for the specified region | 45 | 9 |
27,281 | private void addDescriptor ( boolean positive , NccFeature f ) { // avoid adding the same descriptor twice or adding contradicting results if ( positive ) if ( distance ( f , templatePositive ) < 0.05 ) { return ; } if ( ! positive ) { if ( distance ( f , templateNegative ) < 0.05 ) { return ; } // a positive positive can have very bad affects on tracking, try to avoid learning a positive // example as a negative one if ( distance ( f , templatePositive ) < 0.05 ) { return ; } } if ( positive ) templatePositive . add ( f ) ; else templateNegative . add ( f ) ; } | Adds a descriptor to the positive or negative list . If it is very similar to an existing one it is not added . Look at code for details | 144 | 29 |
27,282 | public void computeNccDescriptor ( NccFeature f , float x0 , float y0 , float x1 , float y1 ) { double mean = 0 ; float widthStep = ( x1 - x0 ) / 15.0f ; float heightStep = ( y1 - y0 ) / 15.0f ; // compute the mean value int index = 0 ; for ( int y = 0 ; y < 15 ; y ++ ) { float sampleY = y0 + y * heightStep ; for ( int x = 0 ; x < 15 ; x ++ ) { mean += f . value [ index ++ ] = interpolate . get_fast ( x0 + x * widthStep , sampleY ) ; } } mean /= 15 * 15 ; // compute the variance and save the difference from the mean double variance = 0 ; index = 0 ; for ( int y = 0 ; y < 15 ; y ++ ) { for ( int x = 0 ; x < 15 ; x ++ ) { double v = f . value [ index ++ ] -= mean ; variance += v * v ; } } variance /= 15 * 15 ; f . mean = mean ; f . sigma = Math . sqrt ( variance ) ; } | Computes the NCC descriptor by sample points at evenly spaced distances inside the rectangle | 258 | 16 |
27,283 | public NccFeature createDescriptor ( ) { NccFeature f ; if ( unused . isEmpty ( ) ) f = new NccFeature ( 15 * 15 ) ; else f = unused . pop ( ) ; return f ; } | Creates a new descriptor or recycles an old one | 50 | 11 |
27,284 | public double computeConfidence ( int x0 , int y0 , int x1 , int y1 ) { computeNccDescriptor ( observed , x0 , y0 , x1 , y1 ) ; // distance from each set of templates if ( templateNegative . size ( ) > 0 && templatePositive . size ( ) > 0 ) { double distancePositive = distance ( observed , templatePositive ) ; double distanceNegative = distance ( observed , templateNegative ) ; return distanceNegative / ( distanceNegative + distancePositive ) ; } else if ( templatePositive . size ( ) > 0 ) { return 1.0 - distance ( observed , templatePositive ) ; } else { return distance ( observed , templateNegative ) ; } } | Compute a value which indicates how confident the specified region is to be a member of the positive set . The confidence value is from 0 to 1 . 1 indicates 100% confidence . | 162 | 36 |
27,285 | public double computeConfidence ( ImageRectangle r ) { return computeConfidence ( r . x0 , r . y0 , r . x1 , r . y1 ) ; } | see the other function with the same name | 39 | 8 |
27,286 | public double distance ( NccFeature observed , List < NccFeature > candidates ) { double maximum = - Double . MAX_VALUE ; // The feature which has the best fit will maximize the score for ( NccFeature f : candidates ) { double score = DescriptorDistance . ncc ( observed , f ) ; if ( score > maximum ) maximum = score ; } return 1 - 0.5 * ( maximum + 1 ) ; } | Computes the best distance to observed from the candidate list . | 91 | 12 |
27,287 | public void process ( GrayF32 input ) { constructPyramid ( input ) ; corners . reset ( ) ; // top to bottom. This way the intensity image is at the input image's scale. Which is useful // for visualiztion purposes double scale = Math . pow ( 2.0 , pyramid . size ( ) - 1 ) ; for ( int level = pyramid . size ( ) - 1 ; level >= 0 ; level -- ) { // find the corners detector . process ( pyramid . get ( level ) ) ; // Add found corners to this level's list PyramidLevel featsLevel = featureLevels . get ( level ) ; FastQueue < ChessboardCorner > corners = detector . getCorners ( ) ; featsLevel . corners . reset ( ) ; for ( int i = 0 ; i < corners . size ; i ++ ) { ChessboardCorner cf = corners . get ( i ) ; // convert the coordinate into input image coordinates double x = cf . x * scale ; double y = cf . y * scale ; // Compensate for how the pyramid was computed using an average down sample. It shifts // the coordinate system. // if( scale > 1 ) { // x += 0.5*scale; // y += 0.5*scale; // } ChessboardCorner cl = featsLevel . corners . grow ( ) ; cl . first = true ; cl . set ( x , y , cf . orientation , cf . intensity ) ; } scale /= 2.0 ; } // Create a combined set of features from all the levels. Only add each feature once by searching // for it in the next level down for ( int levelIdx = 0 ; levelIdx < pyramid . size ( ) ; levelIdx ++ ) { PyramidLevel level0 = featureLevels . get ( levelIdx ) ; // mark features in the next level as seen if they match ones in this level if ( levelIdx + 1 < pyramid . size ( ) ) { PyramidLevel level1 = featureLevels . get ( levelIdx + 1 ) ; markSeenAsFalse ( level0 . corners , level1 . corners ) ; } } for ( int levelIdx = 0 ; levelIdx < pyramid . size ( ) ; levelIdx ++ ) { PyramidLevel level = featureLevels . get ( levelIdx ) ; // only add corners if they were first seen in this level for ( int i = 0 ; i < level . corners . size ; i ++ ) { ChessboardCorner c = level . corners . get ( i ) ; if ( c . first ) corners . grow ( ) . set ( c ) ; } } } | Detects corner features inside the input gray scale image . | 553 | 11 |
27,288 | void markSeenAsFalse ( FastQueue < ChessboardCorner > corners0 , FastQueue < ChessboardCorner > corners1 ) { nn . setPoints ( corners1 . toList ( ) , false ) ; // radius of the blob in the intensity image is 2*kernelRadius int radius = detector . shiRadius * 2 + 1 ; for ( int i = 0 ; i < corners0 . size ; i ++ ) { ChessboardCorner c0 = corners0 . get ( i ) ; nnSearch . findNearest ( c0 , radius , 5 , nnResults ) ; // TODO does it ever find multiple matches? // Could make this smarter by looking at the orientation too for ( int j = 0 ; j < nnResults . size ; j ++ ) { ChessboardCorner c1 = nnResults . get ( j ) . point ; // if the current one wasn't first then none of its children can be first if ( ! c0 . first ) { c1 . first = false ; } else if ( c1 . intensity < c0 . intensity ) { // keeping the one with the best intensity score seems to help. Formally test this idea c1 . first = false ; } else { c0 . first = false ; } } } } | Finds corners in list 1 which match corners in list 0 . If the feature in list 0 has already been seen then the feature in list 1 will be marked as seen . Otherwise the feature which is the most intense is marked as first . | 273 | 48 |
27,289 | @ Override public void processImage ( int sourceID , long frameID , final BufferedImage buffered , ImageBase input ) { System . out . flush ( ) ; synchronized ( bufferedImageLock ) { original = ConvertBufferedImage . checkCopy ( buffered , original ) ; work = ConvertBufferedImage . checkDeclare ( buffered , work ) ; } if ( saveRequested ) { saveInputImage ( ) ; saveRequested = false ; } final double timeInSeconds ; // TODO Copy all data that's visualized outside so that GUI doesn't lock synchronized ( this ) { long before = System . nanoTime ( ) ; detector . process ( ( T ) input ) ; long after = System . nanoTime ( ) ; timeInSeconds = ( after - before ) * 1e-9 ; } // create a local copy so that gui and processing thread's dont conflict synchronized ( detected ) { this . detected . reset ( ) ; for ( QrCode d : detector . getDetections ( ) ) { this . detected . grow ( ) . set ( d ) ; } this . failures . reset ( ) ; for ( QrCode d : detector . getFailures ( ) ) { if ( d . failureCause . ordinal ( ) >= QrCode . Failure . READING_BITS . ordinal ( ) ) this . failures . grow ( ) . set ( d ) ; } // System.out.println("Failed "+failures.size()); // for( QrCode qr : failures.toList() ) { // System.out.println(" cause "+qr.failureCause); // } } controlPanel . polygonPanel . thresholdPanel . updateHistogram ( ( T ) input ) ; SwingUtilities . invokeLater ( ( ) -> { controls . setProcessingTimeS ( timeInSeconds ) ; viewUpdated ( ) ; synchronized ( detected ) { controlPanel . messagePanel . updateList ( detected . toList ( ) , failures . toList ( ) ) ; } } ) ; } | Override this function so that it doesn t threshold the image twice | 432 | 12 |
27,290 | public boolean computeHomography ( CalibrationObservation observedPoints ) { if ( observedPoints . size ( ) < 4 ) throw new IllegalArgumentException ( "At least 4 points needed in each set of observations. " + " Filter these first please" ) ; List < AssociatedPair > pairs = new ArrayList <> ( ) ; for ( int i = 0 ; i < observedPoints . size ( ) ; i ++ ) { int which = observedPoints . get ( i ) . index ; Point2D_F64 obs = observedPoints . get ( i ) ; pairs . add ( new AssociatedPair ( worldPoints . get ( which ) , obs , true ) ) ; } if ( ! computeHomography . process ( pairs , found ) ) return false ; // todo do non-linear refinement. Take advantage of coordinates being fixed return true ; } | Computes the homography from a list of detected grid points in the image . The order of the grid points is important and must follow the expected row major starting at the top left . | 180 | 37 |
27,291 | public synchronized void recycle ( float [ ] array ) { if ( array . length != length ) { throw new IllegalArgumentException ( "Unexpected array length. Expected " + length + " found " + array . length ) ; } storage . add ( array ) ; } | Adds the array to storage . if the array length is unexpected an exception is thrown | 57 | 16 |
27,292 | private void visualizeResults ( SceneStructureMetric structure , List < BufferedImage > colorImages ) { List < Point3D_F64 > cloudXyz = new ArrayList <> ( ) ; GrowQueue_I32 cloudRgb = new GrowQueue_I32 ( ) ; Point3D_F64 world = new Point3D_F64 ( ) ; Point3D_F64 camera = new Point3D_F64 ( ) ; Point2D_F64 pixel = new Point2D_F64 ( ) ; for ( int i = 0 ; i < structure . points . length ; i ++ ) { // Get 3D location SceneStructureMetric . Point p = structure . points [ i ] ; p . get ( world ) ; // Project point into an arbitrary view for ( int j = 0 ; j < p . views . size ; j ++ ) { int viewIdx = p . views . get ( j ) ; SePointOps_F64 . transform ( structure . views [ viewIdx ] . worldToView , world , camera ) ; int cameraIdx = structure . views [ viewIdx ] . camera ; structure . cameras [ cameraIdx ] . model . project ( camera . x , camera . y , camera . z , pixel ) ; // Get the points color BufferedImage image = colorImages . get ( viewIdx ) ; int x = ( int ) pixel . x ; int y = ( int ) pixel . y ; // After optimization it might have been moved out of the camera's original FOV. // hopefully this isn't too common if ( x < 0 || y < 0 || x >= image . getWidth ( ) || y >= image . getHeight ( ) ) continue ; cloudXyz . add ( world . copy ( ) ) ; cloudRgb . add ( image . getRGB ( ( int ) pixel . x , ( int ) pixel . y ) ) ; break ; } } PointCloudViewer viewer = VisualizeData . createPointCloudViewer ( ) ; viewer . setTranslationStep ( 0.05 ) ; viewer . addCloud ( cloudXyz , cloudRgb . data ) ; viewer . setCameraHFov ( UtilAngle . radian ( 60 ) ) ; SwingUtilities . invokeLater ( ( ) -> { viewer . getComponent ( ) . setPreferredSize ( new Dimension ( 500 , 500 ) ) ; ShowImages . showWindow ( viewer . getComponent ( ) , "Reconstruction Points" , true ) ; } ) ; } | Opens a window showing the found point cloud . Points are colorized using the pixel value inside one of the input images | 534 | 24 |
27,293 | public static void decodeFormatMessage ( int message , QrCode qr ) { int error = message >> 3 ; qr . error = QrCode . ErrorLevel . lookup ( error ) ; qr . mask = QrCodeMaskPattern . lookupMask ( message & 0x07 ) ; } | Assumes that the format message has no errors in it and decodes its data and saves it into the qr code | 63 | 24 |
27,294 | public static int correctDCH ( int N , int messageNoMask , int generator , int totalBits , int dataBits ) { int bestHamming = 255 ; int bestMessage = - 1 ; int errorBits = totalBits - dataBits ; // exhaustively check all possibilities for ( int i = 0 ; i < N ; i ++ ) { int test = i << errorBits ; test = test ^ bitPolyModulus ( test , generator , totalBits , dataBits ) ; int distance = DescriptorDistance . hamming ( test ^ messageNoMask ) ; // see if it found a better match if ( distance < bestHamming ) { bestHamming = distance ; bestMessage = i ; } else if ( distance == bestHamming ) { // ambiguous so reject bestMessage = - 1 ; } } return bestMessage ; } | Applies a brute force algorithm to find the message which has the smallest hamming distance . if two messages have the same distance - 1 is returned . | 182 | 30 |
27,295 | private static void displayResults ( BufferedImage orig , Planar < GrayF32 > distortedImg , ImageDistort allInside , ImageDistort fullView ) { // render the results Planar < GrayF32 > undistortedImg = new Planar <> ( GrayF32 . class , distortedImg . getWidth ( ) , distortedImg . getHeight ( ) , distortedImg . getNumBands ( ) ) ; allInside . apply ( distortedImg , undistortedImg ) ; BufferedImage out1 = ConvertBufferedImage . convertTo ( undistortedImg , null , true ) ; fullView . apply ( distortedImg , undistortedImg ) ; BufferedImage out2 = ConvertBufferedImage . convertTo ( undistortedImg , null , true ) ; // display in a single window where the user can easily switch between images ListDisplayPanel panel = new ListDisplayPanel ( ) ; panel . addItem ( new ImagePanel ( orig ) , "Original" ) ; panel . addItem ( new ImagePanel ( out1 ) , "Undistorted All Inside" ) ; panel . addItem ( new ImagePanel ( out2 ) , "Undistorted Full View" ) ; ShowImages . showWindow ( panel , "Removing Lens Distortion" , true ) ; } | Displays results in a window for easy comparison .. | 283 | 10 |
27,296 | public boolean process ( double sampleRadius , Quadrilateral_F64 input ) { work . set ( input ) ; samples . reset ( ) ; estimator . process ( work , false ) ; estimator . getWorldToCamera ( ) . invert ( referenceCameraToWorld ) ; samples . reset ( ) ; createSamples ( sampleRadius , work . a , input . a ) ; createSamples ( sampleRadius , work . b , input . b ) ; createSamples ( sampleRadius , work . c , input . c ) ; createSamples ( sampleRadius , work . d , input . d ) ; if ( samples . size ( ) < 10 ) return false ; maxLocation = 0 ; maxOrientation = 0 ; for ( int i = 0 ; i < samples . size ( ) ; i ++ ) { referenceCameraToWorld . concat ( samples . get ( i ) , difference ) ; ConvertRotation3D_F64 . matrixToRodrigues ( difference . getR ( ) , rodrigues ) ; double theta = Math . abs ( rodrigues . theta ) ; double d = difference . getT ( ) . norm ( ) ; if ( theta > maxOrientation ) { maxOrientation = theta ; } if ( d > maxLocation ) { maxLocation = d ; } } return true ; } | Processes the observation and generates a stability estimate | 292 | 9 |
27,297 | private void createSamples ( double sampleRadius , Point2D_F64 workPoint , Point2D_F64 originalPoint ) { workPoint . x = originalPoint . x + sampleRadius ; if ( estimator . process ( work , false ) ) { samples . grow ( ) . set ( estimator . getWorldToCamera ( ) ) ; } workPoint . x = originalPoint . x - sampleRadius ; if ( estimator . process ( work , false ) ) { samples . grow ( ) . set ( estimator . getWorldToCamera ( ) ) ; } workPoint . x = originalPoint . x ; workPoint . y = originalPoint . y + sampleRadius ; if ( estimator . process ( work , false ) ) { samples . grow ( ) . set ( estimator . getWorldToCamera ( ) ) ; } workPoint . y = originalPoint . y - sampleRadius ; if ( estimator . process ( work , false ) ) { samples . grow ( ) . set ( estimator . getWorldToCamera ( ) ) ; } workPoint . set ( originalPoint ) ; } | Samples around the provided corner + - in x and y directions | 238 | 13 |
27,298 | public void initialLearning ( Rectangle2D_F64 targetRegion , FastQueue < ImageRectangle > cascadeRegions ) { storageMetric . reset ( ) ; fernNegative . clear ( ) ; // learn the initial descriptor TldHelperFunctions . convertRegion ( targetRegion , targetRegion_I32 ) ; // select the variance the first time using user selected region variance . selectThreshold ( targetRegion_I32 ) ; // add positive examples template . addDescriptor ( true , targetRegion_I32 ) ; fern . learnFernNoise ( true , targetRegion_I32 ) ; // Find all the regions which can be used to learn a negative descriptor for ( int i = 0 ; i < cascadeRegions . size ; i ++ ) { ImageRectangle r = cascadeRegions . get ( i ) ; // see if it passes the variance test if ( ! variance . checkVariance ( r ) ) continue ; // learn features far away from the target region double overlap = helper . computeOverlap ( targetRegion_I32 , r ) ; if ( overlap > config . overlapLower ) continue ; fernNegative . add ( r ) ; } // randomize which regions are used // Collections.shuffle(fernNegative,rand); int N = fernNegative . size ( ) ; //Math.min(config.numNegativeFerns,fernNegative.size()); for ( int i = 0 ; i < N ; i ++ ) { fern . learnFern ( false , fernNegative . get ( i ) ) ; } // run detection algorithm and if there is an ambiguous solution mark it as not target detection . detectionCascade ( cascadeRegions ) ; learnAmbiguousNegative ( targetRegion ) ; } | Select positive and negative examples based on the region the user s initially selected region . The selected region is used as a positive example while all the other regions far away are used as negative examples . | 375 | 38 |
27,299 | public void updateLearning ( Rectangle2D_F64 targetRegion ) { storageMetric . reset ( ) ; // learn the initial descriptor TldHelperFunctions . convertRegion ( targetRegion , targetRegion_I32 ) ; template . addDescriptor ( true , targetRegion_I32 ) ; fern . learnFernNoise ( true , targetRegion_I32 ) ; // mark only a few of the far away regions as negative. Marking all of them as negative is // computationally expensive FastQueue < TldRegionFernInfo > ferns = detection . getFernInfo ( ) ; int N = Math . min ( config . numNegativeFerns , ferns . size ) ; for ( int i = 0 ; i < N ; i ++ ) { int index = rand . nextInt ( ferns . size ) ; TldRegionFernInfo f = ferns . get ( index ) ; // no need to check variance here since the detector already did it // learn features far away from the target region double overlap = helper . computeOverlap ( targetRegion_I32 , f . r ) ; if ( overlap > config . overlapLower ) continue ; fern . learnFern ( false , f . r ) ; } learnAmbiguousNegative ( targetRegion ) ; } | Updates learning using the latest tracking results . | 279 | 9 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.