idx
int64
0
41.2k
question
stringlengths
74
4.04k
target
stringlengths
7
750
27,000
/**
 * Computes the average pixel location of the detected calibration points and
 * writes it into 'location'. This will NOT be the same as the geometric center.
 *
 * @param which unused by this implementation
 * @param location (output) average location of detected points
 */
public void getCenter( int which , Point2D_F64 location ) {
    CalibrationObservation view = detector.getDetectedPoints();
    double sumX = 0, sumY = 0;
    for (int i = 0; i < view.size(); i++) {
        PointIndex2D_F64 p = view.get(i);
        sumX += p.x;
        sumY += p.y;
    }
    location.x = sumX / view.size();
    location.y = sumY / view.size();
}
Computes the average location of the detected points and stores it in the output. This will NOT be the same as the geometric center .
27,001
/**
 * Specifies the linear constraints applied to the calibration solution.
 *
 * @param zeroSkew if true the skew is assumed to be zero
 * @param principlePointOrigin if true the principle point is at the origin
 * @param knownAspect if true the aspect ratio is known; requires zeroSkew
 * @param aspect the known aspect ratio
 * @throws IllegalArgumentException if knownAspect is set without zeroSkew
 */
public void setConstraints( boolean zeroSkew , boolean principlePointOrigin , boolean knownAspect , double aspect ) {
    if (knownAspect && !zeroSkew)
        throw new IllegalArgumentException("If aspect is known then skew must be zero");

    this.zeroSkew = zeroSkew;
    this.principlePointOrigin = principlePointOrigin;
    this.knownAspectRatio = knownAspect;
    this.aspectRatio = aspect;

    // Build the list of element indexes which are not constrained to zero.
    // Indexes 2 and 4 drop out when the principle point is at the origin,
    // index 1 drops out when skew is zero.
    notZeros.reset();
    for (int i = 0; i < 6; i++) {
        if (principlePointOrigin && (i == 2 || i == 4))
            continue;
        if (zeroSkew && i == 1)
            continue;
        notZeros.add(i);
    }
}
Specifies linear constraints
27,002
/**
 * Unpacks the 6 unique elements of a symmetric 3x3 matrix stored in 'nv'
 * into W0, mirroring the off-diagonal terms across the diagonal.
 */
void extractReferenceW( DMatrixRMaj nv ) {
    double[] d = nv.data;
    // diagonal terms
    W0.a11 = d[0];
    W0.a22 = d[3];
    W0.a33 = d[5];
    // symmetric off-diagonal terms
    W0.a12 = W0.a21 = d[1];
    W0.a13 = W0.a31 = d[2];
    W0.a23 = W0.a32 = d[4];
}
Extracts calibration for the reference frame
27,003
/**
 * Converts W into a pinhole camera model by finding its Cholesky decomposition.
 * Constraint flags (known aspect ratio, zero skew, principle point at origin)
 * override the values extracted from the factorization.
 *
 * @param w symmetric 3x3 matrix being decomposed
 * @param c (output) pinhole camera parameters
 */
void convertW( Homography2D_F64 w , CameraPinhole c ) {
    tmp.set(w);
    // normalize so element (3,3) is one before factorizing
    CommonOps_DDF3.divide(tmp, tmp.a33);
    // upper-triangular Cholesky factor computed in place
    CommonOps_DDF3.cholU(tmp);
    // K = inverse of the factor, normalized so K(3,3) = 1
    CommonOps_DDF3.invert(tmp, K);
    CommonOps_DDF3.divide(K, K.a33);
    c.fx = K.a11;
    // when the aspect ratio is known, average the two focal length estimates
    c.fy = knownAspectRatio ? (K.a22 + c.fx * aspectRatio) / 2.0 : K.a22;
    c.skew = zeroSkew ? 0 : K.a12;
    c.cx = principlePointOrigin ? 0 : K.a13;
    c.cy = principlePointOrigin ? 0 : K.a23;
}
Converts W into a pinhole camera model by finding the cholesky decomposition
27,004
/**
 * Extracts calibration for a non-reference view by transferring the reference
 * W0: Wi = Hinv^T * W0 * Hinv, then converting Wi into pinhole parameters.
 */
void extractCalibration( Homography2D_F64 Hinv , CameraPinhole c ) {
    CommonOps_DDF3.multTransA(Hinv, W0, tmp); // tmp = Hinv^T * W0
    CommonOps_DDF3.mult(tmp, Hinv, Wi);       // Wi  = tmp * Hinv
    convertW(Wi, c);
}
Extracts calibration for the non - reference frames
27,005
/**
 * Scales each homography so its determinant is one, then inverts it and
 * stores the result internally.
 *
 * @param homography0toI homographies from the reference view to view i
 * @return true if every homography could be inverted, false otherwise
 */
public boolean computeInverseH( List<Homography2D_F64> homography0toI ) {
    listHInv.reset();
    for (int idx = 0; idx < homography0toI.size(); idx++) {
        Homography2D_F64 H = homography0toI.get(idx);
        Homography2D_F64 Hinv = listHInv.grow();

        // divide by the signed cube root of the determinant so det becomes one
        double d = CommonOps_DDF3.det(H);
        double scale = d < 0 ? -Math.pow(-d, 1.0 / 3) : Math.pow(d, 1.0 / 3);
        CommonOps_DDF3.divide(H, scale, Hinv);

        // invert in place; abort if the matrix is singular
        if (!CommonOps_DDF3.invert(Hinv, Hinv))
            return false;
    }
    return true;
}
Ensures the determinant is one then inverts the homography .
27,006
/**
 * Starts the camera using the provided TextureView for display. The camera
 * might not start immediately; there can be a delay.
 */
protected void startCameraTexture( TextureView view ) {
    if (verbose)
        Log.i(TAG, "startCamera(TextureView=" + (view != null) + ")");
    mView = null;
    mTextureView = view;
    mTextureView.setSurfaceTextureListener(mSurfaceTextureListener);
}
After this function is called the camera will be started . It might not start immediately and there can be a delay .
27,007
/**
 * Override to do custom configuration of the camera's settings. By default the
 * camera is put into continuous-video autofocus with auto-exposure enabled.
 */
protected void configureCamera( CameraDevice device , CameraCharacteristics characteristics , CaptureRequest.Builder captureRequestBuilder ) {
    if (verbose)
        Log.i(TAG, "configureCamera() default function");
    captureRequestBuilder.set(CaptureRequest.CONTROL_AF_MODE, CaptureRequest.CONTROL_AF_MODE_CONTINUOUS_VIDEO);
    captureRequestBuilder.set(CaptureRequest.CONTROL_AE_MODE, CaptureRequest.CONTROL_AE_MODE_ON);
}
Override to do custom configuration of the camera's settings . By default the camera is put into auto mode .
27,008
/**
 * Decides if a camera should be used. By default any camera which is not
 * front-facing is accepted. Override to change which camera gets selected.
 *
 * @return true to select this camera
 */
protected boolean selectCamera( String id , CameraCharacteristics characteristics ) {
    if (verbose)
        Log.i(TAG, "selectCamera() default function");
    Integer facing = characteristics.get(CameraCharacteristics.LENS_FACING);
    // a camera with unknown facing is accepted
    if (facing == null)
        return true;
    return facing != CameraCharacteristics.LENS_FACING_FRONT;
}
By default this will select the back-facing camera . Override to change the camera it selects .
27,009
/**
 * Re-opens the camera with the same settings at the specified resolution.
 * Must be called from the main looper thread while the camera is open.
 * The lock is intentionally held across the asynchronous openCamera() call and
 * only released here on failure ('releaseLock' tracks ownership hand-off).
 *
 * NOTE(review): the exception message below reads "Attempted to
 * reopenCameraAtResolution main looper thread!" — it appears to be missing
 * "not on the"; confirm before changing a runtime string.
 */
protected void reopenCameraAtResolution( int cameraWidth , int cameraHeight ) {
    if (Looper.getMainLooper().getThread() != Thread.currentThread()) {
        throw new RuntimeException("Attempted to reopenCameraAtResolution main looper thread!");
    }
    // true while this method still owns the lock; cleared once openCamera()
    // has been issued and the callback becomes responsible for unlocking
    boolean releaseLock = true;
    open.mLock.lock();
    try {
        if (verbose)
            Log.i(TAG, "Reopening camera is null == " + (open.mCameraDevice == null) + " state=" + open.state + " activity=" + getClass().getSimpleName());
        // sanity checks: camera must currently be open with a live device
        if (open.state != CameraState.OPEN)
            throw new RuntimeException("BUG! Attempted to re-open camera when not open");
        if (null == open.mCameraDevice) {
            throw new RuntimeException("Can't re-open a closed camera");
        }
        closePreviewSession();
        open.mCameraSize = null;
        firstFrame = true;
        CameraManager manager = (CameraManager) getSystemService(Context.CAMERA_SERVICE);
        if (manager == null)
            throw new RuntimeException("Null camera manager");
        try {
            // new image reader at the requested resolution
            open.mPreviewReader = ImageReader.newInstance(cameraWidth, cameraHeight, ImageFormat.YUV_420_888, 2);
            open.mPreviewReader.setOnImageAvailableListener(onAvailableListener, mBackgroundHandler);
            configureTransform(viewWidth, viewHeight);
            manager.openCamera(open.cameraId, mStateCallback, null);
            releaseLock = false; // callback now owns the lock
        } catch (IllegalArgumentException e) {
            Toast.makeText(this, e.getMessage(), Toast.LENGTH_LONG).show();
            finish();
        } catch (CameraAccessException e) {
            e.printStackTrace();
        }
    } finally {
        if (releaseLock)
            open.mLock.unlock();
    }
}
Re - opens the camera with the same settings at the specified resolution . It is assumed that you know what you re doing and that this is a valid resolution .
27,010
/**
 * Closes the camera. Must be called from the main looper thread.
 *
 * @return true if the camera was open and this call actually closed it
 */
protected boolean closeCamera() {
    if (verbose)
        Log.i(TAG, "closeCamera() activity=" + getClass().getSimpleName());
    if (Looper.getMainLooper().getThread() != Thread.currentThread()) {
        throw new RuntimeException("Attempted to close camera not on the main looper thread!");
    }
    boolean closed = false;
    open.mLock.lock();
    try {
        if (verbose)
            Log.i(TAG, "closeCamera: camera=" + (open.mCameraDevice == null) + " state=" + open.state);
        closePreviewSession();
        if (open.state == CameraState.OPENING) {
            // open request still in flight: flag it so the open callback
            // performs the close; the device must not exist yet
            open.state = CameraState.CLOSING;
            if (open.mCameraDevice != null) {
                throw new RuntimeException("BUG! Camera is opening and should be null until opened");
            }
        } else {
            if (null != open.mCameraDevice) {
                closed = true;
                open.closeCamera();
            }
            open.state = CameraState.CLOSED;
            open.clearCamera();
        }
    } finally {
        open.mLock.unlock();
    }
    return closed;
}
Closes the camera . Returns true if the camera was not already closed and it closed it
27,011
/**
 * Starts the camera preview, targeting both the optional TextureView surface
 * and the ImageReader surface. Must run on the main looper thread. Silently
 * aborts if the camera is not fully open yet.
 */
private void startPreview() {
    if (Looper.getMainLooper().getThread() != Thread.currentThread()) {
        throw new RuntimeException("Not on main looper! Modify code to remove assumptions");
    }
    if (verbose) {
        Log.i(TAG, "startPreview()");
    }
    try {
        open.mLock.lock();
        if (null == open.mCameraDevice || null == open.mCameraSize) {
            Log.i(TAG, " aborting startPreview. Camera not open yet.");
            return;
        }
        closePreviewSession();
        open.surfaces = new ArrayList<>();
        open.mPreviewRequestBuilder = open.mCameraDevice.createCaptureRequest(CameraDevice.TEMPLATE_PREVIEW);
        // display surface is optional; only added when the TextureView is ready
        if (mTextureView != null && mTextureView.isAvailable()) {
            SurfaceTexture texture = mTextureView.getSurfaceTexture();
            assert texture != null;
            texture.setDefaultBufferSize(open.mCameraSize.getWidth(), open.mCameraSize.getHeight());
            Surface previewSurface = new Surface(texture);
            open.surfaces.add(previewSurface);
            open.mPreviewRequestBuilder.addTarget(previewSurface);
        }
        // image reader surface receives frames for processing
        Surface readerSurface = open.mPreviewReader.getSurface();
        open.surfaces.add(readerSurface);
        open.mPreviewRequestBuilder.addTarget(readerSurface);
        createCaptureSession();
    } catch (CameraAccessException e) {
        e.printStackTrace();
    } finally {
        open.mLock.unlock();
    }
}
Start the camera preview .
27,012
/**
 * Estimates nominal camera intrinsic parameters from the physical sensor
 * parameters reported by the camera2 API, scaled to the current preview
 * resolution. Falls back to a generic 60-degree FOV model when the
 * characteristics are unavailable.
 *
 * NOTE(review): in the fallback branch the result of createIntrinsic() is
 * discarded, so 'intrinsic' is left unmodified — this looks like a bug;
 * confirm whether the result should be copied into 'intrinsic'.
 *
 * @param intrinsic (output) estimated pinhole parameters
 */
public void cameraIntrinsicNominal( CameraPinhole intrinsic ) {
    open.mLock.lock();
    try {
        if (open.mCameraCharacterstics != null) {
            SizeF physicalSize = open.mCameraCharacterstics.get(CameraCharacteristics.SENSOR_INFO_PHYSICAL_SIZE);
            Rect activeSize = open.mCameraCharacterstics.get(CameraCharacteristics.SENSOR_INFO_ACTIVE_ARRAY_SIZE);
            Size pixelSize = open.mCameraCharacterstics.get(CameraCharacteristics.SENSOR_INFO_PIXEL_ARRAY_SIZE);
            float[] focalLengths = open.mCameraCharacterstics.get(CameraCharacteristics.LENS_INFO_AVAILABLE_FOCAL_LENGTHS);
            if (focalLengths != null && focalLengths.length > 0 && physicalSize != null && activeSize != null && pixelSize != null) {
                float fl = focalLengths[0];
                // mm-to-pixel conversion factors for the full sensor
                float widthToPixel = pixelSize.getWidth() / physicalSize.getWidth();
                float heightToPixel = pixelSize.getHeight() / physicalSize.getHeight();
                // scale from active sensor array to the preview resolution
                float s = open.mCameraSize.getWidth() / (float) activeSize.width();
                intrinsic.fx = fl * widthToPixel * s;
                intrinsic.fy = fl * heightToPixel * s;
                intrinsic.skew = 0;
                intrinsic.cx = activeSize.centerX() * s;
                intrinsic.cy = activeSize.centerY() * s;
                intrinsic.width = open.mCameraSize.getWidth();
                intrinsic.height = open.mCameraSize.getHeight();
                return;
            }
        }
        // fallback: assume a 60 degree field of view (see NOTE above)
        PerspectiveOps.createIntrinsic(open.mCameraSize.getWidth(), open.mCameraSize.getHeight(), UtilAngle.radian(60));
    } finally {
        open.mLock.unlock();
    }
}
Returns the camera intrinsic parameters estimated from the physical parameters returned by the camera2 API
27,013
/**
 * Adjusts the display density so that sizes specified in camera-image pixels
 * (e.g. fonts or stroke widths) scale correctly when drawn at screen
 * resolution.
 */
private float displayDensityAdjusted() {
    open.mLock.lock();
    try {
        if (open.mCameraSize == null)
            return displayMetrics.density;

        int rotation = getWindowManager().getDefaultDisplay().getRotation();
        boolean unrotated = rotation == 0 || rotation == 2;
        int screenWidth = unrotated ? displayMetrics.widthPixels : displayMetrics.heightPixels;

        boolean sensorUpright = open.mSensorOrientation == 0 || open.mSensorOrientation == 180;
        int cameraWidth = sensorUpright ? open.mCameraSize.getWidth() : open.mCameraSize.getHeight();

        return displayMetrics.density * cameraWidth / screenWidth;
    } finally {
        open.mLock.unlock();
    }
}
Sometimes the size of a font or stroke needs to be specified in the input image but then gets scaled to image resolution . This compensates for that .
27,014
/**
 * WTA (winner-take-all) algorithm which computes disparity on a sparse
 * per-pixel basis as requested. Supports GrayU8 and GrayF32 images.
 *
 * @param maxPerPixelError maximum allowed error per pixel; scaled by region
 *        area into a total region error threshold
 * @param subpixelInterpolation if true disparity is interpolated to sub-pixel
 * @throws RuntimeException if the image type is not supported
 */
public static <T extends ImageGray<T>> StereoDisparitySparse<T> regionSparseWta( int minDisparity , int maxDisparity , int regionRadiusX , int regionRadiusY , double maxPerPixelError , double texture , boolean subpixelInterpolation , Class<T> imageType ) {
    // convert per-pixel error into total error over the matching region
    double maxError = (regionRadiusX * 2 + 1) * (regionRadiusY * 2 + 1) * maxPerPixelError;
    if (imageType == GrayU8.class) {
        DisparitySparseSelect<int[]> select;
        if (subpixelInterpolation)
            select = selectDisparitySparseSubpixel_S32((int) maxError, texture);
        else
            select = selectDisparitySparse_S32((int) maxError, texture);
        DisparitySparseScoreSadRect<int[], GrayU8> score = scoreDisparitySparseSadRect_U8(minDisparity, maxDisparity, regionRadiusX, regionRadiusY);
        return new WrapDisparitySparseSadRect(score, select);
    } else if (imageType == GrayF32.class) {
        DisparitySparseSelect<float[]> select;
        if (subpixelInterpolation)
            select = selectDisparitySparseSubpixel_F32((int) maxError, texture);
        else
            select = selectDisparitySparse_F32((int) maxError, texture);
        DisparitySparseScoreSadRect<float[], GrayF32> score = scoreDisparitySparseSadRect_F32(minDisparity, maxDisparity, regionRadiusX, regionRadiusY);
        return new WrapDisparitySparseSadRect(score, select);
    } else
        throw new RuntimeException("Image type not supported: " + imageType.getSimpleName());
}
WTA algorithm that computes disparity on a sparse per - pixel basis as requested .
27,015
/**
 * Adds the estimated 3D location of a feature, storing its normalized image
 * coordinate (x/z, y/z).
 */
public void addPoint( float x , float y , float z ) {
    float normX = x / z;
    float normY = y / z;
    norm.grow().set(normX, normY);
}
Adds the estimated 3D location of a feature .
27,016
/**
 * Computes the worst-case angular spread for how the features are laid out,
 * derived from the smallest eigenvalue of their covariance.
 */
public void process() {
    computeCovarince();
    float eigenvalue = smallestEigenvalue();
    double stdev = Math.sqrt(eigenvalue);
    // angles at one standard deviation to either side of the mean
    double angleLow = Math.atan2(1.0, sigmas * (meanX - stdev));
    double angleHigh = Math.atan2(1.0, sigmas * (meanX + stdev));
    spread = Math.abs(angleHigh - angleLow);
}
Computes the worst case spread for how features are laid out
27,017
/**
 * VGG-like network trained on the CIFAR10 dataset.
 *
 * @return the classifier together with the URL its model can be downloaded from
 */
public static ClassifierAndSource vgg_cifar10() {
    ClassifierAndSource ret = new ClassifierAndSource();
    ret.data0 = new ImageClassifierVggCifar10();
    List<String> sources = new ArrayList<>();
    sources.add("http://boofcv.org/notwiki/largefiles/likevgg_cifar10.zip");
    ret.data1 = sources;
    return ret;
}
VGG trained on CIFAR10 data
27,018
/**
 * Network-in-Network model trained on the ImageNet dataset.
 *
 * @return the classifier together with the URL its model can be downloaded from
 */
public static ClassifierAndSource nin_imagenet() {
    ClassifierAndSource ret = new ClassifierAndSource();
    ret.data0 = new ImageClassifierNiNImageNet();
    List<String> sources = new ArrayList<>();
    sources.add("http://boofcv.org/notwiki/largefiles/nin_imagenet.zip");
    ret.data1 = sources;
    return ret;
}
NIN trained on ImageNet data
27,019
/**
 * Launches the class in a new JVM with the provided arguments and blocks until
 * the process stops, is killed, or is declared frozen.
 *
 * Fixes: the stream readers were never closed (resource leak) and an
 * InterruptedException no longer discards the thread's interrupt status.
 *
 * @param mainClass class whose main() is invoked in the slave JVM
 * @param args arguments passed to the slave
 * @return how the slave process exited
 */
public Exit launch( Class mainClass , String ... args ) {
    jvmArgs = configureArguments(mainClass, args);
    try {
        Runtime rt = Runtime.getRuntime();
        Process pr = rt.exec(jvmArgs);
        // give the slave JVM a moment to start before attaching to its streams
        Thread.sleep(500);
        try (BufferedReader input = new BufferedReader(new InputStreamReader(pr.getInputStream()));
             BufferedReader error = new BufferedReader(new InputStreamReader(pr.getErrorStream()))) {
            if (!monitorSlave(pr, input, error)) {
                return killRequested ? Exit.REQUESTED : Exit.FROZEN;
            }
            return pr.exitValue() != 0 ? Exit.RETURN_NOT_ZERO : Exit.NORMAL;
        }
    } catch (IOException e) {
        throw new RuntimeException(e);
    } catch (InterruptedException e) {
        Thread.currentThread().interrupt(); // preserve interrupt status
        throw new RuntimeException(e);
    }
}
Launches the class with the provided arguments . Blocks until the process stops .
27,020
/**
 * Prints the standard out and standard error streams from the slave process and
 * checks its health, polling until the slave has finished, is killed, or is
 * declared frozen. Also watches stdin so the user can quit by pressing 'q'.
 *
 * @return true on a normal exit; false if frozen or a kill was requested
 */
private boolean monitorSlave( Process pr , BufferedReader input , BufferedReader error ) throws IOException , InterruptedException {
    // discard any input typed before monitoring started
    System.in.skip(System.in.available());
    boolean frozen = false;
    long startTime = System.currentTimeMillis();
    long lastAliveMessage = startTime;
    for (;;) {
        // allow the user to abort everything with 'q'
        while (System.in.available() > 0) {
            if (System.in.read() == 'q') {
                System.out.println("User requested for the application to quit by pressing 'q'");
                System.exit(0);
            }
        }
        synchronized (streamLock) {
            printBuffer(error, printErr);
        }
        if (input.ready()) {
            synchronized (streamLock) {
                printBuffer(input, printOut);
            }
        } else {
            // nothing to read; avoid busy waiting
            Thread.sleep(500);
        }
        try {
            // throws if the process is still running
            pr.exitValue();
            break;
        } catch (IllegalThreadStateException e) {
            if (killRequested) {
                pr.destroy();
                break;
            }
            // frozenTime <= 0 disables the freeze watchdog
            if (frozenTime > 0 && System.currentTimeMillis() - startTime > frozenTime) {
                pr.destroy();
                frozen = true;
                break;
            }
            // heartbeat message once a minute
            if (System.currentTimeMillis() - lastAliveMessage > 60000) {
                System.out.println("\nMaster is still alive: " + new Date() + " Press 'q' and enter to quit.");
                lastAliveMessage = System.currentTimeMillis();
            }
        }
    }
    // flush whatever is left in the streams after exit
    synchronized (streamLock) {
        printBuffer(error, printErr);
        printBuffer(input, printOut);
    }
    durationMilli = System.currentTimeMillis() - startTime;
    return !frozen && !killRequested;
}
Prints the standard out and standard error from the slave and checks its health . Exits if the slave has finished or is declared frozen .
27,021
/**
 * Computes the Gaussian weight applied to each pixel in the block. Weights are
 * normalized so the largest weight is one.
 */
protected void computeWeightBlockPixels() {
    int rows = cellsPerBlockY * pixelsPerCell;
    int cols = cellsPerBlockX * pixelsPerCell;
    weights = new double[rows * cols];

    int radiusRow = rows / 2;
    int radiusCol = cols / 2;
    // for even sizes shift by half a pixel so the gaussian is centered
    double offsetRow = (rows % 2 == 0) ? 0.5 : 0;
    double offsetCol = (cols % 2 == 0) ? 0.5 : 0;

    // fill the weight table and track its maximum in the same pass
    double peak = 0;
    int index = 0;
    for (int row = 0; row < rows; row++) {
        double pdfRow = UtilGaussian.computePDF(0, radiusRow, row - radiusRow + offsetRow);
        for (int col = 0; col < cols; col++) {
            double pdfCol = UtilGaussian.computePDF(0, radiusCol, col - radiusCol + offsetCol);
            double w = pdfCol * pdfRow;
            if (w > peak)
                peak = w;
            weights[index++] = w;
        }
    }
    // normalize so the maximum weight is exactly one
    for (int i = 0; i < weights.length; i++) {
        weights[i] /= peak;
    }
}
Compute gaussian weights applied to each pixel in the block
27,022
/**
 * Computes the gradient orientation and magnitude for every pixel.
 */
private void computePixelFeatures() {
    for (int y = 0; y < derivX.height; y++) {
        int idx = y * derivX.width;
        final int end = idx + derivX.width;
        while (idx < end) {
            float dx = derivX.data[idx];
            float dy = derivY.data[idx];
            // shift the angle so it lies in the range [0, pi)
            orientation.data[idx] = UtilAngle.atanSafe(dy, dx) + GrlConstants.F_PId2;
            magnitude.data[idx] = Math.sqrt(dx * dx + dy * dy);
            idx++;
        }
    }
}
Computes the orientation and magnitude of each pixel
27,023
/**
 * Adds the magnitude to the histogram at the specified cell and orientation.
 * Cells outside the block are silently ignored.
 */
void addToHistogram( int cellX , int cellY , int orientationIndex , double magnitude ) {
    boolean insideX = cellX >= 0 && cellX < cellsPerBlockX;
    boolean insideY = cellY >= 0 && cellY < cellsPerBlockY;
    if (insideX && insideY) {
        int cell = cellY * cellsPerBlockX + cellX;
        histogram[cell * orientationBins + orientationIndex] += magnitude;
    }
}
Adds the magnitude to the histogram at the specified cell and orientation
27,024
/**
 * Sets the data that is being interpolated.
 *
 * Fix: the hunt jump threshold used Math.min(1, size^0.25) which always
 * evaluates to 1 for size >= 1; per the Numerical Recipes (3rd ed.) errata for
 * Interp_base this should be Math.max. Search results are unaffected, only the
 * hunt-vs-bisect heuristic.
 *
 * @param x sample locations (assumed monotonic)
 * @param y function values at each sample location
 * @param size number of valid elements in x and y
 * @throws IllegalArgumentException if the arrays are smaller than size or
 *         there are fewer than M data points
 */
public void setInput( float x [] , float y [] , int size ) {
    if (x.length < size || y.length < size) {
        throw new IllegalArgumentException("Arrays too small for size.");
    }
    if (size < M) {
        throw new IllegalArgumentException("Not enough data points for M");
    }
    this.x = x;
    this.y = y;
    this.size = size;
    // threshold on index jumps which decides when hunting is worthwhile
    this.dj = Math.max(1, (int) Math.pow(size, 0.25));
    // true if the samples are in ascending order
    ascend = x[size - 1] >= x[0];
}
Sets the data that is being interpolated .
27,025
/**
 * Performs interpolation at the sample point. Locates the bracketing samples
 * with a localized hunt when enabled, otherwise a full bisection search, then
 * evaluates the interpolant.
 *
 * @param testX point at which the function is interpolated
 * @return the interpolated value
 */
public float process( float testX ) {
    if (doHunt) {
        hunt(testX);
    } else {
        bisectionSearch(testX, 0, size - 1);
    }
    return compute(testX);
}
Performs interpolation at the sample point .
27,026
/**
 * Speeds up locating the interpolation interval by expanding a bracket outward
 * from the previous result ('center') with exponentially growing steps, then
 * finishing with a bisection search inside the bracket (Numerical Recipes
 * style "hunt").
 *
 * NOTE(review): the '&& ascend' terms mean the early-exit tests only fire for
 * ascending data; for descending data the bracket degenerates to the full
 * array bounds — confirm this is the intended behavior.
 */
protected void hunt( float val ) {
    int lowerLimit = center;
    int upperLimit;
    int inc = 1;
    if (val >= x[lowerLimit] && ascend) {
        // hunt upward, doubling the step each iteration
        for (;;) {
            upperLimit = lowerLimit + inc;
            if (upperLimit >= size - 1) {
                upperLimit = size - 1;
                break;
            } else if (val < x[upperLimit] && ascend) {
                break; // bracketed
            } else {
                lowerLimit = upperLimit;
                inc += inc;
            }
        }
    } else {
        // hunt downward, doubling the step each iteration
        upperLimit = lowerLimit;
        for (;;) {
            lowerLimit = lowerLimit - inc;
            if (lowerLimit <= 0) {
                lowerLimit = 0;
                break;
            } else if (val >= x[lowerLimit] && ascend) {
                break; // bracketed
            } else {
                upperLimit = lowerLimit;
                inc += inc;
            }
        }
    }
    // refine inside the bracket
    bisectionSearch(val, lowerLimit, upperLimit);
}
To speed up finding the appropriate indexes to use in the interpolation it can use its previous results to search a smaller region than it would otherwise .
27,027
/**
 * Computes the image derivative along x and y using a Sobel kernel. The
 * one-pixel border of the output images is not written to.
 */
public static void process( GrayI orig , GrayI derivX , GrayI derivY ) {
    final int width = orig.getWidth();
    final int height = orig.getHeight();

    for (int y = 1; y < height - 1; y++) {
        for (int x = 1; x < width - 1; x++) {
            // bottom row minus top row
            int dy = (orig.get(x - 1, y + 1) + 2 * orig.get(x, y + 1) + orig.get(x + 1, y + 1))
                    - (orig.get(x - 1, y - 1) + 2 * orig.get(x, y - 1) + orig.get(x + 1, y - 1));
            // right column minus left column
            int dx = (orig.get(x + 1, y - 1) + 2 * orig.get(x + 1, y) + orig.get(x + 1, y + 1))
                    - (orig.get(x - 1, y - 1) + 2 * orig.get(x - 1, y) + orig.get(x - 1, y + 1));
            derivX.set(x, y, dx);
            derivY.set(x, y, dy);
        }
    }
}
Computes the derivative of orig along the x and y axes
27,028
/**
 * Lightly blurs the image, runs superpixel segmentation on it, then visualizes
 * the result.
 */
public static <T extends ImageBase<T>> void performSegmentation( ImageSuperpixels<T> alg , T color ) {
    // a slight blur reduces noise before segmenting
    GBlurImageOps.gaussian(color, color, 0.5, -1, null);

    GrayS32 labels = new GrayS32(color.width, color.height);
    alg.segment(color, labels);

    visualize(labels, color, alg.getTotalSuperpixels());
}
Segments and visualizes the image
27,029
/**
 * Visualizes the segmentation three ways: 1) each region filled with its mean
 * color, 2) region borders drawn in red over the input, 3) each region given a
 * random color. Results are shown in a window.
 *
 * @param pixelToRegion assignment of each pixel to a region id
 * @param color input color image
 * @param numSegments total number of regions
 */
public static <T extends ImageBase<T>> void visualize( GrayS32 pixelToRegion , T color , int numSegments ) {
    // compute the mean color inside each region
    ImageType<T> type = color.getImageType();
    ComputeRegionMeanColor<T> colorize = FactorySegmentationAlg.regionMeanColor(type);
    FastQueue<float[]> segmentColor = new ColorQueue_F32(type.getNumBands());
    segmentColor.resize(numSegments);
    GrowQueue_I32 regionMemberCount = new GrowQueue_I32();
    regionMemberCount.resize(numSegments);
    ImageSegmentationOps.countRegionPixels(pixelToRegion, numSegments, regionMemberCount.data);
    colorize.process(color, pixelToRegion, regionMemberCount, segmentColor);

    // render the three visualizations
    BufferedImage outColor = VisualizeRegions.regionsColor(pixelToRegion, segmentColor, null);
    BufferedImage outSegments = VisualizeRegions.regions(pixelToRegion, numSegments, null);
    BufferedImage outBorder = new BufferedImage(color.width, color.height, BufferedImage.TYPE_INT_RGB);
    ConvertBufferedImage.convertTo(color, outBorder, true);
    VisualizeRegions.regionBorders(pixelToRegion, 0xFF0000, outBorder);

    // display them side by side
    ListDisplayPanel gui = new ListDisplayPanel();
    gui.addImage(outColor, "Color of Segments");
    gui.addImage(outBorder, "Region Borders");
    gui.addImage(outSegments, "Regions");
    ShowImages.showWindow(gui, "Superpixels", true);
}
Visualizes results three ways . 1 ) Colorized segmented image where each region is given a random color . 2 ) Each pixel is assigned the mean color through out the region . 3 ) Black pixels represent the border between regions .
27,030
/**
 * Computes the dense disparity between two stereo images using a five-region
 * winner-take-all block matcher. The inputs must be rectified with lens
 * distortion removed.
 *
 * @param regionSize radius of the matching region
 * @return disparity image
 */
public static GrayU8 denseDisparity( GrayU8 rectLeft , GrayU8 rectRight , int regionSize , int minDisparity , int maxDisparity ) {
    StereoDisparity<GrayU8, GrayU8> matcher =
            FactoryStereoDisparity.regionWta(DisparityAlgorithms.RECT_FIVE,
                    minDisparity, maxDisparity, regionSize, regionSize,
                    25, 1, 0.2, GrayU8.class);
    matcher.process(rectLeft, rectRight);
    return matcher.getDisparity();
}
Computes the dense disparity between two stereo images . The input images must be rectified with lens distortion removed to work! Floating point images are also supported .
27,031
/**
 * Computes dense disparity to sub-pixel accuracy using a five-region
 * winner-take-all block matcher. Inputs must be rectified with lens distortion
 * removed.
 *
 * @param regionSize radius of the matching region
 * @return sub-pixel disparity image
 */
public static GrayF32 denseDisparitySubpixel( GrayU8 rectLeft , GrayU8 rectRight , int regionSize , int minDisparity , int maxDisparity ) {
    StereoDisparity<GrayU8, GrayF32> matcher =
            FactoryStereoDisparity.regionSubpixelWta(DisparityAlgorithms.RECT_FIVE,
                    minDisparity, maxDisparity, regionSize, regionSize,
                    25, 1, 0.2, GrayU8.class);
    matcher.process(rectLeft, rectRight);
    return matcher.getDisparity();
}
Same as above but compute disparity to within sub - pixel accuracy . The difference between the two is more apparent when a 3D point cloud is computed .
27,032
/**
 * Rectifies the input images using known stereo calibration so that epipolar
 * lines become horizontal, writing the rectified images into rectLeft and
 * rectRight.
 *
 * @return the rectification algorithm, which holds the rectification matrices
 */
public static RectifyCalibrated rectify( GrayU8 origLeft , GrayU8 origRight , StereoParameters param , GrayU8 rectLeft , GrayU8 rectRight ) {
    RectifyCalibrated rectifyAlg = RectifyImageOps.createCalibrated();
    // transform from left camera to right camera
    Se3_F64 leftToRight = param.getRightToLeft().invert(null);
    // intrinsic calibration matrices of both cameras
    DMatrixRMaj K1 = PerspectiveOps.pinholeToMatrix(param.getLeft(), (DMatrixRMaj) null);
    DMatrixRMaj K2 = PerspectiveOps.pinholeToMatrix(param.getRight(), (DMatrixRMaj) null);
    rectifyAlg.process(K1, new Se3_F64(), K2, leftToRight);
    DMatrixRMaj rect1 = rectifyAlg.getRect1();
    DMatrixRMaj rect2 = rectifyAlg.getRect2();
    DMatrixRMaj rectK = rectifyAlg.getCalibrationMatrix();
    // adjust so the entire left image is visible after rectification
    RectifyImageOps.allInsideLeft(param.left, rect1, rect2, rectK);
    // convert rectification transforms to 32-bit for the image distorters
    FMatrixRMaj rect1_F32 = new FMatrixRMaj(3, 3);
    FMatrixRMaj rect2_F32 = new FMatrixRMaj(3, 3);
    ConvertMatrixData.convert(rect1, rect1_F32);
    ConvertMatrixData.convert(rect2, rect2_F32);
    ImageDistort<GrayU8, GrayU8> imageDistortLeft = RectifyImageOps.rectifyImage(param.getLeft(), rect1_F32, BorderType.SKIP, origLeft.getImageType());
    ImageDistort<GrayU8, GrayU8> imageDistortRight = RectifyImageOps.rectifyImage(param.getRight(), rect2_F32, BorderType.SKIP, origRight.getImageType());
    imageDistortLeft.apply(origLeft, rectLeft);
    imageDistortRight.apply(origRight, rectRight);
    return rectifyAlg;
}
Rectified the input images using known calibration .
27,033
/**
 * Returns true if the min and max value have been set for every dimension,
 * i.e. no dimension still has the default (0, 0) range.
 */
public boolean isRangeSet() {
    for (int i = 0; i < getDimensions(); i++) {
        boolean unset = valueMin[i] == 0 && valueMax[i] == 0;
        if (unset)
            return false;
    }
    return true;
}
Returns true if the min and max value for each dimension has been set
27,034
/**
 * Specifies the minimum and maximum values for a specific dimension.
 *
 * @param dimension which dimension is being configured
 * @param min minimum value along that dimension
 * @param max maximum value along that dimension
 */
public void setRange( int dimension , double min , double max ) {
    valueMin[dimension] = min;
    valueMax[dimension] = max;
}
Specifies the minimum and maximum values for a specific dimension
27,035
/**
 * Returns the bin index for an integer value along one dimension. The range is
 * widened by 1 so the maximum integer value maps inside the last bin.
 */
public int getDimensionIndex( int dimension , int value ) {
    double min = valueMin[dimension];
    double max = valueMax[dimension];
    double span = max - min + 1.0;
    double fraction = (value - min) / span;
    return (int) (fraction * length[dimension]);
}
Given a value it returns the corresponding bin index in this histogram for integer values . Discretization is taken into account and 1 is added to the range .
27,036
/**
 * For an N-dimensional histogram, converts the N-D coordinate into the
 * corresponding index in the flat storage array using the stride table.
 */
public final int getIndex( int coordinate [] ) {
    int index = 0;
    for (int i = 0; i < coordinate.length; i++) {
        index += strides[i] * coordinate[i];
    }
    return index;
}
For a N - Dimensional histogram it will return the array index for the N - D coordinate
27,037
/**
 * Creates an exact copy of this histogram.
 *
 * Fix: previously only 'length.length' elements (the number of dimensions)
 * were copied instead of the entire value array, leaving almost all bins in
 * the copy at zero.
 *
 * @return a new histogram with identical contents
 */
public Histogram_F64 copy() {
    Histogram_F64 out = newInstance();
    System.arraycopy(value, 0, out.value, 0, value.length);
    return out;
}
Creates an exact copy of this histogram
27,038
/**
 * Refines the polygon fit by snapping its sides to image edges.
 *
 * @param input initial polygon estimate
 * @param output (output) refined polygon; must have the same number of sides
 * @return true if the refinement produced a change
 * @throws IllegalArgumentException if input and output sizes differ
 */
public boolean refine( Polygon2D_F64 input , Polygon2D_F64 output ) {
    if (input.size() != output.size())
        throw new IllegalArgumentException(
                "Input and output sides do not match. " + input.size() + " " + output.size());

    if (checkShapeTooSmall(input))
        return false;

    // lazily grow the work array of general line equations
    if (general.length < input.size()) {
        LineGeneral2D_F64[] larger = new LineGeneral2D_F64[input.size()];
        for (int i = 0; i < larger.length; i++) {
            larger[i] = new LineGeneral2D_F64();
        }
        general = larger;
    }

    return optimize(input, output);
}
Refines the fit a polygon by snapping it to the edges .
27,039
/**
 * Checks the distance between adjacent vertices. Returns true if any edge is
 * so short that it cannot be measured reliably.
 */
private boolean checkShapeTooSmall( Polygon2D_F64 input ) {
    double minLength = cornerOffset * 2 + 2;
    double minLengthSq = minLength * minLength;
    for (int i = 0; i < input.size(); i++) {
        Point2D_F64 a = input.get(i);
        Point2D_F64 b = input.get((i + 1) % input.size());
        if (a.distance2(b) < minLengthSq)
            return true;
    }
    return false;
}
Looks at the distance between each pair of adjacent vertices . If that distance is so small that the edge can't be measured then return true .
27,040
/**
 * Iteratively refines the polygon's side lines. Each iteration re-fits every
 * side independently, rejecting a fit if it fails or moves a corner more than
 * maxCornerChangePixel, and stops early once the corners move less than the
 * convergence tolerance.
 *
 * @param seed initial polygon estimate
 * @param current (output) refined polygon
 * @return true if at least one side was successfully changed
 */
protected boolean optimize( Polygon2D_F64 seed , Polygon2D_F64 current ) {
    previous.set(seed);
    // tolerance is compared against squared distances
    double convergeTol = convergeTolPixels * convergeTolPixels;
    // convert each polygon side into a general line equation
    for (int i = 0; i < seed.size(); i++) {
        int j = (i + 1) % seed.size();
        Point2D_F64 a = seed.get(i);
        Point2D_F64 b = seed.get(j);
        UtilLine2D_F64.convert(a, b, general[i]);
    }
    boolean changed = false;
    for (int iteration = 0; iteration < maxIterations; iteration++) {
        // refine each side, keeping the previous line if the fit fails
        for (int i = 0; i < previous.size(); i++) {
            int j = (i + 1) % previous.size();
            Point2D_F64 a = previous.get(i);
            Point2D_F64 b = previous.get(j);
            before.set(general[i]); // saved so a bad fit can be rolled back
            boolean failed = false;
            if (!optimize(a, b, general[i])) {
                failed = true;
            } else {
                // the refined line must intersect its neighbors near the
                // original corners; otherwise reject it
                int k = (i + previous.size() - 1) % previous.size();
                if (Intersection2D_F64.intersection(general[k], general[i], tempA) != null && Intersection2D_F64.intersection(general[i], general[j], tempB) != null) {
                    if (tempA.distance(a) > maxCornerChangePixel || tempB.distance(b) > maxCornerChangePixel) {
                        failed = true;
                    }
                } else {
                    failed = true;
                }
            }
            if (failed) {
                general[i].set(before); // roll back
            } else {
                changed = true;
            }
        }
        // rebuild the polygon from the refined lines
        if (!UtilShapePolygon.convert(general, current))
            return false;
        // converged when no corner moved more than the tolerance
        boolean converged = true;
        for (int i = 0; i < current.size(); i++) {
            if (current.get(i).distance2(previous.get(i)) > convergeTol) {
                converged = false;
                break;
            }
        }
        if (converged) {
            break;
        } else {
            previous.set(current);
        }
    }
    return changed;
}
Refines the initial line estimates iteratively , stopping when the corners converge or the maximum number of iterations is reached .
27,041
/**
 * Fits a line to the edge between the two corner points, after pulling the end
 * points in by the configured corner offset.
 *
 * @param found (output) the refined line
 * @return true if the edge fit succeeded
 */
protected boolean optimize( Point2D_F64 a , Point2D_F64 b , LineGeneral2D_F64 found ) {
    computeAdjustedEndPoints(a, b);
    return snapToEdge.refine(adjA, adjB, found);
}
Fits a line defined by the two points . When fitting the line the weight of the edge is used to determine how influential the point is
27,042
/**
 * Colors every watershed pixel (region value 0) red in the output image. When
 * radius > 0 each watershed pixel is dilated into a square of that radius.
 *
 * @param output image to draw into, created if null
 * @return the output image
 */
public static BufferedImage watersheds( GrayS32 segments , BufferedImage output , int radius ) {
    if (output == null)
        output = new BufferedImage(segments.width, segments.height, BufferedImage.TYPE_INT_RGB);

    for (int y = 0; y < segments.height; y++) {
        for (int x = 0; x < segments.width; x++) {
            if (segments.unsafe_get(x, y) != 0)
                continue;
            if (radius <= 0) {
                // mark just this pixel
                output.setRGB(x, y, 0xFF0000);
            } else {
                // mark a (2*radius+1) square centered on this pixel
                for (int dy = -radius; dy <= radius; dy++) {
                    int yy = y + dy;
                    for (int dx = -radius; dx <= radius; dx++) {
                        int xx = x + dx;
                        if (segments.isInBounds(xx, yy)) {
                            output.setRGB(xx, yy, 0xFF0000);
                        }
                    }
                }
            }
        }
    }
    return output;
}
Sets the pixels of each watershed as red in the output image . Watersheds have a value of 0
27,043
/**
 * Draws each region using a random color.
 *
 * @param pixelToRegion assignment of each pixel to a region id
 * @param numRegions total number of regions
 * @param output image to draw into, may be null
 * @return the rendered image
 */
public static BufferedImage regions( GrayS32 pixelToRegion , int numRegions , BufferedImage output ) {
    return VisualizeBinaryData.renderLabeled(pixelToRegion, numRegions, output);
}
Draws each region with a random color
27,044
/**
 * Randomly selects a point using a Gaussian distribution, rejecting samples
 * until one falls strictly inside the given radius of the origin.
 */
private static void randomGaussian( Random rand , double sigma , int radius , Point2D_I32 pt ) {
    int x, y;
    do {
        x = (int) (rand.nextGaussian() * sigma);
        y = (int) (rand.nextGaussian() * sigma);
    } while (Math.sqrt(x * x + y * y) >= radius);
    pt.set(x, y);
}
Randomly selects a point which is inside a circular region of the given radius using a Gaussian distribution .
27,045
/**
 * Creates a discrete image pyramid where a Gaussian is convolved across the
 * input prior to sub-sampling.
 *
 * @param scaleFactors scale factor of each pyramid layer
 * @param sigma Gaussian sigma used for the blur
 * @param radius radius of the blur kernel
 * @param saveOriginalReference if true the input image is referenced, not copied
 */
public static <T extends ImageBase<T>> PyramidDiscrete<T> discreteGaussian( int [] scaleFactors , double sigma , int radius , boolean saveOriginalReference , ImageType<T> imageType ) {
    // pick the 1D kernel type which matches the image's data type
    Class<Kernel1D> kernelType = FactoryKernel.getKernelType(imageType.getDataType(), 1);
    Kernel1D blurKernel = FactoryKernelGaussian.gaussian(kernelType, sigma, radius);
    return new PyramidDiscreteSampleBlur<>(blurKernel, sigma, imageType, saveOriginalReference, scaleFactors);
}
Creates an updater for discrete pyramids where a Gaussian is convolved across the input prior to sub - sampling .
27,046
/**
 * Creates a floating-point pyramid where each layer is blurred with the specified Gaussian
 * sigma and sub-sampled using bilinear interpolation.
 *
 * @param scaleFactors scale of each layer
 * @param sigmas Gaussian sigma applied to each layer
 * @param imageType type of image being processed
 * @return the configured pyramid
 */
public static <T extends ImageGray<T>> PyramidFloat<T> floatGaussian( double scaleFactors[] , double[] sigmas ,
																	  Class<T> imageType ) {
	InterpolatePixelS<T> interpolator = FactoryInterpolation.bilinearPixelS(imageType, BorderType.EXTENDED);
	return new PyramidFloatGaussianScale<>(interpolator, scaleFactors, sigmas, imageType);
}
Creates a float pyramid where each layer is blurred using a Gaussian with the specified sigma . Bilinear interpolation is used when sub - sampling .
27,047
/**
 * Evaluates the Lagrange interpolation polynomial at {@code sample} using the discrete
 * samples (x[i], y[i]) for i in [i0, i1]. The interval [i0, i1] selects both which samples
 * are used and the order of the approximation.
 *
 * @param sample point at which the function is interpolated
 * @param x sample locations (must be distinct over [i0, i1])
 * @param y function values at the sample locations
 * @param i0 first sample index, inclusive
 * @param i1 last sample index, inclusive
 * @return interpolated value at {@code sample}
 */
public static double process_F64( double sample , double x[] , double y[] , int i0 , int i1 ) {
	double sum = 0;
	for (int i = i0; i <= i1; i++) {
		// Lagrange basis polynomial: product of (sample - x[j]) / (x[i] - x[j]) over j != i
		double num = 1.0;
		double den = 1.0;
		double xi = x[i];
		for (int j = i0; j <= i1; j++) {
			if (j == i)
				continue;
			num *= sample - x[j];
			den *= xi - x[j];
		}
		sum += y[i]*(num/den);
	}
	return sum;
}
Using Lagrange's formula it interpolates the value of a function at the specified sample point given discrete samples . Which samples are used and the order of the approximation are given by i0 and i1 .
27,048
/**
 * Adds a polygon to the viewer. Each vertex is deep-copied, and insertion into the shared
 * list is synchronized, making this safe to call from the GUI thread.
 *
 * @param color color the polygon is rendered with
 * @param polygon vertices of the polygon
 */
public void add( Color color , Point3D_F64 ...polygon ) {
	Poly poly = new Poly(polygon.length, color);
	int idx = 0;
	for (Point3D_F64 vertex : polygon)
		poly.pts[idx++] = vertex.copy();
	synchronized (polygons) {
		polygons.add(poly);
	}
}
Adds a polygon to the viewer . GUI Thread safe .
27,049
/**
 * Returns the provided image if it has the requested width, height, and type; otherwise
 * declares and returns a new BufferedImage with those properties.
 *
 * @param width requested width
 * @param height requested height
 * @param image candidate image to reuse; may be null
 * @param type requested BufferedImage type
 * @return an image matching the requested shape and type
 */
public static BufferedImage checkDeclare( int width , int height , BufferedImage image , int type ) {
	boolean reusable = image != null
			&& image.getType() == type
			&& image.getWidth() == width
			&& image.getHeight() == height;
	return reusable ? image : new BufferedImage(width, height, type);
}
If the provided image does not have the same shape and same type a new one is declared and returned .
27,050
/**
 * Copies the original image into the output image. If the output is null or its shape/type
 * does not match, a new compatible image is created, filled with the original's data, and
 * returned instead.
 *
 * @param original image being copied; not modified
 * @param output candidate destination; may be null
 * @return the image containing the copy (either {@code output} or a new image)
 */
public static BufferedImage checkCopy( BufferedImage original , BufferedImage output ) {
	ColorModel cm = original.getColorModel();
	boolean premultiplied = cm.isAlphaPremultiplied();

	boolean compatible = output != null
			&& original.getWidth() == output.getWidth()
			&& original.getHeight() == output.getHeight()
			&& original.getType() == output.getType();

	if (!compatible) {
		// clone the raster so the returned image does not share pixel data with the original
		WritableRaster raster = original.copyData(original.getRaster().createCompatibleWritableRaster());
		return new BufferedImage(cm, raster, premultiplied, null);
	}
	original.copyData(output.getRaster());
	return output;
}
Copies the original image into the output image . If it can t do a copy a new image is created and returned
27,051
/**
 * Returns an image without an alpha channel. If the input has no alpha band it is returned
 * unmodified; otherwise a new TYPE_INT_RGB image is created and the RGB channels are drawn
 * into it.
 *
 * @param image input image
 * @return an image with three bands
 */
public static BufferedImage stripAlphaChannel( BufferedImage image ) {
	if (image.getRaster().getNumBands() != 4)
		return image;

	BufferedImage rgb = new BufferedImage(image.getWidth(), image.getHeight(), BufferedImage.TYPE_INT_RGB);
	rgb.createGraphics().drawImage(image, 0, 0, null);
	return rgb;
}
Returns an image which doesn t have an alpha channel . If the input image doesn t have an alpha channel to start then its returned as is . Otherwise a new image is created and the RGB channels are copied and the new image returned .
27,052
/**
 * Wraps a byte-backed BufferedImage as an InterleavedU8 without copying pixel data; the two
 * images share the same internal byte array.
 *
 * @param img input image whose raster must be a known byte format
 * @return interleaved view of the image's data
 * @throws IllegalArgumentException if the image is not backed by a byte raster
 */
public static InterleavedU8 extractInterleavedU8( BufferedImage img ) {
	DataBuffer buffer = img.getRaster().getDataBuffer();
	if (buffer.getDataType() != DataBuffer.TYPE_BYTE || !isKnownByteFormat(img))
		throw new IllegalArgumentException("Buffered image does not have an interleaved byte raster");

	WritableRaster raster = img.getRaster();
	InterleavedU8 wrapped = new InterleavedU8();
	wrapped.width = img.getWidth();
	wrapped.height = img.getHeight();
	wrapped.startIndex = ConvertRaster.getOffset(raster);
	wrapped.imageType.numBands = raster.getNumBands();
	wrapped.numBands = raster.getNumBands();
	wrapped.stride = ConvertRaster.stride(raster);
	wrapped.data = ((DataBufferByte)buffer).getData();
	// a non-zero start index means this view does not begin at the array's origin
	wrapped.subImage = wrapped.startIndex != 0;
	return wrapped;
}
For BufferedImage stored as a byte array internally it extracts an interleaved image . The input image and the returned image will both share the same internal data array . Using this function allows unnecessary memory copying to be avoided .
27,053
/**
 * Wraps a single-band byte-backed BufferedImage as a GrayU8 without copying pixel data;
 * the two images share the same internal byte array.
 *
 * @param img input image; must be a known single-channel byte format
 * @return gray-scale view of the image's data
 * @throws IllegalArgumentException if the raster is not a byte raster or has multiple bands
 */
public static GrayU8 extractGrayU8( BufferedImage img ) {
	WritableRaster raster = img.getRaster();
	DataBuffer buffer = raster.getDataBuffer();
	if (buffer.getDataType() != DataBuffer.TYPE_BYTE || !isKnownByteFormat(img))
		throw new IllegalArgumentException("Buffered image does not have a gray scale byte raster");
	if (raster.getNumBands() != 1)
		throw new IllegalArgumentException("Input image has more than one channel");

	GrayU8 wrapped = new GrayU8();
	wrapped.width = img.getWidth();
	wrapped.height = img.getHeight();
	wrapped.startIndex = ConvertRaster.getOffset(img.getRaster());
	wrapped.stride = ConvertRaster.stride(img.getRaster());
	wrapped.data = ((DataBufferByte)buffer).getData();
	return wrapped;
}
For BufferedImage stored as a byte array internally it extracts an image . The input image and the returned image will both share the same internal data array . Using this function allows unnecessary memory copying to be avoided .
27,054
/**
 * Converts a BufferedImage into a gray-scale image of the requested type. If {@code dst} is
 * provided it is used for output, otherwise a new image is created.
 *
 * @param src input buffered image
 * @param dst optional storage for the converted image; may be null
 * @param type requested image type (GrayU8, a GrayI16 subclass, or GrayF32)
 * @return the converted image
 * @throws IllegalArgumentException if the requested type is not supported
 */
public static <T extends ImageGray<T>> T convertFromSingle( BufferedImage src , T dst , Class<T> type ) {
	// dispatch on the requested type; checks are mutually exclusive so order is safe
	if (type == GrayU8.class)
		return (T)convertFrom(src, (GrayU8)dst);
	if (GrayI16.class.isAssignableFrom(type))
		return (T)convertFrom(src, (GrayI16)dst, (Class)type);
	if (type == GrayF32.class)
		return (T)convertFrom(src, (GrayF32)dst);
	throw new IllegalArgumentException("Unknown type " + type);
}
Converts a buffered image into an image of the specified type . In a dst image is provided it will be used for output otherwise a new image will be created .
27,055
/**
 * Renders a Swing component into a BufferedImage.
 *
 * @param comp component to render; its current size determines the image size when storage is null
 * @param storage optional output image; a new TYPE_INT_RGB image is created if null
 * @return the image the component was painted into
 */
public static BufferedImage convertTo( JComponent comp , BufferedImage storage ) {
	if (storage == null)
		storage = new BufferedImage(comp.getWidth(), comp.getHeight(), BufferedImage.TYPE_INT_RGB);
	comp.paintComponents(storage.createGraphics());
	return storage;
}
Draws the component into a BufferedImage .
27,056
/**
 * Returns a planar image whose color bands are ordered to match the destination
 * BufferedImage's internal band ordering. For TYPE_INT_RGB the ordering already matches and
 * {@code src} is returned as-is. Otherwise a shallow wrapper sharing the band arrays of
 * {@code src} is created (no pixel copy) and its bands are reordered by
 * {@code ConvertRaster.orderBandsBufferedFromRgb}.
 *
 * @param src planar image whose bands are assumed to be in RGB order
 * @param dst buffered image whose type dictates the required band ordering
 * @return a planar image referencing src's data with bands in dst's ordering
 */
public static Planar orderBandsIntoBuffered ( Planar src , BufferedImage dst ) { if ( dst . getType ( ) == BufferedImage . TYPE_INT_RGB ) return src ; Planar tmp = new Planar ( src . type , src . getNumBands ( ) ) ; tmp . width = src . width ; tmp . height = src . height ; tmp . stride = src . stride ; tmp . startIndex = src . startIndex ; for ( int i = 0 ; i < src . getNumBands ( ) ; i ++ ) { tmp . bands [ i ] = src . bands [ i ] ; } ConvertRaster . orderBandsBufferedFromRgb ( tmp , dst ) ; return tmp ; }
Returns a new image with the color bands in the appropriate ordering . The returned image will reference the original image s image arrays .
27,057
/**
 * Computes the mean absolute per-pixel difference between two images of the same size.
 *
 * @param imgA first image
 * @param imgB second image
 * @return average of |imgA - imgB| over all pixels
 */
public static double computeError( GrayF32 imgA , GrayF32 imgB ) {
	final int height = imgA.getHeight();
	final int width = imgA.getWidth();

	double sum = 0;
	for (int y = 0; y < height; y++) {
		for (int x = 0; x < width; x++) {
			sum += Math.abs(imgA.get(x, y) - imgB.get(x, y));
		}
	}
	return sum/(width*height);
}
Computes the mean absolute per - pixel difference between the two images . TODO push to an image - statistics ops class?
27,058
/**
 * Computes the weighted mean absolute per-pixel difference between two images, where each
 * pixel's contribution is scaled by the corresponding pixel in the weight image.
 *
 * @param imgA first image
 * @param imgB second image
 * @param imgWeight per-pixel weights
 * @return sum(w*|imgA-imgB|) / sum(w)
 */
public static double computeWeightedError( GrayF32 imgA , GrayF32 imgB , GrayF32 imgWeight ) {
	final int height = imgA.getHeight();
	final int width = imgA.getWidth();

	double weightedSum = 0;
	double weightSum = 0;
	for (int y = 0; y < height; y++) {
		for (int x = 0; x < width; x++) {
			float w = imgWeight.get(x, y);
			weightedSum += w*Math.abs(imgA.get(x, y) - imgB.get(x, y));
			weightSum += w;
		}
	}
	return weightedSum/weightSum;
}
Computes the weighted mean absolute per - pixel difference between the two images . TODO push to an image - statistics ops class?
27,059
/**
 * Checks whether the point lies within 10 pixels of the stitched image's border.
 *
 * @param p point in stitched-image coordinates
 * @param stitch provides the stitched image dimensions
 * @return true if the point is near the border
 */
private static boolean nearBorder( Point2D_F64 p , StitchingFromMotion2D<?,?> stitch ) {
	final int margin = 10;
	// short-circuit: the stitched image is only queried when the cheap checks fail
	return p.x < margin || p.y < margin
			|| p.x >= stitch.getStitchedImage().width - margin
			|| p.y >= stitch.getStitchedImage().height - margin;
}
Checks to see if the point is near the image border
27,060
/**
 * Specifies the fiducial's size along its two axes. The four 3D control points are placed at
 * the corners of a width-by-height rectangle centered at the origin in the z=0 plane,
 * in counter-clockwise order starting at (-w/2, -h/2).
 *
 * @param width size along the x-axis
 * @param height size along the y-axis
 */
public void setShape( double width , double height ) {
	double hw = width/2;
	double hh = height/2;
	points2D3D.get(0).location.set(-hw, -hh, 0);
	points2D3D.get(1).location.set(-hw,  hh, 0);
	points2D3D.get(2).location.set( hw,  hh, 0);
	points2D3D.get(3).location.set( hw, -hh, 0);
}
Specifies how big the fiducial is along its two axes
27,061
/**
 * Estimates how sensitive the fiducial pose estimate is to pixel noise. Each control point is
 * projected into normalized image coordinates and pixels, then every observation is perturbed
 * by +/- disturbance pixels (see perturb); the worst-case change in location and orientation
 * found during perturbation is written into {@code results}.
 *
 * @param targetToCamera current estimate of the target-to-camera transform
 * @param disturbance magnitude of the pixel perturbation
 * @param results (output) maximum observed location and orientation change
 */
public void computeStability ( Se3_F64 targetToCamera , double disturbance , FiducialStability results ) { targetToCamera . invert ( referenceCameraToTarget ) ; maxOrientation = 0 ; maxLocation = 0 ; Point3D_F64 cameraPt = new Point3D_F64 ( ) ; for ( int i = 0 ; i < points2D3D . size ( ) ; i ++ ) { Point2D3D p23 = points2D3D . get ( i ) ; targetToCamera . transform ( p23 . location , cameraPt ) ; p23 . observation . x = cameraPt . x / cameraPt . z ; p23 . observation . y = cameraPt . y / cameraPt . z ; refNorm . get ( i ) . set ( p23 . observation ) ; normToPixel . compute ( p23 . observation . x , p23 . observation . y , refPixels . get ( i ) ) ; } for ( int i = 0 ; i < points2D3D . size ( ) ; i ++ ) { perturb ( disturbance , refPixels . get ( i ) , points2D3D . get ( i ) ) ; points2D3D . get ( i ) . observation . set ( refNorm . get ( i ) ) ; } results . location = maxLocation ; results . orientation = maxOrientation ; }
Estimate how sensitive this observation is to pixel noise
27,062
/**
 * Perturbs the pixel observation in four directions (+x, -x, +y, -y) by the given amount and
 * evaluates the resulting disturbance for each, in that order.
 *
 * @param disturbance perturbation magnitude in pixels
 * @param pixel unperturbed pixel observation
 * @param p23 point association being perturbed
 */
private void perturb( double disturbance , Point2D_F64 pixel , Point2D3D p23 ) {
	computeDisturbance(pixel.x + disturbance, pixel.y, p23);
	computeDisturbance(pixel.x - disturbance, pixel.y, p23);
	computeDisturbance(pixel.x, pixel.y + disturbance, p23);
	computeDisturbance(pixel.x, pixel.y - disturbance, p23);
}
Perturb the observation in 4 different ways
27,063
/**
 * Detects lines in the image with a polar Hough detector and adds a visualization panel
 * showing the found lines to the GUI list.
 *
 * @param image input image being processed and displayed
 * @param imageType type used internally for processing
 * @param derivType type used for image derivatives
 */
public static <T extends ImageGray<T>, D extends ImageGray<D>> void detectLines( BufferedImage image ,
																				 Class<T> imageType ,
																				 Class<D> derivType ) {
	T gray = ConvertBufferedImage.convertFromSingle(image, null, imageType);

	ConfigHoughPolar config = new ConfigHoughPolar(3, 30, 2, Math.PI/180, edgeThreshold, maxLines);
	DetectLineHoughPolar<T,D> detector = FactoryDetectLineAlgs.houghPolar(config, imageType, derivType);

	List<LineParametric2D_F32> lines = detector.detect(gray);

	ImageLinePanel panel = new ImageLinePanel();
	panel.setImage(image);
	panel.setLines(lines);
	panel.setPreferredSize(new Dimension(image.getWidth(), image.getHeight()));
	listPanel.addItem(panel, "Found Lines");
}
Detects lines inside the image using different types of Hough detectors
27,064
/**
 * Detects line segments in the image using a grid-based RANSAC detector and adds a
 * visualization panel showing the found segments to the GUI list.
 *
 * @param image input image being processed and displayed
 * @param imageType type used internally for processing
 * @param derivType type used for image derivatives
 */
public static <T extends ImageGray<T>, D extends ImageGray<D>> void detectLineSegments( BufferedImage image ,
																						Class<T> imageType ,
																						Class<D> derivType ) {
	T gray = ConvertBufferedImage.convertFromSingle(image, null, imageType);

	DetectLineSegmentsGridRansac<T,D> detector =
			FactoryDetectLineAlgs.lineRansac(40, 30, 2.36, true, imageType, derivType);

	List<LineSegment2D_F32> segments = detector.detect(gray);

	ImageLinePanel panel = new ImageLinePanel();
	panel.setImage(image);
	panel.setLineSegments(segments);
	panel.setPreferredSize(new Dimension(image.getWidth(), image.getHeight()));
	listPanel.addItem(panel, "Found Line Segments");
}
Detects segments inside the image
27,065
/**
 * Computes a joint 2D Hue-Saturation histogram (12x12 bins, L2 normalized) for each image.
 * Ignoring the Value channel makes the descriptor largely independent of lighting intensity.
 *
 * @param images file paths of the images to process
 * @return one flattened histogram per image, in input order
 * @throws RuntimeException if an image fails to load
 */
public static List < double [ ] > coupledHueSat ( List < String > images ) { List < double [ ] > points = new ArrayList < > ( ) ; Planar < GrayF32 > rgb = new Planar < > ( GrayF32 . class , 1 , 1 , 3 ) ; Planar < GrayF32 > hsv = new Planar < > ( GrayF32 . class , 1 , 1 , 3 ) ; for ( String path : images ) { BufferedImage buffered = UtilImageIO . loadImage ( path ) ; if ( buffered == null ) throw new RuntimeException ( "Can't load image!" ) ; rgb . reshape ( buffered . getWidth ( ) , buffered . getHeight ( ) ) ; hsv . reshape ( buffered . getWidth ( ) , buffered . getHeight ( ) ) ; ConvertBufferedImage . convertFrom ( buffered , rgb , true ) ; ColorHsv . rgbToHsv ( rgb , hsv ) ; Planar < GrayF32 > hs = hsv . partialSpectrum ( 0 , 1 ) ; Histogram_F64 histogram = new Histogram_F64 ( 12 , 12 ) ; histogram . setRange ( 0 , 0 , 2.0 * Math . PI ) ; histogram . setRange ( 1 , 0 , 1.0 ) ; GHistogramFeatureOps . histogram ( hs , histogram ) ; UtilFeature . normalizeL2 ( histogram ) ; points . add ( histogram . value ) ; } return points ; }
HSV stores color information in Hue and Saturation while intensity is in Value . This computes a 2D histogram from hue and saturation only which makes it lighting independent .
27,066
/**
 * Computes two independent 1D histograms (30 bins each) over hue and saturation, then
 * concatenates and L2-normalizes them into a single descriptor per image. Less sparse than
 * the joint 2D version, but assumes hue and saturation are decoupled.
 * Note: the local named {@code histogramValue} actually bins the saturation channel (band 1).
 *
 * @param images image files to process
 * @return one combined 60-element histogram per image, in input order
 * @throws RuntimeException if an image fails to load
 */
public static List < double [ ] > independentHueSat ( List < File > images ) { List < double [ ] > points = new ArrayList < > ( ) ; TupleDesc_F64 histogramHue = new TupleDesc_F64 ( 30 ) ; TupleDesc_F64 histogramValue = new TupleDesc_F64 ( 30 ) ; List < TupleDesc_F64 > histogramList = new ArrayList < > ( ) ; histogramList . add ( histogramHue ) ; histogramList . add ( histogramValue ) ; Planar < GrayF32 > rgb = new Planar < > ( GrayF32 . class , 1 , 1 , 3 ) ; Planar < GrayF32 > hsv = new Planar < > ( GrayF32 . class , 1 , 1 , 3 ) ; for ( File f : images ) { BufferedImage buffered = UtilImageIO . loadImage ( f . getPath ( ) ) ; if ( buffered == null ) throw new RuntimeException ( "Can't load image!" ) ; rgb . reshape ( buffered . getWidth ( ) , buffered . getHeight ( ) ) ; hsv . reshape ( buffered . getWidth ( ) , buffered . getHeight ( ) ) ; ConvertBufferedImage . convertFrom ( buffered , rgb , true ) ; ColorHsv . rgbToHsv ( rgb , hsv ) ; GHistogramFeatureOps . histogram ( hsv . getBand ( 0 ) , 0 , 2 * Math . PI , histogramHue ) ; GHistogramFeatureOps . histogram ( hsv . getBand ( 1 ) , 0 , 1 , histogramValue ) ; TupleDesc_F64 imageHist = UtilFeature . combine ( histogramList , null ) ; UtilFeature . normalizeL2 ( imageHist ) ; points . add ( imageHist . value ) ; } return points ; }
Computes two independent 1D histograms from hue and saturation . Less affected by sparsity but can produce worse results since the basic assumption that hue and saturation are decoupled is most of the time false .
27,067
/**
 * Computes a joint 3D RGB histogram (10x10x10 bins over [0,255] per channel, L2 normalized)
 * for each image. RGB histograms are sensitive to lighting conditions.
 *
 * @param images image files to process
 * @return one flattened histogram per image, in input order
 * @throws RuntimeException if an image fails to load
 */
public static List < double [ ] > coupledRGB ( List < File > images ) { List < double [ ] > points = new ArrayList < > ( ) ; Planar < GrayF32 > rgb = new Planar < > ( GrayF32 . class , 1 , 1 , 3 ) ; for ( File f : images ) { BufferedImage buffered = UtilImageIO . loadImage ( f . getPath ( ) ) ; if ( buffered == null ) throw new RuntimeException ( "Can't load image!" ) ; rgb . reshape ( buffered . getWidth ( ) , buffered . getHeight ( ) ) ; ConvertBufferedImage . convertFrom ( buffered , rgb , true ) ; Histogram_F64 histogram = new Histogram_F64 ( 10 , 10 , 10 ) ; histogram . setRange ( 0 , 0 , 255 ) ; histogram . setRange ( 1 , 0 , 255 ) ; histogram . setRange ( 2 , 0 , 255 ) ; GHistogramFeatureOps . histogram ( rgb , histogram ) ; UtilFeature . normalizeL2 ( histogram ) ; points . add ( histogram . value ) ; } return points ; }
Constructs a 3D histogram using RGB . RGB is a popular color space but the resulting histogram will depend on lighting conditions and might not produce the accurate results .
27,068
/**
 * Computes a 150-bin gray-scale intensity histogram (range 0..255, L2 normalized) for each
 * image. Likely the least discriminative of the histogram descriptors.
 *
 * @param images image files to process
 * @return one 150-element histogram per image, in input order
 * @throws RuntimeException if an image fails to load
 */
public static List < double [ ] > histogramGray ( List < File > images ) { List < double [ ] > points = new ArrayList < > ( ) ; GrayU8 gray = new GrayU8 ( 1 , 1 ) ; for ( File f : images ) { BufferedImage buffered = UtilImageIO . loadImage ( f . getPath ( ) ) ; if ( buffered == null ) throw new RuntimeException ( "Can't load image!" ) ; gray . reshape ( buffered . getWidth ( ) , buffered . getHeight ( ) ) ; ConvertBufferedImage . convertFrom ( buffered , gray , true ) ; TupleDesc_F64 imageHist = new TupleDesc_F64 ( 150 ) ; HistogramFeatureOps . histogram ( gray , 255 , imageHist ) ; UtilFeature . normalizeL2 ( imageHist ) ; points . add ( imageHist . value ) ; } return points ; }
Computes a histogram from the gray scale intensity image alone . Probably the least effective at looking up similar images .
27,069
/**
 * Computes pixel-index offsets for a discretized circle of the given radius in an image with
 * the given row stride. The number of angular samples is the circumference rounded down to a
 * multiple of 4, and consecutive duplicate offsets are dropped.
 * NOTE(review): only consecutive duplicates are removed, so the final sample may duplicate
 * the first; and since {@code prev} starts at 0, an offset of 0 as the very first sample
 * would be skipped. Confirm this is intended for very small radii.
 *
 * @param radius radius of the circle in pixels
 * @param imgWidth image row stride used to linearize (x,y) offsets
 * @return array of linearized pixel offsets tracing the circle
 */
public static int [ ] imageOffsets ( double radius , int imgWidth ) { double PI2 = Math . PI * 2.0 ; double circumference = PI2 * radius ; int num = ( int ) Math . ceil ( circumference ) ; num = num - num % 4 ; double angleStep = PI2 / num ; int temp [ ] = new int [ ( int ) Math . ceil ( circumference ) ] ; int i = 0 ; int prev = 0 ; for ( double ang = 0 ; ang < PI2 ; ang += angleStep ) { int x = ( int ) Math . round ( Math . cos ( ang ) * radius ) ; int y = ( int ) Math . round ( Math . sin ( ang ) * radius ) ; int pixel = y * imgWidth + x ; if ( pixel != prev ) { temp [ i ++ ] = pixel ; } prev = pixel ; } if ( i == temp . length ) return temp ; else { int ret [ ] = new int [ i ] ; System . arraycopy ( temp , 0 , ret , 0 , i ) ; return ret ; } }
Computes the offsets for a discretized circle of the specified radius for an image with the specified width .
27,070
/**
 * Creates a pre-made DetectDescribePoint implementation (stable SURF). Pre-made
 * implementations exist only where a performance advantage or very common combination
 * justified them.
 *
 * @param imageType type of input image
 * @return combined detector/descriptor
 */
public static <T extends ImageGray<T>, TD extends TupleDesc> DetectDescribePoint<T,TD> createFromPremade( Class<T> imageType ) {
	ConfigFastHessian configDetect = new ConfigFastHessian(1, 2, 200, 1, 9, 4, 4);
	return (DetectDescribePoint)FactoryDetectDescribe.surfStable(configDetect, null, null, imageType);
}
For some features there are pre - made implementations of DetectDescribePoint . This has only been done in situations where there was a performance advantage or that it was a very common combination .
27,071
/**
 * Builds a DetectDescribePoint by fusing independently created components: a Shi-Tomasi
 * corner detector (up to 1000 features, radius 5) wrapped as an interest point detector,
 * combined with a BRIEF region descriptor and no orientation estimator. More flexible than
 * the pre-made implementations, at the cost of more verbose construction.
 *
 * @param imageType type of input image
 * @return combined detector/descriptor
 */
public static < T extends ImageGray < T > , TD extends TupleDesc > DetectDescribePoint < T , TD > createFromComponents ( Class < T > imageType ) { Class derivType = GImageDerivativeOps . getDerivativeType ( imageType ) ; GeneralFeatureDetector corner = FactoryDetectPoint . createShiTomasi ( new ConfigGeneralDetector ( 1000 , 5 , 1 ) , null , derivType ) ; InterestPointDetector detector = FactoryInterestPoint . wrapPoint ( corner , 1 , imageType , derivType ) ; DescribeRegionPoint describe = FactoryDescribeRegionPoint . brief ( new ConfigBrief ( true ) , imageType ) ; return FactoryDetectDescribe . fuseTogether ( detector , null , describe ) ; }
Any arbitrary implementation of InterestPointDetector OrientationImage DescribeRegionPoint can be combined into DetectDescribePoint . The syntax is more complex but the end result is more flexible . This should only be done if there isn t a pre - made DetectDescribePoint .
27,072
/**
 * Computes the 2D sample weights as the outer product of a 1D Gaussian evaluated at
 * {@code numSamples} evenly spaced points spanning +/- numSigmas standard deviations.
 * The result is stored in the {@code weights} field, row-major.
 *
 * @param numSamples number of samples along each axis
 * @param numSigmas how many standard deviations the sample span covers on each side
 */
protected void computeWeights( int numSamples , double numSigmas ) {
	// 1D Gaussian profile across the normalized range [0,1]
	float profile[] = new float[numSamples];
	for (int i = 0; i < numSamples; i++) {
		float frac = i/(float)(numSamples - 1);
		profile[i] = (float)UtilGaussian.computePDF(0, 1, 2f*numSigmas*(frac - 0.5f));
	}

	// separable 2D weight = outer product of the 1D profile with itself
	weights = new float[numSamples*numSamples];
	int index = 0;
	for (int row = 0; row < numSamples; row++) {
		for (int col = 0; col < numSamples; col++) {
			weights[index++] = profile[row]*profile[col];
		}
	}
}
compute the weights by convolving 1D gaussian kernel
27,073
/**
 * Creates the grid of sample points in normalized square coordinates; both axes range
 * from -0.5 to 0.5, row-major order.
 *
 * @param numSamples number of samples along each axis
 */
protected void createSamplePoints( int numSamples ) {
	for (int row = 0; row < numSamples; row++) {
		float sy = row/(numSamples - 1.0f) - 0.5f;
		for (int col = 0; col < numSamples; col++) {
			float sx = col/(numSamples - 1.0f) - 0.5f;
			samplePts.add(new Point2D_F32(sx, sy));
		}
	}
}
create the list of points in square coordinates that it will sample . values will range from - 0 . 5 to 0 . 5 along each axis .
27,074
/**
 * Computes the weighted histogram of the rotated rectangular region assuming every sample
 * point lands inside the image, which allows the faster unchecked interpolation
 * ({@code get_fast}) to be used. The bin of each sample is also recorded in
 * {@code sampleHistIndex}.
 *
 * @param region region being sampled, in image coordinates
 */
protected void computeHistogramInside ( RectangleRotate_F32 region ) { for ( int i = 0 ; i < samplePts . size ( ) ; i ++ ) { Point2D_F32 p = samplePts . get ( i ) ; squareToImageSample ( p . x , p . y , region ) ; interpolate . get_fast ( imageX , imageY , value ) ; int indexHistogram = computeHistogramBin ( value ) ; sampleHistIndex [ i ] = indexHistogram ; histogram [ indexHistogram ] += weights [ i ] ; } }
Computes the histogram quickly inside the image
27,075
/**
 * Computes the weighted histogram for a region that may extend past the image border.
 * Samples landing outside the image are skipped and marked with a histogram index of -1;
 * in-bounds samples use the bounds-checked interpolation.
 *
 * @param image image being sampled
 * @param region region being sampled, in image coordinates
 */
protected void computeHistogramBorder ( T image , RectangleRotate_F32 region ) { for ( int i = 0 ; i < samplePts . size ( ) ; i ++ ) { Point2D_F32 p = samplePts . get ( i ) ; squareToImageSample ( p . x , p . y , region ) ; if ( ! BoofMiscOps . checkInside ( image , imageX , imageY ) ) { sampleHistIndex [ i ] = - 1 ; } else { interpolate . get ( imageX , imageY , value ) ; int indexHistogram = computeHistogramBin ( value ) ; sampleHistIndex [ i ] = indexHistogram ; histogram [ indexHistogram ] += weights [ i ] ; } } }
Computes the histogram and skips pixels which are outside the image border
27,076
/**
 * Maps a multi-band pixel value to its bin index in the flattened multi-dimensional
 * histogram. Each band contributes a digit in a base-{@code numBins} index.
 *
 * @param value pixel value, one entry per band
 * @return flattened histogram bin index
 */
protected int computeHistogramBin( float value[] ) {
	int index = 0;
	int stride = 1;
	for (int band = 0; band < value.length; band++) {
		index += stride*(int)(numBins*value[band]/maxPixelValue);
		stride *= numBins;
	}
	return index;
}
Given the value of a pixel compute which bin in the histogram it belongs in
27,077
/**
 * Checks whether all four corners of the region fall inside the interpolator's fast bounds,
 * so the region can be sampled with the unchecked fast path.
 *
 * @param region region being tested
 * @return true if every corner is inside the fast bounds
 */
protected boolean isInFastBounds( RectangleRotate_F32 region ) {
	// corners in normalized square coordinates, same order as the original checks
	final float[][] corners = {{-0.5f,-0.5f}, {-0.5f,0.5f}, {0.5f,0.5f}, {0.5f,-0.5f}};
	for (float[] corner : corners) {
		squareToImageSample(corner[0], corner[1], region);
		if (!interpolate.isInFastBounds(imageX, imageY))
			return false;
	}
	return true;
}
Checks to see if the region can be sampled using the fast algorithm
27,078
/**
 * Converts a point from normalized square coordinates into image coordinates by scaling to
 * the region's size, rotating by the cached (c, s) rotation, and translating to the region
 * center. The result is written to the {@code imageX}/{@code imageY} fields.
 *
 * @param x normalized x-coordinate, typically in [-0.5, 0.5]
 * @param y normalized y-coordinate, typically in [-0.5, 0.5]
 * @param region region defining scale, rotation, and center
 */
protected void squareToImageSample( float x , float y , RectangleRotate_F32 region ) {
	float sx = x*(region.width - 1);
	float sy = y*(region.height - 1);

	imageX = sx*c - sy*s + region.cx;
	imageY = sx*s + sy*c + region.cy;
}
Converts a point from square coordinates into image coordinates
27,079
/**
 * Creates the sparse second-derivative operators (xx, xy, yy) used elsewhere in this class.
 * The second-derivative kernels are built by convolving the [-1,0,1] difference kernel with
 * itself (1D for xx/yy, 2D for xy), and all operators use an extended image border.
 */
private void createSparseDerivatives ( ) { Kernel1D_F32 kernelD = new Kernel1D_F32 ( new float [ ] { - 1 , 0 , 1 } , 3 ) ; Kernel1D_F32 kernelDD = KernelMath . convolve1D_F32 ( kernelD , kernelD ) ; Kernel2D_F32 kernelXY = KernelMath . convolve2D ( kernelD , kernelD ) ; derivXX = FactoryConvolveSparse . horizontal1D ( GrayF32 . class , kernelDD ) ; derivXY = FactoryConvolveSparse . convolve2D ( GrayF32 . class , kernelXY ) ; derivYY = FactoryConvolveSparse . vertical1D ( GrayF32 . class , kernelDD ) ; ImageBorder < GrayF32 > border = FactoryImageBorder . single ( GrayF32 . class , BorderType . EXTENDED ) ; derivXX . setImageBorder ( border ) ; derivXY . setImageBorder ( border ) ; derivYY . setImageBorder ( border ) ; }
Define sparse image derivative operators .
27,080
/**
 * Detects SIFT features in the input image. Iterates over every octave of the scale space
 * and, within each octave, over each Difference-of-Gaussian triplet (scales j-1, j, j+1),
 * invoking detectFeatures for the middle scale. Previously found detections are cleared.
 *
 * @param input image being processed
 */
public void process ( GrayF32 input ) { scaleSpace . initialize ( input ) ; detections . reset ( ) ; do { pixelScaleToInput = scaleSpace . pixelScaleCurrentToInput ( ) ; for ( int j = 1 ; j < scaleSpace . getNumScales ( ) + 1 ; j ++ ) { sigmaLower = scaleSpace . computeSigmaScale ( j - 1 ) ; sigmaTarget = scaleSpace . computeSigmaScale ( j ) ; sigmaUpper = scaleSpace . computeSigmaScale ( j + 1 ) ; dogLower = scaleSpace . getDifferenceOfGaussian ( j - 1 ) ; dogTarget = scaleSpace . getDifferenceOfGaussian ( j ) ; dogUpper = scaleSpace . getDifferenceOfGaussian ( j + 1 ) ; detectFeatures ( j ) ; } } while ( scaleSpace . computeNextOctave ( ) ) ; }
Detects SIFT features inside the input image
27,081
/**
 * Detects features in the current Difference-of-Gaussian image. Local 2D extrema are found
 * first, then each candidate is kept only if it is also an extremum relative to the DoG
 * images one scale below and above (sign-adjusted for maxima vs minima) before being
 * processed as a feature candidate.
 *
 * @param scaleIndex index of the scale currently being processed
 */
protected void detectFeatures ( int scaleIndex ) { extractor . process ( dogTarget ) ; FastQueue < NonMaxLimiter . LocalExtreme > found = extractor . getLocalExtreme ( ) ; derivXX . setImage ( dogTarget ) ; derivXY . setImage ( dogTarget ) ; derivYY . setImage ( dogTarget ) ; for ( int i = 0 ; i < found . size ; i ++ ) { NonMaxLimiter . LocalExtreme e = found . get ( i ) ; if ( e . max ) { if ( isScaleSpaceExtremum ( e . location . x , e . location . y , e . getValue ( ) , 1f ) ) { processFeatureCandidate ( e . location . x , e . location . y , e . getValue ( ) , e . max ) ; } } else if ( isScaleSpaceExtremum ( e . location . x , e . location . y , e . getValue ( ) , - 1f ) ) { processFeatureCandidate ( e . location . x , e . location . y , e . getValue ( ) , e . max ) ; } } }
Detect features inside the Difference - of - Gaussian image at the current scale
27,082
/**
 * Checks whether the point is a scale-space extremum by comparing its value against the
 * 3x3 neighborhoods in the DoG images one scale below and above. {@code signAdj} is +1 for
 * maxima and -1 for minima, flipping the comparison so both cases share one code path.
 * Points within one pixel of the image border are rejected.
 *
 * @param c_x candidate x-coordinate
 * @param c_y candidate y-coordinate
 * @param value DoG response at the candidate
 * @param signAdj +1 when testing for a maximum, -1 for a minimum
 * @return true if the point is a strict extremum across adjacent scales
 */
boolean isScaleSpaceExtremum ( int c_x , int c_y , float value , float signAdj ) { if ( c_x <= 1 || c_y <= 1 || c_x >= dogLower . width - 1 || c_y >= dogLower . height - 1 ) return false ; float v ; value *= signAdj ; for ( int y = - 1 ; y <= 1 ; y ++ ) { for ( int x = - 1 ; x <= 1 ; x ++ ) { v = dogLower . unsafe_get ( c_x + x , c_y + y ) ; if ( v * signAdj >= value ) return false ; v = dogUpper . unsafe_get ( c_x + x , c_y + y ) ; if ( v * signAdj >= value ) return false ; } } return true ; }
See if the point is a local extremum in scale - space above and below .
27,083
/**
 * Computes an affine pixel transform that rescales coordinates from one image's size to
 * another's. The x and y axes are scaled independently based on the two images' dimensions.
 *
 * @param from image defining the source size
 * @param to image defining the destination size
 * @param distort optional storage for the transform; created if null
 * @return the scaling transform
 */
public static PixelTransformAffine_F32 transformScale( ImageBase from , ImageBase to , PixelTransformAffine_F32 distort ) {
	if (distort == null)
		distort = new PixelTransformAffine_F32();

	float scaleX = to.width/(float)from.width;
	float scaleY = to.height/(float)from.height;

	distort.getModel().set(scaleX, 0, 0, scaleY, 0, 0);
	return distort;
}
Computes a transform which is used to rescale an image . The scale is computed directly from the size of the two input images and independently scales the x and y axes .
27,084
/**
 * Projects the estimated matrix E onto the space of essential matrices by forcing its
 * singular values to (1, 1, 0): E is decomposed with SVD, the singular values are sorted in
 * descending order, replaced, and E is reconstructed in place as U*S*V^T.
 *
 * @param E (input/output) matrix being projected; modified in place
 * @return false if the SVD fails
 */
protected boolean projectOntoEssential ( DMatrixRMaj E ) { if ( ! svdConstraints . decompose ( E ) ) { return false ; } svdV = svdConstraints . getV ( svdV , false ) ; svdU = svdConstraints . getU ( svdU , false ) ; svdS = svdConstraints . getW ( svdS ) ; SingularOps_DDRM . descendingOrder ( svdU , false , svdS , svdV , false ) ; svdS . unsafe_set ( 0 , 0 , 1 ) ; svdS . unsafe_set ( 1 , 1 , 1 ) ; svdS . unsafe_set ( 2 , 2 , 0 ) ; CommonOps_DDRM . mult ( svdU , svdS , temp0 ) ; CommonOps_DDRM . multTransB ( temp0 , svdV , E ) ; return true ; }
Projects the found estimate of E onto essential space .
27,085
/**
 * Projects the estimated matrix F onto the space of fundamental matrices by zeroing its
 * smallest singular value (enforcing the rank-2 constraint): F is decomposed with SVD, the
 * singular values are sorted in descending order, the smallest is set to zero, and F is
 * reconstructed in place as U*S*V^T.
 *
 * @param F (input/output) matrix being projected; modified in place
 * @return false if the SVD fails
 */
protected boolean projectOntoFundamentalSpace ( DMatrixRMaj F ) { if ( ! svdConstraints . decompose ( F ) ) { return false ; } svdV = svdConstraints . getV ( svdV , false ) ; svdU = svdConstraints . getU ( svdU , false ) ; svdS = svdConstraints . getW ( svdS ) ; SingularOps_DDRM . descendingOrder ( svdU , false , svdS , svdV , false ) ; svdS . set ( 2 , 2 , 0 ) ; CommonOps_DDRM . mult ( svdU , svdS , temp0 ) ; CommonOps_DDRM . multTransB ( temp0 , svdV , F ) ; return true ; }
Projects the found estimate of F onto Fundamental space .
27,086
/**
 * Learns all ferns from the specified region with no added noise: each fern's value is
 * computed at the region and its P or N count is incremented.
 *
 * @param positive true if the region is a positive (target) example
 * @param r region the ferns are computed over
 */
public void learnFern( boolean positive , ImageRectangle r ) {
	final float regionWidth = r.getWidth();
	final float regionHeight = r.getHeight();

	// center of the region
	final float centerX = r.x0 + (regionWidth - 1)/2f;
	final float centerY = r.y0 + (regionHeight - 1)/2f;

	for (int fernIdx = 0; fernIdx < ferns.length; fernIdx++) {
		int key = computeFernValue(centerX, centerY, regionWidth, regionHeight, ferns[fernIdx]);
		increment(managers[fernIdx].lookupFern(key), positive);
	}
}
Learns a fern from the specified region . No noise is added .
27,087
/**
 * Learns all ferns from the specified region, updating each fern's P and N counts. In
 * addition to the exact observation, {@code numLearnRandom} noisy samples per fern are
 * learned to account for measurement variability.
 *
 * @param positive true if the region is a positive (target) example
 * @param r region the ferns are computed over
 */
public void learnFernNoise( boolean positive , ImageRectangle r ) {
	final float regionWidth = r.getWidth();
	final float regionHeight = r.getHeight();

	final float centerX = r.x0 + (regionWidth - 1)/2.0f;
	final float centerY = r.y0 + (regionHeight - 1)/2.0f;

	for (int fernIdx = 0; fernIdx < ferns.length; fernIdx++) {
		// exact observation first
		int key = computeFernValue(centerX, centerY, regionWidth, regionHeight, ferns[fernIdx]);
		TldFernFeature feature = managers[fernIdx].lookupFern(key);
		increment(feature, positive);

		// then several randomized (noisy) observations of the same fern
		for (int sample = 0; sample < numLearnRandom; sample++) {
			key = computeFernValueRand(centerX, centerY, regionWidth, regionHeight, ferns[fernIdx]);
			feature = managers[fernIdx].lookupFern(key);
			increment(feature, positive);
		}
	}
}
Computes the value for each fern inside the region and updates their P and N values . Noise is added to the image measurements to take into account the variability .
27,088
/**
 * Increments the P or N count of the fern feature and keeps the maxP/maxN statistics up to
 * date so callers know when data structures must be re-normalized.
 *
 * @param f fern feature being updated
 * @param positive true to increment the P count, false for N
 */
private void increment( TldFernFeature f , boolean positive ) {
	if (positive) {
		f.incrementP();
		maxP = Math.max(maxP, f.numP);
	} else {
		f.incrementN();
		maxN = Math.max(maxN, f.numN);
	}
}
Increments the P and N value for a fern . Also updates the maxP and maxN statistics so that it knows when to re - normalize data structures .
27,089
/**
 * Computes the value of every fern inside the region and sums the stored P and N counts of
 * the matching features. The totals are written into {@code info}.
 *
 * @param info (input/output) region being evaluated; sumP and sumN are written here
 * @return true if at least one non-zero P or N count was found
 */
public boolean lookupFernPN( TldRegionFernInfo info ) {
	ImageRectangle r = info.r;

	final float regionWidth = r.getWidth();
	final float regionHeight = r.getHeight();

	final float centerX = r.x0 + (regionWidth - 1)/2.0f;
	final float centerY = r.y0 + (regionHeight - 1)/2.0f;

	int totalP = 0;
	int totalN = 0;
	for (int i = 0; i < ferns.length; i++) {
		int key = computeFernValue(centerX, centerY, regionWidth, regionHeight, ferns[i]);
		TldFernFeature feature = managers[i].table[key];
		if (feature != null) {
			totalP += feature.numP;
			totalN += feature.numN;
		}
	}

	info.sumP = totalP;
	info.sumN = totalN;
	return totalN != 0 || totalP != 0;
}
For the specified region computes the values of each fern inside of it and then retrieves their P and N values , the sums of which are stored inside of info .
27,090
/**
 * Computes the fern's binary descriptor at the given location. For each point pair the image
 * is sampled at both points (scaled by the region size) and a 1 bit is emitted when the
 * first sample is darker than the second.
 *
 * @param c_x region center x-coordinate
 * @param c_y region center y-coordinate
 * @param rectWidth region width in pixels
 * @param rectHeight region height in pixels
 * @param fern fern description containing the sample point pairs
 * @return packed binary descriptor, one bit per pair
 */
protected int computeFernValue( float c_x , float c_y , float rectWidth , float rectHeight , TldFernDescription fern ) {
	final float spanX = rectWidth - 1;
	final float spanY = rectHeight - 1;

	int desc = 0;
	for (int i = 0; i < fern.pairs.length; i++) {
		Point2D_F32 a = fern.pairs[i].a;
		Point2D_F32 b = fern.pairs[i].b;

		float sampleA = interpolate.get_fast(c_x + a.x*spanX, c_y + a.y*spanY);
		float sampleB = interpolate.get_fast(c_x + b.x*spanX, c_y + b.y*spanY);

		desc = (desc << 1) | (sampleA < sampleB ? 1 : 0);
	}
	return desc;
}
Computes the value of the specified fern at the specified location in the image .
27,091
/**
 * Renormalizes every fern's P count to avoid overflow: all counts are rescaled so the
 * largest becomes maxP/20, and maxP is updated accordingly.
 */
public void renormalizeP() {
	int targetMax = maxP/20;

	for (int i = 0; i < managers.length; i++) {
		TldFernManager m = managers[i];
		for (int j = 0; j < m.table.length; j++) {
			TldFernFeature f = m.table[j];
			if (f == null)
				continue;
			// widen to long: targetMax*numP can exceed Integer.MAX_VALUE before the division
			f.numP = (int)((long)targetMax*f.numP/maxP);
		}
	}
	maxP = targetMax;
}
Renormalizes fern . numP to avoid overflow
27,092
/**
 * Renormalizes every fern's N count to avoid overflow: all counts are rescaled so the
 * largest becomes maxN/20, and maxN is updated accordingly.
 */
public void renormalizeN() {
	int targetMax = maxN/20;

	for (int i = 0; i < managers.length; i++) {
		TldFernManager m = managers[i];
		for (int j = 0; j < m.table.length; j++) {
			TldFernFeature f = m.table[j];
			if (f == null)
				continue;
			// widen to long: targetMax*numN can exceed Integer.MAX_VALUE before the division
			f.numN = (int)((long)targetMax*f.numN/maxN);
		}
	}
	maxN = targetMax;
}
Renormalizes fern . numN to avoid overflow
27,093
/**
 * Computes the SURF descriptor (without the Laplacian sign) at the given location,
 * orientation, and scale, writing it into {@code ret.value}. Falls back to the border-safe
 * gradient when the sample region extends outside the integral image.
 * NOTE(review): when {@code ret} is null a new BrightFeature is allocated but, since this
 * method returns void, the result is discarded — confirm callers always pass non-null.
 *
 * @param x feature x-coordinate
 * @param y feature y-coordinate
 * @param angle feature orientation in radians
 * @param scale feature scale
 * @param ret (output) storage for the descriptor; must have featureDOF elements
 * @throws IllegalArgumentException if ret has the wrong number of elements
 */
public void describe ( double x , double y , double angle , double scale , TupleDesc_F64 ret ) { double c = Math . cos ( angle ) , s = Math . sin ( angle ) ; boolean isInBounds = SurfDescribeOps . isInside ( ii , x , y , radiusDescriptor , widthSample , scale , c , s ) ; if ( ret == null ) ret = new BrightFeature ( featureDOF ) ; else if ( ret . value . length != featureDOF ) throw new IllegalArgumentException ( "Provided feature must have " + featureDOF + " values" ) ; gradient . setImage ( ii ) ; gradient . setWidth ( widthSample * scale ) ; SparseImageGradient gradient = isInBounds ? this . gradient : this . gradientSafe ; features ( x , y , c , s , scale , gradient , ret . value ) ; }
Compute SURF descriptor but without laplacian sign
27,094
/**
 * Computes the sign of the Laplacian at the given point using a sparse convolution
 * of second-derivative box kernels against the integral image.
 *
 * @param x Point x-coordinate
 * @param y Point y-coordinate
 * @param scale Scale at which the Laplacian is evaluated
 * @return true if the Laplacian (trace of the Hessian) is positive
 */
public boolean computeLaplaceSign( int x , int y , double scale ) {
	// kernel size grows with scale; 9 is the base block size at scale 1
	int s = (int)Math.ceil(scale);
	kerXX = DerivativeIntegralImage.kernelDerivXX(9*s, kerXX);
	kerYY = DerivativeIntegralImage.kernelDerivYY(9*s, kerYY);

	// Laplacian = Lxx + Lyy, each evaluated sparsely at (x,y)
	double lap = GIntegralImageOps.convolveSparse(ii, kerXX, x, y)
			+ GIntegralImageOps.convolveSparse(ii, kerYY, x, y);

	return lap > 0;
}
Computes the sign of the Laplacian using a sparse convolution.
27,095
/**
 * Creates a Hough line detector based on the polar line parametrization.
 *
 * @param config Detector configuration. Must not be null since minCounts has no default.
 * @param imageType Type of the input image
 * @param derivType Type of the image derivative
 * @return Configured polar Hough line detector
 * @throws IllegalArgumentException if config is null
 */
public static <I extends ImageGray<I>, D extends ImageGray<D>>
DetectLineHoughPolar<I,D> houghPolar( ConfigHoughPolar config , Class<I> imageType , Class<D> derivType ) {
	if( config == null )
		throw new IllegalArgumentException("This is no default since minCounts must be specified");

	// gradient is computed with a Sobel kernel
	return new DetectLineHoughPolar<>(
			config.localMaxRadius,
			config.minCounts,
			config.resolutionRange,
			config.resolutionAngle,
			config.thresholdEdge,
			config.maxLines,
			FactoryDerivative.sobel(imageType, derivType));
}
Creates a Hough line detector based on polar parametrization .
27,096
/**
 * Draws contours. Internal and external contours are drawn in different, user-specified colors.
 *
 * @param contours Contours to render
 * @param colorExternal RGB color for external contour pixels
 * @param colorInternal RGB color for internal contour pixels
 * @param width Width of the output image
 * @param height Height of the output image
 * @param out (Optional) storage for the output. If null a new image is created.
 * @return Rendered image
 */
public static BufferedImage renderContours( List<Contour> contours , int colorExternal , int colorInternal ,
											int width , int height , BufferedImage out ) {
	if( out == null ) {
		// a freshly created INT_RGB image is already all black; no explicit fill needed
		out = new BufferedImage(width, height, BufferedImage.TYPE_INT_RGB);
	} else {
		// clear any previous content in the recycled image
		Graphics2D g2 = out.createGraphics();
		g2.setColor(Color.BLACK);
		g2.fillRect(0, 0, width, height);
		// BUG FIX: Graphics2D holds native resources and must be released explicitly
		g2.dispose();
	}

	for( Contour c : contours ) {
		for( Point2D_I32 p : c.external ) {
			out.setRGB(p.x, p.y, colorExternal);
		}
		for( List<Point2D_I32> l : c.internal ) {
			for( Point2D_I32 p : l ) {
				out.setRGB(p.x, p.y, colorInternal);
			}
		}
	}

	return out;
}
Draws contours . Internal and external contours are different user specified colors .
27,097
/**
 * Renders only the external contours. Each contour is individually colored as specified
 * by the colors array.
 *
 * @param contours Contours to render
 * @param colors Color for each contour, by index. May be adjusted/expanded by checkColors().
 * @param out Image the contours are drawn into
 */
public static void render( List<Contour> contours , int colors[] , BufferedImage out ) {
	// make sure there is one color per contour
	colors = checkColors(colors, contours.size());

	int index = 0;
	for( Contour c : contours ) {
		int rgb = colors[index++];
		for( Point2D_I32 p : c.external ) {
			out.setRGB(p.x, p.y, rgb);
		}
	}
}
Renders only the external contours . Each contour is individually colored as specified by colors
27,098
/**
 * Renders a binary image. 0 = black and 1 = white.
 *
 * @param binaryImage Binary image to render
 * @param invert If true the colors are swapped
 * @param out (Optional) storage for the output. Recreated if null or the wrong size.
 * @return Rendered image
 */
public static BufferedImage renderBinary( GrayU8 binaryImage , boolean invert , BufferedImage out ) {
	boolean needsNew = out == null
			|| out.getWidth() != binaryImage.width
			|| out.getHeight() != binaryImage.height;

	if( needsNew ) {
		out = new BufferedImage(binaryImage.getWidth(), binaryImage.getHeight(), BufferedImage.TYPE_BYTE_GRAY);
	}

	try {
		// fast path: write directly into the image's raster buffer
		WritableRaster raster = out.getRaster();
		DataBuffer buffer = raster.getDataBuffer();
		switch( buffer.getDataType() ) {
			case DataBuffer.TYPE_BYTE:
				renderBinary(binaryImage, invert, (DataBufferByte)buffer, raster);
				break;

			case DataBuffer.TYPE_INT:
				renderBinary(binaryImage, invert, (DataBufferInt)buffer, raster);
				break;

			default:
				// unrecognized buffer layout; fall back to the slow pixel-by-pixel path
				_renderBinary(binaryImage, invert, out);
		}
	} catch( SecurityException e ) {
		// direct raster access was denied (e.g. sandboxed environment); use the slow path
		_renderBinary(binaryImage, invert, out);
	}

	// Harmless self-assignment of one pixel — presumably forces the image to mark its
	// raster as modified so cached/accelerated copies are refreshed. TODO confirm.
	out.setRGB(0, 0, out.getRGB(0, 0));
	return out;
}
Renders a binary image . 0 = black and 1 = white .
27,099
/**
 * Computes the homography induced by a plane, where the plane is defined by a line
 * correspondence (line) and a point correspondence (point). The result is written into
 * the class field H, of the form H = [l2]_x*F + (top/bottom)*e2*l1^T, then passed to
 * {@code adjust} together with the point pair — presumably to fix the sign/scale of H;
 * TODO confirm against the adjust implementation.
 *
 * NOTE: t0 is reused as scratch storage mid-sequence, so the statement order below
 * must not be changed.
 *
 * @param line Line correspondence (l1 in view 1, l2 in view 2) lying on the plane
 * @param point Point correspondence (p1 in view 1, p2 in view 2) lying on the plane
 */
public void process( PairLineNorm line , AssociatedPair point ) {
	// Fx = F*p1 : epipolar line of p1 in view 2
	GeometryMath_F64.mult(F, point.p1, Fx);
	// t1 = p2 x (Fx x l2)
	GeometryMath_F64.cross(Fx, line.getL2(), t0);
	GeometryMath_F64.cross(point.p2, t0, t1);
	// t0 = p2 x e2 (t0 is reused as scratch from here on)
	GeometryMath_F64.cross(point.p2, e2, t0);
	// scalar weight of the rank-1 term: (t0.t1) / (|t0|^2 * (l1.p1))
	double top = GeometryMath_F64.dot(t0, t1);
	// l1.p1 computed explicitly for the homogeneous point (p1.x, p1.y, 1)
	double bottom = t0.normSq()*(line.l1.x*point.p1.x + line.l1.y*point.p1.y + line.l1.z);
	// el = e2*l1^T (rank-1 outer product term)
	GeometryMath_F64.outerProd(e2, line.l1, el);
	// lf = [l2]_x * F
	GeometryMath_F64.multCrossA(line.l2, F, lf);
	// H = lf + (top/bottom)*el
	CommonOps_DDRM.add(lf, top/bottom, el, H);
	adjust.adjust(H, point);
}
Computes the homography based on a line and point on the plane