signature
stringlengths
43
39.1k
implementation
stringlengths
0
450k
public class CPMeasurementUnitServiceBaseImpl {

    /**
     * Sets the CP tax category remote service.
     *
     * <p>Plain dependency-injection setter: the supplied service is stored on
     * this instance for later delegation; no validation is performed, so a
     * {@code null} argument is accepted and simply overwrites any previous value.
     *
     * @param cpTaxCategoryService the cp tax category remote service
     */
    public void setCPTaxCategoryService(
        com.liferay.commerce.product.service.CPTaxCategoryService cpTaxCategoryService) {
        this.cpTaxCategoryService = cpTaxCategoryService;
    }
}
public class CatalogUtil {

    /**
     * Build Importer configuration, optionally logging deprecation or any other messages.
     *
     * @param importConfiguration deployment configuration.
     * @param validation if we are validating configuration, log any deprecation messages.
     *        This avoids double logging of deprecated or any other messages we would
     *        introduce in here.
     * @return the built {@code ImportConfiguration}, or {@code null} when the
     *         configuration is disabled.
     * @throws DeploymentCheckException on an unsupported Kafka version, an invalid
     *         format, or a missing configuration type.
     */
    private static ImportConfiguration buildImportProcessorConfiguration(
            ImportConfigurationType importConfiguration, boolean validation) {
        String importBundleUrl = importConfiguration.getModule();
        // Disabled importers produce no configuration at all.
        if (!importConfiguration.isEnabled()) {
            return null;
        }
        // Map the importer type to its bundled implementation jar. CUSTOM keeps the
        // module URL taken from the configuration unchanged.
        switch (importConfiguration.getType()) {
            case CUSTOM:
                break;
            case KAFKA:
                String version = importConfiguration.getVersion().trim();
                if ("8".equals(version)) {
                    // Only warn during validation so the message is not logged twice.
                    if (validation) {
                        hostLog.warn("Kafka importer version 0.8 has been deprecated.");
                    }
                    importBundleUrl = "kafkastream.jar";
                } else if ("10".equals(version)) {
                    importBundleUrl = "kafkastream10.jar";
                } else {
                    throw new DeploymentCheckException("Kafka " + version + " is not supported.");
                }
                break;
            case KINESIS:
                importBundleUrl = "kinesisstream.jar";
                break;
            default:
                throw new DeploymentCheckException("Import Configuration type must be specified.");
        }
        Properties moduleProps = new Properties();
        Properties formatterProps = new Properties();
        String formatBundle = importConfiguration.getFormat();
        String formatName = null;
        if (formatBundle != null && formatBundle.trim().length() > 0) {
            if ("csv".equalsIgnoreCase(formatBundle) || "tsv".equalsIgnoreCase(formatBundle)) {
                // Built-in csv/tsv formats map to the bundled CSV formatter jar.
                formatName = formatBundle;
                formatBundle = "voltcsvformatter.jar";
            } else if (JAR_EXTENSION_RE.matcher(formatBundle).matches()) {
                // Custom formatter given as "<bundle-path>/<format-name>"; split on the
                // last '/' into bundle location and format name.
                int typeIndex = formatBundle.lastIndexOf("/");
                formatName = formatBundle.substring(typeIndex + 1);
                formatBundle = formatBundle.substring(0, typeIndex);
            } else {
                throw new DeploymentCheckException("Import format " + formatBundle + " not valid.");
            }
            formatterProps.setProperty(ImportDataProcessor.IMPORT_FORMATTER,
                buildBundleURL(formatBundle, true));
        }
        if (importBundleUrl != null && importBundleUrl.trim().length() > 0) {
            moduleProps.setProperty(ImportDataProcessor.IMPORT_MODULE,
                buildBundleURL(importBundleUrl, false));
        }
        // Copy user-supplied importer properties; values are trimmed unless the key
        // looks like a password (matched loosely via "passw").
        List<PropertyType> importProperties = importConfiguration.getProperty();
        if (importProperties != null && !importProperties.isEmpty()) {
            for (PropertyType prop : importProperties) {
                String key = prop.getName();
                String value = prop.getValue();
                if (!key.toLowerCase().contains("passw")) {
                    moduleProps.setProperty(key, value.trim());
                } else {
                    // Don't trim passwords
                    moduleProps.setProperty(key, value);
                }
            }
        }
        // Formatter properties are copied verbatim.
        List<PropertyType> formatProperties = importConfiguration.getFormatProperty();
        if (formatProperties != null && !formatProperties.isEmpty()) {
            for (PropertyType prop : formatProperties) {
                formatterProps.setProperty(prop.getName(), prop.getValue());
            }
        }
        return new ImportConfiguration(formatName, moduleProps, formatterProps);
    }
}
class ComputePolygonArea {

    /**
     * Computes the area of a regular polygon with the given number of sides
     * and side length, using the standard formula
     * {@code area = n * s^2 / (4 * tan(PI / n))}.
     *
     * Examples:
     *   computePolygonArea(4, 20)  -> ~400.0
     *   computePolygonArea(10, 15) -> 1731.1969896610804
     *   computePolygonArea(9, 7)   -> 302.90938549487214
     *
     * @param sideNumber the number of the polygon's sides
     * @param sideLength the length of one side
     * @return the calculated area of the regular polygon
     */
    public static double computePolygonArea(int sideNumber, double sideLength) {
        // Denominator 4 * tan(PI / n) relates the side length to the apothem.
        final double squaredSide = Math.pow(sideLength, 2);
        final double denominator = 4 * Math.tan(Math.PI / sideNumber);
        return (sideNumber * squaredSide) / denominator;
    }
}
public class CommerceAccountOrganizationRelUtil {

    /**
     * Returns the last commerce account organization rel in the ordered set where
     * organizationId = &#63;.
     *
     * <p>Pure delegation to the persistence layer; all lookup and ordering logic
     * lives in the persistence implementation.
     *
     * @param organizationId the organization ID
     * @param orderByComparator the comparator to order the set by (optionally <code>null</code>)
     * @return the last matching commerce account organization rel, or
     *         <code>null</code> if a matching commerce account organization rel
     *         could not be found
     */
    public static CommerceAccountOrganizationRel fetchByOrganizationId_Last(
            long organizationId,
            OrderByComparator<CommerceAccountOrganizationRel> orderByComparator) {
        return getPersistence().fetchByOrganizationId_Last(organizationId, orderByComparator);
    }
}
public class CycleAnalyzer { /** * This method searches a given { @ link Graph } for cycles . This method looks * into all vertices . If a disconnected subgraph exists , this graph is * analyzed , too . * @ param < V > * is the actual vertex implementation . * @ param < E > * is the actual edge implementation . * @ param graph * is the { @ link Graph } to be searched for cycles . * @ param directed * is to be set to < code > true < / code > is the graph is to be * handled as an directed graph ( The { @ link Pair } result in * { @ link Edge # getVertices ( ) is interpreted as startVertex and * targetVertex } . ) . < code > false < / code > is to be set otherwise . * @ return < code > true < / code > is returned if a cycle was found . * < code > false < / code > is returned otherwise . */ public static < V extends Vertex < V , E > , E extends Edge < V , E > > boolean hasCycles ( Graph < V , E > graph , boolean directed ) { } }
requireNonNull ( graph , "The given start vertex is null" ) ; Set < V > notVisited = new HashSet < > ( graph . getVertices ( ) ) ; while ( ! notVisited . isEmpty ( ) ) { if ( hasCycle ( notVisited . iterator ( ) . next ( ) , new Stack < > ( ) , new Stack < > ( ) , notVisited , directed ) ) { return true ; } } return false ;
public class SimpleDataArray { /** * Sets data at a given index . * @ param index the array index * @ param data the data ( byte array ) . * If < code > null < / code > , the data at the given index will be removed . * @ param scn the global scn indicating the sequence of this change * @ throws ArrayIndexOutOfBoundsException if the index is out of range . */ @ Override public void set ( int index , byte [ ] data , long scn ) throws Exception { } }
if ( data == null ) { set ( index , data , 0 , 0 , scn ) ; } else { set ( index , data , 0 , data . length , scn ) ; }
public class RestartServerCmd { public Any execute ( DeviceImpl device , Any in_any ) throws DevFailed { } }
Util . out4 . println ( "RestartServer.execute(): arrived " ) ; ( ( DServer ) ( device ) ) . restart_server ( ) ; return Util . return_empty_any ( "RestartServer" ) ;
public class PartitionKeyGenerators { /** * Produces a partition key from the device ID contained in an incoming request . * @ return partition key derived from device ID * @ throws PersistenceException if device ID cannot be retrieved */ public static Function < RequestEnvelope , String > deviceId ( ) { } }
return r -> Optional . ofNullable ( r ) . map ( RequestEnvelope :: getContext ) . map ( Context :: getSystem ) . map ( SystemState :: getDevice ) . map ( Device :: getDeviceId ) . orElseThrow ( ( ) -> new PersistenceException ( "Could not retrieve device ID from request envelope to generate persistence ID" ) ) ;
public class Gauge { /** * Defines the text that could be used in a tooltip as an * alert message . * @ param MESSAGE */ public void setAlertMessage ( final String MESSAGE ) { } }
if ( null == alertMessage ) { _alertMessage = MESSAGE ; fireUpdateEvent ( ALERT_EVENT ) ; } else { alertMessage . set ( MESSAGE ) ; }
public class SecurityFatHttpUtils { /** * This method creates a connection to a webpage and then returns the connection , it doesn ' t care what the response code is . * @ param server * The liberty server that is hosting the URL * @ param path * The path to the URL with the output to test ( excluding port and server information ) . For instance * " / someContextRoot / servlet1" * @ return The connection to the http address */ public static HttpURLConnection getHttpConnectionWithAnyResponseCode ( LibertyServer server , String path ) throws IOException { } }
int timeout = DEFAULT_TIMEOUT ; URL url = createURL ( server , path ) ; HttpURLConnection con = getHttpConnection ( url , timeout , HTTPRequestMethod . GET ) ; Log . info ( SecurityFatHttpUtils . class , "getHttpConnection" , "Connecting to " + url . toExternalForm ( ) + " expecting http response in " + timeout + " seconds." ) ; con . connect ( ) ; return con ;
public class VirtualNetworkGatewaysInner {

    /**
     * Updates a virtual network gateway tags.
     *
     * <p>Blocking wrapper around the async service call: waits for the single
     * response and unwraps its body.
     *
     * @param resourceGroupName The name of the resource group.
     * @param virtualNetworkGatewayName The name of the virtual network gateway.
     * @throws IllegalArgumentException thrown if parameters fail the validation
     * @throws CloudException thrown if the request is rejected by server
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent
     * @return the VirtualNetworkGatewayInner object if successful.
     */
    public VirtualNetworkGatewayInner beginUpdateTags(
            String resourceGroupName, String virtualNetworkGatewayName) {
        return beginUpdateTagsWithServiceResponseAsync(resourceGroupName, virtualNetworkGatewayName)
            .toBlocking().single().body();
    }
}
public class InterProcessSemaphoreV2 { /** * Set the data to put for the node created by this semaphore . This must be called prior to calling one * of the acquire ( ) methods . * @ param nodeData node data */ public void setNodeData ( byte [ ] nodeData ) { } }
this . nodeData = ( nodeData != null ) ? Arrays . copyOf ( nodeData , nodeData . length ) : null ;
public class GedLinkDocumentMongoToGedObjectConverterVisitor { /** * { @ inheritDoc } */ @ Override public final void visit ( final SourceLinkDocumentMongo document ) { } }
setGedObject ( new SourceLink ( getParent ( ) , "Source" , new ObjectId ( document . getString ( ) ) ) ) ;
public class PreparedModel { /** * Returns the element name for the class descriptor which is the adjusted short ( unqualified ) class * name . Also takes care that the element name does not clash with another class of the same short * name that maps to a different table though . * @ param classDesc The class descriptor * @ return The element name */ private String getElementName ( ClassDescriptor classDesc ) { } }
String elementName = classDesc . getClassNameOfObject ( ) . replace ( '$' , '_' ) ; elementName = elementName . substring ( elementName . lastIndexOf ( '.' ) + 1 ) ; Table table = getTableFor ( elementName ) ; int suffix = 0 ; while ( ( table != null ) && ! table . getName ( ) . equals ( classDesc . getFullTableName ( ) ) ) { ++ suffix ; table = getTableFor ( elementName + "-" + suffix ) ; } if ( suffix > 0 ) { elementName += "-" + suffix ; } return elementName ;
public class DateUtils { /** * Get the minutes difference */ public static int minutesDiff ( Date earlierDate , Date laterDate ) { } }
if ( earlierDate == null || laterDate == null ) { return 0 ; } return ( int ) ( ( laterDate . getTime ( ) / MINUTE_MILLIS ) - ( earlierDate . getTime ( ) / MINUTE_MILLIS ) ) ;
public class VimGenerator2 { /** * Append a Vim pattern . * @ param it the receiver of the generated elements . * @ param addNewLine indicates if a new line must be appended . * @ param name the name of the pattern . * @ param pattern the regular expression . * @ param contained indicates if the pattern is matching when it is into a container . * @ param contains the elements inside the matched region . * @ return { @ code it } . */ @ SuppressWarnings ( "static-method" ) protected IStyleAppendable appendMatch ( IStyleAppendable it , boolean addNewLine , String name , String pattern , boolean contained , String ... contains ) { } }
it . append ( "syn match " ) ; // $ NON - NLS - 1 $ it . append ( name ) ; if ( contained ) { it . append ( " contained" ) ; // $ NON - NLS - 1 $ } it . append ( " " ) ; // $ NON - NLS - 1 $ it . append ( regexString ( pattern ) ) ; if ( contains . length > 0 ) { it . append ( " contains=" ) . append ( contains [ 0 ] ) ; // $ NON - NLS - 1 $ for ( int i = 1 ; i < contains . length ; ++ i ) { it . append ( "," ) . append ( contains [ i ] ) ; // $ NON - NLS - 1 $ } } if ( addNewLine ) { it . newLine ( ) ; } return it ;
public class PolicyStatesInner {

    /**
     * Queries policy states for the resources under the management group.
     *
     * <p>Thin async adapter: wires the service-response observable to the caller's
     * callback via {@code ServiceFuture.fromResponse}.
     *
     * @param policyStatesResource The virtual resource under PolicyStates resource
     *        type. In a given time range, 'latest' represents the latest policy
     *        state(s), whereas 'default' represents all policy state(s). Possible
     *        values include: 'default', 'latest'
     * @param managementGroupName Management group name.
     * @param serviceCallback the async ServiceCallback to handle successful and failed responses.
     * @throws IllegalArgumentException thrown if parameters fail the validation
     * @return the {@link ServiceFuture} object
     */
    public ServiceFuture<PolicyStatesQueryResultsInner> listQueryResultsForManagementGroupAsync(
            PolicyStatesResource policyStatesResource, String managementGroupName,
            final ServiceCallback<PolicyStatesQueryResultsInner> serviceCallback) {
        return ServiceFuture.fromResponse(
            listQueryResultsForManagementGroupWithServiceResponseAsync(
                policyStatesResource, managementGroupName),
            serviceCallback);
    }
}
public class SpatialUtil { /** * Check that two spatial objects have the same dimensionality . * @ param box1 First object * @ param box2 Second object * @ return Dimensionality * @ throws IllegalArgumentException when the dimensionalities do not agree */ public static int assertSameDimensionality ( SpatialComparable box1 , SpatialComparable box2 ) { } }
final int dim = box1 . getDimensionality ( ) ; if ( dim != box2 . getDimensionality ( ) ) { throw new IllegalArgumentException ( "The spatial objects do not have the same dimensionality!" ) ; } return dim ;
public class Projection { /** * This will revert the current map ' s scaling and rotation for a point . This can be useful when * drawing to a fixed location on the screen . */ public Point unrotateAndScalePoint ( int x , int y , Point reuse ) { } }
return applyMatrixToPoint ( x , y , reuse , mUnrotateAndScaleMatrix , mOrientation != 0 ) ;
public class MemcachedNodesManager { /** * Return the nodeId for the given socket address . Returns < code > null < / code > * if the socket address is not known . * @ throws IllegalArgumentException thrown when the socketAddress is < code > null < / code > or not registered with this { @ link MemcachedNodesManager } . */ @ Nonnull public String getNodeId ( final InetSocketAddress socketAddress ) throws IllegalArgumentException { } }
if ( socketAddress == null ) { throw new IllegalArgumentException ( "SocketAddress must not be null." ) ; } final String result = _address2Ids . get ( socketAddress ) ; if ( result == null ) { throw new IllegalArgumentException ( "SocketAddress " + socketAddress + " not known (registered addresses: " + _address2Ids . keySet ( ) + ")." ) ; } return result ;
public class DscCompilationJobsInner { /** * Retrieve the job stream identified by job stream id . * @ param resourceGroupName Name of an Azure Resource group . * @ param automationAccountName The name of the automation account . * @ param jobId The job id . * @ param jobStreamId The job stream id . * @ throws IllegalArgumentException thrown if parameters fail the validation * @ return the observable to the JobStreamInner object */ public Observable < JobStreamInner > getStreamAsync ( String resourceGroupName , String automationAccountName , UUID jobId , String jobStreamId ) { } }
return getStreamWithServiceResponseAsync ( resourceGroupName , automationAccountName , jobId , jobStreamId ) . map ( new Func1 < ServiceResponse < JobStreamInner > , JobStreamInner > ( ) { @ Override public JobStreamInner call ( ServiceResponse < JobStreamInner > response ) { return response . body ( ) ; } } ) ;
public class PlainLauncher {

    /**
     * Creates the UI which the launcher displays. If there is misconfiguration or
     * error, a UI containing an error message is returned.
     *
     * @return the UI which the launcher displays.
     */
    protected WComponent createUI() {
        // Check if the parameter COMPONENT_TO_LAUNCH_PARAM_KEY has been
        // configured with the name of a component to launch.
        WComponent sharedApp;
        uiClassName = getComponentToLaunchClassName();
        if (uiClassName == null) {
            // No class configured: show setup instructions as raw (unencoded) HTML.
            sharedApp = new WText(
                "You need to set the class name of the WComponent you want to run.<br />"
                + "Do this by setting the parameter \"" + COMPONENT_TO_LAUNCH_PARAM_KEY
                + "\" in your \"local_app.properties\" file.<br />"
                + "Eg. <code>" + COMPONENT_TO_LAUNCH_PARAM_KEY
                + "=com.github.bordertech.wcomponents.examples.picker.ExamplePicker</code>");
            ((WText) sharedApp).setEncodeText(false);
        } else {
            // Resolve the configured class through the UI registry.
            UIRegistry registry = UIRegistry.getInstance();
            sharedApp = registry.getUI(uiClassName);
            if (sharedApp == null) {
                // Class name configured but not loadable / not a WComponent:
                // show a diagnostic message instead.
                sharedApp = new WText("Unable to load the component \"" + uiClassName + "\".<br />"
                    + "Either the component does not exist as a resource in the classpath,"
                    + " or is not a WComponent.<br />"
                    + "Check that the parameter \"" + COMPONENT_TO_LAUNCH_PARAM_KEY
                    + "\" is set correctly.");
                ((WText) sharedApp).setEncodeText(false);
            }
        }
        return sharedApp;
    }
}
public class CmsImageScaler { /** * Adds a filter name to the list of filters that should be applied to the image . < p > * @ param filter the filter name to add */ public void addFilter ( String filter ) { } }
if ( CmsStringUtil . isNotEmpty ( filter ) ) { filter = filter . trim ( ) . toLowerCase ( ) ; if ( FILTERS . contains ( filter ) ) { m_filters . add ( filter ) ; } }
public class SourceTreeManager { /** * Get the source tree from the input source . * @ param source The Source object that should identify the desired node . * @ param locator The location of the caller , for diagnostic purposes . * @ return non - null reference to a node . * @ throws TransformerException if the Source argument can ' t be resolved to * a node . */ public int getSourceTree ( Source source , SourceLocator locator , XPathContext xctxt ) throws TransformerException { } }
int n = getNode ( source ) ; if ( DTM . NULL != n ) return n ; n = parseToNode ( source , locator , xctxt ) ; if ( DTM . NULL != n ) putDocumentInCache ( n , source ) ; return n ;
public class ProcessExecutor { /** * Override this to customize how the waiting task is started in the background . * @ param < T > the type of the task * @ param executor the executor service to submit the task on * @ param task the task to be submitted * @ return the future of the task */ protected < T > Future < T > invokeSubmit ( ExecutorService executor , Callable < T > task ) { } }
return executor . submit ( wrapTask ( task ) ) ;
public class EFapsClassLoader { /** * Get the current EFapsClassLoader . * This static method is used to provide a way to use the same classloader * in different threads , due to the reason that using different classloader * instances might bring the problem of " instanceof " return unexpected results . * @ param _ parent parent classloader * @ return the current EFapsClassLoader */ public static synchronized EFapsClassLoader getOfflineInstance ( final ClassLoader _parent ) { } }
if ( EFapsClassLoader . CLASSLOADER == null ) { EFapsClassLoader . CLASSLOADER = new EFapsClassLoader ( _parent , true ) ; } return EFapsClassLoader . CLASSLOADER ;
public class WSFServlet { /** * Creates a ServletDelegate instance according to the STACK _ SERVLET _ DELEGATE _ CLASS init parameter . * The class is loaded through a ServletDelegateFactory that ' s retrieved as follows : * - if a default ClassLoaderProvider is available , the webservice subsystem classloader from it * is used to lookup the factory * - otherwise the current thread context classloader is used to lookup the factory . * @ param servletConfig servlet config * @ return the servlet delegate */ protected ServletDelegate getDelegate ( ServletConfig servletConfig ) { } }
ClassLoaderProvider clProvider = ClassLoaderProvider . getDefaultProvider ( ) ; ClassLoader cl = clProvider . getWebServiceSubsystemClassLoader ( ) ; ServiceLoader < ServletDelegateFactory > sl = ServiceLoader . load ( ServletDelegateFactory . class , cl ) ; ServletDelegateFactory factory = sl . iterator ( ) . next ( ) ; return factory . newServletDelegate ( servletConfig . getInitParameter ( STACK_SERVLET_DELEGATE_CLASS ) ) ;
public class GeneralizedCounter {

    /**
     * Adds to the count for the {@link #depth()}-dimensional key <code>l</code>.
     *
     * <p>Walks down the nested counter structure one key element at a time,
     * adding {@code count} to each intermediate level's total along the way, and
     * finally increments the one-dimensional count at the leaf level.
     */
    public void incrementCount(List<K> l, double count) {
        if (l.size() != depth) {
            wrongDepth(); // throws exception
        }
        GeneralizedCounter<K> next = this;
        Iterator<K> i = l.iterator();
        K o = i.next();
        // For every key element except the last: bump this level's total, then
        // descend into the sub-counter conditioned on the current element.
        while (i.hasNext()) {
            next.addToTotal(count);
            next = next.conditionalizeHelper(o);
            o = i.next();
        }
        // The leaf level stores the actual 1-D count for the final key element
        // (incrementCount1D presumably updates its own total as well —
        // NOTE(review): confirm against incrementCount1D's implementation).
        next.incrementCount1D(o, count);
    }
}
public class ManipulateResponse { /** * tag : : json [ ] */ @ Route ( method = HttpMethod . GET , uri = "/manipulate/json" ) public Result jsonResult ( ) { } }
ObjectNode node = new ObjectMapper ( ) . createObjectNode ( ) ; node . put ( "hello" , "world" ) ; return ok ( node ) ;
public class PluginWSCommons { /** * Write properties of the specified UpdateCenter to the specified JsonWriter . * < pre > * " updateCenterRefresh " : " 2015-04-24T16:08:36 + 0200" * < / pre > */ public static void writeUpdateCenterProperties ( JsonWriter json , Optional < UpdateCenter > updateCenter ) { } }
if ( updateCenter . isPresent ( ) ) { json . propDateTime ( PROPERTY_UPDATE_CENTER_REFRESH , updateCenter . get ( ) . getDate ( ) ) ; }
public class ReadFileExtensions {

    /**
     * Opens the given file and reads the first line from it.
     *
     * <p>The reader is closed automatically; the platform default charset is
     * used (as {@link FileReader} always does).
     *
     * @param inputFile the path to the file to read from
     * @return the first line of the file, or <code>null</code> if the file is empty
     * @throws FileNotFoundException if the file does not exist
     * @throws IOException if an I/O error occurs while reading
     */
    public static String readHeadLine(final String inputFile)
            throws FileNotFoundException, IOException {
        try (BufferedReader reader = new BufferedReader(new FileReader(inputFile))) {
            return reader.readLine();
        }
    }
}
public class TaskExecutor {

    /**
     * Waits until the first task completes, then calls the (optional) observers
     * to notify the completion and returns the result.
     *
     * @param futures the list of futures to wait for.
     * @param observers an optional set of observers.
     * @return the result of the first task to complete, or <code>null</code>
     *         when <code>futures</code> is empty.
     * @throws InterruptedException
     * @throws ExecutionException
     */
    public T waitForAny(List<Future<T>> futures,
            @SuppressWarnings("unchecked") TaskObserver<T>... observers)
            throws InterruptedException, ExecutionException {
        int count = futures.size();
        // The while guard only serves to skip the body when there are no futures;
        // the unconditional return below makes this loop run at most once — it
        // blocks on the completion queue for the first finished task's id.
        while (count-- > 0) {
            int id = queue.take();
            logger.debug("task '{}' complete (count: {}, queue: {})", id, count, queue.size());
            // The future is already complete, so get() returns immediately
            // (or rethrows the task's failure as ExecutionException).
            T result = futures.get(id).get();
            for (TaskObserver<T> observer : observers) {
                observer.onTaskComplete(tasks.get(id), result);
            }
            return result;
        }
        return null;
    }
}
public class RandomRotation { /** * Randomly rotates a vector using the random rotation matrix that was created in the constructor . * @ param vector * The initial vector * @ return The randomly rotated vector */ public double [ ] rotate ( double [ ] vector ) { } }
DenseMatrix64F transformed = new DenseMatrix64F ( 1 , vector . length ) ; DenseMatrix64F original = DenseMatrix64F . wrap ( 1 , vector . length , vector ) ; CommonOps . mult ( original , randomMatrix , transformed ) ; return transformed . getData ( ) ;
public class Serial { /** * < p > Sends an array of bytes to the serial port / device identified by the given file descriptor . < / p > * @ param fd * The file descriptor of the serial port / device . * @ param data * A ByteBuffer of data to be transmitted . * @ param offset * The starting index ( inclusive ) in the array to send from . * @ param length * The number of bytes from the byte array to transmit to the serial port . */ public synchronized static void write ( int fd , byte [ ] data , int offset , int length ) throws IOException { } }
// we make a copy of the data argument because we don ' t want to modify the original source data byte [ ] buffer = new byte [ length ] ; System . arraycopy ( data , offset , buffer , 0 , length ) ; // write the buffer contents to the serial port via JNI native method write ( fd , buffer , length ) ;
public class GriddedTileDao { /** * Delete by table name * @ param tableName * table name * @ return deleted count */ public int delete ( String tableName ) { } }
DeleteBuilder < GriddedTile , Long > db = deleteBuilder ( ) ; int deleted = 0 ; try { db . where ( ) . eq ( GriddedTile . COLUMN_TABLE_NAME , tableName ) ; PreparedDelete < GriddedTile > deleteQuery = db . prepare ( ) ; deleted = delete ( deleteQuery ) ; } catch ( SQLException e ) { throw new GeoPackageException ( "Failed to delete Gridded Tile by Table Name: " + tableName , e ) ; } return deleted ;
public class UsageIndex { /** * Build usage index for given collection of proto files . */ public static UsageIndex build ( Collection < Proto > protos ) { } }
UsageIndex usageIndex = new UsageIndex ( ) ; for ( Proto proto : protos ) { ProtoWalker . newInstance ( proto . getContext ( ) ) . onMessage ( message -> { for ( Field field : message . getFields ( ) ) { usageIndex . register ( field . getType ( ) , message ) ; } } ) . onService ( service -> { for ( ServiceMethod serviceMethod : service . getMethods ( ) ) { usageIndex . register ( serviceMethod . getArgType ( ) , service ) ; usageIndex . register ( serviceMethod . getReturnType ( ) , service ) ; } } ) . walk ( ) ; } return usageIndex ;
public class NodeImpl { /** * Return child Properties list . * @ return List of child Properties * @ throws RepositoryException * if error occurs * @ throws AccessDeniedException * if Nodes cannot be listed due to permissions on this Node */ private List < PropertyData > childPropertiesData ( ) throws RepositoryException , AccessDeniedException { } }
List < PropertyData > storedProps = new ArrayList < PropertyData > ( dataManager . getChildPropertiesData ( nodeData ( ) ) ) ; Collections . sort ( storedProps , new PropertiesDataOrderComparator < PropertyData > ( ) ) ; return storedProps ;
public class VirtualCdj {

    /**
     * Sends a master changed announcement to all registered master listeners.
     *
     * <p>Each listener is notified inside its own try/catch so that one
     * misbehaving listener cannot prevent the others from being told; failures
     * are logged and otherwise swallowed deliberately.
     *
     * @param update the message announcing the new tempo master
     */
    private void deliverMasterChangedAnnouncement(final DeviceUpdate update) {
        for (final MasterListener listener : getMasterListeners()) {
            try {
                listener.masterChanged(update);
            } catch (Throwable t) {
                logger.warn("Problem delivering master changed announcement to listener", t);
            }
        }
    }
}
public class OracleDatabase { /** * { @ inheritDoc } */ @ Override public long nextSequence ( final ConnectionResource _con , final String _name ) throws SQLException { } }
final long ret ; final String cmd = new StringBuilder ( ) . append ( "SELECT " + _name + ".nextval from dual" ) . toString ( ) ; final Statement stmt = _con . createStatement ( ) ; try { final ResultSet resultset = stmt . executeQuery ( cmd ) ; if ( resultset . next ( ) ) { ret = resultset . getLong ( 1 ) ; } else { throw new SQLException ( "fetching new value from sequence '" + _name + "' failed" ) ; } resultset . close ( ) ; } finally { stmt . close ( ) ; } return ret ;
public class CommerceTaxFixedRateAddressRelPersistenceImpl {

    /**
     * Caches the commerce tax fixed rate address rel in the entity cache if it
     * is enabled.
     *
     * <p>The entity is keyed by its primary key; its "original values" snapshot
     * is reset afterwards so subsequent dirty-checking compares against the
     * cached state.
     *
     * @param commerceTaxFixedRateAddressRel the commerce tax fixed rate address rel
     */
    @Override
    public void cacheResult(CommerceTaxFixedRateAddressRel commerceTaxFixedRateAddressRel) {
        entityCache.putResult(
            CommerceTaxFixedRateAddressRelModelImpl.ENTITY_CACHE_ENABLED,
            CommerceTaxFixedRateAddressRelImpl.class,
            commerceTaxFixedRateAddressRel.getPrimaryKey(),
            commerceTaxFixedRateAddressRel);
        commerceTaxFixedRateAddressRel.resetOriginalValues();
    }
}
public class MessagesApi { /** * Get normalized message presence * Get normalized message presence . * @ param startDate startDate ( required ) * @ param endDate endDate ( required ) * @ param interval String representing grouping interval . One of : & # 39 ; minute & # 39 ; ( 1 hour limit ) , & # 39 ; hour & # 39 ; ( 1 day limit ) , & # 39 ; day & # 39 ; ( 31 days limit ) , & # 39 ; month & # 39 ; ( 1 year limit ) , or & # 39 ; year & # 39 ; ( 10 years limit ) . ( required ) * @ param sdid Source device ID of the messages being searched . ( optional ) * @ param fieldPresence String representing a field from the specified device ID . ( optional ) * @ return FieldPresenceEnvelope * @ throws ApiException If fail to call the API , e . g . server error or cannot deserialize the response body */ public FieldPresenceEnvelope getFieldPresence ( Long startDate , Long endDate , String interval , String sdid , String fieldPresence ) throws ApiException { } }
ApiResponse < FieldPresenceEnvelope > resp = getFieldPresenceWithHttpInfo ( startDate , endDate , interval , sdid , fieldPresence ) ; return resp . getData ( ) ;
public class DefaultDependencyDescriptor { /** * { @ inheritDoc } */ @ Override public ArtifactSpec toArtifactSpec ( ) { } }
if ( spec == null ) { spec = new ArtifactSpec ( getScope ( ) , getGroup ( ) , getName ( ) , getVersion ( ) , getType ( ) , getClassifier ( ) , getFile ( ) ) ; } return spec ;
public class AbstractExcerpt {

    /**
     * {@inheritDoc}
     *
     * <p>Builds an excerpt for the node with the given UUID: locates its Lucene
     * document, concatenates the stored fulltext fields, and creates highlighted
     * fragments from the term position vector. Falls back to
     * {@code SimpleExcerptProvider} when the fulltext field is not stored, and
     * returns <code>null</code> when the node is not indexed or no term position
     * vector is available.
     */
    public String getExcerpt(String id, int maxFragments, int maxFragmentSize)
            throws IOException {
        IndexReader reader = index.getIndexReader();
        try {
            checkRewritten(reader);
            // Find the single document whose UUID field matches the node id.
            Term idTerm = new Term(FieldNames.UUID, id);
            TermDocs tDocs = reader.termDocs(idTerm);
            int docNumber;
            Document doc;
            try {
                if (tDocs.next()) {
                    docNumber = tDocs.doc();
                    doc = reader.document(docNumber);
                } else {
                    // node not found in index
                    return null;
                }
            } finally {
                tDocs.close();
            }
            Fieldable[] fields = doc.getFieldables(FieldNames.FULLTEXT);
            if (fields == null) {
                // Fulltext not stored: delegate to the simple provider instead.
                LOG.debug("Fulltext field not stored, using {}",
                    SimpleExcerptProvider.class.getName());
                SimpleExcerptProvider exProvider = new SimpleExcerptProvider();
                exProvider.init(query, index);
                return exProvider.getExcerpt(id, maxFragments, maxFragmentSize);
            }
            // Join all non-empty fulltext field values with single spaces.
            StringBuilder text = new StringBuilder();
            String separator = "";
            for (int i = 0; i < fields.length; i++) {
                if (fields[i].stringValue().length() > 0) {
                    text.append(separator);
                    text.append(fields[i].stringValue());
                    separator = " ";
                }
            }
            // Highlighting needs term positions; without them no excerpt is made.
            TermFreqVector tfv = reader.getTermFreqVector(docNumber, FieldNames.FULLTEXT);
            if (tfv instanceof TermPositionVector) {
                return createExcerpt((TermPositionVector) tfv, text.toString(),
                    maxFragments, maxFragmentSize);
            } else {
                LOG.debug("No TermPositionVector on Fulltext field.");
                return null;
            }
        } finally {
            Util.closeOrRelease(reader);
        }
    }
}
public class ZooKeeperMasterModel {

    /**
     * Remove a deployment group.
     *
     * <p>If successful, all ZK nodes associated with the DG will be deleted. Specifically these
     * nodes are guaranteed to be non-existent after a successful remove (not all of them might
     * exist before, though):
     * <ul>
     * <li>/config/deployment-groups/[group-name]</li>
     * <li>/status/deployment-groups/[group-name]</li>
     * <li>/status/deployment-groups/[group-name]/hosts</li>
     * <li>/status/deployment-groups/[group-name]/removed</li>
     * <li>/status/deployment-group-tasks/[group-name]</li>
     * </ul>
     * If the operation fails no ZK nodes will be removed (all deletes happen in one transaction).
     *
     * @throws DeploymentGroupDoesNotExistException If the DG does not exist.
     */
    @Override
    public void removeDeploymentGroup(final String name) throws DeploymentGroupDoesNotExistException {
        log.info("removing deployment-group: name={}", name);
        final ZooKeeperClient client = provider.get("removeDeploymentGroup");
        try {
            // make sure the parent paths exist so the transaction below cannot fail on them
            client.ensurePath(Paths.configDeploymentGroups());
            client.ensurePath(Paths.statusDeploymentGroups());
            client.ensurePath(Paths.statusDeploymentGroupTasks());
            final List<ZooKeeperOperation> operations = Lists.newArrayList();
            final List<String> paths = ImmutableList.of(
                    Paths.configDeploymentGroup(name),
                    Paths.statusDeploymentGroup(name),
                    Paths.statusDeploymentGroupHosts(name),
                    Paths.statusDeploymentGroupRemovedHosts(name),
                    Paths.statusDeploymentGroupTasks(name));
            // For each deployment group path:
            // * If it exists: delete it.
            // * If it doesn't exist, add and delete it in the same transaction. This is a
            //   round-about way of ensuring that it wasn't created when we commit the transaction.
            // This is particularly important for /status/deployment-group-tasks/[group-name], which
            // might exist if a rolling-update is in progress. To avoid inconsistent state we make
            // sure it's deleted if it does exist.
            // Having /status/deployment-group-tasks/[group-name] for removed groups around will
            // cause DGs to become slower and spam logs with errors so we want to avoid it.
            for (final String path : paths) {
                if (client.exists(path) == null) {
                    operations.add(create(path));
                }
            }
            // delete children before parents, hence the reverse order
            for (final String path : Lists.reverse(paths)) {
                operations.add(delete(path));
            }
            client.transaction(operations);
        } catch (final NoNodeException e) {
            // the config node was missing: the group never existed (or was already removed)
            throw new DeploymentGroupDoesNotExistException(name);
        } catch (final KeeperException e) {
            throw new HeliosRuntimeException("removing deployment-group " + name + " failed", e);
        }
    }
}
public class JsonbDeSerializer {

    /**
     * Initializes the instance with necessary registries. May only be called once;
     * wires the given registries into every registered (de)serializer that declares
     * it needs them via the *RegistryRequired marker interfaces.
     *
     * @param typeRegistry
     *            Mapping from type name to type class.
     * @param deserRegistry
     *            Mapping from type name to deserializers.
     * @param serRegistry
     *            Mapping from type name to serializers.
     * @throws IllegalStateException if called twice, or if a (de)serializer requires
     *            a registry that was passed as {@code null}.
     */
    public void init(final SerializedDataTypeRegistry typeRegistry,
            final DeserializerRegistry deserRegistry, final SerializerRegistry serRegistry) {
        if (initialized) {
            throw new IllegalStateException(
                    "Instance already initialized - Don't call the init methods more than once");
        }
        this.typeRegistry = typeRegistry;
        // inject the required registries into each deserializer
        for (final JsonbDeserializer<?> deserializer : deserializers) {
            if (deserializer instanceof DeserializerRegistryRequired) {
                if (deserRegistry == null) {
                    throw new IllegalStateException(
                            "There is at least one deserializer that requires a 'DeserializerRegistry', but you didn't provide one (deserializer="
                                    + deserializer.getClass().getName() + ")");
                }
                final DeserializerRegistryRequired des = (DeserializerRegistryRequired) deserializer;
                des.setRegistry(deserRegistry);
            }
            if (deserializer instanceof SerializedDataTypeRegistryRequired) {
                if (typeRegistry == null) {
                    throw new IllegalStateException(
                            "There is at least one deserializer that requires a 'SerializedDataTypeRegistry', but you didn't provide one (deserializer="
                                    + deserializer.getClass().getName() + ")");
                }
                final SerializedDataTypeRegistryRequired des = (SerializedDataTypeRegistryRequired) deserializer;
                des.setRegistry(typeRegistry);
            }
        }
        // inject the required registries into each serializer
        for (final JsonbSerializer<?> serializer : serializers) {
            if (serializer instanceof SerializerRegistryRequired) {
                if (serRegistry == null) {
                    throw new IllegalStateException(
                            "There is at least one serializer that requires a 'SerializerRegistry', but you didn't provide one (serializer="
                                    + serializer.getClass().getName() + ")");
                }
                final SerializerRegistryRequired ser = (SerializerRegistryRequired) serializer;
                ser.setRegistry(serRegistry);
            }
            if (serializer instanceof SerializedDataTypeRegistryRequired) {
                if (typeRegistry == null) {
                    throw new IllegalStateException(
                            "There is at least one serializer that requires a 'SerializedDataTypeRegistry', but you didn't provide one (serializer="
                                    + serializer.getClass().getName() + ")");
                }
                final SerializedDataTypeRegistryRequired ser = (SerializedDataTypeRegistryRequired) serializer;
                ser.setRegistry(typeRegistry);
            }
        }
        // mark initialized only after all wiring succeeded
        initialized = true;
    }
}
public class BoUtils { /** * De - serialize byte array to " document " . * @ param data * @ return * @ since 0.10.0 */ @ SuppressWarnings ( "unchecked" ) public static Map < String , Object > bytesToDocument ( byte [ ] data ) { } }
return data != null && data . length > 0 ? SerializationUtils . fromByteArrayFst ( data , Map . class ) : null ;
public class ReceiveMessageBuilder { /** * Extract message header entry as variable . * @ param headerName * @ param variable * @ return */ public T extractFromHeader ( String headerName , String variable ) { } }
if ( headerExtractor == null ) { headerExtractor = new MessageHeaderVariableExtractor ( ) ; getAction ( ) . getVariableExtractors ( ) . add ( headerExtractor ) ; } headerExtractor . getHeaderMappings ( ) . put ( headerName , variable ) ; return self ;
public class HibernateMappingContextConfiguration { /** * Check whether any of the configured entity type filters matches * the current class descriptor contained in the metadata reader . */ protected boolean matchesFilter ( MetadataReader reader , MetadataReaderFactory readerFactory ) throws IOException { } }
for ( TypeFilter filter : ENTITY_TYPE_FILTERS ) { if ( filter . match ( reader , readerFactory ) ) { return true ; } } return false ;
public class Table { /** * Returns array for a new row with SQL DEFAULT value for each column n * where exists [ n ] is false . This provides default values only where * required and avoids evaluating these values where they will be * overwritten . */ Object [ ] getNewRowData ( Session session ) { } }
Object [ ] data = new Object [ getColumnCount ( ) ] ; int i ; if ( hasDefaultValues ) { for ( i = 0 ; i < getColumnCount ( ) ; i ++ ) { Expression def = colDefaults [ i ] ; if ( def != null ) { data [ i ] = def . getValue ( session , colTypes [ i ] ) ; } } } return data ;
public class CharacterRangeImpl { /** * < ! - - begin - user - doc - - > * < ! - - end - user - doc - - > * @ generated */ @ Override public boolean eIsSet ( int featureID ) { } }
switch ( featureID ) { case XtextPackage . CHARACTER_RANGE__LEFT : return left != null ; case XtextPackage . CHARACTER_RANGE__RIGHT : return right != null ; } return super . eIsSet ( featureID ) ;
public class HyperionClient {

    /**
     * Execute the request.
     *
     * <p>Builds the OkHttp request, logs request/response details at the appropriate
     * levels, retries once on 401 when the authorization factory allows it, buffers
     * the response body for debug logging, and converts error responses (>= 400)
     * into exceptions via {@code readException}.</p>
     *
     * @param request The data service request
     * @return The HTTP response
     * @throws ClientConnectionException if an I/O error occurs while calling the service
     */
    protected Response executeRequest(Request request) {
        try {
            com.squareup.okhttp.Request httpRequest = buildHttpRequest(request);
            if (logger.isInfoEnabled())
                logger.info("Sending request: {} {}", httpRequest.method(), httpRequest.urlString());
            if (logger.isDebugEnabled() && request.getRequestMethod().isBodyRequest()) {
                // materialize the request body into a buffer so it can be logged
                Buffer buffer = new Buffer();
                httpRequest.body().writeTo(buffer);
                if (maxLoggedBodySize == -1 || buffer.size() <= maxLoggedBodySize)
                    logger.debug("Request body: {}", buffer.readUtf8());
                else
                    logger.debug("Request body not captured: too large. ");
            }
            if (logger.isTraceEnabled())
                logger.trace("Request headers: {}", httpRequest.headers().toString());
            Response response = client.newCall(httpRequest).execute();
            // one retry on 401 when the auth factory supports it (resetting cached credentials first)
            if (response.code() == HttpURLConnection.HTTP_UNAUTHORIZED && authorizationFactory != null
                    && authorizationFactory.retryOnAuthenticationError()) {
                if (authorizationFactory instanceof ResettableAuthorizationFactory)
                    ((ResettableAuthorizationFactory) authorizationFactory).reset();
                response = client.newCall(httpRequest).execute();
            }
            logger.info("Response code: {}", response.code());
            if (logger.isTraceEnabled())
                logger.trace("Response headers: {}", response.headers().toString());
            if (logger.isDebugEnabled()) {
                // the body stream is one-shot: copy it so it can be both logged and returned
                ByteArrayOutputStream copy = new ByteArrayOutputStream();
                copy(response.body().byteStream(), copy);
                response = response.newBuilder()
                        .body(ResponseBody.create(response.body().contentType(), copy.toByteArray()))
                        .build();
                if (maxLoggedBodySize == -1 || copy.size() <= maxLoggedBodySize)
                    logger.debug("Response body: {}", copy.toString());
                else
                    logger.debug("Response body not captured: too large.");
            }
            if (response.code() >= HttpURLConnection.HTTP_BAD_REQUEST) {
                // 4xx/5xx: translate the response into a service exception
                throw readException(response);
            }
            return response;
        } catch (IOException e) {
            throw new ClientConnectionException("Error calling service.", e);
        }
    }
}
public class Forwarder { /** * Clear history to the first matched locationId . For example , current history is * A - > B - > A - > C - > B , clearToLocationId ( " A " ) will pop B and C and leave the back stack as A - > B - > A . * < p > Note that , if { @ link # clearAll ( ) } is called , this method has no effect < / p > * @ param clearTo The presenter below the next location after clearing history * @ return This instance */ public Forwarder clearTo ( @ NotNull Class < ? extends Controller > clearTo ) { } }
clearHistory = true ; clearToLocationId = clearTo . getName ( ) ; return this ;
public class HiveProxyQueryExecutor { /** * Execute query . * @ param query the query * @ param proxy the proxy * @ throws SQLException the sql exception */ public void executeQuery ( String query , Optional < String > proxy ) throws SQLException { } }
executeQueries ( Collections . singletonList ( query ) , proxy ) ;
public class VpcSecurityGroupMembershipMarshaller { /** * Marshall the given parameter object . */ public void marshall ( VpcSecurityGroupMembership vpcSecurityGroupMembership , ProtocolMarshaller protocolMarshaller ) { } }
if ( vpcSecurityGroupMembership == null ) { throw new SdkClientException ( "Invalid argument passed to marshall(...)" ) ; } try { protocolMarshaller . marshall ( vpcSecurityGroupMembership . getVpcSecurityGroupId ( ) , VPCSECURITYGROUPID_BINDING ) ; protocolMarshaller . marshall ( vpcSecurityGroupMembership . getStatus ( ) , STATUS_BINDING ) ; } catch ( Exception e ) { throw new SdkClientException ( "Unable to marshall request to JSON: " + e . getMessage ( ) , e ) ; }
public class WritableFactory { /** * Create a new writable instance ( using reflection ) given the specified key * @ param writableTypeKey Key to create a new writable instance for * @ return A new ( empty / default ) Writable instance */ public Writable newWritable ( short writableTypeKey ) { } }
Constructor < ? extends Writable > c = constructorMap . get ( writableTypeKey ) ; if ( c == null ) { throw new IllegalStateException ( "Unknown writable key: " + writableTypeKey ) ; } try { return c . newInstance ( ) ; } catch ( Exception e ) { throw new RuntimeException ( "Could not create new Writable instance" ) ; }
public class BucketPath { /** * A wrapper around * { @ link BucketPath # escapeString ( String , Map , TimeZone , boolean , int , int , * boolean ) } * with the timezone set to the default . */ public static String escapeString ( String in , Map < String , String > headers , boolean needRounding , int unit , int roundDown ) { } }
return escapeString ( in , headers , null , needRounding , unit , roundDown , false ) ;
public class Debug {

    /**
     * Handles the debug-console form submission: updates logger options, starts/stops
     * log sinks, adds a new sink, or deletes stopped sinks, then redirects back to
     * the console (with a time-based cache-busting path segment).
     *
     * @param request  form parameters: "Action" selects the operation; the remaining
     *                 parameters are per-option checkboxes/fields
     * @param response redirected back to this servlet on completion
     */
    public void doPost(HttpServletRequest request, HttpServletResponse response)
            throws ServletException, IOException {
        // NOTE(review): target is never assigned, so the "#fragment" suffix on the
        // redirect below is effectively dead — confirm whether this is intentional.
        String target = null;
        Log l = LogFactory.getLog(Debug.class);
        // this console only works with the concrete LogImpl; bail out otherwise
        if (!(l instanceof LogImpl))
            return;
        LogImpl log = (LogImpl) l;
        String action = request.getParameter("Action");
        if ("Set Options".equals(action)) {
            // checkbox convention: parameter value "on" means enabled
            log.setDebug("on".equals(request.getParameter("D")));
            log.setSuppressWarnings("on".equals(request.getParameter("W")));
            String v = request.getParameter("V");
            if (v != null && v.length() > 0)
                log.setVerbose(Integer.parseInt(v));
            else
                log.setVerbose(0);
            log.setDebugPatterns(request.getParameter("P"));
            // per-sink options: parameters are suffixed with the sink index s
            LogSink[] sinks = log.getLogSinks();
            for (int s = 0; sinks != null && s < sinks.length; s++) {
                if (sinks[s] == null)
                    continue;
                if ("on".equals(request.getParameter("LSS" + s))) {
                    // requested started: start it if not already running
                    if (!sinks[s].isStarted())
                        try {
                            sinks[s].start();
                        } catch (Exception e) {
                            log.warn(e);
                        }
                } else {
                    // requested stopped: stop it if running
                    if (sinks[s].isStarted())
                        try {
                            sinks[s].stop();
                        } catch (InterruptedException e) {
                            /* deliberately ignored: best-effort stop */
                        }
                }
                // NOTE(review): 'options' is read but never used afterwards — confirm
                // whether a per-sink option update was intended here.
                String options = request.getParameter("LO" + s);
                if (options == null)
                    options = "";
                if (sinks[s] instanceof OutputStreamLogSink) {
                    OutputStreamLogSink sink = (OutputStreamLogSink) sinks[s];
                    sink.setLogTags("on".equals(request.getParameter("LT" + s)));
                    sink.setLogLabels("on".equals(request.getParameter("LL" + s)));
                    sink.setLogStackSize("on".equals(request.getParameter("Ls" + s)));
                    sink.setLogStackTrace("on".equals(request.getParameter("LS" + s)));
                    sink.setSuppressStack("on".equals(request.getParameter("SS" + s)));
                    sink.setLogOneLine("on".equals(request.getParameter("SL" + s)));
                    sink.setFilename(request.getParameter("LF" + s));
                }
            }
        } else if ("Add LogSink".equals(action)) {
            System.err.println("add log sink " + request.getParameter("LSC"));
            try {
                // LSC is the sink class name to instantiate and register
                log.add(request.getParameter("LSC"));
            } catch (Exception e) {
                log.warn(e);
            }
        } else if ("Delete Stopped Sinks".equals(action)) {
            log.deleteStoppedLogSinks();
        }
        // redirect back; the base-36 timestamp path segment defeats browser caching
        response.sendRedirect(request.getContextPath() + request.getServletPath() + "/"
                + Long.toString(System.currentTimeMillis(), 36)
                + (target != null ? ("#" + target) : ""));
    }
}
public class IdentityHttpHeaderProcessor { /** * / * ( non - Javadoc ) * @ see org . archive . wayback . replay . HttpHeaderProcessor # filter ( java . util . Map , java . lang . String , java . lang . String , org . archive . wayback . ResultURIConverter , org . archive . wayback . core . CaptureSearchResult ) */ public void filter ( Map < String , String > output , String key , String value , ResultURIConverter uriConverter , CaptureSearchResult result ) { } }
if ( key . equalsIgnoreCase ( HTTP_TRANSFER_ENCODING_HEADER_UP ) ) preserve ( output , key , value ) ; else output . put ( key , value ) ;
public class DescribeUsersRequestMarshaller { /** * Marshall the given parameter object . */ public void marshall ( DescribeUsersRequest describeUsersRequest , ProtocolMarshaller protocolMarshaller ) { } }
if ( describeUsersRequest == null ) { throw new SdkClientException ( "Invalid argument passed to marshall(...)" ) ; } try { protocolMarshaller . marshall ( describeUsersRequest . getAuthenticationType ( ) , AUTHENTICATIONTYPE_BINDING ) ; protocolMarshaller . marshall ( describeUsersRequest . getMaxResults ( ) , MAXRESULTS_BINDING ) ; protocolMarshaller . marshall ( describeUsersRequest . getNextToken ( ) , NEXTTOKEN_BINDING ) ; } catch ( Exception e ) { throw new SdkClientException ( "Unable to marshall request to JSON: " + e . getMessage ( ) , e ) ; }
public class BasicExecutionHandler { /** * < p > Throws a { @ link InvocationException } with the { @ link InvocationContext } . < / p > * < p > See { @ link ExecutionHandler # onError ( InvocationContext , Exception ) } < / p > * @ param context * the { @ link InvocationContext } with information on the proxy invocation * < br > < br > * @ param error * the root { @ link Exception } which resulted in a request execution error * < br > < br > * @ since 1.3.0 */ @ Override public void onError ( InvocationContext context , Exception error ) { } }
throw InvocationException . newInstance ( context , error ) ;
public class ModelElementTypeImpl { /** * Resolve all types recursively which are extending this type * @ param allExtendingTypes set of calculated extending types */ public void resolveExtendingTypes ( Set < ModelElementType > allExtendingTypes ) { } }
for ( ModelElementType modelElementType : extendingTypes ) { ModelElementTypeImpl modelElementTypeImpl = ( ModelElementTypeImpl ) modelElementType ; if ( ! allExtendingTypes . contains ( modelElementTypeImpl ) ) { allExtendingTypes . add ( modelElementType ) ; modelElementTypeImpl . resolveExtendingTypes ( allExtendingTypes ) ; } }
public class AbcGrammar {

    /**
     * nth-repeat ::= "[" (nth-repeat-num / nth-repeat-text)
     * <p>For compatibility, also accepts (":|" / "|") followed by nth-repeat-num.</p>
     * <p>Alternative order matters in a PEG: the "[" form is tried before the
     * barline-compatibility form.</p>
     */
    Rule NthRepeat() {
        return FirstOf(
                // canonical form: "[" then a repeat number or repeat text
                SequenceS(String("[").label(Barline).suppressSubnodes(),
                        FirstOfS(NthRepeatNum(), NthRepeatText())),
                // compatibility form: ":|" or "|" barline followed by a repeat number
                SequenceS(Sequence(ZeroOrMore(':'), "|").label(Barline).suppressSubnodes(),
                        NthRepeatNum()))
                .label(NthRepeat);
    }
}
public class Bzip2BitReader {

    /**
     * Reads up to 32 bits from the {@link ByteBuf}.
     *
     * <p>Refills the internal 64-bit buffer from the input when it holds fewer than
     * {@code count} bits, reading 1-4 bytes depending on availability, then returns
     * the top {@code count} bits right-aligned.</p>
     *
     * @param count The number of bits to read (maximum {@code 32} as a size of {@code int})
     * @return The bits requested, right-aligned within the integer
     */
    int readBits(final int count) {
        if (count < 0 || count > 32) {
            throw new IllegalArgumentException("count: " + count + " (expected: 0-32 )");
        }
        int bitCount = this.bitCount;
        long bitBuffer = this.bitBuffer;
        if (bitCount < count) {
            long readData;
            int offset;
            // read as many whole bytes as are available, up to 4
            switch (in.readableBytes()) {
                case 1: {
                    readData = in.readUnsignedByte();
                    offset = 8;
                    break;
                }
                case 2: {
                    readData = in.readUnsignedShort();
                    offset = 16;
                    break;
                }
                case 3: {
                    readData = in.readUnsignedMedium();
                    offset = 24;
                    break;
                }
                default: {
                    readData = in.readUnsignedInt();
                    offset = 32;
                    break;
                }
            }
            // append the new bits below the existing ones
            bitBuffer = bitBuffer << offset | readData;
            bitCount += offset;
            this.bitBuffer = bitBuffer;
        }
        this.bitCount = bitCount -= count;
        // mask: (1 << 32) would overflow to 1, hence the special case for count == 32
        return (int) (bitBuffer >>> bitCount & (count != 32 ? (1 << count) - 1 : 0xFFFFFFFFL));
    }
}
public class DefaultJiraClient {

    /**
     * Construct a Feature object from a raw Jira issue JSON object.
     *
     * <p>Maps id/key/epic/type/status/summary/estimates/project/team/owners/sprint/
     * assignee fields from the issue into a new {@code Feature}. Fields that have no
     * Jira equivalent are set to empty strings or empty lists.</p>
     *
     * @param issue raw Jira issue JSON
     * @param board team (board) the issue belongs to, or {@code null} to fall back
     *              to the configured Jira team field
     * @return populated Feature
     */
    @SuppressWarnings("PMD.NPathComplexity")
    protected Feature getFeature(JSONObject issue, Team board) {
        Feature feature = new Feature();
        feature.setsId(getString(issue, "id"));
        feature.setsNumber(getString(issue, "key"));
        JSONObject fields = (JSONObject) issue.get("fields");
        JSONObject epic = (JSONObject) fields.get("epic");
        // prefer the expanded epic object's id; fall back to the configured epic-id custom field
        String epicId = getString(fields, featureSettings.getJiraEpicIdFieldName());
        feature.setsEpicID(epic != null ? getString(epic, "id") : epicId);
        JSONObject issueType = (JSONObject) fields.get("issuetype");
        if (issueType != null) {
            feature.setsTypeId(getString(issueType, "id"));
            feature.setsTypeName(getString(issueType, "name"));
        }
        JSONObject status = (JSONObject) fields.get("status");
        String sStatus = getStatus(status);
        feature.setsState(sStatus);
        feature.setsStatus(feature.getsState());
        String summary = getString(fields, "summary");
        feature.setsName(summary);
        // build the browse URL, avoiding a double slash when the base URL ends with "/"
        feature.setsUrl(featureSettings.getJiraBaseUrl()
                + (featureSettings.getJiraBaseUrl().endsWith("/") ? "" : "/")
                + "browse/" + feature.getsNumber());
        long aggEstimate = getLong(fields, "aggregatetimeoriginalestimate");
        // NOTE(review): 'estimate != 0' auto-unboxes; if getLong can return null this
        // throws NPE — confirm getLong's null contract.
        Long estimate = getLong(fields, "timeoriginalestimate");
        int originalEstimate = 0;
        // Tasks use timetracking, stories use aggregatetimeoriginalestimate and aggregatetimeestimate
        if (estimate != 0) {
            originalEstimate = estimate.intValue();
        } else if (aggEstimate != 0) {
            // this value is in seconds
            originalEstimate = Math.round((float) aggEstimate / 3600);
        }
        feature.setsEstimateTime(originalEstimate);
        String storyPoints = getString(fields, featureSettings.getJiraStoryPointsFieldName());
        feature.setsEstimate(storyPoints);
        feature.setChangeDate(getString(fields, "updated"));
        feature.setIsDeleted("False");
        JSONObject project = (JSONObject) fields.get("project");
        feature.setsProjectID(project != null ? getString(project, "id") : "");
        feature.setsProjectName(project != null ? getString(project, "name") : "");
        // sProjectBeginDate - does not exist in Jira
        feature.setsProjectBeginDate("");
        // sProjectEndDate - does not exist in Jira
        feature.setsProjectEndDate("");
        // sProjectChangeDate - does not exist for this asset level in Jira
        feature.setsProjectChangeDate("");
        // sProjectState - does not exist in Jira
        feature.setsProjectState("");
        // sProjectIsDeleted - does not exist in Jira
        feature.setsProjectIsDeleted("False");
        // sProjectPath - does not exist in Jira
        feature.setsProjectPath("");
        if (board != null) {
            feature.setsTeamID(board.getTeamId());
            feature.setsTeamName(board.getName());
        } else {
            // no board supplied: read the team from the configured custom field
            JSONObject team = (JSONObject) fields.get(featureSettings.getJiraTeamFieldName());
            if (team != null) {
                feature.setsTeamID(getString(team, "id"));
                feature.setsTeamName(getString(team, "value"));
            }
        }
        // sTeamChangeDate - not able to retrieve at this asset level from Jira
        feature.setsTeamChangeDate("");
        // sTeamAssetState
        feature.setsTeamAssetState("");
        // sTeamIsDeleted
        feature.setsTeamIsDeleted("False");
        // sOwnersState - does not exist in Jira at this level
        feature.setsOwnersState(Collections.singletonList("Active"));
        // sOwnersChangeDate - does not exist in Jira
        feature.setsOwnersChangeDate(Collections.EMPTY_LIST);
        // sOwnersIsDeleted - does not exist in Jira
        feature.setsOwnersIsDeleted(Collections.EMPTY_LIST);
        // issueLinks
        JSONArray issueLinkArray = (JSONArray) fields.get("issuelinks");
        feature.setIssueLinks(getIssueLinks(issueLinkArray));
        Sprint sprint = getSprint(fields);
        if (sprint != null) {
            processSprintData(feature, sprint);
        }
        JSONObject assignee = (JSONObject) fields.get("assignee");
        processAssigneeData(feature, assignee);
        return feature;
    }
}
public class AbstractValidate { /** * Method without varargs to increase performance */ public < T > T [ ] noNullElements ( final T [ ] array , final String message ) { } }
notNull ( array ) ; final int index = indexOfNullElement ( array ) ; if ( index != - 1 ) { fail ( String . format ( message , index ) ) ; } return array ;
public class AbstractRedisStorage { /** * Check if the trigger identified by the given key exists * @ param triggerKey the key of the desired trigger * @ param jedis a thread - safe Redis connection * @ return true if the trigger exists ; false otherwise */ public boolean checkExists ( TriggerKey triggerKey , T jedis ) { } }
return jedis . exists ( redisSchema . triggerHashKey ( triggerKey ) ) ;
public class Hashes {

    /**
     * Preprocesses a bit vector so that SpookyHash 4-word-state can be computed
     * in constant time on all prefixes.
     *
     * <p>Runs the SpookyHash short-mix rounds over the vector in 128-bit steps and
     * records the four state words after every step. Do not reorder or alter the
     * rotate/add/xor sequence: it is the hash definition.</p>
     *
     * @param bv
     *            a bit vector.
     * @param seed
     *            a seed for the hash.
     * @return an array containing the four internal words of state during the
     *         hash computation, or {@code null} when the vector is shorter than
     *         128 bits; it can be passed to
     *         {@link #spooky4(BitVector, long, long, long[], long[])} (and
     *         analogous methods).
     * @see #spooky4(BitVector, long)
     */
    public static long[] preprocessSpooky4(final BitVector bv, final long seed) {
        final long length = bv.length();
        // vectors shorter than two words have no prefix states to precompute
        if (length < Long.SIZE * 2) return null;
        // four state words per 4-word (256-bit) stride of input
        final long[] state = new long[4 * (int) (length + Long.SIZE * 2) / (4 * Long.SIZE)];
        long h0, h1, h2, h3;
        h0 = seed;
        h1 = seed;
        h2 = ARBITRARY_BITS;
        h3 = ARBITRARY_BITS;
        long remaining = length;
        long pos = 0;
        int p = 0;
        for (;;) {
            // absorb the next two 64-bit words
            h2 += bv.getLong(pos + 0 * Long.SIZE, pos + 1 * Long.SIZE);
            h3 += bv.getLong(pos + 1 * Long.SIZE, pos + 2 * Long.SIZE);
            // SpookyHash ShortMix: fixed rotate/add/xor schedule
            h2 = Long.rotateLeft(h2, 50); h2 += h3; h0 ^= h2;
            h3 = Long.rotateLeft(h3, 52); h3 += h0; h1 ^= h3;
            h0 = Long.rotateLeft(h0, 30); h0 += h1; h2 ^= h0;
            h1 = Long.rotateLeft(h1, 41); h1 += h2; h3 ^= h1;
            h2 = Long.rotateLeft(h2, 54); h2 += h3; h0 ^= h2;
            h3 = Long.rotateLeft(h3, 48); h3 += h0; h1 ^= h3;
            h0 = Long.rotateLeft(h0, 38); h0 += h1; h2 ^= h0;
            h1 = Long.rotateLeft(h1, 37); h1 += h2; h3 ^= h1;
            h2 = Long.rotateLeft(h2, 62); h2 += h3; h0 ^= h2;
            h3 = Long.rotateLeft(h3, 34); h3 += h0; h1 ^= h3;
            h0 = Long.rotateLeft(h0, 5);  h0 += h1; h2 ^= h0;
            h1 = Long.rotateLeft(h1, 36); h1 += h2; h3 ^= h1;
            // snapshot the state so prefix hashes can resume from here
            state[p + 0] = h0;
            state[p + 1] = h1;
            state[p + 2] = h2;
            state[p + 3] = h3;
            p += 4;
            if (remaining >= Long.SIZE * 6) {
                // absorb two more words and advance a full 4-word stride
                h0 += bv.getLong(pos + 2 * Long.SIZE, pos + 3 * Long.SIZE);
                h1 += bv.getLong(pos + 3 * Long.SIZE, pos + 4 * Long.SIZE);
                remaining -= 4 * Long.SIZE;
                pos += 4 * Long.SIZE;
            } else return state;
        }
    }
}
public class RemoteEnvironment { @ Override public JobExecutionResult execute ( String jobName ) throws Exception { } }
PlanExecutor executor = getExecutor ( ) ; Plan p = createProgramPlan ( jobName ) ; // Session management is disabled , revert this commit to enable // p . setJobId ( jobID ) ; // p . setSessionTimeout ( sessionTimeout ) ; JobExecutionResult result = executor . executePlan ( p ) ; this . lastJobExecutionResult = result ; return result ;
public class FirefoxFilter {

    /**
     * Extracts the Firefox-relevant capabilities from the incoming capability map:
     * keeps {@code browserName=firefox}, legacy {@code firefox_*} keys and
     * {@code moz:*} keys, injects {@code browserName} for marionette-only requests,
     * and folds legacy binary/profile keys into {@code moz:firefoxOptions}.
     *
     * <p>Note: we don't take a dependency on the FirefoxDriver jar as it might not
     * be on the classpath.</p>
     *
     * @param unmodifiedCaps the raw capability map (not mutated)
     * @return the filtered Firefox capabilities, or {@code null} when nothing matched
     */
    @Override
    public Map<String, Object> apply(Map<String, Object> unmodifiedCaps) {
        // keep only firefox-identifying and firefox-specific, non-null entries,
        // collected into a sorted map (first value wins on duplicate keys)
        Map<String, Object> caps = unmodifiedCaps.entrySet().parallelStream()
                .filter(entry ->
                        ("browserName".equals(entry.getKey()) && "firefox".equals(entry.getValue()))
                        || entry.getKey().startsWith("firefox_")
                        || entry.getKey().startsWith("moz:"))
                .filter(entry -> Objects.nonNull(entry.getValue()))
                .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue, (l, r) -> l, TreeMap::new));
        // If we only have marionette in the caps, the user is asking for firefox. Make sure we
        // inject the browser name to be sure.
        if (unmodifiedCaps.containsKey("marionette") && !caps.containsKey("browserName")) {
            caps.put("browserName", "firefox");
        }
        // People might have just put the binary and profile in the OSS payload, and not in
        // firefox options.
        @SuppressWarnings("unchecked")
        Map<String, Object> options =
                (Map<String, Object>) unmodifiedCaps.getOrDefault("moz:firefoxOptions", new TreeMap<>());
        if (unmodifiedCaps.containsKey("firefox_binary") && !options.containsKey("binary")) {
            // Here's hoping that the binary is just a string. It should be as
            // FirefoxBinary.toJson just encodes the path.
            options.put("binary", unmodifiedCaps.get("firefox_binary"));
        }
        if (unmodifiedCaps.containsKey("firefox_profile") && !options.containsKey("profile")) {
            options.put("profile", unmodifiedCaps.get("firefox_profile"));
        }
        if (!options.isEmpty()) {
            caps.put("moz:firefoxOptions", options);
        }
        // null signals "no firefox capabilities requested"
        return caps.isEmpty() ? null : caps;
    }
}
public class CmsSitemapView { /** * Adds the gallery tree items to the parent . < p > * @ param parent the parent item * @ param galleries the gallery folder entries */ private void addGalleryEntries ( CmsGalleryTreeItem parent , List < CmsGalleryFolderEntry > galleries ) { } }
for ( CmsGalleryFolderEntry galleryFolder : galleries ) { CmsGalleryTreeItem folderItem = createGalleryFolderItem ( galleryFolder ) ; parent . addChild ( folderItem ) ; m_galleryTreeItems . put ( galleryFolder . getStructureId ( ) , folderItem ) ; addGalleryEntries ( folderItem , galleryFolder . getSubGalleries ( ) ) ; }
public class EnhancedDebuggerWindow {

    /**
     * Notification that the root window is closing. Stop listening for received and
     * transmitted packets in all the debugged connections.
     *
     * @param evt the event that indicates that the root window is closing
     */
    private synchronized void rootWindowClosing(WindowEvent evt) {
        // Tell every active debugger to stop debugging its connection.
        for (EnhancedDebugger activeDebugger : debuggers) {
            activeDebugger.cancel();
        }
        // Drop all references to the debuggers.
        debuggers.clear();
        // Release the singleton and its frame so they can be garbage collected.
        instance = null;
        frame = null;
        // Wake up any threads waiting on this instance.
        notifyAll();
    }
}
public class AggregatorExtension { /** * / * ( non - Javadoc ) * @ see com . ibm . jaggr . service . IAggregator . ILoadedExtension # getInstance ( ) */ @ Override public Object getInstance ( ) { } }
final String sourceMethod = "getInstance" ; // $ NON - NLS - 1 $ boolean isTraceLogging = log . isLoggable ( Level . FINER ) ; if ( isTraceLogging ) { log . entering ( AggregatorExtension . class . getName ( ) , sourceMethod ) ; log . exiting ( AggregatorExtension . class . getName ( ) , sourceMethod , instance ) ; } return instance ;
public class ShareResultMarshaller { /** * Marshall the given parameter object . */ public void marshall ( ShareResult shareResult , ProtocolMarshaller protocolMarshaller ) { } }
if ( shareResult == null ) { throw new SdkClientException ( "Invalid argument passed to marshall(...)" ) ; } try { protocolMarshaller . marshall ( shareResult . getPrincipalId ( ) , PRINCIPALID_BINDING ) ; protocolMarshaller . marshall ( shareResult . getInviteePrincipalId ( ) , INVITEEPRINCIPALID_BINDING ) ; protocolMarshaller . marshall ( shareResult . getRole ( ) , ROLE_BINDING ) ; protocolMarshaller . marshall ( shareResult . getStatus ( ) , STATUS_BINDING ) ; protocolMarshaller . marshall ( shareResult . getShareId ( ) , SHAREID_BINDING ) ; protocolMarshaller . marshall ( shareResult . getStatusMessage ( ) , STATUSMESSAGE_BINDING ) ; } catch ( Exception e ) { throw new SdkClientException ( "Unable to marshall request to JSON: " + e . getMessage ( ) , e ) ; }
public class OADProfile {

    /**
     * Set the state to INACTIVE and clear state variables.
     * Stops the watchdog and resets the approval tracker so a new OAD
     * transfer can start from a clean slate.
     */
    private void reset() {
        setState(OADState.INACTIVE);
        // drop the in-progress firmware transfer state
        currentImage = null;
        firmwareBundle = null;
        nextBlock = 0;
        oadListener = null;
        // stop the transfer timeout watchdog
        watchdog.stop();
        oadApproval.reset();
    }
}
public class DataSet { /** * Writes a DataSet to the standard output streams ( stdout ) of the TaskManagers that execute * the program ( or more specifically , the data sink operators ) . On a typical cluster setup , the * data will appear in the TaskManagers ' < i > . out < / i > files . * < p > To print the data to the console or stdout stream of the client process instead , use the * { @ link # print ( ) } method . * < p > For each element of the DataSet the result of { @ link Object # toString ( ) } is written . * @ param prefix The string to prefix each line of the output with . This helps identifying outputs * from different printing sinks . * @ return The DataSink operator that writes the DataSet . * @ see # print ( ) */ public DataSink < T > printOnTaskManager ( String prefix ) { } }
return output ( new PrintingOutputFormat < T > ( prefix , false ) ) ;
public class VdmEditor {
    /**
     * Synchronizes the outliner selection with the given element position in
     * the editor.
     *
     * @param element the java element to select
     */
    protected void synchronizeOutlinePage(INode element) {
        // TODO: don't search for mutexes
        // Mutex sync definitions and simple block statements are skipped;
        // presumably their positions cannot be usefully selected in the
        // outline -- TODO confirm against the outline page implementation.
        if (element instanceof AMutexSyncDefinition)
            return;
        if (element instanceof ABlockSimpleBlockStm)
            return;
        try {
            synchronizeOutlinePage(element, false); // true
        } catch (Exception e) {
            // Best-effort synchronization: failures are deliberately ignored
            // so outline problems never break the editor itself.
        }
    }
}
public class VirtualJarFileInputStream {
    /**
     * Close the current entry, and calculate the crc value.
     *
     * The CRC accumulated while the entry's bytes were read is captured into
     * the entry record, then the accumulator is reset for the next entry.
     *
     * @throws IOException if any problems occur
     */
    private void closeCurrent() throws IOException {
        virtualJarInputStream.closeEntry();
        currentEntry.crc = crc.getValue();
        crc.reset();
    }
}
public class Locale { /** * This method must be called only for creating the Locale . * * constants due to making shortcuts . */ private static Locale createConstant ( String lang , String country ) { } }
BaseLocale base = BaseLocale . createInstance ( lang , country ) ; return getInstance ( base , null ) ;
public class Query {
    /**
     * (non-Javadoc)
     * @see org.eclipse.datatools.connectivity.oda.IQuery#setObject(java.lang.String, java.lang.Object)
     *
     * Stores the value in the parameter map keyed by the parameter name.
     */
    public void setObject(String parameterName, Object value) throws OdaException {
        // only applies to named input parameter
        parameters.put(parameterName, value);
    }
}
public class ExportSupport {
    /**
     * Check if exporting data is supported in the current environment. Exporting is possible in two cases:
     * - The master is set to local. In this case any file system, including local FS, will work for exporting.
     * - The file system is not local. Local file systems do not work in cluster modes.
     *
     * @param sparkMaster the Spark master
     * @param fs the Hadoop file system
     * @return if export is supported
     */
    public static boolean exportSupported(@NonNull String sparkMaster, @NonNull FileSystem fs) {
        // Matches 'local', 'local[DIGITS]' or 'local[*]' — any FS works in local mode.
        boolean localMaster = sparkMaster.matches("^local(\\[(\\d+|\\*)])?$");
        if (localMaster) {
            return true;
        }
        // In clustered mode only a non-local ("file") file system can be used.
        String scheme = fs.getUri().getScheme();
        return !scheme.equals("file");
    }
}
public class AponSyntaxException { /** * Create a detail message . * @ param lineNumber the line number * @ param line the character line * @ param tline the trimmed character line * @ param msg the message * @ return the detail message */ private static String makeMessage ( int lineNumber , String line , String tline , String msg ) { } }
int columnNumber = ( tline != null ? line . indexOf ( tline ) : 0 ) ; StringBuilder sb = new StringBuilder ( ) ; if ( msg != null ) { sb . append ( msg ) ; } sb . append ( " [lineNumber: " ) . append ( lineNumber ) ; if ( columnNumber != - 1 ) { String lspace = line . substring ( 0 , columnNumber ) ; int tabCnt = StringUtils . search ( lspace , "\t" ) ; if ( tline != null && tline . length ( ) > 33 ) { tline = tline . substring ( 0 , 30 ) + "..." ; } sb . append ( ", columnNumber: " ) . append ( columnNumber + 1 ) ; if ( tabCnt != 0 ) { sb . append ( " (" ) ; sb . append ( "Tabs " ) . append ( tabCnt ) ; sb . append ( ", Spaces " ) . append ( columnNumber - tabCnt ) ; sb . append ( ")" ) ; } sb . append ( "] " ) . append ( tline ) ; } return sb . toString ( ) ;
public class AccountFiltersInner {
    /**
     * Update an Account Filter.
     * Updates an existing Account Filter in the Media Services account.
     *
     * Delegates to the Observable-based overload and adapts it to a
     * {@link ServiceFuture}, notifying the callback on completion or failure.
     *
     * @param resourceGroupName The name of the resource group within the Azure subscription.
     * @param accountName The Media Services account name.
     * @param filterName The Account Filter name
     * @param parameters The request parameters
     * @param serviceCallback the async ServiceCallback to handle successful and failed responses.
     * @throws IllegalArgumentException thrown if parameters fail the validation
     * @return the {@link ServiceFuture} object
     */
    public ServiceFuture<AccountFilterInner> updateAsync(String resourceGroupName, String accountName, String filterName, AccountFilterInner parameters, final ServiceCallback<AccountFilterInner> serviceCallback) {
        return ServiceFuture.fromResponse(updateWithServiceResponseAsync(resourceGroupName, accountName, filterName, parameters), serviceCallback);
    }
}
public class JobInstanceSqlMapDao { /** * TODO : ( ketan ) do we really need to reload the current state from DB ? */ private void logIfJobIsCompleted ( JobInstance jobInstance ) { } }
JobState currentState = getCurrentState ( jobInstance . getId ( ) ) ; if ( currentState . isCompleted ( ) && ! jobInstance . isCopy ( ) ) { String message = String . format ( "State change for a completed Job is not allowed. Job %s is currently State=%s, Result=%s" , jobInstance . getIdentifier ( ) , jobInstance . getState ( ) , jobInstance . getResult ( ) ) ; LOG . warn ( message , new Exception ( ) . fillInStackTrace ( ) ) ; }
public class SimpleAttachable { /** * { @ inheritDoc } */ public synchronized < T > T getAttachment ( final AttachmentKey < T > key ) { } }
if ( key == null ) { return null ; } return key . cast ( attachments . get ( key ) ) ;
public class DummyBaseTransactionManager { /** * Rolls back the transaction associated with the calling thread . * @ throws IllegalStateException If the transaction is in a state where it cannot be rolled back . This could be * because the calling thread is not associated with a transaction , or because it is in * the { @ link javax . transaction . Status # STATUS _ PREPARED prepared state } . * @ throws SecurityException If the caller is not allowed to roll back this transaction . * @ throws javax . transaction . SystemException * If the transaction service fails in an unexpected way . */ @ Override public void rollback ( ) throws IllegalStateException , SecurityException , SystemException { } }
Transaction tx = getTransaction ( ) ; if ( tx == null ) throw new IllegalStateException ( "no transaction associated with thread" ) ; tx . rollback ( ) ; // Disassociate tx from thread . setTransaction ( null ) ;
public class StructureTools { /** * Returns an array of the requested Atoms from the Structure object . * Iterates over all groups and checks if the requested atoms are in this * group , no matter if this is a { @ link AminoAcid } or { @ link HetatomImpl } * group . If the group does not contain all requested atoms then no atoms * are added for that group . For structures with more than one model , only * model 0 will be used . * @ param s * the structure to get the atoms from * @ param atomNames * contains the atom names to be used . * @ return an Atom [ ] array */ public static final Atom [ ] getAtomArray ( Structure s , String [ ] atomNames ) { } }
List < Chain > chains = s . getModel ( 0 ) ; List < Atom > atoms = new ArrayList < Atom > ( ) ; extractAtoms ( atomNames , chains , atoms ) ; return atoms . toArray ( new Atom [ atoms . size ( ) ] ) ;
public class Moment { /** * / * [ deutsch ] * < p > Konstruiert einen neuen UTC - Zeitstempel mit Hilfe von * Zeitkoordinaten auf der angegebenen Zeitskala . < / p > * < p > Die angegebene verstrichene Zeit { @ code elapsedTime } wird intern * in die UTC - Epochenzeit umgerechnet , sollte eine andere Zeitskala als * UTC angegeben sein . Die Zeitskala TAI wird erst ab der TAI - Epoche * 1958-01-01 unterst & uuml ; tzt , die Zeitskala GPS erst ab 1980-01-06 . < / p > * @ param elapsedTime elapsed seconds on given time scale * @ param nanosecond nanosecond fraction of last second * @ param scale time scale reference * @ return new moment instance * @ throws IllegalArgumentException if the nanosecond is not in the range * { @ code 0 < = nanosecond < = 999,999,999 } or if elapsed time is * out of supported range limits beyond year + / - 999,999,999 or * out of time scale range * @ throws IllegalStateException if time scale is not POSIX but * leap second support is switched off by configuration * @ see LeapSeconds # isEnabled ( ) */ public static Moment of ( long elapsedTime , int nanosecond , TimeScale scale ) { } }
if ( ( elapsedTime == 0 ) && ( nanosecond == 0 ) && ( scale == POSIX ) ) { return Moment . UNIX_EPOCH ; } return new Moment ( elapsedTime , nanosecond , scale ) ;
public class JSONs { /** * Read an optional integer . * @ param o the object to parse * @ param id the key in the map that points to an integer * @ param def the default integer value if the key is absent * @ return the resulting integer * @ throws JSONConverterException if the key does not point to a int */ public static int optInt ( JSONObject o , String id , int def ) throws JSONConverterException { } }
if ( o . containsKey ( id ) ) { try { return ( Integer ) o . get ( id ) ; } catch ( ClassCastException e ) { throw new JSONConverterException ( "Unable to read a int from string '" + id + "'" , e ) ; } } return def ;
public class ImageStatistics { /** * Returns the sum of all the pixels in the image . * @ param img Input image . Not modified . */ public static long sum ( InterleavedS64 img ) { } }
if ( BoofConcurrency . USE_CONCURRENT ) { return ImplImageStatistics_MT . sum ( img ) ; } else { return ImplImageStatistics . sum ( img ) ; }
public class MethodCallImpl {
    /**
     * <!-- begin-user-doc -->
     * <!-- end-user-doc -->
     * @generated
     */
    @Override
    public boolean eIsSet(int featureID) {
        // Generated EMF reflection: a feature counts as "set" when its value
        // differs from the default (null defaults compare by reference,
        // non-null defaults by equals()).
        switch (featureID) {
            case SimpleExpressionsPackage.METHOD_CALL__VALUE:
                return VALUE_EDEFAULT == null ? value != null : !VALUE_EDEFAULT.equals(value);
        }
        return super.eIsSet(featureID);
    }
}
public class FormatDate { /** * Create the internal Formatter instance and perform the formatting . * @ throws JspException if a JSP exception has occurred */ public void doTag ( ) throws JspException { } }
JspTag parentTag = SimpleTagSupport . findAncestorWithClass ( this , IFormattable . class ) ; // if there are errors we need to either add these to the parent AbstractBastTag or report an error . if ( hasErrors ( ) ) { if ( parentTag instanceof IFormattable ) { IFormattable parent = ( IFormattable ) parentTag ; parent . formatterHasError ( ) ; } reportErrors ( ) ; return ; } if ( parentTag instanceof IFormattable ) { IFormattable parent = ( IFormattable ) parentTag ; DateFormatter dateFmt = new DateFormatter ( ) ; dateFmt . setPattern ( _pattern ) ; dateFmt . setLocale ( getLocale ( ) ) ; dateFmt . setInputPattern ( _stringInput ) ; parent . addFormatter ( dateFmt ) ; } else { String s = Bundle . getString ( "Tags_FormattableParentRequired" ) ; registerTagError ( s , null ) ; reportErrors ( ) ; }
public class QuartzScheduler {
    /**
     * Add (register) the given <code>Calendar</code> to the Scheduler.
     *
     * Verifies the scheduler has not been shut down, then delegates
     * persistence of the calendar to the configured job store.
     *
     * @throws SchedulerException if there is an internal Scheduler error, or a Calendar with the
     *         same name already exists, and <code>replace</code> is <code>false</code>.
     */
    public void addCalendar(final String calName, final ICalendar calendar, final boolean replace, final boolean updateTriggers) throws SchedulerException {
        validateState();
        m_aResources.getJobStore().storeCalendar(calName, calendar, replace, updateTriggers);
    }
}
public class GVRBoundsPicker { /** * Tests the bounding volumes of a set of scene objects against * all the colliders the scene and returns a list of collisions . * This function is not meant for general collision detection * but can be used to implement simple bounds - based collisions . * Inside GearVRF it is used for cursor hit testing . * This method is thread safe because it guarantees that only * one thread at a time is examining the scene graph , * and it extracts the hit data during within its synchronized block . You * can then examine the return list without worrying about another thread * corrupting your hit data . * Unlike ray based picking , the hit location for sphere picking is very * inexact . Currently the hit location reported is on the surface of the collider . * Mesh colliders are not supported and the mesh is not examined * during collision detection . Instead the bounding volume of the scene object * is used , not it ' s mesh collider . * @ param scene * The { @ link GVRScene } with all the objects to be tested . * @ param collidables * An array of { @ link GVRSceneObject } s to collide against the scene . * @ return A list of { @ link GVRPickedObject } , sorted by distance from the * pick ray origin . Each { @ link GVRPickedObject } contains the scene object * which owns the { @ link GVRCollider } along with the hit * location and distance . * @ since 1.6.6 */ public static final GVRPickedObject [ ] pickBounds ( GVRScene scene , List < GVRSceneObject > collidables ) { } }
sFindObjectsLock . lock ( ) ; try { final GVRPickedObject [ ] result = NativePicker . pickBounds ( scene . getNative ( ) , collidables ) ; if ( result == null ) { return sEmptyList ; } return result ; } finally { sFindObjectsLock . unlock ( ) ; }
public class MenuUtil {
    /**
     * Adds a new menu item to the menu with the specified name and
     * attributes. Delegates to the full overload with a boxed mnemonic and a
     * null final argument (presumably no accelerator/extra attribute — TODO
     * confirm against the overload's signature).
     *
     * @param l the action listener.
     * @param menu the menu to add the item to.
     * @param name the item name.
     * @param mnem the mnemonic key for the item.
     * @return the new menu item.
     */
    public static JMenuItem addMenuItem(ActionListener l, JMenu menu, String name, int mnem) {
        return addMenuItem(l, menu, name, Integer.valueOf(mnem), null);
    }
}
public class ImageLocalNormalization {
    /**
     * <p>Normalizes the input image such that local statistics are a zero mean and with standard deviation
     * of 1. The image border is handled by truncating the kernel and renormalizing it so that its sum is
     * still one.</p>
     *
     * <p>output[x,y] = (input[x,y] - mean[x,y]) / (stdev[x,y] + delta)</p>
     *
     * @param radius Radius of the local mean window.
     * @param input Input image
     * @param maxPixelValue maximum value of a pixel element in the input image. -1 = compute max value. Typically
     *        this is 255 or 1.
     * @param delta A small value used to avoid divide by zero errors. Typical 1e-4f for 32 bit and 1e-8 for 64bit
     * @param output Storage for output
     */
    public void zeroMeanStdOne(int radius, T input, double maxPixelValue, double delta, T output) {
        // check preconditions and initialize data structures
        initialize(input, output);
        // avoid overflow issues by ensuring that the max pixel value is 1
        T adjusted = ensureMaxValueOfOne(input, maxPixelValue);
        // take advantage of 2D gaussian kernels being separable
        if (border == null) {
            WorkArrays work = GeneralizedImageOps.createWorkArray(input.getImageType());
            // localMean = E[x], localPow2 = E[x^2]; output is reused as scratch here
            // and overwritten by computeOutput() below.
            GBlurImageOps.mean(adjusted, localMean, radius, output, work);
            GPixelMath.pow2(adjusted, pow2);
            GBlurImageOps.mean(pow2, localPow2, radius, output, work);
        } else {
            throw new IllegalArgumentException("Only renormalize border supported here so far. This can be changed...");
        }
        // Compute the final output
        if (imageType == GrayF32.class)
            computeOutput((GrayF32) input, (float) delta, (GrayF32) output, (GrayF32) adjusted);
        else
            computeOutput((GrayF64) input, delta, (GrayF64) output, (GrayF64) adjusted);
    }
}
public class ThreadLocalBuilderBasedDeserializer {
    /**
     * Copied from {@code com.fasterxml.jackson.databind.deser.BuilderBasedDeserializer}
     * and modified to use {@link ThreadLocalBuilder} instantiation and build.
     *
     * Walks the JSON object token-by-token, routing each known property to its
     * {@code SettableBeanProperty} and unknown properties to the vanilla
     * handler. IOExceptions raised inside the builder lambda are tunneled out
     * via {@code WrappedIOExceptionException}; all other failures fall through
     * to the underlying deserializer's instantiation-problem handling.
     */
    private Object vanillaDeserializeAndFinishBuild(final JsonParser p, final DeserializationContext ctxt, final JsonToken t) throws IOException {
        try {
            return ThreadLocalBuilder.buildGeneric(_threadLocalBuilderClass, b -> {
                Object bean = b;
                try {
                    while (p.getCurrentToken() != JsonToken.END_OBJECT) {
                        final String propName = p.getCurrentName();
                        // Skip field name:
                        p.nextToken();
                        final SettableBeanProperty prop = _beanProperties.find(propName);
                        if (prop != null) {
                            // normal case
                            try {
                                bean = prop.deserializeSetAndReturn(p, ctxt, bean);
                            // CHECKSTYLE.OFF: IllegalCatch - Retain existing behavior
                            } catch (final IOException | RuntimeException e) {
                            // CHECKSTYLE.ON: IllegalCatch
                                // TODO(ville): Convert to throwing wrapAndThrow result. This improves coverage.
                                // See: https://github.com/FasterXML/jackson-databind/pull/1871
                                // throw wrapAndThrow(e, bean, propName, ctxt);
                                wrapAndThrow(e, bean, propName, ctxt);
                            }
                        } else {
                            handleUnknownVanilla(p, ctxt, bean, propName);
                        }
                        p.nextToken();
                    }
                } catch (final IOException e) {
                    // Lambdas cannot throw checked IOException; tunnel it out.
                    throw new WrappedIOExceptionException(e);
                }
            });
        } catch (final WrappedIOExceptionException e) {
            // Unwrap the tunneled IOException for the caller.
            throw e.getCause();
        // CHECKSTYLE.OFF: IllegalCatch - Match behavior in BuilderBasedDeserializer
        } catch (final Exception e) {
        // CHECKSTYLE.ON: IllegalCatch
            return _underlyingBuilderBasedDeserializer.wrapInstantiationProblem(e, ctxt);
        }
    }
}
public class LibertyFeaturesToMavenRepo { /** * Add Maven coordinates into the modified JSON file . * @ param modifiedJsonFile * The location to write the modified JSON file . * @ param jsonArray * The original JSON array of all features . * @ param features * The map of symbolic names to LibertyFeature objects which has * Maven coordinates . * @ throws IOException */ private static void addMavenCoordinates ( File modifiedJsonFile , JsonArray jsonArray , Map < String , LibertyFeature > features ) throws IOException { } }
JsonArrayBuilder jsonArrayBuilder = Json . createArrayBuilder ( ) ; for ( int i = 0 ; i < jsonArray . size ( ) ; i ++ ) { JsonObject jsonObject = jsonArray . getJsonObject ( i ) ; JsonObjectBuilder jsonObjectBuilder = Json . createObjectBuilder ( jsonObject ) ; JsonObject wlpInfo = jsonObject . getJsonObject ( Constants . WLP_INFORMATION_KEY ) ; JsonObjectBuilder wlpInfoBuilder = Json . createObjectBuilder ( wlpInfo ) ; JsonArray provideFeatureArray = wlpInfo . getJsonArray ( Constants . PROVIDE_FEATURE_KEY ) ; String symbolicName = provideFeatureArray . getString ( 0 ) ; wlpInfoBuilder . add ( Constants . MAVEN_COORDINATES_KEY , features . get ( symbolicName ) . getMavenCoordinates ( ) . toString ( ) ) ; jsonObjectBuilder . add ( Constants . WLP_INFORMATION_KEY , wlpInfoBuilder ) ; jsonArrayBuilder . add ( jsonObjectBuilder ) ; } // Write JSON to the modified file FileOutputStream out = null ; try { Map < String , Object > config = new HashMap < String , Object > ( ) ; config . put ( JsonGenerator . PRETTY_PRINTING , true ) ; JsonWriterFactory writerFactory = Json . createWriterFactory ( config ) ; out = new FileOutputStream ( modifiedJsonFile ) ; JsonWriter streamWriter = writerFactory . createWriter ( out ) ; streamWriter . write ( jsonArrayBuilder . build ( ) ) ; } finally { if ( out != null ) { out . close ( ) ; } }
public class BlockAutomaton {
    /**
     * Retrieves a list of outgoing edges of a block (state).
     *
     * The result is backed by {@code Arrays.asList}, i.e. a fixed-size view
     * over the internal edge array — element writes pass through, but the
     * list cannot be resized.
     *
     * @param block the block (state).
     * @return the outgoing edges of the given block (state).
     */
    public List<BlockEdge<S, L>> getOutgoingEdges(Block<S, L> block) {
        return Arrays.asList(edges[block.getId()]);
    }
}
public class OptionGroupOption {
    /**
     * The versions that are available for the option.
     *
     * <b>NOTE:</b> This method appends the values to the existing list (if any). Use
     * {@link #setOptionGroupOptionVersions(java.util.Collection)} or
     * {@link #withOptionGroupOptionVersions(java.util.Collection)} if you want to override the existing values.
     *
     * @param optionGroupOptionVersions The versions that are available for the option.
     * @return Returns a reference to this object so that method calls can be chained together.
     */
    public OptionGroupOption withOptionGroupOptionVersions(OptionVersion... optionGroupOptionVersions) {
        // Lazily create the backing list, presized to the varargs length.
        if (this.optionGroupOptionVersions == null) {
            setOptionGroupOptionVersions(new com.amazonaws.internal.SdkInternalList<OptionVersion>(optionGroupOptionVersions.length));
        }
        for (OptionVersion ele : optionGroupOptionVersions) {
            this.optionGroupOptionVersions.add(ele);
        }
        return this;
    }
}
public class ModelMigration {
    /**
     * {@inheritDoc}
     *
     * Generates a DDL migration for the pending model diff, if any changes
     * are detected. NOTE(review): this implementation always returns
     * {@code null}, even though a version string is computed along the way —
     * confirm callers do not expect the generated version to be returned.
     */
    @Override
    public String generateMigration() throws IOException {
        // A pre-existing scriptInfo short-circuits generation — presumably a
        // migration script was already produced for this run; TODO confirm.
        if (scriptInfo != null) {
            return null;
        }
        // Lazily compute the model diff on first use.
        if (diff == null) {
            diff();
        }
        setOffline();
        String version = null;
        try {
            if (diff.isEmpty()) {
                logger.info("no changes detected - no migration written");
                return null;
            }
            // there were actually changes to write
            Migration dbMigration = diff.getMigration();
            version = getVersion(migrationModel);
            logger.info("generating migration:{}", version);
            if (!writeMigration(dbMigration, version)) {
                logger.warn("migration already exists, not generating DDL");
            } else {
                if (databasePlatform != null) {
                    // writer needs the current model to provide table/column details for
                    // history ddl generation (triggers, history tables etc)
                    DdlWrite write = new DdlWrite(new MConfiguration(), currentModel.read());
                    PlatformDdlWriter writer = createDdlWriter();
                    writer.processMigration(dbMigration, write);
                }
            }
        } finally {
            // Undo setOffline() only if we were not already online beforehand.
            if (!online) {
                DbOffline.reset();
            }
        }
        return null;
    }
}