_id stringlengths 2 7 | title stringlengths 3 140 | partition stringclasses 3
values | text stringlengths 73 34.1k | language stringclasses 1
value | meta_information dict |
|---|---|---|---|---|---|
q173400 | OCreateIndexStatement.getIndexClass | test | private OClass getIndexClass(OCommandContext ctx) {
if (className == null) {
return null;
}
OClass result = ctx.getDatabase().getMetadata().getSchema().getClass(className.getStringValue());
if (result == null) {
throw new OCommandExecutionException("Cannot find class " + className);
}
return result;
} | java | {
"resource": ""
} |
q173401 | OSequenceCached.nextWithNewCurrentValue | test | protected long nextWithNewCurrentValue(long currentValue, boolean executeViaDistributed)
throws OSequenceLimitReachedException, ODatabaseException {
if (!executeViaDistributed) {
//we don't want synchronization on whole method, because called with executeViaDistributed == true
//will later call nextWithNewCurrentValue with parameter executeViaDistributed == false
//and that will cause deadlock
synchronized (this) {
cacheStart = currentValue;
return nextWork();
}
} else {
try{
return sendSequenceActionSetAndNext(currentValue);
}
catch (InterruptedException | ExecutionException exc){
OLogManager.instance().error(this, exc.getMessage(), exc, (Object[]) null);
throw new ODatabaseException(exc.getMessage());
}
}
} | java | {
"resource": ""
} |
q173402 | OClassImpl.truncate | test | public void truncate() throws IOException {
ODatabaseDocumentInternal db = getDatabase();
db.checkSecurity(ORule.ResourceGeneric.CLASS, ORole.PERMISSION_UPDATE);
// Classes with record-level security cannot be truncated wholesale: per-record
// permissions would be silently bypassed.
if (isSubClassOf(OSecurityShared.RESTRICTED_CLASSNAME)) {
throw new OSecurityException(
"Class '" + getName() + "' cannot be truncated because has record level security enabled (extends '"
+ OSecurityShared.RESTRICTED_CLASSNAME + "')");
}
final OStorage storage = db.getStorage();
// Read lock is enough: the schema itself is not modified, only cluster contents.
acquireSchemaReadLock();
try {
// Empty every physical cluster owned by this class.
for (int id : clusterIds) {
OCluster cl = storage.getClusterById(id);
db.checkForClusterPermissions(cl.getName());
cl.truncate();
}
// Indexes defined directly on this class can simply be cleared.
for (OIndex<?> index : getClassIndexes())
index.clear();
// Indexes inherited from superclasses still contain entries from sibling
// classes, so they must be rebuilt instead of cleared.
Set<OIndex<?>> superclassIndexes = new HashSet<OIndex<?>>();
superclassIndexes.addAll(getIndexes());
superclassIndexes.removeAll(getClassIndexes());
for (OIndex index : superclassIndexes) {
index.rebuild();
}
} finally {
releaseSchemaReadLock();
}
} | java | {
"resource": ""
} |
q173403 | OClassImpl.addBaseClass | test | protected OClass addBaseClass(final OClassImpl iBaseClass) {
// Guard against cyclic inheritance before linking the subclass.
checkRecursion(iBaseClass);
if (subclasses == null) {
subclasses = new ArrayList<OClass>();
}
// Only register the subclass once; re-adding is a no-op.
if (!subclasses.contains(iBaseClass)) {
subclasses.add(iBaseClass);
addPolymorphicClusterIdsWithInheritance(iBaseClass);
}
return this;
} | java | {
"resource": ""
} |
q173404 | OClassImpl.addPolymorphicClusterIds | test | protected void addPolymorphicClusterIds(final OClassImpl iBaseClass) {
// Merges the subclass's polymorphic cluster ids into this class's set,
// keeping the result sorted and de-duplicated via a TreeSet.
Set<Integer> clusters = new TreeSet<Integer>();
for (int clusterId : polymorphicClusterIds) {
clusters.add(clusterId);
}
for (int clusterId : iBaseClass.polymorphicClusterIds) {
// add() returns true only for ids not already present.
if (clusters.add(clusterId)) {
try {
// Keep class indexes in sync with the newly visible cluster.
addClusterIdToIndexes(clusterId);
} catch (RuntimeException e) {
// Roll the id back out so the set stays consistent with the indexes.
OLogManager.instance().warn(this, "Error adding clusterId '%d' to index of class '%s'", e, clusterId, getName());
clusters.remove(clusterId);
}
}
}
// Materialize the sorted set back into the primitive array field.
polymorphicClusterIds = new int[clusters.size()];
int i = 0;
for (Integer cluster : clusters) {
polymorphicClusterIds[i] = cluster;
i++;
}
} | java | {
"resource": ""
} |
q173405 | ORecordSerializerCSVAbstract.linkToStream | test | private static OIdentifiable linkToStream(final StringBuilder buffer, final ODocument iParentRecord, Object iLinked) {
// Serializes a link (RID, RID string, or record) into 'buffer' in CSV form and
// returns the identifiable the caller should retain, or null for a null link.
if (iLinked == null)
// NULL REFERENCE
return null;
OIdentifiable resultRid = null;
ORID rid;
if (iLinked instanceof ORID) {
// JUST THE REFERENCE
rid = (ORID) iLinked;
// Invalid (unsaved) RIDs are tolerated only when going through a remote proxy storage.
assert rid.getIdentity().isValid() || (ODatabaseRecordThreadLocal.instance().get().getStorage() instanceof OStorageProxy) :
"Impossible to serialize invalid link " + rid.getIdentity();
resultRid = rid;
} else {
// Accept the String form of a RID as a convenience.
if (iLinked instanceof String)
iLinked = new ORecordId((String) iLinked);
if (!(iLinked instanceof OIdentifiable))
throw new IllegalArgumentException(
"Invalid object received. Expected a OIdentifiable but received type=" + iLinked.getClass().getName() + " and value="
+ iLinked);
// RECORD
ORecord iLinkedRecord = ((OIdentifiable) iLinked).getRecord();
rid = iLinkedRecord.getIdentity();
assert rid.getIdentity().isValid() || (ODatabaseRecordThreadLocal.instance().get().getStorage() instanceof OStorageProxy) :
"Impossible to serialize invalid link " + rid.getIdentity();
final ODatabaseDocument database = ODatabaseRecordThreadLocal.instance().get();
if (iParentRecord != null) {
if (!database.isRetainRecords())
// REPLACE CURRENT RECORD WITH ITS ID: THIS SAVES A LOT OF MEMORY
resultRid = iLinkedRecord.getIdentity();
}
}
// Write the textual RID only when it points at a persisted record.
if (rid.isValid())
rid.toString(buffer);
return resultRid;
} | java | {
"resource": ""
} |
q173406 | OByteBufferPool.release | test | public final void release(OPointer pointer) {
// Returns a pointer to the pool, or frees it when the pool is already full.
// Stop leak-tracking first so the pointer is no longer reported as held.
if (TRACK) {
pointerMapping.remove(pointer);
}
// Optimistically claim a pool slot; if that overshoots the configured capacity,
// give the slot back and deallocate instead of pooling. The increment-then-check
// ordering keeps the counter consistent under concurrent releases.
long poolSize = pointersPoolSize.incrementAndGet();
if (poolSize > this.poolSize) {
pointersPoolSize.decrementAndGet();
allocator.deallocate(pointer);
} else {
pointersPool.add(pointer);
}
} | java | {
"resource": ""
} |
q173407 | OByteBufferPool.checkMemoryLeaks | test | public void checkMemoryLeaks() {
// Reports every pointer that was allocated but never released. Only active
// when TRACK is enabled; fails via assert so it only trips with -ea.
boolean detected = false;
if (TRACK) {
// Any entry still present in the tracking map was never released.
for (Map.Entry<OPointer, PointerTracker> entry : pointerMapping.entrySet()) {
// NOTE(review): 'allocation' appears to be passed as the log's throwable,
// presumably an allocation-time stack trace — confirm errorNoDb's signature.
OLogManager.instance()
.errorNoDb(this, "DIRECT-TRACK: unreleased direct memory pointer `%X` detected.", entry.getValue().allocation,
System.identityHashCode(entry.getKey()));
detected = true;
}
}
assert !detected;
} | java | {
"resource": ""
} |
q173408 | OByteBufferPool.clear | test | public void clear() {
// Release every pooled pointer back to the allocator, then reset pool state.
for (final OPointer pooled : pointersPool) {
allocator.deallocate(pooled);
}
pointersPool.clear();
pointersPoolSize.set(0);
// Also release tracked pointers that were handed out but never returned.
for (final OPointer tracked : pointerMapping.keySet()) {
allocator.deallocate(tracked);
}
pointerMapping.clear();
} | java | {
"resource": ""
} |
q173409 | OBinaryProtocol.bytes2int | test | public static int bytes2int(final byte[] b, final int offset) {
// Big-endian decode: fold four consecutive bytes into one int, masking each
// byte to 0..255 so sign extension cannot corrupt the result.
int value = 0;
for (int i = 0; i < 4; i++) {
value = (value << 8) | (b[offset + i] & 0xff);
}
return value;
} | java | {
"resource": ""
} |
q173410 | ODistributedAbstractPlugin.onOpen | test | @Override
public void onOpen(final ODatabaseInternal iDatabase) {
// Hook invoked when a database is opened; skips databases not managed by this
// node and nodes that are offline (unless still starting up).
if (!isRelatedToLocalServer(iDatabase))
return;
if (isOffline() && status != NODE_STATUS.STARTING)
return;
final ODatabaseDocumentInternal currDb = ODatabaseRecordThreadLocal.instance().getIfDefined();
try {
final String dbName = iDatabase.getName();
final ODistributedConfiguration cfg = getDatabaseConfiguration(dbName);
if (cfg == null)
return;
} catch (HazelcastException | HazelcastInstanceNotActiveException e) {
// Collapsed the two identical catch blocks into a Java 7 multi-catch:
// Hazelcast down or shutting down surfaces as an offline-node condition.
throw OException.wrapException(new OOfflineNodeException("Hazelcast instance is not available"), e);
} finally {
// RESTORE ORIGINAL DATABASE INSTANCE IN TL
ODatabaseRecordThreadLocal.instance().set(currDb);
}
} | java | {
"resource": ""
} |
q173411 | ODistributedAbstractPlugin.installClustersOfClass | test | public boolean installClustersOfClass(final ODatabaseInternal iDatabase, final OClass iClass,
OModifiableDistributedConfiguration cfg) {
// Assigns/creates the physical clusters of a class across the available nodes.
// Returns false for abstract classes, which own no clusters.
final String databaseName = iDatabase.getName();
if (iClass.isAbstract())
return false;
// INIT THE DATABASE IF NEEDED
getMessageService().registerDatabase(databaseName, cfg);
// Run under the distributed database lock (20s timeout) so cluster ownership
// assignment is atomic across the cluster.
return executeInDistributedDatabaseLock(databaseName, 20000, cfg,
new OCallable<Boolean, OModifiableDistributedConfiguration>() {
@Override
public Boolean call(final OModifiableDistributedConfiguration lastCfg) {
final Set<String> availableNodes = getAvailableNodeNames(iDatabase.getName());
final List<String> cluster2Create = clusterAssignmentStrategy
.assignClusterOwnershipOfClass(iDatabase, lastCfg, iClass, availableNodes, true);
final Map<OClass, List<String>> cluster2CreateMap = new HashMap<OClass, List<String>>(1);
cluster2CreateMap.put(iClass, cluster2Create);
createClusters(iDatabase, cluster2CreateMap, lastCfg);
return true;
}
});
} | java | {
"resource": ""
} |
q173412 | ODistributedAbstractPlugin.dumpServersStatus | test | protected void dumpServersStatus() {
// Logs the distributed-servers status table, but only when it changed since
// the last dump, to avoid flooding the log with identical status lines.
final ODocument cfg = getClusterConfiguration();
final String compactStatus = ODistributedOutput.getCompactServerStatus(this, cfg);
if (!lastServerDump.equals(compactStatus)) {
lastServerDump = compactStatus;
ODistributedServerLog
.info(this, getLocalNodeName(), null, DIRECTION.NONE, "Distributed servers status (*=current @=lockmgr[%s]):\n%s",
getLockManagerServer(), ODistributedOutput.formatServerStatus(this, cfg));
}
} | java | {
"resource": ""
} |
q173413 | OCollections.indexOf | test | public static int indexOf(final Object[] array, final Comparable object) {
// Linear scan using compareTo-equality; returns -1 when not present.
int position = 0;
for (final Object candidate : array) {
if (object.compareTo(candidate) == 0)
// FOUND
return position;
position++;
}
return -1;
} | java | {
"resource": ""
} |
q173414 | OCollections.indexOf | test | public static int indexOf(final int[] array, final int object) {
// Linear scan for a primitive int; returns -1 when the value is absent.
for (int position = 0; position < array.length; position++) {
if (array[position] == object) {
// FOUND
return position;
}
}
return -1;
} | java | {
"resource": ""
} |
q173415 | OCommandExecutorSQLSelect.getInvolvedClusters | test | @Override
public Set<String> getInvolvedClusters() {
// Computes the set of cluster names this SELECT touches, filtered by the
// current user's cluster-level read permissions.
final Set<String> clusters = new HashSet<String>();
if (parsedTarget != null) {
final ODatabaseDocument db = getDatabase();
if (parsedTarget.getTargetQuery() != null && parsedTarget
.getTargetRecords() instanceof OCommandExecutorSQLResultsetDelegate) {
// SUB-QUERY: EXECUTE IT LOCALLY
// SUB QUERY, PROPAGATE THE CALL
final Set<String> clIds = ((OCommandExecutorSQLResultsetDelegate) parsedTarget.getTargetRecords()).getInvolvedClusters();
for (String c : clIds) {
// FILTER THE CLUSTER WHERE THE USER HAS THE RIGHT ACCESS
if (checkClusterAccess(db, c)) {
clusters.add(c);
}
}
} else if (parsedTarget.getTargetRecords() != null) {
// SINGLE RECORDS: BROWSE ALL (COULD BE EXPENSIVE).
for (OIdentifiable identifiable : parsedTarget.getTargetRecords()) {
final String c = db.getClusterNameById(identifiable.getIdentity().getClusterId()).toLowerCase(Locale.ENGLISH);
// FILTER THE CLUSTER WHERE THE USER HAS THE RIGHT ACCESS
if (checkClusterAccess(db, c)) {
clusters.add(c);
}
}
}
// Class/cluster/index targets take precedence and return directly, discarding
// any clusters accumulated above.
if (parsedTarget.getTargetClasses() != null) {
return getInvolvedClustersOfClasses(parsedTarget.getTargetClasses().values());
}
if (parsedTarget.getTargetClusters() != null) {
return getInvolvedClustersOfClusters(parsedTarget.getTargetClusters().keySet());
}
if (parsedTarget.getTargetIndex() != null) {
// EXTRACT THE CLASS NAME -> CLUSTERS FROM THE INDEX DEFINITION
return getInvolvedClustersOfIndex(parsedTarget.getTargetIndex());
}
}
return clusters;
} | java | {
"resource": ""
} |
q173416 | OCommandExecutorSQLSelect.handleResult | test | @Override
protected boolean handleResult(final OIdentifiable iRecord, final OCommandContext iContext) {
// Processes one matched record; returning false stops the scan.
lastRecord = iRecord;
// SKIP may be applied while streaming only when no in-memory sort, unwind or
// expand would reorder the results afterwards.
if ((orderedFields.isEmpty() || fullySortedByIndex || isRidOnlySort()) && skip > 0 && this.unwindFields == null
&& this.expandTarget == null) {
lastRecord = null;
skip--;
return true;
}
if (!addResult(lastRecord, iContext)) {
return false;
}
// Honour LIMIT/timeout via continueSearching().
return continueSearching();
} | java | {
"resource": ""
} |
q173417 | OCommandExecutorSQLSelect.getTemporaryRIDCounter | test | public int getTemporaryRIDCounter(final OCommandContext iContext) {
// Delegate to the outermost query so temporary RIDs stay unique across
// nested sub-queries; otherwise draw from this query's own counter.
final OTemporaryRidGenerator parentQuery = (OTemporaryRidGenerator) iContext.getVariable("parentQuery");
if (parentQuery != null && parentQuery != this) {
return parentQuery.getTemporaryRIDCounter(iContext);
}
return serialTempRID.getAndIncrement();
} | java | {
"resource": ""
} |
q173418 | OCommandExecutorSQLSelect.reportTip | test | protected void reportTip(final String iMessage) {
// Record the tip both in the global profiler and in the per-query context.
Orient.instance().getProfiler().reportTip(iMessage);
List<String> tipList = (List<String>) context.getVariable("tips");
if (tipList == null) {
// Lazily create the context-scoped tip list on first use.
tipList = new ArrayList<String>(3);
context.setVariable("tips", tipList);
}
tipList.add(iMessage);
} | java | {
"resource": ""
} |
q173419 | OCommandExecutorSQLSelect.parseFetchplan | test | protected boolean parseFetchplan(final String w) throws OCommandSQLParsingException {
// Parses the FETCHPLAN clause; returns false when 'w' is not the keyword.
if (!w.equals(KEYWORD_FETCHPLAN)) {
return false;
}
parserSkipWhiteSpaces();
int start = parserGetCurrentPosition();
parserNextWord(true);
int end = parserGetCurrentPosition();
parserSkipWhiteSpaces();
int position = parserGetCurrentPosition();
// Consume additional fetch-plan items (e.g. 'field:depth') while they match
// the fetch-plan pattern; 'end' tracks the end of the last matching item.
while (!parserIsEnded()) {
final String word = OIOUtils.getStringContent(parserNextWord(true));
if (!OPatternConst.PATTERN_FETCH_PLAN.matcher(word).matches()) {
break;
}
end = parserGetCurrentPosition();
parserSkipWhiteSpaces();
position = parserGetCurrentPosition();
}
// Rewind the parser to just after the last matching item.
parserSetCurrentPosition(position);
if (end < 0) {
fetchPlan = OIOUtils.getStringContent(parserText.substring(start));
} else {
fetchPlan = OIOUtils.getStringContent(parserText.substring(start, end));
}
request.setFetchPlan(fetchPlan);
return true;
} | java | {
"resource": ""
} |
q173420 | OCommandExecutorSQLSelect.parseNoCache | test | protected boolean parseNoCache(final String w) throws OCommandSQLParsingException {
// Consume the NOCACHE keyword when present; otherwise report no match.
if (w.equals(KEYWORD_NOCACHE)) {
noCache = true;
return true;
}
return false;
} | java | {
"resource": ""
} |
q173421 | OCommandExecutorSQLSelect.optimizeSort | test | private boolean optimizeSort(OClass iSchemaClass) {
OIndexCursor cursor = getOptimizedSortCursor(iSchemaClass);
if (cursor != null) {
fetchValuesFromIndexCursor(cursor);
return true;
}
return false;
} | java | {
"resource": ""
} |
q173422 | OETLJob.status | test | public ODocument status() {
// Builds a snapshot document of the ETL job state. Synchronized on the
// listener because the FINISHED branch notifies waiters on that same monitor.
synchronized (listener) {
ODocument status = new ODocument();
status.field("cfg", cfg);
status.field("status", this.status);
String lastBatchLog = "";
if (this.messageHandler != null) {
lastBatchLog = extractBatchLog();
}
status.field("log", lastBatchLog);
if (this.status == Status.FINISHED) {
// Wake any thread waiting on the listener monitor for job completion.
listener.notifyAll();
}
return status;
}
} | java | {
"resource": ""
} |
q173423 | OCommandRequestTextAbstract.execute | test | @SuppressWarnings("unchecked")
public <RET> RET execute(final Object... iArgs) {
// Binds the arguments and the async-replication callbacks to the current
// thread, then delegates execution to the active database's storage.
setParameters(iArgs);
OExecutionThreadLocal.INSTANCE.get().onAsyncReplicationOk = onAsyncReplicationOk;
OExecutionThreadLocal.INSTANCE.get().onAsyncReplicationError = onAsyncReplicationError;
return (RET) ODatabaseRecordThreadLocal.instance().get().getStorage().command(this);
} | java | {
"resource": ""
} |
q173424 | OAbstractPaginatedStorage.handleJVMError | test | public final void handleJVMError(final Error e) {
// Record only the first JVM error; later ones are ignored so the root cause
// is preserved. compareAndSet makes this race-free across threads.
final boolean firstError = jvmError.compareAndSet(null, e);
if (firstError) {
OLogManager.instance().errorNoDb(this, "JVM error was thrown", e);
}
} | java | {
"resource": ""
} |
q173425 | OAbstractPaginatedStorage.validatedPutIndexValue | test | @SuppressWarnings("UnusedReturnValue")
public boolean validatedPutIndexValue(int indexId, final Object key, final ORID value,
final OBaseIndexEngine.Validator<Object, ORID> validator) throws OInvalidIndexEngineIdException {
// Puts a key/value pair into the index engine after running the validator.
indexId = extractInternalId(indexId);
try {
// Inside an active transaction no state lock is needed: the tx holds it.
if (transaction.get() != null) {
return doValidatedPutIndexValue(indexId, key, value, validator);
}
checkOpenness();
stateLock.acquireReadLock();
try {
// Re-check after acquiring the lock: the storage may have closed meanwhile.
checkOpenness();
checkLowDiskSpaceRequestsAndReadOnlyConditions();
return doValidatedPutIndexValue(indexId, key, value, validator);
} finally {
stateLock.releaseReadLock();
}
} catch (final OInvalidIndexEngineIdException ie) {
throw logAndPrepareForRethrow(ie);
} catch (final RuntimeException ee) {
throw logAndPrepareForRethrow(ee);
} catch (final Error ee) {
throw logAndPrepareForRethrow(ee);
} catch (final Throwable t) {
throw logAndPrepareForRethrow(t);
}
} | java | {
"resource": ""
} |
q173426 | OAbstractPaginatedStorage.rollback | test | public void rollback(final OMicroTransaction microTransaction) {
// Rolls back the given micro-transaction if it is the one active on this thread.
try {
checkOpenness();
stateLock.acquireReadLock();
try {
try {
checkOpenness();
// No active transaction on this thread: nothing to roll back.
if (transaction.get() == null) {
return;
}
if (transaction.get().getMicroTransaction().getId() != microTransaction.getId()) {
throw new OStorageException(
"Passed in and active micro-transaction are different micro-transactions. Passed in micro-transaction cannot be "
+ "rolled back.");
}
makeStorageDirty();
rollbackStorageTx();
// Invalidate cached record state touched by the rolled-back transaction.
microTransaction.updateRecordCacheAfterRollback();
txRollback.incrementAndGet();
} catch (final IOException e) {
throw OException.wrapException(new OStorageException("Error during micro-transaction rollback"), e);
} finally {
// Always detach the transaction from the thread, even on failure.
transaction.set(null);
}
} finally {
stateLock.releaseReadLock();
}
} catch (final RuntimeException ee) {
throw logAndPrepareForRethrow(ee);
} catch (final Error ee) {
throw logAndPrepareForRethrow(ee);
} catch (final Throwable t) {
throw logAndPrepareForRethrow(t);
}
} | java | {
"resource": ""
} |
q173427 | OAbstractPaginatedStorage.command | test | @Override
public final Object command(final OCommandRequestText iCommand) {
// Parses and executes a text command, retrying from scratch whenever the
// executor signals ORetryQueryException.
try {
while (true) {
try {
final OCommandExecutor executor = OCommandManager.instance().getExecutor(iCommand);
// COPY THE CONTEXT FROM THE REQUEST
executor.setContext(iCommand.getContext());
executor.setProgressListener(iCommand.getProgressListener());
executor.parse(iCommand);
return executeCommand(iCommand, executor);
} catch (final ORetryQueryException ignore) {
// Reset query state before retrying so pagination/limits start clean.
if (iCommand instanceof OQueryAbstract) {
final OQueryAbstract query = (OQueryAbstract) iCommand;
query.reset();
}
}
}
} catch (final RuntimeException ee) {
throw logAndPrepareForRethrow(ee);
} catch (final Error ee) {
throw logAndPrepareForRethrow(ee, false);
} catch (final Throwable t) {
throw logAndPrepareForRethrow(t);
}
} | java | {
"resource": ""
} |
q173428 | OAbstractPaginatedStorage.registerCluster | test | private int registerCluster(final OCluster cluster) {
// Registers a cluster in the name map and id list. A null cluster reserves
// the next free id slot without any name mapping.
final int id;
if (cluster != null) {
// CHECK FOR DUPLICATION OF NAMES
if (clusterMap.containsKey(cluster.getName().toLowerCase(configuration.getLocaleInstance()))) {
throw new OConfigurationException(
"Cannot add cluster '" + cluster.getName() + "' because it is already registered in database '" + name + "'");
}
// CREATE AND ADD THE NEW REF SEGMENT
clusterMap.put(cluster.getName().toLowerCase(configuration.getLocaleInstance()), cluster);
id = cluster.getId();
} else {
id = clusters.size();
}
setCluster(id, cluster);
return id;
} | java | {
"resource": ""
} |
q173429 | OPartitionedDatabasePool.setProperty | test | public Object setProperty(final String iName, final Object iValue) {
// Property keys are case-insensitive; a null value removes the property.
// Returns the previous value, if any.
final String key = iName.toLowerCase(Locale.ENGLISH);
return iValue == null ? properties.remove(key) : properties.put(key, iValue);
} | java | {
"resource": ""
} |
q173430 | OCommandExecutorSQLCreateClass.execute | test | public Object execute(final Map<Object, Object> iArgs) {
// Creates the parsed class; with IF NOT EXISTS an existing class is a no-op.
if (className == null)
throw new OCommandExecutionException("Cannot execute the command because it has not been parsed yet");
final ODatabaseDocument database = getDatabase();
boolean alreadyExists = database.getMetadata().getSchema().existsClass(className);
// Create unless the class exists AND IF NOT EXISTS was given; without the
// flag, createClass itself reports the duplicate.
if (!alreadyExists || !ifNotExists) {
if (clusters != null)
database.getMetadata().getSchema().createClass(className, clusters, superClasses.toArray(new OClass[0]));
else
database.getMetadata().getSchema().createClass(className, clusterIds, superClasses.toArray(new OClass[0]));
}
// Returns the total number of classes in the schema after the operation.
return database.getMetadata().getSchema().getClasses().size();
} | java | {
"resource": ""
} |
q173431 | OCommandExecutorSQLHASyncDatabase.execute | test | public Object execute(final Map<Object, Object> iArgs) {
// Triggers a (re)synchronization of the current database across the cluster.
final ODatabaseDocumentInternal database = getDatabase();
database.checkSecurity(ORule.ResourceGeneric.DATABASE, "sync", ORole.PERMISSION_UPDATE);
final OStorage stg = database.getStorage();
if (!(stg instanceof ODistributedStorage))
throw new ODistributedException("SYNC DATABASE command cannot be executed against a non distributed server");
final ODistributedStorage dStg = (ODistributedStorage) stg;
final OHazelcastPlugin dManager = (OHazelcastPlugin) dStg.getDistributedManager();
if (dManager == null || !dManager.isEnabled())
throw new OCommandExecutionException("OrientDB is not started in distributed mode");
final String databaseName = database.getName();
// Full sync unless the parsed statement requested a delta (non-full) install.
return dManager.installDatabase(true, databaseName, parsedStatement.isForce(), !parsedStatement.isFull());
} | java | {
"resource": ""
} |
q173432 | ODatabaseDocumentAbstract.delete | test | public ODatabase<ORecord> delete(final ORID iRecord, final int iVersion) {
// Load the record, force the expected version (for MVCC checking), delete it.
final ORecord loaded = load(iRecord);
ORecordInternal.setVersion(loaded, iVersion);
delete(loaded);
return this;
} | java | {
"resource": ""
} |
q173433 | ODatabaseDocumentAbstract.callbackHooks | test | public ORecordHook.RESULT callbackHooks(final ORecordHook.TYPE type, final OIdentifiable id) {
// Fires all registered hooks for the given event type on the given record.
// Cluster 0 (internal) records and re-entrant invocations are skipped.
if (id == null || hooks.isEmpty() || id.getIdentity().getClusterId() == 0)
return ORecordHook.RESULT.RECORD_NOT_CHANGED;
final ORecordHook.SCOPE scope = ORecordHook.SCOPE.typeToScope(type);
final int scopeOrdinal = scope.ordinal();
final ORID identity = id.getIdentity().copy();
// Re-entrancy guard: if this record is already being hooked on this thread, bail out.
if (!pushInHook(identity))
return ORecordHook.RESULT.RECORD_NOT_CHANGED;
try {
final ORecord rec = id.getRecord();
if (rec == null)
return ORecordHook.RESULT.RECORD_NOT_CHANGED;
final OScenarioThreadLocal.RUN_MODE runMode = OScenarioThreadLocal.INSTANCE.getRunMode();
boolean recordChanged = false;
for (ORecordHook hook : hooksByScope[scopeOrdinal]) {
// Filter hooks by where they are allowed to run in a distributed deployment.
switch (runMode) {
case DEFAULT: // NON_DISTRIBUTED OR PROXIED DB
if (getStorage().isDistributed()
&& hook.getDistributedExecutionMode() == ORecordHook.DISTRIBUTED_EXECUTION_MODE.TARGET_NODE)
// SKIP
continue;
break; // TARGET NODE
case RUNNING_DISTRIBUTED:
if (hook.getDistributedExecutionMode() == ORecordHook.DISTRIBUTED_EXECUTION_MODE.SOURCE_NODE)
continue;
}
final ORecordHook.RESULT res = hook.onTrigger(type, rec);
if (res == ORecordHook.RESULT.RECORD_CHANGED)
recordChanged = true;
else if (res == ORecordHook.RESULT.SKIP_IO)
// SKIP IO OPERATION
return res;
else if (res == ORecordHook.RESULT.SKIP)
// SKIP NEXT HOOKS AND RETURN IT
return res;
else if (res == ORecordHook.RESULT.RECORD_REPLACED)
return res;
}
return recordChanged ? ORecordHook.RESULT.RECORD_CHANGED : ORecordHook.RESULT.RECORD_NOT_CHANGED;
} finally {
// Always pop the re-entrancy guard, even on early return.
popInHook(identity);
}
} | java | {
"resource": ""
} |
q173434 | ODatabaseDocumentAbstract.delete | test | public ODatabaseDocument delete(final ORID iRecord) {
// Deleting by RID requires an open database active on the calling thread.
checkOpenness();
checkIfActive();
final ORecord loaded = load(iRecord);
// A missing record is treated as already deleted: silently succeed.
if (loaded != null) {
delete(loaded);
}
return this;
} | java | {
"resource": ""
} |
q173435 | ODatabaseDocumentAbstract.countView | test | public long countView(final String viewName) {
// Counts the records of the given view (non-polymorphic).
final OView cls = getMetadata().getImmutableSchemaSnapshot().getView(viewName);
if (cls == null)
// Report the requested name: 'cls' is null here and would print "View 'null'".
throw new IllegalArgumentException("View '" + viewName + "' not found in database");
return countClass(cls, false);
} | java | {
"resource": ""
} |
q173436 | ODatabaseDocumentAbstract.countClass | test | public long countClass(final String iClassName, final boolean iPolymorphic) {
// Counts the records of the given class, optionally including subclasses.
final OClass cls = getMetadata().getImmutableSchemaSnapshot().getClass(iClassName);
if (cls == null)
// Report the requested name: 'cls' is null here and would print "Class 'null'".
throw new IllegalArgumentException("Class '" + iClassName + "' not found in database");
return countClass(cls, iPolymorphic);
} | java | {
"resource": ""
} |
q173437 | ODatabaseDocumentAbstract.activateOnCurrentThread | test | @Override
public ODatabaseDocumentAbstract activateOnCurrentThread() {
// Bind this database instance to the calling thread's thread-local slot.
final ODatabaseRecordThreadLocal threadLocal = ODatabaseRecordThreadLocal.instance();
if (threadLocal != null) {
threadLocal.set(this);
}
return this;
} | java | {
"resource": ""
} |
q173438 | OEncryptionFactory.register | test | public void register(final OEncryption iEncryption) {
// Registers an encryption implementation instance under its name.
// NOTE(review): the duplicate-name IllegalArgumentExceptions thrown below are
// immediately caught by the catch(Exception) and only logged, so duplicate
// registrations fail silently — confirm this is intended behavior.
try {
final String name = iEncryption.name();
if (instances.containsKey(name))
throw new IllegalArgumentException("Encryption with name '" + name + "' was already registered");
if (classes.containsKey(name))
throw new IllegalArgumentException("Encryption with name '" + name + "' was already registered");
instances.put(name, iEncryption);
} catch (Exception e) {
OLogManager.instance().error(this, "Cannot register storage encryption algorithm '%s'", e, iEncryption);
}
} | java | {
"resource": ""
} |
q173439 | OrientBlob.getRelativeIndex | test | private int getRelativeIndex(long pos) {
// Maps an absolute position (assumed 1-based, per java.sql.Blob convention —
// TODO confirm) to a 0-based index inside the chunk containing it. Also sets
// currentChunkIndex and currentChunk as side effects.
int currentSize = 0;
currentChunkIndex = 0;
// loop until we find the chunks holding the given position
while (pos >= (currentSize += binaryDataChunks.get(currentChunkIndex).length))
currentChunkIndex++;
currentChunk = binaryDataChunks.get(currentChunkIndex);
currentSize -= currentChunk.length;
// the position referred to the target binary chunk
int relativePosition = (int) (pos - currentSize);
// the index of the first byte to be returned (1-based position -> 0-based index)
return relativePosition - 1;
} | java | {
"resource": ""
} |
q173440 | OBaseParser.parserOptionalWord | test | protected String parserOptionalWord(final boolean iUpperCase) {
// Remember where this word started so the parser can rewind if needed.
parserPreviousPos = parserCurrentPos;
parserNextWord(iUpperCase);
// An empty word means end of input: signal "no word" with null.
return parserLastWord.length() == 0 ? null : parserLastWord.toString();
} | java | {
"resource": ""
} |
q173441 | OBaseParser.parserRequiredWord | test | protected String parserRequiredWord(final boolean iUpperCase, final String iCustomMessage, String iSeparators) {
// Reads the next word, raising a syntax error when none is found.
if (iSeparators == null)
iSeparators = " ()=><,\r\n";
parserNextWord(iUpperCase, iSeparators);
if (parserLastWord.length() == 0)
throwSyntaxErrorException(iCustomMessage);
// Strip surrounding backquotes from quoted identifiers.
if (parserLastWord.charAt(0) == '`' && parserLastWord.charAt(parserLastWord.length() - 1) == '`') {
return parserLastWord.substring(1, parserLastWord.length() - 1);
}
return parserLastWord.toString();
} | java | {
"resource": ""
} |
q173442 | OBaseParser.parserNextChars | test | protected int parserNextChars(final boolean iUpperCase, final boolean iMandatory, final String... iCandidateWords) {
// Matches the upcoming characters against a set of candidate keywords and
// returns the index of the unique match; -1 (or a syntax error if mandatory)
// when nothing matches.
parserPreviousPos = parserCurrentPos;
parserSkipWhiteSpaces();
parserEscapeSequenceCount = 0;
parserLastWord.setLength(0);
// Work on a copy: candidates are nulled out as they get discarded.
final String[] processedWords = Arrays.copyOf(iCandidateWords, iCandidateWords.length);
// PARSE THE CHARS
final String text2Use = iUpperCase ? parserTextUpperCase : parserText;
final int max = text2Use.length();
// NOTE(review): this offset adjustment assumes the upper-cased text may differ
// in length from the original (locale case mapping) — confirm the intent.
parserCurrentPos = parserCurrentPos + parserTextUpperCase.length() - parserText.length();
// PARSE TILL 1 CHAR AFTER THE END TO SIMULATE A SEPARATOR AS EOF
for (int i = 0; parserCurrentPos <= max; ++i) {
final char ch = parserCurrentPos < max ? text2Use.charAt(parserCurrentPos) : '\n';
final boolean separator = ch == ' ' || ch == '\r' || ch == '\n' || ch == '\t' || ch == '(';
if (!separator)
parserLastWord.append(ch);
// CLEAR CANDIDATES
int candidatesWordsCount = 0;
int candidatesWordsPos = -1;
for (int c = 0; c < processedWords.length; ++c) {
final String w = processedWords[c];
if (w != null) {
final int wordSize = w.length();
if ((separator && wordSize > i) || (!separator && (i > wordSize - 1 || w.charAt(i) != ch)))
// DISCARD IT
processedWords[c] = null;
else {
candidatesWordsCount++;
if (candidatesWordsCount == 1)
// REMEMBER THE POSITION
candidatesWordsPos = c;
}
}
}
if (candidatesWordsCount == 1) {
// ONE RESULT, CHECKING IF FOUND
final String w = processedWords[candidatesWordsPos];
if (w.length() == i + (separator ? 0 : 1) && !Character.isLetter(ch))
// FOUND!
return candidatesWordsPos;
}
if (candidatesWordsCount == 0 || separator)
break;
parserCurrentPos++;
}
if (iMandatory)
throwSyntaxErrorException("Found unexpected keyword '" + parserLastWord + "' while it was expected '"
+ Arrays.toString(iCandidateWords) + "'");
return -1;
} | java | {
"resource": ""
} |
q173443 | OBaseParser.parserOptionalKeyword | test | protected boolean parserOptionalKeyword(final String... iWords) {
// Reads the next word; returns false at end of input. When candidate words are
// supplied, the parsed word must be one of them or a syntax error is raised.
parserNextWord(true, " \r\n,");
if (parserLastWord.length() == 0)
return false;
// FOUND: CHECK IF IT'S IN RANGE
// An empty candidate list accepts any word.
boolean found = iWords.length == 0;
for (String w : iWords) {
if (parserLastWord.toString().equals(w)) {
found = true;
break;
}
}
if (!found)
throwSyntaxErrorException("Found unexpected keyword '" + parserLastWord + "' while it was expected '"
+ Arrays.toString(iWords) + "'");
return true;
} | java | {
"resource": ""
} |
q173444 | OBaseParser.parserCheckSeparator | test | private boolean parserCheckSeparator(final char c, final String iSeparatorChars) {
// Accept c only when it appears in the separator set, recording it as the
// last separator seen.
if (iSeparatorChars.indexOf(c) >= 0) {
parserLastSeparator = c;
return true;
}
return false;
} | java | {
"resource": ""
} |
q173445 | OCommandExecutorSQLDropClass.execute | test | public Object execute(final Map<Object, Object> iArgs) {
// Drops the parsed class, refusing to drop non-empty V/E classes unless UNSAFE.
if (className == null) {
throw new OCommandExecutionException("Cannot execute the command because it has not been parsed yet");
}
final ODatabaseDocument database = getDatabase();
// IF EXISTS: silently succeed when the class is absent.
if (ifExists && !database.getMetadata().getSchema().existsClass(className)) {
return true;
}
final OClass cls = database.getMetadata().getSchema().getClass(className);
if (cls == null) {
return null;
}
final long records = cls.count(true);
if (records > 0 && !unsafe) {
// NOT EMPTY, CHECK IF CLASS IS OF VERTEX OR EDGES
if (cls.isSubClassOf("V")) {
// FOUND VERTEX CLASS
throw new OCommandExecutionException("'DROP CLASS' command cannot drop class '" + className
+ "' because it contains Vertices. Use 'DELETE VERTEX' command first to avoid broken edges in a database, or apply the 'UNSAFE' keyword to force it");
} else if (cls.isSubClassOf("E")) {
// FOUND EDGE CLASS
throw new OCommandExecutionException("'DROP CLASS' command cannot drop class '" + className
+ "' because it contains Edges. Use 'DELETE EDGE' command first to avoid broken vertices in a database, or apply the 'UNSAFE' keyword to force it");
}
}
database.getMetadata().getSchema().dropClass(className);
if (records > 0 && unsafe) {
// UNSAFE drop of a populated graph class: warn about possible dangling links.
// (The original's redundant nested 'if (unsafe)' was removed — this branch
// already implies unsafe == true.)
if (cls.isSubClassOf("V")) {
// FOUND VERTICES
OLogManager.instance().warn(this,
"Dropped class '%s' containing %d vertices using UNSAFE mode. Database could contain broken edges", className,
records);
} else if (cls.isSubClassOf("E")) {
// FOUND EDGES
OLogManager.instance().warn(this,
"Dropped class '%s' containing %d edges using UNSAFE mode. Database could contain broken vertices", className, records);
}
}
return true;
} | java | {
"resource": ""
} |
q173446 | OStorageConfigurationSegment.clearConfigurationFiles | test | private void clearConfigurationFiles() throws IOException {
// Remove both the primary configuration file and its backup, if present.
Files.deleteIfExists(storagePath.resolve(NAME));
Files.deleteIfExists(storagePath.resolve(BACKUP_NAME));
} | java | {
"resource": ""
} |
q173447 | OMemoryInputStream.getAsByteArrayOffset | test | public int getAsByteArrayOffset() {
// Returns the offset of the current length-prefixed byte array inside the
// buffer and advances past it; -1 when the stream is exhausted.
if (position >= length)
return -1;
final int begin = position;
final int size = OBinaryProtocol.bytes2int(buffer, position);
// Skip the 4-byte length prefix plus the payload itself.
position += OBinaryProtocol.SIZE_INT + size;
return begin;
} | java | {
"resource": ""
} |
q173448 | OAuth2FeignRequestInterceptor.extract | test | protected String extract(String tokenType) {
OAuth2AccessToken accessToken = getToken();
return String.format("%s %s", tokenType, accessToken.getValue());
} | java | {
"resource": ""
} |
q173449 | OAuth2FeignRequestInterceptor.acquireAccessToken | test | protected OAuth2AccessToken acquireAccessToken()
throws UserRedirectRequiredException {
// Obtains a fresh access token for the configured resource, restoring any
// preserved authorization state and caching the result in the client context.
AccessTokenRequest tokenRequest = oAuth2ClientContext.getAccessTokenRequest();
if (tokenRequest == null) {
throw new AccessTokenRequiredException(
"Cannot find valid context on request for resource '"
+ resource.getId() + "'.",
resource);
}
String stateKey = tokenRequest.getStateKey();
if (stateKey != null) {
// Re-attach state saved before a user redirect (authorization-code flow).
tokenRequest.setPreservedState(
oAuth2ClientContext.removePreservedState(stateKey));
}
OAuth2AccessToken existingToken = oAuth2ClientContext.getAccessToken();
if (existingToken != null) {
oAuth2ClientContext.setAccessToken(existingToken);
}
OAuth2AccessToken obtainableAccessToken;
obtainableAccessToken = accessTokenProvider.obtainAccessToken(resource,
tokenRequest);
if (obtainableAccessToken == null || obtainableAccessToken.getValue() == null) {
throw new IllegalStateException(
" Access token provider returned a null token, which is illegal according to the contract.");
}
oAuth2ClientContext.setAccessToken(obtainableAccessToken);
return obtainableAccessToken;
} | java | {
"resource": ""
} |
q173450 | AccessTokenContextRelay.copyToken | test | public boolean copyToken() {
// Copies the bearer token from the current Spring Security authentication
// into the OAuth2 client context so outgoing calls can relay it.
// Returns true only when a token was actually copied.
if (context.getAccessToken() == null) {
Authentication authentication = SecurityContextHolder.getContext()
.getAuthentication();
if (authentication != null) {
Object details = authentication.getDetails();
if (details instanceof OAuth2AuthenticationDetails) {
OAuth2AuthenticationDetails holder = (OAuth2AuthenticationDetails) details;
String token = holder.getTokenValue();
DefaultOAuth2AccessToken accessToken = new DefaultOAuth2AccessToken(
token);
// Preserve the original token type (e.g. "Bearer") when available.
String tokenType = holder.getTokenType();
if (tokenType != null) {
accessToken.setTokenType(tokenType);
}
context.setAccessToken(accessToken);
return true;
}
}
}
return false;
} | java | {
"resource": ""
} |
q173451 | FastBufferedInputStream.noMoreCharacters | test | protected boolean noMoreCharacters() throws IOException {
// Refill the internal buffer when it is empty; report true only at end of stream.
if (avail > 0) {
return false;
}
avail = is.read(buffer);
if (avail <= 0) {
// read() returned -1 (EOF) or 0: normalise to an empty buffer and report EOF.
avail = 0;
return true;
}
pos = 0;
return false;
} | java | {
"resource": ""
} |
q173452 | FastBufferedInputStream.readLine | test | public int readLine(final byte[] array, final EnumSet<LineTerminator> terminators) throws IOException {
// Convenience overload: read into the whole array starting at offset 0.
return readLine(array, 0, array.length, terminators);
} | java | {
"resource": ""
} |
q173453 | FastBufferedInputStream.readLine | test | public int readLine(final byte[] array, final int off, final int len, final EnumSet<LineTerminator> terminators) throws IOException {
// Reads bytes into array[off..off+len) up to (and excluding) a line terminator
// from the given set, consuming the terminator. Returns the number of bytes
// stored, or -1 at end of stream. A CR or LF that is NOT an accepted
// terminator is stored in the output like any other byte.
ByteArrays.ensureOffsetLength(array ,off, len);
if (len == 0) return 0; // 0-length reads always return 0
if (noMoreCharacters()) return -1;
int i, k = 0, remaining = len, read = 0; // The number of bytes still to be read
for(;;) {
// Copy bytes until the buffer drains, the caller's space runs out, or a
// CR/LF candidate terminator is reached (k holds the stopping byte).
for(i = 0; i < avail && i < remaining && (k = buffer[pos + i]) != '\n' && k != '\r' ; i++);
System.arraycopy(buffer, pos, array, off + read, i);
pos += i;
avail -= i;
read += i;
remaining -= i;
if (remaining == 0) {
readBytes += read;
return read; // We did not stop because of a terminator
}
if (avail > 0) { // We met a terminator
if (k == '\n') { // LF first
pos++;
avail--;
if (terminators.contains(LineTerminator.LF)) {
readBytes += read + 1;
return read;
}
else {
array[off + read++] = '\n';
remaining--;
}
}
else if (k == '\r') { // CR first
pos++;
avail--;
if (terminators.contains(LineTerminator.CR_LF)) {
if (avail > 0) {
if (buffer[pos] == '\n') { // CR/LF with LF already in the buffer.
pos ++;
avail--;
readBytes += read + 2;
return read;
}
}
else { // We must search for the LF.
if (noMoreCharacters()) {
// Not found a matching LF because of end of file, will return CR in buffer if not a terminator
if (! terminators.contains(LineTerminator.CR)) {
array[off + read++] = '\r';
remaining--;
readBytes += read;
}
else readBytes += read + 1;
return read;
}
if (buffer[0] == '\n') {
// Found matching LF, won't return terminators in the buffer
pos++;
avail--;
readBytes += read + 2;
return read;
}
}
}
// CR alone: terminator if accepted, otherwise stored as a plain byte.
if (terminators.contains(LineTerminator.CR)) {
readBytes += read + 1;
return read;
}
array[off + read++] = '\r';
remaining--;
}
}
else if (noMoreCharacters()) {
// Buffer exhausted and stream at EOF: return what was accumulated.
readBytes += read;
return read;
}
}
} | java | {
"resource": ""
} |
q173454 | FastBufferedInputStream.skipByReading | test | private long skipByReading(final long n) throws IOException {
long toSkip = n;
int len;
while(toSkip > 0) {
len = is.read(buffer, 0, (int)Math.min(buffer.length, toSkip));
if (len > 0) toSkip -= len;
else break;
}
return n - toSkip;
} | java | {
"resource": ""
} |
q173455 | FastBufferedInputStream.skip | test | @Override
public long skip(final long n) throws IOException {
if (n <= avail) {
final int m = (int)n;
pos += m;
avail -= m;
readBytes += n;
return n;
}
long toSkip = n - avail, result = 0;
avail = 0;
while (toSkip != 0 && (result = is == System.in ? skipByReading(toSkip) : is.skip(toSkip)) < toSkip) {
if (result == 0) {
if (is.read() == -1) break;
toSkip--;
}
else toSkip -= result;
}
final long t = n - (toSkip - result);
readBytes += t;
return t;
} | java | {
"resource": ""
} |
q173456 | Arrays.ensureOffsetLength | test | public static void ensureOffsetLength(final int arrayLength, final int offset, final int length) {
if (offset < 0) throw new ArrayIndexOutOfBoundsException("Offset (" + offset + ") is negative");
if (length < 0) throw new IllegalArgumentException("Length (" + length + ") is negative");
if (offset + length > arrayLength) throw new ArrayIndexOutOfBoundsException("Last index (" + (offset + length) + ") is greater than array length (" + arrayLength + ")");
} | java | {
"resource": ""
} |
q173457 | Arrays.mergeSort | test | public static void mergeSort(final int from, final int to, final IntComparator c, final Swapper swapper) {
/*
* We retain the same method signature as quickSort. Given only a comparator and swapper we
* do not know how to copy and move elements from/to temporary arrays. Hence, in contrast to
* the JDK mergesorts this is an "in-place" mergesort, i.e. does not allocate any temporary
* arrays. A non-inplace mergesort would perhaps be faster in most cases, but would require
* non-intuitive delegate objects...
*/
final int length = to - from;
// Insertion sort on smallest arrays
if (length < MERGESORT_NO_REC) {
for (int i = from; i < to; i++) {
for (int j = i; j > from && (c.compare(j - 1, j) > 0); j--) {
swapper.swap(j, j - 1);
}
}
return;
}
// Recursively sort halves
int mid = (from + to) >>> 1;
mergeSort(from, mid, c, swapper);
mergeSort(mid, to, c, swapper);
// If list is already sorted, nothing left to do. This is an
// optimization that results in faster sorts for nearly ordered lists.
if (c.compare(mid - 1, mid) <= 0) return;
// Merge sorted halves
inPlaceMerge(from, mid, to, c, swapper);
} | java | {
"resource": ""
} |
q173458 | Arrays.swap | test | protected static void swap(final Swapper swapper, int a, int b, final int n) {
for (int i = 0; i < n; i++, a++, b++) swapper.swap(a, b);
} | java | {
"resource": ""
} |
q173459 | Arrays.parallelQuickSort | test | public static void parallelQuickSort(final int from, final int to, final IntComparator comp, final Swapper swapper) {
final ForkJoinPool pool = new ForkJoinPool(Runtime.getRuntime().availableProcessors());
pool.invoke(new ForkJoinGenericQuickSort(from, to, comp, swapper));
pool.shutdown();
} | java | {
"resource": ""
} |
q173460 | HashCommon.murmurHash3 | test | public static int murmurHash3(int x) {
x ^= x >>> 16;
x *= 0x85ebca6b;
x ^= x >>> 13;
x *= 0xc2b2ae35;
x ^= x >>> 16;
return x;
} | java | {
"resource": ""
} |
q173461 | HashCommon.murmurHash3 | test | public static long murmurHash3(long x) {
x ^= x >>> 33;
x *= 0xff51afd7ed558ccdL;
x ^= x >>> 33;
x *= 0xc4ceb9fe1a85ec53L;
x ^= x >>> 33;
return x;
} | java | {
"resource": ""
} |
q173462 | InspectableFileCachedInputStream.write | test | @Override
public int write(final ByteBuffer byteBuffer) throws IOException {
ensureOpen();
final int remaining = byteBuffer.remaining();
if (inspectable < buffer.length) {
// Still some space in the inspectable buffer.
final int toBuffer = Math.min(buffer.length - inspectable, remaining);
byteBuffer.get(buffer, inspectable, toBuffer);
inspectable += toBuffer;
}
if (byteBuffer.hasRemaining()) {
fileChannel.position(writePosition);
writePosition += fileChannel.write(byteBuffer);
}
return remaining;
} | java | {
"resource": ""
} |
q173463 | InspectableFileCachedInputStream.truncate | test | public void truncate(final long size) throws FileNotFoundException, IOException {
fileChannel.truncate(Math.max(size, writePosition));
} | java | {
"resource": ""
} |
q173464 | FastBufferedOutputStream.position | test | @Override
public void position(final long newPosition) throws IOException {
flush();
if (repositionableStream != null) repositionableStream.position(newPosition);
else if (fileChannel != null) fileChannel.position(newPosition);
else throw new UnsupportedOperationException("position() can only be called if the underlying byte stream implements the RepositionableStream interface or if the getChannel() method of the underlying byte stream exists and returns a FileChannel");
} | java | {
"resource": ""
} |
q173465 | AbstractRefreshMetricsListener.noFailRefreshEndMetricsReporting | test | private final void noFailRefreshEndMetricsReporting(ConsumerRefreshMetrics refreshMetrics) {
try {
refreshEndMetricsReporting(refreshMetrics);
} catch (Exception e) {
// Metric reporting is not considered critical to consumer refresh. Log exceptions and continue.
log.log(Level.SEVERE, "Encountered an exception in reporting consumer refresh metrics, ignoring exception and continuing with consumer refresh", e);
}
} | java | {
"resource": ""
} |
q173466 | SimultaneousExecutor.awaitSuccessfulCompletionOfCurrentTasks | test | public void awaitSuccessfulCompletionOfCurrentTasks() throws InterruptedException, ExecutionException {
for(Future<?> f : futures) {
f.get();
}
futures.clear();
} | java | {
"resource": ""
} |
q173467 | HashIndexSelect.findMatches | test | public Stream<S> findMatches(Q query) {
Object[] queryArray = matchFields.stream().map(mf -> mf.extract(query)).toArray();
HollowHashIndexResult matches = hhi.findMatches(queryArray);
if (matches == null) {
return Stream.empty();
}
return matches.stream().mapToObj(i -> selectField.extract(api, i));
} | java | {
"resource": ""
} |
q173468 | HollowCompactor.findCompactionTargets | test | private Set<String> findCompactionTargets() {
List<HollowSchema> schemas = HollowSchemaSorter.dependencyOrderedSchemaList(readEngine.getSchemas());
Set<String> typesToCompact = new HashSet<String>();
for(HollowSchema schema : schemas) {
if(isCompactionCandidate(schema.getName())) {
if(!candidateIsDependentOnAnyTargetedType(schema.getName(), typesToCompact))
typesToCompact.add(schema.getName());
}
}
return typesToCompact;
} | java | {
"resource": ""
} |
q173469 | HollowFilesystemBlobStorageCleaner.cleanSnapshots | test | @Override
public void cleanSnapshots() {
File[] files = getFilesByType(HollowProducer.Blob.Type.SNAPSHOT.prefix);
if(files == null || files.length <= numOfSnapshotsToKeep) {
return;
}
sortByLastModified(files);
for(int i= numOfSnapshotsToKeep; i < files.length; i++){
File file = files[i];
boolean deleted = file.delete();
if(!deleted) {
log.warning("Could not delete snapshot " + file.getPath());
}
}
} | java | {
"resource": ""
} |
q173470 | HollowReadFieldUtils.fieldHashCode | test | public static int fieldHashCode(HollowObjectTypeDataAccess typeAccess, int ordinal, int fieldPosition) {
HollowObjectSchema schema = typeAccess.getSchema();
switch(schema.getFieldType(fieldPosition)) {
case BOOLEAN:
Boolean bool = typeAccess.readBoolean(ordinal, fieldPosition);
return booleanHashCode(bool);
case BYTES:
case STRING:
return typeAccess.findVarLengthFieldHashCode(ordinal, fieldPosition);
case DOUBLE:
double d = typeAccess.readDouble(ordinal, fieldPosition);
return doubleHashCode(d);
case FLOAT:
float f = typeAccess.readFloat(ordinal, fieldPosition);
return floatHashCode(f);
case INT:
return intHashCode(typeAccess.readInt(ordinal, fieldPosition));
case LONG:
long l = typeAccess.readLong(ordinal, fieldPosition);
return longHashCode(l);
case REFERENCE:
return typeAccess.readOrdinal(ordinal, fieldPosition);
}
throw new IllegalStateException("I don't know how to hash a " + schema.getFieldType(fieldPosition));
} | java | {
"resource": ""
} |
q173471 | HollowReadFieldUtils.fieldsAreEqual | test | public static boolean fieldsAreEqual(HollowObjectTypeDataAccess typeAccess1, int ordinal1, int fieldPosition1, HollowObjectTypeDataAccess typeAccess2, int ordinal2, int fieldPosition2) {
HollowObjectSchema schema1 = typeAccess1.getSchema();
switch(schema1.getFieldType(fieldPosition1)) {
case BOOLEAN:
Boolean bool1 = typeAccess1.readBoolean(ordinal1, fieldPosition1);
Boolean bool2 = typeAccess2.readBoolean(ordinal2, fieldPosition2);
return bool1 == bool2;
case BYTES:
byte[] data1 = typeAccess1.readBytes(ordinal1, fieldPosition1);
byte[] data2 = typeAccess2.readBytes(ordinal2, fieldPosition2);
return Arrays.equals(data1, data2);
case DOUBLE:
double d1 = typeAccess1.readDouble(ordinal1, fieldPosition1);
double d2 = typeAccess2.readDouble(ordinal2, fieldPosition2);
return Double.compare(d1, d2) == 0;
case FLOAT:
float f1 = typeAccess1.readFloat(ordinal1, fieldPosition1);
float f2 = typeAccess2.readFloat(ordinal2, fieldPosition2);
return Float.compare(f1, f2) == 0;
case INT:
int i1 = typeAccess1.readInt(ordinal1, fieldPosition1);
int i2 = typeAccess2.readInt(ordinal2, fieldPosition2);
return i1 == i2;
case LONG:
long l1 = typeAccess1.readLong(ordinal1, fieldPosition1);
long l2 = typeAccess2.readLong(ordinal2, fieldPosition2);
return l1 == l2;
case STRING:
String s1 = typeAccess1.readString(ordinal1, fieldPosition1);
return typeAccess2.isStringFieldEqual(ordinal2, fieldPosition2, s1);
case REFERENCE:
if(typeAccess1 == typeAccess2 && fieldPosition1 == fieldPosition2)
return typeAccess1.readOrdinal(ordinal1, fieldPosition1) == typeAccess2.readOrdinal(ordinal2, fieldPosition2);
default:
}
throw new IllegalStateException("I don't know how to test equality for a " + schema1.getFieldType(fieldPosition1));
} | java | {
"resource": ""
} |
q173472 | TransitiveSetTraverser.removeReferencedOutsideClosure | test | public static void removeReferencedOutsideClosure(HollowReadStateEngine stateEngine, Map<String, BitSet> matches) {
List<HollowSchema> orderedSchemas = HollowSchemaSorter.dependencyOrderedSchemaList(stateEngine);
Collections.reverse(orderedSchemas);
for(HollowSchema referencedSchema : orderedSchemas) {
if(matches.containsKey(referencedSchema.getName())) {
for(HollowSchema referencerSchema : orderedSchemas) {
if(referencerSchema == referencedSchema)
break;
if(matches.containsKey(referencedSchema.getName()) && matches.get(referencedSchema.getName()).cardinality() > 0)
traverseReferencesOutsideClosure(stateEngine, referencerSchema.getName(), referencedSchema.getName(), matches, REMOVE_REFERENCED_OUTSIDE_CLOSURE);
}
}
}
} | java | {
"resource": ""
} |
q173473 | AbstractProducerMetricsListener.onAnnouncementComplete | test | @Override
public void onAnnouncementComplete(com.netflix.hollow.api.producer.Status status, HollowProducer.ReadState readState, long version, Duration elapsed) {
boolean isAnnouncementSuccess = false;
long dataSizeBytes = 0l;
if (status.getType() == com.netflix.hollow.api.producer.Status.StatusType.SUCCESS) {
isAnnouncementSuccess = true;
lastAnnouncementSuccessTimeNanoOptional = OptionalLong.of(System.nanoTime());
}
HollowReadStateEngine stateEngine = readState.getStateEngine();
dataSizeBytes = stateEngine.calcApproxDataSize();
announcementMetricsBuilder
.setDataSizeBytes(dataSizeBytes)
.setIsAnnouncementSuccess(isAnnouncementSuccess)
.setAnnouncementDurationMillis(elapsed.toMillis());
lastAnnouncementSuccessTimeNanoOptional.ifPresent(announcementMetricsBuilder::setLastAnnouncementSuccessTimeNano);
announcementMetricsReporting(announcementMetricsBuilder.build());
} | java | {
"resource": ""
} |
q173474 | AbstractProducerMetricsListener.onCycleComplete | test | @Override
public void onCycleComplete(com.netflix.hollow.api.producer.Status status, HollowProducer.ReadState readState, long version, Duration elapsed) {
boolean isCycleSuccess;
long cycleEndTimeNano = System.nanoTime();
if (status.getType() == com.netflix.hollow.api.producer.Status.StatusType.SUCCESS) {
isCycleSuccess = true;
consecutiveFailures = 0l;
lastCycleSuccessTimeNanoOptional = OptionalLong.of(cycleEndTimeNano);
} else {
isCycleSuccess = false;
consecutiveFailures ++;
}
cycleMetricsBuilder
.setConsecutiveFailures(consecutiveFailures)
.setCycleDurationMillis(elapsed.toMillis())
.setIsCycleSuccess(isCycleSuccess);
lastCycleSuccessTimeNanoOptional.ifPresent(cycleMetricsBuilder::setLastCycleSuccessTimeNano);
cycleMetricsReporting(cycleMetricsBuilder.build());
} | java | {
"resource": ""
} |
q173475 | HollowBlobHeaderReader.readHeaderTags | test | private Map<String, String> readHeaderTags(DataInputStream dis) throws IOException {
int numHeaderTags = dis.readShort();
Map<String, String> headerTags = new HashMap<String, String>();
for (int i = 0; i < numHeaderTags; i++) {
headerTags.put(dis.readUTF(), dis.readUTF());
}
return headerTags;
} | java | {
"resource": ""
} |
q173476 | HollowObjectMapper.extractPrimaryKey | test | public RecordPrimaryKey extractPrimaryKey(Object o) {
HollowObjectTypeMapper typeMapper = (HollowObjectTypeMapper) getTypeMapper(o.getClass(), null, null);
return new RecordPrimaryKey(typeMapper.getTypeName(), typeMapper.extractPrimaryKey(o));
} | java | {
"resource": ""
} |
q173477 | ThreadSafeBitSet.clearAll | test | public void clearAll() {
ThreadSafeBitSetSegments segments = this.segments.get();
for(int i=0;i<segments.numSegments();i++) {
AtomicLongArray segment = segments.getSegment(i);
for(int j=0;j<segment.length();j++) {
segment.set(j, 0L);
}
}
} | java | {
"resource": ""
} |
q173478 | SnapshotPopulatedOrdinalsReader.readOrdinals | test | public static void readOrdinals(DataInputStream dis, HollowTypeStateListener[] listeners) throws IOException {
int numLongs = dis.readInt();
int currentOrdinal = 0;
for(int i=0;i<numLongs;i++) {
long l = dis.readLong();
notifyPopulatedOrdinals(l, currentOrdinal, listeners);
currentOrdinal += 64;
}
} | java | {
"resource": ""
} |
q173479 | HollowSparseIntegerSet.size | test | public long size() {
SparseBitSet current;
long size;
do {
current = sparseBitSetVolatile;
size = current.estimateBitsUsed();
} while (current != sparseBitSetVolatile);
return size;
} | java | {
"resource": ""
} |
q173480 | DiffViewOutputGenerator.getFieldValue | test | private static String getFieldValue(HollowDiffViewRow row, boolean useFrom) {
Field field = useFrom ? row.getFieldPair().getFrom() : row.getFieldPair().getTo();
if (row.getFieldPair().isLeafNode()) {
return field.getValue() == null ? "null"
: field.getValue().toString().replace("|", "│");
} else {
String suffix = field.getValue() == null ? " [null]" : "";
return "(" + field.getTypeName() + ")" + suffix;
}
} | java | {
"resource": ""
} |
q173481 | SegmentedByteArray.copy | test | public void copy(ByteData src, long srcPos, long destPos, long length) {
for(long i=0;i<length;i++) {
set(destPos++, src.get(srcPos++));
}
} | java | {
"resource": ""
} |
q173482 | SegmentedByteArray.copy | test | public int copy(long srcPos, byte[] data, int destPos, int length) {
int segmentSize = 1 << log2OfSegmentSize;
int remainingBytesInSegment = (int)(segmentSize - (srcPos & bitmask));
int dataPosition = destPos;
while(length > 0) {
byte[] segment = segments[(int)(srcPos >>> log2OfSegmentSize)];
int bytesToCopyFromSegment = Math.min(remainingBytesInSegment, length);
System.arraycopy(segment, (int)(srcPos & bitmask), data, dataPosition, bytesToCopyFromSegment);
dataPosition += bytesToCopyFromSegment;
srcPos += bytesToCopyFromSegment;
remainingBytesInSegment = segmentSize - (int)(srcPos & bitmask);
length -= bytesToCopyFromSegment;
}
return dataPosition - destPos;
} | java | {
"resource": ""
} |
q173483 | SegmentedByteArray.rangeEquals | test | public boolean rangeEquals(long rangeStart, SegmentedByteArray compareTo, long cmpStart, int length) {
for(int i=0;i<length;i++)
if(get(rangeStart + i) != compareTo.get(cmpStart + i))
return false;
return true;
} | java | {
"resource": ""
} |
q173484 | SegmentedByteArray.orderedCopy | test | public void orderedCopy(SegmentedByteArray src, long srcPos, long destPos, long length) {
int segmentLength = 1 << log2OfSegmentSize;
int currentSegment = (int)(destPos >>> log2OfSegmentSize);
int segmentStartPos = (int)(destPos & bitmask);
int remainingBytesInSegment = segmentLength - segmentStartPos;
while(length > 0) {
int bytesToCopyFromSegment = (int)Math.min(remainingBytesInSegment, length);
ensureCapacity(currentSegment);
int copiedBytes = src.orderedCopy(srcPos, segments[currentSegment], segmentStartPos, bytesToCopyFromSegment);
srcPos += copiedBytes;
length -= copiedBytes;
segmentStartPos = 0;
remainingBytesInSegment = segmentLength;
currentSegment++;
}
} | java | {
"resource": ""
} |
q173485 | SegmentedByteArray.orderedCopy | test | public int orderedCopy(long srcPos, byte[] data, int destPos, int length) {
int segmentSize = 1 << log2OfSegmentSize;
int remainingBytesInSegment = (int)(segmentSize - (srcPos & bitmask));
int dataPosition = destPos;
while(length > 0) {
byte[] segment = segments[(int)(srcPos >>> log2OfSegmentSize)];
int bytesToCopyFromSegment = Math.min(remainingBytesInSegment, length);
orderedCopy(segment, (int)(srcPos & bitmask), data, dataPosition, bytesToCopyFromSegment);
dataPosition += bytesToCopyFromSegment;
srcPos += bytesToCopyFromSegment;
remainingBytesInSegment = segmentSize - (int)(srcPos & bitmask);
length -= bytesToCopyFromSegment;
}
return dataPosition - destPos;
} | java | {
"resource": ""
} |
q173486 | SegmentedByteArray.readFrom | test | public void readFrom(InputStream is, long length) throws IOException {
int segmentSize = 1 << log2OfSegmentSize;
int segment = 0;
byte scratch[] = new byte[segmentSize];
while(length > 0) {
ensureCapacity(segment);
long bytesToCopy = Math.min(segmentSize, length);
long bytesCopied = 0;
while(bytesCopied < bytesToCopy) {
bytesCopied += is.read(scratch, (int)bytesCopied, (int)(bytesToCopy - bytesCopied));
}
orderedCopy(scratch, 0, segments[segment++], 0, (int)bytesCopied);
length -= bytesCopied;
}
} | java | {
"resource": ""
} |
q173487 | SegmentedByteArray.writeTo | test | public void writeTo(OutputStream os, long startPosition, long len) throws IOException {
int segmentSize = 1 << log2OfSegmentSize;
int remainingBytesInSegment = segmentSize - (int)(startPosition & bitmask);
long remainingBytesInCopy = len;
while(remainingBytesInCopy > 0) {
long bytesToCopyFromSegment = Math.min(remainingBytesInSegment, remainingBytesInCopy);
os.write(segments[(int)(startPosition >>> log2OfSegmentSize)], (int)(startPosition & bitmask), (int)bytesToCopyFromSegment);
startPosition += bytesToCopyFromSegment;
remainingBytesInSegment = segmentSize - (int)(startPosition & bitmask);
remainingBytesInCopy -= bytesToCopyFromSegment;
}
} | java | {
"resource": ""
} |
q173488 | SegmentedByteArray.ensureCapacity | test | private void ensureCapacity(int segmentIndex) {
while(segmentIndex >= segments.length) {
segments = Arrays.copyOf(segments, segments.length * 3 / 2);
}
if(segments[segmentIndex] == null) {
segments[segmentIndex] = memoryRecycler.getByteArray();
}
} | java | {
"resource": ""
} |
q173489 | HollowObjectSchema.getPosition | test | public int getPosition(String fieldName) {
Integer index = nameFieldIndexLookup.get(fieldName);
if (index == null) {
return -1;
}
return index;
} | java | {
"resource": ""
} |
q173490 | HollowHashIndexBuilder.calculateDedupedSizesAndTotalNumberOfSelectBuckets | test | private long calculateDedupedSizesAndTotalNumberOfSelectBuckets(MultiLinkedElementArray elementArray, GrowingSegmentedLongArray matchIndexHashAndSizeArray) {
long totalBuckets = 0;
long maxSize = 0;
int[] selectArray = new int[8];
for(int i=0;i<elementArray.numLists();i++) {
int listSize = elementArray.listSize(i);
int setSize = 0;
int predictedBuckets = HashCodes.hashTableSize(listSize);
int hashMask = predictedBuckets - 1;
if(predictedBuckets > selectArray.length)
selectArray = new int[predictedBuckets];
for(int j=0;j<predictedBuckets;j++)
selectArray[j] = -1;
HollowOrdinalIterator iter = elementArray.iterator(i);
int selectOrdinal = iter.next();
while(selectOrdinal != HollowOrdinalIterator.NO_MORE_ORDINALS) {
int hash = HashCodes.hashInt(selectOrdinal);
int bucket = hash & hashMask;
while(true) {
if(selectArray[bucket] == selectOrdinal)
break;
if(selectArray[bucket] == -1) {
selectArray[bucket] = selectOrdinal;
setSize++;
break;
}
bucket = (bucket+1) & hashMask;
}
selectOrdinal = iter.next();
}
long matchIndexHashAndSize = matchIndexHashAndSizeArray.get(i);
matchIndexHashAndSize |= (long)setSize << 32;
matchIndexHashAndSizeArray.set(i, matchIndexHashAndSize);
totalBuckets += HashCodes.hashTableSize(setSize);
if(setSize > maxSize)
maxSize = setSize;
}
return totalBuckets | (long)bitsRequiredToRepresentValue(maxSize) << 56;
} | java | {
"resource": ""
} |
q173491 | HollowAPIGenerator.hasCollectionsInDataSet | test | protected static boolean hasCollectionsInDataSet(HollowDataset dataset) {
for(HollowSchema schema : dataset.getSchemas()) {
if ((schema instanceof HollowListSchema) ||
(schema instanceof HollowSetSchema) ||
(schema instanceof HollowMapSchema)) {
return true;
}
}
return false;
} | java | {
"resource": ""
} |
q173492 | HollowAPIGenerator.generateFiles | test | public void generateFiles(File directory) throws IOException {
if (packageName != null && !packageName.trim().isEmpty()) {
String packageDir = packageName.replace(".", File.separator);
if (!directory.getAbsolutePath().endsWith(packageDir)) {
directory = new File(directory, packageDir);
}
}
directory.mkdirs();
HollowAPIClassJavaGenerator apiClassGenerator = new HollowAPIClassJavaGenerator(packageName, apiClassname,
dataset, parameterizeClassNames, config);
HollowAPIFactoryJavaGenerator apiFactoryGenerator = new HollowAPIFactoryJavaGenerator(packageName,
apiClassname, dataset, config);
HollowHashIndexGenerator hashIndexGenerator = new HollowHashIndexGenerator(packageName, apiClassname, dataset, config);
generateFile(directory, apiClassGenerator);
generateFile(directory, apiFactoryGenerator);
generateFile(directory, hashIndexGenerator);
generateFilesForHollowSchemas(directory);
} | java | {
"resource": ""
} |
q173493 | HollowAPIGenerator.generateFilesForHollowSchemas | test | protected void generateFilesForHollowSchemas(File directory) throws IOException {
for(HollowSchema schema : dataset.getSchemas()) {
String type = schema.getName();
if (config.isUseHollowPrimitiveTypes() && HollowCodeGenerationUtils.isPrimitiveType(type)) continue; // skip if using hollow primitive type
generateFile(directory, getStaticAPIGenerator(schema));
generateFile(directory, getHollowObjectGenerator(schema));
generateFile(directory, getHollowFactoryGenerator(schema));
if(schema.getSchemaType() == SchemaType.OBJECT) {
HollowObjectSchema objSchema = (HollowObjectSchema)schema;
generateFile(directory, new HollowObjectDelegateInterfaceGenerator(packageName, objSchema,
ergonomicShortcuts, dataset, config));
generateFile(directory, new HollowObjectDelegateCachedImplGenerator(packageName, objSchema,
ergonomicShortcuts, dataset, config));
generateFile(directory, new HollowObjectDelegateLookupImplGenerator(packageName, objSchema,
ergonomicShortcuts, dataset, config));
generateFile(directory, new HollowDataAccessorGenerator(packageName, apiClassname, objSchema,
dataset, config));
if (!config.isReservePrimaryKeyIndexForTypeWithPrimaryKey()) {
generateFile(directory, new LegacyHollowPrimaryKeyIndexGenerator(packageName, apiClassname,
objSchema, dataset, config));
} else if ((objSchema).getPrimaryKey() != null) {
generateFile(directory, new HollowPrimaryKeyIndexGenerator(dataset, packageName, apiClassname,
objSchema, config));
generateFile(directory, new HollowUniqueKeyIndexGenerator(packageName, apiClassname, objSchema,
dataset, config));
}
}
}
} | java | {
"resource": ""
} |
q173494 | HollowPrimaryKeyValueDeriver.keyMatches | test | public boolean keyMatches(int ordinal, Object... keys) {
if(keys.length != fieldPathIndexes.length)
return false;
for(int i=0;i<keys.length;i++) {
if(!keyMatches(keys[i], ordinal, i))
return false;
}
return true;
} | java | {
"resource": ""
} |
q173495 | HollowPrimaryKeyValueDeriver.getRecordKey | test | public Object[] getRecordKey(int ordinal) {
Object[] results = new Object[fieldPathIndexes.length];
for (int i = 0; i < fieldPathIndexes.length; i++) {
results[i] = readValue(ordinal, i);
}
return results;
} | java | {
"resource": ""
} |
q173496 | FieldPaths.createFieldPathForPrimaryKey | test | public static FieldPath<ObjectFieldSegment> createFieldPathForPrimaryKey(
HollowDataset dataset, String type, String path) {
boolean autoExpand = !path.endsWith("!");
path = autoExpand ? path : path.substring(0, path.length() - 1);
FieldPath<FieldSegment> fp = createFieldPath(dataset, type, path, autoExpand, false, false);
// Erasure trick to avoid copying when it is known the list only contains
// instances of ObjectFieldSegment
assert fp.segments.stream().allMatch(o -> o instanceof ObjectFieldSegment);
@SuppressWarnings( {"unchecked", "raw"})
FieldPath<ObjectFieldSegment> result = (FieldPath<ObjectFieldSegment>) (FieldPath) fp;
return result;
} | java | {
"resource": ""
} |
q173497 | FieldPaths.createFieldPathForHashIndex | test | public static FieldPath<FieldSegment> createFieldPathForHashIndex(HollowDataset dataset, String type, String path) {
return createFieldPath(dataset, type, path, false, false, true);
} | java | {
"resource": ""
} |
q173498 | FieldPaths.createFieldPathForPrefixIndex | test | public static FieldPath<FieldSegment> createFieldPathForPrefixIndex(
HollowDataset dataset, String type, String path, boolean autoExpand) {
// If autoExpand is false then requireFullPath must be true
boolean requireFullPath = !autoExpand;
return createFieldPath(dataset, type, path, autoExpand, requireFullPath, true);
} | java | {
"resource": ""
} |
q173499 | ObjectIdentityOrdinalMap.put | test | public void put(Object obj, int ordinal) {
int hashCode = System.identityHashCode(obj);
int segment = segment(hashCode);
segments[segment].put(obj, hashCode, ordinal);
} | java | {
"resource": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.