_id stringlengths 2 7 | title stringlengths 3 140 | partition stringclasses 3
values | text stringlengths 73 34.1k | language stringclasses 1
value | meta_information dict |
|---|---|---|---|---|---|
q173000 | EndpointPublisherImpl.doPublish | test | protected Context doPublish(ServiceTarget target, DeploymentUnit unit) throws Exception {
// Publishes the WS endpoints of the given deployment unit and starts the web app that
// hosts them. Returns a Context pairing the webapp context root with the endpoints.
Deployment deployment = unit.getAttachment(WSAttachmentKeys.DEPLOYMENT_KEY);
List<Endpoint> endpoints = deployment.getService().getEndpoints();
//If we're running in a Service, that will already have proper dependencies set on the installed endpoint services,
//otherwise we need to explicitly wait for the endpoint services to be started before creating the webapp.
if (!runningInService) {
final ServiceRegistry registry = unit.getServiceRegistry();
// StabilityMonitor blocks until every registered endpoint service controller is stable.
final StabilityMonitor monitor = new StabilityMonitor();
for (Endpoint ep : endpoints) {
final ServiceName serviceName = EndpointService.getServiceName(unit, ep.getShortName());
monitor.addController(registry.getRequiredService(serviceName));
}
try {
monitor.awaitStability();
} finally {
// Always detach the monitor from the controllers, even if awaitStability() is interrupted.
monitor.clear();
}
}
deployment.addAttachment(WebDeploymentController.class, startWebApp(host, unit)); //TODO simplify and use findChild later in destroy()/stopWebApp()
return new Context(unit.getAttachment(WSAttachmentKeys.JBOSSWEB_METADATA_KEY).getContextRoot(), endpoints);
} | java | {
"resource": ""
} |
q173001 | EndpointPublisherImpl.stopWebApp | test | protected void stopWebApp(Deployment deployment) throws Exception {
// Stops, then destroys, the web application controller attached to the deployment.
// Each phase wraps its failure in a dedicated, logged exception type so the caller
// can tell which phase broke.
WebDeploymentController controller;
try {
controller = deployment.getAttachment(WebDeploymentController.class);
controller.stop();
} catch (Exception stopFailure) {
throw WSLogger.ROOT_LOGGER.stopContextPhaseFailed(stopFailure);
}
try {
controller.destroy();
} catch (Exception destroyFailure) {
throw WSLogger.ROOT_LOGGER.destroyContextPhaseFailed(destroyFailure);
}
} | java | {
"resource": ""
} |
q173002 | WeldBootstrapService.start | test | public synchronized void start(final StartContext context) {
// Boots the Weld container for this deployment: registers injected per-deployment
// services, starts the container with the deployment module's class loader as TCCL,
// and only then publishes this service through the consumer.
if (started) {
throw WeldLogger.ROOT_LOGGER.alreadyRunning("WeldContainer");
}
started = true;
WeldLogger.DEPLOYMENT_LOGGER.startingWeldService(deploymentName);
// set up injected services
addWeldService(SecurityServices.class, securityServicesSupplier.get());
// Transaction services are optional; only register them when a supplier was wired in.
TransactionServices transactionServices = weldTransactionServicesSupplier != null ? weldTransactionServicesSupplier.get() : null;
if (transactionServices != null) {
addWeldService(TransactionServices.class, transactionServices);
}
// Do not override an ExecutorServices implementation the deployment already provides.
if (!deployment.getServices().contains(ExecutorServices.class)) {
addWeldService(ExecutorServices.class, executorServicesSupplier.get());
}
ModuleGroupSingletonProvider.addClassLoaders(deployment.getModule().getClassLoader(),
deployment.getSubDeploymentClassLoaders());
// Weld bootstrap must run with the deployment class loader as TCCL; restore afterwards.
ClassLoader oldTccl = WildFlySecurityManager.getCurrentContextClassLoaderPrivileged();
try {
WildFlySecurityManager.setCurrentContextClassLoaderPrivileged(deployment.getModule().getClassLoader());
bootstrap.startContainer(deploymentName, environment, deployment);
WeldProvider.containerInitialized(Container.instance(deploymentName), getBeanManager(), deployment);
} finally {
WildFlySecurityManager.setCurrentContextClassLoaderPrivileged(oldTccl);
}
weldBootstrapServiceConsumer.accept(this);
} | java | {
"resource": ""
} |
q173003 | WSEndpointConfigMapping.registerEndpointConfig | test | public void registerEndpointConfig(final String endpointClass, final EndpointConfig config) {
// Registers the endpoint configuration to use for the given endpoint class name.
// Still throws IllegalArgumentException on null arguments, but now with a message
// saying which argument was null (the original threw with no message at all).
if (endpointClass == null) {
throw new IllegalArgumentException("endpointClass cannot be null");
}
if (config == null) {
throw new IllegalArgumentException("config cannot be null");
}
endpointConfigMap.put(endpointClass, config);
} | java | {
"resource": ""
} |
q173004 | TimerTask.retryTimeout | test | protected void retryTimeout(TimerImpl timer) throws Exception {
// Retries a failed timeout: under the timer lock, move an active timer to RETRY_TIMEOUT
// and persist it; the actual timeout callback runs only after the lock is released so
// user code never executes while the timer lock is held.
boolean callTimeout = false;
timer.lock();
try {
if (timer.isActive()) {
EJB3_TIMER_LOGGER.retryingTimeout(timer);
timer.setTimerState(TimerState.RETRY_TIMEOUT, Thread.currentThread());
timerService.persistTimer(timer, false);
callTimeout = true;
} else {
// Timer was cancelled/expired in the meantime; nothing to retry.
EJB3_TIMER_LOGGER.timerNotActive(timer);
}
} finally {
timer.unlock();
}
if(callTimeout) {
this.callTimeout(timer);
}
} | java | {
"resource": ""
} |
q173005 | TimerTask.postTimeoutProcessing | test | protected void postTimeoutProcessing(TimerImpl timer) throws InterruptedException {
// After a timeout callback: expire single-shot timers, re-arm interval timers, and
// persist the resulting state. Cancelled or already-expired timers are left untouched.
timer.lock();
try {
TimerState timerState = timer.getState();
if (timerState != TimerState.CANCELED
&& timerState != TimerState.EXPIRED) {
if (timer.getInterval() == 0) {
// Single-shot timer: it has fired, so expire it.
timerService.expireTimer(timer);
} else {
// Repeating timer: return it to ACTIVE for the next scheduled run.
timer.setTimerState(TimerState.ACTIVE, null);
}
timerService.persistTimer(timer, false);
}
} finally {
timer.unlock();
}
} | java | {
"resource": ""
} |
q173006 | SecurityActions.getDeclaredMethods | test | static Method[] getDeclaredMethods(final Class<?> c) {
// Returns c.getDeclaredMethods(), running inside a privileged block when a
// SecurityManager is installed so callers do not need the permission themselves.
if (System.getSecurityManager() != null) {
return AccessController.doPrivileged((PrivilegedAction<Method[]>) c::getDeclaredMethods);
}
return c.getDeclaredMethods();
} | java | {
"resource": ""
} |
q173007 | SecurityActions.getDeclaredFields | test | static Field[] getDeclaredFields(final Class<?> c) {
// Returns c.getDeclaredFields(), running inside a privileged block when a
// SecurityManager is installed so callers do not need the permission themselves.
if (System.getSecurityManager() != null) {
return AccessController.doPrivileged((PrivilegedAction<Field[]>) c::getDeclaredFields);
}
return c.getDeclaredFields();
} | java | {
"resource": ""
} |
q173008 | SecurityActions.getConstructor | test | static Constructor<?> getConstructor(final Class<?> c, final Class<?>... params)
throws NoSuchMethodException {
// Returns c.getConstructor(params), using a privileged block when a SecurityManager is
// installed. The privileged action cannot throw a checked exception, so a miss is
// signalled with null and rethrown here; the rethrown exception now carries the class
// name and parameter types (the original threw NoSuchMethodException with no message).
if (System.getSecurityManager() == null)
return c.getConstructor(params);
Constructor<?> result = AccessController.doPrivileged(new PrivilegedAction<Constructor<?>>() {
public Constructor<?> run() {
try {
return c.getConstructor(params);
} catch (NoSuchMethodException e) {
// Checked exceptions cannot cross doPrivileged; signal "not found" with null.
return null;
}
}
});
if (result != null)
return result;
throw new NoSuchMethodException(c.getName() + " " + java.util.Arrays.toString(params));
} | java | {
"resource": ""
} |
q173009 | SecurityActions.getMethod | test | static Method getMethod(final Class<?> c, final String name, final Class<?>... params)
throws NoSuchMethodException {
// Returns c.getMethod(name, params), using a privileged block when a SecurityManager is
// installed. The privileged action cannot throw a checked exception, so a miss is
// signalled with null and rethrown here; the rethrown exception now carries the class,
// method name and parameter types (the original threw with no message at all).
if (System.getSecurityManager() == null)
return c.getMethod(name, params);
Method result = AccessController.doPrivileged(new PrivilegedAction<Method>() {
public Method run() {
try {
return c.getMethod(name, params);
} catch (NoSuchMethodException e) {
// Checked exceptions cannot cross doPrivileged; signal "not found" with null.
return null;
}
}
});
if (result != null)
return result;
throw new NoSuchMethodException(c.getName() + "." + name + java.util.Arrays.toString(params));
} | java | {
"resource": ""
} |
q173010 | MigrateOperation.addMessagingActiveMQExtension | test | private void addMessagingActiveMQExtension(OperationContext context, Map<PathAddress, ModelNode> migrationOperations, boolean describe) {
// Adds the messaging-activemq extension to the model if it is not present yet.
// In describe mode the add operation is only recorded in migrationOperations;
// otherwise it is executed immediately as a MODEL step.
Resource root = context.readResourceFromRoot(PathAddress.EMPTY_ADDRESS, false);
if (root.getChildrenNames(EXTENSION).contains(MESSAGING_ACTIVEMQ_EXTENSION)) {
// extension is already added, do nothing
return;
}
PathAddress extensionAddress = pathAddress(EXTENSION, MESSAGING_ACTIVEMQ_EXTENSION);
OperationEntry addEntry = context.getRootResourceRegistration().getOperationEntry(extensionAddress, ADD);
ModelNode addOperation = createAddOperation(extensionAddress);
addOperation.get(MODULE).set(MESSAGING_ACTIVEMQ_MODULE);
if (describe) {
migrationOperations.put(extensionAddress, addOperation);
} else {
context.addStep(context.getResult().get(extensionAddress.toString()), addOperation, addEntry.getOperationHandler(), MODEL);
}
} | java | {
"resource": ""
} |
q173011 | MigrateOperation.parameterIsAllowed | test | private boolean parameterIsAllowed(String name, String resourceType) {
// Decides whether a legacy messaging parameter may be carried over when migrating
// the given resource type.
switch (resourceType) {
case REMOTE_ACCEPTOR:
case HTTP_ACCEPTOR:
case REMOTE_CONNECTOR:
case HTTP_CONNECTOR:
// WFLY-5667 - for now remove only use-nio. Revisit this code when Artemis offers an API
// to know which parameters are ignored.
return !"use-nio".equals(name);
default:
// accept any parameter for other resources.
return true;
}
} | java | {
"resource": ""
} |
q173012 | MigrateOperation.migrateGenericTransport | test | private void migrateGenericTransport(ModelNode addOperation) {
// Rewrites legacy HornetQ Netty acceptor/connector factory class names to their
// Artemis equivalents; any other factory class is passed through unchanged.
String factoryClass = addOperation.get(FACTORY_CLASS.getName()).asString();
final String newFactoryClass;
switch (factoryClass) {
case HORNETQ_NETTY_ACCEPTOR_FACTORY:
newFactoryClass = ARTEMIS_NETTY_ACCEPTOR_FACTORY;
break;
case HORNETQ_NETTY_CONNECTOR_FACTORY:
newFactoryClass = ARTEMIS_NETTY_CONNECTOR_FACTORY;
break;
default:
newFactoryClass = factoryClass;
}
addOperation.get(FACTORY_CLASS.getName()).set(newFactoryClass);
} | java | {
"resource": ""
} |
q173013 | CancellationFlag.cancel | test | public boolean cancel(boolean setFlag) {
// Attempts to move the flag into a cancelled state via a CAS retry loop.
// Returns true if the task is (now or already) cancelled, false if it had already
// started. When setFlag is true, the *_FLAG_SET variant of the state is recorded
// even when cancellation itself is no longer possible.
final AtomicInteger stateRef = this.stateRef;
int oldVal, newVal;
do {
oldVal = stateRef.get();
if (oldVal == ST_WAITING) {
newVal = ST_CANCELLED;
} else if (oldVal == ST_CANCELLED) {
if (! setFlag) {
// Already cancelled and no flag requested: nothing to update.
return true;
}
newVal = ST_CANCELLED_FLAG_SET;
} else if (oldVal == ST_CANCELLED_FLAG_SET) {
// do nothing
return true;
} else if (oldVal == ST_STARTED) {
if (! setFlag) {
// Too late to cancel and no flag requested.
return false;
}
newVal = ST_STARTED_FLAG_SET;
} else {
assert oldVal == ST_STARTED_FLAG_SET;
return false;
}
} while (! stateRef.compareAndSet(oldVal, newVal));
return newVal == ST_CANCELLED || newVal == ST_CANCELLED_FLAG_SET;
} | java | {
"resource": ""
} |
q173014 | CancellationFlag.runIfNotCancelled | test | public boolean runIfNotCancelled() {
// Atomically transitions WAITING -> STARTED. Returns false when the flag was cancelled
// first; throws if called from any other state (i.e. invoked twice).
final AtomicInteger stateRef = this.stateRef;
int oldVal;
do {
oldVal = stateRef.get();
if (oldVal == ST_CANCELLED || oldVal == ST_CANCELLED_FLAG_SET) {
return false;
} else if (oldVal != ST_WAITING) {
// STARTED / STARTED_FLAG_SET here means the method was invoked twice.
throw Assert.unreachableCode();
}
} while (! stateRef.compareAndSet(oldVal, ST_STARTED));
return true;
} | java | {
"resource": ""
} |
q173015 | OPropertyEmbedded.setTypeInternal | test | protected void setTypeInternal(final OType iType) {
// Changes this property's type under the schema write lock, after a security check.
// Only casts allowed by the current type are accepted; a no-op when the type is unchanged.
getDatabase().checkSecurity(ORule.ResourceGeneric.SCHEMA, ORole.PERMISSION_UPDATE);
acquireSchemaWriteLock();
try {
if (iType == globalRef.getType())
// NO CHANGES
return;
if (!iType.getCastable().contains(globalRef.getType()))
throw new IllegalArgumentException("Cannot change property type from " + globalRef.getType() + " to " + iType);
// Re-resolve the global property under the new type; the name stays the same.
this.globalRef = owner.owner.findOrCreateGlobalProperty(this.globalRef.getName(), iType);
} finally {
releaseSchemaWriteLock();
}
} | java | {
"resource": ""
} |
q173016 | OBaseExpression.canExecuteIndexedFunctionWithoutIndex | test | public boolean canExecuteIndexedFunctionWithoutIndex(OFromClause target, OCommandContext context, OBinaryCompareOperator operator,
Object right) {
// Delegates to the wrapped identifier; an expression without an identifier can never
// execute an indexed function without an index.
return this.identifier != null && this.identifier.canExecuteIndexedFunctionWithoutIndex(target, context, operator, right);
} | java | {
"resource": ""
} |
q173017 | OWhereClause.estimate | test | public long estimate(OClass oClass, long threshold, OCommandContext ctx) {
// Estimates how many records this WHERE clause will touch on the given class, using
// index statistics where possible. The class record count (halved as a rough
// selectivity guess) is both the fallback and the upper bound of the estimate.
long count = oClass.count();
if (count > 1) {
count = count / 2;
}
// Below the threshold a full scan is cheap anyway; skip the index analysis.
if (count < threshold) {
return count;
}
long indexesCount = 0l;
// Each flattened AND block is an alternative (OR branch); their estimates are summed.
List<OAndBlock> flattenedConditions = flatten();
Set<OIndex<?>> indexes = oClass.getIndexes();
for (OAndBlock condition : flattenedConditions) {
List<OBinaryCondition> indexedFunctConditions = condition
.getIndexedFunctionConditions(oClass, (ODatabaseDocumentInternal) ctx.getDatabase());
long conditionEstimation = Long.MAX_VALUE;
if (indexedFunctConditions != null) {
// Indexed function conditions: take the cheapest estimate among them.
for (OBinaryCondition cond : indexedFunctConditions) {
OFromClause from = new OFromClause(-1);
OFromItem item = new OFromItem(-1);
from.item = item;
from.item.setIdentifier(new OIdentifier(oClass.getName()));
long newCount = cond.estimateIndexed(from, ctx);
if (newCount < conditionEstimation) {
conditionEstimation = newCount;
}
}
} else {
// Plain equality conditions: find the index whose field prefix matches most keys.
Map<String, Object> conditions = getEqualityOperations(condition, ctx);
for (OIndex index : indexes) {
// Full-text indexes cannot answer equality lookups; skip them.
if (index.getType().equals(OClass.INDEX_TYPE.FULLTEXT.name()) || index.getType()
.equals(OClass.INDEX_TYPE.FULLTEXT_HASH_INDEX.name())) {
continue;
}
List<String> indexedFields = index.getDefinition().getFields();
int nMatchingKeys = 0;
for (String indexedField : indexedFields) {
if (conditions.containsKey(indexedField)) {
nMatchingKeys++;
} else {
// Only a contiguous prefix of the index fields is usable.
break;
}
}
if (nMatchingKeys > 0) {
long newCount = estimateFromIndex(index, conditions, nMatchingKeys);
if (newCount < conditionEstimation) {
conditionEstimation = newCount;
}
}
}
}
if (conditionEstimation > count) {
// No index helps for this branch; the scan estimate dominates.
return count;
}
indexesCount += conditionEstimation;
}
return Math.min(indexesCount, count);
} | java | {
"resource": ""
} |
q173018 | OReflectionHelper.findClasses | test | private static List<Class<?>> findClasses(final File iDirectory, String iPackageName, ClassLoader iClassLoader)
throws ClassNotFoundException {
// Recursively loads all classes found under iDirectory, treating each sub-directory as
// a sub-package of iPackageName. Returns an empty list for a non-existent directory.
final List<Class<?>> classes = new ArrayList<Class<?>>();
if (!iDirectory.exists())
return classes;
// The current directory name is itself a package segment.
iPackageName += "." + iDirectory.getName();
String className;
final File[] files = iDirectory.listFiles();
if (files != null)
for (File file : files) {
if (file.isDirectory()) {
// Directories with a dot in the name cannot be valid package segments.
if (file.getName().contains("."))
continue;
classes.addAll(findClasses(file, iPackageName, iClassLoader));
} else if (file.getName().endsWith(CLASS_EXTENSION)) {
// Strip the ".class" suffix to obtain the simple class name.
className = file.getName().substring(0, file.getName().length() - CLASS_EXTENSION.length());
classes.add(Class.forName(iPackageName + '.' + className, true, iClassLoader));
}
}
return classes;
} | java | {
"resource": ""
} |
q173019 | OReflectionHelper.getClassessOfInterface | test | public static List<Class<?>> getClassessOfInterface(String thePackage, Class<?> theInterface, final ClassLoader iClassLoader) {
// Scans the given package and returns every class that directly implements theInterface.
// Classpath scanning failures are logged and yield the (possibly partial) list collected so far.
final List<Class<?>> matches = new ArrayList<Class<?>>();
try {
for (Class<?> candidate : getClassesFor(thePackage, iClassLoader)) {
// Only directly declared interfaces are considered (inherited ones are not walked).
if (Arrays.asList(candidate.getInterfaces()).contains(theInterface)) {
matches.add(candidate);
}
}
} catch (ClassNotFoundException ex) {
OLogManager.instance().error(null, "Error finding classes", ex);
}
return matches;
} | java | {
"resource": ""
} |
q173020 | OReflectionHelper.getGenericMultivalueType | test | public static Class<?> getGenericMultivalueType(final Field p) {
// Resolves the element type of a multi-value field: the value type for map fields, the
// element type for parameterized collections, or the component type for arrays.
// Returns null when no element type can be determined.
if (p.getType() instanceof Class<?>) {
final Type declaredType = p.getGenericType();
if (declaredType instanceof ParameterizedType) {
final ParameterizedType parameterized = (ParameterizedType) declaredType;
final Type[] typeArgs = parameterized.getActualTypeArguments();
if (typeArgs != null && typeArgs.length > 0) {
// NOTE(review): this direction of isAssignableFrom means only fields declared as
// Map itself (or a supertype of Map) take the map branch — confirm intentional.
if (((Class<?>) parameterized.getRawType()).isAssignableFrom(Map.class)) {
// Maps: the value type (second type argument) is the element type.
if (typeArgs[1] instanceof Class<?>) {
return (Class<?>) typeArgs[1];
} else if (typeArgs[1] instanceof ParameterizedType) {
return (Class<?>) ((ParameterizedType) typeArgs[1]).getRawType();
}
} else if (typeArgs[0] instanceof Class<?>) {
return (Class<?>) typeArgs[0];
} else if (typeArgs[0] instanceof ParameterizedType) {
return (Class<?>) ((ParameterizedType) typeArgs[0]).getRawType();
}
}
} else if (p.getType().isArray()) {
return p.getType().getComponentType();
}
}
return null;
} | java | {
"resource": ""
} |
q173021 | OQueryOperatorContainsText.evaluateRecord | test | @Override
public Object evaluateRecord(final OIdentifiable iRecord, ODocument iCurrentResult, final OSQLFilterCondition iCondition,
final Object iLeft, final Object iRight, OCommandContext iContext, final ODocumentSerializer serializer) {
// CONTAINSTEXT: true when the string form of the left operand contains the string
// form of the right operand; any null operand evaluates to false.
if (iLeft == null || iRight == null) {
return false;
}
return iLeft.toString().contains(iRight.toString());
} | java | {
"resource": ""
} |
q173022 | ODocumentSimpleFieldHandlingStrategy.deriveFieldType | test | protected OType deriveFieldType(ODocument iRecord, String fieldName, OType requestedFieldType) {
// Schema defined types can not be ignored
if (iRecord.getSchemaClass().existsProperty(fieldName)) {
return iRecord.getSchemaClass().getProperty(fieldName).getType();
}
// New type
if (requestedFieldType != null) {
return requestedFieldType;
}
// Existing type (not fixed by the schema)
return iRecord.fieldType(fieldName);
} | java | {
"resource": ""
} |
q173023 | OLocalRecordCache.updateRecord | test | public void updateRecord(final ORecord record) {
// Stores the record in the level-1 cache unless it belongs to the excluded cluster,
// has an invalid identity, has unsaved changes, or is a tombstone (deleted version).
if (record.getIdentity().getClusterId() != excludedCluster && record.getIdentity().isValid() && !record.isDirty()
&& !ORecordVersionHelper.isTombstone(record.getVersion())) {
// Avoid touching the cache when the very same instance is already cached.
if (underlying.get(record.getIdentity()) != record)
underlying.put(record);
}
} | java | {
"resource": ""
} |
q173024 | OLocalRecordCache.findRecord | test | public ORecord findRecord(final ORID rid) {
// Looks up a record in the level-1 cache, updating the profiler hit/miss counters.
// Returns null on a cache miss.
final ORecord cached = underlying.get(rid);
if (cached == null) {
Orient.instance().getProfiler().updateCounter(CACHE_MISS, "Record not found in Level1 Cache", 1L,
"db.*.cache.level1.cache.notFound");
} else {
Orient.instance().getProfiler().updateCounter(CACHE_HIT, "Record found in Level1 Cache", 1L, "db.*.cache.level1.cache.found");
}
return cached;
} | java | {
"resource": ""
} |
q173025 | OSelectExecutionPlanner.getMinimalSetOfNodesForShardedQuery | test | private Map<String, Set<String>> getMinimalSetOfNodesForShardedQuery(String localNode, Map<String, Set<String>> clusterMap,
Set<String> queryClusters) {
//approximate algorithm, the problem is NP-complete
// Greedy set-cover: picks a small set of nodes whose clusters together cover all
// clusters touched by the query. The local node is tried first to minimize remote
// calls; then the node covering the most uncovered clusters is chosen repeatedly.
Map<String, Set<String>> result = new LinkedHashMap<>();
Set<String> uncovered = new HashSet<>();
uncovered.addAll(queryClusters);
// Cluster names are matched case-insensitively; nulls are discarded.
uncovered = uncovered.stream().filter(x -> x != null).map(x -> x.toLowerCase(Locale.ENGLISH)).collect(Collectors.toSet());
//try local node first
Set<String> nextNodeClusters = new HashSet<>();
Set<String> clustersForNode = clusterMap.get(localNode);
if (clustersForNode != null) {
nextNodeClusters.addAll(clustersForNode);
}
nextNodeClusters.retainAll(uncovered);
if (nextNodeClusters.size() > 0) {
result.put(localNode, nextNodeClusters);
uncovered.removeAll(nextNodeClusters);
}
while (uncovered.size() > 0) {
String nextNode = findItemThatCoversMore(uncovered, clusterMap);
nextNodeClusters = new HashSet<>();
nextNodeClusters.addAll(clusterMap.get(nextNode));
nextNodeClusters.retainAll(uncovered);
if (nextNodeClusters.size() == 0) {
// No node owns the remaining clusters: the sharded query cannot be executed.
throw new OCommandExecutionException(
"Cannot execute a sharded query: clusters [" + uncovered.stream().collect(Collectors.joining(", "))
+ "] are not present on any node" + "\n [" + clusterMap.entrySet().stream()
.map(x -> "" + x.getKey() + ":(" + x.getValue().stream().collect(Collectors.joining(",")) + ")")
.collect(Collectors.joining(", ")) + "]");
}
result.put(nextNode, nextNodeClusters);
uncovered.removeAll(nextNodeClusters);
}
return result;
} | java | {
"resource": ""
} |
q173026 | OSelectExecutionPlanner.moveFlattededEqualitiesLeft | test | private static List<OAndBlock> moveFlattededEqualitiesLeft(List<OAndBlock> flattenedWhereClause) {
// Reorders every AND block so equality comparisons come before all other conditions
// (equalities are the conditions that can be matched against indexes). Relative order
// within each of the two groups is preserved. Returns null for null input.
if (flattenedWhereClause == null) {
return null;
}
final List<OAndBlock> reordered = new ArrayList<>();
for (OAndBlock block : flattenedWhereClause) {
final OAndBlock copy = block.copy();
final List<OBooleanExpression> equalities = new ArrayList<>();
final List<OBooleanExpression> others = new ArrayList<>();
for (OBooleanExpression exp : copy.getSubBlocks()) {
final boolean isEquality =
exp instanceof OBinaryCondition && ((OBinaryCondition) exp).getOperator() instanceof OEqualsCompareOperator;
(isEquality ? equalities : others).add(exp);
}
final OAndBlock rebuilt = new OAndBlock(-1);
rebuilt.getSubBlocks().addAll(equalities);
rebuilt.getSubBlocks().addAll(others);
reordered.add(rebuilt);
}
return reordered;
} | java | {
"resource": ""
} |
q173027 | OSelectExecutionPlanner.addOrderByProjections | test | private static void addOrderByProjections(QueryPlanningInfo info) {
// If the ORDER BY references aliases that are not part of the projection, add them as
// helper projections so sorting can happen, and install a post-ORDER-BY projection
// that restores the originally requested aliases. No-op when ordering is already
// applied, when there is no ORDER BY or projection, or when the projection is "*".
// (Cleanup: the original re-checked info.orderBy for null after the guard below, and
// repeated the same "additionalOrderByProjections.size() > 0" test in two separate
// if blocks; both redundancies are removed with identical behavior.)
if (info.orderApplied || info.expand || info.unwind != null || info.orderBy == null || info.orderBy.getItems().size() == 0
|| info.projection == null || info.projection.getItems() == null || (info.projection.getItems().size() == 1
&& info.projection.getItems().get(0).isAll())) {
return;
}
OOrderBy newOrderBy = info.orderBy.copy();
List<OProjectionItem> additionalOrderByProjections = calculateAdditionalOrderByProjections(info.projection.getAllAliases(),
newOrderBy);
if (additionalOrderByProjections.size() > 0) {
info.orderBy = newOrderBy;//the ORDER BY has changed
// Re-project the original aliases after the sort, hiding the helper projections.
info.projectionAfterOrderBy = new OProjection(-1);
info.projectionAfterOrderBy.setItems(new ArrayList<>());
for (String alias : info.projection.getAllAliases()) {
info.projectionAfterOrderBy.getItems().add(projectionFromAlias(new OIdentifier(alias)));
}
for (OProjectionItem item : additionalOrderByProjections) {
if (info.preAggregateProjection != null) {
// With aggregation, the helper value must flow through every projection stage.
info.preAggregateProjection.getItems().add(item);
info.aggregateProjection.getItems().add(projectionFromAlias(item.getAlias()));
info.projection.getItems().add(projectionFromAlias(item.getAlias()));
} else {
info.projection.getItems().add(item);
}
}
}
} | java | {
"resource": ""
} |
q173028 | OSelectExecutionPlanner.extractSubQueries | test | private static void extractSubQueries(QueryPlanningInfo info) {
// Lifts sub-queries out of the LET, WHERE, projection, ORDER BY and GROUP BY clauses
// into LET assignments. Sub-queries that reference the parent record become
// record-level LETs; independent ones become global LETs evaluated once.
SubQueryCollector collector = new SubQueryCollector();
if (info.perRecordLetClause != null) {
info.perRecordLetClause.extractSubQueries(collector);
}
// i/j keep the LET-clause sub-queries in their original relative positions.
int i = 0;
int j = 0;
for (Map.Entry<OIdentifier, OStatement> entry : collector.getSubQueries().entrySet()) {
OIdentifier alias = entry.getKey();
OStatement query = entry.getValue();
if (query.refersToParent()) {
addRecordLevelLet(info, alias, query, j++);
} else {
addGlobalLet(info, alias, query, i++);
}
}
collector.reset();
if (info.whereClause != null) {
info.whereClause.extractSubQueries(collector);
}
if (info.projection != null) {
info.projection.extractSubQueries(collector);
}
if (info.orderBy != null) {
info.orderBy.extractSubQueries(collector);
}
if (info.groupBy != null) {
info.groupBy.extractSubQueries(collector);
}
// Sub-queries from the remaining clauses are appended (no explicit position).
for (Map.Entry<OIdentifier, OStatement> entry : collector.getSubQueries().entrySet()) {
OIdentifier alias = entry.getKey();
OStatement query = entry.getValue();
if (query.refersToParent()) {
addRecordLevelLet(info, alias, query);
} else {
addGlobalLet(info, alias, query);
}
}
} | java | {
"resource": ""
} |
q173029 | OSelectExecutionPlanner.isFromClusters | test | private boolean isFromClusters(ORid rid, Set<String> filterClusters, ODatabase database) {
// Returns true when the RID's cluster is one of the allowed filter clusters.
// filterClusters is required; fail fast with an explanatory message (the original
// threw IllegalArgumentException with no message).
if (filterClusters == null) {
throw new IllegalArgumentException("filterClusters cannot be null");
}
String clusterName = database.getClusterNameById(rid.getCluster().getValue().intValue());
return filterClusters.contains(clusterName);
} | java | {
"resource": ""
} |
q173030 | OSelectExecutionPlanner.handleClassWithIndexForSortOnly | test | private boolean handleClassWithIndexForSortOnly(OSelectExecutionPlan plan, OIdentifier queryTarget, Set<String> filterClusters,
QueryPlanningInfo info, OCommandContext ctx, boolean profilingEnabled) {
// Tries to satisfy the ORDER BY (with no usable WHERE index) by iterating an index
// whose leading fields match the ORDER BY items in order. Returns true and extends
// the plan on success, false when no suitable index exists.
OSchema schema = getSchemaFromContext(ctx);
OClass clazz = schema.getClass(queryTarget.getStringValue());
if (clazz == null) {
// The target may be a view rather than a class.
clazz = schema.getView(queryTarget.getStringValue());
if (clazz == null) {
throw new OCommandExecutionException("Class not found: " + queryTarget);
}
}
for (OIndex idx : clazz.getIndexes().stream().filter(i -> i.supportsOrderedIterations()).filter(i -> i.getDefinition() != null)
.collect(Collectors.toList())) {
List<String> indexFields = idx.getDefinition().getFields();
if (indexFields.size() < info.orderBy.getItems().size()) {
continue;
}
boolean indexFound = true;
String orderType = null;
for (int i = 0; i < info.orderBy.getItems().size(); i++) {
OOrderByItem orderItem = info.orderBy.getItems().get(i);
if (orderItem.getCollate() != null) {
// Collated ordering cannot be served from a raw index iteration.
return false;
}
String indexField = indexFields.get(i);
if (i == 0) {
orderType = orderItem.getType();
} else {
if (orderType == null || !orderType.equals(orderItem.getType())) {
indexFound = false;
break;//ASC/DESC interleaved, cannot be used with index.
}
}
if (!(indexField.equals(orderItem.getAlias()) || isInOriginalProjection(indexField, orderItem.getAlias()))) {
indexFound = false;
break;
}
}
if (indexFound && orderType != null) {
plan.chain(new FetchFromIndexValuesStep(idx, orderType.equals(OOrderByItem.ASC), ctx, profilingEnabled));
int[] filterClusterIds = null;
if (filterClusters != null) {
filterClusterIds = filterClusters.stream().map(name -> ctx.getDatabase().getClusterIdByName(name)).mapToInt(i -> i)
.toArray();
}
plan.chain(new GetValueFromIndexEntryStep(ctx, filterClusterIds, profilingEnabled));
// On a single-server topology the index iteration fully implements the ORDER BY.
if (info.serverToClusters.size() == 1) {
info.orderApplied = true;
}
return true;
}
}
return false;
} | java | {
"resource": ""
} |
q173031 | OSelectExecutionPlanner.isDiamondHierarchy | test | private boolean isDiamondHierarchy(OClass clazz) {
// Breadth-first walk of the subclass hierarchy; reaching the same class twice means
// two superclass paths converge on it, i.e. a diamond. Returns false for trees.
final Set<OClass> seen = new HashSet<>();
final List<OClass> pending = new ArrayList<>();
pending.add(clazz);
while (!pending.isEmpty()) {
final OClass current = pending.remove(0);
seen.add(current);
for (OClass sub : current.getSubclasses()) {
if (seen.contains(sub)) {
return true;
}
pending.add(sub);
seen.add(sub);
}
}
return false;
} | java | {
"resource": ""
} |
q173032 | OSelectExecutionPlanner.getOrderDirection | test | private Boolean getOrderDirection(QueryPlanningInfo info) {
if (info.orderBy == null) {
return null;
}
String result = null;
for (OOrderByItem item : info.orderBy.getItems()) {
if (result == null) {
result = item.getType() == null ? OOrderByItem.ASC : item.getType();
} else {
String newType = item.getType() == null ? OOrderByItem.ASC : item.getType();
if (!newType.equals(result)) {
return null;
}
}
}
return result == null || result.equals(OOrderByItem.ASC);
} | java | {
"resource": ""
} |
q173033 | OSelectExecutionPlanner.requiresMultipleIndexLookups | test | private boolean requiresMultipleIndexLookups(OAndBlock keyCondition) {
// A key condition made exclusively of binary conditions can be resolved with a single
// index lookup; anything else forces multiple lookups.
return keyCondition.getSubBlocks().stream().anyMatch(sub -> !(sub instanceof OBinaryCondition));
} | java | {
"resource": ""
} |
q173034 | OSelectExecutionPlanner.findBestIndexFor | test | private IndexSearchDescriptor findBestIndexFor(OCommandContext ctx, Set<OIndex<?>> indexes, OAndBlock block, OClass clazz) {
// Chooses the best index for the given AND block: builds a descriptor for every
// applicable index (equality-capable plus non-Lucene full-text), drops redundant
// prefix indexes, keeps only the cheapest ones, and among those prefers the index
// matching the most fields. Returns null when no index applies.
//get all valid index descriptors
List<IndexSearchDescriptor> descriptors = indexes.stream().filter(x -> x.getInternal().canBeUsedInEqualityOperators())
.map(index -> buildIndexSearchDescriptor(ctx, index, block, clazz)).filter(Objects::nonNull)
.filter(x -> x.keyCondition != null).filter(x -> x.keyCondition.getSubBlocks().size() > 0).collect(Collectors.toList());
// Lucene-backed indexes are handled elsewhere; only plain FULLTEXT indexes are added.
List<IndexSearchDescriptor> fullTextIndexDescriptors = indexes.stream()
.filter(idx->idx.getType().equalsIgnoreCase("FULLTEXT"))
.filter(idx->!idx.getAlgorithm().equalsIgnoreCase("LUCENE"))
.map(idx -> buildIndexSearchDescriptorForFulltext(ctx, idx, block, clazz)).filter(Objects::nonNull)
.filter(x -> x.keyCondition != null).filter(x -> x.keyCondition.getSubBlocks().size() > 0).collect(Collectors.toList());
descriptors.addAll(fullTextIndexDescriptors);
//remove the redundant descriptors (eg. if I have one on [a] and one on [a, b], the first one is redundant, just discard it)
descriptors = removePrefixIndexes(descriptors);
//sort by cost
List<OPair<Integer, IndexSearchDescriptor>> sortedDescriptors = descriptors.stream()
.map(x -> (OPair<Integer, IndexSearchDescriptor>) new OPair(x.cost(ctx), x)).sorted().collect(Collectors.toList());
//get only the descriptors with the lowest cost
descriptors = sortedDescriptors.isEmpty() ?
Collections.emptyList() :
sortedDescriptors.stream().filter(x -> x.key.equals(sortedDescriptors.get(0).key)).map(x -> x.value)
.collect(Collectors.toList());
//sort remaining by the number of indexed fields
descriptors = descriptors.stream().sorted(Comparator.comparingInt(x -> x.keyCondition.getSubBlocks().size()))
.collect(Collectors.toList());
//get the one that has more indexed fields
return descriptors.isEmpty() ? null : descriptors.get(descriptors.size() - 1);
} | java | {
"resource": ""
} |
q173035 | OSelectExecutionPlanner.buildIndexSearchDescriptorForFulltext | test | private IndexSearchDescriptor buildIndexSearchDescriptorForFulltext(OCommandContext ctx, OIndex<?> index, OAndBlock block, OClass clazz) {
// Builds an index search descriptor matching CONTAINSTEXT conditions in the AND block
// against the fields of a full-text index, in index-field order. Matched conditions
// become the index key; the rest stay as a remaining (post-filter) condition.
// Returns null when the index cannot serve the block.
List<String> indexFields = index.getDefinition().getFields();
OBinaryCondition keyCondition = new OBinaryCondition(-1);
OIdentifier key = new OIdentifier("key");
keyCondition.setLeft(new OExpression(key));
boolean found = false;
OAndBlock blockCopy = block.copy();
Iterator<OBooleanExpression> blockIterator;
OAndBlock indexKeyValue = new OAndBlock(-1);
IndexSearchDescriptor result = new IndexSearchDescriptor();
result.idx = index;
result.keyCondition = indexKeyValue;
for (String indexField : indexFields) {
blockIterator = blockCopy.getSubBlocks().iterator();
// NOTE(review): breakHere is never set to true anywhere below — dead code.
boolean breakHere = false;
boolean indexFieldFound = false;
while (blockIterator.hasNext()) {
OBooleanExpression singleExp = blockIterator.next();
if (singleExp instanceof OContainsTextCondition) {
OExpression left = ((OContainsTextCondition) singleExp).getLeft();
if (left.isBaseIdentifier()) {
String fieldName = left.getDefaultAlias().getStringValue();
if (indexField.equals(fieldName)) {
found = true;
indexFieldFound = true;
OContainsTextCondition condition = new OContainsTextCondition(-1);
condition.setLeft(left);
condition.setRight(((OContainsTextCondition) singleExp).getRight().copy());
indexKeyValue.getSubBlocks().add(condition);
// Consumed conditions are removed so they are not applied twice.
blockIterator.remove();
break;
}
}
}
}
if (breakHere || !indexFieldFound) {
// Index fields must be matched as a contiguous prefix; stop at the first gap.
break;
}
}
if (result.keyCondition.getSubBlocks().size() < index.getDefinition().getFields().size() && !index
.supportsOrderedIterations()) {
//hash indexes do not support partial key match
return null;
}
if (found) {
result.remainingCondition = blockCopy;
return result;
}
return null;
} | java | {
"resource": ""
} |
q173036 | OSelectExecutionPlanner.commonFactor | test | private List<IndexSearchDescriptor> commonFactor(List<IndexSearchDescriptor> indexSearchDescriptors) {
//index, key condition, additional filter (to aggregate in OR)
// Groups descriptors that use the same index with the same key/range condition and
// ORs their remaining filters together, so each (index, key) pair is scanned once.
// Idiom cleanup: the get()/null-check/put() sequences are replaced by computeIfAbsent.
Map<OIndex, Map<IndexCondPair, OOrBlock>> aggregation = new HashMap<>();
for (IndexSearchDescriptor item : indexSearchDescriptors) {
Map<IndexCondPair, OOrBlock> filtersForIndex = aggregation.computeIfAbsent(item.idx, k -> new HashMap<>());
IndexCondPair extendedCond = new IndexCondPair(item.keyCondition, item.additionalRangeCondition);
OOrBlock existingAdditionalConditions = filtersForIndex.computeIfAbsent(extendedCond, k -> new OOrBlock(-1));
existingAdditionalConditions.getSubBlocks().add(item.remainingCondition);
}
// Flatten the aggregation back into one descriptor per (index, key, range) triple.
List<IndexSearchDescriptor> result = new ArrayList<>();
for (Map.Entry<OIndex, Map<IndexCondPair, OOrBlock>> item : aggregation.entrySet()) {
for (Map.Entry<IndexCondPair, OOrBlock> filters : item.getValue().entrySet()) {
result.add(new IndexSearchDescriptor(item.getKey(), filters.getKey().mainCondition, filters.getKey().additionalRange,
filters.getValue()));
}
}
return result;
} | java | {
"resource": ""
} |
q173037 | ODocumentFieldHandlingStrategyFactory.create | test | public ODocumentFieldHandlingStrategy create(int strategy) {
// Returns the field-handling strategy for the given id, creating and registering it on
// first use. Unknown ids fall back to the simple strategy (no per-type handlers).
Optional<ODocumentFieldHandlingStrategy> registered = ODocumentFieldHandlingStrategyRegistry.getInstance()
.getStrategy(strategy);
if (registered.isPresent()) {
return registered.get();
}
Map<OType, ODocumentFieldOTypeHandlingStrategy> typeHandlingStrategies = new HashMap<OType, ODocumentFieldOTypeHandlingStrategy>();
switch (strategy) {
case SINGLE_ORECORD_BYTES:
// Store BINARY fields in one separate record.
typeHandlingStrategies.put(OType.BINARY, new ODocumentSingleRecordBytesOTypeHandlingStrategy());
break;
case SPLIT_ORECORD_BYTES:
// Store BINARY fields split across multiple records.
typeHandlingStrategies.put(OType.BINARY, new ODocumentSplitRecordBytesOTypeHandlingStrategy());
break;
case SIMPLE:
default:
break;
}
ODocumentSmartFieldHandlingStrategy strategyInstance = new ODocumentSmartFieldHandlingStrategy(typeHandlingStrategies);
ODocumentFieldHandlingStrategyRegistry.getInstance().registerStrategy(strategy, strategyInstance);
return strategyInstance;
} | java | {
"resource": ""
} |
q173038 | OUser.allow | test | public ORole allow(final ORule.ResourceGeneric resourceGeneric, String resourceSpecific, final int iOperation) {
// Checks that this user may perform iOperation on the given resource and returns the
// role granting it; throws OSecurityAccessException otherwise. Roles are lazily
// re-loaded from the backing document when the in-memory set is empty.
if (roles == null || roles.isEmpty()) {
if (document.field("roles") != null && !((Collection<OIdentifiable>) document.field("roles")).isEmpty()) {
// Roles exist on the document but were not materialized yet: reload from it.
final ODocument doc = document;
document = null;
fromStream(doc);
} else
throw new OSecurityAccessException(document.getDatabase().getName(),
"User '" + document.field("name") + "' has no role defined");
}
final ORole role = checkIfAllowed(resourceGeneric, resourceSpecific, iOperation);
if (role == null)
throw new OSecurityAccessException(document.getDatabase().getName(),
"User '" + document.field("name") + "' does not have permission to execute the operation '" + ORole
.permissionToString(iOperation) + "' against the resource: " + resourceGeneric + "." + resourceSpecific);
return role;
} | java | {
"resource": ""
} |
q173039 | OUser.isRuleDefined | test | public boolean isRuleDefined(final ORule.ResourceGeneric resourceGeneric, String resourceSpecific) {
// True when at least one of the user's roles defines a rule for the given resource.
// Null roles are skipped with a warning instead of failing the whole check.
for (ORole role : roles) {
if (role == null) {
OLogManager.instance()
.warn(this, "User '%s' has a null role, bypass it. Consider to fix this user roles before to continue", getName());
continue;
}
if (role.hasRule(resourceGeneric, resourceSpecific)) {
return true;
}
}
return false;
} | java | {
"resource": ""
} |
q173040 | ODistributedOutput.getCompactServerStatus | test | public static String getCompactServerStatus(final ODistributedServerManager manager, final ODocument distribCfg) {
// Renders a one-line summary of the cluster: member count, then for every member its
// name, status and, per database, "name=status (role)".
final StringBuilder buffer = new StringBuilder();
final Collection<ODocument> members = distribCfg.field("members");
if (members != null) {
buffer.append(members.size());
buffer.append(":[");
int memberCount = 0;
for (ODocument m : members) {
if (m == null)
continue;
if (memberCount++ > 0)
buffer.append(",");
final String serverName = m.field("name");
buffer.append(serverName);
// Cast selects the Object overload of append (field() has a generic return type).
buffer.append((Object)m.field("status"));
final Collection<String> databases = m.field("databases");
if (databases != null) {
buffer.append("{");
int dbCount = 0;
for (String dbName : databases) {
// Databases with no distributed configuration are omitted from the summary.
final ODistributedConfiguration dbCfg = manager.getDatabaseConfiguration(dbName, false);
if (dbCfg == null)
continue;
if (dbCount++ > 0)
buffer.append(",");
buffer.append(dbName);
buffer.append("=");
buffer.append(manager.getDatabaseStatus(serverName, dbName));
buffer.append(" (");
buffer.append(dbCfg.getServerRole(serverName));
buffer.append(")");
}
buffer.append("}");
}
}
buffer.append("]");
}
return buffer.toString();
} | java | {
"resource": ""
} |
q173041 | OHazelcastPlugin.initSystemDatabase | test | protected void initSystemDatabase() {
// Bootstrap the distributed configuration for the internal system database:
// load the default config, disable auto-deploy, drop the <NEW_NODE> placeholder,
// register with the message service and add this node to the server list.
final ODocument defaultCfg = getStorage(OSystemDatabase.SYSTEM_DB_NAME)
.loadDatabaseConfiguration(getDefaultDatabaseConfigFile());
defaultCfg.field("autoDeploy", false);
final OModifiableDistributedConfiguration sysCfg = new OModifiableDistributedConfiguration(defaultCfg);
sysCfg.removeServer("<NEW_NODE>");
messageService.registerDatabase(OSystemDatabase.SYSTEM_DB_NAME, sysCfg);
sysCfg.addNewNodeInServerList(getLocalNodeName());
} | java | {
"resource": ""
} |
q173042 | OHazelcastPlugin.loadLocalDatabases | test | protected void loadLocalDatabases() {
// Open every storage already present on local disk and register it with the
// distributed message service; each registration runs under a distributed
// database lock so cluster ownership can be reassigned consistently.
final List<String> dbs = new ArrayList<String>(serverInstance.getAvailableStorageNames().keySet());
Collections.sort(dbs);
for (final String databaseName : dbs) {
if (messageService.getDatabase(databaseName) == null) {
ODistributedServerLog.info(this, nodeName, null, DIRECTION.NONE, "Opening database '%s'...", databaseName);
// INIT THE STORAGE
final ODistributedStorage stg = getStorage(databaseName);
executeInDistributedDatabaseLock(databaseName, 60000, null, new OCallable<Object, OModifiableDistributedConfiguration>() {
@Override
public Object call(OModifiableDistributedConfiguration cfg) {
ODistributedServerLog.info(this, nodeName, null, DIRECTION.NONE, "Current node started as %s for database '%s'",
cfg.getServerRole(nodeName), databaseName);
final ODistributedDatabaseImpl ddb = messageService.registerDatabase(databaseName, cfg);
ddb.resume();
// 1ST NODE TO HAVE THE DATABASE
cfg.addNewNodeInServerList(nodeName);
// COLLECT ALL THE CLUSTERS WITH REMOVED NODE AS OWNER
reassignClustersOwnership(nodeName, databaseName, cfg, true);
try {
// Record the storage's current LSN so later delta-sync has a starting point.
ddb.getSyncConfiguration().setLastLSN(nodeName, ((OAbstractPaginatedStorage) stg.getUnderlying()).getLSN(), false);
} catch (IOException e) {
ODistributedServerLog
.error(this, nodeName, null, DIRECTION.NONE, "Error on saving distributed LSN for database '%s' (err=%s).",
databaseName, e.getMessage());
}
ddb.setOnline();
return null;
}
});
}
}
} | java | {
"resource": ""
} |
q173043 | OHazelcastPlugin.memberRemoved | test | @Override
public void memberRemoved(final MembershipEvent iEvent) {
// Hazelcast membership callback: a node left the cluster, so unregister it
// from this plugin's bookkeeping. Exceptions are logged, never propagated.
try {
updateLastClusterChange();
if (iEvent.getMember() == null)
return;
final String nodeLeftName = getNodeName(iEvent.getMember());
if (nodeLeftName == null)
return;
removeServer(nodeLeftName, true);
} catch (HazelcastInstanceNotActiveException | RetryableHazelcastException e) {
// Hazelcast itself is shutting down or temporarily unavailable; nothing to do.
OLogManager.instance().error(this, "Hazelcast is not running", e);
} catch (Exception e) {
OLogManager.instance().error(this, "Error on removing the server '%s'", e, getNodeName(iEvent.getMember()));
}
} | java | {
"resource": ""
} |
q173044 | OHazelcastPlugin.electNewLockManager | test | @Override
public String electNewLockManager() {
// Elect a replacement distributed lock-manager server. The election is
// serialized cluster-wide via a Hazelcast lock; candidates are scanned
// round-robin starting just after the previous lock manager's registered slot.
if (hazelcastInstance == null)
throw new HazelcastInstanceNotActiveException();
final ILock lock = hazelcastInstance.getLock("orientdb.lockManagerElection");
lock.lock();
try {
// TRY ALL THE SERVERS IN ORDER (ALL THE SERVERS HAVE THE SAME LIST)
String lockManagerServer = getLockManagerRequester().getServer();
// PROTECT FROM DOUBLE LOCK MANAGER ELECTION IN CASE OF REMOVE OF LOCK MANAGER
if (lockManagerServer != null && getActiveServers().contains(lockManagerServer))
return lockManagerServer;
final String originalLockManager = lockManagerServer;
ODistributedServerLog
.debug(this, nodeName, originalLockManager, DIRECTION.OUT, "lock '%s' is unreachable, electing a new lock...",
originalLockManager);
int lockManagerServerId = -1;
if (lockManagerServer != null && registeredNodeByName.containsKey(lockManagerServer))
lockManagerServerId = registeredNodeByName.get(lockManagerServer);
String newServer = null;
int currIndex = lockManagerServerId;
for (int i = 0; i < registeredNodeById.size(); ++i) {
currIndex++;
if (currIndex >= registeredNodeById.size())
// RESTART FROM THE FIRST
currIndex = 0;
newServer = registeredNodeById.get(currIndex);
if (newServer == null)
throw new OConfigurationException("Found null server at index " + currIndex + " of server list " + registeredNodeById);
// Candidate must be this node or a currently active node.
if (newServer.equalsIgnoreCase(getLocalNodeName()) || activeNodes.containsKey(newServer)) {
// TODO: IMPROVE ELECTION BY CHECKING AL THE NODES AGREE ON IT
ODistributedServerLog
.debug(this, nodeName, newServer, DIRECTION.OUT, "Trying to elected server '%s' as new lock (old=%s)...", newServer,
originalLockManager);
try {
getLockManagerRequester().setServer(newServer);
configurationMap.put(CONFIG_LOCKMANAGER, getLockManagerRequester().getServer());
ODistributedServerLog
.info(this, nodeName, newServer, DIRECTION.OUT, "Elected server '%s' as new lock (old=%s)", newServer,
originalLockManager);
break;
} catch (Exception e) {
// NO SERVER RESPONDED, THE SERVER COULD BE ISOLATED, GO AHEAD WITH THE NEXT IN THE LIST
ODistributedServerLog
.info(this, nodeName, newServer, DIRECTION.OUT, "Error on electing server '%s' as new lock (error: %s)", newServer,
e);
}
}
}
return newServer;
} finally {
lock.unlock();
}
} | java | {
"resource": ""
} |
q173045 | OHazelcastPlugin.assignLockManagerFromCluster | test | private void assignLockManagerFromCluster() {
// Determine the cluster-wide lock-manager server at startup: if this is the
// only active node, claim the role; otherwise poll the shared configuration
// map (with a small back-off) until an elected lock manager shows up.
String lockManagerServer = null;
while (lockManagerServer == null) {
  if (activeNodes.size() == 1) {
    // ONLY CURRENT NODE ONLINE, SET IT AS INITIAL LOCK MANAGER
    lockManagerServer = nodeName;
    if (configurationMap.putIfAbsent(CONFIG_LOCKMANAGER, lockManagerServer) == null)
      break;
  } else {
    lockManagerServer = (String) configurationMap.get(CONFIG_LOCKMANAGER);
    if (lockManagerServer != null && lockManagerServer.equals(nodeName)) {
      // LAST LOCK MANAGER WAS CURRENT NODE? TRY TO FORCE A NEW ELECTION
      OLogManager.instance().info(this, "Found lock as current node, even if it was offline. Forcing a new election...");
      getLockManagerRequester().setServer(lockManagerServer);
      lockManagerServer = electNewLockManager();
      break;
    }
    if (lockManagerServer != null)
      break;
  }
  try {
    // Brief back-off before re-reading the shared map.
    Thread.sleep(100);
  } catch (InterruptedException e) {
    // FIX: restore the interrupt flag instead of silently swallowing it, so
    // callers up the stack can still observe the interruption.
    Thread.currentThread().interrupt();
    break;
  }
}
getLockManagerRequester().setServer(lockManagerServer);
OLogManager.instance().info(this, "Distributed Lock Manager server is '%s'", lockManagerServer);
} | java | {
"resource": ""
} |
q173046 | OMicroTransaction.commit | test | public void commit() {
// Close one nesting level; only the outermost commit performs the real work.
if (!active)
  throw error("Inactive micro-transaction on commit");
if (level < 1)
  throw error("Unbalanced micro-transaction, level = " + level);
level = level - 1;
if (level != 0)
  return;
// Outermost level reached: deactivate first, then flush the changes.
active = false;
doCommit();
} | java | {
"resource": ""
} |
q173047 | OMicroTransaction.rollback | test | public void rollback() {
// Close one nesting level; only the outermost rollback actually undoes work.
if (!active)
  throw error("Inactive micro-transaction on rollback");
if (level < 1)
  throw error("Unbalanced micro-transaction, level = " + level);
level = level - 1;
if (level != 0)
  return;
// Outermost level reached: deactivate first, then revert the changes.
active = false;
doRollback();
} | java | {
"resource": ""
} |
q173048 | OMicroTransaction.updateIdentityAfterRecordCommit | test | public void updateIdentityAfterRecordCommit(final ORID oldRid, final ORID newRid) {
// After commit replaced a temporary RID with its final one, propagate the new
// identity everywhere the old RID may be referenced: index keys that embed the
// RID, the record operation itself, and pending per-key index change entries.
if (oldRid.equals(newRid))
return; // no change, ignore
// XXX: Identity update may mutate the index keys, so we have to identify and reinsert potentially affected index keys to keep
// the OTransactionIndexChanges.changesPerKey in a consistent state.
final List<KeyChangesUpdateRecord> keyRecordsToReinsert = new ArrayList<>();
final OIndexManager indexManager = getDatabase().getMetadata().getIndexManager();
for (Map.Entry<String, OTransactionIndexChanges> entry : indexOperations.entrySet()) {
final OIndex<?> index = indexManager.getIndex(entry.getKey());
if (index == null)
throw new OTransactionException("Cannot find index '" + entry.getValue() + "' while committing transaction");
final Dependency[] fieldRidDependencies = getIndexFieldRidDependencies(index);
// Only indexes whose keys can embed a RID need the reinsertion dance.
if (!isIndexMayDependOnRids(fieldRidDependencies))
continue;
final OTransactionIndexChanges indexChanges = entry.getValue();
for (final Iterator<OTransactionIndexChangesPerKey> iterator = indexChanges.changesPerKey.values().iterator(); iterator
.hasNext(); ) {
final OTransactionIndexChangesPerKey keyChanges = iterator.next();
if (isIndexKeyMayDependOnRid(keyChanges.key, oldRid, fieldRidDependencies)) {
// Pull the entry out now; it is re-added after the identity mutation below.
keyRecordsToReinsert.add(new KeyChangesUpdateRecord(keyChanges, indexChanges));
iterator.remove();
}
}
}
// Update the identity.
final ORecordOperation rec = resolveRecordOperation(oldRid);
if (rec != null) {
updatedRids.put(newRid.copy(), oldRid.copy());
if (!rec.getRecord().getIdentity().equals(newRid)) {
ORecordInternal.onBeforeIdentityChanged(rec.getRecord());
final ORecordId recordId = (ORecordId) rec.getRecord().getIdentity();
if (recordId == null) {
ORecordInternal.setIdentity(rec.getRecord(), new ORecordId(newRid));
} else {
// Mutate the existing ORecordId in place so other holders see the change.
recordId.setClusterPosition(newRid.getClusterPosition());
recordId.setClusterId(newRid.getClusterId());
}
ORecordInternal.onAfterIdentityChanged(rec.getRecord());
}
}
// Reinsert the potentially affected index keys.
for (KeyChangesUpdateRecord record : keyRecordsToReinsert)
record.indexChanges.changesPerKey.put(record.keyChanges.key, record.keyChanges);
// Update the indexes.
final List<OTransactionRecordIndexOperation> transactionIndexOperations = recordIndexOperations.get(translateRid(oldRid));
if (transactionIndexOperations != null) {
for (final OTransactionRecordIndexOperation indexOperation : transactionIndexOperations) {
OTransactionIndexChanges indexEntryChanges = indexOperations.get(indexOperation.index);
if (indexEntryChanges == null)
continue;
final OTransactionIndexChangesPerKey keyChanges;
if (indexOperation.key == null) {
keyChanges = indexEntryChanges.nullKeyChanges;
} else {
keyChanges = indexEntryChanges.changesPerKey.get(indexOperation.key);
}
if (keyChanges != null)
updateChangesIdentity(oldRid, newRid, keyChanges);
}
}
} | java | {
"resource": ""
} |
q173049 | OMicroTransaction.updateRecordCacheAfterRollback | test | public void updateRecordCacheAfterRollback() {
// Evict every record touched by this micro-transaction from the database-level
// cache: after a rollback their cached state no longer matches storage.
final OLocalRecordCache cache = database.getLocalCache();
for (final ORecordOperation operation : recordOperations.values()) {
  cache.deleteRecord(operation.getRecord().getIdentity());
}
} | java | {
"resource": ""
} |
q173050 | OFileUtils.prepareForFileCreationOrReplacement | test | public static void prepareForFileCreationOrReplacement(Path path, Object requester, String operation) throws IOException {
// Make sure `path` can be created fresh: delete any stale file (logging the
// requester/operation for diagnostics) and ensure the parent directory exists.
if (Files.deleteIfExists(path))
OLogManager.instance().warn(requester, "'%s' deleted while %s", path, operation);
final Path parent = path.getParent();
if (parent != null)
Files.createDirectories(parent);
} | java | {
"resource": ""
} |
q173051 | OFileUtils.atomicMoveWithFallback | test | public static void atomicMoveWithFallback(Path source, Path target, Object requester) throws IOException {
// Move `source` to `target` atomically when the filesystem supports it;
// otherwise fall back to a plain (non-atomic) move, with a warning.
try {
Files.move(source, target, StandardCopyOption.ATOMIC_MOVE);
} catch (AtomicMoveNotSupportedException ignore) {
OLogManager.instance()
.warn(requester, "atomic file move is not possible, falling back to regular move (moving '%s' to '%s')", source, target);
Files.move(source, target);
}
} | java | {
"resource": ""
} |
q173052 | Pattern.getDisjointPatterns | test | public List<Pattern> getDisjointPatterns() {
// Split this pattern into its connected components: flood-fill over in/out
// edges starting from any unvisited node, emitting one Pattern per component.
Map<PatternNode, String> reverseMap = new IdentityHashMap<>();
// Invert aliasToNode (node -> alias) with direct puts. The previous version
// built the inverse through Collectors.toMap(), which throws
// IllegalStateException when two aliases map to the same node and loses the
// identity semantics during the intermediate collection.
for (Map.Entry<String, PatternNode> aliasEntry : this.aliasToNode.entrySet()) {
  reverseMap.put(aliasEntry.getValue(), aliasEntry.getKey());
}
List<Pattern> result = new ArrayList<>();
while (!reverseMap.isEmpty()) {
  Pattern pattern = new Pattern();
  result.add(pattern);
  // Seed the flood-fill with an arbitrary not-yet-assigned node.
  Map.Entry<PatternNode, String> nextNode = reverseMap.entrySet().iterator().next();
  Set<PatternNode> toVisit = new HashSet<>();
  toVisit.add(nextNode.getKey());
  while (!toVisit.isEmpty()) {
    PatternNode currentNode = toVisit.iterator().next();
    toVisit.remove(currentNode);
    if (reverseMap.containsKey(currentNode)) {
      // First visit: claim the node for this component and queue its neighbors.
      pattern.aliasToNode.put(reverseMap.get(currentNode), currentNode);
      reverseMap.remove(currentNode);
      for (PatternEdge edge : currentNode.out) {
        toVisit.add(edge.in);
      }
      for (PatternEdge edge : currentNode.in) {
        toVisit.add(edge.out);
      }
    }
  }
  // The edge count is derived state; recompute it for the sub-pattern.
  pattern.recalculateNumOfEdges();
}
return result;
} | java | {
"resource": ""
} |
q173053 | ODistributedStorage.executeOnlyLocally | test | protected boolean executeOnlyLocally(final String localNodeName, final ODistributedConfiguration dbCfg,
final OCommandExecutor exec, final Collection<String> involvedClusters, final Collection<String> nodes) {
// Decide whether an idempotent (read-only) command can be served by this node
// alone: true when this node is in the target set and the highest read quorum
// among the involved clusters is <= 1, so no cross-node agreement is required.
boolean executeLocally = false;
if (exec.isIdempotent()) {
final int availableNodes = nodes.size();
// IDEMPOTENT: CHECK IF CAN WORK LOCALLY ONLY
int maxReadQuorum;
if (involvedClusters.isEmpty())
maxReadQuorum = dbCfg.getReadQuorum(null, availableNodes, localNodeName);
else {
maxReadQuorum = 0;
for (String cl : involvedClusters)
maxReadQuorum = Math.max(maxReadQuorum, dbCfg.getReadQuorum(cl, availableNodes, localNodeName));
}
if (nodes.contains(localNodeName) && maxReadQuorum <= 1)
executeLocally = true;
}
return executeLocally;
}
public boolean isLocalEnv() {
// True when the distributed plumbing is absent or we are already executing
// inside a distributed call, i.e. operations must not be re-routed to peers.
return localDistributedDatabase == null || dManager == null || distributedConfiguration == null || OScenarioThreadLocal.INSTANCE
.isRunModeDistributed();
}
public OStorageOperationResult<ORawBuffer> readRecord(final ORecordId iRecordId, final String iFetchPlan,
final boolean iIgnoreCache, final boolean prefetchRecords, final ORecordCallback<ORawBuffer> iCallback) {
// Distributed-aware record read: serve locally when possible (already inside a
// distributed call, the record is locked locally, or this node owns the data
// with read quorum <= 1); otherwise ship an OReadRecordTask to the owners.
if (isLocalEnv()) {
// ALREADY DISTRIBUTED
return wrapped.readRecord(iRecordId, iFetchPlan, iIgnoreCache, prefetchRecords, iCallback);
}
final ORawBuffer memCopy = localDistributedDatabase.getRecordIfLocked(iRecordId);
if (memCopy != null)
return new OStorageOperationResult<ORawBuffer>(memCopy);
try {
final String clusterName = getClusterNameByRID(iRecordId);
final ODistributedConfiguration dbCfg = distributedConfiguration;
final List<String> nodes = dbCfg.getServers(clusterName, null);
final int availableNodes = nodes.size();
// CHECK IF LOCAL NODE OWNS THE DATA AND READ-QUORUM = 1: GET IT LOCALLY BECAUSE IT'S FASTER
final String localNodeName = dManager.getLocalNodeName();
if (nodes.isEmpty()
|| nodes.contains(dManager.getLocalNodeName()) && dbCfg.getReadQuorum(clusterName, availableNodes, localNodeName) <= 1) {
// DON'T REPLICATE
return (OStorageOperationResult<ORawBuffer>) OScenarioThreadLocal.executeAsDistributed(new Callable() {
@Override
public Object call() throws Exception {
return wrapped.readRecord(iRecordId, iFetchPlan, iIgnoreCache, prefetchRecords, iCallback);
}
});
}
final OReadRecordTask task = ((OReadRecordTask) dManager.getTaskFactoryManager().getFactoryByServerNames(nodes)
.createTask(OReadRecordTask.FACTORYID)).init(iRecordId);
// DISTRIBUTE IT
final ODistributedResponse response = dManager
.sendRequest(getName(), Collections.singleton(clusterName), nodes, task, dManager.getNextMessageIdCounter(),
EXECUTION_MODE.RESPONSE, null, null, null);
final Object dResult = response != null ? response.getPayload() : null;
// Remote failures come back as payloads; rethrow them on the caller's thread.
if (dResult instanceof ONeedRetryException)
throw (ONeedRetryException) dResult;
else if (dResult instanceof Exception)
throw OException
.wrapException(new ODistributedException("Error on execution distributed read record"), (Exception) dResult);
return new OStorageOperationResult<ORawBuffer>((ORawBuffer) dResult);
} catch (ONeedRetryException e) {
// PASS THROUGH
throw e;
} catch (Exception e) {
handleDistributedException("Cannot route read record operation for %s to the distributed node", e, iRecordId);
// UNREACHABLE
return null;
}
}
@Override
public OStorageOperationResult<ORawBuffer> readRecordIfVersionIsNotLatest(final ORecordId rid, final String fetchPlan,
final boolean ignoreCache, final int recordVersion) throws ORecordNotFoundException {
// Same routing policy as readRecord(), but delegates to the version-aware
// variant that only returns content when the stored version differs.
if (isLocalEnv()) {
return wrapped.readRecordIfVersionIsNotLatest(rid, fetchPlan, ignoreCache, recordVersion);
}
final ORawBuffer memCopy = localDistributedDatabase.getRecordIfLocked(rid);
if (memCopy != null)
return new OStorageOperationResult<ORawBuffer>(memCopy);
try {
final String clusterName = getClusterNameByRID(rid);
final ODistributedConfiguration dbCfg = distributedConfiguration;
final List<String> nodes = dbCfg.getServers(clusterName, null);
final int availableNodes = nodes.size();
// CHECK IF LOCAL NODE OWNS THE DATA AND READ-QUORUM = 1: GET IT LOCALLY BECAUSE IT'S FASTER
final String localNodeName = dManager.getLocalNodeName();
if (nodes.isEmpty()
|| nodes.contains(dManager.getLocalNodeName()) && dbCfg.getReadQuorum(clusterName, availableNodes, localNodeName) <= 1) {
// DON'T REPLICATE
return (OStorageOperationResult<ORawBuffer>) OScenarioThreadLocal.executeAsDistributed(new Callable() {
@Override
public Object call() throws Exception {
return wrapped.readRecordIfVersionIsNotLatest(rid, fetchPlan, ignoreCache, recordVersion);
}
});
}
final OReadRecordIfNotLatestTask task = (OReadRecordIfNotLatestTask) dManager.getTaskFactoryManager()
.getFactoryByServerNames(nodes).createTask(OReadRecordIfNotLatestTask.FACTORYID);
task.init(rid, recordVersion);
// DISTRIBUTE IT
final Object result = dManager
.sendRequest(getName(), Collections.singleton(clusterName), nodes, task, dManager.getNextMessageIdCounter(),
EXECUTION_MODE.RESPONSE, null, null, null).getPayload();
// Remote failures come back as payloads; rethrow them on the caller's thread.
if (result instanceof ONeedRetryException)
throw (ONeedRetryException) result;
else if (result instanceof Exception)
throw OException.wrapException(new ODistributedException("Error on execution distributed read record"), (Exception) result);
return new OStorageOperationResult<ORawBuffer>((ORawBuffer) result);
} catch (ONeedRetryException e) {
// PASS THROUGH
throw e;
} catch (Exception e) {
handleDistributedException("Cannot route read record operation for %s to the distributed node", e, rid);
// UNREACHABLE
return null;
}
}
@Override
public OStorageOperationResult<Boolean> deleteRecord(final ORecordId iRecordId, final int iVersion, final int iMode,
final ORecordCallback<Boolean> iCallback) {
// IF is a real delete should be with a tx
// Deletes are not routed through the distributed layer here: they are expected
// to arrive inside a transaction, so simply delegate to the wrapped storage.
return wrapped.deleteRecord(iRecordId, iVersion, iMode, iCallback);
} | java | {
"resource": ""
} |
q173054 | JPAHandler.characters | test | @Override
public void characters(char[] ch, int start, int length) throws SAXException {
// Accumulate element text; SAX may deliver a single text node in several chunks.
builder.append(ch, start, length);
} | java | {
"resource": ""
} |
q173055 | OChannelBinaryAsynchClient.isConnected | test | public boolean isConnected() {
// Snapshot the field once: `socket` may be nulled concurrently by a close.
final Socket current = socket;
if (current == null)
  return false;
// Fully usable only if open, connected, and neither stream half is shut down.
return !current.isClosed() && current.isConnected() && !current.isInputShutdown() && !current.isOutputShutdown();
} | java | {
"resource": ""
} |
q173056 | OByteBufferUtils.mergeShortFromBuffers | test | public static short mergeShortFromBuffers(final ByteBuffer buffer, final ByteBuffer buffer1) {
// Reassemble a big-endian short whose two bytes live in different buffers:
// the high-order byte is read from the first buffer, the low-order from the second.
final int highByte = buffer.get() & MASK;
final int lowByte = buffer1.get() & MASK;
return (short) ((highByte << SIZE_OF_BYTE_IN_BITS) | lowByte);
} | java | {
"resource": ""
} |
q173057 | OByteBufferUtils.splitShortToBuffers | test | public static void splitShortToBuffers(final ByteBuffer buffer, final ByteBuffer buffer1, final short iValue) {
// Split iValue big-endian across the two buffers: high-order byte into the
// first, low-order byte into the second.
buffer.put((byte) ((iValue >>> SIZE_OF_BYTE_IN_BITS) & MASK));
buffer1.put((byte) (iValue & MASK));
} | java | {
"resource": ""
} |
q173058 | OCommandExecutorSQLCreateProperty.execute | test | public Object execute(final Map<Object, Object> iArgs) {
// Apply the parsed CREATE PROPERTY statement: resolve the class, honor
// IF NOT EXISTS for duplicates, resolve the optional linked class/type and set
// the requested constraints. Returns the resulting property count of the class.
if (type == null)
throw new OCommandExecutionException("Cannot execute the command because it has not been parsed yet");
final ODatabaseDocument database = getDatabase();
final OClassEmbedded sourceClass = (OClassEmbedded) database.getMetadata().getSchema().getClass(className);
if (sourceClass == null)
throw new OCommandExecutionException("Source class '" + className + "' not found");
OPropertyImpl prop = (OPropertyImpl) sourceClass.getProperty(fieldName);
if (prop != null) {
// Duplicate: succeed silently only when IF NOT EXISTS was specified.
if (ifNotExists) {
return sourceClass.properties().size();
}
throw new OCommandExecutionException(
"Property '" + className + "." + fieldName + "' already exists. Remove it before to retry.");
}
// CREATE THE PROPERTY
OClass linkedClass = null;
OType linkedType = null;
if (linked != null) {
// FIRST SEARCH BETWEEN CLASSES
linkedClass = database.getMetadata().getSchema().getClass(linked);
if (linkedClass == null)
// NOT FOUND: SEARCH BETWEEN TYPES
linkedType = OType.valueOf(linked.toUpperCase(Locale.ENGLISH));
}
// CREATE IT LOCALLY
OPropertyImpl internalProp = sourceClass.addPropertyInternal(fieldName, type, linkedType, linkedClass, unsafe);
if (readonly) {
internalProp.setReadonly(true);
}
if (mandatory) {
internalProp.setMandatory(true);
}
if (notnull) {
internalProp.setNotNull(true);
}
if (max != null) {
internalProp.setMax(max);
}
if (min != null) {
internalProp.setMin(min);
}
if (defaultValue != null) {
internalProp.setDefaultValue(defaultValue);
}
return sourceClass.properties().size();
} | java | {
"resource": ""
} |
q173059 | OIndexFullText.put | test | @Override
public OIndexFullText put(Object key, final OIdentifiable singleValue) {
// Tokenize the key into words and link each word to the given record so the
// record is searchable by any of its words. Null keys are silently ignored.
if (key == null) {
return this;
}
key = getCollatingValue(key);
final Set<String> words = splitIntoWords(key.toString());
// FOREACH WORD CREATE THE LINK TO THE CURRENT DOCUMENT
for (final String word : words) {
acquireSharedLock();
try {
// Dispatch per storage API version of the underlying index engine.
if (apiVersion == 0) {
doPutV0(singleValue, word);
} else if (apiVersion == 1) {
doPutV1(singleValue, word);
} else {
throw new IllegalStateException("Invalid API version, " + apiVersion);
}
} finally {
releaseSharedLock();
}
}
return this;
} | java | {
"resource": ""
} |
q173060 | OIndexFullText.remove | test | @Override
public boolean remove(Object key, final OIdentifiable value) {
// Unlink the record from every word of the tokenized key. Returns true when
// at least one word entry actually referenced the record.
if (key == null) {
return false;
}
key = getCollatingValue(key);
final Set<String> words = splitIntoWords(key.toString());
final OModifiableBoolean removed = new OModifiableBoolean(false);
for (final String word : words) {
acquireSharedLock();
try {
// Dispatch per storage API version of the underlying index engine.
if (apiVersion == 0) {
removeV0(value, removed, word);
} else if (apiVersion == 1) {
removeV1(value, removed, word);
} else {
throw new IllegalStateException("Invalid API version, " + apiVersion);
}
} finally {
releaseSharedLock();
}
}
return removed.getValue();
} | java | {
"resource": ""
} |
q173061 | OSQLQuery.run | test | @SuppressWarnings("unchecked")
public List<T> run(final Object... iArgs) {
// Execute this query against the thread-bound database and return the rows as
// a list (a single scalar result is wrapped in a singleton list). A schema
// snapshot pins metadata for the duration of the command.
final ODatabaseDocumentInternal database = ODatabaseRecordThreadLocal.instance().get();
if (database == null)
throw new OQueryParsingException("No database configured");
((OMetadataInternal) database.getMetadata()).makeThreadLocalSchemaSnapshot();
try {
setParameters(iArgs);
Object o = database.getStorage().command(this);
if (o instanceof List) {
return (List<T>) o;
} else {
return (List<T>) Collections.singletonList(o);
}
} finally {
((OMetadataInternal) database.getMetadata()).clearThreadLocalSchemaSnapshot();
}
} | java | {
"resource": ""
} |
q173062 | OSQLQuery.runFirst | test | public T runFirst(final Object... iArgs) {
// Convenience wrapper: constrain the query to a single row and unwrap it,
// yielding null when the query produced nothing.
setLimit(1);
final List<T> rows = execute(iArgs);
if (rows == null || rows.isEmpty())
  return null;
return rows.get(0);
} | java | {
"resource": ""
} |
q173063 | OFileClassic.create | test | public void create() throws IOException {
// Create the physical file: open the channel under exclusive access, write a
// fresh header with the current format version and set up allocation mode.
acquireWriteLock();
try {
acquireExclusiveAccess();
openChannel();
init();
setVersion(OFileClassic.CURRENT_VERSION);
version = OFileClassic.CURRENT_VERSION;
initAllocationMode();
} finally {
releaseWriteLock();
}
} | java | {
"resource": ""
} |
q173064 | OFileClassic.checkRegions | test | private long checkRegions(final long iOffset, final long iLength) {
// Validate that [iOffset, iOffset + iLength) lies inside the logical file size
// and translate the logical offset into a physical one past the header.
acquireReadLock();
try {
if (iOffset < 0 || iOffset + iLength > size) {
throw new OIOException(
"You cannot access outside the file size (" + size + " bytes). You have requested portion " + iOffset + "-" + (iOffset
+ iLength) + " bytes. File: " + this);
}
return iOffset + HEADER_SIZE;
} finally {
releaseReadLock();
}
} | java | {
"resource": ""
} |
q173065 | OFileClassic.replaceContentWith | test | public void replaceContentWith(final Path newContentFile) throws IOException {
// Swap this file's content with newContentFile under the write lock: close the
// channel, copy the bytes over (replacing the old content), then reopen.
acquireWriteLock();
try {
close();
Files.copy(newContentFile, osFile, StandardCopyOption.REPLACE_EXISTING);
open();
} finally {
releaseWriteLock();
}
} | java | {
"resource": ""
} |
q173066 | OStorageRemote.command | test | public Object command(final OCommandRequestText iCommand) {
// Execute a text command on the remote server. Live queries and asynchronous
// requests are flagged up front so the protocol layer can wire their callbacks.
final boolean live = iCommand instanceof OLiveQuery;
final ODatabaseDocumentInternal database = ODatabaseRecordThreadLocal.instance().get();
final boolean asynch = iCommand instanceof OCommandRequestAsynch && ((OCommandRequestAsynch) iCommand).isAsynchronous();
OCommandRequest request = new OCommandRequest(database, asynch, iCommand, live);
OCommandResponse response = networkOperation(request, "Error on executing command: " + iCommand);
return response.getResult();
} | java | {
"resource": ""
} |
q173067 | OStorageRemote.endRequest | test | public void endRequest(final OChannelBinaryAsynchClient iNetwork) throws IOException {
// Finish a request begun with beginRequest(): push buffered bytes and release
// the channel's write lock. A null channel means there is nothing to complete.
if (iNetwork != null) {
  iNetwork.flush();
  iNetwork.releaseWriteLock();
}
} | java | {
"resource": ""
} |
q173068 | OStorageRemote.addHost | test | protected String addHost(String host) {
// Normalize a server address and register it in the known-servers list:
// "localhost" -> loopback IP, strip any path suffix, and append the default
// (SSL or plain) port when none was supplied.
if (host.startsWith(LOCALHOST))
host = LOCAL_IP + host.substring("localhost".length());
if (host.contains("/"))
host = host.substring(0, host.indexOf("/"));
// REGISTER THE REMOTE SERVER+PORT
if (!host.contains(":"))
host += ":" + (clientConfiguration.getValueAsBoolean(OGlobalConfiguration.CLIENT_USE_SSL) ?
getDefaultSSLPort() :
getDefaultPort());
else if (host.split(":").length < 2 || host.split(":")[1].trim().length() == 0)
host += (clientConfiguration.getValueAsBoolean(OGlobalConfiguration.CLIENT_USE_SSL) ? getDefaultSSLPort() : getDefaultPort());
// DISABLED BECAUSE THIS DID NOT ALLOW TO CONNECT TO LOCAL HOST ANYMORE IF THE SERVER IS BOUND TO 127.0.0.1
// CONVERT 127.0.0.1 TO THE PUBLIC IP IF POSSIBLE
// if (host.startsWith(LOCAL_IP)) {
// try {
// final String publicIP = InetAddress.getLocalHost().getHostAddress();
// host = publicIP + host.substring(LOCAL_IP.length());
// } catch (UnknownHostException e) {
// // IGNORE IT
// }
// }
// Register under the lock; duplicates are ignored.
synchronized (serverURLs) {
if (!serverURLs.contains(host)) {
serverURLs.add(host);
OLogManager.instance().debug(this, "Registered the new available server '%s'", host);
}
}
return host;
} | java | {
"resource": ""
} |
q173069 | OStorageRemote.beginRequest | test | public OChannelBinaryAsynchClient beginRequest(final OChannelBinaryAsynchClient network, final byte iCommand,
OStorageRemoteSession session) throws IOException {
// Start writing command iCommand on the channel for the given session, and
// hand the same channel back so calls can be chained.
network.beginRequest(iCommand, session);
return network;
} | java | {
"resource": ""
} |
q173070 | OLazyCollectionUtil.getDatabase | test | protected static OObjectDatabaseTx getDatabase() {
// Resolve the object-database facade bound to the calling thread.
final ODatabaseInternal<?> owner = ODatabaseRecordThreadLocal.instance().get().getDatabaseOwner();
if (owner instanceof OObjectDatabaseTx)
  return (OObjectDatabaseTx) owner;
if (owner instanceof ODatabaseDocumentInternal)
  // Wrap a raw document database on the fly so callers always get the object API.
  return new OObjectDatabaseTx((ODatabaseDocumentInternal) owner);
throw new IllegalStateException("Current database not of expected type");
} | java | {
"resource": ""
} |
q173071 | OClientConnectionManager.getConnection | test | public OClientConnection getConnection(final int iChannelId, ONetworkProtocol protocol) {
// Look up the connection registered for this channel id; when present, rebind
// it to the protocol instance currently serving the channel.
final OClientConnection found = connections.get(iChannelId);
if (found == null)
  return null;
found.setProtocol(protocol);
return found;
} | java | {
"resource": ""
} |
q173072 | OClientConnectionManager.kill | test | public void kill(final OClientConnection connection) {
// Forcibly terminate a client connection: interrupt its protocol thread,
// disconnect it, then tell the network manager to shut down.
if (connection != null) {
final ONetworkProtocol protocol = connection.getProtocol();
try {
// INTERRUPT THE NEWTORK MANAGER TOO
protocol.interrupt();
} catch (Exception e) {
OLogManager.instance().error(this, "Error during interruption of binary protocol", e);
}
disconnect(connection);
// KILL THE NETWORK MANAGER TOO
protocol.sendShutdown();
}
} | java | {
"resource": ""
} |
q173073 | OClientConnectionManager.interrupt | test | public void interrupt(final int iChannelId) {
// Soft-interrupt the network protocol serving the given channel, if any.
final OClientConnection conn = connections.get(iChannelId);
if (conn == null)
  return;
final ONetworkProtocol proto = conn.getProtocol();
if (proto != null)
  // INTERRUPT THE NEWTORK MANAGER
  proto.softShutdown();
} | java | {
"resource": ""
} |
q173074 | OClientConnectionManager.disconnect | test | public boolean disconnect(final int iChannelId) {
// Close the connection registered under iChannelId. Returns true only when its
// protocol has no other live channels left (i.e. it is fully disconnected).
OLogManager.instance().debug(this, "Disconnecting connection with id=%d", iChannelId);
final OClientConnection connection = connections.remove(iChannelId);
if (connection != null) {
OServerPluginHelper.invokeHandlerCallbackOnClientDisconnection(server, connection);
connection.close();
removeConnectionFromSession(connection);
// CHECK IF THERE ARE OTHER CONNECTIONS
for (Entry<Integer, OClientConnection> entry : connections.entrySet()) {
if (entry.getValue().getProtocol().equals(connection.getProtocol())) {
OLogManager.instance()
.debug(this, "Disconnected connection with id=%d but are present other active channels", iChannelId);
return false;
}
}
OLogManager.instance().debug(this, "Disconnected connection with id=%d, no other active channels found", iChannelId);
return true;
}
OLogManager.instance().debug(this, "Cannot find connection with id=%d", iChannelId);
return false;
} | java | {
"resource": ""
} |
q173075 | OClientConnectionManager.pushDistribCfg2Clients | test | public void pushDistribCfg2Clients(final ODocument iConfig) {
// Push the updated distributed configuration document to every connected
// legacy binary client, at most once per remote address.
if (iConfig == null)
return;
final Set<String> pushed = new HashSet<String>();
for (OClientConnection c : connections.values()) {
if (!c.getData().supportsLegacyPushMessages)
continue;
try {
final String remoteAddress = c.getRemoteAddress();
if (pushed.contains(remoteAddress))
// ALREADY SENT: JUMP IT
continue;
} catch (Exception e) {
// SOCKET EXCEPTION SKIP IT
continue;
}
if (!(c.getProtocol() instanceof ONetworkProtocolBinary) || c.getData().getSerializationImpl() == null)
// INVOLVE ONLY BINARY PROTOCOLS
continue;
final ONetworkProtocolBinary p = (ONetworkProtocolBinary) c.getProtocol();
final OChannelBinary channel = p.getChannel();
final ORecordSerializer ser = ORecordSerializerFactory.instance().getFormat(c.getData().getSerializationImpl());
if (ser == null)
// NOTE(review): a missing serializer aborts the push for ALL remaining
// clients (return, not continue) — confirm this is intended.
return;
final byte[] content = ser.toStream(iConfig, false);
try {
// TRY ACQUIRING THE LOCK FOR MAXIMUM 3 SECS TO AVOID TO FREEZE CURRENT THREAD
if (channel.tryAcquireWriteLock(TIMEOUT_PUSH)) {
try {
channel.writeByte(OChannelBinaryProtocol.PUSH_DATA);
channel.writeInt(Integer.MIN_VALUE);
channel.writeByte(OChannelBinaryProtocol.REQUEST_PUSH_DISTRIB_CONFIG);
channel.writeBytes(content);
channel.flush();
pushed.add(c.getRemoteAddress());
OLogManager.instance().debug(this, "Sent updated cluster configuration to the remote client %s", c.getRemoteAddress());
} finally {
channel.releaseWriteLock();
}
} else {
OLogManager.instance()
.info(this, "Timeout on sending updated cluster configuration to the remote client %s", c.getRemoteAddress());
}
} catch (Exception e) {
OLogManager.instance().warn(this, "Cannot push cluster configuration to the client %s", e, c.getRemoteAddress());
}
}
} | java | {
"resource": ""
} |
q173076 | OEmbeddedRidBag.swap | test | public boolean swap(int index, OIdentifiable newValue) {
// Replace the identifiable stored at the given logical position with newValue.
// Walks the raw iterator counting entries; returns false when index is out of range.
EntriesIterator iter = (EntriesIterator) rawIterator();
int currIndex = 0;
while (iter.hasNext()) {
iter.next();
if (index == currIndex) {
iter.swapValueOnCurrent(newValue);
return true;
}
currIndex++;
}
return false;
} | java | {
"resource": ""
} |
q173077 | OTransactionAbstract.close | test | @Override
// Releases every record lock acquired during this transaction and clears the
// lock registry. Release failures are logged at debug level and do not stop
// the remaining locks from being released.
public void close() {
for (Map.Entry<ORID, LockedRecordMetadata> lock : locks.entrySet()) {
try {
final LockedRecordMetadata lockedRecordMetadata = lock.getValue();
// Match the release call to the strategy used at acquisition time.
if (lockedRecordMetadata.strategy.equals(OStorage.LOCKING_STRATEGY.EXCLUSIVE_LOCK)) {
((OAbstractPaginatedStorage) getDatabase().getStorage().getUnderlying()).releaseWriteLock(lock.getKey());
} else if (lockedRecordMetadata.strategy.equals(OStorage.LOCKING_STRATEGY.SHARED_LOCK)) {
((OAbstractPaginatedStorage) getDatabase().getStorage().getUnderlying()).releaseReadLock(lock.getKey());
}
} catch (Exception e) {
// Best effort: keep releasing the other locks even if one fails.
OLogManager.instance().debug(this, "Error on releasing lock against record " + lock.getKey(), e);
}
}
locks.clear();
} | java | {
"resource": ""
} |
q173078 | OSchemaProxyObject.synchronizeSchema | test | public synchronized void synchronizeSchema() {
// Aligns the database schema with every entity class registered in the
// object database's entity manager: creates missing OClasses, optionally
// generates their fields, and wires up superclass links. Reloads the schema
// once at the end if anything changed.
OObjectDatabaseTx database = ((OObjectDatabaseTx) ODatabaseRecordThreadLocal.instance().get().getDatabaseOwner());
Collection<Class<?>> registeredEntities = database.getEntityManager().getRegisteredEntities();
boolean automaticSchemaGeneration = database.isAutomaticSchemaGeneration();
boolean reloadSchema = false;
for (Class<?> iClass : registeredEntities) {
// Skip proxies, enums, plain Java types and anonymous classes.
// NOTE(review): this `return` aborts the whole synchronization on the first
// such class instead of skipping it (`continue`) — confirm this is intended.
if (Proxy.class.isAssignableFrom(iClass) || iClass.isEnum() || OReflectionHelper.isJavaType(iClass) || iClass
.isAnonymousClass())
return;
if (!database.getMetadata().getSchema().existsClass(iClass.getSimpleName())) {
database.getMetadata().getSchema().createClass(iClass.getSimpleName());
reloadSchema = true;
}
// Walk up the inheritance chain, mirroring each ancestor in the schema.
for (Class<?> currentClass = iClass; currentClass != Object.class; ) {
if (automaticSchemaGeneration && !currentClass.equals(Object.class) && !currentClass.equals(ODocument.class)) {
((OSchemaProxyObject) database.getMetadata().getSchema()).generateSchema(currentClass, database.getUnderlying());
}
String iClassName = currentClass.getSimpleName();
currentClass = currentClass.getSuperclass();
if (currentClass == null || currentClass.equals(ODocument.class))
// POJO EXTENDS ODOCUMENT: SPECIAL CASE: AVOID TO CONSIDER
// ODOCUMENT FIELDS
currentClass = Object.class;
if (database != null && !database.isClosed() && !currentClass.equals(Object.class)) {
OClass oSuperClass;
OClass currentOClass = database.getMetadata().getSchema().getClass(iClassName);
if (!database.getMetadata().getSchema().existsClass(currentClass.getSimpleName())) {
oSuperClass = database.getMetadata().getSchema().createClass(currentClass.getSimpleName());
reloadSchema = true;
} else {
oSuperClass = database.getMetadata().getSchema().getClass(currentClass.getSimpleName());
reloadSchema = true;
}
// Link the subclass to its schema superclass if not already linked.
if (!currentOClass.getSuperClasses().contains(oSuperClass)) {
currentOClass.setSuperClasses(Arrays.asList(oSuperClass));
reloadSchema = true;
}
}
}
}
if (database != null && !database.isClosed() && reloadSchema) {
database.getMetadata().getSchema().reload();
}
} | java | {
"resource": ""
} |
q173079 | OAbstractProfiler.updateMetadata | test | protected void updateMetadata(final String iName, final String iDescription, final METRIC_TYPE iType) {
// Registers the metric's description and type the first time the metric name
// is seen; subsequent calls for the same name are no-ops.
if (iDescription == null)
return;
if (dictionary.putIfAbsent(iName, iDescription) == null)
types.put(iName, iType);
} | java | {
"resource": ""
} |
q173080 | ORecordIteratorClusters.last | test | @Override
// Positions this iterator on the last record of the last cluster and returns
// the iterator itself (fluent style). If the initially read record is
// filtered out by include(), falls back to hasPrevious() to locate the
// nearest preceding record that passes the filter.
public ORecordIteratorClusters<REC> last() {
if (clusterIds.length == 0)
return this;
browsedRecords = 0;
// Jump to the last cluster and move the cursor past its end.
currentClusterIdx = clusterIds.length - 1;
updateClusterRange();
current.setClusterId(clusterIds[currentClusterIdx]);
resetCurrentPosition();
prevPosition();
final ORecord record = getRecord();
currentRecord = readCurrentRecord(record, 0);
if (currentRecord != null && !include(currentRecord)) {
// Record rejected by the filter: let hasPrevious() advance backwards.
currentRecord = null;
hasPrevious();
}
return this;
} | java | {
"resource": ""
} |
q173081 | OJSONReader.nextChar | test | public int nextChar() throws IOException {
// Reads the next character from the underlying stream, decoding \\uXXXX
// escapes, and maintains cursor/line/column counters. Returns -1 on EOF.
if (missedChar != null) {
// RETURNS THE PREVIOUS PARSED CHAR
c = missedChar.charValue();
missedChar = null;
} else {
int read = in.read();
if (read == -1)
return -1;
c = (char) read;
if (c == '\\') {
read = in.read();
if (read == -1)
return -1;
char c2 = (char) read;
if (c2 == 'u') {
// DECODE UNICODE CHAR
final StringBuilder buff = new StringBuilder(8);
for (int i = 0; i < 4; ++i) {
read = in.read();
if (read == -1)
return -1;
buff.append((char) read);
}
// 6 bytes consumed: backslash, 'u' and the 4 hex digits.
// NOTE(review): this early return skips the line/column bookkeeping
// below — confirm the counters are allowed to drift on escapes.
cursor += 6;
return (char) Integer.parseInt(buff.toString(), 16);
} else {
// REMEMBER THE CURRENT CHAR TO RETURN NEXT TIME
missedChar = c2;
}
}
}
cursor++;
if (c == NEW_LINE) {
++lineNumber;
columnNumber = 0;
} else
++columnNumber;
return (char) c;
} | java | {
"resource": ""
} |
q173082 | OCommandExecutorSQLFindReferences.execute | test | public Object execute(final Map<Object, Object> iArgs) {
// Runs the FIND REFERENCES command: optionally resolves the sub-query into
// record identities, then delegates the reference lookup to the helper.
// Fail fast when parse() has populated neither explicit RIDs nor a sub-query.
if (subQuery == null && recordIds.isEmpty())
throw new OCommandExecutionException("Cannot execute the command because it has not been parsed yet");
if (subQuery != null) {
// Execute the sub-query and collect the identities of its results.
final List<OIdentifiable> subQueryResult = new OCommandSQL(subQuery.toString()).execute();
for (final OIdentifiable identifiable : subQueryResult)
recordIds.add(identifiable.getIdentity());
}
return OFindReferenceHelper.findReferences(recordIds, classList);
} | java | {
"resource": ""
} |
q173083 | OCommandRequestAbstract.onAsyncReplicationError | test | @Override
// Installs (or clears, when iCallback is null) the async-replication error
// handler for this request. The installed wrapper keeps its own retry counter
// and re-executes the command when the user callback asks for RETRY; in all
// cases the wrapper itself reports IGNORE upward.
public OCommandRequestAbstract onAsyncReplicationError(final OAsyncReplicationError iCallback) {
if (iCallback != null) {
onAsyncReplicationError = new OAsyncReplicationError() {
// Retry attempts made so far; incremented on every invocation.
int retry = 0;
@Override
public ACTION onAsyncReplicationError(Throwable iException, final int iRetry) {
switch (iCallback.onAsyncReplicationError(iException, ++retry)) {
case RETRY:
execute();
break;
case IGNORE:
// Nothing to do: fall through to the common IGNORE result below.
}
return ACTION.IGNORE;
}
};
} else
onAsyncReplicationError = null;
return this;
} | java | {
"resource": ""
} |
q173084 | OCompressionFactory.register | test | public void register(final Class<? extends OCompression> compression) {
// Registers a compression implementation class under the name reported by a
// probe instance. Duplicate names (in either registry) and instantiation
// failures are reported through the error log rather than propagated.
try {
final OCompression probe = compression.newInstance();
final String name = probe.name();
// The thrown exceptions below are caught by the catch clause and logged.
if (compressions.containsKey(name))
throw new IllegalArgumentException("Compression with name '" + name + "' was already registered");
if (compressionClasses.containsKey(probe.name()))
throw new IllegalArgumentException("Compression with name '" + name + "' was already registered");
compressionClasses.put(name, compression);
} catch (Exception e) {
OLogManager.instance().error(this, "Cannot register storage compression algorithm '%s'", e, compression);
}
} | java | {
"resource": ""
} |
q173085 | ONative.getOpenFilesLimit | test | public int getOpenFilesLimit(boolean verbose, int recommended, int defLimit) {
// Determines how many file handles the disk cache may use. On Linux it
// queries RLIMIT_NOFILE via the C library and reserves half (minus 512) for
// the cache; on Windows it returns the recommended value; otherwise (or when
// the query fails) it falls back to defLimit.
if (Platform.isLinux()) {
final OCLibrary.Rlimit rlimit = new OCLibrary.Rlimit();
final int result = C_LIBRARY.getrlimit(OCLibrary.RLIMIT_NOFILE, rlimit);
// getrlimit returns 0 on success.
if (result == 0 && rlimit.rlim_cur > 0) {
if (verbose) {
OLogManager.instance().infoNoDb(this, "Detected limit of amount of simultaneously open files is %d, "
+ " limit of open files for disk cache will be set to %d", rlimit.rlim_cur, rlimit.rlim_cur / 2 - 512);
}
if (rlimit.rlim_cur < recommended) {
OLogManager.instance()
.warnNoDb(this, "Value of limit of simultaneously open files is too small, recommended value is %d", recommended);
}
// Leave half of the handles (and a 512 safety margin) to the rest of the process.
return (int) rlimit.rlim_cur / 2 - 512;
} else {
if (verbose) {
OLogManager.instance().infoNoDb(this, "Can not detect value of limit of open files.");
}
}
} else if (Platform.isWindows()) {
if (verbose) {
OLogManager.instance()
.infoNoDb(this, "Windows OS is detected, %d limit of open files will be set for the disk cache.", recommended);
}
return recommended;
}
if (verbose) {
OLogManager.instance().infoNoDb(this, "Default limit of open files (%d) will be used.", defLimit);
}
return defLimit;
} | java | {
"resource": ""
} |
q173086 | OMathExpression.allowsIndexedFunctionExecutionOnTarget | test | public boolean allowsIndexedFunctionExecutionOnTarget(OFromClause target, OCommandContext context,
OBinaryCompareOperator operator, Object right) {
// Only an expression with exactly one child can delegate the decision;
// composite expressions never allow indexed function execution.
return this.childExpressions.size() == 1
&& this.childExpressions.get(0).allowsIndexedFunctionExecutionOnTarget(target, context, operator, right);
} | java | {
"resource": ""
} |
q173087 | OMemoryStream.move | test | public void move(final int iFrom, final int iPosition) {
// Shifts buffer content starting at iFrom by iPosition bytes: positive
// offsets move data toward the end of the buffer, negative toward the start.
// A zero offset is a no-op.
if (iPosition == 0)
return;
final int destination = iFrom + iPosition;
// Copy only as many bytes as fit between the source/destination and the
// buffer end, matching the direction of the move.
final int length = iPosition > 0 ? buffer.length - destination : buffer.length - iFrom;
System.arraycopy(buffer, iFrom, buffer, destination, length);
} | java | {
"resource": ""
} |
q173088 | OMemoryStream.fill | test | public void fill(final int iLength, final byte iFiller) {
// Appends iLength copies of iFiller at the current position, growing the
// buffer first if needed, and advances the position past the filled region.
assureSpaceFor(iLength);
final int end = position + iLength;
Arrays.fill(buffer, position, end, iFiller);
position = end;
} | java | {
"resource": ""
} |
q173089 | OScriptExecutionPlan.executeUntilReturn | test | public OExecutionStepInternal executeUntilReturn() {
// Executes every script line except the last one, stopping early if a line
// containing RETURN actually produces a return step. Lines without a return
// are fully drained for their side effects. Returns the step whose results
// the caller should stream (the return step, or the final script line).
if (steps.size() > 0) {
lastStep = steps.get(steps.size() - 1);
}
for (int i = 0; i < steps.size() - 1; i++) {
ScriptLineStep step = steps.get(i);
if (step.containsReturn()) {
OExecutionStepInternal returnStep = step.executeUntilReturn(ctx);
if (returnStep != null) {
lastStep = returnStep;
return lastStep;
}
}
// Drain the step completely: consume each batch, then pull the next one
// until an empty batch is returned.
OResultSet lastResult = step.syncPull(ctx, 100);
while (lastResult.hasNext()) {
while (lastResult.hasNext()) {
lastResult.next();
}
lastResult = step.syncPull(ctx, 100);
}
}
this.lastStep = steps.get(steps.size() - 1);
return lastStep;
} | java | {
"resource": ""
} |
q173090 | OScriptExecutionPlan.executeFull | test | public OExecutionStepInternal executeFull() {
// Executes every line of the script for its side effects. If a line contains
// a RETURN that produces a return step, execution stops and that step is
// returned; otherwise all lines are drained and null is returned.
for (int i = 0; i < steps.size(); i++) {
ScriptLineStep step = steps.get(i);
if (step.containsReturn()) {
OExecutionStepInternal returnStep = step.executeUntilReturn(ctx);
if (returnStep != null) {
return returnStep;
}
}
// Drain the step completely: consume each batch, then pull the next one
// until an empty batch is returned.
OResultSet lastResult = step.syncPull(ctx, 100);
while (lastResult.hasNext()) {
while (lastResult.hasNext()) {
lastResult.next();
}
lastResult = step.syncPull(ctx, 100);
}
}
return null;
} | java | {
"resource": ""
} |
q173091 | OQueryTargetOperator.evaluateRecord | test | @Override
// Always returns true: record-level evaluation is a pass-through for this
// operator — presumably because the real filtering happens against the query
// target/index rather than per record (TODO confirm against the subclasses).
public Object evaluateRecord(final OIdentifiable iRecord, ODocument iCurrentResult, final OSQLFilterCondition iCondition,
final Object iLeft, final Object iRight, OCommandContext iContext, final ODocumentSerializer serializer) {
return true;
} | java | {
"resource": ""
} |
q173092 | OCommandExecutorSQLMoveVertex.execute | test | public Object execute(final Map<Object, Object> iArgs) {
// Executes MOVE VERTEX: relocates each source vertex to the target class or
// cluster, applies SET fields and MERGE content to the new record, and
// returns a list of {old, new} RID pairs. Commits in batches when a batch
// size is configured.
ODatabaseDocumentInternal db = getDatabase();
db.begin();
if (className == null && clusterName == null)
throw new OCommandExecutionException("Cannot execute the command because it has not been parsed yet");
// NOTE(review): shutdownGraph and txAlreadyBegun are computed but never
// used below — likely leftovers; confirm before removing.
OModifiableBoolean shutdownGraph = new OModifiableBoolean();
final boolean txAlreadyBegun = getDatabase().getTransaction().isActive();
try {
final Set<OIdentifiable> sourceRIDs = OSQLEngine.getInstance().parseRIDTarget(db, source, context, iArgs);
// CREATE EDGES
final List<ODocument> result = new ArrayList<ODocument>(sourceRIDs.size());
for (OIdentifiable from : sourceRIDs) {
final OVertex fromVertex = toVertex(from);
if (fromVertex == null)
continue;
// Capture the old identity before the move invalidates it.
final ORID oldVertex = fromVertex.getIdentity().copy();
final ORID newVertex = fromVertex.moveTo(className, clusterName);
final ODocument newVertexDoc = newVertex.getRecord();
if (fields != null) {
// EVALUATE FIELDS
for (final OPair<String, Object> f : fields) {
if (f.getValue() instanceof OSQLFunctionRuntime)
f.setValue(((OSQLFunctionRuntime) f.getValue()).getValue(newVertex.getRecord(), null, context));
}
OSQLHelper.bindParameters(newVertexDoc, fields, new OCommandParameters(iArgs), context);
}
if (merge != null)
newVertexDoc.merge(merge, true, false);
// SAVE CHANGES
newVertexDoc.save();
// PUT THE MOVE INTO THE RESULT
result
.add(new ODocument().setTrackingChanges(false).field("old", oldVertex, OType.LINK).field("new", newVertex, OType.LINK));
// Commit and reopen the transaction every `batch` moves.
if (batch > 0 && result.size() % batch == 0) {
db.commit();
db.begin();
}
}
db.commit();
return result;
} finally {
// NOTE(review): dead finally block — the conditional commit is commented
// out, so nothing runs here; confirm whether it can be removed.
// if (!txAlreadyBegun)
// db.commit();
}
} | java | {
"resource": ""
} |
q173093 | OStorageConfigurationImpl.load | test | public OStorageConfigurationImpl load(final OContextConfiguration configuration) throws OSerializationException {
// Loads the storage configuration from its well-known record (CONFIG_RID)
// and deserializes it into this instance, all under the write lock.
// Returns this for call chaining.
lock.acquireWriteLock();
try {
initConfiguration(configuration);
final byte[] record = storage.readRecord(CONFIG_RID, null, false, false, null).getResult().buffer;
if (record == null)
throw new OStorageException("Cannot load database configuration. The database seems corrupted");
fromStream(record, 0, record.length, streamCharset);
} finally {
lock.releaseWriteLock();
}
return this;
} | java | {
"resource": ""
} |
q173094 | OMatchStatement.parse | test | @Override
// Parses the MATCH command text into this executor: wraps the request in a
// synchronous query when needed, runs the SQL grammar parser over the text,
// copies the parsed clauses into this instance, then builds and validates
// the traversal patterns.
public <RET extends OCommandExecutor> RET parse(OCommandRequest iRequest) {
final OCommandRequestText textRequest = (OCommandRequestText) iRequest;
if (iRequest instanceof OSQLSynchQuery) {
request = (OSQLSynchQuery<ODocument>) iRequest;
} else if (iRequest instanceof OSQLAsynchQuery) {
request = (OSQLAsynchQuery<ODocument>) iRequest;
} else {
// BUILD A QUERY OBJECT FROM THE COMMAND REQUEST
request = new OSQLSynchQuery<ODocument>(textRequest.getText());
if (textRequest.getResultListener() != null) {
request.setResultListener(textRequest.getResultListener());
}
}
String queryText = textRequest.getText();
// please, do not look at this... refactor this ASAP with new executor structure
// NOTE(review): getBytes() uses the platform default charset while the
// parser below is given the database charset — confirm they always agree.
final InputStream is = new ByteArrayInputStream(queryText.getBytes());
OrientSql osql = null;
try {
ODatabaseDocumentInternal db = getDatabase();
if (db == null) {
osql = new OrientSql(is);
} else {
osql = new OrientSql(is, db.getStorage().getConfiguration().getCharset());
}
} catch (UnsupportedEncodingException e) {
// Fall back to the default-charset parser when the DB charset is invalid.
OLogManager.instance().warn(this,
"Invalid charset for database " + getDatabase() + " " + getDatabase().getStorage().getConfiguration().getCharset());
osql = new OrientSql(is);
}
try {
// Copy the parsed clauses from the freshly parsed statement into this one.
OMatchStatement result = (OMatchStatement) osql.parse();
this.matchExpressions = result.matchExpressions;
this.notMatchExpressions = result.notMatchExpressions;
this.returnItems = result.returnItems;
this.returnAliases = result.returnAliases;
this.limit = result.limit;
} catch (ParseException e) {
OCommandSQLParsingException ex = new OCommandSQLParsingException(e, queryText);
OErrorCode.QUERY_PARSE_ERROR.throwException(ex.getMessage(), ex);
}
buildPatterns();
pattern.validate();
return (RET) this;
} | java | {
"resource": ""
} |
q173095 | OMatchStatement.execute | test | @Override
public Object execute(Map<Object, Object> iArgs) {
// Bind the invocation arguments into the command context, then delegate to
// the main execution overload with this statement's request and listener.
context.setInputParameters(iArgs);
return execute(request, context, progressListener);
} | java | {
"resource": ""
} |
q173096 | OMatchStatement.updateScheduleStartingAt | test | private void updateScheduleStartingAt(PatternNode startNode, Set<PatternNode> visitedNodes, Set<PatternEdge> visitedEdges,
Map<String, Set<String>> remainingDependencies, List<EdgeTraversal> resultingSchedule) {
// Recursively extends the traversal schedule from startNode, marking it
// visited, clearing it from other nodes' dependency sets, and appending one
// EdgeTraversal per pattern edge according to the rules documented below.
// OrientDB requires the schedule to contain all edges present in the query, which is a stronger condition
// than simply visiting all nodes in the query. Consider the following example query:
// MATCH {
// class: A,
// as: foo
// }.in() {
// as: bar
// }, {
// class: B,
// as: bar
// }.out() {
// as: foo
// } RETURN $matches
// The schedule for the above query must have two edges, even though there are only two nodes and they can both
// be visited with the traversal of a single edge.
//
// To satisfy it, we obey the following for each non-optional node:
// - ignore edges to neighboring nodes which have unsatisfied dependencies;
// - for visited neighboring nodes, add their edge if it wasn't already present in the schedule, but do not
// recurse into the neighboring node;
// - for unvisited neighboring nodes with satisfied dependencies, add their edge and recurse into them.
visitedNodes.add(startNode);
// startNode is now resolvable, so it no longer blocks any other node.
for (Set<String> dependencies : remainingDependencies.values()) {
dependencies.remove(startNode.alias);
}
// Collect all incident edges, remembering for each whether it is outbound
// from startNode's point of view.
Map<PatternEdge, Boolean> edges = new LinkedHashMap<PatternEdge, Boolean>();
for (PatternEdge outEdge : startNode.out) {
edges.put(outEdge, true);
}
for (PatternEdge inEdge : startNode.in) {
edges.put(inEdge, false);
}
for (Map.Entry<PatternEdge, Boolean> edgeData : edges.entrySet()) {
PatternEdge edge = edgeData.getKey();
boolean isOutbound = edgeData.getValue();
PatternNode neighboringNode = isOutbound ? edge.in : edge.out;
if (!remainingDependencies.get(neighboringNode.alias).isEmpty()) {
// Unsatisfied dependencies, ignore this neighboring node.
continue;
}
if (visitedNodes.contains(neighboringNode)) {
if (!visitedEdges.contains(edge)) {
// If we are executing in this block, we are in the following situation:
// - the startNode has not been visited yet;
// - it has a neighboringNode that has already been visited;
// - the edge between the startNode and the neighboringNode has not been scheduled yet.
//
// The isOutbound value shows us whether the edge is outbound from the point of view of the startNode.
// However, if there are edges to the startNode, we must visit the startNode from an already-visited
// neighbor, to preserve the validity of the traversal. Therefore, we negate the value of isOutbound
// to ensure that the edge is always scheduled in the direction from the already-visited neighbor
// toward the startNode. Notably, this is also the case when evaluating "optional" nodes -- we always
// visit the optional node from its non-optional and already-visited neighbor.
//
// The only exception to the above is when we have edges with "while" conditions. We are not allowed
// to flip their directionality, so we leave them as-is.
boolean traversalDirection;
if (startNode.optional || edge.item.isBidirectional()) {
traversalDirection = !isOutbound;
} else {
traversalDirection = isOutbound;
}
visitedEdges.add(edge);
resultingSchedule.add(new EdgeTraversal(edge, traversalDirection));
}
} else if (!startNode.optional) {
// If the neighboring node wasn't visited, we don't expand the optional node into it, hence the above check.
// Instead, we'll allow the neighboring node to add the edge we failed to visit, via the above block.
if (visitedEdges.contains(edge)) {
// Should never happen.
throw new AssertionError("The edge was visited, but the neighboring vertex was not: " + edge + " " + neighboringNode);
}
visitedEdges.add(edge);
resultingSchedule.add(new EdgeTraversal(edge, isOutbound));
updateScheduleStartingAt(neighboringNode, visitedNodes, visitedEdges, remainingDependencies, resultingSchedule);
}
}
} | java | {
"resource": ""
} |
q173097 | OCompositeIndexDefinition.addIndex | test | public void addIndex(final OIndexDefinition indexDefinition) {
// Appends one component definition to this composite index and folds its
// collate into the composite collate.
indexDefinitions.add(indexDefinition);
if (indexDefinition instanceof OIndexDefinitionMultiValue) {
// At most one multi-value (collection) component is allowed per composite key.
if (multiValueDefinitionIndex != -1)
throw new OIndexException("Composite key cannot contain more than one collection item");
multiValueDefinitionIndex = indexDefinitions.size() - 1;
}
collate.addCollate(indexDefinition.getCollate());
} | java | {
"resource": ""
} |
q173098 | OProjectionItem.splitForAggregation | test | public OProjectionItem splitForAggregation(AggregateProjectionSplit aggregateSplit, OCommandContext ctx) {
// Non-aggregate items pass through unchanged.
if (!isAggregate()) {
return this;
}
// Aggregate items are rebuilt around the split expression, keeping the
// original alias and nested projection.
final OProjectionItem split = new OProjectionItem(-1);
split.alias = getProjectionAlias();
split.expression = expression.splitForAggregation(aggregateSplit, ctx);
split.nestedProjection = nestedProjection;
return split;
} | java | {
"resource": ""
} |
q173099 | OJson.toObjectDetermineType | test | public Object toObjectDetermineType(OResult source, OCommandContext ctx) {
// Materializes this JSON against the source: a class name or an explicit
// "d" (document) type marker selects document form; otherwise a plain map.
final String className = getClassNameForDocument(ctx);
final String type = getTypeForDocument(ctx);
final boolean asDocument = className != null || (type != null && "d".equalsIgnoreCase(type));
return asDocument ? toDocument(source, ctx, className) : toMap(source, ctx);
} | java | {
"resource": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.