_id stringlengths 2 7 | title stringlengths 3 140 | partition stringclasses 3
values | text stringlengths 73 34.1k | language stringclasses 1
value | meta_information dict |
|---|---|---|---|---|---|
q21400 | PlanAligner.getTimedPhase | train | private TimedPhase getTimedPhase(final List<TimedPhase> timedPhases, final DateTime effectiveDate, final WhichPhase which) {
TimedPhase cur = null;
TimedPhase next = null;
for (final TimedPhase phase : timedPhases) {
if (phase.getStartPhase().isAfter(effectiveDate)) {
... | java | {
"resource": ""
} |
q21401 | EhcacheShiroManagerProvider.getEhcacheManager | train | private org.ehcache.CacheManager getEhcacheManager() {
try {
final Field f = eh107CacheManager.getClass().getDeclaredField("ehCacheManager");
f.setAccessible(true);
return (org.ehcache.CacheManager) f.get(eh107CacheManager);
} catch (final IllegalAccessException e) {... | java | {
"resource": ""
} |
q21402 | ItemsInterval.buildForMissingInterval | train | public void buildForMissingInterval(@Nullable final LocalDate startDate, @Nullable final LocalDate endDate, @Nullable final UUID targetInvoiceId, final Collection<Item> output, final boolean addRepair) {
final Item item = createNewItem(startDate, endDate, targetInvoiceId, addRepair);
if (item != null) {... | java | {
"resource": ""
} |
q21403 | ItemsInterval.buildFromItems | train | public void buildFromItems(final Collection<Item> output, final boolean mergeMode) {
        // Delegates to buildForMissingInterval with an unbounded interval (null start/end
        // dates) and no target invoice; mergeMode is forwarded as the addRepair flag.
        buildForMissingInterval(null, null, null, output, mergeMode);
    } | java | {
"resource": ""
} |
q21404 | InvoiceDateUtils.calculateProrationBetweenDates | train | private static BigDecimal calculateProrationBetweenDates(final LocalDate startDate, final LocalDate endDate, final LocalDate previousBillingCycleDate, final LocalDate nextBillingCycleDate) {
final int daysBetween = Days.daysBetween(previousBillingCycleDate, nextBillingCycleDate).getDays();
return calcul... | java | {
"resource": ""
} |
q21405 | IncompletePaymentTransactionTask.computeNewTransactionStatusFromPaymentTransactionInfoPlugin | train | private TransactionStatus computeNewTransactionStatusFromPaymentTransactionInfoPlugin(final PaymentTransactionInfoPlugin input, final TransactionStatus currentTransactionStatus) {
final TransactionStatus newTransactionStatus = PaymentTransactionInfoPluginConverter.toTransactionStatus(input);
return (new... | java | {
"resource": ""
} |
q21406 | BlockingStateOrdering.insertFromBlockingEvent | train | private int insertFromBlockingEvent(final Collection<UUID> allEntitlementUUIDs, final BlockingState currentBlockingState, final List<SubscriptionEvent> inputExistingEvents, final SupportForOlderVersionThan_0_17_X backwardCompatibleContext, final InternalTenantContext internalTenantContext, final Collection<Subscription... | java | {
"resource": ""
} |
q21407 | BlockingStateOrdering.findPrevNext | train | private SubscriptionEvent[] findPrevNext(final List<SubscriptionEvent> events, final UUID targetEntitlementId, final SubscriptionEvent insertionEvent) {
// Find prev/next event for the same entitlement
final SubscriptionEvent[] result = new DefaultSubscriptionEvent[2];
if (insertionEvent == null... | java | {
"resource": ""
} |
q21408 | EntitySqlDaoWrapperFactory.become | train | public <NewSqlDao extends EntitySqlDao<NewEntityModelDao, NewEntity>,
NewEntityModelDao extends EntityModelDao<NewEntity>,
NewEntity extends Entity> NewSqlDao become(final Class<NewSqlDao> newSqlDaoClass) {
final NewSqlDao newSqlDao = SqlObjectBuilder.attach(handle, newSqlDaoClass);
... | java | {
"resource": ""
} |
q21409 | HeapClusterAnalyzer.prepare | train | public void prepare() {
for(JavaClass jc: heap.getAllClasses()) {
for(TypeFilterStep it: interestingTypes) {
if (it.evaluate(jc)) {
rootClasses.add(jc.getName());
}
}
for(TypeFilterStep bt: blacklistedTypes) {
... | java | {
"resource": ""
} |
q21410 | LongHashMap.init | train | private void init(int capacity) {
        // Capacity must be a power of two within [MINIMUM_CAPACITY, MAXIMUM_CAPACITY].
        assert (capacity & -capacity) == capacity; // power of 2
        assert capacity >= MINIMUM_CAPACITY;
        assert capacity <= MAXIMUM_CAPACITY;
        // Resize once the map is 3/4 full; the flat table holds two longs per slot.
        threshold = (capacity * 3) / 4;
        table = new long[2 * capacity];
    } | java | {
"resource": ""
} |
q21411 | LongHashMap.resize | train | private void resize(int newCapacity) {
// assert (newCapacity & -newCapacity) == newCapacity; // power of 2
int newLength = newCapacity * 2;
long[] oldTable = table;
int oldLength = oldTable.length;
if (oldLength == 2*MAXIMUM_CAPACITY) { // can't expand any further
i... | java | {
"resource": ""
} |
q21412 | MagicReader.readMagic | train | public static byte[] readMagic(InputStream is) throws IOException {
byte[] buf = new byte[32];
int n = 0;
while(true) {
int c = is.read();
if (c < 0) {
throw new EOFException("Cannot read magic");
}
if (n >= buf.length) {
... | java | {
"resource": ""
} |
q21413 | CommandLauncher.breakCage | train | private void breakCage(String... args) {
if ("false".equalsIgnoreCase(System.getProperty("sjk.breakCage", "true"))) {
// do not break
return;
}
RuntimeMXBean rtBean = ManagementFactory.getRuntimeMXBean();
String spec = rtBean.getSpecVersion();
if (spec.startsWith("1.")) {
// good classic Java
if (... | java | {
"resource": ""
} |
q21414 | JsonWriteContext.writeFieldName | train | public final int writeFieldName(String name)
{
if (_type == TYPE_OBJECT) {
if (_currentName != null) { // just wrote a name...
return STATUS_EXPECT_VALUE;
}
_currentName = name;
return (_index < 0) ? STATUS_OK_AS_IS : STATUS_OK_AFTER_COMMA;
... | java | {
"resource": ""
} |
q21415 | WriterBasedGenerator._writeLongString | train | private void _writeLongString(String text)
throws IOException, JsonGenerationException
{
// First things first: let's flush the buffer to get some more room
_flushBuffer();
// Then we can write
final int textLen = text.length();
int offset = 0;
do {
... | java | {
"resource": ""
} |
q21416 | WriterBasedGenerator._writeString | train | private final void _writeString(char[] text, int offset, int len)
throws IOException, JsonGenerationException
{
if (_maximumNonEscapedChar != 0) {
_writeStringASCII(text, offset, len, _maximumNonEscapedChar);
return;
}
/* Let's just find longest spans... | java | {
"resource": ""
} |
q21417 | WriterBasedGenerator._writeSimpleObject | train | protected void _writeSimpleObject(Object value) throws IOException, JsonGenerationException {
/* 31-Dec-2009, tatu: Actually, we could just handle some basic
* types even without codec. This can improve interoperability,
* and specifically help with TokenBuffer.
*/
if (value == null) {... | java | {
"resource": ""
} |
q21418 | JsonProcessingException.getMessage | train | @Override
public String getMessage()
{
String msg = super.getMessage();
if (msg == null) {
msg = "N/A";
}
JsonLocation loc = getLocation();
if (loc != null) {
StringBuilder sb = new StringBuilder();
sb.append(msg);
sb.append... | java | {
"resource": ""
} |
q21419 | ChannelPool.getChannelPool | train | private List<CompletableFuture<Channel>> getChannelPool(Address address) {
List<CompletableFuture<Channel>> channelPool = channels.get(address);
if (channelPool != null) {
return channelPool;
}
return channels.computeIfAbsent(address, e -> {
List<CompletableFuture<Channel>> defaultList = new... | java | {
"resource": ""
} |
q21420 | ChannelPool.getChannel | train | CompletableFuture<Channel> getChannel(Address address, String messageType) {
List<CompletableFuture<Channel>> channelPool = getChannelPool(address);
int offset = getChannelOffset(messageType);
CompletableFuture<Channel> channelFuture = channelPool.get(offset);
if (channelFuture == null || channelFuture... | java | {
"resource": ""
} |
q21421 | Match.map | train | public <V> Match<V> map(Function<T, V> mapper) {
    // Wildcard matches stay wildcards regardless of the mapping.
    if (matchAny) {
      return any();
    }
    // A null value cannot be mapped; preserve null/not-null semantics.
    if (value == null) {
      return negation ? ifNotNull() : ifNull();
    }
    // Map the concrete value, preserving negation.
    V mapped = mapper.apply(value);
    return negation ? ifNotValue(mapped) : ifValue(mapped);
  } | java | {
"resource": ""
} |
q21422 | Match.matches | train | public boolean matches(T other) {
if (matchAny) {
return true;
} else if (other == null) {
return negation ? value != null : value == null;
} else {
if (value instanceof byte[]) {
boolean equal = Arrays.equals((byte[]) value, (byte[]) other);
return negation ? !equal : equa... | java | {
"resource": ""
} |
q21423 | SynchronousReplicator.completeFutures | train | private void completeFutures() {
long commitIndex = queues.values().stream()
.map(queue -> queue.ackedIndex)
.reduce(Math::min)
.orElse(0L);
for (long i = context.getCommitIndex() + 1; i <= commitIndex; i++) {
CompletableFuture<Void> future = futures.remove(i);
if (future != ... | java | {
"resource": ""
} |
q21424 | MappableJournalSegmentReader.map | train | void map(ByteBuffer buffer) {
    // Switch to a memory-mapped reader, carrying over the read position of the
    // reader currently in use. No-op if already mapped.
    if (!(reader instanceof MappedJournalSegmentReader)) {
      JournalReader<E> previous = this.reader;
      this.reader = new MappedJournalSegmentReader<>(buffer, segment, maxEntrySize, index, namespace);
      this.reader.reset(previous.getNextIndex());
      previous.close();
    }
  } | java | {
"resource": ""
} |
q21425 | MappableJournalSegmentReader.unmap | train | void unmap() {
    // Fall back to a file-channel based reader, carrying over the current read
    // position. No-op if the reader is not currently mapped.
    if (reader instanceof MappedJournalSegmentReader) {
      JournalReader<E> mapped = this.reader;
      this.reader = new FileChannelJournalSegmentReader<>(channel, segment, maxEntrySize, index, namespace);
      this.reader.reset(mapped.getNextIndex());
      mapped.close();
    }
  } | java | {
"resource": ""
} |
q21426 | DistributedLogServerContext.compactBySize | train | private void compactBySize() {
if (maxLogSize > 0 && journal.size() > maxLogSize) {
JournalSegment<LogEntry> compactSegment = null;
Long compactIndex = null;
for (JournalSegment<LogEntry> segment : journal.segments()) {
Collection<JournalSegment<LogEntry>> remainingSegments = journal.segme... | java | {
"resource": ""
} |
q21427 | DistributedLogServerContext.compactByAge | train | private void compactByAge() {
if (maxLogAge != null) {
long currentTime = System.currentTimeMillis();
JournalSegment<LogEntry> compactSegment = null;
Long compactIndex = null;
for (JournalSegment<LogEntry> segment : journal.segments()) {
if (currentTime - segment.descriptor().updated... | java | {
"resource": ""
} |
q21428 | RaftPartitionServer.delete | train | public void delete() {
try {
Files.walkFileTree(partition.dataDirectory().toPath(), new SimpleFileVisitor<Path>() {
@Override
public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IOException {
Files.delete(file);
return FileVisitResult.CONTINUE;
... | java | {
"resource": ""
} |
q21429 | RecoveringSessionClient.openProxy | train | private void openProxy(CompletableFuture<SessionClient> future) {
log.debug("Opening proxy session");
proxyFactory.get().thenCompose(proxy -> proxy.connect()).whenComplete((proxy, error) -> {
if (error == null) {
synchronized (this) {
this.log = ContextualLoggerFactory.getLogger(getClass... | java | {
"resource": ""
} |
q21430 | Threads.namedThreads | train | public static ThreadFactory namedThreads(String pattern, Logger log) {
    // Build a factory that names threads from the given pattern and routes
    // uncaught exceptions to the supplied logger.
    ThreadFactoryBuilder builder = new ThreadFactoryBuilder();
    builder.setNameFormat(pattern);
    builder.setThreadFactory(new AtomixThreadFactory());
    builder.setUncaughtExceptionHandler((thread, error) ->
        log.error("Uncaught exception on " + thread.getName(), error));
    return builder.build();
  } | java | {
"resource": ""
} |
q21431 | HashBasedPrimaryElection.handleClusterMembershipEvent | train | private void handleClusterMembershipEvent(ClusterMembershipEvent event) {
if (event.type() == ClusterMembershipEvent.Type.MEMBER_ADDED || event.type() == ClusterMembershipEvent.Type.MEMBER_REMOVED) {
recomputeTerm(groupMembershipService.getMembership(partitionId.group()));
}
} | java | {
"resource": ""
} |
q21432 | HashBasedPrimaryElection.incrementTerm | train | private long incrementTerm() {
    // Bump this node's counter (starting at 1 if absent), announce the updated
    // counters to peers, and return the resulting term.
    counters.compute(clusterMembershipService.getLocalMember().id(), (id, count) -> count == null ? 1 : count + 1);
    broadcastCounters();
    return currentTerm();
  } | java | {
"resource": ""
} |
q21433 | HashBasedPrimaryElection.recomputeTerm | train | private synchronized void recomputeTerm(PartitionGroupMembership membership) {
if (membership == null) {
return;
}
// Create a list of candidates based on the availability of members in the group.
List<GroupMember> candidates = new ArrayList<>();
for (MemberId memberId : membership.members())... | java | {
"resource": ""
} |
q21434 | HashBasedPrimaryElection.close | train | void close() {
    // Stop the periodic counter broadcast (allowing an in-flight run to finish)
    // and detach both membership listeners.
    broadcastFuture.cancel(false);
    groupMembershipService.removeListener(groupMembershipEventListener);
    clusterMembershipService.removeListener(clusterMembershipEventListener);
  } | java | {
"resource": ""
} |
q21435 | LogicalClock.update | train | public LogicalTimestamp update(LogicalTimestamp timestamp) {
    // Advance the clock when the supplied timestamp is ahead of the local one,
    // then return whichever timestamp is current.
    if (currentTimestamp.value() < timestamp.value()) {
      this.currentTimestamp = timestamp;
    }
    return currentTimestamp;
  } | java | {
"resource": ""
} |
q21436 | LogicalClock.incrementAndUpdate | train | public LogicalTimestamp incrementAndUpdate(LogicalTimestamp timestamp) {
    // If the supplied timestamp is ahead of even our next tick, adopt it;
    // otherwise simply advance the local clock by one.
    long nextValue = currentTimestamp.value() + 1;
    if (timestamp.value() <= nextValue) {
      return increment();
    }
    return update(timestamp);
  } | java | {
"resource": ""
} |
q21437 | PassiveRole.handleAppend | train | protected CompletableFuture<AppendResponse> handleAppend(final AppendRequest request) {
CompletableFuture<AppendResponse> future = new CompletableFuture<>();
// Check that the term of the given request matches the local term or update the term.
if (!checkTerm(request, future)) {
return future;
}
... | java | {
"resource": ""
} |
q21438 | PassiveRole.checkTerm | train | protected boolean checkTerm(AppendRequest request, CompletableFuture<AppendResponse> future) {
RaftLogWriter writer = raft.getLogWriter();
if (request.term() < raft.getTerm()) {
log.debug("Rejected {}: request term is less than the current term ({})", request, raft.getTerm());
return failAppend(writ... | java | {
"resource": ""
} |
q21439 | PassiveRole.checkPreviousEntry | train | protected boolean checkPreviousEntry(AppendRequest request, CompletableFuture<AppendResponse> future) {
RaftLogWriter writer = raft.getLogWriter();
RaftLogReader reader = raft.getLogReader();
// If the previous term is set, validate that it matches the local log.
// We check the previous log term since... | java | {
"resource": ""
} |
q21440 | PassiveRole.completeAppend | train | protected boolean completeAppend(boolean succeeded, long lastLogIndex, CompletableFuture<AppendResponse> future) {
future.complete(logResponse(AppendResponse.builder()
.withStatus(RaftResponse.Status.OK)
.withTerm(raft.getTerm())
.withSucceeded(succeeded)
.withLastLogIndex(lastLogInd... | java | {
"resource": ""
} |
q21441 | BackupRole.applyOperations | train | private void applyOperations(long fromIndex, long toIndex) {
for (long i = fromIndex + 1; i <= toIndex; i++) {
BackupOperation operation = operations.poll();
if (operation == null) {
requestRestore(context.primary());
break;
}
if (context.nextIndex(operation.index())) {
... | java | {
"resource": ""
} |
q21442 | BackupRole.applyExecute | train | private void applyExecute(ExecuteOperation operation) {
Session session = context.getOrCreateSession(operation.session(), operation.node());
if (operation.operation() != null) {
try {
context.service().apply(new DefaultCommit<>(
context.setIndex(operation.index()),
operatio... | java | {
"resource": ""
} |
q21443 | BackupRole.applyExpire | train | private void applyExpire(ExpireOperation operation) {
    // Advance the service timestamp, then expire the referenced session if it
    // is still known to this backup.
    context.setTimestamp(operation.timestamp());
    PrimaryBackupSession expired = context.getSession(operation.session());
    if (expired == null) {
      return;
    }
    context.expireSession(expired.sessionId().id());
  } | java | {
"resource": ""
} |
q21444 | BackupRole.applyClose | train | private void applyClose(CloseOperation operation) {
    // Advance the service timestamp, then close the referenced session if it
    // is still known to this backup.
    context.setTimestamp(operation.timestamp());
    PrimaryBackupSession closed = context.getSession(operation.session());
    if (closed == null) {
      return;
    }
    context.closeSession(closed.sessionId().id());
  } | java | {
"resource": ""
} |
q21445 | BackupRole.requestRestore | train | private void requestRestore(MemberId primary) {
context.protocol().restore(primary, RestoreRequest.request(context.descriptor(), context.currentTerm()))
.whenCompleteAsync((response, error) -> {
if (error == null && response.status() == PrimaryBackupResponse.Status.OK) {
context.resetI... | java | {
"resource": ""
} |
q21446 | EventsResource.getEventLogName | train | private String getEventLogName(String subject, String id) {
    // Event logs are keyed by "<subject>-<id>"; plain concatenation is
    // equivalent to String.format("%s-%s", ...) for String arguments.
    return subject + "-" + id;
  } | java | {
"resource": ""
} |
q21447 | LogProxySession.getOrCreateSession | train | private Session getOrCreateSession(SessionId sessionId) {
Session session = sessions.get(sessionId);
if (session == null) {
session = new LocalSession(sessionId, name(), type(), null, service.serializer());
sessions.put(session.sessionId(), session);
service.register(session);
}
return... | java | {
"resource": ""
} |
q21448 | LogProxySession.consume | train | @SuppressWarnings("unchecked")
private void consume(LogRecord record) {
// Decode the raw log operation from the record.
LogOperation operation = decodeInternal(record.value());
// If this operation is not destined for this primitive, ignore it.
// TODO: If multiple primitives of different types are ... | java | {
"resource": ""
} |
q21449 | AbstractAtomicMapService.valuesEqual | train | protected boolean valuesEqual(MapEntryValue oldValue, MapEntryValue newValue) {
    // Two entries are equal when both are null, or both are non-null and wrap
    // equal byte arrays.
    if (oldValue == null || newValue == null) {
      return oldValue == newValue;
    }
    return valuesEqual(oldValue.value(), newValue.value());
  } | java | {
"resource": ""
} |
q21450 | AbstractAtomicMapService.valuesEqual | train | protected boolean valuesEqual(byte[] oldValue, byte[] newValue) {
    // Arrays.equals already treats two nulls as equal and exactly one null as
    // unequal, matching the original explicit null checks.
    return Arrays.equals(oldValue, newValue);
  } | java | {
"resource": ""
} |
q21451 | AbstractAtomicMapService.valueIsNull | train | protected boolean valueIsNull(MapEntryValue value) {
    // Tombstones mark logically deleted entries, so they count as null values.
    return value == null || value.type() == MapEntryValue.Type.TOMBSTONE;
  } | java | {
"resource": ""
} |
q21452 | AbstractAtomicMapService.putValue | train | protected void putValue(K key, MapEntryValue value) {
    // Replace the entry, stop any expiry timer on the displaced value, and arm
    // a new timer for the incoming value if it carries a TTL.
    MapEntryValue previous = entries().put(key, value);
    cancelTtl(previous);
    scheduleTtl(key, value);
  } | java | {
"resource": ""
} |
q21453 | AbstractAtomicMapService.scheduleTtl | train | protected void scheduleTtl(K key, MapEntryValue value) {
if (value.ttl() > 0) {
value.timer = getScheduler().schedule(Duration.ofMillis(value.ttl()), () -> {
entries().remove(key, value);
publish(new AtomicMapEvent<>(AtomicMapEvent.Type.REMOVE, key, null, toVersioned(value)));
});
}
... | java | {
"resource": ""
} |
q21454 | AbstractAtomicMapService.cancelTtl | train | protected void cancelTtl(MapEntryValue value) {
    // Stop the entry's pending expiry task, if one was ever scheduled.
    if (value == null) {
      return;
    }
    if (value.timer != null) {
      value.timer.cancel();
    }
  } | java | {
"resource": ""
} |
q21455 | AbstractAtomicMapService.removeIf | train | private MapEntryUpdateResult<K, byte[]> removeIf(long index, K key, Predicate<MapEntryValue> predicate) {
MapEntryValue value = entries().get(key);
// If the value does not exist or doesn't match the predicate, return a PRECONDITION_FAILED error.
if (valueIsNull(value) || !predicate.test(value)) {
re... | java | {
"resource": ""
} |
q21456 | AbstractAtomicMapService.replaceIf | train | private MapEntryUpdateResult<K, byte[]> replaceIf(
long index, K key, MapEntryValue newValue, Predicate<MapEntryValue> predicate) {
MapEntryValue oldValue = entries().get(key);
// If the key is not set or the current value doesn't match the predicate, return a PRECONDITION_FAILED error.
if (valueIsNu... | java | {
"resource": ""
} |
q21457 | AbstractAtomicMapService.commitTransaction | train | private CommitResult commitTransaction(TransactionScope<K> transactionScope) {
TransactionLog<MapUpdate<K, byte[]>> transactionLog = transactionScope.transactionLog();
boolean retainTombstones = !activeTransactions.isEmpty();
List<AtomicMapEvent<K, byte[]>> eventsToPublish = Lists.newArrayList();
for (... | java | {
"resource": ""
} |
q21458 | AbstractAtomicMapService.discardTombstones | train | private void discardTombstones() {
if (activeTransactions.isEmpty()) {
Iterator<Map.Entry<K, MapEntryValue>> iterator = entries().entrySet().iterator();
while (iterator.hasNext()) {
MapEntryValue value = iterator.next().getValue();
if (value.type() == MapEntryValue.Type.TOMBSTONE) {
... | java | {
"resource": ""
} |
q21459 | AbstractAtomicMapService.publish | train | private void publish(List<AtomicMapEvent<K, byte[]>> events) {
    // Fan each event out to every registered listener's session client,
    // listener-major (all events delivered to one listener before the next).
    listeners.forEach(listener -> events.forEach(event -> getSession(listener).accept(client -> client.change(event))));
  } | java | {
"resource": ""
} |
q21460 | PrimitiveBuilder.protocol | train | protected PrimitiveProtocol protocol() {
PrimitiveProtocol protocol = this.protocol;
if (protocol == null) {
PrimitiveProtocolConfig<?> protocolConfig = config.getProtocolConfig();
if (protocolConfig == null) {
Collection<PartitionGroup> partitionGroups = managementService.getPartitionServic... | java | {
"resource": ""
} |
q21461 | DocumentTreeResult.ok | train | public static <V> DocumentTreeResult<V> ok(V result) {
    // Wraps a successful result value in an OK-status DocumentTreeResult.
    return new DocumentTreeResult<V>(Status.OK, result);
  } | java | {
"resource": ""
} |
q21462 | PrimitiveEvent.event | train | public static PrimitiveEvent event(EventType eventType, byte[] value) {
    // Builds an event with the canonicalized event type and raw payload.
    return new PrimitiveEvent(EventType.canonical(eventType), value);
  } | java | {
"resource": ""
} |
q21463 | CollectionUpdateResult.ok | train | public static <T> CollectionUpdateResult<T> ok(T result) {
    // Wraps a successful result value in an OK-status update result.
    return new CollectionUpdateResult<>(Status.OK, result);
  } | java | {
"resource": ""
} |
q21464 | CollectionUpdateResult.noop | train | public static <T> CollectionUpdateResult<T> noop(T result) {
    // Wraps a result value in a NOOP-status update result (no change applied).
    return new CollectionUpdateResult<>(Status.NOOP, result);
  } | java | {
"resource": ""
} |
q21465 | PrimitiveOperation.operation | train | public static PrimitiveOperation operation(OperationId id, byte[] value) {
    // Builds an operation with the simplified operation ID and raw payload.
    return new PrimitiveOperation(OperationId.simplify(id), value);
  } | java | {
"resource": ""
} |
q21466 | Version.from | train | public static Version from(String version) {
String[] fields = version.split("[.-]", 4);
checkArgument(fields.length >= 3, "version number is invalid");
return new Version(
parseInt(fields[0]),
parseInt(fields[1]),
parseInt(fields[2]),
fields.length == 4 ? fields[3] : null);
... | java | {
"resource": ""
} |
q21467 | Version.from | train | public static Version from(int major, int minor, int patch, String build) {
    // Static factory for an explicit major.minor.patch(-build) version.
    return new Version(major, minor, patch, build);
  } | java | {
"resource": ""
} |
q21468 | ConfigMapper.loadFiles | train | public <T> T loadFiles(Class<T> type, List<File> files, List<String> resources) {
if (files == null) {
return loadResources(type, resources);
}
Config config = ConfigFactory.systemProperties();
for (File file : files) {
config = config.withFallback(ConfigFactory.parseFile(file, ConfigParseO... | java | {
"resource": ""
} |
q21469 | ClasspathScanningRegistry.newInstance | train | @SuppressWarnings("unchecked")
  private static <T> T newInstance(Class<?> type) {
    // Instantiate the service type via its no-arg constructor.
    // Class.newInstance() is deprecated (since Java 9) and rethrows any checked
    // exception thrown by the constructor without wrapping; going through
    // getDeclaredConstructor() surfaces all reflective failures uniformly,
    // so they are consistently wrapped in ServiceException here.
    try {
      return (T) type.getDeclaredConstructor().newInstance();
    } catch (ReflectiveOperationException e) {
      throw new ServiceException("Cannot instantiate service class " + type, e);
    }
  } | java | {
"resource": ""
} |
q21470 | AtomixAgent.main | train | public static void main(String[] args) throws Exception {
// Parse the command line arguments.
final List<String> unknown = new ArrayList<>();
final Namespace namespace = parseArgs(args, unknown);
final Namespace extraArgs = parseUnknown(unknown);
extraArgs.getAttrs().forEach((key, value) -> System.... | java | {
"resource": ""
} |
q21471 | AtomixAgent.parseArgs | train | static Namespace parseArgs(String[] args, List<String> unknown) {
ArgumentParser parser = createParser();
Namespace namespace = null;
try {
namespace = parser.parseKnownArgs(args, unknown);
} catch (ArgumentParserException e) {
parser.handleError(e);
System.exit(1);
}
return na... | java | {
"resource": ""
} |
q21472 | AtomixAgent.parseUnknown | train | static Namespace parseUnknown(List<String> unknown) {
Map<String, Object> attrs = new HashMap<>();
String attr = null;
for (String arg : unknown) {
if (arg.startsWith("--")) {
int splitIndex = arg.indexOf('=');
if (splitIndex == -1) {
attr = arg.substring(2);
} else {... | java | {
"resource": ""
} |
q21473 | AtomixAgent.createLogger | train | static Logger createLogger(Namespace namespace) {
String logConfig = namespace.getString("log_config");
if (logConfig != null) {
System.setProperty("logback.configurationFile", logConfig);
}
System.setProperty("atomix.log.directory", namespace.getString("log_dir"));
System.setProperty("atomix.... | java | {
"resource": ""
} |
q21474 | AtomixAgent.createConfig | train | static AtomixConfig createConfig(Namespace namespace) {
final List<File> configFiles = namespace.getList("config");
final String memberId = namespace.getString("member");
final Address address = namespace.get("address");
final String host = namespace.getString("host");
final String rack = namespace.... | java | {
"resource": ""
} |
q21475 | AtomixAgent.buildAtomix | train | private static Atomix buildAtomix(Namespace namespace) {
    // Builds the Atomix instance from the parsed CLI config, with a JVM
    // shutdown hook enabled for clean teardown.
    return Atomix.builder(createConfig(namespace)).withShutdownHookEnabled().build();
  } | java | {
"resource": ""
} |
q21476 | AtomixAgent.buildRestService | train | private static ManagedRestService buildRestService(Atomix atomix, Namespace namespace) {
final String httpHost = namespace.getString("http_host");
final Integer httpPort = namespace.getInt("http_port");
return RestService.builder()
.withAtomix(atomix)
.withAddress(Address.from(httpHost, http... | java | {
"resource": ""
} |
q21477 | Address.from | train | public static Address from(int port) {
    // Bind the given port to this host's local address; surface resolution
    // failures as IllegalArgumentException.
    try {
      InetAddress local = getLocalAddress();
      return new Address(local.getHostName(), port);
    } catch (UnknownHostException e) {
      throw new IllegalArgumentException("Failed to locate host", e);
    }
  } | java | {
"resource": ""
} |
q21478 | Address.getLocalAddress | train | private static InetAddress getLocalAddress() throws UnknownHostException {
    try {
      return InetAddress.getLocalHost(); // first NIC
    } catch (Exception ignore) {
      // Deliberate broad catch: if the local host name cannot be resolved
      // (e.g. misconfigured hosts file), fall back to the loopback address
      // that getByName(null) returns.
      return InetAddress.getByName(null);
    }
  } | java | {
"resource": ""
} |
q21479 | Address.address | train | public InetAddress address(boolean resolve) {
    // With resolve=true, always re-resolve and cache the result.
    if (resolve) {
      address = resolveAddress();
      return address;
    }
    // Lazily resolve at most once using double-checked locking. Read the field
    // into a local so the unsynchronized fast path performs a single read and
    // the method never returns a value from a second, racy field read.
    // NOTE(review): this pattern is only safe if the `address` field is
    // declared volatile — confirm the field declaration.
    InetAddress result = address;
    if (result == null) {
      synchronized (this) {
        result = address;
        if (result == null) {
          result = resolveAddress();
          address = result;
        }
      }
    }
    return result;
  } | java | {
"resource": ""
} |
q21480 | Address.type | train | public Type type() {
    // Lazily classify the address as IPV4 or IPV6 on first use.
    // NOTE(review): double-checked locking here relies on the `type` field
    // being volatile for safe publication — confirm the field declaration.
    if (type == null) {
      synchronized (this) {
        if (type == null) {
          type = address() instanceof Inet6Address ? Type.IPV6 : Type.IPV4;
        }
      }
    }
    return type;
  } | java | {
"resource": ""
} |
q21481 | RaftServiceContext.installSnapshot | train | public void installSnapshot(SnapshotReader reader) {
log.debug("Installing snapshot {}", reader.snapshot().index());
reader.skip(Bytes.LONG); // Skip the service ID
PrimitiveType primitiveType;
try {
primitiveType = raft.getPrimitiveTypes().getPrimitiveType(reader.readString());
} catch (Confi... | java | {
"resource": ""
} |
q21482 | RaftServiceContext.takeSnapshot | train | public void takeSnapshot(SnapshotWriter writer) {
log.debug("Taking snapshot {}", writer.snapshot().index());
// Serialize sessions to the in-memory snapshot and request a snapshot from the state machine.
writer.writeLong(primitiveId.id());
writer.writeString(primitiveType.name());
writer.writeStri... | java | {
"resource": ""
} |
q21483 | RaftServiceContext.openSession | train | public long openSession(long index, long timestamp, RaftSession session) {
log.debug("Opening session {}", session.sessionId());
// Update the state machine index/timestamp.
tick(index, timestamp);
// Set the session timestamp to the current service timestamp.
session.setLastUpdated(currentTimesta... | java | {
"resource": ""
} |
q21484 | RaftServiceContext.keepAlive | train | public boolean keepAlive(long index, long timestamp, RaftSession session, long commandSequence, long eventIndex) {
// If the service has been deleted, just return false to ignore the keep-alive.
if (deleted) {
return false;
}
// Update the state machine index/timestamp.
tick(index, timestamp)... | java | {
"resource": ""
} |
q21485 | RaftServiceContext.keepAliveSessions | train | public void keepAliveSessions(long index, long timestamp) {
log.debug("Resetting session timeouts");
this.currentIndex = index;
this.currentTimestamp = Math.max(currentTimestamp, timestamp);
for (RaftSession session : sessions.getSessions(primitiveId)) {
session.setLastUpdated(timestamp);
}
... | java | {
"resource": ""
} |
q21486 | RaftServiceContext.closeSession | train | public void closeSession(long index, long timestamp, RaftSession session, boolean expired) {
log.debug("Closing session {}", session.sessionId());
// Update the session's timestamp to prevent it from being expired.
session.setLastUpdated(timestamp);
// Update the state machine index/timestamp.
tic... | java | {
"resource": ""
} |
q21487 | RaftServiceContext.executeCommand | train | public OperationResult executeCommand(long index, long sequence, long timestamp, RaftSession session, PrimitiveOperation operation) {
// If the service has been deleted then throw an unknown service exception.
if (deleted) {
log.warn("Service {} has been deleted by another process", serviceName);
th... | java | {
"resource": ""
} |
q21488 | RaftServiceContext.sequenceCommand | train | private OperationResult sequenceCommand(long index, long sequence, RaftSession session) {
    // Look up the cached result for an already-applied command sequence.
    OperationResult cached = session.getResult(sequence);
    if (cached != null) {
      return cached;
    }
    // No cached result exists for this sequence number; log and return null.
    log.debug("Missing command result at index {}", index);
    return null;
  } | java | {
"resource": ""
} |
q21489 | RaftServiceContext.applyCommand | train | private OperationResult applyCommand(long index, long sequence, long timestamp, PrimitiveOperation operation, RaftSession session) {
long eventIndex = session.getEventIndex();
Commit<byte[]> commit = new DefaultCommit<>(index, operation.id(), operation.value(), session, timestamp);
OperationResult result;... | java | {
"resource": ""
} |
q21490 | RaftServiceContext.executeQuery | train | public CompletableFuture<OperationResult> executeQuery(
long index,
long sequence,
long timestamp,
RaftSession session,
PrimitiveOperation operation) {
CompletableFuture<OperationResult> future = new CompletableFuture<>();
executeQuery(index, sequence, timestamp, session, operation... | java | {
"resource": ""
} |
q21491 | RaftServiceContext.executeQuery | train | private void executeQuery(
long index,
long sequence,
long timestamp,
RaftSession session,
PrimitiveOperation operation,
CompletableFuture<OperationResult> future) {
// If the service has been deleted then throw an unknown service exception.
if (deleted) {
log.warn("Ser... | java | {
"resource": ""
} |
q21492 | RaftServiceContext.close | train | public void close() {
    // Close every active session registered against this service, then the
    // service itself, and mark the service deleted.
    for (RaftSession session : sessions.getSessions(serviceId())) {
      session.close();
      service.close(session.sessionId());
    }
    service.close();
    deleted = true;
  } | java | {
"resource": ""
} |
q21493 | DefaultRaftServer.shutdown | train | public CompletableFuture<Void> shutdown() {
if (!started) {
return Futures.exceptionalFuture(new IllegalStateException("Server not running"));
}
CompletableFuture<Void> future = new AtomixFuture<>();
context.getThreadContext().execute(() -> {
started = false;
context.transition(Role.I... | java | {
"resource": ""
} |
q21494 | Generics.getGenericClassType | train | public static Type getGenericClassType(Object instance, Class<?> clazz, int position) {
Class<?> type = instance.getClass();
while (type != Object.class) {
if (type.getGenericSuperclass() instanceof ParameterizedType) {
ParameterizedType genericSuperclass = (ParameterizedType) type.getGenericSuper... | java | {
"resource": ""
} |
q21495 | Generics.getGenericInterfaceType | train | public static Type getGenericInterfaceType(Object instance, Class<?> iface, int position) {
Class<?> type = instance.getClass();
while (type != Object.class) {
for (Type genericType : type.getGenericInterfaces()) {
if (genericType instanceof ParameterizedType) {
ParameterizedType paramet... | java | {
"resource": ""
} |
q21496 | PrimaryRole.heartbeat | train | private void heartbeat() {
    // Replicate a heartbeat carrying the next index and the current wall-clock
    // time; once replication completes, advance the context timestamp.
    final long index = context.nextIndex();
    final long timestamp = System.currentTimeMillis();
    HeartbeatOperation operation = new HeartbeatOperation(index, timestamp);
    replicator.replicate(operation).thenRun(() -> context.setTimestamp(timestamp));
  } | java | {
"resource": ""
} |
q21497 | RaftSessionRegistry.addSession | train | public RaftSession addSession(RaftSession session) {
    // Register the session unless one with the same ID already exists, in
    // which case the existing registration wins and is returned.
    RaftSession existing = sessions.putIfAbsent(session.sessionId().id(), session);
    return existing == null ? session : existing;
  } | java | {
"resource": ""
} |
q21498 | RaftSessionRegistry.getSessions | train | public Collection<RaftSession> getSessions(PrimitiveId primitiveId) {
    // Collect the active sessions bound to the given primitive.
    return sessions.values().stream()
        .filter(session -> session.getService().serviceId().equals(primitiveId)
            && session.getState().active())
        .collect(Collectors.toSet());
  } | java | {
"resource": ""
} |
q21499 | RaftSessionRegistry.removeSessions | train | public void removeSessions(PrimitiveId primitiveId) {
    // Drop every session registered for the given primitive; removing via the
    // values view is equivalent to removing the corresponding map entries.
    sessions.values().removeIf(session -> session.getService().serviceId().equals(primitiveId));
  } | java | {
"resource": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.