| focal_method (string, length 13 – 60.9k) | test_case (string, length 25 – 109k) |
|---|---|
/**
 * Parses a status line into an {@code HttpResponseStatus}, dispatching to the
 * {@link AsciiString} overload when possible to avoid an intermediate String copy.
 *
 * @param line the status line to parse
 * @return the parsed status
 */
public static HttpResponseStatus parseLine(CharSequence line) {
    if (line instanceof AsciiString) {
        return parseLine((AsciiString) line);
    }
    return parseLine(line.toString());
}
|
@Test
public void parseLineStringMalformedCode() {
    // A status code followed by a non-digit character is malformed and must be rejected.
    assertThrows(IllegalArgumentException.class, new Executable() {
        @Override
        public void execute() {
            // Same input as before, restructured only for readability.
            final String malformed = "200a";
            parseLine(malformed);
        }
    });
}
|
/**
 * Submits the given request and returns a future for its result.
 * Delegates to {@code submitInternal} and adapts its (target, result) future
 * into a plain result future via {@code transformFromTargetAndResult}.
 *
 * @param requestBuilder the Glide request to execute
 * @return a future resolving to the loaded resource
 */
public static <T> ListenableFuture<T> submit(final RequestBuilder<T> requestBuilder) {
return transformFromTargetAndResult(submitInternal(requestBuilder));
}
|
@Test
public void testErrorLoad() {
    // The Application object itself is not a loadable model, so the load must fail.
    final ListenableFuture<Bitmap> future =
        GlideFutures.submit(Glide.with(app).asBitmap().load(app));
    // The returned future must complete exceptionally.
    assertThrows(ExecutionException.class, new ThrowingRunnable() {
        @Override
        public void run() throws Throwable {
            Futures.getDone(future);
        }
    });
}
|
/**
 * Reads committed frames from a term buffer, dispatching each non-padding frame to the
 * handler, and advances the subscriber position by the number of bytes consumed.
 *
 * @param termBuffer         buffer containing the frames
 * @param termOffset         offset at which to start reading
 * @param handler            callback invoked once per data fragment
 * @param fragmentsLimit     maximum number of fragments to dispatch in this call
 * @param header             flyweight re-pointed at each frame before dispatch
 * @param errorHandler       sink for exceptions thrown while reading/dispatching
 * @param currentPosition    subscriber position before this read
 * @param subscriberPosition position updated (with ordered publication) after reading
 * @return the number of fragments dispatched
 */
public static int read(
final UnsafeBuffer termBuffer,
final int termOffset,
final FragmentHandler handler,
final int fragmentsLimit,
final Header header,
final ErrorHandler errorHandler,
final long currentPosition,
final Position subscriberPosition)
{
int fragmentsRead = 0;
int offset = termOffset;
final int capacity = termBuffer.capacity();
header.buffer(termBuffer);
try
{
while (fragmentsRead < fragmentsLimit && offset < capacity)
{
// Volatile read: a non-positive length means no fully committed frame is available yet.
final int frameLength = frameLengthVolatile(termBuffer, offset);
if (frameLength <= 0)
{
break;
}
final int frameOffset = offset;
offset += BitUtil.align(frameLength, FRAME_ALIGNMENT);
// Padding frames consume space but are never dispatched to the handler.
if (!isPaddingFrame(termBuffer, frameOffset))
{
++fragmentsRead;
header.offset(frameOffset);
handler.onFragment(termBuffer, frameOffset + HEADER_LENGTH, frameLength - HEADER_LENGTH, header);
}
}
}
catch (final Exception ex)
{
errorHandler.onError(ex);
}
finally
{
// Advance the position even if the handler threw, so consumed bytes are never re-read.
final long newPosition = currentPosition + (offset - termOffset);
if (newPosition > currentPosition)
{
subscriberPosition.setOrdered(newPosition);
}
}
return fragmentsRead;
}
|
@Test
void shouldReadMultipleMessages()
{
// Two consecutive committed frames of one aligned frame length each.
final int msgLength = 1;
final int frameLength = HEADER_LENGTH + msgLength;
final int alignedFrameLength = align(frameLength, FRAME_ALIGNMENT);
final int termOffset = 0;
when(termBuffer.getIntVolatile(0)).thenReturn(frameLength);
when(termBuffer.getIntVolatile(alignedFrameLength)).thenReturn(frameLength);
when(termBuffer.getShort(anyInt())).thenReturn((short)HDR_TYPE_DATA);
final int readOutcome = TermReader.read(
termBuffer, termOffset, handler, Integer.MAX_VALUE, header, errorHandler, 0, subscriberPosition);
// Both frames are data frames, so both should be dispatched.
assertEquals(2, readOutcome);
// Verify reads, dispatches and the final position update happen in order.
final InOrder inOrder = inOrder(termBuffer, handler, subscriberPosition);
inOrder.verify(termBuffer).getIntVolatile(0);
inOrder.verify(handler).onFragment(eq(termBuffer), eq(HEADER_LENGTH), eq(msgLength), any(Header.class));
inOrder.verify(termBuffer).getIntVolatile(alignedFrameLength);
inOrder
.verify(handler)
.onFragment(eq(termBuffer), eq(alignedFrameLength + HEADER_LENGTH), eq(msgLength), any(Header.class));
// Position advances by the total aligned bytes consumed.
inOrder.verify(subscriberPosition).setOrdered(alignedFrameLength * 2L);
}
|
/**
 * Returns the estimated byte size of this row according to the given row type,
 * summing the per-field sizes.
 *
 * NOTE(review): the result is memoized in {@code size} using 0 as a "not computed"
 * sentinel, so a genuinely zero-sized row is recomputed on every call, and later
 * calls with a *different* rowType return the first cached value — confirm callers
 * always pass the same rowType.
 *
 * @param rowType the schema used to size each field
 * @return the cached (or freshly computed) byte size
 */
public int getBytesSize(SeaTunnelRowType rowType) {
if (size == 0) {
int s = 0;
for (int i = 0; i < fields.length; i++) {
s += getBytesForValue(fields[i], rowType.getFieldType(i));
}
size = s;
}
return size;
}
|
@Test
void testForRowSize() {
// A nested map value whose entries are themselves rows, shared by both test rows.
Map<String, Object> map = new HashMap<>();
map.put(
"key1",
new SeaTunnelRow(
new Object[] {
1, "test", 1L, new BigDecimal("3333.333"),
}));
map.put(
"key2",
new SeaTunnelRow(
new Object[] {
1, "test", 1L, new BigDecimal("3333.333"),
}));
// Row with fully populated array fields.
SeaTunnelRow row =
new SeaTunnelRow(
new Object[] {
1,
"test",
1L,
map,
new BigDecimal("3333.333"),
new String[] {"test2", "test", "3333.333"},
new Integer[] {1, 2, 3},
new Long[] {1L, 2L, 3L},
new Double[] {1D, 2D},
new Float[] {1F, 2F},
new Boolean[] {Boolean.TRUE, Boolean.FALSE},
new Byte[] {1, 2, 3, 4},
new Short[] {Short.parseShort("1")}
});
// Same data but with trailing nulls in every array; nulls must not change the size.
SeaTunnelRow row2 =
new SeaTunnelRow(
new Object[] {
1,
"test",
1L,
map,
new BigDecimal("3333.333"),
new String[] {"test2", "test", "3333.333", null},
new Integer[] {1, 2, 3, null},
new Long[] {1L, 2L, 3L, null},
new Double[] {1D, 2D, null},
new Float[] {1F, 2F, null},
new Boolean[] {Boolean.TRUE, Boolean.FALSE, null},
new Byte[] {1, 2, 3, 4, null},
new Short[] {Short.parseShort("1"), null}
});
// Schema matching the 13 fields above, including the nested row-valued map type.
SeaTunnelRowType rowType =
new SeaTunnelRowType(
new String[] {
"f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7", "f8", "f9", "f10",
"f11", "f12"
},
new SeaTunnelDataType<?>[] {
BasicType.INT_TYPE,
BasicType.STRING_TYPE,
BasicType.LONG_TYPE,
new MapType<>(
BasicType.STRING_TYPE,
new SeaTunnelRowType(
new String[] {"f0", "f1", "f2", "f3"},
new SeaTunnelDataType<?>[] {
BasicType.INT_TYPE,
BasicType.STRING_TYPE,
BasicType.LONG_TYPE,
new DecimalType(10, 3)
})),
new DecimalType(10, 3),
ArrayType.STRING_ARRAY_TYPE,
ArrayType.INT_ARRAY_TYPE,
ArrayType.LONG_ARRAY_TYPE,
ArrayType.DOUBLE_ARRAY_TYPE,
ArrayType.FLOAT_ARRAY_TYPE,
ArrayType.BOOLEAN_ARRAY_TYPE,
ArrayType.BYTE_ARRAY_TYPE,
ArrayType.SHORT_ARRAY_TYPE
},
// Both variants (typed and untyped) and both rows must agree on the size.
Assertions.assertEquals(249, row.getBytesSize(rowType));
Assertions.assertEquals(249, row.getBytesSize());
Assertions.assertEquals(249, row2.getBytesSize(rowType));
Assertions.assertEquals(249, row2.getBytesSize());
}
|
/**
 * After a changelog is parsed, looks up Jira issues referenced in commit messages and
 * records them on the build as a {@code JiraBuildAction}. Any failure is logged and
 * swallowed so the build itself is never failed by Jira connectivity problems.
 *
 * @param run       the build whose changelog was parsed
 * @param scm       the SCM that produced the changelog (unused here)
 * @param listener  build listener (unused here)
 * @param changelog parsed changelog entries to scan for issue keys
 */
@Override
public void onChangeLogParsed(Run<?, ?> run, SCM scm, TaskListener listener, ChangeLogSet<?> changelog) throws Exception {
try {
// No configured Jira site for this job: nothing to do.
JiraSite jiraSite = JiraSite.get(run.getParent());
if (jiraSite == null) {
return;
}
// Extract issue keys (e.g. "ABC-123") from the commit messages.
Collection<String> issueKeys = getIssueKeys(changelog, jiraSite.getIssuePattern());
if (issueKeys.isEmpty()) {
return;
}
String jql = constructJQLQuery(issueKeys);
JiraSession session = jiraSite.getSession();
if (session == null) {
return;
}
// Query for JIRA issues
List<Issue> issues = session.getIssuesFromJqlSearch(jql);
Set<JiraIssue> issuesFromJqlSearch = issues == null ? Collections.emptySet() :
issues.stream().map( JiraIssue::new ).collect( Collectors.toSet() );
// If there are no JIRA issues, do not update the actions
if (issuesFromJqlSearch.isEmpty()) {
return;
}
// Create or update the JiraBuildAction
JiraBuildAction action = run.getAction(JiraBuildAction.class);
if (action == null) {
run.addAction(new JiraBuildAction(run, issuesFromJqlSearch));
} else {
action.addIssues(issuesFromJqlSearch);
}
// Persist the new/updated action on disk.
run.save();
} catch (Exception e ){ // we do not want to fail the build if an issue happens here
LOGGER.warn( "Failure executing Jira query to fetch issues. Skipping recording Jira issues.: {}", e.getMessage() );
// stack trace in debug mode
LOGGER.debug( e.getMessage(), e);
}
}
|
@Test
public void onChangeLogParsed() throws Exception {
JiraSCMListener listener = new JiraSCMListener();
// Mock the Jenkins build and a single-entry changelog whose message contains an issue key.
Job job = mock(Job.class);
Run run = mock(Run.class);
ChangeLogSet logSet = mock(ChangeLogSet.class);
final ChangeLogSet.Entry entry = mock(ChangeLogSet.Entry.class);
when(entry.getParent()).thenReturn(logSet);
when(logSet.getRun()).thenReturn(run);
when(run.getParent()).thenReturn(job);
when(entry.getMsg()).thenReturn("TEST-123");
ChangeLogSet<ChangeLogSet.Entry> set = new ChangeLogSet<ChangeLogSet.Entry>(run, null) {
@Override
public boolean isEmptySet() {
return false;
}
@Override
public Iterator<Entry> iterator() {
return Collections.singletonList(entry).iterator();
}
};
// Setup JIRA site
jiraSiteMockedStatic = mockStatic(JiraSite.class);
JiraSite site = mock(JiraSite.class);
JiraSession session = mock(JiraSession.class);
when(site.getIssuePattern()).thenReturn(JiraSite.DEFAULT_ISSUE_PATTERN);
when(site.getSession()).thenReturn(session);
when(JiraSite.get(job)).thenReturn(site);
// The JQL search for the extracted key returns one matching issue.
Issue rawIssue = mock(Issue.class);
when(rawIssue.getKey()).thenReturn("TEST-123");
when(rawIssue.getSummary()).thenReturn("Foo");
when(session.getIssuesFromJqlSearch("key in ('TEST-123')")).thenReturn(Collections.singletonList(rawIssue));
// An existing (empty) action should be updated rather than replaced.
JiraBuildAction action = new JiraBuildAction(run, new HashSet());
when(run.getAction(JiraBuildAction.class)).thenReturn(action);
listener.onChangeLogParsed(run, null,null, set);
// The found issue must now be recorded on the existing action.
Assert.assertFalse(action.getIssues().isEmpty());
JiraIssue issue = action.getIssue("TEST-123");
Assert.assertNotNull(issue);
Assert.assertEquals("TEST-123", issue.getKey());
}
|
/**
 * Parses {@code text} with the configured formatter into a {@code ZonedDateTime},
 * starting from a default date-time in the parsed zone (or {@code zoneId} if the text
 * carries no zone) and overlaying every chrono field the text actually supplied.
 *
 * @param text   the timestamp text to parse
 * @param zoneId fallback zone when the text does not specify one
 * @return the resolved zoned date-time
 * @throws KsqlException if the text supplies a field the target cannot represent,
 *                       or a Feb-29 leap day without an explicit year
 */
@VisibleForTesting
ZonedDateTime parseZoned(final String text, final ZoneId zoneId) {
final TemporalAccessor parsed = formatter.parse(text);
// Prefer the zone embedded in the text; otherwise use the caller-supplied zone.
final ZoneId parsedZone = parsed.query(TemporalQueries.zone());
ZonedDateTime resolved = DEFAULT_ZONED_DATE_TIME.apply(
ObjectUtils.defaultIfNull(parsedZone, zoneId));
for (final TemporalField override : ChronoField.values()) {
if (parsed.isSupported(override)) {
if (!resolved.isSupported(override)) {
throw new KsqlException(
"Unsupported temporal field in timestamp: " + text + " (" + override + ")");
}
final long value = parsed.getLong(override);
if (override == ChronoField.DAY_OF_YEAR && value == LEAP_DAY_OF_THE_YEAR) {
if (!parsed.isSupported(ChronoField.YEAR)) {
throw new KsqlException("Leap day cannot be parsed without supplying the year field");
}
// eagerly override year, to avoid mismatch with epoch year, which is not a leap year
resolved = resolved.withYear(parsed.get(ChronoField.YEAR));
}
resolved = resolved.with(override, value);
}
}
return resolved;
}
|
@Test
public void shouldParseFullLocalDateWithOptionalElements() {
    // Given: a pattern whose time-of-day section is optional, and a date-only input.
    final String pattern = "yyyy-MM-dd[ HH:mm:ss]";
    final String dateOnly = "1605-11-05";
    // When: parsing without the optional time component.
    final ZonedDateTime result = new StringToTimestampParser(pattern).parseZoned(dateOnly, ZID);
    // Then: the date resolves to the expected instant.
    assertThat(result, is(sameInstant(FIFTH_OF_NOVEMBER)));
}
|
/**
 * Converts a YAML check-result configuration back into its domain object.
 * A non-empty ignoredType wins over the matched flag; a null input yields null.
 *
 * @param yamlConfig the YAML representation, possibly null
 * @return the swapped result, or null when the input is null
 */
@Override
public TableDataConsistencyCheckResult swapToObject(final YamlTableDataConsistencyCheckResult yamlConfig) {
    if (null == yamlConfig) {
        return null;
    }
    String ignoredType = yamlConfig.getIgnoredType();
    return Strings.isNullOrEmpty(ignoredType)
            ? new TableDataConsistencyCheckResult(yamlConfig.isMatched())
            : new TableDataConsistencyCheckResult(TableDataConsistencyCheckIgnoredType.valueOf(ignoredType));
}
|
@Test
void assertSwapToObjectWithString() {
    // A YAML document carrying an ignoredType must produce an ignored, non-matched result.
    TableDataConsistencyCheckResult result =
            yamlTableDataConsistencyCheckResultSwapper.swapToObject("ignoredType: NO_UNIQUE_KEY");
    assertThat(result.getIgnoredType(), is(TableDataConsistencyCheckIgnoredType.NO_UNIQUE_KEY));
    assertFalse(result.isMatched());
}
|
/**
 * Requests a hard stop by writing the flag byte 0xFF at offset 1 of the shared-memory
 * file under {@code tmpDir}.
 * (Offset 1 presumably matches org.sonar.process.ProcessCommands' hard-stop slot —
 * TODO confirm against that class.)
 *
 * @param tmpDir directory containing the shared-memory file
 * @throws IOException if the shared memory cannot be written
 */
static void askForHardStop(File tmpDir) throws IOException {
writeToShareMemory(tmpDir, 1, (byte) 0xFF);
}
|
@Test
public void askForHardStop_write_right_bit_with_right_value_in_right_file() throws Exception {
    File tempFolder = temporaryFolder.newFolder();
    Shutdowner.askForHardStop(tempFolder);
    try (RandomAccessFile sharedMemory = new RandomAccessFile(new File(tempFolder, "sharedmemory"), "r")) {
        // Using values from org.sonar.process.ProcessCommands (50 bytes per process, 10 processes)
        MappedByteBuffer mappedByteBuffer = sharedMemory.getChannel().map(FileChannel.MapMode.READ_ONLY, 0, 50L * 10);
        // Byte at offset 1 is the hard-stop flag; askForHardStop must have set it to 0xFF.
        assertThat(mappedByteBuffer.get(1)).isEqualTo((byte) 0xFF);
    }
    // FIX: removed the redundant catch(IOException -> RuntimeException) wrapper. The test
    // already declares `throws Exception`, so wrapping only obscured the real failure cause.
}
|
/**
 * Static factory for an {@code OrderedEventProcessor} wrapping the given handler.
 * Instantiation goes through the AutoValue-generated subclass.
 *
 * @param handler the ordered-processing handler driving event/state/result handling
 * @return a new processor bound to {@code handler}
 */
public static <
EventTypeT,
EventKeyTypeT,
ResultTypeT,
StateTypeT extends MutableState<EventTypeT, ResultTypeT>>
OrderedEventProcessor<EventTypeT, EventKeyTypeT, ResultTypeT, StateTypeT> create(
OrderedProcessingHandler<EventTypeT, EventKeyTypeT, StateTypeT, ResultTypeT> handler) {
return new AutoValue_OrderedEventProcessor<>(handler);
}
|
@Test
public void testHandlingOfDuplicateSequences() throws CannotProvideCoderException {
// Sequence numbers arrive out of order and with duplicates, both before and after
// the underlying events have been processed.
Event[] events = {
Event.create(3, "id-1", "d"),
Event.create(2, "id-1", "c"),
// Duplicates to be buffered
Event.create(3, "id-1", "d"),
Event.create(3, "id-1", "d"),
Event.create(0, "id-1", "a"),
Event.create(1, "id-1", "b"),
// Duplicates after the events are processed
Event.create(1, "id-1", "b"),
Event.create(3, "id-1", "d"),
};
// Four unique sequences produce results; four arrivals are duplicates.
int resultCount = 4;
int duplicateCount = 4;
Collection<KV<String, OrderedProcessingStatus>> expectedStatuses = new ArrayList<>();
expectedStatuses.add(
KV.of(
"id-1",
OrderedProcessingStatus.create(
3L, 0, null, null, events.length, resultCount, duplicateCount, false)));
// Emission on every element yields the cumulative concatenation per sequence.
Collection<KV<String, String>> expectedOutput = new ArrayList<>();
expectedOutput.add(KV.of("id-1", "a"));
expectedOutput.add(KV.of("id-1", "ab"));
expectedOutput.add(KV.of("id-1", "abc"));
expectedOutput.add(KV.of("id-1", "abcd"));
// Each duplicate arrival must be reported as unprocessed with Reason.duplicate.
Collection<KV<String, KV<Long, UnprocessedEvent<String>>>> duplicates = new ArrayList<>();
duplicates.add(KV.of("id-1", KV.of(3L, UnprocessedEvent.create("d", Reason.duplicate))));
duplicates.add(KV.of("id-1", KV.of(3L, UnprocessedEvent.create("d", Reason.duplicate))));
duplicates.add(KV.of("id-1", KV.of(1L, UnprocessedEvent.create("b", Reason.duplicate))));
duplicates.add(KV.of("id-1", KV.of(3L, UnprocessedEvent.create("d", Reason.duplicate))));
testProcessing(
events,
expectedStatuses,
expectedOutput,
duplicates,
EMISSION_FREQUENCY_ON_EVERY_ELEMENT,
INITIAL_SEQUENCE_OF_0,
LARGE_MAX_RESULTS_PER_OUTPUT,
DONT_PRODUCE_STATUS_ON_EVERY_EVENT);
}
|
/**
 * Validates that at least as many arguments as declared parameter types were supplied.
 * Note: both boxed values are dereferenced, so a null argument yields an NPE, exactly
 * as with the original unboxing comparison.
 *
 * @param argsLength  the number of supplied arguments
 * @param typesLength the number of declared parameter types
 * @throws ShenyuException if fewer arguments than types were supplied
 */
public static void checkParamsLength(final Integer argsLength, final Integer typesLength) {
    if (argsLength.compareTo(typesLength) < 0) {
        throw new ShenyuException("args.length < types.length");
    }
}
|
@Test
public void testcheckParamsLength() {
// Equal argument/type counts are valid and must not throw.
assertDoesNotThrow(() -> ParamCheckUtils.checkParamsLength(2, 2));
}
|
/**
 * Compiles a pattern into an NFA factory. A null pattern yields a factory that
 * produces empty NFAs (no states, zero window time).
 *
 * @param pattern         the pattern to compile, may be null
 * @param timeoutHandling whether timed-out partial matches should be emitted
 * @return a factory producing NFAs for the pattern
 */
@SuppressWarnings("unchecked")
public static <T> NFAFactory<T> compileFactory(
        final Pattern<T, ?> pattern, boolean timeoutHandling) {
    if (pattern != null) {
        final NFAFactoryCompiler<T> compiler = new NFAFactoryCompiler<>(pattern);
        compiler.compileFactory();
        return new NFAFactoryImpl<>(
                compiler.getWindowTime(),
                compiler.getWindowTimes(),
                compiler.getStates(),
                timeoutHandling);
    }
    // No pattern given: return a factory for empty NFAs.
    return new NFAFactoryImpl<>(
            0,
            Collections.<String, Long>emptyMap(),
            Collections.<State<T>>emptyList(),
            timeoutHandling);
}
|
@Test
public void testNoUnnecessaryStateCopiesCreated() {
    // A notFollowedBy + oneOrMore combination must not duplicate the terminal state.
    final Pattern<Event, Event> pattern =
            Pattern.<Event>begin("start")
                    .where(startFilter)
                    .notFollowedBy("not")
                    .where(startFilter)
                    .followedBy("oneOrMore")
                    .where(startFilter)
                    .oneOrMore()
                    .followedBy("end")
                    .where(endFilter);
    final NFACompiler.NFAFactoryCompiler<Event> compiler =
            new NFACompiler.NFAFactoryCompiler<>(pattern);
    compiler.compileFactory();
    // Count states named "end"; exactly one copy may exist.
    long endStates = 0;
    for (State<Event> state : compiler.getStates()) {
        if ("end".equals(state.getName())) {
            endStates++;
        }
    }
    assertEquals(1, endStates);
}
|
/**
 * Enriches the latest job stats into the extended snapshot. Only one thread at a time
 * refreshes the derived state; concurrent callers simply return the last snapshot.
 *
 * @param jobStats the newly observed stats
 * @return the (possibly just refreshed) extended stats snapshot
 */
public JobStatsExtended enrich(JobStats jobStats) {
    JobStats latestJobStats = getLatestJobStats(jobStats, previousJobStats);
    if (lock.tryLock()) {
        try {
            setFirstRelevantJobStats(latestJobStats);
            setJobStatsExtended(latestJobStats);
            setPreviousJobStats(latestJobStats);
        } finally {
            // BUG FIX: unlock() previously ran outside a finally block, so an exception in
            // any setter would leave the lock held forever and silently stop all future
            // enrichment. Releasing in finally guarantees the lock is always returned.
            lock.unlock();
        }
    }
    return jobStatsExtended;
}
|
@Test
void firstRelevantJobStatsIsUpdatedAfterWorkIsDone() {
// Three successive snapshots: idle, working (10 enqueued), then idle again with more done.
JobStats firstJobStats = getJobStats(0L, 0L, 0L, 100L);
jobStatsEnricher.enrich(firstJobStats);
SleepUtils.sleep(2); //sleeping as JVM is too fast and runs code in the same nanosecond
JobStats secondJobStats = getJobStats(10L, 0L, 0L, 100L);
jobStatsEnricher.enrich(secondJobStats);
SleepUtils.sleep(2); //sleeping as JVM is too fast and runs code in the same nanosecond
JobStats thirdJobStats = getJobStats(0L, 0L, 0L, 110L);
jobStatsEnricher.enrich(thirdJobStats);
SleepUtils.sleep(2); //sleeping as JVM is too fast and runs code in the same nanosecond
// Once the work finished, the "first relevant" baseline must reset to the latest snapshot.
JobStats jobStats = Whitebox.getInternalState(jobStatsEnricher, "firstRelevantJobStats");
assertThat(jobStats).isEqualToComparingFieldByField(thirdJobStats);
}
|
/**
 * Compares only the major.minor pair of two versions; any further components are ignored.
 *
 * @param version the version to compare against
 * @return true when major and minor both match
 */
public boolean isEqualTo(Version version) {
    boolean sameMajor = major == version.major;
    boolean sameMinor = minor == version.minor;
    return sameMajor && sameMinor;
}
|
@Test
public void isEqualTo() throws Exception {
// Matching major.minor is equal; a different major is not.
assertTrue(V3_0.isEqualTo(of(3, 0)));
assertFalse(V3_0.isEqualTo(of(4, 0)));
}
|
/**
 * Resolves a child node by name, where the name must be a numeric controller id.
 * Returns null for non-numeric names and for unknown ids.
 *
 * @param name the child name (expected: a controller id)
 * @return a leaf node rendering the registration, or null
 */
@Override
public MetadataNode child(String name) {
    final int controllerId;
    try {
        controllerId = Integer.parseInt(name);
    } catch (NumberFormatException e) {
        // Non-numeric names cannot address a controller.
        return null;
    }
    ControllerRegistration registration = image.controllers().get(controllerId);
    return registration == null ? null : new MetadataLeafNode(registration.toString());
}
|
@Test
public void testNode1Child() {
// Looking up controller id "2" must yield a leaf rendering its registration.
MetadataNode child = NODE.child("2");
assertNotNull(child);
assertEquals("ControllerRegistration(id=2, " +
"incarnationId=adGo6sTPS0uJshjvdTUmqQ, " +
"zkMigrationReady=false, " +
"listeners=[], " +
"supportedFeatures={metadata.version: 1-4})",
child.stringify());
}
|
/**
 * Creates an empty client entity; all properties are populated via setters.
 * (A no-arg constructor is presumably required by the persistence/serialization
 * framework — TODO confirm against the entity's annotations.)
 */
public ClientDetailsEntity() {
}
|
@Test
public void testClientDetailsEntity() {
Date now = new Date();
// Populate every setter with representative OIDC registration values...
ClientDetailsEntity c = new ClientDetailsEntity();
c.setClientId("s6BhdRkqt3");
c.setClientSecret("ZJYCqe3GGRvdrudKyZS0XhGv_Z45DuKhCUk0gBR1vZk");
c.setApplicationType(ClientDetailsEntity.AppType.WEB);
c.setRedirectUris(ImmutableSet.of("https://client.example.org/callback", "https://client.example.org/callback2"));
c.setClientName("My Example");
c.setLogoUri("https://client.example.org/logo.png");
c.setSubjectType(ClientDetailsEntity.SubjectType.PAIRWISE);
c.setSectorIdentifierUri("https://other.example.net/file_of_redirect_uris.json");
c.setTokenEndpointAuthMethod(ClientDetailsEntity.AuthMethod.SECRET_BASIC);
c.setJwksUri("https://client.example.org/my_public_keys.jwks");
c.setUserInfoEncryptedResponseAlg(JWEAlgorithm.RSA1_5);
c.setUserInfoEncryptedResponseEnc(EncryptionMethod.A128CBC_HS256);
c.setContacts(ImmutableSet.of("ve7jtb@example.org", "mary@example.org"));
c.setRequestUris(ImmutableSet.of("https://client.example.org/rf.txt#qpXaRLh_n93TTR9F252ValdatUQvQiJi5BDub2BeznA"));
c.setCreatedAt(now);
c.setAccessTokenValiditySeconds(600);
// ...then verify every getter round-trips the value unchanged.
assertEquals("s6BhdRkqt3", c.getClientId());
assertEquals("ZJYCqe3GGRvdrudKyZS0XhGv_Z45DuKhCUk0gBR1vZk", c.getClientSecret());
assertEquals(ClientDetailsEntity.AppType.WEB, c.getApplicationType());
assertEquals(ImmutableSet.of("https://client.example.org/callback", "https://client.example.org/callback2"), c.getRedirectUris());
assertEquals("My Example", c.getClientName());
assertEquals("https://client.example.org/logo.png", c.getLogoUri());
assertEquals(ClientDetailsEntity.SubjectType.PAIRWISE, c.getSubjectType());
assertEquals("https://other.example.net/file_of_redirect_uris.json", c.getSectorIdentifierUri());
assertEquals(ClientDetailsEntity.AuthMethod.SECRET_BASIC, c.getTokenEndpointAuthMethod());
assertEquals("https://client.example.org/my_public_keys.jwks", c.getJwksUri());
assertEquals(JWEAlgorithm.RSA1_5, c.getUserInfoEncryptedResponseAlg());
assertEquals(EncryptionMethod.A128CBC_HS256, c.getUserInfoEncryptedResponseEnc());
assertEquals(ImmutableSet.of("ve7jtb@example.org", "mary@example.org"), c.getContacts());
assertEquals(ImmutableSet.of("https://client.example.org/rf.txt#qpXaRLh_n93TTR9F252ValdatUQvQiJi5BDub2BeznA"), c.getRequestUris());
assertEquals(now, c.getCreatedAt());
assertEquals(600, c.getAccessTokenValiditySeconds().intValue());
}
|
/**
 * Builds the complete Elasticsearch settings map by delegating to the per-concern
 * configure* helpers, then logs the resolved HTTP and TCP endpoints.
 *
 * @return the assembled settings map
 */
public Map<String, String> build() {
Map<String, String> builder = new HashMap<>();
configureFileSystem(builder);
configureNetwork(builder);
configureCluster(builder);
configureSecurity(builder);
configureOthers(builder);
// Informational only: surfaces the endpoints the helpers resolved above.
LOGGER.info("Elasticsearch listening on [HTTP: {}:{}, TCP: {}:{}]",
builder.get(ES_HTTP_HOST_KEY), builder.get(ES_HTTP_PORT_KEY),
builder.get(ES_TRANSPORT_HOST_KEY), builder.get(ES_TRANSPORT_PORT_KEY));
return builder;
}
|
@Test
public void configureSecurity_whenHttpKeystoreNotProvided_shouldNotAddHttpProperties() throws Exception {
// Cluster TLS is configured (keystore/truststore/password) but no HTTP keystore is set.
Props props = minProps(true);
File keystore = temp.newFile("keystore.p12");
File truststore = temp.newFile("truststore.p12");
props.set(CLUSTER_SEARCH_PASSWORD.getKey(), "qwerty");
props.set(CLUSTER_ES_KEYSTORE.getKey(), keystore.getAbsolutePath());
props.set(CLUSTER_ES_TRUSTSTORE.getKey(), truststore.getAbsolutePath());
EsSettings settings = new EsSettings(props, new EsInstallation(props), system);
Map<String, String> outputParams = settings.build();
// Without an HTTP keystore, HTTP-SSL xpack properties must be absent entirely.
assertThat(outputParams)
.doesNotContainKey("xpack.security.http.ssl.enabled")
.doesNotContainKey("xpack.security.http.ssl.keystore.path");
}
|
/**
 * Registers the given promises with this aggregator and subscribes to their completion.
 * Null entries in the array are skipped; a null array is rejected.
 *
 * @param promises promises to aggregate
 * @return this aggregator, for chaining
 */
@SafeVarargs
public final PromiseAggregator<V, F> add(Promise<V>... promises) {
    ObjectUtil.checkNotNull(promises, "promises");
    if (promises.length == 0) {
        return this;
    }
    synchronized (this) {
        if (pendingPromises == null) {
            // Same sizing rule as before: use the array length when it exceeds one, else 2.
            final int initialCapacity = promises.length > 1 ? promises.length : 2;
            pendingPromises = new LinkedHashSet<Promise<V>>(initialCapacity);
        }
        for (Promise<V> promise : promises) {
            if (promise != null) {
                pendingPromises.add(promise);
                promise.addListener(this);
            }
        }
    }
    return this;
}
|
@Test
public void testAddNullFuture() {
    @SuppressWarnings("unchecked")
    Promise<Void> promise = mock(Promise.class);
    @SuppressWarnings("deprecation")
    final PromiseAggregator<Void, Future<Void>> aggregator =
            new PromiseAggregator<Void, Future<Void>>(promise);
    // A null promises array must be rejected eagerly.
    assertThrows(NullPointerException.class, new Executable() {
        @Override
        public void execute() {
            final Promise<Void>[] none = null;
            aggregator.add(none);
        }
    });
}
|
/**
 * Resolves the minimum lock-hold duration from the annotation, preferring the typed
 * value, then the string expression, then the configured default.
 *
 * @param annotation the parsed @SchedulerLock data
 * @return the effective lockAtLeastFor duration
 */
Duration getLockAtLeastFor(AnnotationData annotation) {
return getValue(
annotation.getLockAtLeastFor(),
annotation.getLockAtLeastForString(),
this.defaultLockAtLeastFor,
"lockAtLeastForString");
}
|
@Test
public void shouldGetPositiveGracePeriodFromAnnotationWithString() throws NoSuchMethodException {
    noopResolver();
    // The annotated method declares its grace period as a string expression.
    SpringLockConfigurationExtractor.AnnotationData annotationData =
        getAnnotation("annotatedMethodWithPositiveGracePeriodWithString");
    TemporalAmount lockAtLeastFor = extractor.getLockAtLeastFor(annotationData);
    assertThat(lockAtLeastFor).isEqualTo(Duration.ofMillis(10));
}
|
/**
 * Advances to the next row of the k-way ordered merge. The head of the priority queue
 * is always the current row; advancing pulls the head's underlying result forward and
 * re-offers it so the queue re-sorts by order-by values.
 *
 * @return true if a current row is available after advancing
 * @throws SQLException if the underlying query result fails to advance
 */
@Override
public boolean next() throws SQLException {
if (orderByValuesQueue.isEmpty()) {
return false;
}
// The very first call must not consume: the queue head is already the first row.
if (isFirstNext) {
isFirstNext = false;
return true;
}
OrderByValue firstOrderByValue = orderByValuesQueue.poll();
// Re-offer only if that source still has rows; exhausted sources drop out of the merge.
if (firstOrderByValue.next()) {
orderByValuesQueue.offer(firstOrderByValue);
}
if (orderByValuesQueue.isEmpty()) {
return false;
}
setCurrentQueryResult(orderByValuesQueue.peek().getQueryResult());
return true;
}
|
@Test
void assertNextForMix() throws SQLException {
// Three sharded result sets with interleaved ordered values; the merger must emit
// them as a single globally ordered stream: 1,2,2,3,3,4.
List<QueryResult> queryResults = Arrays.asList(mock(QueryResult.class), mock(QueryResult.class), mock(QueryResult.class));
for (int i = 0; i < 3; i++) {
QueryResultMetaData metaData = mock(QueryResultMetaData.class);
when(queryResults.get(i).getMetaData()).thenReturn(metaData);
when(metaData.getColumnName(1)).thenReturn("col1");
when(metaData.getColumnName(2)).thenReturn("col2");
}
ShardingDQLResultMerger resultMerger = new ShardingDQLResultMerger(TypedSPILoader.getService(DatabaseType.class, "MySQL"));
// Shard values: [2], [2,3,4], [1,3] (each value is stubbed twice for repeated reads).
when(queryResults.get(0).next()).thenReturn(true, false);
when(queryResults.get(0).getValue(1, Object.class)).thenReturn("2");
when(queryResults.get(1).next()).thenReturn(true, true, true, false);
when(queryResults.get(1).getValue(1, Object.class)).thenReturn("2", "2", "3", "3", "4", "4");
when(queryResults.get(2).next()).thenReturn(true, true, false);
when(queryResults.get(2).getValue(1, Object.class)).thenReturn("1", "1", "3", "3");
MergedResult actual = resultMerger.merge(queryResults, selectStatementContext, createDatabase(), mock(ConnectionContext.class));
assertTrue(actual.next());
assertThat(actual.getValue(1, Object.class).toString(), is("1"));
assertTrue(actual.next());
assertThat(actual.getValue(1, Object.class).toString(), is("2"));
assertTrue(actual.next());
assertThat(actual.getValue(1, Object.class).toString(), is("2"));
assertTrue(actual.next());
assertThat(actual.getValue(1, Object.class).toString(), is("3"));
assertTrue(actual.next());
assertThat(actual.getValue(1, Object.class).toString(), is("3"));
assertTrue(actual.next());
assertThat(actual.getValue(1, Object.class).toString(), is("4"));
// All shards exhausted: no further rows.
assertFalse(actual.next());
}
|
/**
 * Delegates timestamp extraction for the raw consumer record to the wrapped
 * Kafka Streams {@code TimestampExtractor}.
 *
 * @param record            the consumer record to extract from
 * @param previousTimestamp the previously extracted timestamp
 * @return the extracted timestamp
 */
@Override
public long extract(
final ConsumerRecord<Object, Object> record,
final long previousTimestamp
) {
return timestampExtractor.extract(record, previousTimestamp);
}
|
@Test(expected = UnsupportedOperationException.class)
public void shouldThrowUnsupportedExceptionOnExtractGenericRow() {
// when/Then: the (Struct, GenericRow) overload is intentionally unsupported.
new MetadataTimestampExtractor(timestampExtractor)
.extract(mock(Struct.class), mock(GenericRow.class));
}
|
/**
 * Creates the native cache entity from a content-pack entity. Only V1 entities are
 * supported; any other version is rejected.
 *
 * @param entity         the content-pack entity to decode
 * @param parameters     parameter values referenced by the entity
 * @param nativeEntities already-created native entities (unused here)
 * @param username       the acting user (unused here)
 * @return the created native cache entity
 * @throws IllegalArgumentException if the entity is not a V1 entity
 */
@Override
public NativeEntity<CacheDto> createNativeEntity(Entity entity,
                                                 Map<String, ValueReference> parameters,
                                                 Map<EntityDescriptor, Object> nativeEntities,
                                                 String username) {
    if (!(entity instanceof EntityV1)) {
        throw new IllegalArgumentException("Unsupported entity version: " + entity.getClass());
    }
    return decode((EntityV1) entity, parameters);
}
|
@Test
public void createNativeEntity() {
// A V1 content-pack entity describing a no-op lookup cache.
final Entity entity = EntityV1.builder()
.id(ModelId.of("1"))
.type(ModelTypes.LOOKUP_CACHE_V1)
.data(objectMapper.convertValue(LookupCacheEntity.create(
ValueReference.of(DefaultEntityScope.NAME),
ValueReference.of("no-op-cache"),
ValueReference.of("No-op cache"),
ValueReference.of("No-op cache"),
ReferenceMapUtils.toReferenceMap(ImmutableMap.of("type", "none"))
), JsonNode.class))
.build();
// Precondition: no caches exist yet.
assertThat(cacheService.findAll()).isEmpty();
final NativeEntity<CacheDto> nativeEntity = facade.createNativeEntity(entity, Collections.emptyMap(), Collections.emptyMap(), "username");
final NativeEntityDescriptor descriptor = nativeEntity.descriptor();
final CacheDto cacheDto = nativeEntity.entity();
// The created entity mirrors the content-pack data and was persisted exactly once.
assertThat(nativeEntity.descriptor().id()).isNotNull();
assertThat(descriptor.type()).isEqualTo(ModelTypes.LOOKUP_CACHE_V1);
assertThat(cacheDto.name()).isEqualTo("no-op-cache");
assertThat(cacheDto.title()).isEqualTo("No-op cache");
assertThat(cacheDto.description()).isEqualTo("No-op cache");
assertThat(cacheDto.config().type()).isEqualTo("none");
assertThat(cacheService.findAll()).hasSize(1);
}
|
/**
 * Builds a {@code KsqlSecurityContext} for the request: a user-specific service
 * context when a user-context provider and principal are both present, otherwise
 * a default service context.
 *
 * @param apiSecurityContext the per-request security context (principal, auth header, headers)
 * @return the security context to use for this request
 */
@Override
public KsqlSecurityContext provide(final ApiSecurityContext apiSecurityContext) {
final Optional<KsqlPrincipal> principal = apiSecurityContext.getPrincipal();
final Optional<String> authHeader = apiSecurityContext.getAuthHeader();
final List<Entry<String, String>> requestHeaders = apiSecurityContext.getRequestHeaders();
// A user context is not necessary if a user context provider is not present or the user
// principal is missing. If a failed authentication attempt results in a missing principle,
// then the authentication plugin will have already failed the connection before calling
// this method. Therefore, if we've reached this method with a missing principle, then this
// must be a valid connection that does not require authentication.
// For these cases, we create a default service context that the missing user can use.
final boolean requiresUserContext =
securityExtension != null
&& securityExtension.getUserContextProvider().isPresent()
&& principal.isPresent();
if (!requiresUserContext) {
return new KsqlSecurityContext(
principal,
defaultServiceContextFactory.create(
ksqlConfig,
authHeader,
schemaRegistryClientFactory,
connectClientFactory,
sharedClient,
requestHeaders,
principal)
);
}
// Safe: requiresUserContext guarantees the provider and the principal are both present.
return securityExtension.getUserContextProvider()
.map(provider -> new KsqlSecurityContext(
principal,
userServiceContextFactory.create(
ksqlConfig,
authHeader,
provider.getKafkaClientSupplier(principal.get()),
provider.getSchemaRegistryClientFactory(principal.get()),
connectClientFactory,
sharedClient,
requestHeaders,
principal)))
.get();
}
|
@Test
public void shouldPassAuthHeaderToDefaultFactory() {
// Given: no user-context provider, so the default factory path is taken.
when(securityExtension.getUserContextProvider()).thenReturn(Optional.empty());
when(apiSecurityContext.getAuthHeader()).thenReturn(Optional.of("some-auth"));
// When:
ksqlSecurityContextProvider.provide(apiSecurityContext);
// Then: the auth header is forwarded verbatim to the default service-context factory.
verify(defaultServiceContextFactory).create(any(), eq(Optional.of("some-auth")), any(), any(), any(), any(), any());
}
|
/**
 * Re-exposes the inherited equality assertion unchanged; overridden here
 * (presumably to narrow the Javadoc/visibility on this subject type — TODO confirm).
 */
@Override
public void isEqualTo(@Nullable Object expected) {
super.isEqualTo(expected);
}
|
@Test
public void isEqualTo_WithoutToleranceParameter_Fail_NotEqual() {
// Exact float comparison: a value just above 2.2f must fail and report the index.
expectFailureWhenTestingThat(array(2.2f)).isEqualTo(array(JUST_OVER_2POINT2));
assertFailureKeys("expected", "but was", "differs at index");
assertFailureValue("expected", "[" + floatToString(JUST_OVER_2POINT2) + "]");
assertFailureValue("but was", "[" + floatToString(2.2f) + "]");
assertFailureValue("differs at index", "[0]");
}
|
/**
 * Extracts a Docker-save/Jib tarball, caches its container configuration, and kicks off
 * parallel compression/caching of each layer.
 *
 * @param buildContext                  build configuration, cache and executor
 * @param tarPath                       path to the image tarball
 * @param progressEventDispatcherFactory factory for progress reporting
 * @param tempDirectoryProvider         provider of a scratch extraction directory
 * @return the local image with in-flight layer futures and its configuration
 * @throws IOException                  on extraction or JSON read failure
 * @throws LayerCountMismatchException  if manifest and config disagree on layer count
 */
@VisibleForTesting
static LocalImage cacheDockerImageTar(
BuildContext buildContext,
Path tarPath,
ProgressEventDispatcher.Factory progressEventDispatcherFactory,
TempDirectoryProvider tempDirectoryProvider)
throws IOException, LayerCountMismatchException {
ExecutorService executorService = buildContext.getExecutorService();
Path destination = tempDirectoryProvider.newDirectory();
try (TimerEventDispatcher ignored =
new TimerEventDispatcher(
buildContext.getEventHandlers(),
"Extracting tar " + tarPath + " into " + destination)) {
TarExtractor.extract(tarPath, destination);
// Read the first manifest entry; case-insensitive to tolerate differing tar producers.
DockerManifestEntryTemplate loadManifest;
try (InputStream manifestStream =
Files.newInputStream(destination.resolve("manifest.json"))) {
loadManifest =
JsonMapper.builder()
.configure(MapperFeature.ACCEPT_CASE_INSENSITIVE_PROPERTIES, true)
.build()
.readValue(manifestStream, DockerManifestEntryTemplate[].class)[0];
}
Path configPath = destination.resolve(loadManifest.getConfig());
ContainerConfigurationTemplate configurationTemplate =
JsonTemplateMapper.readJsonFromFile(configPath, ContainerConfigurationTemplate.class);
// Don't compute the digest of the loaded Java JSON instance.
BlobDescriptor originalConfigDescriptor =
Blobs.from(configPath).writeTo(ByteStreams.nullOutputStream());
List<String> layerFiles = loadManifest.getLayerFiles();
if (configurationTemplate.getLayerCount() != layerFiles.size()) {
throw new LayerCountMismatchException(
"Invalid base image format: manifest contains "
+ layerFiles.size()
+ " layers, but container configuration contains "
+ configurationTemplate.getLayerCount()
+ " layers");
}
buildContext
.getBaseImageLayersCache()
.writeLocalConfig(originalConfigDescriptor.getDigest(), configurationTemplate);
// Check the first layer to see if the layers are compressed already. 'docker save' output
// is uncompressed, but a jib-built tar has compressed layers.
boolean layersAreCompressed =
!layerFiles.isEmpty() && isGzipped(destination.resolve(layerFiles.get(0)));
// Process layer blobs
try (ProgressEventDispatcher progressEventDispatcher =
progressEventDispatcherFactory.create(
"processing base image layers", layerFiles.size())) {
// Start compressing layers in parallel
List<Future<PreparedLayer>> preparedLayers = new ArrayList<>();
for (int index = 0; index < layerFiles.size(); index++) {
Path layerFile = destination.resolve(layerFiles.get(index));
DescriptorDigest diffId = configurationTemplate.getLayerDiffId(index);
ProgressEventDispatcher.Factory layerProgressDispatcherFactory =
progressEventDispatcher.newChildProducer();
preparedLayers.add(
executorService.submit(
() ->
compressAndCacheTarLayer(
buildContext.getBaseImageLayersCache(),
diffId,
layerFile,
layersAreCompressed,
layerProgressDispatcherFactory)));
}
// Layer futures are still running; callers await them when assembling the image.
return new LocalImage(preparedLayers, configurationTemplate);
}
}
}
|
@Test
public void testCacheDockerImageTar_validTar() throws Exception {
// A known two-layer jib-built tar; diff IDs and digests below are its fixed hashes.
Path tarBuild = getResource("core/extraction/jib-image.tar");
LocalImage result =
LocalBaseImageSteps.cacheDockerImageTar(
buildContext, tarBuild, progressEventDispatcherFactory, tempDirectoryProvider);
// One child progress producer per layer.
Mockito.verify(progressEventDispatcher, Mockito.times(2)).newChildProducer();
Assert.assertEquals(2, result.layers.size());
Assert.assertEquals(
"5e701122d3347fae0758cd5b7f0692c686fcd07b0e7fd9c4a125fbdbbedc04dd",
result.layers.get(0).get().getDiffId().getHash());
Assert.assertEquals(
"0011328ac5dfe3dde40c7c5e0e00c98d1833a3aeae2bfb668cf9eb965c229c7f",
result.layers.get(0).get().getBlobDescriptor().getDigest().getHash());
Assert.assertEquals(
"f1ac3015bcbf0ada4750d728626eb10f0f585199e2b667dcd79e49f0e926178e",
result.layers.get(1).get().getDiffId().getHash());
Assert.assertEquals(
"c10ef24a5cef5092bbcb5a5666721cff7b86ce978c203a958d1fc86ee6c19f94",
result.layers.get(1).get().getBlobDescriptor().getDigest().getHash());
Assert.assertEquals(2, result.configurationTemplate.getLayerCount());
}
|
/**
 * Returns the shared, stateless parser singleton.
 *
 * @return the parser instance
 */
public static Parser parser() {
return ParserImpl.INSTANCE;
}
|
@Test
void testCharsetInputValidation() {
// All four parse/parseSilently overloads must reject a null charsets array eagerly.
assertThrows(NullPointerException.class, new Executable() {
@Override
public void execute() throws IOException {
HostsFileEntriesProvider.parser().parse((Charset[]) null);
}
});
assertThrows(NullPointerException.class, new Executable() {
@Override
public void execute() throws IOException {
HostsFileEntriesProvider.parser().parse(new File(""), (Charset[]) null);
}
});
assertThrows(NullPointerException.class, new Executable() {
@Override
public void execute() {
HostsFileEntriesProvider.parser().parseSilently((Charset[]) null);
}
});
assertThrows(NullPointerException.class, new Executable() {
@Override
public void execute() {
HostsFileEntriesProvider.parser().parseSilently(new File(""), (Charset[]) null);
}
});
}
|
/**
 * Returns the simple name following the last '.' in the given class name,
 * or null when the input is null, contains no separator, or ends with one.
 */
String extractStem(String className) {
    if (className == null) {
        return null;
    }
    final int separator = className.lastIndexOf(CoreConstants.DOT);
    final boolean hasStem = separator != -1 && separator + 1 < className.length();
    return hasStem ? className.substring(separator + 1) : null;
}
|
@Test
public void testStemExtraction() {
    // null, no separator, and trailing-dot inputs must all yield null;
    // otherwise the text after the last '.' is returned.
    assertNull(imh.extractStem(null));
    assertNull(imh.extractStem(""));
    assertNull(imh.extractStem("bla."));
    assertEquals("Foo", imh.extractStem("bla.Foo"));
    assertEquals("Foo", imh.extractStem("com.titi.bla.Foo"));
}
|
/**
 * Convenience overload that runs the fit with default settings
 * (0.0001 and 20 — presumably convergence tolerance and max iterations;
 * semantics per the main fit overload — TODO confirm).
 */
public static LevenbergMarquardt fit(DifferentiableMultivariateFunction func, double[] x, double[] y, double[] p) {
    return fit(func, x, y, p, 0.0001, 20);
}
|
@Test
public void test() {
    System.out.println("LevenbergMarquardt");
    MathEx.setSeed(19650218); // to get repeatable results.
    // Synthetic data: y = 1 / (1 + 1.2 * x^1.8) plus Gaussian noise.
    double[] x = new double[100];
    double[] y = new double[100];
    GaussianDistribution d = new GaussianDistribution(0.0, 1);
    for (int i = 0; i < x.length; i++) {
        x[i] = (i+1) * 0.05;
        y[i] = 1.0 / (1 + 1.2 * Math.pow(x[i], 1.8)) + d.rand() * 0.03;
    }
    // Initial guess; the fit should recover roughly (1.2, 1.8).
    double[] p = {0.5, 0.0};
    LevenbergMarquardt lma = LevenbergMarquardt.fit(func, x, y, p);
    assertEquals(0.0863, lma.sse, 1E-4);
    assertEquals(1.2260, lma.parameters[0], 1E-4);
    assertEquals(1.8024, lma.parameters[1], 1E-4);
}
|
/**
 * Reverse-resolves an address to its ENS name via the reverse registrar.
 *
 * @throws EnsResolutionException if the address is not a valid address
 * @throws RuntimeException if the lookup fails or yields an invalid name
 */
public String reverseResolve(String address) {
    if (!WalletUtils.isValidAddress(address, addressLength)) {
        throw new EnsResolutionException("Address is invalid: " + address);
    }
    String reverseName = Numeric.cleanHexPrefix(address) + REVERSE_NAME_SUFFIX;
    PublicResolver resolver = obtainOffchainResolver(reverseName);
    byte[] nameHash = NameHash.nameHashAsBytes(reverseName);
    final String name;
    try {
        name = resolver.name(nameHash).send();
    } catch (Exception e) {
        throw new RuntimeException("Unable to execute Ethereum request", e);
    }
    if (!isValidEnsName(name, addressLength)) {
        throw new RuntimeException("Unable to resolve name for address: " + address);
    }
    return name;
}
|
@Test
public void testReverseResolve() throws Exception {
    configureSyncing(false);
    configureLatestBlock(System.currentTimeMillis() / 1000); // block timestamp is in seconds
    NetVersion netVersion = new NetVersion();
    netVersion.setResult(Long.toString(ChainIdLong.MAINNET));
    String resolverAddress =
            "0x0000000000000000000000004c641fb9bad9b60ef180c31f56051ce826d21a9a";
    String contractName =
            "0x0000000000000000000000000000000000000000000000000000000000000020"
                    + TypeEncoder.encode(new Utf8String("web3j.eth"));
    EthCall resolverAddressResponse = new EthCall();
    resolverAddressResponse.setResult(resolverAddress);
    EthCall contractNameResponse = new EthCall();
    contractNameResponse.setResult(contractName);
    when(web3jService.send(any(Request.class), eq(NetVersion.class))).thenReturn(netVersion);
    // Consecutive stubbing: the first eth_call returns the resolver address,
    // the second returns the contract name. (Two separate when(...) calls on
    // the same matchers would let the later stub override the earlier one, so
    // the resolver-address response would never be served.)
    when(web3jService.send(any(Request.class), eq(EthCall.class)))
            .thenReturn(resolverAddressResponse)
            .thenReturn(contractNameResponse);
    // Expected value first per JUnit convention.
    assertEquals(
            "web3j.eth",
            ensResolver.reverseResolve("0x19e03255f667bdfd50a32722df860b1eeaf4d635"));
}
|
/**
 * Returns a windowing of fixed-size week multiples (7 * number days) in UTC,
 * with window boundaries aligned to the given start day of the week.
 */
public static DaysWindows weeks(int number, int startDayOfWeek) {
    return new DaysWindows(
        7 * number, DEFAULT_START_DATE.withDayOfWeek(startDayOfWeek), DateTimeZone.UTC);
}
|
@Test
public void testWeeks() throws Exception {
    // Weekly windows aligned to Wednesday: timestamps within the same
    // Wednesday-to-Wednesday span must land in the same window.
    Map<IntervalWindow, Set<String>> expected = new HashMap<>();
    final List<Long> timestamps =
        Arrays.asList(
            makeTimestamp(2014, 1, 1, 0, 0).getMillis(),
            makeTimestamp(2014, 1, 5, 5, 5).getMillis(),
            makeTimestamp(2014, 1, 8, 0, 0).getMillis(),
            makeTimestamp(2014, 1, 12, 5, 5).getMillis(),
            makeTimestamp(2015, 1, 1, 0, 0).getMillis(),
            makeTimestamp(2015, 1, 6, 5, 5).getMillis());
    expected.put(
        new IntervalWindow(makeTimestamp(2014, 1, 1, 0, 0), makeTimestamp(2014, 1, 8, 0, 0)),
        set(timestamps.get(0), timestamps.get(1)));
    expected.put(
        new IntervalWindow(makeTimestamp(2014, 1, 8, 0, 0), makeTimestamp(2014, 1, 15, 0, 0)),
        set(timestamps.get(2), timestamps.get(3)));
    // Note the 2015 window starts on Wed 2014-12-31, spanning the year boundary.
    expected.put(
        new IntervalWindow(makeTimestamp(2014, 12, 31, 0, 0), makeTimestamp(2015, 1, 7, 0, 0)),
        set(timestamps.get(4), timestamps.get(5)));
    assertEquals(
        expected, runWindowFn(CalendarWindows.weeks(1, DateTimeConstants.WEDNESDAY), timestamps));
}
|
/**
 * Resolves a MIME type for the given file name: first via
 * {@link Files#probeContentType}, then falling back to
 * {@link URLConnection#guessContentTypeFromName}. Returns null when the
 * name is null, has no extension, or no type can be determined.
 */
@Override
@SuppressFBWarnings("PATH_TRAVERSAL_IN") // suppressing because we are using the getValidFilePath
public String getMimeType(String file) {
    if (file == null || !file.contains(".")) {
        return null;
    }
    String mimeType = null;
    // may not work on Lambda until mailcap package is present https://github.com/aws/serverless-java-container/pull/504
    try {
        mimeType = Files.probeContentType(Paths.get(file));
    } catch (IOException | InvalidPathException e) {
        log("unable to probe for content type, will use fallback", e);
    }
    if (mimeType != null) {
        return mimeType;
    }
    // Probe failed or was inconclusive: guess from the bare file name.
    try {
        String guessed = URLConnection.guessContentTypeFromName(new File(file).getName());
        if (guessed != null) {
            return guessed;
        }
    } catch (Exception e) {
        log("couldn't find a better contentType than " + mimeType + " for file " + file, e);
    }
    return mimeType;
}
|
@Test
@Disabled
void getMimeType_disabledPath_expectException() {
    // Disabled: exercises a path outside the allowed roots; expects either
    // null or an IllegalArgumentException starting with "File path not allowed".
    AwsServletContext ctx = new AwsServletContext(null);
    try {
        assertNull(ctx.getMimeType("/usr/local/lib/nothing"));
    } catch (IllegalArgumentException e) {
        assertTrue(e.getMessage().startsWith("File path not allowed"));
    } catch (Exception e) {
        e.printStackTrace();
        fail("Unrecognized exception");
    }
}
|
/**
 * Returns true when any of the given cluster addresses resolves to this
 * host's local IP address. Hosts that fail to resolve are logged and skipped.
 */
public static boolean containsLocalIp(List<InetSocketAddress> clusterAddresses,
    AlluxioConfiguration conf) {
  int resolutionTimeoutMs =
      (int) conf.getMs(PropertyKey.NETWORK_HOST_RESOLUTION_TIMEOUT_MS);
  String localAddressIp = getLocalIpAddress(resolutionTimeoutMs);
  for (InetSocketAddress addr : clusterAddresses) {
    try {
      String nodeIp = InetAddress.getByName(addr.getHostName()).getHostAddress();
      if (nodeIp.equals(localAddressIp)) {
        return true;
      }
    } catch (UnknownHostException e) {
      LOG.error("Get raft cluster node ip by hostname({}) failed",
          addr.getHostName(), e);
    }
  }
  return false;
}
|
@Test
public void testContainsLocalIP() {
    // A cluster list containing the local host name (plus two unrelated
    // hosts) must be reported as containing the local IP.
    List<InetSocketAddress> clusterAddresses = new ArrayList<>();
    InetSocketAddress raftNodeAddress1 = new InetSocketAddress(NetworkAddressUtils
        .getLocalHostName(
            (int) mConfiguration.getMs(PropertyKey.NETWORK_HOST_RESOLUTION_TIMEOUT_MS)),
        10);
    InetSocketAddress raftNodeAddress2 = new InetSocketAddress("host2", 20);
    InetSocketAddress raftNodeAddress3 = new InetSocketAddress("host3", 30);
    clusterAddresses.add(raftNodeAddress1);
    clusterAddresses.add(raftNodeAddress2);
    clusterAddresses.add(raftNodeAddress3);
    assertTrue(NetworkAddressUtils.containsLocalIp(clusterAddresses, mConfiguration));
}
|
/**
 * Returns the configured Kerberos realm.
 */
public String getRealm() {
    return realm;
}
|
@Test
public void testRules() throws Exception {
    // Principal-to-username translation: default realm principals keep their
    // short name; FOO.COM rules remap selected instance components.
    checkTranslation("omalley@" + KerberosTestUtils.getRealm(), "omalley");
    checkTranslation("hdfs/10.0.0.1@" + KerberosTestUtils.getRealm(), "hdfs");
    checkTranslation("oom@YAHOO.COM", "oom");
    checkTranslation("johndoe/zoo@FOO.COM", "guest");
    checkTranslation("joe/admin@FOO.COM", "joe");
    checkTranslation("joe/root@FOO.COM", "root");
}
|
/**
 * This stream does not support mark/reset.
 *
 * @return always {@code false}
 */
@Override
public boolean markSupported() {
    return false;
}
|
@Test
public void testMarkSupported() throws Exception {
    // The wrapping stream must report mark/reset as unsupported.
    try (InputStream sample = new ByteArrayInputStream(sample1.getBytes());
        JsonArrayFixingInputStream instance = new JsonArrayFixingInputStream(sample)) {
        assertFalse(instance.markSupported());
    }
}
|
/**
 * Writes the analysis log file: plugins, bundled analyzers, then global,
 * project and module settings, in that fixed order.
 *
 * @throws IllegalStateException if the file cannot be written
 */
public void init(ScannerReportWriter writer) {
    File analysisLog = writer.getFileStructure().analysisLog();
    try (BufferedWriter fileWriter = Files.newBufferedWriter(analysisLog.toPath(), StandardCharsets.UTF_8)) {
        writePlugins(fileWriter);
        writeBundledAnalyzers(fileWriter);
        writeGlobalSettings(fileWriter);
        writeProjectSettings(fileWriter);
        writeModulesSettings(fileWriter);
    } catch (IOException e) {
        throw new IllegalStateException("Unable to write analysis log", e);
    }
}
|
@Test
public void init_splitsPluginsByTypeInTheFile() throws IOException {
    // One external plugin and one bundled analyzer; each must be listed
    // under its own section header in the analysis log.
    DefaultInputModule parent = new DefaultInputModule(ProjectDefinition.create()
        .setBaseDir(temp.newFolder())
        .setWorkDir(temp.newFolder())
        .setProperty("sonar.projectKey", "parent")
        .setProperty(SONAR_SKIP, "true"));
    when(hierarchy.root()).thenReturn(parent);
    when(pluginRepo.getExternalPluginsInfos()).thenReturn(List.of(new PluginInfo("xoo").setName("Xoo").setVersion(Version.create("1.0"))));
    when(pluginRepo.getBundledPluginsInfos()).thenReturn(List.of(new PluginInfo("java").setName("Java").setVersion(Version.create("9.7"))));
    publisher.init(writer);
    List<String> lines = FileUtils.readLines(writer.getFileStructure().analysisLog(), StandardCharsets.UTF_8);
    // Removed leftover debug output (System.out.println) that polluted test logs.
    assertThat(lines).contains("Plugins:",
        " - Xoo 1.0 (xoo)",
        "Bundled analyzers:",
        " - Java 9.7 (java)");
}
|
/**
 * Updates the in-memory topic config and then persists the change keyed by
 * topic name.
 */
public void updateTopicConfig(final TopicConfig topicConfig) {
    updateSingleTopicConfigWithoutPersist(topicConfig);
    this.persist(topicConfig.getTopicName(), topicConfig);
}
|
@Test
public void testAddWrongValueOnCreating() {
    // Creating a topic with an enum attribute value outside the allowed set
    // must be rejected with a descriptive RuntimeException.
    Map<String, String> attributes = new HashMap<>();
    attributes.put("+" + TopicAttributes.QUEUE_TYPE_ATTRIBUTE.getName(), "wrong-value");
    TopicConfig topicConfig = new TopicConfig();
    topicConfig.setTopicName("new-topic");
    topicConfig.setAttributes(attributes);
    RuntimeException runtimeException = Assert.assertThrows(RuntimeException.class, () -> topicConfigManager.updateTopicConfig(topicConfig));
    Assert.assertEquals("value is not in set: [SimpleCQ, BatchCQ]", runtimeException.getMessage());
}
|
/**
 * Maps each record to a new key/value pair; delegates to the named variant
 * with an empty name.
 */
@Override
public <KR, VR> KStream<KR, VR> map(final KeyValueMapper<? super K, ? super V, ? extends KeyValue<? extends KR, ? extends VR>> mapper) {
    return map(mapper, NamedInternal.empty());
}
|
@Test
public void shouldNotAllowNullMapperOnMap() {
    // A null mapper must fail fast with a descriptive NPE.
    final NullPointerException exception = assertThrows(
        NullPointerException.class,
        () -> testStream.map(null));
    assertThat(exception.getMessage(), equalTo("mapper can't be null"));
}
|
/**
 * Adds a node to the hash ring, expanding it into VIRTUAL_NODE_SIZE virtual
 * nodes for a more even key distribution, plus one entry under the raw key.
 *
 * NOTE(review): the ring is cleared on every call (per the linked issue #79
 * fix), so only the most recently added node's entries remain — confirm
 * callers rely on this single-node behavior.
 */
@Override
public void add(long key, String value) {
    // fix https://github.com/crossoverJie/cim/issues/79
    treeMap.clear();
    for (int i = 0; i < VIRTUAL_NODE_SIZE; i++) {
        Long hash = super.hash("vir" + key + i);
        treeMap.put(hash,value);
    }
    // The raw key is also inserted un-hashed alongside its virtual nodes.
    treeMap.put(key, value);
}
|
@Test
public void getFirstNodeValue2() {
    // With 10 candidate nodes, the key "zhangsan2" must deterministically
    // route to 127.0.0.3 on the virtual-node ring.
    AbstractConsistentHash map = new TreeMapConsistentHash() ;
    List<String> strings = new ArrayList<String>();
    for (int i = 0; i < 10; i++) {
        strings.add("127.0.0." + i) ;
    }
    String process = map.process(strings,"zhangsan2");
    System.out.println(process);
    Assert.assertEquals("127.0.0.3",process);
}
|
/**
 * Validates records and assigns offsets, choosing among three paths:
 * the compressed path when either side uses compression, otherwise in-place
 * validation when the magic matches, or conversion when it does not.
 */
public ValidationResult validateMessagesAndAssignOffsets(PrimitiveRef.LongRef offsetCounter,
                                                         MetricsRecorder metricsRecorder,
                                                         BufferSupplier bufferSupplier) {
    boolean uncompressed = sourceCompressionType == CompressionType.NONE
        && targetCompression.type() == CompressionType.NONE;
    if (!uncompressed) {
        // Compression on either side requires the full decompress/re-validate path.
        return validateMessagesAndAssignOffsetsCompressed(offsetCounter, metricsRecorder, bufferSupplier);
    }
    if (records.hasMatchingMagic(toMagic)) {
        // Do in-place validation, offset assignment and maybe set timestamp
        return assignOffsetsNonCompressed(offsetCounter, metricsRecorder);
    }
    // Magic differs: convert the batch format while assigning offsets.
    return convertAndAssignOffsetsNonCompressed(offsetCounter, metricsRecorder);
}
|
@Test
public void testNonIncreasingOffsetRecordBatchHasMetricsLogged() {
    // Build a batch whose last-offset header (2) disagrees with the appended
    // record offsets, which must be rejected and counted as an invalid offset.
    ByteBuffer buf = ByteBuffer.allocate(512);
    MemoryRecordsBuilder builder = MemoryRecords.builder(buf, RecordBatch.MAGIC_VALUE_V2, Compression.NONE, TimestampType.CREATE_TIME, 0L);
    builder.appendWithOffset(0, RecordBatch.NO_TIMESTAMP, null, "hello".getBytes());
    builder.appendWithOffset(2, RecordBatch.NO_TIMESTAMP, null, "there".getBytes());
    builder.appendWithOffset(3, RecordBatch.NO_TIMESTAMP, null, "beautiful".getBytes());
    MemoryRecords records = builder.build();
    records.batches().iterator().next().setLastOffset(2);
    assertThrows(InvalidRecordException.class, () -> new LogValidator(
        records,
        new TopicPartition("topic", 0),
        time,
        CompressionType.GZIP,
        Compression.gzip().build(),
        false,
        RecordBatch.MAGIC_VALUE_V0,
        TimestampType.CREATE_TIME,
        5000L,
        5000L,
        RecordBatch.NO_PARTITION_LEADER_EPOCH,
        AppendOrigin.CLIENT,
        MetadataVersion.latestTesting()
    ).validateMessagesAndAssignOffsets(
        PrimitiveRef.ofLong(0L), metricsRecorder, RequestLocal.withThreadConfinedCaching().bufferSupplier()
    ));
    // Expected value first per JUnit convention (was reversed).
    assertEquals(1, metricsRecorder.recordInvalidOffsetCount);
}
|
/**
 * Registers this node in the ZooKeeper group under the configured path.
 * Serialized under the instance lock; at most one registration may be
 * active — callers must deregister before re-registering.
 *
 * @param nodeId  identifier used as the member's ZK node name
 * @param payload node infos serialized to a properties string as the data
 * @return true when registered; false for a null/empty payload or when a
 *         member is already registered
 */
public boolean register(String nodeId, List<ZookeeperNodeInfo> payload) {
    if (payload == null || payload.isEmpty()) {
        return false;
    }
    lock.lock();
    try {
        createPathIfNotExisted();
        if (member != null) {
            LOG.warn("GroupMember has already registered. Please deregister first.");
            return false;
        }
        String payloadString = getPropertiesString(payload);
        member = new GroupMember(client, path, nodeId, payloadString.getBytes());
        member.start();
        LOG.info("Register Node[" + nodeId + "] in path[" + path + "].");
        return true;
    } finally {
        lock.unlock();
    }
}
|
@Test
public void testRegister() throws Exception {
    // Two node infos (only the first has a database) are registered under a
    // single ZK child; duplicate and null registrations must be rejected.
    List<ZookeeperNodeInfo> payload = new ArrayList<ZookeeperNodeInfo>();
    ZookeeperNodeInfo node1 = new ZookeeperNodeInfo();
    node1.setPrefix("foo");
    node1.setHost("127.0.0.1");
    node1.setPort(1234);
    node1.setDatabase("foo_db");
    node1.setUsername("foo");
    node1.setPassword("password");
    payload.add(node1);
    ZookeeperNodeInfo node2 = new ZookeeperNodeInfo();
    node2.setPrefix("bar");
    node2.setHost("127.0.0.1");
    node2.setPort(5678);
    node2.setUsername("bar");
    node2.setPassword("password");
    payload.add(node2);
    assertFalse(register.register("test-foo", null));
    assertTrue(register.register("test-foo", payload));
    assertFalse(register.register("test-foo", payload));
    Thread.sleep(1000); // Wait for the node to be created.
    CuratorFramework client = register.getClient();
    List<String> children = client.getChildren().forPath(PATH);
    assertFalse(children.isEmpty());
    assertEquals(1, children.size());
    assertEquals("test-foo", children.get(0));
    // The node data round-trips as a java.util.Properties string.
    byte[] bytes = client.getData().forPath(PATH + "/test-foo");
    Properties properties = new Properties();
    String str = new String(bytes);
    LOG.info("ZK Data: " + str);
    properties.load(new StringReader(str));
    validateNodeProperties(node1, properties);
    validateNodeProperties(node2, properties);
    // Only node1 declared a database, so only "foo.database" may be present.
    assertTrue(properties.containsKey("foo.database"));
    assertFalse(properties.containsKey("bar.database"));
    assertEquals("foo_db", properties.getProperty("foo.database"));
}
|
/**
 * Updates a job's status in the database and mirrors the change into Quartz
 * (resume for NORMAL, pause for STOP). Runs in a transaction.
 *
 * @param id     job id
 * @param status target status; must be NORMAL or STOP
 * @throws SchedulerException if the Quartz operation fails
 */
@Override
@Transactional(rollbackFor = Exception.class)
public void updateJobStatus(Long id, Integer status) throws SchedulerException {
    // Validate the target status value (only NORMAL/STOP are allowed).
    if (!containsAny(status, JobStatusEnum.NORMAL.getStatus(), JobStatusEnum.STOP.getStatus())) {
        throw exception(JOB_CHANGE_STATUS_INVALID);
    }
    // Validate the job exists.
    JobDO job = validateJobExists(id);
    // Reject a no-op change to the status the job already has.
    if (job.getStatus().equals(status)) {
        throw exception(JOB_CHANGE_STATUS_EQUALS);
    }
    // Update the job status in the database.
    JobDO updateObj = JobDO.builder().id(id).status(status).build();
    jobMapper.updateById(updateObj);
    // Propagate the status change to Quartz.
    if (JobStatusEnum.NORMAL.getStatus().equals(status)) { // resume
        schedulerManager.resumeJob(job.getHandlerName());
    } else { // pause
        schedulerManager.pauseJob(job.getHandlerName());
    }
}
|
@Test
public void testUpdateJobStatus_changeStatusInvalid() {
    // Invoke with a status outside {NORMAL, STOP} and assert the expected
    // service exception.
    assertServiceException(() -> jobService.updateJobStatus(1L, JobStatusEnum.INIT.getStatus()),
        JOB_CHANGE_STATUS_INVALID);
}
|
/**
 * Pushes the given controller list to the OVSDB device, but only when it
 * differs (as a set) from the controllers currently configured — avoids a
 * redundant write when nothing changed.
 */
@Override
public void setControllers(List<ControllerInfo> controllers) {
    DriverHandler handler = handler();
    OvsdbClientService clientService = getOvsdbClientService(handler);
    if (!clientService.getControllers(handler().data().deviceId())
        .equals(ImmutableSet.copyOf(controllers))) {
        clientService.setControllersWithDeviceId(handler().
            data().deviceId(), controllers);
    }
}
|
@Test
public void testSetControllers() throws Exception {
    // TODO(review): empty test body — setControllers is never exercised;
    // add real coverage or remove this placeholder.
}
|
/**
 * Creates a new empty headers map with case-insensitive key lookup.
 */
@Override
public Map<String, Object> newMap() {
    return new CaseInsensitiveMap<>();
}
|
@Test
public void testLookupCaseAgnostic() {
    // A stored key must resolve regardless of the lookup's casing.
    Map<String, Object> map = new FastHeadersMapFactory().newMap();
    assertNull(map.get("foo"));
    map.put("foo", "cheese");
    for (String key : new String[] {"foo", "Foo", "FOO"}) {
        assertEquals("cheese", map.get(key));
    }
}
|
/**
 * Maps an OpenWire {@link ActiveMQMessage} onto an AMQP encoded message.
 * The AMQP header/properties/annotation/footer sections are created lazily —
 * only when some JMS field or "JMS_AMQP_*" property requires them — so a
 * minimal message stays minimal on the wire.
 *
 * @param message the OpenWire message to transform; may be null
 * @return the AMQP-encoded message, or null when the input is null
 * @throws Exception if the body or properties cannot be read or converted
 */
@Override
public EncodedMessage transform(ActiveMQMessage message) throws Exception {
    if (message == null) {
        return null;
    }
    long messageFormat = 0;
    Header header = null;
    Properties properties = null;
    Map<Symbol, Object> daMap = null;
    Map<Symbol, Object> maMap = null;
    Map<String,Object> apMap = null;
    Map<Object, Object> footerMap = null;
    Section body = convertBody(message);
    // JMS persistent delivery maps to the AMQP durable header flag.
    if (message.isPersistent()) {
        if (header == null) {
            header = new Header();
        }
        header.setDurable(true);
    }
    byte priority = message.getPriority();
    if (priority != Message.DEFAULT_PRIORITY) {
        if (header == null) {
            header = new Header();
        }
        header.setPriority(UnsignedByte.valueOf(priority));
    }
    String type = message.getType();
    if (type != null) {
        if (properties == null) {
            properties = new Properties();
        }
        properties.setSubject(type);
    }
    MessageId messageId = message.getMessageId();
    if (messageId != null) {
        if (properties == null) {
            properties = new Properties();
        }
        properties.setMessageId(getOriginalMessageId(message));
    }
    ActiveMQDestination destination = message.getDestination();
    if (destination != null) {
        if (properties == null) {
            properties = new Properties();
        }
        properties.setTo(destination.getQualifiedName());
        if (maMap == null) {
            maMap = new HashMap<>();
        }
        maMap.put(JMS_DEST_TYPE_MSG_ANNOTATION, destinationType(destination));
    }
    ActiveMQDestination replyTo = message.getReplyTo();
    if (replyTo != null) {
        if (properties == null) {
            properties = new Properties();
        }
        properties.setReplyTo(replyTo.getQualifiedName());
        if (maMap == null) {
            maMap = new HashMap<>();
        }
        maMap.put(JMS_REPLY_TO_TYPE_MSG_ANNOTATION, destinationType(replyTo));
    }
    String correlationId = message.getCorrelationId();
    if (correlationId != null) {
        if (properties == null) {
            properties = new Properties();
        }
        // Prefer a typed AMQP message-id object; fall back to the raw string
        // when the correlation id does not parse as one.
        try {
            properties.setCorrelationId(AMQPMessageIdHelper.INSTANCE.toIdObject(correlationId));
        } catch (AmqpProtocolException e) {
            properties.setCorrelationId(correlationId);
        }
    }
    long expiration = message.getExpiration();
    if (expiration != 0) {
        // NOTE(review): TTL is recomputed from "now"; an already-expired
        // message is forwarded with a 1 ms TTL — confirm this is intentional.
        long ttl = expiration - System.currentTimeMillis();
        if (ttl < 0) {
            ttl = 1;
        }
        if (header == null) {
            header = new Header();
        }
        header.setTtl(new UnsignedInteger((int) ttl));
        if (properties == null) {
            properties = new Properties();
        }
        properties.setAbsoluteExpiryTime(new Date(expiration));
    }
    long timeStamp = message.getTimestamp();
    if (timeStamp != 0) {
        if (properties == null) {
            properties = new Properties();
        }
        properties.setCreationTime(new Date(timeStamp));
    }
    // JMSX Message Properties
    int deliveryCount = message.getRedeliveryCounter();
    if (deliveryCount > 0) {
        if (header == null) {
            header = new Header();
        }
        header.setDeliveryCount(UnsignedInteger.valueOf(deliveryCount));
    }
    String userId = message.getUserID();
    if (userId != null) {
        if (properties == null) {
            properties = new Properties();
        }
        properties.setUserId(new Binary(userId.getBytes(StandardCharsets.UTF_8)));
    }
    String groupId = message.getGroupID();
    if (groupId != null) {
        if (properties == null) {
            properties = new Properties();
        }
        properties.setGroupId(groupId);
    }
    int groupSequence = message.getGroupSequence();
    if (groupSequence > 0) {
        if (properties == null) {
            properties = new Properties();
        }
        properties.setGroupSequence(UnsignedInteger.valueOf(groupSequence));
    }
    final Map<String, Object> entries;
    try {
        entries = message.getProperties();
    } catch (IOException e) {
        throw JMSExceptionSupport.create(e);
    }
    // Map individual message properties: "JMS_AMQP_*" keys re-populate AMQP
    // sections; everything else lands in application-properties.
    for (Map.Entry<String, Object> entry : entries.entrySet()) {
        String key = entry.getKey();
        Object value = entry.getValue();
        if (key.startsWith(JMS_AMQP_PREFIX)) {
            if (key.startsWith(NATIVE, JMS_AMQP_PREFIX_LENGTH)) {
                // skip transformer appended properties
                continue;
            } else if (key.startsWith(ORIGINAL_ENCODING, JMS_AMQP_PREFIX_LENGTH)) {
                // skip transformer appended properties
                continue;
            } else if (key.startsWith(MESSAGE_FORMAT, JMS_AMQP_PREFIX_LENGTH)) {
                messageFormat = (long) TypeConversionSupport.convert(entry.getValue(), Long.class);
                continue;
            } else if (key.startsWith(HEADER, JMS_AMQP_PREFIX_LENGTH)) {
                if (header == null) {
                    header = new Header();
                }
                continue;
            } else if (key.startsWith(PROPERTIES, JMS_AMQP_PREFIX_LENGTH)) {
                if (properties == null) {
                    properties = new Properties();
                }
                continue;
            } else if (key.startsWith(MESSAGE_ANNOTATION_PREFIX, JMS_AMQP_PREFIX_LENGTH)) {
                if (maMap == null) {
                    maMap = new HashMap<>();
                }
                String name = key.substring(JMS_AMQP_MESSAGE_ANNOTATION_PREFIX.length());
                maMap.put(Symbol.valueOf(name), value);
                continue;
            } else if (key.startsWith(FIRST_ACQUIRER, JMS_AMQP_PREFIX_LENGTH)) {
                if (header == null) {
                    header = new Header();
                }
                header.setFirstAcquirer((boolean) TypeConversionSupport.convert(value, Boolean.class));
                continue;
            } else if (key.startsWith(CONTENT_TYPE, JMS_AMQP_PREFIX_LENGTH)) {
                if (properties == null) {
                    properties = new Properties();
                }
                properties.setContentType(Symbol.getSymbol((String) TypeConversionSupport.convert(value, String.class)));
                continue;
            } else if (key.startsWith(CONTENT_ENCODING, JMS_AMQP_PREFIX_LENGTH)) {
                if (properties == null) {
                    properties = new Properties();
                }
                properties.setContentEncoding(Symbol.getSymbol((String) TypeConversionSupport.convert(value, String.class)));
                continue;
            } else if (key.startsWith(REPLYTO_GROUP_ID, JMS_AMQP_PREFIX_LENGTH)) {
                if (properties == null) {
                    properties = new Properties();
                }
                properties.setReplyToGroupId((String) TypeConversionSupport.convert(value, String.class));
                continue;
            } else if (key.startsWith(DELIVERY_ANNOTATION_PREFIX, JMS_AMQP_PREFIX_LENGTH)) {
                if (daMap == null) {
                    daMap = new HashMap<>();
                }
                String name = key.substring(JMS_AMQP_DELIVERY_ANNOTATION_PREFIX.length());
                daMap.put(Symbol.valueOf(name), value);
                continue;
            } else if (key.startsWith(FOOTER_PREFIX, JMS_AMQP_PREFIX_LENGTH)) {
                if (footerMap == null) {
                    footerMap = new HashMap<>();
                }
                String name = key.substring(JMS_AMQP_FOOTER_PREFIX.length());
                footerMap.put(Symbol.valueOf(name), value);
                continue;
            }
        } else if (key.startsWith(AMQ_SCHEDULED_MESSAGE_PREFIX )) {
            // strip off the scheduled message properties
            continue;
        }
        // The property didn't map into any other slot so we store it in the
        // Application Properties section of the message.
        if (apMap == null) {
            apMap = new HashMap<>();
        }
        apMap.put(key, value);
        // NOTE(review): this data-structure tagging sits inside the property
        // loop, so a message with no application properties never gets the
        // "ActiveMqDataStructureType" entry — confirm that is intended.
        int messageType = message.getDataStructureType();
        if (messageType == CommandTypes.ACTIVEMQ_MESSAGE) {
            // Type of command to recognize advisory message
            Object data = message.getDataStructure();
            if(data != null) {
                apMap.put("ActiveMqDataStructureType", data.getClass().getSimpleName());
            }
        }
    }
    // Encode the present sections in standard AMQP section order.
    final AmqpWritableBuffer buffer = new AmqpWritableBuffer();
    encoder.setByteBuffer(buffer);
    if (header != null) {
        encoder.writeObject(header);
    }
    if (daMap != null) {
        encoder.writeObject(new DeliveryAnnotations(daMap));
    }
    if (maMap != null) {
        encoder.writeObject(new MessageAnnotations(maMap));
    }
    if (properties != null) {
        encoder.writeObject(properties);
    }
    if (apMap != null) {
        encoder.writeObject(new ApplicationProperties(apMap));
    }
    if (body != null) {
        encoder.writeObject(body);
    }
    if (footerMap != null) {
        encoder.writeObject(new Footer(footerMap));
    }
    return new EncodedMessage(messageFormat, buffer.getArray(), 0, buffer.getArrayLength());
}
|
@Test
public void testConvertConnectionInfo() throws Exception {
    // A ConnectionInfo data structure must be tagged in application
    // properties and its fields carried in the AMQP body map.
    String connectionId = "myConnectionId";
    String clientId = "myClientId";
    ConnectionInfo dataStructure = new ConnectionInfo();
    dataStructure.setConnectionId(new ConnectionId(connectionId));
    dataStructure.setClientId(clientId);
    ActiveMQMessage outbound = createMessage();
    // At least one plain property is required so application-properties is
    // populated (and with it the data-structure tag).
    Map<String, String> properties = new HashMap<String, String>();
    properties.put("originUrl", "localhost");
    outbound.setProperties(properties);
    outbound.setDataStructure(dataStructure);
    outbound.onSend();
    outbound.storeContent();
    JMSMappingOutboundTransformer transformer = new JMSMappingOutboundTransformer();
    EncodedMessage encoded = transformer.transform(outbound);
    assertNotNull(encoded);
    Message amqp = encoded.decode();
    assertNotNull(amqp.getApplicationProperties());
    Map<String, Object> apMap = amqp.getApplicationProperties().getValue();
    assertEquals(ConnectionInfo.class.getSimpleName(), apMap.get("ActiveMqDataStructureType"));
    assertNotNull(amqp.getBody());
    assertTrue(amqp.getBody() instanceof AmqpValue);
    assertTrue(((AmqpValue) amqp.getBody()).getValue() instanceof Map);
    @SuppressWarnings("unchecked")
    Map<Object, Object> amqpMap = (Map<Object, Object>) ((AmqpValue) amqp.getBody()).getValue();
    assertTrue(connectionId.equals(amqpMap.get("ConnectionId")));
    assertTrue(clientId.equals(amqpMap.get("ClientId")));
}
|
/**
 * Converts an ODPS {@link TypeInfo} into the catalog {@link Type},
 * recursing into MAP/ARRAY/STRUCT element types.
 *
 * NOTE(review): any unrecognized ODPS type silently falls back to VARCHAR —
 * confirm that lossy default is intended rather than an error.
 */
public static Type convertType(TypeInfo typeInfo) {
    switch (typeInfo.getOdpsType()) {
        case BIGINT:
            return Type.BIGINT;
        case INT:
            return Type.INT;
        case SMALLINT:
            return Type.SMALLINT;
        case TINYINT:
            return Type.TINYINT;
        case FLOAT:
            return Type.FLOAT;
        case DECIMAL:
            DecimalTypeInfo decimalTypeInfo = (DecimalTypeInfo) typeInfo;
            return ScalarType.createUnifiedDecimalType(decimalTypeInfo.getPrecision(), decimalTypeInfo.getScale());
        case DOUBLE:
            return Type.DOUBLE;
        case CHAR:
            CharTypeInfo charTypeInfo = (CharTypeInfo) typeInfo;
            return ScalarType.createCharType(charTypeInfo.getLength());
        case VARCHAR:
            VarcharTypeInfo varcharTypeInfo = (VarcharTypeInfo) typeInfo;
            return ScalarType.createVarcharType(varcharTypeInfo.getLength());
        case STRING:
        case JSON:
            // JSON is mapped to the default string type as well.
            return ScalarType.createDefaultCatalogString();
        case BINARY:
            return Type.VARBINARY;
        case BOOLEAN:
            return Type.BOOLEAN;
        case DATE:
            return Type.DATE;
        case TIMESTAMP:
        case DATETIME:
            return Type.DATETIME;
        case MAP:
            MapTypeInfo mapTypeInfo = (MapTypeInfo) typeInfo;
            return new MapType(convertType(mapTypeInfo.getKeyTypeInfo()),
                convertType(mapTypeInfo.getValueTypeInfo()));
        case ARRAY:
            ArrayTypeInfo arrayTypeInfo = (ArrayTypeInfo) typeInfo;
            return new ArrayType(convertType(arrayTypeInfo.getElementTypeInfo()));
        case STRUCT:
            StructTypeInfo structTypeInfo = (StructTypeInfo) typeInfo;
            List<Type> fieldTypeList =
                structTypeInfo.getFieldTypeInfos().stream().map(EntityConvertUtils::convertType)
                    .collect(Collectors.toList());
            return new StructType(fieldTypeList);
        default:
            return Type.VARCHAR;
    }
}
|
@Test
public void testConvertTypeCaseSmallint() {
    // ODPS SMALLINT must map directly to the catalog SMALLINT type.
    TypeInfo typeInfo = TypeInfoFactory.SMALLINT;
    Type result = EntityConvertUtils.convertType(typeInfo);
    assertEquals(Type.SMALLINT, result);
}
|
/**
 * Intentionally a no-op: profile properties are discarded by this
 * implementation (presumably a disabled/stub SDK variant — the accompanying
 * test asserts no track-event callback fires).
 */
@Override
public void profileSet(JSONObject properties) {
}
|
@Test
public void profileSet() {
    // profileSet is a no-op in this build, so the track-event callback must
    // never be invoked; fail if it is.
    mSensorsAPI.setTrackEventCallBack(new SensorsDataTrackEventCallBack() {
        @Override
        public boolean onTrackEvent(String eventName, JSONObject eventProperties) {
            Assert.fail();
            return false;
        }
    });
    mSensorsAPI.profileSet("abcde", "123");
}
|
/**
 * Fires an async offset auto-commit when auto-commit is enabled and the
 * auto-commit interval has elapsed; otherwise just advances the timer.
 */
public void maybeAutoCommitOffsetsAsync(long now) {
    if (!autoCommitEnabled) {
        return;
    }
    nextAutoCommitTimer.update(now);
    if (nextAutoCommitTimer.isExpired()) {
        // Re-arm the timer before committing so the next interval starts now.
        nextAutoCommitTimer.reset(autoCommitIntervalMs);
        autoCommitOffsetsAsync();
    }
}
|
@Test
public void testAutoCommitAfterCoordinatorBackToService() {
    // With the coordinator marked unknown, an auto-commit attempt must
    // rediscover the coordinator and still commit the offset.
    try (ConsumerCoordinator coordinator = buildCoordinator(rebalanceConfig, new Metrics(), assignors, true, subscriptions)) {
        subscriptions.assignFromUser(Collections.singleton(t1p));
        subscriptions.seek(t1p, 100L);
        coordinator.markCoordinatorUnknown("test cause");
        assertTrue(coordinator.coordinatorUnknown());
        client.prepareResponse(groupCoordinatorResponse(node, Errors.NONE));
        prepareOffsetCommitRequest(singletonMap(t1p, 100L), Errors.NONE);
        // async commit offset should find coordinator
        time.sleep(autoCommitIntervalMs); // sleep for a while to ensure auto commit does happen
        coordinator.maybeAutoCommitOffsetsAsync(time.milliseconds());
        assertFalse(coordinator.coordinatorUnknown());
        assertEquals(100L, subscriptions.position(t1p).offset);
    }
}
|
/**
 * Returns the group key value at the given column for the given result row,
 * read from the "group" array of the JSON group-by results.
 */
@Override
public String getGroupKeyString(int rowIndex, int groupKeyColumnIndex) {
    return _groupByResults.get(rowIndex).get("group").get(groupKeyColumnIndex).asText();
}
|
@Test
public void testGetGroupKeyString() {
    // Run the test: first row, first group-key column.
    final String result = _groupByResultSetUnderTest.getGroupKeyString(0, 0);
    // Verify the results
    assertEquals("testGroup1", result);
}
|
/**
 * Dry-run validation of a workflow: builds a synthetic workflow instance and
 * per-step runtime summaries, generating merged params for each to surface
 * definition errors without actually running anything.
 *
 * @param workflow the workflow definition to validate
 * @param caller   the user attributed as owner/initiator of the dry run
 * @throws MaestroDryRunException wrapping any failure during validation
 */
@SuppressWarnings({"PMD.AvoidInstantiatingObjectsInLoops"})
public void validate(Workflow workflow, User caller) {
    try {
        RunProperties runProperties = new RunProperties();
        runProperties.setOwner(caller);
        Map<String, ParamDefinition> workflowParams = workflow.getParams();
        Map<String, ParamDefinition> defaultDryRunParams =
            defaultParamManager.getDefaultDryRunParams();
        // add run params to override params with known invalid defaults
        // (only those the workflow declares with a matching type).
        Map<String, ParamDefinition> filteredParams =
            defaultDryRunParams.entrySet().stream()
                .filter(
                    entry ->
                        workflowParams != null
                            && workflowParams.containsKey(entry.getKey())
                            && workflowParams.get(entry.getKey()).getType()
                                == entry.getValue().getType())
                .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue));
        Initiator initiator = new ValidationInitiator();
        initiator.setCaller(caller);
        RunRequest runRequest =
            RunRequest.builder()
                .initiator(initiator)
                .currentPolicy(RunPolicy.START_FRESH_NEW_RUN)
                .runParams(filteredParams)
                .build();
        WorkflowInstance workflowInstance =
            workflowHelper.createWorkflowInstance(workflow, 1L, 1L, runProperties, runRequest);
        WorkflowSummary workflowSummary =
            workflowHelper.createWorkflowSummaryFromInstance(workflowInstance);
        // todo: improve to traverse in DAG order to validate steps and their params
        for (Step step : workflow.getSteps()) {
            // Minimal synthetic runtime summary (attempt/instance ids fixed to 1)
            // — just enough context for param generation to run.
            StepRuntimeSummary runtimeSummary =
                StepRuntimeSummary.builder()
                    .stepId(step.getId())
                    .stepAttemptId(1L)
                    .stepInstanceId(1L)
                    .stepInstanceUuid(UUID.randomUUID().toString())
                    .stepName(StepHelper.getStepNameOrDefault(step))
                    .tags(step.getTags())
                    .type(step.getType())
                    .subType(step.getSubType())
                    .params(new LinkedHashMap<>())
                    .transition(StepInstanceTransition.from(step))
                    .synced(true)
                    .dependencies(Collections.emptyMap())
                    .build();
            paramsManager.generateMergedStepParams(
                workflowSummary, step, stepRuntimeMap.get(step.getType()), runtimeSummary);
        }
    } catch (Exception e) {
        // Any failure (including param generation errors) surfaces as a
        // dry-run exception with a truncated stack trace for diagnostics.
        throw new MaestroDryRunException(
            e,
            "Exception during dry run validation for workflow %s Error=[%s] Type=[%s] StackTrace=[%s]",
            workflow.getId(),
            e.getMessage(),
            e.getClass(),
            ExceptionHelper.getStackTrace(e, MAX_STACKTRACE_LINES));
    }
}
|
@Test
public void testValidatePass() {
    // With param generation stubbed to succeed, validate must complete
    // without throwing MaestroDryRunException.
    when(paramsManager.generateMergedWorkflowParams(any(), any()))
        .thenReturn(new LinkedHashMap<>());
    when(paramsManager.generateMergedStepParams(any(), any(), any(), any()))
        .thenReturn(new LinkedHashMap<>());
    dryRunValidator.validate(definition.getWorkflow(), user);
}
|
/**
 * Returns a FileSystem for the given key, creating and caching one on first
 * use and bumping the ref count on subsequent hits. The returned instance is
 * a per-caller wrapper around the shared cached FileSystem.
 */
public FileSystem get(Key key) {
    synchronized (mLock) {
        Value value = mCacheMap.get(key);
        FileSystem fs;
        if (value == null) {
            // On cache miss, create and insert a new FileSystem instance,
            fs = FileSystem.Factory.create(FileSystemContext.create(key.mSubject, key.mConf));
            mCacheMap.put(key, new Value(fs, 1));
        } else {
            fs = value.mFileSystem;
            value.mRefCount.getAndIncrement();
        }
        return new InstanceCachingFileSystem(fs, key);
    }
}
|
@Test
public void getSameKey() {
    // Two gets with the same key must share the same underlying delegate
    // FileSystem (only the wrappers differ), and neither may be closed.
    Key key1 = createTestFSKey("user1");
    FileSystem fs1 = mFileSystemCache.get(key1);
    FileSystem fs2 = mFileSystemCache.get(key1);
    assertSame(getDelegatedFileSystem(fs1), getDelegatedFileSystem(fs2));
    assertFalse(fs1.isClosed());
    assertFalse(fs2.isClosed());
}
|
/**
 * Creates a decimal type with MySQL-compatible defaults: precision 10, scale 0.
 */
public static ScalarType createUnifiedDecimalType() {
    // for mysql compatibility
    return createUnifiedDecimalType(10, 0);
}
|
@Test
public void createUnifiedDecimalTypeWithoutPrecisionAndScale() throws AnalysisException {
    // Only verifies the no-arg factory (defaults: precision 10, scale 0)
    // completes without throwing; there is no assertion on the result.
    ScalarType.createUnifiedDecimalType();
}
|
/**
 * Packs a set of file attributes into a compact signature string with one
 * character per attribute: the first letter of the enum constant's name
 * (e.g. REPLICATION -> 'R'), in the EnumSet's natural (declaration) order.
 *
 * @param attributes the attributes to encode; may be empty
 * @return the packed attribute string, empty for an empty set
 */
public static String packAttributes(EnumSet<FileAttribute> attributes) {
  StringBuilder buffer = new StringBuilder(FileAttribute.values().length);
  for (FileAttribute attribute : attributes) {
    buffer.append(attribute.name().charAt(0));
  }
  // Exactly one char is appended per attribute, so the builder's content is the
  // result; no separate length counter / substring is needed.
  return buffer.toString();
}
|
@Test
public void testPackAttributes() {
  // Start empty, then add attributes one at a time; the packed string grows by
  // one leading-letter character per attribute, in enum declaration order.
  EnumSet<FileAttribute> attributes = EnumSet.noneOf(FileAttribute.class);
  assertThat(DistCpUtils.packAttributes(attributes)).isEqualTo("");
  attributes.add(FileAttribute.REPLICATION);
  assertThat(DistCpUtils.packAttributes(attributes)).isEqualTo("R");
  attributes.add(FileAttribute.BLOCKSIZE);
  assertThat(DistCpUtils.packAttributes(attributes)).isEqualTo("RB");
  attributes.add(FileAttribute.USER);
  attributes.add(FileAttribute.CHECKSUMTYPE);
  assertThat(DistCpUtils.packAttributes(attributes)).isEqualTo("RBUC");
  attributes.add(FileAttribute.GROUP);
  assertThat(DistCpUtils.packAttributes(attributes)).isEqualTo("RBUGC");
  attributes.add(FileAttribute.PERMISSION);
  assertThat(DistCpUtils.packAttributes(attributes)).isEqualTo("RBUGPC");
  attributes.add(FileAttribute.TIMES);
  assertThat(DistCpUtils.packAttributes(attributes)).isEqualTo("RBUGPCT");
}
|
/**
 * Two Version objects are equal iff they are of the exact same class and
 * their underlying version values are equal.
 */
@Override
public boolean equals(final Object o) {
  // Fast path: an object is always equal to itself.
  if (this == o) {
    return true;
  }
  // Strict class comparison (not instanceof) rejects nulls and subclasses alike.
  if (o == null || getClass() != o.getClass()) {
    return false;
  }
  return version.equals(((Version) o).getVersion());
}
|
@Test
public void testEquals() throws Exception {
  // Equal versions, with and without a pre-release suffix.
  assertTrue(Version.from(0, 20, 0).equals(Version.from(0, 20, 0)));
  assertTrue(Version.from(0, 20, 0, "preview.1").equals(Version.from(0, 20, 0, "preview.1")));
  assertTrue(Version.from(1, 2, 3).equals(Version.from(1, 2, 3)));
  Version v = Version.from(0, 20, 0);
  assertEquals(Version.from(0, 20, 0), v);
  // Differing patch level or suffix, and null, must not be equal.
  assertFalse(Version.from(0, 20, 0).equals(Version.from(0, 20, 1)));
  assertFalse(Version.from(0, 20, 0, "preview.1").equals(Version.from(0, 20, 0, "preview.2")));
  assertFalse(Version.from(0, 20, 0).equals(null));
}
|
/**
 * Returns the ids of all roles as hex strings.
 * Queries the raw MongoCollection (projecting only "_id") to avoid mongojack's
 * deserialization and object-creation overhead.
 */
public ImmutableSet<String> getAllRoleIds() {
  final FindIterable<Document> docs = dbCollection.find().projection(Projections.include("_id"));
  final ImmutableSet.Builder<String> ids = ImmutableSet.builder();
  for (final Document doc : docs) {
    ids.add(doc.get("_id", ObjectId.class).toHexString());
  }
  return ids.build();
}
|
@Test
void getAllRoleIds() {
  // The fixture database contains exactly these role documents; the service
  // must return every _id as a hex string (order-insensitive set comparison).
  assertThat(service.getAllRoleIds()).isEqualTo(ImmutableSet.of(
      "564c6707c8306e079f718980",
      "56701ac4c8302ff6bee2a65d",
      "5b17d7c63f3ab8204eea0589",
      "58dbaa158ae4923256dc6266",
      "564c6707c8306e079f71897f",
      "58dbaa158ae4923256dc6265",
      "59fc4b2b6e948411fadbd85d",
      "59fc4b2b6e948411fadbd85e",
      "5c2f6d3b3dd06601be176b85",
      "5c488b67e3f1420b4d9ae635",
      "5c488f1de3f14219be1cb9f6",
      "5d41bb973086a840541a3ed2",
      "5f1f0d2a6f58d7c052d49775",
      "5f1f0d2a6f58d7c052d49778",
      "5f1f0d2a6f58d7c052d4977b",
      "5f22792d6f58d7c0521edb23"
  ));
}
|
/**
 * Verifies that the catalog-level Hive metastore URI matches the one configured
 * on the active Spark session. Validation is skipped when either side does not
 * declare a URI; a mismatch fails with IllegalArgumentException.
 */
private void validateHmsUri(String catalogHmsUri) {
  if (catalogHmsUri == null) {
    // The catalog does not pin a metastore URI; nothing to check.
    return;
  }
  Configuration conf = SparkSession.active().sessionState().newHadoopConf();
  String envHmsUri = conf.get(HiveConf.ConfVars.METASTOREURIS.varname, null);
  if (envHmsUri != null) {
    // Both sides declare a metastore URI; they must agree exactly.
    Preconditions.checkArgument(
        catalogHmsUri.equals(envHmsUri),
        "Inconsistent Hive metastore URIs: %s (Spark session) != %s (spark_catalog)",
        envHmsUri,
        catalogHmsUri);
  }
}
|
@Test
public void testValidateHmsUri() {
  // HMS uris match: the session catalog initializes and exposes "default".
  // (assertEquals gives a descriptive failure, unlike assertTrue(x.equals(...)))
  Assert.assertEquals(
      "default",
      spark.sessionState().catalogManager().v2SessionCatalog().defaultNamespace()[0]);
  // HMS uris doesn't match: catalog initialization must fail with the exact message.
  spark.sessionState().catalogManager().reset();
  String catalogHmsUri = "RandomString";
  spark.conf().set(envHmsUriKey, hmsUri);
  spark.conf().set(catalogHmsUriKey, catalogHmsUri);
  IllegalArgumentException exception =
      Assert.assertThrows(
          IllegalArgumentException.class,
          () -> spark.sessionState().catalogManager().v2SessionCatalog());
  String errorMessage =
      String.format(
          "Inconsistent Hive metastore URIs: %s (Spark session) != %s (spark_catalog)",
          hmsUri, catalogHmsUri);
  Assert.assertEquals(errorMessage, exception.getMessage());
  // no env HMS uri, only catalog HMS uri: validation is skipped, init succeeds.
  spark.sessionState().catalogManager().reset();
  spark.conf().set(catalogHmsUriKey, hmsUri);
  spark.conf().unset(envHmsUriKey);
  Assert.assertEquals(
      "default",
      spark.sessionState().catalogManager().v2SessionCatalog().defaultNamespace()[0]);
  // no catalog HMS uri, only env HMS uri: validation is skipped, init succeeds.
  spark.sessionState().catalogManager().reset();
  spark.conf().set(envHmsUriKey, hmsUri);
  spark.conf().unset(catalogHmsUriKey);
  Assert.assertEquals(
      "default",
      spark.sessionState().catalogManager().v2SessionCatalog().defaultNamespace()[0]);
}
|
/** Returns the human-readable name of this analyzer, used in reports and logs. */
@Override
public String getName() {
  return ANALYZER_NAME;
}
|
// NOTE(review): despite the method name, this test analyzes a Python
// requirements.txt fixture, not a package.json — consider renaming.
@Test
public void testAnalyzePackageJson() throws Exception {
  try (Engine engine = new Engine(getSettings())) {
    final Dependency result = new Dependency(BaseTest.getResourceAsFile(this, "requirements.txt"));
    engine.addDependency(result);
    analyzer.analyze(result, engine);
    // The original requirements.txt dependency is replaced by one dependency
    // per listed package (24 in the fixture).
    assertFalse(ArrayUtils.contains(engine.getDependencies(), result));
    assertEquals(24, engine.getDependencies().length);
    boolean foundPyYAML = false;
    boolean foundCryptography = false;
    // Spot-check two representative packages for name, version, display name
    // and ecosystem tagging.
    for (Dependency d : engine.getDependencies()) {
      if ("PyYAML".equals(d.getName())) {
        foundPyYAML = true;
        assertEquals("3.12", d.getVersion());
        assertThat(d.getDisplayFileName(), equalTo("PyYAML:3.12"));
        assertEquals(PythonDistributionAnalyzer.DEPENDENCY_ECOSYSTEM, d.getEcosystem());
      }
      if ("cryptography".equals(d.getName())) {
        foundCryptography = true;
        assertEquals("1.8.2", d.getVersion());
        assertThat(d.getDisplayFileName(), equalTo("cryptography:1.8.2"));
        assertEquals(PythonDistributionAnalyzer.DEPENDENCY_ECOSYSTEM, d.getEcosystem());
      }
    }
    assertTrue("Expected to find PyYAML", foundPyYAML);
    assertTrue("Expected to find cryptography", foundCryptography);
  }
}
|
/**
 * Filters the candidate instances for the target service using routing rules.
 * Without request data, routing is skipped and the call is delegated as-is;
 * instances that should not be handled are returned unchanged.
 */
@Override
public List<Object> handle(String targetName, List<Object> instances, RequestData requestData) {
  if (requestData == null) {
    // No request context to route on; let the parent handler decide.
    return super.handle(targetName, instances, null);
  }
  if (!shouldHandle(instances)) {
    return instances;
  }
  final List<Object> filtered;
  if (routerConfig.isUseRequestRouter()) {
    // Request-router mode matches on the request tag only.
    filtered = getTargetInstancesByRequest(targetName, instances, requestData.getTag());
  } else {
    // Rule mode additionally matches on the request path.
    filtered = getTargetInstancesByRules(targetName, instances, requestData.getPath(), requestData.getTag());
  }
  return super.handle(targetName, filtered, requestData);
}
|
@Test
public void testGetTargetInstancesByFlowRulesWithGlobalRules() {
  // Install combined global + service-level flow match rules.
  RuleInitializationUtils.initGlobalAndServiceFlowMatchRules();
  List<Object> instances = new ArrayList<>();
  ServiceInstance instance1 = TestDefaultServiceInstance.getTestDefaultServiceInstance("1.0.0");
  instances.add(instance1);
  ServiceInstance instance2 = TestDefaultServiceInstance.getTestDefaultServiceInstance("1.0.1");
  instances.add(instance2);
  // A request tagged bar=bar1 should be routed to the 1.0.1 instance only.
  Map<String, List<String>> header = new HashMap<>();
  header.put("bar", Collections.singletonList("bar1"));
  List<Object> targetInstances = flowRouteHandler.handle("foo", instances,
      new RequestData(header, null, null));
  Assert.assertEquals(1, targetInstances.size());
  Assert.assertEquals(instance2, targetInstances.get(0));
  // Clean up the shared rule cache so other tests are unaffected.
  ConfigCache.getLabel(RouterConstant.SPRING_CACHE_NAME).resetRouteRule(Collections.emptyMap());
  ConfigCache.getLabel(RouterConstant.SPRING_CACHE_NAME).resetGlobalRule(Collections.emptyList());
}
|
/**
 * Flattens a TableRow into its intermediate GenericRow layout by appending, in
 * order: the rowtime, every key column, and (for windowed rows) the window
 * start/end epoch millis. Mutates and returns the row's value.
 */
public static GenericRow getIntermediateRow(final TableRow row) {
  final GenericRow value = row.value();
  final List<?> keyFields = row.key().values();
  // Reserve capacity up front: ROWTIME + all keys + optional window bounds.
  final int windowColumns = row.window().map(w -> 2).orElse(0);
  value.ensureAdditionalCapacity(1 + keyFields.size() + windowColumns);
  value.append(row.rowTime());
  value.appendAll(keyFields);
  row.window().ifPresent(window -> {
    value.append(window.start().toEpochMilli());
    value.append(window.end().toEpochMilli());
  });
  return value;
}
|
@Test
public void shouldReturnIntermediateRowNonWindowed() {
  // Given: expected layout for non-windowed rows is value + rowtime + key
  // (no window bound columns).
  final GenericRow intermediateRow1 = aValue.append(aRowtime).append(aKey);
  final GenericRow intermediateRow2 = aValue2.append(aRowtime).append(aKey2);
  // When:
  final GenericRow genericRow1 = KsqlMaterialization.getIntermediateRow(row);
  final GenericRow genericRow2 = KsqlMaterialization.getIntermediateRow(row2);
  // Then:
  assertThat(genericRow1, is(intermediateRow1));
  assertThat(genericRow2, is(intermediateRow2));
}
|
/** Returns the meta data service this register service delegates to. */
public MetaDataService getMetaDataService() {
  return metaDataService;
}
|
@Test
public void testGetMetaDataService() {
  // The getter must return the exact service instance wired into the class.
  assertEquals(metaDataService, abstractShenyuClientRegisterService.getMetaDataService());
}
|
/**
 * Resolves the key schema for a topic, delegating to the shared lookup with
 * the isKey flag set to true.
 */
@Override
public SchemaResult getKeySchema(
    final Optional<String> topicName,
    final Optional<Integer> schemaId,
    final FormatInfo expectedFormat,
    final SerdeFeatures serdeFeatures
) {
  return getSchema(topicName, schemaId, expectedFormat, serdeFeatures, true);
}
|
@Test
public void shouldReturnErrorFromGetKeyIfForbidden() throws Exception {
  // Given: the schema registry rejects the lookup with a 403.
  when(srClient.getSchemaBySubjectAndId(any(), anyInt()))
      .thenThrow(forbiddenException());
  // When:
  final SchemaResult result = supplier.getKeySchema(Optional.of(TOPIC_NAME),
      Optional.of(42), expectedFormat, SerdeFeatures.of(SerdeFeature.UNWRAP_SINGLES));
  // Then: the result carries a failure reason instead of a schema.
  assertThat(result.schemaAndId, is(Optional.empty()));
  assertThat(result.failureReason, is(not(Optional.empty())));
  verifyFailureMessageForKey(result, Optional.of(42));
}
|
/**
 * Coerces the expressions to a common type, using no lambda type mapping.
 * Convenience overload delegating to the three-argument variant.
 */
static Result coerceUserList(
    final Collection<Expression> expressions,
    final ExpressionTypeManager typeManager
) {
  return coerceUserList(expressions, typeManager, Collections.emptyMap());
}
|
@Test
public void shouldCoerceMapOfCompatibleLiterals() {
  // Given: two map literals whose key/value types differ but are compatible —
  // INT keys vs numeric strings, INT values vs trimmable numeric strings.
  final ImmutableList<Expression> expressions = ImmutableList.of(
      new CreateMapExpression(
          ImmutableMap.of(
              new IntegerLiteral(10),
              new IntegerLiteral(289476)
          )
      ),
      new CreateMapExpression(
          ImmutableMap.of(
              new StringLiteral("123456789000"),
              new StringLiteral("\t -100 \t")
          )
      )
  );
  // When:
  final Result result = CoercionUtil.coerceUserList(expressions, typeManager);
  // Then: the common type widens keys to BIGINT (the string exceeds INT range)
  // and both expressions are wrapped in casts to MAP<BIGINT, INTEGER>.
  assertThat(result.commonType(), is(Optional.of(SqlTypes.map(SqlTypes.BIGINT, SqlTypes.INTEGER))));
  assertThat(result.expressions(), is(ImmutableList.of(
      cast(new CreateMapExpression(
          ImmutableMap.of(
              new IntegerLiteral(10),
              new IntegerLiteral(289476)
          )
      ), SqlTypes.map(SqlTypes.BIGINT, SqlTypes.INTEGER)),
      cast(new CreateMapExpression(
          ImmutableMap.of(
              new StringLiteral("123456789000"),
              new StringLiteral("\t -100 \t")
          )
      ), SqlTypes.map(SqlTypes.BIGINT, SqlTypes.INTEGER))
  )));
}
|
/**
 * Validates an XML string against the schema read from the given stream,
 * returning any validation error messages. Convenience overload that uses no
 * additional resolution context (delegates with null).
 */
public static List<String> validateXml(InputStream schemaStream, String xmlString) throws Exception {
  return validateXml(schemaStream, xmlString, null);
}
|
@Test
public void testValidXmlAgainstInvalidSchema() throws Exception {
  // Given: a well-formed XML document, but a schema file that is itself invalid.
  InputStream schemaStream = new FileInputStream("target/test-classes/io/github/microcks/util/invalid-schema.xsd");
  String validXml = """
      <note>
      <to>Tove</to>
      <from>Jani</from>
      <heading>Reminder</heading>
      <body>Don't forget me this weekend!</body>
      </note>
      """;
  // Then: validation must fail while parsing the broken schema.
  Executable validationExecutable = () -> XmlSchemaValidator.validateXml(schemaStream, validXml);
  assertThrows(SAXParseException.class, validationExecutable, "Expected SAXParseException due to schema mismatch.");
}
|
// NOTE(review): this ordering is NOT a total order — it is not antisymmetric
// (a.compareTo(b) and b.compareTo(a) can both return -1 when each side lacks a
// resource the other has), so it must not be used for sorting. It reads as a
// capacity check: 1 means "this strictly exceeds o on every resource o uses".
@Override
public int compareTo(ResourceDescription o) {
  // More distinct resources tracked by the other side than by us: we lose.
  if (o.getResourceUsage().size() > resourceUsageByName.size()) {
    return -1;
  }
  if (exactlyEquals(o.getResourceUsage())) {
    return 0;
  }
  for (Map.Entry<String, ResourceUsage> entry : o.getResourceUsage().entrySet()) {
    // if we don't have any entry which is in other but not in our set, we fail
    String resourceName = entry.getKey();
    // check if we have this resource, if not clearly we are lesser
    if (resourceUsageByName.containsKey(resourceName)) {
      int less = resourceUsageByName.get(resourceName).compareTo(entry.getValue());
      // not using the resource till its last shred, so <= 0 is failure
      if (less <= 0) {
        return -1;
      }
    } else {
      return -1;
    }
  }
  return 1;
}
|
@Test
public void compareTo() {
  // Two descriptions with identical resource usage must compare equal (0).
  PulsarResourceDescription one = new PulsarResourceDescription();
  one.put("cpu", new ResourceUsage(0.1, 0.2));
  PulsarResourceDescription two = new PulsarResourceDescription();
  two.put("cpu", new ResourceUsage(0.1, 0.2));
  assertEquals(0, one.compareTo(two));
}
|
/**
 * Builds the "completed_by_ts" parameter definition from whichever TCT time
 * field is set, with precedence: explicit timestamp > completed-by hour >
 * duration in minutes.
 *
 * @throws MaestroInternalError if none of the three time fields is set
 */
@JsonIgnore
public LongParamDefinition getCompletedByTsParam() {
  if (completedByTs != null) {
    // Explicit absolute timestamp wins outright.
    return ParamDefinition.buildParamDefinition(PARAM_NAME, completedByTs);
  }
  if (completedByHour != null) {
    // Hour-based deadline: evaluated in the explicit tz if given, otherwise
    // falls back to the workflow's cron timezone.
    String timeZone = tz == null ? "WORKFLOW_CRON_TIMEZONE" : String.format("'%s'", tz);
    return LongParamDefinition.builder()
        .name(PARAM_NAME)
        .expression(String.format(COMPLETED_HOUR_TCT_TS, timeZone, completedByHour))
        .build();
  }
  if (durationMinutes != null) {
    // Relative deadline: run timestamp plus the configured duration.
    return LongParamDefinition.builder()
        .name(PARAM_NAME)
        .expression(String.format(DURATION_MINUTES_TCT_TS, durationMinutes))
        .build();
  }
  throw new MaestroInternalError(
      "Invalid TCT definition, neither of time fields is set: %s", this);
}
|
@Test
public void testGetCompletedByTsParamWithDurationMinutes() {
  // Duration-based TCT: expect a RUN_TS + 60 minutes SEL expression; the tz
  // field is irrelevant for the duration branch.
  Tct tct = new Tct();
  tct.setDurationMinutes(60);
  tct.setTz("UTC");
  LongParamDefinition expected =
      LongParamDefinition.builder()
          .name("completed_by_ts")
          .expression("return new DateTime(RUN_TS).plusMinutes(60).getMillis();")
          .build();
  LongParamDefinition actual = tct.getCompletedByTsParam();
  assertEquals(expected, actual);
}
|
/**
 * Partitions the elements into {@code numGroups} consecutive groups as evenly
 * as possible: the first {@code size % numGroups} groups receive one extra
 * element. Always returns exactly {@code numGroups} groups (some possibly
 * empty); each group is an independent mutable list.
 *
 * @param elements  the elements to partition, in order
 * @param numGroups the number of groups; must be positive
 * @return a list of {@code numGroups} groups covering all elements in order
 * @throws IllegalArgumentException if {@code numGroups <= 0}
 */
public static <T> List<List<T>> groupPartitions(List<T> elements, int numGroups) {
  if (numGroups <= 0) {
    throw new IllegalArgumentException("Number of groups must be positive.");
  }
  List<List<T>> result = new ArrayList<>(numGroups);
  // Each group has either perGroup+1 or perGroup raw partitions.
  int perGroup = elements.size() / numGroups;
  int leftover = elements.size() % numGroups;
  int assigned = 0;
  for (int group = 0; group < numGroups; group++) {
    int numThisGroup = group < leftover ? perGroup + 1 : perGroup;
    // Copy the slice so each group is independent of the input list.
    result.add(new ArrayList<>(elements.subList(assigned, assigned + numThisGroup)));
    assigned += numThisGroup;
  }
  return result;
}
|
@Test
public void testGroupPartitionsInvalidCount() {
  // Zero groups is invalid: the utility must reject it up front.
  assertThrows(IllegalArgumentException.class,
      () -> ConnectorUtils.groupPartitions(FIVE_ELEMENTS, 0));
}
|
/**
 * Returns the user's preferred editor application, resolved from the
 * "editor.bundleIdentifier" preference, or {@link Application#notfound} when
 * the configured editor is not installed.
 */
public static Application getDefaultEditor() {
  final Application application = finder.getDescription(
      preferences.getProperty("editor.bundleIdentifier"));
  if(finder.isInstalled(application)) {
    return application;
  }
  // Configured editor missing on this system: report the sentinel value.
  return Application.notfound;
}
|
@Test
public void testGetDefaultEditor() {
  // With no editor configured/installed in the test environment, the
  // notfound sentinel instance (same reference) must be returned.
  assertSame(Application.notfound, EditorFactory.getDefaultEditor());
}
|
/**
 * Binds both operands of an IN expression and rebuilds it with the bound
 * segments, preserving the original indexes and NOT flag.
 */
public static InExpression bind(final InExpression segment, final SegmentType parentSegmentType, final SQLStatementBinderContext binderContext,
    final Map<String, TableSegmentBinderContext> tableBinderContexts, final Map<String, TableSegmentBinderContext> outerTableBinderContexts) {
  ExpressionSegment boundLeft = ExpressionSegmentBinder.bind(segment.getLeft(), parentSegmentType, binderContext, tableBinderContexts, outerTableBinderContexts);
  ExpressionSegment boundRight = ExpressionSegmentBinder.bind(segment.getRight(), parentSegmentType, binderContext, tableBinderContexts, outerTableBinderContexts);
  return new InExpression(segment.getStartIndex(), segment.getStopIndex(), boundLeft, boundRight, segment.isNot());
}
|
@Test
void assertInExpressionBinder() {
  // Binding literal operands should preserve text concatenation and the NOT flag.
  InExpression inExpression = new InExpression(0, 10,
      new LiteralExpressionSegment(0, 0, "left"),
      new LiteralExpressionSegment(0, 0, "right"), true);
  SQLStatementBinderContext binderContext = mock(SQLStatementBinderContext.class);
  InExpression actual = InExpressionBinder.bind(inExpression, SegmentType.PROJECTION, binderContext, Collections.emptyMap(), Collections.emptyMap());
  assertThat(actual.getText(), is("leftright"));
  assertTrue(actual.isNot());
}
|
/**
 * Authenticates internal requests (or external requests to internal endpoints
 * when internal JWT is enabled); aborts with 401 on failure, otherwise records
 * the authenticated principal. Non-internal traffic passes through untouched.
 */
@Override
public void filter(ContainerRequestContext context)
{
    if (internalAuthenticationManager.isInternalRequest(context) ||
            (internalAuthenticationManager.isInternalJwtEnabled() && isAccessingInternalEndpoint(resourceInfo))) {
        Principal authenticatedPrincipal = internalAuthenticationManager.authenticateInternalRequest(context);
        if (authenticatedPrincipal == null) {
            // Authentication failed: abort with 401 "Unauthorized". The
            // serverError() builder's initial 500 is overwritten by status().
            ResponseBuilder responseBuilder = Response.serverError();
            responseBuilder.status(SC_UNAUTHORIZED, "Unauthorized");
            context.abortWith(responseBuilder.build());
        }
        else {
            principal = Optional.of(authenticatedPrincipal);
        }
    }
}
|
@Test
public void testJwtAuthenticationRejectsWithNoBearerTokenJwtEnabled()
{
    // With internal JWT enabled and a request to an internal endpoint carrying
    // no bearer token, the filter must abort the request with 401.
    String sharedSecret = "secret";
    boolean internalJwtEnabled = true;
    InternalAuthenticationManager internalAuthenticationManager = new InternalAuthenticationManager(Optional.of(sharedSecret), "nodeId", internalJwtEnabled);
    InternalAuthenticationFilter internalAuthenticationFilter =
            new InternalAuthenticationFilter(internalAuthenticationManager, new ResourceInfoBuilder(TaskResource.class, null, null).build());
    MockContainerRequestContext containerRequestContext = new MockContainerRequestContext(ImmutableListMultimap.of());
    internalAuthenticationFilter.filter(containerRequestContext);
    assertEquals(containerRequestContext.getResponse().getStatus(), SC_UNAUTHORIZED);
    assertEquals("Unauthorized", containerRequestContext.getResponse().getStatusInfo().getReasonPhrase());
}
|
/**
 * Runs the delegate execution's compute inside the worker observer so that
 * per-batch metrics (counts/timings) are recorded; returns the number of
 * events the delegate produced.
 */
@Override
public int compute(QB batch, boolean flush, boolean shutdown) {
  return workerObserver.observeExecutionComputation(batch, () -> execution.compute(batch, flush, shutdown));
}
|
// Verifies that observed executions report filtered/out counts and durations
// both to their own pipeline's metric namespace and to the shared process
// namespace, aggregating across workers and invocations.
@Test
public void compute() throws IOException {
  final ManualAdvanceClock manualAdvanceClock = new ManualAdvanceClock(Instant.now());
  final MetricExt rootMetric = MetricExtFactory.newMetricExtFromTestClock(manualAdvanceClock);
  final MockCompiledExecution mockQueueBatchExecution = new MockCompiledExecution(manualAdvanceClock);
  final AbstractNamespacedMetricExt processEventsNamespace = namespaceMetric(rootMetric, "events");
  final AbstractNamespacedMetricExt pipelineAEventsNamespace = namespaceMetric(rootMetric, "pipelines", "a", "events");
  final AbstractNamespacedMetricExt pipelineBEventsNamespace = namespaceMetric(rootMetric, "pipelines", "b", "events");
  // we create two worker observers, one for each pipeline, connected to the relevant metric namespaces
  final WorkerObserver pipelineAWorkerObserver = new WorkerObserver(processEventsNamespace, pipelineAEventsNamespace);
  final WorkerObserver pipelineBWorkerObserver = new WorkerObserver(processEventsNamespace, pipelineBEventsNamespace);
  // we create three observed executions to test, one for pipeline A, and two for pipeline B
  final ObservedExecution<MockQueueBatch> executionPipelineAWorker1 = pipelineAWorkerObserver.ofExecution(mockQueueBatchExecution);
  final ObservedExecution<MockQueueBatch> executionPipelineBWorker1 = pipelineBWorkerObserver.ofExecution(mockQueueBatchExecution);
  final ObservedExecution<MockQueueBatch> executionPipelineBWorker2 = pipelineBWorkerObserver.ofExecution(mockQueueBatchExecution);
  // in pipeline A, we take 110.9ms to filter 100 events and output 10 events
  final MockQueueBatch mockQueueBatchA = new MockQueueBatch(100, 10, 110_900_000L);
  final int eventsOutputA = executionPipelineAWorker1.compute(mockQueueBatchA, false, false);
  assertThat(eventsOutputA, is(equalTo(10)));
  // in pipeline B on worker 1, we take 1010.9ms to filter 1000 events and output 100 events
  final MockQueueBatch mockQueueBatchB = new MockQueueBatch(1000, 100, 1_010_900_000L);
  final int eventsOutputB = executionPipelineBWorker1.compute(mockQueueBatchB, false, false);
  assertThat(eventsOutputB, is(equalTo(100)));
  // again in pipeline B on worker 1, we take 10010.9ms to filter 1000 events and output 1000 events
  final MockQueueBatch mockQueueBatchB2 = new MockQueueBatch(1000, 1000, 10_010_900_000L);
  final int eventsOutputB2 = executionPipelineBWorker1.compute(mockQueueBatchB2, false, false);
  assertThat(eventsOutputB2, is(equalTo(1000)));
  // and in pipeline B on worker 2, we take 100010.9ms to filter 1000 events and output 10000 events
  final MockQueueBatch mockQueueBatchB3 = new MockQueueBatch(1000, 10000, 100_010_900_000L);
  final int eventsOutputB3 = executionPipelineBWorker2.compute(mockQueueBatchB3, false, false);
  assertThat(eventsOutputB3, is(equalTo(10000)));
  // validate that the inbound filter counts made it to our independent pipeline metrics and to the combined process
  final LongCounter pipelineAEventsFilteredCounter = LongCounter.fromRubyBase(pipelineAEventsNamespace, MetricKeys.FILTERED_KEY);
  final LongCounter pipelineBEventsFilteredCounter = LongCounter.fromRubyBase(pipelineBEventsNamespace, MetricKeys.FILTERED_KEY);
  final LongCounter processEventsFilteredCounter = LongCounter.fromRubyBase(processEventsNamespace, MetricKeys.FILTERED_KEY);
  assertThat(pipelineAEventsFilteredCounter.getValue(), is(equalTo(100L)));
  assertThat(pipelineBEventsFilteredCounter.getValue(), is(equalTo(3000L)));
  assertThat(processEventsFilteredCounter.getValue(), is(equalTo(3100L)));
  // validate that the outbound execution counts made it to our independent pipeline metrics and to the combined process
  final LongCounter pipelineAEventsOutCounter = LongCounter.fromRubyBase(pipelineAEventsNamespace, MetricKeys.OUT_KEY);
  final LongCounter pipelineBEventsOutCounter = LongCounter.fromRubyBase(pipelineBEventsNamespace, MetricKeys.OUT_KEY);
  final LongCounter processEventsOutCounter = LongCounter.fromRubyBase(processEventsNamespace, MetricKeys.OUT_KEY);
  assertThat(pipelineAEventsOutCounter.getValue(), is(equalTo(10L)));
  assertThat(pipelineBEventsOutCounter.getValue(), is(equalTo(11100L)));
  assertThat(processEventsOutCounter.getValue(), is(equalTo(11110L)));
  // validate that the timings were reported to our independent pipeline metrics and to the combined process
  final TimerMetric pipelineADurationTimer = TimerMetric.fromRubyBase(pipelineAEventsNamespace, MetricKeys.DURATION_IN_MILLIS_KEY);
  final TimerMetric pipelineBDurationTimer = TimerMetric.fromRubyBase(pipelineBEventsNamespace, MetricKeys.DURATION_IN_MILLIS_KEY);
  final TimerMetric processDurationTimer = TimerMetric.fromRubyBase(processEventsNamespace, MetricKeys.DURATION_IN_MILLIS_KEY);
  assertThat(pipelineADurationTimer.getValue(), is(equalTo(110L))); // 110.9 -> 110
  assertThat(pipelineBDurationTimer.getValue(), is(equalTo(111032L))); // 1010.9 + 10010.9 + 100010.9 = 111032.7 -> 111032
  assertThat(processDurationTimer.getValue(), is(equalTo(111143L))); // 110.9 + 1010.9 + 10010.9 + 100010.9 = 111143.6 -> 111143
}
|
/** Returns the config group this processor listens to (service metadata). */
@Override
public String group() {
  return Constants.SERVICE_METADATA;
}
|
@Test
void testGroup() {
  // The processor must subscribe to the service metadata config group.
  String group = serviceMetadataProcessor.group();
  assertEquals(Constants.SERVICE_METADATA, group);
}
|
/**
 * Returns the symmetric difference of the two collections: the set of elements
 * contained in exactly one of {@code c1} and {@code c2}. Computed as
 * (c1 ∪ c2) \ (c1 ∩ c2); duplicates within either input are irrelevant.
 */
public static <T> Set<T> symmetricDifference(Collection<? extends T> c1, Collection<? extends T> c2) {
  Set<T> union = new HashSet<>(c1);
  union.addAll(c2);
  Set<T> intersection = new HashSet<>(c1);
  intersection.retainAll(c2);
  union.removeAll(intersection);
  return union;
}
|
@Test
public void testSymmetricDifference() {
  // The result must contain exactly the elements present in only one of the
  // fixture lists, compared without regard to ordering.
  assertTrue(CollectionUtil.equalContentsIgnoreOrder(
      List.of(1, 2, 3),
      CollectionUtil.symmetricDifference(l1, l2)));
}
|
/**
 * Delegates entry-processor execution to the wrapped IMap.
 * The raw EntryProcessor parameter type is dictated by the overridden
 * adapter interface signature.
 */
@Override
public Map<K, Object> executeOnEntries(com.hazelcast.map.EntryProcessor entryProcessor) {
  return map.executeOnEntries(entryProcessor);
}
|
@Test
public void testExecuteOnEntries() {
  map.put(23, "value-23");
  map.put(42, "value-42");
  // The processor rewrites each "value-*" to "newValue-*"; the returned map
  // carries the processor results and the backing map is updated in place.
  Map<Integer, Object> resultMap = adapter.executeOnEntries(new IMapReplaceEntryProcessor("value", "newValue"));
  assertEquals(2, resultMap.size());
  assertEquals("newValue-23", resultMap.get(23));
  assertEquals("newValue-42", resultMap.get(42));
  assertEquals("newValue-23", map.get(23));
  assertEquals("newValue-42", map.get(42));
}
|
/**
 * Runs the configured shell fencing command, with configuration and target
 * details exported as environment variables, streaming the child's stdout and
 * stderr into the log. Fencing succeeds iff the command exits with status 0.
 *
 * @param target the HA service being fenced
 * @param args   the shell command line to execute
 * @return true if the command ran and exited 0; false on launch failure,
 *         interruption, or a non-zero exit
 */
@Override
public boolean tryFence(HAServiceTarget target, String args) {
  ProcessBuilder builder;
  String cmd = parseArgs(target.getTransitionTargetHAStatus(), args);
  // Use the platform shell: bash -e so any failing command aborts the script,
  // cmd.exe /c on Windows.
  if (!Shell.WINDOWS) {
    builder = new ProcessBuilder("bash", "-e", "-c", cmd);
  } else {
    builder = new ProcessBuilder("cmd.exe", "/c", cmd);
  }
  setConfAsEnvVars(builder.environment());
  addTargetInfoAsEnvVars(target, builder.environment());
  Process p;
  try {
    p = builder.start();
    // The fencing script gets no stdin.
    p.getOutputStream().close();
  } catch (IOException e) {
    LOG.warn("Unable to execute " + cmd, e);
    return false;
  }
  String pid = tryGetPid(p);
  LOG.info("Launched fencing command '" + cmd + "' with "
      + ((pid != null) ? ("pid " + pid) : "unknown pid"));
  // Prefix child output lines with an abbreviated command (and pid if known).
  String logPrefix = abbreviate(cmd, ABBREV_LENGTH);
  if (pid != null) {
    logPrefix = "[PID " + pid + "] " + logPrefix;
  }
  // Pump logs to stderr
  StreamPumper errPumper = new StreamPumper(
      LOG, logPrefix, p.getErrorStream(),
      StreamPumper.StreamType.STDERR);
  errPumper.start();
  StreamPumper outPumper = new StreamPumper(
      LOG, logPrefix, p.getInputStream(),
      StreamPumper.StreamType.STDOUT);
  outPumper.start();
  int rc;
  try {
    rc = p.waitFor();
    errPumper.join();
    outPumper.join();
  } catch (InterruptedException ie) {
    LOG.warn("Interrupted while waiting for fencing command: " + cmd);
    // Restore the interrupt status so callers can observe the interruption.
    Thread.currentThread().interrupt();
    return false;
  }
  return rc == 0;
}
|
@Test
public void testStdoutLogging() {
  // A successful command's stdout must be pumped into the logger at INFO,
  // prefixed with the (abbreviated) command line.
  assertTrue(fencer.tryFence(TEST_TARGET, "echo hello"));
  Mockito.verify(ShellCommandFencer.LOG).info(
      Mockito.endsWith("echo hello: hello"));
}
|
/**
 * Saves the namespace image, unless a recent-enough checkpoint already exists.
 * When either threshold is set, the save is skipped if the latest image is both
 * younger than {@code timeWindow} seconds and within {@code txGap} transactions
 * of the current last-applied/written txid.
 *
 * @return true if a new image was saved, false if skipped as unnecessary
 */
public synchronized boolean saveNamespace(long timeWindow, long txGap,
    FSNamesystem source) throws IOException {
  if (timeWindow > 0 || txGap > 0) {
    // Inspect storage dirs for the most recent regular/rollback image.
    final FSImageStorageInspector inspector = storage.readAndInspectDirs(
        EnumSet.of(NameNodeFile.IMAGE, NameNodeFile.IMAGE_ROLLBACK),
        StartupOption.REGULAR);
    FSImageFile image = inspector.getLatestImages().get(0);
    File imageFile = image.getFile();
    final long checkpointTxId = image.getCheckpointTxId();
    final long checkpointAge = Time.now() - imageFile.lastModified();
    // Skip when the existing checkpoint is fresh enough in both time and txids.
    if (checkpointAge <= timeWindow * 1000 &&
        checkpointTxId >= this.getCorrectLastAppliedOrWrittenTxId() - txGap) {
      return false;
    }
  }
  saveNamespace(source, NameNodeFile.IMAGE, null);
  return true;
}
|
// Verifies that an under-construction file holding a contiguous block with a
// negative (striped-looking) id survives a saveNamespace + namenode restart,
// and that the restarted block manager flags the non-EC striped-id usage.
@Test
public void testHasNonEcBlockUsingStripedIDForLoadUCFile()
    throws IOException{
  // start a cluster
  Configuration conf = new HdfsConfiguration();
  MiniDFSCluster cluster = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(9)
        .build();
    cluster.waitActive();
    DistributedFileSystem fs = cluster.getFileSystem();
    FSNamesystem fns = cluster.getNamesystem();
    String testDir = "/test_block_manager";
    String testFile = "testfile_loaducfile";
    String testFilePath = testDir + "/" + testFile;
    String clientName = "testUser_loaducfile";
    String clientMachine = "testMachine_loaducfile";
    long blkId = -1;
    long blkNumBytes = 1024;
    long timestamp = 1426222918;
    fs.mkdir(new Path(testDir), new FsPermission("755"));
    Path p = new Path(testFilePath);
    DFSTestUtil.createFile(fs, p, 0, (short) 1, 1);
    // Attach a contiguous block with the negative id to the (reopened) file.
    BlockInfoContiguous cBlk = new BlockInfoContiguous(
        new Block(blkId, blkNumBytes, timestamp), (short)3);
    INodeFile file = (INodeFile)fns.getFSDirectory().getINode(testFilePath);
    file.toUnderConstruction(clientName, clientMachine);
    file.addBlock(cBlk);
    // Force an image save, then restart so the namespace is reloaded from it.
    fns.enterSafeMode(false);
    fns.saveNamespace(0, 0);
    cluster.restartNameNodes();
    cluster.waitActive();
    fns = cluster.getNamesystem();
    assertTrue(fns.getBlockManager().hasNonEcBlockUsingStripedID());
    cluster.shutdown();
    cluster = null;
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
|
/**
 * Blocking poll of this queue's tail that atomically offers the polled element
 * to the head of the named queue; waits up to the given timeout and returns
 * null when nothing became available. Delegates to the async variant.
 */
@Override
public V pollLastAndOfferFirstTo(String queueName, long timeout, TimeUnit unit) throws InterruptedException {
  return commandExecutor.getInterrupted(pollLastAndOfferFirstToAsync(queueName, timeout, unit));
}
|
@Test
public void testPollLastAndOfferFirstTo() throws InterruptedException {
  // Element 3 arrives on queue1 after 5s; the blocking poll must wait for it.
  // NOTE(review): the scheduled executor is never shut down — minor test leak.
  final RBlockingQueue<Integer> queue1 = redisson.getBlockingQueue("{queue}1");
  Executors.newSingleThreadScheduledExecutor().schedule(() -> {
    try {
      queue1.put(3);
    } catch (InterruptedException e) {
      // TODO Auto-generated catch block
      e.printStackTrace();
    }
  }, 5, TimeUnit.SECONDS);
  RBlockingQueue<Integer> queue2 = redisson.getBlockingQueue("{queue}2");
  queue2.put(4);
  queue2.put(5);
  queue2.put(6);
  // The polled tail of queue1 (3) is prepended to queue2.
  Integer value = queue1.pollLastAndOfferFirstTo(queue2.getName(), 5, TimeUnit.SECONDS);
  assertThat(value).isEqualTo(3);
  assertThat(queue2).containsExactly(3, 4, 5, 6);
  // An empty source queue times out and yields null.
  RBlockingQueue<Integer> queue3 = redisson.getBlockingQueue("{queue}1");
  Integer value1 = queue3.pollLastAndOfferFirstTo(queue2.getName(), 1, TimeUnit.SECONDS);
  assertThat(value1).isNull();
}
|
/**
 * Parses raw PlantUML lines into a diagram: comments are stripped, components
 * are collected first, then the dependency arrows between them are resolved.
 */
private PlantUmlDiagram createDiagram(List<String> rawDiagramLines) {
  List<String> lines = filterOutComments(rawDiagramLines);
  PlantUmlComponents plantUmlComponents = new PlantUmlComponents(parseComponents(lines));
  List<ParsedDependency> dependencies = parseDependencies(plantUmlComponents, lines);
  return new PlantUmlDiagram.Builder(plantUmlComponents)
      .withDependencies(dependencies)
      .build();
}
|
@Test
public void parses_component_name_that_clashes_with_alias_definition() {
  // A component name containing the "as" keyword must not confuse the alias
  // parser: both the full name and the explicit alias are preserved.
  PlantUmlDiagram diagram = createDiagram(TestDiagram.in(temporaryFolder)
      .component("tricky as hell cause of as keyword").withAlias("alias").withStereoTypes("..any..")
      .write());
  PlantUmlComponent trickyAsHell = getComponentWithName("tricky as hell cause of as keyword", diagram);
  assertThat(trickyAsHell.getComponentName()).isEqualTo(new ComponentName("tricky as hell cause of as keyword"));
  assertThat(trickyAsHell.getAlias().get()).isEqualTo(new Alias("alias"));
}
|
/**
 * Formats an int as a signed 4-digit lowercase hex string, e.g. "+3039" or
 * "-0001": a sign character followed by the low 16 bits of |v|.
 * (For Integer.MIN_VALUE the negation overflows, but its low 16 bits are all
 * zero, yielding "-0000" — same as the original behavior.)
 */
public static String s2(int v) {
  char sign = '+';
  if (v < 0) {
    sign = '-';
    v = -v;
  }
  StringBuilder out = new StringBuilder(5).append(sign);
  // Emit the four nibbles from most to least significant.
  for (int shift = 12; shift >= 0; shift -= 4) {
    out.append(Character.forDigit((v >> shift) & 0x0f, 16));
  }
  return out.toString();
}
|
@Test
public void testS2() {
  // Sign character plus the low 16 bits of |v| as four lowercase hex digits.
  Assert.assertEquals("+0000", Hex.s2(0));
  // MIN_VALUE's negation overflows, but its low 16 bits are zero.
  Assert.assertEquals("-0000", Hex.s2(-2147483648));
  Assert.assertEquals("+3039", Hex.s2(12345));
  // Only the low 16 bits are rendered (0x499602D2 -> 02d2).
  Assert.assertEquals("+02d2", Hex.s2(1234567890));
}
|
/**
 * Returns the minimum stored value, or the configured initial (missing) value
 * when the map is empty. Slots still holding the initial value are skipped.
 */
public int minValue()
{
    final int missing = this.initialValue;
    if (0 == size)
    {
        return missing;
    }
    int min = Integer.MAX_VALUE;
    for (int i = 0, length = values.length; i < length; i++)
    {
        final int candidate = values[i];
        if (candidate != missing)
        {
            min = Math.min(min, candidate);
        }
    }
    return min;
}
|
@Test
void shouldHaveNoMinValueForEmptyCollection()
{
    // An empty map reports the configured initial (missing) value as its minimum.
    assertEquals(INITIAL_VALUE, map.minValue());
}
|
@CanIgnoreReturnValue
public final Ordered containsExactly(@Nullable Object @Nullable ... varargs) {
  // A null varargs array means the caller literally wrote containsExactly((Object[]) null):
  // treat that as expecting a single null element.
  List<@Nullable Object> expected =
      (varargs == null) ? newArrayList((@Nullable Object) null) : asList(varargs);
  // The boolean flags the "exactly one argument and it's an Iterable" case so
  // the failure message can warn about likely containsExactlyElementsIn confusion.
  return containsExactlyElementsIn(
      expected, varargs != null && varargs.length == 1 && varargs[0] instanceof Iterable);
}
|
@Test
public void iterableContainsExactlySingleElementNoEqualsMagic() {
  // Integer 1 and Long 1L are not equal; the failure must report the
  // mismatched runtime type rather than silently coercing the values.
  expectFailureWhenTestingThat(asList(1)).containsExactly(1L);
  assertFailureValueIndexed("an instance of", 0, "java.lang.Long");
}
|
/**
 * Parses the plugin's latest-revisions-since JSON response into a poll result.
 * An empty body yields an empty result; otherwise both the material data and
 * the SCM revisions are extracted from the parsed map.
 */
@Override
public MaterialPollResult responseMessageForLatestRevisionsSince(String responseBody) {
  if (isEmpty(responseBody)) return new MaterialPollResult();
  Map responseBodyMap = getResponseMap(responseBody);
  return new MaterialPollResult(toMaterialDataMap(responseBodyMap), toSCMRevisions(responseBodyMap));
}
|
@Test
public void shouldBuildSCMRevisionsFromLatestRevisionsSinceResponse() throws Exception {
  // Two revisions with user, timestamp, comment, data and modified-file lists.
  String r1 = "{\"revision\":\"r1\",\"timestamp\":\"2011-07-14T19:43:37.100Z\",\"user\":\"some-user\",\"revisionComment\":\"comment\",\"data\":{\"dataKeyTwo\":\"data-value-two\",\"dataKeyOne\":\"data-value-one\"}," +
      "\"modifiedFiles\":[{\"fileName\":\"f1\",\"action\":\"added\"},{\"fileName\":\"f2\",\"action\":\"modified\"},{\"fileName\":\"f3\",\"action\":\"deleted\"}]}";
  String r2 = "{\"revision\":\"r2\",\"timestamp\":\"2011-07-14T19:43:37.101Z\",\"user\":\"new-user\",\"revisionComment\":\"comment\",\"data\":{\"dataKeyTwo\":\"data-value-two\",\"dataKeyOne\":\"data-value-one\"}," +
      "\"modifiedFiles\":[{\"fileName\":\"f1\",\"action\":\"added\"}]}";
  String responseBody = "{\"revisions\":[" + r1 + "," + r2 + "]}";
  MaterialPollResult pollResult = messageHandler.responseMessageForLatestRevisionsSince(responseBody);
  // No top-level "scm-data" in the payload, so material data is null.
  assertThat(pollResult.getMaterialData(), is(nullValue()));
  List<SCMRevision> scmRevisions = pollResult.getRevisions();
  assertThat(scmRevisions.size(), is(2));
  assertSCMRevision(scmRevisions.get(0), "r1", "some-user", "2011-07-14T19:43:37.100Z", "comment", List.of(new ModifiedFile("f1", ModifiedAction.added), new ModifiedFile("f2", ModifiedAction.modified), new ModifiedFile("f3", ModifiedAction.deleted)));
  assertSCMRevision(scmRevisions.get(1), "r2", "new-user", "2011-07-14T19:43:37.101Z", "comment", List.of(new ModifiedFile("f1", ModifiedAction.added)));
}
|
@Override
public Health check(Set<NodeHealth> nodeHealths) {
    // Without a reachable Elasticsearch cluster we cannot assess anything: report RED.
    ClusterHealthResponse clusterHealth = this.getEsClusterHealth();
    if (clusterHealth == null) {
        return RED_HEALTH_UNAVAILABLE;
    }
    // Combine the minimum-node-count verdict with the cluster status into one Health.
    return HealthReducer.merge(
            checkMinimumNodes(clusterHealth),
            extractStatusHealth(clusterHealth));
}
|
@Test
public void check_returns_RED_with_cause_if_ES_cluster_has_less_than_three_nodes_and_status_is_RED() {
    // Both failure conditions at once: RED cluster status and only 2 ES nodes.
    Set<NodeHealth> nodeHealths = ImmutableSet.of(newNodeHealth(GREEN));
    when(esClient.clusterHealth(any()).getStatus()).thenReturn(ClusterHealthStatus.RED);
    when(esClient.clusterHealth(any()).getNumberOfNodes()).thenReturn(2);
    Health health = underTest.check(nodeHealths);
    // Merged result must be RED and must carry a cause for each failed check.
    assertThat(health.getStatus()).isEqualTo(Health.Status.RED);
    assertThat(health.getCauses()).contains("Elasticsearch status is RED", "There should be at least three search nodes");
}
|
/**
 * Static factory: returns a {@code RowCoder} that encodes and decodes
 * {@code Row}s conforming to the given schema.
 */
public static RowCoder of(Schema schema) {
    return new RowCoder(schema);
}
|
@Test
public void testEncodingPositionRemoveFields() throws Exception {
    // schema2 is schema1 with the trailing "f_boolean" field removed.
    Schema schema1 =
        Schema.builder()
            .addNullableField("f_int32", FieldType.INT32)
            .addNullableField("f_string", FieldType.STRING)
            .addNullableField("f_boolean", FieldType.BOOLEAN)
            .build();
    Schema schema2 =
        Schema.builder()
            .addNullableField("f_int32", FieldType.INT32)
            .addNullableField("f_string", FieldType.STRING)
            .build();
    Row row =
        Row.withSchema(schema1)
            .withFieldValue("f_int32", 42)
            .withFieldValue("f_string", "hello world!")
            .withFieldValue("f_boolean", true)
            .build();
    Row expected =
        Row.withSchema(schema2)
            .withFieldValue("f_int32", 42)
            .withFieldValue("f_string", "hello world!")
            .build();
    // Encode with the wider schema, decode with the narrower one: the decoder
    // must tolerate the removed field and keep the surviving values intact.
    ByteArrayOutputStream os = new ByteArrayOutputStream();
    RowCoder.of(schema1).encode(row, os);
    Row decoded = RowCoder.of(schema2).decode(new ByteArrayInputStream(os.toByteArray()));
    assertEquals(expected, decoded);
}
|
/**
 * Deprecated overload without a {@code Named}: fails fast on a null supplier,
 * then delegates to the {@code Named} variant with an auto-generated processor name.
 */
@Override
@Deprecated
public <K1, V1> KStream<K1, V1> flatTransform(final org.apache.kafka.streams.kstream.TransformerSupplier<? super K, ? super V, Iterable<KeyValue<K1, V1>>> transformerSupplier,
                                              final String... stateStoreNames) {
    Objects.requireNonNull(transformerSupplier, "transformerSupplier can't be null");
    // Generate the name eagerly so topology node numbering stays deterministic.
    final String name = builder.newProcessorName(TRANSFORM_NAME);
    return flatTransform(transformerSupplier, Named.as(name), stateStoreNames);
}
|
@Test
@SuppressWarnings("deprecation")
public void shouldNotAllowNullTransformerSupplierOnFlatTransformWithNamedAndStores() {
    // Null supplier must be rejected up front with a descriptive NPE message.
    final NullPointerException exception = assertThrows(
        NullPointerException.class,
        () -> testStream.flatTransform(null, Named.as("flatTransformer"), "storeName"));
    assertThat(exception.getMessage(), equalTo("transformerSupplier can't be null"));
}
|
/**
 * Parses raw configurator config into override URLs. Supports both the legacy
 * JSON-array form and the structured ConfiguratorConfig form.
 */
public static List<URL> parseConfigurators(String rawConfig) {
    // Legacy format: a plain JSON array of URLs, e.g. [ "override://xxx", "override://xxx" ].
    List<URL> legacyUrls = parseJsonArray(rawConfig);
    if (CollectionUtils.isNotEmpty(legacyUrls)) {
        return legacyUrls;
    }
    // Structured format: a ConfiguratorConfig carrying a scope plus config items.
    ConfiguratorConfig config = parseObject(rawConfig);
    // Anything that is not application scope is treated as service scope.
    boolean applicationScope = ConfiguratorConfig.SCOPE_APPLICATION.equals(config.getScope());
    List<URL> result = new ArrayList<>();
    for (ConfigItem item : config.getConfigs()) {
        result.addAll(applicationScope
                ? appItemToUrls(item, config)
                : serviceItemToUrls(item, config));
    }
    return result;
}
|
@Test
void parseConfiguratorsAppAnyServicesTest() throws IOException {
    // Fixture declares application-scoped overrides targeting all services ("*").
    try (InputStream yamlStream = this.getClass().getResourceAsStream("/AppAnyServices.yml")) {
        List<URL> urls = ConfigParser.parseConfigurators(streamToString(yamlStream));
        Assertions.assertNotNull(urls);
        Assertions.assertEquals(2, urls.size());
        // First URL must carry the address, wildcard service and override parameters.
        URL url = urls.get(0);
        Assertions.assertEquals("127.0.0.1", url.getAddress());
        Assertions.assertEquals("*", url.getServiceInterface());
        Assertions.assertEquals(6666, url.getParameter(TIMEOUT_KEY, 0));
        Assertions.assertEquals("random", url.getParameter(LOADBALANCE_KEY));
        Assertions.assertEquals("demo-consumer", url.getApplication());
    }
}
|
/**
 * Logs at ERROR level only when that level is enabled, skipping the call
 * (and any downstream formatting work) otherwise.
 */
public static void printIfErrorEnabled(Logger logger, String s, Object... args) {
    if (!logger.isErrorEnabled()) {
        return;
    }
    logger.error(s, args);
}
|
@Test
void testPrintIfErrorEnabled() {
    // With ERROR enabled, the helper must forward message and args verbatim, once.
    Logger logger = Mockito.mock(Logger.class);
    Mockito.when(logger.isErrorEnabled()).thenReturn(true);
    LoggerUtils.printIfErrorEnabled(logger, "test", "arg1", "arg2", "arg3");
    Mockito.verify(logger, Mockito.times(1)).error("test", "arg1", "arg2", "arg3");
}
|
/**
 * Queries the Elasticsearch cluster health, waiting up to 30s for at least
 * YELLOW status. Returns {@code Optional.empty()} when the cluster cannot be
 * reached instead of propagating the I/O failure.
 */
@Override
public Optional<ClusterHealthStatus> getClusterHealthStatus() {
    try {
        ClusterHealthResponse healthResponse = getRestHighLevelClient().cluster()
            .health(new ClusterHealthRequest().waitForYellowStatus().timeout(timeValueSeconds(30)), RequestOptions.DEFAULT);
        return Optional.of(healthResponse.getStatus());
    } catch (IOException e) {
        // Connectivity problems are routine here (node down, TLS issues);
        // log at TRACE and report "unknown" rather than failing the caller.
        LOG.trace("Failed to check health status ", e);
        return Optional.empty();
    }
}
|
@Test
public void newInstance_whenKeyStorePassed_shouldCreateClient() throws GeneralSecurityException, IOException {
    // HTTPS mock server whose certificate lives in a freshly created keystore;
    // the connector must trust it via the keystore path + password it is given.
    mockServerResponse(200, JSON_SUCCESS_RESPONSE);
    Path keyStorePath = temp.newFile("keystore.p12").toPath();
    String password = "password";
    HandshakeCertificates certificate = createCertificate(mockWebServer.getHostName(), keyStorePath, password);
    mockWebServer.useHttps(certificate.sslSocketFactory(), false);
    EsConnectorImpl underTest = new EsConnectorImpl(Sets.newHashSet(HostAndPort.fromParts(mockWebServer.getHostName(),
        mockWebServer.getPort())), null, keyStorePath, password);
    assertThat(underTest.getClusterHealthStatus()).hasValue(ClusterHealthStatus.YELLOW);
}
|
@Override
public Map<String, PluginMetadataSummary> test(ViewDTO view) {
    // Resolve the search backing this view; a view whose search is gone is an
    // inconsistent persisted state and must surface loudly.
    return searchDbService.get(view.searchId())
            .map(searchRequiresParameterSupport::test)
            .orElseThrow(() -> new IllegalStateException(
                    "Search " + view.searchId() + " for view " + view + " is missing."));
}
|
@Test
public void returnsEmptyCapabilitiesIfViewDoesNotHaveParameters() {
    // A search with zero parameters must yield no required capabilities.
    final Search search = Search.builder().parameters(ImmutableSet.of()).build();
    when(searchDbService.get("searchId")).thenReturn(Optional.of(search));
    final Map<String, PluginMetadataSummary> result = this.requiresParameterSupport.test(view);
    assertThat(result).isEmpty();
}
|
/**
 * Returns true if this bounding box overlaps pBoundingBox at the given zoom.
 * Deliberately over-approximates (see the FIXME below): at low zoom it always
 * reports an overlap, and several branches are intentionally permissive to
 * work around map-replication rendering bugs.
 */
public boolean overlaps(final BoundingBox pBoundingBox, double pZoom) {
    //FIXME this is a total hack but it works around a number of issues related to vertical map
    //replication and horiztonal replication that can cause polygons to completely disappear when
    //panning
    if (pZoom < 3)
        return true;
    boolean latMatch = false;
    boolean lonMatch = false;
    //vertical wrapping detection: the other box's south edge falls inside our latitude span
    if (pBoundingBox.mLatSouth <= mLatNorth &&
        pBoundingBox.mLatSouth >= mLatSouth)
        latMatch = true;
    //normal case, non overlapping: our west edge lies inside the other box's longitude span
    if (mLonWest >= pBoundingBox.mLonWest && mLonWest <= pBoundingBox.mLonEast)
        lonMatch = true;
    //normal case, non overlapping
    // NOTE(review): the second clause compares mLonWest (not mLonEast) against the
    // other box's east edge — presumably `mLonEast <= pBoundingBox.mLonEast` was
    // intended; confirm before changing, the panning workaround may rely on it.
    if (mLonEast >= pBoundingBox.mLonWest && mLonWest <= pBoundingBox.mLonEast)
        lonMatch = true;
    //special case for when *this completely surrounds the pBoundbox
    if (mLonWest <= pBoundingBox.mLonWest &&
        mLonEast >= pBoundingBox.mLonEast &&
        mLatNorth >= pBoundingBox.mLatNorth &&
        mLatSouth <= pBoundingBox.mLatSouth)
        return true;
    //normal case, non overlapping
    // NOTE(review): `mLatNorth <= mLatSouth` compares this box's own edges and can
    // only hold for an inverted box — likely meant `mLatNorth <= pBoundingBox.mLatNorth`.
    if (mLatNorth >= pBoundingBox.mLatSouth && mLatNorth <= mLatSouth)
        latMatch = true;
    //normal case, non overlapping
    // NOTE(review): `mLatSouth <= mLatSouth` is always true, so this branch reduces
    // to `mLatSouth >= pBoundingBox.mLatSouth` — likely meant
    // `mLatSouth <= pBoundingBox.mLatNorth`; verify against testOverlaps before fixing.
    if (mLatSouth >= pBoundingBox.mLatSouth && mLatSouth <= mLatSouth)
        latMatch = true;
    if (mLonWest > mLonEast) {
        //the date line is included in the bounding box
        //we want to match lon from the dateline to the eastern bounds of the box
        //and the dateline to the western bounds of the box
        if (mLonEast <= pBoundingBox.mLonEast && pBoundingBox.mLonWest >= mLonWest)
            lonMatch = true;
        if (mLonWest >= pBoundingBox.mLonEast &&
            mLonEast <= pBoundingBox.mLonEast) {
            lonMatch = true;
            if (pBoundingBox.mLonEast < mLonWest &&
                pBoundingBox.mLonWest < mLonWest)
                lonMatch = false;
            if (pBoundingBox.mLonEast > mLonEast &&
                pBoundingBox.mLonWest > mLonEast)
                lonMatch = false;
        }
        if (mLonWest >= pBoundingBox.mLonEast &&
            mLonEast >= pBoundingBox.mLonEast) {
            lonMatch = true;
        }
        /*
        //that is completely within this
        if (mLonWest>= pBoundingBox.mLonEast &&
            mLonEast<= pBoundingBox.mLonEast) {
            lonMatch = true;
            if (pBoundingBox.mLonEast < mLonWest &&
                pBoundingBox.mLonWest < mLonWest)
                lonMatch = false;
            if (pBoundingBox.mLonEast > mLonEast &&
                pBoundingBox.mLonWest > mLonEast )
                lonMatch = false;
        }
        if (mLonWest>= pBoundingBox.mLonEast &&
            mLonEast>= pBoundingBox.mLonEast) {
            lonMatch = true;
        }*/
    }
    return latMatch && lonMatch;
}
|
@Test
public void testOverlaps() {
    // Diagrams: the test world split into quadrants; '*' marks the box under
    // test, '&' marks the probe area.
    // ________________
    // |      |       |
    // |      |       |
    // |------+-------|
    // |      |       |
    // |      |       |
    // ----------------
    //box is notated as *
    //test area is notated as &
    // ________________
    // |      |       |
    // |    ***  &    |
    // |-----*+*------|
    // |    ***       |
    // |      |       |
    // ----------------
    //box is notated as *
    //test area is notated as &
    // Every box trivially overlaps itself; zoom 4 is above the always-true cutoff.
    BoundingBox box = new BoundingBox(1, 1, -1, -1);
    Assert.assertTrue(box.overlaps(box, 4));
    // Fully disjoint boxes must not overlap.
    BoundingBox farAway = new BoundingBox(45, 45, 44, 44);
    Assert.assertTrue(farAway.overlaps(farAway, 4));
    Assert.assertFalse(box.overlaps(farAway, 4));
    farAway = new BoundingBox(1.1, 45, 1, 44);
    Assert.assertTrue(farAway.overlaps(farAway, 4));
    Assert.assertFalse(box.overlaps(farAway, 4));
    // A box that completely surrounds the test box overlaps it.
    farAway = new BoundingBox(2, 2, -2, -2);
    Assert.assertTrue(farAway.overlaps(farAway, 4));
    Assert.assertTrue(box.overlaps(farAway, 4));
    //this is completely within the test box
    farAway = new BoundingBox(0.5, 0.5, -0.5, -0.5);
    Assert.assertTrue(farAway.overlaps(farAway, 4));
    Assert.assertTrue(box.overlaps(farAway, 4));
}
|
/**
 * Scans the given annotation type for its directly-present and meta-present
 * annotations, delegating to the DIRECTLY_AND_META_ANNOTATION scanner.
 */
public static List<Annotation> scanMetaAnnotation(Class<? extends Annotation> annotationType) {
    return AnnotationScanner.DIRECTLY_AND_META_ANNOTATION.getAnnotationsIfSupport(annotationType);
}
|
@Test
public void scanMetaAnnotationTest() {
    // Meta-annotation chain under test:
    // RootAnnotation -> RootMetaAnnotation1 -> RootMetaAnnotation2 -> RootMetaAnnotation3
    //                -> RootMetaAnnotation3
    final List<Annotation> annotations = AnnotationUtil.scanMetaAnnotation(RootAnnotation.class);
    assertEquals(4, annotations.size());
    // Traversal order within a level is not fixed, so each slot accepts either
    // of the two annotations that can legally appear there.
    assertTrue(annotations.get(0).annotationType() == RootMetaAnnotation3.class ||
        annotations.get(0).annotationType() == RootMetaAnnotation1.class);
    assertTrue(annotations.get(1).annotationType() == RootMetaAnnotation1.class ||
        annotations.get(1).annotationType() == RootMetaAnnotation2.class);
    assertTrue(annotations.get(2).annotationType() == RootMetaAnnotation2.class ||
        annotations.get(2).annotationType() == RootMetaAnnotation3.class);
    assertEquals(RootMetaAnnotation3.class, annotations.get(3).annotationType());
}
|
@Override
public Optional<DatabaseAdminExecutor> create(final SQLStatementContext sqlStatementContext) {
    // Only SHOW statements get a dedicated PostgreSQL admin executor here;
    // every other statement falls through to the default pipeline.
    final SQLStatement statement = sqlStatementContext.getSqlStatement();
    return statement instanceof ShowStatement
            ? Optional.of(new PostgreSQLShowVariableExecutor((ShowStatement) statement))
            : Optional.empty();
}
|
@Test
void assertCreateWithOtherSQLStatementContextOnly() {
    // A non-SHOW statement (INSERT) must yield no admin executor.
    assertThat(new PostgreSQLAdminExecutorCreator().create(new UnknownSQLStatementContext(new PostgreSQLInsertStatement())), is(Optional.empty()));
}
|
/**
 * Validates a reservation submission request and returns the plan it targets.
 * Fails with a remote exception when the reservation id is missing, the queue
 * is not plan-managed, or the reservation definition is invalid.
 */
public Plan validateReservationSubmissionRequest(
    ReservationSystem reservationSystem, ReservationSubmissionRequest request,
    ReservationId reservationId) throws YarnException {
    // A reservation id must have been allocated before submission.
    if (reservationId == null) {
        throw RPCUtil.getRemoteException(
            "Reservation id cannot be null. Please try again specifying "
                + " a valid reservation id by creating a new reservation id.");
    }
    // Check if it is a managed queue; resolves the plan or throws.
    Plan plan = getPlanFromQueue(reservationSystem, request.getQueue(),
        AuditConstants.SUBMIT_RESERVATION_REQUEST);
    validateReservationDefinition(reservationId,
        request.getReservationDefinition(), plan,
        AuditConstants.SUBMIT_RESERVATION_REQUEST);
    return plan;
}
|
@Test
public void testSubmitReservationNormal() {
    // A well-formed request with a fresh reservation id must validate and
    // resolve to a non-null plan without throwing.
    ReservationSubmissionRequest request =
        createSimpleReservationSubmissionRequest(1, 1, 1, 5, 3);
    Plan plan = null;
    try {
        plan =
            rrValidator.validateReservationSubmissionRequest(rSystem, request,
                ReservationSystemTestUtil.getNewReservationId());
    } catch (YarnException e) {
        Assert.fail(e.getMessage());
    }
    Assert.assertNotNull(plan);
}
|
@Override
public String toString() {
    // Renders exactly the same text as the field-concatenation form.
    StringBuilder sb = new StringBuilder("ResourceConfig{");
    sb.append("url=").append(url);
    sb.append(", id='").append(id).append('\'');
    sb.append(", resourceType=").append(resourceType);
    sb.append('}');
    return sb.toString();
}
|
@Test
public void when_addNonexistentResourceWithFileAndId_then_throwsException() {
    // Given: a path that does not exist on disk
    String id = "exist";
    String path = Paths.get("/i/do/not/" + id).toString();
    File file = new File(path);
    // Then: adding it must fail with a JetException naming the bad path
    expectedException.expect(JetException.class);
    expectedException.expectMessage("Not an existing, readable file: " + path);
    // When
    config.addClasspathResource(file, id);
}
|
/**
 * Convenience overload: creates a sequence-file writer for a local {@link File}
 * by converting it to a Hadoop {@code Path} and delegating to the Path variant.
 */
public SequenceFile.Writer createWriter(File file) throws IOException {
    return createWriter(toPath(file));
}
|
@Test
public void testCreateWriteReadFileOneEntry() throws Throwable {
    final FileEntry source = ENTRY;
    // do an explicit close to help isolate any failure.
    SequenceFile.Writer writer = createWriter();
    writer.append(NullWritable.get(), source);
    writer.flush();
    writer.close();
    // Read the single entry back directly through a sequence-file reader.
    FileEntry readBack = new FileEntry();
    try (SequenceFile.Reader reader = readEntryFile()) {
        reader.next(NullWritable.get(), readBack);
    }
    Assertions.assertThat(readBack)
        .describedAs("entry read back from sequence file")
        .isEqualTo(source);
    // now use the iterator to access it.
    final RemoteIterator<FileEntry> it =
        iterateOverEntryFile();
    List<FileEntry> files = new ArrayList<>();
    foreach(it, files::add);
    Assertions.assertThat(files)
        .describedAs("iteration over the entry file")
        .hasSize(1)
        .element(0)
        .isEqualTo(source);
    // The iterator must have closed itself and counted exactly one entry.
    final EntryFileIO.EntryIterator et = (EntryFileIO.EntryIterator) it;
    Assertions.assertThat(et)
        .describedAs("entry iterator %s", et)
        .matches(p -> p.isClosed())
        .extracting(p -> p.getCount())
        .isEqualTo(1);
}
|
@Override
public AwsProxyResponse handle(Throwable ex) {
    log.error("Called exception handler for:", ex);
    // adding a print stack trace in case we have no appender or we are running inside SAM local, where need the
    // output to go to the stderr.
    ex.printStackTrace();
    // Request-validation and internal failures are our fault (500); everything
    // else is treated as an upstream problem (502).
    final boolean internalFailure =
            ex instanceof InvalidRequestEventException || ex instanceof InternalServerErrorException;
    return internalFailure
            ? new AwsProxyResponse(500, HEADERS, getErrorJson(INTERNAL_SERVER_ERROR))
            : new AwsProxyResponse(502, HEADERS, getErrorJson(GATEWAY_TIMEOUT_ERROR));
}
|
@Test
void streamHandle_InvalidResponseObjectException_jsonContentTypeHeader()
        throws IOException {
    // The stream variant must write a non-empty error response whose
    // Content-Type header is application/json.
    ByteArrayOutputStream respStream = new ByteArrayOutputStream();
    exceptionHandler.handle(new InvalidResponseObjectException(INVALID_RESPONSE_MESSAGE, null), respStream);
    assertNotNull(respStream);
    assertTrue(respStream.size() > 0);
    AwsProxyResponse resp = objectMapper.readValue(new ByteArrayInputStream(respStream.toByteArray()), AwsProxyResponse.class);
    assertNotNull(resp);
    assertTrue(resp.getMultiValueHeaders().containsKey(HttpHeaders.CONTENT_TYPE));
    assertEquals(MediaType.APPLICATION_JSON, resp.getMultiValueHeaders().getFirst(HttpHeaders.CONTENT_TYPE));
}
|
/**
 * Loads an IOD definition from the given URI. Supports three forms:
 * a classpath resource ("resource:name"), an explicit URI with a scheme,
 * or a plain file path (converted to a file: URI).
 *
 * @throws FileNotFoundException if a "resource:" URI names a missing resource
 * @throws IOException           if parsing the resolved URI fails
 */
public static IOD load(String uri) throws IOException {
    // Name the prefix so its length can never drift from the literal
    // (the previous hard-coded substring(9) had to track it by hand).
    final String resourcePrefix = "resource:";
    if (uri.startsWith(resourcePrefix)) {
        try {
            uri = ResourceLocator.getResource(uri.substring(resourcePrefix.length()), IOD.class);
        } catch (NullPointerException npe) {
            // ResourceLocator signals a missing classpath resource via NPE;
            // translate it to the conventional I/O exception.
            throw new FileNotFoundException(uri);
        }
    } else if (uri.indexOf(':') < 2) {
        // No scheme (a single char before ':' is a Windows drive letter,
        // not a scheme): treat the argument as a plain file path.
        uri = new File(uri).toURI().toString();
    }
    IOD iod = new IOD();
    iod.parse(uri);
    iod.trimToSize();
    return iod;
}
|
@Test
public void testValidateCode() throws Exception {
    // Build a small SR-style dataset (concept name + nested content node with
    // its own concept name/code) and validate it against the code IOD.
    IOD iod = IOD.load("resource:code-iod.xml");
    Attributes attrs = new Attributes(2);
    attrs.newSequence(Tag.ConceptNameCodeSequence, 1).add(
        new Code("CV-9991", "99DCM4CHE", null, "CM-9991").toItem());
    Attributes contentNode = new Attributes(2);
    contentNode.newSequence(Tag.ConceptNameCodeSequence, 1).add(
        new Code("CV-9992", "99DCM4CHE", null, "CM-9992").toItem());
    contentNode.newSequence(Tag.ConceptCodeSequence, 1).add(
        new Code("CV-9993", "99DCM4CHE", null, "CM-9993").toItem());
    attrs.newSequence(Tag.ContentSequence, 1).add(contentNode);
    ValidationResult result = attrs.validate(iod);
    assertTrue(result.isValid());
}
|
/**
 * Returns the DICOM keyword for {@code tag}, consulting the element dictionary
 * of the given private creator ({@code null} selects the standard dictionary).
 */
public static String keywordOf(int tag, String privateCreator) {
    return getElementDictionary(privateCreator).keywordOf(tag);
}
|
@Test
public void testKeywordOf() {
    // Parallel fixture arrays: each standard tag must map to its known keyword.
    for (int i = 0; i < TAGS.length; i++)
        assertEquals(KEYWORDS[i],
            ElementDictionary.keywordOf(TAGS[i], null));
}
|
@Override
public Number lookup(String name) {
    // The service-specific threshold wins; otherwise use the configured "default".
    final int specific = dictionary.getOrDefault(name, -1);
    final int resolved = specific >= 0 ? specific : dictionary.getOrDefault("default", -1);
    if (resolved < 0) {
        // Neither entry is configured: fall back to the built-in reserved value.
        log.warn("Pick up system reserved threshold {}ms because of config missing", SYSTEM_RESERVED_THRESHOLD);
        return SYSTEM_RESERVED_THRESHOLD;
    }
    if (log.isDebugEnabled()) {
        log.debug("Apdex threshold of {} is {}ms", name, resolved);
    }
    return resolved;
}
|
@Test
@Timeout(20)
public void testLookupOfDynamicUpdate() throws InterruptedException {
    // Start a mock config watcher that pushes new thresholds every 3s and
    // poll until the dynamic update for "foo" lands (initial value is 500).
    ConfigWatcherRegister register = new MockConfigWatcherRegister(3);
    when(provider.name()).thenReturn("default");
    ApdexThresholdConfig config = new ApdexThresholdConfig(provider);
    register.registerConfigChangeWatcher(config);
    register.start();
    while (config.lookup("foo").intValue() == 500) {
        Thread.sleep(2000);
    }
    // After the update: "foo" has its own threshold, unknown names fall back
    // to the updated "default" entry.
    assertThat(config.lookup("foo")).isEqualTo(200);
    assertThat(config.lookup("default")).isEqualTo(1000);
    assertThat(config.lookup("bar")).isEqualTo(1000);
}
|
/**
 * Creates a FOP producer bound to this endpoint's factory and the extended
 * format string of the configured output type.
 */
@Override
public Producer createProducer() throws Exception {
    return new FopProducer(this, fopFactory, outputType.getFormatExtended());
}
|
@Test
public void setPDFRenderingMetadataPerDocument() throws Exception {
    if (!canTest()) {
        // cannot run on CI
        return;
    }
    // Render a minimal XSL-FO document with a per-exchange "Creator" header
    // and verify the header ends up in the generated PDF's metadata.
    Endpoint endpoint = context().getEndpoint("fop:pdf");
    Producer producer = endpoint.createProducer();
    Exchange exchange = new DefaultExchange(context);
    exchange.getIn().setHeader("CamelFop.Render.Creator", "Test User");
    exchange.getIn().setBody(FopHelper.decorateTextWithXSLFO("Test Content"));
    producer.process(exchange);
    PDDocument document = getDocumentFrom(exchange);
    String creator = FopHelper.getDocumentMetadataValue(document, COSName.CREATOR);
    assertEquals("Test User", creator);
}
|
/**
 * Returns true if the address matches at least one of the given interface
 * masks; a null or empty collection matches nothing.
 */
public static boolean matchAnyInterface(String address, Collection<String> interfaces) {
    if (interfaces == null || interfaces.isEmpty()) {
        return false;
    }
    return interfaces.stream().anyMatch(mask -> matchInterface(address, mask));
}
|
@Test
public void testMatchAnyInterface() {
    // Exact match in the list succeeds; null/empty lists and a wildcard mask
    // for a different subnet must all fail.
    assertTrue(AddressUtil.matchAnyInterface("10.235.194.23", asList("10.235.194.23", "10.235.193.121")));
    assertFalse(AddressUtil.matchAnyInterface("10.235.194.23", null));
    assertFalse(AddressUtil.matchAnyInterface("10.235.194.23", Collections.emptyList()));
    assertFalse(AddressUtil.matchAnyInterface("10.235.194.23", singletonList("10.235.193.*")));
}
|
/**
 * Joins the given segments with '$' and encodes the result as URL-safe Base64,
 * producing a cloud id string.
 */
public static String cloudIdEncode(String... args) {
    final String joinedArgs = String.join("$", args);
    // Pin the charset: the no-arg getBytes() uses the platform default, which
    // could yield different cloud ids on differently-configured hosts.
    return Base64.getUrlEncoder().encodeToString(joinedArgs.getBytes(java.nio.charset.StandardCharsets.UTF_8));
}
|
@Test
public void testThrowExceptionWhenAtLeatOneSegmentIsEmpty() {
    // An empty middle segment still encodes, but decoding inside the
    // CloudSettingId constructor must reject it with a descriptive error.
    String[] raw = new String[] {"first", "", "third"};
    String encoded = CloudSettingId.cloudIdEncode(raw);
    Exception thrownException = assertThrows(org.jruby.exceptions.ArgumentError.class, () -> {
        new CloudSettingId(encoded);
    });
    assertThat(thrownException.getMessage(), containsString("Cloud Id, after decoding, is invalid. Format: '<segment1>$<segment2>$<segment3>'. Received: \"" + String.join("$", raw) + "\"."));
}
|
/**
 * Builds the root {@code StreamDescriptor} for the given ORC type tree:
 * walks the types from the root, collecting a stream-id -> property map,
 * and wraps it together with the data source.
 */
public static StreamDescriptor createStreamDescriptor(List<OrcType> types, OrcDataSource dataSource)
{
    ImmutableMap.Builder<Integer, StreamProperty> propertiesBuilder = ImmutableMap.builderWithExpectedSize(types.size());
    // Recursively registers every column/field starting at the root type.
    addOrcType("", "", ROOT_ID, types, propertiesBuilder);
    AllStreams allStreams = new AllStreams(dataSource, propertiesBuilder.build());
    return new StreamDescriptor(ROOT_ID, DEFAULT_SEQUENCE_ID, allStreams);
}
|
@Test
public void testBuilder()
{
    // Expected property map mirrors the fixture type tree:
    // root(1,2) -> column1, column2(3,4) -> key, value(5) -> item(6,7) -> inner1, inner2.
    List<OrcType> orcTypes = getOrcTypes();
    StreamDescriptor streamDescriptor = createStreamDescriptor(orcTypes, DUMMY_ORC_DATA_SOURCE);
    StreamProperty rootProperty = new StreamProperty("", orcTypes.get(0), "", ImmutableList.of(1, 2));
    StreamProperty column1Property = new StreamProperty(".column1", orcTypes.get(1), "column1", ImmutableList.of());
    StreamProperty column2Property = new StreamProperty(".column2", orcTypes.get(2), "column2", ImmutableList.of(3, 4));
    StreamProperty mapKeyProperty = new StreamProperty(".column2.key", orcTypes.get(3), "key", ImmutableList.of());
    StreamProperty mapValueProperty = new StreamProperty(".column2.value", orcTypes.get(4), "value", ImmutableList.of(5));
    StreamProperty listElementProperty = new StreamProperty(".column2.value.item", orcTypes.get(5), "item", ImmutableList.of(6, 7));
    StreamProperty inner1Property = new StreamProperty(".column2.value.item.inner1", orcTypes.get(6), "inner1", ImmutableList.of());
    StreamProperty inner2Property = new StreamProperty(".column2.value.item.inner2", orcTypes.get(7), "inner2", ImmutableList.of());
    ImmutableMap.Builder<Integer, StreamProperty> streamToPropertyMapBuilder = ImmutableMap.builder();
    streamToPropertyMapBuilder.put(0, rootProperty);
    streamToPropertyMapBuilder.put(1, column1Property);
    streamToPropertyMapBuilder.put(2, column2Property);
    streamToPropertyMapBuilder.put(3, mapKeyProperty);
    streamToPropertyMapBuilder.put(4, mapValueProperty);
    streamToPropertyMapBuilder.put(5, listElementProperty);
    streamToPropertyMapBuilder.put(6, inner1Property);
    streamToPropertyMapBuilder.put(7, inner2Property);
    Map<Integer, StreamProperty> streamPropertyMap = streamToPropertyMapBuilder.build();
    verifyStreamDescriptor(streamDescriptor, 0, 0, streamPropertyMap);
    // Duplicating with a sequence id must preserve the property map.
    StreamDescriptor sequenceStreamDescriptor = streamDescriptor.duplicate(10);
    verifyStreamDescriptor(sequenceStreamDescriptor, 0, 10, streamPropertyMap);
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.