| focal_method (string, lengths 13 – 60.9k) | test_case (string, lengths 25 – 109k) |
|---|---|
@Description("Laplace cdf given mean, scale parameters and value")
@ScalarFunction
@SqlType(StandardTypes.DOUBLE)
public static double laplaceCdf(
        @SqlType(StandardTypes.DOUBLE) double mean,
        @SqlType(StandardTypes.DOUBLE) double scale,
        @SqlType(StandardTypes.DOUBLE) double value)
{
    // The Laplace distribution is undefined for a non-positive scale; reject it up front.
    checkCondition(scale > 0, INVALID_FUNCTION_ARGUMENT, "laplaceCdf Function: scale must be greater than 0");
    // Evaluate the CDF on a throwaway distribution instance (null RNG: no sampling is performed).
    return new LaplaceDistribution(null, mean, scale).cumulativeProbability(value);
}
|
// Verifies the laplace_cdf SQL function: known distribution values and
// rejection of non-positive scale parameters.
@Test
public void testLaplaceCdf()
{
// At value == mean the Laplace CDF is exactly 0.5, for any scale.
assertFunction("laplace_cdf(4, 1, 4)", DOUBLE, 0.5);
assertFunction("laplace_cdf(4, 2, 4.0)", DOUBLE, 0.5);
// Points offset from the mean, rounded to avoid floating-point noise.
assertFunction("round(laplace_cdf(4, 2, 4.0 - 0.4463), 2)", DOUBLE, 0.4);
assertFunction("round(laplace_cdf(-4, 2, -4.0 + 0.4463), 4)", DOUBLE, 0.6);
// Scale must be strictly positive.
assertInvalidFunction("laplace_cdf(5, 0, 10)", "laplaceCdf Function: scale must be greater than 0");
assertInvalidFunction("laplace_cdf(5, -1, 10)", "laplaceCdf Function: scale must be greater than 0");
}
|
/**
 * Renders a byte count as its plain base-10 string form, with no unit
 * scaling or suffix.
 *
 * @param bytes the number of bytes
 * @return the decimal textual representation of {@code bytes}
 */
protected static String formatMemory(long bytes) {
    return String.valueOf(bytes);
}
|
// formatMemory performs no unit scaling: every byte count is rendered
// verbatim in decimal, including values round-tripped through parseMemory's
// binary-suffix ("Ei") notation.
@Test
public void testFormatMemory() {
assertThat(formatMemory(0), is("0"));
assertThat(formatMemory(1), is("1"));
assertThat(formatMemory(1023), is("1023"));
assertThat(formatMemory(1024), is("1024"));
assertThat(formatMemory(1000), is("1000"));
assertThat(formatMemory(2048), is("2048"));
assertThat(formatMemory(2000), is("2000"));
assertThat(formatMemory(2048 * 1024), is("2097152"));
assertThat(formatMemory(2048 * 2000), is("4096000"));
assertThat(formatMemory(2000 * 2000), is("4000000"));
assertThat(formatMemory(1_000_000_000_000_000L), is("1000000000000000"));
assertThat(formatMemory(1_000_000_000_000_000_000L), is("1000000000000000000"));
// Round-trips through parseMemory: binary suffixes expand to raw byte counts.
assertThat(formatMemory(parseMemory("1Ei")), is("1125899906842624"));
assertThat(formatMemory(parseMemory("1024Ei")), is("1152921504606846976"));
assertThat(formatMemory(524288000L), is("524288000"));
}
|
/**
 * Renames the generated constructor and rewrites the "fileName" and "name"
 * arguments of its explicit {@code super(...)} invocation as quoted string
 * literals.
 *
 * @param generatedClassName     name the constructor is renamed to
 * @param constructorDeclaration constructor AST node to mutate in place
 * @param fileName               value for the super "fileName" argument
 * @param name                   value for the super "name" argument
 */
public static void setKiePMMLConstructorSuperNameInvocation(final String generatedClassName,
                                                            final ConstructorDeclaration constructorDeclaration,
                                                            final String fileName,
                                                            final String name) {
    constructorDeclaration.setName(generatedClassName);
    final BlockStmt constructorBody = constructorDeclaration.getBody();
    // The constructor must begin with an explicit super(...) call; fail fast otherwise.
    final ExplicitConstructorInvocationStmt superInvocation = CommonCodegenUtils
            .getExplicitConstructorInvocationStmt(constructorBody)
            .orElseThrow(() -> new KiePMMLException(String.format(MISSING_CONSTRUCTOR_IN_BODY, constructorBody)));
    CommonCodegenUtils.setExplicitConstructorInvocationStmtArgument(superInvocation, "fileName", String.format("\"%s\"", fileName));
    CommonCodegenUtils.setExplicitConstructorInvocationStmtArgument(superInvocation, "name", String.format("\"%s\"", name));
}
|
// Invokes the codegen helper on the shared constructorDeclaration fixture and
// delegates verification of the rewritten super(...) call to the common helper.
@Test
void setKiePMMLConstructorSuperNameInvocation() {
String generatedClassName = "generatedClassName";
String fileName = "fileName";
String name = "newName";
org.kie.pmml.compiler.commons.codegenfactories.KiePMMLModelFactoryUtils.setKiePMMLConstructorSuperNameInvocation(generatedClassName,
constructorDeclaration,
fileName,
name);
commonVerifySuperInvocation(generatedClassName, fileName, name);
}
|
/**
 * Sleeps (via the stop latch) before the next retry attempt, using exponential
 * backoff capped by {@code errorMaxDelayInMillis} and clipped so the wait never
 * extends past {@code deadline}.
 *
 * @param attempt  1-based retry attempt number
 * @param deadline absolute time (in {@code time.milliseconds()} terms) after
 *                 which no further delay is applied
 */
void backoff(int attempt, long deadline) {
    int numRetry = attempt - 1;
    // Exponential backoff: the minimum delay doubled once per prior attempt.
    long delay = RETRIES_DELAY_MIN_MS << numRetry;
    if (delay > errorMaxDelayInMillis) {
        // Past the cap, pick a jittered delay below the maximum.
        delay = ThreadLocalRandom.current().nextLong(errorMaxDelayInMillis);
    }
    long currentTime = time.milliseconds();
    if (delay + currentTime > deadline) {
        // Never sleep past the deadline; it may already have passed, hence the floor at 0.
        delay = Math.max(0, deadline - currentTime);
    }
    log.debug("Sleeping for up to {} millis", delay);
    try {
        // The latch doubles as a shutdown signal: counting it down wakes us early.
        stopRequestedLatch.await(delay, TimeUnit.MILLISECONDS);
    } catch (InterruptedException e) {
        // Fix: restore the interrupt flag instead of silently swallowing it,
        // so callers up the stack can observe the interruption.
        Thread.currentThread().interrupt();
    }
}
|
// Exercises the backoff schedule end to end: exponential doubling
// (300/600/1200/2400 ms), clipping at the 5000 ms deadline (the 5th call may
// only wait the 500 ms remaining), and the guard against a negative timeout
// once the deadline has already passed (the 6th call waits 0 ms).
@Test
public void testBackoffLimit() throws Exception {
MockTime time = new MockTime(0, 0, 0);
CountDownLatch exitLatch = mock(CountDownLatch.class);
RetryWithToleranceOperator<ConsumerRecord<byte[], byte[]>> retryWithToleranceOperator = new RetryWithToleranceOperator<>(5, 5000, NONE, time, errorHandlingMetrics, exitLatch);
// Each stubbed await advances the mock clock by exactly the requested delay.
when(exitLatch.await(300, TimeUnit.MILLISECONDS)).thenAnswer(i -> {
time.sleep(300);
return false;
});
when(exitLatch.await(600, TimeUnit.MILLISECONDS)).thenAnswer(i -> {
time.sleep(600);
return false;
});
when(exitLatch.await(1200, TimeUnit.MILLISECONDS)).thenAnswer(i -> {
time.sleep(1200);
return false;
});
when(exitLatch.await(2400, TimeUnit.MILLISECONDS)).thenAnswer(i -> {
time.sleep(2400);
return false;
});
when(exitLatch.await(500, TimeUnit.MILLISECONDS)).thenAnswer(i -> {
time.sleep(500);
return false;
});
when(exitLatch.await(0, TimeUnit.MILLISECONDS)).thenReturn(false);
retryWithToleranceOperator.backoff(1, 5000);
retryWithToleranceOperator.backoff(2, 5000);
retryWithToleranceOperator.backoff(3, 5000);
retryWithToleranceOperator.backoff(4, 5000);
retryWithToleranceOperator.backoff(5, 5000);
// Simulate a small delay between calculating the deadline, and backing off
time.sleep(1);
// We may try to begin backing off after the deadline has already passed; make sure
// that we don't wait with a negative timeout
retryWithToleranceOperator.backoff(6, 5000);
verifyNoMoreInteractions(exitLatch);
}
|
/**
 * Presence matcher: the rule matches when the named field exists and, for
 * string fields, contains non-whitespace characters. The rule's inverted
 * flag flips the outcome.
 *
 * @param msg  message under evaluation
 * @param rule stream rule naming the field to probe
 * @return whether the rule matches, respecting inversion
 */
@Override
public boolean match(Message msg, StreamRule rule) {
    Object rawField = msg.getField(rule.getField());
    if (rawField == null) {
        // Absent field: only an inverted rule matches.
        return rule.getInverted();
    }
    if (rawField instanceof String field) {
        // Present string: must be non-blank; XOR applies the inversion.
        // (Fix: return the primitive directly instead of via a boxed Boolean.)
        return rule.getInverted() ^ !field.trim().isEmpty();
    }
    // Any present non-string field counts as "present".
    return !rule.getInverted();
}
|
// A non-inverted PRESENCE rule on an existing "message" field must match.
@Test
public void testBasicMatch() throws Exception {
StreamRule rule = getSampleRule();
rule.setField("message");
rule.setType(StreamRuleType.PRESENCE);
rule.setInverted(false);
Message message = getSampleMessage();
StreamRuleMatcher matcher = getMatcher(rule);
Boolean result = matcher.match(message, rule);
assertTrue(result);
}
|
/**
 * Resolves the given abstract {@link Path} to a local {@link java.nio.file.Path}
 * by delegating to the string-based overload with the absolute path.
 *
 * @param file the abstract path to resolve
 * @return the corresponding NIO path
 * @throws LocalAccessDeniedException if the local filesystem refuses access
 */
public java.nio.file.Path toPath(final Path file) throws LocalAccessDeniedException {
return this.toPath(file.getAbsolute());
}
|
// Resolving both Unix- and Windows-style absolute paths must succeed; the
// regex assertions document the stripping of the leading "/" before a
// Windows drive letter (e.g. "/C:\..." -> "C:\...").
@Test
public void toPath() throws Exception {
final LocalSession session = new LocalSession(new Host(new LocalProtocol(), new LocalProtocol().getDefaultHostname()));
session.open(new DisabledProxyFinder(), new DisabledHostKeyCallback(), new DisabledLoginCallback(), new DisabledCancelCallback());
assertNotNull(session.toPath("/Users/username"));
assertNotNull(session.toPath("/C:\\Users\\Administrator"));
assertEquals("C:\\Users\\Administrator", "/C:\\Users\\Administrator".replaceFirst("^/(.:[/\\\\])", "$1"));
assertEquals("C:/Users/Administrator", "/C:/Users/Administrator".replaceFirst("^/(.:[/\\\\])", "$1"));
session.close();
}
|
/**
 * Initializes table statistics at CREATE TABLE time. Views are skipped.
 * If the table carries a stats-setup dictionary entry, the column-stats state
 * is marked TRUE for the requested (or all) columns when the table directory
 * is new/empty or the table is Iceberg. Finally, unless suppressed via the
 * environment context, the slow stats-gathering path is invoked.
 *
 * @throws MetaException if checking the table directory fails
 */
public static void updateTableStatsForCreateTable(Warehouse wh, Database db, Table tbl,
EnvironmentContext envContext, Configuration conf, Path tblPath, boolean newDir)
throws MetaException {
// If the created table is a view, skip generating the stats
if (MetaStoreUtils.isView(tbl)) {
return;
}
assert tblPath != null;
if (tbl.isSetDictionary() && tbl.getDictionary().getValues() != null) {
// Consume (remove) the one-shot stats-setup payload from the dictionary.
List<ByteBuffer> values = tbl.getDictionary().getValues().
remove(StatsSetupConst.STATS_FOR_CREATE_TABLE);
ByteBuffer buffer;
if (values != null && values.size() > 0 && (buffer = values.get(0)).hasArray()) {
String val = new String(buffer.array(), StandardCharsets.UTF_8);
StatsSetupConst.ColumnStatsSetup statsSetup = StatsSetupConst.ColumnStatsSetup.parseStatsSetup(val);
if (statsSetup.enabled) {
try {
// For an Iceberg table, a new snapshot is generated, so any leftover files would be ignored
// Set the column stats true in order to make it merge-able
if (newDir || statsSetup.isIcebergTable ||
wh.isEmptyDir(tblPath, FileUtils.HIDDEN_FILES_PATH_FILTER)) {
List<String> columns = statsSetup.columnNames;
if (columns == null || columns.isEmpty()) {
// No explicit column list: default to every column of the table.
columns = getColumnNames(tbl.getSd().getCols());
}
StatsSetupConst.setStatsStateForCreateTable(tbl.getParameters(), columns, StatsSetupConst.TRUE);
}
} catch (IOException e) {
LOG.error("Error while checking the table directory: " + tblPath, e);
throw ExceptionHandler.newMetaException(e);
}
}
}
}
// Auto-gather can be vetoed per-operation through the environment context.
if (MetastoreConf.getBoolVar(conf, MetastoreConf.ConfVars.STATS_AUTO_GATHER) &&
!getBooleanEnvProp(envContext, StatsSetupConst.DO_NOT_UPDATE_STATS)) {
LOG.debug("Calling updateTableStatsSlow for table {}.{}.{}", tbl.getCatName(), tbl.getDbName(), tbl.getTableName());
updateTableStatsSlow(db, tbl, wh, newDir, false, envContext);
}
}
|
// Setting DO_NOT_UPDATE_STATS=true in the environment context must suppress
// the slow stats-calculation path; detected via the absence of its log line.
@Test
public void testUpdateTableStatsForCreateTableDoesNotInvokeStatsCalc() throws TException {
// DO_NOT_UPDATE_STATS in env context is set to true => doesn't invoke stats calculation
Map<String, String> params = new HashMap<>(paramsWithStats);
Warehouse wh = mock(Warehouse.class);
Table tbl = new TableBuilder()
.setDbName(DB_NAME)
.setTableName(TABLE_NAME)
.addCol("id", "int")
.setTableParams(params)
.build(null);
EnvironmentContext env = new EnvironmentContext();
env.putToProperties(StatsSetupConst.DO_NOT_UPDATE_STATS, StatsSetupConst.TRUE);
MetaStoreServerUtils.updateTableStatsForCreateTable(wh, db, tbl, env,
MetastoreConf.newMetastoreConf(), new Path("/tmp/0"), false);
assertFalse(messageWasLogged(MessageFormat.format(
STATS_CALC_CALL_LOG_MSG_FORMAT, tbl.getCatName(), tbl.getDbName(), tbl.getTableName())));
}
|
/**
 * Reads HTTP-style header lines from the stream until a blank line (or EOF),
 * returning a map of lower-cased header names to trimmed, non-empty values.
 * Lines without a name before the colon are skipped.
 *
 * @param in stream positioned at the first header line
 * @return parsed headers (possibly empty); never null
 * @throws IOException if reading from the stream fails
 */
protected static Map<String, String> parsePostHeaders(InputStream in) throws IOException {
    Map<String, String> headerMap = new HashMap<>(4);
    String line;
    while (true) {
        line = readLine(in);
        if (line == null || line.length() == 0) {
            // Blank line (or EOF) terminates the header section.
            return headerMap;
        }
        int index = line.indexOf(':');
        if (index < 1) {
            // No header name before the colon (or no colon at all): skip the line.
            continue;
        }
        // Fix: lower-case with a fixed locale so parsing does not depend on the
        // default locale (e.g. the Turkish dotless-i problem).
        String headerName = line.substring(0, index).trim().toLowerCase(java.util.Locale.ROOT);
        String headerValue = line.substring(index + 1).trim();
        if (headerValue.length() > 0) {
            headerMap.put(headerName, headerValue);
        }
    }
}
|
// Header parsing: empty input yields an empty map; header names are
// lower-cased and values are trimmed of surrounding whitespace.
@Test
public void parsePostHeaders() throws IOException {
Map<String, String> map;
map = HttpEventTask.parsePostHeaders(new ByteArrayInputStream("".getBytes()));
assertTrue(map.size() == 0);
map = HttpEventTask.parsePostHeaders(new ByteArrayInputStream("Content-type: test \r\n\r\nbody".getBytes()));
assertEquals("test", map.get("content-type"));
map = HttpEventTask.parsePostHeaders(new ByteArrayInputStream("Content-Encoding: utf-8\r\n\r\nbody".getBytes()));
assertEquals("utf-8", map.get("content-encoding"));
}
|
/**
 * Convenience overload delegating to the full {@code httpRequest} with the
 * two trailing arguments defaulted to {@code null}.
 * NOTE(review): the meaning of the two null arguments is not visible in this
 * chunk — confirm against the seven-argument overload.
 */
public <T> HttpResponse<T> httpRequest(String url, String method, HttpHeaders headers, Object requestBodyData,
TypeReference<T> responseFormat) {
return httpRequest(url, method, headers, requestBodyData, responseFormat, null, null);
}
|
// The thrown ConnectRestException must carry the HTTP status code of the
// response while preserving the error code and message from the JSON body
// (which may differ from the HTTP status, as here: CONFLICT vs GONE).
@Test
public void testStatusCodeAndErrorMessagePreserved() throws Exception {
int statusCode = Response.Status.CONFLICT.getStatusCode();
ErrorMessage errorMsg = new ErrorMessage(Response.Status.GONE.getStatusCode(), "Some Error Message");
Request req = mock(Request.class);
ContentResponse resp = mock(ContentResponse.class);
when(resp.getContentAsString()).thenReturn(toJsonString(errorMsg));
setupHttpClient(statusCode, req, resp);
ConnectRestException e = assertThrows(ConnectRestException.class, () -> httpRequest(
httpClient, MOCK_URL, TEST_METHOD, TEST_TYPE, TEST_SIGNATURE_ALGORITHM
));
assertEquals(statusCode, e.statusCode());
assertEquals(errorMsg.errorCode(), e.errorCode());
assertEquals(errorMsg.message(), e.getMessage());
}
|
/**
 * Instantiates and configures the {@link HeaderConverter} named by the given
 * config property, loading its class according to {@code classLoaderUsage}.
 * Returns null only for CURRENT_CLASSLOADER when the property is not set;
 * otherwise either returns a configured converter or throws
 * {@link ConnectException}.
 */
public HeaderConverter newHeaderConverter(AbstractConfig config, String classPropertyName, ClassLoaderUsage classLoaderUsage) {
Class<? extends HeaderConverter> klass = null;
switch (classLoaderUsage) {
case CURRENT_CLASSLOADER:
if (!config.originals().containsKey(classPropertyName)) {
// This connector configuration does not define the header converter via the specified property name
return null;
}
// Attempt to load first with the current classloader, and plugins as a fallback.
// Note: we can't use config.getConfiguredInstance because we have to remove the property prefixes
// before calling config(...)
klass = pluginClassFromConfig(config, classPropertyName, HeaderConverter.class, scanResult.headerConverters());
break;
case PLUGINS:
// Attempt to load with the plugin class loader, which uses the current classloader as a fallback.
// Note that there will always be at least a default header converter for the worker
String converterClassOrAlias = config.getClass(classPropertyName).getName();
try {
klass = pluginClass(
delegatingLoader,
converterClassOrAlias,
HeaderConverter.class
);
} catch (ClassNotFoundException e) {
throw new ConnectException(
"Failed to find any class that implements HeaderConverter and which name matches "
+ converterClassOrAlias
+ ", available header converters are: "
+ pluginNames(scanResult.headerConverters())
);
}
}
// Reached when class lookup produced nothing (and for any unhandled enum value).
if (klass == null) {
throw new ConnectException("Unable to initialize the HeaderConverter specified in '" + classPropertyName + "'");
}
// Strip the property prefix before handing the remaining keys to configure().
String configPrefix = classPropertyName + ".";
Map<String, Object> converterConfig = config.originalsWithPrefix(configPrefix);
converterConfig.put(ConverterConfig.TYPE_CONFIG, ConverterType.HEADER.getName());
log.debug("Configuring the header converter with configuration keys:{}{}", System.lineSeparator(), converterConfig.keySet());
HeaderConverter plugin;
// Instantiate and configure under the plugin's own classloader.
try (LoaderSwap loaderSwap = withClassLoader(klass.getClassLoader())) {
plugin = newPlugin(klass);
plugin.configure(converterConfig);
}
return plugin;
}
|
// Without an explicit header.converter property, the CURRENT_CLASSLOADER path
// returns null while the PLUGINS path falls back to the worker default
// (SimpleHeaderConverter).
@Test
public void shouldInstantiateAndConfigureDefaultHeaderConverter() {
props.remove(WorkerConfig.HEADER_CONVERTER_CLASS_CONFIG);
createConfig();
// Because it's not explicitly set on the supplied configuration, the logic to use the current classloader for the connector
// will exit immediately, and so this method always returns null
HeaderConverter headerConverter = plugins.newHeaderConverter(config,
WorkerConfig.HEADER_CONVERTER_CLASS_CONFIG,
ClassLoaderUsage.CURRENT_CLASSLOADER);
assertNull(headerConverter);
// But we should always find it (or the worker's default) when using the plugins classloader ...
headerConverter = plugins.newHeaderConverter(config,
WorkerConfig.HEADER_CONVERTER_CLASS_CONFIG,
ClassLoaderUsage.PLUGINS);
assertNotNull(headerConverter);
assertInstanceOf(SimpleHeaderConverter.class, headerConverter);
}
|
/**
 * Registers the {@link ContextPathPlugin} as a Spring bean.
 */
@Bean
public ShenyuPlugin contextPathPlugin() {
return new ContextPathPlugin();
}
|
// Boots a minimal Spring context with the plugin auto-configuration and
// checks the contextPathPlugin bean exists and reports the expected name.
@Test
public void testContextPathPlugin() {
new ApplicationContextRunner()
.withConfiguration(AutoConfigurations.of(ContextPathPluginConfiguration.class))
.withBean(ContextPathPluginConfigurationTest.class)
.withPropertyValues("debug=true")
.run(context -> {
ShenyuPlugin plugin = context.getBean("contextPathPlugin", ShenyuPlugin.class);
assertNotNull(plugin);
assertThat(plugin.named()).isEqualTo(PluginEnum.CONTEXT_PATH.getName());
});
}
|
/**
 * Runs the wrapped health-check task through the interceptor chain.
 * Exceptions are logged and swallowed so a failing task cannot kill the
 * executing thread.
 * NOTE(review): failures are logged at INFO level — confirm WARN is not intended.
 */
@Override
public void run() {
try {
interceptorChain.doInterceptor(task);
} catch (Exception e) {
Loggers.SRV_LOG.info("Interceptor health check task {} failed", task.getTaskId(), e);
}
}
|
// With health checks disabled, running the task wrapper must never consult
// the distro mapper for responsibility.
@Test
void testRunWithDisableHealthCheck() {
when(switchDomain.isHealthCheckEnabled()).thenReturn(false);
taskWrapper.run();
verify(distroMapper, never()).responsible(client.getResponsibleId());
}
|
/**
 * Skips up to {@code n} bytes. Non-positive counts, and counts of
 * {@link Integer#MAX_VALUE} or more, are treated as no-ops and return 0,
 * mirroring the int-based {@code skipBytes} contract.
 *
 * @param n requested number of bytes to skip
 * @return the number of bytes actually skipped
 */
@Override
public final long skip(long n) {
    if (0 < n && n < Integer.MAX_VALUE) {
        return skipBytes((int) n);
    }
    return 0L;
}
|
// skip contract: negative counts and Integer.MAX_VALUE return 0; a small
// positive count skips exactly that many bytes.
@Test
public void testSkip() {
long s1 = in.skip(-1);
long s2 = in.skip(Integer.MAX_VALUE);
long s3 = in.skip(1);
assertEquals(0, s1);
assertEquals(0, s2);
assertEquals(1, s3);
}
|
/**
 * Allocates the archive error-count counter. The temp buffer is laid out as
 * [key: archiveId (8 bytes)][label: "Archive Errors" + archiveId + version info],
 * and both regions are passed to {@code Aeron.addCounter} from the same buffer.
 *
 * @param aeron      client used to allocate the counter
 * @param tempBuffer scratch buffer for key and label encoding
 * @param archiveId  id stored as the counter key and appended to the label
 * @return the allocated counter
 */
static Counter allocateErrorCounter(
final Aeron aeron,
final MutableDirectBuffer tempBuffer,
final long archiveId)
{
int index = 0;
// Key region: just the archive id.
tempBuffer.putLong(index, archiveId);
index += SIZE_OF_LONG;
final int keyLength = index;
// Label region starts immediately after the key.
index += tempBuffer.putStringWithoutLengthAscii(index, "Archive Errors");
index += appendArchiveIdLabel(tempBuffer, index, archiveId);
index += AeronCounters.appendVersionInfo(tempBuffer, index, ArchiveVersion.VERSION, ArchiveVersion.GIT_SHA);
return aeron.addCounter(
AeronCounters.ARCHIVE_ERROR_COUNT_TYPE_ID,
tempBuffer,
0,
keyLength,
tempBuffer,
keyLength,
index - keyLength);
}
|
// Checks both the interaction (addCounter called once, after clientId) and
// the buffer contents: key = archiveId at offset 0, label starting at
// SIZE_OF_LONG with the expected text and version info.
@Test
void allocateErrorCounter()
{
final long archiveId = 24623864;
final String expectedLabel = "Archive Errors - archiveId=" + archiveId + " " +
AeronCounters.formatVersionInfo(ArchiveVersion.VERSION, ArchiveVersion.GIT_SHA);
final Aeron aeron = mock(Aeron.class);
final MutableDirectBuffer tempBuffer = new UnsafeBuffer(new byte[200]);
final Counter counter = mock(Counter.class);
when(aeron.clientId()).thenReturn(archiveId);
when(aeron.addCounter(
AeronCounters.ARCHIVE_ERROR_COUNT_TYPE_ID,
tempBuffer,
0,
SIZE_OF_LONG,
tempBuffer,
SIZE_OF_LONG,
expectedLabel.length()))
.thenReturn(counter);
final Counter result = ArchiveCounters.allocateErrorCounter(aeron, tempBuffer, aeron.clientId());
assertSame(counter, result);
final InOrder inOrder = inOrder(aeron);
inOrder.verify(aeron).clientId();
inOrder.verify(aeron).addCounter(anyInt(), any(), anyInt(), anyInt(), any(), anyInt(), anyInt());
inOrder.verifyNoMoreInteractions();
assertEquals(archiveId, tempBuffer.getLong(0));
assertEquals(expectedLabel, tempBuffer.getStringWithoutLengthAscii(SIZE_OF_LONG, expectedLabel.length()));
}
|
/**
 * Tells whether the given port number falls outside the valid range
 * [{@code MIN_PORT}, {@code MAX_PORT}].
 *
 * @param port port number to check
 * @return true when the port is out of range
 */
public static boolean isInvalidPort(int port) {
    return !(MIN_PORT <= port && port <= MAX_PORT);
}
|
// Boundary checks: 0 and 65536 are invalid, a common user port (1024) is valid.
@Test
void testIsInvalidPort() {
assertTrue(NetUtils.isInvalidPort(0));
assertTrue(NetUtils.isInvalidPort(65536));
assertFalse(NetUtils.isInvalidPort(1024));
}
|
/**
 * Sets the primary-key constraint name after validating it.
 *
 * @param pkConstraintName the constraint name; validated by {@code validateConstraintName}
 * @return this builder, for chaining
 */
public CreateTableBuilder withPkConstraintName(String pkConstraintName) {
this.pkConstraintName = validateConstraintName(pkConstraintName);
return this;
}
|
// A null constraint name must be rejected by validation with an NPE.
@Test
public void withPkConstraintName_throws_NPE_if_name_is_null() {
assertThatThrownBy(() -> underTest.withPkConstraintName(null))
.isInstanceOf(NullPointerException.class)
.hasMessageContaining("Constraint name can't be null");
}
|
/**
 * Two {@code NiciraNshMdType} instances are equal when their md-type values
 * are equal.
 */
@Override
public boolean equals(Object obj) {
    if (this == obj) {
        return true;
    }
    // Pattern-matching instanceof removes the separate cast; a null obj
    // fails the instanceof check and falls through to false.
    if (obj instanceof NiciraNshMdType that) {
        return Objects.equals(nshMdType, that.nshMdType);
    }
    return false;
}
|
// EqualsTester checks the full equals/hashCode contract across equal and
// unequal md-type groups.
@Test
public void testEquals() {
final NiciraNshMdType nshMdType1 = new NiciraNshMdType(mdType1);
final NiciraNshMdType sameAsnshMdType1 = new NiciraNshMdType(mdType1);
final NiciraNshMdType nshMdType2 = new NiciraNshMdType(mdType2);
new EqualsTester().addEqualityGroup(nshMdType1, sameAsnshMdType1).addEqualityGroup(nshMdType2)
.testEquals();
}
|
/**
 * Renders the job-overview page: validates the job id, short-circuits for
 * missing or oversized (unparsed) jobs with an explanatory message, then emits
 * the overview info block, the ApplicationMaster table, the tasks table, and
 * the attempts table.
 */
@Override protected void render(Block html) {
String jid = $(JOB_ID);
// No job id in the request: nothing to render.
if (jid.isEmpty()) {
html.
p().__("Sorry, can't do anything without a JobID.").__();
return;
}
JobId jobID = MRApps.toJobID(jid);
Job j = appContext.getJob(jobID);
if (j == null) {
html.p().__("Sorry, ", jid, " not found.").__();
return;
}
// Oversized jobs are represented as UnparsedJob and never fully loaded;
// explain the limit and how to view them instead.
if(j instanceof UnparsedJob) {
final int taskCount = j.getTotalMaps() + j.getTotalReduces();
UnparsedJob oversizedJob = (UnparsedJob) j;
html.p().__("The job has a total of " + taskCount + " tasks. ")
.__("Any job larger than " + oversizedJob.getMaxTasksAllowed() +
" will not be loaded.").__();
html.p().__("You can either use the CLI tool: 'mapred job -history'"
+ " to view large jobs or adjust the property " +
JHAdminConfig.MR_HS_LOADED_JOBS_TASKS_MAX + ".").__();
return;
}
List<AMInfo> amInfos = j.getAMInfos();
JobInfo job = new JobInfo(j);
// Summary key/value block at the top of the page.
ResponseInfo infoBlock = info("Job Overview").
__("Job Name:", job.getName()).
__("User Name:", job.getUserName()).
__("Queue:", job.getQueueName()).
__("State:", job.getState()).
__("Uberized:", job.isUber()).
__("Submitted:", new Date(job.getSubmitTime())).
__("Started:", job.getStartTimeStr()).
__("Finished:", new Date(job.getFinishTime())).
__("Elapsed:", StringUtils.formatTime(
Times.elapsed(job.getStartTime(), job.getFinishTime(), false)));
String amString =
amInfos.size() == 1 ? "ApplicationMaster" : "ApplicationMasters";
// todo - switch to use JobInfo
List<String> diagnostics = j.getDiagnostics();
if(diagnostics != null && !diagnostics.isEmpty()) {
StringBuilder b = new StringBuilder();
for(String diag: diagnostics) {
b.append(addTaskLinks(diag));
}
infoBlock._r("Diagnostics:", b.toString());
}
// Per-phase average timings, shown only when the phase exists.
if(job.getNumMaps() > 0) {
infoBlock.__("Average Map Time", StringUtils.formatTime(job.getAvgMapTime()));
}
if(job.getNumReduces() > 0) {
infoBlock.__("Average Shuffle Time", StringUtils.formatTime(job.getAvgShuffleTime()));
infoBlock.__("Average Merge Time", StringUtils.formatTime(job.getAvgMergeTime()));
infoBlock.__("Average Reduce Time", StringUtils.formatTime(job.getAvgReduceTime()));
}
for (ConfEntryInfo entry : job.getAcls()) {
infoBlock.__("ACL "+entry.getName()+":", entry.getValue());
}
DIV<Hamlet> div = html.
__(InfoBlock.class).
div(_INFO_WRAP);
// MRAppMasters Table
TABLE<DIV<Hamlet>> table = div.table("#job");
table.
tr().
th(amString).
__().
tr().
th(_TH, "Attempt Number").
th(_TH, "Start Time").
th(_TH, "Node").
th(_TH, "Logs").
__();
// Alternate row styling across AM attempts.
boolean odd = false;
for (AMInfo amInfo : amInfos) {
AMAttemptInfo attempt = new AMAttemptInfo(amInfo,
job.getId(), job.getUserName(), "", "");
table.tr((odd = !odd) ? _ODD : _EVEN).
td(String.valueOf(attempt.getAttemptId())).
td(new Date(attempt.getStartTime()).toString()).
td().a(".nodelink", url(MRWebAppUtil.getYARNWebappScheme(),
attempt.getNodeHttpAddress()),
attempt.getNodeHttpAddress()).__().
td().a(".logslink", url(attempt.getLogsLink()),
"logs").__().
__();
}
table.__();
div.__();
html.div(_INFO_WRAP).
// Tasks table
table("#job").
tr().
th(_TH, "Task Type").
th(_TH, "Total").
th(_TH, "Complete").__().
tr(_ODD).
th().
a(url("tasks", jid, "m"), "Map").__().
td(String.valueOf(String.valueOf(job.getMapsTotal()))).
td(String.valueOf(String.valueOf(job.getMapsCompleted()))).__().
tr(_EVEN).
th().
a(url("tasks", jid, "r"), "Reduce").__().
td(String.valueOf(String.valueOf(job.getReducesTotal()))).
td(String.valueOf(String.valueOf(job.getReducesCompleted()))).__()
.__().
// Attempts table
table("#job").
tr().
th(_TH, "Attempt Type").
th(_TH, "Failed").
th(_TH, "Killed").
th(_TH, "Successful").__().
tr(_ODD).
th("Maps").
td().a(url("attempts", jid, "m",
TaskAttemptStateUI.FAILED.toString()),
String.valueOf(job.getFailedMapAttempts())).__().
td().a(url("attempts", jid, "m",
TaskAttemptStateUI.KILLED.toString()),
String.valueOf(job.getKilledMapAttempts())).__().
td().a(url("attempts", jid, "m",
TaskAttemptStateUI.SUCCESSFUL.toString()),
String.valueOf(job.getSuccessfulMapAttempts())).__().
__().
tr(_EVEN).
th("Reduces").
td().a(url("attempts", jid, "r",
TaskAttemptStateUI.FAILED.toString()),
String.valueOf(job.getFailedReduceAttempts())).__().
td().a(url("attempts", jid, "r",
TaskAttemptStateUI.KILLED.toString()),
String.valueOf(job.getKilledReduceAttempts())).__().
td().a(url("attempts", jid, "r",
TaskAttemptStateUI.SUCCESSFUL.toString()),
String.valueOf(job.getSuccessfulReduceAttempts())).__().
__().
__();
}
|
// Renders HsJobBlock for a job exceeding the configured task limit and asserts
// the oversize warning message is written to the output.
@Test
public void testHsJobBlockForOversizeJobShouldDisplayWarningMessage() {
int maxAllowedTaskNum = 100;
Configuration config = new Configuration();
config.setInt(JHAdminConfig.MR_HS_LOADED_JOBS_TASKS_MAX, maxAllowedTaskNum);
JobHistory jobHistory =
new JobHistoryStubWithAllOversizeJobs(maxAllowedTaskNum);
jobHistory.init(config);
HsJobBlock jobBlock = new HsJobBlock(jobHistory) {
// override this so that job block can fetch a job id.
@Override
public Map<String, String> moreParams() {
Map<String, String> map = new HashMap<>();
map.put(AMParams.JOB_ID, "job_0000_0001");
return map;
}
};
// set up the test block to render HsJobBLock to
OutputStream outputStream = new ByteArrayOutputStream();
HtmlBlock.Block block = createBlockToCreateTo(outputStream);
jobBlock.render(block);
block.getWriter().flush();
String out = outputStream.toString();
Assert.assertTrue("Should display warning message for jobs that have too " +
"many tasks", out.contains("Any job larger than " + maxAllowedTaskNum +
" will not be loaded"));
}
|
/**
 * Looks up a Maven artifact by its SHA-1 checksum, paging through results
 * until a match is found or the result set is exhausted.
 *
 * @param sha1 40-character hexadecimal SHA-1 digest
 * @return the first matching artifact
 * @throws IllegalArgumentException if {@code sha1} is not valid SHA-1 hex
 * @throws FileNotFoundException if no artifact with that checksum exists
 * @throws IOException on communication failure
 */
@Override
public MavenArtifact searchSha1(String sha1) throws IOException {
    if (null == sha1 || !sha1.matches("^[0-9A-Fa-f]{40}$")) {
        throw new IllegalArgumentException("Invalid SHA1 format");
    }
    final List<MavenArtifact> matches = new ArrayList<>(1);
    // Fetch pages until a match appears or the continuation token runs out.
    String continuationToken = null;
    do {
        continuationToken = retrievePageAndAddMatchingArtifact(matches, sha1, continuationToken);
    } while (continuationToken != null && matches.isEmpty());
    if (matches.isEmpty()) {
        throw new FileNotFoundException("Artifact not found in Nexus");
    }
    return matches.get(0);
}
|
// A malformed (non-40-hex) SHA-1 string must be rejected with IllegalArgumentException.
// NOTE(review): currently @Ignore'd — confirm why this test is disabled.
@Test(expected = IllegalArgumentException.class)
@Ignore
public void testMalformedSha1() throws Exception {
searcher.searchSha1("invalid");
}
|
/**
 * Converts the parsed table fields into codegen column records, assigning the
 * table id and a 1-based ordinal position, then filling per-column defaults.
 *
 * @param tableId     owning table id set on every column
 * @param tableFields fields parsed from the database schema
 * @return the fully initialized column list
 */
public List<CodegenColumnDO> buildColumns(Long tableId, List<TableField> tableFields) {
List<CodegenColumnDO> columns = CodegenConvert.INSTANCE.convertList(tableFields);
int index = 1;
for (CodegenColumnDO column : columns) {
column.setTableId(tableId);
column.setOrdinalPosition(index++);
// Special case: map the Byte Java type to Integer
if (Byte.class.getSimpleName().equals(column.getJavaType())) {
column.setJavaType(Integer.class.getSimpleName());
}
// Initialize the column's default attributes
processColumnOperation(column); // fill defaults for the CRUD-related attributes
processColumnUI(column); // fill defaults for the UI-related attributes
processColumnExample(column); // fill the column's swagger example value
}
return columns;
}
|
// Builds columns from a single mocked BIGINT primary-key field and checks
// every derived attribute (type mapping, ordinal, CRUD/UI defaults, example).
@Test
public void testBuildColumns() {
// Prepare arguments
Long tableId = randomLongId();
TableField tableField = mock(TableField.class);
List<TableField> tableFields = Collections.singletonList(tableField);
// Mock behavior
TableField.MetaInfo metaInfo = mock(TableField.MetaInfo.class);
when(tableField.getMetaInfo()).thenReturn(metaInfo);
when(metaInfo.getJdbcType()).thenReturn(JdbcType.BIGINT);
when(tableField.getComment()).thenReturn("编号");
when(tableField.isKeyFlag()).thenReturn(true);
IColumnType columnType = mock(IColumnType.class);
when(tableField.getColumnType()).thenReturn(columnType);
when(columnType.getType()).thenReturn("Long");
when(tableField.getName()).thenReturn("id2");
when(tableField.getPropertyName()).thenReturn("id");
// Invoke
List<CodegenColumnDO> columns = codegenBuilder.buildColumns(tableId, tableFields);
// Assert
assertEquals(1, columns.size());
CodegenColumnDO column = columns.get(0);
assertEquals(tableId, column.getTableId());
assertEquals("id2", column.getColumnName());
assertEquals("BIGINT", column.getDataType());
assertEquals("编号", column.getColumnComment());
assertFalse(column.getNullable());
assertTrue(column.getPrimaryKey());
assertEquals(1, column.getOrdinalPosition());
assertEquals("Long", column.getJavaType());
assertEquals("id", column.getJavaField());
assertNull(column.getDictType());
assertNotNull(column.getExample());
assertFalse(column.getCreateOperation());
assertTrue(column.getUpdateOperation());
assertFalse(column.getListOperation());
assertEquals("=", column.getListOperationCondition());
assertTrue(column.getListOperationResult());
assertEquals("input", column.getHtmlType());
}
|
/**
 * Reports whether the two int values are equal.
 *
 * @param left  first value
 * @param right second value
 * @return true when the values are equal
 */
public boolean match(int left, int right) {
    return Integer.compare(left, right) == 0;
}
|
// Equality and inequality of int pairs, including negative values and
// asymmetric mismatches in both directions.
@Test
public void intShouldEqual() {
int a = 334;
int b = 334;
boolean match = new NumberMatch().match(a, b);
assertTrue(match);
a = -123;
b = -123;
match = new NumberMatch().match(a, b);
assertTrue(match);
a = -122;
b = -123;
match = new NumberMatch().match(a, b);
assertFalse(match);
a = -123;
b = -122;
match = new NumberMatch().match(a, b);
assertFalse(match);
}
|
/**
 * Builds an {@code IpPrefix} from a packed 32-bit IPv4 address value and a
 * prefix length.
 *
 * @param address      IPv4 address packed into an int
 * @param prefixLength mask length in bits
 * @return the corresponding IP prefix
 */
public static IpPrefix valueOf(int address, int prefixLength) {
return new IpPrefix(IpAddress.valueOf(address), prefixLength);
}
|
// IPv4 prefix equality groups. Note the first group: host bits beyond the
// mask are ignored, so 1.2.0.4/24 equals 1.2.0.0/24, while differing mask
// lengths (/16, /32) form separate groups.
@Test
public void testEqualityIPv4() {
new EqualsTester()
.addEqualityGroup(IpPrefix.valueOf("1.2.0.0/24"),
IpPrefix.valueOf("1.2.0.0/24"),
IpPrefix.valueOf("1.2.0.4/24"))
.addEqualityGroup(IpPrefix.valueOf("1.2.0.0/16"),
IpPrefix.valueOf("1.2.0.0/16"))
.addEqualityGroup(IpPrefix.valueOf("1.2.0.0/32"),
IpPrefix.valueOf("1.2.0.0/32"))
.addEqualityGroup(IpPrefix.valueOf("1.3.0.0/24"),
IpPrefix.valueOf("1.3.0.0/24"))
.addEqualityGroup(IpPrefix.valueOf("0.0.0.0/0"),
IpPrefix.valueOf("0.0.0.0/0"))
.addEqualityGroup(IpPrefix.valueOf("255.255.255.255/32"),
IpPrefix.valueOf("255.255.255.255/32"))
.testEquals();
}
|
/**
 * Picks numPartitions - 1 range boundary keys from the sampled keys so that
 * partitions receive roughly equal key counts. Duplicate boundary values are
 * skipped by linearly probing for the next distinct sample.
 *
 * NOTE(review): {@code samples} is sorted in place, so callers must not rely
 * on its original order. When there are not enough distinct samples, trailing
 * slots of the returned array remain null — confirm callers tolerate that.
 */
static SortKey[] rangeBounds(
int numPartitions, Comparator<StructLike> comparator, SortKey[] samples) {
// sort the keys first
Arrays.sort(samples, comparator);
int numCandidates = numPartitions - 1;
SortKey[] candidates = new SortKey[numCandidates];
// Equal-frequency spacing: one candidate every ceil(samples/numPartitions) entries.
int step = (int) Math.ceil((double) samples.length / numPartitions);
int position = step - 1;
int numChosen = 0;
while (position < samples.length && numChosen < numCandidates) {
SortKey candidate = samples[position];
// skip duplicate values
if (numChosen > 0 && candidate.equals(candidates[numChosen - 1])) {
// linear probe for the next distinct value
position += 1;
} else {
candidates[numChosen] = candidate;
position += step;
numChosen += 1;
}
}
return candidates;
}
|
// With 11 samples and 4 partitions the step is ceil(11/4) = 3, so the chosen
// boundaries are the 3rd, 6th and 9th keys: c, f, i.
@Test
public void testRangeBoundsNonDivisible() {
// step is 3 = ceiling(11/4)
assertThat(
SketchUtil.rangeBounds(
4,
SORT_ORDER_COMPARTOR,
new SortKey[] {
CHAR_KEYS.get("a"),
CHAR_KEYS.get("b"),
CHAR_KEYS.get("c"),
CHAR_KEYS.get("d"),
CHAR_KEYS.get("e"),
CHAR_KEYS.get("f"),
CHAR_KEYS.get("g"),
CHAR_KEYS.get("h"),
CHAR_KEYS.get("i"),
CHAR_KEYS.get("j"),
CHAR_KEYS.get("k"),
}))
.containsExactly(CHAR_KEYS.get("c"), CHAR_KEYS.get("f"), CHAR_KEYS.get("i"));
}
|
/**
 * Acquires (or extends) the cluster-state lock for the given state change,
 * after validating startup completion, mastership status, version
 * compatibility, member-list version, and the partition-state stamp. The
 * stamp is re-checked after locking; if it changed in the meantime, the lock
 * is released and the failure is rethrown.
 *
 * @throws IllegalStateException if any precondition fails
 */
public void lockClusterState(ClusterStateChange stateChange, Address initiator, UUID txnId, long leaseTime,
int memberListVersion, long partitionStateStamp) {
Preconditions.checkNotNull(stateChange);
clusterServiceLock.lock();
try {
if (!node.getNodeExtension().isStartCompleted()) {
throw new IllegalStateException("Can not lock cluster state! Startup is not completed yet!");
}
if (node.getClusterService().getClusterJoinManager().isMastershipClaimInProgress()) {
throw new IllegalStateException("Can not lock cluster state! Mastership claim is in progress!");
}
// A version change additionally requires node and cluster compatibility checks.
if (stateChange.isOfType(Version.class)) {
validateNodeCompatibleWith((Version) stateChange.getNewState());
validateClusterVersionChange((Version) stateChange.getNewState());
}
checkMemberListVersion(memberListVersion);
checkMigrationsAndPartitionStateStamp(stateChange, partitionStateStamp);
lockOrExtendClusterState(initiator, txnId, leaseTime);
try {
// check migration status and partition-state version again
// if partition state is changed then release the lock and fail.
checkMigrationsAndPartitionStateStamp(stateChange, partitionStateStamp);
} catch (IllegalStateException e) {
stateLockRef.set(LockGuard.NOT_LOCKED);
throw e;
}
} finally {
clusterServiceLock.unlock();
}
}
|
// A null state change must be rejected with NullPointerException before any locking.
@Test(expected = NullPointerException.class)
public void test_lockClusterState_nullState() throws Exception {
Address initiator = newAddress();
clusterStateManager.lockClusterState(null, initiator, TXN, 1000, MEMBERLIST_VERSION, PARTITION_STAMP);
}
|
/**
 * Sends the given request through the underlying API client and returns its
 * typed response.
 *
 * @param request the request to send
 * @return the response produced by the API client
 */
public <T extends BaseRequest<T, R>, R extends BaseResponse> R execute(BaseRequest<T, R> request) {
return api.send(request);
}
|
// Deletes the bot commands for a given scope/language, then verifies through
// GetMyCommands that the command list is now empty.
@Test
public void deleteMyCommands() {
DeleteMyCommands cmds = new DeleteMyCommands();
cmds.languageCode("en");
cmds.scope(new BotCommandScopeAllChatAdministrators());
BaseResponse response = bot.execute(cmds);
assertTrue(response.isOk());
GetMyCommands getCmds = new GetMyCommands();
getCmds.languageCode("en");
getCmds.scope(new BotCommandScopeAllChatAdministrators());
GetMyCommandsResponse commandsResponse = bot.execute(getCmds);
assertTrue(commandsResponse.isOk());
assertArrayEquals(commandsResponse.commands(), new BotCommand[0]);
}
|
/**
 * Reorders the migration list in place so copies and shift-ups get priority,
 * then (at finest log level) logs the resulting order.
 *
 * @param migrations list to reorder in place; may be empty
 */
void prioritizeCopiesAndShiftUps(List<MigrationInfo> migrations) {
    for (int i = 0; i < migrations.size(); i++) {
        prioritize(migrations, i);
    }
    if (logger.isFinestEnabled()) {
        StringBuilder s = new StringBuilder("Migration order after prioritization: [");
        int ix = 0;
        for (MigrationInfo migration : migrations) {
            s.append("\n\t").append(ix++).append("- ").append(migration).append(",");
        }
        if (!migrations.isEmpty()) {
            // Fix: drop the trailing comma only when something was appended;
            // previously an empty list deleted the opening '[' instead.
            s.deleteCharAt(s.length() - 1);
        }
        s.append("]");
        logger.finest(s.toString());
    }
}
|
// Four copy migrations targeting increasing replica indexes (0..3) must keep
// their original relative order after prioritization.
@Test
public void testNoCopyPrioritizationAgainstCopy() throws UnknownHostException {
List<MigrationInfo> migrations = new ArrayList<>();
final MigrationInfo migration1 = new MigrationInfo(0, null, new PartitionReplica(new Address("localhost", 5701), uuids[0]), -1, -1, -1, 0);
final MigrationInfo migration2 = new MigrationInfo(0, null, new PartitionReplica(new Address("localhost", 5702), uuids[1]), -1, -1, -1, 1);
final MigrationInfo migration3 = new MigrationInfo(0, null, new PartitionReplica(new Address("localhost", 5703), uuids[2]), -1, -1, -1, 2);
final MigrationInfo migration4 = new MigrationInfo(0, null, new PartitionReplica(new Address("localhost", 5704), uuids[3]), -1, -1, -1, 3);
migrations.add(migration1);
migrations.add(migration2);
migrations.add(migration3);
migrations.add(migration4);
migrationPlanner.prioritizeCopiesAndShiftUps(migrations);
assertEquals(asList(migration1, migration2, migration3, migration4), migrations);
}
|
/**
 * Updates the issue's severity, recording a field diff and marking the issue changed.
 *
 * @return true when the severity actually changed, false when it was already set
 * @throws IllegalStateException if the severity was set manually and is therefore locked
 */
public boolean setSeverity(DefaultIssue issue, String severity, IssueChangeContext context) {
    checkState(!issue.manualSeverity(), "Severity can't be changed");
    if (Objects.equals(severity, issue.severity())) {
        // Already at the requested severity: nothing to record.
        return false;
    }
    issue.setFieldChange(context, SEVERITY, issue.severity(), severity);
    issue.setSeverity(severity);
    issue.setUpdateDate(context.date());
    issue.setChanged(true);
    return true;
}
|
@Test
void set_severity() {
    // Changing severity on a non-manual issue must succeed and record a diff.
    boolean changed = underTest.setSeverity(issue, "BLOCKER", context);
    assertThat(changed).isTrue();
    assertThat(issue.severity()).isEqualTo("BLOCKER");
    assertThat(issue.manualSeverity()).isFalse();
    assertThat(issue.mustSendNotifications()).isFalse();
    // The recorded change goes from "no previous value" to the new severity.
    FieldDiffs.Diff severityDiff = issue.currentChange().get(SEVERITY);
    assertThat(severityDiff.oldValue()).isNull();
    assertThat(severityDiff.newValue()).isEqualTo("BLOCKER");
}
|
@Override
public boolean contains(final Object value) {
    // Only boxed Long values can be members; anything else is trivially absent.
    if (value instanceof Long boxed) {
        return contains(boxed.longValue());
    }
    return false;
}
|
@Test
public void initiallyContainsNoBoxedElements() {
    // A freshly constructed set must report no membership for any boxed value.
    for (long candidate = 0; candidate < 10000; candidate++) {
        assertFalse(set.contains(Long.valueOf(candidate)));
    }
}
|
@ScalarOperator(MODULUS)
@SqlType(StandardTypes.REAL)
public static long modulus(@SqlType(StandardTypes.REAL) long left, @SqlType(StandardTypes.REAL) long right)
{
    // REAL values travel as raw float bits in the low 32 bits of a long:
    // unpack both operands, apply Java's float remainder, and repack.
    float dividend = intBitsToFloat((int) left);
    float divisor = intBitsToFloat((int) right);
    return floatToRawIntBits(dividend % divisor);
}
|
@Test
public void testModulus()
{
    // Each SQL REAL modulus must match the equivalent Java float remainder,
    // including sign-preserving edge cases with negative operands and -0.0.
    assertFunction("REAL'12.34' % REAL'56.78'", REAL, 12.34f % 56.78f);
    assertFunction("REAL'-17.34' % REAL'-22.891'", REAL, -17.34f % -22.891f);
    assertFunction("REAL'-89.123' % REAL'754.0'", REAL, -89.123f % 754.0f);
    assertFunction("REAL'-0.0' % REAL'0.0'", REAL, -0.0f % 0.0f);
    assertFunction("REAL'-17.71' % REAL'-1.0'", REAL, -17.71f % -1.0f);
}
|
/**
 * Parses {@code expression} against {@code patternType} bound to {@code bindingId},
 * delegating to the four-argument overload with the trailing flag set to
 * {@code false} (NOTE(review): presumably a "positional" flag — confirm in the overload).
 */
public DrlxParseResult drlxParse(Class<?> patternType, String bindingId, String expression) {
    return drlxParse(patternType, bindingId, expression, false);
}
|
@Test
public void testNullSafeExpressionsWithOr() {
    SingleDrlxParseSuccess parseResult =
            (SingleDrlxParseSuccess) parser.drlxParse(Person.class, "$p", "name == \"John\" || == address!.city");
    // Complex OR cases do not go through NullSafeExpressions...
    assertThat(parseResult.getNullSafeExpressions().size()).isEqualTo(0);
    // ...instead an inline null check precedes the second constraint.
    assertThat(parseResult.getExpr().toString()).isEqualTo("_this.getName() == \"John\" || _this.getAddress() != null && _this.getName() == _this.getAddress().getCity()");
}
|
/**
 * Stops the watcher by shutting down its scheduled executor (if any) and logs
 * the shutdown. Subsequent calls after a successful stop are no-ops.
 */
public void stop() {
    // NOTE(review): 'stopped' is read without synchronization and is only ever
    // set when 'executor' is non-null — confirm this is intentional.
    if (stopped) {
        return;
    }
    if (executor != null) {
        // Synchronizes on the executor itself so the flag update and shutdown
        // appear atomic to threads holding the same monitor.
        synchronized (executor) {
            stopped = true;
            executor.shutdown();
        }
    }
    SPAS_LOGGER.info("[{}] {} is stopped", appName, this.getClass().getSimpleName());
}
|
@Test
void testStop() throws NoSuchFieldException, IllegalAccessException {
    credentialWatcher.stop();
    // Reflect into the private executor field to confirm stop() shut it down.
    Field field = CredentialWatcher.class.getDeclaredField("executor");
    field.setAccessible(true);
    ScheduledExecutorService scheduler = (ScheduledExecutorService) field.get(credentialWatcher);
    assertTrue(scheduler.isShutdown());
}
|
@Override
public RecordSet getRecordSet(ConnectorTransactionHandle transactionHandle, ConnectorSession session, ConnectorSplit split, List<? extends ColumnHandle> columns)
{
    // Narrow the generic SPI handles down to their JDBC-specific types.
    JdbcSplit jdbcSplit = (JdbcSplit) split;
    ImmutableList.Builder<JdbcColumnHandle> jdbcColumns = ImmutableList.builder();
    columns.forEach(column -> jdbcColumns.add((JdbcColumnHandle) column));
    return new JdbcRecordSet(jdbcClient, session, jdbcSplit, jdbcColumns.build());
}
|
@Test
public void testGetRecordSet()
{
    ConnectorTransactionHandle txn = new JdbcTransactionHandle();
    JdbcRecordSetProvider provider = new JdbcRecordSetProvider(jdbcClient);
    RecordSet recordSet = provider.getRecordSet(txn, SESSION, split, ImmutableList.of(textColumn, textShortColumn, valueColumn));
    assertNotNull(recordSet, "recordSet is null");
    RecordCursor cursor = recordSet.cursor();
    assertNotNull(cursor, "cursor is null");
    // Collect (text, value) pairs, checking on the way that the short-text
    // column mirrors the text column for every row.
    Map<String, Long> actual = new LinkedHashMap<>();
    while (cursor.advanceNextPosition()) {
        actual.put(cursor.getSlice(0).toStringUtf8(), cursor.getLong(2));
        assertEquals(cursor.getSlice(0), cursor.getSlice(1));
    }
    assertEquals(actual, ImmutableMap.<String, Long>builder()
            .put("one", 1L)
            .put("two", 2L)
            .put("three", 3L)
            .put("ten", 10L)
            .put("eleven", 11L)
            .put("twelve", 12L)
            .build());
}
|
/** Formats the logging event's timestamp with the cached date formatter. */
public String convert(ILoggingEvent le) {
    return cachingDateFormatter.format(le.getTimeStamp());
}
|
@Test
public void convertsDateInSpecifiedTimeZoneAsGmtOffset() {
    // A GMT-offset time zone id passed to the converter must shift the
    // formatted timestamp exactly as formatting directly in that zone does.
    assertEquals(formatDate("GMT-8"), convert(_timestamp, DATETIME_PATTERN, "GMT-8"));
}
|
/**
 * Builds the authentication cookie for a freshly created session, carrying the
 * session's token and expiring when the session does.
 */
NewCookie createAuthenticationCookie(SessionResponse token, ContainerRequestContext requestContext) {
    return makeCookie(token.getAuthenticationToken(), token.validUntil(), requestContext);
}
|
@Test
void safePath() {
    // The override header smuggles a path segment that tries to inject an
    // "authentication" cookie value; the factory must neutralize it.
    containerRequest.getHeaders().put(HttpConfiguration.OVERRIDE_HEADER,
            List.of("http://graylog.local/path/;authentication=overridden-auth-value;"));
    final CookieFactory cookieFactory = new CookieFactory(new HttpConfiguration());
    final NewCookie authCookie = cookieFactory.createAuthenticationCookie(sessionResponse, containerRequest);
    // Round-trip through the header representation to see what a client would parse.
    final String serialized =
            RuntimeDelegate.getInstance().createHeaderDelegate(NewCookie.class).toString(authCookie);
    final Cookie roundTripped =
            RuntimeDelegate.getInstance().createHeaderDelegate(Cookie.class).fromString(serialized);
    assertThat(roundTripped.getName()).isEqualTo("authentication");
    assertThat(roundTripped.getValue()).isEqualTo("secret-auth-value");
    assertThat(authCookie.getPath()).isEqualTo("/path/authentication=overridden-auth-value/");
}
|
@Override
@SuppressWarnings("unchecked")
public <A extends ThreadPoolPlugin> Optional<A> getPlugin(String pluginId) {
    // Read under the shared lock so registration/removal cannot race the lookup.
    return mainLock.applyWithReadLock(() -> {
        ThreadPoolPlugin plugin = registeredPlugins.get(pluginId);
        return (Optional<A>) Optional.ofNullable(plugin);
    });
}
|
@Test
public void testGetPlugin() {
    // A registered plugin must be retrievable by its id, as the same instance.
    ThreadPoolPlugin registered = new TestExecuteAwarePlugin();
    manager.register(registered);
    Assert.assertSame(registered, manager.getPlugin(registered.getId()).orElse(null));
}
|
/**
 * Validates that the SMS channel exists and is enabled, returning it on success.
 *
 * @param channelId id of the channel to validate
 * @return the validated channel
 */
@VisibleForTesting
public SmsChannelDO validateSmsChannel(Long channelId) {
    SmsChannelDO channel = smsChannelService.getSmsChannel(channelId);
    // Missing channel and disabled channel are distinct, user-visible errors.
    if (channel == null) {
        throw exception(SMS_CHANNEL_NOT_EXISTS);
    }
    if (CommonStatusEnum.isDisable(channel.getStatus())) {
        throw exception(SMS_CHANNEL_DISABLE);
    }
    return channel;
}
|
@Test
public void testValidateSmsChannel_success() {
    // Prepare an id and stub the channel service to return an ENABLED channel,
    // which is the state required for validation to succeed.
    Long channelId = randomLongId();
    SmsChannelDO mockedChannel = randomPojo(SmsChannelDO.class, o -> {
        o.setId(channelId);
        o.setStatus(CommonStatusEnum.ENABLE.getStatus());
    });
    when(smsChannelService.getSmsChannel(eq(channelId))).thenReturn(mockedChannel);
    // Validate and assert the stubbed channel is returned unchanged.
    SmsChannelDO result = smsTemplateService.validateSmsChannel(channelId);
    assertPojoEquals(result, mockedChannel);
}
|
@Override
public PropertySource<?> locate(Environment environment) {
    // Polaris config disabled: contribute no property source at all.
    if (!polarisConfigProperties.isEnabled()) {
        return null;
    }
    CompositePropertySource compositePropertySource = new CompositePropertySource(POLARIS_CONFIG_PROPERTY_SOURCE_NAME);
    try {
        // Load, in precedence order: custom extension files, the Spring Boot
        // default config files, then any explicitly configured file groups.
        initCustomPolarisConfigExtensionFiles(compositePropertySource);
        initInternalConfigFiles(compositePropertySource);
        List<ConfigFileGroup> configFileGroups = polarisConfigProperties.getGroups();
        if (!CollectionUtils.isEmpty(configFileGroups)) {
            initCustomPolarisConfigFiles(compositePropertySource, configFileGroups);
        }
        return compositePropertySource;
    }
    finally {
        // Runs on every exit path, including early returns and exceptions.
        afterLocatePolarisConfigExtension(compositePropertySource);
    }
}
|
@Test
public void testGetCustomFiles() {
    // Verifies that custom config file groups are loaded and that earlier
    // files win on key conflicts (file1's k1=v1 beats file2's k1=v11).
    PolarisConfigFileLocator locator = new PolarisConfigFileLocator(polarisConfigProperties, polarisContextProperties,
            configFileService, environment);
    when(polarisContextProperties.getNamespace()).thenReturn(testNamespace);
    when(polarisContextProperties.getService()).thenReturn(testServiceName);
    // Stub every internal (Spring Boot default) config file as empty so only
    // the custom group contributes properties.
    Map<String, Object> emptyMap = new HashMap<>();
    ConfigKVFile emptyConfigFile = new MockedConfigKVFile(emptyMap);
    when(configFileService.getConfigPropertiesFile(testNamespace, testServiceName, "application.properties")).thenReturn(emptyConfigFile);
    when(configFileService.getConfigYamlFile(testNamespace, testServiceName, "application.yml")).thenReturn(emptyConfigFile);
    when(configFileService.getConfigYamlFile(testNamespace, testServiceName, "application.yaml")).thenReturn(emptyConfigFile);
    when(configFileService.getConfigPropertiesFile(testNamespace, testServiceName, "bootstrap.properties")).thenReturn(emptyConfigFile);
    when(configFileService.getConfigYamlFile(testNamespace, testServiceName, "bootstrap.yml")).thenReturn(emptyConfigFile);
    when(configFileService.getConfigYamlFile(testNamespace, testServiceName, "bootstrap.yaml")).thenReturn(emptyConfigFile);
    // One custom group with two properties files.
    List<ConfigFileGroup> customFiles = new LinkedList<>();
    ConfigFileGroup configFileGroup = new ConfigFileGroup();
    String customGroup = "group1";
    configFileGroup.setName(customGroup);
    String customFile1 = "file1.properties";
    String customFile2 = "file2.properties";
    configFileGroup.setFiles(Lists.newArrayList(customFile1, customFile2));
    customFiles.add(configFileGroup);
    when(polarisConfigProperties.isEnabled()).thenReturn(true);
    when(polarisConfigProperties.getGroups()).thenReturn(customFiles);
    when(polarisConfigProperties.isInternalEnabled()).thenReturn(true);
    when(environment.getActiveProfiles()).thenReturn(new String[] {});
    // file1.properties
    Map<String, Object> file1Map = new HashMap<>();
    file1Map.put("k1", "v1");
    file1Map.put("k2", "v2");
    ConfigKVFile file1 = new MockedConfigKVFile(file1Map);
    when(configFileService.getConfigPropertiesFile(testNamespace, customGroup, customFile1)).thenReturn(file1);
    // file2.properties (k1 here must be shadowed by file1's k1)
    Map<String, Object> file2Map = new HashMap<>();
    file2Map.put("k1", "v11");
    file2Map.put("k3", "v3");
    ConfigKVFile file2 = new MockedConfigKVFile(file2Map);
    when(configFileService.getConfigPropertiesFile(testNamespace, customGroup, customFile2)).thenReturn(file2);
    PropertySource<?> propertySource = locator.locate(environment);
    assertThat(propertySource.getProperty("k1")).isEqualTo("v1");
    assertThat(propertySource.getProperty("k2")).isEqualTo("v2");
    assertThat(propertySource.getProperty("k3")).isEqualTo("v3");
}
|
/**
 * Installs the new metadata image and asks every consumer/share group that is
 * subscribed to a created, updated or deleted topic to refresh its metadata.
 */
public void onNewMetadataImage(MetadataImage newImage, MetadataDelta delta) {
    metadataImage = newImage;
    Optional.ofNullable(delta.topicsDelta()).ifPresent(topicsDelta -> {
        // Gather every group touched by the delta's topic changes and deletions.
        Set<String> impactedGroupIds = new HashSet<>();
        topicsDelta.changedTopics().forEach((topicId, topicDelta) ->
                impactedGroupIds.addAll(groupsSubscribedToTopic(topicDelta.name())));
        topicsDelta.deletedTopicIds().forEach(topicId -> {
            // Deleted topics are only present in the pre-delta image.
            TopicImage topicImage = delta.image().topics().getTopic(topicId);
            impactedGroupIds.addAll(groupsSubscribedToTopic(topicImage.name()));
        });
        impactedGroupIds.forEach(groupId -> {
            Group group = groups.get(groupId);
            // Only modern (consumer/share) groups carry a metadata refresh deadline.
            if (group != null && (group.type() == CONSUMER || group.type() == SHARE)) {
                ((ModernGroup<?>) group).requestMetadataRefresh();
            }
        });
    });
}
|
@Test
public void testOnNewMetadataImage() {
    // Groups subscribed to changed (b), deleted (d) or created (e) topics must
    // have their metadata expired; groups subscribed to untouched topics (f)
    // must keep their refresh deadline.
    GroupMetadataManagerTestContext context = new GroupMetadataManagerTestContext.Builder()
        .withConsumerGroupAssignors(Collections.singletonList(new MockPartitionAssignor("range")))
        .build();
    // M1 in group 1 subscribes to a and b.
    context.replay(GroupCoordinatorRecordHelpers.newConsumerGroupMemberSubscriptionRecord("group1",
        new ConsumerGroupMember.Builder("group1-m1")
            .setSubscribedTopicNames(Arrays.asList("a", "b"))
            .build()));
    // M1 in group 2 subscribes to b and c.
    context.replay(GroupCoordinatorRecordHelpers.newConsumerGroupMemberSubscriptionRecord("group2",
        new ConsumerGroupMember.Builder("group2-m1")
            .setSubscribedTopicNames(Arrays.asList("b", "c"))
            .build()));
    // M1 in group 3 subscribes to d.
    context.replay(GroupCoordinatorRecordHelpers.newConsumerGroupMemberSubscriptionRecord("group3",
        new ConsumerGroupMember.Builder("group3-m1")
            .setSubscribedTopicNames(Collections.singletonList("d"))
            .build()));
    // M1 in group 4 subscribes to e.
    context.replay(GroupCoordinatorRecordHelpers.newConsumerGroupMemberSubscriptionRecord("group4",
        new ConsumerGroupMember.Builder("group4-m1")
            .setSubscribedTopicNames(Collections.singletonList("e"))
            .build()));
    // M1 in group 5 subscribes to f.
    context.replay(GroupCoordinatorRecordHelpers.newConsumerGroupMemberSubscriptionRecord("group5",
        new ConsumerGroupMember.Builder("group5-m1")
            .setSubscribedTopicNames(Collections.singletonList("f"))
            .build()));
    // Ensures that all refresh flags are set to the future.
    Arrays.asList("group1", "group2", "group3", "group4", "group5").forEach(groupId -> {
        ConsumerGroup group = context.groupMetadataManager.consumerGroup(groupId);
        group.setMetadataRefreshDeadline(context.time.milliseconds() + 5000L, 0);
        assertFalse(group.hasMetadataExpired(context.time.milliseconds()));
    });
    // Update the metadata image.
    Uuid topicA = Uuid.randomUuid();
    Uuid topicB = Uuid.randomUuid();
    Uuid topicC = Uuid.randomUuid();
    Uuid topicD = Uuid.randomUuid();
    Uuid topicE = Uuid.randomUuid();
    // Create a first base image with topic a, b, c and d.
    MetadataDelta delta = new MetadataDelta(MetadataImage.EMPTY);
    delta.replay(new TopicRecord().setTopicId(topicA).setName("a"));
    delta.replay(new PartitionRecord().setTopicId(topicA).setPartitionId(0));
    delta.replay(new TopicRecord().setTopicId(topicB).setName("b"));
    delta.replay(new PartitionRecord().setTopicId(topicB).setPartitionId(0));
    delta.replay(new TopicRecord().setTopicId(topicC).setName("c"));
    delta.replay(new PartitionRecord().setTopicId(topicC).setPartitionId(0));
    delta.replay(new TopicRecord().setTopicId(topicD).setName("d"));
    delta.replay(new PartitionRecord().setTopicId(topicD).setPartitionId(0));
    MetadataImage image = delta.apply(MetadataProvenance.EMPTY);
    // Create a delta which updates topic B, deletes topic D and creates topic E.
    delta = new MetadataDelta(image);
    delta.replay(new PartitionRecord().setTopicId(topicB).setPartitionId(2));
    delta.replay(new RemoveTopicRecord().setTopicId(topicD));
    delta.replay(new TopicRecord().setTopicId(topicE).setName("e"));
    delta.replay(new PartitionRecord().setTopicId(topicE).setPartitionId(1));
    image = delta.apply(MetadataProvenance.EMPTY);
    // Update metadata image with the delta.
    context.groupMetadataManager.onNewMetadataImage(image, delta);
    // Verify the groups: 1, 2 (topic b changed), 3 (topic d deleted) and
    // 4 (topic e created) must be expired; group 5 (topic f untouched) must not.
    Arrays.asList("group1", "group2", "group3", "group4").forEach(groupId -> {
        ConsumerGroup group = context.groupMetadataManager.consumerGroup(groupId);
        assertTrue(group.hasMetadataExpired(context.time.milliseconds()));
    });
    Collections.singletonList("group5").forEach(groupId -> {
        ConsumerGroup group = context.groupMetadataManager.consumerGroup(groupId);
        assertFalse(group.hasMetadataExpired(context.time.milliseconds()));
    });
    // Verify image.
    assertEquals(image, context.groupMetadataManager.image());
}
|
/**
 * Resolves each logical column to its physical field index, returning the
 * indices in the same order as {@code logicalColumns}.
 */
public static int[] computePhysicalIndices(
        List<TableColumn> logicalColumns,
        DataType physicalType,
        Function<String, String> nameRemapping) {
    final Map<TableColumn, Integer> lookup =
            computePhysicalIndices(logicalColumns.stream(), physicalType, nameRemapping);
    final int[] indices = new int[logicalColumns.size()];
    for (int i = 0; i < indices.length; i++) {
        indices[i] = lookup.get(logicalColumns.get(i));
    }
    return indices;
}
|
@Test
void testFieldMappingRowTypeNotMatchingTypesInNestedType() {
    // The logical schema declares f1.f1_0 as BIGINT while the physical type
    // says STRING; the mapping must fail with a precise validation message.
    assertThatThrownBy(
            () ->
                    TypeMappingUtils.computePhysicalIndices(
                            TableSchema.builder()
                                    .field("f0", DECIMAL(38, 18))
                                    .field(
                                            "f1",
                                            ROW(
                                                    FIELD("f1_0", BIGINT()),
                                                    FIELD("f1_1", STRING())))
                                    .build()
                                    .getTableColumns(),
                            ROW(
                                    FIELD("f0", DECIMAL(38, 18)),
                                    FIELD(
                                            "f1",
                                            ROW(
                                                    FIELD("f1_0", STRING()),
                                                    FIELD("f1_1", STRING())))),
                            Function.identity()))
            .isInstanceOf(ValidationException.class)
            .hasMessage(
                    "Type ROW<`f1_0` BIGINT, `f1_1` STRING> of table field 'f1' does not "
                            + "match with the physical type ROW<`f1_0` STRING, `f1_1` STRING> "
                            + "of the 'f1' field of the TableSource return type.");
}
|
/**
 * Returns whether the namespace's current items differ from the configuration
 * captured by its latest active release.
 */
boolean isModified(Namespace namespace) {
    Release latestRelease = releaseService.findLatestActiveRelease(namespace);
    List<Item> currentItems = itemService.findItemsWithoutOrdered(namespace.getId());
    if (latestRelease == null) {
        // Never released: any "normal" item (see hasNormalItems) counts as a change.
        return hasNormalItems(currentItems);
    }
    Map<String, String> releasedConfiguration = GSON.fromJson(latestRelease.getConfigurations(), GsonType.CONFIG);
    Map<String, String> currentConfiguration = generateConfigurationFromItems(namespace, currentItems);
    return !Maps.difference(releasedConfiguration, currentConfiguration).areEqual();
}
|
@Test
public void testNamespaceHasNoNormalItemsAndRelease() {
    // With no release and only a blank item, the namespace is not modified.
    long namespaceId = 1;
    Namespace namespace = createNamespace(namespaceId);
    when(releaseService.findLatestActiveRelease(namespace)).thenReturn(null);
    when(itemService.findItemsWithoutOrdered(namespaceId)).thenReturn(Collections.singletonList(createItem("", "")));
    Assert.assertFalse(namespaceUnlockAspect.isModified(namespace));
}
|
@Override
public List<TransferItem> list(final Session<?> session, final Path directory,
                               final Local local, final ListProgressListener listener) throws BackgroundException {
    if(log.isDebugEnabled()) {
        log.debug(String.format("List children for %s", directory));
    }
    // Symbolic links that will be transferred as links are not recursed into.
    if(directory.isSymbolicLink()
            && new DownloadSymlinkResolver(roots).resolve(directory)) {
        if(log.isDebugEnabled()) {
            log.debug(String.format("Do not list children for symbolic link %s", directory));
        }
        return Collections.emptyList();
    }
    // Use the cached listing when available; otherwise fetch and cache it.
    final AttributedList<Path> listing;
    if(cache.isCached(directory)) {
        listing = cache.get(directory);
    }
    else {
        listing = session.getFeature(ListService.class).list(directory, listener);
        cache.put(directory, listing);
    }
    // Return a copy holding only the filtered, ordered result.
    final List<TransferItem> children = new ArrayList<>();
    for(Path file : new AttributedList<>(listing.filter(comparator, filter))) {
        children.add(new TransferItem(file, LocalFactory.get(local, file.getName())));
    }
    return children;
}
|
@Test
public void testRegexFilter() throws Exception {
    // The download filter must exclude entries matching the skip regex (.DS_Store)
    // while keeping ordinary files.
    final Path parent = new Path("t", EnumSet.of(Path.Type.directory));
    final Transfer transfer = new DownloadTransfer(new Host(new TestProtocol()), parent, new NullLocal(System.getProperty("java.io.tmpdir")));
    final NullSession session = new NullSession(new Host(new TestProtocol())) {
        @Override
        public AttributedList<Path> list(final Path file, final ListProgressListener listener) {
            final AttributedList<Path> children = new AttributedList<>();
            children.add(new Path("/t/.DS_Store", EnumSet.of(Path.Type.file)));
            children.add(new Path("/t/t", EnumSet.of(Path.Type.file)));
            return children;
        }
    };
    final List<TransferItem> listed = transfer.list(session, parent,
            new NullLocal(System.getProperty("java.io.tmpdir")), new DisabledListProgressListener());
    assertEquals(1, listed.size());
    assertFalse(listed.contains(new TransferItem(new Path("/t/.DS_Store", EnumSet.of(Path.Type.file)))));
    assertTrue(listed.contains(new TransferItem(new Path("/t/t", EnumSet.of(Path.Type.file)), new Local(System.getProperty("java.io.tmpdir"), "t"))));
}
|
/** Parses a single media type from its string form, rejecting null/empty input. */
@ProtoFactory
public static MediaType fromString(String tree) {
    if (tree == null || tree.isEmpty()) {
        throw CONTAINER.missingMediaType();
    }
    return parseSingleMediaType(tree, TREE_PATTERN.matcher(tree), false);
}
|
@Test
public void testUnquotedParamWithSingleQuote() {
    // Single quotes are not treated as quoting, so they stay part of the value...
    MediaType parsed = MediaType.fromString("application/json; charset='UTF-8'");
    assertMediaTypeWithParam(parsed, "application", "json", "charset", "'UTF-8'");
    // ...which means an embedded space is still illegal in the parameter value.
    Exceptions.expectException(EncodingException.class, () -> MediaType.fromString("application/json; charset='UTF 8'"));
}
|
/**
 * Processes the stream with the old-style processor API, delegating to the
 * {@link Named} overload with an auto-generated processor name.
 *
 * @deprecated NOTE(review): presumably superseded by the new
 *     {@code api.ProcessorSupplier}-based overload — confirm the replacement.
 */
@Override
@Deprecated
public void process(final org.apache.kafka.streams.processor.ProcessorSupplier<? super K, ? super V> processorSupplier,
                    final String... stateStoreNames) {
    process(processorSupplier, Named.as(builder.newProcessorName(PROCESSOR_NAME)), stateStoreNames);
}
|
@Test
public void shouldNotAllowNullNamedOnProcess() {
    // A null Named argument must be rejected with a descriptive NPE.
    final NullPointerException npe = assertThrows(
        NullPointerException.class,
        () -> testStream.process(processorSupplier, (Named) null));
    assertThat(npe.getMessage(), equalTo("named can't be null"));
}
|
/**
 * Loads the named class, instrumenting ("sandboxing") it when the config says
 * to acquire it, and delegating to the parent loader otherwise.
 *
 * @throws ClassNotFoundException if this loader has been closed or the class
 *     cannot be found
 */
@Override
public Class<?> loadClass(String name, boolean resolve) throws ClassNotFoundException {
    // Per-class lock (JDK parallel-capable loader protocol) instead of locking
    // the whole loader.
    synchronized (getClassLoadingLock(name)) {
        // Fast path: the class was already defined by this loader.
        Class<?> loadedClass = findLoadedClass(name);
        if (loadedClass != null) {
            return loadedClass;
        }
        if (isClosed) {
            throw new ClassNotFoundException("This ClassLoader is closed");
        }
        if (config.shouldAcquire(name)) {
            // Instrument the class, timing the work for perf statistics.
            loadedClass =
                PerfStatsCollector.getInstance()
                    .measure("load sandboxed class", () -> maybeInstrumentClass(name));
        } else {
            // Not sandboxed: let the parent loader define it.
            loadedClass = getParent().loadClass(name);
        }
        if (resolve) {
            resolveClass(loadedClass);
        }
        return loadedClass;
    }
}
|
@Test
public void callingMethodReturningDouble() throws Exception {
    // Invoking an instrumented method with a double return type must route
    // through the class handler, and the int value it returns (456) must be
    // widened to the method's declared double return type.
    Class<?> exampleClass = loadClass(AClassWithMethodReturningDouble.class);
    classHandler.valueToReturn = 456;
    Method normalMethod = exampleClass.getMethod("normalMethodReturningDouble", double.class);
    Object exampleInstance = exampleClass.getDeclaredConstructor().newInstance();
    assertEquals(456.0, normalMethod.invoke(exampleInstance, 123d));
    // The handler must have seen both the constructor and the method call.
    assertThat(transcript)
        .containsExactly(
            "methodInvoked: AClassWithMethodReturningDouble.__constructor__()",
            "methodInvoked: AClassWithMethodReturningDouble.normalMethodReturningDouble(double"
                + " 123.0)");
}
|
@Override
public String toString() {
    // Render as "Repeatedly.forever(<repeated sub-trigger>)".
    return "Repeatedly.forever(" + subTriggers.get(REPEATED) + ")";
}
|
@Test
public void testToString() {
    // The repeated trigger's own toString must be embedded verbatim.
    StubTrigger inner = new StubTrigger() {
        @Override
        public String toString() {
            return "innerTrigger";
        }
    };
    Trigger trigger = Repeatedly.forever(inner);
    assertEquals("Repeatedly.forever(innerTrigger)", trigger.toString());
}
|
/**
 * Builds the single-fragment execution plan for a stream load: a
 * StreamLoadScanNode feeding an OlapTableSink, wrapped in the thrift
 * TExecPlanFragmentParams that is shipped to the backend.
 *
 * @param loadId unique id of the load, reused as the query id
 * @return the fully populated fragment execution parameters
 * @throws UserException on invalid load options (e.g. negative load or
 *     partial update against an unsupported table type)
 */
public TExecPlanFragmentParams plan(TUniqueId loadId) throws UserException {
    boolean isPrimaryKey = destTable.getKeysType() == KeysType.PRIMARY_KEYS;
    resetAnalyzer();
    // construct tuple descriptor, used for scanNode and dataSink
    TupleDescriptor tupleDesc = descTable.createTupleDescriptor("DstTableTuple");
    boolean negative = streamLoadInfo.getNegative();
    // Validate load-mode combinations that depend on the table's key model.
    if (isPrimaryKey) {
        if (negative) {
            throw new DdlException("Primary key table does not support negative load");
        }
        if (destTable.hasRowStorageType() && streamLoadInfo.isPartialUpdate() &&
                streamLoadInfo.getPartialUpdateMode() != TPartialUpdateMode.ROW_MODE) {
            throw new DdlException("column with row table only support row mode partial update");
        }
    } else {
        if (streamLoadInfo.isPartialUpdate()) {
            throw new DdlException("Only primary key table support partial update");
        }
    }
    List<Pair<Integer, ColumnDict>> globalDicts = Lists.newArrayList();
    List<Column> destColumns;
    List<Boolean> missAutoIncrementColumn = Lists.newArrayList();
    // Partial updates only touch the columns named in the load; otherwise the
    // full schema is written.
    if (streamLoadInfo.isPartialUpdate()) {
        destColumns = Load.getPartialUpateColumns(destTable, streamLoadInfo.getColumnExprDescs(),
                missAutoIncrementColumn);
    } else {
        destColumns = destTable.getFullSchema();
    }
    // Build one slot per destination column, collecting global dictionaries for
    // low-cardinality varchar columns along the way.
    for (Column col : destColumns) {
        SlotDescriptor slotDesc = descTable.addSlotDescriptor(tupleDesc);
        slotDesc.setIsMaterialized(true);
        slotDesc.setColumn(col);
        slotDesc.setIsNullable(col.isAllowNull());
        // Negative load subtracts values, which is only meaningful for SUM columns.
        if (negative && !col.isKey() && col.getAggregationType() != AggregateType.SUM) {
            throw new DdlException("Column is not SUM AggreateType. column:" + col.getName());
        }
        if (col.getType().isVarchar() && Config.enable_dict_optimize_stream_load &&
                IDictManager.getInstance().hasGlobalDict(destTable.getId(),
                        col.getColumnId())) {
            Optional<ColumnDict> dict = IDictManager.getInstance().getGlobalDict(destTable.getId(), col.getColumnId());
            dict.ifPresent(columnDict -> globalDicts.add(new Pair<>(slotDesc.getId().asInt(), columnDict)));
        }
    }
    if (isPrimaryKey) {
        // add op type column
        SlotDescriptor slotDesc = descTable.addSlotDescriptor(tupleDesc);
        slotDesc.setIsMaterialized(true);
        slotDesc.setColumn(new Column(Load.LOAD_OP_COLUMN, Type.TINYINT));
        slotDesc.setIsNullable(false);
    }
    // create scan node
    StreamLoadScanNode scanNode =
            new StreamLoadScanNode(loadId, new PlanNodeId(0), tupleDesc, destTable, streamLoadInfo);
    scanNode.setUseVectorizedLoad(true);
    scanNode.init(analyzer);
    scanNode.finalizeStats(analyzer);
    scanNode.setWarehouseId(streamLoadInfo.getWarehouseId());
    descTable.computeMemLayout();
    // create dest sink
    TWriteQuorumType writeQuorum = destTable.writeQuorum();
    List<Long> partitionIds = getAllPartitionIds();
    boolean enableAutomaticPartition;
    // Explicitly targeted partitions disable automatic partition creation.
    if (streamLoadInfo.isSpecifiedPartitions()) {
        enableAutomaticPartition = false;
    } else {
        enableAutomaticPartition = destTable.supportedAutomaticPartition();
    }
    OlapTableSink olapTableSink = new OlapTableSink(destTable, tupleDesc, partitionIds, writeQuorum,
            destTable.enableReplicatedStorage(), scanNode.nullExprInAutoIncrement(),
            enableAutomaticPartition, streamLoadInfo.getWarehouseId());
    if (missAutoIncrementColumn.size() == 1 && missAutoIncrementColumn.get(0) == Boolean.TRUE) {
        olapTableSink.setMissAutoIncrementColumn();
    }
    if (destTable.getAutomaticBucketSize() > 0) {
        olapTableSink.setAutomaticBucketSize(destTable.getAutomaticBucketSize());
    }
    olapTableSink.init(loadId, streamLoadInfo.getTxnId(), db.getId(), streamLoadInfo.getTimeout());
    Load.checkMergeCondition(streamLoadInfo.getMergeConditionStr(), destTable, destColumns,
            olapTableSink.missAutoIncrementColumn());
    olapTableSink.setPartialUpdateMode(streamLoadInfo.getPartialUpdateMode());
    olapTableSink.complete(streamLoadInfo.getMergeConditionStr());
    // for stream load, we only need one fragment, ScanNode -> DataSink.
    // OlapTableSink can dispatch data to corresponding node.
    PlanFragment fragment = new PlanFragment(new PlanFragmentId(0), scanNode, DataPartition.UNPARTITIONED);
    fragment.setSink(olapTableSink);
    // At present, we only support dop=1 for olap table sink.
    // because tablet writing needs to know the number of senders in advance
    // and guaranteed order of data writing
    // It can be parallel only in some scenes, for easy use 1 dop now.
    fragment.setPipelineDop(1);
    // After data loading, we need to check the global dict for low cardinality string column
    // whether update.
    fragment.setLoadGlobalDicts(globalDicts);
    fragment.createDataSink(TResultSinkType.MYSQL_PROTOCAL);
    TExecPlanFragmentParams params = new TExecPlanFragmentParams();
    params.setProtocol_version(InternalServiceVersion.V1);
    params.setFragment(fragment.toThrift());
    params.setDesc_tbl(analyzer.getDescTbl().toThrift());
    TPlanFragmentExecParams execParams = new TPlanFragmentExecParams();
    // user load id (streamLoadInfo.id) as query id
    execParams.setQuery_id(loadId);
    execParams.setFragment_instance_id(new TUniqueId(loadId.hi, loadId.lo + 1));
    execParams.per_exch_num_senders = Maps.newHashMap();
    execParams.destinations = Lists.newArrayList();
    Map<Integer, List<TScanRangeParams>> perNodeScanRange = Maps.newHashMap();
    List<TScanRangeParams> scanRangeParams = Lists.newArrayList();
    for (TScanRangeLocations locations : scanNode.getScanRangeLocations(0)) {
        scanRangeParams.add(new TScanRangeParams(locations.getScan_range()));
    }
    // For stream load, only one sender
    execParams.setSender_id(0);
    execParams.setNum_senders(1);
    perNodeScanRange.put(scanNode.getId().asInt(), scanRangeParams);
    execParams.setPer_node_scan_ranges(perNodeScanRange);
    params.setParams(execParams);
    TQueryOptions queryOptions = new TQueryOptions();
    queryOptions.setQuery_type(TQueryType.LOAD);
    queryOptions.setQuery_timeout(streamLoadInfo.getTimeout());
    queryOptions.setLoad_transmission_compression_type(streamLoadInfo.getTransmisionCompressionType());
    queryOptions.setLog_rejected_record_num(streamLoadInfo.getLogRejectedRecordNum());
    // Disable load_dop for LakeTable temporary, because BE's `LakeTabletsChannel` does not support
    // parallel send from a single sender.
    if (streamLoadInfo.getLoadParallelRequestNum() != 0 && !destTable.isCloudNativeTableOrMaterializedView()) {
        // only dup_keys can use parallel write since other table's the order of write is important
        if (destTable.getKeysType() == KeysType.DUP_KEYS) {
            queryOptions.setLoad_dop(streamLoadInfo.getLoadParallelRequestNum());
        } else {
            queryOptions.setLoad_dop(1);
        }
    }
    // for stream load, we use exec_mem_limit to limit the memory usage of load channel.
    queryOptions.setMem_limit(streamLoadInfo.getExecMemLimit());
    queryOptions.setLoad_mem_limit(streamLoadInfo.getLoadMemLimit());
    if (connectContext.getSessionVariable().isEnableLoadProfile()) {
        queryOptions.setEnable_profile(true);
        queryOptions.setLoad_profile_collect_second(Config.stream_load_profile_collect_second);
    }
    params.setQuery_options(queryOptions);
    TQueryGlobals queryGlobals = new TQueryGlobals();
    queryGlobals.setNow_string(DATE_FORMAT.format(new Date()));
    queryGlobals.setTimestamp_ms(new Date().getTime());
    queryGlobals.setTime_zone(streamLoadInfo.getTimezone());
    params.setQuery_globals(queryGlobals);
    // Since stream load has only one fragment,
    // the backend number can be directly assigned to 0
    params.setBackend_num(0);
    TNetworkAddress coordAddress = new TNetworkAddress(FrontendOptions.getLocalHostAddress(), Config.rpc_port);
    params.setCoord(coordAddress);
    LOG.info("load job id: {}, txn id: {}, parallel: {}, compress: {}, replicated: {}, quorum: {}",
            DebugUtil.printId(loadId), streamLoadInfo.getTxnId(), queryOptions.getLoad_dop(),
            queryOptions.getLoad_transmission_compression_type(), destTable.enableReplicatedStorage(), writeQuorum);
    this.execPlanFragmentParams = params;
    return params;
}
|
@Test
public void testNormalPlan() throws UserException {
    // Planning a plain CSV stream load must succeed and carry through the
    // requested payload compression type.
    List<Column> columns = Lists.newArrayList();
    Column c1 = new Column("c1", Type.BIGINT, false);
    columns.add(c1);
    Column c2 = new Column("c2", Type.BIGINT, true);
    columns.add(c2);
    // Stub the destination table, scan node and partition (JMockit).
    new Expectations() {
        {
            destTable.getBaseSchema();
            minTimes = 0;
            result = columns;
            destTable.getPartitions();
            minTimes = 0;
            result = Arrays.asList(partition);
            scanNode.init((Analyzer) any);
            minTimes = 0;
            scanNode.getChildren();
            minTimes = 0;
            result = Lists.newArrayList();
            scanNode.getId();
            minTimes = 0;
            result = new PlanNodeId(5);
            partition.getId();
            minTimes = 0;
            result = 0;
        }
    };
    // Minimal stream-load request: CSV over a raw stream with LZ4 frames.
    TStreamLoadPutRequest request = new TStreamLoadPutRequest();
    request.setTxnId(1);
    request.setLoadId(new TUniqueId(2, 3));
    request.setFileType(TFileType.FILE_STREAM);
    request.setFormatType(TFileFormatType.FORMAT_CSV_PLAIN);
    request.setLoad_dop(2);
    request.setPayload_compression_type("LZ4_FRAME");
    StreamLoadInfo streamLoadInfo = StreamLoadInfo.fromTStreamLoadPutRequest(request, db);
    StreamLoadPlanner planner = new StreamLoadPlanner(db, destTable, streamLoadInfo);
    planner.plan(streamLoadInfo.getId());
    Assert.assertEquals(TCompressionType.LZ4_FRAME, streamLoadInfo.getPayloadCompressionType());
}
|
/**
 * No-op: this is the top-level command, which only exists to host subcommands.
 */
@Override
public void run() {
    // top-level command, do nothing
}
|
@Test
public void test_restartJob_invalidNameOrId() {
    // Then: expect a descriptive failure for an unknown job identifier
    exception.expectMessage("No job with name or id 'invalid' was found");
    // When: restarting a job that does not exist
    run("restart", "invalid");
}
|
/**
 * FEEL {@code nn all()}: logical AND over a list, ignoring null elements.
 *
 * <p>A null list yields {@code true} (vacuous truth). Any non-null element that
 * is not a {@link Boolean} produces an invalid-parameter error; otherwise the
 * result is the conjunction of all non-null elements (empty/all-null lists
 * therefore yield {@code true}).
 */
public FEELFnResult<Boolean> invoke(@ParameterName("list") List list) {
    if (list == null) {
        return FEELFnResult.ofResult(true);
    }
    boolean conjunction = true;
    for (final Object element : list) {
        if (element == null) {
            // null entries are skipped rather than poisoning the result
            continue;
        }
        if (!(element instanceof Boolean)) {
            return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "an element in the list is not" +
                    " a Boolean"));
        }
        conjunction &= (Boolean) element;
    }
    return FEELFnResult.ofResult(conjunction);
}
|
@Test
void invokeBooleanParamNull() {
// A null Boolean argument is treated as the vacuous case and yields true.
FunctionTestUtil.assertResult(nnAllFunction.invoke((Boolean) null), true);
}
|
/**
 * Builds a {@link ConfigInfos} validation result for a connector.
 *
 * <p>Values whose name has no matching {@link ConfigKey} definition are emitted
 * immediately with a {@code null} key info (so their error messages are not
 * lost); every declared key is then emitted, paired with its value when one was
 * supplied. The error count totals the error messages across all emitted values.
 *
 * @param connType     connector class name used as the result name
 * @param configKeys   declared configuration keys, by name
 * @param configValues validated values reported by the connector
 * @param groups       configuration group names, in display order
 */
public static ConfigInfos generateResult(String connType, Map<String, ConfigKey> configKeys, List<ConfigValue> configValues, List<String> groups) {
    Map<String, ConfigValue> valuesByName = new HashMap<>();
    List<ConfigInfo> infos = new LinkedList<>();
    int totalErrors = 0;
    // First pass: index values and surface the ones with no declared key.
    for (ConfigValue value : configValues) {
        String name = value.name();
        valuesByName.put(name, value);
        if (!configKeys.containsKey(name)) {
            infos.add(new ConfigInfo(null, convertConfigValue(value, null)));
            totalErrors += value.errorMessages().size();
        }
    }
    // Second pass: one entry per declared key, with its value when present.
    for (Map.Entry<String, ConfigKey> keyEntry : configKeys.entrySet()) {
        ConfigKeyInfo keyInfo = convertConfigKey(keyEntry.getValue());
        Type type = keyEntry.getValue().type;
        ConfigValueInfo valueInfo = null;
        ConfigValue value = valuesByName.get(keyEntry.getKey());
        if (value != null) {
            valueInfo = convertConfigValue(value, type);
            totalErrors += value.errorMessages().size();
        }
        infos.add(new ConfigInfo(keyInfo, valueInfo));
    }
    return new ConfigInfos(connType, totalErrors, groups, infos);
}
|
@Test
public void testGenerateResultWithConfigValuesWithNoConfigKeysAndWithSomeErrors() {
// No declared keys at all: every value must still be reported (with a null
// key info) and the two error messages must be counted.
String name = "com.acme.connector.MyConnector";
Map<String, ConfigDef.ConfigKey> keys = new HashMap<>();
List<String> groups = new ArrayList<>();
List<ConfigValue> values = new ArrayList<>();
addValue(values, "config.a1", "value.a1");
addValue(values, "config.b1", "value.b1");
addValue(values, "config.b2", "value.b2");
addValue(values, "config.c1", "value.c1", "error c1");
addValue(values, "config.extra1", "value.extra1");
addValue(values, "config.extra2", "value.extra2", "error extra2");
ConfigInfos infos = AbstractHerder.generateResult(name, keys, values, groups);
assertEquals(name, infos.name());
assertEquals(groups, infos.groups());
assertEquals(values.size(), infos.values().size());
assertEquals(2, infos.errorCount());
// Every entry lacks key metadata but carries its reported value (and errors).
assertNoInfoKey(infos, "config.a1");
assertNoInfoKey(infos, "config.b1");
assertNoInfoKey(infos, "config.b2");
assertNoInfoKey(infos, "config.c1");
assertNoInfoKey(infos, "config.extra1");
assertNoInfoKey(infos, "config.extra2");
assertInfoValue(infos, "config.a1", "value.a1");
assertInfoValue(infos, "config.b1", "value.b1");
assertInfoValue(infos, "config.b2", "value.b2");
assertInfoValue(infos, "config.c1", "value.c1", "error c1");
assertInfoValue(infos, "config.extra1", "value.extra1");
assertInfoValue(infos, "config.extra2", "value.extra2", "error extra2");
}
|
/**
 * Assembles the per-line readers used to populate line data for a component.
 *
 * <p>NOTE(review): the order in which readers are added appears deliberate
 * (coverage, SCM, highlighting, symbols, duplication, is-new) — do not reorder
 * without confirming downstream consumers are order-insensitive.
 * The closeable iterators are collected so the returned {@code LineReadersImpl}
 * can release them when done.
 */
public LineReaders getLineReaders(Component component) {
List<LineReader> readers = new ArrayList<>();
List<CloseableIterator<?>> closeables = new ArrayList<>();
ScmLineReader scmLineReader = null;
int componentRef = component.getReportAttributes().getRef();
// Coverage data comes from the scanner report and must be closed afterwards.
CloseableIterator<ScannerReport.LineCoverage> coverageIt = reportReader.readComponentCoverage(componentRef);
closeables.add(coverageIt);
readers.add(new CoverageLineReader(coverageIt));
// SCM info is optional; keep a reference to the reader when present.
Optional<ScmInfo> scmInfoOptional = scmInfoRepository.getScmInfo(component);
if (scmInfoOptional.isPresent()) {
scmLineReader = new ScmLineReader(scmInfoOptional.get());
readers.add(scmLineReader);
}
RangeOffsetConverter rangeOffsetConverter = new RangeOffsetConverter();
CloseableIterator<ScannerReport.SyntaxHighlightingRule> highlightingIt = reportReader.readComponentSyntaxHighlighting(componentRef);
closeables.add(highlightingIt);
readers.add(new HighlightingLineReader(component, highlightingIt, rangeOffsetConverter));
CloseableIterator<ScannerReport.Symbol> symbolsIt = reportReader.readComponentSymbols(componentRef);
closeables.add(symbolsIt);
readers.add(new SymbolsLineReader(component, symbolsIt, rangeOffsetConverter));
readers.add(new DuplicationLineReader(duplicationRepository.getDuplications(component)));
readers.add(new IsNewLineReader(newLinesRepository, component));
return new LineReadersImpl(readers, scmLineReader, closeables);
}
|
@Test
public void should_create_readers() {
initBasicReport(10);
LineReadersImpl lineReaders = (LineReadersImpl) underTest.getLineReaders(fileComponent());
assertThat(lineReaders).isNotNull();
// 3 closeables: coverage, highlighting and symbols iterators.
assertThat(lineReaders.closeables).hasSize(3);
// 5 readers: coverage, highlighting, symbols, duplication and is-new
// (no SCM info in the basic report).
assertThat(lineReaders.readers).hasSize(5);
}
|
/**
 * Builds a Spring Security {@link Authentication} from a Nacos JWT token.
 *
 * <p>The token is parsed (and implicitly validated) by the JWT parser; the
 * resulting principal carries no password and no granted authorities.
 *
 * @param token the raw JWT token string
 * @throws AccessException if the token cannot be parsed/validated
 * @deprecated retained for backwards compatibility
 */
@Deprecated
public Authentication getAuthentication(String token) throws AccessException {
    NacosUser parsedUser = jwtParser.parse(token);
    // No roles are attached here — an empty authority list is used.
    List<GrantedAuthority> grantedAuthorities = AuthorityUtils.commaSeparatedStringToAuthorityList(StringUtils.EMPTY);
    User springUser = new User(parsedUser.getUserName(), "", grantedAuthorities);
    return new UsernamePasswordAuthenticationToken(springUser, "", grantedAuthorities);
}
|
@Test
void getAuthentication() throws AccessException {
// A token created by the manager itself must round-trip into a non-null Authentication.
String nacosToken = jwtTokenManager.createToken("nacos");
Authentication authentication = jwtTokenManager.getAuthentication(nacosToken);
assertNotNull(authentication);
}
|
/**
 * Fetches the session iterator for {@code key} from the first underlying store
 * that has any sessions for it.
 *
 * <p>Stores that return an empty iterator have that iterator closed before the
 * next store is tried; if no store has data, an empty iterator is returned.
 * An {@link InvalidStateStoreException} from a store is rethrown with a
 * rediscovery hint for the caller.
 *
 * @param key the session key; must not be {@code null}
 */
@Override
public KeyValueIterator<Windowed<K>, V> fetch(final K key) {
    Objects.requireNonNull(key, "key can't be null");
    final List<ReadOnlySessionStore<K, V>> candidateStores = storeProvider.stores(storeName, queryableStoreType);
    for (final ReadOnlySessionStore<K, V> candidate : candidateStores) {
        try {
            final KeyValueIterator<Windowed<K>, V> iterator = candidate.fetch(key);
            if (iterator.hasNext()) {
                return iterator;
            }
            // Nothing here — release the iterator and keep looking.
            iterator.close();
        } catch (final InvalidStateStoreException ise) {
            throw new InvalidStateStoreException("State store [" + storeName + "] is not available anymore" +
                " and may have been migrated to another instance; " +
                "please re-discover its location from the state metadata. " +
                "Original error message: " + ise);
        }
    }
    return KeyValueIterators.emptyIterator();
}
|
@Test
public void shouldFetchKeyRangeAcrossStoresWithNullKeyTo() {
// Two underlying stores, each holding one session; a range fetch with a null
// upper bound must span both stores and return both entries in order.
final ReadOnlySessionStoreStub<String, Long> secondUnderlying = new
ReadOnlySessionStoreStub<>();
stubProviderTwo.addStore(storeName, secondUnderlying);
underlyingSessionStore.put(new Windowed<>("a", new SessionWindow(0, 0)), 0L);
secondUnderlying.put(new Windowed<>("b", new SessionWindow(0, 0)), 10L);
final List<KeyValue<Windowed<String>, Long>> results = StreamsTestUtils.toList(sessionStore.fetch("a", null));
assertThat(results, equalTo(Arrays.asList(
KeyValue.pair(new Windowed<>("a", new SessionWindow(0, 0)), 0L),
KeyValue.pair(new Windowed<>("b", new SessionWindow(0, 0)), 10L))));
}
|
/**
 * Builds a {@link ConfigDescriptor} for a config proxy by reflecting over the
 * {@code @ConfigGroup}-annotated interface it implements.
 *
 * <p>Sections come from {@code @ConfigSection}-annotated String fields, items
 * from zero-argument {@code @ConfigItem}-annotated methods; both are sorted by
 * declared position, then name.
 *
 * @throws IllegalArgumentException if the proxy's interface lacks {@code @ConfigGroup}
 */
public ConfigDescriptor getConfigDescriptor(Config configurationProxy)
{
// The proxy implements exactly the user-facing config interface; reflect on it.
Class<?> inter = configurationProxy.getClass().getInterfaces()[0];
ConfigGroup group = inter.getAnnotation(ConfigGroup.class);
if (group == null)
{
throw new IllegalArgumentException("Not a config group");
}
// Sections are declared as annotated String constants on the interface;
// m.get(inter) reads the (static) field value — interface fields are implicitly
// static final, so the instance argument is ignored.
final List<ConfigSectionDescriptor> sections = Arrays.stream(inter.getDeclaredFields())
.filter(m -> m.isAnnotationPresent(ConfigSection.class) && m.getType() == String.class)
.map(m ->
{
try
{
return new ConfigSectionDescriptor(
String.valueOf(m.get(inter)),
m.getDeclaredAnnotation(ConfigSection.class)
);
}
catch (IllegalAccessException e)
{
// A broken section is logged and dropped rather than failing the whole group.
log.warn("Unable to load section {}::{}", inter.getSimpleName(), m.getName());
return null;
}
})
.filter(Objects::nonNull)
.sorted((a, b) -> ComparisonChain.start()
.compare(a.getSection().position(), b.getSection().position())
.compare(a.getSection().name(), b.getSection().name())
.result())
.collect(Collectors.toList());
// Items are the no-arg getter methods annotated with @ConfigItem; optional
// Range/Alpha/Units annotations refine how the item is rendered/validated.
final List<ConfigItemDescriptor> items = Arrays.stream(inter.getMethods())
.filter(m -> m.getParameterCount() == 0 && m.isAnnotationPresent(ConfigItem.class))
.map(m -> new ConfigItemDescriptor(
m.getDeclaredAnnotation(ConfigItem.class),
m.getGenericReturnType(),
m.getDeclaredAnnotation(Range.class),
m.getDeclaredAnnotation(Alpha.class),
m.getDeclaredAnnotation(Units.class)
))
.sorted((a, b) -> ComparisonChain.start()
.compare(a.getItem().position(), b.getItem().position())
.compare(a.getItem().name(), b.getItem().name())
.result())
.collect(Collectors.toList());
return new ConfigDescriptor(group, sections, items);
}
|
@Test
public void testGetConfigDescriptor() throws IOException
{
// TestConfig is expected to declare exactly two @ConfigItem getters.
TestConfig conf = manager.getConfig(TestConfig.class);
ConfigDescriptor descriptor = manager.getConfigDescriptor(conf);
Assert.assertEquals(2, descriptor.getItems().size());
}
|
/**
 * Counts currently connected clients whose connection source label marks them
 * as SDK clients (as opposed to, e.g., cluster-internal connections).
 *
 * @return the number of SDK client connections
 */
public int currentSdkClientCount() {
    Map<String, String> sdkSourceFilter = new HashMap<>(2);
    sdkSourceFilter.put(RemoteConstants.LABEL_SOURCE, RemoteConstants.LABEL_SOURCE_SDK);
    return currentClientsCount(sdkSourceFilter);
}
|
@Test
void testCurrentSdkCount() {
// The fixture registers exactly one SDK-labelled connection.
assertEquals(1, connectionManager.currentSdkClientCount());
}
|
/**
 * Validates that an imported job carries a description of at least
 * {@code minLength} characters.
 *
 * <p>Returns an empty list when the rule is disabled or the subject is not a
 * {@code JobMeta}; otherwise returns a single APPROVAL or ERROR feedback entry.
 */
@Override
public List<ImportValidationFeedback> verifyRule( Object subject ) {
  List<ImportValidationFeedback> results = new ArrayList<>();
  if ( !isEnabled() || !( subject instanceof JobMeta ) ) {
    return results;
  }
  String description = ( (JobMeta) subject ).getDescription();
  boolean longEnough = description != null && description.length() >= minLength;
  if ( longEnough ) {
    results.add( new ImportValidationFeedback(
      this, ImportValidationResultType.APPROVAL, "A description is present" ) );
  } else {
    results.add( new ImportValidationFeedback(
      this, ImportValidationResultType.ERROR, "A description is not present or too short" ) );
  }
  return results;
}
|
@Test
public void testVerifyRule_NullParameter_EnabledRule() {
// A null subject is not a JobMeta, so even an enabled rule yields no feedback.
JobHasDescriptionImportRule importRule = getImportRule( 10, true );
List<ImportValidationFeedback> feedbackList = importRule.verifyRule( null );
assertNotNull( feedbackList );
assertTrue( feedbackList.isEmpty() );
}
|
/**
 * Parses the ls command-line flags into the instance fields that control
 * listing behaviour (recursion, ordering, formatting).
 *
 * <p>NOTE: assignment order matters — {@code orderSize} is only honoured when
 * mtime ordering is not requested, and recursion is suppressed by {@code -d}.
 */
@Override
protected void processOptions(LinkedList<String> args)
throws IOException {
CommandFormat cf = new CommandFormat(0, Integer.MAX_VALUE,
OPTION_PATHONLY, OPTION_DIRECTORY, OPTION_HUMAN,
OPTION_HIDENONPRINTABLE, OPTION_RECURSIVE, OPTION_REVERSE,
OPTION_MTIME, OPTION_SIZE, OPTION_ATIME, OPTION_ECPOLICY);
cf.parse(args);
pathOnly = cf.getOpt(OPTION_PATHONLY);
// -d lists directories as plain entries, which also disables -R.
dirRecurse = !cf.getOpt(OPTION_DIRECTORY);
setRecursive(cf.getOpt(OPTION_RECURSIVE) && dirRecurse);
humanReadable = cf.getOpt(OPTION_HUMAN);
hideNonPrintable = cf.getOpt(OPTION_HIDENONPRINTABLE);
orderReverse = cf.getOpt(OPTION_REVERSE);
orderTime = cf.getOpt(OPTION_MTIME);
// -t (mtime) takes precedence over -S (size).
orderSize = !orderTime && cf.getOpt(OPTION_SIZE);
useAtime = cf.getOpt(OPTION_ATIME);
displayECPolicy = cf.getOpt(OPTION_ECPOLICY);
// With no path arguments, list the current directory.
if (args.isEmpty()) args.add(Path.CUR_DIR);
initialiseOrderComparator();
}
|
@Test
public void processPathDirOrderMtime() throws IOException {
// Six files whose mtimes deliberately disagree with their name order, so a
// name-ordered listing and an mtime-ordered listing differ.
TestFile testfile01 = new TestFile("testDirectory", "testFile01");
TestFile testfile02 = new TestFile("testDirectory", "testFile02");
TestFile testfile03 = new TestFile("testDirectory", "testFile03");
TestFile testfile04 = new TestFile("testDirectory", "testFile04");
TestFile testfile05 = new TestFile("testDirectory", "testFile05");
TestFile testfile06 = new TestFile("testDirectory", "testFile06");
// set file mtime in different order to file names
testfile01.setMtime(NOW.getTime() + 10);
testfile02.setMtime(NOW.getTime() + 30);
testfile03.setMtime(NOW.getTime() + 20);
testfile04.setMtime(NOW.getTime() + 60);
testfile05.setMtime(NOW.getTime() + 50);
testfile06.setMtime(NOW.getTime() + 40);
TestFile testDir = new TestFile("", "testDirectory");
testDir.setIsDir(true);
testDir.addContents(testfile01, testfile02, testfile03, testfile04,
testfile05, testfile06);
LinkedList<PathData> pathData = new LinkedList<PathData>();
pathData.add(testDir.getPathData());
PrintStream out = mock(PrintStream.class);
Ls ls = new Ls();
ls.out = out;
// "-t" requests ordering by modification time (newest first).
LinkedList<String> options = new LinkedList<String>();
options.add("-t");
ls.processOptions(options);
String lineFormat = TestFile.computeLineFormat(pathData);
ls.processArguments(pathData);
// Output must be in descending mtime order: 04, 05, 06, 02, 03, 01.
InOrder inOrder = inOrder(out);
inOrder.verify(out).println("Found 6 items");
inOrder.verify(out).println(testfile04.formatLineMtime(lineFormat));
inOrder.verify(out).println(testfile05.formatLineMtime(lineFormat));
inOrder.verify(out).println(testfile06.formatLineMtime(lineFormat));
inOrder.verify(out).println(testfile02.formatLineMtime(lineFormat));
inOrder.verify(out).println(testfile03.formatLineMtime(lineFormat));
inOrder.verify(out).println(testfile01.formatLineMtime(lineFormat));
verifyNoMoreInteractions(out);
}
|
/**
 * Resolves field metadata for a sample map key or value, dispatching on the
 * sample's serialization format (serialized Data, Portable, Compact, JSON or
 * plain Java).
 *
 * <p>Returns {@code null} for JSON samples and whenever resolution fails for
 * any reason — the broad catch deliberately converts all errors into
 * "no metadata" rather than propagating. NOTE(review): presumably the caller
 * treats null as "format could not be determined"; confirm before tightening
 * the catch.
 *
 * @param key whether the sample is the map key (affects generated field paths)
 */
@Nullable
@SuppressWarnings("checkstyle:returncount")
static Metadata resolve(InternalSerializationService ss, Object target, boolean key) {
try {
if (target instanceof Data) {
Data data = (Data) target;
if (data.isPortable()) {
ClassDefinition classDefinition = ss.getPortableContext().lookupClassDefinition(data);
return resolvePortable(classDefinition, key);
} else if (data.isCompact()) {
return resolveCompact(ss.extractSchemaFromData(data), key);
} else if (data.isJson()) {
// JSON has no fixed schema to derive metadata from.
return null;
} else {
// Fall back to deserializing and reflecting on the Java class.
return resolveJava(ss.toObject(data).getClass(), key);
}
} else if (target instanceof VersionedPortable) {
VersionedPortable portable = (VersionedPortable) target;
ClassDefinition classDefinition = ss.getPortableContext()
.lookupClassDefinition(portable.getFactoryId(), portable.getClassId(), portable.getClassVersion());
return resolvePortable(classDefinition, key);
} else if (target instanceof Portable) {
// Plain Portable has no version accessor; version 0 is used for lookup.
Portable portable = (Portable) target;
ClassDefinition classDefinition = ss.getPortableContext()
.lookupClassDefinition(portable.getFactoryId(), portable.getClassId(), 0);
return resolvePortable(classDefinition, key);
} else if (target instanceof PortableGenericRecord) {
return resolvePortable(((PortableGenericRecord) target).getClassDefinition(), key);
} else if (target instanceof CompactGenericRecord) {
return resolveCompact(((CompactGenericRecord) target).getSchema(), key);
} else if (ss.isCompactSerializable(target)) {
Schema schema = ss.extractSchemaFromObject(target);
return resolveCompact(schema, key);
} else if (target instanceof HazelcastJsonValue) {
return null;
} else {
return resolveJava(target.getClass(), key);
}
} catch (Exception e) {
// Any failure means "no metadata available" for this sample.
return null;
}
}
|
@Test
public void test_compactRecord() {
SerializationConfig serializationConfig = new SerializationConfig();
InternalSerializationService ss = new DefaultSerializationServiceBuilder()
.setSchemaService(CompactTestUtil.createInMemorySchemaService())
.setConfig(serializationConfig)
.build();
// Resolving a compact GenericRecord directly must yield compact metadata.
Metadata metadata = SampleMetadataResolver.resolve(ss, GenericRecordBuilder.compact("type-name").setInt32("field", 1).build(), key);
assertThat(metadata.fields()).containsExactly(
new MappingField("field", QueryDataType.INT, (key ? KEY : VALUE) + ".field")
);
assertThat(metadata.options()).containsExactly(
entry(key ? OPTION_KEY_FORMAT : OPTION_VALUE_FORMAT, COMPACT_FORMAT),
entry(key ? OPTION_KEY_COMPACT_TYPE_NAME : OPTION_VALUE_COMPACT_TYPE_NAME, "type-name")
);
// The same record, pre-serialized to Data, must resolve identically.
metadata = SampleMetadataResolver.resolve(ss, ss.toData(GenericRecordBuilder.compact("type-name").setInt32("field", 1).build()), key);
assertThat(metadata.fields()).containsExactly(
new MappingField("field", QueryDataType.INT, (key ? KEY : VALUE) + ".field")
);
assertThat(metadata.options()).containsExactly(
entry(key ? OPTION_KEY_FORMAT : OPTION_VALUE_FORMAT, COMPACT_FORMAT),
entry(key ? OPTION_KEY_COMPACT_TYPE_NAME : OPTION_VALUE_COMPACT_TYPE_NAME, "type-name")
);
}
|
/**
 * Coerces each property value to the declared type of its property.
 *
 * <p>Unknown properties either pass through unchanged (when
 * {@code ignoreUnresolved} is set) or cause the underlying
 * {@code PropertyNotFoundException} to propagate.
 *
 * @param streamsProperties properties to coerce; {@code null} yields an empty map
 * @param ignoreUnresolved  whether unknown property names are tolerated
 * @return a new map with coerced values
 */
public static Map<String, Object> coerceTypes(
    final Map<String, Object> streamsProperties,
    final boolean ignoreUnresolved
) {
    if (streamsProperties == null) {
        return Collections.emptyMap();
    }
    final Map<String, Object> coerced = new HashMap<>(streamsProperties.size());
    streamsProperties.forEach((name, value) -> {
        try {
            coerced.put(name, coerceType(name, value));
        } catch (final PropertyNotFoundException notFound) {
            if (!ignoreUnresolved) {
                throw notFound;
            }
            // Unknown property: keep the raw value as-is.
            coerced.put(name, value);
        }
    });
    return coerced;
}
|
@Test
public void shouldThrowOnUnkownPropertyFromCoerceTypes() {
// given/when: an unknown property with ignoreUnresolved=false must propagate
// the PropertyNotFoundException.
assertThrows(
PropertyNotFoundException.class,
() -> PropertiesUtil.coerceTypes(ImmutableMap.of("foo", "bar"), false)
);
}
|
/**
 * Renames {@code oldKey} to {@code newKey} in {@code map}, in place.
 *
 * <p>Does nothing when the map is empty/null or {@code oldKey} is absent.
 *
 * @return the same map instance, for chaining
 * @throws IllegalArgumentException if {@code newKey} already exists
 */
public static <K, V> Map<K, V> renameKey(Map<K, V> map, K oldKey, K newKey) {
	if (!isNotEmpty(map) || !map.containsKey(oldKey)) {
		// Nothing to rename.
		return map;
	}
	if (map.containsKey(newKey)) {
		throw new IllegalArgumentException(StrUtil.format("The key '{}' exist !", newKey));
	}
	map.put(newKey, map.remove(oldKey));
	return map;
}
|
@Test
public void renameKeyTest() {
// Renaming "name" to "newName" must move the value under the new key.
final Dict v1 = Dict.of().set("id", 12).set("name", "张三").set("age", null);
final Map<String, Object> map = MapUtil.renameKey(v1, "name", "newName");
assertEquals("张三", map.get("newName"));
}
|
/**
 * Returns the time unit this timer reports its measurements in.
 */
@Override
public TimeUnit unit() {
return timeUnit;
}
|
@Test
public void nameAndUnit() {
// The constructor-supplied unit is reported back, and a real timer is not a no-op.
DefaultTimer timer = new DefaultTimer(TimeUnit.MINUTES);
assertThat(timer.unit()).isEqualTo(TimeUnit.MINUTES);
assertThat(timer.isNoop()).isFalse();
}
|
/**
 * Returns a transform that groups all elements of the input collection into a
 * single global group.
 *
 * @param <T> element type of the input collection
 */
public static <T> Global<T> globally() {
return new Global<>();
}
|
@Test
@Category(NeedsRunner.class)
public void testGroupGlobally() {
// All four elements, regardless of key, must land in one global iterable.
Collection<Basic> elements =
ImmutableList.of(
Basic.of("key1", 1, "value1"),
Basic.of("key1", 1, "value2"),
Basic.of("key2", 2, "value3"),
Basic.of("key2", 2, "value4"));
PCollection<Iterable<Basic>> grouped =
pipeline.apply(Create.of(elements)).apply(Group.globally());
PAssert.that(grouped).satisfies(actual -> containsSingleIterable(elements, actual));
pipeline.run();
}
|
/**
 * Queries all windows for {@code key} on one partition of the backing state
 * store, keeping only those whose start and end times fall inside the given
 * bounds.
 *
 * <p>Row time for each returned row is the window end (epoch millis). The
 * iterator obtained from the store is closed before returning — results are
 * fully materialized into an immutable list first.
 *
 * @param position optional position bound the store must have reached
 * @throws MaterializationException if the store query fails
 */
@Override
public KsMaterializedQueryResult<WindowedRow> get(
final GenericKey key,
final int partition,
final Range<Instant> windowStart,
final Range<Instant> windowEnd,
final Optional<Position> position
) {
try {
final WindowRangeQuery<GenericKey, GenericRow> query = WindowRangeQuery.withKey(key);
StateQueryRequest<KeyValueIterator<Windowed<GenericKey>, GenericRow>> request =
inStore(stateStore.getStateStoreName()).withQuery(query);
if (position.isPresent()) {
request = request.withPositionBound(PositionBound.at(position.get()));
}
final StateQueryResult<KeyValueIterator<Windowed<GenericKey>, GenericRow>> result =
stateStore.getKafkaStreams().query(request);
// Only the requested partition's result is consumed.
final QueryResult<KeyValueIterator<Windowed<GenericKey>, GenericRow>> queryResult =
result.getPartitionResults().get(partition);
if (queryResult.isFailure()) {
throw failedQueryException(queryResult);
}
// try-with-resources guarantees the store iterator is released.
try (KeyValueIterator<Windowed<GenericKey>, GenericRow> it =
queryResult.getResult()) {
final Builder<WindowedRow> builder = ImmutableList.builder();
while (it.hasNext()) {
final KeyValue<Windowed<GenericKey>, GenericRow> next = it.next();
final Window wnd = next.key.window();
// Filter on both bounds; windows outside either range are skipped.
if (!windowStart.contains(wnd.startTime())) {
continue;
}
if (!windowEnd.contains(wnd.endTime())) {
continue;
}
final long rowTime = wnd.end();
final WindowedRow row = WindowedRow.of(
stateStore.schema(),
next.key,
next.value,
rowTime
);
builder.add(row);
}
return KsMaterializedQueryResult.rowIteratorWithPosition(
builder.build().iterator(), queryResult.getPosition());
}
} catch (final NotUpToBoundException | MaterializationException e) {
// Already meaningful to callers — rethrow unchanged.
throw e;
} catch (final Exception e) {
throw new MaterializationException("Failed to get value from materialized table", e);
}
}
|
@Test
public void shouldReturnValueIfSessionStartsAtLowerBoundIfLowerStartBoundClosed() {
// Given: a closed lower start bound and a session starting exactly on it.
final Range<Instant> startBounds = Range.closed(
LOWER_INSTANT,
UPPER_INSTANT
);
final Instant wend = LOWER_INSTANT.plusMillis(1);
givenSingleSession(LOWER_INSTANT, wend);
// When:
final KsMaterializedQueryResult<WindowedRow> result =
table.get(A_KEY, PARTITION, startBounds, Range.all());
// Then: the boundary session is included and the store position is propagated.
final Iterator<WindowedRow> rowIterator = result.getRowIterator();
assertThat(rowIterator.hasNext(), is(true));
assertThat(rowIterator.next(), is(
WindowedRow.of(
SCHEMA,
sessionKey(LOWER_INSTANT, wend),
A_VALUE,
wend.toEpochMilli()
)
));
assertThat(result.getPosition(), not(Optional.empty()));
assertThat(result.getPosition().get(), is(POSITION));
}
|
/**
 * Installs a content pack, dispatching to the version-specific implementation.
 *
 * <p>Only V1 content packs are currently supported.
 *
 * @throws IllegalArgumentException for any unsupported content pack version
 */
public ContentPackInstallation installContentPack(ContentPack contentPack,
                                                  Map<String, ValueReference> parameters,
                                                  String comment,
                                                  String user) {
    if (!(contentPack instanceof ContentPackV1)) {
        throw new IllegalArgumentException("Unsupported content pack version: " + contentPack.version());
    }
    return installContentPack((ContentPackV1) contentPack, parameters, comment, user);
}
|
@Test
public void installContentPackWithSystemStreamDependencies() throws Exception {
// A pack whose view/event-definition entities reference system streams must
// install cleanly when all system streams are loadable.
ImmutableSet<Entity> entities = ImmutableSet.of(createTestViewEntity(), createTestEventDefinitionEntity());
ContentPackV1 contentPack = ContentPackV1.builder()
.description("test")
.entities(entities)
.name("test")
.revision(1)
.summary("")
.vendor("")
.url(URI.create("http://graylog.com"))
.id(ModelId.of("dead-beef"))
.build();
for (String id : Stream.ALL_SYSTEM_STREAM_IDS) {
when(streamService.load(id)).thenReturn(createTestStream(id));
}
when(userService.load(TEST_USER)).thenReturn(mockUser);
when(searchDbService.save(any())).thenReturn(Search.builder().id("id").build());
when(viewService.saveWithOwner(any(), any())).thenReturn(ViewDTO.builder().id("id").title("title").searchId("id").state(Collections.emptyMap()).build());
when(eventDefinitionHandler.create(any(), any())).thenReturn(createTestEventDefinitionDto());
// No exception expected.
contentPackService.installContentPack(contentPack, Collections.emptyMap(), "", TEST_USER);
}
|
/**
 * Resolves the aggregation strategy for a dynamic-router recipient list.
 *
 * <p>Precedence: an explicitly configured strategy bean, then a registry
 * lookup by name (failing fast if the name resolves to nothing), then a no-op
 * strategy. The resolved strategy is given the Camel context and, when
 * unit-of-work sharing is enabled, wrapped accordingly.
 *
 * @throws IllegalArgumentException if a configured strategy name is not in the registry
 */
static AggregationStrategy createAggregationStrategy(CamelContext camelContext, DynamicRouterConfiguration cfg) {
    AggregationStrategy strategy = cfg.getAggregationStrategyBean();
    if (strategy == null) {
        String strategyRef = cfg.getAggregationStrategy();
        if (strategyRef != null) {
            strategy = lookupByNameAndType(camelContext, strategyRef, Object.class)
                    .map(aggStr -> convertAggregationStrategy.apply(aggStr, cfg))
                    .orElseThrow(() -> new IllegalArgumentException(
                            "Cannot find AggregationStrategy in Registry with name: " +
                                    cfg.getAggregationStrategy()));
        } else {
            strategy = new NoopAggregationStrategy();
        }
    }
    CamelContextAware.trySetCamelContext(strategy, camelContext);
    return cfg.isShareUnitOfWork() ? new ShareUnitOfWorkAggregationStrategy(strategy) : strategy;
}
|
@Test
void testCreateAggregationStrategyWithInstance() {
// An explicitly configured strategy bean short-circuits the registry lookup.
when(mockConfig.getAggregationStrategyBean()).thenReturn(mockStrategy);
AggregationStrategy strategy = DynamicRouterRecipientListHelper.createAggregationStrategy(camelContext, mockConfig);
Assertions.assertNotNull(strategy);
}
|
/**
 * Writes a diagnostics snapshot of this member: node identity/state, cluster
 * membership info and the current member list.
 *
 * <p>{@code String.valueOf} renders absent (null) state, cluster id and master
 * address uniformly as the string {@code "null"}.
 */
@Override
public void run(DiagnosticsLogWriter writer) {
    writer.startSection("HazelcastInstance");
    writer.writeKeyValueEntry("thisAddress", nodeEngine.getNode().getThisAddress().toString());
    writer.writeKeyValueEntry("isRunning", nodeEngine.getNode().isRunning());
    writer.writeKeyValueEntry("isLite", nodeEngine.getNode().isLiteMember());
    writer.writeKeyValueEntry("joined", nodeEngine.getNode().getClusterService().isJoined());
    NodeState nodeState = nodeEngine.getNode().getState();
    writer.writeKeyValueEntry("nodeState", String.valueOf(nodeState));
    UUID currentClusterId = nodeEngine.getClusterService().getClusterId();
    writer.writeKeyValueEntry("clusterId", String.valueOf(currentClusterId));
    writer.writeKeyValueEntry("clusterSize", nodeEngine.getClusterService().getSize());
    writer.writeKeyValueEntry("isMaster", nodeEngine.getClusterService().isMaster());
    Address master = nodeEngine.getClusterService().getMasterAddress();
    writer.writeKeyValueEntry("masterAddress", String.valueOf(master));
    // Nested section listing every known member's address.
    writer.startSection("Members");
    for (Member member : nodeEngine.getClusterService().getMemberImpls()) {
        writer.writeEntry(member.getAddress().toString());
    }
    writer.endSection();
    writer.endSection();
}
|
@Test
public void testRun() {
// The diagnostics output must contain the instance section, the running flag
// and the members section.
plugin.run(logWriter);
assertContains("HazelcastInstance[");
assertContains("isRunning=true");
assertContains("Members[");
}
|
/**
 * Aggregates windowed values using a default materialization (key serde from
 * this stream, no value serde). Delegates to the fully-parameterized overload.
 */
@Override
public <VR> KTable<Windowed<K>, VR> aggregate(final Initializer<VR> initializer,
final Aggregator<? super K, ? super V, VR> aggregator) {
return aggregate(initializer, aggregator, Materialized.with(keySerde, null));
}
|
@Test
public void shouldThrowNullPointerOnMaterializedAggregateIfAggregatorIsNull() {
// A null aggregator must be rejected eagerly with a NullPointerException.
assertThrows(NullPointerException.class, () -> windowedStream.aggregate(
MockInitializer.STRING_INIT,
null,
Materialized.as("store")));
}
|
/**
 * Creates a verification code for the given mobile/scene and sends it via SMS
 * using the scene's configured template.
 *
 * @param reqDTO send request carrying mobile number, scene and creator IP
 */
@Override
public void sendSmsCode(SmsCodeSendReqDTO reqDTO) {
SmsSceneEnum sceneEnum = SmsSceneEnum.getCodeByScene(reqDTO.getScene());
Assert.notNull(sceneEnum, "验证码场景({}) 查找不到配置", reqDTO.getScene());
// Create the verification code
String code = createSmsCode(reqDTO.getMobile(), reqDTO.getScene(), reqDTO.getCreateIp());
// Send the verification code
smsSendService.sendSingleSms(reqDTO.getMobile(), null, null,
sceneEnum.getTemplateCode(), MapUtil.of("code", code));
}
|
@Test
public void sendSmsCode_tooFast() {
// Mock data: a recently created code for the same mobile number.
SmsCodeDO smsCodeDO = randomPojo(SmsCodeDO.class,
o -> o.setMobile("15601691300").setTodayIndex(1));
smsCodeMapper.insert(smsCodeDO);
// Prepare the request parameters
SmsCodeSendReqDTO reqDTO = randomPojo(SmsCodeSendReqDTO.class, o -> {
o.setMobile("15601691300");
o.setScene(SmsSceneEnum.MEMBER_LOGIN.getScene());
});
// Mock collaborating configuration
SqlConstants.init(DbType.MYSQL);
// Call and assert the rate-limit exception is raised
assertServiceException(() -> smsCodeService.sendSmsCode(reqDTO),
SMS_CODE_SEND_TOO_FAST);
}
|
/**
 * Returns the {@code CloudName} for the given lowercase cloud identifier.
 *
 * <p>Well-known clouds map to their canonical singletons (so reference
 * equality keeps working for them); any other string yields a new instance.
 */
public static CloudName from(String cloud) {
    switch (cloud) {
        case "aws":
            return AWS;
        case "azure":
            return AZURE;
        case "gcp":
            return GCP;
        case "default":
            return DEFAULT;
        case "yahoo":
            return YAHOO;
        default:
            return new CloudName(cloud);
    }
}
|
@Test
void returns_same_instance_for_known_clouds() {
// Known identifiers must return the canonical singletons (reference equality).
assertSame(CloudName.from("aws"), CloudName.AWS);
assertSame(CloudName.from("azure"), CloudName.AZURE);
assertSame(CloudName.from("gcp"), CloudName.GCP);
assertSame(CloudName.from("default"), CloudName.DEFAULT);
assertSame(CloudName.from("yahoo"), CloudName.YAHOO);
assertThrows(IllegalArgumentException.class, () -> CloudName.from("aWs")); // Must be lowercase
}
|
/**
 * Aggregates the health of all registered circuit breakers that opted into
 * health reporting.
 *
 * <p>Each qualifying breaker contributes a named sub-health; the overall
 * status is computed by the configured {@code StatusAggregator} over the set
 * of individual statuses.
 */
@Override
public Health health() {
Map<String, Health> healths = circuitBreakerRegistry.getAllCircuitBreakers().stream()
.filter(this::isRegisterHealthIndicator)
.collect(Collectors.toMap(CircuitBreaker::getName,
this::mapBackendMonitorState));
Status status = this.statusAggregator.getAggregateStatus(healths.values().stream().map(Health::getStatus).collect(Collectors.toSet()));
return Health.status(status).withDetails(healths).build();
}
|
@Test
public void testHealthStatus() {
// One breaker per state so the aggregate must reflect the worst (OPEN -> DOWN).
CircuitBreaker openCircuitBreaker = mock(CircuitBreaker.class);
CircuitBreaker halfOpenCircuitBreaker = mock(CircuitBreaker.class);
CircuitBreaker closeCircuitBreaker = mock(CircuitBreaker.class);
Map<CircuitBreaker.State, CircuitBreaker> expectedStateToCircuitBreaker = new HashMap<>();
expectedStateToCircuitBreaker.put(OPEN, openCircuitBreaker);
expectedStateToCircuitBreaker.put(HALF_OPEN, halfOpenCircuitBreaker);
expectedStateToCircuitBreaker.put(CLOSED, closeCircuitBreaker);
CircuitBreakerConfigurationProperties.InstanceProperties instanceProperties =
mock(CircuitBreakerConfigurationProperties.InstanceProperties.class);
CircuitBreakerConfigurationProperties circuitBreakerProperties = mock(
CircuitBreakerConfigurationProperties.class);
// given
CircuitBreakerRegistry registry = mock(CircuitBreakerRegistry.class);
CircuitBreakerConfig config = mock(CircuitBreakerConfig.class);
CircuitBreaker.Metrics metrics = mock(CircuitBreaker.Metrics.class);
// when
when(registry.getAllCircuitBreakers()).thenReturn(new HashSet<>(expectedStateToCircuitBreaker.values()));
boolean allowHealthIndicatorToFail = true;
expectedStateToCircuitBreaker.forEach(
(state, circuitBreaker) -> setCircuitBreakerWhen(state, circuitBreaker, config, metrics, instanceProperties, circuitBreakerProperties, allowHealthIndicatorToFail));
CircuitBreakersHealthIndicator healthIndicator =
new CircuitBreakersHealthIndicator(registry, circuitBreakerProperties, new SimpleStatusAggregator());
// then
Health health = healthIndicator.health();
then(health.getStatus()).isEqualTo(Status.DOWN);
then(health.getDetails()).containsKeys(OPEN.name(), HALF_OPEN.name(), CLOSED.name());
assertState(OPEN, Status.DOWN, health.getDetails());
assertState(HALF_OPEN, new Status("CIRCUIT_HALF_OPEN"), health.getDetails());
assertState(CLOSED, Status.UP, health.getDetails());
}
|
/**
 * Rewrites a deterministic expression in terms of variables accepted by
 * {@code variableScope}. Delegates to the internal overload with full
 * rewriting enabled.
 *
 * @throws IllegalArgumentException if the expression is non-deterministic
 */
public RowExpression rewriteExpression(RowExpression expression, Predicate<VariableReferenceExpression> variableScope)
{
checkArgument(determinismEvaluator.isDeterministic(expression), "Only deterministic expressions may be considered for rewrite");
return rewriteExpression(expression, variableScope, true);
}
|
@Test
public void testTriviallyRewritable()
{
// With no equalities registered, an expression over in-scope variables
// rewrites to itself.
EqualityInference.Builder builder = new EqualityInference.Builder(METADATA);
RowExpression expression = builder.build()
.rewriteExpression(someExpression("a1", "a2"), matchesVariables("a1", "a2"));
assertEquals(expression, someExpression("a1", "a2"));
}
|
/**
 * Creates a table with the given schema that expires one hour from now.
 *
 * <p>Delegates to the overload taking an explicit expiration timestamp.
 *
 * @throws BigQueryResourceManagerException if table creation fails
 */
public synchronized TableId createTable(String tableName, Schema schema)
    throws BigQueryResourceManagerException {
  // Default table lifetime: one hour (3,600,000 ms) from the current time.
  long expirationMillis = System.currentTimeMillis() + 3600000;
  return createTable(tableName, schema, expirationMillis);
}
|
@Test
public void testCreateTableShouldThrowErrorWhenSchemaIsNull() {
// A null schema must be rejected before any remote call is attempted.
assertThrows(IllegalArgumentException.class, () -> testManager.createTable(TABLE_NAME, null));
}
|
/**
 * Updates a user's password after verifying the supplied old password.
 *
 * @param id    id of the user being updated
 * @param reqVO request carrying the old and new passwords
 */
@Override
public void updateUserPassword(Long id, UserProfileUpdatePasswordReqVO reqVO) {
// Verify the old password
validateOldPassword(id, reqVO.getOldPassword());
// Perform the update
AdminUserDO updateObj = new AdminUserDO().setId(id);
updateObj.setPassword(encodePassword(reqVO.getNewPassword())); // Encode the password
userMapper.updateById(updateObj);
}
|
@Test
public void testUpdateUserPassword_success() {
// Mock data: existing user with an encoded password
AdminUserDO dbUser = randomAdminUserDO(o -> o.setPassword("encode:tudou"));
userMapper.insert(dbUser);
// Prepare the request parameters
Long userId = dbUser.getId();
UserProfileUpdatePasswordReqVO reqVO = randomPojo(UserProfileUpdatePasswordReqVO.class, o -> {
o.setOldPassword("tudou");
o.setNewPassword("yuanma");
});
// Mock the password encoder: encoding prefixes "encode:", matching succeeds
when(passwordEncoder.encode(anyString())).then(
(Answer<String>) invocationOnMock -> "encode:" + invocationOnMock.getArgument(0));
when(passwordEncoder.matches(eq(reqVO.getOldPassword()), eq(dbUser.getPassword()))).thenReturn(true);
// Call the method under test
userService.updateUserPassword(userId, reqVO);
// Assert the stored password was re-encoded from the new password
AdminUserDO user = userMapper.selectById(userId);
assertEquals("encode:yuanma", user.getPassword());
}
|
/**
 * Resolves a SpEL expression attached to a resilience annotation.
 *
 * <p>Resolution order: blank expressions pass through; {@code ${...}}
 * placeholders are resolved via the environment's string value resolver;
 * method-style and bean-style SpEL expressions are evaluated against a
 * method-based context (the bean variant additionally wires a bean resolver);
 * anything else is returned verbatim as a literal.
 *
 * @param method         the intercepted method (root object for evaluation)
 * @param arguments      the invocation arguments
 * @param spelExpression the raw expression from the annotation
 * @return the resolved value, or the input unchanged when nothing matches
 */
@Override
public String resolve(Method method, Object[] arguments, String spelExpression) {
    if (StringUtils.isEmpty(spelExpression)) {
        return spelExpression;
    }
    if (spelExpression.matches(PLACEHOLDER_SPEL_REGEX) && stringValueResolver != null) {
        return stringValueResolver.resolveStringValue(spelExpression);
    }
    if (spelExpression.matches(METHOD_SPEL_REGEX)) {
        return evaluateAgainstMethodContext(method, arguments, spelExpression, false);
    }
    if (spelExpression.matches(BEAN_SPEL_REGEX)) {
        // Bean references (@name) additionally require the bean factory.
        return evaluateAgainstMethodContext(method, arguments, spelExpression, true);
    }
    return spelExpression;
}

/**
 * Evaluates {@code spelExpression} in a method-based evaluation context.
 *
 * @param withBeanResolver whether {@code @bean}-style references must resolve
 *                         against the application's bean factory
 * @return the evaluation result cast to String (may be {@code null})
 */
private String evaluateAgainstMethodContext(Method method, Object[] arguments, String spelExpression, boolean withBeanResolver) {
    SpelRootObject rootObject = new SpelRootObject(method, arguments);
    MethodBasedEvaluationContext evaluationContext = new MethodBasedEvaluationContext(rootObject, method, arguments, parameterNameDiscoverer);
    if (withBeanResolver) {
        evaluationContext.setBeanResolver(new BeanFactoryResolver(this.beanFactory));
    }
    Object evaluated = expressionParser.parseExpression(spelExpression).getValue(evaluationContext);
    return (String) evaluated;
}
|
@Test
public void atTest() throws Exception {
// A bare "@" matches neither the placeholder nor the SpEL patterns and must
// be returned unchanged as a literal.
DefaultSpelResolverTest target = new DefaultSpelResolverTest();
Method testMethod = target.getClass().getMethod("testMethod", String.class);
String result = sut.resolve(testMethod, new Object[]{}, "@");
assertThat(result).isEqualTo("@");
}
|
/**
 * Visitor hook: dispatches this COS integer to the visitor.
 *
 * @throws IOException if the visitor fails while writing/processing
 */
@Override
public void accept(ICOSVisitor visitor) throws IOException
{
visitor.visitFromInt(this);
}
|
@Override
@Test
void testAccept()
{
// Writing COSInteger values across a range (including negatives) must emit
// exactly their ISO-8859-1 decimal representation.
ByteArrayOutputStream outStream = new ByteArrayOutputStream();
COSWriter visitor = new COSWriter(outStream);
int index = 0;
try
{
for (int i = -1000; i < 3000; i += 200)
{
index = i;
COSInteger cosInt = COSInteger.get(i);
cosInt.accept(visitor);
testByteArrays(String.valueOf(i).getBytes(StandardCharsets.ISO_8859_1), outStream.toByteArray());
// Reset between values so each assertion sees only one number.
outStream.reset();
}
}
catch (Exception e)
{
fail("Failed to write " + index + " exception: " + e.getMessage());
}
}
|
/**
 * Returns the matching results of all registered {@code VulnDetector} plugins
 * against the given reconnaissance report.
 *
 * <p>Non-detector plugins are filtered out; detectors whose matcher yields no
 * result are dropped by the flatMap over the (presumably Optional) match result
 * — NOTE(review): confirm {@code matchAllVulnDetectors} returns an Optional.
 */
public ImmutableList<PluginMatchingResult<VulnDetector>> getVulnDetectors(
    ReconnaissanceReport reconnaissanceReport) {
  return tsunamiPlugins.entrySet().stream()
      .filter(entry -> isVulnDetector(entry.getKey()))
      .map(entry -> matchAllVulnDetectors(entry.getKey(), entry.getValue(), reconnaissanceReport))
      .flatMap(Streams::stream)
      .collect(toImmutableList());
}
|
// Verifies that a remote detector with per-plugin OS/service filters only receives the
// services matching its declared constraints.
// NOTE(review): the index-based assertions below rely on the stable ordering of the four
// plugin definitions inside FakeFilteringRemoteDetector — confirm that order is guaranteed.
@Test
public void
    getVulnDetectors_whenRemoteDetectorOsFilterHasMatchingService_returnsMatchedService() {
  // Three fixture services: WordPress over http, Jenkins over https, and one
  // unnamed service (no service name set at all).
  NetworkService wordPressService =
      NetworkService.newBuilder()
          .setNetworkEndpoint(NetworkEndpointUtils.forIpAndPort("1.1.1.1", 80))
          .setTransportProtocol(TransportProtocol.TCP)
          .setServiceName("http")
          .setSoftware(Software.newBuilder().setName("WordPress"))
          .build();
  NetworkService jenkinsService =
      NetworkService.newBuilder()
          .setNetworkEndpoint(NetworkEndpointUtils.forIpAndPort("1.1.1.1", 443))
          .setTransportProtocol(TransportProtocol.TCP)
          .setServiceName("https")
          .setSoftware(Software.newBuilder().setName("Jenkins"))
          .build();
  NetworkService noNameService =
      NetworkService.newBuilder()
          .setNetworkEndpoint(NetworkEndpointUtils.forIpAndPort("1.1.1.1", 12345))
          .setTransportProtocol(TransportProtocol.TCP)
          .build();
  // Report declaring a "FakeOS" target so the OS-filtered plugins can match.
  ReconnaissanceReport fakeReconnaissanceReport =
      ReconnaissanceReport.newBuilder()
          .setTargetInfo(
              TargetInfo.newBuilder()
                  .addOperatingSystemClasses(
                      OperatingSystemClass.newBuilder()
                          .setType("general purpose")
                          .setVendor("Vendor")
                          .setOsFamily("FakeOS")
                          .setAccuracy(96)))
          .addNetworkServices(wordPressService)
          .addNetworkServices(jenkinsService)
          .addNetworkServices(noNameService)
          .build();
  PluginManager pluginManager =
      Guice.createInjector(
              new FakePortScannerBootstrapModule(),
              new FakeServiceFingerprinterBootstrapModule(),
              FakeFilteringRemoteDetector.getModule())
          .getInstance(PluginManager.class);
  ImmutableList<PluginMatchingResult<VulnDetector>> vulnDetectors =
      pluginManager.getVulnDetectors(fakeReconnaissanceReport);
  // One remote detector plugin is registered; it internally carries four sub-plugins.
  assertThat(vulnDetectors).hasSize(1);
  ImmutableList<MatchedPlugin> matchedResult =
      ((FakeFilteringRemoteDetector) vulnDetectors.get(0).tsunamiPlugin()).getMatchedPlugins();
  assertThat(matchedResult).hasSize(4);
  assertThat(matchedResult.get(1).getPlugin())
      .isEqualTo(FakeFilteringRemoteDetector.getJenkinsServiceDefinition());
  assertThat(matchedResult.get(1).getServicesList())
      .containsExactly(jenkinsService, noNameService);
  // The other non-OS detector should match, too:
  assertThat(matchedResult.get(0).getPlugin())
      .isEqualTo(FakeFilteringRemoteDetector.getHttpServiceDefinition());
  // wordpress: matching service_name (http)
  // jenkins: mismatching service_name (http*s*)
  // nonameservice: no service_name present in the NetworkService, hasMatchingServiceName accepts
  // that
  assertThat(matchedResult.get(0).getServicesList())
      .containsExactly(wordPressService, noNameService);
  // The one that matches the OS only, should match everything:
  assertThat(matchedResult.get(2).getPlugin())
      .isEqualTo(FakeFilteringRemoteDetector.getOperatingSystemServiceDefinition());
  assertThat(matchedResult.get(2).getServicesList())
      .containsExactly(wordPressService, jenkinsService, noNameService);
  // The one that matches the OS and "http", should return these:
  assertThat(matchedResult.get(3).getPlugin())
      .isEqualTo(FakeFilteringRemoteDetector.getOperatingSystemAndHttpServiceDefinition());
  assertThat(matchedResult.get(3).getServicesList())
      .containsExactly(wordPressService, noNameService);
}
|
/** Delegates one-shot task scheduling to the wrapped executor service. */
@Override
public ScheduledFuture<?> schedule(Runnable command, long delay, TimeUnit unit) {
    return delegate.schedule(command, delay, unit);
}
|
// Verifies the wrapper forwards schedule() to the underlying executor with the same
// arguments. NOTE(review): `callable` is passed where a Runnable overload also exists —
// confirm which overload is intended here.
@Test
public void schedule1() {
    underTest.schedule(callable, delay, SECONDS);
    verify(executorService).schedule(callable, delay, SECONDS);
}
|
/**
 * Writes an int at the current position directly into the backing byte array
 * (via the Unsafe-backed MEM accessor, i.e. platform-native byte order).
 *
 * @param v the value to write
 * @throws IOException if the buffer cannot be grown to fit the value
 */
@Override
public void writeInt(final int v) throws IOException {
    // Grow the backing array if fewer than 4 bytes remain.
    ensureAvailable(INT_SIZE_IN_BYTES);
    MEM.putInt(buffer, ARRAY_BYTE_BASE_OFFSET + pos, v);
    // Advance the write cursor past the bytes just written.
    pos += INT_SIZE_IN_BYTES;
}
|
@Test
public void testWriteIntV() throws Exception {
    // The stream writes ints in the platform's native byte order; read it back the same way.
    int written = 100;
    out.writeInt(written);
    boolean bigEndian = ByteOrder.nativeOrder() == ByteOrder.BIG_ENDIAN;
    int readBack = Bits.readInt(out.buffer, 0, bigEndian);
    assertEquals(written, readBack);
}
|
/**
 * Subscribes the given rail subscribers to the upstream parallel source, wrapping
 * each one so its rail is disposed when the scope completes.
 */
@Override
public void subscribe(Subscriber<? super T>[] subscribers) {
    // Bail out early if the rail/subscriber counts do not line up.
    if (!validate(subscribers)) {
        return;
    }
    // Wrap every downstream subscriber in an auto-disposing decorator.
    @SuppressWarnings("unchecked")
    Subscriber<? super T>[] wrapped = new Subscriber[subscribers.length];
    for (int rail = 0; rail < subscribers.length; rail++) {
        wrapped[rail] = new AutoDisposingSubscriberImpl<>(scope, subscribers[rail]);
    }
    source.subscribe(wrapped);
}
|
// Happy path: two parallel rails receive alternating values, complete normally, and the
// scope/source subscriptions are released afterwards.
// NOTE(review): value-to-rail assignment (1,3 vs 2,4) assumes round-robin distribution by
// the parallel operator — confirm this is guaranteed for DEFAULT_PARALLELISM = 2.
@Test
public void autoDispose_withMaybe_normal() {
    TestSubscriber<Integer> firstSubscriber = new TestSubscriber<>();
    TestSubscriber<Integer> secondSubscriber = new TestSubscriber<>();
    PublishProcessor<Integer> source = PublishProcessor.create();
    CompletableSubject scope = CompletableSubject.create();
    //noinspection unchecked
    Subscriber<Integer>[] subscribers = new Subscriber[] {firstSubscriber, secondSubscriber};
    source.parallel(DEFAULT_PARALLELISM).to(autoDisposable(scope)).subscribe(subscribers);
    // Both rails subscribed; the scope is being observed for disposal.
    assertThat(firstSubscriber.hasSubscription()).isTrue();
    assertThat(secondSubscriber.hasSubscription()).isTrue();
    assertThat(source.hasSubscribers()).isTrue();
    assertThat(scope.hasObservers()).isTrue();
    source.onNext(1);
    source.onNext(2);
    firstSubscriber.assertValue(1);
    secondSubscriber.assertValue(2);
    source.onNext(3);
    source.onNext(4);
    source.onComplete();
    firstSubscriber.assertValues(1, 3);
    firstSubscriber.assertComplete();
    secondSubscriber.assertValues(2, 4);
    secondSubscriber.assertComplete();
    // After completion everything is unsubscribed, including the scope observer.
    assertThat(source.hasSubscribers()).isFalse();
    assertThat(scope.hasObservers()).isFalse();
}
|
/**
 * Parses a Cisco IOS "show interfaces" reply into a list of port descriptions.
 *
 * <p>Each iteration extracts the next interface section, builds its port info, and
 * then marks the section as consumed by prefixing its first token with a space so
 * {@code parentInterfaceMethod} skips it on the next pass.
 *
 * <p>NOTE(review): {@code String.replace} rewrites ALL occurrences of
 * {@code tempString}, not just the current section header — confirm interface names
 * cannot appear elsewhere in the reply, otherwise sections could be skipped.
 *
 * @param interfacesReply raw CLI output of "show interfaces"
 * @return one PortDescription per interface found in the reply
 */
public static List<PortDescription> parseCiscoIosPorts(String interfacesReply) {
    String parentInterface;
    String[] parentArray;
    String tempString;
    List<PortDescription> portDesc = Lists.newArrayList();
    // Total number of interface sections present in the reply.
    int interfacesCounter = interfacesCounterMethod(interfacesReply);
    for (int i = 0; i < interfacesCounter; i++) {
        // Finds the next unconsumed interface section in the (mutated) reply.
        parentInterface = parentInterfaceMethod(interfacesReply);
        portDesc.add(findPortInfo(parentInterface));
        parentArray = parentInterface.split(SPACE);
        tempString = parentArray[0] + SPACE;
        // Prefix the consumed header with a space so it is not matched again.
        interfacesReply = interfacesReply.replace(tempString, SPACE + tempString);
    }
    return portDesc;
}
|
/**
 * Reads the canned "show interfaces" reply from test resources and verifies the
 * parser output against the expected port descriptions.
 */
@Test
public void controllersIntfs() {
    InputStream streamOrig = getClass().getResourceAsStream(SHOW_INTFS);
    // try-with-resources: closing the Scanner also closes the underlying stream,
    // fixing the resource leak in the original version.
    try (Scanner scanner = new Scanner(streamOrig, "UTF-8")) {
        // \Z delimiter => slurp the whole resource in one token.
        String rpcReply = scanner.useDelimiter("\\Z").next();
        List<PortDescription> actualIntfs = TextBlockParserCisco.parseCiscoIosPorts(rpcReply);
        assertEquals("Information could not be retrieved",
                     getExpectedIntfs(), actualIntfs);
    }
}
|
/**
 * Drains the given reader and returns everything it produced as a String.
 *
 * @param reader the source to read from (not closed by this method)
 * @return the full character content of the reader
 * @throws IOException if reading fails
 */
public static String read(Reader reader) throws IOException {
    // StringWriter.close() is a no-op, but try-with-resources keeps the intent clear.
    try (StringWriter sink = new StringWriter()) {
        write(reader, sink);
        return sink.toString();
    }
}
|
// Reading the fixture reader must yield the full expected TEXT constant.
@Test
void testRead() throws Exception {
    assertThat(IOUtils.read(reader), equalTo(TEXT));
}
|
/**
 * Converts a validated {@code IntervalFreshness} into a {@link Duration}.
 *
 * @param intervalFreshness the freshness definition (interval string + time unit)
 * @return the equivalent Duration
 * @throws ValidationException if the time unit is not one of SECOND/MINUTE/HOUR/DAY
 */
public static Duration convertFreshnessToDuration(IntervalFreshness intervalFreshness) {
    // Fail fast on an invalid freshness definition before parsing the interval.
    validateIntervalFreshness(intervalFreshness);
    long amount = Long.parseLong(intervalFreshness.getInterval());
    switch (intervalFreshness.getTimeUnit()) {
        case SECOND:
            return Duration.ofSeconds(amount);
        case MINUTE:
            return Duration.ofMinutes(amount);
        case HOUR:
            return Duration.ofHours(amount);
        case DAY:
            return Duration.ofDays(amount);
        default:
            throw new ValidationException(
                    String.format(
                            "Unknown freshness time unit: %s.",
                            intervalFreshness.getTimeUnit()));
    }
}
|
// Each supported time unit must map to the corresponding java.time.Duration factory.
@Test
void testConvertFreshnessToDuration() {
    // verify second
    Duration actualSecond = convertFreshnessToDuration(IntervalFreshness.ofSecond("20"));
    assertThat(actualSecond).isEqualTo(Duration.ofSeconds(20));
    // verify minute
    Duration actualMinute = convertFreshnessToDuration(IntervalFreshness.ofMinute("3"));
    assertThat(actualMinute).isEqualTo(Duration.ofMinutes(3));
    // verify hour
    Duration actualHour = convertFreshnessToDuration(IntervalFreshness.ofHour("3"));
    assertThat(actualHour).isEqualTo(Duration.ofHours(3));
    // verify day
    Duration actualDay = convertFreshnessToDuration(IntervalFreshness.ofDay("3"));
    assertThat(actualDay).isEqualTo(Duration.ofDays(3));
}
|
/**
 * Publishes (releases) the configured namespace through the Apollo Open API,
 * using the app/env/cluster/namespace coordinates from {@code apolloConfig}.
 *
 * @param releaseTitle   human-readable title recorded with the release
 * @param releaseComment description recorded with the release
 */
public void publishNamespace(final String releaseTitle, final String releaseComment) {
    NamespaceReleaseDTO namespaceReleaseDTO = new NamespaceReleaseDTO();
    namespaceReleaseDTO.setReleaseTitle(releaseTitle);
    namespaceReleaseDTO.setReleaseComment(releaseComment);
    // The configured operator user is attributed as the releaser.
    namespaceReleaseDTO.setReleasedBy(operatorUser);
    this.apolloOpenApiClient.publishNamespace(
        apolloConfig.getAppId(),
        apolloConfig.getEnv(),
        apolloConfig.getClusterName(),
        apolloConfig.getNamespace(),
        namespaceReleaseDTO
    );
}
|
// Verifies publishNamespace delegates to the underlying client exactly once.
// NOTE(review): `apolloClient` is both stubbed and invoked here — confirm it is a
// spy/partial mock, otherwise this test only exercises the mock itself.
@Test
public void testPublishNamespace() {
    doNothing().when(apolloClient).publishNamespace(Mockito.any(), Mockito.any());
    apolloClient.publishNamespace("Dr", "1.0.2");
    verify(apolloClient).publishNamespace(Mockito.any(), Mockito.any());
}
|
/**
 * Creates a REST GET setting builder for the given resource id.
 *
 * @param id the resource id; validated by {@code checkId} (e.g. must not contain '/')
 * @return a builder matching GET requests for exactly this id
 * @throws IllegalArgumentException if the id is invalid
 */
public static RestSettingBuilder get(final String id) {
    return get(eq(checkId(id)));
}
|
// An id containing '/' must be rejected by checkId before any setting is built.
@Test
public void should_throw_exception_for_get_id_with_slash() {
    assertThrows(IllegalArgumentException.class, () -> get("1/1").response(status(200)));
}
|
/**
 * Resolves the class loader responsible for the given connector class or alias.
 * Falls back to this (delegating) loader when no dedicated plugin loader exists.
 */
ClassLoader connectorLoader(String connectorClassOrAlias) {
    // Aliases map to fully-qualified class names; unknown names pass through as-is.
    String fullName = aliases.getOrDefault(connectorClassOrAlias, connectorClassOrAlias);
    ClassLoader pluginLoader = pluginClassLoader(fullName);
    ClassLoader resolved = (pluginLoader != null) ? pluginLoader : this;
    log.debug(
            "Getting plugin class loader: '{}' for connector: {}",
            resolved,
            connectorClassOrAlias
    );
    return resolved;
}
|
// With no plugin registered for the name, the delegating loader itself is returned.
@Test
public void testEmptyConnectorLoader() {
    assertSame(classLoader, classLoader.connectorLoader(ARBITRARY));
}
|
/**
 * Applies count-based retention to the given index set: when the number of
 * (non-reopened) indices exceeds {@code maxNumberOfIndices}, runs the retention
 * action for the surplus indices.
 *
 * @param indexSet           the index set to inspect
 * @param maxNumberOfIndices configured maximum; {@code null} disables retention
 * @param action             retention action to execute for surplus indices
 * @param actionName         human-readable action name for logging/auditing
 */
public void retain(IndexSet indexSet,
                   @Nullable Integer maxNumberOfIndices,
                   RetentionExecutor.RetentionAction action,
                   String actionName) {
    // No configured maximum means retention is disabled; bail out before doing
    // any index bookkeeping (moved ahead of the alias scan to avoid wasted work).
    if (maxNumberOfIndices == null) {
        LOG.warn("No retention strategy configuration found, not running index retention!");
        return;
    }
    final Map<String, Set<String>> deflectorIndices = indexSet.getAllIndexAliases();
    // Reopened indices are explicitly kept and must not count against the limit.
    final int indexCount = (int) deflectorIndices.keySet()
            .stream()
            .filter(indexName -> !indices.isReopened(indexName))
            .count();
    // Do we have more indices than the configured maximum?
    if (indexCount <= maxNumberOfIndices) {
        LOG.debug("Number of indices ({}) lower than limit ({}). Not performing any retention actions.",
                indexCount, maxNumberOfIndices);
        return;
    }
    // We have more indices than the configured maximum! Remove as many as needed.
    final int removeCount = indexCount - maxNumberOfIndices;
    final String msg = "Number of indices (" + indexCount + ") higher than limit (" + maxNumberOfIndices + "). " +
            "Running retention for " + removeCount + " indices.";
    LOG.info(msg);
    activityWriter.write(new Activity(msg, CountBasedRetentionExecutor.class));
    retentionExecutor.runRetention(indexSet, removeCount, action, actionName);
}
|
// With a limit of 5 and more indices in the fixture, exactly one (the oldest, "test_1")
// must be handed to the retention action, and the activity log written twice.
@Test
public void shouldRetainOldestIndex() {
    underTest.retain(indexSet, 5, action, "action");
    verify(action, times(1)).retain(retainedIndexName.capture(), eq(indexSet));
    assertThat(retainedIndexName.getValue()).containsExactly("test_1");
    verify(activityWriter, times(2)).write(any(Activity.class));
}
|
/** Wraps this status into a throwable {@code StatusRpcException} carrying it. */
public StatusRpcException asException() {
    return new StatusRpcException(this);
}
|
// The exception produced by asException() must carry the originating status code,
// regardless of attached description/cause.
@Test
void asException() {
    StatusRpcException exception = TriRpcStatus.NOT_FOUND
        .withDescription("desc")
        .withCause(new IllegalStateException("test"))
        .asException();
    Assertions.assertEquals(Code.NOT_FOUND, exception.getStatus().code);
}
|
/**
 * REST endpoint creating a new KMS key from a JSON description.
 *
 * <p>Flow: validate the key name, check CREATE ACL, optionally check the
 * SET_KEY_MATERIAL ACL when caller-supplied material is present, create the key
 * as the authenticated user, audit the operation, strip the key material from
 * the response unless the caller holds the GET ACL, and return 201 with a
 * Location header pointing at the new key resource.
 *
 * @param jsonKey JSON map with name (required), cipher, material (base64),
 *                length, description, and attributes fields
 * @return 201 Created with the key version as JSON
 * @throws Exception propagated after debug logging (container maps it to an error response)
 */
@POST
@Path(KMSRESTConstants.KEYS_RESOURCE)
@Consumes(MediaType.APPLICATION_JSON)
@Produces(MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8)
@SuppressWarnings("unchecked")
public Response createKey(Map jsonKey) throws Exception {
  try{
    LOG.trace("Entering createKey Method.");
    KMSWebApp.getAdminCallsMeter().mark();
    UserGroupInformation user = HttpUserGroupInformation.get();
    final String name = (String) jsonKey.get(KMSRESTConstants.NAME_FIELD);
    checkNotEmpty(name, KMSRESTConstants.NAME_FIELD);
    // Caller must hold the CREATE ACL for this key name.
    assertAccess(KMSACLs.Type.CREATE, user, KMSOp.CREATE_KEY, name);
    String cipher = (String) jsonKey.get(KMSRESTConstants.CIPHER_FIELD);
    final String material;
    material = (String) jsonKey.get(KMSRESTConstants.MATERIAL_FIELD);
    int length = (jsonKey.containsKey(KMSRESTConstants.LENGTH_FIELD))
        ? (Integer) jsonKey.get(KMSRESTConstants.LENGTH_FIELD) : 0;
    String description = (String)
        jsonKey.get(KMSRESTConstants.DESCRIPTION_FIELD);
    LOG.debug("Creating key with name {}, cipher being used{}, " +
        "length of key {}, description of key {}", name, cipher,
        length, description);
    Map<String, String> attributes = (Map<String, String>)
        jsonKey.get(KMSRESTConstants.ATTRIBUTES_FIELD);
    // Supplying raw key material requires an additional, stricter ACL.
    if (material != null) {
      assertAccess(KMSACLs.Type.SET_KEY_MATERIAL, user,
          KMSOp.CREATE_KEY, name);
    }
    final KeyProvider.Options options = new KeyProvider.Options(
        KMSWebApp.getConfiguration());
    if (cipher != null) {
      options.setCipher(cipher);
    }
    if (length != 0) {
      options.setBitLength(length);
    }
    options.setDescription(description);
    options.setAttributes(attributes);
    // Create the key (and flush the provider) as the authenticated caller.
    KeyProvider.KeyVersion keyVersion = user.doAs(
        new PrivilegedExceptionAction<KeyVersion>() {
          @Override
          public KeyVersion run() throws Exception {
            KeyProvider.KeyVersion keyVersion = (material != null)
                ? provider.createKey(name, Base64.decodeBase64(material),
                    options)
                : provider.createKey(name, options);
            provider.flush();
            return keyVersion;
          }
        }
    );
    kmsAudit.ok(user, KMSOp.CREATE_KEY, name, "UserProvidedMaterial:" +
        (material != null) + " Description:" + description);
    // Never leak key material to callers without the GET ACL.
    if (!KMSWebApp.getACLs().hasAccess(KMSACLs.Type.GET, user)) {
      keyVersion = removeKeyMaterial(keyVersion);
    }
    Map json = KMSUtil.toJSON(keyVersion);
    // Build the Location header from the request URL, trimmed back to the keys resource root.
    String requestURL = KMSMDCFilter.getURL();
    int idx = requestURL.lastIndexOf(KMSRESTConstants.KEYS_RESOURCE);
    requestURL = requestURL.substring(0, idx);
    LOG.trace("Exiting createKey Method.");
    return Response.created(getKeyURI(KMSRESTConstants.SERVICE_VERSION, name))
        .type(MediaType.APPLICATION_JSON)
        .header("Location", getKeyURI(requestURL, name)).entity(json).build();
  } catch (Exception e) {
    LOG.debug("Exception in createKey.", e);
    throw e;
  }
}
|
// Verifies that ACLs configured for a short principal name ("client") also authorize
// the corresponding service principal ("client/host") once name rules are applied,
// and that key material is never returned to callers without the GET ACL.
@Test
public void testServicePrincipalACLs() throws Exception {
  Configuration conf = new Configuration();
  conf.set("hadoop.security.authentication", "kerberos");
  File testDir = getTestDir();
  conf = createBaseKMSConf(testDir, conf);
  conf.set("hadoop.kms.authentication.type", "kerberos");
  conf.set("hadoop.kms.authentication.kerberos.keytab",
      keytab.getAbsolutePath());
  conf.set("hadoop.kms.authentication.kerberos.principal", "HTTP/localhost");
  conf.set("hadoop.kms.authentication.kerberos.name.rules", "DEFAULT");
  // Blank out every ACL, then grant CREATE (and MANAGEMENT key ACL) to "client" only.
  for (KMSACLs.Type type : KMSACLs.Type.values()) {
    conf.set(type.getAclConfigKey(), " ");
  }
  conf.set(KMSACLs.Type.CREATE.getAclConfigKey(), "client");
  conf.set(KMSConfiguration.DEFAULT_KEY_ACL_PREFIX + "MANAGEMENT", "client,client/host");
  writeConf(testDir, conf);
  runServer(null, null, testDir, new KMSCallable<Void>() {
    @Override
    public Void call() throws Exception {
      final Configuration conf = new Configuration();
      conf.setInt(KeyProvider.DEFAULT_BITLENGTH_NAME, 128);
      final URI uri = createKMSUri(getKMSUrl());
      // Short name: allowed to create; material must be withheld (no GET ACL).
      doAs("client", new PrivilegedExceptionAction<Void>() {
        @Override
        public Void run() throws Exception {
          try {
            KeyProvider kp = createProvider(uri, conf);
            KeyProvider.KeyVersion kv = kp.createKey("ck0",
                new KeyProvider.Options(conf));
            Assert.assertNull(kv.getMaterial());
          } catch (Exception ex) {
            Assert.fail(ex.getMessage());
          }
          return null;
        }
      });
      // Service principal: name rules collapse it to "client", so it is allowed too.
      doAs("client/host", new PrivilegedExceptionAction<Void>() {
        @Override
        public Void run() throws Exception {
          try {
            KeyProvider kp = createProvider(uri, conf);
            KeyProvider.KeyVersion kv = kp.createKey("ck1",
                new KeyProvider.Options(conf));
            Assert.assertNull(kv.getMaterial());
          } catch (Exception ex) {
            Assert.fail(ex.getMessage());
          }
          return null;
        }
      });
      return null;
    }
  });
}
|
/**
 * Parses a JSON string into a {@link JSONObject} using the default configuration.
 *
 * @param jsonStr JSON object text, e.g. {@code {"a":1}}
 * @return the parsed JSON object
 */
public static JSONObject parseObj(String jsonStr) {
    return new JSONObject(jsonStr);
}
|
// With duplicate-key checking disabled (JSONConfig overload), the later value for a
// repeated key silently wins — here "李四" replaces "张三".
@Test
public void duplicateKeyFalseTest() {
    final String str = "{id:123, name:\"张三\", name:\"李四\"}";
    final JSONObject jsonObject = JSONUtil.parseObj(str, JSONConfig.create().setCheckDuplicate(false));
    assertEquals("{\"id\":123,\"name\":\"李四\"}", jsonObject.toString());
}
|
/**
 * Launches the unmanaged application master: registers the UAM with the RM
 * (blocking) and establishes the local proxy connection to it.
 *
 * @return the AMRM token obtained from the RM for this UAM
 * @throws YarnException on RM-side failures
 * @throws IOException on communication failures
 */
public Token<AMRMTokenIdentifier> launchUAM()
    throws YarnException, IOException {
    // Mark that a connection attempt has started (read by shutdown/retry logic).
    this.connectionInitiated = true;
    // Blocking call to RM
    Token<AMRMTokenIdentifier> amrmToken = initializeUnmanagedAM(this.applicationId);
    // Creates the UAM connection
    createUAMProxy(amrmToken);
    return amrmToken;
}
|
// Stopping the UAM pool must not block on force-finishing the applications: stop()
// returns promptly, the cleanup runs on a separate thread, and the UAM map drains
// once that thread terminates.
@Test
public void testSeparateThreadWithoutBlockServiceStop() throws Exception {
    // Launch two UAMs in different sub-clusters.
    ApplicationAttemptId attemptId1 =
        ApplicationAttemptId.newInstance(ApplicationId.newInstance(Time.now(), 1), 1);
    Token<AMRMTokenIdentifier> token1 = uamPool.launchUAM("SC-1", this.conf,
        attemptId1.getApplicationId(), "default", "test-user", "SC-HOME", true, "SC-1", null);
    Assert.assertNotNull(token1);
    ApplicationAttemptId attemptId2 =
        ApplicationAttemptId.newInstance(ApplicationId.newInstance(Time.now(), 2), 1);
    Token<AMRMTokenIdentifier> token2 = uamPool.launchUAM("SC-2", this.conf,
        attemptId2.getApplicationId(), "default", "test-user", "SC-HOME", true, "SC-2", null);
    Assert.assertNotNull(token2);
    Map<String, UnmanagedApplicationManager> unmanagedAppMasterMap =
        uamPool.getUnmanagedAppMasterMap();
    Assert.assertNotNull(unmanagedAppMasterMap);
    Assert.assertEquals(2, unmanagedAppMasterMap.size());
    // try to stop uamPool
    uamPool.stop();
    Assert.assertTrue(uamPool.waitForServiceToStop(2000));
    // process force finish Application in a separate thread, not blocking the main thread
    Assert.assertEquals(Service.STATE.STOPPED, uamPool.getServiceState());
    // Wait for the thread to terminate, check if uamPool#unmanagedAppMasterMap is 0
    Thread finishApplicationThread = uamPool.getFinishApplicationThread();
    GenericTestUtils.waitFor(() -> !finishApplicationThread.isAlive(),
        100, 2000);
    Assert.assertEquals(0, unmanagedAppMasterMap.size());
}
|
/**
 * Iterates over the iterable in parallel on the shared fork-join pool, applying
 * the procedure to every element.
 *
 * @param iterable  the elements to process
 * @param procedure the action applied to each element (must be thread-safe)
 */
public static <T> void forEach(Iterable<T> iterable, Procedure<? super T> procedure)
{
    FJIterate.forEach(iterable, procedure, FJIterate.FORK_JOIN_POOL);
}
|
// Parallel forEach over a 1..100 map must sum to 5050 regardless of batch sizing:
// default batches, batch size 1, and an uneven batch count.
@Test
public void testForEachUsingMap()
{
    //Test the default batch size calculations
    IntegerSum sum1 = new IntegerSum(0);
    MutableMap<String, Integer> map1 = Interval.fromTo(1, 100).toMap(Functions.getToString(), Functions.getIntegerPassThru());
    FJIterate.forEach(map1, new SumProcedure(sum1), new SumCombiner(sum1));
    assertEquals(5050, sum1.getSum());
    //Testing batch size 1
    IntegerSum sum2 = new IntegerSum(0);
    UnifiedMap<String, Integer> map2 = (UnifiedMap<String, Integer>) Interval.fromTo(1, 100).toMap(Functions.getToString(), Functions.getIntegerPassThru());
    FJIterate.forEach(map2, new SumProcedure(sum2), new SumCombiner(sum2), 1, map2.getBatchCount(map2.size()));
    assertEquals(5050, sum2.getSum());
    //Testing an uneven batch size
    IntegerSum sum3 = new IntegerSum(0);
    UnifiedMap<String, Integer> set3 = (UnifiedMap<String, Integer>) Interval.fromTo(1, 100).toMap(Functions.getToString(), Functions.getIntegerPassThru());
    FJIterate.forEach(set3, new SumProcedure(sum3), new SumCombiner(sum3), 1, set3.getBatchCount(13));
    assertEquals(5050, sum3.getSum());
}
|
/**
 * Creates a job builder pre-populated with sensible defaults: state NEW,
 * created/last-updated timestamps set to now, and no failure reason.
 */
public static PortabilityJob.Builder builder() {
    Instant now = Instant.now();
    // TODO: Fix so we don't need fully qualified name here. This is to get IntelliJ to recognize
    // the class name due to a conflict in package names for our generated code, but the conflict
    // doesn't cause any actual problems with building.
    return new org.datatransferproject.spi.cloud.types.AutoValue_PortabilityJob.Builder()
        .setState(State.NEW)
        .setCreatedTimestamp(now)
        .setLastUpdateTimestamp(now)
        .setFailureReason(null);
}
|
// Round-trip test: a job (including nested export information with a photo album)
// and its authorization must survive Jackson serialize/deserialize unchanged.
@Test
public void verifySerializeDeserializeWithAlbum() throws IOException {
    ObjectMapper objectMapper = ObjectMapperFactory.createObjectMapper();
    Instant date = Instant.now();
    JobAuthorization jobAuthorization =
        JobAuthorization.builder()
            .setState(JobAuthorization.State.INITIAL)
            .setSessionSecretKey("foo")
            .build();
    // The export information is itself serialized to a JSON string inside the job.
    PortabilityJob job =
        PortabilityJob.builder()
            .setState(State.NEW)
            .setExportService("fooService")
            .setImportService("barService")
            .setTransferDataType(PHOTOS)
            .setExportInformation(
                objectMapper.writeValueAsString(
                    new ExportInformation(
                        null,
                        new PhotosContainerResource(
                            Lists.newArrayList(
                                new PhotoAlbum("album_id", "album name", "album description")),
                            null))))
            .setCreatedTimestamp(date)
            .setLastUpdateTimestamp(date.plusSeconds(120))
            .setJobAuthorization(jobAuthorization)
            .build();
    String serializedJobAuthorization = objectMapper.writeValueAsString(jobAuthorization);
    JobAuthorization deserializedJobAuthorization =
        objectMapper.readValue(serializedJobAuthorization, JobAuthorization.class);
    assertThat(deserializedJobAuthorization).isEqualTo(jobAuthorization);
    String serializedJob = objectMapper.writeValueAsString(job);
    PortabilityJob deserializedJob = objectMapper.readValue(serializedJob, PortabilityJob.class);
    assertThat(deserializedJob).isEqualTo(job);
}
|
/**
 * Looks up a Hive function definition by name.
 *
 * <p>Resolution order matters: blacklisted built-ins are hidden; a handful of
 * built-ins are overridden by native/custom implementations (native aggregates,
 * grouping, legacy GROUPING__ID, internal_interval, struct-field array access,
 * to_decimal); everything else falls back to Hive's own built-in registry.
 *
 * @param name the function name (matched case-insensitively for the overrides)
 * @return the definition, or empty if unknown or blacklisted
 */
@Override
public Optional<FunctionDefinition> getFunctionDefinition(String name) {
    if (BUILT_IN_FUNC_BLACKLIST.contains(name)) {
        return Optional.empty();
    }
    FunctionDefinitionFactory.Context context = () -> classLoader;
    // We override some Hive's function by native implementation to supports hash-agg
    if (isNativeAggFunctionEnabled() && BUILTIN_NATIVE_AGG_FUNC.contains(name.toLowerCase())) {
        return getBuiltInNativeAggFunction(name.toLowerCase());
    }
    // We override Hive's grouping function. Refer to the implementation for more details.
    if (name.equalsIgnoreCase("grouping")) {
        return Optional.of(
                factory.createFunctionDefinitionFromHiveFunction(
                        name, HiveGenericUDFGrouping.class.getName(), context));
    }
    // this function is used to generate legacy GROUPING__ID value for old hive versions
    if (name.equalsIgnoreCase(GenericUDFLegacyGroupingID.NAME)) {
        return Optional.of(
                factory.createFunctionDefinitionFromHiveFunction(
                        name, GenericUDFLegacyGroupingID.class.getName(), context));
    }
    // We override Hive's internal_interval. Refer to the implementation for more details
    if (name.equalsIgnoreCase("internal_interval")) {
        return Optional.of(
                factory.createFunctionDefinitionFromHiveFunction(
                        name, HiveGenericUDFInternalInterval.class.getName(), context));
    }
    // used to access the field of struct in array
    if (name.equalsIgnoreCase(HiveGenericUDFArrayAccessStructField.NAME)) {
        return Optional.of(
                factory.createFunctionDefinitionFromHiveFunction(
                        name, HiveGenericUDFArrayAccessStructField.class.getName(), context));
    }
    // We add a custom to_decimal function. Refer to the implementation for more details.
    if (name.equalsIgnoreCase(HiveGenericUDFToDecimal.NAME)) {
        return Optional.of(
                factory.createFunctionDefinitionFromHiveFunction(
                        name, HiveGenericUDFToDecimal.class.getName(), context));
    }
    // Fall back to Hive's own built-in function registry.
    Optional<FunctionInfo> info = hiveShim.getBuiltInFunctionInfo(name);
    return info.map(
            functionInfo ->
                    factory.createFunctionDefinitionFromHiveFunction(
                            name, functionInfo.getFunctionClass().getName(), context));
}
|
// Looking up a name Hive does not know must yield an empty Optional, not a failure.
@Test
public void testNonExistFunction() {
    assertThat(new HiveModule().getFunctionDefinition("nonexist")).isNotPresent();
}
|
/**
 * Handles a manually picked suggestion, with special handling while the
 * tags-search (emoji search) state is active: picking the first entry restores
 * the raw typed word (the displayed text is decorated and not valid output),
 * while any other pick is a real emoji that gets recorded in the quick-key history.
 */
@Override
public void pickSuggestionManually(
    int index, CharSequence suggestion, boolean withAutoSpaceEnabled) {
    if (getCurrentComposedWord().isAtTagsSearchState()) {
        if (index == 0) {
            // this is a special case for tags-searcher
            // since we append a magnifying glass to the suggestions, the "suggestion"
            // value is not a valid output suggestion
            suggestion = getCurrentComposedWord().getTypedWord().toString();
        } else {
            // regular emoji. Storing in history.
            getQuickKeyHistoryRecords().store(suggestion.toString(), suggestion.toString());
        }
    }
    // Delegate the (possibly rewritten) suggestion to the normal pick handling.
    super.pickSuggestionManually(index, suggestion, withAutoSpaceEnabled);
}
|
// Picking the typed tag itself (index 0 in tags-search) must not be treated as a real
// word, so the suggest engine must never be asked to validate it for the dictionary.
@Test
public void testPickingTypedTagDoesNotTryToAddToAutoDictionary() throws Exception {
    verifyNoSuggestionsInteractions();
    mAnySoftKeyboardUnderTest.simulateTextTyping(":face");
    Mockito.reset(mAnySoftKeyboardUnderTest.getSuggest());
    mAnySoftKeyboardUnderTest.pickSuggestionManually(0, ":face");
    Mockito.verify(mAnySoftKeyboardUnderTest.getSuggest(), Mockito.never())
        .isValidWord(Mockito.any(CharSequence.class));
}
|
/**
 * Returns the available compute node with the given id, or {@code null} when the
 * worker is not in the available map (per the tests, dead or blacklisted nodes
 * are excluded from it).
 */
@Override
public ComputeNode getWorkerById(long workerId) {
    return availableID2ComputeNode.get(workerId);
}
|
// Dead and blacklisted nodes (both backends and compute nodes) must be invisible via
// getWorkerById; all other ids resolve, with ids up to the max backend id returning
// Backend instances and higher ids returning plain compute nodes.
@Test
public void testCaptureAvailableWorkers() {
    long deadBEId = 1L;
    long deadCNId = 11L;
    long inBlacklistBEId = 3L;
    long inBlacklistCNId = 13L;
    HostBlacklist blockList = SimpleScheduler.getHostBlacklist();
    blockList.add(inBlacklistBEId);
    blockList.add(inBlacklistCNId);
    id2Backend.get(deadBEId).setAlive(false);
    id2ComputeNode.get(deadCNId).setAlive(false);
    Set<Long> nonAvailableWorkerId = ImmutableSet.of(deadBEId, deadCNId, inBlacklistBEId, inBlacklistCNId);
    WorkerProvider workerProvider = newWorkerProvider();
    Optional<Long> maxId = id2Backend.keySet().stream().max(Comparator.naturalOrder());
    Assert.assertFalse(maxId.isEmpty());
    for (long id : id2AllNodes.keySet()) {
        ComputeNode worker = workerProvider.getWorkerById(id);
        if (nonAvailableWorkerId.contains(id)) {
            // Unavailable workers are simply absent from the provider.
            Assert.assertNull(worker);
        } else {
            Assert.assertEquals(id, worker.getId());
            // Backend ids precede compute-node ids in the fixture's id space.
            if (id <= maxId.get()) {
                Assert.assertTrue(worker instanceof Backend);
            } else {
                Assert.assertFalse(worker instanceof Backend);
            }
        }
    }
}
|
/**
 * Executes the job on this worker thread, routing failures to the appropriate
 * state transition. The catch-branch order matters: a deleted job is silently
 * dropped; a stopped server fails the job and re-interrupts the thread; a missing
 * job method fails with a specific message; everything else fails generically.
 * The steward is always notified of thread occupancy/idleness, and the MDC is
 * cleared so no job context leaks to the next task on this thread.
 */
@Override
public void run() {
    try {
        backgroundJobServer.getJobSteward().notifyThreadOccupied();
        // Restore the job's diagnostic context for log correlation.
        MDCMapper.loadMDCContextFromJob(job);
        performJob();
    } catch (Exception e) {
        if (isJobDeletedWhileProcessing(e)) {
            // nothing to do anymore as Job is deleted
            return;
        } else if (isJobServerStopped(e)) {
            updateJobStateToFailedAndRunJobFilters("Job processing was stopped as background job server has stopped", e);
            // Re-assert the interrupt so callers up the stack can observe it.
            Thread.currentThread().interrupt();
        } else if (isJobNotFoundException(e)) {
            updateJobStateToFailedAndRunJobFilters("Job method not found", e);
        } else {
            updateJobStateToFailedAndRunJobFilters("An exception occurred during the performance of the job", e);
        }
    } finally {
        backgroundJobServer.getJobSteward().notifyThreadIdle();
        MDC.clear();
    }
}
|
// A missing job runner makes the job fail; every state transition (ENQUEUED->PROCESSING->
// FAILED->SCHEDULED) must pass through the apply-state filter, the failure callbacks must
// fire, and only a warning (no error) must be logged.
@Test
void allStateChangesArePassingViaTheApplyStateFilterOnFailure() {
    Job job = anEnqueuedJob().build();
    when(backgroundJobServer.getBackgroundJobRunner(job)).thenReturn(null);
    BackgroundJobPerformer backgroundJobPerformer = new BackgroundJobPerformer(backgroundJobServer, job);
    final ListAppender<ILoggingEvent> logger = LoggerAssert.initFor(backgroundJobPerformer);
    backgroundJobPerformer.run();
    assertThat(logAllStateChangesFilter.getStateChanges(job)).containsExactly("ENQUEUED->PROCESSING", "PROCESSING->FAILED", "FAILED->SCHEDULED");
    assertThat(logAllStateChangesFilter.onProcessingIsCalled(job)).isTrue();
    assertThat(logAllStateChangesFilter.onProcessingFailedIsCalled(job)).isTrue();
    assertThat(logAllStateChangesFilter.onProcessingSucceededIsCalled(job)).isFalse();
    assertThat(logger)
        .hasNoErrorLogMessages()
        .hasWarningMessageContaining("processing failed: An exception occurred during the performance of the job");
}
|
/**
 * Creates a runner that assigns windows with the provided {@link WindowFn}.
 *
 * <p>The cast is safe because a {@code WindowFn<? super T, W>} accepts every
 * {@code T} (contravariant in its input type), so it can be used wherever a
 * {@code WindowFn<T, W>} is required.
 */
static <T, W extends BoundedWindow> AssignWindowsRunner<T, W> create(
    WindowFn<? super T, W> windowFn) {
  // Safe contravariant cast; suppress the compiler's unchecked-cast warning narrowly.
  @SuppressWarnings("unchecked")
  WindowFn<T, W> typedWindowFn = (WindowFn<T, W>) windowFn;
  return new AssignWindowsRunner<>(typedWindowFn);
}
|
// Builds an assign-windows PTransform proto around TestWindowFn and verifies the
// factory-produced function re-windows an element exactly as TestWindowFn would,
// preserving value, timestamp, and pane info.
@Test
public void factoryCreatesFromJavaWindowFn() throws Exception {
    SdkComponents components = SdkComponents.create();
    components.registerEnvironment(Environments.createDockerEnvironment("java"));
    PTransform windowPTransform =
        PTransform.newBuilder()
            .putInputs("in", "input")
            .putOutputs("out", "output")
            .setSpec(
                FunctionSpec.newBuilder()
                    .setUrn(PTransformTranslation.ASSIGN_WINDOWS_TRANSFORM_URN)
                    .setPayload(
                        WindowIntoPayload.newBuilder()
                            .setWindowFn(
                                WindowingStrategyTranslation.toProto(
                                    new TestWindowFn(), components))
                            .build()
                            .toByteString())
                    .build())
            .build();
    ThrowingFunction<WindowedValue<?>, WindowedValue<?>> fn =
        (ThrowingFunction) factory.forPTransform("transform", windowPTransform);
    // The input's IntervalWindow is replaced by TestWindowFn's assignment for t=5.
    assertThat(
        fn.apply(
            WindowedValue.of(
                22L,
                new Instant(5),
                new IntervalWindow(new Instant(0L), new Instant(20027L)),
                PaneInfo.ON_TIME_AND_ONLY_FIRING)),
        equalTo(
            WindowedValue.of(
                22L,
                new Instant(5),
                new TestWindowFn().assignWindow(new Instant(5)),
                PaneInfo.ON_TIME_AND_ONLY_FIRING)));
}
|
/**
 * Checks whether {@code actualVersion} satisfies {@code acceptableVersionRange}.
 *
 * <p>A bare version like {@code "1.4"} is shorthand for the left-closed range
 * {@code "[1.4,)"}. Otherwise the spec must be an interval such as
 * {@code "[1.0,2.0)"} or {@code "(,3.1]"}; brackets denote inclusive bounds,
 * parentheses exclusive ones, and at least one bound must be present.
 *
 * @throws IllegalArgumentException if the range spec is malformed
 */
public boolean compatibleVersion(String acceptableVersionRange, String actualVersion) {
    V actual = parseVersion(actualVersion);
    // Treat a single version "1.4" as a left bound, equivalent to "[1.4,)".
    if (acceptableVersionRange.matches(VERSION_REGEX)) {
        return ge(actual, parseVersion(acceptableVersionRange));
    }
    // Otherwise ensure it is a version range with bounds.
    Matcher matcher = INTERVAL_PATTERN.matcher(acceptableVersionRange);
    Preconditions.checkArgument(matcher.matches(), "invalid version range");
    String leftBound = matcher.group("left");
    String rightBound = matcher.group("right");
    Preconditions.checkArgument(
        leftBound != null || rightBound != null, "left and right bounds cannot both be empty");
    // Check the lower bound first; the right bound is only parsed if this passes,
    // matching the original short-circuit behavior.
    if (leftBound != null) {
        V left = parseVersion(leftBound);
        boolean leftOk =
            acceptableVersionRange.startsWith("[") ? ge(actual, left) : gt(actual, left);
        if (!leftOk) {
            return false;
        }
    }
    if (rightBound != null) {
        V right = parseVersion(rightBound);
        boolean rightOk =
            acceptableVersionRange.endsWith("]") ? le(actual, right) : lt(actual, right);
        if (!rightOk) {
            return false;
        }
    }
    return true;
}
|
// A version below an exclusive lower bound must be rejected for every right-bound shape
// (closed, open, unbounded, and empty-bracket variants).
@Test
public void testRange_leftOpen() {
    Assert.assertFalse(checker.compatibleVersion("(2.3,4.3]", "1.0"));
    Assert.assertFalse(checker.compatibleVersion("(2.3,4.3)", "1.0"));
    Assert.assertFalse(checker.compatibleVersion("(2.3,)", "1.0"));
    Assert.assertFalse(checker.compatibleVersion("(2.3,]", "1.0"));
}
|
/**
 * Returns whether the preflight check detected a fresh installation.
 * Backed by an atomic/lazy holder set elsewhere (presumably during runCheck —
 * TODO confirm).
 */
public boolean isFreshInstallation() {
    return isFreshInstallation.get();
}
|
// Against an empty MongoDB fixture, running the preflight check must report a fresh install.
@Test
public void testIsFreshInstallation() {
    mongoDBPreflightCheck.runCheck();
    assertThat(mongoDBPreflightCheck.isFreshInstallation()).isTrue();
}
|
/**
 * Returns the set of supported language codes.
 * NOTE(review): this exposes the internal set directly — callers could mutate it;
 * confirm whether an unmodifiable view is expected.
 */
public Set<String> getLangs() {
    return langs;
}
|
// Loading the language list must be deterministic: repeated reads return the same
// fully-loaded set (guards against a partially-read model stream).
@Test
public void testThreadJoinInLoadingLangs() throws Exception {
    assumeTrue(canRun());
    //make sure that the stream is fully read and
    //we're getting the same answers on several iterations
    Set<String> langs = getLangs();
    assumeTrue(langs.size() > 0);
    for (int i = 0; i < 20; i++) {
        assertEquals(langs, getLangs());
    }
}
|
/**
 * Decides whether a trace segment should be sampled: a service-specific policy
 * takes precedence; otherwise the global default sampling rules apply.
 *
 * @param service  the service name used to look up a per-service policy
 * @param sample   the sampling factor of this segment
 * @param duration the segment duration (used for slow-trace forcing)
 */
public boolean shouldSample(String service, int sample, int duration) {
    final SamplingPolicy policy = this.samplingPolicySettings.get().get(service);
    return policy == null
            ? shouldSampleByDefault(sample, duration)
            : shouldSampleService(policy, sample, duration);
}
|
// Dynamic-config update of the global default sample rate: initially (rate 10000) a
// sample value of 9000 passes; the watcher polls until the pushed config (rate 9000)
// takes effect and 9000 is no longer sampled. Timeout(20) bounds the busy-wait loop.
@Test
@Timeout(20)
public void testDefaultSampleRateDynamicUpdate() throws InterruptedException {
    ConfigWatcherRegister register = new DefaultSampleRateMockConfigWatcherRegister(3);
    TraceSamplingPolicyWatcher watcher = new TraceSamplingPolicyWatcher(moduleConfig, provider);
    register.registerConfigChangeWatcher(watcher);
    register.start();
    // Default is 10000, so 9000 must be sampled,until updating to 9000
    while (watcher.shouldSample("", 9000, -1)) {
        Thread.sleep(2000);
    }
    // After the update, the effective global default rate must equal 8999-boundary value.
    globalDefaultSamplingRateEquals(watcher, 8999);
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.