| focal_method (string, 13–60.9k chars) | test_case (string, 25–109k chars) |
|---|---|
@Override
public void export(RegisterTypeEnum registerType) {
if (this.exported) {
return;
}
if (getScopeModel().isLifeCycleManagedExternally()) {
// prepare model for reference
getScopeModel().getDeployer().prepare();
} else {
// ensure start module, compatible with old api usage
getScopeModel().getDeployer().start();
}
synchronized (this) {
if (this.exported) {
return;
}
if (!this.isRefreshed()) {
this.refresh();
}
if (this.shouldExport()) {
this.init();
if (shouldDelay()) {
// should register if delay export
doDelayExport();
} else if (Integer.valueOf(-1).equals(getDelay())
&& Boolean.parseBoolean(ConfigurationUtils.getProperty(
getScopeModel(), CommonConstants.DUBBO_MANUAL_REGISTER_KEY, "false"))) {
// should not register by default
doExport(RegisterTypeEnum.MANUAL_REGISTER);
} else {
doExport(registerType);
}
}
}
}
|
// Exporting a service whose MethodConfig declares an argument index the method
// does not have must fail fast with IllegalArgumentException.
@Test
void testMethodConfigWithInvalidArgumentIndex() {
    Assertions.assertThrows(IllegalArgumentException.class, () -> {
        ServiceConfig<DemoServiceImpl> service = new ServiceConfig<>();
        service.setInterface(DemoService.class);
        service.setRef(new DemoServiceImpl());
        service.setProtocol(new ProtocolConfig() {
            {
                setName("dubbo");
            }
        });
        MethodConfig methodConfig = new MethodConfig();
        methodConfig.setName("sayName");
        // invalid argument index.
        methodConfig.setArguments(Lists.newArrayList(new ArgumentConfig() {
            {
                setType(String.class.getName());
                setIndex(1);
            }
        }));
        service.setMethods(Lists.newArrayList(methodConfig));
        service.export();
    });
}
|
/**
 * Subscribes to the given service across all clusters by delegating to the
 * cluster-aware overload with an empty cluster list.
 *
 * @throws NacosException if the underlying subscription fails
 */
@Override
public void subscribe(String serviceName, EventListener listener) throws NacosException {
    final ArrayList<String> allClusters = new ArrayList<>();
    subscribe(serviceName, allClusters, listener);
}
|
// Subscribing with an explicit cluster list must register a cluster-filtering
// selector wrapper with the change notifier and forward the subscription to the proxy.
@Test
void testSubscribe3() throws NacosException {
    //given
    String serviceName = "service1";
    List<String> clusterList = Arrays.asList("cluster1", "cluster2");
    EventListener listener = event -> {
    };
    //when
    client.subscribe(serviceName, clusterList, listener);
    // The wrapper the client is expected to have built internally.
    NamingSelectorWrapper wrapper = new NamingSelectorWrapper(serviceName, Constants.DEFAULT_GROUP, Constants.NULL,
            NamingSelectorFactory.newClusterSelector(clusterList), listener);
    //then
    verify(changeNotifier, times(1)).registerListener(Constants.DEFAULT_GROUP, serviceName, wrapper);
    verify(proxy, times(1)).subscribe(serviceName, Constants.DEFAULT_GROUP, Constants.NULL);
}
|
/**
 * Validates worker placement options: rejects mutually exclusive combinations of
 * zone, workerRegion, workerZone and the worker_region experiment, and migrates
 * the deprecated --zone flag onto --workerZone.
 */
@VisibleForTesting
static void validateWorkerSettings(DataflowPipelineWorkerPoolOptions workerOptions) {
    DataflowPipelineOptions dataflowOptions = workerOptions.as(DataflowPipelineOptions.class);
    validateSdkContainerImageOptions(workerOptions);
    GcpOptions gcpOptions = workerOptions.as(GcpOptions.class);
    Preconditions.checkArgument(
        gcpOptions.getZone() == null || gcpOptions.getWorkerRegion() == null,
        "Cannot use option zone with workerRegion. Prefer either workerZone or workerRegion.");
    Preconditions.checkArgument(
        gcpOptions.getZone() == null || gcpOptions.getWorkerZone() == null,
        "Cannot use option zone with workerZone. Prefer workerZone.");
    Preconditions.checkArgument(
        gcpOptions.getWorkerRegion() == null || gcpOptions.getWorkerZone() == null,
        "workerRegion and workerZone options are mutually exclusive.");
    // The experiment may carry a value (e.g. "worker_region=..."), so match by prefix.
    boolean hasExperimentWorkerRegion =
        dataflowOptions.getExperiments() != null
            && dataflowOptions.getExperiments().stream()
                .anyMatch(experiment -> experiment.startsWith("worker_region"));
    Preconditions.checkArgument(
        !hasExperimentWorkerRegion || gcpOptions.getWorkerRegion() == null,
        "Experiment worker_region and option workerRegion are mutually exclusive.");
    Preconditions.checkArgument(
        !hasExperimentWorkerRegion || gcpOptions.getWorkerZone() == null,
        "Experiment worker_region and option workerZone are mutually exclusive.");
    if (gcpOptions.getZone() != null) {
        LOG.warn("Option --zone is deprecated. Please use --workerZone instead.");
        // Carry the legacy value forward, then clear it so only workerZone remains set.
        gcpOptions.setWorkerZone(gcpOptions.getZone());
        gcpOptions.setZone(null);
    }
}
|
// Setting sdkContainerImage must also be observable through the legacy
// workerHarnessContainerImage alias after validation.
@Test
public void testAliasForSdkContainerImage() {
    DataflowPipelineWorkerPoolOptions options =
        PipelineOptionsFactory.as(DataflowPipelineWorkerPoolOptions.class);
    String testImage = "image.url:sdk";
    // Use the variable rather than repeating the literal, so the asserts below
    // provably compare against the value that was actually set.
    options.setSdkContainerImage(testImage);
    DataflowRunner.validateWorkerSettings(options);
    assertEquals(testImage, options.getWorkerHarnessContainerImage());
    assertEquals(testImage, options.getSdkContainerImage());
}
|
/**
 * Creates (or, when {@code replace} is true, replaces) a SQL-invoked function.
 *
 * <p>Validates field lengths and parameter count up front, then transactionally
 * inserts a new version when the definition changed, or un-deletes the latest
 * version when it was previously dropped with an identical definition.
 *
 * @param function the unversioned function definition to store
 * @param replace  whether an existing non-deleted function may be replaced
 */
@Override
public void createFunction(SqlInvokedFunction function, boolean replace)
{
    checkCatalog(function);
    checkFunctionLanguageSupported(function);
    // New definitions must arrive unversioned; versions are assigned here.
    checkArgument(!function.hasVersion(), "function '%s' is already versioned", function);
    QualifiedObjectName functionName = function.getFunctionId().getFunctionName();
    checkFieldLength("Catalog name", functionName.getCatalogName(), MAX_CATALOG_NAME_LENGTH);
    checkFieldLength("Schema name", functionName.getSchemaName(), MAX_SCHEMA_NAME_LENGTH);
    if (!functionNamespaceDao.functionNamespaceExists(functionName.getCatalogName(), functionName.getSchemaName())) {
        throw new PrestoException(NOT_FOUND, format("Function namespace not found: %s", functionName.getCatalogSchemaName()));
    }
    checkFieldLength("Function name", functionName.getObjectName(), MAX_FUNCTION_NAME_LENGTH);
    if (function.getParameters().size() > MAX_PARAMETER_COUNT) {
        throw new PrestoException(GENERIC_USER_ERROR, format("Function has more than %s parameters: %s", MAX_PARAMETER_COUNT, function.getParameters().size()));
    }
    for (Parameter parameter : function.getParameters()) {
        checkFieldLength("Parameter name", parameter.getName(), MAX_PARAMETER_NAME_LENGTH);
    }
    checkFieldLength(
            "Parameter type list",
            function.getFunctionId().getArgumentTypes().stream()
                    .map(TypeSignature::toString)
                    .collect(joining(",")),
            MAX_PARAMETER_TYPES_LENGTH);
    checkFieldLength("Return type", function.getSignature().getReturnType().toString(), MAX_RETURN_TYPE_LENGTH);
    jdbi.useTransaction(handle -> {
        FunctionNamespaceDao transactionDao = handle.attach(functionNamespaceDaoClass);
        // Row-locked read so concurrent creates serialize on the same function id.
        Optional<SqlInvokedFunctionRecord> latestVersion = transactionDao.getLatestRecordForUpdate(hash(function.getFunctionId()), function.getFunctionId());
        if (!replace && latestVersion.isPresent() && !latestVersion.get().isDeleted()) {
            throw new PrestoException(ALREADY_EXISTS, "Function already exists: " + function.getFunctionId());
        }
        if (!latestVersion.isPresent() || !latestVersion.get().getFunction().hasSameDefinitionAs(function)) {
            // New function or changed definition: bump the version and insert.
            long newVersion = latestVersion.map(SqlInvokedFunctionRecord::getFunction).map(MySqlFunctionNamespaceManager::getLongVersion).orElse(0L) + 1;
            insertSqlInvokedFunction(transactionDao, function, newVersion);
        }
        else if (latestVersion.get().isDeleted()) {
            // Same definition as a deleted version: resurrect it instead of inserting.
            SqlInvokedFunction latest = latestVersion.get().getFunction();
            checkState(latest.hasVersion(), "Function version missing: %s", latest.getFunctionId());
            transactionDao.setDeletionStatus(hash(latest.getFunctionId()), latest.getFunctionId(), getLongVersion(latest), false);
        }
    });
    refreshFunctionsCache(functionName);
}
|
// A parameter name of 101 characters must be rejected (limit is 100).
@Test(expectedExceptions = PrestoException.class, expectedExceptionsMessageRegExp = "Parameter name exceeds max length of 100.*")
public void testParameterNameTooLong()
{
    List<Parameter> parameters = ImmutableList.of(new Parameter(dummyString(101), parseTypeSignature(DOUBLE)));
    createFunction(createFunctionTangent(parameters), false);
}
|
/**
 * Returns whether {@code field} is a public, non-static, non-final field that
 * was declared by the programmer (i.e. not compiler-generated/synthetic).
 */
protected static boolean isPublicInstanceField(Field field) {
    final int modifiers = field.getModifiers();
    if (!Modifier.isPublic(modifiers)) {
        return false;
    }
    if (Modifier.isStatic(modifiers) || Modifier.isFinal(modifiers)) {
        return false;
    }
    return !field.isSynthetic();
}
|
// f1 is the only public non-static non-final field on TestReflect; all others
// (static, final, private, etc.) must be rejected.
@Test
public void testIsPublicInstanceField() throws Exception {
    Assert.assertTrue(isPublicInstanceField(TestReflect.class.getField("f1")));
    Assert.assertFalse(isPublicInstanceField(TestReflect.class.getDeclaredField("f2")));
    Assert.assertFalse(isPublicInstanceField(TestReflect.class.getDeclaredField("f3")));
    Assert.assertFalse(isPublicInstanceField(TestReflect.class.getDeclaredField("f4")));
    Assert.assertFalse(isPublicInstanceField(TestReflect.class.getDeclaredField("f5")));
}
|
/** Returns a fresh cursor over this hash-slot array's assigned slots. */
@Override public HashSlotCursor8byteKey cursor() {
    return new Cursor();
}
|
// On an empty hash-slot array the very first advance() must report no slot.
@Test
public void testCursor_advance_whenEmpty() {
    HashSlotCursor8byteKey cursor = hsa.cursor();
    assertFalse(cursor.advance());
}
|
/**
 * Returns the subgraph induced by the given vertices.
 *
 * <p>Works on a sorted copy of {@code vertices}, so the result has a
 * deterministic vertex ordering and the caller's array is left untouched.
 *
 * @param vertices the vertices of the original graph to keep
 * @return a new adjacency matrix over the selected vertices
 */
@Override
public AdjacencyMatrix subgraph(int[] vertices) {
    final int[] sorted = vertices.clone();
    Arrays.sort(sorted);
    final int n = sorted.length;
    final AdjacencyMatrix induced = new AdjacencyMatrix(n, digraph);
    for (int row = 0; row < n; row++) {
        for (int col = 0; col < n; col++) {
            // Copy the edge weight between the selected original vertices.
            induced.graph[row][col] = graph[sorted[row]][sorted[col]];
        }
    }
    return induced;
}
|
// The subgraph induced by {1, 3, 7} keeps only the 1-7 edge; since the graph is
// undirected, the weight must be visible in both directions.
@Test
public void testSubgraph() {
    System.out.println("subgraph digraph = false");
    AdjacencyMatrix graph = new AdjacencyMatrix(8, false);
    graph.addEdge(0, 2);
    graph.addEdge(1, 7);
    graph.addEdge(2, 6);
    graph.addEdge(7, 4);
    graph.addEdge(3, 4);
    graph.addEdge(3, 5);
    graph.addEdge(5, 4);
    int[] v = {1, 3, 7};
    AdjacencyMatrix sub = graph.subgraph(v);
    // Print the 3x3 induced matrix for manual inspection.
    for (int i = 0; i < 3; i++) {
        for (int j = 0; j < 3; j++) {
            System.out.print(sub.getWeight(i, j) + " ");
        }
        System.out.println();
    }
    // Vertices are sorted, so index 0 -> 1 and index 2 -> 7; edge 1-7 survives.
    assertEquals(1.0, sub.getWeight(0, 2), 1E-10);
    assertEquals(1.0, sub.getWeight(2, 0), 1E-10);
}
|
/**
 * Fails if the multimap does not contain the given key.
 * Delegates to the key-set view so failure messages name {@code multimap.keySet()}.
 */
public final void containsKey(@Nullable Object key) {
    check("keySet()").that(checkNotNull(actual).keySet()).contains(key);
}
|
// Asserting containsKey(null) against a multimap without a null key must fail,
// and the failure must be phrased in terms of the key-set view.
@Test
public void containsKeyNullFailure() {
    ImmutableMultimap<String, String> multimap = ImmutableMultimap.of("kurt", "kluever");
    expectFailureWhenTestingThat(multimap).containsKey(null);
    assertFailureKeys("value of", "expected to contain", "but was", "multimap was");
    assertFailureValue("value of", "multimap.keySet()");
    assertFailureValue("expected to contain", "null");
    assertFailureValue("but was", "[kurt]");
}
|
/**
 * Returns the URI with secret option values and user-info passwords masked.
 *
 * <p>{@code xxxxxx} is used as the replacement because it also renders well in JMX.
 * A {@code null} input is returned unchanged.
 */
public static String sanitizeUri(String uri) {
    if (uri == null) {
        return null;
    }
    String masked = ALL_SECRETS.matcher(uri).replaceAll("$1=xxxxxx");
    return USERINFO_PASSWORD.matcher(masked).replaceFirst("$1xxxxxx$3");
}
|
// RAW(...) and RAW{...} password wrappers must both be masked, while the
// ${file:...} simple-language expression elsewhere in the URI stays intact.
@Test
public void testSanitizeUriWithRawPasswordAndSimpleExpression() {
    String uriPlain
            = "http://foo?username=me&password=RAW(me#@123)&foo=bar&port=21&tempFileName=${file:name.noext}.tmp&anotherOption=true";
    String uriCurly
            = "http://foo?username=me&password=RAW{me#@123}&foo=bar&port=21&tempFileName=${file:name.noext}.tmp&anotherOption=true";
    String expected
            = "http://foo?username=xxxxxx&password=xxxxxx&foo=bar&port=21&tempFileName=${file:name.noext}.tmp&anotherOption=true";
    assertEquals(expected, URISupport.sanitizeUri(uriPlain));
    assertEquals(expected, URISupport.sanitizeUri(uriCurly));
}
|
/** Returns the invoked method name, as extracted from the Dubbo invocation by {@code DubboParser}. */
@Override public String method() {
    return DubboParser.method(invocation);
}
|
// method() must surface the invocation's method name unchanged.
@Test void method() {
    when(invocation.getMethodName()).thenReturn("sayHello");
    assertThat(request.method()).isEqualTo("sayHello");
}
|
/**
 * Writes one record as a top-level message, bracketed by startMessage/endMessage
 * on the record consumer. On failure the offending message is logged and the
 * original exception is rethrown (endMessage is then skipped).
 */
@Override
public void write(T record) {
    recordConsumer.startMessage();
    try {
        messageWriter.writeTopLevelMessage(record);
    } catch (RuntimeException e) {
        Message failed;
        if (record instanceof Message.Builder) {
            failed = ((Message.Builder) record).build();
        } else {
            failed = (Message) record;
        }
        // NOTE(review): the first placeholder receives the exception text and the
        // second the message body — confirm this ordering is intentional.
        LOG.error("Cannot write message {}: {}", e.getMessage(), failed);
        throw e;
    }
    recordConsumer.endMessage();
}
|
// A repeated int field must be emitted as one startField/endField pair wrapping
// one addInteger call per element, in insertion order, with no extra calls.
@Test
public void testRepeatedIntMessage() throws Exception {
    RecordConsumer readConsumerMock = Mockito.mock(RecordConsumer.class);
    ProtoWriteSupport<TestProtobuf.RepeatedIntMessage> instance =
            createReadConsumerInstance(TestProtobuf.RepeatedIntMessage.class, readConsumerMock);
    TestProtobuf.RepeatedIntMessage.Builder msg = TestProtobuf.RepeatedIntMessage.newBuilder();
    msg.addRepeatedInt(1323);
    msg.addRepeatedInt(54469);
    instance.write(msg.build());
    InOrder inOrder = Mockito.inOrder(readConsumerMock);
    inOrder.verify(readConsumerMock).startMessage();
    inOrder.verify(readConsumerMock).startField("repeatedInt", 0);
    inOrder.verify(readConsumerMock).addInteger(1323);
    inOrder.verify(readConsumerMock).addInteger(54469);
    inOrder.verify(readConsumerMock).endField("repeatedInt", 0);
    inOrder.verify(readConsumerMock).endMessage();
    Mockito.verifyNoMoreInteractions(readConsumerMock);
}
|
/**
 * Sets the minimum and preferred resources for this operator.
 * The pair is validated first (null values are rejected — see
 * {@code OperatorValidationUtils.validateMinAndPreferredResources}).
 *
 * @param minResources       the minimum resources required
 * @param preferredResources the resources preferred when available
 */
public void setResources(ResourceSpec minResources, ResourceSpec preferredResources) {
    OperatorValidationUtils.validateMinAndPreferredResources(minResources, preferredResources);
    this.minResources = minResources;
    this.preferredResources = preferredResources;
}
|
// Passing null for either the minimum or the preferred resources must be
// rejected with a NullPointerException before any state is mutated.
@Test
void testSetResourcesUseCaseFailNullResources() {
    ResourceSpec resourceSpec = ResourceSpec.newBuilder(1.0, 100).build();
    assertThatThrownBy(() -> transformation.setResources(null, resourceSpec))
            .isInstanceOf(NullPointerException.class);
    assertThatThrownBy(() -> transformation.setResources(resourceSpec, null))
            .isInstanceOf(NullPointerException.class);
}
|
/**
 * Deletes a client and all of its dependent state: issued tokens, approved
 * sites, its whitelisted site (if any), and registered resource sets, then
 * removes the client itself and invalidates the stats cache.
 *
 * @throws InvalidClientException if the client is not present in the repository
 */
@Override
public void deleteClient(ClientDetailsEntity client) throws InvalidClientException {
    if (clientRepository.getById(client.getId()) == null) {
        throw new InvalidClientException("Client with id " + client.getClientId() + " was not found");
    }
    // clean out any tokens that this client had issued
    tokenRepository.clearTokensForClient(client);
    // clean out any approved sites for this client
    approvedSiteService.clearApprovedSitesForClient(client);
    // clear out any whitelisted sites for this client
    WhitelistedSite whitelistedSite = whitelistedSiteService.getByClientId(client.getClientId());
    if (whitelistedSite != null) {
        whitelistedSiteService.remove(whitelistedSite);
    }
    // clear out resource sets registered for this client
    Collection<ResourceSet> resourceSets = resourceSetService.getAllForClient(client);
    for (ResourceSet rs : resourceSets) {
        resourceSetService.remove(rs);
    }
    // take care of the client itself
    clientRepository.deleteClient(client);
    statsService.resetCache();
}
|
// Deleting an existing client must cascade: tokens cleared, approved sites
// cleared, the whitelisted site removed, and the client deleted from the repository.
@Test
public void deleteClient() {
    Long id = 12345L;
    String clientId = "b00g3r";
    ClientDetailsEntity client = Mockito.mock(ClientDetailsEntity.class);
    Mockito.when(client.getId()).thenReturn(id);
    Mockito.when(client.getClientId()).thenReturn(clientId);
    Mockito.when(clientRepository.getById(id)).thenReturn(client);
    WhitelistedSite site = Mockito.mock(WhitelistedSite.class);
    Mockito.when(whitelistedSiteService.getByClientId(clientId)).thenReturn(site);
    Mockito.when(resourceSetService.getAllForClient(client)).thenReturn(new HashSet<ResourceSet>());
    service.deleteClient(client);
    Mockito.verify(tokenRepository).clearTokensForClient(client);
    Mockito.verify(approvedSiteService).clearApprovedSitesForClient(client);
    Mockito.verify(whitelistedSiteService).remove(site);
    Mockito.verify(clientRepository).deleteClient(client);
}
|
/**
 * Converts a {@link PluginDO} persistence object into a {@code PluginVO} view object.
 * The plugin jar is Base64-encoded, or the empty string when absent; the plugin
 * handle list starts out empty.
 *
 * NOTE(review): assumes dateCreated/dateUpdated are non-null — an NPE would be
 * thrown otherwise; confirm with callers.
 */
public static PluginVO buildPluginVO(final PluginDO pluginDO) {
    return new PluginVO(pluginDO.getId(), pluginDO.getRole(), pluginDO.getName(),
            pluginDO.getConfig(), pluginDO.getSort(), pluginDO.getEnabled(),
            DateUtils.localDateTimeToString(pluginDO.getDateCreated().toLocalDateTime()),
            DateUtils.localDateTimeToString(pluginDO.getDateUpdated().toLocalDateTime()),
            Optional.ofNullable(pluginDO.getPluginJar()).map(Base64::encodeToString).orElse(""),
            Lists.newArrayList());
}
|
// A minimal PluginDO carrying only timestamps must still convert to a non-null VO.
@Test
public void testBuildPluginVO() {
    Timestamp currentTime = new Timestamp(System.currentTimeMillis());
    assertNotNull(PluginVO.buildPluginVO(PluginDO.builder()
            .name(PluginEnum.GLOBAL.getName())
            .dateCreated(currentTime)
            .dateUpdated(currentTime)
            .build()));
}
|
/**
 * Returns the request compression config for the given service, falling back to
 * the default config when no per-service config exists. Logs a warning when a
 * per-service config is present but no compression encoding is available.
 *
 * @param httpServiceName        service whose config is looked up
 * @param requestContentEncoding the negotiated request encoding
 */
CompressionConfig getStreamRequestCompressionConfig(String httpServiceName, StreamEncodingType requestContentEncoding)
{
    // Single get() instead of containsKey()+get() avoids hashing the key twice.
    // (Assumes the map never stores null values, which containsKey would have distinguished.)
    CompressionConfig serviceConfig = _requestCompressionConfigs.get(httpServiceName);
    if (serviceConfig != null)
    {
        if (requestContentEncoding == StreamEncodingType.IDENTITY)
        {
            // This will likely happen when the service doesn't allow any request content encodings for compression,
            // but the client specified a compression config for the service.
            // The client probably has a misunderstanding (thinks the service supports request compression when it actually does not).
            // Note that it is okay to pass in any compression config to ClientCompressionFilter when there isn't an available algorithm
            // because ClientCompressionFilter will not compress requests when encoding type is IDENTITY.
            LOG.warn("No request compression algorithm available but compression config specified for service {}", httpServiceName);
        }
        return serviceConfig;
    }
    return _defaultRequestCompressionConfig;
}
|
// Services with an explicit config must get it back; unknown services must get
// the factory default built from requestCompressionThresholdDefault.
@Test(dataProvider = "compressionConfigsData")
public void testGetRequestCompressionConfig(String serviceName, int requestCompressionThresholdDefault, CompressionConfig expectedConfig)
{
    Map<String, CompressionConfig> requestCompressionConfigs = new HashMap<>();
    requestCompressionConfigs.put("service1", new CompressionConfig(0));
    requestCompressionConfigs.put("service2", new CompressionConfig(Integer.MAX_VALUE));
    requestCompressionConfigs.put("service3", new CompressionConfig(111));
    HttpClientFactory factory = new HttpClientFactory.Builder()
            .setRequestCompressionThresholdDefault(requestCompressionThresholdDefault)
            .setRequestCompressionConfigs(requestCompressionConfigs)
            .build();
    Assert.assertEquals(factory.getStreamRequestCompressionConfig(serviceName, StreamEncodingType.SNAPPY_FRAMED), expectedConfig);
}
|
/**
 * Attaches timeout handling to the given future: if it is not done once
 * {@code timeout} elapses, it is completed exceptionally with the throwable
 * produced by {@code exceptionSupplier}. The scheduled timeout task is
 * cancelled as soon as the future completes by any other means.
 *
 * @return the same future instance, for chaining
 */
public static <T> CompletableFuture<T> addTimeoutHandling(CompletableFuture<T> future, Duration timeout,
                                                          ScheduledExecutorService executor,
                                                          Supplier<Throwable> exceptionSupplier) {
    final Runnable failOnTimeout = () -> {
        if (!future.isDone()) {
            future.completeExceptionally(exceptionSupplier.get());
        }
    };
    final ScheduledFuture<?> timeoutTask =
            executor.schedule(failOnTimeout, timeout.toMillis(), TimeUnit.MILLISECONDS);
    // Whichever way the future finishes, the pending timeout task is no longer needed.
    future.whenComplete((result, error) -> timeoutTask.cancel(false));
    return future;
}
|
// A never-completed future must be failed with exactly the supplied exception
// once the 1 ms timeout fires.
@Test
public void testTimeoutHandling() {
    CompletableFuture<Void> future = new CompletableFuture<>();
    @Cleanup("shutdownNow")
    ScheduledExecutorService executor = Executors.newScheduledThreadPool(1);
    Exception e = new Exception();
    try {
        FutureUtil.addTimeoutHandling(future, Duration.ofMillis(1), executor, () -> e);
        future.get();
        fail("Should have failed.");
    } catch (InterruptedException interruptedException) {
        fail("Shouldn't occur");
    } catch (ExecutionException executionException) {
        // The timeout exception must surface as the cause, unchanged.
        assertEquals(executionException.getCause(), e);
    }
}
|
/** Returns the usage recorded for the {@code DailyApiRequests} operation. */
public Usage getDailyApiRequests() {
    return forOperation(Operation.DailyApiRequests.name());
}
|
// Parsing the bundled Salesforce-generated limits.json must yield a Limits
// object whose DailyApiRequests usage includes per-application breakdowns.
@Test
public void shouldDeserializeFromSalesforceGeneratedJSON() throws IOException {
    final ObjectMapper mapper = JsonUtils.createObjectMapper();
    final Object read = mapper.readerFor(Limits.class)
            .readValue(LimitsTest.class.getResource("/org/apache/camel/component/salesforce/api/dto/limits.json"));
    assertThat("Limits should be parsed from JSON", read, instanceOf(Limits.class));
    final Limits limits = (Limits) read;
    final Usage dailyApiRequests = limits.getDailyApiRequests();
    assertFalse(dailyApiRequests.isUnknown(), "Should have some usage present");
    assertFalse(dailyApiRequests.getPerApplicationUsage().isEmpty(), "Per application usage should be present");
    assertNotNull(dailyApiRequests.forApplication("Camel Salesman"),
            "'Camel Salesman' application usage should be present");
}
|
/**
 * Generates an inclusive integer series from {@code start} to {@code end},
 * delegating to the 3-argument overload with a default step of +1 when the
 * series ascends and -1 otherwise (including the start == end case).
 */
@Udf
public List<Integer> generateSeriesInt(
    @UdfParameter(description = "The beginning of the series") final int start,
    @UdfParameter(description = "Marks the end of the series (inclusive)") final int end
) {
    final int defaultStep = end - start > 0 ? 1 : -1;
    return generateSeriesInt(start, end, defaultStep);
}
|
// A descending range (9 -> 0) with a positive step must be rejected.
@Test
public void shouldThrowIfStepWrongSignInt2() {
    // When:
    final Exception e = assertThrows(
            KsqlFunctionException.class,
            () -> rangeUdf.generateSeriesInt(9, 0, 1)
    );
    // Then:
    assertThat(e.getMessage(), containsString(
            "GENERATE_SERIES step has wrong sign"));
}
|
/**
 * Configures the factory to authenticate with the given OAuth bearer token,
 * i.e. an {@code Authorization: Bearer <token>} header.
 */
public CLIConnectionFactory bearerAuth(String bearerToken) {
    return authorization("Bearer " + bearerToken);
}
|
// bearerAuth must prefix the raw token with the "Bearer " scheme.
@Test
void testBearerFromToken() {
    Assertions.assertEquals("Bearer some-token", cliFactory.bearerAuth("some-token").authorization);
}
|
/**
 * Points the internal "entry current directory" variable at the right source:
 * the repository directory when attached to a repository, else the file's
 * directory when loaded from a file, else it keeps its already-set value.
 */
protected void setInternalEntryCurrentDirectory() {
    final String sourceVariable;
    if ( repository != null ) {
        sourceVariable = Const.INTERNAL_VARIABLE_JOB_REPOSITORY_DIRECTORY;
    } else if ( filename != null ) {
        sourceVariable = Const.INTERNAL_VARIABLE_JOB_FILENAME_DIRECTORY;
    } else {
        sourceVariable = Const.INTERNAL_VARIABLE_ENTRY_CURRENT_DIRECTORY;
    }
    variables.setVariable( Const.INTERNAL_VARIABLE_ENTRY_CURRENT_DIRECTORY,
        variables.getVariable( sourceVariable ) );
}
|
// When a repository is attached, the repository directory must win over both
// the filename directory and the previously set value.
@Test
public void testSetInternalEntryCurrentDirectoryWithRepository( ) {
    JobMeta jobMetaTest = new JobMeta( );
    RepositoryDirectoryInterface path = mock( RepositoryDirectoryInterface.class );
    when( path.getPath() ).thenReturn( "aPath" );
    jobMetaTest.setRepository( mock( Repository.class ) );
    jobMetaTest.setRepositoryDirectory( path );
    jobMetaTest.setVariable( Const.INTERNAL_VARIABLE_ENTRY_CURRENT_DIRECTORY, "Original value defined at run execution" );
    jobMetaTest.setVariable( Const.INTERNAL_VARIABLE_JOB_FILENAME_DIRECTORY, "file:///C:/SomeFilenameDirectory" );
    jobMetaTest.setVariable( Const.INTERNAL_VARIABLE_JOB_REPOSITORY_DIRECTORY, "/SomeRepDirectory" );
    jobMetaTest.setInternalEntryCurrentDirectory();
    assertEquals( "/SomeRepDirectory", jobMetaTest.getVariable( Const.INTERNAL_VARIABLE_ENTRY_CURRENT_DIRECTORY ) );
}
|
/**
 * Returns whether the remaining capacity of the original continuous resource
 * can satisfy the requested amount, after subtracting everything already
 * allocated to continuous resources.
 */
boolean hasEnoughResource(ContinuousResource request) {
    final double consumed = allocations.stream()
            .filter(allocation -> allocation.resource() instanceof ContinuousResource)
            .mapToDouble(allocation -> ((ContinuousResource) allocation.resource()).value())
            .sum();
    final double remaining = original.value() - consumed;
    return request.value() <= remaining;
}
|
// With 1 Gbps total and 500 Mbps already allocated, a 600 Mbps request exceeds
// the remaining 500 Mbps and must be rejected.
@Test
public void testHasEnoughResourceWhenLargeResourceIsRequested() {
    ContinuousResource original =
            Resources.continuous(DID, PN1, Bandwidth.class).resource(Bandwidth.gbps(1).bps());
    ContinuousResource allocated =
            Resources.continuous(DID, PN1, Bandwidth.class).resource(Bandwidth.mbps(500).bps());
    ResourceConsumer consumer = IntentId.valueOf(1);
    ContinuousResourceAllocation sut = new ContinuousResourceAllocation(original,
            ImmutableList.of(new ResourceAllocation(allocated, consumer)));
    ContinuousResource request =
            Resources.continuous(DID, PN1, Bandwidth.class).resource(Bandwidth.mbps(600).bps());
    assertThat(sut.hasEnoughResource(request), is(false));
}
|
/**
 * Serializes {@code instance} as a sequence of nested ASN.1 structures, one per
 * object-identifier group of fields: each group encodes its OID first, followed
 * by the encoded field values. Groups whose values cannot be fetched (e.g. an
 * absent optional) are skipped entirely.
 *
 * @throws IOException if writing to the output stream fails
 */
@Override
public void serialize(Asn1OutputStream out, Class<? extends Object> type, Object instance, Asn1ObjectMapper mapper)
        throws IOException {
    for (final Map.Entry<String, List<Asn1Field>> entry : fieldsMap(mapper.getFields(type)).entrySet()) {
        final List<Asn1Field> fields = entry.getValue();
        final Object[] values = new Object[fields.size()];
        // Skip this whole group when its values are unavailable.
        if (!fetchValues(fields, instance, values)) continue;
        try (final Asn1OutputStream seqOut = new Asn1OutputStream(out, getNestedTagNo(type))) {
            // 0x06 is the ASN.1 OBJECT IDENTIFIER tag.
            try (final Asn1OutputStream oidOut = new Asn1OutputStream(seqOut, 0x06)) {
                Asn1Utils.encodeObjectIdentifier(entry.getKey(), oidOut);
            }
            int i = 0;
            // Write each field value under its own tag, in declaration order.
            for (final Asn1Field field : fields) {
                try (final Asn1OutputStream propOut = new Asn1OutputStream(seqOut, field.tagNo)) {
                    mapper.writeValue(propOut, field.converter(), field.type(), values[i++]);
                }
            }
        }
    }
}
|
// A Set with all four values present must serialize into three SEQUENCEs —
// the group containing the optional value gains an extra INTEGER (0x02) entry.
@Test
public void shouldSerializeWithOptional() {
    assertArrayEquals(
            new byte[] {
                0x30, 6, 0x06, 1, 44, 0x02, 1, 4, 0x30, 6, 0x06, 1, 83, 0x02, 1, 3,
                0x30, 9, 0x06, 1, 84, 0x02, 1, 1, 0x02, 1, 2
            },
            serialize(new SetOfIdentifiedConverter(), Set.class, new Set(1, 2, 3, 4))
    );
}
|
/**
 * Validates and persists a brand-new client registration.
 *
 * <p>Rejects clients that already have an ID or a blacklisted redirect URI,
 * fills in a generated client ID when none was supplied, enforces several
 * consistency rules (refresh-token scope, JWKS, HEART mode, sector URI,
 * reserved scopes), stamps the creation time, and saves.
 *
 * @return the persisted client entity
 * @throws IllegalArgumentException if any validation fails
 */
@Override
public ClientDetailsEntity saveNewClient(ClientDetailsEntity client) {
    if (client.getId() != null) { // if it's not null, it's already been saved, this is an error
        throw new IllegalArgumentException("Tried to save a new client with an existing ID: " + client.getId());
    }
    if (client.getRegisteredRedirectUri() != null) {
        for (String uri : client.getRegisteredRedirectUri()) {
            if (blacklistedSiteService.isBlacklisted(uri)) {
                throw new IllegalArgumentException("Client URI is blacklisted: " + uri);
            }
        }
    }
    // assign a random clientid if it's empty
    // NOTE: don't assign a random client secret without asking, since public clients have no secret
    if (Strings.isNullOrEmpty(client.getClientId())) {
        client = generateClientId(client);
    }
    // make sure that clients with the "refresh_token" grant type have the "offline_access" scope, and vice versa
    ensureRefreshTokenConsistency(client);
    // make sure we don't have both a JWKS and a JWKS URI
    ensureKeyConsistency(client);
    // check consistency when using HEART mode
    checkHeartMode(client);
    // timestamp this to right now
    client.setCreatedAt(new Date());
    // check the sector URI
    checkSectorIdentifierUri(client);
    ensureNoReservedScopes(client);
    ClientDetailsEntity c = clientRepository.saveClient(client);
    statsService.resetCache();
    return c;
}
|
// In HEART mode, an authorization_code client without redirect URIs must be
// rejected during saveNewClient.
@Test(expected = IllegalArgumentException.class)
public void heartMode_authcode_redirectUris() {
    Mockito.when(config.isHeartMode()).thenReturn(true);
    ClientDetailsEntity client = new ClientDetailsEntity();
    Set<String> grantTypes = new LinkedHashSet<>();
    grantTypes.add("authorization_code");
    client.setGrantTypes(grantTypes);
    client.setTokenEndpointAuthMethod(AuthMethod.PRIVATE_KEY);
    service.saveNewClient(client);
}
|
/**
 * Returns a read-only view of the emoji history, guaranteed non-empty:
 * when nothing has been loaded yet, the default emoji is inserted first.
 */
public List<HistoryKey> getCurrentHistory() {
    // isEmpty() is the idiomatic (and always-braced) form of size() == 0.
    if (mLoadedKeys.isEmpty()) {
        // For a unknown reason, we cannot have 0 history emoji...
        mLoadedKeys.add(new HistoryKey(DEFAULT_EMOJI, DEFAULT_EMOJI));
    }
    return Collections.unmodifiableList(mLoadedKeys);
}
|
// With no persisted history, the record store must fall back to exactly one
// entry whose name and value are both the default emoji.
@Test
public void testLoadHasDefaultValue() {
    mUnderTest = new QuickKeyHistoryRecords(mSharedPreferences);
    List<QuickKeyHistoryRecords.HistoryKey> keys = mUnderTest.getCurrentHistory();
    Assert.assertNotNull(keys);
    Assert.assertEquals(1, keys.size());
    Assert.assertEquals(QuickKeyHistoryRecords.DEFAULT_EMOJI, keys.get(0).name);
    Assert.assertEquals(QuickKeyHistoryRecords.DEFAULT_EMOJI, keys.get(0).value);
}
|
/**
 * Stores the value with a time-to-live, blocking until the asynchronous write
 * completes.
 *
 * @return the previous value mapped to the key, or {@code null} if there was none
 */
@Override
public V put(K key, V value, Duration ttl) {
    return get(putAsync(key, value, ttl));
}
|
// After the 1-second TTL expires, re-putting the key must behave like a fresh
// insert (previous value is null); a zero TTL means the entry does not expire.
@Test
public void testEntryUpdate() throws InterruptedException {
    RMapCacheNative<Integer, Integer> map = redisson.getMapCacheNative("simple");
    map.put(1, 1, Duration.ofSeconds(1));
    assertThat(map.get(1)).isEqualTo(1);
    Thread.sleep(1000);
    assertThat(map.put(1, 1, Duration.ofSeconds(0))).isNull();
    assertThat(map.get(1)).isEqualTo(1);
}
|
/** Static factory for a {@code UArrayType} wrapping the given component type. */
public static UArrayType create(UType componentType) {
    return new AutoValue_UArrayType(componentType);
}
|
// Array types are equal iff their component types are equal; int[], String[]
// and int[][] must each form distinct equality groups.
@Test
public void equality() {
    new EqualsTester()
            .addEqualityGroup(UArrayType.create(UPrimitiveType.INT))
            .addEqualityGroup(UArrayType.create(UClassType.create("java.lang.String")))
            .addEqualityGroup(UArrayType.create(UArrayType.create(UPrimitiveType.INT)))
            .testEquals();
}
|
/**
 * Validates a property override: immutable properties are always rejected, and
 * properties with a registered handler are run through it. Properties with no
 * handler pass through unchecked.
 *
 * @throws IllegalArgumentException if the property is immutable
 */
@Override
public void validate(final String name, final Object value) {
    // Immutable properties may never be overridden, regardless of value.
    if (immutableProps.contains(name)) {
        throw new IllegalArgumentException(String.format("Cannot override property '%s'", name));
    }
    final Consumer<Object> handler = HANDLERS.get(name);
    if (handler == null) {
        return;
    }
    handler.accept(value);
}
|
// A property that is neither immutable nor handled must validate silently.
@Test
public void shouldNotThrowOnConfigurableProp() {
    validator.validate("mutable-1", "anything");
}
|
/**
 * Assigns standby replicas for stateful tasks, preferring clients whose
 * rack-awareness tags differ from those of the active task's client; any
 * replicas that could not be placed that way are distributed to the least
 * loaded clients afterwards.
 *
 * @return always {@code false}: standby assignment never needs a follow-up
 *         probing rebalance
 */
@Override
public boolean assign(final Map<ProcessId, ClientState> clients,
                      final Set<TaskId> allTaskIds,
                      final Set<TaskId> statefulTaskIds,
                      final AssignmentConfigs configs) {
    final int numStandbyReplicas = configs.numStandbyReplicas();
    final Set<String> rackAwareAssignmentTags = new HashSet<>(tagsFunction.apply(configs));
    // Tracks how many replicas each stateful task still needs.
    final Map<TaskId, Integer> tasksToRemainingStandbys = computeTasksToRemainingStandbys(
        numStandbyReplicas,
        statefulTaskIds
    );
    final Map<String, Set<String>> tagKeyToValues = new HashMap<>();
    final Map<TagEntry, Set<ProcessId>> tagEntryToClients = new HashMap<>();
    fillClientsTagStatistics(clients, tagEntryToClients, tagKeyToValues);
    final ConstrainedPrioritySet standbyTaskClientsByTaskLoad = createLeastLoadedPrioritySetConstrainedByAssignedTask(clients);
    // Replicas that could not be placed on a tag-distinct client are parked here.
    final Map<TaskId, ProcessId> pendingStandbyTasksToClientId = new HashMap<>();
    for (final TaskId statefulTaskId : statefulTaskIds) {
        for (final Map.Entry<ProcessId, ClientState> entry : clients.entrySet()) {
            final ProcessId clientId = entry.getKey();
            final ClientState clientState = entry.getValue();
            // Only the client running the active task anchors standby placement.
            if (clientState.activeTasks().contains(statefulTaskId)) {
                assignStandbyTasksToClientsWithDifferentTags(
                    numStandbyReplicas,
                    standbyTaskClientsByTaskLoad,
                    statefulTaskId,
                    clientId,
                    rackAwareAssignmentTags,
                    clients,
                    tasksToRemainingStandbys,
                    tagKeyToValues,
                    tagEntryToClients,
                    pendingStandbyTasksToClientId
                );
            }
        }
    }
    if (!tasksToRemainingStandbys.isEmpty()) {
        // Fallback: place leftover replicas purely by load, ignoring tags.
        assignPendingStandbyTasksToLeastLoadedClients(clients,
                                                      numStandbyReplicas,
                                                      standbyTaskClientsByTaskLoad,
                                                      tasksToRemainingStandbys);
    }
    // returning false, because standby task assignment will never require a follow-up probing rebalance.
    return false;
}
|
// Six single-capacity clients spread over three zones (one cluster): even with
// every client at capacity, each must receive exactly one standby, and each
// standby must land in a zone different from its active task's zone.
@Test
public void shouldDistributeClientsOnDifferentZoneTagsEvenWhenClientsReachedCapacity() {
    final Map<ProcessId, ClientState> clientStates = mkMap(
        mkEntry(PID_1, createClientStateWithCapacity(PID_1, 1, mkMap(mkEntry(ZONE_TAG, ZONE_1), mkEntry(CLUSTER_TAG, CLUSTER_1)), TASK_0_0)),
        mkEntry(PID_2, createClientStateWithCapacity(PID_2, 1, mkMap(mkEntry(ZONE_TAG, ZONE_2), mkEntry(CLUSTER_TAG, CLUSTER_1)), TASK_0_1)),
        mkEntry(PID_3, createClientStateWithCapacity(PID_3, 1, mkMap(mkEntry(ZONE_TAG, ZONE_3), mkEntry(CLUSTER_TAG, CLUSTER_1)), TASK_0_2)),
        mkEntry(PID_4, createClientStateWithCapacity(PID_4, 1, mkMap(mkEntry(ZONE_TAG, ZONE_1), mkEntry(CLUSTER_TAG, CLUSTER_1)), TASK_1_0)),
        mkEntry(PID_5, createClientStateWithCapacity(PID_5, 1, mkMap(mkEntry(ZONE_TAG, ZONE_2), mkEntry(CLUSTER_TAG, CLUSTER_1)), TASK_1_1)),
        mkEntry(PID_6, createClientStateWithCapacity(PID_6, 1, mkMap(mkEntry(ZONE_TAG, ZONE_3), mkEntry(CLUSTER_TAG, CLUSTER_1)), TASK_1_2))
    );
    final Set<TaskId> allActiveTasks = findAllActiveTasks(clientStates);
    final AssignmentConfigs assignmentConfigs = newAssignmentConfigs(1, ZONE_TAG, CLUSTER_TAG);
    standbyTaskAssignor.assign(clientStates, allActiveTasks, allActiveTasks, assignmentConfigs);
    // One standby replica per client, six in total.
    clientStates.keySet().forEach(client -> assertStandbyTaskCountForClientEqualsTo(clientStates, client, 1));
    assertTotalNumberOfStandbyTasksEqualsTo(clientStates, 6);
    // Each task's standby must sit on a client outside the active task's zone.
    assertTrue(
        standbyClientsHonorRackAwareness(
            TASK_0_0,
            clientStates,
            asList(
                mkSet(PID_2), mkSet(PID_5), mkSet(PID_3), mkSet(PID_6)
            )
        )
    );
    assertTrue(
        standbyClientsHonorRackAwareness(
            TASK_1_0,
            clientStates,
            asList(
                mkSet(PID_2), mkSet(PID_5), mkSet(PID_3), mkSet(PID_6)
            )
        )
    );
    assertTrue(
        standbyClientsHonorRackAwareness(
            TASK_0_1,
            clientStates,
            asList(
                mkSet(PID_1), mkSet(PID_4), mkSet(PID_3), mkSet(PID_6)
            )
        )
    );
    assertTrue(
        standbyClientsHonorRackAwareness(
            TASK_1_1,
            clientStates,
            asList(
                mkSet(PID_1), mkSet(PID_4), mkSet(PID_3), mkSet(PID_6)
            )
        )
    );
    assertTrue(
        standbyClientsHonorRackAwareness(
            TASK_0_2,
            clientStates,
            asList(
                mkSet(PID_1), mkSet(PID_4), mkSet(PID_2), mkSet(PID_5)
            )
        )
    );
    assertTrue(
        standbyClientsHonorRackAwareness(
            TASK_1_2,
            clientStates,
            asList(
                mkSet(PID_1), mkSet(PID_4), mkSet(PID_2), mkSet(PID_5)
            )
        )
    );
}
|
/**
 * Converts a Joda {@link ReadableInstant} into a Cloud API time string, e.g.
 * {@code 1970-01-01T00:00:00Z} or {@code 1970-01-01T00:00:00.001Z}.
 *
 * <p>Fractional seconds are printed only when non-zero, always at millisecond
 * precision (Joda's resolution).
 */
public static String toCloudTime(ReadableInstant instant) {
    // Translate the ReadableInstant to a DateTime with ISOChronology.
    DateTime time = new DateTime(instant);
    // Locale.ROOT keeps the digits ASCII regardless of the JVM's default locale;
    // the shared prefix avoids duplicating the field list across both branches.
    String base =
        String.format(
            java.util.Locale.ROOT,
            "%04d-%02d-%02dT%02d:%02d:%02d",
            time.getYear(),
            time.getMonthOfYear(),
            time.getDayOfMonth(),
            time.getHourOfDay(),
            time.getMinuteOfHour(),
            time.getSecondOfMinute());
    int millis = time.getMillisOfSecond();
    if (millis == 0) {
        return base + "Z";
    }
    return base + String.format(java.util.Locale.ROOT, ".%03dZ", millis);
}
|
// Epoch prints without fractional seconds; 1 ms after epoch prints with
// exactly three fractional digits.
@Test
public void toCloudTimeShouldPrintTimeStrings() {
    assertEquals("1970-01-01T00:00:00Z", toCloudTime(new Instant(0)));
    assertEquals("1970-01-01T00:00:00.001Z", toCloudTime(new Instant(1)));
}
|
/**
 * Builds the REST path for a table:
 * {@code v1/{prefix}/namespaces/{ns}/tables/{name}}, with the namespace and
 * table name URL-encoded.
 *
 * NOTE(review): when no prefix is configured the tests expect
 * {@code v1/namespaces/...} — presumably the SLASH joiner skips the absent
 * segment; confirm its configuration.
 */
public String table(TableIdentifier ident) {
    return SLASH.join(
        "v1",
        prefix,
        "namespaces",
        RESTUtil.encodeNamespace(ident.namespace()),
        "tables",
        RESTUtil.encodeString(ident.name()));
}
|
// The table path must include the catalog prefix segment when configured and
// omit it entirely otherwise.
@Test
public void testTable() {
    TableIdentifier ident = TableIdentifier.of("ns", "table");
    assertThat(withPrefix.table(ident)).isEqualTo("v1/ws/catalog/namespaces/ns/tables/table");
    assertThat(withoutPrefix.table(ident)).isEqualTo("v1/namespaces/ns/tables/table");
}
|
/** Returns the precomputed result metadata for this transform function. */
@Override
public TransformResultMetadata getResultMetadata() {
    return _resultMetadata;
}
|
@Test
public void testArrayElementAtInt() {
    // Pick a random (1-based) position within the multi-value column.
    Random random = new Random();
    int zeroBasedIndex = random.nextInt(MAX_NUM_MULTI_VALUES);
    ExpressionContext expression = RequestContextUtils.getExpression(
        String.format("array_element_at_int(%s, %d)", INT_MV_COLUMN, zeroBasedIndex + 1));
    TransformFunction transformFunction = TransformFunctionFactory.get(expression, _dataSourceMap);
    assertTrue(transformFunction instanceof ScalarTransformFunctionWrapper);
    assertEquals(transformFunction.getResultMetadata().getDataType(), DataType.INT);
    assertTrue(transformFunction.getResultMetadata().isSingleValue());
    // Rows whose array is too short yield the null placeholder.
    int[] expected = new int[NUM_ROWS];
    for (int row = 0; row < NUM_ROWS; row++) {
        int[] values = _intMVValues[row];
        expected[row] = values.length > zeroBasedIndex ? values[zeroBasedIndex] : NullValuePlaceHolder.INT;
    }
    testTransformFunction(transformFunction, expected);
}
|
/**
 * Moves a node, delegating to the plain move feature unless the move crosses an
 * encryption boundary, in which case it falls back to copy-then-delete so the
 * content can be re-encrypted/decrypted.
 */
@Override
public Path move(final Path source, final Path target, final TransferStatus status, final Delete.Callback callback,
                 final ConnectionCallback connectionCallback) throws BackgroundException {
    if(containerService.isContainer(source)) {
        if(new SimplePathPredicate(source.getParent()).test(target.getParent())) {
            // Rename only
            return proxy.move(source, target, status, callback, connectionCallback);
        }
    }
    // XOR: exactly one side is encrypted, i.e. the move crosses an encryption boundary.
    if(new SDSTripleCryptEncryptorFeature(session, nodeid).isEncrypted(source) ^ new SDSTripleCryptEncryptorFeature(session, nodeid).isEncrypted(containerService.getContainer(target))) {
        // Moving into or from an encrypted room
        final Copy copy = new SDSDelegatingCopyFeature(session, nodeid, new SDSCopyFeature(session, nodeid));
        if(log.isDebugEnabled()) {
            log.debug(String.format("Move %s to %s using copy feature %s", source, target, copy));
        }
        final Path c = copy.copy(source, target, status, connectionCallback, new DisabledStreamListener());
        // Delete source file after copy is complete
        final Delete delete = new SDSDeleteFeature(session, nodeid);
        if(delete.isSupported(source)) {
            log.warn(String.format("Delete source %s copied to %s", source, target));
            delete.delete(Collections.singletonMap(source, status), connectionCallback, callback);
        }
        return c;
    }
    else {
        return proxy.move(source, target, status, callback, connectionCallback);
    }
}
|
/**
 * Integration test: moving a file from a plain room into an encrypted room must use
 * the copy+delete fallback, and the target must be readable via Triple-Crypt decryption.
 */
@Test
public void testMoveToEncryptedDataRoom() throws Exception {
    final SDSNodeIdProvider nodeid = new SDSNodeIdProvider(session);
    // room1 is created encrypted, room2 is a plain room.
    final Path room1 = new SDSDirectoryFeature(session, nodeid).createRoom(
        new Path(new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.directory, Path.Type.volume)), true);
    room1.setAttributes(new SDSAttributesFinderFeature(session, nodeid).find(room1));
    final Path room2 = new SDSDirectoryFeature(session, nodeid).mkdir(new Path(
        new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.directory, Path.Type.volume)), new TransferStatus());
    room2.setAttributes(new SDSAttributesFinderFeature(session, nodeid).find(room2));
    // 32769 bytes forces a payload larger than one 32 KiB chunk.
    final byte[] content = RandomUtils.nextBytes(32769);
    final TransferStatus status = new TransferStatus();
    status.setLength(content.length);
    final Path test = new Path(room2, new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file));
    final SDSDirectS3MultipartWriteFeature writer = new SDSDirectS3MultipartWriteFeature(session, nodeid);
    final StatusOutputStream<Node> out = writer.write(test, status, new DisabledConnectionCallback());
    new StreamCopier(status, status).transfer(new ByteArrayInputStream(content), out);
    final Path target = new Path(room1, new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file));
    new SDSDelegatingMoveFeature(session, nodeid, new SDSMoveFeature(session, nodeid)).move(test, target, new TransferStatus().withLength(content.length), new Delete.DisabledCallback(), new DisabledConnectionCallback());
    assertFalse(new SDSFindFeature(session, nodeid).find(new Path(test).withAttributes(PathAttributes.EMPTY)));
    assertTrue(new SDSFindFeature(session, nodeid).find(target));
    final byte[] compare = new byte[content.length];
    // Read back through the decrypting read feature, supplying the vault passphrase.
    final InputStream stream = new TripleCryptReadFeature(session, nodeid, new SDSReadFeature(session, nodeid)).read(target, new TransferStatus().withLength(content.length), new DisabledConnectionCallback() {
        @Override
        public void warn(final Host bookmark, final String title, final String message, final String defaultButton, final String cancelButton, final String preference) {
            //
        }
        @Override
        public Credentials prompt(final Host bookmark, final String title, final String reason, final LoginOptions options) {
            return new VaultCredentials("eth[oh8uv4Eesij");
        }
    });
    IOUtils.readFully(stream, compare);
    stream.close();
    assertArrayEquals(content, compare);
    new SDSDeleteFeature(session, nodeid).delete(Arrays.asList(room1, room2), new DisabledLoginCallback(), new Delete.DisabledCallback());
}
|
/**
 * Lists the names of regular files (not directories) directly under the given path.
 * Supports both plain directories and paths pointing inside a jar file
 * (e.g. {@code /path/to/lib.jar!/cn/hutool/}).
 *
 * @param path directory path or jar-internal path; {@code null} yields an empty list
 * @return file names found under the path, never {@code null}
 * @throws IORuntimeException if the jar file cannot be opened or read
 */
public static List<String> listFileNames(String path) throws IORuntimeException {
    if (path == null) {
        return new ArrayList<>(0);
    }
    int index = path.lastIndexOf(FileUtil.JAR_PATH_EXT);
    if (index < 0) {
        // Plain directory: collect the names of its direct file children.
        final List<String> paths = new ArrayList<>();
        final File[] files = ls(path);
        // Guard against a null listing (File.listFiles may return null for
        // non-directories or I/O errors) to avoid an NPE in the loop below.
        if (files != null) {
            for (File file : files) {
                if (file.isFile()) {
                    paths.add(file.getName());
                }
            }
        }
        return paths;
    }
    // Jar file: split the path into the jar location and the entry prefix inside it.
    path = getAbsolutePath(path);
    // Advance the index past ".jar" so substring(0, index) is the jar file itself.
    index = index + FileUtil.JAR_FILE_EXT.length();
    JarFile jarFile = null;
    try {
        jarFile = new JarFile(path.substring(0, index));
        // Strip a leading "/" so paths like "jar!/cn/hutool/" resolve correctly.
        return ZipUtil.listFileNames(jarFile, StrUtil.removePrefix(path.substring(index + 1), "/"));
    } catch (IOException e) {
        throw new IORuntimeException(StrUtil.format("Can not read file path of [{}]", path), e);
    } finally {
        IoUtil.close(jarFile);
    }
}
|
/**
 * Verifies that listFileNames resolves the classpath root via several equivalent
 * spellings ("classpath:", "" and ".") and finds a known test resource in each case.
 */
@Test
public void listFileNamesTest() {
    List<String> names = FileUtil.listFileNames("classpath:");
    assertTrue(names.contains("hutool.jpg"));
    names = FileUtil.listFileNames("");
    assertTrue(names.contains("hutool.jpg"));
    names = FileUtil.listFileNames(".");
    assertTrue(names.contains("hutool.jpg"));
}
|
/**
 * Parses the command-line flags for ls and initializes the corresponding
 * instance fields, then sets up the ordering comparator.
 *
 * @param args remaining arguments; defaults to the current directory when empty
 */
@Override
protected void processOptions(LinkedList<String> args)
    throws IOException {
    CommandFormat cf = new CommandFormat(0, Integer.MAX_VALUE,
        OPTION_PATHONLY, OPTION_DIRECTORY, OPTION_HUMAN,
        OPTION_HIDENONPRINTABLE, OPTION_RECURSIVE, OPTION_REVERSE,
        OPTION_MTIME, OPTION_SIZE, OPTION_ATIME, OPTION_ECPOLICY);
    cf.parse(args);
    pathOnly = cf.getOpt(OPTION_PATHONLY);
    dirRecurse = !cf.getOpt(OPTION_DIRECTORY);
    // -R only takes effect when -d is not given.
    setRecursive(cf.getOpt(OPTION_RECURSIVE) && dirRecurse);
    humanReadable = cf.getOpt(OPTION_HUMAN);
    hideNonPrintable = cf.getOpt(OPTION_HIDENONPRINTABLE);
    orderReverse = cf.getOpt(OPTION_REVERSE);
    orderTime = cf.getOpt(OPTION_MTIME);
    // Size ordering is ignored when time ordering was requested.
    orderSize = !orderTime && cf.getOpt(OPTION_SIZE);
    useAtime = cf.getOpt(OPTION_ATIME);
    displayECPolicy = cf.getOpt(OPTION_ECPOLICY);
    if (args.isEmpty()) args.add(Path.CUR_DIR);
    initialiseOrderComparator();
}
|
/**
 * Verifies that "ls -u" prints a directory's entries using access-time
 * formatting, in listing order, with the "Found N items" header first.
 */
@Test
public void processPathDirectoryAtime() throws IOException {
    TestFile testfile01 = new TestFile("testDirectory", "testFile01");
    TestFile testfile02 = new TestFile("testDirectory", "testFile02");
    TestFile testfile03 = new TestFile("testDirectory", "testFile03");
    TestFile testfile04 = new TestFile("testDirectory", "testFile04");
    TestFile testfile05 = new TestFile("testDirectory", "testFile05");
    TestFile testfile06 = new TestFile("testDirectory", "testFile06");
    TestFile testDir = new TestFile("", "testDirectory");
    testDir.setIsDir(true);
    testDir.addContents(testfile01, testfile02, testfile03, testfile04,
        testfile05, testfile06);
    LinkedList<PathData> pathData = new LinkedList<PathData>();
    pathData.add(testDir.getPathData());
    PrintStream out = mock(PrintStream.class);
    Ls ls = new Ls();
    ls.out = out;
    LinkedList<String> options = new LinkedList<String>();
    options.add("-u");
    ls.processOptions(options);
    String lineFormat = TestFile.computeLineFormat(pathData);
    ls.processArguments(pathData);
    // InOrder ensures the header and the six lines appear in exactly this sequence.
    InOrder inOrder = inOrder(out);
    inOrder.verify(out).println("Found 6 items");
    inOrder.verify(out).println(testfile01.formatLineAtime(lineFormat));
    inOrder.verify(out).println(testfile02.formatLineAtime(lineFormat));
    inOrder.verify(out).println(testfile03.formatLineAtime(lineFormat));
    inOrder.verify(out).println(testfile04.formatLineAtime(lineFormat));
    inOrder.verify(out).println(testfile05.formatLineAtime(lineFormat));
    inOrder.verify(out).println(testfile06.formatLineAtime(lineFormat));
    verifyNoMoreInteractions(out);
}
|
/**
 * Deletes the key from the underlying provider and evicts all cached entries
 * that may reference it.
 */
@Override
public void deleteKey(String name) throws IOException {
    getKeyProvider().deleteKey(name);
    getExtension().currentKeyCache.invalidate(name);
    getExtension().keyMetadataCache.invalidate(name);
    // invalidating all key versions as we don't know
    // which ones belonged to the deleted key
    getExtension().keyVersionCache.invalidateAll();
}
|
/**
 * Verifies that deleteKey purges the caches: the first lookups hit the cache
 * (one provider call each), and after deletion the same lookups go back to the
 * provider (a second call each).
 */
@Test
public void testDeleteKey() throws Exception {
    KeyProvider.KeyVersion mockKey = Mockito.mock(KeyProvider.KeyVersion.class);
    KeyProvider mockProv = Mockito.mock(KeyProvider.class);
    Mockito.when(mockProv.getCurrentKey(Mockito.eq("k1"))).thenReturn(mockKey);
    Mockito.when(mockProv.getKeyVersion(Mockito.eq("k1@0")))
        .thenReturn(mockKey);
    Mockito.when(mockProv.getMetadata(Mockito.eq("k1"))).thenReturn(
        new KMSClientProvider.KMSMetadata("c", 0, "l", null, new Date(), 1));
    Mockito.when(mockProv.getConf()).thenReturn(new Configuration());
    KeyProvider cache = new CachingKeyProvider(mockProv, 100, 100);
    Assert.assertEquals(mockKey, cache.getCurrentKey("k1"));
    Mockito.verify(mockProv, Mockito.times(1)).getCurrentKey(Mockito.eq("k1"));
    Assert.assertEquals(mockKey, cache.getKeyVersion("k1@0"));
    Mockito.verify(mockProv, Mockito.times(1))
        .getKeyVersion(Mockito.eq("k1@0"));
    cache.deleteKey("k1");
    // asserting the cache is purged
    Assert.assertEquals(mockKey, cache.getCurrentKey("k1"));
    Mockito.verify(mockProv, Mockito.times(2)).getCurrentKey(Mockito.eq("k1"));
    Assert.assertEquals(mockKey, cache.getKeyVersion("k1@0"));
    Mockito.verify(mockProv, Mockito.times(2))
        .getKeyVersion(Mockito.eq("k1@0"));
}
|
/**
 * Issues the run request against the plugin service with the given gRPC deadline applied.
 */
public ListenableFuture<RunResponse> runWithDeadline(RunRequest request, Deadline deadline) {
    return pluginService.withDeadline(deadline).run(request);
}
|
/**
 * Verifies that a run request against a stubbed plugin service completes and
 * echoes the request's target and service back in a single detection report.
 */
@Test
public void run_singlePluginValidRequest_returnSingleDetectionReport() throws Exception {
    RunRequest runRequest = createSinglePluginRunRequest();
    // Stub server implementation: reflect the request parameters into one report.
    PluginServiceImplBase runImpl =
        new PluginServiceImplBase() {
            @Override
            public void run(RunRequest request, StreamObserver<RunResponse> responseObserver) {
                DetectionReportList reportList =
                    DetectionReportList.newBuilder()
                        .addDetectionReports(
                            DetectionReport.newBuilder()
                                .setTargetInfo(request.getTarget())
                                .setNetworkService(request.getPlugins(0).getServices(0)))
                        .build();
                responseObserver.onNext(RunResponse.newBuilder().setReports(reportList).build());
                responseObserver.onCompleted();
            }
        };
    serviceRegistry.addService(runImpl);
    ListenableFuture<RunResponse> run = pluginService.runWithDeadline(runRequest, DEADLINE_DEFAULT);
    RunResponse runResponse = run.get();
    assertThat(run.isDone()).isTrue();
    assertRunResponseContainsAllRunRequestParameters(runResponse, runRequest);
}
|
/**
 * Delegates the write-and-flush to the tail of the pipeline.
 */
@Override
public final ChannelFuture writeAndFlush(Object msg, ChannelPromise promise) {
    return tail.writeAndFlush(msg, promise);
}
|
/**
 * Verifies that a reference-counted message written through the channel is
 * released (deallocate invoked) after the inbound handler has seen it.
 */
@Test
public void testFreeCalled() throws Exception {
    // Latch trips when the holder's last reference is released.
    final CountDownLatch free = new CountDownLatch(1);
    final ReferenceCounted holder = new AbstractReferenceCounted() {
        @Override
        protected void deallocate() {
            free.countDown();
        }
        @Override
        public ReferenceCounted touch(Object hint) {
            return this;
        }
    };
    StringInboundHandler handler = new StringInboundHandler();
    setUp(handler);
    peer.writeAndFlush(holder).sync();
    assertTrue(free.await(10, TimeUnit.SECONDS));
    assertTrue(handler.called);
}
|
/**
 * Returns the SQL fragment for row locking, or an empty string when the
 * dialect does not support {@code SKIP LOCKED}.
 */
@Override
public String selectForUpdateSkipLocked() {
    if (supportsSelectForUpdateSkipLocked) {
        return " FOR UPDATE SKIP LOCKED";
    }
    return "";
}
|
/**
 * Verifies that a MariaDB version without SKIP LOCKED support yields an empty locking clause.
 */
@Test
void otherDBDoesNotSupportSelectForUpdateSkipLocked() {
    assertThat(new MySqlDialect("MariaDB", "10.6").selectForUpdateSkipLocked()).isEmpty();
}
|
/**
 * Maps a {@link NoSuchElementException} to a 404 response whose ProblemDetail
 * carries a localized message. The exception message doubles as the message
 * code and the fallback text.
 */
@ExceptionHandler(NoSuchElementException.class)
public ResponseEntity<ProblemDetail> handleNoSuchElementException(NoSuchElementException exception,
        Locale locale) {
    String detail = this.messageSource.getMessage(exception.getMessage(), new Object[0],
            exception.getMessage(), locale);
    ProblemDetail problem = ProblemDetail.forStatusAndDetail(HttpStatus.NOT_FOUND, detail);
    return ResponseEntity.status(HttpStatus.NOT_FOUND).body(problem);
}
|
/**
 * Verifies the 404 mapping: the localized message is resolved via the message
 * source and placed into the ProblemDetail body.
 */
@Test
void handleNoSuchElementException_ReturnsNotFound() {
    // given
    var exception = new NoSuchElementException("error_code");
    var locale = Locale.of("ru");
    doReturn("error details").when(this.messageSource)
        .getMessage("error_code", new Object[0], "error_code", Locale.of("ru"));
    // when
    var result = this.controller.handleNoSuchElementException(exception, locale);
    // then
    assertNotNull(result);
    assertEquals(HttpStatus.NOT_FOUND, result.getStatusCode());
    assertInstanceOf(ProblemDetail.class, result.getBody());
    assertEquals(HttpStatus.NOT_FOUND.value(), result.getBody().getStatus());
    assertEquals("error details", result.getBody().getDetail());
    verifyNoInteractions(this.productService);
}
|
/**
 * Returns the registered name of this transform function.
 */
@Override
public String getName() {
    return FUNCTION_NAME;
}
|
/**
 * Verifies that subtracting with a null literal produces a null result for
 * every row (the full bitmap marks all rows as null).
 */
@Test
public void testSubtractionNullLiteral() {
    ExpressionContext expression = RequestContextUtils.getExpression("sub(null, 1)");
    TransformFunction transformFunction = TransformFunctionFactory.get(expression, _dataSourceMap);
    Assert.assertTrue(transformFunction instanceof SubtractionTransformFunction);
    Assert.assertEquals(transformFunction.getName(), TransformFunctionType.SUB.getName());
    double[] expectedValues = new double[NUM_ROWS];
    RoaringBitmap roaringBitmap = new RoaringBitmap();
    roaringBitmap.add(0L, NUM_ROWS);
    testTransformFunction(transformFunction, expectedValues, roaringBitmap);
}
|
/**
 * Builds the query result metadata and wraps the query result in a
 * transparent merged result for the session.
 */
@Override
public void execute(final ConnectionSession connectionSession) {
    queryResultMetaData = createQueryResultMetaData();
    mergedResult = new TransparentMergedResult(getQueryResult());
}
|
/**
 * Verifies that executing SHOW PROCEDURE STATUS produces metadata with the
 * expected 11 columns.
 */
@Test
void assertExecute() throws SQLException {
    ShowProcedureStatusExecutor executor = new ShowProcedureStatusExecutor(new MySQLShowProcedureStatusStatement());
    ContextManager contextManager = mockContextManager();
    when(ProxyContext.getInstance().getContextManager()).thenReturn(contextManager);
    executor.execute(mock(ConnectionSession.class));
    assertThat(executor.getQueryResultMetaData().getColumnCount(), is(11));
}
|
/**
 * Reads the named property as an immutable list of longs.
 *
 * @throws IllegalArgumentException if the property is absent or not a long array
 */
public static List<Long> getLongList(String property, JsonNode node) {
    Preconditions.checkArgument(node.has(property), "Cannot parse missing list: %s", property);
    ImmutableList.Builder<Long> longs = ImmutableList.builder();
    longs.addAll(new JsonLongArrayIterator(property, node));
    return longs.build();
}
|
/**
 * Verifies getLongList error handling (missing key, null value, non-long element),
 * successful parsing, and round-tripping through writeLongArray.
 */
@Test
public void getLongList() throws JsonProcessingException {
    assertThatThrownBy(() -> JsonUtil.getLongList("items", JsonUtil.mapper().readTree("{}")))
        .isInstanceOf(IllegalArgumentException.class)
        .hasMessage("Cannot parse missing list: items");
    assertThatThrownBy(
            () -> JsonUtil.getLongList("items", JsonUtil.mapper().readTree("{\"items\": null}")))
        .isInstanceOf(IllegalArgumentException.class)
        .hasMessage("Cannot parse JSON array from non-array value: items: null");
    assertThatThrownBy(
            () ->
                JsonUtil.getLongList(
                    "items", JsonUtil.mapper().readTree("{\"items\": [13, \"23\"]}")))
        .isInstanceOf(IllegalArgumentException.class)
        .hasMessage("Cannot parse long from non-long value in items: \"23\"");
    List<Long> items = Arrays.asList(23L, 45L);
    assertThat(JsonUtil.getLongList("items", JsonUtil.mapper().readTree("{\"items\": [23, 45]}")))
        .isEqualTo(items);
    // Round-trip: serialize with writeLongArray, then parse back.
    String json =
        JsonUtil.generate(
            gen -> {
                gen.writeStartObject();
                JsonUtil.writeLongArray("items", items, gen);
                gen.writeEndObject();
            },
            false);
    assertThat(JsonUtil.getLongList("items", JsonUtil.mapper().readTree(json))).isEqualTo(items);
}
|
/**
 * Lists the clients that published the given service, filtered to the instance
 * matching the requested ip/port. Handles both single-instance and batch
 * publish records.
 */
@GetMapping("/service/publisher/list")
@Secured(action = ActionTypes.READ, resource = "nacos/admin")
public Result<List<ObjectNode>> getPublishedClientList(
        @RequestParam(value = "namespaceId", required = false, defaultValue = Constants.DEFAULT_NAMESPACE_ID) String namespaceId,
        @RequestParam(value = "groupName", required = false, defaultValue = Constants.DEFAULT_GROUP) String groupName,
        @RequestParam(value = "ephemeral", required = false, defaultValue = "true") Boolean ephemeral,
        @RequestParam("serviceName") String serviceName, @RequestParam(value = "ip", required = false) String ip,
        @RequestParam(value = "port", required = false) Integer port) {
    Service service = Service.newService(namespaceId, groupName, serviceName, ephemeral);
    Collection<String> allClientsRegisteredService = clientServiceIndexesManager
            .getAllClientsRegisteredService(service);
    ArrayList<ObjectNode> res = new ArrayList<>();
    for (String clientId : allClientsRegisteredService) {
        Client client = clientManager.getClient(clientId);
        if (client == null) {
            // The client may have disconnected between the index lookup and this fetch.
            continue;
        }
        InstancePublishInfo instancePublishInfo = client.getInstancePublishInfo(service);
        if (instancePublishInfo == null) {
            // Stale index entry: the client no longer publishes this service.
            continue;
        }
        if (instancePublishInfo instanceof BatchInstancePublishInfo) {
            List<InstancePublishInfo> list = ((BatchInstancePublishInfo) instancePublishInfo).getInstancePublishInfos();
            for (InstancePublishInfo info : list) {
                if (!Objects.equals(info.getIp(), ip) || !Objects
                        .equals(port, info.getPort())) {
                    continue;
                }
                res.add(wrapSingleInstance(info).put("clientId", clientId));
            }
        } else {
            if (!Objects.equals(instancePublishInfo.getIp(), ip) || !Objects
                    .equals(port, instancePublishInfo.getPort())) {
                continue;
            }
            res.add(wrapSingleInstance(instancePublishInfo).put("clientId", clientId));
        }
    }
    return Result.success(res);
}
|
/**
 * Verifies /service/publisher/list for a single-instance publisher and then a
 * batch publisher; in both cases exactly one entry matches ip 127.0.0.1:8848.
 */
@Test
void testGetPublishedClientList() throws Exception {
    String baseTestKey = "nacos-getPublishedClientList-test";
    // single instance
    Service service = Service.newService(baseTestKey, baseTestKey, baseTestKey);
    when(clientServiceIndexesManager.getAllClientsRegisteredService(service)).thenReturn(Arrays.asList("test"));
    when(clientManager.getClient("test")).thenReturn(connectionBasedClient);
    connectionBasedClient.addServiceInstance(service, new InstancePublishInfo("127.0.0.1", 8848));
    MockHttpServletRequestBuilder mockHttpServletRequestBuilder = MockMvcRequestBuilders.get(URL + "/service/publisher/list")
        .param("namespaceId", baseTestKey).param("groupName", baseTestKey).param("serviceName", baseTestKey)
        .param("ip", "127.0.0.1").param("port", "8848");
    mockmvc.perform(mockHttpServletRequestBuilder).andExpect(MockMvcResultMatchers.jsonPath("$.data.length()").value(1));
    // batch instances: only the 8848 instance matches the port filter.
    when(clientServiceIndexesManager.getAllClientsRegisteredService(service)).thenReturn(Arrays.asList("test"));
    when(clientManager.getClient("test")).thenReturn(connectionBasedClient);
    BatchInstancePublishInfo instancePublishInfo = new BatchInstancePublishInfo();
    instancePublishInfo.setInstancePublishInfos(
        Arrays.asList(new InstancePublishInfo("127.0.0.1", 8848), new InstancePublishInfo("127.0.0.1", 8849)));
    connectionBasedClient.addServiceInstance(service, instancePublishInfo);
    mockHttpServletRequestBuilder = MockMvcRequestBuilders.get(URL + "/service/publisher/list").param("namespaceId", baseTestKey)
        .param("groupName", baseTestKey).param("serviceName", baseTestKey).param("ip", "127.0.0.1").param("port", "8848");
    mockmvc.perform(mockHttpServletRequestBuilder).andExpect(MockMvcResultMatchers.jsonPath("$.data.length()").value(1));
}
|
/**
 * Converts the given value to a BigDecimal according to this meta's declared
 * type and storage type. Binary-string and indexed storage are first resolved
 * to the native representation before conversion.
 *
 * @param object the stored value; {@code null} (per {@code isNull}) yields {@code null}
 * @return the value as a BigDecimal, or {@code null}
 * @throws KettleValueException for binary/serializable types, unknown type or
 *         storage codes, or any conversion failure (wrapped with context)
 */
@Override
public BigDecimal getBigNumber( Object object ) throws KettleValueException {
  try {
    if ( isNull( object ) ) {
      return null;
    }
    // Outer switch: declared value type; inner switch: how the value is stored.
    switch ( type ) {
      case TYPE_BIGNUMBER:
        switch ( storageType ) {
          case STORAGE_TYPE_NORMAL:
            return (BigDecimal) object;
          case STORAGE_TYPE_BINARY_STRING:
            return (BigDecimal) convertBinaryStringToNativeType( (byte[]) object );
          case STORAGE_TYPE_INDEXED:
            return (BigDecimal) index[( (Integer) object ).intValue()];
          default:
            throw new KettleValueException( toString() + " : Unknown storage type " + storageType + " specified." );
        }
      case TYPE_STRING:
        switch ( storageType ) {
          case STORAGE_TYPE_NORMAL:
            return convertStringToBigNumber( (String) object );
          case STORAGE_TYPE_BINARY_STRING:
            return convertStringToBigNumber( (String) convertBinaryStringToNativeType( (byte[]) object ) );
          case STORAGE_TYPE_INDEXED:
            return convertStringToBigNumber( (String) index[( (Integer) object ).intValue()] );
          default:
            throw new KettleValueException( toString() + " : Unknown storage type " + storageType + " specified." );
        }
      case TYPE_INTEGER:
        switch ( storageType ) {
          case STORAGE_TYPE_NORMAL:
            return BigDecimal.valueOf( ( (Long) object ).longValue() );
          case STORAGE_TYPE_BINARY_STRING:
            return BigDecimal.valueOf( ( (Long) convertBinaryStringToNativeType( (byte[]) object ) ).longValue() );
          case STORAGE_TYPE_INDEXED:
            return BigDecimal.valueOf( ( (Long) index[( (Integer) object ).intValue()] ).longValue() );
          default:
            throw new KettleValueException( toString() + " : Unknown storage type " + storageType + " specified." );
        }
      case TYPE_NUMBER:
        switch ( storageType ) {
          case STORAGE_TYPE_NORMAL:
            return BigDecimal.valueOf( ( (Double) object ).doubleValue() );
          case STORAGE_TYPE_BINARY_STRING:
            return BigDecimal.valueOf( ( (Double) convertBinaryStringToNativeType( (byte[]) object ) ).doubleValue() );
          case STORAGE_TYPE_INDEXED:
            return BigDecimal.valueOf( ( (Double) index[( (Integer) object ).intValue()] ).doubleValue() );
          default:
            throw new KettleValueException( toString() + " : Unknown storage type " + storageType + " specified." );
        }
      case TYPE_DATE:
        switch ( storageType ) {
          case STORAGE_TYPE_NORMAL:
            return convertDateToBigNumber( (Date) object );
          case STORAGE_TYPE_BINARY_STRING:
            return convertDateToBigNumber( (Date) convertBinaryStringToNativeType( (byte[]) object ) );
          case STORAGE_TYPE_INDEXED:
            return convertDateToBigNumber( (Date) index[( (Integer) object ).intValue()] );
          default:
            throw new KettleValueException( toString() + " : Unknown storage type " + storageType + " specified." );
        }
      case TYPE_BOOLEAN:
        switch ( storageType ) {
          case STORAGE_TYPE_NORMAL:
            return convertBooleanToBigNumber( (Boolean) object );
          case STORAGE_TYPE_BINARY_STRING:
            return convertBooleanToBigNumber( (Boolean) convertBinaryStringToNativeType( (byte[]) object ) );
          case STORAGE_TYPE_INDEXED:
            return convertBooleanToBigNumber( (Boolean) index[( (Integer) object ).intValue()] );
          default:
            throw new KettleValueException( toString() + " : Unknown storage type " + storageType + " specified." );
        }
      case TYPE_BINARY:
        throw new KettleValueException( toString() + " : I don't know how to convert binary values to BigDecimals." );
      case TYPE_SERIALIZABLE:
        throw new KettleValueException( toString() + " : I don't know how to convert serializable values to BigDecimals." );
      default:
        throw new KettleValueException( toString() + " : Unknown type " + type + " specified." );
    }
  } catch ( Exception e ) {
    // Wrap any failure (e.g. ClassCastException, NumberFormatException) with context.
    throw new KettleValueException( "Unexpected conversion error while converting value [" + toString()
      + "] to a BigNumber", e );
  }
}
|
// A BigNumber meta given a raw String (not a BigDecimal) must fail the cast and
// surface it as a KettleValueException.
@Test( expected = KettleValueException.class )
public void testGetBigDecimalThrowsKettleValueException() throws KettleValueException {
  ValueMetaBase valueMeta = new ValueMetaBigNumber();
  valueMeta.getBigNumber( "1234567890" );
}
|
/**
 * Records the client IP/port on the span, preferring the X-Forwarded-For
 * header and falling back to the socket's remote address and port.
 */
@Override public boolean parseClientIpAndPort(Span span) {
    if (parseClientIpFromXForwardedFor(span)) {
        return true;
    }
    return span.remoteIpAndPort(delegate.getRemoteAddr(), delegate.getRemotePort());
}
|
// When X-Forwarded-For resolves the client IP, the socket's remote port must not
// be consulted: the span records the forwarded IP with port 0 and nothing else.
@Test void parseClientIpAndPort_skipsRemotePortOnXForwardedFor() {
    when(request.getHeader("X-Forwarded-For")).thenReturn("1.2.3.4");
    when(span.remoteIpAndPort("1.2.3.4", 0)).thenReturn(true);
    wrapper.parseClientIpAndPort(span);
    verify(span).remoteIpAndPort("1.2.3.4", 0);
    verifyNoMoreInteractions(span);
}
|
/**
 * Returns the localized file-filter display names for job files, computing
 * them lazily on first use.
 *
 * <p>NOTE(review): the lazy initialization is not synchronized — concurrent
 * first calls may each build the array. Presumably benign since the result is
 * identical, but confirm if thread-safety matters here. The internal array is
 * also returned directly, so callers could mutate the cached copy.
 */
public static String[] getJobFilterNames() {
  if ( STRING_JOB_FILTER_NAMES == null ) {
    STRING_JOB_FILTER_NAMES =
      new String[] {
        BaseMessages.getString( PKG, "Const.FileFilter.Jobs" ),
        BaseMessages.getString( PKG, "Const.FileFilter.XML" ),
        BaseMessages.getString( PKG, "Const.FileFilter.All" ) };
  }
  return STRING_JOB_FILTER_NAMES;
}
|
@Test
public void testGetJobFilterNames() {
    // Expect exactly three localized filter names, none of them blank.
    final List<String> filterNames = Arrays.asList( Const.getJobFilterNames() );
    assertTrue( filterNames.size() == 3 );
    for ( final String filterName : filterNames ) {
        assertFalse( filterName.isEmpty() );
    }
}
|
/**
 * Replaces the root disk of hosts marked for rebuilding. All candidate nodes
 * are locked before calling the host provisioner; successfully rebuilt nodes
 * whose wantToRebuild flag has cleared are written back, and failures are
 * logged and counted toward the success-factor deviation.
 *
 * @return the success-factor deviation for this maintenance run
 */
@Override
protected double maintain() {
    NodeList candidates = nodeRepository().nodes().list().rebuilding(true);
    if (candidates.isEmpty()) {
        return 0;
    }
    int failures = 0;
    List<Node> rebuilding;
    // Lock all candidates (bounded wait) so the rebuild and write-back are atomic
    // with respect to other node mutations.
    try (var locked = nodeRepository().nodes().lockAndGetAll(candidates.asList(), Optional.of(Duration.ofSeconds(10)))) {
        rebuilding = locked.nodes().stream().map(NodeMutex::node).toList();
        RebuildResult result = hostProvisioner.replaceRootDisk(rebuilding);
        for (Node updated : result.rebuilt())
            // Only persist nodes whose rebuild flag has actually cleared.
            if (!updated.status().wantToRebuild())
                nodeRepository().nodes().write(updated, () -> { });
        for (var entry : result.failed().entrySet()) {
            ++failures;
            log.log(Level.WARNING, "Failed to rebuild " + entry.getKey() + ", will retry in " +
                                   interval() + ": " + Exceptions.toMessageString(entry.getValue()));
        }
    }
    return asSuccessFactorDeviation(rebuilding.size(), failures);
}
|
/**
 * Walks a host through the rebuild lifecycle: no-op maintenance, marking for
 * rebuild (flag persists through maintenance), and completion (flag cleared).
 */
@Test
public void rebuild_host() {
    tester.makeReadyHosts(2, new NodeResources(1, 1, 1, 1, NodeResources.DiskSpeed.fast, NodeResources.StorageType.remote)).activateTenantHosts();
    // No rebuilds in initial run
    diskReplacer.maintain();
    assertEquals(0, tester.nodeRepository().nodes().list().rebuilding(true).size());
    // Host starts rebuilding
    tester.nodeRepository().nodes().rebuild("host-1.yahoo.com", true, Agent.RebuildingOsUpgrader,
        tester.nodeRepository().clock().instant());
    diskReplacer.maintain();
    assertEquals(1, tester.nodeRepository().nodes().list().rebuilding(true).size());
    // Rebuild completes
    hostProvisioner.completeRebuildOf("host-1.yahoo.com");
    diskReplacer.maintain();
    assertEquals(0, tester.nodeRepository().nodes().list().rebuilding(true).size());
}
|
/**
 * Deletes a book via the library client and records an audit entry for the
 * outcome (success, remote error, or local failure).
 *
 * @param id the book id to delete
 * @return the library response, or a generic error response on failure
 */
public LibResponse deleteBook(Long id) {
    LibResponse resp = null;
    AuditDto audit = null;
    try {
        Call<LibResponse> callLibResponse = libraryClient.deleteBook(id);
        Response<LibResponse> libResponse = callLibResponse.execute();
        if (libResponse.isSuccessful()) {
            resp = libResponse.body();
            audit = auditMapper.populateAuditLogForDelete(id, resp);
        } else if (Objects.nonNull(libResponse.errorBody())) {
            // ResponseBody.string() consumes and closes the underlying stream, so it
            // must be read exactly once and reused (previously it was read twice,
            // which made the second read fail).
            String errorJson = libResponse.errorBody().string();
            log.error("Error calling library client: {}", errorJson);
            resp = new ObjectMapper().readValue(errorJson, LibResponse.class);
            // NOTE(review): HttpMethod.POST looks like a copy-paste from another
            // handler — confirm whether the audit entry should record DELETE.
            audit = auditMapper.populateAuditLogForException(
                    String.valueOf(id), HttpMethod.POST, errorJson);
        } else {
            log.error("Error calling library client: no error body");
        }
    } catch (Exception ex) {
        log.error("Error handling retrofit call for deleteBook", ex);
        resp = new LibResponse(Constants.ERROR, "Failed");
    }
    if (Objects.nonNull(audit)) {
        AuditLog savedObj = auditRepository.save(libraryMapper.auditDtoToAuditLog(audit));
        log.info("Saved into audit successfully : {}", savedObj);
    }
    return resp;
}
|
/**
 * Exercises the error path of deleteBook: the client returns HTTP 500 with a
 * JSON error body, and the service must surface an error response.
 * NOTE(review): the expected code "Error" presumably comes from Constants.ERROR
 * via the exception fallback — confirm against those constants.
 */
@Test
@DisplayName("Cannot delete a book")
public void deleteBookRequestTest() throws Exception {
    LibResponse response = new LibResponse(Status.ERROR.toString(), "Could not delete book for id : 1000");
    ResponseBody respBody = ResponseBody.create(MediaType.parse("application/json"),
        new ObjectMapper().writeValueAsString(response));
    Response<LibResponse> respLib = Response.error(500, respBody);
    when(libraryClient.deleteBook(Long.valueOf("1000"))).thenReturn(Calls.response(respLib));
    doReturn(null).when(auditRepository).save(any());
    LibResponse libResponse = libraryAuditService.deleteBook(Long.valueOf("1000"));
    assertAll(
        () -> assertNotNull(libResponse),
        () -> assertTrue(libResponse.getResponseCode().equals("Error"))
    );
}
|
/**
 * Returns an iterable view of this float array that compares elements using
 * exact (bitwise) float equality.
 */
public FloatArrayAsIterable usingExactEquality() {
    return new FloatArrayAsIterable(EXACT_EQUALITY_CORRESPONDENCE, iterableSubject());
}
|
// containsAnyOf succeeds because 2.0f is present exactly; 99.99f need not match.
@Test
public void usingExactEquality_containsAnyOf_primitiveFloatArray_success() {
    assertThat(array(1.0f, 2.0f, 3.0f)).usingExactEquality().containsAnyOf(array(99.99f, 2.0f));
}
|
/**
 * Returns a cached, human-readable descriptor for the given operation.
 * Wrapper operations (PartitionIteratingOperation, Backup) are described as
 * {@code Wrapper(inner.class.name)}; all others by their class name.
 */
public static String toOperationDesc(Operation op) {
    Class<? extends Operation> operationClass = op.getClass();
    if (PartitionIteratingOperation.class.isAssignableFrom(operationClass)) {
        OperationFactory operationFactory = ((PartitionIteratingOperation) op).getOperationFactory();
        return cachedWrapperDesc(PartitionIteratingOperation.class, operationFactory.getClass());
    } else if (Backup.class.isAssignableFrom(operationClass)) {
        Operation backupOperation = ((Backup) op).getBackupOp();
        return cachedWrapperDesc(Backup.class, backupOperation.getClass());
    } else {
        return operationClass.getName();
    }
}

/**
 * Looks up (or builds and caches) the descriptor for a wrapper operation
 * around the given inner class.
 *
 * <p>NOTE(review): the cache key is the inner class name only, as in the
 * original code — if the same class ever appears inside both wrapper kinds,
 * the first cached descriptor wins. Confirm this cannot happen in practice.
 */
private static String cachedWrapperDesc(Class<?> wrapperClass, Class<?> innerClass) {
    String key = innerClass.getName();
    String desc = DESCRIPTORS.get(key);
    if (desc == null) {
        desc = wrapperClass.getSimpleName() + "(" + key + ")";
        DESCRIPTORS.put(key, desc);
    }
    return desc;
}
|
// A PartitionIteratingOperation is described by its wrapper name plus the
// factory's fully-qualified class name.
@Test
public void testPartitionIteratingOperation() {
    PartitionIteratingOperation op = new PartitionIteratingOperation(new DummyOperationFactory(), new int[0]);
    String result = toOperationDesc(op);
    assertEquals(format("PartitionIteratingOperation(%s)", DummyOperationFactory.class.getName()), result);
}
|
/**
 * Sets the working directory in which this task's command runs.
 */
public void setWorkingDirectory(String workingDir) {
    this.workingDirectory = workingDir;
}
|
/**
 * Verifies that config validation rejects a build task whose working directory
 * ('/blah') escapes the pipeline's working directory, and attaches the error to
 * the task's WORKING_DIRECTORY field.
 */
@Test
public void shouldErrorOutIfWorkingDirectoryIsOutsideTheCurrentWorkingDirectory() {
    // Minimal anonymous BuildTask: only the task type matters for this validation.
    BuildTask task = new BuildTask() {
        @Override
        public String getTaskType() {
            return "build";
        }
        @Override
        public String getTypeForDisplay() {
            return null;
        }
        @Override
        public String command() {
            return null;
        }
        @Override
        public String arguments() {
            return null;
        }
    };
    task.setWorkingDirectory("/blah");
    CruiseConfig config = GoConfigMother.configWithPipelines("pipeline");
    PipelineConfig pipeline = config.pipelineConfigByName(new CaseInsensitiveString("pipeline"));
    StageConfig stage = pipeline.get(0);
    JobConfig job = stage.getJobs().get(0);
    job.addTask(task);
    List<ConfigErrors> errors = config.validateAfterPreprocess();
    assertThat(errors.size(), is(1));
    String message = "Task of job 'job' in stage 'stage' of pipeline 'pipeline' has path '/blah' which is outside the working directory.";
    assertThat(task.errors().on(BuildTask.WORKING_DIRECTORY), is(message));
}
|
/**
 * Parses a search-path string into a SearchPath.
 *
 * <p>Returns empty for null/empty input, for multi-level paths (containing
 * {@code ';'}, unsupported at this time), and for a parsed path that is empty.
 *
 * @throws InvalidSearchPathException if the string cannot be parsed
 */
static Optional<SearchPath> fromString(String path) {
    if (path == null || path.isEmpty()) {
        return Optional.empty();
    }
    if (path.indexOf(';') >= 0) {
        return Optional.empty(); // multi-level not supported at this time
    }
    try {
        SearchPath searchPath = parseElement(path);
        return searchPath.isEmpty() ? Optional.empty() : Optional.of(searchPath);
    } catch (NumberFormatException | InvalidSearchPathException e) {
        // Re-wrap so the message carries the offending input.
        throw new InvalidSearchPathException("Invalid search path '" + path + "'", e);
    }
}
|
// A malformed range element ("[p,0>") must be rejected with InvalidSearchPathException.
@Test
void invalidRangeMustThrowException() {
    try {
        SearchPath.fromString("[p,0>/0");
        fail("Expected exception");
    }
    catch (InvalidSearchPathException e) {
        // success
    }
}
|
/**
 * Extracts all capture groups of the first match of the pattern in the
 * content, including group 0 (the whole match).
 */
public static List<String> getAllGroups(Pattern pattern, CharSequence content) {
    return getAllGroups(pattern, content, true);
}
|
@Test
public void getAllGroupsTest() {
    // With group 0 included, the first element is the whole match; without it,
    // only the numbered capture groups are returned.
    final Pattern pattern = Pattern.compile("(\\d+)-(\\d+)-(\\d+)");
    List<String> allGroups = ReUtil.getAllGroups(pattern, "192-168-1-1");
    assertEquals("192-168-1", allGroups.get(0));
    assertEquals("192", allGroups.get(1));
    assertEquals("168", allGroups.get(2));
    assertEquals("1", allGroups.get(3));
    allGroups = ReUtil.getAllGroups(pattern, "192-168-1-1", false);
    assertEquals("192", allGroups.get(0));
    assertEquals("168", allGroups.get(1));
    assertEquals("1", allGroups.get(2));
}
|
// Returns a page of LwM2M objects for the current tenant, parsed from
// LWM2M_MODEL resources, with optional text search and sorting.
@ApiOperation(value = "Get LwM2M Objects (getLwm2mListObjectsPage)",
    notes = "Returns a page of LwM2M objects parsed from Resources with type 'LWM2M_MODEL' owned by tenant or sysadmin. " +
        PAGE_DATA_PARAMETERS + LWM2M_OBJECT_DESCRIPTION + TENANT_AUTHORITY_PARAGRAPH)
@PreAuthorize("hasAnyAuthority('TENANT_ADMIN')")
@GetMapping(value = "/resource/lwm2m/page")
public List<LwM2mObject> getLwm2mListObjectsPage(@Parameter(description = PAGE_SIZE_DESCRIPTION, required = true)
                                                 @RequestParam int pageSize,
                                                 @Parameter(description = PAGE_NUMBER_DESCRIPTION, required = true)
                                                 @RequestParam int page,
                                                 @Parameter(description = RESOURCE_TEXT_SEARCH_DESCRIPTION)
                                                 @RequestParam(required = false) String textSearch,
                                                 @Parameter(description = SORT_PROPERTY_DESCRIPTION, schema = @Schema(allowableValues = {"id", "name"}))
                                                 @RequestParam(required = false) String sortProperty,
                                                 @Parameter(description = SORT_ORDER_DESCRIPTION, schema = @Schema(allowableValues = {"ASC", "DESC"}))
                                                 @RequestParam(required = false) String sortOrder) throws ThingsboardException {
    PageLink pageLink = new PageLink(pageSize, page, textSearch);
    return checkNotNull(tbResourceService.findLwM2mObjectPage(getTenantId(), sortProperty, sortOrder, pageLink));
}
|
// Loads LwM2M model resources as a tenant admin and verifies the paged endpoint
// returns exactly one parsed object per loaded resource.
@Test
public void testGetLwm2mListObjectsPage() throws Exception {
    loginTenantAdmin();
    List<TbResource> resources = loadLwm2mResources();
    List<LwM2mObject> objects =
            doGetTyped("/api/resource/lwm2m/page?pageSize=100&page=0", new TypeReference<>() {});
    Assert.assertNotNull(objects);
    Assert.assertEquals(resources.size(), objects.size());
    // Clean up so other tests start from an empty resource set.
    removeLoadResources(resources);
}
|
/**
 * Deletes the state document associated with the given event definition id.
 *
 * @param id event definition id whose state should be removed
 * @return number of removed documents, or 0 when no state exists for the id
 */
public int deleteByEventDefinitionId(String id) {
    return findByEventDefinitionId(id)
            .map(dto -> db.removeById(new ObjectId(requireNonNull(dto.id()))).getN())
            .orElse(0);
}
|
// NOTE(review): the test name says "EventProcessorId" but it exercises
// deleteByEventDefinitionId — presumably legacy naming; confirm before renaming.
@Test
@MongoDBFixtures("event-processor-state.json")
public void deleteByEventProcessorId() {
    // A fixture-backed id removes exactly one document; an unknown id removes none.
    assertThat(stateService.deleteByEventDefinitionId("54e3deadbeefdeadbeefaff3")).isEqualTo(1);
    assertThat(stateService.deleteByEventDefinitionId("nope")).isEqualTo(0);
}
|
/**
 * Handles a job submission: indexes the uploaded files by name, loads the serialized
 * {@link JobGraph}, uploads jars/artifacts to the blob server, and submits the job.
 *
 * @throws RestHandlerException with {@code 400 BAD_REQUEST} on duplicate upload
 *         names or a missing job graph file name
 */
@Override
protected CompletableFuture<JobSubmitResponseBody> handleRequest(
        @Nonnull HandlerRequest<JobSubmitRequestBody> request,
        @Nonnull DispatcherGateway gateway)
        throws RestHandlerException {
    final Collection<File> uploadedFiles = request.getUploadedFiles();
    // Use a merge function when indexing by name: without it, Collectors.toMap throws
    // IllegalStateException on duplicate file names, so the duplicate check below could
    // never fire and clients would see a 500 instead of the intended 400.
    final Map<String, Path> nameToFile =
            uploadedFiles.stream()
                    .collect(
                            Collectors.toMap(
                                    File::getName,
                                    Path::fromLocalFile,
                                    (first, second) -> first));
    if (uploadedFiles.size() != nameToFile.size()) {
        throw new RestHandlerException(
                String.format(
                        "The number of uploaded files was %s than the expected count. Expected: %s Actual %s",
                        uploadedFiles.size() < nameToFile.size() ? "lower" : "higher",
                        nameToFile.size(),
                        uploadedFiles.size()),
                HttpResponseStatus.BAD_REQUEST);
    }
    final JobSubmitRequestBody requestBody = request.getRequestBody();
    if (requestBody.jobGraphFileName == null) {
        throw new RestHandlerException(
                String.format(
                        "The %s field must not be omitted or be null.",
                        JobSubmitRequestBody.FIELD_NAME_JOB_GRAPH),
                HttpResponseStatus.BAD_REQUEST);
    }
    // Deserialize the job graph off the request thread.
    CompletableFuture<JobGraph> jobGraphFuture = loadJobGraph(requestBody, nameToFile);
    Collection<Path> jarFiles = getJarFilesToUpload(requestBody.jarFileNames, nameToFile);
    Collection<Tuple2<String, Path>> artifacts =
            getArtifactFilesToUpload(requestBody.artifactFileNames, nameToFile);
    // Attach jars/artifacts to the graph via the blob server before submission.
    CompletableFuture<JobGraph> finalizedJobGraphFuture =
            uploadJobGraphFiles(gateway, jobGraphFuture, jarFiles, artifacts, configuration);
    CompletableFuture<Acknowledge> jobSubmissionFuture =
            finalizedJobGraphFuture.thenCompose(
                    jobGraph -> gateway.submitJob(jobGraph, timeout));
    // Respond with the REST URL of the submitted job.
    return jobSubmissionFuture.thenCombine(
            jobGraphFuture,
            (ack, jobGraph) -> new JobSubmitResponseBody("/jobs/" + jobGraph.getJobID()));
}
|
// Serializes an empty JobGraph to a temp file, submits it through the handler against
// a stubbed dispatcher gateway, and expects the submission future to complete normally.
@TestTemplate
void testSuccessfulJobSubmission() throws Exception {
    // Write a serialized empty job graph for the handler to load.
    final Path jobGraphFile = TempDirUtils.newFile(temporaryFolder).toPath();
    try (ObjectOutputStream objectOut =
            new ObjectOutputStream(Files.newOutputStream(jobGraphFile))) {
        objectOut.writeObject(JobGraphTestUtils.emptyJobGraph());
    }
    // Gateway stub: accepts any submission and points at the local blob server.
    TestingDispatcherGateway.Builder builder = TestingDispatcherGateway.newBuilder();
    builder.setBlobServerPort(blobServer.getPort())
            .setSubmitFunction(jobGraph -> CompletableFuture.completedFuture(Acknowledge.get()))
            .setHostname("localhost");
    DispatcherGateway mockGateway = builder.build();
    JobSubmitHandler handler =
            new JobSubmitHandler(
                    () -> CompletableFuture.completedFuture(mockGateway),
                    RpcUtils.INF_TIMEOUT,
                    Collections.emptyMap(),
                    Executors.directExecutor(),
                    configuration);
    // Request references the graph file by name; no jars or artifacts.
    JobSubmitRequestBody request =
            new JobSubmitRequestBody(
                    jobGraphFile.getFileName().toString(),
                    Collections.emptyList(),
                    Collections.emptyList());
    // get() propagates any submission failure as a test failure.
    handler.handleRequest(
                    HandlerRequest.create(
                            request,
                            EmptyMessageParameters.getInstance(),
                            Collections.singleton(jobGraphFile.toFile())),
                    mockGateway)
            .get();
}
|
/**
 * Converts a PostgreSQL WAL event into a pipeline {@link Record}.
 *
 * <p>Filtered events and non-row events become placeholder records so the WAL
 * position still advances; row events are dispatched by concrete type.
 *
 * @param event WAL event to convert
 * @return data record for row events, placeholder record otherwise
 * @throws UnsupportedSQLOperationException if the row event type is not handled
 */
public Record convert(final AbstractWALEvent event) {
    if (filter(event)) {
        return createPlaceholderRecord(event);
    }
    if (!(event instanceof AbstractRowEvent)) {
        return createPlaceholderRecord(event);
    }
    PipelineTableMetaData tableMetaData = getPipelineTableMetaData(((AbstractRowEvent) event).getTableName());
    if (event instanceof WriteRowEvent) {
        return handleWriteRowEvent((WriteRowEvent) event, tableMetaData);
    }
    if (event instanceof UpdateRowEvent) {
        return handleUpdateRowEvent((UpdateRowEvent) event, tableMetaData);
    }
    if (event instanceof DeleteRowEvent) {
        return handleDeleteRowEvent((DeleteRowEvent) event, tableMetaData);
    }
    // Previously thrown with an empty message, which made failures undiagnosable.
    throw new UnsupportedSQLOperationException("Unsupported WAL event type: " + event.getClass().getName());
}
|
// An UpdateRowEvent must convert to a DataRecord tagged with the UPDATE operation type.
@Test
void assertConvertUpdateRowEvent() {
    Record record = walEventConverter.convert(mockUpdateRowEvent());
    assertThat(record, instanceOf(DataRecord.class));
    assertThat(((DataRecord) record).getType(), is(PipelineSQLOperationType.UPDATE));
}
|
/**
 * Builds the {@code application/x-www-form-urlencoded} body for an OAuth
 * client_credentials token request.
 *
 * @param scope optional scope; blank or null scopes are omitted, otherwise the
 *              trimmed value is URL-encoded and appended
 * @return the request body, always starting with {@code grant_type=client_credentials}
 * @throws IOException if UTF-8 is unsupported (effectively unreachable per the JLS)
 */
static String formatRequestBody(String scope) throws IOException {
    try {
        StringBuilder requestParameters = new StringBuilder();
        requestParameters.append("grant_type=client_credentials");
        if (scope != null && !scope.trim().isEmpty()) {
            scope = scope.trim();
            String encodedScope = URLEncoder.encode(scope, StandardCharsets.UTF_8.name());
            requestParameters.append("&scope=").append(encodedScope);
        }
        return requestParameters.toString();
    } catch (UnsupportedEncodingException e) {
        // The world has gone crazy!
        // Chain the cause so diagnostics are not lost if this ever fires
        // (the original code dropped `e` entirely).
        throw new IOException(String.format("Encoding %s not supported", StandardCharsets.UTF_8.name()), e);
    }
}
|
// Null, empty, and whitespace-only scopes must all be omitted from the request body,
// leaving only the mandatory grant_type parameter.
@Test
public void testFormatRequestBodyMissingValues() throws IOException {
    String expected = "grant_type=client_credentials";
    String actual = HttpAccessTokenRetriever.formatRequestBody(null);
    assertEquals(expected, actual);
    actual = HttpAccessTokenRetriever.formatRequestBody("");
    assertEquals(expected, actual);
    actual = HttpAccessTokenRetriever.formatRequestBody("  ");
    assertEquals(expected, actual);
}
|
/**
 * Parses a URL-encoded query string of {@code field:value} tokens into a
 * {@link SearchQuery}, translating known field names through {@code dbFieldMapping}.
 *
 * <p>Bare terms (no ':') are matched against the default field. A leading '-' on a
 * field name negates the match. Unknown field names are recorded as disallowed keys
 * and their values fall back to the default field. Comma-separated values produce
 * one entry per value.
 */
public SearchQuery parse(String encodedQueryString) {
    // "*" (or an empty string) means match-all; skip tokenizing entirely.
    if (Strings.isNullOrEmpty(encodedQueryString) || "*".equals(encodedQueryString)) {
        return new SearchQuery(encodedQueryString);
    }
    final var queryString = URLDecoder.decode(encodedQueryString, StandardCharsets.UTF_8);
    final Matcher matcher = querySplitterMatcher(requireNonNull(queryString).trim());
    final ImmutableMultimap.Builder<String, FieldValue> builder = ImmutableMultimap.builder();
    final ImmutableSet.Builder<String> disallowedKeys = ImmutableSet.builder();
    while (matcher.find()) {
        final String entry = matcher.group();
        // Bare term without a field name: search it in the default field.
        if (!entry.contains(":")) {
            builder.put(withPrefixIfNeeded(defaultField), createFieldValue(defaultFieldKey.getFieldType(), entry, false));
            continue;
        }
        final Iterator<String> entryFields = FIELD_VALUE_SPLITTER.splitToList(entry).iterator();
        checkArgument(entryFields.hasNext(), INVALID_ENTRY_MESSAGE, entry);
        final String key = entryFields.next();
        // Skip if there are no valid k/v pairs. (i.e. "action:")
        if (!entryFields.hasNext()) {
            continue;
        }
        // "-field:value" negates the condition.
        final boolean negate = key.startsWith("-");
        final String cleanKey = key.replaceFirst("^-", "");
        final String value = entryFields.next();
        VALUE_SPLITTER.splitToList(value).forEach(v -> {
            if (!dbFieldMapping.containsKey(cleanKey)) {
                disallowedKeys.add(cleanKey);
            }
            final SearchQueryField translatedKey = dbFieldMapping.get(cleanKey);
            if (translatedKey != null) {
                builder.put(withPrefixIfNeeded(translatedKey.getDbField()), createFieldValue(translatedKey.getFieldType(), v, negate));
            } else {
                // Unknown key: fall back to searching the value in the default field.
                builder.put(withPrefixIfNeeded(defaultField), createFieldValue(defaultFieldKey.getFieldType(), v, negate));
            }
        });
        checkArgument(!entryFields.hasNext(), INVALID_ENTRY_MESSAGE, entry);
    }
    return new SearchQuery(queryString, builder.build(), disallowedKeys.build());
}
|
// Field names present in the mapping must be translated to their DB column names,
// both in the parsed query map and in the resulting DB query conditions.
@Test
void mappedFields() {
    SearchQueryParser parser = new SearchQueryParser("defaultfield",
            ImmutableMap.of(
                    "name", SearchQueryField.create("index_name"),
                    "id", SearchQueryField.create("real_id"))
    );
    final SearchQuery query = parser.parse("name:foo id:1234");
    final Multimap<String, SearchQueryParser.FieldValue> queryMap = query.getQueryMap();
    assertThat(queryMap.size()).isEqualTo(2);
    // Keys are the translated DB field names, not the user-facing ones.
    assertThat(queryMap.keySet()).containsOnly("index_name", "real_id");
    assertThat(queryMap.get("index_name")).containsOnly(new SearchQueryParser.FieldValue("foo", false));
    assertThat(queryMap.get("real_id")).containsOnly(new SearchQueryParser.FieldValue("1234", false));
    // Both keys were known, so nothing is flagged as disallowed.
    assertThat(query.hasDisallowedKeys()).isFalse();
    final DBQuery.Query dbQuery = query.toDBQuery();
    final Collection<String> fieldNames = extractFieldNames(dbQuery.conditions());
    assertThat(fieldNames).containsOnly("index_name", "real_id");
}
|
/**
 * Aggregates per-partition double column statistics into a single
 * {@link ColumnStatisticsObj}.
 *
 * <p>Two paths: when all requested partitions have stats (or there is at most one
 * stats object), values are merged directly; otherwise the partial stats are
 * extrapolated across the full partition list. NDV is taken from merged bitvector
 * estimators when every partition supplies a mergeable one, else estimated from
 * bounds (optionally via the density function).
 *
 * @param colStatsWithSourceInfo per-partition stats with their source partition names
 * @param partNames all partition names the aggregation should cover
 * @param areAllPartsFound whether stats were found for every requested partition
 * @throws MetaException if the input stats list fails validation
 */
@Override
public ColumnStatisticsObj aggregate(List<ColStatsObjWithSourceInfo> colStatsWithSourceInfo,
    List<String> partNames, boolean areAllPartsFound) throws MetaException {
  checkStatisticsList(colStatsWithSourceInfo);
  ColumnStatisticsObj statsObj = null;
  String colType;
  String colName = null;
  // check if all the ColumnStatisticsObjs contain stats and all the ndv are
  // bitvectors
  boolean doAllPartitionContainStats = partNames.size() == colStatsWithSourceInfo.size();
  NumDistinctValueEstimator ndvEstimator = null;
  boolean areAllNDVEstimatorsMergeable = true;
  for (ColStatsObjWithSourceInfo csp : colStatsWithSourceInfo) {
    ColumnStatisticsObj cso = csp.getColStatsObj();
    if (statsObj == null) {
      // First object fixes the column name/type for the aggregate result.
      colName = cso.getColName();
      colType = cso.getColType();
      statsObj = ColumnStatsAggregatorFactory.newColumnStaticsObj(colName, colType,
          cso.getStatsData().getSetField());
      LOG.trace("doAllPartitionContainStats for column: {} is: {}", colName,
          doAllPartitionContainStats);
    }
    DoubleColumnStatsDataInspector columnStatsData = doubleInspectorFromStats(cso);
    // check if we can merge NDV estimators
    if (columnStatsData.getNdvEstimator() == null) {
      areAllNDVEstimatorsMergeable = false;
      break;
    } else {
      NumDistinctValueEstimator estimator = columnStatsData.getNdvEstimator();
      if (ndvEstimator == null) {
        ndvEstimator = estimator;
      } else {
        if (!ndvEstimator.canMerge(estimator)) {
          areAllNDVEstimatorsMergeable = false;
          break;
        }
      }
    }
  }
  if (areAllNDVEstimatorsMergeable && ndvEstimator != null) {
    // Start from an empty estimator of the same kind; partitions are merged in below.
    ndvEstimator = NumDistinctValueEstimatorFactory.getEmptyNumDistinctValueEstimator(ndvEstimator);
  }
  LOG.debug("all of the bit vectors can merge for {} is {}", colName, areAllNDVEstimatorsMergeable);
  ColumnStatisticsData columnStatisticsData = initColumnStatisticsData();
  if (doAllPartitionContainStats || colStatsWithSourceInfo.size() < 2) {
    // Direct merge path: every partition has stats (or there is only one).
    DoubleColumnStatsDataInspector aggregateData = null;
    long lowerBound = 0;
    long higherBound = 0;
    double densityAvgSum = 0.0;
    DoubleColumnStatsMerger merger = new DoubleColumnStatsMerger();
    for (ColStatsObjWithSourceInfo csp : colStatsWithSourceInfo) {
      ColumnStatisticsObj cso = csp.getColStatsObj();
      DoubleColumnStatsDataInspector newData = doubleInspectorFromStats(cso);
      // NDV bounds: at least the max per-partition NDV, at most the sum of all NDVs.
      lowerBound = Math.max(lowerBound, newData.getNumDVs());
      higherBound += newData.getNumDVs();
      // NOTE(review): if getNumDVs() is 0 this divides by zero (yields Infinity for
      // doubles) — presumably upstream validation prevents that; confirm.
      densityAvgSum += (newData.getHighValue() - newData.getLowValue()) / newData.getNumDVs();
      if (areAllNDVEstimatorsMergeable && ndvEstimator != null) {
        ndvEstimator.mergeEstimators(newData.getNdvEstimator());
      }
      if (aggregateData == null) {
        aggregateData = newData.deepCopy();
      } else {
        aggregateData.setLowValue(merger.mergeLowValue(
            merger.getLowValue(aggregateData), merger.getLowValue(newData)));
        aggregateData.setHighValue(merger.mergeHighValue(
            merger.getHighValue(aggregateData), merger.getHighValue(newData)));
        aggregateData.setNumNulls(merger.mergeNumNulls(aggregateData.getNumNulls(), newData.getNumNulls()));
        aggregateData.setNumDVs(merger.mergeNumDVs(aggregateData.getNumDVs(), newData.getNumDVs()));
      }
    }
    if (areAllNDVEstimatorsMergeable && ndvEstimator != null) {
      // if all the ColumnStatisticsObjs contain bitvectors, we do not need to
      // use uniform distribution assumption because we can merge bitvectors
      // to get a good estimation.
      aggregateData.setNumDVs(ndvEstimator.estimateNumDistinctValues());
    } else {
      long estimation;
      if (useDensityFunctionForNDVEstimation) {
        // We have estimation, lowerbound and higherbound. We use estimation
        // if it is between lowerbound and higherbound.
        double densityAvg = densityAvgSum / partNames.size();
        estimation = (long) ((aggregateData.getHighValue() - aggregateData.getLowValue()) / densityAvg);
        if (estimation < lowerBound) {
          estimation = lowerBound;
        } else if (estimation > higherBound) {
          estimation = higherBound;
        }
      } else {
        // Interpolate between the bounds using the configured tuner (0..1).
        estimation = (long) (lowerBound + (higherBound - lowerBound) * ndvTuner);
      }
      aggregateData.setNumDVs(estimation);
    }
    columnStatisticsData.setDoubleStats(aggregateData);
  } else {
    // TODO: bail out if missing stats are over a certain threshold
    // we need extrapolation
    LOG.debug("start extrapolation for {}", colName);
    Map<String, Integer> indexMap = new HashMap<>();
    for (int index = 0; index < partNames.size(); index++) {
      indexMap.put(partNames.get(index), index);
    }
    Map<String, Double> adjustedIndexMap = new HashMap<>();
    Map<String, ColumnStatisticsData> adjustedStatsMap = new HashMap<>();
    // while we scan the css, we also get the densityAvg, lowerbound and
    // higherbound when useDensityFunctionForNDVEstimation is true.
    double densityAvgSum = 0.0;
    if (!areAllNDVEstimatorsMergeable) {
      // if not every partition uses bitvector for ndv, we just fall back to
      // the traditional extrapolation methods.
      for (ColStatsObjWithSourceInfo csp : colStatsWithSourceInfo) {
        ColumnStatisticsObj cso = csp.getColStatsObj();
        String partName = csp.getPartName();
        DoubleColumnStatsData newData = cso.getStatsData().getDoubleStats();
        if (useDensityFunctionForNDVEstimation && newData.isSetLowValue() && newData.isSetHighValue()) {
          densityAvgSum += (newData.getHighValue() - newData.getLowValue()) / newData.getNumDVs();
        }
        adjustedIndexMap.put(partName, (double) indexMap.get(partName));
        adjustedStatsMap.put(partName, cso.getStatsData());
      }
    } else {
      // we first merge all the adjacent bitvectors that we could merge and
      // derive new partition names and index.
      StringBuilder pseudoPartName = new StringBuilder();
      double pseudoIndexSum = 0;
      int length = 0;
      int curIndex = -1;
      DoubleColumnStatsData aggregateData = null;
      for (ColStatsObjWithSourceInfo csp : colStatsWithSourceInfo) {
        ColumnStatisticsObj cso = csp.getColStatsObj();
        String partName = csp.getPartName();
        DoubleColumnStatsDataInspector newData =
            doubleInspectorFromStats(cso);
        // newData.isSetBitVectors() should be true for sure because we
        // already checked it before.
        if (indexMap.get(partName) != curIndex) {
          // There is bitvector, but it is not adjacent to the previous ones.
          if (length > 0) {
            // we have to set ndv
            adjustedIndexMap.put(pseudoPartName.toString(), pseudoIndexSum / length);
            aggregateData.setNumDVs(ndvEstimator.estimateNumDistinctValues());
            ColumnStatisticsData csd = new ColumnStatisticsData();
            csd.setDoubleStats(aggregateData);
            adjustedStatsMap.put(pseudoPartName.toString(), csd);
            if (useDensityFunctionForNDVEstimation) {
              densityAvgSum += (aggregateData.getHighValue() - aggregateData.getLowValue()) / aggregateData.getNumDVs();
            }
            // reset everything
            pseudoPartName = new StringBuilder();
            pseudoIndexSum = 0;
            length = 0;
            ndvEstimator = NumDistinctValueEstimatorFactory.getEmptyNumDistinctValueEstimator(ndvEstimator);
          }
          aggregateData = null;
        }
        curIndex = indexMap.get(partName);
        pseudoPartName.append(partName);
        pseudoIndexSum += curIndex;
        length++;
        curIndex++;
        if (aggregateData == null) {
          aggregateData = newData.deepCopy();
        } else {
          aggregateData.setLowValue(Math.min(aggregateData.getLowValue(), newData.getLowValue()));
          aggregateData.setHighValue(Math.max(aggregateData.getHighValue(),
              newData.getHighValue()));
          aggregateData.setNumNulls(aggregateData.getNumNulls() + newData.getNumNulls());
        }
        ndvEstimator.mergeEstimators(newData.getNdvEstimator());
      }
      // Flush the final pseudo-partition run, if any.
      if (length > 0) {
        // we have to set ndv
        adjustedIndexMap.put(pseudoPartName.toString(), pseudoIndexSum / length);
        aggregateData.setNumDVs(ndvEstimator.estimateNumDistinctValues());
        ColumnStatisticsData csd = new ColumnStatisticsData();
        csd.setDoubleStats(aggregateData);
        adjustedStatsMap.put(pseudoPartName.toString(), csd);
        if (useDensityFunctionForNDVEstimation) {
          densityAvgSum += (aggregateData.getHighValue() - aggregateData.getLowValue()) / aggregateData.getNumDVs();
        }
      }
    }
    extrapolate(columnStatisticsData, partNames.size(), colStatsWithSourceInfo.size(),
        adjustedIndexMap, adjustedStatsMap, densityAvgSum / adjustedStatsMap.size());
  }
  LOG.debug(
      "Ndv estimation for {} is {}. # of partitions requested: {}. # of partitions found: {}",
      colName, columnStatisticsData.getDoubleStats().getNumDVs(), partNames.size(),
      colStatsWithSourceInfo.size());
  // Merge per-partition KLL histograms into the aggregate when available.
  KllHistogramEstimator mergedKllHistogramEstimator = mergeHistograms(colStatsWithSourceInfo);
  if (mergedKllHistogramEstimator != null) {
    columnStatisticsData.getDoubleStats().setHistogram(mergedKllHistogramEstimator.serialize());
  }
  statsObj.setStatsData(columnStatisticsData);
  return statsObj;
}
|
// Aggregating a single partition's stats must return those stats unchanged,
// regardless of the density-function or ndvTuner settings.
@Test
public void testAggregateSingleStatWhenNullValues() throws MetaException {
    List<String> partitions = Collections.singletonList("part1");
    ColumnStatisticsData data1 = new ColStatsBuilder<>(double.class).numNulls(1).numDVs(2).build();
    List<ColStatsObjWithSourceInfo> statsList =
        Collections.singletonList(createStatsWithInfo(data1, TABLE, COL, partitions.get(0)));
    DoubleColumnStatsAggregator aggregator = new DoubleColumnStatsAggregator();
    ColumnStatisticsObj computedStatsObj = aggregator.aggregate(statsList, partitions, true);
    assertEqualStatistics(data1, computedStatsObj.getStatsData());
    aggregator.useDensityFunctionForNDVEstimation = true;
    computedStatsObj = aggregator.aggregate(statsList, partitions, true);
    assertEqualStatistics(data1, computedStatsObj.getStatsData());
    aggregator.useDensityFunctionForNDVEstimation = false;
    aggregator.ndvTuner = 1;
    // ndv tuner does not have any effect because min numDVs and max numDVs coincide (we have a single stats)
    computedStatsObj = aggregator.aggregate(statsList, partitions, true);
    assertEqualStatistics(data1, computedStatsObj.getStatsData());
}
|
/**
 * Converts a human-readable size such as "5MB", "1,5KB" or "34B" into a byte count.
 *
 * @param displaySize display string; "," is accepted as the decimal separator
 * @return the size in bytes, or -1 when the input is null or unparsable
 */
public long displaySizeToByteCount( String displaySize ) {
  // Normalize the international decimal separator ("," -> ".") before matching.
  String normalized = ( displaySize == null ) ? "" : displaySize.replace( ",", "." );
  Matcher sizeMatcher = Pattern.compile( "([\\d.]+)([GMK]?B)", Pattern.CASE_INSENSITIVE ).matcher( normalized );
  if ( !sizeMatcher.find() ) {
    return -1;
  }
  // Exponent of 1024 for each supported unit suffix.
  Map<String, Integer> unitExponents = new HashMap<>();
  unitExponents.put( "GB", 3 );
  unitExponents.put( "MB", 2 );
  unitExponents.put( "KB", 1 );
  unitExponents.put( "B", 0 );
  int exponent = unitExponents.get( sizeMatcher.group( 2 ).toUpperCase() );
  BigDecimal byteCount = new BigDecimal( sizeMatcher.group( 1 ) )
      .multiply( BigDecimal.valueOf( 1024 ).pow( exponent ) );
  return byteCount.longValue();
}
|
// Whole-number sizes for each unit must convert exactly; inputs without a leading
// number (or null/empty) must return the -1 sentinel.
@Test
public void displaySizeToByteCount_wholeNumbers() {
    StorageUnitConverter storageUnitConverter = new StorageUnitConverter();
    // TEST 1: whole number Byte
    assertEquals( 34 * B, storageUnitConverter.displaySizeToByteCount( "34B" ) );
    // TEST 2: whole number kilobyte
    assertEquals( 121 * KB, storageUnitConverter.displaySizeToByteCount( "121KB" ) );
    // TEST 3: whole number megabyte
    assertEquals( 5 * MB, storageUnitConverter.displaySizeToByteCount( "5MB" ) );
    // TEST 4: whole number gigabyte
    assertEquals( 2 * GB, storageUnitConverter.displaySizeToByteCount( "2GB" ) );
    // TEST 5: missing number
    assertEquals( -1, storageUnitConverter.displaySizeToByteCount( "GB" ) );
    // TEST 6: missing number
    assertEquals( -1, storageUnitConverter.displaySizeToByteCount( "B" ) );
    // TEST 7: letter for number
    assertEquals( -1, storageUnitConverter.displaySizeToByteCount( "XGB" ) );
    // TEST 8: 0
    assertEquals( 0, storageUnitConverter.displaySizeToByteCount( "0KB" ) );
    // TEST 9: empty string
    assertEquals( -1L, storageUnitConverter.displaySizeToByteCount( "" ) );
    // TEST 10: null
    assertEquals( -1L, storageUnitConverter.displaySizeToByteCount( null ) );
}
|
/**
 * Creates a new user entry and persists the user/group property files.
 *
 * @param username   unique user name; must not already exist
 * @param password   plain-text password; stored as-is for {@code CLEAR}, otherwise encrypted
 * @param realm      target realm; validated/defaulted by {@code checkRealm}
 * @param encryption how the password should be stored
 * @param userGroups groups to assign, joined with ','; may be null for no groups
 * @param algorithms digest algorithms used when encrypting; semantics defined by encryptPassword
 */
public void createUser(String username, String password, String realm, Encryption encryption, List<String> userGroups, List<String> algorithms) {
    if (users.containsKey(username)) {
        throw MSG.userToolUserExists(username);
    }
    realm = checkRealm(realm);
    // CLEAR stores the raw password; every other mode stores the encrypted form.
    users.put(username, Encryption.CLEAR.equals(encryption) ? password : encryptPassword(username, realm, password, algorithms));
    groups.put(username, userGroups != null ? String.join(",", userGroups) : "");
    store(realm, encryption);
}
|
// With CLEAR encryption the password must be written verbatim to users.properties
// and the group assignment to groups.properties.
@Test
public void testUserToolClearText() throws IOException {
    UserTool userTool = new UserTool(serverDirectory.getAbsolutePath());
    userTool.createUser("user", "password", UserTool.DEFAULT_REALM_NAME, UserTool.Encryption.CLEAR, Collections.singletonList("admin"), null);
    Properties users = loadProperties("users.properties");
    assertEquals(1, users.size());
    assertEquals("password", users.getProperty("user"));
    Properties groups = loadProperties("groups.properties");
    assertEquals(1, groups.size());
    assertEquals("admin", groups.getProperty("user"));
}
|
@Override
public TypeDescriptor<Deque<T>> getEncodedTypeDescriptor() {
    // Bind the element type parameter T using the element coder's descriptor so the
    // result is Deque<ConcreteElementType> rather than a descriptor with an unresolved T.
    return new TypeDescriptor<Deque<T>>(getClass()) {}.where(
        new TypeParameter<T>() {}, getElemCoder().getEncodedTypeDescriptor());
}
|
// The coder must report a fully-resolved Deque<Integer> descriptor, not a raw Deque.
@Test
public void testEncodedTypeDescriptor() throws Exception {
    TypeDescriptor<Deque<Integer>> typeDescriptor = new TypeDescriptor<Deque<Integer>>() {};
    assertThat(TEST_CODER.getEncodedTypeDescriptor(), equalTo(typeDescriptor));
}
|
/**
 * Reads the data stored at the lock znode, lazily creating the ZooKeeper
 * connection first if needed.
 *
 * @return the bytes stored at the lock file path
 * @throws ActiveNotFoundException if the lock znode does not exist (no active node)
 * @throws KeeperException for any other ZooKeeper failure
 */
public synchronized byte[] getActiveData() throws ActiveNotFoundException,
    KeeperException, InterruptedException, IOException {
  try {
    if (zkClient == null) {
      createConnection();
    }
    Stat stat = new Stat();
    return getDataWithRetries(zkLockFilePath, false, stat);
  } catch(KeeperException e) {
    Code code = e.code();
    if (isNodeDoesNotExist(code)) {
      // handle the commonly expected cases that make sense for us
      throw new ActiveNotFoundException();
    } else {
      throw e;
    }
  }
}
|
// Covers the three getActiveData paths: success, NoNode mapped to
// ActiveNotFoundException, and any other KeeperException rethrown as-is.
@Test
public void testGetActiveData() throws ActiveNotFoundException,
    KeeperException, InterruptedException, IOException {
  // get valid active data
  // NOTE(review): assertEquals on byte[] here passes only because the mock returns
  // the same array instance; it is a reference comparison, not a content one.
  byte[] data = new byte[8];
  Mockito.when(
      mockZK.getData(Mockito.eq(ZK_LOCK_NAME), Mockito.eq(false),
          any())).thenReturn(data);
  Assert.assertEquals(data, elector.getActiveData());
  Mockito.verify(mockZK, Mockito.times(1)).getData(
      Mockito.eq(ZK_LOCK_NAME), Mockito.eq(false), any());
  // active does not exist
  Mockito.when(
      mockZK.getData(Mockito.eq(ZK_LOCK_NAME), Mockito.eq(false),
          any())).thenThrow(
      new KeeperException.NoNodeException());
  try {
    elector.getActiveData();
    Assert.fail("ActiveNotFoundException expected");
  } catch(ActiveNotFoundException e) {
    Mockito.verify(mockZK, Mockito.times(2)).getData(
        Mockito.eq(ZK_LOCK_NAME), Mockito.eq(false), any());
  }
  // error getting active data rethrows keeperexception
  try {
    Mockito.when(
        mockZK.getData(Mockito.eq(ZK_LOCK_NAME), Mockito.eq(false),
            any())).thenThrow(
        new KeeperException.AuthFailedException());
    elector.getActiveData();
    Assert.fail("KeeperException.AuthFailedException expected");
  } catch(KeeperException.AuthFailedException ke) {
    Mockito.verify(mockZK, Mockito.times(3)).getData(
        Mockito.eq(ZK_LOCK_NAME), Mockito.eq(false), any());
  }
}
|
@Udf(description = "Returns the cosine of an INT value")
public Double cos(
    @UdfParameter(
        value = "value",
        description = "The value in radians to get the cosine of."
    ) final Integer value
) {
    // Delegate to the Double overload; a null input is propagated as null
    // rather than throwing.
    return cos(value == null ? null : value.doubleValue());
}
|
// Exercises the Double, Integer and Long overloads against known cosine values,
// using closeTo to tolerate floating-point rounding.
@Test
public void shouldHandlePositive() {
    assertThat(udf.cos(0.43), closeTo(0.9089657496748851, 0.000000000000001));
    assertThat(udf.cos(Math.PI), closeTo(-1.0, 0.000000000000001));
    assertThat(udf.cos(2 * Math.PI), closeTo(1.0, 0.000000000000001));
    assertThat(udf.cos(6), closeTo(0.960170286650366, 0.000000000000001));
    assertThat(udf.cos(6L), closeTo(0.960170286650366, 0.000000000000001));
}
|
@Override
@GetMapping("/presigned-url")
@PreAuthorize("isAuthenticated()")
public ResponseEntity<?> getPresignedUrl(@Validated PresignedUrlDto.Req request, @AuthenticationPrincipal SecurityUserDetails user) {
    // Delegate to the storage use case with the authenticated user's id.
    return ResponseEntity.ok(storageUseCase.getPresignedUrl(user.getUserId(), request));
}
|
// A CHATROOM_PROFILE request without a chatroom id is invalid: the use case throws
// MISSING_REQUIRED_PARAMETER, which must surface as HTTP 400.
@Test
@WithSecurityMockUser
@DisplayName("Type이 CHATROOM_PROFILE이고, ChatroomId가 NULL일 때 400 응답을 반환한다.")
void getPresignedUrlWithNullChatroomIdForChatroomProfile() throws Exception {
    // given
    PresignedUrlDto.Req request = new PresignedUrlDto.Req("CHATROOM_PROFILE", "jpg", null);
    given(storageUseCase.getPresignedUrl(1L, request)).willThrow(new StorageException(StorageErrorCode.MISSING_REQUIRED_PARAMETER));
    // when
    ResultActions resultActions = getPresignedUrlRequest(request);
    // then
    resultActions.andExpect(status().isBadRequest());
}
|
/**
 * Reconstructs the request URI, caching the result for immutable instances.
 */
@Override
public String reconstructURI() {
    // Mutable instances may change between calls, so recompute every time.
    if (!immutable) {
        return _reconstructURI();
    }
    // Immutable instance: compute once, then serve the cached value.
    if (reconstructedUri == null) {
        reconstructedUri = _reconstructURI();
    }
    return reconstructedUri;
}
|
// Exercises URI reconstruction across header scenarios: Host header, X-Forwarded-*
// overrides (host/port/proto), fallback to server name/port, and query/path encoding.
@Test
void testReconstructURI() {
    // Scenario 1: Host header supplies host; scheme/port come from the request.
    HttpQueryParams queryParams = new HttpQueryParams();
    queryParams.add("flag", "5");
    Headers headers = new Headers();
    headers.add("Host", "blah.netflix.com");
    request = new HttpRequestMessageImpl(
            new SessionContext(),
            "HTTP/1.1",
            "POST",
            "/some/where",
            queryParams,
            headers,
            "192.168.0.2",
            "https",
            7002,
            "localhost");
    assertEquals("https://blah.netflix.com:7002/some/where?flag=5", request.reconstructURI());
    // Scenario 2: X-Forwarded-Host/Port take precedence; default port 80 is elided.
    queryParams = new HttpQueryParams();
    headers = new Headers();
    headers.add("X-Forwarded-Host", "place.netflix.com");
    headers.add("X-Forwarded-Port", "80");
    request = new HttpRequestMessageImpl(
            new SessionContext(),
            "HTTP/1.1",
            "POST",
            "/some/where",
            queryParams,
            headers,
            "192.168.0.2",
            "http",
            7002,
            "localhost");
    assertEquals("http://place.netflix.com/some/where", request.reconstructURI());
    // Scenario 3: X-Forwarded-Proto overrides the scheme; default 443 is elided.
    queryParams = new HttpQueryParams();
    headers = new Headers();
    headers.add("X-Forwarded-Host", "place.netflix.com");
    headers.add("X-Forwarded-Proto", "https");
    headers.add("X-Forwarded-Port", "443");
    request = new HttpRequestMessageImpl(
            new SessionContext(),
            "HTTP/1.1",
            "POST",
            "/some/where",
            queryParams,
            headers,
            "192.168.0.2",
            "http",
            7002,
            "localhost");
    assertEquals("https://place.netflix.com/some/where", request.reconstructURI());
    // Scenario 4: no host headers at all -> fall back to server name and port.
    queryParams = new HttpQueryParams();
    headers = new Headers();
    request = new HttpRequestMessageImpl(
            new SessionContext(),
            "HTTP/1.1",
            "POST",
            "/some/where",
            queryParams,
            headers,
            "192.168.0.2",
            "http",
            7002,
            "localhost");
    assertEquals("http://localhost:7002/some/where", request.reconstructURI());
    // Scenario 5: encoded path is preserved and query params are form-encoded.
    queryParams = new HttpQueryParams();
    queryParams.add("flag", "5");
    queryParams.add("flag B", "9");
    headers = new Headers();
    request = new HttpRequestMessageImpl(
            new SessionContext(),
            "HTTP/1.1",
            "POST",
            "/some%20where",
            queryParams,
            headers,
            "192.168.0.2",
            "https",
            7002,
            "localhost");
    assertEquals("https://localhost:7002/some%20where?flag=5&flag+B=9", request.reconstructURI());
}
|
/**
 * Conditionally deletes the push challenge stored for the given account.
 *
 * <p>The delete succeeds only when the stored token matches {@code challengeToken}
 * and its TTL has not yet expired (checked against the injected clock).
 *
 * @return {@code true} if a matching, unexpired challenge was deleted;
 *         {@code false} if the condition failed
 */
public boolean remove(final UUID accountUuid, final byte[] challengeToken) {
    try {
        db().deleteItem(DeleteItemRequest.builder()
                .tableName(tableName)
                .key(Map.of(KEY_ACCOUNT_UUID, AttributeValues.fromUUID(accountUuid)))
                .conditionExpression("#challenge = :challenge AND #ttl >= :currentTime")
                .expressionAttributeNames(CHALLENGE_TOKEN_NAME_MAP)
                .expressionAttributeValues(Map.of(":challenge", AttributeValues.fromByteArray(challengeToken),
                        ":currentTime", AttributeValues.fromLong(clock.instant().getEpochSecond())))
                .build());
        return true;
    } catch (final ConditionalCheckFailedException e) {
        // Either no challenge exists, the token differs, or the TTL has passed.
        return false;
    }
}
|
// remove() must fail when nothing is stored, succeed for a live challenge,
// and fail for an already-expired one (negative TTL).
@Test
void remove() {
    final UUID uuid = UUID.randomUUID();
    final byte[] token = generateRandomToken();
    assertFalse(pushChallengeDynamoDb.remove(uuid, token));
    assertTrue(pushChallengeDynamoDb.add(uuid, token, Duration.ofMinutes(1)));
    assertTrue(pushChallengeDynamoDb.remove(uuid, token));
    // Expired challenge: add with a TTL already in the past, then attempt removal.
    assertTrue(pushChallengeDynamoDb.add(uuid, token, Duration.ofMinutes(-1)));
    assertFalse(pushChallengeDynamoDb.remove(uuid, token));
}
|
/**
 * Processes one MySQL protocol command: reads a packet from the channel,
 * dispatches it, finalizes the response, and resets the command to COM_SLEEP.
 *
 * @throws IOException on packet read failure (wrapped as RpcException for null packets)
 */
public void processOnce() throws IOException {
    // set status of query to OK.
    ctx.getState().reset();
    executor = null;
    // reset sequence id of MySQL protocol
    final MysqlChannel channel = ctx.getMysqlChannel();
    channel.setSequenceId(0);
    // read packet from channel
    try {
        packetBuf = channel.fetchOnePacket();
        if (packetBuf == null) {
            throw new RpcException(ctx.getRemoteIP(), "Error happened when receiving packet.");
        }
    } catch (AsynchronousCloseException e) {
        // when this happened, timeout checker close this channel
        // killed flag in ctx has been already set, just return
        return;
    }
    // dispatch
    dispatch();
    // finalize
    finalizeCommand();
    ctx.setCommand(MysqlCommand.COM_SLEEP);
}
|
// A COM_FIELD_LIST packet with an empty table name must produce a MySQL error
// packet with the "Empty tableName" message.
@Test
public void testFieldListFailEmptyTable() throws Exception {
    // Build a COM_FIELD_LIST (command byte 4) packet with empty table and wildcard.
    MysqlSerializer serializer = MysqlSerializer.newInstance();
    serializer.writeInt1(4);
    serializer.writeNulTerminateString("");
    serializer.writeEofString("");
    ConnectContext ctx = initMockContext(mockChannel(serializer.toByteBuffer()),
            GlobalStateMgr.getCurrentState());
    ConnectProcessor processor = new ConnectProcessor(ctx);
    processor.processOnce();
    Assert.assertEquals(MysqlCommand.COM_FIELD_LIST, myContext.getCommand());
    Assert.assertTrue(myContext.getState().toResponsePacket() instanceof MysqlErrPacket);
    Assert.assertEquals("Empty tableName", myContext.getState().getErrorMessage());
}
|
/**
 * Removes all Data.NULL values from the given DataMap in place (the test below
 * shows nested maps and lists are cleaned as well, via the NullRemover traversal).
 *
 * @param dataMap the map to clean; mutated directly
 * @throws RuntimeException wrapping any IOException raised during traversal
 */
public static void removeNulls(DataMap dataMap)
{
  try
  {
    Data.traverse(dataMap, new NullRemover());
  }
  catch (IOException ioe)
  {
    // Traversal is in-memory; an IOException is unexpected, so surface it unchecked.
    throw new RuntimeException(ioe);
  }
}
|
// removeNulls must strip Data.NULL entries at every nesting level — top-level
// values, list elements, map values, lists-of-lists, and nested records —
// while leaving all other entries untouched.
@Test
public void dataMapCleanUp()
{
  // Input containing Data.NULL at several depths.
  DataMap originalDataMap = new DataMap(ImmutableMap.<String, Object>builder()
      .put("float", 0F)
      .put("string", "str")
      .put("integer", 1)
      .put("long", 2L)
      .put("double", Data.NULL)
      .put("boolean", false)
      .put("array", new DataList(ImmutableList.of(100L, 110L, Data.NULL)))
      .put("map", new DataMap(ImmutableMap.of(
          "20", "200",
          "21", "210",
          "22", Data.NULL)))
      .put("arrayofarray", new DataList(ImmutableList.of(
          new DataList(ImmutableList.of(100L, 110L)),
          new DataList(ImmutableList.of(500, Data.NULL)))))
      .put("innerRecord", new DataMap(ImmutableMap.of(
          "float", 30.0F,
          "string", "str2",
          "innerInnerRecord", new DataMap(ImmutableMap.of(
              "float", 40.0F,
              "integer", Data.NULL)))))
      .build());
  DataMapUtils.removeNulls(originalDataMap);
  // Expected result: identical structure with every Data.NULL entry removed.
  DataMap cleanedDataMap = new DataMap(ImmutableMap.<String, Object>builder()
      .put("float", 0F)
      .put("string", "str")
      .put("integer", 1)
      .put("long", 2L)
      .put("boolean", false)
      .put("array", new DataList(ImmutableList.of(100L, 110L)))
      .put("map", new DataMap(ImmutableMap.of(
          "20", "200",
          "21", "210")))
      .put("arrayofarray", new DataList(ImmutableList.of(
          new DataList(ImmutableList.of(100L, 110L)),
          new DataList(ImmutableList.of(500)))))
      .put("innerRecord", new DataMap(ImmutableMap.of(
          "float", 30.0F,
          "string", "str2",
          "innerInnerRecord", new DataMap(ImmutableMap.of(
              "float", 40.0F)))))
      .build());
  assertEquals(originalDataMap, cleanedDataMap, "DataMap not cleaned up as expected");
}
|
@Override
public void close()
{
    // Shut down OkHttp's dispatcher thread pool and release pooled connections so
    // the client's background threads do not outlive this object.
    httpClient.dispatcher().executorService().shutdown();
    httpClient.connectionPool().evictAll();
}
|
// A ResultSet must report isClosed() == false while open and true after close().
@Test
public void testResultSetClose()
        throws Exception
{
    try (Connection connection = createConnection()) {
        try (Statement statement = connection.createStatement()) {
            assertTrue(statement.execute("SELECT 123 x, 'foo' y"));
            ResultSet result = statement.getResultSet();
            assertFalse(result.isClosed());
            result.close();
            assertTrue(result.isClosed());
        }
    }
}
|
@Override
public <VO, VR> KStream<K, VR> join(final KStream<K, VO> otherStream,
                                    final ValueJoiner<? super V, ? super VO, ? extends VR> joiner,
                                    final JoinWindows windows) {
    // Adapt the key-less joiner to the ValueJoinerWithKey variant and delegate.
    return join(otherStream, toValueJoinerWithKey(joiner), windows);
}
|
// A null ValueJoinerWithKey must be rejected with an NPE carrying the standard
// "joiner can't be null" message. (Deprecation suppressed for JoinWindows.of.)
@SuppressWarnings("deprecation")
@Test
public void shouldNotAllowNullValueJoinerWithKeyOnJoin() {
    final NullPointerException exception = assertThrows(
        NullPointerException.class,
        () -> testStream.join(testStream, (ValueJoinerWithKey<? super String, ? super String, ? super String, ?>) null, JoinWindows.of(ofMillis(10))));
    assertThat(exception.getMessage(), equalTo("joiner can't be null"));
}
|
/** Returns the (immutable) list of metrics this sensor contributes. */
@Override
public List<Metric> getMetrics() {
    return List.of(PRIORITIZED_RULE_ISSUES);
}
|
@Test
void getMetrics() {
    // The class must publish exactly the prioritized-rule-issues metric.
    assertThat(new IssueCountMetrics().getMetrics())
        .containsExactlyInAnyOrder(PRIORITIZED_RULE_ISSUES);
}
|
/**
 * Formats a millisecond duration as "DDD:HH:MM:SS.mmm" (negative values keep a
 * leading '-'). Delegates to {@link DFSUtilClient#durationToString(long)}.
 */
public static String durationToString(long durationMs) {
    return DFSUtilClient.durationToString(durationMs);
}
|
/** Covers zero, positive, boundary (23:59:59.999) and negative durations. */
@Test(timeout=10000)
public void testDurationToString() throws Exception {
    assertEquals("000:00:00:00.000", DFSUtil.durationToString(0));
    assertEquals("001:01:01:01.000",
        DFSUtil.durationToString(((24*60*60)+(60*60)+(60)+1)*1000));
    assertEquals("000:23:59:59.999",
        DFSUtil.durationToString(((23*60*60)+(59*60)+(59))*1000+999));
    assertEquals("-001:01:01:01.000",
        DFSUtil.durationToString(-((24*60*60)+(60*60)+(60)+1)*1000));
    assertEquals("-000:23:59:59.574",
        DFSUtil.durationToString(-(((23*60*60)+(59*60)+(59))*1000+574)));
}
|
/** Returns true if an explicit partition has been mapped for the given facility. */
public boolean hasPartitionFor(Facility facility) {
    return facilityToPartitionMapping.containsKey(facility);
}
|
@Test
public void partialFacilityListCanBeQueried() {
    // The fixture maps only a subset of facilities; unmapped ones (PVD) must
    // report false while mapped ones (A80) report true.
    File file = getResourceFile("eimFacilityPartitionMapping.properties");
    FacilityPartitionMapping mapping = new FacilityPartitionMapping(loadProperties(file));
    assertThat(mapping.hasPartitionFor(Facility.PVD), is(false));
    assertThat(mapping.hasPartitionFor(Facility.A80), is(true));
}
|
/**
 * Adds {@code delta} to the counter stored under {@code key} and returns the
 * updated value, waiting on the underlying async map via {@code complete(...)}.
 */
@Override
public long addAndGet(K key, long delta) {
    return complete(asyncCounterMap.addAndGet(key, delta));
}
|
@Test
public void testAddAndGet() {
    // Seed a counter, add a delta, and verify the returned value reflects both.
    atomicCounterMap.put(KEY1, VALUE1);
    Long afterIncrement = atomicCounterMap.addAndGet(KEY1, DELTA1);
    assertThat(afterIncrement, is(VALUE1 + DELTA1));
}
|
/**
 * Adds status events derived from the instance's lifecycle timestamps
 * (CREATED at createTime, IN_PROGRESS at startTime, the instance's final
 * status at endTime), then sorts the whole timeline chronologically.
 *
 * NOTE(review): Collections.sort is stable, so relative insertion order decides
 * ties between events sharing a timestamp — preserve the append order if editing.
 */
@JsonIgnore
public void enrich(WorkflowInstance instance) {
    if (instance.getCreateTime() != null) {
      timelineEvents.add(
          TimelineStatusEvent.create(instance.getCreateTime(), WorkflowInstance.Status.CREATED));
    }
    if (instance.getStartTime() != null) {
      timelineEvents.add(
          TimelineStatusEvent.create(instance.getStartTime(), WorkflowInstance.Status.IN_PROGRESS));
    }
    if (instance.getEndTime() != null) {
      timelineEvents.add(TimelineStatusEvent.create(instance.getEndTime(), instance.getStatus()));
    }
    Collections.sort(timelineEvents);
}
|
/**
 * Loads a succeeded step-instance fixture, enriches its timeline, and verifies
 * the full chronological sequence of 12 events (10 status events plus two
 * message events at the tail).
 */
@Test
public void testGetEnrichedStepInstance() throws Exception {
    StepInstance instance =
        loadObject("fixtures/instances/sample-step-instance-succeeded.json", StepInstance.class);
    Timeline timeline = instance.getTimeline();
    timeline.enrich(instance);
    assertEquals(12, instance.getTimeline().getTimelineEvents().size());
    assertEquals(
        TimelineStatusEvent.create(1608749932076L, "CREATED"),
        instance.getTimeline().getTimelineEvents().get(0));
    assertEquals(
        TimelineStatusEvent.create(1608749932078L, "INITIALIZED"),
        instance.getTimeline().getTimelineEvents().get(1));
    assertEquals(
        TimelineStatusEvent.create(1608749932079L, "PAUSED"),
        instance.getTimeline().getTimelineEvents().get(2));
    // Events 3-6 share a timestamp; stable sort preserves their insertion order.
    assertEquals(
        TimelineStatusEvent.create(1608749934142L, "WAITING_FOR_SIGNALS"),
        instance.getTimeline().getTimelineEvents().get(3));
    assertEquals(
        TimelineStatusEvent.create(1608749934142L, "EVALUATING_PARAMS"),
        instance.getTimeline().getTimelineEvents().get(4));
    assertEquals(
        TimelineStatusEvent.create(1608749934142L, "WAITING_FOR_PERMITS"),
        instance.getTimeline().getTimelineEvents().get(5));
    assertEquals(
        TimelineStatusEvent.create(1608749934142L, "STARTING"),
        instance.getTimeline().getTimelineEvents().get(6));
    assertEquals(
        TimelineStatusEvent.create(1608749934147L, "RUNNING"),
        instance.getTimeline().getTimelineEvents().get(7));
    assertEquals(
        TimelineStatusEvent.create(1608749950263L, "FINISHING"),
        instance.getTimeline().getTimelineEvents().get(8));
    assertEquals(
        TimelineStatusEvent.create(1608749950263L, "SUCCEEDED"),
        instance.getTimeline().getTimelineEvents().get(9));
    assertEquals("hello world", instance.getTimeline().getTimelineEvents().get(10).getMessage());
    assertEquals(
        "sample error details", instance.getTimeline().getTimelineEvents().get(11).getMessage());
}
|
/**
 * Builds a ProtobufSchema for the given POJO class and schema properties.
 * Validation that the class is protobuf-generated happens in ProtobufSchema.of.
 */
public static <T> ProtobufSchema ofGenericClass(Class<T> pojo, Map<String, String> properties) {
    return ProtobufSchema.of(SchemaDefinition.<T>builder()
            .withPojo(pojo)
            .withProperties(properties)
            .build());
}
|
/**
 * ofGenericClass must accept a protobuf-generated class and reject any other
 * class. (The original first catch reused the rejection message, asserting the
 * opposite of its intent; messages are now distinct and spelled correctly.)
 */
@Test
public void testGenericOf() {
    try {
        ProtobufSchema<org.apache.pulsar.client.schema.proto.Test.TestMessage> protobufSchema
                = ProtobufSchema.ofGenericClass(org.apache.pulsar.client.schema.proto.Test.TestMessage.class,
                new HashMap<>());
    } catch (Exception e) {
        Assert.fail("Should be able to construct a ProtobufSchema over a protobuf-generated class");
    }
    try {
        ProtobufSchema<org.apache.pulsar.client.schema.proto.Test.TestMessage> protobufSchema
                = ProtobufSchema.ofGenericClass(String.class,
                Collections.emptyMap());
        Assert.fail("Should not construct a ProtobufSchema over a non-protobuf-generated class");
    } catch (Exception expected) {
        // Expected: String is not a protobuf-generated class.
    }
}
|
/**
 * Returns a redistribute transform with no key constraint (null key; second
 * constructor flag false — presumably "allow duplicates"; confirm against the
 * RedistributeArbitrarily constructor).
 */
public static <T> RedistributeArbitrarily<T> arbitrarily() {
    return new RedistributeArbitrarily<>(null, false);
}
|
// NOTE(review): the method name says "SlidingWindows" but the pipeline applies
// FixedWindows — confirm intent or rename the test.
@Test
@Category(ValidatesRunner.class)
public void testRedistributeAfterSlidingWindows() {
    PCollection<KV<String, Integer>> input =
        pipeline
            .apply(
                Create.of(ARBITRARY_KVS)
                    .withCoder(KvCoder.of(StringUtf8Coder.of(), VarIntCoder.of())))
            .apply(Window.into(FixedWindows.of(Duration.standardMinutes(10L))));
    PCollection<KV<String, Integer>> output = input.apply(Redistribute.arbitrarily());
    // Redistribution must preserve both the elements and the windowing strategy.
    PAssert.that(output).containsInAnyOrder(ARBITRARY_KVS);
    assertEquals(input.getWindowingStrategy(), output.getWindowingStrategy());
    pipeline.run();
}
|
/**
 * Loads the departments with the given ids. An empty or null id collection
 * short-circuits to an immutable empty list, avoiding an invalid "IN ()" query.
 */
@Override
public List<DeptDO> getDeptList(Collection<Long> ids) {
    return CollUtil.isEmpty(ids) ? Collections.emptyList() : deptMapper.selectBatchIds(ids);
}
|
@Test
public void testGetDeptList_reqVO() {
    // Mock data: this record should be matched by the query below.
    DeptDO dept = randomPojo(DeptDO.class, o -> {
        o.setName("开发部");
        o.setStatus(CommonStatusEnum.ENABLE.getStatus());
    });
    deptMapper.insert(dept);
    // Should NOT match: name differs.
    deptMapper.insert(ObjectUtils.cloneIgnoreId(dept, o -> o.setName("发")));
    // Should NOT match: status differs.
    deptMapper.insert(ObjectUtils.cloneIgnoreId(dept, o -> o.setStatus(CommonStatusEnum.DISABLE.getStatus())));
    // Prepare request: name prefix + enabled status.
    DeptListReqVO reqVO = new DeptListReqVO();
    reqVO.setName("开");
    reqVO.setStatus(CommonStatusEnum.ENABLE.getStatus());
    // Invoke.
    List<DeptDO> sysDeptDOS = deptService.getDeptList(reqVO);
    // Assert: exactly the one seeded matching record comes back.
    assertEquals(1, sysDeptDOS.size());
    assertPojoEquals(dept, sysDeptDOS.get(0));
}
|
/**
 * Rewrites the current HTTP sampler to target a randomly chosen link, form
 * action, or frame URL harvested from the previous sample's HTML response.
 * No-op (with an info log) when the previous sampler/result are not HTTP.
 */
@Override
public void process() {
    JMeterContext context = getThreadContext();
    Sampler sam = context.getCurrentSampler();
    SampleResult res = context.getPreviousResult();
    HTTPSamplerBase sampler;
    HTTPSampleResult result;
    if (!(sam instanceof HTTPSamplerBase) || !(res instanceof HTTPSampleResult)) {
        log.info("Can't apply HTML Link Parser when the previous" + " sampler run is not an HTTP Request.");
        return;
    } else {
        sampler = (HTTPSamplerBase) sam;
        result = (HTTPSampleResult) res;
    }
    List<HTTPSamplerBase> potentialLinks = new ArrayList<>();
    String responseText = result.getResponseDataAsString();
    // Skip any leading non-markup noise before the first tag.
    int index = responseText.indexOf('<'); // $NON-NLS-1$
    if (index == -1) {
        index = 0;
    }
    if (log.isDebugEnabled()) {
        log.debug("Check for matches against: "+sampler.toString());
    }
    Document html = (Document) HtmlParsingUtils.getDOM(responseText.substring(index));
    // Collect candidate targets from anchors, forms, and framesets.
    addAnchorUrls(html, result, sampler, potentialLinks);
    addFormUrls(html, result, sampler, potentialLinks);
    addFramesetUrls(html, result, sampler, potentialLinks);
    if (!potentialLinks.isEmpty()) {
        // Pick one candidate at random and retarget the sampler at it.
        HTTPSamplerBase url = potentialLinks.get(ThreadLocalRandom.current().nextInt(potentialLinks.size()));
        if (log.isDebugEnabled()) {
            log.debug("Selected: "+url.toString());
        }
        sampler.setDomain(url.getDomain());
        sampler.setPath(url.getPath());
        if (url.getMethod().equals(HTTPConstants.POST)) {
            // POST keeps the sampler's own argument list; modifyArgument
            // presumably overlays values defined by the chosen form — confirm.
            for (JMeterProperty jMeterProperty : sampler.getArguments()) {
                Argument arg = (Argument) jMeterProperty.getObjectValue();
                modifyArgument(arg, url.getArguments());
            }
        } else {
            sampler.setArguments(url.getArguments());
        }
        sampler.setProtocol(url.getProtocol());
    } else {
        log.debug("No matches found");
    }
}
|
@Test
public void testFailSimpleParse3() throws Exception {
    HTTPSamplerBase config = makeUrlConfig("/home/index.html");
    HTTPSamplerBase context = makeContext("http://www.apache.org/subdir/previous.html");
    String responseText = "<html><head><title>Test page</title></head><body>"
        + "<a href=\"/home/index.html?param1=value1\">" + "Goto index page</a></body></html>";
    HTTPSampleResult result = new HTTPSampleResult();
    String newUrl = config.getUrl().toString();
    result.setResponseData(responseText, null);
    result.setSampleLabel(context.toString());
    result.setURL(context.getUrl());
    // NOTE(review): setCurrentSampler is invoked twice (context, then config) —
    // presumably to record a previous/current sampler pair; confirm against
    // JMeterContext semantics.
    jmctx.setCurrentSampler(context);
    jmctx.setCurrentSampler(config);
    jmctx.setPreviousResult(result);
    parser.process();
    // The parser should rewrite config's URL to the anchor's href with its query string.
    assertEquals(newUrl + "?param1=value1", config.getUrl().toString());
}
|
/**
 * Returns a future for the watermark state stored under the given encoded tag
 * and state family. Delegates to stateFuture with a WATERMARK-kind state tag.
 */
public Future<Instant> watermarkFuture(ByteString encodedTag, String stateFamily) {
    return stateFuture(StateTag.of(StateTag.Kind.WATERMARK, encodedTag, stateFamily), null);
}
|
@Test
public void testCachingWithinBatch() throws Exception {
    // Two requests for the same tag within a batch must coalesce into a single
    // pending lookup.
    underTest.watermarkFuture(STATE_KEY_1, STATE_FAMILY);
    underTest.watermarkFuture(STATE_KEY_1, STATE_FAMILY);
    assertEquals(1, underTest.pendingLookups.size());
}
|
/**
 * Converts {@code source} to an instance of {@code clazz} using default copy
 * options (null options passed to the full overload).
 */
public static <T> T toBean(Object source, Class<T> clazz) {
    return toBean(source, clazz, null);
}
|
@Test
public void beanToBeanTest() {
    // Regression test: converting a bean that has no getter methods used to throw.
    final Page page1 = new Page();
    BeanUtil.toBean(page1, Page.class);
}
|
/**
 * Writes the value as a 2-byte MySQL binary-protocol integer. The caller
 * guarantees {@code value} is an Integer.
 */
@Override
public void write(final MySQLPacketPayload payload, final Object value) {
    final int int2Value = (Integer) value;
    payload.writeInt2(int2Value);
}
|
@Test
void assertWrite() {
    // The integer value must be forwarded verbatim to the payload's writeInt2.
    new MySQLInt2BinaryProtocolValue().write(payload, 1);
    verify(payload).writeInt2(1);
}
|
/**
 * Builds and displays a candlestick chart of Bitstamp BTC prices, with an
 * additional line series overlaid on the same price axis.
 */
public static void main(String[] args) {
    /*
     * Getting bar series
     */
    BarSeries series = CsvTradesLoader.loadBitstampSeries();
    /*
     * Creating the OHLC dataset
     */
    OHLCDataset ohlcDataset = createOHLCDataset(series);
    /*
     * Creating the additional dataset
     */
    TimeSeriesCollection xyDataset = createAdditionalDataset(series);
    /*
     * Creating the chart
     */
    JFreeChart chart = ChartFactory.createCandlestickChart("Bitstamp BTC price", "Time", "USD", ohlcDataset, true);
    // Candlestick rendering: size candles to the narrowest bar spacing
    CandlestickRenderer renderer = new CandlestickRenderer();
    renderer.setAutoWidthMethod(CandlestickRenderer.WIDTHMETHOD_SMALLEST);
    XYPlot plot = chart.getXYPlot();
    plot.setRenderer(renderer);
    // Additional dataset mapped to the same (index 0) range axis
    int index = 1;
    plot.setDataset(index, xyDataset);
    plot.mapDatasetToRangeAxis(index, 0);
    XYLineAndShapeRenderer renderer2 = new XYLineAndShapeRenderer(true, false);
    renderer2.setSeriesPaint(index, Color.blue);
    plot.setRenderer(index, renderer2);
    // Misc styling; auto-range without forcing zero so prices fill the axis
    plot.setRangeGridlinePaint(Color.lightGray);
    plot.setBackgroundPaint(Color.white);
    NumberAxis numberAxis = (NumberAxis) plot.getRangeAxis();
    numberAxis.setAutoRangeIncludesZero(false);
    plot.setDatasetRenderingOrder(DatasetRenderingOrder.FORWARD);
    /*
     * Displaying the chart
     */
    displayChart(chart);
}
|
@Test
public void test() {
    // Smoke test: the example must build and display its chart without throwing.
    CandlestickChart.main(null);
}
|
/**
 * Generates a new ledger id asynchronously. Once the 63-bit id-generation path
 * exists, all subsequent ids are long ids; otherwise the legacy short-id path
 * is used. The callback receives OK with the id, or MetaStoreException with -1
 * on any failure.
 */
@Override
public void generateLedgerId(BookkeeperInternalCallbacks.GenericCallback<Long> genericCallback) {
    ledgerIdGenPathPresent()
            .thenCompose(isIdGenPathPresent -> {
                if (isIdGenPathPresent) {
                    // We've already started generating 63-bit ledger IDs.
                    // Keep doing that.
                    return generateLongLedgerId();
                } else {
                    // We've not moved onto 63-bit ledgers yet.
                    return generateShortLedgerId();
                }
            }).thenAccept(ledgerId ->
                    genericCallback.operationComplete(BKException.Code.OK, ledgerId)
            ).exceptionally(ex -> {
                // Pass the throwable as the last argument so SLF4J logs the full
                // stack trace instead of just the message.
                log.error("Error generating ledger id: {}", ex.getMessage(), ex);
                genericCallback.operationComplete(BKException.Code.MetaStoreException, -1L);
                return null;
            });
}
|
/**
 * Verifies the ledger-id counter survives container-node cleanup: the second
 * generated id must be strictly greater than the first.
 */
@Test(dataProvider = "impl")
public void testEnsureCounterIsNotResetWithContainerNodes(String provider, Supplier<String> urlSupplier)
        throws Exception {
    @Cleanup
    MetadataStoreExtended store =
            MetadataStoreExtended.create(urlSupplier.get(), MetadataStoreConfig.builder().build());
    @Cleanup
    PulsarLedgerIdGenerator ledgerIdGenerator = new PulsarLedgerIdGenerator(store, "/ledgers");
    CountDownLatch l1 = new CountDownLatch(1);
    AtomicLong res1 = new AtomicLong();
    ledgerIdGenerator.generateLedgerId((rc, result) -> {
        assertEquals(rc, BKException.Code.OK);
        res1.set(result);
        l1.countDown();
    });
    l1.await();
    log.info("res1 : {}", res1);
    // Simulate the container-node GC that previously reset the counter.
    zks.checkContainers();
    CountDownLatch l2 = new CountDownLatch(1);
    AtomicLong res2 = new AtomicLong();
    ledgerIdGenerator.generateLedgerId((rc, result) -> {
        assertEquals(rc, BKException.Code.OK);
        res2.set(result);
        l2.countDown();
    });
    l2.await();
    log.info("res2 : {}", res2);
    // Compare the long values: AtomicLong has no value-based equals, so
    // assertNotEquals(res1, res2) on the holders would trivially pass.
    assertNotEquals(res1.get(), res2.get());
    assertTrue(res1.get() < res2.get());
}
|
/**
 * Returns a shareable Uri for the given file, or null when no Uri can be built.
 * Local/root files use a content:// FileProvider Uri on N+ (file:// Uris are
 * forbidden there) and file:// below N. OTG files resolve through their
 * DocumentFile. Network/cloud modes can't be shared directly: a toast with
 * smb_launch_error is shown and null returned.
 */
public static Uri getUriForBaseFile(
    @NonNull Context context, @NonNull HybridFileParcelable baseFile) {
  switch (baseFile.getMode()) {
    case FILE:
    case ROOT:
      if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.N) {
        return FileProvider.getUriForFile(
            context, context.getPackageName(), new File(baseFile.getPath()));
      } else {
        return Uri.fromFile(new File(baseFile.getPath()));
      }
    case OTG:
      return OTGUtil.getDocumentFile(baseFile.getPath(), context, true).getUri();
    case SMB:
    case DROPBOX:
    case GDRIVE:
    case ONEDRIVE:
    case BOX:
      Toast.makeText(context, context.getString(R.string.smb_launch_error), Toast.LENGTH_LONG)
          .show();
      return null;
    default:
      return null;
  }
}
|
/**
 * Covers all OpenMode branches: local/root files yield file:// (pre-N) or
 * content:// (N+) Uris; network/cloud modes yield null plus an error toast;
 * unknown modes yield null.
 */
@Test
public void testGetUriForBaseFile() {
  HybridFileParcelable file = new HybridFileParcelable("/storage/emulated/0/test.txt");
  for (OpenMode m : new OpenMode[] {OpenMode.FILE, OpenMode.ROOT}) {
    file.setMode(m);
    Uri uri = Utils.getUriForBaseFile(ApplicationProvider.getApplicationContext(), file);
    if (Build.VERSION.SDK_INT < N) {
      assertEquals("file:///storage/emulated/0/test.txt", uri.toString());
    } else {
      assertEquals(
          "content://"
              + ApplicationProvider.getApplicationContext().getPackageName()
              + "/storage_root/storage/emulated/0/test.txt",
          uri.toString());
    }
  }
  for (OpenMode m :
      new OpenMode[] {
        OpenMode.DROPBOX, OpenMode.GDRIVE, OpenMode.ONEDRIVE, OpenMode.SMB, OpenMode.BOX
      }) {
    file = new HybridFileParcelable("/foo/bar/test.txt");
    file.setMode(m);
    assertNull(Utils.getUriForBaseFile(ApplicationProvider.getApplicationContext(), file));
    assertEquals(
        ApplicationProvider.getApplicationContext().getString(R.string.smb_launch_error),
        ShadowToast.getTextOfLatestToast());
  }
  // Unknown/unsupported mode falls through to the default branch.
  file.setMode(OpenMode.CUSTOM);
  assertNull(Utils.getUriForBaseFile(ApplicationProvider.getApplicationContext(), file));
}
|
/** Lists the directory non-recursively, delegating with the default "/" delimiter. */
@Override
public AttributedList<Path> list(final Path directory, final ListProgressListener listener) throws BackgroundException {
    return this.list(directory, listener, String.valueOf(Path.DELIMITER));
}
|
@Test
public void testListPlaceholder() throws Exception {
    // A freshly created directory placeholder must list as empty (the
    // placeholder object itself is not returned as its own child).
    final Path container = new Path("test-eu-central-1-cyberduck", EnumSet.of(Path.Type.directory, Path.Type.volume));
    final S3AccessControlListFeature acl = new S3AccessControlListFeature(session);
    final Path placeholder = new S3DirectoryFeature(session, new S3WriteFeature(session, acl), acl).mkdir(
        new Path(container, UUID.randomUUID().toString(), EnumSet.of(Path.Type.directory)), new TransferStatus());
    final AttributedList<Path> list = new S3ObjectListService(session, acl).list(placeholder, new DisabledListProgressListener());
    assertTrue(list.isEmpty());
    // Cleanup.
    new S3DefaultDeleteFeature(session).delete(Collections.singletonList(placeholder), new DisabledLoginCallback(), new Delete.DisabledCallback());
}
|
/**
 * Copies all bytes from {@code in} to {@code out} using the given buffer size.
 *
 * When {@code close} is true, both streams are closed explicitly so that a
 * close failure propagates to the caller; the references are then nulled out
 * so the finally block's closeStream calls (which swallow exceptions) do not
 * close them a second time, yet still close whatever the explicit path missed.
 */
public static void copyBytes(InputStream in, OutputStream out,
                             int buffSize, boolean close)
  throws IOException {
  try {
    copyBytes(in, out, buffSize);
    if(close) {
      out.close();
      out = null;
      in.close();
      in = null;
    }
  } finally {
    if(close) {
      closeStream(out);
      closeStream(in);
    }
  }
}
|
@Test
public void testCopyBytesWithCountShouldCloseStreamsWhenCloseIsTrue()
    throws Exception {
  InputStream inputStream = Mockito.mock(InputStream.class);
  OutputStream outputStream = Mockito.mock(OutputStream.class);
  // NOTE(review): stubbing read(new byte[4096], 0, 1) relies on Mockito's
  // element-wise array equality against the production buffer — fragile if the
  // buffer size or contents change; consider any(byte[].class)/anyInt() matchers.
  Mockito.doReturn(-1).when(inputStream).read(new byte[4096], 0, 1);
  IOUtils.copyBytes(inputStream, outputStream, (long) 1, true);
  // Both streams must be closed when close=true, even on immediate EOF.
  Mockito.verify(inputStream, Mockito.atLeastOnce()).close();
  Mockito.verify(outputStream, Mockito.atLeastOnce()).close();
}
|
/** Null-safe overload: a null key can never trigger showing the hint. */
public boolean shouldShow(@Nullable Keyboard.Key pressedKey) {
    if (pressedKey == null) {
        return false;
    }
    return shouldShow(pressedKey.getPrimaryCode());
}
|
/**
 * Types "best" (breaking the "test" keyword), then "test" twice more; only the
 * final key of a clean, uninterrupted "test" sequence returns true — confirming
 * a mismatch resets the matcher's internal progress.
 */
@Test
public void testPathReset() {
    final OnKeyWordHelper helper = new OnKeyWordHelper("test");
    Keyboard.Key key = Mockito.mock(Keyboard.Key.class);
    Mockito.doReturn((int) 'b').when(key).getPrimaryCode();
    Assert.assertFalse(helper.shouldShow(key));
    Mockito.doReturn((int) 't').when(key).getPrimaryCode();
    Assert.assertFalse(helper.shouldShow(key));
    Mockito.doReturn((int) 'e').when(key).getPrimaryCode();
    Assert.assertFalse(helper.shouldShow(key));
    Mockito.doReturn((int) 's').when(key).getPrimaryCode();
    Assert.assertFalse(helper.shouldShow(key));
    // Repeated 's' breaks the sequence; matching must restart.
    Mockito.doReturn((int) 's').when(key).getPrimaryCode();
    Assert.assertFalse(helper.shouldShow(key));
    Mockito.doReturn((int) 't').when(key).getPrimaryCode();
    Assert.assertFalse(helper.shouldShow(key));
    Mockito.doReturn((int) 'e').when(key).getPrimaryCode();
    Assert.assertFalse(helper.shouldShow(key));
    Mockito.doReturn((int) 's').when(key).getPrimaryCode();
    Assert.assertFalse(helper.shouldShow(key));
    // Final 't' completes an uninterrupted "test".
    Mockito.doReturn((int) 't').when(key).getPrimaryCode();
    Assert.assertTrue(helper.shouldShow(key));
}
|
/**
 * Deletes a process definition by its code.
 *
 * NOTE(review): {@code projectCode} is bound from the route but never passed to
 * the service — confirm project-scoped permission checks happen downstream.
 */
@Operation(summary = "deleteByCode", description = "DELETE_PROCESS_DEFINITION_BY_ID_NOTES")
@Parameters({
        @Parameter(name = "code", description = "PROCESS_DEFINITION_CODE", schema = @Schema(implementation = int.class, example = "100"))
})
@DeleteMapping(value = "/{code}")
@ResponseStatus(HttpStatus.OK)
@ApiException(DELETE_PROCESS_DEFINE_BY_CODE_ERROR)
public Result deleteProcessDefinitionByCode(@Parameter(hidden = true) @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
                                            @Parameter(name = "projectCode", description = "PROJECT_CODE", required = true) @PathVariable long projectCode,
                                            @PathVariable("code") long workflowDefinitionCode) {
    processDefinitionService.deleteProcessDefinitionByCode(loginUser, workflowDefinitionCode);
    return new Result(Status.SUCCESS);
}
|
@Test
public void testDeleteProcessDefinitionByCode() {
    long projectCode = 1L;
    long code = 1L;
    // Not throwing means the controller delegated successfully.
    Assertions.assertDoesNotThrow(
        () -> processDefinitionController.deleteProcessDefinitionByCode(user, projectCode, code));
}
|
/**
 * Sets the interval (in seconds) between preloader store runs.
 *
 * @return this config, for chaining
 * @throws IllegalArgumentException if {@code storeIntervalSeconds} is not positive
 */
public NearCachePreloaderConfig setStoreIntervalSeconds(int storeIntervalSeconds) {
    this.storeIntervalSeconds = checkPositive("storeIntervalSeconds", storeIntervalSeconds);
    return this;
}
|
// Zero is not a valid store interval; checkPositive must reject it.
@Test(expected = IllegalArgumentException.class)
public void setStoreIntervalSeconds_withZero() {
    config.setStoreIntervalSeconds(0);
}
|
/**
 * Computes cluster health from the application nodes only (other node types
 * are handled by their own checkers), merging every sub-check's result
 * starting from GREEN.
 */
@Override
public Health check(Set<NodeHealth> nodeHealths) {
    Set<NodeHealth> appNodes = nodeHealths.stream()
        .filter(nodeHealth -> nodeHealth.getDetails().getType() == NodeDetails.Type.APPLICATION)
        .collect(Collectors.toSet());
    // Left-fold all sub-check verdicts into a single aggregate status.
    Health merged = Health.GREEN;
    for (AppNodeClusterHealthSubChecks subCheck : AppNodeClusterHealthSubChecks.values()) {
        merged = HealthReducer.merge(merged, subCheck.check(appNodes));
    }
    return merged;
}
|
@Test
public void status_RED_when_no_application_node() {
    // A cluster with no application nodes at all must be RED with an explicit cause.
    Set<NodeHealth> nodeHealths = nodeHealths().collect(toSet());
    Health check = underTest.check(nodeHealths);
    assertThat(check)
        .forInput(nodeHealths)
        .hasStatus(Health.Status.RED)
        .andCauses("No application node");
}
|
/**
 * Checks the password against the symbol requirement.
 * An empty Optional means valid; otherwise the reason for rejection is returned.
 */
@Override
public Optional<String> validate(String password) {
    if (password.matches(SYMBOL_REGEX)) {
        return Optional.empty();
    }
    return Optional.of(SYMBOL_REASONING);
}
|
@Test
public void testValidateFailure() {
    // A password without a special character must be rejected with the reason text.
    Optional<String> result = symbolValidator.validate("Password123");
    Assert.assertTrue(result.isPresent());
    Assert.assertEquals(result.get(), "must contain at least one special character");
}
|
/**
 * Deletes the product with the given id. The product is looked up first so a
 * missing id surfaces as ProductNotFoundException instead of a silent no-op.
 */
@Override
public void deleteProductById(String productId) {
    final ProductEntity entityToDelete = productRepository
            .findById(productId)
            .orElseThrow(() -> new ProductNotFoundException("With given productID = " + productId));
    productRepository.delete(entityToDelete);
}
|
@Test
void givenProductId_whenProductNotFound_thenThrowProductNotFoundException() {
    // Given: repository has no product for this id
    String productId = "1";
    when(productRepository.findById(productId)).thenReturn(Optional.empty());
    // When/Then: deletion must fail fast with the domain exception
    assertThrows(ProductNotFoundException.class, () -> productDeleteService.deleteProductById(productId));
    // Verify: lookup happened exactly once and nothing was deleted
    verify(productRepository, times(1)).findById(productId);
    verify(productRepository, never()).delete(any());
}
|
/**
 * Renders the socket address as "ip:port"; a null address maps to the empty
 * string rather than "null".
 */
public static String toAddressString(InetSocketAddress address) {
    if (address == null) {
        return StringUtils.EMPTY;
    }
    return toIpString(address) + ":" + address.getPort();
}
|
@Test
public void toAddressString() throws Exception {
    // TODO(review): empty test body — add assertions covering the null case
    // (empty string) and a concrete "ip:port" rendering.
}
|
/**
 * One-shot migration: rewrites aggregation-v1 event definitions that carry a
 * non-empty series array, then records completion so it never runs again.
 */
@Override
public void upgrade() {
    // Idempotence guard: skip if this migration has already run on the cluster.
    if (clusterConfigService.get(MigrationCompleted.class) != null) {
        LOG.debug("Migration already completed.");
        return;
    }
    // Only documents that actually have at least one series entry need rewriting.
    var result = collection.find(Filters.and(
            Filters.eq("config.type", "aggregation-v1"),
            Filters.type(SERIES_PATH_STRING, "array"),
            Filters.exists(SERIES_PATH_STRING + ".0")
    ));
    var bulkOperations = new ArrayList<WriteModel<? extends Document>>();
    for (Document doc : result) {
        processDoc(doc, bulkOperations);
    }
    if (!bulkOperations.isEmpty()) {
        collection.bulkWrite(bulkOperations);
    }
    // Record completion even when nothing matched, so we never scan again.
    this.clusterConfigService.write(new MigrationCompleted());
}
|
@Test
public void writesMigrationCompletedAfterSuccess() {
    // A successful upgrade must persist the MigrationCompleted marker.
    this.migration.upgrade();
    final V20230629140000_RenameFieldTypeOfEventDefinitionSeries.MigrationCompleted migrationCompleted = captureMigrationCompleted();
    assertThat(migrationCompleted).isNotNull();
}
|
/** Delegates to the underlying volumes collection's string form. */
@Override
public String toString() {
    return volumes.toString();
}
|
/**
 * Verifies per-StorageType reserved-space keys
 * (dfs.datanode.du.reserved.&lt;storagetype&gt;) override the generic
 * dfs.datanode.du.reserved, and that unconfigured types fall back to it.
 */
@Test
public void testDfsReservedForDifferentStorageTypes() throws IOException {
  Configuration conf = new Configuration();
  conf.setLong(DFSConfigKeys.DFS_DATANODE_DU_RESERVED_KEY, 100L);
  File volDir = new File(baseDir, "volume-0");
  volDir.mkdirs();
  // when storage type reserved is not configured,should consider
  // dfs.datanode.du.reserved.
  FsVolumeImpl volume = new FsVolumeImplBuilder().setDataset(dataset)
      .setStorageDirectory(
          new StorageDirectory(
              StorageLocation.parse("[RAM_DISK]"+volDir.getPath())))
      .setStorageID("storage-id")
      .setConf(conf)
      .build();
  assertEquals("", 100L, volume.getReserved());
  // when storage type reserved is configured.
  conf.setLong(
      DFSConfigKeys.DFS_DATANODE_DU_RESERVED_KEY + "."
          + StringUtils.toLowerCase(StorageType.RAM_DISK.toString()), 1L);
  conf.setLong(
      DFSConfigKeys.DFS_DATANODE_DU_RESERVED_KEY + "."
          + StringUtils.toLowerCase(StorageType.SSD.toString()), 2L);
  conf.setLong(
      DFSConfigKeys.DFS_DATANODE_DU_RESERVED_KEY + "."
          + StringUtils.toLowerCase(StorageType.NVDIMM.toString()), 3L);
  // RAM_DISK now uses its type-specific value.
  FsVolumeImpl volume1 = new FsVolumeImplBuilder().setDataset(dataset)
      .setStorageDirectory(
          new StorageDirectory(
              StorageLocation.parse("[RAM_DISK]"+volDir.getPath())))
      .setStorageID("storage-id")
      .setConf(conf)
      .build();
  assertEquals("", 1L, volume1.getReserved());
  // SSD uses its type-specific value.
  FsVolumeImpl volume2 = new FsVolumeImplBuilder().setDataset(dataset)
      .setStorageDirectory(
          new StorageDirectory(
              StorageLocation.parse("[SSD]"+volDir.getPath())))
      .setStorageID("storage-id")
      .setConf(conf)
      .build();
  assertEquals("", 2L, volume2.getReserved());
  // DISK has no type-specific key and falls back to the generic value.
  FsVolumeImpl volume3 = new FsVolumeImplBuilder().setDataset(dataset)
      .setStorageDirectory(
          new StorageDirectory(
              StorageLocation.parse("[DISK]"+volDir.getPath())))
      .setStorageID("storage-id")
      .setConf(conf)
      .build();
  assertEquals("", 100L, volume3.getReserved());
  // No storage-type tag also falls back to the generic value.
  FsVolumeImpl volume4 = new FsVolumeImplBuilder().setDataset(dataset)
      .setStorageDirectory(
          new StorageDirectory(
              StorageLocation.parse(volDir.getPath())))
      .setStorageID("storage-id")
      .setConf(conf)
      .build();
  assertEquals("", 100L, volume4.getReserved());
  // NVDIMM uses its type-specific value.
  FsVolumeImpl volume5 = new FsVolumeImplBuilder().setDataset(dataset)
      .setStorageDirectory(
          new StorageDirectory(
              StorageLocation.parse("[NVDIMM]"+volDir.getPath())))
      .setStorageID("storage-id")
      .setConf(conf)
      .build();
  assertEquals(3L, volume5.getReserved());
}
|
/**
 * Returns true if the byte array begins with the 2-byte GZIP magic header.
 * Null or too-short input is never a gzip stream.
 */
public static boolean isGzipStream(byte[] bytes) {
    if (bytes == null || bytes.length < 2) {
        return false;
    }
    // Assemble the little-endian header with explicit byte masking so negative
    // byte values cannot sign-extend into the comparison.
    final int header = (bytes[0] & 0xFF) | ((bytes[1] & 0xFF) << 8);
    return header == GZIPInputStream.GZIP_MAGIC;
}
|
@Test
void testIsGzipStreamWithEmpty() {
    // An empty array is shorter than the 2-byte GZIP magic, so it can't be gzip.
    assertFalse(IoUtils.isGzipStream(new byte[0]));
}
|
/**
 * Converts the scalar operator to a predicate via the visitor pattern.
 * A null operator converts to a null predicate.
 */
public Predicate convert(ScalarOperator operator) {
    return operator == null ? null : operator.accept(this, null);
}
|
@Test
public void testGreaterThan() {
    // F0 > 5 must convert to a leaf predicate with a GreaterThan function and
    // the literal 5 preserved.
    ConstantOperator value = ConstantOperator.createInt(5);
    ScalarOperator op = new BinaryPredicateOperator(BinaryType.GT, F0, value);
    Predicate result = CONVERTER.convert(op);
    Assert.assertTrue(result instanceof LeafPredicate);
    LeafPredicate leafPredicate = (LeafPredicate) result;
    Assert.assertTrue(leafPredicate.function() instanceof GreaterThan);
    Assert.assertEquals(5, leafPredicate.literals().get(0));
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.