| focal_method | test_case |
|---|---|
static SerializableFunction<LinkedHashMap<String, Double>,
LinkedHashMap<String, Double>> getProbabilityMapFunction(final RegressionModel.NormalizationMethod normalizationMethod,
final boolean isBinary) {
if (UNSUPPORTED_NORMALIZATION_METHODS.contains(normalizationMethod)) {
throw new KiePMMLInternalException(String.format("Unsupported NormalizationMethod %s",
normalizationMethod));
} else {
return getProbabilityMapFunctionSupported(normalizationMethod, isBinary);
}
}
|
@Test
void getProbabilityMapSupportedFunction() {
KiePMMLClassificationTableFactory.SUPPORTED_NORMALIZATION_METHODS.forEach(normalizationMethod ->
assertThat(KiePMMLClassificationTableFactory.getProbabilityMapFunction(normalizationMethod, false)).isNotNull());
KiePMMLClassificationTableFactory.SUPPORTED_NORMALIZATION_METHODS.forEach(normalizationMethod ->
assertThat(KiePMMLClassificationTableFactory.getProbabilityMapFunction(normalizationMethod, true)).isNotNull());
}
|
boolean isDataAccessible() {
return (getProjectId() == null || getProjectId().isAccessible())
&& (getInstanceId() == null || getInstanceId().isAccessible())
&& (getAppProfileId() == null || getAppProfileId().isAccessible());
}
|
@Test
public void testIsDataAccessible() {
assertTrue(config.withProjectId(PROJECT_ID).withInstanceId(INSTANCE_ID).isDataAccessible());
assertTrue(
config
.withProjectId(PROJECT_ID)
.withBigtableOptions(new BigtableOptions.Builder().setInstanceId("instance_id").build())
.isDataAccessible());
assertTrue(
config
.withInstanceId(INSTANCE_ID)
.withBigtableOptions(new BigtableOptions.Builder().setProjectId("project_id").build())
.isDataAccessible());
assertTrue(
config
.withBigtableOptions(
new BigtableOptions.Builder()
.setProjectId("project_id")
.setInstanceId("instance_id")
.build())
.isDataAccessible());
assertFalse(
config.withProjectId(NOT_ACCESSIBLE_VALUE).withInstanceId(INSTANCE_ID).isDataAccessible());
assertFalse(
config.withProjectId(PROJECT_ID).withInstanceId(NOT_ACCESSIBLE_VALUE).isDataAccessible());
}
|
@Override
protected void deleteWordFromStorage(String word) {
throw new UnsupportedOperationException();
}
|
@Test(expected = UnsupportedOperationException.class)
public void testCanNotDeleteFromStorage() {
mUnderTest.deleteWordFromStorage("word");
}
|
public Array getArray(String name) {
Array a = arrayMap.get(name);
if (a == null) {
validateArray(name);
a = new Array(configDefinition, name);
arrayMap.put(name, a);
}
return a;
}
|
@Test(expected=IllegalStateException.class)
public void require_that_append_conflicts_with_index() {
ConfigPayloadBuilder builder = new ConfigPayloadBuilder();
ConfigPayloadBuilder.Array array = builder.getArray("foo");
array.set(0, "bar");
array.append("baz");
}
|
@Subscribe
public void onChatMessage(ChatMessage event)
{
if (event.getType() != ChatMessageType.SPAM)
{
return;
}
final String message = event.getMessage();
if (message.startsWith("You successfully cook")
|| message.startsWith("You successfully bake")
|| message.startsWith("You successfully fry")
|| message.startsWith("You manage to cook")
|| message.startsWith("You roast a")
|| message.startsWith("You spit-roast")
|| message.startsWith("You cook")
|| message.startsWith("Eventually the Jubbly")
|| message.startsWith("You half-cook")
|| message.startsWith("The undead meat is now cooked")
|| message.startsWith("The undead chicken is now cooked")
|| message.startsWith("You successfully scramble")
|| message.startsWith("You dry a piece of meat"))
{
if (session == null)
{
session = new CookingSession();
}
session.updateLastCookingAction();
session.increaseCookAmount();
}
else if (message.startsWith("You accidentally burn")
|| message.equals("You burn the mushroom in the fire.")
|| message.startsWith("Unfortunately the Jubbly")
|| message.startsWith("You accidentally spoil"))
{
if (session == null)
{
session = new CookingSession();
}
session.updateLastCookingAction();
session.increaseBurnAmount();
}
}
|
@Test
public void testOnChatMessage()
{
for (String message : COOKING_MESSAGES)
{
ChatMessage chatMessage = new ChatMessage(null, ChatMessageType.SPAM, "", message, "", 0);
cookingPlugin.onChatMessage(chatMessage);
}
ChatMessage chatMessage = new ChatMessage(null, ChatMessageType.SPAM, "", incenseBurnerMessage, "", 0);
cookingPlugin.onChatMessage(chatMessage);
CookingSession cookingSession = cookingPlugin.getSession();
assertNotNull(cookingSession);
assertEquals(COOKING_MESSAGES.length, cookingSession.getCookAmount());
assertEquals(0, cookingSession.getBurnAmount());
}
|
@Override
public Optional<NativeEntity<LookupTableDto>> findExisting(Entity entity, Map<String, ValueReference> parameters) {
if (entity instanceof EntityV1) {
return findExisting((EntityV1) entity, parameters);
} else {
throw new IllegalArgumentException("Unsupported entity version: " + entity.getClass());
}
}
|
@Test
@MongoDBFixtures("LookupTableFacadeTest.json")
public void findExistingWithNoExistingEntity() {
final Entity entity = EntityV1.builder()
.id(ModelId.of("1"))
.type(ModelTypes.LOOKUP_TABLE_V1)
.data(objectMapper.convertValue(LookupTableEntity.create(
ValueReference.of(DefaultEntityScope.NAME),
ValueReference.of("some-name"),
ValueReference.of("Title"),
ValueReference.of("Description"),
ValueReference.of("cache-id"),
ValueReference.of("data-adapter-id"),
ValueReference.of("Default single value"),
ValueReference.of(LookupDefaultValue.Type.STRING),
ValueReference.of("Default multi value"),
ValueReference.of(LookupDefaultValue.Type.OBJECT)), JsonNode.class))
.build();
final Optional<NativeEntity<LookupTableDto>> existingEntity = facade.findExisting(entity, Collections.emptyMap());
assertThat(existingEntity).isEmpty();
}
|
@Override
public void init(ServletConfig config) throws ServletException {
super.init(config);
final ServletContext context = config.getServletContext();
if (null == registry) {
final Object registryAttr = context.getAttribute(HEALTH_CHECK_REGISTRY);
if (registryAttr instanceof HealthCheckRegistry) {
this.registry = (HealthCheckRegistry) registryAttr;
} else {
throw new ServletException("Couldn't find a HealthCheckRegistry instance.");
}
}
final Object executorAttr = context.getAttribute(HEALTH_CHECK_EXECUTOR);
if (executorAttr instanceof ExecutorService) {
this.executorService = (ExecutorService) executorAttr;
}
final Object filterAttr = context.getAttribute(HEALTH_CHECK_FILTER);
if (filterAttr instanceof HealthCheckFilter) {
filter = (HealthCheckFilter) filterAttr;
}
if (filter == null) {
filter = HealthCheckFilter.ALL;
}
final Object mapperAttr = context.getAttribute(HEALTH_CHECK_MAPPER);
if (mapperAttr instanceof ObjectMapper) {
this.mapper = (ObjectMapper) mapperAttr;
} else {
this.mapper = new ObjectMapper();
}
this.mapper.registerModule(new HealthCheckModule());
final Object httpStatusIndicatorAttr = context.getAttribute(HEALTH_CHECK_HTTP_STATUS_INDICATOR);
if (httpStatusIndicatorAttr instanceof Boolean) {
this.httpStatusIndicator = (Boolean) httpStatusIndicatorAttr;
} else {
this.httpStatusIndicator = true;
}
}
|
@Test
public void constructorWithRegistryAsArgumentIsUsedInPreferenceOverServletConfig() throws Exception {
final HealthCheckRegistry healthCheckRegistry = mock(HealthCheckRegistry.class);
final ServletContext servletContext = mock(ServletContext.class);
final ServletConfig servletConfig = mock(ServletConfig.class);
when(servletConfig.getServletContext()).thenReturn(servletContext);
final HealthCheckServlet healthCheckServlet = new HealthCheckServlet(healthCheckRegistry);
healthCheckServlet.init(servletConfig);
verify(servletConfig, times(1)).getServletContext();
verify(servletContext, never()).getAttribute(HealthCheckServlet.HEALTH_CHECK_REGISTRY);
}
|
@Override
public void validate(String value, @Nullable List<String> options) {
checkRequest(options == null || options.contains(value), "Value '%s' must be one of: %s.", value, StringUtils.join(options, ", "));
}
|
@Test
public void not_fail_on_valid_option() {
validation.validate("a", newArrayList("a", "b", "c"));
validation.validate("a", null);
}
|
Future<Boolean> canRollController(int nodeId) {
LOGGER.debugCr(reconciliation, "Determining whether controller pod {} can be rolled", nodeId);
return describeMetadataQuorum().map(info -> {
boolean canRoll = isQuorumHealthyWithoutNode(nodeId, info);
if (!canRoll) {
LOGGER.debugCr(reconciliation, "Not restarting controller pod {}. Restart would affect the quorum health", nodeId);
}
return canRoll;
}).recover(error -> {
LOGGER.warnCr(reconciliation, "Error determining whether it is safe to restart controller pod {}", nodeId, error);
return Future.failedFuture(error);
});
}
|
@Test
public void cannotRollControllerWhenAnotherFollowerBehind(VertxTestContext context) {
Map<Integer, OptionalLong> controllers = new HashMap<>();
controllers.put(1, OptionalLong.of(10000L));
controllers.put(2, OptionalLong.of(7000L));
controllers.put(3, OptionalLong.of(10000L));
Admin admin = setUpMocks(1, controllers);
KafkaQuorumCheck quorumCheck = new KafkaQuorumCheck(Reconciliation.DUMMY_RECONCILIATION, admin, vertx, CONTROLLER_QUORUM_FETCH_TIMEOUT_MS);
quorumCheck.canRollController(3).onComplete(context.succeeding(result -> {
context.verify(() -> assertFalse(result));
context.completeNow();
}));
}
|
public static void validate(long migrationNumber) {
checkArgument(migrationNumber >= 0, "Migration number must be >= 0");
}
|
@Test
public void validate_accepts_any_positive_long() {
MigrationNumber.validate(Math.abs(new Random().nextInt()));
}
|
@Override
public ValidationResult validate(RuleBuilderStep step) {
final RuleFragment ruleFragment = actions.get(step.function());
FunctionDescriptor<?> functionDescriptor = ruleFragment.descriptor();
Map<String, Object> stepParameters = step.parameters();
//Add output to map
String outputvariable = step.outputvariable();
if (StringUtils.isNotBlank(outputvariable)) {
if (functionDescriptor.returnType() == Void.class) {
return new ValidationResult(true, f("Return type is void. No output variable allowed", functionDescriptor.name()));
}
storeVariable(outputvariable, functionDescriptor.returnType());
}
ImmutableList<ParameterDescriptor> parameterDescriptors = functionDescriptor.params();
for (ParameterDescriptor parameterDescriptor : parameterDescriptors) {
String parameterName = parameterDescriptor.name();
Object value = stepParameters.get(parameterName);
Class<?> variableType = getVariableType(value);
if (!parameterDescriptor.optional() && value == null) {
return new ValidationResult(true, f("Missing parameter %s", parameterName));
}
//$ means it is stored in another variable and we need to fetch and verify that type
if (value instanceof String s && s.startsWith("$")) {
String substring = s.substring(1);
Class<?> passedVariableType = variables.get(substring);
if (Objects.isNull(passedVariableType)) {
return new ValidationResult(true, f("Could not find passed variable %s", value));
}
variableType = passedVariableType;
}
//Check if variable type matches function expectation
Class<?> paramType = parameterDescriptor.type();
if (value != null && paramType != Object.class && variableType != paramType) {
String errorMsg = "Found a wrong parameter type for parameter %s";
return new ValidationResult(true, f(errorMsg, parameterName));
}
}
return new ValidationResult(false, "");
}
|
@Test
void validateSuccessFull() {
HashMap<String, Object> stringFunctionParams = new HashMap<>();
stringFunctionParams.put(STRING_PARAM, "test string");
RuleBuilderStep validStringStep = RuleBuilderStep.builder()
.parameters(stringFunctionParams)
.function(STRING_FUNCTION)
.outputvariable("stringOutPut")
.build();
ValidationResult firstFunctionResult = classUnderTest.validate(validStringStep);
assertThat(firstFunctionResult.failed()).isFalse();
HashMap<String, Object> intFunctionParams = new HashMap<>();
intFunctionParams.put(STRING_PARAM, "stringOutPut");
RuleBuilderStep stepWithCorrectPassedOutput = RuleBuilderStep.builder()
.parameters(intFunctionParams)
.function(STRING_FUNCTION)
.build();
ValidationResult result = classUnderTest.validate(stepWithCorrectPassedOutput);
assertThat(result.failed()).isFalse();
}
|
@Override
public Optional<Endpoint> getRestEndpoint(String clusterId) {
Optional<KubernetesService> restService =
getService(ExternalServiceDecorator.getExternalServiceName(clusterId));
if (!restService.isPresent()) {
return Optional.empty();
}
final Service service = restService.get().getInternalResource();
final KubernetesConfigOptions.ServiceExposedType serviceExposedType =
ServiceType.classify(service);
return serviceExposedType
.serviceType()
.getRestEndpoint(service, internalClient, nodePortAddressType);
}
|
@Test
void testServiceLoadBalancerWithNoIP() {
final String hostName = "test-host-name";
mockExpectedServiceFromServerSide(buildExternalServiceWithLoadBalancer(hostName, ""));
final Optional<Endpoint> resultEndpoint = flinkKubeClient.getRestEndpoint(CLUSTER_ID);
assertThat(resultEndpoint).isPresent();
assertThat(resultEndpoint.get().getAddress()).isEqualTo(hostName);
assertThat(resultEndpoint.get().getPort()).isEqualTo(REST_PORT);
}
|
public static AvroGenericCoder of(Schema schema) {
return AvroGenericCoder.of(schema);
}
|
@Test
public void testDeterministicInteger() {
assertDeterministic(AvroCoder.of(Integer.class));
}
|
public static boolean _isReferenceType(JavaType jtype) {
return Arrays.asList("com.google.common.base.Optional", "java.util.Optional")
.contains(jtype.getRawClass().getCanonicalName()) || jtype.isReferenceType();
}
|
@Test(description = "AtomicReference should be reference type")
public void testIsReferenceTypeWithAtomicReference() {
final JavaType referredType = TypeFactory.defaultInstance().constructType(String.class);
final Class<AtomicReference> rawType = AtomicReference.class;
final JavaType atomicReferenceType = TypeFactory.defaultInstance().constructReferenceType(rawType, referredType);
final boolean actualIsReferenceType = ReferenceTypeUtils._isReferenceType(atomicReferenceType);
Assert.assertEquals(actualIsReferenceType, true, rawType.getCanonicalName() + " should be reference type but was not.");
}
|
static boolean hasLanguagePackForCurrentLocale(
@NonNull List<KeyboardAddOnAndBuilder> availableLanguagePacks) {
for (KeyboardAddOnAndBuilder availableLanguagePack : availableLanguagePacks) {
final String language = availableLanguagePack.getKeyboardLocale();
if (TextUtils.isEmpty(language)) continue;
if (Locale.getDefault().getLanguage().equals(new Locale(language).getLanguage())) {
return true;
}
}
return false;
}
|
@Test
public void testHasLanguagePackForCurrentLocale() {
final KeyboardFactory spiedKeyboardFactory = mApplication.getSpiedKeyboardFactory();
ArrayList<KeyboardAddOnAndBuilder> mockResponse =
new ArrayList<>(spiedKeyboardFactory.getAllAddOns());
Assert.assertTrue(SetupSupport.hasLanguagePackForCurrentLocale(mockResponse));
Locale.setDefault(Locale.FRENCH);
Assert.assertFalse(SetupSupport.hasLanguagePackForCurrentLocale(mockResponse));
KeyboardAddOnAndBuilder frenchBuilder = Mockito.mock(KeyboardAddOnAndBuilder.class);
Mockito.doReturn("fr").when(frenchBuilder).getKeyboardLocale();
mockResponse.add(frenchBuilder);
Assert.assertTrue(SetupSupport.hasLanguagePackForCurrentLocale(mockResponse));
Locale.setDefault(new Locale("he"));
Assert.assertFalse(SetupSupport.hasLanguagePackForCurrentLocale(mockResponse));
KeyboardAddOnAndBuilder hebrewBuilder = Mockito.mock(KeyboardAddOnAndBuilder.class);
Mockito.doReturn("iw").when(hebrewBuilder).getKeyboardLocale();
mockResponse.add(hebrewBuilder);
Locale.setDefault(new Locale("iw"));
Assert.assertTrue(SetupSupport.hasLanguagePackForCurrentLocale(mockResponse));
Locale.setDefault(new Locale("ru"));
Assert.assertFalse(SetupSupport.hasLanguagePackForCurrentLocale(mockResponse));
Mockito.doReturn("ru").when(hebrewBuilder).getKeyboardLocale();
Assert.assertTrue(SetupSupport.hasLanguagePackForCurrentLocale(mockResponse));
}
|
@Override
public boolean createMep(MdId mdName, MaIdShort maName, Mep newMep) throws CfmConfigException {
MepKeyId key = new MepKeyId(mdName, maName, newMep.mepId());
log.debug("Creating MEP " + newMep.mepId() + " on MD {}, MA {} on Device {}",
mdName, maName, newMep.deviceId().toString());
if (mepStore.getMep(key).isPresent()) {
return false;
}
//Will throw IllegalArgumentException if ma does not exist
cfmMdService.getMaintenanceAssociation(mdName, maName);
DeviceId mepDeviceId = newMep.deviceId();
if (deviceService.getDevice(mepDeviceId) == null) {
throw new CfmConfigException("Device not found " + mepDeviceId);
} else if (!deviceService.getDevice(mepDeviceId).is(CfmMepProgrammable.class)) {
throw new CfmConfigException("Device " + mepDeviceId + " does not support CfmMepProgrammable behaviour.");
}
boolean deviceResult =
deviceService.getDevice(mepDeviceId).as(CfmMepProgrammable.class).createMep(mdName, maName, newMep);
log.debug("MEP created on {}", mepDeviceId);
if (deviceResult) {
boolean alreadyExisted = mepStore.createUpdateMep(key, newMep);
//Add to other Remote Mep List on other devices
for (Mep mep:mepStore.getMepsByMdMa(mdName, maName)) {
List<DeviceId> alreadyHandledDevices = new ArrayList<>();
if (mep.deviceId().equals(mepDeviceId) ||
alreadyHandledDevices.contains(mep.deviceId())) {
continue;
}
boolean created = deviceService.getDevice(mep.deviceId())
.as(CfmMepProgrammable.class)
.createMaRemoteMepOnDevice(mdName, maName, newMep.mepId());
alreadyHandledDevices.add(mep.deviceId());
log.info("Created RMep entry on {} on device {}",
mdName.mdName() + "/" + maName.maName(), mep.deviceId());
}
return !alreadyExisted;
} else {
return deviceResult;
}
}
|
@Test
public void testCreateMepBehaviorNotSupported() throws CfmConfigException {
final DeviceId deviceId3 = DeviceId.deviceId("netconf:3.2.3.4:830");
Map<Class<? extends Behaviour>, Class<? extends Behaviour>> behaviours = new HashMap<>();
behaviours.put(DeviceDescriptionDiscovery.class, TestDeviceDiscoveryBehavior.class);
Driver testDriver3 = new DefaultDriver(
TEST_DRIVER_3, new ArrayList<Driver>(),
TEST_MFR, TEST_HW_VERSION, TEST_SW_3,
behaviours, new HashMap<>());
Device device3 = new DefaultDevice(
ProviderId.NONE, deviceId3, Device.Type.SWITCH,
TEST_MFR, TEST_HW_VERSION, TEST_SW_3, TEST_SN,
new ChassisId(2),
DefaultAnnotations.builder().set(AnnotationKeys.DRIVER, TEST_DRIVER_3).build());
expect(mdService.getMaintenanceAssociation(MDNAME1, MANAME1))
.andReturn(Optional.ofNullable(ma1))
.anyTimes();
replay(mdService);
expect(deviceService.getDevice(deviceId3)).andReturn(device3).anyTimes();
replay(deviceService);
expect(driverService.getDriver(deviceId3)).andReturn(testDriver3).anyTimes();
replay(driverService);
MepId mepId3 = MepId.valueOf((short) 3);
Mep mep3 = DefaultMep.builder(mepId3, deviceId3, PortNumber.portNumber(1),
Mep.MepDirection.UP_MEP, MDNAME1, MANAME1).build();
try {
mepManager.createMep(MDNAME1, MANAME1, mep3);
fail("Expecting CfmConfigException because driver does not support behavior");
} catch (CfmConfigException e) {
assertEquals("Device netconf:3.2.3.4:830 does not support " +
"CfmMepProgrammable behaviour.", e.getMessage());
}
}
|
@Override
public ExecuteResult execute(final ServiceContext serviceContext, final ConfiguredKsqlPlan plan,
final boolean restoreInProgress) {
try {
final ExecuteResult result = EngineExecutor
.create(primaryContext, serviceContext, plan.getConfig())
.execute(plan.getPlan(), restoreInProgress);
return result;
} catch (final KsqlStatementException e) {
throw e;
} catch (final KsqlException e) {
// add the statement text to the KsqlException
throw new KsqlStatementException(
e.getMessage(),
e.getMessage(),
plan.getPlan().getStatementText(),
e.getCause()
);
}
}
|
@Test
public void shouldShowCorrectHintsWhenIncorrectSourceMatchesWithTwo() {
// Given:
setupKsqlEngineWithSharedRuntimeEnabled();
KsqlEngineTestUtil.execute(
serviceContext,
ksqlEngine,
"create stream \"Bar\" as select * from test1; "
+ "create stream bar as select * from test1;",
ksqlConfig,
Collections.emptyMap()
);
// When:
final KsqlStatementException e = assertThrows(
KsqlStatementException.class,
() -> KsqlEngineTestUtil.execute(
serviceContext,
ksqlEngine,
"select * from \"bar\";",
ksqlConfig,
Collections.emptyMap()
)
);
// Then:
assertThat(e, rawMessage(is(
"Exception while preparing statement: bar does not exist.\n"
+ "Did you mean \"BAR\" (STREAM) or \"Bar\" (STREAM)? Hint: wrap the source name in double quotes to make it case-sensitive.")));
assertThat(e, statementText(is("select * from \"bar\";")));
}
|
public FEELFnResult<Boolean> invoke(@ParameterName("list") List list) {
if (list == null) {
return FEELFnResult.ofResult(false);
}
boolean result = false;
for (final Object element : list) {
if (element != null && !(element instanceof Boolean)) {
return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "an element in the list is not" +
" a Boolean"));
} else {
if (element != null) {
result |= (Boolean) element;
}
}
}
return FEELFnResult.ofResult(result);
}
|
@Test
void invokeArrayParamReturnTrue() {
FunctionTestUtil.assertResult(anyFunction.invoke(new Object[]{Boolean.TRUE, Boolean.TRUE}), true);
FunctionTestUtil.assertResult(anyFunction.invoke(new Object[]{Boolean.TRUE, Boolean.FALSE}), true);
FunctionTestUtil.assertResult(anyFunction.invoke(new Object[]{Boolean.TRUE, null}), true);
FunctionTestUtil.assertResult(anyFunction.invoke(new Object[]{Boolean.TRUE, null, Boolean.FALSE}), true);
}
|
@Override
public List<PinotTaskConfig> generateTasks(List<TableConfig> tableConfigs) {
String taskType = RealtimeToOfflineSegmentsTask.TASK_TYPE;
List<PinotTaskConfig> pinotTaskConfigs = new ArrayList<>();
for (TableConfig tableConfig : tableConfigs) {
String realtimeTableName = tableConfig.getTableName();
if (tableConfig.getTableType() != TableType.REALTIME) {
LOGGER.warn("Skip generating task: {} for non-REALTIME table: {}", taskType, realtimeTableName);
continue;
}
LOGGER.info("Start generating task configs for table: {} for task: {}", realtimeTableName, taskType);
// Only schedule 1 task of this type, per table
Map<String, TaskState> incompleteTasks =
TaskGeneratorUtils.getIncompleteTasks(taskType, realtimeTableName, _clusterInfoAccessor);
if (!incompleteTasks.isEmpty()) {
LOGGER.warn("Found incomplete tasks: {} for same table: {} and task type: {}. Skipping task generation.",
incompleteTasks.keySet(), realtimeTableName, taskType);
continue;
}
// Get all segment metadata for completed segments (DONE/UPLOADED status).
List<SegmentZKMetadata> completedSegmentsZKMetadata = new ArrayList<>();
Map<Integer, String> partitionToLatestLLCSegmentName = new HashMap<>();
Set<Integer> allPartitions = new HashSet<>();
getCompletedSegmentsInfo(realtimeTableName, completedSegmentsZKMetadata, partitionToLatestLLCSegmentName,
allPartitions);
if (completedSegmentsZKMetadata.isEmpty()) {
LOGGER.info("No realtime-completed segments found for table: {}, skipping task generation: {}",
realtimeTableName, taskType);
continue;
}
allPartitions.removeAll(partitionToLatestLLCSegmentName.keySet());
if (!allPartitions.isEmpty()) {
LOGGER.info(
"Partitions: {} have no completed segments. Table: {} is not ready for {}. Skipping task generation.",
allPartitions, realtimeTableName, taskType);
continue;
}
TableTaskConfig tableTaskConfig = tableConfig.getTaskConfig();
Preconditions.checkState(tableTaskConfig != null);
Map<String, String> taskConfigs = tableTaskConfig.getConfigsForTaskType(taskType);
Preconditions.checkState(taskConfigs != null, "Task config shouldn't be null for table: %s", realtimeTableName);
// Get the bucket size and buffer
String bucketTimePeriod =
taskConfigs.getOrDefault(RealtimeToOfflineSegmentsTask.BUCKET_TIME_PERIOD_KEY, DEFAULT_BUCKET_PERIOD);
String bufferTimePeriod =
taskConfigs.getOrDefault(RealtimeToOfflineSegmentsTask.BUFFER_TIME_PERIOD_KEY, DEFAULT_BUFFER_PERIOD);
long bucketMs = TimeUtils.convertPeriodToMillis(bucketTimePeriod);
long bufferMs = TimeUtils.convertPeriodToMillis(bufferTimePeriod);
// Get watermark from RealtimeToOfflineSegmentsTaskMetadata ZNode. WindowStart = watermark. WindowEnd =
// windowStart + bucket.
long windowStartMs = getWatermarkMs(realtimeTableName, completedSegmentsZKMetadata, bucketMs);
long windowEndMs = windowStartMs + bucketMs;
// Find all COMPLETED segments with data overlapping execution window: windowStart (inclusive) to windowEnd
// (exclusive)
List<String> segmentNames = new ArrayList<>();
List<String> downloadURLs = new ArrayList<>();
Set<String> lastLLCSegmentPerPartition = new HashSet<>(partitionToLatestLLCSegmentName.values());
boolean skipGenerate = false;
while (true) {
// Check that execution window is older than bufferTime
if (windowEndMs > System.currentTimeMillis() - bufferMs) {
LOGGER.info(
"Window with start: {} and end: {} is not older than buffer time: {} configured as {} ago. Skipping task "
+ "generation: {}", windowStartMs, windowEndMs, bufferMs, bufferTimePeriod, taskType);
skipGenerate = true;
break;
}
for (SegmentZKMetadata segmentZKMetadata : completedSegmentsZKMetadata) {
String segmentName = segmentZKMetadata.getSegmentName();
long segmentStartTimeMs = segmentZKMetadata.getStartTimeMs();
long segmentEndTimeMs = segmentZKMetadata.getEndTimeMs();
// Check overlap with window
if (windowStartMs <= segmentEndTimeMs && segmentStartTimeMs < windowEndMs) {
// If last completed segment is being used, make sure that segment crosses over end of window.
// In the absence of this check, CONSUMING segments could contain some portion of the window. That data
// would be skipped forever.
if (lastLLCSegmentPerPartition.contains(segmentName) && segmentEndTimeMs < windowEndMs) {
LOGGER.info("Window data overflows into CONSUMING segments for partition of segment: {}. Skipping task "
+ "generation: {}", segmentName, taskType);
skipGenerate = true;
break;
}
segmentNames.add(segmentName);
downloadURLs.add(segmentZKMetadata.getDownloadUrl());
}
}
if (skipGenerate || !segmentNames.isEmpty()) {
break;
}
LOGGER.info("Found no eligible segments for task: {} with window [{} - {}), moving to the next time bucket",
taskType, windowStartMs, windowEndMs);
windowStartMs = windowEndMs;
windowEndMs += bucketMs;
}
if (skipGenerate) {
continue;
}
Map<String, String> configs = MinionTaskUtils.getPushTaskConfig(realtimeTableName, taskConfigs,
_clusterInfoAccessor);
configs.putAll(getBaseTaskConfigs(tableConfig, segmentNames));
configs.put(MinionConstants.DOWNLOAD_URL_KEY, StringUtils.join(downloadURLs, MinionConstants.URL_SEPARATOR));
configs.put(MinionConstants.UPLOAD_URL_KEY, _clusterInfoAccessor.getVipUrl() + "/segments");
// Segment processor configs
configs.put(RealtimeToOfflineSegmentsTask.WINDOW_START_MS_KEY, String.valueOf(windowStartMs));
configs.put(RealtimeToOfflineSegmentsTask.WINDOW_END_MS_KEY, String.valueOf(windowEndMs));
String roundBucketTimePeriod = taskConfigs.get(RealtimeToOfflineSegmentsTask.ROUND_BUCKET_TIME_PERIOD_KEY);
if (roundBucketTimePeriod != null) {
configs.put(RealtimeToOfflineSegmentsTask.ROUND_BUCKET_TIME_PERIOD_KEY, roundBucketTimePeriod);
}
// NOTE: Check and put both keys for backward-compatibility
String mergeType = taskConfigs.get(RealtimeToOfflineSegmentsTask.MERGE_TYPE_KEY);
if (mergeType == null) {
mergeType = taskConfigs.get(RealtimeToOfflineSegmentsTask.COLLECTOR_TYPE_KEY);
}
if (mergeType != null) {
configs.put(RealtimeToOfflineSegmentsTask.MERGE_TYPE_KEY, mergeType);
configs.put(RealtimeToOfflineSegmentsTask.COLLECTOR_TYPE_KEY, mergeType);
}
for (Map.Entry<String, String> entry : taskConfigs.entrySet()) {
if (entry.getKey().endsWith(RealtimeToOfflineSegmentsTask.AGGREGATION_TYPE_KEY_SUFFIX)) {
configs.put(entry.getKey(), entry.getValue());
}
}
String maxNumRecordsPerSegment = taskConfigs.get(RealtimeToOfflineSegmentsTask.MAX_NUM_RECORDS_PER_SEGMENT_KEY);
if (maxNumRecordsPerSegment != null) {
configs.put(RealtimeToOfflineSegmentsTask.MAX_NUM_RECORDS_PER_SEGMENT_KEY, maxNumRecordsPerSegment);
}
pinotTaskConfigs.add(new PinotTaskConfig(taskType, configs));
LOGGER.info("Finished generating task configs for table: {} for task: {}", realtimeTableName, taskType);
}
return pinotTaskConfigs;
}
|
@Test
public void testGenerateTasksNoSegments() {
Map<String, Map<String, String>> taskConfigsMap = new HashMap<>();
taskConfigsMap.put(RealtimeToOfflineSegmentsTask.TASK_TYPE, new HashMap<>());
TableConfig realtimeTableConfig = getRealtimeTableConfig(taskConfigsMap);
// No segments in table
ClusterInfoAccessor mockClusterInfoProvide = mock(ClusterInfoAccessor.class);
when(mockClusterInfoProvide.getTaskStates(RealtimeToOfflineSegmentsTask.TASK_TYPE)).thenReturn(new HashMap<>());
when(mockClusterInfoProvide.getSegmentsZKMetadata(REALTIME_TABLE_NAME)).thenReturn(Lists.newArrayList());
when(mockClusterInfoProvide.getIdealState(REALTIME_TABLE_NAME))
.thenReturn(getIdealState(REALTIME_TABLE_NAME, Lists.newArrayList()));
RealtimeToOfflineSegmentsTaskGenerator generator = new RealtimeToOfflineSegmentsTaskGenerator();
generator.init(mockClusterInfoProvide);
List<PinotTaskConfig> pinotTaskConfigs = generator.generateTasks(Lists.newArrayList(realtimeTableConfig));
assertTrue(pinotTaskConfigs.isEmpty());
// No COMPLETED segments in table
SegmentZKMetadata segmentZKMetadata1 =
getSegmentZKMetadata("testTable__0__0__12345", Status.IN_PROGRESS, -1, -1, TimeUnit.MILLISECONDS, null);
when(mockClusterInfoProvide.getSegmentsZKMetadata(REALTIME_TABLE_NAME))
.thenReturn(Lists.newArrayList(segmentZKMetadata1));
when(mockClusterInfoProvide.getIdealState(REALTIME_TABLE_NAME))
.thenReturn(getIdealState(REALTIME_TABLE_NAME, Lists.newArrayList(segmentZKMetadata1.getSegmentName())));
generator = new RealtimeToOfflineSegmentsTaskGenerator();
generator.init(mockClusterInfoProvide);
pinotTaskConfigs = generator.generateTasks(Lists.newArrayList(realtimeTableConfig));
assertTrue(pinotTaskConfigs.isEmpty());
// 2 partitions. No COMPLETED segments for partition 0
SegmentZKMetadata segmentZKMetadata2 =
getSegmentZKMetadata("testTable__1__0__12345", Status.DONE, 5000, 10000, TimeUnit.MILLISECONDS, null);
SegmentZKMetadata segmentZKMetadata3 =
getSegmentZKMetadata("testTable__1__1__13456", Status.IN_PROGRESS, -1, -1, TimeUnit.MILLISECONDS, null);
when(mockClusterInfoProvide.getSegmentsZKMetadata(REALTIME_TABLE_NAME))
.thenReturn(Lists.newArrayList(segmentZKMetadata1, segmentZKMetadata2, segmentZKMetadata3));
when(mockClusterInfoProvide.getIdealState(REALTIME_TABLE_NAME)).thenReturn(getIdealState(REALTIME_TABLE_NAME,
Lists.newArrayList(segmentZKMetadata1.getSegmentName(), segmentZKMetadata2.getSegmentName(),
segmentZKMetadata3.getSegmentName())));
generator = new RealtimeToOfflineSegmentsTaskGenerator();
generator.init(mockClusterInfoProvide);
pinotTaskConfigs = generator.generateTasks(Lists.newArrayList(realtimeTableConfig));
assertTrue(pinotTaskConfigs.isEmpty());
}
|
static String buildAgentArgs(final Map<String, String> configOptions)
{
if (configOptions.isEmpty())
{
return null;
}
final StringBuilder builder = new StringBuilder();
for (final Map.Entry<String, String> entry : configOptions.entrySet())
{
builder.append(entry.getKey())
.append(VALUE_SEPARATOR)
.append(entry.getValue())
.append(OPTION_SEPARATOR);
}
if (builder.length() > 0)
{
builder.setLength(builder.length() - 1);
}
return builder.toString();
}
|
@Test
void shouldReturnNullIfConfigOptionsMapIsEmpty()
{
assertNull(buildAgentArgs(new HashMap<>()));
}
|
public void printVersionInfo() {
final String version = String.format("%s version %s",
settings.getString(Settings.KEYS.APPLICATION_NAME, "dependency-check"),
settings.getString(Settings.KEYS.APPLICATION_VERSION, "Unknown"));
System.out.println(version);
}
|
@Test
@SuppressWarnings("StringSplitter")
public void testParse_printVersionInfo() throws Exception {
PrintStream out = System.out;
ByteArrayOutputStream baos = new ByteArrayOutputStream();
System.setOut(new PrintStream(baos));
CliParser instance = new CliParser(getSettings());
instance.printVersionInfo();
try {
baos.flush();
String text = new String(baos.toByteArray(), UTF_8).toLowerCase();
String[] lines = text.split(System.getProperty("line.separator"));
Assert.assertTrue(lines.length >= 1);
Assert.assertTrue(text.contains("version"));
Assert.assertFalse(text.contains("unknown"));
} catch (IOException ex) {
System.setOut(out);
Assert.fail("CliParser.printVersionInfo did not write anything to system.out.");
} finally {
System.setOut(out);
}
}
|
@Override
public int compare(T o1, T o2) {
if (!(o1 instanceof CharSequence) || !(o2 instanceof CharSequence)) {
throw new RuntimeException("Attempted use of AvroCharSequenceComparator on non-CharSequence objects: "
+ o1.getClass().getName() + " and " + o2.getClass().getName());
}
return compareCharSequence((CharSequence) o1, (CharSequence) o2);
}
|
@Test
void compareUtf8() {
assertEquals(0, mComparator.compare(new Utf8(""), new Utf8("")));
assertThat(mComparator.compare(new Utf8(""), new Utf8("a")), lessThan(0));
assertThat(mComparator.compare(new Utf8("a"), new Utf8("")), greaterThan(0));
assertEquals(0, mComparator.compare(new Utf8("a"), new Utf8("a")));
assertThat(mComparator.compare(new Utf8("a"), new Utf8("b")), lessThan(0));
assertThat(mComparator.compare(new Utf8("b"), new Utf8("a")), greaterThan(0));
assertEquals(0, mComparator.compare(new Utf8("ab"), new Utf8("ab")));
assertThat(mComparator.compare(new Utf8("a"), new Utf8("aa")), lessThan(0));
assertThat(mComparator.compare(new Utf8("aa"), new Utf8("a")), greaterThan(0));
assertThat(mComparator.compare(new Utf8("abc"), new Utf8("abcdef")), lessThan(0));
assertThat(mComparator.compare(new Utf8("abcdef"), new Utf8("abc")), greaterThan(0));
}
|
public static boolean isValidOrigin(String sourceHost, ZeppelinConfiguration zConf)
throws UnknownHostException, URISyntaxException {
String sourceUriHost = "";
if (sourceHost != null && !sourceHost.isEmpty()) {
sourceUriHost = new URI(sourceHost).getHost();
sourceUriHost = (sourceUriHost == null) ? "" : sourceUriHost.toLowerCase();
}
sourceUriHost = sourceUriHost.toLowerCase();
String currentHost = InetAddress.getLocalHost().getHostName().toLowerCase();
return zConf.getAllowedOrigins().contains("*")
|| currentHost.equals(sourceUriHost)
|| "localhost".equals(sourceUriHost)
|| zConf.getAllowedOrigins().contains(sourceHost);
}
|
@Test
void nullOriginWithStar()
throws URISyntaxException, UnknownHostException {
assertTrue(CorsUtils.isValidOrigin(null,
ZeppelinConfiguration.load("zeppelin-site-star.xml")));
}
|
public static int formatFloatFast(float value, int maxFractionDigits, byte[] asciiBuffer)
{
if (Float.isNaN(value) ||
Float.isInfinite(value) ||
value > Long.MAX_VALUE ||
value <= Long.MIN_VALUE ||
maxFractionDigits > MAX_FRACTION_DIGITS)
{
return -1;
}
int offset = 0;
long integerPart = (long) value;
//handle sign
if (value < 0)
{
asciiBuffer[offset++] = '-';
integerPart = -integerPart;
}
//extract fraction part
long fractionPart = (long) ((Math.abs((double)value) - integerPart) * POWER_OF_TENS[maxFractionDigits] + 0.5d);
//Check for rounding to next integer
if (fractionPart >= POWER_OF_TENS[maxFractionDigits]) {
integerPart++;
fractionPart -= POWER_OF_TENS[maxFractionDigits];
}
//format integer part
offset = formatPositiveNumber(integerPart, getExponent(integerPart), false, asciiBuffer, offset);
if (fractionPart > 0 && maxFractionDigits > 0)
{
asciiBuffer[offset++] = '.';
offset = formatPositiveNumber(fractionPart, maxFractionDigits - 1, true, asciiBuffer, offset);
}
return offset;
}
|
@Test
void testFormattingInRange()
{
//Define a range to test
BigDecimal minVal = new BigDecimal("-10");
BigDecimal maxVal = new BigDecimal("10");
BigDecimal maxDelta = BigDecimal.ZERO;
Pattern pattern = Pattern.compile("^\\-?\\d+(\\.\\d+)?$");
byte[] formatBuffer = new byte[32];
for (int maxFractionDigits = 0; maxFractionDigits <= 5; maxFractionDigits++)
{
BigDecimal increment = new BigDecimal(10).pow(-maxFractionDigits, MathContext.DECIMAL128);
for (BigDecimal value = minVal; value.compareTo(maxVal) < 0; value = value.add(increment))
{
//format with the formatFloatFast method and parse back
int byteCount = NumberFormatUtil.formatFloatFast(value.floatValue(), maxFractionDigits, formatBuffer);
assertNotEquals(-1, byteCount);
String newStringResult = new String(formatBuffer, 0, byteCount, StandardCharsets.US_ASCII);
BigDecimal formattedDecimal = new BigDecimal(newStringResult);
//create new BigDecimal with float representation. This is needed because the float
//may not represent the 'value' BigDecimal precisely, in which case the formatFloatFast
//would get a different result.
BigDecimal expectedDecimal = new BigDecimal(value.floatValue());
expectedDecimal = expectedDecimal.setScale(maxFractionDigits, RoundingMode.HALF_UP);
BigDecimal diff = formattedDecimal.subtract(expectedDecimal).abs();
assertTrue(pattern.matcher(newStringResult).matches());
//Fail if diff is greater than maxDelta.
if (diff.compareTo(maxDelta) > 0)
{
fail("Expected: " + expectedDecimal + ", actual: " + newStringResult + ", diff: " + diff);
}
}
}
}
|
public static int symLink(String target, String linkname) throws IOException{
if (target == null || linkname == null) {
LOG.warn("Can not create a symLink with a target = " + target
+ " and link =" + linkname);
return 1;
}
// Run the input paths through Java's File so that they are converted to the
// native OS form
File targetFile = new File(
Path.getPathWithoutSchemeAndAuthority(new Path(target)).toString());
File linkFile = new File(
Path.getPathWithoutSchemeAndAuthority(new Path(linkname)).toString());
String[] cmd = Shell.getSymlinkCommand(
targetFile.toString(),
linkFile.toString());
ShellCommandExecutor shExec;
try {
if (Shell.WINDOWS &&
linkFile.getParentFile() != null &&
!new Path(target).isAbsolute()) {
// Relative links on Windows must be resolvable at the time of
// creation. To ensure this we run the shell command in the directory
// of the link.
//
shExec = new ShellCommandExecutor(cmd, linkFile.getParentFile());
} else {
shExec = new ShellCommandExecutor(cmd);
}
shExec.execute();
} catch (Shell.ExitCodeException ec) {
int returnVal = ec.getExitCode();
if (Shell.WINDOWS && returnVal == SYMLINK_NO_PRIVILEGE) {
LOG.warn("Fail to create symbolic links on Windows. "
+ "The default security settings in Windows disallow non-elevated "
+ "administrators and all non-administrators from creating symbolic links. "
+ "This behavior can be changed in the Local Security Policy management console");
} else if (returnVal != 0) {
LOG.warn("Command '" + StringUtils.join(" ", cmd) + "' failed "
+ returnVal + " with: " + ec.getMessage());
}
return returnVal;
} catch (IOException e) {
if (LOG.isDebugEnabled()) {
LOG.debug("Error while create symlink " + linkname + " to " + target
+ "." + " Exception: " + StringUtils.stringifyException(e));
}
throw e;
}
return shExec.getExitCode();
}
|
@Test (timeout = 30000)
public void testSymlink() throws Exception {
byte[] data = "testSymLink".getBytes();
File file = new File(del, FILE);
File link = new File(del, "_link");
//write some data to the file
FileOutputStream os = new FileOutputStream(file);
os.write(data);
os.close();
//create the symlink
FileUtil.symLink(file.getAbsolutePath(), link.getAbsolutePath());
//ensure that symlink length is correctly reported by Java
Assert.assertEquals(data.length, file.length());
Assert.assertEquals(data.length, link.length());
//ensure that we can read from link.
FileInputStream in = new FileInputStream(link);
long len = 0;
while (in.read() > 0) {
len++;
}
in.close();
Assert.assertEquals(data.length, len);
}
|
static Map<String, List<String>> determineHeaders(final Exchange exchange) {
final Message inboundMessage = exchange.getIn();
final Map<String, Object> headers = inboundMessage.getHeaders();
final Map<String, List<String>> answer = new HashMap<>();
for (final String headerName : headers.keySet()) {
final String headerNameLowercase = headerName.toLowerCase(Locale.US);
if (headerNameLowercase.startsWith("sforce") || headerNameLowercase.startsWith("x-sfdc")) {
final Object headerValue = inboundMessage.getHeader(headerName);
if (headerValue instanceof String) {
answer.put(headerName, Collections.singletonList((String) headerValue));
} else if (headerValue instanceof String[]) {
answer.put(headerName, Arrays.asList((String[]) headerValue));
} else if (headerValue instanceof Collection) {
answer.put(headerName,
((Collection<?>) headerValue).stream().map(String::valueOf).collect(Collectors.<String> toList()));
} else {
throw new IllegalArgumentException(
"Given value for header `" + headerName + "` is not a String, String array or a Collection");
}
}
}
return answer;
}
|
@Test
public void shouldDetermineHeadersForRequest() {
final CamelContext context = new DefaultCamelContext();
final Exchange exchange = new DefaultExchange(context);
final Message in = new DefaultMessage(context);
in.setHeader("sforce-auto-assign", "TRUE");
in.setHeader("SFORCE-CALL-OPTIONS", new String[] { "client=SampleCaseSensitiveToken/100", "defaultNamespace=battle" });
in.setHeader("Sforce-Limit-Info", singletonList("per-app-api-usage"));
in.setHeader("x-sfdc-packageversion-clientPackage", "1.0");
in.setHeader("Sforce-Query-Options", "batchSize=1000");
in.setHeader("Non-Related", "Header");
exchange.setIn(in);
final Map<String, List<String>> headers = AbstractClientBase.determineHeaders(exchange);
assertThat(headers).containsOnly(entry("sforce-auto-assign", singletonList("TRUE")),
entry("SFORCE-CALL-OPTIONS", asList("client=SampleCaseSensitiveToken/100", "defaultNamespace=battle")),
entry("Sforce-Limit-Info", singletonList("per-app-api-usage")),
entry("x-sfdc-packageversion-clientPackage", singletonList("1.0")),
entry("Sforce-Query-Options", singletonList("batchSize=1000")));
}
|
@Override
public boolean isEnabled() {
return branchFeatureExtension != null && branchFeatureExtension.isAvailable();
}
|
@Test
public void return_false_when_extension_returns_false() {
when(branchFeatureExtension.isAvailable()).thenReturn(false);
assertThat(new BranchFeatureProxyImpl(branchFeatureExtension).isEnabled()).isFalse();
}
|
public static void mergeParams(
Map<String, ParamDefinition> params,
Map<String, ParamDefinition> paramsToMerge,
MergeContext context) {
if (paramsToMerge == null) {
return;
}
Stream.concat(params.keySet().stream(), paramsToMerge.keySet().stream())
.forEach(
name -> {
ParamDefinition paramToMerge = paramsToMerge.get(name);
if (paramToMerge == null) {
return;
}
if (paramToMerge.getType() == ParamType.MAP && paramToMerge.isLiteral()) {
Map<String, ParamDefinition> baseMap = mapValueOrEmpty(params, name);
Map<String, ParamDefinition> toMergeMap = mapValueOrEmpty(paramsToMerge, name);
mergeParams(
baseMap,
toMergeMap,
MergeContext.copyWithParentMode(
context, params.getOrDefault(name, paramToMerge).getMode()));
params.put(
name,
buildMergedParamDefinition(
name, paramToMerge, params.get(name), context, baseMap));
} else if (paramToMerge.getType() == ParamType.STRING_MAP
&& paramToMerge.isLiteral()) {
Map<String, String> baseMap = stringMapValueOrEmpty(params, name);
Map<String, String> toMergeMap = stringMapValueOrEmpty(paramsToMerge, name);
baseMap.putAll(toMergeMap);
params.put(
name,
buildMergedParamDefinition(
name, paramToMerge, params.get(name), context, baseMap));
} else {
params.put(
name,
buildMergedParamDefinition(
name, paramToMerge, params.get(name), context, paramToMerge.getValue()));
}
});
}
|
@Test
public void testMergeDisallowUpstreamUpdatesInternalModeNoSource() {
AssertHelper.assertThrows(
"Should not allow modifying internal mode",
MaestroValidationException.class,
"Cannot modify system mode for parameter [tomerge]",
new Runnable() {
@SneakyThrows
@Override
public void run() {
Map<String, ParamDefinition> allParams =
parseParamDefMap("{'tomerge': {'type': 'LONG','value': 2}}");
Map<String, ParamDefinition> paramsToMerge =
parseParamDefMap(
"{'tomerge': {'type': 'LONG', 'value': 3, 'internal_mode': 'OPTIONAL'}}");
ParamsMergeHelper.mergeParams(allParams, paramsToMerge, upstreamMergeContext);
}
});
}
|
public static <K, V> AsMap<K, V> asMap() {
return new AsMap<>(false);
}
|
@Test
@Category(NeedsRunner.class)
public void testMapInMemorySideInputWithNonStructuralKey() {
final PCollectionView<Map<byte[], Integer>> view =
pipeline
.apply(
"CreateSideInput",
Create.of(
KV.of("a".getBytes(StandardCharsets.UTF_8), 1),
KV.of("b".getBytes(StandardCharsets.UTF_8), 3)))
.apply(View.<byte[], Integer>asMap().inMemory());
PCollection<KV<String, Integer>> output =
pipeline
.apply("CreateMainInput", Create.of("apple", "banana", "blackberry"))
.apply(
"OutputSideInputs",
ParDo.of(
new DoFn<String, KV<String, Integer>>() {
@ProcessElement
public void processElement(ProcessContext c) {
c.output(
KV.of(
c.element(),
c.sideInput(view)
.get(
c.element()
.substring(0, 1)
.getBytes(StandardCharsets.UTF_8))));
}
})
.withSideInputs(view));
PAssert.that(output)
.containsInAnyOrder(KV.of("apple", 1), KV.of("banana", 3), KV.of("blackberry", 3));
pipeline.run();
}
|
public static String getChecksum(String algorithm, File file) throws NoSuchAlgorithmException, IOException {
FileChecksums fileChecksums = CHECKSUM_CACHE.get(file);
if (fileChecksums == null) {
try (InputStream stream = Files.newInputStream(file.toPath())) {
final MessageDigest md5Digest = getMessageDigest(MD5);
final MessageDigest sha1Digest = getMessageDigest(SHA1);
final MessageDigest sha256Digest = getMessageDigest(SHA256);
final byte[] buffer = new byte[BUFFER_SIZE];
int read = stream.read(buffer, 0, BUFFER_SIZE);
while (read > -1) {
// update all checksums together instead of reading the file multiple times
md5Digest.update(buffer, 0, read);
sha1Digest.update(buffer, 0, read);
sha256Digest.update(buffer, 0, read);
read = stream.read(buffer, 0, BUFFER_SIZE);
}
fileChecksums = new FileChecksums(
getHex(md5Digest.digest()),
getHex(sha1Digest.digest()),
getHex(sha256Digest.digest())
);
CHECKSUM_CACHE.put(file, fileChecksums);
}
}
switch (algorithm.toUpperCase()) {
case MD5:
return fileChecksums.md5;
case SHA1:
return fileChecksums.sha1;
case SHA256:
return fileChecksums.sha256;
default:
throw new NoSuchAlgorithmException(algorithm);
}
}
|
@Test
public void testGetChecksum_String_File() throws Exception {
String algorithm = "MD5";
File file = new File(this.getClass().getClassLoader().getResource("checkSumTest.file").toURI().getPath());
String expResult = "f0915c5f46b8cfa283e5ad67a09b3793";
String result = Checksum.getChecksum(algorithm, file);
assertEquals(expResult, result);
//get checksum from cache on 2nd call
result = Checksum.getChecksum(algorithm, file);
assertEquals(expResult, result);
}
|
@Override
@Deprecated
public <VR> KStream<K, VR> transformValues(final org.apache.kafka.streams.kstream.ValueTransformerSupplier<? super V, ? extends VR> valueTransformerSupplier,
final String... stateStoreNames) {
Objects.requireNonNull(valueTransformerSupplier, "valueTransformerSupplier can't be null");
return doTransformValues(
toValueTransformerWithKeySupplier(valueTransformerSupplier),
NamedInternal.empty(),
stateStoreNames);
}
|
@Test
@SuppressWarnings("deprecation")
public void shouldNotAllowNullNamedOnTransformValuesWithValueTransformerSupplier() {
final NullPointerException exception = assertThrows(
NullPointerException.class,
() -> testStream.transformValues(
valueTransformerSupplier,
(Named) null));
assertThat(exception.getMessage(), equalTo("named can't be null"));
}
|
public byte[] getUnknown_0044() {
return unknown_0044;
}
|
@Test
public void testGetUnknown_0044() {
assertEquals(TestParameters.VP_ITSP_BYTEARR_LEN, chmItspHeader.getUnknown_0044().length);
}
|
@Nullable
@Override
public RecordAndPosition<E> next() {
if (pos < num) {
recordAndPosition.setNext(records[pos++]);
return recordAndPosition;
} else {
return null;
}
}
|
@Test
void testEmptyConstruction() {
final ArrayResultIterator<Object> iter = new ArrayResultIterator<>();
assertThat(iter.next()).isNull();
}
|
@Override
public void write(final MySQLPacketPayload payload, final Object value) {
LocalDateTime localDateTime = LocalDateTime.ofInstant(Instant.ofEpochMilli(((Time) value).getTime()), ZoneId.systemDefault());
int hours = localDateTime.getHour();
int minutes = localDateTime.getMinute();
int seconds = localDateTime.getSecond();
int nanos = localDateTime.getNano();
boolean isTimeAbsent = 0 == hours && 0 == minutes && 0 == seconds;
boolean isNanosAbsent = 0 == nanos;
if (isTimeAbsent && isNanosAbsent) {
payload.writeInt1(0);
return;
}
if (isNanosAbsent) {
payload.writeInt1(8);
writeTime(payload, hours, minutes, seconds);
return;
}
payload.writeInt1(12);
writeTime(payload, hours, minutes, seconds);
writeNanos(payload, nanos);
}
|
@Test
void assertWriteWithTwelveBytes() {
MySQLTimeBinaryProtocolValue actual = new MySQLTimeBinaryProtocolValue();
actual.write(payload, new Time(1L));
verify(payload, atLeastOnce()).writeInt1(12);
verify(payload, times(5)).writeInt1(anyInt());
verify(payload).writeInt4(0);
verify(payload).writeInt4(1000000);
}
|
@CanIgnoreReturnValue
@SuppressWarnings("deprecation") // TODO(b/134064106): design an alternative to no-arg check()
public final Ordered containsExactly() {
return check().about(iterableEntries()).that(checkNotNull(actual).entries()).containsExactly();
}
|
@Test
public void containsExactlyFailureWithEmptyStringExtra() {
expectFailureWhenTestingThat(ImmutableMultimap.of("a", "", "", "")).containsExactly("a", "");
assertFailureKeys("unexpected", "---", "expected", "but was");
assertFailureValue("unexpected", "{\"\" (empty String)=[\"\" (empty String)]}");
assertFailureValue("expected", "{a=[\"\" (empty String)]}");
assertFailureValue("but was", "{a=[], =[]}");
}
|
public static String getJobBarrierDisablePath(final String jobId) {
return String.join("/", getJobRootPath(jobId), "barrier", "disable");
}
|
@Test
void assertGetJobBarrierDisablePath() {
assertThat(PipelineMetaDataNode.getJobBarrierDisablePath(jobId), is(jobRootPath + "/barrier/disable"));
}
|
@Override
public ObjectNode encode(Ethernet ethernet, CodecContext context) {
checkNotNull(ethernet, "Ethernet cannot be null");
final ObjectNode result = context.mapper().createObjectNode()
.put("vlanId", ethernet.getVlanID())
.put("etherType", ethernet.getEtherType())
.put("priorityCode", ethernet.getPriorityCode())
.put("pad", ethernet.isPad());
if (ethernet.getDestinationMAC() != null) {
result.put("destMac",
ethernet.getDestinationMAC().toString());
}
if (ethernet.getSourceMAC() != null) {
result.put("srcMac",
ethernet.getSourceMAC().toString());
}
return result;
}
|
@Test
public void ethernetCodecTest() {
final CodecContext context = new MockCodecContext();
final JsonCodec<Ethernet> ethernetCodec = context.codec(Ethernet.class);
assertThat(ethernetCodec, notNullValue());
final Ethernet eth1 = new Ethernet();
eth1.setSourceMACAddress("11:22:33:44:55:01");
eth1.setDestinationMACAddress("11:22:33:44:55:02");
eth1.setPad(true);
eth1.setEtherType(Ethernet.TYPE_ARP);
eth1.setPriorityCode((byte) 7);
eth1.setVlanID((short) 33);
final ObjectNode eth1Json = ethernetCodec.encode(eth1, context);
assertThat(eth1Json, notNullValue());
assertThat(eth1Json, matchesEthernet(eth1));
}
|
public static void preserve(FileSystem targetFS, Path path,
CopyListingFileStatus srcFileStatus,
EnumSet<FileAttribute> attributes,
boolean preserveRawXattrs) throws IOException {
// strip out those attributes we don't need any more
attributes.remove(FileAttribute.BLOCKSIZE);
attributes.remove(FileAttribute.CHECKSUMTYPE);
// If not preserving anything from FileStatus, don't bother fetching it.
FileStatus targetFileStatus = attributes.isEmpty() ? null :
targetFS.getFileStatus(path);
String group = targetFileStatus == null ? null :
targetFileStatus.getGroup();
String user = targetFileStatus == null ? null :
targetFileStatus.getOwner();
boolean chown = false;
if (attributes.contains(FileAttribute.ACL)) {
List<AclEntry> srcAcl = srcFileStatus.getAclEntries();
List<AclEntry> targetAcl = getAcl(targetFS, targetFileStatus);
if (!srcAcl.equals(targetAcl)) {
targetFS.removeAcl(path);
targetFS.setAcl(path, srcAcl);
}
// setAcl doesn't preserve sticky bit, so also call setPermission if needed.
if (srcFileStatus.getPermission().getStickyBit() !=
targetFileStatus.getPermission().getStickyBit()) {
targetFS.setPermission(path, srcFileStatus.getPermission());
}
} else if (attributes.contains(FileAttribute.PERMISSION) &&
!srcFileStatus.getPermission().equals(targetFileStatus.getPermission())) {
targetFS.setPermission(path, srcFileStatus.getPermission());
}
final boolean preserveXAttrs = attributes.contains(FileAttribute.XATTR);
if (preserveXAttrs || preserveRawXattrs) {
final String rawNS =
StringUtils.toLowerCase(XAttr.NameSpace.RAW.name());
Map<String, byte[]> srcXAttrs = srcFileStatus.getXAttrs();
Map<String, byte[]> targetXAttrs = getXAttrs(targetFS, path);
if (srcXAttrs != null && !srcXAttrs.equals(targetXAttrs)) {
for (Entry<String, byte[]> entry : srcXAttrs.entrySet()) {
String xattrName = entry.getKey();
if (xattrName.startsWith(rawNS) || preserveXAttrs) {
targetFS.setXAttr(path, xattrName, entry.getValue());
}
}
}
}
// The replication factor can only be preserved for replicated files.
// It is ignored when either the source or target file are erasure coded.
if (attributes.contains(FileAttribute.REPLICATION) &&
!targetFileStatus.isDirectory() &&
!targetFileStatus.isErasureCoded() &&
!srcFileStatus.isErasureCoded() &&
srcFileStatus.getReplication() != targetFileStatus.getReplication()) {
targetFS.setReplication(path, srcFileStatus.getReplication());
}
if (attributes.contains(FileAttribute.GROUP) &&
!group.equals(srcFileStatus.getGroup())) {
group = srcFileStatus.getGroup();
chown = true;
}
if (attributes.contains(FileAttribute.USER) &&
!user.equals(srcFileStatus.getOwner())) {
user = srcFileStatus.getOwner();
chown = true;
}
if (chown) {
targetFS.setOwner(path, user, group);
}
if (attributes.contains(FileAttribute.TIMES)) {
targetFS.setTimes(path,
srcFileStatus.getModificationTime(),
srcFileStatus.getAccessTime());
}
}
|
@Test
public void testPreserveOnFileUpwardRecursion() throws IOException {
FileSystem fs = FileSystem.get(config);
EnumSet<FileAttribute> attributes = EnumSet.allOf(FileAttribute.class);
// Remove ACL because tests run with dfs.namenode.acls.enabled false
attributes.remove(FileAttribute.ACL);
Path src = new Path("/tmp/src2");
Path f0 = new Path("/f0");
Path f1 = new Path("/d1/f1");
Path f2 = new Path("/d1/d2/f2");
Path d1 = new Path("/d1/");
Path d2 = new Path("/d1/d2/");
createFile(fs, src);
createFile(fs, f0);
createFile(fs, f1);
createFile(fs, f2);
fs.setPermission(src, almostFullPerm);
fs.setOwner(src, "somebody", "somebody-group");
fs.setTimes(src, 0, 0);
fs.setReplication(src, (short) 1);
fs.setPermission(d1, fullPerm);
fs.setOwner(d1, "anybody", "anybody-group");
fs.setTimes(d1, 400, 400);
fs.setReplication(d1, (short) 3);
fs.setPermission(d2, fullPerm);
fs.setOwner(d2, "anybody", "anybody-group");
fs.setTimes(d2, 300, 300);
fs.setReplication(d2, (short) 3);
fs.setPermission(f0, fullPerm);
fs.setOwner(f0, "anybody", "anybody-group");
fs.setTimes(f0, 200, 200);
fs.setReplication(f0, (short) 3);
fs.setPermission(f1, fullPerm);
fs.setOwner(f1, "anybody", "anybody-group");
fs.setTimes(f1, 200, 200);
fs.setReplication(f1, (short) 3);
fs.setPermission(f2, fullPerm);
fs.setOwner(f2, "anybody", "anybody-group");
fs.setTimes(f2, 200, 200);
fs.setReplication(f2, (short) 3);
CopyListingFileStatus srcStatus = new CopyListingFileStatus(fs.getFileStatus(src));
DistCpUtils.preserve(fs, f2, srcStatus, attributes, false);
cluster.triggerHeartbeats();
// FileStatus.equals only compares path field, must explicitly compare all fields
// attributes of src -> f2 ? should be yes
assertStatusEqual(fs, f2, srcStatus);
// attributes of src -> f1 ? should be no
CopyListingFileStatus f1Status = new CopyListingFileStatus(fs.getFileStatus(f1));
Assert.assertFalse(srcStatus.getPermission().equals(f1Status.getPermission()));
Assert.assertFalse(srcStatus.getOwner().equals(f1Status.getOwner()));
Assert.assertFalse(srcStatus.getGroup().equals(f1Status.getGroup()));
Assert.assertFalse(srcStatus.getAccessTime() == f1Status.getAccessTime());
Assert.assertFalse(srcStatus.getModificationTime() == f1Status.getModificationTime());
Assert.assertFalse(srcStatus.getReplication() == f1Status.getReplication());
// attributes of src -> f0 ? should be no
CopyListingFileStatus f0Status = new CopyListingFileStatus(fs.getFileStatus(f0));
Assert.assertFalse(srcStatus.getPermission().equals(f0Status.getPermission()));
Assert.assertFalse(srcStatus.getOwner().equals(f0Status.getOwner()));
Assert.assertFalse(srcStatus.getGroup().equals(f0Status.getGroup()));
Assert.assertFalse(srcStatus.getAccessTime() == f0Status.getAccessTime());
Assert.assertFalse(srcStatus.getModificationTime() == f0Status.getModificationTime());
Assert.assertFalse(srcStatus.getReplication() == f0Status.getReplication());
// attributes of src -> d2 ? should be no
CopyListingFileStatus d2Status = new CopyListingFileStatus(fs.getFileStatus(d2));
Assert.assertFalse(srcStatus.getPermission().equals(d2Status.getPermission()));
Assert.assertFalse(srcStatus.getOwner().equals(d2Status.getOwner()));
Assert.assertFalse(srcStatus.getGroup().equals(d2Status.getGroup()));
Assert.assertTrue(d2Status.getAccessTime() == 300);
Assert.assertTrue(d2Status.getModificationTime() == 300);
Assert.assertFalse(srcStatus.getReplication() == d2Status.getReplication());
// attributes of src -> d1 ? should be no
CopyListingFileStatus d1Status = new CopyListingFileStatus(fs.getFileStatus(d1));
Assert.assertFalse(srcStatus.getPermission().equals(d1Status.getPermission()));
Assert.assertFalse(srcStatus.getOwner().equals(d1Status.getOwner()));
Assert.assertFalse(srcStatus.getGroup().equals(d1Status.getGroup()));
Assert.assertTrue(d1Status.getAccessTime() == 400);
Assert.assertTrue(d1Status.getModificationTime() == 400);
Assert.assertFalse(srcStatus.getReplication() == d1Status.getReplication());
}
|
@Subscribe
public void inputCreated(InputCreated inputCreatedEvent) {
final String inputId = inputCreatedEvent.id();
LOG.debug("Input created: {}", inputId);
final Input input;
try {
input = inputService.find(inputId);
} catch (NotFoundException e) {
LOG.warn("Received InputCreated event but could not find input {}", inputId, e);
return;
}
final IOState<MessageInput> inputState = inputRegistry.getInputState(inputId);
if (inputState != null) {
inputRegistry.remove(inputState);
}
if (input.isGlobal() || this.nodeId.getNodeId().equals(input.getNodeId())) {
startInput(input);
}
}
|
@Test
public void inputCreatedDoesNothingIfInputDoesNotExist() throws Exception {
final String inputId = "input-id";
when(inputService.find(inputId)).thenThrow(NotFoundException.class);
listener.inputCreated(InputCreated.create(inputId));
verifyNoMoreInteractions(inputLauncher, inputRegistry);
}
|
public boolean appliesTo(String pipelineName, String stageName) {
boolean pipelineMatches = this.pipelineName.equals(pipelineName) ||
this.pipelineName.equals(GoConstants.ANY_PIPELINE);
boolean stageMatches = this.stageName.equals(stageName) ||
this.stageName.equals(GoConstants.ANY_STAGE);
return pipelineMatches && stageMatches;
}
|
@Test
void shouldNotApplyIfStageDiffers() {
NotificationFilter filter = new NotificationFilter("cruise2", "devo", StageEvent.Breaks, false);
assertThat(filter.appliesTo("cruise2", "dev")).isFalse();
}
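// Illustrative sketch, assuming the GoConstants.ANY_PIPELINE and GoConstants.ANY_STAGE
// wildcard constants referenced by appliesTo above: a wildcard matches every value on
// that side, while the other side still has to match exactly.
@Test
void wildcardsMatchAnyPipelineOrStage() {
NotificationFilter anyPipeline = new NotificationFilter(GoConstants.ANY_PIPELINE, "dev", StageEvent.Breaks, false);
assertThat(anyPipeline.appliesTo("cruise2", "dev")).isTrue();
assertThat(anyPipeline.appliesTo("cruise2", "prod")).isFalse();
NotificationFilter anyStage = new NotificationFilter("cruise2", GoConstants.ANY_STAGE, StageEvent.Breaks, false);
assertThat(anyStage.appliesTo("cruise2", "anything")).isTrue();
}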
|
public static Builder custom() {
return new Builder();
}
|
@Test(expected = IllegalStateException.class)
public void shouldNotUseWaitIntervalFunctionInOpenStateAndWaitDurationInOpenStateTogether() {
custom()
.waitDurationInOpenState(Duration.ofMillis(3333))
.waitIntervalFunctionInOpenState(IntervalFunction.of(Duration.ofMillis(1234)))
.build();
}
|
public static <V, K> Map<K, V> toIdentityMap(Collection<V> collection, Function<V, K> key) {
return toIdentityMap(collection, key, false);
}
|
@Test
public void testToIdentityMap() {
Map<Long, Student> map = CollStreamUtil.toIdentityMap(null, Student::getStudentId);
assertEquals(map, Collections.EMPTY_MAP);
List<Student> list = new ArrayList<>();
map = CollStreamUtil.toIdentityMap(list, Student::getStudentId);
assertEquals(map, Collections.EMPTY_MAP);
list.add(new Student(1, 1, 1, "张三"));
list.add(new Student(1, 1, 2, "李四"));
list.add(new Student(1, 1, 3, "王五"));
map = CollStreamUtil.toIdentityMap(list, Student::getStudentId);
assertEquals(map.get(1L).getName(), "张三");
assertEquals(map.get(2L).getName(), "李四");
assertEquals(map.get(3L).getName(), "王五");
assertNull(map.get(4L));
// test the case where a list element is null
list.add(null);
map = CollStreamUtil.toIdentityMap(list, Student::getStudentId);
assertNull(map.get(4L));
}
|
public ChannelStateWriteRequestExecutor getOrCreateExecutor(
JobVertexID jobVertexID,
int subtaskIndex,
SupplierWithException<CheckpointStorageWorkerView, ? extends IOException>
checkpointStorageWorkerViewSupplier,
int maxSubtasksPerChannelStateFile) {
return getOrCreateExecutor(
jobVertexID,
subtaskIndex,
checkpointStorageWorkerViewSupplier,
maxSubtasksPerChannelStateFile,
true);
}
|
@Test
void testSomeSubtasksCloseDuringOtherSubtasksStarting() throws Exception {
JobID jobID = new JobID();
JobVertexID jobVertexID = new JobVertexID();
int numberOfSubtask = 100_000;
int maxSubtasksPerChannelStateFile = 10;
ChannelStateWriteRequestExecutorFactory executorFactory =
new ChannelStateWriteRequestExecutorFactory(jobID);
BlockingQueue<ChannelStateWriteRequestExecutor> queue = new LinkedBlockingQueue<>(100);
CompletableFuture<Void> createFuture = new CompletableFuture<>();
new Thread(
() -> {
try {
for (int i = 0; i < numberOfSubtask; i++) {
ChannelStateWriteRequestExecutor executor =
executorFactory.getOrCreateExecutor(
jobVertexID,
i,
() ->
CHECKPOINT_STORAGE
.createCheckpointStorage(jobID),
maxSubtasksPerChannelStateFile,
false);
assertThat(executor).isNotNull();
queue.put(executor);
}
createFuture.complete(null);
} catch (Throwable e) {
createFuture.completeExceptionally(e);
}
})
.start();
CompletableFuture<Void> releaseFuture = new CompletableFuture<>();
new Thread(
() -> {
try {
for (int i = 0; i < numberOfSubtask; i++) {
ChannelStateWriteRequestExecutor executor = queue.take();
executor.releaseSubtask(jobVertexID, numberOfSubtask);
}
releaseFuture.complete(null);
} catch (Throwable e) {
releaseFuture.completeExceptionally(e);
}
})
.start();
createFuture.get();
releaseFuture.get();
}
|
static Map<Integer, Schema.Field> mapFieldPositions(CSVFormat format, Schema schema) {
List<String> header = Arrays.asList(format.getHeader());
Map<Integer, Schema.Field> indexToFieldMap = new HashMap<>();
for (Schema.Field field : schema.getFields()) {
int index = getIndex(header, field);
if (index >= 0) {
indexToFieldMap.put(index, field);
}
}
return indexToFieldMap;
}
|
@Test
public void givenMatchingHeaderAndSchemaField_mapsPositions() {
Schema schema =
Schema.builder()
.addStringField("a_string")
.addDoubleField("a_double")
.addInt32Field("an_integer")
.build();
ImmutableMap<Integer, Schema.Field> want =
ImmutableMap.of(
0,
schema.getField("a_string"),
1,
schema.getField("an_integer"),
2,
schema.getField("a_double"));
Map<Integer, Schema.Field> got =
CsvIOParseHelpers.mapFieldPositions(
csvFormat().withHeader("a_string", "an_integer", "a_double"), schema);
assertEquals(want, got);
}
|
public boolean isRetryGlobal() {
return retryGlobal;
}
|
@Test
public void testIsRetryGlobal() {
// Test the isRetryGlobal method
assertTrue(event.isRetryGlobal());
}
|
public HsDataView registerNewConsumer(
int subpartitionId,
HsConsumerId consumerId,
HsSubpartitionConsumerInternalOperations operation)
throws IOException {
synchronized (lock) {
checkState(!isReleased, "HsFileDataManager is already released.");
lazyInitialize();
HsSubpartitionFileReader subpartitionReader =
fileReaderFactory.createFileReader(
subpartitionId,
consumerId,
dataFileChannel,
operation,
dataIndex,
hybridShuffleConfiguration.getMaxBuffersReadAhead(),
this::releaseSubpartitionReader,
headerBuf);
allReaders.add(subpartitionReader);
mayTriggerReading();
return subpartitionReader;
}
}
|
@Test
void testRegisterReaderTriggerRun() throws Exception {
TestingHsSubpartitionFileReader reader = new TestingHsSubpartitionFileReader();
reader.setReadBuffersConsumer(
(requestedBuffers, readBuffers) -> readBuffers.addAll(requestedBuffers));
factory.allReaders.add(reader);
assertThat(reader.readBuffers).isEmpty();
fileDataManager.registerNewConsumer(0, DEFAULT, subpartitionViewOperation);
ioExecutor.trigger();
assertThat(reader.readBuffers).hasSize(BUFFER_POOL_SIZE);
}
|
public static boolean isServletRequestAuthenticatorInstanceOf(Class<? extends ServletRequestAuthenticator> clazz) {
final AuthCheckFilter instance = getInstance();
if (instance == null) {
// We've not yet been instantiated
return false;
}
return servletRequestAuthenticator != null && clazz.isAssignableFrom(servletRequestAuthenticator.getClass());
}
|
@Test
public void willReturnFalseIfNoServletRequestAuthenticatorIsConfigured() {
new AuthCheckFilter(adminManager, loginLimitManager);
assertThat(AuthCheckFilter.isServletRequestAuthenticatorInstanceOf(AdminUserServletAuthenticatorClass.class), is(false));
}
|
@Override
public void analyze(Analyzer analyzer) throws AnalysisException {
}
|
@Test
public void testGetTuple() throws AnalysisException {
ArrayList<Expr> groupingExprs = new ArrayList<>();
String[] cols = {"k1", "k2", "k3", "k1"};
for (String col : cols) {
Expr expr = new SlotRef(new TableName("testdb", "t"), col);
groupingExprs.add(expr);
}
GroupByClause groupByClause = new GroupByClause(Expr.cloneList(groupingExprs),
GroupByClause.GroupingType.GROUP_BY);
try {
groupByClause.analyze(analyzer);
} catch (AnalysisException exception) {
Assert.fail();
}
}
|
@Override
public Object handle(ProceedingJoinPoint proceedingJoinPoint, Bulkhead bulkhead,
String methodName) throws Throwable {
BulkheadOperator<?> bulkheadOperator = BulkheadOperator.of(bulkhead);
Object returnValue = proceedingJoinPoint.proceed();
return executeRxJava2Aspect(bulkheadOperator, returnValue);
}
|
@Test
public void testRxJava2Types() throws Throwable {
Bulkhead bulkhead = Bulkhead.ofDefaults("test");
when(proceedingJoinPoint.proceed()).thenReturn(Single.just("Test"));
assertThat(rxJava2BulkheadAspectExt.handle(proceedingJoinPoint, bulkhead, "testMethod"))
.isNotNull();
when(proceedingJoinPoint.proceed()).thenReturn(Flowable.just("Test"));
assertThat(rxJava2BulkheadAspectExt.handle(proceedingJoinPoint, bulkhead, "testMethod"))
.isNotNull();
}
|
public boolean installIfNecessaryFrom(@NonNull URL archive, @CheckForNull TaskListener listener, @NonNull String message) throws IOException, InterruptedException {
if (listener == null) {
listener = TaskListener.NULL;
}
return installIfNecessaryFrom(archive, listener, message, MAX_REDIRECTS);
}
|
@Issue("JENKINS-16215")
@Test public void installIfNecessaryPerformsInstallation() throws Exception {
File tmp = temp.getRoot();
final FilePath d = new FilePath(tmp);
final HttpURLConnection con = mock(HttpURLConnection.class);
final URL url = someUrlToZipFile(con);
when(con.getResponseCode())
.thenReturn(HttpURLConnection.HTTP_OK);
when(con.getInputStream())
.thenReturn(someZippedContent());
assertTrue(d.installIfNecessaryFrom(url, null, "message if failed"));
}
|
public LeaderAndIsr newLeaderAndIsr(int leader, List<Integer> isr) {
return new LeaderAndIsr(leader, leaderEpoch + 1, isr, leaderRecoveryState, partitionEpoch);
}
|
@Test
public void testNewLeaderAndIsr() {
LeaderAndIsr leaderAndIsr = new LeaderAndIsr(1, Arrays.asList(1, 2));
LeaderAndIsr newLeaderAndIsr = leaderAndIsr.newLeaderAndIsr(2, Arrays.asList(1, 2));
assertEquals(2, newLeaderAndIsr.leader());
assertEquals(Arrays.asList(1, 2), newLeaderAndIsr.isr());
assertEquals(LeaderRecoveryState.RECOVERED, newLeaderAndIsr.leaderRecoveryState());
}
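// Companion sketch: per the constructor call above (leaderEpoch + 1), newLeaderAndIsr
// also bumps the leader epoch by one; this assumes the leaderEpoch() accessor used here.
@Test
public void testNewLeaderAndIsrBumpsLeaderEpoch() {
LeaderAndIsr leaderAndIsr = new LeaderAndIsr(1, Arrays.asList(1, 2));
LeaderAndIsr newLeaderAndIsr = leaderAndIsr.newLeaderAndIsr(1, Arrays.asList(1, 2));
assertEquals(leaderAndIsr.leaderEpoch() + 1, newLeaderAndIsr.leaderEpoch());
}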
|
<T extends PipelineOptions> T as(Class<T> iface) {
checkNotNull(iface);
checkArgument(iface.isInterface(), "Not an interface: %s", iface);
T existingOption = computedProperties.interfaceToProxyCache.getInstance(iface);
if (existingOption == null) {
synchronized (this) {
// double check
existingOption = computedProperties.interfaceToProxyCache.getInstance(iface);
if (existingOption == null) {
Registration<T> registration =
PipelineOptionsFactory.CACHE
.get()
.validateWellFormed(iface, computedProperties.knownInterfaces);
List<PropertyDescriptor> propertyDescriptors = registration.getPropertyDescriptors();
Class<T> proxyClass = registration.getProxyClass();
existingOption =
InstanceBuilder.ofType(proxyClass)
.fromClass(proxyClass)
.withArg(InvocationHandler.class, this)
.build();
computedProperties =
computedProperties.updated(iface, existingOption, propertyDescriptors);
}
}
}
return existingOption;
}
|
@Test
public void testDisplayDataJsonSerialization() throws IOException {
FooOptions options = PipelineOptionsFactory.as(FooOptions.class);
options.setFoo("bar");
@SuppressWarnings("unchecked")
Map<String, Object> map = MAPPER.readValue(MAPPER.writeValueAsBytes(options), Map.class);
assertThat("main pipeline options data keyed as 'options'", map, Matchers.hasKey("options"));
assertThat("display data keyed as 'display_data'", map, Matchers.hasKey("display_data"));
Map<?, ?> expectedDisplayItem =
ImmutableMap.<String, String>builder()
.put("namespace", FooOptions.class.getName())
.put("key", "foo")
.put("value", "bar")
.put("type", "STRING")
.build();
@SuppressWarnings("unchecked")
List<Map<?, ?>> deserializedDisplayData = (List<Map<?, ?>>) map.get("display_data");
assertThat(deserializedDisplayData, hasItem(expectedDisplayItem));
}
|
public static String parsePath(String uri, Map<String, String> patterns) {
if (uri == null) {
return null;
} else if (StringUtils.isBlank(uri)) {
return String.valueOf(SLASH);
}
CharacterIterator ci = new StringCharacterIterator(uri);
StringBuilder pathBuffer = new StringBuilder();
char c = ci.first();
if (c == CharacterIterator.DONE) {
return String.valueOf(SLASH);
}
do {
if (c == OPEN) {
String regexBuffer = cutParameter(ci, patterns);
if (regexBuffer == null) {
LOGGER.warn("Operation path \"{}\" contains syntax error.", uri);
return null;
}
pathBuffer.append(regexBuffer);
} else {
int length = pathBuffer.length();
if (!(c == SLASH && (length != 0 && pathBuffer.charAt(length - 1) == SLASH))) {
pathBuffer.append(c);
}
}
} while ((c = ci.next()) != CharacterIterator.DONE);
return pathBuffer.toString();
}
|
@Test(description = "not fail when passed path is empty")
public void testEmptyPath() {
final Map<String, String> regexMap = new HashMap<String, String>();
final String path = PathUtils.parsePath("", regexMap);
assertEquals(path, "/");
}
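// Follow-on sketch derived from the parsePath implementation above: a null uri is
// passed through as null and repeated slashes are collapsed into a single separator.
@Test(description = "pass null through and collapse duplicate slashes")
public void testNullAndDuplicateSlashes() {
final Map<String, String> regexMap = new HashMap<String, String>();
assertNull(PathUtils.parsePath(null, regexMap));
assertEquals(PathUtils.parsePath("//pets//findByStatus", regexMap), "/pets/findByStatus");
}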
|
public static void checkValidWriteSchema(GroupType schema) {
schema.accept(new TypeVisitor() {
@Override
public void visit(GroupType groupType) {
if (groupType.getFieldCount() <= 0) {
throw new InvalidSchemaException("Cannot write a schema with an empty group: " + groupType);
}
for (Type type : groupType.getFields()) {
type.accept(this);
}
}
@Override
public void visit(MessageType messageType) {
visit((GroupType) messageType);
}
@Override
public void visit(PrimitiveType primitiveType) {}
});
}
|
@Test
public void testWriteCheckMessageType() {
TypeUtil.checkValidWriteSchema(Types.buildMessage()
.required(INT32)
.named("a")
.optional(BINARY)
.as(UTF8)
.named("b")
.named("valid_schema"));
TestTypeBuilders.assertThrows(
"Should complain about empty MessageType", InvalidSchemaException.class, (Callable<Void>) () -> {
TypeUtil.checkValidWriteSchema(new MessageType("invalid_schema"));
return null;
});
}
|
public static int getClusterControllerIndex(ConfigId configId) {
Matcher matcher = CONTROLLER_INDEX_PATTERN.matcher(configId.s());
if (!matcher.matches()) {
throw new IllegalArgumentException("Unable to extract cluster controller index from config ID " + configId);
}
return Integer.parseInt(matcher.group(1));
}
|
@Test(expected = IllegalArgumentException.class)
public void testBadClusterControllerConfigId() {
ConfigId configId = new ConfigId("fantasy_sports/storage/9");
VespaModelUtil.getClusterControllerIndex(configId);
fail();
}
|
protected void getControls() {
// Not all of these controls are created at the same time. That's OK; for now, just check
// each one for null before using.
dialogDeck = (XulDeck) document.getElementById( "dialog-panel-deck" );
deckOptionsBox = (XulListbox) document.getElementById( "deck-options-list" );
connectionBox = (XulListbox) document.getElementById( "connection-type-list" );
databaseDialectList = (XulMenuList) document.getElementById( "database-dialect-list" );
accessBox = (XulListbox) document.getElementById( "access-type-list" );
connectionNameBox = (XulTextbox) document.getElementById( "connection-name-text" );
hostNameBox = (XulTextbox) document.getElementById( "server-host-name-text" );
databaseNameBox = (XulTextbox) document.getElementById( "database-name-text" );
portNumberBox = (XulTextbox) document.getElementById( "port-number-text" );
userNameBox = (XulTextbox) document.getElementById( "username-text" );
passwordBox = (XulTextbox) document.getElementById( "password-text" );
dataTablespaceBox = (XulTextbox) document.getElementById( "data-tablespace-text" );
indexTablespaceBox = (XulTextbox) document.getElementById( "index-tablespace-text" );
serverInstanceBox = (XulTextbox) document.getElementById( "instance-text" );
serverNameBox = (XulTextbox) document.getElementById( "server-name-text" );
customUrlBox = (XulTextbox) document.getElementById( "custom-url-text" );
customDriverClassBox = (XulTextbox) document.getElementById( "custom-driver-class-text" );
languageBox = (XulTextbox) document.getElementById( "language-text" );
warehouseBox = (XulTextbox) document.getElementById( "warehouse-text" );
systemNumberBox = (XulTextbox) document.getElementById( "system-number-text" );
clientBox = (XulTextbox) document.getElementById( "client-text" );
doubleDecimalSeparatorCheck = (XulCheckbox) document.getElementById( "decimal-separator-check" );
resultStreamingCursorCheck = (XulCheckbox) document.getElementById( "result-streaming-check" );
webAppName = (XulTextbox) document.getElementById( "web-application-name-text" );
poolingCheck = (XulCheckbox) document.getElementById( "use-pool-check" );
clusteringCheck = (XulCheckbox) document.getElementById( "use-cluster-check" );
clusterParameterDescriptionLabel = (XulLabel) document.getElementById( "cluster-parameter-description-label" );
poolSizeLabel = (XulLabel) document.getElementById( "pool-size-label" );
poolSizeBox = (XulTextbox) document.getElementById( "pool-size-text" );
maxPoolSizeLabel = (XulLabel) document.getElementById( "max-pool-size-label" );
maxPoolSizeBox = (XulTextbox) document.getElementById( "max-pool-size-text" );
poolParameterTree = (XulTree) document.getElementById( "pool-parameter-tree" );
clusterParameterTree = (XulTree) document.getElementById( "cluster-parameter-tree" );
optionsParameterTree = (XulTree) document.getElementById( "options-parameter-tree" );
poolingDescription = (XulTextbox) document.getElementById( "pooling-description" );
poolingParameterDescriptionLabel = (XulLabel) document.getElementById( "pool-parameter-description-label" );
poolingDescriptionLabel = (XulLabel) document.getElementById( "pooling-description-label" );
supportBooleanDataType = (XulCheckbox) document.getElementById( "supports-boolean-data-type" );
supportTimestampDataType = (XulCheckbox) document.getElementById( "supports-timestamp-data-type" );
quoteIdentifiersCheck = (XulCheckbox) document.getElementById( "quote-identifiers-check" );
lowerCaseIdentifiersCheck = (XulCheckbox) document.getElementById( "force-lower-case-check" );
upperCaseIdentifiersCheck = (XulCheckbox) document.getElementById( "force-upper-case-check" );
preserveReservedCaseCheck = (XulCheckbox) document.getElementById( "preserve-reserved-case" );
strictBigNumberInterpretaion = (XulCheckbox) document.getElementById( "strict-bignum-interpretation" );
preferredSchemaName = (XulTextbox) document.getElementById( "preferred-schema-name-text" );
sqlBox = (XulTextbox) document.getElementById( "sql-text" );
useIntegratedSecurityCheck = (XulCheckbox) document.getElementById( "use-integrated-security-check" );
acceptButton = (XulButton) document.getElementById( "general-datasource-window_accept" );
cancelButton = (XulButton) document.getElementById( "general-datasource-window_cancel" );
testButton = (XulButton) document.getElementById( "test-button" );
noticeLabel = (XulLabel) document.getElementById( "notice-label" );
jdbcAuthMethod = (XulMenuList) document.getElementById( "redshift-auth-method-list" );
iamAccessKeyId = (XulTextbox) document.getElementById( "iam-access-key-id" );
iamSecretKeyId = (XulTextbox) document.getElementById( "iam-secret-access-key" );
iamSessionToken = (XulTextbox) document.getElementById( "iam-session-token" );
iamProfileName = (XulTextbox) document.getElementById( "iam-profile-name" );
namedClusterList = (XulMenuList) document.getElementById( "named-cluster-list" );
// Azure SQL DB
azureSqlDBJdbcAuthMethod = (XulMenuList) document.getElementById( "azure-sql-db-auth-method-list" );
azureSqlDBAlwaysEncryptionEnabled = (XulCheckbox) document.getElementById( "azure-sql-db-enable-always-encryption-on" );
azureSqlDBClientSecretId = (XulTextbox) document.getElementById( "azure-sql-db-client-id" );
azureSqlDBClientSecretKey = (XulTextbox) document.getElementById( "azure-sql-db-client-secret-key" );
if ( portNumberBox != null && serverInstanceBox != null ) {
if ( Boolean.parseBoolean( serverInstanceBox.getAttributeValue( "shouldDisablePortIfPopulated" ) ) ) {
serverInstanceBox.addPropertyChangeListener( new PropertyChangeListener() {
@Override
public void propertyChange( PropertyChangeEvent evt ) {
if ( "value".equals( evt.getPropertyName() ) ) {
disablePortIfInstancePopulated();
}
}
} );
}
}
}
|
@Test
public void testGetControls() throws Exception {
dataHandler.getControls();
assertNotNull( dataHandler.hostNameBox );
assertNotNull( dataHandler.portNumberBox );
assertNotNull( dataHandler.userNameBox );
assertNotNull( dataHandler.passwordBox );
}
|
public static Schema inferSchema(Object value) {
if (value instanceof String) {
return Schema.STRING_SCHEMA;
} else if (value instanceof Boolean) {
return Schema.BOOLEAN_SCHEMA;
} else if (value instanceof Byte) {
return Schema.INT8_SCHEMA;
} else if (value instanceof Short) {
return Schema.INT16_SCHEMA;
} else if (value instanceof Integer) {
return Schema.INT32_SCHEMA;
} else if (value instanceof Long) {
return Schema.INT64_SCHEMA;
} else if (value instanceof Float) {
return Schema.FLOAT32_SCHEMA;
} else if (value instanceof Double) {
return Schema.FLOAT64_SCHEMA;
} else if (value instanceof byte[] || value instanceof ByteBuffer) {
return Schema.BYTES_SCHEMA;
} else if (value instanceof List) {
return inferListSchema((List<?>) value);
} else if (value instanceof Map) {
return inferMapSchema((Map<?, ?>) value);
} else if (value instanceof Struct) {
return ((Struct) value).schema();
}
return null;
}
|
@Test
public void shouldInferNoSchemaForEmptyMap() {
Schema listSchema = Values.inferSchema(Collections.emptyMap());
assertNull(listSchema);
}
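// Companion sketch that follows directly from the instanceof chain in inferSchema above:
// primitive values map to the matching primitive schemas, and an unrecognized type yields null.
@Test
public void shouldInferPrimitiveSchemas() {
assertEquals(Schema.STRING_SCHEMA, Values.inferSchema("text"));
assertEquals(Schema.INT32_SCHEMA, Values.inferSchema(42));
assertEquals(Schema.FLOAT64_SCHEMA, Values.inferSchema(3.14));
assertEquals(Schema.BYTES_SCHEMA, Values.inferSchema(new byte[] {1, 2}));
assertNull(Values.inferSchema(new Object()));
}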
|
public static NameStep newBuilder() {
return new CharacterSteps();
}
|
@Test
void testBuildPoorWizard() {
final var character = CharacterStepBuilder.newBuilder()
.name("Merlin")
.wizardClass("alchemist")
.noSpell()
.build();
assertEquals("Merlin", character.getName());
assertEquals("alchemist", character.getWizardClass());
assertNull(character.getSpell());
assertNull(character.getAbilities());
assertNotNull(character.toString());
}
|
@Override
public Mono<Void> filter(ServerWebExchange exchange, WebFilterChain chain) {
String localNamespace = MetadataContext.LOCAL_NAMESPACE;
String localService = MetadataContext.LOCAL_SERVICE;
Set<Argument> arguments = rateLimitRuleArgumentResolver.getArguments(exchange, localNamespace, localService);
long waitMs = -1;
try {
String path = exchange.getRequest().getURI().getPath();
QuotaResponse quotaResponse = QuotaCheckUtils.getQuota(
limitAPI, localNamespace, localService, 1, arguments, path);
if (quotaResponse.getCode() == QuotaResultCode.QuotaResultLimited) {
ServerHttpResponse response = exchange.getResponse();
DataBuffer dataBuffer;
if (!Objects.isNull(polarisRateLimiterLimitedFallback)) {
response.setRawStatusCode(polarisRateLimiterLimitedFallback.rejectHttpCode());
response.getHeaders().setContentType(polarisRateLimiterLimitedFallback.mediaType());
dataBuffer = response.bufferFactory().allocateBuffer()
.write(polarisRateLimiterLimitedFallback.rejectTips()
.getBytes(polarisRateLimiterLimitedFallback.charset()));
}
else {
response.setRawStatusCode(polarisRateLimitProperties.getRejectHttpCode());
response.getHeaders().setContentType(MediaType.TEXT_HTML);
dataBuffer = response.bufferFactory().allocateBuffer()
.write(rejectTips.getBytes(StandardCharsets.UTF_8));
}
response.getHeaders()
.add(HeaderConstant.INTERNAL_CALLEE_RET_STATUS, RetStatus.RetFlowControl.getDesc());
if (Objects.nonNull(quotaResponse.getActiveRule())) {
try {
String encodedActiveRuleName = URLEncoder.encode(
quotaResponse.getActiveRuleName(), UTF_8);
response.getHeaders().add(HeaderConstant.INTERNAL_ACTIVE_RULE_NAME, encodedActiveRuleName);
}
catch (UnsupportedEncodingException e) {
LOG.error("Cannot encode {} for header internal-callee-activerule.",
quotaResponse.getActiveRuleName(), e);
}
}
return response.writeWith(Mono.just(dataBuffer));
}
// Unirate
if (quotaResponse.getCode() == QuotaResultCode.QuotaResultOk && quotaResponse.getWaitMs() > 0) {
LOG.debug("The request of [{}] will waiting for {}ms.", path, quotaResponse.getWaitMs());
waitMs = quotaResponse.getWaitMs();
}
}
catch (Throwable t) {
// An exception occurs in the rate limiting API call,
// which should not affect the call of the business process.
LOG.error("fail to invoke getQuota, service is " + localService, t);
}
if (waitMs > 0) {
return Mono.delay(Duration.ofMillis(waitMs)).flatMap(e -> chain.filter(exchange));
}
else {
return chain.filter(exchange);
}
}
|
@Test
public void testFilter() {
// Create mock WebFilterChain
WebFilterChain webFilterChain = serverWebExchange -> Mono.empty();
// Mock request
MockServerHttpRequest request = MockServerHttpRequest.get("http://localhost:8080/test").build();
quotaCheckReactiveFilter.init();
// Pass
MetadataContext.LOCAL_SERVICE = "TestApp1";
ServerWebExchange testApp1Exchange = MockServerWebExchange.from(request);
quotaCheckReactiveFilter.filter(testApp1Exchange, webFilterChain);
// Unirate waiting 1000ms
MetadataContext.LOCAL_SERVICE = "TestApp2";
ServerWebExchange testApp2Exchange = MockServerWebExchange.from(request);
long startTimestamp = System.currentTimeMillis();
CountDownLatch countDownLatch = new CountDownLatch(1);
quotaCheckReactiveFilter.filter(testApp2Exchange, webFilterChain).subscribe(e -> {
}, t -> {
}, countDownLatch::countDown);
try {
countDownLatch.await();
}
catch (InterruptedException e) {
fail("Exception encountered.", e);
}
assertThat(System.currentTimeMillis() - startTimestamp).isGreaterThanOrEqualTo(1000L);
// Rate limited
MetadataContext.LOCAL_SERVICE = "TestApp3";
ServerWebExchange testApp3Exchange = MockServerWebExchange.from(request);
quotaCheckReactiveFilter.filter(testApp3Exchange, webFilterChain);
ServerHttpResponse response = testApp3Exchange.getResponse();
assertThat(response.getRawStatusCode()).isEqualTo(419);
assertThat(response.getStatusCode()).isEqualTo(HttpStatus.INSUFFICIENT_SPACE_ON_RESOURCE);
assertThat(response.getHeaders()
.get(HeaderConstant.INTERNAL_ACTIVE_RULE_NAME)).isEqualTo(Collections.singletonList("MOCK_RULE"));
// Exception
MetadataContext.LOCAL_SERVICE = "TestApp4";
ServerWebExchange testApp4Exchange = MockServerWebExchange.from(request);
quotaCheckReactiveFilter.filter(testApp4Exchange, webFilterChain);
}
|
public void decode(ByteBuf buffer) {
boolean last;
int statusCode;
while (true) {
switch(state) {
case READ_COMMON_HEADER:
if (buffer.readableBytes() < SPDY_HEADER_SIZE) {
return;
}
int frameOffset = buffer.readerIndex();
int flagsOffset = frameOffset + SPDY_HEADER_FLAGS_OFFSET;
int lengthOffset = frameOffset + SPDY_HEADER_LENGTH_OFFSET;
buffer.skipBytes(SPDY_HEADER_SIZE);
boolean control = (buffer.getByte(frameOffset) & 0x80) != 0;
int version;
int type;
if (control) {
// Decode control frame common header
version = getUnsignedShort(buffer, frameOffset) & 0x7FFF;
type = getUnsignedShort(buffer, frameOffset + SPDY_HEADER_TYPE_OFFSET);
streamId = 0; // Default to session Stream-ID
} else {
// Decode data frame common header
version = spdyVersion; // Default to expected version
type = SPDY_DATA_FRAME;
streamId = getUnsignedInt(buffer, frameOffset);
}
flags = buffer.getByte(flagsOffset);
length = getUnsignedMedium(buffer, lengthOffset);
// Check version first then validity
if (version != spdyVersion) {
state = State.FRAME_ERROR;
delegate.readFrameError("Invalid SPDY Version");
} else if (!isValidFrameHeader(streamId, type, flags, length)) {
state = State.FRAME_ERROR;
delegate.readFrameError("Invalid Frame Error");
} else {
state = getNextState(type, length);
}
break;
case READ_DATA_FRAME:
if (length == 0) {
state = State.READ_COMMON_HEADER;
delegate.readDataFrame(streamId, hasFlag(flags, SPDY_DATA_FLAG_FIN), Unpooled.buffer(0));
break;
}
// Generate data frames that do not exceed maxChunkSize
int dataLength = Math.min(maxChunkSize, length);
// Wait until entire frame is readable
if (buffer.readableBytes() < dataLength) {
return;
}
ByteBuf data = buffer.alloc().buffer(dataLength);
data.writeBytes(buffer, dataLength);
length -= dataLength;
if (length == 0) {
state = State.READ_COMMON_HEADER;
}
last = length == 0 && hasFlag(flags, SPDY_DATA_FLAG_FIN);
delegate.readDataFrame(streamId, last, data);
break;
case READ_SYN_STREAM_FRAME:
if (buffer.readableBytes() < 10) {
return;
}
int offset = buffer.readerIndex();
streamId = getUnsignedInt(buffer, offset);
int associatedToStreamId = getUnsignedInt(buffer, offset + 4);
byte priority = (byte) (buffer.getByte(offset + 8) >> 5 & 0x07);
last = hasFlag(flags, SPDY_FLAG_FIN);
boolean unidirectional = hasFlag(flags, SPDY_FLAG_UNIDIRECTIONAL);
buffer.skipBytes(10);
length -= 10;
if (streamId == 0) {
state = State.FRAME_ERROR;
delegate.readFrameError("Invalid SYN_STREAM Frame");
} else {
state = State.READ_HEADER_BLOCK;
delegate.readSynStreamFrame(streamId, associatedToStreamId, priority, last, unidirectional);
}
break;
case READ_SYN_REPLY_FRAME:
if (buffer.readableBytes() < 4) {
return;
}
streamId = getUnsignedInt(buffer, buffer.readerIndex());
last = hasFlag(flags, SPDY_FLAG_FIN);
buffer.skipBytes(4);
length -= 4;
if (streamId == 0) {
state = State.FRAME_ERROR;
delegate.readFrameError("Invalid SYN_REPLY Frame");
} else {
state = State.READ_HEADER_BLOCK;
delegate.readSynReplyFrame(streamId, last);
}
break;
case READ_RST_STREAM_FRAME:
if (buffer.readableBytes() < 8) {
return;
}
streamId = getUnsignedInt(buffer, buffer.readerIndex());
statusCode = getSignedInt(buffer, buffer.readerIndex() + 4);
buffer.skipBytes(8);
if (streamId == 0 || statusCode == 0) {
state = State.FRAME_ERROR;
delegate.readFrameError("Invalid RST_STREAM Frame");
} else {
state = State.READ_COMMON_HEADER;
delegate.readRstStreamFrame(streamId, statusCode);
}
break;
case READ_SETTINGS_FRAME:
if (buffer.readableBytes() < 4) {
return;
}
boolean clear = hasFlag(flags, SPDY_SETTINGS_CLEAR);
numSettings = getUnsignedInt(buffer, buffer.readerIndex());
buffer.skipBytes(4);
length -= 4;
// Validate frame length against number of entries. Each ID/Value entry is 8 bytes.
if ((length & 0x07) != 0 || length >> 3 != numSettings) {
state = State.FRAME_ERROR;
delegate.readFrameError("Invalid SETTINGS Frame");
} else {
state = State.READ_SETTING;
delegate.readSettingsFrame(clear);
}
break;
case READ_SETTING:
if (numSettings == 0) {
state = State.READ_COMMON_HEADER;
delegate.readSettingsEnd();
break;
}
if (buffer.readableBytes() < 8) {
return;
}
byte settingsFlags = buffer.getByte(buffer.readerIndex());
int id = getUnsignedMedium(buffer, buffer.readerIndex() + 1);
int value = getSignedInt(buffer, buffer.readerIndex() + 4);
boolean persistValue = hasFlag(settingsFlags, SPDY_SETTINGS_PERSIST_VALUE);
boolean persisted = hasFlag(settingsFlags, SPDY_SETTINGS_PERSISTED);
buffer.skipBytes(8);
--numSettings;
delegate.readSetting(id, value, persistValue, persisted);
break;
case READ_PING_FRAME:
if (buffer.readableBytes() < 4) {
return;
}
int pingId = getSignedInt(buffer, buffer.readerIndex());
buffer.skipBytes(4);
state = State.READ_COMMON_HEADER;
delegate.readPingFrame(pingId);
break;
case READ_GOAWAY_FRAME:
if (buffer.readableBytes() < 8) {
return;
}
int lastGoodStreamId = getUnsignedInt(buffer, buffer.readerIndex());
statusCode = getSignedInt(buffer, buffer.readerIndex() + 4);
buffer.skipBytes(8);
state = State.READ_COMMON_HEADER;
delegate.readGoAwayFrame(lastGoodStreamId, statusCode);
break;
case READ_HEADERS_FRAME:
if (buffer.readableBytes() < 4) {
return;
}
streamId = getUnsignedInt(buffer, buffer.readerIndex());
last = hasFlag(flags, SPDY_FLAG_FIN);
buffer.skipBytes(4);
length -= 4;
if (streamId == 0) {
state = State.FRAME_ERROR;
delegate.readFrameError("Invalid HEADERS Frame");
} else {
state = State.READ_HEADER_BLOCK;
delegate.readHeadersFrame(streamId, last);
}
break;
case READ_WINDOW_UPDATE_FRAME:
if (buffer.readableBytes() < 8) {
return;
}
streamId = getUnsignedInt(buffer, buffer.readerIndex());
int deltaWindowSize = getUnsignedInt(buffer, buffer.readerIndex() + 4);
buffer.skipBytes(8);
if (deltaWindowSize == 0) {
state = State.FRAME_ERROR;
delegate.readFrameError("Invalid WINDOW_UPDATE Frame");
} else {
state = State.READ_COMMON_HEADER;
delegate.readWindowUpdateFrame(streamId, deltaWindowSize);
}
break;
case READ_HEADER_BLOCK:
if (length == 0) {
state = State.READ_COMMON_HEADER;
delegate.readHeaderBlockEnd();
break;
}
if (!buffer.isReadable()) {
return;
}
int compressedBytes = Math.min(buffer.readableBytes(), length);
ByteBuf headerBlock = buffer.alloc().buffer(compressedBytes);
headerBlock.writeBytes(buffer, compressedBytes);
length -= compressedBytes;
delegate.readHeaderBlock(headerBlock);
break;
case DISCARD_FRAME:
int numBytes = Math.min(buffer.readableBytes(), length);
buffer.skipBytes(numBytes);
length -= numBytes;
if (length == 0) {
state = State.READ_COMMON_HEADER;
break;
}
return;
case FRAME_ERROR:
buffer.skipBytes(buffer.readableBytes());
return;
default:
throw new Error("Shouldn't reach here.");
}
}
}
|
@Test
public void testSpdySettingsFrameClearFlag() throws Exception {
short type = 4;
byte flags = 0x01; // FLAG_SETTINGS_CLEAR_SETTINGS
int numSettings = 0;
int length = 8 * numSettings + 4;
ByteBuf buf = Unpooled.buffer(SPDY_HEADER_SIZE + length);
encodeControlFrameHeader(buf, type, flags, length);
buf.writeInt(numSettings);
decoder.decode(buf);
verify(delegate).readSettingsFrame(true);
verify(delegate).readSettingsEnd();
assertFalse(buf.isReadable());
buf.release();
}
|
@Override
public PathAttributes find(final Path file, final ListProgressListener listener) throws BackgroundException {
try {
final FileEntity entity = new FilesApi(new BrickApiClient(session))
.download(StringUtils.removeStart(file.getAbsolute(), String.valueOf(Path.DELIMITER)),
"stat", null, false, false);
switch(entity.getType()) {
case "file":
if(file.isDirectory()) {
throw new NotfoundException(file.getAbsolute());
}
break;
case "directory":
if(file.isFile()) {
throw new NotfoundException(file.getAbsolute());
}
}
return this.toAttributes(entity);
}
catch(ApiException e) {
throw new BrickExceptionMappingService().map("Failure to read attributes of {0}", e, file);
}
}
|
@Test
public void testFindRoot() throws Exception {
final BrickAttributesFinderFeature f = new BrickAttributesFinderFeature(session);
final PathAttributes attributes = f.find(new Path("/", EnumSet.of(Path.Type.volume, Path.Type.directory)));
assertNotEquals(PathAttributes.EMPTY, attributes);
}
|
@Override
public void updateArticle(ArticleUpdateReqVO updateReqVO) {
// validate that the article exists
validateArticleExists(updateReqVO.getId());
// validate that the article category exists
validateArticleCategoryExists(updateReqVO.getCategoryId());
// update
ArticleDO updateObj = ArticleConvert.INSTANCE.convert(updateReqVO);
articleMapper.updateById(updateObj);
}
|
@Test
public void testUpdateArticle_notExists() {
// prepare parameters
ArticleUpdateReqVO reqVO = randomPojo(ArticleUpdateReqVO.class);
// invoke and assert the expected exception
assertServiceException(() -> articleService.updateArticle(reqVO), ARTICLE_NOT_EXISTS);
}
|
boolean isInsideClosedClosed(Number toEvaluate) {
if (leftMargin == null) {
return toEvaluate.doubleValue() <= rightMargin.doubleValue();
} else if (rightMargin == null) {
return toEvaluate.doubleValue() >= leftMargin.doubleValue();
} else {
return toEvaluate.doubleValue() >= leftMargin.doubleValue() && toEvaluate.doubleValue() <= rightMargin.doubleValue();
}
}
|
@Test
void isInsideClosedClosed() {
KiePMMLInterval kiePMMLInterval = new KiePMMLInterval(null, 20, CLOSURE.CLOSED_CLOSED);
assertThat(kiePMMLInterval.isInsideClosedClosed(10)).isTrue();
assertThat(kiePMMLInterval.isInsideClosedClosed(20)).isTrue();
assertThat(kiePMMLInterval.isInsideClosedClosed(30)).isFalse();
kiePMMLInterval = new KiePMMLInterval(20, null, CLOSURE.CLOSED_CLOSED);
assertThat(kiePMMLInterval.isInsideClosedClosed(30)).isTrue();
assertThat(kiePMMLInterval.isInsideClosedClosed(20)).isTrue();
assertThat(kiePMMLInterval.isInsideClosedClosed(10)).isFalse();
kiePMMLInterval = new KiePMMLInterval(20, 40, CLOSURE.CLOSED_CLOSED);
assertThat(kiePMMLInterval.isInsideClosedClosed(30)).isTrue();
assertThat(kiePMMLInterval.isInsideClosedClosed(10)).isFalse();
assertThat(kiePMMLInterval.isInsideClosedClosed(20)).isTrue();
assertThat(kiePMMLInterval.isInsideClosedClosed(40)).isTrue();
assertThat(kiePMMLInterval.isInsideClosedClosed(50)).isFalse();
}
|
public static <K, V> V putWithoutChecking(CheckedMap<K, V> map, K key, V value)
{
return map.putWithAssertedChecking(key, value);
}
|
@Test(expectedExceptions = AssertionError.class)
public void testPutCycleWithAssertChecking()
{
final DataMap map = new DataMap();
CheckedUtil.putWithoutChecking(map, "cycle", map);
}
|
@Override
public boolean equals(Object obj) {
if (obj instanceof IndexFile) {
if (equalsByExists((IndexFile) obj)) {
if (this.exists()) {
return equalsByIsSameFile((IndexFile) obj);
} else {
return Objects.equals(this.getAbsoluteFile().getAbsolutePath(), ((IndexFile)obj).getAbsoluteFile().getAbsolutePath());
}
}
return false;
}
return false;
}
|
@Test
void isEqualNotExisting() {
String expected = "model";
IndexFile indexFile = new IndexFile("not/exist/", expected);
IndexFile compareFile = new IndexFile("not/exist/", expected);
assertThat(compareFile.getName()).isEqualTo(indexFile.getName());
assertThat(compareFile.getPath()).isEqualTo(indexFile.getPath());
assertThat(compareFile.getAbsolutePath()).isEqualTo(indexFile.getAbsolutePath());
assertThat(compareFile.equals(indexFile)).isTrue();
}
|
@Override
public void importData(JsonReader reader) throws IOException {
logger.info("Reading configuration for 1.2");
// this *HAS* to start as an object
reader.beginObject();
while (reader.hasNext()) {
JsonToken tok = reader.peek();
switch (tok) {
case NAME:
String name = reader.nextName();
// find out which member it is
if (name.equals(CLIENTS)) {
readClients(reader);
} else if (name.equals(GRANTS)) {
readGrants(reader);
} else if (name.equals(WHITELISTEDSITES)) {
readWhitelistedSites(reader);
} else if (name.equals(BLACKLISTEDSITES)) {
readBlacklistedSites(reader);
} else if (name.equals(AUTHENTICATIONHOLDERS)) {
readAuthenticationHolders(reader);
} else if (name.equals(ACCESSTOKENS)) {
readAccessTokens(reader);
} else if (name.equals(REFRESHTOKENS)) {
readRefreshTokens(reader);
} else if (name.equals(SYSTEMSCOPES)) {
readSystemScopes(reader);
} else {
for (MITREidDataServiceExtension extension : extensions) {
if (extension.supportsVersion(THIS_VERSION)) {
extension.importExtensionData(name, reader);
break;
}
}
// unknown token, skip it
reader.skipValue();
}
break;
case END_OBJECT:
// the object ended, we're done here
reader.endObject();
continue;
default:
logger.debug("Found unexpected entry");
reader.skipValue();
continue;
}
}
fixObjectReferences();
for (MITREidDataServiceExtension extension : extensions) {
if (extension.supportsVersion(THIS_VERSION)) {
extension.fixExtensionObjectReferences(maps);
break;
}
}
maps.clearAll();
}
|
@Test
public void testImportSystemScopes() throws IOException {
SystemScope scope1 = new SystemScope();
scope1.setId(1L);
scope1.setValue("scope1");
scope1.setDescription("Scope 1");
scope1.setRestricted(true);
scope1.setDefaultScope(false);
scope1.setIcon("glass");
SystemScope scope2 = new SystemScope();
scope2.setId(2L);
scope2.setValue("scope2");
scope2.setDescription("Scope 2");
scope2.setRestricted(false);
scope2.setDefaultScope(false);
scope2.setIcon("ball");
SystemScope scope3 = new SystemScope();
scope3.setId(3L);
scope3.setValue("scope3");
scope3.setDescription("Scope 3");
scope3.setRestricted(false);
scope3.setDefaultScope(true);
scope3.setIcon("road");
String configJson = "{" +
"\"" + MITREidDataService.CLIENTS + "\": [], " +
"\"" + MITREidDataService.ACCESSTOKENS + "\": [], " +
"\"" + MITREidDataService.REFRESHTOKENS + "\": [], " +
"\"" + MITREidDataService.GRANTS + "\": [], " +
"\"" + MITREidDataService.WHITELISTEDSITES + "\": [], " +
"\"" + MITREidDataService.BLACKLISTEDSITES + "\": [], " +
"\"" + MITREidDataService.AUTHENTICATIONHOLDERS + "\": [], " +
"\"" + MITREidDataService.SYSTEMSCOPES + "\": [" +
"{\"id\":1,\"description\":\"Scope 1\",\"icon\":\"glass\",\"value\":\"scope1\",\"restricted\":true,\"defaultScope\":false}," +
"{\"id\":2,\"description\":\"Scope 2\",\"icon\":\"ball\",\"value\":\"scope2\",\"restricted\":false,\"defaultScope\":false}," +
"{\"id\":3,\"description\":\"Scope 3\",\"icon\":\"road\",\"value\":\"scope3\",\"restricted\":false,\"defaultScope\":true}" +
" ]" +
"}";
logger.debug(configJson);
JsonReader reader = new JsonReader(new StringReader(configJson));
dataService.importData(reader);
verify(sysScopeRepository, times(3)).save(capturedScope.capture());
List<SystemScope> savedScopes = capturedScope.getAllValues();
assertThat(savedScopes.size(), is(3));
assertThat(savedScopes.get(0).getValue(), equalTo(scope1.getValue()));
assertThat(savedScopes.get(0).getDescription(), equalTo(scope1.getDescription()));
assertThat(savedScopes.get(0).getIcon(), equalTo(scope1.getIcon()));
assertThat(savedScopes.get(0).isDefaultScope(), equalTo(scope1.isDefaultScope()));
assertThat(savedScopes.get(0).isRestricted(), equalTo(scope1.isRestricted()));
assertThat(savedScopes.get(1).getValue(), equalTo(scope2.getValue()));
assertThat(savedScopes.get(1).getDescription(), equalTo(scope2.getDescription()));
assertThat(savedScopes.get(1).getIcon(), equalTo(scope2.getIcon()));
assertThat(savedScopes.get(1).isDefaultScope(), equalTo(scope2.isDefaultScope()));
assertThat(savedScopes.get(1).isRestricted(), equalTo(scope2.isRestricted()));
assertThat(savedScopes.get(2).getValue(), equalTo(scope3.getValue()));
assertThat(savedScopes.get(2).getDescription(), equalTo(scope3.getDescription()));
assertThat(savedScopes.get(2).getIcon(), equalTo(scope3.getIcon()));
assertThat(savedScopes.get(2).isDefaultScope(), equalTo(scope3.isDefaultScope()));
assertThat(savedScopes.get(2).isRestricted(), equalTo(scope3.isRestricted()));
}
|
@Override
public SchemaTransform from(PubsubReadSchemaTransformConfiguration configuration) {
if (configuration.getSubscription() == null && configuration.getTopic() == null) {
throw new IllegalArgumentException(
"To read from Pubsub, a subscription name or a topic name must be provided");
}
if (configuration.getSubscription() != null && configuration.getTopic() != null) {
throw new IllegalArgumentException(
"To read from Pubsub, a subscription name or a topic name must be provided. Not both.");
}
if (!"RAW".equals(configuration.getFormat())) {
if ((Strings.isNullOrEmpty(configuration.getSchema())
&& !Strings.isNullOrEmpty(configuration.getFormat()))
|| (!Strings.isNullOrEmpty(configuration.getSchema())
&& Strings.isNullOrEmpty(configuration.getFormat()))) {
throw new IllegalArgumentException(
"A schema was provided without a data format (or viceversa). Please provide "
+ "both of these parameters to read from Pubsub, or if you would like to use the Pubsub schema service,"
+ " please leave both of these blank.");
}
}
Schema payloadSchema;
SerializableFunction<byte[], Row> payloadMapper;
String format =
configuration.getFormat() == null ? null : configuration.getFormat().toUpperCase();
if ("RAW".equals(format)) {
payloadSchema = Schema.of(Schema.Field.of("payload", Schema.FieldType.BYTES));
payloadMapper = input -> Row.withSchema(payloadSchema).addValue(input).build();
} else if ("JSON".equals(format)) {
payloadSchema = JsonUtils.beamSchemaFromJsonSchema(configuration.getSchema());
payloadMapper = JsonUtils.getJsonBytesToRowFunction(payloadSchema);
} else if ("AVRO".equals(format)) {
payloadSchema =
AvroUtils.toBeamSchema(
new org.apache.avro.Schema.Parser().parse(configuration.getSchema()));
payloadMapper = AvroUtils.getAvroBytesToRowFunction(payloadSchema);
} else {
throw new IllegalArgumentException(
String.format(
"Format %s not supported. Only supported formats are %s",
configuration.getFormat(), VALID_FORMATS_STR));
}
PubsubReadSchemaTransform transform =
new PubsubReadSchemaTransform(configuration, payloadSchema, payloadMapper);
if (configuration.getClientFactory() != null) {
transform.setClientFactory(configuration.getClientFactory());
}
if (configuration.getClock() != null) {
transform.setClock(configuration.getClock());
}
return transform;
}
|
@Test
public void testReadAvroWithError() throws IOException {
PCollectionRowTuple begin = PCollectionRowTuple.empty(p);
try (PubsubTestClientFactory clientFactory = clientFactory(beamRowToMessageWithError())) {
PubsubReadSchemaTransformConfiguration config =
PubsubReadSchemaTransformConfiguration.builder()
.setFormat("AVRO")
.setSchema(SCHEMA)
.setSubscription(SUBSCRIPTION)
.setErrorHandling(
PubsubReadSchemaTransformConfiguration.ErrorHandling.builder()
.setOutput("errors")
.build())
.setClientFactory(clientFactory)
.setClock(CLOCK)
.build();
SchemaTransform transform = new PubsubReadSchemaTransformProvider().from(config);
PCollectionRowTuple reads = begin.apply(transform);
PAssert.that(reads.get("output")).empty();
PipelineResult result = p.run();
result.waitUntilFinish();
MetricResults metrics = result.metrics();
MetricQueryResults metricResults =
metrics.queryMetrics(
MetricsFilter.builder()
.addNameFilter(
MetricNameFilter.named(
PubsubReadSchemaTransformProvider.class, "PubSub-read-error-counter"))
.build());
Iterable<MetricResult<Long>> counters = metricResults.getCounters();
if (!counters.iterator().hasNext()) {
throw new RuntimeException("no counters available ");
}
Long expectedCount = 3L;
for (MetricResult<Long> count : counters) {
assertEquals(expectedCount, count.getAttempted());
}
}
}
|
public JmxCollector register() {
return register(PrometheusRegistry.defaultRegistry);
}
|
@Test
public void testIncludeObjectNames() throws Exception {
new JmxCollector(
"\n---\nincludeObjectNames:\n- java.lang:*\n- java.lang:*\n- org.apache.cassandra.concurrent:*"
.replace('`', '"'))
.register(prometheusRegistry);
// Test what should and shouldn't be present.
assertNotNull(
getSampleValue(
"java_lang_OperatingSystem_ProcessCpuTime",
new String[] {},
new String[] {}));
assertNotNull(
getSampleValue(
"org_apache_cassandra_concurrent_CONSISTENCY_MANAGER_ActiveCount",
new String[] {},
new String[] {}));
assertNull(
getSampleValue(
"org_apache_cassandra_metrics_Compaction_Value",
new String[] {"name"},
new String[] {"CompletedTasks"}));
assertNull(
getSampleValue(
"hadoop_DataNode_replaceBlockOpMinTime",
new String[] {"name"},
new String[] {"DataNodeActivity-ams-hdd001-50010"}));
}
|
@Override
public FileVisitResult visitFileFailed(Path file, IOException exc) throws IOException {
if (exc instanceof FileSystemLoopException) {
LOG.warn("Not indexing due to symlink loop: {}", file.toFile());
return FileVisitResult.CONTINUE;
} else if (exc instanceof AccessDeniedException && isExcluded(file)) {
return FileVisitResult.CONTINUE;
}
throw exc;
}
|
@Test
public void test_visit_file_failed_file_system_loop_exception() throws IOException {
DirectoryFileVisitor.FileVisitAction action = mock(DirectoryFileVisitor.FileVisitAction.class);
File file = temp.newFile("symlink");
DirectoryFileVisitor underTest = new DirectoryFileVisitor(action, module, moduleExclusionFilters, inputModuleHierarchy, type);
FileVisitResult result = underTest.visitFileFailed(file.toPath(), new FileSystemLoopException(file.getPath()));
assertThat(result).isEqualTo(FileVisitResult.CONTINUE);
}
|
public CompletableFuture<PutMessageResult> asyncPutMessage(MessageExtBrokerInner messageExt) {
BrokerController masterBroker = this.brokerController.peekMasterBroker();
if (masterBroker != null) {
return masterBroker.getMessageStore().asyncPutMessage(messageExt);
} else if (this.brokerController.getBrokerConfig().isEnableSlaveActingMaster()
&& this.brokerController.getBrokerConfig().isEnableRemoteEscape()) {
try {
messageExt.setWaitStoreMsgOK(false);
final TopicPublishInfo topicPublishInfo = this.brokerController.getTopicRouteInfoManager().tryToFindTopicPublishInfo(messageExt.getTopic());
final String producerGroup = getProducerGroup(messageExt);
final MessageQueue mqSelected = topicPublishInfo.selectOneMessageQueue();
messageExt.setQueueId(mqSelected.getQueueId());
final String brokerNameToSend = mqSelected.getBrokerName();
final String brokerAddrToSend = this.brokerController.getTopicRouteInfoManager().findBrokerAddressInPublish(brokerNameToSend);
final CompletableFuture<SendResult> future = this.brokerController.getBrokerOuterAPI().sendMessageToSpecificBrokerAsync(brokerAddrToSend,
brokerNameToSend, messageExt,
producerGroup, SEND_TIMEOUT);
return future.exceptionally(throwable -> null)
.thenApplyAsync(sendResult -> transformSendResult2PutResult(sendResult), this.defaultAsyncSenderExecutor)
.exceptionally(throwable -> transformSendResult2PutResult(null));
} catch (Exception e) {
LOG.error("sendMessageInFailover to remote failed", e);
return CompletableFuture.completedFuture(new PutMessageResult(PutMessageStatus.PUT_TO_REMOTE_BROKER_FAIL, null, true));
}
} else {
LOG.warn("Put message failed, enableSlaveActingMaster={}, enableRemoteEscape={}.",
this.brokerController.getBrokerConfig().isEnableSlaveActingMaster(), this.brokerController.getBrokerConfig().isEnableRemoteEscape());
return CompletableFuture.completedFuture(new PutMessageResult(PutMessageStatus.SERVICE_NOT_AVAILABLE, null));
}
}
|
@Test
public void asyncPutMessageTest() {
// masterBroker is null
Assertions.assertThatCode(() -> escapeBridge.asyncPutMessage(messageExtBrokerInner)).doesNotThrowAnyException();
// masterBroker is not null
when(brokerController.peekMasterBroker()).thenReturn(brokerController);
Assertions.assertThatCode(() -> escapeBridge.asyncPutMessage(messageExtBrokerInner)).doesNotThrowAnyException();
when(brokerController.peekMasterBroker()).thenReturn(null);
Assertions.assertThatCode(() -> escapeBridge.asyncPutMessage(messageExtBrokerInner)).doesNotThrowAnyException();
}
|
@Nullable public static BaggageField getByName(@Nullable TraceContext context, String name) {
if (context == null) return null;
return ExtraBaggageContext.getFieldByName(context, validateName(name));
}
|
@Test void getByName_invalid() {
assertThatThrownBy(() -> BaggageField.getByName(context, ""))
.isInstanceOf(IllegalArgumentException.class);
assertThatThrownBy(() -> BaggageField.getByName(context, " "))
.isInstanceOf(IllegalArgumentException.class);
}
|
public ManagedProcess launch(AbstractCommand command) {
EsInstallation esInstallation = command.getEsInstallation();
if (esInstallation != null) {
cleanupOutdatedEsData(esInstallation);
writeConfFiles(esInstallation);
}
Process process;
if (command instanceof JavaCommand<?> javaCommand) {
process = launchJava(javaCommand);
} else {
throw new IllegalStateException("Unexpected type of command: " + command.getClass());
}
ProcessId processId = command.getProcessId();
try {
if (processId == ProcessId.ELASTICSEARCH) {
checkArgument(esInstallation != null, "Incorrect configuration EsInstallation is null");
EsConnectorImpl esConnector = new EsConnectorImpl(singleton(HostAndPort.fromParts(esInstallation.getHost(),
esInstallation.getHttpPort())), esInstallation.getBootstrapPassword(), esInstallation.getHttpKeyStoreLocation(),
esInstallation.getHttpKeyStorePassword().orElse(null));
return new EsManagedProcess(process, processId, esConnector);
} else {
ProcessCommands commands = allProcessesCommands.createAfterClean(processId.getIpcIndex());
return new ProcessCommandsManagedProcess(process, processId, commands);
}
} catch (Exception e) {
// just in case
if (process != null) {
process.destroyForcibly();
}
throw new IllegalStateException(format("Fail to launch monitor of process [%s]", processId.getHumanReadableName()), e);
}
}
|
@Test
public void enabling_es_security_should_execute_keystore_cli_if_cert_password_provided() throws Exception {
File tempDir = temp.newFolder();
File certificateFile = temp.newFile("certificate.pk12");
TestProcessBuilder processBuilder = new TestProcessBuilder();
ProcessLauncher underTest = new ProcessLauncherImpl(tempDir, commands, () -> processBuilder);
EsInstallation esInstallation = createEsInstallation(new Props(new Properties())
.set("sonar.cluster.enabled", "true")
.set("sonar.cluster.search.password", "bootstrap-password")
.set("sonar.cluster.es.ssl.keystore", certificateFile.getAbsolutePath())
.set("sonar.cluster.es.ssl.keystorePassword", "keystore-password")
.set("sonar.cluster.es.ssl.truststore", certificateFile.getAbsolutePath())
.set("sonar.cluster.es.ssl.truststorePassword", "truststore-password"));
JavaCommand<JvmOptions> command = new JavaCommand<>(ProcessId.ELASTICSEARCH, temp.newFolder());
command.addClasspath("lib/*.class");
command.addClasspath("lib/*.jar");
command.setArgument("foo", "bar");
command.setClassName("org.sonarqube.Main");
command.setEnvVariable("VAR1", "valueOfVar1");
command.setJvmOptions(new JvmOptions<>()
.add("-Dfoo=bar")
.add("-Dfoo2=bar2"));
command.setEsInstallation(esInstallation);
ManagedProcess monitor = underTest.launch(command);
assertThat(monitor).isNotNull();
assertThat(Paths.get(esInstallation.getConfDirectory().getAbsolutePath(), "certificate.pk12")).exists();
}
|
@Override
public int run(String[] args) throws Exception {
YarnConfiguration yarnConf =
getConf() == null ? new YarnConfiguration() : new YarnConfiguration(
getConf());
boolean isHAEnabled =
yarnConf.getBoolean(YarnConfiguration.RM_HA_ENABLED,
YarnConfiguration.DEFAULT_RM_HA_ENABLED);
if (args.length < 1) {
printUsage("", isHAEnabled);
return -1;
}
int exitCode = -1;
int i = 0;
String cmd = args[i++];
exitCode = 0;
if ("-help".equals(cmd)) {
if (i < args.length) {
printUsage(args[i], isHAEnabled);
} else {
printHelp("", isHAEnabled);
}
return exitCode;
}
if (USAGE.containsKey(cmd)) {
if (isHAEnabled) {
return super.run(args);
}
System.out.println("Cannot run " + cmd
+ " when ResourceManager HA is not enabled");
return -1;
}
//
// verify that we have enough command line parameters
//
String subClusterId = StringUtils.EMPTY;
if ("-refreshAdminAcls".equals(cmd) || "-refreshQueues".equals(cmd) ||
"-refreshNodesResources".equals(cmd) ||
"-refreshServiceAcl".equals(cmd) ||
"-refreshUserToGroupsMappings".equals(cmd) ||
"-refreshSuperUserGroupsConfiguration".equals(cmd) ||
"-refreshClusterMaxPriority".equals(cmd)) {
subClusterId = parseSubClusterId(args, isHAEnabled);
// If we enable Federation mode, the number of args may be either one or three.
// Example: -refreshQueues or -refreshQueues -subClusterId SC-1
if (isYarnFederationEnabled(getConf()) && args.length != 1 && args.length != 3) {
printUsage(cmd, isHAEnabled);
return exitCode;
} else if (!isYarnFederationEnabled(getConf()) && args.length != 1) {
// If Federation mode is not enabled, then the number of args can only be one.
// Example: -refreshQueues
printUsage(cmd, isHAEnabled);
return exitCode;
}
}
// If it is federation mode, we will print federation mode information
if (isYarnFederationEnabled(getConf())) {
System.out.println("Using YARN Federation mode.");
}
try {
if ("-refreshQueues".equals(cmd)) {
exitCode = refreshQueues(subClusterId);
} else if ("-refreshNodes".equals(cmd)) {
exitCode = handleRefreshNodes(args, cmd, isHAEnabled);
} else if ("-refreshNodesResources".equals(cmd)) {
exitCode = refreshNodesResources(subClusterId);
} else if ("-refreshUserToGroupsMappings".equals(cmd)) {
exitCode = refreshUserToGroupsMappings(subClusterId);
} else if ("-refreshSuperUserGroupsConfiguration".equals(cmd)) {
exitCode = refreshSuperUserGroupsConfiguration(subClusterId);
} else if ("-refreshAdminAcls".equals(cmd)) {
exitCode = refreshAdminAcls(subClusterId);
} else if ("-refreshServiceAcl".equals(cmd)) {
exitCode = refreshServiceAcls(subClusterId);
} else if ("-refreshClusterMaxPriority".equals(cmd)) {
exitCode = refreshClusterMaxPriority(subClusterId);
} else if ("-getGroups".equals(cmd)) {
String[] usernames = Arrays.copyOfRange(args, i, args.length);
exitCode = getGroups(usernames);
} else if ("-updateNodeResource".equals(cmd)) {
exitCode = handleUpdateNodeResource(args, cmd, isHAEnabled, subClusterId);
} else if ("-addToClusterNodeLabels".equals(cmd)) {
exitCode = handleAddToClusterNodeLabels(args, cmd, isHAEnabled);
} else if ("-removeFromClusterNodeLabels".equals(cmd)) {
exitCode = handleRemoveFromClusterNodeLabels(args, cmd, isHAEnabled);
} else if ("-replaceLabelsOnNode".equals(cmd)) {
exitCode = handleReplaceLabelsOnNodes(args, cmd, isHAEnabled);
} else {
exitCode = -1;
System.err.println(cmd.substring(1) + ": Unknown command");
printUsage("", isHAEnabled);
}
} catch (IllegalArgumentException arge) {
exitCode = -1;
System.err.println(cmd.substring(1) + ": " + arge.getLocalizedMessage());
printUsage(cmd, isHAEnabled);
} catch (RemoteException e) {
//
      // This is an error returned by the hadoop server. Print
// out the first line of the error message, ignore the stack trace.
exitCode = -1;
try {
String[] content;
content = e.getLocalizedMessage().split("\n");
System.err.println(cmd.substring(1) + ": "
+ content[0]);
} catch (Exception ex) {
System.err.println(cmd.substring(1) + ": "
+ ex.getLocalizedMessage());
}
} catch (Exception e) {
exitCode = -1;
System.err.println(cmd.substring(1) + ": "
+ e.getLocalizedMessage());
}
if (null != localNodeLabelsManager) {
localNodeLabelsManager.stop();
}
return exitCode;
}
|
@Test
public void testAccessLocalNodeLabelManager() throws Exception {
assertFalse(dummyNodeLabelsManager.getServiceState() == STATE.STOPPED);
String[] args =
{ "-addToClusterNodeLabels", "x,y", "-directlyAccessNodeLabelStore" };
assertEquals(0, rmAdminCLI.run(args));
assertTrue(dummyNodeLabelsManager.getClusterNodeLabelNames().containsAll(
ImmutableSet.of("x", "y")));
// reset localNodeLabelsManager
dummyNodeLabelsManager.removeFromClusterNodeLabels(ImmutableSet.of("x", "y"));
// change the sequence of "-directlyAccessNodeLabelStore" and labels,
// should fail
args =
new String[] { "-addToClusterNodeLabels",
"-directlyAccessNodeLabelStore", "x,y" };
assertEquals(-1, rmAdminCLI.run(args));
// local node labels manager will be close after running
assertTrue(dummyNodeLabelsManager.getServiceState() == STATE.STOPPED);
}
|
public StepInstanceActionResponse terminate(
WorkflowInstance instance,
String stepId,
User user,
Actions.StepInstanceAction action,
boolean blocking) {
validateStepId(instance, stepId, action);
StepInstance stepInstance =
stepInstanceDao.getStepInstance(
instance.getWorkflowId(),
instance.getWorkflowInstanceId(),
instance.getWorkflowRunId(),
stepId,
Constants.LATEST_INSTANCE_RUN);
if (!stepInstance.getRuntimeState().getStatus().shouldWakeup()) {
throw new MaestroInvalidStatusException(
"Cannot manually %s the step %s as it is in a terminal state [%s]",
action.name(), stepInstance.getIdentity(), stepInstance.getRuntimeState().getStatus());
}
// prepare payload and then add it to db
StepAction stepAction =
StepAction.createTerminate(
action, stepInstance, user, "manual step instance API call", false);
saveAction(stepInstance, stepAction);
if (blocking) {
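      // blocking call: poll the step's runtime state until it no longer needs a wake-up,
      // or give up once ACTION_TIMEOUT is exceeded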
long startTime = System.currentTimeMillis();
while (System.currentTimeMillis() - startTime < ACTION_TIMEOUT) {
StepRuntimeState state =
stepInstanceDao.getStepInstanceRuntimeState(
stepInstance.getWorkflowId(),
stepInstance.getWorkflowInstanceId(),
stepInstance.getWorkflowRunId(),
stepInstance.getStepId(),
Constants.LATEST_INSTANCE_RUN);
if (!state.getStatus().shouldWakeup()) {
return createActionResponseFrom(stepInstance, state, stepAction.toTimelineEvent());
}
TimeUtils.sleep(CHECK_INTERVAL);
}
throw new MaestroTimeoutException(
"%s action for the step %s is timed out. No retry is needed and maestro will eventually complete the action.",
action.name(), stepInstance.getIdentity());
} else {
return createActionResponseFrom(stepInstance, null, stepAction.toTimelineEvent());
}
}
|
@Test
public void testKill() {
StepInstanceActionResponse response = actionDao.terminate(instance, "job1", user, KILL, false);
Assert.assertEquals("sample-dag-test-3", response.getWorkflowId());
Assert.assertEquals(1, response.getWorkflowInstanceId());
Assert.assertEquals(1, response.getWorkflowRunId());
Assert.assertEquals("job1", response.getStepId());
Assert.assertEquals(1L, response.getStepAttemptId().longValue());
Assert.assertEquals(
"User [tester] take action [KILL] on the step due to reason: [manual step instance API call]",
response.getTimelineEvent().getMessage());
Mockito.verify(publisher, Mockito.times(1)).publish(any(StepInstanceWakeUpEvent.class));
}
|
@Override
public boolean find(final Path file, final ListProgressListener listener) throws BackgroundException {
if(file.isRoot()) {
return true;
}
try {
new FTPAttributesFinderFeature(session).find(file, listener);
return true;
}
catch(NotfoundException e) {
return false;
}
}
|
@Test
public void testFindNotFound() throws Exception {
assertFalse(new FTPFindFeature(session).find(new Path(UUID.randomUUID().toString(), EnumSet.of(Path.Type.file))));
}
|
public MetricsBuffer renderToBuffer(Executor executor, List<PrometheusRawMetricsProvider> metricsProviders) {
boolean cacheMetricsResponse = pulsar.getConfiguration().isMetricsBufferResponse();
while (!closed && !Thread.currentThread().isInterrupted()) {
long currentTimeSlot = cacheMetricsResponse ? calculateCurrentTimeSlot() : 0;
MetricsBuffer currentMetricsBuffer = metricsBuffer;
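            // create a fresh buffer when none exists, the previous render failed, or the cached
            // buffer's time slot has expired; otherwise reuse (and retain) the current one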
if (currentMetricsBuffer == null || currentMetricsBuffer.getBufferFuture().isCompletedExceptionally()
|| (currentMetricsBuffer.getBufferFuture().isDone()
&& (currentMetricsBuffer.getCreateTimeslot() != 0
&& currentTimeSlot > currentMetricsBuffer.getCreateTimeslot()))) {
MetricsBuffer newMetricsBuffer = new MetricsBuffer(currentTimeSlot);
if (metricsBufferFieldUpdater.compareAndSet(this, currentMetricsBuffer, newMetricsBuffer)) {
if (currentMetricsBuffer != null) {
currentMetricsBuffer.release();
}
CompletableFuture<ResponseBuffer> bufferFuture = newMetricsBuffer.getBufferFuture();
executor.execute(() -> {
try {
bufferFuture.complete(new ResponseBuffer(generateMetrics(metricsProviders)));
} catch (Exception e) {
bufferFuture.completeExceptionally(e);
} finally {
if (currentTimeSlot == 0) {
// if the buffer is not cached, release it after the future is completed
metricsBufferFieldUpdater.compareAndSet(this, newMetricsBuffer, null);
newMetricsBuffer.release();
}
}
});
// no need to retain before returning since the new buffer starts with refCnt 2
return newMetricsBuffer;
} else {
currentMetricsBuffer = metricsBuffer;
}
}
// retain the buffer before returning
// if the buffer is already released, retaining won't succeed, retry in that case
if (currentMetricsBuffer != null && currentMetricsBuffer.retain()) {
return currentMetricsBuffer;
}
}
return null;
}
|
@Test
public void testReproducingBufferOverflowExceptionAndEOFExceptionBugsInGzipCompression()
throws ExecutionException, InterruptedException, IOException {
PulsarService pulsar = mock(PulsarService.class);
ServiceConfiguration serviceConfiguration = new ServiceConfiguration();
when(pulsar.getConfiguration()).thenReturn(serviceConfiguration);
// generate a random byte buffer which is 8 bytes less than the minimum compress buffer size limit
// this will trigger the BufferOverflowException bug in writing the gzip trailer
// it will also trigger another bug in finishing the gzip compression stream when the compress buffer is full
// which results in EOFException
Random random = new Random();
byte[] inputBytes = new byte[8192 - 8];
random.nextBytes(inputBytes);
ByteBuf byteBuf = Unpooled.wrappedBuffer(inputBytes);
PrometheusMetricsGenerator generator =
new PrometheusMetricsGenerator(pulsar, false, false, false, false, Clock.systemUTC()) {
// override the generateMetrics method to return the random byte buffer for gzip compression
// instead of the actual metrics
@Override
protected ByteBuf generateMetrics(List<PrometheusRawMetricsProvider> metricsProviders) {
return byteBuf;
}
};
PrometheusMetricsGenerator.MetricsBuffer metricsBuffer =
generator.renderToBuffer(MoreExecutors.directExecutor(), Collections.emptyList());
try {
PrometheusMetricsGenerator.ResponseBuffer responseBuffer = metricsBuffer.getBufferFuture().get();
ByteBuf compressed = responseBuffer.getCompressedBuffer(MoreExecutors.directExecutor()).get();
byte[] compressedBytes = new byte[compressed.readableBytes()];
compressed.readBytes(compressedBytes);
try (GZIPInputStream gzipInputStream = new GZIPInputStream(new ByteArrayInputStream(compressedBytes))) {
byte[] uncompressedBytes = IOUtils.toByteArray(gzipInputStream);
assertEquals(uncompressedBytes, inputBytes);
}
} finally {
metricsBuffer.release();
}
}
|
@Override
public READDIRPLUS3Response readdirplus(XDR xdr, RpcInfo info) {
return readdirplus(xdr, getSecurityHandler(info), info.remoteAddress());
}
|
@Test(timeout = 60000)
public void testReaddirplus() throws Exception {
HdfsFileStatus status = nn.getRpcServer().getFileInfo(testdir);
long dirId = status.getFileId();
int namenodeId = Nfs3Utils.getNamenodeId(config);
FileHandle handle = new FileHandle(dirId, namenodeId);
XDR xdr_req = new XDR();
READDIRPLUS3Request req = new READDIRPLUS3Request(handle, 0, 0, 3, 2);
req.serialize(xdr_req);
// Attempt by an unprivileged user should fail.
READDIRPLUS3Response response1 = nfsd.readdirplus(xdr_req.asReadOnlyWrap(),
securityHandlerUnpriviledged,
new InetSocketAddress("localhost", 1234));
assertEquals("Incorrect return code:", Nfs3Status.NFS3ERR_ACCES,
response1.getStatus());
// Attempt by a privileged user should pass.
READDIRPLUS3Response response2 = nfsd.readdirplus(xdr_req.asReadOnlyWrap(),
securityHandler, new InetSocketAddress("localhost", 1234));
assertEquals("Incorrect return code:", Nfs3Status.NFS3_OK,
response2.getStatus());
}
|
@Override
public Long createDataSourceConfig(DataSourceConfigSaveReqVO createReqVO) {
DataSourceConfigDO config = BeanUtils.toBean(createReqVO, DataSourceConfigDO.class);
validateConnectionOK(config);
        // insert
        dataSourceConfigMapper.insert(config);
        // return
return config.getId();
}
|
@Test
public void testCreateDataSourceConfig_success() {
try (MockedStatic<JdbcUtils> databaseUtilsMock = mockStatic(JdbcUtils.class)) {
            // prepare the request parameters
            DataSourceConfigSaveReqVO reqVO = randomPojo(DataSourceConfigSaveReqVO.class)
                    .setId(null); // avoid the id being set
            // mock the method
            databaseUtilsMock.when(() -> JdbcUtils.isConnectionOK(eq(reqVO.getUrl()),
                    eq(reqVO.getUsername()), eq(reqVO.getPassword()))).thenReturn(true);
            // invoke
            Long dataSourceConfigId = dataSourceConfigService.createDataSourceConfig(reqVO);
            // assert
            assertNotNull(dataSourceConfigId);
            // verify that the persisted record's attributes are correct
DataSourceConfigDO dataSourceConfig = dataSourceConfigMapper.selectById(dataSourceConfigId);
assertPojoEquals(reqVO, dataSourceConfig, "id");
}
}
|
@Override
public void deleteDictType(Long id) {
        // validate that the dict type exists
        DictTypeDO dictType = validateDictTypeExists(id);
        // validate that it has no dict data
        if (dictDataService.getDictDataCountByDictType(dictType.getType()) > 0) {
            throw exception(DICT_TYPE_HAS_CHILDREN);
        }
        // delete the dict type
dictTypeMapper.updateToDelete(id, LocalDateTime.now());
}
|
@Test
public void testDeleteDictType_hasChildren() {
        // mock data
        DictTypeDO dbDictType = randomDictTypeDO();
        dictTypeMapper.insert(dbDictType);// @Sql: insert an existing record first
        // prepare the request parameters
        Long id = dbDictType.getId();
        // mock the method
        when(dictDataService.getDictDataCountByDictType(eq(dbDictType.getType()))).thenReturn(1L);
        // invoke and assert the exception
assertServiceException(() -> dictTypeService.deleteDictType(id), DICT_TYPE_HAS_CHILDREN);
}
|
@VisibleForTesting
static long calculateDistinctPartitionKeys(
HiveColumnHandle column,
List<HivePartition> partitions)
{
return partitions.stream()
.map(partition -> partition.getKeys().get(column))
.filter(value -> !value.isNull())
.distinct()
.count();
}
|
@Test
public void testCalculateDistinctPartitionKeys()
{
assertEquals(calculateDistinctPartitionKeys(PARTITION_COLUMN_1, ImmutableList.of()), 0);
assertEquals(
calculateDistinctPartitionKeys(
PARTITION_COLUMN_1,
ImmutableList.of(partition("p1=string1/p2=1234"))),
1);
assertEquals(
calculateDistinctPartitionKeys(
PARTITION_COLUMN_1,
ImmutableList.of(partition("p1=string1/p2=1234"), partition("p1=string2/p2=1234"))),
2);
assertEquals(
calculateDistinctPartitionKeys(
PARTITION_COLUMN_2,
ImmutableList.of(partition("p1=string1/p2=1234"), partition("p1=string2/p2=1234"))),
1);
assertEquals(
calculateDistinctPartitionKeys(
PARTITION_COLUMN_2,
ImmutableList.of(partition("p1=string1/p2=1234"), partition("p1=string1/p2=1235"))),
2);
assertEquals(
calculateDistinctPartitionKeys(
PARTITION_COLUMN_1,
ImmutableList.of(partition("p1=__HIVE_DEFAULT_PARTITION__/p2=1234"), partition("p1=string1/p2=1235"))),
1);
assertEquals(
calculateDistinctPartitionKeys(
PARTITION_COLUMN_2,
ImmutableList.of(partition("p1=123/p2=__HIVE_DEFAULT_PARTITION__"), partition("p1=string1/p2=1235"))),
1);
assertEquals(
calculateDistinctPartitionKeys(
PARTITION_COLUMN_2,
ImmutableList.of(partition("p1=123/p2=__HIVE_DEFAULT_PARTITION__"), partition("p1=string1/p2=__HIVE_DEFAULT_PARTITION__"))),
0);
}
|
public static SubquerySegment bind(final SubquerySegment segment, final SQLStatementBinderContext binderContext, final Map<String, TableSegmentBinderContext> outerTableBinderContexts) {
SQLStatementBinderContext selectBinderContext = new SQLStatementBinderContext(segment.getSelect(), binderContext.getMetaData(), binderContext.getCurrentDatabaseName());
selectBinderContext.getExternalTableBinderContexts().putAll(binderContext.getExternalTableBinderContexts());
SelectStatement boundSelectStatement = new SelectStatementBinder(outerTableBinderContexts).bind(segment.getSelect(), selectBinderContext);
SubquerySegment result = new SubquerySegment(segment.getStartIndex(), segment.getStopIndex(), boundSelectStatement, segment.getText());
result.setSubqueryType(segment.getSubqueryType());
return result;
}
|
@Test
void assertBind() {
MySQLSelectStatement mysqlSelectStatement = new MySQLSelectStatement();
ColumnSegment columnSegment = new ColumnSegment(58, 65, new IdentifierValue("order_id"));
ProjectionsSegment projectionsSegment = new ProjectionsSegment(58, 65);
projectionsSegment.getProjections().add(new ColumnProjectionSegment(columnSegment));
mysqlSelectStatement.setProjections(projectionsSegment);
mysqlSelectStatement.setFrom(new SimpleTableSegment(new TableNameSegment(72, 78, new IdentifierValue("t_order"))));
ExpressionSegment whereExpressionSegment = new ColumnSegment(86, 91, new IdentifierValue("status"));
mysqlSelectStatement.setWhere(new WhereSegment(80, 102, whereExpressionSegment));
SubquerySegment subquerySegment = new SubquerySegment(39, 103, mysqlSelectStatement, "order_id = (SELECT order_id FROM t_order WHERE status = 'SUBMIT')");
SQLStatementBinderContext sqlStatementBinderContext = new SQLStatementBinderContext(
createMetaData(), DefaultDatabase.LOGIC_NAME, TypedSPILoader.getService(DatabaseType.class, "FIXTURE"), Collections.emptySet());
ColumnSegment boundNameColumn = new ColumnSegment(7, 13, new IdentifierValue("user_id"));
boundNameColumn.setColumnBoundInfo(new ColumnSegmentBoundInfo(
new IdentifierValue(DefaultDatabase.LOGIC_NAME), new IdentifierValue(DefaultDatabase.LOGIC_NAME), new IdentifierValue("t_order_item"), new IdentifierValue("user_id")));
sqlStatementBinderContext.getExternalTableBinderContexts().put("t_order_item", new SimpleTableSegmentBinderContext(Collections.singleton(new ColumnProjectionSegment(boundNameColumn))));
Map<String, TableSegmentBinderContext> outerTableBinderContexts = new LinkedHashMap<>();
SubquerySegment actual = SubquerySegmentBinder.bind(subquerySegment, sqlStatementBinderContext, outerTableBinderContexts);
assertNotNull(actual.getSelect());
assertTrue(actual.getSelect().getFrom().isPresent());
assertInstanceOf(SimpleTableSegment.class, actual.getSelect().getFrom().get());
assertThat(((SimpleTableSegment) actual.getSelect().getFrom().get()).getTableName().getIdentifier().getValue(), is("t_order"));
assertTrue(actual.getSelect().getWhere().isPresent());
assertInstanceOf(ColumnSegment.class, actual.getSelect().getWhere().get().getExpr());
assertThat(((ColumnSegment) actual.getSelect().getWhere().get().getExpr()).getIdentifier().getValue(), is("status"));
assertNotNull(((ColumnSegment) actual.getSelect().getWhere().get().getExpr()).getColumnBoundInfo());
assertThat(((ColumnSegment) actual.getSelect().getWhere().get().getExpr()).getColumnBoundInfo().getOriginalColumn().getValue(), is("status"));
assertThat(((ColumnSegment) actual.getSelect().getWhere().get().getExpr()).getColumnBoundInfo().getOriginalTable().getValue(), is("t_order"));
assertThat(((ColumnSegment) actual.getSelect().getWhere().get().getExpr()).getColumnBoundInfo().getOriginalSchema().getValue(), is(DefaultDatabase.LOGIC_NAME));
assertThat(((ColumnSegment) actual.getSelect().getWhere().get().getExpr()).getColumnBoundInfo().getOriginalDatabase().getValue(), is(DefaultDatabase.LOGIC_NAME));
assertNotNull(actual.getSelect().getProjections());
assertThat(actual.getSelect().getProjections().getProjections().size(), is(1));
ProjectionSegment column = actual.getSelect().getProjections().getProjections().iterator().next();
assertInstanceOf(ColumnProjectionSegment.class, column);
assertThat(((ColumnProjectionSegment) column).getColumn().getIdentifier().getValue(), is("order_id"));
assertNotNull(((ColumnProjectionSegment) column).getColumn().getColumnBoundInfo());
assertThat(((ColumnProjectionSegment) column).getColumn().getColumnBoundInfo().getOriginalColumn().getValue(), is("order_id"));
assertThat(((ColumnProjectionSegment) column).getColumn().getColumnBoundInfo().getOriginalTable().getValue(), is("t_order"));
assertThat(((ColumnProjectionSegment) column).getColumn().getColumnBoundInfo().getOriginalSchema().getValue(), is(DefaultDatabase.LOGIC_NAME));
assertThat(((ColumnProjectionSegment) column).getColumn().getColumnBoundInfo().getOriginalDatabase().getValue(), is(DefaultDatabase.LOGIC_NAME));
}
|
static String describe(Throwable t) {
if (t == null) {
return null;
}
String typeDescription = t.getClass().getSimpleName();
String message = t.getMessage();
return typeDescription + (message != null ? ": '" + message + "'" : "");
}
|
@Test
void exceptionDescribedAsClassSimpleNameAndMessage() {
assertThat(
ExceptionUtils.describe(new IOException("timed out")), is("IOException: 'timed out'"));
}
|
@Override
public void set(File file, String view, String attribute, Object value, boolean create) {
if (attribute.equals("owner")) {
checkNotCreate(view, attribute, create);
UserPrincipal user = checkType(view, attribute, value, UserPrincipal.class);
// TODO(cgdecker): Do we really need to do this? Any reason not to allow any UserPrincipal?
if (!(user instanceof UserLookupService.JimfsUserPrincipal)) {
user = createUserPrincipal(user.getName());
}
file.setAttribute("owner", "owner", user);
}
}
|
@Test
public void testSet() {
assertSetAndGetSucceeds("owner", createUserPrincipal("user"));
assertSetFailsOnCreate("owner", createUserPrincipal("user"));
// invalid type
assertSetFails("owner", "root");
}
|
@Override
public String requestMessageForLatestRevision(PackageConfiguration packageConfiguration, RepositoryConfiguration repositoryConfiguration) {
        Map<String, Object> configuredValues = new LinkedHashMap<>();
configuredValues.put("repository-configuration", jsonResultMessageHandler.configurationToMap(repositoryConfiguration));
configuredValues.put("package-configuration", jsonResultMessageHandler.configurationToMap(packageConfiguration));
return GSON.toJson(configuredValues);
}
|
@Test
public void shouldBuildRequestBodyForLatestRevisionRequest() throws Exception {
String requestBody = messageHandler.requestMessageForLatestRevision(packageConfiguration, repositoryConfiguration);
assertThat(requestBody, is("{\"repository-configuration\":{\"key-one\":{\"value\":\"value-one\"},\"key-two\":{\"value\":\"value-two\"}},\"package-configuration\":{\"key-three\":{\"value\":\"value-three\"},\"key-four\":{\"value\":\"value-four\"}}}"));
}
|
public JobStatsExtended enrich(JobStats jobStats) {
JobStats latestJobStats = getLatestJobStats(jobStats, previousJobStats);
if (lock.tryLock()) {
setFirstRelevantJobStats(latestJobStats);
setJobStatsExtended(latestJobStats);
setPreviousJobStats(latestJobStats);
lock.unlock();
}
return jobStatsExtended;
}
|
@Test
void enrichGivenNoPreviousJobStatsAndNoWorkToDo() {
JobStatsExtended extendedJobStats = jobStatsEnricher.enrich(getJobStats(0L, 0L, 0L, 0L));
assertThat(extendedJobStats.getAmountSucceeded()).isZero();
assertThat(extendedJobStats.getAmountFailed()).isZero();
assertThat(extendedJobStats.getEstimation().isProcessingDone()).isTrue();
assertThat(extendedJobStats.getEstimation().isEstimatedProcessingFinishedInstantAvailable()).isFalse();
}
|
@NonNull
static String getImageUrl(List<FastDocumentFile> files, Uri folderUri) {
// look for special file names
for (String iconLocation : PREFERRED_FEED_IMAGE_FILENAMES) {
for (FastDocumentFile file : files) {
if (iconLocation.equals(file.getName())) {
return file.getUri().toString();
}
}
}
// use the first image in the folder if existing
for (FastDocumentFile file : files) {
String mime = file.getType();
if (mime != null && (mime.startsWith("image/jpeg") || mime.startsWith("image/png"))) {
return file.getUri().toString();
}
}
// use default icon as fallback
return Feed.PREFIX_GENERATIVE_COVER + folderUri;
}
|
@Test
public void testGetImageUrl_NoImageButAudioFiles() {
List<FastDocumentFile> folder = Collections.singletonList(mockDocumentFile("audio.mp3", "audio/mp3"));
String imageUrl = LocalFeedUpdater.getImageUrl(folder, Uri.EMPTY);
assertThat(imageUrl, startsWith(Feed.PREFIX_GENERATIVE_COVER));
}
|
public MutableMap<K, V> beginWrite() {
return new MutableMap<>(set);
}
|
@SuppressWarnings({"OverlyLongMethod"})
@Test
public void iterationTest() {
Random random = new Random(8234890);
Persistent23TreeMap<Integer, String> map = new Persistent23TreeMap<>();
Persistent23TreeMap.MutableMap<Integer, String> write = map.beginWrite();
int[] p = genPermutation(random);
TreeSet<Integer> added = new TreeSet<>();
for (int i = 0; i < ENTRIES_TO_ADD; i++) {
int size = write.size();
Assert.assertEquals(i, size);
if ((size & 1023) == 0 || size < 100) {
Iterator<Integer> iterator = added.iterator();
for (Persistent23TreeMap.Entry<Integer, String> key : write) {
Assert.assertTrue(iterator.hasNext());
Integer next = iterator.next();
Assert.assertEquals(next, key.getKey());
Assert.assertEquals(String.valueOf(next), key.getValue());
}
Assert.assertFalse(iterator.hasNext());
iterator = added.iterator();
Iterator<Persistent23TreeMap.Entry<Integer, String>> treeItr = write.iterator();
for (int j = 0; j < size; j++) {
Persistent23TreeMap.Entry<Integer, String> key = treeItr.next();
Assert.assertTrue(iterator.hasNext());
Integer next = iterator.next();
Assert.assertEquals(next, key.getKey());
Assert.assertEquals(String.valueOf(next), key.getValue());
}
Assert.assertFalse(iterator.hasNext());
try {
treeItr.next();
Assert.fail();
} catch (NoSuchElementException e) {
}
Assert.assertFalse(treeItr.hasNext());
}
write.put(p[i], String.valueOf(p[i]));
added.add(p[i]);
}
}
|
@Udf
public String lpad(
@UdfParameter(description = "String to be padded") final String input,
@UdfParameter(description = "Target length") final Integer targetLen,
@UdfParameter(description = "Padding string") final String padding) {
if (input == null) {
return null;
}
if (padding == null || padding.isEmpty() || targetLen == null || targetLen < 0) {
return null;
}
final StringBuilder sb = new StringBuilder(targetLen + padding.length());
final int padUpTo = Math.max(targetLen - input.length(), 0);
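    // append whole copies of the padding, trim to the exact pad width, then append the input
    // and truncate the result to the target length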
for (int i = 0; i < padUpTo; i += padding.length()) {
sb.append(padding);
}
sb.setLength(padUpTo);
sb.append(input);
sb.setLength(targetLen);
return sb.toString();
}
|
@Test
public void shouldTruncateInputIfTargetLengthTooSmallString() {
final String result = udf.lpad("foo", 2, "bar");
assertThat(result, is("fo"));
}
|
@Override
public List<String> tokenise(String text) {
if (text == null || text.isEmpty()) {
return new ArrayList<>();
}
text = text.replaceAll("[^\\p{L}\\p{N}\\s\\-'.]", " ").trim();
String[] parts = text.split("\\s+");
var tokenBuilder = new StringBuilder();
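        // prefix words are buffered in tokenBuilder and emitted together with the word that
        // follows them as a single token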
List<String> tokenParts = Stream.of(parts)
.flatMap(part -> {
if (isPrefixWord(part) && !tokenBuilder.isEmpty()) {
tokenBuilder.append(" ").append(part);
return Stream.empty();
} else {
if (!tokenBuilder.isEmpty()) {
String token = tokenBuilder.append(" ").append(part).toString();
tokenBuilder.setLength(0);
return Stream.of(token);
} else if (isPrefixWord(part)) {
tokenBuilder.append(part);
return Stream.empty();
} else {
return Stream.of(part);
}
}
})
.collect(Collectors.toList());
if (!tokenBuilder.isEmpty()) {
tokenParts.add(tokenBuilder.toString());
}
return new ArrayList<>(tokenParts);
}
|
@Description("Tokenise, when text only has one word but lots of commas, then return one word")
@Test
void tokenise_WhenTextOnlyHasOneWordButLotsOfCommas_ThenReturnOneToken() {
// When
var result = textTokeniser.tokenise(",Aberdeen,,,,");
// Then
assertThat(result).isNotEmpty().hasSize(1).contains("Aberdeen");
}
|
public void addValueProviders(final String segmentName,
final RocksDB db,
final Cache cache,
final Statistics statistics) {
if (storeToValueProviders.isEmpty()) {
logger.debug("Adding metrics recorder of task {} to metrics recording trigger", taskId);
streamsMetrics.rocksDBMetricsRecordingTrigger().addMetricsRecorder(this);
} else if (storeToValueProviders.containsKey(segmentName)) {
throw new IllegalStateException("Value providers for store " + segmentName + " of task " + taskId +
" has been already added. This is a bug in Kafka Streams. " +
"Please open a bug report under https://issues.apache.org/jira/projects/KAFKA/issues");
}
verifyDbAndCacheAndStatistics(segmentName, db, cache, statistics);
logger.debug("Adding value providers for store {} of task {}", segmentName, taskId);
storeToValueProviders.put(segmentName, new DbAndCacheAndStatistics(db, cache, statistics));
}
|
@Test
public void shouldThrowIfCacheToAddIsNotNullButExistingCacheIsNull() {
recorder.addValueProviders(SEGMENT_STORE_NAME_1, dbToAdd1, cacheToAdd1, statisticsToAdd1);
final Throwable exception = assertThrows(
IllegalStateException.class,
() -> recorder.addValueProviders(SEGMENT_STORE_NAME_2, dbToAdd2, null, statisticsToAdd2)
);
assertThat(
exception.getMessage(),
is("Cache for segment " + SEGMENT_STORE_NAME_2 + " of task " + TASK_ID1 +
" is null although the cache of another segment in this metrics recorder is not null. " +
"This is a bug in Kafka Streams. " +
"Please open a bug report under https://issues.apache.org/jira/projects/KAFKA/issues")
);
}
|
@Override
public synchronized FunctionSource getFunction(final List<SqlType> argTypeList) {
final List<SqlArgument> args = argTypeList.stream()
.map((type) -> type == null ? null : SqlArgument.of(type))
.collect(Collectors.toList());
final UdafFactoryInvoker creator = udfIndex.getFunction(args);
if (creator == null) {
throw new KsqlException("There is no aggregate function with name='" + getName()
+ "' that has arguments of type="
+ argTypeList.stream()
.map(SqlType::baseType)
.map(Objects::toString)
.collect(Collectors.joining(",")));
}
final boolean isFactoryVariadic = creator.literalParams().stream()
.anyMatch(ParameterInfo::isVariadic);
/* There can only be one variadic argument, so we know either the column args are bounded
or the initial args are bounded. */
final int numInitArgs;
final int numSignatureInitArgs = creator.literalParams().size();
if (isFactoryVariadic) {
numInitArgs = argTypeList.size() - (creator.parameterInfo().size() - numSignatureInitArgs);
} else {
numInitArgs = numSignatureInitArgs;
}
return new FunctionSource(
numInitArgs,
(initArgs) -> creator.createFunction(initArgs, args)
);
}
|
@Test
public void shouldThrowOnUnsupportedInitParamType() {
// When:
final Exception e = assertThrows(KsqlException.class,
() -> functionFactory.getFunction(
ImmutableList.of(SqlTypes.STRING, SqlDecimal.of(1, 0))
)
);
// Then:
assertThat(e.getMessage(), is("There is no aggregate function with name='BOB' that has "
+ "arguments of type=STRING,DECIMAL"));
}
|
public static Row toBeamRow(GenericRecord record, Schema schema, ConversionOptions options) {
List<Object> valuesInOrder =
schema.getFields().stream()
.map(
field -> {
try {
org.apache.avro.Schema.Field avroField =
record.getSchema().getField(field.getName());
Object value = avroField != null ? record.get(avroField.pos()) : null;
return convertAvroFormat(field.getType(), value, options);
} catch (Exception cause) {
throw new IllegalArgumentException(
"Error converting field " + field + ": " + cause.getMessage(), cause);
}
})
.collect(toList());
return Row.withSchema(schema).addValues(valuesInOrder).build();
}
|
@Test
public void testToBeamRow_array_row() {
Row beamRow = BigQueryUtils.toBeamRow(ARRAY_ROW_TYPE, BQ_ARRAY_ROW_ROW);
assertEquals(ARRAY_ROW_ROW, beamRow);
}
|
public Region.Type type() {
String t = get(TYPE, null);
return t == null ? null : regionTypeFor(t);
}
|
@Test
public void clearType() {
loadRegion(R2);
cfg.type(null);
checkRegion(PARIS, null, R2_DEVS);
}
|
@Override
@InterfaceAudience.Private
public void write(DataOutput out) throws IOException {
out.writeLong(length);
out.writeLong(fileCount);
out.writeLong(directoryCount);
out.writeLong(getQuota());
out.writeLong(getSpaceConsumed());
out.writeLong(getSpaceQuota());
}
|
@Test
public void testWrite() throws IOException {
long length = 11111;
long fileCount = 22222;
long directoryCount = 33333;
long quota = 44444;
long spaceConsumed = 55555;
long spaceQuota = 66666;
ContentSummary contentSummary = new ContentSummary.Builder().length(length).
fileCount(fileCount).directoryCount(directoryCount).quota(quota).
spaceConsumed(spaceConsumed).spaceQuota(spaceQuota).build();
DataOutput out = mock(DataOutput.class);
InOrder inOrder = inOrder(out);
contentSummary.write(out);
inOrder.verify(out).writeLong(length);
inOrder.verify(out).writeLong(fileCount);
inOrder.verify(out).writeLong(directoryCount);
inOrder.verify(out).writeLong(quota);
inOrder.verify(out).writeLong(spaceConsumed);
inOrder.verify(out).writeLong(spaceQuota);
}
|
Properties consumerProps() {
return consumerProps;
}
|
@Test
public void testDefaultClientId() throws IOException {
String[] args = new String[]{
"--bootstrap-server", "localhost:9092",
"--topic", "test",
};
ConsoleShareConsumerOptions config = new ConsoleShareConsumerOptions(args);
Properties consumerProperties = config.consumerProps();
assertEquals("console-share-consumer", consumerProperties.getProperty(ConsumerConfig.CLIENT_ID_CONFIG));
}
|
@Override
public double calcEdgeWeight(EdgeIteratorState edgeState, boolean reverse) {
double priority = edgeToPriorityMapping.get(edgeState, reverse);
if (priority == 0) return Double.POSITIVE_INFINITY;
final double distance = edgeState.getDistance();
double seconds = calcSeconds(distance, edgeState, reverse);
if (Double.isInfinite(seconds)) return Double.POSITIVE_INFINITY;
// add penalty at start/stop/via points
if (edgeState.get(EdgeIteratorState.UNFAVORED_EDGE)) seconds += headingPenaltySeconds;
double distanceCosts = distance * distanceInfluence;
if (Double.isInfinite(distanceCosts)) return Double.POSITIVE_INFINITY;
return seconds / priority + distanceCosts;
}
|
@Test
public void withPriority() {
// 25km/h -> 144s per km, 50km/h -> 72s per km, 100km/h -> 36s per km
EdgeIteratorState slow = graph.edge(0, 1).set(avSpeedEnc, 25, 25).setDistance(1000).
set(roadClassEnc, SECONDARY);
EdgeIteratorState medium = graph.edge(0, 1).set(avSpeedEnc, 50, 50).setDistance(1000).
set(roadClassEnc, SECONDARY);
EdgeIteratorState fast = graph.edge(0, 1).set(avSpeedEnc, 100).setDistance(1000).
set(roadClassEnc, SECONDARY);
Weighting weighting = createWeighting(createSpeedCustomModel(avSpeedEnc));
assertEquals(144, weighting.calcEdgeWeight(slow, false), .1);
assertEquals(72, weighting.calcEdgeWeight(medium, false), .1);
assertEquals(36, weighting.calcEdgeWeight(fast, false), .1);
// if we reduce the priority we get higher edge weights
weighting = CustomModelParser.createWeighting(encodingManager, NO_TURN_COST_PROVIDER,
createSpeedCustomModel(avSpeedEnc)
.addToPriority(If("road_class == SECONDARY", MULTIPLY, "0.5"))
);
assertEquals(2 * 144, weighting.calcEdgeWeight(slow, false), .1);
assertEquals(2 * 72, weighting.calcEdgeWeight(medium, false), .1);
assertEquals(2 * 36, weighting.calcEdgeWeight(fast, false), .1);
}
|
@VisibleForTesting
ZonedDateTime parseZoned(final String text, final ZoneId zoneId) {
final TemporalAccessor parsed = formatter.parse(text);
final ZoneId parsedZone = parsed.query(TemporalQueries.zone());
ZonedDateTime resolved = DEFAULT_ZONED_DATE_TIME.apply(
ObjectUtils.defaultIfNull(parsedZone, zoneId));
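    // overlay every chrono field present in the parsed text onto the epoch-based default,
    // so fields missing from the input keep their default values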
for (final TemporalField override : ChronoField.values()) {
if (parsed.isSupported(override)) {
if (!resolved.isSupported(override)) {
throw new KsqlException(
"Unsupported temporal field in timestamp: " + text + " (" + override + ")");
}
final long value = parsed.getLong(override);
if (override == ChronoField.DAY_OF_YEAR && value == LEAP_DAY_OF_THE_YEAR) {
if (!parsed.isSupported(ChronoField.YEAR)) {
throw new KsqlException("Leap day cannot be parsed without supplying the year field");
}
// eagerly override year, to avoid mismatch with epoch year, which is not a leap year
resolved = resolved.withYear(parsed.get(ChronoField.YEAR));
}
resolved = resolved.with(override, value);
}
}
return resolved;
}
|
@Test
public void shouldResolveDefaultsForEmpty() {
// Given
final String format = "";
final String timestamp = "";
// When
final ZonedDateTime ts = new StringToTimestampParser(format).parseZoned(timestamp, ZID);
// Then
assertThat(ts, is(sameInstant(EPOCH.withZoneSameInstant(ZID))));
}
|
public void removeField(int fieldNum) {
// range check
if (fieldNum < 0 || fieldNum >= this.numFields) {
throw new IndexOutOfBoundsException();
}
int lastIndex = this.numFields - 1;
if (fieldNum < lastIndex) {
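            // shift all fields after the removed one left by one slot to close the gap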
int len = lastIndex - fieldNum;
System.arraycopy(this.offsets, fieldNum + 1, this.offsets, fieldNum, len);
System.arraycopy(this.lengths, fieldNum + 1, this.lengths, fieldNum, len);
System.arraycopy(this.readFields, fieldNum + 1, this.readFields, fieldNum, len);
System.arraycopy(this.writeFields, fieldNum + 1, this.writeFields, fieldNum, len);
markModified(fieldNum);
}
this.offsets[lastIndex] = NULL_INDICATOR_OFFSET;
this.lengths[lastIndex] = 0;
this.writeFields[lastIndex] = null;
setNumFields(lastIndex);
}
|
@Test
void testRemoveField() {
Record record;
int oldLen;
// Create filled record and remove field from the middle
record = new Record(this.origVal1, this.origVal2);
record.addField(this.origVal3);
record.removeField(1);
assertThat(record.getNumFields()).isEqualTo(2);
StringValue recVal1 = record.getField(0, StringValue.class);
IntValue recVal2 = record.getField(1, IntValue.class);
assertThat(this.origVal1.getValue()).isEqualTo(recVal1.getValue());
assertThat(this.origVal3.getValue()).isEqualTo(recVal2.getValue());
record = this.generateFilledDenseRecord(100);
// Remove field from the first position of the record
oldLen = record.getNumFields();
record.removeField(0);
assertThat(oldLen - 1).isEqualTo(record.getNumFields());
// Remove field from the end of the record
oldLen = record.getNumFields();
record.removeField(oldLen - 1);
assertThat(oldLen - 1).isEqualTo(record.getNumFields());
// Insert several random fields into the record
record = this.generateFilledDenseRecord(100);
for (int i = 0; i < 100; i++) {
oldLen = record.getNumFields();
int pos = this.rand.nextInt(record.getNumFields());
record.removeField(pos);
assertThat(oldLen - 1).isEqualTo(record.getNumFields());
}
}
|
public static <T extends GeneratedMessageV3> ProtobufNativeSchema<T> of(Class<T> pojo) {
return of(pojo, new HashMap<>());
}
|
@Test
public void testEncodeAndDecode() {
final String stringFieldValue = "StringFieldValue";
org.apache.pulsar.client.schema.proto.Test.TestMessage testMessage = org.apache.pulsar.client.schema.proto.Test.TestMessage.newBuilder().setStringField(stringFieldValue).build();
ProtobufNativeSchema<org.apache.pulsar.client.schema.proto.Test.TestMessage> protobufSchema = ProtobufNativeSchema.of(org.apache.pulsar.client.schema.proto.Test.TestMessage.class);
byte[] bytes = protobufSchema.encode(testMessage);
org.apache.pulsar.client.schema.proto.Test.TestMessage message = protobufSchema.decode(bytes);
assertEquals(message.getStringField(), stringFieldValue);
}
|
@CheckForNull
@Override
public Map<Path, Set<Integer>> branchChangedLines(String targetBranchName, Path projectBaseDir, Set<Path> changedFiles) {
return branchChangedLinesWithFileMovementDetection(targetBranchName, projectBaseDir, toChangedFileByPathsMap(changedFiles));
}
|
@Test
public void branchChangedLines_should_always_ignore_different_line_endings() throws IOException, GitAPIException {
Path filePath = worktree.resolve("file-m1.xoo");
createAndCommitFile("file-m1.xoo");
ObjectId forkPoint = git.getRepository().exactRef("HEAD").getObjectId();
git.branchCreate().setName("b1").setStartPoint(forkPoint.getName()).call();
git.checkout().setName("b1").call();
String newFileContent = Files.readString(filePath).replaceAll("\n", "\r\n");
Files.write(filePath, newFileContent.getBytes(StandardCharsets.UTF_8), StandardOpenOption.TRUNCATE_EXISTING);
commit("file-m1.xoo");
assertThat(newScmProvider().branchChangedLines("master", worktree, Collections.singleton(filePath)))
.isEmpty();
}
|