[Dataset preview header: two string columns, focal_method (lengths 13 to 60.9k characters) and test_case (lengths 25 to 109k characters). Each row below pairs a focal method with its corresponding test case.]
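As a minimal sketch only, here is one way such (focal_method, test_case) pairs might be consumed, assuming the rows are exported as JSON Lines with those two string fields; the file name "pairs.jsonl", the export format, and the use of Jackson are illustrative assumptions, not something the preview states.

import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Paths;
import java.util.List;

public class PairReader {
    public static void main(String[] args) throws IOException {
        ObjectMapper mapper = new ObjectMapper();
        // Hypothetical export layout: one JSON object per line with
        // "focal_method" and "test_case" string fields.
        List<String> lines = Files.readAllLines(Paths.get("pairs.jsonl"));
        for (String line : lines) {
            JsonNode row = mapper.readTree(line);
            String focalMethod = row.get("focal_method").asText();
            String testCase = row.get("test_case").asText();
            // Per the header above, lengths fall in the ranges
            // 13 to 60.9k and 25 to 109k characters respectively.
            System.out.printf("method: %d chars, test: %d chars%n",
                    focalMethod.length(), testCase.length());
        }
    }
}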
public void enumerateImportedFields(SDDocumentType documentType) { var search = this.schemas.stream() .filter(s -> s.getDocument() != null) .filter(s -> s.getDocument().getName().equals(documentType.getName())) .findFirst(); if (search.isEmpty()) { return; // No imported fields present. } search.get().temporaryImportedFields().ifPresent(documentType::setTemporaryImportedFields); }
@Test void imported_fields_are_enumerated_and_copied_from_correct_search_instance() { String PARENT = "parent"; Schema parentSchema = new Schema(PARENT, MockApplicationPackage.createEmpty()); SDDocumentType parentDocument = new SDDocumentType(PARENT, parentSchema); var parentField = new SDField(parentDocument, "their_field", DataType.INT); AttributeUtils.addAttributeAspect(parentSchema.getName(), parentField); parentDocument.addField(parentField); parentSchema.addDocument(parentDocument); String FOO = "foo"; Schema fooSchema = new Schema(FOO, MockApplicationPackage.createEmpty()); /* SDField fooRefToParent = new SDField( "foo_ref", NewDocumentReferenceDataType.createWithInferredId(parentDocument.getDocumentType())); AttributeUtils.addAttributeAspect(fooRefToParent); */ var fooImports = fooSchema.temporaryImportedFields().get(); fooImports.add(new TemporaryImportedField("my_first_import", "foo_ref", "their_field")); fooImports.add(new TemporaryImportedField("my_second_import", "foo_ref", "their_field")); SDDocumentType fooDocument = new SDDocumentType(FOO, fooSchema); fooSchema.addDocument(fooDocument); String BAR = "bar"; Schema barSchema = new Schema(BAR, MockApplicationPackage.createEmpty()); /* SDField barRefToParent = new SDField( "bar_ref", NewDocumentReferenceDataType.createWithInferredId(parentDocument.getDocumentType())); AttributeUtils.addAttributeAspect(barRefToParent); */ var barImports = barSchema.temporaryImportedFields().get(); barImports.add(new TemporaryImportedField("my_cool_import", "my_ref", "their_field")); SDDocumentType barDocument = new SDDocumentType(BAR, barSchema); barSchema.addDocument(barDocument); var enumerator = new ImportedFieldsEnumerator(List.of(parentSchema, fooSchema, barSchema)); enumerator.enumerateImportedFields(parentDocument); assertImportedFieldsAre(parentDocument, List.of()); // No imported fields in parent enumerator.enumerateImportedFields(fooDocument); assertImportedFieldsAre(fooDocument, List.of("my_first_import", "my_second_import")); enumerator.enumerateImportedFields(barDocument); assertImportedFieldsAre(barDocument, List.of("my_cool_import")); }
public DLQEntry pollEntry(long timeout) throws IOException, InterruptedException { byte[] bytes = pollEntryBytes(timeout); if (bytes == null) { return null; } return DLQEntry.deserialize(bytes); }
@Test public void testFlushAfterDelay() throws Exception { Event event = new Event(); int eventsPerBlock = randomBetween(1,16); int eventsToWrite = eventsPerBlock - 1; event.setField("T", generateMessageContent(PAD_FOR_BLOCK_SIZE_EVENT/eventsPerBlock)); Timestamp timestamp = new Timestamp(); System.out.println("events per block= " + eventsPerBlock); try(DeadLetterQueueWriter writeManager = DeadLetterQueueWriter .newBuilder(dir, BLOCK_SIZE, defaultDlqSize, Duration.ofSeconds(2)) .build()) { for (int i = 1; i < eventsToWrite; i++) { DLQEntry entry = new DLQEntry(event, "", "", Integer.toString(i), timestamp); writeManager.writeEntry(entry); } try (DeadLetterQueueReader readManager = new DeadLetterQueueReader(dir)) { for (int i = 1; i < eventsToWrite; i++) { DLQEntry entry = readManager.pollEntry(100); assertThat(entry, is(nullValue())); } } Thread.sleep(3000); try (DeadLetterQueueReader readManager = new DeadLetterQueueReader(dir)) { for (int i = 1; i < eventsToWrite; i++) { DLQEntry entry = readManager.pollEntry(100); assertThat(entry.getReason(), is(String.valueOf(i))); } } } }
@Override public final void afterPropertiesSet() { this.refreshLocalCache(); this.afterInitialize(); }
@Test public void testAfterPropertiesSet() { listener.afterPropertiesSet(); assertTrue(listener.getCache().containsKey(ConfigGroupEnum.APP_AUTH.name())); assertTrue(listener.getCache().containsKey(ConfigGroupEnum.PLUGIN.name())); assertTrue(listener.getCache().containsKey(ConfigGroupEnum.RULE.name())); assertTrue(listener.getCache().containsKey(ConfigGroupEnum.SELECTOR.name())); assertTrue(listener.getCache().containsKey(ConfigGroupEnum.META_DATA.name())); }
@Override @CheckForNull public EmailMessage format(Notification notif) { if (!(notif instanceof ChangesOnMyIssuesNotification)) { return null; } ChangesOnMyIssuesNotification notification = (ChangesOnMyIssuesNotification) notif; if (notification.getChange() instanceof AnalysisChange) { checkState(!notification.getChangedIssues().isEmpty(), "changedIssues can't be empty"); return formatAnalysisNotification(notification.getChangedIssues().keySet().iterator().next(), notification); } return formatMultiProject(notification); }
@Test public void formats_returns_html_message_for_multiple_issues_of_same_rule_on_same_project_on_master_when_user_change() { Project project = newProject("1"); String ruleName = randomAlphabetic(8); String host = randomAlphabetic(15); Rule rule = newRule(ruleName, randomRuleTypeHotspotExcluded()); List<ChangedIssue> changedIssues = IntStream.range(0, 2 + new Random().nextInt(5)) .mapToObj(i -> newChangedIssue("issue_" + i, randomValidStatus(), project, rule)) .collect(toList()); UserChange userChange = newUserChange(); when(emailSettings.getServerBaseURL()).thenReturn(host); EmailMessage emailMessage = underTest.format(new ChangesOnMyIssuesNotification(userChange, ImmutableSet.copyOf(changedIssues))); String expectedHref = host + "/project/issues?id=" + project.getKey() + "&issues=" + changedIssues.stream().map(ChangedIssue::getKey).collect(joining("%2C")); String expectedLinkText = "See all " + changedIssues.size() + " issues"; HtmlFragmentAssert.assertThat(emailMessage.getMessage()) .hasParagraph().hasParagraph() // skip header .hasParagraph(project.getProjectName()) .hasList("Rule " + ruleName + " - " + expectedLinkText) .withLink(expectedLinkText, expectedHref) .hasParagraph().hasParagraph() // skip footer .noMoreBlock(); }
@Override public void applyFlowRules(FlowRule... flowRules) { FlowRuleOperations.Builder builder = FlowRuleOperations.builder(); for (FlowRule flowRule : flowRules) { builder.add(flowRule); } apply(builder.build()); }
@Test public void applyFlowRules() { FlowRule r1 = flowRule(1, 1); FlowRule r2 = flowRule(2, 2); FlowRule r3 = flowRule(3, 3); assertTrue("store should be empty", Sets.newHashSet(vnetFlowRuleService1.getFlowEntries(DID1)).isEmpty()); vnetFlowRuleService1.applyFlowRules(r1, r2, r3); assertEquals("3 rules should exist", 3, flowCount(vnetFlowRuleService1)); assertTrue("Entries should be pending add.", validateState(ImmutableMap.of( r1, FlowEntry.FlowEntryState.PENDING_ADD, r2, FlowEntry.FlowEntryState.PENDING_ADD, r3, FlowEntry.FlowEntryState.PENDING_ADD))); }
public void transmit(final int msgTypeId, final DirectBuffer srcBuffer, final int srcIndex, final int length) { checkTypeId(msgTypeId); checkMessageLength(length); final AtomicBuffer buffer = this.buffer; long currentTail = buffer.getLong(tailCounterIndex); int recordOffset = (int)currentTail & (capacity - 1); final int recordLength = HEADER_LENGTH + length; final int recordLengthAligned = BitUtil.align(recordLength, RECORD_ALIGNMENT); final long newTail = currentTail + recordLengthAligned; final int toEndOfBuffer = capacity - recordOffset; if (toEndOfBuffer < recordLengthAligned) { signalTailIntent(buffer, newTail + toEndOfBuffer); insertPaddingRecord(buffer, recordOffset, toEndOfBuffer); currentTail += toEndOfBuffer; recordOffset = 0; } else { signalTailIntent(buffer, newTail); } buffer.putInt(lengthOffset(recordOffset), recordLength); buffer.putInt(typeOffset(recordOffset), msgTypeId); buffer.putBytes(msgOffset(recordOffset), srcBuffer, srcIndex, length); buffer.putLongOrdered(latestCounterIndex, currentTail); buffer.putLongOrdered(tailCounterIndex, currentTail + recordLengthAligned); }
@Test void shouldTransmitIntoEmptyBuffer() { final long tail = 0L; final int recordOffset = (int)tail; final int length = 8; final int recordLength = length + HEADER_LENGTH; final int recordLengthAligned = align(recordLength, RECORD_ALIGNMENT); final UnsafeBuffer srcBuffer = new UnsafeBuffer(new byte[1024]); final int srcIndex = 0; broadcastTransmitter.transmit(MSG_TYPE_ID, srcBuffer, srcIndex, length); final InOrder inOrder = inOrder(buffer); inOrder.verify(buffer).getLong(TAIL_COUNTER_INDEX); inOrder.verify(buffer).putLongOrdered(TAIL_INTENT_COUNTER_OFFSET, tail + recordLengthAligned); inOrder.verify(buffer).putInt(lengthOffset(recordOffset), recordLength); inOrder.verify(buffer).putInt(typeOffset(recordOffset), MSG_TYPE_ID); inOrder.verify(buffer).putBytes(msgOffset(recordOffset), srcBuffer, srcIndex, length); inOrder.verify(buffer).putLongOrdered(LATEST_COUNTER_INDEX, tail); inOrder.verify(buffer).putLongOrdered(TAIL_COUNTER_INDEX, tail + recordLengthAligned); }
public static String uncompress(byte[] compressedURL) { StringBuffer url = new StringBuffer(); switch (compressedURL[0] & 0x0f) { case EDDYSTONE_URL_PROTOCOL_HTTP_WWW: url.append(URL_PROTOCOL_HTTP_WWW_DOT); break; case EDDYSTONE_URL_PROTOCOL_HTTPS_WWW: url.append(URL_PROTOCOL_HTTPS_WWW_DOT); break; case EDDYSTONE_URL_PROTOCOL_HTTP: url.append(URL_PROTOCOL_HTTP_COLON_SLASH_SLASH); break; case EDDYSTONE_URL_PROTOCOL_HTTPS: url.append(URL_PROTOCOL_HTTPS_COLON_SLASH_SLASH); break; default: break; } byte lastByte = -1; for (int i = 1; i < compressedURL.length; i++) { byte b = compressedURL[i]; if (lastByte == 0 && b == 0 ) { break; } lastByte = b; String tld = topLevelDomainForByte(b); if (tld != null) { url.append(tld); } else { url.append((char) b); } } return url.toString(); }
@Test public void testUncompressWithSubdomainsAndTrailingSlash() throws MalformedURLException { String testURL = "http://www.forums.google.com/"; byte[] testBytes = {0x00, 'f', 'o', 'r', 'u', 'm', 's', '.', 'g', 'o', 'o', 'g', 'l', 'e', 0x00}; assertEquals(testURL, UrlBeaconUrlCompressor.uncompress(testBytes)); }
@Override public BitMask set(int index) { if (index >= 64) { return BitMask.getEmpty(index+1).setAll(this).set(index); } this.mask = this.mask | (1L << index); return this; }
@Test public void testSet() { assertThat(new LongBitMask().set(0).toString()).isEqualTo("1"); assertThat(new LongBitMask().set(1).toString()).isEqualTo("2"); assertThat(new LongBitMask().set(65).toString()).isEqualTo("0, 2"); }
@Override public boolean equals(Object o) { if (this == o) { return true; } if (o == null || getClass() != o.getClass()) { return false; } if (!super.equals(o)) { return false; } MapTableField field = (MapTableField) o; return path.equals(field.path); }
@Test public void testEquals() { MapTableField field = new MapTableField("name1", QueryDataType.INT, false, QueryPath.KEY_PATH); checkEquals(field, new MapTableField("name1", QueryDataType.INT, false, QueryPath.KEY_PATH), true); checkEquals(field, new MapTableField("name2", QueryDataType.INT, false, QueryPath.KEY_PATH), false); checkEquals(field, new MapTableField("name1", QueryDataType.BIGINT, false, QueryPath.KEY_PATH), false); checkEquals(field, new MapTableField("name1", QueryDataType.INT, true, QueryPath.KEY_PATH), false); checkEquals(field, new MapTableField("name1", QueryDataType.INT, false, QueryPath.VALUE_PATH), false); }
public void sendCouponNewsletter() { try { // Retrieve the list of contacts from the "weekly-coupons-newsletter" contact // list // snippet-start:[sesv2.java2.newsletter.ListContacts] ListContactsRequest contactListRequest = ListContactsRequest.builder() .contactListName(CONTACT_LIST_NAME) .build(); List<String> contactEmails; try { ListContactsResponse contactListResponse = sesClient.listContacts(contactListRequest); contactEmails = contactListResponse.contacts().stream() .map(Contact::emailAddress) .toList(); } catch (Exception e) { // TODO: Remove when listContacts's GET body issue is resolved. contactEmails = this.contacts; } // snippet-end:[sesv2.java2.newsletter.ListContacts] // Send an email using the "weekly-coupons" template to each contact in the list // snippet-start:[sesv2.java2.newsletter.SendEmail.template] String coupons = Files.readString(Paths.get("resources/coupon_newsletter/sample_coupons.json")); for (String emailAddress : contactEmails) { SendEmailRequest newsletterRequest = SendEmailRequest.builder() .destination(Destination.builder().toAddresses(emailAddress).build()) .content(EmailContent.builder() .template(Template.builder() .templateName(TEMPLATE_NAME) .templateData(coupons) .build()) .build()) .fromEmailAddress(this.verifiedEmail) .listManagementOptions(ListManagementOptions.builder() .contactListName(CONTACT_LIST_NAME) .build()) .build(); SendEmailResponse newsletterResponse = sesClient.sendEmail(newsletterRequest); System.out.println("Newsletter sent to " + emailAddress + ": " + newsletterResponse.messageId()); } // snippet-end:[sesv2.java2.newsletter.SendEmail.template] } catch (NotFoundException e) { // If the contact list does not exist, fail the workflow and inform the user System.err.println("The contact list is missing. Please create the contact list and try again."); } catch (AccountSuspendedException e) { // If the account is suspended, fail the workflow and inform the user System.err.println("Your account is suspended. Please resolve the issue and try again."); } catch (MailFromDomainNotVerifiedException e) { // If the sending domain is not verified, fail the workflow and inform the user System.err.println("The sending domain is not verified. Please verify your domain and try again."); throw e; } catch (MessageRejectedException e) { // If the message is rejected due to invalid content, fail the workflow and // inform the user System.err.println("The message content is invalid. Please check your template and try again."); throw e; } catch (SendingPausedException e) { // If sending is paused, fail the workflow and inform the user System.err.println("Sending is currently paused for your account. Please resolve the issue and try again."); throw e; } catch (Exception e) { System.err.println("Error occurred while sending the newsletter: " + e.getMessage()); e.printStackTrace(); } }
@Test public void test_sendCouponNewsletter_success() { ListContactsResponse contactListResponse = ListContactsResponse.builder() .contacts( Contact.builder().emailAddress("user+ses-weekly-newsletter-1@example.com").build(), Contact.builder().emailAddress("user+ses-weekly-newsletter-2@example.com").build(), Contact.builder().emailAddress("user+ses-weekly-newsletter-3@example.com").build()) .build(); when(sesClient.listContacts(any(ListContactsRequest.class))).thenReturn( contactListResponse); SendEmailResponse newsletterResponse = SendEmailResponse.builder().messageId("message-id").build(); when(sesClient.sendEmail(any(SendEmailRequest.class))).thenReturn( newsletterResponse); scenario.sendCouponNewsletter(); String output = outContent.toString(); for (int i = 1; i <= 3; i++) { assertThat(output, containsString("Newsletter sent to user+ses-weekly-newsletter-" + i + "@example.com: message-id")); } }
public Plan validateReservationListRequest( ReservationSystem reservationSystem, ReservationListRequest request) throws YarnException { String queue = request.getQueue(); if (request.getEndTime() < request.getStartTime()) { String errorMessage = "The specified end time must be greater than " + "the specified start time."; RMAuditLogger.logFailure("UNKNOWN", AuditConstants.LIST_RESERVATION_REQUEST, "validate list reservation input", "ClientRMService", errorMessage); throw RPCUtil.getRemoteException(errorMessage); } // Check if it is a managed queue return getPlanFromQueue(reservationSystem, queue, AuditConstants.LIST_RESERVATION_REQUEST); }
@Test public void testListReservationsEmptyQueue() { ReservationListRequest request = new ReservationListRequestPBImpl(); request.setQueue(""); Plan plan = null; try { plan = rrValidator.validateReservationListRequest(rSystem, request); Assert.fail(); } catch (YarnException e) { Assert.assertNull(plan); String message = e.getMessage(); Assert.assertTrue(message.equals( "The queue is not specified. Please try again with a valid " + "reservable queue.")); LOG.info(message); } }
public Exchange createDbzExchange(DebeziumConsumer consumer, final SourceRecord sourceRecord) { final Exchange exchange; if (consumer != null) { exchange = consumer.createExchange(false); } else { exchange = super.createExchange(); } final Message message = exchange.getIn(); final Schema valueSchema = sourceRecord.valueSchema(); final Object value = sourceRecord.value(); // extract values from SourceRecord final Map<String, Object> sourceMetadata = extractSourceMetadataValueFromValueStruct(valueSchema, value); final Object operation = extractValueFromValueStruct(valueSchema, value, Envelope.FieldName.OPERATION); final Object before = extractValueFromValueStruct(valueSchema, value, Envelope.FieldName.BEFORE); final Object body = extractBodyValueFromValueStruct(valueSchema, value); final Object timestamp = extractValueFromValueStruct(valueSchema, value, Envelope.FieldName.TIMESTAMP); final Object ddl = extractValueFromValueStruct(valueSchema, value, HistoryRecord.Fields.DDL_STATEMENTS); // set message headers message.setHeader(DebeziumConstants.HEADER_IDENTIFIER, sourceRecord.topic()); message.setHeader(DebeziumConstants.HEADER_KEY, sourceRecord.key()); message.setHeader(DebeziumConstants.HEADER_SOURCE_METADATA, sourceMetadata); message.setHeader(DebeziumConstants.HEADER_OPERATION, operation); message.setHeader(DebeziumConstants.HEADER_BEFORE, before); message.setHeader(DebeziumConstants.HEADER_TIMESTAMP, timestamp); message.setHeader(DebeziumConstants.HEADER_DDL_SQL, ddl); message.setHeader(Exchange.MESSAGE_TIMESTAMP, timestamp); message.setBody(body); return exchange; }
@Test void testIfCreatesExchangeFromSourceDeleteRecord() { final SourceRecord sourceRecord = createDeleteRecord(); final Exchange exchange = debeziumEndpoint.createDbzExchange(null, sourceRecord); final Message inMessage = exchange.getIn(); assertNotNull(exchange); // assert headers assertEquals("dummy", inMessage.getHeader(DebeziumConstants.HEADER_IDENTIFIER)); assertEquals(Envelope.Operation.DELETE.code(), inMessage.getHeader(DebeziumConstants.HEADER_OPERATION)); final Struct key = (Struct) inMessage.getHeader(DebeziumConstants.HEADER_KEY); assertEquals(12345, key.getInt32("id").intValue()); assertNotNull(inMessage.getHeader(DebeziumConstants.HEADER_BEFORE)); // assert value final Struct body = (Struct) inMessage.getBody(); assertNull(body); // we expect body to be null since is a delete }
public IndexConfig addAttribute(String attribute) { addAttributeInternal(attribute); return this; }
@Test(expected = NullPointerException.class) public void testAttributeNullAdd() { new IndexConfig().addAttribute(null); }
public Duration getServerTimeoutOrThrow() { // readTimeout = DOWNSTREAM_OVERHEAD + serverTimeout TimeBudget serverBudget = readBudget().withReserved(DOWNSTREAM_OVERHEAD); if (serverBudget.timeLeft().get().compareTo(MIN_SERVER_TIMEOUT) < 0) throw new UncheckedTimeoutException("Timed out after " + timeBudget.originalTimeout().get()); return serverBudget.timeLeft().get(); }
@Test public void justEnoughTime() { clock.advance(originalTimeout.minus(MINIMUM_TIME_LEFT)); timeouts.getServerTimeoutOrThrow(); }
@Override public void shutDown() throws NacosException { String className = this.getClass().getName(); NAMING_LOGGER.info("{} do shutdown begin", className); serverListManager.shutdown(); serverProxy.shutdown(); ThreadUtils.shutdownThreadPool(executorService, NAMING_LOGGER); NAMING_LOGGER.info("{} do shutdown stop", className); }
@Test void testShutDown() throws NacosException { //when nacosNamingMaintainService.shutDown(); //then verify(serverProxy, times(1)).shutdown(); verify(serverListManager, times(1)).shutdown(); verify(executorService, times(1)).shutdown(); }
protected static boolean isValidValue(Field f, Object value) { if (value != null) { return true; } Schema schema = f.schema(); Type type = schema.getType(); // If the type is null, any value is valid if (type == Type.NULL) { return true; } // If the type is a union that allows nulls, any value is valid if (type == Type.UNION) { for (Schema s : schema.getTypes()) { if (s.getType() == Type.NULL) { return true; } } } // The value is null but the type does not allow nulls return false; }
@Test void isValidValueWithUnion() { // Verify that null values are not valid for a union with no null type: Schema unionWithoutNull = Schema .createUnion(Arrays.asList(Schema.create(Type.STRING), Schema.create(Type.BOOLEAN))); assertTrue(RecordBuilderBase.isValidValue(new Field("f", unionWithoutNull, null, null), new Object())); assertFalse(RecordBuilderBase.isValidValue(new Field("f", unionWithoutNull, null, null), null)); // Verify that null values are valid for a union with a null type: Schema unionWithNull = Schema.createUnion(Arrays.asList(Schema.create(Type.STRING), Schema.create(Type.NULL))); assertTrue(RecordBuilderBase.isValidValue(new Field("f", unionWithNull, null, null), new Object())); assertTrue(RecordBuilderBase.isValidValue(new Field("f", unionWithNull, null, null), null)); }
@Override public void seekPastMagicBytes(ByteBuffer in) throws BufferUnderflowException { int magicCursor = 3; // Which byte of the magic we're looking for currently. while (true) { byte b = in.get(); // We're looking for a run of bytes that is the same as the packet magic but we want to ignore partial // magics that aren't complete. So we keep track of where we're up to with magicCursor. byte expectedByte = (byte)(0xFF & packetMagic >>> (magicCursor * 8)); if (b == expectedByte) { magicCursor--; if (magicCursor < 0) { // We found the magic sequence. return; } else { // We still have further to go to find the next message. } } else { magicCursor = 3; } } }
@Test(expected = BufferUnderflowException.class) public void testSeekPastMagicBytes() { // Fail in another way, there is data in the stream but no magic bytes. byte[] brokenMessage = ByteUtils.parseHex("000000"); MAINNET.getDefaultSerializer().seekPastMagicBytes(ByteBuffer.wrap(brokenMessage)); }
public static Object[] realize(Object[] objs, Class<?>[] types) { if (objs.length != types.length) { throw new IllegalArgumentException("args.length != types.length"); } Object[] dests = new Object[objs.length]; for (int i = 0; i < objs.length; i++) { dests[i] = realize(objs[i], types[i]); } return dests; }
@Test void testIntToBoolean() throws Exception { Map<String, Object> map = new HashMap<>(); map.put("name", "myname"); map.put("male", 1); map.put("female", 0); PersonInfo personInfo = (PersonInfo) PojoUtils.realize(map, PersonInfo.class); assertEquals("myname", personInfo.getName()); assertTrue(personInfo.isMale()); assertFalse(personInfo.isFemale()); }
long nextRecordingId() { return nextRecordingId; }
@Test void shouldComputeNextRecordingIdIfValueInHeaderIsZero() throws IOException { setNextRecordingId(0); try (Catalog catalog = new Catalog(archiveDir, null, 0, CAPACITY, clock, null, segmentFileBuffer)) { assertEquals(recordingThreeId + 1, catalog.nextRecordingId()); } }
private HelloWorld() { }
@Test void testHelloWorld() throws IOException { String outputDir = "target/test-output"; String outputFile = outputDir + "/HelloWorld.pdf"; String message = "HelloWorld.pdf"; new File(outputFile).delete(); String[] args = { outputFile, message }; HelloWorld.main(args); checkOutputFile(outputFile, message); new File(outputFile).delete(); }
public JMXUriBuilder withObjectPropertiesReference(String aReferenceToHashtable) { if (aReferenceToHashtable.startsWith("#")) { addProperty("objectProperties", aReferenceToHashtable); } else { addProperty("objectProperties", "#" + aReferenceToHashtable); } return this; }
@Test public void withObjectPropertiesReference() { assertEquals("jmx:platform?objectProperties=#op", new JMXUriBuilder().withObjectPropertiesReference("#op").toString()); }
@Override public ParsedLine parse(final String line, final int cursor, final ParseContext context) { final ParsedLine parsed = delegate.parse(line, cursor, context); if (context != ParseContext.ACCEPT_LINE) { return parsed; } if (UnclosedQuoteChecker.isUnclosedQuote(line)) { throw new EOFError(-1, -1, "Missing end quote", "end quote char"); } final String bare = CommentStripper.strip(parsed.line()); if (bare.isEmpty()) { return parsed; } if (cliCmdPredicate.test(bare)) { return parsed; } if (!bare.endsWith(TERMINATION_CHAR)) { throw new EOFError(-1, -1, "Missing termination char", "termination char"); } return parsed; }
@Test public void shouldAcceptIfEmptyLine() { // Given: givenDelegateWillReturn(""); // When: final ParsedLine result = parser.parse("what ever", 0, ParseContext.ACCEPT_LINE); // Then: assertThat(result, is(parsedLine)); }
@Bean public SpringWebSocketClientEventListener springWebSocketClientEventListener( final ShenyuClientConfig clientConfig, final Environment env, final ShenyuClientRegisterRepository shenyuClientRegisterRepository) { ShenyuClientConfig.ClientPropertiesConfig clientPropertiesConfig = clientConfig.getClient().get(RpcTypeEnum.WEB_SOCKET.getName()); Properties props = clientPropertiesConfig == null ? null : clientPropertiesConfig.getProps(); String discoveryMode = env.getProperty("shenyu.discovery.type", ShenyuClientConstants.DISCOVERY_LOCAL_MODE); if (props != null) { props.setProperty(ShenyuClientConstants.DISCOVERY_LOCAL_MODE_KEY, Boolean.valueOf(ShenyuClientConstants.DISCOVERY_LOCAL_MODE.equals(discoveryMode)).toString()); } return new SpringWebSocketClientEventListener(clientConfig.getClient().get(RpcTypeEnum.WEB_SOCKET.getName()), shenyuClientRegisterRepository); }
@Test public void testSpringWebSocketClientEventListener() { MockedStatic<RegisterUtils> registerUtilsMockedStatic = mockStatic(RegisterUtils.class); registerUtilsMockedStatic.when(() -> RegisterUtils.doLogin(any(), any(), any())).thenReturn(Optional.ofNullable("token")); applicationContextRunner.run(context -> { SpringWebSocketClientEventListener eventListener = context.getBean("springWebSocketClientEventListener", SpringWebSocketClientEventListener.class); assertNotNull(eventListener); }); registerUtilsMockedStatic.close(); }
@GetInitialRestriction public OffsetByteRange getInitialRestriction( @Element SubscriptionPartition subscriptionPartition) { Offset offset = offsetReaderFactory.apply(subscriptionPartition).read(); return OffsetByteRange.of(new OffsetRange(offset.value(), Long.MAX_VALUE /* open interval */)); }
@Test public void getInitialRestrictionReadSuccess() { when(initialOffsetReader.read()).thenReturn(example(Offset.class)); OffsetByteRange range = sdf.getInitialRestriction(PARTITION); assertEquals(example(Offset.class).value(), range.getRange().getFrom()); assertEquals(Long.MAX_VALUE, range.getRange().getTo()); assertEquals(0, range.getByteCount()); verify(offsetReaderFactory).apply(PARTITION); }
public long count() { return dbCollection.countDocuments(); }
@Test void count() { assertThat(service.count()).isEqualTo(16L); }
public FEELFnResult<Object> invoke(@ParameterName("list") List list) { if ( list == null || list.isEmpty() ) { return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "list", "cannot be null or empty")); } else { try { return FEELFnResult.ofResult(Collections.max(list, new InterceptNotComparableComparator())); } catch (ClassCastException e) { return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "list", "contains items that are not comparable")); } } }
@Test void invokeListWithHeterogenousTypes() { FunctionTestUtil.assertResultError(maxFunction.invoke(Arrays.asList(1, "test", BigDecimal.valueOf(10.2))), InvalidParametersEvent.class); }
public static Builder newBlobColumnDefBuilder() { return new Builder(); }
@Test public void blobColumDef_is_nullable_by_default() { assertThat(newBlobColumnDefBuilder().setColumnName("a").build().isNullable()).isTrue(); }
public static IpPrefix valueOf(int address, int prefixLength) { return new IpPrefix(IpAddress.valueOf(address), prefixLength); }
@Test(expected = NullPointerException.class) public void testInvalidValueOfNullArrayIPv4() { IpPrefix ipPrefix; byte[] value; value = null; ipPrefix = IpPrefix.valueOf(IpAddress.Version.INET, value, 24); }
@Override public void logoutSuccess(HttpRequest request, @Nullable String login) { checkRequest(request); if (!LOGGER.isDebugEnabled()) { return; } LOGGER.debug("logout success [IP|{}|{}][login|{}]", request.getRemoteAddr(), getAllIps(request), preventLogFlood(emptyIfNull(login))); }
@Test public void logout_success_fails_with_NPE_if_request_is_null() { logTester.setLevel(Level.INFO); assertThatThrownBy(() -> underTest.logoutSuccess(null, "foo")) .isInstanceOf(NullPointerException.class) .hasMessage("request can't be null"); }
public SchemaKStream<K> selectKey( final FormatInfo valueFormat, final List<Expression> keyExpression, final Optional<KeyFormat> forceInternalKeyFormat, final Stacker contextStacker, final boolean forceRepartition ) { final boolean keyFormatChange = forceInternalKeyFormat.isPresent() && !forceInternalKeyFormat.get().equals(keyFormat); final boolean repartitionNeeded = repartitionNeeded(keyExpression); if (!keyFormatChange && !forceRepartition && !repartitionNeeded) { return this; } if ((repartitionNeeded || !forceRepartition) && keyFormat.isWindowed()) { throw new KsqlException( "Implicit repartitioning of windowed sources is not supported. " + "See https://github.com/confluentinc/ksql/issues/4385." ); } final ExecutionStep<KStreamHolder<K>> step = ExecutionStepFactory .streamSelectKey(contextStacker, sourceStep, keyExpression); final KeyFormat newKeyFormat = forceInternalKeyFormat.orElse(keyFormat); return new SchemaKStream<>( step, resolveSchema(step), SerdeFeaturesFactory.sanitizeKeyFormat( newKeyFormat, toSqlTypes(keyExpression), true), ksqlConfig, functionRegistry ); }
@Test(expected = UnsupportedOperationException.class) public void shouldFailRepartitionTable() { // Given: givenInitialKStreamOf("SELECT * FROM test2 EMIT CHANGES;"); final UnqualifiedColumnReferenceExp col2 = new UnqualifiedColumnReferenceExp(ColumnName.of("COL2")); // When: schemaKTable.selectKey( valueFormat.getFormatInfo(), ImmutableList.of(col2), Optional.empty(), childContextStacker, false ); }
@Override public Long clusterCountKeysInSlot(int slot) { RedisClusterNode node = clusterGetNodeForSlot(slot); MasterSlaveEntry entry = executorService.getConnectionManager().getEntry(new InetSocketAddress(node.getHost(), node.getPort())); RFuture<Long> f = executorService.readAsync(entry, StringCodec.INSTANCE, RedisCommands.CLUSTER_COUNTKEYSINSLOT, slot); return syncFuture(f); }
@Test public void testClusterCountKeysInSlot() { testInCluster(connection -> { Long t = connection.clusterCountKeysInSlot(1); assertThat(t).isZero(); }); }
@Override public void validateConnectorConfig(Map<String, String> connectorProps, Callback<ConfigInfos> callback) { validateConnectorConfig(connectorProps, callback, true); }
@Test public void testConfigValidationTopicsRegexWithDlq() { final Class<? extends Connector> connectorClass = SampleSinkConnector.class; AbstractHerder herder = createConfigValidationHerder(connectorClass, noneConnectorClientConfigOverridePolicy); Map<String, String> config = new HashMap<>(); config.put(ConnectorConfig.CONNECTOR_CLASS_CONFIG, connectorClass.getName()); config.put(SinkConnectorConfig.TOPICS_REGEX_CONFIG, "topic.*"); config.put(SinkConnectorConfig.DLQ_TOPIC_NAME_CONFIG, "topic1"); ConfigInfos validation = herder.validateConnectorConfig(config, s -> null, false); ConfigInfo topicsRegexInfo = findInfo(validation, SinkConnectorConfig.TOPICS_REGEX_CONFIG); assertNotNull(topicsRegexInfo); assertEquals(1, topicsRegexInfo.configValue().errors().size()); verifyValidationIsolation(); }
@Override public List<byte[]> mGet(byte[]... keys) { if (isQueueing() || isPipelined()) { for (byte[] key : keys) { read(key, ByteArrayCodec.INSTANCE, RedisCommands.GET, key); } return null; } CommandBatchService es = new CommandBatchService(executorService); for (byte[] key: keys) { es.readAsync(key, ByteArrayCodec.INSTANCE, RedisCommands.GET, key); } BatchResult<byte[]> r = (BatchResult<byte[]>) es.execute(); return r.getResponses(); }
@Test public void testMGet() { Map<byte[], byte[]> map = new HashMap<>(); for (int i = 0; i < 10; i++) { map.put(("test" + i).getBytes(), ("test" + i*100).getBytes()); } connection.mSet(map); List<byte[]> r = connection.mGet(map.keySet().toArray(new byte[0][])); assertThat(r).containsExactly(map.values().toArray(new byte[0][])); }
public void enableSendingOldValues() { this.sendOldValues = true; this.queryableName = storeName; }
@Test public void testSendingOldValue() { final StreamsBuilder builder = new StreamsBuilder(); final String topic1 = "topic1"; @SuppressWarnings("unchecked") final KTableImpl<String, String, String> table1 = (KTableImpl<String, String, String>) builder.table(topic1, stringConsumed); table1.enableSendingOldValues(true); assertTrue(table1.sendingOldValueEnabled()); final MockApiProcessorSupplier<String, Integer, Void, Void> supplier = new MockApiProcessorSupplier<>(); final Topology topology = builder.build().addProcessor("proc1", supplier, table1.name); try (final TopologyTestDriver driver = new TopologyTestDriver(topology, props)) { final TestInputTopic<String, String> inputTopic1 = driver.createInputTopic( topic1, new StringSerializer(), new StringSerializer(), Instant.ofEpochMilli(0L), Duration.ZERO ); final MockApiProcessor<String, Integer, Void, Void> proc1 = supplier.theCapturedProcessor(); inputTopic1.pipeInput("A", "01", 10L); inputTopic1.pipeInput("B", "01", 20L); inputTopic1.pipeInput("C", "01", 15L); proc1.checkAndClearProcessResult( new KeyValueTimestamp<>("A", new Change<>("01", null), 10), new KeyValueTimestamp<>("B", new Change<>("01", null), 20), new KeyValueTimestamp<>("C", new Change<>("01", null), 15) ); inputTopic1.pipeInput("A", "02", 8L); inputTopic1.pipeInput("B", "02", 22L); proc1.checkAndClearProcessResult( new KeyValueTimestamp<>("A", new Change<>("02", "01"), 8), new KeyValueTimestamp<>("B", new Change<>("02", "01"), 22) ); inputTopic1.pipeInput("A", "03", 12L); proc1.checkAndClearProcessResult( new KeyValueTimestamp<>("A", new Change<>("03", "02"), 12) ); inputTopic1.pipeInput("A", null, 15L); inputTopic1.pipeInput("B", null, 20L); proc1.checkAndClearProcessResult( new KeyValueTimestamp<>("A", new Change<>(null, "03"), 15), new KeyValueTimestamp<>("B", new Change<>(null, "02"), 20) ); } }
public static Workbook createBook(String excelFilePath) { return createBook(excelFilePath, false); }
@Test public void createBookTest(){ Workbook book = WorkbookUtil.createBook(true); assertNotNull(book); book = WorkbookUtil.createBook(false); assertNotNull(book); }
public void densify(FeatureMap fMap) { // Densify! - guitar solo List<String> featureNames = new ArrayList<>(fMap.keySet()); Collections.sort(featureNames); densify(featureNames); }
@Test public void testExtendedListExampleDensify() { MockOutput output = new MockOutput("UNK"); Example<MockOutput> example, expected; // Single feature, contiguous densification example = new ListExample<>(output, new String[]{"F0"}, new double[]{1.0}); example.densify(Arrays.asList("F0", "F1", "F2")); expected = new ListExample<>(new MockOutput("UNK"), new String[]{"F0","F1","F2"}, new double[]{1.0,0.0,0.0}); checkDenseExample(expected,example); example = new ListExample<>(output, new String[]{"F0"}, new double[]{1.0}); example.densify(Arrays.asList("F", "F1", "F2")); expected = new ListExample<>(new MockOutput("UNK"), new String[]{"F","F0","F1","F2"}, new double[]{0.0,1.0,0.0,0.0}); checkDenseExample(expected,example); // Multiple features, edges example = new ListExample<>(output, new String[]{"F0","F7"}, new double[]{1.0,1.0}); example.densify(Arrays.asList("F0", "F5", "F10")); expected = new ListExample<>(new MockOutput("UNK"), new String[]{"F0","F5","F7","F10"}, new double[]{1.0,0.0,1.0,0.0}); checkDenseExample(expected,example); example = new ListExample<>(output, new String[]{"F0","F1","F2"}, new double[]{1.0,1.0,1.0}); example.densify(Arrays.asList("F1.5", "F2.5")); expected = new ListExample<>(new MockOutput("UNK"), new String[]{"F0","F1","F1.5","F2","F2.5"}, new double[]{1.0,1.0,0.0,1.0,0.0}); checkDenseExample(expected,example); }
@Override public KsMaterializedQueryResult<WindowedRow> get( final GenericKey key, final int partition, final Range<Instant> windowStart, final Range<Instant> windowEnd, final Optional<Position> position ) { try { final WindowRangeQuery<GenericKey, GenericRow> query = WindowRangeQuery.withKey(key); StateQueryRequest<KeyValueIterator<Windowed<GenericKey>, GenericRow>> request = inStore(stateStore.getStateStoreName()).withQuery(query); if (position.isPresent()) { request = request.withPositionBound(PositionBound.at(position.get())); } final StateQueryResult<KeyValueIterator<Windowed<GenericKey>, GenericRow>> result = stateStore.getKafkaStreams().query(request); final QueryResult<KeyValueIterator<Windowed<GenericKey>, GenericRow>> queryResult = result.getPartitionResults().get(partition); if (queryResult.isFailure()) { throw failedQueryException(queryResult); } try (KeyValueIterator<Windowed<GenericKey>, GenericRow> it = queryResult.getResult()) { final Builder<WindowedRow> builder = ImmutableList.builder(); while (it.hasNext()) { final KeyValue<Windowed<GenericKey>, GenericRow> next = it.next(); final Window wnd = next.key.window(); if (!windowStart.contains(wnd.startTime())) { continue; } if (!windowEnd.contains(wnd.endTime())) { continue; } final long rowTime = wnd.end(); final WindowedRow row = WindowedRow.of( stateStore.schema(), next.key, next.value, rowTime ); builder.add(row); } return KsMaterializedQueryResult.rowIteratorWithPosition( builder.build().iterator(), queryResult.getPosition()); } } catch (final NotUpToBoundException | MaterializationException e) { throw e; } catch (final Exception e) { throw new MaterializationException("Failed to get value from materialized table", e); } }
@Test public void shouldReturnValueIfSessionStartsAtUpperBoundIfUpperBoundClosed() { // Given: final Range<Instant> startBounds = Range.closed( LOWER_INSTANT, UPPER_INSTANT ); final Instant wend = UPPER_INSTANT.plusMillis(1); givenSingleSession(UPPER_INSTANT, wend); // When: final KsMaterializedQueryResult<WindowedRow> result = table.get(A_KEY, PARTITION, startBounds, Range.all()); // Then: final Iterator<WindowedRow> rowIterator = result.getRowIterator(); assertThat(rowIterator.hasNext(), is(true)); assertThat(rowIterator.next(), is( WindowedRow.of( SCHEMA, sessionKey(UPPER_INSTANT, wend), A_VALUE, wend.toEpochMilli() ) )); assertThat(result.getPosition(), not(Optional.empty())); assertThat(result.getPosition().get(), is(POSITION)); }
public int getDepth(Throwable ex) { return getDepth(ex.getClass(), 0); }
@Test public void alwaysTrueForThrowable() { RollbackRule rr = new RollbackRule(java.lang.Throwable.class.getName()); assertThat(rr.getDepth(new MyRuntimeException("")) > 0).isTrue(); assertThat(rr.getDepth(new IOException()) > 0).isTrue(); assertThat(rr.getDepth(new ShouldNeverHappenException(null, null)) > 0).isTrue(); assertThat(rr.getDepth(new RuntimeException()) > 0).isTrue(); }
public void initialize() { validateBundledPluginDirectory(); validateExternalPluginDirectory(); }
@Test void shouldCreatePluginDirectoryIfItDoesNotExist() { bundledPluginDir.delete(); new DefaultPluginJarLocationMonitor(systemEnvironment).initialize(); assertThat(bundledPluginDir).exists(); }
@Override public JType apply(String nodeName, JsonNode node, JsonNode parent, JClassContainer jClassContainer, Schema schema) { String propertyTypeName = getTypeName(node); JType type; if (propertyTypeName.equals("object") || node.has("properties") && node.path("properties").size() > 0) { type = ruleFactory.getObjectRule().apply(nodeName, node, parent, jClassContainer.getPackage(), schema); } else if (node.has("existingJavaType")) { String typeName = node.path("existingJavaType").asText(); if (isPrimitive(typeName, jClassContainer.owner())) { type = primitiveType(typeName, jClassContainer.owner()); } else { type = resolveType(jClassContainer, typeName); } } else if (propertyTypeName.equals("string")) { type = jClassContainer.owner().ref(String.class); } else if (propertyTypeName.equals("number")) { type = getNumberType(jClassContainer.owner(), ruleFactory.getGenerationConfig()); } else if (propertyTypeName.equals("integer")) { type = getIntegerType(jClassContainer.owner(), node, ruleFactory.getGenerationConfig()); } else if (propertyTypeName.equals("boolean")) { type = unboxIfNecessary(jClassContainer.owner().ref(Boolean.class), ruleFactory.getGenerationConfig()); } else if (propertyTypeName.equals("array")) { type = ruleFactory.getArrayRule().apply(nodeName, node, parent, jClassContainer.getPackage(), schema); } else { type = jClassContainer.owner().ref(Object.class); } if (!node.has("javaType") && !node.has("existingJavaType") && node.has("format")) { type = ruleFactory.getFormatRule().apply(nodeName, node.get("format"), node, type, schema); } else if (!node.has("javaType") && !node.has("existingJavaType") && propertyTypeName.equals("string") && node.has("media")) { type = ruleFactory.getMediaRule().apply(nodeName, node.get("media"), node, type, schema); } return type; }
@Test public void applyGeneratesIntegerUsingJavaTypeLong() { JPackage jpackage = new JCodeModel()._package(getClass().getPackage().getName()); ObjectNode objectNode = new ObjectMapper().createObjectNode(); objectNode.put("type", "integer"); objectNode.put("existingJavaType", "java.lang.Long"); when(config.isUsePrimitives()).thenReturn(true); JType result = rule.apply("fooBar", objectNode, null, jpackage, null); assertThat(result.fullName(), is("java.lang.Long")); }
public int computeThreshold(StreamConfig streamConfig, CommittingSegmentDescriptor committingSegmentDescriptor, @Nullable SegmentZKMetadata committingSegmentZKMetadata, String newSegmentName) { long desiredSegmentSizeBytes = streamConfig.getFlushThresholdSegmentSizeBytes(); if (desiredSegmentSizeBytes <= 0) { desiredSegmentSizeBytes = StreamConfig.DEFAULT_FLUSH_THRESHOLD_SEGMENT_SIZE_BYTES; } long optimalSegmentSizeBytesMin = desiredSegmentSizeBytes / 2; double optimalSegmentSizeBytesMax = desiredSegmentSizeBytes * 1.5; if (committingSegmentZKMetadata == null) { // first segment of the partition, hence committing segment is null if (_latestSegmentRowsToSizeRatio > 0) { // new partition group added case long targetSegmentNumRows = (long) (desiredSegmentSizeBytes * _latestSegmentRowsToSizeRatio); targetSegmentNumRows = capNumRowsIfOverflow(targetSegmentNumRows); SegmentSizeBasedFlushThresholdUpdater.LOGGER.info( "Committing segment zk metadata is not available, using prev ratio {}, setting rows threshold for {} as {}", _latestSegmentRowsToSizeRatio, newSegmentName, targetSegmentNumRows); return (int) targetSegmentNumRows; } else { final int autotuneInitialRows = streamConfig.getFlushAutotuneInitialRows(); SegmentSizeBasedFlushThresholdUpdater.LOGGER.info( "Committing segment zk metadata is not available, setting threshold for {} as {}", newSegmentName, autotuneInitialRows); return autotuneInitialRows; } } final long committingSegmentSizeBytes = committingSegmentDescriptor.getSegmentSizeBytes(); if (committingSegmentSizeBytes <= 0 // repair segment case || SegmentCompletionProtocol.REASON_FORCE_COMMIT_MESSAGE_RECEIVED.equals( committingSegmentDescriptor.getStopReason())) { String reason = committingSegmentSizeBytes <= 0 // ? "Committing segment size is not available" // : "Committing segment is due to force-commit"; final int targetNumRows = committingSegmentZKMetadata.getSizeThresholdToFlushSegment(); SegmentSizeBasedFlushThresholdUpdater.LOGGER.info("{}, setting thresholds from previous segment for {} as {}", reason, newSegmentName, targetNumRows); return targetNumRows; } final long timeConsumed = _clock.millis() - committingSegmentZKMetadata.getCreationTime(); final long numRowsConsumed = committingSegmentZKMetadata.getTotalDocs(); final int numRowsThreshold = committingSegmentZKMetadata.getSizeThresholdToFlushSegment(); SegmentSizeBasedFlushThresholdUpdater.LOGGER.info( "{}: Data from committing segment: Time {} numRows {} threshold {} segmentSize(bytes) {}", newSegmentName, TimeUtils.convertMillisToPeriod(timeConsumed), numRowsConsumed, numRowsThreshold, committingSegmentSizeBytes); double currentRatio = (double) numRowsConsumed / committingSegmentSizeBytes; if (_latestSegmentRowsToSizeRatio > 0) { _latestSegmentRowsToSizeRatio = CURRENT_SEGMENT_RATIO_WEIGHT * currentRatio + PREVIOUS_SEGMENT_RATIO_WEIGHT * _latestSegmentRowsToSizeRatio; } else { _latestSegmentRowsToSizeRatio = currentRatio; } // If the number of rows consumed is less than what we set as target in metadata, then the segment hit time limit. // We can set the new target to be slightly higher than the actual number of rows consumed so that we can aim // to hit the row limit next time around. // // If the size of the committing segment is higher than the desired segment size, then the administrator has // set a lower segment size threshold. We should treat this case as if we have hit the row limit and not the time // limit. // // TODO: add feature to adjust time threshold as well // If we set new threshold to be numRowsConsumed, we might keep oscillating back and forth between doubling limit // and time threshold being hit If we set new threshold to be committingSegmentZKMetadata // .getSizeThresholdToFlushSegment(), // we might end up using a lot more memory than required for the segment Using a minor bump strategy, until // we add feature to adjust time We will only slightly bump the threshold based on numRowsConsumed if (numRowsConsumed < numRowsThreshold && committingSegmentSizeBytes < desiredSegmentSizeBytes) { final long timeThresholdMillis = streamConfig.getFlushThresholdTimeMillis(); long currentNumRows = numRowsConsumed; StringBuilder logStringBuilder = new StringBuilder().append("Time threshold reached. "); if (timeThresholdMillis < timeConsumed) { // The administrator has reduced the time threshold. Adjust the // number of rows to match the average consumption rate on the partition. currentNumRows = timeThresholdMillis * numRowsConsumed / timeConsumed; logStringBuilder.append(" Detected lower time threshold, adjusting numRowsConsumed to ").append(currentNumRows) .append(". "); } long targetSegmentNumRows = (long) (currentNumRows * ROWS_MULTIPLIER_WHEN_TIME_THRESHOLD_HIT); targetSegmentNumRows = capNumRowsIfOverflow(targetSegmentNumRows); logStringBuilder.append("Setting segment size for {} as {}"); SegmentSizeBasedFlushThresholdUpdater.LOGGER.info(logStringBuilder.toString(), newSegmentName, targetSegmentNumRows); return (int) targetSegmentNumRows; } long targetSegmentNumRows; if (committingSegmentSizeBytes < optimalSegmentSizeBytesMin) { targetSegmentNumRows = numRowsConsumed + numRowsConsumed / 2; } else if (committingSegmentSizeBytes > optimalSegmentSizeBytesMax) { targetSegmentNumRows = numRowsConsumed / 2; } else { if (_latestSegmentRowsToSizeRatio > 0) { targetSegmentNumRows = (long) (desiredSegmentSizeBytes * _latestSegmentRowsToSizeRatio); } else { targetSegmentNumRows = (long) (desiredSegmentSizeBytes * currentRatio); } } targetSegmentNumRows = capNumRowsIfOverflow(targetSegmentNumRows); SegmentSizeBasedFlushThresholdUpdater.LOGGER.info( "Committing segment size {}, current ratio {}, setting threshold for {} as {}", committingSegmentSizeBytes, _latestSegmentRowsToSizeRatio, newSegmentName, targetSegmentNumRows); return (int) targetSegmentNumRows; }
@Test public void testNoRows() { SegmentFlushThresholdComputer computer = new SegmentFlushThresholdComputer(); StreamConfig streamConfig = mock(StreamConfig.class); when(streamConfig.getFlushThresholdSegmentSizeBytes()).thenReturn(300_0000L); CommittingSegmentDescriptor committingSegmentDescriptor = mock(CommittingSegmentDescriptor.class); when(committingSegmentDescriptor.getSegmentSizeBytes()).thenReturn(250_0000L); SegmentZKMetadata committingSegmentZKMetadata = mock(SegmentZKMetadata.class); when(committingSegmentZKMetadata.getTotalDocs()).thenReturn(0L); when(committingSegmentZKMetadata.getSizeThresholdToFlushSegment()).thenReturn(0); int threshold = computer.computeThreshold(streamConfig, committingSegmentDescriptor, committingSegmentZKMetadata, "events3__0__0__20211222T1646Z"); // max((totalDocs / segmentSize) * flushThresholdSegmentSize, 10000) // max(0, 10000) assertEquals(threshold, 10_000); }
public Publisher<V> descendingIterator() { return iterator(-1, false); }
@Test public void testListIteratorPrevious() { RListRx<Integer> list = redisson.getList("list"); sync(list.add(1)); sync(list.add(2)); sync(list.add(3)); sync(list.add(4)); sync(list.add(5)); sync(list.add(0)); sync(list.add(7)); sync(list.add(8)); sync(list.add(0)); sync(list.add(10)); Iterator<Integer> iterator = toIterator(list.descendingIterator()); Assertions.assertTrue(10 == iterator.next()); Assertions.assertTrue(0 == iterator.next()); Assertions.assertTrue(8 == iterator.next()); Assertions.assertTrue(7 == iterator.next()); Assertions.assertTrue(0 == iterator.next()); Assertions.assertTrue(5 == iterator.next()); Assertions.assertTrue(4 == iterator.next()); Assertions.assertTrue(3 == iterator.next()); Assertions.assertTrue(2 == iterator.next()); Assertions.assertTrue(1 == iterator.next()); Assertions.assertFalse(iterator.hasNext()); }
public long scan( final UnsafeBuffer termBuffer, final long rebuildPosition, final long hwmPosition, final long nowNs, final int termLengthMask, final int positionBitsToShift, final int initialTermId) { boolean lossFound = false; int rebuildOffset = (int)rebuildPosition & termLengthMask; if (rebuildPosition < hwmPosition) { final int rebuildTermCount = (int)(rebuildPosition >>> positionBitsToShift); final int hwmTermCount = (int)(hwmPosition >>> positionBitsToShift); final int rebuildTermId = initialTermId + rebuildTermCount; final int hwmTermOffset = (int)hwmPosition & termLengthMask; final int limitOffset = rebuildTermCount == hwmTermCount ? hwmTermOffset : termLengthMask + 1; rebuildOffset = scanForGap(termBuffer, rebuildTermId, rebuildOffset, limitOffset, this); if (rebuildOffset < limitOffset) { if (scannedTermOffset != activeTermOffset || scannedTermId != activeTermId) { activateGap(nowNs); lossFound = true; } checkTimerExpiry(nowNs); } } return pack(rebuildOffset, lossFound); }
@Test void shouldReplaceOldNakWithNewNak() { long rebuildPosition = ACTIVE_TERM_POSITION; long hwmPosition = ACTIVE_TERM_POSITION + (ALIGNED_FRAME_LENGTH * 3L); insertDataFrame(offsetOfMessage(0)); insertDataFrame(offsetOfMessage(2)); lossDetector.scan(termBuffer, rebuildPosition, hwmPosition, currentTime, MASK, POSITION_BITS_TO_SHIFT, TERM_ID); currentTime = TimeUnit.MILLISECONDS.toNanos(20); lossDetector.scan(termBuffer, rebuildPosition, hwmPosition, currentTime, MASK, POSITION_BITS_TO_SHIFT, TERM_ID); insertDataFrame(offsetOfMessage(4)); insertDataFrame(offsetOfMessage(1)); rebuildPosition += (ALIGNED_FRAME_LENGTH * 3L); hwmPosition = (ALIGNED_FRAME_LENGTH * 5L); lossDetector.scan(termBuffer, rebuildPosition, hwmPosition, currentTime, MASK, POSITION_BITS_TO_SHIFT, TERM_ID); currentTime = TimeUnit.MILLISECONDS.toNanos(100); lossDetector.scan(termBuffer, rebuildPosition, hwmPosition, currentTime, MASK, POSITION_BITS_TO_SHIFT, TERM_ID); verify(lossHandler, atLeast(1)).onGapDetected(TERM_ID, offsetOfMessage(3), gapLength()); }
@Override public Column convert(BasicTypeDefine typeDefine) { PhysicalColumn.PhysicalColumnBuilder builder = PhysicalColumn.builder() .name(typeDefine.getName()) .sourceType(typeDefine.getColumnType()) .nullable(typeDefine.isNullable()) .defaultValue(typeDefine.getDefaultValue()) .comment(typeDefine.getComment()); String oracleType = typeDefine.getDataType().toUpperCase(); switch (oracleType) { case ORACLE_INTEGER: builder.dataType(new DecimalType(DEFAULT_PRECISION, 0)); builder.columnLength((long) DEFAULT_PRECISION); break; case ORACLE_NUMBER: Long precision = typeDefine.getPrecision(); if (precision == null || precision == 0 || precision > DEFAULT_PRECISION) { precision = Long.valueOf(DEFAULT_PRECISION); } Integer scale = typeDefine.getScale(); if (scale == null) { scale = 127; } if (scale <= 0) { int newPrecision = (int) (precision - scale); if (newPrecision == 1) { builder.dataType(BasicType.BOOLEAN_TYPE); } else if (newPrecision <= 9) { builder.dataType(BasicType.INT_TYPE); } else if (newPrecision <= 18) { builder.dataType(BasicType.LONG_TYPE); } else if (newPrecision < 38) { builder.dataType(new DecimalType(newPrecision, 0)); builder.columnLength((long) newPrecision); } else { builder.dataType(new DecimalType(DEFAULT_PRECISION, 0)); builder.columnLength((long) DEFAULT_PRECISION); } } else if (scale <= DEFAULT_SCALE) { builder.dataType(new DecimalType(precision.intValue(), scale)); builder.columnLength(precision); builder.scale(scale); } else { builder.dataType(new DecimalType(precision.intValue(), DEFAULT_SCALE)); builder.columnLength(precision); builder.scale(DEFAULT_SCALE); } break; case ORACLE_FLOAT: // The float type will be converted to DecimalType(10, -127), // which will lose precision in the spark engine DecimalType floatDecimal = new DecimalType(DEFAULT_PRECISION, DEFAULT_SCALE); builder.dataType(floatDecimal); builder.columnLength((long) floatDecimal.getPrecision()); builder.scale(floatDecimal.getScale()); break; case ORACLE_BINARY_FLOAT: case ORACLE_REAL: builder.dataType(BasicType.FLOAT_TYPE); break; case ORACLE_BINARY_DOUBLE: builder.dataType(BasicType.DOUBLE_TYPE); break; case ORACLE_CHAR: case ORACLE_VARCHAR: case ORACLE_VARCHAR2: builder.dataType(BasicType.STRING_TYPE); builder.columnLength(typeDefine.getLength()); break; case ORACLE_NCHAR: case ORACLE_NVARCHAR2: builder.dataType(BasicType.STRING_TYPE); builder.columnLength( TypeDefineUtils.doubleByteTo4ByteLength(typeDefine.getLength())); break; case ORACLE_ROWID: builder.dataType(BasicType.STRING_TYPE); builder.columnLength(MAX_ROWID_LENGTH); break; case ORACLE_XML: case ORACLE_SYS_XML: builder.dataType(BasicType.STRING_TYPE); builder.columnLength(typeDefine.getLength()); break; case ORACLE_LONG: builder.dataType(BasicType.STRING_TYPE); // The maximum length of the column is 2GB-1 builder.columnLength(BYTES_2GB - 1); break; case ORACLE_CLOB: case ORACLE_NCLOB: builder.dataType(BasicType.STRING_TYPE); // The maximum length of the column is 4GB-1 builder.columnLength(BYTES_4GB - 1); break; case ORACLE_BLOB: builder.dataType(PrimitiveByteArrayType.INSTANCE); // The maximum length of the column is 4GB-1 builder.columnLength(BYTES_4GB - 1); break; case ORACLE_RAW: builder.dataType(PrimitiveByteArrayType.INSTANCE); if (typeDefine.getLength() == null || typeDefine.getLength() == 0) { builder.columnLength(MAX_RAW_LENGTH); } else { builder.columnLength(typeDefine.getLength()); } break; case ORACLE_LONG_RAW: builder.dataType(PrimitiveByteArrayType.INSTANCE); // The maximum length of the column is 2GB-1 builder.columnLength(BYTES_2GB - 1); break; case ORACLE_DATE: builder.dataType(LocalTimeType.LOCAL_DATE_TIME_TYPE); break; case ORACLE_TIMESTAMP: case ORACLE_TIMESTAMP_WITH_TIME_ZONE: case ORACLE_TIMESTAMP_WITH_LOCAL_TIME_ZONE: builder.dataType(LocalTimeType.LOCAL_DATE_TIME_TYPE); if (typeDefine.getScale() == null) { builder.scale(TIMESTAMP_DEFAULT_SCALE); } else { builder.scale(typeDefine.getScale()); } break; default: throw CommonError.convertToSeaTunnelTypeError( DatabaseIdentifier.ORACLE, oracleType, typeDefine.getName()); } return builder.build(); }
@Test public void testConvertBytes() { BasicTypeDefine<Object> typeDefine = BasicTypeDefine.builder().name("test").columnType("blob").dataType("blob").build(); Column column = OracleTypeConverter.INSTANCE.convert(typeDefine); Assertions.assertEquals(typeDefine.getName(), column.getName()); Assertions.assertEquals(PrimitiveByteArrayType.INSTANCE, column.getDataType()); Assertions.assertEquals(BYTES_4GB - 1, column.getColumnLength()); Assertions.assertEquals(typeDefine.getColumnType(), column.getSourceType()); typeDefine = BasicTypeDefine.builder().name("test").columnType("raw").dataType("raw").build(); column = OracleTypeConverter.INSTANCE.convert(typeDefine); Assertions.assertEquals(typeDefine.getName(), column.getName()); Assertions.assertEquals(PrimitiveByteArrayType.INSTANCE, column.getDataType()); Assertions.assertEquals(MAX_RAW_LENGTH, column.getColumnLength()); Assertions.assertEquals(typeDefine.getColumnType(), column.getSourceType()); typeDefine = BasicTypeDefine.builder() .name("test") .columnType("raw(10)") .dataType("raw") .length(10L) .build(); column = OracleTypeConverter.INSTANCE.convert(typeDefine); Assertions.assertEquals(typeDefine.getName(), column.getName()); Assertions.assertEquals(PrimitiveByteArrayType.INSTANCE, column.getDataType()); Assertions.assertEquals(10, column.getColumnLength()); Assertions.assertEquals(typeDefine.getColumnType(), column.getSourceType()); typeDefine = BasicTypeDefine.builder() .name("test") .columnType("long raw") .dataType("long raw") .build(); column = OracleTypeConverter.INSTANCE.convert(typeDefine); Assertions.assertEquals(typeDefine.getName(), column.getName()); Assertions.assertEquals(PrimitiveByteArrayType.INSTANCE, column.getDataType()); Assertions.assertEquals(BYTES_2GB - 1, column.getColumnLength()); Assertions.assertEquals(typeDefine.getColumnType(), column.getSourceType()); }
@Override public void write(ChannelHandlerContext ctx, Object msg, ChannelPromise promise) { if (msg instanceof Http2DataFrame) { Http2DataFrame dataFrame = (Http2DataFrame) msg; encoder().writeData(ctx, dataFrame.stream().id(), dataFrame.content(), dataFrame.padding(), dataFrame.isEndStream(), promise); } else if (msg instanceof Http2HeadersFrame) { writeHeadersFrame(ctx, (Http2HeadersFrame) msg, promise); } else if (msg instanceof Http2WindowUpdateFrame) { Http2WindowUpdateFrame frame = (Http2WindowUpdateFrame) msg; Http2FrameStream frameStream = frame.stream(); // It is legit to send a WINDOW_UPDATE frame for the connection stream. The parent channel doesn't attempt // to set the Http2FrameStream so we assume if it is null the WINDOW_UPDATE is for the connection stream. try { if (frameStream == null) { increaseInitialConnectionWindow(frame.windowSizeIncrement()); } else { consumeBytes(frameStream.id(), frame.windowSizeIncrement()); } promise.setSuccess(); } catch (Throwable t) { promise.setFailure(t); } } else if (msg instanceof Http2ResetFrame) { Http2ResetFrame rstFrame = (Http2ResetFrame) msg; int id = rstFrame.stream().id(); // Only ever send a reset frame if stream may have existed before as otherwise we may send a RST on a // stream in an invalid state and cause a connection error. if (connection().streamMayHaveExisted(id)) { encoder().writeRstStream(ctx, rstFrame.stream().id(), rstFrame.errorCode(), promise); } else { ReferenceCountUtil.release(rstFrame); promise.setFailure(Http2Exception.streamError( rstFrame.stream().id(), Http2Error.PROTOCOL_ERROR, "Stream never existed")); } } else if (msg instanceof Http2PingFrame) { Http2PingFrame frame = (Http2PingFrame) msg; encoder().writePing(ctx, frame.ack(), frame.content(), promise); } else if (msg instanceof Http2SettingsFrame) { encoder().writeSettings(ctx, ((Http2SettingsFrame) msg).settings(), promise); } else if (msg instanceof Http2SettingsAckFrame) { // In the event of manual SETTINGS ACK, it is assumed the encoder will apply the earliest received but not // yet ACKed settings. encoder().writeSettingsAck(ctx, promise); } else if (msg instanceof Http2GoAwayFrame) { writeGoAwayFrame(ctx, (Http2GoAwayFrame) msg, promise); } else if (msg instanceof Http2PushPromiseFrame) { Http2PushPromiseFrame pushPromiseFrame = (Http2PushPromiseFrame) msg; writePushPromise(ctx, pushPromiseFrame, promise); } else if (msg instanceof Http2PriorityFrame) { Http2PriorityFrame priorityFrame = (Http2PriorityFrame) msg; encoder().writePriority(ctx, priorityFrame.stream().id(), priorityFrame.streamDependency(), priorityFrame.weight(), priorityFrame.exclusive(), promise); } else if (msg instanceof Http2UnknownFrame) { Http2UnknownFrame unknownFrame = (Http2UnknownFrame) msg; encoder().writeFrame(ctx, unknownFrame.frameType(), unknownFrame.stream().id(), unknownFrame.flags(), unknownFrame.content(), promise); } else if (!(msg instanceof Http2Frame)) { ctx.write(msg, promise); } else { ReferenceCountUtil.release(msg); throw new UnsupportedMessageTypeException(msg, SUPPORTED_MESSAGES); } }
@Test public void windowUpdateMayFail() throws Exception { frameInboundWriter.writeInboundHeaders(3, request, 31, false); Http2Connection connection = frameCodec.connection(); Http2Stream stream = connection.stream(3); assertNotNull(stream); Http2HeadersFrame inboundHeaders = inboundHandler.readInbound(); assertNotNull(inboundHeaders); Http2FrameStream stream2 = inboundHeaders.stream(); // Fails, cause trying to return too many bytes to the flow controller ChannelFuture f = channel.write(new DefaultHttp2WindowUpdateFrame(100).stream(stream2)); assertTrue(f.isDone()); assertFalse(f.isSuccess()); assertThat(f.cause(), instanceOf(Http2Exception.class)); }
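For contrast with the failing stream-level write above, a hedged sketch of the connection-level path: when no Http2FrameStream is attached, write(...) takes the frameStream == null branch and enlarges the connection window instead of returning consumed bytes.
// Sketch only, reusing the same channel fixture as the test above.
ChannelFuture connectionLevel = channel.write(new DefaultHttp2WindowUpdateFrame(100));
// No per-stream consumed-bytes accounting applies here, so this write is not
// expected to fail with the flow-controller error seen in windowUpdateMayFail().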
@ScalarOperator(GREATER_THAN_OR_EQUAL) @SqlType(StandardTypes.BOOLEAN) public static boolean greaterThanOrEqual(@SqlType(StandardTypes.SMALLINT) long left, @SqlType(StandardTypes.SMALLINT) long right) { return left >= right; }
@Test public void testGreaterThanOrEqual() { assertFunction("SMALLINT'37' >= SMALLINT'37'", BOOLEAN, true); assertFunction("SMALLINT'37' >= SMALLINT'17'", BOOLEAN, true); assertFunction("SMALLINT'17' >= SMALLINT'37'", BOOLEAN, false); assertFunction("SMALLINT'17' >= SMALLINT'17'", BOOLEAN, true); }
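The operator takes two long parameters because Presto represents SMALLINT as a Java long on the operator stack; boundary checks in the same assertFunction style would look like this (illustrative additions, not part of the original suite):
assertFunction("SMALLINT'32767' >= SMALLINT'-32768'", BOOLEAN, true); // 16-bit extremes
assertFunction("SMALLINT'-32768' >= SMALLINT'32767'", BOOLEAN, false);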
@Override public CreatePartitionsResult createPartitions(final Map<String, NewPartitions> newPartitions, final CreatePartitionsOptions options) { final Map<String, KafkaFutureImpl<Void>> futures = new HashMap<>(newPartitions.size()); final CreatePartitionsTopicCollection topics = new CreatePartitionsTopicCollection(newPartitions.size()); for (Map.Entry<String, NewPartitions> entry : newPartitions.entrySet()) { final String topic = entry.getKey(); final NewPartitions newPartition = entry.getValue(); List<List<Integer>> newAssignments = newPartition.assignments(); List<CreatePartitionsAssignment> assignments = newAssignments == null ? null : newAssignments.stream() .map(brokerIds -> new CreatePartitionsAssignment().setBrokerIds(brokerIds)) .collect(Collectors.toList()); topics.add(new CreatePartitionsTopic() .setName(topic) .setCount(newPartition.totalCount()) .setAssignments(assignments)); futures.put(topic, new KafkaFutureImpl<>()); } if (!topics.isEmpty()) { final long now = time.milliseconds(); final long deadline = calcDeadlineMs(now, options.timeoutMs()); final Call call = getCreatePartitionsCall(options, futures, topics, Collections.emptyMap(), now, deadline); runnable.call(call, now); } return new CreatePartitionsResult(new HashMap<>(futures)); }
@Test public void testCreatePartitionsRetryThrottlingExceptionWhenEnabled() throws Exception { try (AdminClientUnitTestEnv env = mockClientEnv()) { env.kafkaClient().setNodeApiVersions(NodeApiVersions.create()); env.kafkaClient().prepareResponse( expectCreatePartitionsRequestWithTopics("topic1", "topic2", "topic3"), prepareCreatePartitionsResponse(1000, createPartitionsTopicResult("topic1", Errors.NONE), createPartitionsTopicResult("topic2", Errors.THROTTLING_QUOTA_EXCEEDED), createPartitionsTopicResult("topic3", Errors.TOPIC_ALREADY_EXISTS))); env.kafkaClient().prepareResponse( expectCreatePartitionsRequestWithTopics("topic2"), prepareCreatePartitionsResponse(1000, createPartitionsTopicResult("topic2", Errors.THROTTLING_QUOTA_EXCEEDED))); env.kafkaClient().prepareResponse( expectCreatePartitionsRequestWithTopics("topic2"), prepareCreatePartitionsResponse(0, createPartitionsTopicResult("topic2", Errors.NONE))); Map<String, NewPartitions> counts = new HashMap<>(); counts.put("topic1", NewPartitions.increaseTo(1)); counts.put("topic2", NewPartitions.increaseTo(2)); counts.put("topic3", NewPartitions.increaseTo(3)); CreatePartitionsResult result = env.adminClient().createPartitions( counts, new CreatePartitionsOptions().retryOnQuotaViolation(true)); assertNull(result.values().get("topic1").get()); assertNull(result.values().get("topic2").get()); TestUtils.assertFutureThrows(result.values().get("topic3"), TopicExistsException.class); } }
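createPartitions can also carry explicit replica assignments for the new partitions; a hedged usage sketch, where admin stands in for any Admin client instance:
// Growing topic1 from 1 to 3 partitions: the assignments cover only the two new
// partitions, one inner list per partition, first broker id = preferred leader.
Map<String, NewPartitions> increase = new HashMap<>();
increase.put("topic1", NewPartitions.increaseTo(3, Arrays.asList(Arrays.asList(1, 2), Arrays.asList(2, 3))));
admin.createPartitions(increase).all().get();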
public static GenericRecord convertToAvro(Schema schema, Message message) { return AvroSupport.convert(schema, message); }
@Test public void allFieldsSet_wellKnownTypesAndTimestampsAsRecords() throws IOException { Schema.Parser parser = new Schema.Parser(); Schema convertedSchema = parser.parse(getClass().getClassLoader().getResourceAsStream("schema-provider/proto/sample_schema_wrapped_and_timestamp_as_record.avsc")); Pair<Sample, GenericRecord> inputAndOutput = createInputOutputSampleWithRandomValues(convertedSchema, true); Sample input = inputAndOutput.getLeft(); GenericRecord actual = serializeAndDeserializeAvro(ProtoConversionUtil.convertToAvro(convertedSchema, input), convertedSchema); Assertions.assertEquals(inputAndOutput.getRight(), actual); // assert that unsigned long is interpreted correctly Schema primitiveUnsignedLongSchema = convertedSchema.getField(PRIMITIVE_UNSIGNED_LONG_FIELD_NAME).schema(); assertUnsignedLongCorrectness(primitiveUnsignedLongSchema, input.getPrimitiveUnsignedLong(), (GenericFixed) actual.get(PRIMITIVE_UNSIGNED_LONG_FIELD_NAME)); Schema wrappedUnsignedLongSchema = convertedSchema.getField(WRAPPED_UNSIGNED_LONG_FIELD_NAME).schema().getTypes().get(1).getField("value").schema(); assertUnsignedLongCorrectness(wrappedUnsignedLongSchema, input.getWrappedUnsignedLong().getValue(), (GenericFixed) ((GenericRecord) actual.get(WRAPPED_UNSIGNED_LONG_FIELD_NAME)).get("value")); }
public static boolean canDrop( FilterPredicate pred, List<ColumnChunkMetaData> columns, DictionaryPageReadStore dictionaries) { Objects.requireNonNull(pred, "pred cannot be null"); Objects.requireNonNull(columns, "columns cannot be null"); return pred.accept(new DictionaryFilter(columns, dictionaries)); }
@Test public void testOr() throws Exception { BinaryColumn col = binaryColumn("binary_field"); // both evaluate to false (no upper-case letters are in the dictionary) FilterPredicate B = eq(col, Binary.fromString("B")); FilterPredicate C = eq(col, Binary.fromString("C")); // both evaluate to true (all lower-case letters are in the dictionary) FilterPredicate x = eq(col, Binary.fromString("x")); FilterPredicate y = eq(col, Binary.fromString("y")); assertFalse("Should not drop when one predicate could be true", canDrop(or(B, y), ccmd, dictionaries)); assertFalse("Should not drop when one predicate could be true", canDrop(or(x, C), ccmd, dictionaries)); assertTrue("Should drop when both predicates must be false", canDrop(or(B, C), ccmd, dictionaries)); assertFalse("Should not drop when one predicate could be true", canDrop(or(x, y), ccmd, dictionaries)); }
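By De Morgan, the dual should hold for conjunctions: an and(...) is droppable as soon as either side must be false. A hedged sketch in the style of the test above; this extrapolates the visitor's semantics rather than restating an assertion from this suite:
assertTrue("Should drop when one conjunct must be false", canDrop(and(B, y), ccmd, dictionaries));
assertFalse("Should not drop when both conjuncts could be true", canDrop(and(x, y), ccmd, dictionaries));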
@Override public Rule register(String ref, RuleKey ruleKey) { requireNonNull(ruleKey, "ruleKey can not be null"); Rule rule = rulesByUuid.get(ref); if (rule != null) { if (!ruleKey.repository().equals(rule.repository()) || !ruleKey.rule().equals(rule.key())) { throw new IllegalArgumentException(format( "Specified RuleKey '%s' is not equal to the one already registered in repository for ref %s: '%s'", ruleKey, ref, RuleKey.of(rule.repository(), rule.key()))); } return rule; } rule = new Rule(ref, ruleKey.repository(), ruleKey.rule()); rulesByUuid.put(ref, rule); return rule; }
@Test public void register_does_not_enforce_some_RuleKey_is_registered_under_a_single_id() { underTest.register(SOME_UUID, RuleKey.of(SOME_REPOSITORY, SOME_RULE_KEY)); for (int i = 0; i < someRandomInt(); i++) { Rule otherRule = underTest.register(Integer.toString(i), RuleKey.of(SOME_REPOSITORY, SOME_RULE_KEY)); assertThat(otherRule.ref()).isEqualTo(Integer.toString(i)); assertThat(otherRule.repository()).isEqualTo(SOME_REPOSITORY); assertThat(otherRule.key()).isEqualTo(SOME_RULE_KEY); } }
public static Builder builderFor(long snapshotId, SnapshotRefType type) { return new Builder(type, snapshotId); }
@Test public void testNoTypeFailure() { assertThatThrownBy(() -> SnapshotRef.builderFor(1L, null).build()) .isInstanceOf(IllegalArgumentException.class) .hasMessage("Snapshot reference type must not be null"); }
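For the happy path, a small sketch; BRANCH and TAG are the two SnapshotRefType values in this API:
// Builds a branch reference to snapshot 1; build() succeeds because the type is non-null.
SnapshotRef branchRef = SnapshotRef.builderFor(1L, SnapshotRefType.BRANCH).build();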
static void populateMissingPredictedOutputFieldTarget(final Model model) { if (model.getOutput() != null && model.getMiningSchema() != null) { Optional<OutputField> predictedOutputField = model.getOutput().getOutputFields().stream() .filter(outputField -> (outputField.getResultFeature() == null || outputField.getResultFeature().equals(ResultFeature.PREDICTED_VALUE)) && outputField.getTargetField() == null) .findFirst(); predictedOutputField.ifPresent(outputField -> { List<MiningField> targetFields = getMiningTargetFields(model.getMiningSchema().getMiningFields()); if (!targetFields.isEmpty()) { outputField.setTargetField(targetFields.get(0).getName()); } }); } }
@Test void populateMissingPredictedOutputFieldTarget() throws Exception { final InputStream inputStream = getFileInputStream(NO_OUTPUT_FIELD_TARGET_NAME_SAMPLE); final PMML pmml = org.jpmml.model.PMMLUtil.unmarshal(inputStream); final Model toPopulate = pmml.getModels().get(0); final OutputField outputField = toPopulate.getOutput().getOutputFields().get(0); assertThat(outputField.getResultFeature()).isEqualTo(ResultFeature.PREDICTED_VALUE); assertThat(outputField.getTargetField()).isNull(); KiePMMLUtil.populateMissingPredictedOutputFieldTarget(toPopulate); final MiningField targetField = getMiningTargetFields(toPopulate.getMiningSchema().getMiningFields()).get(0); assertThat(outputField.getTargetField()).isNotNull(); assertThat(outputField.getTargetField()).isEqualTo(targetField.getName()); }
public void addTimeline(TimelineEvent event) { timeline.add(event); }
@Test public void testAddDuplicateTimelineEvents() { WorkflowRuntimeSummary summary = new WorkflowRuntimeSummary(); TimelineEvent event = TimelineLogEvent.info("hello world"); summary.addTimeline(event); summary.addTimeline(TimelineLogEvent.info("hello world")); summary.addTimeline(TimelineLogEvent.info("hello world")); summary.addTimeline(TimelineLogEvent.info("hello world")); assertEquals(Collections.singletonList(event), summary.getTimeline().getTimelineEvents()); }
@Override public BlameAlgorithmEnum getBlameAlgorithm(int availableProcessors, int numberOfFiles) { BlameAlgorithmEnum forcedStrategy = configuration.get(PROP_SONAR_SCM_USE_BLAME_ALGORITHM) .map(BlameAlgorithmEnum::valueOf) .orElse(null); if (forcedStrategy != null) { return forcedStrategy; } if (availableProcessors == 0) { LOG.warn("Available processors are 0. Falling back to native git blame"); return GIT_NATIVE_BLAME; } if (numberOfFiles / availableProcessors > FILES_GIT_BLAME_TRIGGER) { return GIT_FILES_BLAME; } return GIT_NATIVE_BLAME; }
@Test public void useRepositoryBlame_whenFileBlamePropsEnabled_shouldDisableRepoBlame() { when(configuration.get(DefaultBlameStrategy.PROP_SONAR_SCM_USE_BLAME_ALGORITHM)).thenReturn(Optional.of(GIT_FILES_BLAME.name())); assertThat(underTest.getBlameAlgorithm(1, 1)).isEqualTo(GIT_FILES_BLAME); }
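With no forced strategy configured, the choice reduces to integer division against FILES_GIT_BLAME_TRIGGER, whose value is not visible here; a sketch assuming a trigger of 10 purely for illustration:
underTest.getBlameAlgorithm(4, 100); // 100 / 4 = 25 > 10 -> GIT_FILES_BLAME
underTest.getBlameAlgorithm(8, 40);  // 40 / 8 = 5 <= 10  -> GIT_NATIVE_BLAME
underTest.getBlameAlgorithm(0, 40);  // no processors reported -> warning logged, GIT_NATIVE_BLAME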
public void append(ByteBuffer record, DataType dataType) throws InterruptedException { if (dataType.isEvent()) { writeEvent(record, dataType); } else { writeRecord(record, dataType); } }
@Test void testAppendEventFinishCurrentBuffer() throws Exception { bufferSize = RECORD_SIZE * 3; AtomicInteger finishedBuffers = new AtomicInteger(0); HsMemoryDataManagerOperation memoryDataManagerOperation = TestingMemoryDataManagerOperation.builder() .setRequestBufferFromPoolSupplier(() -> createBufferBuilder(bufferSize)) .setOnBufferFinishedRunnable(finishedBuffers::incrementAndGet) .build(); HsSubpartitionMemoryDataManager subpartitionMemoryDataManager = createSubpartitionMemoryDataManager(memoryDataManagerOperation); subpartitionMemoryDataManager.append(createRecord(0), DataType.DATA_BUFFER); subpartitionMemoryDataManager.append(createRecord(1), DataType.DATA_BUFFER); assertThat(finishedBuffers).hasValue(0); subpartitionMemoryDataManager.append(createRecord(2), DataType.EVENT_BUFFER); assertThat(finishedBuffers).hasValue(2); }
@Override public List<GrokPattern> saveAll(Collection<GrokPattern> patterns, ImportStrategy importStrategy) throws ValidationException { final Map<String, GrokPattern> newPatternsByName; try { newPatternsByName = patterns.stream().collect(Collectors.toMap(GrokPattern::name, Function.identity())); } catch (IllegalStateException e) { throw new ValidationException("The supplied Grok patterns contain conflicting names: " + e.getLocalizedMessage()); } final Map<String, GrokPattern> existingPatternsByName = loadAll().stream().collect(Collectors.toMap(GrokPattern::name, Function.identity())); if (importStrategy == ABORT_ON_CONFLICT) { final Sets.SetView<String> conflictingNames = Sets.intersection(newPatternsByName.keySet(), existingPatternsByName.keySet()); if (!conflictingNames.isEmpty()) { final Iterable<String> limited = Iterables.limit(conflictingNames, MAX_DISPLAYED_CONFLICTS); throw new ValidationException("The following Grok patterns already exist: " + StringUtils.join(limited, ", ") + (conflictingNames.size() > MAX_DISPLAYED_CONFLICTS ? " (+ " + (conflictingNames.size() - MAX_DISPLAYED_CONFLICTS) + " more)" : "") + "."); } } validateAllOrThrow(patterns, importStrategy); final List<GrokPattern> savedPatterns = patterns.stream() .map(newPattern -> { final GrokPattern existingPattern = existingPatternsByName.get(newPattern.name()); if (existingPattern != null) { return newPattern.toBuilder().id(existingPattern.id()).build(); } else { return newPattern; } }) .map(dbCollection::save) .map(WriteResult::getSavedObject).collect(Collectors.toList()); clusterBus.post(GrokPatternsUpdatedEvent.create(newPatternsByName.keySet())); return savedPatterns; }
@Test public void issue_3949() { final List<GrokPattern> patternSet = new ArrayList<>(); // Also see: https://github.com/Graylog2/graylog2-server/issues/3949 patternSet.add(GrokPattern.create("POSTFIX_QMGR_REMOVED", "%{POSTFIX_QUEUEID:postfix_queueid}: removed")); patternSet.add(GrokPattern.create("POSTFIX_CLEANUP_MILTER", "%{POSTFIX_QUEUEID:postfix_queueid}: milter-%{POSTFIX_ACTION:postfix_milter_result}: %{GREEDYDATA:postfix_milter_message}; %{GREEDYDATA_NO_COLON:postfix_keyvalue_data}(: %{GREEDYDATA:postfix_milter_data})?")); patternSet.add(GrokPattern.create("POSTFIX_QMGR_ACTIVE", "%{POSTFIX_QUEUEID:postfix_queueid}: %{POSTFIX_KEYVALUE_DATA:postfix_keyvalue_data} \\(queue active\\)")); patternSet.add(GrokPattern.create("POSTFIX_TRIVIAL_REWRITE", "%{POSTFIX_WARNING}")); patternSet.add(GrokPattern.create("POSTFIX_WARNING", "%{POSTFIX_WARNING_WITH_KV}|%{POSTFIX_WARNING_WITHOUT_KV}")); patternSet.add(GrokPattern.create("POSTFIX_SMTPD_DISCONNECT", "disconnect from %{POSTFIX_CLIENT_INFO}")); patternSet.add(GrokPattern.create("POSTFIX_SMTPD_CONNECT", "connect from %{POSTFIX_CLIENT_INFO}")); patternSet.add(GrokPattern.create("POSTFIX_SMTPD_NOQUEUE", "NOQUEUE: %{POSTFIX_ACTION:postfix_action}: %{POSTFIX_SMTP_STAGE:postfix_smtp_stage} from %{POSTFIX_CLIENT_INFO}:( %{POSTFIX_STATUS_CODE:postfix_status_code} %{POSTFIX_STATUS_CODE_ENHANCED:postfix_status_code_enhanced})?( <%{DATA:postfix_status_data}>:)? (%{POSTFIX_DNSBL_MESSAGE}|%{GREEDYDATA:postfix_status_message};) %{POSTFIX_KEYVALUE_DATA:postfix_keyvalue_data}")); patternSet.add(GrokPattern.create("POSTFIX_SMTPD_LOSTCONN", "%{POSTFIX_LOSTCONN:postfix_smtpd_lostconn_data}( after %{POSTFIX_SMTP_STAGE:postfix_smtp_stage}( \\(%{INT} bytes\\))?)? from %{POSTFIX_CLIENT_INFO}(: %{GREEDYDATA:postfix_smtpd_lostconn_reason})?")); patternSet.add(GrokPattern.create("POSTFIX_SMTPD_PROXY", "proxy-%{POSTFIX_ACTION:postfix_proxy_result}: (%{POSTFIX_SMTP_STAGE:postfix_proxy_smtp_stage}): %{POSTFIX_PROXY_MESSAGE:postfix_proxy_message}; %{POSTFIX_KEYVALUE_DATA:postfix_keyvalue_data}")); patternSet.add(GrokPattern.create("POSTFIX_SMTPD_PIPELINING", "improper command pipelining after %{POSTFIX_SMTP_STAGE:postfix_smtp_stage} from %{POSTFIX_CLIENT_INFO}: %{GREEDYDATA:postfix_improper_pipelining_data}")); patternSet.add(GrokPattern.create("POSTFIX_QMGR", "%{POSTFIX_QMGR_REMOVED}|%{POSTFIX_QMGR_ACTIVE}|%{POSTFIX_QMGR_EXPIRED}|%{POSTFIX_WARNING}")); patternSet.add(GrokPattern.create("POSTFIX_CLEANUP", "%{POSTFIX_CLEANUP_MILTER}|%{POSTFIX_WARNING}|%{POSTFIX_KEYVALUE}")); patternSet.add(GrokPattern.create("POSTFIX_POSTSCREEN", "%{POSTFIX_PS_CONNECT}|%{POSTFIX_PS_ACCESS}|%{POSTFIX_PS_NOQUEUE}|%{POSTFIX_PS_TOOBUSY}|%{POSTFIX_PS_CACHE}|%{POSTFIX_PS_DNSBL}|%{POSTFIX_PS_VIOLATIONS}|%{POSTFIX_WARNING}")); patternSet.add(GrokPattern.create("POSTFIX_PIPE", "%{POSTFIX_PIPE_ANY}")); patternSet.add(GrokPattern.create("POSTFIX_ANVIL", "%{POSTFIX_ANVIL_CONN_RATE}|%{POSTFIX_ANVIL_CONN_CACHE}|%{POSTFIX_ANVIL_CONN_COUNT}")); patternSet.add(GrokPattern.create("POSTFIX_DNSBLOG", "%{POSTFIX_DNSBLOG_LISTING}|%{POSTFIX_WARNING}")); patternSet.add(GrokPattern.create("POSTFIX_PICKUP", "%{POSTFIX_KEYVALUE}")); patternSet.add(GrokPattern.create("POSTFIX_LMTP", "%{POSTFIX_SMTP}")); patternSet.add(GrokPattern.create("POSTFIX_MASTER", "%{POSTFIX_MASTER_START}|%{POSTFIX_MASTER_EXIT}|%{POSTFIX_WARNING}")); patternSet.add(GrokPattern.create("POSTFIX_TLSPROXY", "%{POSTFIX_TLSPROXY_CONN}|%{POSTFIX_WARNING}")); patternSet.add(GrokPattern.create("POSTFIX_SENDMAIL", "%{POSTFIX_WARNING}")); 
patternSet.add(GrokPattern.create("POSTFIX_BOUNCE", "%{POSTFIX_BOUNCE_NOTIFICATION}")); patternSet.add(GrokPattern.create("POSTFIX_SCACHE", "%{POSTFIX_SCACHE_LOOKUPS}|%{POSTFIX_SCACHE_SIMULTANEOUS}|%{POSTFIX_SCACHE_TIMESTAMP}")); patternSet.add(GrokPattern.create("POSTFIX_POSTDROP", "%{POSTFIX_WARNING}")); patternSet.add(GrokPattern.create("POSTFIX_LOSTCONN_REASONS", "(receiving the initial server greeting|sending message body|sending end of data -- message may be sent more than once)")); patternSet.add(GrokPattern.create("POSTFIX_PROXY_MESSAGE", "(%{POSTFIX_STATUS_CODE:postfix_proxy_status_code} )?(%{POSTFIX_STATUS_CODE_ENHANCED:postfix_proxy_status_code_enhanced})?.*")); patternSet.add(GrokPattern.create("GREEDYDATA_NO_COLON", "[^:]*")); patternSet.add(GrokPattern.create("GREEDYDATA_NO_SEMICOLON", "[^;]*")); patternSet.add(GrokPattern.create("POSTFIX_DISCARD", "%{POSTFIX_DISCARD_ANY}|%{POSTFIX_WARNING}")); patternSet.add(GrokPattern.create("POSTFIX_WARNING_WITH_KV", "(%{POSTFIX_QUEUEID:postfix_queueid}: )?%{POSTFIX_WARNING_LEVEL:postfix_message_level}: %{GREEDYDATA:postfix_message}; %{POSTFIX_KEYVALUE_DATA:postfix_keyvalue_data}")); patternSet.add(GrokPattern.create("POSTFIX_SMTP", "%{POSTFIX_SMTP_DELIVERY}|%{POSTFIX_SMTP_CONNERR}|%{POSTFIX_SMTP_LOSTCONN}|%{POSTFIX_SMTP_TIMEOUT}|%{POSTFIX_SMTP_RELAYERR}|%{POSTFIX_TLSCONN}|%{POSTFIX_WARNING}")); patternSet.add(GrokPattern.create("POSTFIX_WARNING_WITHOUT_KV", "(%{POSTFIX_QUEUEID:postfix_queueid}: )?%{POSTFIX_WARNING_LEVEL:postfix_message_level}: %{GREEDYDATA:postfix_message}")); patternSet.add(GrokPattern.create("POSTFIX_TLSPROXY_CONN", "(DIS)?CONNECT( from)? %{POSTFIX_CLIENT_INFO}")); patternSet.add(GrokPattern.create("POSTFIX_ANVIL_CONN_CACHE", "statistics: max cache size %{NUMBER:postfix_anvil_cache_size} at %{SYSLOGTIMESTAMP:postfix_anvil_timestamp}")); patternSet.add(GrokPattern.create("POSTFIX_ANVIL_CONN_RATE", "statistics: max connection rate %{NUMBER:postfix_anvil_conn_rate}/%{POSTFIX_TIME_UNIT:postfix_anvil_conn_period} for \\(%{DATA:postfix_service}:%{IP:postfix_client_ip}\\) at %{SYSLOGTIMESTAMP:postfix_anvil_timestamp}")); patternSet.add(GrokPattern.create("POSTFIX_SMTP_DELIVERY", "%{POSTFIX_KEYVALUE} status=%{WORD:postfix_status}( \\(%{GREEDYDATA:postfix_smtp_response}\\))?")); patternSet.add(GrokPattern.create("POSTFIX_ANVIL_CONN_COUNT", "statistics: max connection count %{NUMBER:postfix_anvil_conn_count} for \\(%{DATA:postfix_service}:%{IP:postfix_client_ip}\\) at %{SYSLOGTIMESTAMP:postfix_anvil_timestamp}")); patternSet.add(GrokPattern.create("POSTFIX_SMTP_CONNERR", "connect to %{POSTFIX_RELAY_INFO}: (Connection timed out|No route to host|Connection refused|Network is unreachable)")); patternSet.add(GrokPattern.create("POSTFIX_SMTPD", "%{POSTFIX_SMTPD_CONNECT}|%{POSTFIX_SMTPD_DISCONNECT}|%{POSTFIX_SMTPD_LOSTCONN}|%{POSTFIX_SMTPD_NOQUEUE}|%{POSTFIX_SMTPD_PIPELINING}|%{POSTFIX_TLSCONN}|%{POSTFIX_WARNING}|%{POSTFIX_SMTPD_PROXY}|%{POSTFIX_KEYVALUE}")); patternSet.add(GrokPattern.create("POSTFIX_SMTP_RELAYERR", "%{POSTFIX_QUEUEID:postfix_queueid}: host %{POSTFIX_RELAY_INFO} said: %{GREEDYDATA:postfix_smtp_response} \\(in reply to %{POSTFIX_SMTP_STAGE:postfix_smtp_stage} command\\)")); patternSet.add(GrokPattern.create("POSTFIX_STATUS_CODE_ENHANCED", "\\d\\.\\d\\.\\d")); patternSet.add(GrokPattern.create("POSTFIX_SMTP_TIMEOUT", "%{POSTFIX_QUEUEID:postfix_queueid}: conversation with %{POSTFIX_RELAY_INFO} timed out( while %{POSTFIX_LOSTCONN_REASONS:postfix_smtp_lostconn_reason})?")); 
patternSet.add(GrokPattern.create("POSTFIX_MASTER_EXIT", "terminating on signal %{INT:postfix_termination_signal}")); patternSet.add(GrokPattern.create("POSTFIX_MASTER_START", "(daemon started|reload) -- version %{DATA:postfix_version}, configuration %{PATH:postfix_config_path}")); patternSet.add(GrokPattern.create("POSTFIX_SCACHE_LOOKUPS", "statistics: (address|domain) lookup hits=%{INT:postfix_scache_hits} miss=%{INT:postfix_scache_miss} success=%{INT:postfix_scache_success}%")); patternSet.add(GrokPattern.create("POSTFIX_BOUNCE_NOTIFICATION", "%{POSTFIX_QUEUEID:postfix_queueid}: sender (non-delivery|delivery status|delay) notification: %{POSTFIX_QUEUEID:postfix_bounce_queueid}")); patternSet.add(GrokPattern.create("POSTFIX_SCACHE_TIMESTAMP", "statistics: start interval %{SYSLOGTIMESTAMP:postfix_scache_timestamp}")); patternSet.add(GrokPattern.create("POSTFIX_SCACHE_SIMULTANEOUS", "statistics: max simultaneous domains=%{INT:postfix_scache_domains} addresses=%{INT:postfix_scache_addresses} connection=%{INT:postfix_scache_connection}")); patternSet.add(GrokPattern.create("POSTFIX_CLIENT_INFO", "%{HOSTNAME:postfix_client_hostname}?\\[%{IP:postfix_client_ip}\\](:%{INT:postfix_client_port})?")); patternSet.add(GrokPattern.create("POSTFIX_RELAY_INFO", "%{HOSTNAME:postfix_relay_hostname}?\\[(%{IP:postfix_relay_ip}|%{DATA:postfix_relay_service})\\](:%{INT:postfix_relay_port})?|%{WORD:postfix_relay_service}")); patternSet.add(GrokPattern.create("POSTFIX_SMTP_STAGE", "(CONNECT|HELO|EHLO|STARTTLS|AUTH|MAIL( FROM)?|RCPT( TO)?|(end of )?DATA|RSET|UNKNOWN|END-OF-MESSAGE|VRFY|\\.)")); patternSet.add(GrokPattern.create("POSTFIX_ACTION", "(accept|defer|discard|filter|header-redirect|reject)")); patternSet.add(GrokPattern.create("POSTFIX_SMTP_LOSTCONN", "%{POSTFIX_QUEUEID:postfix_queueid}: %{POSTFIX_LOSTCONN:postfix_smtp_lostconn_data} with %{POSTFIX_RELAY_INFO}( while %{POSTFIX_LOSTCONN_REASONS:postfix_smtp_lostconn_reason})?")); patternSet.add(GrokPattern.create("POSTFIX_STATUS_CODE", "\\d{3}")); patternSet.add(GrokPattern.create("POSTFIX_TLSCONN", "(Anonymous|Trusted|Untrusted|Verified) TLS connection established (to %{POSTFIX_RELAY_INFO}|from %{POSTFIX_CLIENT_INFO}): %{DATA:postfix_tls_version} with cipher %{DATA:postfix_tls_cipher} \\(%{DATA:postfix_tls_cipher_size} bits\\)")); patternSet.add(GrokPattern.create("POSTFIX_DELAYS", "%{NUMBER:postfix_delay_before_qmgr}/%{NUMBER:postfix_delay_in_qmgr}/%{NUMBER:postfix_delay_conn_setup}/%{NUMBER:postfix_delay_transmission}")); patternSet.add(GrokPattern.create("POSTFIX_LOSTCONN", "(lost connection|timeout|SSL_accept error)")); patternSet.add(GrokPattern.create("POSTFIX_PIPE_ANY", "%{POSTFIX_QUEUEID:postfix_queueid}: %{POSTFIX_KEYVALUE_DATA:postfix_keyvalue_data}, status=%{WORD:postfix_status} \\(%{GREEDYDATA:postfix_pipe_response}\\)")); patternSet.add(GrokPattern.create("POSTFIX_QMGR_EXPIRED", "%{POSTFIX_QUEUEID:postfix_queueid}: from=<%{DATA:postfix_from}>, status=%{WORD:postfix_status}, returned to sender")); patternSet.add(GrokPattern.create("POSTFIX_DISCARD_ANY", "%{POSTFIX_QUEUEID:postfix_queueid}: %{POSTFIX_KEYVALUE_DATA:postfix_keyvalue_data} status=%{WORD:postfix_status} %{GREEDYDATA}")); patternSet.add(GrokPattern.create("POSTFIX_ERROR_ANY", "%{POSTFIX_QUEUEID:postfix_queueid}: %{POSTFIX_KEYVALUE_DATA:postfix_keyvalue_data}, status=%{WORD:postfix_status} \\(%{GREEDYDATA:postfix_error_response}\\)")); patternSet.add(GrokPattern.create("POSTFIX_POSTSUPER_ACTION", "%{POSTFIX_QUEUEID:postfix_queueid}: 
%{POSTFIX_POSTSUPER_ACTIONS:postfix_postsuper_action}")); patternSet.add(GrokPattern.create("POSTFIX_POSTSUPER_ACTIONS", "(removed|requeued|placed on hold|released from hold)")); patternSet.add(GrokPattern.create("POSTFIX_DNSBLOG_LISTING", "addr %{IP:postfix_client_ip} listed by domain %{HOSTNAME:postfix_dnsbl_domain} as %{IP:postfix_dnsbl_result}")); patternSet.add(GrokPattern.create("POSTFIX_DNSBL_MESSAGE", "Service unavailable; .* \\[%{GREEDYDATA:postfix_status_data}\\] %{GREEDYDATA:postfix_status_message};")); patternSet.add(GrokPattern.create("POSTFIX_PS_VIOLATIONS", "%{POSTFIX_PS_VIOLATION:postfix_postscreen_violation}( %{INT})?( after %{NUMBER:postfix_postscreen_violation_time})? from %{POSTFIX_CLIENT_INFO}(( after %{POSTFIX_SMTP_STAGE:postfix_smtp_stage})?(: %{GREEDYDATA:postfix_postscreen_data})?| in tests (after|before) SMTP handshake)")); patternSet.add(GrokPattern.create("POSTFIX_PS_ACCESS_ACTION", "(DISCONNECT|BLACKLISTED|WHITELISTED|WHITELIST VETO|PASS NEW|PASS OLD)")); patternSet.add(GrokPattern.create("POSTFIX_PS_VIOLATION", "(BARE NEWLINE|COMMAND (TIME|COUNT|LENGTH) LIMIT|COMMAND PIPELINING|DNSBL|HANGUP|NON-SMTP COMMAND|PREGREET)")); patternSet.add(GrokPattern.create("POSTFIX_TIME_UNIT", "%{NUMBER}[smhd]")); patternSet.add(GrokPattern.create("POSTFIX_KEYVALUE_DATA", "[\\w-]+=[^;]*")); patternSet.add(GrokPattern.create("POSTFIX_KEYVALUE", "%{POSTFIX_QUEUEID:postfix_queueid}: %{POSTFIX_KEYVALUE_DATA:postfix_keyvalue_data}")); patternSet.add(GrokPattern.create("POSTFIX_WARNING_LEVEL", "(warning|fatal|info)")); patternSet.add(GrokPattern.create("POSTFIX_POSTSUPER_SUMMARY", "%{POSTFIX_POSTSUPER_SUMMARY_ACTIONS:postfix_postsuper_summary_action}: %{NUMBER:postfix_postsuper_summary_count} messages?")); patternSet.add(GrokPattern.create("POSTFIX_POSTSUPER_SUMMARY_ACTIONS", "(Deleted|Requeued|Placed on hold|Released from hold)")); patternSet.add(GrokPattern.create("POSTFIX_PS_ACCESS", "%{POSTFIX_PS_ACCESS_ACTION:postfix_postscreen_access} %{POSTFIX_CLIENT_INFO}")); patternSet.add(GrokPattern.create("POSTFIX_PS_CONNECT", "CONNECT from %{POSTFIX_CLIENT_INFO} to \\[%{IP:postfix_server_ip}\\]:%{INT:postfix_server_port}")); patternSet.add(GrokPattern.create("POSTFIX_PS_TOOBUSY", "NOQUEUE: reject: CONNECT from %{POSTFIX_CLIENT_INFO}: %{GREEDYDATA:postfix_postscreen_toobusy_data}")); patternSet.add(GrokPattern.create("POSTFIX_PS_NOQUEUE", "%{POSTFIX_SMTPD_NOQUEUE}")); patternSet.add(GrokPattern.create("POSTFIX_PS_CACHE", "cache %{DATA} full cleanup: retained=%{NUMBER:postfix_postscreen_cache_retained} dropped=%{NUMBER:postfix_postscreen_cache_dropped} entries")); patternSet.add(GrokPattern.create("POSTFIX_PS_DNSBL", "%{POSTFIX_PS_VIOLATION:postfix_postscreen_violation} rank %{INT:postfix_postscreen_dnsbl_rank} for %{POSTFIX_CLIENT_INFO}")); patternSet.add(GrokPattern.create("POSTFIX_LOCAL", "%{POSTFIX_KEYVALUE}")); patternSet.add(GrokPattern.create("POSTFIX_TLSMGR", "%{POSTFIX_WARNING}")); patternSet.add(GrokPattern.create("POSTFIX_ERROR", "%{POSTFIX_ERROR_ANY}")); patternSet.add(GrokPattern.create("POSTFIX_QUEUEID", "([0-9A-F]{6,}|[0-9a-zA-Z]{15,})")); patternSet.add(GrokPattern.create("POSTFIX_VIRTUAL", "%{POSTFIX_SMTP_DELIVERY}")); patternSet.add(GrokPattern.create("POSTFIX_POSTSUPER", "%{POSTFIX_POSTSUPER_ACTION}|%{POSTFIX_POSTSUPER_SUMMARY}")); assertThatThrownBy(() -> service.saveAll(patternSet, DROP_ALL_EXISTING)) .isInstanceOf(IllegalArgumentException.class) .hasMessage("No definition for key 'GREEDYDATA' found, aborting"); }
public List<VespaConfigChangeAction> validate() { List<VespaConfigChangeAction> result = new ArrayList<>(); for (ImmutableSDField nextField : new LinkedHashSet<>(nextSchema.allConcreteFields())) { String fieldName = nextField.getName(); ImmutableSDField currentField = currentSchema.getConcreteField(fieldName); if (currentField != null) { validateScripts(currentField, nextField).ifPresent(result::add); } else if (nextField.isExtraField()) { result.add(VespaReindexAction.of(id, null, "Non-document field '" + nextField.getName() + "' added; this may be populated by reindexing")); } } return result; }
@Test void requireThatOutputExpressionsAreIgnoredInAdvancedScript() throws Exception { assertTrue(new ScriptFixture("{ input foo | switch { case \"audio\": input bar | index; case \"video\": input baz | index; default: 0 | index; }; }", "{ input foo | switch { case \"audio\": input bar | attribute; case \"video\": input baz | attribute; default: 0 | attribute; }; }"). validate()); }
@Override public final int hashCode() { return Objects.hash(enabled, writeCoalescing, implementation, className, factoryImplementation, factoryClassName, writeDelaySeconds, writeBatchSize, properties, initialLoadMode, offload); }
@Test public void testHashCode() { assumeDifferentHashCodes(); assertNotEquals(defaultCfg.hashCode(), cfgNotEnabled.hashCode()); assertNotEquals(defaultCfg.hashCode(), cfgNotWriteCoalescing.hashCode()); assertNotEquals(defaultCfg.hashCode(), cfgNonNullClassName.hashCode()); assertNotEquals(defaultCfg.hashCode(), cfgNonNullFactoryClassName.hashCode()); assertNotEquals(defaultCfg.hashCode(), cfgNonNullImplementation.hashCode()); assertNotEquals(defaultCfg.hashCode(), cfgNonNullFactoryImplementation.hashCode()); assertNotEquals(defaultCfg.hashCode(), cfgEagerMode.hashCode()); assertNotEquals(defaultCfg.hashCode(), cfgNullMode.hashCode()); }
@Override public String getResourceOutputNodeType() { return DictionaryConst.NODE_TYPE_FILE_FIELD; }
@Test public void testGetResourceOutputNodeType() throws Exception { assertEquals( DictionaryConst.NODE_TYPE_FILE_FIELD, analyzer.getResourceOutputNodeType() ); }
public Order placeOrder(Order body) throws RestClientException { return placeOrderWithHttpInfo(body).getBody(); }
@Test public void placeOrderTest() { Order body = null; Order response = api.placeOrder(body); // TODO: test validations }
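A hedged sketch of what the generated TODO stub might grow into; the Order setters and StatusEnum below follow the usual petstore model and are assumptions about this particular generated client:
Order body = new Order(); // hypothetical populated request
body.setId(10L);
body.setPetId(1L);
body.setQuantity(2);
body.setStatus(Order.StatusEnum.PLACED);
Order response = api.placeOrder(body);
// plausible validations once the stub is completed:
// assertEquals(body.getId(), response.getId());
// assertEquals(Order.StatusEnum.PLACED, response.getStatus());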
@Override public boolean tryLock(final GlobalLockDefinition lockDefinition, final long timeoutMillis) { return globalLockPersistService.tryLock(lockDefinition, timeoutMillis); }
@Test void assertTryLock() { when(lockPersistService.tryLock(lockDefinition, 3000L)).thenReturn(true); assertTrue(lockContext.tryLock(lockDefinition, 3000L)); }
private List<BindingPattern> serverBindings(DeployState deployState, ConfigModelContext context, Element searchElement, Collection<BindingPattern> defaultBindings) { List<Element> bindings = XML.getChildren(searchElement, "binding"); if (bindings.isEmpty()) return List.copyOf(defaultBindings); return toBindingList(deployState, context, bindings); }
@Test void verify_bindings_for_builtin_handlers() { createBasicContainerModel(); JdiscBindingsConfig config = root.getConfig(JdiscBindingsConfig.class, "default/container.0"); JdiscBindingsConfig.Handlers defaultRootHandler = config.handlers(BindingsOverviewHandler.class.getName()); assertThat(defaultRootHandler.serverBindings(), contains("http://*/")); JdiscBindingsConfig.Handlers applicationStatusHandler = config.handlers(ApplicationStatusHandler.class.getName()); assertThat(applicationStatusHandler.serverBindings(), contains("http://*/ApplicationStatus")); JdiscBindingsConfig.Handlers fileRequestHandler = config.handlers(VipStatusHandler.class.getName()); assertThat(fileRequestHandler.serverBindings(), contains("http://*/status.html")); JdiscBindingsConfig.Handlers metricsV2Handler = config.handlers(MetricsV2Handler.class.getName()); assertThat(metricsV2Handler.serverBindings(), contains("http://*/metrics/v2", "http://*/metrics/v2/*")); }
@Override public int run(PrintWriter out, PrintWriter err, String... args) { try { return Main.main(System.in, out, err, args); } catch (RuntimeException e) { err.print(e.getMessage()); err.flush(); return 1; // pass non-zero value back indicating an error has happened } }
@Test public void testUsageOutputAfterLoadingViaToolName() { String name = "google-java-format"; assertThat( ServiceLoader.load(ToolProvider.class).stream() .map(ServiceLoader.Provider::get) .map(ToolProvider::name)) .contains(name); ToolProvider format = ToolProvider.findFirst(name).get(); StringWriter out = new StringWriter(); StringWriter err = new StringWriter(); int result = format.run(new PrintWriter(out, true), new PrintWriter(err, true), "--help"); assertThat(result).isNotEqualTo(0); String usage = err.toString(); // Check that doc links are included. assertThat(usage).containsMatch("http.*/google-java-format"); assertThat(usage).contains("Usage: google-java-format"); }
@Override public Mono<DeleteAccountResponse> deleteAccount(final DeleteAccountRequest request) { final AuthenticatedDevice authenticatedDevice = AuthenticationUtil.requireAuthenticatedPrimaryDevice(); return Mono.fromFuture(() -> accountsManager.getByAccountIdentifierAsync(authenticatedDevice.accountIdentifier())) .map(maybeAccount -> maybeAccount.orElseThrow(Status.UNAUTHENTICATED::asRuntimeException)) .flatMap(account -> Mono.fromFuture(() -> accountsManager.delete(account, AccountsManager.DeletionReason.USER_REQUEST))) .thenReturn(DeleteAccountResponse.newBuilder().build()); }
@Test void deleteAccountLinkedDevice() { getMockAuthenticationInterceptor().setAuthenticatedDevice(AUTHENTICATED_ACI, (byte) (Device.PRIMARY_ID + 1)); //noinspection ResultOfMethodCallIgnored GrpcTestUtils.assertStatusException(Status.PERMISSION_DENIED, () -> authenticatedServiceStub().deleteAccount(DeleteAccountRequest.newBuilder().build())); verify(accountsManager, never()).delete(any(), any()); }
@Override public DictTypeDO getDictType(Long id) { return dictTypeMapper.selectById(id); }
@Test public void testGetDictType_id() { // mock data DictTypeDO dbDictType = randomDictTypeDO(); dictTypeMapper.insert(dbDictType); // prepare the parameter Long id = dbDictType.getId(); // invoke DictTypeDO dictType = dictTypeService.getDictType(id); // assert assertNotNull(dictType); assertPojoEquals(dbDictType, dictType); }
@Override public void getConfig(ZookeeperServerConfig.Builder builder) { ConfigServer[] configServers = getConfigServers(); int[] zookeeperIds = getConfigServerZookeeperIds(); if (configServers.length != zookeeperIds.length) { throw new IllegalArgumentException(String.format("Number of provided config server hosts (%d) must be the " + "same as number of provided config server zookeeper ids (%d)", configServers.length, zookeeperIds.length)); } String myhostname = HostName.getLocalhost(); // TODO: Server index should be in interval [1, 254] according to doc, // however, we cannot change this id for an existing server for (int i = 0; i < configServers.length; i++) { if (zookeeperIds[i] < 0) { throw new IllegalArgumentException(String.format("Zookeeper ids cannot be negative, was %d for %s", zookeeperIds[i], configServers[i].hostName)); } if (configServers[i].hostName.equals(myhostname)) { builder.myid(zookeeperIds[i]); } builder.server(getZkServer(configServers[i], zookeeperIds[i])); } if (options.zookeeperClientPort().isPresent()) { builder.clientPort(options.zookeeperClientPort().get()); } if (options.hostedVespa().orElse(false)) { builder.vespaTlsConfigFile(Defaults.getDefaults().underVespaHome("var/zookeeper/conf/tls.conf.json")); } boolean isHostedVespa = options.hostedVespa().orElse(false); builder.dynamicReconfiguration(isHostedVespa); builder.reconfigureEnsemble(!isHostedVespa); builder.snapshotMethod(options.zooKeeperSnapshotMethod()); builder.juteMaxBuffer(options.zookeeperJuteMaxBuffer()); }
@Test void testCuratorConfig() { CuratorConfig config = getConfig(CuratorConfig.class); assertEquals(1, config.server().size()); assertEquals("localhost", config.server().get(0).hostname()); assertEquals(2181, config.server().get(0).port()); assertEquals(120, config.zookeeperSessionTimeoutSeconds()); assertTrue(config.zookeeperLocalhostAffinity()); }
public static ReadRows readRows() { return new AutoValue_JdbcIO_ReadRows.Builder() .setFetchSize(DEFAULT_FETCH_SIZE) .setOutputParallelization(true) .setStatementPreparator(ignored -> {}) .build(); }
@Test @SuppressWarnings({"UnusedVariable", "AssertThrowsMultipleStatements"}) public void testReadRowsFailedToGetSchema() { Exception exc = assertThrows( BeamSchemaInferenceException.class, () -> { // Using a new pipeline object to avoid the various checks made by TestPipeline in // this pipeline which is // expected to throw an exception. Pipeline pipeline = Pipeline.create(); pipeline.apply( JdbcIO.readRows() .withDataSourceConfiguration(DATA_SOURCE_CONFIGURATION) .withQuery( String.format( "SELECT CAST(1 AS NUMERIC(1, 0)) AS T1 FROM %s", "unknown_table"))); pipeline.run(); }); assertThat(exc.getMessage(), containsString("Failed to infer Beam schema")); }
@Override public boolean createTopic( final String topic, final int numPartitions, final short replicationFactor, final Map<String, ?> configs, final CreateTopicsOptions createOptions ) { final Optional<Long> retentionMs = KafkaTopicClient.getRetentionMs(configs); if (isTopicExists(topic)) { validateTopicProperties(topic, numPartitions, replicationFactor, retentionMs); return false; } final short resolvedReplicationFactor = replicationFactor == TopicProperties.DEFAULT_REPLICAS ? getDefaultClusterReplication() : replicationFactor; final NewTopic newTopic = new NewTopic(topic, numPartitions, resolvedReplicationFactor); newTopic.configs(toStringConfigs(configs)); try { LOG.info("Creating topic '{}' {}", topic, (createOptions.shouldValidateOnly()) ? "(ONLY VALIDATE)" : "" ); ExecutorUtil.executeWithRetries( () -> adminClient.get().createTopics( Collections.singleton(newTopic), createOptions ).all().get(), ExecutorUtil.RetryBehaviour.ON_RETRYABLE); return true; } catch (final InterruptedException e) { Thread.currentThread().interrupt(); throw new KafkaResponseGetFailedException( "Failed to guarantee existence of topic " + topic, e); } catch (final TopicExistsException e) { // if the topic already exists, it is most likely because another node just created it. // ensure that it matches the partition count, replication factor, and retention // before returning success validateTopicProperties(topic, numPartitions, replicationFactor, retentionMs); return false; } catch (final TopicAuthorizationException e) { throw new KsqlTopicAuthorizationException( AclOperation.CREATE, Collections.singleton(topic)); } catch (final Exception e) { throw new KafkaResponseGetFailedException( "Failed to guarantee existence of topic " + topic, e); } }
@Test public void shouldThrowFromCreateTopicIfNoAclsSet() { // Given: when(adminClient.createTopics(any(), any())) .thenAnswer(createTopicsResult(new TopicAuthorizationException("error"))); // When: final Exception e = assertThrows( KsqlTopicAuthorizationException.class, () -> kafkaTopicClient.createTopic("someTopic", 1, (short) 2, configs) ); // Then: assertThat(e.getMessage(), containsString( "Authorization denied to Create on topic(s): [someTopic]")); }
protected <T> Map<String, Object> generate(Class<? extends T> cls, @Nullable Class<T> base) { SchemaGeneratorConfigBuilder builder = new SchemaGeneratorConfigBuilder( SchemaVersion.DRAFT_2019_09, OptionPreset.PLAIN_JSON ); this.build(builder,false); // we don't return base properties unless specified with @PluginProperty builder .forFields() .withIgnoreCheck(fieldScope -> base != null && fieldScope.getAnnotation(PluginProperty.class) == null && fieldScope.getDeclaringType().getTypeName().equals(base.getName())); SchemaGeneratorConfig schemaGeneratorConfig = builder.build(); SchemaGenerator generator = new SchemaGenerator(schemaGeneratorConfig); try { ObjectNode objectNode = generator.generateSchema(cls); replaceAnyOfWithOneOf(objectNode); return JacksonMapper.toMap(extractMainRef(objectNode)); } catch (IllegalArgumentException e) { throw new IllegalArgumentException("Unable to generate jsonschema for '" + cls.getName() + "'", e); } }
@SuppressWarnings("unchecked") @Test void trigger() throws URISyntaxException { Helpers.runApplicationContext((applicationContext) -> { JsonSchemaGenerator jsonSchemaGenerator = applicationContext.getBean(JsonSchemaGenerator.class); Map<String, Object> jsonSchema = jsonSchemaGenerator.generate(AbstractTrigger.class, AbstractTrigger.class); assertThat((Map<String, Object>) jsonSchema.get("properties"), allOf( Matchers.aMapWithSize(2), hasKey("conditions"), hasKey("stopAfter") )); }); }
@Override public void validateUserList(Collection<Long> ids) { if (CollUtil.isEmpty(ids)) { return; } // Fetch the users List<AdminUserDO> users = userMapper.selectBatchIds(ids); Map<Long, AdminUserDO> userMap = CollectionUtils.convertMap(users, AdminUserDO::getId); // Validate each id ids.forEach(id -> { AdminUserDO user = userMap.get(id); if (user == null) { throw exception(USER_NOT_EXISTS); } if (!CommonStatusEnum.ENABLE.getStatus().equals(user.getStatus())) { throw exception(USER_IS_DISABLE, user.getNickname()); } }); }
@Test public void testValidateUserList_notEnable() { // mock data AdminUserDO userDO = randomAdminUserDO().setStatus(CommonStatusEnum.DISABLE.getStatus()); userMapper.insert(userDO); // prepare parameters List<Long> ids = singletonList(userDO.getId()); // invoke and assert the exception assertServiceException(() -> userService.validateUserList(ids), USER_IS_DISABLE, userDO.getNickname()); }
public HttpResponse get(Application application, String hostName, String serviceType, Path path, Query query) { return get(application, hostName, serviceType, path, query, null); }
@Test(expected = RequestTimeoutException.class) public void testFetchException() { when(fetcher.get(any(), any())).thenThrow(new RequestTimeoutException("timed out")); proxy.get(applicationMock, hostname, CLUSTERCONTROLLER_CONTAINER.serviceName, Path.parse("clustercontroller-status/v1/clusterName"), Query.empty()); }
@SuppressWarnings("unchecked") @Override public NodeHeartbeatResponse nodeHeartbeat(NodeHeartbeatRequest request) throws YarnException, IOException { NodeStatus remoteNodeStatus = request.getNodeStatus(); /** * Here is the node heartbeat sequence... * 1. Check if it's a valid (i.e. not excluded) node * 2. Check if it's a registered node * 3. Check if it's a 'fresh' heartbeat i.e. not duplicate heartbeat * 4. Send healthStatus to RMNode * 5. Update node's labels if distributed Node Labels configuration is enabled */ NodeId nodeId = remoteNodeStatus.getNodeId(); // 1. Check if it's a valid (i.e. not excluded) node, if not, see if it is // in decommissioning. if (!this.nodesListManager.isValidNode(nodeId.getHost()) && !isNodeInDecommissioning(nodeId)) { String message = "Disallowed NodeManager nodeId: " + nodeId + " hostname: " + nodeId.getHost(); LOG.info(message); return YarnServerBuilderUtils.newNodeHeartbeatResponse( NodeAction.SHUTDOWN, message); } // 2. Check if it's a registered node RMNode rmNode = this.rmContext.getRMNodes().get(nodeId); if (rmNode == null) { /* node does not exist */ String message = "Node not found resyncing " + remoteNodeStatus.getNodeId(); LOG.info(message); return YarnServerBuilderUtils.newNodeHeartbeatResponse(NodeAction.RESYNC, message); } // Send ping this.nmLivelinessMonitor.receivedPing(nodeId); this.decommissioningWatcher.update(rmNode, remoteNodeStatus); // 3. Check if it's a 'fresh' heartbeat i.e. not duplicate heartbeat NodeHeartbeatResponse lastNodeHeartbeatResponse = rmNode.getLastNodeHeartBeatResponse(); if (getNextResponseId( remoteNodeStatus.getResponseId()) == lastNodeHeartbeatResponse .getResponseId()) { LOG.info("Received duplicate heartbeat from node " + rmNode.getNodeAddress()+ " responseId=" + remoteNodeStatus.getResponseId()); return lastNodeHeartbeatResponse; } else if (remoteNodeStatus.getResponseId() != lastNodeHeartbeatResponse .getResponseId()) { String message = "Too far behind rm response id:" + lastNodeHeartbeatResponse.getResponseId() + " nm response id:" + remoteNodeStatus.getResponseId(); LOG.info(message); // TODO: Just sending reboot is not enough. Think more. this.rmContext.getDispatcher().getEventHandler().handle( new RMNodeEvent(nodeId, RMNodeEventType.REBOOTING)); return YarnServerBuilderUtils.newNodeHeartbeatResponse(NodeAction.RESYNC, message); } // Evaluate whether a DECOMMISSIONING node is ready to be DECOMMISSIONED. if (rmNode.getState() == NodeState.DECOMMISSIONING && decommissioningWatcher.checkReadyToBeDecommissioned( rmNode.getNodeID())) { String message = "DECOMMISSIONING " + nodeId + " is ready to be decommissioned"; LOG.info(message); this.rmContext.getDispatcher().getEventHandler().handle( new RMNodeEvent(nodeId, RMNodeEventType.DECOMMISSION)); this.nmLivelinessMonitor.unregister(nodeId); return YarnServerBuilderUtils.newNodeHeartbeatResponse( NodeAction.SHUTDOWN, message); } if (timelineServiceV2Enabled) { // Check & update collectors info from request. 
updateAppCollectorsMap(request); } // Heartbeat response long newInterval = nextHeartBeatInterval; if (heartBeatIntervalScalingEnable) { newInterval = rmNode.calculateHeartBeatInterval( nextHeartBeatInterval, heartBeatIntervalMin, heartBeatIntervalMax, heartBeatIntervalSpeedupFactor, heartBeatIntervalSlowdownFactor); } NodeHeartbeatResponse nodeHeartBeatResponse = YarnServerBuilderUtils.newNodeHeartbeatResponse( getNextResponseId(lastNodeHeartbeatResponse.getResponseId()), NodeAction.NORMAL, null, null, null, null, newInterval); rmNode.setAndUpdateNodeHeartbeatResponse(nodeHeartBeatResponse); populateKeys(request, nodeHeartBeatResponse); populateTokenSequenceNo(request, nodeHeartBeatResponse); if (timelineServiceV2Enabled) { // Return collectors' map that NM needs to know setAppCollectorsMapToResponse(rmNode.getRunningApps(), nodeHeartBeatResponse); } // 4. Send status to RMNode, saving the latest response. RMNodeStatusEvent nodeStatusEvent = new RMNodeStatusEvent(nodeId, remoteNodeStatus); if (request.getLogAggregationReportsForApps() != null && !request.getLogAggregationReportsForApps().isEmpty()) { nodeStatusEvent.setLogAggregationReportsForApps(request .getLogAggregationReportsForApps()); } this.rmContext.getDispatcher().getEventHandler().handle(nodeStatusEvent); // 5. Update node's labels to RM's NodeLabelManager. if (isDistributedNodeLabelsConf && request.getNodeLabels() != null) { try { updateNodeLabelsFromNMReport( NodeLabelsUtils.convertToStringSet(request.getNodeLabels()), nodeId); nodeHeartBeatResponse.setAreNodeLabelsAcceptedByRM(true); } catch (IOException ex) { //ensure the error message is captured and sent across in response nodeHeartBeatResponse.setDiagnosticsMessage(ex.getMessage()); nodeHeartBeatResponse.setAreNodeLabelsAcceptedByRM(false); } } // 6. check if node's capacity is load from dynamic-resources.xml // if so, send updated resource back to NM. String nid = nodeId.toString(); Resource capability = loadNodeResourceFromDRConfiguration(nid); // sync back with new resource if not null. if (capability != null) { nodeHeartBeatResponse.setResource(capability); } // Check if we got an event (AdminService) that updated the resources if (rmNode.isUpdatedCapability()) { nodeHeartBeatResponse.setResource(rmNode.getTotalCapability()); rmNode.resetUpdatedCapability(); } // 7. Send Container Queuing Limits back to the Node. This will be used by // the node to truncate the number of Containers queued for execution. if (this.rmContext.getNodeManagerQueueLimitCalculator() != null) { nodeHeartBeatResponse.setContainerQueuingLimit( this.rmContext.getNodeManagerQueueLimitCalculator() .createContainerQueuingLimit()); } // 8. Get node's attributes and update node-to-attributes mapping // in RMNodeAttributeManager. if (request.getNodeAttributes() != null) { try { // update node attributes if necessary then update heartbeat response updateNodeAttributesIfNecessary(nodeId, request.getNodeAttributes()); nodeHeartBeatResponse.setAreNodeAttributesAcceptedByRM(true); } catch (IOException ex) { //ensure the error message is captured and sent across in response String errorMsg = nodeHeartBeatResponse.getDiagnosticsMessage() == null ? ex.getMessage() : nodeHeartBeatResponse.getDiagnosticsMessage() + "\n" + ex .getMessage(); nodeHeartBeatResponse.setDiagnosticsMessage(errorMsg); nodeHeartBeatResponse.setAreNodeAttributesAcceptedByRM(false); } } return nodeHeartBeatResponse; }
@Test public void testAddNewExcludePathToConfiguration() throws Exception { Configuration conf = new Configuration(); rm = new MockRM(conf); rm.start(); MockNM nm1 = rm.registerNode("host1:1234", 5120); MockNM nm2 = rm.registerNode("host2:5678", 10240); ClusterMetrics metrics = ClusterMetrics.getMetrics(); assert(metrics != null); int initialMetricCount = metrics.getNumDecommisionedNMs(); NodeHeartbeatResponse nodeHeartbeat = nm1.nodeHeartbeat(true); Assert.assertEquals( NodeAction.NORMAL, nodeHeartbeat.getNodeAction()); nodeHeartbeat = nm2.nodeHeartbeat(true); Assert.assertEquals( NodeAction.NORMAL, nodeHeartbeat.getNodeAction()); writeToHostsFile("host2"); conf.set(YarnConfiguration.RM_NODES_EXCLUDE_FILE_PATH, hostFile .getAbsolutePath()); rm.getNodesListManager().refreshNodes(conf); checkDecommissionedNMCount(rm, ++initialMetricCount); nodeHeartbeat = nm1.nodeHeartbeat(true); Assert.assertEquals( "Node should not have been decommissioned.", NodeAction.NORMAL, nodeHeartbeat.getNodeAction()); nodeHeartbeat = nm2.nodeHeartbeat(true); Assert.assertEquals( "Node should have been decommissioned but is in state" + nodeHeartbeat.getNodeAction(), NodeAction.SHUTDOWN, nodeHeartbeat.getNodeAction()); }
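The duplicate-heartbeat check in step 3 of nodeHeartbeat is plain responseId arithmetic; a commented sketch of the three outcomes, assuming getNextResponseId(...) increments by one with wrap-around:
int lastSent = lastNodeHeartbeatResponse.getResponseId(); // say 7
int fromNode = remoteNodeStatus.getResponseId();
// fromNode == 6 -> getNextResponseId(6) == 7: duplicate, the cached response is resent
// fromNode == 7 -> fresh heartbeat, processing continues
// anything else -> node and RM are out of sync, the RM answers with RESYNC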
public static String formatTime(Duration duration) { int totalSeconds = Ints.saturatedCast(duration.roundTo(SECONDS)); if (totalSeconds == 0) { return format("%sms", Ints.saturatedCast(duration.roundTo(MILLISECONDS))); } int minutes = totalSeconds / 60; int seconds = totalSeconds % 60; return format("%s:%02d", minutes, seconds); }
@Test public void testFormatTime() { assertEquals(FormatUtils.formatTime(Duration.succinctNanos(100L)), "0ms"); assertEquals(FormatUtils.formatTime(Duration.succinctDuration(1.1, TimeUnit.MILLISECONDS)), "1ms"); assertEquals(FormatUtils.formatTime(Duration.succinctDuration(1.1, TimeUnit.SECONDS)), "0:01"); assertEquals(FormatUtils.formatTime(Duration.succinctDuration(1.5, TimeUnit.MINUTES)), "1:30"); assertEquals(FormatUtils.formatTime(Duration.succinctDuration(1.5, TimeUnit.HOURS)), "90:00"); }
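Two consequences of the arithmetic in formatTime, in the same assertion style as the test: minutes never roll over into an hours field, and sub-second durations short-circuit to a millisecond rendering.
assertEquals(FormatUtils.formatTime(Duration.succinctDuration(2, TimeUnit.HOURS)), "120:00");
assertEquals(FormatUtils.formatTime(Duration.succinctDuration(250, TimeUnit.MILLISECONDS)), "250ms");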
@Nullable public static String decrypt(String cipherText, String encryptionKey, String salt) { try { return tryDecrypt(cipherText, encryptionKey, salt); } catch (Exception ex) { LOG.error("Could not decrypt (legacy) value.", ex); return null; } }
@Test public void testDecryptStaticISO10126PaddedCipherText() { // The cipherText was encrypted using the legacy AES/CBC/ISO10126Padding transformation. // If this test fails, we changed the transformation. If the change was intentional, this test must // be updated, and we need to create a migration to re-encrypt all existing secrets in the database. // Otherwise, existing secrets cannot be decrypted anymore! final String cipherText = "374219db59516b706234a60dd9a7e1e2"; final String salt = "53569ac046df1097"; final String decrypt = AESTools.decrypt(cipherText, "1234567890123456", salt); Assert.assertEquals("I am secret", decrypt); }
@Override public DataSink createDataSink(Context context) { FactoryHelper.createFactoryHelper(this, context).validateExcept(PROPERTIES_PREFIX); Configuration configuration = Configuration.fromMap(context.getFactoryConfiguration().toMap()); DeliveryGuarantee deliveryGuarantee = context.getFactoryConfiguration().get(KafkaDataSinkOptions.DELIVERY_GUARANTEE); ZoneId zoneId = ZoneId.systemDefault(); if (!Objects.equals( context.getPipelineConfiguration().get(PipelineOptions.PIPELINE_LOCAL_TIME_ZONE), PipelineOptions.PIPELINE_LOCAL_TIME_ZONE.defaultValue())) { zoneId = ZoneId.of( context.getPipelineConfiguration() .get(PipelineOptions.PIPELINE_LOCAL_TIME_ZONE)); } KeyFormat keyFormat = context.getFactoryConfiguration().get(KEY_FORMAT); SerializationSchema<Event> keySerialization = KeySerializationFactory.createSerializationSchema(configuration, keyFormat, zoneId); JsonSerializationType jsonSerializationType = context.getFactoryConfiguration().get(KafkaDataSinkOptions.VALUE_FORMAT); SerializationSchema<Event> valueSerialization = ChangeLogJsonFormatFactory.createSerializationSchema( configuration, jsonSerializationType, zoneId); final Properties kafkaProperties = new Properties(); Map<String, String> allOptions = context.getFactoryConfiguration().toMap(); allOptions.keySet().stream() .filter(key -> key.startsWith(PROPERTIES_PREFIX)) .forEach( key -> { final String value = allOptions.get(key); final String subKey = key.substring((PROPERTIES_PREFIX).length()); kafkaProperties.put(subKey, value); }); String topic = context.getFactoryConfiguration().get(KafkaDataSinkOptions.TOPIC); boolean addTableToHeaderEnabled = context.getFactoryConfiguration() .get(KafkaDataSinkOptions.SINK_ADD_TABLEID_TO_HEADER_ENABLED); String customHeaders = context.getFactoryConfiguration().get(KafkaDataSinkOptions.SINK_CUSTOM_HEADER); PartitionStrategy partitionStrategy = context.getFactoryConfiguration().get(KafkaDataSinkOptions.PARTITION_STRATEGY); return new KafkaDataSink( deliveryGuarantee, kafkaProperties, partitionStrategy, zoneId, keySerialization, valueSerialization, topic, addTableToHeaderEnabled, customHeaders); }
@Test void testCreateDataSink() { DataSinkFactory sinkFactory = FactoryDiscoveryUtils.getFactoryByIdentifier("kafka", DataSinkFactory.class); Assertions.assertThat(sinkFactory).isInstanceOf(KafkaDataSinkFactory.class); Configuration conf = Configuration.fromMap(ImmutableMap.<String, String>builder().build()); DataSink dataSink = sinkFactory.createDataSink( new FactoryHelper.DefaultContext( conf, conf, Thread.currentThread().getContextClassLoader())); Assertions.assertThat(dataSink).isInstanceOf(KafkaDataSink.class); }
@Override public ComponentCreationData createProjectAndBindToDevOpsPlatform(DbSession dbSession, CreationMethod creationMethod, Boolean monorepo, @Nullable String projectKey, @Nullable String projectName) { String key = Optional.ofNullable(projectKey).orElse(generateUniqueProjectKey()); boolean isManaged = devOpsPlatformSettings.isProvisioningEnabled(); Boolean shouldProjectBePrivate = shouldProjectBePrivate(devOpsProjectCreationContext.isPublic()); ComponentCreationData componentCreationData = projectCreator.createProject(dbSession, key, getProjectName(projectName), devOpsProjectCreationContext.defaultBranchName(), creationMethod, shouldProjectBePrivate, isManaged); ProjectDto projectDto = Optional.ofNullable(componentCreationData.projectDto()).orElseThrow(); createProjectAlmSettingDto(dbSession, projectDto, devOpsProjectCreationContext.almSettingDto(), monorepo); addScanPermissionToCurrentUser(dbSession, projectDto); BranchDto mainBranchDto = Optional.ofNullable(componentCreationData.mainBranchDto()).orElseThrow(); if (isManaged) { syncProjectPermissionsWithDevOpsPlatform(projectDto, mainBranchDto); } return componentCreationData; }
@Test
void createProjectAndBindToDevOpsPlatformFromScanner_whenVisibilitySyncDeactivated_successfullyCreatesProjectAndUseDefaultProjectVisibility() {
    // given
    mockGeneratedProjectKey();

    ComponentCreationData componentCreationData = mockProjectCreation("generated_orga2/repo1");
    ProjectAlmSettingDao projectAlmSettingDao = mock();
    when(dbClient.projectAlmSettingDao()).thenReturn(projectAlmSettingDao);

    when(projectDefaultVisibility.get(any())).thenReturn(Visibility.PRIVATE);

    // when
    ComponentCreationData actualComponentCreationData = defaultDevOpsProjectCreator.createProjectAndBindToDevOpsPlatform(dbClient.openSession(true),
        SCANNER_API_DEVOPS_AUTO_CONFIG, false, null, null);

    // then
    assertThat(actualComponentCreationData).isEqualTo(componentCreationData);

    ComponentCreationParameters componentCreationParameters = componentCreationParametersCaptor.getValue();
    assertComponentCreationParametersContainsCorrectInformation(componentCreationParameters, "generated_orga2/repo1", SCANNER_API_DEVOPS_AUTO_CONFIG);
    assertThat(componentCreationParameters.isManaged()).isFalse();
    assertThat(componentCreationParameters.newComponent().isPrivate()).isTrue();

    verify(projectAlmSettingDao).insertOrUpdate(any(), projectAlmSettingDtoCaptor.capture(), eq(ALM_SETTING_KEY), eq(REPOSITORY_NAME), eq("generated_orga2/repo1"));
    ProjectAlmSettingDto projectAlmSettingDto = projectAlmSettingDtoCaptor.getValue();
    assertAlmSettingsDtoContainsCorrectInformation(almSettingDto, requireNonNull(componentCreationData.projectDto()), projectAlmSettingDto);
}
@Override
public void add(Object task, boolean priority) {
    checkNotNull(task, "task can't be null");

    if (priority) {
        priorityQueue.add(task);
        normalQueue.add(TRIGGER_TASK);
    } else {
        normalQueue.add(task);
    }
}
@Test(expected = NullPointerException.class)
public void add_whenNull() {
    operationQueue.add(null, false);
}
SortedReplicas(Broker broker,
               Set<Function<Replica, Boolean>> selectionFuncs,
               List<Function<Replica, Integer>> priorityFuncs,
               Function<Replica, Double> scoreFunction) {
    this(broker, null, selectionFuncs, priorityFuncs, scoreFunction, true);
}
@Test
public void testScoreFunctionOnly() {
    Broker broker = generateBroker(NUM_REPLICAS);
    broker.trackSortedReplicas(SORT_NAME, null, null, SCORE_FUNC);
    SortedReplicas sr = broker.trackedSortedReplicas(SORT_NAME);
    double lastScore = Double.NEGATIVE_INFINITY;
    for (Replica r : sr.sortedReplicas(false)) {
        assertTrue(SCORE_FUNC.apply(r) >= lastScore);
        // Remember the previous score so the assertion actually verifies ascending order;
        // without this update, every replica would trivially compare against NEGATIVE_INFINITY.
        lastScore = SCORE_FUNC.apply(r);
    }
}
public Map<E, ValuesAndExtrapolations> peekCurrentWindow() {
    // prevent window rolling.
    _windowRollingLock.lock();
    try {
        Map<E, ValuesAndExtrapolations> result = new HashMap<>();
        _rawMetrics.forEach((entity, rawMetric) -> {
            ValuesAndExtrapolations vae = rawMetric.peekCurrentWindow(_currentWindowIndex, _metricDef);
            SortedSet<Long> currentWindows = new TreeSet<>(Collections.singleton(_currentWindowIndex));
            vae.setWindows(toWindows(currentWindows));
            result.put(entity, vae);
        });
        return result;
    } finally {
        _windowRollingLock.unlock();
    }
}
@Test
public void testPeekCurrentWindow() {
    MetricSampleAggregator<String, IntegerEntity> aggregator =
        new MetricSampleAggregator<>(NUM_WINDOWS, WINDOW_MS, MIN_SAMPLES_PER_WINDOW, 0, _metricDef);

    // Add samples to three entities.
    // Entity1 has 2 windows with insufficient data.
    // Entity2 has 2 windows with sufficient data.
    // Entity3 has 1 window with sufficient data, i.e. the active window does not have data.
    populateSampleAggregator(2, 1, aggregator, ENTITY1);
    populateSampleAggregator(2, MIN_SAMPLES_PER_WINDOW, aggregator, ENTITY2);
    CruiseControlUnitTestUtils.populateSampleAggregator(1, MIN_SAMPLES_PER_WINDOW, aggregator,
                                                        ENTITY3, 0, WINDOW_MS, _metricDef);

    Map<IntegerEntity, ValuesAndExtrapolations> currentWindowMetrics = aggregator.peekCurrentWindow();
    assertEquals(FORCED_INSUFFICIENT, currentWindowMetrics.get(ENTITY1).extrapolations().get(0));
    assertTrue(currentWindowMetrics.get(ENTITY2).extrapolations().isEmpty());
    assertEquals(NO_VALID_EXTRAPOLATION, currentWindowMetrics.get(ENTITY3).extrapolations().get(0));
}
@Override
public void createPort(K8sPort port) {
    checkNotNull(port, ERR_NULL_PORT);
    checkArgument(!Strings.isNullOrEmpty(port.portId()), ERR_NULL_PORT_ID);
    checkArgument(!Strings.isNullOrEmpty(port.networkId()), ERR_NULL_PORT_NET_ID);

    k8sNetworkStore.createPort(port);
    log.info(String.format(MSG_PORT, port.portId(), MSG_CREATED));
}
@Test(expected = IllegalArgumentException.class)
public void createDuplicatePort() {
    target.createPort(PORT);
    target.createPort(PORT);
}
@VisibleForTesting
List<ExecNode<?>> calculatePipelinedAncestors(ExecNode<?> node) {
    List<ExecNode<?>> ret = new ArrayList<>();
    AbstractExecNodeExactlyOnceVisitor ancestorVisitor =
            new AbstractExecNodeExactlyOnceVisitor() {
                @Override
                protected void visitNode(ExecNode<?> node) {
                    boolean hasAncestor = false;

                    if (!boundaries.contains(node)) {
                        List<InputProperty> inputProperties = node.getInputProperties();
                        for (int i = 0; i < inputProperties.size(); i++) {
                            // we only go through PIPELINED edges
                            if (inputProperties
                                    .get(i)
                                    .getDamBehavior()
                                    .stricterOrEqual(safeDamBehavior)) {
                                continue;
                            }
                            hasAncestor = true;
                            node.getInputEdges().get(i).getSource().accept(this);
                        }
                    }

                    if (!hasAncestor) {
                        ret.add(node);
                    }
                }
            };
    node.accept(ancestorVisitor);
    return ret;
}
@Test
void testCalculateBoundedPipelinedAncestors() {
    // P = InputProperty.DamBehavior.PIPELINED, E = InputProperty.DamBehavior.END_INPUT
    //
    // 0 -P-> 1 -P-> 2
    //        3 -P-> 4 -E/
    TestingBatchExecNode[] nodes = new TestingBatchExecNode[5];
    for (int i = 0; i < nodes.length; i++) {
        nodes[i] = new TestingBatchExecNode("TestingBatchExecNode" + i);
    }
    nodes[1].addInput(nodes[0]);
    nodes[2].addInput(nodes[1]);
    nodes[2].addInput(
            nodes[4],
            InputProperty.builder().damBehavior(InputProperty.DamBehavior.END_INPUT).build());
    nodes[4].addInput(nodes[3]);

    TestingInputPriorityConflictResolver resolver =
            new TestingInputPriorityConflictResolver(
                    Collections.singletonList(nodes[2]),
                    new HashSet<>(Collections.singleton(nodes[1])),
                    InputProperty.DamBehavior.END_INPUT);
    List<ExecNode<?>> ancestors = resolver.calculatePipelinedAncestors(nodes[2]);
    assertThat(ancestors).hasSize(1);
    assertThat(ancestors).contains(nodes[1]);
}
public void isNotEqualTo(@Nullable Object unexpected) {
    standardIsNotEqualTo(unexpected);
}
@Test
public void isNotEqualToFailureWithNulls() {
    Object o = null;
    expectFailure.whenTesting().that(o).isNotEqualTo(null);
    assertFailureKeys("expected not to be");
    assertFailureValue("expected not to be", "null");
}
public static int compare(Object o1, Object o2) {
    // When the operands are two different Number implementations (e.g. Integer vs. BigDecimal),
    // normalize both to BigDecimal before comparing; otherwise fall back to natural ordering.
    return o1.getClass() != o2.getClass() && o1 instanceof Number && o2 instanceof Number
            ? asBigDecimal((Number) o1).compareTo(asBigDecimal((Number) o2))
            : ((Comparable) o1).compareTo(o2);
}
@Test
public void compareWithNumbers() {
    assertThat(OperatorUtils.compare(1, 1)).isZero();
    assertThat(OperatorUtils.compare(1L, 1L)).isZero();
    assertThat(OperatorUtils.compare(1.0f, 1.0f)).isZero();
    assertThat(OperatorUtils.compare(1.0, 1.0)).isZero();

    assertThat(OperatorUtils.compare(1, 2)).isNegative();
    assertThat(OperatorUtils.compare(1L, 2L)).isNegative();
    assertThat(OperatorUtils.compare(1.0f, 2.0f)).isNegative();
    assertThat(OperatorUtils.compare(1.0, 2.0)).isNegative();

    assertThat(OperatorUtils.compare(2, 1)).isPositive();
    assertThat(OperatorUtils.compare(2L, 1L)).isPositive();
    assertThat(OperatorUtils.compare(2.0f, 1.0f)).isPositive();
    assertThat(OperatorUtils.compare(2.0, 1.0)).isPositive();

    assertThat(OperatorUtils.compare(1, new BigDecimal("0.99"))).isPositive();
    assertThat(OperatorUtils.compare(1, new BigDecimal("1.0"))).isZero();
    assertThat(OperatorUtils.compare(1, new BigDecimal("1.01"))).isNegative();

    assertThat(OperatorUtils.compare(new BigDecimal("1.00"), new BigDecimal("0.99"))).isPositive();
    assertThat(OperatorUtils.compare(new BigDecimal("1.00"), new BigDecimal("1.0"))).isZero();
    assertThat(OperatorUtils.compare(new BigDecimal("1.00"), new BigDecimal("1.01"))).isNegative();
}
@Override
public void onDataReceived(@NonNull final BluetoothDevice device, @NonNull final Data data) {
    super.onDataReceived(device, data);

    if (data.size() < 2) {
        onInvalidDataReceived(device, data);
        return;
    }

    // Read flags
    int offset = 0;
    final int flags = data.getIntValue(Data.FORMAT_UINT8, offset);
    final int heartRateType = (flags & 0x01) == 0 ? Data.FORMAT_UINT8 : Data.FORMAT_UINT16_LE;
    final int sensorContactStatus = (flags & 0x06) >> 1;
    final boolean sensorContactSupported = sensorContactStatus == 2 || sensorContactStatus == 3;
    final boolean sensorContactDetected = sensorContactStatus == 3;
    final boolean energyExpandedPresent = (flags & 0x08) != 0;
    final boolean rrIntervalsPresent = (flags & 0x10) != 0;
    offset += 1;

    // Validate packet length
    if (data.size() < 1 + (heartRateType & 0x0F)
            + (energyExpandedPresent ? 2 : 0) + (rrIntervalsPresent ? 2 : 0)) {
        onInvalidDataReceived(device, data);
        return;
    }

    // Prepare data
    final Boolean sensorContact = sensorContactSupported ? sensorContactDetected : null;

    final int heartRate = data.getIntValue(heartRateType, offset);
    offset += heartRateType & 0xF;

    Integer energyExpanded = null;
    if (energyExpandedPresent) {
        energyExpanded = data.getIntValue(Data.FORMAT_UINT16_LE, offset);
        offset += 2;
    }

    List<Integer> rrIntervals = null;
    if (rrIntervalsPresent) {
        final int count = (data.size() - offset) / 2;
        final List<Integer> intervals = new ArrayList<>(count);
        for (int i = 0; i < count; ++i) {
            intervals.add(data.getIntValue(Data.FORMAT_UINT16_LE, offset));
            offset += 2;
        }
        rrIntervals = Collections.unmodifiableList(intervals);
    }

    onHeartRateMeasurementReceived(device, heartRate, sensorContact, energyExpanded, rrIntervals);
}
@Test
public void onHeartRateMeasurementReceived_uint16() {
    success = false;
    final Data data = new Data(new byte[] { 0b11111, 1, 1, 50, 1, 1, 0, 2, 1, (byte) 0xFF, (byte) 0xFF });
    response.onDataReceived(null, data);
    assertTrue(response.isValid());
    assertTrue(success);
    assertEquals(257, heartRate);
    assertNotNull(contactDetected);
    assertTrue(contactDetected);
    assertNotNull(energyExpanded);
    assertEquals(306, energyExpanded.intValue());
    assertNotNull(rrIntervals);
    assertEquals(3, rrIntervals.size());
    assertEquals(1, rrIntervals.get(0).intValue());
    assertEquals(258, rrIntervals.get(1).intValue());
    assertEquals(65535, rrIntervals.get(2).intValue());
}
@Override
public List<String> listTableNames(String dbName) {
    return deltaOps.getAllTableNames(dbName);
}
@Test
public void testListTableNames() {
    List<String> tableNames = deltaLakeMetadata.listTableNames("db1");
    Assert.assertEquals(2, tableNames.size());
    Assert.assertEquals("table1", tableNames.get(0));
    Assert.assertEquals("table2", tableNames.get(1));
}
static void cleanStackTrace(Throwable throwable) {
    new StackTraceCleaner(throwable).clean(Sets.<Throwable>newIdentityHashSet());
}
@Test
public void truthFrameWithoutSubject_shouldNotBeCleaned() {
    Throwable throwable =
        createThrowableWithStackTrace(
            "com.google.random.Package",
            // two or more Truth frames would trigger the string-matching mechanism that
            // collapses them, so only a single Truth frame is used here
            "com.google.common.truth.FailureMetadata",
            "com.google.example.SomeClass");

    StackTraceCleaner.cleanStackTrace(throwable);

    assertThat(throwable.getStackTrace())
        .isEqualTo(
            new StackTraceElement[] {
                createStackTraceElement("com.google.random.Package"),
                createStackTraceElement("com.google.common.truth.FailureMetadata"),
                createStackTraceElement("com.google.example.SomeClass"),
            });
}
public static void repair(final Path path) throws IOException {
    if (!path.toFile().isDirectory()) {
        throw new IllegalArgumentException(
            String.format("Given PQ path %s is not a directory.", path)
        );
    }
    LOGGER.info("Start repairing queue dir: {}", path.toString());

    deleteTempCheckpoint(path);

    final Map<Integer, Path> pageFiles = new HashMap<>();
    try (final DirectoryStream<Path> pfs = Files.newDirectoryStream(path, "page.*")) {
        pfs.forEach(p -> pageFiles.put(
            Integer.parseInt(p.getFileName().toString().substring("page.".length())), p)
        );
    }
    final Map<Integer, Path> checkpointFiles = new HashMap<>();
    try (final DirectoryStream<Path> cpfs = Files.newDirectoryStream(path, "checkpoint.*")) {
        cpfs.forEach(
            c -> {
                final String cpFilename = c.getFileName().toString();
                if (!"checkpoint.head".equals(cpFilename)) {
                    checkpointFiles.put(
                        Integer.parseInt(cpFilename.substring("checkpoint.".length())), c
                    );
                }
            }
        );
    }
    deleteFullyAcked(path, pageFiles, checkpointFiles);
    fixMissingPages(pageFiles, checkpointFiles);
    fixZeroSizePages(pageFiles, checkpointFiles);
    fixMissingCheckpoints(pageFiles, checkpointFiles);
    LOGGER.info("Repair is done");
}
@Test
public void testRemoveTempCheckPoint() throws Exception {
    Files.createFile(dataPath.resolve("checkpoint.head.tmp"));
    Files.createFile(dataPath.resolve("checkpoint.1.tmp"));
    PqRepair.repair(dataPath);
    verifyQueue();
}
@Converter(fallback = true)
public static <T> T convertTo(Class<T> type, Exchange exchange, Object value, TypeConverterRegistry registry) {
    if (NodeInfo.class.isAssignableFrom(value.getClass())) {
        // use a fallback type converter so we can convert the embedded body if the value is NodeInfo
        NodeInfo ni = (NodeInfo) value;
        // first try to find a Converter for Node
        TypeConverter tc = registry.lookup(type, Node.class);
        if (tc != null) {
            Node node = NodeOverNodeInfo.wrap(ni);
            return tc.convertTo(type, exchange, node);
        }
        // if this does not exist we can also try NodeList (there are some type converters for that) as
        // the default Xerces Node implementation also implements NodeList.
        tc = registry.lookup(type, NodeList.class);
        if (tc != null) {
            List<NodeInfo> nil = new LinkedList<>();
            nil.add(ni);
            return tc.convertTo(type, exchange, toDOMNodeList(nil));
        }
    } else if (List.class.isAssignableFrom(value.getClass())) {
        TypeConverter tc = registry.lookup(type, NodeList.class);
        if (tc != null) {
            List<NodeInfo> lion = new LinkedList<>();
            for (Object o : (List<?>) value) {
                if (o instanceof NodeInfo) {
                    lion.add((NodeInfo) o);
                }
            }
            if (!lion.isEmpty()) {
                NodeList nl = toDOMNodeList(lion);
                return tc.convertTo(type, exchange, nl);
            }
        }
    } else if (NodeOverNodeInfo.class.isAssignableFrom(value.getClass())) {
        // NodeOverNodeInfo is a read-only Node implementation from Saxon. In contrast to the JDK
        // com.sun.org.apache.xerces.internal.dom.NodeImpl class it does not implement NodeList, but
        // many Camel type converters are based on that interface. Therefore we convert to NodeList and
        // try type conversion in the fallback type converter.
        TypeConverter tc = registry.lookup(type, NodeList.class);
        if (tc != null) {
            List<Node> domNodeList = new LinkedList<>();
            domNodeList.add((NodeOverNodeInfo) value);
            return tc.convertTo(type, exchange, new DOMNodeList(domNodeList));
        }
    }
    return null;
}
@Test
public void convertToDocument() {
    Document document = context.getTypeConverter().convertTo(Document.class, exchange, doc);
    assertNotNull(document);

    String string = context.getTypeConverter().convertTo(String.class, exchange, document);
    assertEquals(CONTENT, string);
}
public static List<String> getParams(String description) {
    // find all param keys in the description that match the pattern
    Matcher matcher = PARAMS_PATTERN.matcher(description);
    List<String> params = new ArrayList<>();
    while (matcher.find()) {
        String key = matcher.group(1);
        params.add(key);
    }
    return params;
}
@Test
void testGetParamsForDescription() {
    String description = "test description with param <key1>, <key2> and <key3>.";
    Assertions.assertIterableEquals(
            Arrays.asList("key1", "key2", "key3"), ExceptionParamsUtil.getParams(description));

    String description2 = "test description with no param.";
    Assertions.assertIterableEquals(
            Collections.emptyList(), ExceptionParamsUtil.getParams(description2));

    String description3 = "test description with wrong param <>, <, >, < >.";
    Assertions.assertIterableEquals(
            Collections.emptyList(), ExceptionParamsUtil.getParams(description3));
}
@Udf(description = "Returns the inverse (arc) tangent of y / x")
public Double atan2(
    @UdfParameter(
        value = "y",
        description = "The ordinate (y) coordinate."
    ) final Integer y,
    @UdfParameter(
        value = "x",
        description = "The abscissa (x) coordinate."
    ) final Integer x
) {
    return atan2(y == null ? null : y.doubleValue(), x == null ? null : x.doubleValue());
}
@Test
public void shouldHandleNull() {
    assertThat(udf.atan2(null, 1), is(nullValue()));
    assertThat(udf.atan2(null, 1L), is(nullValue()));
    assertThat(udf.atan2(null, 0.45), is(nullValue()));
    assertThat(udf.atan2(1, null), is(nullValue()));
    assertThat(udf.atan2(1L, null), is(nullValue()));
    assertThat(udf.atan2(0.45, null), is(nullValue()));
    assertThat(udf.atan2((Integer) null, null), is(nullValue()));
    assertThat(udf.atan2((Long) null, null), is(nullValue()));
    assertThat(udf.atan2((Double) null, null), is(nullValue()));
}
@Override
public void validate() {
    super.validate();
    sourceOptions.validate();
}
@Test
public void testSourceOptionsWithNegativeNumRecords() throws Exception {
    thrown.expect(IllegalArgumentException.class);
    thrown.expectMessage("numRecords should be a non-negative number, but found -100");
    testSourceOptions.numRecords = -100;
    testSourceOptions.validate();
}
@Override
public V get(final K key) {
    Objects.requireNonNull(key);
    final List<ReadOnlyKeyValueStore<K, V>> stores = storeProvider.stores(storeName, storeType);
    for (final ReadOnlyKeyValueStore<K, V> store : stores) {
        try {
            final V result = store.get(key);
            if (result != null) {
                return result;
            }
        } catch (final InvalidStateStoreException e) {
            throw new InvalidStateStoreException(
                "State store is not available anymore and may have been migrated to another instance; "
                    + "please re-discover its location from the state metadata.");
        }
    }
    return null;
}
@Test
public void shouldReturnNullIfKeyDoesNotExist() {
    assertNull(theStore.get("whatever"));
}
@Override
public HttpResponse send(HttpRequest httpRequest) throws IOException {
    return send(httpRequest, null);
}
@Test
public void send_always_canonicalizesRequestUrl() throws IOException, InterruptedException {
    String responseBody = "test response";
    mockWebServer.enqueue(
        new MockResponse()
            .setResponseCode(HttpStatus.OK.code())
            .setHeader(CONTENT_TYPE, MediaType.PLAIN_TEXT_UTF_8.toString())
            .setBody(responseBody));
    mockWebServer.start();
    HttpUrl baseUrl = mockWebServer.url("/");
    String requestUrl =
        new URL(baseUrl.scheme(), baseUrl.host(), baseUrl.port(), "/%2e%2e/%2e%2e/etc/passwd")
            .toString();

    httpClient.send(get(requestUrl).withEmptyHeaders().build());

    assertThat(mockWebServer.takeRequest().getPath()).isEqualTo("/etc/passwd");
}
public static <T extends PipelineOptions> T as(Class<T> klass) {
    return new Builder().as(klass);
}
@Test
public void testNotAllGettersAnnotatedWithJsonIgnore() throws Exception {
    // Initial construction is valid.
    GetterWithJsonIgnore options = PipelineOptionsFactory.as(GetterWithJsonIgnore.class);

    expectedException.expect(IllegalArgumentException.class);
    expectedException.expectMessage(
        "Expected getter for property [object] to be marked with @JsonIgnore on all ["
            + "org.apache.beam.sdk.options.PipelineOptionsFactoryTest$GetterWithJsonIgnore, "
            + "org.apache.beam.sdk.options.PipelineOptionsFactoryTest$MissingSetter], "
            + "found only on [org.apache.beam.sdk.options."
            + "PipelineOptionsFactoryTest$GetterWithJsonIgnore]");

    // The conversion attempt is the point at which this should fail.
    options.as(CombinedObject.class);
}
@Override
public boolean isEmpty() {
    return multiResult.isEmpty();
}
@Test
public void testIsEmpty_whenEmpty() {
    MultiResult<Integer> multiResult = new MultiResult<>();
    immutableMultiResult = new ImmutableMultiResult<>(multiResult);
    assertTrue(immutableMultiResult.isEmpty());
}
public static Map<String, String> parseToMap(String attributesModification) {
    if (Strings.isNullOrEmpty(attributesModification)) {
        return new HashMap<>();
    }

    // format: +key1=value1,+key2=value2,-key3,+key4=value4
    Map<String, String> attributes = new HashMap<>();
    String[] kvs = attributesModification.split(ATTR_ARRAY_SEPARATOR_COMMA);
    for (String kv : kvs) {
        String key;
        String value;
        if (kv.contains(ATTR_KEY_VALUE_EQUAL_SIGN)) {
            String[] splits = kv.split(ATTR_KEY_VALUE_EQUAL_SIGN);
            key = splits[0];
            value = splits[1];
            if (!key.contains(ATTR_ADD_PLUS_SIGN)) {
                throw new RuntimeException("add/alter attribute format is wrong: " + key);
            }
        } else {
            key = kv;
            value = "";
            if (!key.contains(ATTR_DELETE_MINUS_SIGN)) {
                throw new RuntimeException("delete attribute format is wrong: " + key);
            }
        }
        String old = attributes.put(key, value);
        if (old != null) {
            throw new RuntimeException("key duplication: " + key);
        }
    }
    return attributes;
}
@Test
public void parseToMap_ValidAttributesModification_ReturnsExpectedMap() {
    String attributesModification = "+key1=value1,+key2=value2,-key3,+key4=value4";
    Map<String, String> result = AttributeParser.parseToMap(attributesModification);
    Map<String, String> expectedMap = new HashMap<>();
    expectedMap.put("+key1", "value1");
    expectedMap.put("+key2", "value2");
    expectedMap.put("-key3", "");
    expectedMap.put("+key4", "value4");
    assertEquals(expectedMap, result);
}
@Override
public PinotDataBuffer getBuffer(String column, IndexType<?, ?, ?> type) throws IOException {
    return checkAndGetIndexBuffer(column, type);
}
@Test(expectedExceptions = RuntimeException.class)
public void testMissingIndex() throws IOException, ConfigurationException {
    try (SingleFileIndexDirectory columnDirectory =
            new SingleFileIndexDirectory(TEMP_DIR, _segmentMetadata, ReadMode.mmap)) {
        columnDirectory.getBuffer("column1", StandardIndexes.dictionary());
    }
}
@Override
public List<KsqlPartitionLocation> locate(
    final List<KsqlKey> keys,
    final RoutingOptions routingOptions,
    final RoutingFilterFactory routingFilterFactory,
    final boolean isRangeScan
) {
    if (isRangeScan && keys.isEmpty()) {
        throw new IllegalStateException("Query is a range scan but found no range keys.");
    }
    final ImmutableList.Builder<KsqlPartitionLocation> partitionLocations = ImmutableList.builder();
    final Set<Integer> filterPartitions = routingOptions.getPartitions();
    final Optional<Set<KsqlKey>> keySet = keys.isEmpty() ? Optional.empty()
        : Optional.of(Sets.newHashSet(keys));

    // Depending on whether this is a key-based lookup, determine which metadata method to use.
    // If we don't have keys, find the metadata for all partitions since we'll run the query for
    // all partitions of the state store rather than a particular one.
    // For issue #7174: temporarily turn off key-based metadata lookup
    // if there is more than one key.
    final List<PartitionMetadata> metadata;
    if (keys.size() == 1 && keys.get(0).getKey().size() == 1 && !isRangeScan) {
        metadata = getMetadataForKeys(keys, filterPartitions);
    } else {
        metadata = getMetadataForAllPartitions(filterPartitions, keySet);
    }

    if (metadata.isEmpty()) {
        final MaterializationException materializationException = new MaterializationException(
            "Cannot determine which host contains the required partitions to serve the pull query. \n"
                + "The underlying persistent query may be restarting (e.g. as a result of "
                + "ALTER SYSTEM); view the status of your query by issuing <DESCRIBE foo>.");
        LOG.debug(materializationException.getMessage());
        throw materializationException;
    }

    // Go through the metadata and group them by partition.
    for (PartitionMetadata partitionMetadata : metadata) {
        LOG.debug("Handling pull query for partition {} of state store {}.",
            partitionMetadata.getPartition(), storeName);
        final HostInfo activeHost = partitionMetadata.getActiveHost();
        final Set<HostInfo> standByHosts = partitionMetadata.getStandbyHosts();
        final int partition = partitionMetadata.getPartition();
        final Optional<Set<KsqlKey>> partitionKeys = partitionMetadata.getKeys();
        LOG.debug("Active host {}, standby {}, partition {}.", activeHost, standByHosts, partition);

        // For a given partition, find the ordered, filtered list of hosts to consider
        final List<KsqlNode> filteredHosts = getFilteredHosts(routingOptions, routingFilterFactory,
            activeHost, standByHosts, partition);

        partitionLocations.add(new PartitionLocation(partitionKeys, partition, filteredHosts));
    }
    return partitionLocations.build();
}
@Test
public void shouldReturnActiveAndStandBysWhenRoutingStandbyEnabledHeartBeatNotEnabled() {
    // Given:
    getActiveAndStandbyMetadata();

    // When:
    final List<KsqlPartitionLocation> result = locator.locate(ImmutableList.of(KEY), routingOptions,
        routingFilterFactoryStandby, false);

    // Then:
    List<KsqlNode> nodeList = result.get(0).getNodes();
    assertThat(nodeList.size(), is(3));
    assertThat(nodeList.stream().findFirst().get(), is(activeNode));
    assertThat(nodeList, containsInAnyOrder(activeNode, standByNode1, standByNode2));
}
public static Record convertDataRecordToRecord(final String database, final String schema, final DataRecord dataRecord) {
    List<TableColumn> before = new LinkedList<>();
    List<TableColumn> after = new LinkedList<>();
    for (Column column : dataRecord.getColumns()) {
        before.add(TableColumn.newBuilder().setName(column.getName())
                .setValue(Any.pack(ColumnValueConvertUtils.convertToProtobufMessage(column.getOldValue()))).build());
        after.add(TableColumn.newBuilder().setName(column.getName())
                .setValue(Any.pack(ColumnValueConvertUtils.convertToProtobufMessage(column.getValue()))).build());
    }
    MetaData metaData = MetaData.newBuilder().setDatabase(database).setSchema(Strings.nullToEmpty(schema)).setTable(dataRecord.getTableName()).build();
    return DataRecordResult.Record.newBuilder().setMetaData(metaData).addAllBefore(before).addAllAfter(after)
            .setTransactionCommitMillis(dataRecord.getCommitTime())
            .setDataChangeType(getDataChangeType(dataRecord.getType())).build();
}
@Test
void assertConvertDataRecordToRecord() throws InvalidProtocolBufferException, SQLException {
    DataRecord dataRecord = new DataRecord(PipelineSQLOperationType.INSERT, "t_order", new IntegerPrimaryKeyIngestPosition(0L, 1L), 2);
    dataRecord.addColumn(new Column("order_id", BigInteger.ONE, false, true));
    dataRecord.addColumn(new Column("price", BigDecimal.valueOf(123L), false, false));
    dataRecord.addColumn(new Column("user_id", Long.MAX_VALUE, false, false));
    dataRecord.addColumn(new Column("item_id", Integer.MAX_VALUE, false, false));
    dataRecord.addColumn(new Column("create_date", LocalDate.now(), false, false));
    dataRecord.addColumn(new Column("create_date2", Date.valueOf(LocalDate.now()), false, false));
    dataRecord.addColumn(new Column("create_time", LocalTime.now(), false, false));
    dataRecord.addColumn(new Column("create_time2", OffsetTime.now(), false, false));
    dataRecord.addColumn(new Column("create_datetime", LocalDateTime.now(), false, false));
    dataRecord.addColumn(new Column("create_datetime2", OffsetDateTime.now(), false, false));
    dataRecord.addColumn(new Column("empty", null, false, false));
    Blob mockedBlob = mock(Blob.class);
    when(mockedBlob.getBytes(anyLong(), anyInt())).thenReturn(new byte[]{-1, 0, 1});
    dataRecord.addColumn(new Column("data_blob", mockedBlob, false, false));
    Clob mockedClob = mock(Clob.class);
    when(mockedClob.getSubString(anyLong(), anyInt())).thenReturn("clob\n");
    dataRecord.addColumn(new Column("text_clob", mockedClob, false, false));
    dataRecord.addColumn(new Column("update_time", new Timestamp(System.currentTimeMillis()), false, false));
    TypeRegistry registry = TypeRegistry.newBuilder()
            .add(EmptyProto.getDescriptor().getMessageTypes())
            .add(TimestampProto.getDescriptor().getMessageTypes())
            .add(WrappersProto.getDescriptor().getMessageTypes()).build();
    Record expectedRecord = DataRecordResultConvertUtils.convertDataRecordToRecord("test", null, dataRecord);
    String print = JsonFormat.printer().usingTypeRegistry(registry).print(expectedRecord);
    Builder actualRecord = Record.newBuilder();
    JsonFormat.parser().usingTypeRegistry(registry).merge(print, actualRecord);
    assertThat(actualRecord.build(), is(expectedRecord));
}
@Override
public <T> Mono<T> run(Mono<T> toRun, Function<Throwable, Mono<T>> fallback) {
    Mono<T> toReturn = toRun.transform(new PolarisCircuitBreakerReactorTransformer<>(invokeHandler));
    if (fallback != null) {
        toReturn = toReturn.onErrorResume(throwable -> {
            if (throwable instanceof CallAbortedException) {
                PolarisCircuitBreakerUtils.reportStatus(consumerAPI, conf, (CallAbortedException) throwable);
            }
            return fallback.apply(throwable);
        });
    }
    return toReturn;
}
@Test
public void run() {
    this.reactiveContextRunner.run(context -> {
        ReactivePolarisCircuitBreakerFactory polarisCircuitBreakerFactory =
                context.getBean(ReactivePolarisCircuitBreakerFactory.class);
        ReactiveCircuitBreaker cb = polarisCircuitBreakerFactory.create(SERVICE_CIRCUIT_BREAKER);
        PolarisCircuitBreakerConfigBuilder.PolarisCircuitBreakerConfiguration configuration =
                polarisCircuitBreakerFactory.configBuilder(SERVICE_CIRCUIT_BREAKER).build();
        polarisCircuitBreakerFactory.configureDefault(id -> configuration);

        assertThat(Mono.just("foobar").transform(cb::run).block()).isEqualTo("foobar");
        assertThat(Mono.error(new RuntimeException("boom")).transform(it -> cb.run(it, t -> Mono.just("fallback")))
                .block()).isEqualTo("fallback");

        assertThat(Flux.just("foobar", "hello world").transform(cb::run).collectList().block())
                .isEqualTo(Arrays.asList("foobar", "hello world"));
        assertThat(Flux.error(new RuntimeException("boom")).transform(it -> cb.run(it, t -> Flux.just("fallback")))
                .collectList().block()).isEqualTo(Collections.singletonList("fallback"));
    });
}