comment
stringlengths
1
45k
method_body
stringlengths
23
281k
target_code
stringlengths
0
5.16k
method_body_after
stringlengths
12
281k
context_before
stringlengths
8
543k
context_after
stringlengths
8
543k
Good, catch fixed
public boolean isGroupCoverageSufficient(long activeDocuments, long medianDocuments) { double documentCoverage = 100.0 * (double) activeDocuments / medianDocuments; return ! (medianDocuments > 0 && documentCoverage < minActivedocsPercentage); }
double documentCoverage = 100.0 * (double) activeDocuments / medianDocuments;
public boolean isGroupCoverageSufficient(long activeDocuments, long medianDocuments) { if (medianDocuments <= 0) return true; double documentCoverage = 100.0 * (double) activeDocuments / medianDocuments; return documentCoverage >= minActivedocsPercentage; }
class SearchGroupsImpl implements SearchGroups { private final Map<Integer, Group> groups; private final double minActivedocsPercentage; public SearchGroupsImpl(Map<Integer, Group> groups, double minActivedocsPercentage) { this.groups = Map.copyOf(groups); this.minActivedocsPercentage = minActivedocsPercentage; } @Over...
class SearchGroupsImpl implements SearchGroups { private final Map<Integer, Group> groups; private final double minActivedocsPercentage; public SearchGroupsImpl(Map<Integer, Group> groups, double minActivedocsPercentage) { this.groups = Map.copyOf(groups); this.minActivedocsPercentage = minActivedocsPercentage; } @Over...
Should be `equals`, so non-empty settings always throws.
public LoadBalancerInstance create(LoadBalancerSpec spec, boolean force) { if (spec.settings() != LoadBalancerSettings.empty) throw new IllegalArgumentException("custom load balancer settings are not supported with " + getClass()); return new LoadBalancerInstance(Optional.of(DomainName.of(vipHostname)), Optional.empty(...
if (spec.settings() != LoadBalancerSettings.empty) throw new IllegalArgumentException("custom load balancer settings are not supported with " + getClass());
public LoadBalancerInstance create(LoadBalancerSpec spec, boolean force) { if ( ! spec.settings().isEmpty()) throw new IllegalArgumentException("custom load balancer settings are not supported with " + getClass()); return new LoadBalancerInstance(Optional.of(DomainName.of(vipHostname)), Optional.empty(), Optional.empty...
class SharedLoadBalancerService implements LoadBalancerService { private final String vipHostname; public SharedLoadBalancerService(String vipHostname) { this.vipHostname = Objects.requireNonNull(vipHostname); } @Override @Override public void remove(LoadBalancer loadBalancer) { } @Override public Protocol protocol() {...
class SharedLoadBalancerService implements LoadBalancerService { private final String vipHostname; public SharedLoadBalancerService(String vipHostname) { this.vipHostname = Objects.requireNonNull(vipHostname); } @Override @Override public void remove(LoadBalancer loadBalancer) { } @Override public Protocol protocol() {...
Yes, replaced with `isEmpty()` method.
public LoadBalancerInstance create(LoadBalancerSpec spec, boolean force) { if (spec.settings() != LoadBalancerSettings.empty) throw new IllegalArgumentException("custom load balancer settings are not supported with " + getClass()); return new LoadBalancerInstance(Optional.of(DomainName.of(vipHostname)), Optional.empty(...
if (spec.settings() != LoadBalancerSettings.empty) throw new IllegalArgumentException("custom load balancer settings are not supported with " + getClass());
public LoadBalancerInstance create(LoadBalancerSpec spec, boolean force) { if ( ! spec.settings().isEmpty()) throw new IllegalArgumentException("custom load balancer settings are not supported with " + getClass()); return new LoadBalancerInstance(Optional.of(DomainName.of(vipHostname)), Optional.empty(), Optional.empty...
class SharedLoadBalancerService implements LoadBalancerService { private final String vipHostname; public SharedLoadBalancerService(String vipHostname) { this.vipHostname = Objects.requireNonNull(vipHostname); } @Override @Override public void remove(LoadBalancer loadBalancer) { } @Override public Protocol protocol() {...
class SharedLoadBalancerService implements LoadBalancerService { private final String vipHostname; public SharedLoadBalancerService(String vipHostname) { this.vipHostname = Objects.requireNonNull(vipHostname); } @Override @Override public void remove(LoadBalancer loadBalancer) { } @Override public Protocol protocol() {...
Do we need to allow empty here? Would be nice if the empty was a singleton created above.
public static CloudAccount from(String cloudAccount) { return switch (cloudAccount) { case "", "default" -> empty; default -> new CloudAccount(cloudAccount, EMPTY + "|" + AWS_ACCOUNT_ID + "|" + GCP_PROJECT_ID, "cloud account"); }; }
default -> new CloudAccount(cloudAccount, EMPTY + "|" + AWS_ACCOUNT_ID + "|" + GCP_PROJECT_ID, "cloud account");
public static CloudAccount from(String cloudAccount) { return switch (cloudAccount) { case "", "default" -> empty; default -> new CloudAccount(cloudAccount, AWS_ACCOUNT_ID + "|" + GCP_PROJECT_ID, "cloud account"); }; }
class CloudAccount extends PatternedStringWrapper<CloudAccount> { private static final String EMPTY = ""; private static final String AWS_ACCOUNT_ID = "[0-9]{12}"; private static final String GCP_PROJECT_ID = "[a-z][a-z0-9-]{4,28}[a-z0-9]"; /** Empty value. When this is used, either implicitly or explicitly, the zone w...
class CloudAccount extends PatternedStringWrapper<CloudAccount> { private static final String EMPTY = ""; private static final String AWS_ACCOUNT_ID = "[0-9]{12}"; private static final String GCP_PROJECT_ID = "[a-z][a-z0-9-]{4,28}[a-z0-9]"; /** Empty value. When this is used, either implicitly or explicitly, the zone w...
Use camel case like we do for the other APIs and `serviceId` in this API? Also in /nodes/v2
private HttpResponse getPrivateServiceInfo(String tenantName, String applicationName, String instanceName, String environment, String region) { List<LoadBalancer> lbs = controller.serviceRegistry().configServer().getLoadBalancers(ApplicationId.from(tenantName, applicationName, instanceName), ZoneId.from(environment, re...
Cursor lbArray = slime.setObject().setArray("load-balancers");
private HttpResponse getPrivateServiceInfo(String tenantName, String applicationName, String instanceName, String environment, String region) { List<LoadBalancer> lbs = controller.serviceRegistry().configServer().getLoadBalancers(ApplicationId.from(tenantName, applicationName, instanceName), ZoneId.from(environment, re...
class ApplicationApiHandler extends AuditLoggingRequestHandler { private static final ObjectMapper jsonMapper = new ObjectMapper(); private final Controller controller; private final AccessControlRequests accessControlRequests; private final TestConfigSerializer testConfigSerializer; @Inject public ApplicationApiHandle...
class ApplicationApiHandler extends AuditLoggingRequestHandler { private static final ObjectMapper jsonMapper = new ObjectMapper(); private final Controller controller; private final AccessControlRequests accessControlRequests; private final TestConfigSerializer testConfigSerializer; @Inject public ApplicationApiHandle...
Consider not using dash in parameter name (the other parameters don't)
protected static Options createOptions() { Options options = new Options(); options.addOption("h", "help", false, "Show this syntax page."); options.addOption(Option.builder("d") .longOpt("datahandler") .hasArg(true) .argName("target") .desc("Send results to the given target.") .build()); options.addOption(Option.build...
.longOpt("tensor-short-form")
protected static Options createOptions() { Options options = new Options(); options.addOption("h", "help", false, "Show this syntax page."); options.addOption(Option.builder("d") .longOpt("datahandler") .hasArg(true) .argName("target") .desc("Send results to the given target.") .build()); options.addOption(Option.build...
class JvmRuntimeShutdownHookRegistrar implements ShutdownHookRegistrar { @Override public void registerShutdownHook(Thread thread) { Runtime.getRuntime().addShutdownHook(thread); } }
class JvmRuntimeShutdownHookRegistrar implements ShutdownHookRegistrar { @Override public void registerShutdownHook(Thread thread) { Runtime.getRuntime().addShutdownHook(thread); } }
See comment for vespa-visit
private static Options createOptions() { Options options = new Options(); options.addOption(Option.builder("h") .hasArg(false) .desc("Show this syntax page.") .longOpt(HELP_OPTION) .build()); options.addOption(Option.builder("i") .hasArg(false) .desc("Show only identifiers of retrieved documents.") .longOpt(PRINTIDS_OP...
.longOpt(TENSOR_SHORT_FORM_OPTION)
private static Options createOptions() { Options options = new Options(); options.addOption(Option.builder("h") .hasArg(false) .desc("Show this syntax page.") .longOpt(HELP_OPTION) .build()); options.addOption(Option.builder("i") .hasArg(false) .desc("Show only identifiers of retrieved documents.") .longOpt(PRINTIDS_OP...
class CommandLineOptions { public static final String HELP_OPTION = "help"; public static final String PRINTIDS_OPTION = "printids"; public static final String FIELDSET_OPTION = "fieldset"; public static final String CLUSTER_OPTION = "cluster"; public static final String ROUTE_OPTION = "route"; public static final Stri...
class CommandLineOptions { public static final String HELP_OPTION = "help"; public static final String PRINTIDS_OPTION = "printids"; public static final String FIELDSET_OPTION = "fieldset"; public static final String CLUSTER_OPTION = "cluster"; public static final String ROUTE_OPTION = "route"; public static final Stri...
I considered it, but I think writingoptionslikethisispointlesslyhardtoread so I went for the "be the change you want to see" approach. Save for entirely replacing the tool it's not given how we can ever transition away from a (IMHO) very suboptimal naming strategy if it cannot be done gradually. But it might be less h...
protected static Options createOptions() { Options options = new Options(); options.addOption("h", "help", false, "Show this syntax page."); options.addOption(Option.builder("d") .longOpt("datahandler") .hasArg(true) .argName("target") .desc("Send results to the given target.") .build()); options.addOption(Option.build...
.longOpt("tensor-short-form")
protected static Options createOptions() { Options options = new Options(); options.addOption("h", "help", false, "Show this syntax page."); options.addOption(Option.builder("d") .longOpt("datahandler") .hasArg(true) .argName("target") .desc("Send results to the given target.") .build()); options.addOption(Option.build...
class JvmRuntimeShutdownHookRegistrar implements ShutdownHookRegistrar { @Override public void registerShutdownHook(Thread thread) { Runtime.getRuntime().addShutdownHook(thread); } }
class JvmRuntimeShutdownHookRegistrar implements ShutdownHookRegistrar { @Override public void registerShutdownHook(Thread thread) { Runtime.getRuntime().addShutdownHook(thread); } }
Renamed
protected static Options createOptions() { Options options = new Options(); options.addOption("h", "help", false, "Show this syntax page."); options.addOption(Option.builder("d") .longOpt("datahandler") .hasArg(true) .argName("target") .desc("Send results to the given target.") .build()); options.addOption(Option.build...
.longOpt("tensor-short-form")
protected static Options createOptions() { Options options = new Options(); options.addOption("h", "help", false, "Show this syntax page."); options.addOption(Option.builder("d") .longOpt("datahandler") .hasArg(true) .argName("target") .desc("Send results to the given target.") .build()); options.addOption(Option.build...
class JvmRuntimeShutdownHookRegistrar implements ShutdownHookRegistrar { @Override public void registerShutdownHook(Thread thread) { Runtime.getRuntime().addShutdownHook(thread); } }
class JvmRuntimeShutdownHookRegistrar implements ShutdownHookRegistrar { @Override public void registerShutdownHook(Thread thread) { Runtime.getRuntime().addShutdownHook(thread); } }
Renamed
private static Options createOptions() { Options options = new Options(); options.addOption(Option.builder("h") .hasArg(false) .desc("Show this syntax page.") .longOpt(HELP_OPTION) .build()); options.addOption(Option.builder("i") .hasArg(false) .desc("Show only identifiers of retrieved documents.") .longOpt(PRINTIDS_OP...
.longOpt(TENSOR_SHORT_FORM_OPTION)
private static Options createOptions() { Options options = new Options(); options.addOption(Option.builder("h") .hasArg(false) .desc("Show this syntax page.") .longOpt(HELP_OPTION) .build()); options.addOption(Option.builder("i") .hasArg(false) .desc("Show only identifiers of retrieved documents.") .longOpt(PRINTIDS_OP...
class CommandLineOptions { public static final String HELP_OPTION = "help"; public static final String PRINTIDS_OPTION = "printids"; public static final String FIELDSET_OPTION = "fieldset"; public static final String CLUSTER_OPTION = "cluster"; public static final String ROUTE_OPTION = "route"; public static final Stri...
class CommandLineOptions { public static final String HELP_OPTION = "help"; public static final String PRINTIDS_OPTION = "printids"; public static final String FIELDSET_OPTION = "fieldset"; public static final String CLUSTER_OPTION = "cluster"; public static final String ROUTE_OPTION = "route"; public static final Stri...
Easier to read if you add some extra parenthesis around (codePoints * 2 <= s.length() - start)
public UnicodeString substring(int start, int codePoints) { int cps = codePoints * 2 <= s.length() - start ? codePoints : Math.min(codePoints, s.codePointCount(start, s.length())); return new UnicodeString(s.substring(start, s.offsetByCodePoints(start, cps))); }
int cps = codePoints * 2 <= s.length() - start ? codePoints
public UnicodeString substring(int start, int codePoints) { int cps = codePoints * 2 <= s.length() - start ? codePoints : Math.min(codePoints, s.codePointCount(start, s.length())); return new UnicodeString(s.substring(start, s.offsetByCodePoints(start, cps))); }
class UnicodeString { private final String s; public UnicodeString(String s) { this.s = s; } /** Substring in code point space */ /** Returns the position count code points after start (which may be past the end of the string) */ public int skip(int codePointCount, int start) { int index = start; for (int i = 0; i < co...
class UnicodeString { private final String s; public UnicodeString(String s) { this.s = s; } /** Substring in code point space */ /** Returns the position count code points after start (which may be past the end of the string) */ public int skip(int codePointCount, int start) { int index = start; for (int i = 0; i < co...
... or was this here for a reason? >_<
static String encode(String raw) { return URLEncoder.encode(raw, UTF_8); }
return URLEncoder.encode(raw, UTF_8);
static String encode(String raw) { try { return URLEncoder.encode(raw, UTF_8.name()); } catch (UnsupportedEncodingException e) { throw new IllegalStateException(e); } }
class HttpFeedClient implements FeedClient { private static final JsonFactory factory = new JsonFactory(); private final Map<String, Supplier<String>> requestHeaders; private final RequestStrategy requestStrategy; private final AtomicBoolean closed = new AtomicBoolean(); private final boolean speedTest; HttpFeedClient(...
class MessageAndTrace { final String message; final String trace; MessageAndTrace(String message, String trace) { this.message = message; this.trace = trace; } }
I think maybe because of backwards compatibility. Let me revert this.
static String encode(String raw) { return URLEncoder.encode(raw, UTF_8); }
return URLEncoder.encode(raw, UTF_8);
static String encode(String raw) { try { return URLEncoder.encode(raw, UTF_8.name()); } catch (UnsupportedEncodingException e) { throw new IllegalStateException(e); } }
class HttpFeedClient implements FeedClient { private static final JsonFactory factory = new JsonFactory(); private final Map<String, Supplier<String>> requestHeaders; private final RequestStrategy requestStrategy; private final AtomicBoolean closed = new AtomicBoolean(); private final boolean speedTest; HttpFeedClient(...
class MessageAndTrace { final String message; final String trace; MessageAndTrace(String message, String trace) { this.message = message; this.trace = trace; } }
Create a proper feed exception by parsing the response and include status code with response message: 1) If `application/json` try to parse the message field 2) If `text/plain` use response content raw Fallback to `toString()` otherwise.
private void verifyConnection(FeedClientBuilderImpl builder) { if (builder.dryrun) return; try (Cluster cluster = new ApacheCluster(builder)) { HttpRequest request = new HttpRequest("POST", getPath(DocumentId.of("feeder", "handshake", "dummy")) + getQuery(empty(), true), requestHeaders, null, Duration.ofSeconds(10)); C...
throw new FeedException("non-200 response: " + response);
private void verifyConnection(FeedClientBuilderImpl builder) { if (builder.dryrun) return; try (Cluster cluster = new ApacheCluster(builder)) { HttpRequest request = new HttpRequest("POST", getPath(DocumentId.of("feeder", "handshake", "dummy")) + getQuery(empty(), true), requestHeaders, null, Duration.ofSeconds(10)); C...
class HttpFeedClient implements FeedClient { private static final JsonFactory factory = new JsonFactory(); private final Map<String, Supplier<String>> requestHeaders; private final RequestStrategy requestStrategy; private final AtomicBoolean closed = new AtomicBoolean(); private final boolean speedTest; HttpFeedClient(...
class HttpFeedClient implements FeedClient { private static final JsonFactory factory = new JsonFactory(); private final Map<String, Supplier<String>> requestHeaders; private final RequestStrategy requestStrategy; private final AtomicBoolean closed = new AtomicBoolean(); private final boolean speedTest; HttpFeedClient(...
PTAL.
private void verifyConnection(FeedClientBuilderImpl builder) { if (builder.dryrun) return; try (Cluster cluster = new ApacheCluster(builder)) { HttpRequest request = new HttpRequest("POST", getPath(DocumentId.of("feeder", "handshake", "dummy")) + getQuery(empty(), true), requestHeaders, null, Duration.ofSeconds(10)); C...
throw new FeedException("non-200 response: " + response);
private void verifyConnection(FeedClientBuilderImpl builder) { if (builder.dryrun) return; try (Cluster cluster = new ApacheCluster(builder)) { HttpRequest request = new HttpRequest("POST", getPath(DocumentId.of("feeder", "handshake", "dummy")) + getQuery(empty(), true), requestHeaders, null, Duration.ofSeconds(10)); C...
class HttpFeedClient implements FeedClient { private static final JsonFactory factory = new JsonFactory(); private final Map<String, Supplier<String>> requestHeaders; private final RequestStrategy requestStrategy; private final AtomicBoolean closed = new AtomicBoolean(); private final boolean speedTest; HttpFeedClient(...
class HttpFeedClient implements FeedClient { private static final JsonFactory factory = new JsonFactory(); private final Map<String, Supplier<String>> requestHeaders; private final RequestStrategy requestStrategy; private final AtomicBoolean closed = new AtomicBoolean(); private final boolean speedTest; HttpFeedClient(...
The section below seems to be the same as the above, `json == jsonWithNoValuesForNodeResources == {"count":7}`, so both the `deserialized` are identical.
void serializationWithNoNodeResources() throws IOException { ClusterCapacity clusterCapacity = new ClusterCapacity(7, null, null, null, null); ObjectMapper mapper = new ObjectMapper(); String json = mapper.writeValueAsString(clusterCapacity); assertEquals("{\"count\":7}", json); ClusterCapacity deserialized = mapper.re...
void serializationWithNoNodeResources() throws IOException { ClusterCapacity clusterCapacity = new ClusterCapacity(7, null, null, null, null); ObjectMapper mapper = new ObjectMapper(); String json = mapper.writeValueAsString(clusterCapacity); assertEquals("{\"count\":7}", json); ClusterCapacity deserialized = mapper.re...
class ClusterCapacityTest { @Test void serialization() throws IOException { ClusterCapacity clusterCapacity = new ClusterCapacity(7, 1.2, 3.4, 5.6, null); ObjectMapper mapper = new ObjectMapper(); String json = mapper.writeValueAsString(clusterCapacity); assertEquals("{\"count\":7,\"vcpu\":1.2,\"memoryGb\":3.4,\"diskGb...
class ClusterCapacityTest { @Test void serialization() throws IOException { ClusterCapacity clusterCapacity = new ClusterCapacity(7, 1.2, 3.4, 5.6, null); ObjectMapper mapper = new ObjectMapper(); String json = mapper.writeValueAsString(clusterCapacity); assertEquals("{\"count\":7,\"vcpu\":1.2,\"memoryGb\":3.4,\"diskGb...
Yes, you are right, went back and forth and forgot to remove this. Fixing
void serializationWithNoNodeResources() throws IOException { ClusterCapacity clusterCapacity = new ClusterCapacity(7, null, null, null, null); ObjectMapper mapper = new ObjectMapper(); String json = mapper.writeValueAsString(clusterCapacity); assertEquals("{\"count\":7}", json); ClusterCapacity deserialized = mapper.re...
void serializationWithNoNodeResources() throws IOException { ClusterCapacity clusterCapacity = new ClusterCapacity(7, null, null, null, null); ObjectMapper mapper = new ObjectMapper(); String json = mapper.writeValueAsString(clusterCapacity); assertEquals("{\"count\":7}", json); ClusterCapacity deserialized = mapper.re...
class ClusterCapacityTest { @Test void serialization() throws IOException { ClusterCapacity clusterCapacity = new ClusterCapacity(7, 1.2, 3.4, 5.6, null); ObjectMapper mapper = new ObjectMapper(); String json = mapper.writeValueAsString(clusterCapacity); assertEquals("{\"count\":7,\"vcpu\":1.2,\"memoryGb\":3.4,\"diskGb...
class ClusterCapacityTest { @Test void serialization() throws IOException { ClusterCapacity clusterCapacity = new ClusterCapacity(7, 1.2, 3.4, 5.6, null); ObjectMapper mapper = new ObjectMapper(); String json = mapper.writeValueAsString(clusterCapacity); assertEquals("{\"count\":7,\"vcpu\":1.2,\"memoryGb\":3.4,\"diskGb...
Move the guard above sampling time so you can skip sampling if not necessary.
public Duration timeLeft() { Instant now = SystemTimer.INSTANCE.instant(); if (expiresAt == Instant.MAX) return NO_TIMEOUT; if (now.isAfter(expiresAt)) return Duration.ZERO; return Duration.between(now, expiresAt); }
if (expiresAt == Instant.MAX) return NO_TIMEOUT;
public Duration timeLeft() { if (expiresAt == Instant.MAX) return NO_TIMEOUT; Instant now = SystemTimer.INSTANCE.instant(); if (now.isAfter(expiresAt)) return Duration.ZERO; return Duration.between(now, expiresAt); }
class Processing extends ProcessingAccess { /** The name of the service which owns this processing. Null is the same as "default". */ private String service = null; /** The processors to call the next work is done on this processing. */ private CallStack callStack = null; /** The collection of documents or document upd...
class Processing extends ProcessingAccess { /** The name of the service which owns this processing. Null is the same as "default". */ private String service = null; /** The processors to call the next work is done on this processing. */ private CallStack callStack = null; /** The collection of documents or document upd...
s/mke/make
public void test_autoscaling_weights_growth_rate_by_confidence() { var fixture = AutoscalingTester.fixture().awsProdSetup(true).build(); double scalingFactor = 1.0/6000; fixture.setScalingDuration(Duration.ofMinutes(60)); fixture.tester().clock().advance(Duration.ofDays(2)); Duration timeAdded = fixture.loader().addLoa...
double scalingFactor = 1.0/6000;
public void test_autoscaling_weights_growth_rate_by_confidence() { var fixture = AutoscalingTester.fixture().awsProdSetup(true).build(); double scalingFactor = 1.0/6000; fixture.setScalingDuration(Duration.ofMinutes(60)); fixture.tester().clock().advance(Duration.ofDays(2)); Duration timeAdded = fixture.loader().addLoa...
class AutoscalingTest { @Test public void test_autoscaling_single_content_group() { var fixture = AutoscalingTester.fixture().awsProdSetup(true).build(); fixture.loader().applyCpuLoad(0.7f, 10); var scaledResources = fixture.tester().assertResources("Scaling up since resource usage is too high", 7, 1, 4.6, 11.1, 55.1,...
class AutoscalingTest { @Test public void test_autoscaling_single_content_group() { var fixture = AutoscalingTester.fixture().awsProdSetup(true).build(); fixture.loader().applyCpuLoad(0.7f, 10); var scaledResources = fixture.tester().assertResources("Scaling up since resource usage is too high", 7, 1, 4.6, 11.1, 55.1,...
Move disableRepos before enableRepos. I have only seen this order, and don't know if this order will work. Unless you know it's OK to put disable last?
protected void addParametersToCommandLine(CommandLine commandLine) { commandLine.add("--assumeyes"); enabledRepos.forEach(repo -> commandLine.add("--enablerepo=" + repo)); disabledRepos.forEach(repo -> commandLine.add("--disablerepo=" + repo)); }
disabledRepos.forEach(repo -> commandLine.add("--disablerepo=" + repo));
protected void addParametersToCommandLine(CommandLine commandLine) { commandLine.add("--assumeyes"); if (!enabledRepos.isEmpty() && disabledRepos.isEmpty()) { commandLine.add("--disablerepo=*"); } else { disabledRepos.forEach(repo -> commandLine.add("--disablerepo=" + repo)); } enabledRepos.forEach(repo -> commandLine....
class YumCommand<T extends YumCommand<T>> { public static final Pattern INSTALL_NOOP_PATTERN = Pattern.compile("(?dm)^Nothing to do\\.?$"); public static final Pattern UPGRADE_NOOP_PATTERN = Pattern.compile("(?dm)^No packages marked for update$"); public static final Pattern REMOVE_NOOP_PATTERN = Pattern.compile("(?dm)...
class YumCommand<T extends YumCommand<T>> { public static final Pattern INSTALL_NOOP_PATTERN = Pattern.compile("(?dm)^Nothing to do\\.?$"); public static final Pattern UPGRADE_NOOP_PATTERN = Pattern.compile("(?dm)^No packages marked for update$"); public static final Pattern REMOVE_NOOP_PATTERN = Pattern.compile("(?dm)...
I was thinking ``` disabledRepos = repo.length == 0 ? List.of("*") : List.of(repo) ``` and then only do the ``` disabledRepos.forEach(repo -> commandLine.add("--disablerepo=" + repo)); ``` below. But this works too.
public T disableRepo(String... repo) { disabledRepos = List.of(repo); return getThis(); }
disabledRepos = List.of(repo);
public T disableRepo(String... repo) { disabledRepos = List.of(repo); return getThis(); }
class YumCommand<T extends YumCommand<T>> { public static final Pattern INSTALL_NOOP_PATTERN = Pattern.compile("(?dm)^Nothing to do\\.?$"); public static final Pattern UPGRADE_NOOP_PATTERN = Pattern.compile("(?dm)^No packages marked for update$"); public static final Pattern REMOVE_NOOP_PATTERN = Pattern.compile("(?dm)...
class YumCommand<T extends YumCommand<T>> { public static final Pattern INSTALL_NOOP_PATTERN = Pattern.compile("(?dm)^Nothing to do\\.?$"); public static final Pattern UPGRADE_NOOP_PATTERN = Pattern.compile("(?dm)^No packages marked for update$"); public static final Pattern REMOVE_NOOP_PATTERN = Pattern.compile("(?dm)...
Use `EnumSet.of(first, rest)` ?
public Set<String> resolve(String name, RecordType first, RecordType... rest) { Set<RecordType> types = new HashSet<>(1 + rest.length); types.add(first); Collections.addAll(types, rest); return resolveAll(name) .stream() .filter(addressString -> { if (types.contains(RecordType.A) && IP.isV4(addressString)) return true;...
Collections.addAll(types, rest);
public Set<String> resolve(String name, RecordType first, RecordType... rest) { var types = EnumSet.of(first, rest); return resolveAll(name) .stream() .filter(addressString -> { if (types.contains(RecordType.A) && IP.isV4(addressString)) return true; if (types.contains(RecordType.AAAA) && IP.isV6(addressString)) return...
class MockNameResolver implements NameResolver { private final Map<String, Set<String>> records = new HashMap<>(); private boolean mockAnyLookup = false; private boolean explicitReverseRecords = false; public MockNameResolver addReverseRecord(String ipAddress, String hostname) { addRecord(ipAddress, hostname); return t...
class MockNameResolver implements NameResolver { private final Map<String, Set<String>> records = new HashMap<>(); private boolean mockAnyLookup = false; private boolean explicitReverseRecords = false; public MockNameResolver addReverseRecord(String ipAddress, String hostname) { addRecord(ipAddress, hostname); return t...
Avoid resolving `A` records in the first place if protocol is `IPv6`?
private static Allocation fromAddress(Address address, NameResolver resolver, IpAddresses.Protocol protocol) { Optional<String> ipv4Address = resolveOptional(address.hostname(), resolver, RecordType.A); if (protocol != IpAddresses.Protocol.ipv6 && ipv4Address.isEmpty()) throw new IllegalArgumentException(protocol.descr...
if (protocol != IpAddresses.Protocol.ipv6 && ipv4Address.isEmpty())
private static Allocation fromAddress(Address address, NameResolver resolver, IpAddresses.Protocol protocol) { Optional<String> ipv4Address = resolveOptional(address.hostname(), resolver, RecordType.A); if (protocol != IpAddresses.Protocol.ipv6 && ipv4Address.isEmpty()) throw new IllegalArgumentException(protocol.descr...
class Allocation { private final String hostname; private final Optional<String> ipv4Address; private final Optional<String> ipv6Address; private Allocation(String hostname, Optional<String> ipv4Address, Optional<String> ipv6Address) { this.hostname = Objects.requireNonNull(hostname, "hostname must be non-null"); this....
class Allocation { private final String hostname; private final Optional<String> ipv4Address; private final Optional<String> ipv6Address; private Allocation(String hostname, Optional<String> ipv4Address, Optional<String> ipv6Address) { this.hostname = Objects.requireNonNull(hostname, "hostname must be non-null"); this....
Great, exactly what I was looking for.
public Set<String> resolve(String name, RecordType first, RecordType... rest) { Set<RecordType> types = new HashSet<>(1 + rest.length); types.add(first); Collections.addAll(types, rest); return resolveAll(name) .stream() .filter(addressString -> { if (types.contains(RecordType.A) && IP.isV4(addressString)) return true;...
Collections.addAll(types, rest);
public Set<String> resolve(String name, RecordType first, RecordType... rest) { var types = EnumSet.of(first, rest); return resolveAll(name) .stream() .filter(addressString -> { if (types.contains(RecordType.A) && IP.isV4(addressString)) return true; if (types.contains(RecordType.AAAA) && IP.isV6(addressString)) return...
class MockNameResolver implements NameResolver { private final Map<String, Set<String>> records = new HashMap<>(); private boolean mockAnyLookup = false; private boolean explicitReverseRecords = false; public MockNameResolver addReverseRecord(String ipAddress, String hostname) { addRecord(ipAddress, hostname); return t...
class MockNameResolver implements NameResolver { private final Map<String, Set<String>> records = new HashMap<>(); private boolean mockAnyLookup = false; private boolean explicitReverseRecords = false; public MockNameResolver addReverseRecord(String ipAddress, String hostname) { addRecord(ipAddress, hostname); return t...
:thinking: or, verify no A records are found for a supposedly IPv6-only host? We expect later resolutions in host admin or various tools to blindly find all A and AAAA records, so let's ensure they are exactly as expected? Implemented the latter.
private static Allocation fromAddress(Address address, NameResolver resolver, IpAddresses.Protocol protocol) { Optional<String> ipv4Address = resolveOptional(address.hostname(), resolver, RecordType.A); if (protocol != IpAddresses.Protocol.ipv6 && ipv4Address.isEmpty()) throw new IllegalArgumentException(protocol.descr...
if (protocol != IpAddresses.Protocol.ipv6 && ipv4Address.isEmpty())
private static Allocation fromAddress(Address address, NameResolver resolver, IpAddresses.Protocol protocol) { Optional<String> ipv4Address = resolveOptional(address.hostname(), resolver, RecordType.A); if (protocol != IpAddresses.Protocol.ipv6 && ipv4Address.isEmpty()) throw new IllegalArgumentException(protocol.descr...
class Allocation { private final String hostname; private final Optional<String> ipv4Address; private final Optional<String> ipv6Address; private Allocation(String hostname, Optional<String> ipv4Address, Optional<String> ipv6Address) { this.hostname = Objects.requireNonNull(hostname, "hostname must be non-null"); this....
class Allocation { private final String hostname; private final Optional<String> ipv4Address; private final Optional<String> ipv6Address; private Allocation(String hostname, Optional<String> ipv4Address, Optional<String> ipv6Address) { this.hostname = Objects.requireNonNull(hostname, "hostname must be non-null"); this....
This changes for both single-node and multi-node groups. Is that intentional? Is there another reason why this has only been reported as an issue for single-node groups? You might just be masking a symptom now.
private void trackGroupCoverageChanges(Group group, boolean fullCoverage, long medianDocuments) { if ( ! hasInformationAboutAllNodes()) return; boolean changed = group.fullCoverageStatusChanged(fullCoverage); if (changed || (!fullCoverage && System.currentTimeMillis() > nextLogTime)) { nextLogTime = System.currentTimeM...
if (nonWorkingNodeCount() == 1)
private void trackGroupCoverageChanges(Group group, boolean fullCoverage, long medianDocuments) { if ( ! hasInformationAboutAllNodes()) return; boolean changed = group.fullCoverageStatusChanged(fullCoverage); if (changed || (!fullCoverage && System.currentTimeMillis() > nextLogTime)) { nextLogTime = System.currentTimeM...
class SearchCluster implements NodeManager<Node> { private static final Logger log = Logger.getLogger(SearchCluster.class.getName()); private final String clusterId; private final VipStatus vipStatus; private final PingFactory pingFactory; private final SearchGroupsImpl groups; private volatile long nextLogTime = 0; /*...
class SearchCluster implements NodeManager<Node> { private static final Logger log = Logger.getLogger(SearchCluster.class.getName()); private final String clusterId; private final VipStatus vipStatus; private final PingFactory pingFactory; private final SearchGroupsImpl groups; private volatile long nextLogTime = 0; /*...
Yes, intentional since whether we have one or many nodes in a group it is normal to take at least one node down.
private void trackGroupCoverageChanges(Group group, boolean fullCoverage, long medianDocuments) { if ( ! hasInformationAboutAllNodes()) return; boolean changed = group.fullCoverageStatusChanged(fullCoverage); if (changed || (!fullCoverage && System.currentTimeMillis() > nextLogTime)) { nextLogTime = System.currentTimeM...
if (nonWorkingNodeCount() == 1)
private void trackGroupCoverageChanges(Group group, boolean fullCoverage, long medianDocuments) { if ( ! hasInformationAboutAllNodes()) return; boolean changed = group.fullCoverageStatusChanged(fullCoverage); if (changed || (!fullCoverage && System.currentTimeMillis() > nextLogTime)) { nextLogTime = System.currentTimeM...
class SearchCluster implements NodeManager<Node> { private static final Logger log = Logger.getLogger(SearchCluster.class.getName()); private final String clusterId; private final VipStatus vipStatus; private final PingFactory pingFactory; private final SearchGroupsImpl groups; private volatile long nextLogTime = 0; /*...
class SearchCluster implements NodeManager<Node> { private static final Logger log = Logger.getLogger(SearchCluster.class.getName()); private final String clusterId; private final VipStatus vipStatus; private final PingFactory pingFactory; private final SearchGroupsImpl groups; private volatile long nextLogTime = 0; /*...
OK, so we assume that we have the same logging for groups with multiple nodes too. If so this is fine.
private void trackGroupCoverageChanges(Group group, boolean fullCoverage, long medianDocuments) { if ( ! hasInformationAboutAllNodes()) return; boolean changed = group.fullCoverageStatusChanged(fullCoverage); if (changed || (!fullCoverage && System.currentTimeMillis() > nextLogTime)) { nextLogTime = System.currentTimeM...
if (nonWorkingNodeCount() == 1)
private void trackGroupCoverageChanges(Group group, boolean fullCoverage, long medianDocuments) { if ( ! hasInformationAboutAllNodes()) return; boolean changed = group.fullCoverageStatusChanged(fullCoverage); if (changed || (!fullCoverage && System.currentTimeMillis() > nextLogTime)) { nextLogTime = System.currentTimeM...
class SearchCluster implements NodeManager<Node> { private static final Logger log = Logger.getLogger(SearchCluster.class.getName()); private final String clusterId; private final VipStatus vipStatus; private final PingFactory pingFactory; private final SearchGroupsImpl groups; private volatile long nextLogTime = 0; /*...
class SearchCluster implements NodeManager<Node> { private static final Logger log = Logger.getLogger(SearchCluster.class.getName()); private final String clusterId; private final VipStatus vipStatus; private final PingFactory pingFactory; private final SearchGroupsImpl groups; private volatile long nextLogTime = 0; /*...
Doesn't `production()` imply `not().test()`?
/**
 * Returns the oldest platform version installed in any production (non-test) job of the
 * given application, or empty if no installed version can be determined.
 */
public Optional<Version> oldestInstalledPlatform(Application application) {
    var productionJobs = controller.jobController()
                                   .deploymentStatus(application)
                                   .jobs()
                                   .production()
                                   .not().test()
                                   .asList();
    return productionJobs.stream()
                         .map(this::oldestInstalledPlatform)
                         .flatMap(Optional::stream)
                         .min(naturalOrder());
}
.production()
/**
 * Returns the oldest platform version installed in any production (non-test) job of the
 * given application, or empty if no installed version can be determined.
 */
public Optional<Version> oldestInstalledPlatform(Application application) {
    var productionJobs = controller.jobController()
                                   .deploymentStatus(application)
                                   .jobs()
                                   .production()
                                   .not().test()
                                   .asList();
    return productionJobs.stream()
                         .map(this::oldestInstalledPlatform)
                         .flatMap(Optional::stream)
                         .min(naturalOrder());
}
class ApplicationController { private static final Logger log = Logger.getLogger(ApplicationController.class.getName()); /** The controller owning this */ private final Controller controller; /** For persistence */ private final CuratorDb curator; private final ArtifactRepository artifactRepository; private final Appli...
class ApplicationController { private static final Logger log = Logger.getLogger(ApplicationController.class.getName()); /** The controller owning this */ private final Controller controller; /** For persistence */ private final CuratorDb curator; private final ArtifactRepository artifactRepository; private final Appli...
Production tests are production tests :)
/**
 * Returns the oldest platform version installed in any production (non-test) job of the
 * given application, or empty if no installed version can be determined.
 */
public Optional<Version> oldestInstalledPlatform(Application application) {
    var productionJobs = controller.jobController()
                                   .deploymentStatus(application)
                                   .jobs()
                                   .production()
                                   .not().test()
                                   .asList();
    return productionJobs.stream()
                         .map(this::oldestInstalledPlatform)
                         .flatMap(Optional::stream)
                         .min(naturalOrder());
}
.production()
/**
 * Returns the oldest platform version installed in any production (non-test) job of the
 * given application, or empty if no installed version can be determined.
 */
public Optional<Version> oldestInstalledPlatform(Application application) {
    var productionJobs = controller.jobController()
                                   .deploymentStatus(application)
                                   .jobs()
                                   .production()
                                   .not().test()
                                   .asList();
    return productionJobs.stream()
                         .map(this::oldestInstalledPlatform)
                         .flatMap(Optional::stream)
                         .min(naturalOrder());
}
class ApplicationController { private static final Logger log = Logger.getLogger(ApplicationController.class.getName()); /** The controller owning this */ private final Controller controller; /** For persistence */ private final CuratorDb curator; private final ArtifactRepository artifactRepository; private final Appli...
class ApplicationController { private static final Logger log = Logger.getLogger(ApplicationController.class.getName()); /** The controller owning this */ private final Controller controller; /** For persistence */ private final CuratorDb curator; private final ArtifactRepository artifactRepository; private final Appli...
I'm assuming this log level bump is to make debugging easier for spurious failures?
public void run() { log.log(Level.FINE, () -> "Dummy node " + DummyVdsNode.this + ": starting message responder thread"); while (true) { synchronized (timer) { if (isInterrupted()) break; long currentTime = timer.getCurrentTimeInMillis(); for (Iterator<Req> it = waitingRequests.iterator(); it.hasNext(); ) { Req r = it....
log.log(Level.INFO, () -> "Dummy node " + DummyVdsNode.this + ": Responding to node state request at time " + currentTime);
public void run() { log.log(Level.FINE, () -> "Dummy node " + DummyVdsNode.this + ": starting message responder thread"); while (true) { synchronized (timer) { if (isInterrupted()) break; long currentTime = timer.getCurrentTimeInMillis(); for (Iterator<Req> it = waitingRequests.iterator(); it.hasNext(); ) { Req r = it....
class BackOff implements BackOffPolicy { public void reset() {} public double get() { return 0.01; } public boolean shouldWarn(double v) { return false; } public boolean shouldInform(double v) { return false; } }
class BackOff implements BackOffPolicy { public void reset() {} public double get() { return 0.01; } public boolean shouldWarn(double v) { return false; } public boolean shouldInform(double v) { return false; } }
No, not intended, will fix. Thanks
public void run() { log.log(Level.FINE, () -> "Dummy node " + DummyVdsNode.this + ": starting message responder thread"); while (true) { synchronized (timer) { if (isInterrupted()) break; long currentTime = timer.getCurrentTimeInMillis(); for (Iterator<Req> it = waitingRequests.iterator(); it.hasNext(); ) { Req r = it....
log.log(Level.INFO, () -> "Dummy node " + DummyVdsNode.this + ": Responding to node state request at time " + currentTime);
public void run() { log.log(Level.FINE, () -> "Dummy node " + DummyVdsNode.this + ": starting message responder thread"); while (true) { synchronized (timer) { if (isInterrupted()) break; long currentTime = timer.getCurrentTimeInMillis(); for (Iterator<Req> it = waitingRequests.iterator(); it.hasNext(); ) { Req r = it....
class BackOff implements BackOffPolicy { public void reset() {} public double get() { return 0.01; } public boolean shouldWarn(double v) { return false; } public boolean shouldInform(double v) { return false; } }
class BackOff implements BackOffPolicy { public void reset() {} public double get() { return 0.01; } public boolean shouldWarn(double v) { return false; } public boolean shouldInform(double v) { return false; } }
Maybe `filename.contains(".metrics-proxy.")` to avoid regex?
public static Optional<SyncFileInfo> forLogFile(URI uri, Path logFile, boolean rotatedOnly, ApplicationId owner) { String filename = logFile.getFileName().toString(); Compression compression; final String dir; String remoteFilename = logFile.getFileName().toString(); Duration minDurationBetweenSync = null; if (filename...
else if (filename.matches(".*\\.metrics-proxy\\..*"))
public static Optional<SyncFileInfo> forLogFile(URI uri, Path logFile, boolean rotatedOnly, ApplicationId owner) { String filename = logFile.getFileName().toString(); Compression compression; final String dir; String remoteFilename = logFile.getFileName().toString(); Duration minDurationBetweenSync = null; if (filename...
class SyncFileInfo { private static final DateTimeFormatter DATE_TIME_FORMATTER = DateTimeFormatter .ofPattern("yyyy-MM-dd.HH-mm-ss").withZone(ZoneOffset.UTC); private final Path source; private final Function<String, URI> destination; private final Compression uploadCompression; private final Map<String, String> tags;...
class SyncFileInfo { private static final DateTimeFormatter DATE_TIME_FORMATTER = DateTimeFormatter .ofPattern("yyyy-MM-dd.HH-mm-ss").withZone(ZoneOffset.UTC); private final Path source; private final Function<String, URI> destination; private final Compression uploadCompression; private final Map<String, String> tags;...
Of course ...
public static Optional<SyncFileInfo> forLogFile(URI uri, Path logFile, boolean rotatedOnly, ApplicationId owner) { String filename = logFile.getFileName().toString(); Compression compression; final String dir; String remoteFilename = logFile.getFileName().toString(); Duration minDurationBetweenSync = null; if (filename...
else if (filename.matches(".*\\.metrics-proxy\\..*"))
public static Optional<SyncFileInfo> forLogFile(URI uri, Path logFile, boolean rotatedOnly, ApplicationId owner) { String filename = logFile.getFileName().toString(); Compression compression; final String dir; String remoteFilename = logFile.getFileName().toString(); Duration minDurationBetweenSync = null; if (filename...
class SyncFileInfo { private static final DateTimeFormatter DATE_TIME_FORMATTER = DateTimeFormatter .ofPattern("yyyy-MM-dd.HH-mm-ss").withZone(ZoneOffset.UTC); private final Path source; private final Function<String, URI> destination; private final Compression uploadCompression; private final Map<String, String> tags;...
class SyncFileInfo { private static final DateTimeFormatter DATE_TIME_FORMATTER = DateTimeFormatter .ofPattern("yyyy-MM-dd.HH-mm-ss").withZone(ZoneOffset.UTC); private final Path source; private final Function<String, URI> destination; private final Compression uploadCompression; private final Map<String, String> tags;...
```suggestion else if (filename.contains(".metrics-proxy.")) // See AccessLogComponent.java for filename. ```
public static Optional<SyncFileInfo> forLogFile(URI uri, Path logFile, boolean rotatedOnly, ApplicationId owner) { String filename = logFile.getFileName().toString(); Compression compression; final String dir; String remoteFilename = logFile.getFileName().toString(); Duration minDurationBetweenSync = null; if (filename...
else if (filename.matches(".*\\.metrics-proxy\\..*"))
public static Optional<SyncFileInfo> forLogFile(URI uri, Path logFile, boolean rotatedOnly, ApplicationId owner) { String filename = logFile.getFileName().toString(); Compression compression; final String dir; String remoteFilename = logFile.getFileName().toString(); Duration minDurationBetweenSync = null; if (filename...
class SyncFileInfo { private static final DateTimeFormatter DATE_TIME_FORMATTER = DateTimeFormatter .ofPattern("yyyy-MM-dd.HH-mm-ss").withZone(ZoneOffset.UTC); private final Path source; private final Function<String, URI> destination; private final Compression uploadCompression; private final Map<String, String> tags;...
class SyncFileInfo { private static final DateTimeFormatter DATE_TIME_FORMATTER = DateTimeFormatter .ofPattern("yyyy-MM-dd.HH-mm-ss").withZone(ZoneOffset.UTC); private final Path source; private final Function<String, URI> destination; private final Compression uploadCompression; private final Map<String, String> tags;...
I think that a comment here would be good. Not obvious....
private void processDocument(DocumentPut prev, List<DocumentOperation> out) { DocumentType hadType = prev.getDocument().getDataType(); DocumentScript script = scriptMgr.getScript(hadType); if (script == null) { log.log(Level.FINE, "No indexing script for document '%s'.", prev.getId()); out.add(prev); return; } log.log(...
if (hadType != wantType) {
private void processDocument(DocumentPut prev, List<DocumentOperation> out) { DocumentType hadType = prev.getDocument().getDataType(); DocumentScript script = scriptMgr.getScript(hadType); if (script == null) { log.log(Level.FINE, "No indexing script for document '%s'.", prev.getId()); out.add(prev); return; } log.log(...
class " + documentOperation.getClass().getName() + " not supported."); } else { throw new IllegalArgumentException("Expected document, got null."); }
class " + documentOperation.getClass().getName() + " not supported."); } else { throw new IllegalArgumentException("Expected document, got null."); }
Are we sure we want to limit this to containers only? History shows that there are ways to run custom code on other cluster types.
/**
 * Returns whether nodes for the given cluster must be allocated exclusively,
 * i.e. may not share hosts with other applications.
 */
public boolean exclusiveAllocation(ClusterSpec clusterSpec) {
    if (clusterSpec.isExclusive()) return true;

    // Container clusters are always exclusive in public, non-test zones
    if (clusterSpec.type().isContainer() && zone.system().isPublic() && !zone.environment().isTest())
        return true;

    // Otherwise exclusive when this cloud disallows host sharing, unless the shared-hosts
    // flag enables sharing for this cluster type
    return !zone().cloud().allowHostSharing()
           && !sharedHosts.value().isEnabled(clusterSpec.type().name());
}
( clusterSpec.type().isContainer() && zone.system().isPublic() && !zone.environment().isTest() ) ||
/**
 * Returns whether nodes for the given cluster must be allocated exclusively,
 * i.e. may not share hosts with other applications.
 */
public boolean exclusiveAllocation(ClusterSpec clusterSpec) {
    if (clusterSpec.isExclusive()) return true;

    // Container clusters are always exclusive in public, non-test zones
    if (clusterSpec.type().isContainer() && zone.system().isPublic() && !zone.environment().isTest())
        return true;

    // Otherwise exclusive when this cloud disallows host sharing, unless the shared-hosts
    // flag enables sharing for this cluster type
    return !zone().cloud().allowHostSharing()
           && !sharedHosts.value().isEnabled(clusterSpec.type().name());
}
class NodeRepository extends AbstractComponent { private final CuratorDb db; private final Clock clock; private final Zone zone; private final Nodes nodes; private final NodeFlavors flavors; private final HostResourcesCalculator resourcesCalculator; private final NameResolver nameResolver; private final OsVersions osVe...
class NodeRepository extends AbstractComponent { private final CuratorDb db; private final Clock clock; private final Zone zone; private final Nodes nodes; private final NodeFlavors flavors; private final HostResourcesCalculator resourcesCalculator; private final NameResolver nameResolver; private final OsVersions osVe...
Debatable, but we should definitely not allow that, so disallowing it here would be from a defence in depth argument, where it must be weighted against the potential cost of this, which is much higher on content nodes, as that's where most of the resource cost and therefore cost optimization is spent, and I just don't ...
/**
 * Returns whether nodes for the given cluster must be allocated exclusively,
 * i.e. may not share hosts with other applications.
 */
public boolean exclusiveAllocation(ClusterSpec clusterSpec) {
    if (clusterSpec.isExclusive()) return true;

    // Container clusters are always exclusive in public, non-test zones
    if (clusterSpec.type().isContainer() && zone.system().isPublic() && !zone.environment().isTest())
        return true;

    // Otherwise exclusive when this cloud disallows host sharing, unless the shared-hosts
    // flag enables sharing for this cluster type
    return !zone().cloud().allowHostSharing()
           && !sharedHosts.value().isEnabled(clusterSpec.type().name());
}
( clusterSpec.type().isContainer() && zone.system().isPublic() && !zone.environment().isTest() ) ||
/**
 * Returns whether nodes for the given cluster must be allocated exclusively,
 * i.e. may not share hosts with other applications.
 */
public boolean exclusiveAllocation(ClusterSpec clusterSpec) {
    if (clusterSpec.isExclusive()) return true;

    // Container clusters are always exclusive in public, non-test zones
    if (clusterSpec.type().isContainer() && zone.system().isPublic() && !zone.environment().isTest())
        return true;

    // Otherwise exclusive when this cloud disallows host sharing, unless the shared-hosts
    // flag enables sharing for this cluster type
    return !zone().cloud().allowHostSharing()
           && !sharedHosts.value().isEnabled(clusterSpec.type().name());
}
class NodeRepository extends AbstractComponent { private final CuratorDb db; private final Clock clock; private final Zone zone; private final Nodes nodes; private final NodeFlavors flavors; private final HostResourcesCalculator resourcesCalculator; private final NameResolver nameResolver; private final OsVersions osVe...
class NodeRepository extends AbstractComponent { private final CuratorDb db; private final Clock clock; private final Zone zone; private final Nodes nodes; private final NodeFlavors flavors; private final HostResourcesCalculator resourcesCalculator; private final NameResolver nameResolver; private final OsVersions osVe...
consider using private static void addMetric(Set<Metric> metrics, String nameWithSuffix) instead of a list with one item (applies to the others below, too, with only one suffix)
private static Set<Metric> getContainerMetrics() { Set<Metric> metrics = new LinkedHashSet<>(); addMetric(metrics, "jdisc.http.requests", List.of("rate", "count")); metrics.add(new Metric("handled.requests.count")); metrics.add(new Metric("handled.latency.max")); metrics.add(new Metric("handled.latency.sum")); metrics....
addMetric(metrics, ContainerMetrics.MEM_HEAP_TOTAL.baseName(), List.of("average"));
private static Set<Metric> getContainerMetrics() { Set<Metric> metrics = new LinkedHashSet<>(); addMetric(metrics, "jdisc.http.requests", List.of("rate", "count")); metrics.add(new Metric("handled.requests.count")); metrics.add(new Metric("handled.latency.max")); metrics.add(new Metric("handled.latency.sum")); metrics....
class VespaMetricSet { public static final MetricSet vespaMetricSet = new MetricSet("vespa", getVespaMetrics(), singleton(defaultVespaMetricSet)); private static Set<Metric> getVespaMetrics() { Set<Metric> metrics = new LinkedHashSet<>(); metrics.addAll(getSearchNodeMetrics()); metrics.addAll(getStorageMetrics()); metr...
class VespaMetricSet { public static final MetricSet vespaMetricSet = new MetricSet("vespa", getVespaMetrics(), singleton(defaultVespaMetricSet)); private static Set<Metric> getVespaMetrics() { Set<Metric> metrics = new LinkedHashSet<>(); metrics.addAll(getSearchNodeMetrics()); metrics.addAll(getStorageMetrics()); metr...
Thanks. Changed. I agree this is better for readability and also prevents possible typos from being merged.
private static Set<Metric> getContainerMetrics() { Set<Metric> metrics = new LinkedHashSet<>(); addMetric(metrics, "jdisc.http.requests", List.of("rate", "count")); metrics.add(new Metric("handled.requests.count")); metrics.add(new Metric("handled.latency.max")); metrics.add(new Metric("handled.latency.sum")); metrics....
addMetric(metrics, ContainerMetrics.MEM_HEAP_TOTAL.baseName(), List.of("average"));
private static Set<Metric> getContainerMetrics() { Set<Metric> metrics = new LinkedHashSet<>(); addMetric(metrics, "jdisc.http.requests", List.of("rate", "count")); metrics.add(new Metric("handled.requests.count")); metrics.add(new Metric("handled.latency.max")); metrics.add(new Metric("handled.latency.sum")); metrics....
class VespaMetricSet { public static final MetricSet vespaMetricSet = new MetricSet("vespa", getVespaMetrics(), singleton(defaultVespaMetricSet)); private static Set<Metric> getVespaMetrics() { Set<Metric> metrics = new LinkedHashSet<>(); metrics.addAll(getSearchNodeMetrics()); metrics.addAll(getStorageMetrics()); metr...
class VespaMetricSet { public static final MetricSet vespaMetricSet = new MetricSet("vespa", getVespaMetrics(), singleton(defaultVespaMetricSet)); private static Set<Metric> getVespaMetrics() { Set<Metric> metrics = new LinkedHashSet<>(); metrics.addAll(getSearchNodeMetrics()); metrics.addAll(getStorageMetrics()); metr...
```suggestion // External tables don't need to check here ```
private boolean needToRefreshNonPartitionTable(Table partitionTable) { for (Pair<MaterializedView.BaseTableInfo, Table> tablePair : snapshotBaseTables.values()) { Table snapshotTable = tablePair.second; if (snapshotTable.getId() == partitionTable.getId()) { continue; } if (!snapshotTable.isOlapTable()) { continue; } if...
private boolean needToRefreshNonPartitionTable(Table partitionTable) { for (Pair<MaterializedView.BaseTableInfo, Table> tablePair : snapshotBaseTables.values()) { Table snapshotTable = tablePair.second; if (snapshotTable.getId() == partitionTable.getId()) { continue; } if (!snapshotTable.isOlapTable()) { continue; } if...
class PartitionBasedMaterializedViewRefreshProcessor extends BaseTaskRunProcessor { private static final Logger LOG = LogManager.getLogger(PartitionBasedMaterializedViewRefreshProcessor.class); public static final String MV_ID = "mvId"; private static final int MAX_RETRY_NUM = 10; private Database database; private Mat...
class PartitionBasedMaterializedViewRefreshProcessor extends BaseTaskRunProcessor { private static final Logger LOG = LogManager.getLogger(PartitionBasedMaterializedViewRefreshProcessor.class); public static final String MV_ID = "mvId"; private static final int MAX_RETRY_NUM = 10; private Database database; private Mat...
Need to close out stream here.
private OutputStream compressedOutputStream(File outputFile) throws IOException { FileOutputStream out = new FileOutputStream(outputFile); switch (type) { case compressed: log.log(Level.FINE, () -> "Compressing with compression type " + compressionType); return switch (compressionType) { case gzip -> new GZIPOutputStre...
throw new RuntimeException("Unknown file reference type " + type);
private OutputStream compressedOutputStream(File outputFile) throws IOException { FileOutputStream out = new FileOutputStream(outputFile); switch (type) { case compressed: log.log(Level.FINE, () -> "Compressing with compression type " + compressionType); return switch (compressionType) { case gzip -> new GZIPOutputStre...
class FileReferenceCompressor { private static final Logger log = Logger.getLogger(FileReferenceCompressor.class.getName()); private static final int recurseDepth = 100; private final FileReferenceData.Type type; private final FileReferenceData.CompressionType compressionType; public FileReferenceCompressor(FileReferen...
class FileReferenceCompressor { private static final Logger log = Logger.getLogger(FileReferenceCompressor.class.getName()); private static final int recurseDepth = 100; private final FileReferenceData.Type type; private final FileReferenceData.CompressionType compressionType; public FileReferenceCompressor(FileReferen...
Need to close in stream here.
private InputStream decompressedInputStream(File inputFile) throws IOException { FileInputStream in = new FileInputStream(inputFile); switch (type) { case compressed: log.log(Level.FINE, () -> "Decompressing with compression type " + compressionType); return switch (compressionType) { case gzip -> new GZIPInputStream(i...
throw new RuntimeException("Unknown file reference type " + type);
private InputStream decompressedInputStream(File inputFile) throws IOException { FileInputStream in = new FileInputStream(inputFile); switch (type) { case compressed: log.log(Level.FINE, () -> "Decompressing with compression type " + compressionType); return switch (compressionType) { case gzip -> new GZIPInputStream(i...
class FileReferenceCompressor { private static final Logger log = Logger.getLogger(FileReferenceCompressor.class.getName()); private static final int recurseDepth = 100; private final FileReferenceData.Type type; private final FileReferenceData.CompressionType compressionType; public FileReferenceCompressor(FileReferen...
class FileReferenceCompressor { private static final Logger log = Logger.getLogger(FileReferenceCompressor.class.getName()); private static final int recurseDepth = 100; private final FileReferenceData.Type type; private final FileReferenceData.CompressionType compressionType; public FileReferenceCompressor(FileReferen...
fixed
private OutputStream compressedOutputStream(File outputFile) throws IOException { FileOutputStream out = new FileOutputStream(outputFile); switch (type) { case compressed: log.log(Level.FINE, () -> "Compressing with compression type " + compressionType); return switch (compressionType) { case gzip -> new GZIPOutputStre...
throw new RuntimeException("Unknown file reference type " + type);
private OutputStream compressedOutputStream(File outputFile) throws IOException { FileOutputStream out = new FileOutputStream(outputFile); switch (type) { case compressed: log.log(Level.FINE, () -> "Compressing with compression type " + compressionType); return switch (compressionType) { case gzip -> new GZIPOutputStre...
class FileReferenceCompressor { private static final Logger log = Logger.getLogger(FileReferenceCompressor.class.getName()); private static final int recurseDepth = 100; private final FileReferenceData.Type type; private final FileReferenceData.CompressionType compressionType; public FileReferenceCompressor(FileReferen...
class FileReferenceCompressor { private static final Logger log = Logger.getLogger(FileReferenceCompressor.class.getName()); private static final int recurseDepth = 100; private final FileReferenceData.Type type; private final FileReferenceData.CompressionType compressionType; public FileReferenceCompressor(FileReferen...
fixed
private InputStream decompressedInputStream(File inputFile) throws IOException { FileInputStream in = new FileInputStream(inputFile); switch (type) { case compressed: log.log(Level.FINE, () -> "Decompressing with compression type " + compressionType); return switch (compressionType) { case gzip -> new GZIPInputStream(i...
throw new RuntimeException("Unknown file reference type " + type);
private InputStream decompressedInputStream(File inputFile) throws IOException { FileInputStream in = new FileInputStream(inputFile); switch (type) { case compressed: log.log(Level.FINE, () -> "Decompressing with compression type " + compressionType); return switch (compressionType) { case gzip -> new GZIPInputStream(i...
class FileReferenceCompressor { private static final Logger log = Logger.getLogger(FileReferenceCompressor.class.getName()); private static final int recurseDepth = 100; private final FileReferenceData.Type type; private final FileReferenceData.CompressionType compressionType; public FileReferenceCompressor(FileReferen...
class FileReferenceCompressor { private static final Logger log = Logger.getLogger(FileReferenceCompressor.class.getName()); private static final int recurseDepth = 100; private final FileReferenceData.Type type; private final FileReferenceData.CompressionType compressionType; public FileReferenceCompressor(FileReferen...
Ah, yes, not really related, but let's just leave it here. @hakonhall FYI :)
void testClient() throws IOException, ExecutionException, InterruptedException, TimeoutException { for (Compression compression : Compression.values()) { try (ApacheCluster cluster = new ApacheCluster(new FeedClientBuilderImpl(List.of(URI.create("http: .setCompression(compression))) { server.stubFor(any(anyUrl())) .set...
Duration.ofSeconds(10)),
void testClient() throws IOException, ExecutionException, InterruptedException, TimeoutException { for (Compression compression : Compression.values()) { try (ApacheCluster cluster = new ApacheCluster(new FeedClientBuilderImpl(List.of(URI.create("http: .setCompression(compression))) { server.stubFor(any(anyUrl())) .set...
class ApacheClusterTest { @RegisterExtension final WireMockExtension server = new WireMockExtension(); @Test }
class ApacheClusterTest { @RegisterExtension final WireMockExtension server = new WireMockExtension(); @Test }
If the hash map only computes the hash value once, or we cache the hash value, the merge process could be simpler.
private void mergeGroup(Group srcGroup, Group dstGroup) { if (srcGroup == rootGroup) { rootGroup = dstGroup; } List<GroupExpression> needReinsertedExpressions = Lists.newArrayList(); for (Iterator<Map.Entry<GroupExpression, GroupExpression>> iterator = groupExpressions.entrySet().iterator(); iterator.hasNext(); ) { Gro...
GroupExpression existGroupExpression = groupExpressions.get(groupExpression);
private void mergeGroup(Group srcGroup, Group dstGroup) { groups.remove(srcGroup); if (srcGroup == rootGroup) { rootGroup = dstGroup; } List<GroupExpression> needReinsertedExpressions = Lists.newArrayList(); for (Iterator<Map.Entry<GroupExpression, GroupExpression>> iterator = groupExpressions.entrySet().iterator(); it...
class Memo { private static final Logger LOG = LogManager.getLogger(Memo.class); private int nextGroupId = 0; private final List<Group> groups; private Group rootGroup; /** * The map value is root group id for the GroupExpression. * We need to store group id because when {@see insertGroupExpression} * we need to get ex...
class Memo { private static final Logger LOG = LogManager.getLogger(Memo.class); private int nextGroupId = 0; private final List<Group> groups; private Group rootGroup; /** * The map value is root group id for the GroupExpression. * We need to store group id because when {@see insertGroupExpression} * we need to get ex...
Missing newline after `if`.
static void validateAndConsolidate(Map<String, Map<RegionName, List<ZoneEndpoint>>> in, Map<ClusterSpec.Id, Map<ZoneId, ZoneEndpoint>> out) { in.forEach((cluster, regions) -> { List<ZoneEndpoint> wildcards = regions.remove(null); ZoneEndpoint wildcardZoneEndpoint = null; ZoneEndpoint wildcardPrivateEndpoint = null; if ...
if (wildcards != null) for (ZoneEndpoint endpoint : wildcards) {
static void validateAndConsolidate(Map<String, Map<RegionName, List<ZoneEndpoint>>> in, Map<ClusterSpec.Id, Map<ZoneId, ZoneEndpoint>> out) { in.forEach((cluster, regions) -> { List<ZoneEndpoint> wildcards = regions.remove(null); ZoneEndpoint wildcardZoneEndpoint = null; ZoneEndpoint wildcardPrivateEndpoint = null; if ...
class DeploymentSpecXmlReader { private static final String deploymentTag = "deployment"; private static final String instanceTag = "instance"; private static final String tagsTag = "tags"; private static final String testTag = "test"; private static final String stagingTag = "staging"; private static final String devT...
class DeploymentSpecXmlReader { private static final String deploymentTag = "deployment"; private static final String instanceTag = "instance"; private static final String tagsTag = "tags"; private static final String testTag = "test"; private static final String stagingTag = "staging"; private static final String devT...
Fixed.
static void validateAndConsolidate(Map<String, Map<RegionName, List<ZoneEndpoint>>> in, Map<ClusterSpec.Id, Map<ZoneId, ZoneEndpoint>> out) { in.forEach((cluster, regions) -> { List<ZoneEndpoint> wildcards = regions.remove(null); ZoneEndpoint wildcardZoneEndpoint = null; ZoneEndpoint wildcardPrivateEndpoint = null; if ...
if (wildcards != null) for (ZoneEndpoint endpoint : wildcards) {
static void validateAndConsolidate(Map<String, Map<RegionName, List<ZoneEndpoint>>> in, Map<ClusterSpec.Id, Map<ZoneId, ZoneEndpoint>> out) { in.forEach((cluster, regions) -> { List<ZoneEndpoint> wildcards = regions.remove(null); ZoneEndpoint wildcardZoneEndpoint = null; ZoneEndpoint wildcardPrivateEndpoint = null; if ...
class DeploymentSpecXmlReader { private static final String deploymentTag = "deployment"; private static final String instanceTag = "instance"; private static final String tagsTag = "tags"; private static final String testTag = "test"; private static final String stagingTag = "staging"; private static final String devT...
class DeploymentSpecXmlReader { private static final String deploymentTag = "deployment"; private static final String instanceTag = "instance"; private static final String tagsTag = "tags"; private static final String testTag = "test"; private static final String stagingTag = "staging"; private static final String devT...
The first controller upgrading will fail to dispatch any existing request with data only (if there any in the queue), but they are rare and the other controllers should dispatch those before all are upgraded.
private RemoveRecords removeRecordsFromSlime(Inspector object) { var type = Record.Type.valueOf(object.field(typeField).asString()); var name = RecordName.from(object.field(nameField).asString()); var data = SlimeUtils.optionalString(object.field(dataField)).map(RecordData::from); return new RemoveRecords(type, name, d...
var name = RecordName.from(object.field(nameField).asString());
private RemoveRecords removeRecordsFromSlime(Inspector object) { var type = Record.Type.valueOf(object.field(typeField).asString()); var name = RecordName.from(object.field(nameField).asString()); var data = SlimeUtils.optionalString(object.field(dataField)).map(RecordData::from); return new RemoveRecords(type, name, d...
class NameServiceQueueSerializer { private static final String requestsField = "requests"; private static final String requestType = "requestType"; private static final String recordsField = "records"; private static final String typeField = "type"; private static final String nameField = "name"; private static final S...
class NameServiceQueueSerializer { private static final String requestsField = "requests"; private static final String requestType = "requestType"; private static final String recordsField = "records"; private static final String typeField = "type"; private static final String nameField = "name"; private static final S...
So we just drop deletion of unmatched records? Should we log a warning when this happens?
public void dispatchTo(NameService nameService) { List<Record> completeRecords = nameService.findRecords(type, name).stream() .filter(record -> data.isEmpty() || matchingFqdnIn(data.get(), record)) .toList(); nameService.removeRecords(completeRecords); }
nameService.removeRecords(completeRecords);
public void dispatchTo(NameService nameService) { List<Record> completeRecords = nameService.findRecords(type, name).stream() .filter(record -> data.isEmpty() || matchingFqdnIn(data.get(), record)) .toList(); nameService.removeRecords(completeRecords); }
class RemoveRecords implements NameServiceRequest { private final Record.Type type; private final RecordName name; private final Optional<RecordData> data; public RemoveRecords(Record.Type type, RecordName name) { this(type, name, Optional.empty()); } public RemoveRecords(Record.Type type, RecordName name, RecordData d...
class RemoveRecords implements NameServiceRequest { private final Record.Type type; private final RecordName name; private final Optional<RecordData> data; public RemoveRecords(Record.Type type, RecordName name) { this(type, name, Optional.empty()); } public RemoveRecords(Record.Type type, RecordName name, RecordData d...
The logic should be the same as before this change. Not sure what you mean by unmatched here? If you specify both name+data you specifically want to only delete records matching both fields.
public void dispatchTo(NameService nameService) { List<Record> completeRecords = nameService.findRecords(type, name).stream() .filter(record -> data.isEmpty() || matchingFqdnIn(data.get(), record)) .toList(); nameService.removeRecords(completeRecords); }
nameService.removeRecords(completeRecords);
public void dispatchTo(NameService nameService) { List<Record> completeRecords = nameService.findRecords(type, name).stream() .filter(record -> data.isEmpty() || matchingFqdnIn(data.get(), record)) .toList(); nameService.removeRecords(completeRecords); }
class RemoveRecords implements NameServiceRequest { private final Record.Type type; private final RecordName name; private final Optional<RecordData> data; public RemoveRecords(Record.Type type, RecordName name) { this(type, name, Optional.empty()); } public RemoveRecords(Record.Type type, RecordName name, RecordData d...
class RemoveRecords implements NameServiceRequest { private final Record.Type type; private final RecordName name; private final Optional<RecordData> data; public RemoveRecords(Record.Type type, RecordName name) { this(type, name, Optional.empty()); } public RemoveRecords(Record.Type type, RecordName name, RecordData d...
Aha, that sounds right.
public void dispatchTo(NameService nameService) { List<Record> completeRecords = nameService.findRecords(type, name).stream() .filter(record -> data.isEmpty() || matchingFqdnIn(data.get(), record)) .toList(); nameService.removeRecords(completeRecords); }
nameService.removeRecords(completeRecords);
public void dispatchTo(NameService nameService) { List<Record> completeRecords = nameService.findRecords(type, name).stream() .filter(record -> data.isEmpty() || matchingFqdnIn(data.get(), record)) .toList(); nameService.removeRecords(completeRecords); }
class RemoveRecords implements NameServiceRequest { private final Record.Type type; private final RecordName name; private final Optional<RecordData> data; public RemoveRecords(Record.Type type, RecordName name) { this(type, name, Optional.empty()); } public RemoveRecords(Record.Type type, RecordName name, RecordData d...
class RemoveRecords implements NameServiceRequest { private final Record.Type type; private final RecordName name; private final Optional<RecordData> data; public RemoveRecords(Record.Type type, RecordName name) { this(type, name, Optional.empty()); } public RemoveRecords(Record.Type type, RecordName name, RecordData d...
This is the only one I identified as an operator/cross-application event. I'm not even sure we need this distinction, though, because this also happens per application ... so we could probably just do all of these like usual, too? That would simplify a bit.
public void setRoutingStatus(ZoneId zone, RoutingStatus.Value value) { try (var lock = db.lockRoutingPolicies()) { db.writeZoneRoutingPolicy(new ZoneRoutingPolicy(zone, RoutingStatus.create(value, RoutingStatus.Agent.operator, controller.clock().instant()))); Map<ApplicationId, RoutingPolicyList> allPolicies = readAll(...
updateGlobalDnsOf(instancePolicies, Set.of(), Optional.empty(), lock);
public void setRoutingStatus(ZoneId zone, RoutingStatus.Value value) { try (var lock = db.lockRoutingPolicies()) { db.writeZoneRoutingPolicy(new ZoneRoutingPolicy(zone, RoutingStatus.create(value, RoutingStatus.Agent.operator, controller.clock().instant()))); Map<ApplicationId, RoutingPolicyList> allPolicies = readAll(...
class RoutingPolicies { private final Controller controller; private final CuratorDb db; public RoutingPolicies(Controller controller) { this.controller = Objects.requireNonNull(controller, "controller must be non-null"); this.db = controller.curator(); try (var lock = db.lockRoutingPolicies()) { for (var policy : db.r...
class RoutingPolicies { private final Controller controller; private final CuratorDb db; public RoutingPolicies(Controller controller) { this.controller = Objects.requireNonNull(controller, "controller must be non-null"); this.db = controller.curator(); try (var lock = db.lockRoutingPolicies()) { for (var policy : db.r...
OTOH, this _is_ the right treatment, should any owner-less request appear in the queue, so perhaps leave it like this?
public void setRoutingStatus(ZoneId zone, RoutingStatus.Value value) { try (var lock = db.lockRoutingPolicies()) { db.writeZoneRoutingPolicy(new ZoneRoutingPolicy(zone, RoutingStatus.create(value, RoutingStatus.Agent.operator, controller.clock().instant()))); Map<ApplicationId, RoutingPolicyList> allPolicies = readAll(...
updateGlobalDnsOf(instancePolicies, Set.of(), Optional.empty(), lock);
public void setRoutingStatus(ZoneId zone, RoutingStatus.Value value) { try (var lock = db.lockRoutingPolicies()) { db.writeZoneRoutingPolicy(new ZoneRoutingPolicy(zone, RoutingStatus.create(value, RoutingStatus.Agent.operator, controller.clock().instant()))); Map<ApplicationId, RoutingPolicyList> allPolicies = readAll(...
class RoutingPolicies { private final Controller controller; private final CuratorDb db; public RoutingPolicies(Controller controller) { this.controller = Objects.requireNonNull(controller, "controller must be non-null"); this.db = controller.curator(); try (var lock = db.lockRoutingPolicies()) { for (var policy : db.r...
class RoutingPolicies { private final Controller controller; private final CuratorDb db; public RoutingPolicies(Controller controller) { this.controller = Objects.requireNonNull(controller, "controller must be non-null"); this.db = controller.curator(); try (var lock = db.lockRoutingPolicies()) { for (var policy : db.r...
(Could alternatively assign an owner to each of the requests generated by taking zones in/out.)
public void setRoutingStatus(ZoneId zone, RoutingStatus.Value value) { try (var lock = db.lockRoutingPolicies()) { db.writeZoneRoutingPolicy(new ZoneRoutingPolicy(zone, RoutingStatus.create(value, RoutingStatus.Agent.operator, controller.clock().instant()))); Map<ApplicationId, RoutingPolicyList> allPolicies = readAll(...
updateGlobalDnsOf(instancePolicies, Set.of(), Optional.empty(), lock);
public void setRoutingStatus(ZoneId zone, RoutingStatus.Value value) { try (var lock = db.lockRoutingPolicies()) { db.writeZoneRoutingPolicy(new ZoneRoutingPolicy(zone, RoutingStatus.create(value, RoutingStatus.Agent.operator, controller.clock().instant()))); Map<ApplicationId, RoutingPolicyList> allPolicies = readAll(...
class RoutingPolicies { private final Controller controller; private final CuratorDb db; public RoutingPolicies(Controller controller) { this.controller = Objects.requireNonNull(controller, "controller must be non-null"); this.db = controller.curator(); try (var lock = db.lockRoutingPolicies()) { for (var policy : db.r...
class RoutingPolicies { private final Controller controller; private final CuratorDb db; public RoutingPolicies(Controller controller) { this.controller = Objects.requireNonNull(controller, "controller must be non-null"); this.db = controller.curator(); try (var lock = db.lockRoutingPolicies()) { for (var policy : db.r...
Agree that we don't need this. Preserving the order per application is sufficient.
public void setRoutingStatus(ZoneId zone, RoutingStatus.Value value) { try (var lock = db.lockRoutingPolicies()) { db.writeZoneRoutingPolicy(new ZoneRoutingPolicy(zone, RoutingStatus.create(value, RoutingStatus.Agent.operator, controller.clock().instant()))); Map<ApplicationId, RoutingPolicyList> allPolicies = readAll(...
updateGlobalDnsOf(instancePolicies, Set.of(), Optional.empty(), lock);
public void setRoutingStatus(ZoneId zone, RoutingStatus.Value value) { try (var lock = db.lockRoutingPolicies()) { db.writeZoneRoutingPolicy(new ZoneRoutingPolicy(zone, RoutingStatus.create(value, RoutingStatus.Agent.operator, controller.clock().instant()))); Map<ApplicationId, RoutingPolicyList> allPolicies = readAll(...
class RoutingPolicies { private final Controller controller; private final CuratorDb db; public RoutingPolicies(Controller controller) { this.controller = Objects.requireNonNull(controller, "controller must be non-null"); this.db = controller.curator(); try (var lock = db.lockRoutingPolicies()) { for (var policy : db.r...
class RoutingPolicies { private final Controller controller; private final CuratorDb db; public RoutingPolicies(Controller controller) { this.controller = Objects.requireNonNull(controller, "controller must be non-null"); this.db = controller.curator(); try (var lock = db.lockRoutingPolicies()) { for (var policy : db.r...
missing negation, fixed in next commit
protected double maintain() { int requestCount = trueIntervalInSeconds(); try (var lock = db.lockNameServiceQueue()) { var queue = db.readNameServiceQueue(); if (queue.requests().isEmpty() || requestCount == 0) return 1.0; var instant = clock.instant(); var remaining = queue.dispatchTo(nameService, requestCount); var d...
.filter(new HashSet<>(remaining.requests())::remove)
protected double maintain() { int requestCount = trueIntervalInSeconds(); NameServiceQueue initial; try (var lock = db.lockNameServiceQueue()) { initial = db.readNameServiceQueue(); } if (initial.requests().isEmpty() || requestCount == 0) return 1.0; var instant = clock.instant(); var remaining = initial.dispatchTo(nam...
class NameServiceDispatcher extends ControllerMaintainer { private final Clock clock; private final CuratorDb db; private final NameService nameService; public NameServiceDispatcher(Controller controller, Duration interval) { super(controller, interval); this.clock = controller.clock(); this.db = controller.curator(); ...
class NameServiceDispatcher extends ControllerMaintainer { private final Clock clock; private final CuratorDb db; private final NameService nameService; NameServiceDispatcher(Controller controller, NameService nameService, Duration interval) { super(controller, interval); this.clock = controller.clock(); this.db = cont...
Note the "error" of removing req1 instead of req3, which is really the same request. If req2 was req1's inverse, this would be significant. The odds of this happening seem _very_ low.
void testDispatch() { Deque<Consumer<RecordName>> expectations = new ArrayDeque<>(); var nameService = new NameService() { @Override public Record createRecord(Type type, RecordName name, RecordData data) { expectations.pop().accept(name); return null; } @Override public List<Record> createAlias(RecordName name, Set<Al...
assertEquals(List.of(req2, req3, req4, req6, req7, req8), tester.curator().readNameServiceQueue().requests());
void testDispatch() { Deque<Consumer<RecordName>> expectations = new ArrayDeque<>(); var nameService = new NameService() { @Override public Record createRecord(Type type, RecordName name, RecordData data) { expectations.pop().accept(name); return null; } @Override public List<Record> createAlias(RecordName name, Set<Al...
class NameServiceDispatcherTest { @Test }
class NameServiceDispatcherTest { @Test }
Should the exception message be "require non-conflicting input types" instead?
private static DataType resolveInputType(Collection<? extends Expression> list) { DataType previousInput = null; DataType previousOutput = null; for (Expression choice : list) { DataType thisInput = choice.requiredInputType(); if (previousInput == null) previousInput = thisInput; else if (thisInput != null && !previous...
throw new VerificationException(ScriptExpression.class, "Choice expressions require conflicting input types, " +
private static DataType resolveInputType(Collection<? extends Expression> list) { DataType previousInput = null; DataType previousOutput = null; for (Expression choice : list) { DataType thisInput = choice.requiredInputType(); if (previousInput == null) previousInput = thisInput; else if (thisInput != null && !previous...
class ChoiceExpression extends ExpressionList<Expression> { public ChoiceExpression() { this(List.of()); } public ChoiceExpression(Expression... choices) { this(Arrays.asList(choices)); } public ChoiceExpression(Collection<? extends Expression> choices) { super(choices, resolveInputType(choices)); } @Override protected...
class ChoiceExpression extends ExpressionList<Expression> { public ChoiceExpression() { this(List.of()); } public ChoiceExpression(Expression... choices) { this(Arrays.asList(choices)); } public ChoiceExpression(Collection<? extends Expression> choices) { super(choices, resolveInputType(choices)); } @Override protected...
Consider adding unit test that triggers this exception.
private static DataType resolveInputType(Collection<? extends Expression> list) { DataType previousInput = null; DataType previousOutput = null; for (Expression choice : list) { DataType thisInput = choice.requiredInputType(); if (previousInput == null) previousInput = thisInput; else if (thisInput != null && !previous...
throw new VerificationException(ScriptExpression.class, "Choice expressions require conflicting input types, " +
private static DataType resolveInputType(Collection<? extends Expression> list) { DataType previousInput = null; DataType previousOutput = null; for (Expression choice : list) { DataType thisInput = choice.requiredInputType(); if (previousInput == null) previousInput = thisInput; else if (thisInput != null && !previous...
class ChoiceExpression extends ExpressionList<Expression> { public ChoiceExpression() { this(List.of()); } public ChoiceExpression(Expression... choices) { this(Arrays.asList(choices)); } public ChoiceExpression(Collection<? extends Expression> choices) { super(choices, resolveInputType(choices)); } @Override protected...
class ChoiceExpression extends ExpressionList<Expression> { public ChoiceExpression() { this(List.of()); } public ChoiceExpression(Expression... choices) { this(Arrays.asList(choices)); } public ChoiceExpression(Collection<? extends Expression> choices) { super(choices, resolveInputType(choices)); } @Override protected...
Consider adding unit test that triggers this exception.
private static DataType resolveInputType(Collection<? extends Expression> list) { DataType previousInput = null; DataType previousOutput = null; for (Expression choice : list) { DataType thisInput = choice.requiredInputType(); if (previousInput == null) previousInput = thisInput; else if (thisInput != null && !previous...
throw new VerificationException(ScriptExpression.class, "Choice expressions produce conflicting output types, " +
private static DataType resolveInputType(Collection<? extends Expression> list) { DataType previousInput = null; DataType previousOutput = null; for (Expression choice : list) { DataType thisInput = choice.requiredInputType(); if (previousInput == null) previousInput = thisInput; else if (thisInput != null && !previous...
class ChoiceExpression extends ExpressionList<Expression> { public ChoiceExpression() { this(List.of()); } public ChoiceExpression(Expression... choices) { this(Arrays.asList(choices)); } public ChoiceExpression(Collection<? extends Expression> choices) { super(choices, resolveInputType(choices)); } @Override protected...
class ChoiceExpression extends ExpressionList<Expression> { public ChoiceExpression() { this(List.of()); } public ChoiceExpression(Expression... choices) { this(Arrays.asList(choices)); } public ChoiceExpression(Collection<? extends Expression> choices) { super(choices, resolveInputType(choices)); } @Override protected...
Not really since this is about what *this* choice expression requires. Tried to make that slightly clearer.
private static DataType resolveInputType(Collection<? extends Expression> list) { DataType previousInput = null; DataType previousOutput = null; for (Expression choice : list) { DataType thisInput = choice.requiredInputType(); if (previousInput == null) previousInput = thisInput; else if (thisInput != null && !previous...
throw new VerificationException(ScriptExpression.class, "Choice expressions require conflicting input types, " +
private static DataType resolveInputType(Collection<? extends Expression> list) { DataType previousInput = null; DataType previousOutput = null; for (Expression choice : list) { DataType thisInput = choice.requiredInputType(); if (previousInput == null) previousInput = thisInput; else if (thisInput != null && !previous...
class ChoiceExpression extends ExpressionList<Expression> { public ChoiceExpression() { this(List.of()); } public ChoiceExpression(Expression... choices) { this(Arrays.asList(choices)); } public ChoiceExpression(Collection<? extends Expression> choices) { super(choices, resolveInputType(choices)); } @Override protected...
class ChoiceExpression extends ExpressionList<Expression> { public ChoiceExpression() { this(List.of()); } public ChoiceExpression(Expression... choices) { this(Arrays.asList(choices)); } public ChoiceExpression(Collection<? extends Expression> choices) { super(choices, resolveInputType(choices)); } @Override protected...
Not related, but a tiny bug that should be fixed anyway. I doubt it often matters.
public void deactivate(ApplicationId instanceId, ZoneId zone) { TenantAndApplicationId applicationId = TenantAndApplicationId.from(instanceId); try (Mutex deploymentLock = lockForDeployment(instanceId, zone)) { try (Mutex lock = lock(applicationId)) { Optional<LockedApplication> application = getApplication(application...
try (Mutex deploymentLock = lockForDeployment(instanceId, zone)) {
public void deactivate(ApplicationId instanceId, ZoneId zone) { TenantAndApplicationId applicationId = TenantAndApplicationId.from(instanceId); try (Mutex deploymentLock = lockForDeployment(instanceId, zone)) { try (Mutex lock = lock(applicationId)) { Optional<LockedApplication> application = getApplication(application...
class ApplicationController { private static final Logger log = Logger.getLogger(ApplicationController.class.getName()); /** The controller owning this */ private final Controller controller; /** For persistence */ private final CuratorDb curator; private final ArtifactRepository artifactRepository; private final Appli...
class ApplicationController { private static final Logger log = Logger.getLogger(ApplicationController.class.getName()); /** The controller owning this */ private final Controller controller; /** For persistence */ private final CuratorDb curator; private final ArtifactRepository artifactRepository; private final Appli...
Shouldn't this be `max.nodes() / min.groups()` ?
public Capacity applyOn(Capacity capacity, ApplicationId application, boolean exclusive) { var min = applyOn(capacity.minResources(), capacity, application, exclusive); var max = applyOn(capacity.maxResources(), capacity, application, exclusive); var groupSize = capacity.groupSize().fromAtMost(max.nodes() / max.groups(...
var groupSize = capacity.groupSize().fromAtMost(max.nodes() / max.groups())
public Capacity applyOn(Capacity capacity, ApplicationId application, boolean exclusive) { var min = applyOn(capacity.minResources(), capacity, application, exclusive); var max = applyOn(capacity.maxResources(), capacity, application, exclusive); var groupSize = capacity.groupSize().fromAtMost(max.nodes() / min.groups(...
class CapacityPolicies { private final NodeRepository nodeRepository; private final Zone zone; private final StringFlag adminClusterNodeArchitecture; public CapacityPolicies(NodeRepository nodeRepository) { this.nodeRepository = nodeRepository; this.zone = nodeRepository.zone(); this.adminClusterNodeArchitecture = Perm...
class CapacityPolicies { private final NodeRepository nodeRepository; private final Zone zone; private final StringFlag adminClusterNodeArchitecture; public CapacityPolicies(NodeRepository nodeRepository) { this.nodeRepository = nodeRepository; this.zone = nodeRepository.zone(); this.adminClusterNodeArchitecture = Perm...
Good question! I think that is slightly worse in the cases where the other limits are moved, which is what this is for (e.g we have configured that we want multiple large groups, this will hard prioritize _large_ over _multiple_ in perf), but otoh, it avoids ever moving this constraint in prod, which we don't want, so ...
public Capacity applyOn(Capacity capacity, ApplicationId application, boolean exclusive) { var min = applyOn(capacity.minResources(), capacity, application, exclusive); var max = applyOn(capacity.maxResources(), capacity, application, exclusive); var groupSize = capacity.groupSize().fromAtMost(max.nodes() / max.groups(...
var groupSize = capacity.groupSize().fromAtMost(max.nodes() / max.groups())
public Capacity applyOn(Capacity capacity, ApplicationId application, boolean exclusive) { var min = applyOn(capacity.minResources(), capacity, application, exclusive); var max = applyOn(capacity.maxResources(), capacity, application, exclusive); var groupSize = capacity.groupSize().fromAtMost(max.nodes() / min.groups(...
class CapacityPolicies { private final NodeRepository nodeRepository; private final Zone zone; private final StringFlag adminClusterNodeArchitecture; public CapacityPolicies(NodeRepository nodeRepository) { this.nodeRepository = nodeRepository; this.zone = nodeRepository.zone(); this.adminClusterNodeArchitecture = Perm...
class CapacityPolicies { private final NodeRepository nodeRepository; private final Zone zone; private final StringFlag adminClusterNodeArchitecture; public CapacityPolicies(NodeRepository nodeRepository) { this.nodeRepository = nodeRepository; this.zone = nodeRepository.zone(); this.adminClusterNodeArchitecture = Perm...
Getting closer ... :-)
public void testHierarchicalDistributionCapacity() throws Exception { StorDistributionConfig.Builder config = buildHierarchicalConfig(6, 3, 1, "1|*", 3); config.group.get(1).capacity(3); test = new DistributionTestFactory("group-capacity") .setNodeCount(getNodeCount(1, 3, 3)).setDistribution(config); int [] counts = ne...
int [] counts = new int[9];
public void testHierarchicalDistributionCapacity() throws Exception { StorDistributionConfig.Builder config = buildHierarchicalConfig(6, 3, 1, "1|*", 3); config.group.get(1).capacity(3); test = new DistributionTestFactory("group-capacity") .setNodeCount(getNodeCount(1, 3, 3)).setDistribution(config); int [] counts = ne...
class DistributionTestCase { private DistributionTestFactory test; /** Build a set of buckets to test that should represent the entire bucket space well. */ private static List<BucketId> getTestBuckets() { return getTestBuckets(16); } private static List<BucketId> getTestBuckets(int minUsedBits) { List<BucketId> bucket...
class DistributionTestCase { private DistributionTestFactory test; /** Build a set of buckets to test that should represent the entire bucket space well. */ private static List<BucketId> getTestBuckets() { return getTestBuckets(16); } private static List<BucketId> getTestBuckets(int minUsedBits) { List<BucketId> bucket...
Ouch, good catch...
public void testConnect() throws ListenFailedException { Test.Orb server = new Test.Orb(new Transport()); Test.Orb client = new Test.Orb(new Transport()); Acceptor acceptor = server.listen(new Spec(0)); Connection target = (Connection) client.connect(new Spec("localhost", acceptor.port())); for (int i = 0; i < 100;...
if (target.isClosed()) {
public void testConnect() throws ListenFailedException { Test.Orb server = new Test.Orb(new Transport()); Test.Orb client = new Test.Orb(new Transport()); Acceptor acceptor = server.listen(new Spec(0)); Connection target = (Connection) client.connect(new Spec("localhost", acceptor.port())); for (int i = 0; i < 100;...
class ConnectTest { @org.junit.Test }
class ConnectTest { @org.junit.Test }
The comment must be updated. Suggestion: _Summary fields with the original supported type is always present in the document type. However, if the source of that summary field is a single explicit source that exists in the schema we use that as source instead as this is handled by the backend code. This is a move in th...
public static String getSource(SummaryField summaryField, Schema schema) { if (isOriginalSupportedType(summaryField.getDataType())) { if (summaryField.hasExplicitSingleSource()) { String sourceFieldName = summaryField.getSingleSource(); ImmutableSDField source = schema.getField(sourceFieldName); if (source != null) { r...
if (isOriginalSupportedType(summaryField.getDataType())) {
public static String getSource(SummaryField summaryField, Schema schema) { if (isOriginalSupportedType(summaryField.getDataType())) { if (summaryField.hasExplicitSingleSource()) { String sourceFieldName = summaryField.getSingleSource(); ImmutableSDField source = schema.getField(sourceFieldName); if (source != null) { r...
class DynamicSummaryTransformUtils { public static boolean hasSupportedType(SummaryField field) { return isSupportedType(field.getDataType()); } public static boolean isSupportedType(DataType type) { return isOriginalSupportedType(type) || isNewSupportedType(type); } private static boolean isOriginalSupportedType(DataT...
class DynamicSummaryTransformUtils { public static boolean hasSupportedType(SummaryField field) { return isSupportedType(field.getDataType()); } public static boolean isSupportedType(DataType type) { return isOriginalSupportedType(type) || isNewSupportedType(type); } private static boolean isOriginalSupportedType(DataT...
Comment has been updated.
public static String getSource(SummaryField summaryField, Schema schema) { if (isOriginalSupportedType(summaryField.getDataType())) { if (summaryField.hasExplicitSingleSource()) { String sourceFieldName = summaryField.getSingleSource(); ImmutableSDField source = schema.getField(sourceFieldName); if (source != null) { r...
if (isOriginalSupportedType(summaryField.getDataType())) {
public static String getSource(SummaryField summaryField, Schema schema) { if (isOriginalSupportedType(summaryField.getDataType())) { if (summaryField.hasExplicitSingleSource()) { String sourceFieldName = summaryField.getSingleSource(); ImmutableSDField source = schema.getField(sourceFieldName); if (source != null) { r...
class DynamicSummaryTransformUtils { public static boolean hasSupportedType(SummaryField field) { return isSupportedType(field.getDataType()); } public static boolean isSupportedType(DataType type) { return isOriginalSupportedType(type) || isNewSupportedType(type); } private static boolean isOriginalSupportedType(DataT...
class DynamicSummaryTransformUtils { public static boolean hasSupportedType(SummaryField field) { return isSupportedType(field.getDataType()); } public static boolean isSupportedType(DataType type) { return isOriginalSupportedType(type) || isNewSupportedType(type); } private static boolean isOriginalSupportedType(DataT...
Consider using `Validation.requireInRange`.
public record RegionMember(RegionName region, double fraction) { public RegionMember { if (fraction < 0 || fraction > 1) throw new IllegalArgumentException("Fraction must be a number between 0.0 and 1.0, but got " + fraction); } }
throw new IllegalArgumentException("Fraction must be a number between 0.0 and 1.0, but got " + fraction);
public record RegionMember(RegionName region, double fraction) { public RegionMember { if (fraction < 0 || fraction > 1) throw new IllegalArgumentException("Fraction must be a number between 0.0 and 1.0, but got " + fraction); } }
class Group { private final Duration deadline; private final List<RegionMember> members; public Group(List<RegionMember> members, Duration deadline) { this.members = List.copyOf(members); this.deadline = deadline; } public List<RegionMember> members() { return members; } /** * Returns the max time until the other regio...
class Group { private final Duration deadline; private final List<RegionMember> members; public Group(List<RegionMember> members, Duration deadline) { this.members = List.copyOf(members); this.deadline = deadline; } public List<RegionMember> members() { return members; } /** * Returns the max time until the other regio...
I assume we'll discuss the syntax later. Maybe `traffic-share` or simply `share` would be more descriptive?
public void complexBcpSetup() { StringReader r = new StringReader(""" <deployment version='1.0'> <instance id='beta'> <prod> <region>us-east1</region> <region>us-east2</region> </prod> <bcp> <group deadline="60m"> <region>us-east1</region> <region>us-east2</region> </group> </bcp> </instance> <instance id='main'> <prod...
<region fraction="0.3">us-central1</region>
public void complexBcpSetup() { StringReader r = new StringReader(""" <deployment version='1.0'> <instance id='beta'> <prod> <region>us-east1</region> <region>us-east2</region> </prod> <bcp> <group deadline="60m"> <region>us-east1</region> <region>us-east2</region> </group> </bcp> </instance> <instance id='main'> <prod...
class DeploymentSpecWithBcpTest { @Test public void minimalProductionSpecWithExplicitBcp() { StringReader r = new StringReader(""" <deployment version='1.0'> <instance id='default'> <prod> <region>us-east1</region> <region>us-west1</region> </prod> </instance> <bcp> <group> <region>us-east1</region> <region>us-west1</r...
class DeploymentSpecWithBcpTest { @Test public void minimalProductionSpecWithExplicitBcp() { StringReader r = new StringReader(""" <deployment version='1.0'> <instance id='default'> <prod> <region>us-east1</region> <region>us-west1</region> </prod> </instance> <bcp> <group> <region>us-east1</region> <region>us-west1</r...
Thanks for the tip. I'll do it in the next PR.
public record RegionMember(RegionName region, double fraction) { public RegionMember { if (fraction < 0 || fraction > 1) throw new IllegalArgumentException("Fraction must be a number between 0.0 and 1.0, but got " + fraction); } }
throw new IllegalArgumentException("Fraction must be a number between 0.0 and 1.0, but got " + fraction);
public record RegionMember(RegionName region, double fraction) { public RegionMember { if (fraction < 0 || fraction > 1) throw new IllegalArgumentException("Fraction must be a number between 0.0 and 1.0, but got " + fraction); } }
class Group { private final Duration deadline; private final List<RegionMember> members; public Group(List<RegionMember> members, Duration deadline) { this.members = List.copyOf(members); this.deadline = deadline; } public List<RegionMember> members() { return members; } /** * Returns the max time until the other regio...
class Group { private final Duration deadline; private final List<RegionMember> members; public Group(List<RegionMember> members, Duration deadline) { this.members = List.copyOf(members); this.deadline = deadline; } public List<RegionMember> members() { return members; } /** * Returns the max time until the other regio...
Yes, on Thursday. This isn't traffic share though, but fractional membership ...
public void complexBcpSetup() { StringReader r = new StringReader(""" <deployment version='1.0'> <instance id='beta'> <prod> <region>us-east1</region> <region>us-east2</region> </prod> <bcp> <group deadline="60m"> <region>us-east1</region> <region>us-east2</region> </group> </bcp> </instance> <instance id='main'> <prod...
<region fraction="0.3">us-central1</region>
public void complexBcpSetup() { StringReader r = new StringReader(""" <deployment version='1.0'> <instance id='beta'> <prod> <region>us-east1</region> <region>us-east2</region> </prod> <bcp> <group deadline="60m"> <region>us-east1</region> <region>us-east2</region> </group> </bcp> </instance> <instance id='main'> <prod...
class DeploymentSpecWithBcpTest { @Test public void minimalProductionSpecWithExplicitBcp() { StringReader r = new StringReader(""" <deployment version='1.0'> <instance id='default'> <prod> <region>us-east1</region> <region>us-west1</region> </prod> </instance> <bcp> <group> <region>us-east1</region> <region>us-west1</r...
class DeploymentSpecWithBcpTest { @Test public void minimalProductionSpecWithExplicitBcp() { StringReader r = new StringReader(""" <deployment version='1.0'> <instance id='default'> <prod> <region>us-east1</region> <region>us-west1</region> </prod> </instance> <bcp> <group> <region>us-east1</region> <region>us-west1</r...
without
public void provision_load_balancer_combined_cluster() { Supplier<List<LoadBalancer>> lbs = () -> tester.nodeRepository().loadBalancers().list(app1).asList(); var combinedId = ClusterSpec.Id.from("container1"); var nodes = prepare(app1, clusterRequest(ClusterSpec.Type.combined, ClusterSpec.Id.from("content1"), Optional...
assertEquals("Prepare provisions load balancer wihtout reserved nodes", 0, lbs.get().get(0).instance().get().reals().size());
public void provision_load_balancer_combined_cluster() { Supplier<List<LoadBalancer>> lbs = () -> tester.nodeRepository().loadBalancers().list(app1).asList(); var combinedId = ClusterSpec.Id.from("container1"); var nodes = prepare(app1, clusterRequest(ClusterSpec.Type.combined, ClusterSpec.Id.from("content1"), Optional...
class LoadBalancerProvisionerTest { private final ApplicationId app1 = ApplicationId.from("tenant1", "application1", "default"); private final ApplicationId app2 = ApplicationId.from("tenant2", "application2", "default"); private final ApplicationId infraApp1 = ApplicationId.from("vespa", "tenant-host", "default"); pri...
class LoadBalancerProvisionerTest { private final ApplicationId app1 = ApplicationId.from("tenant1", "application1", "default"); private final ApplicationId app2 = ApplicationId.from("tenant2", "application2", "default"); private final ApplicationId infraApp1 = ApplicationId.from("vespa", "tenant-host", "default"); pri...
Hopefully we'll decide on (and enforce) a code style some day. 🙂
private static void requireInstance(LoadBalancerId id, LoadBalancer loadBalancer, CloudAccount cloudAccount, ZoneEndpoint zoneEndpoint) { if (loadBalancer.instance().isEmpty()) { throw new LoadBalancerServiceException("Could not provision " + id + ". The operation will be retried on next deployment"); } if ( ! inAccoun...
if ( ! inAccount(cloudAccount, loadBalancer)) {
private static void requireInstance(LoadBalancerId id, LoadBalancer loadBalancer, CloudAccount cloudAccount, ZoneEndpoint zoneEndpoint) { if (loadBalancer.instance().isEmpty()) { throw new LoadBalancerServiceException("Could not provision " + id + ". The operation will be retried on next deployment"); } if ( ! inAccoun...
class LoadBalancerProvisioner { private static final Logger log = Logger.getLogger(LoadBalancerProvisioner.class.getName()); private final NodeRepository nodeRepository; private final CuratorDb db; private final LoadBalancerService service; private final BooleanFlag deactivateRouting; public LoadBalancerProvisioner(Nod...
class LoadBalancerProvisioner { private static final Logger log = Logger.getLogger(LoadBalancerProvisioner.class.getName()); private final NodeRepository nodeRepository; private final CuratorDb db; private final LoadBalancerService service; private final BooleanFlag deactivateRouting; public LoadBalancerProvisioner(Nod...
🙈Yes.
private static void requireInstance(LoadBalancerId id, LoadBalancer loadBalancer, CloudAccount cloudAccount, ZoneEndpoint zoneEndpoint) { if (loadBalancer.instance().isEmpty()) { throw new LoadBalancerServiceException("Could not provision " + id + ". The operation will be retried on next deployment"); } if ( ! inAccoun...
if ( ! inAccount(cloudAccount, loadBalancer)) {
private static void requireInstance(LoadBalancerId id, LoadBalancer loadBalancer, CloudAccount cloudAccount, ZoneEndpoint zoneEndpoint) { if (loadBalancer.instance().isEmpty()) { throw new LoadBalancerServiceException("Could not provision " + id + ". The operation will be retried on next deployment"); } if ( ! inAccoun...
class LoadBalancerProvisioner { private static final Logger log = Logger.getLogger(LoadBalancerProvisioner.class.getName()); private final NodeRepository nodeRepository; private final CuratorDb db; private final LoadBalancerService service; private final BooleanFlag deactivateRouting; public LoadBalancerProvisioner(Nod...
class LoadBalancerProvisioner { private static final Logger log = Logger.getLogger(LoadBalancerProvisioner.class.getName()); private final NodeRepository nodeRepository; private final CuratorDb db; private final LoadBalancerService service; private final BooleanFlag deactivateRouting; public LoadBalancerProvisioner(Nod...
```suggestion assertEquals("Prepare provisions load balancer without reserved nodes", 0, lbs.get().get(0).instance().get().reals().size()); ```
public void provision_load_balancer_combined_cluster() { Supplier<List<LoadBalancer>> lbs = () -> tester.nodeRepository().loadBalancers().list(app1).asList(); var combinedId = ClusterSpec.Id.from("container1"); var nodes = prepare(app1, clusterRequest(ClusterSpec.Type.combined, ClusterSpec.Id.from("content1"), Optional...
assertEquals("Prepare provisions load balancer wihtout reserved nodes", 0, lbs.get().get(0).instance().get().reals().size());
public void provision_load_balancer_combined_cluster() { Supplier<List<LoadBalancer>> lbs = () -> tester.nodeRepository().loadBalancers().list(app1).asList(); var combinedId = ClusterSpec.Id.from("container1"); var nodes = prepare(app1, clusterRequest(ClusterSpec.Type.combined, ClusterSpec.Id.from("content1"), Optional...
class LoadBalancerProvisionerTest { private final ApplicationId app1 = ApplicationId.from("tenant1", "application1", "default"); private final ApplicationId app2 = ApplicationId.from("tenant2", "application2", "default"); private final ApplicationId infraApp1 = ApplicationId.from("vespa", "tenant-host", "default"); pri...
class LoadBalancerProvisionerTest { private final ApplicationId app1 = ApplicationId.from("tenant1", "application1", "default"); private final ApplicationId app2 = ApplicationId.from("tenant2", "application2", "default"); private final ApplicationId infraApp1 = ApplicationId.from("vespa", "tenant-host", "default"); pri...
When there are capability requirements on multiple levels, do we ever want the resulting required capability set to be the _union_ set, or is it always desired that a "more specific" capability set can be _less_ strict than a more general one?
public CapabilitySet requiredCapabilities(RequestView req) { Path pathMatcher = new Path(req.uri()); Route route = resolveRoute(pathMatcher); HandlerHolder<?> handler = resolveHandler(req.method(), route); return Optional.ofNullable(handler.config.requiredCapabilities) .or(() -> Optional.ofNullable(route.requiredCapabi...
.or(() -> Optional.ofNullable(requiredCapabilities))
public CapabilitySet requiredCapabilities(RequestView req) { Path pathMatcher = new Path(req.uri()); Route route = resolveRoute(pathMatcher); HandlerHolder<?> handler = resolveHandler(req.method(), route); return Optional.ofNullable(handler.config.requiredCapabilities) .or(() -> Optional.ofNullable(route.requiredCapabi...
class RestApiImpl implements RestApi { private static final Logger log = Logger.getLogger(RestApiImpl.class.getName()); private final Route defaultRoute; private final List<Route> routes; private final List<ExceptionMapperHolder<?>> exceptionMappers; private final List<ResponseMapperHolder<?>> responseMappers; private ...
class RestApiImpl implements RestApi { private static final Logger log = Logger.getLogger(RestApiImpl.class.getName()); private final Route defaultRoute; private final List<Route> routes; private final List<ExceptionMapperHolder<?>> exceptionMappers; private final List<ResponseMapperHolder<?>> responseMappers; private ...
Prefer List.of()
public void testWritingHostNamesToZooKeeper() throws IOException { Curator zk = new MockCurator(); BaseDeployLogger logger = new BaseDeployLogger(); Path app = Path.fromString("/1"); ZooKeeperClient zooKeeperClient = new ZooKeeperClient(zk, logger, app); zooKeeperClient.initialize(); HostSpec host1 = new HostSpec("host...
HostSpec host1 = new HostSpec("host1.yahoo.com", Optional.empty());
public void testWritingHostNamesToZooKeeper() throws IOException { Curator zk = new MockCurator(); BaseDeployLogger logger = new BaseDeployLogger(); Path app = Path.fromString("/1"); ZooKeeperClient zooKeeperClient = new ZooKeeperClient(zk, logger, app); zooKeeperClient.initialize(); HostSpec host1 = new HostSpec("host...
class ZooKeeperClientTest { @Rule public TemporaryFolder temporaryFolder = new TemporaryFolder(); private Curator zk; private final Path appPath = Path.fromString("/1"); @Before public void setupZK() throws IOException { zk = new MockCurator(); ZooKeeperClient zkc = new ZooKeeperClient(zk, new BaseDeployLogger(), appPa...
class ZooKeeperClientTest { @Rule public TemporaryFolder temporaryFolder = new TemporaryFolder(); private Curator zk; private final Path appPath = Path.fromString("/1"); @Before public void setupZK() throws IOException { zk = new MockCurator(); ZooKeeperClient zkc = new ZooKeeperClient(zk, new BaseDeployLogger(), appPa...
Arg.... color blind.....
public void testWritingHostNamesToZooKeeper() throws IOException { Curator zk = new MockCurator(); BaseDeployLogger logger = new BaseDeployLogger(); Path app = Path.fromString("/1"); ZooKeeperClient zooKeeperClient = new ZooKeeperClient(zk, logger, app); zooKeeperClient.initialize(); HostSpec host1 = new HostSpec("host...
HostSpec host1 = new HostSpec("host1.yahoo.com", Optional.empty());
public void testWritingHostNamesToZooKeeper() throws IOException { Curator zk = new MockCurator(); BaseDeployLogger logger = new BaseDeployLogger(); Path app = Path.fromString("/1"); ZooKeeperClient zooKeeperClient = new ZooKeeperClient(zk, logger, app); zooKeeperClient.initialize(); HostSpec host1 = new HostSpec("host...
class ZooKeeperClientTest { @Rule public TemporaryFolder temporaryFolder = new TemporaryFolder(); private Curator zk; private final Path appPath = Path.fromString("/1"); @Before public void setupZK() throws IOException { zk = new MockCurator(); ZooKeeperClient zkc = new ZooKeeperClient(zk, new BaseDeployLogger(), appPa...
class ZooKeeperClientTest { @Rule public TemporaryFolder temporaryFolder = new TemporaryFolder(); private Curator zk; private final Path appPath = Path.fromString("/1"); @Before public void setupZK() throws IOException { zk = new MockCurator(); ZooKeeperClient zkc = new ZooKeeperClient(zk, new BaseDeployLogger(), appPa...
Witness should be taken before we check for `INVALID`, lest we risk taking an `INVALID` witness, which means we _should_ nullify the lock, but we miss that.
private void renewLease() { Instant ourDoom = doom.get(); if (ourDoom == INVALID) { logger.log(INFO, "Lease invalidated"); return; } Instant start = clock.instant(); if (lock == null) try { lock = curator.lock(path.append("lock"), tickTimeout); logger.log(INFO, "Acquired lock for ID: " + id); } catch (UncheckedTimeoutE...
}
private void renewLease() { Instant ourDoom = doom.get(); if (ourDoom == INVALID) { logger.log(INFO, "Lease invalidated"); return; } Instant start = clock.instant(); if (lock == null) try { lock = curator.lock(path.append("lock"), tickTimeout); logger.log(INFO, "Acquired lock for ID: " + id); } catch (UncheckedTimeoutE...
class Task { enum Type { register, unregister } final Type type; final SingletonWorker singleton; final CompletableFuture<?> future = new CompletableFuture<>(); private Task(Type type, SingletonWorker singleton) { this.type = type; this.singleton = singleton; } static Task register(SingletonWorker singleton) { return n...
class Task { enum Type { register, unregister } final Type type; final SingletonWorker singleton; final CompletableFuture<?> future = new CompletableFuture<>(); private Task(Type type, SingletonWorker singleton) { this.type = type; this.singleton = singleton; } static Task register(SingletonWorker singleton) { return n...
```suggestion ```
void wireguard_peer_config_can_be_retrieved_for_configservers_and_exclave_nodes() { List<WireguardPeer> cfgPeers = nodeRepositoryApi.getConfigserverPeers(); assertEquals(2, cfgPeers.size()); assertWireguardPeer(cfgPeers.get(0), "cfg1.yahoo.com", "::201:1", "127.0.201.1", "lololololololololololololololololololololoo=")...
System.out.println(exclavePeers);
void wireguard_peer_config_can_be_retrieved_for_configservers_and_exclave_nodes() { List<WireguardPeer> cfgPeers = nodeRepositoryApi.getConfigserverPeers(); assertEquals(2, cfgPeers.size()); assertWireguardPeer(cfgPeers.get(0), "cfg1.yahoo.com", "::201:1", "127.0.201.1", "lololololololololololololololololololololoo=")...
class RealNodeRepositoryTest { private static final double delta = 0.00000001; private JDisc container; private NodeRepository nodeRepositoryApi; private int findRandomOpenPort() throws IOException { try (ServerSocket socket = new ServerSocket(0)) { socket.setReuseAddress(true); return socket.getLocalPort(); } } /** * ...
class RealNodeRepositoryTest { private static final double delta = 0.00000001; private JDisc container; private NodeRepository nodeRepositoryApi; private int findRandomOpenPort() throws IOException { try (ServerSocket socket = new ServerSocket(0)) { socket.setReuseAddress(true); return socket.getLocalPort(); } } /** * ...
Some explanation on how these numbers have been found would be nice
private double minRealMemoryGb(ClusterSpec cluster) { if (cluster.type() == ClusterSpec.Type.admin) return 0.95; return 2.3; }
return 2.3;
private double minRealMemoryGb(ClusterSpec cluster) { if (cluster.type() == ClusterSpec.Type.admin) return 0.95; return 2.3; }
class NodeResourceLimits { private final NodeRepository nodeRepository; public NodeResourceLimits(NodeRepository nodeRepository) { this.nodeRepository = nodeRepository; } /** Validates the resources applications ask for (which are in "advertised" resource space) */ public void ensureWithinAdvertisedLimits(String type, ...
class NodeResourceLimits { private final NodeRepository nodeRepository; public NodeResourceLimits(NodeRepository nodeRepository) { this.nodeRepository = nodeRepository; } /** Validates the resources applications ask for (which are in "advertised" resource space) */ public void ensureWithinAdvertisedLimits(String type, ...
Assumes shared hosts are at least 8 Gb and that host-memory feature flag is 0.6. First issue we should fix, for the second issue it seems like we should use 1.2 for AWS/public (cd and publiccd uses 1.2 already). But this change will still be better than it was, so I think this is OK
private NodeResources clusterControllerResources(ClusterSpec clusterSpec, Architecture architecture) { if (nodeRepository.exclusiveAllocation(clusterSpec)) { return versioned(clusterSpec, Map.of(new Version(0), smallestExclusiveResources())); } return versioned(clusterSpec, Map.of(new Version(0), new NodeResources(0.25...
private NodeResources clusterControllerResources(ClusterSpec clusterSpec, Architecture architecture) { if (nodeRepository.exclusiveAllocation(clusterSpec)) { return versioned(clusterSpec, Map.of(new Version(0), smallestExclusiveResources())); } return versioned(clusterSpec, Map.of(new Version(0), new NodeResources(0.25...
class CapacityPolicies { private final NodeRepository nodeRepository; private final Zone zone; private final StringFlag adminClusterNodeArchitecture; public CapacityPolicies(NodeRepository nodeRepository) { this.nodeRepository = nodeRepository; this.zone = nodeRepository.zone(); this.adminClusterNodeArchitecture = Perm...
class CapacityPolicies { private final NodeRepository nodeRepository; private final Zone zone; private final StringFlag adminClusterNodeArchitecture; public CapacityPolicies(NodeRepository nodeRepository) { this.nodeRepository = nodeRepository; this.zone = nodeRepository.zone(); this.adminClusterNodeArchitecture = Perm...
I added an explanation where they are specified, in CapacityPolicies. The numbers here just lead to a provision failure if they are not met, so they matter less.
private double minRealMemoryGb(ClusterSpec cluster) { if (cluster.type() == ClusterSpec.Type.admin) return 0.95; return 2.3; }
return 2.3;
private double minRealMemoryGb(ClusterSpec cluster) { if (cluster.type() == ClusterSpec.Type.admin) return 0.95; return 2.3; }
class NodeResourceLimits { private final NodeRepository nodeRepository; public NodeResourceLimits(NodeRepository nodeRepository) { this.nodeRepository = nodeRepository; } /** Validates the resources applications ask for (which are in "advertised" resource space) */ public void ensureWithinAdvertisedLimits(String type, ...
class NodeResourceLimits { private final NodeRepository nodeRepository; public NodeResourceLimits(NodeRepository nodeRepository) { this.nodeRepository = nodeRepository; } /** Validates the resources applications ask for (which are in "advertised" resource space) */ public void ensureWithinAdvertisedLimits(String type, ...
Yes, if the host need more we'll have to change, but hopefully not.
private NodeResources clusterControllerResources(ClusterSpec clusterSpec, Architecture architecture) { if (nodeRepository.exclusiveAllocation(clusterSpec)) { return versioned(clusterSpec, Map.of(new Version(0), smallestExclusiveResources())); } return versioned(clusterSpec, Map.of(new Version(0), new NodeResources(0.25...
private NodeResources clusterControllerResources(ClusterSpec clusterSpec, Architecture architecture) { if (nodeRepository.exclusiveAllocation(clusterSpec)) { return versioned(clusterSpec, Map.of(new Version(0), smallestExclusiveResources())); } return versioned(clusterSpec, Map.of(new Version(0), new NodeResources(0.25...
class CapacityPolicies { private final NodeRepository nodeRepository; private final Zone zone; private final StringFlag adminClusterNodeArchitecture; public CapacityPolicies(NodeRepository nodeRepository) { this.nodeRepository = nodeRepository; this.zone = nodeRepository.zone(); this.adminClusterNodeArchitecture = Perm...
class CapacityPolicies { private final NodeRepository nodeRepository; private final Zone zone; private final StringFlag adminClusterNodeArchitecture; public CapacityPolicies(NodeRepository nodeRepository) { this.nodeRepository = nodeRepository; this.zone = nodeRepository.zone(); this.adminClusterNodeArchitecture = Perm...
Do we really want to kill the container on this?
void addInputMapping(String onnxName, String source) { assert(referencedEvaluator == null); inputSpecs.add(new InputSpec(onnxName, source)); }
assert(referencedEvaluator == null);
void addInputMapping(String onnxName, String source) { assert(referencedEvaluator == null); inputSpecs.add(new InputSpec(onnxName, source)); }
/**
 * Specification of a single ONNX model output: its name in the model, the name it is
 * exposed as, and optionally the tensor type it is expected to have.
 */
class OutputSpec {

    String onnxName;          // the output's name inside the ONNX model
    String outputAs;          // the name this output is exposed under
    TensorType expectedType;  // expected type of the output, or null when unspecified

    OutputSpec(String onnxName, String outputAs, TensorType expectedType) {
        this.onnxName = onnxName;
        this.outputAs = outputAs;
        this.expectedType = expectedType;
    }

    /** Creates a spec without an expected type. */
    OutputSpec(String onnxName, String outputAs) {
        this(onnxName, outputAs, null);
    }

}
This constraint is correct, but the node list needs to be grouped by version beforehand. Otherwise the maintainer will fail whenever an application is upgrading, and nodes in the same cluster are on different Vespa versions.
public static ResourceSnapshot from(List<Node> nodes, Instant timestamp, ZoneId zoneId) { Set<ApplicationId> applicationIds = nodes.stream() .filter(node -> node.owner().isPresent()) .map(node -> node.owner().get()) .collect(Collectors.toSet()); Set<Version> versions = nodes.stream() .map(Node::currentVersion) .collect...
if (versions.size() != 1) throw new IllegalArgumentException("List of nodes can only represent one version");
public static ResourceSnapshot from(List<Node> nodes, Instant timestamp, ZoneId zoneId) { Set<ApplicationId> applicationIds = nodes.stream() .filter(node -> node.owner().isPresent()) .map(node -> node.owner().get()) .collect(Collectors.toSet()); Set<Version> versions = nodes.stream() .map(Node::currentVersion) .collect...
class ResourceSnapshot { private final ApplicationId applicationId; private final NodeResources resources; private final Instant timestamp; private final ZoneId zoneId; private final Version version; public ResourceSnapshot(ApplicationId applicationId, NodeResources resources, Instant timestamp, ZoneId zoneId, Version ...
class ResourceSnapshot { private final ApplicationId applicationId; private final NodeResources resources; private final Instant timestamp; private final ZoneId zoneId; private final Version version; public ResourceSnapshot(ApplicationId applicationId, NodeResources resources, Instant timestamp, ZoneId zoneId, Version ...
Good catch. I created a grouping on major version in addition to architecture.
public static ResourceSnapshot from(List<Node> nodes, Instant timestamp, ZoneId zoneId) { Set<ApplicationId> applicationIds = nodes.stream() .filter(node -> node.owner().isPresent()) .map(node -> node.owner().get()) .collect(Collectors.toSet()); Set<Version> versions = nodes.stream() .map(Node::currentVersion) .collect...
if (versions.size() != 1) throw new IllegalArgumentException("List of nodes can only represent one version");
public static ResourceSnapshot from(List<Node> nodes, Instant timestamp, ZoneId zoneId) { Set<ApplicationId> applicationIds = nodes.stream() .filter(node -> node.owner().isPresent()) .map(node -> node.owner().get()) .collect(Collectors.toSet()); Set<Version> versions = nodes.stream() .map(Node::currentVersion) .collect...
class ResourceSnapshot { private final ApplicationId applicationId; private final NodeResources resources; private final Instant timestamp; private final ZoneId zoneId; private final Version version; public ResourceSnapshot(ApplicationId applicationId, NodeResources resources, Instant timestamp, ZoneId zoneId, Version ...
class ResourceSnapshot { private final ApplicationId applicationId; private final NodeResources resources; private final Instant timestamp; private final ZoneId zoneId; private final Version version; public ResourceSnapshot(ApplicationId applicationId, NodeResources resources, Instant timestamp, ZoneId zoneId, Version ...
```suggestion return (double) finalCount / clusterNodes.size(); ```
public double measurementsPerNode() { if (clusterNodes.size() == 0) return 0; return (double) totalMeasurementsIn(timeseries) / clusterNodes.size(); }
return (double) totalMeasurementsIn(timeseries) / clusterNodes.size();
public double measurementsPerNode() { if (clusterNodes.size() == 0) return 0; return (double) totalMeasurementsIn(timeseries) / clusterNodes.size(); }
class ClusterNodesTimeseries { private final NodeList clusterNodes; /** The measurements for all nodes in this snapshot */ private final List<NodeTimeseries> timeseries; private int initialCount, afterWarmupCount, afterStableCount, finalCount; public ClusterNodesTimeseries(Duration period, Cluster cluster, NodeList clu...
class ClusterNodesTimeseries { private final NodeList clusterNodes; /** The measurements for all nodes in this snapshot */ private final List<NodeTimeseries> timeseries; private int initialCount, afterWarmupCount, afterStableCount, finalCount; public ClusterNodesTimeseries(Duration period, Cluster cluster, NodeList clu...
I'm planning to remove the counters later.
public double measurementsPerNode() { if (clusterNodes.size() == 0) return 0; return (double) totalMeasurementsIn(timeseries) / clusterNodes.size(); }
return (double) totalMeasurementsIn(timeseries) / clusterNodes.size();
public double measurementsPerNode() { if (clusterNodes.size() == 0) return 0; return (double) totalMeasurementsIn(timeseries) / clusterNodes.size(); }
class ClusterNodesTimeseries { private final NodeList clusterNodes; /** The measurements for all nodes in this snapshot */ private final List<NodeTimeseries> timeseries; private int initialCount, afterWarmupCount, afterStableCount, finalCount; public ClusterNodesTimeseries(Duration period, Cluster cluster, NodeList clu...
class ClusterNodesTimeseries { private final NodeList clusterNodes; /** The measurements for all nodes in this snapshot */ private final List<NodeTimeseries> timeseries; private int initialCount, afterWarmupCount, afterStableCount, finalCount; public ClusterNodesTimeseries(Duration period, Cluster cluster, NodeList clu...
Use tenantSuffix = **"/" +** app.tenant().value() + "/", to ensure the suffix is actually a path component and doesn't match the random authority suffix?
public Optional<String> archiveUriFor(Node node) { if (node.allocation().isEmpty()) return Optional.empty(); ApplicationId app = node.allocation().get().owner(); return Optional.ofNullable(node.cloudAccount().isEnclave(zone) ? archiveUris.get().accountArchiveUris().get(node.cloudAccount()) : archiveUris.get().tenantArc...
String tenantSuffix = app.tenant().value() + "/";
public Optional<String> archiveUriFor(Node node) { if (node.allocation().isEmpty()) return Optional.empty(); ApplicationId app = node.allocation().get().owner(); return Optional.ofNullable(node.cloudAccount().isEnclave(zone) ? archiveUris.get().accountArchiveUris().get(node.cloudAccount()) : archiveUris.get().tenantArc...
class ArchiveUriManager { private static final Duration cacheTtl = Duration.ofMinutes(1); private final CuratorDb db; private final Zone zone; private final CachedSupplier<ArchiveUris> archiveUris; public ArchiveUriManager(CuratorDb db, Zone zone) { this.db = db; this.zone = zone; this.archiveUris = new CachedSupplier<...
class ArchiveUriManager { private static final Duration cacheTtl = Duration.ofMinutes(1); private final CuratorDb db; private final Zone zone; private final CachedSupplier<ArchiveUris> archiveUris; public ArchiveUriManager(CuratorDb db, Zone zone) { this.db = db; this.zone = zone; this.archiveUris = new CachedSupplier<...
wireguardTasksRun must be set to true if the container is restarted.
void doConverge(NodeAgentContext context) { NodeSpec node = context.node(); Optional<Container> container = getContainer(context); if (currentRebootGeneration < node.currentRebootGeneration()) currentRebootGeneration = node.currentRebootGeneration(); if (currentRestartGeneration.isPresent() != node.currentRestartGenera...
if (! wireguardTasksRun) wireguardTasks.forEach(task -> task.converge(context));
void doConverge(NodeAgentContext context) { NodeSpec node = context.node(); Optional<Container> container = getContainer(context); if (currentRebootGeneration < node.currentRebootGeneration()) currentRebootGeneration = node.currentRebootGeneration(); if (currentRestartGeneration.isPresent() != node.currentRestartGenera...
class NodeAgentImpl implements NodeAgent { private static final Duration DEFAULT_WARM_UP_DURATION = Duration.ofSeconds(90).minus(Duration.ofSeconds(1)); private static final Logger logger = Logger.getLogger(NodeAgentImpl.class.getName()); private final NodeAgentContextSupplier contextSupplier; private final NodeReposit...
class NodeAgentImpl implements NodeAgent { private static final Duration DEFAULT_WARM_UP_DURATION = Duration.ofSeconds(90).minus(Duration.ofSeconds(1)); private static final Logger logger = Logger.getLogger(NodeAgentImpl.class.getName()); private final NodeAgentContextSupplier contextSupplier; private final NodeReposit...
Good point, fixed.
public Optional<String> archiveUriFor(Node node) { if (node.allocation().isEmpty()) return Optional.empty(); ApplicationId app = node.allocation().get().owner(); return Optional.ofNullable(node.cloudAccount().isEnclave(zone) ? archiveUris.get().accountArchiveUris().get(node.cloudAccount()) : archiveUris.get().tenantArc...
String tenantSuffix = app.tenant().value() + "/";
public Optional<String> archiveUriFor(Node node) { if (node.allocation().isEmpty()) return Optional.empty(); ApplicationId app = node.allocation().get().owner(); return Optional.ofNullable(node.cloudAccount().isEnclave(zone) ? archiveUris.get().accountArchiveUris().get(node.cloudAccount()) : archiveUris.get().tenantArc...
class ArchiveUriManager { private static final Duration cacheTtl = Duration.ofMinutes(1); private final CuratorDb db; private final Zone zone; private final CachedSupplier<ArchiveUris> archiveUris; public ArchiveUriManager(CuratorDb db, Zone zone) { this.db = db; this.zone = zone; this.archiveUris = new CachedSupplier<...
class ArchiveUriManager { private static final Duration cacheTtl = Duration.ofMinutes(1); private final CuratorDb db; private final Zone zone; private final CachedSupplier<ArchiveUris> archiveUris; public ArchiveUriManager(CuratorDb db, Zone zone) { this.db = db; this.zone = zone; this.archiveUris = new CachedSupplier<...
It's a safe assumption to fail the component (graph) construction if config is invalid.
private void extractGlobalPhaseData(RankProfilesConfig rankProfilesConfig) { for (var rp : rankProfilesConfig.rankprofile()) { String name = rp.name(); Supplier<FunctionEvaluator> functionEvaluatorSource = null; int rerankCount = -1; List<String> needInputs = null; for (var prop : rp.fef().property()) { if (prop.name()...
logger.warning("bad vespa.globalphase.rerankcount '" + prop.value() +
private void extractGlobalPhaseData(RankProfilesConfig rankProfilesConfig) { for (var rp : rankProfilesConfig.rankprofile()) { String name = rp.name(); Supplier<FunctionEvaluator> functionEvaluatorSource = null; int rerankCount = -1; List<String> needInputs = null; for (var prop : rp.fef().property()) { if (prop.name()...
class RankProfilesEvaluator extends AbstractComponent { private final ModelsEvaluator evaluator; private static final Logger logger = Logger.getLogger(RankProfilesEvaluator.class.getName()); @Inject public RankProfilesEvaluator( RankProfilesConfig rankProfilesConfig, RankingConstantsConfig constantsConfig, RankingExpre...
class RankProfilesEvaluator extends AbstractComponent { private final ModelsEvaluator evaluator; private static final Logger logger = Logger.getLogger(RankProfilesEvaluator.class.getName()); @Inject public RankProfilesEvaluator( RankProfilesConfig rankProfilesConfig, RankingConstantsConfig constantsConfig, RankingExpre...
See comment above
private void extractGlobalPhaseData(RankProfilesConfig rankProfilesConfig) { for (var rp : rankProfilesConfig.rankprofile()) { String name = rp.name(); Supplier<FunctionEvaluator> functionEvaluatorSource = null; int rerankCount = -1; List<String> needInputs = null; for (var prop : rp.fef().property()) { if (prop.name()...
logger.warning("failed setting up global-phase for " + name + " because: " + e.getMessage());
private void extractGlobalPhaseData(RankProfilesConfig rankProfilesConfig) { for (var rp : rankProfilesConfig.rankprofile()) { String name = rp.name(); Supplier<FunctionEvaluator> functionEvaluatorSource = null; int rerankCount = -1; List<String> needInputs = null; for (var prop : rp.fef().property()) { if (prop.name()...
class RankProfilesEvaluator extends AbstractComponent { private final ModelsEvaluator evaluator; private static final Logger logger = Logger.getLogger(RankProfilesEvaluator.class.getName()); @Inject public RankProfilesEvaluator( RankProfilesConfig rankProfilesConfig, RankingConstantsConfig constantsConfig, RankingExpre...
class RankProfilesEvaluator extends AbstractComponent { private final ModelsEvaluator evaluator; private static final Logger logger = Logger.getLogger(RankProfilesEvaluator.class.getName()); @Inject public RankProfilesEvaluator( RankProfilesConfig rankProfilesConfig, RankingConstantsConfig constantsConfig, RankingExpre...
Consider moving the default rerankCount to `config-model`
public void process(Query query, Result result, String schema) { var proxy = factory.proxyForSchema(schema); String rankProfile = query.getRanking().getProfile(); var data = proxy.getGlobalPhaseData(rankProfile); if (data == null) return; var functionEvaluatorSource = data.functionEvaluatorSource(); var prepared = find...
if (rerankCount < 0)
public void process(Query query, Result result, String schema) { var proxy = factory.proxyForSchema(schema); String rankProfile = query.getRanking().getProfile(); var optData = proxy.getGlobalPhaseData(rankProfile); if (optData.isEmpty()) return; GlobalPhaseData data = optData.get(); var functionEvaluatorSource = data....
class GlobalPhaseRanker { private static final Logger logger = Logger.getLogger(GlobalPhaseRanker.class.getName()); private final RankProfilesEvaluatorFactory factory; @Inject public GlobalPhaseRanker(RankProfilesEvaluatorFactory factory) { this.factory = factory; logger.info("using factory: " + factory); } record Name...
class GlobalPhaseRanker { private static final Logger logger = Logger.getLogger(GlobalPhaseRanker.class.getName()); private final RankProfilesEvaluatorFactory factory; @Inject public GlobalPhaseRanker(RankProfilesEvaluatorFactory factory) { this.factory = factory; logger.info("using factory: " + factory); } record Name...
It's probably easier to find if it's here; I'll keep it as-is for now.
public void process(Query query, Result result, String schema) { var proxy = factory.proxyForSchema(schema); String rankProfile = query.getRanking().getProfile(); var data = proxy.getGlobalPhaseData(rankProfile); if (data == null) return; var functionEvaluatorSource = data.functionEvaluatorSource(); var prepared = find...
if (rerankCount < 0)
public void process(Query query, Result result, String schema) { var proxy = factory.proxyForSchema(schema); String rankProfile = query.getRanking().getProfile(); var optData = proxy.getGlobalPhaseData(rankProfile); if (optData.isEmpty()) return; GlobalPhaseData data = optData.get(); var functionEvaluatorSource = data....
class GlobalPhaseRanker { private static final Logger logger = Logger.getLogger(GlobalPhaseRanker.class.getName()); private final RankProfilesEvaluatorFactory factory; @Inject public GlobalPhaseRanker(RankProfilesEvaluatorFactory factory) { this.factory = factory; logger.info("using factory: " + factory); } record Name...
class GlobalPhaseRanker { private static final Logger logger = Logger.getLogger(GlobalPhaseRanker.class.getName()); private final RankProfilesEvaluatorFactory factory; @Inject public GlobalPhaseRanker(RankProfilesEvaluatorFactory factory) { this.factory = factory; logger.info("using factory: " + factory); } record Name...
So this won't work before all deployments are re-done, but I guess that's OK?
protected double maintain() { Map<ZoneId, Set<TenantName>> tenantsByZone = new HashMap<>(); Map<ZoneId, Set<CloudAccount>> accountsByZone = new HashMap<>(); controller().zoneRegistry().zonesIncludingSystem().reachable().zones().forEach(zone -> { tenantsByZone.put(zone.getVirtualId(), new HashSet<>(INFRASTRUCTURE_TENANT...
else tenantsByZone.get(deployment.zone()).add(instance.id().tenant());
protected double maintain() { Map<ZoneId, Set<TenantName>> tenantsByZone = new HashMap<>(); Map<ZoneId, Set<CloudAccount>> accountsByZone = new HashMap<>(); controller().zoneRegistry().zonesIncludingSystem().reachable().zones().forEach(zone -> { tenantsByZone.put(zone.getVirtualId(), new HashSet<>(INFRASTRUCTURE_TENANT...
class ArchiveUriUpdater extends ControllerMaintainer { private static final Set<TenantName> INFRASTRUCTURE_TENANTS = Set.of(SystemApplication.TENANT); private final ApplicationController applications; private final NodeRepository nodeRepository; private final CuratorArchiveBucketDb archiveBucketDb; private final ZoneRe...
class ArchiveUriUpdater extends ControllerMaintainer { private static final Set<TenantName> INFRASTRUCTURE_TENANTS = Set.of(SystemApplication.TENANT); private final ApplicationController applications; private final NodeRepository nodeRepository; private final CuratorArchiveBucketDb archiveBucketDb; private final ZoneRe...